// SPDX-License-Identifier: GPL-2.0
/*
* Generic Counter sysfs interface
* Copyright (C) 2020 William Breathitt Gray
*/
#include <linux/counter.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/kstrtox.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include "counter-sysfs.h"
static inline struct counter_device *counter_from_dev(struct device *dev)
{
return container_of(dev, struct counter_device, dev);
}
/**
* struct counter_attribute - Counter sysfs attribute
* @dev_attr: device attribute for sysfs
* @l: node to add Counter attribute to attribute group list
* @comp: Counter component callbacks and data
* @scope: Counter scope of the attribute
* @parent: pointer to the parent component
*/
struct counter_attribute {
struct device_attribute dev_attr;
struct list_head l;
struct counter_comp comp;
enum counter_scope scope;
void *parent;
};
#define to_counter_attribute(_dev_attr) \
container_of(_dev_attr, struct counter_attribute, dev_attr)
/**
* struct counter_attribute_group - container for attribute group
* @name: name of the attribute group
* @attr_list: list to keep track of created attributes
* @num_attr: number of attributes
*/
struct counter_attribute_group {
const char *name;
struct list_head attr_list;
size_t num_attr;
};
static const char *const counter_function_str[] = {
[COUNTER_FUNCTION_INCREASE] = "increase",
[COUNTER_FUNCTION_DECREASE] = "decrease",
[COUNTER_FUNCTION_PULSE_DIRECTION] = "pulse-direction",
[COUNTER_FUNCTION_QUADRATURE_X1_A] = "quadrature x1 a",
[COUNTER_FUNCTION_QUADRATURE_X1_B] = "quadrature x1 b",
[COUNTER_FUNCTION_QUADRATURE_X2_A] = "quadrature x2 a",
[COUNTER_FUNCTION_QUADRATURE_X2_B] = "quadrature x2 b",
[COUNTER_FUNCTION_QUADRATURE_X4] = "quadrature x4"
};
static const char *const counter_signal_value_str[] = {
[COUNTER_SIGNAL_LEVEL_LOW] = "low",
[COUNTER_SIGNAL_LEVEL_HIGH] = "high"
};
static const char *const counter_synapse_action_str[] = {
[COUNTER_SYNAPSE_ACTION_NONE] = "none",
[COUNTER_SYNAPSE_ACTION_RISING_EDGE] = "rising edge",
[COUNTER_SYNAPSE_ACTION_FALLING_EDGE] = "falling edge",
[COUNTER_SYNAPSE_ACTION_BOTH_EDGES] = "both edges"
};
static const char *const counter_count_direction_str[] = {
[COUNTER_COUNT_DIRECTION_FORWARD] = "forward",
[COUNTER_COUNT_DIRECTION_BACKWARD] = "backward"
};
static const char *const counter_count_mode_str[] = {
[COUNTER_COUNT_MODE_NORMAL] = "normal",
[COUNTER_COUNT_MODE_RANGE_LIMIT] = "range limit",
[COUNTER_COUNT_MODE_NON_RECYCLE] = "non-recycle",
[COUNTER_COUNT_MODE_MODULO_N] = "modulo-n",
[COUNTER_COUNT_MODE_INTERRUPT_ON_TERMINAL_COUNT] = "interrupt on terminal count",
[COUNTER_COUNT_MODE_HARDWARE_RETRIGGERABLE_ONESHOT] = "hardware retriggerable one-shot",
[COUNTER_COUNT_MODE_RATE_GENERATOR] = "rate generator",
[COUNTER_COUNT_MODE_SQUARE_WAVE_MODE] = "square wave mode",
[COUNTER_COUNT_MODE_SOFTWARE_TRIGGERED_STROBE] = "software triggered strobe",
[COUNTER_COUNT_MODE_HARDWARE_TRIGGERED_STROBE] = "hardware triggered strobe",
};
static const char *const counter_signal_polarity_str[] = {
[COUNTER_SIGNAL_POLARITY_POSITIVE] = "positive",
[COUNTER_SIGNAL_POLARITY_NEGATIVE] = "negative"
};
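/*
 * The show/store helpers below dispatch on the attribute's scope (device,
 * Signal, or Count) to the component callback registered by the driver, then
 * format the value for sysfs or parse the user's input accordingly.
 */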
static ssize_t counter_comp_u8_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
struct counter_device *const counter = counter_from_dev(dev);
int err;
u8 data = 0;
switch (a->scope) {
case COUNTER_SCOPE_DEVICE:
err = a->comp.device_u8_read(counter, &data);
break;
case COUNTER_SCOPE_SIGNAL:
err = a->comp.signal_u8_read(counter, a->parent, &data);
break;
case COUNTER_SCOPE_COUNT:
err = a->comp.count_u8_read(counter, a->parent, &data);
break;
default:
return -EINVAL;
}
if (err < 0)
return err;
if (a->comp.type == COUNTER_COMP_BOOL)
/* data should already be boolean but ensure just to be safe */
data = !!data;
return sysfs_emit(buf, "%u\n", (unsigned int)data);
}
static ssize_t counter_comp_u8_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
struct counter_device *const counter = counter_from_dev(dev);
int err;
bool bool_data = 0;
u8 data = 0;
if (a->comp.type == COUNTER_COMP_BOOL) {
err = kstrtobool(buf, &bool_data);
data = bool_data;
} else
err = kstrtou8(buf, 0, &data);
if (err < 0)
return err;
switch (a->scope) {
case COUNTER_SCOPE_DEVICE:
err = a->comp.device_u8_write(counter, data);
break;
case COUNTER_SCOPE_SIGNAL:
err = a->comp.signal_u8_write(counter, a->parent, data);
break;
case COUNTER_SCOPE_COUNT:
err = a->comp.count_u8_write(counter, a->parent, data);
break;
default:
return -EINVAL;
}
if (err < 0)
return err;
return len;
}
static ssize_t counter_comp_u32_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
struct counter_device *const counter = counter_from_dev(dev);
const struct counter_available *const avail = a->comp.priv;
int err;
u32 data = 0;
switch (a->scope) {
case COUNTER_SCOPE_DEVICE:
err = a->comp.device_u32_read(counter, &data);
break;
case COUNTER_SCOPE_SIGNAL:
err = a->comp.signal_u32_read(counter, a->parent, &data);
break;
case COUNTER_SCOPE_COUNT:
if (a->comp.type == COUNTER_COMP_SYNAPSE_ACTION)
err = a->comp.action_read(counter, a->parent,
a->comp.priv, &data);
else
err = a->comp.count_u32_read(counter, a->parent, &data);
break;
default:
return -EINVAL;
}
if (err < 0)
return err;
switch (a->comp.type) {
case COUNTER_COMP_FUNCTION:
return sysfs_emit(buf, "%s\n", counter_function_str[data]);
case COUNTER_COMP_SIGNAL_LEVEL:
return sysfs_emit(buf, "%s\n", counter_signal_value_str[data]);
case COUNTER_COMP_SYNAPSE_ACTION:
return sysfs_emit(buf, "%s\n", counter_synapse_action_str[data]);
case COUNTER_COMP_ENUM:
return sysfs_emit(buf, "%s\n", avail->strs[data]);
case COUNTER_COMP_COUNT_DIRECTION:
return sysfs_emit(buf, "%s\n", counter_count_direction_str[data]);
case COUNTER_COMP_COUNT_MODE:
return sysfs_emit(buf, "%s\n", counter_count_mode_str[data]);
case COUNTER_COMP_SIGNAL_POLARITY:
return sysfs_emit(buf, "%s\n", counter_signal_polarity_str[data]);
default:
return sysfs_emit(buf, "%u\n", (unsigned int)data);
}
}
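/*
 * Look up the enum value whose string representation matches the user input:
 * returns 0 with *enum_item set on a match, or -EINVAL if no supported value
 * matches.
 */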
static int counter_find_enum(u32 *const enum_item, const u32 *const enums,
const size_t num_enums, const char *const buf,
const char *const string_array[])
{
size_t index;
for (index = 0; index < num_enums; index++) {
*enum_item = enums[index];
if (sysfs_streq(buf, string_array[*enum_item]))
return 0;
}
return -EINVAL;
}
static ssize_t counter_comp_u32_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
struct counter_device *const counter = counter_from_dev(dev);
struct counter_count *const count = a->parent;
struct counter_synapse *const synapse = a->comp.priv;
const struct counter_available *const avail = a->comp.priv;
int err;
u32 data = 0;
switch (a->comp.type) {
case COUNTER_COMP_FUNCTION:
err = counter_find_enum(&data, count->functions_list,
count->num_functions, buf,
counter_function_str);
break;
case COUNTER_COMP_SYNAPSE_ACTION:
err = counter_find_enum(&data, synapse->actions_list,
synapse->num_actions, buf,
counter_synapse_action_str);
break;
case COUNTER_COMP_ENUM:
err = __sysfs_match_string(avail->strs, avail->num_items, buf);
data = err;
break;
case COUNTER_COMP_COUNT_MODE:
err = counter_find_enum(&data, avail->enums, avail->num_items,
buf, counter_count_mode_str);
break;
case COUNTER_COMP_SIGNAL_POLARITY:
err = counter_find_enum(&data, avail->enums, avail->num_items,
buf, counter_signal_polarity_str);
break;
default:
err = kstrtou32(buf, 0, &data);
break;
}
if (err < 0)
return err;
switch (a->scope) {
case COUNTER_SCOPE_DEVICE:
err = a->comp.device_u32_write(counter, data);
break;
case COUNTER_SCOPE_SIGNAL:
err = a->comp.signal_u32_write(counter, a->parent, data);
break;
case COUNTER_SCOPE_COUNT:
if (a->comp.type == COUNTER_COMP_SYNAPSE_ACTION)
err = a->comp.action_write(counter, count, synapse,
data);
else
err = a->comp.count_u32_write(counter, count, data);
break;
default:
return -EINVAL;
}
if (err < 0)
return err;
return len;
}
static ssize_t counter_comp_u64_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
struct counter_device *const counter = counter_from_dev(dev);
int err;
u64 data = 0;
switch (a->scope) {
case COUNTER_SCOPE_DEVICE:
err = a->comp.device_u64_read(counter, &data);
break;
case COUNTER_SCOPE_SIGNAL:
err = a->comp.signal_u64_read(counter, a->parent, &data);
break;
case COUNTER_SCOPE_COUNT:
err = a->comp.count_u64_read(counter, a->parent, &data);
break;
default:
return -EINVAL;
}
if (err < 0)
return err;
return sysfs_emit(buf, "%llu\n", (unsigned long long)data);
}
static ssize_t counter_comp_u64_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
struct counter_device *const counter = counter_from_dev(dev);
int err;
u64 data = 0;
err = kstrtou64(buf, 0, &data);
if (err < 0)
return err;
switch (a->scope) {
case COUNTER_SCOPE_DEVICE:
err = a->comp.device_u64_write(counter, data);
break;
case COUNTER_SCOPE_SIGNAL:
err = a->comp.signal_u64_write(counter, a->parent, data);
break;
case COUNTER_SCOPE_COUNT:
err = a->comp.count_u64_write(counter, a->parent, data);
break;
default:
return -EINVAL;
}
if (err < 0)
return err;
return len;
}
static ssize_t counter_comp_array_u32_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
struct counter_device *const counter = counter_from_dev(dev);
const struct counter_array *const element = a->comp.priv;
int err;
u32 data = 0;
if (a->scope != COUNTER_SCOPE_SIGNAL ||
element->type != COUNTER_COMP_SIGNAL_POLARITY)
return -EINVAL;
err = a->comp.signal_array_u32_read(counter, a->parent, element->idx,
&data);
if (err < 0)
return err;
return sysfs_emit(buf, "%s\n", counter_signal_polarity_str[data]);
}
static ssize_t counter_comp_array_u32_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
struct counter_device *const counter = counter_from_dev(dev);
const struct counter_array *const element = a->comp.priv;
int err;
u32 data = 0;
if (element->type != COUNTER_COMP_SIGNAL_POLARITY ||
a->scope != COUNTER_SCOPE_SIGNAL)
return -EINVAL;
err = counter_find_enum(&data, element->avail->enums,
element->avail->num_items, buf,
counter_signal_polarity_str);
if (err < 0)
return err;
err = a->comp.signal_array_u32_write(counter, a->parent, element->idx,
data);
if (err < 0)
return err;
return len;
}
static ssize_t counter_comp_array_u64_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
struct counter_device *const counter = counter_from_dev(dev);
const struct counter_array *const element = a->comp.priv;
int err;
u64 data = 0;
switch (a->scope) {
case COUNTER_SCOPE_DEVICE:
err = a->comp.device_array_u64_read(counter, element->idx,
&data);
break;
case COUNTER_SCOPE_SIGNAL:
err = a->comp.signal_array_u64_read(counter, a->parent,
element->idx, &data);
break;
case COUNTER_SCOPE_COUNT:
err = a->comp.count_array_u64_read(counter, a->parent,
element->idx, &data);
break;
default:
return -EINVAL;
}
if (err < 0)
return err;
return sysfs_emit(buf, "%llu\n", (unsigned long long)data);
}
static ssize_t counter_comp_array_u64_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
struct counter_device *const counter = counter_from_dev(dev);
const struct counter_array *const element = a->comp.priv;
int err;
u64 data = 0;
err = kstrtou64(buf, 0, &data);
if (err < 0)
return err;
switch (a->scope) {
case COUNTER_SCOPE_DEVICE:
err = a->comp.device_array_u64_write(counter, element->idx,
data);
break;
case COUNTER_SCOPE_SIGNAL:
err = a->comp.signal_array_u64_write(counter, a->parent,
element->idx, data);
break;
case COUNTER_SCOPE_COUNT:
err = a->comp.count_array_u64_write(counter, a->parent,
element->idx, data);
break;
default:
return -EINVAL;
}
if (err < 0)
return err;
return len;
}
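/*
 * Helpers for the "*_available" attributes: emit one supported value per
 * line, either by mapping each supported enum value to its string name or by
 * listing the driver-provided strings directly.
 */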
static ssize_t enums_available_show(const u32 *const enums,
const size_t num_enums,
const char *const strs[], char *buf)
{
size_t len = 0;
size_t index;
for (index = 0; index < num_enums; index++)
len += sysfs_emit_at(buf, len, "%s\n", strs[enums[index]]);
return len;
}
static ssize_t strs_available_show(const struct counter_available *const avail,
char *buf)
{
size_t len = 0;
size_t index;
for (index = 0; index < avail->num_items; index++)
len += sysfs_emit_at(buf, len, "%s\n", avail->strs[index]);
return len;
}
static ssize_t counter_comp_available_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
const struct counter_attribute *const a = to_counter_attribute(attr);
const struct counter_count *const count = a->parent;
const struct counter_synapse *const synapse = a->comp.priv;
const struct counter_available *const avail = a->comp.priv;
switch (a->comp.type) {
case COUNTER_COMP_FUNCTION:
return enums_available_show(count->functions_list,
count->num_functions,
counter_function_str, buf);
case COUNTER_COMP_SYNAPSE_ACTION:
return enums_available_show(synapse->actions_list,
synapse->num_actions,
counter_synapse_action_str, buf);
case COUNTER_COMP_ENUM:
return strs_available_show(avail, buf);
case COUNTER_COMP_COUNT_MODE:
return enums_available_show(avail->enums, avail->num_items,
counter_count_mode_str, buf);
default:
return -EINVAL;
}
}
static int counter_avail_attr_create(struct device *const dev,
struct counter_attribute_group *const group,
const struct counter_comp *const comp, void *const parent)
{
struct counter_attribute *counter_attr;
struct device_attribute *dev_attr;
counter_attr = devm_kzalloc(dev, sizeof(*counter_attr), GFP_KERNEL);
if (!counter_attr)
return -ENOMEM;
/* Configure Counter attribute */
counter_attr->comp.type = comp->type;
counter_attr->comp.priv = comp->priv;
counter_attr->parent = parent;
/* Initialize sysfs attribute */
dev_attr = &counter_attr->dev_attr;
sysfs_attr_init(&dev_attr->attr);
/* Configure device attribute */
dev_attr->attr.name = devm_kasprintf(dev, GFP_KERNEL, "%s_available",
comp->name);
if (!dev_attr->attr.name)
return -ENOMEM;
dev_attr->attr.mode = 0444;
dev_attr->show = counter_comp_available_show;
/* Store list node */
list_add(&counter_attr->l, &group->attr_list);
group->num_attr++;
return 0;
}
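/*
 * Allocate a Counter attribute and wire up its sysfs callbacks based on the
 * component type: a read callback makes the attribute readable (0444) and a
 * write callback makes it owner-writable (0200). Function, action, enum, and
 * count mode components also get a matching "*_available" attribute.
 */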
static int counter_attr_create(struct device *const dev,
struct counter_attribute_group *const group,
const struct counter_comp *const comp,
const enum counter_scope scope,
void *const parent)
{
const struct counter_array *const array = comp->priv;
struct counter_attribute *counter_attr;
struct device_attribute *dev_attr;
counter_attr = devm_kzalloc(dev, sizeof(*counter_attr), GFP_KERNEL);
if (!counter_attr)
return -ENOMEM;
/* Configure Counter attribute */
counter_attr->comp = *comp;
counter_attr->scope = scope;
counter_attr->parent = parent;
/* Configure device attribute */
dev_attr = &counter_attr->dev_attr;
sysfs_attr_init(&dev_attr->attr);
dev_attr->attr.name = comp->name;
switch (comp->type) {
case COUNTER_COMP_U8:
case COUNTER_COMP_BOOL:
if (comp->device_u8_read) {
dev_attr->attr.mode |= 0444;
dev_attr->show = counter_comp_u8_show;
}
if (comp->device_u8_write) {
dev_attr->attr.mode |= 0200;
dev_attr->store = counter_comp_u8_store;
}
break;
case COUNTER_COMP_SIGNAL_LEVEL:
case COUNTER_COMP_FUNCTION:
case COUNTER_COMP_SYNAPSE_ACTION:
case COUNTER_COMP_ENUM:
case COUNTER_COMP_COUNT_DIRECTION:
case COUNTER_COMP_COUNT_MODE:
case COUNTER_COMP_SIGNAL_POLARITY:
if (comp->device_u32_read) {
dev_attr->attr.mode |= 0444;
dev_attr->show = counter_comp_u32_show;
}
if (comp->device_u32_write) {
dev_attr->attr.mode |= 0200;
dev_attr->store = counter_comp_u32_store;
}
break;
case COUNTER_COMP_U64:
if (comp->device_u64_read) {
dev_attr->attr.mode |= 0444;
dev_attr->show = counter_comp_u64_show;
}
if (comp->device_u64_write) {
dev_attr->attr.mode |= 0200;
dev_attr->store = counter_comp_u64_store;
}
break;
case COUNTER_COMP_ARRAY:
switch (array->type) {
case COUNTER_COMP_SIGNAL_POLARITY:
if (comp->signal_array_u32_read) {
dev_attr->attr.mode |= 0444;
dev_attr->show = counter_comp_array_u32_show;
}
if (comp->signal_array_u32_write) {
dev_attr->attr.mode |= 0200;
dev_attr->store = counter_comp_array_u32_store;
}
break;
case COUNTER_COMP_U64:
if (comp->device_array_u64_read) {
dev_attr->attr.mode |= 0444;
dev_attr->show = counter_comp_array_u64_show;
}
if (comp->device_array_u64_write) {
dev_attr->attr.mode |= 0200;
dev_attr->store = counter_comp_array_u64_store;
}
break;
default:
return -EINVAL;
}
break;
default:
return -EINVAL;
}
/* Store list node */
list_add(&counter_attr->l, &group->attr_list);
group->num_attr++;
/* Create "*_available" attribute if needed */
switch (comp->type) {
case COUNTER_COMP_FUNCTION:
case COUNTER_COMP_SYNAPSE_ACTION:
case COUNTER_COMP_ENUM:
case COUNTER_COMP_COUNT_MODE:
return counter_avail_attr_create(dev, group, comp, parent);
default:
return 0;
}
}
static ssize_t counter_comp_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%s\n", to_counter_attribute(attr)->comp.name);
}
static int counter_name_attr_create(struct device *const dev,
struct counter_attribute_group *const group,
const char *const name)
{
struct counter_attribute *counter_attr;
counter_attr = devm_kzalloc(dev, sizeof(*counter_attr), GFP_KERNEL);
if (!counter_attr)
return -ENOMEM;
/* Configure Counter attribute */
counter_attr->comp.name = name;
/* Configure device attribute */
sysfs_attr_init(&counter_attr->dev_attr.attr);
counter_attr->dev_attr.attr.name = "name";
counter_attr->dev_attr.attr.mode = 0444;
counter_attr->dev_attr.show = counter_comp_name_show;
/* Store list node */
list_add(&counter_attr->l, &group->attr_list);
group->num_attr++;
return 0;
}
static ssize_t counter_comp_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const size_t id = (size_t)to_counter_attribute(attr)->comp.priv;
return sysfs_emit(buf, "%zu\n", id);
}
static int counter_comp_id_attr_create(struct device *const dev,
struct counter_attribute_group *const group,
const char *name, const size_t id)
{
struct counter_attribute *counter_attr;
/* Allocate Counter attribute */
counter_attr = devm_kzalloc(dev, sizeof(*counter_attr), GFP_KERNEL);
if (!counter_attr)
return -ENOMEM;
/* Generate component ID name */
name = devm_kasprintf(dev, GFP_KERNEL, "%s_component_id", name);
if (!name)
return -ENOMEM;
/* Configure Counter attribute */
counter_attr->comp.priv = (void *)id;
/* Configure device attribute */
sysfs_attr_init(&counter_attr->dev_attr.attr);
counter_attr->dev_attr.attr.name = name;
counter_attr->dev_attr.attr.mode = 0444;
counter_attr->dev_attr.show = counter_comp_id_show;
/* Store list node */
list_add(&counter_attr->l, &group->attr_list);
group->num_attr++;
return 0;
}
static int counter_ext_attrs_create(struct device *const dev,
struct counter_attribute_group *const group,
const struct counter_comp *const ext,
const enum counter_scope scope,
void *const parent, const size_t id)
{
int err;
/* Create main extension attribute */
err = counter_attr_create(dev, group, ext, scope, parent);
if (err < 0)
return err;
/* Create extension id attribute */
return counter_comp_id_attr_create(dev, group, ext->name, id);
}
static int counter_array_attrs_create(struct device *const dev,
struct counter_attribute_group *const group,
const struct counter_comp *const comp,
const enum counter_scope scope,
void *const parent, const size_t id)
{
const struct counter_array *const array = comp->priv;
struct counter_comp ext = *comp;
struct counter_array *element;
size_t idx;
int err;
/* Create an attribute for each array element */
for (idx = 0; idx < array->length; idx++) {
/* Generate array element attribute name */
ext.name = devm_kasprintf(dev, GFP_KERNEL, "%s%zu", comp->name,
idx);
if (!ext.name)
return -ENOMEM;
/* Allocate and configure array element */
element = devm_kzalloc(dev, sizeof(*element), GFP_KERNEL);
if (!element)
return -ENOMEM;
element->type = array->type;
element->avail = array->avail;
element->idx = idx;
ext.priv = element;
/* Create all attributes associated with the array element */
err = counter_ext_attrs_create(dev, group, &ext, scope, parent,
id + idx);
if (err < 0)
return err;
}
return 0;
}
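/*
 * Create sysfs attributes for each extension component: array extensions
 * expand to one attribute per element, and every extension additionally gets
 * a "*_component_id" attribute exposing its numeric id.
 */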
static int counter_sysfs_exts_add(struct device *const dev,
struct counter_attribute_group *const group,
const struct counter_comp *const exts,
const size_t num_ext,
const enum counter_scope scope,
void *const parent)
{
size_t i;
const struct counter_comp *ext;
int err;
size_t id = 0;
const struct counter_array *array;
/* Create attributes for each extension */
for (i = 0; i < num_ext; i++) {
ext = &exts[i];
if (ext->type == COUNTER_COMP_ARRAY) {
err = counter_array_attrs_create(dev, group, ext, scope,
parent, id);
array = ext->priv;
id += array->length;
} else {
err = counter_ext_attrs_create(dev, group, ext, scope,
parent, id);
id++;
}
if (err < 0)
return err;
}
return 0;
}
static struct counter_comp counter_signal_comp = {
.type = COUNTER_COMP_SIGNAL_LEVEL,
.name = "signal",
};
static int counter_signal_attrs_create(struct counter_device *const counter,
struct counter_attribute_group *const cattr_group,
struct counter_signal *const signal)
{
const enum counter_scope scope = COUNTER_SCOPE_SIGNAL;
struct device *const dev = &counter->dev;
int err;
struct counter_comp comp;
/* Create main Signal attribute */
comp = counter_signal_comp;
comp.signal_u32_read = counter->ops->signal_read;
err = counter_attr_create(dev, cattr_group, &comp, scope, signal);
if (err < 0)
return err;
/* Create Signal name attribute */
err = counter_name_attr_create(dev, cattr_group, signal->name);
if (err < 0)
return err;
/* Add Signal extensions */
return counter_sysfs_exts_add(dev, cattr_group, signal->ext,
signal->num_ext, scope, signal);
}
static int counter_sysfs_signals_add(struct counter_device *const counter,
struct counter_attribute_group *const groups)
{
size_t i;
int err;
/* Add each Signal */
for (i = 0; i < counter->num_signals; i++) {
/* Generate Signal attribute directory name */
groups[i].name = devm_kasprintf(&counter->dev, GFP_KERNEL,
"signal%zu", i);
if (!groups[i].name)
return -ENOMEM;
/* Create all attributes associated with Signal */
err = counter_signal_attrs_create(counter, groups + i,
counter->signals + i);
if (err < 0)
return err;
}
return 0;
}
static int counter_sysfs_synapses_add(struct counter_device *const counter,
struct counter_attribute_group *const group,
struct counter_count *const count)
{
size_t i;
/* Add each Synapse */
for (i = 0; i < count->num_synapses; i++) {
struct device *const dev = &counter->dev;
struct counter_synapse *synapse;
size_t id;
struct counter_comp comp;
int err;
synapse = count->synapses + i;
/* Generate Synapse action name */
id = synapse->signal - counter->signals;
comp.name = devm_kasprintf(dev, GFP_KERNEL, "signal%zu_action",
id);
if (!comp.name)
return -ENOMEM;
/* Create action attribute */
comp.type = COUNTER_COMP_SYNAPSE_ACTION;
comp.action_read = counter->ops->action_read;
comp.action_write = counter->ops->action_write;
comp.priv = synapse;
err = counter_attr_create(dev, group, &comp,
COUNTER_SCOPE_COUNT, count);
if (err < 0)
return err;
/* Create Synapse component ID attribute */
err = counter_comp_id_attr_create(dev, group, comp.name, i);
if (err < 0)
return err;
}
return 0;
}
static struct counter_comp counter_count_comp =
COUNTER_COMP_COUNT_U64("count", NULL, NULL);
static struct counter_comp counter_function_comp = {
.type = COUNTER_COMP_FUNCTION,
.name = "function",
};
static int counter_count_attrs_create(struct counter_device *const counter,
struct counter_attribute_group *const cattr_group,
struct counter_count *const count)
{
const enum counter_scope scope = COUNTER_SCOPE_COUNT;
struct device *const dev = &counter->dev;
int err;
struct counter_comp comp;
/* Create main Count attribute */
comp = counter_count_comp;
comp.count_u64_read = counter->ops->count_read;
comp.count_u64_write = counter->ops->count_write;
err = counter_attr_create(dev, cattr_group, &comp, scope, count);
if (err < 0)
return err;
/* Create Count name attribute */
err = counter_name_attr_create(dev, cattr_group, count->name);
if (err < 0)
return err;
/* Create Count function attribute */
comp = counter_function_comp;
comp.count_u32_read = counter->ops->function_read;
comp.count_u32_write = counter->ops->function_write;
err = counter_attr_create(dev, cattr_group, &comp, scope, count);
if (err < 0)
return err;
/* Add Count extensions */
return counter_sysfs_exts_add(dev, cattr_group, count->ext,
count->num_ext, scope, count);
}
static int counter_sysfs_counts_add(struct counter_device *const counter,
struct counter_attribute_group *const groups)
{
size_t i;
struct counter_count *count;
int err;
/* Add each Count */
for (i = 0; i < counter->num_counts; i++) {
count = counter->counts + i;
/* Generate Count attribute directory name */
groups[i].name = devm_kasprintf(&counter->dev, GFP_KERNEL,
"count%zu", i);
if (!groups[i].name)
return -ENOMEM;
/* Add sysfs attributes of the Synapses */
err = counter_sysfs_synapses_add(counter, groups + i, count);
if (err < 0)
return err;
/* Create all attributes associated with Count */
err = counter_count_attrs_create(counter, groups + i, count);
if (err < 0)
return err;
}
return 0;
}
static int counter_num_signals_read(struct counter_device *counter, u8 *val)
{
*val = counter->num_signals;
return 0;
}
static int counter_num_counts_read(struct counter_device *counter, u8 *val)
{
*val = counter->num_counts;
return 0;
}
static int counter_events_queue_size_read(struct counter_device *counter,
u64 *val)
{
*val = kfifo_size(&counter->events);
return 0;
}
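/*
 * Resizing the events queue allocates a new kfifo and swaps it in while
 * holding both the events_out mutex and the events_in spinlock, so neither
 * readers nor interrupt-context producers see a half-initialized queue; any
 * events pending in the old kfifo are discarded when it is freed.
 */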
static int counter_events_queue_size_write(struct counter_device *counter,
u64 val)
{
DECLARE_KFIFO_PTR(events, struct counter_event);
int err;
unsigned long flags;
/* Allocate new events queue */
err = kfifo_alloc(&events, val, GFP_KERNEL);
if (err)
return err;
/* Swap in new events queue */
mutex_lock(&counter->events_out_lock);
spin_lock_irqsave(&counter->events_in_lock, flags);
kfifo_free(&counter->events);
counter->events.kfifo = events.kfifo;
spin_unlock_irqrestore(&counter->events_in_lock, flags);
mutex_unlock(&counter->events_out_lock);
return 0;
}
static struct counter_comp counter_num_signals_comp =
COUNTER_COMP_DEVICE_U8("num_signals", counter_num_signals_read, NULL);
static struct counter_comp counter_num_counts_comp =
COUNTER_COMP_DEVICE_U8("num_counts", counter_num_counts_read, NULL);
static struct counter_comp counter_events_queue_size_comp =
COUNTER_COMP_DEVICE_U64("events_queue_size",
counter_events_queue_size_read,
counter_events_queue_size_write);
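/*
 * Populate the attribute groups: one group per Signal, one per Count, and a
 * final device-level group holding name, num_signals, num_counts,
 * events_queue_size, and any device extensions.
 */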
static int counter_sysfs_attr_add(struct counter_device *const counter,
struct counter_attribute_group *cattr_group)
{
const enum counter_scope scope = COUNTER_SCOPE_DEVICE;
struct device *const dev = &counter->dev;
int err;
/* Add Signals sysfs attributes */
err = counter_sysfs_signals_add(counter, cattr_group);
if (err < 0)
return err;
cattr_group += counter->num_signals;
/* Add Counts sysfs attributes */
err = counter_sysfs_counts_add(counter, cattr_group);
if (err < 0)
return err;
cattr_group += counter->num_counts;
/* Create name attribute */
err = counter_name_attr_create(dev, cattr_group, counter->name);
if (err < 0)
return err;
/* Create num_signals attribute */
err = counter_attr_create(dev, cattr_group, &counter_num_signals_comp,
scope, NULL);
if (err < 0)
return err;
/* Create num_counts attribute */
err = counter_attr_create(dev, cattr_group, &counter_num_counts_comp,
scope, NULL);
if (err < 0)
return err;
/* Create events_queue_size attribute */
err = counter_attr_create(dev, cattr_group,
&counter_events_queue_size_comp, scope, NULL);
if (err < 0)
return err;
/* Add device extensions */
return counter_sysfs_exts_add(dev, cattr_group, counter->ext,
counter->num_ext, scope, NULL);
}
/**
* counter_sysfs_add - Adds Counter sysfs attributes to the device structure
* @counter: Pointer to the Counter device structure
*
* Counter sysfs attributes are created and added to the respective device
* structure for later registration to the system. Resource-managed memory
* allocation is performed by this function, and this memory should be freed
* when no longer needed (automatically by a device_unregister call, or
* manually by a devres_release_all call).
*/
int counter_sysfs_add(struct counter_device *const counter)
{
struct device *const dev = &counter->dev;
const size_t num_groups = counter->num_signals + counter->num_counts + 1;
struct counter_attribute_group *cattr_groups;
size_t i, j;
int err;
struct attribute_group *groups;
struct counter_attribute *p;
/* Allocate space for attribute groups (signals, counts, and ext) */
cattr_groups = devm_kcalloc(dev, num_groups, sizeof(*cattr_groups),
GFP_KERNEL);
if (!cattr_groups)
return -ENOMEM;
/* Initialize attribute lists */
for (i = 0; i < num_groups; i++)
INIT_LIST_HEAD(&cattr_groups[i].attr_list);
/* Add Counter device sysfs attributes */
err = counter_sysfs_attr_add(counter, cattr_groups);
if (err < 0)
return err;
/* Allocate attribute group pointers for association with device */
dev->groups = devm_kcalloc(dev, num_groups + 1, sizeof(*dev->groups),
GFP_KERNEL);
if (!dev->groups)
return -ENOMEM;
/* Allocate space for attribute groups */
groups = devm_kcalloc(dev, num_groups, sizeof(*groups), GFP_KERNEL);
if (!groups)
return -ENOMEM;
/* Prepare each group of attributes for association */
for (i = 0; i < num_groups; i++) {
groups[i].name = cattr_groups[i].name;
/* Allocate space for attribute pointers */
groups[i].attrs = devm_kcalloc(dev,
cattr_groups[i].num_attr + 1,
sizeof(*groups[i].attrs),
GFP_KERNEL);
if (!groups[i].attrs)
return -ENOMEM;
/* Add attribute pointers to attribute group */
j = 0;
list_for_each_entry(p, &cattr_groups[i].attr_list, l)
groups[i].attrs[j++] = &p->dev_attr.attr;
/* Associate attribute group */
dev->groups[i] = &groups[i];
}
return 0;
}
/* End of drivers/counter/counter-sysfs.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 Microchip
*
* Author: Kamel Bouhara <[email protected]>
*/
#include <linux/clk.h>
#include <linux/counter.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <soc/at91/atmel_tcb.h>
#define ATMEL_TC_CMR_MASK (ATMEL_TC_LDRA_RISING | ATMEL_TC_LDRB_FALLING | \
ATMEL_TC_ETRGEDG_RISING | ATMEL_TC_LDBDIS | \
ATMEL_TC_LDBSTOP)
#define ATMEL_TC_QDEN BIT(8)
#define ATMEL_TC_POSEN BIT(9)
struct mchp_tc_data {
const struct atmel_tcb_config *tc_cfg;
struct regmap *regmap;
int qdec_mode;
int num_channels;
int channel[2];
};
static const enum counter_function mchp_tc_count_functions[] = {
COUNTER_FUNCTION_INCREASE,
COUNTER_FUNCTION_QUADRATURE_X4,
};
static const enum counter_synapse_action mchp_tc_synapse_actions[] = {
COUNTER_SYNAPSE_ACTION_NONE,
COUNTER_SYNAPSE_ACTION_RISING_EDGE,
COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
};
static struct counter_signal mchp_tc_count_signals[] = {
{
.id = 0,
.name = "Channel A",
},
{
.id = 1,
.name = "Channel B",
}
};
static struct counter_synapse mchp_tc_count_synapses[] = {
{
.actions_list = mchp_tc_synapse_actions,
.num_actions = ARRAY_SIZE(mchp_tc_synapse_actions),
.signal = &mchp_tc_count_signals[0]
},
{
.actions_list = mchp_tc_synapse_actions,
.num_actions = ARRAY_SIZE(mchp_tc_synapse_actions),
.signal = &mchp_tc_count_signals[1]
}
};
static int mchp_tc_count_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
struct mchp_tc_data *const priv = counter_priv(counter);
if (priv->qdec_mode)
*function = COUNTER_FUNCTION_QUADRATURE_X4;
else
*function = COUNTER_FUNCTION_INCREASE;
return 0;
}
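/*
 * Changing the count function reprograms BMR and the channel 0 CMR: increase
 * mode clears the position/QDEC bits and selects the fastest timer clock,
 * while quadrature x4 mode (channels 0 and 1 required) sets QDEN/POSEN and
 * triggers on XC0.
 */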
static int mchp_tc_count_function_write(struct counter_device *counter,
struct counter_count *count,
enum counter_function function)
{
struct mchp_tc_data *const priv = counter_priv(counter);
u32 bmr, cmr;
regmap_read(priv->regmap, ATMEL_TC_BMR, &bmr);
regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], CMR), &cmr);
/* Set capture mode */
cmr &= ~ATMEL_TC_WAVE;
switch (function) {
case COUNTER_FUNCTION_INCREASE:
priv->qdec_mode = 0;
/* Set the highest rate based on whether the SoC has a gclk or not */
bmr &= ~(ATMEL_TC_QDEN | ATMEL_TC_POSEN);
if (priv->tc_cfg->has_gclk)
cmr |= ATMEL_TC_TIMER_CLOCK2;
else
cmr |= ATMEL_TC_TIMER_CLOCK1;
/* Setup the period capture mode */
cmr |= ATMEL_TC_CMR_MASK;
cmr &= ~(ATMEL_TC_ABETRG | ATMEL_TC_XC0);
break;
case COUNTER_FUNCTION_QUADRATURE_X4:
if (!priv->tc_cfg->has_qdec)
return -EINVAL;
/* In QDEC mode, both channels 0 and 1 are required */
if (priv->num_channels < 2 || priv->channel[0] != 0 ||
priv->channel[1] != 1) {
pr_err("Invalid channels number or id for quadrature mode\n");
return -EINVAL;
}
priv->qdec_mode = 1;
bmr |= ATMEL_TC_QDEN | ATMEL_TC_POSEN;
cmr |= ATMEL_TC_ETRGEDG_RISING | ATMEL_TC_ABETRG | ATMEL_TC_XC0;
break;
default:
/* should never reach this path */
return -EINVAL;
}
regmap_write(priv->regmap, ATMEL_TC_BMR, bmr);
regmap_write(priv->regmap, ATMEL_TC_REG(priv->channel[0], CMR), cmr);
/* Enable clock and trigger counter */
regmap_write(priv->regmap, ATMEL_TC_REG(priv->channel[0], CCR),
ATMEL_TC_CLKEN | ATMEL_TC_SWTRG);
if (priv->qdec_mode) {
regmap_write(priv->regmap,
ATMEL_TC_REG(priv->channel[1], CMR), cmr);
regmap_write(priv->regmap,
ATMEL_TC_REG(priv->channel[1], CCR),
ATMEL_TC_CLKEN | ATMEL_TC_SWTRG);
}
return 0;
}
static int mchp_tc_count_signal_read(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_level *lvl)
{
struct mchp_tc_data *const priv = counter_priv(counter);
bool sigstatus;
u32 sr;
regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], SR), &sr);
if (signal->id == 1)
sigstatus = (sr & ATMEL_TC_MTIOB);
else
sigstatus = (sr & ATMEL_TC_MTIOA);
*lvl = sigstatus ? COUNTER_SIGNAL_LEVEL_HIGH : COUNTER_SIGNAL_LEVEL_LOW;
return 0;
}
static int mchp_tc_count_action_read(struct counter_device *counter,
struct counter_count *count,
struct counter_synapse *synapse,
enum counter_synapse_action *action)
{
struct mchp_tc_data *const priv = counter_priv(counter);
u32 cmr;
if (priv->qdec_mode) {
*action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
return 0;
}
/* Only TIOA signal is evaluated in non-QDEC mode */
if (synapse->signal->id != 0) {
*action = COUNTER_SYNAPSE_ACTION_NONE;
return 0;
}
regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], CMR), &cmr);
switch (cmr & ATMEL_TC_ETRGEDG) {
default:
*action = COUNTER_SYNAPSE_ACTION_NONE;
break;
case ATMEL_TC_ETRGEDG_RISING:
*action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
break;
case ATMEL_TC_ETRGEDG_FALLING:
*action = COUNTER_SYNAPSE_ACTION_FALLING_EDGE;
break;
case ATMEL_TC_ETRGEDG_BOTH:
*action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
break;
}
return 0;
}
static int mchp_tc_count_action_write(struct counter_device *counter,
struct counter_count *count,
struct counter_synapse *synapse,
enum counter_synapse_action action)
{
struct mchp_tc_data *const priv = counter_priv(counter);
u32 edge = ATMEL_TC_ETRGEDG_NONE;
/* QDEC mode is rising edge only; only TIOA handled in non-QDEC mode */
if (priv->qdec_mode || synapse->signal->id != 0)
return -EINVAL;
switch (action) {
case COUNTER_SYNAPSE_ACTION_NONE:
edge = ATMEL_TC_ETRGEDG_NONE;
break;
case COUNTER_SYNAPSE_ACTION_RISING_EDGE:
edge = ATMEL_TC_ETRGEDG_RISING;
break;
case COUNTER_SYNAPSE_ACTION_FALLING_EDGE:
edge = ATMEL_TC_ETRGEDG_FALLING;
break;
case COUNTER_SYNAPSE_ACTION_BOTH_EDGES:
edge = ATMEL_TC_ETRGEDG_BOTH;
break;
default:
/* should never reach this path */
return -EINVAL;
}
return regmap_write_bits(priv->regmap,
ATMEL_TC_REG(priv->channel[0], CMR),
ATMEL_TC_ETRGEDG, edge);
}
static int mchp_tc_count_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
struct mchp_tc_data *const priv = counter_priv(counter);
u32 cnt;
regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], CV), &cnt);
*val = cnt;
return 0;
}
static struct counter_count mchp_tc_counts[] = {
{
.id = 0,
.name = "Timer Counter",
.functions_list = mchp_tc_count_functions,
.num_functions = ARRAY_SIZE(mchp_tc_count_functions),
.synapses = mchp_tc_count_synapses,
.num_synapses = ARRAY_SIZE(mchp_tc_count_synapses),
},
};
static const struct counter_ops mchp_tc_ops = {
.signal_read = mchp_tc_count_signal_read,
.count_read = mchp_tc_count_read,
.function_read = mchp_tc_count_function_read,
.function_write = mchp_tc_count_function_write,
.action_read = mchp_tc_count_action_read,
.action_write = mchp_tc_count_action_write
};
static const struct atmel_tcb_config tcb_rm9200_config = {
.counter_width = 16,
};
static const struct atmel_tcb_config tcb_sam9x5_config = {
.counter_width = 32,
};
static const struct atmel_tcb_config tcb_sama5d2_config = {
.counter_width = 32,
.has_gclk = true,
.has_qdec = true,
};
static const struct atmel_tcb_config tcb_sama5d3_config = {
.counter_width = 32,
.has_qdec = true,
};
static const struct of_device_id atmel_tc_of_match[] = {
{ .compatible = "atmel,at91rm9200-tcb", .data = &tcb_rm9200_config, },
{ .compatible = "atmel,at91sam9x5-tcb", .data = &tcb_sam9x5_config, },
{ .compatible = "atmel,sama5d2-tcb", .data = &tcb_sama5d2_config, },
{ .compatible = "atmel,sama5d3-tcb", .data = &tcb_sama5d3_config, },
{ /* sentinel */ }
};
static void mchp_tc_clk_remove(void *ptr)
{
clk_disable_unprepare((struct clk *)ptr);
}
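/*
 * Probe flow: resolve the parent TCB configuration and regmap via syscon,
 * read the channel list from the "reg" property (at most two channels, both
 * required for QDEC), enable each channel clock with a devm cleanup action,
 * then register the counter device.
 */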
static int mchp_tc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct atmel_tcb_config *tcb_config;
const struct of_device_id *match;
struct counter_device *counter;
struct mchp_tc_data *priv;
char clk_name[7];
struct regmap *regmap;
struct clk *clk[3];
int channel;
int ret, i;
counter = devm_counter_alloc(&pdev->dev, sizeof(*priv));
if (!counter)
return -ENOMEM;
priv = counter_priv(counter);
match = of_match_node(atmel_tc_of_match, np->parent);
tcb_config = match->data;
if (!tcb_config) {
dev_err(&pdev->dev, "No matching parent node found\n");
return -ENODEV;
}
regmap = syscon_node_to_regmap(np->parent);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
/* The maximum number of channels is 2 when in QDEC mode */
priv->num_channels = of_property_count_u32_elems(np, "reg");
if (priv->num_channels < 0) {
dev_err(&pdev->dev, "Invalid or missing channel\n");
return -EINVAL;
}
/* Register channels and initialize clocks */
for (i = 0; i < priv->num_channels; i++) {
ret = of_property_read_u32_index(np, "reg", i, &channel);
if (ret < 0 || channel > 2)
return -ENODEV;
priv->channel[i] = channel;
snprintf(clk_name, sizeof(clk_name), "t%d_clk", channel);
clk[i] = of_clk_get_by_name(np->parent, clk_name);
if (IS_ERR(clk[i])) {
/* Fallback to t0_clk */
clk[i] = of_clk_get_by_name(np->parent, "t0_clk");
if (IS_ERR(clk[i]))
return PTR_ERR(clk[i]);
}
ret = clk_prepare_enable(clk[i]);
if (ret)
return ret;
ret = devm_add_action_or_reset(&pdev->dev,
mchp_tc_clk_remove,
clk[i]);
if (ret)
return ret;
dev_dbg(&pdev->dev,
"Initialized capture mode on channel %d\n",
channel);
}
priv->tc_cfg = tcb_config;
priv->regmap = regmap;
counter->name = dev_name(&pdev->dev);
counter->parent = &pdev->dev;
counter->ops = &mchp_tc_ops;
counter->num_counts = ARRAY_SIZE(mchp_tc_counts);
counter->counts = mchp_tc_counts;
counter->num_signals = ARRAY_SIZE(mchp_tc_count_signals);
counter->signals = mchp_tc_count_signals;
ret = devm_counter_add(&pdev->dev, counter);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Failed to add counter\n");
return 0;
}
static const struct of_device_id mchp_tc_dt_ids[] = {
{ .compatible = "microchip,tcb-capture", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mchp_tc_dt_ids);
static struct platform_driver mchp_tc_driver = {
.probe = mchp_tc_probe,
.driver = {
.name = "microchip-tcb-capture",
.of_match_table = mchp_tc_dt_ids,
},
};
module_platform_driver(mchp_tc_driver);
MODULE_AUTHOR("Kamel Bouhara <[email protected]>");
MODULE_DESCRIPTION("Microchip TCB Capture driver");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(COUNTER);
/* End of drivers/counter/microchip-tcb-capture.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ECAP Capture driver
*
* Copyright (C) 2022 Julien Panis <[email protected]>
*/
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/counter.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#define ECAP_DRV_NAME "ecap"
/* ECAP event IDs */
#define ECAP_CEVT1 0
#define ECAP_CEVT2 1
#define ECAP_CEVT3 2
#define ECAP_CEVT4 3
#define ECAP_CNTOVF 4
#define ECAP_CEVT_LAST ECAP_CEVT4
#define ECAP_NB_CEVT (ECAP_CEVT_LAST + 1)
#define ECAP_EVT_LAST ECAP_CNTOVF
#define ECAP_NB_EVT (ECAP_EVT_LAST + 1)
/* Registers */
#define ECAP_TSCNT_REG 0x00
#define ECAP_CAP_REG(i) (((i) << 2) + 0x08)
#define ECAP_ECCTL_REG 0x28
#define ECAP_CAPPOL_BIT(i) BIT((i) << 1)
#define ECAP_EV_MODE_MASK GENMASK(7, 0)
#define ECAP_CAPLDEN_BIT BIT(8)
#define ECAP_CONT_ONESHT_BIT BIT(16)
#define ECAP_STOPVALUE_MASK GENMASK(18, 17)
#define ECAP_TSCNTSTP_BIT BIT(20)
#define ECAP_SYNCO_DIS_MASK GENMASK(23, 22)
#define ECAP_CAP_APWM_BIT BIT(25)
#define ECAP_ECCTL_EN_MASK (ECAP_CAPLDEN_BIT | ECAP_TSCNTSTP_BIT)
#define ECAP_ECCTL_CFG_MASK (ECAP_SYNCO_DIS_MASK | ECAP_STOPVALUE_MASK \
| ECAP_ECCTL_EN_MASK | ECAP_CAP_APWM_BIT \
| ECAP_CONT_ONESHT_BIT)
#define ECAP_ECINT_EN_FLG_REG 0x2c
#define ECAP_EVT_EN_MASK GENMASK(ECAP_NB_EVT, ECAP_NB_CEVT)
#define ECAP_EVT_FLG_BIT(i) BIT((i) + 17)
#define ECAP_ECINT_CLR_FRC_REG 0x30
#define ECAP_INT_CLR_BIT BIT(0)
#define ECAP_EVT_CLR_BIT(i) BIT((i) + 1)
#define ECAP_EVT_CLR_MASK GENMASK(ECAP_NB_EVT, 0)
#define ECAP_PID_REG 0x5c
/* ECAP signals */
#define ECAP_CLOCK_SIG 0
#define ECAP_INPUT_SIG 1
static const struct regmap_config ecap_cnt_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = ECAP_PID_REG,
};
/**
* struct ecap_cnt_dev - device private data structure
* @enabled: device state
* @lock: synchronization lock to prevent I/O race conditions
* @clk: device clock
* @regmap: device register map
* @nb_ovf: number of overflows since capture start
* @pm_ctx: device context for PM operations
* @pm_ctx.ev_mode: event mode bits
* @pm_ctx.time_cntr: timestamp counter value
*/
struct ecap_cnt_dev {
bool enabled;
struct mutex lock;
struct clk *clk;
struct regmap *regmap;
atomic_t nb_ovf;
struct {
u8 ev_mode;
u32 time_cntr;
} pm_ctx;
};
static u8 ecap_cnt_capture_get_evmode(struct counter_device *counter)
{
struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
unsigned int regval;
pm_runtime_get_sync(counter->parent);
regmap_read(ecap_dev->regmap, ECAP_ECCTL_REG, &regval);
pm_runtime_put_sync(counter->parent);
return regval;
}
static void ecap_cnt_capture_set_evmode(struct counter_device *counter, u8 ev_mode)
{
struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
pm_runtime_get_sync(counter->parent);
regmap_update_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_EV_MODE_MASK, ev_mode);
pm_runtime_put_sync(counter->parent);
}
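/*
 * Enabling capture turns on the event interrupts and sets CAPLDEN/TSCNTSTP,
 * so capture-register loading and the timestamp counter start together;
 * disabling reverses both steps and drops the runtime PM reference.
 */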
static void ecap_cnt_capture_enable(struct counter_device *counter)
{
struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
pm_runtime_get_sync(counter->parent);
/* Enable interrupts on events */
regmap_update_bits(ecap_dev->regmap, ECAP_ECINT_EN_FLG_REG,
ECAP_EVT_EN_MASK, ECAP_EVT_EN_MASK);
/* Run counter */
regmap_update_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_ECCTL_CFG_MASK,
ECAP_SYNCO_DIS_MASK | ECAP_STOPVALUE_MASK | ECAP_ECCTL_EN_MASK);
}
static void ecap_cnt_capture_disable(struct counter_device *counter)
{
struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
/* Stop counter */
regmap_update_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_ECCTL_EN_MASK, 0);
/* Disable interrupts on events */
regmap_update_bits(ecap_dev->regmap, ECAP_ECINT_EN_FLG_REG, ECAP_EVT_EN_MASK, 0);
pm_runtime_put_sync(counter->parent);
}
static u32 ecap_cnt_count_get_val(struct counter_device *counter, unsigned int reg)
{
struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
unsigned int regval;
pm_runtime_get_sync(counter->parent);
regmap_read(ecap_dev->regmap, reg, &regval);
pm_runtime_put_sync(counter->parent);
return regval;
}
static void ecap_cnt_count_set_val(struct counter_device *counter, unsigned int reg, u32 val)
{
struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
pm_runtime_get_sync(counter->parent);
regmap_write(ecap_dev->regmap, reg, val);
pm_runtime_put_sync(counter->parent);
}
static int ecap_cnt_count_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
*val = ecap_cnt_count_get_val(counter, ECAP_TSCNT_REG);
return 0;
}
static int ecap_cnt_count_write(struct counter_device *counter,
struct counter_count *count, u64 val)
{
if (val > U32_MAX)
return -ERANGE;
ecap_cnt_count_set_val(counter, ECAP_TSCNT_REG, val);
return 0;
}
static int ecap_cnt_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
*function = COUNTER_FUNCTION_INCREASE;
return 0;
}
static int ecap_cnt_action_read(struct counter_device *counter,
struct counter_count *count,
struct counter_synapse *synapse,
enum counter_synapse_action *action)
{
*action = (synapse->signal->id == ECAP_CLOCK_SIG) ?
COUNTER_SYNAPSE_ACTION_RISING_EDGE :
COUNTER_SYNAPSE_ACTION_NONE;
return 0;
}
static int ecap_cnt_watch_validate(struct counter_device *counter,
const struct counter_watch *watch)
{
if (watch->channel > ECAP_CEVT_LAST)
return -EINVAL;
switch (watch->event) {
case COUNTER_EVENT_CAPTURE:
case COUNTER_EVENT_OVERFLOW:
return 0;
default:
return -EINVAL;
}
}
static int ecap_cnt_clk_get_freq(struct counter_device *counter,
struct counter_signal *signal, u64 *freq)
{
struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
*freq = clk_get_rate(ecap_dev->clk);
return 0;
}
static int ecap_cnt_pol_read(struct counter_device *counter,
struct counter_signal *signal,
size_t idx, enum counter_signal_polarity *pol)
{
struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
int bitval;
pm_runtime_get_sync(counter->parent);
bitval = regmap_test_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_CAPPOL_BIT(idx));
pm_runtime_put_sync(counter->parent);
*pol = bitval ? COUNTER_SIGNAL_POLARITY_NEGATIVE : COUNTER_SIGNAL_POLARITY_POSITIVE;
return 0;
}
static int ecap_cnt_pol_write(struct counter_device *counter,
struct counter_signal *signal,
size_t idx, enum counter_signal_polarity pol)
{
struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
pm_runtime_get_sync(counter->parent);
if (pol == COUNTER_SIGNAL_POLARITY_NEGATIVE)
regmap_set_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_CAPPOL_BIT(idx));
else
regmap_clear_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_CAPPOL_BIT(idx));
pm_runtime_put_sync(counter->parent);
return 0;
}
static int ecap_cnt_cap_read(struct counter_device *counter,
struct counter_count *count,
size_t idx, u64 *cap)
{
*cap = ecap_cnt_count_get_val(counter, ECAP_CAP_REG(idx));
return 0;
}
static int ecap_cnt_cap_write(struct counter_device *counter,
struct counter_count *count,
size_t idx, u64 cap)
{
if (cap > U32_MAX)
return -ERANGE;
ecap_cnt_count_set_val(counter, ECAP_CAP_REG(idx), cap);
return 0;
}
static int ecap_cnt_nb_ovf_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
*val = atomic_read(&ecap_dev->nb_ovf);
return 0;
}
static int ecap_cnt_nb_ovf_write(struct counter_device *counter,
struct counter_count *count, u64 val)
{
struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
if (val > U32_MAX)
return -ERANGE;
atomic_set(&ecap_dev->nb_ovf, val);
return 0;
}
static int ecap_cnt_ceiling_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
*val = U32_MAX;
return 0;
}
static int ecap_cnt_enable_read(struct counter_device *counter,
struct counter_count *count, u8 *enable)
{
struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
*enable = ecap_dev->enabled;
return 0;
}
static int ecap_cnt_enable_write(struct counter_device *counter,
struct counter_count *count, u8 enable)
{
struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
mutex_lock(&ecap_dev->lock);
if (enable == ecap_dev->enabled)
goto out;
if (enable)
ecap_cnt_capture_enable(counter);
else
ecap_cnt_capture_disable(counter);
ecap_dev->enabled = enable;
out:
mutex_unlock(&ecap_dev->lock);
return 0;
}
static const struct counter_ops ecap_cnt_ops = {
.count_read = ecap_cnt_count_read,
.count_write = ecap_cnt_count_write,
.function_read = ecap_cnt_function_read,
.action_read = ecap_cnt_action_read,
.watch_validate = ecap_cnt_watch_validate,
};
static const enum counter_function ecap_cnt_functions[] = {
COUNTER_FUNCTION_INCREASE,
};
static const enum counter_synapse_action ecap_cnt_clock_actions[] = {
COUNTER_SYNAPSE_ACTION_RISING_EDGE,
};
static const enum counter_synapse_action ecap_cnt_input_actions[] = {
COUNTER_SYNAPSE_ACTION_NONE,
};
static struct counter_comp ecap_cnt_clock_ext[] = {
COUNTER_COMP_SIGNAL_U64("frequency", ecap_cnt_clk_get_freq, NULL),
};
static const enum counter_signal_polarity ecap_cnt_pol_avail[] = {
COUNTER_SIGNAL_POLARITY_POSITIVE,
COUNTER_SIGNAL_POLARITY_NEGATIVE,
};
static DEFINE_COUNTER_AVAILABLE(ecap_cnt_pol_available, ecap_cnt_pol_avail);
static DEFINE_COUNTER_ARRAY_POLARITY(ecap_cnt_pol_array, ecap_cnt_pol_available, ECAP_NB_CEVT);
static struct counter_comp ecap_cnt_signal_ext[] = {
COUNTER_COMP_ARRAY_POLARITY(ecap_cnt_pol_read, ecap_cnt_pol_write, ecap_cnt_pol_array),
};
static struct counter_signal ecap_cnt_signals[] = {
{
.id = ECAP_CLOCK_SIG,
.name = "Clock Signal",
.ext = ecap_cnt_clock_ext,
.num_ext = ARRAY_SIZE(ecap_cnt_clock_ext),
},
{
.id = ECAP_INPUT_SIG,
.name = "Input Signal",
.ext = ecap_cnt_signal_ext,
.num_ext = ARRAY_SIZE(ecap_cnt_signal_ext),
},
};
static struct counter_synapse ecap_cnt_synapses[] = {
{
.actions_list = ecap_cnt_clock_actions,
.num_actions = ARRAY_SIZE(ecap_cnt_clock_actions),
.signal = &ecap_cnt_signals[ECAP_CLOCK_SIG],
},
{
.actions_list = ecap_cnt_input_actions,
.num_actions = ARRAY_SIZE(ecap_cnt_input_actions),
.signal = &ecap_cnt_signals[ECAP_INPUT_SIG],
},
};
static DEFINE_COUNTER_ARRAY_CAPTURE(ecap_cnt_cap_array, ECAP_NB_CEVT);
static struct counter_comp ecap_cnt_count_ext[] = {
COUNTER_COMP_ARRAY_CAPTURE(ecap_cnt_cap_read, ecap_cnt_cap_write, ecap_cnt_cap_array),
COUNTER_COMP_COUNT_U64("num_overflows", ecap_cnt_nb_ovf_read, ecap_cnt_nb_ovf_write),
COUNTER_COMP_CEILING(ecap_cnt_ceiling_read, NULL),
COUNTER_COMP_ENABLE(ecap_cnt_enable_read, ecap_cnt_enable_write),
};
static struct counter_count ecap_cnt_counts[] = {
{
.name = "Timestamp Counter",
.functions_list = ecap_cnt_functions,
.num_functions = ARRAY_SIZE(ecap_cnt_functions),
.synapses = ecap_cnt_synapses,
.num_synapses = ARRAY_SIZE(ecap_cnt_synapses),
.ext = ecap_cnt_count_ext,
.num_ext = ARRAY_SIZE(ecap_cnt_count_ext),
},
};
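/*
 * Interrupt handler: push a COUNTER_EVENT_CAPTURE for each CEVTx flag, count
 * overflows in nb_ovf and fan an OVERFLOW event out to every capture channel,
 * then acknowledge all handled flags plus the global interrupt bit.
 */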
static irqreturn_t ecap_cnt_isr(int irq, void *dev_id)
{
struct counter_device *counter_dev = dev_id;
struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev);
unsigned int clr = 0;
unsigned int flg;
int i;
regmap_read(ecap_dev->regmap, ECAP_ECINT_EN_FLG_REG, &flg);
/* Check capture events */
for (i = 0 ; i < ECAP_NB_CEVT ; i++) {
if (flg & ECAP_EVT_FLG_BIT(i)) {
counter_push_event(counter_dev, COUNTER_EVENT_CAPTURE, i);
clr |= ECAP_EVT_CLR_BIT(i);
}
}
/* Check counter overflow */
if (flg & ECAP_EVT_FLG_BIT(ECAP_CNTOVF)) {
atomic_inc(&ecap_dev->nb_ovf);
for (i = 0 ; i < ECAP_NB_CEVT ; i++)
counter_push_event(counter_dev, COUNTER_EVENT_OVERFLOW, i);
clr |= ECAP_EVT_CLR_BIT(ECAP_CNTOVF);
}
clr |= ECAP_INT_CLR_BIT;
regmap_update_bits(ecap_dev->regmap, ECAP_ECINT_CLR_FRC_REG, ECAP_EVT_CLR_MASK, clr);
return IRQ_HANDLED;
}
static void ecap_cnt_pm_disable(void *dev)
{
pm_runtime_disable(dev);
}
static int ecap_cnt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ecap_cnt_dev *ecap_dev;
struct counter_device *counter_dev;
void __iomem *mmio_base;
unsigned long clk_rate;
int ret;
counter_dev = devm_counter_alloc(dev, sizeof(*ecap_dev));
if (!counter_dev)
return -ENOMEM;
counter_dev->name = ECAP_DRV_NAME;
counter_dev->parent = dev;
counter_dev->ops = &ecap_cnt_ops;
counter_dev->signals = ecap_cnt_signals;
counter_dev->num_signals = ARRAY_SIZE(ecap_cnt_signals);
counter_dev->counts = ecap_cnt_counts;
counter_dev->num_counts = ARRAY_SIZE(ecap_cnt_counts);
ecap_dev = counter_priv(counter_dev);
mutex_init(&ecap_dev->lock);
ecap_dev->clk = devm_clk_get_enabled(dev, "fck");
if (IS_ERR(ecap_dev->clk))
return dev_err_probe(dev, PTR_ERR(ecap_dev->clk), "failed to get clock\n");
clk_rate = clk_get_rate(ecap_dev->clk);
if (!clk_rate) {
dev_err(dev, "failed to get clock rate\n");
return -EINVAL;
}
mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mmio_base))
return PTR_ERR(mmio_base);
ecap_dev->regmap = devm_regmap_init_mmio(dev, mmio_base, &ecap_cnt_regmap_config);
if (IS_ERR(ecap_dev->regmap))
return dev_err_probe(dev, PTR_ERR(ecap_dev->regmap), "failed to init regmap\n");
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to get irq\n");
ret = devm_request_irq(dev, ret, ecap_cnt_isr, 0, pdev->name, counter_dev);
if (ret)
return dev_err_probe(dev, ret, "failed to request irq\n");
platform_set_drvdata(pdev, counter_dev);
pm_runtime_enable(dev);
/* Register a cleanup callback to take care of disabling runtime PM */
ret = devm_add_action_or_reset(dev, ecap_cnt_pm_disable, dev);
if (ret)
return dev_err_probe(dev, ret, "failed to add pm disable action\n");
ret = devm_counter_add(dev, counter_dev);
if (ret)
return dev_err_probe(dev, ret, "failed to add counter\n");
return 0;
}
static int ecap_cnt_remove(struct platform_device *pdev)
{
struct counter_device *counter_dev = platform_get_drvdata(pdev);
struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev);
if (ecap_dev->enabled)
ecap_cnt_capture_disable(counter_dev);
return 0;
}
static int ecap_cnt_suspend(struct device *dev)
{
struct counter_device *counter_dev = dev_get_drvdata(dev);
struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev);
/* If eCAP is running, stop capture then save timestamp counter */
if (ecap_dev->enabled) {
/*
* Disabling capture has the following effects:
* - interrupts are disabled
* - loading of capture registers is disabled
* - timebase counter is stopped
*/
ecap_cnt_capture_disable(counter_dev);
ecap_dev->pm_ctx.time_cntr = ecap_cnt_count_get_val(counter_dev, ECAP_TSCNT_REG);
}
ecap_dev->pm_ctx.ev_mode = ecap_cnt_capture_get_evmode(counter_dev);
clk_disable(ecap_dev->clk);
return 0;
}
static int ecap_cnt_resume(struct device *dev)
{
struct counter_device *counter_dev = dev_get_drvdata(dev);
struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev);
clk_enable(ecap_dev->clk);
ecap_cnt_capture_set_evmode(counter_dev, ecap_dev->pm_ctx.ev_mode);
/* If eCAP was running, restore timestamp counter then run capture */
if (ecap_dev->enabled) {
ecap_cnt_count_set_val(counter_dev, ECAP_TSCNT_REG, ecap_dev->pm_ctx.time_cntr);
ecap_cnt_capture_enable(counter_dev);
}
return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(ecap_cnt_pm_ops, ecap_cnt_suspend, ecap_cnt_resume);
static const struct of_device_id ecap_cnt_of_match[] = {
{ .compatible = "ti,am62-ecap-capture" },
{},
};
MODULE_DEVICE_TABLE(of, ecap_cnt_of_match);
static struct platform_driver ecap_cnt_driver = {
.probe = ecap_cnt_probe,
.remove = ecap_cnt_remove,
.driver = {
.name = "ecap-capture",
.of_match_table = ecap_cnt_of_match,
.pm = pm_sleep_ptr(&ecap_cnt_pm_ops),
},
};
module_platform_driver(ecap_cnt_driver);
MODULE_DESCRIPTION("ECAP Capture driver");
MODULE_AUTHOR("Julien Panis <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(COUNTER);
/* End of drivers/counter/ti-ecap-capture.c (linux-master) */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2019 David Lechner <[email protected]>
*
* Counter driver for Texas Instruments Enhanced Quadrature Encoder Pulse (eQEP)
*/
#include <linux/bitops.h>
#include <linux/counter.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/types.h>
/* 32-bit registers */
#define QPOSCNT 0x0
#define QPOSINIT 0x4
#define QPOSMAX 0x8
#define QPOSCMP 0xc
#define QPOSILAT 0x10
#define QPOSSLAT 0x14
#define QPOSLAT 0x18
#define QUTMR 0x1c
#define QUPRD 0x20
/* 16-bit registers */
#define QWDTMR 0x0 /* 0x24 */
#define QWDPRD 0x2 /* 0x26 */
#define QDECCTL 0x4 /* 0x28 */
#define QEPCTL 0x6 /* 0x2a */
#define QCAPCTL 0x8 /* 0x2c */
#define QPOSCTL 0xa /* 0x2e */
#define QEINT 0xc /* 0x30 */
#define QFLG 0xe /* 0x32 */
#define QCLR 0x10 /* 0x34 */
#define QFRC 0x12 /* 0x36 */
#define QEPSTS 0x14 /* 0x38 */
#define QCTMR 0x16 /* 0x3a */
#define QCPRD 0x18 /* 0x3c */
#define QCTMRLAT 0x1a /* 0x3e */
#define QCPRDLAT 0x1c /* 0x40 */
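/*
 * Note: the 16-bit registers above are accessed through a second regmap
 * that ti_eqep_probe() creates at base + 0x24, so the offsets on the left
 * are relative to that block (absolute offsets shown in the comments).
 */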
#define QDECCTL_QSRC_SHIFT 14
#define QDECCTL_QSRC GENMASK(15, 14)
#define QDECCTL_SOEN BIT(13)
#define QDECCTL_SPSEL BIT(12)
#define QDECCTL_XCR BIT(11)
#define QDECCTL_SWAP BIT(10)
#define QDECCTL_IGATE BIT(9)
#define QDECCTL_QAP BIT(8)
#define QDECCTL_QBP BIT(7)
#define QDECCTL_QIP BIT(6)
#define QDECCTL_QSP BIT(5)
#define QEPCTL_FREE_SOFT GENMASK(15, 14)
#define QEPCTL_PCRM GENMASK(13, 12)
#define QEPCTL_SEI GENMASK(11, 10)
#define QEPCTL_IEI GENMASK(9, 8)
#define QEPCTL_SWI BIT(7)
#define QEPCTL_SEL BIT(6)
#define QEPCTL_IEL GENMASK(5, 4)
#define QEPCTL_PHEN BIT(3)
#define QEPCTL_QCLM BIT(2)
#define QEPCTL_UTE BIT(1)
#define QEPCTL_WDE BIT(0)
/* EQEP Inputs */
enum {
TI_EQEP_SIGNAL_QEPA, /* QEPA/XCLK */
TI_EQEP_SIGNAL_QEPB, /* QEPB/XDIR */
};
/* Position Counter Input Modes */
enum ti_eqep_count_func {
TI_EQEP_COUNT_FUNC_QUAD_COUNT,
TI_EQEP_COUNT_FUNC_DIR_COUNT,
TI_EQEP_COUNT_FUNC_UP_COUNT,
TI_EQEP_COUNT_FUNC_DOWN_COUNT,
};
struct ti_eqep_cnt {
struct counter_device counter;
struct regmap *regmap32;
struct regmap *regmap16;
};
static struct ti_eqep_cnt *ti_eqep_count_from_counter(struct counter_device *counter)
{
return counter_priv(counter);
}
static int ti_eqep_count_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
u32 cnt;
regmap_read(priv->regmap32, QPOSCNT, &cnt);
*val = cnt;
return 0;
}
static int ti_eqep_count_write(struct counter_device *counter,
struct counter_count *count, u64 val)
{
struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
u32 max;
regmap_read(priv->regmap32, QPOSMAX, &max);
if (val > max)
return -EINVAL;
return regmap_write(priv->regmap32, QPOSCNT, val);
}
static int ti_eqep_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
u32 qdecctl;
regmap_read(priv->regmap16, QDECCTL, &qdecctl);
switch ((qdecctl & QDECCTL_QSRC) >> QDECCTL_QSRC_SHIFT) {
case TI_EQEP_COUNT_FUNC_QUAD_COUNT:
*function = COUNTER_FUNCTION_QUADRATURE_X4;
break;
case TI_EQEP_COUNT_FUNC_DIR_COUNT:
*function = COUNTER_FUNCTION_PULSE_DIRECTION;
break;
case TI_EQEP_COUNT_FUNC_UP_COUNT:
*function = COUNTER_FUNCTION_INCREASE;
break;
case TI_EQEP_COUNT_FUNC_DOWN_COUNT:
*function = COUNTER_FUNCTION_DECREASE;
break;
}
return 0;
}
static int ti_eqep_function_write(struct counter_device *counter,
struct counter_count *count,
enum counter_function function)
{
struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
enum ti_eqep_count_func qsrc;
switch (function) {
case COUNTER_FUNCTION_QUADRATURE_X4:
qsrc = TI_EQEP_COUNT_FUNC_QUAD_COUNT;
break;
case COUNTER_FUNCTION_PULSE_DIRECTION:
qsrc = TI_EQEP_COUNT_FUNC_DIR_COUNT;
break;
case COUNTER_FUNCTION_INCREASE:
qsrc = TI_EQEP_COUNT_FUNC_UP_COUNT;
break;
case COUNTER_FUNCTION_DECREASE:
qsrc = TI_EQEP_COUNT_FUNC_DOWN_COUNT;
break;
default:
/* should never reach this path */
return -EINVAL;
}
return regmap_write_bits(priv->regmap16, QDECCTL, QDECCTL_QSRC,
qsrc << QDECCTL_QSRC_SHIFT);
}
static int ti_eqep_action_read(struct counter_device *counter,
struct counter_count *count,
struct counter_synapse *synapse,
enum counter_synapse_action *action)
{
struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
enum counter_function function;
u32 qdecctl;
int err;
err = ti_eqep_function_read(counter, count, &function);
if (err)
return err;
switch (function) {
case COUNTER_FUNCTION_QUADRATURE_X4:
/* In quadrature mode, the rising and falling edge of both
* QEPA and QEPB trigger QCLK.
*/
*action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
return 0;
case COUNTER_FUNCTION_PULSE_DIRECTION:
/* In direction-count mode only rising edge of QEPA is counted
* and QEPB gives direction.
*/
switch (synapse->signal->id) {
case TI_EQEP_SIGNAL_QEPA:
*action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
return 0;
case TI_EQEP_SIGNAL_QEPB:
*action = COUNTER_SYNAPSE_ACTION_NONE;
return 0;
default:
/* should never reach this path */
return -EINVAL;
}
case COUNTER_FUNCTION_INCREASE:
case COUNTER_FUNCTION_DECREASE:
/* In up/down-count modes only QEPA is counted and QEPB is not
* used.
*/
switch (synapse->signal->id) {
case TI_EQEP_SIGNAL_QEPA:
err = regmap_read(priv->regmap16, QDECCTL, &qdecctl);
if (err)
return err;
if (qdecctl & QDECCTL_XCR)
*action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
else
*action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
return 0;
case TI_EQEP_SIGNAL_QEPB:
*action = COUNTER_SYNAPSE_ACTION_NONE;
return 0;
default:
/* should never reach this path */
return -EINVAL;
}
default:
/* should never reach this path */
return -EINVAL;
}
}
static const struct counter_ops ti_eqep_counter_ops = {
.count_read = ti_eqep_count_read,
.count_write = ti_eqep_count_write,
.function_read = ti_eqep_function_read,
.function_write = ti_eqep_function_write,
.action_read = ti_eqep_action_read,
};
static int ti_eqep_position_ceiling_read(struct counter_device *counter,
struct counter_count *count,
u64 *ceiling)
{
struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
u32 qposmax;
regmap_read(priv->regmap32, QPOSMAX, &qposmax);
*ceiling = qposmax;
return 0;
}
static int ti_eqep_position_ceiling_write(struct counter_device *counter,
struct counter_count *count,
u64 ceiling)
{
struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
if (ceiling != (u32)ceiling)
return -ERANGE;
regmap_write(priv->regmap32, QPOSMAX, ceiling);
return 0;
}
static int ti_eqep_position_enable_read(struct counter_device *counter,
struct counter_count *count, u8 *enable)
{
struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
u32 qepctl;
regmap_read(priv->regmap16, QEPCTL, &qepctl);
*enable = !!(qepctl & QEPCTL_PHEN);
return 0;
}
static int ti_eqep_position_enable_write(struct counter_device *counter,
struct counter_count *count, u8 enable)
{
struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
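	/* writing all-ones is masked by QEPCTL_PHEN, so only the PHEN bit is set or cleared */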
regmap_write_bits(priv->regmap16, QEPCTL, QEPCTL_PHEN, enable ? -1 : 0);
return 0;
}
static struct counter_comp ti_eqep_position_ext[] = {
COUNTER_COMP_CEILING(ti_eqep_position_ceiling_read,
ti_eqep_position_ceiling_write),
COUNTER_COMP_ENABLE(ti_eqep_position_enable_read,
ti_eqep_position_enable_write),
};
static struct counter_signal ti_eqep_signals[] = {
[TI_EQEP_SIGNAL_QEPA] = {
.id = TI_EQEP_SIGNAL_QEPA,
.name = "QEPA"
},
[TI_EQEP_SIGNAL_QEPB] = {
.id = TI_EQEP_SIGNAL_QEPB,
.name = "QEPB"
},
};
static const enum counter_function ti_eqep_position_functions[] = {
COUNTER_FUNCTION_QUADRATURE_X4,
COUNTER_FUNCTION_PULSE_DIRECTION,
COUNTER_FUNCTION_INCREASE,
COUNTER_FUNCTION_DECREASE,
};
static const enum counter_synapse_action ti_eqep_position_synapse_actions[] = {
COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
COUNTER_SYNAPSE_ACTION_RISING_EDGE,
COUNTER_SYNAPSE_ACTION_NONE,
};
static struct counter_synapse ti_eqep_position_synapses[] = {
{
.actions_list = ti_eqep_position_synapse_actions,
.num_actions = ARRAY_SIZE(ti_eqep_position_synapse_actions),
.signal = &ti_eqep_signals[TI_EQEP_SIGNAL_QEPA],
},
{
.actions_list = ti_eqep_position_synapse_actions,
.num_actions = ARRAY_SIZE(ti_eqep_position_synapse_actions),
.signal = &ti_eqep_signals[TI_EQEP_SIGNAL_QEPB],
},
};
static struct counter_count ti_eqep_counts[] = {
{
.id = 0,
.name = "QPOSCNT",
.functions_list = ti_eqep_position_functions,
.num_functions = ARRAY_SIZE(ti_eqep_position_functions),
.synapses = ti_eqep_position_synapses,
.num_synapses = ARRAY_SIZE(ti_eqep_position_synapses),
.ext = ti_eqep_position_ext,
.num_ext = ARRAY_SIZE(ti_eqep_position_ext),
},
};
static const struct regmap_config ti_eqep_regmap32_config = {
.name = "32-bit",
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = QUPRD,
};
static const struct regmap_config ti_eqep_regmap16_config = {
.name = "16-bit",
.reg_bits = 16,
.val_bits = 16,
.reg_stride = 2,
.max_register = QCPRDLAT,
};
static int ti_eqep_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct counter_device *counter;
struct ti_eqep_cnt *priv;
void __iomem *base;
int err;
counter = devm_counter_alloc(dev, sizeof(*priv));
if (!counter)
return -ENOMEM;
priv = counter_priv(counter);
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
priv->regmap32 = devm_regmap_init_mmio(dev, base,
&ti_eqep_regmap32_config);
if (IS_ERR(priv->regmap32))
return PTR_ERR(priv->regmap32);
priv->regmap16 = devm_regmap_init_mmio(dev, base + 0x24,
&ti_eqep_regmap16_config);
if (IS_ERR(priv->regmap16))
return PTR_ERR(priv->regmap16);
counter->name = dev_name(dev);
counter->parent = dev;
counter->ops = &ti_eqep_counter_ops;
counter->counts = ti_eqep_counts;
counter->num_counts = ARRAY_SIZE(ti_eqep_counts);
counter->signals = ti_eqep_signals;
counter->num_signals = ARRAY_SIZE(ti_eqep_signals);
platform_set_drvdata(pdev, counter);
/*
* Need to make sure power is turned on. On AM33xx, this comes from the
* parent PWMSS bus driver. On AM17xx, this comes from the PSC power
* domain.
*/
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
err = counter_add(counter);
if (err < 0) {
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
return err;
}
return 0;
}
static int ti_eqep_remove(struct platform_device *pdev)
{
struct counter_device *counter = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
counter_unregister(counter);
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
return 0;
}
static const struct of_device_id ti_eqep_of_match[] = {
{ .compatible = "ti,am3352-eqep", },
{ },
};
MODULE_DEVICE_TABLE(of, ti_eqep_of_match);
static struct platform_driver ti_eqep_driver = {
.probe = ti_eqep_probe,
.remove = ti_eqep_remove,
.driver = {
.name = "ti-eqep-cnt",
.of_match_table = ti_eqep_of_match,
},
};
module_platform_driver(ti_eqep_driver);
MODULE_AUTHOR("David Lechner <[email protected]>");
MODULE_DESCRIPTION("TI eQEP counter driver");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(COUNTER);
| linux-master | drivers/counter/ti-eqep.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Intel Quadrature Encoder Peripheral driver
*
* Copyright (C) 2019-2021 Intel Corporation
*
* Author: Felipe Balbi (Intel)
* Author: Jarkko Nikula <[email protected]>
* Author: Raymond Tan <[email protected]>
*/
#include <linux/counter.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#define INTEL_QEPCON 0x00
#define INTEL_QEPFLT 0x04
#define INTEL_QEPCOUNT 0x08
#define INTEL_QEPMAX 0x0c
#define INTEL_QEPWDT 0x10
#define INTEL_QEPCAPDIV 0x14
#define INTEL_QEPCNTR 0x18
#define INTEL_QEPCAPBUF 0x1c
#define INTEL_QEPINT_STAT 0x20
#define INTEL_QEPINT_MASK 0x24
/* QEPCON */
#define INTEL_QEPCON_EN BIT(0)
#define INTEL_QEPCON_FLT_EN BIT(1)
#define INTEL_QEPCON_EDGE_A BIT(2)
#define INTEL_QEPCON_EDGE_B BIT(3)
#define INTEL_QEPCON_EDGE_INDX BIT(4)
#define INTEL_QEPCON_SWPAB BIT(5)
#define INTEL_QEPCON_OP_MODE BIT(6)
#define INTEL_QEPCON_PH_ERR BIT(7)
#define INTEL_QEPCON_COUNT_RST_MODE BIT(8)
#define INTEL_QEPCON_INDX_GATING_MASK GENMASK(10, 9)
#define INTEL_QEPCON_INDX_GATING(n) (((n) & 3) << 9)
#define INTEL_QEPCON_INDX_PAL_PBL INTEL_QEPCON_INDX_GATING(0)
#define INTEL_QEPCON_INDX_PAL_PBH INTEL_QEPCON_INDX_GATING(1)
#define INTEL_QEPCON_INDX_PAH_PBL INTEL_QEPCON_INDX_GATING(2)
#define INTEL_QEPCON_INDX_PAH_PBH INTEL_QEPCON_INDX_GATING(3)
#define INTEL_QEPCON_CAP_MODE BIT(11)
#define INTEL_QEPCON_FIFO_THRE_MASK GENMASK(14, 12)
#define INTEL_QEPCON_FIFO_THRE(n) ((((n) - 1) & 7) << 12)
#define INTEL_QEPCON_FIFO_EMPTY BIT(15)
/* QEPFLT */
#define INTEL_QEPFLT_MAX_COUNT(n) ((n) & 0x1fffff)
/* QEPINT */
#define INTEL_QEPINT_FIFOCRIT BIT(5)
#define INTEL_QEPINT_FIFOENTRY BIT(4)
#define INTEL_QEPINT_QEPDIR BIT(3)
#define INTEL_QEPINT_QEPRST_UP BIT(2)
#define INTEL_QEPINT_QEPRST_DOWN BIT(1)
#define INTEL_QEPINT_WDT BIT(0)
#define INTEL_QEPINT_MASK_ALL GENMASK(5, 0)
#define INTEL_QEP_CLK_PERIOD_NS 10
struct intel_qep {
struct mutex lock;
struct device *dev;
void __iomem *regs;
bool enabled;
/* Context save registers */
u32 qepcon;
u32 qepflt;
u32 qepmax;
};
static inline u32 intel_qep_readl(struct intel_qep *qep, u32 offset)
{
return readl(qep->regs + offset);
}
static inline void intel_qep_writel(struct intel_qep *qep,
u32 offset, u32 value)
{
writel(value, qep->regs + offset);
}
static void intel_qep_init(struct intel_qep *qep)
{
u32 reg;
reg = intel_qep_readl(qep, INTEL_QEPCON);
reg &= ~INTEL_QEPCON_EN;
intel_qep_writel(qep, INTEL_QEPCON, reg);
qep->enabled = false;
/*
* Make sure peripheral is disabled by flushing the write with
* a dummy read
*/
reg = intel_qep_readl(qep, INTEL_QEPCON);
reg &= ~(INTEL_QEPCON_OP_MODE | INTEL_QEPCON_FLT_EN);
reg |= INTEL_QEPCON_EDGE_A | INTEL_QEPCON_EDGE_B |
INTEL_QEPCON_EDGE_INDX | INTEL_QEPCON_COUNT_RST_MODE;
intel_qep_writel(qep, INTEL_QEPCON, reg);
intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);
}
static int intel_qep_count_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
struct intel_qep *const qep = counter_priv(counter);
pm_runtime_get_sync(qep->dev);
*val = intel_qep_readl(qep, INTEL_QEPCOUNT);
pm_runtime_put(qep->dev);
return 0;
}
static const enum counter_function intel_qep_count_functions[] = {
COUNTER_FUNCTION_QUADRATURE_X4,
};
static int intel_qep_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
*function = COUNTER_FUNCTION_QUADRATURE_X4;
return 0;
}
static const enum counter_synapse_action intel_qep_synapse_actions[] = {
COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
};
static int intel_qep_action_read(struct counter_device *counter,
struct counter_count *count,
struct counter_synapse *synapse,
enum counter_synapse_action *action)
{
*action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
return 0;
}
static const struct counter_ops intel_qep_counter_ops = {
.count_read = intel_qep_count_read,
.function_read = intel_qep_function_read,
.action_read = intel_qep_action_read,
};
#define INTEL_QEP_SIGNAL(_id, _name) { \
.id = (_id), \
.name = (_name), \
}
static struct counter_signal intel_qep_signals[] = {
INTEL_QEP_SIGNAL(0, "Phase A"),
INTEL_QEP_SIGNAL(1, "Phase B"),
INTEL_QEP_SIGNAL(2, "Index"),
};
#define INTEL_QEP_SYNAPSE(_signal_id) { \
.actions_list = intel_qep_synapse_actions, \
.num_actions = ARRAY_SIZE(intel_qep_synapse_actions), \
.signal = &intel_qep_signals[(_signal_id)], \
}
static struct counter_synapse intel_qep_count_synapses[] = {
INTEL_QEP_SYNAPSE(0),
INTEL_QEP_SYNAPSE(1),
INTEL_QEP_SYNAPSE(2),
};
static int intel_qep_ceiling_read(struct counter_device *counter,
struct counter_count *count, u64 *ceiling)
{
struct intel_qep *qep = counter_priv(counter);
pm_runtime_get_sync(qep->dev);
*ceiling = intel_qep_readl(qep, INTEL_QEPMAX);
pm_runtime_put(qep->dev);
return 0;
}
static int intel_qep_ceiling_write(struct counter_device *counter,
struct counter_count *count, u64 max)
{
struct intel_qep *qep = counter_priv(counter);
int ret = 0;
/* Intel QEP ceiling configuration only supports 32-bit values */
if (max != (u32)max)
return -ERANGE;
mutex_lock(&qep->lock);
if (qep->enabled) {
ret = -EBUSY;
goto out;
}
pm_runtime_get_sync(qep->dev);
intel_qep_writel(qep, INTEL_QEPMAX, max);
pm_runtime_put(qep->dev);
out:
mutex_unlock(&qep->lock);
return ret;
}
static int intel_qep_enable_read(struct counter_device *counter,
struct counter_count *count, u8 *enable)
{
struct intel_qep *qep = counter_priv(counter);
*enable = qep->enabled;
return 0;
}
static int intel_qep_enable_write(struct counter_device *counter,
struct counter_count *count, u8 val)
{
struct intel_qep *qep = counter_priv(counter);
u32 reg;
bool changed;
mutex_lock(&qep->lock);
changed = val ^ qep->enabled;
if (!changed)
goto out;
pm_runtime_get_sync(qep->dev);
reg = intel_qep_readl(qep, INTEL_QEPCON);
if (val) {
/* Enable peripheral and keep runtime PM always on */
reg |= INTEL_QEPCON_EN;
pm_runtime_get_noresume(qep->dev);
} else {
/* Let runtime PM be idle and disable peripheral */
pm_runtime_put_noidle(qep->dev);
reg &= ~INTEL_QEPCON_EN;
}
intel_qep_writel(qep, INTEL_QEPCON, reg);
pm_runtime_put(qep->dev);
qep->enabled = val;
out:
mutex_unlock(&qep->lock);
return 0;
}
static int intel_qep_spike_filter_ns_read(struct counter_device *counter,
struct counter_count *count,
u64 *length)
{
struct intel_qep *qep = counter_priv(counter);
u32 reg;
pm_runtime_get_sync(qep->dev);
reg = intel_qep_readl(qep, INTEL_QEPCON);
if (!(reg & INTEL_QEPCON_FLT_EN)) {
pm_runtime_put(qep->dev);
return 0;
}
reg = INTEL_QEPFLT_MAX_COUNT(intel_qep_readl(qep, INTEL_QEPFLT));
pm_runtime_put(qep->dev);
*length = (reg + 2) * INTEL_QEP_CLK_PERIOD_NS;
return 0;
}
static int intel_qep_spike_filter_ns_write(struct counter_device *counter,
struct counter_count *count,
u64 length)
{
struct intel_qep *qep = counter_priv(counter);
u32 reg;
bool enable;
int ret = 0;
/*
* Spike filter length is (MAX_COUNT + 2) clock periods.
* Disable filter when userspace writes 0, enable for valid
* nanoseconds values and error out otherwise.
*/
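	/*
	 * Worked example (illustrative): writing 100 ns with the 10 ns clock
	 * gives length = 10 periods, MAX_COUNT = 8 is programmed, and a
	 * read back reports (8 + 2) * 10 ns = 100 ns again.
	 */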
do_div(length, INTEL_QEP_CLK_PERIOD_NS);
if (length == 0) {
enable = false;
length = 0;
} else if (length >= 2) {
enable = true;
length -= 2;
} else {
return -EINVAL;
}
if (length > INTEL_QEPFLT_MAX_COUNT(length))
return -ERANGE;
mutex_lock(&qep->lock);
if (qep->enabled) {
ret = -EBUSY;
goto out;
}
pm_runtime_get_sync(qep->dev);
reg = intel_qep_readl(qep, INTEL_QEPCON);
if (enable)
reg |= INTEL_QEPCON_FLT_EN;
else
reg &= ~INTEL_QEPCON_FLT_EN;
intel_qep_writel(qep, INTEL_QEPFLT, length);
intel_qep_writel(qep, INTEL_QEPCON, reg);
pm_runtime_put(qep->dev);
out:
mutex_unlock(&qep->lock);
return ret;
}
static int intel_qep_preset_enable_read(struct counter_device *counter,
struct counter_count *count,
u8 *preset_enable)
{
struct intel_qep *qep = counter_priv(counter);
u32 reg;
pm_runtime_get_sync(qep->dev);
reg = intel_qep_readl(qep, INTEL_QEPCON);
pm_runtime_put(qep->dev);
*preset_enable = !(reg & INTEL_QEPCON_COUNT_RST_MODE);
return 0;
}
static int intel_qep_preset_enable_write(struct counter_device *counter,
struct counter_count *count, u8 val)
{
struct intel_qep *qep = counter_priv(counter);
u32 reg;
int ret = 0;
mutex_lock(&qep->lock);
if (qep->enabled) {
ret = -EBUSY;
goto out;
}
pm_runtime_get_sync(qep->dev);
reg = intel_qep_readl(qep, INTEL_QEPCON);
if (val)
reg &= ~INTEL_QEPCON_COUNT_RST_MODE;
else
reg |= INTEL_QEPCON_COUNT_RST_MODE;
intel_qep_writel(qep, INTEL_QEPCON, reg);
pm_runtime_put(qep->dev);
out:
mutex_unlock(&qep->lock);
return ret;
}
static struct counter_comp intel_qep_count_ext[] = {
COUNTER_COMP_ENABLE(intel_qep_enable_read, intel_qep_enable_write),
COUNTER_COMP_CEILING(intel_qep_ceiling_read, intel_qep_ceiling_write),
COUNTER_COMP_PRESET_ENABLE(intel_qep_preset_enable_read,
intel_qep_preset_enable_write),
COUNTER_COMP_COUNT_U64("spike_filter_ns",
intel_qep_spike_filter_ns_read,
intel_qep_spike_filter_ns_write),
};
static struct counter_count intel_qep_counter_count[] = {
{
.id = 0,
.name = "Channel 1 Count",
.functions_list = intel_qep_count_functions,
.num_functions = ARRAY_SIZE(intel_qep_count_functions),
.synapses = intel_qep_count_synapses,
.num_synapses = ARRAY_SIZE(intel_qep_count_synapses),
.ext = intel_qep_count_ext,
.num_ext = ARRAY_SIZE(intel_qep_count_ext),
},
};
static int intel_qep_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
struct counter_device *counter;
struct intel_qep *qep;
struct device *dev = &pci->dev;
void __iomem *regs;
int ret;
counter = devm_counter_alloc(dev, sizeof(*qep));
if (!counter)
return -ENOMEM;
qep = counter_priv(counter);
ret = pcim_enable_device(pci);
if (ret)
return ret;
pci_set_master(pci);
ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci));
if (ret)
return ret;
regs = pcim_iomap_table(pci)[0];
if (!regs)
return -ENOMEM;
qep->dev = dev;
qep->regs = regs;
mutex_init(&qep->lock);
intel_qep_init(qep);
pci_set_drvdata(pci, qep);
counter->name = pci_name(pci);
counter->parent = dev;
counter->ops = &intel_qep_counter_ops;
counter->counts = intel_qep_counter_count;
counter->num_counts = ARRAY_SIZE(intel_qep_counter_count);
counter->signals = intel_qep_signals;
counter->num_signals = ARRAY_SIZE(intel_qep_signals);
qep->enabled = false;
pm_runtime_put(dev);
pm_runtime_allow(dev);
ret = devm_counter_add(&pci->dev, counter);
if (ret < 0)
return dev_err_probe(&pci->dev, ret, "Failed to add counter\n");
return 0;
}
static void intel_qep_remove(struct pci_dev *pci)
{
struct intel_qep *qep = pci_get_drvdata(pci);
struct device *dev = &pci->dev;
pm_runtime_forbid(dev);
if (!qep->enabled)
pm_runtime_get(dev);
intel_qep_writel(qep, INTEL_QEPCON, 0);
}
static int __maybe_unused intel_qep_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct intel_qep *qep = pci_get_drvdata(pdev);
qep->qepcon = intel_qep_readl(qep, INTEL_QEPCON);
qep->qepflt = intel_qep_readl(qep, INTEL_QEPFLT);
qep->qepmax = intel_qep_readl(qep, INTEL_QEPMAX);
return 0;
}
static int __maybe_unused intel_qep_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct intel_qep *qep = pci_get_drvdata(pdev);
/*
* Make sure peripheral is disabled when restoring registers and
* control register bits that are writable only when the peripheral
* is disabled
*/
intel_qep_writel(qep, INTEL_QEPCON, 0);
intel_qep_readl(qep, INTEL_QEPCON);
intel_qep_writel(qep, INTEL_QEPFLT, qep->qepflt);
intel_qep_writel(qep, INTEL_QEPMAX, qep->qepmax);
intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);
/* Restore all other control register bits except enable status */
intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon & ~INTEL_QEPCON_EN);
intel_qep_readl(qep, INTEL_QEPCON);
/* Restore enable status */
intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon);
return 0;
}
static UNIVERSAL_DEV_PM_OPS(intel_qep_pm_ops,
intel_qep_suspend, intel_qep_resume, NULL);
static const struct pci_device_id intel_qep_id_table[] = {
/* EHL */
{ PCI_VDEVICE(INTEL, 0x4bc3), },
{ PCI_VDEVICE(INTEL, 0x4b81), },
{ PCI_VDEVICE(INTEL, 0x4b82), },
{ PCI_VDEVICE(INTEL, 0x4b83), },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, intel_qep_id_table);
static struct pci_driver intel_qep_driver = {
.name = "intel-qep",
.id_table = intel_qep_id_table,
.probe = intel_qep_probe,
.remove = intel_qep_remove,
.driver = {
.pm = &intel_qep_pm_ops,
}
};
module_pci_driver(intel_qep_driver);
MODULE_AUTHOR("Felipe Balbi (Intel)");
MODULE_AUTHOR("Jarkko Nikula <[email protected]>");
MODULE_AUTHOR("Raymond Tan <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel Quadrature Encoder Peripheral driver");
MODULE_IMPORT_NS(COUNTER);
| linux-master | drivers/counter/intel-qep.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Intel 8254 Programmable Interval Timer
* Copyright (C) William Breathitt Gray
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/counter.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/i8254.h>
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/regmap.h>
#include <asm/unaligned.h>
#define I8254_COUNTER_REG(_counter) (_counter)
#define I8254_CONTROL_REG 0x3
#define I8254_SC GENMASK(7, 6)
#define I8254_RW GENMASK(5, 4)
#define I8254_M GENMASK(3, 1)
#define I8254_CONTROL(_sc, _rw, _m) \
(u8_encode_bits(_sc, I8254_SC) | u8_encode_bits(_rw, I8254_RW) | \
u8_encode_bits(_m, I8254_M))
#define I8254_RW_TWO_BYTE 0x3
#define I8254_MODE_INTERRUPT_ON_TERMINAL_COUNT 0
#define I8254_MODE_HARDWARE_RETRIGGERABLE_ONESHOT 1
#define I8254_MODE_RATE_GENERATOR 2
#define I8254_MODE_SQUARE_WAVE_MODE 3
#define I8254_MODE_SOFTWARE_TRIGGERED_STROBE 4
#define I8254_MODE_HARDWARE_TRIGGERED_STROBE 5
#define I8254_COUNTER_LATCH(_counter) I8254_CONTROL(_counter, 0x0, 0x0)
#define I8254_PROGRAM_COUNTER(_counter, _mode) I8254_CONTROL(_counter, I8254_RW_TWO_BYTE, _mode)
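/*
 * For example (illustrative), I8254_PROGRAM_COUNTER(2, I8254_MODE_RATE_GENERATOR)
 * encodes SC = 2, RW = 3 (two-byte access) and M = 2, i.e. control word 0xb4.
 */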
#define I8254_NUM_COUNTERS 3
/**
* struct i8254 - I8254 device private data structure
* @lock: synchronization lock to prevent I/O race conditions
* @preset: array of Counter Register states
* @out_mode: array of mode configuration states
* @map: Regmap for the device
*/
struct i8254 {
struct mutex lock;
u16 preset[I8254_NUM_COUNTERS];
u8 out_mode[I8254_NUM_COUNTERS];
struct regmap *map;
};
static int i8254_count_read(struct counter_device *const counter, struct counter_count *const count,
u64 *const val)
{
struct i8254 *const priv = counter_priv(counter);
int ret;
u8 value[2];
mutex_lock(&priv->lock);
ret = regmap_write(priv->map, I8254_CONTROL_REG, I8254_COUNTER_LATCH(count->id));
if (ret) {
mutex_unlock(&priv->lock);
return ret;
}
ret = regmap_noinc_read(priv->map, I8254_COUNTER_REG(count->id), value, sizeof(value));
if (ret) {
mutex_unlock(&priv->lock);
return ret;
}
mutex_unlock(&priv->lock);
*val = get_unaligned_le16(value);
return ret;
}
static int i8254_function_read(struct counter_device *const counter,
struct counter_count *const count,
enum counter_function *const function)
{
*function = COUNTER_FUNCTION_DECREASE;
return 0;
}
#define I8254_SYNAPSES_PER_COUNT 2
#define I8254_SIGNAL_ID_CLK 0
#define I8254_SIGNAL_ID_GATE 1
static int i8254_action_read(struct counter_device *const counter,
struct counter_count *const count,
struct counter_synapse *const synapse,
enum counter_synapse_action *const action)
{
struct i8254 *const priv = counter_priv(counter);
switch (synapse->signal->id % I8254_SYNAPSES_PER_COUNT) {
case I8254_SIGNAL_ID_CLK:
*action = COUNTER_SYNAPSE_ACTION_FALLING_EDGE;
return 0;
case I8254_SIGNAL_ID_GATE:
switch (priv->out_mode[count->id]) {
case I8254_MODE_HARDWARE_RETRIGGERABLE_ONESHOT:
case I8254_MODE_RATE_GENERATOR:
case I8254_MODE_SQUARE_WAVE_MODE:
case I8254_MODE_HARDWARE_TRIGGERED_STROBE:
*action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
return 0;
default:
*action = COUNTER_SYNAPSE_ACTION_NONE;
return 0;
}
default:
/* should never reach this path */
return -EINVAL;
}
}
static int i8254_count_ceiling_read(struct counter_device *const counter,
struct counter_count *const count, u64 *const ceiling)
{
struct i8254 *const priv = counter_priv(counter);
mutex_lock(&priv->lock);
switch (priv->out_mode[count->id]) {
case I8254_MODE_RATE_GENERATOR:
/* Rate Generator decrements 0 by one and the counter "wraps around" */
*ceiling = (priv->preset[count->id] == 0) ? U16_MAX : priv->preset[count->id];
break;
case I8254_MODE_SQUARE_WAVE_MODE:
if (priv->preset[count->id] % 2)
*ceiling = priv->preset[count->id] - 1;
else if (priv->preset[count->id] == 0)
/* Square Wave Mode decrements 0 by two and the counter "wraps around" */
*ceiling = U16_MAX - 1;
else
*ceiling = priv->preset[count->id];
break;
default:
*ceiling = U16_MAX;
break;
}
mutex_unlock(&priv->lock);
return 0;
}
static int i8254_count_mode_read(struct counter_device *const counter,
struct counter_count *const count,
enum counter_count_mode *const count_mode)
{
const struct i8254 *const priv = counter_priv(counter);
switch (priv->out_mode[count->id]) {
case I8254_MODE_INTERRUPT_ON_TERMINAL_COUNT:
*count_mode = COUNTER_COUNT_MODE_INTERRUPT_ON_TERMINAL_COUNT;
return 0;
case I8254_MODE_HARDWARE_RETRIGGERABLE_ONESHOT:
*count_mode = COUNTER_COUNT_MODE_HARDWARE_RETRIGGERABLE_ONESHOT;
return 0;
case I8254_MODE_RATE_GENERATOR:
*count_mode = COUNTER_COUNT_MODE_RATE_GENERATOR;
return 0;
case I8254_MODE_SQUARE_WAVE_MODE:
*count_mode = COUNTER_COUNT_MODE_SQUARE_WAVE_MODE;
return 0;
case I8254_MODE_SOFTWARE_TRIGGERED_STROBE:
*count_mode = COUNTER_COUNT_MODE_SOFTWARE_TRIGGERED_STROBE;
return 0;
case I8254_MODE_HARDWARE_TRIGGERED_STROBE:
*count_mode = COUNTER_COUNT_MODE_HARDWARE_TRIGGERED_STROBE;
return 0;
default:
/* should never reach this path */
return -EINVAL;
}
}
static int i8254_count_mode_write(struct counter_device *const counter,
struct counter_count *const count,
const enum counter_count_mode count_mode)
{
struct i8254 *const priv = counter_priv(counter);
u8 out_mode;
int ret;
switch (count_mode) {
case COUNTER_COUNT_MODE_INTERRUPT_ON_TERMINAL_COUNT:
out_mode = I8254_MODE_INTERRUPT_ON_TERMINAL_COUNT;
break;
case COUNTER_COUNT_MODE_HARDWARE_RETRIGGERABLE_ONESHOT:
out_mode = I8254_MODE_HARDWARE_RETRIGGERABLE_ONESHOT;
break;
case COUNTER_COUNT_MODE_RATE_GENERATOR:
out_mode = I8254_MODE_RATE_GENERATOR;
break;
case COUNTER_COUNT_MODE_SQUARE_WAVE_MODE:
out_mode = I8254_MODE_SQUARE_WAVE_MODE;
break;
case COUNTER_COUNT_MODE_SOFTWARE_TRIGGERED_STROBE:
out_mode = I8254_MODE_SOFTWARE_TRIGGERED_STROBE;
break;
case COUNTER_COUNT_MODE_HARDWARE_TRIGGERED_STROBE:
out_mode = I8254_MODE_HARDWARE_TRIGGERED_STROBE;
break;
default:
/* should never reach this path */
return -EINVAL;
}
mutex_lock(&priv->lock);
/* Counter Register is cleared when the counter is programmed */
priv->preset[count->id] = 0;
priv->out_mode[count->id] = out_mode;
ret = regmap_write(priv->map, I8254_CONTROL_REG,
I8254_PROGRAM_COUNTER(count->id, out_mode));
mutex_unlock(&priv->lock);
return ret;
}
static int i8254_count_floor_read(struct counter_device *const counter,
struct counter_count *const count, u64 *const floor)
{
struct i8254 *const priv = counter_priv(counter);
mutex_lock(&priv->lock);
switch (priv->out_mode[count->id]) {
case I8254_MODE_RATE_GENERATOR:
/* counter is always reloaded after 1, but 0 is a possible reload value */
*floor = (priv->preset[count->id] == 0) ? 0 : 1;
break;
case I8254_MODE_SQUARE_WAVE_MODE:
/* counter is always reloaded after 2 for even preset values */
*floor = (priv->preset[count->id] % 2 || priv->preset[count->id] == 0) ? 0 : 2;
break;
default:
*floor = 0;
break;
}
mutex_unlock(&priv->lock);
return 0;
}
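/*
 * Illustrative example for the two special modes handled above: with a
 * preset of 10, Rate Generator mode counts from 10 down to 1 (ceiling 10,
 * floor 1), while Square Wave mode decrements by two from 10 down to 2
 * (ceiling 10, floor 2).
 */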
static int i8254_count_preset_read(struct counter_device *const counter,
struct counter_count *const count, u64 *const preset)
{
const struct i8254 *const priv = counter_priv(counter);
*preset = priv->preset[count->id];
return 0;
}
static int i8254_count_preset_write(struct counter_device *const counter,
struct counter_count *const count, const u64 preset)
{
struct i8254 *const priv = counter_priv(counter);
int ret;
u8 value[2];
if (preset > U16_MAX)
return -ERANGE;
mutex_lock(&priv->lock);
if (priv->out_mode[count->id] == I8254_MODE_RATE_GENERATOR ||
priv->out_mode[count->id] == I8254_MODE_SQUARE_WAVE_MODE) {
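		/* a count of 1 is not usable in Rate Generator or Square Wave mode */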
if (preset == 1) {
mutex_unlock(&priv->lock);
return -EINVAL;
}
}
priv->preset[count->id] = preset;
put_unaligned_le16(preset, value);
ret = regmap_noinc_write(priv->map, I8254_COUNTER_REG(count->id), value, 2);
mutex_unlock(&priv->lock);
return ret;
}
static int i8254_init_hw(struct regmap *const map)
{
unsigned long i;
int ret;
for (i = 0; i < I8254_NUM_COUNTERS; i++) {
/* Initialize each counter to Mode 0 */
ret = regmap_write(map, I8254_CONTROL_REG,
I8254_PROGRAM_COUNTER(i, I8254_MODE_INTERRUPT_ON_TERMINAL_COUNT));
if (ret)
return ret;
}
return 0;
}
static const struct counter_ops i8254_ops = {
.count_read = i8254_count_read,
.function_read = i8254_function_read,
.action_read = i8254_action_read,
};
#define I8254_SIGNAL(_id, _name) { \
.id = (_id), \
.name = (_name), \
}
static struct counter_signal i8254_signals[] = {
I8254_SIGNAL(0, "CLK 0"), I8254_SIGNAL(1, "GATE 0"),
I8254_SIGNAL(2, "CLK 1"), I8254_SIGNAL(3, "GATE 1"),
I8254_SIGNAL(4, "CLK 2"), I8254_SIGNAL(5, "GATE 2"),
};
static const enum counter_synapse_action i8254_clk_actions[] = {
COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
};
static const enum counter_synapse_action i8254_gate_actions[] = {
COUNTER_SYNAPSE_ACTION_NONE,
COUNTER_SYNAPSE_ACTION_RISING_EDGE,
};
#define I8254_SYNAPSES_BASE(_id) ((_id) * I8254_SYNAPSES_PER_COUNT)
#define I8254_SYNAPSE_CLK(_id) { \
.actions_list = i8254_clk_actions, \
.num_actions = ARRAY_SIZE(i8254_clk_actions), \
.signal = &i8254_signals[I8254_SYNAPSES_BASE(_id) + 0], \
}
#define I8254_SYNAPSE_GATE(_id) { \
.actions_list = i8254_gate_actions, \
.num_actions = ARRAY_SIZE(i8254_gate_actions), \
.signal = &i8254_signals[I8254_SYNAPSES_BASE(_id) + 1], \
}
static struct counter_synapse i8254_synapses[] = {
I8254_SYNAPSE_CLK(0), I8254_SYNAPSE_GATE(0),
I8254_SYNAPSE_CLK(1), I8254_SYNAPSE_GATE(1),
I8254_SYNAPSE_CLK(2), I8254_SYNAPSE_GATE(2),
};
static const enum counter_function i8254_functions_list[] = {
COUNTER_FUNCTION_DECREASE,
};
static const enum counter_count_mode i8254_count_modes[] = {
COUNTER_COUNT_MODE_INTERRUPT_ON_TERMINAL_COUNT,
COUNTER_COUNT_MODE_HARDWARE_RETRIGGERABLE_ONESHOT,
COUNTER_COUNT_MODE_RATE_GENERATOR,
COUNTER_COUNT_MODE_SQUARE_WAVE_MODE,
COUNTER_COUNT_MODE_SOFTWARE_TRIGGERED_STROBE,
COUNTER_COUNT_MODE_HARDWARE_TRIGGERED_STROBE,
};
static DEFINE_COUNTER_AVAILABLE(i8254_count_modes_available, i8254_count_modes);
static struct counter_comp i8254_count_ext[] = {
COUNTER_COMP_CEILING(i8254_count_ceiling_read, NULL),
COUNTER_COMP_COUNT_MODE(i8254_count_mode_read, i8254_count_mode_write,
i8254_count_modes_available),
COUNTER_COMP_FLOOR(i8254_count_floor_read, NULL),
COUNTER_COMP_PRESET(i8254_count_preset_read, i8254_count_preset_write),
};
#define I8254_COUNT(_id, _name) { \
.id = (_id), \
.name = (_name), \
.functions_list = i8254_functions_list, \
.num_functions = ARRAY_SIZE(i8254_functions_list), \
.synapses = &i8254_synapses[I8254_SYNAPSES_BASE(_id)], \
.num_synapses = I8254_SYNAPSES_PER_COUNT, \
.ext = i8254_count_ext, \
.num_ext = ARRAY_SIZE(i8254_count_ext) \
}
static struct counter_count i8254_counts[I8254_NUM_COUNTERS] = {
I8254_COUNT(0, "Counter 0"), I8254_COUNT(1, "Counter 1"), I8254_COUNT(2, "Counter 2"),
};
/**
* devm_i8254_regmap_register - Register an i8254 Counter device
* @dev: device that is registering this i8254 Counter device
* @config: configuration for i8254_regmap_config
*
* Registers an Intel 8254 Programmable Interval Timer Counter device. Returns 0 on success and
* negative error number on failure.
*/
int devm_i8254_regmap_register(struct device *const dev,
const struct i8254_regmap_config *const config)
{
struct counter_device *counter;
struct i8254 *priv;
int err;
if (!config->parent)
return -EINVAL;
if (!config->map)
return -EINVAL;
counter = devm_counter_alloc(dev, sizeof(*priv));
if (!counter)
return -ENOMEM;
priv = counter_priv(counter);
priv->map = config->map;
counter->name = dev_name(config->parent);
counter->parent = config->parent;
counter->ops = &i8254_ops;
counter->counts = i8254_counts;
counter->num_counts = ARRAY_SIZE(i8254_counts);
counter->signals = i8254_signals;
counter->num_signals = ARRAY_SIZE(i8254_signals);
mutex_init(&priv->lock);
err = i8254_init_hw(priv->map);
if (err)
return err;
err = devm_counter_add(dev, counter);
if (err < 0)
return dev_err_probe(dev, err, "Failed to add counter\n");
return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_i8254_regmap_register, I8254);
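/*
 * Illustrative host-driver usage ("dev" and "map" are assumed to come from
 * the caller):
 *
 *	struct i8254_regmap_config cfg = {
 *		.parent = dev,
 *		.map = map,
 *	};
 *
 *	return devm_i8254_regmap_register(dev, &cfg);
 */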
MODULE_AUTHOR("William Breathitt Gray");
MODULE_DESCRIPTION("Intel 8254 Programmable Interval Timer");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(COUNTER);
| linux-master | drivers/counter/i8254.c |
// SPDX-License-Identifier: GPL-2.0
/*
* STM32 Timer Encoder and Counter driver
*
* Copyright (C) STMicroelectronics 2018
*
* Author: Benjamin Gaignard <[email protected]>
*
*/
#include <linux/counter.h>
#include <linux/mfd/stm32-timers.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#define TIM_CCMR_CCXS (BIT(8) | BIT(0))
#define TIM_CCMR_MASK (TIM_CCMR_CC1S | TIM_CCMR_CC2S | \
TIM_CCMR_IC1F | TIM_CCMR_IC2F)
#define TIM_CCER_MASK (TIM_CCER_CC1P | TIM_CCER_CC1NP | \
TIM_CCER_CC2P | TIM_CCER_CC2NP)
struct stm32_timer_regs {
u32 cr1;
u32 cnt;
u32 smcr;
u32 arr;
};
struct stm32_timer_cnt {
struct regmap *regmap;
struct clk *clk;
u32 max_arr;
bool enabled;
struct stm32_timer_regs bak;
};
static const enum counter_function stm32_count_functions[] = {
COUNTER_FUNCTION_INCREASE,
COUNTER_FUNCTION_QUADRATURE_X2_A,
COUNTER_FUNCTION_QUADRATURE_X2_B,
COUNTER_FUNCTION_QUADRATURE_X4,
};
static int stm32_count_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cnt;
regmap_read(priv->regmap, TIM_CNT, &cnt);
*val = cnt;
return 0;
}
static int stm32_count_write(struct counter_device *counter,
struct counter_count *count, const u64 val)
{
struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 ceiling;
regmap_read(priv->regmap, TIM_ARR, &ceiling);
if (val > ceiling)
return -EINVAL;
return regmap_write(priv->regmap, TIM_CNT, val);
}
static int stm32_count_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 smcr;
regmap_read(priv->regmap, TIM_SMCR, &smcr);
switch (smcr & TIM_SMCR_SMS) {
case TIM_SMCR_SMS_SLAVE_MODE_DISABLED:
*function = COUNTER_FUNCTION_INCREASE;
return 0;
case TIM_SMCR_SMS_ENCODER_MODE_1:
*function = COUNTER_FUNCTION_QUADRATURE_X2_A;
return 0;
case TIM_SMCR_SMS_ENCODER_MODE_2:
*function = COUNTER_FUNCTION_QUADRATURE_X2_B;
return 0;
case TIM_SMCR_SMS_ENCODER_MODE_3:
*function = COUNTER_FUNCTION_QUADRATURE_X4;
return 0;
default:
return -EINVAL;
}
}
static int stm32_count_function_write(struct counter_device *counter,
struct counter_count *count,
enum counter_function function)
{
struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cr1, sms;
switch (function) {
case COUNTER_FUNCTION_INCREASE:
sms = TIM_SMCR_SMS_SLAVE_MODE_DISABLED;
break;
case COUNTER_FUNCTION_QUADRATURE_X2_A:
sms = TIM_SMCR_SMS_ENCODER_MODE_1;
break;
case COUNTER_FUNCTION_QUADRATURE_X2_B:
sms = TIM_SMCR_SMS_ENCODER_MODE_2;
break;
case COUNTER_FUNCTION_QUADRATURE_X4:
sms = TIM_SMCR_SMS_ENCODER_MODE_3;
break;
default:
return -EINVAL;
}
/* Store enable status */
regmap_read(priv->regmap, TIM_CR1, &cr1);
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms);
/* Make sure that registers are updated */
regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG);
/* Restore the enable status */
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, cr1);
return 0;
}
static int stm32_count_direction_read(struct counter_device *counter,
struct counter_count *count,
enum counter_count_direction *direction)
{
struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cr1;
regmap_read(priv->regmap, TIM_CR1, &cr1);
*direction = (cr1 & TIM_CR1_DIR) ? COUNTER_COUNT_DIRECTION_BACKWARD :
COUNTER_COUNT_DIRECTION_FORWARD;
return 0;
}
static int stm32_count_ceiling_read(struct counter_device *counter,
struct counter_count *count, u64 *ceiling)
{
struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 arr;
regmap_read(priv->regmap, TIM_ARR, &arr);
*ceiling = arr;
return 0;
}
static int stm32_count_ceiling_write(struct counter_device *counter,
struct counter_count *count, u64 ceiling)
{
struct stm32_timer_cnt *const priv = counter_priv(counter);
if (ceiling > priv->max_arr)
return -ERANGE;
/* TIMx_ARR register shouldn't be buffered (ARPE=0) */
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
regmap_write(priv->regmap, TIM_ARR, ceiling);
return 0;
}
static int stm32_count_enable_read(struct counter_device *counter,
struct counter_count *count, u8 *enable)
{
struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cr1;
regmap_read(priv->regmap, TIM_CR1, &cr1);
*enable = cr1 & TIM_CR1_CEN;
return 0;
}
static int stm32_count_enable_write(struct counter_device *counter,
struct counter_count *count, u8 enable)
{
struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cr1;
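	/*
	 * Only touch the clock when the CEN bit actually changes state, so
	 * that clk_enable()/clk_disable() calls stay balanced.
	 */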
if (enable) {
regmap_read(priv->regmap, TIM_CR1, &cr1);
if (!(cr1 & TIM_CR1_CEN))
clk_enable(priv->clk);
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
TIM_CR1_CEN);
} else {
regmap_read(priv->regmap, TIM_CR1, &cr1);
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
if (cr1 & TIM_CR1_CEN)
clk_disable(priv->clk);
}
/* Keep enabled state to properly handle low power states */
priv->enabled = enable;
return 0;
}
static struct counter_comp stm32_count_ext[] = {
COUNTER_COMP_DIRECTION(stm32_count_direction_read),
COUNTER_COMP_ENABLE(stm32_count_enable_read, stm32_count_enable_write),
COUNTER_COMP_CEILING(stm32_count_ceiling_read,
stm32_count_ceiling_write),
};
static const enum counter_synapse_action stm32_synapse_actions[] = {
COUNTER_SYNAPSE_ACTION_NONE,
COUNTER_SYNAPSE_ACTION_BOTH_EDGES
};
static int stm32_action_read(struct counter_device *counter,
struct counter_count *count,
struct counter_synapse *synapse,
enum counter_synapse_action *action)
{
enum counter_function function;
int err;
err = stm32_count_function_read(counter, count, &function);
if (err)
return err;
switch (function) {
case COUNTER_FUNCTION_INCREASE:
/* counts on internal clock when CEN=1 */
*action = COUNTER_SYNAPSE_ACTION_NONE;
return 0;
case COUNTER_FUNCTION_QUADRATURE_X2_A:
/* counts up/down on TI1FP1 edge depending on TI2FP2 level */
if (synapse->signal->id == count->synapses[0].signal->id)
*action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
else
*action = COUNTER_SYNAPSE_ACTION_NONE;
return 0;
case COUNTER_FUNCTION_QUADRATURE_X2_B:
/* counts up/down on TI2FP2 edge depending on TI1FP1 level */
if (synapse->signal->id == count->synapses[1].signal->id)
*action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
else
*action = COUNTER_SYNAPSE_ACTION_NONE;
return 0;
case COUNTER_FUNCTION_QUADRATURE_X4:
/* counts up/down on both TI1FP1 and TI2FP2 edges */
*action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
return 0;
default:
return -EINVAL;
}
}
static const struct counter_ops stm32_timer_cnt_ops = {
.count_read = stm32_count_read,
.count_write = stm32_count_write,
.function_read = stm32_count_function_read,
.function_write = stm32_count_function_write,
.action_read = stm32_action_read,
};
static struct counter_signal stm32_signals[] = {
{
.id = 0,
.name = "Channel 1 Quadrature A"
},
{
.id = 1,
.name = "Channel 1 Quadrature B"
}
};
static struct counter_synapse stm32_count_synapses[] = {
{
.actions_list = stm32_synapse_actions,
.num_actions = ARRAY_SIZE(stm32_synapse_actions),
.signal = &stm32_signals[0]
},
{
.actions_list = stm32_synapse_actions,
.num_actions = ARRAY_SIZE(stm32_synapse_actions),
.signal = &stm32_signals[1]
}
};
static struct counter_count stm32_counts = {
.id = 0,
.name = "Channel 1 Count",
.functions_list = stm32_count_functions,
.num_functions = ARRAY_SIZE(stm32_count_functions),
.synapses = stm32_count_synapses,
.num_synapses = ARRAY_SIZE(stm32_count_synapses),
.ext = stm32_count_ext,
.num_ext = ARRAY_SIZE(stm32_count_ext)
};
static int stm32_timer_cnt_probe(struct platform_device *pdev)
{
struct stm32_timers *ddata = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
struct stm32_timer_cnt *priv;
struct counter_device *counter;
int ret;
if (IS_ERR_OR_NULL(ddata))
return -EINVAL;
counter = devm_counter_alloc(dev, sizeof(*priv));
if (!counter)
return -ENOMEM;
priv = counter_priv(counter);
priv->regmap = ddata->regmap;
priv->clk = ddata->clk;
priv->max_arr = ddata->max_arr;
counter->name = dev_name(dev);
counter->parent = dev;
counter->ops = &stm32_timer_cnt_ops;
counter->counts = &stm32_counts;
counter->num_counts = 1;
counter->signals = stm32_signals;
counter->num_signals = ARRAY_SIZE(stm32_signals);
platform_set_drvdata(pdev, priv);
/* Reset input selector to its default input */
regmap_write(priv->regmap, TIM_TISEL, 0x0);
/* Register Counter device */
ret = devm_counter_add(dev, counter);
if (ret < 0)
dev_err_probe(dev, ret, "Failed to add counter\n");
return ret;
}
static int __maybe_unused stm32_timer_cnt_suspend(struct device *dev)
{
struct stm32_timer_cnt *priv = dev_get_drvdata(dev);
	/* Only take care of the enabled counter: don't disturb other MFD children */
if (priv->enabled) {
/* Backup registers that may get lost in low power mode */
regmap_read(priv->regmap, TIM_SMCR, &priv->bak.smcr);
regmap_read(priv->regmap, TIM_ARR, &priv->bak.arr);
regmap_read(priv->regmap, TIM_CNT, &priv->bak.cnt);
regmap_read(priv->regmap, TIM_CR1, &priv->bak.cr1);
/* Disable the counter */
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
clk_disable(priv->clk);
}
return pinctrl_pm_select_sleep_state(dev);
}
static int __maybe_unused stm32_timer_cnt_resume(struct device *dev)
{
struct stm32_timer_cnt *priv = dev_get_drvdata(dev);
int ret;
ret = pinctrl_pm_select_default_state(dev);
if (ret)
return ret;
if (priv->enabled) {
clk_enable(priv->clk);
/* Restore registers that may have been lost */
regmap_write(priv->regmap, TIM_SMCR, priv->bak.smcr);
regmap_write(priv->regmap, TIM_ARR, priv->bak.arr);
regmap_write(priv->regmap, TIM_CNT, priv->bak.cnt);
/* Also re-enables the counter */
regmap_write(priv->regmap, TIM_CR1, priv->bak.cr1);
}
return 0;
}
static SIMPLE_DEV_PM_OPS(stm32_timer_cnt_pm_ops, stm32_timer_cnt_suspend,
stm32_timer_cnt_resume);
static const struct of_device_id stm32_timer_cnt_of_match[] = {
{ .compatible = "st,stm32-timer-counter", },
{},
};
MODULE_DEVICE_TABLE(of, stm32_timer_cnt_of_match);
static struct platform_driver stm32_timer_cnt_driver = {
.probe = stm32_timer_cnt_probe,
.driver = {
.name = "stm32-timer-counter",
.of_match_table = stm32_timer_cnt_of_match,
.pm = &stm32_timer_cnt_pm_ops,
},
};
module_platform_driver(stm32_timer_cnt_driver);
MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
MODULE_ALIAS("platform:stm32-timer-counter");
MODULE_DESCRIPTION("STMicroelectronics STM32 TIMER counter driver");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(COUNTER);
| linux-master | drivers/counter/stm32-timer-cnt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Generic Counter interface
* Copyright (C) 2020 William Breathitt Gray
*/
#include <linux/cdev.h>
#include <linux/counter.h>
#include <linux/device.h>
#include <linux/device/bus.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/wait.h>
#include "counter-chrdev.h"
#include "counter-sysfs.h"
#define COUNTER_NAME "counter"
/* Provides a unique ID for each counter device */
static DEFINE_IDA(counter_ida);
struct counter_device_allochelper {
struct counter_device counter;
/*
* This is cache line aligned to ensure private data behaves like if it
* were kmalloced separately.
*/
unsigned long privdata[] ____cacheline_aligned;
};
static void counter_device_release(struct device *dev)
{
struct counter_device *const counter =
container_of(dev, struct counter_device, dev);
counter_chrdev_remove(counter);
ida_free(&counter_ida, dev->id);
kfree(container_of(counter, struct counter_device_allochelper, counter));
}
static struct device_type counter_device_type = {
.name = "counter_device",
.release = counter_device_release,
};
static struct bus_type counter_bus_type = {
.name = "counter",
.dev_name = "counter",
};
static dev_t counter_devt;
/**
* counter_priv - access counter device private data
* @counter: counter device
*
* Get the counter device private data
*/
void *counter_priv(const struct counter_device *const counter)
{
struct counter_device_allochelper *ch =
container_of(counter, struct counter_device_allochelper, counter);
return &ch->privdata;
}
EXPORT_SYMBOL_NS_GPL(counter_priv, COUNTER);
/**
* counter_alloc - allocate a counter_device
* @sizeof_priv: size of the driver private data
*
* This is part one of counter registration. The structure is allocated
* dynamically to ensure the right lifetime for the embedded struct device.
*
* If this succeeds, call counter_put() to get rid of the counter_device again.
*/
struct counter_device *counter_alloc(size_t sizeof_priv)
{
struct counter_device_allochelper *ch;
struct counter_device *counter;
struct device *dev;
int err;
ch = kzalloc(sizeof(*ch) + sizeof_priv, GFP_KERNEL);
if (!ch)
return NULL;
counter = &ch->counter;
dev = &counter->dev;
/* Acquire unique ID */
err = ida_alloc(&counter_ida, GFP_KERNEL);
if (err < 0)
goto err_ida_alloc;
dev->id = err;
mutex_init(&counter->ops_exist_lock);
dev->type = &counter_device_type;
dev->bus = &counter_bus_type;
dev->devt = MKDEV(MAJOR(counter_devt), dev->id);
err = counter_chrdev_add(counter);
if (err < 0)
goto err_chrdev_add;
device_initialize(dev);
err = dev_set_name(dev, COUNTER_NAME "%d", dev->id);
if (err)
goto err_dev_set_name;
return counter;
err_dev_set_name:
counter_chrdev_remove(counter);
err_chrdev_add:
ida_free(&counter_ida, dev->id);
err_ida_alloc:
kfree(ch);
return NULL;
}
EXPORT_SYMBOL_NS_GPL(counter_alloc, COUNTER);
void counter_put(struct counter_device *counter)
{
put_device(&counter->dev);
}
EXPORT_SYMBOL_NS_GPL(counter_put, COUNTER);
/**
* counter_add - complete registration of a counter
* @counter: the counter to add
*
* This is part two of counter registration.
*
* If this succeeds, call counter_unregister() to get rid of the counter_device again.
*/
int counter_add(struct counter_device *counter)
{
int err;
struct device *dev = &counter->dev;
if (counter->parent) {
dev->parent = counter->parent;
dev->of_node = counter->parent->of_node;
}
err = counter_sysfs_add(counter);
if (err < 0)
return err;
/* implies device_add(dev) */
return cdev_device_add(&counter->chrdev, dev);
}
EXPORT_SYMBOL_NS_GPL(counter_add, COUNTER);
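/*
 * Minimal registration sketch (illustrative; "my_priv" and the surrounding
 * error handling are hypothetical, non-devm variant):
 *
 *	counter = counter_alloc(sizeof(struct my_priv));
 *	if (!counter)
 *		return -ENOMEM;
 *	priv = counter_priv(counter);
 *	(fill counter->name, ->parent, ->ops, ->counts and ->signals here)
 *	err = counter_add(counter);
 *	if (err < 0)
 *		counter_put(counter);
 */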
/**
* counter_unregister - unregister Counter from the system
* @counter: pointer to Counter to unregister
*
* The Counter is unregistered from the system.
*/
void counter_unregister(struct counter_device *const counter)
{
if (!counter)
return;
cdev_device_del(&counter->chrdev, &counter->dev);
mutex_lock(&counter->ops_exist_lock);
counter->ops = NULL;
wake_up(&counter->events_wait);
mutex_unlock(&counter->ops_exist_lock);
}
EXPORT_SYMBOL_NS_GPL(counter_unregister, COUNTER);
static void devm_counter_release(void *counter)
{
counter_unregister(counter);
}
static void devm_counter_put(void *counter)
{
counter_put(counter);
}
/**
* devm_counter_alloc - allocate a counter_device
* @dev: the device to register the release callback for
* @sizeof_priv: size of the driver private data
*
 * This is the device managed version of counter_alloc(). It registers a cleanup
* callback to care for calling counter_put().
*/
struct counter_device *devm_counter_alloc(struct device *dev, size_t sizeof_priv)
{
struct counter_device *counter;
int err;
counter = counter_alloc(sizeof_priv);
if (!counter)
return NULL;
err = devm_add_action_or_reset(dev, devm_counter_put, counter);
if (err < 0)
return NULL;
return counter;
}
EXPORT_SYMBOL_NS_GPL(devm_counter_alloc, COUNTER);
/**
* devm_counter_add - complete registration of a counter
* @dev: the device to register the release callback for
* @counter: the counter to add
*
* This is the device managed version of counter_add(). It registers a cleanup
* callback to care for calling counter_unregister().
*/
int devm_counter_add(struct device *dev,
struct counter_device *const counter)
{
int err;
err = counter_add(counter);
if (err < 0)
return err;
return devm_add_action_or_reset(dev, devm_counter_release, counter);
}
EXPORT_SYMBOL_NS_GPL(devm_counter_add, COUNTER);
#define COUNTER_DEV_MAX 256
static int __init counter_init(void)
{
int err;
err = bus_register(&counter_bus_type);
if (err < 0)
return err;
err = alloc_chrdev_region(&counter_devt, 0, COUNTER_DEV_MAX,
COUNTER_NAME);
if (err < 0)
goto err_unregister_bus;
return 0;
err_unregister_bus:
bus_unregister(&counter_bus_type);
return err;
}
static void __exit counter_exit(void)
{
unregister_chrdev_region(counter_devt, COUNTER_DEV_MAX);
bus_unregister(&counter_bus_type);
}
subsys_initcall(counter_init);
module_exit(counter_exit);
MODULE_AUTHOR("William Breathitt Gray <[email protected]>");
MODULE_DESCRIPTION("Generic Counter interface");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/counter/counter-core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* STM32 Low-Power Timer Encoder and Counter driver
*
* Copyright (C) STMicroelectronics 2017
*
* Author: Fabrice Gasnier <[email protected]>
*
* Inspired by 104-quad-8 and stm32-timer-trigger drivers.
*
*/
#include <linux/bitfield.h>
#include <linux/counter.h>
#include <linux/mfd/stm32-lptimer.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/types.h>
struct stm32_lptim_cnt {
struct device *dev;
struct regmap *regmap;
struct clk *clk;
u32 ceiling;
u32 polarity;
u32 quadrature_mode;
bool enabled;
};
static int stm32_lptim_is_enabled(struct stm32_lptim_cnt *priv)
{
u32 val;
int ret;
ret = regmap_read(priv->regmap, STM32_LPTIM_CR, &val);
if (ret)
return ret;
return FIELD_GET(STM32_LPTIM_ENABLE, val);
}
static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv,
int enable)
{
int ret;
u32 val;
val = FIELD_PREP(STM32_LPTIM_ENABLE, enable);
ret = regmap_write(priv->regmap, STM32_LPTIM_CR, val);
if (ret)
return ret;
if (!enable) {
clk_disable(priv->clk);
priv->enabled = false;
return 0;
}
/* LP timer must be enabled before writing CMP & ARR */
ret = regmap_write(priv->regmap, STM32_LPTIM_ARR, priv->ceiling);
if (ret)
return ret;
ret = regmap_write(priv->regmap, STM32_LPTIM_CMP, 0);
if (ret)
return ret;
/* ensure CMP & ARR registers are properly written */
ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
(val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK,
100, 1000);
if (ret)
return ret;
ret = regmap_write(priv->regmap, STM32_LPTIM_ICR,
STM32_LPTIM_CMPOKCF_ARROKCF);
if (ret)
return ret;
ret = clk_enable(priv->clk);
if (ret) {
regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
return ret;
}
priv->enabled = true;
/* Start LP timer in continuous mode */
return regmap_update_bits(priv->regmap, STM32_LPTIM_CR,
STM32_LPTIM_CNTSTRT, STM32_LPTIM_CNTSTRT);
}
static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable)
{
u32 mask = STM32_LPTIM_ENC | STM32_LPTIM_COUNTMODE |
STM32_LPTIM_CKPOL | STM32_LPTIM_PRESC;
u32 val;
/* Setup LP timer encoder/counter and polarity, without prescaler */
if (priv->quadrature_mode)
val = enable ? STM32_LPTIM_ENC : 0;
else
val = enable ? STM32_LPTIM_COUNTMODE : 0;
val |= FIELD_PREP(STM32_LPTIM_CKPOL, enable ? priv->polarity : 0);
return regmap_update_bits(priv->regmap, STM32_LPTIM_CFGR, mask, val);
}
/*
* In non-quadrature mode, device counts up on active edge.
* In quadrature mode, encoder counting scenarios are as follows:
* +---------+----------+--------------------+--------------------+
* | Active | Level on | IN1 signal | IN2 signal |
* | edge | opposite +----------+---------+----------+---------+
* | | signal | Rising | Falling | Rising | Falling |
* +---------+----------+----------+---------+----------+---------+
* | Rising | High -> | Down | - | Up | - |
* | edge | Low -> | Up | - | Down | - |
* +---------+----------+----------+---------+----------+---------+
* | Falling | High -> | - | Up | - | Down |
* | edge | Low -> | - | Down | - | Up |
* +---------+----------+----------+---------+----------+---------+
* | Both | High -> | Down | Up | Up | Down |
* | edges | Low -> | Up | Down | Down | Up |
* +---------+----------+----------+---------+----------+---------+
*/
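/*
 * In encoder mode the driver always selects both-edge polarity, so each
 * full quadrature cycle on IN1/IN2 produces four count updates, matching
 * COUNTER_FUNCTION_QUADRATURE_X4 below.
 */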
static const enum counter_function stm32_lptim_cnt_functions[] = {
COUNTER_FUNCTION_INCREASE,
COUNTER_FUNCTION_QUADRATURE_X4,
};
static const enum counter_synapse_action stm32_lptim_cnt_synapse_actions[] = {
COUNTER_SYNAPSE_ACTION_RISING_EDGE,
COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
COUNTER_SYNAPSE_ACTION_NONE,
};
static int stm32_lptim_cnt_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
struct stm32_lptim_cnt *const priv = counter_priv(counter);
u32 cnt;
int ret;
ret = regmap_read(priv->regmap, STM32_LPTIM_CNT, &cnt);
if (ret)
return ret;
*val = cnt;
return 0;
}
static int stm32_lptim_cnt_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
struct stm32_lptim_cnt *const priv = counter_priv(counter);
if (!priv->quadrature_mode) {
*function = COUNTER_FUNCTION_INCREASE;
return 0;
}
if (priv->polarity == STM32_LPTIM_CKPOL_BOTH_EDGES) {
*function = COUNTER_FUNCTION_QUADRATURE_X4;
return 0;
}
return -EINVAL;
}
static int stm32_lptim_cnt_function_write(struct counter_device *counter,
struct counter_count *count,
enum counter_function function)
{
struct stm32_lptim_cnt *const priv = counter_priv(counter);
if (stm32_lptim_is_enabled(priv))
return -EBUSY;
switch (function) {
case COUNTER_FUNCTION_INCREASE:
priv->quadrature_mode = 0;
return 0;
case COUNTER_FUNCTION_QUADRATURE_X4:
priv->quadrature_mode = 1;
priv->polarity = STM32_LPTIM_CKPOL_BOTH_EDGES;
return 0;
default:
/* should never reach this path */
return -EINVAL;
}
}
static int stm32_lptim_cnt_enable_read(struct counter_device *counter,
struct counter_count *count,
u8 *enable)
{
struct stm32_lptim_cnt *const priv = counter_priv(counter);
int ret;
ret = stm32_lptim_is_enabled(priv);
if (ret < 0)
return ret;
*enable = ret;
return 0;
}
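/*
 * Enabling an already-enabled counter is rejected with -EBUSY; disabling an
 * already-disabled counter is a no-op.
 */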
static int stm32_lptim_cnt_enable_write(struct counter_device *counter,
struct counter_count *count,
u8 enable)
{
struct stm32_lptim_cnt *const priv = counter_priv(counter);
int ret;
	/* Check that nobody else uses the timer and that it is not already disabled/enabled */
ret = stm32_lptim_is_enabled(priv);
if ((ret < 0) || (!ret && !enable))
return ret;
if (enable && ret)
return -EBUSY;
ret = stm32_lptim_setup(priv, enable);
if (ret)
return ret;
ret = stm32_lptim_set_enable_state(priv, enable);
if (ret)
return ret;
return 0;
}
static int stm32_lptim_cnt_ceiling_read(struct counter_device *counter,
struct counter_count *count,
u64 *ceiling)
{
struct stm32_lptim_cnt *const priv = counter_priv(counter);
*ceiling = priv->ceiling;
return 0;
}
static int stm32_lptim_cnt_ceiling_write(struct counter_device *counter,
struct counter_count *count,
u64 ceiling)
{
struct stm32_lptim_cnt *const priv = counter_priv(counter);
if (stm32_lptim_is_enabled(priv))
return -EBUSY;
if (ceiling > STM32_LPTIM_MAX_ARR)
return -ERANGE;
priv->ceiling = ceiling;
return 0;
}
static struct counter_comp stm32_lptim_cnt_ext[] = {
COUNTER_COMP_ENABLE(stm32_lptim_cnt_enable_read,
stm32_lptim_cnt_enable_write),
COUNTER_COMP_CEILING(stm32_lptim_cnt_ceiling_read,
stm32_lptim_cnt_ceiling_write),
};
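/*
 * These extensions surface through the generic Counter sysfs interface.
 * Illustrative usage, assuming this device registers as counter0:
 *   echo 1 > /sys/bus/counter/devices/counter0/count0/enable
 *   cat /sys/bus/counter/devices/counter0/count0/ceiling
 */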
static int stm32_lptim_cnt_action_read(struct counter_device *counter,
struct counter_count *count,
struct counter_synapse *synapse,
enum counter_synapse_action *action)
{
struct stm32_lptim_cnt *const priv = counter_priv(counter);
enum counter_function function;
int err;
err = stm32_lptim_cnt_function_read(counter, count, &function);
if (err)
return err;
switch (function) {
case COUNTER_FUNCTION_INCREASE:
/* LP Timer acts as up-counter on input 1 */
if (synapse->signal->id != count->synapses[0].signal->id) {
*action = COUNTER_SYNAPSE_ACTION_NONE;
return 0;
}
switch (priv->polarity) {
case STM32_LPTIM_CKPOL_RISING_EDGE:
*action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
return 0;
case STM32_LPTIM_CKPOL_FALLING_EDGE:
*action = COUNTER_SYNAPSE_ACTION_FALLING_EDGE;
return 0;
case STM32_LPTIM_CKPOL_BOTH_EDGES:
*action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
return 0;
default:
/* should never reach this path */
return -EINVAL;
}
case COUNTER_FUNCTION_QUADRATURE_X4:
*action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
return 0;
default:
/* should never reach this path */
return -EINVAL;
}
}
static int stm32_lptim_cnt_action_write(struct counter_device *counter,
struct counter_count *count,
struct counter_synapse *synapse,
enum counter_synapse_action action)
{
struct stm32_lptim_cnt *const priv = counter_priv(counter);
enum counter_function function;
int err;
if (stm32_lptim_is_enabled(priv))
return -EBUSY;
err = stm32_lptim_cnt_function_read(counter, count, &function);
if (err)
return err;
/* only set polarity when in counter mode (on input 1) */
if (function != COUNTER_FUNCTION_INCREASE
|| synapse->signal->id != count->synapses[0].signal->id)
return -EINVAL;
switch (action) {
case COUNTER_SYNAPSE_ACTION_RISING_EDGE:
priv->polarity = STM32_LPTIM_CKPOL_RISING_EDGE;
return 0;
case COUNTER_SYNAPSE_ACTION_FALLING_EDGE:
priv->polarity = STM32_LPTIM_CKPOL_FALLING_EDGE;
return 0;
case COUNTER_SYNAPSE_ACTION_BOTH_EDGES:
priv->polarity = STM32_LPTIM_CKPOL_BOTH_EDGES;
return 0;
default:
return -EINVAL;
}
}
static const struct counter_ops stm32_lptim_cnt_ops = {
.count_read = stm32_lptim_cnt_read,
.function_read = stm32_lptim_cnt_function_read,
.function_write = stm32_lptim_cnt_function_write,
.action_read = stm32_lptim_cnt_action_read,
.action_write = stm32_lptim_cnt_action_write,
};
static struct counter_signal stm32_lptim_cnt_signals[] = {
{
.id = 0,
.name = "Channel 1 Quadrature A"
},
{
.id = 1,
.name = "Channel 1 Quadrature B"
}
};
static struct counter_synapse stm32_lptim_cnt_synapses[] = {
{
.actions_list = stm32_lptim_cnt_synapse_actions,
.num_actions = ARRAY_SIZE(stm32_lptim_cnt_synapse_actions),
.signal = &stm32_lptim_cnt_signals[0]
},
{
.actions_list = stm32_lptim_cnt_synapse_actions,
.num_actions = ARRAY_SIZE(stm32_lptim_cnt_synapse_actions),
.signal = &stm32_lptim_cnt_signals[1]
}
};
/* LP timer with encoder */
static struct counter_count stm32_lptim_enc_counts = {
.id = 0,
.name = "LPTimer Count",
.functions_list = stm32_lptim_cnt_functions,
.num_functions = ARRAY_SIZE(stm32_lptim_cnt_functions),
.synapses = stm32_lptim_cnt_synapses,
.num_synapses = ARRAY_SIZE(stm32_lptim_cnt_synapses),
.ext = stm32_lptim_cnt_ext,
.num_ext = ARRAY_SIZE(stm32_lptim_cnt_ext)
};
/* LP timer without encoder (counter only) */
static struct counter_count stm32_lptim_in1_counts = {
.id = 0,
.name = "LPTimer Count",
.functions_list = stm32_lptim_cnt_functions,
.num_functions = 1,
.synapses = stm32_lptim_cnt_synapses,
.num_synapses = 1,
.ext = stm32_lptim_cnt_ext,
.num_ext = ARRAY_SIZE(stm32_lptim_cnt_ext)
};
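/*
 * The LP timer counter is an MFD child: the regmap and clock are provided by
 * the parent stm32-lptimer driver.
 */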
static int stm32_lptim_cnt_probe(struct platform_device *pdev)
{
struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent);
struct counter_device *counter;
struct stm32_lptim_cnt *priv;
int ret;
if (IS_ERR_OR_NULL(ddata))
return -EINVAL;
counter = devm_counter_alloc(&pdev->dev, sizeof(*priv));
if (!counter)
return -ENOMEM;
priv = counter_priv(counter);
priv->dev = &pdev->dev;
priv->regmap = ddata->regmap;
priv->clk = ddata->clk;
priv->ceiling = STM32_LPTIM_MAX_ARR;
/* Initialize Counter device */
counter->name = dev_name(&pdev->dev);
counter->parent = &pdev->dev;
counter->ops = &stm32_lptim_cnt_ops;
if (ddata->has_encoder) {
counter->counts = &stm32_lptim_enc_counts;
counter->num_signals = ARRAY_SIZE(stm32_lptim_cnt_signals);
} else {
counter->counts = &stm32_lptim_in1_counts;
counter->num_signals = 1;
}
counter->num_counts = 1;
counter->signals = stm32_lptim_cnt_signals;
platform_set_drvdata(pdev, priv);
ret = devm_counter_add(&pdev->dev, counter);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Failed to add counter\n");
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int stm32_lptim_cnt_suspend(struct device *dev)
{
struct stm32_lptim_cnt *priv = dev_get_drvdata(dev);
int ret;
	/* Only take care of an enabled counter: don't disturb other MFD children */
if (priv->enabled) {
ret = stm32_lptim_setup(priv, 0);
if (ret)
return ret;
ret = stm32_lptim_set_enable_state(priv, 0);
if (ret)
return ret;
/* Force enable state for later resume */
priv->enabled = true;
}
return pinctrl_pm_select_sleep_state(dev);
}
static int stm32_lptim_cnt_resume(struct device *dev)
{
struct stm32_lptim_cnt *priv = dev_get_drvdata(dev);
int ret;
ret = pinctrl_pm_select_default_state(dev);
if (ret)
return ret;
if (priv->enabled) {
priv->enabled = false;
ret = stm32_lptim_setup(priv, 1);
if (ret)
return ret;
ret = stm32_lptim_set_enable_state(priv, 1);
if (ret)
return ret;
}
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(stm32_lptim_cnt_pm_ops, stm32_lptim_cnt_suspend,
stm32_lptim_cnt_resume);
static const struct of_device_id stm32_lptim_cnt_of_match[] = {
{ .compatible = "st,stm32-lptimer-counter", },
{},
};
MODULE_DEVICE_TABLE(of, stm32_lptim_cnt_of_match);
static struct platform_driver stm32_lptim_cnt_driver = {
.probe = stm32_lptim_cnt_probe,
.driver = {
.name = "stm32-lptimer-counter",
.of_match_table = stm32_lptim_cnt_of_match,
.pm = &stm32_lptim_cnt_pm_ops,
},
};
module_platform_driver(stm32_lptim_cnt_driver);
MODULE_AUTHOR("Fabrice Gasnier <[email protected]>");
MODULE_ALIAS("platform:stm32-lptimer-counter");
MODULE_DESCRIPTION("STMicroelectronics STM32 LPTIM counter driver");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(COUNTER);
| linux-master | drivers/counter/stm32-lptimer-cnt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Flex Timer Module Quadrature decoder
*
 * This module implements a driver for the FTM quadrature decoder,
 * e.g. as found on the LS1021A.
*/
#include <linux/fsl/ftm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/counter.h>
#include <linux/bitfield.h>
#include <linux/types.h>
#define FTM_FIELD_UPDATE(ftm, offset, mask, val) \
({ \
uint32_t flags; \
ftm_read(ftm, offset, &flags); \
flags &= ~mask; \
flags |= FIELD_PREP(mask, val); \
ftm_write(ftm, offset, flags); \
})
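/*
 * Example read-modify-write of a register field with FTM_FIELD_UPDATE, as
 * used further below to change the prescaler:
 *   FTM_FIELD_UPDATE(ftm, FTM_SC, FTM_SC_PS_MASK, cnt_mode);
 */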
struct ftm_quaddec {
struct platform_device *pdev;
void __iomem *ftm_base;
bool big_endian;
struct mutex ftm_quaddec_mutex;
};
static void ftm_read(struct ftm_quaddec *ftm, uint32_t offset, uint32_t *data)
{
if (ftm->big_endian)
*data = ioread32be(ftm->ftm_base + offset);
else
*data = ioread32(ftm->ftm_base + offset);
}
static void ftm_write(struct ftm_quaddec *ftm, uint32_t offset, uint32_t data)
{
if (ftm->big_endian)
iowrite32be(data, ftm->ftm_base + offset);
else
iowrite32(data, ftm->ftm_base + offset);
}
/* Hold mutex before modifying write protection state */
static void ftm_clear_write_protection(struct ftm_quaddec *ftm)
{
uint32_t flag;
/* First see if it is enabled */
ftm_read(ftm, FTM_FMS, &flag);
if (flag & FTM_FMS_WPEN)
FTM_FIELD_UPDATE(ftm, FTM_MODE, FTM_MODE_WPDIS, 1);
}
static void ftm_set_write_protection(struct ftm_quaddec *ftm)
{
FTM_FIELD_UPDATE(ftm, FTM_FMS, FTM_FMS_WPEN, 1);
}
static void ftm_reset_counter(struct ftm_quaddec *ftm)
{
/* Reset hardware counter to CNTIN */
ftm_write(ftm, FTM_CNT, 0x0);
}
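/*
 * One-time hardware setup: FTM mode enabled, count range 0..0xffff, prescaler
 * at its lowest setting, quadrature decoding enabled.
 */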
static void ftm_quaddec_init(struct ftm_quaddec *ftm)
{
ftm_clear_write_protection(ftm);
/*
* Do not write in the region from the CNTIN register through the
* PWMLOAD register when FTMEN = 0.
* Also reset other fields to zero
*/
ftm_write(ftm, FTM_MODE, FTM_MODE_FTMEN);
ftm_write(ftm, FTM_CNTIN, 0x0000);
ftm_write(ftm, FTM_MOD, 0xffff);
ftm_write(ftm, FTM_CNT, 0x0);
/* Set prescaler, reset other fields to zero */
ftm_write(ftm, FTM_SC, FTM_SC_PS_1);
/* Select quad mode, reset other fields to zero */
ftm_write(ftm, FTM_QDCTRL, FTM_QDCTRL_QUADEN);
/* Unused features and reset to default section */
ftm_write(ftm, FTM_POL, 0x0);
ftm_write(ftm, FTM_FLTCTRL, 0x0);
ftm_write(ftm, FTM_SYNCONF, 0x0);
ftm_write(ftm, FTM_SYNC, 0xffff);
/* Lock the FTM */
ftm_set_write_protection(ftm);
}
static void ftm_quaddec_disable(void *ftm)
{
struct ftm_quaddec *ftm_qua = ftm;
ftm_clear_write_protection(ftm_qua);
ftm_write(ftm_qua, FTM_MODE, 0);
ftm_write(ftm_qua, FTM_QDCTRL, 0);
/*
* This is enough to disable the counter. No clock has been
* selected by writing to FTM_SC in init()
*/
ftm_set_write_protection(ftm_qua);
}
static int ftm_quaddec_get_prescaler(struct counter_device *counter,
struct counter_count *count, u32 *cnt_mode)
{
struct ftm_quaddec *ftm = counter_priv(counter);
uint32_t scflags;
ftm_read(ftm, FTM_SC, &scflags);
*cnt_mode = FIELD_GET(FTM_SC_PS_MASK, scflags);
return 0;
}
static int ftm_quaddec_set_prescaler(struct counter_device *counter,
struct counter_count *count, u32 cnt_mode)
{
struct ftm_quaddec *ftm = counter_priv(counter);
mutex_lock(&ftm->ftm_quaddec_mutex);
ftm_clear_write_protection(ftm);
FTM_FIELD_UPDATE(ftm, FTM_SC, FTM_SC_PS_MASK, cnt_mode);
ftm_set_write_protection(ftm);
/* Also resets the counter as it is undefined anyway now */
ftm_reset_counter(ftm);
mutex_unlock(&ftm->ftm_quaddec_mutex);
return 0;
}
static const char * const ftm_quaddec_prescaler[] = {
"1", "2", "4", "8", "16", "32", "64", "128"
};
static const enum counter_synapse_action ftm_quaddec_synapse_actions[] = {
COUNTER_SYNAPSE_ACTION_BOTH_EDGES
};
static const enum counter_function ftm_quaddec_count_functions[] = {
COUNTER_FUNCTION_QUADRATURE_X4
};
static int ftm_quaddec_count_read(struct counter_device *counter,
struct counter_count *count,
u64 *val)
{
struct ftm_quaddec *const ftm = counter_priv(counter);
uint32_t cntval;
ftm_read(ftm, FTM_CNT, &cntval);
*val = cntval;
return 0;
}
static int ftm_quaddec_count_write(struct counter_device *counter,
struct counter_count *count,
const u64 val)
{
struct ftm_quaddec *const ftm = counter_priv(counter);
if (val != 0) {
dev_warn(&ftm->pdev->dev, "Can only accept '0' as new counter value\n");
return -EINVAL;
}
ftm_reset_counter(ftm);
return 0;
}
static int ftm_quaddec_count_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
*function = COUNTER_FUNCTION_QUADRATURE_X4;
return 0;
}
static int ftm_quaddec_action_read(struct counter_device *counter,
struct counter_count *count,
struct counter_synapse *synapse,
enum counter_synapse_action *action)
{
*action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
return 0;
}
static const struct counter_ops ftm_quaddec_cnt_ops = {
.count_read = ftm_quaddec_count_read,
.count_write = ftm_quaddec_count_write,
.function_read = ftm_quaddec_count_function_read,
.action_read = ftm_quaddec_action_read,
};
static struct counter_signal ftm_quaddec_signals[] = {
{
.id = 0,
.name = "Channel 1 Phase A"
},
{
.id = 1,
.name = "Channel 1 Phase B"
}
};
static struct counter_synapse ftm_quaddec_count_synapses[] = {
{
.actions_list = ftm_quaddec_synapse_actions,
.num_actions = ARRAY_SIZE(ftm_quaddec_synapse_actions),
.signal = &ftm_quaddec_signals[0]
},
{
.actions_list = ftm_quaddec_synapse_actions,
.num_actions = ARRAY_SIZE(ftm_quaddec_synapse_actions),
.signal = &ftm_quaddec_signals[1]
}
};
static DEFINE_COUNTER_ENUM(ftm_quaddec_prescaler_enum, ftm_quaddec_prescaler);
static struct counter_comp ftm_quaddec_count_ext[] = {
COUNTER_COMP_COUNT_ENUM("prescaler", ftm_quaddec_get_prescaler,
ftm_quaddec_set_prescaler,
ftm_quaddec_prescaler_enum),
};
static struct counter_count ftm_quaddec_counts = {
.id = 0,
.name = "Channel 1 Count",
.functions_list = ftm_quaddec_count_functions,
.num_functions = ARRAY_SIZE(ftm_quaddec_count_functions),
.synapses = ftm_quaddec_count_synapses,
.num_synapses = ARRAY_SIZE(ftm_quaddec_count_synapses),
.ext = ftm_quaddec_count_ext,
.num_ext = ARRAY_SIZE(ftm_quaddec_count_ext)
};
static int ftm_quaddec_probe(struct platform_device *pdev)
{
struct counter_device *counter;
struct ftm_quaddec *ftm;
struct device_node *node = pdev->dev.of_node;
struct resource *io;
int ret;
counter = devm_counter_alloc(&pdev->dev, sizeof(*ftm));
if (!counter)
return -ENOMEM;
ftm = counter_priv(counter);
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!io) {
dev_err(&pdev->dev, "Failed to get memory region\n");
return -ENODEV;
}
ftm->pdev = pdev;
ftm->big_endian = of_property_read_bool(node, "big-endian");
ftm->ftm_base = devm_ioremap(&pdev->dev, io->start, resource_size(io));
if (!ftm->ftm_base) {
dev_err(&pdev->dev, "Failed to map memory region\n");
return -EINVAL;
}
counter->name = dev_name(&pdev->dev);
counter->parent = &pdev->dev;
counter->ops = &ftm_quaddec_cnt_ops;
counter->counts = &ftm_quaddec_counts;
counter->num_counts = 1;
counter->signals = ftm_quaddec_signals;
counter->num_signals = ARRAY_SIZE(ftm_quaddec_signals);
mutex_init(&ftm->ftm_quaddec_mutex);
ftm_quaddec_init(ftm);
ret = devm_add_action_or_reset(&pdev->dev, ftm_quaddec_disable, ftm);
if (ret)
return ret;
ret = devm_counter_add(&pdev->dev, counter);
if (ret)
return dev_err_probe(&pdev->dev, ret, "Failed to add counter\n");
return 0;
}
static const struct of_device_id ftm_quaddec_match[] = {
{ .compatible = "fsl,ftm-quaddec" },
{},
};
static struct platform_driver ftm_quaddec_driver = {
.driver = {
.name = "ftm-quaddec",
.of_match_table = ftm_quaddec_match,
},
.probe = ftm_quaddec_probe,
};
module_platform_driver(ftm_quaddec_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kjeld Flarup <[email protected]>");
MODULE_AUTHOR("Patrick Havelange <[email protected]>");
MODULE_IMPORT_NS(COUNTER);
| linux-master | drivers/counter/ftm-quaddec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Counter driver for the ACCES 104-QUAD-8
* Copyright (C) 2016 William Breathitt Gray
*
* This driver supports the ACCES 104-QUAD-8 and ACCES 104-QUAD-4.
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/counter.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/isa.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/unaligned.h>
#define QUAD8_EXTENT 32
static unsigned int base[max_num_isa_dev(QUAD8_EXTENT)];
static unsigned int num_quad8;
module_param_hw_array(base, uint, ioport, &num_quad8, 0);
MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
static unsigned int irq[max_num_isa_dev(QUAD8_EXTENT)];
static unsigned int num_irq;
module_param_hw_array(irq, uint, irq, &num_irq, 0);
MODULE_PARM_DESC(irq, "ACCES 104-QUAD-8 interrupt line numbers");
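/*
 * Illustrative module load for a single card at I/O base 0x220 using IRQ 5
 * (the base address and IRQ line depend on the card's jumper settings):
 *   modprobe 104-quad-8 base=0x220 irq=5
 */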
#define QUAD8_NUM_COUNTERS 8
#define QUAD8_DATA(_channel) ((_channel) * 2)
#define QUAD8_CONTROL(_channel) (QUAD8_DATA(_channel) + 1)
#define QUAD8_INTERRUPT_STATUS 0x10
#define QUAD8_CHANNEL_OPERATION 0x11
#define QUAD8_INDEX_INTERRUPT 0x12
#define QUAD8_INDEX_INPUT_LEVELS 0x16
#define QUAD8_CABLE_STATUS 0x17
/**
* struct quad8 - device private data structure
* @lock: lock to prevent clobbering device states during R/W ops
* @cmr: array of Counter Mode Register states
* @ior: array of Input / Output Control Register states
* @idr: array of Index Control Register states
* @fck_prescaler: array of filter clock prescaler configurations
* @preset: array of preset values
* @cable_fault_enable: differential encoder cable status enable configurations
* @map: regmap for the device
*/
struct quad8 {
spinlock_t lock;
u8 cmr[QUAD8_NUM_COUNTERS];
u8 ior[QUAD8_NUM_COUNTERS];
u8 idr[QUAD8_NUM_COUNTERS];
unsigned int fck_prescaler[QUAD8_NUM_COUNTERS];
unsigned int preset[QUAD8_NUM_COUNTERS];
unsigned int cable_fault_enable;
struct regmap *map;
};
static const struct regmap_range quad8_wr_ranges[] = {
regmap_reg_range(0x0, 0xF), regmap_reg_range(0x11, 0x12), regmap_reg_range(0x17, 0x17),
};
static const struct regmap_range quad8_rd_ranges[] = {
regmap_reg_range(0x0, 0x12), regmap_reg_range(0x16, 0x18),
};
static const struct regmap_access_table quad8_wr_table = {
.yes_ranges = quad8_wr_ranges,
.n_yes_ranges = ARRAY_SIZE(quad8_wr_ranges),
};
static const struct regmap_access_table quad8_rd_table = {
.yes_ranges = quad8_rd_ranges,
.n_yes_ranges = ARRAY_SIZE(quad8_rd_ranges),
};
static const struct regmap_config quad8_regmap_config = {
.reg_bits = 8,
.reg_stride = 1,
.val_bits = 8,
.io_port = true,
.wr_table = &quad8_wr_table,
.rd_table = &quad8_rd_table,
};
/* Error flag */
#define FLAG_E BIT(4)
/* Up/Down flag */
#define FLAG_UD BIT(5)
/* Counting up */
#define UP 0x1
#define REGISTER_SELECTION GENMASK(6, 5)
/* Reset and Load Signal Decoders */
#define SELECT_RLD u8_encode_bits(0x0, REGISTER_SELECTION)
/* Counter Mode Register */
#define SELECT_CMR u8_encode_bits(0x1, REGISTER_SELECTION)
/* Input / Output Control Register */
#define SELECT_IOR u8_encode_bits(0x2, REGISTER_SELECTION)
/* Index Control Register */
#define SELECT_IDR u8_encode_bits(0x3, REGISTER_SELECTION)
/*
* Reset and Load Signal Decoders
*/
#define RESETS GENMASK(2, 1)
#define LOADS GENMASK(4, 3)
/* Reset Byte Pointer (three byte data pointer) */
#define RESET_BP BIT(0)
/* Reset Borrow Toggle, Carry toggle, Compare toggle, Sign, and Index flags */
#define RESET_BT_CT_CPT_S_IDX u8_encode_bits(0x2, RESETS)
/* Reset Error flag */
#define RESET_E u8_encode_bits(0x3, RESETS)
/* Preset Register to Counter */
#define TRANSFER_PR_TO_CNTR u8_encode_bits(0x1, LOADS)
/* Transfer Counter to Output Latch */
#define TRANSFER_CNTR_TO_OL u8_encode_bits(0x2, LOADS)
/* Transfer Preset Register LSB to FCK Prescaler */
#define TRANSFER_PR0_TO_PSC u8_encode_bits(0x3, LOADS)
/*
* Counter Mode Registers
*/
#define COUNT_ENCODING BIT(0)
#define COUNT_MODE GENMASK(2, 1)
#define QUADRATURE_MODE GENMASK(4, 3)
/* Binary count */
#define BINARY u8_encode_bits(0x0, COUNT_ENCODING)
/* Normal count */
#define NORMAL_COUNT 0x0
/* Range Limit */
#define RANGE_LIMIT 0x1
/* Non-recycle count */
#define NON_RECYCLE_COUNT 0x2
/* Modulo-N */
#define MODULO_N 0x3
/* Non-quadrature */
#define NON_QUADRATURE 0x0
/* Quadrature X1 */
#define QUADRATURE_X1 0x1
/* Quadrature X2 */
#define QUADRATURE_X2 0x2
/* Quadrature X4 */
#define QUADRATURE_X4 0x3
/*
* Input/Output Control Register
*/
#define AB_GATE BIT(0)
#define LOAD_PIN BIT(1)
#define FLG_PINS GENMASK(4, 3)
/* Disable inputs A and B */
#define DISABLE_AB u8_encode_bits(0x0, AB_GATE)
/* Load Counter input */
#define LOAD_CNTR 0x0
/* FLG1 = CARRY(active low); FLG2 = BORROW(active low) */
#define FLG1_CARRY_FLG2_BORROW 0x0
/* FLG1 = COMPARE(active low); FLG2 = BORROW(active low) */
#define FLG1_COMPARE_FLG2_BORROW 0x1
/* FLG1 = Carry(active low)/Borrow(active low); FLG2 = U/D(active low) flag */
#define FLG1_CARRYBORROW_FLG2_UD 0x2
/* FLG1 = INDX (low pulse at INDEX pin active level); FLG2 = E flag */
#define FLG1_INDX_FLG2_E 0x3
/*
* INDEX CONTROL REGISTERS
*/
#define INDEX_MODE BIT(0)
#define INDEX_POLARITY BIT(1)
/* Disable Index mode */
#define DISABLE_INDEX_MODE 0x0
/* Enable Index mode */
#define ENABLE_INDEX_MODE 0x1
/* Negative Index Polarity */
#define NEGATIVE_INDEX_POLARITY 0x0
/* Positive Index Polarity */
#define POSITIVE_INDEX_POLARITY 0x1
/*
* Channel Operation Register
*/
#define COUNTERS_OPERATION BIT(0)
#define INTERRUPT_FUNCTION BIT(2)
/* Enable all Counters */
#define ENABLE_COUNTERS u8_encode_bits(0x0, COUNTERS_OPERATION)
/* Reset all Counters */
#define RESET_COUNTERS u8_encode_bits(0x1, COUNTERS_OPERATION)
/* Disable the interrupt function */
#define DISABLE_INTERRUPT_FUNCTION u8_encode_bits(0x0, INTERRUPT_FUNCTION)
/* Enable the interrupt function */
#define ENABLE_INTERRUPT_FUNCTION u8_encode_bits(0x1, INTERRUPT_FUNCTION)
/* Any write to the Channel Operation register clears any pending interrupts */
#define CLEAR_PENDING_INTERRUPTS (ENABLE_COUNTERS | ENABLE_INTERRUPT_FUNCTION)
/* Each Counter is 24 bits wide */
#define LS7267_CNTR_MAX GENMASK(23, 0)
static __always_inline int quad8_control_register_update(struct regmap *const map, u8 *const buf,
const size_t channel, const u8 val,
const u8 field)
{
u8p_replace_bits(&buf[channel], val, field);
return regmap_write(map, QUAD8_CONTROL(channel), buf[channel]);
}
static int quad8_signal_read(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_level *level)
{
const struct quad8 *const priv = counter_priv(counter);
int ret;
/* Only Index signal levels can be read */
if (signal->id < 16)
return -EINVAL;
ret = regmap_test_bits(priv->map, QUAD8_INDEX_INPUT_LEVELS, BIT(signal->id - 16));
if (ret < 0)
return ret;
*level = (ret) ? COUNTER_SIGNAL_LEVEL_HIGH : COUNTER_SIGNAL_LEVEL_LOW;
return 0;
}
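/*
 * The 24-bit count is read by latching the counter into the Output Latch,
 * resetting the byte pointer, then reading three consecutive data bytes
 * (least significant byte first).
 */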
static int quad8_count_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
u8 value[3];
int ret;
spin_lock_irqsave(&priv->lock, irqflags);
ret = regmap_write(priv->map, QUAD8_CONTROL(count->id),
SELECT_RLD | RESET_BP | TRANSFER_CNTR_TO_OL);
if (ret)
goto exit_unlock;
ret = regmap_noinc_read(priv->map, QUAD8_DATA(count->id), value, sizeof(value));
exit_unlock:
spin_unlock_irqrestore(&priv->lock, irqflags);
*val = get_unaligned_le24(value);
return ret;
}
static int quad8_preset_register_set(struct quad8 *const priv, const size_t id,
const unsigned long preset)
{
u8 value[3];
int ret;
put_unaligned_le24(preset, value);
ret = regmap_write(priv->map, QUAD8_CONTROL(id), SELECT_RLD | RESET_BP);
if (ret)
return ret;
return regmap_noinc_write(priv->map, QUAD8_DATA(id), value, sizeof(value));
}
static int quad8_flag_register_reset(struct quad8 *const priv, const size_t id)
{
int ret;
ret = regmap_write(priv->map, QUAD8_CONTROL(id), SELECT_RLD | RESET_BT_CT_CPT_S_IDX);
if (ret)
return ret;
return regmap_write(priv->map, QUAD8_CONTROL(id), SELECT_RLD | RESET_E);
}
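/*
 * The count register cannot be written directly: the new value is loaded
 * through the Preset Register and transferred into the counter, after which
 * the user-visible preset value is restored.
 */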
static int quad8_count_write(struct counter_device *counter,
struct counter_count *count, u64 val)
{
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
int ret;
if (val > LS7267_CNTR_MAX)
return -ERANGE;
spin_lock_irqsave(&priv->lock, irqflags);
/* Counter can only be set via Preset Register */
ret = quad8_preset_register_set(priv, count->id, val);
if (ret)
goto exit_unlock;
ret = regmap_write(priv->map, QUAD8_CONTROL(count->id), SELECT_RLD | TRANSFER_PR_TO_CNTR);
if (ret)
goto exit_unlock;
ret = quad8_flag_register_reset(priv, count->id);
if (ret)
goto exit_unlock;
/* Set Preset Register back to original value */
ret = quad8_preset_register_set(priv, count->id, priv->preset[count->id]);
exit_unlock:
spin_unlock_irqrestore(&priv->lock, irqflags);
return ret;
}
static const enum counter_function quad8_count_functions_list[] = {
COUNTER_FUNCTION_PULSE_DIRECTION,
COUNTER_FUNCTION_QUADRATURE_X1_A,
COUNTER_FUNCTION_QUADRATURE_X2_A,
COUNTER_FUNCTION_QUADRATURE_X4,
};
static int quad8_function_get(const struct quad8 *const priv, const size_t id,
enum counter_function *const function)
{
switch (u8_get_bits(priv->cmr[id], QUADRATURE_MODE)) {
case NON_QUADRATURE:
*function = COUNTER_FUNCTION_PULSE_DIRECTION;
return 0;
case QUADRATURE_X1:
*function = COUNTER_FUNCTION_QUADRATURE_X1_A;
return 0;
case QUADRATURE_X2:
*function = COUNTER_FUNCTION_QUADRATURE_X2_A;
return 0;
case QUADRATURE_X4:
*function = COUNTER_FUNCTION_QUADRATURE_X4;
return 0;
default:
/* should never reach this path */
return -EINVAL;
}
}
static int quad8_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
int retval;
spin_lock_irqsave(&priv->lock, irqflags);
retval = quad8_function_get(priv, count->id, function);
spin_unlock_irqrestore(&priv->lock, irqflags);
return retval;
}
static int quad8_function_write(struct counter_device *counter,
struct counter_count *count,
enum counter_function function)
{
struct quad8 *const priv = counter_priv(counter);
const int id = count->id;
unsigned long irqflags;
unsigned int mode_cfg;
bool synchronous_mode;
int ret;
switch (function) {
case COUNTER_FUNCTION_PULSE_DIRECTION:
mode_cfg = NON_QUADRATURE;
break;
case COUNTER_FUNCTION_QUADRATURE_X1_A:
mode_cfg = QUADRATURE_X1;
break;
case COUNTER_FUNCTION_QUADRATURE_X2_A:
mode_cfg = QUADRATURE_X2;
break;
case COUNTER_FUNCTION_QUADRATURE_X4:
mode_cfg = QUADRATURE_X4;
break;
default:
/* should never reach this path */
return -EINVAL;
}
spin_lock_irqsave(&priv->lock, irqflags);
/* Synchronous function not supported in non-quadrature mode */
synchronous_mode = u8_get_bits(priv->idr[id], INDEX_MODE) == ENABLE_INDEX_MODE;
if (synchronous_mode && mode_cfg == NON_QUADRATURE) {
ret = quad8_control_register_update(priv->map, priv->idr, id, DISABLE_INDEX_MODE,
INDEX_MODE);
if (ret)
goto exit_unlock;
}
ret = quad8_control_register_update(priv->map, priv->cmr, id, mode_cfg, QUADRATURE_MODE);
exit_unlock:
spin_unlock_irqrestore(&priv->lock, irqflags);
return ret;
}
static int quad8_direction_read(struct counter_device *counter,
struct counter_count *count,
enum counter_count_direction *direction)
{
const struct quad8 *const priv = counter_priv(counter);
unsigned int flag;
int ret;
ret = regmap_read(priv->map, QUAD8_CONTROL(count->id), &flag);
if (ret)
return ret;
*direction = (u8_get_bits(flag, FLAG_UD) == UP) ? COUNTER_COUNT_DIRECTION_FORWARD :
COUNTER_COUNT_DIRECTION_BACKWARD;
return 0;
}
static const enum counter_synapse_action quad8_index_actions_list[] = {
COUNTER_SYNAPSE_ACTION_NONE,
COUNTER_SYNAPSE_ACTION_RISING_EDGE,
};
static const enum counter_synapse_action quad8_synapse_actions_list[] = {
COUNTER_SYNAPSE_ACTION_NONE,
COUNTER_SYNAPSE_ACTION_RISING_EDGE,
COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
};
static int quad8_action_read(struct counter_device *counter,
struct counter_count *count,
struct counter_synapse *synapse,
enum counter_synapse_action *action)
{
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
int err;
enum counter_function function;
const size_t signal_a_id = count->synapses[0].signal->id;
enum counter_count_direction direction;
/* Default action mode */
*action = COUNTER_SYNAPSE_ACTION_NONE;
/* Handle Index signals */
if (synapse->signal->id >= 16) {
if (u8_get_bits(priv->ior[count->id], LOAD_PIN) == LOAD_CNTR)
*action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
return 0;
}
spin_lock_irqsave(&priv->lock, irqflags);
/* Get Count function and direction atomically */
err = quad8_function_get(priv, count->id, &function);
if (err) {
spin_unlock_irqrestore(&priv->lock, irqflags);
return err;
}
err = quad8_direction_read(counter, count, &direction);
if (err) {
spin_unlock_irqrestore(&priv->lock, irqflags);
return err;
}
spin_unlock_irqrestore(&priv->lock, irqflags);
/* Determine action mode based on current count function mode */
switch (function) {
case COUNTER_FUNCTION_PULSE_DIRECTION:
if (synapse->signal->id == signal_a_id)
*action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
return 0;
case COUNTER_FUNCTION_QUADRATURE_X1_A:
if (synapse->signal->id == signal_a_id) {
if (direction == COUNTER_COUNT_DIRECTION_FORWARD)
*action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
else
*action = COUNTER_SYNAPSE_ACTION_FALLING_EDGE;
}
return 0;
case COUNTER_FUNCTION_QUADRATURE_X2_A:
if (synapse->signal->id == signal_a_id)
*action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
return 0;
case COUNTER_FUNCTION_QUADRATURE_X4:
*action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
return 0;
default:
/* should never reach this path */
return -EINVAL;
}
}
static int quad8_events_configure(struct counter_device *counter)
{
struct quad8 *const priv = counter_priv(counter);
unsigned long irq_enabled = 0;
unsigned long irqflags;
struct counter_event_node *event_node;
u8 flg_pins;
int ret;
spin_lock_irqsave(&priv->lock, irqflags);
list_for_each_entry(event_node, &counter->events_list, l) {
switch (event_node->event) {
case COUNTER_EVENT_OVERFLOW:
flg_pins = FLG1_CARRY_FLG2_BORROW;
break;
case COUNTER_EVENT_THRESHOLD:
flg_pins = FLG1_COMPARE_FLG2_BORROW;
break;
case COUNTER_EVENT_OVERFLOW_UNDERFLOW:
flg_pins = FLG1_CARRYBORROW_FLG2_UD;
break;
case COUNTER_EVENT_INDEX:
flg_pins = FLG1_INDX_FLG2_E;
break;
default:
/* should never reach this path */
ret = -EINVAL;
goto exit_unlock;
}
/* Enable IRQ line */
irq_enabled |= BIT(event_node->channel);
/* Skip configuration if it is the same as previously set */
if (flg_pins == u8_get_bits(priv->ior[event_node->channel], FLG_PINS))
continue;
/* Save new IRQ function configuration */
ret = quad8_control_register_update(priv->map, priv->ior, event_node->channel,
flg_pins, FLG_PINS);
if (ret)
goto exit_unlock;
}
ret = regmap_write(priv->map, QUAD8_INDEX_INTERRUPT, irq_enabled);
exit_unlock:
spin_unlock_irqrestore(&priv->lock, irqflags);
return ret;
}
static int quad8_watch_validate(struct counter_device *counter,
const struct counter_watch *watch)
{
struct counter_event_node *event_node;
if (watch->channel > QUAD8_NUM_COUNTERS - 1)
return -EINVAL;
switch (watch->event) {
case COUNTER_EVENT_OVERFLOW:
case COUNTER_EVENT_THRESHOLD:
case COUNTER_EVENT_OVERFLOW_UNDERFLOW:
case COUNTER_EVENT_INDEX:
list_for_each_entry(event_node, &counter->next_events_list, l)
if (watch->channel == event_node->channel &&
watch->event != event_node->event)
return -EINVAL;
return 0;
default:
return -EINVAL;
}
}
static const struct counter_ops quad8_ops = {
.signal_read = quad8_signal_read,
.count_read = quad8_count_read,
.count_write = quad8_count_write,
.function_read = quad8_function_read,
.function_write = quad8_function_write,
.action_read = quad8_action_read,
.events_configure = quad8_events_configure,
.watch_validate = quad8_watch_validate,
};
static const char *const quad8_index_polarity_modes[] = {
"negative",
"positive"
};
static int quad8_index_polarity_get(struct counter_device *counter,
struct counter_signal *signal,
u32 *index_polarity)
{
const struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
*index_polarity = u8_get_bits(priv->idr[channel_id], INDEX_POLARITY);
return 0;
}
static int quad8_index_polarity_set(struct counter_device *counter,
struct counter_signal *signal,
u32 index_polarity)
{
struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
unsigned long irqflags;
int ret;
spin_lock_irqsave(&priv->lock, irqflags);
ret = quad8_control_register_update(priv->map, priv->idr, channel_id, index_polarity,
INDEX_POLARITY);
spin_unlock_irqrestore(&priv->lock, irqflags);
return ret;
}
static int quad8_polarity_read(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_polarity *polarity)
{
int err;
u32 index_polarity;
err = quad8_index_polarity_get(counter, signal, &index_polarity);
if (err)
return err;
*polarity = (index_polarity == POSITIVE_INDEX_POLARITY) ? COUNTER_SIGNAL_POLARITY_POSITIVE :
COUNTER_SIGNAL_POLARITY_NEGATIVE;
return 0;
}
static int quad8_polarity_write(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_polarity polarity)
{
const u32 pol = (polarity == COUNTER_SIGNAL_POLARITY_POSITIVE) ? POSITIVE_INDEX_POLARITY :
NEGATIVE_INDEX_POLARITY;
return quad8_index_polarity_set(counter, signal, pol);
}
static const char *const quad8_synchronous_modes[] = {
"non-synchronous",
"synchronous"
};
static int quad8_synchronous_mode_get(struct counter_device *counter,
struct counter_signal *signal,
u32 *synchronous_mode)
{
const struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
*synchronous_mode = u8_get_bits(priv->idr[channel_id], INDEX_MODE);
return 0;
}
static int quad8_synchronous_mode_set(struct counter_device *counter,
struct counter_signal *signal,
u32 synchronous_mode)
{
struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
u8 quadrature_mode;
unsigned long irqflags;
int ret;
spin_lock_irqsave(&priv->lock, irqflags);
/* Index function must be non-synchronous in non-quadrature mode */
	quadrature_mode = u8_get_bits(priv->cmr[channel_id], QUADRATURE_MODE);
if (synchronous_mode && quadrature_mode == NON_QUADRATURE) {
ret = -EINVAL;
goto exit_unlock;
}
ret = quad8_control_register_update(priv->map, priv->idr, channel_id, synchronous_mode,
INDEX_MODE);
exit_unlock:
spin_unlock_irqrestore(&priv->lock, irqflags);
return ret;
}
static int quad8_count_floor_read(struct counter_device *counter,
struct counter_count *count, u64 *floor)
{
/* Only a floor of 0 is supported */
*floor = 0;
return 0;
}
static int quad8_count_mode_read(struct counter_device *counter,
struct counter_count *count,
enum counter_count_mode *cnt_mode)
{
const struct quad8 *const priv = counter_priv(counter);
switch (u8_get_bits(priv->cmr[count->id], COUNT_MODE)) {
case NORMAL_COUNT:
*cnt_mode = COUNTER_COUNT_MODE_NORMAL;
break;
case RANGE_LIMIT:
*cnt_mode = COUNTER_COUNT_MODE_RANGE_LIMIT;
break;
case NON_RECYCLE_COUNT:
*cnt_mode = COUNTER_COUNT_MODE_NON_RECYCLE;
break;
case MODULO_N:
*cnt_mode = COUNTER_COUNT_MODE_MODULO_N;
break;
}
return 0;
}
static int quad8_count_mode_write(struct counter_device *counter,
struct counter_count *count,
enum counter_count_mode cnt_mode)
{
struct quad8 *const priv = counter_priv(counter);
unsigned int count_mode;
unsigned long irqflags;
int ret;
switch (cnt_mode) {
case COUNTER_COUNT_MODE_NORMAL:
count_mode = NORMAL_COUNT;
break;
case COUNTER_COUNT_MODE_RANGE_LIMIT:
count_mode = RANGE_LIMIT;
break;
case COUNTER_COUNT_MODE_NON_RECYCLE:
count_mode = NON_RECYCLE_COUNT;
break;
case COUNTER_COUNT_MODE_MODULO_N:
count_mode = MODULO_N;
break;
default:
/* should never reach this path */
return -EINVAL;
}
spin_lock_irqsave(&priv->lock, irqflags);
ret = quad8_control_register_update(priv->map, priv->cmr, count->id, count_mode,
COUNT_MODE);
spin_unlock_irqrestore(&priv->lock, irqflags);
return ret;
}
static int quad8_count_enable_read(struct counter_device *counter,
struct counter_count *count, u8 *enable)
{
const struct quad8 *const priv = counter_priv(counter);
*enable = u8_get_bits(priv->ior[count->id], AB_GATE);
return 0;
}
static int quad8_count_enable_write(struct counter_device *counter,
struct counter_count *count, u8 enable)
{
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
int ret;
spin_lock_irqsave(&priv->lock, irqflags);
ret = quad8_control_register_update(priv->map, priv->ior, count->id, enable, AB_GATE);
spin_unlock_irqrestore(&priv->lock, irqflags);
return ret;
}
static const char *const quad8_noise_error_states[] = {
"No excessive noise is present at the count inputs",
"Excessive noise is present at the count inputs"
};
static int quad8_error_noise_get(struct counter_device *counter,
struct counter_count *count, u32 *noise_error)
{
const struct quad8 *const priv = counter_priv(counter);
unsigned int flag;
int ret;
ret = regmap_read(priv->map, QUAD8_CONTROL(count->id), &flag);
if (ret)
return ret;
*noise_error = u8_get_bits(flag, FLAG_E);
return 0;
}
static int quad8_count_preset_read(struct counter_device *counter,
struct counter_count *count, u64 *preset)
{
const struct quad8 *const priv = counter_priv(counter);
*preset = priv->preset[count->id];
return 0;
}
static int quad8_count_preset_write(struct counter_device *counter,
struct counter_count *count, u64 preset)
{
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
int ret;
if (preset > LS7267_CNTR_MAX)
return -ERANGE;
spin_lock_irqsave(&priv->lock, irqflags);
priv->preset[count->id] = preset;
ret = quad8_preset_register_set(priv, count->id, preset);
spin_unlock_irqrestore(&priv->lock, irqflags);
return ret;
}
static int quad8_count_ceiling_read(struct counter_device *counter,
struct counter_count *count, u64 *ceiling)
{
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
spin_lock_irqsave(&priv->lock, irqflags);
/* Range Limit and Modulo-N count modes use preset value as ceiling */
switch (u8_get_bits(priv->cmr[count->id], COUNT_MODE)) {
case RANGE_LIMIT:
case MODULO_N:
*ceiling = priv->preset[count->id];
break;
default:
*ceiling = LS7267_CNTR_MAX;
break;
}
spin_unlock_irqrestore(&priv->lock, irqflags);
return 0;
}
static int quad8_count_ceiling_write(struct counter_device *counter,
struct counter_count *count, u64 ceiling)
{
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
int ret;
if (ceiling > LS7267_CNTR_MAX)
return -ERANGE;
spin_lock_irqsave(&priv->lock, irqflags);
/* Range Limit and Modulo-N count modes use preset value as ceiling */
switch (u8_get_bits(priv->cmr[count->id], COUNT_MODE)) {
case RANGE_LIMIT:
case MODULO_N:
priv->preset[count->id] = ceiling;
ret = quad8_preset_register_set(priv, count->id, ceiling);
break;
default:
ret = -EINVAL;
break;
}
spin_unlock_irqrestore(&priv->lock, irqflags);
return ret;
}
static int quad8_count_preset_enable_read(struct counter_device *counter,
struct counter_count *count,
u8 *preset_enable)
{
const struct quad8 *const priv = counter_priv(counter);
/* Preset enable is active low in Input/Output Control register */
*preset_enable = !u8_get_bits(priv->ior[count->id], LOAD_PIN);
return 0;
}
static int quad8_count_preset_enable_write(struct counter_device *counter,
struct counter_count *count,
u8 preset_enable)
{
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
int ret;
spin_lock_irqsave(&priv->lock, irqflags);
/* Preset enable is active low in Input/Output Control register */
ret = quad8_control_register_update(priv->map, priv->ior, count->id, !preset_enable,
LOAD_PIN);
spin_unlock_irqrestore(&priv->lock, irqflags);
return ret;
}
static int quad8_signal_cable_fault_read(struct counter_device *counter,
struct counter_signal *signal,
u8 *cable_fault)
{
struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id / 2;
unsigned long irqflags;
bool disabled;
int ret;
spin_lock_irqsave(&priv->lock, irqflags);
disabled = !(priv->cable_fault_enable & BIT(channel_id));
if (disabled) {
spin_unlock_irqrestore(&priv->lock, irqflags);
return -EINVAL;
}
ret = regmap_test_bits(priv->map, QUAD8_CABLE_STATUS, BIT(channel_id));
if (ret < 0) {
spin_unlock_irqrestore(&priv->lock, irqflags);
return ret;
}
spin_unlock_irqrestore(&priv->lock, irqflags);
/* Logic 0 = cable fault */
*cable_fault = !ret;
return 0;
}
static int quad8_signal_cable_fault_enable_read(struct counter_device *counter,
struct counter_signal *signal,
u8 *enable)
{
const struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id / 2;
*enable = !!(priv->cable_fault_enable & BIT(channel_id));
return 0;
}
static int quad8_signal_cable_fault_enable_write(struct counter_device *counter,
struct counter_signal *signal,
u8 enable)
{
struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id / 2;
unsigned long irqflags;
unsigned int cable_fault_enable;
int ret;
spin_lock_irqsave(&priv->lock, irqflags);
if (enable)
priv->cable_fault_enable |= BIT(channel_id);
else
priv->cable_fault_enable &= ~BIT(channel_id);
/* Enable is active low in Differential Encoder Cable Status register */
cable_fault_enable = ~priv->cable_fault_enable;
ret = regmap_write(priv->map, QUAD8_CABLE_STATUS, cable_fault_enable);
spin_unlock_irqrestore(&priv->lock, irqflags);
return ret;
}
static int quad8_signal_fck_prescaler_read(struct counter_device *counter,
struct counter_signal *signal,
u8 *prescaler)
{
const struct quad8 *const priv = counter_priv(counter);
*prescaler = priv->fck_prescaler[signal->id / 2];
return 0;
}
static int quad8_filter_clock_prescaler_set(struct quad8 *const priv, const size_t id,
const u8 prescaler)
{
int ret;
ret = regmap_write(priv->map, QUAD8_CONTROL(id), SELECT_RLD | RESET_BP);
if (ret)
return ret;
ret = regmap_write(priv->map, QUAD8_DATA(id), prescaler);
if (ret)
return ret;
return regmap_write(priv->map, QUAD8_CONTROL(id), SELECT_RLD | TRANSFER_PR0_TO_PSC);
}
static int quad8_signal_fck_prescaler_write(struct counter_device *counter,
struct counter_signal *signal,
u8 prescaler)
{
struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id / 2;
unsigned long irqflags;
int ret;
spin_lock_irqsave(&priv->lock, irqflags);
priv->fck_prescaler[channel_id] = prescaler;
ret = quad8_filter_clock_prescaler_set(priv, channel_id, prescaler);
spin_unlock_irqrestore(&priv->lock, irqflags);
return ret;
}
static struct counter_comp quad8_signal_ext[] = {
COUNTER_COMP_SIGNAL_BOOL("cable_fault", quad8_signal_cable_fault_read,
NULL),
COUNTER_COMP_SIGNAL_BOOL("cable_fault_enable",
quad8_signal_cable_fault_enable_read,
quad8_signal_cable_fault_enable_write),
COUNTER_COMP_SIGNAL_U8("filter_clock_prescaler",
quad8_signal_fck_prescaler_read,
quad8_signal_fck_prescaler_write)
};
static const enum counter_signal_polarity quad8_polarities[] = {
COUNTER_SIGNAL_POLARITY_POSITIVE,
COUNTER_SIGNAL_POLARITY_NEGATIVE,
};
static DEFINE_COUNTER_AVAILABLE(quad8_polarity_available, quad8_polarities);
static DEFINE_COUNTER_ENUM(quad8_index_pol_enum, quad8_index_polarity_modes);
static DEFINE_COUNTER_ENUM(quad8_synch_mode_enum, quad8_synchronous_modes);
static struct counter_comp quad8_index_ext[] = {
COUNTER_COMP_SIGNAL_ENUM("index_polarity", quad8_index_polarity_get,
quad8_index_polarity_set,
quad8_index_pol_enum),
COUNTER_COMP_POLARITY(quad8_polarity_read, quad8_polarity_write,
quad8_polarity_available),
COUNTER_COMP_SIGNAL_ENUM("synchronous_mode", quad8_synchronous_mode_get,
quad8_synchronous_mode_set,
quad8_synch_mode_enum),
};
#define QUAD8_QUAD_SIGNAL(_id, _name) { \
.id = (_id), \
.name = (_name), \
.ext = quad8_signal_ext, \
.num_ext = ARRAY_SIZE(quad8_signal_ext) \
}
#define QUAD8_INDEX_SIGNAL(_id, _name) { \
.id = (_id), \
.name = (_name), \
.ext = quad8_index_ext, \
.num_ext = ARRAY_SIZE(quad8_index_ext) \
}
static struct counter_signal quad8_signals[] = {
QUAD8_QUAD_SIGNAL(0, "Channel 1 Quadrature A"),
QUAD8_QUAD_SIGNAL(1, "Channel 1 Quadrature B"),
QUAD8_QUAD_SIGNAL(2, "Channel 2 Quadrature A"),
QUAD8_QUAD_SIGNAL(3, "Channel 2 Quadrature B"),
QUAD8_QUAD_SIGNAL(4, "Channel 3 Quadrature A"),
QUAD8_QUAD_SIGNAL(5, "Channel 3 Quadrature B"),
QUAD8_QUAD_SIGNAL(6, "Channel 4 Quadrature A"),
QUAD8_QUAD_SIGNAL(7, "Channel 4 Quadrature B"),
QUAD8_QUAD_SIGNAL(8, "Channel 5 Quadrature A"),
QUAD8_QUAD_SIGNAL(9, "Channel 5 Quadrature B"),
QUAD8_QUAD_SIGNAL(10, "Channel 6 Quadrature A"),
QUAD8_QUAD_SIGNAL(11, "Channel 6 Quadrature B"),
QUAD8_QUAD_SIGNAL(12, "Channel 7 Quadrature A"),
QUAD8_QUAD_SIGNAL(13, "Channel 7 Quadrature B"),
QUAD8_QUAD_SIGNAL(14, "Channel 8 Quadrature A"),
QUAD8_QUAD_SIGNAL(15, "Channel 8 Quadrature B"),
QUAD8_INDEX_SIGNAL(16, "Channel 1 Index"),
QUAD8_INDEX_SIGNAL(17, "Channel 2 Index"),
QUAD8_INDEX_SIGNAL(18, "Channel 3 Index"),
QUAD8_INDEX_SIGNAL(19, "Channel 4 Index"),
QUAD8_INDEX_SIGNAL(20, "Channel 5 Index"),
QUAD8_INDEX_SIGNAL(21, "Channel 6 Index"),
QUAD8_INDEX_SIGNAL(22, "Channel 7 Index"),
QUAD8_INDEX_SIGNAL(23, "Channel 8 Index")
};
#define QUAD8_COUNT_SYNAPSES(_id) { \
{ \
.actions_list = quad8_synapse_actions_list, \
.num_actions = ARRAY_SIZE(quad8_synapse_actions_list), \
.signal = quad8_signals + 2 * (_id) \
}, \
{ \
.actions_list = quad8_synapse_actions_list, \
.num_actions = ARRAY_SIZE(quad8_synapse_actions_list), \
.signal = quad8_signals + 2 * (_id) + 1 \
}, \
{ \
.actions_list = quad8_index_actions_list, \
.num_actions = ARRAY_SIZE(quad8_index_actions_list), \
.signal = quad8_signals + 2 * (_id) + 16 \
} \
}
static struct counter_synapse quad8_count_synapses[][3] = {
QUAD8_COUNT_SYNAPSES(0), QUAD8_COUNT_SYNAPSES(1),
QUAD8_COUNT_SYNAPSES(2), QUAD8_COUNT_SYNAPSES(3),
QUAD8_COUNT_SYNAPSES(4), QUAD8_COUNT_SYNAPSES(5),
QUAD8_COUNT_SYNAPSES(6), QUAD8_COUNT_SYNAPSES(7)
};
static const enum counter_count_mode quad8_cnt_modes[] = {
COUNTER_COUNT_MODE_NORMAL,
COUNTER_COUNT_MODE_RANGE_LIMIT,
COUNTER_COUNT_MODE_NON_RECYCLE,
COUNTER_COUNT_MODE_MODULO_N,
};
static DEFINE_COUNTER_AVAILABLE(quad8_count_mode_available, quad8_cnt_modes);
static DEFINE_COUNTER_ENUM(quad8_error_noise_enum, quad8_noise_error_states);
static struct counter_comp quad8_count_ext[] = {
COUNTER_COMP_CEILING(quad8_count_ceiling_read,
quad8_count_ceiling_write),
COUNTER_COMP_FLOOR(quad8_count_floor_read, NULL),
COUNTER_COMP_COUNT_MODE(quad8_count_mode_read, quad8_count_mode_write,
quad8_count_mode_available),
COUNTER_COMP_DIRECTION(quad8_direction_read),
COUNTER_COMP_ENABLE(quad8_count_enable_read, quad8_count_enable_write),
COUNTER_COMP_COUNT_ENUM("error_noise", quad8_error_noise_get, NULL,
quad8_error_noise_enum),
COUNTER_COMP_PRESET(quad8_count_preset_read, quad8_count_preset_write),
COUNTER_COMP_PRESET_ENABLE(quad8_count_preset_enable_read,
quad8_count_preset_enable_write),
};
#define QUAD8_COUNT(_id, _cntname) { \
.id = (_id), \
.name = (_cntname), \
.functions_list = quad8_count_functions_list, \
.num_functions = ARRAY_SIZE(quad8_count_functions_list), \
.synapses = quad8_count_synapses[(_id)], \
.num_synapses = 2, \
.ext = quad8_count_ext, \
.num_ext = ARRAY_SIZE(quad8_count_ext) \
}
static struct counter_count quad8_counts[] = {
QUAD8_COUNT(0, "Channel 1 Count"),
QUAD8_COUNT(1, "Channel 2 Count"),
QUAD8_COUNT(2, "Channel 3 Count"),
QUAD8_COUNT(3, "Channel 4 Count"),
QUAD8_COUNT(4, "Channel 5 Count"),
QUAD8_COUNT(5, "Channel 6 Count"),
QUAD8_COUNT(6, "Channel 7 Count"),
QUAD8_COUNT(7, "Channel 8 Count")
};
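/*
 * Interrupt status is checked per channel; the event pushed to the Counter
 * subsystem is derived from the FLG pin routing currently configured for that
 * channel. Pending interrupts are cleared by writing the Channel Operation
 * register.
 */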
static irqreturn_t quad8_irq_handler(int irq, void *private)
{
struct counter_device *counter = private;
struct quad8 *const priv = counter_priv(counter);
unsigned int status;
unsigned long irq_status;
unsigned long channel;
unsigned int flg_pins;
u8 event;
int ret;
ret = regmap_read(priv->map, QUAD8_INTERRUPT_STATUS, &status);
if (ret)
return ret;
if (!status)
return IRQ_NONE;
irq_status = status;
for_each_set_bit(channel, &irq_status, QUAD8_NUM_COUNTERS) {
flg_pins = u8_get_bits(priv->ior[channel], FLG_PINS);
switch (flg_pins) {
case FLG1_CARRY_FLG2_BORROW:
event = COUNTER_EVENT_OVERFLOW;
break;
case FLG1_COMPARE_FLG2_BORROW:
event = COUNTER_EVENT_THRESHOLD;
break;
case FLG1_CARRYBORROW_FLG2_UD:
event = COUNTER_EVENT_OVERFLOW_UNDERFLOW;
break;
case FLG1_INDX_FLG2_E:
event = COUNTER_EVENT_INDEX;
break;
default:
/* should never reach this path */
WARN_ONCE(true, "invalid interrupt trigger function %u configured for channel %lu\n",
flg_pins, channel);
continue;
}
counter_push_event(counter, event, channel);
}
ret = regmap_write(priv->map, QUAD8_CHANNEL_OPERATION, CLEAR_PENDING_INTERRUPTS);
if (ret)
return ret;
return IRQ_HANDLED;
}
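/*
 * Put a channel into a known default state: filter clock prescaler 0, preset
 * 0, flags cleared, binary non-quadrature normal count, A/B inputs disabled,
 * index function disabled.
 */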
static int quad8_init_counter(struct quad8 *const priv, const size_t channel)
{
int ret;
ret = quad8_filter_clock_prescaler_set(priv, channel, 0);
if (ret)
return ret;
ret = quad8_preset_register_set(priv, channel, 0);
if (ret)
return ret;
ret = quad8_flag_register_reset(priv, channel);
if (ret)
return ret;
/* Binary encoding; Normal count; non-quadrature mode */
priv->cmr[channel] = SELECT_CMR | BINARY | u8_encode_bits(NORMAL_COUNT, COUNT_MODE) |
u8_encode_bits(NON_QUADRATURE, QUADRATURE_MODE);
ret = regmap_write(priv->map, QUAD8_CONTROL(channel), priv->cmr[channel]);
if (ret)
return ret;
/* Disable A and B inputs; preset on index; FLG1 as Carry */
priv->ior[channel] = SELECT_IOR | DISABLE_AB | u8_encode_bits(LOAD_CNTR, LOAD_PIN) |
u8_encode_bits(FLG1_CARRY_FLG2_BORROW, FLG_PINS);
ret = regmap_write(priv->map, QUAD8_CONTROL(channel), priv->ior[channel]);
if (ret)
return ret;
/* Disable index function; negative index polarity */
priv->idr[channel] = SELECT_IDR | u8_encode_bits(DISABLE_INDEX_MODE, INDEX_MODE) |
u8_encode_bits(NEGATIVE_INDEX_POLARITY, INDEX_POLARITY);
return regmap_write(priv->map, QUAD8_CONTROL(channel), priv->idr[channel]);
}
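/*
 * ISA device probe: request the I/O port region, create the regmap, reset and
 * configure every channel, then register the device with the Counter
 * subsystem.
 */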
static int quad8_probe(struct device *dev, unsigned int id)
{
struct counter_device *counter;
struct quad8 *priv;
void __iomem *regs;
unsigned long i;
int ret;
if (!devm_request_region(dev, base[id], QUAD8_EXTENT, dev_name(dev))) {
dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
base[id], base[id] + QUAD8_EXTENT);
return -EBUSY;
}
counter = devm_counter_alloc(dev, sizeof(*priv));
if (!counter)
return -ENOMEM;
priv = counter_priv(counter);
regs = devm_ioport_map(dev, base[id], QUAD8_EXTENT);
if (!regs)
return -ENOMEM;
priv->map = devm_regmap_init_mmio(dev, regs, &quad8_regmap_config);
if (IS_ERR(priv->map))
return dev_err_probe(dev, PTR_ERR(priv->map),
"Unable to initialize register map\n");
/* Initialize Counter device and driver data */
counter->name = dev_name(dev);
counter->parent = dev;
counter->ops = &quad8_ops;
counter->counts = quad8_counts;
counter->num_counts = ARRAY_SIZE(quad8_counts);
counter->signals = quad8_signals;
counter->num_signals = ARRAY_SIZE(quad8_signals);
spin_lock_init(&priv->lock);
/* Reset Index/Interrupt Register */
ret = regmap_write(priv->map, QUAD8_INDEX_INTERRUPT, 0x00);
if (ret)
return ret;
/* Reset all counters and disable interrupt function */
ret = regmap_write(priv->map, QUAD8_CHANNEL_OPERATION,
RESET_COUNTERS | DISABLE_INTERRUPT_FUNCTION);
if (ret)
return ret;
/* Set initial configuration for all counters */
for (i = 0; i < QUAD8_NUM_COUNTERS; i++) {
ret = quad8_init_counter(priv, i);
if (ret)
return ret;
}
/* Disable Differential Encoder Cable Status for all channels */
ret = regmap_write(priv->map, QUAD8_CABLE_STATUS, GENMASK(7, 0));
if (ret)
return ret;
/* Enable all counters and enable interrupt function */
ret = regmap_write(priv->map, QUAD8_CHANNEL_OPERATION,
ENABLE_COUNTERS | ENABLE_INTERRUPT_FUNCTION);
if (ret)
return ret;
ret = devm_request_irq(&counter->dev, irq[id], quad8_irq_handler,
IRQF_SHARED, counter->name, counter);
if (ret)
return ret;
ret = devm_counter_add(dev, counter);
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to add counter\n");
return 0;
}
static struct isa_driver quad8_driver = {
.probe = quad8_probe,
.driver = {
.name = "104-quad-8"
}
};
module_isa_driver_with_irq(quad8_driver, num_quad8, num_irq);
MODULE_AUTHOR("William Breathitt Gray <[email protected]>");
MODULE_DESCRIPTION("ACCES 104-QUAD-8 driver");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(COUNTER);
| linux-master | drivers/counter/104-quad-8.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2021 Pengutronix, Oleksij Rempel <[email protected]>
*/
#include <linux/counter.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#define INTERRUPT_CNT_NAME "interrupt-cnt"
struct interrupt_cnt_priv {
atomic_t count;
struct gpio_desc *gpio;
int irq;
bool enabled;
struct counter_signal signals;
struct counter_synapse synapses;
struct counter_count cnts;
};
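/* Count one rising edge and notify userspace watchers via the events interface. */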
static irqreturn_t interrupt_cnt_isr(int irq, void *dev_id)
{
struct counter_device *counter = dev_id;
struct interrupt_cnt_priv *priv = counter_priv(counter);
atomic_inc(&priv->count);
counter_push_event(counter, COUNTER_EVENT_CHANGE_OF_STATE, 0);
return IRQ_HANDLED;
}
static int interrupt_cnt_enable_read(struct counter_device *counter,
struct counter_count *count, u8 *enable)
{
struct interrupt_cnt_priv *priv = counter_priv(counter);
*enable = priv->enabled;
return 0;
}
static int interrupt_cnt_enable_write(struct counter_device *counter,
struct counter_count *count, u8 enable)
{
struct interrupt_cnt_priv *priv = counter_priv(counter);
if (priv->enabled == enable)
return 0;
if (enable) {
priv->enabled = true;
enable_irq(priv->irq);
} else {
disable_irq(priv->irq);
priv->enabled = false;
}
return 0;
}
static struct counter_comp interrupt_cnt_ext[] = {
COUNTER_COMP_ENABLE(interrupt_cnt_enable_read,
interrupt_cnt_enable_write),
};
static const enum counter_synapse_action interrupt_cnt_synapse_actions[] = {
COUNTER_SYNAPSE_ACTION_RISING_EDGE,
};
static int interrupt_cnt_action_read(struct counter_device *counter,
struct counter_count *count,
struct counter_synapse *synapse,
enum counter_synapse_action *action)
{
*action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
return 0;
}
static int interrupt_cnt_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
struct interrupt_cnt_priv *priv = counter_priv(counter);
*val = atomic_read(&priv->count);
return 0;
}
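/* Reject values that do not fit in the underlying atomic_t. */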
static int interrupt_cnt_write(struct counter_device *counter,
struct counter_count *count, const u64 val)
{
struct interrupt_cnt_priv *priv = counter_priv(counter);
if (val != (typeof(priv->count.counter))val)
return -ERANGE;
atomic_set(&priv->count, val);
return 0;
}
static const enum counter_function interrupt_cnt_functions[] = {
COUNTER_FUNCTION_INCREASE,
};
static int interrupt_cnt_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
*function = COUNTER_FUNCTION_INCREASE;
return 0;
}
static int interrupt_cnt_signal_read(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_level *level)
{
struct interrupt_cnt_priv *priv = counter_priv(counter);
int ret;
if (!priv->gpio)
return -EINVAL;
ret = gpiod_get_value(priv->gpio);
if (ret < 0)
return ret;
*level = ret ? COUNTER_SIGNAL_LEVEL_HIGH : COUNTER_SIGNAL_LEVEL_LOW;
return 0;
}
static int interrupt_cnt_watch_validate(struct counter_device *counter,
const struct counter_watch *watch)
{
if (watch->channel != 0 ||
watch->event != COUNTER_EVENT_CHANGE_OF_STATE)
return -EINVAL;
return 0;
}
static const struct counter_ops interrupt_cnt_ops = {
.action_read = interrupt_cnt_action_read,
.count_read = interrupt_cnt_read,
.count_write = interrupt_cnt_write,
.function_read = interrupt_cnt_function_read,
.signal_read = interrupt_cnt_signal_read,
.watch_validate = interrupt_cnt_watch_validate,
};
static int interrupt_cnt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct counter_device *counter;
struct interrupt_cnt_priv *priv;
int ret;
counter = devm_counter_alloc(dev, sizeof(*priv));
if (!counter)
return -ENOMEM;
priv = counter_priv(counter);
priv->irq = platform_get_irq_optional(pdev, 0);
if (priv->irq == -ENXIO)
priv->irq = 0;
else if (priv->irq < 0)
return dev_err_probe(dev, priv->irq, "failed to get IRQ\n");
priv->gpio = devm_gpiod_get_optional(dev, NULL, GPIOD_IN);
if (IS_ERR(priv->gpio))
return dev_err_probe(dev, PTR_ERR(priv->gpio), "failed to get GPIO\n");
if (!priv->irq && !priv->gpio) {
dev_err(dev, "IRQ and GPIO are not found. At least one source should be provided\n");
return -ENODEV;
}
if (!priv->irq) {
int irq = gpiod_to_irq(priv->gpio);
if (irq < 0)
return dev_err_probe(dev, irq, "failed to get IRQ from GPIO\n");
priv->irq = irq;
}
priv->signals.name = devm_kasprintf(dev, GFP_KERNEL, "IRQ %d",
priv->irq);
if (!priv->signals.name)
return -ENOMEM;
counter->signals = &priv->signals;
counter->num_signals = 1;
priv->synapses.actions_list = interrupt_cnt_synapse_actions;
priv->synapses.num_actions = ARRAY_SIZE(interrupt_cnt_synapse_actions);
priv->synapses.signal = &priv->signals;
priv->cnts.name = "Channel 0 Count";
priv->cnts.functions_list = interrupt_cnt_functions;
priv->cnts.num_functions = ARRAY_SIZE(interrupt_cnt_functions);
priv->cnts.synapses = &priv->synapses;
priv->cnts.num_synapses = 1;
priv->cnts.ext = interrupt_cnt_ext;
priv->cnts.num_ext = ARRAY_SIZE(interrupt_cnt_ext);
counter->name = dev_name(dev);
counter->parent = dev;
counter->ops = &interrupt_cnt_ops;
counter->counts = &priv->cnts;
counter->num_counts = 1;
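	/*
	 * Keep the IRQ disabled after request; it is enabled only when the
	 * count is enabled from userspace.
	 */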
irq_set_status_flags(priv->irq, IRQ_NOAUTOEN);
ret = devm_request_irq(dev, priv->irq, interrupt_cnt_isr,
IRQF_TRIGGER_RISING | IRQF_NO_THREAD,
dev_name(dev), counter);
if (ret)
return ret;
ret = devm_counter_add(dev, counter);
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to add counter\n");
return 0;
}
static const struct of_device_id interrupt_cnt_of_match[] = {
{ .compatible = "interrupt-counter", },
{}
};
MODULE_DEVICE_TABLE(of, interrupt_cnt_of_match);
static struct platform_driver interrupt_cnt_driver = {
.probe = interrupt_cnt_probe,
.driver = {
.name = INTERRUPT_CNT_NAME,
.of_match_table = interrupt_cnt_of_match,
},
};
module_platform_driver(interrupt_cnt_driver);
MODULE_ALIAS("platform:interrupt-counter");
MODULE_AUTHOR("Oleksij Rempel <[email protected]>");
MODULE_DESCRIPTION("Interrupt counter driver");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(COUNTER);
| linux-master | drivers/counter/interrupt-cnt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas RZ/G2L MTU3a Counter driver
*
* Copyright (C) 2022 Renesas Electronics Corporation
*/
#include <linux/clk.h>
#include <linux/counter.h>
#include <linux/mfd/rz-mtu3.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
/*
* Register descriptions
* TSR: Timer Status Register
* TMDR1: Timer Mode Register 1
* TMDR3: Timer Mode Register 3
* TIOR: Timer I/O Control Register
* TCR: Timer Control Register
* TCNT: Timer Counter
* TGRA: Timer general register A
* TCNTLW: Timer Longword Counter
* TGRALW: Timer longword general register A
*/
#define RZ_MTU3_TSR_TCFD BIT(7) /* Count Direction Flag */
#define RZ_MTU3_TMDR1_PH_CNT_MODE_1 (4) /* Phase counting mode 1 */
#define RZ_MTU3_TMDR1_PH_CNT_MODE_2 (5) /* Phase counting mode 2 */
#define RZ_MTU3_TMDR1_PH_CNT_MODE_3 (6) /* Phase counting mode 3 */
#define RZ_MTU3_TMDR1_PH_CNT_MODE_4 (7) /* Phase counting mode 4 */
#define RZ_MTU3_TMDR1_PH_CNT_MODE_5 (9) /* Phase counting mode 5 */
#define RZ_MTU3_TMDR1_PH_CNT_MODE_MASK (0xf)
/*
* LWA: MTU1/MTU2 Combination Longword Access Control
* 0: 16-bit, 1: 32-bit
*/
#define RZ_MTU3_TMDR3_LWA (0)
/*
* PHCKSEL: External Input Phase Clock Select
* 0: MTCLKA and MTCLKB, 1: MTCLKC and MTCLKD
*/
#define RZ_MTU3_TMDR3_PHCKSEL (1)
#define RZ_MTU3_16_BIT_MTU1_CH (0)
#define RZ_MTU3_16_BIT_MTU2_CH (1)
#define RZ_MTU3_32_BIT_CH (2)
#define RZ_MTU3_TIOR_NO_OUTPUT (0) /* Output prohibited */
#define RZ_MTU3_TIOR_IC_BOTH (10) /* Input capture at both edges */
#define SIGNAL_A_ID (0)
#define SIGNAL_B_ID (1)
#define SIGNAL_C_ID (2)
#define SIGNAL_D_ID (3)
#define RZ_MTU3_MAX_HW_CNTR_CHANNELS (2)
#define RZ_MTU3_MAX_LOGICAL_CNTR_CHANNELS (3)
/**
* struct rz_mtu3_cnt - MTU3 counter private data
*
* @clk: MTU3 module clock
 * @lock: Lock to prevent concurrent access to ceiling and count
* @ch: HW channels for the counters
* @count_is_enabled: Enabled state of Counter value channel
* @mtu_16bit_max: Cache for 16-bit counters
* @mtu_32bit_max: Cache for 32-bit counters
*/
struct rz_mtu3_cnt {
struct clk *clk;
struct mutex lock;
struct rz_mtu3_channel *ch;
bool count_is_enabled[RZ_MTU3_MAX_LOGICAL_CNTR_CHANNELS];
union {
u16 mtu_16bit_max[RZ_MTU3_MAX_HW_CNTR_CHANNELS];
u32 mtu_32bit_max;
};
};
static const enum counter_function rz_mtu3_count_functions[] = {
COUNTER_FUNCTION_QUADRATURE_X4,
COUNTER_FUNCTION_PULSE_DIRECTION,
COUNTER_FUNCTION_QUADRATURE_X2_B,
};
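/*
 * Map a logical counter id to its HW channel: the 32-bit cascade count is
 * driven through the first channel (MTU1), while the 16-bit counts map 1:1.
 */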
static inline size_t rz_mtu3_get_hw_ch(const size_t id)
{
return (id == RZ_MTU3_32_BIT_CH) ? 0 : id;
}
static inline struct rz_mtu3_channel *rz_mtu3_get_ch(struct counter_device *counter, int id)
{
struct rz_mtu3_cnt *const priv = counter_priv(counter);
const size_t ch_id = rz_mtu3_get_hw_ch(id);
return &priv->ch[ch_id];
}
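/*
 * A counter id is invalid when its width does not match the current
 * TMDR3.LWA (16-bit vs. 32-bit cascade) setting.
 */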
static bool rz_mtu3_is_counter_invalid(struct counter_device *counter, int id)
{
struct rz_mtu3_cnt *const priv = counter_priv(counter);
unsigned long tmdr;
pm_runtime_get_sync(priv->ch->dev);
tmdr = rz_mtu3_shared_reg_read(priv->ch, RZ_MTU3_TMDR3);
pm_runtime_put(priv->ch->dev);
if (id == RZ_MTU3_32_BIT_CH && test_bit(RZ_MTU3_TMDR3_LWA, &tmdr))
return false;
if (id != RZ_MTU3_32_BIT_CH && !test_bit(RZ_MTU3_TMDR3_LWA, &tmdr))
return false;
return true;
}
static int rz_mtu3_lock_if_counter_is_valid(struct counter_device *counter,
struct rz_mtu3_channel *const ch,
struct rz_mtu3_cnt *const priv,
int id)
{
mutex_lock(&priv->lock);
if (ch->is_busy && !priv->count_is_enabled[id]) {
mutex_unlock(&priv->lock);
return -EINVAL;
}
if (rz_mtu3_is_counter_invalid(counter, id)) {
mutex_unlock(&priv->lock);
return -EBUSY;
}
return 0;
}
static int rz_mtu3_lock_if_count_is_enabled(struct rz_mtu3_channel *const ch,
struct rz_mtu3_cnt *const priv,
int id)
{
mutex_lock(&priv->lock);
if (ch->is_busy && !priv->count_is_enabled[id]) {
mutex_unlock(&priv->lock);
return -EINVAL;
}
return 0;
}
static int rz_mtu3_count_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
struct rz_mtu3_cnt *const priv = counter_priv(counter);
int ret;
ret = rz_mtu3_lock_if_counter_is_valid(counter, ch, priv, count->id);
if (ret)
return ret;
pm_runtime_get_sync(ch->dev);
if (count->id == RZ_MTU3_32_BIT_CH)
*val = rz_mtu3_32bit_ch_read(ch, RZ_MTU3_TCNTLW);
else
*val = rz_mtu3_16bit_ch_read(ch, RZ_MTU3_TCNT);
pm_runtime_put(ch->dev);
mutex_unlock(&priv->lock);
return 0;
}
static int rz_mtu3_count_write(struct counter_device *counter,
struct counter_count *count, const u64 val)
{
struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
struct rz_mtu3_cnt *const priv = counter_priv(counter);
int ret;
ret = rz_mtu3_lock_if_counter_is_valid(counter, ch, priv, count->id);
if (ret)
return ret;
pm_runtime_get_sync(ch->dev);
if (count->id == RZ_MTU3_32_BIT_CH)
rz_mtu3_32bit_ch_write(ch, RZ_MTU3_TCNTLW, val);
else
rz_mtu3_16bit_ch_write(ch, RZ_MTU3_TCNT, val);
pm_runtime_put(ch->dev);
mutex_unlock(&priv->lock);
return 0;
}
static int rz_mtu3_count_function_read_helper(struct rz_mtu3_channel *const ch,
struct rz_mtu3_cnt *const priv,
enum counter_function *function)
{
u8 timer_mode;
pm_runtime_get_sync(ch->dev);
timer_mode = rz_mtu3_8bit_ch_read(ch, RZ_MTU3_TMDR1);
pm_runtime_put(ch->dev);
switch (timer_mode & RZ_MTU3_TMDR1_PH_CNT_MODE_MASK) {
case RZ_MTU3_TMDR1_PH_CNT_MODE_1:
*function = COUNTER_FUNCTION_QUADRATURE_X4;
return 0;
case RZ_MTU3_TMDR1_PH_CNT_MODE_2:
*function = COUNTER_FUNCTION_PULSE_DIRECTION;
return 0;
case RZ_MTU3_TMDR1_PH_CNT_MODE_4:
*function = COUNTER_FUNCTION_QUADRATURE_X2_B;
return 0;
default:
/*
* TODO:
* - need to add RZ_MTU3_TMDR1_PH_CNT_MODE_3
* - need to add RZ_MTU3_TMDR1_PH_CNT_MODE_5
*/
return -EINVAL;
}
}
static int rz_mtu3_count_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
struct rz_mtu3_cnt *const priv = counter_priv(counter);
int ret;
ret = rz_mtu3_lock_if_count_is_enabled(ch, priv, count->id);
if (ret)
return ret;
ret = rz_mtu3_count_function_read_helper(ch, priv, function);
mutex_unlock(&priv->lock);
return ret;
}
static int rz_mtu3_count_function_write(struct counter_device *counter,
struct counter_count *count,
enum counter_function function)
{
struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
struct rz_mtu3_cnt *const priv = counter_priv(counter);
u8 timer_mode;
int ret;
ret = rz_mtu3_lock_if_count_is_enabled(ch, priv, count->id);
if (ret)
return ret;
switch (function) {
case COUNTER_FUNCTION_QUADRATURE_X4:
timer_mode = RZ_MTU3_TMDR1_PH_CNT_MODE_1;
break;
case COUNTER_FUNCTION_PULSE_DIRECTION:
timer_mode = RZ_MTU3_TMDR1_PH_CNT_MODE_2;
break;
case COUNTER_FUNCTION_QUADRATURE_X2_B:
timer_mode = RZ_MTU3_TMDR1_PH_CNT_MODE_4;
break;
default:
/*
* TODO:
* - need to add RZ_MTU3_TMDR1_PH_CNT_MODE_3
* - need to add RZ_MTU3_TMDR1_PH_CNT_MODE_5
*/
mutex_unlock(&priv->lock);
return -EINVAL;
}
pm_runtime_get_sync(ch->dev);
rz_mtu3_8bit_ch_write(ch, RZ_MTU3_TMDR1, timer_mode);
pm_runtime_put(ch->dev);
mutex_unlock(&priv->lock);
return 0;
}
static int rz_mtu3_count_direction_read(struct counter_device *counter,
struct counter_count *count,
enum counter_count_direction *direction)
{
struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
struct rz_mtu3_cnt *const priv = counter_priv(counter);
int ret;
u8 tsr;
ret = rz_mtu3_lock_if_count_is_enabled(ch, priv, count->id);
if (ret)
return ret;
pm_runtime_get_sync(ch->dev);
tsr = rz_mtu3_8bit_ch_read(ch, RZ_MTU3_TSR);
pm_runtime_put(ch->dev);
*direction = (tsr & RZ_MTU3_TSR_TCFD) ?
COUNTER_COUNT_DIRECTION_FORWARD : COUNTER_COUNT_DIRECTION_BACKWARD;
mutex_unlock(&priv->lock);
return 0;
}
static int rz_mtu3_count_ceiling_read(struct counter_device *counter,
struct counter_count *count,
u64 *ceiling)
{
struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
struct rz_mtu3_cnt *const priv = counter_priv(counter);
const size_t ch_id = rz_mtu3_get_hw_ch(count->id);
int ret;
ret = rz_mtu3_lock_if_counter_is_valid(counter, ch, priv, count->id);
if (ret)
return ret;
switch (count->id) {
case RZ_MTU3_16_BIT_MTU1_CH:
case RZ_MTU3_16_BIT_MTU2_CH:
*ceiling = priv->mtu_16bit_max[ch_id];
break;
case RZ_MTU3_32_BIT_CH:
*ceiling = priv->mtu_32bit_max;
break;
default:
/* should never reach this path */
mutex_unlock(&priv->lock);
return -EINVAL;
}
mutex_unlock(&priv->lock);
return 0;
}
static int rz_mtu3_count_ceiling_write(struct counter_device *counter,
struct counter_count *count,
u64 ceiling)
{
struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
struct rz_mtu3_cnt *const priv = counter_priv(counter);
const size_t ch_id = rz_mtu3_get_hw_ch(count->id);
int ret;
ret = rz_mtu3_lock_if_counter_is_valid(counter, ch, priv, count->id);
if (ret)
return ret;
switch (count->id) {
case RZ_MTU3_16_BIT_MTU1_CH:
case RZ_MTU3_16_BIT_MTU2_CH:
if (ceiling > U16_MAX) {
mutex_unlock(&priv->lock);
return -ERANGE;
}
priv->mtu_16bit_max[ch_id] = ceiling;
break;
case RZ_MTU3_32_BIT_CH:
if (ceiling > U32_MAX) {
mutex_unlock(&priv->lock);
return -ERANGE;
}
priv->mtu_32bit_max = ceiling;
break;
default:
/* should never reach this path */
mutex_unlock(&priv->lock);
return -EINVAL;
}
pm_runtime_get_sync(ch->dev);
if (count->id == RZ_MTU3_32_BIT_CH)
rz_mtu3_32bit_ch_write(ch, RZ_MTU3_TGRALW, ceiling);
else
rz_mtu3_16bit_ch_write(ch, RZ_MTU3_TGRA, ceiling);
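	/* Clear the counter on TGRA compare match so the new ceiling takes effect. */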
rz_mtu3_8bit_ch_write(ch, RZ_MTU3_TCR, RZ_MTU3_TCR_CCLR_TGRA);
pm_runtime_put(ch->dev);
mutex_unlock(&priv->lock);
return 0;
}
static void rz_mtu3_32bit_cnt_setting(struct counter_device *counter)
{
struct rz_mtu3_channel *const ch1 = rz_mtu3_get_ch(counter, 0);
struct rz_mtu3_channel *const ch2 = rz_mtu3_get_ch(counter, 1);
/* Phase counting mode 1 is used as default in initialization. */
rz_mtu3_8bit_ch_write(ch1, RZ_MTU3_TMDR1, RZ_MTU3_TMDR1_PH_CNT_MODE_1);
rz_mtu3_8bit_ch_write(ch1, RZ_MTU3_TCR, RZ_MTU3_TCR_CCLR_TGRA);
rz_mtu3_8bit_ch_write(ch1, RZ_MTU3_TIOR, RZ_MTU3_TIOR_IC_BOTH);
rz_mtu3_enable(ch1);
rz_mtu3_enable(ch2);
}
static void rz_mtu3_16bit_cnt_setting(struct counter_device *counter, int id)
{
struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, id);
/* Phase counting mode 1 is used as default in initialization. */
rz_mtu3_8bit_ch_write(ch, RZ_MTU3_TMDR1, RZ_MTU3_TMDR1_PH_CNT_MODE_1);
rz_mtu3_8bit_ch_write(ch, RZ_MTU3_TCR, RZ_MTU3_TCR_CCLR_TGRA);
rz_mtu3_8bit_ch_write(ch, RZ_MTU3_TIOR, RZ_MTU3_TIOR_NO_OUTPUT);
rz_mtu3_enable(ch);
}
static int rz_mtu3_initialize_counter(struct counter_device *counter, int id)
{
struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, id);
struct rz_mtu3_channel *const ch1 = rz_mtu3_get_ch(counter, 0);
struct rz_mtu3_channel *const ch2 = rz_mtu3_get_ch(counter, 1);
switch (id) {
case RZ_MTU3_16_BIT_MTU1_CH:
case RZ_MTU3_16_BIT_MTU2_CH:
if (!rz_mtu3_request_channel(ch))
return -EBUSY;
rz_mtu3_16bit_cnt_setting(counter, id);
return 0;
case RZ_MTU3_32_BIT_CH:
/*
		 * 32-bit phase counting needs MTU1 and MTU2 combined to form a
		 * 32-bit cascade counter.
*/
if (!rz_mtu3_request_channel(ch1))
return -EBUSY;
if (!rz_mtu3_request_channel(ch2)) {
rz_mtu3_release_channel(ch1);
return -EBUSY;
}
rz_mtu3_32bit_cnt_setting(counter);
return 0;
default:
/* should never reach this path */
return -EINVAL;
}
}
static void rz_mtu3_terminate_counter(struct counter_device *counter, int id)
{
struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, id);
struct rz_mtu3_channel *const ch1 = rz_mtu3_get_ch(counter, 0);
struct rz_mtu3_channel *const ch2 = rz_mtu3_get_ch(counter, 1);
if (id == RZ_MTU3_32_BIT_CH) {
rz_mtu3_release_channel(ch2);
rz_mtu3_release_channel(ch1);
rz_mtu3_disable(ch2);
rz_mtu3_disable(ch1);
} else {
rz_mtu3_release_channel(ch);
rz_mtu3_disable(ch);
}
}
static int rz_mtu3_count_enable_read(struct counter_device *counter,
struct counter_count *count, u8 *enable)
{
struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
struct rz_mtu3_channel *const ch1 = rz_mtu3_get_ch(counter, 0);
struct rz_mtu3_channel *const ch2 = rz_mtu3_get_ch(counter, 1);
struct rz_mtu3_cnt *const priv = counter_priv(counter);
int ret;
ret = rz_mtu3_lock_if_count_is_enabled(ch, priv, count->id);
if (ret)
return ret;
if (count->id == RZ_MTU3_32_BIT_CH)
*enable = rz_mtu3_is_enabled(ch1) && rz_mtu3_is_enabled(ch2);
else
*enable = rz_mtu3_is_enabled(ch);
mutex_unlock(&priv->lock);
return 0;
}
static int rz_mtu3_count_enable_write(struct counter_device *counter,
struct counter_count *count, u8 enable)
{
struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
struct rz_mtu3_cnt *const priv = counter_priv(counter);
int ret = 0;
if (enable) {
mutex_lock(&priv->lock);
pm_runtime_get_sync(ch->dev);
ret = rz_mtu3_initialize_counter(counter, count->id);
if (ret == 0)
priv->count_is_enabled[count->id] = true;
mutex_unlock(&priv->lock);
} else {
mutex_lock(&priv->lock);
rz_mtu3_terminate_counter(counter, count->id);
priv->count_is_enabled[count->id] = false;
pm_runtime_put(ch->dev);
mutex_unlock(&priv->lock);
}
return ret;
}
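/*
 * The shared TMDR3 bits are accessed through HW channel 0; refuse access
 * while that channel is busy with something other than these counters.
 */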
static int rz_mtu3_lock_if_ch0_is_enabled(struct rz_mtu3_cnt *const priv)
{
mutex_lock(&priv->lock);
if (priv->ch->is_busy && !(priv->count_is_enabled[RZ_MTU3_16_BIT_MTU1_CH] ||
priv->count_is_enabled[RZ_MTU3_32_BIT_CH])) {
mutex_unlock(&priv->lock);
return -EINVAL;
}
return 0;
}
static int rz_mtu3_cascade_counts_enable_get(struct counter_device *counter,
u8 *cascade_enable)
{
struct rz_mtu3_cnt *const priv = counter_priv(counter);
unsigned long tmdr;
int ret;
ret = rz_mtu3_lock_if_ch0_is_enabled(priv);
if (ret)
return ret;
pm_runtime_get_sync(priv->ch->dev);
tmdr = rz_mtu3_shared_reg_read(priv->ch, RZ_MTU3_TMDR3);
pm_runtime_put(priv->ch->dev);
*cascade_enable = test_bit(RZ_MTU3_TMDR3_LWA, &tmdr);
mutex_unlock(&priv->lock);
return 0;
}
static int rz_mtu3_cascade_counts_enable_set(struct counter_device *counter,
u8 cascade_enable)
{
struct rz_mtu3_cnt *const priv = counter_priv(counter);
int ret;
ret = rz_mtu3_lock_if_ch0_is_enabled(priv);
if (ret)
return ret;
pm_runtime_get_sync(priv->ch->dev);
rz_mtu3_shared_reg_update_bit(priv->ch, RZ_MTU3_TMDR3,
RZ_MTU3_TMDR3_LWA, cascade_enable);
pm_runtime_put(priv->ch->dev);
mutex_unlock(&priv->lock);
return 0;
}
static int rz_mtu3_ext_input_phase_clock_select_get(struct counter_device *counter,
u32 *ext_input_phase_clock_select)
{
struct rz_mtu3_cnt *const priv = counter_priv(counter);
unsigned long tmdr;
int ret;
ret = rz_mtu3_lock_if_ch0_is_enabled(priv);
if (ret)
return ret;
pm_runtime_get_sync(priv->ch->dev);
tmdr = rz_mtu3_shared_reg_read(priv->ch, RZ_MTU3_TMDR3);
pm_runtime_put(priv->ch->dev);
*ext_input_phase_clock_select = test_bit(RZ_MTU3_TMDR3_PHCKSEL, &tmdr);
mutex_unlock(&priv->lock);
return 0;
}
static int rz_mtu3_ext_input_phase_clock_select_set(struct counter_device *counter,
u32 ext_input_phase_clock_select)
{
struct rz_mtu3_cnt *const priv = counter_priv(counter);
int ret;
ret = rz_mtu3_lock_if_ch0_is_enabled(priv);
if (ret)
return ret;
pm_runtime_get_sync(priv->ch->dev);
rz_mtu3_shared_reg_update_bit(priv->ch, RZ_MTU3_TMDR3,
RZ_MTU3_TMDR3_PHCKSEL,
ext_input_phase_clock_select);
pm_runtime_put(priv->ch->dev);
mutex_unlock(&priv->lock);
return 0;
}
static struct counter_comp rz_mtu3_count_ext[] = {
COUNTER_COMP_DIRECTION(rz_mtu3_count_direction_read),
COUNTER_COMP_ENABLE(rz_mtu3_count_enable_read,
rz_mtu3_count_enable_write),
COUNTER_COMP_CEILING(rz_mtu3_count_ceiling_read,
rz_mtu3_count_ceiling_write),
};
static const enum counter_synapse_action rz_mtu3_synapse_actions[] = {
COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
COUNTER_SYNAPSE_ACTION_RISING_EDGE,
COUNTER_SYNAPSE_ACTION_NONE,
};
static int rz_mtu3_action_read(struct counter_device *counter,
struct counter_count *count,
struct counter_synapse *synapse,
enum counter_synapse_action *action)
{
const bool is_signal_ab = (synapse->signal->id == SIGNAL_A_ID) ||
(synapse->signal->id == SIGNAL_B_ID);
struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id);
struct rz_mtu3_cnt *const priv = counter_priv(counter);
enum counter_function function;
bool mtclkc_mtclkd;
unsigned long tmdr;
int ret;
ret = rz_mtu3_lock_if_count_is_enabled(ch, priv, count->id);
if (ret)
return ret;
ret = rz_mtu3_count_function_read_helper(ch, priv, &function);
if (ret) {
mutex_unlock(&priv->lock);
return ret;
}
/* Default action mode */
*action = COUNTER_SYNAPSE_ACTION_NONE;
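	/*
	 * Signals on the external clock pair not selected by PHCKSEL never
	 * trigger an action for the MTU2 and cascade counts.
	 */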
if (count->id != RZ_MTU3_16_BIT_MTU1_CH) {
tmdr = rz_mtu3_shared_reg_read(priv->ch, RZ_MTU3_TMDR3);
mtclkc_mtclkd = test_bit(RZ_MTU3_TMDR3_PHCKSEL, &tmdr);
if ((mtclkc_mtclkd && is_signal_ab) ||
(!mtclkc_mtclkd && !is_signal_ab)) {
mutex_unlock(&priv->lock);
return 0;
}
}
switch (function) {
case COUNTER_FUNCTION_PULSE_DIRECTION:
/*
* Rising edges on signal A (signal C) updates the respective
* count. The input level of signal B (signal D) determines
* direction.
*/
if (synapse->signal->id == SIGNAL_A_ID ||
synapse->signal->id == SIGNAL_C_ID)
*action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
break;
case COUNTER_FUNCTION_QUADRATURE_X2_B:
/*
* Any state transition on quadrature pair signal B (signal D)
* updates the respective count.
*/
if (synapse->signal->id == SIGNAL_B_ID ||
synapse->signal->id == SIGNAL_D_ID)
*action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
break;
case COUNTER_FUNCTION_QUADRATURE_X4:
/* counts up/down on both edges of A (C) and B (D) signal */
*action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
break;
default:
/* should never reach this path */
mutex_unlock(&priv->lock);
return -EINVAL;
}
mutex_unlock(&priv->lock);
return 0;
}
static const struct counter_ops rz_mtu3_cnt_ops = {
.count_read = rz_mtu3_count_read,
.count_write = rz_mtu3_count_write,
.function_read = rz_mtu3_count_function_read,
.function_write = rz_mtu3_count_function_write,
.action_read = rz_mtu3_action_read,
};
#define RZ_MTU3_PHASE_SIGNAL(_id, _name) { \
.id = (_id), \
.name = (_name), \
}
static struct counter_signal rz_mtu3_signals[] = {
RZ_MTU3_PHASE_SIGNAL(SIGNAL_A_ID, "MTU1 MTCLKA"),
RZ_MTU3_PHASE_SIGNAL(SIGNAL_B_ID, "MTU1 MTCLKB"),
RZ_MTU3_PHASE_SIGNAL(SIGNAL_C_ID, "MTU2 MTCLKC"),
RZ_MTU3_PHASE_SIGNAL(SIGNAL_D_ID, "MTU2 MTCLKD"),
};
static struct counter_synapse rz_mtu3_mtu1_count_synapses[] = {
{
.actions_list = rz_mtu3_synapse_actions,
.num_actions = ARRAY_SIZE(rz_mtu3_synapse_actions),
.signal = rz_mtu3_signals,
},
{
.actions_list = rz_mtu3_synapse_actions,
.num_actions = ARRAY_SIZE(rz_mtu3_synapse_actions),
.signal = rz_mtu3_signals + 1,
}
};
static struct counter_synapse rz_mtu3_mtu2_count_synapses[] = {
{
.actions_list = rz_mtu3_synapse_actions,
.num_actions = ARRAY_SIZE(rz_mtu3_synapse_actions),
.signal = rz_mtu3_signals,
},
{
.actions_list = rz_mtu3_synapse_actions,
.num_actions = ARRAY_SIZE(rz_mtu3_synapse_actions),
.signal = rz_mtu3_signals + 1,
},
{
.actions_list = rz_mtu3_synapse_actions,
.num_actions = ARRAY_SIZE(rz_mtu3_synapse_actions),
.signal = rz_mtu3_signals + 2,
},
{
.actions_list = rz_mtu3_synapse_actions,
.num_actions = ARRAY_SIZE(rz_mtu3_synapse_actions),
.signal = rz_mtu3_signals + 3,
}
};
static struct counter_count rz_mtu3_counts[] = {
{
.id = RZ_MTU3_16_BIT_MTU1_CH,
.name = "Channel 1 Count",
.functions_list = rz_mtu3_count_functions,
.num_functions = ARRAY_SIZE(rz_mtu3_count_functions),
.synapses = rz_mtu3_mtu1_count_synapses,
.num_synapses = ARRAY_SIZE(rz_mtu3_mtu1_count_synapses),
.ext = rz_mtu3_count_ext,
.num_ext = ARRAY_SIZE(rz_mtu3_count_ext),
},
{
.id = RZ_MTU3_16_BIT_MTU2_CH,
.name = "Channel 2 Count",
.functions_list = rz_mtu3_count_functions,
.num_functions = ARRAY_SIZE(rz_mtu3_count_functions),
.synapses = rz_mtu3_mtu2_count_synapses,
.num_synapses = ARRAY_SIZE(rz_mtu3_mtu2_count_synapses),
.ext = rz_mtu3_count_ext,
.num_ext = ARRAY_SIZE(rz_mtu3_count_ext),
},
{
.id = RZ_MTU3_32_BIT_CH,
.name = "Channel 1 and 2 (cascaded) Count",
.functions_list = rz_mtu3_count_functions,
.num_functions = ARRAY_SIZE(rz_mtu3_count_functions),
.synapses = rz_mtu3_mtu2_count_synapses,
.num_synapses = ARRAY_SIZE(rz_mtu3_mtu2_count_synapses),
.ext = rz_mtu3_count_ext,
.num_ext = ARRAY_SIZE(rz_mtu3_count_ext),
}
};
static const char *const rz_mtu3_ext_input_phase_clock_select[] = {
"MTCLKA-MTCLKB",
"MTCLKC-MTCLKD",
};
static DEFINE_COUNTER_ENUM(rz_mtu3_ext_input_phase_clock_select_enum,
rz_mtu3_ext_input_phase_clock_select);
static struct counter_comp rz_mtu3_device_ext[] = {
COUNTER_COMP_DEVICE_BOOL("cascade_counts_enable",
rz_mtu3_cascade_counts_enable_get,
rz_mtu3_cascade_counts_enable_set),
COUNTER_COMP_DEVICE_ENUM("external_input_phase_clock_select",
rz_mtu3_ext_input_phase_clock_select_get,
rz_mtu3_ext_input_phase_clock_select_set,
rz_mtu3_ext_input_phase_clock_select_enum),
};
static int rz_mtu3_cnt_pm_runtime_suspend(struct device *dev)
{
struct clk *const clk = dev_get_drvdata(dev);
clk_disable_unprepare(clk);
return 0;
}
static int rz_mtu3_cnt_pm_runtime_resume(struct device *dev)
{
struct clk *const clk = dev_get_drvdata(dev);
clk_prepare_enable(clk);
return 0;
}
static DEFINE_RUNTIME_DEV_PM_OPS(rz_mtu3_cnt_pm_ops,
rz_mtu3_cnt_pm_runtime_suspend,
rz_mtu3_cnt_pm_runtime_resume, NULL);
static void rz_mtu3_cnt_pm_disable(void *data)
{
struct device *dev = data;
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
}
static int rz_mtu3_cnt_probe(struct platform_device *pdev)
{
struct rz_mtu3 *ddata = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
struct counter_device *counter;
struct rz_mtu3_channel *ch;
struct rz_mtu3_cnt *priv;
unsigned int i;
int ret;
counter = devm_counter_alloc(dev, sizeof(*priv));
if (!counter)
return -ENOMEM;
priv = counter_priv(counter);
priv->clk = ddata->clk;
priv->mtu_32bit_max = U32_MAX;
priv->ch = &ddata->channels[RZ_MTU3_CHAN_1];
ch = &priv->ch[0];
for (i = 0; i < RZ_MTU3_MAX_HW_CNTR_CHANNELS; i++) {
ch->dev = dev;
priv->mtu_16bit_max[i] = U16_MAX;
ch++;
}
mutex_init(&priv->lock);
platform_set_drvdata(pdev, priv->clk);
clk_prepare_enable(priv->clk);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = devm_add_action_or_reset(&pdev->dev, rz_mtu3_cnt_pm_disable, dev);
if (ret < 0)
goto disable_clock;
counter->name = dev_name(dev);
counter->parent = dev;
counter->ops = &rz_mtu3_cnt_ops;
counter->counts = rz_mtu3_counts;
counter->num_counts = ARRAY_SIZE(rz_mtu3_counts);
counter->signals = rz_mtu3_signals;
counter->num_signals = ARRAY_SIZE(rz_mtu3_signals);
counter->ext = rz_mtu3_device_ext;
counter->num_ext = ARRAY_SIZE(rz_mtu3_device_ext);
/* Register Counter device */
ret = devm_counter_add(dev, counter);
if (ret < 0) {
dev_err_probe(dev, ret, "Failed to add counter\n");
goto disable_clock;
}
return 0;
disable_clock:
clk_disable_unprepare(priv->clk);
return ret;
}
static struct platform_driver rz_mtu3_cnt_driver = {
.probe = rz_mtu3_cnt_probe,
.driver = {
.name = "rz-mtu3-counter",
.pm = pm_ptr(&rz_mtu3_cnt_pm_ops),
},
};
module_platform_driver(rz_mtu3_cnt_driver);
MODULE_AUTHOR("Biju Das <[email protected]>");
MODULE_ALIAS("platform:rz-mtu3-counter");
MODULE_DESCRIPTION("Renesas RZ/G2L MTU3a counter driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(COUNTER);
| linux-master | drivers/counter/rz-mtu3-cnt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Generic Counter character device interface
* Copyright (C) 2020 William Breathitt Gray
*/
#include <linux/cdev.h>
#include <linux/counter.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kfifo.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/nospec.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timekeeping.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include "counter-chrdev.h"
struct counter_comp_node {
struct list_head l;
struct counter_component component;
struct counter_comp comp;
void *parent;
};
#define counter_comp_read_is_equal(a, b) \
(a.action_read == b.action_read || \
a.device_u8_read == b.device_u8_read || \
a.count_u8_read == b.count_u8_read || \
a.signal_u8_read == b.signal_u8_read || \
a.device_u32_read == b.device_u32_read || \
a.count_u32_read == b.count_u32_read || \
a.signal_u32_read == b.signal_u32_read || \
a.device_u64_read == b.device_u64_read || \
a.count_u64_read == b.count_u64_read || \
a.signal_u64_read == b.signal_u64_read || \
a.signal_array_u32_read == b.signal_array_u32_read || \
a.device_array_u64_read == b.device_array_u64_read || \
a.count_array_u64_read == b.count_array_u64_read || \
a.signal_array_u64_read == b.signal_array_u64_read)
#define counter_comp_read_is_set(comp) \
(comp.action_read || \
comp.device_u8_read || \
comp.count_u8_read || \
comp.signal_u8_read || \
comp.device_u32_read || \
comp.count_u32_read || \
comp.signal_u32_read || \
comp.device_u64_read || \
comp.count_u64_read || \
comp.signal_u64_read || \
comp.signal_array_u32_read || \
comp.device_array_u64_read || \
comp.count_array_u64_read || \
comp.signal_array_u64_read)
static ssize_t counter_chrdev_read(struct file *filp, char __user *buf,
size_t len, loff_t *f_ps)
{
struct counter_device *const counter = filp->private_data;
int err;
unsigned int copied;
if (!counter->ops)
return -ENODEV;
if (len < sizeof(struct counter_event))
return -EINVAL;
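	/* Block (unless O_NONBLOCK) until at least one complete event is copied. */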
do {
if (kfifo_is_empty(&counter->events)) {
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
err = wait_event_interruptible(counter->events_wait,
!kfifo_is_empty(&counter->events) ||
!counter->ops);
if (err < 0)
return err;
if (!counter->ops)
return -ENODEV;
}
if (mutex_lock_interruptible(&counter->events_out_lock))
return -ERESTARTSYS;
err = kfifo_to_user(&counter->events, buf, len, &copied);
mutex_unlock(&counter->events_out_lock);
if (err < 0)
return err;
} while (!copied);
return copied;
}
static __poll_t counter_chrdev_poll(struct file *filp,
struct poll_table_struct *pollt)
{
struct counter_device *const counter = filp->private_data;
__poll_t events = 0;
if (!counter->ops)
return events;
poll_wait(filp, &counter->events_wait, pollt);
if (!kfifo_is_empty(&counter->events))
events = EPOLLIN | EPOLLRDNORM;
return events;
}
static void counter_events_list_free(struct list_head *const events_list)
{
struct counter_event_node *p, *n;
struct counter_comp_node *q, *o;
list_for_each_entry_safe(p, n, events_list, l) {
/* Free associated component nodes */
list_for_each_entry_safe(q, o, &p->comp_list, l) {
list_del(&q->l);
kfree(q);
}
/* Free event node */
list_del(&p->l);
kfree(p);
}
}
static int counter_set_event_node(struct counter_device *const counter,
struct counter_watch *const watch,
const struct counter_comp_node *const cfg)
{
struct counter_event_node *event_node;
int err = 0;
struct counter_comp_node *comp_node;
/* Search for event in the list */
list_for_each_entry(event_node, &counter->next_events_list, l)
if (event_node->event == watch->event &&
event_node->channel == watch->channel)
break;
/* If event is not already in the list */
if (&event_node->l == &counter->next_events_list) {
/* Allocate new event node */
event_node = kmalloc(sizeof(*event_node), GFP_KERNEL);
if (!event_node)
return -ENOMEM;
/* Configure event node and add to the list */
event_node->event = watch->event;
event_node->channel = watch->channel;
INIT_LIST_HEAD(&event_node->comp_list);
list_add(&event_node->l, &counter->next_events_list);
}
/* Check if component watch has already been set before */
list_for_each_entry(comp_node, &event_node->comp_list, l)
if (comp_node->parent == cfg->parent &&
counter_comp_read_is_equal(comp_node->comp, cfg->comp)) {
err = -EINVAL;
goto exit_free_event_node;
}
/* Allocate component node */
comp_node = kmalloc(sizeof(*comp_node), GFP_KERNEL);
if (!comp_node) {
err = -ENOMEM;
goto exit_free_event_node;
}
*comp_node = *cfg;
/* Add component node to event node */
list_add_tail(&comp_node->l, &event_node->comp_list);
exit_free_event_node:
/* Free event node if no one else is watching */
if (list_empty(&event_node->comp_list)) {
list_del(&event_node->l);
kfree(event_node);
}
return err;
}
static int counter_enable_events(struct counter_device *const counter)
{
unsigned long flags;
int err = 0;
mutex_lock(&counter->n_events_list_lock);
spin_lock_irqsave(&counter->events_list_lock, flags);
counter_events_list_free(&counter->events_list);
list_replace_init(&counter->next_events_list,
&counter->events_list);
if (counter->ops->events_configure)
err = counter->ops->events_configure(counter);
spin_unlock_irqrestore(&counter->events_list_lock, flags);
mutex_unlock(&counter->n_events_list_lock);
return err;
}
static int counter_disable_events(struct counter_device *const counter)
{
unsigned long flags;
int err = 0;
spin_lock_irqsave(&counter->events_list_lock, flags);
counter_events_list_free(&counter->events_list);
if (counter->ops->events_configure)
err = counter->ops->events_configure(counter);
spin_unlock_irqrestore(&counter->events_list_lock, flags);
mutex_lock(&counter->n_events_list_lock);
counter_events_list_free(&counter->next_events_list);
mutex_unlock(&counter->n_events_list_lock);
return err;
}
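/*
 * Translate a watch component id into an extension index; array-type
 * extensions consume one id per array element.
 */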
static int counter_get_ext(const struct counter_comp *const ext,
const size_t num_ext, const size_t component_id,
size_t *const ext_idx, size_t *const id)
{
struct counter_array *element;
*id = 0;
for (*ext_idx = 0; *ext_idx < num_ext; (*ext_idx)++) {
if (*id == component_id)
return 0;
		if (ext[*ext_idx].type == COUNTER_COMP_ARRAY) {
			element = ext[*ext_idx].priv;
if (component_id - *id < element->length)
return 0;
*id += element->length;
} else
(*id)++;
}
return -EINVAL;
}
static int counter_add_watch(struct counter_device *const counter,
const unsigned long arg)
{
void __user *const uwatch = (void __user *)arg;
struct counter_watch watch;
struct counter_comp_node comp_node = {};
size_t parent, id;
struct counter_comp *ext;
size_t num_ext;
size_t ext_idx, ext_id;
int err = 0;
if (copy_from_user(&watch, uwatch, sizeof(watch)))
return -EFAULT;
if (watch.component.type == COUNTER_COMPONENT_NONE)
goto no_component;
parent = watch.component.parent;
/* Configure parent component info for comp node */
switch (watch.component.scope) {
case COUNTER_SCOPE_DEVICE:
ext = counter->ext;
num_ext = counter->num_ext;
break;
case COUNTER_SCOPE_SIGNAL:
if (parent >= counter->num_signals)
return -EINVAL;
parent = array_index_nospec(parent, counter->num_signals);
comp_node.parent = counter->signals + parent;
ext = counter->signals[parent].ext;
num_ext = counter->signals[parent].num_ext;
break;
case COUNTER_SCOPE_COUNT:
if (parent >= counter->num_counts)
return -EINVAL;
parent = array_index_nospec(parent, counter->num_counts);
comp_node.parent = counter->counts + parent;
ext = counter->counts[parent].ext;
num_ext = counter->counts[parent].num_ext;
break;
default:
return -EINVAL;
}
id = watch.component.id;
/* Configure component info for comp node */
switch (watch.component.type) {
case COUNTER_COMPONENT_SIGNAL:
if (watch.component.scope != COUNTER_SCOPE_SIGNAL)
return -EINVAL;
comp_node.comp.type = COUNTER_COMP_SIGNAL_LEVEL;
comp_node.comp.signal_u32_read = counter->ops->signal_read;
break;
case COUNTER_COMPONENT_COUNT:
if (watch.component.scope != COUNTER_SCOPE_COUNT)
return -EINVAL;
comp_node.comp.type = COUNTER_COMP_U64;
comp_node.comp.count_u64_read = counter->ops->count_read;
break;
case COUNTER_COMPONENT_FUNCTION:
if (watch.component.scope != COUNTER_SCOPE_COUNT)
return -EINVAL;
comp_node.comp.type = COUNTER_COMP_FUNCTION;
comp_node.comp.count_u32_read = counter->ops->function_read;
break;
case COUNTER_COMPONENT_SYNAPSE_ACTION:
if (watch.component.scope != COUNTER_SCOPE_COUNT)
return -EINVAL;
if (id >= counter->counts[parent].num_synapses)
return -EINVAL;
id = array_index_nospec(id, counter->counts[parent].num_synapses);
comp_node.comp.type = COUNTER_COMP_SYNAPSE_ACTION;
comp_node.comp.action_read = counter->ops->action_read;
comp_node.comp.priv = counter->counts[parent].synapses + id;
break;
case COUNTER_COMPONENT_EXTENSION:
err = counter_get_ext(ext, num_ext, id, &ext_idx, &ext_id);
if (err < 0)
return err;
comp_node.comp = ext[ext_idx];
break;
default:
return -EINVAL;
}
if (!counter_comp_read_is_set(comp_node.comp))
return -EOPNOTSUPP;
no_component:
mutex_lock(&counter->n_events_list_lock);
if (counter->ops->watch_validate) {
err = counter->ops->watch_validate(counter, &watch);
if (err < 0)
goto err_exit;
}
comp_node.component = watch.component;
err = counter_set_event_node(counter, &watch, &comp_node);
err_exit:
mutex_unlock(&counter->n_events_list_lock);
return err;
}
static long counter_chrdev_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct counter_device *const counter = filp->private_data;
int ret = -ENODEV;
mutex_lock(&counter->ops_exist_lock);
if (!counter->ops)
goto out_unlock;
switch (cmd) {
case COUNTER_ADD_WATCH_IOCTL:
ret = counter_add_watch(counter, arg);
break;
case COUNTER_ENABLE_EVENTS_IOCTL:
ret = counter_enable_events(counter);
break;
case COUNTER_DISABLE_EVENTS_IOCTL:
ret = counter_disable_events(counter);
break;
default:
ret = -ENOIOCTLCMD;
break;
}
out_unlock:
mutex_unlock(&counter->ops_exist_lock);
return ret;
}
static int counter_chrdev_open(struct inode *inode, struct file *filp)
{
struct counter_device *const counter = container_of(inode->i_cdev,
typeof(*counter),
chrdev);
get_device(&counter->dev);
filp->private_data = counter;
return nonseekable_open(inode, filp);
}
static int counter_chrdev_release(struct inode *inode, struct file *filp)
{
struct counter_device *const counter = filp->private_data;
int ret = 0;
mutex_lock(&counter->ops_exist_lock);
if (!counter->ops) {
/* Free any lingering held memory */
counter_events_list_free(&counter->events_list);
counter_events_list_free(&counter->next_events_list);
ret = -ENODEV;
goto out_unlock;
}
ret = counter_disable_events(counter);
if (ret < 0) {
mutex_unlock(&counter->ops_exist_lock);
return ret;
}
out_unlock:
mutex_unlock(&counter->ops_exist_lock);
put_device(&counter->dev);
return ret;
}
static const struct file_operations counter_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = counter_chrdev_read,
.poll = counter_chrdev_poll,
.unlocked_ioctl = counter_chrdev_ioctl,
.open = counter_chrdev_open,
.release = counter_chrdev_release,
};
int counter_chrdev_add(struct counter_device *const counter)
{
/* Initialize Counter events lists */
INIT_LIST_HEAD(&counter->events_list);
INIT_LIST_HEAD(&counter->next_events_list);
spin_lock_init(&counter->events_list_lock);
mutex_init(&counter->n_events_list_lock);
init_waitqueue_head(&counter->events_wait);
spin_lock_init(&counter->events_in_lock);
mutex_init(&counter->events_out_lock);
/* Initialize character device */
cdev_init(&counter->chrdev, &counter_fops);
/* Allocate Counter events queue */
return kfifo_alloc(&counter->events, 64, GFP_KERNEL);
}
void counter_chrdev_remove(struct counter_device *const counter)
{
kfifo_free(&counter->events);
}
static int counter_get_array_data(struct counter_device *const counter,
const enum counter_scope scope,
void *const parent,
const struct counter_comp *const comp,
const size_t idx, u64 *const value)
{
const struct counter_array *const element = comp->priv;
u32 value_u32 = 0;
int ret;
switch (element->type) {
case COUNTER_COMP_SIGNAL_POLARITY:
if (scope != COUNTER_SCOPE_SIGNAL)
return -EINVAL;
ret = comp->signal_array_u32_read(counter, parent, idx,
&value_u32);
*value = value_u32;
return ret;
case COUNTER_COMP_U64:
switch (scope) {
case COUNTER_SCOPE_DEVICE:
return comp->device_array_u64_read(counter, idx, value);
case COUNTER_SCOPE_SIGNAL:
return comp->signal_array_u64_read(counter, parent, idx,
value);
case COUNTER_SCOPE_COUNT:
return comp->count_array_u64_read(counter, parent, idx,
value);
default:
return -EINVAL;
}
default:
return -EINVAL;
}
}
static int counter_get_data(struct counter_device *const counter,
const struct counter_comp_node *const comp_node,
u64 *const value)
{
const struct counter_comp *const comp = &comp_node->comp;
const enum counter_scope scope = comp_node->component.scope;
const size_t id = comp_node->component.id;
struct counter_signal *const signal = comp_node->parent;
struct counter_count *const count = comp_node->parent;
u8 value_u8 = 0;
u32 value_u32 = 0;
const struct counter_comp *ext;
size_t num_ext;
size_t ext_idx, ext_id;
int ret;
if (comp_node->component.type == COUNTER_COMPONENT_NONE)
return 0;
switch (comp->type) {
case COUNTER_COMP_U8:
case COUNTER_COMP_BOOL:
switch (scope) {
case COUNTER_SCOPE_DEVICE:
ret = comp->device_u8_read(counter, &value_u8);
break;
case COUNTER_SCOPE_SIGNAL:
ret = comp->signal_u8_read(counter, signal, &value_u8);
break;
case COUNTER_SCOPE_COUNT:
ret = comp->count_u8_read(counter, count, &value_u8);
break;
default:
return -EINVAL;
}
*value = value_u8;
return ret;
case COUNTER_COMP_SIGNAL_LEVEL:
case COUNTER_COMP_FUNCTION:
case COUNTER_COMP_ENUM:
case COUNTER_COMP_COUNT_DIRECTION:
case COUNTER_COMP_COUNT_MODE:
case COUNTER_COMP_SIGNAL_POLARITY:
switch (scope) {
case COUNTER_SCOPE_DEVICE:
ret = comp->device_u32_read(counter, &value_u32);
break;
case COUNTER_SCOPE_SIGNAL:
ret = comp->signal_u32_read(counter, signal,
&value_u32);
break;
case COUNTER_SCOPE_COUNT:
ret = comp->count_u32_read(counter, count, &value_u32);
break;
default:
return -EINVAL;
}
*value = value_u32;
return ret;
case COUNTER_COMP_U64:
switch (scope) {
case COUNTER_SCOPE_DEVICE:
return comp->device_u64_read(counter, value);
case COUNTER_SCOPE_SIGNAL:
return comp->signal_u64_read(counter, signal, value);
case COUNTER_SCOPE_COUNT:
return comp->count_u64_read(counter, count, value);
default:
return -EINVAL;
}
case COUNTER_COMP_SYNAPSE_ACTION:
ret = comp->action_read(counter, count, comp->priv, &value_u32);
*value = value_u32;
return ret;
case COUNTER_COMP_ARRAY:
switch (scope) {
case COUNTER_SCOPE_DEVICE:
ext = counter->ext;
num_ext = counter->num_ext;
break;
case COUNTER_SCOPE_SIGNAL:
ext = signal->ext;
num_ext = signal->num_ext;
break;
case COUNTER_SCOPE_COUNT:
ext = count->ext;
num_ext = count->num_ext;
break;
default:
return -EINVAL;
}
ret = counter_get_ext(ext, num_ext, id, &ext_idx, &ext_id);
if (ret < 0)
return ret;
return counter_get_array_data(counter, scope, comp_node->parent,
comp, id - ext_id, value);
default:
return -EINVAL;
}
}
/**
* counter_push_event - queue event for userspace reading
* @counter: pointer to Counter structure
* @event: triggered event
* @channel: event channel
*
* Note: If no one is watching for the respective event, it is silently
* discarded.
*/
void counter_push_event(struct counter_device *const counter, const u8 event,
const u8 channel)
{
struct counter_event ev;
unsigned int copied = 0;
unsigned long flags;
struct counter_event_node *event_node;
struct counter_comp_node *comp_node;
ev.timestamp = ktime_get_ns();
ev.watch.event = event;
ev.watch.channel = channel;
/* Could be in an interrupt context, so use a spin lock */
spin_lock_irqsave(&counter->events_list_lock, flags);
/* Search for event in the list */
list_for_each_entry(event_node, &counter->events_list, l)
if (event_node->event == event &&
event_node->channel == channel)
break;
/* If event is not in the list */
if (&event_node->l == &counter->events_list)
goto exit_early;
/* Read and queue relevant comp for userspace */
list_for_each_entry(comp_node, &event_node->comp_list, l) {
ev.watch.component = comp_node->component;
ev.status = -counter_get_data(counter, comp_node, &ev.value);
copied += kfifo_in_spinlocked_noirqsave(&counter->events, &ev,
1, &counter->events_in_lock);
}
exit_early:
spin_unlock_irqrestore(&counter->events_list_lock, flags);
if (copied)
wake_up_poll(&counter->events_wait, EPOLLIN);
}
EXPORT_SYMBOL_NS_GPL(counter_push_event, COUNTER);
| linux-master | drivers/counter/counter-chrdev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2017 Intel Corporation. All rights reserved.
*/
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include "dax-private.h"
/**
* struct dax_device - anchor object for dax services
* @inode: core vfs
* @cdev: optional character interface for "device dax"
* @private: dax driver private data
* @flags: state and boolean properties
* @ops: operations for this device
* @holder_data: holder of a dax_device: could be filesystem or mapped device
* @holder_ops: operations for the inner holder
*/
struct dax_device {
struct inode inode;
struct cdev cdev;
void *private;
unsigned long flags;
const struct dax_operations *ops;
void *holder_data;
const struct dax_holder_operations *holder_ops;
};
static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;
int dax_read_lock(void)
{
return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);
void dax_read_unlock(int id)
{
srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);
#if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
#include <linux/blkdev.h>
static DEFINE_XARRAY(dax_hosts);
int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk)
{
return xa_insert(&dax_hosts, (unsigned long)disk, dax_dev, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(dax_add_host);
void dax_remove_host(struct gendisk *disk)
{
xa_erase(&dax_hosts, (unsigned long)disk);
}
EXPORT_SYMBOL_GPL(dax_remove_host);
/**
* fs_dax_get_by_bdev() - temporary lookup mechanism for filesystem-dax
* @bdev: block device to find a dax_device for
* @start_off: returns the byte offset into the dax_device that @bdev starts
* @holder: filesystem or mapped device inside the dax_device
* @ops: operations for the inner holder
*/
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off,
void *holder, const struct dax_holder_operations *ops)
{
struct dax_device *dax_dev;
u64 part_size;
int id;
if (!blk_queue_dax(bdev->bd_disk->queue))
return NULL;
*start_off = get_start_sect(bdev) * SECTOR_SIZE;
part_size = bdev_nr_sectors(bdev) * SECTOR_SIZE;
if (*start_off % PAGE_SIZE || part_size % PAGE_SIZE) {
pr_info("%pg: error: unaligned partition for dax\n", bdev);
return NULL;
}
id = dax_read_lock();
dax_dev = xa_load(&dax_hosts, (unsigned long)bdev->bd_disk);
if (!dax_dev || !dax_alive(dax_dev) || !igrab(&dax_dev->inode))
dax_dev = NULL;
else if (holder) {
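		/* Only one holder may be registered at a time; claim the slot atomically. */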
if (!cmpxchg(&dax_dev->holder_data, NULL, holder))
dax_dev->holder_ops = ops;
else
dax_dev = NULL;
}
dax_read_unlock(id);
return dax_dev;
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
void fs_put_dax(struct dax_device *dax_dev, void *holder)
{
if (dax_dev && holder &&
cmpxchg(&dax_dev->holder_data, holder, NULL) == holder)
dax_dev->holder_ops = NULL;
put_dax(dax_dev);
}
EXPORT_SYMBOL_GPL(fs_put_dax);
#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
enum dax_device_flags {
/* !alive + rcu grace period == no new operations / mappings */
DAXDEV_ALIVE,
/* gate whether dax_flush() calls the low level flush routine */
DAXDEV_WRITE_CACHE,
/* flag to check if device supports synchronous flush */
DAXDEV_SYNC,
/* do not leave the caches dirty after writes */
DAXDEV_NOCACHE,
/* handle CPU fetch exceptions during reads */
DAXDEV_NOMC,
};
/**
* dax_direct_access() - translate a device pgoff to an absolute pfn
* @dax_dev: a dax_device instance representing the logical memory range
* @pgoff: offset in pages from the start of the device to translate
* @nr_pages: number of consecutive pages caller can handle relative to @pfn
* @mode: indicator on normal access or recovery write
* @kaddr: output parameter that returns a virtual address mapping of pfn
* @pfn: output parameter that returns an absolute pfn translation of @pgoff
*
* Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative to @pgoff.
*/
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
enum dax_access_mode mode, void **kaddr, pfn_t *pfn)
{
long avail;
if (!dax_dev)
return -EOPNOTSUPP;
if (!dax_alive(dax_dev))
return -ENXIO;
if (nr_pages < 0)
return -EINVAL;
avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
mode, kaddr, pfn);
if (!avail)
return -ERANGE;
return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i)
{
if (!dax_alive(dax_dev))
return 0;
/*
* The userspace address for the memory copy has already been validated
* via access_ok() in vfs_write, so use the 'no check' version to bypass
* the HARDENED_USERCOPY overhead.
*/
if (test_bit(DAXDEV_NOCACHE, &dax_dev->flags))
return _copy_from_iter_flushcache(addr, bytes, i);
return _copy_from_iter(addr, bytes, i);
}
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i)
{
if (!dax_alive(dax_dev))
return 0;
/*
* The userspace address for the memory copy has already been validated
 * via access_ok() in vfs_read, so use the 'no check' version to bypass
* the HARDENED_USERCOPY overhead.
*/
if (test_bit(DAXDEV_NOMC, &dax_dev->flags))
return _copy_mc_to_iter(addr, bytes, i);
return _copy_to_iter(addr, bytes, i);
}
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
size_t nr_pages)
{
int ret;
if (!dax_alive(dax_dev))
return -ENXIO;
/*
* There are no callers that want to zero more than one page as of now.
* Once users are there, this check can be removed after the
* device mapper code has been updated to split ranges across targets.
*/
if (nr_pages != 1)
return -EIO;
ret = dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
return dax_mem2blk_err(ret);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *iter)
{
if (!dax_dev->ops->recovery_write)
return 0;
return dax_dev->ops->recovery_write(dax_dev, pgoff, addr, bytes, iter);
}
EXPORT_SYMBOL_GPL(dax_recovery_write);
int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off,
u64 len, int mf_flags)
{
int rc, id;
id = dax_read_lock();
if (!dax_alive(dax_dev)) {
rc = -ENXIO;
goto out;
}
if (!dax_dev->holder_ops) {
rc = -EOPNOTSUPP;
goto out;
}
rc = dax_dev->holder_ops->notify_failure(dax_dev, off, len, mf_flags);
out:
dax_read_unlock(id);
return rc;
}
EXPORT_SYMBOL_GPL(dax_holder_notify_failure);
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
if (unlikely(!dax_write_cache_enabled(dax_dev)))
return;
arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);
void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
if (wc)
set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
else
clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);
bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
bool dax_synchronous(struct dax_device *dax_dev)
{
return test_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_synchronous);
void set_dax_synchronous(struct dax_device *dax_dev)
{
set_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(set_dax_synchronous);
void set_dax_nocache(struct dax_device *dax_dev)
{
set_bit(DAXDEV_NOCACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(set_dax_nocache);
void set_dax_nomc(struct dax_device *dax_dev)
{
set_bit(DAXDEV_NOMC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(set_dax_nomc);
bool dax_alive(struct dax_device *dax_dev)
{
lockdep_assert_held(&dax_srcu);
return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);
/*
 * Note: rcu is not protecting the liveness of dax_dev; rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive() have completed. Any operations that start after
* synchronize_srcu() has run will abort upon seeing !dax_alive().
*/
void kill_dax(struct dax_device *dax_dev)
{
if (!dax_dev)
return;
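	/*
	 * Give any registered holder a chance to fail outstanding accesses
	 * over the whole device range before the device is marked dead.
	 */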
if (dax_dev->holder_data != NULL)
dax_holder_notify_failure(dax_dev, 0, U64_MAX, 0);
clear_bit(DAXDEV_ALIVE, &dax_dev->flags);
synchronize_srcu(&dax_srcu);
/* clear holder data */
dax_dev->holder_ops = NULL;
dax_dev->holder_data = NULL;
}
EXPORT_SYMBOL_GPL(kill_dax);
void run_dax(struct dax_device *dax_dev)
{
set_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(run_dax);
static struct inode *dax_alloc_inode(struct super_block *sb)
{
struct dax_device *dax_dev;
struct inode *inode;
dax_dev = alloc_inode_sb(sb, dax_cache, GFP_KERNEL);
if (!dax_dev)
return NULL;
inode = &dax_dev->inode;
inode->i_rdev = 0;
return inode;
}
static struct dax_device *to_dax_dev(struct inode *inode)
{
return container_of(inode, struct dax_device, inode);
}
static void dax_free_inode(struct inode *inode)
{
struct dax_device *dax_dev = to_dax_dev(inode);
if (inode->i_rdev)
ida_free(&dax_minor_ida, iminor(inode));
kmem_cache_free(dax_cache, dax_dev);
}
static void dax_destroy_inode(struct inode *inode)
{
struct dax_device *dax_dev = to_dax_dev(inode);
WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
"kill_dax() must be called before final iput()\n");
}
static const struct super_operations dax_sops = {
.statfs = simple_statfs,
.alloc_inode = dax_alloc_inode,
.destroy_inode = dax_destroy_inode,
.free_inode = dax_free_inode,
.drop_inode = generic_delete_inode,
};
static int dax_init_fs_context(struct fs_context *fc)
{
struct pseudo_fs_context *ctx = init_pseudo(fc, DAXFS_MAGIC);
if (!ctx)
return -ENOMEM;
ctx->ops = &dax_sops;
return 0;
}
static struct file_system_type dax_fs_type = {
.name = "dax",
.init_fs_context = dax_init_fs_context,
.kill_sb = kill_anon_super,
};
static int dax_test(struct inode *inode, void *data)
{
dev_t devt = *(dev_t *) data;
return inode->i_rdev == devt;
}
static int dax_set(struct inode *inode, void *data)
{
dev_t devt = *(dev_t *) data;
inode->i_rdev = devt;
return 0;
}
static struct dax_device *dax_dev_get(dev_t devt)
{
struct dax_device *dax_dev;
struct inode *inode;
inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
dax_test, dax_set, &devt);
if (!inode)
return NULL;
dax_dev = to_dax_dev(inode);
if (inode->i_state & I_NEW) {
set_bit(DAXDEV_ALIVE, &dax_dev->flags);
inode->i_cdev = &dax_dev->cdev;
inode->i_mode = S_IFCHR;
inode->i_flags = S_DAX;
mapping_set_gfp_mask(&inode->i_data, GFP_USER);
unlock_new_inode(inode);
}
return dax_dev;
}
struct dax_device *alloc_dax(void *private, const struct dax_operations *ops)
{
struct dax_device *dax_dev;
dev_t devt;
int minor;
if (WARN_ON_ONCE(ops && !ops->zero_page_range))
return ERR_PTR(-EINVAL);
minor = ida_alloc_max(&dax_minor_ida, MINORMASK, GFP_KERNEL);
if (minor < 0)
return ERR_PTR(-ENOMEM);
devt = MKDEV(MAJOR(dax_devt), minor);
dax_dev = dax_dev_get(devt);
if (!dax_dev)
goto err_dev;
dax_dev->ops = ops;
dax_dev->private = private;
return dax_dev;
err_dev:
ida_free(&dax_minor_ida, minor);
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(alloc_dax);
void put_dax(struct dax_device *dax_dev)
{
if (!dax_dev)
return;
iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);
/**
* dax_holder() - obtain the holder of a dax device
* @dax_dev: a dax_device instance
*
* Return: the holder's data which represents the holder if registered,
 * otherwise NULL.
*/
void *dax_holder(struct dax_device *dax_dev)
{
return dax_dev->holder_data;
}
EXPORT_SYMBOL_GPL(dax_holder);
/**
* inode_dax: convert a public inode into its dax_dev
* @inode: An inode with i_cdev pointing to a dax_dev
*
* Note this is not equivalent to to_dax_dev() which is for private
* internal use where we know the inode filesystem type == dax_fs_type.
*/
struct dax_device *inode_dax(struct inode *inode)
{
struct cdev *cdev = inode->i_cdev;
return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);
struct inode *dax_inode(struct dax_device *dax_dev)
{
return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);
void *dax_get_private(struct dax_device *dax_dev)
{
if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
return NULL;
return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);
static void init_once(void *_dax_dev)
{
struct dax_device *dax_dev = _dax_dev;
struct inode *inode = &dax_dev->inode;
memset(dax_dev, 0, sizeof(*dax_dev));
inode_init_once(inode);
}
static int dax_fs_init(void)
{
int rc;
dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD|SLAB_ACCOUNT),
init_once);
if (!dax_cache)
return -ENOMEM;
dax_mnt = kern_mount(&dax_fs_type);
if (IS_ERR(dax_mnt)) {
rc = PTR_ERR(dax_mnt);
goto err_mount;
}
dax_superblock = dax_mnt->mnt_sb;
return 0;
err_mount:
kmem_cache_destroy(dax_cache);
return rc;
}
static void dax_fs_exit(void)
{
kern_unmount(dax_mnt);
rcu_barrier();
kmem_cache_destroy(dax_cache);
}
static int __init dax_core_init(void)
{
int rc;
rc = dax_fs_init();
if (rc)
return rc;
rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
if (rc)
goto err_chrdev;
rc = dax_bus_init();
if (rc)
goto err_bus;
return 0;
err_bus:
unregister_chrdev_region(dax_devt, MINORMASK+1);
err_chrdev:
dax_fs_exit();
	return rc;
}
static void __exit dax_core_exit(void)
{
dax_bus_exit();
unregister_chrdev_region(dax_devt, MINORMASK+1);
ida_destroy(&dax_minor_ida);
dax_fs_exit();
}
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_core_init);
module_exit(dax_core_exit);
| linux-master | drivers/dax/super.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include "../nvdimm/pfn.h"
#include "../nvdimm/nd.h"
#include "bus.h"
static struct dev_dax *__dax_pmem_probe(struct device *dev)
{
struct range range;
int rc, id, region_id;
resource_size_t offset;
struct nd_pfn_sb *pfn_sb;
struct dev_dax_data data;
struct nd_namespace_io *nsio;
struct dax_region *dax_region;
struct dev_pagemap pgmap = { };
struct nd_namespace_common *ndns;
struct nd_dax *nd_dax = to_nd_dax(dev);
struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
struct nd_region *nd_region = to_nd_region(dev->parent);
ndns = nvdimm_namespace_common_probe(dev);
if (IS_ERR(ndns))
return ERR_CAST(ndns);
/* parse the 'pfn' info block via ->rw_bytes */
rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
if (rc)
return ERR_PTR(rc);
rc = nvdimm_setup_pfn(nd_pfn, &pgmap);
if (rc)
return ERR_PTR(rc);
devm_namespace_disable(dev, ndns);
/* reserve the metadata area, device-dax will reserve the data */
pfn_sb = nd_pfn->pfn_sb;
offset = le64_to_cpu(pfn_sb->dataoff);
nsio = to_nd_namespace_io(&ndns->dev);
if (!devm_request_mem_region(dev, nsio->res.start, offset,
dev_name(&ndns->dev))) {
dev_warn(dev, "could not reserve metadata\n");
return ERR_PTR(-EBUSY);
}
	rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
if (rc != 2)
return ERR_PTR(-EINVAL);
/* adjust the dax_region range to the start of data */
range = pgmap.range;
range.start += offset;
dax_region = alloc_dax_region(dev, region_id, &range,
nd_region->target_node, le32_to_cpu(pfn_sb->align),
IORESOURCE_DAX_STATIC);
if (!dax_region)
return ERR_PTR(-ENOMEM);
data = (struct dev_dax_data) {
.dax_region = dax_region,
.id = id,
.pgmap = &pgmap,
.size = range_len(&range),
};
return devm_create_dev_dax(&data);
}
static int dax_pmem_probe(struct device *dev)
{
return PTR_ERR_OR_ZERO(__dax_pmem_probe(dev));
}
static struct nd_device_driver dax_pmem_driver = {
.probe = dax_pmem_probe,
.drv = {
.name = "dax_pmem",
},
.type = ND_DRIVER_DAX_PMEM,
};
static int __init dax_pmem_init(void)
{
return nd_driver_register(&dax_pmem_driver);
}
module_init(dax_pmem_init);
static void __exit dax_pmem_exit(void)
{
driver_unregister(&dax_pmem_driver.drv);
}
module_exit(dax_pmem_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM);
| linux-master | drivers/dax/pmem.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation. All rights reserved. */
#include <linux/module.h>
#include <linux/dax.h>
#include "../cxl/cxl.h"
#include "bus.h"
static int cxl_dax_region_probe(struct device *dev)
{
struct cxl_dax_region *cxlr_dax = to_cxl_dax_region(dev);
int nid = phys_to_target_node(cxlr_dax->hpa_range.start);
struct cxl_region *cxlr = cxlr_dax->cxlr;
struct dax_region *dax_region;
struct dev_dax_data data;
if (nid == NUMA_NO_NODE)
nid = memory_add_physaddr_to_nid(cxlr_dax->hpa_range.start);
dax_region = alloc_dax_region(dev, cxlr->id, &cxlr_dax->hpa_range, nid,
PMD_SIZE, IORESOURCE_DAX_KMEM);
if (!dax_region)
return -ENOMEM;
data = (struct dev_dax_data) {
.dax_region = dax_region,
.id = -1,
.size = range_len(&cxlr_dax->hpa_range),
};
return PTR_ERR_OR_ZERO(devm_create_dev_dax(&data));
}
static struct cxl_driver cxl_dax_region_driver = {
.name = "cxl_dax_region",
.probe = cxl_dax_region_probe,
.id = CXL_DEVICE_DAX_REGION,
.drv = {
.suppress_bind_attrs = true,
},
};
module_cxl_driver(cxl_dax_region_driver);
MODULE_ALIAS_CXL(CXL_DEVICE_DAX_REGION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
MODULE_IMPORT_NS(CXL);
| linux-master | drivers/dax/cxl.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017-2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/io.h>
#include "dax-private.h"
#include "bus.h"
static DEFINE_MUTEX(dax_bus_lock);
#define DAX_NAME_LEN 30
struct dax_id {
struct list_head list;
char dev_name[DAX_NAME_LEN];
};
static int dax_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
/*
* We only ever expect to handle device-dax instances, i.e. the
* @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
*/
return add_uevent_var(env, "MODALIAS=" DAX_DEVICE_MODALIAS_FMT, 0);
}
static struct dax_device_driver *to_dax_drv(struct device_driver *drv)
{
return container_of(drv, struct dax_device_driver, drv);
}
static struct dax_id *__dax_match_id(struct dax_device_driver *dax_drv,
const char *dev_name)
{
struct dax_id *dax_id;
lockdep_assert_held(&dax_bus_lock);
list_for_each_entry(dax_id, &dax_drv->ids, list)
if (sysfs_streq(dax_id->dev_name, dev_name))
return dax_id;
return NULL;
}
static int dax_match_id(struct dax_device_driver *dax_drv, struct device *dev)
{
int match;
mutex_lock(&dax_bus_lock);
match = !!__dax_match_id(dax_drv, dev_name(dev));
mutex_unlock(&dax_bus_lock);
return match;
}
static int dax_match_type(struct dax_device_driver *dax_drv, struct device *dev)
{
enum dax_driver_type type = DAXDRV_DEVICE_TYPE;
struct dev_dax *dev_dax = to_dev_dax(dev);
if (dev_dax->region->res.flags & IORESOURCE_DAX_KMEM)
type = DAXDRV_KMEM_TYPE;
if (dax_drv->type == type)
return 1;
/* default to device mode if dax_kmem is disabled */
if (dax_drv->type == DAXDRV_DEVICE_TYPE &&
!IS_ENABLED(CONFIG_DEV_DAX_KMEM))
return 1;
return 0;
}
enum id_action {
ID_REMOVE,
ID_ADD,
};
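/*
 * Shared handler for the new_id/remove_id driver attributes: the input must
 * be a complete "daxX.Y" device name. Adding an id also calls
 * driver_attach() so an already registered device can bind immediately.
 */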
static ssize_t do_id_store(struct device_driver *drv, const char *buf,
size_t count, enum id_action action)
{
struct dax_device_driver *dax_drv = to_dax_drv(drv);
unsigned int region_id, id;
char devname[DAX_NAME_LEN];
struct dax_id *dax_id;
ssize_t rc = count;
int fields;
fields = sscanf(buf, "dax%d.%d", &region_id, &id);
if (fields != 2)
return -EINVAL;
sprintf(devname, "dax%d.%d", region_id, id);
if (!sysfs_streq(buf, devname))
return -EINVAL;
mutex_lock(&dax_bus_lock);
dax_id = __dax_match_id(dax_drv, buf);
if (!dax_id) {
if (action == ID_ADD) {
dax_id = kzalloc(sizeof(*dax_id), GFP_KERNEL);
if (dax_id) {
strncpy(dax_id->dev_name, buf, DAX_NAME_LEN);
list_add(&dax_id->list, &dax_drv->ids);
} else
rc = -ENOMEM;
}
} else if (action == ID_REMOVE) {
list_del(&dax_id->list);
kfree(dax_id);
}
mutex_unlock(&dax_bus_lock);
if (rc < 0)
return rc;
if (action == ID_ADD)
rc = driver_attach(drv);
if (rc)
return rc;
return count;
}
static ssize_t new_id_store(struct device_driver *drv, const char *buf,
size_t count)
{
return do_id_store(drv, buf, count, ID_ADD);
}
static DRIVER_ATTR_WO(new_id);
static ssize_t remove_id_store(struct device_driver *drv, const char *buf,
size_t count)
{
return do_id_store(drv, buf, count, ID_REMOVE);
}
static DRIVER_ATTR_WO(remove_id);
static struct attribute *dax_drv_attrs[] = {
&driver_attr_new_id.attr,
&driver_attr_remove_id.attr,
NULL,
};
ATTRIBUTE_GROUPS(dax_drv);
static int dax_bus_match(struct device *dev, struct device_driver *drv);
/*
* Static dax regions are regions created by an external subsystem
* nvdimm where a single range is assigned. Its boundaries are defined by the
* external subsystem and are usually limited to one physical memory range. For
* example, for PMEM it is usually defined by NVDIMM Namespace boundaries (i.e.
* a single contiguous range).
*
* On dynamic dax regions, the assigned region can be partitioned by the dax
* core into multiple subdivisions. A subdivision is represented by one
* /dev/daxN.M device composed of one or more potentially discontiguous ranges.
*
* When allocating a dax region, drivers must set whether it's static
* (IORESOURCE_DAX_STATIC). On static dax devices, the @pgmap is pre-assigned
* to dax core when calling devm_create_dev_dax(), whereas in dynamic dax
* devices it is NULL but afterwards allocated by dax core on device ->probe().
* Care is needed to make sure that dynamic dax devices are torn down with a
* cleared @pgmap field (see kill_dev_dax()).
*/
static bool is_static(struct dax_region *dax_region)
{
return (dax_region->res.flags & IORESOURCE_DAX_STATIC) != 0;
}
bool static_dev_dax(struct dev_dax *dev_dax)
{
return is_static(dev_dax->region);
}
EXPORT_SYMBOL_GPL(static_dev_dax);
static u64 dev_dax_size(struct dev_dax *dev_dax)
{
u64 size = 0;
int i;
device_lock_assert(&dev_dax->dev);
for (i = 0; i < dev_dax->nr_range; i++)
size += range_len(&dev_dax->ranges[i].range);
return size;
}
static int dax_bus_probe(struct device *dev)
{
struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
struct dev_dax *dev_dax = to_dev_dax(dev);
struct dax_region *dax_region = dev_dax->region;
int rc;
if (dev_dax_size(dev_dax) == 0 || dev_dax->id < 0)
return -ENXIO;
rc = dax_drv->probe(dev_dax);
if (rc || is_static(dax_region))
return rc;
/*
* Track new seed creation only after successful probe of the
* previous seed.
*/
if (dax_region->seed == dev)
dax_region->seed = NULL;
return 0;
}
static void dax_bus_remove(struct device *dev)
{
struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
struct dev_dax *dev_dax = to_dev_dax(dev);
if (dax_drv->remove)
dax_drv->remove(dev_dax);
}
static struct bus_type dax_bus_type = {
.name = "dax",
.uevent = dax_bus_uevent,
.match = dax_bus_match,
.probe = dax_bus_probe,
.remove = dax_bus_remove,
.drv_groups = dax_drv_groups,
};
static int dax_bus_match(struct device *dev, struct device_driver *drv)
{
struct dax_device_driver *dax_drv = to_dax_drv(drv);
if (dax_match_id(dax_drv, dev))
return 1;
return dax_match_type(dax_drv, dev);
}
/*
* Rely on the fact that drvdata is set before the attributes are
* registered, and that the attributes are unregistered before drvdata
* is cleared to assume that drvdata is always valid.
*/
static ssize_t id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dax_region *dax_region = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", dax_region->id);
}
static DEVICE_ATTR_RO(id);
static ssize_t region_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dax_region *dax_region = dev_get_drvdata(dev);
return sprintf(buf, "%llu\n", (unsigned long long)
resource_size(&dax_region->res));
}
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
region_size_show, NULL);
static ssize_t region_align_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dax_region *dax_region = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", dax_region->align);
}
static struct device_attribute dev_attr_region_align =
__ATTR(align, 0400, region_align_show, NULL);
#define for_each_dax_region_resource(dax_region, res) \
for (res = (dax_region)->res.child; res; res = res->sibling)
static unsigned long long dax_region_avail_size(struct dax_region *dax_region)
{
resource_size_t size = resource_size(&dax_region->res);
struct resource *res;
device_lock_assert(dax_region->dev);
for_each_dax_region_resource(dax_region, res)
size -= resource_size(res);
return size;
}
static ssize_t available_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dax_region *dax_region = dev_get_drvdata(dev);
unsigned long long size;
device_lock(dev);
size = dax_region_avail_size(dax_region);
device_unlock(dev);
return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(available_size);
static ssize_t seed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dax_region *dax_region = dev_get_drvdata(dev);
struct device *seed;
ssize_t rc;
if (is_static(dax_region))
return -EINVAL;
device_lock(dev);
seed = dax_region->seed;
rc = sprintf(buf, "%s\n", seed ? dev_name(seed) : "");
device_unlock(dev);
return rc;
}
static DEVICE_ATTR_RO(seed);
static ssize_t create_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dax_region *dax_region = dev_get_drvdata(dev);
struct device *youngest;
ssize_t rc;
if (is_static(dax_region))
return -EINVAL;
device_lock(dev);
youngest = dax_region->youngest;
rc = sprintf(buf, "%s\n", youngest ? dev_name(youngest) : "");
device_unlock(dev);
return rc;
}
static ssize_t create_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
struct dax_region *dax_region = dev_get_drvdata(dev);
unsigned long long avail;
ssize_t rc;
int val;
if (is_static(dax_region))
return -EINVAL;
rc = kstrtoint(buf, 0, &val);
if (rc)
return rc;
if (val != 1)
return -EINVAL;
device_lock(dev);
avail = dax_region_avail_size(dax_region);
if (avail == 0)
rc = -ENOSPC;
else {
struct dev_dax_data data = {
.dax_region = dax_region,
.size = 0,
.id = -1,
};
struct dev_dax *dev_dax = devm_create_dev_dax(&data);
if (IS_ERR(dev_dax))
rc = PTR_ERR(dev_dax);
else {
/*
* In support of crafting multiple new devices
* simultaneously multiple seeds can be created,
* but only the first one that has not been
* successfully bound is tracked as the region
* seed.
*/
if (!dax_region->seed)
dax_region->seed = &dev_dax->dev;
dax_region->youngest = &dev_dax->dev;
rc = len;
}
}
device_unlock(dev);
return rc;
}
static DEVICE_ATTR_RW(create);
void kill_dev_dax(struct dev_dax *dev_dax)
{
struct dax_device *dax_dev = dev_dax->dax_dev;
struct inode *inode = dax_inode(dax_dev);
kill_dax(dax_dev);
unmap_mapping_range(inode->i_mapping, 0, 0, 1);
/*
* Dynamic dax regions have the pgmap allocated via devm_kzalloc()
* and thus freed by devm. Clear the pgmap to not have stale pgmap
* ranges on probe() from previous reconfigurations of region devices.
*/
if (!static_dev_dax(dev_dax))
dev_dax->pgmap = NULL;
}
EXPORT_SYMBOL_GPL(kill_dev_dax);
static void trim_dev_dax_range(struct dev_dax *dev_dax)
{
int i = dev_dax->nr_range - 1;
struct range *range = &dev_dax->ranges[i].range;
struct dax_region *dax_region = dev_dax->region;
device_lock_assert(dax_region->dev);
dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i,
(unsigned long long)range->start,
(unsigned long long)range->end);
__release_region(&dax_region->res, range->start, range_len(range));
if (--dev_dax->nr_range == 0) {
kfree(dev_dax->ranges);
dev_dax->ranges = NULL;
}
}
static void free_dev_dax_ranges(struct dev_dax *dev_dax)
{
while (dev_dax->nr_range)
trim_dev_dax_range(dev_dax);
}
static void unregister_dev_dax(void *dev)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
dev_dbg(dev, "%s\n", __func__);
kill_dev_dax(dev_dax);
device_del(dev);
free_dev_dax_ranges(dev_dax);
put_device(dev);
}
static void dax_region_free(struct kref *kref)
{
struct dax_region *dax_region;
dax_region = container_of(kref, struct dax_region, kref);
kfree(dax_region);
}
static void dax_region_put(struct dax_region *dax_region)
{
kref_put(&dax_region->kref, dax_region_free);
}
/* a return value >= 0 indicates this invocation invalidated the id */
static int __free_dev_dax_id(struct dev_dax *dev_dax)
{
struct device *dev = &dev_dax->dev;
struct dax_region *dax_region;
int rc = dev_dax->id;
device_lock_assert(dev);
if (!dev_dax->dyn_id || dev_dax->id < 0)
return -1;
dax_region = dev_dax->region;
ida_free(&dax_region->ida, dev_dax->id);
dax_region_put(dax_region);
dev_dax->id = -1;
return rc;
}
static int free_dev_dax_id(struct dev_dax *dev_dax)
{
struct device *dev = &dev_dax->dev;
int rc;
device_lock(dev);
rc = __free_dev_dax_id(dev_dax);
device_unlock(dev);
return rc;
}
static int alloc_dev_dax_id(struct dev_dax *dev_dax)
{
struct dax_region *dax_region = dev_dax->region;
int id;
id = ida_alloc(&dax_region->ida, GFP_KERNEL);
if (id < 0)
return id;
kref_get(&dax_region->kref);
dev_dax->dyn_id = true;
dev_dax->id = id;
return id;
}
static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
struct dax_region *dax_region = dev_get_drvdata(dev);
struct dev_dax *dev_dax;
struct device *victim;
bool do_del = false;
int rc;
if (is_static(dax_region))
return -EINVAL;
victim = device_find_child_by_name(dax_region->dev, buf);
if (!victim)
return -ENXIO;
device_lock(dev);
device_lock(victim);
dev_dax = to_dev_dax(victim);
if (victim->driver || dev_dax_size(dev_dax))
rc = -EBUSY;
else {
/*
* Invalidate the device so it does not become active
* again, but always preserve device-id-0 so that
* /sys/bus/dax/ is guaranteed to be populated while any
* dax_region is registered.
*/
if (dev_dax->id > 0) {
do_del = __free_dev_dax_id(dev_dax) >= 0;
rc = len;
if (dax_region->seed == victim)
dax_region->seed = NULL;
if (dax_region->youngest == victim)
dax_region->youngest = NULL;
} else
rc = -EBUSY;
}
device_unlock(victim);
/* won the race to invalidate the device, clean it up */
if (do_del)
devm_release_action(dev, unregister_dev_dax, victim);
device_unlock(dev);
put_device(victim);
return rc;
}
static DEVICE_ATTR_WO(delete);
static umode_t dax_region_visible(struct kobject *kobj, struct attribute *a,
int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct dax_region *dax_region = dev_get_drvdata(dev);
if (is_static(dax_region))
if (a == &dev_attr_available_size.attr
|| a == &dev_attr_create.attr
|| a == &dev_attr_seed.attr
|| a == &dev_attr_delete.attr)
return 0;
return a->mode;
}
static struct attribute *dax_region_attributes[] = {
&dev_attr_available_size.attr,
&dev_attr_region_size.attr,
&dev_attr_region_align.attr,
&dev_attr_create.attr,
&dev_attr_seed.attr,
&dev_attr_delete.attr,
&dev_attr_id.attr,
NULL,
};
static const struct attribute_group dax_region_attribute_group = {
.name = "dax_region",
.attrs = dax_region_attributes,
.is_visible = dax_region_visible,
};
static const struct attribute_group *dax_region_attribute_groups[] = {
&dax_region_attribute_group,
NULL,
};
static void dax_region_unregister(void *region)
{
struct dax_region *dax_region = region;
sysfs_remove_groups(&dax_region->dev->kobj,
dax_region_attribute_groups);
dax_region_put(dax_region);
}
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
struct range *range, int target_node, unsigned int align,
unsigned long flags)
{
struct dax_region *dax_region;
/*
* The DAX core assumes that it can store its private data in
* parent->driver_data. This WARN is a reminder / safeguard for
* developers of device-dax drivers.
*/
if (dev_get_drvdata(parent)) {
dev_WARN(parent, "dax core failed to setup private data\n");
return NULL;
}
if (!IS_ALIGNED(range->start, align)
|| !IS_ALIGNED(range_len(range), align))
return NULL;
dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
if (!dax_region)
return NULL;
dev_set_drvdata(parent, dax_region);
kref_init(&dax_region->kref);
dax_region->id = region_id;
dax_region->align = align;
dax_region->dev = parent;
dax_region->target_node = target_node;
ida_init(&dax_region->ida);
dax_region->res = (struct resource) {
.start = range->start,
.end = range->end,
.flags = IORESOURCE_MEM | flags,
};
if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
kfree(dax_region);
return NULL;
}
if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
return NULL;
return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);
static void dax_mapping_release(struct device *dev)
{
struct dax_mapping *mapping = to_dax_mapping(dev);
struct device *parent = dev->parent;
struct dev_dax *dev_dax = to_dev_dax(parent);
ida_free(&dev_dax->ida, mapping->id);
kfree(mapping);
put_device(parent);
}
static void unregister_dax_mapping(void *data)
{
struct device *dev = data;
struct dax_mapping *mapping = to_dax_mapping(dev);
struct dev_dax *dev_dax = to_dev_dax(dev->parent);
struct dax_region *dax_region = dev_dax->region;
dev_dbg(dev, "%s\n", __func__);
device_lock_assert(dax_region->dev);
dev_dax->ranges[mapping->range_id].mapping = NULL;
mapping->range_id = -1;
device_unregister(dev);
}
static struct dev_dax_range *get_dax_range(struct device *dev)
{
struct dax_mapping *mapping = to_dax_mapping(dev);
struct dev_dax *dev_dax = to_dev_dax(dev->parent);
struct dax_region *dax_region = dev_dax->region;
device_lock(dax_region->dev);
if (mapping->range_id < 0) {
device_unlock(dax_region->dev);
return NULL;
}
return &dev_dax->ranges[mapping->range_id];
}
static void put_dax_range(struct dev_dax_range *dax_range)
{
struct dax_mapping *mapping = dax_range->mapping;
struct dev_dax *dev_dax = to_dev_dax(mapping->dev.parent);
struct dax_region *dax_region = dev_dax->region;
device_unlock(dax_region->dev);
}
static ssize_t start_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_dax_range *dax_range;
ssize_t rc;
dax_range = get_dax_range(dev);
if (!dax_range)
return -ENXIO;
rc = sprintf(buf, "%#llx\n", dax_range->range.start);
put_dax_range(dax_range);
return rc;
}
static DEVICE_ATTR(start, 0400, start_show, NULL);
static ssize_t end_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_dax_range *dax_range;
ssize_t rc;
dax_range = get_dax_range(dev);
if (!dax_range)
return -ENXIO;
rc = sprintf(buf, "%#llx\n", dax_range->range.end);
put_dax_range(dax_range);
return rc;
}
static DEVICE_ATTR(end, 0400, end_show, NULL);
static ssize_t pgoff_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_dax_range *dax_range;
ssize_t rc;
dax_range = get_dax_range(dev);
if (!dax_range)
return -ENXIO;
rc = sprintf(buf, "%#lx\n", dax_range->pgoff);
put_dax_range(dax_range);
return rc;
}
static DEVICE_ATTR(page_offset, 0400, pgoff_show, NULL);
static struct attribute *dax_mapping_attributes[] = {
&dev_attr_start.attr,
&dev_attr_end.attr,
&dev_attr_page_offset.attr,
NULL,
};
static const struct attribute_group dax_mapping_attribute_group = {
.attrs = dax_mapping_attributes,
};
static const struct attribute_group *dax_mapping_attribute_groups[] = {
&dax_mapping_attribute_group,
NULL,
};
static struct device_type dax_mapping_type = {
.release = dax_mapping_release,
.groups = dax_mapping_attribute_groups,
};
static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id)
{
struct dax_region *dax_region = dev_dax->region;
struct dax_mapping *mapping;
struct device *dev;
int rc;
device_lock_assert(dax_region->dev);
if (dev_WARN_ONCE(&dev_dax->dev, !dax_region->dev->driver,
"region disabled\n"))
return -ENXIO;
mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
if (!mapping)
return -ENOMEM;
mapping->range_id = range_id;
mapping->id = ida_alloc(&dev_dax->ida, GFP_KERNEL);
if (mapping->id < 0) {
kfree(mapping);
return -ENOMEM;
}
dev_dax->ranges[range_id].mapping = mapping;
dev = &mapping->dev;
device_initialize(dev);
dev->parent = &dev_dax->dev;
get_device(dev->parent);
dev->type = &dax_mapping_type;
dev_set_name(dev, "mapping%d", mapping->id);
rc = device_add(dev);
if (rc) {
put_device(dev);
return rc;
}
rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_mapping,
dev);
if (rc)
return rc;
return 0;
}
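/*
 * Claim [start, start + size) from the parent region resource and append it
 * to dev_dax->ranges[]. A zero size is only allowed as the initial "seed"
 * allocation of an otherwise empty device.
 */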
static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
resource_size_t size)
{
struct dax_region *dax_region = dev_dax->region;
struct resource *res = &dax_region->res;
struct device *dev = &dev_dax->dev;
struct dev_dax_range *ranges;
unsigned long pgoff = 0;
struct resource *alloc;
int i, rc;
device_lock_assert(dax_region->dev);
/* handle the seed alloc special case */
if (!size) {
if (dev_WARN_ONCE(dev, dev_dax->nr_range,
"0-size allocation must be first\n"))
return -EBUSY;
/* nr_range == 0 is elsewhere special cased as 0-size device */
return 0;
}
alloc = __request_region(res, start, size, dev_name(dev), 0);
if (!alloc)
return -ENOMEM;
ranges = krealloc(dev_dax->ranges, sizeof(*ranges)
* (dev_dax->nr_range + 1), GFP_KERNEL);
if (!ranges) {
__release_region(res, alloc->start, resource_size(alloc));
return -ENOMEM;
}
for (i = 0; i < dev_dax->nr_range; i++)
pgoff += PHYS_PFN(range_len(&ranges[i].range));
dev_dax->ranges = ranges;
ranges[dev_dax->nr_range++] = (struct dev_dax_range) {
.pgoff = pgoff,
.range = {
.start = alloc->start,
.end = alloc->end,
},
};
dev_dbg(dev, "alloc range[%d]: %pa:%pa\n", dev_dax->nr_range - 1,
&alloc->start, &alloc->end);
/*
* A dev_dax instance must be registered before mapping device
* children can be added. Defer to devm_create_dev_dax() to add
* the initial mapping device.
*/
if (!device_is_registered(&dev_dax->dev))
return 0;
rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1);
if (rc)
trim_dev_dax_range(dev_dax);
return rc;
}
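/*
 * Grow or shrink the last range of the device in place by resizing its
 * backing resource; freeing a whole range is handled by dev_dax_shrink().
 */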
static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, resource_size_t size)
{
int last_range = dev_dax->nr_range - 1;
struct dev_dax_range *dax_range = &dev_dax->ranges[last_range];
struct dax_region *dax_region = dev_dax->region;
bool is_shrink = resource_size(res) > size;
struct range *range = &dax_range->range;
struct device *dev = &dev_dax->dev;
int rc;
device_lock_assert(dax_region->dev);
if (dev_WARN_ONCE(dev, !size, "deletion is handled by dev_dax_shrink\n"))
return -EINVAL;
rc = adjust_resource(res, range->start, size);
if (rc)
return rc;
*range = (struct range) {
.start = range->start,
.end = range->start + size - 1,
};
dev_dbg(dev, "%s range[%d]: %#llx:%#llx\n", is_shrink ? "shrink" : "extend",
last_range, (unsigned long long) range->start,
(unsigned long long) range->end);
return 0;
}
static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
unsigned long long size;
device_lock(dev);
size = dev_dax_size(dev_dax);
device_unlock(dev);
return sprintf(buf, "%llu\n", size);
}
static bool alloc_is_aligned(struct dev_dax *dev_dax, resource_size_t size)
{
/*
* The minimum mapping granularity for a device instance is a
* single subsection, unless the arch says otherwise.
*/
return IS_ALIGNED(size, max_t(unsigned long, dev_dax->align, memremap_compat_align()));
}
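/*
 * Shrink the device by walking dev_dax->ranges[] from the end: whole ranges
 * are released (including their mapping devices), and a final partial trim
 * adjusts the matching region resource in place.
 */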
static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size)
{
resource_size_t to_shrink = dev_dax_size(dev_dax) - size;
struct dax_region *dax_region = dev_dax->region;
struct device *dev = &dev_dax->dev;
int i;
for (i = dev_dax->nr_range - 1; i >= 0; i--) {
struct range *range = &dev_dax->ranges[i].range;
struct dax_mapping *mapping = dev_dax->ranges[i].mapping;
struct resource *adjust = NULL, *res;
resource_size_t shrink;
shrink = min_t(u64, to_shrink, range_len(range));
if (shrink >= range_len(range)) {
devm_release_action(dax_region->dev,
unregister_dax_mapping, &mapping->dev);
trim_dev_dax_range(dev_dax);
to_shrink -= shrink;
if (!to_shrink)
break;
continue;
}
for_each_dax_region_resource(dax_region, res)
if (strcmp(res->name, dev_name(dev)) == 0
&& res->start == range->start) {
adjust = res;
break;
}
if (dev_WARN_ONCE(dev, !adjust || i != dev_dax->nr_range - 1,
"failed to find matching resource\n"))
return -ENXIO;
return adjust_dev_dax_range(dev_dax, adjust, range_len(range)
- shrink);
}
return 0;
}
/*
* Only allow adjustments that preserve the relative pgoff of existing
* allocations. I.e. the dev_dax->ranges array is ordered by increasing pgoff.
*/
static bool adjust_ok(struct dev_dax *dev_dax, struct resource *res)
{
struct dev_dax_range *last;
int i;
if (dev_dax->nr_range == 0)
return false;
if (strcmp(res->name, dev_name(&dev_dax->dev)) != 0)
return false;
last = &dev_dax->ranges[dev_dax->nr_range - 1];
if (last->range.start != res->start || last->range.end != res->end)
return false;
for (i = 0; i < dev_dax->nr_range - 1; i++) {
struct dev_dax_range *dax_range = &dev_dax->ranges[i];
if (dax_range->pgoff > last->pgoff)
return false;
}
return true;
}
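/*
 * Resize the device to @size: shrinking is delegated to dev_dax_shrink(),
 * while growing claims free space from the region, preferring to extend the
 * device's own last resource before allocating a new range.
 */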
static ssize_t dev_dax_resize(struct dax_region *dax_region,
struct dev_dax *dev_dax, resource_size_t size)
{
resource_size_t avail = dax_region_avail_size(dax_region), to_alloc;
resource_size_t dev_size = dev_dax_size(dev_dax);
struct resource *region_res = &dax_region->res;
struct device *dev = &dev_dax->dev;
struct resource *res, *first;
resource_size_t alloc = 0;
int rc;
if (dev->driver)
return -EBUSY;
if (size == dev_size)
return 0;
if (size > dev_size && size - dev_size > avail)
return -ENOSPC;
if (size < dev_size)
return dev_dax_shrink(dev_dax, size);
to_alloc = size - dev_size;
if (dev_WARN_ONCE(dev, !alloc_is_aligned(dev_dax, to_alloc),
"resize of %pa misaligned\n", &to_alloc))
return -ENXIO;
/*
* Expand the device into the unused portion of the region. This
* may involve adjusting the end of an existing resource, or
* allocating a new resource.
*/
retry:
first = region_res->child;
if (!first)
return alloc_dev_dax_range(dev_dax, dax_region->res.start, to_alloc);
rc = -ENOSPC;
for (res = first; res; res = res->sibling) {
struct resource *next = res->sibling;
/* space at the beginning of the region */
if (res == first && res->start > dax_region->res.start) {
alloc = min(res->start - dax_region->res.start, to_alloc);
rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, alloc);
break;
}
alloc = 0;
/* space between allocations */
if (next && next->start > res->end + 1)
alloc = min(next->start - (res->end + 1), to_alloc);
/* space at the end of the region */
if (!alloc && !next && res->end < region_res->end)
alloc = min(region_res->end - res->end, to_alloc);
if (!alloc)
continue;
if (adjust_ok(dev_dax, res)) {
rc = adjust_dev_dax_range(dev_dax, res, resource_size(res) + alloc);
break;
}
rc = alloc_dev_dax_range(dev_dax, res->end + 1, alloc);
break;
}
if (rc)
return rc;
to_alloc -= alloc;
if (to_alloc)
goto retry;
return 0;
}
static ssize_t size_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
ssize_t rc;
unsigned long long val;
struct dev_dax *dev_dax = to_dev_dax(dev);
struct dax_region *dax_region = dev_dax->region;
rc = kstrtoull(buf, 0, &val);
if (rc)
return rc;
if (!alloc_is_aligned(dev_dax, val)) {
dev_dbg(dev, "%s: size: %llu misaligned\n", __func__, val);
return -EINVAL;
}
device_lock(dax_region->dev);
if (!dax_region->dev->driver) {
device_unlock(dax_region->dev);
return -ENXIO;
}
device_lock(dev);
rc = dev_dax_resize(dax_region, dev_dax, val);
device_unlock(dev);
device_unlock(dax_region->dev);
return rc == 0 ? len : rc;
}
static DEVICE_ATTR_RW(size);
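/* Parse a "<start>-<end>" pair of hexadecimal addresses into a struct range. */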
static ssize_t range_parse(const char *opt, size_t len, struct range *range)
{
unsigned long long addr = 0;
char *start, *end, *str;
ssize_t rc = -EINVAL;
str = kstrdup(opt, GFP_KERNEL);
if (!str)
return rc;
end = str;
start = strsep(&end, "-");
if (!start || !end)
goto err;
rc = kstrtoull(start, 16, &addr);
if (rc)
goto err;
range->start = addr;
rc = kstrtoull(end, 16, &addr);
if (rc)
goto err;
range->end = addr;
err:
kfree(str);
return rc;
}
static ssize_t mapping_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
struct dax_region *dax_region = dev_dax->region;
size_t to_alloc;
struct range r;
ssize_t rc;
rc = range_parse(buf, len, &r);
if (rc)
return rc;
rc = -ENXIO;
device_lock(dax_region->dev);
if (!dax_region->dev->driver) {
device_unlock(dax_region->dev);
return rc;
}
device_lock(dev);
to_alloc = range_len(&r);
if (alloc_is_aligned(dev_dax, to_alloc))
rc = alloc_dev_dax_range(dev_dax, r.start, to_alloc);
device_unlock(dev);
device_unlock(dax_region->dev);
return rc == 0 ? len : rc;
}
static DEVICE_ATTR_WO(mapping);
static ssize_t align_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
return sprintf(buf, "%d\n", dev_dax->align);
}
static ssize_t dev_dax_validate_align(struct dev_dax *dev_dax)
{
struct device *dev = &dev_dax->dev;
int i;
for (i = 0; i < dev_dax->nr_range; i++) {
size_t len = range_len(&dev_dax->ranges[i].range);
if (!alloc_is_aligned(dev_dax, len)) {
dev_dbg(dev, "%s: align %u invalid for range %d\n",
__func__, dev_dax->align, i);
return -EINVAL;
}
}
return 0;
}
static ssize_t align_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
struct dax_region *dax_region = dev_dax->region;
unsigned long val, align_save;
ssize_t rc;
rc = kstrtoul(buf, 0, &val);
if (rc)
return -ENXIO;
if (!dax_align_valid(val))
return -EINVAL;
device_lock(dax_region->dev);
if (!dax_region->dev->driver) {
device_unlock(dax_region->dev);
return -ENXIO;
}
device_lock(dev);
if (dev->driver) {
rc = -EBUSY;
goto out_unlock;
}
align_save = dev_dax->align;
dev_dax->align = val;
rc = dev_dax_validate_align(dev_dax);
if (rc)
dev_dax->align = align_save;
out_unlock:
device_unlock(dev);
device_unlock(dax_region->dev);
return rc == 0 ? len : rc;
}
static DEVICE_ATTR_RW(align);
static int dev_dax_target_node(struct dev_dax *dev_dax)
{
struct dax_region *dax_region = dev_dax->region;
return dax_region->target_node;
}
static ssize_t target_node_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
return sprintf(buf, "%d\n", dev_dax_target_node(dev_dax));
}
static DEVICE_ATTR_RO(target_node);
static ssize_t resource_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
struct dax_region *dax_region = dev_dax->region;
unsigned long long start;
if (dev_dax->nr_range < 1)
start = dax_region->res.start;
else
start = dev_dax->ranges[0].range.start;
return sprintf(buf, "%#llx\n", start);
}
static DEVICE_ATTR(resource, 0400, resource_show, NULL);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
/*
* We only ever expect to handle device-dax instances, i.e. the
* @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
*/
return sprintf(buf, DAX_DEVICE_MODALIAS_FMT "\n", 0);
}
static DEVICE_ATTR_RO(modalias);
static ssize_t numa_node_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);
static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct dev_dax *dev_dax = to_dev_dax(dev);
struct dax_region *dax_region = dev_dax->region;
if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0)
return 0;
if (a == &dev_attr_numa_node.attr && !IS_ENABLED(CONFIG_NUMA))
return 0;
if (a == &dev_attr_mapping.attr && is_static(dax_region))
return 0;
if ((a == &dev_attr_align.attr ||
a == &dev_attr_size.attr) && is_static(dax_region))
return 0444;
return a->mode;
}
static struct attribute *dev_dax_attributes[] = {
&dev_attr_modalias.attr,
&dev_attr_size.attr,
&dev_attr_mapping.attr,
&dev_attr_target_node.attr,
&dev_attr_align.attr,
&dev_attr_resource.attr,
&dev_attr_numa_node.attr,
NULL,
};
static const struct attribute_group dev_dax_attribute_group = {
.attrs = dev_dax_attributes,
.is_visible = dev_dax_visible,
};
static const struct attribute_group *dax_attribute_groups[] = {
&dev_dax_attribute_group,
NULL,
};
static void dev_dax_release(struct device *dev)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
struct dax_device *dax_dev = dev_dax->dax_dev;
put_dax(dax_dev);
free_dev_dax_id(dev_dax);
kfree(dev_dax->pgmap);
kfree(dev_dax);
}
static const struct device_type dev_dax_type = {
.release = dev_dax_release,
.groups = dax_attribute_groups,
};
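/*
 * devm_create_dev_dax - create and register a dev_dax instance
 *
 * Allocates the device, assigns a static or dynamically allocated id,
 * reserves the initial range, and registers the device on the dax bus.
 * Teardown is tied to the parent region via unregister_dev_dax().
 */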
struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
{
struct dax_region *dax_region = data->dax_region;
struct device *parent = dax_region->dev;
struct dax_device *dax_dev;
struct dev_dax *dev_dax;
struct inode *inode;
struct device *dev;
int rc;
dev_dax = kzalloc(sizeof(*dev_dax), GFP_KERNEL);
if (!dev_dax)
return ERR_PTR(-ENOMEM);
dev_dax->region = dax_region;
if (is_static(dax_region)) {
if (dev_WARN_ONCE(parent, data->id < 0,
"dynamic id specified to static region\n")) {
rc = -EINVAL;
goto err_id;
}
dev_dax->id = data->id;
} else {
if (dev_WARN_ONCE(parent, data->id >= 0,
"static id specified to dynamic region\n")) {
rc = -EINVAL;
goto err_id;
}
rc = alloc_dev_dax_id(dev_dax);
if (rc < 0)
goto err_id;
}
dev = &dev_dax->dev;
device_initialize(dev);
dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);
rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, data->size);
if (rc)
goto err_range;
if (data->pgmap) {
dev_WARN_ONCE(parent, !is_static(dax_region),
"custom dev_pagemap requires a static dax_region\n");
dev_dax->pgmap = kmemdup(data->pgmap,
sizeof(struct dev_pagemap), GFP_KERNEL);
if (!dev_dax->pgmap) {
rc = -ENOMEM;
goto err_pgmap;
}
}
/*
* No dax_operations since there is no access to this device outside of
* mmap of the resulting character device.
*/
dax_dev = alloc_dax(dev_dax, NULL);
if (IS_ERR(dax_dev)) {
rc = PTR_ERR(dax_dev);
goto err_alloc_dax;
}
set_dax_synchronous(dax_dev);
set_dax_nocache(dax_dev);
set_dax_nomc(dax_dev);
/* a device_dax instance is dead while the driver is not attached */
kill_dax(dax_dev);
dev_dax->dax_dev = dax_dev;
dev_dax->target_node = dax_region->target_node;
dev_dax->align = dax_region->align;
ida_init(&dev_dax->ida);
inode = dax_inode(dax_dev);
dev->devt = inode->i_rdev;
dev->bus = &dax_bus_type;
dev->parent = parent;
dev->type = &dev_dax_type;
rc = device_add(dev);
if (rc) {
kill_dev_dax(dev_dax);
put_device(dev);
return ERR_PTR(rc);
}
rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev);
if (rc)
return ERR_PTR(rc);
/* register mapping device for the initial allocation range */
if (dev_dax->nr_range && range_len(&dev_dax->ranges[0].range)) {
rc = devm_register_dax_mapping(dev_dax, 0);
if (rc)
return ERR_PTR(rc);
}
return dev_dax;
err_alloc_dax:
kfree(dev_dax->pgmap);
err_pgmap:
free_dev_dax_ranges(dev_dax);
err_range:
free_dev_dax_id(dev_dax);
err_id:
kfree(dev_dax);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dev_dax);
int __dax_driver_register(struct dax_device_driver *dax_drv,
struct module *module, const char *mod_name)
{
struct device_driver *drv = &dax_drv->drv;
/*
* dax_bus_probe() calls dax_drv->probe() unconditionally.
* So better be safe than sorry and ensure it is provided.
*/
if (!dax_drv->probe)
return -EINVAL;
INIT_LIST_HEAD(&dax_drv->ids);
drv->owner = module;
drv->name = mod_name;
drv->mod_name = mod_name;
drv->bus = &dax_bus_type;
return driver_register(drv);
}
EXPORT_SYMBOL_GPL(__dax_driver_register);
void dax_driver_unregister(struct dax_device_driver *dax_drv)
{
struct device_driver *drv = &dax_drv->drv;
struct dax_id *dax_id, *_id;
mutex_lock(&dax_bus_lock);
list_for_each_entry_safe(dax_id, _id, &dax_drv->ids, list) {
list_del(&dax_id->list);
kfree(dax_id);
}
mutex_unlock(&dax_bus_lock);
driver_unregister(drv);
}
EXPORT_SYMBOL_GPL(dax_driver_unregister);
int __init dax_bus_init(void)
{
return bus_register(&dax_bus_type);
}
void __exit dax_bus_exit(void)
{
bus_unregister(&dax_bus_type);
}
| linux-master | drivers/dax/bus.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include "dax-private.h"
#include "bus.h"
static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
const char *func)
{
struct device *dev = &dev_dax->dev;
unsigned long mask;
if (!dax_alive(dev_dax->dax_dev))
return -ENXIO;
/* prevent private mappings from being established */
if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
dev_info_ratelimited(dev,
"%s: %s: fail, attempted private mapping\n",
current->comm, func);
return -EINVAL;
}
mask = dev_dax->align - 1;
if (vma->vm_start & mask || vma->vm_end & mask) {
dev_info_ratelimited(dev,
"%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
current->comm, func, vma->vm_start, vma->vm_end,
mask);
return -EINVAL;
}
if (!vma_is_dax(vma)) {
dev_info_ratelimited(dev,
"%s: %s: fail, vma is not DAX capable\n",
current->comm, func);
return -EINVAL;
}
return 0;
}
/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
unsigned long size)
{
int i;
for (i = 0; i < dev_dax->nr_range; i++) {
struct dev_dax_range *dax_range = &dev_dax->ranges[i];
struct range *range = &dax_range->range;
unsigned long long pgoff_end;
phys_addr_t phys;
pgoff_end = dax_range->pgoff + PHYS_PFN(range_len(range)) - 1;
if (pgoff < dax_range->pgoff || pgoff > pgoff_end)
continue;
phys = PFN_PHYS(pgoff - dax_range->pgoff) + range->start;
if (phys + size - 1 <= range->end)
return phys;
break;
}
return -1;
}
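/*
 * Associate the faulting page(s) with the file's address_space so the pages
 * carry a valid mapping and index for reverse lookups (e.g. memory failure
 * handling); only the compound head is tagged when the pagemap uses a
 * vmemmap_shift.
 */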
static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
unsigned long fault_size)
{
unsigned long i, nr_pages = fault_size / PAGE_SIZE;
struct file *filp = vmf->vma->vm_file;
struct dev_dax *dev_dax = filp->private_data;
pgoff_t pgoff;
/* mapping is only set on the head */
if (dev_dax->pgmap->vmemmap_shift)
nr_pages = 1;
pgoff = linear_page_index(vmf->vma,
ALIGN(vmf->address, fault_size));
for (i = 0; i < nr_pages; i++) {
struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
page = compound_head(page);
if (page->mapping)
continue;
page->mapping = filp->f_mapping;
page->index = pgoff + i;
}
}
static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
struct vm_fault *vmf)
{
struct device *dev = &dev_dax->dev;
phys_addr_t phys;
pfn_t pfn;
unsigned int fault_size = PAGE_SIZE;
if (check_vma(dev_dax, vmf->vma, __func__))
return VM_FAULT_SIGBUS;
if (dev_dax->align > PAGE_SIZE) {
dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
dev_dax->align, fault_size);
return VM_FAULT_SIGBUS;
}
if (fault_size != dev_dax->align)
return VM_FAULT_SIGBUS;
phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
if (phys == -1) {
dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
return VM_FAULT_SIGBUS;
}
pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
dax_set_mapping(vmf, pfn, fault_size);
return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
}
static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
struct vm_fault *vmf)
{
unsigned long pmd_addr = vmf->address & PMD_MASK;
struct device *dev = &dev_dax->dev;
phys_addr_t phys;
pgoff_t pgoff;
pfn_t pfn;
unsigned int fault_size = PMD_SIZE;
if (check_vma(dev_dax, vmf->vma, __func__))
return VM_FAULT_SIGBUS;
if (dev_dax->align > PMD_SIZE) {
dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
dev_dax->align, fault_size);
return VM_FAULT_SIGBUS;
}
if (fault_size < dev_dax->align)
return VM_FAULT_SIGBUS;
else if (fault_size > dev_dax->align)
return VM_FAULT_FALLBACK;
/* if we are outside of the VMA */
if (pmd_addr < vmf->vma->vm_start ||
(pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
return VM_FAULT_SIGBUS;
pgoff = linear_page_index(vmf->vma, pmd_addr);
phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
if (phys == -1) {
dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
return VM_FAULT_SIGBUS;
}
pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
dax_set_mapping(vmf, pfn, fault_size);
return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
struct vm_fault *vmf)
{
unsigned long pud_addr = vmf->address & PUD_MASK;
struct device *dev = &dev_dax->dev;
phys_addr_t phys;
pgoff_t pgoff;
pfn_t pfn;
unsigned int fault_size = PUD_SIZE;
if (check_vma(dev_dax, vmf->vma, __func__))
return VM_FAULT_SIGBUS;
if (dev_dax->align > PUD_SIZE) {
dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
dev_dax->align, fault_size);
return VM_FAULT_SIGBUS;
}
if (fault_size < dev_dax->align)
return VM_FAULT_SIGBUS;
else if (fault_size > dev_dax->align)
return VM_FAULT_FALLBACK;
/* if we are outside of the VMA */
if (pud_addr < vmf->vma->vm_start ||
(pud_addr + PUD_SIZE) > vmf->vma->vm_end)
return VM_FAULT_SIGBUS;
pgoff = linear_page_index(vmf->vma, pud_addr);
phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
if (phys == -1) {
dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
return VM_FAULT_SIGBUS;
}
pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
dax_set_mapping(vmf, pfn, fault_size);
return vmf_insert_pfn_pud(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
struct vm_fault *vmf)
{
return VM_FAULT_FALLBACK;
}
#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
{
struct file *filp = vmf->vma->vm_file;
vm_fault_t rc = VM_FAULT_SIGBUS;
int id;
struct dev_dax *dev_dax = filp->private_data;
dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) order:%d\n", current->comm,
(vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
vmf->vma->vm_start, vmf->vma->vm_end, order);
id = dax_read_lock();
if (order == 0)
rc = __dev_dax_pte_fault(dev_dax, vmf);
else if (order == PMD_ORDER)
rc = __dev_dax_pmd_fault(dev_dax, vmf);
else if (order == PUD_ORDER)
rc = __dev_dax_pud_fault(dev_dax, vmf);
else
rc = VM_FAULT_SIGBUS;
dax_read_unlock(id);
return rc;
}
static vm_fault_t dev_dax_fault(struct vm_fault *vmf)
{
return dev_dax_huge_fault(vmf, 0);
}
static int dev_dax_may_split(struct vm_area_struct *vma, unsigned long addr)
{
struct file *filp = vma->vm_file;
struct dev_dax *dev_dax = filp->private_data;
if (!IS_ALIGNED(addr, dev_dax->align))
return -EINVAL;
return 0;
}
static unsigned long dev_dax_pagesize(struct vm_area_struct *vma)
{
struct file *filp = vma->vm_file;
struct dev_dax *dev_dax = filp->private_data;
return dev_dax->align;
}
static const struct vm_operations_struct dax_vm_ops = {
.fault = dev_dax_fault,
.huge_fault = dev_dax_huge_fault,
.may_split = dev_dax_may_split,
.pagesize = dev_dax_pagesize,
};
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct dev_dax *dev_dax = filp->private_data;
int rc, id;
dev_dbg(&dev_dax->dev, "trace\n");
/*
* We lock to check dax_dev liveness and will re-check at
* fault time.
*/
id = dax_read_lock();
rc = check_vma(dev_dax, vma, __func__);
dax_read_unlock(id);
if (rc)
return rc;
vma->vm_ops = &dax_vm_ops;
vm_flags_set(vma, VM_HUGEPAGE);
return 0;
}
/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_get_unmapped_area(struct file *filp,
unsigned long addr, unsigned long len, unsigned long pgoff,
unsigned long flags)
{
unsigned long off, off_end, off_align, len_align, addr_align, align;
struct dev_dax *dev_dax = filp ? filp->private_data : NULL;
if (!dev_dax || addr)
goto out;
align = dev_dax->align;
off = pgoff << PAGE_SHIFT;
off_end = off + len;
off_align = round_up(off, align);
if ((off_end <= off_align) || ((off_end - off_align) < align))
goto out;
len_align = len + align;
if ((off + len_align) < off)
goto out;
addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
pgoff, flags);
if (!IS_ERR_VALUE(addr_align)) {
addr_align += (off - addr_align) & (align - 1);
return addr_align;
}
out:
return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
static const struct address_space_operations dev_dax_aops = {
.dirty_folio = noop_dirty_folio,
};
static int dax_open(struct inode *inode, struct file *filp)
{
struct dax_device *dax_dev = inode_dax(inode);
struct inode *__dax_inode = dax_inode(dax_dev);
struct dev_dax *dev_dax = dax_get_private(dax_dev);
dev_dbg(&dev_dax->dev, "trace\n");
inode->i_mapping = __dax_inode->i_mapping;
inode->i_mapping->host = __dax_inode;
inode->i_mapping->a_ops = &dev_dax_aops;
filp->f_mapping = inode->i_mapping;
filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
filp->f_sb_err = file_sample_sb_err(filp);
filp->private_data = dev_dax;
inode->i_flags = S_DAX;
return 0;
}
static int dax_release(struct inode *inode, struct file *filp)
{
struct dev_dax *dev_dax = filp->private_data;
dev_dbg(&dev_dax->dev, "trace\n");
return 0;
}
static const struct file_operations dax_fops = {
.llseek = noop_llseek,
.owner = THIS_MODULE,
.open = dax_open,
.release = dax_release,
.get_unmapped_area = dax_get_unmapped_area,
.mmap = dax_mmap,
.mmap_supported_flags = MAP_SYNC,
};
static void dev_dax_cdev_del(void *cdev)
{
cdev_del(cdev);
}
static void dev_dax_kill(void *dev_dax)
{
kill_dev_dax(dev_dax);
}
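/*
 * Default device-dax probe: build (or reuse, for static regions) the
 * dev_pagemap covering all ranges, map them via devm_memremap_pages(), and
 * expose the result through a character device using dax_fops.
 */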
static int dev_dax_probe(struct dev_dax *dev_dax)
{
struct dax_device *dax_dev = dev_dax->dax_dev;
struct device *dev = &dev_dax->dev;
struct dev_pagemap *pgmap;
struct inode *inode;
struct cdev *cdev;
void *addr;
int rc, i;
if (static_dev_dax(dev_dax)) {
if (dev_dax->nr_range > 1) {
dev_warn(dev,
"static pgmap / multi-range device conflict\n");
return -EINVAL;
}
pgmap = dev_dax->pgmap;
} else {
if (dev_dax->pgmap) {
dev_warn(dev,
"dynamic-dax with pre-populated page map\n");
return -EINVAL;
}
pgmap = devm_kzalloc(dev,
struct_size(pgmap, ranges, dev_dax->nr_range - 1),
GFP_KERNEL);
if (!pgmap)
return -ENOMEM;
pgmap->nr_range = dev_dax->nr_range;
dev_dax->pgmap = pgmap;
for (i = 0; i < dev_dax->nr_range; i++) {
struct range *range = &dev_dax->ranges[i].range;
pgmap->ranges[i] = *range;
}
}
for (i = 0; i < dev_dax->nr_range; i++) {
struct range *range = &dev_dax->ranges[i].range;
if (!devm_request_mem_region(dev, range->start,
range_len(range), dev_name(dev))) {
dev_warn(dev, "mapping%d: %#llx-%#llx could not reserve range\n",
i, range->start, range->end);
return -EBUSY;
}
}
pgmap->type = MEMORY_DEVICE_GENERIC;
if (dev_dax->align > PAGE_SIZE)
pgmap->vmemmap_shift =
order_base_2(dev_dax->align >> PAGE_SHIFT);
addr = devm_memremap_pages(dev, pgmap);
if (IS_ERR(addr))
return PTR_ERR(addr);
inode = dax_inode(dax_dev);
cdev = inode->i_cdev;
cdev_init(cdev, &dax_fops);
cdev->owner = dev->driver->owner;
cdev_set_parent(cdev, &dev->kobj);
rc = cdev_add(cdev, dev->devt, 1);
if (rc)
return rc;
rc = devm_add_action_or_reset(dev, dev_dax_cdev_del, cdev);
if (rc)
return rc;
run_dax(dax_dev);
return devm_add_action_or_reset(dev, dev_dax_kill, dev_dax);
}
static struct dax_device_driver device_dax_driver = {
.probe = dev_dax_probe,
.type = DAXDRV_DEVICE_TYPE,
};
static int __init dax_init(void)
{
return dax_driver_register(&device_dax_driver);
}
static void __exit dax_exit(void)
{
dax_driver_unregister(&device_dax_driver);
}
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
module_init(dax_init);
module_exit(dax_exit);
MODULE_ALIAS_DAX_DEVICE(0);
| linux-master | drivers/dax/device.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-2019 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/memory-tiers.h>
#include "dax-private.h"
#include "bus.h"
/*
* Default abstract distance assigned to the NUMA node onlined
* by DAX/kmem if the low level platform driver didn't initialize
* one for this NUMA node.
*/
#define MEMTIER_DEFAULT_DAX_ADISTANCE (MEMTIER_ADISTANCE_DRAM * 5)
/* Memory resource name used for add_memory_driver_managed(). */
static const char *kmem_name;
/* Set if any memory will remain added when the driver will be unloaded. */
static bool any_hotremove_failed;
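/*
 * Clamp a dev_dax range to memory-block alignment; returns -ENOSPC when the
 * aligned range is empty and therefore too small to hotplug.
 */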
static int dax_kmem_range(struct dev_dax *dev_dax, int i, struct range *r)
{
struct dev_dax_range *dax_range = &dev_dax->ranges[i];
struct range *range = &dax_range->range;
/* memory-block align the hotplug range */
r->start = ALIGN(range->start, memory_block_size_bytes());
r->end = ALIGN_DOWN(range->end + 1, memory_block_size_bytes()) - 1;
if (r->start >= r->end) {
r->start = range->start;
r->end = range->end;
return -ENOSPC;
}
return 0;
}
struct dax_kmem_data {
const char *res_name;
int mgid;
struct resource *res[];
};
static struct memory_dev_type *dax_slowmem_type;
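/*
 * Hotplug every usable range of the device as driver-managed "System RAM
 * (kmem)" in a dedicated memory group on the device's target node.
 */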
static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
{
struct device *dev = &dev_dax->dev;
unsigned long total_len = 0;
struct dax_kmem_data *data;
int i, rc, mapped = 0;
int numa_node;
/*
* Ensure good NUMA information for the persistent memory.
* Without this check, there is a risk that slow memory
* could be mixed in a node with faster memory, causing
* unavoidable performance issues.
*/
numa_node = dev_dax->target_node;
if (numa_node < 0) {
dev_warn(dev, "rejecting DAX region with invalid node: %d\n",
numa_node);
return -EINVAL;
}
for (i = 0; i < dev_dax->nr_range; i++) {
struct range range;
rc = dax_kmem_range(dev_dax, i, &range);
if (rc) {
dev_info(dev, "mapping%d: %#llx-%#llx too small after alignment\n",
i, range.start, range.end);
continue;
}
total_len += range_len(&range);
}
if (!total_len) {
dev_warn(dev, "rejecting DAX region without any memory after alignment\n");
return -EINVAL;
}
init_node_memory_type(numa_node, dax_slowmem_type);
rc = -ENOMEM;
data = kzalloc(struct_size(data, res, dev_dax->nr_range), GFP_KERNEL);
if (!data)
goto err_dax_kmem_data;
data->res_name = kstrdup(dev_name(dev), GFP_KERNEL);
if (!data->res_name)
goto err_res_name;
rc = memory_group_register_static(numa_node, PFN_UP(total_len));
if (rc < 0)
goto err_reg_mgid;
data->mgid = rc;
for (i = 0; i < dev_dax->nr_range; i++) {
struct resource *res;
struct range range;
rc = dax_kmem_range(dev_dax, i, &range);
if (rc)
continue;
/* Region is permanently reserved if hotremove fails. */
res = request_mem_region(range.start, range_len(&range), data->res_name);
if (!res) {
dev_warn(dev, "mapping%d: %#llx-%#llx could not reserve region\n",
i, range.start, range.end);
/*
* Once some memory has been onlined we can't
* assume that it can be un-onlined safely.
*/
if (mapped)
continue;
rc = -EBUSY;
goto err_request_mem;
}
data->res[i] = res;
/*
* Set flags appropriate for System RAM. Leave ..._BUSY clear
* so that add_memory() can add a child resource. Do not
* inherit flags from the parent since it may set new flags
* unknown to us that will break add_memory() below.
*/
res->flags = IORESOURCE_SYSTEM_RAM;
/*
* Ensure that future kexec'd kernels will not treat
* this as RAM automatically.
*/
rc = add_memory_driver_managed(data->mgid, range.start,
range_len(&range), kmem_name, MHP_NID_IS_MGID);
if (rc) {
dev_warn(dev, "mapping%d: %#llx-%#llx memory add failed\n",
i, range.start, range.end);
remove_resource(res);
kfree(res);
data->res[i] = NULL;
if (mapped)
continue;
goto err_request_mem;
}
mapped++;
}
dev_set_drvdata(dev, data);
return 0;
err_request_mem:
memory_group_unregister(data->mgid);
err_reg_mgid:
kfree(data->res_name);
err_res_name:
kfree(data);
err_dax_kmem_data:
clear_node_memory_type(numa_node, dax_slowmem_type);
return rc;
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
{
int i, success = 0;
int node = dev_dax->target_node;
struct device *dev = &dev_dax->dev;
struct dax_kmem_data *data = dev_get_drvdata(dev);
/*
* We have one shot at removing memory: if some memory blocks were not
* offlined prior to calling this function, remove_memory() will fail, and
* there is no way to hotremove this memory until reboot because device
* unbind will succeed even if we return failure.
*/
for (i = 0; i < dev_dax->nr_range; i++) {
struct range range;
int rc;
rc = dax_kmem_range(dev_dax, i, &range);
if (rc)
continue;
rc = remove_memory(range.start, range_len(&range));
if (rc == 0) {
remove_resource(data->res[i]);
kfree(data->res[i]);
data->res[i] = NULL;
success++;
continue;
}
any_hotremove_failed = true;
dev_err(dev,
"mapping%d: %#llx-%#llx cannot be hotremoved until the next reboot\n",
i, range.start, range.end);
}
if (success >= dev_dax->nr_range) {
memory_group_unregister(data->mgid);
kfree(data->res_name);
kfree(data);
dev_set_drvdata(dev, NULL);
/*
* Clear the memtype association on successful unplug.
* If not, we have memory blocks left which can be
* offlined/onlined later. We need to keep memory_dev_type
* for that. This implies this reference will be around
* till next reboot.
*/
clear_node_memory_type(node, dax_slowmem_type);
}
}
#else
static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
{
/*
* Without hotremove purposely leak the request_mem_region() for the
* device-dax range and return '0' to ->remove() attempts. The removal
* of the device from the driver always succeeds, but the region is
* permanently pinned as reserved by the unreleased
* request_mem_region().
*/
any_hotremove_failed = true;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
static struct dax_device_driver device_dax_kmem_driver = {
.probe = dev_dax_kmem_probe,
.remove = dev_dax_kmem_remove,
.type = DAXDRV_KMEM_TYPE,
};
static int __init dax_kmem_init(void)
{
int rc;
/* Resource name is permanently allocated if any hotremove fails. */
kmem_name = kstrdup_const("System RAM (kmem)", GFP_KERNEL);
if (!kmem_name)
return -ENOMEM;
dax_slowmem_type = alloc_memory_type(MEMTIER_DEFAULT_DAX_ADISTANCE);
if (IS_ERR(dax_slowmem_type)) {
rc = PTR_ERR(dax_slowmem_type);
goto err_dax_slowmem_type;
}
rc = dax_driver_register(&device_dax_kmem_driver);
if (rc)
goto error_dax_driver;
return rc;
error_dax_driver:
put_memory_type(dax_slowmem_type);
err_dax_slowmem_type:
kfree_const(kmem_name);
return rc;
}
static void __exit dax_kmem_exit(void)
{
dax_driver_unregister(&device_dax_kmem_driver);
if (!any_hotremove_failed)
kfree_const(kmem_name);
put_memory_type(dax_slowmem_type);
}
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
module_init(dax_kmem_init);
module_exit(dax_kmem_exit);
MODULE_ALIAS_DAX_DEVICE(0);
| linux-master | drivers/dax/kmem.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
#include <linux/percpu-refcount.h>
#include <linux/memremap.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/nd.h>
#include "../bus.h"
| linux-master | drivers/dax/pmem/pmem.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/dax.h>
#include "../bus.h"
static bool region_idle;
module_param_named(region_idle, region_idle, bool, 0644);
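/*
 * Register one Soft Reserved range (passed as platform data) as a dynamic
 * dax_region; unless region_idle is set, the whole range is published with
 * the kmem flag so dax_kmem picks it up by default.
 */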
static int dax_hmem_probe(struct platform_device *pdev)
{
unsigned long flags = IORESOURCE_DAX_KMEM;
struct device *dev = &pdev->dev;
struct dax_region *dax_region;
struct memregion_info *mri;
struct dev_dax_data data;
/*
* @region_idle == true indicates that an administrative agent
* wants to manipulate the range partitioning before the devices
* are created, so do not send them to the dax_kmem driver by
* default.
*/
if (region_idle)
flags = 0;
mri = dev->platform_data;
dax_region = alloc_dax_region(dev, pdev->id, &mri->range,
mri->target_node, PMD_SIZE, flags);
if (!dax_region)
return -ENOMEM;
data = (struct dev_dax_data) {
.dax_region = dax_region,
.id = -1,
.size = region_idle ? 0 : range_len(&mri->range),
};
return PTR_ERR_OR_ZERO(devm_create_dev_dax(&data));
}
static struct platform_driver dax_hmem_driver = {
.probe = dax_hmem_probe,
.driver = {
.name = "hmem",
},
};
static void release_memregion(void *data)
{
memregion_free((long) data);
}
static void release_hmem(void *pdev)
{
platform_device_unregister(pdev);
}
static int hmem_register_device(struct device *host, int target_nid,
const struct resource *res)
{
struct platform_device *pdev;
struct memregion_info info;
long id;
int rc;
if (IS_ENABLED(CONFIG_CXL_REGION) &&
region_intersects(res->start, resource_size(res), IORESOURCE_MEM,
IORES_DESC_CXL) != REGION_DISJOINT) {
dev_dbg(host, "deferring range to CXL: %pr\n", res);
return 0;
}
rc = region_intersects(res->start, resource_size(res), IORESOURCE_MEM,
IORES_DESC_SOFT_RESERVED);
if (rc != REGION_INTERSECTS)
return 0;
id = memregion_alloc(GFP_KERNEL);
if (id < 0) {
dev_err(host, "memregion allocation failure for %pr\n", res);
return -ENOMEM;
}
rc = devm_add_action_or_reset(host, release_memregion, (void *) id);
if (rc)
return rc;
pdev = platform_device_alloc("hmem", id);
if (!pdev) {
dev_err(host, "device allocation failure for %pr\n", res);
return -ENOMEM;
}
pdev->dev.numa_node = numa_map_to_online_node(target_nid);
info = (struct memregion_info) {
.target_node = target_nid,
.range = {
.start = res->start,
.end = res->end,
},
};
rc = platform_device_add_data(pdev, &info, sizeof(info));
if (rc < 0) {
dev_err(host, "memregion_info allocation failure for %pr\n",
res);
goto out_put;
}
rc = platform_device_add(pdev);
if (rc < 0) {
dev_err(host, "%s add failed for %pr\n", dev_name(&pdev->dev),
res);
goto out_put;
}
return devm_add_action_or_reset(host, release_hmem, pdev);
out_put:
platform_device_put(pdev);
return rc;
}
static int dax_hmem_platform_probe(struct platform_device *pdev)
{
return walk_hmem_resources(&pdev->dev, hmem_register_device);
}
static struct platform_driver dax_hmem_platform_driver = {
.probe = dax_hmem_platform_probe,
.driver = {
.name = "hmem_platform",
},
};
static __init int dax_hmem_init(void)
{
int rc;
rc = platform_driver_register(&dax_hmem_platform_driver);
if (rc)
return rc;
rc = platform_driver_register(&dax_hmem_driver);
if (rc)
platform_driver_unregister(&dax_hmem_platform_driver);
return rc;
}
static __exit void dax_hmem_exit(void)
{
platform_driver_unregister(&dax_hmem_driver);
platform_driver_unregister(&dax_hmem_platform_driver);
}
module_init(dax_hmem_init);
module_exit(dax_hmem_exit);
/* Allow for CXL to define its own dax regions */
#if IS_ENABLED(CONFIG_CXL_REGION)
#if IS_MODULE(CONFIG_CXL_ACPI)
MODULE_SOFTDEP("pre: cxl_acpi");
#endif
#endif
MODULE_ALIAS("platform:hmem*");
MODULE_ALIAS("platform:hmem_platform*");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
| linux-master | drivers/dax/hmem/hmem.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/module.h>
#include <linux/dax.h>
#include <linux/mm.h>
static bool nohmem;
module_param_named(disable, nohmem, bool, 0444);
static bool platform_initialized;
static DEFINE_MUTEX(hmem_resource_lock);
static struct resource hmem_active = {
.name = "HMEM devices",
.start = 0,
.end = -1,
.flags = IORESOURCE_MEM,
};
int walk_hmem_resources(struct device *host, walk_hmem_fn fn)
{
struct resource *res;
int rc = 0;
mutex_lock(&hmem_resource_lock);
for (res = hmem_active.child; res; res = res->sibling) {
rc = fn(host, (int) res->desc, res);
if (rc)
break;
}
mutex_unlock(&hmem_resource_lock);
return rc;
}
EXPORT_SYMBOL_GPL(walk_hmem_resources);
static void __hmem_register_resource(int target_nid, struct resource *res)
{
struct platform_device *pdev;
struct resource *new;
int rc;
new = __request_region(&hmem_active, res->start, resource_size(res), "",
0);
if (!new) {
pr_debug("hmem range %pr already active\n", res);
return;
}
new->desc = target_nid;
if (platform_initialized)
return;
pdev = platform_device_alloc("hmem_platform", 0);
if (!pdev) {
pr_err_once("failed to register device-dax hmem_platform device\n");
return;
}
rc = platform_device_add(pdev);
if (rc)
platform_device_put(pdev);
else
platform_initialized = true;
}
void hmem_register_resource(int target_nid, struct resource *res)
{
if (nohmem)
return;
mutex_lock(&hmem_resource_lock);
__hmem_register_resource(target_nid, res);
mutex_unlock(&hmem_resource_lock);
}
static __init int hmem_register_one(struct resource *res, void *data)
{
hmem_register_resource(phys_to_target_node(res->start), res);
return 0;
}
static __init int hmem_init(void)
{
walk_iomem_res_desc(IORES_DESC_SOFT_RESERVED,
IORESOURCE_MEM, 0, -1, NULL, hmem_register_one);
return 0;
}
/*
* As this is a fallback for address ranges unclaimed by the ACPI HMAT
* parsing it must be at an initcall level greater than hmat_init().
*/
device_initcall(hmem_init);
| linux-master | drivers/dax/hmem/device.c |
// SPDX-License-Identifier: GPL-2.0
/*
* OF NUMA Parsing support.
*
* Copyright (C) 2015 - 2016 Cavium Inc.
*/
#define pr_fmt(fmt) "OF: NUMA: " fmt
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/nodemask.h>
#include <asm/numa.h>
/* define default numa node to 0 */
#define DEFAULT_NODE 0
/*
* Even though we connect cpus to numa domains later in SMP
* init, we need to know the node ids now for all cpus.
*/
static void __init of_numa_parse_cpu_nodes(void)
{
u32 nid;
int r;
struct device_node *np;
for_each_of_cpu_node(np) {
r = of_property_read_u32(np, "numa-node-id", &nid);
if (r)
continue;
pr_debug("CPU on %u\n", nid);
if (nid >= MAX_NUMNODES)
pr_warn("Node id %u exceeds maximum value\n", nid);
else
node_set(nid, numa_nodes_parsed);
}
}
static int __init of_numa_parse_memory_nodes(void)
{
struct device_node *np = NULL;
struct resource rsrc;
u32 nid;
int i, r;
for_each_node_by_type(np, "memory") {
r = of_property_read_u32(np, "numa-node-id", &nid);
if (r == -EINVAL)
/*
* property doesn't exist if -EINVAL, continue
* looking for more memory nodes with
* "numa-node-id" property
*/
continue;
if (nid >= MAX_NUMNODES) {
pr_warn("Node id %u exceeds maximum value\n", nid);
r = -EINVAL;
}
for (i = 0; !r && !of_address_to_resource(np, i, &rsrc); i++)
r = numa_add_memblk(nid, rsrc.start, rsrc.end + 1);
if (!i || r) {
of_node_put(np);
pr_err("bad property in memory node\n");
return r ? : -EINVAL;
}
}
return 0;
}
static int __init of_numa_parse_distance_map_v1(struct device_node *map)
{
const __be32 *matrix;
int entry_count;
int i;
pr_info("parsing numa-distance-map-v1\n");
matrix = of_get_property(map, "distance-matrix", NULL);
if (!matrix) {
pr_err("No distance-matrix property in distance-map\n");
return -EINVAL;
}
entry_count = of_property_count_u32_elems(map, "distance-matrix");
if (entry_count <= 0) {
pr_err("Invalid distance-matrix\n");
return -EINVAL;
}
for (i = 0; i + 2 < entry_count; i += 3) {
u32 nodea, nodeb, distance;
nodea = of_read_number(matrix, 1);
matrix++;
nodeb = of_read_number(matrix, 1);
matrix++;
distance = of_read_number(matrix, 1);
matrix++;
if ((nodea == nodeb && distance != LOCAL_DISTANCE) ||
(nodea != nodeb && distance <= LOCAL_DISTANCE)) {
pr_err("Invalid distance[node%d -> node%d] = %d\n",
nodea, nodeb, distance);
return -EINVAL;
}
node_set(nodea, numa_nodes_parsed);
numa_set_distance(nodea, nodeb, distance);
/* Set default distance of node B->A same as A->B */
if (nodeb > nodea)
numa_set_distance(nodeb, nodea, distance);
}
return 0;
}
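/*
 * Hedged illustration (not part of the upstream file): a device tree
 * fragment of the form accepted by the parser above. Node ids and
 * distances are made up for the example; each entry is a
 * <nodeA nodeB distance> triplet, and the B->A direction is mirrored
 * automatically when only A->B is given.
 *
 *	distance-map {
 *		compatible = "numa-distance-map-v1";
 *		distance-matrix = <0 0 10>,
 *				  <0 1 20>,
 *				  <1 1 10>;
 *	};
 */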
static int __init of_numa_parse_distance_map(void)
{
int ret = 0;
struct device_node *np;
np = of_find_compatible_node(NULL, NULL,
"numa-distance-map-v1");
if (np)
ret = of_numa_parse_distance_map_v1(np);
of_node_put(np);
return ret;
}
int of_node_to_nid(struct device_node *device)
{
struct device_node *np;
u32 nid;
int r = -ENODATA;
np = of_node_get(device);
while (np) {
r = of_property_read_u32(np, "numa-node-id", &nid);
/*
* -EINVAL indicates the property was not found, and
* we walk up the tree trying to find a parent with a
* "numa-node-id". Any other type of error indicates
* a bad device tree and we give up.
*/
if (r != -EINVAL)
break;
np = of_get_next_parent(np);
}
if (np && r)
pr_warn("Invalid \"numa-node-id\" property in node %pOFn\n",
np);
of_node_put(np);
/*
* If numa=off passed on command line, or with a defective
* device tree, the nid may not be in the set of possible
* nodes. Check for this case and return NUMA_NO_NODE.
*/
if (!r && nid < MAX_NUMNODES && node_possible(nid))
return nid;
return NUMA_NO_NODE;
}
int __init of_numa_init(void)
{
int r;
of_numa_parse_cpu_nodes();
r = of_numa_parse_memory_nodes();
if (r)
return r;
return of_numa_parse_distance_map();
}
| linux-master | drivers/of/of_numa.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Derived from arch/i386/kernel/irq.c
* Copyright (C) 1992 Linus Torvalds
* Adapted from arch/i386 by Gary Thomas
* Copyright (C) 1995-1996 Gary Thomas ([email protected])
* Updated and modified by Cort Dougan <[email protected]>
* Copyright (C) 1996-2001 Cort Dougan
* Adapted for Power Macintosh by Paul Mackerras
* Copyright (C) 1996 Paul Mackerras ([email protected])
*
 * This file contains the code used to translate IRQ descriptions in the
 * device tree into actual irq numbers on an interrupt controller
 * driver.
*/
#define pr_fmt(fmt) "OF: " fmt
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/string.h>
#include <linux/slab.h>
/**
* irq_of_parse_and_map - Parse and map an interrupt into linux virq space
* @dev: Device node of the device whose interrupt is to be mapped
* @index: Index of the interrupt to map
*
* This function is a wrapper that chains of_irq_parse_one() and
 * irq_create_of_mapping() to make things easier for callers
*/
unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
{
struct of_phandle_args oirq;
if (of_irq_parse_one(dev, index, &oirq))
return 0;
return irq_create_of_mapping(&oirq);
}
EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
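/*
 * Hedged usage sketch (not part of the upstream file): a typical consumer
 * maps its first interrupt and then requests it. The my_example_* names and
 * the "my-example" string are hypothetical, and the sketch assumes
 * <linux/interrupt.h> for request_irq()/irqreturn_t.
 */
#if 0
static irqreturn_t my_example_irq_handler(int irq, void *data);

static int my_example_setup_irq(struct device_node *np, void *priv)
{
        unsigned int virq = irq_of_parse_and_map(np, 0);

        if (!virq)
                return -EINVAL; /* parse or mapping failure */

        return request_irq(virq, my_example_irq_handler, 0, "my-example", priv);
}
#endif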
/**
* of_irq_find_parent - Given a device node, find its interrupt parent node
* @child: pointer to device node
*
* Return: A pointer to the interrupt parent node, or NULL if the interrupt
* parent could not be determined.
*/
struct device_node *of_irq_find_parent(struct device_node *child)
{
struct device_node *p;
phandle parent;
if (!of_node_get(child))
return NULL;
do {
if (of_property_read_u32(child, "interrupt-parent", &parent)) {
p = of_get_parent(child);
} else {
if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
p = of_node_get(of_irq_dflt_pic);
else
p = of_find_node_by_phandle(parent);
}
of_node_put(child);
child = p;
} while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL);
return p;
}
EXPORT_SYMBOL_GPL(of_irq_find_parent);
/*
* These interrupt controllers abuse interrupt-map for unspeakable
* reasons and rely on the core code to *ignore* it (the drivers do
* their own parsing of the property).
*
* If you think of adding to the list for something *new*, think
* again. There is a high chance that you will be sent back to the
* drawing board.
*/
static const char * const of_irq_imap_abusers[] = {
"CBEA,platform-spider-pic",
"sti,platform-spider-pic",
"realtek,rtl-intc",
"fsl,ls1021a-extirq",
"fsl,ls1043a-extirq",
"fsl,ls1088a-extirq",
"renesas,rza1-irqc",
NULL,
};
/**
* of_irq_parse_raw - Low level interrupt tree parsing
* @addr: address specifier (start of "reg" property of the device) in be32 format
* @out_irq: structure of_phandle_args updated by this function
*
* This function is a low-level interrupt tree walking function. It
 * can be used to do a partial walk with synthesized reg and interrupts
* properties, for example when resolving PCI interrupts when no device
* node exist for the parent. It takes an interrupt specifier structure as
* input, walks the tree looking for any interrupt-map properties, translates
* the specifier for each map, and then returns the translated map.
*
* Return: 0 on success and a negative number on error
*/
int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
{
struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
__be32 initial_match_array[MAX_PHANDLE_ARGS];
const __be32 *match_array = initial_match_array;
const __be32 *tmp, *imap, *imask, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
int imaplen, match, i, rc = -EINVAL;
#ifdef DEBUG
of_print_phandle_args("of_irq_parse_raw: ", out_irq);
#endif
ipar = of_node_get(out_irq->np);
/* First get the #interrupt-cells property of the current cursor
* that tells us how to interpret the passed-in intspec. If there
* is none, we are nice and just walk up the tree
*/
do {
if (!of_property_read_u32(ipar, "#interrupt-cells", &intsize))
break;
tnode = ipar;
ipar = of_irq_find_parent(ipar);
of_node_put(tnode);
} while (ipar);
if (ipar == NULL) {
pr_debug(" -> no parent found !\n");
goto fail;
}
pr_debug("of_irq_parse_raw: ipar=%pOF, size=%d\n", ipar, intsize);
if (out_irq->args_count != intsize)
goto fail;
/* Look for this #address-cells. We have to implement the old linux
* trick of looking for the parent here as some device-trees rely on it
*/
old = of_node_get(ipar);
do {
tmp = of_get_property(old, "#address-cells", NULL);
tnode = of_get_parent(old);
of_node_put(old);
old = tnode;
} while (old && tmp == NULL);
of_node_put(old);
old = NULL;
addrsize = (tmp == NULL) ? 2 : be32_to_cpu(*tmp);
pr_debug(" -> addrsize=%d\n", addrsize);
/* Range check so that the temporary buffer doesn't overflow */
if (WARN_ON(addrsize + intsize > MAX_PHANDLE_ARGS)) {
rc = -EFAULT;
goto fail;
}
/* Precalculate the match array - this simplifies match loop */
for (i = 0; i < addrsize; i++)
initial_match_array[i] = addr ? addr[i] : 0;
for (i = 0; i < intsize; i++)
initial_match_array[addrsize + i] = cpu_to_be32(out_irq->args[i]);
/* Now start the actual "proper" walk of the interrupt tree */
while (ipar != NULL) {
/*
* Now check if cursor is an interrupt-controller and
* if it is then we are done, unless there is an
* interrupt-map which takes precedence except on one
* of these broken platforms that want to parse
* interrupt-map themselves for $reason.
*/
bool intc = of_property_read_bool(ipar, "interrupt-controller");
imap = of_get_property(ipar, "interrupt-map", &imaplen);
if (intc &&
(!imap || of_device_compatible_match(ipar, of_irq_imap_abusers))) {
pr_debug(" -> got it !\n");
return 0;
}
/*
* interrupt-map parsing does not work without a reg
* property when #address-cells != 0
*/
if (addrsize && !addr) {
pr_debug(" -> no reg passed in when needed !\n");
goto fail;
}
/* No interrupt map, check for an interrupt parent */
if (imap == NULL) {
pr_debug(" -> no map, getting parent\n");
newpar = of_irq_find_parent(ipar);
goto skiplevel;
}
imaplen /= sizeof(u32);
/* Look for a mask */
imask = of_get_property(ipar, "interrupt-map-mask", NULL);
if (!imask)
imask = dummy_imask;
/* Parse interrupt-map */
match = 0;
while (imaplen > (addrsize + intsize + 1) && !match) {
/* Compare specifiers */
match = 1;
for (i = 0; i < (addrsize + intsize); i++, imaplen--)
match &= !((match_array[i] ^ *imap++) & imask[i]);
pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen);
/* Get the interrupt parent */
if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
newpar = of_node_get(of_irq_dflt_pic);
else
newpar = of_find_node_by_phandle(be32_to_cpup(imap));
imap++;
--imaplen;
/* Check if not found */
if (newpar == NULL) {
pr_debug(" -> imap parent not found !\n");
goto fail;
}
if (!of_device_is_available(newpar))
match = 0;
/* Get #interrupt-cells and #address-cells of new
* parent
*/
if (of_property_read_u32(newpar, "#interrupt-cells",
&newintsize)) {
pr_debug(" -> parent lacks #interrupt-cells!\n");
goto fail;
}
if (of_property_read_u32(newpar, "#address-cells",
&newaddrsize))
newaddrsize = 0;
pr_debug(" -> newintsize=%d, newaddrsize=%d\n",
newintsize, newaddrsize);
/* Check for malformed properties */
if (WARN_ON(newaddrsize + newintsize > MAX_PHANDLE_ARGS)
|| (imaplen < (newaddrsize + newintsize))) {
rc = -EFAULT;
goto fail;
}
imap += newaddrsize + newintsize;
imaplen -= newaddrsize + newintsize;
pr_debug(" -> imaplen=%d\n", imaplen);
}
if (!match) {
if (intc) {
/*
* The PASEMI Nemo is a known offender, so
* let's only warn for anyone else.
*/
WARN(!IS_ENABLED(CONFIG_PPC_PASEMI),
"%pOF interrupt-map failed, using interrupt-controller\n",
ipar);
return 0;
}
goto fail;
}
/*
* Successfully parsed an interrupt-map translation; copy new
* interrupt specifier into the out_irq structure
*/
match_array = imap - newaddrsize - newintsize;
for (i = 0; i < newintsize; i++)
out_irq->args[i] = be32_to_cpup(imap - newintsize + i);
out_irq->args_count = intsize = newintsize;
addrsize = newaddrsize;
if (ipar == newpar) {
pr_debug("%pOF interrupt-map entry to self\n", ipar);
return 0;
}
skiplevel:
/* Iterate again with new parent */
out_irq->np = newpar;
pr_debug(" -> new parent: %pOF\n", newpar);
of_node_put(ipar);
ipar = newpar;
newpar = NULL;
}
rc = -ENOENT; /* No interrupt-map found */
fail:
of_node_put(ipar);
of_node_put(newpar);
return rc;
}
EXPORT_SYMBOL_GPL(of_irq_parse_raw);
/**
* of_irq_parse_one - Resolve an interrupt for a device
* @device: the device whose interrupt is to be resolved
* @index: index of the interrupt to resolve
* @out_irq: structure of_phandle_args filled by this function
*
* This function resolves an interrupt for a node by walking the interrupt tree,
* finding which interrupt controller node it is attached to, and returning the
* interrupt specifier that can be used to retrieve a Linux IRQ number.
*/
int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_args *out_irq)
{
struct device_node *p;
const __be32 *addr;
u32 intsize;
int i, res;
pr_debug("of_irq_parse_one: dev=%pOF, index=%d\n", device, index);
/* OldWorld mac stuff is "special", handle out of line */
if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
return of_irq_parse_oldworld(device, index, out_irq);
/* Get the reg property (if any) */
addr = of_get_property(device, "reg", NULL);
/* Try the new-style interrupts-extended first */
res = of_parse_phandle_with_args(device, "interrupts-extended",
"#interrupt-cells", index, out_irq);
if (!res)
return of_irq_parse_raw(addr, out_irq);
/* Look for the interrupt parent. */
p = of_irq_find_parent(device);
if (p == NULL)
return -EINVAL;
/* Get size of interrupt specifier */
if (of_property_read_u32(p, "#interrupt-cells", &intsize)) {
res = -EINVAL;
goto out;
}
pr_debug(" parent=%pOF, intsize=%d\n", p, intsize);
/* Copy intspec into irq structure */
out_irq->np = p;
out_irq->args_count = intsize;
for (i = 0; i < intsize; i++) {
res = of_property_read_u32_index(device, "interrupts",
(index * intsize) + i,
out_irq->args + i);
if (res)
goto out;
}
pr_debug(" intspec=%d\n", *out_irq->args);
/* Check if there are any interrupt-map translations to process */
res = of_irq_parse_raw(addr, out_irq);
out:
of_node_put(p);
return res;
}
EXPORT_SYMBOL_GPL(of_irq_parse_one);
/**
* of_irq_to_resource - Decode a node's IRQ and return it as a resource
* @dev: pointer to device tree node
* @index: zero-based index of the irq
* @r: pointer to resource structure to return result into.
*/
int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
{
int irq = of_irq_get(dev, index);
if (irq < 0)
return irq;
/* Only dereference the resource if both the
* resource and the irq are valid. */
if (r && irq) {
const char *name = NULL;
memset(r, 0, sizeof(*r));
/*
* Get optional "interrupt-names" property to add a name
* to the resource.
*/
of_property_read_string_index(dev, "interrupt-names", index,
&name);
r->start = r->end = irq;
r->flags = IORESOURCE_IRQ | irqd_get_trigger_type(irq_get_irq_data(irq));
r->name = name ? name : of_node_full_name(dev);
}
return irq;
}
EXPORT_SYMBOL_GPL(of_irq_to_resource);
/**
* of_irq_get - Decode a node's IRQ and return it as a Linux IRQ number
* @dev: pointer to device tree node
* @index: zero-based index of the IRQ
*
 * Return: Linux IRQ number on success, or 0 if the IRQ mapping failed, or
* -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case
* of any other failure.
*/
int of_irq_get(struct device_node *dev, int index)
{
int rc;
struct of_phandle_args oirq;
struct irq_domain *domain;
rc = of_irq_parse_one(dev, index, &oirq);
if (rc)
return rc;
domain = irq_find_host(oirq.np);
if (!domain) {
rc = -EPROBE_DEFER;
goto out;
}
rc = irq_create_of_mapping(&oirq);
out:
of_node_put(oirq.np);
return rc;
}
EXPORT_SYMBOL_GPL(of_irq_get);
/**
* of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number
* @dev: pointer to device tree node
* @name: IRQ name
*
 * Return: Linux IRQ number on success, or 0 if the IRQ mapping failed, or
* -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case
* of any other failure.
*/
int of_irq_get_byname(struct device_node *dev, const char *name)
{
int index;
if (unlikely(!name))
return -EINVAL;
index = of_property_match_string(dev, "interrupt-names", name);
if (index < 0)
return index;
return of_irq_get(dev, index);
}
EXPORT_SYMBOL_GPL(of_irq_get_byname);
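/*
 * Hedged usage sketch (not part of the upstream file): looking up an IRQ by
 * the name listed in "interrupt-names" and propagating probe deferral when
 * the parent irqchip is not ready yet. The "wakeup" name is hypothetical.
 */
#if 0
static int my_example_get_wakeup_irq(struct device_node *np)
{
        int irq = of_irq_get_byname(np, "wakeup");

        if (irq == -EPROBE_DEFER)
                return irq;     /* parent irq domain not created yet */
        if (irq < 0)
                return irq;     /* "wakeup" missing or malformed */
        if (!irq)
                return -ENODEV; /* mapping failed */
        return irq;
}
#endif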
/**
* of_irq_count - Count the number of IRQs a node uses
* @dev: pointer to device tree node
*/
int of_irq_count(struct device_node *dev)
{
struct of_phandle_args irq;
int nr = 0;
while (of_irq_parse_one(dev, nr, &irq) == 0)
nr++;
return nr;
}
/**
* of_irq_to_resource_table - Fill in resource table with node's IRQ info
* @dev: pointer to device tree node
* @res: array of resources to fill in
* @nr_irqs: the number of IRQs (and upper bound for num of @res elements)
*
* Return: The size of the filled in table (up to @nr_irqs).
*/
int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
int nr_irqs)
{
int i;
for (i = 0; i < nr_irqs; i++, res++)
if (of_irq_to_resource(dev, i, res) <= 0)
break;
return i;
}
EXPORT_SYMBOL_GPL(of_irq_to_resource_table);
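/*
 * Hedged usage sketch (not part of the upstream file): counting a node's
 * interrupts and filling a fixed-size resource table with them. The array
 * size of 8 is arbitrary and chosen only for the example.
 */
#if 0
static int my_example_collect_irqs(struct device_node *np)
{
        struct resource res[8];
        int nr = of_irq_count(np);

        if (nr <= 0)
                return nr;
        if (nr > ARRAY_SIZE(res))
                nr = ARRAY_SIZE(res);

        return of_irq_to_resource_table(np, res, nr);
}
#endif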
struct of_intc_desc {
struct list_head list;
of_irq_init_cb_t irq_init_cb;
struct device_node *dev;
struct device_node *interrupt_parent;
};
/**
* of_irq_init - Scan and init matching interrupt controllers in DT
* @matches: 0 terminated array of nodes to match and init function to call
*
* This function scans the device tree for matching interrupt controller nodes,
* and calls their initialization functions in order with parents first.
*/
void __init of_irq_init(const struct of_device_id *matches)
{
const struct of_device_id *match;
struct device_node *np, *parent = NULL;
struct of_intc_desc *desc, *temp_desc;
struct list_head intc_desc_list, intc_parent_list;
INIT_LIST_HEAD(&intc_desc_list);
INIT_LIST_HEAD(&intc_parent_list);
for_each_matching_node_and_match(np, matches, &match) {
if (!of_property_read_bool(np, "interrupt-controller") ||
!of_device_is_available(np))
continue;
if (WARN(!match->data, "of_irq_init: no init function for %s\n",
match->compatible))
continue;
/*
* Here, we allocate and populate an of_intc_desc with the node
* pointer, interrupt-parent device_node etc.
*/
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc) {
of_node_put(np);
goto err;
}
desc->irq_init_cb = match->data;
desc->dev = of_node_get(np);
/*
* interrupts-extended can reference multiple parent domains.
* Arbitrarily pick the first one; assume any other parents
* are the same distance away from the root irq controller.
*/
desc->interrupt_parent = of_parse_phandle(np, "interrupts-extended", 0);
if (!desc->interrupt_parent)
desc->interrupt_parent = of_irq_find_parent(np);
if (desc->interrupt_parent == np) {
of_node_put(desc->interrupt_parent);
desc->interrupt_parent = NULL;
}
list_add_tail(&desc->list, &intc_desc_list);
}
/*
* The root irq controller is the one without an interrupt-parent.
* That one goes first, followed by the controllers that reference it,
* followed by the ones that reference the 2nd level controllers, etc.
*/
while (!list_empty(&intc_desc_list)) {
/*
* Process all controllers with the current 'parent'.
* First pass will be looking for NULL as the parent.
* The assumption is that NULL parent means a root controller.
*/
list_for_each_entry_safe(desc, temp_desc, &intc_desc_list, list) {
int ret;
if (desc->interrupt_parent != parent)
continue;
list_del(&desc->list);
of_node_set_flag(desc->dev, OF_POPULATED);
pr_debug("of_irq_init: init %pOF (%p), parent %p\n",
desc->dev,
desc->dev, desc->interrupt_parent);
ret = desc->irq_init_cb(desc->dev,
desc->interrupt_parent);
if (ret) {
pr_err("%s: Failed to init %pOF (%p), parent %p\n",
__func__, desc->dev, desc->dev,
desc->interrupt_parent);
of_node_clear_flag(desc->dev, OF_POPULATED);
kfree(desc);
continue;
}
/*
* This one is now set up; add it to the parent list so
* its children can get processed in a subsequent pass.
*/
list_add_tail(&desc->list, &intc_parent_list);
}
/* Get the next pending parent that might have children */
desc = list_first_entry_or_null(&intc_parent_list,
typeof(*desc), list);
if (!desc) {
pr_err("of_irq_init: children remain, but no parents\n");
break;
}
list_del(&desc->list);
parent = desc->dev;
kfree(desc);
}
list_for_each_entry_safe(desc, temp_desc, &intc_parent_list, list) {
list_del(&desc->list);
kfree(desc);
}
err:
list_for_each_entry_safe(desc, temp_desc, &intc_desc_list, list) {
list_del(&desc->list);
of_node_put(desc->dev);
kfree(desc);
}
}
static u32 __of_msi_map_id(struct device *dev, struct device_node **np,
u32 id_in)
{
struct device *parent_dev;
u32 id_out = id_in;
/*
* Walk up the device parent links looking for one with a
* "msi-map" property.
*/
for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent)
if (!of_map_id(parent_dev->of_node, id_in, "msi-map",
"msi-map-mask", np, &id_out))
break;
return id_out;
}
/**
 * of_msi_map_id - Map an MSI ID for a device.
* @dev: device for which the mapping is to be done.
* @msi_np: device node of the expected msi controller.
* @id_in: unmapped MSI ID for the device.
*
* Walk up the device hierarchy looking for devices with a "msi-map"
* property. If found, apply the mapping to @id_in.
*
* Return: The mapped MSI ID.
*/
u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in)
{
return __of_msi_map_id(dev, &msi_np, id_in);
}
/**
* of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain
* @dev: device for which the mapping is to be done.
* @id: Device ID.
* @bus_token: Bus token
*
* Walk up the device hierarchy looking for devices with a "msi-map"
* property.
*
* Returns: the MSI domain for this device (or NULL on failure)
*/
struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 id,
u32 bus_token)
{
struct device_node *np = NULL;
__of_msi_map_id(dev, &np, id);
return irq_find_matching_host(np, bus_token);
}
/**
* of_msi_get_domain - Use msi-parent to find the relevant MSI domain
* @dev: device for which the domain is requested
* @np: device node for @dev
* @token: bus type for this domain
*
 * Parses the msi-parent property (both the simple and the complex
 * versions) and returns the corresponding MSI domain.
*
* Returns: the MSI domain for this device (or NULL on failure).
*/
struct irq_domain *of_msi_get_domain(struct device *dev,
struct device_node *np,
enum irq_domain_bus_token token)
{
struct device_node *msi_np;
struct irq_domain *d;
/* Check for a single msi-parent property */
msi_np = of_parse_phandle(np, "msi-parent", 0);
if (msi_np && !of_property_read_bool(msi_np, "#msi-cells")) {
d = irq_find_matching_host(msi_np, token);
if (!d)
of_node_put(msi_np);
return d;
}
if (token == DOMAIN_BUS_PLATFORM_MSI) {
/* Check for the complex msi-parent version */
struct of_phandle_args args;
int index = 0;
while (!of_parse_phandle_with_args(np, "msi-parent",
"#msi-cells",
index, &args)) {
d = irq_find_matching_host(args.np, token);
if (d)
return d;
of_node_put(args.np);
index++;
}
}
return NULL;
}
EXPORT_SYMBOL_GPL(of_msi_get_domain);
/**
* of_msi_configure - Set the msi_domain field of a device
* @dev: device structure to associate with an MSI irq domain
* @np: device node for that device
*/
void of_msi_configure(struct device *dev, struct device_node *np)
{
dev_set_msi_domain(dev,
of_msi_get_domain(dev, np, DOMAIN_BUS_PLATFORM_MSI));
}
EXPORT_SYMBOL_GPL(of_msi_configure);
| linux-master | drivers/of/irq.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* FDT Address translation based on u-boot fdt_support.c which in turn was
* based on the kernel unflattened DT address translation code.
*
* (C) Copyright 2007
* Gerald Van Baren, Custom IDEAS, [email protected]
*
* Copyright 2010-2011 Freescale Semiconductor, Inc.
*/
#define pr_fmt(fmt) "OF: fdt: " fmt
#include <linux/kernel.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/sizes.h>
/* Max address size we deal with */
#define OF_MAX_ADDR_CELLS 4
#define OF_CHECK_COUNTS(na, ns) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \
(ns) > 0)
/* Debug utility */
#ifdef DEBUG
static void __init of_dump_addr(const char *s, const __be32 *addr, int na)
{
pr_debug("%s", s);
while(na--)
pr_cont(" %08x", *(addr++));
pr_cont("\n");
}
#else
static void __init of_dump_addr(const char *s, const __be32 *addr, int na) { }
#endif
/* Callbacks for bus specific translators */
struct of_bus {
void (*count_cells)(const void *blob, int parentoffset,
int *addrc, int *sizec);
u64 (*map)(__be32 *addr, const __be32 *range,
int na, int ns, int pna);
int (*translate)(__be32 *addr, u64 offset, int na);
};
/* Default translator (generic bus) */
static void __init fdt_bus_default_count_cells(const void *blob, int parentoffset,
int *addrc, int *sizec)
{
const __be32 *prop;
if (addrc) {
prop = fdt_getprop(blob, parentoffset, "#address-cells", NULL);
if (prop)
*addrc = be32_to_cpup(prop);
else
*addrc = dt_root_addr_cells;
}
if (sizec) {
prop = fdt_getprop(blob, parentoffset, "#size-cells", NULL);
if (prop)
*sizec = be32_to_cpup(prop);
else
*sizec = dt_root_size_cells;
}
}
static u64 __init fdt_bus_default_map(__be32 *addr, const __be32 *range,
int na, int ns, int pna)
{
u64 cp, s, da;
cp = of_read_number(range, na);
s = of_read_number(range + na + pna, ns);
da = of_read_number(addr, na);
pr_debug("default map, cp=%llx, s=%llx, da=%llx\n",
cp, s, da);
if (da < cp || da >= (cp + s))
return OF_BAD_ADDR;
return da - cp;
}
static int __init fdt_bus_default_translate(__be32 *addr, u64 offset, int na)
{
u64 a = of_read_number(addr, na);
memset(addr, 0, na * 4);
a += offset;
if (na > 1)
addr[na - 2] = cpu_to_fdt32(a >> 32);
addr[na - 1] = cpu_to_fdt32(a & 0xffffffffu);
return 0;
}
/* Array of bus specific translators */
static const struct of_bus of_busses[] __initconst = {
/* Default */
{
.count_cells = fdt_bus_default_count_cells,
.map = fdt_bus_default_map,
.translate = fdt_bus_default_translate,
},
};
static int __init fdt_translate_one(const void *blob, int parent,
const struct of_bus *bus,
const struct of_bus *pbus, __be32 *addr,
int na, int ns, int pna, const char *rprop)
{
const __be32 *ranges;
int rlen;
int rone;
u64 offset = OF_BAD_ADDR;
ranges = fdt_getprop(blob, parent, rprop, &rlen);
if (!ranges)
return 1;
if (rlen == 0) {
offset = of_read_number(addr, na);
memset(addr, 0, pna * 4);
pr_debug("empty ranges, 1:1 translation\n");
goto finish;
}
pr_debug("walking ranges...\n");
/* Now walk through the ranges */
rlen /= 4;
rone = na + pna + ns;
for (; rlen >= rone; rlen -= rone, ranges += rone) {
offset = bus->map(addr, ranges, na, ns, pna);
if (offset != OF_BAD_ADDR)
break;
}
if (offset == OF_BAD_ADDR) {
pr_debug("not found !\n");
return 1;
}
memcpy(addr, ranges + na, 4 * pna);
finish:
of_dump_addr("parent translation for:", addr, pna);
pr_debug("with offset: %llx\n", offset);
/* Translate it into parent bus space */
return pbus->translate(addr, offset, pna);
}
/*
* Translate an address from the device-tree into a CPU physical address,
* this walks up the tree and applies the various bus mappings on the
* way.
*
* Note: We consider that crossing any level with #size-cells == 0 to mean
* that translation is impossible (that is we are not dealing with a value
* that can be mapped to a cpu physical address). This is not really specified
 * that way, but this is traditionally the way IBM at least does things
*/
static u64 __init fdt_translate_address(const void *blob, int node_offset)
{
int parent, len;
const struct of_bus *bus, *pbus;
const __be32 *reg;
__be32 addr[OF_MAX_ADDR_CELLS];
int na, ns, pna, pns;
u64 result = OF_BAD_ADDR;
pr_debug("** translation for device %s **\n",
fdt_get_name(blob, node_offset, NULL));
reg = fdt_getprop(blob, node_offset, "reg", &len);
if (!reg) {
pr_err("warning: device tree node '%s' has no address.\n",
fdt_get_name(blob, node_offset, NULL));
goto bail;
}
/* Get parent & match bus type */
parent = fdt_parent_offset(blob, node_offset);
if (parent < 0)
goto bail;
bus = &of_busses[0];
/* Count address cells & copy address locally */
bus->count_cells(blob, parent, &na, &ns);
if (!OF_CHECK_COUNTS(na, ns)) {
pr_err("Bad cell count for %s\n",
fdt_get_name(blob, node_offset, NULL));
goto bail;
}
memcpy(addr, reg, na * 4);
pr_debug("bus (na=%d, ns=%d) on %s\n",
na, ns, fdt_get_name(blob, parent, NULL));
of_dump_addr("translating address:", addr, na);
/* Translate */
for (;;) {
/* Switch to parent bus */
node_offset = parent;
parent = fdt_parent_offset(blob, node_offset);
/* If root, we have finished */
if (parent < 0) {
pr_debug("reached root node\n");
result = of_read_number(addr, na);
break;
}
/* Get new parent bus and counts */
pbus = &of_busses[0];
pbus->count_cells(blob, parent, &pna, &pns);
if (!OF_CHECK_COUNTS(pna, pns)) {
pr_err("Bad cell count for %s\n",
fdt_get_name(blob, node_offset, NULL));
break;
}
pr_debug("parent bus (na=%d, ns=%d) on %s\n",
pna, pns, fdt_get_name(blob, parent, NULL));
/* Apply bus translation */
if (fdt_translate_one(blob, node_offset, bus, pbus,
addr, na, ns, pna, "ranges"))
break;
/* Complete the move up one level */
na = pna;
ns = pns;
bus = pbus;
of_dump_addr("one level translation:", addr, na);
}
bail:
return result;
}
/**
* of_flat_dt_translate_address - translate DT addr into CPU phys addr
* @node: node in the flat blob
*/
u64 __init of_flat_dt_translate_address(unsigned long node)
{
return fdt_translate_address(initial_boot_params, node);
}
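/*
 * Hedged usage sketch (not part of the upstream file): early boot code that
 * walks the flat device tree can translate a node's first "reg" address to a
 * CPU physical address from an of_scan_flat_dt() callback. The callback name
 * is hypothetical.
 */
#if 0
static int __init my_example_scan_cb(unsigned long node, const char *uname,
                                     int depth, void *data)
{
        u64 phys = of_flat_dt_translate_address(node);

        if (phys != OF_BAD_ADDR)
                pr_info("%s at 0x%llx\n", uname, phys);

        return 0;       /* keep scanning */
}
#endif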
| linux-master | drivers/of/fdt_address.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/of.h>
/**
* of_get_cpu_hwid - Get the hardware ID from a CPU device node
*
 * @cpun: device node of the CPU whose hardware ID is required
* @thread: The local thread number to get the hardware ID for.
*
* Return: The hardware ID for the CPU node or ~0ULL if not found.
*/
u64 of_get_cpu_hwid(struct device_node *cpun, unsigned int thread)
{
const __be32 *cell;
int ac, len;
ac = of_n_addr_cells(cpun);
cell = of_get_property(cpun, "reg", &len);
if (!cell || !ac || ((sizeof(*cell) * ac * (thread + 1)) > len))
return ~0ULL;
cell += ac * thread;
return of_read_number(cell, ac);
}
/*
* arch_match_cpu_phys_id - Match the given logical CPU and physical id
*
* @cpu: logical cpu index of a core/thread
* @phys_id: physical identifier of a core/thread
*
* CPU logical to physical index mapping is architecture specific.
* However this __weak function provides a default match of physical
 * id to logical cpu index. The phys_id provided here is usually a value read
 * from the device tree, which must match the hardware's internal registers.
*
* Returns true if the physical identifier and the logical cpu index
* correspond to the same core/thread, false otherwise.
*/
bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
return (u32)phys_id == cpu;
}
/*
* Checks if the given "prop_name" property holds the physical id of the
* core/thread corresponding to the logical cpu 'cpu'. If 'thread' is not
* NULL, local thread number within the core is returned in it.
*/
static bool __of_find_n_match_cpu_property(struct device_node *cpun,
const char *prop_name, int cpu, unsigned int *thread)
{
const __be32 *cell;
int ac, prop_len, tid;
u64 hwid;
ac = of_n_addr_cells(cpun);
cell = of_get_property(cpun, prop_name, &prop_len);
if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0))
return true;
if (!cell || !ac)
return false;
prop_len /= sizeof(*cell) * ac;
for (tid = 0; tid < prop_len; tid++) {
hwid = of_read_number(cell, ac);
if (arch_match_cpu_phys_id(cpu, hwid)) {
if (thread)
*thread = tid;
return true;
}
cell += ac;
}
return false;
}
/*
* arch_find_n_match_cpu_physical_id - See if the given device node is
* for the cpu corresponding to logical cpu 'cpu'. Return true if so,
* else false. If 'thread' is non-NULL, the local thread number within the
* core is returned in it.
*/
bool __weak arch_find_n_match_cpu_physical_id(struct device_node *cpun,
int cpu, unsigned int *thread)
{
/* Check for non-standard "ibm,ppc-interrupt-server#s" property
* for thread ids on PowerPC. If it doesn't exist fallback to
* standard "reg" property.
*/
if (IS_ENABLED(CONFIG_PPC) &&
__of_find_n_match_cpu_property(cpun,
"ibm,ppc-interrupt-server#s",
cpu, thread))
return true;
return __of_find_n_match_cpu_property(cpun, "reg", cpu, thread);
}
/**
* of_get_cpu_node - Get device node associated with the given logical CPU
*
 * @cpu: CPU number (logical index) for which device node is required
* @thread: if not NULL, local thread number within the physical core is
* returned
*
* The main purpose of this function is to retrieve the device node for the
* given logical CPU index. It should be used to initialize the of_node in
* cpu device. Once of_node in cpu device is populated, all the further
* references can use that instead.
*
* CPU logical to physical index mapping is architecture specific and is built
* before booting secondary cores. This function uses arch_match_cpu_phys_id
* which can be overridden by architecture specific implementation.
*
* Return: A node pointer for the logical cpu with refcount incremented, use
* of_node_put() on it when done. Returns NULL if not found.
*/
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
struct device_node *cpun;
for_each_of_cpu_node(cpun) {
if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
return cpun;
}
return NULL;
}
EXPORT_SYMBOL(of_get_cpu_node);
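/*
 * Hedged usage sketch (not part of the upstream file): walking the possible
 * CPUs and printing each CPU's device node; the reference taken by
 * of_get_cpu_node() must be dropped with of_node_put().
 */
#if 0
static void my_example_dump_cpu_nodes(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct device_node *np = of_get_cpu_node(cpu, NULL);

                if (!np)
                        continue;
                pr_info("cpu%d -> %pOF\n", cpu, np);
                of_node_put(np);
        }
}
#endif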
/**
 * of_cpu_device_node_get - Get the CPU device_node for a given logical CPU number
*
* @cpu: The logical CPU number
*
* Return: Pointer to the device_node for CPU with its reference count
* incremented of the given logical CPU number or NULL if the CPU device_node
* is not found.
*/
struct device_node *of_cpu_device_node_get(int cpu)
{
struct device *cpu_dev;
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev)
return of_get_cpu_node(cpu, NULL);
return of_node_get(cpu_dev->of_node);
}
EXPORT_SYMBOL(of_cpu_device_node_get);
/**
 * of_cpu_node_to_id - Get the logical CPU number for a given device_node
*
* @cpu_node: Pointer to the device_node for CPU.
*
* Return: The logical CPU number of the given CPU device_node or -ENODEV if the
* CPU is not found.
*/
int of_cpu_node_to_id(struct device_node *cpu_node)
{
int cpu;
bool found = false;
struct device_node *np;
for_each_possible_cpu(cpu) {
np = of_cpu_device_node_get(cpu);
found = (cpu_node == np);
of_node_put(np);
if (found)
return cpu;
}
return -ENODEV;
}
EXPORT_SYMBOL(of_cpu_node_to_id);
/**
* of_get_cpu_state_node - Get CPU's idle state node at the given index
*
* @cpu_node: The device node for the CPU
* @index: The index in the list of the idle states
*
* Two generic methods can be used to describe a CPU's idle states, either via
* a flattened description through the "cpu-idle-states" binding or via the
* hierarchical layout, using the "power-domains" and the "domain-idle-states"
 * bindings. This function checks for both and returns the idle state node for
* the requested index.
*
* Return: An idle state node if found at @index. The refcount is incremented
* for it, so call of_node_put() on it when done. Returns NULL if not found.
*/
struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
int index)
{
struct of_phandle_args args;
int err;
err = of_parse_phandle_with_args(cpu_node, "power-domains",
"#power-domain-cells", 0, &args);
if (!err) {
struct device_node *state_node =
of_parse_phandle(args.np, "domain-idle-states", index);
of_node_put(args.np);
if (state_node)
return state_node;
}
return of_parse_phandle(cpu_node, "cpu-idle-states", index);
}
EXPORT_SYMBOL(of_get_cpu_state_node);
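/*
 * Hedged usage sketch (not part of the upstream file): iterating a CPU's
 * idle-state nodes regardless of whether they come from "cpu-idle-states"
 * or from the hierarchical "domain-idle-states" layout.
 */
#if 0
static int my_example_count_idle_states(struct device_node *cpu_node)
{
        struct device_node *state;
        int i = 0;

        while ((state = of_get_cpu_state_node(cpu_node, i))) {
                of_node_put(state);
                i++;
        }

        return i;       /* number of idle states described for this CPU */
}
#endif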
| linux-master | drivers/of/cpu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Functions for dealing with DT resolution
*
* Copyright (C) 2012 Pantelis Antoniou <[email protected]>
* Copyright (C) 2012 Texas Instruments Inc.
*/
#define pr_fmt(fmt) "OF: resolver: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include "of_private.h"
static phandle live_tree_max_phandle(void)
{
struct device_node *node;
phandle phandle;
unsigned long flags;
raw_spin_lock_irqsave(&devtree_lock, flags);
phandle = 0;
for_each_of_allnodes(node) {
if (node->phandle != OF_PHANDLE_ILLEGAL &&
node->phandle > phandle)
phandle = node->phandle;
}
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return phandle;
}
static void adjust_overlay_phandles(struct device_node *overlay,
int phandle_delta)
{
struct device_node *child;
struct property *prop;
phandle phandle;
/* adjust node's phandle in node */
if (overlay->phandle != 0 && overlay->phandle != OF_PHANDLE_ILLEGAL)
overlay->phandle += phandle_delta;
/* copy adjusted phandle into *phandle properties */
for_each_property_of_node(overlay, prop) {
if (of_prop_cmp(prop->name, "phandle") &&
of_prop_cmp(prop->name, "linux,phandle"))
continue;
if (prop->length < 4)
continue;
phandle = be32_to_cpup(prop->value);
if (phandle == OF_PHANDLE_ILLEGAL)
continue;
*(__be32 *)prop->value = cpu_to_be32(overlay->phandle);
}
for_each_child_of_node(overlay, child)
adjust_overlay_phandles(child, phandle_delta);
}
static int update_usages_of_a_phandle_reference(struct device_node *overlay,
struct property *prop_fixup, phandle phandle)
{
struct device_node *refnode;
struct property *prop;
char *value, *cur, *end, *node_path, *prop_name, *s;
int offset, len;
int err = 0;
value = kmemdup(prop_fixup->value, prop_fixup->length, GFP_KERNEL);
if (!value)
return -ENOMEM;
/* prop_fixup contains a list of tuples of path:property_name:offset */
end = value + prop_fixup->length;
for (cur = value; cur < end; cur += len + 1) {
len = strlen(cur);
node_path = cur;
s = strchr(cur, ':');
if (!s) {
err = -EINVAL;
goto err_fail;
}
*s++ = '\0';
prop_name = s;
s = strchr(s, ':');
if (!s) {
err = -EINVAL;
goto err_fail;
}
*s++ = '\0';
err = kstrtoint(s, 10, &offset);
if (err)
goto err_fail;
refnode = __of_find_node_by_full_path(of_node_get(overlay), node_path);
if (!refnode)
continue;
for_each_property_of_node(refnode, prop) {
if (!of_prop_cmp(prop->name, prop_name))
break;
}
of_node_put(refnode);
if (!prop) {
err = -ENOENT;
goto err_fail;
}
if (offset < 0 || offset + sizeof(__be32) > prop->length) {
err = -EINVAL;
goto err_fail;
}
*(__be32 *)(prop->value + offset) = cpu_to_be32(phandle);
}
err_fail:
kfree(value);
return err;
}
/* compare nodes taking into account that 'name' strips out the @ part */
static int node_name_cmp(const struct device_node *dn1,
const struct device_node *dn2)
{
const char *n1 = kbasename(dn1->full_name);
const char *n2 = kbasename(dn2->full_name);
return of_node_cmp(n1, n2);
}
/*
* Adjust the local phandle references by the given phandle delta.
*
* Subtree @local_fixups, which is overlay node __local_fixups__,
* mirrors the fragment node structure at the root of the overlay.
*
* For each property in the fragments that contains a phandle reference,
* @local_fixups has a property of the same name that contains a list
* of offsets of the phandle reference(s) within the respective property
* value(s). The values at these offsets will be fixed up.
*/
static int adjust_local_phandle_references(struct device_node *local_fixups,
struct device_node *overlay, int phandle_delta)
{
struct device_node *child, *overlay_child;
struct property *prop_fix, *prop;
int err, i, count;
unsigned int off;
if (!local_fixups)
return 0;
for_each_property_of_node(local_fixups, prop_fix) {
/* skip properties added automatically */
if (!of_prop_cmp(prop_fix->name, "name") ||
!of_prop_cmp(prop_fix->name, "phandle") ||
!of_prop_cmp(prop_fix->name, "linux,phandle"))
continue;
if ((prop_fix->length % 4) != 0 || prop_fix->length == 0)
return -EINVAL;
count = prop_fix->length / sizeof(__be32);
for_each_property_of_node(overlay, prop) {
if (!of_prop_cmp(prop->name, prop_fix->name))
break;
}
if (!prop)
return -EINVAL;
for (i = 0; i < count; i++) {
off = be32_to_cpu(((__be32 *)prop_fix->value)[i]);
if ((off + 4) > prop->length)
return -EINVAL;
be32_add_cpu(prop->value + off, phandle_delta);
}
}
/*
* These nested loops recurse down two subtrees in parallel, where the
* node names in the two subtrees match.
*
* The roots of the subtrees are the overlay's __local_fixups__ node
* and the overlay's root node.
*/
for_each_child_of_node(local_fixups, child) {
for_each_child_of_node(overlay, overlay_child)
if (!node_name_cmp(child, overlay_child)) {
of_node_put(overlay_child);
break;
}
if (!overlay_child) {
of_node_put(child);
return -EINVAL;
}
err = adjust_local_phandle_references(child, overlay_child,
phandle_delta);
if (err) {
of_node_put(child);
return err;
}
}
return 0;
}
/**
* of_resolve_phandles - Relocate and resolve overlay against live tree
*
* @overlay: Pointer to devicetree overlay to relocate and resolve
*
* Modify (relocate) values of local phandles in @overlay to a range that
* does not conflict with the live expanded devicetree. Update references
* to the local phandles in @overlay. Update (resolve) phandle references
* in @overlay that refer to the live expanded devicetree.
*
* Phandle values in the live tree are in the range of
* 1 .. live_tree_max_phandle(). The range of phandle values in the overlay
 * also begins at 1. Adjust the phandle values in the overlay to begin
* at live_tree_max_phandle() + 1. Update references to the phandles to
* the adjusted phandle values.
*
* The name of each property in the "__fixups__" node in the overlay matches
* the name of a symbol (a label) in the live tree. The values of each
* property in the "__fixups__" node is a list of the property values in the
* overlay that need to be updated to contain the phandle reference
* corresponding to that symbol in the live tree. Update the references in
* the overlay with the phandle values in the live tree.
*
* @overlay must be detached.
*
* Resolving and applying @overlay to the live expanded devicetree must be
* protected by a mechanism to ensure that multiple overlays are processed
* in a single threaded manner so that multiple overlays will not relocate
* phandles to overlapping ranges. The mechanism to enforce this is not
* yet implemented.
*
* Return: %0 on success or a negative error value on error.
*/
int of_resolve_phandles(struct device_node *overlay)
{
struct device_node *child, *local_fixups, *refnode;
struct device_node *tree_symbols, *overlay_fixups;
struct property *prop;
const char *refpath;
phandle phandle, phandle_delta;
int err;
tree_symbols = NULL;
if (!overlay) {
pr_err("null overlay\n");
err = -EINVAL;
goto out;
}
if (!of_node_check_flag(overlay, OF_DETACHED)) {
pr_err("overlay not detached\n");
err = -EINVAL;
goto out;
}
phandle_delta = live_tree_max_phandle() + 1;
adjust_overlay_phandles(overlay, phandle_delta);
for_each_child_of_node(overlay, local_fixups)
if (of_node_name_eq(local_fixups, "__local_fixups__"))
break;
err = adjust_local_phandle_references(local_fixups, overlay, phandle_delta);
if (err)
goto out;
overlay_fixups = NULL;
for_each_child_of_node(overlay, child) {
if (of_node_name_eq(child, "__fixups__"))
overlay_fixups = child;
}
if (!overlay_fixups) {
err = 0;
goto out;
}
tree_symbols = of_find_node_by_path("/__symbols__");
if (!tree_symbols) {
pr_err("no symbols in root of device tree.\n");
err = -EINVAL;
goto out;
}
for_each_property_of_node(overlay_fixups, prop) {
/* skip properties added automatically */
if (!of_prop_cmp(prop->name, "name"))
continue;
err = of_property_read_string(tree_symbols,
prop->name, &refpath);
if (err) {
pr_err("node label '%s' not found in live devicetree symbols table\n",
prop->name);
goto out;
}
refnode = of_find_node_by_path(refpath);
if (!refnode) {
err = -ENOENT;
goto out;
}
phandle = refnode->phandle;
of_node_put(refnode);
err = update_usages_of_a_phandle_reference(overlay, prop, phandle);
if (err)
break;
}
out:
if (err)
pr_err("overlay phandle fixup failed: %d\n", err);
of_node_put(tree_symbols);
return err;
}
EXPORT_SYMBOL_GPL(of_resolve_phandles);
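/*
 * Hedged usage sketch (not part of the upstream file): an overlay loader
 * resolves phandles on a detached, unflattened overlay tree before applying
 * it; "my_overlay_root" is a hypothetical detached tree root.
 */
#if 0
static int my_example_prepare_overlay(struct device_node *my_overlay_root)
{
        int err = of_resolve_phandles(my_overlay_root);

        if (err)
                pr_err("overlay phandle resolution failed: %d\n", err);

        return err;
}
#endif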
| linux-master | drivers/of/resolver.c |
// SPDX-License-Identifier: GPL-2.0+
/* pdt.c: OF PROM device tree support code.
*
* Paul Mackerras August 1996.
* Copyright (C) 1996-2005 Paul Mackerras.
*
* Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
* {engebret|bergner}@us.ibm.com
*
* Adapted for sparc by David S. Miller [email protected]
* Adapted for multiple architectures by Andres Salomon <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_pdt.h>
static struct of_pdt_ops *of_pdt_prom_ops __initdata;
#if defined(CONFIG_SPARC)
unsigned int of_pdt_unique_id __initdata;
#define of_pdt_incr_unique_id(p) do { \
(p)->unique_id = of_pdt_unique_id++; \
} while (0)
static char * __init of_pdt_build_full_name(struct device_node *dp)
{
return build_path_component(dp);
}
#else /* CONFIG_SPARC */
static inline void of_pdt_incr_unique_id(void *p) { }
static inline void irq_trans_init(struct device_node *dp) { }
static char * __init of_pdt_build_full_name(struct device_node *dp)
{
static int failsafe_id = 0; /* for generating unique names on failure */
const char *name;
char path[256];
char *buf;
int len;
if (!of_pdt_prom_ops->pkg2path(dp->phandle, path, sizeof(path), &len)) {
name = kbasename(path);
buf = prom_early_alloc(strlen(name) + 1);
strcpy(buf, name);
return buf;
}
name = of_get_property(dp, "name", &len);
buf = prom_early_alloc(len + 16);
sprintf(buf, "%s@unknown%i", name, failsafe_id++);
pr_err("%s: pkg2path failed; assigning %s\n", __func__, buf);
return buf;
}
#endif /* !CONFIG_SPARC */
static struct property * __init of_pdt_build_one_prop(phandle node, char *prev,
char *special_name,
void *special_val,
int special_len)
{
static struct property *tmp = NULL;
struct property *p;
int err;
if (tmp) {
p = tmp;
memset(p, 0, sizeof(*p) + 32);
tmp = NULL;
} else {
p = prom_early_alloc(sizeof(struct property) + 32);
of_pdt_incr_unique_id(p);
}
p->name = (char *) (p + 1);
if (special_name) {
strcpy(p->name, special_name);
p->length = special_len;
p->value = prom_early_alloc(special_len);
memcpy(p->value, special_val, special_len);
} else {
err = of_pdt_prom_ops->nextprop(node, prev, p->name);
if (err) {
tmp = p;
return NULL;
}
p->length = of_pdt_prom_ops->getproplen(node, p->name);
if (p->length <= 0) {
p->length = 0;
} else {
int len;
p->value = prom_early_alloc(p->length + 1);
len = of_pdt_prom_ops->getproperty(node, p->name,
p->value, p->length);
if (len <= 0)
p->length = 0;
((unsigned char *)p->value)[p->length] = '\0';
}
}
return p;
}
static struct property * __init of_pdt_build_prop_list(phandle node)
{
struct property *head, *tail;
head = tail = of_pdt_build_one_prop(node, NULL,
".node", &node, sizeof(node));
tail->next = of_pdt_build_one_prop(node, NULL, NULL, NULL, 0);
tail = tail->next;
while(tail) {
tail->next = of_pdt_build_one_prop(node, tail->name,
NULL, NULL, 0);
tail = tail->next;
}
return head;
}
static char * __init of_pdt_get_one_property(phandle node, const char *name)
{
char *buf = "<NULL>";
int len;
len = of_pdt_prom_ops->getproplen(node, name);
if (len > 0) {
buf = prom_early_alloc(len);
len = of_pdt_prom_ops->getproperty(node, name, buf, len);
}
return buf;
}
static struct device_node * __init of_pdt_create_node(phandle node,
struct device_node *parent)
{
struct device_node *dp;
if (!node)
return NULL;
dp = prom_early_alloc(sizeof(*dp));
of_node_init(dp);
of_pdt_incr_unique_id(dp);
dp->parent = parent;
dp->name = of_pdt_get_one_property(node, "name");
dp->phandle = node;
dp->properties = of_pdt_build_prop_list(node);
dp->full_name = of_pdt_build_full_name(dp);
irq_trans_init(dp);
return dp;
}
static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
phandle node)
{
struct device_node *ret = NULL, *prev_sibling = NULL;
struct device_node *dp;
while (1) {
dp = of_pdt_create_node(node, parent);
if (!dp)
break;
if (prev_sibling)
prev_sibling->sibling = dp;
if (!ret)
ret = dp;
prev_sibling = dp;
dp->child = of_pdt_build_tree(dp, of_pdt_prom_ops->getchild(node));
node = of_pdt_prom_ops->getsibling(node);
}
return ret;
}
static void * __init kernel_tree_alloc(u64 size, u64 align)
{
return prom_early_alloc(size);
}
void __init of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops)
{
BUG_ON(!ops);
of_pdt_prom_ops = ops;
of_root = of_pdt_create_node(root_node, NULL);
of_root->full_name = "/";
of_root->child = of_pdt_build_tree(of_root,
of_pdt_prom_ops->getchild(of_root->phandle));
/* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
of_alias_scan(kernel_tree_alloc);
}
| linux-master | drivers/of/pdt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 Arm Limited
*
* Based on arch/arm64/kernel/machine_kexec_file.c:
* Copyright (C) 2018 Linaro Limited
*
* And arch/powerpc/kexec/file_load.c:
* Copyright (C) 2016 IBM Corporation
*/
#include <linux/ima.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/memblock.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/types.h>
#define RNG_SEED_SIZE 128
/*
* Additional space needed for the FDT buffer so that we can add initrd,
 * bootargs, kaslr-seed, rng-seed, usable-memory-range and elfcorehdr.
*/
#define FDT_EXTRA_SPACE 0x1000
/**
* fdt_find_and_del_mem_rsv - delete memory reservation with given address and size
*
* @fdt: Flattened device tree for the current kernel.
* @start: Starting address of the reserved memory.
* @size: Size of the reserved memory.
*
* Return: 0 on success, or negative errno on error.
*/
static int fdt_find_and_del_mem_rsv(void *fdt, unsigned long start, unsigned long size)
{
int i, ret, num_rsvs = fdt_num_mem_rsv(fdt);
for (i = 0; i < num_rsvs; i++) {
u64 rsv_start, rsv_size;
ret = fdt_get_mem_rsv(fdt, i, &rsv_start, &rsv_size);
if (ret) {
pr_err("Malformed device tree.\n");
return -EINVAL;
}
if (rsv_start == start && rsv_size == size) {
ret = fdt_del_mem_rsv(fdt, i);
if (ret) {
pr_err("Error deleting device tree reservation.\n");
return -EINVAL;
}
return 0;
}
}
return -ENOENT;
}
/**
 * get_addr_size_cells - Get the address and size cell counts of the root node
 *
 * @addr_cells: Returned number of address cells of the root node
 * @size_cells: Returned number of size cells of the root node
*
* Return: 0 on success, or negative errno on error.
*/
static int get_addr_size_cells(int *addr_cells, int *size_cells)
{
struct device_node *root;
root = of_find_node_by_path("/");
if (!root)
return -EINVAL;
*addr_cells = of_n_addr_cells(root);
*size_cells = of_n_size_cells(root);
of_node_put(root);
return 0;
}
/**
* do_get_kexec_buffer - Get address and size of device tree property
*
* @prop: Device tree property
* @len: Size of @prop
* @addr: Return address of the node
* @size: Return size of the node
*
* Return: 0 on success, or negative errno on error.
*/
static int do_get_kexec_buffer(const void *prop, int len, unsigned long *addr,
size_t *size)
{
int ret, addr_cells, size_cells;
ret = get_addr_size_cells(&addr_cells, &size_cells);
if (ret)
return ret;
if (len < 4 * (addr_cells + size_cells))
return -ENOENT;
*addr = of_read_number(prop, addr_cells);
*size = of_read_number(prop + 4 * addr_cells, size_cells);
return 0;
}
#ifdef CONFIG_HAVE_IMA_KEXEC
/**
* ima_get_kexec_buffer - get IMA buffer from the previous kernel
* @addr: On successful return, set to point to the buffer contents.
* @size: On successful return, set to the buffer size.
*
* Return: 0 on success, negative errno on error.
*/
int __init ima_get_kexec_buffer(void **addr, size_t *size)
{
int ret, len;
unsigned long tmp_addr;
unsigned long start_pfn, end_pfn;
size_t tmp_size;
const void *prop;
prop = of_get_property(of_chosen, "linux,ima-kexec-buffer", &len);
if (!prop)
return -ENOENT;
ret = do_get_kexec_buffer(prop, len, &tmp_addr, &tmp_size);
if (ret)
return ret;
/* Do a sanity check on the returned size of the ima-kexec buffer */
if (!tmp_size)
return -ENOENT;
/*
* Calculate the PFNs for the buffer and ensure
 * they are within addressable memory.
*/
start_pfn = PHYS_PFN(tmp_addr);
end_pfn = PHYS_PFN(tmp_addr + tmp_size - 1);
if (!page_is_ram(start_pfn) || !page_is_ram(end_pfn)) {
pr_warn("IMA buffer at 0x%lx, size = 0x%zx beyond memory\n",
tmp_addr, tmp_size);
return -EINVAL;
}
*addr = __va(tmp_addr);
*size = tmp_size;
return 0;
}
/**
* ima_free_kexec_buffer - free memory used by the IMA buffer
*/
int __init ima_free_kexec_buffer(void)
{
int ret;
unsigned long addr;
size_t size;
struct property *prop;
prop = of_find_property(of_chosen, "linux,ima-kexec-buffer", NULL);
if (!prop)
return -ENOENT;
ret = do_get_kexec_buffer(prop->value, prop->length, &addr, &size);
if (ret)
return ret;
ret = of_remove_property(of_chosen, prop);
if (ret)
return ret;
memblock_free_late(addr, size);
return 0;
}
#endif
/**
* remove_ima_buffer - remove the IMA buffer property and reservation from @fdt
*
* @fdt: Flattened Device Tree to update
* @chosen_node: Offset to the chosen node in the device tree
*
* The IMA measurement buffer is of no use to a subsequent kernel, so we always
* remove it from the device tree.
*/
static void remove_ima_buffer(void *fdt, int chosen_node)
{
int ret, len;
unsigned long addr;
size_t size;
const void *prop;
if (!IS_ENABLED(CONFIG_HAVE_IMA_KEXEC))
return;
prop = fdt_getprop(fdt, chosen_node, "linux,ima-kexec-buffer", &len);
if (!prop)
return;
ret = do_get_kexec_buffer(prop, len, &addr, &size);
fdt_delprop(fdt, chosen_node, "linux,ima-kexec-buffer");
if (ret)
return;
ret = fdt_find_and_del_mem_rsv(fdt, addr, size);
if (!ret)
pr_debug("Removed old IMA buffer reservation.\n");
}
#ifdef CONFIG_IMA_KEXEC
/**
* setup_ima_buffer - add IMA buffer information to the fdt
* @image: kexec image being loaded.
* @fdt: Flattened device tree for the next kernel.
* @chosen_node: Offset to the chosen node.
*
* Return: 0 on success, or negative errno on error.
*/
static int setup_ima_buffer(const struct kimage *image, void *fdt,
int chosen_node)
{
int ret;
if (!image->ima_buffer_size)
return 0;
ret = fdt_appendprop_addrrange(fdt, 0, chosen_node,
"linux,ima-kexec-buffer",
image->ima_buffer_addr,
image->ima_buffer_size);
if (ret < 0)
return -EINVAL;
ret = fdt_add_mem_rsv(fdt, image->ima_buffer_addr,
image->ima_buffer_size);
if (ret)
return -EINVAL;
pr_debug("IMA buffer at 0x%llx, size = 0x%zx\n",
image->ima_buffer_addr, image->ima_buffer_size);
return 0;
}
#else /* CONFIG_IMA_KEXEC */
static inline int setup_ima_buffer(const struct kimage *image, void *fdt,
int chosen_node)
{
return 0;
}
#endif /* CONFIG_IMA_KEXEC */
/**
* of_kexec_alloc_and_setup_fdt - Alloc and setup a new Flattened Device Tree
*
* @image: kexec image being loaded.
* @initrd_load_addr: Address where the next initrd will be loaded.
* @initrd_len: Size of the next initrd, or 0 if there will be none.
* @cmdline: Command line for the next kernel, or NULL if there will
* be none.
* @extra_fdt_size: Additional size for the new FDT buffer.
*
* Return: pointer to the new FDT on success, or NULL on error.
*/
void *of_kexec_alloc_and_setup_fdt(const struct kimage *image,
unsigned long initrd_load_addr,
unsigned long initrd_len,
const char *cmdline, size_t extra_fdt_size)
{
void *fdt;
int ret, chosen_node, len;
const void *prop;
size_t fdt_size;
fdt_size = fdt_totalsize(initial_boot_params) +
(cmdline ? strlen(cmdline) : 0) +
FDT_EXTRA_SPACE +
extra_fdt_size;
fdt = kvmalloc(fdt_size, GFP_KERNEL);
if (!fdt)
return NULL;
ret = fdt_open_into(initial_boot_params, fdt, fdt_size);
if (ret < 0) {
pr_err("Error %d setting up the new device tree.\n", ret);
goto out;
}
/* Remove memory reservation for the current device tree. */
ret = fdt_find_and_del_mem_rsv(fdt, __pa(initial_boot_params),
fdt_totalsize(initial_boot_params));
if (ret == -EINVAL) {
pr_err("Error removing memory reservation.\n");
goto out;
}
chosen_node = fdt_path_offset(fdt, "/chosen");
if (chosen_node == -FDT_ERR_NOTFOUND)
chosen_node = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"),
"chosen");
if (chosen_node < 0) {
ret = chosen_node;
goto out;
}
ret = fdt_delprop(fdt, chosen_node, "linux,elfcorehdr");
if (ret && ret != -FDT_ERR_NOTFOUND)
goto out;
ret = fdt_delprop(fdt, chosen_node, "linux,usable-memory-range");
if (ret && ret != -FDT_ERR_NOTFOUND)
goto out;
/* Did we boot using an initrd? */
prop = fdt_getprop(fdt, chosen_node, "linux,initrd-start", &len);
if (prop) {
u64 tmp_start, tmp_end, tmp_size;
tmp_start = of_read_number(prop, len / 4);
prop = fdt_getprop(fdt, chosen_node, "linux,initrd-end", &len);
if (!prop) {
ret = -EINVAL;
goto out;
}
tmp_end = of_read_number(prop, len / 4);
/*
* kexec reserves exact initrd size, while firmware may
* reserve a multiple of PAGE_SIZE, so check for both.
*/
tmp_size = tmp_end - tmp_start;
ret = fdt_find_and_del_mem_rsv(fdt, tmp_start, tmp_size);
if (ret == -ENOENT)
ret = fdt_find_and_del_mem_rsv(fdt, tmp_start,
round_up(tmp_size, PAGE_SIZE));
if (ret == -EINVAL)
goto out;
}
/* add initrd-* */
if (initrd_load_addr) {
ret = fdt_setprop_u64(fdt, chosen_node, "linux,initrd-start",
initrd_load_addr);
if (ret)
goto out;
ret = fdt_setprop_u64(fdt, chosen_node, "linux,initrd-end",
initrd_load_addr + initrd_len);
if (ret)
goto out;
ret = fdt_add_mem_rsv(fdt, initrd_load_addr, initrd_len);
if (ret)
goto out;
} else {
ret = fdt_delprop(fdt, chosen_node, "linux,initrd-start");
if (ret && (ret != -FDT_ERR_NOTFOUND))
goto out;
ret = fdt_delprop(fdt, chosen_node, "linux,initrd-end");
if (ret && (ret != -FDT_ERR_NOTFOUND))
goto out;
}
if (image->type == KEXEC_TYPE_CRASH) {
/* add linux,elfcorehdr */
ret = fdt_appendprop_addrrange(fdt, 0, chosen_node,
"linux,elfcorehdr", image->elf_load_addr,
image->elf_headers_sz);
if (ret)
goto out;
/*
* Avoid elfcorehdr from being stomped on in kdump kernel by
* setting up memory reserve map.
*/
ret = fdt_add_mem_rsv(fdt, image->elf_load_addr,
image->elf_headers_sz);
if (ret)
goto out;
/* add linux,usable-memory-range */
ret = fdt_appendprop_addrrange(fdt, 0, chosen_node,
"linux,usable-memory-range", crashk_res.start,
crashk_res.end - crashk_res.start + 1);
if (ret)
goto out;
if (crashk_low_res.end) {
ret = fdt_appendprop_addrrange(fdt, 0, chosen_node,
"linux,usable-memory-range",
crashk_low_res.start,
crashk_low_res.end - crashk_low_res.start + 1);
if (ret)
goto out;
}
}
/* add bootargs */
if (cmdline) {
ret = fdt_setprop_string(fdt, chosen_node, "bootargs", cmdline);
if (ret)
goto out;
} else {
ret = fdt_delprop(fdt, chosen_node, "bootargs");
if (ret && (ret != -FDT_ERR_NOTFOUND))
goto out;
}
/* add kaslr-seed */
ret = fdt_delprop(fdt, chosen_node, "kaslr-seed");
if (ret == -FDT_ERR_NOTFOUND)
ret = 0;
else if (ret)
goto out;
if (rng_is_initialized()) {
u64 seed = get_random_u64();
ret = fdt_setprop_u64(fdt, chosen_node, "kaslr-seed", seed);
if (ret)
goto out;
} else {
pr_notice("RNG is not initialised: omitting \"%s\" property\n",
"kaslr-seed");
}
/* add rng-seed */
if (rng_is_initialized()) {
void *rng_seed;
ret = fdt_setprop_placeholder(fdt, chosen_node, "rng-seed",
RNG_SEED_SIZE, &rng_seed);
if (ret)
goto out;
get_random_bytes(rng_seed, RNG_SEED_SIZE);
} else {
pr_notice("RNG is not initialised: omitting \"%s\" property\n",
"rng-seed");
}
ret = fdt_setprop(fdt, chosen_node, "linux,booted-from-kexec", NULL, 0);
if (ret)
goto out;
remove_ima_buffer(fdt, chosen_node);
ret = setup_ima_buffer(image, fdt, fdt_path_offset(fdt, "/chosen"));
out:
if (ret) {
kvfree(fdt);
fdt = NULL;
}
return fdt;
}
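/*
 * Illustrative usage sketch (hypothetical arch loader, not part of this
 * file): an architecture's kexec_file_load() path might build the FDT for
 * the next kernel along these lines:
 *
 *	void *fdt;
 *
 *	fdt = of_kexec_alloc_and_setup_fdt(image, initrd_load_addr,
 *					   initrd_len, cmdline, 0);
 *	if (!fdt)
 *		return -EINVAL;
 *
 * The caller then loads the fdt into a kexec segment and kvfree()s it once
 * it is no longer needed.
 */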
| linux-master | drivers/of/kexec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Functions for working with the Flattened Device Tree data format
*
* Copyright 2009 Benjamin Herrenschmidt, IBM Corp
* [email protected]
*/
#define pr_fmt(fmt) "OF: fdt: " fmt
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/kernel.h>
#include <linux/initrd.h>
#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/libfdt.h>
#include <linux/debugfs.h>
#include <linux/serial_core.h>
#include <linux/sysfs.h>
#include <linux/random.h>
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#include <asm/page.h>
#include "of_private.h"
/*
* of_fdt_limit_memory - limit the number of regions in the /memory node
* @limit: maximum entries
*
* Adjust the flattened device tree to have at most 'limit' number of
* memory entries in the /memory node. This function may be called
* any time after initial_boot_params is set.
*/
void __init of_fdt_limit_memory(int limit)
{
int memory;
int len;
const void *val;
int nr_address_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
int nr_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
const __be32 *addr_prop;
const __be32 *size_prop;
int root_offset;
int cell_size;
root_offset = fdt_path_offset(initial_boot_params, "/");
if (root_offset < 0)
return;
addr_prop = fdt_getprop(initial_boot_params, root_offset,
"#address-cells", NULL);
if (addr_prop)
nr_address_cells = fdt32_to_cpu(*addr_prop);
size_prop = fdt_getprop(initial_boot_params, root_offset,
"#size-cells", NULL);
if (size_prop)
nr_size_cells = fdt32_to_cpu(*size_prop);
cell_size = sizeof(uint32_t)*(nr_address_cells + nr_size_cells);
memory = fdt_path_offset(initial_boot_params, "/memory");
if (memory > 0) {
val = fdt_getprop(initial_boot_params, memory, "reg", &len);
if (len > limit*cell_size) {
len = limit*cell_size;
pr_debug("Limiting number of entries to %d\n", limit);
fdt_setprop(initial_boot_params, memory, "reg", val,
len);
}
}
}
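/*
 * Illustrative example (not part of this file): a platform whose firmware
 * publishes too many /memory "reg" entries could cap them early in boot:
 *
 *	of_fdt_limit_memory(8);
 *
 * Entries beyond the eighth address/size pair are dropped from the
 * flattened tree before it is unflattened.
 */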
static bool of_fdt_device_is_available(const void *blob, unsigned long node)
{
const char *status = fdt_getprop(blob, node, "status", NULL);
if (!status)
return true;
if (!strcmp(status, "ok") || !strcmp(status, "okay"))
return true;
return false;
}
static void *unflatten_dt_alloc(void **mem, unsigned long size,
unsigned long align)
{
void *res;
*mem = PTR_ALIGN(*mem, align);
res = *mem;
*mem += size;
return res;
}
static void populate_properties(const void *blob,
int offset,
void **mem,
struct device_node *np,
const char *nodename,
bool dryrun)
{
struct property *pp, **pprev = NULL;
int cur;
bool has_name = false;
pprev = &np->properties;
for (cur = fdt_first_property_offset(blob, offset);
cur >= 0;
cur = fdt_next_property_offset(blob, cur)) {
const __be32 *val;
const char *pname;
u32 sz;
val = fdt_getprop_by_offset(blob, cur, &pname, &sz);
if (!val) {
pr_warn("Cannot locate property at 0x%x\n", cur);
continue;
}
if (!pname) {
pr_warn("Cannot find property name at 0x%x\n", cur);
continue;
}
if (!strcmp(pname, "name"))
has_name = true;
pp = unflatten_dt_alloc(mem, sizeof(struct property),
__alignof__(struct property));
if (dryrun)
continue;
/* We accept flattened tree phandles either in
* ePAPR-style "phandle" properties, or the
* legacy "linux,phandle" properties. If both
* appear and have different values, things
* will get weird. Don't do that.
*/
if (!strcmp(pname, "phandle") ||
!strcmp(pname, "linux,phandle")) {
if (!np->phandle)
np->phandle = be32_to_cpup(val);
}
/* And we process the "ibm,phandle" property
* used in pSeries dynamic device tree
* stuff
*/
if (!strcmp(pname, "ibm,phandle"))
np->phandle = be32_to_cpup(val);
pp->name = (char *)pname;
pp->length = sz;
pp->value = (__be32 *)val;
*pprev = pp;
pprev = &pp->next;
}
/* With version 0x10 we may not have the name property;
* recreate it here from the unit name if absent
*/
if (!has_name) {
const char *p = nodename, *ps = p, *pa = NULL;
int len;
while (*p) {
if ((*p) == '@')
pa = p;
else if ((*p) == '/')
ps = p + 1;
p++;
}
if (pa < ps)
pa = p;
len = (pa - ps) + 1;
pp = unflatten_dt_alloc(mem, sizeof(struct property) + len,
__alignof__(struct property));
if (!dryrun) {
pp->name = "name";
pp->length = len;
pp->value = pp + 1;
*pprev = pp;
memcpy(pp->value, ps, len - 1);
((char *)pp->value)[len - 1] = 0;
pr_debug("fixed up name for %s -> %s\n",
nodename, (char *)pp->value);
}
}
}
static int populate_node(const void *blob,
int offset,
void **mem,
struct device_node *dad,
struct device_node **pnp,
bool dryrun)
{
struct device_node *np;
const char *pathp;
int len;
pathp = fdt_get_name(blob, offset, &len);
if (!pathp) {
*pnp = NULL;
return len;
}
len++;
np = unflatten_dt_alloc(mem, sizeof(struct device_node) + len,
__alignof__(struct device_node));
if (!dryrun) {
char *fn;
of_node_init(np);
np->full_name = fn = ((char *)np) + sizeof(*np);
memcpy(fn, pathp, len);
if (dad != NULL) {
np->parent = dad;
np->sibling = dad->child;
dad->child = np;
}
}
populate_properties(blob, offset, mem, np, pathp, dryrun);
if (!dryrun) {
np->name = of_get_property(np, "name", NULL);
if (!np->name)
np->name = "<NULL>";
}
*pnp = np;
return 0;
}
static void reverse_nodes(struct device_node *parent)
{
struct device_node *child, *next;
/* Depth first: recurse into the children before reversing this level */
child = parent->child;
while (child) {
reverse_nodes(child);
child = child->sibling;
}
/* Reverse the nodes in the child list */
child = parent->child;
parent->child = NULL;
while (child) {
next = child->sibling;
child->sibling = parent->child;
parent->child = child;
child = next;
}
}
/**
* unflatten_dt_nodes - Alloc and populate a device_node from the flat tree
* @blob: The parent device tree blob
* @mem: Memory chunk to use for allocating device nodes and properties
* @dad: Parent struct device_node
* @nodepp: The device_node tree created by the call
*
* Return: The size of the unflattened device tree, or an error code
*/
static int unflatten_dt_nodes(const void *blob,
void *mem,
struct device_node *dad,
struct device_node **nodepp)
{
struct device_node *root;
int offset = 0, depth = 0, initial_depth = 0;
#define FDT_MAX_DEPTH 64
struct device_node *nps[FDT_MAX_DEPTH];
void *base = mem;
bool dryrun = !base;
int ret;
if (nodepp)
*nodepp = NULL;
/*
* We're unflattening device sub-tree if @dad is valid. There are
* possibly multiple nodes in the first level of depth. We need to
* set @depth to 1 to make fdt_next_node() happy as it bails
* immediately when negative @depth is found. Otherwise, the device
* nodes except the first one won't be unflattened successfully.
*/
if (dad)
depth = initial_depth = 1;
root = dad;
nps[depth] = dad;
for (offset = 0;
offset >= 0 && depth >= initial_depth;
offset = fdt_next_node(blob, offset, &depth)) {
if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
continue;
if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
!of_fdt_device_is_available(blob, offset))
continue;
ret = populate_node(blob, offset, &mem, nps[depth],
&nps[depth+1], dryrun);
if (ret < 0)
return ret;
if (!dryrun && nodepp && !*nodepp)
*nodepp = nps[depth+1];
if (!dryrun && !root)
root = nps[depth+1];
}
if (offset < 0 && offset != -FDT_ERR_NOTFOUND) {
pr_err("Error %d processing FDT\n", offset);
return -EINVAL;
}
/*
* Reverse the child list. Some drivers assume node order matches .dts
* node order
*/
if (!dryrun)
reverse_nodes(root);
return mem - base;
}
/**
* __unflatten_device_tree - create tree of device_nodes from flat blob
* @blob: The blob to expand
* @dad: Parent device node
* @mynodes: The device_node tree created by the call
* @dt_alloc: An allocator that provides a virtual address to memory
* for the resulting tree
* @detached: if true set OF_DETACHED on @mynodes
*
* unflattens a device-tree, creating the tree of struct device_node. It also
* fills the "name" and "type" pointers of the nodes so the normal device-tree
* walking functions can be used.
*
* Return: NULL on failure or the memory chunk containing the unflattened
* device tree on success.
*/
void *__unflatten_device_tree(const void *blob,
struct device_node *dad,
struct device_node **mynodes,
void *(*dt_alloc)(u64 size, u64 align),
bool detached)
{
int size;
void *mem;
int ret;
if (mynodes)
*mynodes = NULL;
pr_debug(" -> unflatten_device_tree()\n");
if (!blob) {
pr_debug("No device tree pointer\n");
return NULL;
}
pr_debug("Unflattening device tree:\n");
pr_debug("magic: %08x\n", fdt_magic(blob));
pr_debug("size: %08x\n", fdt_totalsize(blob));
pr_debug("version: %08x\n", fdt_version(blob));
if (fdt_check_header(blob)) {
pr_err("Invalid device tree blob header\n");
return NULL;
}
/* First pass, scan for size */
size = unflatten_dt_nodes(blob, NULL, dad, NULL);
if (size <= 0)
return NULL;
size = ALIGN(size, 4);
pr_debug(" size is %d, allocating...\n", size);
/* Allocate memory for the expanded device tree */
mem = dt_alloc(size + 4, __alignof__(struct device_node));
if (!mem)
return NULL;
memset(mem, 0, size);
*(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
pr_debug(" unflattening %p...\n", mem);
/* Second pass, do actual unflattening */
ret = unflatten_dt_nodes(blob, mem, dad, mynodes);
if (be32_to_cpup(mem + size) != 0xdeadbeef)
pr_warn("End of tree marker overwritten: %08x\n",
be32_to_cpup(mem + size));
if (ret <= 0)
return NULL;
if (detached && mynodes && *mynodes) {
of_node_set_flag(*mynodes, OF_DETACHED);
pr_debug("unflattened tree is detached\n");
}
pr_debug(" <- unflatten_device_tree()\n");
return mem;
}
static void *kernel_tree_alloc(u64 size, u64 align)
{
return kzalloc(size, GFP_KERNEL);
}
static DEFINE_MUTEX(of_fdt_unflatten_mutex);
/**
* of_fdt_unflatten_tree - create tree of device_nodes from flat blob
* @blob: Flat device tree blob
* @dad: Parent device node
* @mynodes: The device tree created by the call
*
* unflattens the device-tree passed by the firmware, creating the
* tree of struct device_node. It also fills the "name" and "type"
* pointers of the nodes so the normal device-tree walking functions
* can be used.
*
* Return: NULL on failure or the memory chunk containing the unflattened
* device tree on success.
*/
void *of_fdt_unflatten_tree(const unsigned long *blob,
struct device_node *dad,
struct device_node **mynodes)
{
void *mem;
mutex_lock(&of_fdt_unflatten_mutex);
mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc,
true);
mutex_unlock(&of_fdt_unflatten_mutex);
return mem;
}
EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
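/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * code that carries its own FDT blob (for example test or overlay data)
 * could unflatten it into a detached device_node tree:
 *
 *	struct device_node *root;
 *	void *mem;
 *
 *	mem = of_fdt_unflatten_tree((const unsigned long *)blob, NULL, &root);
 *	if (!mem)
 *		return -EINVAL;
 *
 * blob is assumed to point to a valid FDT provided by the caller.
 */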
/* Everything below here references initial_boot_params directly. */
int __initdata dt_root_addr_cells;
int __initdata dt_root_size_cells;
void *initial_boot_params __ro_after_init;
#ifdef CONFIG_OF_EARLY_FLATTREE
static u32 of_fdt_crc32;
static int __init early_init_dt_reserve_memory(phys_addr_t base,
phys_addr_t size, bool nomap)
{
if (nomap) {
/*
* If the memory is already reserved (by another region), we
* should not allow it to be marked nomap, but don't worry
* if the region isn't memory as it won't be mapped.
*/
if (memblock_overlaps_region(&memblock.memory, base, size) &&
memblock_is_region_reserved(base, size))
return -EBUSY;
return memblock_mark_nomap(base, size);
}
return memblock_reserve(base, size);
}
/*
* __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
*/
static int __init __reserved_mem_reserve_reg(unsigned long node,
const char *uname)
{
int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
phys_addr_t base, size;
int len;
const __be32 *prop;
int first = 1;
bool nomap;
prop = of_get_flat_dt_prop(node, "reg", &len);
if (!prop)
return -ENOENT;
if (len && len % t_len != 0) {
pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
uname);
return -EINVAL;
}
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
while (len >= t_len) {
base = dt_mem_next_cell(dt_root_addr_cells, &prop);
size = dt_mem_next_cell(dt_root_size_cells, &prop);
if (size &&
early_init_dt_reserve_memory(base, size, nomap) == 0)
pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
else
pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
len -= t_len;
if (first) {
fdt_reserved_mem_save_node(node, uname, base, size);
first = 0;
}
}
return 0;
}
/*
* __reserved_mem_check_root() - check if the #size-cells and #address-cells
* provided in /reserved-memory match the values supported by the current
* implementation, and check that the ranges property has been provided
*/
static int __init __reserved_mem_check_root(unsigned long node)
{
const __be32 *prop;
prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
return -EINVAL;
prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
return -EINVAL;
prop = of_get_flat_dt_prop(node, "ranges", NULL);
if (!prop)
return -EINVAL;
return 0;
}
/*
* fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
*/
static int __init fdt_scan_reserved_mem(void)
{
int node, child;
const void *fdt = initial_boot_params;
node = fdt_path_offset(fdt, "/reserved-memory");
if (node < 0)
return -ENODEV;
if (__reserved_mem_check_root(node) != 0) {
pr_err("Reserved memory: unsupported node format, ignoring\n");
return -EINVAL;
}
fdt_for_each_subnode(child, fdt, node) {
const char *uname;
int err;
if (!of_fdt_device_is_available(fdt, child))
continue;
uname = fdt_get_name(fdt, child, NULL);
err = __reserved_mem_reserve_reg(child, uname);
if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL))
fdt_reserved_mem_save_node(child, uname, 0, 0);
}
return 0;
}
/*
* fdt_reserve_elfcorehdr() - reserves memory for elf core header
*
* This function reserves the memory occupied by an elf core header
* described in the device tree. This region contains all the
* information about the primary kernel's core image and is used by a dump
* capture kernel to access the system memory of the primary kernel.
*/
static void __init fdt_reserve_elfcorehdr(void)
{
if (!IS_ENABLED(CONFIG_CRASH_DUMP) || !elfcorehdr_size)
return;
if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
pr_warn("elfcorehdr is overlapped\n");
return;
}
memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
elfcorehdr_size >> 10, elfcorehdr_addr);
}
/**
* early_init_fdt_scan_reserved_mem() - create reserved memory regions
*
* This function grabs memory from the early allocator for exclusive device use,
* as defined in the device tree. It should be called by arch-specific code
* once the early allocator (i.e. memblock) has been fully activated.
*/
void __init early_init_fdt_scan_reserved_mem(void)
{
int n;
u64 base, size;
if (!initial_boot_params)
return;
fdt_scan_reserved_mem();
fdt_reserve_elfcorehdr();
/* Process header /memreserve/ fields */
for (n = 0; ; n++) {
fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
if (!size)
break;
memblock_reserve(base, size);
}
fdt_init_reserved_mem();
}
/**
* early_init_fdt_reserve_self() - reserve the memory used by the FDT blob
*/
void __init early_init_fdt_reserve_self(void)
{
if (!initial_boot_params)
return;
/* Reserve the dtb region */
memblock_reserve(__pa(initial_boot_params),
fdt_totalsize(initial_boot_params));
}
/**
* of_scan_flat_dt - scan flattened tree blob and call callback on each node.
* @it: callback function
* @data: context data pointer
*
* This function is used to scan the flattened device-tree; it is typically
* used to extract memory information at boot, before the tree can be
* unflattened.
*/
int __init of_scan_flat_dt(int (*it)(unsigned long node,
const char *uname, int depth,
void *data),
void *data)
{
const void *blob = initial_boot_params;
const char *pathp;
int offset, rc = 0, depth = -1;
if (!blob)
return 0;
for (offset = fdt_next_node(blob, -1, &depth);
offset >= 0 && depth >= 0 && !rc;
offset = fdt_next_node(blob, offset, &depth)) {
pathp = fdt_get_name(blob, offset, NULL);
rc = it(offset, pathp, depth, data);
}
return rc;
}
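/*
 * Illustrative usage sketch (hypothetical callback, not part of this file):
 * early arch code could locate the /chosen node offset like this:
 *
 *	static int __init scan_chosen(unsigned long node, const char *uname,
 *				      int depth, void *data)
 *	{
 *		if (depth != 1 || strcmp(uname, "chosen") != 0)
 *			return 0;
 *		*(unsigned long *)data = node;
 *		return 1;
 *	}
 *
 *	unsigned long chosen = 0;
 *	of_scan_flat_dt(scan_chosen, &chosen);
 *
 * Returning non-zero from the callback stops the scan.
 */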
/**
* of_scan_flat_dt_subnodes - scan sub-nodes of a node and call callback on each.
* @parent: parent node
* @it: callback function
* @data: context data pointer
*
* This function is used to scan sub-nodes of a node.
*/
int __init of_scan_flat_dt_subnodes(unsigned long parent,
int (*it)(unsigned long node,
const char *uname,
void *data),
void *data)
{
const void *blob = initial_boot_params;
int node;
fdt_for_each_subnode(node, blob, parent) {
const char *pathp;
int rc;
pathp = fdt_get_name(blob, node, NULL);
rc = it(node, pathp, data);
if (rc)
return rc;
}
return 0;
}
/**
* of_get_flat_dt_subnode_by_name - get the subnode by given name
*
* @node: the parent node
* @uname: the name of subnode
*
* Return: offset of the subnode, or -FDT_ERR_NOTFOUND if there is none
*/
int __init of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname)
{
return fdt_subnode_offset(initial_boot_params, node, uname);
}
/*
* of_get_flat_dt_root - find the root node in the flat blob
*/
unsigned long __init of_get_flat_dt_root(void)
{
return 0;
}
/*
* of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr
*
* This function can be used within scan_flattened_dt callback to get
* access to properties
*/
const void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
int *size)
{
return fdt_getprop(initial_boot_params, node, name, size);
}
/**
* of_fdt_is_compatible - Return true if given node from the given blob has
* compat in its compatible list
* @blob: A device tree blob
* @node: node to test
* @compat: compatible string to compare with compatible list.
*
* Return: a non-zero value on match with smaller values returned for more
* specific compatible values.
*/
static int of_fdt_is_compatible(const void *blob,
unsigned long node, const char *compat)
{
const char *cp;
int cplen;
unsigned long l, score = 0;
cp = fdt_getprop(blob, node, "compatible", &cplen);
if (cp == NULL)
return 0;
while (cplen > 0) {
score++;
if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
return score;
l = strlen(cp) + 1;
cp += l;
cplen -= l;
}
return 0;
}
/**
* of_flat_dt_is_compatible - Return true if given node has compat in compatible list
* @node: node to test
* @compat: compatible string to compare with compatible list.
*/
int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
{
return of_fdt_is_compatible(initial_boot_params, node, compat);
}
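/*
 * Illustrative example (not part of this file): early platform code can test
 * the root node's compatible list before the tree is unflattened;
 * "acme,example-soc" is a made-up compatible string:
 *
 *	unsigned long root = of_get_flat_dt_root();
 *
 *	if (of_flat_dt_is_compatible(root, "acme,example-soc"))
 *		apply_soc_fixups();
 *
 * apply_soc_fixups() is likewise a hypothetical helper.
 */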
/*
* of_flat_dt_match - Return true if node matches a list of compatible values
*/
static int __init of_flat_dt_match(unsigned long node, const char *const *compat)
{
unsigned int tmp, score = 0;
if (!compat)
return 0;
while (*compat) {
tmp = of_fdt_is_compatible(initial_boot_params, node, *compat);
if (tmp && (score == 0 || (tmp < score)))
score = tmp;
compat++;
}
return score;
}
/*
* of_get_flat_dt_phandle - Given a node in the flat blob, return the phandle
*/
uint32_t __init of_get_flat_dt_phandle(unsigned long node)
{
return fdt_get_phandle(initial_boot_params, node);
}
const char * __init of_flat_dt_get_machine_name(void)
{
const char *name;
unsigned long dt_root = of_get_flat_dt_root();
name = of_get_flat_dt_prop(dt_root, "model", NULL);
if (!name)
name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
return name;
}
/**
* of_flat_dt_match_machine - Iterate match tables to find matching machine.
*
* @default_match: A machine specific ptr to return in case of no match.
* @get_next_compat: callback function to return next compatible match table.
*
* Iterate through machine match tables to find the best match for the machine
* compatible string in the FDT.
*/
const void * __init of_flat_dt_match_machine(const void *default_match,
const void * (*get_next_compat)(const char * const**))
{
const void *data = NULL;
const void *best_data = default_match;
const char *const *compat;
unsigned long dt_root;
unsigned int best_score = ~1, score = 0;
dt_root = of_get_flat_dt_root();
while ((data = get_next_compat(&compat))) {
score = of_flat_dt_match(dt_root, compat);
if (score > 0 && score < best_score) {
best_data = data;
best_score = score;
}
}
if (!best_data) {
const char *prop;
int size;
pr_err("\n unrecognized device tree list:\n[ ");
prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
if (prop) {
while (size > 0) {
printk("'%s' ", prop);
size -= strlen(prop) + 1;
prop += strlen(prop) + 1;
}
}
printk("]\n\n");
return NULL;
}
pr_info("Machine model: %s\n", of_flat_dt_get_machine_name());
return best_data;
}
static void __early_init_dt_declare_initrd(unsigned long start,
unsigned long end)
{
/*
* __va() is not yet available this early on some platforms. In that
* case, the platform uses phys_initrd_start/phys_initrd_size instead
* and does the VA conversion itself.
*/
if (!IS_ENABLED(CONFIG_ARM64) &&
!(IS_ENABLED(CONFIG_RISCV) && IS_ENABLED(CONFIG_64BIT))) {
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
initrd_below_start_ok = 1;
}
}
/**
* early_init_dt_check_for_initrd - Decode initrd location from flat tree
* @node: reference to node containing initrd location ('chosen')
*/
static void __init early_init_dt_check_for_initrd(unsigned long node)
{
u64 start, end;
int len;
const __be32 *prop;
if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
return;
pr_debug("Looking for initrd properties... ");
prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
if (!prop)
return;
start = of_read_number(prop, len/4);
prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
if (!prop)
return;
end = of_read_number(prop, len/4);
if (start > end)
return;
__early_init_dt_declare_initrd(start, end);
phys_initrd_start = start;
phys_initrd_size = end - start;
pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n", start, end);
}
/**
* early_init_dt_check_for_elfcorehdr - Decode elfcorehdr location from flat
* tree
* @node: reference to node containing elfcorehdr location ('chosen')
*/
static void __init early_init_dt_check_for_elfcorehdr(unsigned long node)
{
const __be32 *prop;
int len;
if (!IS_ENABLED(CONFIG_CRASH_DUMP))
return;
pr_debug("Looking for elfcorehdr property... ");
prop = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
if (!prop || (len < (dt_root_addr_cells + dt_root_size_cells)))
return;
elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &prop);
elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &prop);
pr_debug("elfcorehdr_start=0x%llx elfcorehdr_size=0x%llx\n",
elfcorehdr_addr, elfcorehdr_size);
}
static unsigned long chosen_node_offset = -FDT_ERR_NOTFOUND;
/*
* The main usage of linux,usable-memory-range is for crash dump kernel.
* Originally, the number of usable-memory regions was one. Now there may
* be two regions: a low region and a high region.
* For compatibility with existing user space and older kdump kernels, the low
* region is always the last range of linux,usable-memory-range if it exists.
*/
#define MAX_USABLE_RANGES 2
/**
* early_init_dt_check_for_usable_mem_range - Decode usable memory range
* location from flat tree
*/
void __init early_init_dt_check_for_usable_mem_range(void)
{
struct memblock_region rgn[MAX_USABLE_RANGES] = {0};
const __be32 *prop, *endp;
int len, i;
unsigned long node = chosen_node_offset;
if ((long)node < 0)
return;
pr_debug("Looking for usable-memory-range property... ");
prop = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
if (!prop || (len % (dt_root_addr_cells + dt_root_size_cells)))
return;
endp = prop + (len / sizeof(__be32));
for (i = 0; i < MAX_USABLE_RANGES && prop < endp; i++) {
rgn[i].base = dt_mem_next_cell(dt_root_addr_cells, &prop);
rgn[i].size = dt_mem_next_cell(dt_root_size_cells, &prop);
pr_debug("cap_mem_regions[%d]: base=%pa, size=%pa\n",
i, &rgn[i].base, &rgn[i].size);
}
memblock_cap_memory_range(rgn[0].base, rgn[0].size);
for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++)
memblock_add(rgn[i].base, rgn[i].size);
}
#ifdef CONFIG_SERIAL_EARLYCON
int __init early_init_dt_scan_chosen_stdout(void)
{
int offset;
const char *p, *q, *options = NULL;
int l;
const struct earlycon_id *match;
const void *fdt = initial_boot_params;
int ret;
offset = fdt_path_offset(fdt, "/chosen");
if (offset < 0)
offset = fdt_path_offset(fdt, "/chosen@0");
if (offset < 0)
return -ENOENT;
p = fdt_getprop(fdt, offset, "stdout-path", &l);
if (!p)
p = fdt_getprop(fdt, offset, "linux,stdout-path", &l);
if (!p || !l)
return -ENOENT;
q = strchrnul(p, ':');
if (*q != '\0')
options = q + 1;
l = q - p;
/* Get the node specified by stdout-path */
offset = fdt_path_offset_namelen(fdt, p, l);
if (offset < 0) {
pr_warn("earlycon: stdout-path %.*s not found\n", l, p);
return 0;
}
for (match = __earlycon_table; match < __earlycon_table_end; match++) {
if (!match->compatible[0])
continue;
if (fdt_node_check_compatible(fdt, offset, match->compatible))
continue;
ret = of_setup_earlycon(match, offset, options);
if (!ret || ret == -EALREADY)
return 0;
}
return -ENODEV;
}
#endif
/*
* early_init_dt_scan_root - fetch the top level address and size cells
*/
int __init early_init_dt_scan_root(void)
{
const __be32 *prop;
const void *fdt = initial_boot_params;
int node = fdt_path_offset(fdt, "/");
if (node < 0)
return -ENODEV;
dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
if (prop)
dt_root_size_cells = be32_to_cpup(prop);
pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);
prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
if (prop)
dt_root_addr_cells = be32_to_cpup(prop);
pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);
return 0;
}
u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
{
const __be32 *p = *cellp;
*cellp = p + s;
return of_read_number(p, s);
}
/*
* early_init_dt_scan_memory - Look for and parse memory nodes
*/
int __init early_init_dt_scan_memory(void)
{
int node, found_memory = 0;
const void *fdt = initial_boot_params;
fdt_for_each_subnode(node, fdt, 0) {
const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
const __be32 *reg, *endp;
int l;
bool hotpluggable;
/* We are scanning "memory" nodes only */
if (type == NULL || strcmp(type, "memory") != 0)
continue;
if (!of_fdt_device_is_available(fdt, node))
continue;
reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
if (reg == NULL)
reg = of_get_flat_dt_prop(node, "reg", &l);
if (reg == NULL)
continue;
endp = reg + (l / sizeof(__be32));
hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);
pr_debug("memory scan node %s, reg size %d,\n",
fdt_get_name(fdt, node, NULL), l);
while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
u64 base, size;
base = dt_mem_next_cell(dt_root_addr_cells, &reg);
size = dt_mem_next_cell(dt_root_size_cells, &reg);
if (size == 0)
continue;
pr_debug(" - %llx, %llx\n", base, size);
early_init_dt_add_memory_arch(base, size);
found_memory = 1;
if (!hotpluggable)
continue;
if (memblock_mark_hotplug(base, size))
pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
base, base + size);
}
}
return found_memory;
}
int __init early_init_dt_scan_chosen(char *cmdline)
{
int l, node;
const char *p;
const void *rng_seed;
const void *fdt = initial_boot_params;
node = fdt_path_offset(fdt, "/chosen");
if (node < 0)
node = fdt_path_offset(fdt, "/chosen@0");
if (node < 0)
/* Handle the cmdline config options even if no /chosen node */
goto handle_cmdline;
chosen_node_offset = node;
early_init_dt_check_for_initrd(node);
early_init_dt_check_for_elfcorehdr(node);
rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
if (rng_seed && l > 0) {
add_bootloader_randomness(rng_seed, l);
/* try to clear seed so it won't be found. */
fdt_nop_property(initial_boot_params, node, "rng-seed");
/* update CRC check value */
of_fdt_crc32 = crc32_be(~0, initial_boot_params,
fdt_totalsize(initial_boot_params));
}
/* Retrieve command line */
p = of_get_flat_dt_prop(node, "bootargs", &l);
if (p != NULL && l > 0)
strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
handle_cmdline:
/*
* CONFIG_CMDLINE is meant to be a default in case nothing else
* managed to set the command line, unless CONFIG_CMDLINE_FORCE
* is set in which case we override whatever was found earlier.
*/
#ifdef CONFIG_CMDLINE
#if defined(CONFIG_CMDLINE_EXTEND)
strlcat(cmdline, " ", COMMAND_LINE_SIZE);
strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#else
/* No arguments from boot loader, use kernel's cmdline */
if (!((char *)cmdline)[0])
strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif
#endif /* CONFIG_CMDLINE */
pr_debug("Command line is: %s\n", (char *)cmdline);
return 0;
}
#ifndef MIN_MEMBLOCK_ADDR
#define MIN_MEMBLOCK_ADDR __pa(PAGE_OFFSET)
#endif
#ifndef MAX_MEMBLOCK_ADDR
#define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0)
#endif
void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
{
const u64 phys_offset = MIN_MEMBLOCK_ADDR;
if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
base, base + size);
return;
}
if (!PAGE_ALIGNED(base)) {
size -= PAGE_SIZE - (base & ~PAGE_MASK);
base = PAGE_ALIGN(base);
}
size &= PAGE_MASK;
if (base > MAX_MEMBLOCK_ADDR) {
pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
base, base + size);
return;
}
if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
size = MAX_MEMBLOCK_ADDR - base + 1;
}
if (base + size < phys_offset) {
pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
base, base + size);
return;
}
if (base < phys_offset) {
pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
base, phys_offset);
size -= phys_offset - base;
base = phys_offset;
}
memblock_add(base, size);
}
static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
void *ptr = memblock_alloc(size, align);
if (!ptr)
panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
__func__, size, align);
return ptr;
}
bool __init early_init_dt_verify(void *params)
{
if (!params)
return false;
/* check device tree validity */
if (fdt_check_header(params))
return false;
/* Setup flat device-tree pointer */
initial_boot_params = params;
of_fdt_crc32 = crc32_be(~0, initial_boot_params,
fdt_totalsize(initial_boot_params));
return true;
}
void __init early_init_dt_scan_nodes(void)
{
int rc;
/* Initialize {size,address}-cells info */
early_init_dt_scan_root();
/* Retrieve various information from the /chosen node */
rc = early_init_dt_scan_chosen(boot_command_line);
if (rc)
pr_warn("No chosen node found, continuing without\n");
/* Setup memory, calling early_init_dt_add_memory_arch */
early_init_dt_scan_memory();
/* Handle linux,usable-memory-range property */
early_init_dt_check_for_usable_mem_range();
}
bool __init early_init_dt_scan(void *params)
{
bool status;
status = early_init_dt_verify(params);
if (!status)
return false;
early_init_dt_scan_nodes();
return true;
}
/**
* unflatten_device_tree - create tree of device_nodes from flat blob
*
* unflattens the device-tree passed by the firmware, creating the
* tree of struct device_node. It also fills the "name" and "type"
* pointers of the nodes so the normal device-tree walking functions
* can be used.
*/
void __init unflatten_device_tree(void)
{
__unflatten_device_tree(initial_boot_params, NULL, &of_root,
early_init_dt_alloc_memory_arch, false);
/* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
of_alias_scan(early_init_dt_alloc_memory_arch);
unittest_unflatten_overlay_base();
}
/**
* unflatten_and_copy_device_tree - copy and create tree of device_nodes from flat blob
*
* Copies and unflattens the device-tree passed by the firmware, creating the
* tree of struct device_node. It also fills the "name" and "type"
* pointers of the nodes so the normal device-tree walking functions
* can be used. This should only be used when the FDT memory has not been
* reserved, such as is the case when the FDT is built into the kernel init
* section. If the FDT memory is reserved already then unflatten_device_tree
* should be used instead.
*/
void __init unflatten_and_copy_device_tree(void)
{
int size;
void *dt;
if (!initial_boot_params) {
pr_warn("No valid device tree found, continuing without\n");
return;
}
size = fdt_totalsize(initial_boot_params);
dt = early_init_dt_alloc_memory_arch(size,
roundup_pow_of_two(FDT_V17_SIZE));
if (dt) {
memcpy(dt, initial_boot_params, size);
initial_boot_params = dt;
}
unflatten_device_tree();
}
#ifdef CONFIG_SYSFS
static ssize_t of_fdt_raw_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
memcpy(buf, initial_boot_params + off, count);
return count;
}
static int __init of_fdt_raw_init(void)
{
static struct bin_attribute of_fdt_raw_attr =
__BIN_ATTR(fdt, S_IRUSR, of_fdt_raw_read, NULL, 0);
if (!initial_boot_params)
return 0;
if (of_fdt_crc32 != crc32_be(~0, initial_boot_params,
fdt_totalsize(initial_boot_params))) {
pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n");
return 0;
}
of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
}
late_initcall(of_fdt_raw_init);
#endif
#endif /* CONFIG_OF_EARLY_FLATTREE */
| linux-master | drivers/of/fdt.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/of.h>
#include <linux/slab.h>
#include "of_private.h"
/* true when node is initialized */
static int of_node_is_initialized(const struct device_node *node)
{
return node && node->kobj.state_initialized;
}
/* true when node is attached (i.e. present on sysfs) */
int of_node_is_attached(const struct device_node *node)
{
return node && node->kobj.state_in_sysfs;
}
#ifndef CONFIG_OF_DYNAMIC
static void of_node_release(struct kobject *kobj)
{
/* Without CONFIG_OF_DYNAMIC, no nodes get freed */
}
#endif /* CONFIG_OF_DYNAMIC */
const struct kobj_type of_node_ktype = {
.release = of_node_release,
};
static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
struct property *pp = container_of(bin_attr, struct property, attr);
return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length);
}
/* always return newly allocated name, caller must free after use */
static const char *safe_name(struct kobject *kobj, const char *orig_name)
{
const char *name = orig_name;
struct kernfs_node *kn;
int i = 0;
/* don't be a hero. After 16 tries give up */
while (i < 16 && (kn = sysfs_get_dirent(kobj->sd, name))) {
sysfs_put(kn);
if (name != orig_name)
kfree(name);
name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i);
}
if (name == orig_name) {
name = kstrdup(orig_name, GFP_KERNEL);
} else {
pr_warn("Duplicate name in %s, renamed to \"%s\"\n",
kobject_name(kobj), name);
}
return name;
}
int __of_add_property_sysfs(struct device_node *np, struct property *pp)
{
int rc;
/* Important: Don't leak passwords */
bool secure = strncmp(pp->name, "security-", 9) == 0;
if (!IS_ENABLED(CONFIG_SYSFS))
return 0;
if (!of_kset || !of_node_is_attached(np))
return 0;
sysfs_bin_attr_init(&pp->attr);
pp->attr.attr.name = safe_name(&np->kobj, pp->name);
pp->attr.attr.mode = secure ? 0400 : 0444;
pp->attr.size = secure ? 0 : pp->length;
pp->attr.read = of_node_property_read;
rc = sysfs_create_bin_file(&np->kobj, &pp->attr);
WARN(rc, "error adding attribute %s to node %pOF\n", pp->name, np);
return rc;
}
void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop)
{
if (!IS_ENABLED(CONFIG_SYSFS))
return;
sysfs_remove_bin_file(&np->kobj, &prop->attr);
kfree(prop->attr.attr.name);
}
void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
{
/* at early boot, bail here and defer setup to of_init() */
if (of_kset && of_node_is_attached(np))
__of_sysfs_remove_bin_file(np, prop);
}
void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
struct property *oldprop)
{
/* At early boot, bail out and defer setup to of_init() */
if (!of_kset)
return;
if (oldprop)
__of_sysfs_remove_bin_file(np, oldprop);
__of_add_property_sysfs(np, newprop);
}
int __of_attach_node_sysfs(struct device_node *np)
{
const char *name;
struct kobject *parent;
struct property *pp;
int rc;
if (!IS_ENABLED(CONFIG_SYSFS) || !of_kset)
return 0;
np->kobj.kset = of_kset;
if (!np->parent) {
/* Nodes without parents are new top level trees */
name = safe_name(&of_kset->kobj, "base");
parent = NULL;
} else {
name = safe_name(&np->parent->kobj, kbasename(np->full_name));
parent = &np->parent->kobj;
}
if (!name)
return -ENOMEM;
rc = kobject_add(&np->kobj, parent, "%s", name);
kfree(name);
if (rc)
return rc;
for_each_property_of_node(np, pp)
__of_add_property_sysfs(np, pp);
of_node_get(np);
return 0;
}
void __of_detach_node_sysfs(struct device_node *np)
{
struct property *pp;
BUG_ON(!of_node_is_initialized(np));
if (!of_kset)
return;
/* only remove properties if on sysfs */
if (of_node_is_attached(np)) {
for_each_property_of_node(np, pp)
__of_sysfs_remove_bin_file(np, pp);
kobject_del(&np->kobj);
}
of_node_put(np);
}
| linux-master | drivers/of/kobj.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Linux kernel module helpers.
*/
#include <linux/of.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
ssize_t of_modalias(const struct device_node *np, char *str, ssize_t len)
{
const char *compat;
char *c;
struct property *p;
ssize_t csize;
ssize_t tsize;
/* Name & Type */
/* %p eats all alphanum characters, so %c must be used here */
csize = snprintf(str, len, "of:N%pOFn%c%s", np, 'T',
of_node_get_device_type(np));
tsize = csize;
len -= csize;
if (str)
str += csize;
of_property_for_each_string(np, "compatible", p, compat) {
csize = strlen(compat) + 1;
tsize += csize;
if (csize > len)
continue;
csize = snprintf(str, len, "C%s", compat);
for (c = str; c; ) {
c = strchr(c, ' ');
if (c)
*c++ = '_';
}
len -= csize;
str += csize;
}
return tsize;
}
int of_request_module(const struct device_node *np)
{
char *str;
ssize_t size;
int ret;
if (!np)
return -ENODEV;
size = of_modalias(np, NULL, 0);
if (size < 0)
return size;
/* Reserve an additional byte for the trailing '\0' */
size++;
str = kmalloc(size, GFP_KERNEL);
if (!str)
return -ENOMEM;
of_modalias(np, str, size);
str[size - 1] = '\0';
ret = request_module(str);
kfree(str);
return ret;
}
EXPORT_SYMBOL_GPL(of_request_module);
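/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a bus driver might ask userspace to load the module matching a child
 * node's modalias before creating the child device; child_np and dev are
 * assumed to be provided by the caller:
 *
 *	int ret = of_request_module(child_np);
 *
 *	if (ret)
 *		dev_dbg(dev, "no module for %pOF (%d)\n", child_np, ret);
 */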
| linux-master | drivers/of/module.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/of/property.c - Procedures for accessing and interpreting
* Devicetree properties and graphs.
*
* Initially created by copying procedures from drivers/of/base.c. This
* file contains the OF property as well as the OF graph interface
* functions.
*
* Paul Mackerras August 1996.
* Copyright (C) 1996-2005 Paul Mackerras.
*
* Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
* {engebret|bergner}@us.ibm.com
*
* Adapted for sparc and sparc64 by David S. Miller [email protected]
*
* Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell and
* Grant Likely.
*/
#define pr_fmt(fmt) "OF: " fmt
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/string.h>
#include <linux/moduleparam.h>
#include "of_private.h"
/**
* of_graph_is_present() - check graph's presence
* @node: pointer to device_node containing graph port
*
* Return: True if @node has a port or ports (with a port) sub-node,
* false otherwise.
*/
bool of_graph_is_present(const struct device_node *node)
{
struct device_node *ports, *port;
ports = of_get_child_by_name(node, "ports");
if (ports)
node = ports;
port = of_get_child_by_name(node, "port");
of_node_put(ports);
of_node_put(port);
return !!port;
}
EXPORT_SYMBOL(of_graph_is_present);
/**
* of_property_count_elems_of_size - Count the number of elements in a property
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
* @elem_size: size of the individual element
*
* Search for a property in a device node and count the number of elements of
* size elem_size in it.
*
* Return: The number of elements on success, -EINVAL if the property does not
* exist or its length does not match a multiple of elem_size and -ENODATA if
* the property does not have a value.
*/
int of_property_count_elems_of_size(const struct device_node *np,
const char *propname, int elem_size)
{
struct property *prop = of_find_property(np, propname, NULL);
if (!prop)
return -EINVAL;
if (!prop->value)
return -ENODATA;
if (prop->length % elem_size != 0) {
pr_err("size of %s in node %pOF is not a multiple of %d\n",
propname, np, elem_size);
return -EINVAL;
}
return prop->length / elem_size;
}
EXPORT_SYMBOL_GPL(of_property_count_elems_of_size);
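/*
 * Illustrative example (not part of this file): counting how many u32 cells
 * a property holds; "acme,thresholds" is a made-up property name:
 *
 *	int n = of_property_count_elems_of_size(np, "acme,thresholds",
 *						sizeof(u32));
 *	if (n < 0)
 *		return n;
 */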
/**
* of_find_property_value_of_size
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
* @min: minimum allowed length of property value
* @max: maximum allowed length of property value (0 means unlimited)
* @len: if !=NULL, actual length is written to here
*
* Search for a property in a device node and validate the requested size.
*
* Return: The property value on success, -EINVAL if the property does not
* exist, -ENODATA if property does not have a value, and -EOVERFLOW if the
* property data is too small or too large.
*
*/
static void *of_find_property_value_of_size(const struct device_node *np,
const char *propname, u32 min, u32 max, size_t *len)
{
struct property *prop = of_find_property(np, propname, NULL);
if (!prop)
return ERR_PTR(-EINVAL);
if (!prop->value)
return ERR_PTR(-ENODATA);
if (prop->length < min)
return ERR_PTR(-EOVERFLOW);
if (max && prop->length > max)
return ERR_PTR(-EOVERFLOW);
if (len)
*len = prop->length;
return prop->value;
}
/**
* of_property_read_u32_index - Find and read a u32 from a multi-value property.
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
* @index: index of the u32 in the list of values
* @out_value: pointer to return value, modified only if no error.
*
* Search for a property in a device node and read nth 32-bit value from
* it.
*
* Return: 0 on success, -EINVAL if the property does not exist,
* -ENODATA if property does not have a value, and -EOVERFLOW if the
* property data isn't large enough.
*
* The out_value is modified only if a valid u32 value can be decoded.
*/
int of_property_read_u32_index(const struct device_node *np,
const char *propname,
u32 index, u32 *out_value)
{
const u32 *val = of_find_property_value_of_size(np, propname,
((index + 1) * sizeof(*out_value)),
0,
NULL);
if (IS_ERR(val))
return PTR_ERR(val);
*out_value = be32_to_cpup(((__be32 *)val) + index);
return 0;
}
EXPORT_SYMBOL_GPL(of_property_read_u32_index);
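/*
 * Illustrative example (not part of this file): reading the second cell of a
 * multi-value u32 property; "acme,thresholds" is a made-up property name:
 *
 *	u32 val;
 *
 *	if (of_property_read_u32_index(np, "acme,thresholds", 1, &val))
 *		val = 0;
 *
 * val is left untouched unless the read succeeds, so the fallback assignment
 * only runs on error.
 */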
/**
* of_property_read_u64_index - Find and read a u64 from a multi-value property.
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
* @index: index of the u64 in the list of values
* @out_value: pointer to return value, modified only if no error.
*
* Search for a property in a device node and read nth 64-bit value from
* it.
*
* Return: 0 on success, -EINVAL if the property does not exist,
* -ENODATA if property does not have a value, and -EOVERFLOW if the
* property data isn't large enough.
*
* The out_value is modified only if a valid u64 value can be decoded.
*/
int of_property_read_u64_index(const struct device_node *np,
const char *propname,
u32 index, u64 *out_value)
{
const u64 *val = of_find_property_value_of_size(np, propname,
((index + 1) * sizeof(*out_value)),
0, NULL);
if (IS_ERR(val))
return PTR_ERR(val);
*out_value = be64_to_cpup(((__be64 *)val) + index);
return 0;
}
EXPORT_SYMBOL_GPL(of_property_read_u64_index);
/**
* of_property_read_variable_u8_array - Find and read an array of u8 from a
* property, with bounds on the minimum and maximum array size.
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
* @out_values: pointer to found values.
* @sz_min: minimum number of array elements to read
* @sz_max: maximum number of array elements to read, if zero there is no
* upper limit on the number of elements in the dts entry but only
* sz_min will be read.
*
* Search for a property in a device node and read 8-bit value(s) from
* it.
*
* The dts entry for the array should look like:
* ``property = /bits/ 8 <0x50 0x60 0x70>;``
*
* Return: The number of elements read on success, -EINVAL if the property
* does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
* if the property data is smaller than sz_min or longer than sz_max.
*
* The out_values is modified only if a valid u8 value can be decoded.
*/
int of_property_read_variable_u8_array(const struct device_node *np,
const char *propname, u8 *out_values,
size_t sz_min, size_t sz_max)
{
size_t sz, count;
const u8 *val = of_find_property_value_of_size(np, propname,
(sz_min * sizeof(*out_values)),
(sz_max * sizeof(*out_values)),
&sz);
if (IS_ERR(val))
return PTR_ERR(val);
if (!sz_max)
sz = sz_min;
else
sz /= sizeof(*out_values);
count = sz;
while (count--)
*out_values++ = *val++;
return sz;
}
EXPORT_SYMBOL_GPL(of_property_read_variable_u8_array);
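/*
 * Illustrative example (not part of this file): with a dts entry such as
 * acme,levels = /bits/ 8 <0x50 0x60 0x70>; (made-up property), a driver
 * could accept between 1 and 8 elements:
 *
 *	u8 levels[8];
 *	int n;
 *
 *	n = of_property_read_variable_u8_array(np, "acme,levels", levels, 1, 8);
 *	if (n < 0)
 *		return n;
 *
 * On success n is the number of elements actually read.
 */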
/**
* of_property_read_variable_u16_array - Find and read an array of u16 from a
* property, with bounds on the minimum and maximum array size.
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
* @out_values: pointer to found values.
* @sz_min: minimum number of array elements to read
* @sz_max: maximum number of array elements to read, if zero there is no
* upper limit on the number of elements in the dts entry but only
* sz_min will be read.
*
* Search for a property in a device node and read 16-bit value(s) from
* it.
*
* The dts entry for the array should look like:
* ``property = /bits/ 16 <0x5000 0x6000 0x7000>;``
*
* Return: The number of elements read on success, -EINVAL if the property
* does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
* if the property data is smaller than sz_min or longer than sz_max.
*
* The out_values is modified only if a valid u16 value can be decoded.
*/
int of_property_read_variable_u16_array(const struct device_node *np,
const char *propname, u16 *out_values,
size_t sz_min, size_t sz_max)
{
size_t sz, count;
const __be16 *val = of_find_property_value_of_size(np, propname,
(sz_min * sizeof(*out_values)),
(sz_max * sizeof(*out_values)),
&sz);
if (IS_ERR(val))
return PTR_ERR(val);
if (!sz_max)
sz = sz_min;
else
sz /= sizeof(*out_values);
count = sz;
while (count--)
*out_values++ = be16_to_cpup(val++);
return sz;
}
EXPORT_SYMBOL_GPL(of_property_read_variable_u16_array);
/**
* of_property_read_variable_u32_array - Find and read an array of 32 bit
* integers from a property, with bounds on the minimum and maximum array size.
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
* @out_values: pointer to return found values.
* @sz_min: minimum number of array elements to read
* @sz_max: maximum number of array elements to read, if zero there is no
* upper limit on the number of elements in the dts entry but only
* sz_min will be read.
*
* Search for a property in a device node and read 32-bit value(s) from
* it.
*
* Return: The number of elements read on success, -EINVAL if the property
* does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
* if the property data is smaller than sz_min or longer than sz_max.
*
* The out_values is modified only if a valid u32 value can be decoded.
*/
int of_property_read_variable_u32_array(const struct device_node *np,
const char *propname, u32 *out_values,
size_t sz_min, size_t sz_max)
{
size_t sz, count;
const __be32 *val = of_find_property_value_of_size(np, propname,
(sz_min * sizeof(*out_values)),
(sz_max * sizeof(*out_values)),
&sz);
if (IS_ERR(val))
return PTR_ERR(val);
if (!sz_max)
sz = sz_min;
else
sz /= sizeof(*out_values);
count = sz;
while (count--)
*out_values++ = be32_to_cpup(val++);
return sz;
}
EXPORT_SYMBOL_GPL(of_property_read_variable_u32_array);
/**
* of_property_read_u64 - Find and read a 64 bit integer from a property
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
* @out_value: pointer to return value, modified only if return value is 0.
*
* Search for a property in a device node and read a 64-bit value from
* it.
*
* Return: 0 on success, -EINVAL if the property does not exist,
* -ENODATA if property does not have a value, and -EOVERFLOW if the
* property data isn't large enough.
*
* The out_value is modified only if a valid u64 value can be decoded.
*/
int of_property_read_u64(const struct device_node *np, const char *propname,
u64 *out_value)
{
const __be32 *val = of_find_property_value_of_size(np, propname,
sizeof(*out_value),
0,
NULL);
if (IS_ERR(val))
return PTR_ERR(val);
*out_value = of_read_number(val, 2);
return 0;
}
EXPORT_SYMBOL_GPL(of_property_read_u64);
/**
* of_property_read_variable_u64_array - Find and read an array of 64 bit
* integers from a property, with bounds on the minimum and maximum array size.
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
* @out_values: pointer to found values.
* @sz_min: minimum number of array elements to read
* @sz_max: maximum number of array elements to read, if zero there is no
* upper limit on the number of elements in the dts entry but only
* sz_min will be read.
*
* Search for a property in a device node and read 64-bit value(s) from
* it.
*
* Return: The number of elements read on success, -EINVAL if the property
* does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
* if the property data is smaller than sz_min or longer than sz_max.
*
* The out_values is modified only if a valid u64 value can be decoded.
*/
int of_property_read_variable_u64_array(const struct device_node *np,
const char *propname, u64 *out_values,
size_t sz_min, size_t sz_max)
{
size_t sz, count;
const __be32 *val = of_find_property_value_of_size(np, propname,
(sz_min * sizeof(*out_values)),
(sz_max * sizeof(*out_values)),
&sz);
if (IS_ERR(val))
return PTR_ERR(val);
if (!sz_max)
sz = sz_min;
else
sz /= sizeof(*out_values);
count = sz;
while (count--) {
*out_values++ = of_read_number(val, 2);
val += 2;
}
return sz;
}
EXPORT_SYMBOL_GPL(of_property_read_variable_u64_array);
/**
* of_property_read_string - Find and read a string from a property
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
* @out_string: pointer to null terminated return string, modified only if
* return value is 0.
*
* Search for a property in a device tree node and retrieve a null
* terminated string value (pointer to data, not a copy).
*
* Return: 0 on success, -EINVAL if the property does not exist, -ENODATA if
* property does not have a value, and -EILSEQ if the string is not
* null-terminated within the length of the property data.
*
* Note that the empty string "" has length of 1, thus -ENODATA cannot
* be interpreted as an empty string.
*
* The out_string pointer is modified only if a valid string can be decoded.
*/
int of_property_read_string(const struct device_node *np, const char *propname,
const char **out_string)
{
const struct property *prop = of_find_property(np, propname, NULL);
if (!prop)
return -EINVAL;
if (!prop->length)
return -ENODATA;
if (strnlen(prop->value, prop->length) >= prop->length)
return -EILSEQ;
*out_string = prop->value;
return 0;
}
EXPORT_SYMBOL_GPL(of_property_read_string);
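/*
 * Illustrative usage sketch, not part of the kernel sources: the returned
 * pointer refers to the property data itself, so nothing needs to be freed,
 * but it is only valid as long as the node and its property exist.
 *
 *	const char *label;
 *
 *	if (!of_property_read_string(np, "label", &label))
 *		pr_info("label: %s\n", label);
 */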
/**
* of_property_match_string() - Find string in a list and return index
* @np: pointer to node containing string list property
* @propname: string list property name
* @string: pointer to string to search for in string list
*
* This function searches a string list property and returns the index
* of a specific string value.
*/
int of_property_match_string(const struct device_node *np, const char *propname,
const char *string)
{
const struct property *prop = of_find_property(np, propname, NULL);
size_t l;
int i;
const char *p, *end;
if (!prop)
return -EINVAL;
if (!prop->value)
return -ENODATA;
p = prop->value;
end = p + prop->length;
for (i = 0; p < end; i++, p += l) {
l = strnlen(p, end - p) + 1;
if (p + l > end)
return -EILSEQ;
pr_debug("comparing %s with %s\n", string, p);
if (strcmp(string, p) == 0)
return i; /* Found it; return index */
}
return -ENODATA;
}
EXPORT_SYMBOL_GPL(of_property_match_string);
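/*
 * Illustrative usage sketch, not part of the kernel sources: looking up the
 * index of one entry in a "clock-names" style string list. The index can then
 * be passed to phandle parsing helpers that take an index argument.
 *
 *	int idx = of_property_match_string(np, "clock-names", "bus");
 *
 *	if (idx < 0)
 *		return idx;
 */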
/**
* of_property_read_string_helper() - Utility helper for parsing string properties
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
* @out_strs: output array of string pointers.
* @sz: number of array elements to read.
* @skip: Number of strings to skip over at beginning of list.
*
* Don't call this function directly. It is a utility helper for the
* of_property_read_string*() family of functions.
*/
int of_property_read_string_helper(const struct device_node *np,
const char *propname, const char **out_strs,
size_t sz, int skip)
{
const struct property *prop = of_find_property(np, propname, NULL);
int l = 0, i = 0;
const char *p, *end;
if (!prop)
return -EINVAL;
if (!prop->value)
return -ENODATA;
p = prop->value;
end = p + prop->length;
for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) {
l = strnlen(p, end - p) + 1;
if (p + l > end)
return -EILSEQ;
if (out_strs && i >= skip)
*out_strs++ = p;
}
i -= skip;
return i <= 0 ? -ENODATA : i;
}
EXPORT_SYMBOL_GPL(of_property_read_string_helper);
const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
u32 *pu)
{
const void *curv = cur;
if (!prop)
return NULL;
if (!cur) {
curv = prop->value;
goto out_val;
}
curv += sizeof(*cur);
if (curv >= prop->value + prop->length)
return NULL;
out_val:
*pu = be32_to_cpup(curv);
return curv;
}
EXPORT_SYMBOL_GPL(of_prop_next_u32);
const char *of_prop_next_string(struct property *prop, const char *cur)
{
const void *curv = cur;
if (!prop)
return NULL;
if (!cur)
return prop->value;
curv += strlen(cur) + 1;
if (curv >= prop->value + prop->length)
return NULL;
return curv;
}
EXPORT_SYMBOL_GPL(of_prop_next_string);
/**
* of_graph_parse_endpoint() - parse common endpoint node properties
* @node: pointer to endpoint device_node
* @endpoint: pointer to the OF endpoint data structure
*
* The caller should hold a reference to @node.
*/
int of_graph_parse_endpoint(const struct device_node *node,
struct of_endpoint *endpoint)
{
struct device_node *port_node = of_get_parent(node);
WARN_ONCE(!port_node, "%s(): endpoint %pOF has no parent node\n",
__func__, node);
memset(endpoint, 0, sizeof(*endpoint));
endpoint->local_node = node;
/*
* It doesn't matter whether the two calls below succeed.
* If they don't then the default value 0 is used.
*/
of_property_read_u32(port_node, "reg", &endpoint->port);
of_property_read_u32(node, "reg", &endpoint->id);
of_node_put(port_node);
return 0;
}
EXPORT_SYMBOL(of_graph_parse_endpoint);
/**
* of_graph_get_port_by_id() - get the port matching a given id
* @parent: pointer to the parent device node
* @id: id of the port
*
* Return: A 'port' node pointer with refcount incremented. The caller
* has to use of_node_put() on it when done.
*/
struct device_node *of_graph_get_port_by_id(struct device_node *parent, u32 id)
{
struct device_node *node, *port;
node = of_get_child_by_name(parent, "ports");
if (node)
parent = node;
for_each_child_of_node(parent, port) {
u32 port_id = 0;
if (!of_node_name_eq(port, "port"))
continue;
of_property_read_u32(port, "reg", &port_id);
if (id == port_id)
break;
}
of_node_put(node);
return port;
}
EXPORT_SYMBOL(of_graph_get_port_by_id);
/**
* of_graph_get_next_endpoint() - get next endpoint node
* @parent: pointer to the parent device node
* @prev: previous endpoint node, or NULL to get first
*
* Return: An 'endpoint' node pointer with refcount incremented. Refcount
* of the passed @prev node is decremented.
*/
struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
struct device_node *prev)
{
struct device_node *endpoint;
struct device_node *port;
if (!parent)
return NULL;
/*
	 * Start by locating the port node. If no previous endpoint is
	 * specified, search for the first port node; otherwise get the
	 * parent port node of the previous endpoint.
*/
if (!prev) {
struct device_node *node;
node = of_get_child_by_name(parent, "ports");
if (node)
parent = node;
port = of_get_child_by_name(parent, "port");
of_node_put(node);
if (!port) {
pr_err("graph: no port node found in %pOF\n", parent);
return NULL;
}
} else {
port = of_get_parent(prev);
if (WARN_ONCE(!port, "%s(): endpoint %pOF has no parent node\n",
__func__, prev))
return NULL;
}
while (1) {
/*
* Now that we have a port node, get the next endpoint by
* getting the next child. If the previous endpoint is NULL this
* will return the first child.
*/
endpoint = of_get_next_child(port, prev);
if (endpoint) {
of_node_put(port);
return endpoint;
}
/* No more endpoints under this port, try the next one. */
prev = NULL;
do {
port = of_get_next_child(parent, port);
if (!port)
return NULL;
} while (!of_node_name_eq(port, "port"));
}
}
EXPORT_SYMBOL(of_graph_get_next_endpoint);
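/*
 * Illustrative usage sketch, not part of the kernel sources: walking all
 * endpoints of a node. The for_each_endpoint_of_node() helper wraps this
 * pattern; handle_endpoint() below is a hypothetical consumer.
 *
 *	struct device_node *ep = NULL;
 *
 *	while ((ep = of_graph_get_next_endpoint(np, ep)))
 *		handle_endpoint(ep);
 */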
/**
* of_graph_get_endpoint_by_regs() - get endpoint node of specific identifiers
* @parent: pointer to the parent device node
* @port_reg: identifier (value of reg property) of the parent port node
* @reg: identifier (value of reg property) of the endpoint node
*
 * Return: An 'endpoint' node pointer which is identified by reg and which is
 * at the same time the child of a port node identified by port_reg. reg and
 * port_reg are ignored when they are -1. Use of_node_put() on the pointer
 * when done.
*/
struct device_node *of_graph_get_endpoint_by_regs(
const struct device_node *parent, int port_reg, int reg)
{
struct of_endpoint endpoint;
struct device_node *node = NULL;
for_each_endpoint_of_node(parent, node) {
of_graph_parse_endpoint(node, &endpoint);
if (((port_reg == -1) || (endpoint.port == port_reg)) &&
((reg == -1) || (endpoint.id == reg)))
return node;
}
return NULL;
}
EXPORT_SYMBOL(of_graph_get_endpoint_by_regs);
/**
* of_graph_get_remote_endpoint() - get remote endpoint node
* @node: pointer to a local endpoint device_node
*
* Return: Remote endpoint node associated with remote endpoint node linked
* to @node. Use of_node_put() on it when done.
*/
struct device_node *of_graph_get_remote_endpoint(const struct device_node *node)
{
/* Get remote endpoint node. */
return of_parse_phandle(node, "remote-endpoint", 0);
}
EXPORT_SYMBOL(of_graph_get_remote_endpoint);
/**
* of_graph_get_port_parent() - get port's parent node
* @node: pointer to a local endpoint device_node
*
* Return: device node associated with endpoint node linked
* to @node. Use of_node_put() on it when done.
*/
struct device_node *of_graph_get_port_parent(struct device_node *node)
{
unsigned int depth;
if (!node)
return NULL;
/*
* Preserve usecount for passed in node as of_get_next_parent()
* will do of_node_put() on it.
*/
of_node_get(node);
/* Walk 3 levels up only if there is 'ports' node. */
for (depth = 3; depth && node; depth--) {
node = of_get_next_parent(node);
if (depth == 2 && !of_node_name_eq(node, "ports"))
break;
}
return node;
}
EXPORT_SYMBOL(of_graph_get_port_parent);
/**
* of_graph_get_remote_port_parent() - get remote port's parent node
* @node: pointer to a local endpoint device_node
*
* Return: Remote device node associated with remote endpoint node linked
* to @node. Use of_node_put() on it when done.
*/
struct device_node *of_graph_get_remote_port_parent(
const struct device_node *node)
{
struct device_node *np, *pp;
/* Get remote endpoint node. */
np = of_graph_get_remote_endpoint(node);
pp = of_graph_get_port_parent(np);
of_node_put(np);
return pp;
}
EXPORT_SYMBOL(of_graph_get_remote_port_parent);
/**
* of_graph_get_remote_port() - get remote port node
* @node: pointer to a local endpoint device_node
*
* Return: Remote port node associated with remote endpoint node linked
* to @node. Use of_node_put() on it when done.
*/
struct device_node *of_graph_get_remote_port(const struct device_node *node)
{
struct device_node *np;
/* Get remote endpoint node. */
np = of_graph_get_remote_endpoint(node);
if (!np)
return NULL;
return of_get_next_parent(np);
}
EXPORT_SYMBOL(of_graph_get_remote_port);
int of_graph_get_endpoint_count(const struct device_node *np)
{
struct device_node *endpoint;
int num = 0;
for_each_endpoint_of_node(np, endpoint)
num++;
return num;
}
EXPORT_SYMBOL(of_graph_get_endpoint_count);
/**
* of_graph_get_remote_node() - get remote parent device_node for given port/endpoint
* @node: pointer to parent device_node containing graph port/endpoint
* @port: identifier (value of reg property) of the parent port node
* @endpoint: identifier (value of reg property) of the endpoint node
*
* Return: Remote device node associated with remote endpoint node linked
* to @node. Use of_node_put() on it when done.
*/
struct device_node *of_graph_get_remote_node(const struct device_node *node,
u32 port, u32 endpoint)
{
struct device_node *endpoint_node, *remote;
endpoint_node = of_graph_get_endpoint_by_regs(node, port, endpoint);
if (!endpoint_node) {
pr_debug("no valid endpoint (%d, %d) for node %pOF\n",
port, endpoint, node);
return NULL;
}
remote = of_graph_get_remote_port_parent(endpoint_node);
of_node_put(endpoint_node);
if (!remote) {
pr_debug("no valid remote node\n");
return NULL;
}
if (!of_device_is_available(remote)) {
pr_debug("not available for remote node\n");
of_node_put(remote);
return NULL;
}
return remote;
}
EXPORT_SYMBOL(of_graph_get_remote_node);
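/*
 * Illustrative usage sketch, not part of the kernel sources: resolving the
 * device connected to port 0, endpoint 0 of a hypothetical display
 * controller node.
 *
 *	struct device_node *panel_np;
 *
 *	panel_np = of_graph_get_remote_node(np, 0, 0);
 *	if (!panel_np)
 *		return -ENODEV;
 *	...
 *	of_node_put(panel_np);
 */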
static struct fwnode_handle *of_fwnode_get(struct fwnode_handle *fwnode)
{
return of_fwnode_handle(of_node_get(to_of_node(fwnode)));
}
static void of_fwnode_put(struct fwnode_handle *fwnode)
{
of_node_put(to_of_node(fwnode));
}
static bool of_fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
return of_device_is_available(to_of_node(fwnode));
}
static bool of_fwnode_device_dma_supported(const struct fwnode_handle *fwnode)
{
return true;
}
static enum dev_dma_attr
of_fwnode_device_get_dma_attr(const struct fwnode_handle *fwnode)
{
if (of_dma_is_coherent(to_of_node(fwnode)))
return DEV_DMA_COHERENT;
else
return DEV_DMA_NON_COHERENT;
}
static bool of_fwnode_property_present(const struct fwnode_handle *fwnode,
const char *propname)
{
return of_property_read_bool(to_of_node(fwnode), propname);
}
static int of_fwnode_property_read_int_array(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
size_t nval)
{
const struct device_node *node = to_of_node(fwnode);
if (!val)
return of_property_count_elems_of_size(node, propname,
elem_size);
switch (elem_size) {
case sizeof(u8):
return of_property_read_u8_array(node, propname, val, nval);
case sizeof(u16):
return of_property_read_u16_array(node, propname, val, nval);
case sizeof(u32):
return of_property_read_u32_array(node, propname, val, nval);
case sizeof(u64):
return of_property_read_u64_array(node, propname, val, nval);
}
return -ENXIO;
}
static int
of_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
const char *propname, const char **val,
size_t nval)
{
const struct device_node *node = to_of_node(fwnode);
return val ?
of_property_read_string_array(node, propname, val, nval) :
of_property_count_strings(node, propname);
}
static const char *of_fwnode_get_name(const struct fwnode_handle *fwnode)
{
return kbasename(to_of_node(fwnode)->full_name);
}
static const char *of_fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
{
/* Root needs no prefix here (its name is "/"). */
if (!to_of_node(fwnode)->parent)
return "";
return "/";
}
static struct fwnode_handle *
of_fwnode_get_parent(const struct fwnode_handle *fwnode)
{
return of_fwnode_handle(of_get_parent(to_of_node(fwnode)));
}
static struct fwnode_handle *
of_fwnode_get_next_child_node(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
{
return of_fwnode_handle(of_get_next_available_child(to_of_node(fwnode),
to_of_node(child)));
}
static struct fwnode_handle *
of_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
const char *childname)
{
const struct device_node *node = to_of_node(fwnode);
struct device_node *child;
for_each_available_child_of_node(node, child)
if (of_node_name_eq(child, childname))
return of_fwnode_handle(child);
return NULL;
}
static int
of_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
const char *prop, const char *nargs_prop,
unsigned int nargs, unsigned int index,
struct fwnode_reference_args *args)
{
struct of_phandle_args of_args;
unsigned int i;
int ret;
if (nargs_prop)
ret = of_parse_phandle_with_args(to_of_node(fwnode), prop,
nargs_prop, index, &of_args);
else
ret = of_parse_phandle_with_fixed_args(to_of_node(fwnode), prop,
nargs, index, &of_args);
if (ret < 0)
return ret;
if (!args) {
of_node_put(of_args.np);
return 0;
}
args->nargs = of_args.args_count;
args->fwnode = of_fwnode_handle(of_args.np);
for (i = 0; i < NR_FWNODE_REFERENCE_ARGS; i++)
args->args[i] = i < of_args.args_count ? of_args.args[i] : 0;
return 0;
}
static struct fwnode_handle *
of_fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
{
return of_fwnode_handle(of_graph_get_next_endpoint(to_of_node(fwnode),
to_of_node(prev)));
}
static struct fwnode_handle *
of_fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
{
return of_fwnode_handle(
of_graph_get_remote_endpoint(to_of_node(fwnode)));
}
static struct fwnode_handle *
of_fwnode_graph_get_port_parent(struct fwnode_handle *fwnode)
{
struct device_node *np;
/* Get the parent of the port */
np = of_get_parent(to_of_node(fwnode));
if (!np)
return NULL;
/* Is this the "ports" node? If not, it's the port parent. */
if (!of_node_name_eq(np, "ports"))
return of_fwnode_handle(np);
return of_fwnode_handle(of_get_next_parent(np));
}
static int of_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint)
{
const struct device_node *node = to_of_node(fwnode);
struct device_node *port_node = of_get_parent(node);
endpoint->local_fwnode = fwnode;
of_property_read_u32(port_node, "reg", &endpoint->port);
of_property_read_u32(node, "reg", &endpoint->id);
of_node_put(port_node);
return 0;
}
static const void *
of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
const struct device *dev)
{
return of_device_get_match_data(dev);
}
static struct device_node *of_get_compat_node(struct device_node *np)
{
of_node_get(np);
while (np) {
if (!of_device_is_available(np)) {
of_node_put(np);
np = NULL;
}
if (of_property_present(np, "compatible"))
break;
np = of_get_next_parent(np);
}
return np;
}
static struct device_node *of_get_compat_node_parent(struct device_node *np)
{
struct device_node *parent, *node;
parent = of_get_parent(np);
node = of_get_compat_node(parent);
of_node_put(parent);
return node;
}
static void of_link_to_phandle(struct device_node *con_np,
struct device_node *sup_np)
{
struct device_node *tmp_np = of_node_get(sup_np);
/* Check that sup_np and its ancestors are available. */
while (tmp_np) {
if (of_fwnode_handle(tmp_np)->dev) {
of_node_put(tmp_np);
break;
}
if (!of_device_is_available(tmp_np)) {
of_node_put(tmp_np);
return;
}
tmp_np = of_get_next_parent(tmp_np);
}
fwnode_link_add(of_fwnode_handle(con_np), of_fwnode_handle(sup_np));
}
/**
* parse_prop_cells - Property parsing function for suppliers
*
* @np: Pointer to device tree node containing a list
* @prop_name: Name of property to be parsed. Expected to hold phandle values
* @index: For properties holding a list of phandles, this is the index
* into the list.
* @list_name: Property name that is known to contain list of phandle(s) to
* supplier(s)
* @cells_name: property name that specifies phandles' arguments count
*
* This is a helper function to parse properties that have a known fixed name
* and are a list of phandles and phandle arguments.
*
* Returns:
* - phandle node pointer with refcount incremented. Caller must of_node_put()
* on it when done.
* - NULL if no phandle found at index
*/
static struct device_node *parse_prop_cells(struct device_node *np,
const char *prop_name, int index,
const char *list_name,
const char *cells_name)
{
struct of_phandle_args sup_args;
if (strcmp(prop_name, list_name))
return NULL;
if (__of_parse_phandle_with_args(np, list_name, cells_name, 0, index,
&sup_args))
return NULL;
return sup_args.np;
}
#define DEFINE_SIMPLE_PROP(fname, name, cells) \
static struct device_node *parse_##fname(struct device_node *np, \
const char *prop_name, int index) \
{ \
return parse_prop_cells(np, prop_name, index, name, cells); \
}
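/*
 * For illustration only (not part of the kernel sources): the
 * DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells") instance further below
 * expands to roughly:
 *
 *	static struct device_node *parse_clocks(struct device_node *np,
 *						const char *prop_name, int index)
 *	{
 *		return parse_prop_cells(np, prop_name, index,
 *					"clocks", "#clock-cells");
 *	}
 */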
static int strcmp_suffix(const char *str, const char *suffix)
{
unsigned int len, suffix_len;
len = strlen(str);
suffix_len = strlen(suffix);
if (len <= suffix_len)
return -1;
return strcmp(str + len - suffix_len, suffix);
}
/**
* parse_suffix_prop_cells - Suffix property parsing function for suppliers
*
* @np: Pointer to device tree node containing a list
* @prop_name: Name of property to be parsed. Expected to hold phandle values
* @index: For properties holding a list of phandles, this is the index
* into the list.
* @suffix: Property suffix that is known to contain list of phandle(s) to
* supplier(s)
* @cells_name: property name that specifies phandles' arguments count
*
* This is a helper function to parse properties that have a known fixed suffix
* and are a list of phandles and phandle arguments.
*
* Returns:
* - phandle node pointer with refcount incremented. Caller must of_node_put()
* on it when done.
* - NULL if no phandle found at index
*/
static struct device_node *parse_suffix_prop_cells(struct device_node *np,
const char *prop_name, int index,
const char *suffix,
const char *cells_name)
{
struct of_phandle_args sup_args;
if (strcmp_suffix(prop_name, suffix))
return NULL;
if (of_parse_phandle_with_args(np, prop_name, cells_name, index,
&sup_args))
return NULL;
return sup_args.np;
}
#define DEFINE_SUFFIX_PROP(fname, suffix, cells) \
static struct device_node *parse_##fname(struct device_node *np, \
const char *prop_name, int index) \
{ \
return parse_suffix_prop_cells(np, prop_name, index, suffix, cells); \
}
/**
* struct supplier_bindings - Property parsing functions for suppliers
*
* @parse_prop: function name
* parse_prop() finds the node corresponding to a supplier phandle
* @parse_prop.np: Pointer to device node holding supplier phandle property
* @parse_prop.prop_name: Name of property holding a phandle value
* @parse_prop.index: For properties holding a list of phandles, this is the
* index into the list
* @optional: Describes whether a supplier is mandatory or not
* @node_not_dev: The consumer node containing the property is never converted
* to a struct device. Instead, parse ancestor nodes for the
* compatible property to find a node corresponding to a device.
*
* Returns:
* parse_prop() return values are
* - phandle node pointer with refcount incremented. Caller must of_node_put()
* on it when done.
* - NULL if no phandle found at index
*/
struct supplier_bindings {
struct device_node *(*parse_prop)(struct device_node *np,
const char *prop_name, int index);
bool optional;
bool node_not_dev;
};
DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells")
DEFINE_SIMPLE_PROP(interconnects, "interconnects", "#interconnect-cells")
DEFINE_SIMPLE_PROP(iommus, "iommus", "#iommu-cells")
DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells")
DEFINE_SIMPLE_PROP(io_channels, "io-channel", "#io-channel-cells")
DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL)
DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells")
DEFINE_SIMPLE_PROP(hwlocks, "hwlocks", "#hwlock-cells")
DEFINE_SIMPLE_PROP(extcon, "extcon", NULL)
DEFINE_SIMPLE_PROP(nvmem_cells, "nvmem-cells", "#nvmem-cell-cells")
DEFINE_SIMPLE_PROP(phys, "phys", "#phy-cells")
DEFINE_SIMPLE_PROP(wakeup_parent, "wakeup-parent", NULL)
DEFINE_SIMPLE_PROP(pinctrl0, "pinctrl-0", NULL)
DEFINE_SIMPLE_PROP(pinctrl1, "pinctrl-1", NULL)
DEFINE_SIMPLE_PROP(pinctrl2, "pinctrl-2", NULL)
DEFINE_SIMPLE_PROP(pinctrl3, "pinctrl-3", NULL)
DEFINE_SIMPLE_PROP(pinctrl4, "pinctrl-4", NULL)
DEFINE_SIMPLE_PROP(pinctrl5, "pinctrl-5", NULL)
DEFINE_SIMPLE_PROP(pinctrl6, "pinctrl-6", NULL)
DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL)
DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL)
DEFINE_SIMPLE_PROP(remote_endpoint, "remote-endpoint", NULL)
DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells")
DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells")
DEFINE_SIMPLE_PROP(leds, "leds", NULL)
DEFINE_SIMPLE_PROP(backlight, "backlight", NULL)
DEFINE_SIMPLE_PROP(panel, "panel", NULL)
DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
static struct device_node *parse_gpios(struct device_node *np,
const char *prop_name, int index)
{
if (!strcmp_suffix(prop_name, ",nr-gpios"))
return NULL;
return parse_suffix_prop_cells(np, prop_name, index, "-gpios",
"#gpio-cells");
}
static struct device_node *parse_iommu_maps(struct device_node *np,
const char *prop_name, int index)
{
if (strcmp(prop_name, "iommu-map"))
return NULL;
return of_parse_phandle(np, prop_name, (index * 4) + 1);
}
static struct device_node *parse_gpio_compat(struct device_node *np,
const char *prop_name, int index)
{
struct of_phandle_args sup_args;
if (strcmp(prop_name, "gpio") && strcmp(prop_name, "gpios"))
return NULL;
/*
* Ignore node with gpio-hog property since its gpios are all provided
* by its parent.
*/
if (of_property_read_bool(np, "gpio-hog"))
return NULL;
if (of_parse_phandle_with_args(np, prop_name, "#gpio-cells", index,
&sup_args))
return NULL;
return sup_args.np;
}
static struct device_node *parse_interrupts(struct device_node *np,
const char *prop_name, int index)
{
struct of_phandle_args sup_args;
if (!IS_ENABLED(CONFIG_OF_IRQ) || IS_ENABLED(CONFIG_PPC))
return NULL;
if (strcmp(prop_name, "interrupts") &&
strcmp(prop_name, "interrupts-extended"))
return NULL;
return of_irq_parse_one(np, index, &sup_args) ? NULL : sup_args.np;
}
static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_clocks, },
{ .parse_prop = parse_interconnects, },
{ .parse_prop = parse_iommus, .optional = true, },
{ .parse_prop = parse_iommu_maps, .optional = true, },
{ .parse_prop = parse_mboxes, },
{ .parse_prop = parse_io_channels, },
{ .parse_prop = parse_interrupt_parent, },
{ .parse_prop = parse_dmas, .optional = true, },
{ .parse_prop = parse_power_domains, },
{ .parse_prop = parse_hwlocks, },
{ .parse_prop = parse_extcon, },
{ .parse_prop = parse_nvmem_cells, },
{ .parse_prop = parse_phys, },
{ .parse_prop = parse_wakeup_parent, },
{ .parse_prop = parse_pinctrl0, },
{ .parse_prop = parse_pinctrl1, },
{ .parse_prop = parse_pinctrl2, },
{ .parse_prop = parse_pinctrl3, },
{ .parse_prop = parse_pinctrl4, },
{ .parse_prop = parse_pinctrl5, },
{ .parse_prop = parse_pinctrl6, },
{ .parse_prop = parse_pinctrl7, },
{ .parse_prop = parse_pinctrl8, },
{ .parse_prop = parse_remote_endpoint, .node_not_dev = true, },
{ .parse_prop = parse_pwms, },
{ .parse_prop = parse_resets, },
{ .parse_prop = parse_leds, },
{ .parse_prop = parse_backlight, },
{ .parse_prop = parse_panel, },
{ .parse_prop = parse_gpio_compat, },
{ .parse_prop = parse_interrupts, },
{ .parse_prop = parse_regulators, },
{ .parse_prop = parse_gpio, },
{ .parse_prop = parse_gpios, },
{}
};
/**
* of_link_property - Create device links to suppliers listed in a property
* @con_np: The consumer device tree node which contains the property
* @prop_name: Name of property to be parsed
*
* This function checks if the property @prop_name that is present in the
* @con_np device tree node is one of the known common device tree bindings
* that list phandles to suppliers. If @prop_name isn't one, this function
* doesn't do anything.
*
* If @prop_name is one, this function attempts to create fwnode links from the
* consumer device tree node @con_np to all the suppliers device tree nodes
* listed in @prop_name.
*
* Any failed attempt to create a fwnode link will NOT result in an immediate
* return. of_link_property() must create links to all the available supplier
* device tree nodes even when attempts to create a link to one or more
* suppliers fail.
*/
static int of_link_property(struct device_node *con_np, const char *prop_name)
{
struct device_node *phandle;
const struct supplier_bindings *s = of_supplier_bindings;
unsigned int i = 0;
bool matched = false;
/* Do not stop at first failed link, link all available suppliers. */
while (!matched && s->parse_prop) {
if (s->optional && !fw_devlink_is_strict()) {
s++;
continue;
}
while ((phandle = s->parse_prop(con_np, prop_name, i))) {
struct device_node *con_dev_np;
con_dev_np = s->node_not_dev
? of_get_compat_node_parent(con_np)
: of_node_get(con_np);
matched = true;
i++;
of_link_to_phandle(con_dev_np, phandle);
of_node_put(phandle);
of_node_put(con_dev_np);
}
s++;
}
return 0;
}
static void __iomem *of_fwnode_iomap(struct fwnode_handle *fwnode, int index)
{
#ifdef CONFIG_OF_ADDRESS
return of_iomap(to_of_node(fwnode), index);
#else
return NULL;
#endif
}
static int of_fwnode_irq_get(const struct fwnode_handle *fwnode,
unsigned int index)
{
return of_irq_get(to_of_node(fwnode), index);
}
static int of_fwnode_add_links(struct fwnode_handle *fwnode)
{
struct property *p;
struct device_node *con_np = to_of_node(fwnode);
if (IS_ENABLED(CONFIG_X86))
return 0;
if (!con_np)
return -EINVAL;
for_each_property_of_node(con_np, p)
of_link_property(con_np, p->name);
return 0;
}
const struct fwnode_operations of_fwnode_ops = {
.get = of_fwnode_get,
.put = of_fwnode_put,
.device_is_available = of_fwnode_device_is_available,
.device_get_match_data = of_fwnode_device_get_match_data,
.device_dma_supported = of_fwnode_device_dma_supported,
.device_get_dma_attr = of_fwnode_device_get_dma_attr,
.property_present = of_fwnode_property_present,
.property_read_int_array = of_fwnode_property_read_int_array,
.property_read_string_array = of_fwnode_property_read_string_array,
.get_name = of_fwnode_get_name,
.get_name_prefix = of_fwnode_get_name_prefix,
.get_parent = of_fwnode_get_parent,
.get_next_child_node = of_fwnode_get_next_child_node,
.get_named_child_node = of_fwnode_get_named_child_node,
.get_reference_args = of_fwnode_get_reference_args,
.graph_get_next_endpoint = of_fwnode_graph_get_next_endpoint,
.graph_get_remote_endpoint = of_fwnode_graph_get_remote_endpoint,
.graph_get_port_parent = of_fwnode_graph_get_port_parent,
.graph_parse_endpoint = of_fwnode_graph_parse_endpoint,
.iomap = of_fwnode_iomap,
.irq_get = of_fwnode_irq_get,
.add_links = of_fwnode_add_links,
};
EXPORT_SYMBOL_GPL(of_fwnode_ops);
| linux-master | drivers/of/property.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Procedures for creating, accessing and interpreting the device tree.
*
* Paul Mackerras August 1996.
* Copyright (C) 1996-2005 Paul Mackerras.
*
* Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
* {engebret|bergner}@us.ibm.com
*
* Adapted for sparc and sparc64 by David S. Miller [email protected]
*
* Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell and
* Grant Likely.
*/
#define pr_fmt(fmt) "OF: " fmt
#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include "of_private.h"
LIST_HEAD(aliases_lookup);
struct device_node *of_root;
EXPORT_SYMBOL(of_root);
struct device_node *of_chosen;
EXPORT_SYMBOL(of_chosen);
struct device_node *of_aliases;
struct device_node *of_stdout;
static const char *of_stdout_options;
struct kset *of_kset;
/*
* Used to protect the of_aliases, to hold off addition of nodes to sysfs.
* This mutex must be held whenever modifications are being made to the
* device tree. The of_{attach,detach}_node() and
* of_{add,remove,update}_property() helpers make sure this happens.
*/
DEFINE_MUTEX(of_mutex);
/* use when traversing tree through the child, sibling,
* or parent members of struct device_node.
*/
DEFINE_RAW_SPINLOCK(devtree_lock);
bool of_node_name_eq(const struct device_node *np, const char *name)
{
const char *node_name;
size_t len;
if (!np)
return false;
node_name = kbasename(np->full_name);
len = strchrnul(node_name, '@') - node_name;
return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
}
EXPORT_SYMBOL(of_node_name_eq);
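/*
 * Illustrative usage sketch, not part of the kernel sources: the comparison
 * stops at the unit address, so a node named either "ethernet@ff000000" or
 * plain "ethernet" matches here.
 *
 *	if (of_node_name_eq(np, "ethernet"))
 *		return true;
 */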
bool of_node_name_prefix(const struct device_node *np, const char *prefix)
{
if (!np)
return false;
return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
}
EXPORT_SYMBOL(of_node_name_prefix);
static bool __of_node_is_type(const struct device_node *np, const char *type)
{
const char *match = __of_get_property(np, "device_type", NULL);
return np && match && type && !strcmp(match, type);
}
int of_bus_n_addr_cells(struct device_node *np)
{
u32 cells;
for (; np; np = np->parent)
if (!of_property_read_u32(np, "#address-cells", &cells))
return cells;
/* No #address-cells property for the root node */
return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
}
int of_n_addr_cells(struct device_node *np)
{
if (np->parent)
np = np->parent;
return of_bus_n_addr_cells(np);
}
EXPORT_SYMBOL(of_n_addr_cells);
int of_bus_n_size_cells(struct device_node *np)
{
u32 cells;
for (; np; np = np->parent)
if (!of_property_read_u32(np, "#size-cells", &cells))
return cells;
/* No #size-cells property for the root node */
return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
}
int of_n_size_cells(struct device_node *np)
{
if (np->parent)
np = np->parent;
return of_bus_n_size_cells(np);
}
EXPORT_SYMBOL(of_n_size_cells);
#ifdef CONFIG_NUMA
int __weak of_node_to_nid(struct device_node *np)
{
return NUMA_NO_NODE;
}
#endif
#define OF_PHANDLE_CACHE_BITS 7
#define OF_PHANDLE_CACHE_SZ BIT(OF_PHANDLE_CACHE_BITS)
static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];
static u32 of_phandle_cache_hash(phandle handle)
{
return hash_32(handle, OF_PHANDLE_CACHE_BITS);
}
/*
* Caller must hold devtree_lock.
*/
void __of_phandle_cache_inv_entry(phandle handle)
{
u32 handle_hash;
struct device_node *np;
if (!handle)
return;
handle_hash = of_phandle_cache_hash(handle);
np = phandle_cache[handle_hash];
if (np && handle == np->phandle)
phandle_cache[handle_hash] = NULL;
}
void __init of_core_init(void)
{
struct device_node *np;
of_platform_register_reconfig_notifier();
/* Create the kset, and register existing nodes */
mutex_lock(&of_mutex);
of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
if (!of_kset) {
mutex_unlock(&of_mutex);
pr_err("failed to register existing nodes\n");
return;
}
for_each_of_allnodes(np) {
__of_attach_node_sysfs(np);
if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)])
phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
}
mutex_unlock(&of_mutex);
/* Symlink in /proc as required by userspace ABI */
if (of_root)
proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
}
static struct property *__of_find_property(const struct device_node *np,
const char *name, int *lenp)
{
struct property *pp;
if (!np)
return NULL;
for (pp = np->properties; pp; pp = pp->next) {
if (of_prop_cmp(pp->name, name) == 0) {
if (lenp)
*lenp = pp->length;
break;
}
}
return pp;
}
struct property *of_find_property(const struct device_node *np,
const char *name,
int *lenp)
{
struct property *pp;
unsigned long flags;
raw_spin_lock_irqsave(&devtree_lock, flags);
pp = __of_find_property(np, name, lenp);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return pp;
}
EXPORT_SYMBOL(of_find_property);
struct device_node *__of_find_all_nodes(struct device_node *prev)
{
struct device_node *np;
if (!prev) {
np = of_root;
} else if (prev->child) {
np = prev->child;
} else {
/* Walk back up looking for a sibling, or the end of the structure */
np = prev;
while (np->parent && !np->sibling)
np = np->parent;
np = np->sibling; /* Might be null at the end of the tree */
}
return np;
}
/**
* of_find_all_nodes - Get next node in global list
* @prev: Previous node or NULL to start iteration
* of_node_put() will be called on it
*
* Return: A node pointer with refcount incremented, use
* of_node_put() on it when done.
*/
struct device_node *of_find_all_nodes(struct device_node *prev)
{
struct device_node *np;
unsigned long flags;
raw_spin_lock_irqsave(&devtree_lock, flags);
np = __of_find_all_nodes(prev);
of_node_get(np);
of_node_put(prev);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
EXPORT_SYMBOL(of_find_all_nodes);
/*
* Find a property with a given name for a given node
* and return the value.
*/
const void *__of_get_property(const struct device_node *np,
const char *name, int *lenp)
{
struct property *pp = __of_find_property(np, name, lenp);
return pp ? pp->value : NULL;
}
/*
* Find a property with a given name for a given node
* and return the value.
*/
const void *of_get_property(const struct device_node *np, const char *name,
int *lenp)
{
struct property *pp = of_find_property(np, name, lenp);
return pp ? pp->value : NULL;
}
EXPORT_SYMBOL(of_get_property);
/**
* __of_device_is_compatible() - Check if the node matches given constraints
* @device: pointer to node
* @compat: required compatible string, NULL or "" for any match
* @type: required device_type value, NULL or "" for any match
* @name: required node name, NULL or "" for any match
*
* Checks if the given @compat, @type and @name strings match the
 * properties of the given @device. A constraint can be skipped by
 * passing NULL or an empty string as the constraint.
*
* Returns 0 for no match, and a positive integer on match. The return
* value is a relative score with larger values indicating better
* matches. The score is weighted for the most specific compatible value
* to get the highest score. Matching type is next, followed by matching
* name. Practically speaking, this results in the following priority
* order for matches:
*
* 1. specific compatible && type && name
* 2. specific compatible && type
* 3. specific compatible && name
* 4. specific compatible
* 5. general compatible && type && name
* 6. general compatible && type
* 7. general compatible && name
* 8. general compatible
* 9. type && name
* 10. type
* 11. name
*/
static int __of_device_is_compatible(const struct device_node *device,
const char *compat, const char *type, const char *name)
{
struct property *prop;
const char *cp;
int index = 0, score = 0;
/* Compatible match has highest priority */
if (compat && compat[0]) {
prop = __of_find_property(device, "compatible", NULL);
for (cp = of_prop_next_string(prop, NULL); cp;
cp = of_prop_next_string(prop, cp), index++) {
if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
score = INT_MAX/2 - (index << 2);
break;
}
}
if (!score)
return 0;
}
/* Matching type is better than matching name */
if (type && type[0]) {
if (!__of_node_is_type(device, type))
return 0;
score += 2;
}
/* Matching name is a bit better than not */
if (name && name[0]) {
if (!of_node_name_eq(device, name))
return 0;
score++;
}
return score;
}
/** Checks if the given "compat" string matches one of the strings in
* the device's "compatible" property
*/
int of_device_is_compatible(const struct device_node *device,
const char *compat)
{
unsigned long flags;
int res;
raw_spin_lock_irqsave(&devtree_lock, flags);
res = __of_device_is_compatible(device, compat, NULL, NULL);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return res;
}
EXPORT_SYMBOL(of_device_is_compatible);
/** Checks if the device is compatible with any of the entries in
* a NULL terminated array of strings. Returns the best match
* score or 0.
*/
int of_device_compatible_match(const struct device_node *device,
const char *const *compat)
{
unsigned int tmp, score = 0;
if (!compat)
return 0;
while (*compat) {
tmp = of_device_is_compatible(device, *compat);
if (tmp > score)
score = tmp;
compat++;
}
return score;
}
EXPORT_SYMBOL_GPL(of_device_compatible_match);
/**
* of_machine_is_compatible - Test root of device tree for a given compatible value
* @compat: compatible string to look for in root node's compatible property.
*
* Return: A positive integer if the root node has the given value in its
* compatible property.
*/
int of_machine_is_compatible(const char *compat)
{
struct device_node *root;
int rc = 0;
root = of_find_node_by_path("/");
if (root) {
rc = of_device_is_compatible(root, compat);
of_node_put(root);
}
return rc;
}
EXPORT_SYMBOL(of_machine_is_compatible);
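/*
 * Illustrative usage sketch, not part of the kernel sources: applying a
 * board-specific quirk keyed off the root node's compatible string. The
 * compatible value and apply_board_quirk() are hypothetical.
 *
 *	if (of_machine_is_compatible("acme,example-board"))
 *		apply_board_quirk();
 */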
/**
* __of_device_is_available - check if a device is available for use
*
* @device: Node to check for availability, with locks already held
*
* Return: True if the status property is absent or set to "okay" or "ok",
* false otherwise
*/
static bool __of_device_is_available(const struct device_node *device)
{
const char *status;
int statlen;
if (!device)
return false;
status = __of_get_property(device, "status", &statlen);
if (status == NULL)
return true;
if (statlen > 0) {
if (!strcmp(status, "okay") || !strcmp(status, "ok"))
return true;
}
return false;
}
/**
* of_device_is_available - check if a device is available for use
*
* @device: Node to check for availability
*
* Return: True if the status property is absent or set to "okay" or "ok",
* false otherwise
*/
bool of_device_is_available(const struct device_node *device)
{
unsigned long flags;
bool res;
raw_spin_lock_irqsave(&devtree_lock, flags);
res = __of_device_is_available(device);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return res;
}
EXPORT_SYMBOL(of_device_is_available);
/**
* __of_device_is_fail - check if a device has status "fail" or "fail-..."
*
* @device: Node to check status for, with locks already held
*
* Return: True if the status property is set to "fail" or "fail-..." (for any
* error code suffix), false otherwise
*/
static bool __of_device_is_fail(const struct device_node *device)
{
const char *status;
if (!device)
return false;
status = __of_get_property(device, "status", NULL);
if (status == NULL)
return false;
return !strcmp(status, "fail") || !strncmp(status, "fail-", 5);
}
/**
* of_device_is_big_endian - check if a device has BE registers
*
* @device: Node to check for endianness
*
* Return: True if the device has a "big-endian" property, or if the kernel
* was compiled for BE *and* the device has a "native-endian" property.
* Returns false otherwise.
*
* Callers would nominally use ioread32be/iowrite32be if
* of_device_is_big_endian() == true, or readl/writel otherwise.
*/
bool of_device_is_big_endian(const struct device_node *device)
{
if (of_property_read_bool(device, "big-endian"))
return true;
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
of_property_read_bool(device, "native-endian"))
return true;
return false;
}
EXPORT_SYMBOL(of_device_is_big_endian);
/**
* of_get_parent - Get a node's parent if any
* @node: Node to get parent
*
* Return: A node pointer with refcount incremented, use
* of_node_put() on it when done.
*/
struct device_node *of_get_parent(const struct device_node *node)
{
struct device_node *np;
unsigned long flags;
if (!node)
return NULL;
raw_spin_lock_irqsave(&devtree_lock, flags);
np = of_node_get(node->parent);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
EXPORT_SYMBOL(of_get_parent);
/**
* of_get_next_parent - Iterate to a node's parent
* @node: Node to get parent of
*
* This is like of_get_parent() except that it drops the
* refcount on the passed node, making it suitable for iterating
* through a node's parents.
*
* Return: A node pointer with refcount incremented, use
* of_node_put() on it when done.
*/
struct device_node *of_get_next_parent(struct device_node *node)
{
struct device_node *parent;
unsigned long flags;
if (!node)
return NULL;
raw_spin_lock_irqsave(&devtree_lock, flags);
parent = of_node_get(node->parent);
of_node_put(node);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return parent;
}
EXPORT_SYMBOL(of_get_next_parent);
static struct device_node *__of_get_next_child(const struct device_node *node,
struct device_node *prev)
{
struct device_node *next;
if (!node)
return NULL;
next = prev ? prev->sibling : node->child;
of_node_get(next);
of_node_put(prev);
return next;
}
#define __for_each_child_of_node(parent, child) \
for (child = __of_get_next_child(parent, NULL); child != NULL; \
child = __of_get_next_child(parent, child))
/**
 * of_get_next_child - Iterate over a node's children
* @node: parent node
* @prev: previous child of the parent node, or NULL to get first
*
* Return: A node pointer with refcount incremented, use of_node_put() on
* it when done. Returns NULL when prev is the last child. Decrements the
* refcount of prev.
*/
struct device_node *of_get_next_child(const struct device_node *node,
struct device_node *prev)
{
struct device_node *next;
unsigned long flags;
raw_spin_lock_irqsave(&devtree_lock, flags);
next = __of_get_next_child(node, prev);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return next;
}
EXPORT_SYMBOL(of_get_next_child);
/**
* of_get_next_available_child - Find the next available child node
* @node: parent node
* @prev: previous child of the parent node, or NULL to get first
*
* This function is like of_get_next_child(), except that it
* automatically skips any disabled nodes (i.e. status = "disabled").
*/
struct device_node *of_get_next_available_child(const struct device_node *node,
struct device_node *prev)
{
struct device_node *next;
unsigned long flags;
if (!node)
return NULL;
raw_spin_lock_irqsave(&devtree_lock, flags);
next = prev ? prev->sibling : node->child;
for (; next; next = next->sibling) {
if (!__of_device_is_available(next))
continue;
if (of_node_get(next))
break;
}
of_node_put(prev);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return next;
}
EXPORT_SYMBOL(of_get_next_available_child);
/**
* of_get_next_cpu_node - Iterate on cpu nodes
* @prev: previous child of the /cpus node, or NULL to get first
*
* Unusable CPUs (those with the status property set to "fail" or "fail-...")
* will be skipped.
*
* Return: A cpu node pointer with refcount incremented, use of_node_put()
* on it when done. Returns NULL when prev is the last child. Decrements
* the refcount of prev.
*/
struct device_node *of_get_next_cpu_node(struct device_node *prev)
{
struct device_node *next = NULL;
unsigned long flags;
struct device_node *node;
if (!prev)
node = of_find_node_by_path("/cpus");
raw_spin_lock_irqsave(&devtree_lock, flags);
if (prev)
next = prev->sibling;
else if (node) {
next = node->child;
of_node_put(node);
}
for (; next; next = next->sibling) {
if (__of_device_is_fail(next))
continue;
if (!(of_node_name_eq(next, "cpu") ||
__of_node_is_type(next, "cpu")))
continue;
if (of_node_get(next))
break;
}
of_node_put(prev);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return next;
}
EXPORT_SYMBOL(of_get_next_cpu_node);
/**
* of_get_compatible_child - Find compatible child node
* @parent: parent node
* @compatible: compatible string
*
* Lookup child node whose compatible property contains the given compatible
* string.
*
* Return: a node pointer with refcount incremented, use of_node_put() on it
* when done; or NULL if not found.
*/
struct device_node *of_get_compatible_child(const struct device_node *parent,
const char *compatible)
{
struct device_node *child;
for_each_child_of_node(parent, child) {
if (of_device_is_compatible(child, compatible))
break;
}
return child;
}
EXPORT_SYMBOL(of_get_compatible_child);
/**
* of_get_child_by_name - Find the child node by name for a given parent
* @node: parent node
* @name: child name to look for.
*
 * This function looks for a child node with the given name.
*
* Return: A node pointer if found, with refcount incremented, use
* of_node_put() on it when done.
* Returns NULL if node is not found.
*/
struct device_node *of_get_child_by_name(const struct device_node *node,
const char *name)
{
struct device_node *child;
for_each_child_of_node(node, child)
if (of_node_name_eq(child, name))
break;
return child;
}
EXPORT_SYMBOL(of_get_child_by_name);
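/*
 * Illustrative usage sketch, not part of the kernel sources: looking up a
 * child node by name and dropping the reference when done with it.
 *
 *	struct device_node *child = of_get_child_by_name(np, "ports");
 *
 *	if (child) {
 *		...
 *		of_node_put(child);
 *	}
 */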
struct device_node *__of_find_node_by_path(struct device_node *parent,
const char *path)
{
struct device_node *child;
int len;
len = strcspn(path, "/:");
if (!len)
return NULL;
__for_each_child_of_node(parent, child) {
const char *name = kbasename(child->full_name);
if (strncmp(path, name, len) == 0 && (strlen(name) == len))
return child;
}
return NULL;
}
struct device_node *__of_find_node_by_full_path(struct device_node *node,
const char *path)
{
const char *separator = strchr(path, ':');
while (node && *path == '/') {
struct device_node *tmp = node;
path++; /* Increment past '/' delimiter */
node = __of_find_node_by_path(node, path);
of_node_put(tmp);
path = strchrnul(path, '/');
if (separator && separator < path)
break;
}
return node;
}
/**
* of_find_node_opts_by_path - Find a node matching a full OF path
* @path: Either the full path to match, or if the path does not
* start with '/', the name of a property of the /aliases
* node (an alias). In the case of an alias, the node
* matching the alias' value will be returned.
* @opts: Address of a pointer into which to store the start of
* an options string appended to the end of the path with
* a ':' separator.
*
* Valid paths:
* * /foo/bar Full path
* * foo Valid alias
* * foo/bar Valid alias + relative path
*
* Return: A node pointer with refcount incremented, use
* of_node_put() on it when done.
*/
struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
{
struct device_node *np = NULL;
struct property *pp;
unsigned long flags;
const char *separator = strchr(path, ':');
if (opts)
*opts = separator ? separator + 1 : NULL;
if (strcmp(path, "/") == 0)
return of_node_get(of_root);
/* The path could begin with an alias */
if (*path != '/') {
int len;
const char *p = separator;
if (!p)
p = strchrnul(path, '/');
len = p - path;
/* of_aliases must not be NULL */
if (!of_aliases)
return NULL;
for_each_property_of_node(of_aliases, pp) {
if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) {
np = of_find_node_by_path(pp->value);
break;
}
}
if (!np)
return NULL;
path = p;
}
/* Step down the tree matching path components */
raw_spin_lock_irqsave(&devtree_lock, flags);
if (!np)
np = of_node_get(of_root);
np = __of_find_node_by_full_path(np, path);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
EXPORT_SYMBOL(of_find_node_opts_by_path);
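/*
 * Illustrative usage sketch, not part of the kernel sources: anything after
 * the ':' separator is returned through @opts, as used for instance by
 * stdout-path = "serial0:115200n8". Here np resolves the "serial0" alias and
 * opts points at "115200n8".
 *
 *	const char *opts;
 *	struct device_node *np;
 *
 *	np = of_find_node_opts_by_path("serial0:115200n8", &opts);
 *	if (np)
 *		of_node_put(np);
 */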
/**
* of_find_node_by_name - Find a node by its "name" property
* @from: The node to start searching from or NULL; the node
* you pass will not be searched, only the next one
* will. Typically, you pass what the previous call
* returned. of_node_put() will be called on @from.
* @name: The name string to match against
*
* Return: A node pointer with refcount incremented, use
* of_node_put() on it when done.
*/
struct device_node *of_find_node_by_name(struct device_node *from,
const char *name)
{
struct device_node *np;
unsigned long flags;
raw_spin_lock_irqsave(&devtree_lock, flags);
for_each_of_allnodes_from(from, np)
if (of_node_name_eq(np, name) && of_node_get(np))
break;
of_node_put(from);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
EXPORT_SYMBOL(of_find_node_by_name);
/**
* of_find_node_by_type - Find a node by its "device_type" property
* @from: The node to start searching from, or NULL to start searching
* the entire device tree. The node you pass will not be
* searched, only the next one will; typically, you pass
* what the previous call returned. of_node_put() will be
* called on from for you.
* @type: The type string to match against
*
* Return: A node pointer with refcount incremented, use
* of_node_put() on it when done.
*/
struct device_node *of_find_node_by_type(struct device_node *from,
const char *type)
{
struct device_node *np;
unsigned long flags;
raw_spin_lock_irqsave(&devtree_lock, flags);
for_each_of_allnodes_from(from, np)
if (__of_node_is_type(np, type) && of_node_get(np))
break;
of_node_put(from);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
EXPORT_SYMBOL(of_find_node_by_type);
/**
* of_find_compatible_node - Find a node based on type and one of the
* tokens in its "compatible" property
* @from: The node to start searching from or NULL, the node
* you pass will not be searched, only the next one
* will; typically, you pass what the previous call
* returned. of_node_put() will be called on it
* @type: The type string to match "device_type" or NULL to ignore
* @compatible: The string to match to one of the tokens in the device
* "compatible" list.
*
* Return: A node pointer with refcount incremented, use
* of_node_put() on it when done.
*/
struct device_node *of_find_compatible_node(struct device_node *from,
const char *type, const char *compatible)
{
struct device_node *np;
unsigned long flags;
raw_spin_lock_irqsave(&devtree_lock, flags);
for_each_of_allnodes_from(from, np)
if (__of_device_is_compatible(np, compatible, type, NULL) &&
of_node_get(np))
break;
of_node_put(from);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
EXPORT_SYMBOL(of_find_compatible_node);
/**
* of_find_node_with_property - Find a node which has a property with
* the given name.
* @from: The node to start searching from or NULL, the node
* you pass will not be searched, only the next one
* will; typically, you pass what the previous call
* returned. of_node_put() will be called on it
* @prop_name: The name of the property to look for.
*
* Return: A node pointer with refcount incremented, use
* of_node_put() on it when done.
*/
struct device_node *of_find_node_with_property(struct device_node *from,
const char *prop_name)
{
struct device_node *np;
struct property *pp;
unsigned long flags;
raw_spin_lock_irqsave(&devtree_lock, flags);
for_each_of_allnodes_from(from, np) {
for (pp = np->properties; pp; pp = pp->next) {
if (of_prop_cmp(pp->name, prop_name) == 0) {
of_node_get(np);
goto out;
}
}
}
out:
of_node_put(from);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
EXPORT_SYMBOL(of_find_node_with_property);
static
const struct of_device_id *__of_match_node(const struct of_device_id *matches,
const struct device_node *node)
{
const struct of_device_id *best_match = NULL;
int score, best_score = 0;
if (!matches)
return NULL;
for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
score = __of_device_is_compatible(node, matches->compatible,
matches->type, matches->name);
if (score > best_score) {
best_match = matches;
best_score = score;
}
}
return best_match;
}
/**
* of_match_node - Tell if a device_node has a matching of_match structure
* @matches: array of of device match structures to search in
* @node: the of device structure to match against
*
* Low level utility function used by device matching.
*/
const struct of_device_id *of_match_node(const struct of_device_id *matches,
const struct device_node *node)
{
const struct of_device_id *match;
unsigned long flags;
raw_spin_lock_irqsave(&devtree_lock, flags);
match = __of_match_node(matches, node);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return match;
}
EXPORT_SYMBOL(of_match_node);
/**
* of_find_matching_node_and_match - Find a node based on an of_device_id
* match table.
* @from: The node to start searching from or NULL, the node
* you pass will not be searched, only the next one
* will; typically, you pass what the previous call
* returned. of_node_put() will be called on it
* @matches: array of of device match structures to search in
* @match: Updated to point at the matches entry which matched
*
* Return: A node pointer with refcount incremented, use
* of_node_put() on it when done.
*/
struct device_node *of_find_matching_node_and_match(struct device_node *from,
const struct of_device_id *matches,
const struct of_device_id **match)
{
struct device_node *np;
const struct of_device_id *m;
unsigned long flags;
if (match)
*match = NULL;
raw_spin_lock_irqsave(&devtree_lock, flags);
for_each_of_allnodes_from(from, np) {
m = __of_match_node(matches, np);
if (m && of_node_get(np)) {
if (match)
*match = m;
break;
}
}
of_node_put(from);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
EXPORT_SYMBOL(of_find_matching_node_and_match);
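/*
 * Illustrative usage sketch, not part of the kernel sources: scanning the
 * whole tree with a small match table. The compatible strings are made up;
 * for_each_matching_node_and_match() wraps this function.
 *
 *	static const struct of_device_id example_ids[] = {
 *		{ .compatible = "acme,widget-v1" },
 *		{ .compatible = "acme,widget-v2" },
 *		{ }
 *	};
 *	const struct of_device_id *id;
 *	struct device_node *np;
 *
 *	for_each_matching_node_and_match(np, example_ids, &id)
 *		pr_info("found %pOF (%s)\n", np, id->compatible);
 */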
/**
* of_alias_from_compatible - Lookup appropriate alias for a device node
* depending on compatible
* @node: pointer to a device tree node
* @alias: Pointer to buffer that alias value will be copied into
* @len: Length of alias value
*
* Based on the value of the compatible property, this routine will attempt
* to choose an appropriate alias value for a particular device tree node.
* It does this by stripping the manufacturer prefix (as delimited by a ',')
* from the first entry in the compatible list property.
*
* Note: The matching on just the "product" side of the compatible is a relic
 * from I2C and SPI. Please do not add any new users.
*
* Return: This routine returns 0 on success, <0 on failure.
*/
int of_alias_from_compatible(const struct device_node *node, char *alias, int len)
{
const char *compatible, *p;
int cplen;
compatible = of_get_property(node, "compatible", &cplen);
if (!compatible || strlen(compatible) > cplen)
return -ENODEV;
p = strchr(compatible, ',');
strscpy(alias, p ? p + 1 : compatible, len);
return 0;
}
EXPORT_SYMBOL_GPL(of_alias_from_compatible);
/**
* of_find_node_by_phandle - Find a node given a phandle
* @handle: phandle of the node to find
*
* Return: A node pointer with refcount incremented, use
* of_node_put() on it when done.
*/
struct device_node *of_find_node_by_phandle(phandle handle)
{
struct device_node *np = NULL;
unsigned long flags;
u32 handle_hash;
if (!handle)
return NULL;
handle_hash = of_phandle_cache_hash(handle);
raw_spin_lock_irqsave(&devtree_lock, flags);
if (phandle_cache[handle_hash] &&
handle == phandle_cache[handle_hash]->phandle)
np = phandle_cache[handle_hash];
if (!np) {
for_each_of_allnodes(np)
if (np->phandle == handle &&
!of_node_check_flag(np, OF_DETACHED)) {
phandle_cache[handle_hash] = np;
break;
}
}
of_node_get(np);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);
void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
{
int i;
printk("%s %pOF", msg, args->np);
for (i = 0; i < args->args_count; i++) {
const char delim = i ? ',' : ':';
pr_cont("%c%08x", delim, args->args[i]);
}
pr_cont("\n");
}
int of_phandle_iterator_init(struct of_phandle_iterator *it,
const struct device_node *np,
const char *list_name,
const char *cells_name,
int cell_count)
{
const __be32 *list;
int size;
memset(it, 0, sizeof(*it));
/*
* one of cell_count or cells_name must be provided to determine the
* argument length.
*/
if (cell_count < 0 && !cells_name)
return -EINVAL;
list = of_get_property(np, list_name, &size);
if (!list)
return -ENOENT;
it->cells_name = cells_name;
it->cell_count = cell_count;
it->parent = np;
it->list_end = list + size / sizeof(*list);
it->phandle_end = list;
it->cur = list;
return 0;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_init);
int of_phandle_iterator_next(struct of_phandle_iterator *it)
{
uint32_t count = 0;
if (it->node) {
of_node_put(it->node);
it->node = NULL;
}
if (!it->cur || it->phandle_end >= it->list_end)
return -ENOENT;
it->cur = it->phandle_end;
/* If phandle is 0, then it is an empty entry with no arguments. */
it->phandle = be32_to_cpup(it->cur++);
if (it->phandle) {
/*
* Find the provider node and parse the #*-cells property to
* determine the argument length.
*/
it->node = of_find_node_by_phandle(it->phandle);
if (it->cells_name) {
if (!it->node) {
pr_err("%pOF: could not find phandle %d\n",
it->parent, it->phandle);
goto err;
}
if (of_property_read_u32(it->node, it->cells_name,
&count)) {
/*
* If both cell_count and cells_name are given,
* fall back to cell_count in the absence
* of the cells_name property.
*/
if (it->cell_count >= 0) {
count = it->cell_count;
} else {
pr_err("%pOF: could not get %s for %pOF\n",
it->parent,
it->cells_name,
it->node);
goto err;
}
}
} else {
count = it->cell_count;
}
/*
* Make sure that the arguments actually fit in the remaining
* property data length
*/
if (it->cur + count > it->list_end) {
if (it->cells_name)
pr_err("%pOF: %s = %d found %td\n",
it->parent, it->cells_name,
count, it->list_end - it->cur);
else
pr_err("%pOF: phandle %s needs %d, found %td\n",
it->parent, of_node_full_name(it->node),
count, it->list_end - it->cur);
goto err;
}
}
it->phandle_end = it->cur + count;
it->cur_count = count;
return 0;
err:
if (it->node) {
of_node_put(it->node);
it->node = NULL;
}
return -EINVAL;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_next);
int of_phandle_iterator_args(struct of_phandle_iterator *it,
uint32_t *args,
int size)
{
int i, count;
count = it->cur_count;
if (WARN_ON(size < count))
count = size;
for (i = 0; i < count; i++)
args[i] = be32_to_cpup(it->cur++);
return count;
}
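/*
 * Example (illustrative sketch): the iterator above is normally driven
 * through the of_for_each_phandle() helper. The "clocks"/"#clock-cells"
 * names below follow the common clock binding and are used here purely for
 * illustration.
 *
 *	struct of_phandle_iterator it;
 *	uint32_t args[MAX_PHANDLE_ARGS];
 *	int err, nargs;
 *
 *	of_for_each_phandle(&it, err, np, "clocks", "#clock-cells", -1) {
 *		nargs = of_phandle_iterator_args(&it, args, MAX_PHANDLE_ARGS);
 *		pr_debug("clock provider %pOF with %d args\n", it.node, nargs);
 *	}
 *
 * On normal termination err is -ENOENT; any other negative value indicates a
 * parsing error.
 */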
int __of_parse_phandle_with_args(const struct device_node *np,
const char *list_name,
const char *cells_name,
int cell_count, int index,
struct of_phandle_args *out_args)
{
struct of_phandle_iterator it;
int rc, cur_index = 0;
if (index < 0)
return -EINVAL;
/* Loop over the phandles until the requested entry is found */
of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
/*
* All of the error cases bail out of the loop, so at
* this point, the parsing is successful. If the requested
* index matches, then fill the out_args structure and return,
* or return -ENOENT for an empty entry.
*/
rc = -ENOENT;
if (cur_index == index) {
if (!it.phandle)
goto err;
if (out_args) {
int c;
c = of_phandle_iterator_args(&it,
out_args->args,
MAX_PHANDLE_ARGS);
out_args->np = it.node;
out_args->args_count = c;
} else {
of_node_put(it.node);
}
/* Found it! return success */
return 0;
}
cur_index++;
}
/*
* Put the iterator's node reference before returning; rc will be one of:
* -ENOENT : index is for empty phandle
* -EINVAL : parsing error on data
*/
err:
of_node_put(it.node);
return rc;
}
EXPORT_SYMBOL(__of_parse_phandle_with_args);
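/*
 * Example (illustrative sketch): drivers normally reach this helper through
 * the of_parse_phandle_with_args() wrapper in <linux/of.h>. The "dmas" and
 * "#dma-cells" names below follow the common DMA binding and are used only
 * for illustration.
 *
 *	struct of_phandle_args args;
 *
 *	if (!of_parse_phandle_with_args(np, "dmas", "#dma-cells", 0, &args)) {
 *		pr_debug("DMA provider %pOF, %d argument cells\n",
 *			 args.np, args.args_count);
 *		of_node_put(args.np);
 *	}
 */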
/**
* of_parse_phandle_with_args_map() - Find a node pointed by phandle in a list and remap it
* @np: pointer to a device tree node containing a list
* @list_name: property name that contains a list
* @stem_name: stem of property names that specify phandles' arguments count
* @index: index of a phandle to parse out
* @out_args: optional pointer to output arguments structure (will be filled)
*
* This function is useful to parse lists of phandles and their arguments.
* Returns 0 on success and fills out_args, on error returns appropriate errno
* value. The difference between this function and of_parse_phandle_with_args()
* is that this API remaps a phandle if the node the phandle points to has
* a <@stem_name>-map property.
*
* Caller is responsible to call of_node_put() on the returned out_args->np
* pointer.
*
* Example::
*
* phandle1: node1 {
* #list-cells = <2>;
* };
*
* phandle2: node2 {
* #list-cells = <1>;
* };
*
* phandle3: node3 {
* #list-cells = <1>;
* list-map = <0 &phandle2 3>,
* <1 &phandle2 2>,
* <2 &phandle1 5 1>;
* list-map-mask = <0x3>;
* };
*
* node4 {
* list = <&phandle1 1 2 &phandle3 0>;
* };
*
* To get a device_node of the ``node2`` node you may call this:
* of_parse_phandle_with_args_map(node4, "list", "list", 1, &args);
*/
int of_parse_phandle_with_args_map(const struct device_node *np,
const char *list_name,
const char *stem_name,
int index, struct of_phandle_args *out_args)
{
char *cells_name, *map_name = NULL, *mask_name = NULL;
char *pass_name = NULL;
struct device_node *cur, *new = NULL;
const __be32 *map, *mask, *pass;
static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = 0 };
__be32 initial_match_array[MAX_PHANDLE_ARGS];
const __be32 *match_array = initial_match_array;
int i, ret, map_len, match;
u32 list_size, new_size;
if (index < 0)
return -EINVAL;
cells_name = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
if (!cells_name)
return -ENOMEM;
ret = -ENOMEM;
map_name = kasprintf(GFP_KERNEL, "%s-map", stem_name);
if (!map_name)
goto free;
mask_name = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
if (!mask_name)
goto free;
pass_name = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
if (!pass_name)
goto free;
ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index,
out_args);
if (ret)
goto free;
/* Get the #<list>-cells property */
cur = out_args->np;
ret = of_property_read_u32(cur, cells_name, &list_size);
if (ret < 0)
goto put;
/* Precalculate the match array - this simplifies match loop */
for (i = 0; i < list_size; i++)
initial_match_array[i] = cpu_to_be32(out_args->args[i]);
ret = -EINVAL;
while (cur) {
/* Get the <list>-map property */
map = of_get_property(cur, map_name, &map_len);
if (!map) {
ret = 0;
goto free;
}
map_len /= sizeof(u32);
/* Get the <list>-map-mask property (optional) */
mask = of_get_property(cur, mask_name, NULL);
if (!mask)
mask = dummy_mask;
/* Iterate through <list>-map property */
match = 0;
while (map_len > (list_size + 1) && !match) {
/* Compare specifiers */
match = 1;
for (i = 0; i < list_size; i++, map_len--)
match &= !((match_array[i] ^ *map++) & mask[i]);
of_node_put(new);
new = of_find_node_by_phandle(be32_to_cpup(map));
map++;
map_len--;
/* Check if not found */
if (!new)
goto put;
if (!of_device_is_available(new))
match = 0;
ret = of_property_read_u32(new, cells_name, &new_size);
if (ret)
goto put;
/* Check for malformed properties */
if (WARN_ON(new_size > MAX_PHANDLE_ARGS))
goto put;
if (map_len < new_size)
goto put;
/* Move forward by new node's #<list>-cells amount */
map += new_size;
map_len -= new_size;
}
if (!match)
goto put;
/* Get the <list>-map-pass-thru property (optional) */
pass = of_get_property(cur, pass_name, NULL);
if (!pass)
pass = dummy_pass;
/*
* Successfully parsed a <list>-map translation; copy new
* specifier into the out_args structure, keeping the
* bits specified in <list>-map-pass-thru.
*/
match_array = map - new_size;
for (i = 0; i < new_size; i++) {
__be32 val = *(map - new_size + i);
if (i < list_size) {
val &= ~pass[i];
val |= cpu_to_be32(out_args->args[i]) & pass[i];
}
out_args->args[i] = be32_to_cpu(val);
}
out_args->args_count = list_size = new_size;
/* Iterate again with new provider */
out_args->np = new;
of_node_put(cur);
cur = new;
}
put:
of_node_put(cur);
of_node_put(new);
free:
kfree(mask_name);
kfree(map_name);
kfree(cells_name);
kfree(pass_name);
return ret;
}
EXPORT_SYMBOL(of_parse_phandle_with_args_map);
/**
* of_count_phandle_with_args() - Find the number of phandles references in a property
* @np: pointer to a device tree node containing a list
* @list_name: property name that contains a list
* @cells_name: property name that specifies phandles' arguments count
*
* Return: The number of phandle + argument tuples within a property. It
* is a typical pattern to encode a list of phandle and variable
* arguments into a single property. The number of arguments is encoded
* by a property in the phandle-target node. For example, a gpios
* property would contain a list of GPIO specifiers consisting of a
* phandle and 1 or more arguments. The number of arguments is
* determined by the #gpio-cells property in the node pointed to by the
* phandle.
*/
int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
const char *cells_name)
{
struct of_phandle_iterator it;
int rc, cur_index = 0;
/*
* If cells_name is NULL we assume a cell count of 0. This makes
* counting the phandles trivial as each 32bit word in the list is a
* phandle and there are no arguments to consider. So we don't iterate through
* the list but just use the length to determine the phandle count.
*/
if (!cells_name) {
const __be32 *list;
int size;
list = of_get_property(np, list_name, &size);
if (!list)
return -ENOENT;
return size / sizeof(*list);
}
rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1);
if (rc)
return rc;
while ((rc = of_phandle_iterator_next(&it)) == 0)
cur_index += 1;
if (rc != -ENOENT)
return rc;
return cur_index;
}
EXPORT_SYMBOL(of_count_phandle_with_args);
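/*
 * Example (illustrative sketch): counting the entries of a GPIO specifier
 * list. The "cs-gpios" property name is taken from the SPI binding and is
 * used here only for illustration.
 *
 *	int ngpios = of_count_phandle_with_args(np, "cs-gpios", "#gpio-cells");
 *
 *	if (ngpios < 0)
 *		return ngpios;
 *	pr_debug("%pOF: %d chip-select GPIOs\n", np, ngpios);
 */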
static struct property *__of_remove_property_from_list(struct property **list, struct property *prop)
{
struct property **next;
for (next = list; *next; next = &(*next)->next) {
if (*next == prop) {
*next = prop->next;
prop->next = NULL;
return prop;
}
}
return NULL;
}
/**
* __of_add_property - Add a property to a node without lock operations
* @np: Caller's Device Node
* @prop: Property to add
*/
int __of_add_property(struct device_node *np, struct property *prop)
{
int rc = 0;
unsigned long flags;
struct property **next;
raw_spin_lock_irqsave(&devtree_lock, flags);
__of_remove_property_from_list(&np->deadprops, prop);
prop->next = NULL;
next = &np->properties;
while (*next) {
if (strcmp(prop->name, (*next)->name) == 0) {
/* duplicate ! don't insert it */
rc = -EEXIST;
goto out_unlock;
}
next = &(*next)->next;
}
*next = prop;
out_unlock:
raw_spin_unlock_irqrestore(&devtree_lock, flags);
if (rc)
return rc;
__of_add_property_sysfs(np, prop);
return 0;
}
/**
* of_add_property - Add a property to a node
* @np: Caller's Device Node
* @prop: Property to add
*/
int of_add_property(struct device_node *np, struct property *prop)
{
int rc;
mutex_lock(&of_mutex);
rc = __of_add_property(np, prop);
mutex_unlock(&of_mutex);
if (!rc)
of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL);
return rc;
}
EXPORT_SYMBOL_GPL(of_add_property);
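/*
 * Example (illustrative sketch): dynamically attaching a property to a live
 * node. The property name and value are made up for this sketch; note that
 * the property structure and its value must stay allocated for as long as
 * the node may reference them.
 *
 *	static struct property example_prop = {
 *		.name   = "example-status",
 *		.value  = "okay",
 *		.length = sizeof("okay"),
 *	};
 *
 *	if (of_add_property(np, &example_prop))
 *		pr_warn("%pOF: could not add %s\n", np, example_prop.name);
 */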
int __of_remove_property(struct device_node *np, struct property *prop)
{
unsigned long flags;
int rc = -ENODEV;
raw_spin_lock_irqsave(&devtree_lock, flags);
if (__of_remove_property_from_list(&np->properties, prop)) {
/* Found the property, add it to deadprops list */
prop->next = np->deadprops;
np->deadprops = prop;
rc = 0;
}
raw_spin_unlock_irqrestore(&devtree_lock, flags);
if (rc)
return rc;
__of_remove_property_sysfs(np, prop);
return 0;
}
/**
* of_remove_property - Remove a property from a node.
* @np: Caller's Device Node
* @prop: Property to remove
*
* Note that we don't actually remove it, since we have given out
* who-knows-how-many pointers to the data using get-property.
* Instead we just move the property to the "dead properties"
* list, so it won't be found any more.
*/
int of_remove_property(struct device_node *np, struct property *prop)
{
int rc;
if (!prop)
return -ENODEV;
mutex_lock(&of_mutex);
rc = __of_remove_property(np, prop);
mutex_unlock(&of_mutex);
if (!rc)
of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL);
return rc;
}
EXPORT_SYMBOL_GPL(of_remove_property);
int __of_update_property(struct device_node *np, struct property *newprop,
struct property **oldpropp)
{
struct property **next, *oldprop;
unsigned long flags;
raw_spin_lock_irqsave(&devtree_lock, flags);
__of_remove_property_from_list(&np->deadprops, newprop);
for (next = &np->properties; *next; next = &(*next)->next) {
if (of_prop_cmp((*next)->name, newprop->name) == 0)
break;
}
*oldpropp = oldprop = *next;
if (oldprop) {
/* replace the existing property */
newprop->next = oldprop->next;
*next = newprop;
oldprop->next = np->deadprops;
np->deadprops = oldprop;
} else {
/* new property */
newprop->next = NULL;
*next = newprop;
}
raw_spin_unlock_irqrestore(&devtree_lock, flags);
__of_update_property_sysfs(np, newprop, oldprop);
return 0;
}
/*
* of_update_property - Update a property in a node, if the property does
* not exist, add it.
*
* Note that we don't actually remove it, since we have given out
* who-knows-how-many pointers to the data using get-property.
* Instead we just move the property to the "dead properties" list,
* and add the new property to the property list
*/
int of_update_property(struct device_node *np, struct property *newprop)
{
struct property *oldprop;
int rc;
if (!newprop->name)
return -EINVAL;
mutex_lock(&of_mutex);
rc = __of_update_property(np, newprop, &oldprop);
mutex_unlock(&of_mutex);
if (!rc)
of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop);
return rc;
}
static void of_alias_add(struct alias_prop *ap, struct device_node *np,
int id, const char *stem, int stem_len)
{
ap->np = np;
ap->id = id;
strscpy(ap->stem, stem, stem_len + 1);
list_add_tail(&ap->link, &aliases_lookup);
pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n",
ap->alias, ap->stem, ap->id, np);
}
/**
* of_alias_scan - Scan all properties of the 'aliases' node
* @dt_alloc: An allocator that provides a virtual address to memory
* for storing the resulting tree
*
* The function scans all the properties of the 'aliases' node and populates
* the global aliases lookup table with the results.
*/
void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
{
struct property *pp;
of_aliases = of_find_node_by_path("/aliases");
of_chosen = of_find_node_by_path("/chosen");
if (of_chosen == NULL)
of_chosen = of_find_node_by_path("/chosen@0");
if (of_chosen) {
/* linux,stdout-path and /aliases/stdout are for legacy compatibility */
const char *name = NULL;
if (of_property_read_string(of_chosen, "stdout-path", &name))
of_property_read_string(of_chosen, "linux,stdout-path",
&name);
if (IS_ENABLED(CONFIG_PPC) && !name)
of_property_read_string(of_aliases, "stdout", &name);
if (name)
of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
if (of_stdout)
of_stdout->fwnode.flags |= FWNODE_FLAG_BEST_EFFORT;
}
if (!of_aliases)
return;
for_each_property_of_node(of_aliases, pp) {
const char *start = pp->name;
const char *end = start + strlen(start);
struct device_node *np;
struct alias_prop *ap;
int id, len;
/* Skip properties we do not want to process */
if (!strcmp(pp->name, "name") ||
!strcmp(pp->name, "phandle") ||
!strcmp(pp->name, "linux,phandle"))
continue;
np = of_find_node_by_path(pp->value);
if (!np)
continue;
/* walk the alias backwards to extract the id and work out
* the 'stem' string */
while (isdigit(*(end-1)) && end > start)
end--;
len = end - start;
if (kstrtoint(end, 10, &id) < 0)
continue;
/* Allocate an alias_prop with enough space for the stem */
ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
if (!ap)
continue;
memset(ap, 0, sizeof(*ap) + len + 1);
ap->alias = start;
of_alias_add(ap, np, id, start, len);
}
}
/**
* of_alias_get_id - Get alias id for the given device_node
* @np: Pointer to the given device_node
* @stem: Alias stem of the given device_node
*
* The function traverses the lookup table to get the alias id for the given
* device_node and alias stem.
*
* Return: The alias id if found.
*/
int of_alias_get_id(struct device_node *np, const char *stem)
{
struct alias_prop *app;
int id = -ENODEV;
mutex_lock(&of_mutex);
list_for_each_entry(app, &aliases_lookup, link) {
if (strcmp(app->stem, stem) != 0)
continue;
if (np == app->np) {
id = app->id;
break;
}
}
mutex_unlock(&of_mutex);
return id;
}
EXPORT_SYMBOL_GPL(of_alias_get_id);
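/*
 * Example (illustrative sketch): a serial driver resolving its preferred
 * port index from an aliases entry such as serial0 = &uart0. The fallback
 * index of 0 is an assumption for this sketch.
 *
 *	int id = of_alias_get_id(np, "serial");
 *
 *	if (id < 0)
 *		id = 0;
 *	pr_debug("%pOF: using port index %d\n", np, id);
 */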
/**
* of_alias_get_highest_id - Get highest alias id for the given stem
* @stem: Alias stem to be examined
*
* The function traverses the lookup table to get the highest alias id for the
* given alias stem. It returns the alias id if found.
*/
int of_alias_get_highest_id(const char *stem)
{
struct alias_prop *app;
int id = -ENODEV;
mutex_lock(&of_mutex);
list_for_each_entry(app, &aliases_lookup, link) {
if (strcmp(app->stem, stem) != 0)
continue;
if (app->id > id)
id = app->id;
}
mutex_unlock(&of_mutex);
return id;
}
EXPORT_SYMBOL_GPL(of_alias_get_highest_id);
/**
* of_console_check() - Test and setup console for DT setup
* @dn: Pointer to device node
* @name: Name to use for preferred console without index. ex. "ttyS"
* @index: Index to use for preferred console.
*
* Check if the given device node matches the stdout-path property in the
* /chosen node. If it does then register it as the preferred console.
*
* Return: TRUE if console successfully setup. Otherwise return FALSE.
*/
bool of_console_check(struct device_node *dn, char *name, int index)
{
if (!dn || dn != of_stdout || console_set_on_cmdline)
return false;
/*
* XXX: cast `options' to char pointer to suppress compilation
* warnings: printk, UART and console drivers expect char pointer.
*/
return !add_preferred_console(name, index, (char *)of_stdout_options);
}
EXPORT_SYMBOL_GPL(of_console_check);
/**
* of_find_next_cache_node - Find a node's subsidiary cache
* @np: node of type "cpu" or "cache"
*
* Return: A node pointer with refcount incremented, use
* of_node_put() on it when done. Caller should hold a reference
* to np.
*/
struct device_node *of_find_next_cache_node(const struct device_node *np)
{
struct device_node *child, *cache_node;
cache_node = of_parse_phandle(np, "l2-cache", 0);
if (!cache_node)
cache_node = of_parse_phandle(np, "next-level-cache", 0);
if (cache_node)
return cache_node;
/* OF on pmac has nodes instead of properties named "l2-cache"
* beneath CPU nodes.
*/
if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu"))
for_each_child_of_node(np, child)
if (of_node_is_type(child, "cache"))
return child;
return NULL;
}
/**
* of_find_last_cache_level - Find the level at which the last cache is
* present for the given logical cpu
*
* @cpu: cpu number (logical index) for which the last cache level is needed
*
* Return: The level at which the last cache is present. It is exactly the
* same as the total number of cache levels for the given logical cpu.
*/
int of_find_last_cache_level(unsigned int cpu)
{
u32 cache_level = 0;
struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);
while (np) {
of_node_put(prev);
prev = np;
np = of_find_next_cache_node(np);
}
of_property_read_u32(prev, "cache-level", &cache_level);
of_node_put(prev);
return cache_level;
}
/**
* of_map_id - Translate an ID through a downstream mapping.
* @np: root complex device node.
* @id: device ID to map.
* @map_name: property name of the map to use.
* @map_mask_name: optional property name of the mask to use.
* @target: optional pointer to a target device node.
* @id_out: optional pointer to receive the translated ID.
*
* Given a device ID, look up the appropriate implementation-defined
* platform ID and/or the target device which receives transactions on that
* ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
* @id_out may be NULL if only the other is required. If @target points to
* a non-NULL device node pointer, only entries targeting that node will be
* matched; if it points to a NULL value, it will receive the device node of
* the first matching target phandle, with a reference held.
*
* Return: 0 on success or a standard error code on failure.
*/
int of_map_id(struct device_node *np, u32 id,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out)
{
u32 map_mask, masked_id;
int map_len;
const __be32 *map = NULL;
if (!np || !map_name || (!target && !id_out))
return -EINVAL;
map = of_get_property(np, map_name, &map_len);
if (!map) {
if (target)
return -ENODEV;
/* Otherwise, no map implies no translation */
*id_out = id;
return 0;
}
if (!map_len || map_len % (4 * sizeof(*map))) {
pr_err("%pOF: Error: Bad %s length: %d\n", np,
map_name, map_len);
return -EINVAL;
}
/* The default is to select all bits. */
map_mask = 0xffffffff;
/*
* Can be overridden by "{iommu,msi}-map-mask" property.
* If of_property_read_u32() fails, the default is used.
*/
if (map_mask_name)
of_property_read_u32(np, map_mask_name, &map_mask);
masked_id = map_mask & id;
for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
struct device_node *phandle_node;
u32 id_base = be32_to_cpup(map + 0);
u32 phandle = be32_to_cpup(map + 1);
u32 out_base = be32_to_cpup(map + 2);
u32 id_len = be32_to_cpup(map + 3);
if (id_base & ~map_mask) {
pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n",
np, map_name, map_name,
map_mask, id_base);
return -EFAULT;
}
if (masked_id < id_base || masked_id >= id_base + id_len)
continue;
phandle_node = of_find_node_by_phandle(phandle);
if (!phandle_node)
return -ENODEV;
if (target) {
if (*target)
of_node_put(phandle_node);
else
*target = phandle_node;
if (*target != phandle_node)
continue;
}
if (id_out)
*id_out = masked_id - id_base + out_base;
pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n",
np, map_name, map_mask, id_base, out_base,
id_len, id, masked_id - id_base + out_base);
return 0;
}
pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name,
id, target && *target ? *target : NULL);
/* Bypasses translation */
if (id_out)
*id_out = id;
return 0;
}
EXPORT_SYMBOL_GPL(of_map_id);
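/*
 * Example (illustrative sketch): translating a PCI requester ID through an
 * "msi-map", roughly as an MSI layer might do. The bridge_np, rid and other
 * variable names are assumptions for this sketch.
 *
 *	struct device_node *msi_np = NULL;
 *	u32 msi_rid;
 *
 *	if (!of_map_id(bridge_np, rid, "msi-map", "msi-map-mask",
 *		       &msi_np, &msi_rid)) {
 *		pr_debug("RID 0x%x -> 0x%x on %pOF\n", rid, msi_rid, msi_np);
 *		of_node_put(msi_np);
 *	}
 */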
| linux-master | drivers/of/base.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
* <[email protected]>
* and Arnd Bergmann, IBM Corp.
* Merged from powerpc/kernel/of_platform.c and
* sparc{,64}/kernel/of_device.c by Stephen Rothwell
*/
#define pr_fmt(fmt) "OF: " fmt
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include "of_private.h"
const struct of_device_id of_default_bus_match_table[] = {
{ .compatible = "simple-bus", },
{ .compatible = "simple-mfd", },
{ .compatible = "isa", },
#ifdef CONFIG_ARM_AMBA
{ .compatible = "arm,amba-bus", },
#endif /* CONFIG_ARM_AMBA */
{} /* Empty terminated list */
};
/**
* of_find_device_by_node - Find the platform_device associated with a node
* @np: Pointer to device tree node
*
* Takes a reference to the embedded struct device which needs to be dropped
* after use.
*
* Return: platform_device pointer, or NULL if not found
*/
struct platform_device *of_find_device_by_node(struct device_node *np)
{
struct device *dev;
dev = bus_find_device_by_of_node(&platform_bus_type, np);
return dev ? to_platform_device(dev) : NULL;
}
EXPORT_SYMBOL(of_find_device_by_node);
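/*
 * Example (illustrative sketch): looking up the platform device behind a
 * phandle, for instance a consumer locating its supplier. The
 * "acme,supplier" property name is made up for this sketch.
 *
 *	struct device_node *supplier_np;
 *	struct platform_device *supplier = NULL;
 *
 *	supplier_np = of_parse_phandle(consumer_np, "acme,supplier", 0);
 *	if (supplier_np) {
 *		supplier = of_find_device_by_node(supplier_np);
 *		of_node_put(supplier_np);
 *	}
 *	if (supplier) {
 *		dev_dbg(&supplier->dev, "supplier found\n");
 *		platform_device_put(supplier);
 *	}
 */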
int of_device_add(struct platform_device *ofdev)
{
BUG_ON(ofdev->dev.of_node == NULL);
/* name and id have to be set so that the platform bus doesn't get
* confused on matching */
ofdev->name = dev_name(&ofdev->dev);
ofdev->id = PLATFORM_DEVID_NONE;
/*
* If this device does not have a NUMA node bound in the devicetree (that is,
* of_node_to_nid() returns NUMA_NO_NODE), device_add() will assume that this
* device is on the same node as the parent.
*/
set_dev_node(&ofdev->dev, of_node_to_nid(ofdev->dev.of_node));
return device_add(&ofdev->dev);
}
int of_device_register(struct platform_device *pdev)
{
device_initialize(&pdev->dev);
return of_device_add(pdev);
}
EXPORT_SYMBOL(of_device_register);
void of_device_unregister(struct platform_device *ofdev)
{
device_unregister(&ofdev->dev);
}
EXPORT_SYMBOL(of_device_unregister);
#ifdef CONFIG_OF_ADDRESS
static const struct of_device_id of_skipped_node_table[] = {
{ .compatible = "operating-points-v2", },
{} /* Empty terminated list */
};
/*
* The following routines scan a subtree and registers a device for
* each applicable node.
*
* Note: sparc doesn't use these routines because it has a different
* mechanism for creating devices from device tree nodes.
*/
/**
* of_device_make_bus_id - Use the device node data to assign a unique name
* @dev: pointer to device structure that is linked to a device tree node
*
* This routine will first try using the translated bus address to
* derive a unique name. If it cannot, then it will prepend names from
* parent nodes until a unique name can be derived.
*/
static void of_device_make_bus_id(struct device *dev)
{
struct device_node *node = dev->of_node;
const __be32 *reg;
u64 addr;
u32 mask;
/* Construct the name, using parent nodes if necessary to ensure uniqueness */
while (node->parent) {
/*
* If the address can be translated, then that is as much
* uniqueness as we need. Make it the first component and return
*/
reg = of_get_property(node, "reg", NULL);
if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) {
if (!of_property_read_u32(node, "mask", &mask))
dev_set_name(dev, dev_name(dev) ? "%llx.%x.%pOFn:%s" : "%llx.%x.%pOFn",
addr, ffs(mask) - 1, node, dev_name(dev));
else
dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn",
addr, node, dev_name(dev));
return;
}
/* format arguments only used if dev_name() resolves to NULL */
dev_set_name(dev, dev_name(dev) ? "%s:%s" : "%s",
kbasename(node->full_name), dev_name(dev));
node = node->parent;
}
}
/**
* of_device_alloc - Allocate and initialize an of_device
* @np: device node to assign to device
* @bus_id: Name to assign to the device. May be null to use default name.
* @parent: Parent device.
*/
struct platform_device *of_device_alloc(struct device_node *np,
const char *bus_id,
struct device *parent)
{
struct platform_device *dev;
int rc, i, num_reg = 0;
struct resource *res;
dev = platform_device_alloc("", PLATFORM_DEVID_NONE);
if (!dev)
return NULL;
/* count the io resources */
num_reg = of_address_count(np);
/* Populate the resource table */
if (num_reg) {
res = kcalloc(num_reg, sizeof(*res), GFP_KERNEL);
if (!res) {
platform_device_put(dev);
return NULL;
}
dev->num_resources = num_reg;
dev->resource = res;
for (i = 0; i < num_reg; i++, res++) {
rc = of_address_to_resource(np, i, res);
WARN_ON(rc);
}
}
/* setup generic device info */
device_set_node(&dev->dev, of_fwnode_handle(of_node_get(np)));
dev->dev.parent = parent ? : &platform_bus;
if (bus_id)
dev_set_name(&dev->dev, "%s", bus_id);
else
of_device_make_bus_id(&dev->dev);
return dev;
}
EXPORT_SYMBOL(of_device_alloc);
/**
* of_platform_device_create_pdata - Alloc, initialize and register an of_device
* @np: pointer to node to create device for
* @bus_id: name to assign device
* @platform_data: pointer to populate platform_data pointer with
* @parent: Linux device model parent device.
*
* Return: Pointer to created platform device, or NULL if a device was not
* registered. Unavailable devices will not get registered.
*/
static struct platform_device *of_platform_device_create_pdata(
struct device_node *np,
const char *bus_id,
void *platform_data,
struct device *parent)
{
struct platform_device *dev;
if (!of_device_is_available(np) ||
of_node_test_and_set_flag(np, OF_POPULATED))
return NULL;
dev = of_device_alloc(np, bus_id, parent);
if (!dev)
goto err_clear_flag;
dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
if (!dev->dev.dma_mask)
dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data;
of_msi_configure(&dev->dev, dev->dev.of_node);
if (of_device_add(dev) != 0) {
platform_device_put(dev);
goto err_clear_flag;
}
return dev;
err_clear_flag:
of_node_clear_flag(np, OF_POPULATED);
return NULL;
}
/**
* of_platform_device_create - Alloc, initialize and register an of_device
* @np: pointer to node to create device for
* @bus_id: name to assign device
* @parent: Linux device model parent device.
*
* Return: Pointer to created platform device, or NULL if a device was not
* registered. Unavailable devices will not get registered.
*/
struct platform_device *of_platform_device_create(struct device_node *np,
const char *bus_id,
struct device *parent)
{
return of_platform_device_create_pdata(np, bus_id, NULL, parent);
}
EXPORT_SYMBOL(of_platform_device_create);
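/*
 * Example (illustrative sketch): creating a platform device for one specific
 * node, similar to how the firmware node is handled at boot. The
 * "/example-node" path is an assumption for this sketch.
 *
 *	struct device_node *np = of_find_node_by_path("/example-node");
 *
 *	if (np) {
 *		of_platform_device_create(np, NULL, NULL);
 *		of_node_put(np);
 *	}
 */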
#ifdef CONFIG_ARM_AMBA
static struct amba_device *of_amba_device_create(struct device_node *node,
const char *bus_id,
void *platform_data,
struct device *parent)
{
struct amba_device *dev;
int ret;
pr_debug("Creating amba device %pOF\n", node);
if (!of_device_is_available(node) ||
of_node_test_and_set_flag(node, OF_POPULATED))
return NULL;
dev = amba_device_alloc(NULL, 0, 0);
if (!dev)
goto err_clear_flag;
/* AMBA devices only support a single DMA mask */
dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
/* setup generic device info */
device_set_node(&dev->dev, of_fwnode_handle(of_node_get(node)));
dev->dev.parent = parent ? : &platform_bus;
dev->dev.platform_data = platform_data;
if (bus_id)
dev_set_name(&dev->dev, "%s", bus_id);
else
of_device_make_bus_id(&dev->dev);
/* Allow the HW Peripheral ID to be overridden */
of_property_read_u32(node, "arm,primecell-periphid", &dev->periphid);
ret = of_address_to_resource(node, 0, &dev->res);
if (ret) {
pr_err("amba: of_address_to_resource() failed (%d) for %pOF\n",
ret, node);
goto err_free;
}
ret = amba_device_add(dev, &iomem_resource);
if (ret) {
pr_err("amba_device_add() failed (%d) for %pOF\n",
ret, node);
goto err_free;
}
return dev;
err_free:
amba_device_put(dev);
err_clear_flag:
of_node_clear_flag(node, OF_POPULATED);
return NULL;
}
#else /* CONFIG_ARM_AMBA */
static struct amba_device *of_amba_device_create(struct device_node *node,
const char *bus_id,
void *platform_data,
struct device *parent)
{
return NULL;
}
#endif /* CONFIG_ARM_AMBA */
/*
* of_dev_lookup() - Given a device node, lookup the preferred Linux name
*/
static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *lookup,
struct device_node *np)
{
const struct of_dev_auxdata *auxdata;
struct resource res;
int compatible = 0;
if (!lookup)
return NULL;
auxdata = lookup;
for (; auxdata->compatible; auxdata++) {
if (!of_device_is_compatible(np, auxdata->compatible))
continue;
compatible++;
if (!of_address_to_resource(np, 0, &res))
if (res.start != auxdata->phys_addr)
continue;
pr_debug("%pOF: devname=%s\n", np, auxdata->name);
return auxdata;
}
if (!compatible)
return NULL;
/* Try compatible match if no phys_addr and name are specified */
auxdata = lookup;
for (; auxdata->compatible; auxdata++) {
if (!of_device_is_compatible(np, auxdata->compatible))
continue;
if (!auxdata->phys_addr && !auxdata->name) {
pr_debug("%pOF: compatible match\n", np);
return auxdata;
}
}
return NULL;
}
/**
* of_platform_bus_create() - Create a device for a node and its children.
* @bus: device node of the bus to instantiate
* @matches: match table for bus nodes
* @lookup: auxdata table for matching id and platform_data with device nodes
* @parent: parent for new device, or NULL for top level.
* @strict: require compatible property
*
* Creates a platform_device for the provided device_node, and optionally
* recursively create devices for all the child nodes.
*/
static int of_platform_bus_create(struct device_node *bus,
const struct of_device_id *matches,
const struct of_dev_auxdata *lookup,
struct device *parent, bool strict)
{
const struct of_dev_auxdata *auxdata;
struct device_node *child;
struct platform_device *dev;
const char *bus_id = NULL;
void *platform_data = NULL;
int rc = 0;
/* Make sure it has a compatible property */
if (strict && (!of_get_property(bus, "compatible", NULL))) {
pr_debug("%s() - skipping %pOF, no compatible prop\n",
__func__, bus);
return 0;
}
/* Skip nodes for which we don't want to create devices */
if (unlikely(of_match_node(of_skipped_node_table, bus))) {
pr_debug("%s() - skipping %pOF node\n", __func__, bus);
return 0;
}
if (of_node_check_flag(bus, OF_POPULATED_BUS)) {
pr_debug("%s() - skipping %pOF, already populated\n",
__func__, bus);
return 0;
}
auxdata = of_dev_lookup(lookup, bus);
if (auxdata) {
bus_id = auxdata->name;
platform_data = auxdata->platform_data;
}
if (of_device_is_compatible(bus, "arm,primecell")) {
/*
* Don't return an error here to keep compatibility with older
* device tree files.
*/
of_amba_device_create(bus, bus_id, platform_data, parent);
return 0;
}
dev = of_platform_device_create_pdata(bus, bus_id, platform_data, parent);
if (!dev || !of_match_node(matches, bus))
return 0;
for_each_child_of_node(bus, child) {
pr_debug(" create child: %pOF\n", child);
rc = of_platform_bus_create(child, matches, lookup, &dev->dev, strict);
if (rc) {
of_node_put(child);
break;
}
}
of_node_set_flag(bus, OF_POPULATED_BUS);
return rc;
}
/**
* of_platform_bus_probe() - Probe the device-tree for platform buses
* @root: parent of the first level to probe or NULL for the root of the tree
* @matches: match table for bus nodes
* @parent: parent to hook devices from, NULL for toplevel
*
* Note that children of the provided root are not instantiated as devices
* unless the specified root itself matches the bus list and is not NULL.
*/
int of_platform_bus_probe(struct device_node *root,
const struct of_device_id *matches,
struct device *parent)
{
struct device_node *child;
int rc = 0;
root = root ? of_node_get(root) : of_find_node_by_path("/");
if (!root)
return -EINVAL;
pr_debug("%s()\n", __func__);
pr_debug(" starting at: %pOF\n", root);
/* Do a self check of bus type, if there's a match, create children */
if (of_match_node(matches, root)) {
rc = of_platform_bus_create(root, matches, NULL, parent, false);
} else for_each_child_of_node(root, child) {
if (!of_match_node(matches, child))
continue;
rc = of_platform_bus_create(child, matches, NULL, parent, false);
if (rc) {
of_node_put(child);
break;
}
}
of_node_put(root);
return rc;
}
EXPORT_SYMBOL(of_platform_bus_probe);
/**
* of_platform_populate() - Populate platform_devices from device tree data
* @root: parent of the first level to probe or NULL for the root of the tree
* @matches: match table, NULL to use the default
* @lookup: auxdata table for matching id and platform_data with device nodes
* @parent: parent to hook devices from, NULL for toplevel
*
* Similar to of_platform_bus_probe(), this function walks the device tree
* and creates devices from nodes. It differs in that it follows the modern
* convention of requiring all device nodes to have a 'compatible' property,
* and it is suitable for creating devices which are children of the root
* node (of_platform_bus_probe will only create children of the root which
* are selected by the @matches argument).
*
* New board support should be using this function instead of
* of_platform_bus_probe().
*
* Return: 0 on success, < 0 on failure.
*/
int of_platform_populate(struct device_node *root,
const struct of_device_id *matches,
const struct of_dev_auxdata *lookup,
struct device *parent)
{
struct device_node *child;
int rc = 0;
root = root ? of_node_get(root) : of_find_node_by_path("/");
if (!root)
return -EINVAL;
pr_debug("%s()\n", __func__);
pr_debug(" starting at: %pOF\n", root);
device_links_supplier_sync_state_pause();
for_each_child_of_node(root, child) {
rc = of_platform_bus_create(child, matches, lookup, parent, true);
if (rc) {
of_node_put(child);
break;
}
}
device_links_supplier_sync_state_resume();
of_node_set_flag(root, OF_POPULATED_BUS);
of_node_put(root);
return rc;
}
EXPORT_SYMBOL_GPL(of_platform_populate);
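/*
 * Example (illustrative sketch): a bus-like MFD driver populating its child
 * nodes in probe and removing them in remove. The function names and driver
 * structure are assumptions for this sketch.
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		return of_platform_populate(pdev->dev.of_node, NULL, NULL,
 *					    &pdev->dev);
 *	}
 *
 *	static void example_remove(struct platform_device *pdev)
 *	{
 *		of_platform_depopulate(&pdev->dev);
 *	}
 */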
int of_platform_default_populate(struct device_node *root,
const struct of_dev_auxdata *lookup,
struct device *parent)
{
return of_platform_populate(root, of_default_bus_match_table, lookup,
parent);
}
EXPORT_SYMBOL_GPL(of_platform_default_populate);
static const struct of_device_id reserved_mem_matches[] = {
{ .compatible = "phram" },
{ .compatible = "qcom,rmtfs-mem" },
{ .compatible = "qcom,cmd-db" },
{ .compatible = "qcom,smem" },
{ .compatible = "ramoops" },
{ .compatible = "nvmem-rmem" },
{ .compatible = "google,open-dice" },
{}
};
static int __init of_platform_default_populate_init(void)
{
struct device_node *node;
device_links_supplier_sync_state_pause();
if (!of_have_populated_dt())
return -ENODEV;
if (IS_ENABLED(CONFIG_PPC)) {
struct device_node *boot_display = NULL;
struct platform_device *dev;
int display_number = 0;
int ret;
/* Check if we have a MacOS display without a node spec */
if (of_property_present(of_chosen, "linux,bootx-noscreen")) {
/*
* The old code tried to work out which node was the MacOS
* display based on the address. I'm dropping that since the
* lack of a node spec only happens with old BootX versions
* (users can update) and with this code, they'll still get
* a display (just not the palette hacks).
*/
dev = platform_device_alloc("bootx-noscreen", 0);
if (WARN_ON(!dev))
return -ENOMEM;
ret = platform_device_add(dev);
if (WARN_ON(ret)) {
platform_device_put(dev);
return ret;
}
}
/*
* For OF framebuffers, first create the device for the boot display,
* then for the other framebuffers. Only fail for the boot display;
* ignore errors for the rest.
*/
for_each_node_by_type(node, "display") {
if (!of_get_property(node, "linux,opened", NULL) ||
!of_get_property(node, "linux,boot-display", NULL))
continue;
dev = of_platform_device_create(node, "of-display", NULL);
of_node_put(node);
if (WARN_ON(!dev))
return -ENOMEM;
boot_display = node;
display_number++;
break;
}
for_each_node_by_type(node, "display") {
char buf[14];
const char *of_display_format = "of-display.%d";
if (!of_get_property(node, "linux,opened", NULL) || node == boot_display)
continue;
ret = snprintf(buf, sizeof(buf), of_display_format, display_number++);
if (ret < sizeof(buf))
of_platform_device_create(node, buf, NULL);
}
} else {
/*
* Handle certain compatibles explicitly, since we don't want to create
* platform_devices for every node in /reserved-memory with a
* "compatible",
*/
for_each_matching_node(node, reserved_mem_matches)
of_platform_device_create(node, NULL, NULL);
node = of_find_node_by_path("/firmware");
if (node) {
of_platform_populate(node, NULL, NULL, NULL);
of_node_put(node);
}
node = of_get_compatible_child(of_chosen, "simple-framebuffer");
of_platform_device_create(node, NULL, NULL);
of_node_put(node);
/* Populate everything else. */
of_platform_default_populate(NULL, NULL, NULL);
}
return 0;
}
arch_initcall_sync(of_platform_default_populate_init);
static int __init of_platform_sync_state_init(void)
{
device_links_supplier_sync_state_resume();
return 0;
}
late_initcall_sync(of_platform_sync_state_init);
int of_platform_device_destroy(struct device *dev, void *data)
{
/* Do not touch devices not populated from the device tree */
if (!dev->of_node || !of_node_check_flag(dev->of_node, OF_POPULATED))
return 0;
/* Recurse for any nodes that were treated as busses */
if (of_node_check_flag(dev->of_node, OF_POPULATED_BUS))
device_for_each_child(dev, NULL, of_platform_device_destroy);
of_node_clear_flag(dev->of_node, OF_POPULATED);
of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
if (dev->bus == &platform_bus_type)
platform_device_unregister(to_platform_device(dev));
#ifdef CONFIG_ARM_AMBA
else if (dev->bus == &amba_bustype)
amba_device_unregister(to_amba_device(dev));
#endif
return 0;
}
EXPORT_SYMBOL_GPL(of_platform_device_destroy);
/**
* of_platform_depopulate() - Remove devices populated from device tree
* @parent: device which children will be removed
*
* Complementary to of_platform_populate(), this function removes children
* of the given device (and, recursively, their children) that have been
* created from their respective device tree nodes (and only those,
* leaving others - eg. manually created - unharmed).
*/
void of_platform_depopulate(struct device *parent)
{
if (parent->of_node && of_node_check_flag(parent->of_node, OF_POPULATED_BUS)) {
device_for_each_child_reverse(parent, NULL, of_platform_device_destroy);
of_node_clear_flag(parent->of_node, OF_POPULATED_BUS);
}
}
EXPORT_SYMBOL_GPL(of_platform_depopulate);
static void devm_of_platform_populate_release(struct device *dev, void *res)
{
of_platform_depopulate(*(struct device **)res);
}
/**
* devm_of_platform_populate() - Populate platform_devices from device tree data
* @dev: device that requested to populate from device tree data
*
* Similar to of_platform_populate(), but will automatically call
* of_platform_depopulate() when the device is unbound from the bus.
*
* Return: 0 on success, < 0 on failure.
*/
int devm_of_platform_populate(struct device *dev)
{
struct device **ptr;
int ret;
if (!dev)
return -EINVAL;
ptr = devres_alloc(devm_of_platform_populate_release,
sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return -ENOMEM;
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret) {
devres_free(ptr);
} else {
*ptr = dev;
devres_add(dev, ptr);
}
return ret;
}
EXPORT_SYMBOL_GPL(devm_of_platform_populate);
static int devm_of_platform_match(struct device *dev, void *res, void *data)
{
struct device **ptr = res;
if (!ptr) {
WARN_ON(!ptr);
return 0;
}
return *ptr == data;
}
/**
* devm_of_platform_depopulate() - Remove devices populated from device tree
* @dev: device that requested to depopulate from device tree data
*
* Complementary to devm_of_platform_populate(), this function removes children
* of the given device (and, recursively, their children) that have been
* created from their respective device tree nodes (and only those,
* leaving others - eg. manually created - unharmed).
*/
void devm_of_platform_depopulate(struct device *dev)
{
int ret;
ret = devres_release(dev, devm_of_platform_populate_release,
devm_of_platform_match, dev);
WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_of_platform_depopulate);
#ifdef CONFIG_OF_DYNAMIC
static int of_platform_notify(struct notifier_block *nb,
unsigned long action, void *arg)
{
struct of_reconfig_data *rd = arg;
struct platform_device *pdev_parent, *pdev;
bool children_left;
switch (of_reconfig_get_state_change(action, rd)) {
case OF_RECONFIG_CHANGE_ADD:
/* verify that the parent is a bus */
if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS))
return NOTIFY_OK; /* not for us */
/* already populated? (driver using of_platform_populate manually) */
if (of_node_check_flag(rd->dn, OF_POPULATED))
return NOTIFY_OK;
/*
* Clear the flag before adding the device so that fw_devlink
* doesn't skip adding consumers to this device.
*/
rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
/* pdev_parent may be NULL when no bus platform device */
pdev_parent = of_find_device_by_node(rd->dn->parent);
pdev = of_platform_device_create(rd->dn, NULL,
pdev_parent ? &pdev_parent->dev : NULL);
platform_device_put(pdev_parent);
if (pdev == NULL) {
pr_err("%s: failed to create for '%pOF'\n",
__func__, rd->dn);
/* of_platform_device_create tosses the error code */
return notifier_from_errno(-EINVAL);
}
break;
case OF_RECONFIG_CHANGE_REMOVE:
/* already depopulated? */
if (!of_node_check_flag(rd->dn, OF_POPULATED))
return NOTIFY_OK;
/* find our device by node */
pdev = of_find_device_by_node(rd->dn);
if (pdev == NULL)
return NOTIFY_OK; /* no? not meant for us */
/* unregister takes one ref away */
of_platform_device_destroy(&pdev->dev, &children_left);
/* and put the reference of the find */
platform_device_put(pdev);
break;
}
return NOTIFY_OK;
}
static struct notifier_block platform_of_notifier = {
.notifier_call = of_platform_notify,
};
void of_platform_register_reconfig_notifier(void)
{
WARN_ON(of_reconfig_notifier_register(&platform_of_notifier));
}
#endif /* CONFIG_OF_DYNAMIC */
#endif /* CONFIG_OF_ADDRESS */
| linux-master | drivers/of/platform.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_reserved_mem.h>
#include <linux/dma-direct.h> /* for bus_dma_region */
#include <linux/dma-map-ops.h>
#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <asm/errno.h>
#include "of_private.h"
/**
* of_match_device - Tell if a struct device matches an of_device_id list
* @matches: array of of device match structures to search in
* @dev: the of device structure to match against
*
* Used by a driver to check whether a platform_device present in the
* system is in its list of supported devices.
*/
const struct of_device_id *of_match_device(const struct of_device_id *matches,
const struct device *dev)
{
if (!matches || !dev->of_node || dev->of_node_reused)
return NULL;
return of_match_node(matches, dev->of_node);
}
EXPORT_SYMBOL(of_match_device);
static void
of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
{
struct device_node *node, *of_node = dev->of_node;
int count, i;
if (!IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL))
return;
count = of_property_count_elems_of_size(of_node, "memory-region",
sizeof(u32));
/*
* If dev->of_node doesn't exist or doesn't contain memory-region, try
* the OF node having DMA configuration.
*/
if (count <= 0) {
of_node = np;
count = of_property_count_elems_of_size(
of_node, "memory-region", sizeof(u32));
}
for (i = 0; i < count; i++) {
node = of_parse_phandle(of_node, "memory-region", i);
/*
* There might be multiple memory regions, but only one
* restricted-dma-pool region is allowed.
*/
if (of_device_is_compatible(node, "restricted-dma-pool") &&
of_device_is_available(node)) {
of_node_put(node);
break;
}
of_node_put(node);
}
/*
* Attempt to initialize a restricted-dma-pool region if one was found.
* Note that count can hold a negative error code.
*/
if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
}
/**
* of_dma_configure_id - Setup DMA configuration
* @dev: Device to apply DMA configuration
* @np: Pointer to OF node having DMA configuration
* @force_dma: Whether device is to be set up by of_dma_configure() even if
* DMA capability is not explicitly described by firmware.
* @id: Optional const pointer value input id
*
* Try to get the device's DMA configuration from DT and update it
* accordingly.
*
* If platform code needs to use its own special DMA configuration, it
* can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events
* to fix up DMA configuration.
*/
int of_dma_configure_id(struct device *dev, struct device_node *np,
bool force_dma, const u32 *id)
{
const struct iommu_ops *iommu;
const struct bus_dma_region *map = NULL;
struct device_node *bus_np;
u64 dma_start = 0;
u64 mask, end, size = 0;
bool coherent;
int ret;
if (np == dev->of_node)
bus_np = __of_get_dma_parent(np);
else
bus_np = of_node_get(np);
ret = of_dma_get_range(bus_np, &map);
of_node_put(bus_np);
if (ret < 0) {
/*
* For legacy reasons, we have to assume some devices need
* DMA configuration regardless of whether "dma-ranges" is
* correctly specified or not.
*/
if (!force_dma)
return ret == -ENODEV ? 0 : ret;
} else {
const struct bus_dma_region *r = map;
u64 dma_end = 0;
/* Determine the overall bounds of all DMA regions */
for (dma_start = ~0; r->size; r++) {
/* Take lower and upper limits */
if (r->dma_start < dma_start)
dma_start = r->dma_start;
if (r->dma_start + r->size > dma_end)
dma_end = r->dma_start + r->size;
}
size = dma_end - dma_start;
/*
* Add a workaround to treat the size as mask + 1 in case
* it is defined in DT as a mask.
*/
if (size & 1) {
dev_warn(dev, "Invalid size 0x%llx for dma-range(s)\n",
size);
size = size + 1;
}
if (!size) {
dev_err(dev, "Adjusted size 0x%llx invalid\n", size);
kfree(map);
return -EINVAL;
}
}
/*
* If @dev is expected to be DMA-capable then the bus code that created
* it should have initialised its dma_mask pointer by this point. For
* now, we'll continue the legacy behaviour of coercing it to the
* coherent mask if not, but we'll no longer do so quietly.
*/
if (!dev->dma_mask) {
dev_warn(dev, "DMA mask not set\n");
dev->dma_mask = &dev->coherent_dma_mask;
}
if (!size && dev->coherent_dma_mask)
size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
else if (!size)
size = 1ULL << 32;
/*
* Limit coherent and dma mask based on size and default mask
* set by the driver.
*/
end = dma_start + size - 1;
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->coherent_dma_mask &= mask;
*dev->dma_mask &= mask;
/* ...but only set bus limit and range map if we found valid dma-ranges earlier */
if (!ret) {
dev->bus_dma_limit = end;
dev->dma_range_map = map;
}
coherent = of_dma_is_coherent(np);
dev_dbg(dev, "device is%sdma coherent\n",
coherent ? " " : " not ");
iommu = of_iommu_configure(dev, np, id);
if (PTR_ERR(iommu) == -EPROBE_DEFER) {
/* Don't touch range map if it wasn't set from a valid dma-ranges */
if (!ret)
dev->dma_range_map = NULL;
kfree(map);
return -EPROBE_DEFER;
}
dev_dbg(dev, "device is%sbehind an iommu\n",
iommu ? " " : " not ");
arch_setup_dma_ops(dev, dma_start, size, iommu, coherent);
if (!iommu)
of_dma_set_restricted_buffer(dev, np);
return 0;
}
EXPORT_SYMBOL_GPL(of_dma_configure_id);
const void *of_device_get_match_data(const struct device *dev)
{
const struct of_device_id *match;
match = of_match_device(dev->driver->of_match_table, dev);
if (!match)
return NULL;
return match->data;
}
EXPORT_SYMBOL(of_device_get_match_data);
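/*
 * Example (illustrative sketch): a driver retrieving per-compatible data in
 * probe. The example_chip_data type, the table contents and the "acme,..."
 * compatibles are assumptions for this sketch.
 *
 *	static const struct of_device_id example_of_ids[] = {
 *		{ .compatible = "acme,chip-v1", .data = &example_v1_data },
 *		{ .compatible = "acme,chip-v2", .data = &example_v2_data },
 *		{ }
 *	};
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		const struct example_chip_data *data;
 *
 *		data = of_device_get_match_data(&pdev->dev);
 *		if (!data)
 *			return -ENODEV;
 *		return 0;
 *	}
 */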
/**
* of_device_modalias - Fill buffer with newline terminated modalias string
* @dev: Calling device
* @str: Modalias string
* @len: Size of @str
*/
ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len)
{
ssize_t sl;
if (!dev || !dev->of_node || dev->of_node_reused)
return -ENODEV;
sl = of_modalias(dev->of_node, str, len - 2);
if (sl < 0)
return sl;
if (sl > len - 2)
return -ENOMEM;
str[sl++] = '\n';
str[sl] = 0;
return sl;
}
EXPORT_SYMBOL_GPL(of_device_modalias);
/**
* of_device_uevent - Display OF related uevent information
* @dev: Device to display the uevent information for
* @env: Kernel object's userspace event reference to fill up
*/
void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const char *compat, *type;
struct alias_prop *app;
struct property *p;
int seen = 0;
if ((!dev) || (!dev->of_node))
return;
add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node);
add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node);
type = of_node_get_device_type(dev->of_node);
if (type)
add_uevent_var(env, "OF_TYPE=%s", type);
/* Since the compatible field can contain pretty much anything
* it's not really legal to split it out with commas. We split it
* up using a number of environment variables instead. */
of_property_for_each_string(dev->of_node, "compatible", p, compat) {
add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat);
seen++;
}
add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen);
seen = 0;
mutex_lock(&of_mutex);
list_for_each_entry(app, &aliases_lookup, link) {
if (dev->of_node == app->np) {
add_uevent_var(env, "OF_ALIAS_%d=%s", seen,
app->alias);
seen++;
}
}
mutex_unlock(&of_mutex);
}
EXPORT_SYMBOL_GPL(of_device_uevent);
int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env)
{
int sl;
if ((!dev) || (!dev->of_node) || dev->of_node_reused)
return -ENODEV;
/* Devicetree modalias is tricky, we add it in 2 steps */
if (add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
sl = of_modalias(dev->of_node, &env->buf[env->buflen-1],
sizeof(env->buf) - env->buflen);
if (sl < 0)
return sl;
if (sl >= (sizeof(env->buf) - env->buflen))
return -ENOMEM;
env->buflen += sl;
return 0;
}
EXPORT_SYMBOL_GPL(of_device_uevent_modalias);
| linux-master | drivers/of/device.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Self tests for device tree subsystem
*/
#define pr_fmt(fmt) "### dt-test ### " fmt
#include <linux/memblock.h>
#include <linux/clk.h>
#include <linux/dma-direct.h> /* to test phys_to_dma/dma_to_phys */
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/hashtable.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
#include <linux/gpio/driver.h>
#include <linux/bitops.h>
#include "of_private.h"
static struct unittest_results {
int passed;
int failed;
} unittest_results;
#define unittest(result, fmt, ...) ({ \
bool failed = !(result); \
if (failed) { \
unittest_results.failed++; \
pr_err("FAIL %s():%i " fmt, __func__, __LINE__, ##__VA_ARGS__); \
} else { \
unittest_results.passed++; \
pr_info("pass %s():%i\n", __func__, __LINE__); \
} \
failed; \
})
/*
* Expected message may have a message level other than KERN_INFO.
* Print the expected message only if the current loglevel will allow
* the actual message to print.
*
* Do not use EXPECT_BEGIN(), EXPECT_END(), EXPECT_NOT_BEGIN(), or
* EXPECT_NOT_END() to report messages expected to be reported or not
* reported by pr_debug().
*/
#define EXPECT_BEGIN(level, fmt, ...) \
printk(level pr_fmt("EXPECT \\ : ") fmt, ##__VA_ARGS__)
#define EXPECT_END(level, fmt, ...) \
printk(level pr_fmt("EXPECT / : ") fmt, ##__VA_ARGS__)
#define EXPECT_NOT_BEGIN(level, fmt, ...) \
printk(level pr_fmt("EXPECT_NOT \\ : ") fmt, ##__VA_ARGS__)
#define EXPECT_NOT_END(level, fmt, ...) \
printk(level pr_fmt("EXPECT_NOT / : ") fmt, ##__VA_ARGS__)
static void __init of_unittest_find_node_by_name(void)
{
struct device_node *np;
const char *options, *name;
np = of_find_node_by_path("/testcase-data");
name = kasprintf(GFP_KERNEL, "%pOF", np);
unittest(np && name && !strcmp("/testcase-data", name),
"find /testcase-data failed\n");
of_node_put(np);
kfree(name);
/* Test if trailing '/' works */
np = of_find_node_by_path("/testcase-data/");
unittest(!np, "trailing '/' on /testcase-data/ should fail\n");
np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
name = kasprintf(GFP_KERNEL, "%pOF", np);
unittest(np && name && !strcmp("/testcase-data/phandle-tests/consumer-a", name),
"find /testcase-data/phandle-tests/consumer-a failed\n");
of_node_put(np);
kfree(name);
np = of_find_node_by_path("testcase-alias");
name = kasprintf(GFP_KERNEL, "%pOF", np);
unittest(np && name && !strcmp("/testcase-data", name),
"find testcase-alias failed\n");
of_node_put(np);
kfree(name);
/* Test if trailing '/' works on aliases */
np = of_find_node_by_path("testcase-alias/");
unittest(!np, "trailing '/' on testcase-alias/ should fail\n");
np = of_find_node_by_path("testcase-alias/phandle-tests/consumer-a");
name = kasprintf(GFP_KERNEL, "%pOF", np);
unittest(np && name && !strcmp("/testcase-data/phandle-tests/consumer-a", name),
"find testcase-alias/phandle-tests/consumer-a failed\n");
of_node_put(np);
kfree(name);
np = of_find_node_by_path("/testcase-data/missing-path");
unittest(!np, "non-existent path returned node %pOF\n", np);
of_node_put(np);
np = of_find_node_by_path("missing-alias");
unittest(!np, "non-existent alias returned node %pOF\n", np);
of_node_put(np);
np = of_find_node_by_path("testcase-alias/missing-path");
unittest(!np, "non-existent alias with relative path returned node %pOF\n", np);
of_node_put(np);
np = of_find_node_opts_by_path("/testcase-data:testoption", &options);
unittest(np && !strcmp("testoption", options),
"option path test failed\n");
of_node_put(np);
np = of_find_node_opts_by_path("/testcase-data:test/option", &options);
unittest(np && !strcmp("test/option", options),
"option path test, subcase #1 failed\n");
of_node_put(np);
np = of_find_node_opts_by_path("/testcase-data/testcase-device1:test/option", &options);
unittest(np && !strcmp("test/option", options),
"option path test, subcase #2 failed\n");
of_node_put(np);
np = of_find_node_opts_by_path("/testcase-data:testoption", NULL);
unittest(np, "NULL option path test failed\n");
of_node_put(np);
np = of_find_node_opts_by_path("testcase-alias:testaliasoption",
&options);
unittest(np && !strcmp("testaliasoption", options),
"option alias path test failed\n");
of_node_put(np);
np = of_find_node_opts_by_path("testcase-alias:test/alias/option",
&options);
unittest(np && !strcmp("test/alias/option", options),
"option alias path test, subcase #1 failed\n");
of_node_put(np);
np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL);
unittest(np, "NULL option alias path test failed\n");
of_node_put(np);
options = "testoption";
np = of_find_node_opts_by_path("testcase-alias", &options);
unittest(np && !options, "option clearing test failed\n");
of_node_put(np);
options = "testoption";
np = of_find_node_opts_by_path("/", &options);
unittest(np && !options, "option clearing root node test failed\n");
of_node_put(np);
}
static void __init of_unittest_dynamic(void)
{
struct device_node *np;
struct property *prop;
np = of_find_node_by_path("/testcase-data");
if (!np) {
pr_err("missing testcase data\n");
return;
}
/* Array of 4 properties for the purpose of testing */
prop = kcalloc(4, sizeof(*prop), GFP_KERNEL);
if (!prop) {
unittest(0, "kzalloc() failed\n");
return;
}
/* Add a new property - should pass */
prop->name = "new-property";
prop->value = "new-property-data";
prop->length = strlen(prop->value) + 1;
unittest(of_add_property(np, prop) == 0, "Adding a new property failed\n");
/* Try to add an existing property - should fail */
prop++;
prop->name = "new-property";
prop->value = "new-property-data-should-fail";
prop->length = strlen(prop->value) + 1;
unittest(of_add_property(np, prop) != 0,
"Adding an existing property should have failed\n");
/* Try to modify an existing property - should pass */
prop->value = "modify-property-data-should-pass";
prop->length = strlen(prop->value) + 1;
unittest(of_update_property(np, prop) == 0,
"Updating an existing property should have passed\n");
/* Try to modify a non-existent property - should pass */
prop++;
prop->name = "modify-property";
prop->value = "modify-missing-property-data-should-pass";
prop->length = strlen(prop->value) + 1;
unittest(of_update_property(np, prop) == 0,
"Updating a missing property should have passed\n");
/* Remove property - should pass */
unittest(of_remove_property(np, prop) == 0,
"Removing a property should have passed\n");
/* Adding very large property - should pass */
prop++;
prop->name = "large-property-PAGE_SIZEx8";
prop->length = PAGE_SIZE * 8;
prop->value = kzalloc(prop->length, GFP_KERNEL);
unittest(prop->value != NULL, "Unable to allocate large buffer\n");
if (prop->value)
unittest(of_add_property(np, prop) == 0,
"Adding a large property should have passed\n");
}
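/*
 * Recursively verify that every child links back to its parent.
 * Returns the number of nodes visited, or a negative errno on corruption.
 */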
static int __init of_unittest_check_node_linkage(struct device_node *np)
{
struct device_node *child;
int count = 0, rc;
for_each_child_of_node(np, child) {
if (child->parent != np) {
pr_err("Child node %pOFn links to wrong parent %pOFn\n",
child, np);
rc = -EINVAL;
goto put_child;
}
rc = of_unittest_check_node_linkage(child);
if (rc < 0)
goto put_child;
count += rc;
}
return count + 1;
put_child:
of_node_put(child);
return rc;
}
static void __init of_unittest_check_tree_linkage(void)
{
struct device_node *np;
int allnode_count = 0, child_count;
if (!of_root)
return;
for_each_of_allnodes(np)
allnode_count++;
child_count = of_unittest_check_node_linkage(of_root);
unittest(child_count > 0, "Device node data structure is corrupted\n");
unittest(child_count == allnode_count,
"allnodes list size (%i) doesn't match sibling lists size (%i)\n",
allnode_count, child_count);
pr_debug("allnodes list size (%i); sibling lists size (%i)\n", allnode_count, child_count);
}
static void __init of_unittest_printf_one(struct device_node *np, const char *fmt,
const char *expected)
{
unsigned char *buf;
int buf_size;
int size, i;
buf_size = strlen(expected) + 10;
buf = kmalloc(buf_size, GFP_KERNEL);
if (!buf)
return;
/* Baseline; check conversion with a large size limit */
memset(buf, 0xff, buf_size);
size = snprintf(buf, buf_size - 2, fmt, np);
/* use strcmp() instead of strncmp() here to be absolutely sure strings match */
unittest((strcmp(buf, expected) == 0) && (buf[size+1] == 0xff),
"sprintf failed; fmt='%s' expected='%s' rslt='%s'\n",
fmt, expected, buf);
/* Make sure length limits work */
size++;
for (i = 0; i < 2; i++, size--) {
/* Clear the buffer, and make sure it works correctly still */
memset(buf, 0xff, buf_size);
snprintf(buf, size+1, fmt, np);
unittest(strncmp(buf, expected, size) == 0 && (buf[size+1] == 0xff),
"snprintf failed; size=%i fmt='%s' expected='%s' rslt='%s'\n",
size, fmt, expected, buf);
}
kfree(buf);
}
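/* Exercise the %pOF* printf format specifiers against a known test node */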
static void __init of_unittest_printf(void)
{
struct device_node *np;
const char *full_name = "/testcase-data/platform-tests/test-device@1/dev@100";
char phandle_str[16] = "";
np = of_find_node_by_path(full_name);
if (!np) {
unittest(np, "testcase data missing\n");
return;
}
num_to_str(phandle_str, sizeof(phandle_str), np->phandle, 0);
of_unittest_printf_one(np, "%pOF", full_name);
of_unittest_printf_one(np, "%pOFf", full_name);
of_unittest_printf_one(np, "%pOFn", "dev");
of_unittest_printf_one(np, "%2pOFn", "dev");
of_unittest_printf_one(np, "%5pOFn", " dev");
of_unittest_printf_one(np, "%pOFnc", "dev:test-sub-device");
of_unittest_printf_one(np, "%pOFp", phandle_str);
of_unittest_printf_one(np, "%pOFP", "dev@100");
of_unittest_printf_one(np, "ABC %pOFP ABC", "ABC dev@100 ABC");
of_unittest_printf_one(np, "%10pOFP", " dev@100");
of_unittest_printf_one(np, "%-10pOFP", "dev@100 ");
of_unittest_printf_one(of_root, "%pOFP", "/");
of_unittest_printf_one(np, "%pOFF", "----");
of_unittest_printf_one(np, "%pOFPF", "dev@100:----");
of_unittest_printf_one(np, "%pOFPFPc", "dev@100:----:dev@100:test-sub-device");
of_unittest_printf_one(np, "%pOFc", "test-sub-device");
of_unittest_printf_one(np, "%pOFC",
"\"test-sub-device\",\"test-compat2\",\"test-compat3\"");
}
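/* Walk every node and use a hashtable keyed by phandle to detect duplicates */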
struct node_hash {
struct hlist_node node;
struct device_node *np;
};
static DEFINE_HASHTABLE(phandle_ht, 8);
static void __init of_unittest_check_phandles(void)
{
struct device_node *np;
struct node_hash *nh;
struct hlist_node *tmp;
int i, dup_count = 0, phandle_count = 0;
for_each_of_allnodes(np) {
if (!np->phandle)
continue;
hash_for_each_possible(phandle_ht, nh, node, np->phandle) {
if (nh->np->phandle == np->phandle) {
pr_info("Duplicate phandle! %i used by %pOF and %pOF\n",
np->phandle, nh->np, np);
dup_count++;
break;
}
}
nh = kzalloc(sizeof(*nh), GFP_KERNEL);
if (!nh)
return;
nh->np = np;
hash_add(phandle_ht, &nh->node, np->phandle);
phandle_count++;
}
unittest(dup_count == 0, "Found %i duplicates in %i phandles\n",
dup_count, phandle_count);
/* Clean up */
hash_for_each_safe(phandle_ht, i, tmp, nh, node) {
hash_del(&nh->node);
kfree(nh);
}
}
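/*
 * Check of_parse_phandle_with_args() and of_count_phandle_with_args()
 * against the phandle lists in the test data, including the error paths
 * for missing properties, bad phandles, and malformed argument lists.
 */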
static void __init of_unittest_parse_phandle_with_args(void)
{
struct device_node *np;
struct of_phandle_args args;
int i, rc;
np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
if (!np) {
pr_err("missing testcase data\n");
return;
}
rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells");
unittest(rc == 7, "of_count_phandle_with_args() returned %i, expected 7\n", rc);
for (i = 0; i < 8; i++) {
bool passed = true;
memset(&args, 0, sizeof(args));
rc = of_parse_phandle_with_args(np, "phandle-list",
"#phandle-cells", i, &args);
/* Test the values from tests-phandle.dtsi */
switch (i) {
case 0:
passed &= !rc;
passed &= (args.args_count == 1);
passed &= (args.args[0] == (i + 1));
break;
case 1:
passed &= !rc;
passed &= (args.args_count == 2);
passed &= (args.args[0] == (i + 1));
passed &= (args.args[1] == 0);
break;
case 2:
passed &= (rc == -ENOENT);
break;
case 3:
passed &= !rc;
passed &= (args.args_count == 3);
passed &= (args.args[0] == (i + 1));
passed &= (args.args[1] == 4);
passed &= (args.args[2] == 3);
break;
case 4:
passed &= !rc;
passed &= (args.args_count == 2);
passed &= (args.args[0] == (i + 1));
passed &= (args.args[1] == 100);
break;
case 5:
passed &= !rc;
passed &= (args.args_count == 0);
break;
case 6:
passed &= !rc;
passed &= (args.args_count == 1);
passed &= (args.args[0] == (i + 1));
break;
case 7:
passed &= (rc == -ENOENT);
break;
default:
passed = false;
}
unittest(passed, "index %i - data error on node %pOF rc=%i\n",
i, args.np, rc);
}
/* Check for missing list property */
memset(&args, 0, sizeof(args));
rc = of_parse_phandle_with_args(np, "phandle-list-missing",
"#phandle-cells", 0, &args);
unittest(rc == -ENOENT, "expected:%i got:%i\n", -ENOENT, rc);
rc = of_count_phandle_with_args(np, "phandle-list-missing",
"#phandle-cells");
unittest(rc == -ENOENT, "expected:%i got:%i\n", -ENOENT, rc);
/* Check for missing cells property */
memset(&args, 0, sizeof(args));
EXPECT_BEGIN(KERN_INFO,
"OF: /testcase-data/phandle-tests/consumer-a: could not get #phandle-cells-missing for /testcase-data/phandle-tests/provider1");
rc = of_parse_phandle_with_args(np, "phandle-list",
"#phandle-cells-missing", 0, &args);
EXPECT_END(KERN_INFO,
"OF: /testcase-data/phandle-tests/consumer-a: could not get #phandle-cells-missing for /testcase-data/phandle-tests/provider1");
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
EXPECT_BEGIN(KERN_INFO,
"OF: /testcase-data/phandle-tests/consumer-a: could not get #phandle-cells-missing for /testcase-data/phandle-tests/provider1");
rc = of_count_phandle_with_args(np, "phandle-list",
"#phandle-cells-missing");
EXPECT_END(KERN_INFO,
"OF: /testcase-data/phandle-tests/consumer-a: could not get #phandle-cells-missing for /testcase-data/phandle-tests/provider1");
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
/* Check for bad phandle in list */
memset(&args, 0, sizeof(args));
EXPECT_BEGIN(KERN_INFO,
"OF: /testcase-data/phandle-tests/consumer-a: could not find phandle");
rc = of_parse_phandle_with_args(np, "phandle-list-bad-phandle",
"#phandle-cells", 0, &args);
EXPECT_END(KERN_INFO,
"OF: /testcase-data/phandle-tests/consumer-a: could not find phandle");
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
EXPECT_BEGIN(KERN_INFO,
"OF: /testcase-data/phandle-tests/consumer-a: could not find phandle");
rc = of_count_phandle_with_args(np, "phandle-list-bad-phandle",
"#phandle-cells");
EXPECT_END(KERN_INFO,
"OF: /testcase-data/phandle-tests/consumer-a: could not find phandle");
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
/* Check for incorrectly formed argument list */
memset(&args, 0, sizeof(args));
EXPECT_BEGIN(KERN_INFO,
"OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1");
rc = of_parse_phandle_with_args(np, "phandle-list-bad-args",
"#phandle-cells", 1, &args);
EXPECT_END(KERN_INFO,
"OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1");
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
EXPECT_BEGIN(KERN_INFO,
"OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1");
rc = of_count_phandle_with_args(np, "phandle-list-bad-args",
"#phandle-cells");
EXPECT_END(KERN_INFO,
"OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1");
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
}
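/*
 * Same checks as above, but via of_parse_phandle_with_args_map(), which
 * can remap an entry to a different provider node through the providers'
 * map properties.
 */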
static void __init of_unittest_parse_phandle_with_args_map(void)
{
struct device_node *np, *p0, *p1, *p2, *p3;
struct of_phandle_args args;
int i, rc;
np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-b");
if (!np) {
pr_err("missing testcase data\n");
return;
}
p0 = of_find_node_by_path("/testcase-data/phandle-tests/provider0");
if (!p0) {
pr_err("missing testcase data\n");
return;
}
p1 = of_find_node_by_path("/testcase-data/phandle-tests/provider1");
if (!p1) {
pr_err("missing testcase data\n");
return;
}
p2 = of_find_node_by_path("/testcase-data/phandle-tests/provider2");
if (!p2) {
pr_err("missing testcase data\n");
return;
}
p3 = of_find_node_by_path("/testcase-data/phandle-tests/provider3");
if (!p3) {
pr_err("missing testcase data\n");
return;
}
rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells");
unittest(rc == 7, "of_count_phandle_with_args() returned %i, expected 7\n", rc);
for (i = 0; i < 8; i++) {
bool passed = true;
memset(&args, 0, sizeof(args));
rc = of_parse_phandle_with_args_map(np, "phandle-list",
"phandle", i, &args);
/* Test the values from tests-phandle.dtsi */
switch (i) {
case 0:
passed &= !rc;
passed &= (args.np == p1);
passed &= (args.args_count == 1);
passed &= (args.args[0] == 1);
break;
case 1:
passed &= !rc;
passed &= (args.np == p3);
passed &= (args.args_count == 3);
passed &= (args.args[0] == 2);
passed &= (args.args[1] == 5);
passed &= (args.args[2] == 3);
break;
case 2:
passed &= (rc == -ENOENT);
break;
case 3:
passed &= !rc;
passed &= (args.np == p0);
passed &= (args.args_count == 0);
break;
case 4:
passed &= !rc;
passed &= (args.np == p1);
passed &= (args.args_count == 1);
passed &= (args.args[0] == 3);
break;
case 5:
passed &= !rc;
passed &= (args.np == p0);
passed &= (args.args_count == 0);
break;
case 6:
passed &= !rc;
passed &= (args.np == p2);
passed &= (args.args_count == 2);
passed &= (args.args[0] == 15);
passed &= (args.args[1] == 0x20);
break;
case 7:
passed &= (rc == -ENOENT);
break;
default:
passed = false;
}
unittest(passed, "index %i - data error on node %s rc=%i\n",
i, args.np->full_name, rc);
}
/* Check for missing list property */
memset(&args, 0, sizeof(args));
rc = of_parse_phandle_with_args_map(np, "phandle-list-missing",
"phandle", 0, &args);
unittest(rc == -ENOENT, "expected:%i got:%i\n", -ENOENT, rc);
/* Check for missing cells,map,mask property */
memset(&args, 0, sizeof(args));
EXPECT_BEGIN(KERN_INFO,
"OF: /testcase-data/phandle-tests/consumer-b: could not get #phandle-missing-cells for /testcase-data/phandle-tests/provider1");
rc = of_parse_phandle_with_args_map(np, "phandle-list",
"phandle-missing", 0, &args);
EXPECT_END(KERN_INFO,
"OF: /testcase-data/phandle-tests/consumer-b: could not get #phandle-missing-cells for /testcase-data/phandle-tests/provider1");
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
/* Check for bad phandle in list */
memset(&args, 0, sizeof(args));
EXPECT_BEGIN(KERN_INFO,
"OF: /testcase-data/phandle-tests/consumer-b: could not find phandle 12345678");
rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-phandle",
"phandle", 0, &args);
EXPECT_END(KERN_INFO,
"OF: /testcase-data/phandle-tests/consumer-b: could not find phandle 12345678");
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
/* Check for incorrectly formed argument list */
memset(&args, 0, sizeof(args));
EXPECT_BEGIN(KERN_INFO,
"OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1");
rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-args",
"phandle", 1, &args);
EXPECT_END(KERN_INFO,
"OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1");
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
}
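/* Exercise the string property helpers: match, count, read by index, read array */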
static void __init of_unittest_property_string(void)
{
const char *strings[4];
struct device_node *np;
int rc;
np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
if (!np) {
pr_err("No testcase data in device tree\n");
return;
}
rc = of_property_match_string(np, "phandle-list-names", "first");
unittest(rc == 0, "first expected:0 got:%i\n", rc);
rc = of_property_match_string(np, "phandle-list-names", "second");
unittest(rc == 1, "second expected:1 got:%i\n", rc);
rc = of_property_match_string(np, "phandle-list-names", "third");
unittest(rc == 2, "third expected:2 got:%i\n", rc);
rc = of_property_match_string(np, "phandle-list-names", "fourth");
unittest(rc == -ENODATA, "unmatched string; rc=%i\n", rc);
rc = of_property_match_string(np, "missing-property", "blah");
unittest(rc == -EINVAL, "missing property; rc=%i\n", rc);
rc = of_property_match_string(np, "empty-property", "blah");
unittest(rc == -ENODATA, "empty property; rc=%i\n", rc);
rc = of_property_match_string(np, "unterminated-string", "blah");
unittest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
/* of_property_count_strings() tests */
rc = of_property_count_strings(np, "string-property");
unittest(rc == 1, "Incorrect string count; rc=%i\n", rc);
rc = of_property_count_strings(np, "phandle-list-names");
unittest(rc == 3, "Incorrect string count; rc=%i\n", rc);
rc = of_property_count_strings(np, "unterminated-string");
unittest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
rc = of_property_count_strings(np, "unterminated-string-list");
unittest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
/* of_property_read_string_index() tests */
rc = of_property_read_string_index(np, "string-property", 0, strings);
unittest(rc == 0 && !strcmp(strings[0], "foobar"), "of_property_read_string_index() failure; rc=%i\n", rc);
strings[0] = NULL;
rc = of_property_read_string_index(np, "string-property", 1, strings);
unittest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
rc = of_property_read_string_index(np, "phandle-list-names", 0, strings);
unittest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
rc = of_property_read_string_index(np, "phandle-list-names", 1, strings);
unittest(rc == 0 && !strcmp(strings[0], "second"), "of_property_read_string_index() failure; rc=%i\n", rc);
rc = of_property_read_string_index(np, "phandle-list-names", 2, strings);
unittest(rc == 0 && !strcmp(strings[0], "third"), "of_property_read_string_index() failure; rc=%i\n", rc);
strings[0] = NULL;
rc = of_property_read_string_index(np, "phandle-list-names", 3, strings);
unittest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
strings[0] = NULL;
rc = of_property_read_string_index(np, "unterminated-string", 0, strings);
unittest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
rc = of_property_read_string_index(np, "unterminated-string-list", 0, strings);
unittest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
strings[0] = NULL;
rc = of_property_read_string_index(np, "unterminated-string-list", 2, strings); /* should fail */
unittest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
strings[1] = NULL;
/* of_property_read_string_array() tests */
rc = of_property_read_string_array(np, "string-property", strings, 4);
unittest(rc == 1, "Incorrect string count; rc=%i\n", rc);
rc = of_property_read_string_array(np, "phandle-list-names", strings, 4);
unittest(rc == 3, "Incorrect string count; rc=%i\n", rc);
rc = of_property_read_string_array(np, "unterminated-string", strings, 4);
unittest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
/* -- An incorrectly formed string should cause a failure */
rc = of_property_read_string_array(np, "unterminated-string-list", strings, 4);
unittest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
/* -- parsing the correctly formed strings should still work: */
strings[2] = NULL;
rc = of_property_read_string_array(np, "unterminated-string-list", strings, 2);
unittest(rc == 2 && strings[2] == NULL, "of_property_read_string_array() failure; rc=%i\n", rc);
strings[1] = NULL;
rc = of_property_read_string_array(np, "phandle-list-names", strings, 1);
unittest(rc == 1 && strings[1] == NULL, "Overwrote end of string array; rc=%i, str='%s'\n", rc, strings[1]);
}
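/* Compare two properties for identical name, length, and value */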
#define propcmp(p1, p2) (((p1)->length == (p2)->length) && \
(p1)->value && (p2)->value && \
!memcmp((p1)->value, (p2)->value, (p1)->length) && \
!strcmp((p1)->name, (p2)->name))
static void __init of_unittest_property_copy(void)
{
#ifdef CONFIG_OF_DYNAMIC
struct property p1 = { .name = "p1", .length = 0, .value = "" };
struct property p2 = { .name = "p2", .length = 5, .value = "abcd" };
struct property *new;
new = __of_prop_dup(&p1, GFP_KERNEL);
unittest(new && propcmp(&p1, new), "empty property didn't copy correctly\n");
kfree(new->value);
kfree(new->name);
kfree(new);
new = __of_prop_dup(&p2, GFP_KERNEL);
unittest(new && propcmp(&p2, new), "non-empty property didn't copy correctly\n");
kfree(new->value);
kfree(new->name);
kfree(new);
#endif
}
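/*
 * Build a changeset that attaches nodes and adds, updates, and removes
 * properties; apply it, verify the live tree, then revert and re-verify.
 */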
static void __init of_unittest_changeset(void)
{
#ifdef CONFIG_OF_DYNAMIC
int ret;
struct property *ppadd, padd = { .name = "prop-add", .length = 1, .value = "" };
struct property *ppname_n1, pname_n1 = { .name = "name", .length = 3, .value = "n1" };
struct property *ppname_n2, pname_n2 = { .name = "name", .length = 3, .value = "n2" };
struct property *ppname_n21, pname_n21 = { .name = "name", .length = 3, .value = "n21" };
struct property *ppupdate, pupdate = { .name = "prop-update", .length = 5, .value = "abcd" };
struct property *ppremove;
struct device_node *n1, *n2, *n21, *n22, *nchangeset, *nremove, *parent, *np;
static const char * const str_array[] = { "str1", "str2", "str3" };
const u32 u32_array[] = { 1, 2, 3 };
struct of_changeset chgset;
const char *propstr = NULL;
n1 = __of_node_dup(NULL, "n1");
unittest(n1, "testcase setup failure\n");
n2 = __of_node_dup(NULL, "n2");
unittest(n2, "testcase setup failure\n");
n21 = __of_node_dup(NULL, "n21");
unittest(n21, "testcase setup failure %p\n", n21);
nchangeset = of_find_node_by_path("/testcase-data/changeset");
nremove = of_get_child_by_name(nchangeset, "node-remove");
unittest(nremove, "testcase setup failure\n");
ppadd = __of_prop_dup(&padd, GFP_KERNEL);
unittest(ppadd, "testcase setup failure\n");
ppname_n1 = __of_prop_dup(&pname_n1, GFP_KERNEL);
unittest(ppname_n1, "testcase setup failure\n");
ppname_n2 = __of_prop_dup(&pname_n2, GFP_KERNEL);
unittest(ppname_n2, "testcase setup failure\n");
ppname_n21 = __of_prop_dup(&pname_n21, GFP_KERNEL);
unittest(ppname_n21, "testcase setup failure\n");
ppupdate = __of_prop_dup(&pupdate, GFP_KERNEL);
unittest(ppupdate, "testcase setup failure\n");
parent = nchangeset;
n1->parent = parent;
n2->parent = parent;
n21->parent = n2;
ppremove = of_find_property(parent, "prop-remove", NULL);
unittest(ppremove, "failed to find removal prop");
of_changeset_init(&chgset);
unittest(!of_changeset_attach_node(&chgset, n1), "fail attach n1\n");
unittest(!of_changeset_add_property(&chgset, n1, ppname_n1), "fail add prop name\n");
unittest(!of_changeset_attach_node(&chgset, n2), "fail attach n2\n");
unittest(!of_changeset_add_property(&chgset, n2, ppname_n2), "fail add prop name\n");
unittest(!of_changeset_detach_node(&chgset, nremove), "fail remove node\n");
unittest(!of_changeset_add_property(&chgset, n21, ppname_n21), "fail add prop name\n");
unittest(!of_changeset_attach_node(&chgset, n21), "fail attach n21\n");
unittest(!of_changeset_add_property(&chgset, parent, ppadd), "fail add prop prop-add\n");
unittest(!of_changeset_update_property(&chgset, parent, ppupdate), "fail update prop\n");
unittest(!of_changeset_remove_property(&chgset, parent, ppremove), "fail remove prop\n");
n22 = of_changeset_create_node(&chgset, n2, "n22");
unittest(n22, "fail create n22\n");
unittest(!of_changeset_add_prop_string(&chgset, n22, "prop-str", "abcd"),
"fail add prop prop-str\n");
unittest(!of_changeset_add_prop_string_array(&chgset, n22, "prop-str-array",
(const char **)str_array,
ARRAY_SIZE(str_array)),
"fail add prop prop-str-array\n");
unittest(!of_changeset_add_prop_u32_array(&chgset, n22, "prop-u32-array",
u32_array, ARRAY_SIZE(u32_array)),
"fail add prop prop-u32-array\n");
unittest(!of_changeset_apply(&chgset), "apply failed\n");
of_node_put(nchangeset);
/* Make sure node names are constructed correctly */
unittest((np = of_find_node_by_path("/testcase-data/changeset/n2/n21")),
"'%pOF' not added\n", n21);
of_node_put(np);
unittest((np = of_find_node_by_path("/testcase-data/changeset/n2/n22")),
"'%pOF' not added\n", n22);
of_node_put(np);
unittest(!of_changeset_revert(&chgset), "revert failed\n");
unittest(!of_find_node_by_path("/testcase-data/changeset/n2/n21"),
"'%pOF' still present after revert\n", n21);
ppremove = of_find_property(parent, "prop-remove", NULL);
unittest(ppremove, "failed to find removed prop after revert\n");
ret = of_property_read_string(parent, "prop-update", &propstr);
unittest(!ret, "failed to find updated prop after revert\n");
if (!ret)
unittest(strcmp(propstr, "hello") == 0, "original value not in updated property after revert\n");
of_changeset_destroy(&chgset);
of_node_put(n1);
of_node_put(n2);
of_node_put(n21);
of_node_put(n22);
#endif
}
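/* Verify of_dma_get_max_cpu_address() against the address-tests data */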
static void __init of_unittest_dma_get_max_cpu_address(void)
{
struct device_node *np;
phys_addr_t cpu_addr;
if (!IS_ENABLED(CONFIG_OF_ADDRESS))
return;
np = of_find_node_by_path("/testcase-data/address-tests");
if (!np) {
pr_err("missing testcase data\n");
return;
}
cpu_addr = of_dma_get_max_cpu_address(np);
unittest(cpu_addr == 0x4fffffff,
"of_dma_get_max_cpu_address: wrong CPU addr %pad (expecting %x)\n",
&cpu_addr, 0x4fffffff);
}
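/*
 * Parse the dma-ranges of one node and verify the phys/DMA round trip
 * through a throwaway struct device carrying the resulting range map.
 */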
static void __init of_unittest_dma_ranges_one(const char *path,
u64 expect_dma_addr, u64 expect_paddr)
{
#ifdef CONFIG_HAS_DMA
struct device_node *np;
const struct bus_dma_region *map = NULL;
int rc;
np = of_find_node_by_path(path);
if (!np) {
pr_err("missing testcase data\n");
return;
}
rc = of_dma_get_range(np, &map);
unittest(!rc, "of_dma_get_range failed on node %pOF rc=%i\n", np, rc);
if (!rc) {
phys_addr_t paddr;
dma_addr_t dma_addr;
struct device *dev_bogus;
dev_bogus = kzalloc(sizeof(struct device), GFP_KERNEL);
if (!dev_bogus) {
unittest(0, "kzalloc() failed\n");
kfree(map);
return;
}
dev_bogus->dma_range_map = map;
paddr = dma_to_phys(dev_bogus, expect_dma_addr);
dma_addr = phys_to_dma(dev_bogus, expect_paddr);
unittest(paddr == expect_paddr,
"of_dma_get_range: wrong phys addr %pap (expecting %llx) on node %pOF\n",
&paddr, expect_paddr, np);
unittest(dma_addr == expect_dma_addr,
"of_dma_get_range: wrong DMA addr %pad (expecting %llx) on node %pOF\n",
&dma_addr, expect_dma_addr, np);
kfree(map);
kfree(dev_bogus);
}
of_node_put(np);
#endif
}
static void __init of_unittest_parse_dma_ranges(void)
{
of_unittest_dma_ranges_one("/testcase-data/address-tests/device@70000000",
0x0, 0x20000000);
if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
of_unittest_dma_ranges_one("/testcase-data/address-tests/bus@80000000/device@1000",
0x100000000, 0x20000000);
of_unittest_dma_ranges_one("/testcase-data/address-tests/pci@90000000",
0x80000000, 0x20000000);
}
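/* Walk the dma-ranges of the test PCI node with for_each_of_pci_range() and check each parsed entry */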
static void __init of_unittest_pci_dma_ranges(void)
{
struct device_node *np;
struct of_pci_range range;
struct of_pci_range_parser parser;
int i = 0;
if (!IS_ENABLED(CONFIG_PCI))
return;
np = of_find_node_by_path("/testcase-data/address-tests/pci@90000000");
if (!np) {
pr_err("missing testcase data\n");
return;
}
if (of_pci_dma_range_parser_init(&parser, np)) {
pr_err("missing dma-ranges property\n");
return;
}
/*
* Get the dma-ranges from the device tree
*/
for_each_of_pci_range(&parser, &range) {
if (!i) {
unittest(range.size == 0x10000000,
"for_each_of_pci_range wrong size on node %pOF size=%llx\n",
np, range.size);
unittest(range.cpu_addr == 0x20000000,
"for_each_of_pci_range wrong CPU addr (%llx) on node %pOF",
range.cpu_addr, np);
unittest(range.pci_addr == 0x80000000,
"for_each_of_pci_range wrong DMA addr (%llx) on node %pOF",
range.pci_addr, np);
} else {
unittest(range.size == 0x10000000,
"for_each_of_pci_range wrong size on node %pOF size=%llx\n",
np, range.size);
unittest(range.cpu_addr == 0x40000000,
"for_each_of_pci_range wrong CPU addr (%llx) on node %pOF",
range.cpu_addr, np);
unittest(range.pci_addr == 0xc0000000,
"for_each_of_pci_range wrong DMA addr (%llx) on node %pOF",
range.pci_addr, np);
}
i++;
}
of_node_put(np);
}
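/*
 * Parse the ranges property of the address-tests node: verify
 * of_range_to_resource(), of_range_count(), and each entry returned
 * by for_each_of_range().
 */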
static void __init of_unittest_bus_ranges(void)
{
struct device_node *np;
struct of_range range;
struct of_range_parser parser;
struct resource res;
int ret, count, i = 0;
np = of_find_node_by_path("/testcase-data/address-tests");
if (!np) {
pr_err("missing testcase data\n");
return;
}
if (of_range_parser_init(&parser, np)) {
pr_err("missing ranges property\n");
return;
}
ret = of_range_to_resource(np, 1, &res);
unittest(!ret, "of_range_to_resource returned error (%d) node %pOF\n",
ret, np);
unittest(resource_type(&res) == IORESOURCE_MEM,
"of_range_to_resource wrong resource type on node %pOF res=%pR\n",
np, &res);
unittest(res.start == 0xd0000000,
"of_range_to_resource wrong resource start address on node %pOF res=%pR\n",
np, &res);
unittest(resource_size(&res) == 0x20000000,
"of_range_to_resource wrong resource size on node %pOF res=%pR\n",
np, &res);
count = of_range_count(&parser);
unittest(count == 2,
"of_range_count wrong size on node %pOF count=%d\n",
np, count);
/*
* Get the "ranges" from the device tree
*/
for_each_of_range(&parser, &range) {
unittest(range.flags == IORESOURCE_MEM,
"for_each_of_range wrong flags on node %pOF flags=%x (expected %x)\n",
np, range.flags, IORESOURCE_MEM);
if (!i) {
unittest(range.size == 0x50000000,
"for_each_of_range wrong size on node %pOF size=%llx\n",
np, range.size);
unittest(range.cpu_addr == 0x70000000,
"for_each_of_range wrong CPU addr (%llx) on node %pOF",
range.cpu_addr, np);
unittest(range.bus_addr == 0x70000000,
"for_each_of_range wrong bus addr (%llx) on node %pOF",
range.pci_addr, np);
} else {
unittest(range.size == 0x20000000,
"for_each_of_range wrong size on node %pOF size=%llx\n",
np, range.size);
unittest(range.cpu_addr == 0xd0000000,
"for_each_of_range wrong CPU addr (%llx) on node %pOF",
range.cpu_addr, np);
unittest(range.bus_addr == 0x00000000,
"for_each_of_range wrong bus addr (%llx) on node %pOF",
range.pci_addr, np);
}
i++;
}
of_node_put(np);
}
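/* Same checks for a bus using 3 address cells, where the extra cell ends up in range.flags */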
static void __init of_unittest_bus_3cell_ranges(void)
{
struct device_node *np;
struct of_range range;
struct of_range_parser parser;
int i = 0;
np = of_find_node_by_path("/testcase-data/address-tests/bus@a0000000");
if (!np) {
pr_err("missing testcase data\n");
return;
}
if (of_range_parser_init(&parser, np)) {
pr_err("missing ranges property\n");
return;
}
/*
* Get the "ranges" from the device tree
*/
for_each_of_range(&parser, &range) {
if (!i) {
unittest(range.flags == 0xf00baa,
"for_each_of_range wrong flags on node %pOF flags=%x\n",
np, range.flags);
unittest(range.size == 0x100000,
"for_each_of_range wrong size on node %pOF size=%llx\n",
np, range.size);
unittest(range.cpu_addr == 0xa0000000,
"for_each_of_range wrong CPU addr (%llx) on node %pOF",
range.cpu_addr, np);
unittest(range.bus_addr == 0x0,
"for_each_of_range wrong bus addr (%llx) on node %pOF",
range.pci_addr, np);
} else {
unittest(range.flags == 0xf00bee,
"for_each_of_range wrong flags on node %pOF flags=%x\n",
np, range.flags);
unittest(range.size == 0x200000,
"for_each_of_range wrong size on node %pOF size=%llx\n",
np, range.size);
unittest(range.cpu_addr == 0xb0000000,
"for_each_of_range wrong CPU addr (%llx) on node %pOF",
range.cpu_addr, np);
unittest(range.bus_addr == 0x100000000,
"for_each_of_range wrong bus addr (%llx) on node %pOF",
range.pci_addr, np);
}
i++;
}
of_node_put(np);
}
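/* Verify of_property_read_reg() returns the untranslated unit address */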
static void __init of_unittest_reg(void)
{
struct device_node *np;
int ret;
u64 addr, size;
np = of_find_node_by_path("/testcase-data/address-tests/bus@80000000/device@1000");
if (!np) {
pr_err("missing testcase data\n");
return;
}
ret = of_property_read_reg(np, 0, &addr, &size);
unittest(!ret, "of_property_read_reg(%pOF) returned error %d\n",
np, ret);
unittest(addr == 0x1000, "of_property_read_reg(%pOF) untranslated address (%llx) incorrect\n",
np, addr);
of_node_put(np);
}
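/* Parse plain interrupts properties and verify the decoded specifiers */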
static void __init of_unittest_parse_interrupts(void)
{
struct device_node *np;
struct of_phandle_args args;
int i, rc;
if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
return;
np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
if (!np) {
pr_err("missing testcase data\n");
return;
}
for (i = 0; i < 4; i++) {
bool passed = true;
memset(&args, 0, sizeof(args));
rc = of_irq_parse_one(np, i, &args);
passed &= !rc;
passed &= (args.args_count == 1);
passed &= (args.args[0] == (i + 1));
unittest(passed, "index %i - data error on node %pOF rc=%i\n",
i, args.np, rc);
}
of_node_put(np);
np = of_find_node_by_path("/testcase-data/interrupts/interrupts1");
if (!np) {
pr_err("missing testcase data\n");
return;
}
for (i = 0; i < 4; i++) {
bool passed = true;
memset(&args, 0, sizeof(args));
rc = of_irq_parse_one(np, i, &args);
/* Test the values from tests-interrupts.dtsi */
switch (i) {
case 0:
passed &= !rc;
passed &= (args.args_count == 1);
passed &= (args.args[0] == 9);
break;
case 1:
passed &= !rc;
passed &= (args.args_count == 3);
passed &= (args.args[0] == 10);
passed &= (args.args[1] == 11);
passed &= (args.args[2] == 12);
break;
case 2:
passed &= !rc;
passed &= (args.args_count == 2);
passed &= (args.args[0] == 13);
passed &= (args.args[1] == 14);
break;
case 3:
passed &= !rc;
passed &= (args.args_count == 2);
passed &= (args.args[0] == 15);
passed &= (args.args[1] == 16);
break;
default:
passed = false;
}
unittest(passed, "index %i - data error on node %pOF rc=%i\n",
i, args.np, rc);
}
of_node_put(np);
}
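/*
 * Same as above, but for interrupts-extended, where each entry names
 * its own interrupt parent.
 */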
static void __init of_unittest_parse_interrupts_extended(void)
{
struct device_node *np;
struct of_phandle_args args;
int i, rc;
if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
return;
np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
if (!np) {
pr_err("missing testcase data\n");
return;
}
for (i = 0; i < 7; i++) {
bool passed = true;
memset(&args, 0, sizeof(args));
rc = of_irq_parse_one(np, i, &args);
/* Test the values from tests-interrupts.dtsi */
switch (i) {
case 0:
passed &= !rc;
passed &= (args.args_count == 1);
passed &= (args.args[0] == 1);
break;
case 1:
passed &= !rc;
passed &= (args.args_count == 3);
passed &= (args.args[0] == 2);
passed &= (args.args[1] == 3);
passed &= (args.args[2] == 4);
break;
case 2:
passed &= !rc;
passed &= (args.args_count == 2);
passed &= (args.args[0] == 5);
passed &= (args.args[1] == 6);
break;
case 3:
passed &= !rc;
passed &= (args.args_count == 1);
passed &= (args.args[0] == 9);
break;
case 4:
passed &= !rc;
passed &= (args.args_count == 3);
passed &= (args.args[0] == 10);
passed &= (args.args[1] == 11);
passed &= (args.args[2] == 12);
break;
case 5:
passed &= !rc;
passed &= (args.args_count == 2);
passed &= (args.args[0] == 13);
passed &= (args.args[1] == 14);
break;
case 6:
/*
* Tests child node that is missing property
* #address-cells. See the comments in
* drivers/of/unittest-data/tests-interrupts.dtsi
* nodes intmap1 and interrupts-extended0
*/
passed &= !rc;
passed &= (args.args_count == 1);
passed &= (args.args[0] == 15);
break;
default:
passed = false;
}
unittest(passed, "index %i - data error on node %pOF rc=%i\n",
i, args.np, rc);
}
of_node_put(np);
}
static const struct of_device_id match_node_table[] = {
{ .data = "A", .name = "name0", }, /* Name alone is lowest priority */
{ .data = "B", .type = "type1", }, /* followed by type alone */
{ .data = "Ca", .name = "name2", .type = "type1", }, /* followed by both together */
{ .data = "Cb", .name = "name2", }, /* Only match when type doesn't match */
{ .data = "Cc", .name = "name2", .type = "type2", },
{ .data = "E", .compatible = "compat3" },
{ .data = "G", .compatible = "compat2", },
{ .data = "H", .compatible = "compat2", .name = "name5", },
{ .data = "I", .compatible = "compat2", .type = "type1", },
{ .data = "J", .compatible = "compat2", .type = "type1", .name = "name8", },
{ .data = "K", .compatible = "compat2", .name = "name9", },
{}
};
static struct {
const char *path;
const char *data;
} match_node_tests[] = {
{ .path = "/testcase-data/match-node/name0", .data = "A", },
{ .path = "/testcase-data/match-node/name1", .data = "B", },
{ .path = "/testcase-data/match-node/a/name2", .data = "Ca", },
{ .path = "/testcase-data/match-node/b/name2", .data = "Cb", },
{ .path = "/testcase-data/match-node/c/name2", .data = "Cc", },
{ .path = "/testcase-data/match-node/name3", .data = "E", },
{ .path = "/testcase-data/match-node/name4", .data = "G", },
{ .path = "/testcase-data/match-node/name5", .data = "H", },
{ .path = "/testcase-data/match-node/name6", .data = "G", },
{ .path = "/testcase-data/match-node/name7", .data = "I", },
{ .path = "/testcase-data/match-node/name8", .data = "J", },
{ .path = "/testcase-data/match-node/name9", .data = "K", },
};
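/* Look up each path in match_node_tests and verify of_match_node() picks the expected entry */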
static void __init of_unittest_match_node(void)
{
struct device_node *np;
const struct of_device_id *match;
int i;
for (i = 0; i < ARRAY_SIZE(match_node_tests); i++) {
np = of_find_node_by_path(match_node_tests[i].path);
if (!np) {
unittest(0, "missing testcase node %s\n",
match_node_tests[i].path);
continue;
}
match = of_match_node(match_node_table, np);
if (!match) {
unittest(0, "%s didn't match anything\n",
match_node_tests[i].path);
continue;
}
if (strcmp(match->data, match_node_tests[i].data) != 0) {
unittest(0, "%s got wrong match. expected %s, got %s\n",
match_node_tests[i].path, match_node_tests[i].data,
(const char *)match->data);
continue;
}
unittest(1, "passed");
}
}
static struct resource test_bus_res = DEFINE_RES_MEM(0xfffffff8, 2);
static const struct platform_device_info test_bus_info = {
.name = "unittest-bus",
};
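/*
 * Populate platform devices from the test data; check that a missing IRQ
 * domain defers probe, that a parsing failure does not, and that devices
 * are created and destroyed by of_platform_populate()/depopulate().
 */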
static void __init of_unittest_platform_populate(void)
{
int irq, rc;
struct device_node *np, *child, *grandchild;
struct platform_device *pdev, *test_bus;
const struct of_device_id match[] = {
{ .compatible = "test-device", },
{}
};
np = of_find_node_by_path("/testcase-data");
of_platform_default_populate(np, NULL, NULL);
/* Test that a missing irq domain returns -EPROBE_DEFER */
np = of_find_node_by_path("/testcase-data/testcase-device1");
pdev = of_find_device_by_node(np);
unittest(pdev, "device 1 creation failed\n");
if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) {
irq = platform_get_irq(pdev, 0);
unittest(irq == -EPROBE_DEFER,
"device deferred probe failed - %d\n", irq);
/* Test that a parsing failure does not return -EPROBE_DEFER */
np = of_find_node_by_path("/testcase-data/testcase-device2");
pdev = of_find_device_by_node(np);
unittest(pdev, "device 2 creation failed\n");
EXPECT_BEGIN(KERN_INFO,
"platform testcase-data:testcase-device2: error -ENXIO: IRQ index 0 not found");
irq = platform_get_irq(pdev, 0);
EXPECT_END(KERN_INFO,
"platform testcase-data:testcase-device2: error -ENXIO: IRQ index 0 not found");
unittest(irq < 0 && irq != -EPROBE_DEFER,
"device parsing error failed - %d\n", irq);
}
np = of_find_node_by_path("/testcase-data/platform-tests");
unittest(np, "No testcase data in device tree\n");
if (!np)
return;
test_bus = platform_device_register_full(&test_bus_info);
rc = PTR_ERR_OR_ZERO(test_bus);
unittest(!rc, "testbus registration failed; rc=%i\n", rc);
if (rc) {
of_node_put(np);
return;
}
test_bus->dev.of_node = np;
/*
* Add a dummy resource to the test bus node after it is
* registered to catch problems with un-inserted resources. The
* DT code doesn't insert the resources, and it has caused the
* kernel to oops in the past. This makes sure the same bug
* doesn't crop up again.
*/
platform_device_add_resources(test_bus, &test_bus_res, 1);
of_platform_populate(np, match, NULL, &test_bus->dev);
for_each_child_of_node(np, child) {
for_each_child_of_node(child, grandchild) {
pdev = of_find_device_by_node(grandchild);
unittest(pdev,
"Could not create device for node '%pOFn'\n",
grandchild);
platform_device_put(pdev);
}
}
of_platform_depopulate(&test_bus->dev);
for_each_child_of_node(np, child) {
for_each_child_of_node(child, grandchild)
unittest(!of_find_device_by_node(grandchild),
"device didn't get destroyed '%pOFn'\n",
grandchild);
}
platform_device_unregister(test_bus);
of_node_put(np);
}
/**
 * update_node_properties - add the properties of @np to the @dup node
 * (already present in the live tree) and reparent the children of @np
 * to @dup.
 *
 * @np: node whose properties are being added to the live tree
 * @dup: node present in live tree to be updated
 */
static void update_node_properties(struct device_node *np,
struct device_node *dup)
{
struct property *prop;
struct property *save_next;
struct device_node *child;
int ret;
for_each_child_of_node(np, child)
child->parent = dup;
/*
* "unittest internal error: unable to add testdata property"
*
* If this message reports a property in node '/__symbols__' then
* the respective unittest overlay contains a label that has the
* same name as a label in the live devicetree. The label will
* be in the live devicetree only if the devicetree source was
* compiled with the '-@' option. If you encounter this error,
* please consider renaming __all__ of the labels in the unittest
* overlay dts files with an odd prefix that is unlikely to be
* used in a real devicetree.
*/
/*
* open code for_each_property_of_node() because of_add_property()
* sets prop->next to NULL
*/
for (prop = np->properties; prop != NULL; prop = save_next) {
save_next = prop->next;
ret = of_add_property(dup, prop);
if (ret) {
if (ret == -EEXIST && !strcmp(prop->name, "name"))
continue;
pr_err("unittest internal error: unable to add testdata property %pOF/%s",
np, prop->name);
}
}
}
/**
 * attach_node_and_children - attach a node and its children to the live tree
 *
 * CAUTION: misleading function name - if node @np already exists in
 * the live tree then the children of @np are *not* attached to the live
 * tree. This works for the current test devicetree nodes because such
 * nodes do not have child nodes.
 *
 * @np: Node to attach to live tree
 */
static void attach_node_and_children(struct device_node *np)
{
struct device_node *next, *dup, *child;
unsigned long flags;
const char *full_name;
full_name = kasprintf(GFP_KERNEL, "%pOF", np);
if (!full_name)
return;
if (!strcmp(full_name, "/__local_fixups__") ||
!strcmp(full_name, "/__fixups__")) {
kfree(full_name);
return;
}
dup = of_find_node_by_path(full_name);
kfree(full_name);
if (dup) {
update_node_properties(np, dup);
return;
}
child = np->child;
np->child = NULL;
mutex_lock(&of_mutex);
raw_spin_lock_irqsave(&devtree_lock, flags);
np->sibling = np->parent->child;
np->parent->child = np;
of_node_clear_flag(np, OF_DETACHED);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
__of_attach_node_sysfs(np);
mutex_unlock(&of_mutex);
while (child) {
next = child->sibling;
attach_node_and_children(child);
child = next;
}
}
/**
 * unittest_data_add - Read and copy the test data from the linked tree
 * and attach it to the live tree
 */
static int __init unittest_data_add(void)
{
void *unittest_data;
void *unittest_data_align;
struct device_node *unittest_data_node = NULL, *np;
/*
* __dtbo_testcases_begin[] and __dtbo_testcases_end[] are magically
* created by cmd_dt_S_dtbo in scripts/Makefile.lib
*/
extern uint8_t __dtbo_testcases_begin[];
extern uint8_t __dtbo_testcases_end[];
const int size = __dtbo_testcases_end - __dtbo_testcases_begin;
int rc;
void *ret;
if (!size) {
pr_warn("%s: testcases is empty\n", __func__);
return -ENODATA;
}
/* creating copy */
unittest_data = kmalloc(size + FDT_ALIGN_SIZE, GFP_KERNEL);
if (!unittest_data)
return -ENOMEM;
unittest_data_align = PTR_ALIGN(unittest_data, FDT_ALIGN_SIZE);
memcpy(unittest_data_align, __dtbo_testcases_begin, size);
ret = of_fdt_unflatten_tree(unittest_data_align, NULL, &unittest_data_node);
if (!ret) {
pr_warn("%s: unflatten testcases tree failed\n", __func__);
kfree(unittest_data);
return -ENODATA;
}
if (!unittest_data_node) {
pr_warn("%s: testcases tree is empty\n", __func__);
kfree(unittest_data);
return -ENODATA;
}
/*
* This lock normally encloses of_resolve_phandles()
*/
of_overlay_mutex_lock();
rc = of_resolve_phandles(unittest_data_node);
if (rc) {
pr_err("%s: Failed to resolve phandles (rc=%i)\n", __func__, rc);
of_overlay_mutex_unlock();
return -EINVAL;
}
if (!of_root) {
of_root = unittest_data_node;
for_each_of_allnodes(np)
__of_attach_node_sysfs(np);
of_aliases = of_find_node_by_path("/aliases");
of_chosen = of_find_node_by_path("/chosen");
of_overlay_mutex_unlock();
return 0;
}
EXPECT_BEGIN(KERN_INFO,
"Duplicate name in testcase-data, renamed to \"duplicate-name#1\"");
/* attach the sub-tree to live tree */
np = unittest_data_node->child;
while (np) {
struct device_node *next = np->sibling;
np->parent = of_root;
/* this will clear OF_DETACHED in np and children */
attach_node_and_children(np);
np = next;
}
EXPECT_END(KERN_INFO,
"Duplicate name in testcase-data, renamed to \"duplicate-name#1\"");
of_overlay_mutex_unlock();
return 0;
}
#ifdef CONFIG_OF_OVERLAY
static int __init overlay_data_apply(const char *overlay_name, int *ovcs_id);
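/* Minimal platform driver bound to the "unittest" compatible used by the overlay test nodes */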
static int unittest_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
if (np == NULL) {
dev_err(dev, "No OF data for device\n");
return -EINVAL;
}
dev_dbg(dev, "%s for node @%pOF\n", __func__, np);
of_platform_populate(np, NULL, NULL, &pdev->dev);
return 0;
}
static void unittest_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
dev_dbg(dev, "%s for node @%pOF\n", __func__, np);
}
static const struct of_device_id unittest_match[] = {
{ .compatible = "unittest", },
{},
};
static struct platform_driver unittest_driver = {
.probe = unittest_probe,
.remove_new = unittest_remove,
.driver = {
.name = "unittest",
.of_match_table = unittest_match,
},
};
/* get the platform device instantiated at the path */
static struct platform_device *of_path_to_platform_device(const char *path)
{
struct device_node *np;
struct platform_device *pdev;
np = of_find_node_by_path(path);
if (np == NULL)
return NULL;
pdev = of_find_device_by_node(np);
of_node_put(np);
return pdev;
}
/* find out if a platform device exists at that path */
static int of_path_platform_device_exists(const char *path)
{
struct platform_device *pdev;
pdev = of_path_to_platform_device(path);
platform_device_put(pdev);
return pdev != NULL;
}
#ifdef CONFIG_OF_GPIO
struct unittest_gpio_dev {
struct gpio_chip chip;
};
static int unittest_gpio_chip_request_count;
static int unittest_gpio_probe_count;
static int unittest_gpio_probe_pass_count;
static int unittest_gpio_chip_request(struct gpio_chip *chip, unsigned int offset)
{
unittest_gpio_chip_request_count++;
pr_debug("%s(): %s %d %d\n", __func__, chip->label, offset,
unittest_gpio_chip_request_count);
return 0;
}
static int unittest_gpio_probe(struct platform_device *pdev)
{
struct unittest_gpio_dev *devptr;
int ret;
unittest_gpio_probe_count++;
devptr = kzalloc(sizeof(*devptr), GFP_KERNEL);
if (!devptr)
return -ENOMEM;
platform_set_drvdata(pdev, devptr);
devptr->chip.fwnode = dev_fwnode(&pdev->dev);
devptr->chip.label = "of-unittest-gpio";
devptr->chip.base = -1; /* dynamic allocation */
devptr->chip.ngpio = 5;
devptr->chip.request = unittest_gpio_chip_request;
ret = gpiochip_add_data(&devptr->chip, NULL);
unittest(!ret,
"gpiochip_add_data() for node @%pfw failed, ret = %d\n", devptr->chip.fwnode, ret);
if (!ret)
unittest_gpio_probe_pass_count++;
return ret;
}
static void unittest_gpio_remove(struct platform_device *pdev)
{
struct unittest_gpio_dev *devptr = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
dev_dbg(dev, "%s for node @%pfw\n", __func__, devptr->chip.fwnode);
if (devptr->chip.base != -1)
gpiochip_remove(&devptr->chip);
kfree(devptr);
}
static const struct of_device_id unittest_gpio_id[] = {
{ .compatible = "unittest-gpio", },
{}
};
static struct platform_driver unittest_gpio_driver = {
.probe = unittest_gpio_probe,
.remove_new = unittest_gpio_remove,
.driver = {
.name = "unittest-gpio",
.of_match_table = unittest_gpio_id,
},
};
static void __init of_unittest_overlay_gpio(void)
{
int chip_request_count;
int probe_pass_count;
int ret;
/*
* tests: apply overlays before registering driver
* Similar to installing a driver as a module, the
* driver is registered after applying the overlays.
*
* The overlays are applied by overlay_data_apply()
* instead of of_unittest_apply_overlay() so that they
* will not be tracked. Thus they will not be removed
* by of_unittest_remove_tracked_overlays().
*
* - apply overlay_gpio_01
* - apply overlay_gpio_02a
* - apply overlay_gpio_02b
* - register driver
*
* register driver will result in
* - probe and processing gpio hog for overlay_gpio_01
* - probe for overlay_gpio_02a
* - processing gpio for overlay_gpio_02b
*/
probe_pass_count = unittest_gpio_probe_pass_count;
chip_request_count = unittest_gpio_chip_request_count;
/*
* overlay_gpio_01 contains gpio node and child gpio hog node
* overlay_gpio_02a contains gpio node
* overlay_gpio_02b contains child gpio hog node
*/
unittest(overlay_data_apply("overlay_gpio_01", NULL),
"Adding overlay 'overlay_gpio_01' failed\n");
unittest(overlay_data_apply("overlay_gpio_02a", NULL),
"Adding overlay 'overlay_gpio_02a' failed\n");
unittest(overlay_data_apply("overlay_gpio_02b", NULL),
"Adding overlay 'overlay_gpio_02b' failed\n");
ret = platform_driver_register(&unittest_gpio_driver);
if (unittest(ret == 0, "could not register unittest gpio driver\n"))
return;
unittest(probe_pass_count + 2 == unittest_gpio_probe_pass_count,
"unittest_gpio_probe() failed or not called\n");
unittest(chip_request_count + 2 == unittest_gpio_chip_request_count,
"unittest_gpio_chip_request() called %d times (expected 2 times)\n",
unittest_gpio_chip_request_count - chip_request_count);
/*
* tests: apply overlays after registering driver
*
* Similar to a driver built-in to the kernel, the
* driver is registered before applying the overlays.
*
* overlay_gpio_03 contains gpio node and child gpio hog node
*
* - apply overlay_gpio_03
*
* apply overlay will result in
* - probe and processing gpio hog.
*/
probe_pass_count = unittest_gpio_probe_pass_count;
chip_request_count = unittest_gpio_chip_request_count;
/* overlay_gpio_03 contains gpio node and child gpio hog node */
unittest(overlay_data_apply("overlay_gpio_03", NULL),
"Adding overlay 'overlay_gpio_03' failed\n");
unittest(probe_pass_count + 1 == unittest_gpio_probe_pass_count,
"unittest_gpio_probe() failed or not called\n");
unittest(chip_request_count + 1 == unittest_gpio_chip_request_count,
"unittest_gpio_chip_request() called %d times (expected 1 time)\n",
unittest_gpio_chip_request_count - chip_request_count);
/*
* overlay_gpio_04a contains gpio node
*
* - apply overlay_gpio_04a
*
* apply the overlay will result in
* - probe for overlay_gpio_04a
*/
probe_pass_count = unittest_gpio_probe_pass_count;
chip_request_count = unittest_gpio_chip_request_count;
/* overlay_gpio_04a contains gpio node */
unittest(overlay_data_apply("overlay_gpio_04a", NULL),
"Adding overlay 'overlay_gpio_04a' failed\n");
unittest(probe_pass_count + 1 == unittest_gpio_probe_pass_count,
"unittest_gpio_probe() failed or not called\n");
/*
* overlay_gpio_04b contains child gpio hog node
*
* - apply overlay_gpio_04b
*
* apply the overlay will result in
* - processing gpio for overlay_gpio_04b
*/
/* overlay_gpio_04b contains child gpio hog node */
unittest(overlay_data_apply("overlay_gpio_04b", NULL),
"Adding overlay 'overlay_gpio_04b' failed\n");
unittest(chip_request_count + 1 == unittest_gpio_chip_request_count,
"unittest_gpio_chip_request() called %d times (expected 1 time)\n",
unittest_gpio_chip_request_count - chip_request_count);
}
#else
static void __init of_unittest_overlay_gpio(void)
{
/* skip tests */
}
#endif
#if IS_BUILTIN(CONFIG_I2C)
/* get the i2c client device instantiated at the path */
static struct i2c_client *of_path_to_i2c_client(const char *path)
{
struct device_node *np;
struct i2c_client *client;
np = of_find_node_by_path(path);
if (np == NULL)
return NULL;
client = of_find_i2c_device_by_node(np);
of_node_put(np);
return client;
}
/* find out if an i2c client device exists at that path */
static int of_path_i2c_client_exists(const char *path)
{
struct i2c_client *client;
client = of_path_to_i2c_client(path);
if (client)
put_device(&client->dev);
return client != NULL;
}
#else
static int of_path_i2c_client_exists(const char *path)
{
return 0;
}
#endif
enum overlay_type {
PDEV_OVERLAY,
I2C_OVERLAY
};
static int of_path_device_type_exists(const char *path,
enum overlay_type ovtype)
{
switch (ovtype) {
case PDEV_OVERLAY:
return of_path_platform_device_exists(path);
case I2C_OVERLAY:
return of_path_i2c_client_exists(path);
}
return 0;
}
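/* Build the devicetree path of numbered unittest device @nr for the given overlay type */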
static const char *unittest_path(int nr, enum overlay_type ovtype)
{
const char *base;
static char buf[256];
switch (ovtype) {
case PDEV_OVERLAY:
base = "/testcase-data/overlay-node/test-bus";
break;
case I2C_OVERLAY:
base = "/testcase-data/overlay-node/test-bus/i2c-test-bus";
break;
default:
buf[0] = '\0';
return buf;
}
snprintf(buf, sizeof(buf) - 1, "%s/test-unittest%d", base, nr);
buf[sizeof(buf) - 1] = '\0';
return buf;
}
static int of_unittest_device_exists(int unittest_nr, enum overlay_type ovtype)
{
const char *path;
path = unittest_path(unittest_nr, ovtype);
switch (ovtype) {
case PDEV_OVERLAY:
return of_path_platform_device_exists(path);
case I2C_OVERLAY:
return of_path_i2c_client_exists(path);
}
return 0;
}
static const char *overlay_name_from_nr(int nr)
{
static char buf[256];
snprintf(buf, sizeof(buf) - 1,
"overlay_%d", nr);
buf[sizeof(buf) - 1] = '\0';
return buf;
}
static const char *bus_path = "/testcase-data/overlay-node/test-bus";
#define MAX_TRACK_OVCS_IDS 256
static int track_ovcs_id[MAX_TRACK_OVCS_IDS];
static int track_ovcs_id_overlay_nr[MAX_TRACK_OVCS_IDS];
static int track_ovcs_id_cnt;
static void of_unittest_track_overlay(int ovcs_id, int overlay_nr)
{
if (WARN_ON(track_ovcs_id_cnt >= MAX_TRACK_OVCS_IDS))
return;
track_ovcs_id[track_ovcs_id_cnt] = ovcs_id;
track_ovcs_id_overlay_nr[track_ovcs_id_cnt] = overlay_nr;
track_ovcs_id_cnt++;
}
static void of_unittest_untrack_overlay(int ovcs_id)
{
if (WARN_ON(track_ovcs_id_cnt < 1))
return;
track_ovcs_id_cnt--;
/* If out of sync then the test is broken. Do not try to recover. */
WARN_ON(track_ovcs_id[track_ovcs_id_cnt] != ovcs_id);
}
static void of_unittest_remove_tracked_overlays(void)
{
int ret, ovcs_id, overlay_nr, save_ovcs_id;
const char *overlay_name;
while (track_ovcs_id_cnt > 0) {
ovcs_id = track_ovcs_id[track_ovcs_id_cnt - 1];
overlay_nr = track_ovcs_id_overlay_nr[track_ovcs_id_cnt - 1];
save_ovcs_id = ovcs_id;
ret = of_overlay_remove(&ovcs_id);
if (ret == -ENODEV) {
overlay_name = overlay_name_from_nr(overlay_nr);
pr_warn("%s: of_overlay_remove() for overlay \"%s\" failed, ret = %d\n",
__func__, overlay_name, ret);
}
of_unittest_untrack_overlay(save_ovcs_id);
}
}
static int __init of_unittest_apply_overlay(int overlay_nr, int *ovcs_id)
{
/*
* The overlay will be tracked, thus it will be removed
* by of_unittest_remove_tracked_overlays().
*/
const char *overlay_name;
overlay_name = overlay_name_from_nr(overlay_nr);
if (!overlay_data_apply(overlay_name, ovcs_id)) {
unittest(0, "could not apply overlay \"%s\"\n", overlay_name);
return -EFAULT;
}
of_unittest_track_overlay(*ovcs_id, overlay_nr);
return 0;
}
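/*
 * Apply an overlay while checking the before and after device states;
 * returns the overlay changeset id on success or a negative errno.
 */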
static int __init __of_unittest_apply_overlay_check(int overlay_nr,
int unittest_nr, int before, int after,
enum overlay_type ovtype)
{
int ret, ovcs_id;
/* unittest device must be in before state */
if (of_unittest_device_exists(unittest_nr, ovtype) != before) {
unittest(0, "%s with device @\"%s\" %s\n",
overlay_name_from_nr(overlay_nr),
unittest_path(unittest_nr, ovtype),
!before ? "enabled" : "disabled");
return -EINVAL;
}
/* apply the overlay */
ovcs_id = 0;
ret = of_unittest_apply_overlay(overlay_nr, &ovcs_id);
if (ret != 0) {
/* of_unittest_apply_overlay already called unittest() */
return ret;
}
/* unittest device must be in after state */
if (of_unittest_device_exists(unittest_nr, ovtype) != after) {
unittest(0, "%s with device @\"%s\" %s\n",
overlay_name_from_nr(overlay_nr),
unittest_path(unittest_nr, ovtype),
!after ? "enabled" : "disabled");
return -EINVAL;
}
return ovcs_id;
}
/* apply an overlay while checking before and after states */
static int __init of_unittest_apply_overlay_check(int overlay_nr,
int unittest_nr, int before, int after,
enum overlay_type ovtype)
{
int ovcs_id = __of_unittest_apply_overlay_check(overlay_nr,
unittest_nr, before, after, ovtype);
if (ovcs_id < 0)
return ovcs_id;
return 0;
}
/* apply an overlay and then revert it while checking before, after states */
static int __init of_unittest_apply_revert_overlay_check(int overlay_nr,
int unittest_nr, int before, int after,
enum overlay_type ovtype)
{
int ret, ovcs_id, save_ovcs_id;
ovcs_id = __of_unittest_apply_overlay_check(overlay_nr, unittest_nr,
before, after, ovtype);
if (ovcs_id < 0)
return ovcs_id;
/* remove the overlay */
save_ovcs_id = ovcs_id;
ret = of_overlay_remove(&ovcs_id);
if (ret != 0) {
unittest(0, "%s failed to be destroyed @\"%s\"\n",
overlay_name_from_nr(overlay_nr),
unittest_path(unittest_nr, ovtype));
return ret;
}
of_unittest_untrack_overlay(save_ovcs_id);
/* unittest device must be again in before state */
if (of_unittest_device_exists(unittest_nr, ovtype) != before) {
unittest(0, "%s with device @\"%s\" %s\n",
overlay_name_from_nr(overlay_nr),
unittest_path(unittest_nr, ovtype),
!before ? "enabled" : "disabled");
return -EINVAL;
}
return 0;
}
/* test activation of device */
static void __init of_unittest_overlay_0(void)
{
int ret;
EXPECT_BEGIN(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest0/status");
/* device should enable */
ret = of_unittest_apply_overlay_check(0, 0, 0, 1, PDEV_OVERLAY);
EXPECT_END(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest0/status");
if (ret)
return;
unittest(1, "overlay test %d passed\n", 0);
}
/* test deactivation of device */
static void __init of_unittest_overlay_1(void)
{
int ret;
EXPECT_BEGIN(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest1/status");
/* device should disable */
ret = of_unittest_apply_overlay_check(1, 1, 1, 0, PDEV_OVERLAY);
EXPECT_END(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest1/status");
if (ret)
return;
unittest(1, "overlay test %d passed\n", 1);
}
/* test activation of device */
static void __init of_unittest_overlay_2(void)
{
int ret;
EXPECT_BEGIN(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest2/status");
/* device should enable */
ret = of_unittest_apply_overlay_check(2, 2, 0, 1, PDEV_OVERLAY);
EXPECT_END(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest2/status");
if (ret)
return;
unittest(1, "overlay test %d passed\n", 2);
}
/* test deactivation of device */
static void __init of_unittest_overlay_3(void)
{
int ret;
EXPECT_BEGIN(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest3/status");
/* device should disable */
ret = of_unittest_apply_overlay_check(3, 3, 1, 0, PDEV_OVERLAY);
EXPECT_END(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest3/status");
if (ret)
return;
unittest(1, "overlay test %d passed\n", 3);
}
/* test activation of a full device node */
static void __init of_unittest_overlay_4(void)
{
/* device should enable */
if (of_unittest_apply_overlay_check(4, 4, 0, 1, PDEV_OVERLAY))
return;
unittest(1, "overlay test %d passed\n", 4);
}
/* test overlay apply/revert sequence */
static void __init of_unittest_overlay_5(void)
{
int ret;
EXPECT_BEGIN(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest5/status");
/* device should enable */
ret = of_unittest_apply_revert_overlay_check(5, 5, 0, 1, PDEV_OVERLAY);
EXPECT_END(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest5/status");
if (ret)
return;
unittest(1, "overlay test %d passed\n", 5);
}
/* test overlay application in sequence */
static void __init of_unittest_overlay_6(void)
{
int i, save_ovcs_id[2], ovcs_id;
int overlay_nr = 6, unittest_nr = 6;
int before = 0, after = 1;
const char *overlay_name;
int ret;
/* unittest device must be in before state */
for (i = 0; i < 2; i++) {
if (of_unittest_device_exists(unittest_nr + i, PDEV_OVERLAY)
!= before) {
unittest(0, "%s with device @\"%s\" %s\n",
overlay_name_from_nr(overlay_nr + i),
unittest_path(unittest_nr + i,
PDEV_OVERLAY),
!before ? "enabled" : "disabled");
return;
}
}
/* apply the overlays */
EXPECT_BEGIN(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest6/status");
overlay_name = overlay_name_from_nr(overlay_nr + 0);
ret = overlay_data_apply(overlay_name, &ovcs_id);
if (!ret) {
unittest(0, "could not apply overlay \"%s\"\n", overlay_name);
return;
}
save_ovcs_id[0] = ovcs_id;
of_unittest_track_overlay(ovcs_id, overlay_nr + 0);
EXPECT_END(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest6/status");
EXPECT_BEGIN(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest7/status");
overlay_name = overlay_name_from_nr(overlay_nr + 1);
ret = overlay_data_apply(overlay_name, &ovcs_id);
if (!ret) {
unittest(0, "could not apply overlay \"%s\"\n", overlay_name);
return;
}
save_ovcs_id[1] = ovcs_id;
of_unittest_track_overlay(ovcs_id, overlay_nr + 1);
EXPECT_END(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest7/status");
for (i = 0; i < 2; i++) {
/* unittest device must be in after state */
if (of_unittest_device_exists(unittest_nr + i, PDEV_OVERLAY)
!= after) {
unittest(0, "overlay @\"%s\" failed @\"%s\" %s\n",
overlay_name_from_nr(overlay_nr + i),
unittest_path(unittest_nr + i,
PDEV_OVERLAY),
!after ? "enabled" : "disabled");
return;
}
}
for (i = 1; i >= 0; i--) {
ovcs_id = save_ovcs_id[i];
if (of_overlay_remove(&ovcs_id)) {
unittest(0, "%s failed destroy @\"%s\"\n",
overlay_name_from_nr(overlay_nr + i),
unittest_path(unittest_nr + i,
PDEV_OVERLAY));
return;
}
of_unittest_untrack_overlay(save_ovcs_id[i]);
}
for (i = 0; i < 2; i++) {
/* unittest device must be again in before state */
if (of_unittest_device_exists(unittest_nr + i, PDEV_OVERLAY)
!= before) {
unittest(0, "%s with device @\"%s\" %s\n",
overlay_name_from_nr(overlay_nr + i),
unittest_path(unittest_nr + i,
PDEV_OVERLAY),
!before ? "enabled" : "disabled");
return;
}
}
unittest(1, "overlay test %d passed\n", 6);
}
/* test overlay application in sequence */
static void __init of_unittest_overlay_8(void)
{
int i, save_ovcs_id[2], ovcs_id;
int overlay_nr = 8, unittest_nr = 8;
const char *overlay_name;
int ret;
/* we don't care about device state in this test */
EXPECT_BEGIN(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest8/status");
overlay_name = overlay_name_from_nr(overlay_nr + 0);
ret = overlay_data_apply(overlay_name, &ovcs_id);
if (!ret)
unittest(0, "could not apply overlay \"%s\"\n", overlay_name);
EXPECT_END(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest8/status");
if (!ret)
return;
save_ovcs_id[0] = ovcs_id;
of_unittest_track_overlay(ovcs_id, overlay_nr + 0);
overlay_name = overlay_name_from_nr(overlay_nr + 1);
EXPECT_BEGIN(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest8/property-foo");
/* apply the overlays */
ret = overlay_data_apply(overlay_name, &ovcs_id);
EXPECT_END(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest8/property-foo");
if (!ret) {
unittest(0, "could not apply overlay \"%s\"\n", overlay_name);
return;
}
save_ovcs_id[1] = ovcs_id;
of_unittest_track_overlay(ovcs_id, overlay_nr + 1);
/* now try to remove first overlay (it should fail) */
ovcs_id = save_ovcs_id[0];
EXPECT_BEGIN(KERN_INFO,
"OF: overlay: node_overlaps_later_cs: #6 overlaps with #7 @/testcase-data/overlay-node/test-bus/test-unittest8");
EXPECT_BEGIN(KERN_INFO,
"OF: overlay: overlay #6 is not topmost");
ret = of_overlay_remove(&ovcs_id);
EXPECT_END(KERN_INFO,
"OF: overlay: overlay #6 is not topmost");
EXPECT_END(KERN_INFO,
"OF: overlay: node_overlaps_later_cs: #6 overlaps with #7 @/testcase-data/overlay-node/test-bus/test-unittest8");
if (!ret) {
/*
* Should never get here. If we do, expect a lot of
* subsequent tracking and overlay removal related errors.
*/
unittest(0, "%s was destroyed @\"%s\"\n",
overlay_name_from_nr(overlay_nr + 0),
unittest_path(unittest_nr,
PDEV_OVERLAY));
return;
}
/* removing them in order should work */
for (i = 1; i >= 0; i--) {
ovcs_id = save_ovcs_id[i];
if (of_overlay_remove(&ovcs_id)) {
unittest(0, "%s not destroyed @\"%s\"\n",
overlay_name_from_nr(overlay_nr + i),
unittest_path(unittest_nr,
PDEV_OVERLAY));
return;
}
of_unittest_untrack_overlay(save_ovcs_id[i]);
}
unittest(1, "overlay test %d passed\n", 8);
}
/* test insertion of a bus with parent devices */
static void __init of_unittest_overlay_10(void)
{
int ret;
char *child_path;
/* device should enable */
ret = of_unittest_apply_overlay_check(10, 10, 0, 1, PDEV_OVERLAY);
if (unittest(ret == 0,
"overlay test %d failed; overlay application\n", 10))
return;
child_path = kasprintf(GFP_KERNEL, "%s/test-unittest101",
unittest_path(10, PDEV_OVERLAY));
if (unittest(child_path, "overlay test %d failed; kasprintf\n", 10))
return;
ret = of_path_device_type_exists(child_path, PDEV_OVERLAY);
kfree(child_path);
unittest(ret, "overlay test %d failed; no child device\n", 10);
}
/* test insertion of a bus with parent devices (and revert) */
static void __init of_unittest_overlay_11(void)
{
int ret;
/* device should enable */
ret = of_unittest_apply_revert_overlay_check(11, 11, 0, 1,
PDEV_OVERLAY);
unittest(ret == 0, "overlay test %d failed; overlay apply\n", 11);
}
#if IS_BUILTIN(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY)
struct unittest_i2c_bus_data {
struct platform_device *pdev;
struct i2c_adapter adap;
};
static int unittest_i2c_master_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs, int num)
{
struct unittest_i2c_bus_data *std = i2c_get_adapdata(adap);
(void)std;
return num;
}
static u32 unittest_i2c_functionality(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm unittest_i2c_algo = {
.master_xfer = unittest_i2c_master_xfer,
.functionality = unittest_i2c_functionality,
};
static int unittest_i2c_bus_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct unittest_i2c_bus_data *std;
struct i2c_adapter *adap;
int ret;
if (np == NULL) {
dev_err(dev, "No OF data for device\n");
return -EINVAL;
}
dev_dbg(dev, "%s for node @%pOF\n", __func__, np);
std = devm_kzalloc(dev, sizeof(*std), GFP_KERNEL);
if (!std)
return -ENOMEM;
/* link them together */
std->pdev = pdev;
platform_set_drvdata(pdev, std);
adap = &std->adap;
i2c_set_adapdata(adap, std);
adap->nr = -1;
strscpy(adap->name, pdev->name, sizeof(adap->name));
adap->class = I2C_CLASS_DEPRECATED;
adap->algo = &unittest_i2c_algo;
adap->dev.parent = dev;
adap->dev.of_node = dev->of_node;
adap->timeout = 5 * HZ;
adap->retries = 3;
ret = i2c_add_numbered_adapter(adap);
if (ret != 0) {
dev_err(dev, "Failed to add I2C adapter\n");
return ret;
}
return 0;
}
static void unittest_i2c_bus_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct unittest_i2c_bus_data *std = platform_get_drvdata(pdev);
dev_dbg(dev, "%s for node @%pOF\n", __func__, np);
i2c_del_adapter(&std->adap);
}
static const struct of_device_id unittest_i2c_bus_match[] = {
{ .compatible = "unittest-i2c-bus", },
{},
};
static struct platform_driver unittest_i2c_bus_driver = {
.probe = unittest_i2c_bus_probe,
.remove_new = unittest_i2c_bus_remove,
.driver = {
.name = "unittest-i2c-bus",
.of_match_table = unittest_i2c_bus_match,
},
};
static int unittest_i2c_dev_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *np = client->dev.of_node;
if (!np) {
dev_err(dev, "No OF node\n");
return -EINVAL;
}
dev_dbg(dev, "%s for node @%pOF\n", __func__, np);
return 0;
};
static void unittest_i2c_dev_remove(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *np = client->dev.of_node;
dev_dbg(dev, "%s for node @%pOF\n", __func__, np);
}
static const struct i2c_device_id unittest_i2c_dev_id[] = {
{ .name = "unittest-i2c-dev" },
{ }
};
static struct i2c_driver unittest_i2c_dev_driver = {
.driver = {
.name = "unittest-i2c-dev",
},
.probe = unittest_i2c_dev_probe,
.remove = unittest_i2c_dev_remove,
.id_table = unittest_i2c_dev_id,
};
#if IS_BUILTIN(CONFIG_I2C_MUX)
static int unittest_i2c_mux_select_chan(struct i2c_mux_core *muxc, u32 chan)
{
return 0;
}
static int unittest_i2c_mux_probe(struct i2c_client *client)
{
int i, nchans;
struct device *dev = &client->dev;
struct i2c_adapter *adap = client->adapter;
struct device_node *np = client->dev.of_node, *child;
struct i2c_mux_core *muxc;
u32 reg, max_reg;
dev_dbg(dev, "%s for node @%pOF\n", __func__, np);
if (!np) {
dev_err(dev, "No OF node\n");
return -EINVAL;
}
max_reg = (u32)-1;
for_each_child_of_node(np, child) {
if (of_property_read_u32(child, "reg", &reg))
continue;
if (max_reg == (u32)-1 || reg > max_reg)
max_reg = reg;
}
nchans = max_reg == (u32)-1 ? 0 : max_reg + 1;
if (nchans == 0) {
dev_err(dev, "No channels\n");
return -EINVAL;
}
muxc = i2c_mux_alloc(adap, dev, nchans, 0, 0,
unittest_i2c_mux_select_chan, NULL);
if (!muxc)
return -ENOMEM;
for (i = 0; i < nchans; i++) {
if (i2c_mux_add_adapter(muxc, 0, i, 0)) {
dev_err(dev, "Failed to register mux #%d\n", i);
i2c_mux_del_adapters(muxc);
return -ENODEV;
}
}
i2c_set_clientdata(client, muxc);
return 0;
};
static void unittest_i2c_mux_remove(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *np = client->dev.of_node;
struct i2c_mux_core *muxc = i2c_get_clientdata(client);
dev_dbg(dev, "%s for node @%pOF\n", __func__, np);
i2c_mux_del_adapters(muxc);
}
static const struct i2c_device_id unittest_i2c_mux_id[] = {
{ .name = "unittest-i2c-mux" },
{ }
};
static struct i2c_driver unittest_i2c_mux_driver = {
.driver = {
.name = "unittest-i2c-mux",
},
.probe = unittest_i2c_mux_probe,
.remove = unittest_i2c_mux_remove,
.id_table = unittest_i2c_mux_id,
};
#endif
static int of_unittest_overlay_i2c_init(void)
{
int ret;
ret = i2c_add_driver(&unittest_i2c_dev_driver);
if (unittest(ret == 0,
"could not register unittest i2c device driver\n"))
return ret;
ret = platform_driver_register(&unittest_i2c_bus_driver);
if (unittest(ret == 0,
"could not register unittest i2c bus driver\n"))
return ret;
#if IS_BUILTIN(CONFIG_I2C_MUX)
EXPECT_BEGIN(KERN_INFO,
"i2c i2c-1: Added multiplexed i2c bus 2");
ret = i2c_add_driver(&unittest_i2c_mux_driver);
EXPECT_END(KERN_INFO,
"i2c i2c-1: Added multiplexed i2c bus 2");
if (unittest(ret == 0,
"could not register unittest i2c mux driver\n"))
return ret;
#endif
return 0;
}
static void of_unittest_overlay_i2c_cleanup(void)
{
#if IS_BUILTIN(CONFIG_I2C_MUX)
i2c_del_driver(&unittest_i2c_mux_driver);
#endif
platform_driver_unregister(&unittest_i2c_bus_driver);
i2c_del_driver(&unittest_i2c_dev_driver);
}
static void __init of_unittest_overlay_i2c_12(void)
{
int ret;
/* device should enable */
EXPECT_BEGIN(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/i2c-test-bus/test-unittest12/status");
ret = of_unittest_apply_overlay_check(12, 12, 0, 1, I2C_OVERLAY);
EXPECT_END(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/i2c-test-bus/test-unittest12/status");
if (ret)
return;
unittest(1, "overlay test %d passed\n", 12);
}
/* test deactivation of device */
static void __init of_unittest_overlay_i2c_13(void)
{
int ret;
EXPECT_BEGIN(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/i2c-test-bus/test-unittest13/status");
/* device should disable */
ret = of_unittest_apply_overlay_check(13, 13, 1, 0, I2C_OVERLAY);
EXPECT_END(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/i2c-test-bus/test-unittest13/status");
if (ret)
return;
unittest(1, "overlay test %d passed\n", 13);
}
/* just check for i2c mux existence */
static void of_unittest_overlay_i2c_14(void)
{
}
static void __init of_unittest_overlay_i2c_15(void)
{
int ret;
/* device should enable */
EXPECT_BEGIN(KERN_INFO,
"i2c i2c-1: Added multiplexed i2c bus 3");
ret = of_unittest_apply_overlay_check(15, 15, 0, 1, I2C_OVERLAY);
EXPECT_END(KERN_INFO,
"i2c i2c-1: Added multiplexed i2c bus 3");
if (ret)
return;
unittest(1, "overlay test %d passed\n", 15);
}
#else
static inline void of_unittest_overlay_i2c_14(void) { }
static inline void of_unittest_overlay_i2c_15(void) { }
#endif
static int of_notify(struct notifier_block *nb, unsigned long action,
void *arg)
{
struct of_overlay_notify_data *nd = arg;
struct device_node *found;
int ret;
/*
* For overlay_16 .. overlay_19, check that returning an error
* works for each of the actions by setting an arbitrary return
* error number that matches the test number. e.g. for unittest16,
* ret = -EBUSY which is -16.
*
* OVERLAY_INFO() for the overlays is declared to expect the same
* error number, so overlay_data_apply() will return no error.
*
* overlay_20 will return NOTIFY_DONE
*/
ret = 0;
of_node_get(nd->overlay);
switch (action) {
case OF_OVERLAY_PRE_APPLY:
found = of_find_node_by_name(nd->overlay, "test-unittest16");
if (found) {
of_node_put(found);
ret = -EBUSY;
}
break;
case OF_OVERLAY_POST_APPLY:
found = of_find_node_by_name(nd->overlay, "test-unittest17");
if (found) {
of_node_put(found);
ret = -EEXIST;
}
break;
case OF_OVERLAY_PRE_REMOVE:
found = of_find_node_by_name(nd->overlay, "test-unittest18");
if (found) {
of_node_put(found);
ret = -EXDEV;
}
break;
case OF_OVERLAY_POST_REMOVE:
found = of_find_node_by_name(nd->overlay, "test-unittest19");
if (found) {
of_node_put(found);
ret = -ENODEV;
}
break;
default: /* should not happen */
of_node_put(nd->overlay);
ret = -EINVAL;
break;
}
if (ret)
return notifier_from_errno(ret);
return NOTIFY_DONE;
}
static struct notifier_block of_nb = {
.notifier_call = of_notify,
};
static void __init of_unittest_overlay_notify(void)
{
int ovcs_id;
int ret;
ret = of_overlay_notifier_register(&of_nb);
unittest(!ret,
"of_overlay_notifier_register() failed, ret = %d\n", ret);
if (ret)
return;
/*
* The overlays are applied by overlay_data_apply()
* instead of of_unittest_apply_overlay() so that they
* will not be tracked. Thus they will not be removed
* by of_unittest_remove_tracked_overlays().
*
* Applying overlays 16 - 19 will each trigger an error for a
* different action in of_notify().
*
* Applying overlay 20 will not trigger any error in of_notify().
*/
/* --- overlay 16 --- */
EXPECT_BEGIN(KERN_INFO, "OF: overlay: overlay changeset pre-apply notifier error -16, target: /testcase-data/overlay-node/test-bus");
unittest(overlay_data_apply("overlay_16", &ovcs_id),
"test OF_OVERLAY_PRE_APPLY notify injected error\n");
EXPECT_END(KERN_INFO, "OF: overlay: overlay changeset pre-apply notifier error -16, target: /testcase-data/overlay-node/test-bus");
unittest(ovcs_id, "ovcs_id not created for overlay_16\n");
/* --- overlay 17 --- */
EXPECT_BEGIN(KERN_INFO, "OF: overlay: overlay changeset post-apply notifier error -17, target: /testcase-data/overlay-node/test-bus");
unittest(overlay_data_apply("overlay_17", &ovcs_id),
"test OF_OVERLAY_POST_APPLY notify injected error\n");
EXPECT_END(KERN_INFO, "OF: overlay: overlay changeset post-apply notifier error -17, target: /testcase-data/overlay-node/test-bus");
unittest(ovcs_id, "ovcs_id not created for overlay_17\n");
/* --- overlay 18 --- */
unittest(overlay_data_apply("overlay_18", &ovcs_id),
"OF_OVERLAY_PRE_REMOVE notify injected error\n");
unittest(ovcs_id, "ovcs_id not created for overlay_18\n");
if (ovcs_id) {
EXPECT_BEGIN(KERN_INFO, "OF: overlay: overlay changeset pre-remove notifier error -18, target: /testcase-data/overlay-node/test-bus");
ret = of_overlay_remove(&ovcs_id);
EXPECT_END(KERN_INFO, "OF: overlay: overlay changeset pre-remove notifier error -18, target: /testcase-data/overlay-node/test-bus");
if (ret == -EXDEV) {
/*
* change set ovcs_id should still exist
*/
unittest(1, "overlay_18 of_overlay_remove() injected error for OF_OVERLAY_PRE_REMOVE\n");
} else {
unittest(0, "overlay_18 of_overlay_remove() injected error for OF_OVERLAY_PRE_REMOVE not returned\n");
}
} else {
unittest(1, "ovcs_id not created for overlay_18\n");
}
unittest(ovcs_id, "ovcs_id removed for overlay_18\n");
/* --- overlay 19 --- */
unittest(overlay_data_apply("overlay_19", &ovcs_id),
"OF_OVERLAY_POST_REMOVE notify injected error\n");
unittest(ovcs_id, "ovcs_id not created for overlay_19\n");
if (ovcs_id) {
EXPECT_BEGIN(KERN_INFO, "OF: overlay: overlay changeset post-remove notifier error -19, target: /testcase-data/overlay-node/test-bus");
ret = of_overlay_remove(&ovcs_id);
EXPECT_END(KERN_INFO, "OF: overlay: overlay changeset post-remove notifier error -19, target: /testcase-data/overlay-node/test-bus");
if (ret == -ENODEV)
unittest(1, "overlay_19 of_overlay_remove() injected error for OF_OVERLAY_POST_REMOVE\n");
else
unittest(0, "overlay_19 of_overlay_remove() injected error for OF_OVERLAY_POST_REMOVE not returned\n");
} else {
unittest(1, "ovcs_id removed for overlay_19\n");
}
unittest(!ovcs_id, "changeset ovcs_id = %d not removed for overlay_19\n",
ovcs_id);
/* --- overlay 20 --- */
unittest(overlay_data_apply("overlay_20", &ovcs_id),
"overlay notify no injected error\n");
if (ovcs_id) {
ret = of_overlay_remove(&ovcs_id);
if (ret)
unittest(1, "overlay_20 failed to be destroyed, ret = %d\n",
ret);
} else {
unittest(1, "ovcs_id not created for overlay_20\n");
}
unittest(!of_overlay_notifier_unregister(&of_nb),
"of_overlay_notifier_unregister() failed, ret = %d\n", ret);
}
static void __init of_unittest_overlay(void)
{
struct device_node *bus_np = NULL;
unsigned int i;
if (platform_driver_register(&unittest_driver)) {
unittest(0, "could not register unittest driver\n");
goto out;
}
bus_np = of_find_node_by_path(bus_path);
if (bus_np == NULL) {
unittest(0, "could not find bus_path \"%s\"\n", bus_path);
goto out;
}
if (of_platform_default_populate(bus_np, NULL, NULL)) {
unittest(0, "could not populate bus @ \"%s\"\n", bus_path);
goto out;
}
if (!of_unittest_device_exists(100, PDEV_OVERLAY)) {
unittest(0, "could not find unittest0 @ \"%s\"\n",
unittest_path(100, PDEV_OVERLAY));
goto out;
}
if (of_unittest_device_exists(101, PDEV_OVERLAY)) {
unittest(0, "unittest1 @ \"%s\" should not exist\n",
unittest_path(101, PDEV_OVERLAY));
goto out;
}
unittest(1, "basic infrastructure of overlays passed");
/* tests in sequence */
of_unittest_overlay_0();
of_unittest_overlay_1();
of_unittest_overlay_2();
of_unittest_overlay_3();
of_unittest_overlay_4();
for (i = 0; i < 3; i++)
of_unittest_overlay_5();
of_unittest_overlay_6();
of_unittest_overlay_8();
of_unittest_overlay_10();
of_unittest_overlay_11();
#if IS_BUILTIN(CONFIG_I2C)
if (unittest(of_unittest_overlay_i2c_init() == 0, "i2c init failed\n"))
goto out;
of_unittest_overlay_i2c_12();
of_unittest_overlay_i2c_13();
of_unittest_overlay_i2c_14();
of_unittest_overlay_i2c_15();
of_unittest_overlay_i2c_cleanup();
#endif
of_unittest_overlay_gpio();
of_unittest_remove_tracked_overlays();
of_unittest_overlay_notify();
out:
of_node_put(bus_np);
}
#else
static inline void __init of_unittest_overlay(void) { }
#endif
static void __init of_unittest_lifecycle(void)
{
#ifdef CONFIG_OF_DYNAMIC
unsigned int refcount;
int found_refcount_one = 0;
int put_count = 0;
struct device_node *np;
struct device_node *prev_sibling, *next_sibling;
const char *refcount_path = "/testcase-data/refcount-node";
const char *refcount_parent_path = "/testcase-data";
/*
* Node lifecycle tests, non-dynamic node:
*
* - Decrementing refcount to zero via of_node_put() should cause the
* attempt to free the node memory by of_node_release() to fail
* because the node is not a dynamic node.
*
* - Decrementing refcount past zero should result in additional
* errors reported.
*/
np = of_find_node_by_path(refcount_path);
unittest(np, "find refcount_path \"%s\"\n", refcount_path);
if (np == NULL)
goto out_skip_tests;
while (!found_refcount_one) {
if (put_count++ > 10) {
unittest(0, "guardrail to avoid infinite loop\n");
goto out_skip_tests;
}
refcount = kref_read(&np->kobj.kref);
if (refcount == 1)
found_refcount_one = 1;
else
of_node_put(np);
}
EXPECT_BEGIN(KERN_INFO, "OF: ERROR: of_node_release() detected bad of_node_put() on /testcase-data/refcount-node");
/*
 * refcount is now one; decrementing it to zero will result in a call to
* of_node_release() to free the node's memory, which should result
* in an error
*/
unittest(1, "/testcase-data/refcount-node is one");
of_node_put(np);
EXPECT_END(KERN_INFO, "OF: ERROR: of_node_release() detected bad of_node_put() on /testcase-data/refcount-node");
/*
* expect stack trace for subsequent of_node_put():
* __refcount_sub_and_test() calls:
* refcount_warn_saturate(r, REFCOUNT_SUB_UAF)
*
* Not capturing entire WARN_ONCE() trace with EXPECT_*(), just
* the first three lines, and the last line.
*/
EXPECT_BEGIN(KERN_INFO, "------------[ cut here ]------------");
EXPECT_BEGIN(KERN_INFO, "WARNING: <<all>>");
EXPECT_BEGIN(KERN_INFO, "refcount_t: underflow; use-after-free.");
EXPECT_BEGIN(KERN_INFO, "---[ end trace <<int>> ]---");
/* refcount is now zero, this should fail */
unittest(1, "/testcase-data/refcount-node is zero");
of_node_put(np);
EXPECT_END(KERN_INFO, "---[ end trace <<int>> ]---");
EXPECT_END(KERN_INFO, "refcount_t: underflow; use-after-free.");
EXPECT_END(KERN_INFO, "WARNING: <<all>>");
EXPECT_END(KERN_INFO, "------------[ cut here ]------------");
/*
* Q. do we expect to get yet another warning?
* A. no, the WARNING is from WARN_ONCE()
*/
EXPECT_NOT_BEGIN(KERN_INFO, "------------[ cut here ]------------");
EXPECT_NOT_BEGIN(KERN_INFO, "WARNING: <<all>>");
EXPECT_NOT_BEGIN(KERN_INFO, "refcount_t: underflow; use-after-free.");
EXPECT_NOT_BEGIN(KERN_INFO, "---[ end trace <<int>> ]---");
unittest(1, "/testcase-data/refcount-node is zero, second time");
of_node_put(np);
EXPECT_NOT_END(KERN_INFO, "---[ end trace <<int>> ]---");
EXPECT_NOT_END(KERN_INFO, "refcount_t: underflow; use-after-free.");
EXPECT_NOT_END(KERN_INFO, "WARNING: <<all>>");
EXPECT_NOT_END(KERN_INFO, "------------[ cut here ]------------");
/*
* refcount of zero will trigger stack traces from any further
* attempt to of_node_get() node "refcount-node". One example of
* this is where of_unittest_check_node_linkage() will recursively
* scan the tree, with 'for_each_child_of_node()' doing an
* of_node_get() of the children of a node.
*
* Prevent the stack trace by removing node "refcount-node" from
* its parent's child list.
*
* WARNING: EVIL, EVIL, EVIL:
*
* Directly manipulate the child list of node /testcase-data to
* remove child refcount-node. This is ignoring all proper methods
* of removing a child and will leak a small amount of memory.
*/
np = of_find_node_by_path(refcount_parent_path);
unittest(np, "find refcount_parent_path \"%s\"\n", refcount_parent_path);
unittest(np, "ERROR: devicetree live tree left in a 'bad state' if test fail\n");
if (np == NULL)
return;
prev_sibling = np->child;
next_sibling = prev_sibling->sibling;
if (!strcmp(prev_sibling->full_name, "refcount-node")) {
np->child = next_sibling;
next_sibling = next_sibling->sibling;
}
while (next_sibling) {
if (!strcmp(next_sibling->full_name, "refcount-node"))
prev_sibling->sibling = next_sibling->sibling;
prev_sibling = next_sibling;
next_sibling = next_sibling->sibling;
}
of_node_put(np);
return;
out_skip_tests:
#endif
unittest(0, "One or more lifecycle tests skipped\n");
}
#ifdef CONFIG_OF_OVERLAY
/*
* __dtbo_##overlay_name##_begin[] and __dtbo_##overlay_name##_end[] are
* created by cmd_dt_S_dtbo in scripts/Makefile.lib
*/
#define OVERLAY_INFO_EXTERN(overlay_name) \
extern uint8_t __dtbo_##overlay_name##_begin[]; \
extern uint8_t __dtbo_##overlay_name##_end[]
#define OVERLAY_INFO(overlay_name, expected, expected_remove) \
{ .dtbo_begin = __dtbo_##overlay_name##_begin, \
.dtbo_end = __dtbo_##overlay_name##_end, \
.expected_result = expected, \
.expected_result_remove = expected_remove, \
.name = #overlay_name, \
}
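/*
 * For example, the overlays[] entry OVERLAY_INFO(overlay_0, 0, 0) below
 * expands to roughly:
 *
 *	{ .dtbo_begin = __dtbo_overlay_0_begin,
 *	  .dtbo_end = __dtbo_overlay_0_end,
 *	  .expected_result = 0,
 *	  .expected_result_remove = 0,
 *	  .name = "overlay_0",
 *	}
 *
 * so each entry records the bounds of the dtbo blob generated by
 * cmd_dt_S_dtbo plus the result expected when applying (and, if the apply
 * fails, when removing) that overlay.
 */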
struct overlay_info {
uint8_t *dtbo_begin;
uint8_t *dtbo_end;
int expected_result;
int expected_result_remove; /* if apply failed */
int ovcs_id;
char *name;
};
OVERLAY_INFO_EXTERN(overlay_base);
OVERLAY_INFO_EXTERN(overlay);
OVERLAY_INFO_EXTERN(overlay_0);
OVERLAY_INFO_EXTERN(overlay_1);
OVERLAY_INFO_EXTERN(overlay_2);
OVERLAY_INFO_EXTERN(overlay_3);
OVERLAY_INFO_EXTERN(overlay_4);
OVERLAY_INFO_EXTERN(overlay_5);
OVERLAY_INFO_EXTERN(overlay_6);
OVERLAY_INFO_EXTERN(overlay_7);
OVERLAY_INFO_EXTERN(overlay_8);
OVERLAY_INFO_EXTERN(overlay_9);
OVERLAY_INFO_EXTERN(overlay_10);
OVERLAY_INFO_EXTERN(overlay_11);
OVERLAY_INFO_EXTERN(overlay_12);
OVERLAY_INFO_EXTERN(overlay_13);
OVERLAY_INFO_EXTERN(overlay_15);
OVERLAY_INFO_EXTERN(overlay_16);
OVERLAY_INFO_EXTERN(overlay_17);
OVERLAY_INFO_EXTERN(overlay_18);
OVERLAY_INFO_EXTERN(overlay_19);
OVERLAY_INFO_EXTERN(overlay_20);
OVERLAY_INFO_EXTERN(overlay_gpio_01);
OVERLAY_INFO_EXTERN(overlay_gpio_02a);
OVERLAY_INFO_EXTERN(overlay_gpio_02b);
OVERLAY_INFO_EXTERN(overlay_gpio_03);
OVERLAY_INFO_EXTERN(overlay_gpio_04a);
OVERLAY_INFO_EXTERN(overlay_gpio_04b);
OVERLAY_INFO_EXTERN(overlay_pci_node);
OVERLAY_INFO_EXTERN(overlay_bad_add_dup_node);
OVERLAY_INFO_EXTERN(overlay_bad_add_dup_prop);
OVERLAY_INFO_EXTERN(overlay_bad_phandle);
OVERLAY_INFO_EXTERN(overlay_bad_symbol);
OVERLAY_INFO_EXTERN(overlay_bad_unresolved);
/* entries found by name */
static struct overlay_info overlays[] = {
OVERLAY_INFO(overlay_base, -9999, 0),
OVERLAY_INFO(overlay, 0, 0),
OVERLAY_INFO(overlay_0, 0, 0),
OVERLAY_INFO(overlay_1, 0, 0),
OVERLAY_INFO(overlay_2, 0, 0),
OVERLAY_INFO(overlay_3, 0, 0),
OVERLAY_INFO(overlay_4, 0, 0),
OVERLAY_INFO(overlay_5, 0, 0),
OVERLAY_INFO(overlay_6, 0, 0),
OVERLAY_INFO(overlay_7, 0, 0),
OVERLAY_INFO(overlay_8, 0, 0),
OVERLAY_INFO(overlay_9, 0, 0),
OVERLAY_INFO(overlay_10, 0, 0),
OVERLAY_INFO(overlay_11, 0, 0),
OVERLAY_INFO(overlay_12, 0, 0),
OVERLAY_INFO(overlay_13, 0, 0),
OVERLAY_INFO(overlay_15, 0, 0),
OVERLAY_INFO(overlay_16, -EBUSY, 0),
OVERLAY_INFO(overlay_17, -EEXIST, 0),
OVERLAY_INFO(overlay_18, 0, 0),
OVERLAY_INFO(overlay_19, 0, 0),
OVERLAY_INFO(overlay_20, 0, 0),
OVERLAY_INFO(overlay_gpio_01, 0, 0),
OVERLAY_INFO(overlay_gpio_02a, 0, 0),
OVERLAY_INFO(overlay_gpio_02b, 0, 0),
OVERLAY_INFO(overlay_gpio_03, 0, 0),
OVERLAY_INFO(overlay_gpio_04a, 0, 0),
OVERLAY_INFO(overlay_gpio_04b, 0, 0),
OVERLAY_INFO(overlay_pci_node, 0, 0),
OVERLAY_INFO(overlay_bad_add_dup_node, -EINVAL, -ENODEV),
OVERLAY_INFO(overlay_bad_add_dup_prop, -EINVAL, -ENODEV),
OVERLAY_INFO(overlay_bad_phandle, -EINVAL, 0),
OVERLAY_INFO(overlay_bad_symbol, -EINVAL, -ENODEV),
OVERLAY_INFO(overlay_bad_unresolved, -EINVAL, 0),
/* end marker */
{ }
};
static struct device_node *overlay_base_root;
static void * __init dt_alloc_memory(u64 size, u64 align)
{
void *ptr = memblock_alloc(size, align);
if (!ptr)
panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
__func__, size, align);
return ptr;
}
/*
* Create base device tree for the overlay unittest.
*
* This is called from very early boot code.
*
* Do as much as possible the same way as done in __unflatten_device_tree
* and other early boot steps for the normal FDT so that the overlay base
* unflattened tree will have the same characteristics as the real tree
* (such as having memory allocated by the early allocator). The goal
* is to test "the real thing" as much as possible, and test "test setup
* code" as little as possible.
*
* Have to stop before resolving phandles, because that uses kmalloc.
*/
void __init unittest_unflatten_overlay_base(void)
{
struct overlay_info *info;
u32 data_size;
void *new_fdt;
u32 size;
int found = 0;
const char *overlay_name = "overlay_base";
for (info = overlays; info && info->name; info++) {
if (!strcmp(overlay_name, info->name)) {
found = 1;
break;
}
}
if (!found) {
pr_err("no overlay data for %s\n", overlay_name);
return;
}
info = &overlays[0];
if (info->expected_result != -9999) {
pr_err("No dtb 'overlay_base' to attach\n");
return;
}
data_size = info->dtbo_end - info->dtbo_begin;
if (!data_size) {
pr_err("No dtb 'overlay_base' to attach\n");
return;
}
size = fdt_totalsize(info->dtbo_begin);
if (size != data_size) {
pr_err("dtb 'overlay_base' header totalsize != actual size");
return;
}
new_fdt = dt_alloc_memory(size, roundup_pow_of_two(FDT_V17_SIZE));
if (!new_fdt) {
pr_err("alloc for dtb 'overlay_base' failed");
return;
}
memcpy(new_fdt, info->dtbo_begin, size);
__unflatten_device_tree(new_fdt, NULL, &overlay_base_root,
dt_alloc_memory, true);
}
/*
 * The purpose of overlay_data_apply() is to apply an overlay in the
 * normal fashion. This is a test of the whole picture, instead of
 * testing individual elements.
 *
 * A secondary purpose is to be able to verify that the contents of
 * /proc/device-tree/ contain the updated structure and values from
* the overlay. That must be verified separately in user space.
*
* Return 0 on unexpected error.
*/
static int __init overlay_data_apply(const char *overlay_name, int *ovcs_id)
{
struct overlay_info *info;
int passed = 1;
int found = 0;
int ret, ret2;
u32 size;
for (info = overlays; info && info->name; info++) {
if (!strcmp(overlay_name, info->name)) {
found = 1;
break;
}
}
if (!found) {
pr_err("no overlay data for %s\n", overlay_name);
return 0;
}
size = info->dtbo_end - info->dtbo_begin;
if (!size)
pr_err("no overlay data for %s\n", overlay_name);
ret = of_overlay_fdt_apply(info->dtbo_begin, size, &info->ovcs_id,
NULL);
if (ovcs_id)
*ovcs_id = info->ovcs_id;
if (ret < 0)
goto out;
pr_debug("%s applied\n", overlay_name);
out:
if (ret != info->expected_result) {
pr_err("of_overlay_fdt_apply() expected %d, ret=%d, %s\n",
info->expected_result, ret, overlay_name);
passed = 0;
}
if (ret < 0) {
/* changeset may be partially applied */
ret2 = of_overlay_remove(&info->ovcs_id);
if (ret2 != info->expected_result_remove) {
pr_err("of_overlay_remove() expected %d, ret=%d, %s\n",
info->expected_result_remove, ret2,
overlay_name);
passed = 0;
}
}
return passed;
}
/*
* The purpose of of_unittest_overlay_high_level is to add an overlay
* in the normal fashion. This is a test of the whole picture,
* instead of individual elements.
*
* The first part of the function is _not_ normal overlay usage; it is
* finishing splicing the base overlay device tree into the live tree.
*/
static __init void of_unittest_overlay_high_level(void)
{
struct device_node *last_sibling;
struct device_node *np;
struct device_node *of_symbols;
struct device_node *overlay_base_symbols;
struct device_node **pprev;
struct property *prop;
int ret;
if (!overlay_base_root) {
unittest(0, "overlay_base_root not initialized\n");
return;
}
/*
* Could not fixup phandles in unittest_unflatten_overlay_base()
* because kmalloc() was not yet available.
*/
of_overlay_mutex_lock();
of_resolve_phandles(overlay_base_root);
of_overlay_mutex_unlock();
/*
 * Do not allow overlay_base to duplicate any node already in the
 * tree; this greatly simplifies the code.
 */
/*
 * Remove the overlay_base_root node "__local_fixups__" after it has
 * been used by of_resolve_phandles().
 */
pprev = &overlay_base_root->child;
for (np = overlay_base_root->child; np; np = np->sibling) {
if (of_node_name_eq(np, "__local_fixups__")) {
*pprev = np->sibling;
break;
}
pprev = &np->sibling;
}
/* remove overlay_base_root node "__symbols__" if in live tree */
of_symbols = of_get_child_by_name(of_root, "__symbols__");
if (of_symbols) {
/* will have to graft properties from node into live tree */
pprev = &overlay_base_root->child;
for (np = overlay_base_root->child; np; np = np->sibling) {
if (of_node_name_eq(np, "__symbols__")) {
overlay_base_symbols = np;
*pprev = np->sibling;
break;
}
pprev = &np->sibling;
}
}
for_each_child_of_node(overlay_base_root, np) {
struct device_node *base_child;
for_each_child_of_node(of_root, base_child) {
if (!strcmp(np->full_name, base_child->full_name)) {
unittest(0, "illegal node name in overlay_base %pOFn",
np);
of_node_put(np);
of_node_put(base_child);
return;
}
}
}
/*
* overlay 'overlay_base' is not allowed to have root
* properties, so only need to splice nodes into main device tree.
*
 * The root node of overlay_base_root will not be freed; it is lost
 * memory.
*/
for (np = overlay_base_root->child; np; np = np->sibling)
np->parent = of_root;
mutex_lock(&of_mutex);
for (last_sibling = np = of_root->child; np; np = np->sibling)
last_sibling = np;
if (last_sibling)
last_sibling->sibling = overlay_base_root->child;
else
of_root->child = overlay_base_root->child;
for_each_of_allnodes_from(overlay_base_root, np)
__of_attach_node_sysfs(np);
if (of_symbols) {
struct property *new_prop;
for_each_property_of_node(overlay_base_symbols, prop) {
new_prop = __of_prop_dup(prop, GFP_KERNEL);
if (!new_prop) {
unittest(0, "__of_prop_dup() of '%s' from overlay_base node __symbols__",
prop->name);
goto err_unlock;
}
if (__of_add_property(of_symbols, new_prop)) {
kfree(new_prop->name);
kfree(new_prop->value);
kfree(new_prop);
/* "name" auto-generated by unflatten */
if (!strcmp(prop->name, "name"))
continue;
unittest(0, "duplicate property '%s' in overlay_base node __symbols__",
prop->name);
goto err_unlock;
}
if (__of_add_property_sysfs(of_symbols, new_prop)) {
unittest(0, "unable to add property '%s' in overlay_base node __symbols__ to sysfs",
prop->name);
goto err_unlock;
}
}
}
mutex_unlock(&of_mutex);
/* now do the normal overlay usage test */
/* --- overlay --- */
EXPECT_BEGIN(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/substation@100/status");
EXPECT_BEGIN(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/status");
EXPECT_BEGIN(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/ride@100/track@30/incline-up");
EXPECT_BEGIN(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/ride@100/track@40/incline-up");
EXPECT_BEGIN(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/status");
EXPECT_BEGIN(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/color");
EXPECT_BEGIN(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/rate");
EXPECT_BEGIN(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/hvac_2");
EXPECT_BEGIN(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200");
EXPECT_BEGIN(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200_left");
EXPECT_BEGIN(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200_right");
ret = overlay_data_apply("overlay", NULL);
EXPECT_END(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200_right");
EXPECT_END(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200_left");
EXPECT_END(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200");
EXPECT_END(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/hvac_2");
EXPECT_END(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/rate");
EXPECT_END(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/color");
EXPECT_END(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/status");
EXPECT_END(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/ride@100/track@40/incline-up");
EXPECT_END(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/ride@100/track@30/incline-up");
EXPECT_END(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/status");
EXPECT_END(KERN_ERR,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/substation@100/status");
unittest(ret, "Adding overlay 'overlay' failed\n");
/* --- overlay_bad_add_dup_node --- */
EXPECT_BEGIN(KERN_ERR,
"OF: overlay: ERROR: multiple fragments add and/or delete node /testcase-data-2/substation@100/motor-1/controller");
EXPECT_BEGIN(KERN_ERR,
"OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/controller/name");
EXPECT_BEGIN(KERN_ERR,
"OF: changeset: apply failed: REMOVE_PROPERTY /testcase-data-2/substation@100/motor-1/controller:name");
EXPECT_BEGIN(KERN_ERR,
"OF: Error reverting changeset (-19)");
unittest(overlay_data_apply("overlay_bad_add_dup_node", NULL),
"Adding overlay 'overlay_bad_add_dup_node' failed\n");
EXPECT_END(KERN_ERR,
"OF: Error reverting changeset (-19)");
EXPECT_END(KERN_ERR,
"OF: changeset: apply failed: REMOVE_PROPERTY /testcase-data-2/substation@100/motor-1/controller:name");
EXPECT_END(KERN_ERR,
"OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/controller/name");
EXPECT_END(KERN_ERR,
"OF: overlay: ERROR: multiple fragments add and/or delete node /testcase-data-2/substation@100/motor-1/controller");
/* --- overlay_bad_add_dup_prop --- */
EXPECT_BEGIN(KERN_ERR,
"OF: overlay: ERROR: multiple fragments add and/or delete node /testcase-data-2/substation@100/motor-1/electric");
EXPECT_BEGIN(KERN_ERR,
"OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/electric/rpm_avail");
EXPECT_BEGIN(KERN_ERR,
"OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/electric/name");
EXPECT_BEGIN(KERN_ERR,
"OF: changeset: apply failed: REMOVE_PROPERTY /testcase-data-2/substation@100/motor-1/electric:name");
EXPECT_BEGIN(KERN_ERR,
"OF: Error reverting changeset (-19)");
unittest(overlay_data_apply("overlay_bad_add_dup_prop", NULL),
"Adding overlay 'overlay_bad_add_dup_prop' failed\n");
EXPECT_END(KERN_ERR,
"OF: Error reverting changeset (-19)");
EXPECT_END(KERN_ERR,
"OF: changeset: apply failed: REMOVE_PROPERTY /testcase-data-2/substation@100/motor-1/electric:name");
EXPECT_END(KERN_ERR,
"OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/electric/name");
EXPECT_END(KERN_ERR,
"OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/electric/rpm_avail");
EXPECT_END(KERN_ERR,
"OF: overlay: ERROR: multiple fragments add and/or delete node /testcase-data-2/substation@100/motor-1/electric");
/* --- overlay_bad_phandle --- */
unittest(overlay_data_apply("overlay_bad_phandle", NULL),
"Adding overlay 'overlay_bad_phandle' failed\n");
/* --- overlay_bad_symbol --- */
EXPECT_BEGIN(KERN_ERR,
"OF: changeset: apply failed: REMOVE_PROPERTY /testcase-data-2/substation@100/hvac-medium-2:name");
EXPECT_BEGIN(KERN_ERR,
"OF: Error reverting changeset (-19)");
unittest(overlay_data_apply("overlay_bad_symbol", NULL),
"Adding overlay 'overlay_bad_symbol' failed\n");
EXPECT_END(KERN_ERR,
"OF: Error reverting changeset (-19)");
EXPECT_END(KERN_ERR,
"OF: changeset: apply failed: REMOVE_PROPERTY /testcase-data-2/substation@100/hvac-medium-2:name");
/* --- overlay_bad_unresolved --- */
EXPECT_BEGIN(KERN_ERR,
"OF: resolver: node label 'this_label_does_not_exist' not found in live devicetree symbols table");
EXPECT_BEGIN(KERN_ERR,
"OF: resolver: overlay phandle fixup failed: -22");
unittest(overlay_data_apply("overlay_bad_unresolved", NULL),
"Adding overlay 'overlay_bad_unresolved' failed\n");
EXPECT_END(KERN_ERR,
"OF: resolver: overlay phandle fixup failed: -22");
EXPECT_END(KERN_ERR,
"OF: resolver: node label 'this_label_does_not_exist' not found in live devicetree symbols table");
return;
err_unlock:
mutex_unlock(&of_mutex);
}
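/*
 * of_unittest_pci_dev_num counts the QEMU "pci-testdev" PCI devices found
 * by of_unittest_pci_node(); of_unittest_pci_child_num counts the
 * "unittest-pci" child platform devices that probed successfully.
 */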
static int of_unittest_pci_dev_num;
static int of_unittest_pci_child_num;
/*
* PCI device tree node test driver
*/
static const struct pci_device_id testdrv_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT, 0x5), }, /* PCI_VENDOR_ID_REDHAT */
{ 0, }
};
static int testdrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct overlay_info *info;
struct device_node *dn;
int ret, ovcs_id;
u32 size;
dn = pdev->dev.of_node;
if (!dn) {
dev_err(&pdev->dev, "does not find bus endpoint");
return -EINVAL;
}
for (info = overlays; info && info->name; info++) {
if (!strcmp(info->name, "overlay_pci_node"))
break;
}
if (!info || !info->name) {
dev_err(&pdev->dev, "no overlay data for overlay_pci_node");
return -ENODEV;
}
size = info->dtbo_end - info->dtbo_begin;
ret = of_overlay_fdt_apply(info->dtbo_begin, size, &ovcs_id, dn);
of_node_put(dn);
if (ret)
return ret;
of_platform_default_populate(dn, NULL, &pdev->dev);
pci_set_drvdata(pdev, (void *)(uintptr_t)ovcs_id);
return 0;
}
static void testdrv_remove(struct pci_dev *pdev)
{
int ovcs_id = (int)(uintptr_t)pci_get_drvdata(pdev);
of_platform_depopulate(&pdev->dev);
of_overlay_remove(&ovcs_id);
}
static struct pci_driver testdrv_driver = {
.name = "pci_dt_testdrv",
.id_table = testdrv_pci_ids,
.probe = testdrv_probe,
.remove = testdrv_remove,
};
static int unittest_pci_probe(struct platform_device *pdev)
{
struct resource *res;
struct device *dev;
u64 exp_addr;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
dev = &pdev->dev;
while (dev && !dev_is_pci(dev))
dev = dev->parent;
if (!dev) {
pr_err("unable to find parent device\n");
return -ENODEV;
}
exp_addr = pci_resource_start(to_pci_dev(dev), 0) + 0x100;
unittest(res->start == exp_addr, "Incorrect translated address %llx, expected %llx\n",
(u64)res->start, exp_addr);
of_unittest_pci_child_num++;
return 0;
}
static const struct of_device_id unittest_pci_of_match[] = {
{ .compatible = "unittest-pci" },
{ }
};
static struct platform_driver unittest_pci_driver = {
.probe = unittest_pci_probe,
.driver = {
.name = "unittest-pci",
.of_match_table = unittest_pci_of_match,
},
};
static int of_unittest_pci_node_verify(struct pci_dev *pdev, bool add)
{
struct device_node *pnp, *np = NULL;
struct device *child_dev;
char *path = NULL;
const __be32 *reg;
int rc = 0;
pnp = pdev->dev.of_node;
unittest(pnp, "Failed creating PCI dt node\n");
if (!pnp)
return -ENODEV;
if (add) {
path = kasprintf(GFP_KERNEL, "%pOF/pci-ep-bus@0/unittest-pci@100", pnp);
np = of_find_node_by_path(path);
unittest(np, "Failed to get unittest-pci node under PCI node\n");
if (!np) {
rc = -ENODEV;
goto failed;
}
reg = of_get_property(np, "reg", NULL);
unittest(reg, "Failed to get reg property\n");
if (!reg)
rc = -ENODEV;
} else {
path = kasprintf(GFP_KERNEL, "%pOF/pci-ep-bus@0", pnp);
np = of_find_node_by_path(path);
unittest(!np, "Child device tree node is not removed\n");
child_dev = device_find_any_child(&pdev->dev);
unittest(!child_dev, "Child device is not removed\n");
}
failed:
kfree(path);
if (np)
of_node_put(np);
return rc;
}
static void __init of_unittest_pci_node(void)
{
struct pci_dev *pdev = NULL;
int rc;
if (!IS_ENABLED(CONFIG_PCI_DYNAMIC_OF_NODES))
return;
rc = pci_register_driver(&testdrv_driver);
unittest(!rc, "Failed to register pci test driver; rc = %d\n", rc);
if (rc)
return;
rc = platform_driver_register(&unittest_pci_driver);
if (unittest(!rc, "Failed to register unittest pci driver\n")) {
pci_unregister_driver(&testdrv_driver);
return;
}
while ((pdev = pci_get_device(PCI_VENDOR_ID_REDHAT, 0x5, pdev)) != NULL) {
of_unittest_pci_node_verify(pdev, true);
of_unittest_pci_dev_num++;
}
if (pdev)
pci_dev_put(pdev);
unittest(of_unittest_pci_dev_num,
"No test PCI device been found. Please run QEMU with '-device pci-testdev'\n");
unittest(of_unittest_pci_dev_num == of_unittest_pci_child_num,
"Child device number %d is not expected %d", of_unittest_pci_child_num,
of_unittest_pci_dev_num);
platform_driver_unregister(&unittest_pci_driver);
pci_unregister_driver(&testdrv_driver);
while ((pdev = pci_get_device(PCI_VENDOR_ID_REDHAT, 0x5, pdev)) != NULL)
of_unittest_pci_node_verify(pdev, false);
if (pdev)
pci_dev_put(pdev);
}
#else
static inline __init void of_unittest_overlay_high_level(void) {}
static inline __init void of_unittest_pci_node(void) { }
#endif
static int __init of_unittest(void)
{
struct device_node *np;
int res;
pr_info("start of unittest - you will see error messages\n");
/* Taint the kernel so we know we've run tests. */
add_taint(TAINT_TEST, LOCKDEP_STILL_OK);
/* adding data for unittest */
if (IS_ENABLED(CONFIG_UML))
unittest_unflatten_overlay_base();
res = unittest_data_add();
if (res)
return res;
if (!of_aliases)
of_aliases = of_find_node_by_path("/aliases");
np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
if (!np) {
pr_info("No testcase data in device tree; not running tests\n");
return 0;
}
of_node_put(np);
of_unittest_check_tree_linkage();
of_unittest_check_phandles();
of_unittest_find_node_by_name();
of_unittest_dynamic();
of_unittest_parse_phandle_with_args();
of_unittest_parse_phandle_with_args_map();
of_unittest_printf();
of_unittest_property_string();
of_unittest_property_copy();
of_unittest_changeset();
of_unittest_parse_interrupts();
of_unittest_parse_interrupts_extended();
of_unittest_dma_get_max_cpu_address();
of_unittest_parse_dma_ranges();
of_unittest_pci_dma_ranges();
of_unittest_bus_ranges();
of_unittest_bus_3cell_ranges();
of_unittest_reg();
of_unittest_match_node();
of_unittest_platform_populate();
of_unittest_overlay();
of_unittest_lifecycle();
of_unittest_pci_node();
/* Double check linkage after removing testcase data */
of_unittest_check_tree_linkage();
of_unittest_overlay_high_level();
pr_info("end of unittest - %i passed, %i failed\n",
unittest_results.passed, unittest_results.failed);
return 0;
}
late_initcall(of_unittest);
| linux-master | drivers/of/unittest.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Functions for working with device tree overlays
*
* Copyright (C) 2012 Pantelis Antoniou <[email protected]>
* Copyright (C) 2012 Texas Instruments Inc.
*/
#define pr_fmt(fmt) "OF: overlay: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_fdt.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/libfdt.h>
#include <linux/err.h>
#include <linux/idr.h>
#include "of_private.h"
/**
* struct target - info about current target node as recursing through overlay
* @np: node where current level of overlay will be applied
* @in_livetree: @np is a node in the live devicetree
*
* Used in the algorithm to create the portion of a changeset that describes
* an overlay fragment, which is a devicetree subtree. Initially @np is a node
* in the live devicetree where the overlay subtree is targeted to be grafted
* into. When recursing to the next level of the overlay subtree, the target
* also recurses to the next level of the live devicetree, as long as overlay
* subtree node also exists in the live devicetree. When a node in the overlay
* subtree does not exist at the same level in the live devicetree, target->np
* points to a newly allocated node, and all subsequent targets in the subtree
* will be newly allocated nodes.
*/
struct target {
struct device_node *np;
bool in_livetree;
};
/**
* struct fragment - info about fragment nodes in overlay expanded device tree
* @target: target of the overlay operation
* @overlay: pointer to the __overlay__ node
*/
struct fragment {
struct device_node *overlay;
struct device_node *target;
};
/**
* struct overlay_changeset
* @id: changeset identifier
* @ovcs_list: list on which we are located
* @new_fdt: Memory allocated to hold unflattened aligned FDT
* @overlay_mem: the memory chunk that contains @overlay_root
* @overlay_root: expanded device tree that contains the fragment nodes
* @notify_state: most recent notify action used on overlay
* @count: count of fragment structures
* @fragments: fragment nodes in the overlay expanded device tree
* @symbols_fragment: last element of @fragments[] is the __symbols__ node
* @cset: changeset to apply fragments to live device tree
*/
struct overlay_changeset {
int id;
struct list_head ovcs_list;
const void *new_fdt;
const void *overlay_mem;
struct device_node *overlay_root;
enum of_overlay_notify_action notify_state;
int count;
struct fragment *fragments;
bool symbols_fragment;
struct of_changeset cset;
};
/* flags are sticky - once set, do not reset */
static int devicetree_state_flags;
#define DTSF_APPLY_FAIL 0x01
#define DTSF_REVERT_FAIL 0x02
/*
* If a changeset apply or revert encounters an error, an attempt will
* be made to undo partial changes, but may fail. If the undo fails
* we do not know the state of the devicetree.
*/
static int devicetree_corrupt(void)
{
return devicetree_state_flags &
(DTSF_APPLY_FAIL | DTSF_REVERT_FAIL);
}
static int build_changeset_next_level(struct overlay_changeset *ovcs,
struct target *target, const struct device_node *overlay_node);
/*
* of_resolve_phandles() finds the largest phandle in the live tree.
* of_overlay_apply() may add a larger phandle to the live tree.
* Do not allow race between two overlays being applied simultaneously:
* mutex_lock(&of_overlay_phandle_mutex)
* of_resolve_phandles()
* of_overlay_apply()
* mutex_unlock(&of_overlay_phandle_mutex)
*/
static DEFINE_MUTEX(of_overlay_phandle_mutex);
void of_overlay_mutex_lock(void)
{
mutex_lock(&of_overlay_phandle_mutex);
}
void of_overlay_mutex_unlock(void)
{
mutex_unlock(&of_overlay_phandle_mutex);
}
static LIST_HEAD(ovcs_list);
static DEFINE_IDR(ovcs_idr);
static BLOCKING_NOTIFIER_HEAD(overlay_notify_chain);
/**
* of_overlay_notifier_register() - Register notifier for overlay operations
* @nb: Notifier block to register
*
* Register for notification on overlay operations on device tree nodes. The
 * reported actions are defined by &enum of_overlay_notify_action. The notifier callback
* furthermore receives a pointer to the affected device tree node.
*
* Note that a notifier callback is not supposed to store pointers to a device
* tree node or its content beyond @OF_OVERLAY_POST_REMOVE corresponding to the
* respective node it received.
*/
int of_overlay_notifier_register(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&overlay_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(of_overlay_notifier_register);
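/*
 * A minimal callback sketch (my_target_ok() is a hypothetical helper; see
 * of_notify() in drivers/of/unittest.c for a complete in-tree example):
 *
 *	static int my_overlay_notify(struct notifier_block *nb,
 *				     unsigned long action, void *arg)
 *	{
 *		struct of_overlay_notify_data *nd = arg;
 *
 *		if (action == OF_OVERLAY_PRE_APPLY && !my_target_ok(nd->target))
 *			return notifier_from_errno(-EBUSY);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_overlay_notify,
 *	};
 *
 *	err = of_overlay_notifier_register(&my_nb);
 */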
/**
* of_overlay_notifier_unregister() - Unregister notifier for overlay operations
* @nb: Notifier block to unregister
*/
int of_overlay_notifier_unregister(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&overlay_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(of_overlay_notifier_unregister);
static int overlay_notify(struct overlay_changeset *ovcs,
enum of_overlay_notify_action action)
{
struct of_overlay_notify_data nd;
int i, ret;
ovcs->notify_state = action;
for (i = 0; i < ovcs->count; i++) {
struct fragment *fragment = &ovcs->fragments[i];
nd.target = fragment->target;
nd.overlay = fragment->overlay;
ret = blocking_notifier_call_chain(&overlay_notify_chain,
action, &nd);
if (notifier_to_errno(ret)) {
ret = notifier_to_errno(ret);
pr_err("overlay changeset %s notifier error %d, target: %pOF\n",
of_overlay_action_name(action), ret, nd.target);
return ret;
}
}
return 0;
}
/*
* The values of properties in the "/__symbols__" node are paths in
* the ovcs->overlay_root. When duplicating the properties, the paths
* need to be adjusted to be the correct path for the live device tree.
*
* The paths refer to a node in the subtree of a fragment node's "__overlay__"
* node, for example "/fragment@0/__overlay__/symbol_path_tail",
* where symbol_path_tail can be a single node or it may be a multi-node path.
*
* The duplicated property value will be modified by replacing the
* "/fragment_name/__overlay/" portion of the value with the target
* path from the fragment node.
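 *
 * For example (illustrative values): if the fragment's target node is
 * /soc/i2c@100 and a symbol value is "/fragment@0/__overlay__/eeprom",
 * the duplicated value becomes "/soc/i2c@100/eeprom".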
*/
static struct property *dup_and_fixup_symbol_prop(
struct overlay_changeset *ovcs, const struct property *prop)
{
struct fragment *fragment;
struct property *new_prop;
struct device_node *fragment_node;
struct device_node *overlay_node;
const char *path;
const char *path_tail;
const char *target_path;
int k;
int overlay_name_len;
int path_len;
int path_tail_len;
int target_path_len;
if (!prop->value)
return NULL;
if (strnlen(prop->value, prop->length) >= prop->length)
return NULL;
path = prop->value;
path_len = strlen(path);
if (path_len < 1)
return NULL;
fragment_node = __of_find_node_by_path(ovcs->overlay_root, path + 1);
overlay_node = __of_find_node_by_path(fragment_node, "__overlay__/");
of_node_put(fragment_node);
of_node_put(overlay_node);
for (k = 0; k < ovcs->count; k++) {
fragment = &ovcs->fragments[k];
if (fragment->overlay == overlay_node)
break;
}
if (k >= ovcs->count)
return NULL;
overlay_name_len = snprintf(NULL, 0, "%pOF", fragment->overlay);
if (overlay_name_len > path_len)
return NULL;
path_tail = path + overlay_name_len;
path_tail_len = strlen(path_tail);
target_path = kasprintf(GFP_KERNEL, "%pOF", fragment->target);
if (!target_path)
return NULL;
target_path_len = strlen(target_path);
new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
if (!new_prop)
goto err_free_target_path;
new_prop->name = kstrdup(prop->name, GFP_KERNEL);
new_prop->length = target_path_len + path_tail_len + 1;
new_prop->value = kzalloc(new_prop->length, GFP_KERNEL);
if (!new_prop->name || !new_prop->value)
goto err_free_new_prop;
strcpy(new_prop->value, target_path);
strcpy(new_prop->value + target_path_len, path_tail);
of_property_set_flag(new_prop, OF_DYNAMIC);
kfree(target_path);
return new_prop;
err_free_new_prop:
kfree(new_prop->name);
kfree(new_prop->value);
kfree(new_prop);
err_free_target_path:
kfree(target_path);
return NULL;
}
/**
* add_changeset_property() - add @overlay_prop to overlay changeset
* @ovcs: overlay changeset
* @target: where @overlay_prop will be placed
* @overlay_prop: property to add or update, from overlay tree
* @is_symbols_prop: 1 if @overlay_prop is from node "/__symbols__"
*
* If @overlay_prop does not already exist in live devicetree, add changeset
* entry to add @overlay_prop in @target, else add changeset entry to update
* value of @overlay_prop.
*
* @target may be either in the live devicetree or in a new subtree that
* is contained in the changeset.
*
* Some special properties are not added or updated (no error returned):
* "name", "phandle", "linux,phandle".
*
* Properties "#address-cells" and "#size-cells" are not updated if they
* are already in the live tree; in that case the values in the overlay
* must match the values in the live tree.
*
* Update of property in symbols node is not allowed.
*
* Return: 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if
* invalid @overlay.
*/
static int add_changeset_property(struct overlay_changeset *ovcs,
struct target *target, struct property *overlay_prop,
bool is_symbols_prop)
{
struct property *new_prop = NULL, *prop;
int ret = 0;
if (target->in_livetree)
if (!of_prop_cmp(overlay_prop->name, "name") ||
!of_prop_cmp(overlay_prop->name, "phandle") ||
!of_prop_cmp(overlay_prop->name, "linux,phandle"))
return 0;
if (target->in_livetree)
prop = of_find_property(target->np, overlay_prop->name, NULL);
else
prop = NULL;
if (prop) {
if (!of_prop_cmp(prop->name, "#address-cells")) {
if (!of_prop_val_eq(prop, overlay_prop)) {
pr_err("ERROR: changing value of #address-cells is not allowed in %pOF\n",
target->np);
ret = -EINVAL;
}
return ret;
} else if (!of_prop_cmp(prop->name, "#size-cells")) {
if (!of_prop_val_eq(prop, overlay_prop)) {
pr_err("ERROR: changing value of #size-cells is not allowed in %pOF\n",
target->np);
ret = -EINVAL;
}
return ret;
}
}
if (is_symbols_prop) {
if (prop)
return -EINVAL;
new_prop = dup_and_fixup_symbol_prop(ovcs, overlay_prop);
} else {
new_prop = __of_prop_dup(overlay_prop, GFP_KERNEL);
}
if (!new_prop)
return -ENOMEM;
if (!prop) {
if (!target->in_livetree) {
new_prop->next = target->np->deadprops;
target->np->deadprops = new_prop;
}
ret = of_changeset_add_property(&ovcs->cset, target->np,
new_prop);
} else {
ret = of_changeset_update_property(&ovcs->cset, target->np,
new_prop);
}
if (!of_node_check_flag(target->np, OF_OVERLAY))
pr_err("WARNING: memory leak will occur if overlay removed, property: %pOF/%s\n",
target->np, new_prop->name);
if (ret) {
kfree(new_prop->name);
kfree(new_prop->value);
kfree(new_prop);
}
return ret;
}
/**
* add_changeset_node() - add @node (and children) to overlay changeset
* @ovcs: overlay changeset
* @target: where @node will be placed in live tree or changeset
* @node: node from within overlay device tree fragment
*
* If @node does not already exist in @target, add changeset entry
* to add @node in @target.
*
* If @node already exists in @target, and the existing node has
* a phandle, the overlay node is not allowed to have a phandle.
*
* If @node has child nodes, add the children recursively via
* build_changeset_next_level().
*
* NOTE_1: A live devicetree created from a flattened device tree (FDT) will
* not contain the full path in node->full_name. Thus an overlay
* created from an FDT also will not contain the full path in
* node->full_name. However, a live devicetree created from Open
* Firmware may have the full path in node->full_name.
*
* add_changeset_node() follows the FDT convention and does not include
* the full path in node->full_name. Even though it expects the overlay
* to not contain the full path, it uses kbasename() to remove the
* full path should it exist. It also uses kbasename() in comparisons
* to nodes in the live devicetree so that it can apply an overlay to
* a live devicetree created from Open Firmware.
*
* NOTE_2: Multiple mods of created nodes not supported.
*
* Return: 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if
* invalid @overlay.
*/
static int add_changeset_node(struct overlay_changeset *ovcs,
struct target *target, struct device_node *node)
{
const char *node_kbasename;
const __be32 *phandle;
struct device_node *tchild;
struct target target_child;
int ret = 0, size;
node_kbasename = kbasename(node->full_name);
for_each_child_of_node(target->np, tchild)
if (!of_node_cmp(node_kbasename, kbasename(tchild->full_name)))
break;
if (!tchild) {
tchild = __of_node_dup(NULL, node_kbasename);
if (!tchild)
return -ENOMEM;
tchild->parent = target->np;
tchild->name = __of_get_property(node, "name", NULL);
if (!tchild->name)
tchild->name = "<NULL>";
/* ignore obsolete "linux,phandle" */
phandle = __of_get_property(node, "phandle", &size);
if (phandle && (size == 4))
tchild->phandle = be32_to_cpup(phandle);
of_node_set_flag(tchild, OF_OVERLAY);
ret = of_changeset_attach_node(&ovcs->cset, tchild);
if (ret)
return ret;
target_child.np = tchild;
target_child.in_livetree = false;
ret = build_changeset_next_level(ovcs, &target_child, node);
of_node_put(tchild);
return ret;
}
if (node->phandle && tchild->phandle) {
ret = -EINVAL;
} else {
target_child.np = tchild;
target_child.in_livetree = target->in_livetree;
ret = build_changeset_next_level(ovcs, &target_child, node);
}
of_node_put(tchild);
return ret;
}
/**
* build_changeset_next_level() - add level of overlay changeset
* @ovcs: overlay changeset
* @target: where to place @overlay_node in live tree
* @overlay_node: node from within an overlay device tree fragment
*
* Add the properties (if any) and nodes (if any) from @overlay_node to the
* @ovcs->cset changeset. If an added node has child nodes, they will
* be added recursively.
*
* Do not allow symbols node to have any children.
*
* Return: 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if
* invalid @overlay_node.
*/
static int build_changeset_next_level(struct overlay_changeset *ovcs,
struct target *target, const struct device_node *overlay_node)
{
struct device_node *child;
struct property *prop;
int ret;
for_each_property_of_node(overlay_node, prop) {
ret = add_changeset_property(ovcs, target, prop, 0);
if (ret) {
pr_debug("Failed to apply prop @%pOF/%s, err=%d\n",
target->np, prop->name, ret);
return ret;
}
}
for_each_child_of_node(overlay_node, child) {
ret = add_changeset_node(ovcs, target, child);
if (ret) {
pr_debug("Failed to apply node @%pOF/%pOFn, err=%d\n",
target->np, child, ret);
of_node_put(child);
return ret;
}
}
return 0;
}
/*
* Add the properties from __overlay__ node to the @ovcs->cset changeset.
*/
static int build_changeset_symbols_node(struct overlay_changeset *ovcs,
struct target *target,
const struct device_node *overlay_symbols_node)
{
struct property *prop;
int ret;
for_each_property_of_node(overlay_symbols_node, prop) {
ret = add_changeset_property(ovcs, target, prop, 1);
if (ret) {
pr_debug("Failed to apply symbols prop @%pOF/%s, err=%d\n",
target->np, prop->name, ret);
return ret;
}
}
return 0;
}
static int find_dup_cset_node_entry(struct overlay_changeset *ovcs,
struct of_changeset_entry *ce_1)
{
struct of_changeset_entry *ce_2;
char *fn_1, *fn_2;
int node_path_match;
if (ce_1->action != OF_RECONFIG_ATTACH_NODE &&
ce_1->action != OF_RECONFIG_DETACH_NODE)
return 0;
ce_2 = ce_1;
list_for_each_entry_continue(ce_2, &ovcs->cset.entries, node) {
if ((ce_2->action != OF_RECONFIG_ATTACH_NODE &&
ce_2->action != OF_RECONFIG_DETACH_NODE) ||
of_node_cmp(ce_1->np->full_name, ce_2->np->full_name))
continue;
fn_1 = kasprintf(GFP_KERNEL, "%pOF", ce_1->np);
fn_2 = kasprintf(GFP_KERNEL, "%pOF", ce_2->np);
node_path_match = !fn_1 || !fn_2 || !strcmp(fn_1, fn_2);
kfree(fn_1);
kfree(fn_2);
if (node_path_match) {
pr_err("ERROR: multiple fragments add and/or delete node %pOF\n",
ce_1->np);
return -EINVAL;
}
}
return 0;
}
static int find_dup_cset_prop(struct overlay_changeset *ovcs,
struct of_changeset_entry *ce_1)
{
struct of_changeset_entry *ce_2;
char *fn_1, *fn_2;
int node_path_match;
if (ce_1->action != OF_RECONFIG_ADD_PROPERTY &&
ce_1->action != OF_RECONFIG_REMOVE_PROPERTY &&
ce_1->action != OF_RECONFIG_UPDATE_PROPERTY)
return 0;
ce_2 = ce_1;
list_for_each_entry_continue(ce_2, &ovcs->cset.entries, node) {
if ((ce_2->action != OF_RECONFIG_ADD_PROPERTY &&
ce_2->action != OF_RECONFIG_REMOVE_PROPERTY &&
ce_2->action != OF_RECONFIG_UPDATE_PROPERTY) ||
of_node_cmp(ce_1->np->full_name, ce_2->np->full_name))
continue;
fn_1 = kasprintf(GFP_KERNEL, "%pOF", ce_1->np);
fn_2 = kasprintf(GFP_KERNEL, "%pOF", ce_2->np);
node_path_match = !fn_1 || !fn_2 || !strcmp(fn_1, fn_2);
kfree(fn_1);
kfree(fn_2);
if (node_path_match &&
!of_prop_cmp(ce_1->prop->name, ce_2->prop->name)) {
pr_err("ERROR: multiple fragments add, update, and/or delete property %pOF/%s\n",
ce_1->np, ce_1->prop->name);
return -EINVAL;
}
}
return 0;
}
/**
* changeset_dup_entry_check() - check for duplicate entries
* @ovcs: Overlay changeset
*
* Check changeset @ovcs->cset for multiple {add or delete} node entries for
* the same node or duplicate {add, delete, or update} property entries
* for the same property.
*
* Return: 0 on success, or -EINVAL if duplicate changeset entry found.
*/
static int changeset_dup_entry_check(struct overlay_changeset *ovcs)
{
struct of_changeset_entry *ce_1;
int dup_entry = 0;
list_for_each_entry(ce_1, &ovcs->cset.entries, node) {
dup_entry |= find_dup_cset_node_entry(ovcs, ce_1);
dup_entry |= find_dup_cset_prop(ovcs, ce_1);
}
return dup_entry ? -EINVAL : 0;
}
/**
* build_changeset() - populate overlay changeset in @ovcs from @ovcs->fragments
* @ovcs: Overlay changeset
*
* Create changeset @ovcs->cset to contain the nodes and properties of the
* overlay device tree fragments in @ovcs->fragments[]. If an error occurs,
* any portions of the changeset that were successfully created will remain
* in @ovcs->cset.
*
* Return: 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if
* invalid overlay in @ovcs->fragments[].
*/
static int build_changeset(struct overlay_changeset *ovcs)
{
struct fragment *fragment;
struct target target;
int fragments_count, i, ret;
/*
* if there is a symbols fragment in ovcs->fragments[i] it is
* the final element in the array
*/
if (ovcs->symbols_fragment)
fragments_count = ovcs->count - 1;
else
fragments_count = ovcs->count;
for (i = 0; i < fragments_count; i++) {
fragment = &ovcs->fragments[i];
target.np = fragment->target;
target.in_livetree = true;
ret = build_changeset_next_level(ovcs, &target,
fragment->overlay);
if (ret) {
pr_debug("fragment apply failed '%pOF'\n",
fragment->target);
return ret;
}
}
if (ovcs->symbols_fragment) {
fragment = &ovcs->fragments[ovcs->count - 1];
target.np = fragment->target;
target.in_livetree = true;
ret = build_changeset_symbols_node(ovcs, &target,
fragment->overlay);
if (ret) {
pr_debug("symbols fragment apply failed '%pOF'\n",
fragment->target);
return ret;
}
}
return changeset_dup_entry_check(ovcs);
}
/*
* Find the target node using a number of different strategies
* in order of preference:
*
* 1) "target" property containing the phandle of the target
* 2) "target-path" property containing the path of the target
*/
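/*
 * For illustration (labels and paths are made up), the two supported forms
 * in an overlay fragment look like:
 *
 *	fragment@0 {
 *		target = <&i2c1>;		// strategy 1: phandle
 *		__overlay__ { ... };
 *	};
 *
 *	fragment@1 {
 *		target-path = "/soc/spi@0";	// strategy 2: path
 *		__overlay__ { ... };
 *	};
 */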
static struct device_node *find_target(struct device_node *info_node,
struct device_node *target_base)
{
struct device_node *node;
char *target_path;
const char *path;
u32 val;
int ret;
ret = of_property_read_u32(info_node, "target", &val);
if (!ret) {
node = of_find_node_by_phandle(val);
if (!node)
pr_err("find target, node: %pOF, phandle 0x%x not found\n",
info_node, val);
return node;
}
ret = of_property_read_string(info_node, "target-path", &path);
if (!ret) {
if (target_base) {
target_path = kasprintf(GFP_KERNEL, "%pOF%s", target_base, path);
if (!target_path)
return NULL;
node = of_find_node_by_path(target_path);
if (!node) {
pr_err("find target, node: %pOF, path '%s' not found\n",
info_node, target_path);
}
kfree(target_path);
} else {
node = of_find_node_by_path(path);
if (!node) {
pr_err("find target, node: %pOF, path '%s' not found\n",
info_node, path);
}
}
return node;
}
pr_err("find target, node: %pOF, no target property\n", info_node);
return NULL;
}
/**
* init_overlay_changeset() - initialize overlay changeset from overlay tree
* @ovcs: Overlay changeset to build
* @target_base: Pointer to the target node the overlay is applied to
*
* Initialize @ovcs. Populate @ovcs->fragments with node information from
* the top level of @overlay_root. The relevant top level nodes are the
* fragment nodes and the __symbols__ node. Any other top level node will
* be ignored. Populate other @ovcs fields.
*
* Return: 0 on success, -ENOMEM if memory allocation failure, -EINVAL if error
* detected in @overlay_root. On error return, the caller of
* init_overlay_changeset() must call free_overlay_changeset().
*/
static int init_overlay_changeset(struct overlay_changeset *ovcs,
struct device_node *target_base)
{
struct device_node *node, *overlay_node;
struct fragment *fragment;
struct fragment *fragments;
int cnt, ret;
/*
* None of the resources allocated by this function will be freed in
* the error paths. Instead the caller of this function is required
* to call free_overlay_changeset() (which will free the resources)
* if error return.
*/
/*
* Warn for some issues. Cannot return -EINVAL for these until
* of_unittest_apply_overlay() is fixed to pass these checks.
*/
if (!of_node_check_flag(ovcs->overlay_root, OF_DYNAMIC))
pr_debug("%s() ovcs->overlay_root is not dynamic\n", __func__);
if (!of_node_check_flag(ovcs->overlay_root, OF_DETACHED))
pr_debug("%s() ovcs->overlay_root is not detached\n", __func__);
if (!of_node_is_root(ovcs->overlay_root))
pr_debug("%s() ovcs->overlay_root is not root\n", __func__);
cnt = 0;
/* fragment nodes */
for_each_child_of_node(ovcs->overlay_root, node) {
overlay_node = of_get_child_by_name(node, "__overlay__");
if (overlay_node) {
cnt++;
of_node_put(overlay_node);
}
}
node = of_get_child_by_name(ovcs->overlay_root, "__symbols__");
if (node) {
cnt++;
of_node_put(node);
}
fragments = kcalloc(cnt, sizeof(*fragments), GFP_KERNEL);
if (!fragments) {
ret = -ENOMEM;
goto err_out;
}
ovcs->fragments = fragments;
cnt = 0;
for_each_child_of_node(ovcs->overlay_root, node) {
overlay_node = of_get_child_by_name(node, "__overlay__");
if (!overlay_node)
continue;
fragment = &fragments[cnt];
fragment->overlay = overlay_node;
fragment->target = find_target(node, target_base);
if (!fragment->target) {
of_node_put(fragment->overlay);
ret = -EINVAL;
of_node_put(node);
goto err_out;
}
cnt++;
}
/*
* if there is a symbols fragment in ovcs->fragments[i] it is
* the final element in the array
*/
node = of_get_child_by_name(ovcs->overlay_root, "__symbols__");
if (node) {
ovcs->symbols_fragment = 1;
fragment = &fragments[cnt];
fragment->overlay = node;
fragment->target = of_find_node_by_path("/__symbols__");
if (!fragment->target) {
pr_err("symbols in overlay, but not in live tree\n");
ret = -EINVAL;
of_node_put(node);
goto err_out;
}
cnt++;
}
if (!cnt) {
pr_err("no fragments or symbols in overlay\n");
ret = -EINVAL;
goto err_out;
}
ovcs->count = cnt;
return 0;
err_out:
pr_err("%s() failed, ret = %d\n", __func__, ret);
return ret;
}
static void free_overlay_changeset(struct overlay_changeset *ovcs)
{
int i;
if (ovcs->cset.entries.next)
of_changeset_destroy(&ovcs->cset);
if (ovcs->id) {
idr_remove(&ovcs_idr, ovcs->id);
list_del(&ovcs->ovcs_list);
ovcs->id = 0;
}
for (i = 0; i < ovcs->count; i++) {
of_node_put(ovcs->fragments[i].target);
of_node_put(ovcs->fragments[i].overlay);
}
kfree(ovcs->fragments);
/*
* There should be no live pointers into ovcs->overlay_mem and
* ovcs->new_fdt due to the policy that overlay notifiers are not
* allowed to retain pointers into the overlay devicetree other
* than during the window from OF_OVERLAY_PRE_APPLY overlay
* notifiers until the OF_OVERLAY_POST_REMOVE overlay notifiers.
*
* A memory leak will occur here if within the window.
*/
if (ovcs->notify_state == OF_OVERLAY_INIT ||
ovcs->notify_state == OF_OVERLAY_POST_REMOVE) {
kfree(ovcs->overlay_mem);
kfree(ovcs->new_fdt);
}
kfree(ovcs);
}
/*
* internal documentation
*
* of_overlay_apply() - Create and apply an overlay changeset
* @ovcs: overlay changeset
* @base: pointer to the target node the overlay is applied to
*
* Creates and applies an overlay changeset.
*
* If an error is returned by an overlay changeset pre-apply notifier
* then no further overlay changeset pre-apply notifier will be called.
*
* If an error is returned by an overlay changeset post-apply notifier
* then no further overlay changeset post-apply notifier will be called.
*
* If more than one notifier returns an error, then the last notifier
* error to occur is returned.
*
* If an error occurred while applying the overlay changeset, then an
* attempt is made to revert any changes that were made to the
* device tree. If there were any errors during the revert attempt
* then the state of the device tree can not be determined, and any
* following attempt to apply or remove an overlay changeset will be
* refused.
*
* Returns 0 on success, or a negative error number. On error return,
* the caller of of_overlay_apply() must call free_overlay_changeset().
*/
static int of_overlay_apply(struct overlay_changeset *ovcs,
struct device_node *base)
{
int ret = 0, ret_revert, ret_tmp;
ret = of_resolve_phandles(ovcs->overlay_root);
if (ret)
goto out;
ret = init_overlay_changeset(ovcs, base);
if (ret)
goto out;
ret = overlay_notify(ovcs, OF_OVERLAY_PRE_APPLY);
if (ret)
goto out;
ret = build_changeset(ovcs);
if (ret)
goto out;
ret_revert = 0;
ret = __of_changeset_apply_entries(&ovcs->cset, &ret_revert);
if (ret) {
if (ret_revert) {
pr_debug("overlay changeset revert error %d\n",
ret_revert);
devicetree_state_flags |= DTSF_APPLY_FAIL;
}
goto out;
}
ret = __of_changeset_apply_notify(&ovcs->cset);
if (ret)
pr_err("overlay apply changeset entry notify error %d\n", ret);
/* notify failure is not fatal, continue */
ret_tmp = overlay_notify(ovcs, OF_OVERLAY_POST_APPLY);
if (ret_tmp)
if (!ret)
ret = ret_tmp;
out:
pr_debug("%s() err=%d\n", __func__, ret);
return ret;
}
/*
* of_overlay_fdt_apply() - Create and apply an overlay changeset
* @overlay_fdt: pointer to overlay FDT
* @overlay_fdt_size: number of bytes in @overlay_fdt
* @ret_ovcs_id: pointer for returning created changeset id
* @base: pointer for the target node to apply overlay
*
* Creates and applies an overlay changeset.
*
* See of_overlay_apply() for important behavior information.
*
* Return: 0 on success, or a negative error number. *@ret_ovcs_id is set to
* the value of overlay changeset id, which can be passed to of_overlay_remove()
* to remove the overlay.
*
* On error return, the changeset may be partially applied. This is especially
* likely if an OF_OVERLAY_POST_APPLY notifier returns an error. In this case
* the caller should call of_overlay_remove() with the value in *@ret_ovcs_id.
*/
int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
int *ret_ovcs_id, struct device_node *base)
{
void *new_fdt;
void *new_fdt_align;
void *overlay_mem;
int ret;
u32 size;
struct overlay_changeset *ovcs;
*ret_ovcs_id = 0;
if (devicetree_corrupt()) {
pr_err("devicetree state suspect, refuse to apply overlay\n");
return -EBUSY;
}
if (overlay_fdt_size < sizeof(struct fdt_header) ||
fdt_check_header(overlay_fdt)) {
pr_err("Invalid overlay_fdt header\n");
return -EINVAL;
}
size = fdt_totalsize(overlay_fdt);
if (overlay_fdt_size < size)
return -EINVAL;
ovcs = kzalloc(sizeof(*ovcs), GFP_KERNEL);
if (!ovcs)
return -ENOMEM;
of_overlay_mutex_lock();
mutex_lock(&of_mutex);
/*
* ovcs->notify_state must be set to OF_OVERLAY_INIT before allocating
* ovcs resources, implicitly set by kzalloc() of ovcs
*/
ovcs->id = idr_alloc(&ovcs_idr, ovcs, 1, 0, GFP_KERNEL);
if (ovcs->id <= 0) {
ret = ovcs->id;
goto err_free_ovcs;
}
INIT_LIST_HEAD(&ovcs->ovcs_list);
list_add_tail(&ovcs->ovcs_list, &ovcs_list);
of_changeset_init(&ovcs->cset);
/*
* Must create permanent copy of FDT because of_fdt_unflatten_tree()
* will create pointers to the passed in FDT in the unflattened tree.
*/
new_fdt = kmalloc(size + FDT_ALIGN_SIZE, GFP_KERNEL);
if (!new_fdt) {
ret = -ENOMEM;
goto err_free_ovcs;
}
ovcs->new_fdt = new_fdt;
new_fdt_align = PTR_ALIGN(new_fdt, FDT_ALIGN_SIZE);
memcpy(new_fdt_align, overlay_fdt, size);
overlay_mem = of_fdt_unflatten_tree(new_fdt_align, NULL,
&ovcs->overlay_root);
if (!overlay_mem) {
pr_err("unable to unflatten overlay_fdt\n");
ret = -EINVAL;
goto err_free_ovcs;
}
ovcs->overlay_mem = overlay_mem;
ret = of_overlay_apply(ovcs, base);
/*
* If of_overlay_apply() returned an error, calling free_overlay_changeset()
* may result in a memory leak if the apply partly succeeded, so do NOT
* goto err_free_ovcs. Instead, the caller of of_overlay_fdt_apply()
* can call of_overlay_remove().
*/
*ret_ovcs_id = ovcs->id;
goto out_unlock;
err_free_ovcs:
free_overlay_changeset(ovcs);
out_unlock:
mutex_unlock(&of_mutex);
of_overlay_mutex_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(of_overlay_fdt_apply);
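/*
 * Usage sketch (illustrative; error handling is abbreviated and the blob
 * variables are assumed to hold a valid overlay FDT):
 *
 *	int ovcs_id;
 *	int ret;
 *
 *	ret = of_overlay_fdt_apply(overlay_blob, overlay_blob_size,
 *				   &ovcs_id, NULL);
 *	if (ret)
 *		return ret;
 *	...
 *	ret = of_overlay_remove(&ovcs_id);
 */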
/*
* Find @np in @tree.
*
* Returns 1 if @np is @tree or is contained in @tree, else 0
*/
static int find_node(struct device_node *tree, struct device_node *np)
{
struct device_node *child;
if (tree == np)
return 1;
for_each_child_of_node(tree, child) {
if (find_node(child, np)) {
of_node_put(child);
return 1;
}
}
return 0;
}
/*
* Is @remove_ce_node a child of, a parent of, or the same as any
* node in an overlay changeset applied later (i.e. more topmost) than @remove_ovcs?
*
* Returns 1 if found, else 0
*/
static int node_overlaps_later_cs(struct overlay_changeset *remove_ovcs,
struct device_node *remove_ce_node)
{
struct overlay_changeset *ovcs;
struct of_changeset_entry *ce;
list_for_each_entry_reverse(ovcs, &ovcs_list, ovcs_list) {
if (ovcs == remove_ovcs)
break;
list_for_each_entry(ce, &ovcs->cset.entries, node) {
if (find_node(ce->np, remove_ce_node)) {
pr_err("%s: #%d overlaps with #%d @%pOF\n",
__func__, remove_ovcs->id, ovcs->id,
remove_ce_node);
return 1;
}
if (find_node(remove_ce_node, ce->np)) {
pr_err("%s: #%d overlaps with #%d @%pOF\n",
__func__, remove_ovcs->id, ovcs->id,
remove_ce_node);
return 1;
}
}
}
return 0;
}
/*
* We can safely remove the overlay only if it's the topmost one.
* Newly applied overlays are inserted at the tail of the overlay list,
* so the topmost overlay is the one that is closest to the tail.
*
* The topmost check is done by exploiting this property. For each
* affected device node in the changeset entry list we check if this
* overlay is the one closest to the tail. If another overlay has affected
* this device node and is closest to the tail, then removal is not permitted.
*/
static int overlay_removal_is_ok(struct overlay_changeset *remove_ovcs)
{
struct of_changeset_entry *remove_ce;
list_for_each_entry(remove_ce, &remove_ovcs->cset.entries, node) {
if (node_overlaps_later_cs(remove_ovcs, remove_ce->np)) {
pr_err("overlay #%d is not topmost\n", remove_ovcs->id);
return 0;
}
}
return 1;
}
/**
* of_overlay_remove() - Revert and free an overlay changeset
* @ovcs_id: Pointer to overlay changeset id
*
* Removes an overlay if it is permissible. @ovcs_id was previously returned
* by of_overlay_fdt_apply().
*
* If an error occurred while attempting to revert the overlay changeset,
* then an attempt is made to re-apply any changeset entry that was
* reverted. If an error occurs on re-apply then the state of the device
* tree can not be determined, and any following attempt to apply or remove
* an overlay changeset will be refused.
*
* A non-zero return value means the changeset was not reverted if the error is from:
* - parameter checks
* - overlay changeset pre-remove notifier
* - overlay changeset entry revert
*
* If an error is returned by an overlay changeset pre-remove notifier
* then no further overlay changeset pre-remove notifier will be called.
*
* If more than one notifier returns an error, then the last notifier
* error to occur is returned.
*
* A non-zero return value means the changeset was reverted if the error is from:
* - overlay changeset entry notifier
* - overlay changeset post-remove notifier
*
* If an error is returned by an overlay changeset post-remove notifier
* then no further overlay changeset post-remove notifier will be called.
*
* Return: 0 on success, or a negative error number. *@ovcs_id is set to
* zero after reverting the changeset, even if a subsequent error occurs.
*/
int of_overlay_remove(int *ovcs_id)
{
struct overlay_changeset *ovcs;
int ret, ret_apply, ret_tmp;
if (devicetree_corrupt()) {
pr_err("suspect devicetree state, refuse to remove overlay\n");
ret = -EBUSY;
goto out;
}
mutex_lock(&of_mutex);
ovcs = idr_find(&ovcs_idr, *ovcs_id);
if (!ovcs) {
ret = -ENODEV;
pr_err("remove: Could not find overlay #%d\n", *ovcs_id);
goto err_unlock;
}
if (!overlay_removal_is_ok(ovcs)) {
ret = -EBUSY;
goto err_unlock;
}
ret = overlay_notify(ovcs, OF_OVERLAY_PRE_REMOVE);
if (ret)
goto err_unlock;
ret_apply = 0;
ret = __of_changeset_revert_entries(&ovcs->cset, &ret_apply);
if (ret) {
if (ret_apply)
devicetree_state_flags |= DTSF_REVERT_FAIL;
goto err_unlock;
}
ret = __of_changeset_revert_notify(&ovcs->cset);
if (ret)
pr_err("overlay remove changeset entry notify error %d\n", ret);
/* notify failure is not fatal, continue */
*ovcs_id = 0;
/*
* Note that the overlay memory will be kfree()ed by
* free_overlay_changeset() even if the notifier for
* OF_OVERLAY_POST_REMOVE returns an error.
*/
ret_tmp = overlay_notify(ovcs, OF_OVERLAY_POST_REMOVE);
if (ret_tmp)
if (!ret)
ret = ret_tmp;
free_overlay_changeset(ovcs);
err_unlock:
/*
* If jumped over free_overlay_changeset(), then did not kfree()
* overlay related memory. This is a memory leak unless a subsequent
* of_overlay_remove() of this overlay is successful.
*/
mutex_unlock(&of_mutex);
out:
pr_debug("%s() err=%d\n", __func__, ret);
return ret;
}
EXPORT_SYMBOL_GPL(of_overlay_remove);
/**
* of_overlay_remove_all() - Reverts and frees all overlay changesets
*
* Removes all overlays from the system in the correct order.
*
* Return: 0 on success, or a negative error number
*/
int of_overlay_remove_all(void)
{
struct overlay_changeset *ovcs, *ovcs_n;
int ret;
/* the tail of list is guaranteed to be safe to remove */
list_for_each_entry_safe_reverse(ovcs, ovcs_n, &ovcs_list, ovcs_list) {
ret = of_overlay_remove(&ovcs->id);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(of_overlay_remove_all);
| linux-master | drivers/of/overlay.c |
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "OF: " fmt
#include <linux/device.h>
#include <linux/fwnode.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/logic_pio.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dma-direct.h> /* for bus_dma_region */
#include "of_private.h"
/* Max address size we deal with */
#define OF_MAX_ADDR_CELLS 4
#define OF_CHECK_ADDR_COUNT(na) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
#define OF_CHECK_COUNTS(na, ns) (OF_CHECK_ADDR_COUNT(na) && (ns) > 0)
/* Debug utility */
#ifdef DEBUG
static void of_dump_addr(const char *s, const __be32 *addr, int na)
{
pr_debug("%s", s);
while (na--)
pr_cont(" %08x", be32_to_cpu(*(addr++)));
pr_cont("\n");
}
#else
static void of_dump_addr(const char *s, const __be32 *addr, int na) { }
#endif
/* Callbacks for bus specific translators */
struct of_bus {
const char *name;
const char *addresses;
int (*match)(struct device_node *parent);
void (*count_cells)(struct device_node *child,
int *addrc, int *sizec);
u64 (*map)(__be32 *addr, const __be32 *range,
int na, int ns, int pna);
int (*translate)(__be32 *addr, u64 offset, int na);
bool has_flags;
unsigned int (*get_flags)(const __be32 *addr);
};
/*
* Default translator (generic bus)
*/
static void of_bus_default_count_cells(struct device_node *dev,
int *addrc, int *sizec)
{
if (addrc)
*addrc = of_n_addr_cells(dev);
if (sizec)
*sizec = of_n_size_cells(dev);
}
static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
int na, int ns, int pna)
{
u64 cp, s, da;
cp = of_read_number(range, na);
s = of_read_number(range + na + pna, ns);
da = of_read_number(addr, na);
pr_debug("default map, cp=%llx, s=%llx, da=%llx\n", cp, s, da);
if (da < cp || da >= (cp + s))
return OF_BAD_ADDR;
return da - cp;
}
static int of_bus_default_translate(__be32 *addr, u64 offset, int na)
{
u64 a = of_read_number(addr, na);
memset(addr, 0, na * 4);
a += offset;
if (na > 1)
addr[na - 2] = cpu_to_be32(a >> 32);
addr[na - 1] = cpu_to_be32(a & 0xffffffffu);
return 0;
}
static unsigned int of_bus_default_flags_get_flags(const __be32 *addr)
{
return of_read_number(addr, 1);
}
static unsigned int of_bus_default_get_flags(const __be32 *addr)
{
return IORESOURCE_MEM;
}
#ifdef CONFIG_PCI
static unsigned int of_bus_pci_get_flags(const __be32 *addr)
{
unsigned int flags = 0;
u32 w = be32_to_cpup(addr);
if (!IS_ENABLED(CONFIG_PCI))
return 0;
switch((w >> 24) & 0x03) {
case 0x01:
flags |= IORESOURCE_IO;
break;
case 0x02: /* 32 bits */
flags |= IORESOURCE_MEM;
break;
case 0x03: /* 64 bits */
flags |= IORESOURCE_MEM | IORESOURCE_MEM_64;
break;
}
if (w & 0x40000000)
flags |= IORESOURCE_PREFETCH;
return flags;
}
/*
* PCI bus specific translator
*/
static bool of_node_is_pcie(struct device_node *np)
{
bool is_pcie = of_node_name_eq(np, "pcie");
if (is_pcie)
pr_warn_once("%pOF: Missing device_type\n", np);
return is_pcie;
}
static int of_bus_pci_match(struct device_node *np)
{
/*
* "pciex" is PCI Express
* "vci" is for the /chaos bridge on 1st-gen PCI powermacs
* "ht" is hypertransport
*
* If none of the device_type values match, and the node name is
* "pcie", accept the device as PCI (with a warning).
*/
return of_node_is_type(np, "pci") || of_node_is_type(np, "pciex") ||
of_node_is_type(np, "vci") || of_node_is_type(np, "ht") ||
of_node_is_pcie(np);
}
static void of_bus_pci_count_cells(struct device_node *np,
int *addrc, int *sizec)
{
if (addrc)
*addrc = 3;
if (sizec)
*sizec = 2;
}
static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
int pna)
{
u64 cp, s, da;
unsigned int af, rf;
af = of_bus_pci_get_flags(addr);
rf = of_bus_pci_get_flags(range);
/* Check address type match */
if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO))
return OF_BAD_ADDR;
/* Read address values, skipping high cell */
cp = of_read_number(range + 1, na - 1);
s = of_read_number(range + na + pna, ns);
da = of_read_number(addr + 1, na - 1);
pr_debug("PCI map, cp=%llx, s=%llx, da=%llx\n", cp, s, da);
if (da < cp || da >= (cp + s))
return OF_BAD_ADDR;
return da - cp;
}
static int of_bus_pci_translate(__be32 *addr, u64 offset, int na)
{
return of_bus_default_translate(addr + 1, offset, na - 1);
}
#endif /* CONFIG_PCI */
/*
* of_pci_range_to_resource - Create a resource from an of_pci_range
* @range: the PCI range that describes the resource
* @np: device node where the range belongs to
* @res: pointer to a valid resource that will be updated to
* reflect the values contained in the range.
*
* Returns -EINVAL if the range cannot be converted to resource.
*
* Note that if the range is an IO range, the resource will be converted
* using pci_address_to_pio() which can fail if it is called too early or
* if the range cannot be matched to any host bridge IO space (our case here).
* To guard against that we try to register the IO range first.
* If that fails we know that pci_address_to_pio() will do too.
*/
int of_pci_range_to_resource(struct of_pci_range *range,
struct device_node *np, struct resource *res)
{
int err;
res->flags = range->flags;
res->parent = res->child = res->sibling = NULL;
res->name = np->full_name;
if (res->flags & IORESOURCE_IO) {
unsigned long port;
err = pci_register_io_range(&np->fwnode, range->cpu_addr,
range->size);
if (err)
goto invalid_range;
port = pci_address_to_pio(range->cpu_addr);
if (port == (unsigned long)-1) {
err = -EINVAL;
goto invalid_range;
}
res->start = port;
} else {
if ((sizeof(resource_size_t) < 8) &&
upper_32_bits(range->cpu_addr)) {
err = -EINVAL;
goto invalid_range;
}
res->start = range->cpu_addr;
}
res->end = res->start + range->size - 1;
return 0;
invalid_range:
res->start = (resource_size_t)OF_BAD_ADDR;
res->end = (resource_size_t)OF_BAD_ADDR;
return err;
}
EXPORT_SYMBOL(of_pci_range_to_resource);
/*
* of_range_to_resource - Create a resource from a ranges entry
* @np: device node where the range belongs to
* @index: the 'ranges' index to convert to a resource
* @res: pointer to a valid resource that will be updated to
* reflect the values contained in the range.
*
* Returns -ENOENT if the entry is not found or -EINVAL if the range cannot be
* converted to a resource.
*/
int of_range_to_resource(struct device_node *np, int index, struct resource *res)
{
int ret, i = 0;
struct of_range_parser parser;
struct of_range range;
ret = of_range_parser_init(&parser, np);
if (ret)
return ret;
for_each_of_range(&parser, &range)
if (i++ == index)
return of_pci_range_to_resource(&range, np, res);
return -ENOENT;
}
EXPORT_SYMBOL(of_range_to_resource);
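/*
 * Usage sketch (illustrative; @np and the index are assumptions of the
 * example): fetch the first "ranges" entry of a bridge node as a resource.
 *
 *	struct resource res;
 *
 *	ret = of_range_to_resource(np, 0, &res);
 *	if (ret)
 *		return ret;
 *	pr_debug("bridge window %pR\n", &res);
 */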
/*
* ISA bus specific translator
*/
static int of_bus_isa_match(struct device_node *np)
{
return of_node_name_eq(np, "isa");
}
static void of_bus_isa_count_cells(struct device_node *child,
int *addrc, int *sizec)
{
if (addrc)
*addrc = 2;
if (sizec)
*sizec = 1;
}
static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns,
int pna)
{
u64 cp, s, da;
/* Check address type match */
if ((addr[0] ^ range[0]) & cpu_to_be32(1))
return OF_BAD_ADDR;
/* Read address values, skipping high cell */
cp = of_read_number(range + 1, na - 1);
s = of_read_number(range + na + pna, ns);
da = of_read_number(addr + 1, na - 1);
pr_debug("ISA map, cp=%llx, s=%llx, da=%llx\n", cp, s, da);
if (da < cp || da >= (cp + s))
return OF_BAD_ADDR;
return da - cp;
}
static int of_bus_isa_translate(__be32 *addr, u64 offset, int na)
{
return of_bus_default_translate(addr + 1, offset, na - 1);
}
static unsigned int of_bus_isa_get_flags(const __be32 *addr)
{
unsigned int flags = 0;
u32 w = be32_to_cpup(addr);
if (w & 1)
flags |= IORESOURCE_IO;
else
flags |= IORESOURCE_MEM;
return flags;
}
static int of_bus_default_flags_match(struct device_node *np)
{
return of_bus_n_addr_cells(np) == 3;
}
/*
* Array of bus specific translators
*/
static struct of_bus of_busses[] = {
#ifdef CONFIG_PCI
/* PCI */
{
.name = "pci",
.addresses = "assigned-addresses",
.match = of_bus_pci_match,
.count_cells = of_bus_pci_count_cells,
.map = of_bus_pci_map,
.translate = of_bus_pci_translate,
.has_flags = true,
.get_flags = of_bus_pci_get_flags,
},
#endif /* CONFIG_PCI */
/* ISA */
{
.name = "isa",
.addresses = "reg",
.match = of_bus_isa_match,
.count_cells = of_bus_isa_count_cells,
.map = of_bus_isa_map,
.translate = of_bus_isa_translate,
.has_flags = true,
.get_flags = of_bus_isa_get_flags,
},
/* Default with flags cell */
{
.name = "default-flags",
.addresses = "reg",
.match = of_bus_default_flags_match,
.count_cells = of_bus_default_count_cells,
.map = of_bus_default_map,
.translate = of_bus_default_translate,
.has_flags = true,
.get_flags = of_bus_default_flags_get_flags,
},
/* Default */
{
.name = "default",
.addresses = "reg",
.match = NULL,
.count_cells = of_bus_default_count_cells,
.map = of_bus_default_map,
.translate = of_bus_default_translate,
.get_flags = of_bus_default_get_flags,
},
};
static struct of_bus *of_match_bus(struct device_node *np)
{
int i;
for (i = 0; i < ARRAY_SIZE(of_busses); i++)
if (!of_busses[i].match || of_busses[i].match(np))
return &of_busses[i];
BUG();
return NULL;
}
static int of_empty_ranges_quirk(struct device_node *np)
{
if (IS_ENABLED(CONFIG_PPC)) {
/* To save cycles, we cache the result for global "Mac" setting */
static int quirk_state = -1;
/* PA-SEMI sdc DT bug */
if (of_device_is_compatible(np, "1682m-sdc"))
return true;
/* Make quirk cached */
if (quirk_state < 0)
quirk_state =
of_machine_is_compatible("Power Macintosh") ||
of_machine_is_compatible("MacRISC");
return quirk_state;
}
return false;
}
static int of_translate_one(struct device_node *parent, struct of_bus *bus,
struct of_bus *pbus, __be32 *addr,
int na, int ns, int pna, const char *rprop)
{
const __be32 *ranges;
unsigned int rlen;
int rone;
u64 offset = OF_BAD_ADDR;
/*
* Normally, an absence of a "ranges" property means we are
* crossing a non-translatable boundary, and thus the addresses
* below the current cannot be converted to CPU physical ones.
* Unfortunately, while this is very clear in the spec, it's not
* what Apple understood, and they do have things like /uni-n or
* /ht nodes with no "ranges" property and a lot of perfectly
* usable mapped devices below them. Thus we treat the absence of
* "ranges" as equivalent to an empty "ranges" property which means
* a 1:1 translation at that level. It's up to the caller not to try
* to translate addresses that aren't supposed to be translated in
* the first place. --BenH.
*
* As far as we know, this damage only exists on Apple machines, so
* this code is only enabled on powerpc. --gcl
*
* This quirk also applies for 'dma-ranges' which frequently exist in
* child nodes without 'dma-ranges' in the parent nodes. --RobH
*/
ranges = of_get_property(parent, rprop, &rlen);
if (ranges == NULL && !of_empty_ranges_quirk(parent) &&
strcmp(rprop, "dma-ranges")) {
pr_debug("no ranges; cannot translate\n");
return 1;
}
if (ranges == NULL || rlen == 0) {
offset = of_read_number(addr, na);
memset(addr, 0, pna * 4);
pr_debug("empty ranges; 1:1 translation\n");
goto finish;
}
pr_debug("walking ranges...\n");
/* Now walk through the ranges */
rlen /= 4;
rone = na + pna + ns;
for (; rlen >= rone; rlen -= rone, ranges += rone) {
offset = bus->map(addr, ranges, na, ns, pna);
if (offset != OF_BAD_ADDR)
break;
}
if (offset == OF_BAD_ADDR) {
pr_debug("not found !\n");
return 1;
}
memcpy(addr, ranges + na, 4 * pna);
finish:
of_dump_addr("parent translation for:", addr, pna);
pr_debug("with offset: %llx\n", offset);
/* Translate it into parent bus space */
return pbus->translate(addr, offset, pna);
}
/*
* Translate an address from the device-tree into a CPU physical address,
* this walks up the tree and applies the various bus mappings on the
* way.
*
* Note: We consider crossing any level with #size-cells == 0 to mean
* that translation is impossible (that is, we are not dealing with a value
* that can be mapped to a cpu physical address). This is not really specified
* that way, but this is traditionally the way IBM, at least, does things.
*
* Whenever the translation fails, the *host pointer will be set to the
* device that had registered logical PIO mapping, and the return code is
* relative to that node.
*/
static u64 __of_translate_address(struct device_node *dev,
struct device_node *(*get_parent)(const struct device_node *),
const __be32 *in_addr, const char *rprop,
struct device_node **host)
{
struct device_node *parent = NULL;
struct of_bus *bus, *pbus;
__be32 addr[OF_MAX_ADDR_CELLS];
int na, ns, pna, pns;
u64 result = OF_BAD_ADDR;
pr_debug("** translation for device %pOF **\n", dev);
/* Increase refcount at current level */
of_node_get(dev);
*host = NULL;
/* Get parent & match bus type */
parent = get_parent(dev);
if (parent == NULL)
goto bail;
bus = of_match_bus(parent);
/* Count address cells & copy address locally */
bus->count_cells(dev, &na, &ns);
if (!OF_CHECK_COUNTS(na, ns)) {
pr_debug("Bad cell count for %pOF\n", dev);
goto bail;
}
memcpy(addr, in_addr, na * 4);
pr_debug("bus is %s (na=%d, ns=%d) on %pOF\n",
bus->name, na, ns, parent);
of_dump_addr("translating address:", addr, na);
/* Translate */
for (;;) {
struct logic_pio_hwaddr *iorange;
/* Switch to parent bus */
of_node_put(dev);
dev = parent;
parent = get_parent(dev);
/* If root, we have finished */
if (parent == NULL) {
pr_debug("reached root node\n");
result = of_read_number(addr, na);
break;
}
/*
* For indirectIO device which has no ranges property, get
* the address from reg directly.
*/
iorange = find_io_range_by_fwnode(&dev->fwnode);
if (iorange && (iorange->flags != LOGIC_PIO_CPU_MMIO)) {
result = of_read_number(addr + 1, na - 1);
pr_debug("indirectIO matched(%pOF) 0x%llx\n",
dev, result);
*host = of_node_get(dev);
break;
}
/* Get new parent bus and counts */
pbus = of_match_bus(parent);
pbus->count_cells(dev, &pna, &pns);
if (!OF_CHECK_COUNTS(pna, pns)) {
pr_err("Bad cell count for %pOF\n", dev);
break;
}
pr_debug("parent bus is %s (na=%d, ns=%d) on %pOF\n",
pbus->name, pna, pns, parent);
/* Apply bus translation */
if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
break;
/* Complete the move up one level */
na = pna;
ns = pns;
bus = pbus;
of_dump_addr("one level translation:", addr, na);
}
bail:
of_node_put(parent);
of_node_put(dev);
return result;
}
u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
{
struct device_node *host;
u64 ret;
ret = __of_translate_address(dev, of_get_parent,
in_addr, "ranges", &host);
if (host) {
of_node_put(host);
return OF_BAD_ADDR;
}
return ret;
}
EXPORT_SYMBOL(of_translate_address);
#ifdef CONFIG_HAS_DMA
struct device_node *__of_get_dma_parent(const struct device_node *np)
{
struct of_phandle_args args;
int ret, index;
index = of_property_match_string(np, "interconnect-names", "dma-mem");
if (index < 0)
return of_get_parent(np);
ret = of_parse_phandle_with_args(np, "interconnects",
"#interconnect-cells",
index, &args);
if (ret < 0)
return of_get_parent(np);
return of_node_get(args.np);
}
#endif
static struct device_node *of_get_next_dma_parent(struct device_node *np)
{
struct device_node *parent;
parent = __of_get_dma_parent(np);
of_node_put(np);
return parent;
}
u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
{
struct device_node *host;
u64 ret;
ret = __of_translate_address(dev, __of_get_dma_parent,
in_addr, "dma-ranges", &host);
if (host) {
of_node_put(host);
return OF_BAD_ADDR;
}
return ret;
}
EXPORT_SYMBOL(of_translate_dma_address);
/**
* of_translate_dma_region - Translate device tree address and size tuple
* @dev: device tree node for which to translate
* @prop: pointer into array of cells
* @start: return value for the start of the DMA range
* @length: return value for the length of the DMA range
*
* Returns a pointer to the cell immediately following the translated DMA
* region, or NULL if the translation fails.
*/
const __be32 *of_translate_dma_region(struct device_node *dev, const __be32 *prop,
phys_addr_t *start, size_t *length)
{
struct device_node *parent;
u64 address, size;
int na, ns;
parent = __of_get_dma_parent(dev);
if (!parent)
return NULL;
na = of_bus_n_addr_cells(parent);
ns = of_bus_n_size_cells(parent);
of_node_put(parent);
address = of_translate_dma_address(dev, prop);
if (address == OF_BAD_ADDR)
return NULL;
size = of_read_number(prop + na, ns);
if (start)
*start = address;
if (length)
*length = size;
return prop + na + ns;
}
EXPORT_SYMBOL(of_translate_dma_region);
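/*
 * Usage sketch (illustrative; @np, @prop and the loop bounds are assumptions
 * of the example): walk a raw cell array that encodes consecutive DMA regions.
 *
 *	const __be32 *cell = prop->value;
 *	const __be32 *end = cell + prop->length / sizeof(__be32);
 *	phys_addr_t start;
 *	size_t len;
 *
 *	while (cell && cell < end) {
 *		cell = of_translate_dma_region(np, cell, &start, &len);
 *		if (cell)
 *			pr_debug("region %pap + %zx\n", &start, len);
 *	}
 */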
const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
u64 *size, unsigned int *flags)
{
const __be32 *prop;
unsigned int psize;
struct device_node *parent;
struct of_bus *bus;
int onesize, i, na, ns;
/* Get parent & match bus type */
parent = of_get_parent(dev);
if (parent == NULL)
return NULL;
bus = of_match_bus(parent);
if (strcmp(bus->name, "pci") && (bar_no >= 0)) {
of_node_put(parent);
return NULL;
}
bus->count_cells(dev, &na, &ns);
of_node_put(parent);
if (!OF_CHECK_ADDR_COUNT(na))
return NULL;
/* Get "reg" or "assigned-addresses" property */
prop = of_get_property(dev, bus->addresses, &psize);
if (prop == NULL)
return NULL;
psize /= 4;
onesize = na + ns;
for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) {
u32 val = be32_to_cpu(prop[0]);
/* PCI bus matches on BAR number instead of index */
if (((bar_no >= 0) && ((val & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0))) ||
((index >= 0) && (i == index))) {
if (size)
*size = of_read_number(prop + na, ns);
if (flags)
*flags = bus->get_flags(prop);
return prop;
}
}
return NULL;
}
EXPORT_SYMBOL(__of_get_address);
/**
* of_property_read_reg - Retrieve the specified "reg" entry index without translating
* @np: device tree node for which to retrieve "reg" from
* @idx: "reg" entry index to read
* @addr: return value for the untranslated address
* @size: return value for the entry size
*
* Returns -EINVAL if "reg" is not found. Returns 0 on success with addr and
* size values filled in.
*/
int of_property_read_reg(struct device_node *np, int idx, u64 *addr, u64 *size)
{
const __be32 *prop = of_get_address(np, idx, size, NULL);
if (!prop)
return -EINVAL;
*addr = of_read_number(prop, of_n_addr_cells(np));
return 0;
}
EXPORT_SYMBOL(of_property_read_reg);
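/*
 * Usage sketch (illustrative): read the first untranslated "reg" entry of a
 * node, for example to recover a chip-select or bus-local offset.
 *
 *	u64 bus_addr, size;
 *
 *	ret = of_property_read_reg(np, 0, &bus_addr, &size);
 *	if (ret)
 *		return ret;
 */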
static int parser_init(struct of_pci_range_parser *parser,
struct device_node *node, const char *name)
{
int rlen;
parser->node = node;
parser->pna = of_n_addr_cells(node);
parser->na = of_bus_n_addr_cells(node);
parser->ns = of_bus_n_size_cells(node);
parser->dma = !strcmp(name, "dma-ranges");
parser->bus = of_match_bus(node);
parser->range = of_get_property(node, name, &rlen);
if (parser->range == NULL)
return -ENOENT;
parser->end = parser->range + rlen / sizeof(__be32);
return 0;
}
int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node)
{
return parser_init(parser, node, "ranges");
}
EXPORT_SYMBOL_GPL(of_pci_range_parser_init);
int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node)
{
return parser_init(parser, node, "dma-ranges");
}
EXPORT_SYMBOL_GPL(of_pci_dma_range_parser_init);
#define of_dma_range_parser_init of_pci_dma_range_parser_init
struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
struct of_pci_range *range)
{
int na = parser->na;
int ns = parser->ns;
int np = parser->pna + na + ns;
int busflag_na = 0;
if (!range)
return NULL;
if (!parser->range || parser->range + np > parser->end)
return NULL;
range->flags = parser->bus->get_flags(parser->range);
/* An extra cell for resource flags */
if (parser->bus->has_flags)
busflag_na = 1;
range->bus_addr = of_read_number(parser->range + busflag_na, na - busflag_na);
if (parser->dma)
range->cpu_addr = of_translate_dma_address(parser->node,
parser->range + na);
else
range->cpu_addr = of_translate_address(parser->node,
parser->range + na);
range->size = of_read_number(parser->range + parser->pna + na, ns);
parser->range += np;
/* Now consume following elements while they are contiguous */
while (parser->range + np <= parser->end) {
u32 flags = 0;
u64 bus_addr, cpu_addr, size;
flags = parser->bus->get_flags(parser->range);
bus_addr = of_read_number(parser->range + busflag_na, na - busflag_na);
if (parser->dma)
cpu_addr = of_translate_dma_address(parser->node,
parser->range + na);
else
cpu_addr = of_translate_address(parser->node,
parser->range + na);
size = of_read_number(parser->range + parser->pna + na, ns);
if (flags != range->flags)
break;
if (bus_addr != range->bus_addr + range->size ||
cpu_addr != range->cpu_addr + range->size)
break;
range->size += size;
parser->range += np;
}
return range;
}
EXPORT_SYMBOL_GPL(of_pci_range_parser_one);
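/*
 * Usage sketch (illustrative; @np is an assumption of the example): iterate
 * all "ranges" entries of a bridge node with the parser helpers;
 * for_each_of_range() calls of_pci_range_parser_one() until the property
 * is exhausted.
 *
 *	struct of_range_parser parser;
 *	struct of_range range;
 *
 *	if (of_range_parser_init(&parser, np))
 *		return -ENOENT;
 *	for_each_of_range(&parser, &range)
 *		pr_debug("bus 0x%llx -> cpu 0x%llx size 0x%llx\n",
 *			 range.bus_addr, range.cpu_addr, range.size);
 */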
static u64 of_translate_ioport(struct device_node *dev, const __be32 *in_addr,
u64 size)
{
u64 taddr;
unsigned long port;
struct device_node *host;
taddr = __of_translate_address(dev, of_get_parent,
in_addr, "ranges", &host);
if (host) {
/* host-specific port access */
port = logic_pio_trans_hwaddr(&host->fwnode, taddr, size);
of_node_put(host);
} else {
/* memory-mapped I/O range */
port = pci_address_to_pio(taddr);
}
if (port == (unsigned long)-1)
return OF_BAD_ADDR;
return port;
}
#ifdef CONFIG_HAS_DMA
/**
* of_dma_get_range - Get DMA range info and put it into a map array
* @np: device node to get DMA range info
* @map: dma range structure to return
*
* Look in bottom up direction for the first "dma-ranges" property
* and parse it. Put the information into a DMA offset map array.
*
* dma-ranges format:
* DMA addr (dma_addr) : naddr cells
* CPU addr (phys_addr_t) : pna cells
* size : nsize cells
*
* It returns -ENODEV if "dma-ranges" property was not found for this
* device in the DT.
*/
int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
{
struct device_node *node = of_node_get(np);
const __be32 *ranges = NULL;
bool found_dma_ranges = false;
struct of_range_parser parser;
struct of_range range;
struct bus_dma_region *r;
int len, num_ranges = 0;
int ret = 0;
while (node) {
ranges = of_get_property(node, "dma-ranges", &len);
/* Ignore empty ranges, they imply no translation required */
if (ranges && len > 0)
break;
/* Once we find 'dma-ranges', then a missing one is an error */
if (found_dma_ranges && !ranges) {
ret = -ENODEV;
goto out;
}
found_dma_ranges = true;
node = of_get_next_dma_parent(node);
}
if (!node || !ranges) {
pr_debug("no dma-ranges found for node(%pOF)\n", np);
ret = -ENODEV;
goto out;
}
of_dma_range_parser_init(&parser, node);
for_each_of_range(&parser, &range) {
if (range.cpu_addr == OF_BAD_ADDR) {
pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
range.bus_addr, node);
continue;
}
num_ranges++;
}
if (!num_ranges) {
ret = -EINVAL;
goto out;
}
r = kcalloc(num_ranges + 1, sizeof(*r), GFP_KERNEL);
if (!r) {
ret = -ENOMEM;
goto out;
}
/*
* Record all info in the generic DMA ranges array for struct device,
* returning an error if we don't find any parsable ranges.
*/
*map = r;
of_dma_range_parser_init(&parser, node);
for_each_of_range(&parser, &range) {
pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
range.bus_addr, range.cpu_addr, range.size);
if (range.cpu_addr == OF_BAD_ADDR)
continue;
r->cpu_start = range.cpu_addr;
r->dma_start = range.bus_addr;
r->size = range.size;
r->offset = range.cpu_addr - range.bus_addr;
r++;
}
out:
of_node_put(node);
return ret;
}
#endif /* CONFIG_HAS_DMA */
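/*
 * For illustration (cell counts and addresses are made up): a parent bus
 * with one-cell child addresses and one-cell sizes could describe a 512 MiB
 * window offset by 0x40000000 as
 *
 *	dma-ranges = <0x00000000 0x40000000 0x20000000>;
 *
 * which of_dma_get_range() records as a bus_dma_region with
 * dma_start = 0x0, cpu_start = 0x40000000, size = 0x20000000 and
 * offset = 0x40000000.
 */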
/**
* of_dma_get_max_cpu_address - Gets highest CPU address suitable for DMA
* @np: The node to start searching from or NULL to start from the root
*
* Gets the highest CPU physical address that is addressable by all DMA masters
* in the sub-tree pointed by np, or the whole tree if NULL is passed. If no
* DMA constrained device is found, it returns PHYS_ADDR_MAX.
*/
phys_addr_t __init of_dma_get_max_cpu_address(struct device_node *np)
{
phys_addr_t max_cpu_addr = PHYS_ADDR_MAX;
struct of_range_parser parser;
phys_addr_t subtree_max_addr;
struct device_node *child;
struct of_range range;
const __be32 *ranges;
u64 cpu_end = 0;
int len;
if (!np)
np = of_root;
ranges = of_get_property(np, "dma-ranges", &len);
if (ranges && len) {
of_dma_range_parser_init(&parser, np);
for_each_of_range(&parser, &range)
if (range.cpu_addr + range.size > cpu_end)
cpu_end = range.cpu_addr + range.size - 1;
if (max_cpu_addr > cpu_end)
max_cpu_addr = cpu_end;
}
for_each_available_child_of_node(np, child) {
subtree_max_addr = of_dma_get_max_cpu_address(child);
if (max_cpu_addr > subtree_max_addr)
max_cpu_addr = subtree_max_addr;
}
return max_cpu_addr;
}
/**
* of_dma_is_coherent - Check if device is coherent
* @np: device node
*
* It returns true if "dma-coherent" property was found
* for this device in the DT, or if DMA is coherent by
* default for OF devices on the current platform and no
* "dma-noncoherent" property was found for this device.
*/
bool of_dma_is_coherent(struct device_node *np)
{
struct device_node *node;
bool is_coherent = dma_default_coherent;
node = of_node_get(np);
while (node) {
if (of_property_read_bool(node, "dma-coherent")) {
is_coherent = true;
break;
}
if (of_property_read_bool(node, "dma-noncoherent")) {
is_coherent = false;
break;
}
node = of_get_next_dma_parent(node);
}
of_node_put(node);
return is_coherent;
}
EXPORT_SYMBOL_GPL(of_dma_is_coherent);
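/*
 * Usage sketch (illustrative): decide whether cache maintenance is needed
 * for buffers shared with the device described by @np.
 *
 *	if (of_dma_is_coherent(np))
 *		dev_dbg(dev, "device is DMA coherent\n");
 */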
/**
* of_mmio_is_nonposted - Check if device uses non-posted MMIO
* @np: device node
*
* Returns true if the "nonposted-mmio" property was found for
* the device's bus.
*
* This is currently only enabled on builds that support Apple ARM devices, as
* an optimization.
*/
static bool of_mmio_is_nonposted(struct device_node *np)
{
struct device_node *parent;
bool nonposted;
if (!IS_ENABLED(CONFIG_ARCH_APPLE))
return false;
parent = of_get_parent(np);
if (!parent)
return false;
nonposted = of_property_read_bool(parent, "nonposted-mmio");
of_node_put(parent);
return nonposted;
}
static int __of_address_to_resource(struct device_node *dev, int index, int bar_no,
struct resource *r)
{
u64 taddr;
const __be32 *addrp;
u64 size;
unsigned int flags;
const char *name = NULL;
addrp = __of_get_address(dev, index, bar_no, &size, &flags);
if (addrp == NULL)
return -EINVAL;
/* Get optional "reg-names" property to add a name to a resource */
if (index >= 0)
of_property_read_string_index(dev, "reg-names", index, &name);
if (flags & IORESOURCE_MEM)
taddr = of_translate_address(dev, addrp);
else if (flags & IORESOURCE_IO)
taddr = of_translate_ioport(dev, addrp, size);
else
return -EINVAL;
if (taddr == OF_BAD_ADDR)
return -EINVAL;
memset(r, 0, sizeof(struct resource));
if (of_mmio_is_nonposted(dev))
flags |= IORESOURCE_MEM_NONPOSTED;
r->start = taddr;
r->end = taddr + size - 1;
r->flags = flags;
r->name = name ? name : dev->full_name;
return 0;
}
/**
* of_address_to_resource - Translate device tree address and return as resource
* @dev: Caller's device node
* @index: Index of the "reg" entry to translate
* @r: Pointer to the resource to be filled in
*
* Returns -EINVAL if the range cannot be converted to resource.
*
* Note that if your address is a PIO address, the conversion will fail if
* the physical address can't be internally converted to an IO token with
* pci_address_to_pio(); that is because it's either called too early or it
* can't be matched to any host bridge IO space.
*/
int of_address_to_resource(struct device_node *dev, int index,
struct resource *r)
{
return __of_address_to_resource(dev, index, -1, r);
}
EXPORT_SYMBOL_GPL(of_address_to_resource);
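/*
 * Usage sketch (illustrative; the index, @dev and error handling are
 * assumptions of the example): translate the first "reg" entry and map it.
 *
 *	struct resource res;
 *
 *	ret = of_address_to_resource(np, 0, &res);
 *	if (ret)
 *		return ret;
 *	base = devm_ioremap_resource(dev, &res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */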
int of_pci_address_to_resource(struct device_node *dev, int bar,
struct resource *r)
{
if (!IS_ENABLED(CONFIG_PCI))
return -ENOSYS;
return __of_address_to_resource(dev, -1, bar, r);
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
/**
* of_iomap - Maps the memory mapped IO for a given device_node
* @np: the device whose io range will be mapped
* @index: index of the io range
*
* Returns a pointer to the mapped memory, or NULL if the mapping fails
*/
void __iomem *of_iomap(struct device_node *np, int index)
{
struct resource res;
if (of_address_to_resource(np, index, &res))
return NULL;
if (res.flags & IORESOURCE_MEM_NONPOSTED)
return ioremap_np(res.start, resource_size(&res));
else
return ioremap(res.start, resource_size(&res));
}
EXPORT_SYMBOL(of_iomap);
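/*
 * Usage sketch (illustrative): map the first register window of @np; unlike
 * of_io_request_and_map() below, no memory region is requested.
 *
 *	base = of_iomap(np, 0);
 *	if (!base)
 *		return -ENOMEM;
 */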
/*
* of_io_request_and_map - Requests a resource and maps the memory mapped IO
* for a given device_node
* @np: the device whose io range will be mapped
* @index: index of the io range
* @name: name "override" for the memory region request or NULL
*
* Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
* error code on failure. Usage example:
*
* base = of_io_request_and_map(node, 0, "foo");
* if (IS_ERR(base))
* return PTR_ERR(base);
*/
void __iomem *of_io_request_and_map(struct device_node *np, int index,
const char *name)
{
struct resource res;
void __iomem *mem;
if (of_address_to_resource(np, index, &res))
return IOMEM_ERR_PTR(-EINVAL);
if (!name)
name = res.name;
if (!request_mem_region(res.start, resource_size(&res), name))
return IOMEM_ERR_PTR(-EBUSY);
if (res.flags & IORESOURCE_MEM_NONPOSTED)
mem = ioremap_np(res.start, resource_size(&res));
else
mem = ioremap(res.start, resource_size(&res));
if (!mem) {
release_mem_region(res.start, resource_size(&res));
return IOMEM_ERR_PTR(-ENOMEM);
}
return mem;
}
EXPORT_SYMBOL(of_io_request_and_map);
| linux-master | drivers/of/address.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Device tree based initialization code for reserved memory.
*
* Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
* Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
* Author: Marek Szyprowski <[email protected]>
* Author: Josh Cartwright <[email protected]>
*/
#define pr_fmt(fmt) "OF: reserved mem: " fmt
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/cma.h>
#include "of_private.h"
#define MAX_RESERVED_REGIONS 64
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;
static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
phys_addr_t *res_base)
{
phys_addr_t base;
int err = 0;
end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
align = !align ? SMP_CACHE_BYTES : align;
base = memblock_phys_alloc_range(size, align, start, end);
if (!base)
return -ENOMEM;
*res_base = base;
if (nomap) {
err = memblock_mark_nomap(base, size);
if (err)
memblock_phys_free(base, size);
}
kmemleak_ignore_phys(base);
return err;
}
/*
* fdt_reserved_mem_save_node() - save fdt node for second pass initialization
*/
void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
phys_addr_t base, phys_addr_t size)
{
struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
pr_err("not enough space for all defined regions.\n");
return;
}
rmem->fdt_node = node;
rmem->name = uname;
rmem->base = base;
rmem->size = size;
reserved_mem_count++;
return;
}
/*
* __reserved_mem_alloc_in_range() - allocate reserved memory described with
* 'alloc-ranges'. Choose bottom-up/top-down depending on nearby existing
* reserved regions to keep the reserved memory contiguous if possible.
*/
static int __init __reserved_mem_alloc_in_range(phys_addr_t size,
phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
phys_addr_t *res_base)
{
bool prev_bottom_up = memblock_bottom_up();
bool bottom_up = false, top_down = false;
int ret, i;
for (i = 0; i < reserved_mem_count; i++) {
struct reserved_mem *rmem = &reserved_mem[i];
/* Skip regions that were not reserved yet */
if (rmem->size == 0)
continue;
/*
* If range starts next to an existing reservation, use bottom-up:
* |....RRRR................RRRRRRRR..............|
* --RRRR------
*/
if (start >= rmem->base && start <= (rmem->base + rmem->size))
bottom_up = true;
/*
* If range ends next to an existing reservation, use top-down:
* |....RRRR................RRRRRRRR..............|
* -------RRRR-----
*/
if (end >= rmem->base && end <= (rmem->base + rmem->size))
top_down = true;
}
/* Change setting only if either bottom-up or top-down was selected */
if (bottom_up != top_down)
memblock_set_bottom_up(bottom_up);
ret = early_init_dt_alloc_reserved_memory_arch(size, align,
start, end, nomap, res_base);
/* Restore old setting if needed */
if (bottom_up != top_down)
memblock_set_bottom_up(prev_bottom_up);
return ret;
}
/*
* __reserved_mem_alloc_size() - allocate reserved memory described by
* 'size', 'alignment' and 'alloc-ranges' properties.
*/
static int __init __reserved_mem_alloc_size(unsigned long node,
const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
{
int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
phys_addr_t start = 0, end = 0;
phys_addr_t base = 0, align = 0, size;
int len;
const __be32 *prop;
bool nomap;
int ret;
prop = of_get_flat_dt_prop(node, "size", &len);
if (!prop)
return -EINVAL;
if (len != dt_root_size_cells * sizeof(__be32)) {
pr_err("invalid size property in '%s' node.\n", uname);
return -EINVAL;
}
size = dt_mem_next_cell(dt_root_size_cells, &prop);
prop = of_get_flat_dt_prop(node, "alignment", &len);
if (prop) {
if (len != dt_root_addr_cells * sizeof(__be32)) {
pr_err("invalid alignment property in '%s' node.\n",
uname);
return -EINVAL;
}
align = dt_mem_next_cell(dt_root_addr_cells, &prop);
}
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
	/* Need to adjust the alignment to satisfy the CMA requirement */
if (IS_ENABLED(CONFIG_CMA)
&& of_flat_dt_is_compatible(node, "shared-dma-pool")
&& of_get_flat_dt_prop(node, "reusable", NULL)
&& !nomap)
align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);
prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
if (prop) {
if (len % t_len != 0) {
pr_err("invalid alloc-ranges property in '%s', skipping node.\n",
uname);
return -EINVAL;
}
base = 0;
while (len > 0) {
start = dt_mem_next_cell(dt_root_addr_cells, &prop);
end = start + dt_mem_next_cell(dt_root_size_cells,
&prop);
ret = __reserved_mem_alloc_in_range(size, align,
start, end, nomap, &base);
if (ret == 0) {
pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
uname, &base,
(unsigned long)(size / SZ_1M));
break;
}
len -= t_len;
}
} else {
ret = early_init_dt_alloc_reserved_memory_arch(size, align,
0, 0, nomap, &base);
if (ret == 0)
pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
}
if (base == 0) {
pr_err("failed to allocate memory for node '%s': size %lu MiB\n",
uname, (unsigned long)(size / SZ_1M));
return -ENOMEM;
}
*res_base = base;
*res_size = size;
return 0;
}
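/*
 * Example of a node handled by this path (all values are hypothetical):
 * a dynamically placed region is described by 'size' plus optional
 * 'alignment' and 'alloc-ranges' instead of a fixed 'reg' property.
 *
 *	reserved-memory {
 *		multimedia_pool: multimedia {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x4000000>;
 *			alignment = <0x400000>;
 *			alloc-ranges = <0x40000000 0x10000000>;
 *		};
 *	};
 */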
static const struct of_device_id __rmem_of_table_sentinel
__used __section("__reservedmem_of_table_end");
/*
* __reserved_mem_init_node() - call region specific reserved memory init code
*/
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
extern const struct of_device_id __reservedmem_of_table[];
const struct of_device_id *i;
int ret = -ENOENT;
for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
reservedmem_of_init_fn initfn = i->data;
const char *compat = i->compatible;
if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
continue;
ret = initfn(rmem);
if (ret == 0) {
pr_info("initialized node %s, compatible id %s\n",
rmem->name, compat);
break;
}
}
return ret;
}
static int __init __rmem_cmp(const void *a, const void *b)
{
const struct reserved_mem *ra = a, *rb = b;
if (ra->base < rb->base)
return -1;
if (ra->base > rb->base)
return 1;
/*
* Put the dynamic allocations (address == 0, size == 0) before static
* allocations at address 0x0 so that overlap detection works
* correctly.
*/
if (ra->size < rb->size)
return -1;
if (ra->size > rb->size)
return 1;
if (ra->fdt_node < rb->fdt_node)
return -1;
if (ra->fdt_node > rb->fdt_node)
return 1;
return 0;
}
static void __init __rmem_check_for_overlap(void)
{
int i;
if (reserved_mem_count < 2)
return;
sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
__rmem_cmp, NULL);
for (i = 0; i < reserved_mem_count - 1; i++) {
struct reserved_mem *this, *next;
this = &reserved_mem[i];
next = &reserved_mem[i + 1];
if (this->base + this->size > next->base) {
phys_addr_t this_end, next_end;
this_end = this->base + this->size;
next_end = next->base + next->size;
pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
this->name, &this->base, &this_end,
next->name, &next->base, &next_end);
}
}
}
/**
* fdt_init_reserved_mem() - allocate and init all saved reserved memory regions
*/
void __init fdt_init_reserved_mem(void)
{
int i;
/* check for overlapping reserved regions */
__rmem_check_for_overlap();
for (i = 0; i < reserved_mem_count; i++) {
struct reserved_mem *rmem = &reserved_mem[i];
unsigned long node = rmem->fdt_node;
int len;
const __be32 *prop;
int err = 0;
bool nomap;
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
prop = of_get_flat_dt_prop(node, "phandle", &len);
if (!prop)
prop = of_get_flat_dt_prop(node, "linux,phandle", &len);
if (prop)
rmem->phandle = of_read_number(prop, len/4);
if (rmem->size == 0)
err = __reserved_mem_alloc_size(node, rmem->name,
&rmem->base, &rmem->size);
if (err == 0) {
err = __reserved_mem_init_node(rmem);
if (err != 0 && err != -ENOENT) {
				pr_info("node %s compatible matching failed\n",
rmem->name);
if (nomap)
memblock_clear_nomap(rmem->base, rmem->size);
else
memblock_phys_free(rmem->base,
rmem->size);
} else {
phys_addr_t end = rmem->base + rmem->size - 1;
bool reusable =
(of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;
pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
&rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
nomap ? "nomap" : "map",
reusable ? "reusable" : "non-reusable",
rmem->name ? rmem->name : "unknown");
}
}
}
}
static inline struct reserved_mem *__find_rmem(struct device_node *node)
{
unsigned int i;
if (!node->phandle)
return NULL;
for (i = 0; i < reserved_mem_count; i++)
if (reserved_mem[i].phandle == node->phandle)
return &reserved_mem[i];
return NULL;
}
struct rmem_assigned_device {
struct device *dev;
struct reserved_mem *rmem;
struct list_head list;
};
static LIST_HEAD(of_rmem_assigned_device_list);
static DEFINE_MUTEX(of_rmem_assigned_device_mutex);
/**
* of_reserved_mem_device_init_by_idx() - assign reserved memory region to
* given device
* @dev: Pointer to the device to configure
 * @np: Pointer to the device_node with 'memory-region' property
* @idx: Index of selected region
*
* This function assigns respective DMA-mapping operations based on reserved
* memory region specified by 'memory-region' property in @np node to the @dev
* device. When driver needs to use more than one reserved memory region, it
* should allocate child devices and initialize regions by name for each of
* child device.
*
* Returns error code or zero on success.
*/
int of_reserved_mem_device_init_by_idx(struct device *dev,
struct device_node *np, int idx)
{
struct rmem_assigned_device *rd;
struct device_node *target;
struct reserved_mem *rmem;
int ret;
if (!np || !dev)
return -EINVAL;
target = of_parse_phandle(np, "memory-region", idx);
if (!target)
return -ENODEV;
if (!of_device_is_available(target)) {
of_node_put(target);
return 0;
}
rmem = __find_rmem(target);
of_node_put(target);
if (!rmem || !rmem->ops || !rmem->ops->device_init)
return -EINVAL;
rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL);
if (!rd)
return -ENOMEM;
ret = rmem->ops->device_init(rmem, dev);
if (ret == 0) {
rd->dev = dev;
rd->rmem = rmem;
mutex_lock(&of_rmem_assigned_device_mutex);
list_add(&rd->list, &of_rmem_assigned_device_list);
mutex_unlock(&of_rmem_assigned_device_mutex);
dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
} else {
kfree(rd);
}
return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
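/*
 * Illustrative example (the device tree fragment and call site are
 * hypothetical): a device points at a reserved region through the
 * 'memory-region' property and has it assigned during probe.
 *
 *	reserved-memory {
 *		multimedia_pool: multimedia {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			reg = <0x77000000 0x4000000>;
 *		};
 *	};
 *
 *	video-codec@40000000 {
 *		...
 *		memory-region = <&multimedia_pool>;
 *	};
 *
 * and in the driver:
 *
 *	ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
 */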
/**
* of_reserved_mem_device_init_by_name() - assign named reserved memory region
* to given device
* @dev: pointer to the device to configure
* @np: pointer to the device node with 'memory-region' property
* @name: name of the selected memory region
*
* Returns: 0 on success or a negative error-code on failure.
*/
int of_reserved_mem_device_init_by_name(struct device *dev,
struct device_node *np,
const char *name)
{
int idx = of_property_match_string(np, "memory-region-names", name);
return of_reserved_mem_device_init_by_idx(dev, np, idx);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name);
/**
* of_reserved_mem_device_release() - release reserved memory device structures
* @dev: Pointer to the device to deconfigure
*
* This function releases structures allocated for memory region handling for
* the given device.
*/
void of_reserved_mem_device_release(struct device *dev)
{
struct rmem_assigned_device *rd, *tmp;
LIST_HEAD(release_list);
mutex_lock(&of_rmem_assigned_device_mutex);
list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
if (rd->dev == dev)
list_move_tail(&rd->list, &release_list);
}
mutex_unlock(&of_rmem_assigned_device_mutex);
list_for_each_entry_safe(rd, tmp, &release_list, list) {
if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
rd->rmem->ops->device_release(rd->rmem, dev);
kfree(rd);
}
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);
/**
* of_reserved_mem_lookup() - acquire reserved_mem from a device node
* @np: node pointer of the desired reserved-memory region
*
* This function allows drivers to acquire a reference to the reserved_mem
* struct based on a device node handle.
*
* Returns a reserved_mem reference, or NULL on error.
*/
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
const char *name;
int i;
if (!np->full_name)
return NULL;
name = kbasename(np->full_name);
for (i = 0; i < reserved_mem_count; i++)
if (!strcmp(reserved_mem[i].name, name))
return &reserved_mem[i];
return NULL;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
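/*
 * Usage sketch (names are assumptions, not part of this file): a driver
 * that only needs the base and size of its reserved region, without any
 * DMA mapping setup, can resolve the struct reserved_mem directly.
 *
 *	struct device_node *target;
 *	struct reserved_mem *rmem;
 *
 *	target = of_parse_phandle(dev->of_node, "memory-region", 0);
 *	rmem = target ? of_reserved_mem_lookup(target) : NULL;
 *	of_node_put(target);
 *	if (!rmem)
 *		return -ENODEV;
 *	pr_debug("region at %pa, %pa bytes\n", &rmem->base, &rmem->size);
 */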
| linux-master | drivers/of/of_reserved_mem.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for dynamic device trees.
*
* On some platforms, the device tree can be manipulated at runtime.
* The routines in this section support adding, removing and changing
* device tree nodes.
*/
#define pr_fmt(fmt) "OF: " fmt
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include "of_private.h"
static struct device_node *kobj_to_device_node(struct kobject *kobj)
{
return container_of(kobj, struct device_node, kobj);
}
/**
* of_node_get() - Increment refcount of a node
* @node: Node to inc refcount, NULL is supported to simplify writing of
* callers
*
* Return: The node with refcount incremented.
*/
struct device_node *of_node_get(struct device_node *node)
{
if (node)
kobject_get(&node->kobj);
return node;
}
EXPORT_SYMBOL(of_node_get);
/**
* of_node_put() - Decrement refcount of a node
* @node: Node to dec refcount, NULL is supported to simplify writing of
* callers
*/
void of_node_put(struct device_node *node)
{
if (node)
kobject_put(&node->kobj);
}
EXPORT_SYMBOL(of_node_put);
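/*
 * Refcounting sketch (the lookup below is a hypothetical example): every
 * of_node_get(), and every helper that returns a node with an elevated
 * refcount (of_parse_phandle(), of_get_child_by_name(), ...), must be
 * balanced by an of_node_put() once the caller is done with the node.
 *
 *	child = of_get_child_by_name(np, "port");
 *	if (child) {
 *		... use child ...
 *		of_node_put(child);
 *	}
 */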
static BLOCKING_NOTIFIER_HEAD(of_reconfig_chain);
int of_reconfig_notifier_register(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&of_reconfig_chain, nb);
}
EXPORT_SYMBOL_GPL(of_reconfig_notifier_register);
int of_reconfig_notifier_unregister(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&of_reconfig_chain, nb);
}
EXPORT_SYMBOL_GPL(of_reconfig_notifier_unregister);
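/*
 * Registration sketch (the callback and its helpers are hypothetical, not
 * part of this file): a subsystem interested in live tree changes
 * registers a notifier; the callback receives the OF_RECONFIG_* action
 * and a struct of_reconfig_data describing the affected node/property.
 *
 *	static int foo_of_notify(struct notifier_block *nb,
 *				 unsigned long action, void *arg)
 *	{
 *		struct of_reconfig_data *rd = arg;
 *
 *		switch (of_reconfig_get_state_change(action, rd)) {
 *		case OF_RECONFIG_CHANGE_ADD:
 *			return notifier_from_errno(foo_add_device(rd->dn));
 *		case OF_RECONFIG_CHANGE_REMOVE:
 *			return notifier_from_errno(foo_remove_device(rd->dn));
 *		default:
 *			return NOTIFY_OK;
 *		}
 *	}
 *
 *	static struct notifier_block foo_of_nb = {
 *		.notifier_call = foo_of_notify,
 *	};
 *	...
 *	of_reconfig_notifier_register(&foo_of_nb);
 */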
static const char *action_names[] = {
[0] = "INVALID",
[OF_RECONFIG_ATTACH_NODE] = "ATTACH_NODE",
[OF_RECONFIG_DETACH_NODE] = "DETACH_NODE",
[OF_RECONFIG_ADD_PROPERTY] = "ADD_PROPERTY",
[OF_RECONFIG_REMOVE_PROPERTY] = "REMOVE_PROPERTY",
[OF_RECONFIG_UPDATE_PROPERTY] = "UPDATE_PROPERTY",
};
#define _do_print(func, prefix, action, node, prop, ...) ({ \
func("changeset: " prefix "%-15s %pOF%s%s\n", \
##__VA_ARGS__, action_names[action], node, \
prop ? ":" : "", prop ? prop->name : ""); \
})
#define of_changeset_action_err(...) _do_print(pr_err, __VA_ARGS__)
#define of_changeset_action_debug(...) _do_print(pr_debug, __VA_ARGS__)
int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p)
{
int rc;
struct of_reconfig_data *pr = p;
of_changeset_action_debug("notify: ", action, pr->dn, pr->prop);
rc = blocking_notifier_call_chain(&of_reconfig_chain, action, p);
return notifier_to_errno(rc);
}
/*
* of_reconfig_get_state_change() - Returns new state of device
 * @action: action of the OF notifier
 * @pr: of_reconfig_data argument of the OF notifier
*
* Returns the new state of a device based on the notifier used.
*
 * Return: OF_RECONFIG_CHANGE_REMOVE on device going from enabled to disabled,
 * OF_RECONFIG_CHANGE_ADD on device going from disabled to enabled and
 * OF_RECONFIG_NO_CHANGE on no change.
*/
int of_reconfig_get_state_change(unsigned long action, struct of_reconfig_data *pr)
{
struct property *prop, *old_prop = NULL;
int is_status, status_state, old_status_state, prev_state, new_state;
/* figure out if a device should be created or destroyed */
switch (action) {
case OF_RECONFIG_ATTACH_NODE:
case OF_RECONFIG_DETACH_NODE:
prop = of_find_property(pr->dn, "status", NULL);
break;
case OF_RECONFIG_ADD_PROPERTY:
case OF_RECONFIG_REMOVE_PROPERTY:
prop = pr->prop;
break;
case OF_RECONFIG_UPDATE_PROPERTY:
prop = pr->prop;
old_prop = pr->old_prop;
break;
default:
return OF_RECONFIG_NO_CHANGE;
}
is_status = 0;
status_state = -1;
old_status_state = -1;
prev_state = -1;
new_state = -1;
if (prop && !strcmp(prop->name, "status")) {
is_status = 1;
status_state = !strcmp(prop->value, "okay") ||
!strcmp(prop->value, "ok");
if (old_prop)
old_status_state = !strcmp(old_prop->value, "okay") ||
!strcmp(old_prop->value, "ok");
}
switch (action) {
case OF_RECONFIG_ATTACH_NODE:
prev_state = 0;
/* -1 & 0 status either missing or okay */
new_state = status_state != 0;
break;
case OF_RECONFIG_DETACH_NODE:
/* -1 & 0 status either missing or okay */
prev_state = status_state != 0;
new_state = 0;
break;
case OF_RECONFIG_ADD_PROPERTY:
if (is_status) {
/* no status property -> enabled (legacy) */
prev_state = 1;
new_state = status_state;
}
break;
case OF_RECONFIG_REMOVE_PROPERTY:
if (is_status) {
prev_state = status_state;
/* no status property -> enabled (legacy) */
new_state = 1;
}
break;
case OF_RECONFIG_UPDATE_PROPERTY:
if (is_status) {
prev_state = old_status_state != 0;
new_state = status_state != 0;
}
break;
}
if (prev_state == new_state)
return OF_RECONFIG_NO_CHANGE;
return new_state ? OF_RECONFIG_CHANGE_ADD : OF_RECONFIG_CHANGE_REMOVE;
}
EXPORT_SYMBOL_GPL(of_reconfig_get_state_change);
int of_property_notify(int action, struct device_node *np,
struct property *prop, struct property *oldprop)
{
struct of_reconfig_data pr;
/* only call notifiers if the node is attached */
if (!of_node_is_attached(np))
return 0;
pr.dn = np;
pr.prop = prop;
pr.old_prop = oldprop;
return of_reconfig_notify(action, &pr);
}
static void __of_attach_node(struct device_node *np)
{
const __be32 *phandle;
int sz;
unsigned long flags;
raw_spin_lock_irqsave(&devtree_lock, flags);
if (!of_node_check_flag(np, OF_OVERLAY)) {
np->name = __of_get_property(np, "name", NULL);
if (!np->name)
np->name = "<NULL>";
phandle = __of_get_property(np, "phandle", &sz);
if (!phandle)
phandle = __of_get_property(np, "linux,phandle", &sz);
if (IS_ENABLED(CONFIG_PPC_PSERIES) && !phandle)
phandle = __of_get_property(np, "ibm,phandle", &sz);
if (phandle && (sz >= 4))
np->phandle = be32_to_cpup(phandle);
else
np->phandle = 0;
}
np->child = NULL;
np->sibling = np->parent->child;
np->parent->child = np;
of_node_clear_flag(np, OF_DETACHED);
np->fwnode.flags |= FWNODE_FLAG_NOT_DEVICE;
raw_spin_unlock_irqrestore(&devtree_lock, flags);
__of_attach_node_sysfs(np);
}
/**
* of_attach_node() - Plug a device node into the tree and global list.
* @np: Pointer to the caller's Device Node
*/
int of_attach_node(struct device_node *np)
{
struct of_reconfig_data rd;
memset(&rd, 0, sizeof(rd));
rd.dn = np;
mutex_lock(&of_mutex);
__of_attach_node(np);
mutex_unlock(&of_mutex);
of_reconfig_notify(OF_RECONFIG_ATTACH_NODE, &rd);
return 0;
}
void __of_detach_node(struct device_node *np)
{
struct device_node *parent;
unsigned long flags;
raw_spin_lock_irqsave(&devtree_lock, flags);
parent = np->parent;
if (WARN_ON(of_node_check_flag(np, OF_DETACHED) || !parent)) {
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return;
}
if (parent->child == np)
parent->child = np->sibling;
else {
struct device_node *prevsib;
for (prevsib = np->parent->child;
prevsib->sibling != np;
prevsib = prevsib->sibling)
;
prevsib->sibling = np->sibling;
}
of_node_set_flag(np, OF_DETACHED);
/* race with of_find_node_by_phandle() prevented by devtree_lock */
__of_phandle_cache_inv_entry(np->phandle);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
__of_detach_node_sysfs(np);
}
/**
* of_detach_node() - "Unplug" a node from the device tree.
* @np: Pointer to the caller's Device Node
*/
int of_detach_node(struct device_node *np)
{
struct of_reconfig_data rd;
memset(&rd, 0, sizeof(rd));
rd.dn = np;
mutex_lock(&of_mutex);
__of_detach_node(np);
mutex_unlock(&of_mutex);
of_reconfig_notify(OF_RECONFIG_DETACH_NODE, &rd);
return 0;
}
EXPORT_SYMBOL_GPL(of_detach_node);
static void property_list_free(struct property *prop_list)
{
struct property *prop, *next;
for (prop = prop_list; prop != NULL; prop = next) {
next = prop->next;
kfree(prop->name);
kfree(prop->value);
kfree(prop);
}
}
/**
* of_node_release() - release a dynamically allocated node
* @kobj: kernel object of the node to be released
*
* In of_node_put() this function is passed to kref_put() as the destructor.
*/
void of_node_release(struct kobject *kobj)
{
struct device_node *node = kobj_to_device_node(kobj);
/*
* can not use '"%pOF", node' in pr_err() calls from this function
* because an of_node_get(node) when refcount is already zero
* will result in an error and a stack dump
*/
/* We should never be releasing nodes that haven't been detached. */
if (!of_node_check_flag(node, OF_DETACHED)) {
pr_err("ERROR: %s() detected bad of_node_put() on %pOF/%s\n",
__func__, node->parent, node->full_name);
/*
* of unittests will test this path. Do not print the stack
* trace when the error is caused by unittest so that we do
* not display what a normal developer might reasonably
* consider a real bug.
*/
if (!IS_ENABLED(CONFIG_OF_UNITTEST) ||
strcmp(node->parent->full_name, "testcase-data")) {
dump_stack();
pr_err("ERROR: next of_node_put() on this node will result in a kobject warning 'refcount_t: underflow; use-after-free.'\n");
}
return;
}
if (!of_node_check_flag(node, OF_DYNAMIC))
return;
if (of_node_check_flag(node, OF_OVERLAY)) {
if (!of_node_check_flag(node, OF_OVERLAY_FREE_CSET)) {
/* premature refcount of zero, do not free memory */
pr_err("ERROR: memory leak before free overlay changeset, %pOF\n",
node);
return;
}
/*
* If node->properties non-empty then properties were added
		 * to this node either by a different overlay that has not
* yet been removed, or by a non-overlay mechanism.
*/
if (node->properties)
pr_err("ERROR: %s(), unexpected properties in %pOF\n",
__func__, node);
}
if (node->child)
pr_err("ERROR: %s() unexpected children for %pOF/%s\n",
__func__, node->parent, node->full_name);
property_list_free(node->properties);
property_list_free(node->deadprops);
fwnode_links_purge(of_fwnode_handle(node));
kfree(node->full_name);
kfree(node->data);
kfree(node);
}
/**
* __of_prop_dup - Copy a property dynamically.
* @prop: Property to copy
* @allocflags: Allocation flags (typically pass GFP_KERNEL)
*
* Copy a property by dynamically allocating the memory of both the
* property structure and the property name & contents. The property's
* flags have the OF_DYNAMIC bit set so that we can differentiate between
 * dynamically allocated properties and those that are not.
*
* Return: The newly allocated property or NULL on out of memory error.
*/
struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags)
{
struct property *new;
new = kzalloc(sizeof(*new), allocflags);
if (!new)
return NULL;
/*
* NOTE: There is no check for zero length value.
* In case of a boolean property, this will allocate a value
* of zero bytes. We do this to work around the use
* of of_get_property() calls on boolean values.
*/
new->name = kstrdup(prop->name, allocflags);
new->value = kmemdup(prop->value, prop->length, allocflags);
new->length = prop->length;
if (!new->name || !new->value)
goto err_free;
/* mark the property as dynamic */
of_property_set_flag(new, OF_DYNAMIC);
return new;
err_free:
kfree(new->name);
kfree(new->value);
kfree(new);
return NULL;
}
/**
* __of_node_dup() - Duplicate or create an empty device node dynamically.
* @np: if not NULL, contains properties to be duplicated in new node
* @full_name: string value to be duplicated into new node's full_name field
*
* Create a device tree node, optionally duplicating the properties of
* another node. The node data are dynamically allocated and all the node
* flags have the OF_DYNAMIC & OF_DETACHED bits set.
*
* Return: The newly allocated node or NULL on out of memory error. Use
* of_node_put() on it when done to free the memory allocated for it.
*/
struct device_node *__of_node_dup(const struct device_node *np,
const char *full_name)
{
struct device_node *node;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return NULL;
node->full_name = kstrdup(full_name, GFP_KERNEL);
if (!node->full_name) {
kfree(node);
return NULL;
}
of_node_set_flag(node, OF_DYNAMIC);
of_node_set_flag(node, OF_DETACHED);
of_node_init(node);
/* Iterate over and duplicate all properties */
if (np) {
struct property *pp, *new_pp;
for_each_property_of_node(np, pp) {
new_pp = __of_prop_dup(pp, GFP_KERNEL);
if (!new_pp)
goto err_prop;
if (__of_add_property(node, new_pp)) {
kfree(new_pp->name);
kfree(new_pp->value);
kfree(new_pp);
goto err_prop;
}
}
}
return node;
err_prop:
of_node_put(node); /* Frees the node and properties */
return NULL;
}
/**
* of_changeset_create_node - Dynamically create a device node and attach to
* a given changeset.
*
* @ocs: Pointer to changeset
* @parent: Pointer to parent device node
* @full_name: Node full name
*
* Return: Pointer to the created device node or NULL in case of an error.
*/
struct device_node *of_changeset_create_node(struct of_changeset *ocs,
struct device_node *parent,
const char *full_name)
{
struct device_node *np;
int ret;
np = __of_node_dup(NULL, full_name);
if (!np)
return NULL;
np->parent = parent;
ret = of_changeset_attach_node(ocs, np);
if (ret) {
of_node_put(np);
return NULL;
}
return np;
}
EXPORT_SYMBOL(of_changeset_create_node);
static void __of_changeset_entry_destroy(struct of_changeset_entry *ce)
{
if (ce->action == OF_RECONFIG_ATTACH_NODE &&
of_node_check_flag(ce->np, OF_OVERLAY)) {
if (kref_read(&ce->np->kobj.kref) > 1) {
pr_err("ERROR: memory leak, expected refcount 1 instead of %d, of_node_get()/of_node_put() unbalanced - destroy cset entry: attach overlay node %pOF\n",
kref_read(&ce->np->kobj.kref), ce->np);
} else {
of_node_set_flag(ce->np, OF_OVERLAY_FREE_CSET);
}
}
of_node_put(ce->np);
list_del(&ce->node);
kfree(ce);
}
static void __of_changeset_entry_invert(struct of_changeset_entry *ce,
struct of_changeset_entry *rce)
{
memcpy(rce, ce, sizeof(*rce));
switch (ce->action) {
case OF_RECONFIG_ATTACH_NODE:
rce->action = OF_RECONFIG_DETACH_NODE;
break;
case OF_RECONFIG_DETACH_NODE:
rce->action = OF_RECONFIG_ATTACH_NODE;
break;
case OF_RECONFIG_ADD_PROPERTY:
rce->action = OF_RECONFIG_REMOVE_PROPERTY;
break;
case OF_RECONFIG_REMOVE_PROPERTY:
rce->action = OF_RECONFIG_ADD_PROPERTY;
break;
case OF_RECONFIG_UPDATE_PROPERTY:
rce->old_prop = ce->prop;
rce->prop = ce->old_prop;
/* update was used but original property did not exist */
if (!rce->prop) {
rce->action = OF_RECONFIG_REMOVE_PROPERTY;
rce->prop = ce->prop;
}
break;
}
}
static int __of_changeset_entry_notify(struct of_changeset_entry *ce,
bool revert)
{
struct of_reconfig_data rd;
struct of_changeset_entry ce_inverted;
int ret = 0;
if (revert) {
__of_changeset_entry_invert(ce, &ce_inverted);
ce = &ce_inverted;
}
switch (ce->action) {
case OF_RECONFIG_ATTACH_NODE:
case OF_RECONFIG_DETACH_NODE:
memset(&rd, 0, sizeof(rd));
rd.dn = ce->np;
ret = of_reconfig_notify(ce->action, &rd);
break;
case OF_RECONFIG_ADD_PROPERTY:
case OF_RECONFIG_REMOVE_PROPERTY:
case OF_RECONFIG_UPDATE_PROPERTY:
ret = of_property_notify(ce->action, ce->np, ce->prop, ce->old_prop);
break;
default:
pr_err("invalid devicetree changeset action: %i\n",
(int)ce->action);
ret = -EINVAL;
}
if (ret)
pr_err("changeset notifier error @%pOF\n", ce->np);
return ret;
}
static int __of_changeset_entry_apply(struct of_changeset_entry *ce)
{
int ret = 0;
of_changeset_action_debug("apply: ", ce->action, ce->np, ce->prop);
switch (ce->action) {
case OF_RECONFIG_ATTACH_NODE:
__of_attach_node(ce->np);
break;
case OF_RECONFIG_DETACH_NODE:
__of_detach_node(ce->np);
break;
case OF_RECONFIG_ADD_PROPERTY:
ret = __of_add_property(ce->np, ce->prop);
break;
case OF_RECONFIG_REMOVE_PROPERTY:
ret = __of_remove_property(ce->np, ce->prop);
break;
case OF_RECONFIG_UPDATE_PROPERTY:
ret = __of_update_property(ce->np, ce->prop, &ce->old_prop);
break;
default:
ret = -EINVAL;
}
if (ret) {
of_changeset_action_err("apply failed: ", ce->action, ce->np, ce->prop);
return ret;
}
return 0;
}
static inline int __of_changeset_entry_revert(struct of_changeset_entry *ce)
{
struct of_changeset_entry ce_inverted;
__of_changeset_entry_invert(ce, &ce_inverted);
return __of_changeset_entry_apply(&ce_inverted);
}
/**
* of_changeset_init - Initialize a changeset for use
*
* @ocs: changeset pointer
*
* Initialize a changeset structure
*/
void of_changeset_init(struct of_changeset *ocs)
{
memset(ocs, 0, sizeof(*ocs));
INIT_LIST_HEAD(&ocs->entries);
}
EXPORT_SYMBOL_GPL(of_changeset_init);
/**
* of_changeset_destroy - Destroy a changeset
*
* @ocs: changeset pointer
*
* Destroys a changeset. Note that if a changeset is applied,
* its changes to the tree cannot be reverted.
*/
void of_changeset_destroy(struct of_changeset *ocs)
{
struct of_changeset_entry *ce, *cen;
list_for_each_entry_safe_reverse(ce, cen, &ocs->entries, node)
__of_changeset_entry_destroy(ce);
}
EXPORT_SYMBOL_GPL(of_changeset_destroy);
/*
* Apply the changeset entries in @ocs.
* If apply fails, an attempt is made to revert the entries that were
* successfully applied.
*
* If multiple revert errors occur then only the final revert error is reported.
*
* Returns 0 on success, a negative error value in case of an error.
* If a revert error occurs, it is returned in *ret_revert.
*/
int __of_changeset_apply_entries(struct of_changeset *ocs, int *ret_revert)
{
struct of_changeset_entry *ce;
int ret, ret_tmp;
pr_debug("changeset: applying...\n");
list_for_each_entry(ce, &ocs->entries, node) {
ret = __of_changeset_entry_apply(ce);
if (ret) {
pr_err("Error applying changeset (%d)\n", ret);
list_for_each_entry_continue_reverse(ce, &ocs->entries,
node) {
ret_tmp = __of_changeset_entry_revert(ce);
if (ret_tmp)
*ret_revert = ret_tmp;
}
return ret;
}
}
return 0;
}
/*
* Returns 0 on success, a negative error value in case of an error.
*
* If multiple changeset entry notification errors occur then only the
* final notification error is reported.
*/
int __of_changeset_apply_notify(struct of_changeset *ocs)
{
struct of_changeset_entry *ce;
int ret = 0, ret_tmp;
pr_debug("changeset: emitting notifiers.\n");
/* drop the global lock while emitting notifiers */
mutex_unlock(&of_mutex);
list_for_each_entry(ce, &ocs->entries, node) {
ret_tmp = __of_changeset_entry_notify(ce, 0);
if (ret_tmp)
ret = ret_tmp;
}
mutex_lock(&of_mutex);
pr_debug("changeset: notifiers sent.\n");
return ret;
}
/*
* Returns 0 on success, a negative error value in case of an error.
*
* If a changeset entry apply fails, an attempt is made to revert any
* previous entries in the changeset. If any of the reverts fails,
* that failure is not reported. Thus the state of the device tree
* is unknown if an apply error occurs.
*/
static int __of_changeset_apply(struct of_changeset *ocs)
{
int ret, ret_revert = 0;
ret = __of_changeset_apply_entries(ocs, &ret_revert);
if (!ret)
ret = __of_changeset_apply_notify(ocs);
return ret;
}
/**
* of_changeset_apply - Applies a changeset
*
* @ocs: changeset pointer
*
* Applies a changeset to the live tree.
* Any side-effects of live tree state changes are applied here on
* success, like creation/destruction of devices and side-effects
* like creation of sysfs properties and directories.
*
* Return: 0 on success, a negative error value in case of an error.
* On error the partially applied effects are reverted.
*/
int of_changeset_apply(struct of_changeset *ocs)
{
int ret;
mutex_lock(&of_mutex);
ret = __of_changeset_apply(ocs);
mutex_unlock(&of_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(of_changeset_apply);
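/*
 * Lifecycle sketch (node name, property and error handling are
 * hypothetical): a typical user builds up a changeset, applies it, and
 * later either reverts it or destroys it once the changes are permanent.
 *
 *	struct of_changeset ocs;
 *	struct device_node *np;
 *
 *	of_changeset_init(&ocs);
 *	np = of_changeset_create_node(&ocs, parent, "foo@0");
 *	if (!np)
 *		goto out_destroy;
 *	if (of_changeset_add_prop_string(&ocs, np, "status", "okay"))
 *		goto out_destroy;
 *	if (of_changeset_apply(&ocs))
 *		goto out_destroy;
 *	...
 *	of_changeset_revert(&ocs);
 * out_destroy:
 *	of_changeset_destroy(&ocs);
 */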
/*
* Revert the changeset entries in @ocs.
* If revert fails, an attempt is made to re-apply the entries that were
* successfully removed.
*
* If multiple re-apply errors occur then only the final apply error is
* reported.
*
* Returns 0 on success, a negative error value in case of an error.
* If an apply error occurs, it is returned in *ret_apply.
*/
int __of_changeset_revert_entries(struct of_changeset *ocs, int *ret_apply)
{
struct of_changeset_entry *ce;
int ret, ret_tmp;
pr_debug("changeset: reverting...\n");
list_for_each_entry_reverse(ce, &ocs->entries, node) {
ret = __of_changeset_entry_revert(ce);
if (ret) {
pr_err("Error reverting changeset (%d)\n", ret);
list_for_each_entry_continue(ce, &ocs->entries, node) {
ret_tmp = __of_changeset_entry_apply(ce);
if (ret_tmp)
*ret_apply = ret_tmp;
}
return ret;
}
}
return 0;
}
/*
* If multiple changeset entry notification errors occur then only the
* final notification error is reported.
*/
int __of_changeset_revert_notify(struct of_changeset *ocs)
{
struct of_changeset_entry *ce;
int ret = 0, ret_tmp;
pr_debug("changeset: emitting notifiers.\n");
/* drop the global lock while emitting notifiers */
mutex_unlock(&of_mutex);
list_for_each_entry_reverse(ce, &ocs->entries, node) {
ret_tmp = __of_changeset_entry_notify(ce, 1);
if (ret_tmp)
ret = ret_tmp;
}
mutex_lock(&of_mutex);
pr_debug("changeset: notifiers sent.\n");
return ret;
}
static int __of_changeset_revert(struct of_changeset *ocs)
{
int ret, ret_reply;
ret_reply = 0;
ret = __of_changeset_revert_entries(ocs, &ret_reply);
if (!ret)
ret = __of_changeset_revert_notify(ocs);
return ret;
}
/**
* of_changeset_revert - Reverts an applied changeset
*
* @ocs: changeset pointer
*
* Reverts a changeset returning the state of the tree to what it
* was before the application.
* Any side-effects like creation/destruction of devices and
* removal of sysfs properties and directories are applied.
*
* Return: 0 on success, a negative error value in case of an error.
*/
int of_changeset_revert(struct of_changeset *ocs)
{
int ret;
mutex_lock(&of_mutex);
ret = __of_changeset_revert(ocs);
mutex_unlock(&of_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(of_changeset_revert);
/**
* of_changeset_action - Add an action to the tail of the changeset list
*
* @ocs: changeset pointer
* @action: action to perform
* @np: Pointer to device node
* @prop: Pointer to property
*
* On action being one of:
* + OF_RECONFIG_ATTACH_NODE
* + OF_RECONFIG_DETACH_NODE,
* + OF_RECONFIG_ADD_PROPERTY
* + OF_RECONFIG_REMOVE_PROPERTY,
* + OF_RECONFIG_UPDATE_PROPERTY
*
* Return: 0 on success, a negative error value in case of an error.
*/
int of_changeset_action(struct of_changeset *ocs, unsigned long action,
struct device_node *np, struct property *prop)
{
	struct of_changeset_entry *ce;
	/* validate the action first so an invalid one does not leak ce */
	if (WARN_ON(action >= ARRAY_SIZE(action_names)))
		return -EINVAL;
	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
	if (!ce)
		return -ENOMEM;
/* get a reference to the node */
ce->action = action;
ce->np = of_node_get(np);
ce->prop = prop;
/* add it to the list */
list_add_tail(&ce->node, &ocs->entries);
return 0;
}
EXPORT_SYMBOL_GPL(of_changeset_action);
static int of_changeset_add_prop_helper(struct of_changeset *ocs,
struct device_node *np,
const struct property *pp)
{
struct property *new_pp;
int ret;
new_pp = __of_prop_dup(pp, GFP_KERNEL);
if (!new_pp)
return -ENOMEM;
ret = of_changeset_add_property(ocs, np, new_pp);
if (ret) {
kfree(new_pp->name);
kfree(new_pp->value);
kfree(new_pp);
}
return ret;
}
/**
* of_changeset_add_prop_string - Add a string property to a changeset
*
* @ocs: changeset pointer
* @np: device node pointer
* @prop_name: name of the property to be added
* @str: pointer to null terminated string
*
* Create a string property and add it to a changeset.
*
* Return: 0 on success, a negative error value in case of an error.
*/
int of_changeset_add_prop_string(struct of_changeset *ocs,
struct device_node *np,
const char *prop_name, const char *str)
{
struct property prop;
prop.name = (char *)prop_name;
prop.length = strlen(str) + 1;
prop.value = (void *)str;
return of_changeset_add_prop_helper(ocs, np, &prop);
}
EXPORT_SYMBOL_GPL(of_changeset_add_prop_string);
/**
* of_changeset_add_prop_string_array - Add a string list property to
* a changeset
*
* @ocs: changeset pointer
* @np: device node pointer
* @prop_name: name of the property to be added
* @str_array: pointer to an array of null terminated strings
* @sz: number of string array elements
*
* Create a string list property and add it to a changeset.
*
* Return: 0 on success, a negative error value in case of an error.
*/
int of_changeset_add_prop_string_array(struct of_changeset *ocs,
struct device_node *np,
const char *prop_name,
const char **str_array, size_t sz)
{
struct property prop;
int i, ret;
char *vp;
prop.name = (char *)prop_name;
prop.length = 0;
for (i = 0; i < sz; i++)
prop.length += strlen(str_array[i]) + 1;
prop.value = kmalloc(prop.length, GFP_KERNEL);
if (!prop.value)
return -ENOMEM;
vp = prop.value;
for (i = 0; i < sz; i++) {
vp += snprintf(vp, (char *)prop.value + prop.length - vp, "%s",
str_array[i]) + 1;
}
ret = of_changeset_add_prop_helper(ocs, np, &prop);
kfree(prop.value);
return ret;
}
EXPORT_SYMBOL_GPL(of_changeset_add_prop_string_array);
/**
 * of_changeset_add_prop_u32_array - Add a property of 32 bit integers
 * to a changeset
*
* @ocs: changeset pointer
* @np: device node pointer
* @prop_name: name of the property to be added
* @array: pointer to an array of 32 bit integers
* @sz: number of array elements
*
* Create a property of 32 bit integers and add it to a changeset.
*
* Return: 0 on success, a negative error value in case of an error.
*/
int of_changeset_add_prop_u32_array(struct of_changeset *ocs,
struct device_node *np,
const char *prop_name,
const u32 *array, size_t sz)
{
struct property prop;
__be32 *val;
int i, ret;
val = kcalloc(sz, sizeof(__be32), GFP_KERNEL);
if (!val)
return -ENOMEM;
for (i = 0; i < sz; i++)
val[i] = cpu_to_be32(array[i]);
prop.name = (char *)prop_name;
prop.length = sizeof(u32) * sz;
prop.value = (void *)val;
ret = of_changeset_add_prop_helper(ocs, np, &prop);
kfree(val);
return ret;
}
EXPORT_SYMBOL_GPL(of_changeset_add_prop_u32_array);
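/*
 * Example call (property name and values are hypothetical): adding a
 * three-cell u32 array property to a node that is part of a pending
 * changeset; the array is converted to big-endian cells internally.
 *
 *	static const u32 foo_cells[] = { 0x10, 0x20, 0x30 };
 *
 *	ret = of_changeset_add_prop_u32_array(&ocs, np, "foo,cells",
 *					      foo_cells, ARRAY_SIZE(foo_cells));
 */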
| linux-master | drivers/of/dynamic.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/uio/uio_pdrv_genirq.c
*
* Userspace I/O platform driver with generic IRQ handling code.
*
* Copyright (C) 2008 Magnus Damm
*
* Based on uio_pdrv.c by Uwe Kleine-Koenig,
* Copyright (C) 2008 by Digi International Inc.
* All rights reserved.
*/
#include <linux/platform_device.h>
#include <linux/uio_driver.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/stringify.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#define DRIVER_NAME "uio_pdrv_genirq"
struct uio_pdrv_genirq_platdata {
struct uio_info *uioinfo;
spinlock_t lock;
unsigned long flags;
struct platform_device *pdev;
};
/* Bits in uio_pdrv_genirq_platdata.flags */
enum {
UIO_IRQ_DISABLED = 0,
};
static int uio_pdrv_genirq_open(struct uio_info *info, struct inode *inode)
{
struct uio_pdrv_genirq_platdata *priv = info->priv;
/* Wait until the Runtime PM code has woken up the device */
pm_runtime_get_sync(&priv->pdev->dev);
return 0;
}
static int uio_pdrv_genirq_release(struct uio_info *info, struct inode *inode)
{
struct uio_pdrv_genirq_platdata *priv = info->priv;
/* Tell the Runtime PM code that the device has become idle */
pm_runtime_put_sync(&priv->pdev->dev);
return 0;
}
static irqreturn_t uio_pdrv_genirq_handler(int irq, struct uio_info *dev_info)
{
struct uio_pdrv_genirq_platdata *priv = dev_info->priv;
/* Just disable the interrupt in the interrupt controller, and
* remember the state so we can allow user space to enable it later.
*/
spin_lock(&priv->lock);
if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags))
disable_irq_nosync(irq);
spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
static int uio_pdrv_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
{
struct uio_pdrv_genirq_platdata *priv = dev_info->priv;
unsigned long flags;
/* Allow user space to enable and disable the interrupt
* in the interrupt controller, but keep track of the
* state to prevent per-irq depth damage.
*
* Serialize this operation to support multiple tasks and concurrency
* with irq handler on SMP systems.
*/
spin_lock_irqsave(&priv->lock, flags);
if (irq_on) {
if (__test_and_clear_bit(UIO_IRQ_DISABLED, &priv->flags))
enable_irq(dev_info->irq);
} else {
if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags))
disable_irq_nosync(dev_info->irq);
}
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
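/*
 * Userspace counterpart sketch (the file descriptor handling is a
 * hypothetical example, not part of this driver): the irqcontrol hook
 * above is reached by write()ing a 32-bit integer to /dev/uioX, and
 * interrupts are consumed by read()ing a 32-bit event counter.
 *
 *	s32 irq_on = 1;
 *	u32 count;
 *
 *	write(uio_fd, &irq_on, sizeof(irq_on));
 *	read(uio_fd, &count, sizeof(count));
 *
 * where the write re-enables the interrupt and the read blocks until the
 * next event, returning the total interrupt count seen so far.
 */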
static void uio_pdrv_genirq_cleanup(void *data)
{
struct device *dev = data;
pm_runtime_disable(dev);
}
static int uio_pdrv_genirq_probe(struct platform_device *pdev)
{
struct uio_info *uioinfo = dev_get_platdata(&pdev->dev);
struct device_node *node = pdev->dev.of_node;
struct uio_pdrv_genirq_platdata *priv;
struct uio_mem *uiomem;
int ret = -EINVAL;
int i;
if (node) {
const char *name;
/* alloc uioinfo for one device */
uioinfo = devm_kzalloc(&pdev->dev, sizeof(*uioinfo),
GFP_KERNEL);
if (!uioinfo) {
dev_err(&pdev->dev, "unable to kmalloc\n");
return -ENOMEM;
}
if (!of_property_read_string(node, "linux,uio-name", &name))
uioinfo->name = devm_kstrdup(&pdev->dev, name, GFP_KERNEL);
else
uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"%pOFn", node);
uioinfo->version = "devicetree";
/* Multiple IRQs are not supported */
}
if (!uioinfo || !uioinfo->name || !uioinfo->version) {
dev_err(&pdev->dev, "missing platform_data\n");
return ret;
}
if (uioinfo->handler || uioinfo->irqcontrol ||
uioinfo->irq_flags & IRQF_SHARED) {
dev_err(&pdev->dev, "interrupt configuration error\n");
return ret;
}
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
dev_err(&pdev->dev, "unable to kmalloc\n");
return -ENOMEM;
}
priv->uioinfo = uioinfo;
spin_lock_init(&priv->lock);
priv->flags = 0; /* interrupt is enabled to begin with */
priv->pdev = pdev;
if (!uioinfo->irq) {
ret = platform_get_irq_optional(pdev, 0);
uioinfo->irq = ret;
if (ret == -ENXIO)
uioinfo->irq = UIO_IRQ_NONE;
else if (ret == -EPROBE_DEFER)
return ret;
else if (ret < 0) {
dev_err(&pdev->dev, "failed to get IRQ\n");
return ret;
}
}
if (uioinfo->irq) {
struct irq_data *irq_data = irq_get_irq_data(uioinfo->irq);
/*
		 * If a level interrupt, don't do lazy disable. Otherwise the
		 * irq will fire again since clearing of the actual cause, on
		 * device level, is done in userspace.
		 * irqd_is_level_type() isn't used since it isn't valid until
		 * the irq is configured.
*/
if (irq_data &&
irqd_get_trigger_type(irq_data) & IRQ_TYPE_LEVEL_MASK) {
dev_dbg(&pdev->dev, "disable lazy unmask\n");
irq_set_status_flags(uioinfo->irq, IRQ_DISABLE_UNLAZY);
}
}
uiomem = &uioinfo->mem[0];
for (i = 0; i < pdev->num_resources; ++i) {
struct resource *r = &pdev->resource[i];
if (r->flags != IORESOURCE_MEM)
continue;
if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
dev_warn(&pdev->dev, "device has more than "
__stringify(MAX_UIO_MAPS)
" I/O memory resources.\n");
break;
}
uiomem->memtype = UIO_MEM_PHYS;
uiomem->addr = r->start & PAGE_MASK;
uiomem->offs = r->start & ~PAGE_MASK;
uiomem->size = (uiomem->offs + resource_size(r)
+ PAGE_SIZE - 1) & PAGE_MASK;
uiomem->name = r->name;
++uiomem;
}
while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) {
uiomem->size = 0;
++uiomem;
}
/* This driver requires no hardware specific kernel code to handle
* interrupts. Instead, the interrupt handler simply disables the
* interrupt in the interrupt controller. User space is responsible
* for performing hardware specific acknowledge and re-enabling of
* the interrupt in the interrupt controller.
*
* Interrupt sharing is not supported.
*/
uioinfo->handler = uio_pdrv_genirq_handler;
uioinfo->irqcontrol = uio_pdrv_genirq_irqcontrol;
uioinfo->open = uio_pdrv_genirq_open;
uioinfo->release = uio_pdrv_genirq_release;
uioinfo->priv = priv;
/* Enable Runtime PM for this device:
* The device starts in suspended state to allow the hardware to be
* turned off by default. The Runtime PM bus code should power on the
* hardware and enable clocks at open().
*/
pm_runtime_enable(&pdev->dev);
ret = devm_add_action_or_reset(&pdev->dev, uio_pdrv_genirq_cleanup,
&pdev->dev);
if (ret)
return ret;
ret = devm_uio_register_device(&pdev->dev, priv->uioinfo);
if (ret)
dev_err(&pdev->dev, "unable to register uio device\n");
return ret;
}
static int uio_pdrv_genirq_runtime_nop(struct device *dev)
{
/* Runtime PM callback shared between ->runtime_suspend()
* and ->runtime_resume(). Simply returns success.
*
* In this driver pm_runtime_get_sync() and pm_runtime_put_sync()
* are used at open() and release() time. This allows the
* Runtime PM code to turn off power to the device while the
* device is unused, ie before open() and after release().
*
* This Runtime PM callback does not need to save or restore
	 * any registers since user space is responsible for hardware
* register reinitialization after open().
*/
return 0;
}
static const struct dev_pm_ops uio_pdrv_genirq_dev_pm_ops = {
.runtime_suspend = uio_pdrv_genirq_runtime_nop,
.runtime_resume = uio_pdrv_genirq_runtime_nop,
};
#ifdef CONFIG_OF
static struct of_device_id uio_of_genirq_match[] = {
	{ /* This is filled with module_param */ },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, uio_of_genirq_match);
module_param_string(of_id, uio_of_genirq_match[0].compatible, 128, 0);
MODULE_PARM_DESC(of_id, "Openfirmware id of the device to be handled by uio");
#endif
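/*
 * Binding sketch (the compatible string and device tree node are
 * hypothetical examples): because the OF match table above is empty by
 * default, the compatible value to claim is normally supplied at module
 * load time, e.g. "modprobe uio_pdrv_genirq of_id=acme,foo-device",
 * matching a node such as:
 *
 *	foo@43c00000 {
 *		compatible = "acme,foo-device";
 *		reg = <0x43c00000 0x10000>;
 *		interrupts = <0 29 4>;
 *	};
 */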
static struct platform_driver uio_pdrv_genirq = {
.probe = uio_pdrv_genirq_probe,
.driver = {
.name = DRIVER_NAME,
.pm = &uio_pdrv_genirq_dev_pm_ops,
.of_match_table = of_match_ptr(uio_of_genirq_match),
},
};
module_platform_driver(uio_pdrv_genirq);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Userspace I/O platform driver with generic IRQ handling");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
| linux-master | drivers/uio/uio_pdrv_genirq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * UIO driver for the Humusoft MF624 DAQ card.
* Copyright (C) 2011 Rostislav Lisovy <[email protected]>,
* Czech Technical University in Prague
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/uio_driver.h>
#define PCI_VENDOR_ID_HUMUSOFT 0x186c
#define PCI_DEVICE_ID_MF624 0x0624
#define PCI_SUBVENDOR_ID_HUMUSOFT 0x186c
#define PCI_SUBDEVICE_DEVICE 0x0624
/* BAR0 Interrupt control/status register */
#define INTCSR 0x4C
#define INTCSR_ADINT_ENABLE (1 << 0)
#define INTCSR_CTR4INT_ENABLE (1 << 3)
#define INTCSR_PCIINT_ENABLE (1 << 6)
#define INTCSR_ADINT_STATUS (1 << 2)
#define INTCSR_CTR4INT_STATUS (1 << 5)
enum mf624_interrupt_source {ADC, CTR4, ALL};
static void mf624_disable_interrupt(enum mf624_interrupt_source source,
struct uio_info *info)
{
void __iomem *INTCSR_reg = info->mem[0].internal_addr + INTCSR;
switch (source) {
case ADC:
iowrite32(ioread32(INTCSR_reg)
& ~(INTCSR_ADINT_ENABLE | INTCSR_PCIINT_ENABLE),
INTCSR_reg);
break;
case CTR4:
iowrite32(ioread32(INTCSR_reg)
& ~(INTCSR_CTR4INT_ENABLE | INTCSR_PCIINT_ENABLE),
INTCSR_reg);
break;
case ALL:
default:
iowrite32(ioread32(INTCSR_reg)
& ~(INTCSR_ADINT_ENABLE | INTCSR_CTR4INT_ENABLE
| INTCSR_PCIINT_ENABLE),
INTCSR_reg);
break;
}
}
static void mf624_enable_interrupt(enum mf624_interrupt_source source,
struct uio_info *info)
{
void __iomem *INTCSR_reg = info->mem[0].internal_addr + INTCSR;
switch (source) {
case ADC:
iowrite32(ioread32(INTCSR_reg)
| INTCSR_ADINT_ENABLE | INTCSR_PCIINT_ENABLE,
INTCSR_reg);
break;
case CTR4:
iowrite32(ioread32(INTCSR_reg)
| INTCSR_CTR4INT_ENABLE | INTCSR_PCIINT_ENABLE,
INTCSR_reg);
break;
case ALL:
default:
iowrite32(ioread32(INTCSR_reg)
| INTCSR_ADINT_ENABLE | INTCSR_CTR4INT_ENABLE
| INTCSR_PCIINT_ENABLE,
INTCSR_reg);
break;
}
}
static irqreturn_t mf624_irq_handler(int irq, struct uio_info *info)
{
void __iomem *INTCSR_reg = info->mem[0].internal_addr + INTCSR;
if ((ioread32(INTCSR_reg) & INTCSR_ADINT_ENABLE)
&& (ioread32(INTCSR_reg) & INTCSR_ADINT_STATUS)) {
mf624_disable_interrupt(ADC, info);
return IRQ_HANDLED;
}
if ((ioread32(INTCSR_reg) & INTCSR_CTR4INT_ENABLE)
&& (ioread32(INTCSR_reg) & INTCSR_CTR4INT_STATUS)) {
mf624_disable_interrupt(CTR4, info);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int mf624_irqcontrol(struct uio_info *info, s32 irq_on)
{
if (irq_on == 0)
mf624_disable_interrupt(ALL, info);
else if (irq_on == 1)
mf624_enable_interrupt(ALL, info);
return 0;
}
static int mf624_setup_mem(struct pci_dev *dev, int bar, struct uio_mem *mem, const char *name)
{
resource_size_t start = pci_resource_start(dev, bar);
resource_size_t len = pci_resource_len(dev, bar);
mem->name = name;
mem->addr = start & PAGE_MASK;
mem->offs = start & ~PAGE_MASK;
if (!mem->addr)
return -ENODEV;
mem->size = ((start & ~PAGE_MASK) + len + PAGE_SIZE - 1) & PAGE_MASK;
mem->memtype = UIO_MEM_PHYS;
mem->internal_addr = pci_ioremap_bar(dev, bar);
if (!mem->internal_addr)
return -ENODEV;
return 0;
}
static int mf624_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct uio_info *info;
info = devm_kzalloc(&dev->dev, sizeof(struct uio_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
if (pci_enable_device(dev))
return -ENODEV;
if (pci_request_regions(dev, "mf624"))
goto out_disable;
info->name = "mf624";
info->version = "0.0.1";
/* Note: Datasheet says device uses BAR0, BAR1, BAR2 -- do not trust it */
/* BAR0 */
if (mf624_setup_mem(dev, 0, &info->mem[0], "PCI chipset, interrupts, status "
"bits, special functions"))
goto out_release;
/* BAR2 */
if (mf624_setup_mem(dev, 2, &info->mem[1], "ADC, DAC, DIO"))
goto out_unmap0;
/* BAR4 */
if (mf624_setup_mem(dev, 4, &info->mem[2], "Counter/timer chip"))
goto out_unmap1;
info->irq = dev->irq;
info->irq_flags = IRQF_SHARED;
info->handler = mf624_irq_handler;
info->irqcontrol = mf624_irqcontrol;
if (uio_register_device(&dev->dev, info))
goto out_unmap2;
pci_set_drvdata(dev, info);
return 0;
out_unmap2:
iounmap(info->mem[2].internal_addr);
out_unmap1:
iounmap(info->mem[1].internal_addr);
out_unmap0:
iounmap(info->mem[0].internal_addr);
out_release:
pci_release_regions(dev);
out_disable:
pci_disable_device(dev);
return -ENODEV;
}
static void mf624_pci_remove(struct pci_dev *dev)
{
struct uio_info *info = pci_get_drvdata(dev);
mf624_disable_interrupt(ALL, info);
uio_unregister_device(info);
pci_release_regions(dev);
pci_disable_device(dev);
iounmap(info->mem[0].internal_addr);
iounmap(info->mem[1].internal_addr);
iounmap(info->mem[2].internal_addr);
}
static const struct pci_device_id mf624_pci_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUMUSOFT, PCI_DEVICE_ID_MF624) },
{ 0, }
};
static struct pci_driver mf624_pci_driver = {
.name = "mf624",
.id_table = mf624_pci_id,
.probe = mf624_pci_probe,
.remove = mf624_pci_remove,
};
MODULE_DEVICE_TABLE(pci, mf624_pci_id);
module_pci_driver(mf624_pci_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Rostislav Lisovy <[email protected]>");
| linux-master | drivers/uio/uio_mf624.c |
// SPDX-License-Identifier: GPL-2.0
/*
* UIO driver for Hilscher NetX based fieldbus cards (cifX, comX).
* See http://www.hilscher.com for details.
*
* (C) 2007 Hans J. Koch <[email protected]>
* (C) 2008 Manuel Traut <[email protected]>
*
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uio_driver.h>
#define PCI_VENDOR_ID_HILSCHER 0x15CF
#define PCI_DEVICE_ID_HILSCHER_NETX 0x0000
#define PCI_DEVICE_ID_HILSCHER_NETPLC 0x0010
#define PCI_SUBDEVICE_ID_NETPLC_RAM 0x0000
#define PCI_SUBDEVICE_ID_NETPLC_FLASH 0x0001
#define PCI_SUBDEVICE_ID_NXSB_PCA 0x3235
#define PCI_SUBDEVICE_ID_NXPCA 0x3335
#define DPM_HOST_INT_EN0 0xfff0
#define DPM_HOST_INT_STAT0 0xffe0
#define DPM_HOST_INT_MASK 0xe600ffff
#define DPM_HOST_INT_GLOBAL_EN 0x80000000
static irqreturn_t netx_handler(int irq, struct uio_info *dev_info)
{
void __iomem *int_enable_reg = dev_info->mem[0].internal_addr
+ DPM_HOST_INT_EN0;
void __iomem *int_status_reg = dev_info->mem[0].internal_addr
+ DPM_HOST_INT_STAT0;
/* Is one of our interrupts enabled and active ? */
if (!(ioread32(int_enable_reg) & ioread32(int_status_reg)
& DPM_HOST_INT_MASK))
return IRQ_NONE;
/* Disable interrupt */
iowrite32(ioread32(int_enable_reg) & ~DPM_HOST_INT_GLOBAL_EN,
int_enable_reg);
return IRQ_HANDLED;
}
static int netx_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct uio_info *info;
int bar;
info = devm_kzalloc(&dev->dev, sizeof(struct uio_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
if (pci_enable_device(dev))
return -ENODEV;
if (pci_request_regions(dev, "netx"))
goto out_disable;
switch (id->device) {
case PCI_DEVICE_ID_HILSCHER_NETX:
bar = 0;
info->name = "netx";
break;
case PCI_DEVICE_ID_HILSCHER_NETPLC:
bar = 0;
info->name = "netplc";
break;
default:
bar = 2;
info->name = "netx_plx";
}
/* BAR0 or 2 points to the card's dual port memory */
info->mem[0].addr = pci_resource_start(dev, bar);
if (!info->mem[0].addr)
goto out_release;
info->mem[0].internal_addr = ioremap(pci_resource_start(dev, bar),
pci_resource_len(dev, bar));
if (!info->mem[0].internal_addr)
goto out_release;
info->mem[0].size = pci_resource_len(dev, bar);
info->mem[0].memtype = UIO_MEM_PHYS;
info->irq = dev->irq;
info->irq_flags = IRQF_SHARED;
info->handler = netx_handler;
info->version = "0.0.1";
/* Make sure all interrupts are disabled */
iowrite32(0, info->mem[0].internal_addr + DPM_HOST_INT_EN0);
if (uio_register_device(&dev->dev, info))
goto out_unmap;
pci_set_drvdata(dev, info);
dev_info(&dev->dev, "Found %s card, registered UIO device.\n",
info->name);
return 0;
out_unmap:
iounmap(info->mem[0].internal_addr);
out_release:
pci_release_regions(dev);
out_disable:
pci_disable_device(dev);
return -ENODEV;
}
static void netx_pci_remove(struct pci_dev *dev)
{
struct uio_info *info = pci_get_drvdata(dev);
/* Disable all interrupts */
iowrite32(0, info->mem[0].internal_addr + DPM_HOST_INT_EN0);
uio_unregister_device(info);
pci_release_regions(dev);
pci_disable_device(dev);
iounmap(info->mem[0].internal_addr);
}
static struct pci_device_id netx_pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_HILSCHER,
.device = PCI_DEVICE_ID_HILSCHER_NETX,
.subvendor = 0,
.subdevice = 0,
},
{
.vendor = PCI_VENDOR_ID_HILSCHER,
.device = PCI_DEVICE_ID_HILSCHER_NETPLC,
.subvendor = PCI_VENDOR_ID_HILSCHER,
.subdevice = PCI_SUBDEVICE_ID_NETPLC_RAM,
},
{
.vendor = PCI_VENDOR_ID_HILSCHER,
.device = PCI_DEVICE_ID_HILSCHER_NETPLC,
.subvendor = PCI_VENDOR_ID_HILSCHER,
.subdevice = PCI_SUBDEVICE_ID_NETPLC_FLASH,
},
{
.vendor = PCI_VENDOR_ID_PLX,
.device = PCI_DEVICE_ID_PLX_9030,
.subvendor = PCI_VENDOR_ID_PLX,
.subdevice = PCI_SUBDEVICE_ID_NXSB_PCA,
},
{
.vendor = PCI_VENDOR_ID_PLX,
.device = PCI_DEVICE_ID_PLX_9030,
.subvendor = PCI_VENDOR_ID_PLX,
.subdevice = PCI_SUBDEVICE_ID_NXPCA,
},
{ 0, }
};
static struct pci_driver netx_pci_driver = {
.name = "netx",
.id_table = netx_pci_ids,
.probe = netx_pci_probe,
.remove = netx_pci_remove,
};
module_pci_driver(netx_pci_driver);
MODULE_DEVICE_TABLE(pci, netx_pci_ids);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Hans J. Koch, Manuel Traut");
| linux-master | drivers/uio/uio_netx.c |
// SPDX-License-Identifier: GPL-2.0
/* uio_fsl_elbc_gpcm: UIO driver for eLBC/GPCM peripherals
Copyright (C) 2014 Linutronix GmbH
Author: John Ogness <[email protected]>
This driver provides UIO access to memory of a peripheral connected
to the Freescale enhanced local bus controller (eLBC) interface
using the general purpose chip-select mode (GPCM).
Here is an example of the device tree entries:
localbus@ffe05000 {
ranges = <0x2 0x0 0x0 0xff810000 0x10000>;
dpm@2,0 {
compatible = "fsl,elbc-gpcm-uio";
reg = <0x2 0x0 0x10000>;
elbc-gpcm-br = <0xff810800>;
elbc-gpcm-or = <0xffff09f7>;
interrupt-parent = <&mpic>;
interrupts = <4 1>;
device_type = "netx5152";
uio_name = "netx_custom";
netx5152,init-win0-offset = <0x0>;
};
};
Only the entries reg (to identify bank) and elbc-gpcm-* (initial BR/OR
values) are required. The entries interrupt*, device_type, and uio_name
are optional (as well as any type-specific options such as
netx5152,init-win0-offset). As long as no interrupt handler is needed,
this driver can be used without any type-specific implementation.
The netx5152 type has been tested to work with the netX 51/52 hardware
from Hilscher using the Hilscher userspace netX stack.
The netx5152 type should serve as a model to add new type-specific
devices as needed.
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/uio_driver.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/fsl_lbc.h>
#define MAX_BANKS 8
struct fsl_elbc_gpcm {
struct device *dev;
struct fsl_lbc_regs __iomem *lbc;
u32 bank;
const char *name;
void (*init)(struct uio_info *info);
void (*shutdown)(struct uio_info *info, bool init_err);
irqreturn_t (*irq_handler)(int irq, struct uio_info *info);
};
static ssize_t reg_show(struct device *dev, struct device_attribute *attr,
char *buf);
static ssize_t reg_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
static DEVICE_ATTR(reg_br, 0664, reg_show, reg_store);
static DEVICE_ATTR(reg_or, 0664, reg_show, reg_store);
static struct attribute *uio_fsl_elbc_gpcm_attrs[] = {
&dev_attr_reg_br.attr,
&dev_attr_reg_or.attr,
NULL,
};
ATTRIBUTE_GROUPS(uio_fsl_elbc_gpcm);
static ssize_t reg_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct uio_info *info = dev_get_drvdata(dev);
struct fsl_elbc_gpcm *priv = info->priv;
struct fsl_lbc_bank *bank = &priv->lbc->bank[priv->bank];
if (attr == &dev_attr_reg_br) {
return scnprintf(buf, PAGE_SIZE, "0x%08x\n",
in_be32(&bank->br));
} else if (attr == &dev_attr_reg_or) {
return scnprintf(buf, PAGE_SIZE, "0x%08x\n",
in_be32(&bank->or));
}
return 0;
}
static ssize_t reg_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct uio_info *info = dev_get_drvdata(dev);
struct fsl_elbc_gpcm *priv = info->priv;
struct fsl_lbc_bank *bank = &priv->lbc->bank[priv->bank];
unsigned long val;
u32 reg_br_cur;
u32 reg_or_cur;
u32 reg_new;
	/* parse user input */
if (kstrtoul(buf, 0, &val) != 0)
return -EINVAL;
reg_new = (u32)val;
/* read current values */
reg_br_cur = in_be32(&bank->br);
reg_or_cur = in_be32(&bank->or);
if (attr == &dev_attr_reg_br) {
/* not allowed to change effective base address */
if ((reg_br_cur & reg_or_cur & BR_BA) !=
(reg_new & reg_or_cur & BR_BA)) {
return -EINVAL;
}
/* not allowed to change mode */
if ((reg_new & BR_MSEL) != BR_MS_GPCM)
return -EINVAL;
/* write new value (force valid) */
out_be32(&bank->br, reg_new | BR_V);
} else if (attr == &dev_attr_reg_or) {
/* not allowed to change access mask */
if ((reg_or_cur & OR_GPCM_AM) != (reg_new & OR_GPCM_AM))
return -EINVAL;
/* write new value */
out_be32(&bank->or, reg_new);
} else {
return -EINVAL;
}
return count;
}
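/*
 * Editor's illustration (not part of the original driver): the reg_br and
 * reg_or attributes defined above let user space inspect and adjust the
 * bank registers. A minimal sketch, assuming the dpm@2,0 node from the
 * header comment ended up as the hypothetical platform device
 * "ff810000.dpm":
 *
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *           unsigned int br;
 *           FILE *f = fopen("/sys/bus/platform/devices/ff810000.dpm/reg_br",
 *                           "r");
 *
 *           if (!f || fscanf(f, "%x", &br) != 1)
 *                   return 1;
 *           printf("BR: 0x%08x\n", br);
 *           fclose(f);
 *           return 0;
 *   }
 */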
#ifdef CONFIG_UIO_FSL_ELBC_GPCM_NETX5152
#define DPM_HOST_WIN0_OFFSET 0xff00
#define DPM_HOST_INT_STAT0 0xe0
#define DPM_HOST_INT_EN0 0xf0
#define DPM_HOST_INT_MASK 0xe600ffff
#define DPM_HOST_INT_GLOBAL_EN 0x80000000
static irqreturn_t netx5152_irq_handler(int irq, struct uio_info *info)
{
void __iomem *reg_int_en = info->mem[0].internal_addr +
DPM_HOST_WIN0_OFFSET +
DPM_HOST_INT_EN0;
void __iomem *reg_int_stat = info->mem[0].internal_addr +
DPM_HOST_WIN0_OFFSET +
DPM_HOST_INT_STAT0;
/* check if an interrupt is enabled and active */
if ((ioread32(reg_int_en) & ioread32(reg_int_stat) &
DPM_HOST_INT_MASK) == 0) {
return IRQ_NONE;
}
/* disable interrupts */
iowrite32(ioread32(reg_int_en) & ~DPM_HOST_INT_GLOBAL_EN, reg_int_en);
return IRQ_HANDLED;
}
static void netx5152_init(struct uio_info *info)
{
unsigned long win0_offset = DPM_HOST_WIN0_OFFSET;
struct fsl_elbc_gpcm *priv = info->priv;
const void *prop;
/* get an optional initial win0 offset */
prop = of_get_property(priv->dev->of_node,
"netx5152,init-win0-offset", NULL);
if (prop)
win0_offset = of_read_ulong(prop, 1);
/* disable interrupts */
iowrite32(0, info->mem[0].internal_addr + win0_offset +
DPM_HOST_INT_EN0);
}
static void netx5152_shutdown(struct uio_info *info, bool init_err)
{
if (init_err)
return;
/* disable interrupts */
iowrite32(0, info->mem[0].internal_addr + DPM_HOST_WIN0_OFFSET +
DPM_HOST_INT_EN0);
}
#endif
static void setup_periph(struct fsl_elbc_gpcm *priv,
const char *type)
{
#ifdef CONFIG_UIO_FSL_ELBC_GPCM_NETX5152
if (strcmp(type, "netx5152") == 0) {
priv->irq_handler = netx5152_irq_handler;
priv->init = netx5152_init;
priv->shutdown = netx5152_shutdown;
priv->name = "netX 51/52";
return;
}
#endif
}
static int check_of_data(struct fsl_elbc_gpcm *priv,
struct resource *res,
u32 reg_br, u32 reg_or)
{
/* check specified bank */
if (priv->bank >= MAX_BANKS) {
dev_err(priv->dev, "invalid bank\n");
return -ENODEV;
}
/* check specified mode (BR_MS_GPCM is 0) */
if ((reg_br & BR_MSEL) != BR_MS_GPCM) {
dev_err(priv->dev, "unsupported mode\n");
return -ENODEV;
}
/* check specified mask vs. resource size */
if ((~(reg_or & OR_GPCM_AM) + 1) != resource_size(res)) {
dev_err(priv->dev, "address mask / size mismatch\n");
return -ENODEV;
}
/* check specified address */
if ((reg_br & reg_or & BR_BA) != fsl_lbc_addr(res->start)) {
dev_err(priv->dev, "base address mismatch\n");
return -ENODEV;
}
return 0;
}
static int get_of_data(struct fsl_elbc_gpcm *priv, struct device_node *node,
struct resource *res, u32 *reg_br,
u32 *reg_or, unsigned int *irq, char **name)
{
const char *dt_name;
const char *type;
int ret;
/* get the memory resource */
ret = of_address_to_resource(node, 0, res);
if (ret) {
dev_err(priv->dev, "failed to get resource\n");
return ret;
}
/* get the bank number */
ret = of_property_read_u32(node, "reg", &priv->bank);
if (ret) {
dev_err(priv->dev, "failed to get bank number\n");
return ret;
}
/* get BR value to set */
ret = of_property_read_u32(node, "elbc-gpcm-br", reg_br);
if (ret) {
dev_err(priv->dev, "missing elbc-gpcm-br value\n");
return ret;
}
/* get OR value to set */
ret = of_property_read_u32(node, "elbc-gpcm-or", reg_or);
if (ret) {
dev_err(priv->dev, "missing elbc-gpcm-or value\n");
return ret;
}
/* get optional peripheral type */
priv->name = "generic";
if (of_property_read_string(node, "device_type", &type) == 0)
setup_periph(priv, type);
/* get optional irq value */
*irq = irq_of_parse_and_map(node, 0);
/* sanity check device tree data */
ret = check_of_data(priv, res, *reg_br, *reg_or);
if (ret)
return ret;
/* get optional uio name */
if (of_property_read_string(node, "uio_name", &dt_name) != 0)
dt_name = "eLBC_GPCM";
*name = devm_kstrdup(priv->dev, dt_name, GFP_KERNEL);
if (!*name)
return -ENOMEM;
return 0;
}
static int uio_fsl_elbc_gpcm_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct fsl_elbc_gpcm *priv;
struct uio_info *info;
char *uio_name = NULL;
struct resource res;
unsigned int irq;
u32 reg_br_cur;
u32 reg_or_cur;
u32 reg_br_new;
u32 reg_or_new;
int ret;
if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
return -ENODEV;
/* allocate private data */
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = &pdev->dev;
priv->lbc = fsl_lbc_ctrl_dev->regs;
/* get device tree data */
ret = get_of_data(priv, node, &res, &reg_br_new, &reg_or_new,
&irq, &uio_name);
if (ret)
return ret;
/* allocate UIO structure */
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
/* get current BR/OR values */
reg_br_cur = in_be32(&priv->lbc->bank[priv->bank].br);
reg_or_cur = in_be32(&priv->lbc->bank[priv->bank].or);
/* if bank already configured, make sure it matches */
if ((reg_br_cur & BR_V)) {
if ((reg_br_cur & BR_MSEL) != BR_MS_GPCM ||
(reg_br_cur & reg_or_cur & BR_BA)
!= fsl_lbc_addr(res.start)) {
dev_err(priv->dev,
"bank in use by another peripheral\n");
return -ENODEV;
}
/* warn if behavior settings changing */
if ((reg_br_cur & ~(BR_BA | BR_V)) !=
(reg_br_new & ~(BR_BA | BR_V))) {
dev_warn(priv->dev,
"modifying BR settings: 0x%08x -> 0x%08x",
reg_br_cur, reg_br_new);
}
if ((reg_or_cur & ~OR_GPCM_AM) != (reg_or_new & ~OR_GPCM_AM)) {
dev_warn(priv->dev,
"modifying OR settings: 0x%08x -> 0x%08x",
reg_or_cur, reg_or_new);
}
}
/* configure the bank (force base address and GPCM) */
reg_br_new &= ~(BR_BA | BR_MSEL);
reg_br_new |= fsl_lbc_addr(res.start) | BR_MS_GPCM | BR_V;
out_be32(&priv->lbc->bank[priv->bank].or, reg_or_new);
out_be32(&priv->lbc->bank[priv->bank].br, reg_br_new);
/* map the memory resource */
info->mem[0].internal_addr = ioremap(res.start, resource_size(&res));
if (!info->mem[0].internal_addr) {
dev_err(priv->dev, "failed to map chip region\n");
return -ENODEV;
}
/* set all UIO data */
info->mem[0].name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn", node);
info->mem[0].addr = res.start;
info->mem[0].size = resource_size(&res);
info->mem[0].memtype = UIO_MEM_PHYS;
info->priv = priv;
info->name = uio_name;
info->version = "0.0.1";
if (irq) {
if (priv->irq_handler) {
info->irq = irq;
info->irq_flags = IRQF_SHARED;
info->handler = priv->irq_handler;
} else {
irq = 0;
dev_warn(priv->dev, "ignoring irq, no handler\n");
}
}
if (priv->init)
priv->init(info);
/* register UIO device */
if (uio_register_device(priv->dev, info) != 0) {
dev_err(priv->dev, "UIO registration failed\n");
ret = -ENODEV;
goto out_err2;
}
/* store private data */
platform_set_drvdata(pdev, info);
dev_info(priv->dev,
"eLBC/GPCM device (%s) at 0x%llx, bank %d, irq=%d\n",
priv->name, (unsigned long long)res.start, priv->bank,
irq ? : -1);
return 0;
out_err2:
if (priv->shutdown)
priv->shutdown(info, true);
iounmap(info->mem[0].internal_addr);
return ret;
}
static int uio_fsl_elbc_gpcm_remove(struct platform_device *pdev)
{
struct uio_info *info = platform_get_drvdata(pdev);
struct fsl_elbc_gpcm *priv = info->priv;
platform_set_drvdata(pdev, NULL);
uio_unregister_device(info);
if (priv->shutdown)
priv->shutdown(info, false);
iounmap(info->mem[0].internal_addr);
return 0;
}
static const struct of_device_id uio_fsl_elbc_gpcm_match[] = {
{ .compatible = "fsl,elbc-gpcm-uio", },
{}
};
MODULE_DEVICE_TABLE(of, uio_fsl_elbc_gpcm_match);
static struct platform_driver uio_fsl_elbc_gpcm_driver = {
.driver = {
.name = "fsl,elbc-gpcm-uio",
.of_match_table = uio_fsl_elbc_gpcm_match,
.dev_groups = uio_fsl_elbc_gpcm_groups,
},
.probe = uio_fsl_elbc_gpcm_probe,
.remove = uio_fsl_elbc_gpcm_remove,
};
module_platform_driver(uio_fsl_elbc_gpcm_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Ogness <[email protected]>");
MODULE_DESCRIPTION("Freescale Enhanced Local Bus Controller GPCM driver");
| linux-master | drivers/uio/uio_fsl_elbc_gpcm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/uio/uio_dmem_genirq.c
*
* Userspace I/O platform driver with generic IRQ handling code.
*
* Copyright (C) 2012 Damian Hobson-Garcia
*
* Based on uio_pdrv_genirq.c by Magnus Damm
*/
#include <linux/platform_device.h>
#include <linux/uio_driver.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_data/uio_dmem_genirq.h>
#include <linux/stringify.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#define DRIVER_NAME "uio_dmem_genirq"
#define DMEM_MAP_ERROR (~0)
struct uio_dmem_genirq_platdata {
struct uio_info *uioinfo;
spinlock_t lock;
unsigned long flags;
struct platform_device *pdev;
unsigned int dmem_region_start;
unsigned int num_dmem_regions;
void *dmem_region_vaddr[MAX_UIO_MAPS];
struct mutex alloc_lock;
unsigned int refcnt;
};
/* Bits in uio_dmem_genirq_platdata.flags */
enum {
UIO_IRQ_DISABLED = 0,
};
static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
{
struct uio_dmem_genirq_platdata *priv = info->priv;
struct uio_mem *uiomem;
int dmem_region = priv->dmem_region_start;
uiomem = &priv->uioinfo->mem[priv->dmem_region_start];
mutex_lock(&priv->alloc_lock);
while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) {
void *addr;
if (!uiomem->size)
break;
addr = dma_alloc_coherent(&priv->pdev->dev, uiomem->size,
(dma_addr_t *)&uiomem->addr, GFP_KERNEL);
if (!addr) {
uiomem->addr = DMEM_MAP_ERROR;
}
priv->dmem_region_vaddr[dmem_region++] = addr;
++uiomem;
}
priv->refcnt++;
mutex_unlock(&priv->alloc_lock);
/* Wait until the Runtime PM code has woken up the device */
pm_runtime_get_sync(&priv->pdev->dev);
return 0;
}
static int uio_dmem_genirq_release(struct uio_info *info, struct inode *inode)
{
struct uio_dmem_genirq_platdata *priv = info->priv;
struct uio_mem *uiomem;
int dmem_region = priv->dmem_region_start;
/* Tell the Runtime PM code that the device has become idle */
pm_runtime_put_sync(&priv->pdev->dev);
uiomem = &priv->uioinfo->mem[priv->dmem_region_start];
mutex_lock(&priv->alloc_lock);
priv->refcnt--;
while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) {
if (!uiomem->size)
break;
if (priv->dmem_region_vaddr[dmem_region]) {
dma_free_coherent(&priv->pdev->dev, uiomem->size,
priv->dmem_region_vaddr[dmem_region],
uiomem->addr);
}
uiomem->addr = DMEM_MAP_ERROR;
++dmem_region;
++uiomem;
}
mutex_unlock(&priv->alloc_lock);
return 0;
}
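/*
 * Editor's illustration (not part of the original driver): a dynamic region
 * is only backed by DMA memory while the UIO device is open, and the address
 * chosen by dma_alloc_coherent() shows up in the corresponding
 * maps/mapN/addr attribute. Device and region indices below are assumptions
 * of the sketch.
 *
 *   #include <fcntl.h>
 *   #include <stdio.h>
 *   #include <sys/mman.h>
 *   #include <unistd.h>
 *
 *   int main(void)
 *   {
 *           int fd = open("/dev/uio0", O_RDWR);
 *           unsigned long long dma;
 *           FILE *f;
 *           void *p;
 *
 *           if (fd < 0)
 *                   return 1;
 *           f = fopen("/sys/class/uio/uio0/maps/map1/addr", "r");
 *           if (!f || fscanf(f, "%llx", &dma) != 1)
 *                   return 1;
 *           // map index N is selected by mmap offset N * page size
 *           p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                    fd, 1 * sysconf(_SC_PAGESIZE));
 *           printf("dynamic region at DMA 0x%llx mapped at %p\n", dma, p);
 *           return 0;
 *   }
 */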
static irqreturn_t uio_dmem_genirq_handler(int irq, struct uio_info *dev_info)
{
struct uio_dmem_genirq_platdata *priv = dev_info->priv;
/* Just disable the interrupt in the interrupt controller, and
* remember the state so we can allow user space to enable it later.
*/
spin_lock(&priv->lock);
if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags))
disable_irq_nosync(irq);
spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
{
struct uio_dmem_genirq_platdata *priv = dev_info->priv;
unsigned long flags;
/* Allow user space to enable and disable the interrupt
* in the interrupt controller, but keep track of the
* state to prevent per-irq depth damage.
*
* Serialize this operation to support multiple tasks and concurrency
* with irq handler on SMP systems.
*/
spin_lock_irqsave(&priv->lock, flags);
if (irq_on) {
if (__test_and_clear_bit(UIO_IRQ_DISABLED, &priv->flags))
enable_irq(dev_info->irq);
} else {
if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags))
disable_irq_nosync(dev_info->irq);
}
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
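/*
 * Editor's sketch of the user-space side of this handshake (not part of the
 * original driver): the handler above masks the interrupt, user space then
 * acknowledges the device-specific cause and writes a 32-bit 1 to the device
 * node, which lands in uio_dmem_genirq_irqcontrol() and unmasks it again.
 * The device node name is an assumption.
 *
 *   #include <fcntl.h>
 *   #include <stdint.h>
 *   #include <unistd.h>
 *
 *   void wait_and_rearm(int fd)
 *   {
 *           uint32_t count;
 *           int32_t one = 1;
 *
 *           read(fd, &count, sizeof(count));     // blocks until an event
 *           // ... acknowledge the device-specific cause here ...
 *           write(fd, &one, sizeof(one));        // unmask the interrupt
 *   }
 */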
static void uio_dmem_genirq_pm_disable(void *data)
{
struct device *dev = data;
pm_runtime_disable(dev);
}
static int uio_dmem_genirq_probe(struct platform_device *pdev)
{
struct uio_dmem_genirq_pdata *pdata = dev_get_platdata(&pdev->dev);
struct uio_info *uioinfo = &pdata->uioinfo;
struct uio_dmem_genirq_platdata *priv;
struct uio_mem *uiomem;
int ret = -EINVAL;
int i;
if (pdev->dev.of_node) {
/* alloc uioinfo for one device */
uioinfo = devm_kzalloc(&pdev->dev, sizeof(*uioinfo), GFP_KERNEL);
if (!uioinfo) {
dev_err(&pdev->dev, "unable to kmalloc\n");
return -ENOMEM;
}
uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
pdev->dev.of_node);
uioinfo->version = "devicetree";
}
if (!uioinfo || !uioinfo->name || !uioinfo->version) {
dev_err(&pdev->dev, "missing platform_data\n");
return -EINVAL;
}
if (uioinfo->handler || uioinfo->irqcontrol ||
uioinfo->irq_flags & IRQF_SHARED) {
dev_err(&pdev->dev, "interrupt configuration error\n");
return -EINVAL;
}
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
dev_err(&pdev->dev, "unable to kmalloc\n");
return -ENOMEM;
}
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev, "DMA enable failed\n");
return ret;
}
priv->uioinfo = uioinfo;
spin_lock_init(&priv->lock);
priv->flags = 0; /* interrupt is enabled to begin with */
priv->pdev = pdev;
mutex_init(&priv->alloc_lock);
if (!uioinfo->irq) {
/* Multiple IRQs are not supported */
ret = platform_get_irq(pdev, 0);
if (ret == -ENXIO && pdev->dev.of_node)
ret = UIO_IRQ_NONE;
else if (ret < 0)
return ret;
uioinfo->irq = ret;
}
if (uioinfo->irq) {
struct irq_data *irq_data = irq_get_irq_data(uioinfo->irq);
/*
* If a level interrupt, don't do lazy disable. Otherwise the
* irq will fire again since clearing of the actual cause, on
* device level, is done in userspace.
* irqd_is_level_type() isn't used since it isn't valid until
* the irq is configured.
*/
if (irq_data &&
irqd_get_trigger_type(irq_data) & IRQ_TYPE_LEVEL_MASK) {
dev_dbg(&pdev->dev, "disable lazy unmask\n");
irq_set_status_flags(uioinfo->irq, IRQ_DISABLE_UNLAZY);
}
}
uiomem = &uioinfo->mem[0];
for (i = 0; i < pdev->num_resources; ++i) {
struct resource *r = &pdev->resource[i];
if (r->flags != IORESOURCE_MEM)
continue;
if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
dev_warn(&pdev->dev, "device has more than "
__stringify(MAX_UIO_MAPS)
" I/O memory resources.\n");
break;
}
uiomem->memtype = UIO_MEM_PHYS;
uiomem->addr = r->start;
uiomem->size = resource_size(r);
++uiomem;
}
priv->dmem_region_start = uiomem - &uioinfo->mem[0];
priv->num_dmem_regions = pdata->num_dynamic_regions;
for (i = 0; i < pdata->num_dynamic_regions; ++i) {
if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
dev_warn(&pdev->dev, "device has more than "
__stringify(MAX_UIO_MAPS)
" dynamic and fixed memory regions.\n");
break;
}
uiomem->memtype = UIO_MEM_PHYS;
uiomem->addr = DMEM_MAP_ERROR;
uiomem->size = pdata->dynamic_region_sizes[i];
++uiomem;
}
while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) {
uiomem->size = 0;
++uiomem;
}
/* This driver requires no hardware specific kernel code to handle
* interrupts. Instead, the interrupt handler simply disables the
* interrupt in the interrupt controller. User space is responsible
* for performing hardware specific acknowledge and re-enabling of
* the interrupt in the interrupt controller.
*
* Interrupt sharing is not supported.
*/
uioinfo->handler = uio_dmem_genirq_handler;
uioinfo->irqcontrol = uio_dmem_genirq_irqcontrol;
uioinfo->open = uio_dmem_genirq_open;
uioinfo->release = uio_dmem_genirq_release;
uioinfo->priv = priv;
/* Enable Runtime PM for this device:
* The device starts in suspended state to allow the hardware to be
* turned off by default. The Runtime PM bus code should power on the
* hardware and enable clocks at open().
*/
pm_runtime_enable(&pdev->dev);
ret = devm_add_action_or_reset(&pdev->dev, uio_dmem_genirq_pm_disable, &pdev->dev);
if (ret)
return ret;
return devm_uio_register_device(&pdev->dev, priv->uioinfo);
}
static int uio_dmem_genirq_runtime_nop(struct device *dev)
{
/* Runtime PM callback shared between ->runtime_suspend()
* and ->runtime_resume(). Simply returns success.
*
* In this driver pm_runtime_get_sync() and pm_runtime_put_sync()
* are used at open() and release() time. This allows the
* Runtime PM code to turn off power to the device while the
* device is unused, ie before open() and after release().
*
* This Runtime PM callback does not need to save or restore
* any registers since user space is responsible for hardware
* register reinitialization after open().
*/
return 0;
}
static const struct dev_pm_ops uio_dmem_genirq_dev_pm_ops = {
.runtime_suspend = uio_dmem_genirq_runtime_nop,
.runtime_resume = uio_dmem_genirq_runtime_nop,
};
#ifdef CONFIG_OF
static const struct of_device_id uio_of_genirq_match[] = {
{ /* empty for now */ },
};
MODULE_DEVICE_TABLE(of, uio_of_genirq_match);
#endif
static struct platform_driver uio_dmem_genirq = {
.probe = uio_dmem_genirq_probe,
.driver = {
.name = DRIVER_NAME,
.pm = &uio_dmem_genirq_dev_pm_ops,
.of_match_table = of_match_ptr(uio_of_genirq_match),
},
};
module_platform_driver(uio_dmem_genirq);
MODULE_AUTHOR("Damian Hobson-Garcia");
MODULE_DESCRIPTION("Userspace I/O platform driver with dynamic memory.");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
| linux-master | drivers/uio/uio_dmem_genirq.c |
// SPDX-License-Identifier: GPL-2.0
/* uio_pci_generic - generic UIO driver for PCI 2.3 devices
*
* Copyright (C) 2009 Red Hat, Inc.
* Author: Michael S. Tsirkin <[email protected]>
*
* Since the driver does not declare any device ids, you must allocate
* an id and bind the device to the driver yourself. For example:
*
* # echo "8086 10f5" > /sys/bus/pci/drivers/uio_pci_generic/new_id
* # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind
* # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/uio_pci_generic/bind
* # ls -l /sys/bus/pci/devices/0000:00:19.0/driver
* .../0000:00:19.0/driver -> ../../../bus/pci/drivers/uio_pci_generic
*
* Driver won't bind to devices which do not support the Interrupt Disable Bit
* in the command register. All devices compliant to PCI 2.3 (circa 2002) and
* all compliant PCI Express devices should support this bit.
*/
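/*
 * Editor's illustration (not part of the original driver): the interrupt
 * handler below masks further interrupts by setting the Interrupt Disable
 * bit, so after read() returns a user-space driver typically clears that bit
 * again through the device's sysfs config file. Register offset 4 (command
 * register) and bit 10 (Interrupt Disable) follow the PCI specification; the
 * helper below is a sketch and ignores host endianness.
 *
 *   #include <stdint.h>
 *   #include <unistd.h>
 *
 *   void rearm_intx(int cfg_fd)
 *   {
 *           uint16_t cmd;
 *
 *           pread(cfg_fd, &cmd, sizeof(cmd), 4);
 *           cmd &= ~(1 << 10);                    // clear Interrupt Disable
 *           pwrite(cfg_fd, &cmd, sizeof(cmd), 4);
 *   }
 */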
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uio_driver.h>
#define DRIVER_VERSION "0.01.0"
#define DRIVER_AUTHOR "Michael S. Tsirkin <[email protected]>"
#define DRIVER_DESC "Generic UIO driver for PCI 2.3 devices"
struct uio_pci_generic_dev {
struct uio_info info;
struct pci_dev *pdev;
};
static inline struct uio_pci_generic_dev *
to_uio_pci_generic_dev(struct uio_info *info)
{
return container_of(info, struct uio_pci_generic_dev, info);
}
static int release(struct uio_info *info, struct inode *inode)
{
struct uio_pci_generic_dev *gdev = to_uio_pci_generic_dev(info);
/*
* This driver is insecure when used with devices doing DMA, but some
* people (mis)use it with such devices.
* Let's at least make sure DMA isn't left enabled after the userspace
* driver closes the fd.
* Note that there's a non-zero chance doing this will wedge the device
* at least until reset.
*/
pci_clear_master(gdev->pdev);
return 0;
}
/* Interrupt handler. Read/modify/write the command register to disable
* the interrupt. */
static irqreturn_t irqhandler(int irq, struct uio_info *info)
{
struct uio_pci_generic_dev *gdev = to_uio_pci_generic_dev(info);
if (!pci_check_and_mask_intx(gdev->pdev))
return IRQ_NONE;
/* UIO core will signal the user process. */
return IRQ_HANDLED;
}
static int probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct uio_pci_generic_dev *gdev;
struct uio_mem *uiomem;
int err;
int i;
err = pcim_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "%s: pci_enable_device failed: %d\n",
__func__, err);
return err;
}
if (pdev->irq && !pci_intx_mask_supported(pdev))
return -ENODEV;
gdev = devm_kzalloc(&pdev->dev, sizeof(struct uio_pci_generic_dev), GFP_KERNEL);
if (!gdev)
return -ENOMEM;
gdev->info.name = "uio_pci_generic";
gdev->info.version = DRIVER_VERSION;
gdev->info.release = release;
gdev->pdev = pdev;
if (pdev->irq && (pdev->irq != IRQ_NOTCONNECTED)) {
gdev->info.irq = pdev->irq;
gdev->info.irq_flags = IRQF_SHARED;
gdev->info.handler = irqhandler;
} else {
dev_warn(&pdev->dev, "No IRQ assigned to device: "
"no support for interrupts?\n");
}
uiomem = &gdev->info.mem[0];
for (i = 0; i < MAX_UIO_MAPS; ++i) {
struct resource *r = &pdev->resource[i];
if (r->flags != (IORESOURCE_SIZEALIGN | IORESOURCE_MEM))
continue;
if (uiomem >= &gdev->info.mem[MAX_UIO_MAPS]) {
dev_warn(
&pdev->dev,
"device has more than " __stringify(
MAX_UIO_MAPS) " I/O memory resources.\n");
break;
}
uiomem->memtype = UIO_MEM_PHYS;
uiomem->addr = r->start & PAGE_MASK;
uiomem->offs = r->start & ~PAGE_MASK;
uiomem->size =
(uiomem->offs + resource_size(r) + PAGE_SIZE - 1) &
PAGE_MASK;
uiomem->name = r->name;
++uiomem;
}
while (uiomem < &gdev->info.mem[MAX_UIO_MAPS]) {
uiomem->size = 0;
++uiomem;
}
return devm_uio_register_device(&pdev->dev, &gdev->info);
}
static struct pci_driver uio_pci_driver = {
.name = "uio_pci_generic",
.id_table = NULL, /* only dynamic id's */
.probe = probe,
};
module_pci_driver(uio_pci_driver);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
| linux-master | drivers/uio/uio_pci_generic.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Programmable Real-Time Unit Sub System (PRUSS) UIO driver (uio_pruss)
*
* This driver exports PRUSS host event out interrupts and PRUSS, L3 RAM,
* and DDR RAM to user space for applications interacting with PRUSS firmware
*
* Copyright (C) 2010-11 Texas Instruments Incorporated - http://www.ti.com/
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/uio_driver.h>
#include <linux/platform_data/uio_pruss.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#define DRV_NAME "pruss_uio"
#define DRV_VERSION "1.0"
static int sram_pool_sz = SZ_16K;
module_param(sram_pool_sz, int, 0);
MODULE_PARM_DESC(sram_pool_sz, "sram pool size to allocate ");
static int extram_pool_sz = SZ_256K;
module_param(extram_pool_sz, int, 0);
MODULE_PARM_DESC(extram_pool_sz, "external ram pool size to allocate");
/*
* Host event IRQ numbers from PRUSS - PRUSS can generate up to 8 interrupt
* events to the AINTC of the ARM host processor. These can be used for IPC
* between PRUSS firmware and a user space application, e.g. asynchronous
* notification from PRU firmware to user space.
* 3 PRU_EVTOUT0
* 4 PRU_EVTOUT1
* 5 PRU_EVTOUT2
* 6 PRU_EVTOUT3
* 7 PRU_EVTOUT4
* 8 PRU_EVTOUT5
* 9 PRU_EVTOUT6
* 10 PRU_EVTOUT7
*/
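/*
 * Editor's note (not part of the original driver): each of the eight host
 * events above is registered as its own UIO device named "pruss_evtN" (see
 * pruss_probe() below), and every one of them exposes the same three mmap
 * regions: map 0 = PRUSS I/O space, map 1 = SRAM pool, map 2 = DDR pool.
 * A minimal sketch of mapping the PRUSS registers, assuming the event of
 * interest ended up as /dev/uio0:
 *
 *   #include <fcntl.h>
 *   #include <stddef.h>
 *   #include <sys/mman.h>
 *   #include <unistd.h>
 *
 *   void *map_pruss_regs(size_t len)
 *   {
 *           int fd = open("/dev/uio0", O_RDWR | O_SYNC);
 *
 *           if (fd < 0)
 *                   return NULL;
 *           // map index 0 is selected by mmap offset 0
 *           return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *   }
 */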
#define MAX_PRUSS_EVT 8
#define PINTC_HIDISR 0x0038
#define PINTC_HIPIR 0x0900
#define HIPIR_NOPEND 0x80000000
#define PINTC_HIER 0x1500
struct uio_pruss_dev {
struct uio_info *info;
struct clk *pruss_clk;
dma_addr_t sram_paddr;
dma_addr_t ddr_paddr;
void __iomem *prussio_vaddr;
unsigned long sram_vaddr;
void *ddr_vaddr;
unsigned int hostirq_start;
unsigned int pintc_base;
struct gen_pool *sram_pool;
};
static irqreturn_t pruss_handler(int irq, struct uio_info *info)
{
struct uio_pruss_dev *gdev = info->priv;
int intr_bit = (irq - gdev->hostirq_start + 2);
int val, intr_mask = (1 << intr_bit);
void __iomem *base = gdev->prussio_vaddr + gdev->pintc_base;
void __iomem *intren_reg = base + PINTC_HIER;
void __iomem *intrdis_reg = base + PINTC_HIDISR;
void __iomem *intrstat_reg = base + PINTC_HIPIR + (intr_bit << 2);
val = ioread32(intren_reg);
/* Is interrupt enabled and active ? */
if (!(val & intr_mask) && (ioread32(intrstat_reg) & HIPIR_NOPEND))
return IRQ_NONE;
/* Disable interrupt */
iowrite32(intr_bit, intrdis_reg);
return IRQ_HANDLED;
}
static void pruss_cleanup(struct device *dev, struct uio_pruss_dev *gdev)
{
int cnt;
struct uio_info *p = gdev->info;
for (cnt = 0; cnt < MAX_PRUSS_EVT; cnt++, p++) {
uio_unregister_device(p);
}
iounmap(gdev->prussio_vaddr);
if (gdev->ddr_vaddr) {
dma_free_coherent(dev, extram_pool_sz, gdev->ddr_vaddr,
gdev->ddr_paddr);
}
if (gdev->sram_vaddr)
gen_pool_free(gdev->sram_pool,
gdev->sram_vaddr,
sram_pool_sz);
clk_disable(gdev->pruss_clk);
}
static int pruss_probe(struct platform_device *pdev)
{
struct uio_info *p;
struct uio_pruss_dev *gdev;
struct resource *regs_prussio;
struct device *dev = &pdev->dev;
int ret, cnt, i, len;
struct uio_pruss_pdata *pdata = dev_get_platdata(dev);
gdev = devm_kzalloc(dev, sizeof(struct uio_pruss_dev), GFP_KERNEL);
if (!gdev)
return -ENOMEM;
gdev->info = devm_kcalloc(dev, MAX_PRUSS_EVT, sizeof(*p), GFP_KERNEL);
if (!gdev->info)
return -ENOMEM;
/* Power on PRU in case its not done as part of boot-loader */
gdev->pruss_clk = devm_clk_get(dev, "pruss");
if (IS_ERR(gdev->pruss_clk)) {
dev_err(dev, "Failed to get clock\n");
return PTR_ERR(gdev->pruss_clk);
}
ret = clk_enable(gdev->pruss_clk);
if (ret) {
dev_err(dev, "Failed to enable clock\n");
return ret;
}
regs_prussio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs_prussio) {
dev_err(dev, "No PRUSS I/O resource specified\n");
ret = -EIO;
goto err_clk_disable;
}
if (!regs_prussio->start) {
dev_err(dev, "Invalid memory resource\n");
ret = -EIO;
goto err_clk_disable;
}
if (pdata->sram_pool) {
gdev->sram_pool = pdata->sram_pool;
gdev->sram_vaddr =
(unsigned long)gen_pool_dma_alloc(gdev->sram_pool,
sram_pool_sz, &gdev->sram_paddr);
if (!gdev->sram_vaddr) {
dev_err(dev, "Could not allocate SRAM pool\n");
ret = -ENOMEM;
goto err_clk_disable;
}
}
gdev->ddr_vaddr = dma_alloc_coherent(dev, extram_pool_sz,
&(gdev->ddr_paddr), GFP_KERNEL | GFP_DMA);
if (!gdev->ddr_vaddr) {
dev_err(dev, "Could not allocate external memory\n");
ret = -ENOMEM;
goto err_free_sram;
}
len = resource_size(regs_prussio);
gdev->prussio_vaddr = ioremap(regs_prussio->start, len);
if (!gdev->prussio_vaddr) {
dev_err(dev, "Can't remap PRUSS I/O address range\n");
ret = -ENOMEM;
goto err_free_ddr_vaddr;
}
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto err_unmap;
gdev->hostirq_start = ret;
gdev->pintc_base = pdata->pintc_base;
for (cnt = 0, p = gdev->info; cnt < MAX_PRUSS_EVT; cnt++, p++) {
p->mem[0].addr = regs_prussio->start;
p->mem[0].size = resource_size(regs_prussio);
p->mem[0].memtype = UIO_MEM_PHYS;
p->mem[1].addr = gdev->sram_paddr;
p->mem[1].size = sram_pool_sz;
p->mem[1].memtype = UIO_MEM_PHYS;
p->mem[2].addr = gdev->ddr_paddr;
p->mem[2].size = extram_pool_sz;
p->mem[2].memtype = UIO_MEM_PHYS;
p->name = devm_kasprintf(dev, GFP_KERNEL, "pruss_evt%d", cnt);
p->version = DRV_VERSION;
/* Register PRUSS IRQ lines */
p->irq = gdev->hostirq_start + cnt;
p->handler = pruss_handler;
p->priv = gdev;
ret = uio_register_device(dev, p);
if (ret < 0)
goto err_unloop;
}
platform_set_drvdata(pdev, gdev);
return 0;
err_unloop:
for (i = 0, p = gdev->info; i < cnt; i++, p++) {
uio_unregister_device(p);
}
err_unmap:
iounmap(gdev->prussio_vaddr);
err_free_ddr_vaddr:
dma_free_coherent(dev, extram_pool_sz, gdev->ddr_vaddr,
gdev->ddr_paddr);
err_free_sram:
if (pdata->sram_pool)
gen_pool_free(gdev->sram_pool, gdev->sram_vaddr, sram_pool_sz);
err_clk_disable:
clk_disable(gdev->pruss_clk);
return ret;
}
static int pruss_remove(struct platform_device *dev)
{
struct uio_pruss_dev *gdev = platform_get_drvdata(dev);
pruss_cleanup(&dev->dev, gdev);
return 0;
}
static struct platform_driver pruss_driver = {
.probe = pruss_probe,
.remove = pruss_remove,
.driver = {
.name = DRV_NAME,
},
};
module_platform_driver(pruss_driver);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR("Amit Chatterjee <[email protected]>");
MODULE_AUTHOR("Pratheesh Gangadhar <[email protected]>");
| linux-master | drivers/uio/uio_pruss.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/uio/uio.c
*
* Copyright(C) 2005, Benedikt Spranger <[email protected]>
* Copyright(C) 2005, Thomas Gleixner <[email protected]>
* Copyright(C) 2006, Hans J. Koch <[email protected]>
* Copyright(C) 2006, Greg Kroah-Hartman <[email protected]>
*
* Userspace IO
*
* Base Functions
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/kobject.h>
#include <linux/cdev.h>
#include <linux/uio_driver.h>
#define UIO_MAX_DEVICES (1U << MINORBITS)
static int uio_major;
static struct cdev *uio_cdev;
static DEFINE_IDR(uio_idr);
static const struct file_operations uio_fops;
/* Protect idr accesses */
static DEFINE_MUTEX(minor_lock);
/*
* attributes
*/
struct uio_map {
struct kobject kobj;
struct uio_mem *mem;
};
#define to_map(map) container_of(map, struct uio_map, kobj)
static ssize_t map_name_show(struct uio_mem *mem, char *buf)
{
if (unlikely(!mem->name))
mem->name = "";
return sprintf(buf, "%s\n", mem->name);
}
static ssize_t map_addr_show(struct uio_mem *mem, char *buf)
{
return sprintf(buf, "%pa\n", &mem->addr);
}
static ssize_t map_size_show(struct uio_mem *mem, char *buf)
{
return sprintf(buf, "%pa\n", &mem->size);
}
static ssize_t map_offset_show(struct uio_mem *mem, char *buf)
{
return sprintf(buf, "0x%llx\n", (unsigned long long)mem->offs);
}
struct map_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct uio_mem *, char *);
ssize_t (*store)(struct uio_mem *, const char *, size_t);
};
static struct map_sysfs_entry name_attribute =
__ATTR(name, S_IRUGO, map_name_show, NULL);
static struct map_sysfs_entry addr_attribute =
__ATTR(addr, S_IRUGO, map_addr_show, NULL);
static struct map_sysfs_entry size_attribute =
__ATTR(size, S_IRUGO, map_size_show, NULL);
static struct map_sysfs_entry offset_attribute =
__ATTR(offset, S_IRUGO, map_offset_show, NULL);
static struct attribute *map_attrs[] = {
&name_attribute.attr,
&addr_attribute.attr,
&size_attribute.attr,
&offset_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
ATTRIBUTE_GROUPS(map);
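/*
 * Editor's note (not part of the original file): the attributes above appear
 * under a per-map directory of each registered UIO device, for example
 * (device and map indices are illustrative):
 *
 *   /sys/class/uio/uio0/maps/map0/name
 *   /sys/class/uio/uio0/maps/map0/addr
 *   /sys/class/uio/uio0/maps/map0/size
 *   /sys/class/uio/uio0/maps/map0/offset
 */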
static void map_release(struct kobject *kobj)
{
struct uio_map *map = to_map(kobj);
kfree(map);
}
static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct uio_map *map = to_map(kobj);
struct uio_mem *mem = map->mem;
struct map_sysfs_entry *entry;
entry = container_of(attr, struct map_sysfs_entry, attr);
if (!entry->show)
return -EIO;
return entry->show(mem, buf);
}
static const struct sysfs_ops map_sysfs_ops = {
.show = map_type_show,
};
static struct kobj_type map_attr_type = {
.release = map_release,
.sysfs_ops = &map_sysfs_ops,
.default_groups = map_groups,
};
struct uio_portio {
struct kobject kobj;
struct uio_port *port;
};
#define to_portio(portio) container_of(portio, struct uio_portio, kobj)
static ssize_t portio_name_show(struct uio_port *port, char *buf)
{
if (unlikely(!port->name))
port->name = "";
return sprintf(buf, "%s\n", port->name);
}
static ssize_t portio_start_show(struct uio_port *port, char *buf)
{
return sprintf(buf, "0x%lx\n", port->start);
}
static ssize_t portio_size_show(struct uio_port *port, char *buf)
{
return sprintf(buf, "0x%lx\n", port->size);
}
static ssize_t portio_porttype_show(struct uio_port *port, char *buf)
{
const char *porttypes[] = {"none", "x86", "gpio", "other"};
if ((port->porttype < 0) || (port->porttype > UIO_PORT_OTHER))
return -EINVAL;
return sprintf(buf, "port_%s\n", porttypes[port->porttype]);
}
struct portio_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct uio_port *, char *);
ssize_t (*store)(struct uio_port *, const char *, size_t);
};
static struct portio_sysfs_entry portio_name_attribute =
__ATTR(name, S_IRUGO, portio_name_show, NULL);
static struct portio_sysfs_entry portio_start_attribute =
__ATTR(start, S_IRUGO, portio_start_show, NULL);
static struct portio_sysfs_entry portio_size_attribute =
__ATTR(size, S_IRUGO, portio_size_show, NULL);
static struct portio_sysfs_entry portio_porttype_attribute =
__ATTR(porttype, S_IRUGO, portio_porttype_show, NULL);
static struct attribute *portio_attrs[] = {
&portio_name_attribute.attr,
&portio_start_attribute.attr,
&portio_size_attribute.attr,
&portio_porttype_attribute.attr,
NULL,
};
ATTRIBUTE_GROUPS(portio);
static void portio_release(struct kobject *kobj)
{
struct uio_portio *portio = to_portio(kobj);
kfree(portio);
}
static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct uio_portio *portio = to_portio(kobj);
struct uio_port *port = portio->port;
struct portio_sysfs_entry *entry;
entry = container_of(attr, struct portio_sysfs_entry, attr);
if (!entry->show)
return -EIO;
return entry->show(port, buf);
}
static const struct sysfs_ops portio_sysfs_ops = {
.show = portio_type_show,
};
static struct kobj_type portio_attr_type = {
.release = portio_release,
.sysfs_ops = &portio_sysfs_ops,
.default_groups = portio_groups,
};
static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uio_device *idev = dev_get_drvdata(dev);
int ret;
mutex_lock(&idev->info_lock);
if (!idev->info) {
ret = -EINVAL;
dev_err(dev, "the device has been unregistered\n");
goto out;
}
ret = sprintf(buf, "%s\n", idev->info->name);
out:
mutex_unlock(&idev->info_lock);
return ret;
}
static DEVICE_ATTR_RO(name);
static ssize_t version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uio_device *idev = dev_get_drvdata(dev);
int ret;
mutex_lock(&idev->info_lock);
if (!idev->info) {
ret = -EINVAL;
dev_err(dev, "the device has been unregistered\n");
goto out;
}
ret = sprintf(buf, "%s\n", idev->info->version);
out:
mutex_unlock(&idev->info_lock);
return ret;
}
static DEVICE_ATTR_RO(version);
static ssize_t event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uio_device *idev = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
}
static DEVICE_ATTR_RO(event);
static struct attribute *uio_attrs[] = {
&dev_attr_name.attr,
&dev_attr_version.attr,
&dev_attr_event.attr,
NULL,
};
ATTRIBUTE_GROUPS(uio);
/* UIO class infrastructure */
static struct class uio_class = {
.name = "uio",
.dev_groups = uio_groups,
};
static bool uio_class_registered;
/*
* device functions
*/
static int uio_dev_add_attributes(struct uio_device *idev)
{
int ret;
int mi, pi;
int map_found = 0;
int portio_found = 0;
struct uio_mem *mem;
struct uio_map *map;
struct uio_port *port;
struct uio_portio *portio;
for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
mem = &idev->info->mem[mi];
if (mem->size == 0)
break;
if (!map_found) {
map_found = 1;
idev->map_dir = kobject_create_and_add("maps",
&idev->dev.kobj);
if (!idev->map_dir) {
ret = -ENOMEM;
goto err_map;
}
}
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
ret = -ENOMEM;
goto err_map;
}
kobject_init(&map->kobj, &map_attr_type);
map->mem = mem;
mem->map = map;
ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi);
if (ret)
goto err_map_kobj;
ret = kobject_uevent(&map->kobj, KOBJ_ADD);
if (ret)
goto err_map_kobj;
}
for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) {
port = &idev->info->port[pi];
if (port->size == 0)
break;
if (!portio_found) {
portio_found = 1;
idev->portio_dir = kobject_create_and_add("portio",
&idev->dev.kobj);
if (!idev->portio_dir) {
ret = -ENOMEM;
goto err_portio;
}
}
portio = kzalloc(sizeof(*portio), GFP_KERNEL);
if (!portio) {
ret = -ENOMEM;
goto err_portio;
}
kobject_init(&portio->kobj, &portio_attr_type);
portio->port = port;
port->portio = portio;
ret = kobject_add(&portio->kobj, idev->portio_dir,
"port%d", pi);
if (ret)
goto err_portio_kobj;
ret = kobject_uevent(&portio->kobj, KOBJ_ADD);
if (ret)
goto err_portio_kobj;
}
return 0;
err_portio:
pi--;
err_portio_kobj:
for (; pi >= 0; pi--) {
port = &idev->info->port[pi];
portio = port->portio;
kobject_put(&portio->kobj);
}
kobject_put(idev->portio_dir);
err_map:
mi--;
err_map_kobj:
for (; mi >= 0; mi--) {
mem = &idev->info->mem[mi];
map = mem->map;
kobject_put(&map->kobj);
}
kobject_put(idev->map_dir);
dev_err(&idev->dev, "error creating sysfs files (%d)\n", ret);
return ret;
}
static void uio_dev_del_attributes(struct uio_device *idev)
{
int i;
struct uio_mem *mem;
struct uio_port *port;
for (i = 0; i < MAX_UIO_MAPS; i++) {
mem = &idev->info->mem[i];
if (mem->size == 0)
break;
kobject_put(&mem->map->kobj);
}
kobject_put(idev->map_dir);
for (i = 0; i < MAX_UIO_PORT_REGIONS; i++) {
port = &idev->info->port[i];
if (port->size == 0)
break;
kobject_put(&port->portio->kobj);
}
kobject_put(idev->portio_dir);
}
static int uio_get_minor(struct uio_device *idev)
{
int retval;
mutex_lock(&minor_lock);
retval = idr_alloc(&uio_idr, idev, 0, UIO_MAX_DEVICES, GFP_KERNEL);
if (retval >= 0) {
idev->minor = retval;
retval = 0;
} else if (retval == -ENOSPC) {
dev_err(&idev->dev, "too many uio devices\n");
retval = -EINVAL;
}
mutex_unlock(&minor_lock);
return retval;
}
static void uio_free_minor(unsigned long minor)
{
mutex_lock(&minor_lock);
idr_remove(&uio_idr, minor);
mutex_unlock(&minor_lock);
}
/**
* uio_event_notify - trigger an interrupt event
* @info: UIO device capabilities
*/
void uio_event_notify(struct uio_info *info)
{
struct uio_device *idev = info->uio_dev;
atomic_inc(&idev->event);
wake_up_interruptible(&idev->wait);
kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
}
EXPORT_SYMBOL_GPL(uio_event_notify);
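/*
 * Editor's sketch (not part of the original file): a driver that handles its
 * own event source (for example with info->irq set to UIO_IRQ_CUSTOM) calls
 * uio_event_notify() itself to wake up readers and pollers. The timer and
 * the my_priv structure below are purely illustrative.
 *
 *   static void my_timer_fn(struct timer_list *t)
 *   {
 *           struct my_priv *priv = from_timer(priv, t, timer);
 *
 *           uio_event_notify(&priv->uio_info);
 *           mod_timer(&priv->timer, jiffies + HZ);
 *   }
 */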
/**
* uio_interrupt - hardware interrupt handler
* @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer
* @dev_id: Pointer to the devices uio_device structure
*/
static irqreturn_t uio_interrupt(int irq, void *dev_id)
{
struct uio_device *idev = (struct uio_device *)dev_id;
irqreturn_t ret;
ret = idev->info->handler(irq, idev->info);
if (ret == IRQ_HANDLED)
uio_event_notify(idev->info);
return ret;
}
struct uio_listener {
struct uio_device *dev;
s32 event_count;
};
static int uio_open(struct inode *inode, struct file *filep)
{
struct uio_device *idev;
struct uio_listener *listener;
int ret = 0;
mutex_lock(&minor_lock);
idev = idr_find(&uio_idr, iminor(inode));
mutex_unlock(&minor_lock);
if (!idev) {
ret = -ENODEV;
goto out;
}
get_device(&idev->dev);
if (!try_module_get(idev->owner)) {
ret = -ENODEV;
goto err_module_get;
}
listener = kmalloc(sizeof(*listener), GFP_KERNEL);
if (!listener) {
ret = -ENOMEM;
goto err_alloc_listener;
}
listener->dev = idev;
listener->event_count = atomic_read(&idev->event);
filep->private_data = listener;
mutex_lock(&idev->info_lock);
if (!idev->info) {
mutex_unlock(&idev->info_lock);
ret = -EINVAL;
goto err_infoopen;
}
if (idev->info->open)
ret = idev->info->open(idev->info, inode);
mutex_unlock(&idev->info_lock);
if (ret)
goto err_infoopen;
return 0;
err_infoopen:
kfree(listener);
err_alloc_listener:
module_put(idev->owner);
err_module_get:
put_device(&idev->dev);
out:
return ret;
}
static int uio_fasync(int fd, struct file *filep, int on)
{
struct uio_listener *listener = filep->private_data;
struct uio_device *idev = listener->dev;
return fasync_helper(fd, filep, on, &idev->async_queue);
}
static int uio_release(struct inode *inode, struct file *filep)
{
int ret = 0;
struct uio_listener *listener = filep->private_data;
struct uio_device *idev = listener->dev;
mutex_lock(&idev->info_lock);
if (idev->info && idev->info->release)
ret = idev->info->release(idev->info, inode);
mutex_unlock(&idev->info_lock);
module_put(idev->owner);
kfree(listener);
put_device(&idev->dev);
return ret;
}
static __poll_t uio_poll(struct file *filep, poll_table *wait)
{
struct uio_listener *listener = filep->private_data;
struct uio_device *idev = listener->dev;
__poll_t ret = 0;
mutex_lock(&idev->info_lock);
if (!idev->info || !idev->info->irq)
ret = -EIO;
mutex_unlock(&idev->info_lock);
if (ret)
return ret;
poll_wait(filep, &idev->wait, wait);
if (listener->event_count != atomic_read(&idev->event))
return EPOLLIN | EPOLLRDNORM;
return 0;
}
static ssize_t uio_read(struct file *filep, char __user *buf,
size_t count, loff_t *ppos)
{
struct uio_listener *listener = filep->private_data;
struct uio_device *idev = listener->dev;
DECLARE_WAITQUEUE(wait, current);
ssize_t retval = 0;
s32 event_count;
if (count != sizeof(s32))
return -EINVAL;
add_wait_queue(&idev->wait, &wait);
do {
mutex_lock(&idev->info_lock);
if (!idev->info || !idev->info->irq) {
retval = -EIO;
mutex_unlock(&idev->info_lock);
break;
}
mutex_unlock(&idev->info_lock);
set_current_state(TASK_INTERRUPTIBLE);
event_count = atomic_read(&idev->event);
if (event_count != listener->event_count) {
__set_current_state(TASK_RUNNING);
if (copy_to_user(buf, &event_count, count))
retval = -EFAULT;
else {
listener->event_count = event_count;
retval = count;
}
break;
}
if (filep->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
break;
}
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
schedule();
} while (1);
__set_current_state(TASK_RUNNING);
remove_wait_queue(&idev->wait, &wait);
return retval;
}
static ssize_t uio_write(struct file *filep, const char __user *buf,
size_t count, loff_t *ppos)
{
struct uio_listener *listener = filep->private_data;
struct uio_device *idev = listener->dev;
ssize_t retval;
s32 irq_on;
if (count != sizeof(s32))
return -EINVAL;
if (copy_from_user(&irq_on, buf, count))
return -EFAULT;
mutex_lock(&idev->info_lock);
if (!idev->info) {
retval = -EINVAL;
goto out;
}
if (!idev->info->irq) {
retval = -EIO;
goto out;
}
if (!idev->info->irqcontrol) {
retval = -ENOSYS;
goto out;
}
retval = idev->info->irqcontrol(idev->info, irq_on);
out:
mutex_unlock(&idev->info_lock);
return retval ? retval : sizeof(s32);
}
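/*
 * Editor's illustration (not part of the original file): uio_read() and
 * uio_write() above define the user-space protocol; both transfer exactly
 * one signed 32-bit value. read() returns the current event count (blocking
 * until it changes unless O_NONBLOCK is set) and write() passes the value to
 * the driver's irqcontrol() hook. The device node name is an assumption.
 *
 *   #include <fcntl.h>
 *   #include <stdint.h>
 *   #include <unistd.h>
 *
 *   int wait_for_event(void)
 *   {
 *           int fd = open("/dev/uio0", O_RDWR);
 *           int32_t events, enable = 1;
 *
 *           if (fd < 0)
 *                   return -1;
 *           write(fd, &enable, sizeof(enable));   // irqcontrol(info, 1)
 *           read(fd, &events, sizeof(events));    // blocks for the next event
 *           close(fd);
 *           return events;
 *   }
 */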
static int uio_find_mem_index(struct vm_area_struct *vma)
{
struct uio_device *idev = vma->vm_private_data;
if (vma->vm_pgoff < MAX_UIO_MAPS) {
if (idev->info->mem[vma->vm_pgoff].size == 0)
return -1;
return (int)vma->vm_pgoff;
}
return -1;
}
static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
{
struct uio_device *idev = vmf->vma->vm_private_data;
struct page *page;
unsigned long offset;
void *addr;
vm_fault_t ret = 0;
int mi;
mutex_lock(&idev->info_lock);
if (!idev->info) {
ret = VM_FAULT_SIGBUS;
goto out;
}
mi = uio_find_mem_index(vmf->vma);
if (mi < 0) {
ret = VM_FAULT_SIGBUS;
goto out;
}
/*
* We need to subtract mi because userspace uses offset = N*PAGE_SIZE
* to use mem[N].
*/
offset = (vmf->pgoff - mi) << PAGE_SHIFT;
addr = (void *)(unsigned long)idev->info->mem[mi].addr + offset;
if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
page = virt_to_page(addr);
else
page = vmalloc_to_page(addr);
get_page(page);
vmf->page = page;
out:
mutex_unlock(&idev->info_lock);
return ret;
}
static const struct vm_operations_struct uio_logical_vm_ops = {
.fault = uio_vma_fault,
};
static int uio_mmap_logical(struct vm_area_struct *vma)
{
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &uio_logical_vm_ops;
return 0;
}
static const struct vm_operations_struct uio_physical_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
.access = generic_access_phys,
#endif
};
static int uio_mmap_physical(struct vm_area_struct *vma)
{
struct uio_device *idev = vma->vm_private_data;
int mi = uio_find_mem_index(vma);
struct uio_mem *mem;
if (mi < 0)
return -EINVAL;
mem = idev->info->mem + mi;
if (mem->addr & ~PAGE_MASK)
return -ENODEV;
if (vma->vm_end - vma->vm_start > mem->size)
return -EINVAL;
vma->vm_ops = &uio_physical_vm_ops;
if (idev->info->mem[mi].memtype == UIO_MEM_PHYS)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
/*
* We cannot use the vm_iomap_memory() helper here,
* because vma->vm_pgoff is the map index we looked
* up above in uio_find_mem_index(), rather than an
* actual page offset into the mmap.
*
* So we just do the physical mmap without a page
* offset.
*/
return remap_pfn_range(vma,
vma->vm_start,
mem->addr >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
}
static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
{
struct uio_listener *listener = filep->private_data;
struct uio_device *idev = listener->dev;
int mi;
unsigned long requested_pages, actual_pages;
int ret = 0;
if (vma->vm_end < vma->vm_start)
return -EINVAL;
vma->vm_private_data = idev;
mutex_lock(&idev->info_lock);
if (!idev->info) {
ret = -EINVAL;
goto out;
}
mi = uio_find_mem_index(vma);
if (mi < 0) {
ret = -EINVAL;
goto out;
}
requested_pages = vma_pages(vma);
actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK)
+ idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT;
if (requested_pages > actual_pages) {
ret = -EINVAL;
goto out;
}
if (idev->info->mmap) {
ret = idev->info->mmap(idev->info, vma);
goto out;
}
switch (idev->info->mem[mi].memtype) {
case UIO_MEM_IOVA:
case UIO_MEM_PHYS:
ret = uio_mmap_physical(vma);
break;
case UIO_MEM_LOGICAL:
case UIO_MEM_VIRTUAL:
ret = uio_mmap_logical(vma);
break;
default:
ret = -EINVAL;
}
out:
mutex_unlock(&idev->info_lock);
return ret;
}
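/*
 * Editor's illustration (not part of the original file): as
 * uio_find_mem_index() shows, the mmap offset does not address bytes inside
 * a region; offset N * page size selects mem[N] and the region is always
 * mapped from its start. A minimal user-space sketch:
 *
 *   #include <sys/mman.h>
 *   #include <sys/types.h>
 *   #include <unistd.h>
 *
 *   void *map_region(int fd, int index, size_t len)
 *   {
 *           // the offset only carries the map index, not a byte offset
 *           return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                       fd, (off_t)index * sysconf(_SC_PAGESIZE));
 *   }
 */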
static const struct file_operations uio_fops = {
.owner = THIS_MODULE,
.open = uio_open,
.release = uio_release,
.read = uio_read,
.write = uio_write,
.mmap = uio_mmap,
.poll = uio_poll,
.fasync = uio_fasync,
.llseek = noop_llseek,
};
static int uio_major_init(void)
{
static const char name[] = "uio";
struct cdev *cdev = NULL;
dev_t uio_dev = 0;
int result;
result = alloc_chrdev_region(&uio_dev, 0, UIO_MAX_DEVICES, name);
if (result)
goto out;
result = -ENOMEM;
cdev = cdev_alloc();
if (!cdev)
goto out_unregister;
cdev->owner = THIS_MODULE;
cdev->ops = &uio_fops;
kobject_set_name(&cdev->kobj, "%s", name);
result = cdev_add(cdev, uio_dev, UIO_MAX_DEVICES);
if (result)
goto out_put;
uio_major = MAJOR(uio_dev);
uio_cdev = cdev;
return 0;
out_put:
kobject_put(&cdev->kobj);
out_unregister:
unregister_chrdev_region(uio_dev, UIO_MAX_DEVICES);
out:
return result;
}
static void uio_major_cleanup(void)
{
unregister_chrdev_region(MKDEV(uio_major, 0), UIO_MAX_DEVICES);
cdev_del(uio_cdev);
}
static int init_uio_class(void)
{
int ret;
/* This is the first time in here, set everything up properly */
ret = uio_major_init();
if (ret)
goto exit;
ret = class_register(&uio_class);
if (ret) {
printk(KERN_ERR "class_register failed for uio\n");
goto err_class_register;
}
uio_class_registered = true;
return 0;
err_class_register:
uio_major_cleanup();
exit:
return ret;
}
static void release_uio_class(void)
{
uio_class_registered = false;
class_unregister(&uio_class);
uio_major_cleanup();
}
static void uio_device_release(struct device *dev)
{
struct uio_device *idev = dev_get_drvdata(dev);
kfree(idev);
}
/**
* __uio_register_device - register a new userspace IO device
* @owner: module that creates the new device
* @parent: parent device
* @info: UIO device capabilities
*
* returns zero on success or a negative error code.
*/
int __uio_register_device(struct module *owner,
struct device *parent,
struct uio_info *info)
{
struct uio_device *idev;
int ret = 0;
if (!uio_class_registered)
return -EPROBE_DEFER;
if (!parent || !info || !info->name || !info->version)
return -EINVAL;
info->uio_dev = NULL;
idev = kzalloc(sizeof(*idev), GFP_KERNEL);
if (!idev) {
return -ENOMEM;
}
idev->owner = owner;
idev->info = info;
mutex_init(&idev->info_lock);
init_waitqueue_head(&idev->wait);
atomic_set(&idev->event, 0);
ret = uio_get_minor(idev);
if (ret) {
kfree(idev);
return ret;
}
device_initialize(&idev->dev);
idev->dev.devt = MKDEV(uio_major, idev->minor);
idev->dev.class = &uio_class;
idev->dev.parent = parent;
idev->dev.release = uio_device_release;
dev_set_drvdata(&idev->dev, idev);
ret = dev_set_name(&idev->dev, "uio%d", idev->minor);
if (ret)
goto err_device_create;
ret = device_add(&idev->dev);
if (ret)
goto err_device_create;
ret = uio_dev_add_attributes(idev);
if (ret)
goto err_uio_dev_add_attributes;
info->uio_dev = idev;
if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
/*
* Note that we deliberately don't use devm_request_irq
* here. The parent module can unregister the UIO device
* and call pci_disable_msi, which requires that this
* irq has been freed. However, the device may have open
* FDs at the time of unregister and therefore may not be
* freed until they are released.
*/
ret = request_irq(info->irq, uio_interrupt,
info->irq_flags, info->name, idev);
if (ret) {
info->uio_dev = NULL;
goto err_request_irq;
}
}
return 0;
err_request_irq:
uio_dev_del_attributes(idev);
err_uio_dev_add_attributes:
device_del(&idev->dev);
err_device_create:
uio_free_minor(idev->minor);
put_device(&idev->dev);
return ret;
}
EXPORT_SYMBOL_GPL(__uio_register_device);
static void devm_uio_unregister_device(struct device *dev, void *res)
{
uio_unregister_device(*(struct uio_info **)res);
}
/**
* __devm_uio_register_device - Resource managed uio_register_device()
* @owner: module that creates the new device
* @parent: parent device
* @info: UIO device capabilities
*
* returns zero on success or a negative error code.
*/
int __devm_uio_register_device(struct module *owner,
struct device *parent,
struct uio_info *info)
{
struct uio_info **ptr;
int ret;
ptr = devres_alloc(devm_uio_unregister_device, sizeof(*ptr),
GFP_KERNEL);
if (!ptr)
return -ENOMEM;
*ptr = info;
ret = __uio_register_device(owner, parent, info);
if (ret) {
devres_free(ptr);
return ret;
}
devres_add(parent, ptr);
return 0;
}
EXPORT_SYMBOL_GPL(__devm_uio_register_device);
/**
* uio_unregister_device - unregister a userspace IO device
* @info: UIO device capabilities
*
*/
void uio_unregister_device(struct uio_info *info)
{
struct uio_device *idev;
unsigned long minor;
if (!info || !info->uio_dev)
return;
idev = info->uio_dev;
minor = idev->minor;
mutex_lock(&idev->info_lock);
uio_dev_del_attributes(idev);
if (info->irq && info->irq != UIO_IRQ_CUSTOM)
free_irq(info->irq, idev);
idev->info = NULL;
mutex_unlock(&idev->info_lock);
wake_up_interruptible(&idev->wait);
kill_fasync(&idev->async_queue, SIGIO, POLL_HUP);
device_unregister(&idev->dev);
uio_free_minor(minor);
return;
}
EXPORT_SYMBOL_GPL(uio_unregister_device);
static int __init uio_init(void)
{
return init_uio_class();
}
static void __exit uio_exit(void)
{
release_uio_class();
idr_destroy(&uio_idr);
}
module_init(uio_init)
module_exit(uio_exit)
MODULE_LICENSE("GPL v2");
| linux-master | drivers/uio/uio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* UIO Hilscher CIF card driver
*
* (C) 2007 Hans J. Koch <[email protected]>
* Original code (C) 2005 Benedikt Spranger <[email protected]>
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uio_driver.h>
#include <asm/io.h>
#define PLX9030_INTCSR 0x4C
#define INTSCR_INT1_ENABLE 0x01
#define INTSCR_INT1_STATUS 0x04
#define INT1_ENABLED_AND_ACTIVE (INTSCR_INT1_ENABLE | INTSCR_INT1_STATUS)
#define PCI_SUBVENDOR_ID_PEP 0x1518
#define CIF_SUBDEVICE_PROFIBUS 0x430
#define CIF_SUBDEVICE_DEVICENET 0x432
static irqreturn_t hilscher_handler(int irq, struct uio_info *dev_info)
{
void __iomem *plx_intscr = dev_info->mem[0].internal_addr
+ PLX9030_INTCSR;
if ((ioread8(plx_intscr) & INT1_ENABLED_AND_ACTIVE)
!= INT1_ENABLED_AND_ACTIVE)
return IRQ_NONE;
/* Disable interrupt */
iowrite8(ioread8(plx_intscr) & ~INTSCR_INT1_ENABLE, plx_intscr);
return IRQ_HANDLED;
}
static int hilscher_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct uio_info *info;
info = devm_kzalloc(&dev->dev, sizeof(struct uio_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
if (pci_enable_device(dev))
return -ENODEV;
if (pci_request_regions(dev, "hilscher"))
goto out_disable;
info->mem[0].addr = pci_resource_start(dev, 0);
if (!info->mem[0].addr)
goto out_release;
info->mem[0].internal_addr = pci_ioremap_bar(dev, 0);
if (!info->mem[0].internal_addr)
goto out_release;
info->mem[0].size = pci_resource_len(dev, 0);
info->mem[0].memtype = UIO_MEM_PHYS;
info->mem[1].addr = pci_resource_start(dev, 2);
info->mem[1].size = pci_resource_len(dev, 2);
info->mem[1].memtype = UIO_MEM_PHYS;
switch (id->subdevice) {
case CIF_SUBDEVICE_PROFIBUS:
info->name = "CIF_Profibus";
break;
case CIF_SUBDEVICE_DEVICENET:
info->name = "CIF_Devicenet";
break;
default:
info->name = "CIF_???";
}
info->version = "0.0.1";
info->irq = dev->irq;
info->irq_flags = IRQF_SHARED;
info->handler = hilscher_handler;
if (uio_register_device(&dev->dev, info))
goto out_unmap;
pci_set_drvdata(dev, info);
return 0;
out_unmap:
iounmap(info->mem[0].internal_addr);
out_release:
pci_release_regions(dev);
out_disable:
pci_disable_device(dev);
return -ENODEV;
}
static void hilscher_pci_remove(struct pci_dev *dev)
{
struct uio_info *info = pci_get_drvdata(dev);
uio_unregister_device(info);
pci_release_regions(dev);
pci_disable_device(dev);
iounmap(info->mem[0].internal_addr);
}
static struct pci_device_id hilscher_pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_PLX,
.device = PCI_DEVICE_ID_PLX_9030,
.subvendor = PCI_SUBVENDOR_ID_PEP,
.subdevice = CIF_SUBDEVICE_PROFIBUS,
},
{
.vendor = PCI_VENDOR_ID_PLX,
.device = PCI_DEVICE_ID_PLX_9030,
.subvendor = PCI_SUBVENDOR_ID_PEP,
.subdevice = CIF_SUBDEVICE_DEVICENET,
},
{ 0, }
};
static struct pci_driver hilscher_pci_driver = {
.name = "hilscher",
.id_table = hilscher_pci_ids,
.probe = hilscher_pci_probe,
.remove = hilscher_pci_remove,
};
module_pci_driver(hilscher_pci_driver);
MODULE_DEVICE_TABLE(pci, hilscher_pci_ids);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Hans J. Koch, Benedikt Spranger");
| linux-master | drivers/uio/uio_cif.c |
// SPDX-License-Identifier: GPL-2.0
/* sercos3: UIO driver for the Automata Sercos III PCI card
Copyright (C) 2008 Linutronix GmbH
Author: John Ogness <[email protected]>
This is a straight-forward UIO driver, where interrupts are disabled
by the interrupt handler and re-enabled via a write to the UIO device
by the userspace-part.
The only part that may seem odd is the use of a logical OR when
storing and restoring enabled interrupts. This is done because the
userspace-part could directly modify the Interrupt Enable Register
at any time. To reduce possible conflicts, the kernel driver uses
a logical OR to make more controlled changes (rather than blindly
overwriting previous values).
Race conditions exist if the userspace-part directly modifies the
Interrupt Enable Register while in operation. The consequences are
that certain interrupts would fail to be enabled or disabled. For
this reason, the userspace-part should only directly modify the
Interrupt Enable Register at the beginning (to get things going).
The userspace-part can safely disable interrupts at any time using
a write to the UIO device.
*/
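/*
 * Editor's sketch (not part of the original driver): as described above, the
 * user-space part typically programs the Interrupt Enable Register once at
 * start-up through mmap'ed region 3 and afterwards only disables interrupts
 * through the UIO write() interface. The register offset matches IER0_OFFSET
 * below; the region size and file descriptor handling are assumptions of the
 * sketch.
 *
 *   #include <stdint.h>
 *   #include <sys/mman.h>
 *   #include <unistd.h>
 *
 *   int enable_events(int fd, uint32_t mask)
 *   {
 *           // region 3 is selected by mmap offset 3 * page size
 *           volatile uint32_t *bar = mmap(NULL, 4096,
 *                                         PROT_READ | PROT_WRITE, MAP_SHARED,
 *                                         fd, 3 * sysconf(_SC_PAGESIZE));
 *
 *           if (bar == MAP_FAILED)
 *                   return -1;
 *           bar[0x08 / 4] = mask;                 // IER0_OFFSET, as a u32 index
 *           return 0;
 *   }
 */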
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/uio_driver.h>
#include <linux/io.h>
#include <linux/slab.h>
/* ID's for SERCOS III PCI card (PLX 9030) */
#define SERCOS_SUB_VENDOR_ID 0x1971
#define SERCOS_SUB_SYSID_3530 0x3530
#define SERCOS_SUB_SYSID_3535 0x3535
#define SERCOS_SUB_SYSID_3780 0x3780
/* Interrupt Enable Register */
#define IER0_OFFSET 0x08
/* Interrupt Status Register */
#define ISR0_OFFSET 0x18
struct sercos3_priv {
u32 ier0_cache;
spinlock_t ier0_cache_lock;
};
/* this function assumes ier0_cache_lock is locked! */
static void sercos3_disable_interrupts(struct uio_info *info,
struct sercos3_priv *priv)
{
void __iomem *ier0 = info->mem[3].internal_addr + IER0_OFFSET;
/* add enabled interrupts to cache */
priv->ier0_cache |= ioread32(ier0);
/* disable interrupts */
iowrite32(0, ier0);
}
/* this function assumes ier0_cache_lock is locked! */
static void sercos3_enable_interrupts(struct uio_info *info,
struct sercos3_priv *priv)
{
void __iomem *ier0 = info->mem[3].internal_addr + IER0_OFFSET;
/* restore previously enabled interrupts */
iowrite32(ioread32(ier0) | priv->ier0_cache, ier0);
priv->ier0_cache = 0;
}
static irqreturn_t sercos3_handler(int irq, struct uio_info *info)
{
struct sercos3_priv *priv = info->priv;
void __iomem *isr0 = info->mem[3].internal_addr + ISR0_OFFSET;
void __iomem *ier0 = info->mem[3].internal_addr + IER0_OFFSET;
if (!(ioread32(isr0) & ioread32(ier0)))
return IRQ_NONE;
spin_lock(&priv->ier0_cache_lock);
sercos3_disable_interrupts(info, priv);
spin_unlock(&priv->ier0_cache_lock);
return IRQ_HANDLED;
}
static int sercos3_irqcontrol(struct uio_info *info, s32 irq_on)
{
struct sercos3_priv *priv = info->priv;
spin_lock_irq(&priv->ier0_cache_lock);
if (irq_on)
sercos3_enable_interrupts(info, priv);
else
sercos3_disable_interrupts(info, priv);
spin_unlock_irq(&priv->ier0_cache_lock);
return 0;
}
static int sercos3_setup_iomem(struct pci_dev *dev, struct uio_info *info,
int n, int pci_bar)
{
info->mem[n].addr = pci_resource_start(dev, pci_bar);
if (!info->mem[n].addr)
return -1;
info->mem[n].internal_addr = ioremap(pci_resource_start(dev, pci_bar),
pci_resource_len(dev, pci_bar));
if (!info->mem[n].internal_addr)
return -1;
info->mem[n].size = pci_resource_len(dev, pci_bar);
info->mem[n].memtype = UIO_MEM_PHYS;
return 0;
}
static int sercos3_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct uio_info *info;
struct sercos3_priv *priv;
int i;
info = devm_kzalloc(&dev->dev, sizeof(struct uio_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
priv = devm_kzalloc(&dev->dev, sizeof(struct sercos3_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
if (pci_enable_device(dev))
return -ENODEV;
if (pci_request_regions(dev, "sercos3"))
goto out_disable;
/* we only need PCI BARs 0, 2, 3, 4, 5 */
if (sercos3_setup_iomem(dev, info, 0, 0))
goto out_unmap;
if (sercos3_setup_iomem(dev, info, 1, 2))
goto out_unmap;
if (sercos3_setup_iomem(dev, info, 2, 3))
goto out_unmap;
if (sercos3_setup_iomem(dev, info, 3, 4))
goto out_unmap;
if (sercos3_setup_iomem(dev, info, 4, 5))
goto out_unmap;
spin_lock_init(&priv->ier0_cache_lock);
info->priv = priv;
info->name = "Sercos_III_PCI";
info->version = "0.0.1";
info->irq = dev->irq;
info->irq_flags = IRQF_SHARED;
info->handler = sercos3_handler;
info->irqcontrol = sercos3_irqcontrol;
pci_set_drvdata(dev, info);
if (uio_register_device(&dev->dev, info))
goto out_unmap;
return 0;
out_unmap:
for (i = 0; i < 5; i++) {
if (info->mem[i].internal_addr)
iounmap(info->mem[i].internal_addr);
}
pci_release_regions(dev);
out_disable:
pci_disable_device(dev);
return -ENODEV;
}
static void sercos3_pci_remove(struct pci_dev *dev)
{
struct uio_info *info = pci_get_drvdata(dev);
int i;
uio_unregister_device(info);
pci_release_regions(dev);
pci_disable_device(dev);
for (i = 0; i < 5; i++) {
if (info->mem[i].internal_addr)
iounmap(info->mem[i].internal_addr);
}
}
static struct pci_device_id sercos3_pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_PLX,
.device = PCI_DEVICE_ID_PLX_9030,
.subvendor = SERCOS_SUB_VENDOR_ID,
.subdevice = SERCOS_SUB_SYSID_3530,
},
{
.vendor = PCI_VENDOR_ID_PLX,
.device = PCI_DEVICE_ID_PLX_9030,
.subvendor = SERCOS_SUB_VENDOR_ID,
.subdevice = SERCOS_SUB_SYSID_3535,
},
{
.vendor = PCI_VENDOR_ID_PLX,
.device = PCI_DEVICE_ID_PLX_9030,
.subvendor = SERCOS_SUB_VENDOR_ID,
.subdevice = SERCOS_SUB_SYSID_3780,
},
{ 0, }
};
static struct pci_driver sercos3_pci_driver = {
.name = "sercos3",
.id_table = sercos3_pci_ids,
.probe = sercos3_pci_probe,
.remove = sercos3_pci_remove,
};
module_pci_driver(sercos3_pci_driver);
MODULE_DESCRIPTION("UIO driver for the Automata Sercos III PCI card");
MODULE_AUTHOR("John Ogness <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/uio/uio_sercos3.c |
// SPDX-License-Identifier: GPL-2.0
/*
* uio_hv_generic - generic UIO driver for VMBus
*
* Copyright (c) 2013-2016 Brocade Communications Systems, Inc.
* Copyright (c) 2016, Microsoft Corporation.
*
* Since the driver does not declare any device ids, you must allocate
* an id and bind the device to the driver yourself. For example:
*
* Associate Network GUID with UIO device
* # echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" \
* > /sys/bus/vmbus/drivers/uio_hv_generic/new_id
* Then rebind
* # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \
* > /sys/bus/vmbus/drivers/hv_netvsc/unbind
* # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \
* > /sys/bus/vmbus/drivers/uio_hv_generic/bind
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uio_driver.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/hyperv.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include "../hv/hyperv_vmbus.h"
#define DRIVER_VERSION "0.02.1"
#define DRIVER_AUTHOR "Stephen Hemminger <sthemmin at microsoft.com>"
#define DRIVER_DESC "Generic UIO driver for VMBus devices"
#define HV_RING_SIZE 512 /* pages */
#define SEND_BUFFER_SIZE (16 * 1024 * 1024)
#define RECV_BUFFER_SIZE (31 * 1024 * 1024)
/*
* List of resources to be mapped to user space
* can be extended up to MAX_UIO_MAPS(5) items
*/
enum hv_uio_map {
TXRX_RING_MAP = 0,
INT_PAGE_MAP,
MON_PAGE_MAP,
RECV_BUF_MAP,
SEND_BUF_MAP
};
struct hv_uio_private_data {
struct uio_info info;
struct hv_device *device;
atomic_t refcnt;
void *recv_buf;
struct vmbus_gpadl recv_gpadl;
char recv_name[32]; /* "recv:4294967295" */
void *send_buf;
struct vmbus_gpadl send_gpadl;
char send_name[32];
};
/*
* This is the irqcontrol callback to be registered to uio_info.
* It can be used to disable/enable interrupt from user space processes.
*
* @param info
* pointer to uio_info.
* @param irq_state
* state value. 1 to enable interrupt, 0 to disable interrupt.
*/
static int
hv_uio_irqcontrol(struct uio_info *info, s32 irq_state)
{
struct hv_uio_private_data *pdata = info->priv;
struct hv_device *dev = pdata->device;
dev->channel->inbound.ring_buffer->interrupt_mask = !irq_state;
virt_mb();
return 0;
}
/*
* Callback from vmbus_event when something is in inbound ring.
*/
static void hv_uio_channel_cb(void *context)
{
struct vmbus_channel *chan = context;
struct hv_device *hv_dev = chan->device_obj;
struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
chan->inbound.ring_buffer->interrupt_mask = 1;
virt_mb();
uio_event_notify(&pdata->info);
}
/*
* Callback from vmbus_event when channel is rescinded.
*/
static void hv_uio_rescind(struct vmbus_channel *channel)
{
struct hv_device *hv_dev = channel->primary_channel->device_obj;
struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
/*
* Turn off the interrupt file handle
* Next read for event will return -EIO
*/
pdata->info.irq = 0;
/* Wake up reader */
uio_event_notify(&pdata->info);
}
/* Sysfs API to allow mmap of the ring buffers
* The ring buffer is allocated as contiguous memory by vmbus_open
*/
static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
struct vm_area_struct *vma)
{
struct vmbus_channel *channel
= container_of(kobj, struct vmbus_channel, kobj);
void *ring_buffer = page_address(channel->ringbuffer_page);
if (channel->state != CHANNEL_OPENED_STATE)
return -ENODEV;
return vm_iomap_memory(vma, virt_to_phys(ring_buffer),
channel->ringbuffer_pagecount << PAGE_SHIFT);
}
static const struct bin_attribute ring_buffer_bin_attr = {
.attr = {
.name = "ring",
.mode = 0600,
},
.size = 2 * HV_RING_SIZE * PAGE_SIZE,
.mmap = hv_uio_ring_mmap,
};
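/*
 * Illustrative only: userspace can mmap() this "ring" bin attribute to reach
 * a channel's ring buffer directly. A hedged sketch; the sysfs path below is
 * an assumption (it depends on the VMBus device UUID and the channel relid),
 * and ring_bytes stands for the size reported for the attribute above
 * (2 * HV_RING_SIZE * PAGE_SIZE):
 *
 * int fd = open("/sys/bus/vmbus/devices/<uuid>/channels/<relid>/ring",
 *               O_RDWR);
 * void *ring = mmap(NULL, ring_bytes, PROT_READ | PROT_WRITE,
 *                   MAP_SHARED, fd, 0);
 */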
/* Callback from VMBUS subsystem when new channel created. */
static void
hv_uio_new_channel(struct vmbus_channel *new_sc)
{
struct hv_device *hv_dev = new_sc->primary_channel->device_obj;
struct device *device = &hv_dev->device;
const size_t ring_bytes = HV_RING_SIZE * PAGE_SIZE;
int ret;
/* Create host communication ring */
ret = vmbus_open(new_sc, ring_bytes, ring_bytes, NULL, 0,
hv_uio_channel_cb, new_sc);
if (ret) {
dev_err(device, "vmbus_open subchannel failed: %d\n", ret);
return;
}
/* Disable interrupts on sub channel */
new_sc->inbound.ring_buffer->interrupt_mask = 1;
set_channel_read_mode(new_sc, HV_CALL_ISR);
ret = sysfs_create_bin_file(&new_sc->kobj, &ring_buffer_bin_attr);
if (ret) {
dev_err(device, "sysfs create ring bin file failed; %d\n", ret);
vmbus_close(new_sc);
}
}
/* free the reserved buffers for send and receive */
static void
hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata)
{
if (pdata->send_gpadl.gpadl_handle) {
vmbus_teardown_gpadl(dev->channel, &pdata->send_gpadl);
vfree(pdata->send_buf);
}
if (pdata->recv_gpadl.gpadl_handle) {
vmbus_teardown_gpadl(dev->channel, &pdata->recv_gpadl);
vfree(pdata->recv_buf);
}
}
/* VMBus primary channel is opened on first use */
static int
hv_uio_open(struct uio_info *info, struct inode *inode)
{
struct hv_uio_private_data *pdata
= container_of(info, struct hv_uio_private_data, info);
struct hv_device *dev = pdata->device;
int ret;
if (atomic_inc_return(&pdata->refcnt) != 1)
return 0;
vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind);
vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel);
ret = vmbus_connect_ring(dev->channel,
hv_uio_channel_cb, dev->channel);
if (ret == 0)
dev->channel->inbound.ring_buffer->interrupt_mask = 1;
else
atomic_dec(&pdata->refcnt);
return ret;
}
/* VMBus primary channel is closed on last close */
static int
hv_uio_release(struct uio_info *info, struct inode *inode)
{
struct hv_uio_private_data *pdata
= container_of(info, struct hv_uio_private_data, info);
struct hv_device *dev = pdata->device;
int ret = 0;
if (atomic_dec_and_test(&pdata->refcnt))
ret = vmbus_disconnect_ring(dev->channel);
return ret;
}
static int
hv_uio_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
struct vmbus_channel *channel = dev->channel;
struct hv_uio_private_data *pdata;
void *ring_buffer;
int ret;
/* Communicating with the host has to be via shared memory, not hypercalls */
if (!channel->offermsg.monitor_allocated) {
dev_err(&dev->device, "vmbus channel requires hypercall\n");
return -ENOTSUPP;
}
pdata = devm_kzalloc(&dev->device, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
ret = vmbus_alloc_ring(channel, HV_RING_SIZE * PAGE_SIZE,
HV_RING_SIZE * PAGE_SIZE);
if (ret)
return ret;
set_channel_read_mode(channel, HV_CALL_ISR);
/* Fill general uio info */
pdata->info.name = "uio_hv_generic";
pdata->info.version = DRIVER_VERSION;
pdata->info.irqcontrol = hv_uio_irqcontrol;
pdata->info.open = hv_uio_open;
pdata->info.release = hv_uio_release;
pdata->info.irq = UIO_IRQ_CUSTOM;
atomic_set(&pdata->refcnt, 0);
/* mem resources */
pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings";
ring_buffer = page_address(channel->ringbuffer_page);
pdata->info.mem[TXRX_RING_MAP].addr
= (uintptr_t)virt_to_phys(ring_buffer);
pdata->info.mem[TXRX_RING_MAP].size
= channel->ringbuffer_pagecount << PAGE_SHIFT;
pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_IOVA;
pdata->info.mem[INT_PAGE_MAP].name = "int_page";
pdata->info.mem[INT_PAGE_MAP].addr
= (uintptr_t)vmbus_connection.int_page;
pdata->info.mem[INT_PAGE_MAP].size = PAGE_SIZE;
pdata->info.mem[INT_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
pdata->info.mem[MON_PAGE_MAP].name = "monitor_page";
pdata->info.mem[MON_PAGE_MAP].addr
= (uintptr_t)vmbus_connection.monitor_pages[1];
pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE;
pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
if (pdata->recv_buf == NULL) {
ret = -ENOMEM;
goto fail_free_ring;
}
ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
RECV_BUFFER_SIZE, &pdata->recv_gpadl);
if (ret) {
vfree(pdata->recv_buf);
goto fail_close;
}
/* put the GPADL (Guest Physical Address Descriptor List) handle in the name */
snprintf(pdata->recv_name, sizeof(pdata->recv_name),
"recv:%u", pdata->recv_gpadl.gpadl_handle);
pdata->info.mem[RECV_BUF_MAP].name = pdata->recv_name;
pdata->info.mem[RECV_BUF_MAP].addr
= (uintptr_t)pdata->recv_buf;
pdata->info.mem[RECV_BUF_MAP].size = RECV_BUFFER_SIZE;
pdata->info.mem[RECV_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
pdata->send_buf = vzalloc(SEND_BUFFER_SIZE);
if (pdata->send_buf == NULL) {
ret = -ENOMEM;
goto fail_close;
}
ret = vmbus_establish_gpadl(channel, pdata->send_buf,
SEND_BUFFER_SIZE, &pdata->send_gpadl);
if (ret) {
vfree(pdata->send_buf);
goto fail_close;
}
snprintf(pdata->send_name, sizeof(pdata->send_name),
"send:%u", pdata->send_gpadl.gpadl_handle);
pdata->info.mem[SEND_BUF_MAP].name = pdata->send_name;
pdata->info.mem[SEND_BUF_MAP].addr
= (uintptr_t)pdata->send_buf;
pdata->info.mem[SEND_BUF_MAP].size = SEND_BUFFER_SIZE;
pdata->info.mem[SEND_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
pdata->info.priv = pdata;
pdata->device = dev;
ret = uio_register_device(&dev->device, &pdata->info);
if (ret) {
dev_err(&dev->device, "hv_uio register failed\n");
goto fail_close;
}
ret = sysfs_create_bin_file(&channel->kobj, &ring_buffer_bin_attr);
if (ret)
dev_notice(&dev->device,
"sysfs create ring bin file failed; %d\n", ret);
hv_set_drvdata(dev, pdata);
return 0;
fail_close:
hv_uio_cleanup(dev, pdata);
fail_free_ring:
vmbus_free_ring(dev->channel);
return ret;
}
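/*
 * Illustrative only: the five uio_mem regions filled in above are exposed
 * through the standard UIO mmap convention, where the mmap offset selects the
 * map index (offset = N * page size picks info->mem[N]). A minimal sketch,
 * assuming the device shows up as /dev/uio0 and txrx_bytes is read from the
 * map's sysfs "size" file:
 *
 * long pg = sysconf(_SC_PAGESIZE);
 * int fd = open("/dev/uio0", O_RDWR);
 * void *txrx = mmap(NULL, txrx_bytes, PROT_READ | PROT_WRITE,
 *                   MAP_SHARED, fd, TXRX_RING_MAP * pg);
 * void *recv = mmap(NULL, RECV_BUFFER_SIZE, PROT_READ | PROT_WRITE,
 *                   MAP_SHARED, fd, RECV_BUF_MAP * pg);
 */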
static void
hv_uio_remove(struct hv_device *dev)
{
struct hv_uio_private_data *pdata = hv_get_drvdata(dev);
if (!pdata)
return;
sysfs_remove_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr);
uio_unregister_device(&pdata->info);
hv_uio_cleanup(dev, pdata);
vmbus_free_ring(dev->channel);
}
static struct hv_driver hv_uio_drv = {
.name = "uio_hv_generic",
.id_table = NULL, /* only dynamic ids */
.probe = hv_uio_probe,
.remove = hv_uio_remove,
};
static int __init
hyperv_module_init(void)
{
return vmbus_driver_register(&hv_uio_drv);
}
static void __exit
hyperv_module_exit(void)
{
vmbus_driver_unregister(&hv_uio_drv);
}
module_init(hyperv_module_init);
module_exit(hyperv_module_exit);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
| linux-master | drivers/uio/uio_hv_generic.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Generic DFL driver for Userspace I/O devices
*
* Copyright (C) 2021 Intel Corporation, Inc.
*/
#include <linux/dfl.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/uio_driver.h>
#define DRIVER_NAME "uio_dfl"
static int uio_dfl_probe(struct dfl_device *ddev)
{
struct resource *r = &ddev->mmio_res;
struct device *dev = &ddev->dev;
struct uio_info *uioinfo;
struct uio_mem *uiomem;
int ret;
uioinfo = devm_kzalloc(dev, sizeof(struct uio_info), GFP_KERNEL);
if (!uioinfo)
return -ENOMEM;
uioinfo->name = DRIVER_NAME;
uioinfo->version = "0";
uiomem = &uioinfo->mem[0];
uiomem->memtype = UIO_MEM_PHYS;
uiomem->addr = r->start & PAGE_MASK;
uiomem->offs = r->start & ~PAGE_MASK;
uiomem->size = (uiomem->offs + resource_size(r)
+ PAGE_SIZE - 1) & PAGE_MASK;
uiomem->name = r->name;
/* IRQ is not yet supported */
uioinfo->irq = UIO_IRQ_NONE;
ret = devm_uio_register_device(dev, uioinfo);
if (ret)
dev_err(dev, "unable to register uio device\n");
return ret;
}
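/*
 * Worked example of the uiomem arithmetic above (a sketch, assuming a 4 KiB
 * PAGE_SIZE and a hypothetical DFL feature at 0x10000100 of length 0x40):
 *
 * addr = 0x10000100 & PAGE_MASK              -> 0x10000000
 * offs = 0x10000100 & ~PAGE_MASK             -> 0x100
 * size = (0x100 + 0x40 + 0xfff) & PAGE_MASK  -> 0x1000 (one full page)
 *
 * so userspace mmap()s whole pages and applies "offs" to reach the registers.
 */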
#define FME_FEATURE_ID_ETH_GROUP 0x10
#define FME_FEATURE_ID_HSSI_SUBSYS 0x15
#define FME_FEATURE_ID_VENDOR_SPECIFIC 0x23
#define PORT_FEATURE_ID_IOPLL_USRCLK 0x14
static const struct dfl_device_id uio_dfl_ids[] = {
{ FME_ID, FME_FEATURE_ID_ETH_GROUP },
{ FME_ID, FME_FEATURE_ID_HSSI_SUBSYS },
{ FME_ID, FME_FEATURE_ID_VENDOR_SPECIFIC },
{ PORT_ID, PORT_FEATURE_ID_IOPLL_USRCLK },
{ }
};
MODULE_DEVICE_TABLE(dfl, uio_dfl_ids);
static struct dfl_driver uio_dfl_driver = {
.drv = {
.name = DRIVER_NAME,
},
.id_table = uio_dfl_ids,
.probe = uio_dfl_probe,
};
module_dfl_driver(uio_dfl_driver);
MODULE_DESCRIPTION("Generic DFL driver for Userspace I/O devices");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/uio/uio_dfl.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* uio_aec.c -- simple driver for Adrienne Electronics Corp time code PCI device
*
* Copyright (C) 2008 Brandon Philips <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/uio_driver.h>
#include <linux/slab.h>
#define PCI_VENDOR_ID_AEC 0xaecb
#define PCI_DEVICE_ID_AEC_VITCLTC 0x6250
#define INT_ENABLE_ADDR 0xFC
#define INT_ENABLE 0x10
#define INT_DISABLE 0x0
#define INT_MASK_ADDR 0x2E
#define INT_MASK_ALL 0x3F
#define INTA_DRVR_ADDR 0xFE
#define INTA_ENABLED_FLAG 0x08
#define INTA_FLAG 0x01
#define MAILBOX 0x0F
static struct pci_device_id ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AEC, PCI_DEVICE_ID_AEC_VITCLTC), },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ids);
static irqreturn_t aectc_irq(int irq, struct uio_info *dev_info)
{
void __iomem *int_flag = dev_info->priv + INTA_DRVR_ADDR;
unsigned char status = ioread8(int_flag);
if ((status & INTA_ENABLED_FLAG) && (status & INTA_FLAG)) {
/* application writes 0x00 to 0x2F to get next interrupt */
status = ioread8(dev_info->priv + MAILBOX);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static void print_board_data(struct pci_dev *pdev, struct uio_info *i)
{
dev_info(&pdev->dev, "PCI-TC board vendor: %x%x number: %x%x"
" revision: %c%c\n",
ioread8(i->priv + 0x01),
ioread8(i->priv + 0x00),
ioread8(i->priv + 0x03),
ioread8(i->priv + 0x02),
ioread8(i->priv + 0x06),
ioread8(i->priv + 0x07));
}
static int probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct uio_info *info;
int ret;
info = devm_kzalloc(&pdev->dev, sizeof(struct uio_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
if (pci_enable_device(pdev))
return -ENODEV;
if (pci_request_regions(pdev, "aectc"))
goto out_disable;
info->name = "aectc";
info->port[0].start = pci_resource_start(pdev, 0);
if (!info->port[0].start)
goto out_release;
info->priv = pci_iomap(pdev, 0, 0);
if (!info->priv)
goto out_release;
info->port[0].size = pci_resource_len(pdev, 0);
info->port[0].porttype = UIO_PORT_GPIO;
info->version = "0.0.1";
info->irq = pdev->irq;
info->irq_flags = IRQF_SHARED;
info->handler = aectc_irq;
print_board_data(pdev, info);
ret = uio_register_device(&pdev->dev, info);
if (ret)
goto out_unmap;
iowrite32(INT_ENABLE, info->priv + INT_ENABLE_ADDR);
iowrite8(INT_MASK_ALL, info->priv + INT_MASK_ADDR);
if (!(ioread8(info->priv + INTA_DRVR_ADDR)
& INTA_ENABLED_FLAG))
dev_err(&pdev->dev, "aectc: interrupts not enabled\n");
pci_set_drvdata(pdev, info);
return 0;
out_unmap:
pci_iounmap(pdev, info->priv);
out_release:
pci_release_regions(pdev);
out_disable:
pci_disable_device(pdev);
return -ENODEV;
}
static void remove(struct pci_dev *pdev)
{
struct uio_info *info = pci_get_drvdata(pdev);
/* disable interrupts */
iowrite8(INT_DISABLE, info->priv + INT_MASK_ADDR);
iowrite32(INT_DISABLE, info->priv + INT_ENABLE_ADDR);
/* read mailbox to ensure board drops irq */
ioread8(info->priv + MAILBOX);
uio_unregister_device(info);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_iounmap(pdev, info->priv);
}
static struct pci_driver pci_driver = {
.name = "aectc",
.id_table = ids,
.probe = probe,
.remove = remove,
};
module_pci_driver(pci_driver);
MODULE_LICENSE("GPL");
| linux-master | drivers/uio/uio_aec.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Core registration and callback routines for MTD
* drivers and users.
*
* Copyright © 1999-2010 David Woodhouse <[email protected]>
* Copyright © 2006 Red Hat UK Limited
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/leds.h>
#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>
#include <linux/root_dev.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include "mtdcore.h"
struct backing_dev_info *mtd_bdi;
#ifdef CONFIG_PM_SLEEP
static int mtd_cls_suspend(struct device *dev)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
return mtd ? mtd_suspend(mtd) : 0;
}
static int mtd_cls_resume(struct device *dev)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
if (mtd)
mtd_resume(mtd);
return 0;
}
static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif
static struct class mtd_class = {
.name = "mtd",
.pm = MTD_CLS_PM_OPS,
};
static DEFINE_IDR(mtd_idr);
/* These are exported solely for the purpose of mtd_blkdevs.c. You
should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);
struct mtd_info *__mtd_next_device(int i)
{
return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);
static LIST_HEAD(mtd_notifiers);
#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
/* REVISIT once MTD uses the driver model better, whoever allocates
* the mtd_info will probably want to use the release() hook...
*/
static void mtd_release(struct device *dev)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
dev_t index = MTD_DEVT(mtd->index);
idr_remove(&mtd_idr, mtd->index);
of_node_put(mtd_get_of_node(mtd));
if (mtd_is_partition(mtd))
release_mtd_partition(mtd);
/* remove /dev/mtdXro node */
device_destroy(&mtd_class, index + 1);
}
static void mtd_device_release(struct kref *kref)
{
struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt);
bool is_partition = mtd_is_partition(mtd);
debugfs_remove_recursive(mtd->dbg.dfs_dir);
/* Try to remove the NVMEM provider */
nvmem_unregister(mtd->nvmem);
device_unregister(&mtd->dev);
/*
* Clear dev so mtd can be safely re-registered later if desired.
* Should not be done for partition,
* as it was already destroyed in device_unregister().
*/
if (!is_partition)
memset(&mtd->dev, 0, sizeof(mtd->dev));
module_put(THIS_MODULE);
}
#define MTD_DEVICE_ATTR_RO(name) \
static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)
#define MTD_DEVICE_ATTR_RW(name) \
static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)
static ssize_t mtd_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
char *type;
switch (mtd->type) {
case MTD_ABSENT:
type = "absent";
break;
case MTD_RAM:
type = "ram";
break;
case MTD_ROM:
type = "rom";
break;
case MTD_NORFLASH:
type = "nor";
break;
case MTD_NANDFLASH:
type = "nand";
break;
case MTD_DATAFLASH:
type = "dataflash";
break;
case MTD_UBIVOLUME:
type = "ubi";
break;
case MTD_MLCNANDFLASH:
type = "mlc-nand";
break;
default:
type = "unknown";
}
return sysfs_emit(buf, "%s\n", type);
}
MTD_DEVICE_ATTR_RO(type);
static ssize_t mtd_flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
}
MTD_DEVICE_ATTR_RO(flags);
static ssize_t mtd_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
}
MTD_DEVICE_ATTR_RO(size);
static ssize_t mtd_erasesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
}
MTD_DEVICE_ATTR_RO(erasesize);
static ssize_t mtd_writesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
}
MTD_DEVICE_ATTR_RO(writesize);
static ssize_t mtd_subpagesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
return sysfs_emit(buf, "%u\n", subpagesize);
}
MTD_DEVICE_ATTR_RO(subpagesize);
static ssize_t mtd_oobsize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
}
MTD_DEVICE_ATTR_RO(oobsize);
static ssize_t mtd_oobavail_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
return sysfs_emit(buf, "%u\n", mtd->oobavail);
}
MTD_DEVICE_ATTR_RO(oobavail);
static ssize_t mtd_numeraseregions_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
}
MTD_DEVICE_ATTR_RO(numeraseregions);
static ssize_t mtd_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
return sysfs_emit(buf, "%s\n", mtd->name);
}
MTD_DEVICE_ATTR_RO(name);
static ssize_t mtd_ecc_strength_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
}
MTD_DEVICE_ATTR_RO(ecc_strength);
static ssize_t mtd_bitflip_threshold_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
}
static ssize_t mtd_bitflip_threshold_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
unsigned int bitflip_threshold;
int retval;
retval = kstrtouint(buf, 0, &bitflip_threshold);
if (retval)
return retval;
mtd->bitflip_threshold = bitflip_threshold;
return count;
}
MTD_DEVICE_ATTR_RW(bitflip_threshold);
static ssize_t mtd_ecc_step_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
}
MTD_DEVICE_ATTR_RO(ecc_step_size);
static ssize_t mtd_corrected_bits_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
}
MTD_DEVICE_ATTR_RO(corrected_bits); /* ecc stats corrected */
static ssize_t mtd_ecc_failures_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
return sysfs_emit(buf, "%u\n", ecc_stats->failed);
}
MTD_DEVICE_ATTR_RO(ecc_failures); /* ecc stats errors */
static ssize_t mtd_bad_blocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
}
MTD_DEVICE_ATTR_RO(bad_blocks);
static ssize_t mtd_bbt_blocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
}
MTD_DEVICE_ATTR_RO(bbt_blocks);
static struct attribute *mtd_attrs[] = {
&dev_attr_type.attr,
&dev_attr_flags.attr,
&dev_attr_size.attr,
&dev_attr_erasesize.attr,
&dev_attr_writesize.attr,
&dev_attr_subpagesize.attr,
&dev_attr_oobsize.attr,
&dev_attr_oobavail.attr,
&dev_attr_numeraseregions.attr,
&dev_attr_name.attr,
&dev_attr_ecc_strength.attr,
&dev_attr_ecc_step_size.attr,
&dev_attr_corrected_bits.attr,
&dev_attr_ecc_failures.attr,
&dev_attr_bad_blocks.attr,
&dev_attr_bbt_blocks.attr,
&dev_attr_bitflip_threshold.attr,
NULL,
};
ATTRIBUTE_GROUPS(mtd);
static const struct device_type mtd_devtype = {
.name = "mtd",
.groups = mtd_groups,
.release = mtd_release,
};
static bool mtd_expert_analysis_mode;
#ifdef CONFIG_DEBUG_FS
bool mtd_check_expert_analysis_mode(void)
{
const char *mtd_expert_analysis_warning =
"Bad block checks have been entirely disabled.\n"
"This is only reserved for post-mortem forensics and debug purposes.\n"
"Never enable this mode if you do not know what you are doing!\n";
return WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning);
}
EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode);
#endif
static struct dentry *dfs_dir_mtd;
static void mtd_debugfs_populate(struct mtd_info *mtd)
{
struct device *dev = &mtd->dev;
if (IS_ERR_OR_NULL(dfs_dir_mtd))
return;
mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
}
#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
switch (mtd->type) {
case MTD_RAM:
return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
NOMMU_MAP_READ | NOMMU_MAP_WRITE;
case MTD_ROM:
return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
NOMMU_MAP_READ;
default:
return NOMMU_MAP_COPY;
}
}
EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
#endif
static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
void *cmd)
{
struct mtd_info *mtd;
mtd = container_of(n, struct mtd_info, reboot_notifier);
mtd->_reboot(mtd);
return NOTIFY_DONE;
}
/**
* mtd_wunit_to_pairing_info - get pairing information of a wunit
* @mtd: pointer to new MTD device info structure
* @wunit: write unit we are interested in
* @info: returned pairing information
*
* Retrieve pairing information associated to the wunit.
* This is mainly useful when dealing with MLC/TLC NANDs where pages can be
* paired together, and where programming a page may influence the page it is
* paired with.
* The notion of page is replaced by the term wunit (write-unit) to stay
* consistent with the ->writesize field.
*
* The @wunit argument can be extracted from an absolute offset using
* mtd_offset_to_wunit(). @info is filled with the pairing information attached
* to @wunit.
*
* From the pairing info the MTD user can find all the wunits paired with
* @wunit using the following loop:
*
* for (i = 0; i < mtd_pairing_groups(mtd); i++) {
* info.pair = i;
* mtd_pairing_info_to_wunit(mtd, &info);
* ...
* }
*/
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
struct mtd_pairing_info *info)
{
struct mtd_info *master = mtd_get_master(mtd);
int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);
if (wunit < 0 || wunit >= npairs)
return -EINVAL;
if (master->pairing && master->pairing->get_info)
return master->pairing->get_info(master, wunit, info);
info->group = 0;
info->pair = wunit;
return 0;
}
EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
/**
* mtd_pairing_info_to_wunit - get wunit from pairing information
* @mtd: pointer to new MTD device info structure
* @info: pairing information struct
*
* Returns a positive number representing the wunit associated to the info
* struct, or a negative error code.
*
* This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
* iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
* doc).
*
* It can also be used to only program the first page of each pair (i.e.
* page attached to group 0), which allows one to use an MLC NAND in
* software-emulated SLC mode:
*
* info.group = 0;
* npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
* for (info.pair = 0; info.pair < npairs; info.pair++) {
* wunit = mtd_pairing_info_to_wunit(mtd, &info);
* mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
* mtd->writesize, &retlen, buf + (i * mtd->writesize));
* }
*/
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
const struct mtd_pairing_info *info)
{
struct mtd_info *master = mtd_get_master(mtd);
int ngroups = mtd_pairing_groups(master);
int npairs = mtd_wunit_per_eb(master) / ngroups;
if (!info || info->pair < 0 || info->pair >= npairs ||
info->group < 0 || info->group >= ngroups)
return -EINVAL;
if (master->pairing && master->pairing->get_wunit)
return mtd->pairing->get_wunit(master, info);
return info->pair;
}
EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
/**
* mtd_pairing_groups - get the number of pairing groups
* @mtd: pointer to new MTD device info structure
*
* Returns the number of pairing groups.
*
* This number is usually equal to the number of bits exposed by a single
* cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
* to iterate over all pages of a given pair.
*/
int mtd_pairing_groups(struct mtd_info *mtd)
{
struct mtd_info *master = mtd_get_master(mtd);
if (!master->pairing || !master->pairing->ngroups)
return 1;
return master->pairing->ngroups;
}
EXPORT_SYMBOL_GPL(mtd_pairing_groups);
static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
void *val, size_t bytes)
{
struct mtd_info *mtd = priv;
size_t retlen;
int err;
err = mtd_read(mtd, offset, bytes, &retlen, val);
if (err && err != -EUCLEAN)
return err;
return retlen == bytes ? 0 : -EIO;
}
static int mtd_nvmem_add(struct mtd_info *mtd)
{
struct device_node *node = mtd_get_of_node(mtd);
struct nvmem_config config = {};
config.id = NVMEM_DEVID_NONE;
config.dev = &mtd->dev;
config.name = dev_name(&mtd->dev);
config.owner = THIS_MODULE;
config.reg_read = mtd_nvmem_reg_read;
config.size = mtd->size;
config.word_size = 1;
config.stride = 1;
config.read_only = true;
config.root_only = true;
config.ignore_wp = true;
config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
config.priv = mtd;
mtd->nvmem = nvmem_register(&config);
if (IS_ERR(mtd->nvmem)) {
/* Just ignore if there is no NVMEM support in the kernel */
if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP)
mtd->nvmem = NULL;
else
return dev_err_probe(&mtd->dev, PTR_ERR(mtd->nvmem),
"Failed to register NVMEM device\n");
}
return 0;
}
static void mtd_check_of_node(struct mtd_info *mtd)
{
struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
const char *pname, *prefix = "partition-";
int plen, mtd_name_len, offset, prefix_len;
/* Check if MTD already has a device node */
if (mtd_get_of_node(mtd))
return;
if (!mtd_is_partition(mtd))
return;
parent_dn = of_node_get(mtd_get_of_node(mtd->parent));
if (!parent_dn)
return;
if (mtd_is_partition(mtd->parent))
partitions = of_node_get(parent_dn);
else
partitions = of_get_child_by_name(parent_dn, "partitions");
if (!partitions)
goto exit_parent;
prefix_len = strlen(prefix);
mtd_name_len = strlen(mtd->name);
/* Search if a partition is defined with the same name */
for_each_child_of_node(partitions, mtd_dn) {
/* Skip partition with no/wrong prefix */
if (!of_node_name_prefix(mtd_dn, prefix))
continue;
/* Labels have priority. Check that first */
if (!of_property_read_string(mtd_dn, "label", &pname)) {
offset = 0;
} else {
pname = mtd_dn->name;
offset = prefix_len;
}
plen = strlen(pname) - offset;
if (plen == mtd_name_len &&
!strncmp(mtd->name, pname + offset, plen)) {
mtd_set_of_node(mtd, mtd_dn);
break;
}
}
of_node_put(partitions);
exit_parent:
of_node_put(parent_dn);
}
/**
* add_mtd_device - register an MTD device
* @mtd: pointer to new MTD device info structure
*
* Add a device to the list of MTD devices present in the system, and
* notify each currently active MTD 'user' of its arrival. Returns
* zero on success or non-zero on failure.
*/
int add_mtd_device(struct mtd_info *mtd)
{
struct device_node *np = mtd_get_of_node(mtd);
struct mtd_info *master = mtd_get_master(mtd);
struct mtd_notifier *not;
int i, error, ofidx;
/*
* May occur, for instance, on buggy drivers which call
* mtd_device_parse_register() multiple times on the same master MTD,
* especially with CONFIG_MTD_PARTITIONED_MASTER=y.
*/
if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
return -EEXIST;
BUG_ON(mtd->writesize == 0);
/*
* MTD drivers should implement ->_{write,read}() or
* ->_{write,read}_oob(), but not both.
*/
if (WARN_ON((mtd->_write && mtd->_write_oob) ||
(mtd->_read && mtd->_read_oob)))
return -EINVAL;
if (WARN_ON((!mtd->erasesize || !master->_erase) &&
!(mtd->flags & MTD_NO_ERASE)))
return -EINVAL;
/*
* MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
* master is an MLC NAND and has a proper pairing scheme defined.
* We also reject masters that implement ->_writev() for now, because
* NAND controller drivers don't implement this hook, and adding the
* SLC -> MLC address/length conversion to this path is useless if we
* don't have a user.
*/
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
(!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
!master->pairing || master->_writev))
return -EINVAL;
mutex_lock(&mtd_table_mutex);
ofidx = -1;
if (np)
ofidx = of_alias_get_id(np, "mtd");
if (ofidx >= 0)
i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
else
i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
if (i < 0) {
error = i;
goto fail_locked;
}
mtd->index = i;
kref_init(&mtd->refcnt);
/* default value if not set by driver */
if (mtd->bitflip_threshold == 0)
mtd->bitflip_threshold = mtd->ecc_strength;
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
int ngroups = mtd_pairing_groups(master);
mtd->erasesize /= ngroups;
mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
mtd->erasesize;
}
if (is_power_of_2(mtd->erasesize))
mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
else
mtd->erasesize_shift = 0;
if (is_power_of_2(mtd->writesize))
mtd->writesize_shift = ffs(mtd->writesize) - 1;
else
mtd->writesize_shift = 0;
mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
/* Some chips always power up locked. Unlock them now */
if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
error = mtd_unlock(mtd, 0, mtd->size);
if (error && error != -EOPNOTSUPP)
printk(KERN_WARNING
"%s: unlock failed, writes may not work\n",
mtd->name);
/* Ignore unlock failures? */
error = 0;
}
/* Caller should have set dev.parent to match the
* physical device, if appropriate.
*/
mtd->dev.type = &mtd_devtype;
mtd->dev.class = &mtd_class;
mtd->dev.devt = MTD_DEVT(i);
dev_set_name(&mtd->dev, "mtd%d", i);
dev_set_drvdata(&mtd->dev, mtd);
mtd_check_of_node(mtd);
of_node_get(mtd_get_of_node(mtd));
error = device_register(&mtd->dev);
if (error) {
put_device(&mtd->dev);
goto fail_added;
}
/* Add the nvmem provider */
error = mtd_nvmem_add(mtd);
if (error)
goto fail_nvmem_add;
mtd_debugfs_populate(mtd);
device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
"mtd%dro", i);
pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
/* No need to get a refcount on the module containing
the notifier, since we hold the mtd_table_mutex */
list_for_each_entry(not, &mtd_notifiers, list)
not->add(mtd);
mutex_unlock(&mtd_table_mutex);
if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) {
if (IS_BUILTIN(CONFIG_MTD)) {
pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name);
ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
} else {
pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
mtd->index, mtd->name);
}
}
/* We _know_ we aren't being removed, because
our caller is still holding us here. So none
of this try_ nonsense, and no bitching about it
either. :) */
__module_get(THIS_MODULE);
return 0;
fail_nvmem_add:
device_unregister(&mtd->dev);
fail_added:
of_node_put(mtd_get_of_node(mtd));
idr_remove(&mtd_idr, i);
fail_locked:
mutex_unlock(&mtd_table_mutex);
return error;
}
/**
* del_mtd_device - unregister an MTD device
* @mtd: pointer to MTD device info structure
*
* Remove a device from the list of MTD devices present in the system,
* and notify each currently active MTD 'user' of its departure.
* Returns zero on success or -ENODEV on failure, which currently will happen
* if the requested device does not appear to be present in the list.
*/
int del_mtd_device(struct mtd_info *mtd)
{
int ret;
struct mtd_notifier *not;
mutex_lock(&mtd_table_mutex);
if (idr_find(&mtd_idr, mtd->index) != mtd) {
ret = -ENODEV;
goto out_error;
}
/* No need to get a refcount on the module containing
the notifier, since we hold the mtd_table_mutex */
list_for_each_entry(not, &mtd_notifiers, list)
not->remove(mtd);
kref_put(&mtd->refcnt, mtd_device_release);
ret = 0;
out_error:
mutex_unlock(&mtd_table_mutex);
return ret;
}
/*
* Set a few defaults based on the parent devices, if not provided by the
* driver
*/
static void mtd_set_dev_defaults(struct mtd_info *mtd)
{
if (mtd->dev.parent) {
if (!mtd->owner && mtd->dev.parent->driver)
mtd->owner = mtd->dev.parent->driver->owner;
if (!mtd->name)
mtd->name = dev_name(mtd->dev.parent);
} else {
pr_debug("mtd device won't show a device symlink in sysfs\n");
}
INIT_LIST_HEAD(&mtd->partitions);
mutex_init(&mtd->master.partitions_lock);
mutex_init(&mtd->master.chrdev_lock);
}
static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
{
struct otp_info *info;
ssize_t size = 0;
unsigned int i;
size_t retlen;
int ret;
info = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!info)
return -ENOMEM;
if (is_user)
ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
else
ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
if (ret)
goto err;
for (i = 0; i < retlen / sizeof(*info); i++)
size += info[i].length;
kfree(info);
return size;
err:
kfree(info);
/* ENODATA means there is no OTP region. */
return ret == -ENODATA ? 0 : ret;
}
static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
const char *compatible,
int size,
nvmem_reg_read_t reg_read)
{
struct nvmem_device *nvmem = NULL;
struct nvmem_config config = {};
struct device_node *np;
/* DT binding is optional */
np = of_get_compatible_child(mtd->dev.of_node, compatible);
/* OTP nvmem will be registered on the physical device */
config.dev = mtd->dev.parent;
config.name = compatible;
config.id = NVMEM_DEVID_AUTO;
config.owner = THIS_MODULE;
config.type = NVMEM_TYPE_OTP;
config.root_only = true;
config.ignore_wp = true;
config.reg_read = reg_read;
config.size = size;
config.of_node = np;
config.priv = mtd;
nvmem = nvmem_register(&config);
/* Just ignore if there is no NVMEM support in the kernel */
if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
nvmem = NULL;
of_node_put(np);
return nvmem;
}
static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
void *val, size_t bytes)
{
struct mtd_info *mtd = priv;
size_t retlen;
int ret;
ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
if (ret)
return ret;
return retlen == bytes ? 0 : -EIO;
}
static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
void *val, size_t bytes)
{
struct mtd_info *mtd = priv;
size_t retlen;
int ret;
ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
if (ret)
return ret;
return retlen == bytes ? 0 : -EIO;
}
static int mtd_otp_nvmem_add(struct mtd_info *mtd)
{
struct device *dev = mtd->dev.parent;
struct nvmem_device *nvmem;
ssize_t size;
int err;
if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
size = mtd_otp_size(mtd, true);
if (size < 0)
return size;
if (size > 0) {
nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
mtd_nvmem_user_otp_reg_read);
if (IS_ERR(nvmem)) {
err = PTR_ERR(nvmem);
goto err;
}
mtd->otp_user_nvmem = nvmem;
}
}
if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
size = mtd_otp_size(mtd, false);
if (size < 0) {
err = size;
goto err;
}
if (size > 0) {
/*
* The factory OTP contains things such as a unique serial
* number and is small, so let's read it out and put it
* into the entropy pool.
*/
void *otp;
otp = kmalloc(size, GFP_KERNEL);
if (!otp) {
err = -ENOMEM;
goto err;
}
err = mtd_nvmem_fact_otp_reg_read(mtd, 0, otp, size);
if (err < 0) {
kfree(otp);
goto err;
}
add_device_randomness(otp, err);
kfree(otp);
nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
mtd_nvmem_fact_otp_reg_read);
if (IS_ERR(nvmem)) {
err = PTR_ERR(nvmem);
goto err;
}
mtd->otp_factory_nvmem = nvmem;
}
}
return 0;
err:
nvmem_unregister(mtd->otp_user_nvmem);
return dev_err_probe(dev, err, "Failed to register OTP NVMEM device\n");
}
/**
* mtd_device_parse_register - parse partitions and register an MTD device.
*
* @mtd: the MTD device to register
* @types: the list of MTD partition probes to try, see
* 'parse_mtd_partitions()' for more information
* @parser_data: MTD partition parser-specific data
* @parts: fallback partition information to register, if parsing fails;
* only valid if %nr_parts > %0
* @nr_parts: the number of partitions in parts, if zero then the full
* MTD device is registered if no partition info is found
*
* This function aggregates MTD partitions parsing (done by
* 'parse_mtd_partitions()') and MTD device and partitions registering. It
* basically follows the most common pattern found in many MTD drivers:
*
* * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
* registered first.
* * Then It tries to probe partitions on MTD device @mtd using parsers
* specified in @types (if @types is %NULL, then the default list of parsers
* is used, see 'parse_mtd_partitions()' for more information). If none are
* found this functions tries to fallback to information specified in
* @parts/@nr_parts.
* * If no partitions were found this function just registers the MTD device
* @mtd and exits.
*
* Returns zero in case of success and a negative error code in case of failure.
*/
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
struct mtd_part_parser_data *parser_data,
const struct mtd_partition *parts,
int nr_parts)
{
int ret;
mtd_set_dev_defaults(mtd);
ret = mtd_otp_nvmem_add(mtd);
if (ret)
goto out;
if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
ret = add_mtd_device(mtd);
if (ret)
goto out;
}
/* Prefer parsed partitions over driver-provided fallback */
ret = parse_mtd_partitions(mtd, types, parser_data);
if (ret == -EPROBE_DEFER)
goto out;
if (ret > 0)
ret = 0;
else if (nr_parts)
ret = add_mtd_partitions(mtd, parts, nr_parts);
else if (!device_is_registered(&mtd->dev))
ret = add_mtd_device(mtd);
else
ret = 0;
if (ret)
goto out;
/*
* FIXME: some drivers unfortunately call this function more than once.
* So we have to check if we've already assigned the reboot notifier.
*
* Generally, we can make multiple calls work for most cases, but it
* does cause problems with parse_mtd_partitions() above (e.g.,
* cmdlineparts will register partitions more than once).
*/
WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
"MTD already registered\n");
if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
register_reboot_notifier(&mtd->reboot_notifier);
}
out:
if (ret) {
nvmem_unregister(mtd->otp_user_nvmem);
nvmem_unregister(mtd->otp_factory_nvmem);
}
if (ret && device_is_registered(&mtd->dev))
del_mtd_device(mtd);
return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
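/*
 * Illustrative only: the common calling pattern described in the kernel-doc
 * above, as seen from a flash driver's probe path. The partition table here
 * is a made-up fallback; drivers usually pass NULL for @types so the default
 * parsers (DT, cmdline, ...) are tried first.
 *
 * static const struct mtd_partition example_parts[] = {
 *         { .name = "boot", .offset = 0, .size = SZ_1M },
 *         { .name = "rootfs", .offset = MTDPART_OFS_APPEND,
 *           .size = MTDPART_SIZ_FULL },
 * };
 *
 * ret = mtd_device_parse_register(mtd, NULL, NULL, example_parts,
 *                                 ARRAY_SIZE(example_parts));
 */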
/**
* mtd_device_unregister - unregister an existing MTD device.
*
* @master: the MTD device to unregister. This will unregister both the master
* and any partitions if registered.
*/
int mtd_device_unregister(struct mtd_info *master)
{
int err;
if (master->_reboot) {
unregister_reboot_notifier(&master->reboot_notifier);
memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
}
nvmem_unregister(master->otp_user_nvmem);
nvmem_unregister(master->otp_factory_nvmem);
err = del_mtd_partitions(master);
if (err)
return err;
if (!device_is_registered(&master->dev))
return 0;
return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);
/**
* register_mtd_user - register a 'user' of MTD devices.
* @new: pointer to notifier info structure
*
* Registers a pair of callbacks function to be called upon addition
* or removal of MTD devices. Causes the 'add' callback to be immediately
* invoked for each MTD device currently present in the system.
*/
void register_mtd_user (struct mtd_notifier *new)
{
struct mtd_info *mtd;
mutex_lock(&mtd_table_mutex);
list_add(&new->list, &mtd_notifiers);
__module_get(THIS_MODULE);
mtd_for_each_device(mtd)
new->add(mtd);
mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);
/**
* unregister_mtd_user - unregister a 'user' of MTD devices.
* @old: pointer to notifier info structure
*
* Removes a callback function pair from the list of 'users' to be
* notified upon addition or removal of MTD devices. Causes the
* 'remove' callback to be immediately invoked for each MTD device
* currently present in the system.
*/
int unregister_mtd_user (struct mtd_notifier *old)
{
struct mtd_info *mtd;
mutex_lock(&mtd_table_mutex);
module_put(THIS_MODULE);
mtd_for_each_device(mtd)
old->remove(mtd);
list_del(&old->list);
mutex_unlock(&mtd_table_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);
/**
* get_mtd_device - obtain a validated handle for an MTD device
* @mtd: last known address of the required MTD device
* @num: internal device number of the required MTD device
*
* Given a number and NULL address, return the num'th entry in the device
* table, if any. Given an address and num == -1, search the device table
* for a device with that address and return it if it's still present. Given
* both, return the num'th device only if its address matches. Return an
* error code if not.
*/
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
struct mtd_info *ret = NULL, *other;
int err = -ENODEV;
mutex_lock(&mtd_table_mutex);
if (num == -1) {
mtd_for_each_device(other) {
if (other == mtd) {
ret = mtd;
break;
}
}
} else if (num >= 0) {
ret = idr_find(&mtd_idr, num);
if (mtd && mtd != ret)
ret = NULL;
}
if (!ret) {
ret = ERR_PTR(err);
goto out;
}
err = __get_mtd_device(ret);
if (err)
ret = ERR_PTR(err);
out:
mutex_unlock(&mtd_table_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);
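/*
 * Illustrative only: the lookup semantics documented above, from a user's
 * point of view. Every successful get_mtd_device() must be balanced by a
 * put_mtd_device() (defined further below):
 *
 * struct mtd_info *mtd = get_mtd_device(NULL, 0);  // lookup by number
 * if (IS_ERR(mtd))
 *         return PTR_ERR(mtd);
 * ... use the device ...
 * put_mtd_device(mtd);
 */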
int __get_mtd_device(struct mtd_info *mtd)
{
struct mtd_info *master = mtd_get_master(mtd);
int err;
if (master->_get_device) {
err = master->_get_device(mtd);
if (err)
return err;
}
if (!try_module_get(master->owner)) {
if (master->_put_device)
master->_put_device(master);
return -ENODEV;
}
while (mtd) {
if (mtd != master)
kref_get(&mtd->refcnt);
mtd = mtd->parent;
}
if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
kref_get(&master->refcnt);
return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);
/**
* of_get_mtd_device_by_node - obtain an MTD device associated with a given node
*
* @np: device tree node
*/
struct mtd_info *of_get_mtd_device_by_node(struct device_node *np)
{
struct mtd_info *mtd = NULL;
struct mtd_info *tmp;
int err;
mutex_lock(&mtd_table_mutex);
err = -EPROBE_DEFER;
mtd_for_each_device(tmp) {
if (mtd_get_of_node(tmp) == np) {
mtd = tmp;
err = __get_mtd_device(mtd);
break;
}
}
mutex_unlock(&mtd_table_mutex);
return err ? ERR_PTR(err) : mtd;
}
EXPORT_SYMBOL_GPL(of_get_mtd_device_by_node);
/**
* get_mtd_device_nm - obtain a validated handle for an MTD device by
* device name
* @name: MTD device name to open
*
* This function returns MTD device description structure in case of
* success and an error code in case of failure.
*/
struct mtd_info *get_mtd_device_nm(const char *name)
{
int err = -ENODEV;
struct mtd_info *mtd = NULL, *other;
mutex_lock(&mtd_table_mutex);
mtd_for_each_device(other) {
if (!strcmp(name, other->name)) {
mtd = other;
break;
}
}
if (!mtd)
goto out_unlock;
err = __get_mtd_device(mtd);
if (err)
goto out_unlock;
mutex_unlock(&mtd_table_mutex);
return mtd;
out_unlock:
mutex_unlock(&mtd_table_mutex);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
void put_mtd_device(struct mtd_info *mtd)
{
mutex_lock(&mtd_table_mutex);
__put_mtd_device(mtd);
mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(put_mtd_device);
void __put_mtd_device(struct mtd_info *mtd)
{
struct mtd_info *master = mtd_get_master(mtd);
while (mtd) {
/* kref_put() can release mtd, so keep a reference to mtd->parent */
struct mtd_info *parent = mtd->parent;
if (mtd != master)
kref_put(&mtd->refcnt, mtd_device_release);
mtd = parent;
}
if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
kref_put(&master->refcnt, mtd_device_release);
module_put(master->owner);
/* must be the last as master can be freed in the _put_device */
if (master->_put_device)
master->_put_device(master);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);
/*
* Erase is a synchronous operation. Device drivers are expected to return a
* negative error code if the operation failed and update instr->fail_addr
* to point to the portion that was not properly erased.
*/
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct mtd_info *master = mtd_get_master(mtd);
u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
struct erase_info adjinstr;
int ret;
instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
adjinstr = *instr;
if (!mtd->erasesize || !master->_erase)
return -ENOTSUPP;
if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
return -EINVAL;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
if (!instr->len)
return 0;
ledtrig_mtd_activity();
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
master->erasesize;
adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
master->erasesize) -
adjinstr.addr;
}
adjinstr.addr += mst_ofs;
ret = master->_erase(master, &adjinstr);
if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
instr->fail_addr = adjinstr.fail_addr - mst_ofs;
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
master);
instr->fail_addr *= mtd->erasesize;
}
}
return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
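/*
 * Illustrative only: a minimal erase call matching the contract described
 * above. The offset and length here are hypothetical and should be
 * erase-block aligned; on failure, instr.fail_addr (if not
 * MTD_FAIL_ADDR_UNKNOWN) points at the part that was not erased.
 *
 * struct erase_info instr = {
 *         .addr = 0,
 *         .len  = mtd->erasesize,
 * };
 *
 * err = mtd_erase(mtd, &instr);
 */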
/*
* This stuff is for eXecute-In-Place (XIP). phys is optional and may be set to NULL.
*/
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
void **virt, resource_size_t *phys)
{
struct mtd_info *master = mtd_get_master(mtd);
*retlen = 0;
*virt = NULL;
if (phys)
*phys = 0;
if (!master->_point)
return -EOPNOTSUPP;
if (from < 0 || from >= mtd->size || len > mtd->size - from)
return -EINVAL;
if (!len)
return 0;
from = mtd_get_master_ofs(mtd, from);
return master->_point(master, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);
/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
struct mtd_info *master = mtd_get_master(mtd);
if (!master->_unpoint)
return -EOPNOTSUPP;
if (from < 0 || from >= mtd->size || len > mtd->size - from)
return -EINVAL;
if (!len)
return 0;
return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);
/*
* Allow NOMMU mmap() to directly map the device (if not NULL)
* - return the address to which the offset maps
* - return -ENOSYS to indicate refusal to do the mapping
*/
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
unsigned long offset, unsigned long flags)
{
size_t retlen;
void *virt;
int ret;
ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
if (ret)
return ret;
if (retlen != len) {
mtd_unpoint(mtd, offset, retlen);
return -ENOSYS;
}
return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
const struct mtd_ecc_stats *old_stats)
{
struct mtd_ecc_stats diff;
if (master == mtd)
return;
diff = master->ecc_stats;
diff.failed -= old_stats->failed;
diff.corrected -= old_stats->corrected;
while (mtd->parent) {
mtd->ecc_stats.failed += diff.failed;
mtd->ecc_stats.corrected += diff.corrected;
mtd = mtd->parent;
}
}
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
u_char *buf)
{
struct mtd_oob_ops ops = {
.len = len,
.datbuf = buf,
};
int ret;
ret = mtd_read_oob(mtd, from, &ops);
*retlen = ops.retlen;
return ret;
}
EXPORT_SYMBOL_GPL(mtd_read);
int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
const u_char *buf)
{
struct mtd_oob_ops ops = {
.len = len,
.datbuf = (u8 *)buf,
};
int ret;
ret = mtd_write_oob(mtd, to, &ops);
*retlen = ops.retlen;
return ret;
}
EXPORT_SYMBOL_GPL(mtd_write);
/*
* In blackbox flight recorder like scenarios we want to make successful writes
* in interrupt context. panic_write() is only intended to be called when it's
* known the kernel is about to panic and we need the write to succeed. Since
* the kernel is not going to be running for much longer, this function can
* break locks and delay to ensure the write succeeds (but not sleep).
*/
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
const u_char *buf)
{
struct mtd_info *master = mtd_get_master(mtd);
*retlen = 0;
if (!master->_panic_write)
return -EOPNOTSUPP;
if (to < 0 || to >= mtd->size || len > mtd->size - to)
return -EINVAL;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
if (!len)
return 0;
if (!master->oops_panic_write)
master->oops_panic_write = true;
return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);
static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
struct mtd_oob_ops *ops)
{
/*
* Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
* ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
* this case.
*/
if (!ops->datbuf)
ops->len = 0;
if (!ops->oobbuf)
ops->ooblen = 0;
if (offs < 0 || offs + ops->len > mtd->size)
return -EINVAL;
if (ops->ooblen) {
size_t maxooblen;
if (ops->ooboffs >= mtd_oobavail(mtd, ops))
return -EINVAL;
maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
mtd_div_by_ws(offs, mtd)) *
mtd_oobavail(mtd, ops)) - ops->ooboffs;
if (ops->ooblen > maxooblen)
return -EINVAL;
}
return 0;
}
static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct mtd_info *master = mtd_get_master(mtd);
int ret;
from = mtd_get_master_ofs(mtd, from);
if (master->_read_oob)
ret = master->_read_oob(master, from, ops);
else
ret = master->_read(master, from, ops->len, &ops->retlen,
ops->datbuf);
return ret;
}
static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
struct mtd_info *master = mtd_get_master(mtd);
int ret;
to = mtd_get_master_ofs(mtd, to);
if (master->_write_oob)
ret = master->_write_oob(master, to, ops);
else
ret = master->_write(master, to, ops->len, &ops->retlen,
ops->datbuf);
return ret;
}
static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
struct mtd_oob_ops *ops)
{
struct mtd_info *master = mtd_get_master(mtd);
int ngroups = mtd_pairing_groups(master);
int npairs = mtd_wunit_per_eb(master) / ngroups;
struct mtd_oob_ops adjops = *ops;
unsigned int wunit, oobavail;
struct mtd_pairing_info info;
int max_bitflips = 0;
u32 ebofs, pageofs;
loff_t base, pos;
ebofs = mtd_mod_by_eb(start, mtd);
base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
info.group = 0;
info.pair = mtd_div_by_ws(ebofs, mtd);
pageofs = mtd_mod_by_ws(ebofs, mtd);
oobavail = mtd_oobavail(mtd, ops);
while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
int ret;
if (info.pair >= npairs) {
info.pair = 0;
base += master->erasesize;
}
wunit = mtd_pairing_info_to_wunit(master, &info);
pos = mtd_wunit_to_offset(mtd, base, wunit);
adjops.len = ops->len - ops->retlen;
if (adjops.len > mtd->writesize - pageofs)
adjops.len = mtd->writesize - pageofs;
adjops.ooblen = ops->ooblen - ops->oobretlen;
if (adjops.ooblen > oobavail - adjops.ooboffs)
adjops.ooblen = oobavail - adjops.ooboffs;
if (read) {
ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
if (ret > 0)
max_bitflips = max(max_bitflips, ret);
} else {
ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
}
if (ret < 0)
return ret;
max_bitflips = max(max_bitflips, ret);
ops->retlen += adjops.retlen;
ops->oobretlen += adjops.oobretlen;
adjops.datbuf += adjops.retlen;
adjops.oobbuf += adjops.oobretlen;
adjops.ooboffs = 0;
pageofs = 0;
info.pair++;
}
return max_bitflips;
}
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
struct mtd_info *master = mtd_get_master(mtd);
struct mtd_ecc_stats old_stats = master->ecc_stats;
int ret_code;
ops->retlen = ops->oobretlen = 0;
ret_code = mtd_check_oob_ops(mtd, from, ops);
if (ret_code)
return ret_code;
ledtrig_mtd_activity();
/* Check the validity of a potential fallback on mtd->_read */
if (!master->_read_oob && (!master->_read || ops->oobbuf))
return -EOPNOTSUPP;
if (ops->stats)
memset(ops->stats, 0, sizeof(*ops->stats));
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
else
ret_code = mtd_read_oob_std(mtd, from, ops);
mtd_update_ecc_stats(mtd, master, &old_stats);
/*
* In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
* similar to mtd->_read(), returning a non-negative integer
* representing max bitflips. In other cases, mtd->_read_oob() may
* return -EUCLEAN. In all cases, perform similar logic to mtd_read().
*/
if (unlikely(ret_code < 0))
return ret_code;
if (mtd->ecc_strength == 0)
return 0; /* device lacks ecc */
if (ops->stats)
ops->stats->max_bitflips = ret_code;
return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);
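/*
 * Illustrative sketch (not part of this file): how a caller might act on
 * the return value of mtd_read_oob(), following the comment above. The
 * buffers and offset are assumptions made for the example only.
 *
 *	struct mtd_oob_ops ops = {
 *		.mode   = MTD_OPS_AUTO_OOB,
 *		.datbuf = databuf,
 *		.len    = mtd->writesize,
 *		.oobbuf = oobbuf,
 *		.ooblen = mtd->oobavail,
 *	};
 *	int err = mtd_read_oob(mtd, offs, &ops);
 *
 *	if (mtd_is_bitflip(err))
 *		;	// data was corrected, consider scrubbing the block
 *	else if (mtd_is_eccerr(err))
 *		;	// uncorrectable ECC error, treat the data as bad
 *	else if (err)
 *		return err;
 */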
int mtd_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
struct mtd_info *master = mtd_get_master(mtd);
int ret;
ops->retlen = ops->oobretlen = 0;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
ret = mtd_check_oob_ops(mtd, to, ops);
if (ret)
return ret;
ledtrig_mtd_activity();
/* Check the validity of a potential fallback on mtd->_write */
if (!master->_write_oob && (!master->_write || ops->oobbuf))
return -EOPNOTSUPP;
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
return mtd_io_emulated_slc(mtd, to, false, ops);
return mtd_write_oob_std(mtd, to, ops);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);
/**
* mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
* @mtd: MTD device structure
* @section: ECC section. Depending on the layout you may have all the ECC
* bytes stored in a single contiguous section, or one section
* per ECC chunk (and sometimes several sections for a single
* ECC chunk)
* @oobecc: OOB region struct filled with the appropriate ECC position
* information
*
* This function returns ECC section information in the OOB area. If you want
* to get all the ECC bytes information, then you should call
* mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
*
* Returns zero on success, a negative error code otherwise.
*/
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobecc)
{
struct mtd_info *master = mtd_get_master(mtd);
memset(oobecc, 0, sizeof(*oobecc));
if (!master || section < 0)
return -EINVAL;
if (!master->ooblayout || !master->ooblayout->ecc)
return -ENOTSUPP;
return master->ooblayout->ecc(master, section, oobecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
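/*
 * Illustrative sketch (not part of this file): walking every ECC section
 * of a layout until -ERANGE, as described in the kernel-doc above.
 *
 *	struct mtd_oob_region oobecc;
 *	int section = 0, err;
 *
 *	while (!(err = mtd_ooblayout_ecc(mtd, section++, &oobecc)))
 *		pr_info("ECC section at %u, %u bytes\n",
 *			oobecc.offset, oobecc.length);
 *	if (err != -ERANGE)
 *		return err;	// a real error, not just "no more sections"
 */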
/**
* mtd_ooblayout_free - Get the OOB region definition of a specific free
* section
* @mtd: MTD device structure
* @section: Free section you are interested in. Depending on the layout
* you may have all the free bytes stored in a single contiguous
* section, or one section per ECC chunk plus an extra section
* for the remaining bytes (or other funky layout).
* @oobfree: OOB region struct filled with the appropriate free position
* information
*
* This function returns the position of free bytes in the OOB area. If you want
* to get all the free bytes information, then you should call
* mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
*
* Returns zero on success, a negative error code otherwise.
*/
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobfree)
{
struct mtd_info *master = mtd_get_master(mtd);
memset(oobfree, 0, sizeof(*oobfree));
if (!master || section < 0)
return -EINVAL;
if (!master->ooblayout || !master->ooblayout->free)
return -ENOTSUPP;
return master->ooblayout->free(master, section, oobfree);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
/**
* mtd_ooblayout_find_region - Find the region attached to a specific byte
* @mtd: mtd info structure
* @byte: the byte we are searching for
* @sectionp: pointer where the section id will be stored
* @oobregion: used to retrieve the ECC position
* @iter: iterator function. Should be either mtd_ooblayout_free or
* mtd_ooblayout_ecc depending on the region type you're searching for
*
* This function returns the section id and oobregion information of a
* specific byte. For example, say you want to know where the 4th ECC byte is
* stored, you'll use:
*
* mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
*
* Returns zero on success, a negative error code otherwise.
*/
static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
int *sectionp, struct mtd_oob_region *oobregion,
int (*iter)(struct mtd_info *,
int section,
struct mtd_oob_region *oobregion))
{
int pos = 0, ret, section = 0;
memset(oobregion, 0, sizeof(*oobregion));
while (1) {
ret = iter(mtd, section, oobregion);
if (ret)
return ret;
if (pos + oobregion->length > byte)
break;
pos += oobregion->length;
section++;
}
/*
* Adjust region info to make it start at the beginning of the
* requested 'byte'.
*/
oobregion->offset += byte - pos;
oobregion->length -= byte - pos;
*sectionp = section;
return 0;
}
/**
* mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
* ECC byte
* @mtd: mtd info structure
* @eccbyte: the byte we are searching for
* @section: pointer where the section id will be stored
* @oobregion: OOB region information
*
* Works like mtd_ooblayout_find_region() except it searches for a specific ECC
* byte.
*
* Returns zero on success, a negative error code otherwise.
*/
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
int *section,
struct mtd_oob_region *oobregion)
{
return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
/**
* mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
* @mtd: mtd info structure
* @buf: destination buffer to store OOB bytes
* @oobbuf: OOB buffer
* @start: first byte to retrieve
* @nbytes: number of bytes to retrieve
* @iter: section iterator
*
* Extract bytes attached to a specific category (ECC or free)
* from the OOB buffer and copy them into buf.
*
* Returns zero on success, a negative error code otherwise.
*/
static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
const u8 *oobbuf, int start, int nbytes,
int (*iter)(struct mtd_info *,
int section,
struct mtd_oob_region *oobregion))
{
struct mtd_oob_region oobregion;
int section, ret;
ret = mtd_ooblayout_find_region(mtd, start, &section,
&oobregion, iter);
while (!ret) {
int cnt;
cnt = min_t(int, nbytes, oobregion.length);
memcpy(buf, oobbuf + oobregion.offset, cnt);
buf += cnt;
nbytes -= cnt;
if (!nbytes)
break;
ret = iter(mtd, ++section, &oobregion);
}
return ret;
}
/**
* mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
* @mtd: mtd info structure
* @buf: source buffer to get OOB bytes from
* @oobbuf: OOB buffer
* @start: first OOB byte to set
* @nbytes: number of OOB bytes to set
* @iter: section iterator
*
* Fill the OOB buffer with data provided in buf. The category (ECC or free)
* is selected by passing the appropriate iterator.
*
* Returns zero on success, a negative error code otherwise.
*/
static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
u8 *oobbuf, int start, int nbytes,
int (*iter)(struct mtd_info *,
int section,
struct mtd_oob_region *oobregion))
{
struct mtd_oob_region oobregion;
int section, ret;
ret = mtd_ooblayout_find_region(mtd, start, &section,
&oobregion, iter);
while (!ret) {
int cnt;
cnt = min_t(int, nbytes, oobregion.length);
memcpy(oobbuf + oobregion.offset, buf, cnt);
buf += cnt;
nbytes -= cnt;
if (!nbytes)
break;
ret = iter(mtd, ++section, &oobregion);
}
return ret;
}
/**
* mtd_ooblayout_count_bytes - count the number of bytes in an OOB category
* @mtd: mtd info structure
* @iter: category iterator
*
* Count the number of bytes in a given category.
*
* Returns a positive value on success, a negative error code otherwise.
*/
static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
int (*iter)(struct mtd_info *,
int section,
struct mtd_oob_region *oobregion))
{
struct mtd_oob_region oobregion;
int section = 0, ret, nbytes = 0;
while (1) {
ret = iter(mtd, section++, &oobregion);
if (ret) {
if (ret == -ERANGE)
ret = nbytes;
break;
}
nbytes += oobregion.length;
}
return ret;
}
/**
* mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
* @mtd: mtd info structure
* @eccbuf: destination buffer to store ECC bytes
* @oobbuf: OOB buffer
* @start: first ECC byte to retrieve
* @nbytes: number of ECC bytes to retrieve
*
* Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
*
* Returns zero on success, a negative error code otherwise.
*/
int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
const u8 *oobbuf, int start, int nbytes)
{
return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
/**
* mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
* @mtd: mtd info structure
* @eccbuf: source buffer to get ECC bytes from
* @oobbuf: OOB buffer
* @start: first ECC byte to set
* @nbytes: number of ECC bytes to set
*
* Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
*
* Returns zero on success, a negative error code otherwise.
*/
int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
u8 *oobbuf, int start, int nbytes)
{
return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
/**
* mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
* @mtd: mtd info structure
* @databuf: destination buffer to store data bytes
* @oobbuf: OOB buffer
* @start: first data byte to retrieve
* @nbytes: number of data bytes to retrieve
*
* Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
*
* Returns zero on success, a negative error code otherwise.
*/
int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
const u8 *oobbuf, int start, int nbytes)
{
return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
/**
* mtd_ooblayout_set_databytes - set data bytes into the oob buffer
* @mtd: mtd info structure
* @databuf: source buffer to get data bytes from
* @oobbuf: OOB buffer
* @start: first data byte to set
* @nbytes: number of data bytes to set
*
* Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
*
* Returns zero on success, a negative error code otherwise.
*/
int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
u8 *oobbuf, int start, int nbytes)
{
return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
/**
* mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
* @mtd: mtd info structure
*
* Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
*
* Returns the number of free bytes on success, a negative error code otherwise.
*/
int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
{
return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
/**
* mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
* @mtd: mtd info structure
*
* Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
*
* Returns the number of ECC bytes on success, a negative error code otherwise.
*/
int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
{
return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
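/*
 * Illustrative sketch (not part of this file): sizing a buffer with
 * mtd_ooblayout_count_eccbytes() and extracting the ECC bytes from a raw
 * OOB dump with mtd_ooblayout_get_eccbytes(). 'oobbuf' is assumed to
 * already hold mtd->oobsize bytes read from the device.
 *
 *	int necc = mtd_ooblayout_count_eccbytes(mtd);
 *	u8 *ecc;
 *
 *	if (necc < 0)
 *		return necc;
 *	ecc = kmalloc(necc, GFP_KERNEL);
 *	if (!ecc)
 *		return -ENOMEM;
 *	ret = mtd_ooblayout_get_eccbytes(mtd, ecc, oobbuf, 0, necc);
 */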
/*
* Method to access the protection register area, present in some flash
* devices. The user data is one time programmable but the factory data is read
* only.
*/
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
struct otp_info *buf)
{
struct mtd_info *master = mtd_get_master(mtd);
if (!master->_get_fact_prot_info)
return -EOPNOTSUPP;
if (!len)
return 0;
return master->_get_fact_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct mtd_info *master = mtd_get_master(mtd);
*retlen = 0;
if (!master->_read_fact_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
return master->_read_fact_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
struct otp_info *buf)
{
struct mtd_info *master = mtd_get_master(mtd);
if (!master->_get_user_prot_info)
return -EOPNOTSUPP;
if (!len)
return 0;
return master->_get_user_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct mtd_info *master = mtd_get_master(mtd);
*retlen = 0;
if (!master->_read_user_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
return master->_read_user_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct mtd_info *master = mtd_get_master(mtd);
int ret;
*retlen = 0;
if (!master->_write_user_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
if (ret)
return ret;
/*
* If no data could be written at all, we are out of OTP space and
* must return -ENOSPC.
*/
return (*retlen) ? 0 : -ENOSPC;
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
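/*
 * Illustrative sketch (not part of this file): programming a blob into the
 * user OTP area and reading it back. The offset, length and buffers are
 * assumptions made for the example only; real users should first query the
 * available regions with mtd_get_user_prot_info().
 *
 *	size_t retlen;
 *	int err;
 *
 *	err = mtd_write_user_prot_reg(mtd, 0, sizeof(serial), &retlen, serial);
 *	if (err)
 *		return err;
 *	err = mtd_read_user_prot_reg(mtd, 0, sizeof(serial), &retlen, verify);
 */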
int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
struct mtd_info *master = mtd_get_master(mtd);
if (!master->_lock_user_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
return master->_lock_user_prot_reg(master, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
struct mtd_info *master = mtd_get_master(mtd);
if (!master->_erase_user_prot_reg)
return -EOPNOTSUPP;
if (!len)
return 0;
return master->_erase_user_prot_reg(master, from, len);
}
EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_info *master = mtd_get_master(mtd);
if (!master->_lock)
return -EOPNOTSUPP;
if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
return -EINVAL;
if (!len)
return 0;
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
}
return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_lock);
int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_info *master = mtd_get_master(mtd);
if (!master->_unlock)
return -EOPNOTSUPP;
if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
return -EINVAL;
if (!len)
return 0;
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
}
return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);
int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_info *master = mtd_get_master(mtd);
if (!master->_is_locked)
return -EOPNOTSUPP;
if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
return -EINVAL;
if (!len)
return 0;
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
}
return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);
int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_info *master = mtd_get_master(mtd);
if (ofs < 0 || ofs >= mtd->size)
return -EINVAL;
if (!master->_block_isreserved)
return 0;
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);
int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_info *master = mtd_get_master(mtd);
if (ofs < 0 || ofs >= mtd->size)
return -EINVAL;
if (!master->_block_isbad)
return 0;
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_info *master = mtd_get_master(mtd);
int ret;
if (!master->_block_markbad)
return -EOPNOTSUPP;
if (ofs < 0 || ofs >= mtd->size)
return -EINVAL;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
if (ret)
return ret;
while (mtd->parent) {
mtd->ecc_stats.badblocks++;
mtd = mtd->parent;
}
return 0;
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
/*
* default_mtd_writev - the default writev method
* @mtd: mtd device description object pointer
* @vecs: the vectors to write
* @count: count of vectors in @vecs
* @to: the MTD device offset to write to
* @retlen: on exit contains the count of bytes written to the MTD device.
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen)
{
unsigned long i;
size_t totlen = 0, thislen;
int ret = 0;
for (i = 0; i < count; i++) {
if (!vecs[i].iov_len)
continue;
ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
vecs[i].iov_base);
totlen += thislen;
if (ret || thislen != vecs[i].iov_len)
break;
to += vecs[i].iov_len;
}
*retlen = totlen;
return ret;
}
/*
* mtd_writev - the vector-based MTD write method
* @mtd: mtd device description object pointer
* @vecs: the vectors to write
* @count: count of vectors in @vecs
* @to: the MTD device offset to write to
* @retlen: on exit contains the count of bytes written to the MTD device.
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen)
{
struct mtd_info *master = mtd_get_master(mtd);
*retlen = 0;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
if (!master->_writev)
return default_mtd_writev(mtd, vecs, count, to, retlen);
return master->_writev(master, vecs, count,
mtd_get_master_ofs(mtd, to), retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);
/**
* mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
* @mtd: mtd device description object pointer
* @size: a pointer to the ideal or maximum size of the allocation, points
* to the actual allocation size on success.
*
* This routine attempts to allocate a contiguous kernel buffer up to
* the specified size, backing off the size of the request exponentially
* until the request succeeds or until the allocation size falls below
* the system page size. This attempts to make sure it does not adversely
* impact system performance, so when allocating more than one page, we
* ask the memory allocator to avoid re-trying, swapping, writing back
* or performing I/O.
*
* Note, this function also makes sure that the allocated buffer is aligned to
* the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
*
* This is called, for example by mtd_{read,write} and jffs2_scan_medium,
* to handle smaller (i.e. degraded) buffer allocations under low- or
* fragmented-memory situations where such reduced allocations, from a
* requested ideal, are allowed.
*
* Returns a pointer to the allocated buffer on success; otherwise, NULL.
*/
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
void *kbuf;
*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
while (*size > min_alloc) {
kbuf = kmalloc(*size, flags);
if (kbuf)
return kbuf;
*size >>= 1;
*size = ALIGN(*size, mtd->writesize);
}
/*
* For the last resort allocation allow 'kmalloc()' to do all sorts of
* things (write-back, dropping caches, etc) by using GFP_KERNEL.
*/
return kmalloc(*size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
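/*
 * Illustrative sketch (not part of this file): typical use of
 * mtd_kmalloc_up_to(). The ideal size below is an assumption for the
 * example; the caller must cope with the possibly smaller size returned.
 *
 *	size_t size = mtd->erasesize;
 *	void *buf = mtd_kmalloc_up_to(mtd, &size);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... perform I/O in chunks of at most 'size' bytes ...
 *	kfree(buf);
 */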
#ifdef CONFIG_PROC_FS
/*====================================================================*/
/* Support for /proc/mtd */
static int mtd_proc_show(struct seq_file *m, void *v)
{
struct mtd_info *mtd;
seq_puts(m, "dev: size erasesize name\n");
mutex_lock(&mtd_table_mutex);
mtd_for_each_device(mtd) {
seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
mtd->index, (unsigned long long)mtd->size,
mtd->erasesize, mtd->name);
}
mutex_unlock(&mtd_table_mutex);
return 0;
}
#endif /* CONFIG_PROC_FS */
/*====================================================================*/
/* Init code */
static struct backing_dev_info * __init mtd_bdi_init(const char *name)
{
struct backing_dev_info *bdi;
int ret;
bdi = bdi_alloc(NUMA_NO_NODE);
if (!bdi)
return ERR_PTR(-ENOMEM);
bdi->ra_pages = 0;
bdi->io_pages = 0;
/*
* We append a '-0' suffix to the name to keep the name format we used
* to have. Since this is called only once, we get a unique name.
*/
ret = bdi_register(bdi, "%.28s-0", name);
if (ret)
bdi_put(bdi);
return ret ? ERR_PTR(ret) : bdi;
}
static struct proc_dir_entry *proc_mtd;
static int __init init_mtd(void)
{
int ret;
ret = class_register(&mtd_class);
if (ret)
goto err_reg;
mtd_bdi = mtd_bdi_init("mtd");
if (IS_ERR(mtd_bdi)) {
ret = PTR_ERR(mtd_bdi);
goto err_bdi;
}
proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
ret = init_mtdchar();
if (ret)
goto out_procfs;
dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
&mtd_expert_analysis_mode);
return 0;
out_procfs:
if (proc_mtd)
remove_proc_entry("mtd", NULL);
bdi_unregister(mtd_bdi);
bdi_put(mtd_bdi);
err_bdi:
class_unregister(&mtd_class);
err_reg:
pr_err("Error registering mtd class or bdi: %d\n", ret);
return ret;
}
static void __exit cleanup_mtd(void)
{
debugfs_remove_recursive(dfs_dir_mtd);
cleanup_mtdchar();
if (proc_mtd)
remove_proc_entry("mtd", NULL);
class_unregister(&mtd_class);
bdi_unregister(mtd_bdi);
bdi_put(mtd_bdi);
idr_destroy(&mtd_idr);
}
module_init(init_mtd);
module_exit(cleanup_mtd);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <[email protected]>");
MODULE_DESCRIPTION("Core MTD registration and access routines");
| linux-master | drivers/mtd/mtdcore.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Direct MTD block device access
*
* Copyright © 1999-2010 David Woodhouse <[email protected]>
* Copyright © 2000-2003 Nicolas Pitre <[email protected]>
*/
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
#include <linux/mutex.h>
#include <linux/major.h>
struct mtdblk_dev {
struct mtd_blktrans_dev mbd;
int count;
struct mutex cache_mutex;
unsigned char *cache_data;
unsigned long cache_offset;
unsigned int cache_size;
enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
};
/*
* Cache stuff...
*
* Since typical flash erasable sectors are much larger than what Linux's
* buffer cache can handle, we must implement read-modify-write on flash
* sectors for each block write request. To avoid over-erasing flash sectors
* and to speed things up, we locally cache a whole flash sector while it is
* being written to until a different sector is required.
*/
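/*
 * Worked example (the numbers are assumptions, not taken from a real
 * chip): with a 64KiB erase unit and 512-byte blocks, 128 consecutive
 * 512-byte writes into one flash sector would otherwise each trigger a
 * 64KiB erase plus rewrite; with the cache below they collapse into a
 * single erase_write() when the cached sector is finally flushed.
 */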
static int erase_write (struct mtd_info *mtd, unsigned long pos,
unsigned int len, const char *buf)
{
struct erase_info erase;
size_t retlen;
int ret;
/*
* First, let's erase the flash block.
*/
erase.addr = pos;
erase.len = len;
ret = mtd_erase(mtd, &erase);
if (ret) {
printk (KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] "
"on \"%s\" failed\n",
pos, len, mtd->name);
return ret;
}
/*
* Next, write the data to flash.
*/
ret = mtd_write(mtd, pos, len, &retlen, buf);
if (ret)
return ret;
if (retlen != len)
return -EIO;
return 0;
}
static int write_cached_data (struct mtdblk_dev *mtdblk)
{
struct mtd_info *mtd = mtdblk->mbd.mtd;
int ret;
if (mtdblk->cache_state != STATE_DIRTY)
return 0;
pr_debug("mtdblock: writing cached data for \"%s\" "
"at 0x%lx, size 0x%x\n", mtd->name,
mtdblk->cache_offset, mtdblk->cache_size);
ret = erase_write (mtd, mtdblk->cache_offset,
mtdblk->cache_size, mtdblk->cache_data);
/*
* Here we could arguably set the cache state to STATE_CLEAN.
* However this could lead to inconsistency since we will not
* be notified if this content is altered on the flash by other
* means. Let's declare it empty and leave buffering tasks to
* the buffer cache instead.
*
* If this cache_offset points to a bad block, data cannot be
* written to the device. Clear cache_state to avoid writing to
* bad blocks repeatedly.
*/
if (ret == 0 || ret == -EIO)
mtdblk->cache_state = STATE_EMPTY;
return ret;
}
static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, const char *buf)
{
struct mtd_info *mtd = mtdblk->mbd.mtd;
unsigned int sect_size = mtdblk->cache_size;
size_t retlen;
int ret;
pr_debug("mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
mtd->name, pos, len);
if (!sect_size)
return mtd_write(mtd, pos, len, &retlen, buf);
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
unsigned int offset = pos - sect_start;
unsigned int size = sect_size - offset;
if( size > len )
size = len;
if (size == sect_size) {
/*
* We are covering a whole sector. Thus there is no
* need to bother with the cache while it may still be
* useful for other partial writes.
*/
ret = erase_write (mtd, pos, size, buf);
if (ret)
return ret;
} else {
/* Partial sector: need to use the cache */
if (mtdblk->cache_state == STATE_DIRTY &&
mtdblk->cache_offset != sect_start) {
ret = write_cached_data(mtdblk);
if (ret)
return ret;
}
if (mtdblk->cache_state == STATE_EMPTY ||
mtdblk->cache_offset != sect_start) {
/* fill the cache with the current sector */
mtdblk->cache_state = STATE_EMPTY;
ret = mtd_read(mtd, sect_start, sect_size,
&retlen, mtdblk->cache_data);
if (ret && !mtd_is_bitflip(ret))
return ret;
if (retlen != sect_size)
return -EIO;
mtdblk->cache_offset = sect_start;
mtdblk->cache_size = sect_size;
mtdblk->cache_state = STATE_CLEAN;
}
/* write data to our local cache */
memcpy (mtdblk->cache_data + offset, buf, size);
mtdblk->cache_state = STATE_DIRTY;
}
buf += size;
pos += size;
len -= size;
}
return 0;
}
static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, char *buf)
{
struct mtd_info *mtd = mtdblk->mbd.mtd;
unsigned int sect_size = mtdblk->cache_size;
size_t retlen;
int ret;
pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
mtd->name, pos, len);
if (!sect_size) {
ret = mtd_read(mtd, pos, len, &retlen, buf);
if (ret && !mtd_is_bitflip(ret))
return ret;
return 0;
}
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
unsigned int offset = pos - sect_start;
unsigned int size = sect_size - offset;
if (size > len)
size = len;
/*
* Check if the requested data is already cached
* Read the requested amount of data from our internal cache if it
* contains what we want, otherwise we read the data directly
* from flash.
*/
if (mtdblk->cache_state != STATE_EMPTY &&
mtdblk->cache_offset == sect_start) {
memcpy (buf, mtdblk->cache_data + offset, size);
} else {
ret = mtd_read(mtd, pos, size, &retlen, buf);
if (ret && !mtd_is_bitflip(ret))
return ret;
if (retlen != size)
return -EIO;
}
buf += size;
pos += size;
len -= size;
}
return 0;
}
static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
unsigned long block, char *buf)
{
struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
return do_cached_read(mtdblk, block<<9, 512, buf);
}
static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
unsigned long block, char *buf)
{
struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
if (!mtdblk->cache_data)
return -EINTR;
/* -EINTR is not really correct, but it is the best match
* documented in man 2 write for all cases. We could also
* return -EAGAIN sometimes, but why bother?
*/
}
return do_cached_write(mtdblk, block<<9, 512, buf);
}
static int mtdblock_open(struct mtd_blktrans_dev *mbd)
{
struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
pr_debug("mtdblock_open\n");
if (mtdblk->count) {
mtdblk->count++;
return 0;
}
if (mtd_type_is_nand(mbd->mtd))
pr_warn_ratelimited("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
mbd->tr->name, mbd->mtd->name);
/* OK, it's not open. Create cache info for it */
mtdblk->count = 1;
mutex_init(&mtdblk->cache_mutex);
mtdblk->cache_state = STATE_EMPTY;
if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) {
mtdblk->cache_size = mbd->mtd->erasesize;
mtdblk->cache_data = NULL;
}
pr_debug("ok\n");
return 0;
}
static void mtdblock_release(struct mtd_blktrans_dev *mbd)
{
struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
pr_debug("mtdblock_release\n");
mutex_lock(&mtdblk->cache_mutex);
write_cached_data(mtdblk);
mutex_unlock(&mtdblk->cache_mutex);
if (!--mtdblk->count) {
/*
* It was the last usage. Free the cache, but only sync if
* opened for writing.
*/
if (mbd->writable)
mtd_sync(mbd->mtd);
vfree(mtdblk->cache_data);
}
pr_debug("ok\n");
}
static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
int ret;
mutex_lock(&mtdblk->cache_mutex);
ret = write_cached_data(mtdblk);
mutex_unlock(&mtdblk->cache_mutex);
mtd_sync(dev->mtd);
return ret;
}
static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return;
dev->mbd.mtd = mtd;
dev->mbd.devnum = mtd->index;
dev->mbd.size = mtd->size >> 9;
dev->mbd.tr = tr;
if (!(mtd->flags & MTD_WRITEABLE))
dev->mbd.readonly = 1;
if (add_mtd_blktrans_dev(&dev->mbd))
kfree(dev);
}
static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
del_mtd_blktrans_dev(dev);
}
static struct mtd_blktrans_ops mtdblock_tr = {
.name = "mtdblock",
.major = MTD_BLOCK_MAJOR,
.part_bits = 0,
.blksize = 512,
.open = mtdblock_open,
.flush = mtdblock_flush,
.release = mtdblock_release,
.readsect = mtdblock_readsect,
.writesect = mtdblock_writesect,
.add_mtd = mtdblock_add_mtd,
.remove_dev = mtdblock_remove_dev,
.owner = THIS_MODULE,
};
module_mtd_blktrans(mtdblock_tr);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <[email protected]> et al.");
MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to MTD devices");
| linux-master | drivers/mtd/mtdblock.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Linux driver for SSFDC Flash Translation Layer (Read only)
* © 2005 Eptar srl
* Author: Claudio Lanconelli <[email protected]>
*
* Based on NTFL and MTDBLOCK_RO drivers
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/blktrans.h>
struct ssfdcr_record {
struct mtd_blktrans_dev mbd;
int usecount;
unsigned char heads;
unsigned char sectors;
unsigned short cylinders;
int cis_block; /* block n. containing CIS/IDI */
int erase_size; /* phys_block_size */
unsigned short *logic_block_map; /* all zones (max 8192 phys blocks on
the 128MiB) */
int map_len; /* n. phys_blocks on the card */
};
#define SSFDCR_MAJOR 257
#define SSFDCR_PARTN_BITS 3
#define SECTOR_SIZE 512
#define SECTOR_SHIFT 9
#define OOB_SIZE 16
#define MAX_LOGIC_BLK_PER_ZONE 1000
#define MAX_PHYS_BLK_PER_ZONE 1024
#define KiB(x) ( (x) * 1024L )
#define MiB(x) ( KiB(x) * 1024L )
/** CHS Table
1MiB 2MiB 4MiB 8MiB 16MiB 32MiB 64MiB 128MiB
NCylinder 125 125 250 250 500 500 500 500
NHead 4 4 4 4 4 8 8 16
NSector 4 8 8 16 16 16 32 32
SumSector 2,000 4,000 8,000 16,000 32,000 64,000 128,000 256,000
SectorSize 512 512 512 512 512 512 512 512
**/
typedef struct {
unsigned long size;
unsigned short cyl;
unsigned char head;
unsigned char sec;
} chs_entry_t;
/* Must be ordered by size */
static const chs_entry_t chs_table[] = {
{ MiB( 1), 125, 4, 4 },
{ MiB( 2), 125, 4, 8 },
{ MiB( 4), 250, 4, 8 },
{ MiB( 8), 250, 4, 16 },
{ MiB( 16), 500, 4, 16 },
{ MiB( 32), 500, 8, 16 },
{ MiB( 64), 500, 8, 32 },
{ MiB(128), 500, 16, 32 },
{ 0 },
};
static int get_chs(unsigned long size, unsigned short *cyl, unsigned char *head,
unsigned char *sec)
{
int k;
int found = 0;
k = 0;
while (chs_table[k].size > 0 && size > chs_table[k].size)
k++;
if (chs_table[k].size > 0) {
if (cyl)
*cyl = chs_table[k].cyl;
if (head)
*head = chs_table[k].head;
if (sec)
*sec = chs_table[k].sec;
found = 1;
}
return found;
}
/* These bytes are the signature for the CIS/IDI sector */
static const uint8_t cis_numbers[] = {
0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
};
/* Read and check for a valid CIS sector */
static int get_valid_cis_sector(struct mtd_info *mtd)
{
int ret, k, cis_sector;
size_t retlen;
loff_t offset;
uint8_t *sect_buf;
cis_sector = -1;
sect_buf = kmalloc(SECTOR_SIZE, GFP_KERNEL);
if (!sect_buf)
goto out;
/*
* Look for the CIS/IDI sector on the first GOOD block (give up after 4 bad
* blocks). If the first good block doesn't contain the CIS signature, the
* flash is not SSFDC formatted.
*/
for (k = 0, offset = 0; k < 4; k++, offset += mtd->erasesize) {
if (!mtd_block_isbad(mtd, offset)) {
ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen,
sect_buf);
/* CIS pattern match on the sector buffer */
if (ret < 0 || retlen != SECTOR_SIZE) {
printk(KERN_WARNING
"SSFDC_RO:can't read CIS/IDI sector\n");
} else if (!memcmp(sect_buf, cis_numbers,
sizeof(cis_numbers))) {
/* Found */
cis_sector = (int)(offset >> SECTOR_SHIFT);
} else {
pr_debug("SSFDC_RO: CIS/IDI sector not found"
" on %s (mtd%d)\n", mtd->name,
mtd->index);
}
break;
}
}
kfree(sect_buf);
out:
return cis_sector;
}
/* Read physical sector (wrapper to MTD_READ) */
static int read_physical_sector(struct mtd_info *mtd, uint8_t *sect_buf,
int sect_no)
{
int ret;
size_t retlen;
loff_t offset = (loff_t)sect_no << SECTOR_SHIFT;
ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf);
if (ret < 0 || retlen != SECTOR_SIZE)
return -1;
return 0;
}
/* Read redundancy area (wrapper to MTD_READ_OOB) */
static int read_raw_oob(struct mtd_info *mtd, loff_t offs, uint8_t *buf)
{
struct mtd_oob_ops ops = { };
int ret;
ops.mode = MTD_OPS_RAW;
ops.ooboffs = 0;
ops.ooblen = OOB_SIZE;
ops.oobbuf = buf;
ops.datbuf = NULL;
ret = mtd_read_oob(mtd, offs, &ops);
if (ret < 0 || ops.oobretlen != OOB_SIZE)
return -1;
return 0;
}
/* Parity calculator on a word of n bit size */
static int get_parity(int number, int size)
{
int k;
int parity;
parity = 1;
for (k = 0; k < size; k++) {
parity += (number >> k);
parity &= 1;
}
return parity;
}
/* Read and validate the logical block address field stored in the OOB */
static int get_logical_address(uint8_t *oob_buf)
{
int block_address, parity;
int offset[2] = {6, 11}; /* offset of the 2 address fields within OOB */
int j;
int ok = 0;
/*
* Look for the first valid logical address
* Valid address has fixed pattern on most significant bits and
* parity check
*/
for (j = 0; j < ARRAY_SIZE(offset); j++) {
block_address = ((int)oob_buf[offset[j]] << 8) |
oob_buf[offset[j]+1];
/* Check for the signature bits in the address field (MSBits) */
if ((block_address & ~0x7FF) == 0x1000) {
parity = block_address & 0x01;
block_address &= 0x7FF;
block_address >>= 1;
if (get_parity(block_address, 10) != parity) {
pr_debug("SSFDC_RO: logical address field%d"
"parity error(0x%04X)\n", j+1,
block_address);
} else {
ok = 1;
break;
}
}
}
if (!ok)
block_address = -2;
pr_debug("SSFDC_RO: get_logical_address() %d\n",
block_address);
return block_address;
}
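/*
 * Worked example (the raw field bytes are an assumption for illustration):
 * OOB bytes 0x10 0x86 give block_address = 0x1086. The signature test
 * (0x1086 & ~0x7FF) == 0x1000 passes, the stored parity bit is
 * 0x1086 & 0x01 = 0, and the remaining 10-bit value (0x086 >> 1) = 67 has
 * three bits set, so get_parity(67, 10) is also 0: the field is valid and
 * maps to logical block 67.
 */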
/* Build the logic block map */
static int build_logical_block_map(struct ssfdcr_record *ssfdc)
{
unsigned long offset;
uint8_t oob_buf[OOB_SIZE];
int ret, block_address, phys_block;
struct mtd_info *mtd = ssfdc->mbd.mtd;
pr_debug("SSFDC_RO: build_block_map() nblks=%d (%luK)\n",
ssfdc->map_len,
(unsigned long)ssfdc->map_len * ssfdc->erase_size / 1024);
/* Scan every physical block, skip CIS block */
for (phys_block = ssfdc->cis_block + 1; phys_block < ssfdc->map_len;
phys_block++) {
offset = (unsigned long)phys_block * ssfdc->erase_size;
if (mtd_block_isbad(mtd, offset))
continue; /* skip bad blocks */
ret = read_raw_oob(mtd, offset, oob_buf);
if (ret < 0) {
pr_debug("SSFDC_RO: mtd read_oob() failed at %lu\n",
offset);
return -1;
}
block_address = get_logical_address(oob_buf);
/* Skip invalid addresses */
if (block_address >= 0 &&
block_address < MAX_LOGIC_BLK_PER_ZONE) {
int zone_index;
zone_index = phys_block / MAX_PHYS_BLK_PER_ZONE;
block_address += zone_index * MAX_LOGIC_BLK_PER_ZONE;
ssfdc->logic_block_map[block_address] =
(unsigned short)phys_block;
pr_debug("SSFDC_RO: build_block_map() phys_block=%d,"
"logic_block_addr=%d, zone=%d\n",
phys_block, block_address, zone_index);
}
}
return 0;
}
static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
struct ssfdcr_record *ssfdc;
int cis_sector;
/* Check for small page NAND flash */
if (!mtd_type_is_nand(mtd) || mtd->oobsize != OOB_SIZE ||
mtd->size > UINT_MAX)
return;
/* Check for SSDFC format by reading CIS/IDI sector */
cis_sector = get_valid_cis_sector(mtd);
if (cis_sector == -1)
return;
ssfdc = kzalloc(sizeof(struct ssfdcr_record), GFP_KERNEL);
if (!ssfdc)
return;
ssfdc->mbd.mtd = mtd;
ssfdc->mbd.devnum = -1;
ssfdc->mbd.tr = tr;
ssfdc->mbd.readonly = 1;
ssfdc->cis_block = cis_sector / (mtd->erasesize >> SECTOR_SHIFT);
ssfdc->erase_size = mtd->erasesize;
ssfdc->map_len = (u32)mtd->size / mtd->erasesize;
pr_debug("SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n",
ssfdc->cis_block, ssfdc->erase_size, ssfdc->map_len,
DIV_ROUND_UP(ssfdc->map_len, MAX_PHYS_BLK_PER_ZONE));
/* Set geometry */
ssfdc->heads = 16;
ssfdc->sectors = 32;
get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors);
ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) /
((long)ssfdc->sectors * (long)ssfdc->heads));
pr_debug("SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n",
ssfdc->cylinders, ssfdc->heads , ssfdc->sectors,
(long)ssfdc->cylinders * (long)ssfdc->heads *
(long)ssfdc->sectors);
ssfdc->mbd.size = (long)ssfdc->heads * (long)ssfdc->cylinders *
(long)ssfdc->sectors;
/* Allocate logical block map */
ssfdc->logic_block_map =
kmalloc_array(ssfdc->map_len,
sizeof(ssfdc->logic_block_map[0]), GFP_KERNEL);
if (!ssfdc->logic_block_map)
goto out_err;
memset(ssfdc->logic_block_map, 0xff, sizeof(ssfdc->logic_block_map[0]) *
ssfdc->map_len);
/* Build logical block map */
if (build_logical_block_map(ssfdc) < 0)
goto out_err;
/* Register device + partitions */
if (add_mtd_blktrans_dev(&ssfdc->mbd))
goto out_err;
printk(KERN_INFO "SSFDC_RO: Found ssfdc%c on mtd%d (%s)\n",
ssfdc->mbd.devnum + 'a', mtd->index, mtd->name);
return;
out_err:
kfree(ssfdc->logic_block_map);
kfree(ssfdc);
}
static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev)
{
struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
pr_debug("SSFDC_RO: remove_dev (i=%d)\n", dev->devnum);
del_mtd_blktrans_dev(dev);
kfree(ssfdc->logic_block_map);
}
static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
unsigned long logic_sect_no, char *buf)
{
struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
int sectors_per_block, offset, block_address;
sectors_per_block = ssfdc->erase_size >> SECTOR_SHIFT;
offset = (int)(logic_sect_no % sectors_per_block);
block_address = (int)(logic_sect_no / sectors_per_block);
pr_debug("SSFDC_RO: ssfdcr_readsect(%lu) sec_per_blk=%d, ofst=%d,"
" block_addr=%d\n", logic_sect_no, sectors_per_block, offset,
block_address);
BUG_ON(block_address >= ssfdc->map_len);
block_address = ssfdc->logic_block_map[block_address];
pr_debug("SSFDC_RO: ssfdcr_readsect() phys_block_addr=%d\n",
block_address);
if (block_address < 0xffff) {
unsigned long sect_no;
sect_no = (unsigned long)block_address * sectors_per_block +
offset;
pr_debug("SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n",
sect_no);
if (read_physical_sector(ssfdc->mbd.mtd, buf, sect_no) < 0)
return -EIO;
} else {
memset(buf, 0xff, SECTOR_SIZE);
}
return 0;
}
static int ssfdcr_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
pr_debug("SSFDC_RO: ssfdcr_getgeo() C=%d, H=%d, S=%d\n",
ssfdc->cylinders, ssfdc->heads, ssfdc->sectors);
geo->heads = ssfdc->heads;
geo->sectors = ssfdc->sectors;
geo->cylinders = ssfdc->cylinders;
return 0;
}
/****************************************************************************
*
* Module stuff
*
****************************************************************************/
static struct mtd_blktrans_ops ssfdcr_tr = {
.name = "ssfdc",
.major = SSFDCR_MAJOR,
.part_bits = SSFDCR_PARTN_BITS,
.blksize = SECTOR_SIZE,
.getgeo = ssfdcr_getgeo,
.readsect = ssfdcr_readsect,
.add_mtd = ssfdcr_add_mtd,
.remove_dev = ssfdcr_remove_dev,
.owner = THIS_MODULE,
};
static int __init init_ssfdcr(void)
{
printk(KERN_INFO "SSFDC read-only Flash Translation layer\n");
return register_mtd_blktrans(&ssfdcr_tr);
}
static void __exit cleanup_ssfdcr(void)
{
deregister_mtd_blktrans(&ssfdcr_tr);
}
module_init(init_ssfdcr);
module_exit(cleanup_ssfdcr);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Claudio Lanconelli <[email protected]>");
MODULE_DESCRIPTION("Flash Translation Layer for read-only SSFDC SmartMedia card");
| linux-master | drivers/mtd/ssfdc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* rfd_ftl.c -- resident flash disk (flash translation layer)
*
* Copyright © 2005 Sean Young <[email protected]>
*
* This type of flash translation layer (FTL) is used by the Embedded BIOS
* by General Software. It is known as the Resident Flash Disk (RFD), see:
*
* http://www.gensw.com/pages/prod/bios/rfd.htm
*
* based on ftl.c
*/
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <asm/types.h>
static int block_size = 0;
module_param(block_size, int, 0);
MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");
#define PREFIX "rfd_ftl: "
/* This major has been assigned by [email protected] */
#ifndef RFD_FTL_MAJOR
#define RFD_FTL_MAJOR 256
#endif
/* Maximum number of partitions in an FTL region */
#define PART_BITS 4
/* An erase unit should start with this value */
#define RFD_MAGIC 0x9193
/* the second value is 0xffff or 0xffc8; function unknown */
/* the third value is always 0xffff, ignored */
/* next is an array of mapping for each corresponding sector */
#define HEADER_MAP_OFFSET 3
#define SECTOR_DELETED 0x0000
#define SECTOR_ZERO 0xfffe
#define SECTOR_FREE 0xffff
#define SECTOR_SIZE 512
#define SECTORS_PER_TRACK 63
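/*
 * Illustrative layout of one RFD erase unit, as implied by the constants
 * above and by scan_header()/build_block_map() below (the exact number of
 * header and data sectors depends on the erase unit size):
 *
 *	u16[0]    RFD_MAGIC (0x9193)
 *	u16[1]    0xffff or 0xffc8, purpose unknown
 *	u16[2]    0xffff, ignored
 *	u16[3..]  per-data-sector map entries: SECTOR_FREE, SECTOR_DELETED,
 *	          SECTOR_ZERO or the logical sector number
 *	...       header rounded up to whole 512-byte sectors,
 *	          followed by the data sectors themselves
 */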
struct block {
enum {
BLOCK_OK,
BLOCK_ERASING,
BLOCK_ERASED,
BLOCK_UNUSED,
BLOCK_FAILED
} state;
int free_sectors;
int used_sectors;
int erases;
u_long offset;
};
struct partition {
struct mtd_blktrans_dev mbd;
u_int block_size; /* size of erase unit */
u_int total_blocks; /* number of erase units */
u_int header_sectors_per_block; /* header sectors in erase unit */
u_int data_sectors_per_block; /* data sectors in erase unit */
u_int sector_count; /* sectors in translated disk */
u_int header_size; /* bytes in header sector */
int reserved_block; /* block next up for reclaim */
int current_block; /* block to write to */
u16 *header_cache; /* cached header */
int is_reclaiming;
int cylinders;
int errors;
u_long *sector_map;
struct block *blocks;
};
static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
static int build_block_map(struct partition *part, int block_no)
{
struct block *block = &part->blocks[block_no];
int i;
block->offset = part->block_size * block_no;
if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
block->state = BLOCK_UNUSED;
return -ENOENT;
}
block->state = BLOCK_OK;
for (i=0; i<part->data_sectors_per_block; i++) {
u16 entry;
entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);
if (entry == SECTOR_DELETED)
continue;
if (entry == SECTOR_FREE) {
block->free_sectors++;
continue;
}
if (entry == SECTOR_ZERO)
entry = 0;
if (entry >= part->sector_count) {
printk(KERN_WARNING PREFIX
"'%s': unit #%d: entry %d corrupt, "
"sector %d out of range\n",
part->mbd.mtd->name, block_no, i, entry);
continue;
}
if (part->sector_map[entry] != -1) {
printk(KERN_WARNING PREFIX
"'%s': more than one entry for sector %d\n",
part->mbd.mtd->name, entry);
part->errors = 1;
continue;
}
part->sector_map[entry] = block->offset +
(i + part->header_sectors_per_block) * SECTOR_SIZE;
block->used_sectors++;
}
if (block->free_sectors == part->data_sectors_per_block)
part->reserved_block = block_no;
return 0;
}
static int scan_header(struct partition *part)
{
int sectors_per_block;
int i, rc = -ENOMEM;
int blocks_found;
size_t retlen;
sectors_per_block = part->block_size / SECTOR_SIZE;
part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;
if (part->total_blocks < 2)
return -ENOENT;
/* each erase block has three bytes header, followed by the map */
part->header_sectors_per_block =
((HEADER_MAP_OFFSET + sectors_per_block) *
sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;
part->data_sectors_per_block = sectors_per_block -
part->header_sectors_per_block;
part->header_size = (HEADER_MAP_OFFSET +
part->data_sectors_per_block) * sizeof(u16);
part->cylinders = (part->data_sectors_per_block *
(part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;
part->sector_count = part->cylinders * SECTORS_PER_TRACK;
part->current_block = -1;
part->reserved_block = -1;
part->is_reclaiming = 0;
part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
if (!part->header_cache)
goto err;
part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
GFP_KERNEL);
if (!part->blocks)
goto err;
part->sector_map = vmalloc(array_size(sizeof(u_long),
part->sector_count));
if (!part->sector_map)
goto err;
for (i=0; i<part->sector_count; i++)
part->sector_map[i] = -1;
for (i=0, blocks_found=0; i<part->total_blocks; i++) {
rc = mtd_read(part->mbd.mtd, i * part->block_size,
part->header_size, &retlen,
(u_char *)part->header_cache);
if (!rc && retlen != part->header_size)
rc = -EIO;
if (rc)
goto err;
if (!build_block_map(part, i))
blocks_found++;
}
if (blocks_found == 0) {
printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
part->mbd.mtd->name);
rc = -ENOENT;
goto err;
}
if (part->reserved_block == -1) {
printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
part->mbd.mtd->name);
part->errors = 1;
}
return 0;
err:
vfree(part->sector_map);
kfree(part->header_cache);
kfree(part->blocks);
return rc;
}
static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
struct partition *part = container_of(dev, struct partition, mbd);
u_long addr;
size_t retlen;
int rc;
if (sector >= part->sector_count)
return -EIO;
addr = part->sector_map[sector];
if (addr != -1) {
rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
(u_char *)buf);
if (!rc && retlen != SECTOR_SIZE)
rc = -EIO;
if (rc) {
printk(KERN_WARNING PREFIX "error reading '%s' at "
"0x%lx\n", part->mbd.mtd->name, addr);
return rc;
}
} else
memset(buf, 0, SECTOR_SIZE);
return 0;
}
static int erase_block(struct partition *part, int block)
{
struct erase_info *erase;
int rc;
erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
if (!erase)
return -ENOMEM;
erase->addr = part->blocks[block].offset;
erase->len = part->block_size;
part->blocks[block].state = BLOCK_ERASING;
part->blocks[block].free_sectors = 0;
rc = mtd_erase(part->mbd.mtd, erase);
if (rc) {
printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
"failed\n", (unsigned long long)erase->addr,
(unsigned long long)erase->len, part->mbd.mtd->name);
part->blocks[block].state = BLOCK_FAILED;
part->blocks[block].free_sectors = 0;
part->blocks[block].used_sectors = 0;
} else {
u16 magic = cpu_to_le16(RFD_MAGIC);
size_t retlen;
part->blocks[block].state = BLOCK_ERASED;
part->blocks[block].free_sectors = part->data_sectors_per_block;
part->blocks[block].used_sectors = 0;
part->blocks[block].erases++;
rc = mtd_write(part->mbd.mtd, part->blocks[block].offset,
sizeof(magic), &retlen, (u_char *)&magic);
if (!rc && retlen != sizeof(magic))
rc = -EIO;
if (rc) {
pr_err(PREFIX "'%s': unable to write RFD header at 0x%lx\n",
part->mbd.mtd->name, part->blocks[block].offset);
part->blocks[block].state = BLOCK_FAILED;
} else {
part->blocks[block].state = BLOCK_OK;
}
}
kfree(erase);
return rc;
}
static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
{
void *sector_data;
u16 *map;
size_t retlen;
int i, rc = -ENOMEM;
part->is_reclaiming = 1;
sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
if (!sector_data)
goto err3;
map = kmalloc(part->header_size, GFP_KERNEL);
if (!map)
goto err2;
rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
part->header_size, &retlen, (u_char *)map);
if (!rc && retlen != part->header_size)
rc = -EIO;
if (rc) {
printk(KERN_ERR PREFIX "error reading '%s' at "
"0x%lx\n", part->mbd.mtd->name,
part->blocks[block_no].offset);
goto err;
}
for (i=0; i<part->data_sectors_per_block; i++) {
u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
u_long addr;
if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
continue;
if (entry == SECTOR_ZERO)
entry = 0;
/* already warned about and ignored in build_block_map() */
if (entry >= part->sector_count)
continue;
addr = part->blocks[block_no].offset +
(i + part->header_sectors_per_block) * SECTOR_SIZE;
if (*old_sector == addr) {
*old_sector = -1;
if (!part->blocks[block_no].used_sectors--) {
rc = erase_block(part, block_no);
break;
}
continue;
}
rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
sector_data);
if (!rc && retlen != SECTOR_SIZE)
rc = -EIO;
if (rc) {
printk(KERN_ERR PREFIX "'%s': Unable to "
"read sector for relocation\n",
part->mbd.mtd->name);
goto err;
}
rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
entry, sector_data);
if (rc)
goto err;
}
err:
kfree(map);
err2:
kfree(sector_data);
err3:
part->is_reclaiming = 0;
return rc;
}
static int reclaim_block(struct partition *part, u_long *old_sector)
{
int block, best_block, score, old_sector_block;
int rc;
/* we have a race if sync doesn't exist */
mtd_sync(part->mbd.mtd);
score = 0x7fffffff; /* MAX_INT */
best_block = -1;
if (*old_sector != -1)
old_sector_block = *old_sector / part->block_size;
else
old_sector_block = -1;
for (block=0; block<part->total_blocks; block++) {
int this_score;
if (block == part->reserved_block)
continue;
/*
* Postpone reclaiming while a free sector remains: waiting for more
* sectors to be deleted is more efficient (less data has to be
* moved).
*/
if (part->blocks[block].free_sectors)
return 0;
this_score = part->blocks[block].used_sectors;
if (block == old_sector_block)
this_score--;
else {
/* no point in moving a full block */
if (part->blocks[block].used_sectors ==
part->data_sectors_per_block)
continue;
}
this_score += part->blocks[block].erases;
if (this_score < score) {
best_block = block;
score = this_score;
}
}
if (best_block == -1)
return -ENOSPC;
part->current_block = -1;
part->reserved_block = best_block;
pr_debug("reclaim_block: reclaiming block #%d with %d used "
"%d free sectors\n", best_block,
part->blocks[best_block].used_sectors,
part->blocks[best_block].free_sectors);
if (part->blocks[best_block].used_sectors)
rc = move_block_contents(part, best_block, old_sector);
else
rc = erase_block(part, best_block);
return rc;
}
/*
* IMPROVE: It would be best to choose the block with the most deleted sectors,
* because if we fill that one up first it'll have the most chance of having
* the least live sectors at reclaim.
*/
static int find_free_block(struct partition *part)
{
int block, stop;
block = part->current_block == -1 ?
jiffies % part->total_blocks : part->current_block;
stop = block;
do {
if (part->blocks[block].free_sectors &&
block != part->reserved_block)
return block;
if (part->blocks[block].state == BLOCK_UNUSED)
erase_block(part, block);
if (++block >= part->total_blocks)
block = 0;
} while (block != stop);
return -1;
}
static int find_writable_block(struct partition *part, u_long *old_sector)
{
int rc, block;
size_t retlen;
block = find_free_block(part);
if (block == -1) {
if (!part->is_reclaiming) {
rc = reclaim_block(part, old_sector);
if (rc)
goto err;
block = find_free_block(part);
}
if (block == -1) {
rc = -ENOSPC;
goto err;
}
}
rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
part->header_size, &retlen,
(u_char *)part->header_cache);
if (!rc && retlen != part->header_size)
rc = -EIO;
if (rc) {
printk(KERN_ERR PREFIX "'%s': unable to read header at "
"0x%lx\n", part->mbd.mtd->name,
part->blocks[block].offset);
goto err;
}
part->current_block = block;
err:
return rc;
}
static int mark_sector_deleted(struct partition *part, u_long old_addr)
{
int block, offset, rc;
u_long addr;
size_t retlen;
u16 del = cpu_to_le16(SECTOR_DELETED);
block = old_addr / part->block_size;
offset = (old_addr % part->block_size) / SECTOR_SIZE -
part->header_sectors_per_block;
addr = part->blocks[block].offset +
(HEADER_MAP_OFFSET + offset) * sizeof(u16);
rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
(u_char *)&del);
if (!rc && retlen != sizeof(del))
rc = -EIO;
if (rc) {
printk(KERN_ERR PREFIX "error writing '%s' at "
"0x%lx\n", part->mbd.mtd->name, addr);
goto err;
}
if (block == part->current_block)
part->header_cache[offset + HEADER_MAP_OFFSET] = del;
part->blocks[block].used_sectors--;
if (!part->blocks[block].used_sectors &&
!part->blocks[block].free_sectors)
rc = erase_block(part, block);
err:
return rc;
}
static int find_free_sector(const struct partition *part, const struct block *block)
{
int i, stop;
i = stop = part->data_sectors_per_block - block->free_sectors;
do {
if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
== SECTOR_FREE)
return i;
if (++i == part->data_sectors_per_block)
i = 0;
}
while(i != stop);
return -1;
}
static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
{
struct partition *part = container_of(dev, struct partition, mbd);
struct block *block;
u_long addr;
int i;
int rc;
size_t retlen;
u16 entry;
if (part->current_block == -1 ||
!part->blocks[part->current_block].free_sectors) {
rc = find_writable_block(part, old_addr);
if (rc)
goto err;
}
block = &part->blocks[part->current_block];
i = find_free_sector(part, block);
if (i < 0) {
rc = -ENOSPC;
goto err;
}
addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
block->offset;
rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
(u_char *)buf);
if (!rc && retlen != SECTOR_SIZE)
rc = -EIO;
if (rc) {
printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
part->mbd.mtd->name, addr);
goto err;
}
part->sector_map[sector] = addr;
entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);
part->header_cache[i + HEADER_MAP_OFFSET] = entry;
addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
(u_char *)&entry);
if (!rc && retlen != sizeof(entry))
rc = -EIO;
if (rc) {
printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
part->mbd.mtd->name, addr);
goto err;
}
block->used_sectors++;
block->free_sectors--;
err:
return rc;
}
static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
struct partition *part = container_of(dev, struct partition, mbd);
u_long old_addr;
int i;
int rc = 0;
pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);
if (part->reserved_block == -1) {
rc = -EACCES;
goto err;
}
if (sector >= part->sector_count) {
rc = -EIO;
goto err;
}
old_addr = part->sector_map[sector];
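/*
* Only program the sector if it contains at least one non-zero byte;
* an all-zero sector is represented by simply leaving it unmapped
* (sector_map[sector] = -1).  Any previous copy of the sector is
* marked as deleted below either way.
*/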
for (i=0; i<SECTOR_SIZE; i++) {
if (!buf[i])
continue;
rc = do_writesect(dev, sector, buf, &old_addr);
if (rc)
goto err;
break;
}
if (i == SECTOR_SIZE)
part->sector_map[sector] = -1;
if (old_addr != -1)
rc = mark_sector_deleted(part, old_addr);
err:
return rc;
}
static int rfd_ftl_discardsect(struct mtd_blktrans_dev *dev,
unsigned long sector, unsigned int nr_sects)
{
struct partition *part = container_of(dev, struct partition, mbd);
u_long addr;
int rc;
while (nr_sects) {
if (sector >= part->sector_count)
return -EIO;
addr = part->sector_map[sector];
if (addr != -1) {
rc = mark_sector_deleted(part, addr);
if (rc)
return rc;
part->sector_map[sector] = -1;
}
sector++;
nr_sects--;
}
return 0;
}
static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
struct partition *part = container_of(dev, struct partition, mbd);
geo->heads = 1;
geo->sectors = SECTORS_PER_TRACK;
geo->cylinders = part->cylinders;
return 0;
}
static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
struct partition *part;
if ((mtd->type != MTD_NORFLASH && mtd->type != MTD_RAM) ||
mtd->size > UINT_MAX)
return;
part = kzalloc(sizeof(struct partition), GFP_KERNEL);
if (!part)
return;
part->mbd.mtd = mtd;
if (block_size)
part->block_size = block_size;
else {
if (!mtd->erasesize) {
printk(KERN_WARNING PREFIX "please provide block_size");
goto out;
} else
part->block_size = mtd->erasesize;
}
if (scan_header(part) == 0) {
part->mbd.size = part->sector_count;
part->mbd.tr = tr;
part->mbd.devnum = -1;
if (!(mtd->flags & MTD_WRITEABLE))
part->mbd.readonly = 1;
else if (part->errors) {
printk(KERN_WARNING PREFIX "'%s': errors found, "
"setting read-only\n", mtd->name);
part->mbd.readonly = 1;
}
printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
mtd->name, mtd->type, mtd->flags);
if (!add_mtd_blktrans_dev(&part->mbd))
return;
}
out:
kfree(part);
}
static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
struct partition *part = container_of(dev, struct partition, mbd);
int i;
for (i=0; i<part->total_blocks; i++) {
pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
part->mbd.mtd->name, i, part->blocks[i].erases);
}
vfree(part->sector_map);
kfree(part->header_cache);
kfree(part->blocks);
del_mtd_blktrans_dev(&part->mbd);
}
static struct mtd_blktrans_ops rfd_ftl_tr = {
.name = "rfd",
.major = RFD_FTL_MAJOR,
.part_bits = PART_BITS,
.blksize = SECTOR_SIZE,
.readsect = rfd_ftl_readsect,
.writesect = rfd_ftl_writesect,
.discard = rfd_ftl_discardsect,
.getgeo = rfd_ftl_getgeo,
.add_mtd = rfd_ftl_add_mtd,
.remove_dev = rfd_ftl_remove_dev,
.owner = THIS_MODULE,
};
module_mtd_blktrans(rfd_ftl_tr);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <[email protected]>");
MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
"used by General Software's Embedded BIOS");
| linux-master | drivers/mtd/rfd_ftl.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* inftlmount.c -- INFTL mount code with extensive checks.
*
* Author: Greg Ungerer ([email protected])
* Copyright © 2002-2003, Greg Ungerer ([email protected])
*
* Based heavily on the nftlmount.c code which is:
* Author: Fabrice Bellard ([email protected])
* Copyright © 2000 Netgem S.A.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nftl.h>
#include <linux/mtd/inftl.h>
/*
* find_boot_record: Find the INFTL Media Header and its Spare copy which
* contains the various device information of the INFTL partition and
* Bad Unit Table. Update the PUtable[] table according to the Bad
* Unit Table. PUtable[] is used for management of Erase Unit in
* other routines in inftlcore.c and inftlmount.c.
*/
static int find_boot_record(struct INFTLrecord *inftl)
{
struct inftl_unittail h1;
//struct inftl_oob oob;
unsigned int i, block;
u8 buf[SECTORSIZE];
struct INFTLMediaHeader *mh = &inftl->MediaHdr;
struct mtd_info *mtd = inftl->mbd.mtd;
struct INFTLPartition *ip;
size_t retlen;
pr_debug("INFTL: find_boot_record(inftl=%p)\n", inftl);
/*
* Assume logical EraseSize == physical erasesize for starting the
* scan. We'll sort it out later if we find a MediaHeader which says
* otherwise.
*/
inftl->EraseSize = inftl->mbd.mtd->erasesize;
inftl->nb_blocks = (u32)inftl->mbd.mtd->size / inftl->EraseSize;
inftl->MediaUnit = BLOCK_NIL;
/* Search for a valid boot record */
for (block = 0; block < inftl->nb_blocks; block++) {
int ret;
/*
* Check for BNAND header first. Then whinge if it's found
* but later checks fail.
*/
ret = mtd_read(mtd, block * inftl->EraseSize, SECTORSIZE,
&retlen, buf);
/* We ignore ret in case the ECC of the MediaHeader is invalid
(which is apparently acceptable) */
if (retlen != SECTORSIZE) {
static int warncount = 5;
if (warncount) {
printk(KERN_WARNING "INFTL: block read at 0x%x "
"of mtd%d failed: %d\n",
block * inftl->EraseSize,
inftl->mbd.mtd->index, ret);
if (!--warncount)
printk(KERN_WARNING "INFTL: further "
"failures for this block will "
"not be printed\n");
}
continue;
}
if (retlen < 6 || memcmp(buf, "BNAND", 6)) {
/* BNAND\0 not found. Continue */
continue;
}
/* To be safer with BIOS, also use erase mark as discriminant */
ret = inftl_read_oob(mtd,
block * inftl->EraseSize + SECTORSIZE + 8,
8, &retlen,(char *)&h1);
if (ret < 0) {
printk(KERN_WARNING "INFTL: ANAND header found at "
"0x%x in mtd%d, but OOB data read failed "
"(err %d)\n", block * inftl->EraseSize,
inftl->mbd.mtd->index, ret);
continue;
}
/*
* This is the first we've seen.
* Copy the media header structure into place.
*/
memcpy(mh, buf, sizeof(struct INFTLMediaHeader));
/* Read the spare media header at offset 4096 */
mtd_read(mtd, block * inftl->EraseSize + 4096, SECTORSIZE,
&retlen, buf);
if (retlen != SECTORSIZE) {
printk(KERN_WARNING "INFTL: Unable to read spare "
"Media Header\n");
return -1;
}
/* Check if this one is the same as the first one we found. */
if (memcmp(mh, buf, sizeof(struct INFTLMediaHeader))) {
printk(KERN_WARNING "INFTL: Primary and spare Media "
"Headers disagree.\n");
return -1;
}
mh->NoOfBootImageBlocks = le32_to_cpu(mh->NoOfBootImageBlocks);
mh->NoOfBinaryPartitions = le32_to_cpu(mh->NoOfBinaryPartitions);
mh->NoOfBDTLPartitions = le32_to_cpu(mh->NoOfBDTLPartitions);
mh->BlockMultiplierBits = le32_to_cpu(mh->BlockMultiplierBits);
mh->FormatFlags = le32_to_cpu(mh->FormatFlags);
mh->PercentUsed = le32_to_cpu(mh->PercentUsed);
pr_debug("INFTL: Media Header ->\n"
" bootRecordID = %s\n"
" NoOfBootImageBlocks = %d\n"
" NoOfBinaryPartitions = %d\n"
" NoOfBDTLPartitions = %d\n"
" BlockMultiplierBits = %d\n"
" FormatFlgs = %d\n"
" OsakVersion = 0x%x\n"
" PercentUsed = %d\n",
mh->bootRecordID, mh->NoOfBootImageBlocks,
mh->NoOfBinaryPartitions,
mh->NoOfBDTLPartitions,
mh->BlockMultiplierBits, mh->FormatFlags,
mh->OsakVersion, mh->PercentUsed);
if (mh->NoOfBDTLPartitions == 0) {
printk(KERN_WARNING "INFTL: Media Header sanity check "
"failed: NoOfBDTLPartitions (%d) == 0, "
"must be at least 1\n", mh->NoOfBDTLPartitions);
return -1;
}
if ((mh->NoOfBDTLPartitions + mh->NoOfBinaryPartitions) > 4) {
printk(KERN_WARNING "INFTL: Media Header sanity check "
"failed: Total Partitions (%d) > 4, "
"BDTL=%d Binary=%d\n", mh->NoOfBDTLPartitions +
mh->NoOfBinaryPartitions,
mh->NoOfBDTLPartitions,
mh->NoOfBinaryPartitions);
return -1;
}
if (mh->BlockMultiplierBits > 1) {
printk(KERN_WARNING "INFTL: sorry, we don't support "
"UnitSizeFactor 0x%02x\n",
mh->BlockMultiplierBits);
return -1;
} else if (mh->BlockMultiplierBits == 1) {
printk(KERN_WARNING "INFTL: support for INFTL with "
"UnitSizeFactor 0x%02x is experimental\n",
mh->BlockMultiplierBits);
inftl->EraseSize = inftl->mbd.mtd->erasesize <<
mh->BlockMultiplierBits;
inftl->nb_blocks = (u32)inftl->mbd.mtd->size / inftl->EraseSize;
block >>= mh->BlockMultiplierBits;
}
/* Scan the partitions */
for (i = 0; (i < 4); i++) {
ip = &mh->Partitions[i];
ip->virtualUnits = le32_to_cpu(ip->virtualUnits);
ip->firstUnit = le32_to_cpu(ip->firstUnit);
ip->lastUnit = le32_to_cpu(ip->lastUnit);
ip->flags = le32_to_cpu(ip->flags);
ip->spareUnits = le32_to_cpu(ip->spareUnits);
ip->Reserved0 = le32_to_cpu(ip->Reserved0);
pr_debug(" PARTITION[%d] ->\n"
" virtualUnits = %d\n"
" firstUnit = %d\n"
" lastUnit = %d\n"
" flags = 0x%x\n"
" spareUnits = %d\n",
i, ip->virtualUnits, ip->firstUnit,
ip->lastUnit, ip->flags,
ip->spareUnits);
if (ip->Reserved0 != ip->firstUnit) {
struct erase_info *instr = &inftl->instr;
/*
* Most likely this is using the
* undocumented quick mount feature.
* We don't support that, we will need
* to erase the hidden block for full
* compatibility.
*/
instr->addr = ip->Reserved0 * inftl->EraseSize;
instr->len = inftl->EraseSize;
mtd_erase(mtd, instr);
}
if ((ip->lastUnit - ip->firstUnit + 1) < ip->virtualUnits) {
printk(KERN_WARNING "INFTL: Media Header "
"Partition %d sanity check failed\n"
" firstUnit %d : lastUnit %d > "
"virtualUnits %d\n", i, ip->lastUnit,
ip->firstUnit, ip->Reserved0);
return -1;
}
if (ip->Reserved1 != 0) {
printk(KERN_WARNING "INFTL: Media Header "
"Partition %d sanity check failed: "
"Reserved1 %d != 0\n",
i, ip->Reserved1);
return -1;
}
if (ip->flags & INFTL_BDTL)
break;
}
if (i >= 4) {
printk(KERN_WARNING "INFTL: Media Header Partition "
"sanity check failed:\n No partition "
"marked as Disk Partition\n");
return -1;
}
inftl->nb_boot_blocks = ip->firstUnit;
inftl->numvunits = ip->virtualUnits;
if (inftl->numvunits > (inftl->nb_blocks -
inftl->nb_boot_blocks - 2)) {
printk(KERN_WARNING "INFTL: Media Header sanity check "
"failed:\n numvunits (%d) > nb_blocks "
"(%d) - nb_boot_blocks(%d) - 2\n",
inftl->numvunits, inftl->nb_blocks,
inftl->nb_boot_blocks);
return -1;
}
inftl->mbd.size = inftl->numvunits *
(inftl->EraseSize / SECTORSIZE);
/*
* Block count is set to last used EUN (we won't need to keep
* any meta-data past that point).
*/
inftl->firstEUN = ip->firstUnit;
inftl->lastEUN = ip->lastUnit;
inftl->nb_blocks = ip->lastUnit + 1;
/* Memory alloc */
inftl->PUtable = kmalloc_array(inftl->nb_blocks, sizeof(u16),
GFP_KERNEL);
if (!inftl->PUtable)
return -ENOMEM;
inftl->VUtable = kmalloc_array(inftl->nb_blocks, sizeof(u16),
GFP_KERNEL);
if (!inftl->VUtable) {
kfree(inftl->PUtable);
return -ENOMEM;
}
/* Mark the blocks before INFTL MediaHeader as reserved */
for (i = 0; i < inftl->nb_boot_blocks; i++)
inftl->PUtable[i] = BLOCK_RESERVED;
/* Mark all remaining blocks as potentially containing data */
for (; i < inftl->nb_blocks; i++)
inftl->PUtable[i] = BLOCK_NOTEXPLORED;
/* Mark this boot record (NFTL MediaHeader) block as reserved */
inftl->PUtable[block] = BLOCK_RESERVED;
/* Read Bad Erase Unit Table and modify PUtable[] accordingly */
for (i = 0; i < inftl->nb_blocks; i++) {
int physblock;
/* If any of the physical eraseblocks are bad, don't
use the unit. */
for (physblock = 0; physblock < inftl->EraseSize; physblock += inftl->mbd.mtd->erasesize) {
if (mtd_block_isbad(inftl->mbd.mtd,
i * inftl->EraseSize + physblock))
inftl->PUtable[i] = BLOCK_RESERVED;
}
}
inftl->MediaUnit = block;
return 0;
}
/* Not found. */
return -1;
}
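/*
* memcmpb: return 0 if all n bytes at 'a' equal the byte value 'c',
* non-zero otherwise.  Used below to test for erased (all 0xff) areas.
*/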
static int memcmpb(void *a, int c, int n)
{
int i;
for (i = 0; i < n; i++) {
if (c != ((unsigned char *)a)[i])
return 1;
}
return 0;
}
/*
* check_free_sectors: check that a free sector is actually FREE,
* i.e. All 0xff in data and oob area.
*/
static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
int len, int check_oob)
{
struct mtd_info *mtd = inftl->mbd.mtd;
size_t retlen;
int i, ret;
u8 *buf;
buf = kmalloc(SECTORSIZE + mtd->oobsize, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = -1;
for (i = 0; i < len; i += SECTORSIZE) {
if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
goto out;
if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
goto out;
if (check_oob) {
if(inftl_read_oob(mtd, address, mtd->oobsize,
&retlen, &buf[SECTORSIZE]) < 0)
goto out;
if (memcmpb(buf + SECTORSIZE, 0xff, mtd->oobsize) != 0)
goto out;
}
address += SECTORSIZE;
}
ret = 0;
out:
kfree(buf);
return ret;
}
/*
* INFTL_formatblock: format an Erase Unit by erasing ALL Erase Zones in the
*	Erase Unit and update the INFTL metadata. Each erase operation is
*	checked with check_free_sectors.
*
*	Return: 0 on success, -1 on error.
*
*	ToDo: 1. Is it necessary to call check_free_sectors after erasing?
*/
int INFTL_formatblock(struct INFTLrecord *inftl, int block)
{
size_t retlen;
struct inftl_unittail uci;
struct erase_info *instr = &inftl->instr;
struct mtd_info *mtd = inftl->mbd.mtd;
int physblock;
pr_debug("INFTL: INFTL_formatblock(inftl=%p,block=%d)\n", inftl, block);
memset(instr, 0, sizeof(struct erase_info));
/* FIXME: Shouldn't we be setting the 'discarded' flag to zero
_first_? */
/* Use async erase interface, test return code */
instr->addr = block * inftl->EraseSize;
instr->len = inftl->mbd.mtd->erasesize;
/* Erase one physical eraseblock at a time, even though the NAND api
allows us to group them. This way, if we have a failure, we can
mark only the failed block in the bbt. */
for (physblock = 0; physblock < inftl->EraseSize;
physblock += instr->len, instr->addr += instr->len) {
int ret;
ret = mtd_erase(inftl->mbd.mtd, instr);
if (ret) {
printk(KERN_WARNING "INFTL: error while formatting block %d\n",
block);
goto fail;
}
/*
* Check the "freeness" of Erase Unit before updating metadata.
* FixMe: is this check really necessary, since we already check
* the return code after the erase operation?
*/
if (check_free_sectors(inftl, instr->addr, instr->len, 1) != 0)
goto fail;
}
uci.EraseMark = cpu_to_le16(ERASE_MARK);
uci.EraseMark1 = cpu_to_le16(ERASE_MARK);
uci.Reserved[0] = 0;
uci.Reserved[1] = 0;
uci.Reserved[2] = 0;
uci.Reserved[3] = 0;
instr->addr = block * inftl->EraseSize + SECTORSIZE * 2;
if (inftl_write_oob(mtd, instr->addr + 8, 8, &retlen, (char *)&uci) < 0)
goto fail;
return 0;
fail:
/* could not format, update the bad block table (caller is responsible
for setting the PUtable to BLOCK_RESERVED on failure) */
mtd_block_markbad(inftl->mbd.mtd, instr->addr);
return -1;
}
/*
* format_chain: Format an invalid Virtual Unit chain. It frees all the Erase
* Units in a Virtual Unit Chain, i.e. all the units are disconnected.
*
* Since the chain is invalid then we will have to erase it from its
* head (normally for INFTL we go from the oldest). But if it has a
* loop then there is no oldest...
*/
static void format_chain(struct INFTLrecord *inftl, unsigned int first_block)
{
unsigned int block = first_block, block1;
printk(KERN_WARNING "INFTL: formatting chain at block %d\n",
first_block);
for (;;) {
block1 = inftl->PUtable[block];
printk(KERN_WARNING "INFTL: formatting block %d\n", block);
if (INFTL_formatblock(inftl, block) < 0) {
/*
* Cannot format! Mark it as a Bad Unit,
*/
inftl->PUtable[block] = BLOCK_RESERVED;
} else {
inftl->PUtable[block] = BLOCK_FREE;
}
/* Goto next block on the chain */
block = block1;
if (block == BLOCK_NIL || block >= inftl->lastEUN)
break;
}
}
void INFTL_dumptables(struct INFTLrecord *s)
{
int i;
pr_debug("-------------------------------------------"
"----------------------------------\n");
pr_debug("VUtable[%d] ->", s->nb_blocks);
for (i = 0; i < s->nb_blocks; i++) {
if ((i % 8) == 0)
pr_debug("\n%04x: ", i);
pr_debug("%04x ", s->VUtable[i]);
}
pr_debug("\n-------------------------------------------"
"----------------------------------\n");
pr_debug("PUtable[%d-%d=%d] ->", s->firstEUN, s->lastEUN, s->nb_blocks);
for (i = 0; i <= s->lastEUN; i++) {
if ((i % 8) == 0)
pr_debug("\n%04x: ", i);
pr_debug("%04x ", s->PUtable[i]);
}
pr_debug("\n-------------------------------------------"
"----------------------------------\n");
pr_debug("INFTL ->\n"
" EraseSize = %d\n"
" h/s/c = %d/%d/%d\n"
" numvunits = %d\n"
" firstEUN = %d\n"
" lastEUN = %d\n"
" numfreeEUNs = %d\n"
" LastFreeEUN = %d\n"
" nb_blocks = %d\n"
" nb_boot_blocks = %d",
s->EraseSize, s->heads, s->sectors, s->cylinders,
s->numvunits, s->firstEUN, s->lastEUN, s->numfreeEUNs,
s->LastFreeEUN, s->nb_blocks, s->nb_boot_blocks);
pr_debug("\n-------------------------------------------"
"----------------------------------\n");
}
void INFTL_dumpVUchains(struct INFTLrecord *s)
{
int logical, block, i;
pr_debug("-------------------------------------------"
"----------------------------------\n");
pr_debug("INFTL Virtual Unit Chains:\n");
for (logical = 0; logical < s->nb_blocks; logical++) {
block = s->VUtable[logical];
if (block >= s->nb_blocks)
continue;
pr_debug(" LOGICAL %d --> %d ", logical, block);
for (i = 0; i < s->nb_blocks; i++) {
if (s->PUtable[block] == BLOCK_NIL)
break;
block = s->PUtable[block];
pr_debug("%d ", block);
}
pr_debug("\n");
}
pr_debug("-------------------------------------------"
"----------------------------------\n");
}
int INFTL_mount(struct INFTLrecord *s)
{
struct mtd_info *mtd = s->mbd.mtd;
unsigned int block, first_block, prev_block, last_block;
unsigned int first_logical_block, logical_block, erase_mark;
int chain_length, do_format_chain;
struct inftl_unithead1 h0;
struct inftl_unittail h1;
size_t retlen;
int i;
u8 *ANACtable, ANAC;
pr_debug("INFTL: INFTL_mount(inftl=%p)\n", s);
/* Search for INFTL MediaHeader and Spare INFTL Media Header */
if (find_boot_record(s) < 0) {
printk(KERN_WARNING "INFTL: could not find valid boot record?\n");
return -ENXIO;
}
/* Init the logical to physical table */
for (i = 0; i < s->nb_blocks; i++)
s->VUtable[i] = BLOCK_NIL;
logical_block = block = BLOCK_NIL;
/* Temporary buffer to store ANAC numbers. */
ANACtable = kcalloc(s->nb_blocks, sizeof(u8), GFP_KERNEL);
if (!ANACtable)
return -ENOMEM;
/*
* First pass is to explore each physical unit, and construct the
* virtual chains that exist (newest physical unit goes into VUtable).
* Any block that is in any way invalid will be left in the
* NOTEXPLORED state. Then at the end we will try to format it and
* mark it as free.
*/
pr_debug("INFTL: pass 1, explore each unit\n");
for (first_block = s->firstEUN; first_block <= s->lastEUN; first_block++) {
if (s->PUtable[first_block] != BLOCK_NOTEXPLORED)
continue;
do_format_chain = 0;
first_logical_block = BLOCK_NIL;
last_block = BLOCK_NIL;
block = first_block;
for (chain_length = 0; ; chain_length++) {
if ((chain_length == 0) &&
(s->PUtable[block] != BLOCK_NOTEXPLORED)) {
/* Nothing to do here, onto next block */
break;
}
if (inftl_read_oob(mtd, block * s->EraseSize + 8,
8, &retlen, (char *)&h0) < 0 ||
inftl_read_oob(mtd, block * s->EraseSize +
2 * SECTORSIZE + 8, 8, &retlen,
(char *)&h1) < 0) {
/* Should never happen? */
do_format_chain++;
break;
}
logical_block = le16_to_cpu(h0.virtualUnitNo);
prev_block = le16_to_cpu(h0.prevUnitNo);
erase_mark = le16_to_cpu((h1.EraseMark | h1.EraseMark1));
ANACtable[block] = h0.ANAC;
/* Previous block is relative to start of Partition */
if (prev_block < s->nb_blocks)
prev_block += s->firstEUN;
/* Already explored partial chain? */
if (s->PUtable[block] != BLOCK_NOTEXPLORED) {
/* Does it continue the chain for this logical block? */
if (logical_block == first_logical_block) {
if (last_block != BLOCK_NIL)
s->PUtable[last_block] = block;
}
break;
}
/* Check for invalid block */
if (erase_mark != ERASE_MARK) {
printk(KERN_WARNING "INFTL: corrupt block %d "
"in chain %d, chain length %d, erase "
"mark 0x%x?\n", block, first_block,
chain_length, erase_mark);
/*
* Assume end of chain, probably incomplete
* fold/erase...
*/
if (chain_length == 0)
do_format_chain++;
break;
}
/* Check for it being free already then... */
if ((logical_block == BLOCK_FREE) ||
(logical_block == BLOCK_NIL)) {
s->PUtable[block] = BLOCK_FREE;
break;
}
/* Sanity checks on block numbers */
if ((logical_block >= s->nb_blocks) ||
((prev_block >= s->nb_blocks) &&
(prev_block != BLOCK_NIL))) {
if (chain_length > 0) {
printk(KERN_WARNING "INFTL: corrupt "
"block %d in chain %d?\n",
block, first_block);
do_format_chain++;
}
break;
}
if (first_logical_block == BLOCK_NIL) {
first_logical_block = logical_block;
} else {
if (first_logical_block != logical_block) {
/* Normal for folded chain... */
break;
}
}
/*
* Current block is valid, so if we followed a virtual
* chain to get here then we can set the previous
* block pointer in our PUtable now. Then move onto
* the previous block in the chain.
*/
s->PUtable[block] = BLOCK_NIL;
if (last_block != BLOCK_NIL)
s->PUtable[last_block] = block;
last_block = block;
block = prev_block;
/* Check for end of chain */
if (block == BLOCK_NIL)
break;
/* Validate next block before following it... */
if (block > s->lastEUN) {
printk(KERN_WARNING "INFTL: invalid previous "
"block %d in chain %d?\n", block,
first_block);
do_format_chain++;
break;
}
}
if (do_format_chain) {
format_chain(s, first_block);
continue;
}
/*
* Looks like a valid chain then. It may not really be the
* newest block in the chain, but it is the newest we have
* found so far. We might update it in later iterations of
* this loop if we find something newer.
*/
s->VUtable[first_logical_block] = first_block;
logical_block = BLOCK_NIL;
}
INFTL_dumptables(s);
/*
* Second pass, check for infinite loops in chains. These are
* possible because we don't update the previous pointers when
* we fold chains. No big deal, just fix them up in PUtable.
*/
pr_debug("INFTL: pass 2, validate virtual chains\n");
for (logical_block = 0; logical_block < s->numvunits; logical_block++) {
block = s->VUtable[logical_block];
last_block = BLOCK_NIL;
/* Check for free/reserved/nil */
if (block >= BLOCK_RESERVED)
continue;
ANAC = ANACtable[block];
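/*
* ANACtable[] holds the per-unit ANAC value read from the unit headers
* in pass 1.  Walking from the newest unit backwards, each older unit
* is expected to carry an ANAC exactly one less than its successor, so
* a mismatch indicates the chain has looped back onto a stale unit and
* is truncated at this point.
*/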
for (i = 0; i < s->numvunits; i++) {
if (s->PUtable[block] == BLOCK_NIL)
break;
if (s->PUtable[block] > s->lastEUN) {
printk(KERN_WARNING "INFTL: invalid prev %d, "
"in virtual chain %d\n",
s->PUtable[block], logical_block);
s->PUtable[block] = BLOCK_NIL;
}
if (ANACtable[block] != ANAC) {
/*
* Chain must point back to itself. This is ok,
* but we will need to adjust the tables with this
* newest block and oldest block.
*/
s->VUtable[logical_block] = block;
s->PUtable[last_block] = BLOCK_NIL;
break;
}
ANAC--;
last_block = block;
block = s->PUtable[block];
}
if (i >= s->nb_blocks) {
/*
* Uhoo, infinite chain with valid ANACS!
* Format whole chain...
*/
format_chain(s, first_block);
}
}
INFTL_dumptables(s);
INFTL_dumpVUchains(s);
/*
* Third pass, format unreferenced blocks and init free block count.
*/
s->numfreeEUNs = 0;
s->LastFreeEUN = BLOCK_NIL;
pr_debug("INFTL: pass 3, format unused blocks\n");
for (block = s->firstEUN; block <= s->lastEUN; block++) {
if (s->PUtable[block] == BLOCK_NOTEXPLORED) {
printk("INFTL: unreferenced block %d, formatting it\n",
block);
if (INFTL_formatblock(s, block) < 0)
s->PUtable[block] = BLOCK_RESERVED;
else
s->PUtable[block] = BLOCK_FREE;
}
if (s->PUtable[block] == BLOCK_FREE) {
s->numfreeEUNs++;
if (s->LastFreeEUN == BLOCK_NIL)
s->LastFreeEUN = block;
}
}
kfree(ANACtable);
return 0;
}
| linux-master | drivers/mtd/inftlmount.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* MTD-based superblock management
*
* Copyright © 2001-2007 Red Hat, Inc. All Rights Reserved.
* Copyright © 2001-2010 David Woodhouse <[email protected]>
*
* Written by: David Howells <[email protected]>
* David Woodhouse <[email protected]>
*/
#include <linux/mtd/super.h>
#include <linux/namei.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/major.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/fs_context.h>
#include "mtdcore.h"
/*
* get a superblock on an MTD-backed filesystem
*/
static int mtd_get_sb(struct fs_context *fc,
struct mtd_info *mtd,
int (*fill_super)(struct super_block *,
struct fs_context *))
{
struct super_block *sb;
int ret;
sb = sget_dev(fc, MKDEV(MTD_BLOCK_MAJOR, mtd->index));
if (IS_ERR(sb))
return PTR_ERR(sb);
if (sb->s_root) {
/* new mountpoint for an already mounted superblock */
pr_debug("MTDSB: Device %d (\"%s\") is already mounted\n",
mtd->index, mtd->name);
put_mtd_device(mtd);
} else {
/* fresh new superblock */
pr_debug("MTDSB: New superblock for device %d (\"%s\")\n",
mtd->index, mtd->name);
/*
* Would usually have been set with @sb_lock held but in
* contrast to sb->s_bdev that's checked with only
* @sb_lock held, nothing checks sb->s_mtd without also
* holding sb->s_umount and we're holding sb->s_umount
* here.
*/
sb->s_mtd = mtd;
sb->s_bdi = bdi_get(mtd_bdi);
ret = fill_super(sb, fc);
if (ret < 0)
goto error_sb;
sb->s_flags |= SB_ACTIVE;
}
BUG_ON(fc->root);
fc->root = dget(sb->s_root);
return 0;
error_sb:
deactivate_locked_super(sb);
return ret;
}
/*
* get a superblock on an MTD-backed filesystem by MTD device number
*/
static int mtd_get_sb_by_nr(struct fs_context *fc, int mtdnr,
int (*fill_super)(struct super_block *,
struct fs_context *))
{
struct mtd_info *mtd;
mtd = get_mtd_device(NULL, mtdnr);
if (IS_ERR(mtd)) {
errorf(fc, "MTDSB: Device #%u doesn't appear to exist\n", mtdnr);
return PTR_ERR(mtd);
}
return mtd_get_sb(fc, mtd, fill_super);
}
/**
* get_tree_mtd - Get a superblock based on a single MTD device
* @fc: The filesystem context holding the parameters
* @fill_super: Helper to initialise a new superblock
*/
int get_tree_mtd(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc))
{
#ifdef CONFIG_BLOCK
dev_t dev;
int ret;
#endif
int mtdnr;
if (!fc->source)
return invalf(fc, "No source specified");
pr_debug("MTDSB: dev_name \"%s\"\n", fc->source);
/* the preferred way of mounting in future; especially when
* CONFIG_BLOCK=n - we specify the underlying MTD device by number or
* by name, so that we don't require block device support to be present
* in the kernel.
*/
if (fc->source[0] == 'm' &&
fc->source[1] == 't' &&
fc->source[2] == 'd') {
if (fc->source[3] == ':') {
struct mtd_info *mtd;
/* mount by MTD device name */
pr_debug("MTDSB: mtd:%%s, name \"%s\"\n",
fc->source + 4);
mtd = get_mtd_device_nm(fc->source + 4);
if (!IS_ERR(mtd))
return mtd_get_sb(fc, mtd, fill_super);
errorf(fc, "MTD: MTD device with name \"%s\" not found",
fc->source + 4);
} else if (isdigit(fc->source[3])) {
/* mount by MTD device number */
char *endptr;
mtdnr = simple_strtoul(fc->source + 3, &endptr, 0);
if (!*endptr) {
/* It was a valid number */
pr_debug("MTDSB: mtd%%d, mtdnr %d\n", mtdnr);
return mtd_get_sb_by_nr(fc, mtdnr, fill_super);
}
}
}
#ifdef CONFIG_BLOCK
/* try the old way - the hack where we allowed users to mount
* /dev/mtdblock$(n) but didn't actually _use_ the blockdev
*/
ret = lookup_bdev(fc->source, &dev);
if (ret) {
errorf(fc, "MTD: Couldn't look up '%s': %d", fc->source, ret);
return ret;
}
pr_debug("MTDSB: lookup_bdev() returned 0\n");
if (MAJOR(dev) == MTD_BLOCK_MAJOR)
return mtd_get_sb_by_nr(fc, MINOR(dev), fill_super);
#endif /* CONFIG_BLOCK */
if (!(fc->sb_flags & SB_SILENT))
errorf(fc, "MTD: Attempt to mount non-MTD device \"%s\"",
fc->source);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(get_tree_mtd);
/*
* destroy an MTD-based superblock
*/
void kill_mtd_super(struct super_block *sb)
{
generic_shutdown_super(sb);
put_mtd_device(sb->s_mtd);
sb->s_mtd = NULL;
}
EXPORT_SYMBOL_GPL(kill_mtd_super);
| linux-master | drivers/mtd/mtdsuper.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2009 - Maxim Levitsky
* SmartMedia/xD translation layer
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/hdreg.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sysfs.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/mtd/nand-ecc-sw-hamming.h>
#include "nand/raw/sm_common.h"
#include "sm_ftl.h"
static struct workqueue_struct *cache_flush_workqueue;
static int cache_timeout = 1000;
module_param(cache_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cache_timeout,
"Timeout (in ms) for cache flush (1000 ms default");
static int debug;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
/* ------------------- sysfs attributes ---------------------------------- */
struct sm_sysfs_attribute {
struct device_attribute dev_attr;
char *data;
int len;
};
static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sm_sysfs_attribute *sm_attr =
container_of(attr, struct sm_sysfs_attribute, dev_attr);
strncpy(buf, sm_attr->data, sm_attr->len);
return sm_attr->len;
}
#define NUM_ATTRIBUTES 1
#define SM_CIS_VENDOR_OFFSET 0x59
static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
{
struct attribute_group *attr_group;
struct attribute **attributes;
struct sm_sysfs_attribute *vendor_attribute;
char *vendor;
vendor = kstrndup(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET, GFP_KERNEL);
if (!vendor)
goto error1;
/* Initialize sysfs attributes */
vendor_attribute =
kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);
if (!vendor_attribute)
goto error2;
sysfs_attr_init(&vendor_attribute->dev_attr.attr);
vendor_attribute->data = vendor;
vendor_attribute->len = strlen(vendor);
vendor_attribute->dev_attr.attr.name = "vendor";
vendor_attribute->dev_attr.attr.mode = S_IRUGO;
vendor_attribute->dev_attr.show = sm_attr_show;
/* Create array of pointers to the attributes */
attributes = kcalloc(NUM_ATTRIBUTES + 1, sizeof(struct attribute *),
GFP_KERNEL);
if (!attributes)
goto error3;
attributes[0] = &vendor_attribute->dev_attr.attr;
/* Finally create the attribute group */
attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
if (!attr_group)
goto error4;
attr_group->attrs = attributes;
return attr_group;
error4:
kfree(attributes);
error3:
kfree(vendor_attribute);
error2:
kfree(vendor);
error1:
return NULL;
}
static void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
{
struct attribute **attributes = ftl->disk_attributes->attrs;
int i;
for (i = 0; attributes[i] ; i++) {
struct device_attribute *dev_attr = container_of(attributes[i],
struct device_attribute, attr);
struct sm_sysfs_attribute *sm_attr =
container_of(dev_attr,
struct sm_sysfs_attribute, dev_attr);
kfree(sm_attr->data);
kfree(sm_attr);
}
kfree(ftl->disk_attributes->attrs);
kfree(ftl->disk_attributes);
}
/* ----------------------- oob helpers -------------------------------------- */
static int sm_get_lba(uint8_t *lba)
{
/* check fixed bits */
if ((lba[0] & 0xF8) != 0x10)
return -2;
/* check parity - endianness doesn't matter */
if (hweight16(*(uint16_t *)lba) & 1)
return -2;
return (lba[1] >> 1) | ((lba[0] & 0x07) << 7);
}
/*
* Read the LBA associated with a block
* returns -1 if the block is erased
* returns -2 on error
*/
static int sm_read_lba(struct sm_oob *oob)
{
static const uint32_t erased_pattern[4] = {
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
uint16_t lba_test;
int lba;
/* First test for erased block */
if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
return -1;
/* Now check if the two copies of the LBA differ too much */
lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t*)oob->lba_copy2;
if (lba_test && !is_power_of_2(lba_test))
return -2;
/* And read it */
lba = sm_get_lba(oob->lba_copy1);
if (lba == -2)
lba = sm_get_lba(oob->lba_copy2);
return lba;
}
static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
{
uint8_t tmp[2];
WARN_ON(lba >= 1000);
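/*
* Encoding: tmp[0] = 0b00010hhh holds LBA bits 9..7, tmp[1] holds LBA
* bits 6..0 shifted left by one, and bit 0 of tmp[1] is set when needed
* so that the total number of set bits across both bytes is even.
* For example, LBA 0x155 encodes to {0x12, 0xAA} (parity already even)
* and LBA 3 encodes to {0x10, 0x07} (parity bit set).
*/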
tmp[0] = 0x10 | ((lba >> 7) & 0x07);
tmp[1] = (lba << 1) & 0xFF;
if (hweight16(*(uint16_t *)tmp) & 0x01)
tmp[1] |= 1;
oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0];
oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1];
}
/* Make offset from parts */
static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
{
WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
WARN_ON(zone < 0 || zone >= ftl->zone_count);
WARN_ON(block >= ftl->zone_size);
WARN_ON(boffset >= ftl->block_size);
if (block == -1)
return -1;
return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
}
/* Breaks offset into parts */
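/*
* Note: unlike sm_mkoffset() above, which builds a physical media
* offset, this works on logical (LBA-space) offsets: callers pass
* sector_number << 9, and the returned block is a logical block number
* that still has to be translated through lba_to_phys_table.
*/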
static void sm_break_offset(struct sm_ftl *ftl, loff_t loffset,
int *zone, int *block, int *boffset)
{
u64 offset = loffset;
*boffset = do_div(offset, ftl->block_size);
*block = do_div(offset, ftl->max_lba);
*zone = offset >= ftl->zone_count ? -1 : offset;
}
/* ---------------------- low level IO ------------------------------------- */
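/*
* Each 512-byte sector is treated as two SM_SMALL_PAGE (256-byte)
* halves, each protected by its own 3-byte software Hamming ECC stored
* in oob->ecc1 and oob->ecc2 respectively.
*/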
static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
{
bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
uint8_t ecc[3];
ecc_sw_hamming_calculate(buffer, SM_SMALL_PAGE, ecc, sm_order);
if (ecc_sw_hamming_correct(buffer, ecc, oob->ecc1, SM_SMALL_PAGE,
sm_order) < 0)
return -EIO;
buffer += SM_SMALL_PAGE;
ecc_sw_hamming_calculate(buffer, SM_SMALL_PAGE, ecc, sm_order);
if (ecc_sw_hamming_correct(buffer, ecc, oob->ecc2, SM_SMALL_PAGE,
sm_order) < 0)
return -EIO;
return 0;
}
/* Reads a sector + oob*/
static int sm_read_sector(struct sm_ftl *ftl,
int zone, int block, int boffset,
uint8_t *buffer, struct sm_oob *oob)
{
struct mtd_info *mtd = ftl->trans->mtd;
struct mtd_oob_ops ops = { };
struct sm_oob tmp_oob;
int ret = -EIO;
int try = 0;
/* The FTL table can contain -1 entries; those map to erased data (all 0xFF) */
if (block == -1) {
if (buffer)
memset(buffer, 0xFF, SM_SECTOR_SIZE);
return 0;
}
/* User might not need the oob, but we do for data verification */
if (!oob)
oob = &tmp_oob;
ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
ops.ooboffs = 0;
ops.ooblen = SM_OOB_SIZE;
ops.oobbuf = (void *)oob;
ops.len = SM_SECTOR_SIZE;
ops.datbuf = buffer;
again:
if (try++) {
/* Avoid infinite recursion on CIS reads, sm_recheck_media
* won't help anyway
*/
if (zone == 0 && block == ftl->cis_block && boffset ==
ftl->cis_boffset)
return ret;
/* Test if media is stable */
if (try == 3 || sm_recheck_media(ftl))
return ret;
}
/* Unfortunately, oob read will _always_ succeed,
* despite card removal.....
*/
ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
/* Test for unknown errors */
if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
dbg("read of block %d at zone %d, failed due to error (%d)",
block, zone, ret);
goto again;
}
/* Do a basic test on the oob, to guard against returned garbage */
if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
goto again;
/* This should never happen, unless there is a bug in the mtd driver */
WARN_ON(ops.oobretlen != SM_OOB_SIZE);
WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
if (!buffer)
return 0;
/* Test if sector marked as bad */
if (!sm_sector_valid(oob)) {
dbg("read of block %d at zone %d, failed because it is marked"
" as bad" , block, zone);
goto again;
}
/* Test ECC*/
if (mtd_is_eccerr(ret) ||
(ftl->smallpagenand && sm_correct_sector(buffer, oob))) {
dbg("read of block %d at zone %d, failed due to ECC error",
block, zone);
goto again;
}
return 0;
}
/* Writes a sector to media */
static int sm_write_sector(struct sm_ftl *ftl,
int zone, int block, int boffset,
uint8_t *buffer, struct sm_oob *oob)
{
struct mtd_oob_ops ops = { };
struct mtd_info *mtd = ftl->trans->mtd;
int ret;
BUG_ON(ftl->readonly);
if (zone == 0 && (block == ftl->cis_block || block == 0)) {
dbg("attempted to write the CIS!");
return -EIO;
}
if (ftl->unstable)
return -EIO;
ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
ops.len = SM_SECTOR_SIZE;
ops.datbuf = buffer;
ops.ooboffs = 0;
ops.ooblen = SM_OOB_SIZE;
ops.oobbuf = (void *)oob;
ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
/* Now we assume that hardware will catch write bitflip errors */
if (ret) {
dbg("write to block %d at zone %d, failed with error %d",
block, zone, ret);
sm_recheck_media(ftl);
return ret;
}
/* This should never happen, unless there is a bug in the driver */
WARN_ON(ops.oobretlen != SM_OOB_SIZE);
WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
return 0;
}
/* ------------------------ block IO ------------------------------------- */
/* Write a block using data and lba, and invalid sector bitmap */
static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
int zone, int block, int lba,
unsigned long invalid_bitmap)
{
bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
struct sm_oob oob;
int boffset;
int retry = 0;
/* Initialize the oob with requested values */
memset(&oob, 0xFF, SM_OOB_SIZE);
sm_write_lba(&oob, lba);
restart:
if (ftl->unstable)
return -EIO;
for (boffset = 0; boffset < ftl->block_size;
boffset += SM_SECTOR_SIZE) {
oob.data_status = 0xFF;
if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {
sm_printk("sector %d of block at LBA %d of zone %d"
" couldn't be read, marking it as invalid",
boffset / SM_SECTOR_SIZE, lba, zone);
oob.data_status = 0;
}
if (ftl->smallpagenand) {
ecc_sw_hamming_calculate(buf + boffset,
SM_SMALL_PAGE, oob.ecc1,
sm_order);
ecc_sw_hamming_calculate(buf + boffset + SM_SMALL_PAGE,
SM_SMALL_PAGE, oob.ecc2,
sm_order);
}
if (!sm_write_sector(ftl, zone, block, boffset,
buf + boffset, &oob))
continue;
if (!retry) {
/* If the write fails, try to erase the block */
/* This is safe, because we never write in blocks
* that contain valuable data.
* This is intended to repair blocks that are marked
* as erased but aren't fully erased.
*/
if (sm_erase_block(ftl, zone, block, 0))
return -EIO;
retry = 1;
goto restart;
} else {
sm_mark_block_bad(ftl, zone, block);
return -EIO;
}
}
return 0;
}
/* Mark whole block at offset 'offs' as bad. */
static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
{
struct sm_oob oob;
int boffset;
memset(&oob, 0xFF, SM_OOB_SIZE);
oob.block_status = 0xF0;
if (ftl->unstable)
return;
if (sm_recheck_media(ftl))
return;
sm_printk("marking block %d of zone %d as bad", block, zone);
/* We aren't checking the return value, because we don't care */
/* This also fails on fake xD cards, but I guess these won't expose
* any bad blocks until they fail completely
*/
for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
}
/*
* Erase a block within a zone
* If erase succeeds, it updates free block fifo, otherwise marks block as bad
*/
static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
int put_free)
{
struct ftl_zone *zone = &ftl->zones[zone_num];
struct mtd_info *mtd = ftl->trans->mtd;
struct erase_info erase;
erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
erase.len = ftl->block_size;
if (ftl->unstable)
return -EIO;
BUG_ON(ftl->readonly);
if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
sm_printk("attempted to erase the CIS!");
return -EIO;
}
if (mtd_erase(mtd, &erase)) {
sm_printk("erase of block %d in zone %d failed",
block, zone_num);
goto error;
}
if (put_free)
kfifo_in(&zone->free_sectors,
(const unsigned char *)&block, sizeof(block));
return 0;
error:
sm_mark_block_bad(ftl, zone_num, block);
return -EIO;
}
/* Thoroughly test that block is valid. */
static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
{
int boffset;
struct sm_oob oob;
int lbas[] = { -3, 0, 0, 0 };
int i = 0;
int test_lba;
/* First just check that block doesn't look fishy */
/* Only blocks that are valid or are sliced in two parts, are
* accepted
*/
for (boffset = 0; boffset < ftl->block_size;
boffset += SM_SECTOR_SIZE) {
/* This shouldn't happen anyway */
if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
return -2;
test_lba = sm_read_lba(&oob);
if (lbas[i] != test_lba)
lbas[++i] = test_lba;
/* If we found three different LBAs, something is fishy */
if (i == 3)
return -EIO;
}
/* If the block is sliced (partially erased usually) erase it */
if (i == 2) {
sm_erase_block(ftl, zone, block, 1);
return 1;
}
return 0;
}
/* ----------------- media scanning --------------------------------- */
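/*
* Fake CHS geometry table, one row per media size: { size in MiB,
* cylinders, heads, sectors }, looked up in sm_get_media_info() and
* reported to userspace via sm_getgeo().
*/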
static const struct chs_entry chs_table[] = {
{ 1, 125, 4, 4 },
{ 2, 125, 4, 8 },
{ 4, 250, 4, 8 },
{ 8, 250, 4, 16 },
{ 16, 500, 4, 16 },
{ 32, 500, 8, 16 },
{ 64, 500, 8, 32 },
{ 128, 500, 16, 32 },
{ 256, 1000, 16, 32 },
{ 512, 1015, 32, 63 },
{ 1024, 985, 33, 63 },
{ 2048, 985, 33, 63 },
{ 0 },
};
static const uint8_t cis_signature[] = {
0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
};
/* Find out media parameters.
* This ideally should be based on the NAND id, but for now the device size is enough
*/
static int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
{
int i;
int size_in_megs = mtd->size / (1024 * 1024);
ftl->readonly = mtd->type == MTD_ROM;
/* Manual settings for very old devices */
ftl->zone_count = 1;
ftl->smallpagenand = 0;
switch (size_in_megs) {
case 1:
/* 1 MiB flash/rom SmartMedia card (256 byte pages)*/
ftl->zone_size = 256;
ftl->max_lba = 250;
ftl->block_size = 8 * SM_SECTOR_SIZE;
ftl->smallpagenand = 1;
break;
case 2:
/* 2 MiB flash SmartMedia (256 byte pages)*/
if (mtd->writesize == SM_SMALL_PAGE) {
ftl->zone_size = 512;
ftl->max_lba = 500;
ftl->block_size = 8 * SM_SECTOR_SIZE;
ftl->smallpagenand = 1;
/* 2 MiB rom SmartMedia */
} else {
if (!ftl->readonly)
return -ENODEV;
ftl->zone_size = 256;
ftl->max_lba = 250;
ftl->block_size = 16 * SM_SECTOR_SIZE;
}
break;
case 4:
/* 4 MiB flash/rom SmartMedia device */
ftl->zone_size = 512;
ftl->max_lba = 500;
ftl->block_size = 16 * SM_SECTOR_SIZE;
break;
case 8:
/* 8 MiB flash/rom SmartMedia device */
ftl->zone_size = 1024;
ftl->max_lba = 1000;
ftl->block_size = 16 * SM_SECTOR_SIZE;
}
/* Minimum xD size is 16MiB. Also, all xD cards have standard zone
* sizes. SmartMedia cards exist up to 128 MiB and have the same layout
*/
if (size_in_megs >= 16) {
ftl->zone_count = size_in_megs / 16;
ftl->zone_size = 1024;
ftl->max_lba = 1000;
ftl->block_size = 32 * SM_SECTOR_SIZE;
}
/* Test for proper write,erase and oob sizes */
if (mtd->erasesize > ftl->block_size)
return -ENODEV;
if (mtd->writesize > SM_SECTOR_SIZE)
return -ENODEV;
if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
return -ENODEV;
if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
return -ENODEV;
/* We use OOB */
if (!mtd_has_oob(mtd))
return -ENODEV;
/* Find geometry information */
for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
if (chs_table[i].size == size_in_megs) {
ftl->cylinders = chs_table[i].cyl;
ftl->heads = chs_table[i].head;
ftl->sectors = chs_table[i].sec;
return 0;
}
}
sm_printk("media has unknown size : %dMiB", size_in_megs);
ftl->cylinders = 985;
ftl->heads = 33;
ftl->sectors = 63;
return 0;
}
/* Validate the CIS */
static int sm_read_cis(struct sm_ftl *ftl)
{
struct sm_oob oob;
if (sm_read_sector(ftl,
0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))
return -EIO;
if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))
return -EIO;
if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
cis_signature, sizeof(cis_signature))) {
return 0;
}
return -EIO;
}
/* Scan the media for the CIS */
static int sm_find_cis(struct sm_ftl *ftl)
{
struct sm_oob oob;
int block, boffset;
int block_found = 0;
int cis_found = 0;
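/*
* Only the first (zone_size - max_lba) physical blocks are scanned
* here, i.e. the spare-block headroom at the start of zone 0, since
* the CIS is expected to live in the first valid block of the media.
*/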
/* Search for first valid block */
for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {
if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
continue;
if (!sm_block_valid(&oob))
continue;
block_found = 1;
break;
}
if (!block_found)
return -EIO;
/* Search for first valid sector in this block */
for (boffset = 0 ; boffset < ftl->block_size;
boffset += SM_SECTOR_SIZE) {
if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
continue;
if (!sm_sector_valid(&oob))
continue;
break;
}
if (boffset == ftl->block_size)
return -EIO;
ftl->cis_block = block;
ftl->cis_boffset = boffset;
ftl->cis_page_offset = 0;
cis_found = !sm_read_cis(ftl);
if (!cis_found) {
ftl->cis_page_offset = SM_SMALL_PAGE;
cis_found = !sm_read_cis(ftl);
}
if (cis_found) {
dbg("CIS block found at offset %x",
block * ftl->block_size +
boffset + ftl->cis_page_offset);
return 0;
}
return -EIO;
}
/* Basic test to determine if the underlying mtd device is functional */
static int sm_recheck_media(struct sm_ftl *ftl)
{
if (sm_read_cis(ftl)) {
if (!ftl->unstable) {
sm_printk("media unstable, not allowing writes");
ftl->unstable = 1;
}
return -EIO;
}
return 0;
}
/* Initialize a FTL zone */
static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
{
struct ftl_zone *zone = &ftl->zones[zone_num];
struct sm_oob oob;
uint16_t block;
int lba;
int i = 0;
int len;
dbg("initializing zone %d", zone_num);
/* Allocate memory for FTL table */
zone->lba_to_phys_table = kmalloc_array(ftl->max_lba, 2, GFP_KERNEL);
if (!zone->lba_to_phys_table)
return -ENOMEM;
memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);
/* Allocate memory for free sectors FIFO */
if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
kfree(zone->lba_to_phys_table);
return -ENOMEM;
}
/* Now scan the zone */
for (block = 0 ; block < ftl->zone_size ; block++) {
/* Skip blocks till the CIS (including) */
if (zone_num == 0 && block <= ftl->cis_block)
continue;
/* Read the oob of first sector */
if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob)) {
kfifo_free(&zone->free_sectors);
kfree(zone->lba_to_phys_table);
return -EIO;
}
/* Test to see if the block is erased. It is enough to test the
* first sector, because erase happens in one shot
*/
if (sm_block_erased(&oob)) {
kfifo_in(&zone->free_sectors,
(unsigned char *)&block, 2);
continue;
}
/* If block is marked as bad, skip it */
/* This assumes we can trust the first sector */
/* However, the way the block valid status is defined ensures
* a very low probability of failure here
*/
if (!sm_block_valid(&oob)) {
dbg("PH %04d <-> <marked bad>", block);
continue;
}
lba = sm_read_lba(&oob);
/* Invalid LBA means that block is damaged. */
/* We can try to erase it, or mark it as bad, but
* let's leave that to a recovery application
*/
if (lba == -2 || lba >= ftl->max_lba) {
dbg("PH %04d <-> LBA %04d(bad)", block, lba);
continue;
}
/* If there is no collision,
* just put the sector in the FTL table
*/
if (zone->lba_to_phys_table[lba] < 0) {
dbg_verbose("PH %04d <-> LBA %04d", block, lba);
zone->lba_to_phys_table[lba] = block;
continue;
}
sm_printk("collision"
" of LBA %d between blocks %d and %d in zone %d",
lba, zone->lba_to_phys_table[lba], block, zone_num);
/* Test that this block is valid*/
if (sm_check_block(ftl, zone_num, block))
continue;
/* Test now the old block */
if (sm_check_block(ftl, zone_num,
zone->lba_to_phys_table[lba])) {
zone->lba_to_phys_table[lba] = block;
continue;
}
/* If both blocks are valid and share the same LBA, it means that
* they hold different versions of the same data. It is not
* known which is more recent, thus just erase one of them
*/
sm_printk("both blocks are valid, erasing the latter");
sm_erase_block(ftl, zone_num, block, 1);
}
dbg("zone initialized");
zone->initialized = 1;
/* No free sectors means that the zone is heavily damaged: writes won't
* work, but it can still be (partially) read
*/
if (!kfifo_len(&zone->free_sectors)) {
sm_printk("no free blocks in zone %d", zone_num);
return 0;
}
/* Randomize first block we write to */
get_random_bytes(&i, 2);
i %= (kfifo_len(&zone->free_sectors) / 2);
while (i--) {
len = kfifo_out(&zone->free_sectors,
(unsigned char *)&block, 2);
WARN_ON(len != 2);
kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
}
return 0;
}
/* Get and automatically initialize an FTL mapping for one zone */
static struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
{
struct ftl_zone *zone;
int error;
BUG_ON(zone_num >= ftl->zone_count);
zone = &ftl->zones[zone_num];
if (!zone->initialized) {
error = sm_init_zone(ftl, zone_num);
if (error)
return ERR_PTR(error);
}
return zone;
}
/* ----------------- cache handling ------------------------------------------*/
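/*
* The FTL keeps a single-block write-back cache: sm_write() gathers
* sectors belonging to one (zone, logical block) pair in cache_data,
* with cache_data_invalid_bitmap marking the sectors not yet supplied.
* sm_cache_flush() fills the gaps from the old physical block, writes
* the whole block to a fresh physical block taken from the free FIFO,
* then erases and frees the old copy.  A timer queues a flush roughly
* cache_timeout ms after the last write.
*/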
/* Initialize the one block cache */
static void sm_cache_init(struct sm_ftl *ftl)
{
ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
ftl->cache_clean = 1;
ftl->cache_zone = -1;
ftl->cache_block = -1;
/*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
}
/* Put sector in one block cache */
static void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
{
memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
ftl->cache_clean = 0;
}
/* Read a sector from the cache */
static int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
{
if (test_bit(boffset / SM_SECTOR_SIZE,
&ftl->cache_data_invalid_bitmap))
return -1;
memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
return 0;
}
/* Write the cache to hardware */
static int sm_cache_flush(struct sm_ftl *ftl)
{
struct ftl_zone *zone;
int sector_num;
uint16_t write_sector;
int zone_num = ftl->cache_zone;
int block_num;
if (ftl->cache_clean)
return 0;
if (ftl->unstable)
return -EIO;
BUG_ON(zone_num < 0);
zone = &ftl->zones[zone_num];
block_num = zone->lba_to_phys_table[ftl->cache_block];
/* Try to read all unread areas of the cache block*/
for_each_set_bit(sector_num, &ftl->cache_data_invalid_bitmap,
ftl->block_size / SM_SECTOR_SIZE) {
if (!sm_read_sector(ftl,
zone_num, block_num, sector_num * SM_SECTOR_SIZE,
ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
clear_bit(sector_num,
&ftl->cache_data_invalid_bitmap);
}
restart:
if (ftl->unstable)
return -EIO;
/* If there are no spare blocks, */
/* we could still continue by erasing/writing the current block,
* but for such worn-out media it isn't worth the trouble,
* or the danger
*/
if (kfifo_out(&zone->free_sectors,
(unsigned char *)&write_sector, 2) != 2) {
dbg("no free sectors for write!");
return -EIO;
}
if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
ftl->cache_block, ftl->cache_data_invalid_bitmap))
goto restart;
/* Update the FTL table */
zone->lba_to_phys_table[ftl->cache_block] = write_sector;
/* Write successful, so erase and free the old block */
if (block_num > 0)
sm_erase_block(ftl, zone_num, block_num, 1);
sm_cache_init(ftl);
return 0;
}
/* flush timer, runs a second after last write */
static void sm_cache_flush_timer(struct timer_list *t)
{
struct sm_ftl *ftl = from_timer(ftl, t, timer);
queue_work(cache_flush_workqueue, &ftl->flush_work);
}
/* cache flush work, kicked by timer */
static void sm_cache_flush_work(struct work_struct *work)
{
struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);
mutex_lock(&ftl->mutex);
sm_cache_flush(ftl);
mutex_unlock(&ftl->mutex);
return;
}
/* ---------------- outside interface -------------------------------------- */
/* outside interface: read a sector */
static int sm_read(struct mtd_blktrans_dev *dev,
unsigned long sect_no, char *buf)
{
struct sm_ftl *ftl = dev->priv;
struct ftl_zone *zone;
int error = 0, in_cache = 0;
int zone_num, block, boffset;
sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
mutex_lock(&ftl->mutex);
zone = sm_get_zone(ftl, zone_num);
if (IS_ERR(zone)) {
error = PTR_ERR(zone);
goto unlock;
}
/* Have to look at cache first */
if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
in_cache = 1;
if (!sm_cache_get(ftl, buf, boffset))
goto unlock;
}
/* Translate the block and return if doesn't exist in the table */
block = zone->lba_to_phys_table[block];
if (block == -1) {
memset(buf, 0xFF, SM_SECTOR_SIZE);
goto unlock;
}
if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
error = -EIO;
goto unlock;
}
if (in_cache)
sm_cache_put(ftl, buf, boffset);
unlock:
mutex_unlock(&ftl->mutex);
return error;
}
/* outside interface: write a sector */
static int sm_write(struct mtd_blktrans_dev *dev,
unsigned long sec_no, char *buf)
{
struct sm_ftl *ftl = dev->priv;
struct ftl_zone *zone;
int error = 0, zone_num, block, boffset;
BUG_ON(ftl->readonly);
sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);
/* No need for the flush work to run right now */
del_timer(&ftl->timer);
mutex_lock(&ftl->mutex);
zone = sm_get_zone(ftl, zone_num);
if (IS_ERR(zone)) {
error = PTR_ERR(zone);
goto unlock;
}
/* If entry is not in cache, flush it */
if (ftl->cache_block != block || ftl->cache_zone != zone_num) {
error = sm_cache_flush(ftl);
if (error)
goto unlock;
ftl->cache_block = block;
ftl->cache_zone = zone_num;
}
sm_cache_put(ftl, buf, boffset);
unlock:
mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
mutex_unlock(&ftl->mutex);
return error;
}
/* outside interface: flush everything */
static int sm_flush(struct mtd_blktrans_dev *dev)
{
struct sm_ftl *ftl = dev->priv;
int retval;
mutex_lock(&ftl->mutex);
retval = sm_cache_flush(ftl);
mutex_unlock(&ftl->mutex);
return retval;
}
/* outside interface: device is released */
static void sm_release(struct mtd_blktrans_dev *dev)
{
struct sm_ftl *ftl = dev->priv;
del_timer_sync(&ftl->timer);
cancel_work_sync(&ftl->flush_work);
mutex_lock(&ftl->mutex);
sm_cache_flush(ftl);
mutex_unlock(&ftl->mutex);
}
/* outside interface: get geometry */
static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
struct sm_ftl *ftl = dev->priv;
geo->heads = ftl->heads;
geo->sectors = ftl->sectors;
geo->cylinders = ftl->cylinders;
return 0;
}
/* external interface: main initialization function */
static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
struct mtd_blktrans_dev *trans;
struct sm_ftl *ftl;
/* Allocate & initialize our private structure */
ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL);
if (!ftl)
goto error1;
mutex_init(&ftl->mutex);
timer_setup(&ftl->timer, sm_cache_flush_timer, 0);
INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
/* Read media information */
if (sm_get_media_info(ftl, mtd)) {
dbg("found unsupported mtd device, aborting");
goto error2;
}
/* Allocate temporary CIS buffer for read retry support */
ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
if (!ftl->cis_buffer)
goto error2;
/* Allocate zone array, it will be initialized on demand */
ftl->zones = kcalloc(ftl->zone_count, sizeof(struct ftl_zone),
GFP_KERNEL);
if (!ftl->zones)
goto error3;
/* Allocate the cache*/
ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);
if (!ftl->cache_data)
goto error4;
sm_cache_init(ftl);
/* Allocate upper layer structure and initialize it */
trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
if (!trans)
goto error5;
ftl->trans = trans;
trans->priv = ftl;
trans->tr = tr;
trans->mtd = mtd;
trans->devnum = -1;
trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
trans->readonly = ftl->readonly;
if (sm_find_cis(ftl)) {
dbg("CIS not found on mtd device, aborting");
goto error6;
}
ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
if (!ftl->disk_attributes)
goto error6;
trans->disk_attributes = ftl->disk_attributes;
sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
(int)(mtd->size / (1024 * 1024)), mtd->index);
dbg("FTL layout:");
dbg("%d zone(s), each consists of %d blocks (+%d spares)",
ftl->zone_count, ftl->max_lba,
ftl->zone_size - ftl->max_lba);
dbg("each block consists of %d bytes",
ftl->block_size);
/* Register device*/
if (add_mtd_blktrans_dev(trans)) {
dbg("error in mtdblktrans layer");
goto error6;
}
return;
error6:
kfree(trans);
error5:
kfree(ftl->cache_data);
error4:
kfree(ftl->zones);
error3:
kfree(ftl->cis_buffer);
error2:
kfree(ftl);
error1:
return;
}
/* main interface: device {surprise,} removal */
static void sm_remove_dev(struct mtd_blktrans_dev *dev)
{
struct sm_ftl *ftl = dev->priv;
int i;
del_mtd_blktrans_dev(dev);
ftl->trans = NULL;
for (i = 0 ; i < ftl->zone_count; i++) {
if (!ftl->zones[i].initialized)
continue;
kfree(ftl->zones[i].lba_to_phys_table);
kfifo_free(&ftl->zones[i].free_sectors);
}
sm_delete_sysfs_attributes(ftl);
kfree(ftl->cis_buffer);
kfree(ftl->zones);
kfree(ftl->cache_data);
kfree(ftl);
}
static struct mtd_blktrans_ops sm_ftl_ops = {
.name = "smblk",
.major = 0,
.part_bits = SM_FTL_PARTN_BITS,
.blksize = SM_SECTOR_SIZE,
.getgeo = sm_getgeo,
.add_mtd = sm_add_mtd,
.remove_dev = sm_remove_dev,
.readsect = sm_read,
.writesect = sm_write,
.flush = sm_flush,
.release = sm_release,
.owner = THIS_MODULE,
};
static __init int sm_module_init(void)
{
int error = 0;
cache_flush_workqueue = create_freezable_workqueue("smflush");
if (!cache_flush_workqueue)
return -ENOMEM;
error = register_mtd_blktrans(&sm_ftl_ops);
if (error)
destroy_workqueue(cache_flush_workqueue);
return error;
}
static void __exit sm_module_exit(void)
{
destroy_workqueue(cache_flush_workqueue);
deregister_mtd_blktrans(&sm_ftl_ops);
}
module_init(sm_module_init);
module_exit(sm_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <[email protected]>");
MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");
| linux-master | drivers/mtd/sm_ftl.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Swap block device support for MTDs
* Turns an MTD device into a swap device with block wear leveling
*
* Copyright © 2007,2011 Nokia Corporation. All rights reserved.
*
* Authors: Jarkko Lavinen <[email protected]>
*
* Based on Richard Purdie's earlier implementation in 2007. Background
* support and lock-less operation written by Adrian Hunter.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/math64.h>
#define MTDSWAP_PREFIX "mtdswap"
/*
* The number of free eraseblocks when GC should stop
*/
#define CLEAN_BLOCK_THRESHOLD 20
/*
* Number of free eraseblocks below which GC can also collect low frag
* blocks.
*/
#define LOW_FRAG_GC_THRESHOLD 5
/*
* Wear level cost amortization. We want to do wear leveling in the background
* without disturbing GC too much. This is done by defining a maximum GC
* frequency: a frequency value of 6 means 1/6 of the GC passes will pick an
* erase block based on the biggest wear difference rather than the biggest
* dirtiness.
*
* The lower freq2 should be chosen so that it ensures the maximum erase
* difference will decrease even if a malicious application is deliberately
* trying to make erase differences large.
*/
#define MAX_ERASE_DIFF 4000
#define COLLECT_NONDIRTY_BASE MAX_ERASE_DIFF
#define COLLECT_NONDIRTY_FREQ1 6
#define COLLECT_NONDIRTY_FREQ2 4
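/*
* Illustrative reading of the values above (an example, not additional tuning
* guidance): with COLLECT_NONDIRTY_FREQ1 == 6 and COLLECT_NONDIRTY_FREQ2 == 4,
* wear-leveling picks happen on roughly every 6th GC pass once the erase-count
* spread just exceeds MAX_ERASE_DIFF, and ramp up to roughly every 4th pass as
* the spread approaches MAX_ERASE_DIFF + COLLECT_NONDIRTY_BASE (see
* mtdswap_wlfreq()).
*/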
#define PAGE_UNDEF UINT_MAX
#define BLOCK_UNDEF UINT_MAX
#define BLOCK_ERROR (UINT_MAX - 1)
#define BLOCK_MAX (UINT_MAX - 2)
#define EBLOCK_BAD (1 << 0)
#define EBLOCK_NOMAGIC (1 << 1)
#define EBLOCK_BITFLIP (1 << 2)
#define EBLOCK_FAILED (1 << 3)
#define EBLOCK_READERR (1 << 4)
#define EBLOCK_IDX_SHIFT 5
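/*
* The bits above EBLOCK_IDX_SHIFT are used during the initial scan to stash
* the destination tree index in eb->flags (see mtdswap_scan_eblks()).
*/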
struct swap_eb {
struct rb_node rb;
struct rb_root *root;
unsigned int flags;
unsigned int active_count;
unsigned int erase_count;
unsigned int pad; /* speeds up pointer decrement */
};
#define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \
rb)->erase_count)
#define MTDSWAP_ECNT_MAX(rbroot) (rb_entry(rb_last(rbroot), struct swap_eb, \
rb)->erase_count)
struct mtdswap_tree {
struct rb_root root;
unsigned int count;
};
enum {
MTDSWAP_CLEAN,
MTDSWAP_USED,
MTDSWAP_LOWFRAG,
MTDSWAP_HIFRAG,
MTDSWAP_DIRTY,
MTDSWAP_BITFLIP,
MTDSWAP_FAILING,
MTDSWAP_TREE_CNT,
};
struct mtdswap_dev {
struct mtd_blktrans_dev *mbd_dev;
struct mtd_info *mtd;
struct device *dev;
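/* page_data[] maps a swap page to a flash block; revmap[] maps a flash block back to its swap page (BLOCK_UNDEF/PAGE_UNDEF when unmapped) */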
unsigned int *page_data;
unsigned int *revmap;
unsigned int eblks;
unsigned int spare_eblks;
unsigned int pages_per_eblk;
unsigned int max_erase_count;
struct swap_eb *eb_data;
struct mtdswap_tree trees[MTDSWAP_TREE_CNT];
unsigned long long sect_read_count;
unsigned long long sect_write_count;
unsigned long long mtd_write_count;
unsigned long long mtd_read_count;
unsigned long long discard_count;
unsigned long long discard_page_count;
unsigned int curr_write_pos;
struct swap_eb *curr_write;
char *page_buf;
char *oob_buf;
};
struct mtdswap_oobdata {
__le16 magic;
__le32 count;
} __packed;
#define MTDSWAP_MAGIC_CLEAN 0x2095
#define MTDSWAP_MAGIC_DIRTY (MTDSWAP_MAGIC_CLEAN + 1)
#define MTDSWAP_TYPE_CLEAN 0
#define MTDSWAP_TYPE_DIRTY 1
#define MTDSWAP_OOBSIZE sizeof(struct mtdswap_oobdata)
#define MTDSWAP_ERASE_RETRIES 3 /* Before marking erase block bad */
#define MTDSWAP_IO_RETRIES 3
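/*
* Marker layout (as implemented in mtdswap_write_marker()): the CLEAN marker,
* magic plus erase count, lives in the OOB of an eraseblock's first page; the
* DIRTY magic alone is written to the OOB of the second page when the block
* is taken into use.
*/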
enum {
MTDSWAP_SCANNED_CLEAN,
MTDSWAP_SCANNED_DIRTY,
MTDSWAP_SCANNED_BITFLIP,
MTDSWAP_SCANNED_BAD,
};
/*
* In the worst case mtdswap_writesect() has allocated the last clean
* page from the current block and is then pre-empted by the GC
* thread. The thread can consume a full erase block when moving a
* block.
*/
#define MIN_SPARE_EBLOCKS 2
#define MIN_ERASE_BLOCKS (MIN_SPARE_EBLOCKS + 1)
#define TREE_ROOT(d, name) (&d->trees[MTDSWAP_ ## name].root)
#define TREE_EMPTY(d, name) (TREE_ROOT(d, name)->rb_node == NULL)
#define TREE_NONEMPTY(d, name) (!TREE_EMPTY(d, name))
#define TREE_COUNT(d, name) (d->trees[MTDSWAP_ ## name].count)
#define MTDSWAP_MBD_TO_MTDSWAP(dev) ((struct mtdswap_dev *)dev->priv)
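/*
* For example, TREE_ROOT(d, CLEAN) expands to &d->trees[MTDSWAP_CLEAN].root
* and TREE_COUNT(d, CLEAN) to d->trees[MTDSWAP_CLEAN].count.
*/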
static char partitions[128] = "";
module_param_string(partitions, partitions, sizeof(partitions), 0444);
MODULE_PARM_DESC(partitions, "MTD partition numbers to use as swap "
"partitions=\"1,3,5\"");
static unsigned int spare_eblocks = 10;
module_param(spare_eblocks, uint, 0444);
MODULE_PARM_DESC(spare_eblocks, "Percentage of spare erase blocks for "
"garbage collection (default 10%)");
static bool header; /* false */
module_param(header, bool, 0444);
MODULE_PARM_DESC(header,
"Include builtin swap header (default 0, without header)");
static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background);
static loff_t mtdswap_eb_offset(struct mtdswap_dev *d, struct swap_eb *eb)
{
return (loff_t)(eb - d->eb_data) * d->mtd->erasesize;
}
static void mtdswap_eb_detach(struct mtdswap_dev *d, struct swap_eb *eb)
{
unsigned int oldidx;
struct mtdswap_tree *tp;
if (eb->root) {
tp = container_of(eb->root, struct mtdswap_tree, root);
oldidx = tp - &d->trees[0];
d->trees[oldidx].count--;
rb_erase(&eb->rb, eb->root);
}
}
static void __mtdswap_rb_add(struct rb_root *root, struct swap_eb *eb)
{
struct rb_node **p, *parent = NULL;
struct swap_eb *cur;
p = &root->rb_node;
while (*p) {
parent = *p;
cur = rb_entry(parent, struct swap_eb, rb);
if (eb->erase_count > cur->erase_count)
p = &(*p)->rb_right;
else
p = &(*p)->rb_left;
}
rb_link_node(&eb->rb, parent, p);
rb_insert_color(&eb->rb, root);
}
static void mtdswap_rb_add(struct mtdswap_dev *d, struct swap_eb *eb, int idx)
{
struct rb_root *root;
if (eb->root == &d->trees[idx].root)
return;
mtdswap_eb_detach(d, eb);
root = &d->trees[idx].root;
__mtdswap_rb_add(root, eb);
eb->root = root;
d->trees[idx].count++;
}
static struct rb_node *mtdswap_rb_index(struct rb_root *root, unsigned int idx)
{
struct rb_node *p;
unsigned int i;
p = rb_first(root);
i = 0;
while (i < idx && p) {
p = rb_next(p);
i++;
}
return p;
}
static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb)
{
int ret;
loff_t offset;
d->spare_eblks--;
eb->flags |= EBLOCK_BAD;
mtdswap_eb_detach(d, eb);
eb->root = NULL;
/* badblocks not supported */
if (!mtd_can_have_bb(d->mtd))
return 1;
offset = mtdswap_eb_offset(d, eb);
dev_warn(d->dev, "Marking bad block at %08llx\n", offset);
ret = mtd_block_markbad(d->mtd, offset);
if (ret) {
dev_warn(d->dev, "Mark block bad failed for block at %08llx "
"error %d\n", offset, ret);
return ret;
}
return 1;
}
static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb)
{
unsigned int marked = eb->flags & EBLOCK_FAILED;
struct swap_eb *curr_write = d->curr_write;
eb->flags |= EBLOCK_FAILED;
if (curr_write == eb) {
d->curr_write = NULL;
if (!marked && d->curr_write_pos != 0) {
mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
return 0;
}
}
return mtdswap_handle_badblock(d, eb);
}
static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from,
struct mtd_oob_ops *ops)
{
int ret = mtd_read_oob(d->mtd, from, ops);
if (mtd_is_bitflip(ret))
return ret;
if (ret) {
dev_warn(d->dev, "Read OOB failed %d for block at %08llx\n",
ret, from);
return ret;
}
if (ops->oobretlen < ops->ooblen) {
dev_warn(d->dev, "Read OOB return short read (%zd bytes not "
"%zd) for block at %08llx\n",
ops->oobretlen, ops->ooblen, from);
return -EIO;
}
return 0;
}
static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
{
struct mtdswap_oobdata *data, *data2;
int ret;
loff_t offset;
struct mtd_oob_ops ops = { };
offset = mtdswap_eb_offset(d, eb);
/* Check first if the block is bad. */
if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))
return MTDSWAP_SCANNED_BAD;
ops.ooblen = 2 * d->mtd->oobavail;
ops.oobbuf = d->oob_buf;
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.mode = MTD_OPS_AUTO_OOB;
ret = mtdswap_read_oob(d, offset, &ops);
if (ret && !mtd_is_bitflip(ret))
return ret;
data = (struct mtdswap_oobdata *)d->oob_buf;
data2 = (struct mtdswap_oobdata *)
(d->oob_buf + d->mtd->oobavail);
if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
eb->erase_count = le32_to_cpu(data->count);
if (mtd_is_bitflip(ret))
ret = MTDSWAP_SCANNED_BITFLIP;
else {
if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY)
ret = MTDSWAP_SCANNED_DIRTY;
else
ret = MTDSWAP_SCANNED_CLEAN;
}
} else {
eb->flags |= EBLOCK_NOMAGIC;
ret = MTDSWAP_SCANNED_DIRTY;
}
return ret;
}
static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
u16 marker)
{
struct mtdswap_oobdata n;
int ret;
loff_t offset;
struct mtd_oob_ops ops = { };
ops.ooboffs = 0;
ops.oobbuf = (uint8_t *)&n;
ops.mode = MTD_OPS_AUTO_OOB;
ops.datbuf = NULL;
if (marker == MTDSWAP_TYPE_CLEAN) {
n.magic = cpu_to_le16(MTDSWAP_MAGIC_CLEAN);
n.count = cpu_to_le32(eb->erase_count);
ops.ooblen = MTDSWAP_OOBSIZE;
offset = mtdswap_eb_offset(d, eb);
} else {
n.magic = cpu_to_le16(MTDSWAP_MAGIC_DIRTY);
ops.ooblen = sizeof(n.magic);
offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize;
}
ret = mtd_write_oob(d->mtd, offset, &ops);
if (ret) {
dev_warn(d->dev, "Write OOB failed for block at %08llx "
"error %d\n", offset, ret);
if (ret == -EIO || mtd_is_eccerr(ret))
mtdswap_handle_write_error(d, eb);
return ret;
}
if (ops.oobretlen != ops.ooblen) {
dev_warn(d->dev, "Short OOB write for block at %08llx: "
"%zd not %zd\n",
offset, ops.oobretlen, ops.ooblen);
return ret;
}
return 0;
}
/*
* Are there any erase blocks without a MAGIC_CLEAN header, presumably
* because power was cut off after the erase but before the header write? We
* need to estimate the erase count for them.
*/
static void mtdswap_check_counts(struct mtdswap_dev *d)
{
struct rb_root hist_root = RB_ROOT;
struct rb_node *medrb;
struct swap_eb *eb;
unsigned int i, cnt, median;
cnt = 0;
for (i = 0; i < d->eblks; i++) {
eb = d->eb_data + i;
if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR))
continue;
__mtdswap_rb_add(&hist_root, eb);
cnt++;
}
if (cnt == 0)
return;
medrb = mtdswap_rb_index(&hist_root, cnt / 2);
median = rb_entry(medrb, struct swap_eb, rb)->erase_count;
d->max_erase_count = MTDSWAP_ECNT_MAX(&hist_root);
for (i = 0; i < d->eblks; i++) {
eb = d->eb_data + i;
if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_READERR))
eb->erase_count = median;
if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR))
continue;
rb_erase(&eb->rb, &hist_root);
}
}
static void mtdswap_scan_eblks(struct mtdswap_dev *d)
{
int status;
unsigned int i, idx;
struct swap_eb *eb;
for (i = 0; i < d->eblks; i++) {
eb = d->eb_data + i;
status = mtdswap_read_markers(d, eb);
if (status < 0)
eb->flags |= EBLOCK_READERR;
else if (status == MTDSWAP_SCANNED_BAD) {
eb->flags |= EBLOCK_BAD;
continue;
}
switch (status) {
case MTDSWAP_SCANNED_CLEAN:
idx = MTDSWAP_CLEAN;
break;
case MTDSWAP_SCANNED_DIRTY:
case MTDSWAP_SCANNED_BITFLIP:
idx = MTDSWAP_DIRTY;
break;
default:
idx = MTDSWAP_FAILING;
}
eb->flags |= (idx << EBLOCK_IDX_SHIFT);
}
mtdswap_check_counts(d);
for (i = 0; i < d->eblks; i++) {
eb = d->eb_data + i;
if (eb->flags & EBLOCK_BAD)
continue;
idx = eb->flags >> EBLOCK_IDX_SHIFT;
mtdswap_rb_add(d, eb, idx);
}
}
/*
* Place an eraseblock into the tree corresponding to the number of active
* pages it contains.
*/
static void mtdswap_store_eb(struct mtdswap_dev *d, struct swap_eb *eb)
{
unsigned int weight = eb->active_count;
unsigned int maxweight = d->pages_per_eblk;
if (eb == d->curr_write)
return;
if (eb->flags & EBLOCK_BITFLIP)
mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
else if (eb->flags & (EBLOCK_READERR | EBLOCK_FAILED))
mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
if (weight == maxweight)
mtdswap_rb_add(d, eb, MTDSWAP_USED);
else if (weight == 0)
mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
else if (weight > (maxweight/2))
mtdswap_rb_add(d, eb, MTDSWAP_LOWFRAG);
else
mtdswap_rb_add(d, eb, MTDSWAP_HIFRAG);
}
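/*
* For illustration of the placement above: a block whose pages are all still
* active goes to the USED tree, a block with no active pages goes to DIRTY,
* and partially used blocks land in LOWFRAG or HIFRAG depending on whether
* more or less than half of their pages are still active.
*/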
static int mtdswap_erase_block(struct mtdswap_dev *d, struct swap_eb *eb)
{
struct mtd_info *mtd = d->mtd;
struct erase_info erase;
unsigned int retries = 0;
int ret;
eb->erase_count++;
if (eb->erase_count > d->max_erase_count)
d->max_erase_count = eb->erase_count;
retry:
memset(&erase, 0, sizeof(struct erase_info));
erase.addr = mtdswap_eb_offset(d, eb);
erase.len = mtd->erasesize;
ret = mtd_erase(mtd, &erase);
if (ret) {
if (retries++ < MTDSWAP_ERASE_RETRIES) {
dev_warn(d->dev,
"erase of erase block %#llx on %s failed",
erase.addr, mtd->name);
yield();
goto retry;
}
dev_err(d->dev, "Cannot erase erase block %#llx on %s\n",
erase.addr, mtd->name);
mtdswap_handle_badblock(d, eb);
return -EIO;
}
return 0;
}
static int mtdswap_map_free_block(struct mtdswap_dev *d, unsigned int page,
unsigned int *block)
{
int ret;
struct swap_eb *old_eb = d->curr_write;
struct rb_root *clean_root;
struct swap_eb *eb;
if (old_eb == NULL || d->curr_write_pos >= d->pages_per_eblk) {
do {
if (TREE_EMPTY(d, CLEAN))
return -ENOSPC;
clean_root = TREE_ROOT(d, CLEAN);
eb = rb_entry(rb_first(clean_root), struct swap_eb, rb);
rb_erase(&eb->rb, clean_root);
eb->root = NULL;
TREE_COUNT(d, CLEAN)--;
ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY);
} while (ret == -EIO || mtd_is_eccerr(ret));
if (ret)
return ret;
d->curr_write_pos = 0;
d->curr_write = eb;
if (old_eb)
mtdswap_store_eb(d, old_eb);
}
*block = (d->curr_write - d->eb_data) * d->pages_per_eblk +
d->curr_write_pos;
d->curr_write->active_count++;
d->revmap[*block] = page;
d->curr_write_pos++;
return 0;
}
static unsigned int mtdswap_free_page_cnt(struct mtdswap_dev *d)
{
return TREE_COUNT(d, CLEAN) * d->pages_per_eblk +
d->pages_per_eblk - d->curr_write_pos;
}
static unsigned int mtdswap_enough_free_pages(struct mtdswap_dev *d)
{
return mtdswap_free_page_cnt(d) > d->pages_per_eblk;
}
static int mtdswap_write_block(struct mtdswap_dev *d, char *buf,
unsigned int page, unsigned int *bp, int gc_context)
{
struct mtd_info *mtd = d->mtd;
struct swap_eb *eb;
size_t retlen;
loff_t writepos;
int ret;
retry:
if (!gc_context)
while (!mtdswap_enough_free_pages(d))
if (mtdswap_gc(d, 0) > 0)
return -ENOSPC;
ret = mtdswap_map_free_block(d, page, bp);
eb = d->eb_data + (*bp / d->pages_per_eblk);
if (ret == -EIO || mtd_is_eccerr(ret)) {
d->curr_write = NULL;
eb->active_count--;
d->revmap[*bp] = PAGE_UNDEF;
goto retry;
}
if (ret < 0)
return ret;
writepos = (loff_t)*bp << PAGE_SHIFT;
ret = mtd_write(mtd, writepos, PAGE_SIZE, &retlen, buf);
if (ret == -EIO || mtd_is_eccerr(ret)) {
d->curr_write_pos--;
eb->active_count--;
d->revmap[*bp] = PAGE_UNDEF;
mtdswap_handle_write_error(d, eb);
goto retry;
}
if (ret < 0) {
dev_err(d->dev, "Write to MTD device failed: %d (%zd written)",
ret, retlen);
goto err;
}
if (retlen != PAGE_SIZE) {
dev_err(d->dev, "Short write to MTD device: %zd written",
retlen);
ret = -EIO;
goto err;
}
return ret;
err:
d->curr_write_pos--;
eb->active_count--;
d->revmap[*bp] = PAGE_UNDEF;
return ret;
}
static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock,
unsigned int *newblock)
{
struct mtd_info *mtd = d->mtd;
struct swap_eb *eb, *oldeb;
int ret;
size_t retlen;
unsigned int page, retries;
loff_t readpos;
page = d->revmap[oldblock];
readpos = (loff_t) oldblock << PAGE_SHIFT;
retries = 0;
retry:
ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
if (ret < 0 && !mtd_is_bitflip(ret)) {
oldeb = d->eb_data + oldblock / d->pages_per_eblk;
oldeb->flags |= EBLOCK_READERR;
dev_err(d->dev, "Read Error: %d (block %u)\n", ret,
oldblock);
retries++;
if (retries < MTDSWAP_IO_RETRIES)
goto retry;
goto read_error;
}
if (retlen != PAGE_SIZE) {
dev_err(d->dev, "Short read: %zd (block %u)\n", retlen,
oldblock);
ret = -EIO;
goto read_error;
}
ret = mtdswap_write_block(d, d->page_buf, page, newblock, 1);
if (ret < 0) {
d->page_data[page] = BLOCK_ERROR;
dev_err(d->dev, "Write error: %d\n", ret);
return ret;
}
d->page_data[page] = *newblock;
d->revmap[oldblock] = PAGE_UNDEF;
eb = d->eb_data + oldblock / d->pages_per_eblk;
eb->active_count--;
return 0;
read_error:
d->page_data[page] = BLOCK_ERROR;
d->revmap[oldblock] = PAGE_UNDEF;
return ret;
}
static int mtdswap_gc_eblock(struct mtdswap_dev *d, struct swap_eb *eb)
{
unsigned int i, block, eblk_base, newblock;
int ret, errcode;
errcode = 0;
eblk_base = (eb - d->eb_data) * d->pages_per_eblk;
for (i = 0; i < d->pages_per_eblk; i++) {
if (d->spare_eblks < MIN_SPARE_EBLOCKS)
return -ENOSPC;
block = eblk_base + i;
if (d->revmap[block] == PAGE_UNDEF)
continue;
ret = mtdswap_move_block(d, block, &newblock);
if (ret < 0 && !errcode)
errcode = ret;
}
return errcode;
}
static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d)
{
int idx, stopat;
if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_THRESHOLD)
stopat = MTDSWAP_LOWFRAG;
else
stopat = MTDSWAP_HIFRAG;
for (idx = MTDSWAP_BITFLIP; idx >= stopat; idx--)
if (d->trees[idx].root.rb_node != NULL)
return idx;
return -1;
}
static int mtdswap_wlfreq(unsigned int maxdiff)
{
unsigned int h, x, y, dist, base;
/*
* Calculate a linear ramp down from f1 to f2 as maxdiff goes from
* MAX_ERASE_DIFF to MAX_ERASE_DIFF + COLLECT_NONDIRTY_BASE. Similar
* to a triangle with height f1 - f2 and width COLLECT_NONDIRTY_BASE.
*/
dist = maxdiff - MAX_ERASE_DIFF;
if (dist > COLLECT_NONDIRTY_BASE)
dist = COLLECT_NONDIRTY_BASE;
/*
* Modelling the slope as a right-angled triangle with base
* COLLECT_NONDIRTY_BASE and height freq1 - freq2. The ratio y/x is
* equal to the ratio h/base.
*/
h = COLLECT_NONDIRTY_FREQ1 - COLLECT_NONDIRTY_FREQ2;
base = COLLECT_NONDIRTY_BASE;
x = dist - base;
y = (x * h + base / 2) / base;
return COLLECT_NONDIRTY_FREQ2 + y;
}
static int mtdswap_choose_wl_tree(struct mtdswap_dev *d)
{
static unsigned int pick_cnt;
unsigned int i, idx = -1, wear, max;
struct rb_root *root;
max = 0;
for (i = 0; i <= MTDSWAP_DIRTY; i++) {
root = &d->trees[i].root;
if (root->rb_node == NULL)
continue;
wear = d->max_erase_count - MTDSWAP_ECNT_MIN(root);
if (wear > max) {
max = wear;
idx = i;
}
}
if (max > MAX_ERASE_DIFF && pick_cnt >= mtdswap_wlfreq(max) - 1) {
pick_cnt = 0;
return idx;
}
pick_cnt++;
return -1;
}
static int mtdswap_choose_gc_tree(struct mtdswap_dev *d,
unsigned int background)
{
int idx;
if (TREE_NONEMPTY(d, FAILING) &&
(background || (TREE_EMPTY(d, CLEAN) && TREE_EMPTY(d, DIRTY))))
return MTDSWAP_FAILING;
idx = mtdswap_choose_wl_tree(d);
if (idx >= MTDSWAP_CLEAN)
return idx;
return __mtdswap_choose_gc_tree(d);
}
static struct swap_eb *mtdswap_pick_gc_eblk(struct mtdswap_dev *d,
unsigned int background)
{
struct rb_root *rp = NULL;
struct swap_eb *eb = NULL;
int idx;
if (background && TREE_COUNT(d, CLEAN) > CLEAN_BLOCK_THRESHOLD &&
TREE_EMPTY(d, DIRTY) && TREE_EMPTY(d, FAILING))
return NULL;
idx = mtdswap_choose_gc_tree(d, background);
if (idx < 0)
return NULL;
rp = &d->trees[idx].root;
eb = rb_entry(rb_first(rp), struct swap_eb, rb);
rb_erase(&eb->rb, rp);
eb->root = NULL;
d->trees[idx].count--;
return eb;
}
static unsigned int mtdswap_test_patt(unsigned int i)
{
return i % 2 ? 0x55555555 : 0xAAAAAAAA;
}
static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
struct swap_eb *eb)
{
struct mtd_info *mtd = d->mtd;
unsigned int test, i, j, patt, mtd_pages;
loff_t base, pos;
unsigned int *p1 = (unsigned int *)d->page_buf;
unsigned char *p2 = (unsigned char *)d->oob_buf;
struct mtd_oob_ops ops = { };
int ret;
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = mtd->writesize;
ops.ooblen = mtd->oobavail;
ops.ooboffs = 0;
ops.datbuf = d->page_buf;
ops.oobbuf = d->oob_buf;
base = mtdswap_eb_offset(d, eb);
mtd_pages = d->pages_per_eblk * PAGE_SIZE / mtd->writesize;
for (test = 0; test < 2; test++) {
pos = base;
for (i = 0; i < mtd_pages; i++) {
patt = mtdswap_test_patt(test + i);
memset(d->page_buf, patt, mtd->writesize);
memset(d->oob_buf, patt, mtd->oobavail);
ret = mtd_write_oob(mtd, pos, &ops);
if (ret)
goto error;
pos += mtd->writesize;
}
pos = base;
for (i = 0; i < mtd_pages; i++) {
ret = mtd_read_oob(mtd, pos, &ops);
if (ret)
goto error;
patt = mtdswap_test_patt(test + i);
for (j = 0; j < mtd->writesize/sizeof(int); j++)
if (p1[j] != patt)
goto error;
for (j = 0; j < mtd->oobavail; j++)
if (p2[j] != (unsigned char)patt)
goto error;
pos += mtd->writesize;
}
ret = mtdswap_erase_block(d, eb);
if (ret)
goto error;
}
eb->flags &= ~EBLOCK_READERR;
return 1;
error:
mtdswap_handle_badblock(d, eb);
return 0;
}
static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background)
{
struct swap_eb *eb;
int ret;
if (d->spare_eblks < MIN_SPARE_EBLOCKS)
return 1;
eb = mtdswap_pick_gc_eblk(d, background);
if (!eb)
return 1;
ret = mtdswap_gc_eblock(d, eb);
if (ret == -ENOSPC)
return 1;
if (eb->flags & EBLOCK_FAILED) {
mtdswap_handle_badblock(d, eb);
return 0;
}
eb->flags &= ~EBLOCK_BITFLIP;
ret = mtdswap_erase_block(d, eb);
if ((eb->flags & EBLOCK_READERR) &&
(ret || !mtdswap_eblk_passes(d, eb)))
return 0;
if (ret == 0)
ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_CLEAN);
if (ret == 0)
mtdswap_rb_add(d, eb, MTDSWAP_CLEAN);
else if (ret != -EIO && !mtd_is_eccerr(ret))
mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
return 0;
}
static void mtdswap_background(struct mtd_blktrans_dev *dev)
{
struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
int ret;
while (1) {
ret = mtdswap_gc(d, 1);
if (ret || mtd_blktrans_cease_background(dev))
return;
}
}
static void mtdswap_cleanup(struct mtdswap_dev *d)
{
vfree(d->eb_data);
vfree(d->revmap);
vfree(d->page_data);
kfree(d->oob_buf);
kfree(d->page_buf);
}
static int mtdswap_flush(struct mtd_blktrans_dev *dev)
{
struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
mtd_sync(d->mtd);
return 0;
}
static unsigned int mtdswap_badblocks(struct mtd_info *mtd, uint64_t size)
{
loff_t offset;
unsigned int badcnt;
badcnt = 0;
if (mtd_can_have_bb(mtd))
for (offset = 0; offset < size; offset += mtd->erasesize)
if (mtd_block_isbad(mtd, offset))
badcnt++;
return badcnt;
}
static int mtdswap_writesect(struct mtd_blktrans_dev *dev,
unsigned long page, char *buf)
{
struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
unsigned int newblock, mapped;
struct swap_eb *eb;
int ret;
d->sect_write_count++;
if (d->spare_eblks < MIN_SPARE_EBLOCKS)
return -ENOSPC;
if (header) {
/* Ignore writes to the header page */
if (unlikely(page == 0))
return 0;
page--;
}
mapped = d->page_data[page];
if (mapped <= BLOCK_MAX) {
eb = d->eb_data + (mapped / d->pages_per_eblk);
eb->active_count--;
mtdswap_store_eb(d, eb);
d->page_data[page] = BLOCK_UNDEF;
d->revmap[mapped] = PAGE_UNDEF;
}
ret = mtdswap_write_block(d, buf, page, &newblock, 0);
d->mtd_write_count++;
if (ret < 0)
return ret;
d->page_data[page] = newblock;
return 0;
}
/* Provide a dummy swap header for the kernel */
static int mtdswap_auto_header(struct mtdswap_dev *d, char *buf)
{
union swap_header *hd = (union swap_header *)(buf);
memset(buf, 0, PAGE_SIZE - 10);
hd->info.version = 1;
hd->info.last_page = d->mbd_dev->size - 1;
hd->info.nr_badpages = 0;
memcpy(buf + PAGE_SIZE - 10, "SWAPSPACE2", 10);
return 0;
}
static int mtdswap_readsect(struct mtd_blktrans_dev *dev,
unsigned long page, char *buf)
{
struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
struct mtd_info *mtd = d->mtd;
unsigned int realblock, retries;
loff_t readpos;
struct swap_eb *eb;
size_t retlen;
int ret;
d->sect_read_count++;
if (header) {
if (unlikely(page == 0))
return mtdswap_auto_header(d, buf);
page--;
}
realblock = d->page_data[page];
if (realblock > BLOCK_MAX) {
memset(buf, 0x0, PAGE_SIZE);
if (realblock == BLOCK_UNDEF)
return 0;
else
return -EIO;
}
eb = d->eb_data + (realblock / d->pages_per_eblk);
BUG_ON(d->revmap[realblock] == PAGE_UNDEF);
readpos = (loff_t)realblock << PAGE_SHIFT;
retries = 0;
retry:
ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, buf);
d->mtd_read_count++;
if (mtd_is_bitflip(ret)) {
eb->flags |= EBLOCK_BITFLIP;
mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
ret = 0;
}
if (ret < 0) {
dev_err(d->dev, "Read error %d\n", ret);
eb->flags |= EBLOCK_READERR;
mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
retries++;
if (retries < MTDSWAP_IO_RETRIES)
goto retry;
return ret;
}
if (retlen != PAGE_SIZE) {
dev_err(d->dev, "Short read %zd\n", retlen);
return -EIO;
}
return 0;
}
static int mtdswap_discard(struct mtd_blktrans_dev *dev, unsigned long first,
unsigned nr_pages)
{
struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
unsigned long page;
struct swap_eb *eb;
unsigned int mapped;
d->discard_count++;
for (page = first; page < first + nr_pages; page++) {
mapped = d->page_data[page];
if (mapped <= BLOCK_MAX) {
eb = d->eb_data + (mapped / d->pages_per_eblk);
eb->active_count--;
mtdswap_store_eb(d, eb);
d->page_data[page] = BLOCK_UNDEF;
d->revmap[mapped] = PAGE_UNDEF;
d->discard_page_count++;
} else if (mapped == BLOCK_ERROR) {
d->page_data[page] = BLOCK_UNDEF;
d->discard_page_count++;
}
}
return 0;
}
static int mtdswap_show(struct seq_file *s, void *data)
{
struct mtdswap_dev *d = (struct mtdswap_dev *) s->private;
unsigned long sum;
unsigned int count[MTDSWAP_TREE_CNT];
unsigned int min[MTDSWAP_TREE_CNT];
unsigned int max[MTDSWAP_TREE_CNT];
unsigned int i, cw = 0, cwp = 0, cwecount = 0, bb_cnt, mapped, pages;
uint64_t use_size;
static const char * const name[] = {
"clean", "used", "low", "high", "dirty", "bitflip", "failing"
};
mutex_lock(&d->mbd_dev->lock);
for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
struct rb_root *root = &d->trees[i].root;
if (root->rb_node) {
count[i] = d->trees[i].count;
min[i] = MTDSWAP_ECNT_MIN(root);
max[i] = MTDSWAP_ECNT_MAX(root);
} else
count[i] = 0;
}
if (d->curr_write) {
cw = 1;
cwp = d->curr_write_pos;
cwecount = d->curr_write->erase_count;
}
sum = 0;
for (i = 0; i < d->eblks; i++)
sum += d->eb_data[i].erase_count;
use_size = (uint64_t)d->eblks * d->mtd->erasesize;
bb_cnt = mtdswap_badblocks(d->mtd, use_size);
mapped = 0;
pages = d->mbd_dev->size;
for (i = 0; i < pages; i++)
if (d->page_data[i] != BLOCK_UNDEF)
mapped++;
mutex_unlock(&d->mbd_dev->lock);
for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
if (!count[i])
continue;
if (min[i] != max[i])
seq_printf(s, "%s:\t%5d erase blocks, erased min %d, "
"max %d times\n",
name[i], count[i], min[i], max[i]);
else
seq_printf(s, "%s:\t%5d erase blocks, all erased %d "
"times\n", name[i], count[i], min[i]);
}
if (bb_cnt)
seq_printf(s, "bad:\t%5u erase blocks\n", bb_cnt);
if (cw)
seq_printf(s, "current erase block: %u pages used, %u free, "
"erased %u times\n",
cwp, d->pages_per_eblk - cwp, cwecount);
seq_printf(s, "total erasures: %lu\n", sum);
seq_puts(s, "\n");
seq_printf(s, "mtdswap_readsect count: %llu\n", d->sect_read_count);
seq_printf(s, "mtdswap_writesect count: %llu\n", d->sect_write_count);
seq_printf(s, "mtdswap_discard count: %llu\n", d->discard_count);
seq_printf(s, "mtd read count: %llu\n", d->mtd_read_count);
seq_printf(s, "mtd write count: %llu\n", d->mtd_write_count);
seq_printf(s, "discarded pages count: %llu\n", d->discard_page_count);
seq_puts(s, "\n");
seq_printf(s, "total pages: %u\n", pages);
seq_printf(s, "pages mapped: %u\n", mapped);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(mtdswap);
static int mtdswap_add_debugfs(struct mtdswap_dev *d)
{
struct dentry *root = d->mtd->dbg.dfs_dir;
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
if (IS_ERR_OR_NULL(root))
return -1;
debugfs_create_file("mtdswap_stats", S_IRUSR, root, d, &mtdswap_fops);
return 0;
}
static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
unsigned int spare_cnt)
{
struct mtd_info *mtd = d->mbd_dev->mtd;
unsigned int i, eblk_bytes, pages, blocks;
int ret = -ENOMEM;
d->mtd = mtd;
d->eblks = eblocks;
d->spare_eblks = spare_cnt;
d->pages_per_eblk = mtd->erasesize >> PAGE_SHIFT;
pages = d->mbd_dev->size;
blocks = eblocks * d->pages_per_eblk;
for (i = 0; i < MTDSWAP_TREE_CNT; i++)
d->trees[i].root = RB_ROOT;
d->page_data = vmalloc(array_size(pages, sizeof(int)));
if (!d->page_data)
goto page_data_fail;
d->revmap = vmalloc(array_size(blocks, sizeof(int)));
if (!d->revmap)
goto revmap_fail;
eblk_bytes = sizeof(struct swap_eb)*d->eblks;
d->eb_data = vzalloc(eblk_bytes);
if (!d->eb_data)
goto eb_data_fail;
for (i = 0; i < pages; i++)
d->page_data[i] = BLOCK_UNDEF;
for (i = 0; i < blocks; i++)
d->revmap[i] = PAGE_UNDEF;
d->page_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!d->page_buf)
goto page_buf_fail;
d->oob_buf = kmalloc_array(2, mtd->oobavail, GFP_KERNEL);
if (!d->oob_buf)
goto oob_buf_fail;
mtdswap_scan_eblks(d);
return 0;
oob_buf_fail:
kfree(d->page_buf);
page_buf_fail:
vfree(d->eb_data);
eb_data_fail:
vfree(d->revmap);
revmap_fail:
vfree(d->page_data);
page_data_fail:
printk(KERN_ERR "%s: init failed (%d)\n", MTDSWAP_PREFIX, ret);
return ret;
}
static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
struct mtdswap_dev *d;
struct mtd_blktrans_dev *mbd_dev;
char *parts;
char *this_opt;
unsigned long part;
unsigned int eblocks, eavailable, bad_blocks, spare_cnt;
uint64_t swap_size, use_size, size_limit;
int ret;
parts = &partitions[0];
if (!*parts)
return;
while ((this_opt = strsep(&parts, ",")) != NULL) {
if (kstrtoul(this_opt, 0, &part) < 0)
return;
if (mtd->index == part)
break;
}
if (mtd->index != part)
return;
if (mtd->erasesize < PAGE_SIZE || mtd->erasesize % PAGE_SIZE) {
printk(KERN_ERR "%s: Erase size %u not multiple of PAGE_SIZE "
"%lu\n", MTDSWAP_PREFIX, mtd->erasesize, PAGE_SIZE);
return;
}
if (PAGE_SIZE % mtd->writesize || mtd->writesize > PAGE_SIZE) {
printk(KERN_ERR "%s: PAGE_SIZE %lu not multiple of write size"
" %u\n", MTDSWAP_PREFIX, PAGE_SIZE, mtd->writesize);
return;
}
if (!mtd->oobsize || mtd->oobavail < MTDSWAP_OOBSIZE) {
printk(KERN_ERR "%s: Not enough free bytes in OOB, "
"%d available, %zu needed.\n",
MTDSWAP_PREFIX, mtd->oobavail, MTDSWAP_OOBSIZE);
return;
}
if (spare_eblocks > 100)
spare_eblocks = 100;
use_size = mtd->size;
size_limit = (uint64_t) BLOCK_MAX * PAGE_SIZE;
if (mtd->size > size_limit) {
printk(KERN_WARNING "%s: Device too large. Limiting size to "
"%llu bytes\n", MTDSWAP_PREFIX, size_limit);
use_size = size_limit;
}
eblocks = mtd_div_by_eb(use_size, mtd);
use_size = (uint64_t)eblocks * mtd->erasesize;
bad_blocks = mtdswap_badblocks(mtd, use_size);
eavailable = eblocks - bad_blocks;
if (eavailable < MIN_ERASE_BLOCKS) {
printk(KERN_ERR "%s: Not enough erase blocks. %u available, "
"%d needed\n", MTDSWAP_PREFIX, eavailable,
MIN_ERASE_BLOCKS);
return;
}
spare_cnt = div_u64((uint64_t)eavailable * spare_eblocks, 100);
if (spare_cnt < MIN_SPARE_EBLOCKS)
spare_cnt = MIN_SPARE_EBLOCKS;
if (spare_cnt > eavailable - 1)
spare_cnt = eavailable - 1;
swap_size = (uint64_t)(eavailable - spare_cnt) * mtd->erasesize +
(header ? PAGE_SIZE : 0);
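/*
* Illustrative sizing example (assumed numbers, not derived from a real
* device): a 256 MiB partition with 128 KiB eraseblocks and no bad blocks
* gives eblocks = 2048; with the default spare_eblocks = 10 this yields
* spare_cnt = 204, and with header disabled swap_size = 1844 * 131072 =
* 241696768 bytes, i.e. roughly 230 MiB of usable swap.
*/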
printk(KERN_INFO "%s: Enabling MTD swap on device %lu, size %llu KB, "
"%u spare, %u bad blocks\n",
MTDSWAP_PREFIX, part, swap_size / 1024, spare_cnt, bad_blocks);
d = kzalloc(sizeof(struct mtdswap_dev), GFP_KERNEL);
if (!d)
return;
mbd_dev = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
if (!mbd_dev) {
kfree(d);
return;
}
d->mbd_dev = mbd_dev;
mbd_dev->priv = d;
mbd_dev->mtd = mtd;
mbd_dev->devnum = mtd->index;
mbd_dev->size = swap_size >> PAGE_SHIFT;
mbd_dev->tr = tr;
if (!(mtd->flags & MTD_WRITEABLE))
mbd_dev->readonly = 1;
if (mtdswap_init(d, eblocks, spare_cnt) < 0)
goto init_failed;
if (add_mtd_blktrans_dev(mbd_dev) < 0)
goto cleanup;
d->dev = disk_to_dev(mbd_dev->disk);
ret = mtdswap_add_debugfs(d);
if (ret < 0)
goto debugfs_failed;
return;
debugfs_failed:
del_mtd_blktrans_dev(mbd_dev);
cleanup:
mtdswap_cleanup(d);
init_failed:
kfree(mbd_dev);
kfree(d);
}
static void mtdswap_remove_dev(struct mtd_blktrans_dev *dev)
{
struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
del_mtd_blktrans_dev(dev);
mtdswap_cleanup(d);
kfree(d);
}
static struct mtd_blktrans_ops mtdswap_ops = {
.name = "mtdswap",
.major = 0,
.part_bits = 0,
.blksize = PAGE_SIZE,
.flush = mtdswap_flush,
.readsect = mtdswap_readsect,
.writesect = mtdswap_writesect,
.discard = mtdswap_discard,
.background = mtdswap_background,
.add_mtd = mtdswap_add_mtd,
.remove_dev = mtdswap_remove_dev,
.owner = THIS_MODULE,
};
module_mtd_blktrans(mtdswap_ops);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <[email protected]>");
MODULE_DESCRIPTION("Block device access to an MTD suitable for using as "
"swap space");
| linux-master | drivers/mtd/mtdswap.c |
// SPDX-License-Identifier: GPL-2.0
#define dev_fmt(fmt) "mtdoops-pstore: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pstore_blk.h>
#include <linux/mtd/mtd.h>
#include <linux/bitops.h>
#include <linux/slab.h>
static struct mtdpstore_context {
int index;
struct pstore_blk_config info;
struct pstore_device_info dev;
struct mtd_info *mtd;
unsigned long *rmmap; /* removed bit map */
unsigned long *usedmap; /* used bit map */
/*
* used for panic write
* As there are no block_isbad for panic case, we should keep this
* status before panic to ensure panic_write not failed.
*/
unsigned long *badmap; /* bad block bit map */
} oops_cxt;
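/*
* Note on granularity: usedmap and rmmap track zones of info.kmsg_size bytes,
* while badmap tracks whole eraseblocks (see the bitmap allocations in
* mtdpstore_notify_add()).
*/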
static int mtdpstore_block_isbad(struct mtdpstore_context *cxt, loff_t off)
{
int ret;
struct mtd_info *mtd = cxt->mtd;
u64 blknum;
off = ALIGN_DOWN(off, mtd->erasesize);
blknum = div_u64(off, mtd->erasesize);
if (test_bit(blknum, cxt->badmap))
return true;
ret = mtd_block_isbad(mtd, off);
if (ret < 0) {
dev_err(&mtd->dev, "mtd_block_isbad failed, aborting\n");
return ret;
} else if (ret > 0) {
set_bit(blknum, cxt->badmap);
return true;
}
return false;
}
static inline int mtdpstore_panic_block_isbad(struct mtdpstore_context *cxt,
loff_t off)
{
struct mtd_info *mtd = cxt->mtd;
u64 blknum;
off = ALIGN_DOWN(off, mtd->erasesize);
blknum = div_u64(off, mtd->erasesize);
return test_bit(blknum, cxt->badmap);
}
static inline void mtdpstore_mark_used(struct mtdpstore_context *cxt,
loff_t off)
{
struct mtd_info *mtd = cxt->mtd;
u64 zonenum = div_u64(off, cxt->info.kmsg_size);
dev_dbg(&mtd->dev, "mark zone %llu used\n", zonenum);
set_bit(zonenum, cxt->usedmap);
}
static inline void mtdpstore_mark_unused(struct mtdpstore_context *cxt,
loff_t off)
{
struct mtd_info *mtd = cxt->mtd;
u64 zonenum = div_u64(off, cxt->info.kmsg_size);
dev_dbg(&mtd->dev, "mark zone %llu unused\n", zonenum);
clear_bit(zonenum, cxt->usedmap);
}
static inline void mtdpstore_block_mark_unused(struct mtdpstore_context *cxt,
loff_t off)
{
struct mtd_info *mtd = cxt->mtd;
u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
u64 zonenum;
off = ALIGN_DOWN(off, mtd->erasesize);
zonenum = div_u64(off, cxt->info.kmsg_size);
while (zonecnt > 0) {
dev_dbg(&mtd->dev, "mark zone %llu unused\n", zonenum);
clear_bit(zonenum, cxt->usedmap);
zonenum++;
zonecnt--;
}
}
static inline int mtdpstore_is_used(struct mtdpstore_context *cxt, loff_t off)
{
u64 zonenum = div_u64(off, cxt->info.kmsg_size);
u64 blknum = div_u64(off, cxt->mtd->erasesize);
if (test_bit(blknum, cxt->badmap))
return true;
return test_bit(zonenum, cxt->usedmap);
}
static int mtdpstore_block_is_used(struct mtdpstore_context *cxt,
loff_t off)
{
struct mtd_info *mtd = cxt->mtd;
u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
u64 zonenum;
off = ALIGN_DOWN(off, mtd->erasesize);
zonenum = div_u64(off, cxt->info.kmsg_size);
while (zonecnt > 0) {
if (test_bit(zonenum, cxt->usedmap))
return true;
zonenum++;
zonecnt--;
}
return false;
}
static int mtdpstore_is_empty(struct mtdpstore_context *cxt, char *buf,
size_t size)
{
struct mtd_info *mtd = cxt->mtd;
size_t sz;
int i;
sz = min_t(uint32_t, size, mtd->writesize / 4);
for (i = 0; i < sz; i++) {
if (buf[i] != (char)0xFF)
return false;
}
return true;
}
static void mtdpstore_mark_removed(struct mtdpstore_context *cxt, loff_t off)
{
struct mtd_info *mtd = cxt->mtd;
u64 zonenum = div_u64(off, cxt->info.kmsg_size);
dev_dbg(&mtd->dev, "mark zone %llu removed\n", zonenum);
set_bit(zonenum, cxt->rmmap);
}
static void mtdpstore_block_clear_removed(struct mtdpstore_context *cxt,
loff_t off)
{
struct mtd_info *mtd = cxt->mtd;
u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
u64 zonenum;
off = ALIGN_DOWN(off, mtd->erasesize);
zonenum = div_u64(off, cxt->info.kmsg_size);
while (zonecnt > 0) {
clear_bit(zonenum, cxt->rmmap);
zonenum++;
zonecnt--;
}
}
static int mtdpstore_block_is_removed(struct mtdpstore_context *cxt,
loff_t off)
{
struct mtd_info *mtd = cxt->mtd;
u32 zonecnt = mtd->erasesize / cxt->info.kmsg_size;
u64 zonenum;
off = ALIGN_DOWN(off, mtd->erasesize);
zonenum = div_u64(off, cxt->info.kmsg_size);
while (zonecnt > 0) {
if (test_bit(zonenum, cxt->rmmap))
return true;
zonenum++;
zonecnt--;
}
return false;
}
static int mtdpstore_erase_do(struct mtdpstore_context *cxt, loff_t off)
{
struct mtd_info *mtd = cxt->mtd;
struct erase_info erase;
int ret;
off = ALIGN_DOWN(off, cxt->mtd->erasesize);
dev_dbg(&mtd->dev, "try to erase off 0x%llx\n", off);
erase.len = cxt->mtd->erasesize;
erase.addr = off;
ret = mtd_erase(cxt->mtd, &erase);
if (!ret)
mtdpstore_block_clear_removed(cxt, off);
else
dev_err(&mtd->dev, "erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
(unsigned long long)erase.addr,
(unsigned long long)erase.len, cxt->info.device);
return ret;
}
/*
* Called while a file is being removed.
*
* To avoid over-erasing, erase a block only when the whole block is unused.
* If the block still contains a valid log, the erase is done lazily in
* flush_removed() at unregister time.
*/
static ssize_t mtdpstore_erase(size_t size, loff_t off)
{
struct mtdpstore_context *cxt = &oops_cxt;
if (mtdpstore_block_isbad(cxt, off))
return -EIO;
mtdpstore_mark_unused(cxt, off);
/* If the block still has valid data, mtdpstore does the erase lazily */
if (likely(mtdpstore_block_is_used(cxt, off))) {
mtdpstore_mark_removed(cxt, off);
return 0;
}
/* all zones are unused, erase it */
return mtdpstore_erase_do(cxt, off);
}
/*
* What does "security" mean for mtdpstore?
* Since no erase is possible in the panic case, we must ensure that at least
* one zone is writable; otherwise the panic write will fail.
* If a zone is in use, the write operation returns -ENOMSG and pstore/blk
* tries the zones one by one until it finds an empty one. So we do not need
* to ensure that the next zone is empty, only that at least one is.
*/
static int mtdpstore_security(struct mtdpstore_context *cxt, loff_t off)
{
int ret = 0, i;
struct mtd_info *mtd = cxt->mtd;
u32 zonenum = (u32)div_u64(off, cxt->info.kmsg_size);
u32 zonecnt = (u32)div_u64(cxt->mtd->size, cxt->info.kmsg_size);
u32 blkcnt = (u32)div_u64(cxt->mtd->size, cxt->mtd->erasesize);
u32 erasesize = cxt->mtd->erasesize;
for (i = 0; i < zonecnt; i++) {
u32 num = (zonenum + i) % zonecnt;
/* found empty zone */
if (!test_bit(num, cxt->usedmap))
return 0;
}
/* If there is no empty zone at all, we have no choice but to erase */
while (blkcnt--) {
div64_u64_rem(off + erasesize, cxt->mtd->size, (u64 *)&off);
if (mtdpstore_block_isbad(cxt, off))
continue;
ret = mtdpstore_erase_do(cxt, off);
if (!ret) {
mtdpstore_block_mark_unused(cxt, off);
break;
}
}
if (ret)
dev_err(&mtd->dev, "all blocks bad!\n");
dev_dbg(&mtd->dev, "end security\n");
return ret;
}
static ssize_t mtdpstore_write(const char *buf, size_t size, loff_t off)
{
struct mtdpstore_context *cxt = &oops_cxt;
struct mtd_info *mtd = cxt->mtd;
size_t retlen;
int ret;
if (mtdpstore_block_isbad(cxt, off))
return -ENOMSG;
/* zone is used, please try next one */
if (mtdpstore_is_used(cxt, off))
return -ENOMSG;
dev_dbg(&mtd->dev, "try to write off 0x%llx size %zu\n", off, size);
ret = mtd_write(cxt->mtd, off, size, &retlen, (u_char *)buf);
if (ret < 0 || retlen != size) {
dev_err(&mtd->dev, "write failure at %lld (%zu of %zu written), err %d\n",
off, retlen, size, ret);
return -EIO;
}
mtdpstore_mark_used(cxt, off);
mtdpstore_security(cxt, off);
return retlen;
}
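/* Corrected bitflips and ECC errors are not treated as I/O errors here; only other negative return codes are. */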
static inline bool mtdpstore_is_io_error(int ret)
{
return ret < 0 && !mtd_is_bitflip(ret) && !mtd_is_eccerr(ret);
}
/*
* All zones will be read, since pstore/blk reads the zones one by one
* during recovery.
*/
static ssize_t mtdpstore_read(char *buf, size_t size, loff_t off)
{
struct mtdpstore_context *cxt = &oops_cxt;
struct mtd_info *mtd = cxt->mtd;
size_t retlen, done;
int ret;
if (mtdpstore_block_isbad(cxt, off))
return -ENOMSG;
dev_dbg(&mtd->dev, "try to read off 0x%llx size %zu\n", off, size);
for (done = 0, retlen = 0; done < size; done += retlen) {
retlen = 0;
ret = mtd_read(cxt->mtd, off + done, size - done, &retlen,
(u_char *)buf + done);
if (mtdpstore_is_io_error(ret)) {
dev_err(&mtd->dev, "read failure at %lld (%zu of %zu read), err %d\n",
off + done, retlen, size - done, ret);
/* the zone may be broken, try next one */
return -ENOMSG;
}
/*
* ECC error. The impact on the log data is usually small, and the
* data may still be readable and intelligible, so mtdpstore just
* hands over whatever it got and lets the user judge whether the
* data is valid or not.
*/
if (mtd_is_eccerr(ret)) {
dev_err(&mtd->dev, "ecc error at %lld (%zu of %zu read), err %d\n",
off + done, retlen, size - done, ret);
/* the driver may not set retlen on an ECC error */
retlen = retlen == 0 ? size - done : retlen;
}
}
if (mtdpstore_is_empty(cxt, buf, size))
mtdpstore_mark_unused(cxt, off);
else
mtdpstore_mark_used(cxt, off);
mtdpstore_security(cxt, off);
return retlen;
}
static ssize_t mtdpstore_panic_write(const char *buf, size_t size, loff_t off)
{
struct mtdpstore_context *cxt = &oops_cxt;
struct mtd_info *mtd = cxt->mtd;
size_t retlen;
int ret;
if (mtdpstore_panic_block_isbad(cxt, off))
return -ENOMSG;
/* zone is used, please try next one */
if (mtdpstore_is_used(cxt, off))
return -ENOMSG;
ret = mtd_panic_write(cxt->mtd, off, size, &retlen, (u_char *)buf);
if (ret < 0 || size != retlen) {
dev_err(&mtd->dev, "panic write failure at %lld (%zu of %zu read), err %d\n",
off, retlen, size, ret);
return -EIO;
}
mtdpstore_mark_used(cxt, off);
return retlen;
}
static void mtdpstore_notify_add(struct mtd_info *mtd)
{
int ret;
struct mtdpstore_context *cxt = &oops_cxt;
struct pstore_blk_config *info = &cxt->info;
unsigned long longcnt;
if (!strcmp(mtd->name, info->device))
cxt->index = mtd->index;
if (mtd->index != cxt->index || cxt->index < 0)
return;
dev_dbg(&mtd->dev, "found matching MTD device %s\n", mtd->name);
if (mtd->size < info->kmsg_size * 2) {
dev_err(&mtd->dev, "MTD partition %d not big enough\n",
mtd->index);
return;
}
/*
* kmsg_size must be aligned to 4096 bytes, a limit imposed by
* pstore/blk. The default value of kmsg_size is 64 KB. If kmsg_size
* were larger than the erasesize, errors would occur, since the
* mtdpstore design relies on kmsg_size <= erasesize.
*/
if (mtd->erasesize < info->kmsg_size) {
dev_err(&mtd->dev, "eraseblock size of MTD partition %d too small\n",
mtd->index);
return;
}
if (unlikely(info->kmsg_size % mtd->writesize)) {
dev_err(&mtd->dev, "record size %lu KB must align to write size %d KB\n",
info->kmsg_size / 1024,
mtd->writesize / 1024);
return;
}
longcnt = BITS_TO_LONGS(div_u64(mtd->size, info->kmsg_size));
cxt->rmmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
cxt->usedmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
longcnt = BITS_TO_LONGS(div_u64(mtd->size, mtd->erasesize));
cxt->badmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
/* just support dmesg right now */
cxt->dev.flags = PSTORE_FLAGS_DMESG;
cxt->dev.zone.read = mtdpstore_read;
cxt->dev.zone.write = mtdpstore_write;
cxt->dev.zone.erase = mtdpstore_erase;
cxt->dev.zone.panic_write = mtdpstore_panic_write;
cxt->dev.zone.total_size = mtd->size;
ret = register_pstore_device(&cxt->dev);
if (ret) {
dev_err(&mtd->dev, "mtd%d register to psblk failed\n",
mtd->index);
return;
}
cxt->mtd = mtd;
dev_info(&mtd->dev, "Attached to MTD device %d\n", mtd->index);
}
static int mtdpstore_flush_removed_do(struct mtdpstore_context *cxt,
loff_t off, size_t size)
{
struct mtd_info *mtd = cxt->mtd;
u_char *buf;
int ret;
size_t retlen;
struct erase_info erase;
buf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* 1st. read to cache */
ret = mtd_read(mtd, off, mtd->erasesize, &retlen, buf);
if (mtdpstore_is_io_error(ret))
goto free;
/* 2nd. erase block */
erase.len = mtd->erasesize;
erase.addr = off;
ret = mtd_erase(mtd, &erase);
if (ret)
goto free;
/* 3rd. write back */
while (size) {
unsigned int zonesize = cxt->info.kmsg_size;
/* there is valid data on block, write back */
if (mtdpstore_is_used(cxt, off)) {
ret = mtd_write(mtd, off, zonesize, &retlen, buf);
if (ret)
dev_err(&mtd->dev, "write failure at %lld (%zu of %u written), err %d\n",
off, retlen, zonesize, ret);
}
off += zonesize;
size -= min_t(unsigned int, zonesize, size);
}
free:
kfree(buf);
return ret;
}
/*
* What does mtdpstore_flush_removed() do?
* When the user removes a log file from the pstore filesystem, mtdpstore must
* make sure the file is really gone. If the whole block is no longer used, the
* block is simply erased. However, if the block still contains a valid log,
* all mtdpstore can do is erase the block and write the valid log back.
*/
static int mtdpstore_flush_removed(struct mtdpstore_context *cxt)
{
struct mtd_info *mtd = cxt->mtd;
int ret;
loff_t off;
u32 blkcnt = (u32)div_u64(mtd->size, mtd->erasesize);
for (off = 0; blkcnt > 0; blkcnt--, off += mtd->erasesize) {
ret = mtdpstore_block_isbad(cxt, off);
if (ret)
continue;
ret = mtdpstore_block_is_removed(cxt, off);
if (!ret)
continue;
ret = mtdpstore_flush_removed_do(cxt, off, mtd->erasesize);
if (ret)
return ret;
}
return 0;
}
static void mtdpstore_notify_remove(struct mtd_info *mtd)
{
struct mtdpstore_context *cxt = &oops_cxt;
if (mtd->index != cxt->index || cxt->index < 0)
return;
mtdpstore_flush_removed(cxt);
unregister_pstore_device(&cxt->dev);
kfree(cxt->badmap);
kfree(cxt->usedmap);
kfree(cxt->rmmap);
cxt->mtd = NULL;
cxt->index = -1;
}
static struct mtd_notifier mtdpstore_notifier = {
.add = mtdpstore_notify_add,
.remove = mtdpstore_notify_remove,
};
static int __init mtdpstore_init(void)
{
int ret;
struct mtdpstore_context *cxt = &oops_cxt;
struct pstore_blk_config *info = &cxt->info;
ret = pstore_blk_get_config(info);
if (unlikely(ret))
return ret;
if (strlen(info->device) == 0) {
pr_err("mtd device must be supplied (device name is empty)\n");
return -EINVAL;
}
if (!info->kmsg_size) {
pr_err("no backend enabled (kmsg_size is 0)\n");
return -EINVAL;
}
/* Setup the MTD device to use */
ret = kstrtoint((char *)info->device, 0, &cxt->index);
if (ret)
cxt->index = -1;
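/* A non-numeric device name leaves index at -1; mtdpstore_notify_add() then matches the MTD by name instead. */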
register_mtd_user(&mtdpstore_notifier);
return 0;
}
module_init(mtdpstore_init);
static void __exit mtdpstore_exit(void)
{
unregister_mtd_user(&mtdpstore_notifier);
}
module_exit(mtdpstore_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("WeiXiong Liao <[email protected]>");
MODULE_DESCRIPTION("MTD backend for pstore/blk");
| linux-master | drivers/mtd/mtdpstore.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NFTL mount code with extensive checks
*
* Author: Fabrice Bellard ([email protected])
* Copyright © 2000 Netgem S.A.
* Copyright © 1999-2010 David Woodhouse <[email protected]>
*/
#include <linux/kernel.h>
#include <asm/errno.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/nftl.h>
#define SECTORSIZE 512
/* find_boot_record: Find the NFTL Media Header and its Spare copy, which contain the
* various device information of the NFTL partition and the Bad Unit Table. Update
* ReplUnitTable[] according to the Bad Unit Table. ReplUnitTable[] is used for
* Erase Unit management by other routines in nftl.c and nftlmount.c
*/
static int find_boot_record(struct NFTLrecord *nftl)
{
struct nftl_uci1 h1;
unsigned int block, boot_record_count = 0;
size_t retlen;
u8 buf[SECTORSIZE];
struct NFTLMediaHeader *mh = &nftl->MediaHdr;
struct mtd_info *mtd = nftl->mbd.mtd;
unsigned int i;
/* Assume logical EraseSize == physical erasesize for starting the scan.
We'll sort it out later if we find a MediaHeader which says otherwise */
/* Actually, we won't. The new DiskOnChip driver has already scanned
the MediaHeader and adjusted the virtual erasesize it presents in
the mtd device accordingly. We could even get rid of
nftl->EraseSize if there were any point in doing so. */
nftl->EraseSize = nftl->mbd.mtd->erasesize;
nftl->nb_blocks = (u32)nftl->mbd.mtd->size / nftl->EraseSize;
nftl->MediaUnit = BLOCK_NIL;
nftl->SpareMediaUnit = BLOCK_NIL;
/* search for a valid boot record */
for (block = 0; block < nftl->nb_blocks; block++) {
int ret;
/* Check for the ANAND header first. Then we can whinge if it's found but
later checks fail */
ret = mtd_read(mtd, block * nftl->EraseSize, SECTORSIZE,
&retlen, buf);
/* We ignore ret in case the ECC of the MediaHeader is invalid
(which is apparently acceptable) */
if (retlen != SECTORSIZE) {
static int warncount = 5;
if (warncount) {
printk(KERN_WARNING "Block read at 0x%x of mtd%d failed: %d\n",
block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
if (!--warncount)
printk(KERN_WARNING "Further failures for this block will not be printed\n");
}
continue;
}
if (retlen < 6 || memcmp(buf, "ANAND", 6)) {
/* ANAND\0 not found. Continue */
#if 0
printk(KERN_DEBUG "ANAND header not found at 0x%x in mtd%d\n",
block * nftl->EraseSize, nftl->mbd.mtd->index);
#endif
continue;
}
/* To be safer with BIOS, also use erase mark as discriminant */
ret = nftl_read_oob(mtd, block * nftl->EraseSize +
SECTORSIZE + 8, 8, &retlen,
(char *)&h1);
if (ret < 0) {
printk(KERN_WARNING "ANAND header found at 0x%x in mtd%d, but OOB data read failed (err %d)\n",
block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
continue;
}
#if 0 /* Some people seem to have devices without ECC or erase marks
on the Media Header blocks. There are enough other sanity
checks in here that we can probably do without it.
*/
if (le16_to_cpu(h1.EraseMark | h1.EraseMark1) != ERASE_MARK) {
printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but erase mark not present (0x%04x,0x%04x instead)\n",
block * nftl->EraseSize, nftl->mbd.mtd->index,
le16_to_cpu(h1.EraseMark), le16_to_cpu(h1.EraseMark1));
continue;
}
/* Finally reread to check ECC */
ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE,
&retlen, buf);
if (ret < 0) {
printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but ECC read failed (err %d)\n",
block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
continue;
}
/* Paranoia. Check the ANAND header is still there after the ECC read */
if (memcmp(buf, "ANAND", 6)) {
printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but went away on reread!\n",
block * nftl->EraseSize, nftl->mbd.mtd->index);
printk(KERN_NOTICE "New data are: %6ph\n", buf);
continue;
}
#endif
/* OK, we like it. */
if (boot_record_count) {
/* We've already processed one. So we just check if
this one is the same as the first one we found */
if (memcmp(mh, buf, sizeof(struct NFTLMediaHeader))) {
printk(KERN_NOTICE "NFTL Media Headers at 0x%x and 0x%x disagree.\n",
nftl->MediaUnit * nftl->EraseSize, block * nftl->EraseSize);
/* if (debug) Print both side by side */
if (boot_record_count < 2) {
/* We haven't yet seen two real ones */
return -1;
}
continue;
}
if (boot_record_count == 1)
nftl->SpareMediaUnit = block;
/* Mark this boot record (NFTL MediaHeader) block as reserved */
nftl->ReplUnitTable[block] = BLOCK_RESERVED;
boot_record_count++;
continue;
}
/* This is the first we've seen. Copy the media header structure into place */
memcpy(mh, buf, sizeof(struct NFTLMediaHeader));
/* Do some sanity checks on it */
#if 0
The new DiskOnChip driver scans the MediaHeader itself, and presents a virtual
erasesize based on UnitSizeFactor. So the erasesize we read from the mtd
device is already correct.
if (mh->UnitSizeFactor == 0) {
printk(KERN_NOTICE "NFTL: UnitSizeFactor 0x00 detected. This violates the spec but we think we know what it means...\n");
} else if (mh->UnitSizeFactor < 0xfc) {
printk(KERN_NOTICE "Sorry, we don't support UnitSizeFactor 0x%02x\n",
mh->UnitSizeFactor);
return -1;
} else if (mh->UnitSizeFactor != 0xff) {
printk(KERN_NOTICE "WARNING: Support for NFTL with UnitSizeFactor 0x%02x is experimental\n",
mh->UnitSizeFactor);
nftl->EraseSize = nftl->mbd.mtd->erasesize << (0xff - mh->UnitSizeFactor);
nftl->nb_blocks = (u32)nftl->mbd.mtd->size / nftl->EraseSize;
}
#endif
nftl->nb_boot_blocks = le16_to_cpu(mh->FirstPhysicalEUN);
if ((nftl->nb_boot_blocks + 2) >= nftl->nb_blocks) {
printk(KERN_NOTICE "NFTL Media Header sanity check failed:\n");
printk(KERN_NOTICE "nb_boot_blocks (%d) + 2 > nb_blocks (%d)\n",
nftl->nb_boot_blocks, nftl->nb_blocks);
return -1;
}
nftl->numvunits = le32_to_cpu(mh->FormattedSize) / nftl->EraseSize;
if (nftl->numvunits > (nftl->nb_blocks - nftl->nb_boot_blocks - 2)) {
printk(KERN_NOTICE "NFTL Media Header sanity check failed:\n");
printk(KERN_NOTICE "numvunits (%d) > nb_blocks (%d) - nb_boot_blocks(%d) - 2\n",
nftl->numvunits, nftl->nb_blocks, nftl->nb_boot_blocks);
return -1;
}
nftl->mbd.size = nftl->numvunits * (nftl->EraseSize / SECTORSIZE);
/* If we're not using the last sectors in the device for some reason,
reduce nb_blocks accordingly so we forget they're there */
nftl->nb_blocks = le16_to_cpu(mh->NumEraseUnits) + le16_to_cpu(mh->FirstPhysicalEUN);
/* XXX: will be suppressed */
nftl->lastEUN = nftl->nb_blocks - 1;
/* memory alloc */
nftl->EUNtable = kmalloc_array(nftl->nb_blocks, sizeof(u16),
GFP_KERNEL);
if (!nftl->EUNtable)
return -ENOMEM;
nftl->ReplUnitTable = kmalloc_array(nftl->nb_blocks,
sizeof(u16),
GFP_KERNEL);
if (!nftl->ReplUnitTable) {
kfree(nftl->EUNtable);
return -ENOMEM;
}
/* mark the bios blocks (blocks before NFTL MediaHeader) as reserved */
for (i = 0; i < nftl->nb_boot_blocks; i++)
nftl->ReplUnitTable[i] = BLOCK_RESERVED;
/* mark all remaining blocks as potentially containing data */
for (; i < nftl->nb_blocks; i++) {
nftl->ReplUnitTable[i] = BLOCK_NOTEXPLORED;
}
/* Mark this boot record (NFTL MediaHeader) block as reserved */
nftl->ReplUnitTable[block] = BLOCK_RESERVED;
/* read the Bad Erase Unit Table and modify ReplUnitTable[] accordingly */
for (i = 0; i < nftl->nb_blocks; i++) {
#if 0
The new DiskOnChip driver already scanned the bad block table. Just query it.
if ((i & (SECTORSIZE - 1)) == 0) {
/* read one sector for every SECTORSIZE of blocks */
ret = mtd->read(nftl->mbd.mtd,
block * nftl->EraseSize + i +
SECTORSIZE, SECTORSIZE,
&retlen, buf);
if (ret < 0) {
printk(KERN_NOTICE "Read of bad sector table failed (err %d)\n",
ret);
kfree(nftl->ReplUnitTable);
kfree(nftl->EUNtable);
return -1;
}
}
/* mark the Bad Erase Unit as RESERVED in ReplUnitTable */
if (buf[i & (SECTORSIZE - 1)] != 0xff)
nftl->ReplUnitTable[i] = BLOCK_RESERVED;
#endif
if (mtd_block_isbad(nftl->mbd.mtd,
i * nftl->EraseSize))
nftl->ReplUnitTable[i] = BLOCK_RESERVED;
}
nftl->MediaUnit = block;
boot_record_count++;
} /* foreach (block) */
return boot_record_count?0:-1;
}
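/* memcmpb: return 0 if all n bytes of a equal c, non-zero otherwise */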
static int memcmpb(void *a, int c, int n)
{
int i;
for (i = 0; i < n; i++) {
if (c != ((unsigned char *)a)[i])
return 1;
}
return 0;
}
/* check_free_sectors: check if a free sector is actually FREE, i.e. all 0xff in the data and OOB areas */
static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int len,
int check_oob)
{
struct mtd_info *mtd = nftl->mbd.mtd;
size_t retlen;
int i, ret;
u8 *buf;
buf = kmalloc(SECTORSIZE + mtd->oobsize, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = -1;
for (i = 0; i < len; i += SECTORSIZE) {
if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
goto out;
if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
goto out;
if (check_oob) {
if(nftl_read_oob(mtd, address, mtd->oobsize,
&retlen, &buf[SECTORSIZE]) < 0)
goto out;
if (memcmpb(buf + SECTORSIZE, 0xff, mtd->oobsize) != 0)
goto out;
}
address += SECTORSIZE;
}
ret = 0;
out:
kfree(buf);
return ret;
}
/* NFTL_formatblock: format an Erase Unit by erasing ALL Erase Zones in the Erase Unit and
* update the NFTL metadata. Each erase operation is checked with check_free_sectors.
*
* Return: 0 on success, -1 on error.
*
* ToDo: 1. Is it necessary to call check_free_sectors after erasing?
*/
int NFTL_formatblock(struct NFTLrecord *nftl, int block)
{
size_t retlen;
unsigned int nb_erases, erase_mark;
struct nftl_uci1 uci;
struct erase_info *instr = &nftl->instr;
struct mtd_info *mtd = nftl->mbd.mtd;
/* Read the Unit Control Information #1 for Wear-Leveling */
if (nftl_read_oob(mtd, block * nftl->EraseSize + SECTORSIZE + 8,
8, &retlen, (char *)&uci) < 0)
goto default_uci1;
erase_mark = le16_to_cpu ((uci.EraseMark | uci.EraseMark1));
if (erase_mark != ERASE_MARK) {
default_uci1:
uci.EraseMark = cpu_to_le16(ERASE_MARK);
uci.EraseMark1 = cpu_to_le16(ERASE_MARK);
uci.WearInfo = cpu_to_le32(0);
}
memset(instr, 0, sizeof(struct erase_info));
/* XXX: use async erase interface, XXX: test return code */
instr->addr = block * nftl->EraseSize;
instr->len = nftl->EraseSize;
if (mtd_erase(mtd, instr)) {
printk("Error while formatting block %d\n", block);
goto fail;
}
/* increase and write Wear-Leveling info */
nb_erases = le32_to_cpu(uci.WearInfo);
nb_erases++;
/* wrap (almost impossible with current flash) or free block */
if (nb_erases == 0)
nb_erases = 1;
/* check the "freeness" of Erase Unit before updating metadata
* FixMe: is this check really necessary ? since we have check the
* return code after the erase operation.
*/
if (check_free_sectors(nftl, instr->addr, nftl->EraseSize, 1) != 0)
goto fail;
uci.WearInfo = le32_to_cpu(nb_erases);
if (nftl_write_oob(mtd, block * nftl->EraseSize + SECTORSIZE +
8, 8, &retlen, (char *)&uci) < 0)
goto fail;
return 0;
fail:
/* could not format, update the bad block table (caller is responsible
for setting the ReplUnitTable to BLOCK_RESERVED on failure) */
mtd_block_markbad(nftl->mbd.mtd, instr->addr);
return -1;
}
/* check_sectors_in_chain: Check that each sector of a Virtual Unit Chain is correct.
* Mark each incorrect sector as 'IGNORE'. This check is only done if the chain
* was being folded when NFTL was interrupted.
*
* The check_free_sectors call in this function is necessary. It is possible
* that, after the Data area has been written, the Block Control Information is
* not updated accordingly (due to a power failure or similar), which leaves the
* block in an inconsistent state. So we have to check whether a block is really
* FREE in this case. */
static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_block)
{
struct mtd_info *mtd = nftl->mbd.mtd;
unsigned int block, i, status;
struct nftl_bci bci;
int sectors_per_block;
size_t retlen;
sectors_per_block = nftl->EraseSize / SECTORSIZE;
block = first_block;
for (;;) {
for (i = 0; i < sectors_per_block; i++) {
if (nftl_read_oob(mtd,
block * nftl->EraseSize + i * SECTORSIZE,
8, &retlen, (char *)&bci) < 0)
status = SECTOR_IGNORE;
else
status = bci.Status | bci.Status1;
switch(status) {
case SECTOR_FREE:
/* verify that the sector is really free. If not, mark
as ignore */
if (memcmpb(&bci, 0xff, 8) != 0 ||
check_free_sectors(nftl, block * nftl->EraseSize + i * SECTORSIZE,
SECTORSIZE, 0) != 0) {
printk("Incorrect free sector %d in block %d: "
"marking it as ignored\n",
i, block);
/* sector not free actually : mark it as SECTOR_IGNORE */
bci.Status = SECTOR_IGNORE;
bci.Status1 = SECTOR_IGNORE;
nftl_write_oob(mtd, block *
nftl->EraseSize +
i * SECTORSIZE, 8,
&retlen, (char *)&bci);
}
break;
default:
break;
}
}
/* proceed to next Erase Unit on the chain */
block = nftl->ReplUnitTable[block];
if (!(block == BLOCK_NIL || block < nftl->nb_blocks))
printk("incorrect ReplUnitTable[] : %d\n", block);
if (block == BLOCK_NIL || block >= nftl->nb_blocks)
break;
}
}
/* calc_chain_length: Walk through a Virtual Unit Chain and estimate chain length */
static int calc_chain_length(struct NFTLrecord *nftl, unsigned int first_block)
{
unsigned int length = 0, block = first_block;
for (;;) {
length++;
/* avoid infinite loops, although this is guaranteed not to
happen because of the previous checks */
if (length >= nftl->nb_blocks) {
printk("nftl: length too long %d !\n", length);
break;
}
block = nftl->ReplUnitTable[block];
if (!(block == BLOCK_NIL || block < nftl->nb_blocks))
printk("incorrect ReplUnitTable[] : %d\n", block);
if (block == BLOCK_NIL || block >= nftl->nb_blocks)
break;
}
return length;
}
/* format_chain: Format an invalid Virtual Unit Chain. It frees all the Erase Units in the
* chain, i.e. all the units are disconnected.
*
* It is not strictly correct to begin from the first block of the chain, because
* if the process is interrupted we may later see what looks like a valid chain again
* if a block inside it carried a first_block flag. But is that really a problem?
*
* FixMe: Figure out what the last statement means. What happens if power fails while
* we are in the for (;;) loop formatting blocks?
*/
static void format_chain(struct NFTLrecord *nftl, unsigned int first_block)
{
unsigned int block = first_block, block1;
printk("Formatting chain at block %d\n", first_block);
for (;;) {
block1 = nftl->ReplUnitTable[block];
printk("Formatting block %d\n", block);
if (NFTL_formatblock(nftl, block) < 0) {
/* cannot format! Mark it as a Bad Unit */
nftl->ReplUnitTable[block] = BLOCK_RESERVED;
} else {
nftl->ReplUnitTable[block] = BLOCK_FREE;
}
/* goto next block on the chain */
block = block1;
if (!(block == BLOCK_NIL || block < nftl->nb_blocks))
printk("incorrect ReplUnitTable[] : %d\n", block);
if (block == BLOCK_NIL || block >= nftl->nb_blocks)
break;
}
}
/* check_and_mark_free_block: Verify that a block is free in the NFTL sense (valid erase mark) or
* totally free (only 0xff).
*
* Definition: Free Erase Unit -- a properly erased/formatted Free Erase Unit should meet the
* following criterion:
* 1. it either carries a valid erase mark, or is entirely 0xff in both the data and OOB areas. */
static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
{
struct mtd_info *mtd = nftl->mbd.mtd;
struct nftl_uci1 h1;
unsigned int erase_mark;
size_t retlen;
/* check erase mark. */
if (nftl_read_oob(mtd, block * nftl->EraseSize + SECTORSIZE + 8, 8,
&retlen, (char *)&h1) < 0)
return -1;
erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1));
if (erase_mark != ERASE_MARK) {
/* if there is no erase mark, the block must be totally free. This is
possible in two cases: an empty filesystem or an interrupted erase (very unlikely) */
if (check_free_sectors (nftl, block * nftl->EraseSize, nftl->EraseSize, 1) != 0)
return -1;
/* free block : write erase mark */
h1.EraseMark = cpu_to_le16(ERASE_MARK);
h1.EraseMark1 = cpu_to_le16(ERASE_MARK);
h1.WearInfo = cpu_to_le32(0);
if (nftl_write_oob(mtd,
block * nftl->EraseSize + SECTORSIZE + 8, 8,
&retlen, (char *)&h1) < 0)
return -1;
} else {
#if 0
/* if erase mark present, need to skip it when doing check */
for (i = 0; i < nftl->EraseSize; i += SECTORSIZE) {
/* check free sector */
if (check_free_sectors (nftl, block * nftl->EraseSize + i,
SECTORSIZE, 0) != 0)
return -1;
if (nftl_read_oob(mtd, block * nftl->EraseSize + i,
16, &retlen, buf) < 0)
return -1;
if (i == SECTORSIZE) {
/* skip erase mark */
if (memcmpb(buf, 0xff, 8))
return -1;
} else {
if (memcmpb(buf, 0xff, 16))
return -1;
}
}
#endif
}
return 0;
}
/* get_fold_mark: Read the fold mark from Unit Control Information #2. FOLD_MARK_IN_PROGRESS
* indicates that a Virtual Unit Chain folding is in progress. If the UCI #2
* is FOLD_MARK_IN_PROGRESS when mounting the NFTL, the previous folding process was
* interrupted for some reason, and a clean up/check of the VUC is necessary in this case.
*
* WARNING: returns 0 on read error
*/
static int get_fold_mark(struct NFTLrecord *nftl, unsigned int block)
{
struct mtd_info *mtd = nftl->mbd.mtd;
struct nftl_uci2 uci;
size_t retlen;
if (nftl_read_oob(mtd, block * nftl->EraseSize + 2 * SECTORSIZE + 8,
8, &retlen, (char *)&uci) < 0)
return 0;
return le16_to_cpu((uci.FoldMark | uci.FoldMark1));
}
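/* NFTL_mount: Mount an NFTL partition. Locate the boot record, then walk every
* Virtual Unit Chain to build the EUNtable/ReplUnitTable (formatting invalid chains
* and, when two chains claim the same virtual unit, keeping the longer one). A
* second pass formats any still-unreferenced blocks and counts the free Erase Units.
*
* Return: 0 on success, -1 if no valid boot record was found.
*/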
int NFTL_mount(struct NFTLrecord *s)
{
int i;
unsigned int first_logical_block, logical_block, rep_block, erase_mark;
unsigned int block, first_block, is_first_block;
int chain_length, do_format_chain;
struct nftl_uci0 h0;
struct nftl_uci1 h1;
struct mtd_info *mtd = s->mbd.mtd;
size_t retlen;
/* search for NFTL MediaHeader and Spare NFTL Media Header */
if (find_boot_record(s) < 0) {
printk("Could not find valid boot record\n");
return -1;
}
/* init the logical to physical table */
for (i = 0; i < s->nb_blocks; i++) {
s->EUNtable[i] = BLOCK_NIL;
}
/* first pass : explore each block chain */
first_logical_block = 0;
for (first_block = 0; first_block < s->nb_blocks; first_block++) {
/* if the block was not already explored, we can look at it */
if (s->ReplUnitTable[first_block] == BLOCK_NOTEXPLORED) {
block = first_block;
chain_length = 0;
do_format_chain = 0;
for (;;) {
/* read the block header. If error, we format the chain */
if (nftl_read_oob(mtd,
block * s->EraseSize + 8, 8,
&retlen, (char *)&h0) < 0 ||
nftl_read_oob(mtd,
block * s->EraseSize +
SECTORSIZE + 8, 8,
&retlen, (char *)&h1) < 0) {
s->ReplUnitTable[block] = BLOCK_NIL;
do_format_chain = 1;
break;
}
logical_block = le16_to_cpu ((h0.VirtUnitNum | h0.SpareVirtUnitNum));
rep_block = le16_to_cpu ((h0.ReplUnitNum | h0.SpareReplUnitNum));
erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1));
is_first_block = !(logical_block >> 15);
logical_block = logical_block & 0x7fff;
/* invalid/free block test */
if (erase_mark != ERASE_MARK || logical_block >= s->nb_blocks) {
if (chain_length == 0) {
/* if not currently in a chain, we can handle it safely */
if (check_and_mark_free_block(s, block) < 0) {
/* not really free: format it */
printk("Formatting block %d\n", block);
if (NFTL_formatblock(s, block) < 0) {
/* could not format: reserve the block */
s->ReplUnitTable[block] = BLOCK_RESERVED;
} else {
s->ReplUnitTable[block] = BLOCK_FREE;
}
} else {
/* free block: mark it */
s->ReplUnitTable[block] = BLOCK_FREE;
}
/* directly examine the next block. */
goto examine_ReplUnitTable;
} else {
/* the block was in a chain: this is bad. We
must format the whole chain */
printk("Block %d: free but referenced in chain %d\n",
block, first_block);
s->ReplUnitTable[block] = BLOCK_NIL;
do_format_chain = 1;
break;
}
}
/* we accept only first blocks here */
if (chain_length == 0) {
/* this block is not the first block in chain :
ignore it, it will be included in a chain
later, or marked as not explored */
if (!is_first_block)
goto examine_ReplUnitTable;
first_logical_block = logical_block;
} else {
if (logical_block != first_logical_block) {
printk("Block %d: incorrect logical block: %d expected: %d\n",
block, logical_block, first_logical_block);
/* the chain is incorrect : we must format it,
but we need to read it completely */
do_format_chain = 1;
}
if (is_first_block) {
/* we accept that a block is marked as first
block while being last block in a chain
only if the chain is being folded */
if (get_fold_mark(s, block) != FOLD_MARK_IN_PROGRESS ||
rep_block != 0xffff) {
printk("Block %d: incorrectly marked as first block in chain\n",
block);
/* the chain is incorrect : we must format it,
but we need to read it completely */
do_format_chain = 1;
} else {
printk("Block %d: folding in progress - ignoring first block flag\n",
block);
}
}
}
chain_length++;
if (rep_block == 0xffff) {
/* no more blocks after */
s->ReplUnitTable[block] = BLOCK_NIL;
break;
} else if (rep_block >= s->nb_blocks) {
printk("Block %d: referencing invalid block %d\n",
block, rep_block);
do_format_chain = 1;
s->ReplUnitTable[block] = BLOCK_NIL;
break;
} else if (s->ReplUnitTable[rep_block] != BLOCK_NOTEXPLORED) {
/* same problem as previous 'is_first_block' test:
we accept that the last block of a chain has
the first_block flag set if folding is in
progress. We handle here the case where the
last block appeared first */
if (s->ReplUnitTable[rep_block] == BLOCK_NIL &&
s->EUNtable[first_logical_block] == rep_block &&
get_fold_mark(s, first_block) == FOLD_MARK_IN_PROGRESS) {
/* EUNtable[] will be set after */
printk("Block %d: folding in progress - ignoring first block flag\n",
rep_block);
s->ReplUnitTable[block] = rep_block;
s->EUNtable[first_logical_block] = BLOCK_NIL;
} else {
printk("Block %d: referencing block %d already in another chain\n",
block, rep_block);
/* XXX: should handle correctly fold in progress chains */
do_format_chain = 1;
s->ReplUnitTable[block] = BLOCK_NIL;
}
break;
} else {
/* this is OK */
s->ReplUnitTable[block] = rep_block;
block = rep_block;
}
}
/* the chain was completely explored. Now we can decide
what to do with it */
if (do_format_chain) {
/* invalid chain : format it */
format_chain(s, first_block);
} else {
unsigned int first_block1, chain_to_format, chain_length1;
int fold_mark;
/* valid chain : get foldmark */
fold_mark = get_fold_mark(s, first_block);
if (fold_mark == 0) {
/* cannot get foldmark : format the chain */
printk("Could read foldmark at block %d\n", first_block);
format_chain(s, first_block);
} else {
if (fold_mark == FOLD_MARK_IN_PROGRESS)
check_sectors_in_chain(s, first_block);
/* now handle the case where we find two chains at the
same virtual address : we select the longer one,
because the shorter one is the one which was being
folded if the folding was not done in place */
first_block1 = s->EUNtable[first_logical_block];
if (first_block1 != BLOCK_NIL) {
/* XXX: what to do if same length ? */
chain_length1 = calc_chain_length(s, first_block1);
printk("Two chains at blocks %d (len=%d) and %d (len=%d)\n",
first_block1, chain_length1, first_block, chain_length);
if (chain_length >= chain_length1) {
chain_to_format = first_block1;
s->EUNtable[first_logical_block] = first_block;
} else {
chain_to_format = first_block;
}
format_chain(s, chain_to_format);
} else {
s->EUNtable[first_logical_block] = first_block;
}
}
}
}
examine_ReplUnitTable:;
}
/* second pass to format unreferenced blocks and init free block count */
s->numfreeEUNs = 0;
s->LastFreeEUN = le16_to_cpu(s->MediaHdr.FirstPhysicalEUN);
for (block = 0; block < s->nb_blocks; block++) {
if (s->ReplUnitTable[block] == BLOCK_NOTEXPLORED) {
printk("Unreferenced block %d, formatting it\n", block);
if (NFTL_formatblock(s, block) < 0)
s->ReplUnitTable[block] = BLOCK_RESERVED;
else
s->ReplUnitTable[block] = BLOCK_FREE;
}
if (s->ReplUnitTable[block] == BLOCK_FREE) {
s->numfreeEUNs++;
s->LastFreeEUN = block;
}
}
return 0;
}
| linux-master | drivers/mtd/nftlmount.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright © 1999-2010 David Woodhouse <[email protected]>
*/
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>
#include <linux/uaccess.h>
#include "mtdcore.h"
/*
* Data structure to hold the pointer to the mtd device as well
* as mode information of various use cases.
*/
struct mtd_file_info {
struct mtd_info *mtd;
enum mtd_file_modes mode;
};
static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
{
struct mtd_file_info *mfi = file->private_data;
return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
}
static int mtdchar_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
int devnum = minor >> 1;
int ret = 0;
struct mtd_info *mtd;
struct mtd_file_info *mfi;
pr_debug("MTD_open\n");
/* You can't open the RO devices RW */
if ((file->f_mode & FMODE_WRITE) && (minor & 1))
return -EACCES;
mtd = get_mtd_device(NULL, devnum);
if (IS_ERR(mtd))
return PTR_ERR(mtd);
if (mtd->type == MTD_ABSENT) {
ret = -ENODEV;
goto out1;
}
/* You can't open it RW if it's not a writeable device */
if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
ret = -EACCES;
goto out1;
}
mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
if (!mfi) {
ret = -ENOMEM;
goto out1;
}
mfi->mtd = mtd;
file->private_data = mfi;
return 0;
out1:
put_mtd_device(mtd);
return ret;
} /* mtdchar_open */
/*====================================================================*/
static int mtdchar_close(struct inode *inode, struct file *file)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
pr_debug("MTD_close\n");
/* Only sync if opened RW */
if ((file->f_mode & FMODE_WRITE))
mtd_sync(mtd);
put_mtd_device(mtd);
file->private_data = NULL;
kfree(mfi);
return 0;
} /* mtdchar_close */
/* Back in June 2001, dwmw2 wrote:
*
* FIXME: This _really_ needs to die. In 2.5, we should lock the
* userspace buffer down and use it directly with readv/writev.
*
* The implementation below, using mtd_kmalloc_up_to, mitigates
* allocation failures when the system is under low-memory situations
* or if memory is highly fragmented at the cost of reducing the
* performance of the requested transfer due to a smaller buffer size.
*
* A more complex but more memory-efficient implementation based on
* get_user_pages and iovecs to cover extents of those pages is a
* longer-term goal, as intimated by dwmw2 above. However, for the
* write case, this requires yet more complex head and tail transfer
* handling when those head and tail offsets and sizes are such that
* alignment requirements are not met in the NAND subdriver.
*/
static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
size_t retlen;
size_t total_retlen=0;
int ret=0;
int len;
size_t size = count;
char *kbuf;
pr_debug("MTD_read\n");
if (*ppos + count > mtd->size) {
if (*ppos < mtd->size)
count = mtd->size - *ppos;
else
count = 0;
}
if (!count)
return 0;
kbuf = mtd_kmalloc_up_to(mtd, &size);
if (!kbuf)
return -ENOMEM;
while (count) {
len = min_t(size_t, count, size);
switch (mfi->mode) {
case MTD_FILE_MODE_OTP_FACTORY:
ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
&retlen, kbuf);
break;
case MTD_FILE_MODE_OTP_USER:
ret = mtd_read_user_prot_reg(mtd, *ppos, len,
&retlen, kbuf);
break;
case MTD_FILE_MODE_RAW:
{
struct mtd_oob_ops ops = {};
ops.mode = MTD_OPS_RAW;
ops.datbuf = kbuf;
ops.oobbuf = NULL;
ops.len = len;
ret = mtd_read_oob(mtd, *ppos, &ops);
retlen = ops.retlen;
break;
}
default:
ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
}
/* NAND returns -EBADMSG on ECC errors, but it returns
* the data. For our userspace tools it is important
* to dump areas with ECC errors!
* For kernel internal usage it also might return -EUCLEAN
* to signal the caller that a bitflip has occurred and has
* been corrected by the ECC algorithm.
* Userspace software which accesses NAND this way
* must be aware of the fact that it deals with NAND
*/
if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
*ppos += retlen;
if (copy_to_user(buf, kbuf, retlen)) {
kfree(kbuf);
return -EFAULT;
}
else
total_retlen += retlen;
count -= retlen;
buf += retlen;
if (retlen == 0)
count = 0;
}
else {
kfree(kbuf);
return ret;
}
}
kfree(kbuf);
return total_retlen;
} /* mtdchar_read */
static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
loff_t *ppos)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
size_t size = count;
char *kbuf;
size_t retlen;
size_t total_retlen=0;
int ret=0;
int len;
pr_debug("MTD_write\n");
if (*ppos >= mtd->size)
return -ENOSPC;
if (*ppos + count > mtd->size)
count = mtd->size - *ppos;
if (!count)
return 0;
kbuf = mtd_kmalloc_up_to(mtd, &size);
if (!kbuf)
return -ENOMEM;
while (count) {
len = min_t(size_t, count, size);
if (copy_from_user(kbuf, buf, len)) {
kfree(kbuf);
return -EFAULT;
}
switch (mfi->mode) {
case MTD_FILE_MODE_OTP_FACTORY:
ret = -EROFS;
break;
case MTD_FILE_MODE_OTP_USER:
ret = mtd_write_user_prot_reg(mtd, *ppos, len,
&retlen, kbuf);
break;
case MTD_FILE_MODE_RAW:
{
struct mtd_oob_ops ops = {};
ops.mode = MTD_OPS_RAW;
ops.datbuf = kbuf;
ops.oobbuf = NULL;
ops.ooboffs = 0;
ops.len = len;
ret = mtd_write_oob(mtd, *ppos, &ops);
retlen = ops.retlen;
break;
}
default:
ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
}
/*
* Return -ENOSPC only if no data could be written at all.
* Otherwise just return the number of bytes that actually
* have been written.
*/
if ((ret == -ENOSPC) && (total_retlen))
break;
if (!ret) {
*ppos += retlen;
total_retlen += retlen;
count -= retlen;
buf += retlen;
}
else {
kfree(kbuf);
return ret;
}
}
kfree(kbuf);
return total_retlen;
} /* mtdchar_write */
/*======================================================================
IOCTL calls for getting device parameters.
======================================================================*/
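/* otp_select_filemode: switch the file's OTP mode (factory/user/off), probing the
* device with a zero-length read first to verify that OTP is supported. */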
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
struct mtd_info *mtd = mfi->mtd;
size_t retlen;
switch (mode) {
case MTD_OTP_FACTORY:
if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
-EOPNOTSUPP)
return -EOPNOTSUPP;
mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
break;
case MTD_OTP_USER:
if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
-EOPNOTSUPP)
return -EOPNOTSUPP;
mfi->mode = MTD_FILE_MODE_OTP_USER;
break;
case MTD_OTP_OFF:
mfi->mode = MTD_FILE_MODE_NORMAL;
break;
default:
return -EINVAL;
}
return 0;
}
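/* mtdchar_writeoob: write up to 4096 bytes of userspace-supplied OOB data at the
* page containing @start; the number of bytes written is copied back to @retp. */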
static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
uint64_t start, uint32_t length, void __user *ptr,
uint32_t __user *retp)
{
struct mtd_info *master = mtd_get_master(mtd);
struct mtd_file_info *mfi = file->private_data;
struct mtd_oob_ops ops = {};
uint32_t retlen;
int ret = 0;
if (length > 4096)
return -EINVAL;
if (!master->_write_oob)
return -EOPNOTSUPP;
ops.ooblen = length;
ops.ooboffs = start & (mtd->writesize - 1);
ops.datbuf = NULL;
ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
MTD_OPS_PLACE_OOB;
if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
return -EINVAL;
ops.oobbuf = memdup_user(ptr, length);
if (IS_ERR(ops.oobbuf))
return PTR_ERR(ops.oobbuf);
start &= ~((uint64_t)mtd->writesize - 1);
ret = mtd_write_oob(mtd, start, &ops);
if (ops.oobretlen > 0xFFFFFFFFU)
ret = -EOVERFLOW;
retlen = ops.oobretlen;
if (copy_to_user(retp, &retlen, sizeof(length)))
ret = -EFAULT;
kfree(ops.oobbuf);
return ret;
}
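/* mtdchar_readoob: read up to 4096 bytes of OOB data from the page containing
* @start and copy them to userspace; the byte count is stored via @retp. */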
static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
uint64_t start, uint32_t length, void __user *ptr,
uint32_t __user *retp)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_oob_ops ops = {};
int ret = 0;
if (length > 4096)
return -EINVAL;
ops.ooblen = length;
ops.ooboffs = start & (mtd->writesize - 1);
ops.datbuf = NULL;
ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
MTD_OPS_PLACE_OOB;
if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
return -EINVAL;
ops.oobbuf = kmalloc(length, GFP_KERNEL);
if (!ops.oobbuf)
return -ENOMEM;
start &= ~((uint64_t)mtd->writesize - 1);
ret = mtd_read_oob(mtd, start, &ops);
if (put_user(ops.oobretlen, retp))
ret = -EFAULT;
else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
ops.oobretlen))
ret = -EFAULT;
kfree(ops.oobbuf);
/*
* NAND returns -EBADMSG on ECC errors, but it returns the OOB
* data. For our userspace tools it is important to dump areas
* with ECC errors!
* For kernel internal usage it also might return -EUCLEAN
* to signal the caller that a bitflip has occurred and has
* been corrected by the ECC algorithm.
*
* Note: currently the standard NAND function, nand_read_oob_std,
* does not calculate ECC for the OOB area, so do not rely on
* this behavior unless you have replaced it with your own.
*/
if (mtd_is_bitflip_or_eccerr(ret))
return 0;
return ret;
}
/*
* Copies (and truncates, if necessary) OOB layout information to the
* deprecated layout struct, nand_ecclayout_user. This is necessary only to
* support the deprecated API ioctl ECCGETLAYOUT while allowing all new
* functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
* can describe any kind of OOB layout with almost zero overhead from a
* memory usage point of view).
*/
static int shrink_ecclayout(struct mtd_info *mtd,
struct nand_ecclayout_user *to)
{
struct mtd_oob_region oobregion;
int i, section = 0, ret;
if (!mtd || !to)
return -EINVAL;
memset(to, 0, sizeof(*to));
to->eccbytes = 0;
for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
u32 eccpos;
ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
if (ret < 0) {
if (ret != -ERANGE)
return ret;
break;
}
eccpos = oobregion.offset;
for (; i < MTD_MAX_ECCPOS_ENTRIES &&
eccpos < oobregion.offset + oobregion.length; i++) {
to->eccpos[i] = eccpos++;
to->eccbytes++;
}
}
for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
ret = mtd_ooblayout_free(mtd, i, &oobregion);
if (ret < 0) {
if (ret != -ERANGE)
return ret;
break;
}
to->oobfree[i].offset = oobregion.offset;
to->oobfree[i].length = oobregion.length;
to->oobavail += to->oobfree[i].length;
}
return 0;
}
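/* get_oobinfo: translate the mtd_ooblayout_ops description into the legacy
* struct nand_oobinfo used by the MEMGETOOBSEL ioctl. */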
static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
{
struct mtd_oob_region oobregion;
int i, section = 0, ret;
if (!mtd || !to)
return -EINVAL;
memset(to, 0, sizeof(*to));
to->eccbytes = 0;
for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
u32 eccpos;
ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
if (ret < 0) {
if (ret != -ERANGE)
return ret;
break;
}
if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
return -EINVAL;
eccpos = oobregion.offset;
for (; eccpos < oobregion.offset + oobregion.length; i++) {
to->eccpos[i] = eccpos++;
to->eccbytes++;
}
}
for (i = 0; i < 8; i++) {
ret = mtd_ooblayout_free(mtd, i, &oobregion);
if (ret < 0) {
if (ret != -ERANGE)
return ret;
break;
}
to->oobfree[i][0] = oobregion.offset;
to->oobfree[i][1] = oobregion.length;
}
to->useecc = MTD_NANDECC_AUTOPLACE;
return 0;
}
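/* mtdchar_blkpg_ioctl: add or delete an MTD partition on behalf of the BLKPG
* ioctl (requires CAP_SYS_ADMIN). */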
static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
struct blkpg_ioctl_arg *arg)
{
struct blkpg_partition p;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (copy_from_user(&p, arg->data, sizeof(p)))
return -EFAULT;
switch (arg->op) {
case BLKPG_ADD_PARTITION:
/* Only master mtd device must be used to add partitions */
if (mtd_is_partition(mtd))
return -EINVAL;
/* Sanitize user input */
p.devname[BLKPG_DEVNAMELTH - 1] = '\0';
return mtd_add_partition(mtd, p.devname, p.start, p.length);
case BLKPG_DEL_PARTITION:
if (p.pno < 0)
return -EINVAL;
return mtd_del_partition(mtd, p.pno);
default:
return -EINVAL;
}
}
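/* adjust_oob_length: clamp ops->ooblen so it does not exceed the OOB space
* available in the pages covered by the data part of the request. */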
static void adjust_oob_length(struct mtd_info *mtd, uint64_t start,
struct mtd_oob_ops *ops)
{
uint32_t start_page, end_page;
u32 oob_per_page;
if (ops->len == 0 || ops->ooblen == 0)
return;
start_page = mtd_div_by_ws(start, mtd);
end_page = mtd_div_by_ws(start + ops->len - 1, mtd);
oob_per_page = mtd_oobavail(mtd, ops);
ops->ooblen = min_t(size_t, ops->ooblen,
(end_page - start_page + 1) * oob_per_page);
}
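/* mtdchar_write_ioctl: handle MEMWRITE - a data+OOB write request from
* userspace, carried out in eraseblock-sized (or smaller) chunks. */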
static noinline_for_stack int
mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp)
{
struct mtd_info *master = mtd_get_master(mtd);
struct mtd_write_req req;
const void __user *usr_data, *usr_oob;
uint8_t *datbuf = NULL, *oobbuf = NULL;
size_t datbuf_len, oobbuf_len;
int ret = 0;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
usr_data = (const void __user *)(uintptr_t)req.usr_data;
usr_oob = (const void __user *)(uintptr_t)req.usr_oob;
if (!master->_write_oob)
return -EOPNOTSUPP;
if (!usr_data)
req.len = 0;
if (!usr_oob)
req.ooblen = 0;
req.len &= 0xffffffff;
req.ooblen &= 0xffffffff;
if (req.start + req.len > mtd->size)
return -EINVAL;
datbuf_len = min_t(size_t, req.len, mtd->erasesize);
if (datbuf_len > 0) {
datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
if (!datbuf)
return -ENOMEM;
}
oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
if (oobbuf_len > 0) {
oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
if (!oobbuf) {
kvfree(datbuf);
return -ENOMEM;
}
}
while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
struct mtd_oob_ops ops = {
.mode = req.mode,
.len = min_t(size_t, req.len, datbuf_len),
.ooblen = min_t(size_t, req.ooblen, oobbuf_len),
.datbuf = datbuf,
.oobbuf = oobbuf,
};
/*
* Shorten non-page-aligned, eraseblock-sized writes so that
* the write ends on an eraseblock boundary. This is necessary
* for adjust_oob_length() to properly handle non-page-aligned
* writes.
*/
if (ops.len == mtd->erasesize)
ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);
/*
* For writes which are not OOB-only, adjust the amount of OOB
* data written according to the number of data pages written.
* This is necessary to prevent OOB data from being skipped
* over in data+OOB writes requiring multiple mtd_write_oob()
* calls to be completed.
*/
adjust_oob_length(mtd, req.start, &ops);
if (copy_from_user(datbuf, usr_data, ops.len) ||
copy_from_user(oobbuf, usr_oob, ops.ooblen)) {
ret = -EFAULT;
break;
}
ret = mtd_write_oob(mtd, req.start, &ops);
if (ret)
break;
req.start += ops.retlen;
req.len -= ops.retlen;
usr_data += ops.retlen;
req.ooblen -= ops.oobretlen;
usr_oob += ops.oobretlen;
}
kvfree(datbuf);
kvfree(oobbuf);
return ret;
}
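/* mtdchar_read_ioctl: handle MEMREAD - a data+OOB read request from userspace,
* carried out in chunks while accumulating ECC statistics across iterations. */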
static noinline_for_stack int
mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp)
{
struct mtd_info *master = mtd_get_master(mtd);
struct mtd_read_req req;
void __user *usr_data, *usr_oob;
uint8_t *datbuf = NULL, *oobbuf = NULL;
size_t datbuf_len, oobbuf_len;
size_t orig_len, orig_ooblen;
int ret = 0;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
orig_len = req.len;
orig_ooblen = req.ooblen;
usr_data = (void __user *)(uintptr_t)req.usr_data;
usr_oob = (void __user *)(uintptr_t)req.usr_oob;
if (!master->_read_oob)
return -EOPNOTSUPP;
if (!usr_data)
req.len = 0;
if (!usr_oob)
req.ooblen = 0;
req.ecc_stats.uncorrectable_errors = 0;
req.ecc_stats.corrected_bitflips = 0;
req.ecc_stats.max_bitflips = 0;
req.len &= 0xffffffff;
req.ooblen &= 0xffffffff;
if (req.start + req.len > mtd->size) {
ret = -EINVAL;
goto out;
}
datbuf_len = min_t(size_t, req.len, mtd->erasesize);
if (datbuf_len > 0) {
datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
if (!datbuf) {
ret = -ENOMEM;
goto out;
}
}
oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
if (oobbuf_len > 0) {
oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
if (!oobbuf) {
ret = -ENOMEM;
goto out;
}
}
while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
struct mtd_req_stats stats;
struct mtd_oob_ops ops = {
.mode = req.mode,
.len = min_t(size_t, req.len, datbuf_len),
.ooblen = min_t(size_t, req.ooblen, oobbuf_len),
.datbuf = datbuf,
.oobbuf = oobbuf,
.stats = &stats,
};
/*
* Shorten non-page-aligned, eraseblock-sized reads so that the
* read ends on an eraseblock boundary. This is necessary in
* order to prevent OOB data for some pages from being
* duplicated in the output of non-page-aligned reads requiring
* multiple mtd_read_oob() calls to be completed.
*/
if (ops.len == mtd->erasesize)
ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);
ret = mtd_read_oob(mtd, (loff_t)req.start, &ops);
req.ecc_stats.uncorrectable_errors +=
stats.uncorrectable_errors;
req.ecc_stats.corrected_bitflips += stats.corrected_bitflips;
req.ecc_stats.max_bitflips =
max(req.ecc_stats.max_bitflips, stats.max_bitflips);
if (ret && !mtd_is_bitflip_or_eccerr(ret))
break;
if (copy_to_user(usr_data, ops.datbuf, ops.retlen) ||
copy_to_user(usr_oob, ops.oobbuf, ops.oobretlen)) {
ret = -EFAULT;
break;
}
req.start += ops.retlen;
req.len -= ops.retlen;
usr_data += ops.retlen;
req.ooblen -= ops.oobretlen;
usr_oob += ops.oobretlen;
}
/*
* As multiple iterations of the above loop (and therefore multiple
* mtd_read_oob() calls) may be necessary to complete the read request,
* adjust the final return code to ensure it accounts for all detected
* ECC errors.
*/
if (!ret || mtd_is_bitflip(ret)) {
if (req.ecc_stats.uncorrectable_errors > 0)
ret = -EBADMSG;
else if (req.ecc_stats.corrected_bitflips > 0)
ret = -EUCLEAN;
}
out:
req.len = orig_len - req.len;
req.ooblen = orig_ooblen - req.ooblen;
if (copy_to_user(argp, &req, sizeof(req)))
ret = -EFAULT;
kvfree(datbuf);
kvfree(oobbuf);
return ret;
}
static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
struct mtd_info *master = mtd_get_master(mtd);
void __user *argp = (void __user *)arg;
int ret = 0;
struct mtd_info_user info;
pr_debug("MTD_ioctl\n");
/*
* Check the file mode to require "dangerous" commands to have write
* permissions.
*/
switch (cmd) {
/* "safe" commands */
case MEMGETREGIONCOUNT:
case MEMGETREGIONINFO:
case MEMGETINFO:
case MEMREADOOB:
case MEMREADOOB64:
case MEMREAD:
case MEMISLOCKED:
case MEMGETOOBSEL:
case MEMGETBADBLOCK:
case OTPSELECT:
case OTPGETREGIONCOUNT:
case OTPGETREGIONINFO:
case ECCGETLAYOUT:
case ECCGETSTATS:
case MTDFILEMODE:
case BLKPG:
case BLKRRPART:
break;
/* "dangerous" commands */
case MEMERASE:
case MEMERASE64:
case MEMLOCK:
case MEMUNLOCK:
case MEMSETBADBLOCK:
case MEMWRITEOOB:
case MEMWRITEOOB64:
case MEMWRITE:
case OTPLOCK:
case OTPERASE:
if (!(file->f_mode & FMODE_WRITE))
return -EPERM;
break;
default:
return -ENOTTY;
}
switch (cmd) {
case MEMGETREGIONCOUNT:
if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
return -EFAULT;
break;
case MEMGETREGIONINFO:
{
uint32_t ur_idx;
struct mtd_erase_region_info *kr;
struct region_info_user __user *ur = argp;
if (get_user(ur_idx, &(ur->regionindex)))
return -EFAULT;
if (ur_idx >= mtd->numeraseregions)
return -EINVAL;
kr = &(mtd->eraseregions[ur_idx]);
if (put_user(kr->offset, &(ur->offset))
|| put_user(kr->erasesize, &(ur->erasesize))
|| put_user(kr->numblocks, &(ur->numblocks)))
return -EFAULT;
break;
}
case MEMGETINFO:
memset(&info, 0, sizeof(info));
info.type = mtd->type;
info.flags = mtd->flags;
info.size = mtd->size;
info.erasesize = mtd->erasesize;
info.writesize = mtd->writesize;
info.oobsize = mtd->oobsize;
/* The below field is obsolete */
info.padding = 0;
if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
return -EFAULT;
break;
case MEMERASE:
case MEMERASE64:
{
struct erase_info *erase;
erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
if (!erase)
ret = -ENOMEM;
else {
if (cmd == MEMERASE64) {
struct erase_info_user64 einfo64;
if (copy_from_user(&einfo64, argp,
sizeof(struct erase_info_user64))) {
kfree(erase);
return -EFAULT;
}
erase->addr = einfo64.start;
erase->len = einfo64.length;
} else {
struct erase_info_user einfo32;
if (copy_from_user(&einfo32, argp,
sizeof(struct erase_info_user))) {
kfree(erase);
return -EFAULT;
}
erase->addr = einfo32.start;
erase->len = einfo32.length;
}
ret = mtd_erase(mtd, erase);
kfree(erase);
}
break;
}
case MEMWRITEOOB:
{
struct mtd_oob_buf buf;
struct mtd_oob_buf __user *buf_user = argp;
/* NOTE: writes return length to buf_user->length */
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
buf.ptr, &buf_user->length);
break;
}
case MEMREADOOB:
{
struct mtd_oob_buf buf;
struct mtd_oob_buf __user *buf_user = argp;
/* NOTE: writes return length to buf_user->start */
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
buf.ptr, &buf_user->start);
break;
}
case MEMWRITEOOB64:
{
struct mtd_oob_buf64 buf;
struct mtd_oob_buf64 __user *buf_user = argp;
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
(void __user *)(uintptr_t)buf.usr_ptr,
&buf_user->length);
break;
}
case MEMREADOOB64:
{
struct mtd_oob_buf64 buf;
struct mtd_oob_buf64 __user *buf_user = argp;
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
(void __user *)(uintptr_t)buf.usr_ptr,
&buf_user->length);
break;
}
case MEMWRITE:
{
ret = mtdchar_write_ioctl(mtd,
(struct mtd_write_req __user *)arg);
break;
}
case MEMREAD:
{
ret = mtdchar_read_ioctl(mtd,
(struct mtd_read_req __user *)arg);
break;
}
case MEMLOCK:
{
struct erase_info_user einfo;
if (copy_from_user(&einfo, argp, sizeof(einfo)))
return -EFAULT;
ret = mtd_lock(mtd, einfo.start, einfo.length);
break;
}
case MEMUNLOCK:
{
struct erase_info_user einfo;
if (copy_from_user(&einfo, argp, sizeof(einfo)))
return -EFAULT;
ret = mtd_unlock(mtd, einfo.start, einfo.length);
break;
}
case MEMISLOCKED:
{
struct erase_info_user einfo;
if (copy_from_user(&einfo, argp, sizeof(einfo)))
return -EFAULT;
ret = mtd_is_locked(mtd, einfo.start, einfo.length);
break;
}
/* Legacy interface */
case MEMGETOOBSEL:
{
struct nand_oobinfo oi;
if (!master->ooblayout)
return -EOPNOTSUPP;
ret = get_oobinfo(mtd, &oi);
if (ret)
return ret;
if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
return -EFAULT;
break;
}
case MEMGETBADBLOCK:
{
loff_t offs;
if (copy_from_user(&offs, argp, sizeof(loff_t)))
return -EFAULT;
return mtd_block_isbad(mtd, offs);
}
case MEMSETBADBLOCK:
{
loff_t offs;
if (copy_from_user(&offs, argp, sizeof(loff_t)))
return -EFAULT;
return mtd_block_markbad(mtd, offs);
}
case OTPSELECT:
{
int mode;
if (copy_from_user(&mode, argp, sizeof(int)))
return -EFAULT;
mfi->mode = MTD_FILE_MODE_NORMAL;
ret = otp_select_filemode(mfi, mode);
file->f_pos = 0;
break;
}
case OTPGETREGIONCOUNT:
case OTPGETREGIONINFO:
{
struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
size_t retlen;
if (!buf)
return -ENOMEM;
switch (mfi->mode) {
case MTD_FILE_MODE_OTP_FACTORY:
ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
break;
case MTD_FILE_MODE_OTP_USER:
ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
break;
default:
ret = -EINVAL;
break;
}
if (!ret) {
if (cmd == OTPGETREGIONCOUNT) {
int nbr = retlen / sizeof(struct otp_info);
ret = copy_to_user(argp, &nbr, sizeof(int));
} else
ret = copy_to_user(argp, buf, retlen);
if (ret)
ret = -EFAULT;
}
kfree(buf);
break;
}
case OTPLOCK:
case OTPERASE:
{
struct otp_info oinfo;
if (mfi->mode != MTD_FILE_MODE_OTP_USER)
return -EINVAL;
if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
return -EFAULT;
if (cmd == OTPLOCK)
ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
else
ret = mtd_erase_user_prot_reg(mtd, oinfo.start, oinfo.length);
break;
}
/* This ioctl is being deprecated - it truncates the ECC layout */
case ECCGETLAYOUT:
{
struct nand_ecclayout_user *usrlay;
if (!master->ooblayout)
return -EOPNOTSUPP;
usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
if (!usrlay)
return -ENOMEM;
shrink_ecclayout(mtd, usrlay);
if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
ret = -EFAULT;
kfree(usrlay);
break;
}
case ECCGETSTATS:
{
if (copy_to_user(argp, &mtd->ecc_stats,
sizeof(struct mtd_ecc_stats)))
return -EFAULT;
break;
}
case MTDFILEMODE:
{
mfi->mode = 0;
switch(arg) {
case MTD_FILE_MODE_OTP_FACTORY:
case MTD_FILE_MODE_OTP_USER:
ret = otp_select_filemode(mfi, arg);
break;
case MTD_FILE_MODE_RAW:
if (!mtd_has_oob(mtd))
return -EOPNOTSUPP;
mfi->mode = arg;
break;
case MTD_FILE_MODE_NORMAL:
break;
default:
ret = -EINVAL;
}
file->f_pos = 0;
break;
}
case BLKPG:
{
struct blkpg_ioctl_arg __user *blk_arg = argp;
struct blkpg_ioctl_arg a;
if (copy_from_user(&a, blk_arg, sizeof(a)))
ret = -EFAULT;
else
ret = mtdchar_blkpg_ioctl(mtd, &a);
break;
}
case BLKRRPART:
{
/* No reread partition feature. Just return ok */
ret = 0;
break;
}
}
return ret;
} /* memory_ioctl */
static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
struct mtd_info *master = mtd_get_master(mtd);
int ret;
mutex_lock(&master->master.chrdev_lock);
ret = mtdchar_ioctl(file, cmd, arg);
mutex_unlock(&master->master.chrdev_lock);
return ret;
}
#ifdef CONFIG_COMPAT
struct mtd_oob_buf32 {
u_int32_t start;
u_int32_t length;
compat_caddr_t ptr; /* unsigned char* */
};
#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
struct mtd_info *master = mtd_get_master(mtd);
void __user *argp = compat_ptr(arg);
int ret = 0;
mutex_lock(&master->master.chrdev_lock);
switch (cmd) {
case MEMWRITEOOB32:
{
struct mtd_oob_buf32 buf;
struct mtd_oob_buf32 __user *buf_user = argp;
if (!(file->f_mode & FMODE_WRITE)) {
ret = -EPERM;
break;
}
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
ret = mtdchar_writeoob(file, mtd, buf.start,
buf.length, compat_ptr(buf.ptr),
&buf_user->length);
break;
}
case MEMREADOOB32:
{
struct mtd_oob_buf32 buf;
struct mtd_oob_buf32 __user *buf_user = argp;
/* NOTE: writes return length to buf->start */
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
ret = mtdchar_readoob(file, mtd, buf.start,
buf.length, compat_ptr(buf.ptr),
&buf_user->start);
break;
}
case BLKPG:
{
/* Convert from blkpg_compat_ioctl_arg to blkpg_ioctl_arg */
struct blkpg_compat_ioctl_arg __user *uarg = argp;
struct blkpg_compat_ioctl_arg compat_arg;
struct blkpg_ioctl_arg a;
if (copy_from_user(&compat_arg, uarg, sizeof(compat_arg))) {
ret = -EFAULT;
break;
}
memset(&a, 0, sizeof(a));
a.op = compat_arg.op;
a.flags = compat_arg.flags;
a.datalen = compat_arg.datalen;
a.data = compat_ptr(compat_arg.data);
ret = mtdchar_blkpg_ioctl(mtd, &a);
break;
}
default:
ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
}
mutex_unlock(&master->master.chrdev_lock);
return ret;
}
#endif /* CONFIG_COMPAT */
/*
* try to determine where a shared mapping can be made
* - only supported for NOMMU at the moment (an MMU can't/doesn't copy private
* mappings)
*/
#ifndef CONFIG_MMU
static unsigned long mtdchar_get_unmapped_area(struct file *file,
unsigned long addr,
unsigned long len,
unsigned long pgoff,
unsigned long flags)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
unsigned long offset;
int ret;
if (addr != 0)
return (unsigned long) -EINVAL;
if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
return (unsigned long) -EINVAL;
offset = pgoff << PAGE_SHIFT;
if (offset > mtd->size - len)
return (unsigned long) -EINVAL;
ret = mtd_get_unmapped_area(mtd, len, offset, flags);
return ret == -EOPNOTSUPP ? -ENODEV : ret;
}
static unsigned mtdchar_mmap_capabilities(struct file *file)
{
struct mtd_file_info *mfi = file->private_data;
return mtd_mmap_capabilities(mfi->mtd);
}
#endif
/*
* set up a mapping for shared memory segments
*/
static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
struct map_info *map = mtd->priv;
/* This is broken because it assumes the MTD device is map-based
and that mtd->priv is a valid struct map_info. It should be
replaced with something that uses the mtd_get_unmapped_area()
operation properly. */
if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
#ifdef pgprot_noncached
if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
return vm_iomap_memory(vma, map->phys, map->size);
}
return -ENODEV;
#else
return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
#endif
}
static const struct file_operations mtd_fops = {
.owner = THIS_MODULE,
.llseek = mtdchar_lseek,
.read = mtdchar_read,
.write = mtdchar_write,
.unlocked_ioctl = mtdchar_unlocked_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mtdchar_compat_ioctl,
#endif
.open = mtdchar_open,
.release = mtdchar_close,
.mmap = mtdchar_mmap,
#ifndef CONFIG_MMU
.get_unmapped_area = mtdchar_get_unmapped_area,
.mmap_capabilities = mtdchar_mmap_capabilities,
#endif
};
int __init init_mtdchar(void)
{
int ret;
ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
"mtd", &mtd_fops);
if (ret < 0) {
pr_err("Can't allocate major number %d for MTD\n",
MTD_CHAR_MAJOR);
return ret;
}
return ret;
}
void __exit cleanup_mtdchar(void)
{
__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}
MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
| linux-master | drivers/mtd/mtdchar.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Interface to Linux block layer for MTD 'translation layers'.
*
* Copyright © 2003-2010 David Woodhouse <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include "mtdcore.h"
static LIST_HEAD(blktrans_majors);
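/* blktrans_dev_release: final kref release - free the gendisk, the blk-mq tag
* set and the device structure itself. */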
static void blktrans_dev_release(struct kref *kref)
{
struct mtd_blktrans_dev *dev =
container_of(kref, struct mtd_blktrans_dev, ref);
put_disk(dev->disk);
blk_mq_free_tag_set(dev->tag_set);
kfree(dev->tag_set);
list_del(&dev->list);
kfree(dev);
}
static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
kref_put(&dev->ref, blktrans_dev_release);
}
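/* do_blktrans_request: translate one block layer request (flush, discard, read
* or write) into the corresponding sector operations of the translation layer. */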
static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
struct mtd_blktrans_dev *dev,
struct request *req)
{
struct req_iterator iter;
struct bio_vec bvec;
unsigned long block, nsect;
char *buf;
block = blk_rq_pos(req) << 9 >> tr->blkshift;
nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
switch (req_op(req)) {
case REQ_OP_FLUSH:
if (tr->flush(dev))
return BLK_STS_IOERR;
return BLK_STS_OK;
case REQ_OP_DISCARD:
if (tr->discard(dev, block, nsect))
return BLK_STS_IOERR;
return BLK_STS_OK;
case REQ_OP_READ:
buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
if (tr->readsect(dev, block, buf)) {
kunmap(bio_page(req->bio));
return BLK_STS_IOERR;
}
}
kunmap(bio_page(req->bio));
rq_for_each_segment(bvec, req, iter)
flush_dcache_page(bvec.bv_page);
return BLK_STS_OK;
case REQ_OP_WRITE:
if (!tr->writesect)
return BLK_STS_IOERR;
rq_for_each_segment(bvec, req, iter)
flush_dcache_page(bvec.bv_page);
buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
if (tr->writesect(dev, block, buf)) {
kunmap(bio_page(req->bio));
return BLK_STS_IOERR;
}
}
kunmap(bio_page(req->bio));
return BLK_STS_OK;
default:
return BLK_STS_IOERR;
}
}
int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
static struct request *mtd_next_request(struct mtd_blktrans_dev *dev)
{
struct request *rq;
rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
if (rq) {
list_del_init(&rq->queuelist);
blk_mq_start_request(rq);
return rq;
}
return NULL;
}
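/* mtd_blktrans_work: process queued requests under dev->lock, running the
* translation layer's background task (at most once per idle period) whenever
* the request list runs empty. */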
static void mtd_blktrans_work(struct mtd_blktrans_dev *dev)
__releases(&dev->queue_lock)
__acquires(&dev->queue_lock)
{
struct mtd_blktrans_ops *tr = dev->tr;
struct request *req = NULL;
int background_done = 0;
while (1) {
blk_status_t res;
dev->bg_stop = false;
if (!req && !(req = mtd_next_request(dev))) {
if (tr->background && !background_done) {
spin_unlock_irq(&dev->queue_lock);
mutex_lock(&dev->lock);
tr->background(dev);
mutex_unlock(&dev->lock);
spin_lock_irq(&dev->queue_lock);
/*
* Do background processing just once per idle
* period.
*/
background_done = !dev->bg_stop;
continue;
}
break;
}
spin_unlock_irq(&dev->queue_lock);
mutex_lock(&dev->lock);
res = do_blktrans_request(dev->tr, dev, req);
mutex_unlock(&dev->lock);
if (!blk_update_request(req, res, blk_rq_cur_bytes(req))) {
__blk_mq_end_request(req, res);
req = NULL;
}
background_done = 0;
cond_resched();
spin_lock_irq(&dev->queue_lock);
}
}
static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct mtd_blktrans_dev *dev;
dev = hctx->queue->queuedata;
if (!dev) {
blk_mq_start_request(bd->rq);
return BLK_STS_IOERR;
}
spin_lock_irq(&dev->queue_lock);
list_add_tail(&bd->rq->queuelist, &dev->rq_list);
mtd_blktrans_work(dev);
spin_unlock_irq(&dev->queue_lock);
return BLK_STS_OK;
}
static int blktrans_open(struct gendisk *disk, blk_mode_t mode)
{
struct mtd_blktrans_dev *dev = disk->private_data;
int ret = 0;
kref_get(&dev->ref);
mutex_lock(&dev->lock);
if (dev->open)
goto unlock;
__module_get(dev->tr->owner);
if (!dev->mtd)
goto unlock;
if (dev->tr->open) {
ret = dev->tr->open(dev);
if (ret)
goto error_put;
}
ret = __get_mtd_device(dev->mtd);
if (ret)
goto error_release;
dev->writable = mode & BLK_OPEN_WRITE;
unlock:
dev->open++;
mutex_unlock(&dev->lock);
return ret;
error_release:
if (dev->tr->release)
dev->tr->release(dev);
error_put:
module_put(dev->tr->owner);
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
return ret;
}
static void blktrans_release(struct gendisk *disk)
{
struct mtd_blktrans_dev *dev = disk->private_data;
mutex_lock(&dev->lock);
if (--dev->open)
goto unlock;
module_put(dev->tr->owner);
if (dev->mtd) {
if (dev->tr->release)
dev->tr->release(dev);
__put_mtd_device(dev->mtd);
}
unlock:
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
}
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
int ret = -ENXIO;
mutex_lock(&dev->lock);
if (!dev->mtd)
goto unlock;
ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY;
unlock:
mutex_unlock(&dev->lock);
return ret;
}
static const struct block_device_operations mtd_block_ops = {
.owner = THIS_MODULE,
.open = blktrans_open,
.release = blktrans_release,
.getgeo = blktrans_getgeo,
};
static const struct blk_mq_ops mtd_mq_ops = {
.queue_rq = mtd_queue_rq,
};
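/* add_mtd_blktrans_dev: pick a device number, set up the blk-mq tag set,
* request queue and gendisk for a new translation-layer device and register it. */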
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
struct mtd_blktrans_ops *tr = new->tr;
struct mtd_blktrans_dev *d;
int last_devnum = -1;
struct gendisk *gd;
int ret;
lockdep_assert_held(&mtd_table_mutex);
list_for_each_entry(d, &tr->devs, list) {
if (new->devnum == -1) {
/* Use first free number */
if (d->devnum != last_devnum+1) {
/* Found a free devnum. Plug it in here */
new->devnum = last_devnum+1;
list_add_tail(&new->list, &d->list);
goto added;
}
} else if (d->devnum == new->devnum) {
/* Required number taken */
return -EBUSY;
} else if (d->devnum > new->devnum) {
/* Required number was free */
list_add_tail(&new->list, &d->list);
goto added;
}
last_devnum = d->devnum;
}
ret = -EBUSY;
if (new->devnum == -1)
new->devnum = last_devnum+1;
/* Check that the device and any partitions will get valid
* minor numbers and that the disk naming code below can cope
* with this number. */
if (new->devnum > (MINORMASK >> tr->part_bits) ||
(tr->part_bits && new->devnum >= 27 * 26))
return ret;
list_add_tail(&new->list, &tr->devs);
added:
mutex_init(&new->lock);
kref_init(&new->ref);
if (!tr->writesect)
new->readonly = 1;
ret = -ENOMEM;
new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);
if (!new->tag_set)
goto out_list_del;
ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2,
BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
if (ret)
goto out_kfree_tag_set;
/* Create gendisk */
gd = blk_mq_alloc_disk(new->tag_set, new);
if (IS_ERR(gd)) {
ret = PTR_ERR(gd);
goto out_free_tag_set;
}
new->disk = gd;
new->rq = new->disk->queue;
gd->private_data = new;
gd->major = tr->major;
gd->first_minor = (new->devnum) << tr->part_bits;
gd->minors = 1 << tr->part_bits;
gd->fops = &mtd_block_ops;
if (tr->part_bits) {
if (new->devnum < 26)
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%c", tr->name, 'a' + new->devnum);
else
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%c%c", tr->name,
'a' - 1 + new->devnum / 26,
'a' + new->devnum % 26);
} else {
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%d", tr->name, new->devnum);
gd->flags |= GENHD_FL_NO_PART;
}
set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);
/* Create the request queue */
spin_lock_init(&new->queue_lock);
INIT_LIST_HEAD(&new->rq_list);
if (tr->flush)
blk_queue_write_cache(new->rq, true, false);
blk_queue_logical_block_size(new->rq, tr->blksize);
blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);
if (tr->discard) {
blk_queue_max_discard_sectors(new->rq, UINT_MAX);
new->rq->limits.discard_granularity = tr->blksize;
}
gd->queue = new->rq;
if (new->readonly)
set_disk_ro(gd, 1);
ret = device_add_disk(&new->mtd->dev, gd, NULL);
if (ret)
goto out_cleanup_disk;
if (new->disk_attributes) {
ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
new->disk_attributes);
WARN_ON(ret);
}
return 0;
out_cleanup_disk:
put_disk(new->disk);
out_free_tag_set:
blk_mq_free_tag_set(new->tag_set);
out_kfree_tag_set:
kfree(new->tag_set);
out_list_del:
list_del(&new->list);
return ret;
}
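/* del_mtd_blktrans_dev: tear down a translation-layer device - remove the disk,
* flush outstanding requests and drop the reference to the underlying MTD. */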
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
unsigned long flags;
lockdep_assert_held(&mtd_table_mutex);
if (old->disk_attributes)
sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
old->disk_attributes);
/* Stop new requests from arriving */
del_gendisk(old->disk);
/* Kill current requests */
spin_lock_irqsave(&old->queue_lock, flags);
old->rq->queuedata = NULL;
spin_unlock_irqrestore(&old->queue_lock, flags);
/* freeze+quiesce queue to ensure all requests are flushed */
blk_mq_freeze_queue(old->rq);
blk_mq_quiesce_queue(old->rq);
blk_mq_unquiesce_queue(old->rq);
blk_mq_unfreeze_queue(old->rq);
/* If the device is currently open, tell trans driver to close it,
then put mtd device, and don't touch it again */
mutex_lock(&old->lock);
if (old->open) {
if (old->tr->release)
old->tr->release(old);
__put_mtd_device(old->mtd);
}
old->mtd = NULL;
mutex_unlock(&old->lock);
blktrans_dev_put(old);
return 0;
}
static void blktrans_notify_remove(struct mtd_info *mtd)
{
struct mtd_blktrans_ops *tr;
struct mtd_blktrans_dev *dev, *next;
list_for_each_entry(tr, &blktrans_majors, list)
list_for_each_entry_safe(dev, next, &tr->devs, list)
if (dev->mtd == mtd)
tr->remove_dev(dev);
}
static void blktrans_notify_add(struct mtd_info *mtd)
{
struct mtd_blktrans_ops *tr;
if (mtd->type == MTD_ABSENT)
return;
list_for_each_entry(tr, &blktrans_majors, list)
tr->add_mtd(tr, mtd);
}
static struct mtd_notifier blktrans_notifier = {
.add = blktrans_notify_add,
.remove = blktrans_notify_remove,
};
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
struct mtd_info *mtd;
int ret;
/* Register the notifier if/when the first device type is
registered, to prevent the link/init ordering from fucking
us over. */
if (!blktrans_notifier.list.next)
register_mtd_user(&blktrans_notifier);
ret = register_blkdev(tr->major, tr->name);
if (ret < 0) {
printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
tr->name, tr->major, ret);
return ret;
}
if (ret)
tr->major = ret;
tr->blkshift = ffs(tr->blksize) - 1;
INIT_LIST_HEAD(&tr->devs);
mutex_lock(&mtd_table_mutex);
list_add(&tr->list, &blktrans_majors);
mtd_for_each_device(mtd)
if (mtd->type != MTD_ABSENT)
tr->add_mtd(tr, mtd);
mutex_unlock(&mtd_table_mutex);
return 0;
}
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
struct mtd_blktrans_dev *dev, *next;
mutex_lock(&mtd_table_mutex);
/* Remove it from the list of active majors */
list_del(&tr->list);
list_for_each_entry_safe(dev, next, &tr->devs, list)
tr->remove_dev(dev);
mutex_unlock(&mtd_table_mutex);
unregister_blkdev(tr->major, tr->name);
BUG_ON(!list_empty(&tr->devs));
return 0;
}
static void __exit mtd_blktrans_exit(void)
{
/* No race here -- if someone's currently in register_mtd_blktrans
we're screwed anyway. */
if (blktrans_notifier.list.next)
unregister_mtd_user(&blktrans_notifier);
}
module_exit(mtd_blktrans_exit);
EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);
MODULE_AUTHOR("David Woodhouse <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
| linux-master | drivers/mtd/mtd_blkdevs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Simple MTD partitioning layer
*
* Copyright © 2000 Nicolas Pitre <[email protected]>
* Copyright © 2002 Thomas Gleixner <[email protected]>
* Copyright © 2000-2010 David Woodhouse <[email protected]>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include "mtdcore.h"
/*
* MTD methods which simply translate the effective address and pass through
* to the _real_ device.
*/
static inline void free_partition(struct mtd_info *mtd)
{
kfree(mtd->name);
kfree(mtd);
}
void release_mtd_partition(struct mtd_info *mtd)
{
WARN_ON(!list_empty(&mtd->part.node));
free_partition(mtd);
}
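/* allocate_partition: build the child mtd_info describing one partition of
* @parent, resolving the special offset/size values (MTDPART_OFS_*, MTDPART_SIZ_FULL)
* and sanity-checking the result against the parent's geometry. */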
static struct mtd_info *allocate_partition(struct mtd_info *parent,
const struct mtd_partition *part,
int partno, uint64_t cur_offset)
{
struct mtd_info *master = mtd_get_master(parent);
int wr_alignment = (parent->flags & MTD_NO_ERASE) ?
master->writesize : master->erasesize;
u64 parent_size = mtd_is_partition(parent) ?
parent->part.size : parent->size;
struct mtd_info *child;
u32 remainder;
char *name;
u64 tmp;
/* allocate the partition structure */
child = kzalloc(sizeof(*child), GFP_KERNEL);
name = kstrdup(part->name, GFP_KERNEL);
if (!name || !child) {
printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
parent->name);
kfree(name);
kfree(child);
return ERR_PTR(-ENOMEM);
}
/* set up the MTD object for this partition */
child->type = parent->type;
child->part.flags = parent->flags & ~part->mask_flags;
child->part.flags |= part->add_flags;
child->flags = child->part.flags;
child->part.size = part->size;
child->writesize = parent->writesize;
child->writebufsize = parent->writebufsize;
child->oobsize = parent->oobsize;
child->oobavail = parent->oobavail;
child->subpage_sft = parent->subpage_sft;
child->name = name;
child->owner = parent->owner;
/* NOTE: Historically, we didn't arrange MTDs as a tree out of
* concern for showing the same data in multiple partitions.
* However, it is very useful to have the master node present,
* so the MTD_PARTITIONED_MASTER option allows that. The master
* will have device nodes etc only if this is set, so make the
* parent conditional on that option. Note, this is a way to
* distinguish between the parent and its partitions in sysfs.
*/
child->dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
&parent->dev : parent->dev.parent;
child->dev.of_node = part->of_node;
child->parent = parent;
child->part.offset = part->offset;
INIT_LIST_HEAD(&child->partitions);
if (child->part.offset == MTDPART_OFS_APPEND)
child->part.offset = cur_offset;
if (child->part.offset == MTDPART_OFS_NXTBLK) {
tmp = cur_offset;
child->part.offset = cur_offset;
remainder = do_div(tmp, wr_alignment);
if (remainder) {
child->part.offset += wr_alignment - remainder;
printk(KERN_NOTICE "Moving partition %d: "
"0x%012llx -> 0x%012llx\n", partno,
(unsigned long long)cur_offset,
child->part.offset);
}
}
if (child->part.offset == MTDPART_OFS_RETAIN) {
child->part.offset = cur_offset;
if (parent_size - child->part.offset >= child->part.size) {
child->part.size = parent_size - child->part.offset -
child->part.size;
} else {
printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
part->name, parent_size - child->part.offset,
child->part.size);
/* register to preserve ordering */
goto out_register;
}
}
if (child->part.size == MTDPART_SIZ_FULL)
child->part.size = parent_size - child->part.offset;
printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n",
child->part.offset, child->part.offset + child->part.size,
child->name);
/* let's do some sanity checks */
if (child->part.offset >= parent_size) {
/* let's register it anyway to preserve ordering */
child->part.offset = 0;
child->part.size = 0;
/* Initialize ->erasesize to make add_mtd_device() happy. */
child->erasesize = parent->erasesize;
printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
part->name);
goto out_register;
}
if (child->part.offset + child->part.size > parent->size) {
child->part.size = parent_size - child->part.offset;
printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
part->name, parent->name, child->part.size);
}
if (parent->numeraseregions > 1) {
/* Deal with variable erase size stuff */
int i, max = parent->numeraseregions;
u64 end = child->part.offset + child->part.size;
struct mtd_erase_region_info *regions = parent->eraseregions;
/* Find the first erase region which is part of this
 * partition. */
for (i = 0; i < max && regions[i].offset <= child->part.offset;
i++)
;
/* The loop searched for the region _behind_ the first one */
if (i > 0)
i--;
/* Pick biggest erasesize */
for (; i < max && regions[i].offset < end; i++) {
if (child->erasesize < regions[i].erasesize)
child->erasesize = regions[i].erasesize;
}
BUG_ON(child->erasesize == 0);
} else {
/* Single erase size */
child->erasesize = master->erasesize;
}
/*
* Child erasesize might differ from the parent one if the parent
* exposes several regions with different erasesize. Adjust
* wr_alignment accordingly.
*/
if (!(child->flags & MTD_NO_ERASE))
wr_alignment = child->erasesize;
tmp = mtd_get_master_ofs(child, 0);
remainder = do_div(tmp, wr_alignment);
if ((child->flags & MTD_WRITEABLE) && remainder) {
/* Doesn't start on a boundary of major erase size */
/* FIXME: Let it be writable if it is on a boundary of
* _minor_ erase size though */
child->flags &= ~MTD_WRITEABLE;
printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n",
part->name);
}
tmp = mtd_get_master_ofs(child, 0) + child->part.size;
remainder = do_div(tmp, wr_alignment);
if ((child->flags & MTD_WRITEABLE) && remainder) {
child->flags &= ~MTD_WRITEABLE;
printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n",
part->name);
}
child->size = child->part.size;
child->ecc_step_size = parent->ecc_step_size;
child->ecc_strength = parent->ecc_strength;
child->bitflip_threshold = parent->bitflip_threshold;
if (master->_block_isbad) {
uint64_t offs = 0;
while (offs < child->part.size) {
if (mtd_block_isreserved(child, offs))
child->ecc_stats.bbtblocks++;
else if (mtd_block_isbad(child, offs))
child->ecc_stats.badblocks++;
offs += child->erasesize;
}
}
out_register:
return child;
}
static ssize_t offset_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
return sysfs_emit(buf, "%lld\n", mtd->part.offset);
}
static DEVICE_ATTR_RO(offset); /* mtd partition offset */
static const struct attribute *mtd_partition_attrs[] = {
&dev_attr_offset.attr,
NULL
};
static int mtd_add_partition_attrs(struct mtd_info *new)
{
int ret = sysfs_create_files(&new->dev.kobj, mtd_partition_attrs);
if (ret)
printk(KERN_WARNING
"mtd: failed to create partition attrs, err=%d\n", ret);
return ret;
}
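/*
 * mtd_add_partition - add a single partition at run time.
 *
 * A concrete offset is required here; only MTDPART_SIZ_FULL is resolved
 * for the length.  As an illustration (the name and size are made up,
 * not taken from any real caller), a driver could do:
 *
 *	err = mtd_add_partition(parent, "boot", 0, SZ_1M);
 *
 * to expose the first megabyte of @parent as a partition named "boot".
 */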
int mtd_add_partition(struct mtd_info *parent, const char *name,
long long offset, long long length)
{
struct mtd_info *master = mtd_get_master(parent);
u64 parent_size = mtd_is_partition(parent) ?
parent->part.size : parent->size;
struct mtd_partition part;
struct mtd_info *child;
int ret = 0;
/* the direct offset is expected */
if (offset == MTDPART_OFS_APPEND ||
offset == MTDPART_OFS_NXTBLK)
return -EINVAL;
if (length == MTDPART_SIZ_FULL)
length = parent_size - offset;
if (length <= 0)
return -EINVAL;
memset(&part, 0, sizeof(part));
part.name = name;
part.size = length;
part.offset = offset;
child = allocate_partition(parent, &part, -1, offset);
if (IS_ERR(child))
return PTR_ERR(child);
mutex_lock(&master->master.partitions_lock);
list_add_tail(&child->part.node, &parent->partitions);
mutex_unlock(&master->master.partitions_lock);
ret = add_mtd_device(child);
if (ret)
goto err_remove_part;
mtd_add_partition_attrs(child);
return 0;
err_remove_part:
mutex_lock(&master->master.partitions_lock);
list_del(&child->part.node);
mutex_unlock(&master->master.partitions_lock);
free_partition(child);
return ret;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
/**
* __mtd_del_partition - delete MTD partition
*
* @mtd: MTD structure to be deleted
*
* This function must be called with the partitions mutex locked.
*/
static int __mtd_del_partition(struct mtd_info *mtd)
{
struct mtd_info *child, *next;
int err;
list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
err = __mtd_del_partition(child);
if (err)
return err;
}
sysfs_remove_files(&mtd->dev.kobj, mtd_partition_attrs);
list_del_init(&mtd->part.node);
err = del_mtd_device(mtd);
if (err)
return err;
return 0;
}
/*
 * This function unregisters and destroys all slave MTD objects which are
* attached to the given MTD object, recursively.
*/
static int __del_mtd_partitions(struct mtd_info *mtd)
{
struct mtd_info *child, *next;
int ret, err = 0;
list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
if (mtd_has_partitions(child))
__del_mtd_partitions(child);
pr_info("Deleting %s MTD partition\n", child->name);
list_del_init(&child->part.node);
ret = del_mtd_device(child);
if (ret < 0) {
pr_err("Error when deleting partition \"%s\" (%d)\n",
child->name, ret);
err = ret;
continue;
}
}
return err;
}
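/*
 * del_mtd_partitions - remove every partition registered below @mtd.
 *
 * Takes the master's partitions lock and tears the whole subtree down;
 * the first deletion error (if any) is returned after the remaining
 * partitions have been attempted.
 */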
int del_mtd_partitions(struct mtd_info *mtd)
{
struct mtd_info *master = mtd_get_master(mtd);
int ret;
pr_info("Deleting MTD partitions on \"%s\":\n", mtd->name);
mutex_lock(&master->master.partitions_lock);
ret = __del_mtd_partitions(mtd);
mutex_unlock(&master->master.partitions_lock);
return ret;
}
int mtd_del_partition(struct mtd_info *mtd, int partno)
{
struct mtd_info *child, *master = mtd_get_master(mtd);
int ret = -EINVAL;
mutex_lock(&master->master.partitions_lock);
list_for_each_entry(child, &mtd->partitions, part.node) {
if (child->index == partno) {
ret = __mtd_del_partition(child);
break;
}
}
mutex_unlock(&master->master.partitions_lock);
return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
/*
* This function, given a parent MTD object and a partition table, creates
* and registers the child MTD objects which are bound to the parent according
* to the partition definitions.
*
* For historical reasons, this function's caller only registers the parent
* if the MTD_PARTITIONED_MASTER config option is set.
*/
int add_mtd_partitions(struct mtd_info *parent,
const struct mtd_partition *parts,
int nbparts)
{
struct mtd_info *child, *master = mtd_get_master(parent);
uint64_t cur_offset = 0;
int i, ret;
printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n",
nbparts, parent->name);
for (i = 0; i < nbparts; i++) {
child = allocate_partition(parent, parts + i, i, cur_offset);
if (IS_ERR(child)) {
ret = PTR_ERR(child);
goto err_del_partitions;
}
mutex_lock(&master->master.partitions_lock);
list_add_tail(&child->part.node, &parent->partitions);
mutex_unlock(&master->master.partitions_lock);
ret = add_mtd_device(child);
if (ret) {
mutex_lock(&master->master.partitions_lock);
list_del(&child->part.node);
mutex_unlock(&master->master.partitions_lock);
free_partition(child);
goto err_del_partitions;
}
mtd_add_partition_attrs(child);
/* Look for subpartitions */
parse_mtd_partitions(child, parts[i].types, NULL);
cur_offset = child->part.offset + child->part.size;
}
return 0;
err_del_partitions:
del_mtd_partitions(master);
return ret;
}
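/* Registry of partition parsers, protected by part_parser_lock. */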
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);
static struct mtd_part_parser *mtd_part_parser_get(const char *name)
{
struct mtd_part_parser *p, *ret = NULL;
spin_lock(&part_parser_lock);
list_for_each_entry(p, &part_parsers, list)
if (!strcmp(p->name, name) && try_module_get(p->owner)) {
ret = p;
break;
}
spin_unlock(&part_parser_lock);
return ret;
}
static inline void mtd_part_parser_put(const struct mtd_part_parser *p)
{
module_put(p->owner);
}
/*
* Many partition parsers just expected the core to kfree() all their data in
* one chunk. Do that by default.
*/
static void mtd_part_parser_cleanup_default(const struct mtd_partition *pparts,
int nr_parts)
{
kfree(pparts);
}
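/*
 * Parser modules normally do not call __register_mtd_parser() directly;
 * the register_mtd_parser()/module_mtd_part_parser() helpers from
 * <linux/mtd/partitions.h> pass THIS_MODULE in as @owner.  A minimal
 * sketch (the parser name and parse_fn are illustrative only):
 *
 *	static struct mtd_part_parser example_parser = {
 *		.name = "example",
 *		.parse_fn = example_parse_fn,
 *	};
 *	module_mtd_part_parser(example_parser);
 */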
int __register_mtd_parser(struct mtd_part_parser *p, struct module *owner)
{
p->owner = owner;
if (!p->cleanup)
p->cleanup = &mtd_part_parser_cleanup_default;
spin_lock(&part_parser_lock);
list_add(&p->list, &part_parsers);
spin_unlock(&part_parser_lock);
return 0;
}
EXPORT_SYMBOL_GPL(__register_mtd_parser);
void deregister_mtd_parser(struct mtd_part_parser *p)
{
spin_lock(&part_parser_lock);
list_del(&p->list);
spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
/*
* Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
* are changing this array!
*/
static const char * const default_mtd_part_types[] = {
"cmdlinepart",
"ofpart",
NULL
};
/* Check DT only when looking for subpartitions. */
static const char * const default_subpartition_types[] = {
"ofpart",
NULL
};
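/*
 * mtd_part_do_parse - run one parser against @master.
 *
 * On success (a positive number of partitions) the result and the parser
 * are stashed in @pparts so that mtd_part_parser_cleanup() can later hand
 * the partition array back to the parser that allocated it.
 */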
static int mtd_part_do_parse(struct mtd_part_parser *parser,
struct mtd_info *master,
struct mtd_partitions *pparts,
struct mtd_part_parser_data *data)
{
int ret;
ret = (*parser->parse_fn)(master, &pparts->parts, data);
pr_debug("%s: parser %s: %i\n", master->name, parser->name, ret);
if (ret <= 0)
return ret;
pr_notice("%d %s partitions found on MTD device %s\n", ret,
parser->name, master->name);
pparts->nr_parts = ret;
pparts->parser = parser;
return ret;
}
/**
* mtd_part_get_compatible_parser - find MTD parser by a compatible string
*
* @compat: compatible string describing partitions in a device tree
*
* MTD parsers can specify supported partitions by providing a table of
* compatibility strings. This function finds a parser that advertises support
* for a passed value of "compatible".
*/
static struct mtd_part_parser *mtd_part_get_compatible_parser(const char *compat)
{
struct mtd_part_parser *p, *ret = NULL;
spin_lock(&part_parser_lock);
list_for_each_entry(p, &part_parsers, list) {
const struct of_device_id *matches;
matches = p->of_match_table;
if (!matches)
continue;
for (; matches->compatible[0]; matches++) {
if (!strcmp(matches->compatible, compat) &&
try_module_get(p->owner)) {
ret = p;
break;
}
}
if (ret)
break;
}
spin_unlock(&part_parser_lock);
return ret;
}
static int mtd_part_of_parse(struct mtd_info *master,
struct mtd_partitions *pparts)
{
struct mtd_part_parser *parser;
struct device_node *np;
struct device_node *child;
struct property *prop;
struct device *dev;
const char *compat;
const char *fixed = "fixed-partitions";
int ret, err = 0;
dev = &master->dev;
/* Use parent device (controller) if the top level MTD is not registered */
if (!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) && !mtd_is_partition(master))
dev = master->dev.parent;
np = mtd_get_of_node(master);
if (mtd_is_partition(master))
of_node_get(np);
else
np = of_get_child_by_name(np, "partitions");
/*
* Don't create devices that are added to a bus but will never get
* probed. That'll cause fw_devlink to block probing of consumers of
* this partition until the partition device is probed.
*/
for_each_child_of_node(np, child)
if (of_device_is_compatible(child, "nvmem-cells"))
of_node_set_flag(child, OF_POPULATED);
of_property_for_each_string(np, "compatible", prop, compat) {
parser = mtd_part_get_compatible_parser(compat);
if (!parser)
continue;
ret = mtd_part_do_parse(parser, master, pparts, NULL);
if (ret > 0) {
of_platform_populate(np, NULL, NULL, dev);
of_node_put(np);
return ret;
}
mtd_part_parser_put(parser);
if (ret < 0 && !err)
err = ret;
}
of_platform_populate(np, NULL, NULL, dev);
of_node_put(np);
/*
* For backward compatibility we have to try the "fixed-partitions"
 * parser. It supports the old DT format, with partitions specified as
 * direct subnodes of the flash device DT node and no compatible string
 * we could match on.
*/
parser = mtd_part_parser_get(fixed);
if (!parser && !request_module("%s", fixed))
parser = mtd_part_parser_get(fixed);
if (parser) {
ret = mtd_part_do_parse(parser, master, pparts, NULL);
if (ret > 0)
return ret;
mtd_part_parser_put(parser);
if (ret < 0 && !err)
err = ret;
}
return err;
}
/**
* parse_mtd_partitions - parse and register MTD partitions
*
* @master: the master partition (describes whole MTD device)
* @types: names of partition parsers to try or %NULL
* @data: MTD partition parser-specific data
*
* This function tries to find & register partitions on MTD device @master. It
* uses MTD partition parsers, specified in @types. However, if @types is %NULL,
* then the default list of parsers is used. The default list contains only the
* "cmdlinepart" and "ofpart" parsers ATM.
 * Note: If there is more than one parser in @types, the kernel only takes the
* partitions parsed out by the first parser.
*
* This function may return:
* o a negative error code in case of failure
* o number of found partitions otherwise
*/
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
struct mtd_part_parser_data *data)
{
struct mtd_partitions pparts = { };
struct mtd_part_parser *parser;
int ret, err = 0;
if (!types)
types = mtd_is_partition(master) ? default_subpartition_types :
default_mtd_part_types;
for ( ; *types; types++) {
/*
* ofpart is a special type that means OF partitioning info
 * should be used. It requires slightly different logic, so it is
 * handled in a separate function.
*/
if (!strcmp(*types, "ofpart")) {
ret = mtd_part_of_parse(master, &pparts);
} else {
pr_debug("%s: parsing partitions %s\n", master->name,
*types);
parser = mtd_part_parser_get(*types);
if (!parser && !request_module("%s", *types))
parser = mtd_part_parser_get(*types);
pr_debug("%s: got parser %s\n", master->name,
parser ? parser->name : NULL);
if (!parser)
continue;
ret = mtd_part_do_parse(parser, master, &pparts, data);
if (ret <= 0)
mtd_part_parser_put(parser);
}
/* Found partitions! */
if (ret > 0) {
err = add_mtd_partitions(master, pparts.parts,
pparts.nr_parts);
mtd_part_parser_cleanup(&pparts);
return err ? err : pparts.nr_parts;
}
/*
* Stash the first error we see; only report it if no parser
* succeeds
*/
if (ret < 0 && !err)
err = ret;
}
return err;
}
void mtd_part_parser_cleanup(struct mtd_partitions *parts)
{
const struct mtd_part_parser *parser;
if (!parts)
return;
parser = parts->parser;
if (parser) {
if (parser->cleanup)
parser->cleanup(parts->parts, parts->nr_parts);
mtd_part_parser_put(parser);
}
}
/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);
return master->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);
| linux-master | drivers/mtd/mtdpart.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MTD device concatenation layer
*
* Copyright © 2002 Robert Kaiser <[email protected]>
* Copyright © 2002-2010 David Woodhouse <[email protected]>
*
* NAND support by Christian Gan <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/backing-dev.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>
#include <asm/div64.h>
/*
* Our storage structure:
* Subdev points to an array of pointers to struct mtd_info objects
* which is allocated along with this structure
*
*/
struct mtd_concat {
struct mtd_info mtd;
int num_subdev;
struct mtd_info **subdev;
};
/*
* how to calculate the size required for the above structure,
* including the pointer array subdev points to:
*/
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev) \
((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
/*
* Given a pointer to the MTD object in the mtd_concat structure,
* we can retrieve the pointer to that structure with this macro.
*/
#define CONCAT(x) ((struct mtd_concat *)(x))
/*
* MTD methods which look up the relevant subdevice, translate the
* effective address and pass through to the subdevice.
*/
static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t * retlen, u_char * buf)
{
struct mtd_concat *concat = CONCAT(mtd);
int ret = 0, err;
int i;
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
size_t size, retsize;
if (from >= subdev->size) {
/* Not destined for this subdev */
size = 0;
from -= subdev->size;
continue;
}
if (from + len > subdev->size)
/* First part goes into this subdev */
size = subdev->size - from;
else
/* Entire transaction goes into this subdev */
size = len;
err = mtd_read(subdev, from, size, &retsize, buf);
/* Save information about bitflips! */
if (unlikely(err)) {
if (mtd_is_eccerr(err)) {
mtd->ecc_stats.failed++;
ret = err;
} else if (mtd_is_bitflip(err)) {
mtd->ecc_stats.corrected++;
/* Do not overwrite -EBADMSG !! */
if (!ret)
ret = err;
} else
return err;
}
*retlen += retsize;
len -= size;
if (len == 0)
return ret;
buf += size;
from = 0;
}
return -EINVAL;
}
static int
concat_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t * retlen, const u_char * buf)
{
struct mtd_concat *concat = CONCAT(mtd);
int err = -EINVAL;
int i;
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
size_t size, retsize;
if (to >= subdev->size) {
to -= subdev->size;
continue;
}
if (to + len > subdev->size)
size = subdev->size - to;
else
size = len;
err = mtd_panic_write(subdev, to, size, &retsize, buf);
if (err == -EOPNOTSUPP) {
printk(KERN_ERR "mtdconcat: Cannot write from panic without panic_write\n");
return err;
}
if (err)
break;
*retlen += retsize;
len -= size;
if (len == 0)
break;
err = -EINVAL;
buf += size;
to = 0;
}
return err;
}
static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t * retlen, const u_char * buf)
{
struct mtd_concat *concat = CONCAT(mtd);
int err = -EINVAL;
int i;
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
size_t size, retsize;
if (to >= subdev->size) {
size = 0;
to -= subdev->size;
continue;
}
if (to + len > subdev->size)
size = subdev->size - to;
else
size = len;
err = mtd_write(subdev, to, size, &retsize, buf);
if (err)
break;
*retlen += retsize;
len -= size;
if (len == 0)
break;
err = -EINVAL;
buf += size;
to = 0;
}
return err;
}
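/*
 * concat_writev works on a scratch copy of the caller's kvec array so
 * that a write spanning a subdevice boundary can be split: the iovec
 * that crosses the boundary is temporarily shortened for the current
 * subdevice and the remainder is carried over to the next one.
 */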
static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t * retlen)
{
struct mtd_concat *concat = CONCAT(mtd);
struct kvec *vecs_copy;
unsigned long entry_low, entry_high;
size_t total_len = 0;
int i;
int err = -EINVAL;
/* Calculate total length of data */
for (i = 0; i < count; i++)
total_len += vecs[i].iov_len;
/* Check alignment */
if (mtd->writesize > 1) {
uint64_t __to = to;
if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
return -EINVAL;
}
/* make a copy of vecs */
vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
if (!vecs_copy)
return -ENOMEM;
entry_low = 0;
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
size_t size, wsize, retsize, old_iov_len;
if (to >= subdev->size) {
to -= subdev->size;
continue;
}
size = min_t(uint64_t, total_len, subdev->size - to);
wsize = size; /* store for future use */
entry_high = entry_low;
while (entry_high < count) {
if (size <= vecs_copy[entry_high].iov_len)
break;
size -= vecs_copy[entry_high++].iov_len;
}
old_iov_len = vecs_copy[entry_high].iov_len;
vecs_copy[entry_high].iov_len = size;
err = mtd_writev(subdev, &vecs_copy[entry_low],
entry_high - entry_low + 1, to, &retsize);
vecs_copy[entry_high].iov_len = old_iov_len - size;
vecs_copy[entry_high].iov_base += size;
entry_low = entry_high;
if (err)
break;
*retlen += retsize;
total_len -= wsize;
if (total_len == 0)
break;
err = -EINVAL;
to = 0;
}
kfree(vecs_copy);
return err;
}
static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
struct mtd_concat *concat = CONCAT(mtd);
struct mtd_oob_ops devops = *ops;
int i, err, ret = 0;
ops->retlen = ops->oobretlen = 0;
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
if (from >= subdev->size) {
from -= subdev->size;
continue;
}
/* partial read ? */
if (from + devops.len > subdev->size)
devops.len = subdev->size - from;
err = mtd_read_oob(subdev, from, &devops);
ops->retlen += devops.retlen;
ops->oobretlen += devops.oobretlen;
/* Save information about bitflips! */
if (unlikely(err)) {
if (mtd_is_eccerr(err)) {
mtd->ecc_stats.failed++;
ret = err;
} else if (mtd_is_bitflip(err)) {
mtd->ecc_stats.corrected++;
/* Do not overwrite -EBADMSG !! */
if (!ret)
ret = err;
} else
return err;
}
if (devops.datbuf) {
devops.len = ops->len - ops->retlen;
if (!devops.len)
return ret;
devops.datbuf += devops.retlen;
}
if (devops.oobbuf) {
devops.ooblen = ops->ooblen - ops->oobretlen;
if (!devops.ooblen)
return ret;
devops.oobbuf += ops->oobretlen;
}
from = 0;
}
return -EINVAL;
}
static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
struct mtd_concat *concat = CONCAT(mtd);
struct mtd_oob_ops devops = *ops;
int i, err;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
ops->retlen = ops->oobretlen = 0;
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
if (to >= subdev->size) {
to -= subdev->size;
continue;
}
/* partial write ? */
if (to + devops.len > subdev->size)
devops.len = subdev->size - to;
err = mtd_write_oob(subdev, to, &devops);
ops->retlen += devops.retlen;
ops->oobretlen += devops.oobretlen;
if (err)
return err;
if (devops.datbuf) {
devops.len = ops->len - ops->retlen;
if (!devops.len)
return 0;
devops.datbuf += devops.retlen;
}
if (devops.oobbuf) {
devops.ooblen = ops->ooblen - ops->oobretlen;
if (!devops.ooblen)
return 0;
devops.oobbuf += devops.oobretlen;
}
to = 0;
}
return -EINVAL;
}
static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct mtd_concat *concat = CONCAT(mtd);
struct mtd_info *subdev;
int i, err;
uint64_t length, offset = 0;
struct erase_info *erase;
/*
* Check for proper erase block alignment of the to-be-erased area.
* It is easier to do this based on the super device's erase
* region info rather than looking at each particular sub-device
* in turn.
*/
if (!concat->mtd.numeraseregions) {
/* the easy case: device has uniform erase block size */
if (instr->addr & (concat->mtd.erasesize - 1))
return -EINVAL;
if (instr->len & (concat->mtd.erasesize - 1))
return -EINVAL;
} else {
/* device has variable erase size */
struct mtd_erase_region_info *erase_regions =
concat->mtd.eraseregions;
/*
* Find the erase region where the to-be-erased area begins:
*/
for (i = 0; i < concat->mtd.numeraseregions &&
instr->addr >= erase_regions[i].offset; i++) ;
--i;
/*
* Now erase_regions[i] is the region in which the
* to-be-erased area begins. Verify that the starting
* offset is aligned to this region's erase size:
*/
if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
return -EINVAL;
/*
* now find the erase region where the to-be-erased area ends:
*/
for (; i < concat->mtd.numeraseregions &&
(instr->addr + instr->len) >= erase_regions[i].offset;
++i) ;
--i;
/*
* check if the ending offset is aligned to this region's erase size
*/
if (i < 0 || ((instr->addr + instr->len) &
(erase_regions[i].erasesize - 1)))
return -EINVAL;
}
/* make a local copy of instr to avoid modifying the caller's struct */
erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
if (!erase)
return -ENOMEM;
*erase = *instr;
length = instr->len;
/*
* find the subdevice where the to-be-erased area begins, adjust
* starting offset to be relative to the subdevice start
*/
for (i = 0; i < concat->num_subdev; i++) {
subdev = concat->subdev[i];
if (subdev->size <= erase->addr) {
erase->addr -= subdev->size;
offset += subdev->size;
} else {
break;
}
}
/* must never happen since size limit has been verified above */
BUG_ON(i >= concat->num_subdev);
/* now do the erase: */
err = 0;
for (; length > 0; i++) {
/* loop for all subdevices affected by this request */
subdev = concat->subdev[i]; /* get current subdevice */
/* limit length to subdevice's size: */
if (erase->addr + length > subdev->size)
erase->len = subdev->size - erase->addr;
else
erase->len = length;
length -= erase->len;
if ((err = mtd_erase(subdev, erase))) {
/* sanity check: should never happen since
* block alignment has been checked above */
BUG_ON(err == -EINVAL);
if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr = erase->fail_addr + offset;
break;
}
/*
* erase->addr specifies the offset of the area to be
* erased *within the current subdevice*. It can be
* non-zero only the first time through this loop, i.e.
* for the first subdevice where blocks need to be erased.
* All the following erases must begin at the start of the
* current subdevice, i.e. at offset zero.
*/
erase->addr = 0;
offset += subdev->size;
}
kfree(erase);
return err;
}
static int concat_xxlock(struct mtd_info *mtd, loff_t ofs, uint64_t len,
bool is_lock)
{
struct mtd_concat *concat = CONCAT(mtd);
int i, err = -EINVAL;
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
uint64_t size;
if (ofs >= subdev->size) {
size = 0;
ofs -= subdev->size;
continue;
}
if (ofs + len > subdev->size)
size = subdev->size - ofs;
else
size = len;
if (is_lock)
err = mtd_lock(subdev, ofs, size);
else
err = mtd_unlock(subdev, ofs, size);
if (err)
break;
len -= size;
if (len == 0)
break;
err = -EINVAL;
ofs = 0;
}
return err;
}
static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
return concat_xxlock(mtd, ofs, len, true);
}
static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
return concat_xxlock(mtd, ofs, len, false);
}
static int concat_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_concat *concat = CONCAT(mtd);
int i, err = -EINVAL;
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
if (ofs >= subdev->size) {
ofs -= subdev->size;
continue;
}
if (ofs + len > subdev->size)
break;
return mtd_is_locked(subdev, ofs, len);
}
return err;
}
static void concat_sync(struct mtd_info *mtd)
{
struct mtd_concat *concat = CONCAT(mtd);
int i;
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
mtd_sync(subdev);
}
}
static int concat_suspend(struct mtd_info *mtd)
{
struct mtd_concat *concat = CONCAT(mtd);
int i, rc = 0;
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
if ((rc = mtd_suspend(subdev)) < 0)
return rc;
}
return rc;
}
static void concat_resume(struct mtd_info *mtd)
{
struct mtd_concat *concat = CONCAT(mtd);
int i;
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
mtd_resume(subdev);
}
}
static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_concat *concat = CONCAT(mtd);
int i, res = 0;
if (!mtd_can_have_bb(concat->subdev[0]))
return res;
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
if (ofs >= subdev->size) {
ofs -= subdev->size;
continue;
}
res = mtd_block_isbad(subdev, ofs);
break;
}
return res;
}
static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_concat *concat = CONCAT(mtd);
int i, err = -EINVAL;
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
if (ofs >= subdev->size) {
ofs -= subdev->size;
continue;
}
err = mtd_block_markbad(subdev, ofs);
if (!err)
mtd->ecc_stats.badblocks++;
break;
}
return err;
}
/*
* This function constructs a virtual MTD device by concatenating
* num_devs MTD devices. A pointer to the new device object is
* stored to *new_dev upon success. This function does _not_
* register any devices: this is the caller's responsibility.
*/
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to concatenate */
int num_devs, /* number of subdevices */
const char *name)
{ /* name for the new device */
int i;
size_t size;
struct mtd_concat *concat;
struct mtd_info *subdev_master = NULL;
uint32_t max_erasesize, curr_erasesize;
int num_erase_region;
int max_writebufsize = 0;
printk(KERN_NOTICE "Concatenating MTD devices:\n");
for (i = 0; i < num_devs; i++)
printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
printk(KERN_NOTICE "into device \"%s\"\n", name);
/* allocate the device structure */
size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
concat = kzalloc(size, GFP_KERNEL);
if (!concat) {
printk
("memory allocation error while creating concatenated device \"%s\"\n",
name);
return NULL;
}
concat->subdev = (struct mtd_info **) (concat + 1);
/*
* Set up the new "super" device's MTD object structure, check for
* incompatibilities between the subdevices.
*/
concat->mtd.type = subdev[0]->type;
concat->mtd.flags = subdev[0]->flags;
concat->mtd.size = subdev[0]->size;
concat->mtd.erasesize = subdev[0]->erasesize;
concat->mtd.writesize = subdev[0]->writesize;
for (i = 0; i < num_devs; i++)
if (max_writebufsize < subdev[i]->writebufsize)
max_writebufsize = subdev[i]->writebufsize;
concat->mtd.writebufsize = max_writebufsize;
concat->mtd.subpage_sft = subdev[0]->subpage_sft;
concat->mtd.oobsize = subdev[0]->oobsize;
concat->mtd.oobavail = subdev[0]->oobavail;
subdev_master = mtd_get_master(subdev[0]);
if (subdev_master->_writev)
concat->mtd._writev = concat_writev;
if (subdev_master->_read_oob)
concat->mtd._read_oob = concat_read_oob;
if (subdev_master->_write_oob)
concat->mtd._write_oob = concat_write_oob;
if (subdev_master->_block_isbad)
concat->mtd._block_isbad = concat_block_isbad;
if (subdev_master->_block_markbad)
concat->mtd._block_markbad = concat_block_markbad;
if (subdev_master->_panic_write)
concat->mtd._panic_write = concat_panic_write;
if (subdev_master->_read)
concat->mtd._read = concat_read;
if (subdev_master->_write)
concat->mtd._write = concat_write;
concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
concat->subdev[0] = subdev[0];
for (i = 1; i < num_devs; i++) {
if (concat->mtd.type != subdev[i]->type) {
kfree(concat);
printk("Incompatible device type on \"%s\"\n",
subdev[i]->name);
return NULL;
}
if (concat->mtd.flags != subdev[i]->flags) {
/*
* Expect all flags except MTD_WRITEABLE to be
* equal on all subdevices.
*/
if ((concat->mtd.flags ^ subdev[i]->
flags) & ~MTD_WRITEABLE) {
kfree(concat);
printk("Incompatible device flags on \"%s\"\n",
subdev[i]->name);
return NULL;
} else
/* if writeable attribute differs,
make super device writeable */
concat->mtd.flags |=
subdev[i]->flags & MTD_WRITEABLE;
}
subdev_master = mtd_get_master(subdev[i]);
concat->mtd.size += subdev[i]->size;
concat->mtd.ecc_stats.badblocks +=
subdev[i]->ecc_stats.badblocks;
if (concat->mtd.writesize != subdev[i]->writesize ||
concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
concat->mtd.oobsize != subdev[i]->oobsize ||
!concat->mtd._read_oob != !subdev_master->_read_oob ||
!concat->mtd._write_oob != !subdev_master->_write_oob) {
/*
* Check against subdev[i] for data members, because
* subdev's attributes may be different from master
* mtd device. Check against subdev's master mtd
* device for callbacks, because the existence of
* subdev's callbacks is decided by master mtd device.
*/
kfree(concat);
printk("Incompatible OOB or ECC data on \"%s\"\n",
subdev[i]->name);
return NULL;
}
concat->subdev[i] = subdev[i];
}
mtd_set_ooblayout(&concat->mtd, subdev[0]->ooblayout);
concat->num_subdev = num_devs;
concat->mtd.name = name;
concat->mtd._erase = concat_erase;
concat->mtd._sync = concat_sync;
concat->mtd._lock = concat_lock;
concat->mtd._unlock = concat_unlock;
concat->mtd._is_locked = concat_is_locked;
concat->mtd._suspend = concat_suspend;
concat->mtd._resume = concat_resume;
/*
* Combine the erase block size info of the subdevices:
*
* first, walk the map of the new device and see how
* many changes in erase size we have
*/
max_erasesize = curr_erasesize = subdev[0]->erasesize;
num_erase_region = 1;
for (i = 0; i < num_devs; i++) {
if (subdev[i]->numeraseregions == 0) {
/* current subdevice has uniform erase size */
if (subdev[i]->erasesize != curr_erasesize) {
/* if it differs from the last subdevice's erase size, count it */
++num_erase_region;
curr_erasesize = subdev[i]->erasesize;
if (curr_erasesize > max_erasesize)
max_erasesize = curr_erasesize;
}
} else {
/* current subdevice has variable erase size */
int j;
for (j = 0; j < subdev[i]->numeraseregions; j++) {
/* walk the list of erase regions, count any changes */
if (subdev[i]->eraseregions[j].erasesize !=
curr_erasesize) {
++num_erase_region;
curr_erasesize =
subdev[i]->eraseregions[j].
erasesize;
if (curr_erasesize > max_erasesize)
max_erasesize = curr_erasesize;
}
}
}
}
if (num_erase_region == 1) {
/*
* All subdevices have the same uniform erase size.
* This is easy:
*/
concat->mtd.erasesize = curr_erasesize;
concat->mtd.numeraseregions = 0;
} else {
uint64_t tmp64;
/*
* erase block size varies across the subdevices: allocate
* space to store the data describing the variable erase regions
*/
struct mtd_erase_region_info *erase_region_p;
uint64_t begin, position;
concat->mtd.erasesize = max_erasesize;
concat->mtd.numeraseregions = num_erase_region;
concat->mtd.eraseregions = erase_region_p =
kmalloc_array(num_erase_region,
sizeof(struct mtd_erase_region_info),
GFP_KERNEL);
if (!erase_region_p) {
kfree(concat);
printk
("memory allocation error while creating erase region list"
" for device \"%s\"\n", name);
return NULL;
}
/*
* walk the map of the new device once more and fill in
* erase region info:
*/
curr_erasesize = subdev[0]->erasesize;
begin = position = 0;
for (i = 0; i < num_devs; i++) {
if (subdev[i]->numeraseregions == 0) {
/* current subdevice has uniform erase size */
if (subdev[i]->erasesize != curr_erasesize) {
/*
* fill in an mtd_erase_region_info structure for the area
* we have walked so far:
*/
erase_region_p->offset = begin;
erase_region_p->erasesize =
curr_erasesize;
tmp64 = position - begin;
do_div(tmp64, curr_erasesize);
erase_region_p->numblocks = tmp64;
begin = position;
curr_erasesize = subdev[i]->erasesize;
++erase_region_p;
}
position += subdev[i]->size;
} else {
/* current subdevice has variable erase size */
int j;
for (j = 0; j < subdev[i]->numeraseregions; j++) {
/* walk the list of erase regions, count any changes */
if (subdev[i]->eraseregions[j].
erasesize != curr_erasesize) {
erase_region_p->offset = begin;
erase_region_p->erasesize =
curr_erasesize;
tmp64 = position - begin;
do_div(tmp64, curr_erasesize);
erase_region_p->numblocks = tmp64;
begin = position;
curr_erasesize =
subdev[i]->eraseregions[j].
erasesize;
++erase_region_p;
}
position +=
subdev[i]->eraseregions[j].
numblocks * (uint64_t)curr_erasesize;
}
}
}
/* Now write the final entry */
erase_region_p->offset = begin;
erase_region_p->erasesize = curr_erasesize;
tmp64 = position - begin;
do_div(tmp64, curr_erasesize);
erase_region_p->numblocks = tmp64;
}
return &concat->mtd;
}
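/*
 * Typical use is from a map or board driver that has probed several
 * chips.  A rough sketch (device names and count are illustrative):
 *
 *	struct mtd_info *chips[2] = { mtd0, mtd1 };
 *	struct mtd_info *combined;
 *
 *	combined = mtd_concat_create(chips, 2, "board-flash");
 *	if (combined)
 *		mtd_device_register(combined, NULL, 0);
 *
 * and mtd_concat_destroy() undoes the concatenation on teardown.
 */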
/* Cleans the context obtained from mtd_concat_create() */
void mtd_concat_destroy(struct mtd_info *mtd)
{
struct mtd_concat *concat = CONCAT(mtd);
if (concat->mtd.numeraseregions)
kfree(concat->mtd.eraseregions);
kfree(concat);
}
EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <[email protected]>");
MODULE_DESCRIPTION("Generic support for concatenating of MTD devices");
| linux-master | drivers/mtd/mtdconcat.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Simple read-only (writable only for RAM) mtdblock driver
*
* Copyright © 2001-2010 David Woodhouse <[email protected]>
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
#include <linux/module.h>
#include <linux/major.h>
static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
unsigned long block, char *buf)
{
size_t retlen;
int err;
err = mtd_read(dev->mtd, (block * 512), 512, &retlen, buf);
if (err && !mtd_is_bitflip(err))
return 1;
return 0;
}
static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
unsigned long block, char *buf)
{
size_t retlen;
if (mtd_write(dev->mtd, (block * 512), 512, &retlen, buf))
return 1;
return 0;
}
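/*
 * Called by the blktrans core for every MTD device: register a
 * read-only /dev/mtdblockN view of it, using 512-byte sectors.
 */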
static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return;
dev->mtd = mtd;
dev->devnum = mtd->index;
dev->size = mtd->size >> 9;
dev->tr = tr;
dev->readonly = 1;
if (mtd_type_is_nand(mtd))
pr_warn_ratelimited("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
tr->name, mtd->name);
if (add_mtd_blktrans_dev(dev))
kfree(dev);
}
static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
del_mtd_blktrans_dev(dev);
}
static struct mtd_blktrans_ops mtdblock_tr = {
.name = "mtdblock",
.major = MTD_BLOCK_MAJOR,
.part_bits = 0,
.blksize = 512,
.readsect = mtdblock_readsect,
.writesect = mtdblock_writesect,
.add_mtd = mtdblock_add_mtd,
.remove_dev = mtdblock_remove_dev,
.owner = THIS_MODULE,
};
module_mtd_blktrans(mtdblock_tr);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <[email protected]>");
MODULE_DESCRIPTION("Simple read-only block device emulation access to MTD devices");
| linux-master | drivers/mtd/mtdblock_ro.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* MTD Oops/Panic logger
*
* Copyright © 2007 Nokia Corporation. All rights reserved.
*
* Author: Richard Purdie <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/timekeeping.h>
#include <linux/mtd/mtd.h>
#include <linux/kmsg_dump.h>
/* Maximum MTD partition size */
#define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)
static unsigned long record_size = 4096;
module_param(record_size, ulong, 0400);
MODULE_PARM_DESC(record_size,
"record size for MTD OOPS pages in bytes (default 4096)");
static char mtddev[80];
module_param_string(mtddev, mtddev, 80, 0400);
MODULE_PARM_DESC(mtddev,
"name or index number of the MTD device to use");
static int dump_oops = 1;
module_param(dump_oops, int, 0600);
MODULE_PARM_DESC(dump_oops,
"set to 1 to dump oopses, 0 to only dump panics (default 1)");
#define MTDOOPS_KERNMSG_MAGIC_v1 0x5d005d00 /* Original */
#define MTDOOPS_KERNMSG_MAGIC_v2 0x5d005e00 /* Adds the timestamp */
struct mtdoops_hdr {
u32 seq;
u32 magic;
ktime_t timestamp;
} __packed;
static struct mtdoops_context {
struct kmsg_dumper dump;
int mtd_index;
struct work_struct work_erase;
struct work_struct work_write;
struct mtd_info *mtd;
int oops_pages;
int nextpage;
int nextcount;
unsigned long *oops_page_used;
unsigned long oops_buf_busy;
void *oops_buf;
} oops_cxt;
static void mark_page_used(struct mtdoops_context *cxt, int page)
{
set_bit(page, cxt->oops_page_used);
}
static void mark_page_unused(struct mtdoops_context *cxt, int page)
{
clear_bit(page, cxt->oops_page_used);
}
static int page_is_used(struct mtdoops_context *cxt, int page)
{
return test_bit(page, cxt->oops_page_used);
}
static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
{
struct mtd_info *mtd = cxt->mtd;
u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize;
u32 start_page = start_page_offset / record_size;
u32 erase_pages = mtd->erasesize / record_size;
struct erase_info erase;
int ret;
int page;
erase.addr = offset;
erase.len = mtd->erasesize;
ret = mtd_erase(mtd, &erase);
if (ret) {
pr_warn("erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
(unsigned long long)erase.addr,
(unsigned long long)erase.len, mtddev);
return ret;
}
/* Mark pages as unused */
for (page = start_page; page < start_page + erase_pages; page++)
mark_page_unused(cxt, page);
return 0;
}
static void mtdoops_erase(struct mtdoops_context *cxt)
{
struct mtd_info *mtd = cxt->mtd;
int i = 0, j, ret, mod;
/* We were unregistered */
if (!mtd)
return;
mod = (cxt->nextpage * record_size) % mtd->erasesize;
if (mod != 0) {
cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
if (cxt->nextpage >= cxt->oops_pages)
cxt->nextpage = 0;
}
while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) {
badblock:
pr_warn("bad block at %08lx\n",
cxt->nextpage * record_size);
i++;
cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
if (cxt->nextpage >= cxt->oops_pages)
cxt->nextpage = 0;
if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
pr_err("all blocks bad!\n");
return;
}
}
if (ret < 0) {
pr_err("mtd_block_isbad failed, aborting\n");
return;
}
for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);
if (ret >= 0) {
pr_debug("ready %d, %d\n",
cxt->nextpage, cxt->nextcount);
return;
}
if (ret == -EIO) {
ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
if (ret < 0 && ret != -EOPNOTSUPP) {
pr_err("block_markbad failed, aborting\n");
return;
}
}
goto badblock;
}
/* Scheduled work - when we can't proceed without erasing a block */
static void mtdoops_workfunc_erase(struct work_struct *work)
{
struct mtdoops_context *cxt =
container_of(work, struct mtdoops_context, work_erase);
mtdoops_erase(cxt);
}
static void mtdoops_inc_counter(struct mtdoops_context *cxt, int panic)
{
cxt->nextpage++;
if (cxt->nextpage >= cxt->oops_pages)
cxt->nextpage = 0;
cxt->nextcount++;
if (cxt->nextcount == 0xffffffff)
cxt->nextcount = 0;
if (page_is_used(cxt, cxt->nextpage)) {
pr_debug("not ready %d, %d (erase %s)\n",
cxt->nextpage, cxt->nextcount,
panic ? "immediately" : "scheduled");
if (panic) {
/* In case of panic, erase immediately */
mtdoops_erase(cxt);
} else {
/* Otherwise, schedule work to erase it "nicely" */
schedule_work(&cxt->work_erase);
}
} else {
pr_debug("ready %d, %d (no erase)\n",
cxt->nextpage, cxt->nextcount);
}
}
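/*
 * Write the record currently staged in oops_buf (mtdoops header followed
 * by the kmsg dump) to the next page, via mtd_panic_write() when called
 * from panic context and mtd_write() otherwise.
 */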
static void mtdoops_write(struct mtdoops_context *cxt, int panic)
{
struct mtd_info *mtd = cxt->mtd;
size_t retlen;
struct mtdoops_hdr *hdr;
int ret;
if (test_and_set_bit(0, &cxt->oops_buf_busy))
return;
/* Add mtdoops header to the buffer */
hdr = (struct mtdoops_hdr *)cxt->oops_buf;
hdr->seq = cxt->nextcount;
hdr->magic = MTDOOPS_KERNMSG_MAGIC_v2;
hdr->timestamp = ktime_get_real();
if (panic) {
ret = mtd_panic_write(mtd, cxt->nextpage * record_size,
record_size, &retlen, cxt->oops_buf);
if (ret == -EOPNOTSUPP) {
pr_err("Cannot write from panic without panic_write\n");
goto out;
}
} else
ret = mtd_write(mtd, cxt->nextpage * record_size,
record_size, &retlen, cxt->oops_buf);
if (retlen != record_size || ret < 0)
pr_err("write failure at %ld (%td of %ld written), error %d\n",
cxt->nextpage * record_size, retlen, record_size, ret);
mark_page_used(cxt, cxt->nextpage);
memset(cxt->oops_buf, 0xff, record_size);
mtdoops_inc_counter(cxt, panic);
out:
clear_bit(0, &cxt->oops_buf_busy);
}
static void mtdoops_workfunc_write(struct work_struct *work)
{
struct mtdoops_context *cxt =
container_of(work, struct mtdoops_context, work_write);
mtdoops_write(cxt, 0);
}
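/*
 * Scan every record page for the highest sequence number so that logging
 * resumes where it left off across reboots; the 0x40000000/0xc0000000
 * comparisons below handle sequence-counter wrap-around.
 */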
static void find_next_position(struct mtdoops_context *cxt)
{
struct mtd_info *mtd = cxt->mtd;
struct mtdoops_hdr hdr;
int ret, page, maxpos = 0;
u32 maxcount = 0xffffffff;
size_t retlen;
for (page = 0; page < cxt->oops_pages; page++) {
if (mtd_block_isbad(mtd, page * record_size))
continue;
/* Assume the page is used */
mark_page_used(cxt, page);
ret = mtd_read(mtd, page * record_size, sizeof(hdr),
&retlen, (u_char *)&hdr);
if (retlen != sizeof(hdr) ||
(ret < 0 && !mtd_is_bitflip(ret))) {
pr_err("read failure at %ld (%zu of %zu read), err %d\n",
page * record_size, retlen, sizeof(hdr), ret);
continue;
}
if (hdr.seq == 0xffffffff && hdr.magic == 0xffffffff)
mark_page_unused(cxt, page);
if (hdr.seq == 0xffffffff ||
(hdr.magic != MTDOOPS_KERNMSG_MAGIC_v1 &&
hdr.magic != MTDOOPS_KERNMSG_MAGIC_v2))
continue;
if (maxcount == 0xffffffff) {
maxcount = hdr.seq;
maxpos = page;
} else if (hdr.seq < 0x40000000 && maxcount > 0xc0000000) {
maxcount = hdr.seq;
maxpos = page;
} else if (hdr.seq > maxcount && hdr.seq < 0xc0000000) {
maxcount = hdr.seq;
maxpos = page;
} else if (hdr.seq > maxcount && hdr.seq > 0xc0000000
&& maxcount > 0x80000000) {
maxcount = hdr.seq;
maxpos = page;
}
}
if (maxcount == 0xffffffff) {
cxt->nextpage = cxt->oops_pages - 1;
cxt->nextcount = 0;
}
else {
cxt->nextpage = maxpos;
cxt->nextcount = maxcount;
}
mtdoops_inc_counter(cxt, 0);
}
static void mtdoops_do_dump(struct kmsg_dumper *dumper,
enum kmsg_dump_reason reason)
{
struct mtdoops_context *cxt = container_of(dumper,
struct mtdoops_context, dump);
struct kmsg_dump_iter iter;
/* Only dump oopses if dump_oops is set */
if (reason == KMSG_DUMP_OOPS && !dump_oops)
return;
kmsg_dump_rewind(&iter);
if (test_and_set_bit(0, &cxt->oops_buf_busy))
return;
kmsg_dump_get_buffer(&iter, true,
cxt->oops_buf + sizeof(struct mtdoops_hdr),
record_size - sizeof(struct mtdoops_hdr), NULL);
clear_bit(0, &cxt->oops_buf_busy);
if (reason != KMSG_DUMP_OOPS) {
/* Panics must be written immediately */
mtdoops_write(cxt, 1);
} else {
/* For other cases, schedule work to write it "nicely" */
schedule_work(&cxt->work_write);
}
}
static void mtdoops_notify_add(struct mtd_info *mtd)
{
struct mtdoops_context *cxt = &oops_cxt;
u64 mtdoops_pages = div_u64(mtd->size, record_size);
int err;
if (!strcmp(mtd->name, mtddev))
cxt->mtd_index = mtd->index;
if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
return;
if (mtd->size < mtd->erasesize * 2) {
pr_err("MTD partition %d not big enough for mtdoops\n",
mtd->index);
return;
}
if (mtd->erasesize < record_size) {
pr_err("eraseblock size of MTD partition %d too small\n",
mtd->index);
return;
}
if (mtd->size > MTDOOPS_MAX_MTD_SIZE) {
pr_err("mtd%d is too large (limit is %d MiB)\n",
mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024);
return;
}
/* oops_page_used is a bit field */
cxt->oops_page_used =
vmalloc(array_size(sizeof(unsigned long),
DIV_ROUND_UP(mtdoops_pages,
BITS_PER_LONG)));
if (!cxt->oops_page_used) {
pr_err("could not allocate page array\n");
return;
}
cxt->dump.max_reason = KMSG_DUMP_OOPS;
cxt->dump.dump = mtdoops_do_dump;
err = kmsg_dump_register(&cxt->dump);
if (err) {
pr_err("registering kmsg dumper failed, error %d\n", err);
vfree(cxt->oops_page_used);
cxt->oops_page_used = NULL;
return;
}
cxt->mtd = mtd;
cxt->oops_pages = (int)mtd->size / record_size;
find_next_position(cxt);
pr_info("Attached to MTD device %d\n", mtd->index);
}
static void mtdoops_notify_remove(struct mtd_info *mtd)
{
struct mtdoops_context *cxt = &oops_cxt;
if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
return;
if (kmsg_dump_unregister(&cxt->dump) < 0)
pr_warn("could not unregister kmsg_dumper\n");
cxt->mtd = NULL;
flush_work(&cxt->work_erase);
flush_work(&cxt->work_write);
}
static struct mtd_notifier mtdoops_notifier = {
.add = mtdoops_notify_add,
.remove = mtdoops_notify_remove,
};
static int __init mtdoops_init(void)
{
struct mtdoops_context *cxt = &oops_cxt;
int mtd_index;
char *endp;
if (strlen(mtddev) == 0) {
pr_err("mtd device (mtddev=name/number) must be supplied\n");
return -EINVAL;
}
if ((record_size & 4095) != 0) {
pr_err("record_size must be a multiple of 4096\n");
return -EINVAL;
}
if (record_size < 4096) {
pr_err("record_size must be over 4096 bytes\n");
return -EINVAL;
}
/* Setup the MTD device to use */
cxt->mtd_index = -1;
mtd_index = simple_strtoul(mtddev, &endp, 0);
if (*endp == '\0')
cxt->mtd_index = mtd_index;
cxt->oops_buf = vmalloc(record_size);
if (!cxt->oops_buf)
return -ENOMEM;
memset(cxt->oops_buf, 0xff, record_size);
cxt->oops_buf_busy = 0;
INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
register_mtd_user(&mtdoops_notifier);
return 0;
}
static void __exit mtdoops_exit(void)
{
struct mtdoops_context *cxt = &oops_cxt;
unregister_mtd_user(&mtdoops_notifier);
vfree(cxt->oops_buf);
vfree(cxt->oops_page_used);
}
module_init(mtdoops_init);
module_exit(mtdoops_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <[email protected]>");
MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");
| linux-master | drivers/mtd/mtdoops.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* inftlcore.c -- Linux driver for Inverse Flash Translation Layer (INFTL)
*
* Copyright © 2002, Greg Ungerer ([email protected])
*
* Based heavily on the nftlcore.c code which is:
* Copyright © 1999 Machine Vision Holdings, Inc.
* Copyright © 1999 David Woodhouse <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/hdreg.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nftl.h>
#include <linux/mtd/inftl.h>
#include <linux/mtd/rawnand.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
#include <asm/io.h>
/*
* Maximum number of loops while examining next block, to have a
* chance to detect consistency problems (they should never happen
 * because of the checks done during mounting).
*/
#define MAX_LOOPS 10000
static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
struct INFTLrecord *inftl;
unsigned long temp;
if (!mtd_type_is_nand(mtd) || mtd->size > UINT_MAX)
return;
/* OK, this is moderately ugly. But probably safe. Alternatives? */
if (memcmp(mtd->name, "DiskOnChip", 10))
return;
if (!mtd->_block_isbad) {
printk(KERN_ERR
"INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
"Please use the new diskonchip driver under the NAND subsystem.\n");
return;
}
pr_debug("INFTL: add_mtd for %s\n", mtd->name);
inftl = kzalloc(sizeof(*inftl), GFP_KERNEL);
if (!inftl)
return;
inftl->mbd.mtd = mtd;
inftl->mbd.devnum = -1;
inftl->mbd.tr = tr;
if (INFTL_mount(inftl) < 0) {
printk(KERN_WARNING "INFTL: could not mount device\n");
kfree(inftl);
return;
}
/* OK, it's a new one. Set up all the data structures. */
/* Calculate geometry */
inftl->cylinders = 1024;
inftl->heads = 16;
temp = inftl->cylinders * inftl->heads;
inftl->sectors = inftl->mbd.size / temp;
if (inftl->mbd.size % temp) {
inftl->sectors++;
temp = inftl->cylinders * inftl->sectors;
inftl->heads = inftl->mbd.size / temp;
if (inftl->mbd.size % temp) {
inftl->heads++;
temp = inftl->heads * inftl->sectors;
inftl->cylinders = inftl->mbd.size / temp;
}
}
if (inftl->mbd.size != inftl->heads * inftl->cylinders * inftl->sectors) {
/*
Oh no we don't have
mbd.size == heads * cylinders * sectors
*/
printk(KERN_WARNING "INFTL: cannot calculate a geometry to "
"match size of 0x%lx.\n", inftl->mbd.size);
printk(KERN_WARNING "INFTL: using C:%d H:%d S:%d "
"(== 0x%lx sects)\n",
inftl->cylinders, inftl->heads , inftl->sectors,
(long)inftl->cylinders * (long)inftl->heads *
(long)inftl->sectors );
}
if (add_mtd_blktrans_dev(&inftl->mbd)) {
kfree(inftl->PUtable);
kfree(inftl->VUtable);
kfree(inftl);
return;
}
#ifdef PSYCHO_DEBUG
printk(KERN_INFO "INFTL: Found new inftl%c\n", inftl->mbd.devnum + 'a');
#endif
return;
}
static void inftl_remove_dev(struct mtd_blktrans_dev *dev)
{
struct INFTLrecord *inftl = (void *)dev;
pr_debug("INFTL: remove_dev (i=%d)\n", dev->devnum);
del_mtd_blktrans_dev(dev);
kfree(inftl->PUtable);
kfree(inftl->VUtable);
}
/*
* Actual INFTL access routines.
*/
/*
* Read oob data from flash
*/
int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf)
{
struct mtd_oob_ops ops = { };
int res;
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = offs & (mtd->writesize - 1);
ops.ooblen = len;
ops.oobbuf = buf;
ops.datbuf = NULL;
res = mtd_read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
*retlen = ops.oobretlen;
return res;
}
/*
* Write oob data to flash
*/
int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf)
{
struct mtd_oob_ops ops = { };
int res;
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = offs & (mtd->writesize - 1);
ops.ooblen = len;
ops.oobbuf = buf;
ops.datbuf = NULL;
res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
*retlen = ops.oobretlen;
return res;
}
/*
* Write data and oob to flash
*/
static int inftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf, uint8_t *oob)
{
struct mtd_oob_ops ops = { };
int res;
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = offs;
ops.ooblen = mtd->oobsize;
ops.oobbuf = oob;
ops.datbuf = buf;
ops.len = len;
res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
*retlen = ops.retlen;
return res;
}
/*
* INFTL_findfreeblock: Find a free Erase Unit on the INFTL partition.
 * This function is used when the given Virtual Unit Chain needs a new
 * Erase Unit to write into.
*/
static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate)
{
u16 pot = inftl->LastFreeEUN;
int silly = inftl->nb_blocks;
pr_debug("INFTL: INFTL_findfreeblock(inftl=%p,desperate=%d)\n",
inftl, desperate);
/*
* Normally, we force a fold to happen before we run out of free
* blocks completely.
*/
if (!desperate && inftl->numfreeEUNs < 2) {
pr_debug("INFTL: there are too few free EUNs (%d)\n",
inftl->numfreeEUNs);
return BLOCK_NIL;
}
/* Scan for a free block */
do {
if (inftl->PUtable[pot] == BLOCK_FREE) {
inftl->LastFreeEUN = pot;
return pot;
}
if (++pot > inftl->lastEUN)
pot = 0;
if (!silly--) {
printk(KERN_WARNING "INFTL: no free blocks found! "
"EUN range = %d - %d\n", 0, inftl->LastFreeEUN);
return BLOCK_NIL;
}
} while (pot != inftl->LastFreeEUN);
return BLOCK_NIL;
}
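/*
 * INFTL_foldchain - collapse a Virtual Unit Chain into its newest unit.
 *
 * Copies every live sector from the older Erase Units in the chain into
 * the target (newest) unit, then erases the older units one by one so
 * that they can be returned to the free pool.
 */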
static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned pendingblock)
{
u16 BlockMap[MAX_SECTORS_PER_UNIT];
unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT];
unsigned int thisEUN, prevEUN, status;
struct mtd_info *mtd = inftl->mbd.mtd;
int block, silly;
unsigned int targetEUN;
struct inftl_oob oob;
size_t retlen;
pr_debug("INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,pending=%d)\n",
inftl, thisVUC, pendingblock);
memset(BlockMap, 0xff, sizeof(BlockMap));
memset(BlockDeleted, 0, sizeof(BlockDeleted));
thisEUN = targetEUN = inftl->VUtable[thisVUC];
if (thisEUN == BLOCK_NIL) {
printk(KERN_WARNING "INFTL: trying to fold non-existent "
"Virtual Unit Chain %d!\n", thisVUC);
return BLOCK_NIL;
}
/*
* Scan to find the Erase Unit which holds the actual data for each
* 512-byte block within the Chain.
*/
silly = MAX_LOOPS;
while (thisEUN < inftl->nb_blocks) {
for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) {
if ((BlockMap[block] != BLOCK_NIL) ||
BlockDeleted[block])
continue;
if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize)
+ (block * SECTORSIZE), 16, &retlen,
(char *)&oob) < 0)
status = SECTOR_IGNORE;
else
status = oob.b.Status | oob.b.Status1;
switch(status) {
case SECTOR_FREE:
case SECTOR_IGNORE:
break;
case SECTOR_USED:
BlockMap[block] = thisEUN;
continue;
case SECTOR_DELETED:
BlockDeleted[block] = 1;
continue;
default:
printk(KERN_WARNING "INFTL: unknown status "
"for block %d in EUN %d: %x\n",
block, thisEUN, status);
break;
}
}
if (!silly--) {
printk(KERN_WARNING "INFTL: infinite loop in Virtual "
"Unit Chain 0x%x\n", thisVUC);
return BLOCK_NIL;
}
thisEUN = inftl->PUtable[thisEUN];
}
/*
* OK. We now know the location of every block in the Virtual Unit
* Chain, and the Erase Unit into which we are supposed to be copying.
* Go for it.
*/
pr_debug("INFTL: folding chain %d into unit %d\n", thisVUC, targetEUN);
for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) {
unsigned char movebuf[SECTORSIZE];
int ret;
/*
* If it's in the target EUN already, or if it's pending write,
* do nothing.
*/
if (BlockMap[block] == targetEUN || (pendingblock ==
(thisVUC * (inftl->EraseSize / SECTORSIZE) + block))) {
continue;
}
/*
 * Only copy blocks that are in use (free blocks can only
* happen in case of media errors or deleted blocks).
*/
if (BlockMap[block] == BLOCK_NIL)
continue;
ret = mtd_read(mtd,
(inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE),
SECTORSIZE,
&retlen,
movebuf);
if (ret < 0 && !mtd_is_bitflip(ret)) {
ret = mtd_read(mtd,
(inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE),
SECTORSIZE,
&retlen,
movebuf);
if (ret != -EIO)
pr_debug("INFTL: error went away on retry?\n");
}
memset(&oob, 0xff, sizeof(struct inftl_oob));
oob.b.Status = oob.b.Status1 = SECTOR_USED;
inftl_write(inftl->mbd.mtd, (inftl->EraseSize * targetEUN) +
(block * SECTORSIZE), SECTORSIZE, &retlen,
movebuf, (char *)&oob);
}
/*
* Newest unit in chain now contains data from _all_ older units.
* So go through and erase each unit in chain, oldest first. (This
* is important: by erasing the oldest first, if we crash/reboot then it
* is relatively simple to clean up the mess).
*/
pr_debug("INFTL: want to erase virtual chain %d\n", thisVUC);
for (;;) {
/* Find oldest unit in chain. */
thisEUN = inftl->VUtable[thisVUC];
prevEUN = BLOCK_NIL;
while (inftl->PUtable[thisEUN] != BLOCK_NIL) {
prevEUN = thisEUN;
thisEUN = inftl->PUtable[thisEUN];
}
/* Check if we are all done */
if (thisEUN == targetEUN)
break;
/* Unlink the last block from the chain. */
inftl->PUtable[prevEUN] = BLOCK_NIL;
/* Now try to erase it. */
if (INFTL_formatblock(inftl, thisEUN) < 0) {
/*
* Could not erase : mark block as reserved.
*/
inftl->PUtable[thisEUN] = BLOCK_RESERVED;
} else {
/* Correctly erased : mark it as free */
inftl->PUtable[thisEUN] = BLOCK_FREE;
inftl->numfreeEUNs++;
}
}
return targetEUN;
}
static u16 INFTL_makefreeblock(struct INFTLrecord *inftl, unsigned pendingblock)
{
/*
* This is the part that needs some cleverness applied.
* For now, I'm doing the minimum applicable to actually
* get the thing to work.
* Wear-levelling and other clever stuff needs to be implemented
* and we also need to do some assessment of the results when
* the system loses power half-way through the routine.
*/
u16 LongestChain = 0;
u16 ChainLength = 0, thislen;
u16 chain, EUN;
pr_debug("INFTL: INFTL_makefreeblock(inftl=%p,"
"pending=%d)\n", inftl, pendingblock);
for (chain = 0; chain < inftl->nb_blocks; chain++) {
EUN = inftl->VUtable[chain];
thislen = 0;
while (EUN <= inftl->lastEUN) {
thislen++;
EUN = inftl->PUtable[EUN];
if (thislen > 0xff00) {
printk(KERN_WARNING "INFTL: endless loop in "
"Virtual Chain %d: Unit %x\n",
chain, EUN);
/*
* Actually, don't return failure.
* Just ignore this chain and get on with it.
*/
thislen = 0;
break;
}
}
if (thislen > ChainLength) {
ChainLength = thislen;
LongestChain = chain;
}
}
if (ChainLength < 2) {
printk(KERN_WARNING "INFTL: no Virtual Unit Chains available "
"for folding. Failing request\n");
return BLOCK_NIL;
}
return INFTL_foldchain(inftl, LongestChain, pendingblock);
}
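/*
 * nrbits: count the number of set bits in the low 'bitcount' bits of
 * 'val'. Used below to compute the per-field parity bits stored in the
 * unit header's parityPerField byte.
 */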
static int nrbits(unsigned int val, int bitcount)
{
int i, total = 0;
for (i = 0; (i < bitcount); i++)
total += (((0x1 << i) & val) ? 1 : 0);
return total;
}
/*
* INFTL_findwriteunit: Return the unit number into which we can write
* for this block. Make it available if it isn't already.
*/
static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
{
unsigned int thisVUC = block / (inftl->EraseSize / SECTORSIZE);
unsigned int thisEUN, writeEUN, prev_block, status;
unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize -1);
struct mtd_info *mtd = inftl->mbd.mtd;
struct inftl_oob oob;
struct inftl_bci bci;
unsigned char anac, nacs, parity;
size_t retlen;
int silly, silly2 = 3;
pr_debug("INFTL: INFTL_findwriteunit(inftl=%p,block=%d)\n",
inftl, block);
do {
/*
* Scan the media to find a unit in the VUC which has
* a free space for the block in question.
*/
writeEUN = BLOCK_NIL;
thisEUN = inftl->VUtable[thisVUC];
silly = MAX_LOOPS;
while (thisEUN <= inftl->lastEUN) {
inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
blockofs, 8, &retlen, (char *)&bci);
status = bci.Status | bci.Status1;
pr_debug("INFTL: status of block %d in EUN %d is %x\n",
block , writeEUN, status);
switch(status) {
case SECTOR_FREE:
writeEUN = thisEUN;
break;
case SECTOR_DELETED:
case SECTOR_USED:
/* Can't go any further */
goto hitused;
case SECTOR_IGNORE:
break;
default:
/*
* Invalid block. Don't use it any more.
* Must implement.
*/
break;
}
if (!silly--) {
printk(KERN_WARNING "INFTL: infinite loop in "
"Virtual Unit Chain 0x%x\n", thisVUC);
return BLOCK_NIL;
}
/* Skip to next block in chain */
thisEUN = inftl->PUtable[thisEUN];
}
hitused:
if (writeEUN != BLOCK_NIL)
return writeEUN;
/*
* OK. We didn't find one in the existing chain, or there
* is no existing chain. Allocate a new one.
*/
writeEUN = INFTL_findfreeblock(inftl, 0);
if (writeEUN == BLOCK_NIL) {
/*
* That didn't work - there were no free blocks just
* waiting to be picked up. We're going to have to fold
* a chain to make room.
*/
thisEUN = INFTL_makefreeblock(inftl, block);
/*
* Hopefully we freed something; let's try again.
* This time we are desperate...
*/
pr_debug("INFTL: using desperate==1 to find free EUN "
"to accommodate write to VUC %d\n",
thisVUC);
writeEUN = INFTL_findfreeblock(inftl, 1);
if (writeEUN == BLOCK_NIL) {
/*
* Ouch. This should never happen - we should
* always be able to make some room somehow.
* If we get here, we've allocated more storage
* space than actual media, or our makefreeblock
* routine is missing something.
*/
printk(KERN_WARNING "INFTL: cannot make free "
"space.\n");
#ifdef DEBUG
INFTL_dumptables(inftl);
INFTL_dumpVUchains(inftl);
#endif
return BLOCK_NIL;
}
}
/*
* Insert new block into virtual chain. Firstly update the
* block headers in flash...
*/
anac = 0;
nacs = 0;
thisEUN = inftl->VUtable[thisVUC];
if (thisEUN != BLOCK_NIL) {
inftl_read_oob(mtd, thisEUN * inftl->EraseSize
+ 8, 8, &retlen, (char *)&oob.u);
anac = oob.u.a.ANAC + 1;
nacs = oob.u.a.NACs + 1;
}
prev_block = inftl->VUtable[thisVUC];
if (prev_block < inftl->nb_blocks)
prev_block -= inftl->firstEUN;
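/*
 * parityPerField layout (one bit per header field): bit 0 = parity of
 * virtualUnitNo, bit 1 = parity of prevUnitNo, bit 2 = parity of ANAC,
 * bit 3 = parity of NACs.
 */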
parity = (nrbits(thisVUC, 16) & 0x1) ? 0x1 : 0;
parity |= (nrbits(prev_block, 16) & 0x1) ? 0x2 : 0;
parity |= (nrbits(anac, 8) & 0x1) ? 0x4 : 0;
parity |= (nrbits(nacs, 8) & 0x1) ? 0x8 : 0;
oob.u.a.virtualUnitNo = cpu_to_le16(thisVUC);
oob.u.a.prevUnitNo = cpu_to_le16(prev_block);
oob.u.a.ANAC = anac;
oob.u.a.NACs = nacs;
oob.u.a.parityPerField = parity;
oob.u.a.discarded = 0xaa;
inftl_write_oob(mtd, writeEUN * inftl->EraseSize + 8, 8,
&retlen, (char *)&oob.u);
/* Also back up header... */
oob.u.b.virtualUnitNo = cpu_to_le16(thisVUC);
oob.u.b.prevUnitNo = cpu_to_le16(prev_block);
oob.u.b.ANAC = anac;
oob.u.b.NACs = nacs;
oob.u.b.parityPerField = parity;
oob.u.b.discarded = 0xaa;
inftl_write_oob(mtd, writeEUN * inftl->EraseSize +
SECTORSIZE * 4 + 8, 8, &retlen, (char *)&oob.u);
inftl->PUtable[writeEUN] = inftl->VUtable[thisVUC];
inftl->VUtable[thisVUC] = writeEUN;
inftl->numfreeEUNs--;
return writeEUN;
} while (silly2--);
printk(KERN_WARNING "INFTL: error folding to make room for Virtual "
"Unit Chain 0x%x\n", thisVUC);
return BLOCK_NIL;
}
/*
* Given a Virtual Unit Chain, see if it can be deleted, and if so do it.
*/
static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
{
struct mtd_info *mtd = inftl->mbd.mtd;
unsigned char BlockUsed[MAX_SECTORS_PER_UNIT];
unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT];
unsigned int thisEUN, status;
int block, silly;
struct inftl_bci bci;
size_t retlen;
pr_debug("INFTL: INFTL_trydeletechain(inftl=%p,"
"thisVUC=%d)\n", inftl, thisVUC);
memset(BlockUsed, 0, sizeof(BlockUsed));
memset(BlockDeleted, 0, sizeof(BlockDeleted));
thisEUN = inftl->VUtable[thisVUC];
if (thisEUN == BLOCK_NIL) {
printk(KERN_WARNING "INFTL: trying to delete non-existent "
"Virtual Unit Chain %d!\n", thisVUC);
return;
}
/*
* Scan through the Erase Units to determine whether any data is in
* each of the 512-byte blocks within the Chain.
*/
silly = MAX_LOOPS;
while (thisEUN < inftl->nb_blocks) {
for (block = 0; block < inftl->EraseSize/SECTORSIZE; block++) {
if (BlockUsed[block] || BlockDeleted[block])
continue;
if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize)
+ (block * SECTORSIZE), 8 , &retlen,
(char *)&bci) < 0)
status = SECTOR_IGNORE;
else
status = bci.Status | bci.Status1;
switch(status) {
case SECTOR_FREE:
case SECTOR_IGNORE:
break;
case SECTOR_USED:
BlockUsed[block] = 1;
continue;
case SECTOR_DELETED:
BlockDeleted[block] = 1;
continue;
default:
printk(KERN_WARNING "INFTL: unknown status "
"for block %d in EUN %d: 0x%x\n",
block, thisEUN, status);
}
}
if (!silly--) {
printk(KERN_WARNING "INFTL: infinite loop in Virtual "
"Unit Chain 0x%x\n", thisVUC);
return;
}
thisEUN = inftl->PUtable[thisEUN];
}
for (block = 0; block < inftl->EraseSize/SECTORSIZE; block++)
if (BlockUsed[block])
return;
/*
* For each block in the chain free it and make it available
* for future use. Erase from the oldest unit first.
*/
pr_debug("INFTL: deleting empty VUC %d\n", thisVUC);
for (;;) {
u16 *prevEUN = &inftl->VUtable[thisVUC];
thisEUN = *prevEUN;
/* If the chain is all gone already, we're done */
if (thisEUN == BLOCK_NIL) {
pr_debug("INFTL: Empty VUC %d for deletion was already absent\n", thisEUN);
return;
}
/* Find oldest unit in chain. */
while (inftl->PUtable[thisEUN] != BLOCK_NIL) {
BUG_ON(thisEUN >= inftl->nb_blocks);
prevEUN = &inftl->PUtable[thisEUN];
thisEUN = *prevEUN;
}
pr_debug("Deleting EUN %d from VUC %d\n",
thisEUN, thisVUC);
if (INFTL_formatblock(inftl, thisEUN) < 0) {
/*
* Could not erase : mark block as reserved.
*/
inftl->PUtable[thisEUN] = BLOCK_RESERVED;
} else {
/* Correctly erased : mark it as free */
inftl->PUtable[thisEUN] = BLOCK_FREE;
inftl->numfreeEUNs++;
}
/* Now sort out whatever was pointing to it... */
*prevEUN = BLOCK_NIL;
/* Ideally we'd actually be responsive to new
requests while we're doing this -- if there's
free space why should others be made to wait? */
cond_resched();
}
inftl->VUtable[thisVUC] = BLOCK_NIL;
}
static int INFTL_deleteblock(struct INFTLrecord *inftl, unsigned block)
{
unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)];
unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
struct mtd_info *mtd = inftl->mbd.mtd;
unsigned int status;
int silly = MAX_LOOPS;
size_t retlen;
struct inftl_bci bci;
pr_debug("INFTL: INFTL_deleteblock(inftl=%p,"
"block=%d)\n", inftl, block);
while (thisEUN < inftl->nb_blocks) {
if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
blockofs, 8, &retlen, (char *)&bci) < 0)
status = SECTOR_IGNORE;
else
status = bci.Status | bci.Status1;
switch (status) {
case SECTOR_FREE:
case SECTOR_IGNORE:
break;
case SECTOR_DELETED:
thisEUN = BLOCK_NIL;
goto foundit;
case SECTOR_USED:
goto foundit;
default:
printk(KERN_WARNING "INFTL: unknown status for "
"block %d in EUN %d: 0x%x\n",
block, thisEUN, status);
break;
}
if (!silly--) {
printk(KERN_WARNING "INFTL: infinite loop in Virtual "
"Unit Chain 0x%x\n",
block / (inftl->EraseSize / SECTORSIZE));
return 1;
}
thisEUN = inftl->PUtable[thisEUN];
}
foundit:
if (thisEUN != BLOCK_NIL) {
loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
if (inftl_read_oob(mtd, ptr, 8, &retlen, (char *)&bci) < 0)
return -EIO;
bci.Status = bci.Status1 = SECTOR_DELETED;
if (inftl_write_oob(mtd, ptr, 8, &retlen, (char *)&bci) < 0)
return -EIO;
INFTL_trydeletechain(inftl, block / (inftl->EraseSize / SECTORSIZE));
}
return 0;
}
static int inftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
char *buffer)
{
struct INFTLrecord *inftl = (void *)mbd;
unsigned int writeEUN;
unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
size_t retlen;
struct inftl_oob oob;
char *p, *pend;
pr_debug("INFTL: inftl_writeblock(inftl=%p,block=%ld,"
"buffer=%p)\n", inftl, block, buffer);
/* Is block all zero? */
pend = buffer + SECTORSIZE;
for (p = buffer; p < pend && !*p; p++)
;
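/*
 * An all-zero sector does not need to be stored: deleting the block is
 * sufficient, since reads of unmapped blocks return zeroes (see
 * inftl_readblock() below).
 */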
if (p < pend) {
writeEUN = INFTL_findwriteunit(inftl, block);
if (writeEUN == BLOCK_NIL) {
printk(KERN_WARNING "inftl_writeblock(): cannot find "
"block to write to\n");
/*
* If we _still_ haven't got a block to use,
* we're screwed.
*/
return 1;
}
memset(&oob, 0xff, sizeof(struct inftl_oob));
oob.b.Status = oob.b.Status1 = SECTOR_USED;
inftl_write(inftl->mbd.mtd, (writeEUN * inftl->EraseSize) +
blockofs, SECTORSIZE, &retlen, (char *)buffer,
(char *)&oob);
/*
* need to write SECTOR_USED flags since they are not written
* in mtd_writeecc
*/
} else {
INFTL_deleteblock(inftl, block);
}
return 0;
}
static int inftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
char *buffer)
{
struct INFTLrecord *inftl = (void *)mbd;
unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)];
unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
struct mtd_info *mtd = inftl->mbd.mtd;
unsigned int status;
int silly = MAX_LOOPS;
struct inftl_bci bci;
size_t retlen;
pr_debug("INFTL: inftl_readblock(inftl=%p,block=%ld,"
"buffer=%p)\n", inftl, block, buffer);
while (thisEUN < inftl->nb_blocks) {
if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
blockofs, 8, &retlen, (char *)&bci) < 0)
status = SECTOR_IGNORE;
else
status = bci.Status | bci.Status1;
switch (status) {
case SECTOR_DELETED:
thisEUN = BLOCK_NIL;
goto foundit;
case SECTOR_USED:
goto foundit;
case SECTOR_FREE:
case SECTOR_IGNORE:
break;
default:
printk(KERN_WARNING "INFTL: unknown status for "
"block %ld in EUN %d: 0x%04x\n",
block, thisEUN, status);
break;
}
if (!silly--) {
printk(KERN_WARNING "INFTL: infinite loop in "
"Virtual Unit Chain 0x%lx\n",
block / (inftl->EraseSize / SECTORSIZE));
return 1;
}
thisEUN = inftl->PUtable[thisEUN];
}
foundit:
if (thisEUN == BLOCK_NIL) {
/* The requested block is not on the media, return all 0x00 */
memset(buffer, 0, SECTORSIZE);
} else {
size_t retlen;
loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
int ret = mtd_read(mtd, ptr, SECTORSIZE, &retlen, buffer);
/* Handle corrected bit flips gracefully */
if (ret < 0 && !mtd_is_bitflip(ret))
return -EIO;
}
return 0;
}
static int inftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
struct INFTLrecord *inftl = (void *)dev;
geo->heads = inftl->heads;
geo->sectors = inftl->sectors;
geo->cylinders = inftl->cylinders;
return 0;
}
static struct mtd_blktrans_ops inftl_tr = {
.name = "inftl",
.major = INFTL_MAJOR,
.part_bits = INFTL_PARTN_BITS,
.blksize = 512,
.getgeo = inftl_getgeo,
.readsect = inftl_readblock,
.writesect = inftl_writeblock,
.add_mtd = inftl_add_mtd,
.remove_dev = inftl_remove_dev,
.owner = THIS_MODULE,
};
module_mtd_blktrans(inftl_tr);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Greg Ungerer <[email protected]>, David Woodhouse <[email protected]>, Fabrice Bellard <[email protected]> et al.");
MODULE_DESCRIPTION("Support code for Inverse Flash Translation Layer, used on M-Systems DiskOnChip 2000, Millennium and Millennium Plus");
| linux-master | drivers/mtd/inftlcore.c |
/* This version ported to the Linux-MTD system by [email protected]
*
* Fixes: Arnaldo Carvalho de Melo <[email protected]>
* - fixes some leaks on failure in build_maps and ftl_notify_add, cleanups
*
* Based on:
*/
/*======================================================================
A Flash Translation Layer memory card driver
This driver implements a disk-like block device driver with an
apparent block size of 512 bytes for flash memory cards.
ftl_cs.c 1.62 2000/02/01 00:59:04
The contents of this file are subject to the Mozilla Public
License Version 1.1 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of
the License at http://www.mozilla.org/MPL/
Software distributed under the License is distributed on an "AS
IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
implied. See the License for the specific language governing
rights and limitations under the License.
The initial developer of the original code is David A. Hinds
<[email protected]>. Portions created by David A. Hinds
are Copyright © 1999 David A. Hinds. All Rights Reserved.
Alternatively, the contents of this file may be used under the
terms of the GNU General Public License version 2 (the "GPL"), in
which case the provisions of the GPL are applicable instead of the
above. If you wish to allow the use of your version of this file
only under the terms of the GPL and not to allow others to use
your version of this file under the MPL, indicate your decision
by deleting the provisions above and replace them with the notice
and other provisions required by the GPL. If you do not delete
the provisions above, a recipient may use your version of this
file under either the MPL or the GPL.
LEGAL NOTE: The FTL format is patented by M-Systems. They have
granted a license for its use with PCMCIA devices:
"M-Systems grants a royalty-free, non-exclusive license under
any presently existing M-Systems intellectual property rights
necessary for the design and development of FTL-compatible
drivers, file systems and utilities using the data formats with
PCMCIA PC Cards as described in the PCMCIA Flash Translation
Layer (FTL) Specification."
Use of the FTL format for non-PCMCIA applications may be an
infringement of these patents. For additional information,
contact M-Systems directly. M-Systems has since been acquired by SanDisk.
======================================================================*/
#include <linux/mtd/blktrans.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
/*#define PSYCHO_DEBUG */
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/hdreg.h>
#include <linux/vmalloc.h>
#include <linux/blkpg.h>
#include <linux/uaccess.h>
#include <linux/mtd/ftl.h>
/*====================================================================*/
/* Parameters that can be set with 'insmod' */
static int shuffle_freq = 50;
module_param(shuffle_freq, int, 0);
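/*
 * Roughly speaking, when jiffies happens to be a multiple of
 * shuffle_freq, reclaim_block() recycles the least-erased data unit
 * instead of the one with the most deleted blocks, which shuffles
 * static data around for wear levelling.
 */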
/*====================================================================*/
/* Major device # for FTL device */
#ifndef FTL_MAJOR
#define FTL_MAJOR 44
#endif
/*====================================================================*/
/* Maximum number of separate memory devices we'll allow */
#define MAX_DEV 4
/* Maximum number of regions per device */
#define MAX_REGION 4
/* Maximum number of partitions in an FTL region */
#define PART_BITS 4
/* Maximum number of outstanding erase requests per socket */
#define MAX_ERASE 8
/* Sector size -- shouldn't need to change */
#define SECTOR_SIZE 512
/* Each memory region corresponds to a minor device */
typedef struct partition_t {
struct mtd_blktrans_dev mbd;
uint32_t state;
uint32_t *VirtualBlockMap;
uint32_t FreeTotal;
struct eun_info_t {
uint32_t Offset;
uint32_t EraseCount;
uint32_t Free;
uint32_t Deleted;
} *EUNInfo;
struct xfer_info_t {
uint32_t Offset;
uint32_t EraseCount;
uint16_t state;
} *XferInfo;
uint16_t bam_index;
uint32_t *bam_cache;
uint16_t DataUnits;
uint32_t BlocksPerUnit;
erase_unit_header_t header;
} partition_t;
/* Partition state flags */
#define FTL_FORMATTED 0x01
/* Transfer unit states */
#define XFER_UNKNOWN 0x00
#define XFER_ERASING 0x01
#define XFER_ERASED 0x02
#define XFER_PREPARED 0x03
#define XFER_FAILED 0x04
/*======================================================================
Scan_header() checks to see if a memory region contains an FTL
partition. build_maps() reads all the erase unit headers, builds
the erase unit map, and then builds the virtual page map.
======================================================================*/
static int scan_header(partition_t *part)
{
erase_unit_header_t header;
loff_t offset, max_offset;
size_t ret;
int err;
part->header.FormattedSize = 0;
max_offset = (0x100000 < part->mbd.mtd->size) ? 0x100000 : part->mbd.mtd->size;
/* Search first megabyte for a valid FTL header */
for (offset = 0;
(offset + sizeof(header)) < max_offset;
offset += part->mbd.mtd->erasesize ? : 0x2000) {
err = mtd_read(part->mbd.mtd, offset, sizeof(header), &ret,
(unsigned char *)&header);
if (err)
return err;
if (strcmp(header.DataOrgTuple+3, "FTL100") == 0) break;
}
if (offset == max_offset) {
printk(KERN_NOTICE "ftl_cs: FTL header not found.\n");
return -ENOENT;
}
if (header.BlockSize != 9 ||
(header.EraseUnitSize < 10) || (header.EraseUnitSize > 31) ||
(header.NumTransferUnits >= le16_to_cpu(header.NumEraseUnits))) {
printk(KERN_NOTICE "ftl_cs: FTL header corrupt!\n");
return -1;
}
if ((1 << header.EraseUnitSize) != part->mbd.mtd->erasesize) {
printk(KERN_NOTICE "ftl: FTL EraseUnitSize %x != MTD erasesize %x\n",
1 << header.EraseUnitSize,part->mbd.mtd->erasesize);
return -1;
}
part->header = header;
return 0;
}
static int build_maps(partition_t *part)
{
erase_unit_header_t header;
uint16_t xvalid, xtrans, i;
unsigned blocks, j;
int hdr_ok, ret = -1;
ssize_t retval;
loff_t offset;
/* Set up erase unit maps */
part->DataUnits = le16_to_cpu(part->header.NumEraseUnits) -
part->header.NumTransferUnits;
part->EUNInfo = kmalloc_array(part->DataUnits, sizeof(struct eun_info_t),
GFP_KERNEL);
if (!part->EUNInfo)
goto out;
for (i = 0; i < part->DataUnits; i++)
part->EUNInfo[i].Offset = 0xffffffff;
part->XferInfo =
kmalloc_array(part->header.NumTransferUnits,
sizeof(struct xfer_info_t),
GFP_KERNEL);
if (!part->XferInfo)
goto out_EUNInfo;
xvalid = xtrans = 0;
for (i = 0; i < le16_to_cpu(part->header.NumEraseUnits); i++) {
offset = ((i + le16_to_cpu(part->header.FirstPhysicalEUN))
<< part->header.EraseUnitSize);
ret = mtd_read(part->mbd.mtd, offset, sizeof(header), &retval,
(unsigned char *)&header);
if (ret)
goto out_XferInfo;
ret = -1;
/* Is this a transfer partition? */
hdr_ok = (strcmp(header.DataOrgTuple+3, "FTL100") == 0);
if (hdr_ok && (le16_to_cpu(header.LogicalEUN) < part->DataUnits) &&
(part->EUNInfo[le16_to_cpu(header.LogicalEUN)].Offset == 0xffffffff)) {
part->EUNInfo[le16_to_cpu(header.LogicalEUN)].Offset = offset;
part->EUNInfo[le16_to_cpu(header.LogicalEUN)].EraseCount =
le32_to_cpu(header.EraseCount);
xvalid++;
} else {
if (xtrans == part->header.NumTransferUnits) {
printk(KERN_NOTICE "ftl_cs: format error: too many "
"transfer units!\n");
goto out_XferInfo;
}
if (hdr_ok && (le16_to_cpu(header.LogicalEUN) == 0xffff)) {
part->XferInfo[xtrans].state = XFER_PREPARED;
part->XferInfo[xtrans].EraseCount = le32_to_cpu(header.EraseCount);
} else {
part->XferInfo[xtrans].state = XFER_UNKNOWN;
/* Pick anything reasonable for the erase count */
part->XferInfo[xtrans].EraseCount =
le32_to_cpu(part->header.EraseCount);
}
part->XferInfo[xtrans].Offset = offset;
xtrans++;
}
}
/* Check for format trouble */
header = part->header;
if ((xtrans != header.NumTransferUnits) ||
(xvalid+xtrans != le16_to_cpu(header.NumEraseUnits))) {
printk(KERN_NOTICE "ftl_cs: format error: erase units "
"don't add up!\n");
goto out_XferInfo;
}
/* Set up virtual page map */
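/*
 * VirtualBlockMap[] maps a logical block number to its log_addr,
 * i.e. (data unit index << EraseUnitSize) + (block index << BlockSize);
 * 0xffffffff means the block is not currently mapped anywhere.
 */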
blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize;
part->VirtualBlockMap = vmalloc(array_size(blocks, sizeof(uint32_t)));
if (!part->VirtualBlockMap)
goto out_XferInfo;
memset(part->VirtualBlockMap, 0xff, blocks * sizeof(uint32_t));
part->BlocksPerUnit = (1 << header.EraseUnitSize) >> header.BlockSize;
part->bam_cache = kmalloc_array(part->BlocksPerUnit, sizeof(uint32_t),
GFP_KERNEL);
if (!part->bam_cache)
goto out_VirtualBlockMap;
part->bam_index = 0xffff;
part->FreeTotal = 0;
for (i = 0; i < part->DataUnits; i++) {
part->EUNInfo[i].Free = 0;
part->EUNInfo[i].Deleted = 0;
offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);
ret = mtd_read(part->mbd.mtd, offset,
part->BlocksPerUnit * sizeof(uint32_t), &retval,
(unsigned char *)part->bam_cache);
if (ret)
goto out_bam_cache;
for (j = 0; j < part->BlocksPerUnit; j++) {
if (BLOCK_FREE(le32_to_cpu(part->bam_cache[j]))) {
part->EUNInfo[i].Free++;
part->FreeTotal++;
} else if ((BLOCK_TYPE(le32_to_cpu(part->bam_cache[j])) == BLOCK_DATA) &&
(BLOCK_NUMBER(le32_to_cpu(part->bam_cache[j])) < blocks))
part->VirtualBlockMap[BLOCK_NUMBER(le32_to_cpu(part->bam_cache[j]))] =
(i << header.EraseUnitSize) + (j << header.BlockSize);
else if (BLOCK_DELETED(le32_to_cpu(part->bam_cache[j])))
part->EUNInfo[i].Deleted++;
}
}
ret = 0;
goto out;
out_bam_cache:
kfree(part->bam_cache);
out_VirtualBlockMap:
vfree(part->VirtualBlockMap);
out_XferInfo:
kfree(part->XferInfo);
out_EUNInfo:
kfree(part->EUNInfo);
out:
return ret;
} /* build_maps */
/*======================================================================
Erase_xfer() schedules an asynchronous erase operation for a
transfer unit.
======================================================================*/
static int erase_xfer(partition_t *part,
uint16_t xfernum)
{
int ret;
struct xfer_info_t *xfer;
struct erase_info *erase;
xfer = &part->XferInfo[xfernum];
pr_debug("ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset);
xfer->state = XFER_ERASING;
/* Is there a free erase slot? Always in MTD. */
erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
if (!erase)
return -ENOMEM;
erase->addr = xfer->Offset;
erase->len = 1 << part->header.EraseUnitSize;
ret = mtd_erase(part->mbd.mtd, erase);
if (!ret) {
xfer->state = XFER_ERASED;
xfer->EraseCount++;
} else {
xfer->state = XFER_FAILED;
pr_notice("ftl_cs: erase failed: err = %d\n", ret);
}
kfree(erase);
return ret;
} /* erase_xfer */
/*======================================================================
Prepare_xfer() takes a freshly erased transfer unit and gives
it an appropriate header.
======================================================================*/
static int prepare_xfer(partition_t *part, int i)
{
erase_unit_header_t header;
struct xfer_info_t *xfer;
int nbam, ret;
uint32_t ctl;
ssize_t retlen;
loff_t offset;
xfer = &part->XferInfo[i];
xfer->state = XFER_FAILED;
pr_debug("ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset);
/* Write the transfer unit header */
header = part->header;
header.LogicalEUN = cpu_to_le16(0xffff);
header.EraseCount = cpu_to_le32(xfer->EraseCount);
ret = mtd_write(part->mbd.mtd, xfer->Offset, sizeof(header), &retlen,
(u_char *)&header);
if (ret) {
return ret;
}
/* Write the BAM stub */
nbam = DIV_ROUND_UP(part->BlocksPerUnit * sizeof(uint32_t) +
le32_to_cpu(part->header.BAMOffset), SECTOR_SIZE);
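/*
 * nbam is the number of 512-byte blocks occupied by the unit header and
 * the BAM itself; each of those blocks is marked BLOCK_CONTROL in the
 * stub written below so that find_free() never hands them out as data
 * blocks.
 */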
offset = xfer->Offset + le32_to_cpu(part->header.BAMOffset);
ctl = cpu_to_le32(BLOCK_CONTROL);
for (i = 0; i < nbam; i++, offset += sizeof(uint32_t)) {
ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
(u_char *)&ctl);
if (ret)
return ret;
}
xfer->state = XFER_PREPARED;
return 0;
} /* prepare_xfer */
/*======================================================================
Copy_erase_unit() takes a full erase block and a transfer unit,
copies everything to the transfer unit, then swaps the block
pointers.
All data blocks are copied to the corresponding blocks in the
target unit, so the virtual block map does not need to be
updated.
======================================================================*/
static int copy_erase_unit(partition_t *part, uint16_t srcunit,
uint16_t xferunit)
{
u_char buf[SECTOR_SIZE];
struct eun_info_t *eun;
struct xfer_info_t *xfer;
uint32_t src, dest, free, i;
uint16_t unit;
int ret;
ssize_t retlen;
loff_t offset;
uint16_t srcunitswap = cpu_to_le16(srcunit);
eun = &part->EUNInfo[srcunit];
xfer = &part->XferInfo[xferunit];
pr_debug("ftl_cs: copying block 0x%x to 0x%x\n",
eun->Offset, xfer->Offset);
/* Read current BAM */
if (part->bam_index != srcunit) {
offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);
ret = mtd_read(part->mbd.mtd, offset,
part->BlocksPerUnit * sizeof(uint32_t), &retlen,
(u_char *)(part->bam_cache));
/* mark the cache bad, in case we get an error later */
part->bam_index = 0xffff;
if (ret) {
printk( KERN_WARNING "ftl: Failed to read BAM cache in copy_erase_unit()!\n");
return ret;
}
}
/* Write the LogicalEUN for the transfer unit */
xfer->state = XFER_UNKNOWN;
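/*
 * Offset 20 is where the LogicalEUN field lives within the erase unit
 * header, as also used when the real LogicalEUN is written back further
 * down (hence the "Bad!" about hard-coding it).
 */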
offset = xfer->Offset + 20; /* Bad! */
unit = cpu_to_le16(0x7fff);
ret = mtd_write(part->mbd.mtd, offset, sizeof(uint16_t), &retlen,
(u_char *)&unit);
if (ret) {
printk( KERN_WARNING "ftl: Failed to write back to BAM cache in copy_erase_unit()!\n");
return ret;
}
/* Copy all data blocks from source unit to transfer unit */
src = eun->Offset; dest = xfer->Offset;
free = 0;
ret = 0;
for (i = 0; i < part->BlocksPerUnit; i++) {
switch (BLOCK_TYPE(le32_to_cpu(part->bam_cache[i]))) {
case BLOCK_CONTROL:
/* This gets updated later */
break;
case BLOCK_DATA:
case BLOCK_REPLACEMENT:
ret = mtd_read(part->mbd.mtd, src, SECTOR_SIZE, &retlen,
(u_char *)buf);
if (ret) {
printk(KERN_WARNING "ftl: Error reading old xfer unit in copy_erase_unit\n");
return ret;
}
ret = mtd_write(part->mbd.mtd, dest, SECTOR_SIZE, &retlen,
(u_char *)buf);
if (ret) {
printk(KERN_WARNING "ftl: Error writing new xfer unit in copy_erase_unit\n");
return ret;
}
break;
default:
/* All other blocks must be free */
part->bam_cache[i] = cpu_to_le32(0xffffffff);
free++;
break;
}
src += SECTOR_SIZE;
dest += SECTOR_SIZE;
}
/* Write the BAM to the transfer unit */
ret = mtd_write(part->mbd.mtd,
xfer->Offset + le32_to_cpu(part->header.BAMOffset),
part->BlocksPerUnit * sizeof(int32_t),
&retlen,
(u_char *)part->bam_cache);
if (ret) {
printk( KERN_WARNING "ftl: Error writing BAM in copy_erase_unit\n");
return ret;
}
/* All clear? Then update the LogicalEUN again */
ret = mtd_write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
&retlen, (u_char *)&srcunitswap);
if (ret) {
printk(KERN_WARNING "ftl: Error writing new LogicalEUN in copy_erase_unit\n");
return ret;
}
/* Update the maps and usage stats*/
swap(xfer->EraseCount, eun->EraseCount);
swap(xfer->Offset, eun->Offset);
part->FreeTotal -= eun->Free;
part->FreeTotal += free;
eun->Free = free;
eun->Deleted = 0;
/* Now, the cache should be valid for the new block */
part->bam_index = srcunit;
return 0;
} /* copy_erase_unit */
/*======================================================================
reclaim_block() picks a full erase unit and a transfer unit and
then calls copy_erase_unit() to copy one to the other. Then, it
schedules an erase on the expired block.
What's a good way to decide which transfer unit and which erase
unit to use? Beats me. My way is to always pick the transfer
unit with the fewest erases, and usually pick the data unit with
the most deleted blocks. But with a small probability, pick the
oldest data unit instead. This means that we generally postpone
the next reclamation as long as possible, but shuffle static
stuff around a bit for wear leveling.
======================================================================*/
static int reclaim_block(partition_t *part)
{
uint16_t i, eun, xfer;
uint32_t best;
int queued, ret;
pr_debug("ftl_cs: reclaiming space...\n");
pr_debug("NumTransferUnits == %x\n", part->header.NumTransferUnits);
/* Pick the least erased transfer unit */
best = 0xffffffff; xfer = 0xffff;
do {
queued = 0;
for (i = 0; i < part->header.NumTransferUnits; i++) {
int n=0;
if (part->XferInfo[i].state == XFER_UNKNOWN) {
pr_debug("XferInfo[%d].state == XFER_UNKNOWN\n",i);
n=1;
erase_xfer(part, i);
}
if (part->XferInfo[i].state == XFER_ERASING) {
pr_debug("XferInfo[%d].state == XFER_ERASING\n",i);
n=1;
queued = 1;
}
else if (part->XferInfo[i].state == XFER_ERASED) {
pr_debug("XferInfo[%d].state == XFER_ERASED\n",i);
n=1;
prepare_xfer(part, i);
}
if (part->XferInfo[i].state == XFER_PREPARED) {
pr_debug("XferInfo[%d].state == XFER_PREPARED\n",i);
n=1;
if (part->XferInfo[i].EraseCount <= best) {
best = part->XferInfo[i].EraseCount;
xfer = i;
}
}
if (!n)
pr_debug("XferInfo[%d].state == %x\n",i, part->XferInfo[i].state);
}
if (xfer == 0xffff) {
if (queued) {
pr_debug("ftl_cs: waiting for transfer "
"unit to be prepared...\n");
mtd_sync(part->mbd.mtd);
} else {
static int ne = 0;
if (++ne < 5)
printk(KERN_NOTICE "ftl_cs: reclaim failed: no "
"suitable transfer units!\n");
else
pr_debug("ftl_cs: reclaim failed: no "
"suitable transfer units!\n");
return -EIO;
}
}
} while (xfer == 0xffff);
eun = 0;
if ((jiffies % shuffle_freq) == 0) {
pr_debug("ftl_cs: recycling freshest block...\n");
best = 0xffffffff;
for (i = 0; i < part->DataUnits; i++)
if (part->EUNInfo[i].EraseCount <= best) {
best = part->EUNInfo[i].EraseCount;
eun = i;
}
} else {
best = 0;
for (i = 0; i < part->DataUnits; i++)
if (part->EUNInfo[i].Deleted >= best) {
best = part->EUNInfo[i].Deleted;
eun = i;
}
if (best == 0) {
static int ne = 0;
if (++ne < 5)
printk(KERN_NOTICE "ftl_cs: reclaim failed: "
"no free blocks!\n");
else
pr_debug("ftl_cs: reclaim failed: "
"no free blocks!\n");
return -EIO;
}
}
ret = copy_erase_unit(part, eun, xfer);
if (!ret)
erase_xfer(part, xfer);
else
printk(KERN_NOTICE "ftl_cs: copy_erase_unit failed!\n");
return ret;
} /* reclaim_block */
/*======================================================================
Find_free() searches for a free block. If necessary, it updates
the BAM cache for the erase unit containing the free block. It
returns the block index -- the erase unit is just the currently
cached unit. If there are no free blocks, it returns 0 -- this
is never a valid data block because it contains the header.
======================================================================*/
#ifdef PSYCHO_DEBUG
static void dump_lists(partition_t *part)
{
int i;
printk(KERN_DEBUG "ftl_cs: Free total = %d\n", part->FreeTotal);
for (i = 0; i < part->DataUnits; i++)
printk(KERN_DEBUG "ftl_cs: unit %d: %d phys, %d free, "
"%d deleted\n", i,
part->EUNInfo[i].Offset >> part->header.EraseUnitSize,
part->EUNInfo[i].Free, part->EUNInfo[i].Deleted);
}
#endif
static uint32_t find_free(partition_t *part)
{
uint16_t stop, eun;
uint32_t blk;
size_t retlen;
int ret;
/* Find an erase unit with some free space */
stop = (part->bam_index == 0xffff) ? 0 : part->bam_index;
eun = stop;
do {
if (part->EUNInfo[eun].Free != 0) break;
/* Wrap around at end of table */
if (++eun == part->DataUnits) eun = 0;
} while (eun != stop);
if (part->EUNInfo[eun].Free == 0)
return 0;
/* Is this unit's BAM cached? */
if (eun != part->bam_index) {
/* Invalidate cache */
part->bam_index = 0xffff;
ret = mtd_read(part->mbd.mtd,
part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
part->BlocksPerUnit * sizeof(uint32_t),
&retlen,
(u_char *)(part->bam_cache));
if (ret) {
printk(KERN_WARNING"ftl: Error reading BAM in find_free\n");
return 0;
}
part->bam_index = eun;
}
/* Find a free block */
for (blk = 0; blk < part->BlocksPerUnit; blk++)
if (BLOCK_FREE(le32_to_cpu(part->bam_cache[blk]))) break;
if (blk == part->BlocksPerUnit) {
#ifdef PSYCHO_DEBUG
static int ne = 0;
if (++ne == 1)
dump_lists(part);
#endif
printk(KERN_NOTICE "ftl_cs: bad free list!\n");
return 0;
}
pr_debug("ftl_cs: found free block at %d in %d\n", blk, eun);
return blk;
} /* find_free */
/*======================================================================
Read a series of sectors from an FTL partition.
======================================================================*/
static int ftl_read(partition_t *part, caddr_t buffer,
u_long sector, u_long nblocks)
{
uint32_t log_addr, bsize;
u_long i;
int ret;
size_t offset, retlen;
pr_debug("ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n",
part, sector, nblocks);
if (!(part->state & FTL_FORMATTED)) {
printk(KERN_NOTICE "ftl_cs: bad partition\n");
return -EIO;
}
bsize = 1 << part->header.EraseUnitSize;
for (i = 0; i < nblocks; i++) {
if (((sector+i) * SECTOR_SIZE) >= le32_to_cpu(part->header.FormattedSize)) {
printk(KERN_NOTICE "ftl_cs: bad read offset\n");
return -EIO;
}
log_addr = part->VirtualBlockMap[sector+i];
if (log_addr == 0xffffffff)
memset(buffer, 0, SECTOR_SIZE);
else {
offset = (part->EUNInfo[log_addr / bsize].Offset
+ (log_addr % bsize));
ret = mtd_read(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
(u_char *)buffer);
if (ret) {
printk(KERN_WARNING "Error reading MTD device in ftl_read()\n");
return ret;
}
}
buffer += SECTOR_SIZE;
}
return 0;
} /* ftl_read */
/*======================================================================
Write a series of sectors to an FTL partition
======================================================================*/
static int set_bam_entry(partition_t *part, uint32_t log_addr,
uint32_t virt_addr)
{
uint32_t bsize, blk, le_virt_addr;
#ifdef PSYCHO_DEBUG
uint32_t old_addr;
#endif
uint16_t eun;
int ret;
size_t retlen, offset;
pr_debug("ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n",
part, log_addr, virt_addr);
bsize = 1 << part->header.EraseUnitSize;
eun = log_addr / bsize;
blk = (log_addr % bsize) / SECTOR_SIZE;
offset = (part->EUNInfo[eun].Offset + blk * sizeof(uint32_t) +
le32_to_cpu(part->header.BAMOffset));
#ifdef PSYCHO_DEBUG
ret = mtd_read(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
(u_char *)&old_addr);
if (ret) {
printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret);
return ret;
}
old_addr = le32_to_cpu(old_addr);
if (((virt_addr == 0xfffffffe) && !BLOCK_FREE(old_addr)) ||
((virt_addr == 0) && (BLOCK_TYPE(old_addr) != BLOCK_DATA)) ||
(!BLOCK_DELETED(virt_addr) && (old_addr != 0xfffffffe))) {
static int ne = 0;
if (++ne < 5) {
printk(KERN_NOTICE "ftl_cs: set_bam_entry() inconsistency!\n");
printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, old = 0x%x"
", new = 0x%x\n", log_addr, old_addr, virt_addr);
}
return -EIO;
}
#endif
le_virt_addr = cpu_to_le32(virt_addr);
if (part->bam_index == eun) {
#ifdef PSYCHO_DEBUG
if (le32_to_cpu(part->bam_cache[blk]) != old_addr) {
static int ne = 0;
if (++ne < 5) {
printk(KERN_NOTICE "ftl_cs: set_bam_entry() "
"inconsistency!\n");
printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, cache"
" = 0x%x\n",
le32_to_cpu(part->bam_cache[blk]), old_addr);
}
return -EIO;
}
#endif
part->bam_cache[blk] = le_virt_addr;
}
ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
(u_char *)&le_virt_addr);
if (ret) {
printk(KERN_NOTICE "ftl_cs: set_bam_entry() failed!\n");
printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, new = 0x%x\n",
log_addr, virt_addr);
}
return ret;
} /* set_bam_entry */
static int ftl_write(partition_t *part, caddr_t buffer,
u_long sector, u_long nblocks)
{
uint32_t bsize, log_addr, virt_addr, old_addr, blk;
u_long i;
int ret;
size_t retlen, offset;
pr_debug("ftl_cs: ftl_write(0x%p, %ld, %ld)\n",
part, sector, nblocks);
if (!(part->state & FTL_FORMATTED)) {
printk(KERN_NOTICE "ftl_cs: bad partition\n");
return -EIO;
}
/* See if we need to reclaim space, before we start */
while (part->FreeTotal < nblocks) {
ret = reclaim_block(part);
if (ret)
return ret;
}
bsize = 1 << part->header.EraseUnitSize;
virt_addr = sector * SECTOR_SIZE | BLOCK_DATA;
for (i = 0; i < nblocks; i++) {
if (virt_addr >= le32_to_cpu(part->header.FormattedSize)) {
printk(KERN_NOTICE "ftl_cs: bad write offset\n");
return -EIO;
}
/* Grab a free block */
blk = find_free(part);
if (blk == 0) {
static int ne = 0;
if (++ne < 5)
printk(KERN_NOTICE "ftl_cs: internal error: "
"no free blocks!\n");
return -ENOSPC;
}
/* Tag the BAM entry, and write the new block */
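/*
 * The sequence below is deliberate: the BAM entry is first set to
 * 0xfffffffe so the block is no longer considered free, then the new
 * data is written, then the old copy is released, and only then is the
 * BAM entry pointed at the new data.
 */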
log_addr = part->bam_index * bsize + blk * SECTOR_SIZE;
part->EUNInfo[part->bam_index].Free--;
part->FreeTotal--;
if (set_bam_entry(part, log_addr, 0xfffffffe))
return -EIO;
part->EUNInfo[part->bam_index].Deleted++;
offset = (part->EUNInfo[part->bam_index].Offset +
blk * SECTOR_SIZE);
ret = mtd_write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen, buffer);
if (ret) {
printk(KERN_NOTICE "ftl_cs: block write failed!\n");
printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, virt_addr"
" = 0x%x, Offset = 0x%zx\n", log_addr, virt_addr,
offset);
return -EIO;
}
/* Only delete the old entry when the new entry is ready */
old_addr = part->VirtualBlockMap[sector+i];
if (old_addr != 0xffffffff) {
part->VirtualBlockMap[sector+i] = 0xffffffff;
part->EUNInfo[old_addr/bsize].Deleted++;
if (set_bam_entry(part, old_addr, 0))
return -EIO;
}
/* Finally, set up the new pointers */
if (set_bam_entry(part, log_addr, virt_addr))
return -EIO;
part->VirtualBlockMap[sector+i] = log_addr;
part->EUNInfo[part->bam_index].Deleted--;
buffer += SECTOR_SIZE;
virt_addr += SECTOR_SIZE;
}
return 0;
} /* ftl_write */
static int ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
partition_t *part = container_of(dev, struct partition_t, mbd);
u_long sect;
/* Sort of arbitrary: round size down to 4KiB boundary */
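/* With heads = 1 and sectors = 8, one cylinder is 8 * 512 bytes = 4 KiB,
 so "cylinders = sect >> 3" discards any partial final cylinder. */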
sect = le32_to_cpu(part->header.FormattedSize)/SECTOR_SIZE;
geo->heads = 1;
geo->sectors = 8;
geo->cylinders = sect >> 3;
return 0;
}
static int ftl_readsect(struct mtd_blktrans_dev *dev,
unsigned long block, char *buf)
{
return ftl_read((void *)dev, buf, block, 1);
}
static int ftl_writesect(struct mtd_blktrans_dev *dev,
unsigned long block, char *buf)
{
return ftl_write((void *)dev, buf, block, 1);
}
static int ftl_discardsect(struct mtd_blktrans_dev *dev,
unsigned long sector, unsigned nr_sects)
{
partition_t *part = container_of(dev, struct partition_t, mbd);
uint32_t bsize = 1 << part->header.EraseUnitSize;
pr_debug("FTL erase sector %ld for %d sectors\n",
sector, nr_sects);
while (nr_sects) {
uint32_t old_addr = part->VirtualBlockMap[sector];
if (old_addr != 0xffffffff) {
part->VirtualBlockMap[sector] = 0xffffffff;
part->EUNInfo[old_addr/bsize].Deleted++;
if (set_bam_entry(part, old_addr, 0))
return -EIO;
}
nr_sects--;
sector++;
}
return 0;
}
/*====================================================================*/
static void ftl_freepart(partition_t *part)
{
vfree(part->VirtualBlockMap);
part->VirtualBlockMap = NULL;
kfree(part->EUNInfo);
part->EUNInfo = NULL;
kfree(part->XferInfo);
part->XferInfo = NULL;
kfree(part->bam_cache);
part->bam_cache = NULL;
} /* ftl_freepart */
static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
partition_t *partition;
partition = kzalloc(sizeof(partition_t), GFP_KERNEL);
if (!partition) {
printk(KERN_WARNING "No memory to scan for FTL on %s\n",
mtd->name);
return;
}
partition->mbd.mtd = mtd;
if ((scan_header(partition) == 0) &&
(build_maps(partition) == 0)) {
partition->state = FTL_FORMATTED;
#ifdef PCMCIA_DEBUG
printk(KERN_INFO "ftl_cs: opening %d KiB FTL partition\n",
le32_to_cpu(partition->header.FormattedSize) >> 10);
#endif
partition->mbd.size = le32_to_cpu(partition->header.FormattedSize) >> 9;
partition->mbd.tr = tr;
partition->mbd.devnum = -1;
if (!add_mtd_blktrans_dev(&partition->mbd))
return;
}
kfree(partition);
}
static void ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
del_mtd_blktrans_dev(dev);
ftl_freepart((partition_t *)dev);
}
static struct mtd_blktrans_ops ftl_tr = {
.name = "ftl",
.major = FTL_MAJOR,
.part_bits = PART_BITS,
.blksize = SECTOR_SIZE,
.readsect = ftl_readsect,
.writesect = ftl_writesect,
.discard = ftl_discardsect,
.getgeo = ftl_getgeo,
.add_mtd = ftl_add_mtd,
.remove_dev = ftl_remove_dev,
.owner = THIS_MODULE,
};
module_mtd_blktrans(ftl_tr);
MODULE_LICENSE("Dual MPL/GPL");
MODULE_AUTHOR("David Hinds <[email protected]>");
MODULE_DESCRIPTION("Support code for Flash Translation Layer, used on PCMCIA devices");
| linux-master | drivers/mtd/ftl.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux driver for NAND Flash Translation Layer
*
* Copyright © 1999 Machine Vision Holdings, Inc.
* Copyright © 1999-2010 David Woodhouse <[email protected]>
*/
#define PRERELEASE
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/nftl.h>
#include <linux/mtd/blktrans.h>
/* maximum number of loops while examining next block, to have a
chance to detect consistency problems (they should never happen
because of the checks done during mounting) */
#define MAX_LOOPS 10000
static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
struct NFTLrecord *nftl;
unsigned long temp;
if (!mtd_type_is_nand(mtd) || mtd->size > UINT_MAX)
return;
/* OK, this is moderately ugly. But probably safe. Alternatives? */
if (memcmp(mtd->name, "DiskOnChip", 10))
return;
pr_debug("NFTL: add_mtd for %s\n", mtd->name);
nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL);
if (!nftl)
return;
nftl->mbd.mtd = mtd;
nftl->mbd.devnum = -1;
nftl->mbd.tr = tr;
if (NFTL_mount(nftl) < 0) {
printk(KERN_WARNING "NFTL: could not mount device\n");
kfree(nftl);
return;
}
/* OK, it's a new one. Set up all the data structures. */
/* Calculate geometry */
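/*
 * Start from a nominal 1024 cylinders and 16 heads, then adjust sectors,
 * heads and finally cylinders in turn, trying to make
 * heads * cylinders * sectors match mbd.size exactly; any remaining
 * mismatch is only reported below, not treated as fatal.
 */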
nftl->cylinders = 1024;
nftl->heads = 16;
temp = nftl->cylinders * nftl->heads;
nftl->sectors = nftl->mbd.size / temp;
if (nftl->mbd.size % temp) {
nftl->sectors++;
temp = nftl->cylinders * nftl->sectors;
nftl->heads = nftl->mbd.size / temp;
if (nftl->mbd.size % temp) {
nftl->heads++;
temp = nftl->heads * nftl->sectors;
nftl->cylinders = nftl->mbd.size / temp;
}
}
if (nftl->mbd.size != nftl->heads * nftl->cylinders * nftl->sectors) {
/*
Oh no we don't have
mbd.size == heads * cylinders * sectors
*/
printk(KERN_WARNING "NFTL: cannot calculate a geometry to "
"match size of 0x%lx.\n", nftl->mbd.size);
printk(KERN_WARNING "NFTL: using C:%d H:%d S:%d "
"(== 0x%lx sects)\n",
nftl->cylinders, nftl->heads , nftl->sectors,
(long)nftl->cylinders * (long)nftl->heads *
(long)nftl->sectors );
}
if (add_mtd_blktrans_dev(&nftl->mbd)) {
kfree(nftl->ReplUnitTable);
kfree(nftl->EUNtable);
kfree(nftl);
return;
}
#ifdef PSYCHO_DEBUG
printk(KERN_INFO "NFTL: Found new nftl%c\n", nftl->mbd.devnum + 'a');
#endif
}
static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
{
struct NFTLrecord *nftl = (void *)dev;
pr_debug("NFTL: remove_dev (i=%d)\n", dev->devnum);
del_mtd_blktrans_dev(dev);
kfree(nftl->ReplUnitTable);
kfree(nftl->EUNtable);
}
/*
* Read oob data from flash
*/
int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf)
{
loff_t mask = mtd->writesize - 1;
struct mtd_oob_ops ops = { };
int res;
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = offs & mask;
ops.ooblen = len;
ops.oobbuf = buf;
ops.datbuf = NULL;
res = mtd_read_oob(mtd, offs & ~mask, &ops);
*retlen = ops.oobretlen;
return res;
}
/*
* Write oob data to flash
*/
int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf)
{
loff_t mask = mtd->writesize - 1;
struct mtd_oob_ops ops = { };
int res;
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = offs & mask;
ops.ooblen = len;
ops.oobbuf = buf;
ops.datbuf = NULL;
res = mtd_write_oob(mtd, offs & ~mask, &ops);
*retlen = ops.oobretlen;
return res;
}
#ifdef CONFIG_NFTL_RW
/*
* Write data and oob to flash
*/
static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf, uint8_t *oob)
{
loff_t mask = mtd->writesize - 1;
struct mtd_oob_ops ops = { };
int res;
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = offs & mask;
ops.ooblen = mtd->oobsize;
ops.oobbuf = oob;
ops.datbuf = buf;
ops.len = len;
res = mtd_write_oob(mtd, offs & ~mask, &ops);
*retlen = ops.retlen;
return res;
}
/* Actual NFTL access routines */
/* NFTL_findfreeblock: Find a free Erase Unit on the NFTL partition. This function is used
* when the given Virtual Unit Chain needs a new Erase Unit added to it.
*/
static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate)
{
/* For a given Virtual Unit Chain: find or create a free block and
add it to the chain */
/* We're passed the number of the last EUN in the chain, to save us from
having to look it up again */
u16 pot = nftl->LastFreeEUN;
int silly = nftl->nb_blocks;
/* Normally, we force a fold to happen before we run out of free blocks completely */
if (!desperate && nftl->numfreeEUNs < 2) {
pr_debug("NFTL_findfreeblock: there are too few free EUNs\n");
return BLOCK_NIL;
}
/* Scan for a free block */
do {
if (nftl->ReplUnitTable[pot] == BLOCK_FREE) {
nftl->LastFreeEUN = pot;
nftl->numfreeEUNs--;
return pot;
}
/* This will probably point to the MediaHdr unit itself,
right at the beginning of the partition. But that unit
(and the backup unit too) should have the UCI set
up so that it's not selected for overwriting */
if (++pot > nftl->lastEUN)
pot = le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN);
if (!silly--) {
printk("Argh! No free blocks found! LastFreeEUN = %d, "
"FirstEUN = %d\n", nftl->LastFreeEUN,
le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN));
return BLOCK_NIL;
}
} while (pot != nftl->LastFreeEUN);
return BLOCK_NIL;
}
static u16 NFTL_foldchain(struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock)
{
struct mtd_info *mtd = nftl->mbd.mtd;
u16 BlockMap[MAX_SECTORS_PER_UNIT];
unsigned char BlockLastState[MAX_SECTORS_PER_UNIT];
unsigned char BlockFreeFound[MAX_SECTORS_PER_UNIT];
unsigned int thisEUN;
int block;
int silly;
unsigned int targetEUN;
struct nftl_oob oob;
int inplace = 1;
size_t retlen;
memset(BlockMap, 0xff, sizeof(BlockMap));
memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
thisEUN = nftl->EUNtable[thisVUC];
if (thisEUN == BLOCK_NIL) {
printk(KERN_WARNING "Trying to fold non-existent "
"Virtual Unit Chain %d!\n", thisVUC);
return BLOCK_NIL;
}
/* Scan to find the Erase Unit which holds the actual data for each
512-byte block within the Chain.
*/
silly = MAX_LOOPS;
targetEUN = BLOCK_NIL;
while (thisEUN <= nftl->lastEUN ) {
unsigned int status, foldmark;
targetEUN = thisEUN;
for (block = 0; block < nftl->EraseSize / 512; block ++) {
nftl_read_oob(mtd, (thisEUN * nftl->EraseSize) +
(block * 512), 16 , &retlen,
(char *)&oob);
if (block == 2) {
foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1;
if (foldmark == FOLD_MARK_IN_PROGRESS) {
pr_debug("Write Inhibited on EUN %d\n", thisEUN);
inplace = 0;
} else {
/* There's no other reason not to do inplace,
except ones that come later. So we don't need
to preserve inplace */
inplace = 1;
}
}
status = oob.b.Status | oob.b.Status1;
BlockLastState[block] = status;
switch(status) {
case SECTOR_FREE:
BlockFreeFound[block] = 1;
break;
case SECTOR_USED:
if (!BlockFreeFound[block])
BlockMap[block] = thisEUN;
else
printk(KERN_WARNING
"SECTOR_USED found after SECTOR_FREE "
"in Virtual Unit Chain %d for block %d\n",
thisVUC, block);
break;
case SECTOR_DELETED:
if (!BlockFreeFound[block])
BlockMap[block] = BLOCK_NIL;
else
printk(KERN_WARNING
"SECTOR_DELETED found after SECTOR_FREE "
"in Virtual Unit Chain %d for block %d\n",
thisVUC, block);
break;
case SECTOR_IGNORE:
break;
default:
printk("Unknown status for block %d in EUN %d: %x\n",
block, thisEUN, status);
}
}
if (!silly--) {
printk(KERN_WARNING "Infinite loop in Virtual Unit Chain 0x%x\n",
thisVUC);
return BLOCK_NIL;
}
thisEUN = nftl->ReplUnitTable[thisEUN];
}
if (inplace) {
/* We're being asked to be a fold-in-place. Check
that all blocks which actually have data associated
with them (i.e. BlockMap[block] != BLOCK_NIL) are
either already present or SECTOR_FREE in the target
block. If not, we're going to have to fold out-of-place
anyway.
*/
for (block = 0; block < nftl->EraseSize / 512 ; block++) {
if (BlockLastState[block] != SECTOR_FREE &&
BlockMap[block] != BLOCK_NIL &&
BlockMap[block] != targetEUN) {
pr_debug("Setting inplace to 0. VUC %d, "
"block %d was %x lastEUN, "
"and is in EUN %d (%s) %d\n",
thisVUC, block, BlockLastState[block],
BlockMap[block],
BlockMap[block]== targetEUN ? "==" : "!=",
targetEUN);
inplace = 0;
break;
}
}
if (pendingblock >= (thisVUC * (nftl->EraseSize / 512)) &&
pendingblock < ((thisVUC + 1)* (nftl->EraseSize / 512)) &&
BlockLastState[pendingblock - (thisVUC * (nftl->EraseSize / 512))] !=
SECTOR_FREE) {
pr_debug("Pending write not free in EUN %d. "
"Folding out of place.\n", targetEUN);
inplace = 0;
}
}
if (!inplace) {
pr_debug("Cannot fold Virtual Unit Chain %d in place. "
"Trying out-of-place\n", thisVUC);
/* We need to find a targetEUN to fold into. */
targetEUN = NFTL_findfreeblock(nftl, 1);
if (targetEUN == BLOCK_NIL) {
/* Ouch. Now we're screwed. We need to do a
fold-in-place of another chain to make room
for this one. We need a better way of selecting
which chain to fold, because makefreeblock will
only ask us to fold the same one again.
*/
printk(KERN_WARNING
"NFTL_findfreeblock(desperate) returns 0xffff.\n");
return BLOCK_NIL;
}
} else {
/* We put a fold mark in the chain we are folding only if we
fold in place to help the mount check code. If we do not fold in
place, it is possible to find the valid chain by selecting the
longer one */
oob.u.c.FoldMark = oob.u.c.FoldMark1 = cpu_to_le16(FOLD_MARK_IN_PROGRESS);
oob.u.c.unused = 0xffffffff;
nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 2 * 512 + 8,
8, &retlen, (char *)&oob.u);
}
/* OK. We now know the location of every block in the Virtual Unit Chain,
and the Erase Unit into which we are supposed to be copying.
Go for it.
*/
pr_debug("Folding chain %d into unit %d\n", thisVUC, targetEUN);
for (block = 0; block < nftl->EraseSize / 512 ; block++) {
unsigned char movebuf[512];
int ret;
/* If it's in the target EUN already, or if it's pending write, do nothing */
if (BlockMap[block] == targetEUN ||
(pendingblock == (thisVUC * (nftl->EraseSize / 512) + block))) {
continue;
}
/* copy only non-free blocks (free blocks can only
happen in the case of media errors or deleted blocks) */
if (BlockMap[block] == BLOCK_NIL)
continue;
ret = mtd_read(mtd,
(nftl->EraseSize * BlockMap[block]) + (block * 512),
512,
&retlen,
movebuf);
if (ret < 0 && !mtd_is_bitflip(ret)) {
ret = mtd_read(mtd,
(nftl->EraseSize * BlockMap[block]) + (block * 512),
512,
&retlen,
movebuf);
if (ret != -EIO)
printk("Error went away on retry.\n");
}
memset(&oob, 0xff, sizeof(struct nftl_oob));
oob.b.Status = oob.b.Status1 = SECTOR_USED;
nftl_write(nftl->mbd.mtd, (nftl->EraseSize * targetEUN) +
(block * 512), 512, &retlen, movebuf, (char *)&oob);
}
/* add the header so that it is now a valid chain */
oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC);
oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = BLOCK_NIL;
nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 8,
8, &retlen, (char *)&oob.u);
/* OK. We've moved the whole lot into the new block. Now we have to free the original blocks. */
/* At this point, we have two different chains for this Virtual Unit, and no way to tell
them apart. If we crash now, we get confused. However, both contain the same data, so we
shouldn't actually lose data in this case. It's just that when we load up on a medium which
has duplicate chains, we need to free one of the chains because it's not necessary any more.
*/
thisEUN = nftl->EUNtable[thisVUC];
pr_debug("Want to erase\n");
/* For each block in the old chain (except the targetEUN of course),
free it and make it available for future use */
while (thisEUN <= nftl->lastEUN && thisEUN != targetEUN) {
unsigned int EUNtmp;
EUNtmp = nftl->ReplUnitTable[thisEUN];
if (NFTL_formatblock(nftl, thisEUN) < 0) {
/* could not erase : mark block as reserved
*/
nftl->ReplUnitTable[thisEUN] = BLOCK_RESERVED;
} else {
/* correctly erased : mark it as free */
nftl->ReplUnitTable[thisEUN] = BLOCK_FREE;
nftl->numfreeEUNs++;
}
thisEUN = EUNtmp;
}
/* Make this the new start of chain for thisVUC */
nftl->ReplUnitTable[targetEUN] = BLOCK_NIL;
nftl->EUNtable[thisVUC] = targetEUN;
return targetEUN;
}
static u16 NFTL_makefreeblock(struct NFTLrecord *nftl, unsigned pendingblock)
{
/* This is the part that needs some cleverness applied.
For now, I'm doing the minimum applicable to actually
get the thing to work.
Wear-levelling and other clever stuff needs to be implemented
and we also need to do some assessment of the results when
the system loses power half-way through the routine.
*/
u16 LongestChain = 0;
u16 ChainLength = 0, thislen;
u16 chain, EUN;
for (chain = 0; chain < le32_to_cpu(nftl->MediaHdr.FormattedSize) / nftl->EraseSize; chain++) {
EUN = nftl->EUNtable[chain];
thislen = 0;
while (EUN <= nftl->lastEUN) {
thislen++;
//printk("VUC %d reaches len %d with EUN %d\n", chain, thislen, EUN);
EUN = nftl->ReplUnitTable[EUN] & 0x7fff;
if (thislen > 0xff00) {
printk("Endless loop in Virtual Chain %d: Unit %x\n",
chain, EUN);
}
if (thislen > 0xff10) {
/* Actually, don't return failure. Just ignore this chain and
get on with it. */
thislen = 0;
break;
}
}
if (thislen > ChainLength) {
//printk("New longest chain is %d with length %d\n", chain, thislen);
ChainLength = thislen;
LongestChain = chain;
}
}
if (ChainLength < 2) {
printk(KERN_WARNING "No Virtual Unit Chains available for folding. "
"Failing request\n");
return BLOCK_NIL;
}
return NFTL_foldchain (nftl, LongestChain, pendingblock);
}
/* NFTL_findwriteunit: Return the unit number into which we can write
for this block. Make it available if it isn't already
*/
static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
{
u16 lastEUN;
u16 thisVUC = block / (nftl->EraseSize / 512);
struct mtd_info *mtd = nftl->mbd.mtd;
unsigned int writeEUN;
unsigned long blockofs = (block * 512) & (nftl->EraseSize -1);
size_t retlen;
int silly, silly2 = 3;
struct nftl_oob oob;
do {
/* Scan the media to find a unit in the VUC which has
a free space for the block in question.
*/
/* This condition catches the 0x[7f]fff cases, as well as
being a sanity check for past-end-of-media access
*/
lastEUN = BLOCK_NIL;
writeEUN = nftl->EUNtable[thisVUC];
silly = MAX_LOOPS;
while (writeEUN <= nftl->lastEUN) {
struct nftl_bci bci;
size_t retlen;
unsigned int status;
lastEUN = writeEUN;
nftl_read_oob(mtd,
(writeEUN * nftl->EraseSize) + blockofs,
8, &retlen, (char *)&bci);
pr_debug("Status of block %d in EUN %d is %x\n",
block , writeEUN, le16_to_cpu(bci.Status));
status = bci.Status | bci.Status1;
switch(status) {
case SECTOR_FREE:
return writeEUN;
case SECTOR_DELETED:
case SECTOR_USED:
case SECTOR_IGNORE:
break;
default:
// Invalid block. Don't use it any more. Must implement.
break;
}
if (!silly--) {
printk(KERN_WARNING
"Infinite loop in Virtual Unit Chain 0x%x\n",
thisVUC);
return BLOCK_NIL;
}
/* Skip to next block in chain */
writeEUN = nftl->ReplUnitTable[writeEUN];
}
/* OK. We didn't find one in the existing chain, or there
is no existing chain. */
/* Try to find an already-free block */
writeEUN = NFTL_findfreeblock(nftl, 0);
if (writeEUN == BLOCK_NIL) {
/* That didn't work - there were no free blocks just
waiting to be picked up. We're going to have to fold
a chain to make room.
*/
/* First remember the start of this chain */
//u16 startEUN = nftl->EUNtable[thisVUC];
//printk("Write to VirtualUnitChain %d, calling makefreeblock()\n", thisVUC);
writeEUN = NFTL_makefreeblock(nftl, BLOCK_NIL);
if (writeEUN == BLOCK_NIL) {
/* OK, we accept that the above comment is
lying - there may have been free blocks
last time we called NFTL_findfreeblock(),
but they are reserved for when we're
desperate. Well, now we're desperate.
*/
pr_debug("Using desperate==1 to find free EUN to accommodate write to VUC %d\n", thisVUC);
writeEUN = NFTL_findfreeblock(nftl, 1);
}
if (writeEUN == BLOCK_NIL) {
/* Ouch. This should never happen - we should
always be able to make some room somehow.
If we get here, we've allocated more storage
space than actual media, or our makefreeblock
routine is missing something.
*/
printk(KERN_WARNING "Cannot make free space.\n");
return BLOCK_NIL;
}
//printk("Restarting scan\n");
continue;
}
/* We've found a free block. Insert it into the chain. */
if (lastEUN != BLOCK_NIL) {
thisVUC |= 0x8000; /* It's a replacement block */
} else {
/* The first block in a new chain */
nftl->EUNtable[thisVUC] = writeEUN;
}
/* set up the actual EUN we're writing into */
/* Both in our cache... */
nftl->ReplUnitTable[writeEUN] = BLOCK_NIL;
/* ... and on the flash itself */
nftl_read_oob(mtd, writeEUN * nftl->EraseSize + 8, 8,
&retlen, (char *)&oob.u);
oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC);
nftl_write_oob(mtd, writeEUN * nftl->EraseSize + 8, 8,
&retlen, (char *)&oob.u);
/* we link the new block to the chain only after the
block is ready. It avoids the case where the chain
could point to a free block */
if (lastEUN != BLOCK_NIL) {
/* Both in our cache... */
nftl->ReplUnitTable[lastEUN] = writeEUN;
/* ... and on the flash itself */
nftl_read_oob(mtd, (lastEUN * nftl->EraseSize) + 8,
8, &retlen, (char *)&oob.u);
oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum
= cpu_to_le16(writeEUN);
nftl_write_oob(mtd, (lastEUN * nftl->EraseSize) + 8,
8, &retlen, (char *)&oob.u);
}
return writeEUN;
} while (silly2--);
printk(KERN_WARNING "Error folding to make room for Virtual Unit Chain 0x%x\n",
thisVUC);
return BLOCK_NIL;
}
static int nftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
char *buffer)
{
struct NFTLrecord *nftl = (void *)mbd;
u16 writeEUN;
unsigned long blockofs = (block * 512) & (nftl->EraseSize - 1);
size_t retlen;
struct nftl_oob oob;
writeEUN = NFTL_findwriteunit(nftl, block);
if (writeEUN == BLOCK_NIL) {
printk(KERN_WARNING
"NFTL_writeblock(): Cannot find block to write to\n");
/* If we _still_ haven't got a block to use, we're screwed */
return 1;
}
memset(&oob, 0xff, sizeof(struct nftl_oob));
oob.b.Status = oob.b.Status1 = SECTOR_USED;
nftl_write(nftl->mbd.mtd, (writeEUN * nftl->EraseSize) + blockofs,
512, &retlen, (char *)buffer, (char *)&oob);
return 0;
}
#endif /* CONFIG_NFTL_RW */
static int nftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
char *buffer)
{
struct NFTLrecord *nftl = (void *)mbd;
struct mtd_info *mtd = nftl->mbd.mtd;
u16 lastgoodEUN;
u16 thisEUN = nftl->EUNtable[block / (nftl->EraseSize / 512)];
unsigned long blockofs = (block * 512) & (nftl->EraseSize - 1);
unsigned int status;
int silly = MAX_LOOPS;
size_t retlen;
struct nftl_bci bci;
lastgoodEUN = BLOCK_NIL;
if (thisEUN != BLOCK_NIL) {
while (thisEUN < nftl->nb_blocks) {
if (nftl_read_oob(mtd, (thisEUN * nftl->EraseSize) +
blockofs, 8, &retlen,
(char *)&bci) < 0)
status = SECTOR_IGNORE;
else
status = bci.Status | bci.Status1;
switch (status) {
case SECTOR_FREE:
/* no modification of a sector should follow a free sector */
goto the_end;
case SECTOR_DELETED:
lastgoodEUN = BLOCK_NIL;
break;
case SECTOR_USED:
lastgoodEUN = thisEUN;
break;
case SECTOR_IGNORE:
break;
default:
printk("Unknown status for block %ld in EUN %d: %x\n",
block, thisEUN, status);
break;
}
if (!silly--) {
printk(KERN_WARNING "Infinite loop in Virtual Unit Chain 0x%lx\n",
block / (nftl->EraseSize / 512));
return 1;
}
thisEUN = nftl->ReplUnitTable[thisEUN];
}
}
the_end:
if (lastgoodEUN == BLOCK_NIL) {
/* the requested block is not on the media, return all 0x00 */
memset(buffer, 0, 512);
} else {
loff_t ptr = (lastgoodEUN * nftl->EraseSize) + blockofs;
size_t retlen;
int res = mtd_read(mtd, ptr, 512, &retlen, buffer);
if (res < 0 && !mtd_is_bitflip(res))
return -EIO;
}
return 0;
}
static int nftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
struct NFTLrecord *nftl = (void *)dev;
geo->heads = nftl->heads;
geo->sectors = nftl->sectors;
geo->cylinders = nftl->cylinders;
return 0;
}
/****************************************************************************
*
* Module stuff
*
****************************************************************************/
static struct mtd_blktrans_ops nftl_tr = {
.name = "nftl",
.major = NFTL_MAJOR,
.part_bits = NFTL_PARTN_BITS,
.blksize = 512,
.getgeo = nftl_getgeo,
.readsect = nftl_readblock,
#ifdef CONFIG_NFTL_RW
.writesect = nftl_writeblock,
#endif
.add_mtd = nftl_add_mtd,
.remove_dev = nftl_remove_dev,
.owner = THIS_MODULE,
};
module_mtd_blktrans(nftl_tr);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <[email protected]>, Fabrice Bellard <[email protected]> et al.");
MODULE_DESCRIPTION("Support code for NAND Flash Translation Layer, used on M-Systems DiskOnChip 2000 and Millennium");
MODULE_ALIAS_BLOCKDEV_MAJOR(NFTL_MAJOR);
| linux-master | drivers/mtd/nftlcore.c |
/*
* drivers/mtd/maps/intel_vr_nor.c
*
* An MTD map driver for a NOR flash bank on the Expansion Bus of the Intel
* Vermilion Range chipset.
*
* The Vermilion Range Expansion Bus supports four chip selects, each of which
* has 64MiB of address space. The 2nd BAR of the Expansion Bus PCI Device
* is a 256MiB memory region containing the address spaces for all four of the
* chip selects, with start addresses hardcoded on 64MiB boundaries.
*
* This map driver only supports NOR flash on chip select 0. The buswidth
* (either 8 bits or 16 bits) is determined by reading the Expansion Bus Timing
* and Control Register for Chip Select 0 (EXP_TIMING_CS0). This driver does
* not modify the value in the EXP_TIMING_CS0 register except to enable writing
* and disable boot acceleration. The timing parameters in the register are
* assumed to have been properly initialized by the BIOS. The reset default
* timing parameters are maximally conservative (slow), so access to the flash
* will be slower than it should be if the BIOS has not initialized the timing
* parameters.
*
* Author: Andy Lowe <[email protected]>
*
* 2006 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/flashchip.h>
#define DRV_NAME "vr_nor"
struct vr_nor_mtd {
void __iomem *csr_base;
struct map_info map;
struct mtd_info *info;
struct pci_dev *dev;
};
/* Expansion Bus Configuration and Status Registers are in BAR 0 */
#define EXP_CSR_MBAR 0
/* Expansion Bus Memory Window is BAR 1 */
#define EXP_WIN_MBAR 1
/* Maximum address space for Chip Select 0 is 64MiB */
#define CS0_SIZE 0x04000000
/* Chip Select 0 is at offset 0 in the Memory Window */
#define CS0_START 0x0
/* Chip Select 0 Timing Register is at offset 0 in CSR */
#define EXP_TIMING_CS0 0x00
#define TIMING_CS_EN (1 << 31) /* Chip Select Enable */
#define TIMING_BOOT_ACCEL_DIS (1 << 8) /* Boot Acceleration Disable */
#define TIMING_WR_EN (1 << 1) /* Write Enable */
#define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */
#define TIMING_MASK 0x3FFF0000
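/*
 * Illustrative sketch (kept out of the build, not part of the driver):
 * how the EXP_TIMING_CS0 bits defined above are interpreted. The real
 * logic lives in vr_nor_init_maps() and vr_nor_destroy_maps() below;
 * the local names used here are placeholders.
 */
#if 0
	u32 exp_timing_cs0 = readl(csr_base + EXP_TIMING_CS0);
	int bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 1 : 2; /* 8- or 16-bit bus */

	/* writes only reach the chip while TIMING_WR_EN is set */
	writel(exp_timing_cs0 | TIMING_BOOT_ACCEL_DIS | TIMING_WR_EN,
	       csr_base + EXP_TIMING_CS0);
#endif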
static void vr_nor_destroy_partitions(struct vr_nor_mtd *p)
{
mtd_device_unregister(p->info);
}
static int vr_nor_init_partitions(struct vr_nor_mtd *p)
{
/* register the flash bank */
/* partition the flash bank */
return mtd_device_register(p->info, NULL, 0);
}
static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
{
map_destroy(p->info);
}
static int vr_nor_mtd_setup(struct vr_nor_mtd *p)
{
static const char * const probe_types[] =
{ "cfi_probe", "jedec_probe", NULL };
const char * const *type;
for (type = probe_types; !p->info && *type; type++)
p->info = do_map_probe(*type, &p->map);
if (!p->info)
return -ENODEV;
p->info->dev.parent = &p->dev->dev;
return 0;
}
static void vr_nor_destroy_maps(struct vr_nor_mtd *p)
{
unsigned int exp_timing_cs0;
/* write-protect the flash bank */
exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
exp_timing_cs0 &= ~TIMING_WR_EN;
writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
/* unmap the flash window */
iounmap(p->map.virt);
/* unmap the csr window */
iounmap(p->csr_base);
}
/*
* Initialize the map_info structure and map the flash.
* Returns 0 on success, nonzero otherwise.
*/
static int vr_nor_init_maps(struct vr_nor_mtd *p)
{
unsigned long csr_phys, csr_len;
unsigned long win_phys, win_len;
unsigned int exp_timing_cs0;
int err;
csr_phys = pci_resource_start(p->dev, EXP_CSR_MBAR);
csr_len = pci_resource_len(p->dev, EXP_CSR_MBAR);
win_phys = pci_resource_start(p->dev, EXP_WIN_MBAR);
win_len = pci_resource_len(p->dev, EXP_WIN_MBAR);
if (!csr_phys || !csr_len || !win_phys || !win_len)
return -ENODEV;
if (win_len < (CS0_START + CS0_SIZE))
return -ENXIO;
p->csr_base = ioremap(csr_phys, csr_len);
if (!p->csr_base)
return -ENOMEM;
exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
if (!(exp_timing_cs0 & TIMING_CS_EN)) {
dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
"is disabled.\n");
err = -ENODEV;
goto release;
}
if ((exp_timing_cs0 & TIMING_MASK) == TIMING_MASK) {
dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
"is configured for maximally slow access times.\n");
}
p->map.name = DRV_NAME;
p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 1 : 2;
p->map.phys = win_phys + CS0_START;
p->map.size = CS0_SIZE;
p->map.virt = ioremap(p->map.phys, p->map.size);
if (!p->map.virt) {
err = -ENOMEM;
goto release;
}
simple_map_init(&p->map);
/* Enable writes to flash bank */
exp_timing_cs0 |= TIMING_BOOT_ACCEL_DIS | TIMING_WR_EN;
writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
return 0;
release:
iounmap(p->csr_base);
return err;
}
static const struct pci_device_id vr_nor_pci_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x500D)},
{0,}
};
static void vr_nor_pci_remove(struct pci_dev *dev)
{
struct vr_nor_mtd *p = pci_get_drvdata(dev);
vr_nor_destroy_partitions(p);
vr_nor_destroy_mtd_setup(p);
vr_nor_destroy_maps(p);
kfree(p);
pci_release_regions(dev);
pci_disable_device(dev);
}
static int vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct vr_nor_mtd *p = NULL;
unsigned int exp_timing_cs0;
int err;
err = pci_enable_device(dev);
if (err)
goto out;
err = pci_request_regions(dev, DRV_NAME);
if (err)
goto disable_dev;
p = kzalloc(sizeof(*p), GFP_KERNEL);
err = -ENOMEM;
if (!p)
goto release;
p->dev = dev;
err = vr_nor_init_maps(p);
if (err)
goto release;
err = vr_nor_mtd_setup(p);
if (err)
goto destroy_maps;
err = vr_nor_init_partitions(p);
if (err)
goto destroy_mtd_setup;
pci_set_drvdata(dev, p);
return 0;
destroy_mtd_setup:
map_destroy(p->info);
destroy_maps:
/* write-protect the flash bank */
exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
exp_timing_cs0 &= ~TIMING_WR_EN;
writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
/* unmap the flash window */
iounmap(p->map.virt);
/* unmap the csr window */
iounmap(p->csr_base);
release:
kfree(p);
pci_release_regions(dev);
disable_dev:
pci_disable_device(dev);
out:
return err;
}
static struct pci_driver vr_nor_pci_driver = {
.name = DRV_NAME,
.probe = vr_nor_pci_probe,
.remove = vr_nor_pci_remove,
.id_table = vr_nor_pci_ids,
};
module_pci_driver(vr_nor_pci_driver);
MODULE_AUTHOR("Andy Lowe");
MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, vr_nor_pci_ids);
| linux-master | drivers/mtd/maps/intel_vr_nor.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ts5500_flash.c -- MTD map driver for Technology Systems TS-5500 board
*
* Copyright (C) 2004 Sean Young <[email protected]>
*
* Note:
* - In order for detection to work, jumper 3 must be set.
* - Drive A and B use the resident flash disk (RFD) flash translation layer.
* - If you have created your own jffs file system and the bios overwrites
* it during boot, try disabling Drive A: and B: in the boot order.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/types.h>
#define WINDOW_ADDR 0x09400000
#define WINDOW_SIZE 0x00200000
static struct map_info ts5500_map = {
.name = "TS-5500 Flash",
.size = WINDOW_SIZE,
.bankwidth = 1,
.phys = WINDOW_ADDR
};
static const struct mtd_partition ts5500_partitions[] = {
{
.name = "Drive A",
.offset = 0,
.size = 0x0e0000
},
{
.name = "BIOS",
.offset = 0x0e0000,
.size = 0x020000,
},
{
.name = "Drive B",
.offset = 0x100000,
.size = 0x100000
}
};
#define NUM_PARTITIONS ARRAY_SIZE(ts5500_partitions)
static struct mtd_info *mymtd;
static int __init init_ts5500_map(void)
{
int rc = 0;
ts5500_map.virt = ioremap(ts5500_map.phys, ts5500_map.size);
if (!ts5500_map.virt) {
printk(KERN_ERR "Failed to ioremap\n");
rc = -EIO;
goto err2;
}
simple_map_init(&ts5500_map);
mymtd = do_map_probe("jedec_probe", &ts5500_map);
if (!mymtd)
mymtd = do_map_probe("map_rom", &ts5500_map);
if (!mymtd) {
rc = -ENXIO;
goto err1;
}
mymtd->owner = THIS_MODULE;
mtd_device_register(mymtd, ts5500_partitions, NUM_PARTITIONS);
return 0;
err1:
iounmap(ts5500_map.virt);
err2:
return rc;
}
static void __exit cleanup_ts5500_map(void)
{
if (mymtd) {
mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (ts5500_map.virt) {
iounmap(ts5500_map.virt);
ts5500_map.virt = NULL;
}
}
module_init(init_ts5500_map);
module_exit(cleanup_ts5500_map);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <[email protected]>");
MODULE_DESCRIPTION("MTD map driver for Technology Systems TS-5500 board");
| linux-master | drivers/mtd/maps/ts5500_flash.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* sbc_gxx.c -- MTD map driver for Arcom Control Systems SBC-MediaGX,
SBC-GXm and SBC-GX1 series boards.
Copyright (C) 2001 Arcom Control System Ltd
The SBC-MediaGX / SBC-GXx has up to 16 MiB of
Intel StrataFlash (28F320/28F640) in x8 mode.
This driver uses the CFI probe and Intel Extended Command Set drivers.
The flash is accessed as follows:
16 KiB memory window at 0xdc000-0xdffff
Two IO address locations for paging
0x258
bit 0-7: address bit 14-21
0x259
bit 0-1: address bit 22-23
bit 7: 0 - reset/powered down
1 - device enabled
The single flash device is divided into 3 partitions which appear as
separate MTD devices.
25/04/2001 AJL (Arcom) Modified signon strings and partition sizes
(to support bzImages up to 638KiB-ish)
*/
// Includes
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
// Defines
// - Hardware specific
#define WINDOW_START 0xdc000
/* Number of bits in offset. */
#define WINDOW_SHIFT 14
#define WINDOW_LENGTH (1 << WINDOW_SHIFT)
/* The bits for the offset into the window. */
#define WINDOW_MASK (WINDOW_LENGTH-1)
#define PAGE_IO 0x258
#define PAGE_IO_SIZE 2
/* bit 7 of 0x259 must be 1 to enable device. */
#define DEVICE_ENABLE 0x8000
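/*
 * Illustrative sketch (kept out of the build, not part of the driver):
 * how a linear flash offset is split across the paging registers described
 * in the header comment. A single 16-bit write to PAGE_IO programs both
 * ports at once: the low byte lands in 0x258 (address bits 14-21), the
 * high byte in 0x259 (address bits 22-23 plus the enable bit).
 */
#if 0
	unsigned long ofs = 0x123456;               /* example flash offset */
	unsigned long page = ofs >> WINDOW_SHIFT;   /* 0x48 -> address bits 14 and up */
	unsigned long win_ofs = ofs & WINDOW_MASK;  /* 0x3456 -> offset inside the window */

	outw(page | DEVICE_ENABLE, PAGE_IO);        /* select the 16 KiB page */
	/* the byte is now accessible at iomapadr + win_ofs */
#endif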
// - Flash / Partition sizing
#define MAX_SIZE_KiB 16384
#define BOOT_PARTITION_SIZE_KiB 768
#define DATA_PARTITION_SIZE_KiB 1280
#define APP_PARTITION_SIZE_KiB 6144
// Globals
static volatile int page_in_window = -1; // Current page in window.
static void __iomem *iomapadr;
static DEFINE_SPINLOCK(sbc_gxx_spin);
/* partition_info gives details on the logical partitions that the single
 * flash device is split into. If the size is zero we use up to the end of
 * the device. */
static const struct mtd_partition partition_info[] = {
{ .name = "SBC-GXx flash boot partition",
.offset = 0,
.size = BOOT_PARTITION_SIZE_KiB*1024 },
{ .name = "SBC-GXx flash data partition",
.offset = BOOT_PARTITION_SIZE_KiB*1024,
.size = (DATA_PARTITION_SIZE_KiB)*1024 },
{ .name = "SBC-GXx flash application partition",
.offset = (BOOT_PARTITION_SIZE_KiB+DATA_PARTITION_SIZE_KiB)*1024 }
};
#define NUM_PARTITIONS 3
static inline void sbc_gxx_page(struct map_info *map, unsigned long ofs)
{
unsigned long page = ofs >> WINDOW_SHIFT;
if( page!=page_in_window ) {
outw( page | DEVICE_ENABLE, PAGE_IO );
page_in_window = page;
}
}
static map_word sbc_gxx_read8(struct map_info *map, unsigned long ofs)
{
map_word ret;
spin_lock(&sbc_gxx_spin);
sbc_gxx_page(map, ofs);
ret.x[0] = readb(iomapadr + (ofs & WINDOW_MASK));
spin_unlock(&sbc_gxx_spin);
return ret;
}
static void sbc_gxx_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
while(len) {
unsigned long thislen = len;
if (len > (WINDOW_LENGTH - (from & WINDOW_MASK)))
thislen = WINDOW_LENGTH-(from & WINDOW_MASK);
spin_lock(&sbc_gxx_spin);
sbc_gxx_page(map, from);
memcpy_fromio(to, iomapadr + (from & WINDOW_MASK), thislen);
spin_unlock(&sbc_gxx_spin);
to += thislen;
from += thislen;
len -= thislen;
}
}
static void sbc_gxx_write8(struct map_info *map, map_word d, unsigned long adr)
{
spin_lock(&sbc_gxx_spin);
sbc_gxx_page(map, adr);
writeb(d.x[0], iomapadr + (adr & WINDOW_MASK));
spin_unlock(&sbc_gxx_spin);
}
static void sbc_gxx_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
while(len) {
unsigned long thislen = len;
if (len > (WINDOW_LENGTH - (to & WINDOW_MASK)))
thislen = WINDOW_LENGTH-(to & WINDOW_MASK);
spin_lock(&sbc_gxx_spin);
sbc_gxx_page(map, to);
memcpy_toio(iomapadr + (to & WINDOW_MASK), from, thislen);
spin_unlock(&sbc_gxx_spin);
to += thislen;
from += thislen;
len -= thislen;
}
}
static struct map_info sbc_gxx_map = {
.name = "SBC-GXx flash",
.phys = NO_XIP,
.size = MAX_SIZE_KiB*1024, /* this must be set to a maximum possible amount
of flash so the cfi probe routines find all
the chips */
.bankwidth = 1,
.read = sbc_gxx_read8,
.copy_from = sbc_gxx_copy_from,
.write = sbc_gxx_write8,
.copy_to = sbc_gxx_copy_to
};
/* MTD device for all of the flash. */
static struct mtd_info *all_mtd;
static void cleanup_sbc_gxx(void)
{
if( all_mtd ) {
mtd_device_unregister(all_mtd);
map_destroy( all_mtd );
}
iounmap(iomapadr);
release_region(PAGE_IO,PAGE_IO_SIZE);
}
static int __init init_sbc_gxx(void)
{
iomapadr = ioremap(WINDOW_START, WINDOW_LENGTH);
if (!iomapadr) {
printk( KERN_ERR"%s: failed to ioremap memory region\n",
sbc_gxx_map.name );
return -EIO;
}
if (!request_region( PAGE_IO, PAGE_IO_SIZE, "SBC-GXx flash")) {
printk( KERN_ERR"%s: IO ports 0x%x-0x%x in use\n",
sbc_gxx_map.name,
PAGE_IO, PAGE_IO+PAGE_IO_SIZE-1 );
iounmap(iomapadr);
return -EAGAIN;
}
printk( KERN_INFO"%s: IO:0x%x-0x%x MEM:0x%x-0x%x\n",
sbc_gxx_map.name,
PAGE_IO, PAGE_IO+PAGE_IO_SIZE-1,
WINDOW_START, WINDOW_START+WINDOW_LENGTH-1 );
/* Probe for chip. */
all_mtd = do_map_probe( "cfi_probe", &sbc_gxx_map );
if( !all_mtd ) {
cleanup_sbc_gxx();
return -ENXIO;
}
all_mtd->owner = THIS_MODULE;
/* Create MTD devices for each partition. */
mtd_device_register(all_mtd, partition_info, NUM_PARTITIONS);
return 0;
}
module_init(init_sbc_gxx);
module_exit(cleanup_sbc_gxx);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arcom Control Systems Ltd.");
MODULE_DESCRIPTION("MTD map driver for SBC-GXm and SBC-GX1 series boards");
| linux-master | drivers/mtd/maps/sbc_gxx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Handle mapping of the NOR flash on implementa A7 boards
*
* Copyright 2002 SYSGO Real-Time Solutions GmbH
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#define WINDOW_ADDR0 0x00000000 /* physical properties of flash */
#define WINDOW_SIZE0 0x00800000
#define WINDOW_ADDR1 0x10000000 /* physical properties of flash */
#define WINDOW_SIZE1 0x00800000
#define NUM_FLASHBANKS 2
#define BUSWIDTH 4
#define MSG_PREFIX "impA7:" /* prefix for our printk()'s */
#define MTDID "impa7-%d" /* for mtdparts= partitioning */
static struct mtd_info *impa7_mtd[NUM_FLASHBANKS];
static const char * const rom_probe_types[] = { "jedec_probe", NULL };
static struct map_info impa7_map[NUM_FLASHBANKS] = {
{
.name = "impA7 NOR Flash Bank #0",
.size = WINDOW_SIZE0,
.bankwidth = BUSWIDTH,
},
{
.name = "impA7 NOR Flash Bank #1",
.size = WINDOW_SIZE1,
.bankwidth = BUSWIDTH,
},
};
/*
* MTD partitioning stuff
*/
static const struct mtd_partition partitions[] =
{
{
.name = "FileSystem",
.size = 0x800000,
.offset = 0x00000000
},
};
static int __init init_impa7(void)
{
const char * const *type;
int i;
static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = {
{ WINDOW_ADDR0, WINDOW_SIZE0 },
{ WINDOW_ADDR1, WINDOW_SIZE1 },
};
int devicesfound = 0;
for(i=0; i<NUM_FLASHBANKS; i++)
{
printk(KERN_NOTICE MSG_PREFIX "probing 0x%08lx at 0x%08lx\n",
pt[i].size, pt[i].addr);
impa7_map[i].phys = pt[i].addr;
impa7_map[i].virt = ioremap(pt[i].addr, pt[i].size);
if (!impa7_map[i].virt) {
printk(MSG_PREFIX "failed to ioremap\n");
return -EIO;
}
simple_map_init(&impa7_map[i]);
impa7_mtd[i] = NULL;
type = rom_probe_types;
for(; !impa7_mtd[i] && *type; type++) {
impa7_mtd[i] = do_map_probe(*type, &impa7_map[i]);
}
if (impa7_mtd[i]) {
impa7_mtd[i]->owner = THIS_MODULE;
devicesfound++;
mtd_device_register(impa7_mtd[i], partitions,
ARRAY_SIZE(partitions));
} else {
iounmap((void __iomem *)impa7_map[i].virt);
}
}
return devicesfound == 0 ? -ENXIO : 0;
}
static void __exit cleanup_impa7(void)
{
int i;
for (i=0; i<NUM_FLASHBANKS; i++) {
if (impa7_mtd[i]) {
mtd_device_unregister(impa7_mtd[i]);
map_destroy(impa7_mtd[i]);
iounmap((void __iomem *)impa7_map[i].virt);
impa7_map[i].virt = NULL;
}
}
}
module_init(init_impa7);
module_exit(cleanup_impa7);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pavel Bartusek <[email protected]>");
MODULE_DESCRIPTION("MTD map driver for implementa impA7");
| linux-master | drivers/mtd/maps/impa7.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Map driver for Intel XScale PXA2xx platforms.
*
* Author: Nicolas Pitre
* Copyright: (C) 2001 MontaVista Software Inc.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>
#include <asm/mach/flash.h>
#define CACHELINESIZE 32
static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from,
ssize_t len)
{
unsigned long start = (unsigned long)map->cached + from;
unsigned long end = start + len;
start &= ~(CACHELINESIZE - 1);
while (start < end) {
/* invalidate D cache line */
asm volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start));
start += CACHELINESIZE;
}
}
struct pxa2xx_flash_info {
struct mtd_info *mtd;
struct map_info map;
};
static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
static int pxa2xx_flash_probe(struct platform_device *pdev)
{
struct flash_platform_data *flash = dev_get_platdata(&pdev->dev);
struct pxa2xx_flash_info *info;
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
info = kzalloc(sizeof(struct pxa2xx_flash_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->map.name = flash->name;
info->map.bankwidth = flash->width;
info->map.phys = res->start;
info->map.size = resource_size(res);
info->map.virt = ioremap(info->map.phys, info->map.size);
if (!info->map.virt) {
printk(KERN_WARNING "Failed to ioremap %s\n",
info->map.name);
kfree(info);
return -ENOMEM;
}
info->map.cached = ioremap_cache(info->map.phys, info->map.size);
if (!info->map.cached)
printk(KERN_WARNING "Failed to ioremap cached %s\n",
info->map.name);
info->map.inval_cache = pxa2xx_map_inval_cache;
simple_map_init(&info->map);
printk(KERN_NOTICE
"Probing %s at physical address 0x%08lx"
" (%d-bit bankwidth)\n",
info->map.name, (unsigned long)info->map.phys,
info->map.bankwidth * 8);
info->mtd = do_map_probe(flash->map_name, &info->map);
if (!info->mtd) {
iounmap((void *)info->map.virt);
if (info->map.cached)
iounmap(info->map.cached);
kfree(info);
return -EIO;
}
info->mtd->dev.parent = &pdev->dev;
mtd_device_parse_register(info->mtd, probes, NULL, flash->parts,
flash->nr_parts);
platform_set_drvdata(pdev, info);
return 0;
}
static int pxa2xx_flash_remove(struct platform_device *dev)
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
iounmap(info->map.virt);
if (info->map.cached)
iounmap(info->map.cached);
kfree(info);
return 0;
}
#ifdef CONFIG_PM
static void pxa2xx_flash_shutdown(struct platform_device *dev)
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
if (info && mtd_suspend(info->mtd) == 0)
mtd_resume(info->mtd);
}
#else
#define pxa2xx_flash_shutdown NULL
#endif
static struct platform_driver pxa2xx_flash_driver = {
.driver = {
.name = "pxa2xx-flash",
},
.probe = pxa2xx_flash_probe,
.remove = pxa2xx_flash_remove,
.shutdown = pxa2xx_flash_shutdown,
};
module_platform_driver(pxa2xx_flash_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <[email protected]>");
MODULE_DESCRIPTION("MTD map driver for Intel XScale PXA2xx");
| linux-master | drivers/mtd/maps/pxa2xx-flash.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* BIOS Flash chip on Intel 440GX board.
*
* Bugs: this currently does not work under linuxBIOS.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#define PIIXE_IOBASE_RESOURCE 11
#define WINDOW_ADDR 0xfff00000
#define WINDOW_SIZE 0x00100000
#define BUSWIDTH 1
static u32 iobase;
#define IOBASE iobase
#define TRIBUF_PORT (IOBASE+0x37)
#define VPP_PORT (IOBASE+0x28)
static struct mtd_info *mymtd;
/* Is this really the vpp port? */
static DEFINE_SPINLOCK(l440gx_vpp_lock);
static int l440gx_vpp_refcnt;
static void l440gx_set_vpp(struct map_info *map, int vpp)
{
unsigned long flags;
spin_lock_irqsave(&l440gx_vpp_lock, flags);
if (vpp) {
if (++l440gx_vpp_refcnt == 1) /* first nested 'on' */
outl(inl(VPP_PORT) | 1, VPP_PORT);
} else {
if (--l440gx_vpp_refcnt == 0) /* last nested 'off' */
outl(inl(VPP_PORT) & ~1, VPP_PORT);
}
spin_unlock_irqrestore(&l440gx_vpp_lock, flags);
}
static struct map_info l440gx_map = {
.name = "L440GX BIOS",
.size = WINDOW_SIZE,
.bankwidth = BUSWIDTH,
.phys = WINDOW_ADDR,
#if 0
/* FIXME verify that this is the
* appripriate code for vpp enable/disable
*/
.set_vpp = l440gx_set_vpp
#endif
};
static int __init init_l440gx(void)
{
struct pci_dev *dev, *pm_dev;
struct resource *pm_iobase;
__u16 word;
dev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82371AB_0, NULL);
pm_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
pci_dev_put(dev);
if (!dev || !pm_dev) {
printk(KERN_NOTICE "L440GX flash mapping: failed to find PIIX4 ISA bridge, cannot continue\n");
pci_dev_put(pm_dev);
return -ENODEV;
}
l440gx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
if (!l440gx_map.virt) {
printk(KERN_WARNING "Failed to ioremap L440GX flash region\n");
pci_dev_put(pm_dev);
return -ENOMEM;
}
simple_map_init(&l440gx_map);
pr_debug("window_addr = %p\n", l440gx_map.virt);
/* Setup the pm iobase resource
* This code should move into some kind of generic bridge
* driver but for the moment I'm content with getting the
* allocation correct.
*/
pm_iobase = &pm_dev->resource[PIIXE_IOBASE_RESOURCE];
if (!(pm_iobase->flags & IORESOURCE_IO)) {
pm_iobase->name = "pm iobase";
pm_iobase->start = 0;
pm_iobase->end = 63;
pm_iobase->flags = IORESOURCE_IO;
/* Put the current value in the resource */
pci_read_config_dword(pm_dev, 0x40, &iobase);
iobase &= ~1;
pm_iobase->start += iobase & ~1;
pm_iobase->end += iobase & ~1;
pci_dev_put(pm_dev);
/* Allocate the resource region */
if (pci_assign_resource(pm_dev, PIIXE_IOBASE_RESOURCE) != 0) {
pci_dev_put(dev);
pci_dev_put(pm_dev);
printk(KERN_WARNING "Could not allocate pm iobase resource\n");
iounmap(l440gx_map.virt);
return -ENXIO;
}
}
/* Set the iobase */
iobase = pm_iobase->start;
pci_write_config_dword(pm_dev, 0x40, iobase | 1);
/* Set XBCS# */
pci_read_config_word(dev, 0x4e, &word);
word |= 0x4;
pci_write_config_word(dev, 0x4e, word);
/* Supply write voltage to the chip */
l440gx_set_vpp(&l440gx_map, 1);
/* Enable the gate on the WE line */
outb(inb(TRIBUF_PORT) & ~1, TRIBUF_PORT);
printk(KERN_NOTICE "Enabled WE line to L440GX BIOS flash chip.\n");
mymtd = do_map_probe("jedec_probe", &l440gx_map);
if (!mymtd) {
printk(KERN_NOTICE "JEDEC probe on BIOS chip failed. Using ROM\n");
mymtd = do_map_probe("map_rom", &l440gx_map);
}
if (mymtd) {
mymtd->owner = THIS_MODULE;
mtd_device_register(mymtd, NULL, 0);
return 0;
}
iounmap(l440gx_map.virt);
return -ENXIO;
}
static void __exit cleanup_l440gx(void)
{
mtd_device_unregister(mymtd);
map_destroy(mymtd);
iounmap(l440gx_map.virt);
}
module_init(init_l440gx);
module_exit(cleanup_l440gx);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <[email protected]>");
MODULE_DESCRIPTION("MTD map driver for BIOS chips on Intel L440GX motherboards");
| linux-master | drivers/mtd/maps/l440gx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ck804xrom.c
*
* Normal mappings of chips in physical memory
*
* Dave Olsen <[email protected]>
* Ryan Jackson <[email protected]>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/flashchip.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/list.h>
#define MOD_NAME KBUILD_BASENAME
#define ADDRESS_NAME_LEN 18
#define ROM_PROBE_STEP_SIZE (64*1024)
#define DEV_CK804 1
#define DEV_MCP55 2
struct ck804xrom_window {
void __iomem *virt;
unsigned long phys;
unsigned long size;
struct list_head maps;
struct resource rsrc;
struct pci_dev *pdev;
};
struct ck804xrom_map_info {
struct list_head list;
struct map_info map;
struct mtd_info *mtd;
struct resource rsrc;
char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
};
/*
* The following applies to ck804 only:
* The 2 bits controlling the window size are often set to allow reading
* the BIOS, but too small to allow writing, since the lock registers are
* 4MiB lower in the address space than the data.
*
* This is intended to prevent flashing the bios, perhaps accidentally.
*
* This parameter allows the normal driver to override the BIOS settings.
*
* The bits are 6 and 7. If both bits are set, it is a 5MiB window.
* If only bit 7 is set, it is a 4MiB window. Otherwise, a
* 64KiB window.
*
* The following applies to mcp55 only:
* The 15 bits controlling the window size are distributed as follows:
* byte @0x88: bit 0..7
* byte @0x8c: bit 8..15
* word @0x90: bit 16..30
* If all bits are enabled, we have a 16? MiB window
* Please set win_size_bits to 0x7fffffff if you actually want to do something
*/
static uint win_size_bits = 0;
module_param(win_size_bits, uint, 0);
MODULE_PARM_DESC(win_size_bits, "ROM window size bits override, normally set by BIOS.");
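/*
 * Usage example (hypothetical invocation, not taken from the driver):
 *
 *   modprobe ck804xrom win_size_bits=0x7fffffff
 *
 * opens the full decode window on an MCP55 as suggested in the comment
 * above; on a CK804 only bits 6 and 7 of the value are meaningful.
 */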
static struct ck804xrom_window ck804xrom_window = {
.maps = LIST_HEAD_INIT(ck804xrom_window.maps),
};
static void ck804xrom_cleanup(struct ck804xrom_window *window)
{
struct ck804xrom_map_info *map, *scratch;
u8 byte;
if (window->pdev) {
/* Disable writes through the rom window */
pci_read_config_byte(window->pdev, 0x6d, &byte);
pci_write_config_byte(window->pdev, 0x6d, byte & ~1);
}
/* Free all of the mtd devices */
list_for_each_entry_safe(map, scratch, &window->maps, list) {
if (map->rsrc.parent)
release_resource(&map->rsrc);
mtd_device_unregister(map->mtd);
map_destroy(map->mtd);
list_del(&map->list);
kfree(map);
}
if (window->rsrc.parent)
release_resource(&window->rsrc);
if (window->virt) {
iounmap(window->virt);
window->virt = NULL;
window->phys = 0;
window->size = 0;
}
pci_dev_put(window->pdev);
}
static int __init ck804xrom_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
u8 byte;
u16 word;
struct ck804xrom_window *window = &ck804xrom_window;
struct ck804xrom_map_info *map = NULL;
unsigned long map_top;
/* Remember the pci dev I find the window in */
window->pdev = pci_dev_get(pdev);
switch (ent->driver_data) {
case DEV_CK804:
/* Enable the selected rom window. This is often incorrectly
* set up by the BIOS, and the 4MiB offset for the lock registers
* requires the full 5MiB of window space.
*
* This 'write, then read' approach leaves the bits for
* other uses of the hardware intact.
*/
pci_read_config_byte(pdev, 0x88, &byte);
pci_write_config_byte(pdev, 0x88, byte | win_size_bits );
/* Assume the rom window is properly set up, and find its size */
pci_read_config_byte(pdev, 0x88, &byte);
if ((byte & ((1<<7)|(1<<6))) == ((1<<7)|(1<<6)))
window->phys = 0xffb00000; /* 5MiB */
else if ((byte & (1<<7)) == (1<<7))
window->phys = 0xffc00000; /* 4MiB */
else
window->phys = 0xffff0000; /* 64KiB */
break;
case DEV_MCP55:
pci_read_config_byte(pdev, 0x88, &byte);
pci_write_config_byte(pdev, 0x88, byte | (win_size_bits & 0xff));
pci_read_config_byte(pdev, 0x8c, &byte);
pci_write_config_byte(pdev, 0x8c, byte | ((win_size_bits & 0xff00) >> 8));
pci_read_config_word(pdev, 0x90, &word);
pci_write_config_word(pdev, 0x90, word | ((win_size_bits & 0x7fff0000) >> 16));
window->phys = 0xff000000; /* 16MiB, hardcoded for now */
break;
}
window->size = 0xffffffffUL - window->phys + 1UL;
/*
* Try to reserve the window mem region. If this fails then
* it is likely due to a fragment of the window being
* "reserved" by the BIOS. In the case that the
* request_mem_region() fails then once the rom size is
* discovered we will try to reserve the unreserved fragment.
*/
window->rsrc.name = MOD_NAME;
window->rsrc.start = window->phys;
window->rsrc.end = window->phys + window->size - 1;
window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&iomem_resource, &window->rsrc)) {
window->rsrc.parent = NULL;
printk(KERN_ERR MOD_NAME
" %s(): Unable to register resource %pR - kernel bug?\n",
__func__, &window->rsrc);
}
/* Enable writes through the rom window */
pci_read_config_byte(pdev, 0x6d, &byte);
pci_write_config_byte(pdev, 0x6d, byte | 1);
/* FIXME handle registers 0x80 - 0x8C the bios region locks */
/* For write accesses caches are useless */
window->virt = ioremap(window->phys, window->size);
if (!window->virt) {
printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
window->phys, window->size);
goto out;
}
/* Get the first address to look for a rom chip at */
map_top = window->phys;
#if 1
/* The probe sequence run over the firmware hub lock
* registers sets them to 0x7 (no access).
* Probe at most the last 4MiB of the address space.
*/
if (map_top < 0xffc00000)
map_top = 0xffc00000;
#endif
/* Loop through and look for rom chips. Since we don't know the
* starting address for each chip, probe every ROM_PROBE_STEP_SIZE
* bytes from the starting address of the window.
*/
while((map_top - 1) < 0xffffffffUL) {
struct cfi_private *cfi;
unsigned long offset;
int i;
if (!map) {
map = kmalloc(sizeof(*map), GFP_KERNEL);
if (!map)
goto out;
}
memset(map, 0, sizeof(*map));
INIT_LIST_HEAD(&map->list);
map->map.name = map->map_name;
map->map.phys = map_top;
offset = map_top - window->phys;
map->map.virt = (void __iomem *)
(((unsigned long)(window->virt)) + offset);
map->map.size = 0xffffffffUL - map_top + 1UL;
/* Set the name of the map to the address I am trying */
sprintf(map->map_name, "%s @%08Lx",
MOD_NAME, (unsigned long long)map->map.phys);
/* There is no generic VPP support */
for(map->map.bankwidth = 32; map->map.bankwidth;
map->map.bankwidth >>= 1)
{
char **probe_type;
/* Skip bankwidths that are not supported */
if (!map_bankwidth_supported(map->map.bankwidth))
continue;
/* Setup the map methods */
simple_map_init(&map->map);
/* Try all of the probe methods */
probe_type = rom_probe_types;
for(; *probe_type; probe_type++) {
map->mtd = do_map_probe(*probe_type, &map->map);
if (map->mtd)
goto found;
}
}
map_top += ROM_PROBE_STEP_SIZE;
continue;
found:
/* Trim the size if we are larger than the map */
if (map->mtd->size > map->map.size) {
printk(KERN_WARNING MOD_NAME
" rom(%llu) larger than window(%lu). fixing...\n",
(unsigned long long)map->mtd->size, map->map.size);
map->mtd->size = map->map.size;
}
if (window->rsrc.parent) {
/*
* Registering the MTD device in iomem may not be possible
* if there is a BIOS "reserved" and BUSY range. If this
* fails then continue anyway.
*/
map->rsrc.name = map->map_name;
map->rsrc.start = map->map.phys;
map->rsrc.end = map->map.phys + map->mtd->size - 1;
map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&window->rsrc, &map->rsrc)) {
printk(KERN_ERR MOD_NAME
": cannot reserve MTD resource\n");
map->rsrc.parent = NULL;
}
}
/* Make the whole region visible in the map */
map->map.virt = window->virt;
map->map.phys = window->phys;
cfi = map->map.fldrv_priv;
for(i = 0; i < cfi->numchips; i++)
cfi->chips[i].start += offset;
/* Now that the mtd devices is complete claim and export it */
map->mtd->owner = THIS_MODULE;
if (mtd_device_register(map->mtd, NULL, 0)) {
map_destroy(map->mtd);
map->mtd = NULL;
goto out;
}
/* Calculate the new value of map_top */
map_top += map->mtd->size;
/* File away the map structure */
list_add(&map->list, &window->maps);
map = NULL;
}
out:
/* Free any left over map structures */
kfree(map);
/* See if I have any map structures */
if (list_empty(&window->maps)) {
ck804xrom_cleanup(window);
return -ENODEV;
}
return 0;
}
static void ck804xrom_remove_one(struct pci_dev *pdev)
{
struct ck804xrom_window *window = &ck804xrom_window;
ck804xrom_cleanup(window);
}
static const struct pci_device_id ck804xrom_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0051), .driver_data = DEV_CK804 },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0360), .driver_data = DEV_MCP55 },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0361), .driver_data = DEV_MCP55 },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0362), .driver_data = DEV_MCP55 },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0363), .driver_data = DEV_MCP55 },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0364), .driver_data = DEV_MCP55 },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0365), .driver_data = DEV_MCP55 },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0366), .driver_data = DEV_MCP55 },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0367), .driver_data = DEV_MCP55 },
{ 0, }
};
#if 0
MODULE_DEVICE_TABLE(pci, ck804xrom_pci_tbl);
static struct pci_driver ck804xrom_driver = {
.name = MOD_NAME,
.id_table = ck804xrom_pci_tbl,
.probe = ck804xrom_init_one,
.remove = ck804xrom_remove_one,
};
#endif
static int __init init_ck804xrom(void)
{
struct pci_dev *pdev;
const struct pci_device_id *id;
int retVal;
pdev = NULL;
for(id = ck804xrom_pci_tbl; id->vendor; id++) {
pdev = pci_get_device(id->vendor, id->device, NULL);
if (pdev)
break;
}
if (pdev) {
retVal = ck804xrom_init_one(pdev, id);
pci_dev_put(pdev);
return retVal;
}
return -ENXIO;
#if 0
return pci_register_driver(&ck804xrom_driver);
#endif
}
static void __exit cleanup_ck804xrom(void)
{
ck804xrom_remove_one(ck804xrom_window.pdev);
}
module_init(init_ck804xrom);
module_exit(cleanup_ck804xrom);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Biederman <[email protected]>, Dave Olsen <[email protected]>");
MODULE_DESCRIPTION("MTD map driver for BIOS chips on the Nvidia ck804 southbridge");
| linux-master | drivers/mtd/maps/ck804xrom.c |
// SPDX-License-Identifier: GPL-2.0-only
/* linux/drivers/mtd/maps/scx200_docflash.c
Copyright (c) 2001,2002 Christer Weinigel <[email protected]>
National Semiconductor SCx200 flash mapped with DOCCS
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/pci.h>
#include <linux/scx200.h>
#define NAME "scx200_docflash"
MODULE_AUTHOR("Christer Weinigel <[email protected]>");
MODULE_DESCRIPTION("NatSemi SCx200 DOCCS Flash Driver");
MODULE_LICENSE("GPL");
static int probe = 0; /* Don't autoprobe */
static unsigned size = 0x1000000; /* 16 MiB the whole ISA address space */
static unsigned width = 8; /* Default to 8 bits wide */
static char *flashtype = "cfi_probe";
module_param(probe, int, 0);
MODULE_PARM_DESC(probe, "Probe for a BIOS mapping");
module_param(size, int, 0);
MODULE_PARM_DESC(size, "Size of the flash mapping");
module_param(width, int, 0);
MODULE_PARM_DESC(width, "Data width of the flash mapping (8/16)");
module_param(flashtype, charp, 0);
MODULE_PARM_DESC(flashtype, "Type of MTD probe to do");
static struct resource docmem = {
.flags = IORESOURCE_MEM,
.name = "NatSemi SCx200 DOCCS Flash",
};
static struct mtd_info *mymtd;
static struct mtd_partition partition_info[] = {
{
.name = "DOCCS Boot kernel",
.offset = 0,
.size = 0xc0000
},
{
.name = "DOCCS Low BIOS",
.offset = 0xc0000,
.size = 0x40000
},
{
.name = "DOCCS File system",
.offset = 0x100000,
.size = ~0 /* calculate from flash size */
},
{
.name = "DOCCS High BIOS",
.offset = ~0, /* calculate from flash size */
.size = 0x80000
},
};
#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
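/*
 * Illustrative example (not from the original source): the ~0 sentinels
 * above are filled in at probe time. Assuming a 16 MiB chip, the fixup in
 * init_scx200_docflash() yields
 *
 *   "DOCCS High BIOS"   offset = 0x1000000 - 0x80000  = 0xf80000
 *   "DOCCS File system" size   = 0xf80000  - 0x100000 = 0xe80000
 *
 * so the file system partition covers everything between the low BIOS
 * and the high BIOS copies.
 */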
static struct map_info scx200_docflash_map = {
.name = "NatSemi SCx200 DOCCS Flash",
};
static int __init init_scx200_docflash(void)
{
unsigned u;
unsigned base;
unsigned ctrl;
unsigned pmr;
struct pci_dev *bridge;
printk(KERN_DEBUG NAME ": NatSemi SCx200 DOCCS Flash Driver\n");
if ((bridge = pci_get_device(PCI_VENDOR_ID_NS,
PCI_DEVICE_ID_NS_SCx200_BRIDGE,
NULL)) == NULL)
return -ENODEV;
/* check that we have found the configuration block */
if (!scx200_cb_present()) {
pci_dev_put(bridge);
return -ENODEV;
}
if (probe) {
/* Try to use the present flash mapping if any */
pci_read_config_dword(bridge, SCx200_DOCCS_BASE, &base);
pci_read_config_dword(bridge, SCx200_DOCCS_CTRL, &ctrl);
pci_dev_put(bridge);
pmr = inl(scx200_cb_base + SCx200_PMR);
if (base == 0
|| (ctrl & 0x07000000) != 0x07000000
|| (ctrl & 0x0007ffff) == 0)
return -ENODEV;
size = ((ctrl&0x1fff)<<13) + (1<<13);
for (u = size; u > 1; u >>= 1)
;
if (u != 1)
return -ENODEV;
if (pmr & (1<<6))
width = 16;
else
width = 8;
docmem.start = base;
docmem.end = base + size;
if (request_resource(&iomem_resource, &docmem)) {
printk(KERN_ERR NAME ": unable to allocate memory for flash mapping\n");
return -ENOMEM;
}
} else {
pci_dev_put(bridge);
for (u = size; u > 1; u >>= 1)
;
if (u != 1) {
printk(KERN_ERR NAME ": invalid size for flash mapping\n");
return -EINVAL;
}
if (width != 8 && width != 16) {
printk(KERN_ERR NAME ": invalid bus width for flash mapping\n");
return -EINVAL;
}
if (allocate_resource(&iomem_resource, &docmem,
size,
0xc0000000, 0xffffffff,
size, NULL, NULL)) {
printk(KERN_ERR NAME ": unable to allocate memory for flash mapping\n");
return -ENOMEM;
}
ctrl = 0x07000000 | ((size-1) >> 13);
printk(KERN_INFO "DOCCS BASE=0x%08lx, CTRL=0x%08lx\n", (long)docmem.start, (long)ctrl);
pci_write_config_dword(bridge, SCx200_DOCCS_BASE, docmem.start);
pci_write_config_dword(bridge, SCx200_DOCCS_CTRL, ctrl);
pmr = inl(scx200_cb_base + SCx200_PMR);
if (width == 8) {
pmr &= ~(1<<6);
} else {
pmr |= (1<<6);
}
outl(pmr, scx200_cb_base + SCx200_PMR);
}
printk(KERN_INFO NAME ": DOCCS mapped at %pR, width %d\n",
&docmem, width);
scx200_docflash_map.size = size;
if (width == 8)
scx200_docflash_map.bankwidth = 1;
else
scx200_docflash_map.bankwidth = 2;
simple_map_init(&scx200_docflash_map);
scx200_docflash_map.phys = docmem.start;
scx200_docflash_map.virt = ioremap(docmem.start, scx200_docflash_map.size);
if (!scx200_docflash_map.virt) {
printk(KERN_ERR NAME ": failed to ioremap the flash\n");
release_resource(&docmem);
return -EIO;
}
mymtd = do_map_probe(flashtype, &scx200_docflash_map);
if (!mymtd) {
printk(KERN_ERR NAME ": unable to detect flash\n");
iounmap(scx200_docflash_map.virt);
release_resource(&docmem);
return -ENXIO;
}
if (size < mymtd->size)
printk(KERN_WARNING NAME ": warning, flash mapping is smaller than flash size\n");
mymtd->owner = THIS_MODULE;
partition_info[3].offset = mymtd->size-partition_info[3].size;
partition_info[2].size = partition_info[3].offset-partition_info[2].offset;
mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
return 0;
}
static void __exit cleanup_scx200_docflash(void)
{
if (mymtd) {
mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (scx200_docflash_map.virt) {
iounmap(scx200_docflash_map.virt);
release_resource(&docmem);
}
}
module_init(init_scx200_docflash);
module_exit(cleanup_scx200_docflash);
| linux-master | drivers/mtd/maps/scx200_docflash.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ichxrom.c
*
* Normal mappings of chips in physical memory
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/flashchip.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/list.h>
#define xstr(s) str(s)
#define str(s) #s
#define MOD_NAME xstr(KBUILD_BASENAME)
#define ADDRESS_NAME_LEN 18
#define ROM_PROBE_STEP_SIZE (64*1024) /* 64KiB */
#define BIOS_CNTL 0x4e
#define FWH_DEC_EN1 0xE3
#define FWH_DEC_EN2 0xF0
#define FWH_SEL1 0xE8
#define FWH_SEL2 0xEE
struct ichxrom_window {
void __iomem* virt;
unsigned long phys;
unsigned long size;
struct list_head maps;
struct resource rsrc;
struct pci_dev *pdev;
};
struct ichxrom_map_info {
struct list_head list;
struct map_info map;
struct mtd_info *mtd;
struct resource rsrc;
char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
};
static struct ichxrom_window ichxrom_window = {
.maps = LIST_HEAD_INIT(ichxrom_window.maps),
};
static void ichxrom_cleanup(struct ichxrom_window *window)
{
struct ichxrom_map_info *map, *scratch;
u16 word;
int ret;
/* Disable writes through the rom window */
ret = pci_read_config_word(window->pdev, BIOS_CNTL, &word);
if (!ret)
pci_write_config_word(window->pdev, BIOS_CNTL, word & ~1);
pci_dev_put(window->pdev);
/* Free all of the mtd devices */
list_for_each_entry_safe(map, scratch, &window->maps, list) {
if (map->rsrc.parent)
release_resource(&map->rsrc);
mtd_device_unregister(map->mtd);
map_destroy(map->mtd);
list_del(&map->list);
kfree(map);
}
if (window->rsrc.parent)
release_resource(&window->rsrc);
if (window->virt) {
iounmap(window->virt);
window->virt = NULL;
window->phys = 0;
window->size = 0;
window->pdev = NULL;
}
}
static int __init ichxrom_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
struct ichxrom_window *window = &ichxrom_window;
struct ichxrom_map_info *map = NULL;
unsigned long map_top;
u8 byte;
u16 word;
/* For now I just handle the ichx and I assume there
* are not a lot of resources up at the top of the address
* space. It is possible to handle other devices in the
* top 16MB but it is very painful. Also since
* you can only really attach a FWH to an ICHX there are
* a number of simplifications you can make.
*
* Also you can page firmware hubs if an 8MB window isn't enough
* but I don't currently handle that case either.
*/
window->pdev = pdev;
/* Find a region continuous to the end of the ROM window */
window->phys = 0;
pci_read_config_byte(pdev, FWH_DEC_EN1, &byte);
if (byte == 0xff) {
window->phys = 0xffc00000;
pci_read_config_byte(pdev, FWH_DEC_EN2, &byte);
if ((byte & 0x0f) == 0x0f) {
window->phys = 0xff400000;
}
else if ((byte & 0x0e) == 0x0e) {
window->phys = 0xff500000;
}
else if ((byte & 0x0c) == 0x0c) {
window->phys = 0xff600000;
}
else if ((byte & 0x08) == 0x08) {
window->phys = 0xff700000;
}
}
else if ((byte & 0xfe) == 0xfe) {
window->phys = 0xffc80000;
}
else if ((byte & 0xfc) == 0xfc) {
window->phys = 0xffd00000;
}
else if ((byte & 0xf8) == 0xf8) {
window->phys = 0xffd80000;
}
else if ((byte & 0xf0) == 0xf0) {
window->phys = 0xffe00000;
}
else if ((byte & 0xe0) == 0xe0) {
window->phys = 0xffe80000;
}
else if ((byte & 0xc0) == 0xc0) {
window->phys = 0xfff00000;
}
else if ((byte & 0x80) == 0x80) {
window->phys = 0xfff80000;
}
if (window->phys == 0) {
printk(KERN_ERR MOD_NAME ": Rom window is closed\n");
goto out;
}
window->phys -= 0x400000UL;
window->size = (0xffffffffUL - window->phys) + 1UL;
/* Enable writes through the rom window */
pci_read_config_word(pdev, BIOS_CNTL, &word);
if (!(word & 1) && (word & (1<<1))) {
/* The BIOS will generate an error if I enable
* this device, so don't even try.
*/
printk(KERN_ERR MOD_NAME ": firmware access control, I can't enable writes\n");
goto out;
}
pci_write_config_word(pdev, BIOS_CNTL, word | 1);
/*
* Try to reserve the window mem region. If this fails then
* it is likely due to the window being "reserved" by the BIOS.
*/
window->rsrc.name = MOD_NAME;
window->rsrc.start = window->phys;
window->rsrc.end = window->phys + window->size - 1;
window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&iomem_resource, &window->rsrc)) {
window->rsrc.parent = NULL;
printk(KERN_DEBUG MOD_NAME ": "
"%s(): Unable to register resource %pR - kernel bug?\n",
__func__, &window->rsrc);
}
/* Map the firmware hub into my address space. */
window->virt = ioremap(window->phys, window->size);
if (!window->virt) {
printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
window->phys, window->size);
goto out;
}
/* Get the first address to look for a rom chip at */
map_top = window->phys;
if ((window->phys & 0x3fffff) != 0) {
map_top = window->phys + 0x400000;
}
#if 1
/* The probe sequence run over the firmware hub lock
* registers sets them to 0x7 (no access).
* Probe at most the last 4M of the address space.
*/
if (map_top < 0xffc00000) {
map_top = 0xffc00000;
}
#endif
/* Loop through and look for rom chips */
while((map_top - 1) < 0xffffffffUL) {
struct cfi_private *cfi;
unsigned long offset;
int i;
if (!map) {
map = kmalloc(sizeof(*map), GFP_KERNEL);
if (!map)
goto out;
}
memset(map, 0, sizeof(*map));
INIT_LIST_HEAD(&map->list);
map->map.name = map->map_name;
map->map.phys = map_top;
offset = map_top - window->phys;
map->map.virt = (void __iomem *)
(((unsigned long)(window->virt)) + offset);
map->map.size = 0xffffffffUL - map_top + 1UL;
/* Set the name of the map to the address I am trying */
sprintf(map->map_name, "%s @%08Lx",
MOD_NAME, (unsigned long long)map->map.phys);
/* Firmware hubs only use vpp when being programmed
* in a factory setting. So in-place programming
* needs to use a different method.
*/
for(map->map.bankwidth = 32; map->map.bankwidth;
map->map.bankwidth >>= 1)
{
char **probe_type;
/* Skip bankwidths that are not supported */
if (!map_bankwidth_supported(map->map.bankwidth))
continue;
/* Setup the map methods */
simple_map_init(&map->map);
/* Try all of the probe methods */
probe_type = rom_probe_types;
for(; *probe_type; probe_type++) {
map->mtd = do_map_probe(*probe_type, &map->map);
if (map->mtd)
goto found;
}
}
map_top += ROM_PROBE_STEP_SIZE;
continue;
found:
/* Trim the size if we are larger than the map */
if (map->mtd->size > map->map.size) {
printk(KERN_WARNING MOD_NAME
" rom(%llu) larger than window(%lu). fixing...\n",
(unsigned long long)map->mtd->size, map->map.size);
map->mtd->size = map->map.size;
}
if (window->rsrc.parent) {
/*
* Registering the MTD device in iomem may not be possible
* if there is a BIOS "reserved" and BUSY range. If this
* fails then continue anyway.
*/
map->rsrc.name = map->map_name;
map->rsrc.start = map->map.phys;
map->rsrc.end = map->map.phys + map->mtd->size - 1;
map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&window->rsrc, &map->rsrc)) {
printk(KERN_ERR MOD_NAME
": cannot reserve MTD resource\n");
map->rsrc.parent = NULL;
}
}
/* Make the whole region visible in the map */
map->map.virt = window->virt;
map->map.phys = window->phys;
cfi = map->map.fldrv_priv;
for(i = 0; i < cfi->numchips; i++) {
cfi->chips[i].start += offset;
}
/* Now that the mtd devices is complete claim and export it */
map->mtd->owner = THIS_MODULE;
if (mtd_device_register(map->mtd, NULL, 0)) {
map_destroy(map->mtd);
map->mtd = NULL;
goto out;
}
/* Calculate the new value of map_top */
map_top += map->mtd->size;
/* File away the map structure */
list_add(&map->list, &window->maps);
map = NULL;
}
out:
/* Free any left over map structures */
kfree(map);
/* See if I have any map structures */
if (list_empty(&window->maps)) {
ichxrom_cleanup(window);
return -ENODEV;
}
return 0;
}
static void ichxrom_remove_one(struct pci_dev *pdev)
{
struct ichxrom_window *window = &ichxrom_window;
ichxrom_cleanup(window);
}
static const struct pci_device_id ichxrom_pci_tbl[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0, },
};
#if 0
MODULE_DEVICE_TABLE(pci, ichxrom_pci_tbl);
static struct pci_driver ichxrom_driver = {
.name = MOD_NAME,
.id_table = ichxrom_pci_tbl,
.probe = ichxrom_init_one,
.remove = ichxrom_remove_one,
};
#endif
static int __init init_ichxrom(void)
{
struct pci_dev *pdev;
const struct pci_device_id *id;
pdev = NULL;
for (id = ichxrom_pci_tbl; id->vendor; id++) {
pdev = pci_get_device(id->vendor, id->device, NULL);
if (pdev) {
break;
}
}
if (pdev) {
return ichxrom_init_one(pdev, &ichxrom_pci_tbl[0]);
}
return -ENXIO;
#if 0
return pci_register_driver(&ichxrom_driver);
#endif
}
static void __exit cleanup_ichxrom(void)
{
ichxrom_remove_one(ichxrom_window.pdev);
}
module_init(init_ichxrom);
module_exit(cleanup_ichxrom);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Biederman <[email protected]>");
MODULE_DESCRIPTION("MTD map driver for BIOS chips on the ICHX southbridge");
| linux-master | drivers/mtd/maps/ichxrom.c |
// SPDX-License-Identifier: GPL-2.0
/*
* tsunami_flash.c
*
* flash chip on alpha ds10...
*/
#include <asm/io.h>
#include <asm/core_tsunami.h>
#include <linux/init.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#define FLASH_ENABLE_PORT 0x00C00001
#define FLASH_ENABLE_BYTE 0x01
#define FLASH_DISABLE_BYTE 0x00
#define MAX_TIG_FLASH_SIZE (12*1024*1024)
static inline map_word tsunami_flash_read8(struct map_info *map, unsigned long offset)
{
map_word val;
val.x[0] = tsunami_tig_readb(offset);
return val;
}
static void tsunami_flash_write8(struct map_info *map, map_word value, unsigned long offset)
{
tsunami_tig_writeb(value.x[0], offset);
}
static void tsunami_flash_copy_from(
struct map_info *map, void *addr, unsigned long offset, ssize_t len)
{
unsigned char *dest;
dest = addr;
while(len && (offset < MAX_TIG_FLASH_SIZE)) {
*dest = tsunami_tig_readb(offset);
offset++;
dest++;
len--;
}
}
static void tsunami_flash_copy_to(
struct map_info *map, unsigned long offset,
const void *addr, ssize_t len)
{
const unsigned char *src;
src = addr;
while(len && (offset < MAX_TIG_FLASH_SIZE)) {
tsunami_tig_writeb(*src, offset);
offset++;
src++;
len--;
}
}
/*
* Deliberately don't provide operations wider than 8 bits. I don't
* have them and it scares me to think how you could mess up if
* you tried to use them. Buswidth is set correctly so I'm safe.
*/
static struct map_info tsunami_flash_map = {
.name = "flash chip on the Tsunami TIG bus",
.size = MAX_TIG_FLASH_SIZE,
.phys = NO_XIP,
.bankwidth = 1,
.read = tsunami_flash_read8,
.copy_from = tsunami_flash_copy_from,
.write = tsunami_flash_write8,
.copy_to = tsunami_flash_copy_to,
};
static struct mtd_info *tsunami_flash_mtd;
static void __exit cleanup_tsunami_flash(void)
{
struct mtd_info *mtd;
mtd = tsunami_flash_mtd;
if (mtd) {
mtd_device_unregister(mtd);
map_destroy(mtd);
}
tsunami_flash_mtd = 0;
}
static const char * const rom_probe_types[] = {
"cfi_probe", "jedec_probe", "map_rom", NULL };
static int __init init_tsunami_flash(void)
{
const char * const *type;
tsunami_tig_writeb(FLASH_ENABLE_BYTE, FLASH_ENABLE_PORT);
tsunami_flash_mtd = 0;
type = rom_probe_types;
for(; !tsunami_flash_mtd && *type; type++) {
tsunami_flash_mtd = do_map_probe(*type, &tsunami_flash_map);
}
if (tsunami_flash_mtd) {
tsunami_flash_mtd->owner = THIS_MODULE;
mtd_device_register(tsunami_flash_mtd, NULL, 0);
return 0;
}
return -ENXIO;
}
module_init(init_tsunami_flash);
module_exit(cleanup_tsunami_flash);
| linux-master | drivers/mtd/maps/tsunami_flash.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/mtd/maps/pci.c
*
* Copyright (C) 2001 Russell King, All rights reserved.
*
* Generic PCI memory map driver. We support the following boards:
* - Intel IQ80310 ATU.
* - Intel EBSA285 (blank rom programming mode). Tested working 27/09/2001
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
struct map_pci_info;
struct mtd_pci_info {
int (*init)(struct pci_dev *dev, struct map_pci_info *map);
void (*exit)(struct pci_dev *dev, struct map_pci_info *map);
unsigned long (*translate)(struct map_pci_info *map, unsigned long ofs);
const char *map_name;
};
struct map_pci_info {
struct map_info map;
void __iomem *base;
void (*exit)(struct pci_dev *dev, struct map_pci_info *map);
unsigned long (*translate)(struct map_pci_info *map, unsigned long ofs);
struct pci_dev *dev;
};
static map_word mtd_pci_read8(struct map_info *_map, unsigned long ofs)
{
struct map_pci_info *map = (struct map_pci_info *)_map;
map_word val;
val.x[0]= readb(map->base + map->translate(map, ofs));
return val;
}
static map_word mtd_pci_read32(struct map_info *_map, unsigned long ofs)
{
struct map_pci_info *map = (struct map_pci_info *)_map;
map_word val;
val.x[0] = readl(map->base + map->translate(map, ofs));
return val;
}
static void mtd_pci_copyfrom(struct map_info *_map, void *to, unsigned long from, ssize_t len)
{
struct map_pci_info *map = (struct map_pci_info *)_map;
memcpy_fromio(to, map->base + map->translate(map, from), len);
}
static void mtd_pci_write8(struct map_info *_map, map_word val, unsigned long ofs)
{
struct map_pci_info *map = (struct map_pci_info *)_map;
writeb(val.x[0], map->base + map->translate(map, ofs));
}
static void mtd_pci_write32(struct map_info *_map, map_word val, unsigned long ofs)
{
struct map_pci_info *map = (struct map_pci_info *)_map;
writel(val.x[0], map->base + map->translate(map, ofs));
}
static void mtd_pci_copyto(struct map_info *_map, unsigned long to, const void *from, ssize_t len)
{
struct map_pci_info *map = (struct map_pci_info *)_map;
memcpy_toio(map->base + map->translate(map, to), from, len);
}
static const struct map_info mtd_pci_map = {
.phys = NO_XIP,
.copy_from = mtd_pci_copyfrom,
.copy_to = mtd_pci_copyto,
};
/*
* Intel IOP80310 Flash driver
*/
static int
intel_iq80310_init(struct pci_dev *dev, struct map_pci_info *map)
{
u32 win_base;
map->map.bankwidth = 1;
map->map.read = mtd_pci_read8;
map->map.write = mtd_pci_write8;
map->map.size = 0x00800000;
map->base = ioremap(pci_resource_start(dev, 0),
pci_resource_len(dev, 0));
if (!map->base)
return -ENOMEM;
/*
* We want to base the memory window at Xscale
* bus address 0, not 0x1000.
*/
pci_read_config_dword(dev, 0x44, &win_base);
pci_write_config_dword(dev, 0x44, 0);
map->map.map_priv_2 = win_base;
return 0;
}
static void
intel_iq80310_exit(struct pci_dev *dev, struct map_pci_info *map)
{
if (map->base)
iounmap(map->base);
pci_write_config_dword(dev, 0x44, map->map.map_priv_2);
}
static unsigned long
intel_iq80310_translate(struct map_pci_info *map, unsigned long ofs)
{
unsigned long page_addr = ofs & 0x00400000;
/*
* This munges the flash location so we avoid
* the first 80 bytes (they appear to read nonsense).
*/
if (page_addr) {
writel(0x00000008, map->base + 0x1558);
writel(0x00000000, map->base + 0x1550);
} else {
writel(0x00000007, map->base + 0x1558);
writel(0x00800000, map->base + 0x1550);
ofs += 0x00800000;
}
return ofs;
}
static struct mtd_pci_info intel_iq80310_info = {
.init = intel_iq80310_init,
.exit = intel_iq80310_exit,
.translate = intel_iq80310_translate,
.map_name = "cfi_probe",
};
/*
* Intel DC21285 driver
*/
static int
intel_dc21285_init(struct pci_dev *dev, struct map_pci_info *map)
{
unsigned long base, len;
base = pci_resource_start(dev, PCI_ROM_RESOURCE);
len = pci_resource_len(dev, PCI_ROM_RESOURCE);
if (!len || !base) {
/*
* No ROM resource
*/
base = pci_resource_start(dev, 2);
len = pci_resource_len(dev, 2);
/*
* We need to re-allocate PCI BAR2 address range to the
* PCI ROM BAR, and disable PCI BAR2.
*/
} else {
/*
* Hmm, if an address was allocated to the ROM resource, but
* not enabled, should we be allocating a new resource for it
* or simply enabling it?
*/
pci_enable_rom(dev);
printk("%s: enabling expansion ROM\n", pci_name(dev));
}
if (!len || !base)
return -ENXIO;
map->map.bankwidth = 4;
map->map.read = mtd_pci_read32;
map->map.write = mtd_pci_write32;
map->map.size = len;
map->base = ioremap(base, len);
if (!map->base)
return -ENOMEM;
return 0;
}
static void
intel_dc21285_exit(struct pci_dev *dev, struct map_pci_info *map)
{
if (map->base)
iounmap(map->base);
/*
* We need to undo the PCI BAR2/PCI ROM BAR address alteration.
*/
pci_disable_rom(dev);
}
static unsigned long
intel_dc21285_translate(struct map_pci_info *map, unsigned long ofs)
{
return ofs & 0x00ffffc0 ? ofs : (ofs ^ (1 << 5));
}
static struct mtd_pci_info intel_dc21285_info = {
.init = intel_dc21285_init,
.exit = intel_dc21285_exit,
.translate = intel_dc21285_translate,
.map_name = "jedec_probe",
};
/*
* PCI device ID table
*/
static const struct pci_device_id mtd_pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_INTEL,
.device = 0x530d,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = PCI_CLASS_MEMORY_OTHER << 8,
.class_mask = 0xffff00,
.driver_data = (unsigned long)&intel_iq80310_info,
},
{
.vendor = PCI_VENDOR_ID_DEC,
.device = PCI_DEVICE_ID_DEC_21285,
.subvendor = 0, /* DC21285 defaults to 0 on reset */
.subdevice = 0, /* DC21285 defaults to 0 on reset */
.driver_data = (unsigned long)&intel_dc21285_info,
},
{ 0, }
};
/*
* Generic code follows.
*/
static int mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct mtd_pci_info *info = (struct mtd_pci_info *)id->driver_data;
struct map_pci_info *map = NULL;
struct mtd_info *mtd = NULL;
int err;
err = pci_enable_device(dev);
if (err)
goto out;
err = pci_request_regions(dev, "pci mtd");
if (err)
goto out;
map = kmalloc(sizeof(*map), GFP_KERNEL);
err = -ENOMEM;
if (!map)
goto release;
map->map = mtd_pci_map;
map->map.name = pci_name(dev);
map->dev = dev;
map->exit = info->exit;
map->translate = info->translate;
err = info->init(dev, map);
if (err)
goto release;
mtd = do_map_probe(info->map_name, &map->map);
err = -ENODEV;
if (!mtd)
goto release;
mtd->owner = THIS_MODULE;
mtd_device_register(mtd, NULL, 0);
pci_set_drvdata(dev, mtd);
return 0;
release:
if (map) {
map->exit(dev, map);
kfree(map);
}
pci_release_regions(dev);
out:
return err;
}
static void mtd_pci_remove(struct pci_dev *dev)
{
struct mtd_info *mtd = pci_get_drvdata(dev);
struct map_pci_info *map = mtd->priv;
mtd_device_unregister(mtd);
map_destroy(mtd);
map->exit(dev, map);
kfree(map);
pci_release_regions(dev);
}
static struct pci_driver mtd_pci_driver = {
.name = "MTD PCI",
.probe = mtd_pci_probe,
.remove = mtd_pci_remove,
.id_table = mtd_pci_ids,
};
module_pci_driver(mtd_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Russell King <[email protected]>");
MODULE_DESCRIPTION("Generic PCI map driver");
MODULE_DEVICE_TABLE(pci, mtd_pci_ids);
| linux-master | drivers/mtd/maps/pci.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* sc520cdp.c -- MTD map driver for AMD SC520 Customer Development Platform
*
* Copyright (C) 2001 Sysgo Real-Time Solutions GmbH
*
* The SC520CDP is an evaluation board for the Elan SC520 processor available
* from AMD. It has two banks of 32-bit Flash ROM, each 8 Megabytes in size,
* and up to 512 KiB of 8-bit DIL Flash ROM.
* For details see https://www.amd.com/products/epd/desiging/evalboards/18.elansc520/520_cdp_brief/index.html
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/concat.h>
/*
** The Embedded Systems BIOS decodes the first FLASH starting at
** 0x8400000. This is a *terrible* place for it because accessing
** the flash at this location causes the A22 address line to be high
** (that's what 0x8400000 works out to in binary). But this is the highest
** order address line on the raw flash devices themselves!!
** This causes the top HALF of the flash to be accessed first. Beyond
** the physical limits of the flash, the flash chip aliases over (to
** 0x880000), which causes the bottom half to be accessed. This splits the
** flash into two and inverts it! If you then try to access this from another
** program that does NOT do this insanity, then you *will* access the
** first half of the flash, but not find what you expect there. That
** stuff is in the *second* half! Similarly, the address used by the
** BIOS for the second FLASH bank is also quite a bad choice.
** If REPROGRAM_PAR is defined below (the default), then this driver will
** choose more useful addresses for the FLASH banks by reprogramming the
** responsible PARxx registers in the SC520's MMCR region. This will
** cause the settings to be incompatible with the BIOS's settings, which
** shouldn't be a problem since you are running Linux (i.e. the BIOS is
** not much use anyway). However, if you need to be compatible with
** the BIOS for some reason, just undefine REPROGRAM_PAR.
*/
#define REPROGRAM_PAR
#ifdef REPROGRAM_PAR
/* These are the addresses we want.. */
#define WINDOW_ADDR_0 0x08800000
#define WINDOW_ADDR_1 0x09000000
#define WINDOW_ADDR_2 0x09800000
/* .. and these are the addresses the BIOS gives us */
#define WINDOW_ADDR_0_BIOS 0x08400000
#define WINDOW_ADDR_1_BIOS 0x08c00000
#define WINDOW_ADDR_2_BIOS 0x09400000
#else
#define WINDOW_ADDR_0 0x08400000
#define WINDOW_ADDR_1 0x08C00000
#define WINDOW_ADDR_2 0x09400000
#endif
#define WINDOW_SIZE_0 0x00800000
#define WINDOW_SIZE_1 0x00800000
#define WINDOW_SIZE_2 0x00080000
static struct map_info sc520cdp_map[] = {
{
.name = "SC520CDP Flash Bank #0",
.size = WINDOW_SIZE_0,
.bankwidth = 4,
.phys = WINDOW_ADDR_0
},
{
.name = "SC520CDP Flash Bank #1",
.size = WINDOW_SIZE_1,
.bankwidth = 4,
.phys = WINDOW_ADDR_1
},
{
.name = "SC520CDP DIL Flash",
.size = WINDOW_SIZE_2,
.bankwidth = 1,
.phys = WINDOW_ADDR_2
},
};
#define NUM_FLASH_BANKS ARRAY_SIZE(sc520cdp_map)
static struct mtd_info *mymtd[NUM_FLASH_BANKS];
static struct mtd_info *merged_mtd;
#ifdef REPROGRAM_PAR
/*
** The SC520 MMCR (memory mapped control register) region resides
** at 0xFFFEF000. The 16 Programmable Address Region (PAR) registers
** are at offset 0x88 in the MMCR:
*/
#define SC520_MMCR_BASE 0xFFFEF000
#define SC520_MMCR_EXTENT 0x1000
#define SC520_PAR(x) ((0x88/sizeof(unsigned long)) + (x))
#define NUM_SC520_PAR 16 /* total number of PAR registers */
/*
** The highest three bits in a PAR register determine what target
** device is controlled by this PAR. Here, only ROMCS? and BOOTCS
** devices are of interest.
*/
#define SC520_PAR_BOOTCS (0x4<<29)
#define SC520_PAR_ROMCS0 (0x5<<29)
#define SC520_PAR_ROMCS1 (0x6<<29)
#define SC520_PAR_TRGDEV (0x7<<29)
/*
** Bits 28 thru 26 determine some attributes for the
** region controlled by the PAR. (We only use non-cacheable)
*/
#define SC520_PAR_WRPROT (1<<26) /* write protected */
#define SC520_PAR_NOCACHE (1<<27) /* non-cacheable */
#define SC520_PAR_NOEXEC (1<<28) /* code execution denied */
/*
** Bit 25 determines the granularity: 4K or 64K
*/
#define SC520_PAR_PG_SIZ4 (0<<25)
#define SC520_PAR_PG_SIZ64 (1<<25)
/*
** Build a value to be written into a PAR register.
** We only need ROM entries, 64K page size:
*/
#define SC520_PAR_ENTRY(trgdev, address, size) \
((trgdev) | SC520_PAR_NOCACHE | SC520_PAR_PG_SIZ64 | \
(address) >> 16 | (((size) >> 16) - 1) << 14)
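/*
* For illustration (values follow the macro above; not re-checked against
* the Elan SC520 datasheet): the Flash Bank #0 entry used in par_table[],
* SC520_PAR_ENTRY(SC520_PAR_ROMCS0, WINDOW_ADDR_0, WINDOW_SIZE_0),
* expands to
* 0xa0000000 | 0x08000000 | 0x02000000 | (0x08800000 >> 16)
* | (((0x00800000 >> 16) - 1) << 14)
* = 0xaa000000 | 0x00000880 | 0x001fc000 = 0xaa1fc880,
* i.e. target ROMCS0, non-cacheable, 64K pages, 128 pages starting at
* 64K page 0x880.
*/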
struct sc520_par_table
{
unsigned long trgdev;
unsigned long new_par;
unsigned long default_address;
};
static const struct sc520_par_table par_table[NUM_FLASH_BANKS] =
{
{ /* Flash Bank #0: selected by ROMCS0 */
SC520_PAR_ROMCS0,
SC520_PAR_ENTRY(SC520_PAR_ROMCS0, WINDOW_ADDR_0, WINDOW_SIZE_0),
WINDOW_ADDR_0_BIOS
},
{ /* Flash Bank #1: selected by ROMCS1 */
SC520_PAR_ROMCS1,
SC520_PAR_ENTRY(SC520_PAR_ROMCS1, WINDOW_ADDR_1, WINDOW_SIZE_1),
WINDOW_ADDR_1_BIOS
},
{ /* DIL (BIOS) Flash: selected by BOOTCS */
SC520_PAR_BOOTCS,
SC520_PAR_ENTRY(SC520_PAR_BOOTCS, WINDOW_ADDR_2, WINDOW_SIZE_2),
WINDOW_ADDR_2_BIOS
}
};
static void sc520cdp_setup_par(void)
{
unsigned long __iomem *mmcr;
unsigned long mmcr_val;
int i, j;
/* map in SC520's MMCR area */
mmcr = ioremap(SC520_MMCR_BASE, SC520_MMCR_EXTENT);
if(!mmcr) { /* ioremap failed: skip the PAR reprogramming */
/* force physical address fields to BIOS defaults: */
for(i = 0; i < NUM_FLASH_BANKS; i++)
sc520cdp_map[i].phys = par_table[i].default_address;
return;
}
/*
** Find the PARxx registers that are responsible for activating
** ROMCS0, ROMCS1 and BOOTCS. Reprogram each of these with a
** new value from the table.
*/
for(i = 0; i < NUM_FLASH_BANKS; i++) { /* for each par_table entry */
for(j = 0; j < NUM_SC520_PAR; j++) { /* for each PAR register */
mmcr_val = readl(&mmcr[SC520_PAR(j)]);
/* if target device field matches, reprogram the PAR */
if((mmcr_val & SC520_PAR_TRGDEV) == par_table[i].trgdev)
{
writel(par_table[i].new_par, &mmcr[SC520_PAR(j)]);
break;
}
}
if(j == NUM_SC520_PAR)
{ /* no matching PAR found: try default BIOS address */
printk(KERN_NOTICE "Could not find PAR responsible for %s\n",
sc520cdp_map[i].name);
printk(KERN_NOTICE "Trying default address 0x%lx\n",
par_table[i].default_address);
sc520cdp_map[i].phys = par_table[i].default_address;
}
}
iounmap(mmcr);
}
#endif
static int __init init_sc520cdp(void)
{
int i, j, devices_found = 0;
#ifdef REPROGRAM_PAR
/* reprogram PAR registers so flash appears at the desired addresses */
sc520cdp_setup_par();
#endif
for (i = 0; i < NUM_FLASH_BANKS; i++) {
printk(KERN_NOTICE "SC520 CDP flash device: 0x%Lx at 0x%Lx\n",
(unsigned long long)sc520cdp_map[i].size,
(unsigned long long)sc520cdp_map[i].phys);
sc520cdp_map[i].virt = ioremap(sc520cdp_map[i].phys, sc520cdp_map[i].size);
if (!sc520cdp_map[i].virt) {
printk("Failed to ioremap\n");
for (j = 0; j < i; j++) {
if (mymtd[j]) {
map_destroy(mymtd[j]);
iounmap(sc520cdp_map[j].virt);
}
}
return -EIO;
}
simple_map_init(&sc520cdp_map[i]);
mymtd[i] = do_map_probe("cfi_probe", &sc520cdp_map[i]);
if(!mymtd[i])
mymtd[i] = do_map_probe("jedec_probe", &sc520cdp_map[i]);
if(!mymtd[i])
mymtd[i] = do_map_probe("map_rom", &sc520cdp_map[i]);
if (mymtd[i]) {
mymtd[i]->owner = THIS_MODULE;
++devices_found;
}
else {
iounmap(sc520cdp_map[i].virt);
}
}
if(devices_found >= 2) {
/* Combine the two flash banks into a single MTD device & register it: */
merged_mtd = mtd_concat_create(mymtd, 2, "SC520CDP Flash Banks #0 and #1");
if(merged_mtd)
mtd_device_register(merged_mtd, NULL, 0);
}
if(devices_found == 3) /* register the third (DIL-Flash) device */
mtd_device_register(mymtd[2], NULL, 0);
return(devices_found ? 0 : -ENXIO);
}
static void __exit cleanup_sc520cdp(void)
{
int i;
if (merged_mtd) {
mtd_device_unregister(merged_mtd);
mtd_concat_destroy(merged_mtd);
}
if (mymtd[2])
mtd_device_unregister(mymtd[2]);
for (i = 0; i < NUM_FLASH_BANKS; i++) {
if (mymtd[i])
map_destroy(mymtd[i]);
if (sc520cdp_map[i].virt) {
iounmap(sc520cdp_map[i].virt);
sc520cdp_map[i].virt = NULL;
}
}
}
module_init(init_sc520cdp);
module_exit(cleanup_sc520cdp);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sysgo Real-Time Solutions GmbH");
MODULE_DESCRIPTION("MTD map driver for AMD SC520 Customer Development Platform");
| linux-master | drivers/mtd/maps/sc520cdp.c |
/*
* MTD map driver for flash on the DC21285 (the StrongARM-110 companion chip)
*
* (C) 2000 Nicolas Pitre <[email protected]>
*
* This code is GPL
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>
#include <asm/hardware/dec21285.h>
#include <asm/mach-types.h>
static struct mtd_info *dc21285_mtd;
#ifdef CONFIG_ARCH_NETWINDER
/*
* This is really ugly, but it seems to be the only
* reliable way to do it, as the CPLD state machine
* is unpredictable. So we have a 25us penalty per
* write access.
*/
static void nw_en_write(void)
{
unsigned long flags;
/*
* we want to write a bit pattern XXX1 to Xilinx to enable
* the write gate, which will be open for about the next 2ms.
*/
raw_spin_lock_irqsave(&nw_gpio_lock, flags);
nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
/*
* let the ISA bus catch up...
*/
udelay(25);
}
#else
#define nw_en_write() do { } while (0)
#endif
static map_word dc21285_read8(struct map_info *map, unsigned long ofs)
{
map_word val;
val.x[0] = *(uint8_t*)(map->virt + ofs);
return val;
}
static map_word dc21285_read16(struct map_info *map, unsigned long ofs)
{
map_word val;
val.x[0] = *(uint16_t*)(map->virt + ofs);
return val;
}
static map_word dc21285_read32(struct map_info *map, unsigned long ofs)
{
map_word val;
val.x[0] = *(uint32_t*)(map->virt + ofs);
return val;
}
static void dc21285_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
memcpy(to, (void*)(map->virt + from), len);
}
static void dc21285_write8(struct map_info *map, const map_word d, unsigned long adr)
{
if (machine_is_netwinder())
nw_en_write();
*CSR_ROMWRITEREG = adr & 3;
adr &= ~3;
*(uint8_t*)(map->virt + adr) = d.x[0];
}
static void dc21285_write16(struct map_info *map, const map_word d, unsigned long adr)
{
if (machine_is_netwinder())
nw_en_write();
*CSR_ROMWRITEREG = adr & 3;
adr &= ~3;
*(uint16_t*)(map->virt + adr) = d.x[0];
}
static void dc21285_write32(struct map_info *map, const map_word d, unsigned long adr)
{
if (machine_is_netwinder())
nw_en_write();
*(uint32_t*)(map->virt + adr) = d.x[0];
}
static void dc21285_copy_to_32(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
while (len > 0) {
map_word d;
d.x[0] = *((uint32_t*)from);
dc21285_write32(map, d, to);
from += 4;
to += 4;
len -= 4;
}
}
static void dc21285_copy_to_16(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
while (len > 0) {
map_word d;
d.x[0] = *((uint16_t*)from);
dc21285_write16(map, d, to);
from += 2;
to += 2;
len -= 2;
}
}
static void dc21285_copy_to_8(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
while (len > 0) {
map_word d;
d.x[0] = *((uint8_t*)from);
dc21285_write8(map, d, to);
from++;
to++;
len--;
}
}
static struct map_info dc21285_map = {
.name = "DC21285 flash",
.phys = NO_XIP,
.size = 16*1024*1024,
.copy_from = dc21285_copy_from,
};
/* Partition stuff */
static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
static int __init init_dc21285(void)
{
/* Determine bankwidth */
switch (*CSR_SA110_CNTL & (3<<14)) {
case SA110_CNTL_ROMWIDTH_8:
dc21285_map.bankwidth = 1;
dc21285_map.read = dc21285_read8;
dc21285_map.write = dc21285_write8;
dc21285_map.copy_to = dc21285_copy_to_8;
break;
case SA110_CNTL_ROMWIDTH_16:
dc21285_map.bankwidth = 2;
dc21285_map.read = dc21285_read16;
dc21285_map.write = dc21285_write16;
dc21285_map.copy_to = dc21285_copy_to_16;
break;
case SA110_CNTL_ROMWIDTH_32:
dc21285_map.bankwidth = 4;
dc21285_map.read = dc21285_read32;
dc21285_map.write = dc21285_write32;
dc21285_map.copy_to = dc21285_copy_to_32;
break;
default:
printk (KERN_ERR "DC21285 flash: undefined bankwidth\n");
return -ENXIO;
}
printk (KERN_NOTICE "DC21285 flash support (%d-bit bankwidth)\n",
dc21285_map.bankwidth*8);
/* Let's map the flash area */
dc21285_map.virt = ioremap(DC21285_FLASH, 16*1024*1024);
if (!dc21285_map.virt) {
printk("Failed to ioremap\n");
return -EIO;
}
if (machine_is_ebsa285()) {
dc21285_mtd = do_map_probe("cfi_probe", &dc21285_map);
} else {
dc21285_mtd = do_map_probe("jedec_probe", &dc21285_map);
}
if (!dc21285_mtd) {
iounmap(dc21285_map.virt);
return -ENXIO;
}
dc21285_mtd->owner = THIS_MODULE;
mtd_device_parse_register(dc21285_mtd, probes, NULL, NULL, 0);
if(machine_is_ebsa285()) {
/*
* Flash timing is determined with bits 19-16 of the
* CSR_SA110_CNTL. The value is the number of wait cycles, or
* 0 for 16 cycles (the default). Cycles are 20 ns.
* Here we use 7 for 140 ns flash chips.
*/
/* access time */
*CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x000f0000) | (7 << 16));
/* burst time */
*CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x00f00000) | (7 << 20));
/* tristate time */
*CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x0f000000) | (7 << 24));
}
return 0;
}
static void __exit cleanup_dc21285(void)
{
mtd_device_unregister(dc21285_mtd);
map_destroy(dc21285_mtd);
iounmap(dc21285_map.virt);
}
module_init(init_dc21285);
module_exit(cleanup_dc21285);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <[email protected]>");
MODULE_DESCRIPTION("MTD map driver for DC21285 boards");
| linux-master | drivers/mtd/maps/dc21285.c |
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************/
/*
* nettel.c -- mappings for NETtel/SecureEdge/SnapGear (x86) boards.
*
* (C) Copyright 2000-2001, Greg Ungerer ([email protected])
* (C) Copyright 2001-2002, SnapGear (www.snapgear.com)
*/
/****************************************************************************/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/cfi.h>
#include <linux/reboot.h>
#include <linux/err.h>
#include <linux/kdev_t.h>
#include <linux/root_dev.h>
#include <asm/io.h>
/****************************************************************************/
#define INTEL_BUSWIDTH 1
#define AMD_WINDOW_MAXSIZE 0x00200000
#define AMD_BUSWIDTH 1
/*
* PAR masks and shifts, assuming 64K pages.
*/
#define SC520_PAR_ADDR_MASK 0x00003fff
#define SC520_PAR_ADDR_SHIFT 16
#define SC520_PAR_TO_ADDR(par) \
(((par)&SC520_PAR_ADDR_MASK) << SC520_PAR_ADDR_SHIFT)
#define SC520_PAR_SIZE_MASK 0x01ffc000
#define SC520_PAR_SIZE_SHIFT 2
#define SC520_PAR_TO_SIZE(par) \
((((par)&SC520_PAR_SIZE_MASK) << SC520_PAR_SIZE_SHIFT) + (64*1024))
#define SC520_PAR(cs, addr, size) \
((cs) | \
((((size)-(64*1024)) >> SC520_PAR_SIZE_SHIFT) & SC520_PAR_SIZE_MASK) | \
(((addr) >> SC520_PAR_ADDR_SHIFT) & SC520_PAR_ADDR_MASK))
#define SC520_PAR_BOOTCS 0x8a000000
#define SC520_PAR_ROMCS1 0xaa000000
#define SC520_PAR_ROMCS2 0xca000000 /* Cache disabled, 64K page */
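/*
* Worked example (illustration only; derived from the macros above, not
* re-checked against the SC520 datasheet): the BOOTCS window programmed
* in nettel_init(),
* SC520_PAR(SC520_PAR_BOOTCS, 0x20000000, AMD_WINDOW_MAXSIZE),
* expands to
* 0x8a000000 | (((0x00200000 - 0x10000) >> 2) & 0x01ffc000)
* | ((0x20000000 >> 16) & 0x00003fff)
* = 0x8a000000 | 0x0007c000 | 0x00002000 = 0x8a07e000.
*/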
static void *nettel_mmcrp = NULL;
#ifdef CONFIG_MTD_CFI_INTELEXT
static struct mtd_info *intel_mtd;
#endif
static struct mtd_info *amd_mtd;
/****************************************************************************/
/****************************************************************************/
#ifdef CONFIG_MTD_CFI_INTELEXT
static struct map_info nettel_intel_map = {
.name = "SnapGear Intel",
.size = 0,
.bankwidth = INTEL_BUSWIDTH,
};
static struct mtd_partition nettel_intel_partitions[] = {
{
.name = "SnapGear kernel",
.offset = 0,
.size = 0x000e0000
},
{
.name = "SnapGear filesystem",
.offset = 0x00100000,
},
{
.name = "SnapGear config",
.offset = 0x000e0000,
.size = 0x00020000
},
{
.name = "SnapGear Intel",
.offset = 0
},
{
.name = "SnapGear BIOS Config",
.offset = 0x007e0000,
.size = 0x00020000
},
{
.name = "SnapGear BIOS",
.offset = 0x007e0000,
.size = 0x00020000
},
};
#endif
static struct map_info nettel_amd_map = {
.name = "SnapGear AMD",
.size = AMD_WINDOW_MAXSIZE,
.bankwidth = AMD_BUSWIDTH,
};
static const struct mtd_partition nettel_amd_partitions[] = {
{
.name = "SnapGear BIOS config",
.offset = 0x000e0000,
.size = 0x00010000
},
{
.name = "SnapGear BIOS",
.offset = 0x000f0000,
.size = 0x00010000
},
{
.name = "SnapGear AMD",
.offset = 0
},
{
.name = "SnapGear high BIOS",
.offset = 0x001f0000,
.size = 0x00010000
}
};
#define NUM_AMD_PARTITIONS ARRAY_SIZE(nettel_amd_partitions)
/****************************************************************************/
#ifdef CONFIG_MTD_CFI_INTELEXT
/*
* Set the Intel flash back to read mode since some old boot
* loaders don't.
*/
static int nettel_reboot_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
struct cfi_private *cfi = nettel_intel_map.fldrv_priv;
unsigned long b;
/* Make sure all FLASH chips are put back into read mode */
for (b = 0; (b < nettel_intel_partitions[3].size); b += 0x100000) {
cfi_send_gen_cmd(0xff, 0x55, b, &nettel_intel_map, cfi,
cfi->device_type, NULL);
}
return(NOTIFY_OK);
}
static struct notifier_block nettel_notifier_block = {
nettel_reboot_notifier, NULL, 0
};
#endif
/****************************************************************************/
static int __init nettel_init(void)
{
volatile unsigned long *amdpar;
unsigned long amdaddr, maxsize;
int num_amd_partitions=0;
#ifdef CONFIG_MTD_CFI_INTELEXT
volatile unsigned long *intel0par, *intel1par;
unsigned long orig_bootcspar, orig_romcs1par;
unsigned long intel0addr, intel0size;
unsigned long intel1addr, intel1size;
int intelboot, intel0cs, intel1cs;
int num_intel_partitions;
#endif
int rc = 0;
nettel_mmcrp = (void *) ioremap(0xfffef000, 4096);
if (nettel_mmcrp == NULL) {
printk("SNAPGEAR: failed to disable MMCR cache??\n");
return(-EIO);
}
/* Set CPU clock to be 33.000MHz */
*((unsigned char *) (nettel_mmcrp + 0xc64)) = 0x01;
amdpar = (volatile unsigned long *) (nettel_mmcrp + 0xc4);
#ifdef CONFIG_MTD_CFI_INTELEXT
intelboot = 0;
intel0cs = SC520_PAR_ROMCS1;
intel0par = (volatile unsigned long *) (nettel_mmcrp + 0xc0);
intel1cs = SC520_PAR_ROMCS2;
intel1par = (volatile unsigned long *) (nettel_mmcrp + 0xbc);
/*
* Save the CS settings then ensure ROMCS1 and ROMCS2 are off,
* otherwise they might clash with where we try to map BOOTCS.
*/
orig_bootcspar = *amdpar;
orig_romcs1par = *intel0par;
*intel0par = 0;
*intel1par = 0;
#endif
/*
* The first thing to do is determine if we have a separate
* boot FLASH device. Typically this is a small (1 to 2MB)
* AMD FLASH part. It seems that device size is about the
* only way to tell if this is the case...
*/
amdaddr = 0x20000000;
maxsize = AMD_WINDOW_MAXSIZE;
*amdpar = SC520_PAR(SC520_PAR_BOOTCS, amdaddr, maxsize);
__asm__ ("wbinvd");
nettel_amd_map.phys = amdaddr;
nettel_amd_map.virt = ioremap(amdaddr, maxsize);
if (!nettel_amd_map.virt) {
printk("SNAPGEAR: failed to ioremap() BOOTCS\n");
iounmap(nettel_mmcrp);
return(-EIO);
}
simple_map_init(&nettel_amd_map);
if ((amd_mtd = do_map_probe("jedec_probe", &nettel_amd_map))) {
printk(KERN_NOTICE "SNAPGEAR: AMD flash device size = %dK\n",
(int)(amd_mtd->size>>10));
amd_mtd->owner = THIS_MODULE;
/* The high BIOS partition is only present for 2MB units */
num_amd_partitions = NUM_AMD_PARTITIONS;
if (amd_mtd->size < AMD_WINDOW_MAXSIZE)
num_amd_partitions--;
/* Don't add the partition until after the primary INTEL's */
#ifdef CONFIG_MTD_CFI_INTELEXT
/*
* Map the Intel flash into memory after the AMD.
* It has to start on a multiple of maxsize.
*/
maxsize = SC520_PAR_TO_SIZE(orig_romcs1par);
if (maxsize < (32 * 1024 * 1024))
maxsize = (32 * 1024 * 1024);
intel0addr = amdaddr + maxsize;
#endif
} else {
#ifdef CONFIG_MTD_CFI_INTELEXT
/* INTEL boot FLASH */
intelboot++;
if (!orig_romcs1par) {
intel0cs = SC520_PAR_BOOTCS;
intel0par = (volatile unsigned long *)
(nettel_mmcrp + 0xc4);
intel1cs = SC520_PAR_ROMCS1;
intel1par = (volatile unsigned long *)
(nettel_mmcrp + 0xc0);
intel0addr = SC520_PAR_TO_ADDR(orig_bootcspar);
maxsize = SC520_PAR_TO_SIZE(orig_bootcspar);
} else {
/* Kernel base is on ROMCS1, not BOOTCS */
intel0cs = SC520_PAR_ROMCS1;
intel0par = (volatile unsigned long *)
(nettel_mmcrp + 0xc0);
intel1cs = SC520_PAR_BOOTCS;
intel1par = (volatile unsigned long *)
(nettel_mmcrp + 0xc4);
intel0addr = SC520_PAR_TO_ADDR(orig_romcs1par);
maxsize = SC520_PAR_TO_SIZE(orig_romcs1par);
}
/* Destroy useless AMD MTD mapping */
amd_mtd = NULL;
iounmap(nettel_amd_map.virt);
nettel_amd_map.virt = NULL;
#else
/* Only AMD flash supported */
rc = -ENXIO;
goto out_unmap2;
#endif
}
#ifdef CONFIG_MTD_CFI_INTELEXT
/*
* We have determined the INTEL FLASH configuration, so let's
* go ahead and probe for them now.
*/
/* Set PAR to the maximum size */
if (maxsize < (32 * 1024 * 1024))
maxsize = (32 * 1024 * 1024);
*intel0par = SC520_PAR(intel0cs, intel0addr, maxsize);
/* Turn other PAR off so the first probe doesn't find it */
*intel1par = 0;
/* Probe for the size of the first Intel flash */
nettel_intel_map.size = maxsize;
nettel_intel_map.phys = intel0addr;
nettel_intel_map.virt = ioremap(intel0addr, maxsize);
if (!nettel_intel_map.virt) {
printk("SNAPGEAR: failed to ioremap() ROMCS1\n");
rc = -EIO;
goto out_unmap2;
}
simple_map_init(&nettel_intel_map);
intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map);
if (!intel_mtd) {
rc = -ENXIO;
goto out_unmap1;
}
/* Set PAR to the detected size */
intel0size = intel_mtd->size;
*intel0par = SC520_PAR(intel0cs, intel0addr, intel0size);
/*
* Map second Intel FLASH right after first. Set its size to the
* same maxsize used for the first Intel FLASH.
*/
intel1addr = intel0addr + intel0size;
*intel1par = SC520_PAR(intel1cs, intel1addr, maxsize);
__asm__ ("wbinvd");
maxsize += intel0size;
/* Delete the old map and probe again to do both chips */
map_destroy(intel_mtd);
intel_mtd = NULL;
iounmap(nettel_intel_map.virt);
nettel_intel_map.size = maxsize;
nettel_intel_map.virt = ioremap(intel0addr, maxsize);
if (!nettel_intel_map.virt) {
printk("SNAPGEAR: failed to ioremap() ROMCS1/2\n");
rc = -EIO;
goto out_unmap2;
}
intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map);
if (! intel_mtd) {
rc = -ENXIO;
goto out_unmap1;
}
intel1size = intel_mtd->size - intel0size;
if (intel1size > 0) {
*intel1par = SC520_PAR(intel1cs, intel1addr, intel1size);
__asm__ ("wbinvd");
} else {
*intel1par = 0;
}
printk(KERN_NOTICE "SNAPGEAR: Intel flash device size = %lldKiB\n",
(unsigned long long)(intel_mtd->size >> 10));
intel_mtd->owner = THIS_MODULE;
num_intel_partitions = ARRAY_SIZE(nettel_intel_partitions);
if (intelboot) {
/*
* Adjust offset and size of last boot partition.
* Must allow for BIOS region at end of FLASH.
*/
nettel_intel_partitions[1].size = (intel0size + intel1size) -
(1024*1024 + intel_mtd->erasesize);
nettel_intel_partitions[3].size = intel0size + intel1size;
nettel_intel_partitions[4].offset =
(intel0size + intel1size) - intel_mtd->erasesize;
nettel_intel_partitions[4].size = intel_mtd->erasesize;
nettel_intel_partitions[5].offset =
nettel_intel_partitions[4].offset;
nettel_intel_partitions[5].size =
nettel_intel_partitions[4].size;
} else {
/* No BIOS regions when AMD boot */
num_intel_partitions -= 2;
}
rc = mtd_device_register(intel_mtd, nettel_intel_partitions,
num_intel_partitions);
if (rc)
goto out_map_destroy;
#endif
if (amd_mtd) {
rc = mtd_device_register(amd_mtd, nettel_amd_partitions,
num_amd_partitions);
if (rc)
goto out_mtd_unreg;
}
#ifdef CONFIG_MTD_CFI_INTELEXT
register_reboot_notifier(&nettel_notifier_block);
#endif
return rc;
out_mtd_unreg:
#ifdef CONFIG_MTD_CFI_INTELEXT
mtd_device_unregister(intel_mtd);
out_map_destroy:
map_destroy(intel_mtd);
out_unmap1:
iounmap(nettel_intel_map.virt);
#endif
out_unmap2:
iounmap(nettel_mmcrp);
iounmap(nettel_amd_map.virt);
return rc;
}
/****************************************************************************/
static void __exit nettel_cleanup(void)
{
#ifdef CONFIG_MTD_CFI_INTELEXT
unregister_reboot_notifier(&nettel_notifier_block);
#endif
if (amd_mtd) {
mtd_device_unregister(amd_mtd);
map_destroy(amd_mtd);
}
if (nettel_mmcrp) {
iounmap(nettel_mmcrp);
nettel_mmcrp = NULL;
}
if (nettel_amd_map.virt) {
iounmap(nettel_amd_map.virt);
nettel_amd_map.virt = NULL;
}
#ifdef CONFIG_MTD_CFI_INTELEXT
if (intel_mtd) {
mtd_device_unregister(intel_mtd);
map_destroy(intel_mtd);
}
if (nettel_intel_map.virt) {
iounmap(nettel_intel_map.virt);
nettel_intel_map.virt = NULL;
}
#endif
}
/****************************************************************************/
module_init(nettel_init);
module_exit(nettel_cleanup);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Greg Ungerer <[email protected]>");
MODULE_DESCRIPTION("SnapGear/SecureEdge FLASH support");
/****************************************************************************/
| linux-master | drivers/mtd/maps/nettel.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/mtd/maps/plat-ram.c
*
* (c) 2004-2005 Simtec Electronics
* http://www.simtec.co.uk/products/SWLINUX/
* Ben Dooks <[email protected]>
*
* Generic platform device based RAM map
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/plat-ram.h>
#include <asm/io.h>
/* private structure for each mtd platform ram device created */
struct platram_info {
struct device *dev;
struct mtd_info *mtd;
struct map_info map;
struct platdata_mtd_ram *pdata;
};
/* to_platram_info()
*
* device private data to struct platram_info conversion
*/
static inline struct platram_info *to_platram_info(struct platform_device *dev)
{
return platform_get_drvdata(dev);
}
/* platram_setrw
*
* call the platform device's set rw/ro control
*
* to = 0 => read-only
* = 1 => read-write
*/
static inline void platram_setrw(struct platram_info *info, int to)
{
if (info->pdata == NULL)
return;
if (info->pdata->set_rw != NULL)
(info->pdata->set_rw)(info->dev, to);
}
/* platram_remove
*
* called to remove the device from the driver's control
*/
static int platram_remove(struct platform_device *pdev)
{
struct platram_info *info = to_platram_info(pdev);
dev_dbg(&pdev->dev, "removing device\n");
if (info == NULL)
return 0;
if (info->mtd) {
mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
/* ensure ram is left read-only */
platram_setrw(info, PLATRAM_RO);
kfree(info);
return 0;
}
/* platram_probe
*
* called from the device driver system when a device matching our
* driver is found.
*/
static int platram_probe(struct platform_device *pdev)
{
struct platdata_mtd_ram *pdata;
struct platram_info *info;
struct resource *res;
int err = 0;
dev_dbg(&pdev->dev, "probe entered\n");
if (dev_get_platdata(&pdev->dev) == NULL) {
dev_err(&pdev->dev, "no platform data supplied\n");
err = -ENOENT;
goto exit_error;
}
pdata = dev_get_platdata(&pdev->dev);
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL) {
err = -ENOMEM;
goto exit_error;
}
platform_set_drvdata(pdev, info);
info->dev = &pdev->dev;
info->pdata = pdata;
/* get the resource for the memory mapping */
info->map.virt = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(info->map.virt)) {
err = PTR_ERR(info->map.virt);
goto exit_free;
}
dev_dbg(&pdev->dev, "got platform resource %p (0x%llx)\n", res,
(unsigned long long)res->start);
/* setup map parameters */
info->map.phys = res->start;
info->map.size = resource_size(res);
info->map.name = pdata->mapname != NULL ?
(char *)pdata->mapname : (char *)pdev->name;
info->map.bankwidth = pdata->bankwidth;
dev_dbg(&pdev->dev, "virt %p, %lu bytes\n", info->map.virt, info->map.size);
simple_map_init(&info->map);
dev_dbg(&pdev->dev, "initialised map, probing for mtd\n");
/* probe for the right mtd map driver
* supplied by the platform_data struct */
if (pdata->map_probes) {
const char * const *map_probes = pdata->map_probes;
for ( ; !info->mtd && *map_probes; map_probes++)
info->mtd = do_map_probe(*map_probes , &info->map);
}
/* fallback to map_ram */
else
info->mtd = do_map_probe("map_ram", &info->map);
if (info->mtd == NULL) {
dev_err(&pdev->dev, "failed to probe for map_ram\n");
err = -ENOMEM;
goto exit_free;
}
info->mtd->dev.parent = &pdev->dev;
platram_setrw(info, PLATRAM_RW);
/* check to see if there are any available partitions, or whether
* to add this device whole */
err = mtd_device_parse_register(info->mtd, pdata->probes, NULL,
pdata->partitions,
pdata->nr_partitions);
if (err) {
dev_err(&pdev->dev, "failed to register mtd device\n");
goto exit_free;
}
dev_info(&pdev->dev, "registered mtd device\n");
if (pdata->nr_partitions) {
/* add the whole device. */
err = mtd_device_register(info->mtd, NULL, 0);
if (err) {
dev_err(&pdev->dev,
"failed to register the entire device\n");
goto exit_free;
}
}
return 0;
exit_free:
platram_remove(pdev);
exit_error:
return err;
}
/* device driver info */
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:mtd-ram");
static struct platform_driver platram_driver = {
.probe = platram_probe,
.remove = platram_remove,
.driver = {
.name = "mtd-ram",
},
};
module_platform_driver(platram_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <[email protected]>");
MODULE_DESCRIPTION("MTD platform RAM map driver");
| linux-master | drivers/mtd/maps/plat-ram.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PISMO memory driver - http://www.pismoworld.org/
*
* For ARM Realview and Versatile platforms
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/plat-ram.h>
#include <linux/mtd/pismo.h>
#define PISMO_NUM_CS 5
struct pismo_cs_block {
u8 type;
u8 width;
__le16 access;
__le32 size;
u32 reserved[2];
char device[32];
} __packed;
struct pismo_eeprom {
struct pismo_cs_block cs[PISMO_NUM_CS];
char board[15];
u8 sum;
} __packed;
struct pismo_mem {
phys_addr_t base;
u32 size;
u16 access;
u8 width;
u8 type;
};
struct pismo_data {
struct i2c_client *client;
void (*vpp)(void *, int);
void *vpp_data;
struct platform_device *dev[PISMO_NUM_CS];
};
static void pismo_set_vpp(struct platform_device *pdev, int on)
{
struct i2c_client *client = to_i2c_client(pdev->dev.parent);
struct pismo_data *pismo = i2c_get_clientdata(client);
pismo->vpp(pismo->vpp_data, on);
}
static unsigned int pismo_width_to_bytes(unsigned int width)
{
width &= 15;
if (width > 2)
return 0;
return 1 << width;
}
static int pismo_eeprom_read(struct i2c_client *client, void *buf, u8 addr,
size_t size)
{
int ret;
struct i2c_msg msg[] = {
{
.addr = client->addr,
.len = sizeof(addr),
.buf = &addr,
}, {
.addr = client->addr,
.flags = I2C_M_RD,
.len = size,
.buf = buf,
},
};
ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
return ret == ARRAY_SIZE(msg) ? size : -EIO;
}
static int pismo_add_device(struct pismo_data *pismo, int i,
struct pismo_mem *region, const char *name,
void *pdata, size_t psize)
{
struct platform_device *dev;
struct resource res = { };
phys_addr_t base = region->base;
int ret;
if (base == ~0)
return -ENXIO;
res.start = base;
res.end = base + region->size - 1;
res.flags = IORESOURCE_MEM;
dev = platform_device_alloc(name, i);
if (!dev)
return -ENOMEM;
dev->dev.parent = &pismo->client->dev;
do {
ret = platform_device_add_resources(dev, &res, 1);
if (ret)
break;
ret = platform_device_add_data(dev, pdata, psize);
if (ret)
break;
ret = platform_device_add(dev);
if (ret)
break;
pismo->dev[i] = dev;
return 0;
} while (0);
platform_device_put(dev);
return ret;
}
static int pismo_add_nor(struct pismo_data *pismo, int i,
struct pismo_mem *region)
{
struct physmap_flash_data data = {
.width = region->width,
};
if (pismo->vpp)
data.set_vpp = pismo_set_vpp;
return pismo_add_device(pismo, i, region, "physmap-flash",
&data, sizeof(data));
}
static int pismo_add_sram(struct pismo_data *pismo, int i,
struct pismo_mem *region)
{
struct platdata_mtd_ram data = {
.bankwidth = region->width,
};
return pismo_add_device(pismo, i, region, "mtd-ram",
&data, sizeof(data));
}
static void pismo_add_one(struct pismo_data *pismo, int i,
const struct pismo_cs_block *cs, phys_addr_t base)
{
struct device *dev = &pismo->client->dev;
struct pismo_mem region;
region.base = base;
region.type = cs->type;
region.width = pismo_width_to_bytes(cs->width);
region.access = le16_to_cpu(cs->access);
region.size = le32_to_cpu(cs->size);
if (region.width == 0) {
dev_err(dev, "cs%u: bad width: %02x, ignoring\n", i, cs->width);
return;
}
/*
* FIXME: may need to set up the platform's memory controller here, but at
* the moment we assume that it has already been set up correctly.
* The memory controller could also tell us the base address.
*/
dev_info(dev, "cs%u: %.32s: type %02x access %u00ps size %uK\n",
i, cs->device, region.type, region.access, region.size / 1024);
switch (region.type) {
case 0:
break;
case 1:
/* static DOC */
break;
case 2:
/* static NOR */
pismo_add_nor(pismo, i, &region);
break;
case 3:
/* static RAM */
pismo_add_sram(pismo, i, &region);
break;
}
}
static void pismo_remove(struct i2c_client *client)
{
struct pismo_data *pismo = i2c_get_clientdata(client);
int i;
for (i = 0; i < ARRAY_SIZE(pismo->dev); i++)
platform_device_unregister(pismo->dev[i]);
kfree(pismo);
}
static int pismo_probe(struct i2c_client *client)
{
struct pismo_pdata *pdata = client->dev.platform_data;
struct pismo_eeprom eeprom;
struct pismo_data *pismo;
int ret, i;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev, "functionality mismatch\n");
return -EIO;
}
pismo = kzalloc(sizeof(*pismo), GFP_KERNEL);
if (!pismo)
return -ENOMEM;
pismo->client = client;
if (pdata) {
pismo->vpp = pdata->set_vpp;
pismo->vpp_data = pdata->vpp_data;
}
i2c_set_clientdata(client, pismo);
ret = pismo_eeprom_read(client, &eeprom, 0, sizeof(eeprom));
if (ret < 0) {
dev_err(&client->dev, "error reading EEPROM: %d\n", ret);
goto exit_free;
}
dev_info(&client->dev, "%.15s board found\n", eeprom.board);
for (i = 0; i < ARRAY_SIZE(eeprom.cs); i++)
if (eeprom.cs[i].type != 0xff)
pismo_add_one(pismo, i, &eeprom.cs[i],
pdata->cs_addrs[i]);
return 0;
exit_free:
kfree(pismo);
return ret;
}
static const struct i2c_device_id pismo_id[] = {
{ "pismo" },
{ },
};
MODULE_DEVICE_TABLE(i2c, pismo_id);
static struct i2c_driver pismo_driver = {
.driver = {
.name = "pismo",
},
.probe = pismo_probe,
.remove = pismo_remove,
.id_table = pismo_id,
};
static int __init pismo_init(void)
{
BUILD_BUG_ON(sizeof(struct pismo_cs_block) != 48);
BUILD_BUG_ON(sizeof(struct pismo_eeprom) != 256);
return i2c_add_driver(&pismo_driver);
}
module_init(pismo_init);
static void __exit pismo_exit(void)
{
i2c_del_driver(&pismo_driver);
}
module_exit(pismo_exit);
MODULE_AUTHOR("Russell King <[email protected]>");
MODULE_DESCRIPTION("PISMO memory driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/maps/pismo.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Versatile OF physmap driver add-on
*
* Copyright (c) 2016, Linaro Limited
* Author: Linus Walleij <[email protected]>
*/
#include <linux/export.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mtd/map.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/bitops.h>
#include "physmap-versatile.h"
static struct regmap *syscon_regmap;
enum versatile_flashprot {
INTEGRATOR_AP_FLASHPROT,
INTEGRATOR_CP_FLASHPROT,
VERSATILE_FLASHPROT,
REALVIEW_FLASHPROT,
};
static const struct of_device_id syscon_match[] = {
{
.compatible = "arm,integrator-ap-syscon",
.data = (void *)INTEGRATOR_AP_FLASHPROT,
},
{
.compatible = "arm,integrator-cp-syscon",
.data = (void *)INTEGRATOR_CP_FLASHPROT,
},
{
.compatible = "arm,core-module-versatile",
.data = (void *)VERSATILE_FLASHPROT,
},
{
.compatible = "arm,realview-eb-syscon",
.data = (void *)REALVIEW_FLASHPROT,
},
{
.compatible = "arm,realview-pb1176-syscon",
.data = (void *)REALVIEW_FLASHPROT,
},
{
.compatible = "arm,realview-pb11mp-syscon",
.data = (void *)REALVIEW_FLASHPROT,
},
{
.compatible = "arm,realview-pba8-syscon",
.data = (void *)REALVIEW_FLASHPROT,
},
{
.compatible = "arm,realview-pbx-syscon",
.data = (void *)REALVIEW_FLASHPROT,
},
{},
};
/*
* Flash protection handling for the Integrator/AP
*/
#define INTEGRATOR_SC_CTRLS_OFFSET 0x08
#define INTEGRATOR_SC_CTRLC_OFFSET 0x0C
#define INTEGRATOR_SC_CTRL_FLVPPEN BIT(1)
#define INTEGRATOR_SC_CTRL_FLWP BIT(2)
#define INTEGRATOR_EBI_CSR1_OFFSET 0x04
/* The manual says bit 2, the code says bit 3, trust the code */
#define INTEGRATOR_EBI_WRITE_ENABLE BIT(3)
#define INTEGRATOR_EBI_LOCK_OFFSET 0x20
#define INTEGRATOR_EBI_LOCK_VAL 0xA05F
static const struct of_device_id ebi_match[] = {
{ .compatible = "arm,external-bus-interface"},
{ },
};
static int ap_flash_init(struct platform_device *pdev)
{
struct device_node *ebi;
void __iomem *ebi_base;
u32 val;
int ret;
/* Look up the EBI */
ebi = of_find_matching_node(NULL, ebi_match);
if (!ebi) {
return -ENODEV;
}
ebi_base = of_iomap(ebi, 0);
of_node_put(ebi);
if (!ebi_base)
return -ENODEV;
/* Clear VPP and write protection bits */
ret = regmap_write(syscon_regmap,
INTEGRATOR_SC_CTRLC_OFFSET,
INTEGRATOR_SC_CTRL_FLVPPEN | INTEGRATOR_SC_CTRL_FLWP);
if (ret)
dev_err(&pdev->dev, "error clearing Integrator VPP/WP\n");
/* Unlock the EBI */
writel(INTEGRATOR_EBI_LOCK_VAL, ebi_base + INTEGRATOR_EBI_LOCK_OFFSET);
/* Enable write cycles on the EBI, CSR1 (flash) */
val = readl(ebi_base + INTEGRATOR_EBI_CSR1_OFFSET);
val |= INTEGRATOR_EBI_WRITE_ENABLE;
writel(val, ebi_base + INTEGRATOR_EBI_CSR1_OFFSET);
/* Lock the EBI again */
writel(0, ebi_base + INTEGRATOR_EBI_LOCK_OFFSET);
iounmap(ebi_base);
return 0;
}
static void ap_flash_set_vpp(struct map_info *map, int on)
{
int ret;
if (on) {
ret = regmap_write(syscon_regmap,
INTEGRATOR_SC_CTRLS_OFFSET,
INTEGRATOR_SC_CTRL_FLVPPEN | INTEGRATOR_SC_CTRL_FLWP);
if (ret)
pr_err("error enabling AP VPP\n");
} else {
ret = regmap_write(syscon_regmap,
INTEGRATOR_SC_CTRLC_OFFSET,
INTEGRATOR_SC_CTRL_FLVPPEN | INTEGRATOR_SC_CTRL_FLWP);
if (ret)
pr_err("error disabling AP VPP\n");
}
}
/*
* Flash protection handling for the Integrator/CP
*/
#define INTCP_FLASHPROG_OFFSET 0x04
#define CINTEGRATOR_FLVPPEN BIT(0)
#define CINTEGRATOR_FLWREN BIT(1)
#define CINTEGRATOR_FLMASK (BIT(0)|BIT(1))
static void cp_flash_set_vpp(struct map_info *map, int on)
{
int ret;
if (on) {
ret = regmap_update_bits(syscon_regmap,
INTCP_FLASHPROG_OFFSET,
CINTEGRATOR_FLMASK,
CINTEGRATOR_FLVPPEN | CINTEGRATOR_FLWREN);
if (ret)
pr_err("error setting CP VPP\n");
} else {
ret = regmap_update_bits(syscon_regmap,
INTCP_FLASHPROG_OFFSET,
CINTEGRATOR_FLMASK,
0);
if (ret)
pr_err("error setting CP VPP\n");
}
}
/*
* Flash protection handling for the Versatiles and RealViews
*/
#define VERSATILE_SYS_FLASH_OFFSET 0x4C
static void versatile_flash_set_vpp(struct map_info *map, int on)
{
int ret;
ret = regmap_update_bits(syscon_regmap, VERSATILE_SYS_FLASH_OFFSET,
0x01, !!on);
if (ret)
pr_err("error setting Versatile VPP\n");
}
int of_flash_probe_versatile(struct platform_device *pdev,
struct device_node *np,
struct map_info *map)
{
struct device_node *sysnp;
const struct of_device_id *devid;
struct regmap *rmap;
static enum versatile_flashprot versatile_flashprot;
int ret;
/* Not all flash chips use this protection line */
if (!of_device_is_compatible(np, "arm,versatile-flash"))
return 0;
/* For first chip probed, look up the syscon regmap */
if (!syscon_regmap) {
sysnp = of_find_matching_node_and_match(NULL,
syscon_match,
&devid);
if (!sysnp)
return -ENODEV;
versatile_flashprot = (uintptr_t)devid->data;
rmap = syscon_node_to_regmap(sysnp);
of_node_put(sysnp);
if (IS_ERR(rmap))
return PTR_ERR(rmap);
syscon_regmap = rmap;
}
switch (versatile_flashprot) {
case INTEGRATOR_AP_FLASHPROT:
ret = ap_flash_init(pdev);
if (ret)
return ret;
map->set_vpp = ap_flash_set_vpp;
dev_info(&pdev->dev, "Integrator/AP flash protection\n");
break;
case INTEGRATOR_CP_FLASHPROT:
map->set_vpp = cp_flash_set_vpp;
dev_info(&pdev->dev, "Integrator/CP flash protection\n");
break;
case VERSATILE_FLASHPROT:
case REALVIEW_FLASHPROT:
map->set_vpp = versatile_flash_set_vpp;
dev_info(&pdev->dev, "versatile/realview flash protection\n");
break;
default:
dev_info(&pdev->dev, "device marked as Versatile flash "
"but no system controller was found\n");
break;
}
return 0;
}
| linux-master | drivers/mtd/maps/physmap-versatile.c |
/*
* Flash and EPROM on Hitachi Solution Engine and similar boards.
*
* (C) 2001 Red Hat, Inc.
*
* GPL'd
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/errno.h>
static struct mtd_info *flash_mtd;
static struct mtd_info *eprom_mtd;
struct map_info soleng_eprom_map = {
.name = "Solution Engine EPROM",
.size = 0x400000,
.bankwidth = 4,
};
struct map_info soleng_flash_map = {
.name = "Solution Engine FLASH",
.size = 0x400000,
.bankwidth = 4,
};
static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
static int __init init_soleng_maps(void)
{
/* First probe at offset 0 */
soleng_flash_map.phys = 0;
soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0);
soleng_eprom_map.phys = 0x01000000;
soleng_eprom_map.virt = (void __iomem *)P1SEGADDR(0x01000000);
simple_map_init(&soleng_eprom_map);
simple_map_init(&soleng_flash_map);
printk(KERN_NOTICE "Probing for flash chips at 0x00000000:\n");
flash_mtd = do_map_probe("cfi_probe", &soleng_flash_map);
if (!flash_mtd) {
/* Not there. Try swapping */
printk(KERN_NOTICE "Probing for flash chips at 0x01000000:\n");
soleng_flash_map.phys = 0x01000000;
soleng_flash_map.virt = P2SEGADDR(0x01000000);
soleng_eprom_map.phys = 0;
soleng_eprom_map.virt = P1SEGADDR(0);
flash_mtd = do_map_probe("cfi_probe", &soleng_flash_map);
if (!flash_mtd) {
/* Eep. */
printk(KERN_NOTICE "Flash chips not detected at either possible location.\n");
return -ENXIO;
}
}
printk(KERN_NOTICE "Solution Engine: Flash at 0x%pap, EPROM at 0x%pap\n",
&soleng_flash_map.phys,
&soleng_eprom_map.phys);
flash_mtd->owner = THIS_MODULE;
eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map);
if (eprom_mtd) {
eprom_mtd->owner = THIS_MODULE;
mtd_device_register(eprom_mtd, NULL, 0);
}
mtd_device_parse_register(flash_mtd, probes, NULL, NULL, 0);
return 0;
}
static void __exit cleanup_soleng_maps(void)
{
if (eprom_mtd) {
mtd_device_unregister(eprom_mtd);
map_destroy(eprom_mtd);
}
mtd_device_unregister(flash_mtd);
map_destroy(flash_mtd);
}
module_init(init_soleng_maps);
module_exit(cleanup_soleng_maps);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <[email protected]>");
MODULE_DESCRIPTION("MTD map driver for Hitachi SolutionEngine (and similar) boards");
| linux-master | drivers/mtd/maps/solutionengine.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2004 Liu Peng Infineon IFAP DC COM CPE
* Copyright (C) 2010 John Crispin <[email protected]>
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/cfi.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
#include <linux/of.h>
#include <lantiq_soc.h>
/*
* The NOR flash is connected to the same external bus unit (EBU) as PCI.
* To make PCI work we need to enable the endianness swapping for the address
* written to the EBU. This endianness swapping works for PCI correctly but
* fails for attached NOR devices. To work around this we need to use a complex
* map. The workaround involves swapping all addresses whilst probing the chip.
* Once probing is complete we stop swapping the addresses but swizzle the
* unlock addresses to ensure that access to the NOR device works correctly.
*/
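/*
* Concretely (illustration only): while map_priv_1 is LTQ_NOR_PROBING,
* ltq_read16()/ltq_write16() below XOR the address with 2, so a 16-bit
* probe access to offset 0x0 actually hits 0x2 and vice versa. Once the
* probe has finished, ltq_mtd_probe() stops the swapping and instead XORs
* the CFI unlock addresses with 1 (e.g. a typical 0x555 unlock address
* becomes 0x554), so normal accesses keep working on the swapped bus.
*/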
enum {
LTQ_NOR_PROBING,
LTQ_NOR_NORMAL
};
struct ltq_mtd {
struct resource *res;
struct mtd_info *mtd;
struct map_info *map;
};
static const char ltq_map_name[] = "ltq_nor";
static map_word
ltq_read16(struct map_info *map, unsigned long adr)
{
unsigned long flags;
map_word temp;
if (map->map_priv_1 == LTQ_NOR_PROBING)
adr ^= 2;
spin_lock_irqsave(&ebu_lock, flags);
temp.x[0] = *(u16 *)(map->virt + adr);
spin_unlock_irqrestore(&ebu_lock, flags);
return temp;
}
static void
ltq_write16(struct map_info *map, map_word d, unsigned long adr)
{
unsigned long flags;
if (map->map_priv_1 == LTQ_NOR_PROBING)
adr ^= 2;
spin_lock_irqsave(&ebu_lock, flags);
*(u16 *)(map->virt + adr) = d.x[0];
spin_unlock_irqrestore(&ebu_lock, flags);
}
/*
* The following 2 functions copy data between iomem and a cached memory
* section. As memcpy() makes use of pre-fetching we cannot use it here.
* The normal alternative of using memcpy_{to,from}io also makes use of
* memcpy() on MIPS so it is not applicable either. We are therefore stuck
* with having to use our own loop.
*/
static void
ltq_copy_from(struct map_info *map, void *to,
unsigned long from, ssize_t len)
{
unsigned char *f = (unsigned char *)map->virt + from;
unsigned char *t = (unsigned char *)to;
unsigned long flags;
spin_lock_irqsave(&ebu_lock, flags);
while (len--)
*t++ = *f++;
spin_unlock_irqrestore(&ebu_lock, flags);
}
static void
ltq_copy_to(struct map_info *map, unsigned long to,
const void *from, ssize_t len)
{
unsigned char *f = (unsigned char *)from;
unsigned char *t = (unsigned char *)map->virt + to;
unsigned long flags;
spin_lock_irqsave(&ebu_lock, flags);
while (len--)
*t++ = *f++;
spin_unlock_irqrestore(&ebu_lock, flags);
}
static int
ltq_mtd_probe(struct platform_device *pdev)
{
struct ltq_mtd *ltq_mtd;
struct cfi_private *cfi;
int err;
ltq_mtd = devm_kzalloc(&pdev->dev, sizeof(struct ltq_mtd), GFP_KERNEL);
if (!ltq_mtd)
return -ENOMEM;
platform_set_drvdata(pdev, ltq_mtd);
ltq_mtd->map = devm_kzalloc(&pdev->dev, sizeof(struct map_info),
GFP_KERNEL);
if (!ltq_mtd->map)
return -ENOMEM;
ltq_mtd->map->virt = devm_platform_get_and_ioremap_resource(pdev, 0, &ltq_mtd->res);
if (IS_ERR(ltq_mtd->map->virt))
return PTR_ERR(ltq_mtd->map->virt);
ltq_mtd->map->phys = ltq_mtd->res->start;
ltq_mtd->map->size = resource_size(ltq_mtd->res);
ltq_mtd->map->name = ltq_map_name;
ltq_mtd->map->bankwidth = 2;
ltq_mtd->map->read = ltq_read16;
ltq_mtd->map->write = ltq_write16;
ltq_mtd->map->copy_from = ltq_copy_from;
ltq_mtd->map->copy_to = ltq_copy_to;
ltq_mtd->map->map_priv_1 = LTQ_NOR_PROBING;
ltq_mtd->mtd = do_map_probe("cfi_probe", ltq_mtd->map);
ltq_mtd->map->map_priv_1 = LTQ_NOR_NORMAL;
if (!ltq_mtd->mtd) {
dev_err(&pdev->dev, "probing failed\n");
return -ENXIO;
}
ltq_mtd->mtd->dev.parent = &pdev->dev;
mtd_set_of_node(ltq_mtd->mtd, pdev->dev.of_node);
cfi = ltq_mtd->map->fldrv_priv;
cfi->addr_unlock1 ^= 1;
cfi->addr_unlock2 ^= 1;
err = mtd_device_register(ltq_mtd->mtd, NULL, 0);
if (err) {
dev_err(&pdev->dev, "failed to add partitions\n");
goto err_destroy;
}
return 0;
err_destroy:
map_destroy(ltq_mtd->mtd);
return err;
}
static int
ltq_mtd_remove(struct platform_device *pdev)
{
struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev);
if (ltq_mtd && ltq_mtd->mtd) {
mtd_device_unregister(ltq_mtd->mtd);
map_destroy(ltq_mtd->mtd);
}
return 0;
}
static const struct of_device_id ltq_mtd_match[] = {
{ .compatible = "lantiq,nor" },
{},
};
MODULE_DEVICE_TABLE(of, ltq_mtd_match);
static struct platform_driver ltq_mtd_driver = {
.probe = ltq_mtd_probe,
.remove = ltq_mtd_remove,
.driver = {
.name = "ltq-nor",
.of_match_table = ltq_mtd_match,
},
};
module_platform_driver(ltq_mtd_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <[email protected]>");
MODULE_DESCRIPTION("Lantiq SoC NOR");
| linux-master | drivers/mtd/maps/lantiq-flash.c |
// SPDX-License-Identifier: GPL-2.0-only
/* sun_uflash.c - Driver for user-programmable flash on
* Sun Microsystems SME boardsets.
*
* This driver does NOT provide access to the OBP-flash for
* safety reasons-- use <linux>/drivers/sbus/char/flash.c instead.
*
* Copyright (c) 2001 Eric Brower ([email protected])
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#define UFLASH_OBPNAME "flashprom"
#define DRIVER_NAME "sun_uflash"
#define PFX DRIVER_NAME ": "
#define UFLASH_WINDOW_SIZE 0x200000
#define UFLASH_BUSWIDTH 1 /* EBus is 8-bit */
MODULE_AUTHOR("Eric Brower <[email protected]>");
MODULE_DESCRIPTION("User-programmable flash device on Sun Microsystems boardsets");
MODULE_LICENSE("GPL");
MODULE_VERSION("2.1");
struct uflash_dev {
const char *name; /* device name */
struct map_info map; /* mtd map info */
struct mtd_info *mtd; /* mtd info */
};
struct map_info uflash_map_templ = {
.name = "SUNW,???-????",
.size = UFLASH_WINDOW_SIZE,
.bankwidth = UFLASH_BUSWIDTH,
};
int uflash_devinit(struct platform_device *op, struct device_node *dp)
{
struct uflash_dev *up;
if (op->resource[1].flags) {
/* Non-CFI userflash device-- once I find one we
* can work on supporting it.
*/
printk(KERN_ERR PFX "Unsupported device at %pOF, 0x%llx\n",
dp, (unsigned long long)op->resource[0].start);
return -ENODEV;
}
up = kzalloc(sizeof(struct uflash_dev), GFP_KERNEL);
if (!up)
return -ENOMEM;
/* copy defaults and tweak parameters */
memcpy(&up->map, &uflash_map_templ, sizeof(uflash_map_templ));
up->map.size = resource_size(&op->resource[0]);
up->name = of_get_property(dp, "model", NULL);
if (up->name && 0 < strlen(up->name))
up->map.name = up->name;
up->map.phys = op->resource[0].start;
up->map.virt = of_ioremap(&op->resource[0], 0, up->map.size,
DRIVER_NAME);
if (!up->map.virt) {
printk(KERN_ERR PFX "Failed to map device.\n");
kfree(up);
return -EINVAL;
}
simple_map_init(&up->map);
/* MTD registration */
up->mtd = do_map_probe("cfi_probe", &up->map);
if (!up->mtd) {
of_iounmap(&op->resource[0], up->map.virt, up->map.size);
kfree(up);
return -ENXIO;
}
up->mtd->owner = THIS_MODULE;
mtd_device_register(up->mtd, NULL, 0);
dev_set_drvdata(&op->dev, up);
return 0;
}
static int uflash_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
/* Flashprom must have the "user" property in order to
* be used by this driver.
*/
if (!of_property_read_bool(dp, "user"))
return -ENODEV;
return uflash_devinit(op, dp);
}
static int uflash_remove(struct platform_device *op)
{
struct uflash_dev *up = dev_get_drvdata(&op->dev);
if (up->mtd) {
mtd_device_unregister(up->mtd);
map_destroy(up->mtd);
}
if (up->map.virt) {
of_iounmap(&op->resource[0], up->map.virt, up->map.size);
up->map.virt = NULL;
}
kfree(up);
return 0;
}
static const struct of_device_id uflash_match[] = {
{
.name = UFLASH_OBPNAME,
},
{},
};
MODULE_DEVICE_TABLE(of, uflash_match);
static struct platform_driver uflash_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = uflash_match,
},
.probe = uflash_probe,
.remove = uflash_remove,
};
module_platform_driver(uflash_driver);
| linux-master | drivers/mtd/maps/sun_uflash.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Cortina Systems Gemini OF physmap add-on
* Copyright (C) 2017 Linus Walleij <[email protected]>
*
* This SoC has an elaborate flash control register, so we need to
* detect and set it up when booting on this platform.
*/
#include <linux/export.h>
#include <linux/of.h>
#include <linux/mtd/map.h>
#include <linux/mtd/xip.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/bitops.h>
#include <linux/pinctrl/consumer.h>
#include "physmap-gemini.h"
/*
* The Flash-relevant parts of the global status register
* These would also be relevant for a NAND driver.
*/
#define GLOBAL_STATUS 0x04
#define FLASH_TYPE_MASK (0x3 << 24)
#define FLASH_TYPE_NAND_2K (0x3 << 24)
#define FLASH_TYPE_NAND_512 (0x2 << 24)
#define FLASH_TYPE_PARALLEL (0x1 << 24)
#define FLASH_TYPE_SERIAL (0x0 << 24)
/* if parallel */
#define FLASH_WIDTH_16BIT (1 << 23) /* else 8 bit */
/* if serial */
#define FLASH_ATMEL (1 << 23) /* else STM */
#define FLASH_SIZE_MASK (0x3 << 21)
#define NAND_256M (0x3 << 21) /* and more */
#define NAND_128M (0x2 << 21)
#define NAND_64M (0x1 << 21)
#define NAND_32M (0x0 << 21)
#define ATMEL_16M (0x3 << 21) /* and more */
#define ATMEL_8M (0x2 << 21)
#define ATMEL_4M_2M (0x1 << 21)
#define ATMEL_1M (0x0 << 21) /* and less */
#define STM_32M (1 << 22) /* and more */
#define STM_16M (0 << 22) /* and less */
#define FLASH_PARALLEL_HIGH_PIN_CNT (1 << 20) /* else low pin cnt */
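/*
 * Editorial note -- worked example (illustrative): a GLOBAL_STATUS value of
 * 0x01800000 decodes as FLASH_TYPE_PARALLEL with FLASH_WIDTH_16BIT set,
 * i.e. a 16-bit parallel NOR flash, which is the combination the probe
 * function below accepts without warnings when the map bank-width is 2.
 */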
struct gemini_flash {
struct device *dev;
struct pinctrl *p;
struct pinctrl_state *enabled_state;
struct pinctrl_state *disabled_state;
};
/* Static local state */
static struct gemini_flash *gf;
static void gemini_flash_enable_pins(void)
{
int ret;
if (IS_ERR(gf->enabled_state))
return;
ret = pinctrl_select_state(gf->p, gf->enabled_state);
if (ret)
dev_err(gf->dev, "failed to enable pins\n");
}
static void gemini_flash_disable_pins(void)
{
int ret;
if (IS_ERR(gf->disabled_state))
return;
ret = pinctrl_select_state(gf->p, gf->disabled_state);
if (ret)
dev_err(gf->dev, "failed to disable pins\n");
}
static map_word __xipram gemini_flash_map_read(struct map_info *map,
unsigned long ofs)
{
map_word ret;
gemini_flash_enable_pins();
ret = inline_map_read(map, ofs);
gemini_flash_disable_pins();
return ret;
}
static void __xipram gemini_flash_map_write(struct map_info *map,
const map_word datum,
unsigned long ofs)
{
gemini_flash_enable_pins();
inline_map_write(map, datum, ofs);
gemini_flash_disable_pins();
}
static void __xipram gemini_flash_map_copy_from(struct map_info *map,
void *to, unsigned long from,
ssize_t len)
{
gemini_flash_enable_pins();
inline_map_copy_from(map, to, from, len);
gemini_flash_disable_pins();
}
static void __xipram gemini_flash_map_copy_to(struct map_info *map,
unsigned long to,
const void *from, ssize_t len)
{
gemini_flash_enable_pins();
inline_map_copy_to(map, to, from, len);
gemini_flash_disable_pins();
}
int of_flash_probe_gemini(struct platform_device *pdev,
struct device_node *np,
struct map_info *map)
{
struct regmap *rmap;
struct device *dev = &pdev->dev;
u32 val;
int ret;
/* Multiplatform guard */
if (!of_device_is_compatible(np, "cortina,gemini-flash"))
return 0;
gf = devm_kzalloc(dev, sizeof(*gf), GFP_KERNEL);
if (!gf)
return -ENOMEM;
gf->dev = dev;
rmap = syscon_regmap_lookup_by_phandle(np, "syscon");
if (IS_ERR(rmap)) {
dev_err(dev, "no syscon\n");
return PTR_ERR(rmap);
}
ret = regmap_read(rmap, GLOBAL_STATUS, &val);
if (ret) {
dev_err(dev, "failed to read global status register\n");
return -ENODEV;
}
dev_dbg(dev, "global status reg: %08x\n", val);
/*
* It would be contradictory if a physmap flash was NOT parallel.
*/
if ((val & FLASH_TYPE_MASK) != FLASH_TYPE_PARALLEL) {
dev_err(dev, "flash is not parallel\n");
return -ENODEV;
}
/*
* Complain if DT data and hardware definition is different.
*/
if (val & FLASH_WIDTH_16BIT) {
if (map->bankwidth != 2)
dev_warn(dev, "flash hardware say flash is 16 bit wide but DT says it is %d bits wide\n",
map->bankwidth * 8);
} else {
if (map->bankwidth != 1)
dev_warn(dev, "flash hardware say flash is 8 bit wide but DT says it is %d bits wide\n",
map->bankwidth * 8);
}
gf->p = devm_pinctrl_get(dev);
if (IS_ERR(gf->p)) {
dev_err(dev, "no pinctrl handle\n");
ret = PTR_ERR(gf->p);
return ret;
}
gf->enabled_state = pinctrl_lookup_state(gf->p, "enabled");
if (IS_ERR(gf->enabled_state))
dev_err(dev, "no enabled pin control state\n");
gf->disabled_state = pinctrl_lookup_state(gf->p, "disabled");
if (IS_ERR(gf->disabled_state)) {
dev_err(dev, "no disabled pin control state\n");
} else {
ret = pinctrl_select_state(gf->p, gf->disabled_state);
if (ret)
dev_err(gf->dev, "failed to disable pins\n");
}
map->read = gemini_flash_map_read;
map->write = gemini_flash_map_write;
map->copy_from = gemini_flash_map_copy_from;
map->copy_to = gemini_flash_map_copy_to;
dev_info(dev, "initialized Gemini-specific physmap control\n");
return 0;
}
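/*
 * Editorial note -- a plausible devicetree fragment for the code above
 * (illustrative only; the "syscon" property and the "enabled"/"disabled"
 * pinctrl state names come from the lookups in this file, while the
 * addresses, phandles and the generic physmap "bank-width" property are
 * hypothetical):
 *
 *	flash@30000000 {
 *		compatible = "cortina,gemini-flash", "cfi-flash";
 *		reg = <0x30000000 0x00800000>;
 *		syscon = <&syscon>;
 *		bank-width = <2>;
 *		pinctrl-names = "enabled", "disabled";
 *		pinctrl-0 = <&pflash_enabled_pins>;
 *		pinctrl-1 = <&pflash_disabled_pins>;
 *	};
 */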
| linux-master | drivers/mtd/maps/physmap-gemini.c |
/*
* pcmciamtd.c - MTD driver for PCMCIA flash memory cards
*
* Author: Simon Evans <[email protected]>
*
* Copyright (C) 2002 Simon Evans
*
* Licence: GPL
*
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <asm/io.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg)
#define DRIVER_DESC "PCMCIA Flash memory card driver"
/* Size of the PCMCIA address space: 26 bits = 64 MB */
#define MAX_PCMCIA_ADDR 0x4000000
struct pcmciamtd_dev {
struct pcmcia_device *p_dev;
void __iomem *win_base; /* ioremapped address of PCMCIA window */
unsigned int win_size; /* size of window */
unsigned int offset; /* offset into card the window currently points at */
struct map_info pcmcia_map;
struct mtd_info *mtd_info;
int vpp;
char mtd_name[sizeof(struct cistpl_vers_1_t)];
};
/* Module parameters */
/* 2 = do 16-bit transfers, 1 = do 8-bit transfers */
static int bankwidth = 2;
/* Speed of memory accesses, in ns */
static int mem_speed;
/* Force the size of an SRAM card */
static int force_size;
/* Force Vpp */
static int vpp;
/* Set Vpp */
static int setvpp;
/* Force card to be treated as FLASH, ROM or RAM */
static int mem_type;
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Simon Evans <[email protected]>");
MODULE_DESCRIPTION(DRIVER_DESC);
module_param(bankwidth, int, 0);
MODULE_PARM_DESC(bankwidth, "Set bankwidth (1=8 bit, 2=16 bit, default=2)");
module_param(mem_speed, int, 0);
MODULE_PARM_DESC(mem_speed, "Set memory access speed in ns");
module_param(force_size, int, 0);
MODULE_PARM_DESC(force_size, "Force size of card in MiB (1-64)");
module_param(setvpp, int, 0);
MODULE_PARM_DESC(setvpp, "Set Vpp (0=Never, 1=On writes, 2=Always on, default=0)");
module_param(vpp, int, 0);
MODULE_PARM_DESC(vpp, "Vpp value in 1/10ths eg 33=3.3V 120=12V (Dangerous)");
module_param(mem_type, int, 0);
MODULE_PARM_DESC(mem_type, "Set Memory type (0=Flash, 1=RAM, 2=ROM, default=0)");
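/*
 * Editorial note -- illustrative usage of the module parameters above
 * (the values are examples only):
 *
 *	modprobe pcmciamtd bankwidth=1 mem_speed=300 setvpp=1 vpp=120
 *
 * forces 8-bit access, 300 ns memory cycles and 12.0 V Vpp applied only
 * while writing.
 */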
/* read/write{8,16} copy_{from,to} routines with window remapping
* to access whole card
*/
static void __iomem *remap_window(struct map_info *map, unsigned long to)
{
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
struct resource *win = (struct resource *) map->map_priv_2;
unsigned int offset;
int ret;
if (!pcmcia_dev_present(dev->p_dev)) {
pr_debug("device removed\n");
return NULL;
}
offset = to & ~(dev->win_size-1);
if (offset != dev->offset) {
pr_debug("Remapping window from 0x%8.8x to 0x%8.8x\n",
dev->offset, offset);
ret = pcmcia_map_mem_page(dev->p_dev, win, offset);
if (ret != 0)
return NULL;
dev->offset = offset;
}
return dev->win_base + (to & (dev->win_size-1));
}
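/*
 * Editorial note -- worked example (illustrative): with a 64KiB window
 * (dev->win_size == 0x10000), an access at card offset 0x2a010 first moves
 * the window to card offset 0x20000 (unless it is already there) and then
 * returns dev->win_base + 0xa010.
 */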
static map_word pcmcia_read8_remap(struct map_info *map, unsigned long ofs)
{
void __iomem *addr;
map_word d = {{0}};
addr = remap_window(map, ofs);
if(!addr)
return d;
d.x[0] = readb(addr);
pr_debug("ofs = 0x%08lx (%p) data = 0x%02lx\n", ofs, addr, d.x[0]);
return d;
}
static map_word pcmcia_read16_remap(struct map_info *map, unsigned long ofs)
{
void __iomem *addr;
map_word d = {{0}};
addr = remap_window(map, ofs);
if(!addr)
return d;
d.x[0] = readw(addr);
pr_debug("ofs = 0x%08lx (%p) data = 0x%04lx\n", ofs, addr, d.x[0]);
return d;
}
static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
unsigned long win_size = dev->win_size;
pr_debug("to = %p from = %lu len = %zd\n", to, from, len);
while(len) {
int toread = win_size - (from & (win_size-1));
void __iomem *addr;
if(toread > len)
toread = len;
addr = remap_window(map, from);
if(!addr)
return;
pr_debug("memcpy from %p to %p len = %d\n", addr, to, toread);
memcpy_fromio(to, addr, toread);
len -= toread;
to += toread;
from += toread;
}
}
static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long adr)
{
void __iomem *addr = remap_window(map, adr);
if(!addr)
return;
pr_debug("adr = 0x%08lx (%p) data = 0x%02lx\n", adr, addr, d.x[0]);
writeb(d.x[0], addr);
}
static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long adr)
{
void __iomem *addr = remap_window(map, adr);
if(!addr)
return;
pr_debug("adr = 0x%08lx (%p) data = 0x%04lx\n", adr, addr, d.x[0]);
writew(d.x[0], addr);
}
static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
unsigned long win_size = dev->win_size;
pr_debug("to = %lu from = %p len = %zd\n", to, from, len);
while(len) {
int towrite = win_size - (to & (win_size-1));
void __iomem *addr;
if(towrite > len)
towrite = len;
addr = remap_window(map, to);
if(!addr)
return;
pr_debug("memcpy from %p to %p len = %d\n", from, addr, towrite);
memcpy_toio(addr, from, towrite);
len -= towrite;
to += towrite;
from += towrite;
}
}
/* read/write{8,16} copy_{from,to} routines with direct access */
#define DEV_REMOVED(x) (!(pcmcia_dev_present(((struct pcmciamtd_dev *)map->map_priv_1)->p_dev)))
static map_word pcmcia_read8(struct map_info *map, unsigned long ofs)
{
void __iomem *win_base = (void __iomem *)map->map_priv_2;
map_word d = {{0}};
if(DEV_REMOVED(map))
return d;
d.x[0] = readb(win_base + ofs);
pr_debug("ofs = 0x%08lx (%p) data = 0x%02lx\n",
ofs, win_base + ofs, d.x[0]);
return d;
}
static map_word pcmcia_read16(struct map_info *map, unsigned long ofs)
{
void __iomem *win_base = (void __iomem *)map->map_priv_2;
map_word d = {{0}};
if(DEV_REMOVED(map))
return d;
d.x[0] = readw(win_base + ofs);
pr_debug("ofs = 0x%08lx (%p) data = 0x%04lx\n",
ofs, win_base + ofs, d.x[0]);
return d;
}
static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
void __iomem *win_base = (void __iomem *)map->map_priv_2;
if(DEV_REMOVED(map))
return;
pr_debug("to = %p from = %lu len = %zd\n", to, from, len);
memcpy_fromio(to, win_base + from, len);
}
static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr)
{
void __iomem *win_base = (void __iomem *)map->map_priv_2;
if(DEV_REMOVED(map))
return;
pr_debug("adr = 0x%08lx (%p) data = 0x%02lx\n",
adr, win_base + adr, d.x[0]);
writeb(d.x[0], win_base + adr);
}
static void pcmcia_write16(struct map_info *map, map_word d, unsigned long adr)
{
void __iomem *win_base = (void __iomem *)map->map_priv_2;
if(DEV_REMOVED(map))
return;
pr_debug("adr = 0x%08lx (%p) data = 0x%04lx\n",
adr, win_base + adr, d.x[0]);
writew(d.x[0], win_base + adr);
}
static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
void __iomem *win_base = (void __iomem *)map->map_priv_2;
if(DEV_REMOVED(map))
return;
pr_debug("to = %lu from = %p len = %zd\n", to, from, len);
memcpy_toio(win_base + to, from, len);
}
static DEFINE_MUTEX(pcmcia_vpp_lock);
static int pcmcia_vpp_refcnt;
static void pcmciamtd_set_vpp(struct map_info *map, int on)
{
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
struct pcmcia_device *link = dev->p_dev;
pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp);
mutex_lock(&pcmcia_vpp_lock);
if (on) {
if (++pcmcia_vpp_refcnt == 1) /* first nested 'on' */
pcmcia_fixup_vpp(link, dev->vpp);
} else {
if (--pcmcia_vpp_refcnt == 0) /* last nested 'off' */
pcmcia_fixup_vpp(link, 0);
}
mutex_unlock(&pcmcia_vpp_lock);
}
static void pcmciamtd_release(struct pcmcia_device *link)
{
struct pcmciamtd_dev *dev = link->priv;
pr_debug("link = 0x%p\n", link);
if (link->resource[2]->end) {
if(dev->win_base) {
iounmap(dev->win_base);
dev->win_base = NULL;
}
}
pcmcia_disable_device(link);
}
static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev,
tuple_t *tuple,
void *priv_data)
{
cisparse_t parse;
if (!pcmcia_parse_tuple(tuple, &parse)) {
cistpl_format_t *t = &parse.format;
(void)t; /* Shut up, gcc */
pr_debug("Format type: %u, Error Detection: %u, offset = %u, length =%u\n",
t->type, t->edc, t->offset, t->length);
}
return -ENOSPC;
}
static int pcmciamtd_cistpl_jedec(struct pcmcia_device *p_dev,
tuple_t *tuple,
void *priv_data)
{
cisparse_t parse;
int i;
if (!pcmcia_parse_tuple(tuple, &parse)) {
cistpl_jedec_t *t = &parse.jedec;
for (i = 0; i < t->nid; i++)
pr_debug("JEDEC: 0x%02x 0x%02x\n",
t->id[i].mfr, t->id[i].info);
}
return -ENOSPC;
}
static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev,
tuple_t *tuple,
void *priv_data)
{
struct pcmciamtd_dev *dev = priv_data;
cisparse_t parse;
cistpl_device_t *t = &parse.device;
int i;
if (pcmcia_parse_tuple(tuple, &parse))
return -EINVAL;
pr_debug("Common memory:\n");
dev->pcmcia_map.size = t->dev[0].size;
/* from here on: DEBUG only */
for (i = 0; i < t->ndev; i++) {
pr_debug("Region %d, type = %u\n", i, t->dev[i].type);
pr_debug("Region %d, wp = %u\n", i, t->dev[i].wp);
pr_debug("Region %d, speed = %u ns\n", i, t->dev[i].speed);
pr_debug("Region %d, size = %u bytes\n", i, t->dev[i].size);
}
return 0;
}
static int pcmciamtd_cistpl_geo(struct pcmcia_device *p_dev,
tuple_t *tuple,
void *priv_data)
{
struct pcmciamtd_dev *dev = priv_data;
cisparse_t parse;
cistpl_device_geo_t *t = &parse.device_geo;
int i;
if (pcmcia_parse_tuple(tuple, &parse))
return -EINVAL;
dev->pcmcia_map.bankwidth = t->geo[0].buswidth;
/* from here on: DEBUG only */
for (i = 0; i < t->ngeo; i++) {
pr_debug("region: %d bankwidth = %u\n", i, t->geo[i].buswidth);
pr_debug("region: %d erase_block = %u\n", i, t->geo[i].erase_block);
pr_debug("region: %d read_block = %u\n", i, t->geo[i].read_block);
pr_debug("region: %d write_block = %u\n", i, t->geo[i].write_block);
pr_debug("region: %d partition = %u\n", i, t->geo[i].partition);
pr_debug("region: %d interleave = %u\n", i, t->geo[i].interleave);
}
return 0;
}
static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev, int *new_name)
{
int i;
if (p_dev->prod_id[0]) {
dev->mtd_name[0] = '\0';
for (i = 0; i < 4; i++) {
if (i)
strcat(dev->mtd_name, " ");
if (p_dev->prod_id[i])
strcat(dev->mtd_name, p_dev->prod_id[i]);
}
pr_debug("Found name: %s\n", dev->mtd_name);
}
pcmcia_loop_tuple(p_dev, CISTPL_FORMAT, pcmciamtd_cistpl_format, NULL);
pcmcia_loop_tuple(p_dev, CISTPL_JEDEC_C, pcmciamtd_cistpl_jedec, NULL);
pcmcia_loop_tuple(p_dev, CISTPL_DEVICE, pcmciamtd_cistpl_device, dev);
pcmcia_loop_tuple(p_dev, CISTPL_DEVICE_GEO, pcmciamtd_cistpl_geo, dev);
if(!dev->pcmcia_map.size)
dev->pcmcia_map.size = MAX_PCMCIA_ADDR;
if(!dev->pcmcia_map.bankwidth)
dev->pcmcia_map.bankwidth = 2;
if(force_size) {
dev->pcmcia_map.size = force_size << 20;
pr_debug("size forced to %dM\n", force_size);
}
if(bankwidth) {
dev->pcmcia_map.bankwidth = bankwidth;
pr_debug("bankwidth forced to %d\n", bankwidth);
}
dev->pcmcia_map.name = dev->mtd_name;
if(!dev->mtd_name[0]) {
strcpy(dev->mtd_name, "PCMCIA Memory card");
*new_name = 1;
}
pr_debug("Device: Size: %lu Width:%d Name: %s\n",
dev->pcmcia_map.size,
dev->pcmcia_map.bankwidth << 3, dev->mtd_name);
}
static int pcmciamtd_config(struct pcmcia_device *link)
{
struct pcmciamtd_dev *dev = link->priv;
struct mtd_info *mtd = NULL;
int ret;
int i, j = 0;
static char *probes[] = { "jedec_probe", "cfi_probe" };
int new_name = 0;
pr_debug("link=0x%p\n", link);
card_settings(dev, link, &new_name);
dev->pcmcia_map.phys = NO_XIP;
dev->pcmcia_map.copy_from = pcmcia_copy_from_remap;
dev->pcmcia_map.copy_to = pcmcia_copy_to_remap;
if (dev->pcmcia_map.bankwidth == 1) {
dev->pcmcia_map.read = pcmcia_read8_remap;
dev->pcmcia_map.write = pcmcia_write8_remap;
} else {
dev->pcmcia_map.read = pcmcia_read16_remap;
dev->pcmcia_map.write = pcmcia_write16_remap;
}
if(setvpp == 1)
dev->pcmcia_map.set_vpp = pcmciamtd_set_vpp;
/* Request a memory window for PCMCIA. Some architectures can map windows
* up to the maximum that PCMCIA can support (64MiB) - this is ideal and
* we aim for a window the size of the whole card - otherwise we try
* smaller windows until we succeed
*/
link->resource[2]->flags |= WIN_MEMORY_TYPE_CM | WIN_ENABLE;
link->resource[2]->flags |= (dev->pcmcia_map.bankwidth == 1) ?
WIN_DATA_WIDTH_8 : WIN_DATA_WIDTH_16;
link->resource[2]->start = 0;
link->resource[2]->end = (force_size) ? force_size << 20 :
MAX_PCMCIA_ADDR;
dev->win_size = 0;
do {
int ret;
pr_debug("requesting window with size = %luKiB memspeed = %d\n",
(unsigned long) resource_size(link->resource[2]) >> 10,
mem_speed);
ret = pcmcia_request_window(link, link->resource[2], mem_speed);
pr_debug("ret = %d dev->win_size = %d\n", ret, dev->win_size);
if(ret) {
j++;
link->resource[2]->start = 0;
link->resource[2]->end = (force_size) ?
force_size << 20 : MAX_PCMCIA_ADDR;
link->resource[2]->end >>= j;
} else {
pr_debug("Got window of size %luKiB\n", (unsigned long)
resource_size(link->resource[2]) >> 10);
dev->win_size = resource_size(link->resource[2]);
break;
}
} while (link->resource[2]->end >= 0x1000);
pr_debug("dev->win_size = %d\n", dev->win_size);
if(!dev->win_size) {
dev_err(&dev->p_dev->dev, "Cannot allocate memory window\n");
pcmciamtd_release(link);
return -ENODEV;
}
pr_debug("Allocated a window of %dKiB\n", dev->win_size >> 10);
/* Get write protect status */
dev->win_base = ioremap(link->resource[2]->start,
resource_size(link->resource[2]));
if(!dev->win_base) {
dev_err(&dev->p_dev->dev, "ioremap(%pR) failed\n",
link->resource[2]);
pcmciamtd_release(link);
return -ENODEV;
}
pr_debug("mapped window dev = %p @ %pR, base = %p\n",
dev, link->resource[2], dev->win_base);
dev->offset = 0;
dev->pcmcia_map.map_priv_1 = (unsigned long)dev;
dev->pcmcia_map.map_priv_2 = (unsigned long)link->resource[2];
dev->vpp = (vpp) ? vpp : link->socket->socket.Vpp;
if(setvpp == 2) {
link->vpp = dev->vpp;
} else {
link->vpp = 0;
}
link->config_index = 0;
pr_debug("Setting Configuration\n");
ret = pcmcia_enable_device(link);
if (ret != 0) {
if (dev->win_base) {
iounmap(dev->win_base);
dev->win_base = NULL;
}
return -ENODEV;
}
if(mem_type == 1) {
mtd = do_map_probe("map_ram", &dev->pcmcia_map);
} else if(mem_type == 2) {
mtd = do_map_probe("map_rom", &dev->pcmcia_map);
} else {
for(i = 0; i < ARRAY_SIZE(probes); i++) {
pr_debug("Trying %s\n", probes[i]);
mtd = do_map_probe(probes[i], &dev->pcmcia_map);
if(mtd)
break;
pr_debug("FAILED: %s\n", probes[i]);
}
}
if(!mtd) {
pr_debug("Can not find an MTD\n");
pcmciamtd_release(link);
return -ENODEV;
}
dev->mtd_info = mtd;
mtd->owner = THIS_MODULE;
if(new_name) {
int size = 0;
char unit = ' ';
/* Since we are using a default name, make it better by adding
* in the size
*/
if(mtd->size < 1048576) { /* <1MiB in size, show size in KiB */
size = mtd->size >> 10;
unit = 'K';
} else {
size = mtd->size >> 20;
unit = 'M';
}
snprintf(dev->mtd_name, sizeof(dev->mtd_name), "%d%ciB %s", size, unit, "PCMCIA Memory card");
}
/* If the memory found fits completely into the mapped PCMCIA window,
use the faster non-remapping read/write functions */
if(mtd->size <= dev->win_size) {
pr_debug("Using non remapping memory functions\n");
dev->pcmcia_map.map_priv_2 = (unsigned long)dev->win_base;
if (dev->pcmcia_map.bankwidth == 1) {
dev->pcmcia_map.read = pcmcia_read8;
dev->pcmcia_map.write = pcmcia_write8;
} else {
dev->pcmcia_map.read = pcmcia_read16;
dev->pcmcia_map.write = pcmcia_write16;
}
dev->pcmcia_map.copy_from = pcmcia_copy_from;
dev->pcmcia_map.copy_to = pcmcia_copy_to;
}
if (mtd_device_register(mtd, NULL, 0)) {
map_destroy(mtd);
dev->mtd_info = NULL;
dev_err(&dev->p_dev->dev,
"Could not register the MTD device\n");
pcmciamtd_release(link);
return -ENODEV;
}
dev_info(&dev->p_dev->dev, "mtd%d: %s\n", mtd->index, mtd->name);
return 0;
}
static int pcmciamtd_suspend(struct pcmcia_device *dev)
{
pr_debug("EVENT_PM_RESUME\n");
/* get_lock(link); */
return 0;
}
static int pcmciamtd_resume(struct pcmcia_device *dev)
{
pr_debug("EVENT_PM_SUSPEND\n");
/* free_lock(link); */
return 0;
}
static void pcmciamtd_detach(struct pcmcia_device *link)
{
struct pcmciamtd_dev *dev = link->priv;
pr_debug("link=0x%p\n", link);
if(dev->mtd_info) {
mtd_device_unregister(dev->mtd_info);
dev_info(&dev->p_dev->dev, "mtd%d: Removing\n",
dev->mtd_info->index);
map_destroy(dev->mtd_info);
}
pcmciamtd_release(link);
}
static int pcmciamtd_probe(struct pcmcia_device *link)
{
struct pcmciamtd_dev *dev;
/* Create new memory card device */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) return -ENOMEM;
pr_debug("dev=0x%p\n", dev);
dev->p_dev = link;
link->priv = dev;
return pcmciamtd_config(link);
}
static const struct pcmcia_device_id pcmciamtd_ids[] = {
PCMCIA_DEVICE_FUNC_ID(1),
PCMCIA_DEVICE_PROD_ID123("IO DATA", "PCS-2M", "2MB SRAM", 0x547e66dc, 0x1fed36cd, 0x36eadd21),
PCMCIA_DEVICE_PROD_ID12("IBM", "2MB SRAM", 0xb569a6e5, 0x36eadd21),
PCMCIA_DEVICE_PROD_ID12("IBM", "4MB FLASH", 0xb569a6e5, 0x8bc54d2a),
PCMCIA_DEVICE_PROD_ID12("IBM", "8MB FLASH", 0xb569a6e5, 0x6df1be3e),
PCMCIA_DEVICE_PROD_ID12("Intel", "S2E20SW", 0x816cc815, 0xd14c9dcf),
PCMCIA_DEVICE_PROD_ID12("Intel", "S2E8 SW", 0x816cc815, 0xa2d7dedb),
PCMCIA_DEVICE_PROD_ID12("intel", "SERIES2-02 ", 0x40ade711, 0x145cea5c),
PCMCIA_DEVICE_PROD_ID12("intel", "SERIES2-04 ", 0x40ade711, 0x42064dda),
PCMCIA_DEVICE_PROD_ID12("intel", "SERIES2-20 ", 0x40ade711, 0x25ee5cb0),
PCMCIA_DEVICE_PROD_ID12("intel", "VALUE SERIES 100 ", 0x40ade711, 0xdf8506d8),
PCMCIA_DEVICE_PROD_ID12("KINGMAX TECHNOLOGY INC.", "SRAM 256K Bytes", 0x54d0c69c, 0xad12c29c),
PCMCIA_DEVICE_PROD_ID12("Maxtor", "MAXFL MobileMax Flash Memory Card", 0xb68968c8, 0x2dfb47b0),
PCMCIA_DEVICE_PROD_ID123("M-Systems", "M-SYS Flash Memory Card", "(c) M-Systems", 0x7ed2ad87, 0x675dc3fb, 0x7aef3965),
PCMCIA_DEVICE_PROD_ID12("PRETEC", " 2MB SRAM CARD", 0xebf91155, 0x805360ca),
PCMCIA_DEVICE_PROD_ID12("PRETEC", " 4MB SRAM CARD", 0xebf91155, 0x20b6bf17),
PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB101EN20", 0xf9876baf, 0xad0b207b),
PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB513EN20", 0xf9876baf, 0xe8d884ad),
PCMCIA_DEVICE_PROD_ID12("SMART Modular Technologies", " 4MB FLASH Card", 0x96fd8277, 0x737a5b05),
PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-3000", 0x05ddca47, 0xe7d67bca),
PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-4100", 0x05ddca47, 0x7bc32944),
/* the following was commented out in pcmcia-cs-3.2.7 */
/* PCMCIA_DEVICE_PROD_ID12("RATOC Systems,Inc.", "SmartMedia ADAPTER PC Card", 0xf4a2fefe, 0x5885b2ae), */
#ifdef CONFIG_MTD_PCMCIA_ANONYMOUS
{ .match_flags = PCMCIA_DEV_ID_MATCH_ANONYMOUS, },
#endif
PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, pcmciamtd_ids);
static struct pcmcia_driver pcmciamtd_driver = {
.name = "pcmciamtd",
.probe = pcmciamtd_probe,
.remove = pcmciamtd_detach,
.owner = THIS_MODULE,
.id_table = pcmciamtd_ids,
.suspend = pcmciamtd_suspend,
.resume = pcmciamtd_resume,
};
static int __init init_pcmciamtd(void)
{
if(bankwidth && bankwidth != 1 && bankwidth != 2) {
info("bad bankwidth (%d), using default", bankwidth);
bankwidth = 2;
}
if(force_size && (force_size < 1 || force_size > 64)) {
info("bad force_size (%d), using default", force_size);
force_size = 0;
}
if(mem_type && mem_type != 1 && mem_type != 2) {
info("bad mem_type (%d), using default", mem_type);
mem_type = 0;
}
return pcmcia_register_driver(&pcmciamtd_driver);
}
static void __exit exit_pcmciamtd(void)
{
pr_debug(DRIVER_DESC " unloading");
pcmcia_unregister_driver(&pcmciamtd_driver);
}
module_init(init_pcmciamtd);
module_exit(exit_pcmciamtd);
| linux-master | drivers/mtd/maps/pcmciamtd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Flash memory access on SA11x0 based devices
*
* (C) 2000 Nicolas Pitre <[email protected]>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/concat.h>
#include <mach/hardware.h>
#include <linux/sizes.h>
#include <asm/mach/flash.h>
struct sa_subdev_info {
char name[16];
struct map_info map;
struct mtd_info *mtd;
struct flash_platform_data *plat;
};
struct sa_info {
struct mtd_info *mtd;
int num_subdev;
struct sa_subdev_info subdev[];
};
static DEFINE_SPINLOCK(sa1100_vpp_lock);
static int sa1100_vpp_refcnt;
static void sa1100_set_vpp(struct map_info *map, int on)
{
struct sa_subdev_info *subdev = container_of(map, struct sa_subdev_info, map);
unsigned long flags;
spin_lock_irqsave(&sa1100_vpp_lock, flags);
if (on) {
if (++sa1100_vpp_refcnt == 1) /* first nested 'on' */
subdev->plat->set_vpp(1);
} else {
if (--sa1100_vpp_refcnt == 0) /* last nested 'off' */
subdev->plat->set_vpp(0);
}
spin_unlock_irqrestore(&sa1100_vpp_lock, flags);
}
static void sa1100_destroy_subdev(struct sa_subdev_info *subdev)
{
if (subdev->mtd)
map_destroy(subdev->mtd);
if (subdev->map.virt)
iounmap(subdev->map.virt);
release_mem_region(subdev->map.phys, subdev->map.size);
}
static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *res)
{
unsigned long phys;
unsigned int size;
int ret;
phys = res->start;
size = res->end - phys + 1;
/*
* Retrieve the bankwidth from the MSC registers.
* We currently only implement CS0 and CS1 here.
*/
switch (phys) {
default:
printk(KERN_WARNING "SA1100 flash: unknown base address "
"0x%08lx, assuming CS0\n", phys);
fallthrough;
case SA1100_CS0_PHYS:
subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
break;
case SA1100_CS1_PHYS:
subdev->map.bankwidth = ((MSC0 >> 16) & MSC_RBW) ? 2 : 4;
break;
}
if (!request_mem_region(phys, size, subdev->name)) {
ret = -EBUSY;
goto out;
}
if (subdev->plat->set_vpp)
subdev->map.set_vpp = sa1100_set_vpp;
subdev->map.phys = phys;
subdev->map.size = size;
subdev->map.virt = ioremap(phys, size);
if (!subdev->map.virt) {
ret = -ENOMEM;
goto err;
}
simple_map_init(&subdev->map);
/*
* Now let's probe for the actual flash. Do it here since
* specific machine settings might have been set above.
*/
subdev->mtd = do_map_probe(subdev->plat->map_name, &subdev->map);
if (subdev->mtd == NULL) {
ret = -ENXIO;
goto err;
}
printk(KERN_INFO "SA1100 flash: CFI device at 0x%08lx, %uMiB, %d-bit\n",
phys, (unsigned)(subdev->mtd->size >> 20),
subdev->map.bankwidth * 8);
return 0;
err:
sa1100_destroy_subdev(subdev);
out:
return ret;
}
static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *plat)
{
int i;
if (info->mtd) {
mtd_device_unregister(info->mtd);
if (info->mtd != info->subdev[0].mtd)
mtd_concat_destroy(info->mtd);
}
for (i = info->num_subdev - 1; i >= 0; i--)
sa1100_destroy_subdev(&info->subdev[i]);
kfree(info);
if (plat->exit)
plat->exit();
}
static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
struct flash_platform_data *plat)
{
struct sa_info *info;
int nr, size, i, ret = 0;
/*
* Count number of devices.
*/
for (nr = 0; ; nr++)
if (!platform_get_resource(pdev, IORESOURCE_MEM, nr))
break;
if (nr == 0) {
ret = -ENODEV;
goto out;
}
size = sizeof(struct sa_info) + sizeof(struct sa_subdev_info) * nr;
/*
* Allocate the map_info structs in one go.
*/
info = kzalloc(size, GFP_KERNEL);
if (!info) {
ret = -ENOMEM;
goto out;
}
if (plat->init) {
ret = plat->init();
if (ret)
goto err;
}
/*
* Claim and then map the memory regions.
*/
for (i = 0; i < nr; i++) {
struct sa_subdev_info *subdev = &info->subdev[i];
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res)
break;
subdev->map.name = subdev->name;
sprintf(subdev->name, "%s-%d", plat->name, i);
subdev->plat = plat;
ret = sa1100_probe_subdev(subdev, res);
if (ret)
break;
}
info->num_subdev = i;
/*
* ENXIO is special. It means we didn't find a chip when we probed.
*/
if (ret != 0 && !(ret == -ENXIO && info->num_subdev > 0))
goto err;
/*
* If we found one device, don't bother with concat support. If
* we found multiple devices, use concat if we have it available,
* otherwise fail. Either way, it'll be called "sa1100".
*/
if (info->num_subdev == 1) {
strcpy(info->subdev[0].name, plat->name);
info->mtd = info->subdev[0].mtd;
ret = 0;
} else if (info->num_subdev > 1) {
struct mtd_info **cdev;
cdev = kmalloc_array(nr, sizeof(*cdev), GFP_KERNEL);
if (!cdev) {
ret = -ENOMEM;
goto err;
}
/*
* We detected multiple devices. Concatenate them together.
*/
for (i = 0; i < info->num_subdev; i++)
cdev[i] = info->subdev[i].mtd;
info->mtd = mtd_concat_create(cdev, info->num_subdev,
plat->name);
kfree(cdev);
if (info->mtd == NULL) {
ret = -ENXIO;
goto err;
}
}
info->mtd->dev.parent = &pdev->dev;
if (ret == 0)
return info;
err:
sa1100_destroy(info, plat);
out:
return ERR_PTR(ret);
}
static const char * const part_probes[] = { "cmdlinepart", "RedBoot", NULL };
static int sa1100_mtd_probe(struct platform_device *pdev)
{
struct flash_platform_data *plat = dev_get_platdata(&pdev->dev);
struct sa_info *info;
int err;
if (!plat)
return -ENODEV;
info = sa1100_setup_mtd(pdev, plat);
if (IS_ERR(info)) {
err = PTR_ERR(info);
goto out;
}
/*
* Partition selection stuff.
*/
mtd_device_parse_register(info->mtd, part_probes, NULL, plat->parts,
plat->nr_parts);
platform_set_drvdata(pdev, info);
err = 0;
out:
return err;
}
static int sa1100_mtd_remove(struct platform_device *pdev)
{
struct sa_info *info = platform_get_drvdata(pdev);
struct flash_platform_data *plat = dev_get_platdata(&pdev->dev);
sa1100_destroy(info, plat);
return 0;
}
static struct platform_driver sa1100_mtd_driver = {
.probe = sa1100_mtd_probe,
.remove = sa1100_mtd_remove,
.driver = {
.name = "sa1100-mtd",
},
};
module_platform_driver(sa1100_mtd_driver);
MODULE_AUTHOR("Nicolas Pitre");
MODULE_DESCRIPTION("SA1100 CFI map driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sa1100-mtd");
| linux-master | drivers/mtd/maps/sa1100-flash.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Out-of-line map I/O functions for simple maps when CONFIG_COMPLEX_MAPPINGS
* is enabled.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/map.h>
#include <linux/mtd/xip.h>
static map_word __xipram simple_map_read(struct map_info *map, unsigned long ofs)
{
return inline_map_read(map, ofs);
}
static void __xipram simple_map_write(struct map_info *map, const map_word datum, unsigned long ofs)
{
inline_map_write(map, datum, ofs);
}
static void __xipram simple_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
inline_map_copy_from(map, to, from, len);
}
static void __xipram simple_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
inline_map_copy_to(map, to, from, len);
}
void simple_map_init(struct map_info *map)
{
BUG_ON(!map_bankwidth_supported(map->bankwidth));
map->read = simple_map_read;
map->write = simple_map_write;
map->copy_from = simple_map_copy_from;
map->copy_to = simple_map_copy_to;
}
EXPORT_SYMBOL(simple_map_init);
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/maps/map_funcs.c |
/*
* Copyright © 2001 Flaga hf. Medical Devices, Kári Davíðsson <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
/* We split the flash chip up into four parts.
* 1: bootloader first 128k (0x00000000 - 0x0001FFFF) size 0x020000
* 2: kernel 640k (0x00020000 - 0x000BFFFF) size 0x0A0000
* 3: compressed 1536k root ramdisk (0x000C0000 - 0x0023FFFF) size 0x180000
* 4: writeable diskpartition (jffs)(0x00240000 - 0x003FFFFF) size 0x1C0000
*/
#define FLASH_PHYS_ADDR 0x40000000
#define FLASH_SIZE 0x400000
#define FLASH_PARTITION0_ADDR 0x00000000
#define FLASH_PARTITION0_SIZE 0x00020000
#define FLASH_PARTITION1_ADDR 0x00020000
#define FLASH_PARTITION1_SIZE 0x000A0000
#define FLASH_PARTITION2_ADDR 0x000C0000
#define FLASH_PARTITION2_SIZE 0x00180000
#define FLASH_PARTITION3_ADDR 0x00240000
#define FLASH_PARTITION3_SIZE 0x001C0000
static struct map_info flagadm_map = {
.name = "FlagaDM flash device",
.size = FLASH_SIZE,
.bankwidth = 2,
};
static const struct mtd_partition flagadm_parts[] = {
{
.name = "Bootloader",
.offset = FLASH_PARTITION0_ADDR,
.size = FLASH_PARTITION0_SIZE
},
{
.name = "Kernel image",
.offset = FLASH_PARTITION1_ADDR,
.size = FLASH_PARTITION1_SIZE
},
{
.name = "Initial ramdisk image",
.offset = FLASH_PARTITION2_ADDR,
.size = FLASH_PARTITION2_SIZE
},
{
.name = "Persistent storage",
.offset = FLASH_PARTITION3_ADDR,
.size = FLASH_PARTITION3_SIZE
}
};
#define PARTITION_COUNT ARRAY_SIZE(flagadm_parts)
static struct mtd_info *mymtd;
static int __init init_flagadm(void)
{
printk(KERN_NOTICE "FlagaDM flash device: %x at %x\n",
FLASH_SIZE, FLASH_PHYS_ADDR);
flagadm_map.phys = FLASH_PHYS_ADDR;
flagadm_map.virt = ioremap(FLASH_PHYS_ADDR,
FLASH_SIZE);
if (!flagadm_map.virt) {
printk("Failed to ioremap\n");
return -EIO;
}
simple_map_init(&flagadm_map);
mymtd = do_map_probe("cfi_probe", &flagadm_map);
if (mymtd) {
mymtd->owner = THIS_MODULE;
mtd_device_register(mymtd, flagadm_parts, PARTITION_COUNT);
printk(KERN_NOTICE "FlagaDM flash device initialized\n");
return 0;
}
iounmap((void __iomem *)flagadm_map.virt);
return -ENXIO;
}
static void __exit cleanup_flagadm(void)
{
if (mymtd) {
mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (flagadm_map.virt) {
iounmap((void __iomem *)flagadm_map.virt);
flagadm_map.virt = NULL;
}
}
module_init(init_flagadm);
module_exit(cleanup_flagadm);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kári Davíðsson <[email protected]>");
MODULE_DESCRIPTION("MTD map driver for Flaga digital module");
| linux-master | drivers/mtd/maps/cfi_flagadm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Intel IXP4xx OF physmap add-on
* Copyright (C) 2019 Linus Walleij <[email protected]>
*
* Based on the ixp4xx.c map driver, originally written by:
* Intel Corporation
* Deepak Saxena <[email protected]>
* Copyright (C) 2002 Intel Corporation
* Copyright (C) 2003-2004 MontaVista Software, Inc.
*/
#include <linux/export.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/mtd/map.h>
#include <linux/mtd/xip.h>
#include "physmap-ixp4xx.h"
/*
* Read/write a 16 bit word from flash address 'addr'.
*
* When the cpu is in little-endian mode it swizzles the address lines
* ('address coherency') so we need to undo the swizzling to ensure commands
* and the like end up on the correct flash address.
*
* To further complicate matters, due to the way the expansion bus controller
* handles 32 bit reads, the byte stream ABCD is stored on the flash as:
* D15 D0
* +---+---+
* | A | B | 0
* +---+---+
* | C | D | 2
* +---+---+
* This means that on LE systems each 16 bit word must be swapped. Note that
* this requires CONFIG_MTD_CFI_BE_BYTE_SWAP to be enabled to 'unswap' the CFI
* data and other flash commands which are always in D7-D0.
*/
#ifndef CONFIG_CPU_BIG_ENDIAN
static inline u16 flash_read16(void __iomem *addr)
{
return be16_to_cpu(__raw_readw((void __iomem *)((unsigned long)addr ^ 0x2)));
}
static inline void flash_write16(u16 d, void __iomem *addr)
{
__raw_writew(cpu_to_be16(d), (void __iomem *)((unsigned long)addr ^ 0x2));
}
#define BYTE0(h) ((h) & 0xFF)
#define BYTE1(h) (((h) >> 8) & 0xFF)
#else
static inline u16 flash_read16(const void __iomem *addr)
{
return __raw_readw(addr);
}
static inline void flash_write16(u16 d, void __iomem *addr)
{
__raw_writew(d, addr);
}
#define BYTE0(h) (((h) >> 8) & 0xFF)
#define BYTE1(h) ((h) & 0xFF)
#endif
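/*
 * Editorial note -- worked example (illustrative): on a little-endian CPU,
 * flash_read16() for flash offset 0x10 issues __raw_readw() at offset 0x12
 * and byte-swaps the result with be16_to_cpu(); BYTE0()/BYTE1() then hand
 * the bytes back in original stream order to ixp4xx_copy_from(). On a
 * big-endian CPU both the address and the data are used as-is.
 */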
static map_word ixp4xx_read16(struct map_info *map, unsigned long ofs)
{
map_word val;
val.x[0] = flash_read16(map->virt + ofs);
return val;
}
/*
* The IXP4xx expansion bus only allows 16-bit wide accesses
* when attached to a 16-bit wide device (such as the 28F128J3A),
* so we can't just memcpy_fromio().
*/
static void ixp4xx_copy_from(struct map_info *map, void *to,
unsigned long from, ssize_t len)
{
u8 *dest = (u8 *) to;
void __iomem *src = map->virt + from;
if (len <= 0)
return;
if (from & 1) {
*dest++ = BYTE1(flash_read16(src-1));
src++;
--len;
}
while (len >= 2) {
u16 data = flash_read16(src);
*dest++ = BYTE0(data);
*dest++ = BYTE1(data);
src += 2;
len -= 2;
}
if (len > 0)
*dest++ = BYTE0(flash_read16(src));
}
static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr)
{
flash_write16(d.x[0], map->virt + adr);
}
int of_flash_probe_ixp4xx(struct platform_device *pdev,
struct device_node *np,
struct map_info *map)
{
struct device *dev = &pdev->dev;
/* Multiplatform guard */
if (!of_device_is_compatible(np, "intel,ixp4xx-flash"))
return 0;
map->read = ixp4xx_read16;
map->write = ixp4xx_write16;
map->copy_from = ixp4xx_copy_from;
map->copy_to = NULL;
dev_info(dev, "initialized Intel IXP4xx-specific physmap control\n");
return 0;
}
| linux-master | drivers/mtd/maps/physmap-ixp4xx.c |