python_code | repo_name | file_path
---|---|---|
// SPDX-License-Identifier: GPL-2.0+
/*
* written by David Borowski
*
* Copyright (C) 2003 David Borowski.
*
* specifically written as a driver for the speakup screenreview
* package; it is not a general device driver.
* This driver is for the Keynote Gold internal synthesizer.
*/
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/serial_reg.h>
#include "spk_priv.h"
#include "speakup.h"
#define DRV_VERSION "2.10"
#define SYNTH_IO_EXTENT 0x04
#define SWAIT udelay(70)
#define PROCSPEECH 0x1f
#define SYNTH_CLEAR 0x03
static int synth_probe(struct spk_synth *synth);
static void keynote_release(struct spk_synth *synth);
static const char *synth_immediate(struct spk_synth *synth, const char *buf);
static void do_catch_up(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
static int synth_port;
static int port_forced;
static unsigned int synth_portlist[] = { 0x2a8, 0 };
enum default_vars_id {
CAPS_START_ID = 0, CAPS_STOP_ID,
RATE_ID, PITCH_ID,
DIRECT_ID, V_LAST_VAR_ID,
NB_ID
};
static struct var_t vars[NB_ID] = {
[CAPS_START_ID] = { CAPS_START, .u.s = {"[f130]" } },
[CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"[f90]" } },
[RATE_ID] = { RATE, .u.n = {"\04%c ", 8, 0, 10, 81, -8, NULL } },
[PITCH_ID] = { PITCH, .u.n = {"[f%d]", 5, 0, 9, 40, 10, NULL } },
[DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
/*
* These attributes will appear in /sys/accessibility/speakup/keypc.
*/
static struct kobj_attribute caps_start_attribute =
__ATTR(caps_start, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
__ATTR(delay_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
__ATTR(direct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
__ATTR(trigger_time, 0644, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
&rate_attribute.attr,
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
static struct spk_synth synth_keypc = {
.name = "keypc",
.version = DRV_VERSION,
.long_name = "Keynote PC",
.init = "[t][n7,1][n8,0]",
.procspeech = PROCSPEECH,
.clear = SYNTH_CLEAR,
.delay = 500,
.trigger = 50,
.jiffies = 50,
.full = 1000,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
.io_ops = &spk_serial_io_ops,
.probe = synth_probe,
.release = keynote_release,
.synth_immediate = synth_immediate,
.catch_up = do_catch_up,
.flush = synth_flush,
.is_alive = spk_synth_is_alive_nop,
.synth_adjust = NULL,
.read_buff_add = NULL,
.get_index = NULL,
.indexing = {
.command = NULL,
.lowindex = 0,
.highindex = 0,
.currindex = 0,
},
.attributes = {
.attrs = synth_attrs,
.name = "keypc",
},
};
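/*
 * Status-bit helpers: the send paths below wait for synth_writable() to go
 * false before writing a byte, and back off while synth_full() reports the
 * card's buffer as full.
 */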
static inline bool synth_writable(void)
{
return (inb_p(synth_port + UART_RX) & 0x10) != 0;
}
static inline bool synth_full(void)
{
return (inb_p(synth_port + UART_RX) & 0x80) == 0;
}
static char *oops(void)
{
int s1, s2, s3, s4;
s1 = inb_p(synth_port);
s2 = inb_p(synth_port + 1);
s3 = inb_p(synth_port + 2);
s4 = inb_p(synth_port + 3);
pr_warn("synth timeout %d %d %d %d\n", s1, s2, s3, s4);
return NULL;
}
static const char *synth_immediate(struct spk_synth *synth, const char *buf)
{
u_char ch;
int timeout;
while ((ch = *buf)) {
if (ch == '\n')
ch = PROCSPEECH;
if (synth_full())
return buf;
timeout = 1000;
while (synth_writable())
if (--timeout <= 0)
return oops();
outb_p(ch, synth_port);
udelay(70);
buf++;
}
return NULL;
}
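/*
 * Kernel-thread loop that drains the speakup buffer to the card one byte
 * at a time, sending PROCSPEECH and sleeping at word boundaries once the
 * configured jiffy budget (jiffy_delta) has elapsed.
 */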
static void do_catch_up(struct spk_synth *synth)
{
u_char ch;
int timeout;
unsigned long flags;
unsigned long jiff_max;
struct var_t *jiffy_delta;
struct var_t *delay_time;
struct var_t *full_time;
int delay_time_val;
int full_time_val;
int jiffy_delta_val;
jiffy_delta = spk_get_var(JIFFY);
delay_time = spk_get_var(DELAY);
full_time = spk_get_var(FULL);
spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
jiff_max = jiffies + jiffy_delta_val;
while (!kthread_should_stop()) {
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (speakup_info.flushing) {
speakup_info.flushing = 0;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
synth->flush(synth);
continue;
}
synth_buffer_skip_nonlatin1();
if (synth_buffer_empty()) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
}
set_current_state(TASK_INTERRUPTIBLE);
full_time_val = full_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (synth_full()) {
schedule_timeout(msecs_to_jiffies(full_time_val));
continue;
}
set_current_state(TASK_RUNNING);
timeout = 1000;
while (synth_writable())
if (--timeout <= 0)
break;
if (timeout <= 0) {
oops();
break;
}
spin_lock_irqsave(&speakup_info.spinlock, flags);
ch = synth_buffer_getc();
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '\n')
ch = PROCSPEECH;
outb_p(ch, synth_port);
SWAIT;
if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
timeout = 1000;
while (synth_writable())
if (--timeout <= 0)
break;
if (timeout <= 0) {
oops();
break;
}
outb_p(PROCSPEECH, synth_port);
spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
delay_time_val = delay_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
schedule_timeout(msecs_to_jiffies(delay_time_val));
jiff_max = jiffies + jiffy_delta_val;
}
}
timeout = 1000;
while (synth_writable())
if (--timeout <= 0)
break;
if (timeout <= 0)
oops();
else
outb_p(PROCSPEECH, synth_port);
}
static void synth_flush(struct spk_synth *synth)
{
outb_p(SYNTH_CLEAR, synth_port);
}
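/*
 * Probe either the port forced on the kernel command line or the default
 * port list; the card is considered present when the base port reads
 * back 0x80.
 */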
static int synth_probe(struct spk_synth *synth)
{
unsigned int port_val = 0;
int i;
pr_info("Probing for %s.\n", synth->long_name);
if (port_forced) {
synth_port = port_forced;
pr_info("probe forced to %x by kernel command line\n",
synth_port);
if (synth_request_region(synth_port - 1, SYNTH_IO_EXTENT)) {
pr_warn("sorry, port already reserved\n");
return -EBUSY;
}
port_val = inb(synth_port);
} else {
for (i = 0; synth_portlist[i]; i++) {
if (synth_request_region(synth_portlist[i],
SYNTH_IO_EXTENT)) {
pr_warn("request_region: failed with 0x%x, %d\n",
synth_portlist[i], SYNTH_IO_EXTENT);
continue;
}
port_val = inb(synth_portlist[i]);
if (port_val == 0x80) {
synth_port = synth_portlist[i];
break;
}
}
}
if (port_val != 0x80) {
pr_info("%s: not found\n", synth->long_name);
synth_release_region(synth_port, SYNTH_IO_EXTENT);
synth_port = 0;
return -ENODEV;
}
pr_info("%s: %03x-%03x, driver version %s,\n", synth->long_name,
synth_port, synth_port + SYNTH_IO_EXTENT - 1,
synth->version);
synth->alive = 1;
return 0;
}
static void keynote_release(struct spk_synth *synth)
{
spk_stop_serial_interrupt();
if (synth_port)
synth_release_region(synth_port, SYNTH_IO_EXTENT);
synth_port = 0;
}
module_param_hw_named(port, port_forced, int, ioport, 0444);
module_param_named(start, synth_keypc.startup, short, 0444);
module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444);
module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444);
module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444);
MODULE_PARM_DESC(port, "Set the port for the synthesizer (override probing).");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
MODULE_PARM_DESC(rate, "Set the rate variable on load.");
MODULE_PARM_DESC(pitch, "Set the pitch variable on load.");
MODULE_PARM_DESC(direct, "Set the direct variable on load.");
module_spk_synth(synth_keypc);
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for Keynote Gold PC synthesizers");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/accessibility/speakup/speakup_keypc.c |
// SPDX-License-Identifier: GPL-2.0+
/* makemapdata.c
* originally written by: Kirk Reiser.
*
* Copyright (C) 2002 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*/
#include <stdlib.h>
#include <stdio.h>
#include <libgen.h>
#include <string.h>
#include <linux/version.h>
#include <ctype.h>
#include "utils.h"
static char buffer[256];
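/*
 * Scan the current input file for "#define NAME VALUE" lines, leaving the
 * name in def_name and the value (stripped of a leading parenthesis) in
 * def_val. Returns 1 when a definition was found, 0 at end of file.
 */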
static int get_define(void)
{
char *c;
while (fgets(buffer, sizeof(buffer)-1, infile)) {
lc++;
if (strncmp(buffer, "#define", 7))
continue;
c = buffer + 7;
while (*c == ' ' || *c == '\t')
c++;
def_name = c;
while (*c && *c != ' ' && *c != '\t' && *c != '\n')
c++;
if (!*c || *c == '\n')
continue;
*c++ = '\0';
while (*c == ' ' || *c == '\t' || *c == '(')
c++;
def_val = c;
while (*c && *c != '\n' && *c != ')')
c++;
*c++ = '\0';
return 1;
}
fclose(infile);
infile = 0;
return 0;
}
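/*
 * Emit the init_key_data[] initializer on stdout, built from the KEY_
 * definitions in input.h and input-event-codes.h plus the speakup-specific
 * codes in spk_priv_keyinfo.h.
 */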
int
main(int argc, char *argv[])
{
int value, i;
struct st_key *this;
const char *dir_name, *spk_dir_name;
char *cp;
dir_name = getenv("TOPDIR");
if (!dir_name)
dir_name = ".";
spk_dir_name = getenv("SPKDIR");
if (!spk_dir_name)
spk_dir_name = "drivers/accessibility/speakup";
bzero(key_table, sizeof(key_table));
add_key("shift", 1, is_shift);
add_key("altgr", 2, is_shift);
add_key("ctrl", 4, is_shift);
add_key("alt", 8, is_shift);
add_key("spk", 16, is_shift);
add_key("double", 32, is_shift);
open_input(dir_name, "include/linux/input.h");
while (get_define()) {
if (strncmp(def_name, "KEY_", 4))
continue;
value = atoi(def_val);
if (value > 0 && value < MAXKEYVAL)
add_key(def_name, value, is_input);
}
open_input(dir_name, "include/uapi/linux/input-event-codes.h");
while (get_define()) {
if (strncmp(def_name, "KEY_", 4))
continue;
value = atoi(def_val);
if (value > 0 && value < MAXKEYVAL)
add_key(def_name, value, is_input);
}
open_input(spk_dir_name, "spk_priv_keyinfo.h");
while (get_define()) {
if (strlen(def_val) > 5) {
//if (def_val[0] == '(')
// def_val++;
cp = strchr(def_val, '+');
if (!cp)
continue;
if (cp[-1] == ' ')
cp[-1] = '\0';
*cp++ = '\0';
this = find_key(def_val);
while (*cp == ' ')
cp++;
if (!this || *cp < '0' || *cp > '9')
continue;
value = this->value+atoi(cp);
} else if (!strncmp(def_val, "0x", 2))
sscanf(def_val+2, "%x", &value);
else if (*def_val >= '0' && *def_val <= '9')
value = atoi(def_val);
else
continue;
add_key(def_name, value, is_spk);
}
printf("struct st_key_init init_key_data[] = {\n");
for (i = 0; i < HASHSIZE; i++) {
this = &key_table[i];
if (!this->name)
continue;
do {
printf("\t{ \"%s\", %d, %d, },\n", this->name, this->value, this->shift);
this = this->next;
} while (this);
}
printf("\t{ \".\", 0, 0 }\n};\n");
exit(0);
}
| linux-master | drivers/accessibility/speakup/makemapdata.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <[email protected]>
* this version considerably modified by David Borowski, [email protected]
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
* specifically written as a driver for the speakup screenreview
* package; it is not a general device driver.
*/
#include "spk_priv.h"
#include "speakup.h"
#define DRV_VERSION "2.11"
#define SYNTH_CLEAR 0x18 /* flush synth buffer */
#define PROCSPEECH '\r' /* start synth processing speech char */
static int synth_probe(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
enum default_vars_id {
CAPS_START_ID = 0, CAPS_STOP_ID,
RATE_ID, PITCH_ID,
VOL_ID, TONE_ID, PUNCT_ID,
DIRECT_ID, V_LAST_VAR_ID,
NB_ID
};
static struct var_t vars[NB_ID] = {
[CAPS_START_ID] = { CAPS_START, .u.s = {"\x05[f99]" } },
[CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"\x05[f80]" } },
[RATE_ID] = { RATE, .u.n = {"\x05[r%d]", 10, 0, 20, 100, -10, NULL } },
[PITCH_ID] = { PITCH, .u.n = {"\x05[f%d]", 80, 39, 4500, 0, 0, NULL } },
[VOL_ID] = { VOL, .u.n = {"\x05[g%d]", 21, 0, 40, 0, 0, NULL } },
[TONE_ID] = { TONE, .u.n = {"\x05[s%d]", 9, 0, 63, 0, 0, NULL } },
[PUNCT_ID] = { PUNCT, .u.n = {"\x05[A%c]", 0, 0, 3, 0, 0, "nmsa" } },
[DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
/*
* These attributes will appear in /sys/accessibility/speakup/audptr.
*/
static struct kobj_attribute caps_start_attribute =
__ATTR(caps_start, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
__ATTR(tone, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
__ATTR(vol, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
__ATTR(delay_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
__ATTR(direct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
__ATTR(trigger_time, 0644, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&tone_attribute.attr,
&vol_attribute.attr,
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
static struct spk_synth synth_audptr = {
.name = "audptr",
.version = DRV_VERSION,
.long_name = "Audapter",
.init = "\x05[D1]\x05[Ol]",
.procspeech = PROCSPEECH,
.clear = SYNTH_CLEAR,
.delay = 400,
.trigger = 50,
.jiffies = 30,
.full = 18000,
.dev_name = SYNTH_DEFAULT_DEV,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
.io_ops = &spk_ttyio_ops,
.probe = synth_probe,
.release = spk_ttyio_release,
.synth_immediate = spk_ttyio_synth_immediate,
.catch_up = spk_do_catch_up,
.flush = synth_flush,
.is_alive = spk_synth_is_alive_restart,
.synth_adjust = NULL,
.read_buff_add = NULL,
.get_index = NULL,
.indexing = {
.command = NULL,
.lowindex = 0,
.highindex = 0,
.currindex = 0,
},
.attributes = {
.attrs = synth_attrs,
.name = "audptr",
},
};
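/*
 * Flush: drop locally buffered output, send the Audapter its flush byte
 * (SYNTH_CLEAR) out of band, then kick processing with PROCSPEECH.
 */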
static void synth_flush(struct spk_synth *synth)
{
synth->io_ops->flush_buffer(synth);
synth->io_ops->send_xchar(synth, SYNTH_CLEAR);
synth->io_ops->synth_out(synth, PROCSPEECH);
}
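/* Ask the Audapter for its identification with "\x05[Q]" and log the reply. */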
static void synth_version(struct spk_synth *synth)
{
unsigned i;
char synth_id[33];
synth->synth_immediate(synth, "\x05[Q]");
synth_id[0] = synth->io_ops->synth_in(synth);
if (synth_id[0] != 'A')
return;
for (i = 1; i < sizeof(synth_id) - 1; i++) {
/* read version string from synth */
synth_id[i] = synth->io_ops->synth_in(synth);
if (synth_id[i] == '\n')
break;
}
synth_id[i] = '\0';
pr_info("%s version: %s", synth->long_name, synth_id);
}
static int synth_probe(struct spk_synth *synth)
{
int failed;
failed = spk_ttyio_synth_probe(synth);
if (failed == 0)
synth_version(synth);
synth->alive = !failed;
return 0;
}
module_param_named(ser, synth_audptr.ser, int, 0444);
module_param_named(dev, synth_audptr.dev_name, charp, 0444);
module_param_named(start, synth_audptr.startup, short, 0444);
module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444);
module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444);
module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444);
module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444);
module_param_named(punct, vars[PUNCT_ID].u.n.default_val, int, 0444);
module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
MODULE_PARM_DESC(dev, "Set the device e.g. ttyUSB0, for the synthesizer.");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
MODULE_PARM_DESC(rate, "Set the rate variable on load.");
MODULE_PARM_DESC(pitch, "Set the pitch variable on load.");
MODULE_PARM_DESC(vol, "Set the vol variable on load.");
MODULE_PARM_DESC(tone, "Set the tone variable on load.");
MODULE_PARM_DESC(punct, "Set the punct variable on load.");
MODULE_PARM_DESC(direct, "Set the direct variable on load.");
module_spk_synth(synth_audptr);
MODULE_AUTHOR("Kirk Reiser <[email protected]>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for Audapter synthesizer");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/accessibility/speakup/speakup_audptr.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/miscdevice.h> /* for misc_register, and MISC_DYNAMIC_MINOR */
#include <linux/types.h>
#include <linux/uaccess.h>
#include "speakup.h"
#include "spk_priv.h"
static int misc_registered;
static int dev_opened;
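/*
 * Writes to /dev/synth are copied from user space in 256-byte chunks and
 * handed to the current synthesizer under the speakup spinlock.
 */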
static ssize_t speakup_file_write(struct file *fp, const char __user *buffer,
size_t nbytes, loff_t *ppos)
{
size_t count = nbytes;
const char __user *ptr = buffer;
size_t bytes;
unsigned long flags;
u_char buf[256];
if (!synth)
return -ENODEV;
while (count > 0) {
bytes = min(count, sizeof(buf));
if (copy_from_user(buf, ptr, bytes))
return -EFAULT;
count -= bytes;
ptr += bytes;
spin_lock_irqsave(&speakup_info.spinlock, flags);
synth_write(buf, bytes);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
return (ssize_t)nbytes;
}
static ssize_t speakup_file_read(struct file *fp, char __user *buf,
size_t nbytes, loff_t *ppos)
{
return 0;
}
static int speakup_file_open(struct inode *ip, struct file *fp)
{
if (!synth)
return -ENODEV;
if (xchg(&dev_opened, 1))
return -EBUSY;
return 0;
}
static int speakup_file_release(struct inode *ip, struct file *fp)
{
dev_opened = 0;
return 0;
}
static const struct file_operations synth_fops = {
.read = speakup_file_read,
.write = speakup_file_write,
.open = speakup_file_open,
.release = speakup_file_release,
};
static struct miscdevice synth_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = "synth",
.fops = &synth_fops,
};
void speakup_register_devsynth(void)
{
if (misc_registered != 0)
return;
/* zero it so if register fails, deregister will not ref invalid ptrs */
if (misc_register(&synth_device)) {
pr_warn("Couldn't initialize miscdevice /dev/synth.\n");
} else {
pr_info("initialized device: /dev/synth, node (MAJOR %d, MINOR %d)\n",
MISC_MAJOR, synth_device.minor);
misc_registered = 1;
}
}
void speakup_unregister_devsynth(void)
{
if (!misc_registered)
return;
pr_info("speakup: unregistering synth device /dev/synth\n");
misc_deregister(&synth_device);
misc_registered = 0;
}
| linux-master | drivers/accessibility/speakup/devsynth.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <[email protected]>
* this version considerably modified by David Borowski, [email protected]
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
* specifically written as a driver for the speakup screenreview
* package; it is not a general device driver.
*/
#include "speakup.h"
#include "spk_priv.h"
#include "speakup_dtlk.h" /* local header file for LiteTalk values */
#define DRV_VERSION "2.11"
#define PROCSPEECH 0x0d
static int synth_probe(struct spk_synth *synth);
enum default_vars_id {
CAPS_START_ID = 0, CAPS_STOP_ID,
RATE_ID, PITCH_ID,
VOL_ID, TONE_ID, PUNCT_ID,
VOICE_ID, FREQUENCY_ID,
DIRECT_ID, V_LAST_VAR_ID,
NB_ID
};
static struct var_t vars[NB_ID] = {
[CAPS_START_ID] = { CAPS_START, .u.s = {"\x01+35p" } },
[CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"\x01-35p" } },
[RATE_ID] = { RATE, .u.n = {"\x01%ds", 8, 0, 9, 0, 0, NULL } },
[PITCH_ID] = { PITCH, .u.n = {"\x01%dp", 50, 0, 99, 0, 0, NULL } },
[VOL_ID] = { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } },
[TONE_ID] = { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } },
[PUNCT_ID] = { PUNCT, .u.n = {"\x01%db", 7, 0, 15, 0, 0, NULL } },
[VOICE_ID] = { VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } },
[FREQUENCY_ID] = { FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } },
[DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
/*
* These attributes will appear in /sys/accessibility/speakup/ltlk.
*/
static struct kobj_attribute caps_start_attribute =
__ATTR(caps_start, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute freq_attribute =
__ATTR(freq, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
__ATTR(tone, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
__ATTR(voice, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
__ATTR(vol, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
__ATTR(delay_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
__ATTR(direct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
__ATTR(trigger_time, 0644, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&freq_attribute.attr,
&pitch_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&tone_attribute.attr,
&voice_attribute.attr,
&vol_attribute.attr,
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
static struct spk_synth synth_ltlk = {
.name = "ltlk",
.version = DRV_VERSION,
.long_name = "LiteTalk",
.init = "\01@\x01\x31y\n\0",
.procspeech = PROCSPEECH,
.clear = SYNTH_CLEAR,
.delay = 500,
.trigger = 50,
.jiffies = 50,
.full = 40000,
.dev_name = SYNTH_DEFAULT_DEV,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
.io_ops = &spk_ttyio_ops,
.probe = synth_probe,
.release = spk_ttyio_release,
.synth_immediate = spk_ttyio_synth_immediate,
.catch_up = spk_do_catch_up,
.flush = spk_synth_flush,
.is_alive = spk_synth_is_alive_restart,
.synth_adjust = NULL,
.read_buff_add = NULL,
.get_index = spk_synth_get_index,
.indexing = {
.command = "\x01%di",
.lowindex = 1,
.highindex = 5,
.currindex = 1,
},
.attributes = {
.attrs = synth_attrs,
.name = "ltlk",
},
};
/* interrogate the LiteTalk and print its settings */
static void synth_interrogate(struct spk_synth *synth)
{
unsigned char *t, i;
unsigned char buf[50], rom_v[20];
synth->synth_immediate(synth, "\x18\x01?");
for (i = 0; i < 50; i++) {
buf[i] = synth->io_ops->synth_in(synth);
if (i > 2 && buf[i] == 0x7f)
break;
}
t = buf + 2;
for (i = 0; *t != '\r'; t++) {
rom_v[i] = *t;
if (++i >= 19)
break;
}
rom_v[i] = 0;
pr_info("%s: ROM version: %s\n", synth->long_name, rom_v);
}
static int synth_probe(struct spk_synth *synth)
{
int failed = 0;
failed = spk_ttyio_synth_probe(synth);
if (failed == 0)
synth_interrogate(synth);
synth->alive = !failed;
return failed;
}
module_param_named(ser, synth_ltlk.ser, int, 0444);
module_param_named(dev, synth_ltlk.dev_name, charp, 0444);
module_param_named(start, synth_ltlk.startup, short, 0444);
module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444);
module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444);
module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444);
module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444);
module_param_named(punct, vars[PUNCT_ID].u.n.default_val, int, 0444);
module_param_named(voice, vars[VOICE_ID].u.n.default_val, int, 0444);
module_param_named(frequency, vars[FREQUENCY_ID].u.n.default_val, int, 0444);
module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
MODULE_PARM_DESC(dev, "Set the device e.g. ttyUSB0, for the synthesizer.");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
MODULE_PARM_DESC(rate, "Set the rate variable on load.");
MODULE_PARM_DESC(pitch, "Set the pitch variable on load.");
MODULE_PARM_DESC(vol, "Set the vol variable on load.");
MODULE_PARM_DESC(tone, "Set the tone variable on load.");
MODULE_PARM_DESC(punct, "Set the punct variable on load.");
MODULE_PARM_DESC(voice, "Set the voice variable on load.");
MODULE_PARM_DESC(frequency, "Set the frequency variable on load.");
MODULE_PARM_DESC(direct, "Set the direct variable on load.");
module_spk_synth(synth_ltlk);
MODULE_AUTHOR("Kirk Reiser <[email protected]>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for DoubleTalk LT/LiteTalk synthesizers");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/accessibility/speakup/speakup_ltlk.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/ctype.h> /* for isdigit() and friends */
#include <linux/fs.h>
#include <linux/mm.h> /* for verify_area */
#include <linux/errno.h> /* for -EBUSY */
#include <linux/ioport.h> /* for check_region, request_region */
#include <linux/interrupt.h>
#include <linux/delay.h> /* for loops_per_sec */
#include <linux/kmod.h>
#include <linux/jiffies.h>
#include <linux/uaccess.h> /* for copy_from_user */
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include "spk_priv.h"
#include "speakup.h"
#include "serialio.h"
static LIST_HEAD(synths);
struct spk_synth *synth;
char spk_pitch_buff[32] = "";
static int module_status;
bool spk_quiet_boot;
struct speakup_info_t speakup_info = {
/*
* This spinlock is used to protect the entire speakup machinery, and
* must be taken at each kernel->speakup transition and released at
* each corresponding speakup->kernel transition.
*
* The progression thread only interferes with the speakup machinery
* through the synth buffer, so only needs to take the lock
* while tinkering with the buffer.
*
* We use spin_lock/trylock_irqsave and spin_unlock_irqrestore with this
* spinlock because speakup needs to disable the keyboard IRQ.
*/
.spinlock = __SPIN_LOCK_UNLOCKED(speakup_info.spinlock),
.flushing = 0,
};
EXPORT_SYMBOL_GPL(speakup_info);
static int do_synth_init(struct spk_synth *in_synth);
/*
* Main loop of the progression thread: keep eating from the buffer
* and push to the serial port, waiting as needed
*
* For devices that have a "full" notification mechanism, the driver can
* adapt the loop the way they prefer.
*/
static void _spk_do_catch_up(struct spk_synth *synth, int unicode)
{
u16 ch;
unsigned long flags;
unsigned long jiff_max;
struct var_t *delay_time;
struct var_t *full_time;
struct var_t *jiffy_delta;
int jiffy_delta_val;
int delay_time_val;
int full_time_val;
int ret;
jiffy_delta = spk_get_var(JIFFY);
full_time = spk_get_var(FULL);
delay_time = spk_get_var(DELAY);
spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
jiff_max = jiffies + jiffy_delta_val;
while (!kthread_should_stop()) {
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (speakup_info.flushing) {
speakup_info.flushing = 0;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
synth->flush(synth);
continue;
}
if (!unicode)
synth_buffer_skip_nonlatin1();
if (synth_buffer_empty()) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
}
ch = synth_buffer_peek();
set_current_state(TASK_INTERRUPTIBLE);
full_time_val = full_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '\n')
ch = synth->procspeech;
if (unicode)
ret = synth->io_ops->synth_out_unicode(synth, ch);
else
ret = synth->io_ops->synth_out(synth, ch);
if (!ret) {
schedule_timeout(msecs_to_jiffies(full_time_val));
continue;
}
if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
delay_time_val = delay_time->u.n.value;
full_time_val = full_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (synth->io_ops->synth_out(synth, synth->procspeech))
schedule_timeout(
msecs_to_jiffies(delay_time_val));
else
schedule_timeout(
msecs_to_jiffies(full_time_val));
jiff_max = jiffies + jiffy_delta_val;
}
set_current_state(TASK_RUNNING);
spin_lock_irqsave(&speakup_info.spinlock, flags);
synth_buffer_getc();
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
synth->io_ops->synth_out(synth, synth->procspeech);
}
void spk_do_catch_up(struct spk_synth *synth)
{
_spk_do_catch_up(synth, 0);
}
EXPORT_SYMBOL_GPL(spk_do_catch_up);
void spk_do_catch_up_unicode(struct spk_synth *synth)
{
_spk_do_catch_up(synth, 1);
}
EXPORT_SYMBOL_GPL(spk_do_catch_up_unicode);
void spk_synth_flush(struct spk_synth *synth)
{
synth->io_ops->flush_buffer(synth);
synth->io_ops->synth_out(synth, synth->clear);
}
EXPORT_SYMBOL_GPL(spk_synth_flush);
unsigned char spk_synth_get_index(struct spk_synth *synth)
{
return synth->io_ops->synth_in_nowait(synth);
}
EXPORT_SYMBOL_GPL(spk_synth_get_index);
int spk_synth_is_alive_nop(struct spk_synth *synth)
{
synth->alive = 1;
return 1;
}
EXPORT_SYMBOL_GPL(spk_synth_is_alive_nop);
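/*
 * Restart helper shared by drivers: if the synth went dead, resend its
 * init string once the transmitter is ready again and report "reenabled".
 */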
int spk_synth_is_alive_restart(struct spk_synth *synth)
{
if (synth->alive)
return 1;
if (synth->io_ops->wait_for_xmitr(synth) > 0) {
/* restart */
synth->alive = 1;
synth_printf("%s", synth->init);
return 2; /* reenabled */
}
pr_warn("%s: can't restart synth\n", synth->long_name);
return 0;
}
EXPORT_SYMBOL_GPL(spk_synth_is_alive_restart);
static void thread_wake_up(struct timer_list *unused)
{
wake_up_interruptible_all(&speakup_event);
}
static DEFINE_TIMER(thread_timer, thread_wake_up);
void synth_start(void)
{
struct var_t *trigger_time;
if (!synth->alive) {
synth_buffer_clear();
return;
}
trigger_time = spk_get_var(TRIGGER);
if (!timer_pending(&thread_timer))
mod_timer(&thread_timer, jiffies +
msecs_to_jiffies(trigger_time->u.n.value));
}
void spk_do_flush(void)
{
if (!synth)
return;
speakup_info.flushing = 1;
synth_buffer_clear();
if (synth->alive) {
if (spk_pitch_shift) {
synth_printf("%s", spk_pitch_buff);
spk_pitch_shift = 0;
}
}
wake_up_interruptible_all(&speakup_event);
wake_up_process(speakup_task);
}
void synth_write(const char *buf, size_t count)
{
while (count--)
synth_buffer_add(*buf++);
synth_start();
}
void synth_printf(const char *fmt, ...)
{
va_list args;
unsigned char buf[160], *p;
int r;
va_start(args, fmt);
r = vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
if (r > sizeof(buf) - 1)
r = sizeof(buf) - 1;
p = buf;
while (r--)
synth_buffer_add(*p++);
synth_start();
}
EXPORT_SYMBOL_GPL(synth_printf);
void synth_putwc(u16 wc)
{
synth_buffer_add(wc);
}
EXPORT_SYMBOL_GPL(synth_putwc);
void synth_putwc_s(u16 wc)
{
synth_buffer_add(wc);
synth_start();
}
EXPORT_SYMBOL_GPL(synth_putwc_s);
void synth_putws(const u16 *buf)
{
const u16 *p;
for (p = buf; *p; p++)
synth_buffer_add(*p);
}
EXPORT_SYMBOL_GPL(synth_putws);
void synth_putws_s(const u16 *buf)
{
synth_putws(buf);
synth_start();
}
EXPORT_SYMBOL_GPL(synth_putws_s);
static int index_count;
static int sentence_count;
void spk_reset_index_count(int sc)
{
static int first = 1;
if (first)
first = 0;
else
synth->get_index(synth);
index_count = 0;
sentence_count = sc;
}
int synth_supports_indexing(void)
{
if (synth->get_index)
return 1;
return 0;
}
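/*
 * Queue the next index marker for the synthesizer. Markers are encoded as
 * currindex * 10 + sentence number, with currindex cycling between
 * lowindex and highindex.
 */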
void synth_insert_next_index(int sent_num)
{
int out;
if (synth->alive) {
if (sent_num == 0) {
synth->indexing.currindex++;
index_count++;
if (synth->indexing.currindex >
synth->indexing.highindex)
synth->indexing.currindex =
synth->indexing.lowindex;
}
out = synth->indexing.currindex * 10 + sent_num;
synth_printf(synth->indexing.command, out, out);
}
}
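/*
 * Decode the last index reported by the synthesizer: the units digit is
 * the sentence number, and the remaining digits tell how far behind
 * currindex the synthesizer currently is.
 */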
void spk_get_index_count(int *linecount, int *sentcount)
{
int ind = synth->get_index(synth);
if (ind) {
sentence_count = ind % 10;
if ((ind / 10) <= synth->indexing.currindex)
index_count = synth->indexing.currindex - (ind / 10);
else
index_count = synth->indexing.currindex
- synth->indexing.lowindex
+ synth->indexing.highindex - (ind / 10) + 1;
}
*sentcount = sentence_count;
*linecount = index_count;
}
static struct resource synth_res;
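/*
 * Reserve the synthesizer's I/O range in the ioport resource tree. Only
 * one region can be held at a time since synth_res is a single static
 * resource.
 */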
int synth_request_region(unsigned long start, unsigned long n)
{
struct resource *parent = &ioport_resource;
memset(&synth_res, 0, sizeof(synth_res));
synth_res.name = synth->name;
synth_res.start = start;
synth_res.end = start + n - 1;
synth_res.flags = IORESOURCE_BUSY;
return request_resource(parent, &synth_res);
}
EXPORT_SYMBOL_GPL(synth_request_region);
int synth_release_region(unsigned long start, unsigned long n)
{
return release_resource(&synth_res);
}
EXPORT_SYMBOL_GPL(synth_release_region);
struct var_t synth_time_vars[] = {
{ DELAY, .u.n = {NULL, 100, 100, 2000, 0, 0, NULL } },
{ TRIGGER, .u.n = {NULL, 20, 10, 2000, 0, 0, NULL } },
{ JIFFY, .u.n = {NULL, 50, 20, 200, 0, 0, NULL } },
{ FULL, .u.n = {NULL, 400, 200, 60000, 0, 0, NULL } },
{ FLUSH, .u.n = {NULL, 4000, 10, 4000, 0, 0, NULL } },
V_LAST_VAR
};
/* called by: speakup_init() */
int synth_init(char *synth_name)
{
int ret = 0;
struct spk_synth *tmp, *synth = NULL;
if (!synth_name)
return 0;
if (strcmp(synth_name, "none") == 0) {
mutex_lock(&spk_mutex);
synth_release();
mutex_unlock(&spk_mutex);
return 0;
}
mutex_lock(&spk_mutex);
/* First, check if we already have it loaded. */
list_for_each_entry(tmp, &synths, node) {
if (strcmp(tmp->name, synth_name) == 0)
synth = tmp;
}
/* If we got one, initialize it now. */
if (synth)
ret = do_synth_init(synth);
else
ret = -ENODEV;
mutex_unlock(&spk_mutex);
return ret;
}
/* called by: synth_add() */
static int do_synth_init(struct spk_synth *in_synth)
{
struct var_t *var;
synth_release();
if (in_synth->checkval != SYNTH_CHECK)
return -EINVAL;
synth = in_synth;
synth->alive = 0;
pr_warn("synth probe\n");
if (synth->probe(synth) < 0) {
pr_warn("%s: device probe failed\n", in_synth->name);
synth = NULL;
return -ENODEV;
}
synth_time_vars[0].u.n.value =
synth_time_vars[0].u.n.default_val = synth->delay;
synth_time_vars[1].u.n.value =
synth_time_vars[1].u.n.default_val = synth->trigger;
synth_time_vars[2].u.n.value =
synth_time_vars[2].u.n.default_val = synth->jiffies;
synth_time_vars[3].u.n.value =
synth_time_vars[3].u.n.default_val = synth->full;
synth_time_vars[4].u.n.value =
synth_time_vars[4].u.n.default_val = synth->flush_time;
synth_printf("%s", synth->init);
for (var = synth->vars;
(var->var_id >= 0) && (var->var_id < MAXVARS); var++)
speakup_register_var(var);
if (!spk_quiet_boot)
synth_printf("%s found\n", synth->long_name);
if (synth->attributes.name &&
sysfs_create_group(speakup_kobj, &synth->attributes) < 0)
return -ENOMEM;
synth_flags = synth->flags;
wake_up_interruptible_all(&speakup_event);
if (speakup_task)
wake_up_process(speakup_task);
return 0;
}
void synth_release(void)
{
struct var_t *var;
unsigned long flags;
if (!synth)
return;
spin_lock_irqsave(&speakup_info.spinlock, flags);
pr_info("releasing synth %s\n", synth->name);
synth->alive = 0;
del_timer(&thread_timer);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (synth->attributes.name)
sysfs_remove_group(speakup_kobj, &synth->attributes);
for (var = synth->vars; var->var_id != MAXVARS; var++)
speakup_unregister_var(var->var_id);
synth->release(synth);
synth = NULL;
}
/* called by: all_driver_init() */
int synth_add(struct spk_synth *in_synth)
{
int status = 0;
struct spk_synth *tmp;
mutex_lock(&spk_mutex);
list_for_each_entry(tmp, &synths, node) {
if (tmp == in_synth) {
mutex_unlock(&spk_mutex);
return 0;
}
}
if (in_synth->startup)
status = do_synth_init(in_synth);
if (!status)
list_add_tail(&in_synth->node, &synths);
mutex_unlock(&spk_mutex);
return status;
}
EXPORT_SYMBOL_GPL(synth_add);
void synth_remove(struct spk_synth *in_synth)
{
mutex_lock(&spk_mutex);
if (synth == in_synth)
synth_release();
list_del(&in_synth->node);
module_status = 0;
mutex_unlock(&spk_mutex);
}
EXPORT_SYMBOL_GPL(synth_remove);
struct spk_synth *synth_current(void)
{
return synth;
}
EXPORT_SYMBOL_GPL(synth_current);
short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM };
| linux-master | drivers/accessibility/speakup/synth.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <[email protected]>
* this version considerably modified by David Borowski, [email protected]
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
* specifically written as a driver for the speakup screenreview
* package; it is not a general device driver.
*/
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include "spk_priv.h"
#include "speakup.h"
#define DRV_VERSION "2.14"
#define SYNTH_CLEAR 0x03
#define PROCSPEECH 0x0b
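/*
 * The read callback records the last byte received from the DECtalk; a
 * 0x13 (XOFF) is treated as the synthesizer signalling that its buffer
 * is full.
 */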
static volatile unsigned char last_char;
static void read_buff_add(u_char ch)
{
last_char = ch;
}
static inline bool synth_full(void)
{
return last_char == 0x13;
}
static void do_catch_up(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
static int in_escape;
enum default_vars_id {
CAPS_START_ID = 0, CAPS_STOP_ID,
RATE_ID, PITCH_ID, INFLECTION_ID,
VOL_ID, PUNCT_ID, VOICE_ID,
DIRECT_ID, V_LAST_ID,
NB_ID,
};
static struct var_t vars[NB_ID] = {
[CAPS_START_ID] = { CAPS_START, .u.s = {"[:dv ap 222]" } },
[CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"[:dv ap 100]" } },
[RATE_ID] = { RATE, .u.n = {"[:ra %d]", 7, 0, 9, 150, 25, NULL } },
[PITCH_ID] = { PITCH, .u.n = {"[:dv ap %d]", 100, 0, 100, 0, 0, NULL } },
[INFLECTION_ID] = { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } },
[VOL_ID] = { VOL, .u.n = {"[:dv gv %d]", 13, 0, 16, 0, 5, NULL } },
[PUNCT_ID] = { PUNCT, .u.n = {"[:pu %c]", 0, 0, 2, 0, 0, "nsa" } },
[VOICE_ID] = { VOICE, .u.n = {"[:n%c]", 0, 0, 9, 0, 0, "phfdburwkv" } },
[DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
/*
* These attributes will appear in /sys/accessibility/speakup/decext.
*/
static struct kobj_attribute caps_start_attribute =
__ATTR(caps_start, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute inflection_attribute =
__ATTR(inflection, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
__ATTR(voice, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
__ATTR(vol, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
__ATTR(delay_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
__ATTR(direct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
__ATTR(trigger_time, 0644, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
&inflection_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&voice_attribute.attr,
&vol_attribute.attr,
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
static struct spk_synth synth_decext = {
.name = "decext",
.version = DRV_VERSION,
.long_name = "Dectalk External",
.init = "[:pe -380]",
.procspeech = PROCSPEECH,
.clear = SYNTH_CLEAR,
.delay = 500,
.trigger = 50,
.jiffies = 50,
.full = 40000,
.flags = SF_DEC,
.dev_name = SYNTH_DEFAULT_DEV,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
.io_ops = &spk_ttyio_ops,
.probe = spk_ttyio_synth_probe,
.release = spk_ttyio_release,
.synth_immediate = spk_ttyio_synth_immediate,
.catch_up = do_catch_up,
.flush = synth_flush,
.is_alive = spk_synth_is_alive_restart,
.synth_adjust = NULL,
.read_buff_add = read_buff_add,
.get_index = NULL,
.indexing = {
.command = NULL,
.lowindex = 0,
.highindex = 0,
.currindex = 0,
},
.attributes = {
.attrs = synth_attrs,
.name = "decext",
},
};
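/*
 * Like the generic catch-up loop, but tracks "[...]" command sequences so
 * that PROCSPEECH is never injected in the middle of a DECtalk command.
 */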
static void do_catch_up(struct spk_synth *synth)
{
u_char ch;
static u_char last = '\0';
unsigned long flags;
unsigned long jiff_max;
struct var_t *jiffy_delta;
struct var_t *delay_time;
int jiffy_delta_val = 0;
int delay_time_val = 0;
jiffy_delta = spk_get_var(JIFFY);
delay_time = spk_get_var(DELAY);
spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
jiff_max = jiffies + jiffy_delta_val;
while (!kthread_should_stop()) {
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (speakup_info.flushing) {
speakup_info.flushing = 0;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
synth->flush(synth);
continue;
}
synth_buffer_skip_nonlatin1();
if (synth_buffer_empty()) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
}
ch = synth_buffer_peek();
set_current_state(TASK_INTERRUPTIBLE);
delay_time_val = delay_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '\n')
ch = 0x0D;
if (synth_full() || !synth->io_ops->synth_out(synth, ch)) {
schedule_timeout(msecs_to_jiffies(delay_time_val));
continue;
}
set_current_state(TASK_RUNNING);
spin_lock_irqsave(&speakup_info.spinlock, flags);
synth_buffer_getc();
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '[') {
in_escape = 1;
} else if (ch == ']') {
in_escape = 0;
} else if (ch <= SPACE) {
if (!in_escape && strchr(",.!?;:", last))
synth->io_ops->synth_out(synth, PROCSPEECH);
if (time_after_eq(jiffies, jiff_max)) {
if (!in_escape)
synth->io_ops->synth_out(synth,
PROCSPEECH);
spin_lock_irqsave(&speakup_info.spinlock,
flags);
jiffy_delta_val = jiffy_delta->u.n.value;
delay_time_val = delay_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock,
flags);
schedule_timeout(msecs_to_jiffies
(delay_time_val));
jiff_max = jiffies + jiffy_delta_val;
}
}
last = ch;
}
if (!in_escape)
synth->io_ops->synth_out(synth, PROCSPEECH);
}
static void synth_flush(struct spk_synth *synth)
{
in_escape = 0;
synth->io_ops->flush_buffer(synth);
synth->synth_immediate(synth, "\033P;10z\033\\");
}
module_param_named(ser, synth_decext.ser, int, 0444);
module_param_named(dev, synth_decext.dev_name, charp, 0444);
module_param_named(start, synth_decext.startup, short, 0444);
module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444);
module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444);
module_param_named(inflection, vars[INFLECTION_ID].u.n.default_val, int, 0444);
module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444);
module_param_named(punct, vars[PUNCT_ID].u.n.default_val, int, 0444);
module_param_named(voice, vars[VOICE_ID].u.n.default_val, int, 0444);
module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
MODULE_PARM_DESC(dev, "Set the device e.g. ttyUSB0, for the synthesizer.");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
MODULE_PARM_DESC(rate, "Set the rate variable on load.");
MODULE_PARM_DESC(pitch, "Set the pitch variable on load.");
MODULE_PARM_DESC(inflection, "Set the inflection variable on load.");
MODULE_PARM_DESC(vol, "Set the vol variable on load.");
MODULE_PARM_DESC(punct, "Set the punct variable on load.");
MODULE_PARM_DESC(voice, "Set the voice variable on load.");
MODULE_PARM_DESC(direct, "Set the direct variable on load.");
module_spk_synth(synth_decext);
MODULE_AUTHOR("Kirk Reiser <[email protected]>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for DECtalk External synthesizers");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/accessibility/speakup/speakup_decext.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* This is the DECtalk PC speakup driver
*
* Some constants from DEC's DOS driver:
* Copyright (c) by Digital Equipment Corp.
*
* 386BSD DECtalk PC driver:
* Copyright (c) 1996 Brian Buhrow <[email protected]>
*
* Linux DECtalk PC driver:
* Copyright (c) 1997 Nicolas Pitre <[email protected]>
*
* speakup DECtalk PC Internal driver:
* Copyright (c) 2003 David Borowski <[email protected]>
*
* All rights reserved.
*/
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include "spk_priv.h"
#include "speakup.h"
#define MODULE_init 0x0dec /* module in boot code */
#define MODULE_self_test 0x8800 /* module in self-test */
#define MODULE_reset 0xffff /* reinit the whole module */
#define MODE_mask 0xf000 /* mode bits in high nibble */
#define MODE_null 0x0000
#define MODE_test 0x2000 /* in testing mode */
#define MODE_status 0x8000
#define STAT_int 0x0001 /* running in interrupt mode */
#define STAT_tr_char 0x0002 /* character data to transmit */
#define STAT_rr_char 0x0004 /* ready to receive char data */
#define STAT_cmd_ready 0x0008 /* ready to accept commands */
#define STAT_dma_ready 0x0010 /* dma command ready */
#define STAT_digitized 0x0020 /* spc in digitized mode */
#define STAT_new_index 0x0040 /* new last index ready */
#define STAT_new_status 0x0080 /* new status posted */
#define STAT_dma_state 0x0100 /* dma state toggle */
#define STAT_index_valid 0x0200 /* indexs are valid */
#define STAT_flushing 0x0400 /* flush in progress */
#define STAT_self_test 0x0800 /* module in self test */
#define MODE_ready 0xc000 /* module ready for next phase */
#define READY_boot 0x0000
#define READY_kernel 0x0001
#define MODE_error 0xf000
#define CMD_mask 0xf000 /* mask for command nibble */
#define CMD_null 0x0000 /* post status */
#define CMD_control 0x1000 /* hard control command */
#define CTRL_mask 0x0F00 /* mask off control nibble */
#define CTRL_data 0x00FF /* mask to get data byte */
#define CTRL_null 0x0000 /* null control */
#define CTRL_vol_up 0x0100 /* increase volume */
#define CTRL_vol_down 0x0200 /* decrease volume */
#define CTRL_vol_set 0x0300 /* set volume */
#define CTRL_pause 0x0400 /* pause spc */
#define CTRL_resume 0x0500 /* resume spc clock */
#define CTRL_resume_spc 0x0001 /* resume spc soft pause */
#define CTRL_flush 0x0600 /* flush all buffers */
#define CTRL_int_enable 0x0700 /* enable status change ints */
#define CTRL_buff_free 0x0800 /* buffer remain count */
#define CTRL_buff_used 0x0900 /* buffer in use */
#define CTRL_speech 0x0a00 /* immediate speech change */
#define CTRL_SP_voice 0x0001 /* voice change */
#define CTRL_SP_rate 0x0002 /* rate change */
#define CTRL_SP_comma 0x0003 /* comma pause change */
#define CTRL_SP_period 0x0004 /* period pause change */
#define CTRL_SP_rate_delta 0x0005 /* delta rate change */
#define CTRL_SP_get_param 0x0006 /* return the desired parameter */
#define CTRL_last_index 0x0b00 /* get last index spoken */
#define CTRL_io_priority 0x0c00 /* change i/o priority */
#define CTRL_free_mem 0x0d00 /* get free paragraphs on module */
#define CTRL_get_lang 0x0e00 /* return bitmask of loaded languages */
#define CMD_test 0x2000 /* self-test request */
#define TEST_mask 0x0F00 /* isolate test field */
#define TEST_null 0x0000 /* no test requested */
#define TEST_isa_int 0x0100 /* assert isa irq */
#define TEST_echo 0x0200 /* make data in == data out */
#define TEST_seg 0x0300 /* set peek/poke segment */
#define TEST_off 0x0400 /* set peek/poke offset */
#define TEST_peek 0x0500 /* data out == *peek */
#define TEST_poke 0x0600 /* *peek == data in */
#define TEST_sub_code 0x00FF /* user defined test sub codes */
#define CMD_id 0x3000 /* return software id */
#define ID_null 0x0000 /* null id */
#define ID_kernel 0x0100 /* kernel code executing */
#define ID_boot 0x0200 /* boot code executing */
#define CMD_dma 0x4000 /* force a dma start */
#define CMD_reset 0x5000 /* reset module status */
#define CMD_sync 0x6000 /* kernel sync command */
#define CMD_char_in 0x7000 /* single character send */
#define CMD_char_out 0x8000 /* single character get */
#define CHAR_count_1 0x0100 /* one char in cmd_low */
#define CHAR_count_2 0x0200 /* the second in data_low */
#define CHAR_count_3 0x0300 /* the third in data_high */
#define CMD_spc_mode 0x9000 /* change spc mode */
#define CMD_spc_to_text 0x0100 /* set to text mode */
#define CMD_spc_to_digit 0x0200 /* set to digital mode */
#define CMD_spc_rate 0x0400 /* change spc data rate */
#define CMD_error 0xf000 /* severe error */
enum { PRIMARY_DIC = 0, USER_DIC, COMMAND_DIC, ABBREV_DIC };
#define DMA_single_in 0x01
#define DMA_single_out 0x02
#define DMA_buff_in 0x03
#define DMA_buff_out 0x04
#define DMA_control 0x05
#define DT_MEM_ALLOC 0x03
#define DT_SET_DIC 0x04
#define DT_START_TASK 0x05
#define DT_LOAD_MEM 0x06
#define DT_READ_MEM 0x07
#define DT_DIGITAL_IN 0x08
#define DMA_sync 0x06
#define DMA_sync_char 0x07
#define DRV_VERSION "2.12"
#define PROCSPEECH 0x0b
#define SYNTH_IO_EXTENT 8
static int synth_probe(struct spk_synth *synth);
static void dtpc_release(struct spk_synth *synth);
static const char *synth_immediate(struct spk_synth *synth, const char *buf);
static void do_catch_up(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
static int synth_portlist[] = { 0x340, 0x350, 0x240, 0x250, 0 };
static int in_escape, is_flushing;
static int dt_stat, dma_state;
enum default_vars_id {
CAPS_START_ID = 0, CAPS_STOP_ID,
RATE_ID, PITCH_ID, INFLECTION_ID,
VOL_ID, PUNCT_ID, VOICE_ID,
DIRECT_ID, V_LAST_VAR_ID,
NB_ID,
};
static struct var_t vars[NB_ID] = {
[CAPS_START_ID] = { CAPS_START, .u.s = {"[:dv ap 200]" } },
[CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"[:dv ap 100]" } },
[RATE_ID] = { RATE, .u.n = {"[:ra %d]", 9, 0, 18, 150, 25, NULL } },
[PITCH_ID] = { PITCH, .u.n = {"[:dv ap %d]", 80, 0, 100, 20, 0, NULL } },
[INFLECTION_ID] = { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } },
[VOL_ID] = { VOL, .u.n = {"[:vo se %d]", 5, 0, 9, 5, 10, NULL } },
[PUNCT_ID] = { PUNCT, .u.n = {"[:pu %c]", 0, 0, 2, 0, 0, "nsa" } },
[VOICE_ID] = { VOICE, .u.n = {"[:n%c]", 0, 0, 9, 0, 0, "phfdburwkv" } },
[DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
/*
* These attributes will appear in /sys/accessibility/speakup/decpc.
*/
static struct kobj_attribute caps_start_attribute =
__ATTR(caps_start, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute inflection_attribute =
__ATTR(inflection, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
__ATTR(voice, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
__ATTR(vol, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
__ATTR(delay_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
__ATTR(direct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
__ATTR(trigger_time, 0644, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
&inflection_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&voice_attribute.attr,
&vol_attribute.attr,
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
static struct spk_synth synth_dec_pc = {
.name = "decpc",
.version = DRV_VERSION,
.long_name = "Dectalk PC",
.init = "[:pe -380]",
.procspeech = PROCSPEECH,
.delay = 500,
.trigger = 50,
.jiffies = 50,
.full = 1000,
.flags = SF_DEC,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
.io_ops = &spk_serial_io_ops,
.probe = synth_probe,
.release = dtpc_release,
.synth_immediate = synth_immediate,
.catch_up = do_catch_up,
.flush = synth_flush,
.is_alive = spk_synth_is_alive_nop,
.synth_adjust = NULL,
.read_buff_add = NULL,
.get_index = NULL,
.indexing = {
.command = NULL,
.lowindex = 0,
.highindex = 0,
.currindex = 0,
},
.attributes = {
.attrs = synth_attrs,
.name = "decpc",
},
};
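/*
 * Low-level access to the DECtalk PC card: the 16-bit status and command
 * words are split across two consecutive byte-wide ports at port_tts.
 */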
static int dt_getstatus(void)
{
dt_stat = inb_p(speakup_info.port_tts) |
(inb_p(speakup_info.port_tts + 1) << 8);
return dt_stat;
}
static void dt_sendcmd(u_int cmd)
{
outb_p(cmd & 0xFF, speakup_info.port_tts);
outb_p((cmd >> 8) & 0xFF, speakup_info.port_tts + 1);
}
static int dt_waitbit(int bit)
{
int timeout = 100;
while (--timeout > 0) {
if ((dt_getstatus() & bit) == bit)
return 1;
udelay(50);
}
return 0;
}
static int dt_wait_dma(void)
{
int timeout = 100, state = dma_state;
if (!dt_waitbit(STAT_dma_ready))
return 0;
while (--timeout > 0) {
if ((dt_getstatus() & STAT_dma_state) == state)
return 1;
udelay(50);
}
dma_state = dt_getstatus() & STAT_dma_state;
return 1;
}
static int dt_ctrl(u_int cmd)
{
int timeout = 10;
if (!dt_waitbit(STAT_cmd_ready))
return -1;
outb_p(0, speakup_info.port_tts + 2);
outb_p(0, speakup_info.port_tts + 3);
dt_getstatus();
dt_sendcmd(CMD_control | cmd);
outb_p(0, speakup_info.port_tts + 6);
while (dt_getstatus() & STAT_cmd_ready) {
udelay(20);
if (--timeout == 0)
break;
}
dt_sendcmd(CMD_null);
return 0;
}
static void synth_flush(struct spk_synth *synth)
{
int timeout = 10;
if (is_flushing)
return;
is_flushing = 4;
in_escape = 0;
while (dt_ctrl(CTRL_flush)) {
if (--timeout == 0)
break;
udelay(50);
}
for (timeout = 0; timeout < 10; timeout++) {
if (dt_waitbit(STAT_dma_ready))
break;
udelay(50);
}
outb_p(DMA_sync, speakup_info.port_tts + 4);
outb_p(0, speakup_info.port_tts + 4);
udelay(100);
for (timeout = 0; timeout < 10; timeout++) {
if (!(dt_getstatus() & STAT_flushing))
break;
udelay(50);
}
dma_state = dt_getstatus() & STAT_dma_state;
dma_state ^= STAT_dma_state;
is_flushing = 0;
}
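/* Send one character to the card via single-byte DMA, toggling the expected DMA state. */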
static int dt_sendchar(char ch)
{
if (!dt_wait_dma())
return -1;
if (!(dt_stat & STAT_rr_char))
return -2;
outb_p(DMA_single_in, speakup_info.port_tts + 4);
outb_p(ch, speakup_info.port_tts + 4);
dma_state ^= STAT_dma_state;
return 0;
}
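/*
 * Check that a card answers at port_tts and that its on-board software is
 * running; a status of 0x0dec means the board is still in its boot code
 * with no software loaded.
 */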
static int testkernel(void)
{
int status = 0;
if (dt_getstatus() == 0xffff) {
status = -1;
goto oops;
}
dt_sendcmd(CMD_sync);
if (!dt_waitbit(STAT_cmd_ready))
status = -2;
else if (dt_stat & 0x8000)
return 0;
else if (dt_stat == 0x0dec)
pr_warn("dec_pc at 0x%x, software not loaded\n",
speakup_info.port_tts);
status = -3;
oops: synth_release_region(speakup_info.port_tts, SYNTH_IO_EXTENT);
speakup_info.port_tts = 0;
return status;
}
static void do_catch_up(struct spk_synth *synth)
{
u_char ch;
static u_char last;
unsigned long flags;
unsigned long jiff_max;
struct var_t *jiffy_delta;
struct var_t *delay_time;
int jiffy_delta_val;
int delay_time_val;
jiffy_delta = spk_get_var(JIFFY);
delay_time = spk_get_var(DELAY);
spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
jiff_max = jiffies + jiffy_delta_val;
while (!kthread_should_stop()) {
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (speakup_info.flushing) {
speakup_info.flushing = 0;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
synth->flush(synth);
continue;
}
synth_buffer_skip_nonlatin1();
if (synth_buffer_empty()) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
}
ch = synth_buffer_peek();
set_current_state(TASK_INTERRUPTIBLE);
delay_time_val = delay_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '\n')
ch = 0x0D;
if (dt_sendchar(ch)) {
schedule_timeout(msecs_to_jiffies(delay_time_val));
continue;
}
set_current_state(TASK_RUNNING);
spin_lock_irqsave(&speakup_info.spinlock, flags);
synth_buffer_getc();
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '[') {
in_escape = 1;
} else if (ch == ']') {
in_escape = 0;
} else if (ch <= SPACE) {
if (!in_escape && strchr(",.!?;:", last))
dt_sendchar(PROCSPEECH);
if (time_after_eq(jiffies, jiff_max)) {
if (!in_escape)
dt_sendchar(PROCSPEECH);
spin_lock_irqsave(&speakup_info.spinlock,
flags);
jiffy_delta_val = jiffy_delta->u.n.value;
delay_time_val = delay_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock,
flags);
schedule_timeout(msecs_to_jiffies
(delay_time_val));
jiff_max = jiffies + jiffy_delta_val;
}
}
last = ch;
ch = 0;
}
if (!in_escape)
dt_sendchar(PROCSPEECH);
}
static const char *synth_immediate(struct spk_synth *synth, const char *buf)
{
u_char ch;
while ((ch = *buf)) {
if (ch == '\n')
ch = PROCSPEECH;
if (dt_sendchar(ch))
return buf;
buf++;
}
return NULL;
}
static int synth_probe(struct spk_synth *synth)
{
int i = 0, failed = 0;
pr_info("Probing for %s.\n", synth->long_name);
for (i = 0; synth_portlist[i]; i++) {
if (synth_request_region(synth_portlist[i], SYNTH_IO_EXTENT)) {
pr_warn("request_region: failed with 0x%x, %d\n",
synth_portlist[i], SYNTH_IO_EXTENT);
continue;
}
speakup_info.port_tts = synth_portlist[i];
failed = testkernel();
if (failed == 0)
break;
}
if (failed) {
pr_info("%s: not found\n", synth->long_name);
return -ENODEV;
}
pr_info("%s: %03x-%03x, Driver Version %s,\n", synth->long_name,
speakup_info.port_tts, speakup_info.port_tts + 7,
synth->version);
synth->alive = 1;
return 0;
}
static void dtpc_release(struct spk_synth *synth)
{
spk_stop_serial_interrupt();
if (speakup_info.port_tts)
synth_release_region(speakup_info.port_tts, SYNTH_IO_EXTENT);
speakup_info.port_tts = 0;
}
module_param_named(start, synth_dec_pc.startup, short, 0444);
module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444);
module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444);
module_param_named(inflection, vars[INFLECTION_ID].u.n.default_val, int, 0444);
module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444);
module_param_named(punct, vars[PUNCT_ID].u.n.default_val, int, 0444);
module_param_named(voice, vars[VOICE_ID].u.n.default_val, int, 0444);
module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444);
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
MODULE_PARM_DESC(rate, "Set the rate variable on load.");
MODULE_PARM_DESC(pitch, "Set the pitch variable on load.");
MODULE_PARM_DESC(inflection, "Set the inflection variable on load.");
MODULE_PARM_DESC(vol, "Set the vol variable on load.");
MODULE_PARM_DESC(punct, "Set the punct variable on load.");
MODULE_PARM_DESC(voice, "Set the voice variable on load.");
MODULE_PARM_DESC(direct, "Set the direct variable on load.");
module_spk_synth(synth_dec_pc);
MODULE_AUTHOR("Kirk Reiser <[email protected]>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for DECtalk PC synthesizers");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/accessibility/speakup/speakup_decpc.c |
// SPDX-License-Identifier: GPL-2.0
/* Internationalization implementation. Includes definitions of English
* string arrays, and the i18n pointer.
*/
#include <linux/slab.h> /* For kmalloc. */
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/string.h>
#include "speakup.h"
#include "spk_priv.h"
static char *speakup_msgs[MSG_LAST_INDEX];
static char *speakup_default_msgs[MSG_LAST_INDEX] = {
[MSG_BLANK] = "blank",
[MSG_IAM_ALIVE] = "I'm aLive!",
[MSG_YOU_KILLED_SPEAKUP] = "You killed speakup!",
[MSG_HEY_THATS_BETTER] = "hey. That's better!",
[MSG_YOU_TURNED_ME_OFF] = "You turned me off!",
[MSG_PARKED] = "parked!",
[MSG_UNPARKED] = "unparked!",
[MSG_MARK] = "mark",
[MSG_CUT] = "cut",
[MSG_MARK_CLEARED] = "mark, cleared",
[MSG_PASTE] = "paste",
[MSG_BRIGHT] = "bright",
[MSG_ON_BLINKING] = "on blinking",
[MSG_OFF] = "off",
[MSG_ON] = "on",
[MSG_NO_WINDOW] = "no window",
[MSG_CURSORING_OFF] = "cursoring off",
[MSG_CURSORING_ON] = "cursoring on",
[MSG_HIGHLIGHT_TRACKING] = "highlight tracking",
[MSG_READ_WINDOW] = "read windo",
[MSG_READ_ALL] = "read all",
[MSG_EDIT_DONE] = "edit done",
[MSG_WINDOW_ALREADY_SET] = "window already set, clear then reset",
[MSG_END_BEFORE_START] = "error end before start",
[MSG_WINDOW_CLEARED] = "window cleared",
[MSG_WINDOW_SILENCED] = "window silenced",
[MSG_WINDOW_SILENCE_DISABLED] = "window silence disabled",
[MSG_ERROR] = "error",
[MSG_GOTO_CANCELED] = "goto canceled",
[MSG_GOTO] = "go to?",
[MSG_LEAVING_HELP] = "leaving help",
[MSG_IS_UNASSIGNED] = "is unassigned",
[MSG_HELP_INFO] =
"press space to exit, up or down to scroll, or a letter to go to a command",
[MSG_EDGE_TOP] = "top,",
[MSG_EDGE_BOTTOM] = "bottom,",
[MSG_EDGE_LEFT] = "left,",
[MSG_EDGE_RIGHT] = "right,",
[MSG_NUMBER] = "number",
[MSG_SPACE] = "space",
[MSG_START] = "start",
[MSG_END] = "end",
[MSG_CTRL] = "control-",
[MSG_DISJUNCTION] = "or",
/* Messages with embedded format specifiers. */
[MSG_POS_INFO] = "line %ld, col %ld, t t y %d",
[MSG_CHAR_INFO] = "hex %02x, decimal %d",
[MSG_REPEAT_DESC] = "times %d .",
[MSG_REPEAT_DESC2] = "repeated %d .",
[MSG_WINDOW_LINE] = "window is line %d",
[MSG_WINDOW_BOUNDARY] = "%s at line %d, column %d",
[MSG_EDIT_PROMPT] = "edit %s, press space when done",
[MSG_NO_COMMAND] = "no commands for %c",
[MSG_KEYDESC] = "is %s",
/* Control keys. */
/* Most of these duplicate the entries in state names. */
[MSG_CTL_SHIFT] = "shift",
[MSG_CTL_ALTGR] = "altgr",
[MSG_CTL_CONTROL] = "control",
[MSG_CTL_ALT] = "alt",
[MSG_CTL_LSHIFT] = "l shift",
[MSG_CTL_SPEAKUP] = "speakup",
[MSG_CTL_LCONTROL] = "l control",
[MSG_CTL_RCONTROL] = "r control",
[MSG_CTL_CAPSSHIFT] = "caps shift",
/* Color names. */
[MSG_COLOR_BLACK] = "black",
[MSG_COLOR_BLUE] = "blue",
[MSG_COLOR_GREEN] = "green",
[MSG_COLOR_CYAN] = "cyan",
[MSG_COLOR_RED] = "red",
[MSG_COLOR_MAGENTA] = "magenta",
[MSG_COLOR_YELLOW] = "yellow",
[MSG_COLOR_WHITE] = "white",
[MSG_COLOR_GREY] = "grey",
[MSG_COLOR_BRIGHTBLUE] = "bright blue",
[MSG_COLOR_BRIGHTGREEN] = "bright green",
[MSG_COLOR_BRIGHTCYAN] = "bright cyan",
[MSG_COLOR_BRIGHTRED] = "bright red",
[MSG_COLOR_BRIGHTMAGENTA] = "bright magenta",
[MSG_COLOR_BRIGHTYELLOW] = "bright yellow",
[MSG_COLOR_BRIGHTWHITE] = "bright white",
/* Names of key states. */
[MSG_STATE_DOUBLE] = "double",
[MSG_STATE_SPEAKUP] = "speakup",
[MSG_STATE_ALT] = "alt",
[MSG_STATE_CONTROL] = "ctrl",
[MSG_STATE_ALTGR] = "altgr",
[MSG_STATE_SHIFT] = "shift",
/* Key names. */
[MSG_KEYNAME_ESC] = "escape",
[MSG_KEYNAME_1] = "1",
[MSG_KEYNAME_2] = "2",
[MSG_KEYNAME_3] = "3",
[MSG_KEYNAME_4] = "4",
[MSG_KEYNAME_5] = "5",
[MSG_KEYNAME_6] = "6",
[MSG_KEYNAME_7] = "7",
[MSG_KEYNAME_8] = "8",
[MSG_KEYNAME_9] = "9",
[MSG_KEYNAME_0] = "0",
[MSG_KEYNAME_DASH] = "minus",
[MSG_KEYNAME_EQUAL] = "equal",
[MSG_KEYNAME_BS] = "back space",
[MSG_KEYNAME_TAB] = "tab",
[MSG_KEYNAME_Q] = "q",
[MSG_KEYNAME_W] = "w",
[MSG_KEYNAME_E] = "e",
[MSG_KEYNAME_R] = "r",
[MSG_KEYNAME_T] = "t",
[MSG_KEYNAME_Y] = "y",
[MSG_KEYNAME_U] = "u",
[MSG_KEYNAME_I] = "i",
[MSG_KEYNAME_O] = "o",
[MSG_KEYNAME_P] = "p",
[MSG_KEYNAME_LEFTBRACE] = "left brace",
[MSG_KEYNAME_RIGHTBRACE] = "right brace",
[MSG_KEYNAME_ENTER] = "enter",
[MSG_KEYNAME_LEFTCTRL] = "left control",
[MSG_KEYNAME_A] = "a",
[MSG_KEYNAME_S] = "s",
[MSG_KEYNAME_D] = "d",
[MSG_KEYNAME_F] = "f",
[MSG_KEYNAME_G] = "g",
[MSG_KEYNAME_H] = "h",
[MSG_KEYNAME_J] = "j",
[MSG_KEYNAME_K] = "k",
[MSG_KEYNAME_L] = "l",
[MSG_KEYNAME_SEMICOLON] = "semicolon",
[MSG_KEYNAME_SINGLEQUOTE] = "apostrophe",
[MSG_KEYNAME_GRAVE] = "accent",
[MSG_KEYNAME_LEFTSHFT] = "left shift",
[MSG_KEYNAME_BACKSLASH] = "back slash",
[MSG_KEYNAME_Z] = "z",
[MSG_KEYNAME_X] = "x",
[MSG_KEYNAME_C] = "c",
[MSG_KEYNAME_V] = "v",
[MSG_KEYNAME_B] = "b",
[MSG_KEYNAME_N] = "n",
[MSG_KEYNAME_M] = "m",
[MSG_KEYNAME_COMMA] = "comma",
[MSG_KEYNAME_DOT] = "dot",
[MSG_KEYNAME_SLASH] = "slash",
[MSG_KEYNAME_RIGHTSHFT] = "right shift",
[MSG_KEYNAME_KPSTAR] = "keypad asterisk",
[MSG_KEYNAME_LEFTALT] = "left alt",
[MSG_KEYNAME_SPACE] = "space",
[MSG_KEYNAME_CAPSLOCK] = "caps lock",
[MSG_KEYNAME_F1] = "f1",
[MSG_KEYNAME_F2] = "f2",
[MSG_KEYNAME_F3] = "f3",
[MSG_KEYNAME_F4] = "f4",
[MSG_KEYNAME_F5] = "f5",
[MSG_KEYNAME_F6] = "f6",
[MSG_KEYNAME_F7] = "f7",
[MSG_KEYNAME_F8] = "f8",
[MSG_KEYNAME_F9] = "f9",
[MSG_KEYNAME_F10] = "f10",
[MSG_KEYNAME_NUMLOCK] = "num lock",
[MSG_KEYNAME_SCROLLLOCK] = "scroll lock",
[MSG_KEYNAME_KP7] = "keypad 7",
[MSG_KEYNAME_KP8] = "keypad 8",
[MSG_KEYNAME_KP9] = "keypad 9",
[MSG_KEYNAME_KPMINUS] = "keypad minus",
[MSG_KEYNAME_KP4] = "keypad 4",
[MSG_KEYNAME_KP5] = "keypad 5",
[MSG_KEYNAME_KP6] = "keypad 6",
[MSG_KEYNAME_KPPLUS] = "keypad plus",
[MSG_KEYNAME_KP1] = "keypad 1",
[MSG_KEYNAME_KP2] = "keypad 2",
[MSG_KEYNAME_KP3] = "keypad 3",
[MSG_KEYNAME_KP0] = "keypad 0",
[MSG_KEYNAME_KPDOT] = "keypad dot",
[MSG_KEYNAME_103RD] = "103rd",
[MSG_KEYNAME_F13] = "f13",
[MSG_KEYNAME_102ND] = "102nd",
[MSG_KEYNAME_F11] = "f11",
[MSG_KEYNAME_F12] = "f12",
[MSG_KEYNAME_F14] = "f14",
[MSG_KEYNAME_F15] = "f15",
[MSG_KEYNAME_F16] = "f16",
[MSG_KEYNAME_F17] = "f17",
[MSG_KEYNAME_F18] = "f18",
[MSG_KEYNAME_F19] = "f19",
[MSG_KEYNAME_F20] = "f20",
[MSG_KEYNAME_KPENTER] = "keypad enter",
[MSG_KEYNAME_RIGHTCTRL] = "right control",
[MSG_KEYNAME_KPSLASH] = "keypad slash",
[MSG_KEYNAME_SYSRQ] = "sysrq",
[MSG_KEYNAME_RIGHTALT] = "right alt",
[MSG_KEYNAME_LF] = "line feed",
[MSG_KEYNAME_HOME] = "home",
[MSG_KEYNAME_UP] = "up",
[MSG_KEYNAME_PGUP] = "page up",
[MSG_KEYNAME_LEFT] = "left",
[MSG_KEYNAME_RIGHT] = "right",
[MSG_KEYNAME_END] = "end",
[MSG_KEYNAME_DOWN] = "down",
[MSG_KEYNAME_PGDN] = "page down",
[MSG_KEYNAME_INS] = "insert",
[MSG_KEYNAME_DEL] = "delete",
[MSG_KEYNAME_MACRO] = "macro",
[MSG_KEYNAME_MUTE] = "mute",
[MSG_KEYNAME_VOLDOWN] = "volume down",
[MSG_KEYNAME_VOLUP] = "volume up",
[MSG_KEYNAME_POWER] = "power",
[MSG_KEYNAME_KPEQUAL] = "keypad equal",
[MSG_KEYNAME_KPPLUSDASH] = "keypad plusminus",
[MSG_KEYNAME_PAUSE] = "pause",
[MSG_KEYNAME_F21] = "f21",
[MSG_KEYNAME_F22] = "f22",
[MSG_KEYNAME_F23] = "f23",
[MSG_KEYNAME_F24] = "f24",
[MSG_KEYNAME_KPCOMMA] = "keypad comma",
[MSG_KEYNAME_LEFTMETA] = "left meta",
[MSG_KEYNAME_RIGHTMETA] = "right meta",
[MSG_KEYNAME_COMPOSE] = "compose",
[MSG_KEYNAME_STOP] = "stop",
[MSG_KEYNAME_AGAIN] = "again",
[MSG_KEYNAME_PROPS] = "props",
[MSG_KEYNAME_UNDO] = "undo",
[MSG_KEYNAME_FRONT] = "front",
[MSG_KEYNAME_COPY] = "copy",
[MSG_KEYNAME_OPEN] = "open",
[MSG_KEYNAME_PASTE] = "paste",
[MSG_KEYNAME_FIND] = "find",
[MSG_KEYNAME_CUT] = "cut",
[MSG_KEYNAME_HELP] = "help",
[MSG_KEYNAME_MENU] = "menu",
[MSG_KEYNAME_CALC] = "calc",
[MSG_KEYNAME_SETUP] = "setup",
[MSG_KEYNAME_SLEEP] = "sleep",
[MSG_KEYNAME_WAKEUP] = "wakeup",
[MSG_KEYNAME_FILE] = "file",
[MSG_KEYNAME_SENDFILE] = "send file",
[MSG_KEYNAME_DELFILE] = "delete file",
[MSG_KEYNAME_XFER] = "transfer",
[MSG_KEYNAME_PROG1] = "prog1",
[MSG_KEYNAME_PROG2] = "prog2",
[MSG_KEYNAME_WWW] = "www",
[MSG_KEYNAME_MSDOS] = "msdos",
[MSG_KEYNAME_COFFEE] = "coffee",
[MSG_KEYNAME_DIRECTION] = "direction",
[MSG_KEYNAME_CYCLEWINDOWS] = "cycle windows",
[MSG_KEYNAME_MAIL] = "mail",
[MSG_KEYNAME_BOOKMARKS] = "bookmarks",
[MSG_KEYNAME_COMPUTER] = "computer",
[MSG_KEYNAME_BACK] = "back",
[MSG_KEYNAME_FORWARD] = "forward",
[MSG_KEYNAME_CLOSECD] = "close cd",
[MSG_KEYNAME_EJECTCD] = "eject cd",
[MSG_KEYNAME_EJECTCLOSE] = "eject close cd",
[MSG_KEYNAME_NEXTSONG] = "next song",
[MSG_KEYNAME_PLAYPAUSE] = "play pause",
[MSG_KEYNAME_PREVSONG] = "previous song",
[MSG_KEYNAME_STOPCD] = "stop cd",
[MSG_KEYNAME_RECORD] = "record",
[MSG_KEYNAME_REWIND] = "rewind",
[MSG_KEYNAME_PHONE] = "phone",
[MSG_KEYNAME_ISO] = "iso",
[MSG_KEYNAME_CONFIG] = "config",
[MSG_KEYNAME_HOMEPG] = "home page",
[MSG_KEYNAME_REFRESH] = "refresh",
[MSG_KEYNAME_EXIT] = "exit",
[MSG_KEYNAME_MOVE] = "move",
[MSG_KEYNAME_EDIT] = "edit",
[MSG_KEYNAME_SCROLLUP] = "scroll up",
[MSG_KEYNAME_SCROLLDN] = "scroll down",
[MSG_KEYNAME_KPLEFTPAR] = "keypad left paren",
[MSG_KEYNAME_KPRIGHTPAR] = "keypad right paren",
/* Function names. */
[MSG_FUNCNAME_ATTRIB_BLEEP_DEC] = "attribute bleep decrement",
[MSG_FUNCNAME_ATTRIB_BLEEP_INC] = "attribute bleep increment",
[MSG_FUNCNAME_BLEEPS_DEC] = "bleeps decrement",
[MSG_FUNCNAME_BLEEPS_INC] = "bleeps increment",
[MSG_FUNCNAME_CHAR_FIRST] = "character, first",
[MSG_FUNCNAME_CHAR_LAST] = "character, last",
[MSG_FUNCNAME_CHAR_CURRENT] = "character, say current",
[MSG_FUNCNAME_CHAR_HEX_AND_DEC] = "character, say hex and decimal",
[MSG_FUNCNAME_CHAR_NEXT] = "character, say next",
[MSG_FUNCNAME_CHAR_PHONETIC] = "character, say phonetic",
[MSG_FUNCNAME_CHAR_PREVIOUS] = "character, say previous",
[MSG_FUNCNAME_CURSOR_PARK] = "cursor park",
[MSG_FUNCNAME_CUT] = "cut",
[MSG_FUNCNAME_EDIT_DELIM] = "edit delimiters",
[MSG_FUNCNAME_EDIT_EXNUM] = "edit exnum",
[MSG_FUNCNAME_EDIT_MOST] = "edit most",
[MSG_FUNCNAME_EDIT_REPEATS] = "edit repeats",
[MSG_FUNCNAME_EDIT_SOME] = "edit some",
[MSG_FUNCNAME_GOTO] = "go to",
[MSG_FUNCNAME_GOTO_BOTTOM] = "go to bottom edge",
[MSG_FUNCNAME_GOTO_LEFT] = "go to left edge",
[MSG_FUNCNAME_GOTO_RIGHT] = "go to right edge",
[MSG_FUNCNAME_GOTO_TOP] = "go to top edge",
[MSG_FUNCNAME_HELP] = "help",
[MSG_FUNCNAME_LINE_SAY_CURRENT] = "line, say current",
[MSG_FUNCNAME_LINE_SAY_NEXT] = "line, say next",
[MSG_FUNCNAME_LINE_SAY_PREVIOUS] = "line, say previous",
[MSG_FUNCNAME_LINE_SAY_WITH_INDENT] = "line, say with indent",
[MSG_FUNCNAME_PASTE] = "paste",
[MSG_FUNCNAME_PITCH_DEC] = "pitch decrement",
[MSG_FUNCNAME_PITCH_INC] = "pitch increment",
[MSG_FUNCNAME_PUNC_DEC] = "punctuation decrement",
[MSG_FUNCNAME_PUNC_INC] = "punctuation increment",
[MSG_FUNCNAME_PUNC_LEVEL_DEC] = "punc level decrement",
[MSG_FUNCNAME_PUNC_LEVEL_INC] = "punc level increment",
[MSG_FUNCNAME_QUIET] = "quiet",
[MSG_FUNCNAME_RATE_DEC] = "rate decrement",
[MSG_FUNCNAME_RATE_INC] = "rate increment",
[MSG_FUNCNAME_READING_PUNC_DEC] = "reading punctuation decrement",
[MSG_FUNCNAME_READING_PUNC_INC] = "reading punctuation increment",
[MSG_FUNCNAME_SAY_ATTRIBUTES] = "say attributes",
[MSG_FUNCNAME_SAY_FROM_LEFT] = "say from left",
[MSG_FUNCNAME_SAY_FROM_TOP] = "say from top",
[MSG_FUNCNAME_SAY_POSITION] = "say position",
[MSG_FUNCNAME_SAY_SCREEN] = "say screen",
[MSG_FUNCNAME_SAY_TO_BOTTOM] = "say to bottom",
[MSG_FUNCNAME_SAY_TO_RIGHT] = "say to right",
[MSG_FUNCNAME_SPEAKUP] = "speakup",
[MSG_FUNCNAME_SPEAKUP_LOCK] = "speakup lock",
[MSG_FUNCNAME_SPEAKUP_OFF] = "speakup off",
[MSG_FUNCNAME_SPEECH_KILL] = "speech kill",
[MSG_FUNCNAME_SPELL_DELAY_DEC] = "spell delay decrement",
[MSG_FUNCNAME_SPELL_DELAY_INC] = "spell delay increment",
[MSG_FUNCNAME_SPELL_WORD] = "spell word",
[MSG_FUNCNAME_SPELL_WORD_PHONETICALLY] = "spell word phonetically",
[MSG_FUNCNAME_TONE_DEC] = "tone decrement",
[MSG_FUNCNAME_TONE_INC] = "tone increment",
[MSG_FUNCNAME_VOICE_DEC] = "voice decrement",
[MSG_FUNCNAME_VOICE_INC] = "voice increment",
[MSG_FUNCNAME_VOLUME_DEC] = "volume decrement",
[MSG_FUNCNAME_VOLUME_INC] = "volume increment",
[MSG_FUNCNAME_WINDOW_CLEAR] = "window, clear",
[MSG_FUNCNAME_WINDOW_SAY] = "window, say",
[MSG_FUNCNAME_WINDOW_SET] = "window, set",
[MSG_FUNCNAME_WINDOW_SILENCE] = "window, silence",
[MSG_FUNCNAME_WORD_SAY_CURRENT] = "word, say current",
[MSG_FUNCNAME_WORD_SAY_NEXT] = "word, say next",
[MSG_FUNCNAME_WORD_SAY_PREVIOUS] = "word, say previous",
};
static struct msg_group_t all_groups[] = {
{
.name = "ctl_keys",
.start = MSG_CTL_START,
.end = MSG_CTL_END,
},
{
.name = "colors",
.start = MSG_COLORS_START,
.end = MSG_COLORS_END,
},
{
.name = "formatted",
.start = MSG_FORMATTED_START,
.end = MSG_FORMATTED_END,
},
{
.name = "function_names",
.start = MSG_FUNCNAMES_START,
.end = MSG_FUNCNAMES_END,
},
{
.name = "key_names",
.start = MSG_KEYNAMES_START,
.end = MSG_KEYNAMES_END,
},
{
.name = "announcements",
.start = MSG_ANNOUNCEMENTS_START,
.end = MSG_ANNOUNCEMENTS_END,
},
{
.name = "states",
.start = MSG_STATES_START,
.end = MSG_STATES_END,
},
};
static const int num_groups = ARRAY_SIZE(all_groups);
char *spk_msg_get(enum msg_index_t index)
{
return speakup_msgs[index];
}
/*
* Function: next_specifier
* Finds the start of the next format specifier in the argument string.
* Return value: pointer to start of format
* specifier, or NULL if no specifier exists.
 */
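/*
 * Illustrative example (not from the original source): in the default
 * MSG_CHAR_INFO template "hex %02x, decimal %d", the first call returns a
 * pointer to "%02x, decimal %d"; doubled "%%" sequences are treated as
 * literal percent signs and skipped.
 */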
static char *next_specifier(char *input)
{
int found = 0;
char *next_percent = input;
while (next_percent && !found) {
next_percent = strchr(next_percent, '%');
if (next_percent) {
/* skip over doubled percent signs */
while (next_percent[0] == '%' &&
next_percent[1] == '%')
next_percent += 2;
if (*next_percent == '%')
found = 1;
else if (*next_percent == '\0')
next_percent = NULL;
}
}
return next_percent;
}
/* Skip over 0 or more flags. */
static char *skip_flags(char *input)
{
while ((*input != '\0') && strchr(" 0+-#", *input))
input++;
return input;
}
/* Skip over width.precision, if it exists. */
static char *skip_width(char *input)
{
while (isdigit(*input))
input++;
if (*input == '.') {
input++;
while (isdigit(*input))
input++;
}
return input;
}
/*
* Skip past the end of the conversion part.
* Note that this code only accepts a handful of conversion specifiers:
* c d s x and ld. Not accidental; these are exactly the ones used in
* the default group of formatted messages.
*/
static char *skip_conversion(char *input)
{
if ((input[0] == 'l') && (input[1] == 'd'))
input += 2;
else if ((*input != '\0') && strchr("cdsx", *input))
input++;
return input;
}
/*
* Function: find_specifier_end
* Return a pointer to the end of the format specifier.
*/
static char *find_specifier_end(char *input)
{
input++; /* Advance over %. */
input = skip_flags(input);
input = skip_width(input);
input = skip_conversion(input);
return input;
}
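/*
 * Illustrative example (not from the original source): for the specifier
 * "%02x", find_specifier_end() steps over '%', the flag '0', the width
 * digit '2' and the conversion 'x', returning a pointer just past the 'x'.
 */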
/*
* Function: compare_specifiers
* Compare the format specifiers pointed to by *input1 and *input2.
* Return true if they are the same, false otherwise.
* Advance *input1 and *input2 so that they point to the character following
* the end of the specifier.
*/
static bool compare_specifiers(char **input1, char **input2)
{
bool same = false;
char *end1 = find_specifier_end(*input1);
char *end2 = find_specifier_end(*input2);
size_t length1 = end1 - *input1;
size_t length2 = end2 - *input2;
if ((length1 == length2) && !memcmp(*input1, *input2, length1))
same = true;
*input1 = end1;
*input2 = end2;
return same;
}
/*
* Function: fmt_validate
* Check that two format strings contain the same number of format specifiers,
* and that the order of specifiers is the same in both strings.
* Return true if the condition holds, false if it doesn't.
*/
static bool fmt_validate(char *template, char *user)
{
bool valid = true;
bool still_comparing = true;
char *template_ptr = template;
char *user_ptr = user;
while (still_comparing && valid) {
template_ptr = next_specifier(template_ptr);
user_ptr = next_specifier(user_ptr);
if (template_ptr && user_ptr) {
/* Both have at least one more specifier. */
valid = compare_specifiers(&template_ptr, &user_ptr);
} else {
/* No more format specifiers in one or both strings. */
still_comparing = false;
/* See if one has more specifiers than the other. */
if (template_ptr || user_ptr)
valid = false;
}
}
return valid;
}
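/*
 * Illustrative example (not from the original source): against the default
 * MSG_POS_INFO template "line %ld, col %ld, t t y %d", a user string must
 * contain %ld, %ld and %d in that order; a string using "%d %ld %ld" would
 * fail validation and be rejected.
 */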
/*
* Function: msg_set
 * Description: Add a user-supplied message to the speakup_msgs array.
 * The message text is copied to a memory area allocated with kmalloc.
 * If the function fails, speakup_msgs is left untouched.
* Arguments:
* - index: a message number, as found in i18n.h.
* - text: text of message. Not NUL-terminated.
* - length: number of bytes in text.
* Failure conditions:
* -EINVAL - Invalid format specifiers in formatted message or illegal index.
* -ENOMEM - Unable to allocate memory.
*/
ssize_t spk_msg_set(enum msg_index_t index, char *text, size_t length)
{
char *newstr = NULL;
unsigned long flags;
if ((index < MSG_FIRST_INDEX) || (index >= MSG_LAST_INDEX))
return -EINVAL;
newstr = kmemdup_nul(text, length, GFP_KERNEL);
if (!newstr)
return -ENOMEM;
if (index >= MSG_FORMATTED_START &&
index <= MSG_FORMATTED_END &&
!fmt_validate(speakup_default_msgs[index], newstr)) {
kfree(newstr);
return -EINVAL;
}
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (speakup_msgs[index] != speakup_default_msgs[index])
kfree(speakup_msgs[index]);
speakup_msgs[index] = newstr;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return 0;
}
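/*
 * Usage note (illustrative, not from the original source): when replacing a
 * formatted message such as MSG_POS_INFO, the new text must keep the same
 * specifiers in the same order as the default, otherwise spk_msg_set()
 * returns -EINVAL and the existing message is kept.
 */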
/*
* Find a message group, given its name. Return a pointer to the structure
* if found, or NULL otherwise.
*/
struct msg_group_t *spk_find_msg_group(const char *group_name)
{
struct msg_group_t *group = NULL;
int i;
for (i = 0; i < num_groups; i++) {
if (!strcmp(all_groups[i].name, group_name)) {
group = &all_groups[i];
break;
}
}
return group;
}
void spk_reset_msg_group(struct msg_group_t *group)
{
unsigned long flags;
enum msg_index_t i;
spin_lock_irqsave(&speakup_info.spinlock, flags);
for (i = group->start; i <= group->end; i++) {
if (speakup_msgs[i] != speakup_default_msgs[i])
kfree(speakup_msgs[i]);
speakup_msgs[i] = speakup_default_msgs[i];
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
/* Called at initialization time, to establish default messages. */
void spk_initialize_msgs(void)
{
memcpy(speakup_msgs, speakup_default_msgs,
sizeof(speakup_default_msgs));
}
/* Free user-supplied strings when module is unloaded: */
void spk_free_user_msgs(void)
{
enum msg_index_t index;
unsigned long flags;
spin_lock_irqsave(&speakup_info.spinlock, flags);
for (index = MSG_FIRST_INDEX; index < MSG_LAST_INDEX; index++) {
if (speakup_msgs[index] != speakup_default_msgs[index]) {
kfree(speakup_msgs[index]);
speakup_msgs[index] = speakup_default_msgs[index];
}
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
| linux-master | drivers/accessibility/speakup/i18n.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <[email protected]>
* this version considerably modified by David Borowski, [email protected]
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
 * specifically written as a driver for the speakup screenreview
 * package; it's not a general device driver.
*/
#include "spk_priv.h"
#include "speakup.h"
#define DRV_VERSION "2.11"
#define SYNTH_CLEAR 0x18
#define PROCSPEECH '\r' /* process speech char */
enum default_vars_id {
CAPS_START_ID = 0, CAPS_STOP_ID,
RATE_ID, PITCH_ID,
VOL_ID, TONE_ID,
DIRECT_ID, V_LAST_VAR_ID,
NB_ID
};
static struct var_t vars[NB_ID] = {
[CAPS_START_ID] = { CAPS_START, .u.s = {"\x05P8" } },
[CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"\x05P5" } },
[RATE_ID] = { RATE, .u.n = {"\x05R%d", 5, 0, 9, 0, 0, NULL } },
[PITCH_ID] = { PITCH, .u.n = {"\x05P%d", 5, 0, 9, 0, 0, NULL } },
[VOL_ID] = { VOL, .u.n = {"\x05V%d", 5, 0, 9, 0, 0, NULL } },
[TONE_ID] = { TONE, .u.n = {"\x05T%c", 12, 0, 25, 61, 0, NULL } },
[DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
/* These attributes will appear in /sys/accessibility/speakup/txprt. */
static struct kobj_attribute caps_start_attribute =
__ATTR(caps_start, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
__ATTR(tone, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
__ATTR(vol, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
__ATTR(delay_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
__ATTR(direct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
__ATTR(trigger_time, 0644, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
&rate_attribute.attr,
&tone_attribute.attr,
&vol_attribute.attr,
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
static struct spk_synth synth_txprt = {
.name = "txprt",
.version = DRV_VERSION,
.long_name = "Transport",
.init = "\x05N1",
.procspeech = PROCSPEECH,
.clear = SYNTH_CLEAR,
.delay = 500,
.trigger = 50,
.jiffies = 50,
.full = 40000,
.dev_name = SYNTH_DEFAULT_DEV,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
.io_ops = &spk_ttyio_ops,
.probe = spk_ttyio_synth_probe,
.release = spk_ttyio_release,
.synth_immediate = spk_ttyio_synth_immediate,
.catch_up = spk_do_catch_up,
.flush = spk_synth_flush,
.is_alive = spk_synth_is_alive_restart,
.synth_adjust = NULL,
.read_buff_add = NULL,
.get_index = NULL,
.indexing = {
.command = NULL,
.lowindex = 0,
.highindex = 0,
.currindex = 0,
},
.attributes = {
.attrs = synth_attrs,
.name = "txprt",
},
};
module_param_named(ser, synth_txprt.ser, int, 0444);
module_param_named(dev, synth_txprt.dev_name, charp, 0444);
module_param_named(start, synth_txprt.startup, short, 0444);
module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444);
module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444);
module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444);
module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444);
module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
MODULE_PARM_DESC(dev, "Set the device e.g. ttyUSB0, for the synthesizer.");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
MODULE_PARM_DESC(rate, "Set the rate variable on load.");
MODULE_PARM_DESC(pitch, "Set the pitch variable on load.");
MODULE_PARM_DESC(vol, "Set the vol variable on load.");
MODULE_PARM_DESC(tone, "Set the tone variable on load.");
MODULE_PARM_DESC(direct, "Set the direct variable on load.");
module_spk_synth(synth_txprt);
MODULE_AUTHOR("Kirk Reiser <[email protected]>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for Transport synthesizers");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/accessibility/speakup/speakup_txprt.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <[email protected]>
* this version considerably modified by David Borowski, [email protected]
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
* this code is specifically written as a driver for the speakup screenreview
* package and is not a general device driver.
*/
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/serial_reg.h> /* for UART_MCR* constants */
#include "spk_priv.h"
#include "speakup.h"
#define DRV_VERSION "2.21"
#define SYNTH_CLEAR 0x18
#define PROCSPEECH '\r'
static void do_catch_up(struct spk_synth *synth);
enum default_vars_id {
CAPS_START_ID = 0, CAPS_STOP_ID,
RATE_ID, PITCH_ID,
VOL_ID, VOICE_ID, LANG_ID,
DIRECT_ID, V_LAST_VAR_ID,
NB_ID
};
static struct var_t vars[NB_ID] = {
[CAPS_START_ID] = { CAPS_START, .u.s = {"cap, " } },
[CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"" } },
[RATE_ID] = { RATE, .u.n = {"@W%d", 6, 1, 9, 0, 0, NULL } },
[PITCH_ID] = { PITCH, .u.n = {"@F%x", 10, 0, 15, 0, 0, NULL } },
[VOL_ID] = { VOL, .u.n = {"@A%x", 10, 0, 15, 0, 0, NULL } },
[VOICE_ID] = { VOICE, .u.n = {"@V%d", 1, 1, 6, 0, 0, NULL } },
[LANG_ID] = { LANG, .u.n = {"@=%d,", 1, 1, 4, 0, 0, NULL } },
[DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
/*
* These attributes will appear in /sys/accessibility/speakup/apollo.
*/
static struct kobj_attribute caps_start_attribute =
__ATTR(caps_start, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute lang_attribute =
__ATTR(lang, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
__ATTR(voice, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
__ATTR(vol, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
__ATTR(delay_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
__ATTR(direct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
__ATTR(trigger_time, 0644, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&lang_attribute.attr,
&pitch_attribute.attr,
&rate_attribute.attr,
&voice_attribute.attr,
&vol_attribute.attr,
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
static struct spk_synth synth_apollo = {
.name = "apollo",
.version = DRV_VERSION,
.long_name = "Apollo",
.init = "@R3@D0@K1\r",
.procspeech = PROCSPEECH,
.clear = SYNTH_CLEAR,
.delay = 500,
.trigger = 50,
.jiffies = 50,
.full = 40000,
.dev_name = SYNTH_DEFAULT_DEV,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
.io_ops = &spk_ttyio_ops,
.probe = spk_ttyio_synth_probe,
.release = spk_ttyio_release,
.synth_immediate = spk_ttyio_synth_immediate,
.catch_up = do_catch_up,
.flush = spk_synth_flush,
.is_alive = spk_synth_is_alive_restart,
.synth_adjust = NULL,
.read_buff_add = NULL,
.get_index = NULL,
.indexing = {
.command = NULL,
.lowindex = 0,
.highindex = 0,
.currindex = 0,
},
.attributes = {
.attrs = synth_attrs,
.name = "apollo",
},
};
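/*
 * Push buffered characters to the Apollo.  When the synthesizer refuses a
 * character, the RTS modem-control line is toggled via tiocmset() to wake
 * it up and the thread sleeps for full_time before retrying.
 */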
static void do_catch_up(struct spk_synth *synth)
{
u_char ch;
unsigned long flags;
unsigned long jiff_max;
struct var_t *jiffy_delta;
struct var_t *delay_time;
struct var_t *full_time;
int full_time_val = 0;
int delay_time_val = 0;
int jiffy_delta_val = 0;
jiffy_delta = spk_get_var(JIFFY);
delay_time = spk_get_var(DELAY);
full_time = spk_get_var(FULL);
spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
jiff_max = jiffies + jiffy_delta_val;
while (!kthread_should_stop()) {
spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
full_time_val = full_time->u.n.value;
delay_time_val = delay_time->u.n.value;
if (speakup_info.flushing) {
speakup_info.flushing = 0;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
synth->flush(synth);
continue;
}
synth_buffer_skip_nonlatin1();
if (synth_buffer_empty()) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
}
ch = synth_buffer_peek();
set_current_state(TASK_INTERRUPTIBLE);
full_time_val = full_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (!synth->io_ops->synth_out(synth, ch)) {
synth->io_ops->tiocmset(synth, 0, UART_MCR_RTS);
synth->io_ops->tiocmset(synth, UART_MCR_RTS, 0);
schedule_timeout(msecs_to_jiffies(full_time_val));
continue;
}
if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
full_time_val = full_time->u.n.value;
delay_time_val = delay_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (synth->io_ops->synth_out(synth, synth->procspeech))
schedule_timeout(msecs_to_jiffies
(delay_time_val));
else
schedule_timeout(msecs_to_jiffies
(full_time_val));
jiff_max = jiffies + jiffy_delta_val;
}
set_current_state(TASK_RUNNING);
spin_lock_irqsave(&speakup_info.spinlock, flags);
synth_buffer_getc();
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
synth->io_ops->synth_out(synth, PROCSPEECH);
}
module_param_named(ser, synth_apollo.ser, int, 0444);
module_param_named(dev, synth_apollo.dev_name, charp, 0444);
module_param_named(start, synth_apollo.startup, short, 0444);
module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444);
module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444);
module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444);
module_param_named(voice, vars[VOICE_ID].u.n.default_val, int, 0444);
module_param_named(lang, vars[LANG_ID].u.n.default_val, int, 0444);
module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
MODULE_PARM_DESC(dev, "Set the device e.g. ttyUSB0, for the synthesizer.");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
MODULE_PARM_DESC(rate, "Set the rate variable on load.");
MODULE_PARM_DESC(pitch, "Set the pitch variable on load.");
MODULE_PARM_DESC(vol, "Set the vol variable on load.");
MODULE_PARM_DESC(voice, "Set the voice variable on load.");
MODULE_PARM_DESC(lang, "Set the lang variable on load.");
MODULE_PARM_DESC(direct, "Set the direct variable on load.");
module_spk_synth(synth_apollo);
MODULE_AUTHOR("Kirk Reiser <[email protected]>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for Apollo II synthesizer");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/accessibility/speakup/speakup_apollo.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Alibaba DDR Sub-System Driveway PMU driver
*
* Copyright (C) 2022 Alibaba Inc
*/
#define ALI_DRW_PMUNAME "ali_drw"
#define ALI_DRW_DRVNAME ALI_DRW_PMUNAME "_pmu"
#define pr_fmt(fmt) ALI_DRW_DRVNAME ": " fmt
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/refcount.h>
#define ALI_DRW_PMU_COMMON_MAX_COUNTERS 16
#define ALI_DRW_PMU_TEST_SEL_COMMON_COUNTER_BASE 19
#define ALI_DRW_PMU_PA_SHIFT 12
#define ALI_DRW_PMU_CNT_INIT 0x00000000
#define ALI_DRW_CNT_MAX_PERIOD 0xffffffff
#define ALI_DRW_PMU_CYCLE_EVT_ID 0x80
#define ALI_DRW_PMU_CNT_CTRL 0xC00
#define ALI_DRW_PMU_CNT_RST BIT(2)
#define ALI_DRW_PMU_CNT_STOP BIT(1)
#define ALI_DRW_PMU_CNT_START BIT(0)
#define ALI_DRW_PMU_CNT_STATE 0xC04
#define ALI_DRW_PMU_TEST_CTRL 0xC08
#define ALI_DRW_PMU_CNT_PRELOAD 0xC0C
#define ALI_DRW_PMU_CYCLE_CNT_HIGH_MASK GENMASK(23, 0)
#define ALI_DRW_PMU_CYCLE_CNT_LOW_MASK GENMASK(31, 0)
#define ALI_DRW_PMU_CYCLE_CNT_HIGH 0xC10
#define ALI_DRW_PMU_CYCLE_CNT_LOW 0xC14
/* PMU event select registers SEL0-SEL3: each 32-bit register packs four 8-bit per-counter fields, on a 4-byte stride */
#define ALI_DRW_PMU_EVENT_SEL0 0xC68
/* counters 0-3 use sel0, counters 4-7 use sel1, ... */
#define ALI_DRW_PMU_EVENT_SELn(n) \
(ALI_DRW_PMU_EVENT_SEL0 + (n / 4) * 0x4)
#define ALI_DRW_PMCOM_CNT_EN BIT(7)
#define ALI_DRW_PMCOM_CNT_EVENT_MASK GENMASK(5, 0)
#define ALI_DRW_PMCOM_CNT_EVENT_OFFSET(n) \
(8 * (n % 4))
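/*
 * Illustrative example (not from the original source): counter 6 is
 * programmed through ALI_DRW_PMU_EVENT_SELn(6) = 0xC6C, with its 8-bit
 * event/enable field at bit offset 8 * (6 % 4) = 16.
 */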
/* PMU common counters 0-15: one 32-bit register per counter, on a 4-byte stride */
#define ALI_DRW_PMU_COMMON_COUNTER0 0xC78
#define ALI_DRW_PMU_COMMON_COUNTERn(n) \
(ALI_DRW_PMU_COMMON_COUNTER0 + 0x4 * (n))
#define ALI_DRW_PMU_OV_INTR_ENABLE_CTL 0xCB8
#define ALI_DRW_PMU_OV_INTR_DISABLE_CTL 0xCBC
#define ALI_DRW_PMU_OV_INTR_ENABLE_STATUS 0xCC0
#define ALI_DRW_PMU_OV_INTR_CLR 0xCC4
#define ALI_DRW_PMU_OV_INTR_STATUS 0xCC8
#define ALI_DRW_PMCOM_CNT_OV_INTR_MASK GENMASK(23, 8)
#define ALI_DRW_PMBW_CNT_OV_INTR_MASK GENMASK(7, 0)
#define ALI_DRW_PMU_OV_INTR_MASK GENMASK_ULL(63, 0)
static int ali_drw_cpuhp_state_num;
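/*
 * Several PMU instances may share one overflow interrupt line.  Each
 * distinct irq number gets a refcounted ali_drw_pmu_irq entry on the
 * ali_drw_pmu_irqs list, and every PMU hanging off that line is linked into
 * the entry's pmus_node list so the ISR can walk them under RCU.
 */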
static LIST_HEAD(ali_drw_pmu_irqs);
static DEFINE_MUTEX(ali_drw_pmu_irqs_lock);
struct ali_drw_pmu_irq {
struct hlist_node node;
struct list_head irqs_node;
struct list_head pmus_node;
int irq_num;
int cpu;
refcount_t refcount;
};
struct ali_drw_pmu {
void __iomem *cfg_base;
struct device *dev;
struct list_head pmus_node;
struct ali_drw_pmu_irq *irq;
int irq_num;
int cpu;
DECLARE_BITMAP(used_mask, ALI_DRW_PMU_COMMON_MAX_COUNTERS);
struct perf_event *events[ALI_DRW_PMU_COMMON_MAX_COUNTERS];
int evtids[ALI_DRW_PMU_COMMON_MAX_COUNTERS];
struct pmu pmu;
};
#define to_ali_drw_pmu(p) (container_of(p, struct ali_drw_pmu, pmu))
#define DRW_CONFIG_EVENTID GENMASK(7, 0)
#define GET_DRW_EVENTID(event) FIELD_GET(DRW_CONFIG_EVENTID, (event)->attr.config)
static ssize_t ali_drw_pmu_format_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *eattr;
eattr = container_of(attr, struct dev_ext_attribute, attr);
return sprintf(buf, "%s\n", (char *)eattr->var);
}
/*
* PMU event attributes
*/
static ssize_t ali_drw_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct dev_ext_attribute *eattr;
eattr = container_of(attr, struct dev_ext_attribute, attr);
return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var);
}
#define ALI_DRW_PMU_ATTR(_name, _func, _config) \
(&((struct dev_ext_attribute[]) { \
{ __ATTR(_name, 0444, _func, NULL), (void *)_config } \
})[0].attr.attr)
#define ALI_DRW_PMU_FORMAT_ATTR(_name, _config) \
ALI_DRW_PMU_ATTR(_name, ali_drw_pmu_format_show, (void *)_config)
#define ALI_DRW_PMU_EVENT_ATTR(_name, _config) \
ALI_DRW_PMU_ATTR(_name, ali_drw_pmu_event_show, (unsigned long)_config)
static struct attribute *ali_drw_pmu_events_attrs[] = {
ALI_DRW_PMU_EVENT_ATTR(hif_rd_or_wr, 0x0),
ALI_DRW_PMU_EVENT_ATTR(hif_wr, 0x1),
ALI_DRW_PMU_EVENT_ATTR(hif_rd, 0x2),
ALI_DRW_PMU_EVENT_ATTR(hif_rmw, 0x3),
ALI_DRW_PMU_EVENT_ATTR(hif_hi_pri_rd, 0x4),
ALI_DRW_PMU_EVENT_ATTR(dfi_wr_data_cycles, 0x7),
ALI_DRW_PMU_EVENT_ATTR(dfi_rd_data_cycles, 0x8),
ALI_DRW_PMU_EVENT_ATTR(hpr_xact_when_critical, 0x9),
ALI_DRW_PMU_EVENT_ATTR(lpr_xact_when_critical, 0xA),
ALI_DRW_PMU_EVENT_ATTR(wr_xact_when_critical, 0xB),
ALI_DRW_PMU_EVENT_ATTR(op_is_activate, 0xC),
ALI_DRW_PMU_EVENT_ATTR(op_is_rd_or_wr, 0xD),
ALI_DRW_PMU_EVENT_ATTR(op_is_rd_activate, 0xE),
ALI_DRW_PMU_EVENT_ATTR(op_is_rd, 0xF),
ALI_DRW_PMU_EVENT_ATTR(op_is_wr, 0x10),
ALI_DRW_PMU_EVENT_ATTR(op_is_mwr, 0x11),
ALI_DRW_PMU_EVENT_ATTR(op_is_precharge, 0x12),
ALI_DRW_PMU_EVENT_ATTR(precharge_for_rdwr, 0x13),
ALI_DRW_PMU_EVENT_ATTR(precharge_for_other, 0x14),
ALI_DRW_PMU_EVENT_ATTR(rdwr_transitions, 0x15),
ALI_DRW_PMU_EVENT_ATTR(write_combine, 0x16),
ALI_DRW_PMU_EVENT_ATTR(war_hazard, 0x17),
ALI_DRW_PMU_EVENT_ATTR(raw_hazard, 0x18),
ALI_DRW_PMU_EVENT_ATTR(waw_hazard, 0x19),
ALI_DRW_PMU_EVENT_ATTR(op_is_enter_selfref_rk0, 0x1A),
ALI_DRW_PMU_EVENT_ATTR(op_is_enter_selfref_rk1, 0x1B),
ALI_DRW_PMU_EVENT_ATTR(op_is_enter_selfref_rk2, 0x1C),
ALI_DRW_PMU_EVENT_ATTR(op_is_enter_selfref_rk3, 0x1D),
ALI_DRW_PMU_EVENT_ATTR(op_is_enter_powerdown_rk0, 0x1E),
ALI_DRW_PMU_EVENT_ATTR(op_is_enter_powerdown_rk1, 0x1F),
ALI_DRW_PMU_EVENT_ATTR(op_is_enter_powerdown_rk2, 0x20),
ALI_DRW_PMU_EVENT_ATTR(op_is_enter_powerdown_rk3, 0x21),
ALI_DRW_PMU_EVENT_ATTR(selfref_mode_rk0, 0x26),
ALI_DRW_PMU_EVENT_ATTR(selfref_mode_rk1, 0x27),
ALI_DRW_PMU_EVENT_ATTR(selfref_mode_rk2, 0x28),
ALI_DRW_PMU_EVENT_ATTR(selfref_mode_rk3, 0x29),
ALI_DRW_PMU_EVENT_ATTR(op_is_refresh, 0x2A),
ALI_DRW_PMU_EVENT_ATTR(op_is_crit_ref, 0x2B),
ALI_DRW_PMU_EVENT_ATTR(op_is_load_mode, 0x2D),
ALI_DRW_PMU_EVENT_ATTR(op_is_zqcl, 0x2E),
ALI_DRW_PMU_EVENT_ATTR(visible_window_limit_reached_rd, 0x30),
ALI_DRW_PMU_EVENT_ATTR(visible_window_limit_reached_wr, 0x31),
ALI_DRW_PMU_EVENT_ATTR(op_is_dqsosc_mpc, 0x34),
ALI_DRW_PMU_EVENT_ATTR(op_is_dqsosc_mrr, 0x35),
ALI_DRW_PMU_EVENT_ATTR(op_is_tcr_mrr, 0x36),
ALI_DRW_PMU_EVENT_ATTR(op_is_zqstart, 0x37),
ALI_DRW_PMU_EVENT_ATTR(op_is_zqlatch, 0x38),
ALI_DRW_PMU_EVENT_ATTR(chi_txreq, 0x39),
ALI_DRW_PMU_EVENT_ATTR(chi_txdat, 0x3A),
ALI_DRW_PMU_EVENT_ATTR(chi_rxdat, 0x3B),
ALI_DRW_PMU_EVENT_ATTR(chi_rxrsp, 0x3C),
ALI_DRW_PMU_EVENT_ATTR(tsz_vio, 0x3D),
ALI_DRW_PMU_EVENT_ATTR(cycle, 0x80),
NULL,
};
static struct attribute_group ali_drw_pmu_events_attr_group = {
.name = "events",
.attrs = ali_drw_pmu_events_attrs,
};
static struct attribute *ali_drw_pmu_format_attr[] = {
ALI_DRW_PMU_FORMAT_ATTR(event, "config:0-7"),
NULL,
};
static const struct attribute_group ali_drw_pmu_format_group = {
.name = "format",
.attrs = ali_drw_pmu_format_attr,
};
static ssize_t ali_drw_pmu_cpumask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(dev_get_drvdata(dev));
return cpumap_print_to_pagebuf(true, buf, cpumask_of(drw_pmu->cpu));
}
static struct device_attribute ali_drw_pmu_cpumask_attr =
__ATTR(cpumask, 0444, ali_drw_pmu_cpumask_show, NULL);
static struct attribute *ali_drw_pmu_cpumask_attrs[] = {
&ali_drw_pmu_cpumask_attr.attr,
NULL,
};
static const struct attribute_group ali_drw_pmu_cpumask_attr_group = {
.attrs = ali_drw_pmu_cpumask_attrs,
};
static ssize_t ali_drw_pmu_identifier_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
return sysfs_emit(page, "%s\n", "ali_drw_pmu");
}
static umode_t ali_drw_pmu_identifier_attr_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
return attr->mode;
}
static struct device_attribute ali_drw_pmu_identifier_attr =
__ATTR(identifier, 0444, ali_drw_pmu_identifier_show, NULL);
static struct attribute *ali_drw_pmu_identifier_attrs[] = {
&ali_drw_pmu_identifier_attr.attr,
NULL
};
static const struct attribute_group ali_drw_pmu_identifier_attr_group = {
.attrs = ali_drw_pmu_identifier_attrs,
.is_visible = ali_drw_pmu_identifier_attr_visible
};
static const struct attribute_group *ali_drw_pmu_attr_groups[] = {
&ali_drw_pmu_events_attr_group,
&ali_drw_pmu_cpumask_attr_group,
&ali_drw_pmu_format_group,
&ali_drw_pmu_identifier_attr_group,
NULL,
};
/* Find a free counter for the event; in the add callback, hw.idx will be set to this counter. */
static int ali_drw_get_counter_idx(struct perf_event *event)
{
struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
int idx;
for (idx = 0; idx < ALI_DRW_PMU_COMMON_MAX_COUNTERS; ++idx) {
if (!test_and_set_bit(idx, drw_pmu->used_mask))
return idx;
}
/* The counters are all in use. */
return -EBUSY;
}
static u64 ali_drw_pmu_read_counter(struct perf_event *event)
{
struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
u64 cycle_high, cycle_low;
if (GET_DRW_EVENTID(event) == ALI_DRW_PMU_CYCLE_EVT_ID) {
cycle_high = readl(drw_pmu->cfg_base + ALI_DRW_PMU_CYCLE_CNT_HIGH);
cycle_high &= ALI_DRW_PMU_CYCLE_CNT_HIGH_MASK;
cycle_low = readl(drw_pmu->cfg_base + ALI_DRW_PMU_CYCLE_CNT_LOW);
cycle_low &= ALI_DRW_PMU_CYCLE_CNT_LOW_MASK;
return (cycle_high << 32 | cycle_low);
}
return readl(drw_pmu->cfg_base +
ALI_DRW_PMU_COMMON_COUNTERn(event->hw.idx));
}
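/*
 * Illustrative note (not from the original source): in the update path
 * below, the common counters are 32 bits wide, so masking the delta with
 * ALI_DRW_CNT_MAX_PERIOD keeps it correct across a single wrap, e.g.
 * prev = 0xfffffff0 and now = 0x10 yields a delta of 0x20.
 */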
static void ali_drw_pmu_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
u64 delta, prev, now;
do {
prev = local64_read(&hwc->prev_count);
now = ali_drw_pmu_read_counter(event);
} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
/* handle overflow. */
delta = now - prev;
if (GET_DRW_EVENTID(event) == ALI_DRW_PMU_CYCLE_EVT_ID)
delta &= ALI_DRW_PMU_OV_INTR_MASK;
else
delta &= ALI_DRW_CNT_MAX_PERIOD;
local64_add(delta, &event->count);
}
static void ali_drw_pmu_event_set_period(struct perf_event *event)
{
u64 pre_val;
struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
	/* set a preload counter for test purposes */
writel(ALI_DRW_PMU_TEST_SEL_COMMON_COUNTER_BASE + event->hw.idx,
drw_pmu->cfg_base + ALI_DRW_PMU_TEST_CTRL);
	/* set counter initial value */
pre_val = ALI_DRW_PMU_CNT_INIT;
writel(pre_val, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_PRELOAD);
local64_set(&event->hw.prev_count, pre_val);
/* set sel mode to zero to start test */
writel(0x0, drw_pmu->cfg_base + ALI_DRW_PMU_TEST_CTRL);
}
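/*
 * Each EVENT_SEL register packs the 8-bit event/enable fields of four
 * counters, so the enable and disable helpers below read-modify-write only
 * the byte belonging to the counter in question.
 */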
static void ali_drw_pmu_enable_counter(struct perf_event *event)
{
u32 val, subval, reg, shift;
int counter = event->hw.idx;
struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
reg = ALI_DRW_PMU_EVENT_SELn(counter);
val = readl(drw_pmu->cfg_base + reg);
subval = FIELD_PREP(ALI_DRW_PMCOM_CNT_EN, 1) |
FIELD_PREP(ALI_DRW_PMCOM_CNT_EVENT_MASK, drw_pmu->evtids[counter]);
shift = ALI_DRW_PMCOM_CNT_EVENT_OFFSET(counter);
val &= ~(GENMASK(7, 0) << shift);
val |= subval << shift;
writel(val, drw_pmu->cfg_base + reg);
}
static void ali_drw_pmu_disable_counter(struct perf_event *event)
{
u32 val, reg, subval, shift;
struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
int counter = event->hw.idx;
reg = ALI_DRW_PMU_EVENT_SELn(counter);
val = readl(drw_pmu->cfg_base + reg);
subval = FIELD_PREP(ALI_DRW_PMCOM_CNT_EN, 0) |
FIELD_PREP(ALI_DRW_PMCOM_CNT_EVENT_MASK, 0);
shift = ALI_DRW_PMCOM_CNT_EVENT_OFFSET(counter);
val &= ~(GENMASK(7, 0) << shift);
val |= subval << shift;
writel(val, drw_pmu->cfg_base + reg);
}
static irqreturn_t ali_drw_pmu_isr(int irq_num, void *data)
{
struct ali_drw_pmu_irq *irq = data;
struct ali_drw_pmu *drw_pmu;
irqreturn_t ret = IRQ_NONE;
rcu_read_lock();
list_for_each_entry_rcu(drw_pmu, &irq->pmus_node, pmus_node) {
unsigned long status, clr_status;
struct perf_event *event;
unsigned int idx;
for (idx = 0; idx < ALI_DRW_PMU_COMMON_MAX_COUNTERS; idx++) {
event = drw_pmu->events[idx];
if (!event)
continue;
ali_drw_pmu_disable_counter(event);
}
/* common counter intr status */
status = readl(drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_STATUS);
status = FIELD_GET(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, status);
if (status) {
for_each_set_bit(idx, &status,
ALI_DRW_PMU_COMMON_MAX_COUNTERS) {
event = drw_pmu->events[idx];
if (WARN_ON_ONCE(!event))
continue;
ali_drw_pmu_event_update(event);
ali_drw_pmu_event_set_period(event);
}
/* clear common counter intr status */
clr_status = FIELD_PREP(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, 1);
writel(clr_status,
drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_CLR);
}
for (idx = 0; idx < ALI_DRW_PMU_COMMON_MAX_COUNTERS; idx++) {
event = drw_pmu->events[idx];
if (!event)
continue;
if (!(event->hw.state & PERF_HES_STOPPED))
ali_drw_pmu_enable_counter(event);
}
if (status)
ret = IRQ_HANDLED;
}
rcu_read_unlock();
return ret;
}
static struct ali_drw_pmu_irq *
__ali_drw_pmu_init_irq(struct platform_device *pdev, int irq_num)
{
int ret;
struct ali_drw_pmu_irq *irq;
list_for_each_entry(irq, &ali_drw_pmu_irqs, irqs_node) {
if (irq->irq_num == irq_num
&& refcount_inc_not_zero(&irq->refcount))
return irq;
}
irq = kzalloc(sizeof(*irq), GFP_KERNEL);
if (!irq)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&irq->pmus_node);
/* Pick one CPU to be the preferred one to use */
irq->cpu = smp_processor_id();
refcount_set(&irq->refcount, 1);
	/*
	 * FIXME: one of the DDRSS Driveway PMU overflow interrupts shares its
	 * irq number with the MPAM ERR_IRQ. To register both the DDRSS PMU and
	 * MPAM drivers successfully, add the IRQF_SHARED flag. However, a PMU
	 * interrupt should not be shared with other components.
	 */
ret = devm_request_irq(&pdev->dev, irq_num, ali_drw_pmu_isr,
IRQF_SHARED, dev_name(&pdev->dev), irq);
if (ret < 0) {
dev_err(&pdev->dev,
"Fail to request IRQ:%d ret:%d\n", irq_num, ret);
goto out_free;
}
ret = irq_set_affinity_hint(irq_num, cpumask_of(irq->cpu));
if (ret)
goto out_free;
ret = cpuhp_state_add_instance_nocalls(ali_drw_cpuhp_state_num,
&irq->node);
if (ret)
goto out_free;
irq->irq_num = irq_num;
list_add(&irq->irqs_node, &ali_drw_pmu_irqs);
return irq;
out_free:
kfree(irq);
return ERR_PTR(ret);
}
static int ali_drw_pmu_init_irq(struct ali_drw_pmu *drw_pmu,
struct platform_device *pdev)
{
int irq_num;
struct ali_drw_pmu_irq *irq;
/* Read and init IRQ */
irq_num = platform_get_irq(pdev, 0);
if (irq_num < 0)
return irq_num;
mutex_lock(&ali_drw_pmu_irqs_lock);
irq = __ali_drw_pmu_init_irq(pdev, irq_num);
mutex_unlock(&ali_drw_pmu_irqs_lock);
if (IS_ERR(irq))
return PTR_ERR(irq);
drw_pmu->irq = irq;
mutex_lock(&ali_drw_pmu_irqs_lock);
list_add_rcu(&drw_pmu->pmus_node, &irq->pmus_node);
mutex_unlock(&ali_drw_pmu_irqs_lock);
return 0;
}
static void ali_drw_pmu_uninit_irq(struct ali_drw_pmu *drw_pmu)
{
struct ali_drw_pmu_irq *irq = drw_pmu->irq;
mutex_lock(&ali_drw_pmu_irqs_lock);
list_del_rcu(&drw_pmu->pmus_node);
if (!refcount_dec_and_test(&irq->refcount)) {
mutex_unlock(&ali_drw_pmu_irqs_lock);
return;
}
list_del(&irq->irqs_node);
mutex_unlock(&ali_drw_pmu_irqs_lock);
WARN_ON(irq_set_affinity_hint(irq->irq_num, NULL));
cpuhp_state_remove_instance_nocalls(ali_drw_cpuhp_state_num,
&irq->node);
kfree(irq);
}
static int ali_drw_pmu_event_init(struct perf_event *event)
{
struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
struct perf_event *sibling;
struct device *dev = drw_pmu->pmu.dev;
if (event->attr.type != event->pmu->type)
return -ENOENT;
if (is_sampling_event(event)) {
dev_err(dev, "Sampling not supported!\n");
return -EOPNOTSUPP;
}
if (event->attach_state & PERF_ATTACH_TASK) {
dev_err(dev, "Per-task counter cannot allocate!\n");
return -EOPNOTSUPP;
}
event->cpu = drw_pmu->cpu;
if (event->cpu < 0) {
dev_err(dev, "Per-task mode not supported!\n");
return -EOPNOTSUPP;
}
if (event->group_leader != event &&
!is_software_event(event->group_leader)) {
dev_err(dev, "driveway only allow one event!\n");
return -EINVAL;
}
for_each_sibling_event(sibling, event->group_leader) {
if (sibling != event && !is_software_event(sibling)) {
dev_err(dev, "driveway event not allowed!\n");
return -EINVAL;
}
}
/* reset all the pmu counters */
writel(ALI_DRW_PMU_CNT_RST, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
hwc->idx = -1;
return 0;
}
static void ali_drw_pmu_start(struct perf_event *event, int flags)
{
struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
event->hw.state = 0;
if (GET_DRW_EVENTID(event) == ALI_DRW_PMU_CYCLE_EVT_ID) {
writel(ALI_DRW_PMU_CNT_START,
drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
return;
}
ali_drw_pmu_event_set_period(event);
if (flags & PERF_EF_RELOAD) {
unsigned long prev_raw_count =
local64_read(&event->hw.prev_count);
writel(prev_raw_count,
drw_pmu->cfg_base + ALI_DRW_PMU_CNT_PRELOAD);
}
ali_drw_pmu_enable_counter(event);
writel(ALI_DRW_PMU_CNT_START, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
}
static void ali_drw_pmu_stop(struct perf_event *event, int flags)
{
struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
if (event->hw.state & PERF_HES_STOPPED)
return;
if (GET_DRW_EVENTID(event) != ALI_DRW_PMU_CYCLE_EVT_ID)
ali_drw_pmu_disable_counter(event);
writel(ALI_DRW_PMU_CNT_STOP, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
ali_drw_pmu_event_update(event);
event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
static int ali_drw_pmu_add(struct perf_event *event, int flags)
{
struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = -1;
int evtid;
evtid = GET_DRW_EVENTID(event);
if (evtid != ALI_DRW_PMU_CYCLE_EVT_ID) {
idx = ali_drw_get_counter_idx(event);
if (idx < 0)
return idx;
drw_pmu->events[idx] = event;
drw_pmu->evtids[idx] = evtid;
}
hwc->idx = idx;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (flags & PERF_EF_START)
ali_drw_pmu_start(event, PERF_EF_RELOAD);
/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);
return 0;
}
static void ali_drw_pmu_del(struct perf_event *event, int flags)
{
struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
ali_drw_pmu_stop(event, PERF_EF_UPDATE);
if (idx >= 0 && idx < ALI_DRW_PMU_COMMON_MAX_COUNTERS) {
drw_pmu->events[idx] = NULL;
drw_pmu->evtids[idx] = 0;
clear_bit(idx, drw_pmu->used_mask);
}
perf_event_update_userpage(event);
}
static void ali_drw_pmu_read(struct perf_event *event)
{
ali_drw_pmu_event_update(event);
}
static int ali_drw_pmu_probe(struct platform_device *pdev)
{
struct ali_drw_pmu *drw_pmu;
struct resource *res;
char *name;
int ret;
drw_pmu = devm_kzalloc(&pdev->dev, sizeof(*drw_pmu), GFP_KERNEL);
if (!drw_pmu)
return -ENOMEM;
drw_pmu->dev = &pdev->dev;
platform_set_drvdata(pdev, drw_pmu);
drw_pmu->cfg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(drw_pmu->cfg_base))
return PTR_ERR(drw_pmu->cfg_base);
name = devm_kasprintf(drw_pmu->dev, GFP_KERNEL, "ali_drw_%llx",
(u64) (res->start >> ALI_DRW_PMU_PA_SHIFT));
if (!name)
return -ENOMEM;
writel(ALI_DRW_PMU_CNT_RST, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
/* enable the generation of interrupt by all common counters */
writel(ALI_DRW_PMCOM_CNT_OV_INTR_MASK,
drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_ENABLE_CTL);
/* clearing interrupt status */
writel(0xffffff, drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_CLR);
drw_pmu->cpu = smp_processor_id();
ret = ali_drw_pmu_init_irq(drw_pmu, pdev);
if (ret)
return ret;
drw_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
.task_ctx_nr = perf_invalid_context,
.event_init = ali_drw_pmu_event_init,
.add = ali_drw_pmu_add,
.del = ali_drw_pmu_del,
.start = ali_drw_pmu_start,
.stop = ali_drw_pmu_stop,
.read = ali_drw_pmu_read,
.attr_groups = ali_drw_pmu_attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
ret = perf_pmu_register(&drw_pmu->pmu, name, -1);
if (ret) {
		dev_err(drw_pmu->dev, "DRW Driveway PMU register failed!\n");
ali_drw_pmu_uninit_irq(drw_pmu);
}
return ret;
}
static int ali_drw_pmu_remove(struct platform_device *pdev)
{
struct ali_drw_pmu *drw_pmu = platform_get_drvdata(pdev);
/* disable the generation of interrupt by all common counters */
writel(ALI_DRW_PMCOM_CNT_OV_INTR_MASK,
drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_DISABLE_CTL);
ali_drw_pmu_uninit_irq(drw_pmu);
perf_pmu_unregister(&drw_pmu->pmu);
return 0;
}
static int ali_drw_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct ali_drw_pmu_irq *irq;
struct ali_drw_pmu *drw_pmu;
unsigned int target;
int ret;
cpumask_t node_online_cpus;
irq = hlist_entry_safe(node, struct ali_drw_pmu_irq, node);
if (cpu != irq->cpu)
return 0;
ret = cpumask_and(&node_online_cpus,
cpumask_of_node(cpu_to_node(cpu)), cpu_online_mask);
if (ret)
target = cpumask_any_but(&node_online_cpus, cpu);
else
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
/* We're only reading, but this isn't the place to be involving RCU */
mutex_lock(&ali_drw_pmu_irqs_lock);
list_for_each_entry(drw_pmu, &irq->pmus_node, pmus_node)
perf_pmu_migrate_context(&drw_pmu->pmu, irq->cpu, target);
mutex_unlock(&ali_drw_pmu_irqs_lock);
WARN_ON(irq_set_affinity_hint(irq->irq_num, cpumask_of(target)));
irq->cpu = target;
return 0;
}
/*
 * For historical reasons, the HID used in the production environment is
 * ARMHD700, so we keep ARMHD700 as a compatible ID.
*/
static const struct acpi_device_id ali_drw_acpi_match[] = {
{"BABA5000", 0},
{"ARMHD700", 0},
{}
};
MODULE_DEVICE_TABLE(acpi, ali_drw_acpi_match);
static struct platform_driver ali_drw_pmu_driver = {
.driver = {
.name = "ali_drw_pmu",
.acpi_match_table = ali_drw_acpi_match,
},
.probe = ali_drw_pmu_probe,
.remove = ali_drw_pmu_remove,
};
static int __init ali_drw_pmu_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
"ali_drw_pmu:online",
NULL, ali_drw_pmu_offline_cpu);
if (ret < 0) {
pr_err("DRW Driveway PMU: setup hotplug failed, ret = %d\n",
ret);
return ret;
}
ali_drw_cpuhp_state_num = ret;
ret = platform_driver_register(&ali_drw_pmu_driver);
if (ret)
cpuhp_remove_multi_state(ali_drw_cpuhp_state_num);
return ret;
}
static void __exit ali_drw_pmu_exit(void)
{
platform_driver_unregister(&ali_drw_pmu_driver);
cpuhp_remove_multi_state(ali_drw_cpuhp_state_num);
}
module_init(ali_drw_pmu_init);
module_exit(ali_drw_pmu_exit);
MODULE_AUTHOR("Hongbo Yao <[email protected]>");
MODULE_AUTHOR("Neng Chen <[email protected]>");
MODULE_AUTHOR("Shuai Xue <[email protected]>");
MODULE_DESCRIPTION("Alibaba DDR Sub-System Driveway PMU driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/perf/alibaba_uncore_drw_pmu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* RISC-V performance counter support.
*
* Copyright (C) 2021 Western Digital Corporation or its affiliates.
*
* This code is based on ARM perf event code which is in turn based on
* sparc64 and x86 code.
*/
#define pr_fmt(fmt) "riscv-pmu-sbi: " fmt
#include <linux/mod_devicetable.h>
#include <linux/perf/riscv_pmu.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of.h>
#include <linux/cpu_pm.h>
#include <linux/sched/clock.h>
#include <asm/errata_list.h>
#include <asm/sbi.h>
#include <asm/hwcap.h>
#define SYSCTL_NO_USER_ACCESS 0
#define SYSCTL_USER_ACCESS 1
#define SYSCTL_LEGACY 2
#define PERF_EVENT_FLAG_NO_USER_ACCESS BIT(SYSCTL_NO_USER_ACCESS)
#define PERF_EVENT_FLAG_USER_ACCESS BIT(SYSCTL_USER_ACCESS)
#define PERF_EVENT_FLAG_LEGACY BIT(SYSCTL_LEGACY)
PMU_FORMAT_ATTR(event, "config:0-47");
PMU_FORMAT_ATTR(firmware, "config:63");
static struct attribute *riscv_arch_formats_attr[] = {
&format_attr_event.attr,
&format_attr_firmware.attr,
NULL,
};
static struct attribute_group riscv_pmu_format_group = {
.name = "format",
.attrs = riscv_arch_formats_attr,
};
static const struct attribute_group *riscv_pmu_attr_groups[] = {
&riscv_pmu_format_group,
NULL,
};
/* Allow user mode access by default */
static int sysctl_perf_user_access __read_mostly = SYSCTL_USER_ACCESS;
/*
* RISC-V doesn't have heterogeneous harts yet. This needs to become
* per-CPU data once harts with different PMU counters are supported.
*/
static union sbi_pmu_ctr_info *pmu_ctr_list;
static bool riscv_pmu_use_irq;
static unsigned int riscv_pmu_irq_num;
static unsigned int riscv_pmu_irq;
/* Cache the available counters in a bitmask */
static unsigned long cmask;
struct sbi_pmu_event_data {
union {
union {
struct hw_gen_event {
uint32_t event_code:16;
uint32_t event_type:4;
uint32_t reserved:12;
} hw_gen_event;
struct hw_cache_event {
uint32_t result_id:1;
uint32_t op_id:2;
uint32_t cache_id:13;
uint32_t event_type:4;
uint32_t reserved:12;
} hw_cache_event;
};
uint32_t event_idx;
};
};
static const struct sbi_pmu_event_data pmu_hw_event_map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = {.hw_gen_event = {
SBI_PMU_HW_CPU_CYCLES,
SBI_PMU_EVENT_TYPE_HW, 0}},
[PERF_COUNT_HW_INSTRUCTIONS] = {.hw_gen_event = {
SBI_PMU_HW_INSTRUCTIONS,
SBI_PMU_EVENT_TYPE_HW, 0}},
[PERF_COUNT_HW_CACHE_REFERENCES] = {.hw_gen_event = {
SBI_PMU_HW_CACHE_REFERENCES,
SBI_PMU_EVENT_TYPE_HW, 0}},
[PERF_COUNT_HW_CACHE_MISSES] = {.hw_gen_event = {
SBI_PMU_HW_CACHE_MISSES,
SBI_PMU_EVENT_TYPE_HW, 0}},
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {.hw_gen_event = {
SBI_PMU_HW_BRANCH_INSTRUCTIONS,
SBI_PMU_EVENT_TYPE_HW, 0}},
[PERF_COUNT_HW_BRANCH_MISSES] = {.hw_gen_event = {
SBI_PMU_HW_BRANCH_MISSES,
SBI_PMU_EVENT_TYPE_HW, 0}},
[PERF_COUNT_HW_BUS_CYCLES] = {.hw_gen_event = {
SBI_PMU_HW_BUS_CYCLES,
SBI_PMU_EVENT_TYPE_HW, 0}},
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {.hw_gen_event = {
SBI_PMU_HW_STALLED_CYCLES_FRONTEND,
SBI_PMU_EVENT_TYPE_HW, 0}},
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {.hw_gen_event = {
SBI_PMU_HW_STALLED_CYCLES_BACKEND,
SBI_PMU_EVENT_TYPE_HW, 0}},
[PERF_COUNT_HW_REF_CPU_CYCLES] = {.hw_gen_event = {
SBI_PMU_HW_REF_CPU_CYCLES,
SBI_PMU_EVENT_TYPE_HW, 0}},
};
#define C(x) PERF_COUNT_HW_CACHE_##x
static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_READ), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), C(OP_READ),
C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
},
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
},
},
};
static int pmu_sbi_ctr_get_width(int idx)
{
return pmu_ctr_list[idx].width;
}
static bool pmu_sbi_ctr_is_fw(int cidx)
{
union sbi_pmu_ctr_info *info;
info = &pmu_ctr_list[cidx];
if (!info)
return false;
return (info->type == SBI_PMU_CTR_TYPE_FW) ? true : false;
}
/*
* Returns the counter width of a programmable counter and number of hardware
* counters. As we don't support heterogeneous CPUs yet, it is okay to just
* return the counter width of the first programmable counter.
*/
int riscv_pmu_get_hpm_info(u32 *hw_ctr_width, u32 *num_hw_ctr)
{
int i;
union sbi_pmu_ctr_info *info;
u32 hpm_width = 0, hpm_count = 0;
if (!cmask)
return -EINVAL;
for_each_set_bit(i, &cmask, RISCV_MAX_COUNTERS) {
info = &pmu_ctr_list[i];
if (!info)
continue;
if (!hpm_width && info->csr != CSR_CYCLE && info->csr != CSR_INSTRET)
hpm_width = info->width;
if (info->type == SBI_PMU_CTR_TYPE_HW)
hpm_count++;
}
*hw_ctr_width = hpm_width;
*num_hw_ctr = hpm_count;
return 0;
}
EXPORT_SYMBOL_GPL(riscv_pmu_get_hpm_info);
static uint8_t pmu_sbi_csr_index(struct perf_event *event)
{
return pmu_ctr_list[event->hw.idx].csr - CSR_CYCLE;
}
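/*
* Translate the perf exclude_* attributes into SBI config-match inhibit
* flags: supervisor/user-mode inhibits when counting for the host, and
* VS/VU-mode inhibits when the event counts on behalf of a guest.
*/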
static unsigned long pmu_sbi_get_filter_flags(struct perf_event *event)
{
unsigned long cflags = 0;
bool guest_events = false;
if (event->attr.config1 & RISCV_PMU_CONFIG1_GUEST_EVENTS)
guest_events = true;
if (event->attr.exclude_kernel)
cflags |= guest_events ? SBI_PMU_CFG_FLAG_SET_VSINH : SBI_PMU_CFG_FLAG_SET_SINH;
if (event->attr.exclude_user)
cflags |= guest_events ? SBI_PMU_CFG_FLAG_SET_VUINH : SBI_PMU_CFG_FLAG_SET_UINH;
if (guest_events && event->attr.exclude_hv)
cflags |= SBI_PMU_CFG_FLAG_SET_SINH;
if (event->attr.exclude_host)
cflags |= SBI_PMU_CFG_FLAG_SET_UINH | SBI_PMU_CFG_FLAG_SET_SINH;
if (event->attr.exclude_guest)
cflags |= SBI_PMU_CFG_FLAG_SET_VSINH | SBI_PMU_CFG_FLAG_SET_VUINH;
return cflags;
}
static int pmu_sbi_ctr_get_idx(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
struct sbiret ret;
int idx;
uint64_t cbase = 0, cmask = rvpmu->cmask;
unsigned long cflags = 0;
cflags = pmu_sbi_get_filter_flags(event);
/*
* In legacy mode, we have to force these events onto the fixed counters,
* but not in user access mode, where we want to use the other counters
* that support sampling/filtering.
*/
if (hwc->flags & PERF_EVENT_FLAG_LEGACY) {
if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
cmask = 1;
} else if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS) {
cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
cmask = 1UL << (CSR_INSTRET - CSR_CYCLE);
}
}
/* retrieve the available counter index */
#if defined(CONFIG_32BIT)
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
cmask, cflags, hwc->event_base, hwc->config,
hwc->config >> 32);
#else
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
cmask, cflags, hwc->event_base, hwc->config, 0);
#endif
if (ret.error) {
pr_debug("Not able to find a counter for event %lx config %llx\n",
hwc->event_base, hwc->config);
return sbi_err_map_linux_errno(ret.error);
}
idx = ret.value;
if (!test_bit(idx, &rvpmu->cmask) || !pmu_ctr_list[idx].value)
return -ENOENT;
/* Additional sanity check for the counter id */
if (pmu_sbi_ctr_is_fw(idx)) {
if (!test_and_set_bit(idx, cpuc->used_fw_ctrs))
return idx;
} else {
if (!test_and_set_bit(idx, cpuc->used_hw_ctrs))
return idx;
}
return -ENOENT;
}
static void pmu_sbi_ctr_clear_idx(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
int idx = hwc->idx;
if (pmu_sbi_ctr_is_fw(idx))
clear_bit(idx, cpuc->used_fw_ctrs);
else
clear_bit(idx, cpuc->used_hw_ctrs);
}
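/*
* Decode a PERF_TYPE_HW_CACHE config value (cache type in bits [7:0],
* operation in bits [15:8], result in bits [23:16]) and return the
* corresponding SBI cache event index.
*/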
static int pmu_event_find_cache(u64 config)
{
unsigned int cache_type, cache_op, cache_result, ret;
cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return -EINVAL;
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return -EINVAL;
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
ret = pmu_cache_event_map[cache_type][cache_op][cache_result].event_idx;
return ret;
}
static bool pmu_sbi_is_fw_event(struct perf_event *event)
{
u32 type = event->attr.type;
u64 config = event->attr.config;
if ((type == PERF_TYPE_RAW) && ((config >> 63) == 1))
return true;
else
return false;
}
static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
{
u32 type = event->attr.type;
u64 config = event->attr.config;
int bSoftware;
u64 raw_config_val;
int ret;
switch (type) {
case PERF_TYPE_HARDWARE:
if (config >= PERF_COUNT_HW_MAX)
return -EINVAL;
ret = pmu_hw_event_map[event->attr.config].event_idx;
break;
case PERF_TYPE_HW_CACHE:
ret = pmu_event_find_cache(config);
break;
case PERF_TYPE_RAW:
/*
* As per SBI specification, the upper 16 bits must be unused for
* a raw event. Use the MSB (bit 63) to distinguish between hardware
* raw events and firmware events.
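* For example, a config of (1ULL << 63) | 0x6 selects firmware event 6,
* while a config of 0x12 maps to RISCV_PMU_RAW_EVENT_IDX with 0x12
* passed on as the raw event data.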
*/
bSoftware = config >> 63;
raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK;
if (bSoftware) {
ret = (raw_config_val & 0xFFFF) |
(SBI_PMU_EVENT_TYPE_FW << 16);
} else {
ret = RISCV_PMU_RAW_EVENT_IDX;
*econfig = raw_config_val;
}
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static u64 pmu_sbi_ctr_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
struct sbiret ret;
union sbi_pmu_ctr_info info;
u64 val = 0;
if (pmu_sbi_is_fw_event(event)) {
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ,
hwc->idx, 0, 0, 0, 0, 0);
if (!ret.error)
val = ret.value;
} else {
info = pmu_ctr_list[idx];
val = riscv_pmu_ctr_read_csr(info.csr);
if (IS_ENABLED(CONFIG_32BIT))
val = ((u64)riscv_pmu_ctr_read_csr(info.csr + 0x80)) << 32 | val;
}
return val;
}
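/*
* The two helpers below set or clear the SCOUNTEREN bit that corresponds
* to this event's hardware counter, granting or revoking direct user-space
* reads of that counter on the local CPU.
*/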
static void pmu_sbi_set_scounteren(void *arg)
{
struct perf_event *event = (struct perf_event *)arg;
csr_write(CSR_SCOUNTEREN,
csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event)));
}
static void pmu_sbi_reset_scounteren(void *arg)
{
struct perf_event *event = (struct perf_event *)arg;
csr_write(CSR_SCOUNTEREN,
csr_read(CSR_SCOUNTEREN) & ~(1 << pmu_sbi_csr_index(event)));
}
static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
{
struct sbiret ret;
struct hw_perf_event *hwc = &event->hw;
unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
#if defined(CONFIG_32BIT)
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
1, flag, ival, ival >> 32, 0);
#else
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
1, flag, ival, 0, 0);
#endif
if (ret.error && (ret.error != SBI_ERR_ALREADY_STARTED))
pr_err("Starting counter idx %d failed with error %d\n",
hwc->idx, sbi_err_map_linux_errno(ret.error));
if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
(hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
pmu_sbi_set_scounteren((void *)event);
}
static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
{
struct sbiret ret;
struct hw_perf_event *hwc = &event->hw;
if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
(hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
pmu_sbi_reset_scounteren((void *)event);
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
flag != SBI_PMU_STOP_FLAG_RESET)
pr_err("Stopping counter idx %d failed with error %d\n",
hwc->idx, sbi_err_map_linux_errno(ret.error));
}
static int pmu_sbi_find_num_ctrs(void)
{
struct sbiret ret;
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0, 0, 0, 0, 0, 0);
if (!ret.error)
return ret.value;
else
return sbi_err_map_linux_errno(ret.error);
}
static int pmu_sbi_get_ctrinfo(int nctr, unsigned long *mask)
{
struct sbiret ret;
int i, num_hw_ctr = 0, num_fw_ctr = 0;
union sbi_pmu_ctr_info cinfo;
pmu_ctr_list = kcalloc(nctr, sizeof(*pmu_ctr_list), GFP_KERNEL);
if (!pmu_ctr_list)
return -ENOMEM;
for (i = 0; i < nctr; i++) {
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);
if (ret.error)
/* The logical counter ids are not expected to be contiguous */
continue;
*mask |= BIT(i);
cinfo.value = ret.value;
if (cinfo.type == SBI_PMU_CTR_TYPE_FW)
num_fw_ctr++;
else
num_hw_ctr++;
pmu_ctr_list[i].value = cinfo.value;
}
pr_info("%d firmware and %d hardware counters\n", num_fw_ctr, num_hw_ctr);
return 0;
}
static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
{
/*
* No need to check the error because we are disabling all the counters
* which may include counters that are not enabled yet.
*/
sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
0, pmu->cmask, 0, 0, 0, 0);
}
static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
{
struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
/* No need to check the error here as we can't do anything about the error */
sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, 0,
cpu_hw_evt->used_hw_ctrs[0], 0, 0, 0, 0);
}
/*
* This function starts all the used counters in a two-step approach.
* Any counter that did not overflow can be started in a single step,
* while the overflowed counters need to be restarted with an updated
* initial value.
*/
static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
unsigned long ctr_ovf_mask)
{
int idx = 0;
struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
struct perf_event *event;
unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
unsigned long ctr_start_mask = 0;
uint64_t max_period;
struct hw_perf_event *hwc;
u64 init_val = 0;
ctr_start_mask = cpu_hw_evt->used_hw_ctrs[0] & ~ctr_ovf_mask;
/* Start all the counters that did not overflow in a single shot */
sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, 0, ctr_start_mask,
0, 0, 0, 0);
/* Reinitialize and start all the counter that overflowed */
while (ctr_ovf_mask) {
if (ctr_ovf_mask & 0x01) {
event = cpu_hw_evt->events[idx];
hwc = &event->hw;
max_period = riscv_pmu_ctr_get_width_mask(event);
init_val = local64_read(&hwc->prev_count) & max_period;
#if defined(CONFIG_32BIT)
sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
flag, init_val, init_val >> 32, 0);
#else
sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
flag, init_val, 0, 0);
#endif
perf_event_update_userpage(event);
}
ctr_ovf_mask = ctr_ovf_mask >> 1;
idx++;
}
}
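/*
* Counter overflow interrupt handler: stop the hardware counters, read the
* overflow status, clear the pending interrupt, push a sample for each
* overflowed event that requested sampling, and restart the counters with
* updated initial values.
*/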
static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
{
struct perf_sample_data data;
struct pt_regs *regs;
struct hw_perf_event *hw_evt;
union sbi_pmu_ctr_info *info;
int lidx, hidx, fidx;
struct riscv_pmu *pmu;
struct perf_event *event;
unsigned long overflow;
unsigned long overflowed_ctrs = 0;
struct cpu_hw_events *cpu_hw_evt = dev;
u64 start_clock = sched_clock();
if (WARN_ON_ONCE(!cpu_hw_evt))
return IRQ_NONE;
/* Firmware counters don't support overflow yet */
fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
event = cpu_hw_evt->events[fidx];
if (!event) {
csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
return IRQ_NONE;
}
pmu = to_riscv_pmu(event->pmu);
pmu_sbi_stop_hw_ctrs(pmu);
/* The overflow status register should only be read after the counters are stopped */
ALT_SBI_PMU_OVERFLOW(overflow);
/*
* Overflow interrupt pending bit should only be cleared after stopping
* all the counters to avoid any race condition.
*/
csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
/* No overflow bit is set */
if (!overflow)
return IRQ_NONE;
regs = get_irq_regs();
for_each_set_bit(lidx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) {
struct perf_event *event = cpu_hw_evt->events[lidx];
/* Skip if invalid event or user did not request a sampling */
if (!event || !is_sampling_event(event))
continue;
info = &pmu_ctr_list[lidx];
/* Do a sanity check */
if (!info || info->type != SBI_PMU_CTR_TYPE_HW)
continue;
/* compute hardware counter index */
hidx = info->csr - CSR_CYCLE;
/* check if the corresponding bit is set in sscountovf */
if (!(overflow & (1 << hidx)))
continue;
/*
* Keep track of the overflowed counters so that they can be restarted
* with an updated initial value.
*/
overflowed_ctrs |= 1 << lidx;
hw_evt = &event->hw;
riscv_pmu_event_update(event);
perf_sample_data_init(&data, 0, hw_evt->last_period);
if (riscv_pmu_event_set_period(event)) {
/*
* Unlike other ISAs, RISC-V doesn't have to disable interrupts
* to avoid throttling here. As per the specification, the
* interrupt remains disabled until the OF bit is set.
* Interrupts are enabled again only during the start.
* TODO: We will need to stop the guest counters once
* virtualization support is added.
*/
perf_event_overflow(event, &data, regs);
}
}
pmu_sbi_start_overflow_mask(pmu, overflowed_ctrs);
perf_sample_event_took(sched_clock() - start_clock);
return IRQ_HANDLED;
}
static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node);
struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
/*
* We keep enabling userspace access to CYCLE, TIME and INSTRET via the
* legacy option but that will be removed in the future.
*/
if (sysctl_perf_user_access == SYSCTL_LEGACY)
csr_write(CSR_SCOUNTEREN, 0x7);
else
csr_write(CSR_SCOUNTEREN, 0x2);
/* Stop all the counters so that they can be enabled from perf */
pmu_sbi_stop_all(pmu);
if (riscv_pmu_use_irq) {
cpu_hw_evt->irq = riscv_pmu_irq;
csr_clear(CSR_IP, BIT(riscv_pmu_irq_num));
csr_set(CSR_IE, BIT(riscv_pmu_irq_num));
enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
}
return 0;
}
static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
{
if (riscv_pmu_use_irq) {
disable_percpu_irq(riscv_pmu_irq);
csr_clear(CSR_IE, BIT(riscv_pmu_irq_num));
}
/* Disable all counters access for user mode now */
csr_write(CSR_SCOUNTEREN, 0x0);
return 0;
}
static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pdev)
{
int ret;
struct cpu_hw_events __percpu *hw_events = pmu->hw_events;
struct irq_domain *domain = NULL;
if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
riscv_pmu_irq_num = RV_IRQ_PMU;
riscv_pmu_use_irq = true;
} else if (IS_ENABLED(CONFIG_ERRATA_THEAD_PMU) &&
riscv_cached_mvendorid(0) == THEAD_VENDOR_ID &&
riscv_cached_marchid(0) == 0 &&
riscv_cached_mimpid(0) == 0) {
riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU;
riscv_pmu_use_irq = true;
}
if (!riscv_pmu_use_irq)
return -EOPNOTSUPP;
domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
DOMAIN_BUS_ANY);
if (!domain) {
pr_err("Failed to find INTC IRQ root domain\n");
return -ENODEV;
}
riscv_pmu_irq = irq_create_mapping(domain, riscv_pmu_irq_num);
if (!riscv_pmu_irq) {
pr_err("Failed to map PMU interrupt for node\n");
return -ENODEV;
}
ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events);
if (ret) {
pr_err("registering percpu irq failed [%d]\n", ret);
return ret;
}
return 0;
}
#ifdef CONFIG_CPU_PM
static int riscv_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
void *v)
{
struct riscv_pmu *rvpmu = container_of(b, struct riscv_pmu, riscv_pm_nb);
struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
int enabled = bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS);
struct perf_event *event;
int idx;
if (!enabled)
return NOTIFY_OK;
for (idx = 0; idx < RISCV_MAX_COUNTERS; idx++) {
event = cpuc->events[idx];
if (!event)
continue;
switch (cmd) {
case CPU_PM_ENTER:
/*
* Stop and update the counter
*/
riscv_pmu_stop(event, PERF_EF_UPDATE);
break;
case CPU_PM_EXIT:
case CPU_PM_ENTER_FAILED:
/*
* Restore and enable the counter.
*/
riscv_pmu_start(event, PERF_EF_RELOAD);
break;
default:
break;
}
}
return NOTIFY_OK;
}
static int riscv_pm_pmu_register(struct riscv_pmu *pmu)
{
pmu->riscv_pm_nb.notifier_call = riscv_pm_pmu_notify;
return cpu_pm_register_notifier(&pmu->riscv_pm_nb);
}
static void riscv_pm_pmu_unregister(struct riscv_pmu *pmu)
{
cpu_pm_unregister_notifier(&pmu->riscv_pm_nb);
}
#else
static inline int riscv_pm_pmu_register(struct riscv_pmu *pmu) { return 0; }
static inline void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) { }
#endif
static void riscv_pmu_destroy(struct riscv_pmu *pmu)
{
riscv_pm_pmu_unregister(pmu);
cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
}
static void pmu_sbi_event_init(struct perf_event *event)
{
/*
* The permissions are set at event_init so that we do not depend
* on the sysctl value that can change.
*/
if (sysctl_perf_user_access == SYSCTL_NO_USER_ACCESS)
event->hw.flags |= PERF_EVENT_FLAG_NO_USER_ACCESS;
else if (sysctl_perf_user_access == SYSCTL_USER_ACCESS)
event->hw.flags |= PERF_EVENT_FLAG_USER_ACCESS;
else
event->hw.flags |= PERF_EVENT_FLAG_LEGACY;
}
static void pmu_sbi_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS)
return;
if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) {
if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) {
return;
}
}
/*
* The user mmapped the event to directly access it: this is where
* we determine based on sysctl_perf_user_access if we grant userspace
* the direct access to this event. That means that within the same
* task, some events may be directly accessible and some other may not,
* if the user changes the value of sysctl_perf_user_access in the
* meantime.
*/
event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;
/*
* We must enable userspace access *before* advertising in the user page
* that it is possible to do so to avoid any race.
* And we must notify all cpus here because threads that currently run
* on other cpus will try to directly access the counter too without
* calling pmu_sbi_ctr_start.
*/
if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS)
on_each_cpu_mask(mm_cpumask(mm),
pmu_sbi_set_scounteren, (void *)event, 1);
}
static void pmu_sbi_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS)
return;
if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) {
if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) {
return;
}
}
/*
* Here we can directly remove user access since the user does not have
* access to the user page anymore so we avoid the racy window where the
* user could have read cap_user_rdpmc to true right before we disable
* it.
*/
event->hw.flags &= ~PERF_EVENT_FLAG_USER_READ_CNT;
if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS)
on_each_cpu_mask(mm_cpumask(mm),
pmu_sbi_reset_scounteren, (void *)event, 1);
}
static void riscv_pmu_update_counter_access(void *info)
{
if (sysctl_perf_user_access == SYSCTL_LEGACY)
csr_write(CSR_SCOUNTEREN, 0x7);
else
csr_write(CSR_SCOUNTEREN, 0x2);
}
static int riscv_pmu_proc_user_access_handler(struct ctl_table *table,
int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int prev = sysctl_perf_user_access;
int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
/*
* Test against the previous value since we clear SCOUNTEREN when
* sysctl_perf_user_access is set to SYSCTL_USER_ACCESS, but we should
* not do that if that was already the case.
*/
if (ret || !write || prev == sysctl_perf_user_access)
return ret;
on_each_cpu(riscv_pmu_update_counter_access, NULL, 1);
return 0;
}
static struct ctl_table sbi_pmu_sysctl_table[] = {
{
.procname = "perf_user_access",
.data = &sysctl_perf_user_access,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = riscv_pmu_proc_user_access_handler,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_TWO,
},
{ }
};
static int pmu_sbi_device_probe(struct platform_device *pdev)
{
struct riscv_pmu *pmu = NULL;
int ret = -ENODEV;
int num_counters;
pr_info("SBI PMU extension is available\n");
pmu = riscv_pmu_alloc();
if (!pmu)
return -ENOMEM;
num_counters = pmu_sbi_find_num_ctrs();
if (num_counters < 0) {
pr_err("SBI PMU extension doesn't provide any counters\n");
goto out_free;
}
/* It is possible for SBI to report more than the maximum number of counters */
if (num_counters > RISCV_MAX_COUNTERS) {
num_counters = RISCV_MAX_COUNTERS;
pr_info("SBI returned more than maximum number of counters. Limiting the number of counters to %d\n", num_counters);
}
/* cache all the information about counters now */
if (pmu_sbi_get_ctrinfo(num_counters, &cmask))
goto out_free;
ret = pmu_sbi_setup_irqs(pmu, pdev);
if (ret < 0) {
pr_info("Perf sampling/filtering is not supported as sscof extension is not available\n");
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
}
pmu->pmu.attr_groups = riscv_pmu_attr_groups;
pmu->cmask = cmask;
pmu->ctr_start = pmu_sbi_ctr_start;
pmu->ctr_stop = pmu_sbi_ctr_stop;
pmu->event_map = pmu_sbi_event_map;
pmu->ctr_get_idx = pmu_sbi_ctr_get_idx;
pmu->ctr_get_width = pmu_sbi_ctr_get_width;
pmu->ctr_clear_idx = pmu_sbi_ctr_clear_idx;
pmu->ctr_read = pmu_sbi_ctr_read;
pmu->event_init = pmu_sbi_event_init;
pmu->event_mapped = pmu_sbi_event_mapped;
pmu->event_unmapped = pmu_sbi_event_unmapped;
pmu->csr_index = pmu_sbi_csr_index;
ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
if (ret)
return ret;
ret = riscv_pm_pmu_register(pmu);
if (ret)
goto out_unregister;
ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
if (ret)
goto out_unregister;
register_sysctl("kernel", sbi_pmu_sysctl_table);
return 0;
out_unregister:
riscv_pmu_destroy(pmu);
out_free:
kfree(pmu);
return ret;
}
static struct platform_driver pmu_sbi_driver = {
.probe = pmu_sbi_device_probe,
.driver = {
.name = RISCV_PMU_SBI_PDEV_NAME,
},
};
static int __init pmu_sbi_devinit(void)
{
int ret;
struct platform_device *pdev;
if (sbi_spec_version < sbi_mk_version(0, 3) ||
!sbi_probe_extension(SBI_EXT_PMU)) {
return 0;
}
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_RISCV_STARTING,
"perf/riscv/pmu:starting",
pmu_sbi_starting_cpu, pmu_sbi_dying_cpu);
if (ret) {
pr_err("CPU hotplug notifier could not be registered: %d\n",
ret);
return ret;
}
ret = platform_driver_register(&pmu_sbi_driver);
if (ret)
return ret;
pdev = platform_device_register_simple(RISCV_PMU_SBI_PDEV_NAME, -1, NULL, 0);
if (IS_ERR(pdev)) {
platform_driver_unregister(&pmu_sbi_driver);
return PTR_ERR(pdev);
}
/* Notify the legacy implementation that the SBI PMU is available */
riscv_pmu_legacy_skip_init();
return ret;
}
device_initcall(pmu_sbi_devinit)
| linux-master | drivers/perf/riscv_pmu_sbi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This driver adds support for perf events to use the Performance
* Monitor Counter Groups (PMCG) associated with an SMMUv3 node
* to monitor that node.
*
* SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where
* <phys_addr_page> is the physical page address of the SMMU PMCG wrapped
* to a 4K boundary. For example, the PMCG at 0xff88840000 is named
* smmuv3_pmcg_ff88840
*
* Filtering by stream id is done by specifying filtering parameters
* with the event. Options are:
* filter_enable - 0 = no filtering, 1 = filtering enabled
* filter_span - 0 = exact match, 1 = pattern match
* filter_stream_id - pattern to filter against
*
* To match a partial StreamID where the X most-significant bits must match
* but the Y least-significant bits might differ, STREAMID is programmed
* with a value that contains:
* STREAMID[Y - 1] == 0.
* STREAMID[Y - 2:0] == 1 (where Y > 1).
* The remainder of implemented bits of STREAMID (X bits, from bit Y upwards)
* contain a value to match from the corresponding bits of event StreamID.
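*
* For example (illustrative, following the rule above): to match the eight
* stream ids 0x40-0x47 (Y = 3), STREAMID is programmed with 0x43, i.e.
* bit 2 clear and bits [1:0] set, with filter_span = 1.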
*
* Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
* filter_span=1,filter_stream_id=0x42/ -a netperf
* Applies filter pattern 0x42 to transaction events, which means events
* matching stream ids 0x42 and 0x43 are counted. Further filtering
* information is available in the SMMU documentation.
*
* SMMU events are not attributable to a CPU, so task mode and sampling
* are not supported.
*/
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#define SMMU_PMCG_EVCNTR0 0x0
#define SMMU_PMCG_EVCNTR(n, stride) (SMMU_PMCG_EVCNTR0 + (n) * (stride))
#define SMMU_PMCG_EVTYPER0 0x400
#define SMMU_PMCG_EVTYPER(n) (SMMU_PMCG_EVTYPER0 + (n) * 4)
#define SMMU_PMCG_SID_SPAN_SHIFT 29
#define SMMU_PMCG_SMR0 0xA00
#define SMMU_PMCG_SMR(n) (SMMU_PMCG_SMR0 + (n) * 4)
#define SMMU_PMCG_CNTENSET0 0xC00
#define SMMU_PMCG_CNTENCLR0 0xC20
#define SMMU_PMCG_INTENSET0 0xC40
#define SMMU_PMCG_INTENCLR0 0xC60
#define SMMU_PMCG_OVSCLR0 0xC80
#define SMMU_PMCG_OVSSET0 0xCC0
#define SMMU_PMCG_CFGR 0xE00
#define SMMU_PMCG_CFGR_SID_FILTER_TYPE BIT(23)
#define SMMU_PMCG_CFGR_MSI BIT(21)
#define SMMU_PMCG_CFGR_RELOC_CTRS BIT(20)
#define SMMU_PMCG_CFGR_SIZE GENMASK(13, 8)
#define SMMU_PMCG_CFGR_NCTR GENMASK(5, 0)
#define SMMU_PMCG_CR 0xE04
#define SMMU_PMCG_CR_ENABLE BIT(0)
#define SMMU_PMCG_IIDR 0xE08
#define SMMU_PMCG_IIDR_PRODUCTID GENMASK(31, 20)
#define SMMU_PMCG_IIDR_VARIANT GENMASK(19, 16)
#define SMMU_PMCG_IIDR_REVISION GENMASK(15, 12)
#define SMMU_PMCG_IIDR_IMPLEMENTER GENMASK(11, 0)
#define SMMU_PMCG_CEID0 0xE20
#define SMMU_PMCG_CEID1 0xE28
#define SMMU_PMCG_IRQ_CTRL 0xE50
#define SMMU_PMCG_IRQ_CTRL_IRQEN BIT(0)
#define SMMU_PMCG_IRQ_CFG0 0xE58
#define SMMU_PMCG_IRQ_CFG1 0xE60
#define SMMU_PMCG_IRQ_CFG2 0xE64
/* IMP-DEF ID registers */
#define SMMU_PMCG_PIDR0 0xFE0
#define SMMU_PMCG_PIDR0_PART_0 GENMASK(7, 0)
#define SMMU_PMCG_PIDR1 0xFE4
#define SMMU_PMCG_PIDR1_DES_0 GENMASK(7, 4)
#define SMMU_PMCG_PIDR1_PART_1 GENMASK(3, 0)
#define SMMU_PMCG_PIDR2 0xFE8
#define SMMU_PMCG_PIDR2_REVISION GENMASK(7, 4)
#define SMMU_PMCG_PIDR2_DES_1 GENMASK(2, 0)
#define SMMU_PMCG_PIDR3 0xFEC
#define SMMU_PMCG_PIDR3_REVAND GENMASK(7, 4)
#define SMMU_PMCG_PIDR4 0xFD0
#define SMMU_PMCG_PIDR4_DES_2 GENMASK(3, 0)
/* MSI config fields */
#define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2)
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE 0x1
#define SMMU_PMCG_DEFAULT_FILTER_SPAN 1
#define SMMU_PMCG_DEFAULT_FILTER_SID GENMASK(31, 0)
#define SMMU_PMCG_MAX_COUNTERS 64
#define SMMU_PMCG_ARCH_MAX_EVENTS 128
#define SMMU_PMCG_PA_SHIFT 12
#define SMMU_PMCG_EVCNTR_RDONLY BIT(0)
#define SMMU_PMCG_HARDEN_DISABLE BIT(1)
static int cpuhp_state_num;
struct smmu_pmu {
struct hlist_node node;
struct perf_event *events[SMMU_PMCG_MAX_COUNTERS];
DECLARE_BITMAP(used_counters, SMMU_PMCG_MAX_COUNTERS);
DECLARE_BITMAP(supported_events, SMMU_PMCG_ARCH_MAX_EVENTS);
unsigned int irq;
unsigned int on_cpu;
struct pmu pmu;
unsigned int num_counters;
struct device *dev;
void __iomem *reg_base;
void __iomem *reloc_base;
u64 counter_mask;
u32 options;
u32 iidr;
bool global_filter;
};
#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))
#define SMMU_PMU_EVENT_ATTR_EXTRACTOR(_name, _config, _start, _end) \
static inline u32 get_##_name(struct perf_event *event) \
{ \
return FIELD_GET(GENMASK_ULL(_end, _start), \
event->attr._config); \
}
SMMU_PMU_EVENT_ATTR_EXTRACTOR(event, config, 0, 15);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33);
static inline void smmu_pmu_enable(struct pmu *pmu)
{
struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
}
static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
struct perf_event *event, int idx);
static inline void smmu_pmu_enable_quirk_hip08_09(struct pmu *pmu)
{
struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
unsigned int idx;
for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx);
smmu_pmu_enable(pmu);
}
static inline void smmu_pmu_disable(struct pmu *pmu)
{
struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
}
static inline void smmu_pmu_disable_quirk_hip08_09(struct pmu *pmu)
{
struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
unsigned int idx;
/*
* The global disable of the PMU sometimes fails to stop the counting.
* Harden this by writing an invalid event type to each used counter
* to forcibly stop counting.
*/
for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
smmu_pmu_disable(pmu);
}
static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
u32 idx, u64 value)
{
if (smmu_pmu->counter_mask & BIT(32))
writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
else
writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
}
static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx)
{
u64 value;
if (smmu_pmu->counter_mask & BIT(32))
value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
else
value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
return value;
}
static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx)
{
writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0);
}
static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx)
{
writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
}
static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx)
{
writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0);
}
static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu,
u32 idx)
{
writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
}
static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx,
u32 val)
{
writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
}
static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val)
{
writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx));
}
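/*
* Fold the delta between the current and previously read counter values
* into the perf event count; masking the delta with counter_mask keeps the
* arithmetic correct when the hardware counter wraps around.
*/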
static void smmu_pmu_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
u64 delta, prev, now;
u32 idx = hwc->idx;
do {
prev = local64_read(&hwc->prev_count);
now = smmu_pmu_counter_get_value(smmu_pmu, idx);
} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
/* handle overflow. */
delta = now - prev;
delta &= smmu_pmu->counter_mask;
local64_add(delta, &event->count);
}
static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu,
struct hw_perf_event *hwc)
{
u32 idx = hwc->idx;
u64 new;
if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) {
/*
* On platforms that require this quirk, if the counter starts
* at less than half of its maximum value and wraps, the current
* overflow-handling logic may not work. Those platforms are expected
* to implement full 64-bit counters, so such a possibility is
* remote (e.g. HiSilicon HIP08).
*/
new = smmu_pmu_counter_get_value(smmu_pmu, idx);
} else {
/*
* We limit the max period to half of the counter's maximum
* value, so that even in the case of extreme
* interrupt latency the counter will (hopefully) not wrap
* past its initial value.
*/
new = smmu_pmu->counter_mask >> 1;
smmu_pmu_counter_set_value(smmu_pmu, idx, new);
}
local64_set(&hwc->prev_count, new);
}
static void smmu_pmu_set_event_filter(struct perf_event *event,
int idx, u32 span, u32 sid)
{
struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
u32 evtyper;
evtyper = get_event(event) | span << SMMU_PMCG_SID_SPAN_SHIFT;
smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
smmu_pmu_set_smr(smmu_pmu, idx, sid);
}
static bool smmu_pmu_check_global_filter(struct perf_event *curr,
struct perf_event *new)
{
if (get_filter_enable(new) != get_filter_enable(curr))
return false;
if (!get_filter_enable(new))
return true;
return get_filter_span(new) == get_filter_span(curr) &&
get_filter_stream_id(new) == get_filter_stream_id(curr);
}
static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
struct perf_event *event, int idx)
{
u32 span, sid;
unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters;
bool filter_en = !!get_filter_enable(event);
span = filter_en ? get_filter_span(event) :
SMMU_PMCG_DEFAULT_FILTER_SPAN;
sid = filter_en ? get_filter_stream_id(event) :
SMMU_PMCG_DEFAULT_FILTER_SID;
cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
/*
* Either we have per-counter filtering, or we are scheduling the first
* globally-filtered event into an empty PMU, so idx == 0 and the result
* is equivalent.
*/
if (!smmu_pmu->global_filter || cur_idx == num_ctrs) {
smmu_pmu_set_event_filter(event, idx, span, sid);
return 0;
}
/* Otherwise, must match whatever's currently scheduled */
if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) {
smmu_pmu_set_evtyper(smmu_pmu, idx, get_event(event));
return 0;
}
return -EAGAIN;
}
static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
struct perf_event *event)
{
int idx, err;
unsigned int num_ctrs = smmu_pmu->num_counters;
idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs);
if (idx == num_ctrs)
/* The counters are all in use. */
return -EAGAIN;
err = smmu_pmu_apply_event_filter(smmu_pmu, event, idx);
if (err)
return err;
set_bit(idx, smmu_pmu->used_counters);
return idx;
}
static bool smmu_pmu_events_compatible(struct perf_event *curr,
struct perf_event *new)
{
if (new->pmu != curr->pmu)
return false;
if (to_smmu_pmu(new->pmu)->global_filter &&
!smmu_pmu_check_global_filter(curr, new))
return false;
return true;
}
/*
* Implementation of abstract pmu functionality required by
* the core perf events code.
*/
static int smmu_pmu_event_init(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
struct device *dev = smmu_pmu->dev;
struct perf_event *sibling;
int group_num_events = 1;
u16 event_id;
if (event->attr.type != event->pmu->type)
return -ENOENT;
if (hwc->sample_period) {
dev_dbg(dev, "Sampling not supported\n");
return -EOPNOTSUPP;
}
if (event->cpu < 0) {
dev_dbg(dev, "Per-task mode not supported\n");
return -EOPNOTSUPP;
}
/* Verify specified event is supported on this PMU */
event_id = get_event(event);
if (event_id < SMMU_PMCG_ARCH_MAX_EVENTS &&
(!test_bit(event_id, smmu_pmu->supported_events))) {
dev_dbg(dev, "Invalid event %d for this PMU\n", event_id);
return -EINVAL;
}
/* Don't allow groups with mixed PMUs, except for s/w events */
if (!is_software_event(event->group_leader)) {
if (!smmu_pmu_events_compatible(event->group_leader, event))
return -EINVAL;
if (++group_num_events > smmu_pmu->num_counters)
return -EINVAL;
}
for_each_sibling_event(sibling, event->group_leader) {
if (is_software_event(sibling))
continue;
if (!smmu_pmu_events_compatible(sibling, event))
return -EINVAL;
if (++group_num_events > smmu_pmu->num_counters)
return -EINVAL;
}
hwc->idx = -1;
/*
* Ensure all events are on the same cpu so all events are in the
* same cpu context, to avoid races on pmu_enable etc.
*/
event->cpu = smmu_pmu->on_cpu;
return 0;
}
static void smmu_pmu_event_start(struct perf_event *event, int flags)
{
struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
hwc->state = 0;
smmu_pmu_set_period(smmu_pmu, hwc);
smmu_pmu_counter_enable(smmu_pmu, idx);
}
static void smmu_pmu_event_stop(struct perf_event *event, int flags)
{
struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
if (hwc->state & PERF_HES_STOPPED)
return;
smmu_pmu_counter_disable(smmu_pmu, idx);
/* As the counter gets updated on _start, ignore PERF_EF_UPDATE */
smmu_pmu_event_update(event);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
static int smmu_pmu_event_add(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
int idx;
struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
idx = smmu_pmu_get_event_idx(smmu_pmu, event);
if (idx < 0)
return idx;
hwc->idx = idx;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
smmu_pmu->events[idx] = event;
local64_set(&hwc->prev_count, 0);
smmu_pmu_interrupt_enable(smmu_pmu, idx);
if (flags & PERF_EF_START)
smmu_pmu_event_start(event, flags);
/* Propagate changes to the userspace mapping. */
perf_event_update_userpage(event);
return 0;
}
static void smmu_pmu_event_del(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
int idx = hwc->idx;
smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE);
smmu_pmu_interrupt_disable(smmu_pmu, idx);
smmu_pmu->events[idx] = NULL;
clear_bit(idx, smmu_pmu->used_counters);
perf_event_update_userpage(event);
}
static void smmu_pmu_event_read(struct perf_event *event)
{
smmu_pmu_event_update(event);
}
/* cpumask */
static ssize_t smmu_pmu_cpumask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu));
}
static struct device_attribute smmu_pmu_cpumask_attr =
__ATTR(cpumask, 0444, smmu_pmu_cpumask_show, NULL);
static struct attribute *smmu_pmu_cpumask_attrs[] = {
&smmu_pmu_cpumask_attr.attr,
NULL
};
static const struct attribute_group smmu_pmu_cpumask_group = {
.attrs = smmu_pmu_cpumask_attrs,
};
/* Events */
static ssize_t smmu_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
#define SMMU_EVENT_ATTR(name, config) \
PMU_EVENT_ATTR_ID(name, smmu_pmu_event_show, config)
static struct attribute *smmu_pmu_events[] = {
SMMU_EVENT_ATTR(cycles, 0),
SMMU_EVENT_ATTR(transaction, 1),
SMMU_EVENT_ATTR(tlb_miss, 2),
SMMU_EVENT_ATTR(config_cache_miss, 3),
SMMU_EVENT_ATTR(trans_table_walk_access, 4),
SMMU_EVENT_ATTR(config_struct_access, 5),
SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6),
SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7),
NULL
};
static umode_t smmu_pmu_event_is_visible(struct kobject *kobj,
struct attribute *attr, int unused)
{
struct device *dev = kobj_to_dev(kobj);
struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
if (test_bit(pmu_attr->id, smmu_pmu->supported_events))
return attr->mode;
return 0;
}
static const struct attribute_group smmu_pmu_events_group = {
.name = "events",
.attrs = smmu_pmu_events,
.is_visible = smmu_pmu_event_is_visible,
};
static ssize_t smmu_pmu_identifier_attr_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
return sysfs_emit(page, "0x%08x\n", smmu_pmu->iidr);
}
static umode_t smmu_pmu_identifier_attr_visible(struct kobject *kobj,
struct attribute *attr,
int n)
{
struct device *dev = kobj_to_dev(kobj);
struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
if (!smmu_pmu->iidr)
return 0;
return attr->mode;
}
static struct device_attribute smmu_pmu_identifier_attr =
__ATTR(identifier, 0444, smmu_pmu_identifier_attr_show, NULL);
static struct attribute *smmu_pmu_identifier_attrs[] = {
&smmu_pmu_identifier_attr.attr,
NULL
};
static const struct attribute_group smmu_pmu_identifier_group = {
.attrs = smmu_pmu_identifier_attrs,
.is_visible = smmu_pmu_identifier_attr_visible,
};
/* Formats */
PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(filter_stream_id, "config1:0-31");
PMU_FORMAT_ATTR(filter_span, "config1:32");
PMU_FORMAT_ATTR(filter_enable, "config1:33");
static struct attribute *smmu_pmu_formats[] = {
&format_attr_event.attr,
&format_attr_filter_stream_id.attr,
&format_attr_filter_span.attr,
&format_attr_filter_enable.attr,
NULL
};
static const struct attribute_group smmu_pmu_format_group = {
.name = "format",
.attrs = smmu_pmu_formats,
};
static const struct attribute_group *smmu_pmu_attr_grps[] = {
&smmu_pmu_cpumask_group,
&smmu_pmu_events_group,
&smmu_pmu_format_group,
&smmu_pmu_identifier_group,
NULL
};
/*
* Generic device handlers
*/
static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct smmu_pmu *smmu_pmu;
unsigned int target;
smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
if (cpu != smmu_pmu->on_cpu)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
smmu_pmu->on_cpu = target;
WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(target)));
return 0;
}
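/*
* Overflow interrupt handler: snapshot and clear the overflow status bits,
* then update and re-arm every counter that overflowed.
*/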
static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data)
{
struct smmu_pmu *smmu_pmu = data;
DECLARE_BITMAP(ovs, BITS_PER_TYPE(u64));
u64 ovsr;
unsigned int idx;
ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);
if (!ovsr)
return IRQ_NONE;
writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
bitmap_from_u64(ovs, ovsr);
for_each_set_bit(idx, ovs, smmu_pmu->num_counters) {
struct perf_event *event = smmu_pmu->events[idx];
struct hw_perf_event *hwc;
if (WARN_ON_ONCE(!event))
continue;
smmu_pmu_event_update(event);
hwc = &event->hw;
smmu_pmu_set_period(smmu_pmu, hwc);
}
return IRQ_HANDLED;
}
static void smmu_pmu_free_msis(void *data)
{
struct device *dev = data;
platform_msi_domain_free_irqs(dev);
}
static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
phys_addr_t doorbell;
struct device *dev = msi_desc_to_dev(desc);
struct smmu_pmu *pmu = dev_get_drvdata(dev);
doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
doorbell &= MSI_CFG0_ADDR_MASK;
writeq_relaxed(doorbell, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);
writel_relaxed(msg->data, pmu->reg_base + SMMU_PMCG_IRQ_CFG1);
writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE,
pmu->reg_base + SMMU_PMCG_IRQ_CFG2);
}
static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
{
struct device *dev = pmu->dev;
int ret;
/* Clear MSI address reg */
writeq_relaxed(0, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);
/* MSI supported or not */
if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI))
return;
ret = platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
if (ret) {
dev_warn(dev, "failed to allocate MSIs\n");
return;
}
pmu->irq = msi_get_virq(dev, 0);
/* Add callback to free MSIs on teardown */
devm_add_action(dev, smmu_pmu_free_msis, dev);
}
static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
{
unsigned long flags = IRQF_NOBALANCING | IRQF_SHARED | IRQF_NO_THREAD;
int irq, ret = -ENXIO;
smmu_pmu_setup_msi(pmu);
irq = pmu->irq;
if (irq)
ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq,
flags, "smmuv3-pmu", pmu);
return ret;
}
static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
{
u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0);
smmu_pmu_disable(&smmu_pmu->pmu);
/* Disable counter and interrupt */
writeq_relaxed(counter_present_mask,
smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
writeq_relaxed(counter_present_mask,
smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
writeq_relaxed(counter_present_mask,
smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
}
static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
{
u32 model;
model = *(u32 *)dev_get_platdata(smmu_pmu->dev);
switch (model) {
case IORT_SMMU_V3_PMCG_HISI_HIP08:
/* HiSilicon Erratum 162001800 */
smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY | SMMU_PMCG_HARDEN_DISABLE;
break;
case IORT_SMMU_V3_PMCG_HISI_HIP09:
smmu_pmu->options |= SMMU_PMCG_HARDEN_DISABLE;
break;
}
dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options);
}
static bool smmu_pmu_coresight_id_regs(struct smmu_pmu *smmu_pmu)
{
return of_device_is_compatible(smmu_pmu->dev->of_node,
"arm,mmu-600-pmcg");
}
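/*
* Some implementations (the MMU-600 PMCG, per the compatible check above)
* report an IIDR of zero; in that case reconstruct an equivalent identifier
* from the CoreSight-style PIDR registers so that the sysfs "identifier"
* attribute remains meaningful.
*/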
static void smmu_pmu_get_iidr(struct smmu_pmu *smmu_pmu)
{
u32 iidr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_IIDR);
if (!iidr && smmu_pmu_coresight_id_regs(smmu_pmu)) {
u32 pidr0 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR0);
u32 pidr1 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR1);
u32 pidr2 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR2);
u32 pidr3 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR3);
u32 pidr4 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR4);
u32 productid = FIELD_GET(SMMU_PMCG_PIDR0_PART_0, pidr0) |
(FIELD_GET(SMMU_PMCG_PIDR1_PART_1, pidr1) << 8);
u32 variant = FIELD_GET(SMMU_PMCG_PIDR2_REVISION, pidr2);
u32 revision = FIELD_GET(SMMU_PMCG_PIDR3_REVAND, pidr3);
u32 implementer =
FIELD_GET(SMMU_PMCG_PIDR1_DES_0, pidr1) |
(FIELD_GET(SMMU_PMCG_PIDR2_DES_1, pidr2) << 4) |
(FIELD_GET(SMMU_PMCG_PIDR4_DES_2, pidr4) << 8);
iidr = FIELD_PREP(SMMU_PMCG_IIDR_PRODUCTID, productid) |
FIELD_PREP(SMMU_PMCG_IIDR_VARIANT, variant) |
FIELD_PREP(SMMU_PMCG_IIDR_REVISION, revision) |
FIELD_PREP(SMMU_PMCG_IIDR_IMPLEMENTER, implementer);
}
smmu_pmu->iidr = iidr;
}
static int smmu_pmu_probe(struct platform_device *pdev)
{
struct smmu_pmu *smmu_pmu;
struct resource *res_0;
u32 cfgr, reg_size;
u64 ceid_64[2];
int irq, err;
char *name;
struct device *dev = &pdev->dev;
smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL);
if (!smmu_pmu)
return -ENOMEM;
smmu_pmu->dev = dev;
platform_set_drvdata(pdev, smmu_pmu);
smmu_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = smmu_pmu_enable,
.pmu_disable = smmu_pmu_disable,
.event_init = smmu_pmu_event_init,
.add = smmu_pmu_event_add,
.del = smmu_pmu_event_del,
.start = smmu_pmu_event_start,
.stop = smmu_pmu_event_stop,
.read = smmu_pmu_event_read,
.attr_groups = smmu_pmu_attr_grps,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
smmu_pmu->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res_0);
if (IS_ERR(smmu_pmu->reg_base))
return PTR_ERR(smmu_pmu->reg_base);
cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);
/* Determine if page 1 is present */
if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) {
smmu_pmu->reloc_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(smmu_pmu->reloc_base))
return PTR_ERR(smmu_pmu->reloc_base);
} else {
smmu_pmu->reloc_base = smmu_pmu->reg_base;
}
irq = platform_get_irq_optional(pdev, 0);
if (irq > 0)
smmu_pmu->irq = irq;
ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0);
ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1);
bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64,
SMMU_PMCG_ARCH_MAX_EVENTS);
smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1;
smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);
reg_size = FIELD_GET(SMMU_PMCG_CFGR_SIZE, cfgr);
smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);
smmu_pmu_reset(smmu_pmu);
err = smmu_pmu_setup_irq(smmu_pmu);
if (err) {
dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start);
return err;
}
smmu_pmu_get_iidr(smmu_pmu);
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx",
(res_0->start) >> SMMU_PMCG_PA_SHIFT);
if (!name) {
dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start);
return -EINVAL;
}
if (!dev->of_node)
smmu_pmu_get_acpi_options(smmu_pmu);
/*
* For platforms that suffer from this quirk, disabling the PMU sometimes
* fails to stop the counters, which leads to inaccurate or erroneous
* counts. Forcibly disable the counters with these quirk handlers.
*/
if (smmu_pmu->options & SMMU_PMCG_HARDEN_DISABLE) {
smmu_pmu->pmu.pmu_enable = smmu_pmu_enable_quirk_hip08_09;
smmu_pmu->pmu.pmu_disable = smmu_pmu_disable_quirk_hip08_09;
}
/* Pick one CPU to be the preferred one to use */
smmu_pmu->on_cpu = raw_smp_processor_id();
WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));
err = cpuhp_state_add_instance_nocalls(cpuhp_state_num,
&smmu_pmu->node);
if (err) {
dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
err, &res_0->start);
return err;
}
err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
if (err) {
dev_err(dev, "Error %d registering PMU @%pa\n",
err, &res_0->start);
goto out_unregister;
}
dev_info(dev, "Registered PMU @ %pa using %d counters with %s filter settings\n",
&res_0->start, smmu_pmu->num_counters,
smmu_pmu->global_filter ? "Global(Counter0)" :
"Individual");
return 0;
out_unregister:
cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
return err;
}
static int smmu_pmu_remove(struct platform_device *pdev)
{
struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&smmu_pmu->pmu);
cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
return 0;
}
static void smmu_pmu_shutdown(struct platform_device *pdev)
{
struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
smmu_pmu_disable(&smmu_pmu->pmu);
}
#ifdef CONFIG_OF
static const struct of_device_id smmu_pmu_of_match[] = {
{ .compatible = "arm,smmu-v3-pmcg" },
{}
};
MODULE_DEVICE_TABLE(of, smmu_pmu_of_match);
#endif
static struct platform_driver smmu_pmu_driver = {
.driver = {
.name = "arm-smmu-v3-pmcg",
.of_match_table = of_match_ptr(smmu_pmu_of_match),
.suppress_bind_attrs = true,
},
.probe = smmu_pmu_probe,
.remove = smmu_pmu_remove,
.shutdown = smmu_pmu_shutdown,
};
static int __init arm_smmu_pmu_init(void)
{
int ret;
cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
"perf/arm/pmcg:online",
NULL,
smmu_pmu_offline_cpu);
if (cpuhp_state_num < 0)
return cpuhp_state_num;
ret = platform_driver_register(&smmu_pmu_driver);
if (ret)
cpuhp_remove_multi_state(cpuhp_state_num);
return ret;
}
module_init(arm_smmu_pmu_init);
static void __exit arm_smmu_pmu_exit(void)
{
platform_driver_unregister(&smmu_pmu_driver);
cpuhp_remove_multi_state(cpuhp_state_num);
}
module_exit(arm_smmu_pmu_exit);
MODULE_ALIAS("platform:arm-smmu-v3-pmcg");
MODULE_DESCRIPTION("PMU driver for ARM SMMUv3 Performance Monitors Extension");
MODULE_AUTHOR("Neil Leeder <[email protected]>");
MODULE_AUTHOR("Shameer Kolothum <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/perf/arm_smmuv3_pmu.c |
// SPDX-License-Identifier: GPL-2.0
/* Marvell CN10K LLC-TAD perf driver
*
* Copyright (C) 2021 Marvell
*/
#define pr_fmt(fmt) "tad_pmu: " fmt
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/cpuhotplug.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#define TAD_PFC_OFFSET 0x800
#define TAD_PFC(counter) (TAD_PFC_OFFSET | (counter << 3))
#define TAD_PRF_OFFSET 0x900
#define TAD_PRF(counter) (TAD_PRF_OFFSET | (counter << 3))
#define TAD_PRF_CNTSEL_MASK 0xFF
#define TAD_MAX_COUNTERS 8
#define to_tad_pmu(p) (container_of(p, struct tad_pmu, pmu))
struct tad_region {
void __iomem *base;
};
struct tad_pmu {
struct pmu pmu;
struct tad_region *regions;
u32 region_cnt;
unsigned int cpu;
struct hlist_node node;
struct perf_event *events[TAD_MAX_COUNTERS];
DECLARE_BITMAP(counters_map, TAD_MAX_COUNTERS);
};
static int tad_pmu_cpuhp_state;
static void tad_pmu_event_counter_read(struct perf_event *event)
{
struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u32 counter_idx = hwc->idx;
u64 prev, new;
int i;
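/*
 * A logical counter is the sum of the per-TAD hardware counters;
 * retry if prev_count changed while the regions were being read.
 */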
do {
prev = local64_read(&hwc->prev_count);
for (i = 0, new = 0; i < tad_pmu->region_cnt; i++)
new += readq(tad_pmu->regions[i].base +
TAD_PFC(counter_idx));
} while (local64_cmpxchg(&hwc->prev_count, prev, new) != prev);
local64_add(new - prev, &event->count);
}
static void tad_pmu_event_counter_stop(struct perf_event *event, int flags)
{
struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u32 counter_idx = hwc->idx;
int i;
/* TAD()_PFC() stops counting on the write
 * which sets TAD()_PRF()[CNTSEL] == 0
 */
for (i = 0; i < tad_pmu->region_cnt; i++) {
writeq_relaxed(0, tad_pmu->regions[i].base +
TAD_PRF(counter_idx));
}
tad_pmu_event_counter_read(event);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
static void tad_pmu_event_counter_start(struct perf_event *event, int flags)
{
struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u32 event_idx = event->attr.config;
u32 counter_idx = hwc->idx;
u64 reg_val;
int i;
hwc->state = 0;
/* Typically the TAD()_PFC() registers are zeroed to start counting */
for (i = 0; i < tad_pmu->region_cnt; i++)
writeq_relaxed(0, tad_pmu->regions[i].base +
TAD_PFC(counter_idx));
/* TAD()_PFC() starts counting on the write
 * which sets TAD()_PRF()[CNTSEL] != 0
 */
for (i = 0; i < tad_pmu->region_cnt; i++) {
reg_val = event_idx & 0xFF;
writeq_relaxed(reg_val, tad_pmu->regions[i].base +
TAD_PRF(counter_idx));
}
}
static void tad_pmu_event_counter_del(struct perf_event *event, int flags)
{
struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
tad_pmu_event_counter_stop(event, flags | PERF_EF_UPDATE);
tad_pmu->events[idx] = NULL;
clear_bit(idx, tad_pmu->counters_map);
}
static int tad_pmu_event_counter_add(struct perf_event *event, int flags)
{
struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx;
/* Get a free counter for this event */
idx = find_first_zero_bit(tad_pmu->counters_map, TAD_MAX_COUNTERS);
if (idx == TAD_MAX_COUNTERS)
return -EAGAIN;
set_bit(idx, tad_pmu->counters_map);
hwc->idx = idx;
hwc->state = PERF_HES_STOPPED;
tad_pmu->events[idx] = event;
if (flags & PERF_EF_START)
tad_pmu_event_counter_start(event, flags);
return 0;
}
static int tad_pmu_event_init(struct perf_event *event)
{
struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
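/*
 * Events must be created disabled and not yet active; they are then
 * bound to the CPU nominated for this PMU.
 */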
if (event->attr.type != event->pmu->type)
return -ENOENT;
if (!event->attr.disabled)
return -EINVAL;
if (event->state != PERF_EVENT_STATE_OFF)
return -EINVAL;
event->cpu = tad_pmu->cpu;
event->hw.idx = -1;
event->hw.config_base = event->attr.config;
return 0;
}
static ssize_t tad_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
#define TAD_PMU_EVENT_ATTR(name, config) \
PMU_EVENT_ATTR_ID(name, tad_pmu_event_show, config)
static struct attribute *tad_pmu_event_attrs[] = {
TAD_PMU_EVENT_ATTR(tad_none, 0x0),
TAD_PMU_EVENT_ATTR(tad_req_msh_in_any, 0x1),
TAD_PMU_EVENT_ATTR(tad_req_msh_in_mn, 0x2),
TAD_PMU_EVENT_ATTR(tad_req_msh_in_exlmn, 0x3),
TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_any, 0x4),
TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_mn, 0x5),
TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_exlmn, 0x6),
TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_dss, 0x7),
TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_retry_dss, 0x8),
TAD_PMU_EVENT_ATTR(tad_dat_msh_in_any, 0x9),
TAD_PMU_EVENT_ATTR(tad_dat_msh_in_dss, 0xa),
TAD_PMU_EVENT_ATTR(tad_req_msh_out_any, 0xb),
TAD_PMU_EVENT_ATTR(tad_req_msh_out_dss_rd, 0xc),
TAD_PMU_EVENT_ATTR(tad_req_msh_out_dss_wr, 0xd),
TAD_PMU_EVENT_ATTR(tad_req_msh_out_evict, 0xe),
TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_any, 0xf),
TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_retry_exlmn, 0x10),
TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_retry_mn, 0x11),
TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_exlmn, 0x12),
TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_mn, 0x13),
TAD_PMU_EVENT_ATTR(tad_snp_msh_out_any, 0x14),
TAD_PMU_EVENT_ATTR(tad_snp_msh_out_mn, 0x15),
TAD_PMU_EVENT_ATTR(tad_snp_msh_out_exlmn, 0x16),
TAD_PMU_EVENT_ATTR(tad_dat_msh_out_any, 0x17),
TAD_PMU_EVENT_ATTR(tad_dat_msh_out_fill, 0x18),
TAD_PMU_EVENT_ATTR(tad_dat_msh_out_dss, 0x19),
TAD_PMU_EVENT_ATTR(tad_alloc_dtg, 0x1a),
TAD_PMU_EVENT_ATTR(tad_alloc_ltg, 0x1b),
TAD_PMU_EVENT_ATTR(tad_alloc_any, 0x1c),
TAD_PMU_EVENT_ATTR(tad_hit_dtg, 0x1d),
TAD_PMU_EVENT_ATTR(tad_hit_ltg, 0x1e),
TAD_PMU_EVENT_ATTR(tad_hit_any, 0x1f),
TAD_PMU_EVENT_ATTR(tad_tag_rd, 0x20),
TAD_PMU_EVENT_ATTR(tad_dat_rd, 0x21),
TAD_PMU_EVENT_ATTR(tad_dat_rd_byp, 0x22),
TAD_PMU_EVENT_ATTR(tad_ifb_occ, 0x23),
TAD_PMU_EVENT_ATTR(tad_req_occ, 0x24),
NULL
};
static const struct attribute_group tad_pmu_events_attr_group = {
.name = "events",
.attrs = tad_pmu_event_attrs,
};
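/*
 * The event aliases above are exposed through sysfs (typically under
 * /sys/bus/event_source/devices/tad/events/), so counting could be
 * requested with something like:
 * perf stat -a -e tad/tad_hit_any/ sleep 1
 * (illustrative invocation, not taken from this driver).
 */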
PMU_FORMAT_ATTR(event, "config:0-7");
static struct attribute *tad_pmu_format_attrs[] = {
&format_attr_event.attr,
NULL
};
static struct attribute_group tad_pmu_format_attr_group = {
.name = "format",
.attrs = tad_pmu_format_attrs,
};
static ssize_t tad_pmu_cpumask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tad_pmu *tad_pmu = to_tad_pmu(dev_get_drvdata(dev));
return cpumap_print_to_pagebuf(true, buf, cpumask_of(tad_pmu->cpu));
}
static DEVICE_ATTR(cpumask, 0444, tad_pmu_cpumask_show, NULL);
static struct attribute *tad_pmu_cpumask_attrs[] = {
&dev_attr_cpumask.attr,
NULL
};
static struct attribute_group tad_pmu_cpumask_attr_group = {
.attrs = tad_pmu_cpumask_attrs,
};
static const struct attribute_group *tad_pmu_attr_groups[] = {
&tad_pmu_events_attr_group,
&tad_pmu_format_attr_group,
&tad_pmu_cpumask_attr_group,
NULL
};
static int tad_pmu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tad_region *regions;
struct tad_pmu *tad_pmu;
struct resource *res;
u32 tad_pmu_page_size;
u32 tad_page_size;
u32 tad_cnt;
int i, ret;
char *name;
tad_pmu = devm_kzalloc(&pdev->dev, sizeof(*tad_pmu), GFP_KERNEL);
if (!tad_pmu)
return -ENOMEM;
platform_set_drvdata(pdev, tad_pmu);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "Mem resource not found\n");
return -ENODEV;
}
ret = device_property_read_u32(dev, "marvell,tad-page-size",
&tad_page_size);
if (ret) {
dev_err(&pdev->dev, "Can't find tad-page-size property\n");
return ret;
}
ret = device_property_read_u32(dev, "marvell,tad-pmu-page-size",
&tad_pmu_page_size);
if (ret) {
dev_err(&pdev->dev, "Can't find tad-pmu-page-size property\n");
return ret;
}
ret = device_property_read_u32(dev, "marvell,tad-cnt", &tad_cnt);
if (ret) {
dev_err(&pdev->dev, "Can't find tad-cnt property\n");
return ret;
}
regions = devm_kcalloc(&pdev->dev, tad_cnt,
sizeof(*regions), GFP_KERNEL);
if (!regions)
return -ENOMEM;
/* ioremap the distributed TAD pmu regions */
for (i = 0; i < tad_cnt && res->start < res->end; i++) {
regions[i].base = devm_ioremap(&pdev->dev,
res->start,
tad_pmu_page_size);
if (!regions[i].base) {
dev_err(&pdev->dev, "TAD%d ioremap fail\n", i);
return -ENOMEM;
}
res->start += tad_page_size;
}
tad_pmu->regions = regions;
tad_pmu->region_cnt = tad_cnt;
tad_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
.attr_groups = tad_pmu_attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE |
PERF_PMU_CAP_NO_INTERRUPT,
.task_ctx_nr = perf_invalid_context,
.event_init = tad_pmu_event_init,
.add = tad_pmu_event_counter_add,
.del = tad_pmu_event_counter_del,
.start = tad_pmu_event_counter_start,
.stop = tad_pmu_event_counter_stop,
.read = tad_pmu_event_counter_read,
};
tad_pmu->cpu = raw_smp_processor_id();
/* Register pmu instance for cpu hotplug */
ret = cpuhp_state_add_instance_nocalls(tad_pmu_cpuhp_state,
&tad_pmu->node);
if (ret) {
dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
return ret;
}
name = "tad";
ret = perf_pmu_register(&tad_pmu->pmu, name, -1);
if (ret)
cpuhp_state_remove_instance_nocalls(tad_pmu_cpuhp_state,
&tad_pmu->node);
return ret;
}
static int tad_pmu_remove(struct platform_device *pdev)
{
struct tad_pmu *pmu = platform_get_drvdata(pdev);
cpuhp_state_remove_instance_nocalls(tad_pmu_cpuhp_state,
&pmu->node);
perf_pmu_unregister(&pmu->pmu);
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id tad_pmu_of_match[] = {
{ .compatible = "marvell,cn10k-tad-pmu", },
{},
};
#endif
#ifdef CONFIG_ACPI
static const struct acpi_device_id tad_pmu_acpi_match[] = {
{"MRVL000B", 0},
{},
};
MODULE_DEVICE_TABLE(acpi, tad_pmu_acpi_match);
#endif
static struct platform_driver tad_pmu_driver = {
.driver = {
.name = "cn10k_tad_pmu",
.of_match_table = of_match_ptr(tad_pmu_of_match),
.acpi_match_table = ACPI_PTR(tad_pmu_acpi_match),
.suppress_bind_attrs = true,
},
.probe = tad_pmu_probe,
.remove = tad_pmu_remove,
};
static int tad_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct tad_pmu *pmu = hlist_entry_safe(node, struct tad_pmu, node);
unsigned int target;
if (cpu != pmu->cpu)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&pmu->pmu, cpu, target);
pmu->cpu = target;
return 0;
}
static int __init tad_pmu_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
"perf/cn10k/tadpmu:online",
NULL,
tad_pmu_offline_cpu);
if (ret < 0)
return ret;
tad_pmu_cpuhp_state = ret;
ret = platform_driver_register(&tad_pmu_driver);
if (ret)
cpuhp_remove_multi_state(tad_pmu_cpuhp_state);
return ret;
}
static void __exit tad_pmu_exit(void)
{
platform_driver_unregister(&tad_pmu_driver);
cpuhp_remove_multi_state(tad_pmu_cpuhp_state);
}
module_init(tad_pmu_init);
module_exit(tad_pmu_exit);
MODULE_DESCRIPTION("Marvell CN10K LLC-TAD Perf driver");
MODULE_AUTHOR("Bhaskara Budiredla <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/perf/marvell_cn10k_tad_pmu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* RISC-V performance counter support.
*
* Copyright (C) 2021 Western Digital Corporation or its affiliates.
*
* This implementation is based on old RISC-V perf and ARM perf event code
* which are in turn based on sparc64 and x86 code.
*/
#include <linux/cpumask.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/perf/riscv_pmu.h>
#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/sched_clock.h>
#include <asm/sbi.h>
static bool riscv_perf_user_access(struct perf_event *event)
{
return ((event->attr.type == PERF_TYPE_HARDWARE) ||
(event->attr.type == PERF_TYPE_HW_CACHE) ||
(event->attr.type == PERF_TYPE_RAW)) &&
!!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT);
}
void arch_perf_update_userpage(struct perf_event *event,
struct perf_event_mmap_page *userpg, u64 now)
{
struct clock_read_data *rd;
unsigned int seq;
u64 ns;
userpg->cap_user_time = 0;
userpg->cap_user_time_zero = 0;
userpg->cap_user_time_short = 0;
userpg->cap_user_rdpmc = riscv_perf_user_access(event);
#ifdef CONFIG_RISCV_PMU
/*
 * The counters are 64-bit but the priv spec doesn't mandate that all the
 * bits be implemented, which is why the counter width can vary between
 * CPU vendors.
 */
if (userpg->cap_user_rdpmc)
userpg->pmc_width = to_riscv_pmu(event->pmu)->ctr_get_width(event->hw.idx) + 1;
#endif
do {
rd = sched_clock_read_begin(&seq);
userpg->time_mult = rd->mult;
userpg->time_shift = rd->shift;
userpg->time_zero = rd->epoch_ns;
userpg->time_cycles = rd->epoch_cyc;
userpg->time_mask = rd->sched_clock_mask;
/*
* Subtract the cycle base, such that software that
* doesn't know about cap_user_time_short still 'works'
* assuming no wraps.
*/
ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
userpg->time_zero -= ns;
} while (sched_clock_read_retry(seq));
userpg->time_offset = userpg->time_zero - now;
/*
* time_shift is not expected to be greater than 31 due to
* the original published conversion algorithm shifting a
* 32-bit value (now specifies a 64-bit value) - refer to the
* perf_event_mmap_page documentation in perf_event.h.
*/
if (userpg->time_shift == 32) {
userpg->time_shift = 31;
userpg->time_mult >>= 1;
}
/*
* Internal timekeeping for enabled/running/stopped times
* is always computed with the sched_clock.
*/
userpg->cap_user_time = 1;
userpg->cap_user_time_zero = 1;
userpg->cap_user_time_short = 1;
}
static unsigned long csr_read_num(int csr_num)
{
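/*
 * csr_read() requires a compile-time constant CSR number, so expand a
 * switch over all 32 counter CSRs with nested macros instead of
 * indexing at runtime.
 */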
#define switchcase_csr_read(__csr_num, __val) {\
case __csr_num: \
__val = csr_read(__csr_num); \
break; }
#define switchcase_csr_read_2(__csr_num, __val) {\
switchcase_csr_read(__csr_num + 0, __val) \
switchcase_csr_read(__csr_num + 1, __val)}
#define switchcase_csr_read_4(__csr_num, __val) {\
switchcase_csr_read_2(__csr_num + 0, __val) \
switchcase_csr_read_2(__csr_num + 2, __val)}
#define switchcase_csr_read_8(__csr_num, __val) {\
switchcase_csr_read_4(__csr_num + 0, __val) \
switchcase_csr_read_4(__csr_num + 4, __val)}
#define switchcase_csr_read_16(__csr_num, __val) {\
switchcase_csr_read_8(__csr_num + 0, __val) \
switchcase_csr_read_8(__csr_num + 8, __val)}
#define switchcase_csr_read_32(__csr_num, __val) {\
switchcase_csr_read_16(__csr_num + 0, __val) \
switchcase_csr_read_16(__csr_num + 16, __val)}
unsigned long ret = 0;
switch (csr_num) {
switchcase_csr_read_32(CSR_CYCLE, ret)
switchcase_csr_read_32(CSR_CYCLEH, ret)
default :
break;
}
return ret;
#undef switchcase_csr_read_32
#undef switchcase_csr_read_16
#undef switchcase_csr_read_8
#undef switchcase_csr_read_4
#undef switchcase_csr_read_2
#undef switchcase_csr_read
}
/*
* Read the CSR of a corresponding counter.
*/
unsigned long riscv_pmu_ctr_read_csr(unsigned long csr)
{
if (csr < CSR_CYCLE || csr > CSR_HPMCOUNTER31H ||
(csr > CSR_HPMCOUNTER31 && csr < CSR_CYCLEH)) {
pr_err("Invalid performance counter csr %lx\n", csr);
return -EINVAL;
}
return csr_read_num(csr);
}
u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event)
{
int cwidth;
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
if (!rvpmu->ctr_get_width)
/*
 * If the pmu driver doesn't report the counter width, fall back to the
 * maximum width allowed by the specification.
 */
cwidth = 63;
else {
if (hwc->idx == -1)
/* Handle init case where idx is not initialized yet */
cwidth = rvpmu->ctr_get_width(0);
else
cwidth = rvpmu->ctr_get_width(hwc->idx);
}
return GENMASK_ULL(cwidth, 0);
}
u64 riscv_pmu_event_update(struct perf_event *event)
{
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u64 prev_raw_count, new_raw_count;
unsigned long cmask;
u64 oldval, delta;
if (!rvpmu->ctr_read)
return 0;
cmask = riscv_pmu_ctr_get_width_mask(event);
do {
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = rvpmu->ctr_read(event);
oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count);
} while (oldval != prev_raw_count);
delta = (new_raw_count - prev_raw_count) & cmask;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
return delta;
}
void riscv_pmu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
if (!(hwc->state & PERF_HES_STOPPED)) {
if (rvpmu->ctr_stop) {
rvpmu->ctr_stop(event, 0);
hwc->state |= PERF_HES_STOPPED;
}
riscv_pmu_event_update(event);
hwc->state |= PERF_HES_UPTODATE;
}
}
int riscv_pmu_event_set_period(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
int overflow = 0;
uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
overflow = 1;
}
if (unlikely(left <= 0)) {
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
overflow = 1;
}
/*
* Limit the maximum period to prevent the counter value
* from overtaking the one we are about to program. In
* effect we are reducing max_period to account for
* interrupt latency (and we are being very conservative).
*/
if (left > (max_period >> 1))
left = (max_period >> 1);
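/*
 * The (two's-complement) value stored here is what riscv_pmu_start()
 * later passes to ctr_start(), so the counter overflows after roughly
 * 'left' more increments.
 */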
local64_set(&hwc->prev_count, (u64)-left);
perf_event_update_userpage(event);
return overflow;
}
void riscv_pmu_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);
u64 init_val;
if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
hwc->state = 0;
riscv_pmu_event_set_period(event);
init_val = local64_read(&hwc->prev_count) & max_period;
rvpmu->ctr_start(event, init_val);
perf_event_update_userpage(event);
}
static int riscv_pmu_add(struct perf_event *event, int flags)
{
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx;
idx = rvpmu->ctr_get_idx(event);
if (idx < 0)
return idx;
hwc->idx = idx;
cpuc->events[idx] = event;
cpuc->n_events++;
hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
if (flags & PERF_EF_START)
riscv_pmu_start(event, PERF_EF_RELOAD);
/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);
return 0;
}
static void riscv_pmu_del(struct perf_event *event, int flags)
{
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
struct hw_perf_event *hwc = &event->hw;
riscv_pmu_stop(event, PERF_EF_UPDATE);
cpuc->events[hwc->idx] = NULL;
/* The firmware needs to reset the counter mapping */
if (rvpmu->ctr_stop)
rvpmu->ctr_stop(event, RISCV_PMU_STOP_FLAG_RESET);
cpuc->n_events--;
if (rvpmu->ctr_clear_idx)
rvpmu->ctr_clear_idx(event);
perf_event_update_userpage(event);
hwc->idx = -1;
}
static void riscv_pmu_read(struct perf_event *event)
{
riscv_pmu_event_update(event);
}
static int riscv_pmu_event_init(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
int mapped_event;
u64 event_config = 0;
uint64_t cmask;
hwc->flags = 0;
mapped_event = rvpmu->event_map(event, &event_config);
if (mapped_event < 0) {
pr_debug("event %x:%llx not supported\n", event->attr.type,
event->attr.config);
return mapped_event;
}
/*
 * idx is set to -1 because the index of a general event should not be
 * decided until it is bound to a counter in pmu->add().
 * config will hold the counter CSR information and idx will hold the
 * counter index.
 */
hwc->config = event_config;
hwc->idx = -1;
hwc->event_base = mapped_event;
if (rvpmu->event_init)
rvpmu->event_init(event);
if (!is_sampling_event(event)) {
/*
 * For non-sampling runs, limit the sample_period to half
 * of the counter's maximum value. That way, the new counter
 * value is far less likely to overtake the previous one
 * unless you have some serious IRQ latency issues.
 */
cmask = riscv_pmu_ctr_get_width_mask(event);
hwc->sample_period = cmask >> 1;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
return 0;
}
static int riscv_pmu_event_idx(struct perf_event *event)
{
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT))
return 0;
if (rvpmu->csr_index)
return rvpmu->csr_index(event) + 1;
return 0;
}
static void riscv_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
if (rvpmu->event_mapped) {
rvpmu->event_mapped(event, mm);
perf_event_update_userpage(event);
}
}
static void riscv_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
if (rvpmu->event_unmapped) {
rvpmu->event_unmapped(event, mm);
perf_event_update_userpage(event);
}
}
struct riscv_pmu *riscv_pmu_alloc(void)
{
struct riscv_pmu *pmu;
int cpuid, i;
struct cpu_hw_events *cpuc;
pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
if (!pmu)
goto out;
pmu->hw_events = alloc_percpu_gfp(struct cpu_hw_events, GFP_KERNEL);
if (!pmu->hw_events) {
pr_info("failed to allocate per-cpu PMU data.\n");
goto out_free_pmu;
}
for_each_possible_cpu(cpuid) {
cpuc = per_cpu_ptr(pmu->hw_events, cpuid);
cpuc->n_events = 0;
for (i = 0; i < RISCV_MAX_COUNTERS; i++)
cpuc->events[i] = NULL;
}
pmu->pmu = (struct pmu) {
.event_init = riscv_pmu_event_init,
.event_mapped = riscv_pmu_event_mapped,
.event_unmapped = riscv_pmu_event_unmapped,
.event_idx = riscv_pmu_event_idx,
.add = riscv_pmu_add,
.del = riscv_pmu_del,
.start = riscv_pmu_start,
.stop = riscv_pmu_stop,
.read = riscv_pmu_read,
};
return pmu;
out_free_pmu:
kfree(pmu);
out:
return NULL;
}
| linux-master | drivers/perf/riscv_pmu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2017 NXP
* Copyright 2016 Freescale Semiconductor, Inc.
*/
#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define COUNTER_CNTL 0x0
#define COUNTER_READ 0x20
#define COUNTER_DPCR1 0x30
#define CNTL_OVER 0x1
#define CNTL_CLEAR 0x2
#define CNTL_EN 0x4
#define CNTL_EN_MASK 0xFFFFFFFB
#define CNTL_CLEAR_MASK 0xFFFFFFFD
#define CNTL_OVER_MASK 0xFFFFFFFE
#define CNTL_CP_SHIFT 16
#define CNTL_CP_MASK (0xFF << CNTL_CP_SHIFT)
#define CNTL_CSV_SHIFT 24
#define CNTL_CSV_MASK (0xFFU << CNTL_CSV_SHIFT)
#define EVENT_CYCLES_ID 0
#define EVENT_CYCLES_COUNTER 0
#define NUM_COUNTERS 4
/* For removing bias if cycle counter CNTL.CP is set to 0xf0 */
#define CYCLES_COUNTER_MASK 0x0FFFFFFF
#define AXI_MASKING_REVERT 0xffff0000 /* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */
#define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
#define DDR_PERF_DEV_NAME "imx8_ddr"
#define DDR_CPUHP_CB_NAME DDR_PERF_DEV_NAME "_perf_pmu"
static DEFINE_IDA(ddr_ida);
/* DDR Perf hardware feature */
#define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */
#define DDR_CAP_AXI_ID_FILTER_ENHANCED 0x3 /* support enhanced AXI ID filter */
struct fsl_ddr_devtype_data {
unsigned int quirks; /* quirks needed for different DDR Perf core */
const char *identifier; /* system PMU identifier for userspace */
};
static const struct fsl_ddr_devtype_data imx8_devtype_data;
static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
.quirks = DDR_CAP_AXI_ID_FILTER,
};
static const struct fsl_ddr_devtype_data imx8mq_devtype_data = {
.quirks = DDR_CAP_AXI_ID_FILTER,
.identifier = "i.MX8MQ",
};
static const struct fsl_ddr_devtype_data imx8mm_devtype_data = {
.quirks = DDR_CAP_AXI_ID_FILTER,
.identifier = "i.MX8MM",
};
static const struct fsl_ddr_devtype_data imx8mn_devtype_data = {
.quirks = DDR_CAP_AXI_ID_FILTER,
.identifier = "i.MX8MN",
};
static const struct fsl_ddr_devtype_data imx8mp_devtype_data = {
.quirks = DDR_CAP_AXI_ID_FILTER_ENHANCED,
.identifier = "i.MX8MP",
};
static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
{ .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
{ .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
{ .compatible = "fsl,imx8mq-ddr-pmu", .data = &imx8mq_devtype_data},
{ .compatible = "fsl,imx8mm-ddr-pmu", .data = &imx8mm_devtype_data},
{ .compatible = "fsl,imx8mn-ddr-pmu", .data = &imx8mn_devtype_data},
{ .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);
struct ddr_pmu {
struct pmu pmu;
void __iomem *base;
unsigned int cpu;
struct hlist_node node;
struct device *dev;
struct perf_event *events[NUM_COUNTERS];
enum cpuhp_state cpuhp_state;
const struct fsl_ddr_devtype_data *devtype_data;
int irq;
int id;
int active_counter;
};
static ssize_t ddr_perf_identifier_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
struct ddr_pmu *pmu = dev_get_drvdata(dev);
return sysfs_emit(page, "%s\n", pmu->devtype_data->identifier);
}
static umode_t ddr_perf_identifier_attr_visible(struct kobject *kobj,
struct attribute *attr,
int n)
{
struct device *dev = kobj_to_dev(kobj);
struct ddr_pmu *pmu = dev_get_drvdata(dev);
if (!pmu->devtype_data->identifier)
return 0;
return attr->mode;
};
static struct device_attribute ddr_perf_identifier_attr =
__ATTR(identifier, 0444, ddr_perf_identifier_show, NULL);
static struct attribute *ddr_perf_identifier_attrs[] = {
&ddr_perf_identifier_attr.attr,
NULL,
};
static const struct attribute_group ddr_perf_identifier_attr_group = {
.attrs = ddr_perf_identifier_attrs,
.is_visible = ddr_perf_identifier_attr_visible,
};
enum ddr_perf_filter_capabilities {
PERF_CAP_AXI_ID_FILTER = 0,
PERF_CAP_AXI_ID_FILTER_ENHANCED,
PERF_CAP_AXI_ID_FEAT_MAX,
};
static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap)
{
u32 quirks = pmu->devtype_data->quirks;
switch (cap) {
case PERF_CAP_AXI_ID_FILTER:
return !!(quirks & DDR_CAP_AXI_ID_FILTER);
case PERF_CAP_AXI_ID_FILTER_ENHANCED:
quirks &= DDR_CAP_AXI_ID_FILTER_ENHANCED;
return quirks == DDR_CAP_AXI_ID_FILTER_ENHANCED;
default:
WARN(1, "unknown filter cap %d\n", cap);
}
return 0;
}
static ssize_t ddr_perf_filter_cap_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ddr_pmu *pmu = dev_get_drvdata(dev);
struct dev_ext_attribute *ea =
container_of(attr, struct dev_ext_attribute, attr);
int cap = (long)ea->var;
return sysfs_emit(buf, "%u\n", ddr_perf_filter_cap_get(pmu, cap));
}
#define PERF_EXT_ATTR_ENTRY(_name, _func, _var) \
(&((struct dev_ext_attribute) { \
__ATTR(_name, 0444, _func, NULL), (void *)_var \
}).attr.attr)
#define PERF_FILTER_EXT_ATTR_ENTRY(_name, _var) \
PERF_EXT_ATTR_ENTRY(_name, ddr_perf_filter_cap_show, _var)
static struct attribute *ddr_perf_filter_cap_attr[] = {
PERF_FILTER_EXT_ATTR_ENTRY(filter, PERF_CAP_AXI_ID_FILTER),
PERF_FILTER_EXT_ATTR_ENTRY(enhanced_filter, PERF_CAP_AXI_ID_FILTER_ENHANCED),
NULL,
};
static const struct attribute_group ddr_perf_filter_cap_attr_group = {
.name = "caps",
.attrs = ddr_perf_filter_cap_attr,
};
static ssize_t ddr_perf_cpumask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ddr_pmu *pmu = dev_get_drvdata(dev);
return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}
static struct device_attribute ddr_perf_cpumask_attr =
__ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);
static struct attribute *ddr_perf_cpumask_attrs[] = {
&ddr_perf_cpumask_attr.attr,
NULL,
};
static const struct attribute_group ddr_perf_cpumask_attr_group = {
.attrs = ddr_perf_cpumask_attrs,
};
static ssize_t
ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
char *page)
{
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id) \
PMU_EVENT_ATTR_ID(_name, ddr_pmu_event_show, _id)
static struct attribute *ddr_perf_events_attrs[] = {
IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID),
IMX8_DDR_PMU_EVENT_ATTR(selfresh, 0x01),
IMX8_DDR_PMU_EVENT_ATTR(read-accesses, 0x04),
IMX8_DDR_PMU_EVENT_ATTR(write-accesses, 0x05),
IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08),
IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09),
IMX8_DDR_PMU_EVENT_ATTR(lp-read-credit-cnt, 0x10),
IMX8_DDR_PMU_EVENT_ATTR(hp-read-credit-cnt, 0x11),
IMX8_DDR_PMU_EVENT_ATTR(write-credit-cnt, 0x12),
IMX8_DDR_PMU_EVENT_ATTR(read-command, 0x20),
IMX8_DDR_PMU_EVENT_ATTR(write-command, 0x21),
IMX8_DDR_PMU_EVENT_ATTR(read-modify-write-command, 0x22),
IMX8_DDR_PMU_EVENT_ATTR(hp-read, 0x23),
IMX8_DDR_PMU_EVENT_ATTR(hp-req-nocredit, 0x24),
IMX8_DDR_PMU_EVENT_ATTR(hp-xact-credit, 0x25),
IMX8_DDR_PMU_EVENT_ATTR(lp-req-nocredit, 0x26),
IMX8_DDR_PMU_EVENT_ATTR(lp-xact-credit, 0x27),
IMX8_DDR_PMU_EVENT_ATTR(wr-xact-credit, 0x29),
IMX8_DDR_PMU_EVENT_ATTR(read-cycles, 0x2a),
IMX8_DDR_PMU_EVENT_ATTR(write-cycles, 0x2b),
IMX8_DDR_PMU_EVENT_ATTR(read-write-transition, 0x30),
IMX8_DDR_PMU_EVENT_ATTR(precharge, 0x31),
IMX8_DDR_PMU_EVENT_ATTR(activate, 0x32),
IMX8_DDR_PMU_EVENT_ATTR(load-mode, 0x33),
IMX8_DDR_PMU_EVENT_ATTR(perf-mwr, 0x34),
IMX8_DDR_PMU_EVENT_ATTR(read, 0x35),
IMX8_DDR_PMU_EVENT_ATTR(read-activate, 0x36),
IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41),
IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42),
NULL,
};
static const struct attribute_group ddr_perf_events_attr_group = {
.name = "events",
.attrs = ddr_perf_events_attrs,
};
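/*
 * Events and the axi_id/axi_mask filter fields are exposed through
 * sysfs; a filtered count might look roughly like:
 * perf stat -a -e imx8_ddr0/axid-read,axi_mask=0xffff,axi_id=0x10/ sleep 1
 * (instance number and values are illustrative only).
 */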
PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(axi_id, "config1:0-15");
PMU_FORMAT_ATTR(axi_mask, "config1:16-31");
static struct attribute *ddr_perf_format_attrs[] = {
&format_attr_event.attr,
&format_attr_axi_id.attr,
&format_attr_axi_mask.attr,
NULL,
};
static const struct attribute_group ddr_perf_format_attr_group = {
.name = "format",
.attrs = ddr_perf_format_attrs,
};
static const struct attribute_group *attr_groups[] = {
&ddr_perf_events_attr_group,
&ddr_perf_format_attr_group,
&ddr_perf_cpumask_attr_group,
&ddr_perf_filter_cap_attr_group,
&ddr_perf_identifier_attr_group,
NULL,
};
static bool ddr_perf_is_filtered(struct perf_event *event)
{
return event->attr.config == 0x41 || event->attr.config == 0x42;
}
static u32 ddr_perf_filter_val(struct perf_event *event)
{
return event->attr.config1;
}
static bool ddr_perf_filters_compatible(struct perf_event *a,
struct perf_event *b)
{
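/*
 * Only filtered (axid-*) events can conflict: the AXI ID filter value
 * is shared by the whole PMU, so all filtered events must agree on it.
 */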
if (!ddr_perf_is_filtered(a))
return true;
if (!ddr_perf_is_filtered(b))
return true;
return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
}
static bool ddr_perf_is_enhanced_filtered(struct perf_event *event)
{
unsigned int filt;
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED;
return (filt == DDR_CAP_AXI_ID_FILTER_ENHANCED) &&
ddr_perf_is_filtered(event);
}
static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
{
int i;
/*
 * Always map the cycle event to counter 0: the cycles counter is
 * dedicated to the cycle event and can't be used for other events.
 */
if (event == EVENT_CYCLES_ID) {
if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
return EVENT_CYCLES_COUNTER;
else
return -ENOENT;
}
for (i = 1; i < NUM_COUNTERS; i++) {
if (pmu->events[i] == NULL)
return i;
}
return -ENOENT;
}
static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
{
pmu->events[counter] = NULL;
}
static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
{
struct perf_event *event = pmu->events[counter];
void __iomem *base = pmu->base;
/*
 * Return bytes instead of bursts for a DDR transaction for the
 * axid-read and axid-write events if the PMU core supports the
 * enhanced filter.
 */
base += ddr_perf_is_enhanced_filtered(event) ? COUNTER_DPCR1 :
COUNTER_READ;
return readl_relaxed(base + counter * 4);
}
static int ddr_perf_event_init(struct perf_event *event)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
struct perf_event *sibling;
if (event->attr.type != event->pmu->type)
return -ENOENT;
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EOPNOTSUPP;
if (event->cpu < 0) {
dev_warn(pmu->dev, "Can't provide per-task data!\n");
return -EOPNOTSUPP;
}
/*
* We must NOT create groups containing mixed PMUs, although software
* events are acceptable (for example to create a CCN group
* periodically read when a hrtimer aka cpu-clock leader triggers).
*/
if (event->group_leader->pmu != event->pmu &&
!is_software_event(event->group_leader))
return -EINVAL;
if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
if (!ddr_perf_filters_compatible(event, event->group_leader))
return -EINVAL;
for_each_sibling_event(sibling, event->group_leader) {
if (!ddr_perf_filters_compatible(event, sibling))
return -EINVAL;
}
}
for_each_sibling_event(sibling, event->group_leader) {
if (sibling->pmu != event->pmu &&
!is_software_event(sibling))
return -EINVAL;
}
event->cpu = pmu->cpu;
hwc->idx = -1;
return 0;
}
static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
int counter, bool enable)
{
u8 reg = counter * 4 + COUNTER_CNTL;
int val;
if (enable) {
/*
 * The cycle counter is special: clearing it requires writing 0 and
 * then 1 to the CLEAR bit. Other counters only need a 0 written to
 * the CLEAR bit, which the hardware then turns into 1. The enable
 * flow below is harmless for all counters.
 */
writel(0, pmu->base + reg);
val = CNTL_EN | CNTL_CLEAR;
val |= FIELD_PREP(CNTL_CSV_MASK, config);
/*
* On i.MX8MP we need to bias the cycle counter to overflow more often.
* We do this by initializing bits [23:16] of the counter value via the
* COUNTER_CTRL Counter Parameter (CP) field.
*/
if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) {
if (counter == EVENT_CYCLES_COUNTER)
val |= FIELD_PREP(CNTL_CP_MASK, 0xf0);
}
writel(val, pmu->base + reg);
} else {
/* Disable counter */
val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK;
writel(val, pmu->base + reg);
}
}
static bool ddr_perf_counter_overflow(struct ddr_pmu *pmu, int counter)
{
int val;
val = readl_relaxed(pmu->base + counter * 4 + COUNTER_CNTL);
return val & CNTL_OVER;
}
static void ddr_perf_counter_clear(struct ddr_pmu *pmu, int counter)
{
u8 reg = counter * 4 + COUNTER_CNTL;
int val;
val = readl_relaxed(pmu->base + reg);
val &= ~CNTL_CLEAR;
writel(val, pmu->base + reg);
val |= CNTL_CLEAR;
writel(val, pmu->base + reg);
}
static void ddr_perf_event_update(struct perf_event *event)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u64 new_raw_count;
int counter = hwc->idx;
int ret;
new_raw_count = ddr_perf_read_counter(pmu, counter);
/* Remove the bias applied in ddr_perf_counter_enable(). */
if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) {
if (counter == EVENT_CYCLES_COUNTER)
new_raw_count &= CYCLES_COUNTER_MASK;
}
local64_add(new_raw_count, &event->count);
/*
 * On legacy SoCs the event counters keep counting after an overflow,
 * so there is no need to clear them. On newer SoCs the event counters
 * stop counting on overflow and must be cleared before they can count
 * again.
 */
if (counter != EVENT_CYCLES_COUNTER) {
ret = ddr_perf_counter_overflow(pmu, counter);
if (ret)
dev_warn_ratelimited(pmu->dev, "events lost due to counter overflow (config 0x%llx)\n",
event->attr.config);
}
/* clear counter every time for both cycle counter and event counter */
ddr_perf_counter_clear(pmu, counter);
}
static void ddr_perf_event_start(struct perf_event *event, int flags)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
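/*
 * Counter 0 (cycles) must run for the other counters to count, so
 * enable it when the first event starts and disable it when the last
 * one stops.
 */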
local64_set(&hwc->prev_count, 0);
ddr_perf_counter_enable(pmu, event->attr.config, counter, true);
if (!pmu->active_counter++)
ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
EVENT_CYCLES_COUNTER, true);
hwc->state = 0;
}
static int ddr_perf_event_add(struct perf_event *event, int flags)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int counter;
int cfg = event->attr.config;
int cfg1 = event->attr.config1;
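/*
 * The AXI ID filter register (DPCR1) is shared, so all currently
 * scheduled filtered events must use the same filter value.
 */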
if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
int i;
for (i = 1; i < NUM_COUNTERS; i++) {
if (pmu->events[i] &&
!ddr_perf_filters_compatible(event, pmu->events[i]))
return -EINVAL;
}
if (ddr_perf_is_filtered(event)) {
/* revert axi id masking(axi_mask) value */
cfg1 ^= AXI_MASKING_REVERT;
writel(cfg1, pmu->base + COUNTER_DPCR1);
}
}
counter = ddr_perf_alloc_counter(pmu, cfg);
if (counter < 0) {
dev_dbg(pmu->dev, "There are not enough counters\n");
return -EOPNOTSUPP;
}
pmu->events[counter] = event;
hwc->idx = counter;
hwc->state |= PERF_HES_STOPPED;
if (flags & PERF_EF_START)
ddr_perf_event_start(event, flags);
return 0;
}
static void ddr_perf_event_stop(struct perf_event *event, int flags)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
ddr_perf_event_update(event);
if (!--pmu->active_counter)
ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
EVENT_CYCLES_COUNTER, false);
hwc->state |= PERF_HES_STOPPED;
}
static void ddr_perf_event_del(struct perf_event *event, int flags)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
ddr_perf_event_stop(event, PERF_EF_UPDATE);
ddr_perf_free_counter(pmu, counter);
hwc->idx = -1;
}
static void ddr_perf_pmu_enable(struct pmu *pmu)
{
}
static void ddr_perf_pmu_disable(struct pmu *pmu)
{
}
static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
struct device *dev)
{
*pmu = (struct ddr_pmu) {
.pmu = (struct pmu) {
.module = THIS_MODULE,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
.attr_groups = attr_groups,
.event_init = ddr_perf_event_init,
.add = ddr_perf_event_add,
.del = ddr_perf_event_del,
.start = ddr_perf_event_start,
.stop = ddr_perf_event_stop,
.read = ddr_perf_event_update,
.pmu_enable = ddr_perf_pmu_enable,
.pmu_disable = ddr_perf_pmu_disable,
},
.base = base,
.dev = dev,
};
pmu->id = ida_alloc(&ddr_ida, GFP_KERNEL);
return pmu->id;
}
static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
{
int i;
struct ddr_pmu *pmu = (struct ddr_pmu *) p;
struct perf_event *event;
/* All counters stop if the cycle counter is disabled */
ddr_perf_counter_enable(pmu,
EVENT_CYCLES_ID,
EVENT_CYCLES_COUNTER,
false);
/*
 * When the cycle counter overflows, all counters are stopped and an
 * IRQ is raised. If any other counter overflows, it keeps counting and
 * no IRQ is raised. On newer SoCs such as i.MX8MP, however, an event
 * counter stops when it overflows, so the cycle counter is used to
 * catch event counter overflows.
 *
 * Cycles occur at least 4 times as often as other events, so we
 * can update all events on a cycle counter overflow and not
 * lose events.
 */
for (i = 0; i < NUM_COUNTERS; i++) {
if (!pmu->events[i])
continue;
event = pmu->events[i];
ddr_perf_event_update(event);
}
ddr_perf_counter_enable(pmu,
EVENT_CYCLES_ID,
EVENT_CYCLES_COUNTER,
true);
return IRQ_HANDLED;
}
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
int target;
if (cpu != pmu->cpu)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&pmu->pmu, cpu, target);
pmu->cpu = target;
WARN_ON(irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu)));
return 0;
}
static int ddr_perf_probe(struct platform_device *pdev)
{
struct ddr_pmu *pmu;
struct device_node *np;
void __iomem *base;
char *name;
int num;
int ret;
int irq;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
np = pdev->dev.of_node;
pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
if (!pmu)
return -ENOMEM;
num = ddr_perf_init(pmu, base, &pdev->dev);
platform_set_drvdata(pdev, pmu);
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
num);
if (!name) {
ret = -ENOMEM;
goto cpuhp_state_err;
}
pmu->devtype_data = of_device_get_match_data(&pdev->dev);
pmu->cpu = raw_smp_processor_id();
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
DDR_CPUHP_CB_NAME,
NULL,
ddr_perf_offline_cpu);
if (ret < 0) {
dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
goto cpuhp_state_err;
}
pmu->cpuhp_state = ret;
/* Register the pmu instance for cpu hotplug */
ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
if (ret) {
dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
goto cpuhp_instance_err;
}
/* Request irq */
irq = of_irq_get(np, 0);
if (irq < 0) {
dev_err(&pdev->dev, "Failed to get irq: %d", irq);
ret = irq;
goto ddr_perf_err;
}
ret = devm_request_irq(&pdev->dev, irq,
ddr_perf_irq_handler,
IRQF_NOBALANCING | IRQF_NO_THREAD,
DDR_CPUHP_CB_NAME,
pmu);
if (ret < 0) {
dev_err(&pdev->dev, "Request irq failed: %d", ret);
goto ddr_perf_err;
}
pmu->irq = irq;
ret = irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu));
if (ret) {
dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
goto ddr_perf_err;
}
ret = perf_pmu_register(&pmu->pmu, name, -1);
if (ret)
goto ddr_perf_err;
return 0;
ddr_perf_err:
cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_instance_err:
cpuhp_remove_multi_state(pmu->cpuhp_state);
cpuhp_state_err:
ida_free(&ddr_ida, pmu->id);
dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
return ret;
}
static int ddr_perf_remove(struct platform_device *pdev)
{
struct ddr_pmu *pmu = platform_get_drvdata(pdev);
cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_remove_multi_state(pmu->cpuhp_state);
perf_pmu_unregister(&pmu->pmu);
ida_free(&ddr_ida, pmu->id);
return 0;
}
static struct platform_driver imx_ddr_pmu_driver = {
.driver = {
.name = "imx-ddr-pmu",
.of_match_table = imx_ddr_pmu_dt_ids,
.suppress_bind_attrs = true,
},
.probe = ddr_perf_probe,
.remove = ddr_perf_remove,
};
module_platform_driver(imx_ddr_pmu_driver);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/perf/fsl_imx8_ddr_perf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* platform_device probing code for ARM performance counters.
*
* Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
* Copyright (C) 2010 ARM Ltd., Will Deacon <[email protected]>
*/
#define pr_fmt(fmt) "hw perfevents: " fmt
#define dev_fmt pr_fmt
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/kconfig.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/smp.h>
static int probe_current_pmu(struct arm_pmu *pmu,
const struct pmu_probe_info *info)
{
int cpu = get_cpu();
unsigned int cpuid = read_cpuid_id();
int ret = -ENODEV;
pr_info("probing PMU on CPU %d\n", cpu);
for (; info->init != NULL; info++) {
if ((cpuid & info->mask) != info->cpuid)
continue;
ret = info->init(pmu);
break;
}
put_cpu();
return ret;
}
static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq)
{
int cpu, ret;
struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
if (ret)
return ret;
for_each_cpu(cpu, &pmu->supported_cpus)
per_cpu(hw_events->irq, cpu) = irq;
return 0;
}
static bool pmu_has_irq_affinity(struct device_node *node)
{
return !!of_find_property(node, "interrupt-affinity", NULL);
}
static int pmu_parse_irq_affinity(struct device *dev, int i)
{
struct device_node *dn;
int cpu;
/*
* If we don't have an interrupt-affinity property, we guess irq
* affinity matches our logical CPU order, as we used to assume.
* This is fragile, so we'll warn in pmu_parse_irqs().
*/
if (!pmu_has_irq_affinity(dev->of_node))
return i;
dn = of_parse_phandle(dev->of_node, "interrupt-affinity", i);
if (!dn) {
dev_warn(dev, "failed to parse interrupt-affinity[%d]\n", i);
return -EINVAL;
}
cpu = of_cpu_node_to_id(dn);
if (cpu < 0) {
dev_warn(dev, "failed to find logical CPU for %pOFn\n", dn);
cpu = nr_cpu_ids;
}
of_node_put(dn);
return cpu;
}
static int pmu_parse_irqs(struct arm_pmu *pmu)
{
int i = 0, num_irqs;
struct platform_device *pdev = pmu->plat_device;
struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
struct device *dev = &pdev->dev;
num_irqs = platform_irq_count(pdev);
if (num_irqs < 0)
return dev_err_probe(dev, num_irqs, "unable to count PMU IRQs\n");
/*
* In this case we have no idea which CPUs are covered by the PMU.
* To match our prior behaviour, we assume all CPUs in this case.
*/
if (num_irqs == 0) {
dev_warn(dev, "no irqs for PMU, sampling events not supported\n");
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
cpumask_setall(&pmu->supported_cpus);
return 0;
}
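/* A single per-CPU interrupt (PPI) covers every CPU in its interrupt partition */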
if (num_irqs == 1) {
int irq = platform_get_irq(pdev, 0);
if ((irq > 0) && irq_is_percpu_devid(irq))
return pmu_parse_percpu_irq(pmu, irq);
}
if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(dev->of_node))
dev_warn(dev, "no interrupt-affinity property, guessing.\n");
for (i = 0; i < num_irqs; i++) {
int cpu, irq;
irq = platform_get_irq(pdev, i);
if (WARN_ON(irq <= 0))
continue;
if (irq_is_percpu_devid(irq)) {
dev_warn(dev, "multiple PPIs or mismatched SPI/PPI detected\n");
return -EINVAL;
}
cpu = pmu_parse_irq_affinity(dev, i);
if (cpu < 0)
return cpu;
if (cpu >= nr_cpu_ids)
continue;
if (per_cpu(hw_events->irq, cpu)) {
dev_warn(dev, "multiple PMU IRQs for the same CPU detected\n");
return -EINVAL;
}
per_cpu(hw_events->irq, cpu) = irq;
cpumask_set_cpu(cpu, &pmu->supported_cpus);
}
return 0;
}
static int armpmu_request_irqs(struct arm_pmu *armpmu)
{
struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
int cpu, err = 0;
for_each_cpu(cpu, &armpmu->supported_cpus) {
int irq = per_cpu(hw_events->irq, cpu);
if (!irq)
continue;
err = armpmu_request_irq(irq, cpu);
if (err)
break;
}
return err;
}
static void armpmu_free_irqs(struct arm_pmu *armpmu)
{
int cpu;
struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
for_each_cpu(cpu, &armpmu->supported_cpus) {
int irq = per_cpu(hw_events->irq, cpu);
armpmu_free_irq(irq, cpu);
}
}
int arm_pmu_device_probe(struct platform_device *pdev,
const struct of_device_id *of_table,
const struct pmu_probe_info *probe_table)
{
armpmu_init_fn init_fn;
struct device *dev = &pdev->dev;
struct arm_pmu *pmu;
int ret = -ENODEV;
pmu = armpmu_alloc();
if (!pmu)
return -ENOMEM;
pmu->plat_device = pdev;
ret = pmu_parse_irqs(pmu);
if (ret)
goto out_free;
init_fn = of_device_get_match_data(dev);
if (init_fn) {
pmu->secure_access = of_property_read_bool(dev->of_node,
"secure-reg-access");
/* arm64 systems boot only as non-secure */
if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
dev_warn(dev, "ignoring \"secure-reg-access\" property for arm64\n");
pmu->secure_access = false;
}
ret = init_fn(pmu);
} else if (probe_table) {
cpumask_setall(&pmu->supported_cpus);
ret = probe_current_pmu(pmu, probe_table);
}
if (ret) {
dev_err(dev, "failed to probe PMU!\n");
goto out_free;
}
ret = armpmu_request_irqs(pmu);
if (ret)
goto out_free_irqs;
ret = armpmu_register(pmu);
if (ret) {
dev_err(dev, "failed to register PMU devices!\n");
goto out_free_irqs;
}
return 0;
out_free_irqs:
armpmu_free_irqs(pmu);
out_free:
armpmu_free(pmu);
return ret;
}
| linux-master | drivers/perf/arm_pmu_platform.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for the L3 cache PMUs in Qualcomm Technologies chips.
*
* The driver supports a distributed cache architecture where the overall
* cache for a socket is comprised of multiple slices each with its own PMU.
* Access to each individual PMU is provided even though all CPUs share all
* the slices. User space needs to aggregate to individual counts to provide
* a global picture.
*
* See Documentation/admin-guide/perf/qcom_l3_pmu.rst for more details.
*
* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*/
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
/*
* General constants
*/
/* Number of counters on each PMU */
#define L3_NUM_COUNTERS 8
/* Mask for the event type field within perf_event_attr.config and EVTYPE reg */
#define L3_EVTYPE_MASK 0xFF
/*
* Bit position of the 'long counter' flag within perf_event_attr.config.
* Reserve some space between the event type and this flag to allow expansion
* in the event type field.
*/
#define L3_EVENT_LC_BIT 32
/*
* Register offsets
*/
/* Perfmon registers */
#define L3_HML3_PM_CR 0x000
#define L3_HML3_PM_EVCNTR(__cntr) (0x420 + ((__cntr) & 0x7) * 8)
#define L3_HML3_PM_CNTCTL(__cntr) (0x120 + ((__cntr) & 0x7) * 8)
#define L3_HML3_PM_EVTYPE(__cntr) (0x220 + ((__cntr) & 0x7) * 8)
#define L3_HML3_PM_FILTRA 0x300
#define L3_HML3_PM_FILTRB 0x308
#define L3_HML3_PM_FILTRC 0x310
#define L3_HML3_PM_FILTRAM 0x304
#define L3_HML3_PM_FILTRBM 0x30C
#define L3_HML3_PM_FILTRCM 0x314
/* Basic counter registers */
#define L3_M_BC_CR 0x500
#define L3_M_BC_SATROLL_CR 0x504
#define L3_M_BC_CNTENSET 0x508
#define L3_M_BC_CNTENCLR 0x50C
#define L3_M_BC_INTENSET 0x510
#define L3_M_BC_INTENCLR 0x514
#define L3_M_BC_GANG 0x718
#define L3_M_BC_OVSR 0x740
#define L3_M_BC_IRQCTL 0x96C
/*
* Bit field definitions
*/
/* L3_HML3_PM_CR */
#define PM_CR_RESET (0)
/* L3_HML3_PM_XCNTCTL/L3_HML3_PM_CNTCTLx */
#define PMCNT_RESET (0)
/* L3_HML3_PM_EVTYPEx */
#define EVSEL(__val) ((__val) & L3_EVTYPE_MASK)
/* Reset value for all the filter registers */
#define PM_FLTR_RESET (0)
/* L3_M_BC_CR */
#define BC_RESET (1UL << 1)
#define BC_ENABLE (1UL << 0)
/* L3_M_BC_SATROLL_CR */
#define BC_SATROLL_CR_RESET (0)
/* L3_M_BC_CNTENSET */
#define PMCNTENSET(__cntr) (1UL << ((__cntr) & 0x7))
/* L3_M_BC_CNTENCLR */
#define PMCNTENCLR(__cntr) (1UL << ((__cntr) & 0x7))
#define BC_CNTENCLR_RESET (0xFF)
/* L3_M_BC_INTENSET */
#define PMINTENSET(__cntr) (1UL << ((__cntr) & 0x7))
/* L3_M_BC_INTENCLR */
#define PMINTENCLR(__cntr) (1UL << ((__cntr) & 0x7))
#define BC_INTENCLR_RESET (0xFF)
/* L3_M_BC_GANG */
#define GANG_EN(__cntr) (1UL << ((__cntr) & 0x7))
#define BC_GANG_RESET (0)
/* L3_M_BC_OVSR */
#define PMOVSRCLR(__cntr) (1UL << ((__cntr) & 0x7))
#define PMOVSRCLR_RESET (0xFF)
/* L3_M_BC_IRQCTL */
#define PMIRQONMSBEN(__cntr) (1UL << ((__cntr) & 0x7))
#define BC_IRQCTL_RESET (0x0)
/*
* Events
*/
#define L3_EVENT_CYCLES 0x01
#define L3_EVENT_READ_HIT 0x20
#define L3_EVENT_READ_MISS 0x21
#define L3_EVENT_READ_HIT_D 0x22
#define L3_EVENT_READ_MISS_D 0x23
#define L3_EVENT_WRITE_HIT 0x24
#define L3_EVENT_WRITE_MISS 0x25
/*
* Decoding of settings from perf_event_attr
*
* The config format for perf events is:
* - config: bits 0-7: event type
* bit 32: HW counter size requested, 0: 32 bits, 1: 64 bits
*/
static inline u32 get_event_type(struct perf_event *event)
{
return (event->attr.config) & L3_EVTYPE_MASK;
}
static inline bool event_uses_long_counter(struct perf_event *event)
{
return !!(event->attr.config & BIT_ULL(L3_EVENT_LC_BIT));
}
static inline int event_num_counters(struct perf_event *event)
{
return event_uses_long_counter(event) ? 2 : 1;
}
/*
* Main PMU, inherits from the core perf PMU type
*/
struct l3cache_pmu {
struct pmu pmu;
struct hlist_node node;
void __iomem *regs;
struct perf_event *events[L3_NUM_COUNTERS];
unsigned long used_mask[BITS_TO_LONGS(L3_NUM_COUNTERS)];
cpumask_t cpumask;
};
#define to_l3cache_pmu(p) (container_of(p, struct l3cache_pmu, pmu))
/*
* Type used to group hardware counter operations
*
* Used to implement two types of hardware counters, standard (32bits) and
* long (64bits). The hardware supports counter chaining which we use to
* implement long counters. This support is exposed via the 'lc' flag field
* in perf_event_attr.config.
*/
struct l3cache_event_ops {
/* Called to start event monitoring */
void (*start)(struct perf_event *event);
/* Called to stop event monitoring */
void (*stop)(struct perf_event *event, int flags);
/* Called to update the perf_event */
void (*update)(struct perf_event *event);
};
/*
* Implementation of long counter operations
*
* 64bit counters are implemented by chaining two of the 32bit physical
* counters. The PMU only supports chaining of adjacent even/odd pairs
* and for simplicity the driver always configures the odd counter to
* count the overflows of the lower-numbered even counter. Note that since
* the resulting hardware counter is 64bits no IRQs are required to maintain
* the software counter which is also 64bits.
*/
static void qcom_l3_cache__64bit_counter_start(struct perf_event *event)
{
struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
int idx = event->hw.idx;
u32 evsel = get_event_type(event);
u32 gang;
/* Set the odd counter to count the overflows of the even counter */
gang = readl_relaxed(l3pmu->regs + L3_M_BC_GANG);
gang |= GANG_EN(idx + 1);
writel_relaxed(gang, l3pmu->regs + L3_M_BC_GANG);
/* Initialize the hardware counters and reset prev_count */
local64_set(&event->hw.prev_count, 0);
writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1));
writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx));
/*
* Set the event types, the upper half must use zero and the lower
* half the actual event type
*/
writel_relaxed(EVSEL(0), l3pmu->regs + L3_HML3_PM_EVTYPE(idx + 1));
writel_relaxed(EVSEL(evsel), l3pmu->regs + L3_HML3_PM_EVTYPE(idx));
/* Finally, enable the counters */
writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx + 1));
writel_relaxed(PMCNTENSET(idx + 1), l3pmu->regs + L3_M_BC_CNTENSET);
writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx));
writel_relaxed(PMCNTENSET(idx), l3pmu->regs + L3_M_BC_CNTENSET);
}
static void qcom_l3_cache__64bit_counter_stop(struct perf_event *event,
int flags)
{
struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
int idx = event->hw.idx;
u32 gang = readl_relaxed(l3pmu->regs + L3_M_BC_GANG);
/* Disable the counters */
writel_relaxed(PMCNTENCLR(idx), l3pmu->regs + L3_M_BC_CNTENCLR);
writel_relaxed(PMCNTENCLR(idx + 1), l3pmu->regs + L3_M_BC_CNTENCLR);
/* Disable chaining */
writel_relaxed(gang & ~GANG_EN(idx + 1), l3pmu->regs + L3_M_BC_GANG);
}
static void qcom_l3_cache__64bit_counter_update(struct perf_event *event)
{
struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
int idx = event->hw.idx;
u32 hi, lo;
u64 prev, new;
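/*
 * Re-read the upper half until it is stable so the pair of 32-bit
 * reads cannot be torn by a carry from the lower half.
 */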
do {
prev = local64_read(&event->hw.prev_count);
do {
hi = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1));
lo = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx));
} while (hi != readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1)));
new = ((u64)hi << 32) | lo;
} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);
local64_add(new - prev, &event->count);
}
static const struct l3cache_event_ops event_ops_long = {
.start = qcom_l3_cache__64bit_counter_start,
.stop = qcom_l3_cache__64bit_counter_stop,
.update = qcom_l3_cache__64bit_counter_update,
};
/*
* Implementation of standard counter operations
*
* 32-bit counters use a single physical counter and a hardware feature that
* asserts the overflow IRQ on the toggling of the most significant bit in
* the counter. This feature allows the counters to be left free-running
* without needing the usual reprogramming required to properly handle races
* during concurrent calls to update.
*/
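/*
 * Worked example (illustrative): the overflow IRQ fires on every MSB
 * toggle, i.e. at least once per 2^31 increments, so update() always
 * runs before the counter can wrap all the way past prev_count. The
 * unsigned 32-bit subtraction in the update routine therefore yields
 * the correct delta even across a wrap, e.g. prev = 0xfffffff0 and
 * new = 0x00000010 give new - prev = 0x20.
 */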
static void qcom_l3_cache__32bit_counter_start(struct perf_event *event)
{
struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
int idx = event->hw.idx;
u32 evsel = get_event_type(event);
u32 irqctl = readl_relaxed(l3pmu->regs + L3_M_BC_IRQCTL);
/* Set the counter to assert the overflow IRQ on MSB toggling */
writel_relaxed(irqctl | PMIRQONMSBEN(idx), l3pmu->regs + L3_M_BC_IRQCTL);
/* Initialize the hardware counter and reset prev_count */
local64_set(&event->hw.prev_count, 0);
writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx));
/* Set the event type */
writel_relaxed(EVSEL(evsel), l3pmu->regs + L3_HML3_PM_EVTYPE(idx));
/* Enable interrupt generation by this counter */
writel_relaxed(PMINTENSET(idx), l3pmu->regs + L3_M_BC_INTENSET);
/* Finally, enable the counter */
writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx));
writel_relaxed(PMCNTENSET(idx), l3pmu->regs + L3_M_BC_CNTENSET);
}
static void qcom_l3_cache__32bit_counter_stop(struct perf_event *event,
int flags)
{
struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
int idx = event->hw.idx;
u32 irqctl = readl_relaxed(l3pmu->regs + L3_M_BC_IRQCTL);
/* Disable the counter */
writel_relaxed(PMCNTENCLR(idx), l3pmu->regs + L3_M_BC_CNTENCLR);
/* Disable interrupt generation by this counter */
writel_relaxed(PMINTENCLR(idx), l3pmu->regs + L3_M_BC_INTENCLR);
/* Set the counter to not assert the overflow IRQ on MSB toggling */
writel_relaxed(irqctl & ~PMIRQONMSBEN(idx), l3pmu->regs + L3_M_BC_IRQCTL);
}
static void qcom_l3_cache__32bit_counter_update(struct perf_event *event)
{
struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
int idx = event->hw.idx;
u32 prev, new;
do {
prev = local64_read(&event->hw.prev_count);
new = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx));
} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);
local64_add(new - prev, &event->count);
}
static const struct l3cache_event_ops event_ops_std = {
.start = qcom_l3_cache__32bit_counter_start,
.stop = qcom_l3_cache__32bit_counter_stop,
.update = qcom_l3_cache__32bit_counter_update,
};
/* Retrieve the appropriate operations for the given event */
static
const struct l3cache_event_ops *l3cache_event_get_ops(struct perf_event *event)
{
if (event_uses_long_counter(event))
return &event_ops_long;
else
return &event_ops_std;
}
/*
* Top level PMU functions.
*/
static inline void qcom_l3_cache__init(struct l3cache_pmu *l3pmu)
{
int i;
writel_relaxed(BC_RESET, l3pmu->regs + L3_M_BC_CR);
/*
* Use writel for the first programming command to ensure the basic
* counter unit is stopped before proceeding
*/
writel(BC_SATROLL_CR_RESET, l3pmu->regs + L3_M_BC_SATROLL_CR);
writel_relaxed(BC_CNTENCLR_RESET, l3pmu->regs + L3_M_BC_CNTENCLR);
writel_relaxed(BC_INTENCLR_RESET, l3pmu->regs + L3_M_BC_INTENCLR);
writel_relaxed(PMOVSRCLR_RESET, l3pmu->regs + L3_M_BC_OVSR);
writel_relaxed(BC_GANG_RESET, l3pmu->regs + L3_M_BC_GANG);
writel_relaxed(BC_IRQCTL_RESET, l3pmu->regs + L3_M_BC_IRQCTL);
writel_relaxed(PM_CR_RESET, l3pmu->regs + L3_HML3_PM_CR);
for (i = 0; i < L3_NUM_COUNTERS; ++i) {
writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(i));
writel_relaxed(EVSEL(0), l3pmu->regs + L3_HML3_PM_EVTYPE(i));
}
writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRA);
writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRAM);
writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRB);
writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRBM);
writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRC);
writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRCM);
/*
* Use writel here to ensure all programming commands are done
* before proceeding
*/
writel(BC_ENABLE, l3pmu->regs + L3_M_BC_CR);
}
static irqreturn_t qcom_l3_cache__handle_irq(int irq_num, void *data)
{
struct l3cache_pmu *l3pmu = data;
/* Read the overflow status register */
long status = readl_relaxed(l3pmu->regs + L3_M_BC_OVSR);
int idx;
if (status == 0)
return IRQ_NONE;
/* Clear the bits we read on the overflow status register */
writel_relaxed(status, l3pmu->regs + L3_M_BC_OVSR);
for_each_set_bit(idx, &status, L3_NUM_COUNTERS) {
struct perf_event *event;
const struct l3cache_event_ops *ops;
event = l3pmu->events[idx];
if (!event)
continue;
/*
* Since the IRQ is not enabled for events using long counters
* we should never see one of those here; however, be consistent
* and use the ops indirection as in the other operations.
*/
ops = l3cache_event_get_ops(event);
ops->update(event);
}
return IRQ_HANDLED;
}
/*
* Implementation of abstract pmu functionality required by
* the core perf events code.
*/
static void qcom_l3_cache__pmu_enable(struct pmu *pmu)
{
struct l3cache_pmu *l3pmu = to_l3cache_pmu(pmu);
/* Ensure the other programming commands are observed before enabling */
wmb();
writel_relaxed(BC_ENABLE, l3pmu->regs + L3_M_BC_CR);
}
static void qcom_l3_cache__pmu_disable(struct pmu *pmu)
{
struct l3cache_pmu *l3pmu = to_l3cache_pmu(pmu);
writel_relaxed(0, l3pmu->regs + L3_M_BC_CR);
/* Ensure the basic counter unit is stopped before proceeding */
wmb();
}
/*
* We must NOT create groups containing events from multiple hardware PMUs,
* although mixing different software and hardware PMUs is allowed.
*/
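/*
 * For example (illustrative): grouping an l3cache event with a software
 * event such as context switches is accepted, whereas grouping it with
 * an event from a different hardware PMU (e.g. a CPU core PMU event) is
 * rejected by the checks below.
 */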
static bool qcom_l3_cache__validate_event_group(struct perf_event *event)
{
struct perf_event *leader = event->group_leader;
struct perf_event *sibling;
int counters = 0;
if (leader->pmu != event->pmu && !is_software_event(leader))
return false;
counters = event_num_counters(event);
counters += event_num_counters(leader);
for_each_sibling_event(sibling, leader) {
if (is_software_event(sibling))
continue;
if (sibling->pmu != event->pmu)
return false;
counters += event_num_counters(sibling);
}
/*
* If the group requires more counters than the HW has, it
* cannot ever be scheduled.
*/
return counters <= L3_NUM_COUNTERS;
}
static int qcom_l3_cache__event_init(struct perf_event *event)
{
struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
/*
* Is the event for this PMU?
*/
if (event->attr.type != event->pmu->type)
return -ENOENT;
/*
* Sampling not supported since these events are not core-attributable.
*/
if (hwc->sample_period)
return -EINVAL;
/*
* Task mode is not available: the counters are socket-wide, not
* attributable to any CPU, and therefore cannot be attributed per-task.
*/
if (event->cpu < 0)
return -EINVAL;
/* Validate the group */
if (!qcom_l3_cache__validate_event_group(event))
return -EINVAL;
hwc->idx = -1;
/*
* Many perf core operations (e.g. event rotation) operate on a
* single CPU context. This is obvious for CPU PMUs, where one
* expects the same sets of events being observed on all CPUs,
* but can lead to issues for off-core PMUs, like this one, where
* each event could be theoretically assigned to a different CPU.
* To mitigate this, we enforce CPU assignment to one designated
* processor (the one described in the "cpumask" attribute exported
* by the PMU device). perf user space tools honor this and avoid
* opening more than one copy of the events.
*/
event->cpu = cpumask_first(&l3pmu->cpumask);
return 0;
}
static void qcom_l3_cache__event_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
const struct l3cache_event_ops *ops = l3cache_event_get_ops(event);
hwc->state = 0;
ops->start(event);
}
static void qcom_l3_cache__event_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
const struct l3cache_event_ops *ops = l3cache_event_get_ops(event);
if (hwc->state & PERF_HES_STOPPED)
return;
ops->stop(event, flags);
if (flags & PERF_EF_UPDATE)
ops->update(event);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
static int qcom_l3_cache__event_add(struct perf_event *event, int flags)
{
struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int order = event_uses_long_counter(event) ? 1 : 0;
int idx;
/*
* Try to allocate a counter.
*/
idx = bitmap_find_free_region(l3pmu->used_mask, L3_NUM_COUNTERS, order);
if (idx < 0)
/* The counters are all in use. */
return -EAGAIN;
hwc->idx = idx;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
l3pmu->events[idx] = event;
if (flags & PERF_EF_START)
qcom_l3_cache__event_start(event, 0);
/* Propagate changes to the userspace mapping. */
perf_event_update_userpage(event);
return 0;
}
static void qcom_l3_cache__event_del(struct perf_event *event, int flags)
{
struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int order = event_uses_long_counter(event) ? 1 : 0;
/* Stop and clean up */
qcom_l3_cache__event_stop(event, flags | PERF_EF_UPDATE);
l3pmu->events[hwc->idx] = NULL;
bitmap_release_region(l3pmu->used_mask, hwc->idx, order);
/* Propagate changes to the userspace mapping. */
perf_event_update_userpage(event);
}
static void qcom_l3_cache__event_read(struct perf_event *event)
{
const struct l3cache_event_ops *ops = l3cache_event_get_ops(event);
ops->update(event);
}
/*
* Add sysfs attributes
*
* We export:
* - formats, used by perf user space and other tools to configure events
* - events, used by perf user space and other tools to create events
* symbolically, e.g.:
* perf stat -a -e l3cache_0_0/event=read-miss/ ls
* perf stat -a -e l3cache_0_0/event=0x21/ ls
* - cpumask, used by perf user space and other tools to know on which CPUs
* to open the events
*/
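/*
 * Illustrative usage notes (instance and event names are platform
 * dependent):
 * - the 'lc' format flag requests a chained 64-bit counter, e.g.:
 * perf stat -a -e l3cache_0_0/event=read-miss,lc=1/ ls
 * - the designated CPU can be read back from sysfs, e.g.:
 * cat /sys/bus/event_source/devices/l3cache_0_0/cpumask
 */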
/* formats */
static ssize_t l3cache_pmu_format_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *eattr;
eattr = container_of(attr, struct dev_ext_attribute, attr);
return sysfs_emit(buf, "%s\n", (char *) eattr->var);
}
#define L3CACHE_PMU_FORMAT_ATTR(_name, _config) \
(&((struct dev_ext_attribute[]) { \
{ .attr = __ATTR(_name, 0444, l3cache_pmu_format_show, NULL), \
.var = (void *) _config, } \
})[0].attr.attr)
static struct attribute *qcom_l3_cache_pmu_formats[] = {
L3CACHE_PMU_FORMAT_ATTR(event, "config:0-7"),
L3CACHE_PMU_FORMAT_ATTR(lc, "config:" __stringify(L3_EVENT_LC_BIT)),
NULL,
};
static const struct attribute_group qcom_l3_cache_pmu_format_group = {
.name = "format",
.attrs = qcom_l3_cache_pmu_formats,
};
/* events */
static ssize_t l3cache_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
#define L3CACHE_EVENT_ATTR(_name, _id) \
PMU_EVENT_ATTR_ID(_name, l3cache_pmu_event_show, _id)
static struct attribute *qcom_l3_cache_pmu_events[] = {
L3CACHE_EVENT_ATTR(cycles, L3_EVENT_CYCLES),
L3CACHE_EVENT_ATTR(read-hit, L3_EVENT_READ_HIT),
L3CACHE_EVENT_ATTR(read-miss, L3_EVENT_READ_MISS),
L3CACHE_EVENT_ATTR(read-hit-d-side, L3_EVENT_READ_HIT_D),
L3CACHE_EVENT_ATTR(read-miss-d-side, L3_EVENT_READ_MISS_D),
L3CACHE_EVENT_ATTR(write-hit, L3_EVENT_WRITE_HIT),
L3CACHE_EVENT_ATTR(write-miss, L3_EVENT_WRITE_MISS),
NULL
};
static const struct attribute_group qcom_l3_cache_pmu_events_group = {
.name = "events",
.attrs = qcom_l3_cache_pmu_events,
};
/* cpumask */
static ssize_t cpumask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct l3cache_pmu *l3pmu = to_l3cache_pmu(dev_get_drvdata(dev));
return cpumap_print_to_pagebuf(true, buf, &l3pmu->cpumask);
}
static DEVICE_ATTR_RO(cpumask);
static struct attribute *qcom_l3_cache_pmu_cpumask_attrs[] = {
&dev_attr_cpumask.attr,
NULL,
};
static const struct attribute_group qcom_l3_cache_pmu_cpumask_attr_group = {
.attrs = qcom_l3_cache_pmu_cpumask_attrs,
};
/*
* Per PMU device attribute groups
*/
static const struct attribute_group *qcom_l3_cache_pmu_attr_grps[] = {
&qcom_l3_cache_pmu_format_group,
&qcom_l3_cache_pmu_events_group,
&qcom_l3_cache_pmu_cpumask_attr_group,
NULL,
};
/*
* Probing functions and data.
*/
static int qcom_l3_cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
struct l3cache_pmu *l3pmu = hlist_entry_safe(node, struct l3cache_pmu, node);
/* If there is no CPU/PMU association, pick this CPU */
if (cpumask_empty(&l3pmu->cpumask))
cpumask_set_cpu(cpu, &l3pmu->cpumask);
return 0;
}
static int qcom_l3_cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct l3cache_pmu *l3pmu = hlist_entry_safe(node, struct l3cache_pmu, node);
unsigned int target;
if (!cpumask_test_and_clear_cpu(cpu, &l3pmu->cpumask))
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&l3pmu->pmu, cpu, target);
cpumask_set_cpu(target, &l3pmu->cpumask);
return 0;
}
static int qcom_l3_cache_pmu_probe(struct platform_device *pdev)
{
struct l3cache_pmu *l3pmu;
struct acpi_device *acpi_dev;
struct resource *memrc;
int ret;
char *name;
/* Initialize the PMU data structures */
acpi_dev = ACPI_COMPANION(&pdev->dev);
if (!acpi_dev)
return -ENODEV;
l3pmu = devm_kzalloc(&pdev->dev, sizeof(*l3pmu), GFP_KERNEL);
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "l3cache_%s_%s",
acpi_dev_parent(acpi_dev)->pnp.unique_id,
acpi_dev->pnp.unique_id);
if (!l3pmu || !name)
return -ENOMEM;
l3pmu->pmu = (struct pmu) {
.task_ctx_nr = perf_invalid_context,
.pmu_enable = qcom_l3_cache__pmu_enable,
.pmu_disable = qcom_l3_cache__pmu_disable,
.event_init = qcom_l3_cache__event_init,
.add = qcom_l3_cache__event_add,
.del = qcom_l3_cache__event_del,
.start = qcom_l3_cache__event_start,
.stop = qcom_l3_cache__event_stop,
.read = qcom_l3_cache__event_read,
.attr_groups = qcom_l3_cache_pmu_attr_grps,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
l3pmu->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &memrc);
if (IS_ERR(l3pmu->regs))
return PTR_ERR(l3pmu->regs);
qcom_l3_cache__init(l3pmu);
ret = platform_get_irq(pdev, 0);
if (ret <= 0)
return ret;
ret = devm_request_irq(&pdev->dev, ret, qcom_l3_cache__handle_irq, 0,
name, l3pmu);
if (ret) {
dev_err(&pdev->dev, "Request for IRQ failed for slice @%pa\n",
&memrc->start);
return ret;
}
/* Add this instance to the list used by the offline callback */
ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, &l3pmu->node);
if (ret) {
dev_err(&pdev->dev, "Error %d registering hotplug", ret);
return ret;
}
ret = perf_pmu_register(&l3pmu->pmu, name, -1);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register L3 cache PMU (%d)\n", ret);
return ret;
}
dev_info(&pdev->dev, "Registered %s, type: %d\n", name, l3pmu->pmu.type);
return 0;
}
static const struct acpi_device_id qcom_l3_cache_pmu_acpi_match[] = {
{ "QCOM8081", },
{ }
};
MODULE_DEVICE_TABLE(acpi, qcom_l3_cache_pmu_acpi_match);
static struct platform_driver qcom_l3_cache_pmu_driver = {
.driver = {
.name = "qcom-l3cache-pmu",
.acpi_match_table = ACPI_PTR(qcom_l3_cache_pmu_acpi_match),
.suppress_bind_attrs = true,
},
.probe = qcom_l3_cache_pmu_probe,
};
static int __init register_qcom_l3_cache_pmu_driver(void)
{
int ret;
/* Install a hook to update the reader CPU in case it goes offline */
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
"perf/qcom/l3cache:online",
qcom_l3_cache_pmu_online_cpu,
qcom_l3_cache_pmu_offline_cpu);
if (ret)
return ret;
return platform_driver_register(&qcom_l3_cache_pmu_driver);
}
device_initcall(register_qcom_l3_cache_pmu_driver);
| linux-master | drivers/perf/qcom_l3_pmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2023 Huawei
*
* The CXL 3.0 specification includes a standard Performance Monitoring Unit,
* called the CXL PMU, or CPMU. In order to allow a high degree of
* implementation flexibility the specification provides a wide range of
* options all of which are self describing.
*
* Details in CXL rev 3.0 section 8.2.7 CPMU Register Interface
*/
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/perf_event.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/bits.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/pci.h>
#include "../cxl/cxlpci.h"
#include "../cxl/cxl.h"
#include "../cxl/pmu.h"
#define CXL_PMU_CAP_REG 0x0
#define CXL_PMU_CAP_NUM_COUNTERS_MSK GENMASK_ULL(5, 0)
#define CXL_PMU_CAP_COUNTER_WIDTH_MSK GENMASK_ULL(15, 8)
#define CXL_PMU_CAP_NUM_EVN_CAP_REG_SUP_MSK GENMASK_ULL(24, 20)
#define CXL_PMU_CAP_FILTERS_SUP_MSK GENMASK_ULL(39, 32)
#define CXL_PMU_FILTER_HDM BIT(0)
#define CXL_PMU_FILTER_CHAN_RANK_BANK BIT(1)
#define CXL_PMU_CAP_MSI_N_MSK GENMASK_ULL(47, 44)
#define CXL_PMU_CAP_WRITEABLE_WHEN_FROZEN BIT_ULL(48)
#define CXL_PMU_CAP_FREEZE BIT_ULL(49)
#define CXL_PMU_CAP_INT BIT_ULL(50)
#define CXL_PMU_CAP_VERSION_MSK GENMASK_ULL(63, 60)
#define CXL_PMU_OVERFLOW_REG 0x10
#define CXL_PMU_FREEZE_REG 0x18
#define CXL_PMU_EVENT_CAP_REG(n) (0x100 + 8 * (n))
#define CXL_PMU_EVENT_CAP_SUPPORTED_EVENTS_MSK GENMASK_ULL(31, 0)
#define CXL_PMU_EVENT_CAP_GROUP_ID_MSK GENMASK_ULL(47, 32)
#define CXL_PMU_EVENT_CAP_VENDOR_ID_MSK GENMASK_ULL(63, 48)
#define CXL_PMU_COUNTER_CFG_REG(n) (0x200 + 8 * (n))
#define CXL_PMU_COUNTER_CFG_TYPE_MSK GENMASK_ULL(1, 0)
#define CXL_PMU_COUNTER_CFG_TYPE_FREE_RUN 0
#define CXL_PMU_COUNTER_CFG_TYPE_FIXED_FUN 1
#define CXL_PMU_COUNTER_CFG_TYPE_CONFIGURABLE 2
#define CXL_PMU_COUNTER_CFG_ENABLE BIT_ULL(8)
#define CXL_PMU_COUNTER_CFG_INT_ON_OVRFLW BIT_ULL(9)
#define CXL_PMU_COUNTER_CFG_FREEZE_ON_OVRFLW BIT_ULL(10)
#define CXL_PMU_COUNTER_CFG_EDGE BIT_ULL(11)
#define CXL_PMU_COUNTER_CFG_INVERT BIT_ULL(12)
#define CXL_PMU_COUNTER_CFG_THRESHOLD_MSK GENMASK_ULL(23, 16)
#define CXL_PMU_COUNTER_CFG_EVENTS_MSK GENMASK_ULL(55, 24)
#define CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK GENMASK_ULL(63, 59)
#define CXL_PMU_FILTER_CFG_REG(n, f) (0x400 + 4 * ((f) + (n) * 8))
#define CXL_PMU_FILTER_CFG_VALUE_MSK GENMASK(15, 0)
#define CXL_PMU_COUNTER_REG(n) (0xc00 + 8 * (n))
/* CXL rev 3.0 Table 13-5 Events under CXL Vendor ID */
#define CXL_PMU_GID_CLOCK_TICKS 0x00
#define CXL_PMU_GID_D2H_REQ 0x0010
#define CXL_PMU_GID_D2H_RSP 0x0011
#define CXL_PMU_GID_H2D_REQ 0x0012
#define CXL_PMU_GID_H2D_RSP 0x0013
#define CXL_PMU_GID_CACHE_DATA 0x0014
#define CXL_PMU_GID_M2S_REQ 0x0020
#define CXL_PMU_GID_M2S_RWD 0x0021
#define CXL_PMU_GID_M2S_BIRSP 0x0022
#define CXL_PMU_GID_S2M_BISNP 0x0023
#define CXL_PMU_GID_S2M_NDR 0x0024
#define CXL_PMU_GID_S2M_DRS 0x0025
#define CXL_PMU_GID_DDR 0x8000
static int cxl_pmu_cpuhp_state_num;
struct cxl_pmu_ev_cap {
u16 vid;
u16 gid;
u32 msk;
union {
int counter_idx; /* fixed counters */
int event_idx; /* configurable counters */
};
struct list_head node;
};
#define CXL_PMU_MAX_COUNTERS 64
struct cxl_pmu_info {
struct pmu pmu;
void __iomem *base;
struct perf_event **hw_events;
struct list_head event_caps_configurable;
struct list_head event_caps_fixed;
DECLARE_BITMAP(used_counter_bm, CXL_PMU_MAX_COUNTERS);
DECLARE_BITMAP(conf_counter_bm, CXL_PMU_MAX_COUNTERS);
u16 counter_width;
u8 num_counters;
u8 num_event_capabilities;
int on_cpu;
struct hlist_node node;
bool filter_hdm;
int irq;
};
#define pmu_to_cxl_pmu_info(_pmu) container_of(_pmu, struct cxl_pmu_info, pmu)
/*
* All CPMU counters are discoverable via the Event Capabilities Registers.
* Each Event Capability register contains a VID / GroupID.
* A counter may then count any combination (by summing) of events in
* that group which are in the Supported Events Bitmask.
* However, there are some complexities to the scheme.
* - Fixed function counters refer to an Event Capabilities register.
* That event capability register is not then used for Configurable
* counters.
*/
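/*
 * Example (illustrative, using definitions from this file): a
 * configurable counter whose Event Capability register reports
 * vid == PCI_DVSEC_VENDOR_ID_CXL, gid == CXL_PMU_GID_D2H_REQ and a
 * supported-events mask with bits 1 and 2 set may count d2h_req_rdcurr,
 * d2h_req_rdown, or the sum of both, whereas a fixed function counter
 * only ever counts the exact event combination advertised in its
 * counter configuration register.
 */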
static int cxl_pmu_parse_caps(struct device *dev, struct cxl_pmu_info *info)
{
unsigned long fixed_counter_event_cap_bm = 0;
void __iomem *base = info->base;
bool freeze_for_enable;
u64 val, eval;
int i;
val = readq(base + CXL_PMU_CAP_REG);
freeze_for_enable = FIELD_GET(CXL_PMU_CAP_WRITEABLE_WHEN_FROZEN, val) &&
FIELD_GET(CXL_PMU_CAP_FREEZE, val);
if (!freeze_for_enable) {
dev_err(dev, "Counters not writable while frozen\n");
return -ENODEV;
}
info->num_counters = FIELD_GET(CXL_PMU_CAP_NUM_COUNTERS_MSK, val) + 1;
info->counter_width = FIELD_GET(CXL_PMU_CAP_COUNTER_WIDTH_MSK, val);
info->num_event_capabilities = FIELD_GET(CXL_PMU_CAP_NUM_EVN_CAP_REG_SUP_MSK, val) + 1;
info->filter_hdm = FIELD_GET(CXL_PMU_CAP_FILTERS_SUP_MSK, val) & CXL_PMU_FILTER_HDM;
if (FIELD_GET(CXL_PMU_CAP_INT, val))
info->irq = FIELD_GET(CXL_PMU_CAP_MSI_N_MSK, val);
else
info->irq = -1;
/* First handle fixed function counters; note if configurable counters found */
for (i = 0; i < info->num_counters; i++) {
struct cxl_pmu_ev_cap *pmu_ev;
u32 events_msk;
u8 group_idx;
val = readq(base + CXL_PMU_COUNTER_CFG_REG(i));
if (FIELD_GET(CXL_PMU_COUNTER_CFG_TYPE_MSK, val) ==
CXL_PMU_COUNTER_CFG_TYPE_CONFIGURABLE) {
set_bit(i, info->conf_counter_bm);
}
if (FIELD_GET(CXL_PMU_COUNTER_CFG_TYPE_MSK, val) !=
CXL_PMU_COUNTER_CFG_TYPE_FIXED_FUN)
continue;
/* In this case we know which fields are const */
group_idx = FIELD_GET(CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK, val);
events_msk = FIELD_GET(CXL_PMU_COUNTER_CFG_EVENTS_MSK, val);
eval = readq(base + CXL_PMU_EVENT_CAP_REG(group_idx));
pmu_ev = devm_kzalloc(dev, sizeof(*pmu_ev), GFP_KERNEL);
if (!pmu_ev)
return -ENOMEM;
pmu_ev->vid = FIELD_GET(CXL_PMU_EVENT_CAP_VENDOR_ID_MSK, eval);
pmu_ev->gid = FIELD_GET(CXL_PMU_EVENT_CAP_GROUP_ID_MSK, eval);
/* For a fixed purpose counter use the events mask from the counter CFG */
pmu_ev->msk = events_msk;
pmu_ev->counter_idx = i;
/* This list add is never unwound as all entries are deleted on remove */
list_add(&pmu_ev->node, &info->event_caps_fixed);
/*
* Configurable counters must not use an Event Capability register that
* is in use for a Fixed counter.
*/
set_bit(group_idx, &fixed_counter_event_cap_bm);
}
if (!bitmap_empty(info->conf_counter_bm, CXL_PMU_MAX_COUNTERS)) {
struct cxl_pmu_ev_cap *pmu_ev;
int j;
/* Walk event capabilities unused by fixed counters */
for_each_clear_bit(j, &fixed_counter_event_cap_bm,
info->num_event_capabilities) {
pmu_ev = devm_kzalloc(dev, sizeof(*pmu_ev), GFP_KERNEL);
if (!pmu_ev)
return -ENOMEM;
eval = readq(base + CXL_PMU_EVENT_CAP_REG(j));
pmu_ev->vid = FIELD_GET(CXL_PMU_EVENT_CAP_VENDOR_ID_MSK, eval);
pmu_ev->gid = FIELD_GET(CXL_PMU_EVENT_CAP_GROUP_ID_MSK, eval);
pmu_ev->msk = FIELD_GET(CXL_PMU_EVENT_CAP_SUPPORTED_EVENTS_MSK, eval);
pmu_ev->event_idx = j;
list_add(&pmu_ev->node, &info->event_caps_configurable);
}
}
return 0;
}
static ssize_t cxl_pmu_format_sysfs_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *eattr;
eattr = container_of(attr, struct dev_ext_attribute, attr);
return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}
#define CXL_PMU_FORMAT_ATTR(_name, _format)\
(&((struct dev_ext_attribute[]) { \
{ \
.attr = __ATTR(_name, 0444, \
cxl_pmu_format_sysfs_show, NULL), \
.var = (void *)_format \
} \
})[0].attr.attr)
enum {
cxl_pmu_mask_attr,
cxl_pmu_gid_attr,
cxl_pmu_vid_attr,
cxl_pmu_threshold_attr,
cxl_pmu_invert_attr,
cxl_pmu_edge_attr,
cxl_pmu_hdm_filter_en_attr,
cxl_pmu_hdm_attr,
};
static struct attribute *cxl_pmu_format_attr[] = {
[cxl_pmu_mask_attr] = CXL_PMU_FORMAT_ATTR(mask, "config:0-31"),
[cxl_pmu_gid_attr] = CXL_PMU_FORMAT_ATTR(gid, "config:32-47"),
[cxl_pmu_vid_attr] = CXL_PMU_FORMAT_ATTR(vid, "config:48-63"),
[cxl_pmu_threshold_attr] = CXL_PMU_FORMAT_ATTR(threshold, "config1:0-15"),
[cxl_pmu_invert_attr] = CXL_PMU_FORMAT_ATTR(invert, "config1:16"),
[cxl_pmu_edge_attr] = CXL_PMU_FORMAT_ATTR(edge, "config1:17"),
[cxl_pmu_hdm_filter_en_attr] = CXL_PMU_FORMAT_ATTR(hdm_filter_en, "config1:18"),
[cxl_pmu_hdm_attr] = CXL_PMU_FORMAT_ATTR(hdm, "config2:0-15"),
NULL
};
#define CXL_PMU_ATTR_CONFIG_MASK_MSK GENMASK_ULL(31, 0)
#define CXL_PMU_ATTR_CONFIG_GID_MSK GENMASK_ULL(47, 32)
#define CXL_PMU_ATTR_CONFIG_VID_MSK GENMASK_ULL(63, 48)
#define CXL_PMU_ATTR_CONFIG1_THRESHOLD_MSK GENMASK_ULL(15, 0)
#define CXL_PMU_ATTR_CONFIG1_INVERT_MSK BIT(16)
#define CXL_PMU_ATTR_CONFIG1_EDGE_MSK BIT(17)
#define CXL_PMU_ATTR_CONFIG1_FILTER_EN_MSK BIT(18)
#define CXL_PMU_ATTR_CONFIG2_HDM_MSK GENMASK(15, 0)
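/*
 * Illustrative usage (the instance name follows the probe naming below,
 * e.g. cxl_pmu_mem0.0 for the first CPMU of memdev 0; availability of a
 * given event depends on the hardware). The same event can be requested
 * by name or by raw fields, assuming the CXL vendor ID 0x1e98:
 *
 * perf stat -a -e cxl_pmu_mem0.0/d2h_req_rdcurr/ sleep 1
 * perf stat -a -e cxl_pmu_mem0.0/vid=0x1e98,gid=0x10,mask=0x2/ sleep 1
 */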
static umode_t cxl_pmu_format_is_visible(struct kobject *kobj,
struct attribute *attr, int a)
{
struct device *dev = kobj_to_dev(kobj);
struct cxl_pmu_info *info = dev_get_drvdata(dev);
/*
* Filter capabilities are advertised at the CPMU level, so hide the
* attributes if the particular filter is not supported.
*/
if (!info->filter_hdm &&
(attr == cxl_pmu_format_attr[cxl_pmu_hdm_filter_en_attr] ||
attr == cxl_pmu_format_attr[cxl_pmu_hdm_attr]))
return 0;
return attr->mode;
}
static const struct attribute_group cxl_pmu_format_group = {
.name = "format",
.attrs = cxl_pmu_format_attr,
.is_visible = cxl_pmu_format_is_visible,
};
static u32 cxl_pmu_config_get_mask(struct perf_event *event)
{
return FIELD_GET(CXL_PMU_ATTR_CONFIG_MASK_MSK, event->attr.config);
}
static u16 cxl_pmu_config_get_gid(struct perf_event *event)
{
return FIELD_GET(CXL_PMU_ATTR_CONFIG_GID_MSK, event->attr.config);
}
static u16 cxl_pmu_config_get_vid(struct perf_event *event)
{
return FIELD_GET(CXL_PMU_ATTR_CONFIG_VID_MSK, event->attr.config);
}
static u8 cxl_pmu_config1_get_threshold(struct perf_event *event)
{
return FIELD_GET(CXL_PMU_ATTR_CONFIG1_THRESHOLD_MSK, event->attr.config1);
}
static bool cxl_pmu_config1_get_invert(struct perf_event *event)
{
return FIELD_GET(CXL_PMU_ATTR_CONFIG1_INVERT_MSK, event->attr.config1);
}
static bool cxl_pmu_config1_get_edge(struct perf_event *event)
{
return FIELD_GET(CXL_PMU_ATTR_CONFIG1_EDGE_MSK, event->attr.config1);
}
/*
* The CPMU specification allows for 8 filters, each with a 16-bit value,
* so 8 x 16 bits of configuration space are needed to store them.
* As the value used for disable is 0xffff, a separate enable switch
* is needed.
*/
static bool cxl_pmu_config1_hdm_filter_en(struct perf_event *event)
{
return FIELD_GET(CXL_PMU_ATTR_CONFIG1_FILTER_EN_MSK, event->attr.config1);
}
static u16 cxl_pmu_config2_get_hdm_decoder(struct perf_event *event)
{
return FIELD_GET(CXL_PMU_ATTR_CONFIG2_HDM_MSK, event->attr.config2);
}
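/*
 * Illustrative usage (assuming the hardware advertises the HDM filter):
 * restrict an event to HDM decoder 0 by enabling the filter and passing
 * the decoder index, e.g.:
 *
 * perf stat -a -e cxl_pmu_mem0.0/d2h_req_rdcurr,hdm_filter_en=1,hdm=0/ sleep 1
 *
 * With hdm_filter_en left at 0 the filter register is programmed with
 * 0xffff and no filtering is applied (see cxl_pmu_event_start()).
 */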
static ssize_t cxl_pmu_event_sysfs_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct perf_pmu_events_attr *pmu_attr =
container_of(attr, struct perf_pmu_events_attr, attr);
return sysfs_emit(buf, "config=%#llx\n", pmu_attr->id);
}
#define CXL_PMU_EVENT_ATTR(_name, _vid, _gid, _msk) \
PMU_EVENT_ATTR_ID(_name, cxl_pmu_event_sysfs_show, \
((u64)(_vid) << 48) | ((u64)(_gid) << 32) | (u64)(_msk))
/* For CXL spec defined events */
#define CXL_PMU_EVENT_CXL_ATTR(_name, _gid, _msk) \
CXL_PMU_EVENT_ATTR(_name, PCI_DVSEC_VENDOR_ID_CXL, _gid, _msk)
static struct attribute *cxl_pmu_event_attrs[] = {
CXL_PMU_EVENT_CXL_ATTR(clock_ticks, CXL_PMU_GID_CLOCK_TICKS, BIT(0)),
/* CXL rev 3.0 Table 3-17 - Device to Host Requests */
CXL_PMU_EVENT_CXL_ATTR(d2h_req_rdcurr, CXL_PMU_GID_D2H_REQ, BIT(1)),
CXL_PMU_EVENT_CXL_ATTR(d2h_req_rdown, CXL_PMU_GID_D2H_REQ, BIT(2)),
CXL_PMU_EVENT_CXL_ATTR(d2h_req_rdshared, CXL_PMU_GID_D2H_REQ, BIT(3)),
CXL_PMU_EVENT_CXL_ATTR(d2h_req_rdany, CXL_PMU_GID_D2H_REQ, BIT(4)),
CXL_PMU_EVENT_CXL_ATTR(d2h_req_rdownnodata, CXL_PMU_GID_D2H_REQ, BIT(5)),
CXL_PMU_EVENT_CXL_ATTR(d2h_req_itomwr, CXL_PMU_GID_D2H_REQ, BIT(6)),
CXL_PMU_EVENT_CXL_ATTR(d2h_req_wrcurr, CXL_PMU_GID_D2H_REQ, BIT(7)),
CXL_PMU_EVENT_CXL_ATTR(d2h_req_clflush, CXL_PMU_GID_D2H_REQ, BIT(8)),
CXL_PMU_EVENT_CXL_ATTR(d2h_req_cleanevict, CXL_PMU_GID_D2H_REQ, BIT(9)),
CXL_PMU_EVENT_CXL_ATTR(d2h_req_dirtyevict, CXL_PMU_GID_D2H_REQ, BIT(10)),
CXL_PMU_EVENT_CXL_ATTR(d2h_req_cleanevictnodata, CXL_PMU_GID_D2H_REQ, BIT(11)),
CXL_PMU_EVENT_CXL_ATTR(d2h_req_wowrinv, CXL_PMU_GID_D2H_REQ, BIT(12)),
CXL_PMU_EVENT_CXL_ATTR(d2h_req_wowrinvf, CXL_PMU_GID_D2H_REQ, BIT(13)),
CXL_PMU_EVENT_CXL_ATTR(d2h_req_wrinv, CXL_PMU_GID_D2H_REQ, BIT(14)),
CXL_PMU_EVENT_CXL_ATTR(d2h_req_cacheflushed, CXL_PMU_GID_D2H_REQ, BIT(16)),
/* CXL rev 3.0 Table 3-20 - D2H Response Encodings */
CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspihiti, CXL_PMU_GID_D2H_RSP, BIT(4)),
CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspvhitv, CXL_PMU_GID_D2H_RSP, BIT(6)),
CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspihitse, CXL_PMU_GID_D2H_RSP, BIT(5)),
CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspshitse, CXL_PMU_GID_D2H_RSP, BIT(1)),
CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspsfwdm, CXL_PMU_GID_D2H_RSP, BIT(7)),
CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspifwdm, CXL_PMU_GID_D2H_RSP, BIT(15)),
CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspvfwdv, CXL_PMU_GID_D2H_RSP, BIT(22)),
/* CXL rev 3.0 Table 3-21 - CXL.cache - Mapping of H2D Requests to D2H Responses */
CXL_PMU_EVENT_CXL_ATTR(h2d_req_snpdata, CXL_PMU_GID_H2D_REQ, BIT(1)),
CXL_PMU_EVENT_CXL_ATTR(h2d_req_snpinv, CXL_PMU_GID_H2D_REQ, BIT(2)),
CXL_PMU_EVENT_CXL_ATTR(h2d_req_snpcur, CXL_PMU_GID_H2D_REQ, BIT(3)),
/* CXL rev 3.0 Table 3-22 - H2D Response Opcode Encodings */
CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_writepull, CXL_PMU_GID_H2D_RSP, BIT(1)),
CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_go, CXL_PMU_GID_H2D_RSP, BIT(4)),
CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_gowritepull, CXL_PMU_GID_H2D_RSP, BIT(5)),
CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_extcmp, CXL_PMU_GID_H2D_RSP, BIT(6)),
CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_gowritepulldrop, CXL_PMU_GID_H2D_RSP, BIT(8)),
CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_fastgowritepull, CXL_PMU_GID_H2D_RSP, BIT(13)),
CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_goerrwritepull, CXL_PMU_GID_H2D_RSP, BIT(15)),
/* CXL rev 3.0 Table 13-5 directly lists these */
CXL_PMU_EVENT_CXL_ATTR(cachedata_d2h_data, CXL_PMU_GID_CACHE_DATA, BIT(0)),
CXL_PMU_EVENT_CXL_ATTR(cachedata_h2d_data, CXL_PMU_GID_CACHE_DATA, BIT(1)),
/* CXL rev 3.0 Table 3-29 M2S Req Memory Opcodes */
CXL_PMU_EVENT_CXL_ATTR(m2s_req_meminv, CXL_PMU_GID_M2S_REQ, BIT(0)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrd, CXL_PMU_GID_M2S_REQ, BIT(1)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrddata, CXL_PMU_GID_M2S_REQ, BIT(2)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrdfwd, CXL_PMU_GID_M2S_REQ, BIT(3)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_memwrfwd, CXL_PMU_GID_M2S_REQ, BIT(4)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_memspecrd, CXL_PMU_GID_M2S_REQ, BIT(8)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_meminvnt, CXL_PMU_GID_M2S_REQ, BIT(9)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_memcleanevict, CXL_PMU_GID_M2S_REQ, BIT(10)),
/* CXL rev 3.0 Table 3-35 M2S RwD Memory Opcodes */
CXL_PMU_EVENT_CXL_ATTR(m2s_rwd_memwr, CXL_PMU_GID_M2S_RWD, BIT(1)),
CXL_PMU_EVENT_CXL_ATTR(m2s_rwd_memwrptl, CXL_PMU_GID_M2S_RWD, BIT(2)),
CXL_PMU_EVENT_CXL_ATTR(m2s_rwd_biconflict, CXL_PMU_GID_M2S_RWD, BIT(4)),
/* CXL rev 3.0 Table 3-38 M2S BIRsp Memory Opcodes */
CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_i, CXL_PMU_GID_M2S_BIRSP, BIT(0)),
CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_s, CXL_PMU_GID_M2S_BIRSP, BIT(1)),
CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_e, CXL_PMU_GID_M2S_BIRSP, BIT(2)),
CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_iblk, CXL_PMU_GID_M2S_BIRSP, BIT(4)),
CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_sblk, CXL_PMU_GID_M2S_BIRSP, BIT(5)),
CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_eblk, CXL_PMU_GID_M2S_BIRSP, BIT(6)),
/* CXL rev 3.0 Table 3-40 S2M BISnp Opcodes */
CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_cur, CXL_PMU_GID_S2M_BISNP, BIT(0)),
CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_data, CXL_PMU_GID_S2M_BISNP, BIT(1)),
CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_inv, CXL_PMU_GID_S2M_BISNP, BIT(2)),
CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_curblk, CXL_PMU_GID_S2M_BISNP, BIT(4)),
CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_datblk, CXL_PMU_GID_S2M_BISNP, BIT(5)),
CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_invblk, CXL_PMU_GID_S2M_BISNP, BIT(6)),
/* CXL rev 3.0 Table 3-43 S2M NDR Opcodes */
CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmp, CXL_PMU_GID_S2M_NDR, BIT(0)),
CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmps, CXL_PMU_GID_S2M_NDR, BIT(1)),
CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmpe, CXL_PMU_GID_S2M_NDR, BIT(2)),
CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_biconflictack, CXL_PMU_GID_S2M_NDR, BIT(3)),
/* CXL rev 3.0 Table 3-46 S2M DRS opcodes */
CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdata, CXL_PMU_GID_S2M_DRS, BIT(0)),
CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdatanxm, CXL_PMU_GID_S2M_DRS, BIT(1)),
/* CXL rev 3.0 Table 13-5 directly lists these */
CXL_PMU_EVENT_CXL_ATTR(ddr_act, CXL_PMU_GID_DDR, BIT(0)),
CXL_PMU_EVENT_CXL_ATTR(ddr_pre, CXL_PMU_GID_DDR, BIT(1)),
CXL_PMU_EVENT_CXL_ATTR(ddr_casrd, CXL_PMU_GID_DDR, BIT(2)),
CXL_PMU_EVENT_CXL_ATTR(ddr_caswr, CXL_PMU_GID_DDR, BIT(3)),
CXL_PMU_EVENT_CXL_ATTR(ddr_refresh, CXL_PMU_GID_DDR, BIT(4)),
CXL_PMU_EVENT_CXL_ATTR(ddr_selfrefreshent, CXL_PMU_GID_DDR, BIT(5)),
CXL_PMU_EVENT_CXL_ATTR(ddr_rfm, CXL_PMU_GID_DDR, BIT(6)),
NULL
};
static struct cxl_pmu_ev_cap *cxl_pmu_find_fixed_counter_ev_cap(struct cxl_pmu_info *info,
int vid, int gid, int msk)
{
struct cxl_pmu_ev_cap *pmu_ev;
list_for_each_entry(pmu_ev, &info->event_caps_fixed, node) {
if (vid != pmu_ev->vid || gid != pmu_ev->gid)
continue;
/* Precise match for fixed counter */
if (msk == pmu_ev->msk)
return pmu_ev;
}
return ERR_PTR(-EINVAL);
}
static struct cxl_pmu_ev_cap *cxl_pmu_find_config_counter_ev_cap(struct cxl_pmu_info *info,
int vid, int gid, int msk)
{
struct cxl_pmu_ev_cap *pmu_ev;
list_for_each_entry(pmu_ev, &info->event_caps_configurable, node) {
if (vid != pmu_ev->vid || gid != pmu_ev->gid)
continue;
/* Request mask must be subset of supported */
if (msk & ~pmu_ev->msk)
continue;
return pmu_ev;
}
return ERR_PTR(-EINVAL);
}
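/*
 * Matching example (illustrative): a request with mask == 0x3 matches a
 * fixed counter only if that counter's events mask is exactly 0x3, but
 * it matches any configurable counter whose capability mask has at
 * least bits 0 and 1 set, since for configurable counters the requested
 * mask only needs to be a subset of the supported mask.
 */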
static umode_t cxl_pmu_event_is_visible(struct kobject *kobj, struct attribute *attr, int a)
{
struct device_attribute *dev_attr = container_of(attr, struct device_attribute, attr);
struct perf_pmu_events_attr *pmu_attr =
container_of(dev_attr, struct perf_pmu_events_attr, attr);
struct device *dev = kobj_to_dev(kobj);
struct cxl_pmu_info *info = dev_get_drvdata(dev);
int vid = FIELD_GET(CXL_PMU_ATTR_CONFIG_VID_MSK, pmu_attr->id);
int gid = FIELD_GET(CXL_PMU_ATTR_CONFIG_GID_MSK, pmu_attr->id);
int msk = FIELD_GET(CXL_PMU_ATTR_CONFIG_MASK_MSK, pmu_attr->id);
if (!IS_ERR(cxl_pmu_find_fixed_counter_ev_cap(info, vid, gid, msk)))
return attr->mode;
if (!IS_ERR(cxl_pmu_find_config_counter_ev_cap(info, vid, gid, msk)))
return attr->mode;
return 0;
}
static const struct attribute_group cxl_pmu_events = {
.name = "events",
.attrs = cxl_pmu_event_attrs,
.is_visible = cxl_pmu_event_is_visible,
};
static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_pmu_info *info = dev_get_drvdata(dev);
return cpumap_print_to_pagebuf(true, buf, cpumask_of(info->on_cpu));
}
static DEVICE_ATTR_RO(cpumask);
static struct attribute *cxl_pmu_cpumask_attrs[] = {
&dev_attr_cpumask.attr,
NULL
};
static const struct attribute_group cxl_pmu_cpumask_group = {
.attrs = cxl_pmu_cpumask_attrs,
};
static const struct attribute_group *cxl_pmu_attr_groups[] = {
&cxl_pmu_events,
&cxl_pmu_format_group,
&cxl_pmu_cpumask_group,
NULL
};
/* If counter_idx == NULL, don't try to allocate a counter. */
static int cxl_pmu_get_event_idx(struct perf_event *event, int *counter_idx,
int *event_idx)
{
struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
DECLARE_BITMAP(configurable_and_free, CXL_PMU_MAX_COUNTERS);
struct cxl_pmu_ev_cap *pmu_ev;
u32 mask;
u16 gid, vid;
int i;
vid = cxl_pmu_config_get_vid(event);
gid = cxl_pmu_config_get_gid(event);
mask = cxl_pmu_config_get_mask(event);
pmu_ev = cxl_pmu_find_fixed_counter_ev_cap(info, vid, gid, mask);
if (!IS_ERR(pmu_ev)) {
if (!counter_idx)
return 0;
if (!test_bit(pmu_ev->counter_idx, info->used_counter_bm)) {
*counter_idx = pmu_ev->counter_idx;
return 0;
}
/* Fixed counter is in use, but maybe a configurable one? */
}
pmu_ev = cxl_pmu_find_config_counter_ev_cap(info, vid, gid, mask);
if (!IS_ERR(pmu_ev)) {
if (!counter_idx)
return 0;
bitmap_andnot(configurable_and_free, info->conf_counter_bm,
info->used_counter_bm, CXL_PMU_MAX_COUNTERS);
i = find_first_bit(configurable_and_free, CXL_PMU_MAX_COUNTERS);
if (i == CXL_PMU_MAX_COUNTERS)
return -EINVAL;
*counter_idx = i;
return 0;
}
return -EINVAL;
}
static int cxl_pmu_event_init(struct perf_event *event)
{
struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
int rc;
/* Top level type sanity check - is this a Hardware Event being requested */
if (event->attr.type != event->pmu->type)
return -ENOENT;
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EOPNOTSUPP;
/* TODO: Validation of any filter */
/*
* Verify that it is possible to count what was requested. Either must
* be a fixed counter that is a precise match or a configurable counter
* where this is a subset.
*/
rc = cxl_pmu_get_event_idx(event, NULL, NULL);
if (rc < 0)
return rc;
event->cpu = info->on_cpu;
return 0;
}
static void cxl_pmu_enable(struct pmu *pmu)
{
struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(pmu);
void __iomem *base = info->base;
/* Can assume frozen at this stage */
writeq(0, base + CXL_PMU_FREEZE_REG);
}
static void cxl_pmu_disable(struct pmu *pmu)
{
struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(pmu);
void __iomem *base = info->base;
/*
* Whilst bits above the number of counters are RsvdZ,
* they are unlikely to be repurposed given that the
* number of counters is allowed to be 64, leaving
* no reserved bits. Hence this is only slightly
* naughty.
*/
writeq(GENMASK_ULL(63, 0), base + CXL_PMU_FREEZE_REG);
}
static void cxl_pmu_event_start(struct perf_event *event, int flags)
{
struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
struct hw_perf_event *hwc = &event->hw;
void __iomem *base = info->base;
u64 cfg;
/*
* All paths to here should either set these flags directly or
* call cxl_pmu_event_stop() which will ensure the correct state.
*/
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
return;
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
/*
* Currently only HDM filter control is implemented; this code will
* need generalizing when more filters are added.
*/
if (info->filter_hdm) {
if (cxl_pmu_config1_hdm_filter_en(event))
cfg = cxl_pmu_config2_get_hdm_decoder(event);
else
cfg = GENMASK(15, 0); /* No filtering if 0xffff */
writeq(cfg, base + CXL_PMU_FILTER_CFG_REG(hwc->idx, 0));
}
cfg = readq(base + CXL_PMU_COUNTER_CFG_REG(hwc->idx));
cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_INT_ON_OVRFLW, 1);
cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_FREEZE_ON_OVRFLW, 1);
cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_ENABLE, 1);
cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_EDGE,
cxl_pmu_config1_get_edge(event) ? 1 : 0);
cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_INVERT,
cxl_pmu_config1_get_invert(event) ? 1 : 0);
/* Fixed purpose counters have next two fields RO */
if (test_bit(hwc->idx, info->conf_counter_bm)) {
cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK,
hwc->event_base);
cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_EVENTS_MSK,
cxl_pmu_config_get_mask(event));
}
cfg &= ~CXL_PMU_COUNTER_CFG_THRESHOLD_MSK;
/*
* For events that generate only 1 count per clock the CXL 3.0 spec
* states the threshold shall be set to 1, but if set to 0 it appears
* to count the raw value anyway.
* There is no definition of which events may count more than once per
* cycle, and hence to which events threshold values other than 1 apply.
* (CXL 3.0 8.2.7.2.1 Counter Configuration - threshold field definition)
*/
cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_THRESHOLD_MSK,
cxl_pmu_config1_get_threshold(event));
writeq(cfg, base + CXL_PMU_COUNTER_CFG_REG(hwc->idx));
local64_set(&hwc->prev_count, 0);
writeq(0, base + CXL_PMU_COUNTER_REG(hwc->idx));
perf_event_update_userpage(event);
}
static u64 cxl_pmu_read_counter(struct perf_event *event)
{
struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
void __iomem *base = info->base;
return readq(base + CXL_PMU_COUNTER_REG(event->hw.idx));
}
static void __cxl_pmu_read(struct perf_event *event, bool overflow)
{
struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u64 new_cnt, prev_cnt, delta;
do {
prev_cnt = local64_read(&hwc->prev_count);
new_cnt = cxl_pmu_read_counter(event);
} while (local64_cmpxchg(&hwc->prev_count, prev_cnt, new_cnt) != prev_cnt);
/*
* If we know an overflow occurred then take that into account.
* Note the counter is not reset, as that would lose events.
*/
delta = (new_cnt - prev_cnt) & GENMASK_ULL(info->counter_width - 1, 0);
if (overflow && delta < GENMASK_ULL(info->counter_width - 1, 0))
delta += (1UL << info->counter_width);
local64_add(delta, &event->count);
}
static void cxl_pmu_read(struct perf_event *event)
{
__cxl_pmu_read(event, false);
}
static void cxl_pmu_event_stop(struct perf_event *event, int flags)
{
struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
void __iomem *base = info->base;
struct hw_perf_event *hwc = &event->hw;
u64 cfg;
cxl_pmu_read(event);
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
cfg = readq(base + CXL_PMU_COUNTER_CFG_REG(hwc->idx));
cfg &= ~(FIELD_PREP(CXL_PMU_COUNTER_CFG_INT_ON_OVRFLW, 1) |
FIELD_PREP(CXL_PMU_COUNTER_CFG_ENABLE, 1));
writeq(cfg, base + CXL_PMU_COUNTER_CFG_REG(hwc->idx));
hwc->state |= PERF_HES_UPTODATE;
}
static int cxl_pmu_event_add(struct perf_event *event, int flags)
{
struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx, rc;
int event_idx = 0;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
rc = cxl_pmu_get_event_idx(event, &idx, &event_idx);
if (rc < 0)
return rc;
hwc->idx = idx;
/* Only set for configurable counters */
hwc->event_base = event_idx;
info->hw_events[idx] = event;
set_bit(idx, info->used_counter_bm);
if (flags & PERF_EF_START)
cxl_pmu_event_start(event, PERF_EF_RELOAD);
return 0;
}
static void cxl_pmu_event_del(struct perf_event *event, int flags)
{
struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
struct hw_perf_event *hwc = &event->hw;
cxl_pmu_event_stop(event, PERF_EF_UPDATE);
clear_bit(hwc->idx, info->used_counter_bm);
info->hw_events[hwc->idx] = NULL;
perf_event_update_userpage(event);
}
static irqreturn_t cxl_pmu_irq(int irq, void *data)
{
struct cxl_pmu_info *info = data;
void __iomem *base = info->base;
u64 overflowed;
DECLARE_BITMAP(overflowedbm, 64);
int i;
overflowed = readq(base + CXL_PMU_OVERFLOW_REG);
/* Interrupt may be shared, so maybe it isn't ours */
if (!overflowed)
return IRQ_NONE;
bitmap_from_arr64(overflowedbm, &overflowed, 64);
for_each_set_bit(i, overflowedbm, info->num_counters) {
struct perf_event *event = info->hw_events[i];
if (!event) {
dev_dbg(info->pmu.dev,
"overflow but on non enabled counter %d\n", i);
continue;
}
__cxl_pmu_read(event, true);
}
writeq(overflowed, base + CXL_PMU_OVERFLOW_REG);
return IRQ_HANDLED;
}
static void cxl_pmu_perf_unregister(void *_info)
{
struct cxl_pmu_info *info = _info;
perf_pmu_unregister(&info->pmu);
}
static void cxl_pmu_cpuhp_remove(void *_info)
{
struct cxl_pmu_info *info = _info;
cpuhp_state_remove_instance_nocalls(cxl_pmu_cpuhp_state_num, &info->node);
}
static int cxl_pmu_probe(struct device *dev)
{
struct cxl_pmu *pmu = to_cxl_pmu(dev);
struct pci_dev *pdev = to_pci_dev(dev->parent);
struct cxl_pmu_info *info;
char *irq_name;
char *dev_name;
int rc, irq;
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
dev_set_drvdata(dev, info);
INIT_LIST_HEAD(&info->event_caps_fixed);
INIT_LIST_HEAD(&info->event_caps_configurable);
info->base = pmu->base;
info->on_cpu = -1;
rc = cxl_pmu_parse_caps(dev, info);
if (rc)
return rc;
info->hw_events = devm_kcalloc(dev, sizeof(*info->hw_events),
info->num_counters, GFP_KERNEL);
if (!info->hw_events)
return -ENOMEM;
switch (pmu->type) {
case CXL_PMU_MEMDEV:
dev_name = devm_kasprintf(dev, GFP_KERNEL, "cxl_pmu_mem%d.%d",
pmu->assoc_id, pmu->index);
break;
}
if (!dev_name)
return -ENOMEM;
info->pmu = (struct pmu) {
.name = dev_name,
.parent = dev,
.module = THIS_MODULE,
.event_init = cxl_pmu_event_init,
.pmu_enable = cxl_pmu_enable,
.pmu_disable = cxl_pmu_disable,
.add = cxl_pmu_event_add,
.del = cxl_pmu_event_del,
.start = cxl_pmu_event_start,
.stop = cxl_pmu_event_stop,
.read = cxl_pmu_read,
.task_ctx_nr = perf_invalid_context,
.attr_groups = cxl_pmu_attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
if (info->irq <= 0)
return -EINVAL;
rc = pci_irq_vector(pdev, info->irq);
if (rc < 0)
return rc;
irq = rc;
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_overflow", dev_name);
if (!irq_name)
return -ENOMEM;
rc = devm_request_irq(dev, irq, cxl_pmu_irq, IRQF_SHARED | IRQF_ONESHOT,
irq_name, info);
if (rc)
return rc;
info->irq = irq;
rc = cpuhp_state_add_instance(cxl_pmu_cpuhp_state_num, &info->node);
if (rc)
return rc;
rc = devm_add_action_or_reset(dev, cxl_pmu_cpuhp_remove, info);
if (rc)
return rc;
rc = perf_pmu_register(&info->pmu, info->pmu.name, -1);
if (rc)
return rc;
rc = devm_add_action_or_reset(dev, cxl_pmu_perf_unregister, info);
if (rc)
return rc;
return 0;
}
static struct cxl_driver cxl_pmu_driver = {
.name = "cxl_pmu",
.probe = cxl_pmu_probe,
.id = CXL_DEVICE_PMU,
};
static int cxl_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
struct cxl_pmu_info *info = hlist_entry_safe(node, struct cxl_pmu_info, node);
if (info->on_cpu != -1)
return 0;
info->on_cpu = cpu;
/*
* CPU HP lock is held so we should be guaranteed that the CPU hasn't yet
* gone away again.
*/
WARN_ON(irq_set_affinity(info->irq, cpumask_of(cpu)));
return 0;
}
static int cxl_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct cxl_pmu_info *info = hlist_entry_safe(node, struct cxl_pmu_info, node);
unsigned int target;
if (info->on_cpu != cpu)
return 0;
info->on_cpu = -1;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids) {
dev_err(info->pmu.dev, "Unable to find a suitable CPU\n");
return 0;
}
perf_pmu_migrate_context(&info->pmu, cpu, target);
info->on_cpu = target;
/*
* CPU HP lock is held so we should be guaranteed that this CPU hasn't yet
* gone away.
*/
WARN_ON(irq_set_affinity(info->irq, cpumask_of(target)));
return 0;
}
static __init int cxl_pmu_init(void)
{
int rc;
rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
"AP_PERF_CXL_PMU_ONLINE",
cxl_pmu_online_cpu, cxl_pmu_offline_cpu);
if (rc < 0)
return rc;
cxl_pmu_cpuhp_state_num = rc;
rc = cxl_driver_register(&cxl_pmu_driver);
if (rc)
cpuhp_remove_multi_state(cxl_pmu_cpuhp_state_num);
return rc;
}
static __exit void cxl_pmu_exit(void)
{
cxl_driver_unregister(&cxl_pmu_driver);
cpuhp_remove_multi_state(cxl_pmu_cpuhp_state_num);
}
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(CXL);
module_init(cxl_pmu_init);
module_exit(cxl_pmu_exit);
MODULE_ALIAS_CXL(CXL_DEVICE_PMU);
| linux-master | drivers/perf/cxl_pmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ARMv8 PMUv3 Performance Events handling code.
*
* Copyright (C) 2012 ARM Limited
* Author: Will Deacon <[email protected]>
*
* This code is based heavily on the ARMv7 perf event code.
*/
#include <asm/irq_regs.h>
#include <asm/perf_event.h>
#include <asm/virt.h>
#include <clocksource/arm_arch_timer.h>
#include <linux/acpi.h>
#include <linux/clocksource.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/perf/arm_pmuv3.h>
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/arm_pmuv3.h>
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
/* ARMv8 Cavium ThunderX specific event types. */
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST 0xE9
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS 0xEA
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS 0xEB
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS 0xEC
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS 0xED
/*
* ARMv8 architecturally defined events; not all of these may
* be supported on any given implementation. Unsupported events will
* be disabled at run-time based on the PMCEID registers.
*/
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
PERF_MAP_ALL_UNSUPPORTED,
[PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INST_RETIRED,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
};
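/*
 * Example (illustrative): a generic request such as "perf stat -e cycles"
 * arrives here as PERF_COUNT_HW_CPU_CYCLES and is translated by this
 * table to ARMV8_PMUV3_PERFCTR_CPU_CYCLES; entries left as
 * PERF_MAP_ALL_UNSUPPORTED are rejected when the event is initialised.
 */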
static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
PERF_CACHE_MAP_ALL_UNSUPPORTED,
[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,
[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB,
[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB,
[C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD,
[C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_LL_CACHE_RD,
[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};
static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
PERF_CACHE_MAP_ALL_UNSUPPORTED,
[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,
[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
PERF_CACHE_MAP_ALL_UNSUPPORTED,
[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,
[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
PERF_CACHE_MAP_ALL_UNSUPPORTED,
[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
};
static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
PERF_CACHE_MAP_ALL_UNSUPPORTED,
[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,
[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,
[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
};
static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
PERF_CACHE_MAP_ALL_UNSUPPORTED,
[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,
[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
static ssize_t
armv8pmu_events_sysfs_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}
#define ARMV8_EVENT_ATTR(name, config) \
PMU_EVENT_ATTR_ID(name, armv8pmu_events_sysfs_show, config)
static struct attribute *armv8_pmuv3_event_attrs[] = {
ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR),
ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL),
ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL),
ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL),
ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE),
ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL),
ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED),
ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED),
ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED),
ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN),
ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN),
ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED),
ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED),
ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED),
ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED),
ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED),
ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED),
ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES),
ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED),
ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS),
ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE),
ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB),
ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE),
ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL),
ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB),
ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS),
ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR),
ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC),
ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED),
ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES),
/* Don't expose the chain event in /sys, since it's useless in isolation */
ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE),
ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE),
ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED),
ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED),
ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND),
ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND),
ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB),
ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB),
ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE),
ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL),
ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE),
ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL),
ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE),
ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB),
ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL),
ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL),
ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB),
ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB),
ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS),
ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE),
ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS),
ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK),
ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK),
ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD),
ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD),
ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD),
ARMV8_EVENT_ATTR(l1d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD),
ARMV8_EVENT_ATTR(op_retired, ARMV8_PMUV3_PERFCTR_OP_RETIRED),
ARMV8_EVENT_ATTR(op_spec, ARMV8_PMUV3_PERFCTR_OP_SPEC),
ARMV8_EVENT_ATTR(stall, ARMV8_PMUV3_PERFCTR_STALL),
ARMV8_EVENT_ATTR(stall_slot_backend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND),
ARMV8_EVENT_ATTR(stall_slot_frontend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND),
ARMV8_EVENT_ATTR(stall_slot, ARMV8_PMUV3_PERFCTR_STALL_SLOT),
ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP),
ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED),
ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE),
ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION),
ARMV8_EVENT_ATTR(cnt_cycles, ARMV8_AMU_PERFCTR_CNT_CYCLES),
ARMV8_EVENT_ATTR(stall_backend_mem, ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM),
ARMV8_EVENT_ATTR(l1i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS),
ARMV8_EVENT_ATTR(l2d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD),
ARMV8_EVENT_ATTR(l2i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS),
ARMV8_EVENT_ATTR(l3d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD),
ARMV8_EVENT_ATTR(trb_wrap, ARMV8_PMUV3_PERFCTR_TRB_WRAP),
ARMV8_EVENT_ATTR(trb_trig, ARMV8_PMUV3_PERFCTR_TRB_TRIG),
ARMV8_EVENT_ATTR(trcextout0, ARMV8_PMUV3_PERFCTR_TRCEXTOUT0),
ARMV8_EVENT_ATTR(trcextout1, ARMV8_PMUV3_PERFCTR_TRCEXTOUT1),
ARMV8_EVENT_ATTR(trcextout2, ARMV8_PMUV3_PERFCTR_TRCEXTOUT2),
ARMV8_EVENT_ATTR(trcextout3, ARMV8_PMUV3_PERFCTR_TRCEXTOUT3),
ARMV8_EVENT_ATTR(cti_trigout4, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT4),
ARMV8_EVENT_ATTR(cti_trigout5, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT5),
ARMV8_EVENT_ATTR(cti_trigout6, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT6),
ARMV8_EVENT_ATTR(cti_trigout7, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT7),
ARMV8_EVENT_ATTR(ldst_align_lat, ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT),
ARMV8_EVENT_ATTR(ld_align_lat, ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT),
ARMV8_EVENT_ATTR(st_align_lat, ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT),
ARMV8_EVENT_ATTR(mem_access_checked, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED),
ARMV8_EVENT_ATTR(mem_access_checked_rd, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD),
ARMV8_EVENT_ATTR(mem_access_checked_wr, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR),
NULL,
};
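/*
 * Illustrative note (not part of the original driver): each entry built
 * with ARMV8_EVENT_ATTR() above becomes a file in the PMU's "events"
 * sysfs group, rendered by armv8pmu_events_sysfs_show(). Assuming a PMU
 * instance registered as "armv8_pmuv3_0", reading one of them might look
 * like:
 *
 *   $ cat /sys/bus/event_source/devices/armv8_pmuv3_0/events/sw_incr
 *   event=0x0000
 *
 * and the symbolic name can then be used directly with perf, e.g.
 *   perf stat -e armv8_pmuv3_0/sw_incr/ -a sleep 1
 */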
static umode_t
armv8pmu_event_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int unused)
{
struct device *dev = kobj_to_dev(kobj);
struct pmu *pmu = dev_get_drvdata(dev);
struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
return attr->mode;
if (pmu_attr->id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE) {
u64 id = pmu_attr->id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;
if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
test_bit(id, cpu_pmu->pmceid_ext_bitmap))
return attr->mode;
}
return 0;
}
static const struct attribute_group armv8_pmuv3_events_attr_group = {
.name = "events",
.attrs = armv8_pmuv3_event_attrs,
.is_visible = armv8pmu_event_attr_is_visible,
};
PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(long, "config1:0");
PMU_FORMAT_ATTR(rdpmc, "config1:1");
static int sysctl_perf_user_access __read_mostly;
static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
{
return event->attr.config1 & 0x1;
}
static inline bool armv8pmu_event_want_user_access(struct perf_event *event)
{
return event->attr.config1 & 0x2;
}
static struct attribute *armv8_pmuv3_format_attrs[] = {
&format_attr_event.attr,
&format_attr_long.attr,
&format_attr_rdpmc.attr,
NULL,
};
static const struct attribute_group armv8_pmuv3_format_attr_group = {
.name = "format",
.attrs = armv8_pmuv3_format_attrs,
};
static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
char *page)
{
struct pmu *pmu = dev_get_drvdata(dev);
struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;
return sysfs_emit(page, "0x%08x\n", slots);
}
static DEVICE_ATTR_RO(slots);
static ssize_t bus_slots_show(struct device *dev, struct device_attribute *attr,
char *page)
{
struct pmu *pmu = dev_get_drvdata(dev);
struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
u32 bus_slots = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_SLOTS_SHIFT)
& ARMV8_PMU_BUS_SLOTS_MASK;
return sysfs_emit(page, "0x%08x\n", bus_slots);
}
static DEVICE_ATTR_RO(bus_slots);
static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr,
char *page)
{
struct pmu *pmu = dev_get_drvdata(dev);
struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
u32 bus_width = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_WIDTH_SHIFT)
& ARMV8_PMU_BUS_WIDTH_MASK;
u32 val = 0;
/* Encoded as Log2(number of bytes), plus one */
if (bus_width > 2 && bus_width < 13)
val = 1 << (bus_width - 1);
return sysfs_emit(page, "0x%08x\n", val);
}
static DEVICE_ATTR_RO(bus_width);
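/*
 * Worked example for the decoding above (illustrative, not from the
 * original source): a PMMIR.BUS_WIDTH field of 3 means log2(bytes) + 1
 * == 3, i.e. a 4-byte (32-bit) bus, so bus_width_show() reports
 * 1 << (3 - 1) = 0x00000004. Field values outside 3..12 are treated as
 * "not described" and reported as 0.
 */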
static struct attribute *armv8_pmuv3_caps_attrs[] = {
&dev_attr_slots.attr,
&dev_attr_bus_slots.attr,
&dev_attr_bus_width.attr,
NULL,
};
static const struct attribute_group armv8_pmuv3_caps_attr_group = {
.name = "caps",
.attrs = armv8_pmuv3_caps_attrs,
};
/*
* Perf Events' indices
*/
#define ARMV8_IDX_CYCLE_COUNTER 0
#define ARMV8_IDX_COUNTER0 1
#define ARMV8_IDX_CYCLE_COUNTER_USER 32
/*
* We unconditionally enable ARMv8.5-PMU long event counter support
* (64-bit events) where supported. Indicate if this arm_pmu has long
* event counter support.
*
* On AArch32, long counters make no sense (you can't access the top
* bits), so we only enable this on AArch64.
*/
static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
{
return (IS_ENABLED(CONFIG_ARM64) && is_pmuv3p5(cpu_pmu->pmuver));
}
static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
{
return event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT;
}
/*
 * We must chain two programmable counters for 64-bit events,
 * except when we have allocated the 64-bit cycle counter (for the
 * CPU cycles event) or when user space counter access is enabled.
*/
static inline bool armv8pmu_event_is_chained(struct perf_event *event)
{
int idx = event->hw.idx;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
return !armv8pmu_event_has_user_read(event) &&
armv8pmu_event_is_64bit(event) &&
!armv8pmu_has_long_event(cpu_pmu) &&
(idx != ARMV8_IDX_CYCLE_COUNTER);
}
/*
* ARMv8 low level PMU access
*/
/*
* Perf Event to low level counters mapping
*/
#define ARMV8_IDX_TO_COUNTER(x) \
(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
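/*
 * Illustrative mapping (not in the original source): the perf-facing
 * index space reserves 0 for the cycle counter, so event counter n is
 * exposed as index n + 1. Hence ARMV8_IDX_TO_COUNTER(1) == 0 and
 * ARMV8_IDX_TO_COUNTER(2) == 1; the cycle counter itself never goes
 * through this macro.
 */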
static inline u32 armv8pmu_pmcr_read(void)
{
return read_pmcr();
}
static inline void armv8pmu_pmcr_write(u32 val)
{
val &= ARMV8_PMU_PMCR_MASK;
isb();
write_pmcr(val);
}
static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}
static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}
static inline u64 armv8pmu_read_evcntr(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
return read_pmevcntrn(counter);
}
static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
{
int idx = event->hw.idx;
u64 val = armv8pmu_read_evcntr(idx);
if (armv8pmu_event_is_chained(event))
val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
return val;
}
/*
* The cycle counter is always a 64-bit counter. When ARMV8_PMU_PMCR_LP
* is set the event counters also become 64-bit counters. Unless the
* user has requested a long counter (attr.config1) then we want to
* interrupt upon 32-bit overflow - we achieve this by applying a bias.
*/
static bool armv8pmu_event_needs_bias(struct perf_event *event)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
if (armv8pmu_event_is_64bit(event))
return false;
if (armv8pmu_has_long_event(cpu_pmu) ||
idx == ARMV8_IDX_CYCLE_COUNTER)
return true;
return false;
}
static u64 armv8pmu_bias_long_counter(struct perf_event *event, u64 value)
{
if (armv8pmu_event_needs_bias(event))
value |= GENMASK_ULL(63, 32);
return value;
}
static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value)
{
if (armv8pmu_event_needs_bias(event))
value &= ~GENMASK_ULL(63, 32);
return value;
}
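/*
 * Worked example (illustrative, not from the original source): with
 * ARMV8_PMU_PMCR_LP set every counter is 64 bits wide, but a plain
 * 32-bit event still expects an interrupt on 32-bit overflow. A period
 * value of 0x00001000 therefore becomes 0xffffffff00001000 after
 * armv8pmu_bias_long_counter(), so the 64-bit counter overflows exactly
 * when the low 32 bits would have; armv8pmu_unbias_long_counter() strips
 * the bias again on read.
 */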
static u64 armv8pmu_read_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
u64 value;
if (idx == ARMV8_IDX_CYCLE_COUNTER)
value = read_pmccntr();
else
value = armv8pmu_read_hw_counter(event);
return armv8pmu_unbias_long_counter(event, value);
}
static inline void armv8pmu_write_evcntr(int idx, u64 value)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
write_pmevcntrn(counter, value);
}
static inline void armv8pmu_write_hw_counter(struct perf_event *event,
u64 value)
{
int idx = event->hw.idx;
if (armv8pmu_event_is_chained(event)) {
armv8pmu_write_evcntr(idx, upper_32_bits(value));
armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
} else {
armv8pmu_write_evcntr(idx, value);
}
}
static void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
value = armv8pmu_bias_long_counter(event, value);
if (idx == ARMV8_IDX_CYCLE_COUNTER)
write_pmccntr(value);
else
armv8pmu_write_hw_counter(event, value);
}
static inline void armv8pmu_write_evtype(int idx, u32 val)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
val &= ARMV8_PMU_EVTYPE_MASK;
write_pmevtypern(counter, val);
}
static inline void armv8pmu_write_event_type(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
/*
* For chained events, the low counter is programmed to count
* the event of interest and the high counter is programmed
* with CHAIN event code with filters set to count at all ELs.
*/
if (armv8pmu_event_is_chained(event)) {
u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
ARMV8_PMU_INCLUDE_EL2;
armv8pmu_write_evtype(idx - 1, hwc->config_base);
armv8pmu_write_evtype(idx, chain_evt);
} else {
if (idx == ARMV8_IDX_CYCLE_COUNTER)
write_pmccfiltr(hwc->config_base);
else
armv8pmu_write_evtype(idx, hwc->config_base);
}
}
static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
{
int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
u32 mask = BIT(counter);
if (armv8pmu_event_is_chained(event))
mask |= BIT(counter - 1);
return mask;
}
static inline void armv8pmu_enable_counter(u32 mask)
{
/*
* Make sure event configuration register writes are visible before we
* enable the counter.
	 */
isb();
write_pmcntenset(mask);
}
static inline void armv8pmu_enable_event_counter(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
u32 mask = armv8pmu_event_cnten_mask(event);
kvm_set_pmu_events(mask, attr);
/* We rely on the hypervisor switch code to enable guest counters */
if (!kvm_pmu_counter_deferred(attr))
armv8pmu_enable_counter(mask);
}
static inline void armv8pmu_disable_counter(u32 mask)
{
write_pmcntenclr(mask);
/*
* Make sure the effects of disabling the counter are visible before we
* start configuring the event.
*/
isb();
}
static inline void armv8pmu_disable_event_counter(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
u32 mask = armv8pmu_event_cnten_mask(event);
kvm_clr_pmu_events(mask);
/* We rely on the hypervisor switch code to disable guest counters */
if (!kvm_pmu_counter_deferred(attr))
armv8pmu_disable_counter(mask);
}
static inline void armv8pmu_enable_intens(u32 mask)
{
write_pmintenset(mask);
}
static inline void armv8pmu_enable_event_irq(struct perf_event *event)
{
u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
armv8pmu_enable_intens(BIT(counter));
}
static inline void armv8pmu_disable_intens(u32 mask)
{
write_pmintenclr(mask);
isb();
/* Clear the overflow flag in case an interrupt is pending. */
write_pmovsclr(mask);
isb();
}
static inline void armv8pmu_disable_event_irq(struct perf_event *event)
{
u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
armv8pmu_disable_intens(BIT(counter));
}
static inline u32 armv8pmu_getreset_flags(void)
{
u32 value;
/* Read */
value = read_pmovsclr();
/* Write to clear flags */
value &= ARMV8_PMU_OVSR_MASK;
write_pmovsclr(value);
return value;
}
static void update_pmuserenr(u64 val)
{
lockdep_assert_irqs_disabled();
/*
* The current PMUSERENR_EL0 value might be the value for the guest.
	 * If that's the case, have KVM keep track of the register value
* for the host EL0 so that KVM can restore it before returning to
* the host EL0. Otherwise, update the register now.
*/
if (kvm_set_pmuserenr(val))
return;
write_pmuserenr(val);
}
static void armv8pmu_disable_user_access(void)
{
update_pmuserenr(0);
}
static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
{
int i;
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
/* Clear any unused counters to avoid leaking their contents */
for_each_clear_bit(i, cpuc->used_mask, cpu_pmu->num_events) {
if (i == ARMV8_IDX_CYCLE_COUNTER)
write_pmccntr(0);
else
armv8pmu_write_evcntr(i, 0);
}
update_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR);
}
static void armv8pmu_enable_event(struct perf_event *event)
{
/*
* Enable counter and interrupt, and set the counter to count
* the event that we're interested in.
*/
armv8pmu_disable_event_counter(event);
armv8pmu_write_event_type(event);
armv8pmu_enable_event_irq(event);
armv8pmu_enable_event_counter(event);
}
static void armv8pmu_disable_event(struct perf_event *event)
{
armv8pmu_disable_event_counter(event);
armv8pmu_disable_event_irq(event);
}
static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
struct perf_event_context *ctx;
int nr_user = 0;
ctx = perf_cpu_task_ctx();
if (ctx)
nr_user = ctx->nr_user;
if (sysctl_perf_user_access && nr_user)
armv8pmu_enable_user_access(cpu_pmu);
else
armv8pmu_disable_user_access();
/* Enable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
kvm_vcpu_pmu_resync_el0();
}
static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
/* Disable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
}
static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
u32 pmovsr;
struct perf_sample_data data;
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
int idx;
/*
* Get and reset the IRQ flags
*/
pmovsr = armv8pmu_getreset_flags();
/*
* Did an overflow occur?
*/
if (!armv8pmu_has_overflowed(pmovsr))
return IRQ_NONE;
/*
* Handle the counter(s) overflow(s)
*/
regs = get_irq_regs();
/*
* Stop the PMU while processing the counter overflows
* to prevent skews in group events.
*/
armv8pmu_stop(cpu_pmu);
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
/* Ignore if we don't have an event. */
if (!event)
continue;
/*
* We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it.
*/
if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
continue;
hwc = &event->hw;
armpmu_event_update(event);
perf_sample_data_init(&data, 0, hwc->last_period);
if (!armpmu_event_set_period(event))
continue;
/*
* Perf event overflow will queue the processing of the event as
* an irq_work which will be taken care of in the handling of
* IPI_IRQ_WORK.
*/
if (perf_event_overflow(event, &data, regs))
cpu_pmu->disable(event);
}
armv8pmu_start(cpu_pmu);
return IRQ_HANDLED;
}
static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
struct arm_pmu *cpu_pmu)
{
int idx;
for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
if (!test_and_set_bit(idx, cpuc->used_mask))
return idx;
}
return -EAGAIN;
}
static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
struct arm_pmu *cpu_pmu)
{
int idx;
/*
* Chaining requires two consecutive event counters, where
* the lower idx must be even.
*/
for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
if (!test_and_set_bit(idx, cpuc->used_mask)) {
/* Check if the preceding even counter is available */
if (!test_and_set_bit(idx - 1, cpuc->used_mask))
return idx;
/* Release the Odd counter */
clear_bit(idx, cpuc->used_mask);
}
}
return -EAGAIN;
}
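/*
 * Worked example (illustrative, not from the original source): with
 * ARMV8_IDX_COUNTER0 == 1, the loop above probes indices 2, 4, 6, ...
 * and claims the pair (idx, idx - 1). A chained 64-bit event that wins
 * idx 2 therefore occupies hardware counters 1 and 0, with the CHAIN
 * event programmed on the higher counter by armv8pmu_write_event_type().
 */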
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct perf_event *event)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
/* Always prefer to place a cycle counter into the cycle counter. */
if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
return ARMV8_IDX_CYCLE_COUNTER;
else if (armv8pmu_event_is_64bit(event) &&
armv8pmu_event_want_user_access(event) &&
!armv8pmu_has_long_event(cpu_pmu))
return -EAGAIN;
}
/*
	 * Otherwise use the event counters
*/
if (armv8pmu_event_is_chained(event))
return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
else
return armv8pmu_get_single_idx(cpuc, cpu_pmu);
}
static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
struct perf_event *event)
{
int idx = event->hw.idx;
clear_bit(idx, cpuc->used_mask);
if (armv8pmu_event_is_chained(event))
clear_bit(idx - 1, cpuc->used_mask);
}
static int armv8pmu_user_event_idx(struct perf_event *event)
{
if (!sysctl_perf_user_access || !armv8pmu_event_has_user_read(event))
return 0;
/*
* We remap the cycle counter index to 32 to
* match the offset applied to the rest of
* the counter indices.
*/
if (event->hw.idx == ARMV8_IDX_CYCLE_COUNTER)
return ARMV8_IDX_CYCLE_COUNTER_USER;
return event->hw.idx;
}
/*
* Add an event filter to a given event.
*/
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
struct perf_event_attr *attr)
{
unsigned long config_base = 0;
if (attr->exclude_idle)
return -EPERM;
/*
* If we're running in hyp mode, then we *are* the hypervisor.
* Therefore we ignore exclude_hv in this configuration, since
* there's no hypervisor to sample anyway. This is consistent
* with other architectures (x86 and Power).
*/
if (is_kernel_in_hyp_mode()) {
if (!attr->exclude_kernel && !attr->exclude_host)
config_base |= ARMV8_PMU_INCLUDE_EL2;
if (attr->exclude_guest)
config_base |= ARMV8_PMU_EXCLUDE_EL1;
if (attr->exclude_host)
config_base |= ARMV8_PMU_EXCLUDE_EL0;
} else {
if (!attr->exclude_hv && !attr->exclude_host)
config_base |= ARMV8_PMU_INCLUDE_EL2;
}
/*
* Filter out !VHE kernels and guest kernels
*/
if (attr->exclude_kernel)
config_base |= ARMV8_PMU_EXCLUDE_EL1;
if (attr->exclude_user)
config_base |= ARMV8_PMU_EXCLUDE_EL0;
/*
* Install the filter into config_base as this is used to
* construct the event type.
*/
event->config_base = config_base;
return 0;
}
static void armv8pmu_reset(void *info)
{
struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
u32 pmcr;
/* The counter and interrupt enable registers are unknown at reset. */
armv8pmu_disable_counter(U32_MAX);
armv8pmu_disable_intens(U32_MAX);
/* Clear the counters we flip at guest entry/exit */
kvm_clr_pmu_events(U32_MAX);
/*
* Initialize & Reset PMNC. Request overflow interrupt for
* 64 bit cycle counter but cheat in armv8pmu_write_counter().
*/
pmcr = ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_LC;
/* Enable long event counter support where available */
if (armv8pmu_has_long_event(cpu_pmu))
pmcr |= ARMV8_PMU_PMCR_LP;
armv8pmu_pmcr_write(pmcr);
}
static int __armv8_pmuv3_map_event_id(struct arm_pmu *armpmu,
struct perf_event *event)
{
if (event->attr.type == PERF_TYPE_HARDWARE &&
event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) {
if (test_bit(ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
armpmu->pmceid_bitmap))
return ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED;
if (test_bit(ARMV8_PMUV3_PERFCTR_BR_RETIRED,
armpmu->pmceid_bitmap))
return ARMV8_PMUV3_PERFCTR_BR_RETIRED;
return HW_OP_UNSUPPORTED;
}
return armpmu_map_event(event, &armv8_pmuv3_perf_map,
&armv8_pmuv3_perf_cache_map,
ARMV8_PMU_EVTYPE_EVENT);
}
static int __armv8_pmuv3_map_event(struct perf_event *event,
const unsigned (*extra_event_map)
[PERF_COUNT_HW_MAX],
const unsigned (*extra_cache_map)
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX])
{
int hw_event_id;
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
hw_event_id = __armv8_pmuv3_map_event_id(armpmu, event);
/*
* CHAIN events only work when paired with an adjacent counter, and it
* never makes sense for a user to open one in isolation, as they'll be
* rotated arbitrarily.
*/
if (hw_event_id == ARMV8_PMUV3_PERFCTR_CHAIN)
return -EINVAL;
if (armv8pmu_event_is_64bit(event))
event->hw.flags |= ARMPMU_EVT_64BIT;
/*
* User events must be allocated into a single counter, and so
* must not be chained.
*
* Most 64-bit events require long counter support, but 64-bit
* CPU_CYCLES events can be placed into the dedicated cycle
* counter when this is free.
*/
if (armv8pmu_event_want_user_access(event)) {
if (!(event->attach_state & PERF_ATTACH_TASK))
return -EINVAL;
if (armv8pmu_event_is_64bit(event) &&
(hw_event_id != ARMV8_PMUV3_PERFCTR_CPU_CYCLES) &&
!armv8pmu_has_long_event(armpmu))
return -EOPNOTSUPP;
event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;
}
/* Only expose micro/arch events supported by this PMU */
if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
&& test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
return hw_event_id;
}
return armpmu_map_event(event, extra_event_map, extra_cache_map,
ARMV8_PMU_EVTYPE_EVENT);
}
static int armv8_pmuv3_map_event(struct perf_event *event)
{
return __armv8_pmuv3_map_event(event, NULL, NULL);
}
static int armv8_a53_map_event(struct perf_event *event)
{
return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
}
static int armv8_a57_map_event(struct perf_event *event)
{
return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
}
static int armv8_a73_map_event(struct perf_event *event)
{
return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
}
static int armv8_thunder_map_event(struct perf_event *event)
{
return __armv8_pmuv3_map_event(event, NULL,
&armv8_thunder_perf_cache_map);
}
static int armv8_vulcan_map_event(struct perf_event *event)
{
return __armv8_pmuv3_map_event(event, NULL,
&armv8_vulcan_perf_cache_map);
}
struct armv8pmu_probe_info {
struct arm_pmu *pmu;
bool present;
};
static void __armv8pmu_probe_pmu(void *info)
{
struct armv8pmu_probe_info *probe = info;
struct arm_pmu *cpu_pmu = probe->pmu;
u64 pmceid_raw[2];
u32 pmceid[2];
int pmuver;
pmuver = read_pmuver();
if (!pmuv3_implemented(pmuver))
return;
cpu_pmu->pmuver = pmuver;
probe->present = true;
	/* Read the number of CNTx counters supported from PMNC */
cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
& ARMV8_PMU_PMCR_N_MASK;
/* Add the CPU cycles counter */
cpu_pmu->num_events += 1;
pmceid[0] = pmceid_raw[0] = read_pmceid0();
pmceid[1] = pmceid_raw[1] = read_pmceid1();
bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
pmceid[0] = pmceid_raw[0] >> 32;
pmceid[1] = pmceid_raw[1] >> 32;
bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
/* store PMMIR register for sysfs */
if (is_pmuv3p4(pmuver) && (pmceid_raw[1] & BIT(31)))
cpu_pmu->reg_pmmir = read_pmmir();
else
cpu_pmu->reg_pmmir = 0;
}
static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
{
struct armv8pmu_probe_info probe = {
.pmu = cpu_pmu,
.present = false,
};
int ret;
ret = smp_call_function_any(&cpu_pmu->supported_cpus,
__armv8pmu_probe_pmu,
&probe, 1);
if (ret)
return ret;
return probe.present ? 0 : -ENODEV;
}
static void armv8pmu_disable_user_access_ipi(void *unused)
{
armv8pmu_disable_user_access();
}
static int armv8pmu_proc_user_access_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret || !write || sysctl_perf_user_access)
return ret;
on_each_cpu(armv8pmu_disable_user_access_ipi, NULL, 1);
return 0;
}
static struct ctl_table armv8_pmu_sysctl_table[] = {
{
.procname = "perf_user_access",
.data = &sysctl_perf_user_access,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = armv8pmu_proc_user_access_handler,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{ }
};
static void armv8_pmu_register_sysctl_table(void)
{
static u32 tbl_registered = 0;
if (!cmpxchg_relaxed(&tbl_registered, 0, 1))
register_sysctl("kernel", armv8_pmu_sysctl_table);
}
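/*
 * Illustrative usage (not part of the driver): the table above creates
 * /proc/sys/kernel/perf_user_access. Assuming root, user-space counter
 * access for suitably opened events can be enabled with:
 *
 *   # echo 1 > /proc/sys/kernel/perf_user_access
 *
 * Writing 0 disables it again; in that case the handler IPIs every CPU
 * through armv8pmu_disable_user_access_ipi() to clear PMUSERENR_EL0.
 */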
static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
int (*map_event)(struct perf_event *event),
const struct attribute_group *events,
const struct attribute_group *format,
const struct attribute_group *caps)
{
int ret = armv8pmu_probe_pmu(cpu_pmu);
if (ret)
return ret;
cpu_pmu->handle_irq = armv8pmu_handle_irq;
cpu_pmu->enable = armv8pmu_enable_event;
cpu_pmu->disable = armv8pmu_disable_event;
cpu_pmu->read_counter = armv8pmu_read_counter;
cpu_pmu->write_counter = armv8pmu_write_counter;
cpu_pmu->get_event_idx = armv8pmu_get_event_idx;
cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx;
cpu_pmu->start = armv8pmu_start;
cpu_pmu->stop = armv8pmu_stop;
cpu_pmu->reset = armv8pmu_reset;
cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
cpu_pmu->pmu.event_idx = armv8pmu_user_event_idx;
cpu_pmu->name = name;
cpu_pmu->map_event = map_event;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = events ?
events : &armv8_pmuv3_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = format ?
format : &armv8_pmuv3_format_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_CAPS] = caps ?
caps : &armv8_pmuv3_caps_attr_group;
armv8_pmu_register_sysctl_table();
return 0;
}
static int armv8_pmu_init_nogroups(struct arm_pmu *cpu_pmu, char *name,
int (*map_event)(struct perf_event *event))
{
return armv8_pmu_init(cpu_pmu, name, map_event, NULL, NULL, NULL);
}
#define PMUV3_INIT_SIMPLE(name) \
static int name##_pmu_init(struct arm_pmu *cpu_pmu) \
{ \
return armv8_pmu_init_nogroups(cpu_pmu, #name, armv8_pmuv3_map_event);\
}
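/*
 * For reference (illustrative, derived from the macro above),
 * PMUV3_INIT_SIMPLE(armv8_cortex_a55) expands to:
 *
 *   static int armv8_cortex_a55_pmu_init(struct arm_pmu *cpu_pmu)
 *   {
 *           return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a55",
 *                                          armv8_pmuv3_map_event);
 *   }
 *
 * which is exactly the symbol referenced by the of_device_id table
 * further down.
 */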
PMUV3_INIT_SIMPLE(armv8_pmuv3)
PMUV3_INIT_SIMPLE(armv8_cortex_a34)
PMUV3_INIT_SIMPLE(armv8_cortex_a55)
PMUV3_INIT_SIMPLE(armv8_cortex_a65)
PMUV3_INIT_SIMPLE(armv8_cortex_a75)
PMUV3_INIT_SIMPLE(armv8_cortex_a76)
PMUV3_INIT_SIMPLE(armv8_cortex_a77)
PMUV3_INIT_SIMPLE(armv8_cortex_a78)
PMUV3_INIT_SIMPLE(armv9_cortex_a510)
PMUV3_INIT_SIMPLE(armv9_cortex_a520)
PMUV3_INIT_SIMPLE(armv9_cortex_a710)
PMUV3_INIT_SIMPLE(armv9_cortex_a715)
PMUV3_INIT_SIMPLE(armv9_cortex_a720)
PMUV3_INIT_SIMPLE(armv8_cortex_x1)
PMUV3_INIT_SIMPLE(armv9_cortex_x2)
PMUV3_INIT_SIMPLE(armv9_cortex_x3)
PMUV3_INIT_SIMPLE(armv9_cortex_x4)
PMUV3_INIT_SIMPLE(armv8_neoverse_e1)
PMUV3_INIT_SIMPLE(armv8_neoverse_n1)
PMUV3_INIT_SIMPLE(armv9_neoverse_n2)
PMUV3_INIT_SIMPLE(armv8_neoverse_v1)
PMUV3_INIT_SIMPLE(armv8_nvidia_carmel)
PMUV3_INIT_SIMPLE(armv8_nvidia_denver)
static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
{
return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a35",
armv8_a53_map_event);
}
static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a53",
armv8_a53_map_event);
}
static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a57",
armv8_a57_map_event);
}
static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
{
return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a72",
armv8_a57_map_event);
}
static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
{
return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a73",
armv8_a73_map_event);
}
static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
{
return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cavium_thunder",
armv8_thunder_map_event);
}
static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
{
return armv8_pmu_init_nogroups(cpu_pmu, "armv8_brcm_vulcan",
armv8_vulcan_map_event);
}
static const struct of_device_id armv8_pmu_of_device_ids[] = {
{.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_pmu_init},
{.compatible = "arm,cortex-a34-pmu", .data = armv8_cortex_a34_pmu_init},
{.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init},
{.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
{.compatible = "arm,cortex-a55-pmu", .data = armv8_cortex_a55_pmu_init},
{.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
{.compatible = "arm,cortex-a65-pmu", .data = armv8_cortex_a65_pmu_init},
{.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init},
{.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init},
{.compatible = "arm,cortex-a75-pmu", .data = armv8_cortex_a75_pmu_init},
{.compatible = "arm,cortex-a76-pmu", .data = armv8_cortex_a76_pmu_init},
{.compatible = "arm,cortex-a77-pmu", .data = armv8_cortex_a77_pmu_init},
{.compatible = "arm,cortex-a78-pmu", .data = armv8_cortex_a78_pmu_init},
{.compatible = "arm,cortex-a510-pmu", .data = armv9_cortex_a510_pmu_init},
{.compatible = "arm,cortex-a520-pmu", .data = armv9_cortex_a520_pmu_init},
{.compatible = "arm,cortex-a710-pmu", .data = armv9_cortex_a710_pmu_init},
{.compatible = "arm,cortex-a715-pmu", .data = armv9_cortex_a715_pmu_init},
{.compatible = "arm,cortex-a720-pmu", .data = armv9_cortex_a720_pmu_init},
{.compatible = "arm,cortex-x1-pmu", .data = armv8_cortex_x1_pmu_init},
{.compatible = "arm,cortex-x2-pmu", .data = armv9_cortex_x2_pmu_init},
{.compatible = "arm,cortex-x3-pmu", .data = armv9_cortex_x3_pmu_init},
{.compatible = "arm,cortex-x4-pmu", .data = armv9_cortex_x4_pmu_init},
{.compatible = "arm,neoverse-e1-pmu", .data = armv8_neoverse_e1_pmu_init},
{.compatible = "arm,neoverse-n1-pmu", .data = armv8_neoverse_n1_pmu_init},
{.compatible = "arm,neoverse-n2-pmu", .data = armv9_neoverse_n2_pmu_init},
{.compatible = "arm,neoverse-v1-pmu", .data = armv8_neoverse_v1_pmu_init},
{.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init},
{.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init},
{.compatible = "nvidia,carmel-pmu", .data = armv8_nvidia_carmel_pmu_init},
{.compatible = "nvidia,denver-pmu", .data = armv8_nvidia_denver_pmu_init},
{},
};
static int armv8_pmu_device_probe(struct platform_device *pdev)
{
return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}
static struct platform_driver armv8_pmu_driver = {
.driver = {
.name = ARMV8_PMU_PDEV_NAME,
.of_match_table = armv8_pmu_of_device_ids,
.suppress_bind_attrs = true,
},
.probe = armv8_pmu_device_probe,
};
static int __init armv8_pmu_driver_init(void)
{
int ret;
if (acpi_disabled)
ret = platform_driver_register(&armv8_pmu_driver);
else
ret = arm_pmu_acpi_probe(armv8_pmuv3_pmu_init);
if (!ret)
lockup_detector_retry_init();
return ret;
}
device_initcall(armv8_pmu_driver_init)
void arch_perf_update_userpage(struct perf_event *event,
struct perf_event_mmap_page *userpg, u64 now)
{
struct clock_read_data *rd;
unsigned int seq;
u64 ns;
userpg->cap_user_time = 0;
userpg->cap_user_time_zero = 0;
userpg->cap_user_time_short = 0;
userpg->cap_user_rdpmc = armv8pmu_event_has_user_read(event);
if (userpg->cap_user_rdpmc) {
if (event->hw.flags & ARMPMU_EVT_64BIT)
userpg->pmc_width = 64;
else
userpg->pmc_width = 32;
}
do {
rd = sched_clock_read_begin(&seq);
if (rd->read_sched_clock != arch_timer_read_counter)
return;
userpg->time_mult = rd->mult;
userpg->time_shift = rd->shift;
userpg->time_zero = rd->epoch_ns;
userpg->time_cycles = rd->epoch_cyc;
userpg->time_mask = rd->sched_clock_mask;
/*
* Subtract the cycle base, such that software that
* doesn't know about cap_user_time_short still 'works'
* assuming no wraps.
*/
ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
userpg->time_zero -= ns;
} while (sched_clock_read_retry(seq));
userpg->time_offset = userpg->time_zero - now;
/*
* time_shift is not expected to be greater than 31 due to
* the original published conversion algorithm shifting a
* 32-bit value (now specifies a 64-bit value) - refer
* perf_event_mmap_page documentation in perf_event.h.
*/
if (userpg->time_shift == 32) {
userpg->time_shift = 31;
userpg->time_mult >>= 1;
}
/*
* Internal timekeeping for enabled/running/stopped times
* is always computed with the sched_clock.
*/
userpg->cap_user_time = 1;
userpg->cap_user_time_zero = 1;
userpg->cap_user_time_short = 1;
}
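/*
 * Illustrative note (not in the original source): with the fields filled
 * in above, user space can convert a captured counter value to time
 * roughly as
 *
 *   ns = time_zero + ((cyc * time_mult) >> time_shift)
 *
 * (with cyc first wrapped via time_cycles/time_mask when
 * cap_user_time_short is set). See the perf_event_mmap_page
 * documentation in perf_event.h for the overflow-safe form that perf
 * tooling actually uses.
 */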
| linux-master | drivers/perf/arm_pmuv3.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ARM DynamIQ Shared Unit (DSU) PMU driver
*
* Copyright (C) ARM Limited, 2017.
*
* Based on ARM CCI-PMU, ARMv8 PMU-v3 drivers.
*/
#define PMUNAME "arm_dsu"
#define DRVNAME PMUNAME "_pmu"
#define pr_fmt(fmt) DRVNAME ": " fmt
#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <asm/arm_dsu_pmu.h>
#include <asm/local64.h>
/* PMU event codes */
#define DSU_PMU_EVT_CYCLES 0x11
#define DSU_PMU_EVT_CHAIN 0x1e
#define DSU_PMU_MAX_COMMON_EVENTS 0x40
#define DSU_PMU_MAX_HW_CNTRS 32
#define DSU_PMU_HW_COUNTER_MASK (DSU_PMU_MAX_HW_CNTRS - 1)
#define CLUSTERPMCR_E BIT(0)
#define CLUSTERPMCR_P BIT(1)
#define CLUSTERPMCR_C BIT(2)
#define CLUSTERPMCR_N_SHIFT 11
#define CLUSTERPMCR_N_MASK 0x1f
#define CLUSTERPMCR_IDCODE_SHIFT 16
#define CLUSTERPMCR_IDCODE_MASK 0xff
#define CLUSTERPMCR_IMP_SHIFT 24
#define CLUSTERPMCR_IMP_MASK 0xff
#define CLUSTERPMCR_RES_MASK 0x7e8
#define CLUSTERPMCR_RES_VAL 0x40
#define DSU_ACTIVE_CPU_MASK 0x0
#define DSU_ASSOCIATED_CPU_MASK 0x1
/*
* We use the index of the counters as they appear in the counter
 * bit maps in the PMU registers (e.g. CLUSTERPMSELR).
* i.e,
* counter 0 - Bit 0
* counter 1 - Bit 1
* ...
* Cycle counter - Bit 31
*/
#define DSU_PMU_IDX_CYCLE_COUNTER 31
/* All event counters are 32bit, with a 64bit Cycle counter */
#define DSU_PMU_COUNTER_WIDTH(idx) \
(((idx) == DSU_PMU_IDX_CYCLE_COUNTER) ? 64 : 32)
#define DSU_PMU_COUNTER_MASK(idx) \
GENMASK_ULL((DSU_PMU_COUNTER_WIDTH((idx)) - 1), 0)
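/*
 * Worked example (illustrative, not from the original source):
 * DSU_PMU_COUNTER_MASK(0) is GENMASK_ULL(31, 0) for a 32-bit event
 * counter, while DSU_PMU_COUNTER_MASK(DSU_PMU_IDX_CYCLE_COUNTER) is
 * GENMASK_ULL(63, 0) for the 64-bit cycle counter. dsu_pmu_event_update()
 * applies this mask so that a counter wrap-around still yields the
 * correct delta.
 */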
#define DSU_EXT_ATTR(_name, _func, _config) \
(&((struct dev_ext_attribute[]) { \
{ \
.attr = __ATTR(_name, 0444, _func, NULL), \
.var = (void *)_config \
} \
})[0].attr.attr)
#define DSU_EVENT_ATTR(_name, _config) \
DSU_EXT_ATTR(_name, dsu_pmu_sysfs_event_show, (unsigned long)_config)
#define DSU_FORMAT_ATTR(_name, _config) \
DSU_EXT_ATTR(_name, dsu_pmu_sysfs_format_show, (char *)_config)
#define DSU_CPUMASK_ATTR(_name, _config) \
DSU_EXT_ATTR(_name, dsu_pmu_cpumask_show, (unsigned long)_config)
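/*
 * Illustrative expansion (not in the original source):
 * DSU_EVENT_ATTR(cycles, 0x11) builds an anonymous one-element
 * dev_ext_attribute array via a compound literal and yields a pointer to
 * its embedded struct attribute, with ->var carrying the event code.
 * dsu_pmu_sysfs_event_show() later casts ->var back to an unsigned long
 * and prints "event=0x11" for the corresponding sysfs file.
 */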
struct dsu_hw_events {
DECLARE_BITMAP(used_mask, DSU_PMU_MAX_HW_CNTRS);
struct perf_event *events[DSU_PMU_MAX_HW_CNTRS];
};
/*
* struct dsu_pmu - DSU PMU descriptor
*
* @pmu_lock : Protects accesses to DSU PMU register from normal vs
* interrupt handler contexts.
* @hw_events : Holds the event counter state.
* @associated_cpus : CPUs attached to the DSU.
* @active_cpu : CPU to which the PMU is bound for accesses.
* @cpuhp_node : Node for CPU hotplug notifier link.
* @num_counters : Number of event counters implemented by the PMU,
* excluding the cycle counter.
* @irq : Interrupt line for counter overflow.
* @cpmceid_bitmap : Bitmap for the availability of architected common
* events (event_code < 0x40).
*/
struct dsu_pmu {
struct pmu pmu;
struct device *dev;
raw_spinlock_t pmu_lock;
struct dsu_hw_events hw_events;
cpumask_t associated_cpus;
cpumask_t active_cpu;
struct hlist_node cpuhp_node;
s8 num_counters;
int irq;
DECLARE_BITMAP(cpmceid_bitmap, DSU_PMU_MAX_COMMON_EVENTS);
};
static unsigned long dsu_pmu_cpuhp_state;
static inline struct dsu_pmu *to_dsu_pmu(struct pmu *pmu)
{
return container_of(pmu, struct dsu_pmu, pmu);
}
static ssize_t dsu_pmu_sysfs_event_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
return sysfs_emit(buf, "event=0x%lx\n", (unsigned long)eattr->var);
}
static ssize_t dsu_pmu_sysfs_format_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}
static ssize_t dsu_pmu_cpumask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct pmu *pmu = dev_get_drvdata(dev);
struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
unsigned long mask_id = (unsigned long)eattr->var;
const cpumask_t *cpumask;
switch (mask_id) {
case DSU_ACTIVE_CPU_MASK:
cpumask = &dsu_pmu->active_cpu;
break;
case DSU_ASSOCIATED_CPU_MASK:
cpumask = &dsu_pmu->associated_cpus;
break;
default:
return 0;
}
return cpumap_print_to_pagebuf(true, buf, cpumask);
}
static struct attribute *dsu_pmu_format_attrs[] = {
DSU_FORMAT_ATTR(event, "config:0-31"),
NULL,
};
static const struct attribute_group dsu_pmu_format_attr_group = {
.name = "format",
.attrs = dsu_pmu_format_attrs,
};
static struct attribute *dsu_pmu_event_attrs[] = {
DSU_EVENT_ATTR(cycles, 0x11),
DSU_EVENT_ATTR(bus_access, 0x19),
DSU_EVENT_ATTR(memory_error, 0x1a),
DSU_EVENT_ATTR(bus_cycles, 0x1d),
DSU_EVENT_ATTR(l3d_cache_allocate, 0x29),
DSU_EVENT_ATTR(l3d_cache_refill, 0x2a),
DSU_EVENT_ATTR(l3d_cache, 0x2b),
DSU_EVENT_ATTR(l3d_cache_wb, 0x2c),
NULL,
};
static umode_t
dsu_pmu_event_attr_is_visible(struct kobject *kobj, struct attribute *attr,
int unused)
{
struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr.attr);
unsigned long evt = (unsigned long)eattr->var;
return test_bit(evt, dsu_pmu->cpmceid_bitmap) ? attr->mode : 0;
}
static const struct attribute_group dsu_pmu_events_attr_group = {
.name = "events",
.attrs = dsu_pmu_event_attrs,
.is_visible = dsu_pmu_event_attr_is_visible,
};
static struct attribute *dsu_pmu_cpumask_attrs[] = {
DSU_CPUMASK_ATTR(cpumask, DSU_ACTIVE_CPU_MASK),
DSU_CPUMASK_ATTR(associated_cpus, DSU_ASSOCIATED_CPU_MASK),
NULL,
};
static const struct attribute_group dsu_pmu_cpumask_attr_group = {
.attrs = dsu_pmu_cpumask_attrs,
};
static const struct attribute_group *dsu_pmu_attr_groups[] = {
&dsu_pmu_cpumask_attr_group,
&dsu_pmu_events_attr_group,
&dsu_pmu_format_attr_group,
NULL,
};
static int dsu_pmu_get_online_cpu_any_but(struct dsu_pmu *dsu_pmu, int cpu)
{
struct cpumask online_supported;
cpumask_and(&online_supported,
&dsu_pmu->associated_cpus, cpu_online_mask);
return cpumask_any_but(&online_supported, cpu);
}
static inline bool dsu_pmu_counter_valid(struct dsu_pmu *dsu_pmu, u32 idx)
{
return (idx < dsu_pmu->num_counters) ||
(idx == DSU_PMU_IDX_CYCLE_COUNTER);
}
static inline u64 dsu_pmu_read_counter(struct perf_event *event)
{
u64 val;
unsigned long flags;
struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
int idx = event->hw.idx;
if (WARN_ON(!cpumask_test_cpu(smp_processor_id(),
&dsu_pmu->associated_cpus)))
return 0;
if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
dev_err(event->pmu->dev,
"Trying reading invalid counter %d\n", idx);
return 0;
}
raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
if (idx == DSU_PMU_IDX_CYCLE_COUNTER)
val = __dsu_pmu_read_pmccntr();
else
val = __dsu_pmu_read_counter(idx);
raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
return val;
}
static void dsu_pmu_write_counter(struct perf_event *event, u64 val)
{
unsigned long flags;
struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
int idx = event->hw.idx;
if (WARN_ON(!cpumask_test_cpu(smp_processor_id(),
&dsu_pmu->associated_cpus)))
return;
if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
dev_err(event->pmu->dev,
"writing to invalid counter %d\n", idx);
return;
}
raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
if (idx == DSU_PMU_IDX_CYCLE_COUNTER)
__dsu_pmu_write_pmccntr(val);
else
__dsu_pmu_write_counter(idx, val);
raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
}
static int dsu_pmu_get_event_idx(struct dsu_hw_events *hw_events,
struct perf_event *event)
{
int idx;
unsigned long evtype = event->attr.config;
struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
unsigned long *used_mask = hw_events->used_mask;
if (evtype == DSU_PMU_EVT_CYCLES) {
if (test_and_set_bit(DSU_PMU_IDX_CYCLE_COUNTER, used_mask))
return -EAGAIN;
return DSU_PMU_IDX_CYCLE_COUNTER;
}
idx = find_first_zero_bit(used_mask, dsu_pmu->num_counters);
if (idx >= dsu_pmu->num_counters)
return -EAGAIN;
set_bit(idx, hw_events->used_mask);
return idx;
}
static void dsu_pmu_enable_counter(struct dsu_pmu *dsu_pmu, int idx)
{
__dsu_pmu_counter_interrupt_enable(idx);
__dsu_pmu_enable_counter(idx);
}
static void dsu_pmu_disable_counter(struct dsu_pmu *dsu_pmu, int idx)
{
__dsu_pmu_disable_counter(idx);
__dsu_pmu_counter_interrupt_disable(idx);
}
static inline void dsu_pmu_set_event(struct dsu_pmu *dsu_pmu,
struct perf_event *event)
{
int idx = event->hw.idx;
unsigned long flags;
if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
dev_err(event->pmu->dev,
"Trying to set invalid counter %d\n", idx);
return;
}
raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
__dsu_pmu_set_event(idx, event->hw.config_base);
raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
}
static void dsu_pmu_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
u64 delta, prev_count, new_count;
do {
/* We may also be called from the irq handler */
prev_count = local64_read(&hwc->prev_count);
new_count = dsu_pmu_read_counter(event);
} while (local64_cmpxchg(&hwc->prev_count, prev_count, new_count) !=
prev_count);
delta = (new_count - prev_count) & DSU_PMU_COUNTER_MASK(hwc->idx);
local64_add(delta, &event->count);
}
static void dsu_pmu_read(struct perf_event *event)
{
dsu_pmu_event_update(event);
}
static inline u32 dsu_pmu_get_reset_overflow(void)
{
return __dsu_pmu_get_reset_overflow();
}
/**
* dsu_pmu_set_event_period: Set the period for the counter.
*
 * All DSU PMU event counters, except the cycle counter, are 32-bit
 * counters. To handle cases of extreme interrupt latency, we program
 * each counter with half of its maximum count.
*/
static void dsu_pmu_set_event_period(struct perf_event *event)
{
int idx = event->hw.idx;
u64 val = DSU_PMU_COUNTER_MASK(idx) >> 1;
local64_set(&event->hw.prev_count, val);
dsu_pmu_write_counter(event, val);
}
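/*
 * Worked example (illustrative, not from the original source): for a
 * 32-bit event counter the value written here is 0xffffffff >> 1 ==
 * 0x7fffffff, so the counter overflows after roughly 2^31 events; the
 * cycle counter is programmed with 2^63 - 1 instead. This leaves ample
 * headroom even when the overflow interrupt is delayed.
 */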
static irqreturn_t dsu_pmu_handle_irq(int irq_num, void *dev)
{
int i;
bool handled = false;
struct dsu_pmu *dsu_pmu = dev;
struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
unsigned long overflow;
overflow = dsu_pmu_get_reset_overflow();
if (!overflow)
return IRQ_NONE;
for_each_set_bit(i, &overflow, DSU_PMU_MAX_HW_CNTRS) {
struct perf_event *event = hw_events->events[i];
if (!event)
continue;
dsu_pmu_event_update(event);
dsu_pmu_set_event_period(event);
handled = true;
}
return IRQ_RETVAL(handled);
}
static void dsu_pmu_start(struct perf_event *event, int pmu_flags)
{
struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
/* We always reprogram the counter */
if (pmu_flags & PERF_EF_RELOAD)
WARN_ON(!(event->hw.state & PERF_HES_UPTODATE));
dsu_pmu_set_event_period(event);
if (event->hw.idx != DSU_PMU_IDX_CYCLE_COUNTER)
dsu_pmu_set_event(dsu_pmu, event);
event->hw.state = 0;
dsu_pmu_enable_counter(dsu_pmu, event->hw.idx);
}
static void dsu_pmu_stop(struct perf_event *event, int pmu_flags)
{
struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
if (event->hw.state & PERF_HES_STOPPED)
return;
dsu_pmu_disable_counter(dsu_pmu, event->hw.idx);
dsu_pmu_event_update(event);
event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
static int dsu_pmu_add(struct perf_event *event, int flags)
{
struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
struct hw_perf_event *hwc = &event->hw;
int idx;
if (WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(),
&dsu_pmu->associated_cpus)))
return -ENOENT;
idx = dsu_pmu_get_event_idx(hw_events, event);
if (idx < 0)
return idx;
hwc->idx = idx;
hw_events->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (flags & PERF_EF_START)
dsu_pmu_start(event, PERF_EF_RELOAD);
perf_event_update_userpage(event);
return 0;
}
static void dsu_pmu_del(struct perf_event *event, int flags)
{
struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
dsu_pmu_stop(event, PERF_EF_UPDATE);
hw_events->events[idx] = NULL;
clear_bit(idx, hw_events->used_mask);
perf_event_update_userpage(event);
}
static void dsu_pmu_enable(struct pmu *pmu)
{
u32 pmcr;
unsigned long flags;
struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
/* If no counters are added, skip enabling the PMU */
if (bitmap_empty(dsu_pmu->hw_events.used_mask, DSU_PMU_MAX_HW_CNTRS))
return;
raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
pmcr = __dsu_pmu_read_pmcr();
pmcr |= CLUSTERPMCR_E;
__dsu_pmu_write_pmcr(pmcr);
raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
}
static void dsu_pmu_disable(struct pmu *pmu)
{
u32 pmcr;
unsigned long flags;
struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
pmcr = __dsu_pmu_read_pmcr();
pmcr &= ~CLUSTERPMCR_E;
__dsu_pmu_write_pmcr(pmcr);
raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
}
static bool dsu_pmu_validate_event(struct pmu *pmu,
struct dsu_hw_events *hw_events,
struct perf_event *event)
{
if (is_software_event(event))
return true;
/* Reject groups spanning multiple HW PMUs. */
if (event->pmu != pmu)
return false;
return dsu_pmu_get_event_idx(hw_events, event) >= 0;
}
/*
* Make sure the group of events can be scheduled at once
* on the PMU.
*/
static bool dsu_pmu_validate_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
struct dsu_hw_events fake_hw;
if (event->group_leader == event)
return true;
memset(fake_hw.used_mask, 0, sizeof(fake_hw.used_mask));
if (!dsu_pmu_validate_event(event->pmu, &fake_hw, leader))
return false;
for_each_sibling_event(sibling, leader) {
if (!dsu_pmu_validate_event(event->pmu, &fake_hw, sibling))
return false;
}
return dsu_pmu_validate_event(event->pmu, &fake_hw, event);
}
static int dsu_pmu_event_init(struct perf_event *event)
{
struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
if (event->attr.type != event->pmu->type)
return -ENOENT;
/* We don't support sampling */
if (is_sampling_event(event)) {
dev_dbg(dsu_pmu->pmu.dev, "Can't support sampling events\n");
return -EOPNOTSUPP;
}
/* We cannot support task bound events */
if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) {
dev_dbg(dsu_pmu->pmu.dev, "Can't support per-task counters\n");
return -EINVAL;
}
if (has_branch_stack(event)) {
dev_dbg(dsu_pmu->pmu.dev, "Can't support filtering\n");
return -EINVAL;
}
if (!cpumask_test_cpu(event->cpu, &dsu_pmu->associated_cpus)) {
dev_dbg(dsu_pmu->pmu.dev,
"Requested cpu is not associated with the DSU\n");
return -EINVAL;
}
/*
* Choose the current active CPU to read the events. We don't want
* to migrate the event contexts, irq handling etc to the requested
* CPU. As long as the requested CPU is within the same DSU, we
* are fine.
*/
event->cpu = cpumask_first(&dsu_pmu->active_cpu);
if (event->cpu >= nr_cpu_ids)
return -EINVAL;
if (!dsu_pmu_validate_group(event))
return -EINVAL;
event->hw.config_base = event->attr.config;
return 0;
}
static struct dsu_pmu *dsu_pmu_alloc(struct platform_device *pdev)
{
struct dsu_pmu *dsu_pmu;
dsu_pmu = devm_kzalloc(&pdev->dev, sizeof(*dsu_pmu), GFP_KERNEL);
if (!dsu_pmu)
return ERR_PTR(-ENOMEM);
raw_spin_lock_init(&dsu_pmu->pmu_lock);
/*
* Initialise the number of counters to -1, until we probe
* the real number on a connected CPU.
*/
dsu_pmu->num_counters = -1;
return dsu_pmu;
}
/**
* dsu_pmu_dt_get_cpus: Get the list of CPUs in the cluster
* from device tree.
*/
static int dsu_pmu_dt_get_cpus(struct device *dev, cpumask_t *mask)
{
int i = 0, n, cpu;
struct device_node *cpu_node;
n = of_count_phandle_with_args(dev->of_node, "cpus", NULL);
if (n <= 0)
return -ENODEV;
for (; i < n; i++) {
cpu_node = of_parse_phandle(dev->of_node, "cpus", i);
if (!cpu_node)
break;
cpu = of_cpu_node_to_id(cpu_node);
of_node_put(cpu_node);
/*
* We have to ignore the failures here and continue scanning
* the list to handle cases where the nr_cpus could be capped
* in the running kernel.
*/
if (cpu < 0)
continue;
cpumask_set_cpu(cpu, mask);
}
return 0;
}
/**
* dsu_pmu_acpi_get_cpus: Get the list of CPUs in the cluster
* from ACPI.
*/
static int dsu_pmu_acpi_get_cpus(struct device *dev, cpumask_t *mask)
{
#ifdef CONFIG_ACPI
struct acpi_device *parent_adev = acpi_dev_parent(ACPI_COMPANION(dev));
int cpu;
/*
	 * A DSU PMU node sits inside a cluster parent node along with the CPU nodes.
	 * We need to find all CPUs that have the same parent as this PMU.
*/
for_each_possible_cpu(cpu) {
struct acpi_device *acpi_dev;
struct device *cpu_dev = get_cpu_device(cpu);
if (!cpu_dev)
continue;
acpi_dev = ACPI_COMPANION(cpu_dev);
if (acpi_dev && acpi_dev_parent(acpi_dev) == parent_adev)
cpumask_set_cpu(cpu, mask);
}
#endif
return 0;
}
/*
* dsu_pmu_probe_pmu: Probe the PMU details on a CPU in the cluster.
*/
static void dsu_pmu_probe_pmu(struct dsu_pmu *dsu_pmu)
{
u64 num_counters;
u32 cpmceid[2];
num_counters = (__dsu_pmu_read_pmcr() >> CLUSTERPMCR_N_SHIFT) &
CLUSTERPMCR_N_MASK;
/* We can only support up to 31 independent counters */
if (WARN_ON(num_counters > 31))
num_counters = 31;
dsu_pmu->num_counters = num_counters;
if (!dsu_pmu->num_counters)
return;
cpmceid[0] = __dsu_pmu_read_pmceid(0);
cpmceid[1] = __dsu_pmu_read_pmceid(1);
bitmap_from_arr32(dsu_pmu->cpmceid_bitmap, cpmceid,
DSU_PMU_MAX_COMMON_EVENTS);
}
static void dsu_pmu_set_active_cpu(int cpu, struct dsu_pmu *dsu_pmu)
{
cpumask_set_cpu(cpu, &dsu_pmu->active_cpu);
if (irq_set_affinity(dsu_pmu->irq, &dsu_pmu->active_cpu))
pr_warn("Failed to set irq affinity to %d\n", cpu);
}
/*
* dsu_pmu_init_pmu: Initialise the DSU PMU configurations if
* we haven't done it already.
*/
static void dsu_pmu_init_pmu(struct dsu_pmu *dsu_pmu)
{
if (dsu_pmu->num_counters == -1)
dsu_pmu_probe_pmu(dsu_pmu);
/* Reset the interrupt overflow mask */
dsu_pmu_get_reset_overflow();
}
static int dsu_pmu_device_probe(struct platform_device *pdev)
{
int irq, rc;
struct dsu_pmu *dsu_pmu;
struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev);
char *name;
static atomic_t pmu_idx = ATOMIC_INIT(-1);
dsu_pmu = dsu_pmu_alloc(pdev);
if (IS_ERR(dsu_pmu))
return PTR_ERR(dsu_pmu);
if (is_of_node(fwnode))
rc = dsu_pmu_dt_get_cpus(&pdev->dev, &dsu_pmu->associated_cpus);
else if (is_acpi_device_node(fwnode))
rc = dsu_pmu_acpi_get_cpus(&pdev->dev, &dsu_pmu->associated_cpus);
else
return -ENOENT;
if (rc) {
dev_warn(&pdev->dev, "Failed to parse the CPUs\n");
return rc;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -EINVAL;
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
PMUNAME, atomic_inc_return(&pmu_idx));
if (!name)
return -ENOMEM;
rc = devm_request_irq(&pdev->dev, irq, dsu_pmu_handle_irq,
IRQF_NOBALANCING, name, dsu_pmu);
if (rc) {
dev_warn(&pdev->dev, "Failed to request IRQ %d\n", irq);
return rc;
}
dsu_pmu->irq = irq;
platform_set_drvdata(pdev, dsu_pmu);
rc = cpuhp_state_add_instance(dsu_pmu_cpuhp_state,
&dsu_pmu->cpuhp_node);
if (rc)
return rc;
dsu_pmu->pmu = (struct pmu) {
.task_ctx_nr = perf_invalid_context,
.module = THIS_MODULE,
.pmu_enable = dsu_pmu_enable,
.pmu_disable = dsu_pmu_disable,
.event_init = dsu_pmu_event_init,
.add = dsu_pmu_add,
.del = dsu_pmu_del,
.start = dsu_pmu_start,
.stop = dsu_pmu_stop,
.read = dsu_pmu_read,
.attr_groups = dsu_pmu_attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
rc = perf_pmu_register(&dsu_pmu->pmu, name, -1);
if (rc) {
cpuhp_state_remove_instance(dsu_pmu_cpuhp_state,
&dsu_pmu->cpuhp_node);
}
return rc;
}
static int dsu_pmu_device_remove(struct platform_device *pdev)
{
struct dsu_pmu *dsu_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&dsu_pmu->pmu);
cpuhp_state_remove_instance(dsu_pmu_cpuhp_state, &dsu_pmu->cpuhp_node);
return 0;
}
static const struct of_device_id dsu_pmu_of_match[] = {
{ .compatible = "arm,dsu-pmu", },
{},
};
MODULE_DEVICE_TABLE(of, dsu_pmu_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id dsu_pmu_acpi_match[] = {
{ "ARMHD500", 0},
{},
};
MODULE_DEVICE_TABLE(acpi, dsu_pmu_acpi_match);
#endif
static struct platform_driver dsu_pmu_driver = {
.driver = {
.name = DRVNAME,
.of_match_table = of_match_ptr(dsu_pmu_of_match),
.acpi_match_table = ACPI_PTR(dsu_pmu_acpi_match),
.suppress_bind_attrs = true,
},
.probe = dsu_pmu_device_probe,
.remove = dsu_pmu_device_remove,
};
static int dsu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu,
cpuhp_node);
if (!cpumask_test_cpu(cpu, &dsu_pmu->associated_cpus))
return 0;
/* If the PMU is already managed, there is nothing to do */
if (!cpumask_empty(&dsu_pmu->active_cpu))
return 0;
dsu_pmu_init_pmu(dsu_pmu);
dsu_pmu_set_active_cpu(cpu, dsu_pmu);
return 0;
}
static int dsu_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
int dst;
struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu,
cpuhp_node);
if (!cpumask_test_and_clear_cpu(cpu, &dsu_pmu->active_cpu))
return 0;
dst = dsu_pmu_get_online_cpu_any_but(dsu_pmu, cpu);
/* If there are no active CPUs in the DSU, leave IRQ disabled */
if (dst >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&dsu_pmu->pmu, cpu, dst);
dsu_pmu_set_active_cpu(dst, dsu_pmu);
return 0;
}
static int __init dsu_pmu_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
DRVNAME,
dsu_pmu_cpu_online,
dsu_pmu_cpu_teardown);
if (ret < 0)
return ret;
dsu_pmu_cpuhp_state = ret;
ret = platform_driver_register(&dsu_pmu_driver);
if (ret)
cpuhp_remove_multi_state(dsu_pmu_cpuhp_state);
return ret;
}
static void __exit dsu_pmu_exit(void)
{
platform_driver_unregister(&dsu_pmu_driver);
cpuhp_remove_multi_state(dsu_pmu_cpuhp_state);
}
module_init(dsu_pmu_init);
module_exit(dsu_pmu_exit);
MODULE_DESCRIPTION("Perf driver for ARM DynamIQ Shared Unit");
MODULE_AUTHOR("Suzuki K Poulose <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/perf/arm_dsu_pmu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* RISC-V performance counter support.
*
* Copyright (C) 2021 Western Digital Corporation or its affiliates.
*
* This implementation is based on old RISC-V perf and ARM perf event code
* which are in turn based on sparc64 and x86 code.
*/
#include <linux/mod_devicetable.h>
#include <linux/perf/riscv_pmu.h>
#include <linux/platform_device.h>
#define RISCV_PMU_LEGACY_CYCLE 0
#define RISCV_PMU_LEGACY_INSTRET 2
static bool pmu_init_done;
static int pmu_legacy_ctr_get_idx(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
if (event->attr.type != PERF_TYPE_HARDWARE)
return -EOPNOTSUPP;
if (attr->config == PERF_COUNT_HW_CPU_CYCLES)
return RISCV_PMU_LEGACY_CYCLE;
else if (attr->config == PERF_COUNT_HW_INSTRUCTIONS)
return RISCV_PMU_LEGACY_INSTRET;
else
return -EOPNOTSUPP;
}
/* For legacy config & counter index are same */
static int pmu_legacy_event_map(struct perf_event *event, u64 *config)
{
return pmu_legacy_ctr_get_idx(event);
}
static u64 pmu_legacy_read_ctr(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
u64 val;
if (idx == RISCV_PMU_LEGACY_CYCLE) {
val = riscv_pmu_ctr_read_csr(CSR_CYCLE);
if (IS_ENABLED(CONFIG_32BIT))
val = (u64)riscv_pmu_ctr_read_csr(CSR_CYCLEH) << 32 | val;
} else if (idx == RISCV_PMU_LEGACY_INSTRET) {
val = riscv_pmu_ctr_read_csr(CSR_INSTRET);
if (IS_ENABLED(CONFIG_32BIT))
val = ((u64)riscv_pmu_ctr_read_csr(CSR_INSTRETH)) << 32 | val;
} else
return 0;
return val;
}
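/*
 * Illustrative note (not in the original source): on rv32 the 64-bit
 * cycle/instret values are split across a low and a high CSR, so the
 * read above composes ((u64)high << 32) | low. A fully robust rv32
 * sequence would typically re-read the high half to guard against a
 * carry between the two accesses; the legacy driver keeps it simple.
 */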
static void pmu_legacy_ctr_start(struct perf_event *event, u64 ival)
{
struct hw_perf_event *hwc = &event->hw;
u64 initial_val = pmu_legacy_read_ctr(event);
	/*
	 * The legacy method doesn't really have a start/stop method.
	 * It also cannot update the counter with an initial value.
* But we still need to set the prev_count so that read() can compute
* the delta. Just use the current counter value to set the prev_count.
*/
local64_set(&hwc->prev_count, initial_val);
}
static uint8_t pmu_legacy_csr_index(struct perf_event *event)
{
return event->hw.idx;
}
static void pmu_legacy_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
event->attr.config != PERF_COUNT_HW_INSTRUCTIONS)
return;
event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;
}
static void pmu_legacy_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
event->attr.config != PERF_COUNT_HW_INSTRUCTIONS)
return;
event->hw.flags &= ~PERF_EVENT_FLAG_USER_READ_CNT;
}
/*
 * This is just a simple implementation to make legacy implementations
 * compatible with the new RISC-V PMU driver framework.
 * This driver only allows reading two counters, i.e. CYCLE & INSTRET.
 * However, it cannot start or stop the counters. Thus, it is not very
 * useful and will be removed in the future.
 */
static void pmu_legacy_init(struct riscv_pmu *pmu)
{
pr_info("Legacy PMU implementation is available\n");
pmu->cmask = BIT(RISCV_PMU_LEGACY_CYCLE) |
BIT(RISCV_PMU_LEGACY_INSTRET);
pmu->ctr_start = pmu_legacy_ctr_start;
pmu->ctr_stop = NULL;
pmu->event_map = pmu_legacy_event_map;
pmu->ctr_get_idx = pmu_legacy_ctr_get_idx;
pmu->ctr_get_width = NULL;
pmu->ctr_clear_idx = NULL;
pmu->ctr_read = pmu_legacy_read_ctr;
pmu->event_mapped = pmu_legacy_event_mapped;
pmu->event_unmapped = pmu_legacy_event_unmapped;
pmu->csr_index = pmu_legacy_csr_index;
perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
}
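/*
 * Usage note (illustrative, not from the original source): because only the
 * CYCLE and INSTRET CSRs are exposed, something like
 *
 *	perf stat -e cycles,instructions true
 *
 * is expected to work on the legacy PMU, while any other hardware event
 * (e.g. cache-misses) is rejected with -EOPNOTSUPP by
 * pmu_legacy_ctr_get_idx() above.
 */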
static int pmu_legacy_device_probe(struct platform_device *pdev)
{
struct riscv_pmu *pmu = NULL;
pmu = riscv_pmu_alloc();
if (!pmu)
return -ENOMEM;
pmu_legacy_init(pmu);
return 0;
}
static struct platform_driver pmu_legacy_driver = {
.probe = pmu_legacy_device_probe,
.driver = {
.name = RISCV_PMU_LEGACY_PDEV_NAME,
},
};
static int __init riscv_pmu_legacy_devinit(void)
{
int ret;
struct platform_device *pdev;
if (likely(pmu_init_done))
return 0;
ret = platform_driver_register(&pmu_legacy_driver);
if (ret)
return ret;
pdev = platform_device_register_simple(RISCV_PMU_LEGACY_PDEV_NAME, -1, NULL, 0);
if (IS_ERR(pdev)) {
platform_driver_unregister(&pmu_legacy_driver);
return PTR_ERR(pdev);
}
return ret;
}
late_initcall(riscv_pmu_legacy_devinit);
void riscv_pmu_legacy_skip_init(void)
{
pmu_init_done = true;
}
| linux-master | drivers/perf/riscv_pmu_legacy.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2014 ARM Limited
*/
#include <linux/ctype.h>
#include <linux/hrtimer.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define CCN_NUM_XP_PORTS 2
#define CCN_NUM_VCS 4
#define CCN_NUM_REGIONS 256
#define CCN_REGION_SIZE 0x10000
#define CCN_ALL_OLY_ID 0xff00
#define CCN_ALL_OLY_ID__OLY_ID__SHIFT 0
#define CCN_ALL_OLY_ID__OLY_ID__MASK 0x1f
#define CCN_ALL_OLY_ID__NODE_ID__SHIFT 8
#define CCN_ALL_OLY_ID__NODE_ID__MASK 0x3f
#define CCN_MN_ERRINT_STATUS 0x0008
#define CCN_MN_ERRINT_STATUS__INTREQ__DESSERT 0x11
#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__ENABLE 0x02
#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLED 0x20
#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE 0x22
#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_ENABLE 0x04
#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLED 0x40
#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLE 0x44
#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE 0x08
#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED 0x80
#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE 0x88
#define CCN_MN_OLY_COMP_LIST_63_0 0x01e0
#define CCN_MN_ERR_SIG_VAL_63_0 0x0300
#define CCN_MN_ERR_SIG_VAL_63_0__DT (1 << 1)
#define CCN_DT_ACTIVE_DSM 0x0000
#define CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(n) ((n) * 8)
#define CCN_DT_ACTIVE_DSM__DSM_ID__MASK 0xff
#define CCN_DT_CTL 0x0028
#define CCN_DT_CTL__DT_EN (1 << 0)
#define CCN_DT_PMEVCNT(n) (0x0100 + (n) * 0x8)
#define CCN_DT_PMCCNTR 0x0140
#define CCN_DT_PMCCNTRSR 0x0190
#define CCN_DT_PMOVSR 0x0198
#define CCN_DT_PMOVSR_CLR 0x01a0
#define CCN_DT_PMOVSR_CLR__MASK 0x1f
#define CCN_DT_PMCR 0x01a8
#define CCN_DT_PMCR__OVFL_INTR_EN (1 << 6)
#define CCN_DT_PMCR__PMU_EN (1 << 0)
#define CCN_DT_PMSR 0x01b0
#define CCN_DT_PMSR_REQ 0x01b8
#define CCN_DT_PMSR_CLR 0x01c0
#define CCN_HNF_PMU_EVENT_SEL 0x0600
#define CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4)
#define CCN_HNF_PMU_EVENT_SEL__ID__MASK 0xf
#define CCN_XP_DT_CONFIG 0x0300
#define CCN_XP_DT_CONFIG__DT_CFG__SHIFT(n) ((n) * 4)
#define CCN_XP_DT_CONFIG__DT_CFG__MASK 0xf
#define CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH 0x0
#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT_0_OR_1 0x1
#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(n) (0x2 + (n))
#define CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(n) (0x4 + (n))
#define CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(d, n) (0x8 + (d) * 4 + (n))
#define CCN_XP_DT_INTERFACE_SEL 0x0308
#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(n) (0 + (n) * 8)
#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK 0x1
#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(n) (1 + (n) * 8)
#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK 0x1
#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(n) (2 + (n) * 8)
#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK 0x3
#define CCN_XP_DT_CMP_VAL_L(n) (0x0310 + (n) * 0x40)
#define CCN_XP_DT_CMP_VAL_H(n) (0x0318 + (n) * 0x40)
#define CCN_XP_DT_CMP_MASK_L(n) (0x0320 + (n) * 0x40)
#define CCN_XP_DT_CMP_MASK_H(n) (0x0328 + (n) * 0x40)
#define CCN_XP_DT_CONTROL 0x0370
#define CCN_XP_DT_CONTROL__DT_ENABLE (1 << 0)
#define CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(n) (12 + (n) * 4)
#define CCN_XP_DT_CONTROL__WP_ARM_SEL__MASK 0xf
#define CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS 0xf
#define CCN_XP_PMU_EVENT_SEL 0x0600
#define CCN_XP_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 7)
#define CCN_XP_PMU_EVENT_SEL__ID__MASK 0x3f
#define CCN_SBAS_PMU_EVENT_SEL 0x0600
#define CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4)
#define CCN_SBAS_PMU_EVENT_SEL__ID__MASK 0xf
#define CCN_RNI_PMU_EVENT_SEL 0x0600
#define CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4)
#define CCN_RNI_PMU_EVENT_SEL__ID__MASK 0xf
#define CCN_TYPE_MN 0x01
#define CCN_TYPE_DT 0x02
#define CCN_TYPE_HNF 0x04
#define CCN_TYPE_HNI 0x05
#define CCN_TYPE_XP 0x08
#define CCN_TYPE_SBSX 0x0c
#define CCN_TYPE_SBAS 0x10
#define CCN_TYPE_RNI_1P 0x14
#define CCN_TYPE_RNI_2P 0x15
#define CCN_TYPE_RNI_3P 0x16
#define CCN_TYPE_RND_1P 0x18 /* RN-D = RN-I + DVM */
#define CCN_TYPE_RND_2P 0x19
#define CCN_TYPE_RND_3P 0x1a
#define CCN_TYPE_CYCLES 0xff /* Pseudotype */
#define CCN_EVENT_WATCHPOINT 0xfe /* Pseudoevent */
#define CCN_NUM_PMU_EVENTS 4
#define CCN_NUM_XP_WATCHPOINTS 2 /* See DT.dbg_id.num_watchpoints */
#define CCN_NUM_PMU_EVENT_COUNTERS 8 /* See DT.dbg_id.num_pmucntr */
#define CCN_IDX_PMU_CYCLE_COUNTER CCN_NUM_PMU_EVENT_COUNTERS
#define CCN_NUM_PREDEFINED_MASKS 4
#define CCN_IDX_MASK_ANY (CCN_NUM_PMU_EVENT_COUNTERS + 0)
#define CCN_IDX_MASK_EXACT (CCN_NUM_PMU_EVENT_COUNTERS + 1)
#define CCN_IDX_MASK_ORDER (CCN_NUM_PMU_EVENT_COUNTERS + 2)
#define CCN_IDX_MASK_OPCODE (CCN_NUM_PMU_EVENT_COUNTERS + 3)
struct arm_ccn_component {
void __iomem *base;
u32 type;
DECLARE_BITMAP(pmu_events_mask, CCN_NUM_PMU_EVENTS);
union {
struct {
DECLARE_BITMAP(dt_cmp_mask, CCN_NUM_XP_WATCHPOINTS);
} xp;
};
};
#define pmu_to_arm_ccn(_pmu) container_of(container_of(_pmu, \
struct arm_ccn_dt, pmu), struct arm_ccn, dt)
struct arm_ccn_dt {
int id;
void __iomem *base;
spinlock_t config_lock;
DECLARE_BITMAP(pmu_counters_mask, CCN_NUM_PMU_EVENT_COUNTERS + 1);
struct {
struct arm_ccn_component *source;
struct perf_event *event;
} pmu_counters[CCN_NUM_PMU_EVENT_COUNTERS + 1];
struct {
u64 l, h;
} cmp_mask[CCN_NUM_PMU_EVENT_COUNTERS + CCN_NUM_PREDEFINED_MASKS];
struct hrtimer hrtimer;
unsigned int cpu;
struct hlist_node node;
struct pmu pmu;
};
struct arm_ccn {
struct device *dev;
void __iomem *base;
unsigned int irq;
unsigned sbas_present:1;
unsigned sbsx_present:1;
int num_nodes;
struct arm_ccn_component *node;
int num_xps;
struct arm_ccn_component *xp;
struct arm_ccn_dt dt;
int mn_id;
};
static int arm_ccn_node_to_xp(int node)
{
return node / CCN_NUM_XP_PORTS;
}
static int arm_ccn_node_to_xp_port(int node)
{
return node % CCN_NUM_XP_PORTS;
}
/*
* Bit shifts and masks in these defines must be kept in sync with
* arm_ccn_pmu_config_set() and CCN_FORMAT_ATTRs below!
*/
#define CCN_CONFIG_NODE(_config) (((_config) >> 0) & 0xff)
#define CCN_CONFIG_XP(_config) (((_config) >> 0) & 0xff)
#define CCN_CONFIG_TYPE(_config) (((_config) >> 8) & 0xff)
#define CCN_CONFIG_EVENT(_config) (((_config) >> 16) & 0xff)
#define CCN_CONFIG_PORT(_config) (((_config) >> 24) & 0x3)
#define CCN_CONFIG_BUS(_config) (((_config) >> 24) & 0x3)
#define CCN_CONFIG_VC(_config) (((_config) >> 26) & 0x7)
#define CCN_CONFIG_DIR(_config) (((_config) >> 29) & 0x1)
#define CCN_CONFIG_MASK(_config) (((_config) >> 30) & 0xf)
static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port)
{
*config &= ~((0xff << 0) | (0xff << 8) | (0x3 << 24));
*config |= (node_xp << 0) | (type << 8) | (port << 24);
}
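/*
 * Illustrative example (not from the original source, assuming the first CCN
 * instance is registered as "ccn"): with the encoding above, a command such
 * as
 *
 *	perf stat -a -e ccn/xp_valid_flit,xp=1,bus=0,vc=1/ sleep 1
 *
 * ends up with the XP id in config[0:7], the node type in config[8:15], the
 * event id in config[16:23], bus/port in config[24:25] and vc in
 * config[26:28], exactly as decoded by the CCN_CONFIG_*() macros.
 */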
static ssize_t arm_ccn_pmu_format_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *ea = container_of(attr,
struct dev_ext_attribute, attr);
return sysfs_emit(buf, "%s\n", (char *)ea->var);
}
#define CCN_FORMAT_ATTR(_name, _config) \
struct dev_ext_attribute arm_ccn_pmu_format_attr_##_name = \
{ __ATTR(_name, S_IRUGO, arm_ccn_pmu_format_show, \
NULL), _config }
static CCN_FORMAT_ATTR(node, "config:0-7");
static CCN_FORMAT_ATTR(xp, "config:0-7");
static CCN_FORMAT_ATTR(type, "config:8-15");
static CCN_FORMAT_ATTR(event, "config:16-23");
static CCN_FORMAT_ATTR(port, "config:24-25");
static CCN_FORMAT_ATTR(bus, "config:24-25");
static CCN_FORMAT_ATTR(vc, "config:26-28");
static CCN_FORMAT_ATTR(dir, "config:29-29");
static CCN_FORMAT_ATTR(mask, "config:30-33");
static CCN_FORMAT_ATTR(cmp_l, "config1:0-62");
static CCN_FORMAT_ATTR(cmp_h, "config2:0-59");
static struct attribute *arm_ccn_pmu_format_attrs[] = {
&arm_ccn_pmu_format_attr_node.attr.attr,
&arm_ccn_pmu_format_attr_xp.attr.attr,
&arm_ccn_pmu_format_attr_type.attr.attr,
&arm_ccn_pmu_format_attr_event.attr.attr,
&arm_ccn_pmu_format_attr_port.attr.attr,
&arm_ccn_pmu_format_attr_bus.attr.attr,
&arm_ccn_pmu_format_attr_vc.attr.attr,
&arm_ccn_pmu_format_attr_dir.attr.attr,
&arm_ccn_pmu_format_attr_mask.attr.attr,
&arm_ccn_pmu_format_attr_cmp_l.attr.attr,
&arm_ccn_pmu_format_attr_cmp_h.attr.attr,
NULL
};
static const struct attribute_group arm_ccn_pmu_format_attr_group = {
.name = "format",
.attrs = arm_ccn_pmu_format_attrs,
};
struct arm_ccn_pmu_event {
struct device_attribute attr;
u32 type;
u32 event;
int num_ports;
int num_vcs;
const char *def;
int mask;
};
#define CCN_EVENT_ATTR(_name) \
__ATTR(_name, S_IRUGO, arm_ccn_pmu_event_show, NULL)
/*
* Events defined in TRM for MN, HN-I and SBSX are actually watchpoints set on
* their ports in XP they are connected to. For the sake of usability they are
* explicitly defined here (and translated into a relevant watchpoint in
* arm_ccn_pmu_event_init()) so the user can easily request them without deep
* knowledge of the flit format.
*/
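/*
 * Worked example (illustrative): the "mn_dvmop" event defined below as
 * "dir=1,vc=0,cmp_h=0x2800" with the OPCODE mask is not a real MN PMU
 * event - arm_ccn_pmu_event_init() rewrites it into a watchpoint on the XP
 * port the MN is connected to, with the comparator matching outgoing
 * (dir=1) request-channel (vc=0) flits against cmp_h=0x2800 under the
 * OPCODE mask.
 */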
#define CCN_EVENT_MN(_name, _def, _mask) { .attr = CCN_EVENT_ATTR(mn_##_name), \
.type = CCN_TYPE_MN, .event = CCN_EVENT_WATCHPOINT, \
.num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, \
.def = _def, .mask = _mask, }
#define CCN_EVENT_HNI(_name, _def, _mask) { \
.attr = CCN_EVENT_ATTR(hni_##_name), .type = CCN_TYPE_HNI, \
.event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
.num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }
#define CCN_EVENT_SBSX(_name, _def, _mask) { \
.attr = CCN_EVENT_ATTR(sbsx_##_name), .type = CCN_TYPE_SBSX, \
.event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
.num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }
#define CCN_EVENT_HNF(_name, _event) { .attr = CCN_EVENT_ATTR(hnf_##_name), \
.type = CCN_TYPE_HNF, .event = _event, }
#define CCN_EVENT_XP(_name, _event) { .attr = CCN_EVENT_ATTR(xp_##_name), \
.type = CCN_TYPE_XP, .event = _event, \
.num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, }
/*
* RN-I & RN-D (RN-D = RN-I + DVM) nodes have different type ID depending
* on configuration. One of them is picked to represent the whole group,
* as they all share the same event types.
*/
#define CCN_EVENT_RNI(_name, _event) { .attr = CCN_EVENT_ATTR(rni_##_name), \
.type = CCN_TYPE_RNI_3P, .event = _event, }
#define CCN_EVENT_SBAS(_name, _event) { .attr = CCN_EVENT_ATTR(sbas_##_name), \
.type = CCN_TYPE_SBAS, .event = _event, }
#define CCN_EVENT_CYCLES(_name) { .attr = CCN_EVENT_ATTR(_name), \
.type = CCN_TYPE_CYCLES }
static ssize_t arm_ccn_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
struct arm_ccn_pmu_event *event = container_of(attr,
struct arm_ccn_pmu_event, attr);
int res;
res = sysfs_emit(buf, "type=0x%x", event->type);
if (event->event)
res += sysfs_emit_at(buf, res, ",event=0x%x", event->event);
if (event->def)
res += sysfs_emit_at(buf, res, ",%s", event->def);
if (event->mask)
res += sysfs_emit_at(buf, res, ",mask=0x%x", event->mask);
/* Arguments required by an event */
switch (event->type) {
case CCN_TYPE_CYCLES:
break;
case CCN_TYPE_XP:
res += sysfs_emit_at(buf, res, ",xp=?,vc=?");
if (event->event == CCN_EVENT_WATCHPOINT)
res += sysfs_emit_at(buf, res,
",port=?,dir=?,cmp_l=?,cmp_h=?,mask=?");
else
res += sysfs_emit_at(buf, res, ",bus=?");
break;
case CCN_TYPE_MN:
res += sysfs_emit_at(buf, res, ",node=%d", ccn->mn_id);
break;
default:
res += sysfs_emit_at(buf, res, ",node=?");
break;
}
res += sysfs_emit_at(buf, res, "\n");
return res;
}
static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = kobj_to_dev(kobj);
struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
struct device_attribute *dev_attr = container_of(attr,
struct device_attribute, attr);
struct arm_ccn_pmu_event *event = container_of(dev_attr,
struct arm_ccn_pmu_event, attr);
if (event->type == CCN_TYPE_SBAS && !ccn->sbas_present)
return 0;
if (event->type == CCN_TYPE_SBSX && !ccn->sbsx_present)
return 0;
return attr->mode;
}
static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
CCN_EVENT_MN(eobarrier, "dir=1,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
CCN_EVENT_MN(ecbarrier, "dir=1,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
CCN_EVENT_MN(dvmop, "dir=1,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
CCN_EVENT_HNI(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
CCN_EVENT_HNI(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
CCN_IDX_MASK_ORDER),
CCN_EVENT_SBSX(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
CCN_EVENT_SBSX(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
CCN_EVENT_SBSX(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
CCN_EVENT_SBSX(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
CCN_EVENT_SBSX(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
CCN_IDX_MASK_ORDER),
CCN_EVENT_HNF(cache_miss, 0x1),
CCN_EVENT_HNF(l3_sf_cache_access, 0x02),
CCN_EVENT_HNF(cache_fill, 0x3),
CCN_EVENT_HNF(pocq_retry, 0x4),
CCN_EVENT_HNF(pocq_reqs_recvd, 0x5),
CCN_EVENT_HNF(sf_hit, 0x6),
CCN_EVENT_HNF(sf_evictions, 0x7),
CCN_EVENT_HNF(snoops_sent, 0x8),
CCN_EVENT_HNF(snoops_broadcast, 0x9),
CCN_EVENT_HNF(l3_eviction, 0xa),
CCN_EVENT_HNF(l3_fill_invalid_way, 0xb),
CCN_EVENT_HNF(mc_retries, 0xc),
CCN_EVENT_HNF(mc_reqs, 0xd),
CCN_EVENT_HNF(qos_hh_retry, 0xe),
CCN_EVENT_RNI(rdata_beats_p0, 0x1),
CCN_EVENT_RNI(rdata_beats_p1, 0x2),
CCN_EVENT_RNI(rdata_beats_p2, 0x3),
CCN_EVENT_RNI(rxdat_flits, 0x4),
CCN_EVENT_RNI(txdat_flits, 0x5),
CCN_EVENT_RNI(txreq_flits, 0x6),
CCN_EVENT_RNI(txreq_flits_retried, 0x7),
CCN_EVENT_RNI(rrt_full, 0x8),
CCN_EVENT_RNI(wrt_full, 0x9),
CCN_EVENT_RNI(txreq_flits_replayed, 0xa),
CCN_EVENT_XP(upload_starvation, 0x1),
CCN_EVENT_XP(download_starvation, 0x2),
CCN_EVENT_XP(respin, 0x3),
CCN_EVENT_XP(valid_flit, 0x4),
CCN_EVENT_XP(watchpoint, CCN_EVENT_WATCHPOINT),
CCN_EVENT_SBAS(rdata_beats_p0, 0x1),
CCN_EVENT_SBAS(rxdat_flits, 0x4),
CCN_EVENT_SBAS(txdat_flits, 0x5),
CCN_EVENT_SBAS(txreq_flits, 0x6),
CCN_EVENT_SBAS(txreq_flits_retried, 0x7),
CCN_EVENT_SBAS(rrt_full, 0x8),
CCN_EVENT_SBAS(wrt_full, 0x9),
CCN_EVENT_SBAS(txreq_flits_replayed, 0xa),
CCN_EVENT_CYCLES(cycles),
};
/* Populated in arm_ccn_init() */
static struct attribute
*arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1];
static const struct attribute_group arm_ccn_pmu_events_attr_group = {
.name = "events",
.is_visible = arm_ccn_pmu_events_is_visible,
.attrs = arm_ccn_pmu_events_attrs,
};
static u64 *arm_ccn_pmu_get_cmp_mask(struct arm_ccn *ccn, const char *name)
{
unsigned long i;
if (WARN_ON(!name || !name[0] || !isxdigit(name[0]) || !name[1]))
return NULL;
i = isdigit(name[0]) ? name[0] - '0' : 0xa + tolower(name[0]) - 'a';
switch (name[1]) {
case 'l':
return &ccn->dt.cmp_mask[i].l;
case 'h':
return &ccn->dt.cmp_mask[i].h;
default:
return NULL;
}
}
static ssize_t arm_ccn_pmu_cmp_mask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
return mask ? sysfs_emit(buf, "0x%016llx\n", *mask) : -EINVAL;
}
static ssize_t arm_ccn_pmu_cmp_mask_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
int err = -EINVAL;
if (mask)
err = kstrtoull(buf, 0, mask);
return err ? err : count;
}
#define CCN_CMP_MASK_ATTR(_name) \
struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
__ATTR(_name, S_IRUGO | S_IWUSR, \
arm_ccn_pmu_cmp_mask_show, arm_ccn_pmu_cmp_mask_store)
#define CCN_CMP_MASK_ATTR_RO(_name) \
struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
__ATTR(_name, S_IRUGO, arm_ccn_pmu_cmp_mask_show, NULL)
static CCN_CMP_MASK_ATTR(0l);
static CCN_CMP_MASK_ATTR(0h);
static CCN_CMP_MASK_ATTR(1l);
static CCN_CMP_MASK_ATTR(1h);
static CCN_CMP_MASK_ATTR(2l);
static CCN_CMP_MASK_ATTR(2h);
static CCN_CMP_MASK_ATTR(3l);
static CCN_CMP_MASK_ATTR(3h);
static CCN_CMP_MASK_ATTR(4l);
static CCN_CMP_MASK_ATTR(4h);
static CCN_CMP_MASK_ATTR(5l);
static CCN_CMP_MASK_ATTR(5h);
static CCN_CMP_MASK_ATTR(6l);
static CCN_CMP_MASK_ATTR(6h);
static CCN_CMP_MASK_ATTR(7l);
static CCN_CMP_MASK_ATTR(7h);
static CCN_CMP_MASK_ATTR_RO(8l);
static CCN_CMP_MASK_ATTR_RO(8h);
static CCN_CMP_MASK_ATTR_RO(9l);
static CCN_CMP_MASK_ATTR_RO(9h);
static CCN_CMP_MASK_ATTR_RO(al);
static CCN_CMP_MASK_ATTR_RO(ah);
static CCN_CMP_MASK_ATTR_RO(bl);
static CCN_CMP_MASK_ATTR_RO(bh);
static struct attribute *arm_ccn_pmu_cmp_mask_attrs[] = {
&arm_ccn_pmu_cmp_mask_attr_0l.attr, &arm_ccn_pmu_cmp_mask_attr_0h.attr,
&arm_ccn_pmu_cmp_mask_attr_1l.attr, &arm_ccn_pmu_cmp_mask_attr_1h.attr,
&arm_ccn_pmu_cmp_mask_attr_2l.attr, &arm_ccn_pmu_cmp_mask_attr_2h.attr,
&arm_ccn_pmu_cmp_mask_attr_3l.attr, &arm_ccn_pmu_cmp_mask_attr_3h.attr,
&arm_ccn_pmu_cmp_mask_attr_4l.attr, &arm_ccn_pmu_cmp_mask_attr_4h.attr,
&arm_ccn_pmu_cmp_mask_attr_5l.attr, &arm_ccn_pmu_cmp_mask_attr_5h.attr,
&arm_ccn_pmu_cmp_mask_attr_6l.attr, &arm_ccn_pmu_cmp_mask_attr_6h.attr,
&arm_ccn_pmu_cmp_mask_attr_7l.attr, &arm_ccn_pmu_cmp_mask_attr_7h.attr,
&arm_ccn_pmu_cmp_mask_attr_8l.attr, &arm_ccn_pmu_cmp_mask_attr_8h.attr,
&arm_ccn_pmu_cmp_mask_attr_9l.attr, &arm_ccn_pmu_cmp_mask_attr_9h.attr,
&arm_ccn_pmu_cmp_mask_attr_al.attr, &arm_ccn_pmu_cmp_mask_attr_ah.attr,
&arm_ccn_pmu_cmp_mask_attr_bl.attr, &arm_ccn_pmu_cmp_mask_attr_bh.attr,
NULL
};
static const struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = {
.name = "cmp_mask",
.attrs = arm_ccn_pmu_cmp_mask_attrs,
};
static ssize_t arm_ccn_pmu_cpumask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
return cpumap_print_to_pagebuf(true, buf, cpumask_of(ccn->dt.cpu));
}
static struct device_attribute arm_ccn_pmu_cpumask_attr =
__ATTR(cpumask, S_IRUGO, arm_ccn_pmu_cpumask_show, NULL);
static struct attribute *arm_ccn_pmu_cpumask_attrs[] = {
&arm_ccn_pmu_cpumask_attr.attr,
NULL,
};
static const struct attribute_group arm_ccn_pmu_cpumask_attr_group = {
.attrs = arm_ccn_pmu_cpumask_attrs,
};
/*
 * The default poll period is 10ms, which is way over the top anyway:
 * in the worst-case scenario (an event every cycle) on a 1GHz bus
 * clock, the smallest, 32-bit counter will only overflow after
 * more than 4s.
 */
static unsigned int arm_ccn_pmu_poll_period_us = 10000;
module_param_named(pmu_poll_period_us, arm_ccn_pmu_poll_period_us, uint,
S_IRUGO | S_IWUSR);
static ktime_t arm_ccn_pmu_timer_period(void)
{
return ns_to_ktime((u64)arm_ccn_pmu_poll_period_us * 1000);
}
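/*
 * Back-of-the-envelope check (illustrative): 2^32 cycles at a 1GHz bus clock
 * is about 4.29s, so polling every arm_ccn_pmu_poll_period_us = 10000us
 * (10ms) leaves a margin of more than two orders of magnitude before a
 * 32-bit event counter could wrap unnoticed.
 */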
static const struct attribute_group *arm_ccn_pmu_attr_groups[] = {
&arm_ccn_pmu_events_attr_group,
&arm_ccn_pmu_format_attr_group,
&arm_ccn_pmu_cmp_mask_attr_group,
&arm_ccn_pmu_cpumask_attr_group,
NULL
};
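/*
 * Descriptive note (added for clarity): the find/test_and_set loop below is
 * a simple lock-free allocator - find_first_zero_bit() proposes a free bit
 * and test_and_set_bit() claims it atomically; if another context grabbed
 * the same bit in the meantime, the loop simply retries with the next
 * proposal.
 */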
static int arm_ccn_pmu_alloc_bit(unsigned long *bitmap, unsigned long size)
{
int bit;
do {
bit = find_first_zero_bit(bitmap, size);
if (bit >= size)
return -EAGAIN;
} while (test_and_set_bit(bit, bitmap));
return bit;
}
/* All RN-I and RN-D nodes have identical PMUs */
static int arm_ccn_pmu_type_eq(u32 a, u32 b)
{
if (a == b)
return 1;
switch (a) {
case CCN_TYPE_RNI_1P:
case CCN_TYPE_RNI_2P:
case CCN_TYPE_RNI_3P:
case CCN_TYPE_RND_1P:
case CCN_TYPE_RND_2P:
case CCN_TYPE_RND_3P:
switch (b) {
case CCN_TYPE_RNI_1P:
case CCN_TYPE_RNI_2P:
case CCN_TYPE_RNI_3P:
case CCN_TYPE_RND_1P:
case CCN_TYPE_RND_2P:
case CCN_TYPE_RND_3P:
return 1;
}
break;
}
return 0;
}
static int arm_ccn_pmu_event_alloc(struct perf_event *event)
{
struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
struct hw_perf_event *hw = &event->hw;
u32 node_xp, type, event_id;
struct arm_ccn_component *source;
int bit;
node_xp = CCN_CONFIG_NODE(event->attr.config);
type = CCN_CONFIG_TYPE(event->attr.config);
event_id = CCN_CONFIG_EVENT(event->attr.config);
/* Allocate the cycle counter */
if (type == CCN_TYPE_CYCLES) {
if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER,
ccn->dt.pmu_counters_mask))
return -EAGAIN;
hw->idx = CCN_IDX_PMU_CYCLE_COUNTER;
ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event;
return 0;
}
/* Allocate an event counter */
hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask,
CCN_NUM_PMU_EVENT_COUNTERS);
if (hw->idx < 0) {
dev_dbg(ccn->dev, "No more counters available!\n");
return -EAGAIN;
}
if (type == CCN_TYPE_XP)
source = &ccn->xp[node_xp];
else
source = &ccn->node[node_xp];
ccn->dt.pmu_counters[hw->idx].source = source;
/* Allocate an event source or a watchpoint */
if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT)
bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask,
CCN_NUM_XP_WATCHPOINTS);
else
bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask,
CCN_NUM_PMU_EVENTS);
if (bit < 0) {
dev_dbg(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n",
node_xp);
clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
return -EAGAIN;
}
hw->config_base = bit;
ccn->dt.pmu_counters[hw->idx].event = event;
return 0;
}
static void arm_ccn_pmu_event_release(struct perf_event *event)
{
struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
struct hw_perf_event *hw = &event->hw;
if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) {
clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask);
} else {
struct arm_ccn_component *source =
ccn->dt.pmu_counters[hw->idx].source;
if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP &&
CCN_CONFIG_EVENT(event->attr.config) ==
CCN_EVENT_WATCHPOINT)
clear_bit(hw->config_base, source->xp.dt_cmp_mask);
else
clear_bit(hw->config_base, source->pmu_events_mask);
clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
}
ccn->dt.pmu_counters[hw->idx].source = NULL;
ccn->dt.pmu_counters[hw->idx].event = NULL;
}
static int arm_ccn_pmu_event_init(struct perf_event *event)
{
struct arm_ccn *ccn;
struct hw_perf_event *hw = &event->hw;
u32 node_xp, type, event_id;
int valid;
int i;
struct perf_event *sibling;
if (event->attr.type != event->pmu->type)
return -ENOENT;
ccn = pmu_to_arm_ccn(event->pmu);
if (hw->sample_period) {
dev_dbg(ccn->dev, "Sampling not supported!\n");
return -EOPNOTSUPP;
}
if (has_branch_stack(event)) {
dev_dbg(ccn->dev, "Can't exclude execution levels!\n");
return -EINVAL;
}
if (event->cpu < 0) {
dev_dbg(ccn->dev, "Can't provide per-task data!\n");
return -EOPNOTSUPP;
}
/*
 * Many perf core operations (e.g. event rotation) operate on a
 * single CPU context. This is obvious for CPU PMUs, where one
 * expects the same set of events to be observed on all CPUs,
 * but can lead to issues for off-core PMUs, like CCN, where each
 * event could theoretically be assigned to a different CPU. To
 * mitigate this, we enforce CPU assignment to one, selected
 * processor (the one described in the "cpumask" attribute).
 */
event->cpu = ccn->dt.cpu;
node_xp = CCN_CONFIG_NODE(event->attr.config);
type = CCN_CONFIG_TYPE(event->attr.config);
event_id = CCN_CONFIG_EVENT(event->attr.config);
/* Validate node/xp vs topology */
switch (type) {
case CCN_TYPE_MN:
if (node_xp != ccn->mn_id) {
dev_dbg(ccn->dev, "Invalid MN ID %d!\n", node_xp);
return -EINVAL;
}
break;
case CCN_TYPE_XP:
if (node_xp >= ccn->num_xps) {
dev_dbg(ccn->dev, "Invalid XP ID %d!\n", node_xp);
return -EINVAL;
}
break;
case CCN_TYPE_CYCLES:
break;
default:
if (node_xp >= ccn->num_nodes) {
dev_dbg(ccn->dev, "Invalid node ID %d!\n", node_xp);
return -EINVAL;
}
if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) {
dev_dbg(ccn->dev, "Invalid type 0x%x for node %d!\n",
type, node_xp);
return -EINVAL;
}
break;
}
/* Validate event ID vs available for the type */
for (i = 0, valid = 0; i < ARRAY_SIZE(arm_ccn_pmu_events) && !valid;
i++) {
struct arm_ccn_pmu_event *e = &arm_ccn_pmu_events[i];
u32 port = CCN_CONFIG_PORT(event->attr.config);
u32 vc = CCN_CONFIG_VC(event->attr.config);
if (!arm_ccn_pmu_type_eq(type, e->type))
continue;
if (event_id != e->event)
continue;
if (e->num_ports && port >= e->num_ports) {
dev_dbg(ccn->dev, "Invalid port %d for node/XP %d!\n",
port, node_xp);
return -EINVAL;
}
if (e->num_vcs && vc >= e->num_vcs) {
dev_dbg(ccn->dev, "Invalid vc %d for node/XP %d!\n",
vc, node_xp);
return -EINVAL;
}
valid = 1;
}
if (!valid) {
dev_dbg(ccn->dev, "Invalid event 0x%x for node/XP %d!\n",
event_id, node_xp);
return -EINVAL;
}
/* Watchpoint-based event for a node is actually set on XP */
if (event_id == CCN_EVENT_WATCHPOINT && type != CCN_TYPE_XP) {
u32 port;
type = CCN_TYPE_XP;
port = arm_ccn_node_to_xp_port(node_xp);
node_xp = arm_ccn_node_to_xp(node_xp);
arm_ccn_pmu_config_set(&event->attr.config,
node_xp, type, port);
}
/*
 * We must NOT create groups containing mixed PMUs, although software
 * events are acceptable (for example, to create a CCN group that is
 * read periodically when an hrtimer, aka cpu-clock, leader triggers).
 */
if (event->group_leader->pmu != event->pmu &&
!is_software_event(event->group_leader))
return -EINVAL;
for_each_sibling_event(sibling, event->group_leader) {
if (sibling->pmu != event->pmu &&
!is_software_event(sibling))
return -EINVAL;
}
return 0;
}
static u64 arm_ccn_pmu_read_counter(struct arm_ccn *ccn, int idx)
{
u64 res;
if (idx == CCN_IDX_PMU_CYCLE_COUNTER) {
#ifdef readq
res = readq(ccn->dt.base + CCN_DT_PMCCNTR);
#else
/* 40-bit counter: take a snapshot and read it out in two parts */
writel(0x1, ccn->dt.base + CCN_DT_PMSR_REQ);
while (!(readl(ccn->dt.base + CCN_DT_PMSR) & 0x1))
;
writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
res = readl(ccn->dt.base + CCN_DT_PMCCNTRSR + 4) & 0xff;
res <<= 32;
res |= readl(ccn->dt.base + CCN_DT_PMCCNTRSR);
#endif
} else {
res = readl(ccn->dt.base + CCN_DT_PMEVCNT(idx));
}
return res;
}
static void arm_ccn_pmu_event_update(struct perf_event *event)
{
struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
struct hw_perf_event *hw = &event->hw;
u64 prev_count, new_count, mask;
do {
prev_count = local64_read(&hw->prev_count);
new_count = arm_ccn_pmu_read_counter(ccn, hw->idx);
} while (local64_xchg(&hw->prev_count, new_count) != prev_count);
mask = (1LLU << (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER ? 40 : 32)) - 1;
local64_add((new_count - prev_count) & mask, &event->count);
}
static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable)
{
struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
struct hw_perf_event *hw = &event->hw;
struct arm_ccn_component *xp;
u32 val, dt_cfg;
/* Nothing to do for cycle counter */
if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
return;
if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)];
else
xp = &ccn->xp[arm_ccn_node_to_xp(
CCN_CONFIG_NODE(event->attr.config))];
if (enable)
dt_cfg = hw->event_base;
else
dt_cfg = CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH;
spin_lock(&ccn->dt.config_lock);
val = readl(xp->base + CCN_XP_DT_CONFIG);
val &= ~(CCN_XP_DT_CONFIG__DT_CFG__MASK <<
CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx));
val |= dt_cfg << CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx);
writel(val, xp->base + CCN_XP_DT_CONFIG);
spin_unlock(&ccn->dt.config_lock);
}
static void arm_ccn_pmu_event_start(struct perf_event *event, int flags)
{
struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
struct hw_perf_event *hw = &event->hw;
local64_set(&event->hw.prev_count,
arm_ccn_pmu_read_counter(ccn, hw->idx));
hw->state = 0;
/* Set the DT bus input, engaging the counter */
arm_ccn_pmu_xp_dt_config(event, 1);
}
static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hw = &event->hw;
/* Disable counting, setting the DT bus to pass-through mode */
arm_ccn_pmu_xp_dt_config(event, 0);
if (flags & PERF_EF_UPDATE)
arm_ccn_pmu_event_update(event);
hw->state |= PERF_HES_STOPPED;
}
static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
{
struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
struct hw_perf_event *hw = &event->hw;
struct arm_ccn_component *source =
ccn->dt.pmu_counters[hw->idx].source;
unsigned long wp = hw->config_base;
u32 val;
u64 cmp_l = event->attr.config1;
u64 cmp_h = event->attr.config2;
u64 mask_l = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].l;
u64 mask_h = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].h;
hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(wp);
/* Direction (RX/TX), device (port) & virtual channel */
val = readl(source->base + CCN_XP_DT_INTERFACE_SEL);
val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK <<
CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp));
val |= CCN_CONFIG_DIR(event->attr.config) <<
CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp);
val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK <<
CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp));
val |= CCN_CONFIG_PORT(event->attr.config) <<
CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp);
val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK <<
CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp));
val |= CCN_CONFIG_VC(event->attr.config) <<
CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp);
writel(val, source->base + CCN_XP_DT_INTERFACE_SEL);
/* Comparison values */
writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp));
writel((cmp_l >> 32) & 0x7fffffff,
source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4);
writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp));
writel((cmp_h >> 32) & 0x0fffffff,
source->base + CCN_XP_DT_CMP_VAL_H(wp) + 4);
/* Mask */
writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp));
writel((mask_l >> 32) & 0x7fffffff,
source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4);
writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp));
writel((mask_h >> 32) & 0x0fffffff,
source->base + CCN_XP_DT_CMP_MASK_H(wp) + 4);
}
static void arm_ccn_pmu_xp_event_config(struct perf_event *event)
{
struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
struct hw_perf_event *hw = &event->hw;
struct arm_ccn_component *source =
ccn->dt.pmu_counters[hw->idx].source;
u32 val, id;
hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base);
id = (CCN_CONFIG_VC(event->attr.config) << 4) |
(CCN_CONFIG_BUS(event->attr.config) << 3) |
(CCN_CONFIG_EVENT(event->attr.config) << 0);
val = readl(source->base + CCN_XP_PMU_EVENT_SEL);
val &= ~(CCN_XP_PMU_EVENT_SEL__ID__MASK <<
CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
val |= id << CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
writel(val, source->base + CCN_XP_PMU_EVENT_SEL);
}
static void arm_ccn_pmu_node_event_config(struct perf_event *event)
{
struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
struct hw_perf_event *hw = &event->hw;
struct arm_ccn_component *source =
ccn->dt.pmu_counters[hw->idx].source;
u32 type = CCN_CONFIG_TYPE(event->attr.config);
u32 val, port;
port = arm_ccn_node_to_xp_port(CCN_CONFIG_NODE(event->attr.config));
hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(port,
hw->config_base);
/* These *_event_sel regs should be identical, but let's make sure... */
BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL != CCN_SBAS_PMU_EVENT_SEL);
BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL != CCN_RNI_PMU_EVENT_SEL);
BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(1) !=
CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1));
BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1) !=
CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(1));
BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__MASK !=
CCN_SBAS_PMU_EVENT_SEL__ID__MASK);
BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__MASK !=
CCN_RNI_PMU_EVENT_SEL__ID__MASK);
if (WARN_ON(type != CCN_TYPE_HNF && type != CCN_TYPE_SBAS &&
!arm_ccn_pmu_type_eq(type, CCN_TYPE_RNI_3P)))
return;
/* Set the event id for the pre-allocated counter */
val = readl(source->base + CCN_HNF_PMU_EVENT_SEL);
val &= ~(CCN_HNF_PMU_EVENT_SEL__ID__MASK <<
CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
val |= CCN_CONFIG_EVENT(event->attr.config) <<
CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
writel(val, source->base + CCN_HNF_PMU_EVENT_SEL);
}
static void arm_ccn_pmu_event_config(struct perf_event *event)
{
struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
struct hw_perf_event *hw = &event->hw;
u32 xp, offset, val;
/* Cycle counter requires no setup */
if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
return;
if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
xp = CCN_CONFIG_XP(event->attr.config);
else
xp = arm_ccn_node_to_xp(CCN_CONFIG_NODE(event->attr.config));
spin_lock(&ccn->dt.config_lock);
/* Set the DT bus "distance" register */
offset = (hw->idx / 4) * 4;
val = readl(ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);
val &= ~(CCN_DT_ACTIVE_DSM__DSM_ID__MASK <<
CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4));
val |= xp << CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4);
writel(val, ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);
if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) {
if (CCN_CONFIG_EVENT(event->attr.config) ==
CCN_EVENT_WATCHPOINT)
arm_ccn_pmu_xp_watchpoint_config(event);
else
arm_ccn_pmu_xp_event_config(event);
} else {
arm_ccn_pmu_node_event_config(event);
}
spin_unlock(&ccn->dt.config_lock);
}
static int arm_ccn_pmu_active_counters(struct arm_ccn *ccn)
{
return bitmap_weight(ccn->dt.pmu_counters_mask,
CCN_NUM_PMU_EVENT_COUNTERS + 1);
}
static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
{
int err;
struct hw_perf_event *hw = &event->hw;
struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
err = arm_ccn_pmu_event_alloc(event);
if (err)
return err;
/*
 * Pin the timer, so that the overflows are handled by the chosen
 * event->cpu (this is the same one as presented in the "cpumask"
 * attribute).
 */
if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 1)
hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
HRTIMER_MODE_REL_PINNED);
arm_ccn_pmu_event_config(event);
hw->state = PERF_HES_STOPPED;
if (flags & PERF_EF_START)
arm_ccn_pmu_event_start(event, PERF_EF_UPDATE);
return 0;
}
static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
{
struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);
arm_ccn_pmu_event_release(event);
if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 0)
hrtimer_cancel(&ccn->dt.hrtimer);
}
static void arm_ccn_pmu_event_read(struct perf_event *event)
{
arm_ccn_pmu_event_update(event);
}
static void arm_ccn_pmu_enable(struct pmu *pmu)
{
struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
val |= CCN_DT_PMCR__PMU_EN;
writel(val, ccn->dt.base + CCN_DT_PMCR);
}
static void arm_ccn_pmu_disable(struct pmu *pmu)
{
struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
val &= ~CCN_DT_PMCR__PMU_EN;
writel(val, ccn->dt.base + CCN_DT_PMCR);
}
static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt)
{
u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR);
int idx;
if (!pmovsr)
return IRQ_NONE;
writel(pmovsr, dt->base + CCN_DT_PMOVSR_CLR);
BUILD_BUG_ON(CCN_IDX_PMU_CYCLE_COUNTER != CCN_NUM_PMU_EVENT_COUNTERS);
for (idx = 0; idx < CCN_NUM_PMU_EVENT_COUNTERS + 1; idx++) {
struct perf_event *event = dt->pmu_counters[idx].event;
int overflowed = pmovsr & BIT(idx);
WARN_ON_ONCE(overflowed && !event &&
idx != CCN_IDX_PMU_CYCLE_COUNTER);
if (!event || !overflowed)
continue;
arm_ccn_pmu_event_update(event);
}
return IRQ_HANDLED;
}
static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer)
{
struct arm_ccn_dt *dt = container_of(hrtimer, struct arm_ccn_dt,
hrtimer);
unsigned long flags;
local_irq_save(flags);
arm_ccn_pmu_overflow_handler(dt);
local_irq_restore(flags);
hrtimer_forward_now(hrtimer, arm_ccn_pmu_timer_period());
return HRTIMER_RESTART;
}
static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct arm_ccn_dt *dt = hlist_entry_safe(node, struct arm_ccn_dt, node);
struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
unsigned int target;
if (cpu != dt->cpu)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&dt->pmu, cpu, target);
dt->cpu = target;
if (ccn->irq)
WARN_ON(irq_set_affinity(ccn->irq, cpumask_of(dt->cpu)));
return 0;
}
static DEFINE_IDA(arm_ccn_pmu_ida);
static int arm_ccn_pmu_init(struct arm_ccn *ccn)
{
int i;
char *name;
int err;
/* Initialize DT subsystem */
ccn->dt.base = ccn->base + CCN_REGION_SIZE;
spin_lock_init(&ccn->dt.config_lock);
writel(CCN_DT_PMOVSR_CLR__MASK, ccn->dt.base + CCN_DT_PMOVSR_CLR);
writel(CCN_DT_CTL__DT_EN, ccn->dt.base + CCN_DT_CTL);
writel(CCN_DT_PMCR__OVFL_INTR_EN | CCN_DT_PMCR__PMU_EN,
ccn->dt.base + CCN_DT_PMCR);
writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
for (i = 0; i < ccn->num_xps; i++) {
writel(0, ccn->xp[i].base + CCN_XP_DT_CONFIG);
writel((CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(0)) |
(CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(1)) |
CCN_XP_DT_CONTROL__DT_ENABLE,
ccn->xp[i].base + CCN_XP_DT_CONTROL);
}
ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].l = ~0;
ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].h = ~0;
ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].l = 0;
ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].h = 0;
ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].l = ~0;
ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].h = ~(0x1 << 15);
ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].l = ~0;
ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9);
/* Get a convenient /sys/bus/event_source/devices/ name */
ccn->dt.id = ida_alloc(&arm_ccn_pmu_ida, GFP_KERNEL);
if (ccn->dt.id == 0) {
name = "ccn";
} else {
name = devm_kasprintf(ccn->dev, GFP_KERNEL, "ccn_%d",
ccn->dt.id);
if (!name) {
err = -ENOMEM;
goto error_choose_name;
}
}
/* Perf driver registration */
ccn->dt.pmu = (struct pmu) {
.module = THIS_MODULE,
.attr_groups = arm_ccn_pmu_attr_groups,
.task_ctx_nr = perf_invalid_context,
.event_init = arm_ccn_pmu_event_init,
.add = arm_ccn_pmu_event_add,
.del = arm_ccn_pmu_event_del,
.start = arm_ccn_pmu_event_start,
.stop = arm_ccn_pmu_event_stop,
.read = arm_ccn_pmu_event_read,
.pmu_enable = arm_ccn_pmu_enable,
.pmu_disable = arm_ccn_pmu_disable,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
/* No overflow interrupt? Have to use a timer instead. */
if (!ccn->irq) {
dev_info(ccn->dev, "No access to interrupts, using timer.\n");
hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler;
}
/* Pick one CPU which we will use to collect data from CCN... */
ccn->dt.cpu = raw_smp_processor_id();
/* Also make sure that the overflow interrupt is handled by this CPU */
if (ccn->irq) {
err = irq_set_affinity(ccn->irq, cpumask_of(ccn->dt.cpu));
if (err) {
dev_err(ccn->dev, "Failed to set interrupt affinity!\n");
goto error_set_affinity;
}
}
cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
&ccn->dt.node);
err = perf_pmu_register(&ccn->dt.pmu, name, -1);
if (err)
goto error_pmu_register;
return 0;
error_pmu_register:
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
&ccn->dt.node);
error_set_affinity:
error_choose_name:
ida_free(&arm_ccn_pmu_ida, ccn->dt.id);
for (i = 0; i < ccn->num_xps; i++)
writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
writel(0, ccn->dt.base + CCN_DT_PMCR);
return err;
}
static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
{
int i;
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
&ccn->dt.node);
for (i = 0; i < ccn->num_xps; i++)
writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
writel(0, ccn->dt.base + CCN_DT_PMCR);
perf_pmu_unregister(&ccn->dt.pmu);
ida_free(&arm_ccn_pmu_ida, ccn->dt.id);
}
static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn,
int (*callback)(struct arm_ccn *ccn, int region,
void __iomem *base, u32 type, u32 id))
{
int region;
for (region = 0; region < CCN_NUM_REGIONS; region++) {
u32 val, type, id;
void __iomem *base;
int err;
val = readl(ccn->base + CCN_MN_OLY_COMP_LIST_63_0 +
4 * (region / 32));
if (!(val & (1 << (region % 32))))
continue;
base = ccn->base + region * CCN_REGION_SIZE;
val = readl(base + CCN_ALL_OLY_ID);
type = (val >> CCN_ALL_OLY_ID__OLY_ID__SHIFT) &
CCN_ALL_OLY_ID__OLY_ID__MASK;
id = (val >> CCN_ALL_OLY_ID__NODE_ID__SHIFT) &
CCN_ALL_OLY_ID__NODE_ID__MASK;
err = callback(ccn, region, base, type, id);
if (err)
return err;
}
return 0;
}
static int arm_ccn_get_nodes_num(struct arm_ccn *ccn, int region,
void __iomem *base, u32 type, u32 id)
{
if (type == CCN_TYPE_XP && id >= ccn->num_xps)
ccn->num_xps = id + 1;
else if (id >= ccn->num_nodes)
ccn->num_nodes = id + 1;
return 0;
}
static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
void __iomem *base, u32 type, u32 id)
{
struct arm_ccn_component *component;
dev_dbg(ccn->dev, "Region %d: id=%u, type=0x%02x\n", region, id, type);
switch (type) {
case CCN_TYPE_MN:
ccn->mn_id = id;
return 0;
case CCN_TYPE_DT:
return 0;
case CCN_TYPE_XP:
component = &ccn->xp[id];
break;
case CCN_TYPE_SBSX:
ccn->sbsx_present = 1;
component = &ccn->node[id];
break;
case CCN_TYPE_SBAS:
ccn->sbas_present = 1;
fallthrough;
default:
component = &ccn->node[id];
break;
}
component->base = base;
component->type = type;
return 0;
}
static irqreturn_t arm_ccn_error_handler(struct arm_ccn *ccn,
const u32 *err_sig_val)
{
/* This should really be handled by firmware... */
dev_err(ccn->dev, "Error reported in %08x%08x%08x%08x%08x%08x.\n",
err_sig_val[5], err_sig_val[4], err_sig_val[3],
err_sig_val[2], err_sig_val[1], err_sig_val[0]);
dev_err(ccn->dev, "Disabling interrupt generation for all errors.\n");
writel(CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE,
ccn->base + CCN_MN_ERRINT_STATUS);
return IRQ_HANDLED;
}
static irqreturn_t arm_ccn_irq_handler(int irq, void *dev_id)
{
irqreturn_t res = IRQ_NONE;
struct arm_ccn *ccn = dev_id;
u32 err_sig_val[6];
u32 err_or;
int i;
/* PMU overflow is a special case */
err_or = err_sig_val[0] = readl(ccn->base + CCN_MN_ERR_SIG_VAL_63_0);
if (err_or & CCN_MN_ERR_SIG_VAL_63_0__DT) {
err_or &= ~CCN_MN_ERR_SIG_VAL_63_0__DT;
res = arm_ccn_pmu_overflow_handler(&ccn->dt);
}
/* Have to read all err_sig_vals to clear them */
for (i = 1; i < ARRAY_SIZE(err_sig_val); i++) {
err_sig_val[i] = readl(ccn->base +
CCN_MN_ERR_SIG_VAL_63_0 + i * 4);
err_or |= err_sig_val[i];
}
if (err_or)
res |= arm_ccn_error_handler(ccn, err_sig_val);
if (res != IRQ_NONE)
writel(CCN_MN_ERRINT_STATUS__INTREQ__DESSERT,
ccn->base + CCN_MN_ERRINT_STATUS);
return res;
}
static int arm_ccn_probe(struct platform_device *pdev)
{
struct arm_ccn *ccn;
int irq;
int err;
ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL);
if (!ccn)
return -ENOMEM;
ccn->dev = &pdev->dev;
platform_set_drvdata(pdev, ccn);
ccn->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ccn->base))
return PTR_ERR(ccn->base);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
/* Check if we can use the interrupt */
writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE,
ccn->base + CCN_MN_ERRINT_STATUS);
if (readl(ccn->base + CCN_MN_ERRINT_STATUS) &
CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED) {
/* Can set 'disable' bits, so can acknowledge interrupts */
writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE,
ccn->base + CCN_MN_ERRINT_STATUS);
err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler,
IRQF_NOBALANCING | IRQF_NO_THREAD,
dev_name(ccn->dev), ccn);
if (err)
return err;
ccn->irq = irq;
}
/* Build topology */
err = arm_ccn_for_each_valid_region(ccn, arm_ccn_get_nodes_num);
if (err)
return err;
ccn->node = devm_kcalloc(ccn->dev, ccn->num_nodes, sizeof(*ccn->node),
GFP_KERNEL);
ccn->xp = devm_kcalloc(ccn->dev, ccn->num_xps, sizeof(*ccn->node),
GFP_KERNEL);
if (!ccn->node || !ccn->xp)
return -ENOMEM;
err = arm_ccn_for_each_valid_region(ccn, arm_ccn_init_nodes);
if (err)
return err;
return arm_ccn_pmu_init(ccn);
}
static int arm_ccn_remove(struct platform_device *pdev)
{
struct arm_ccn *ccn = platform_get_drvdata(pdev);
arm_ccn_pmu_cleanup(ccn);
return 0;
}
static const struct of_device_id arm_ccn_match[] = {
{ .compatible = "arm,ccn-502", },
{ .compatible = "arm,ccn-504", },
{ .compatible = "arm,ccn-512", },
{},
};
MODULE_DEVICE_TABLE(of, arm_ccn_match);
static struct platform_driver arm_ccn_driver = {
.driver = {
.name = "arm-ccn",
.of_match_table = arm_ccn_match,
.suppress_bind_attrs = true,
},
.probe = arm_ccn_probe,
.remove = arm_ccn_remove,
};
static int __init arm_ccn_init(void)
{
int i, ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCN_ONLINE,
"perf/arm/ccn:online", NULL,
arm_ccn_pmu_offline_cpu);
if (ret)
return ret;
for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++)
arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr;
ret = platform_driver_register(&arm_ccn_driver);
if (ret)
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
return ret;
}
static void __exit arm_ccn_exit(void)
{
platform_driver_unregister(&arm_ccn_driver);
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
}
module_init(arm_ccn_init);
module_exit(arm_ccn_exit);
MODULE_AUTHOR("Pawel Moll <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/perf/arm-ccn.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* APM X-Gene SoC PMU (Performance Monitor Unit)
*
* Copyright (c) 2016, Applied Micro Circuits Corporation
* Author: Hoan Tran <[email protected]>
* Tai Nguyen <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#define CSW_CSWCR 0x0000
#define CSW_CSWCR_DUALMCB_MASK BIT(0)
#define CSW_CSWCR_MCB0_ROUTING(x) (((x) & 0x0C) >> 2)
#define CSW_CSWCR_MCB1_ROUTING(x) (((x) & 0x30) >> 4)
#define MCBADDRMR 0x0000
#define MCBADDRMR_DUALMCU_MODE_MASK BIT(2)
#define PCPPMU_INTSTATUS_REG 0x000
#define PCPPMU_INTMASK_REG 0x004
#define PCPPMU_INTMASK 0x0000000F
#define PCPPMU_INTENMASK 0xFFFFFFFF
#define PCPPMU_INTCLRMASK 0xFFFFFFF0
#define PCPPMU_INT_MCU BIT(0)
#define PCPPMU_INT_MCB BIT(1)
#define PCPPMU_INT_L3C BIT(2)
#define PCPPMU_INT_IOB BIT(3)
#define PCPPMU_V3_INTMASK 0x00FF33FF
#define PCPPMU_V3_INTENMASK 0xFFFFFFFF
#define PCPPMU_V3_INTCLRMASK 0xFF00CC00
#define PCPPMU_V3_INT_MCU 0x000000FF
#define PCPPMU_V3_INT_MCB 0x00000300
#define PCPPMU_V3_INT_L3C 0x00FF0000
#define PCPPMU_V3_INT_IOB 0x00003000
#define PMU_MAX_COUNTERS 4
#define PMU_CNT_MAX_PERIOD 0xFFFFFFFFULL
#define PMU_V3_CNT_MAX_PERIOD 0xFFFFFFFFFFFFFFFFULL
#define PMU_OVERFLOW_MASK 0xF
#define PMU_PMCR_E BIT(0)
#define PMU_PMCR_P BIT(1)
#define PMU_PMEVCNTR0 0x000
#define PMU_PMEVCNTR1 0x004
#define PMU_PMEVCNTR2 0x008
#define PMU_PMEVCNTR3 0x00C
#define PMU_PMEVTYPER0 0x400
#define PMU_PMEVTYPER1 0x404
#define PMU_PMEVTYPER2 0x408
#define PMU_PMEVTYPER3 0x40C
#define PMU_PMAMR0 0xA00
#define PMU_PMAMR1 0xA04
#define PMU_PMCNTENSET 0xC00
#define PMU_PMCNTENCLR 0xC20
#define PMU_PMINTENSET 0xC40
#define PMU_PMINTENCLR 0xC60
#define PMU_PMOVSR 0xC80
#define PMU_PMCR 0xE04
/* PMU registers for V3 */
#define PMU_PMOVSCLR 0xC80
#define PMU_PMOVSSET 0xCC0
#define to_pmu_dev(p) container_of(p, struct xgene_pmu_dev, pmu)
#define GET_CNTR(ev) (ev->hw.idx)
#define GET_EVENTID(ev) (ev->hw.config & 0xFFULL)
#define GET_AGENTID(ev) (ev->hw.config_base & 0xFFFFFFFFUL)
#define GET_AGENT1ID(ev) ((ev->hw.config_base >> 32) & 0xFFFFFFFFUL)
struct hw_pmu_info {
u32 type;
u32 enable_mask;
void __iomem *csr;
};
struct xgene_pmu_dev {
struct hw_pmu_info *inf;
struct xgene_pmu *parent;
struct pmu pmu;
u8 max_counters;
DECLARE_BITMAP(cntr_assign_mask, PMU_MAX_COUNTERS);
u64 max_period;
const struct attribute_group **attr_groups;
struct perf_event *pmu_counter_event[PMU_MAX_COUNTERS];
};
struct xgene_pmu_ops {
void (*mask_int)(struct xgene_pmu *pmu);
void (*unmask_int)(struct xgene_pmu *pmu);
u64 (*read_counter)(struct xgene_pmu_dev *pmu, int idx);
void (*write_counter)(struct xgene_pmu_dev *pmu, int idx, u64 val);
void (*write_evttype)(struct xgene_pmu_dev *pmu_dev, int idx, u32 val);
void (*write_agentmsk)(struct xgene_pmu_dev *pmu_dev, u32 val);
void (*write_agent1msk)(struct xgene_pmu_dev *pmu_dev, u32 val);
void (*enable_counter)(struct xgene_pmu_dev *pmu_dev, int idx);
void (*disable_counter)(struct xgene_pmu_dev *pmu_dev, int idx);
void (*enable_counter_int)(struct xgene_pmu_dev *pmu_dev, int idx);
void (*disable_counter_int)(struct xgene_pmu_dev *pmu_dev, int idx);
void (*reset_counters)(struct xgene_pmu_dev *pmu_dev);
void (*start_counters)(struct xgene_pmu_dev *pmu_dev);
void (*stop_counters)(struct xgene_pmu_dev *pmu_dev);
};
struct xgene_pmu {
struct device *dev;
struct hlist_node node;
int version;
void __iomem *pcppmu_csr;
u32 mcb_active_mask;
u32 mc_active_mask;
u32 l3c_active_mask;
cpumask_t cpu;
int irq;
raw_spinlock_t lock;
const struct xgene_pmu_ops *ops;
struct list_head l3cpmus;
struct list_head iobpmus;
struct list_head mcbpmus;
struct list_head mcpmus;
};
struct xgene_pmu_dev_ctx {
char *name;
struct list_head next;
struct xgene_pmu_dev *pmu_dev;
struct hw_pmu_info inf;
};
struct xgene_pmu_data {
int id;
u32 data;
};
enum xgene_pmu_version {
PCP_PMU_V1 = 1,
PCP_PMU_V2,
PCP_PMU_V3,
};
enum xgene_pmu_dev_type {
PMU_TYPE_L3C = 0,
PMU_TYPE_IOB,
PMU_TYPE_IOB_SLOW,
PMU_TYPE_MCB,
PMU_TYPE_MC,
};
/*
* sysfs format attributes
*/
static ssize_t xgene_pmu_format_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *eattr;
eattr = container_of(attr, struct dev_ext_attribute, attr);
return sysfs_emit(buf, "%s\n", (char *) eattr->var);
}
#define XGENE_PMU_FORMAT_ATTR(_name, _config) \
(&((struct dev_ext_attribute[]) { \
{ .attr = __ATTR(_name, S_IRUGO, xgene_pmu_format_show, NULL), \
.var = (void *) _config, } \
})[0].attr.attr)
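/*
 * Descriptive note (added for clarity): the macro above uses a compound
 * literal - an unnamed one-element array of struct dev_ext_attribute - so
 * each XGENE_PMU_FORMAT_ATTR() use expands to a pointer to the embedded
 * 'struct attribute' of a statically allocated dev_ext_attribute, which is
 * exactly what the attribute lists below expect.
 */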
static struct attribute *l3c_pmu_format_attrs[] = {
XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-7"),
XGENE_PMU_FORMAT_ATTR(l3c_agentid, "config1:0-9"),
NULL,
};
static struct attribute *iob_pmu_format_attrs[] = {
XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-7"),
XGENE_PMU_FORMAT_ATTR(iob_agentid, "config1:0-63"),
NULL,
};
static struct attribute *mcb_pmu_format_attrs[] = {
XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-5"),
XGENE_PMU_FORMAT_ATTR(mcb_agentid, "config1:0-9"),
NULL,
};
static struct attribute *mc_pmu_format_attrs[] = {
XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-28"),
NULL,
};
static const struct attribute_group l3c_pmu_format_attr_group = {
.name = "format",
.attrs = l3c_pmu_format_attrs,
};
static const struct attribute_group iob_pmu_format_attr_group = {
.name = "format",
.attrs = iob_pmu_format_attrs,
};
static const struct attribute_group mcb_pmu_format_attr_group = {
.name = "format",
.attrs = mcb_pmu_format_attrs,
};
static const struct attribute_group mc_pmu_format_attr_group = {
.name = "format",
.attrs = mc_pmu_format_attrs,
};
static struct attribute *l3c_pmu_v3_format_attrs[] = {
XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-39"),
NULL,
};
static struct attribute *iob_pmu_v3_format_attrs[] = {
XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-47"),
NULL,
};
static struct attribute *iob_slow_pmu_v3_format_attrs[] = {
XGENE_PMU_FORMAT_ATTR(iob_slow_eventid, "config:0-16"),
NULL,
};
static struct attribute *mcb_pmu_v3_format_attrs[] = {
XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-35"),
NULL,
};
static struct attribute *mc_pmu_v3_format_attrs[] = {
XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-44"),
NULL,
};
static const struct attribute_group l3c_pmu_v3_format_attr_group = {
.name = "format",
.attrs = l3c_pmu_v3_format_attrs,
};
static const struct attribute_group iob_pmu_v3_format_attr_group = {
.name = "format",
.attrs = iob_pmu_v3_format_attrs,
};
static const struct attribute_group iob_slow_pmu_v3_format_attr_group = {
.name = "format",
.attrs = iob_slow_pmu_v3_format_attrs,
};
static const struct attribute_group mcb_pmu_v3_format_attr_group = {
.name = "format",
.attrs = mcb_pmu_v3_format_attrs,
};
static const struct attribute_group mc_pmu_v3_format_attr_group = {
.name = "format",
.attrs = mc_pmu_v3_format_attrs,
};
/*
* sysfs event attributes
*/
static ssize_t xgene_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct perf_pmu_events_attr *pmu_attr =
container_of(attr, struct perf_pmu_events_attr, attr);
return sysfs_emit(buf, "config=0x%llx\n", pmu_attr->id);
}
#define XGENE_PMU_EVENT_ATTR(_name, _config) \
PMU_EVENT_ATTR_ID(_name, xgene_pmu_event_show, _config)
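/*
 * Usage note (illustrative, not from the original source): each entry below
 * shows up as a file under /sys/bus/event_source/devices/<pmu>/events/, so
 * a measurement might look like
 *
 *	perf stat -a -e l3c0/read-miss/,l3c0/read-hit/ sleep 1
 *
 * where "l3c0" is an assumed instance name produced by this driver.
 */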
static struct attribute *l3c_pmu_events_attrs[] = {
XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
XGENE_PMU_EVENT_ATTR(read-hit, 0x02),
XGENE_PMU_EVENT_ATTR(read-miss, 0x03),
XGENE_PMU_EVENT_ATTR(write-need-replacement, 0x06),
XGENE_PMU_EVENT_ATTR(write-not-need-replacement, 0x07),
XGENE_PMU_EVENT_ATTR(tq-full, 0x08),
XGENE_PMU_EVENT_ATTR(ackq-full, 0x09),
XGENE_PMU_EVENT_ATTR(wdb-full, 0x0a),
XGENE_PMU_EVENT_ATTR(bank-fifo-full, 0x0b),
XGENE_PMU_EVENT_ATTR(odb-full, 0x0c),
XGENE_PMU_EVENT_ATTR(wbq-full, 0x0d),
XGENE_PMU_EVENT_ATTR(bank-conflict-fifo-issue, 0x0e),
XGENE_PMU_EVENT_ATTR(bank-fifo-issue, 0x0f),
NULL,
};
static struct attribute *iob_pmu_events_attrs[] = {
XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
XGENE_PMU_EVENT_ATTR(axi0-read, 0x02),
XGENE_PMU_EVENT_ATTR(axi0-read-partial, 0x03),
XGENE_PMU_EVENT_ATTR(axi1-read, 0x04),
XGENE_PMU_EVENT_ATTR(axi1-read-partial, 0x05),
XGENE_PMU_EVENT_ATTR(csw-read-block, 0x06),
XGENE_PMU_EVENT_ATTR(csw-read-partial, 0x07),
XGENE_PMU_EVENT_ATTR(axi0-write, 0x10),
XGENE_PMU_EVENT_ATTR(axi0-write-partial, 0x11),
XGENE_PMU_EVENT_ATTR(axi1-write, 0x13),
XGENE_PMU_EVENT_ATTR(axi1-write-partial, 0x14),
XGENE_PMU_EVENT_ATTR(csw-inbound-dirty, 0x16),
NULL,
};
static struct attribute *mcb_pmu_events_attrs[] = {
XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
XGENE_PMU_EVENT_ATTR(csw-read, 0x02),
XGENE_PMU_EVENT_ATTR(csw-write-request, 0x03),
XGENE_PMU_EVENT_ATTR(mcb-csw-stall, 0x04),
XGENE_PMU_EVENT_ATTR(cancel-read-gack, 0x05),
NULL,
};
static struct attribute *mc_pmu_events_attrs[] = {
XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
XGENE_PMU_EVENT_ATTR(act-cmd-sent, 0x02),
XGENE_PMU_EVENT_ATTR(pre-cmd-sent, 0x03),
XGENE_PMU_EVENT_ATTR(rd-cmd-sent, 0x04),
XGENE_PMU_EVENT_ATTR(rda-cmd-sent, 0x05),
XGENE_PMU_EVENT_ATTR(wr-cmd-sent, 0x06),
XGENE_PMU_EVENT_ATTR(wra-cmd-sent, 0x07),
XGENE_PMU_EVENT_ATTR(pde-cmd-sent, 0x08),
XGENE_PMU_EVENT_ATTR(sre-cmd-sent, 0x09),
XGENE_PMU_EVENT_ATTR(prea-cmd-sent, 0x0a),
XGENE_PMU_EVENT_ATTR(ref-cmd-sent, 0x0b),
XGENE_PMU_EVENT_ATTR(rd-rda-cmd-sent, 0x0c),
XGENE_PMU_EVENT_ATTR(wr-wra-cmd-sent, 0x0d),
XGENE_PMU_EVENT_ATTR(in-rd-collision, 0x0e),
XGENE_PMU_EVENT_ATTR(in-wr-collision, 0x0f),
XGENE_PMU_EVENT_ATTR(collision-queue-not-empty, 0x10),
XGENE_PMU_EVENT_ATTR(collision-queue-full, 0x11),
XGENE_PMU_EVENT_ATTR(mcu-request, 0x12),
XGENE_PMU_EVENT_ATTR(mcu-rd-request, 0x13),
XGENE_PMU_EVENT_ATTR(mcu-hp-rd-request, 0x14),
XGENE_PMU_EVENT_ATTR(mcu-wr-request, 0x15),
XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-all, 0x16),
XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-cancel, 0x17),
XGENE_PMU_EVENT_ATTR(mcu-rd-response, 0x18),
XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-all, 0x19),
XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-cancel, 0x1a),
XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-all, 0x1b),
XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-cancel, 0x1c),
NULL,
};
static const struct attribute_group l3c_pmu_events_attr_group = {
.name = "events",
.attrs = l3c_pmu_events_attrs,
};
static const struct attribute_group iob_pmu_events_attr_group = {
.name = "events",
.attrs = iob_pmu_events_attrs,
};
static const struct attribute_group mcb_pmu_events_attr_group = {
.name = "events",
.attrs = mcb_pmu_events_attrs,
};
static const struct attribute_group mc_pmu_events_attr_group = {
.name = "events",
.attrs = mc_pmu_events_attrs,
};
static struct attribute *l3c_pmu_v3_events_attrs[] = {
XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
XGENE_PMU_EVENT_ATTR(read-hit, 0x01),
XGENE_PMU_EVENT_ATTR(read-miss, 0x02),
XGENE_PMU_EVENT_ATTR(index-flush-eviction, 0x03),
XGENE_PMU_EVENT_ATTR(write-caused-replacement, 0x04),
XGENE_PMU_EVENT_ATTR(write-not-caused-replacement, 0x05),
XGENE_PMU_EVENT_ATTR(clean-eviction, 0x06),
XGENE_PMU_EVENT_ATTR(dirty-eviction, 0x07),
XGENE_PMU_EVENT_ATTR(read, 0x08),
XGENE_PMU_EVENT_ATTR(write, 0x09),
XGENE_PMU_EVENT_ATTR(request, 0x0a),
XGENE_PMU_EVENT_ATTR(tq-bank-conflict-issue-stall, 0x0b),
XGENE_PMU_EVENT_ATTR(tq-full, 0x0c),
XGENE_PMU_EVENT_ATTR(ackq-full, 0x0d),
XGENE_PMU_EVENT_ATTR(wdb-full, 0x0e),
XGENE_PMU_EVENT_ATTR(odb-full, 0x10),
XGENE_PMU_EVENT_ATTR(wbq-full, 0x11),
XGENE_PMU_EVENT_ATTR(input-req-async-fifo-stall, 0x12),
XGENE_PMU_EVENT_ATTR(output-req-async-fifo-stall, 0x13),
XGENE_PMU_EVENT_ATTR(output-data-async-fifo-stall, 0x14),
XGENE_PMU_EVENT_ATTR(total-insertion, 0x15),
XGENE_PMU_EVENT_ATTR(sip-insertions-r-set, 0x16),
XGENE_PMU_EVENT_ATTR(sip-insertions-r-clear, 0x17),
XGENE_PMU_EVENT_ATTR(dip-insertions-r-set, 0x18),
XGENE_PMU_EVENT_ATTR(dip-insertions-r-clear, 0x19),
XGENE_PMU_EVENT_ATTR(dip-insertions-force-r-set, 0x1a),
XGENE_PMU_EVENT_ATTR(egression, 0x1b),
XGENE_PMU_EVENT_ATTR(replacement, 0x1c),
XGENE_PMU_EVENT_ATTR(old-replacement, 0x1d),
XGENE_PMU_EVENT_ATTR(young-replacement, 0x1e),
XGENE_PMU_EVENT_ATTR(r-set-replacement, 0x1f),
XGENE_PMU_EVENT_ATTR(r-clear-replacement, 0x20),
XGENE_PMU_EVENT_ATTR(old-r-replacement, 0x21),
XGENE_PMU_EVENT_ATTR(old-nr-replacement, 0x22),
XGENE_PMU_EVENT_ATTR(young-r-replacement, 0x23),
XGENE_PMU_EVENT_ATTR(young-nr-replacement, 0x24),
XGENE_PMU_EVENT_ATTR(bloomfilter-clearing, 0x25),
XGENE_PMU_EVENT_ATTR(generation-flip, 0x26),
XGENE_PMU_EVENT_ATTR(vcc-droop-detected, 0x27),
NULL,
};
static struct attribute *iob_fast_pmu_v3_events_attrs[] = {
XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-all, 0x01),
XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-rd, 0x02),
XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-wr, 0x03),
XGENE_PMU_EVENT_ATTR(pa-all-cp-req, 0x04),
XGENE_PMU_EVENT_ATTR(pa-cp-blk-req, 0x05),
XGENE_PMU_EVENT_ATTR(pa-cp-ptl-req, 0x06),
XGENE_PMU_EVENT_ATTR(pa-cp-rd-req, 0x07),
XGENE_PMU_EVENT_ATTR(pa-cp-wr-req, 0x08),
XGENE_PMU_EVENT_ATTR(ba-all-req, 0x09),
XGENE_PMU_EVENT_ATTR(ba-rd-req, 0x0a),
XGENE_PMU_EVENT_ATTR(ba-wr-req, 0x0b),
XGENE_PMU_EVENT_ATTR(pa-rd-shared-req-issued, 0x10),
XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-req-issued, 0x11),
XGENE_PMU_EVENT_ATTR(pa-wr-invalidate-req-issued-stashable, 0x12),
XGENE_PMU_EVENT_ATTR(pa-wr-invalidate-req-issued-nonstashable, 0x13),
XGENE_PMU_EVENT_ATTR(pa-wr-back-req-issued-stashable, 0x14),
XGENE_PMU_EVENT_ATTR(pa-wr-back-req-issued-nonstashable, 0x15),
XGENE_PMU_EVENT_ATTR(pa-ptl-wr-req, 0x16),
XGENE_PMU_EVENT_ATTR(pa-ptl-rd-req, 0x17),
XGENE_PMU_EVENT_ATTR(pa-wr-back-clean-data, 0x18),
XGENE_PMU_EVENT_ATTR(pa-wr-back-cancelled-on-SS, 0x1b),
XGENE_PMU_EVENT_ATTR(pa-barrier-occurrence, 0x1c),
XGENE_PMU_EVENT_ATTR(pa-barrier-cycles, 0x1d),
XGENE_PMU_EVENT_ATTR(pa-total-cp-snoops, 0x20),
XGENE_PMU_EVENT_ATTR(pa-rd-shared-snoop, 0x21),
XGENE_PMU_EVENT_ATTR(pa-rd-shared-snoop-hit, 0x22),
XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-snoop, 0x23),
XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-snoop-hit, 0x24),
XGENE_PMU_EVENT_ATTR(pa-rd-wr-invalid-snoop, 0x25),
XGENE_PMU_EVENT_ATTR(pa-rd-wr-invalid-snoop-hit, 0x26),
XGENE_PMU_EVENT_ATTR(pa-req-buffer-full, 0x28),
XGENE_PMU_EVENT_ATTR(cswlf-outbound-req-fifo-full, 0x29),
XGENE_PMU_EVENT_ATTR(cswlf-inbound-snoop-fifo-backpressure, 0x2a),
XGENE_PMU_EVENT_ATTR(cswlf-outbound-lack-fifo-full, 0x2b),
XGENE_PMU_EVENT_ATTR(cswlf-inbound-gack-fifo-backpressure, 0x2c),
XGENE_PMU_EVENT_ATTR(cswlf-outbound-data-fifo-full, 0x2d),
XGENE_PMU_EVENT_ATTR(cswlf-inbound-data-fifo-backpressure, 0x2e),
XGENE_PMU_EVENT_ATTR(cswlf-inbound-req-backpressure, 0x2f),
NULL,
};
static struct attribute *iob_slow_pmu_v3_events_attrs[] = {
XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
XGENE_PMU_EVENT_ATTR(pa-axi0-rd-req, 0x01),
XGENE_PMU_EVENT_ATTR(pa-axi0-wr-req, 0x02),
XGENE_PMU_EVENT_ATTR(pa-axi1-rd-req, 0x03),
XGENE_PMU_EVENT_ATTR(pa-axi1-wr-req, 0x04),
XGENE_PMU_EVENT_ATTR(ba-all-axi-req, 0x07),
XGENE_PMU_EVENT_ATTR(ba-axi-rd-req, 0x08),
XGENE_PMU_EVENT_ATTR(ba-axi-wr-req, 0x09),
XGENE_PMU_EVENT_ATTR(ba-free-list-empty, 0x10),
NULL,
};
static struct attribute *mcb_pmu_v3_events_attrs[] = {
XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
XGENE_PMU_EVENT_ATTR(req-receive, 0x01),
XGENE_PMU_EVENT_ATTR(rd-req-recv, 0x02),
XGENE_PMU_EVENT_ATTR(rd-req-recv-2, 0x03),
XGENE_PMU_EVENT_ATTR(wr-req-recv, 0x04),
XGENE_PMU_EVENT_ATTR(wr-req-recv-2, 0x05),
XGENE_PMU_EVENT_ATTR(rd-req-sent-to-mcu, 0x06),
XGENE_PMU_EVENT_ATTR(rd-req-sent-to-mcu-2, 0x07),
XGENE_PMU_EVENT_ATTR(rd-req-sent-to-spec-mcu, 0x08),
XGENE_PMU_EVENT_ATTR(rd-req-sent-to-spec-mcu-2, 0x09),
XGENE_PMU_EVENT_ATTR(glbl-ack-recv-for-rd-sent-to-spec-mcu, 0x0a),
XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-for-rd-sent-to-spec-mcu, 0x0b),
XGENE_PMU_EVENT_ATTR(glbl-ack-nogo-recv-for-rd-sent-to-spec-mcu, 0x0c),
XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-any-rd-req, 0x0d),
XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-any-rd-req-2, 0x0e),
XGENE_PMU_EVENT_ATTR(wr-req-sent-to-mcu, 0x0f),
XGENE_PMU_EVENT_ATTR(gack-recv, 0x10),
XGENE_PMU_EVENT_ATTR(rd-gack-recv, 0x11),
XGENE_PMU_EVENT_ATTR(wr-gack-recv, 0x12),
XGENE_PMU_EVENT_ATTR(cancel-rd-gack, 0x13),
XGENE_PMU_EVENT_ATTR(cancel-wr-gack, 0x14),
XGENE_PMU_EVENT_ATTR(mcb-csw-req-stall, 0x15),
XGENE_PMU_EVENT_ATTR(mcu-req-intf-blocked, 0x16),
XGENE_PMU_EVENT_ATTR(mcb-mcu-rd-intf-stall, 0x17),
XGENE_PMU_EVENT_ATTR(csw-rd-intf-blocked, 0x18),
XGENE_PMU_EVENT_ATTR(csw-local-ack-intf-blocked, 0x19),
XGENE_PMU_EVENT_ATTR(mcu-req-table-full, 0x1a),
XGENE_PMU_EVENT_ATTR(mcu-stat-table-full, 0x1b),
XGENE_PMU_EVENT_ATTR(mcu-wr-table-full, 0x1c),
XGENE_PMU_EVENT_ATTR(mcu-rdreceipt-resp, 0x1d),
XGENE_PMU_EVENT_ATTR(mcu-wrcomplete-resp, 0x1e),
XGENE_PMU_EVENT_ATTR(mcu-retryack-resp, 0x1f),
XGENE_PMU_EVENT_ATTR(mcu-pcrdgrant-resp, 0x20),
XGENE_PMU_EVENT_ATTR(mcu-req-from-lastload, 0x21),
XGENE_PMU_EVENT_ATTR(mcu-req-from-bypass, 0x22),
XGENE_PMU_EVENT_ATTR(volt-droop-detect, 0x23),
NULL,
};
static struct attribute *mc_pmu_v3_events_attrs[] = {
XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
XGENE_PMU_EVENT_ATTR(act-sent, 0x01),
XGENE_PMU_EVENT_ATTR(pre-sent, 0x02),
XGENE_PMU_EVENT_ATTR(rd-sent, 0x03),
XGENE_PMU_EVENT_ATTR(rda-sent, 0x04),
XGENE_PMU_EVENT_ATTR(wr-sent, 0x05),
XGENE_PMU_EVENT_ATTR(wra-sent, 0x06),
XGENE_PMU_EVENT_ATTR(pd-entry-vld, 0x07),
XGENE_PMU_EVENT_ATTR(sref-entry-vld, 0x08),
XGENE_PMU_EVENT_ATTR(prea-sent, 0x09),
XGENE_PMU_EVENT_ATTR(ref-sent, 0x0a),
XGENE_PMU_EVENT_ATTR(rd-rda-sent, 0x0b),
XGENE_PMU_EVENT_ATTR(wr-wra-sent, 0x0c),
XGENE_PMU_EVENT_ATTR(raw-hazard, 0x0d),
XGENE_PMU_EVENT_ATTR(war-hazard, 0x0e),
XGENE_PMU_EVENT_ATTR(waw-hazard, 0x0f),
XGENE_PMU_EVENT_ATTR(rar-hazard, 0x10),
XGENE_PMU_EVENT_ATTR(raw-war-waw-hazard, 0x11),
XGENE_PMU_EVENT_ATTR(hprd-lprd-wr-req-vld, 0x12),
XGENE_PMU_EVENT_ATTR(lprd-req-vld, 0x13),
XGENE_PMU_EVENT_ATTR(hprd-req-vld, 0x14),
XGENE_PMU_EVENT_ATTR(hprd-lprd-req-vld, 0x15),
XGENE_PMU_EVENT_ATTR(wr-req-vld, 0x16),
XGENE_PMU_EVENT_ATTR(partial-wr-req-vld, 0x17),
XGENE_PMU_EVENT_ATTR(rd-retry, 0x18),
XGENE_PMU_EVENT_ATTR(wr-retry, 0x19),
XGENE_PMU_EVENT_ATTR(retry-gnt, 0x1a),
XGENE_PMU_EVENT_ATTR(rank-change, 0x1b),
XGENE_PMU_EVENT_ATTR(dir-change, 0x1c),
XGENE_PMU_EVENT_ATTR(rank-dir-change, 0x1d),
XGENE_PMU_EVENT_ATTR(rank-active, 0x1e),
XGENE_PMU_EVENT_ATTR(rank-idle, 0x1f),
XGENE_PMU_EVENT_ATTR(rank-pd, 0x20),
XGENE_PMU_EVENT_ATTR(rank-sref, 0x21),
XGENE_PMU_EVENT_ATTR(queue-fill-gt-thresh, 0x22),
XGENE_PMU_EVENT_ATTR(queue-rds-gt-thresh, 0x23),
XGENE_PMU_EVENT_ATTR(queue-wrs-gt-thresh, 0x24),
XGENE_PMU_EVENT_ATTR(phy-updt-complt, 0x25),
XGENE_PMU_EVENT_ATTR(tz-fail, 0x26),
XGENE_PMU_EVENT_ATTR(dram-errc, 0x27),
XGENE_PMU_EVENT_ATTR(dram-errd, 0x28),
XGENE_PMU_EVENT_ATTR(rd-enq, 0x29),
XGENE_PMU_EVENT_ATTR(wr-enq, 0x2a),
XGENE_PMU_EVENT_ATTR(tmac-limit-reached, 0x2b),
XGENE_PMU_EVENT_ATTR(tmaw-tracker-full, 0x2c),
NULL,
};
static const struct attribute_group l3c_pmu_v3_events_attr_group = {
.name = "events",
.attrs = l3c_pmu_v3_events_attrs,
};
static const struct attribute_group iob_fast_pmu_v3_events_attr_group = {
.name = "events",
.attrs = iob_fast_pmu_v3_events_attrs,
};
static const struct attribute_group iob_slow_pmu_v3_events_attr_group = {
.name = "events",
.attrs = iob_slow_pmu_v3_events_attrs,
};
static const struct attribute_group mcb_pmu_v3_events_attr_group = {
.name = "events",
.attrs = mcb_pmu_v3_events_attrs,
};
static const struct attribute_group mc_pmu_v3_events_attr_group = {
.name = "events",
.attrs = mc_pmu_v3_events_attrs,
};
/*
* sysfs cpumask attributes
*/
static ssize_t cpumask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct xgene_pmu_dev *pmu_dev = to_pmu_dev(dev_get_drvdata(dev));
return cpumap_print_to_pagebuf(true, buf, &pmu_dev->parent->cpu);
}
static DEVICE_ATTR_RO(cpumask);
static struct attribute *xgene_pmu_cpumask_attrs[] = {
&dev_attr_cpumask.attr,
NULL,
};
static const struct attribute_group pmu_cpumask_attr_group = {
.attrs = xgene_pmu_cpumask_attrs,
};
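/*
 * Note added for clarity: the perf tool reads this "cpumask" file and opens
 * uncore events only on the advertised CPU; the hotplug callbacks further
 * below keep the mask (and the overflow IRQ affinity) pointing at an online
 * CPU.
 */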
/*
* Per PMU device attribute groups of PMU v1 and v2
*/
static const struct attribute_group *l3c_pmu_attr_groups[] = {
&l3c_pmu_format_attr_group,
&pmu_cpumask_attr_group,
&l3c_pmu_events_attr_group,
NULL
};
static const struct attribute_group *iob_pmu_attr_groups[] = {
&iob_pmu_format_attr_group,
&pmu_cpumask_attr_group,
&iob_pmu_events_attr_group,
NULL
};
static const struct attribute_group *mcb_pmu_attr_groups[] = {
&mcb_pmu_format_attr_group,
&pmu_cpumask_attr_group,
&mcb_pmu_events_attr_group,
NULL
};
static const struct attribute_group *mc_pmu_attr_groups[] = {
&mc_pmu_format_attr_group,
&pmu_cpumask_attr_group,
&mc_pmu_events_attr_group,
NULL
};
/*
* Per PMU device attribute groups of PMU v3
*/
static const struct attribute_group *l3c_pmu_v3_attr_groups[] = {
&l3c_pmu_v3_format_attr_group,
&pmu_cpumask_attr_group,
&l3c_pmu_v3_events_attr_group,
NULL
};
static const struct attribute_group *iob_fast_pmu_v3_attr_groups[] = {
&iob_pmu_v3_format_attr_group,
&pmu_cpumask_attr_group,
&iob_fast_pmu_v3_events_attr_group,
NULL
};
static const struct attribute_group *iob_slow_pmu_v3_attr_groups[] = {
&iob_slow_pmu_v3_format_attr_group,
&pmu_cpumask_attr_group,
&iob_slow_pmu_v3_events_attr_group,
NULL
};
static const struct attribute_group *mcb_pmu_v3_attr_groups[] = {
&mcb_pmu_v3_format_attr_group,
&pmu_cpumask_attr_group,
&mcb_pmu_v3_events_attr_group,
NULL
};
static const struct attribute_group *mc_pmu_v3_attr_groups[] = {
&mc_pmu_v3_format_attr_group,
&pmu_cpumask_attr_group,
&mc_pmu_v3_events_attr_group,
NULL
};
static int get_next_avail_cntr(struct xgene_pmu_dev *pmu_dev)
{
int cntr;
cntr = find_first_zero_bit(pmu_dev->cntr_assign_mask,
pmu_dev->max_counters);
if (cntr == pmu_dev->max_counters)
return -ENOSPC;
set_bit(cntr, pmu_dev->cntr_assign_mask);
return cntr;
}
static void clear_avail_cntr(struct xgene_pmu_dev *pmu_dev, int cntr)
{
clear_bit(cntr, pmu_dev->cntr_assign_mask);
}
static inline void xgene_pmu_mask_int(struct xgene_pmu *xgene_pmu)
{
writel(PCPPMU_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}
static inline void xgene_pmu_v3_mask_int(struct xgene_pmu *xgene_pmu)
{
writel(PCPPMU_V3_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}
static inline void xgene_pmu_unmask_int(struct xgene_pmu *xgene_pmu)
{
writel(PCPPMU_INTCLRMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}
static inline void xgene_pmu_v3_unmask_int(struct xgene_pmu *xgene_pmu)
{
writel(PCPPMU_V3_INTCLRMASK,
xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}
static inline u64 xgene_pmu_read_counter32(struct xgene_pmu_dev *pmu_dev,
int idx)
{
return readl(pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
}
static inline u64 xgene_pmu_read_counter64(struct xgene_pmu_dev *pmu_dev,
int idx)
{
u32 lo, hi;
/*
	 * v3 has 64-bit counter registers composed of two 32-bit registers.
	 * This can be a problem if the counter increments and carries out
	 * of bit [31] between the two reads. Re-reading the high word and
	 * retrying until it is stable prevents returning a torn value.
*/
do {
hi = xgene_pmu_read_counter32(pmu_dev, 2 * idx + 1);
lo = xgene_pmu_read_counter32(pmu_dev, 2 * idx);
} while (hi != xgene_pmu_read_counter32(pmu_dev, 2 * idx + 1));
return (((u64)hi << 32) | lo);
}
static inline void
xgene_pmu_write_counter32(struct xgene_pmu_dev *pmu_dev, int idx, u64 val)
{
writel(val, pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
}
static inline void
xgene_pmu_write_counter64(struct xgene_pmu_dev *pmu_dev, int idx, u64 val)
{
u32 cnt_lo, cnt_hi;
cnt_hi = upper_32_bits(val);
cnt_lo = lower_32_bits(val);
	/* v3 has 64-bit counter registers composed of two 32-bit registers */
xgene_pmu_write_counter32(pmu_dev, 2 * idx, cnt_lo);
xgene_pmu_write_counter32(pmu_dev, 2 * idx + 1, cnt_hi);
}
static inline void
xgene_pmu_write_evttype(struct xgene_pmu_dev *pmu_dev, int idx, u32 val)
{
writel(val, pmu_dev->inf->csr + PMU_PMEVTYPER0 + (4 * idx));
}
static inline void
xgene_pmu_write_agentmsk(struct xgene_pmu_dev *pmu_dev, u32 val)
{
writel(val, pmu_dev->inf->csr + PMU_PMAMR0);
}
static inline void
xgene_pmu_v3_write_agentmsk(struct xgene_pmu_dev *pmu_dev, u32 val) { }
static inline void
xgene_pmu_write_agent1msk(struct xgene_pmu_dev *pmu_dev, u32 val)
{
writel(val, pmu_dev->inf->csr + PMU_PMAMR1);
}
static inline void
xgene_pmu_v3_write_agent1msk(struct xgene_pmu_dev *pmu_dev, u32 val) { }
static inline void
xgene_pmu_enable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
{
u32 val;
val = readl(pmu_dev->inf->csr + PMU_PMCNTENSET);
val |= 1 << idx;
writel(val, pmu_dev->inf->csr + PMU_PMCNTENSET);
}
static inline void
xgene_pmu_disable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
{
u32 val;
val = readl(pmu_dev->inf->csr + PMU_PMCNTENCLR);
val |= 1 << idx;
writel(val, pmu_dev->inf->csr + PMU_PMCNTENCLR);
}
static inline void
xgene_pmu_enable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
{
u32 val;
val = readl(pmu_dev->inf->csr + PMU_PMINTENSET);
val |= 1 << idx;
writel(val, pmu_dev->inf->csr + PMU_PMINTENSET);
}
static inline void
xgene_pmu_disable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
{
u32 val;
val = readl(pmu_dev->inf->csr + PMU_PMINTENCLR);
val |= 1 << idx;
writel(val, pmu_dev->inf->csr + PMU_PMINTENCLR);
}
static inline void xgene_pmu_reset_counters(struct xgene_pmu_dev *pmu_dev)
{
u32 val;
val = readl(pmu_dev->inf->csr + PMU_PMCR);
val |= PMU_PMCR_P;
writel(val, pmu_dev->inf->csr + PMU_PMCR);
}
static inline void xgene_pmu_start_counters(struct xgene_pmu_dev *pmu_dev)
{
u32 val;
val = readl(pmu_dev->inf->csr + PMU_PMCR);
val |= PMU_PMCR_E;
writel(val, pmu_dev->inf->csr + PMU_PMCR);
}
static inline void xgene_pmu_stop_counters(struct xgene_pmu_dev *pmu_dev)
{
u32 val;
val = readl(pmu_dev->inf->csr + PMU_PMCR);
val &= ~PMU_PMCR_E;
writel(val, pmu_dev->inf->csr + PMU_PMCR);
}
static void xgene_perf_pmu_enable(struct pmu *pmu)
{
struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
struct xgene_pmu *xgene_pmu = pmu_dev->parent;
bool enabled = !bitmap_empty(pmu_dev->cntr_assign_mask,
pmu_dev->max_counters);
if (!enabled)
return;
xgene_pmu->ops->start_counters(pmu_dev);
}
static void xgene_perf_pmu_disable(struct pmu *pmu)
{
struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
struct xgene_pmu *xgene_pmu = pmu_dev->parent;
xgene_pmu->ops->stop_counters(pmu_dev);
}
static int xgene_perf_event_init(struct perf_event *event)
{
struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
struct hw_perf_event *hw = &event->hw;
struct perf_event *sibling;
/* Test the event attr type check for PMU enumeration */
if (event->attr.type != event->pmu->type)
return -ENOENT;
/*
	 * SoC PMU counters are shared across all cores, so per-process
	 * mode and event sampling mode are not supported.
*/
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EINVAL;
if (event->cpu < 0)
return -EINVAL;
/*
	 * Many perf core operations (e.g. event rotation) operate on a
* single CPU context. This is obvious for CPU PMUs, where one
* expects the same sets of events being observed on all CPUs,
* but can lead to issues for off-core PMUs, where each
* event could be theoretically assigned to a different CPU. To
* mitigate this, we enforce CPU assignment to one, selected
* processor (the one described in the "cpumask" attribute).
*/
event->cpu = cpumask_first(&pmu_dev->parent->cpu);
hw->config = event->attr.config;
/*
	 * Each bit of the config1 field represents an agent from which the
	 * request for the event comes. The event is counted only if it is
	 * caused by a request from an agent whose bit is cleared.
	 * By default, the event is counted for all agents.
*/
hw->config_base = event->attr.config1;
/*
* We must NOT create groups containing mixed PMUs, although software
* events are acceptable
*/
if (event->group_leader->pmu != event->pmu &&
!is_software_event(event->group_leader))
return -EINVAL;
for_each_sibling_event(sibling, event->group_leader) {
if (sibling->pmu != event->pmu &&
!is_software_event(sibling))
return -EINVAL;
}
return 0;
}
static void xgene_perf_enable_event(struct perf_event *event)
{
struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
struct xgene_pmu *xgene_pmu = pmu_dev->parent;
xgene_pmu->ops->write_evttype(pmu_dev, GET_CNTR(event),
GET_EVENTID(event));
xgene_pmu->ops->write_agentmsk(pmu_dev, ~((u32)GET_AGENTID(event)));
if (pmu_dev->inf->type == PMU_TYPE_IOB)
xgene_pmu->ops->write_agent1msk(pmu_dev,
~((u32)GET_AGENT1ID(event)));
xgene_pmu->ops->enable_counter(pmu_dev, GET_CNTR(event));
xgene_pmu->ops->enable_counter_int(pmu_dev, GET_CNTR(event));
}
static void xgene_perf_disable_event(struct perf_event *event)
{
struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
struct xgene_pmu *xgene_pmu = pmu_dev->parent;
xgene_pmu->ops->disable_counter(pmu_dev, GET_CNTR(event));
xgene_pmu->ops->disable_counter_int(pmu_dev, GET_CNTR(event));
}
static void xgene_perf_event_set_period(struct perf_event *event)
{
struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
struct xgene_pmu *xgene_pmu = pmu_dev->parent;
struct hw_perf_event *hw = &event->hw;
/*
	 * A 32-bit counter has a period of 2^32. To account for the
	 * possibility of extreme interrupt latency we program it for a
	 * period of half that, so the interrupt can hopefully be handled
	 * before another 2^31 events occur and the counter overtakes its
	 * previous value. A 64-bit counter is not expected to overflow.
*/
u64 val = 1ULL << 31;
local64_set(&hw->prev_count, val);
xgene_pmu->ops->write_counter(pmu_dev, hw->idx, val);
}
static void xgene_perf_event_update(struct perf_event *event)
{
struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
struct xgene_pmu *xgene_pmu = pmu_dev->parent;
struct hw_perf_event *hw = &event->hw;
u64 delta, prev_raw_count, new_raw_count;
again:
prev_raw_count = local64_read(&hw->prev_count);
new_raw_count = xgene_pmu->ops->read_counter(pmu_dev, GET_CNTR(event));
if (local64_cmpxchg(&hw->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
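	/*
	 * Note added for clarity (assuming max_period is the counter's
	 * full-width mask): the subtraction below is reduced modulo the
	 * counter width, so the delta is correct even if the counter
	 * wrapped between reads, e.g. for a 32-bit counter
	 * prev = 0xfffffff0 and new = 0x10 give delta = 0x20.
	 */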
delta = (new_raw_count - prev_raw_count) & pmu_dev->max_period;
local64_add(delta, &event->count);
}
static void xgene_perf_read(struct perf_event *event)
{
xgene_perf_event_update(event);
}
static void xgene_perf_start(struct perf_event *event, int flags)
{
struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
struct xgene_pmu *xgene_pmu = pmu_dev->parent;
struct hw_perf_event *hw = &event->hw;
if (WARN_ON_ONCE(!(hw->state & PERF_HES_STOPPED)))
return;
WARN_ON_ONCE(!(hw->state & PERF_HES_UPTODATE));
hw->state = 0;
xgene_perf_event_set_period(event);
if (flags & PERF_EF_RELOAD) {
u64 prev_raw_count = local64_read(&hw->prev_count);
xgene_pmu->ops->write_counter(pmu_dev, GET_CNTR(event),
prev_raw_count);
}
xgene_perf_enable_event(event);
perf_event_update_userpage(event);
}
static void xgene_perf_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hw = &event->hw;
if (hw->state & PERF_HES_UPTODATE)
return;
xgene_perf_disable_event(event);
WARN_ON_ONCE(hw->state & PERF_HES_STOPPED);
hw->state |= PERF_HES_STOPPED;
if (hw->state & PERF_HES_UPTODATE)
return;
xgene_perf_read(event);
hw->state |= PERF_HES_UPTODATE;
}
static int xgene_perf_add(struct perf_event *event, int flags)
{
struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
struct hw_perf_event *hw = &event->hw;
hw->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
/* Allocate an event counter */
hw->idx = get_next_avail_cntr(pmu_dev);
if (hw->idx < 0)
return -EAGAIN;
/* Update counter event pointer for Interrupt handler */
pmu_dev->pmu_counter_event[hw->idx] = event;
if (flags & PERF_EF_START)
xgene_perf_start(event, PERF_EF_RELOAD);
return 0;
}
static void xgene_perf_del(struct perf_event *event, int flags)
{
struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
struct hw_perf_event *hw = &event->hw;
xgene_perf_stop(event, PERF_EF_UPDATE);
/* clear the assigned counter */
clear_avail_cntr(pmu_dev, GET_CNTR(event));
perf_event_update_userpage(event);
pmu_dev->pmu_counter_event[hw->idx] = NULL;
}
static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name)
{
struct xgene_pmu *xgene_pmu;
if (pmu_dev->parent->version == PCP_PMU_V3)
pmu_dev->max_period = PMU_V3_CNT_MAX_PERIOD;
else
pmu_dev->max_period = PMU_CNT_MAX_PERIOD;
	/* The first PMU version supports only a single event counter */
xgene_pmu = pmu_dev->parent;
if (xgene_pmu->version == PCP_PMU_V1)
pmu_dev->max_counters = 1;
else
pmu_dev->max_counters = PMU_MAX_COUNTERS;
/* Perf driver registration */
pmu_dev->pmu = (struct pmu) {
.attr_groups = pmu_dev->attr_groups,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = xgene_perf_pmu_enable,
.pmu_disable = xgene_perf_pmu_disable,
.event_init = xgene_perf_event_init,
.add = xgene_perf_add,
.del = xgene_perf_del,
.start = xgene_perf_start,
.stop = xgene_perf_stop,
.read = xgene_perf_read,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
/* Hardware counter init */
xgene_pmu->ops->stop_counters(pmu_dev);
xgene_pmu->ops->reset_counters(pmu_dev);
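	/*
	 * Note added for clarity: passing -1 below lets the perf core
	 * allocate a dynamic PMU type id, and "name" becomes the directory
	 * under /sys/bus/event_source/devices/.
	 */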
return perf_pmu_register(&pmu_dev->pmu, name, -1);
}
static int
xgene_pmu_dev_add(struct xgene_pmu *xgene_pmu, struct xgene_pmu_dev_ctx *ctx)
{
struct device *dev = xgene_pmu->dev;
struct xgene_pmu_dev *pmu;
pmu = devm_kzalloc(dev, sizeof(*pmu), GFP_KERNEL);
if (!pmu)
return -ENOMEM;
pmu->parent = xgene_pmu;
pmu->inf = &ctx->inf;
ctx->pmu_dev = pmu;
switch (pmu->inf->type) {
case PMU_TYPE_L3C:
if (!(xgene_pmu->l3c_active_mask & pmu->inf->enable_mask))
return -ENODEV;
if (xgene_pmu->version == PCP_PMU_V3)
pmu->attr_groups = l3c_pmu_v3_attr_groups;
else
pmu->attr_groups = l3c_pmu_attr_groups;
break;
case PMU_TYPE_IOB:
if (xgene_pmu->version == PCP_PMU_V3)
pmu->attr_groups = iob_fast_pmu_v3_attr_groups;
else
pmu->attr_groups = iob_pmu_attr_groups;
break;
case PMU_TYPE_IOB_SLOW:
if (xgene_pmu->version == PCP_PMU_V3)
pmu->attr_groups = iob_slow_pmu_v3_attr_groups;
break;
case PMU_TYPE_MCB:
if (!(xgene_pmu->mcb_active_mask & pmu->inf->enable_mask))
return -ENODEV;
if (xgene_pmu->version == PCP_PMU_V3)
pmu->attr_groups = mcb_pmu_v3_attr_groups;
else
pmu->attr_groups = mcb_pmu_attr_groups;
break;
case PMU_TYPE_MC:
if (!(xgene_pmu->mc_active_mask & pmu->inf->enable_mask))
return -ENODEV;
if (xgene_pmu->version == PCP_PMU_V3)
pmu->attr_groups = mc_pmu_v3_attr_groups;
else
pmu->attr_groups = mc_pmu_attr_groups;
break;
default:
return -EINVAL;
}
if (xgene_init_perf(pmu, ctx->name)) {
dev_err(dev, "%s PMU: Failed to init perf driver\n", ctx->name);
return -ENODEV;
}
dev_info(dev, "%s PMU registered\n", ctx->name);
return 0;
}
static void _xgene_pmu_isr(int irq, struct xgene_pmu_dev *pmu_dev)
{
struct xgene_pmu *xgene_pmu = pmu_dev->parent;
void __iomem *csr = pmu_dev->inf->csr;
u32 pmovsr;
int idx;
xgene_pmu->ops->stop_counters(pmu_dev);
if (xgene_pmu->version == PCP_PMU_V3)
pmovsr = readl(csr + PMU_PMOVSSET) & PMU_OVERFLOW_MASK;
else
pmovsr = readl(csr + PMU_PMOVSR) & PMU_OVERFLOW_MASK;
if (!pmovsr)
goto out;
/* Clear interrupt flag */
if (xgene_pmu->version == PCP_PMU_V1)
writel(0x0, csr + PMU_PMOVSR);
else if (xgene_pmu->version == PCP_PMU_V2)
writel(pmovsr, csr + PMU_PMOVSR);
else
writel(pmovsr, csr + PMU_PMOVSCLR);
for (idx = 0; idx < PMU_MAX_COUNTERS; idx++) {
struct perf_event *event = pmu_dev->pmu_counter_event[idx];
int overflowed = pmovsr & BIT(idx);
/* Ignore if we don't have an event. */
if (!event || !overflowed)
continue;
xgene_perf_event_update(event);
xgene_perf_event_set_period(event);
}
out:
xgene_pmu->ops->start_counters(pmu_dev);
}
static irqreturn_t xgene_pmu_isr(int irq, void *dev_id)
{
u32 intr_mcu, intr_mcb, intr_l3c, intr_iob;
struct xgene_pmu_dev_ctx *ctx;
struct xgene_pmu *xgene_pmu = dev_id;
u32 val;
raw_spin_lock(&xgene_pmu->lock);
/* Get Interrupt PMU source */
val = readl(xgene_pmu->pcppmu_csr + PCPPMU_INTSTATUS_REG);
if (xgene_pmu->version == PCP_PMU_V3) {
intr_mcu = PCPPMU_V3_INT_MCU;
intr_mcb = PCPPMU_V3_INT_MCB;
intr_l3c = PCPPMU_V3_INT_L3C;
intr_iob = PCPPMU_V3_INT_IOB;
} else {
intr_mcu = PCPPMU_INT_MCU;
intr_mcb = PCPPMU_INT_MCB;
intr_l3c = PCPPMU_INT_L3C;
intr_iob = PCPPMU_INT_IOB;
}
if (val & intr_mcu) {
list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) {
_xgene_pmu_isr(irq, ctx->pmu_dev);
}
}
if (val & intr_mcb) {
list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) {
_xgene_pmu_isr(irq, ctx->pmu_dev);
}
}
if (val & intr_l3c) {
list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) {
_xgene_pmu_isr(irq, ctx->pmu_dev);
}
}
if (val & intr_iob) {
list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) {
_xgene_pmu_isr(irq, ctx->pmu_dev);
}
}
raw_spin_unlock(&xgene_pmu->lock);
return IRQ_HANDLED;
}
static int acpi_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
struct platform_device *pdev)
{
void __iomem *csw_csr, *mcba_csr, *mcbb_csr;
unsigned int reg;
csw_csr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(csw_csr)) {
dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
return PTR_ERR(csw_csr);
}
mcba_csr = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(mcba_csr)) {
dev_err(&pdev->dev, "ioremap failed for MCBA CSR resource\n");
return PTR_ERR(mcba_csr);
}
mcbb_csr = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(mcbb_csr)) {
dev_err(&pdev->dev, "ioremap failed for MCBB CSR resource\n");
return PTR_ERR(mcbb_csr);
}
xgene_pmu->l3c_active_mask = 0x1;
reg = readl(csw_csr + CSW_CSWCR);
if (reg & CSW_CSWCR_DUALMCB_MASK) {
/* Dual MCB active */
xgene_pmu->mcb_active_mask = 0x3;
/* Probe all active MC(s) */
reg = readl(mcbb_csr + CSW_CSWCR);
xgene_pmu->mc_active_mask =
(reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
} else {
/* Single MCB active */
xgene_pmu->mcb_active_mask = 0x1;
/* Probe all active MC(s) */
reg = readl(mcba_csr + CSW_CSWCR);
xgene_pmu->mc_active_mask =
(reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
}
return 0;
}
static int acpi_pmu_v3_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
struct platform_device *pdev)
{
void __iomem *csw_csr;
unsigned int reg;
u32 mcb0routing;
u32 mcb1routing;
csw_csr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(csw_csr)) {
dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
return PTR_ERR(csw_csr);
}
reg = readl(csw_csr + CSW_CSWCR);
mcb0routing = CSW_CSWCR_MCB0_ROUTING(reg);
mcb1routing = CSW_CSWCR_MCB1_ROUTING(reg);
if (reg & CSW_CSWCR_DUALMCB_MASK) {
/* Dual MCB active */
xgene_pmu->mcb_active_mask = 0x3;
/* Probe all active L3C(s), maximum is 8 */
xgene_pmu->l3c_active_mask = 0xFF;
/* Probe all active MC(s), maximum is 8 */
if ((mcb0routing == 0x2) && (mcb1routing == 0x2))
xgene_pmu->mc_active_mask = 0xFF;
else if ((mcb0routing == 0x1) && (mcb1routing == 0x1))
xgene_pmu->mc_active_mask = 0x33;
else
xgene_pmu->mc_active_mask = 0x11;
} else {
/* Single MCB active */
xgene_pmu->mcb_active_mask = 0x1;
/* Probe all active L3C(s), maximum is 4 */
xgene_pmu->l3c_active_mask = 0x0F;
/* Probe all active MC(s), maximum is 4 */
if (mcb0routing == 0x2)
xgene_pmu->mc_active_mask = 0x0F;
else if (mcb0routing == 0x1)
xgene_pmu->mc_active_mask = 0x03;
else
xgene_pmu->mc_active_mask = 0x01;
}
return 0;
}
static int fdt_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
struct platform_device *pdev)
{
struct regmap *csw_map, *mcba_map, *mcbb_map;
struct device_node *np = pdev->dev.of_node;
unsigned int reg;
csw_map = syscon_regmap_lookup_by_phandle(np, "regmap-csw");
if (IS_ERR(csw_map)) {
dev_err(&pdev->dev, "unable to get syscon regmap csw\n");
return PTR_ERR(csw_map);
}
mcba_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcba");
if (IS_ERR(mcba_map)) {
dev_err(&pdev->dev, "unable to get syscon regmap mcba\n");
return PTR_ERR(mcba_map);
}
mcbb_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcbb");
if (IS_ERR(mcbb_map)) {
dev_err(&pdev->dev, "unable to get syscon regmap mcbb\n");
return PTR_ERR(mcbb_map);
}
xgene_pmu->l3c_active_mask = 0x1;
	if (regmap_read(csw_map, CSW_CSWCR, &reg))
return -EINVAL;
if (reg & CSW_CSWCR_DUALMCB_MASK) {
/* Dual MCB active */
xgene_pmu->mcb_active_mask = 0x3;
/* Probe all active MC(s) */
		if (regmap_read(mcbb_map, MCBADDRMR, &reg))
return 0;
xgene_pmu->mc_active_mask =
(reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
} else {
/* Single MCB active */
xgene_pmu->mcb_active_mask = 0x1;
/* Probe all active MC(s) */
		if (regmap_read(mcba_map, MCBADDRMR, &reg))
return 0;
xgene_pmu->mc_active_mask =
(reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
}
return 0;
}
static int xgene_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
struct platform_device *pdev)
{
if (has_acpi_companion(&pdev->dev)) {
if (xgene_pmu->version == PCP_PMU_V3)
return acpi_pmu_v3_probe_active_mcb_mcu_l3c(xgene_pmu,
pdev);
else
return acpi_pmu_probe_active_mcb_mcu_l3c(xgene_pmu,
pdev);
}
return fdt_pmu_probe_active_mcb_mcu_l3c(xgene_pmu, pdev);
}
static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
{
switch (type) {
case PMU_TYPE_L3C:
return devm_kasprintf(dev, GFP_KERNEL, "l3c%d", id);
case PMU_TYPE_IOB:
return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id);
case PMU_TYPE_IOB_SLOW:
return devm_kasprintf(dev, GFP_KERNEL, "iob_slow%d", id);
case PMU_TYPE_MCB:
return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id);
case PMU_TYPE_MC:
return devm_kasprintf(dev, GFP_KERNEL, "mc%d", id);
default:
return devm_kasprintf(dev, GFP_KERNEL, "unknown");
}
}
#if defined(CONFIG_ACPI)
static struct
xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
struct acpi_device *adev, u32 type)
{
struct device *dev = xgene_pmu->dev;
struct list_head resource_list;
struct xgene_pmu_dev_ctx *ctx;
const union acpi_object *obj;
struct hw_pmu_info *inf;
void __iomem *dev_csr;
struct resource res;
struct resource_entry *rentry;
int enable_bit;
int rc;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return NULL;
INIT_LIST_HEAD(&resource_list);
rc = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
if (rc <= 0) {
dev_err(dev, "PMU type %d: No resources found\n", type);
return NULL;
}
list_for_each_entry(rentry, &resource_list, node) {
if (resource_type(rentry->res) == IORESOURCE_MEM) {
res = *rentry->res;
rentry = NULL;
break;
}
}
acpi_dev_free_resource_list(&resource_list);
if (rentry) {
dev_err(dev, "PMU type %d: No memory resource found\n", type);
return NULL;
}
dev_csr = devm_ioremap_resource(dev, &res);
if (IS_ERR(dev_csr)) {
dev_err(dev, "PMU type %d: Fail to map resource\n", type);
return NULL;
}
/* A PMU device node without enable-bit-index is always enabled */
rc = acpi_dev_get_property(adev, "enable-bit-index",
ACPI_TYPE_INTEGER, &obj);
if (rc < 0)
enable_bit = 0;
else
enable_bit = (int) obj->integer.value;
ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
if (!ctx->name) {
dev_err(dev, "PMU type %d: Fail to get device name\n", type);
return NULL;
}
inf = &ctx->inf;
inf->type = type;
inf->csr = dev_csr;
inf->enable_mask = 1 << enable_bit;
return ctx;
}
static const struct acpi_device_id xgene_pmu_acpi_type_match[] = {
{"APMC0D5D", PMU_TYPE_L3C},
{"APMC0D5E", PMU_TYPE_IOB},
{"APMC0D5F", PMU_TYPE_MCB},
{"APMC0D60", PMU_TYPE_MC},
{"APMC0D84", PMU_TYPE_L3C},
{"APMC0D85", PMU_TYPE_IOB},
{"APMC0D86", PMU_TYPE_IOB_SLOW},
{"APMC0D87", PMU_TYPE_MCB},
{"APMC0D88", PMU_TYPE_MC},
{},
};
static const struct acpi_device_id *xgene_pmu_acpi_match_type(
const struct acpi_device_id *ids,
struct acpi_device *adev)
{
const struct acpi_device_id *match_id = NULL;
const struct acpi_device_id *id;
for (id = ids; id->id[0] || id->cls; id++) {
if (!acpi_match_device_ids(adev, id))
match_id = id;
else if (match_id)
break;
}
return match_id;
}
static acpi_status acpi_pmu_dev_add(acpi_handle handle, u32 level,
void *data, void **return_value)
{
struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
const struct acpi_device_id *acpi_id;
struct xgene_pmu *xgene_pmu = data;
struct xgene_pmu_dev_ctx *ctx;
if (!adev || acpi_bus_get_status(adev) || !adev->status.present)
return AE_OK;
acpi_id = xgene_pmu_acpi_match_type(xgene_pmu_acpi_type_match, adev);
if (!acpi_id)
return AE_OK;
ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, (u32)acpi_id->driver_data);
if (!ctx)
return AE_OK;
if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
/* Can't add the PMU device, skip it */
devm_kfree(xgene_pmu->dev, ctx);
return AE_OK;
}
switch (ctx->inf.type) {
case PMU_TYPE_L3C:
list_add(&ctx->next, &xgene_pmu->l3cpmus);
break;
case PMU_TYPE_IOB:
list_add(&ctx->next, &xgene_pmu->iobpmus);
break;
case PMU_TYPE_IOB_SLOW:
list_add(&ctx->next, &xgene_pmu->iobpmus);
break;
case PMU_TYPE_MCB:
list_add(&ctx->next, &xgene_pmu->mcbpmus);
break;
case PMU_TYPE_MC:
list_add(&ctx->next, &xgene_pmu->mcpmus);
break;
}
return AE_OK;
}
static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
struct platform_device *pdev)
{
struct device *dev = xgene_pmu->dev;
acpi_handle handle;
acpi_status status;
handle = ACPI_HANDLE(dev);
if (!handle)
return -EINVAL;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
acpi_pmu_dev_add, NULL, xgene_pmu, NULL);
if (ACPI_FAILURE(status)) {
dev_err(dev, "failed to probe PMU devices\n");
return -ENODEV;
}
return 0;
}
#else
static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
struct platform_device *pdev)
{
return 0;
}
#endif
static struct
xgene_pmu_dev_ctx *fdt_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
struct device_node *np, u32 type)
{
struct device *dev = xgene_pmu->dev;
struct xgene_pmu_dev_ctx *ctx;
struct hw_pmu_info *inf;
void __iomem *dev_csr;
struct resource res;
int enable_bit;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return NULL;
if (of_address_to_resource(np, 0, &res) < 0) {
dev_err(dev, "PMU type %d: No resource address found\n", type);
return NULL;
}
dev_csr = devm_ioremap_resource(dev, &res);
if (IS_ERR(dev_csr)) {
dev_err(dev, "PMU type %d: Fail to map resource\n", type);
return NULL;
}
/* A PMU device node without enable-bit-index is always enabled */
if (of_property_read_u32(np, "enable-bit-index", &enable_bit))
enable_bit = 0;
ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
if (!ctx->name) {
dev_err(dev, "PMU type %d: Fail to get device name\n", type);
return NULL;
}
inf = &ctx->inf;
inf->type = type;
inf->csr = dev_csr;
inf->enable_mask = 1 << enable_bit;
return ctx;
}
static int fdt_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
struct platform_device *pdev)
{
struct xgene_pmu_dev_ctx *ctx;
struct device_node *np;
for_each_child_of_node(pdev->dev.of_node, np) {
if (!of_device_is_available(np))
continue;
if (of_device_is_compatible(np, "apm,xgene-pmu-l3c"))
ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_L3C);
else if (of_device_is_compatible(np, "apm,xgene-pmu-iob"))
ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_IOB);
else if (of_device_is_compatible(np, "apm,xgene-pmu-mcb"))
ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MCB);
else if (of_device_is_compatible(np, "apm,xgene-pmu-mc"))
ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MC);
else
ctx = NULL;
if (!ctx)
continue;
if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
/* Can't add the PMU device, skip it */
devm_kfree(xgene_pmu->dev, ctx);
continue;
}
switch (ctx->inf.type) {
case PMU_TYPE_L3C:
list_add(&ctx->next, &xgene_pmu->l3cpmus);
break;
case PMU_TYPE_IOB:
list_add(&ctx->next, &xgene_pmu->iobpmus);
break;
case PMU_TYPE_IOB_SLOW:
list_add(&ctx->next, &xgene_pmu->iobpmus);
break;
case PMU_TYPE_MCB:
list_add(&ctx->next, &xgene_pmu->mcbpmus);
break;
case PMU_TYPE_MC:
list_add(&ctx->next, &xgene_pmu->mcpmus);
break;
}
}
return 0;
}
static int xgene_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
struct platform_device *pdev)
{
if (has_acpi_companion(&pdev->dev))
return acpi_pmu_probe_pmu_dev(xgene_pmu, pdev);
return fdt_pmu_probe_pmu_dev(xgene_pmu, pdev);
}
static const struct xgene_pmu_data xgene_pmu_data = {
.id = PCP_PMU_V1,
};
static const struct xgene_pmu_data xgene_pmu_v2_data = {
.id = PCP_PMU_V2,
};
static const struct xgene_pmu_ops xgene_pmu_ops = {
.mask_int = xgene_pmu_mask_int,
.unmask_int = xgene_pmu_unmask_int,
.read_counter = xgene_pmu_read_counter32,
.write_counter = xgene_pmu_write_counter32,
.write_evttype = xgene_pmu_write_evttype,
.write_agentmsk = xgene_pmu_write_agentmsk,
.write_agent1msk = xgene_pmu_write_agent1msk,
.enable_counter = xgene_pmu_enable_counter,
.disable_counter = xgene_pmu_disable_counter,
.enable_counter_int = xgene_pmu_enable_counter_int,
.disable_counter_int = xgene_pmu_disable_counter_int,
.reset_counters = xgene_pmu_reset_counters,
.start_counters = xgene_pmu_start_counters,
.stop_counters = xgene_pmu_stop_counters,
};
static const struct xgene_pmu_ops xgene_pmu_v3_ops = {
.mask_int = xgene_pmu_v3_mask_int,
.unmask_int = xgene_pmu_v3_unmask_int,
.read_counter = xgene_pmu_read_counter64,
.write_counter = xgene_pmu_write_counter64,
.write_evttype = xgene_pmu_write_evttype,
.write_agentmsk = xgene_pmu_v3_write_agentmsk,
.write_agent1msk = xgene_pmu_v3_write_agent1msk,
.enable_counter = xgene_pmu_enable_counter,
.disable_counter = xgene_pmu_disable_counter,
.enable_counter_int = xgene_pmu_enable_counter_int,
.disable_counter_int = xgene_pmu_disable_counter_int,
.reset_counters = xgene_pmu_reset_counters,
.start_counters = xgene_pmu_start_counters,
.stop_counters = xgene_pmu_stop_counters,
};
static const struct of_device_id xgene_pmu_of_match[] = {
{ .compatible = "apm,xgene-pmu", .data = &xgene_pmu_data },
{ .compatible = "apm,xgene-pmu-v2", .data = &xgene_pmu_v2_data },
{},
};
MODULE_DEVICE_TABLE(of, xgene_pmu_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_pmu_acpi_match[] = {
{"APMC0D5B", PCP_PMU_V1},
{"APMC0D5C", PCP_PMU_V2},
{"APMC0D83", PCP_PMU_V3},
{},
};
MODULE_DEVICE_TABLE(acpi, xgene_pmu_acpi_match);
#endif
static int xgene_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
struct xgene_pmu *xgene_pmu = hlist_entry_safe(node, struct xgene_pmu,
node);
if (cpumask_empty(&xgene_pmu->cpu))
cpumask_set_cpu(cpu, &xgene_pmu->cpu);
	/* The overflow interrupt should also be handled on the same CPU */
WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu));
return 0;
}
static int xgene_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct xgene_pmu *xgene_pmu = hlist_entry_safe(node, struct xgene_pmu,
node);
struct xgene_pmu_dev_ctx *ctx;
unsigned int target;
if (!cpumask_test_and_clear_cpu(cpu, &xgene_pmu->cpu))
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) {
perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
}
list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) {
perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
}
list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) {
perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
}
list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) {
perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
}
cpumask_set_cpu(target, &xgene_pmu->cpu);
	/* The overflow interrupt should also be handled on the same CPU */
WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu));
return 0;
}
static int xgene_pmu_probe(struct platform_device *pdev)
{
const struct xgene_pmu_data *dev_data;
const struct of_device_id *of_id;
struct xgene_pmu *xgene_pmu;
int irq, rc;
int version;
/* Install a hook to update the reader CPU in case it goes offline */
rc = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
"CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE",
xgene_pmu_online_cpu,
xgene_pmu_offline_cpu);
if (rc)
return rc;
xgene_pmu = devm_kzalloc(&pdev->dev, sizeof(*xgene_pmu), GFP_KERNEL);
if (!xgene_pmu)
return -ENOMEM;
xgene_pmu->dev = &pdev->dev;
platform_set_drvdata(pdev, xgene_pmu);
version = -EINVAL;
of_id = of_match_device(xgene_pmu_of_match, &pdev->dev);
if (of_id) {
dev_data = (const struct xgene_pmu_data *) of_id->data;
version = dev_data->id;
}
#ifdef CONFIG_ACPI
if (ACPI_COMPANION(&pdev->dev)) {
const struct acpi_device_id *acpi_id;
acpi_id = acpi_match_device(xgene_pmu_acpi_match, &pdev->dev);
if (acpi_id)
version = (int) acpi_id->driver_data;
}
#endif
if (version < 0)
return -ENODEV;
if (version == PCP_PMU_V3)
xgene_pmu->ops = &xgene_pmu_v3_ops;
else
xgene_pmu->ops = &xgene_pmu_ops;
INIT_LIST_HEAD(&xgene_pmu->l3cpmus);
INIT_LIST_HEAD(&xgene_pmu->iobpmus);
INIT_LIST_HEAD(&xgene_pmu->mcbpmus);
INIT_LIST_HEAD(&xgene_pmu->mcpmus);
xgene_pmu->version = version;
dev_info(&pdev->dev, "X-Gene PMU version %d\n", xgene_pmu->version);
xgene_pmu->pcppmu_csr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xgene_pmu->pcppmu_csr)) {
dev_err(&pdev->dev, "ioremap failed for PCP PMU resource\n");
return PTR_ERR(xgene_pmu->pcppmu_csr);
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -EINVAL;
rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr,
IRQF_NOBALANCING | IRQF_NO_THREAD,
dev_name(&pdev->dev), xgene_pmu);
if (rc) {
dev_err(&pdev->dev, "Could not request IRQ %d\n", irq);
return rc;
}
xgene_pmu->irq = irq;
raw_spin_lock_init(&xgene_pmu->lock);
/* Check for active MCBs and MCUs */
rc = xgene_pmu_probe_active_mcb_mcu_l3c(xgene_pmu, pdev);
if (rc) {
dev_warn(&pdev->dev, "Unknown MCB/MCU active status\n");
xgene_pmu->mcb_active_mask = 0x1;
xgene_pmu->mc_active_mask = 0x1;
}
/* Add this instance to the list used by the hotplug callback */
rc = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
&xgene_pmu->node);
if (rc) {
dev_err(&pdev->dev, "Error %d registering hotplug", rc);
return rc;
}
/* Walk through the tree for all PMU perf devices */
rc = xgene_pmu_probe_pmu_dev(xgene_pmu, pdev);
if (rc) {
dev_err(&pdev->dev, "No PMU perf devices found!\n");
goto out_unregister;
}
/* Enable interrupt */
xgene_pmu->ops->unmask_int(xgene_pmu);
return 0;
out_unregister:
cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
&xgene_pmu->node);
return rc;
}
static void
xgene_pmu_dev_cleanup(struct xgene_pmu *xgene_pmu, struct list_head *pmus)
{
struct xgene_pmu_dev_ctx *ctx;
list_for_each_entry(ctx, pmus, next) {
perf_pmu_unregister(&ctx->pmu_dev->pmu);
}
}
static int xgene_pmu_remove(struct platform_device *pdev)
{
struct xgene_pmu *xgene_pmu = dev_get_drvdata(&pdev->dev);
xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->l3cpmus);
xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->iobpmus);
xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcbpmus);
xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus);
cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
&xgene_pmu->node);
return 0;
}
static struct platform_driver xgene_pmu_driver = {
.probe = xgene_pmu_probe,
.remove = xgene_pmu_remove,
.driver = {
.name = "xgene-pmu",
.of_match_table = xgene_pmu_of_match,
.acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match),
.suppress_bind_attrs = true,
},
};
builtin_platform_driver(xgene_pmu_driver);
| linux-master | drivers/perf/xgene_pmu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* CAVIUM THUNDERX2 SoC PMU UNCORE
* Copyright (C) 2018 Cavium Inc.
* Author: Ganapatrao Kulkarni <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/cpuhotplug.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
/* Each ThunderX2 (TX2) socket has an L3C and a DMC uncore PMU device.
 * Each uncore PMU device consists of 4 independent programmable counters.
 * Counters are 32-bit and do not support an overflow interrupt, so they
 * need to be sampled before they overflow (i.e. at every 2 seconds).
 */
#define TX2_PMU_DMC_L3C_MAX_COUNTERS 4
#define TX2_PMU_CCPI2_MAX_COUNTERS 8
#define TX2_PMU_MAX_COUNTERS TX2_PMU_CCPI2_MAX_COUNTERS
#define TX2_PMU_DMC_CHANNELS 8
#define TX2_PMU_L3_TILES 16
#define TX2_PMU_HRTIMER_INTERVAL (2 * NSEC_PER_SEC)
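/* Note added for clarity: the hrtimer set up elsewhere in this driver
 * samples the 32-bit counters at this interval so they are read before
 * they can wrap.
 */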
#define GET_EVENTID(ev, mask) ((ev->hw.config) & mask)
#define GET_COUNTERID(ev, mask) ((ev->hw.idx) & mask)
/* 1 byte per counter (4 counters).
 * The event id is encoded in bits [5:1] of each byte.
 */
#define DMC_EVENT_CFG(idx, val) ((val) << (((idx) * 8) + 1))
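/* Example added for clarity: DMC_EVENT_CFG(2, 0x1f) expands to 0x1f << 17,
 * i.e. the 5-bit event field of counter 2.
 */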
/* bits [3:0] select a counter; the counters are indexed from 8 to 15. */
#define CCPI2_COUNTER_OFFSET 8
#define L3C_COUNTER_CTL 0xA8
#define L3C_COUNTER_DATA 0xAC
#define DMC_COUNTER_CTL 0x234
#define DMC_COUNTER_DATA 0x240
#define CCPI2_PERF_CTL 0x108
#define CCPI2_COUNTER_CTL 0x10C
#define CCPI2_COUNTER_SEL 0x12c
#define CCPI2_COUNTER_DATA_L 0x130
#define CCPI2_COUNTER_DATA_H 0x134
/* L3C event IDs */
#define L3_EVENT_READ_REQ 0xD
#define L3_EVENT_WRITEBACK_REQ 0xE
#define L3_EVENT_INV_N_WRITE_REQ 0xF
#define L3_EVENT_INV_REQ 0x10
#define L3_EVENT_EVICT_REQ 0x13
#define L3_EVENT_INV_N_WRITE_HIT 0x14
#define L3_EVENT_INV_HIT 0x15
#define L3_EVENT_READ_HIT 0x17
#define L3_EVENT_MAX 0x18
/* DMC event IDs */
#define DMC_EVENT_COUNT_CYCLES 0x1
#define DMC_EVENT_WRITE_TXNS 0xB
#define DMC_EVENT_DATA_TRANSFERS 0xD
#define DMC_EVENT_READ_TXNS 0xF
#define DMC_EVENT_MAX 0x10
#define CCPI2_EVENT_REQ_PKT_SENT 0x3D
#define CCPI2_EVENT_SNOOP_PKT_SENT 0x65
#define CCPI2_EVENT_DATA_PKT_SENT 0x105
#define CCPI2_EVENT_GIC_PKT_SENT 0x12D
#define CCPI2_EVENT_MAX 0x200
#define CCPI2_PERF_CTL_ENABLE BIT(0)
#define CCPI2_PERF_CTL_START BIT(1)
#define CCPI2_PERF_CTL_RESET BIT(4)
#define CCPI2_EVENT_LEVEL_RISING_EDGE BIT(10)
#define CCPI2_EVENT_TYPE_EDGE_SENSITIVE BIT(11)
enum tx2_uncore_type {
PMU_TYPE_L3C,
PMU_TYPE_DMC,
PMU_TYPE_CCPI2,
PMU_TYPE_INVALID,
};
/*
* Each socket has 3 uncore devices associated with a PMU. The DMC and
* L3C have 4 32-bit counters and the CCPI2 has 8 64-bit counters.
*/
struct tx2_uncore_pmu {
struct hlist_node hpnode;
struct list_head entry;
struct pmu pmu;
char *name;
int node;
int cpu;
u32 max_counters;
u32 counters_mask;
u32 prorate_factor;
u32 max_events;
u32 events_mask;
u64 hrtimer_interval;
void __iomem *base;
DECLARE_BITMAP(active_counters, TX2_PMU_MAX_COUNTERS);
struct perf_event *events[TX2_PMU_MAX_COUNTERS];
struct device *dev;
struct hrtimer hrtimer;
const struct attribute_group **attr_groups;
enum tx2_uncore_type type;
enum hrtimer_restart (*hrtimer_callback)(struct hrtimer *cb);
void (*init_cntr_base)(struct perf_event *event,
struct tx2_uncore_pmu *tx2_pmu);
void (*stop_event)(struct perf_event *event);
void (*start_event)(struct perf_event *event, int flags);
};
static LIST_HEAD(tx2_pmus);
static inline struct tx2_uncore_pmu *pmu_to_tx2_pmu(struct pmu *pmu)
{
return container_of(pmu, struct tx2_uncore_pmu, pmu);
}
#define TX2_PMU_FORMAT_ATTR(_var, _name, _format) \
static ssize_t \
__tx2_pmu_##_var##_show(struct device *dev, \
struct device_attribute *attr, \
char *page) \
{ \
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
return sysfs_emit(page, _format "\n"); \
} \
\
static struct device_attribute format_attr_##_var = \
__ATTR(_name, 0444, __tx2_pmu_##_var##_show, NULL)
TX2_PMU_FORMAT_ATTR(event, event, "config:0-4");
TX2_PMU_FORMAT_ATTR(event_ccpi2, event, "config:0-9");
static struct attribute *l3c_pmu_format_attrs[] = {
&format_attr_event.attr,
NULL,
};
static struct attribute *dmc_pmu_format_attrs[] = {
&format_attr_event.attr,
NULL,
};
static struct attribute *ccpi2_pmu_format_attrs[] = {
&format_attr_event_ccpi2.attr,
NULL,
};
static const struct attribute_group l3c_pmu_format_attr_group = {
.name = "format",
.attrs = l3c_pmu_format_attrs,
};
static const struct attribute_group dmc_pmu_format_attr_group = {
.name = "format",
.attrs = dmc_pmu_format_attrs,
};
static const struct attribute_group ccpi2_pmu_format_attr_group = {
.name = "format",
.attrs = ccpi2_pmu_format_attrs,
};
/*
* sysfs event attributes
*/
static ssize_t tx2_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *eattr;
eattr = container_of(attr, struct dev_ext_attribute, attr);
return sysfs_emit(buf, "event=0x%lx\n", (unsigned long) eattr->var);
}
#define TX2_EVENT_ATTR(name, config) \
PMU_EVENT_ATTR(name, tx2_pmu_event_attr_##name, \
config, tx2_pmu_event_show)
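/*
 * Note added for illustration: like the format attributes above, these event
 * aliases appear under /sys/bus/event_source/devices/<pmu>/events/, so a
 * counter can typically be requested with something like
 *   perf stat -a -e <tx2_pmu>/read_request/ sleep 1
 * where <tx2_pmu> stands for whatever name this driver registers for the
 * L3C PMU on the running system.
 */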
TX2_EVENT_ATTR(read_request, L3_EVENT_READ_REQ);
TX2_EVENT_ATTR(writeback_request, L3_EVENT_WRITEBACK_REQ);
TX2_EVENT_ATTR(inv_nwrite_request, L3_EVENT_INV_N_WRITE_REQ);
TX2_EVENT_ATTR(inv_request, L3_EVENT_INV_REQ);
TX2_EVENT_ATTR(evict_request, L3_EVENT_EVICT_REQ);
TX2_EVENT_ATTR(inv_nwrite_hit, L3_EVENT_INV_N_WRITE_HIT);
TX2_EVENT_ATTR(inv_hit, L3_EVENT_INV_HIT);
TX2_EVENT_ATTR(read_hit, L3_EVENT_READ_HIT);
static struct attribute *l3c_pmu_events_attrs[] = {
&tx2_pmu_event_attr_read_request.attr.attr,
&tx2_pmu_event_attr_writeback_request.attr.attr,
&tx2_pmu_event_attr_inv_nwrite_request.attr.attr,
&tx2_pmu_event_attr_inv_request.attr.attr,
&tx2_pmu_event_attr_evict_request.attr.attr,
&tx2_pmu_event_attr_inv_nwrite_hit.attr.attr,
&tx2_pmu_event_attr_inv_hit.attr.attr,
&tx2_pmu_event_attr_read_hit.attr.attr,
NULL,
};
TX2_EVENT_ATTR(cnt_cycles, DMC_EVENT_COUNT_CYCLES);
TX2_EVENT_ATTR(write_txns, DMC_EVENT_WRITE_TXNS);
TX2_EVENT_ATTR(data_transfers, DMC_EVENT_DATA_TRANSFERS);
TX2_EVENT_ATTR(read_txns, DMC_EVENT_READ_TXNS);
static struct attribute *dmc_pmu_events_attrs[] = {
&tx2_pmu_event_attr_cnt_cycles.attr.attr,
&tx2_pmu_event_attr_write_txns.attr.attr,
&tx2_pmu_event_attr_data_transfers.attr.attr,
&tx2_pmu_event_attr_read_txns.attr.attr,
NULL,
};
TX2_EVENT_ATTR(req_pktsent, CCPI2_EVENT_REQ_PKT_SENT);
TX2_EVENT_ATTR(snoop_pktsent, CCPI2_EVENT_SNOOP_PKT_SENT);
TX2_EVENT_ATTR(data_pktsent, CCPI2_EVENT_DATA_PKT_SENT);
TX2_EVENT_ATTR(gic_pktsent, CCPI2_EVENT_GIC_PKT_SENT);
static struct attribute *ccpi2_pmu_events_attrs[] = {
&tx2_pmu_event_attr_req_pktsent.attr.attr,
&tx2_pmu_event_attr_snoop_pktsent.attr.attr,
&tx2_pmu_event_attr_data_pktsent.attr.attr,
&tx2_pmu_event_attr_gic_pktsent.attr.attr,
NULL,
};
static const struct attribute_group l3c_pmu_events_attr_group = {
.name = "events",
.attrs = l3c_pmu_events_attrs,
};
static const struct attribute_group dmc_pmu_events_attr_group = {
.name = "events",
.attrs = dmc_pmu_events_attrs,
};
static const struct attribute_group ccpi2_pmu_events_attr_group = {
.name = "events",
.attrs = ccpi2_pmu_events_attrs,
};
/*
* sysfs cpumask attributes
*/
static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tx2_uncore_pmu *tx2_pmu;
tx2_pmu = pmu_to_tx2_pmu(dev_get_drvdata(dev));
return cpumap_print_to_pagebuf(true, buf, cpumask_of(tx2_pmu->cpu));
}
static DEVICE_ATTR_RO(cpumask);
static struct attribute *tx2_pmu_cpumask_attrs[] = {
&dev_attr_cpumask.attr,
NULL,
};
static const struct attribute_group pmu_cpumask_attr_group = {
.attrs = tx2_pmu_cpumask_attrs,
};
/*
* Per PMU device attribute groups
*/
static const struct attribute_group *l3c_pmu_attr_groups[] = {
&l3c_pmu_format_attr_group,
&pmu_cpumask_attr_group,
&l3c_pmu_events_attr_group,
NULL
};
static const struct attribute_group *dmc_pmu_attr_groups[] = {
&dmc_pmu_format_attr_group,
&pmu_cpumask_attr_group,
&dmc_pmu_events_attr_group,
NULL
};
static const struct attribute_group *ccpi2_pmu_attr_groups[] = {
&ccpi2_pmu_format_attr_group,
&pmu_cpumask_attr_group,
&ccpi2_pmu_events_attr_group,
NULL
};
static inline u32 reg_readl(unsigned long addr)
{
return readl((void __iomem *)addr);
}
static inline void reg_writel(u32 val, unsigned long addr)
{
writel(val, (void __iomem *)addr);
}
static int alloc_counter(struct tx2_uncore_pmu *tx2_pmu)
{
int counter;
counter = find_first_zero_bit(tx2_pmu->active_counters,
tx2_pmu->max_counters);
if (counter == tx2_pmu->max_counters)
return -ENOSPC;
set_bit(counter, tx2_pmu->active_counters);
return counter;
}
static inline void free_counter(struct tx2_uncore_pmu *tx2_pmu, int counter)
{
clear_bit(counter, tx2_pmu->active_counters);
}
static void init_cntr_base_l3c(struct perf_event *event,
struct tx2_uncore_pmu *tx2_pmu)
{
struct hw_perf_event *hwc = &event->hw;
u32 cmask;
tx2_pmu = pmu_to_tx2_pmu(event->pmu);
cmask = tx2_pmu->counters_mask;
/* counter ctrl/data reg offset at 8 */
hwc->config_base = (unsigned long)tx2_pmu->base
+ L3C_COUNTER_CTL + (8 * GET_COUNTERID(event, cmask));
hwc->event_base = (unsigned long)tx2_pmu->base
+ L3C_COUNTER_DATA + (8 * GET_COUNTERID(event, cmask));
}
static void init_cntr_base_dmc(struct perf_event *event,
struct tx2_uncore_pmu *tx2_pmu)
{
struct hw_perf_event *hwc = &event->hw;
u32 cmask;
tx2_pmu = pmu_to_tx2_pmu(event->pmu);
cmask = tx2_pmu->counters_mask;
hwc->config_base = (unsigned long)tx2_pmu->base
+ DMC_COUNTER_CTL;
/* counter data reg offset at 0xc */
hwc->event_base = (unsigned long)tx2_pmu->base
+ DMC_COUNTER_DATA + (0xc * GET_COUNTERID(event, cmask));
}
static void init_cntr_base_ccpi2(struct perf_event *event,
struct tx2_uncore_pmu *tx2_pmu)
{
struct hw_perf_event *hwc = &event->hw;
u32 cmask;
cmask = tx2_pmu->counters_mask;
hwc->config_base = (unsigned long)tx2_pmu->base
+ CCPI2_COUNTER_CTL + (4 * GET_COUNTERID(event, cmask));
hwc->event_base = (unsigned long)tx2_pmu->base;
}
static void uncore_start_event_l3c(struct perf_event *event, int flags)
{
u32 val, emask;
struct hw_perf_event *hwc = &event->hw;
struct tx2_uncore_pmu *tx2_pmu;
tx2_pmu = pmu_to_tx2_pmu(event->pmu);
emask = tx2_pmu->events_mask;
/* event id encoded in bits [07:03] */
val = GET_EVENTID(event, emask) << 3;
reg_writel(val, hwc->config_base);
local64_set(&hwc->prev_count, 0);
reg_writel(0, hwc->event_base);
}
static inline void uncore_stop_event_l3c(struct perf_event *event)
{
reg_writel(0, event->hw.config_base);
}
static void uncore_start_event_dmc(struct perf_event *event, int flags)
{
u32 val, cmask, emask;
struct hw_perf_event *hwc = &event->hw;
struct tx2_uncore_pmu *tx2_pmu;
int idx, event_id;
tx2_pmu = pmu_to_tx2_pmu(event->pmu);
cmask = tx2_pmu->counters_mask;
emask = tx2_pmu->events_mask;
idx = GET_COUNTERID(event, cmask);
event_id = GET_EVENTID(event, emask);
	/* Enable and start the counter.
	 * Each counter has an 8-bit field; bits [05:01] of that field
	 * select the event type.
	 */
val = reg_readl(hwc->config_base);
val &= ~DMC_EVENT_CFG(idx, 0x1f);
val |= DMC_EVENT_CFG(idx, event_id);
reg_writel(val, hwc->config_base);
local64_set(&hwc->prev_count, 0);
reg_writel(0, hwc->event_base);
}
static void uncore_stop_event_dmc(struct perf_event *event)
{
u32 val, cmask;
struct hw_perf_event *hwc = &event->hw;
struct tx2_uncore_pmu *tx2_pmu;
int idx;
tx2_pmu = pmu_to_tx2_pmu(event->pmu);
cmask = tx2_pmu->counters_mask;
idx = GET_COUNTERID(event, cmask);
	/* clear the event type (bits [05:01]) to stop the counter */
val = reg_readl(hwc->config_base);
val &= ~DMC_EVENT_CFG(idx, 0x1f);
reg_writel(val, hwc->config_base);
}
static void uncore_start_event_ccpi2(struct perf_event *event, int flags)
{
u32 emask;
struct hw_perf_event *hwc = &event->hw;
struct tx2_uncore_pmu *tx2_pmu;
tx2_pmu = pmu_to_tx2_pmu(event->pmu);
emask = tx2_pmu->events_mask;
	/* Bits [09:00] set the event id.
	 * Bit [10] sets the level to rising edge.
	 * Bit [11] sets the type to edge sensitive.
	 */
reg_writel((CCPI2_EVENT_TYPE_EDGE_SENSITIVE |
CCPI2_EVENT_LEVEL_RISING_EDGE |
GET_EVENTID(event, emask)), hwc->config_base);
/* reset[4], enable[0] and start[1] counters */
reg_writel(CCPI2_PERF_CTL_RESET |
CCPI2_PERF_CTL_START |
CCPI2_PERF_CTL_ENABLE,
hwc->event_base + CCPI2_PERF_CTL);
local64_set(&event->hw.prev_count, 0ULL);
}
static void uncore_stop_event_ccpi2(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
/* disable and stop counter */
reg_writel(0, hwc->event_base + CCPI2_PERF_CTL);
}
static void tx2_uncore_event_update(struct perf_event *event)
{
u64 prev, delta, new = 0;
struct hw_perf_event *hwc = &event->hw;
struct tx2_uncore_pmu *tx2_pmu;
enum tx2_uncore_type type;
u32 prorate_factor;
u32 cmask, emask;
tx2_pmu = pmu_to_tx2_pmu(event->pmu);
type = tx2_pmu->type;
cmask = tx2_pmu->counters_mask;
emask = tx2_pmu->events_mask;
prorate_factor = tx2_pmu->prorate_factor;
if (type == PMU_TYPE_CCPI2) {
reg_writel(CCPI2_COUNTER_OFFSET +
GET_COUNTERID(event, cmask),
hwc->event_base + CCPI2_COUNTER_SEL);
new = reg_readl(hwc->event_base + CCPI2_COUNTER_DATA_H);
new = (new << 32) +
reg_readl(hwc->event_base + CCPI2_COUNTER_DATA_L);
prev = local64_xchg(&hwc->prev_count, new);
delta = new - prev;
} else {
new = reg_readl(hwc->event_base);
prev = local64_xchg(&hwc->prev_count, new);
/* handles rollover of 32 bit counter */
delta = (u32)(((1ULL << 32) - prev) + new);
}
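/*
 * Example: prev = 0xfffffff0 and new = 0x10 gives
 * delta = (2^32 - 0xfffffff0) + 0x10 = 0x20, i.e. the 32-bit
 * modular difference new - prev.
 */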
/* The DMC data_transfers event counts 16-byte units; convert to 64-byte units. */
if (type == PMU_TYPE_DMC &&
GET_EVENTID(event, emask) == DMC_EVENT_DATA_TRANSFERS)
delta = delta/4;
/* L3C and DMC have 16 and 8 interleaved channels respectively.
 * The sampled value is for channel 0 and is multiplied by
 * prorate_factor to get the count for the whole device.
*/
local64_add(delta * prorate_factor, &event->count);
}
static enum tx2_uncore_type get_tx2_pmu_type(struct acpi_device *adev)
{
int i = 0;
struct acpi_tx2_pmu_device {
__u8 id[ACPI_ID_LEN];
enum tx2_uncore_type type;
} devices[] = {
{"CAV901D", PMU_TYPE_L3C},
{"CAV901F", PMU_TYPE_DMC},
{"CAV901E", PMU_TYPE_CCPI2},
{"", PMU_TYPE_INVALID}
};
while (devices[i].type != PMU_TYPE_INVALID) {
if (!strcmp(acpi_device_hid(adev), devices[i].id))
break;
i++;
}
return devices[i].type;
}
static bool tx2_uncore_validate_event(struct pmu *pmu,
struct perf_event *event, int *counters)
{
if (is_software_event(event))
return true;
/* Reject groups spanning multiple HW PMUs. */
if (event->pmu != pmu)
return false;
*counters = *counters + 1;
return true;
}
/*
* Make sure the group of events can be scheduled at once
* on the PMU.
*/
static bool tx2_uncore_validate_event_group(struct perf_event *event,
int max_counters)
{
struct perf_event *sibling, *leader = event->group_leader;
int counters = 0;
if (event->group_leader == event)
return true;
if (!tx2_uncore_validate_event(event->pmu, leader, &counters))
return false;
for_each_sibling_event(sibling, leader) {
if (!tx2_uncore_validate_event(event->pmu, sibling, &counters))
return false;
}
if (!tx2_uncore_validate_event(event->pmu, event, &counters))
return false;
/*
* If the group requires more counters than the HW has,
* it cannot ever be scheduled.
*/
return counters <= max_counters;
}
static int tx2_uncore_event_init(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct tx2_uncore_pmu *tx2_pmu;
/* Check the event attr type for PMU enumeration */
if (event->attr.type != event->pmu->type)
return -ENOENT;
/*
* SOC PMU counters are shared across all cores.
 * Therefore, per-process mode is not supported,
 * nor is event sampling mode.
*/
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EINVAL;
if (event->cpu < 0)
return -EINVAL;
tx2_pmu = pmu_to_tx2_pmu(event->pmu);
if (tx2_pmu->cpu >= nr_cpu_ids)
return -EINVAL;
event->cpu = tx2_pmu->cpu;
if (event->attr.config >= tx2_pmu->max_events)
return -EINVAL;
/* store event id */
hwc->config = event->attr.config;
/* Validate the group */
if (!tx2_uncore_validate_event_group(event, tx2_pmu->max_counters))
return -EINVAL;
return 0;
}
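/*
 * The perf core calls ->add()/->del() to bind an event to a hardware
 * counter and release it again, and ->start()/->stop() to gate
 * counting on that counter; ->read() simply refreshes the count.
 */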
static void tx2_uncore_event_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
struct tx2_uncore_pmu *tx2_pmu;
hwc->state = 0;
tx2_pmu = pmu_to_tx2_pmu(event->pmu);
tx2_pmu->start_event(event, flags);
perf_event_update_userpage(event);
/* No hrtimer needed for CCPI2, 64-bit counters */
if (!tx2_pmu->hrtimer_callback)
return;
/* Start timer for first event */
if (bitmap_weight(tx2_pmu->active_counters,
tx2_pmu->max_counters) == 1) {
hrtimer_start(&tx2_pmu->hrtimer,
ns_to_ktime(tx2_pmu->hrtimer_interval),
HRTIMER_MODE_REL_PINNED);
}
}
static void tx2_uncore_event_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
struct tx2_uncore_pmu *tx2_pmu;
if (hwc->state & PERF_HES_UPTODATE)
return;
tx2_pmu = pmu_to_tx2_pmu(event->pmu);
tx2_pmu->stop_event(event);
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
if (flags & PERF_EF_UPDATE) {
tx2_uncore_event_update(event);
hwc->state |= PERF_HES_UPTODATE;
}
}
static int tx2_uncore_event_add(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
struct tx2_uncore_pmu *tx2_pmu;
tx2_pmu = pmu_to_tx2_pmu(event->pmu);
/* Allocate a free counter */
hwc->idx = alloc_counter(tx2_pmu);
if (hwc->idx < 0)
return -EAGAIN;
tx2_pmu->events[hwc->idx] = event;
/* set counter control and data registers base address */
tx2_pmu->init_cntr_base(event, tx2_pmu);
hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
if (flags & PERF_EF_START)
tx2_uncore_event_start(event, flags);
return 0;
}
static void tx2_uncore_event_del(struct perf_event *event, int flags)
{
struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u32 cmask;
cmask = tx2_pmu->counters_mask;
tx2_uncore_event_stop(event, PERF_EF_UPDATE);
/* clear the assigned counter */
free_counter(tx2_pmu, GET_COUNTERID(event, cmask));
perf_event_update_userpage(event);
tx2_pmu->events[hwc->idx] = NULL;
hwc->idx = -1;
if (!tx2_pmu->hrtimer_callback)
return;
if (bitmap_empty(tx2_pmu->active_counters, tx2_pmu->max_counters))
hrtimer_cancel(&tx2_pmu->hrtimer);
}
static void tx2_uncore_event_read(struct perf_event *event)
{
tx2_uncore_event_update(event);
}
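/*
 * The L3C and DMC counters are only 32 bits wide, so a pinned hrtimer
 * periodically folds every active counter into the 64-bit perf count
 * before it can wrap. CCPI2 counters are 64-bit and do not register
 * this callback.
 */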
static enum hrtimer_restart tx2_hrtimer_callback(struct hrtimer *timer)
{
struct tx2_uncore_pmu *tx2_pmu;
int max_counters, idx;
tx2_pmu = container_of(timer, struct tx2_uncore_pmu, hrtimer);
max_counters = tx2_pmu->max_counters;
if (bitmap_empty(tx2_pmu->active_counters, max_counters))
return HRTIMER_NORESTART;
for_each_set_bit(idx, tx2_pmu->active_counters, max_counters) {
struct perf_event *event = tx2_pmu->events[idx];
tx2_uncore_event_update(event);
}
hrtimer_forward_now(timer, ns_to_ktime(tx2_pmu->hrtimer_interval));
return HRTIMER_RESTART;
}
static int tx2_uncore_pmu_register(
struct tx2_uncore_pmu *tx2_pmu)
{
struct device *dev = tx2_pmu->dev;
char *name = tx2_pmu->name;
/* Perf event registration */
tx2_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
.attr_groups = tx2_pmu->attr_groups,
.task_ctx_nr = perf_invalid_context,
.event_init = tx2_uncore_event_init,
.add = tx2_uncore_event_add,
.del = tx2_uncore_event_del,
.start = tx2_uncore_event_start,
.stop = tx2_uncore_event_stop,
.read = tx2_uncore_event_read,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
tx2_pmu->pmu.name = devm_kasprintf(dev, GFP_KERNEL,
"%s", name);
return perf_pmu_register(&tx2_pmu->pmu, tx2_pmu->pmu.name, -1);
}
static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu)
{
int ret, cpu;
cpu = cpumask_any_and(cpumask_of_node(tx2_pmu->node),
cpu_online_mask);
tx2_pmu->cpu = cpu;
if (tx2_pmu->hrtimer_callback) {
hrtimer_init(&tx2_pmu->hrtimer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
tx2_pmu->hrtimer.function = tx2_pmu->hrtimer_callback;
}
ret = tx2_uncore_pmu_register(tx2_pmu);
if (ret) {
dev_err(tx2_pmu->dev, "%s PMU: Failed to init driver\n",
tx2_pmu->name);
return -ENODEV;
}
/* register hotplug callback for the pmu */
ret = cpuhp_state_add_instance(
CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
&tx2_pmu->hpnode);
if (ret) {
dev_err(tx2_pmu->dev, "Error %d registering hotplug", ret);
return ret;
}
/* Add to list */
list_add(&tx2_pmu->entry, &tx2_pmus);
dev_dbg(tx2_pmu->dev, "%s PMU UNCORE registered\n",
tx2_pmu->pmu.name);
return ret;
}
static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
acpi_handle handle, struct acpi_device *adev, u32 type)
{
struct tx2_uncore_pmu *tx2_pmu;
void __iomem *base;
struct resource res;
struct resource_entry *rentry;
struct list_head list;
int ret;
INIT_LIST_HEAD(&list);
ret = acpi_dev_get_resources(adev, &list, NULL, NULL);
if (ret <= 0) {
dev_err(dev, "failed to parse _CRS method, error %d\n", ret);
return NULL;
}
list_for_each_entry(rentry, &list, node) {
if (resource_type(rentry->res) == IORESOURCE_MEM) {
res = *rentry->res;
rentry = NULL;
break;
}
}
acpi_dev_free_resource_list(&list);
if (rentry) {
dev_err(dev, "PMU type %d: Fail to find resource\n", type);
return NULL;
}
base = devm_ioremap_resource(dev, &res);
if (IS_ERR(base))
return NULL;
tx2_pmu = devm_kzalloc(dev, sizeof(*tx2_pmu), GFP_KERNEL);
if (!tx2_pmu)
return NULL;
tx2_pmu->dev = dev;
tx2_pmu->type = type;
tx2_pmu->base = base;
tx2_pmu->node = dev_to_node(dev);
INIT_LIST_HEAD(&tx2_pmu->entry);
switch (tx2_pmu->type) {
case PMU_TYPE_L3C:
tx2_pmu->max_counters = TX2_PMU_DMC_L3C_MAX_COUNTERS;
tx2_pmu->counters_mask = 0x3;
tx2_pmu->prorate_factor = TX2_PMU_L3_TILES;
tx2_pmu->max_events = L3_EVENT_MAX;
tx2_pmu->events_mask = 0x1f;
tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
tx2_pmu->hrtimer_callback = tx2_hrtimer_callback;
tx2_pmu->attr_groups = l3c_pmu_attr_groups;
tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
"uncore_l3c_%d", tx2_pmu->node);
tx2_pmu->init_cntr_base = init_cntr_base_l3c;
tx2_pmu->start_event = uncore_start_event_l3c;
tx2_pmu->stop_event = uncore_stop_event_l3c;
break;
case PMU_TYPE_DMC:
tx2_pmu->max_counters = TX2_PMU_DMC_L3C_MAX_COUNTERS;
tx2_pmu->counters_mask = 0x3;
tx2_pmu->prorate_factor = TX2_PMU_DMC_CHANNELS;
tx2_pmu->max_events = DMC_EVENT_MAX;
tx2_pmu->events_mask = 0x1f;
tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
tx2_pmu->hrtimer_callback = tx2_hrtimer_callback;
tx2_pmu->attr_groups = dmc_pmu_attr_groups;
tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
"uncore_dmc_%d", tx2_pmu->node);
tx2_pmu->init_cntr_base = init_cntr_base_dmc;
tx2_pmu->start_event = uncore_start_event_dmc;
tx2_pmu->stop_event = uncore_stop_event_dmc;
break;
case PMU_TYPE_CCPI2:
/* CCPI2 has 8 counters */
tx2_pmu->max_counters = TX2_PMU_CCPI2_MAX_COUNTERS;
tx2_pmu->counters_mask = 0x7;
tx2_pmu->prorate_factor = 1;
tx2_pmu->max_events = CCPI2_EVENT_MAX;
tx2_pmu->events_mask = 0x1ff;
tx2_pmu->attr_groups = ccpi2_pmu_attr_groups;
tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
"uncore_ccpi2_%d", tx2_pmu->node);
tx2_pmu->init_cntr_base = init_cntr_base_ccpi2;
tx2_pmu->start_event = uncore_start_event_ccpi2;
tx2_pmu->stop_event = uncore_stop_event_ccpi2;
tx2_pmu->hrtimer_callback = NULL;
break;
case PMU_TYPE_INVALID:
devm_kfree(dev, tx2_pmu);
return NULL;
}
return tx2_pmu;
}
static acpi_status tx2_uncore_pmu_add(acpi_handle handle, u32 level,
void *data, void **return_value)
{
struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
struct tx2_uncore_pmu *tx2_pmu;
enum tx2_uncore_type type;
if (!adev || acpi_bus_get_status(adev) || !adev->status.present)
return AE_OK;
type = get_tx2_pmu_type(adev);
if (type == PMU_TYPE_INVALID)
return AE_OK;
tx2_pmu = tx2_uncore_pmu_init_dev((struct device *)data,
handle, adev, type);
if (!tx2_pmu)
return AE_ERROR;
if (tx2_uncore_pmu_add_dev(tx2_pmu)) {
/* Can't add the PMU device, abort */
return AE_ERROR;
}
return AE_OK;
}
static int tx2_uncore_pmu_online_cpu(unsigned int cpu,
struct hlist_node *hpnode)
{
struct tx2_uncore_pmu *tx2_pmu;
tx2_pmu = hlist_entry_safe(hpnode,
struct tx2_uncore_pmu, hpnode);
/* Pick this CPU if there is no CPU/PMU association yet and both
 * are from the same node.
*/
if ((tx2_pmu->cpu >= nr_cpu_ids) &&
(tx2_pmu->node == cpu_to_node(cpu)))
tx2_pmu->cpu = cpu;
return 0;
}
static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
struct hlist_node *hpnode)
{
int new_cpu;
struct tx2_uncore_pmu *tx2_pmu;
struct cpumask cpu_online_mask_temp;
tx2_pmu = hlist_entry_safe(hpnode,
struct tx2_uncore_pmu, hpnode);
if (cpu != tx2_pmu->cpu)
return 0;
if (tx2_pmu->hrtimer_callback)
hrtimer_cancel(&tx2_pmu->hrtimer);
cpumask_copy(&cpu_online_mask_temp, cpu_online_mask);
cpumask_clear_cpu(cpu, &cpu_online_mask_temp);
new_cpu = cpumask_any_and(
cpumask_of_node(tx2_pmu->node),
&cpu_online_mask_temp);
tx2_pmu->cpu = new_cpu;
if (new_cpu >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);
return 0;
}
static const struct acpi_device_id tx2_uncore_acpi_match[] = {
{"CAV901C", 0},
{},
};
MODULE_DEVICE_TABLE(acpi, tx2_uncore_acpi_match);
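/*
 * The platform device matches the ACPI id CAV901C; probe walks the
 * ACPI children of that device and registers one PMU for each L3C,
 * DMC or CCPI2 child recognised by tx2_uncore_pmu_add().
 */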
static int tx2_uncore_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
acpi_handle handle;
acpi_status status;
set_dev_node(dev, acpi_get_node(ACPI_HANDLE(dev)));
if (!has_acpi_companion(dev))
return -ENODEV;
handle = ACPI_HANDLE(dev);
if (!handle)
return -EINVAL;
/* Walk through the tree for all PMU UNCORE devices */
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
tx2_uncore_pmu_add,
NULL, dev, NULL);
if (ACPI_FAILURE(status)) {
dev_err(dev, "failed to probe PMU devices\n");
return_ACPI_STATUS(status);
}
dev_info(dev, "node%d: pmu uncore registered\n", dev_to_node(dev));
return 0;
}
static int tx2_uncore_remove(struct platform_device *pdev)
{
struct tx2_uncore_pmu *tx2_pmu, *temp;
struct device *dev = &pdev->dev;
if (!list_empty(&tx2_pmus)) {
list_for_each_entry_safe(tx2_pmu, temp, &tx2_pmus, entry) {
if (tx2_pmu->node == dev_to_node(dev)) {
cpuhp_state_remove_instance_nocalls(
CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
&tx2_pmu->hpnode);
perf_pmu_unregister(&tx2_pmu->pmu);
list_del(&tx2_pmu->entry);
}
}
}
return 0;
}
static struct platform_driver tx2_uncore_driver = {
.driver = {
.name = "tx2-uncore-pmu",
.acpi_match_table = ACPI_PTR(tx2_uncore_acpi_match),
.suppress_bind_attrs = true,
},
.probe = tx2_uncore_probe,
.remove = tx2_uncore_remove,
};
static int __init tx2_uncore_driver_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
"perf/tx2/uncore:online",
tx2_uncore_pmu_online_cpu,
tx2_uncore_pmu_offline_cpu);
if (ret) {
pr_err("TX2 PMU: setup hotplug failed(%d)\n", ret);
return ret;
}
ret = platform_driver_register(&tx2_uncore_driver);
if (ret)
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE);
return ret;
}
module_init(tx2_uncore_driver_init);
static void __exit tx2_uncore_driver_exit(void)
{
platform_driver_unregister(&tx2_uncore_driver);
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE);
}
module_exit(tx2_uncore_driver_exit);
MODULE_DESCRIPTION("ThunderX2 UNCORE PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ganapatrao Kulkarni <[email protected]>");
| linux-master | drivers/perf/thunderx2_pmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ACPI probing code for ARM performance counters.
*
* Copyright (C) 2017 ARM Ltd.
*/
#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);
static int arm_pmu_acpi_register_irq(int cpu)
{
struct acpi_madt_generic_interrupt *gicc;
int gsi, trigger;
gicc = acpi_cpu_get_madt_gicc(cpu);
gsi = gicc->performance_interrupt;
/*
* Per the ACPI spec, the MADT cannot describe a PMU that doesn't
* have an interrupt. QEMU advertises this by using a GSI of zero,
* which is not known to be valid on any hardware despite being
* valid per the spec. Take the pragmatic approach and reject a
* GSI of zero for now.
*/
if (!gsi)
return 0;
if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
trigger = ACPI_EDGE_SENSITIVE;
else
trigger = ACPI_LEVEL_SENSITIVE;
/*
* Helpfully, the MADT GICC doesn't have a polarity flag for the
* "performance interrupt". Luckily, on compliant GICs the polarity is
* a fixed value in HW (for both SPIs and PPIs) that we cannot change
* from SW.
*
* Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
* may not match the real polarity, but that should not matter.
*
* Other interrupt controllers are not supported with ACPI.
*/
return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}
static void arm_pmu_acpi_unregister_irq(int cpu)
{
struct acpi_madt_generic_interrupt *gicc;
int gsi;
gicc = acpi_cpu_get_madt_gicc(cpu);
gsi = gicc->performance_interrupt;
if (gsi)
acpi_unregister_gsi(gsi);
}
static int __maybe_unused
arm_acpi_register_pmu_device(struct platform_device *pdev, u8 len,
u16 (*parse_gsi)(struct acpi_madt_generic_interrupt *))
{
int cpu, this_hetid, hetid, irq, ret;
u16 this_gsi = 0, gsi = 0;
/*
* Ensure that platform device must have IORESOURCE_IRQ
* resource to hold gsi interrupt.
*/
if (pdev->num_resources != 1)
return -ENXIO;
if (pdev->resource[0].flags != IORESOURCE_IRQ)
return -ENXIO;
/*
* Sanity check all the GICC tables for the same interrupt
* number. For now, only support homogeneous ACPI machines.
*/
for_each_possible_cpu(cpu) {
struct acpi_madt_generic_interrupt *gicc;
gicc = acpi_cpu_get_madt_gicc(cpu);
if (gicc->header.length < len)
return gsi ? -ENXIO : 0;
this_gsi = parse_gsi(gicc);
this_hetid = find_acpi_cpu_topology_hetero_id(cpu);
if (!gsi) {
hetid = this_hetid;
gsi = this_gsi;
} else if (hetid != this_hetid || gsi != this_gsi) {
pr_warn("ACPI: %s: must be homogeneous\n", pdev->name);
return -ENXIO;
}
}
if (!this_gsi)
return 0;
irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
if (irq < 0) {
pr_warn("ACPI: %s Unable to register interrupt: %d\n", pdev->name, gsi);
return -ENXIO;
}
pdev->resource[0].start = irq;
ret = platform_device_register(pdev);
if (ret)
acpi_unregister_gsi(gsi);
return ret;
}
#if IS_ENABLED(CONFIG_ARM_SPE_PMU)
static struct resource spe_resources[] = {
{
/* irq */
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device spe_dev = {
.name = ARMV8_SPE_PDEV_NAME,
.id = -1,
.resource = spe_resources,
.num_resources = ARRAY_SIZE(spe_resources)
};
static u16 arm_spe_parse_gsi(struct acpi_madt_generic_interrupt *gicc)
{
return gicc->spe_interrupt;
}
/*
* For lack of a better place, hook the normal PMU MADT walk
* and create a SPE device if we detect a recent MADT with
* a homogeneous PPI mapping.
*/
static void arm_spe_acpi_register_device(void)
{
int ret = arm_acpi_register_pmu_device(&spe_dev, ACPI_MADT_GICC_SPE,
arm_spe_parse_gsi);
if (ret)
pr_warn("ACPI: SPE: Unable to register device\n");
}
#else
static inline void arm_spe_acpi_register_device(void)
{
}
#endif /* CONFIG_ARM_SPE_PMU */
#if IS_ENABLED(CONFIG_CORESIGHT_TRBE)
static struct resource trbe_resources[] = {
{
/* irq */
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device trbe_dev = {
.name = ARMV8_TRBE_PDEV_NAME,
.id = -1,
.resource = trbe_resources,
.num_resources = ARRAY_SIZE(trbe_resources)
};
static u16 arm_trbe_parse_gsi(struct acpi_madt_generic_interrupt *gicc)
{
return gicc->trbe_interrupt;
}
static void arm_trbe_acpi_register_device(void)
{
int ret = arm_acpi_register_pmu_device(&trbe_dev, ACPI_MADT_GICC_TRBE,
arm_trbe_parse_gsi);
if (ret)
pr_warn("ACPI: TRBE: Unable to register device\n");
}
#else
static inline void arm_trbe_acpi_register_device(void)
{
}
#endif /* CONFIG_CORESIGHT_TRBE */
static int arm_pmu_acpi_parse_irqs(void)
{
int irq, cpu, irq_cpu, err;
for_each_possible_cpu(cpu) {
irq = arm_pmu_acpi_register_irq(cpu);
if (irq < 0) {
err = irq;
pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
cpu, err);
goto out_err;
} else if (irq == 0) {
pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
}
/*
* Log and request the IRQ so the core arm_pmu code can manage
* it. We'll have to sanity-check IRQs later when we associate
* them with their PMUs.
*/
per_cpu(pmu_irqs, cpu) = irq;
err = armpmu_request_irq(irq, cpu);
if (err)
goto out_err;
}
return 0;
out_err:
for_each_possible_cpu(cpu) {
irq = per_cpu(pmu_irqs, cpu);
if (!irq)
continue;
arm_pmu_acpi_unregister_irq(cpu);
/*
* Blat all copies of the IRQ so that we only unregister the
* corresponding GSI once (e.g. when we have PPIs).
*/
for_each_possible_cpu(irq_cpu) {
if (per_cpu(pmu_irqs, irq_cpu) == irq)
per_cpu(pmu_irqs, irq_cpu) = 0;
}
}
return err;
}
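/*
 * Look up a previously probed PMU whose MIDR matches the current CPU,
 * so that on heterogeneous systems each micro-architecture ends up
 * with its own arm_pmu instance.
 */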
static struct arm_pmu *arm_pmu_acpi_find_pmu(void)
{
unsigned long cpuid = read_cpuid_id();
struct arm_pmu *pmu;
int cpu;
for_each_possible_cpu(cpu) {
pmu = per_cpu(probed_pmus, cpu);
if (!pmu || pmu->acpi_cpuid != cpuid)
continue;
return pmu;
}
return NULL;
}
/*
* Check whether the new IRQ is compatible with those already associated with
* the PMU (e.g. we don't have mismatched PPIs).
*/
static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
{
struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
int cpu;
if (!irq)
return true;
for_each_cpu(cpu, &pmu->supported_cpus) {
int other_irq = per_cpu(hw_events->irq, cpu);
if (!other_irq)
continue;
if (irq == other_irq)
continue;
if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
continue;
pr_warn("mismatched PPIs detected\n");
return false;
}
return true;
}
static void arm_pmu_acpi_associate_pmu_cpu(struct arm_pmu *pmu,
unsigned int cpu)
{
int irq = per_cpu(pmu_irqs, cpu);
per_cpu(probed_pmus, cpu) = pmu;
if (pmu_irq_matches(pmu, irq)) {
struct pmu_hw_events __percpu *hw_events;
hw_events = pmu->hw_events;
per_cpu(hw_events->irq, cpu) = irq;
}
cpumask_set_cpu(cpu, &pmu->supported_cpus);
}
/*
* This must run before the common arm_pmu hotplug logic, so that we can
* associate a CPU and its interrupt before the common code tries to manage the
* affinity and so on.
*
* Note that hotplug events are serialized, so we cannot race with another CPU
* coming up. The perf core won't open events while a hotplug event is in
* progress.
*/
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
struct arm_pmu *pmu;
/* If we've already probed this CPU, we have nothing to do */
if (per_cpu(probed_pmus, cpu))
return 0;
pmu = arm_pmu_acpi_find_pmu();
if (!pmu) {
pr_warn_ratelimited("Unable to associate CPU%d with a PMU\n",
cpu);
return 0;
}
arm_pmu_acpi_associate_pmu_cpu(pmu, cpu);
return 0;
}
static void arm_pmu_acpi_probe_matching_cpus(struct arm_pmu *pmu,
unsigned long cpuid)
{
int cpu;
for_each_online_cpu(cpu) {
unsigned long cpu_cpuid = per_cpu(cpu_data, cpu).reg_midr;
if (cpu_cpuid == cpuid)
arm_pmu_acpi_associate_pmu_cpu(pmu, cpu);
}
}
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
int pmu_idx = 0;
unsigned int cpu;
int ret;
ret = arm_pmu_acpi_parse_irqs();
if (ret)
return ret;
ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_ACPI_STARTING,
"perf/arm/pmu_acpi:starting",
arm_pmu_acpi_cpu_starting, NULL);
if (ret)
return ret;
/*
* Initialise and register the set of PMUs which we know about right
* now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
* could handle late hotplug, but this may lead to deadlock since we
* might try to register a hotplug notifier instance from within a
* hotplug notifier.
*
* There's also the problem of having access to the right init_fn,
* without tying this too deeply into the "real" PMU driver.
*
* For the moment, as with the platform/DT case, we need at least one
* of a PMU's CPUs to be online at probe time.
*/
for_each_online_cpu(cpu) {
struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
unsigned long cpuid;
char *base_name;
/* If we've already probed this CPU, we have nothing to do */
if (pmu)
continue;
pmu = armpmu_alloc();
if (!pmu) {
pr_warn("Unable to allocate PMU for CPU%d\n",
cpu);
return -ENOMEM;
}
cpuid = per_cpu(cpu_data, cpu).reg_midr;
pmu->acpi_cpuid = cpuid;
arm_pmu_acpi_probe_matching_cpus(pmu, cpuid);
ret = init_fn(pmu);
if (ret == -ENODEV) {
/* PMU not handled by this driver, or not present */
continue;
} else if (ret) {
pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
return ret;
}
base_name = pmu->name;
pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
if (!pmu->name) {
pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
return -ENOMEM;
}
ret = armpmu_register(pmu);
if (ret) {
pr_warn("Failed to register PMU for CPU%d\n", cpu);
kfree(pmu->name);
return ret;
}
}
return ret;
}
static int arm_pmu_acpi_init(void)
{
if (acpi_disabled)
return 0;
arm_spe_acpi_register_device();
arm_trbe_acpi_register_device();
return 0;
}
subsys_initcall(arm_pmu_acpi_init)
| linux-master | drivers/perf/arm_pmu_acpi.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
*/
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/local64.h>
#include <asm/sysreg.h>
#include <soc/qcom/kryo-l2-accessors.h>
#define MAX_L2_CTRS 9
#define L2PMCR_NUM_EV_SHIFT 11
#define L2PMCR_NUM_EV_MASK 0x1F
#define L2PMCR 0x400
#define L2PMCNTENCLR 0x403
#define L2PMCNTENSET 0x404
#define L2PMINTENCLR 0x405
#define L2PMINTENSET 0x406
#define L2PMOVSCLR 0x407
#define L2PMOVSSET 0x408
#define L2PMCCNTCR 0x409
#define L2PMCCNTR 0x40A
#define L2PMCCNTSR 0x40C
#define L2PMRESR 0x410
#define IA_L2PMXEVCNTCR_BASE 0x420
#define IA_L2PMXEVCNTR_BASE 0x421
#define IA_L2PMXEVFILTER_BASE 0x423
#define IA_L2PMXEVTYPER_BASE 0x424
#define IA_L2_REG_OFFSET 0x10
#define L2PMXEVFILTER_SUFILTER_ALL 0x000E0000
#define L2PMXEVFILTER_ORGFILTER_IDINDEP 0x00000004
#define L2PMXEVFILTER_ORGFILTER_ALL 0x00000003
#define L2EVTYPER_REG_SHIFT 3
#define L2PMRESR_GROUP_BITS 8
#define L2PMRESR_GROUP_MASK GENMASK(7, 0)
#define L2CYCLE_CTR_BIT 31
#define L2CYCLE_CTR_RAW_CODE 0xFE
#define L2PMCR_RESET_ALL 0x6
#define L2PMCR_COUNTERS_ENABLE 0x1
#define L2PMCR_COUNTERS_DISABLE 0x0
#define L2PMRESR_EN BIT_ULL(63)
#define L2_EVT_MASK 0x00000FFF
#define L2_EVT_CODE_MASK 0x00000FF0
#define L2_EVT_GRP_MASK 0x0000000F
#define L2_EVT_CODE_SHIFT 4
#define L2_EVT_GRP_SHIFT 0
#define L2_EVT_CODE(event) (((event) & L2_EVT_CODE_MASK) >> L2_EVT_CODE_SHIFT)
#define L2_EVT_GROUP(event) (((event) & L2_EVT_GRP_MASK) >> L2_EVT_GRP_SHIFT)
#define L2_EVT_GROUP_MAX 7
#define L2_COUNTER_RELOAD BIT_ULL(31)
#define L2_CYCLE_COUNTER_RELOAD BIT_ULL(63)
#define reg_idx(reg, i) (((i) * IA_L2_REG_OFFSET) + reg##_BASE)
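/*
 * Example: reg_idx(IA_L2PMXEVCNTR, 2) expands to
 * (2 * 0x10) + 0x421 = 0x441, the event counter register for
 * counter 2.
 */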
/*
* Events
*/
#define L2_EVENT_CYCLES 0xfe
#define L2_EVENT_DCACHE_OPS 0x400
#define L2_EVENT_ICACHE_OPS 0x401
#define L2_EVENT_TLBI 0x402
#define L2_EVENT_BARRIERS 0x403
#define L2_EVENT_TOTAL_READS 0x405
#define L2_EVENT_TOTAL_WRITES 0x406
#define L2_EVENT_TOTAL_REQUESTS 0x407
#define L2_EVENT_LDREX 0x420
#define L2_EVENT_STREX 0x421
#define L2_EVENT_CLREX 0x422
struct cluster_pmu;
/*
* Aggregate PMU. Implements the core pmu functions and manages
* the hardware PMUs.
*/
struct l2cache_pmu {
struct hlist_node node;
u32 num_pmus;
struct pmu pmu;
int num_counters;
cpumask_t cpumask;
struct platform_device *pdev;
struct cluster_pmu * __percpu *pmu_cluster;
struct list_head clusters;
};
/*
* The cache is made up of one or more clusters, each cluster has its own PMU.
* Each cluster is associated with one or more CPUs.
* This structure represents one of the hardware PMUs.
*
* Events can be envisioned as a 2-dimensional array. Each column represents
* a group of events. There are 8 groups. Only one entry from each
* group can be in use at a time.
*
* Events are specified as 0xCCG, where CC is 2 hex digits specifying
* the code (array row) and G specifies the group (column).
*
* In addition there is a cycle counter event specified by L2CYCLE_CTR_RAW_CODE
* which is outside the above scheme.
*/
struct cluster_pmu {
struct list_head next;
struct perf_event *events[MAX_L2_CTRS];
struct l2cache_pmu *l2cache_pmu;
DECLARE_BITMAP(used_counters, MAX_L2_CTRS);
DECLARE_BITMAP(used_groups, L2_EVT_GROUP_MAX + 1);
int irq;
int cluster_id;
/* The CPU that is used for collecting events on this cluster */
int on_cpu;
/* All the CPUs associated with this cluster */
cpumask_t cluster_cpus;
spinlock_t pmu_lock;
};
#define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu))
static u32 l2_cycle_ctr_idx;
static u32 l2_counter_present_mask;
static inline u32 idx_to_reg_bit(u32 idx)
{
if (idx == l2_cycle_ctr_idx)
return BIT(L2CYCLE_CTR_BIT);
return BIT(idx);
}
static inline struct cluster_pmu *get_cluster_pmu(
struct l2cache_pmu *l2cache_pmu, int cpu)
{
return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu);
}
static void cluster_pmu_reset(void)
{
/* Reset all counters */
kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_RESET_ALL);
kryo_l2_set_indirect_reg(L2PMCNTENCLR, l2_counter_present_mask);
kryo_l2_set_indirect_reg(L2PMINTENCLR, l2_counter_present_mask);
kryo_l2_set_indirect_reg(L2PMOVSCLR, l2_counter_present_mask);
}
static inline void cluster_pmu_enable(void)
{
kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_ENABLE);
}
static inline void cluster_pmu_disable(void)
{
kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_DISABLE);
}
static inline void cluster_pmu_counter_set_value(u32 idx, u64 value)
{
if (idx == l2_cycle_ctr_idx)
kryo_l2_set_indirect_reg(L2PMCCNTR, value);
else
kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx), value);
}
static inline u64 cluster_pmu_counter_get_value(u32 idx)
{
u64 value;
if (idx == l2_cycle_ctr_idx)
value = kryo_l2_get_indirect_reg(L2PMCCNTR);
else
value = kryo_l2_get_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx));
return value;
}
static inline void cluster_pmu_counter_enable(u32 idx)
{
kryo_l2_set_indirect_reg(L2PMCNTENSET, idx_to_reg_bit(idx));
}
static inline void cluster_pmu_counter_disable(u32 idx)
{
kryo_l2_set_indirect_reg(L2PMCNTENCLR, idx_to_reg_bit(idx));
}
static inline void cluster_pmu_counter_enable_interrupt(u32 idx)
{
kryo_l2_set_indirect_reg(L2PMINTENSET, idx_to_reg_bit(idx));
}
static inline void cluster_pmu_counter_disable_interrupt(u32 idx)
{
kryo_l2_set_indirect_reg(L2PMINTENCLR, idx_to_reg_bit(idx));
}
static inline void cluster_pmu_set_evccntcr(u32 val)
{
kryo_l2_set_indirect_reg(L2PMCCNTCR, val);
}
static inline void cluster_pmu_set_evcntcr(u32 ctr, u32 val)
{
kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTCR, ctr), val);
}
static inline void cluster_pmu_set_evtyper(u32 ctr, u32 val)
{
kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVTYPER, ctr), val);
}
static void cluster_pmu_set_resr(struct cluster_pmu *cluster,
u32 event_group, u32 event_cc)
{
u64 field;
u64 resr_val;
u32 shift;
unsigned long flags;
shift = L2PMRESR_GROUP_BITS * event_group;
field = ((u64)(event_cc & L2PMRESR_GROUP_MASK) << shift);
spin_lock_irqsave(&cluster->pmu_lock, flags);
resr_val = kryo_l2_get_indirect_reg(L2PMRESR);
resr_val &= ~(L2PMRESR_GROUP_MASK << shift);
resr_val |= field;
resr_val |= L2PMRESR_EN;
kryo_l2_set_indirect_reg(L2PMRESR, resr_val);
spin_unlock_irqrestore(&cluster->pmu_lock, flags);
}
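/*
 * Example: event 0x405 (total-reads) has group 5 and code 0x40, so
 * bits [47:40] of L2PMRESR are programmed with 0x40 and the counter's
 * L2PMXEVTYPER is written with group 5.
 */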
/*
* Hardware allows filtering of events based on the originating
* CPU. Turn this off by setting filter bits to allow events from
 * all CPUs, subunits and ID-independent events in this cluster.
*/
static inline void cluster_pmu_set_evfilter_sys_mode(u32 ctr)
{
u32 val = L2PMXEVFILTER_SUFILTER_ALL |
L2PMXEVFILTER_ORGFILTER_IDINDEP |
L2PMXEVFILTER_ORGFILTER_ALL;
kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVFILTER, ctr), val);
}
static inline u32 cluster_pmu_getreset_ovsr(void)
{
u32 result = kryo_l2_get_indirect_reg(L2PMOVSSET);
kryo_l2_set_indirect_reg(L2PMOVSCLR, result);
return result;
}
static inline bool cluster_pmu_has_overflowed(u32 ovsr)
{
return !!(ovsr & l2_counter_present_mask);
}
static inline bool cluster_pmu_counter_has_overflowed(u32 ovsr, u32 idx)
{
return !!(ovsr & idx_to_reg_bit(idx));
}
static void l2_cache_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
u64 delta, prev, now;
u32 idx = hwc->idx;
do {
prev = local64_read(&hwc->prev_count);
now = cluster_pmu_counter_get_value(idx);
} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
/*
* The cycle counter is 64-bit, but all other counters are
* 32-bit, and we must handle 32-bit overflow explicitly.
*/
delta = now - prev;
if (idx != l2_cycle_ctr_idx)
delta &= 0xffffffff;
local64_add(delta, &event->count);
}
static void l2_cache_cluster_set_period(struct cluster_pmu *cluster,
struct hw_perf_event *hwc)
{
u32 idx = hwc->idx;
u64 new;
/*
* We limit the max period to half the max counter value so
* that even in the case of extreme interrupt latency the
* counter will (hopefully) not wrap past its initial value.
*/
if (idx == l2_cycle_ctr_idx)
new = L2_CYCLE_COUNTER_RELOAD;
else
new = L2_COUNTER_RELOAD;
local64_set(&hwc->prev_count, new);
cluster_pmu_counter_set_value(idx, new);
}
static int l2_cache_get_event_idx(struct cluster_pmu *cluster,
struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int idx;
int num_ctrs = cluster->l2cache_pmu->num_counters - 1;
unsigned int group;
if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
if (test_and_set_bit(l2_cycle_ctr_idx, cluster->used_counters))
return -EAGAIN;
return l2_cycle_ctr_idx;
}
idx = find_first_zero_bit(cluster->used_counters, num_ctrs);
if (idx == num_ctrs)
/* The counters are all in use. */
return -EAGAIN;
/*
* Check for column exclusion: event column already in use by another
* event. This is for events which are not in the same group.
* Conflicting events in the same group are detected in event_init.
*/
group = L2_EVT_GROUP(hwc->config_base);
if (test_bit(group, cluster->used_groups))
return -EAGAIN;
set_bit(idx, cluster->used_counters);
set_bit(group, cluster->used_groups);
return idx;
}
static void l2_cache_clear_event_idx(struct cluster_pmu *cluster,
struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
clear_bit(idx, cluster->used_counters);
if (hwc->config_base != L2CYCLE_CTR_RAW_CODE)
clear_bit(L2_EVT_GROUP(hwc->config_base), cluster->used_groups);
}
static irqreturn_t l2_cache_handle_irq(int irq_num, void *data)
{
struct cluster_pmu *cluster = data;
int num_counters = cluster->l2cache_pmu->num_counters;
u32 ovsr;
int idx;
ovsr = cluster_pmu_getreset_ovsr();
if (!cluster_pmu_has_overflowed(ovsr))
return IRQ_NONE;
for_each_set_bit(idx, cluster->used_counters, num_counters) {
struct perf_event *event = cluster->events[idx];
struct hw_perf_event *hwc;
if (WARN_ON_ONCE(!event))
continue;
if (!cluster_pmu_counter_has_overflowed(ovsr, idx))
continue;
l2_cache_event_update(event);
hwc = &event->hw;
l2_cache_cluster_set_period(cluster, hwc);
}
return IRQ_HANDLED;
}
/*
* Implementation of abstract pmu functionality required by
* the core perf events code.
*/
static void l2_cache_pmu_enable(struct pmu *pmu)
{
/*
* Although there is only one PMU (per socket) controlling multiple
* physical PMUs (per cluster), because we do not support per-task mode
* each event is associated with a CPU. Each event has pmu_enable
* called on its CPU, so here it is only necessary to enable the
* counters for the current CPU.
*/
cluster_pmu_enable();
}
static void l2_cache_pmu_disable(struct pmu *pmu)
{
cluster_pmu_disable();
}
static int l2_cache_event_init(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct cluster_pmu *cluster;
struct perf_event *sibling;
struct l2cache_pmu *l2cache_pmu;
if (event->attr.type != event->pmu->type)
return -ENOENT;
l2cache_pmu = to_l2cache_pmu(event->pmu);
if (hwc->sample_period) {
dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
"Sampling not supported\n");
return -EOPNOTSUPP;
}
if (event->cpu < 0) {
dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
"Per-task mode not supported\n");
return -EOPNOTSUPP;
}
if (((L2_EVT_GROUP(event->attr.config) > L2_EVT_GROUP_MAX) ||
((event->attr.config & ~L2_EVT_MASK) != 0)) &&
(event->attr.config != L2CYCLE_CTR_RAW_CODE)) {
dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
"Invalid config %llx\n",
event->attr.config);
return -EINVAL;
}
/* Don't allow groups with mixed PMUs, except for s/w events */
if (event->group_leader->pmu != event->pmu &&
!is_software_event(event->group_leader)) {
dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
"Can't create mixed PMU group\n");
return -EINVAL;
}
for_each_sibling_event(sibling, event->group_leader) {
if (sibling->pmu != event->pmu &&
!is_software_event(sibling)) {
dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
"Can't create mixed PMU group\n");
return -EINVAL;
}
}
cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
if (!cluster) {
/* CPU has not been initialised */
dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
"CPU%d not associated with L2 cluster\n", event->cpu);
return -EINVAL;
}
/* Ensure all events in a group are on the same cpu */
if ((event->group_leader != event) &&
(cluster->on_cpu != event->group_leader->cpu)) {
dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
"Can't create group on CPUs %d and %d",
event->cpu, event->group_leader->cpu);
return -EINVAL;
}
if ((event != event->group_leader) &&
!is_software_event(event->group_leader) &&
(L2_EVT_GROUP(event->group_leader->attr.config) ==
L2_EVT_GROUP(event->attr.config))) {
dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
"Column exclusion: conflicting events %llx %llx\n",
event->group_leader->attr.config,
event->attr.config);
return -EINVAL;
}
for_each_sibling_event(sibling, event->group_leader) {
if ((sibling != event) &&
!is_software_event(sibling) &&
(L2_EVT_GROUP(sibling->attr.config) ==
L2_EVT_GROUP(event->attr.config))) {
dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
"Column exclusion: conflicting events %llx %llx\n",
sibling->attr.config,
event->attr.config);
return -EINVAL;
}
}
hwc->idx = -1;
hwc->config_base = event->attr.config;
/*
* Ensure all events are on the same cpu so all events are in the
* same cpu context, to avoid races on pmu_enable etc.
*/
event->cpu = cluster->on_cpu;
return 0;
}
static void l2_cache_event_start(struct perf_event *event, int flags)
{
struct cluster_pmu *cluster;
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
u32 config;
u32 event_cc, event_group;
hwc->state = 0;
cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
l2_cache_cluster_set_period(cluster, hwc);
if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
cluster_pmu_set_evccntcr(0);
} else {
config = hwc->config_base;
event_cc = L2_EVT_CODE(config);
event_group = L2_EVT_GROUP(config);
cluster_pmu_set_evcntcr(idx, 0);
cluster_pmu_set_evtyper(idx, event_group);
cluster_pmu_set_resr(cluster, event_group, event_cc);
cluster_pmu_set_evfilter_sys_mode(idx);
}
cluster_pmu_counter_enable_interrupt(idx);
cluster_pmu_counter_enable(idx);
}
static void l2_cache_event_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
if (hwc->state & PERF_HES_STOPPED)
return;
cluster_pmu_counter_disable_interrupt(idx);
cluster_pmu_counter_disable(idx);
if (flags & PERF_EF_UPDATE)
l2_cache_event_update(event);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
static int l2_cache_event_add(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
int idx;
int err = 0;
struct cluster_pmu *cluster;
cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
idx = l2_cache_get_event_idx(cluster, event);
if (idx < 0)
return idx;
hwc->idx = idx;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
cluster->events[idx] = event;
local64_set(&hwc->prev_count, 0);
if (flags & PERF_EF_START)
l2_cache_event_start(event, flags);
/* Propagate changes to the userspace mapping. */
perf_event_update_userpage(event);
return err;
}
static void l2_cache_event_del(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
struct cluster_pmu *cluster;
int idx = hwc->idx;
cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
l2_cache_event_stop(event, flags | PERF_EF_UPDATE);
cluster->events[idx] = NULL;
l2_cache_clear_event_idx(cluster, event);
perf_event_update_userpage(event);
}
static void l2_cache_event_read(struct perf_event *event)
{
l2_cache_event_update(event);
}
static ssize_t l2_cache_pmu_cpumask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev));
return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask);
}
static struct device_attribute l2_cache_pmu_cpumask_attr =
__ATTR(cpumask, S_IRUGO, l2_cache_pmu_cpumask_show, NULL);
static struct attribute *l2_cache_pmu_cpumask_attrs[] = {
&l2_cache_pmu_cpumask_attr.attr,
NULL,
};
static const struct attribute_group l2_cache_pmu_cpumask_group = {
.attrs = l2_cache_pmu_cpumask_attrs,
};
/* CCG format for perf RAW codes. */
PMU_FORMAT_ATTR(l2_code, "config:4-11");
PMU_FORMAT_ATTR(l2_group, "config:0-3");
PMU_FORMAT_ATTR(event, "config:0-11");
static struct attribute *l2_cache_pmu_formats[] = {
&format_attr_l2_code.attr,
&format_attr_l2_group.attr,
&format_attr_event.attr,
NULL,
};
static const struct attribute_group l2_cache_pmu_format_group = {
.name = "format",
.attrs = l2_cache_pmu_formats,
};
static ssize_t l2cache_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
#define L2CACHE_EVENT_ATTR(_name, _id) \
PMU_EVENT_ATTR_ID(_name, l2cache_pmu_event_show, _id)
static struct attribute *l2_cache_pmu_events[] = {
L2CACHE_EVENT_ATTR(cycles, L2_EVENT_CYCLES),
L2CACHE_EVENT_ATTR(dcache-ops, L2_EVENT_DCACHE_OPS),
L2CACHE_EVENT_ATTR(icache-ops, L2_EVENT_ICACHE_OPS),
L2CACHE_EVENT_ATTR(tlbi, L2_EVENT_TLBI),
L2CACHE_EVENT_ATTR(barriers, L2_EVENT_BARRIERS),
L2CACHE_EVENT_ATTR(total-reads, L2_EVENT_TOTAL_READS),
L2CACHE_EVENT_ATTR(total-writes, L2_EVENT_TOTAL_WRITES),
L2CACHE_EVENT_ATTR(total-requests, L2_EVENT_TOTAL_REQUESTS),
L2CACHE_EVENT_ATTR(ldrex, L2_EVENT_LDREX),
L2CACHE_EVENT_ATTR(strex, L2_EVENT_STREX),
L2CACHE_EVENT_ATTR(clrex, L2_EVENT_CLREX),
NULL
};
static const struct attribute_group l2_cache_pmu_events_group = {
.name = "events",
.attrs = l2_cache_pmu_events,
};
static const struct attribute_group *l2_cache_pmu_attr_grps[] = {
&l2_cache_pmu_format_group,
&l2_cache_pmu_cpumask_group,
&l2_cache_pmu_events_group,
NULL,
};
/*
* Generic device handlers
*/
static const struct acpi_device_id l2_cache_pmu_acpi_match[] = {
{ "QCOM8130", },
{ }
};
static int get_num_counters(void)
{
int val;
val = kryo_l2_get_indirect_reg(L2PMCR);
/*
* Read number of counters from L2PMCR and add 1
* for the cycle counter.
*/
return ((val >> L2PMCR_NUM_EV_SHIFT) & L2PMCR_NUM_EV_MASK) + 1;
}
static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
struct l2cache_pmu *l2cache_pmu, int cpu)
{
u64 mpidr;
int cpu_cluster_id;
struct cluster_pmu *cluster;
/*
* This assumes that the cluster_id is in MPIDR[aff1] for
* single-threaded cores, and MPIDR[aff2] for multi-threaded
* cores. This logic will have to be updated if this changes.
*/
mpidr = read_cpuid_mpidr();
if (mpidr & MPIDR_MT_BITMASK)
cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
else
cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
if (cluster->cluster_id != cpu_cluster_id)
continue;
dev_info(&l2cache_pmu->pdev->dev,
"CPU%d associated with cluster %d\n", cpu,
cluster->cluster_id);
cpumask_set_cpu(cpu, &cluster->cluster_cpus);
*per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
return cluster;
}
return NULL;
}
static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
struct cluster_pmu *cluster;
struct l2cache_pmu *l2cache_pmu;
l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
cluster = get_cluster_pmu(l2cache_pmu, cpu);
if (!cluster) {
/* First time this CPU has come online */
cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu);
if (!cluster) {
/* Only if broken firmware doesn't list every cluster */
WARN_ONCE(1, "No L2 cache cluster for CPU%d\n", cpu);
return 0;
}
}
/* If another CPU is managing this cluster, we're done */
if (cluster->on_cpu != -1)
return 0;
/*
* All CPUs on this cluster were down, use this one.
* Reset to put it into sane state.
*/
cluster->on_cpu = cpu;
cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);
cluster_pmu_reset();
WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(cpu)));
enable_irq(cluster->irq);
return 0;
}
static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct cluster_pmu *cluster;
struct l2cache_pmu *l2cache_pmu;
cpumask_t cluster_online_cpus;
unsigned int target;
l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
cluster = get_cluster_pmu(l2cache_pmu, cpu);
if (!cluster)
return 0;
/* If this CPU is not managing the cluster, we're done */
if (cluster->on_cpu != cpu)
return 0;
/* Give up ownership of cluster */
cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask);
cluster->on_cpu = -1;
/* Any other CPU for this cluster which is still online */
cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus,
cpu_online_mask);
target = cpumask_any_but(&cluster_online_cpus, cpu);
if (target >= nr_cpu_ids) {
disable_irq(cluster->irq);
return 0;
}
perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);
cluster->on_cpu = target;
cpumask_set_cpu(target, &l2cache_pmu->cpumask);
WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(target)));
return 0;
}
static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
{
struct platform_device *pdev = to_platform_device(dev->parent);
struct platform_device *sdev = to_platform_device(dev);
struct l2cache_pmu *l2cache_pmu = data;
struct cluster_pmu *cluster;
u64 fw_cluster_id;
int err;
int irq;
err = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &fw_cluster_id);
if (err) {
dev_err(&pdev->dev, "unable to read ACPI uid\n");
return err;
}
cluster = devm_kzalloc(&pdev->dev, sizeof(*cluster), GFP_KERNEL);
if (!cluster)
return -ENOMEM;
INIT_LIST_HEAD(&cluster->next);
cluster->cluster_id = fw_cluster_id;
irq = platform_get_irq(sdev, 0);
if (irq < 0)
return irq;
cluster->irq = irq;
cluster->l2cache_pmu = l2cache_pmu;
cluster->on_cpu = -1;
err = devm_request_irq(&pdev->dev, irq, l2_cache_handle_irq,
IRQF_NOBALANCING | IRQF_NO_THREAD |
IRQF_NO_AUTOEN,
"l2-cache-pmu", cluster);
if (err) {
dev_err(&pdev->dev,
"Unable to request IRQ%d for L2 PMU counters\n", irq);
return err;
}
dev_info(&pdev->dev,
"Registered L2 cache PMU cluster %lld\n", fw_cluster_id);
spin_lock_init(&cluster->pmu_lock);
list_add(&cluster->next, &l2cache_pmu->clusters);
l2cache_pmu->num_pmus++;
return 0;
}
static int l2_cache_pmu_probe(struct platform_device *pdev)
{
int err;
struct l2cache_pmu *l2cache_pmu;
l2cache_pmu =
devm_kzalloc(&pdev->dev, sizeof(*l2cache_pmu), GFP_KERNEL);
if (!l2cache_pmu)
return -ENOMEM;
INIT_LIST_HEAD(&l2cache_pmu->clusters);
platform_set_drvdata(pdev, l2cache_pmu);
l2cache_pmu->pmu = (struct pmu) {
/* suffix is instance id for future use with multiple sockets */
.name = "l2cache_0",
.task_ctx_nr = perf_invalid_context,
.pmu_enable = l2_cache_pmu_enable,
.pmu_disable = l2_cache_pmu_disable,
.event_init = l2_cache_event_init,
.add = l2_cache_event_add,
.del = l2_cache_event_del,
.start = l2_cache_event_start,
.stop = l2_cache_event_stop,
.read = l2_cache_event_read,
.attr_groups = l2_cache_pmu_attr_grps,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
l2cache_pmu->num_counters = get_num_counters();
l2cache_pmu->pdev = pdev;
l2cache_pmu->pmu_cluster = devm_alloc_percpu(&pdev->dev,
struct cluster_pmu *);
if (!l2cache_pmu->pmu_cluster)
return -ENOMEM;
l2_cycle_ctr_idx = l2cache_pmu->num_counters - 1;
l2_counter_present_mask = GENMASK(l2cache_pmu->num_counters - 2, 0) |
BIT(L2CYCLE_CTR_BIT);
cpumask_clear(&l2cache_pmu->cpumask);
/* Read cluster info and initialize each cluster */
err = device_for_each_child(&pdev->dev, l2cache_pmu,
l2_cache_pmu_probe_cluster);
if (err)
return err;
if (l2cache_pmu->num_pmus == 0) {
dev_err(&pdev->dev, "No hardware L2 cache PMUs found\n");
return -ENODEV;
}
err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
&l2cache_pmu->node);
if (err) {
dev_err(&pdev->dev, "Error %d registering hotplug", err);
return err;
}
err = perf_pmu_register(&l2cache_pmu->pmu, l2cache_pmu->pmu.name, -1);
if (err) {
dev_err(&pdev->dev, "Error %d registering L2 cache PMU\n", err);
goto out_unregister;
}
dev_info(&pdev->dev, "Registered L2 cache PMU using %d HW PMUs\n",
l2cache_pmu->num_pmus);
return err;
out_unregister:
cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
&l2cache_pmu->node);
return err;
}
static int l2_cache_pmu_remove(struct platform_device *pdev)
{
struct l2cache_pmu *l2cache_pmu =
to_l2cache_pmu(platform_get_drvdata(pdev));
perf_pmu_unregister(&l2cache_pmu->pmu);
cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
&l2cache_pmu->node);
return 0;
}
static struct platform_driver l2_cache_pmu_driver = {
.driver = {
.name = "qcom-l2cache-pmu",
.acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match),
.suppress_bind_attrs = true,
},
.probe = l2_cache_pmu_probe,
.remove = l2_cache_pmu_remove,
};
static int __init register_l2_cache_pmu_driver(void)
{
int err;
err = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
"AP_PERF_ARM_QCOM_L2_ONLINE",
l2cache_pmu_online_cpu,
l2cache_pmu_offline_cpu);
if (err)
return err;
return platform_driver_register(&l2_cache_pmu_driver);
}
device_initcall(register_l2_cache_pmu_driver);
| linux-master | drivers/perf/qcom_l2_pmu.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright 2023 NXP
#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/perf_event.h>
/* Performance monitor configuration */
#define PMCFG1 0x00
#define PMCFG1_RD_TRANS_FILT_EN BIT(31)
#define PMCFG1_WR_TRANS_FILT_EN BIT(30)
#define PMCFG1_RD_BT_FILT_EN BIT(29)
#define PMCFG1_ID_MASK GENMASK(17, 0)
#define PMCFG2 0x04
#define PMCFG2_ID GENMASK(17, 0)
/* Global control register affects all counters and takes priority over local control registers */
#define PMGC0 0x40
/* Global control register bits */
#define PMGC0_FAC BIT(31)
#define PMGC0_PMIE BIT(30)
#define PMGC0_FCECE BIT(29)
/*
 * The 64-bit counter 0 is exclusively dedicated to counting cycles.
 * The 32-bit counters monitor counter-specific events in addition to
 * counting the common reference events.
*/
#define PMLCA(n) (0x40 + 0x10 + (0x10 * n))
#define PMLCB(n) (0x40 + 0x14 + (0x10 * n))
#define PMC(n) (0x40 + 0x18 + (0x10 * n))
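/*
 * Each counter n owns a 0x10-byte register window starting at 0x50:
 * PMLCAn, PMLCBn and the counter value PMCn.
 */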
/* Local control register bits */
#define PMLCA_FC BIT(31)
#define PMLCA_CE BIT(26)
#define PMLCA_EVENT GENMASK(22, 16)
#define NUM_COUNTERS 11
#define CYCLES_COUNTER 0
#define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
#define DDR_PERF_DEV_NAME "imx9_ddr"
#define DDR_CPUHP_CB_NAME DDR_PERF_DEV_NAME "_perf_pmu"
static DEFINE_IDA(ddr_ida);
struct imx_ddr_devtype_data {
const char *identifier; /* system PMU identifier for userspace */
};
struct ddr_pmu {
struct pmu pmu;
void __iomem *base;
unsigned int cpu;
struct hlist_node node;
struct device *dev;
struct perf_event *events[NUM_COUNTERS];
int active_events;
enum cpuhp_state cpuhp_state;
const struct imx_ddr_devtype_data *devtype_data;
int irq;
int id;
};
static const struct imx_ddr_devtype_data imx93_devtype_data = {
.identifier = "imx93",
};
static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
{.compatible = "fsl,imx93-ddr-pmu", .data = &imx93_devtype_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);
static ssize_t ddr_perf_identifier_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
struct ddr_pmu *pmu = dev_get_drvdata(dev);
return sysfs_emit(page, "%s\n", pmu->devtype_data->identifier);
}
static struct device_attribute ddr_perf_identifier_attr =
__ATTR(identifier, 0444, ddr_perf_identifier_show, NULL);
static struct attribute *ddr_perf_identifier_attrs[] = {
&ddr_perf_identifier_attr.attr,
NULL,
};
static struct attribute_group ddr_perf_identifier_attr_group = {
.attrs = ddr_perf_identifier_attrs,
};
static ssize_t ddr_perf_cpumask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ddr_pmu *pmu = dev_get_drvdata(dev);
return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}
static struct device_attribute ddr_perf_cpumask_attr =
__ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);
static struct attribute *ddr_perf_cpumask_attrs[] = {
&ddr_perf_cpumask_attr.attr,
NULL,
};
static const struct attribute_group ddr_perf_cpumask_attr_group = {
.attrs = ddr_perf_cpumask_attrs,
};
static ssize_t ddr_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
#define IMX9_DDR_PMU_EVENT_ATTR(_name, _id) \
(&((struct perf_pmu_events_attr[]) { \
{ .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\
.id = _id, } \
})[0].attr.attr)
static struct attribute *ddr_perf_events_attrs[] = {
/* counter0 cycles event */
IMX9_DDR_PMU_EVENT_ATTR(cycles, 0),
/* reference events for all normal counters, need to assert the DEBUG19[21] bit */
IMX9_DDR_PMU_EVENT_ATTR(ddrc_ddrc1_rmw_for_ecc, 12),
IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_rreorder, 13),
IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_wreorder, 14),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_0, 15),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_1, 16),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_2, 17),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_3, 18),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_4, 19),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_5, 22),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_6, 23),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_7, 24),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_8, 25),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_9, 26),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_10, 27),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_11, 28),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_12, 31),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_13, 59),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_15, 61),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_29, 63),
/* counter1 specific events */
IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_0, 64),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_1, 65),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_2, 66),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_3, 67),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_4, 68),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_5, 69),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_6, 70),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_7, 71),
/* counter2 specific events */
IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_0, 64),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_1, 65),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_2, 66),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_3, 67),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_4, 68),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_5, 69),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_6, 70),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_7, 71),
IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_empty, 72),
IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_trans_filt, 73),
/* counter3 specific events */
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_0, 64),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_1, 65),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_2, 66),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_3, 67),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_4, 68),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_5, 69),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_6, 70),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_7, 71),
IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_full, 72),
IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_wr_trans_filt, 73),
/* counter4 specific events */
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_0, 64),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_1, 65),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_2, 66),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_3, 67),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_4, 68),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_5, 69),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_6, 70),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_7, 71),
IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2_rmw, 72),
IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt, 73),
/* counter5 specific events */
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_0, 64),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_1, 65),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_2, 66),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_3, 67),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_4, 68),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_5, 69),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_6, 70),
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_7, 71),
IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq1, 72),
/* counter6 specific events */
IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_end_0, 64),
IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2, 72),
/* counter7 specific events */
IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_2_full, 64),
IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq0, 65),
/* counter8 specific events */
IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_bias_switched, 64),
IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_4_full, 65),
/* counter9 specific events */
IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq1, 65),
IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_3_4_full, 66),
/* counter10 specific events */
IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_misc_mrk, 65),
IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq0, 66),
NULL,
};
static const struct attribute_group ddr_perf_events_attr_group = {
.name = "events",
.attrs = ddr_perf_events_attrs,
};
PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(counter, "config:8-15");
PMU_FORMAT_ATTR(axi_id, "config1:0-17");
PMU_FORMAT_ATTR(axi_mask, "config2:0-17");
static struct attribute *ddr_perf_format_attrs[] = {
&format_attr_event.attr,
&format_attr_counter.attr,
&format_attr_axi_id.attr,
&format_attr_axi_mask.attr,
NULL,
};
static const struct attribute_group ddr_perf_format_attr_group = {
.name = "format",
.attrs = ddr_perf_format_attrs,
};
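/*
 * Example usage (a sketch, not taken from this driver: the PMU instance name
 * "imx9_ddr0" and the chosen event/filter values below are assumptions):
 *
 *   perf stat -a -e imx9_ddr0/eddrtq_pm_rd_trans_filt,counter=2,axi_id=0x10,axi_mask=0x3ffff/ sleep 1
 *
 * "event" occupies config[7:0] and "counter" config[15:8], matching the
 * decoding in ddr_perf_event_add()/ddr_perf_monitor_config() below, while
 * axi_id (config1) and axi_mask (config2) feed the AXI transaction
 * filtering programmed into PMCFG2/PMCFG1.
 */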
static const struct attribute_group *attr_groups[] = {
&ddr_perf_identifier_attr_group,
&ddr_perf_cpumask_attr_group,
&ddr_perf_events_attr_group,
&ddr_perf_format_attr_group,
NULL,
};
static void ddr_perf_clear_counter(struct ddr_pmu *pmu, int counter)
{
if (counter == CYCLES_COUNTER) {
writel(0, pmu->base + PMC(counter) + 0x4);
writel(0, pmu->base + PMC(counter));
} else {
writel(0, pmu->base + PMC(counter));
}
}
static u64 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
{
u32 val_lower, val_upper;
u64 val;
if (counter != CYCLES_COUNTER) {
val = readl_relaxed(pmu->base + PMC(counter));
goto out;
}
/* special handling for reading 64bit cycle counter */
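/*
 * Re-read the upper half until it is stable, so that a carry from the
 * lower 32 bits between the two reads cannot yield a torn 64-bit value.
 */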
do {
val_upper = readl_relaxed(pmu->base + PMC(counter) + 0x4);
val_lower = readl_relaxed(pmu->base + PMC(counter));
} while (val_upper != readl_relaxed(pmu->base + PMC(counter) + 0x4));
val = val_upper;
val = (val << 32);
val |= val_lower;
out:
return val;
}
static void ddr_perf_counter_global_config(struct ddr_pmu *pmu, bool enable)
{
u32 ctrl;
ctrl = readl_relaxed(pmu->base + PMGC0);
if (enable) {
/*
* The performance monitor must be reset before event counting
* sequences. The performance monitor can be reset by first freezing
* one or more counters and then clearing the freeze condition to
* allow the counters to count according to the settings in the
* performance monitor registers. Counters can be frozen individually
* by setting PMLCAn[FC] bits, or simultaneously by setting PMGC0[FAC].
* Simply clearing these freeze bits will then allow the performance
* monitor to begin counting based on the register settings.
*/
ctrl |= PMGC0_FAC;
writel(ctrl, pmu->base + PMGC0);
/*
* Clear the freeze-all-counters bit, enable the PMU interrupt, and
* enable freezing of the counters on an enabled condition (FCECE).
*/
ctrl &= ~PMGC0_FAC;
ctrl |= PMGC0_PMIE | PMGC0_FCECE;
writel(ctrl, pmu->base + PMGC0);
} else {
ctrl |= PMGC0_FAC;
ctrl &= ~(PMGC0_PMIE | PMGC0_FCECE);
writel(ctrl, pmu->base + PMGC0);
}
}
static void ddr_perf_counter_local_config(struct ddr_pmu *pmu, int config,
int counter, bool enable)
{
u32 ctrl_a;
ctrl_a = readl_relaxed(pmu->base + PMLCA(counter));
if (enable) {
ctrl_a |= PMLCA_FC;
writel(ctrl_a, pmu->base + PMLCA(counter));
ddr_perf_clear_counter(pmu, counter);
/* Unfreeze the counter, enable its condition, and program the event. */
ctrl_a &= ~PMLCA_FC;
ctrl_a |= PMLCA_CE;
ctrl_a &= ~FIELD_PREP(PMLCA_EVENT, 0x7F);
ctrl_a |= FIELD_PREP(PMLCA_EVENT, (config & 0x000000FF));
writel(ctrl_a, pmu->base + PMLCA(counter));
} else {
/* Freeze counter. */
ctrl_a |= PMLCA_FC;
writel(ctrl_a, pmu->base + PMLCA(counter));
}
}
static void ddr_perf_monitor_config(struct ddr_pmu *pmu, int cfg, int cfg1, int cfg2)
{
u32 pmcfg1, pmcfg2;
int event, counter;
event = cfg & 0x000000FF;
counter = (cfg & 0x0000FF00) >> 8;
pmcfg1 = readl_relaxed(pmu->base + PMCFG1);
if (counter == 2 && event == 73)
pmcfg1 |= PMCFG1_RD_TRANS_FILT_EN;
else if (counter == 2 && event != 73)
pmcfg1 &= ~PMCFG1_RD_TRANS_FILT_EN;
if (counter == 3 && event == 73)
pmcfg1 |= PMCFG1_WR_TRANS_FILT_EN;
else if (counter == 3 && event != 73)
pmcfg1 &= ~PMCFG1_WR_TRANS_FILT_EN;
if (counter == 4 && event == 73)
pmcfg1 |= PMCFG1_RD_BT_FILT_EN;
else if (counter == 4 && event != 73)
pmcfg1 &= ~PMCFG1_RD_BT_FILT_EN;
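/*
 * config1 (axi_id) and config2 (axi_mask) from the format attributes are
 * programmed into PMCFG2 and PMCFG1 respectively. Event 73 on counters
 * 2/3/4 selects the filtered read-transaction, write-transaction and
 * read-beat events (eddrtq_pm_*_filt above) that use this AXI ID filter.
 */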
pmcfg1 &= ~FIELD_PREP(PMCFG1_ID_MASK, 0x3FFFF);
pmcfg1 |= FIELD_PREP(PMCFG1_ID_MASK, cfg2);
writel(pmcfg1, pmu->base + PMCFG1);
pmcfg2 = readl_relaxed(pmu->base + PMCFG2);
pmcfg2 &= ~FIELD_PREP(PMCFG2_ID, 0x3FFFF);
pmcfg2 |= FIELD_PREP(PMCFG2_ID, cfg1);
writel(pmcfg2, pmu->base + PMCFG2);
}
static void ddr_perf_event_update(struct perf_event *event)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
u64 new_raw_count;
new_raw_count = ddr_perf_read_counter(pmu, counter);
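/*
 * The counter is zeroed after every read (and when it is started), so the
 * raw value read back is already the delta and can be accumulated directly
 * without any prev_count arithmetic.
 */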
local64_add(new_raw_count, &event->count);
/* clear counter's value every time */
ddr_perf_clear_counter(pmu, counter);
}
static int ddr_perf_event_init(struct perf_event *event)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
struct perf_event *sibling;
if (event->attr.type != event->pmu->type)
return -ENOENT;
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EOPNOTSUPP;
if (event->cpu < 0) {
dev_warn(pmu->dev, "Can't provide per-task data!\n");
return -EOPNOTSUPP;
}
/*
* We must NOT create groups containing mixed PMUs, although software
* events are acceptable (for example to create a CCN group
* periodically read when an hrtimer (aka cpu-clock) leader triggers).
*/
if (event->group_leader->pmu != event->pmu &&
!is_software_event(event->group_leader))
return -EINVAL;
for_each_sibling_event(sibling, event->group_leader) {
if (sibling->pmu != event->pmu &&
!is_software_event(sibling))
return -EINVAL;
}
event->cpu = pmu->cpu;
hwc->idx = -1;
return 0;
}
static void ddr_perf_event_start(struct perf_event *event, int flags)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
local64_set(&hwc->prev_count, 0);
ddr_perf_counter_local_config(pmu, event->attr.config, counter, true);
hwc->state = 0;
}
static int ddr_perf_event_add(struct perf_event *event, int flags)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int cfg = event->attr.config;
int cfg1 = event->attr.config1;
int cfg2 = event->attr.config2;
int counter;
counter = (cfg & 0x0000FF00) >> 8;
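/*
 * The target counter is chosen by the user through the "counter" format
 * field (config[15:8]) rather than being allocated dynamically.
 */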
pmu->events[counter] = event;
pmu->active_events++;
hwc->idx = counter;
hwc->state |= PERF_HES_STOPPED;
if (flags & PERF_EF_START)
ddr_perf_event_start(event, flags);
/* read trans, write trans, read beat */
ddr_perf_monitor_config(pmu, cfg, cfg1, cfg2);
return 0;
}
static void ddr_perf_event_stop(struct perf_event *event, int flags)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
ddr_perf_counter_local_config(pmu, event->attr.config, counter, false);
ddr_perf_event_update(event);
hwc->state |= PERF_HES_STOPPED;
}
static void ddr_perf_event_del(struct perf_event *event, int flags)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
ddr_perf_event_stop(event, PERF_EF_UPDATE);
pmu->active_events--;
hwc->idx = -1;
}
static void ddr_perf_pmu_enable(struct pmu *pmu)
{
struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
ddr_perf_counter_global_config(ddr_pmu, true);
}
static void ddr_perf_pmu_disable(struct pmu *pmu)
{
struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
ddr_perf_counter_global_config(ddr_pmu, false);
}
static void ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
struct device *dev)
{
*pmu = (struct ddr_pmu) {
.pmu = (struct pmu) {
.module = THIS_MODULE,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
.attr_groups = attr_groups,
.event_init = ddr_perf_event_init,
.add = ddr_perf_event_add,
.del = ddr_perf_event_del,
.start = ddr_perf_event_start,
.stop = ddr_perf_event_stop,
.read = ddr_perf_event_update,
.pmu_enable = ddr_perf_pmu_enable,
.pmu_disable = ddr_perf_pmu_disable,
},
.base = base,
.dev = dev,
};
}
static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
{
struct ddr_pmu *pmu = (struct ddr_pmu *)p;
struct perf_event *event;
int i;
/*
* Counters can generate an interrupt on an overflow when the MSB of a
* counter changes from 0 to 1. For the interrupt to be signalled, the
* following conditions must be satisfied:
* PMGC0[PMIE] = 1, PMGC0[FCECE] = 1, PMLCAn[CE] = 1
* When an interrupt is signalled, PMGC0[FAC] is set by hardware and
* all of the registers are frozen.
* Software can clear the interrupt condition by resetting the performance
* monitor and clearing the most significant bit of the counter that
* generated the overflow.
*/
for (i = 0; i < NUM_COUNTERS; i++) {
if (!pmu->events[i])
continue;
event = pmu->events[i];
ddr_perf_event_update(event);
}
ddr_perf_counter_global_config(pmu, true);
return IRQ_HANDLED;
}
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
int target;
if (cpu != pmu->cpu)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&pmu->pmu, cpu, target);
pmu->cpu = target;
WARN_ON(irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu)));
return 0;
}
static int ddr_perf_probe(struct platform_device *pdev)
{
struct ddr_pmu *pmu;
void __iomem *base;
int ret, irq;
char *name;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
if (!pmu)
return -ENOMEM;
ddr_perf_init(pmu, base, &pdev->dev);
pmu->devtype_data = of_device_get_match_data(&pdev->dev);
platform_set_drvdata(pdev, pmu);
pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d", pmu->id);
if (!name) {
ret = -ENOMEM;
goto format_string_err;
}
pmu->cpu = raw_smp_processor_id();
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DDR_CPUHP_CB_NAME,
NULL, ddr_perf_offline_cpu);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to add callbacks for multi state\n");
goto cpuhp_state_err;
}
pmu->cpuhp_state = ret;
/* Register the pmu instance for cpu hotplug */
ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
if (ret) {
dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
goto cpuhp_instance_err;
}
/* Request irq */
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto ddr_perf_err;
}
ret = devm_request_irq(&pdev->dev, irq, ddr_perf_irq_handler,
IRQF_NOBALANCING | IRQF_NO_THREAD,
DDR_CPUHP_CB_NAME, pmu);
if (ret < 0) {
dev_err(&pdev->dev, "Request irq failed: %d", ret);
goto ddr_perf_err;
}
pmu->irq = irq;
ret = irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu));
if (ret) {
dev_err(pmu->dev, "Failed to set interrupt affinity\n");
goto ddr_perf_err;
}
ret = perf_pmu_register(&pmu->pmu, name, -1);
if (ret)
goto ddr_perf_err;
return 0;
ddr_perf_err:
cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_instance_err:
cpuhp_remove_multi_state(pmu->cpuhp_state);
cpuhp_state_err:
format_string_err:
ida_simple_remove(&ddr_ida, pmu->id);
dev_warn(&pdev->dev, "i.MX9 DDR Perf PMU failed (%d), disabled\n", ret);
return ret;
}
static int ddr_perf_remove(struct platform_device *pdev)
{
struct ddr_pmu *pmu = platform_get_drvdata(pdev);
cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_remove_multi_state(pmu->cpuhp_state);
perf_pmu_unregister(&pmu->pmu);
ida_simple_remove(&ddr_ida, pmu->id);
return 0;
}
static struct platform_driver imx_ddr_pmu_driver = {
.driver = {
.name = "imx9-ddr-pmu",
.of_match_table = imx_ddr_pmu_dt_ids,
.suppress_bind_attrs = true,
},
.probe = ddr_perf_probe,
.remove = ddr_perf_remove,
};
module_platform_driver(imx_ddr_pmu_driver);
MODULE_AUTHOR("Xu Yang <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DDRC PerfMon for i.MX9 SoCs");
| linux-master | drivers/perf/fsl_imx9_ddr_perf.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ARM DMC-620 memory controller PMU driver
*
* Copyright (C) 2020 Ampere Computing LLC.
*/
#define DMC620_PMUNAME "arm_dmc620"
#define DMC620_DRVNAME DMC620_PMUNAME "_pmu"
#define pr_fmt(fmt) DMC620_DRVNAME ": " fmt
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/refcount.h>
#define DMC620_PA_SHIFT 12
#define DMC620_CNT_INIT 0x80000000
#define DMC620_CNT_MAX_PERIOD 0xffffffff
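/*
 * Counters appear to be (re)started from the half-way value (see
 * dmc620_pmu_event_set_period()), so an overflow should occur after at most
 * 2^31 increments and the 32-bit delta masking in dmc620_pmu_event_update()
 * stays unambiguous.
 */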
#define DMC620_PMU_CLKDIV2_MAX_COUNTERS 8
#define DMC620_PMU_CLK_MAX_COUNTERS 2
#define DMC620_PMU_MAX_COUNTERS \
(DMC620_PMU_CLKDIV2_MAX_COUNTERS + DMC620_PMU_CLK_MAX_COUNTERS)
/*
* The PMU registers start at 0xA00 in the DMC-620 memory map, and these
* offsets are relative to that base.
*
* Each counter has a group of control/value registers, and the
* DMC620_PMU_COUNTERn offsets are within a counter group.
*
* The counter register groups start at 0xA10.
*/
#define DMC620_PMU_OVERFLOW_STATUS_CLKDIV2 0x8
#define DMC620_PMU_OVERFLOW_STATUS_CLKDIV2_MASK \
(DMC620_PMU_CLKDIV2_MAX_COUNTERS - 1)
#define DMC620_PMU_OVERFLOW_STATUS_CLK 0xC
#define DMC620_PMU_OVERFLOW_STATUS_CLK_MASK \
(DMC620_PMU_CLK_MAX_COUNTERS - 1)
#define DMC620_PMU_COUNTERS_BASE 0x10
#define DMC620_PMU_COUNTERn_MASK_31_00 0x0
#define DMC620_PMU_COUNTERn_MASK_63_32 0x4
#define DMC620_PMU_COUNTERn_MATCH_31_00 0x8
#define DMC620_PMU_COUNTERn_MATCH_63_32 0xC
#define DMC620_PMU_COUNTERn_CONTROL 0x10
#define DMC620_PMU_COUNTERn_CONTROL_ENABLE BIT(0)
#define DMC620_PMU_COUNTERn_CONTROL_INVERT BIT(1)
#define DMC620_PMU_COUNTERn_CONTROL_EVENT_MUX GENMASK(6, 2)
#define DMC620_PMU_COUNTERn_CONTROL_INCR_MUX GENMASK(8, 7)
#define DMC620_PMU_COUNTERn_VALUE 0x20
/* Offset of the registers for a given counter, relative to 0xA00 */
#define DMC620_PMU_COUNTERn_OFFSET(n) \
(DMC620_PMU_COUNTERS_BASE + 0x28 * (n))
/*
* dmc620_pmu_irqs_lock: protects dmc620_pmu_irqs list
* dmc620_pmu_node_lock: protects pmus_node lists in all dmc620_pmu instances
*/
static DEFINE_MUTEX(dmc620_pmu_irqs_lock);
static DEFINE_MUTEX(dmc620_pmu_node_lock);
static LIST_HEAD(dmc620_pmu_irqs);
struct dmc620_pmu_irq {
struct hlist_node node;
struct list_head pmus_node;
struct list_head irqs_node;
refcount_t refcount;
unsigned int irq_num;
unsigned int cpu;
};
struct dmc620_pmu {
struct pmu pmu;
void __iomem *base;
struct dmc620_pmu_irq *irq;
struct list_head pmus_node;
/*
* All clkdiv2 and clk counters live in the same array: the first
* DMC620_PMU_CLKDIV2_MAX_COUNTERS entries belong to the clkdiv2
* counters and the last DMC620_PMU_CLK_MAX_COUNTERS entries to the
* clk counters.
*/
DECLARE_BITMAP(used_mask, DMC620_PMU_MAX_COUNTERS);
struct perf_event *events[DMC620_PMU_MAX_COUNTERS];
};
#define to_dmc620_pmu(p) (container_of(p, struct dmc620_pmu, pmu))
static int cpuhp_state_num;
struct dmc620_pmu_event_attr {
struct device_attribute attr;
u8 clkdiv2;
u8 eventid;
};
static ssize_t
dmc620_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct dmc620_pmu_event_attr *eattr;
eattr = container_of(attr, typeof(*eattr), attr);
return sysfs_emit(page, "event=0x%x,clkdiv2=0x%x\n", eattr->eventid, eattr->clkdiv2);
}
#define DMC620_PMU_EVENT_ATTR(_name, _eventid, _clkdiv2) \
(&((struct dmc620_pmu_event_attr[]) {{ \
.attr = __ATTR(_name, 0444, dmc620_pmu_event_show, NULL), \
.clkdiv2 = _clkdiv2, \
.eventid = _eventid, \
}})[0].attr.attr)
static struct attribute *dmc620_pmu_events_attrs[] = {
/* clkdiv2 events list */
DMC620_PMU_EVENT_ATTR(clkdiv2_cycle_count, 0x0, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_allocate, 0x1, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_queue_depth, 0x2, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_waiting_for_wr_data, 0x3, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_read_backlog, 0x4, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_waiting_for_mi, 0x5, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_hazard_resolution, 0x6, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_enqueue, 0x7, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_arbitrate, 0x8, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_lrank_turnaround_activate, 0x9, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_prank_turnaround_activate, 0xa, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_read_depth, 0xb, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_write_depth, 0xc, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_highigh_qos_depth, 0xd, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_high_qos_depth, 0xe, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_medium_qos_depth, 0xf, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_low_qos_depth, 0x10, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_activate, 0x11, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_rdwr, 0x12, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_refresh, 0x13, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_training_request, 0x14, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_t_mac_tracker, 0x15, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_bk_fsm_tracker, 0x16, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_bk_open_tracker, 0x17, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_ranks_in_pwr_down, 0x18, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_ranks_in_sref, 0x19, 1),
/* clk events list */
DMC620_PMU_EVENT_ATTR(clk_cycle_count, 0x0, 0),
DMC620_PMU_EVENT_ATTR(clk_request, 0x1, 0),
DMC620_PMU_EVENT_ATTR(clk_upload_stall, 0x2, 0),
NULL,
};
static const struct attribute_group dmc620_pmu_events_attr_group = {
.name = "events",
.attrs = dmc620_pmu_events_attrs,
};
/* User ABI */
#define ATTR_CFG_FLD_mask_CFG config
#define ATTR_CFG_FLD_mask_LO 0
#define ATTR_CFG_FLD_mask_HI 44
#define ATTR_CFG_FLD_match_CFG config1
#define ATTR_CFG_FLD_match_LO 0
#define ATTR_CFG_FLD_match_HI 44
#define ATTR_CFG_FLD_invert_CFG config2
#define ATTR_CFG_FLD_invert_LO 0
#define ATTR_CFG_FLD_invert_HI 0
#define ATTR_CFG_FLD_incr_CFG config2
#define ATTR_CFG_FLD_incr_LO 1
#define ATTR_CFG_FLD_incr_HI 2
#define ATTR_CFG_FLD_event_CFG config2
#define ATTR_CFG_FLD_event_LO 3
#define ATTR_CFG_FLD_event_HI 8
#define ATTR_CFG_FLD_clkdiv2_CFG config2
#define ATTR_CFG_FLD_clkdiv2_LO 9
#define ATTR_CFG_FLD_clkdiv2_HI 9
#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
(lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
__GEN_PMU_FORMAT_ATTR(cfg, lo, hi)
#define GEN_PMU_FORMAT_ATTR(name) \
PMU_FORMAT_ATTR(name, \
_GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \
ATTR_CFG_FLD_##name##_LO, \
ATTR_CFG_FLD_##name##_HI))
#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \
((((attr)->cfg) >> lo) & GENMASK_ULL(hi - lo, 0))
#define ATTR_CFG_GET_FLD(attr, name) \
_ATTR_CFG_GET_FLD(attr, \
ATTR_CFG_FLD_##name##_CFG, \
ATTR_CFG_FLD_##name##_LO, \
ATTR_CFG_FLD_##name##_HI)
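/*
 * For example, GEN_PMU_FORMAT_ATTR(event) below expands (via the field
 * definitions above) to PMU_FORMAT_ATTR(event, "config2:3-8"), and
 * ATTR_CFG_GET_FLD(attr, event) extracts bits 8:3 of attr->config2.
 */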
GEN_PMU_FORMAT_ATTR(mask);
GEN_PMU_FORMAT_ATTR(match);
GEN_PMU_FORMAT_ATTR(invert);
GEN_PMU_FORMAT_ATTR(incr);
GEN_PMU_FORMAT_ATTR(event);
GEN_PMU_FORMAT_ATTR(clkdiv2);
static struct attribute *dmc620_pmu_formats_attrs[] = {
&format_attr_mask.attr,
&format_attr_match.attr,
&format_attr_invert.attr,
&format_attr_incr.attr,
&format_attr_event.attr,
&format_attr_clkdiv2.attr,
NULL,
};
static const struct attribute_group dmc620_pmu_format_attr_group = {
.name = "format",
.attrs = dmc620_pmu_formats_attrs,
};
static ssize_t dmc620_pmu_cpumask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(dev_get_drvdata(dev));
return cpumap_print_to_pagebuf(true, buf,
cpumask_of(dmc620_pmu->irq->cpu));
}
static struct device_attribute dmc620_pmu_cpumask_attr =
__ATTR(cpumask, 0444, dmc620_pmu_cpumask_show, NULL);
static struct attribute *dmc620_pmu_cpumask_attrs[] = {
&dmc620_pmu_cpumask_attr.attr,
NULL,
};
static const struct attribute_group dmc620_pmu_cpumask_attr_group = {
.attrs = dmc620_pmu_cpumask_attrs,
};
static const struct attribute_group *dmc620_pmu_attr_groups[] = {
&dmc620_pmu_events_attr_group,
&dmc620_pmu_format_attr_group,
&dmc620_pmu_cpumask_attr_group,
NULL,
};
static inline
u32 dmc620_pmu_creg_read(struct dmc620_pmu *dmc620_pmu,
unsigned int idx, unsigned int reg)
{
return readl(dmc620_pmu->base + DMC620_PMU_COUNTERn_OFFSET(idx) + reg);
}
static inline
void dmc620_pmu_creg_write(struct dmc620_pmu *dmc620_pmu,
unsigned int idx, unsigned int reg, u32 val)
{
writel(val, dmc620_pmu->base + DMC620_PMU_COUNTERn_OFFSET(idx) + reg);
}
static
unsigned int dmc620_event_to_counter_control(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
unsigned int reg = 0;
reg |= FIELD_PREP(DMC620_PMU_COUNTERn_CONTROL_INVERT,
ATTR_CFG_GET_FLD(attr, invert));
reg |= FIELD_PREP(DMC620_PMU_COUNTERn_CONTROL_EVENT_MUX,
ATTR_CFG_GET_FLD(attr, event));
reg |= FIELD_PREP(DMC620_PMU_COUNTERn_CONTROL_INCR_MUX,
ATTR_CFG_GET_FLD(attr, incr));
return reg;
}
static int dmc620_get_event_idx(struct perf_event *event)
{
struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
int idx, start_idx, end_idx;
if (ATTR_CFG_GET_FLD(&event->attr, clkdiv2)) {
start_idx = 0;
end_idx = DMC620_PMU_CLKDIV2_MAX_COUNTERS;
} else {
start_idx = DMC620_PMU_CLKDIV2_MAX_COUNTERS;
end_idx = DMC620_PMU_MAX_COUNTERS;
}
for (idx = start_idx; idx < end_idx; ++idx) {
if (!test_and_set_bit(idx, dmc620_pmu->used_mask))
return idx;
}
/* The counters are all in use. */
return -EAGAIN;
}
static inline
u64 dmc620_pmu_read_counter(struct perf_event *event)
{
struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
return dmc620_pmu_creg_read(dmc620_pmu,
event->hw.idx, DMC620_PMU_COUNTERn_VALUE);
}
static void dmc620_pmu_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
u64 delta, prev_count, new_count;
do {
/* We may also be called from the irq handler */
prev_count = local64_read(&hwc->prev_count);
new_count = dmc620_pmu_read_counter(event);
} while (local64_cmpxchg(&hwc->prev_count,
prev_count, new_count) != prev_count);
delta = (new_count - prev_count) & DMC620_CNT_MAX_PERIOD;
local64_add(delta, &event->count);
}
static void dmc620_pmu_event_set_period(struct perf_event *event)
{
struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
local64_set(&event->hw.prev_count, DMC620_CNT_INIT);
dmc620_pmu_creg_write(dmc620_pmu,
event->hw.idx, DMC620_PMU_COUNTERn_VALUE, DMC620_CNT_INIT);
}
static void dmc620_pmu_enable_counter(struct perf_event *event)
{
struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
u32 reg;
reg = dmc620_event_to_counter_control(event) | DMC620_PMU_COUNTERn_CONTROL_ENABLE;
dmc620_pmu_creg_write(dmc620_pmu,
event->hw.idx, DMC620_PMU_COUNTERn_CONTROL, reg);
}
static void dmc620_pmu_disable_counter(struct perf_event *event)
{
struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
dmc620_pmu_creg_write(dmc620_pmu,
event->hw.idx, DMC620_PMU_COUNTERn_CONTROL, 0);
}
static irqreturn_t dmc620_pmu_handle_irq(int irq_num, void *data)
{
struct dmc620_pmu_irq *irq = data;
struct dmc620_pmu *dmc620_pmu;
irqreturn_t ret = IRQ_NONE;
rcu_read_lock();
list_for_each_entry_rcu(dmc620_pmu, &irq->pmus_node, pmus_node) {
unsigned long status;
struct perf_event *event;
unsigned int idx;
/*
* HW doesn't provide a control to atomically disable all counters.
* To prevent a race (an overflow occurring while the status register is
* being cleared), disable all events before continuing.
*/
for (idx = 0; idx < DMC620_PMU_MAX_COUNTERS; idx++) {
event = dmc620_pmu->events[idx];
if (!event)
continue;
dmc620_pmu_disable_counter(event);
}
status = readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
status |= (readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK) <<
DMC620_PMU_CLKDIV2_MAX_COUNTERS);
if (status) {
for_each_set_bit(idx, &status,
DMC620_PMU_MAX_COUNTERS) {
event = dmc620_pmu->events[idx];
if (WARN_ON_ONCE(!event))
continue;
dmc620_pmu_event_update(event);
dmc620_pmu_event_set_period(event);
}
if (status & DMC620_PMU_OVERFLOW_STATUS_CLKDIV2_MASK)
writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
if ((status >> DMC620_PMU_CLKDIV2_MAX_COUNTERS) &
DMC620_PMU_OVERFLOW_STATUS_CLK_MASK)
writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK);
}
for (idx = 0; idx < DMC620_PMU_MAX_COUNTERS; idx++) {
event = dmc620_pmu->events[idx];
if (!event)
continue;
if (!(event->hw.state & PERF_HES_STOPPED))
dmc620_pmu_enable_counter(event);
}
ret = IRQ_HANDLED;
}
rcu_read_unlock();
return ret;
}
static struct dmc620_pmu_irq *__dmc620_pmu_get_irq(int irq_num)
{
struct dmc620_pmu_irq *irq;
int ret;
list_for_each_entry(irq, &dmc620_pmu_irqs, irqs_node)
if (irq->irq_num == irq_num && refcount_inc_not_zero(&irq->refcount))
return irq;
irq = kzalloc(sizeof(*irq), GFP_KERNEL);
if (!irq)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&irq->pmus_node);
/* Pick one CPU to be the preferred one to use */
irq->cpu = raw_smp_processor_id();
refcount_set(&irq->refcount, 1);
ret = request_irq(irq_num, dmc620_pmu_handle_irq,
IRQF_NOBALANCING | IRQF_NO_THREAD,
"dmc620-pmu", irq);
if (ret)
goto out_free_aff;
ret = irq_set_affinity(irq_num, cpumask_of(irq->cpu));
if (ret)
goto out_free_irq;
ret = cpuhp_state_add_instance_nocalls(cpuhp_state_num, &irq->node);
if (ret)
goto out_free_irq;
irq->irq_num = irq_num;
list_add(&irq->irqs_node, &dmc620_pmu_irqs);
return irq;
out_free_irq:
free_irq(irq_num, irq);
out_free_aff:
kfree(irq);
return ERR_PTR(ret);
}
static int dmc620_pmu_get_irq(struct dmc620_pmu *dmc620_pmu, int irq_num)
{
struct dmc620_pmu_irq *irq;
mutex_lock(&dmc620_pmu_irqs_lock);
irq = __dmc620_pmu_get_irq(irq_num);
mutex_unlock(&dmc620_pmu_irqs_lock);
if (IS_ERR(irq))
return PTR_ERR(irq);
dmc620_pmu->irq = irq;
mutex_lock(&dmc620_pmu_node_lock);
list_add_rcu(&dmc620_pmu->pmus_node, &irq->pmus_node);
mutex_unlock(&dmc620_pmu_node_lock);
return 0;
}
static void dmc620_pmu_put_irq(struct dmc620_pmu *dmc620_pmu)
{
struct dmc620_pmu_irq *irq = dmc620_pmu->irq;
mutex_lock(&dmc620_pmu_node_lock);
list_del_rcu(&dmc620_pmu->pmus_node);
mutex_unlock(&dmc620_pmu_node_lock);
mutex_lock(&dmc620_pmu_irqs_lock);
if (!refcount_dec_and_test(&irq->refcount)) {
mutex_unlock(&dmc620_pmu_irqs_lock);
return;
}
list_del(&irq->irqs_node);
mutex_unlock(&dmc620_pmu_irqs_lock);
free_irq(irq->irq_num, irq);
cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &irq->node);
kfree(irq);
}
static int dmc620_pmu_event_init(struct perf_event *event)
{
struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
struct perf_event *sibling;
if (event->attr.type != event->pmu->type)
return -ENOENT;
/*
* DMC-620 PMUs are shared across all CPUs and cannot support
* task-bound or sampling events.
*/
if (is_sampling_event(event) ||
event->attach_state & PERF_ATTACH_TASK) {
dev_dbg(dmc620_pmu->pmu.dev,
"Can't support per-task counters\n");
return -EOPNOTSUPP;
}
/*
* Many perf core operations (e.g. event rotation) operate on a
* single CPU context. This is obvious for CPU PMUs, where one
* expects the same sets of events being observed on all CPUs,
* but can lead to issues for off-core PMUs, where each
* event could be theoretically assigned to a different CPU. To
* mitigate this, we enforce CPU assignment to one selected processor.
*/
event->cpu = dmc620_pmu->irq->cpu;
if (event->cpu < 0)
return -EINVAL;
/*
* We can't atomically disable all HW counters, so only one event is allowed,
* although software events are acceptable.
*/
if (event->group_leader != event &&
!is_software_event(event->group_leader))
return -EINVAL;
for_each_sibling_event(sibling, event->group_leader) {
if (sibling != event &&
!is_software_event(sibling))
return -EINVAL;
}
hwc->idx = -1;
return 0;
}
static void dmc620_pmu_read(struct perf_event *event)
{
dmc620_pmu_event_update(event);
}
static void dmc620_pmu_start(struct perf_event *event, int flags)
{
event->hw.state = 0;
dmc620_pmu_event_set_period(event);
dmc620_pmu_enable_counter(event);
}
static void dmc620_pmu_stop(struct perf_event *event, int flags)
{
if (event->hw.state & PERF_HES_STOPPED)
return;
dmc620_pmu_disable_counter(event);
dmc620_pmu_event_update(event);
event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
static int dmc620_pmu_add(struct perf_event *event, int flags)
{
struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
int idx;
u64 reg;
idx = dmc620_get_event_idx(event);
if (idx < 0)
return idx;
hwc->idx = idx;
dmc620_pmu->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
reg = ATTR_CFG_GET_FLD(attr, mask);
dmc620_pmu_creg_write(dmc620_pmu,
idx, DMC620_PMU_COUNTERn_MASK_31_00, lower_32_bits(reg));
dmc620_pmu_creg_write(dmc620_pmu,
idx, DMC620_PMU_COUNTERn_MASK_63_32, upper_32_bits(reg));
reg = ATTR_CFG_GET_FLD(attr, match);
dmc620_pmu_creg_write(dmc620_pmu,
idx, DMC620_PMU_COUNTERn_MATCH_31_00, lower_32_bits(reg));
dmc620_pmu_creg_write(dmc620_pmu,
idx, DMC620_PMU_COUNTERn_MATCH_63_32, upper_32_bits(reg));
if (flags & PERF_EF_START)
dmc620_pmu_start(event, PERF_EF_RELOAD);
perf_event_update_userpage(event);
return 0;
}
static void dmc620_pmu_del(struct perf_event *event, int flags)
{
struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
dmc620_pmu_stop(event, PERF_EF_UPDATE);
dmc620_pmu->events[idx] = NULL;
clear_bit(idx, dmc620_pmu->used_mask);
perf_event_update_userpage(event);
}
static int dmc620_pmu_cpu_teardown(unsigned int cpu,
struct hlist_node *node)
{
struct dmc620_pmu_irq *irq;
struct dmc620_pmu *dmc620_pmu;
unsigned int target;
irq = hlist_entry_safe(node, struct dmc620_pmu_irq, node);
if (cpu != irq->cpu)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
/* We're only reading, but this isn't the place to be involving RCU */
mutex_lock(&dmc620_pmu_node_lock);
list_for_each_entry(dmc620_pmu, &irq->pmus_node, pmus_node)
perf_pmu_migrate_context(&dmc620_pmu->pmu, irq->cpu, target);
mutex_unlock(&dmc620_pmu_node_lock);
WARN_ON(irq_set_affinity(irq->irq_num, cpumask_of(target)));
irq->cpu = target;
return 0;
}
static int dmc620_pmu_device_probe(struct platform_device *pdev)
{
struct dmc620_pmu *dmc620_pmu;
struct resource *res;
char *name;
int irq_num;
int i, ret;
dmc620_pmu = devm_kzalloc(&pdev->dev,
sizeof(struct dmc620_pmu), GFP_KERNEL);
if (!dmc620_pmu)
return -ENOMEM;
platform_set_drvdata(pdev, dmc620_pmu);
dmc620_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
.event_init = dmc620_pmu_event_init,
.add = dmc620_pmu_add,
.del = dmc620_pmu_del,
.start = dmc620_pmu_start,
.stop = dmc620_pmu_stop,
.read = dmc620_pmu_read,
.attr_groups = dmc620_pmu_attr_groups,
};
dmc620_pmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(dmc620_pmu->base))
return PTR_ERR(dmc620_pmu->base);
/* Make sure device is reset before enabling interrupt */
for (i = 0; i < DMC620_PMU_MAX_COUNTERS; i++)
dmc620_pmu_creg_write(dmc620_pmu, i, DMC620_PMU_COUNTERn_CONTROL, 0);
writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK);
irq_num = platform_get_irq(pdev, 0);
if (irq_num < 0)
return irq_num;
ret = dmc620_pmu_get_irq(dmc620_pmu, irq_num);
if (ret)
return ret;
name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"%s_%llx", DMC620_PMUNAME,
(u64)(res->start >> DMC620_PA_SHIFT));
if (!name) {
dev_err(&pdev->dev,
"Create name failed, PMU @%pa\n", &res->start);
ret = -ENOMEM;
goto out_teardown_dev;
}
ret = perf_pmu_register(&dmc620_pmu->pmu, name, -1);
if (ret)
goto out_teardown_dev;
return 0;
out_teardown_dev:
dmc620_pmu_put_irq(dmc620_pmu);
synchronize_rcu();
return ret;
}
static int dmc620_pmu_device_remove(struct platform_device *pdev)
{
struct dmc620_pmu *dmc620_pmu = platform_get_drvdata(pdev);
dmc620_pmu_put_irq(dmc620_pmu);
/* perf will synchronise RCU before devres can free dmc620_pmu */
perf_pmu_unregister(&dmc620_pmu->pmu);
return 0;
}
static const struct acpi_device_id dmc620_acpi_match[] = {
{ "ARMHD620", 0},
{},
};
MODULE_DEVICE_TABLE(acpi, dmc620_acpi_match);
static struct platform_driver dmc620_pmu_driver = {
.driver = {
.name = DMC620_DRVNAME,
.acpi_match_table = dmc620_acpi_match,
.suppress_bind_attrs = true,
},
.probe = dmc620_pmu_device_probe,
.remove = dmc620_pmu_device_remove,
};
static int __init dmc620_pmu_init(void)
{
int ret;
cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
DMC620_DRVNAME,
NULL,
dmc620_pmu_cpu_teardown);
if (cpuhp_state_num < 0)
return cpuhp_state_num;
ret = platform_driver_register(&dmc620_pmu_driver);
if (ret)
cpuhp_remove_multi_state(cpuhp_state_num);
return ret;
}
static void __exit dmc620_pmu_exit(void)
{
platform_driver_unregister(&dmc620_pmu_driver);
cpuhp_remove_multi_state(cpuhp_state_num);
}
module_init(dmc620_pmu_init);
module_exit(dmc620_pmu_exit);
MODULE_DESCRIPTION("Perf driver for the ARM DMC-620 memory controller");
MODULE_AUTHOR("Tuan Phan <[email protected]");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/perf/arm_dmc620_pmu.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2016-2020 Arm Limited
// CMN-600 Coherent Mesh Network PMU driver
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sort.h>
/* Common register stuff */
#define CMN_NODE_INFO 0x0000
#define CMN_NI_NODE_TYPE GENMASK_ULL(15, 0)
#define CMN_NI_NODE_ID GENMASK_ULL(31, 16)
#define CMN_NI_LOGICAL_ID GENMASK_ULL(47, 32)
#define CMN_NODEID_DEVID(reg) ((reg) & 3)
#define CMN_NODEID_EXT_DEVID(reg) ((reg) & 1)
#define CMN_NODEID_PID(reg) (((reg) >> 2) & 1)
#define CMN_NODEID_EXT_PID(reg) (((reg) >> 1) & 3)
#define CMN_NODEID_1x1_PID(reg) (((reg) >> 2) & 7)
#define CMN_NODEID_X(reg, bits) ((reg) >> (3 + (bits)))
#define CMN_NODEID_Y(reg, bits) (((reg) >> 3) & ((1U << (bits)) - 1))
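/*
 * On a multi-XP mesh a node ID thus decodes as { X, Y, port, device }:
 * the X and Y coordinates occupy the top bits, while the low three bits
 * hold a 1-bit port and 2-bit device ID normally, or a 2-bit port and
 * 1-bit device ID when more than two ports are in use (see arm_cmn_nid()).
 * A single-XP configuration instead uses the 3-bit CMN_NODEID_1x1_PID.
 */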
#define CMN_CHILD_INFO 0x0080
#define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0)
#define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16)
#define CMN_CHILD_NODE_ADDR GENMASK(29, 0)
#define CMN_CHILD_NODE_EXTERNAL BIT(31)
#define CMN_MAX_DIMENSION 12
#define CMN_MAX_XPS (CMN_MAX_DIMENSION * CMN_MAX_DIMENSION)
#define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)
/* The CFG node has various info besides the discovery tree */
#define CMN_CFGM_PERIPH_ID_01 0x0008
#define CMN_CFGM_PID0_PART_0 GENMASK_ULL(7, 0)
#define CMN_CFGM_PID1_PART_1 GENMASK_ULL(35, 32)
#define CMN_CFGM_PERIPH_ID_23 0x0010
#define CMN_CFGM_PID2_REVISION GENMASK_ULL(7, 4)
#define CMN_CFGM_INFO_GLOBAL 0x900
#define CMN_INFO_MULTIPLE_DTM_EN BIT_ULL(63)
#define CMN_INFO_RSP_VC_NUM GENMASK_ULL(53, 52)
#define CMN_INFO_DAT_VC_NUM GENMASK_ULL(51, 50)
#define CMN_CFGM_INFO_GLOBAL_1 0x908
#define CMN_INFO_SNP_VC_NUM GENMASK_ULL(3, 2)
#define CMN_INFO_REQ_VC_NUM GENMASK_ULL(1, 0)
/* XPs also have some local topology info which has uses too */
#define CMN_MXP__CONNECT_INFO(p) (0x0008 + 8 * (p))
#define CMN__CONNECT_INFO_DEVICE_TYPE GENMASK_ULL(4, 0)
#define CMN_MAX_PORTS 6
#define CI700_CONNECT_INFO_P2_5_OFFSET 0x10
/* PMU registers occupy the 3rd 4KB page of each node's region */
#define CMN_PMU_OFFSET 0x2000
/* For most nodes, this is all there is */
#define CMN_PMU_EVENT_SEL 0x000
#define CMN__PMU_CBUSY_SNTHROTTLE_SEL GENMASK_ULL(44, 42)
#define CMN__PMU_SN_HOME_SEL GENMASK_ULL(40, 39)
#define CMN__PMU_HBT_LBT_SEL GENMASK_ULL(38, 37)
#define CMN__PMU_CLASS_OCCUP_ID GENMASK_ULL(36, 35)
/* Technically this is 4 bits wide on DNs, but we only use 2 there anyway */
#define CMN__PMU_OCCUP1_ID GENMASK_ULL(34, 32)
/* HN-Ps are weird... */
#define CMN_HNP_PMU_EVENT_SEL 0x008
/* DTMs live in the PMU space of XP registers */
#define CMN_DTM_WPn(n) (0x1A0 + (n) * 0x18)
#define CMN_DTM_WPn_CONFIG(n) (CMN_DTM_WPn(n) + 0x00)
#define CMN_DTM_WPn_CONFIG_WP_CHN_NUM GENMASK_ULL(20, 19)
#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL2 GENMASK_ULL(18, 17)
#define CMN_DTM_WPn_CONFIG_WP_COMBINE BIT(9)
#define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(8)
#define CMN600_WPn_CONFIG_WP_COMBINE BIT(6)
#define CMN600_WPn_CONFIG_WP_EXCLUSIVE BIT(5)
#define CMN_DTM_WPn_CONFIG_WP_GRP GENMASK_ULL(5, 4)
#define CMN_DTM_WPn_CONFIG_WP_CHN_SEL GENMASK_ULL(3, 1)
#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL BIT(0)
#define CMN_DTM_WPn_VAL(n) (CMN_DTM_WPn(n) + 0x08)
#define CMN_DTM_WPn_MASK(n) (CMN_DTM_WPn(n) + 0x10)
#define CMN_DTM_PMU_CONFIG 0x210
#define CMN__PMEVCNT0_INPUT_SEL GENMASK_ULL(37, 32)
#define CMN__PMEVCNT0_INPUT_SEL_WP 0x00
#define CMN__PMEVCNT0_INPUT_SEL_XP 0x04
#define CMN__PMEVCNT0_INPUT_SEL_DEV 0x10
#define CMN__PMEVCNT0_GLOBAL_NUM GENMASK_ULL(18, 16)
#define CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(n) ((n) * 4)
#define CMN__PMEVCNT_PAIRED(n) BIT(4 + (n))
#define CMN__PMEVCNT23_COMBINED BIT(2)
#define CMN__PMEVCNT01_COMBINED BIT(1)
#define CMN_DTM_PMU_CONFIG_PMU_EN BIT(0)
#define CMN_DTM_PMEVCNT 0x220
#define CMN_DTM_PMEVCNTSR 0x240
#define CMN_DTM_UNIT_INFO 0x0910
#define CMN_DTM_NUM_COUNTERS 4
/* Want more local counters? Why not replicate the whole DTM! Ugh... */
#define CMN_DTM_OFFSET(n) ((n) * 0x200)
/* The DTC node is where the magic happens */
#define CMN_DT_DTC_CTL 0x0a00
#define CMN_DT_DTC_CTL_DT_EN BIT(0)
/* DTC counters are paired in 64-bit registers on a 16-byte stride. Yuck */
#define _CMN_DT_CNT_REG(n) ((((n) / 2) * 4 + (n) % 2) * 4)
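/*
 * e.g. _CMN_DT_CNT_REG() maps counters 0/1 to offsets 0x0/0x4 and
 * counters 2/3 to 0x10/0x14, i.e. each 64-bit pair of 32-bit counters
 * sits on a 16-byte stride.
 */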
#define CMN_DT_PMEVCNT(n) (CMN_PMU_OFFSET + _CMN_DT_CNT_REG(n))
#define CMN_DT_PMCCNTR (CMN_PMU_OFFSET + 0x40)
#define CMN_DT_PMEVCNTSR(n) (CMN_PMU_OFFSET + 0x50 + _CMN_DT_CNT_REG(n))
#define CMN_DT_PMCCNTRSR (CMN_PMU_OFFSET + 0x90)
#define CMN_DT_PMCR (CMN_PMU_OFFSET + 0x100)
#define CMN_DT_PMCR_PMU_EN BIT(0)
#define CMN_DT_PMCR_CNTR_RST BIT(5)
#define CMN_DT_PMCR_OVFL_INTR_EN BIT(6)
#define CMN_DT_PMOVSR (CMN_PMU_OFFSET + 0x118)
#define CMN_DT_PMOVSR_CLR (CMN_PMU_OFFSET + 0x120)
#define CMN_DT_PMSSR (CMN_PMU_OFFSET + 0x128)
#define CMN_DT_PMSSR_SS_STATUS(n) BIT(n)
#define CMN_DT_PMSRR (CMN_PMU_OFFSET + 0x130)
#define CMN_DT_PMSRR_SS_REQ BIT(0)
#define CMN_DT_NUM_COUNTERS 8
#define CMN_MAX_DTCS 4
/*
* Even in the worst case a DTC counter can't wrap in fewer than 2^42 cycles,
* so throwing away one bit to make overflow handling easy is no big deal.
*/
#define CMN_COUNTER_INIT 0x80000000
/* Similarly for the 40-bit cycle counter */
#define CMN_CC_INIT 0x8000000000ULL
/* Event attributes */
#define CMN_CONFIG_TYPE GENMASK_ULL(15, 0)
#define CMN_CONFIG_EVENTID GENMASK_ULL(26, 16)
#define CMN_CONFIG_OCCUPID GENMASK_ULL(30, 27)
#define CMN_CONFIG_BYNODEID BIT_ULL(31)
#define CMN_CONFIG_NODEID GENMASK_ULL(47, 32)
#define CMN_EVENT_TYPE(event) FIELD_GET(CMN_CONFIG_TYPE, (event)->attr.config)
#define CMN_EVENT_EVENTID(event) FIELD_GET(CMN_CONFIG_EVENTID, (event)->attr.config)
#define CMN_EVENT_OCCUPID(event) FIELD_GET(CMN_CONFIG_OCCUPID, (event)->attr.config)
#define CMN_EVENT_BYNODEID(event) FIELD_GET(CMN_CONFIG_BYNODEID, (event)->attr.config)
#define CMN_EVENT_NODEID(event) FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config)
#define CMN_CONFIG_WP_COMBINE GENMASK_ULL(30, 27)
#define CMN_CONFIG_WP_DEV_SEL GENMASK_ULL(50, 48)
#define CMN_CONFIG_WP_CHN_SEL GENMASK_ULL(55, 51)
/* Note that we don't yet support the tertiary match group on newer IPs */
#define CMN_CONFIG_WP_GRP BIT_ULL(56)
#define CMN_CONFIG_WP_EXCLUSIVE BIT_ULL(57)
#define CMN_CONFIG1_WP_VAL GENMASK_ULL(63, 0)
#define CMN_CONFIG2_WP_MASK GENMASK_ULL(63, 0)
#define CMN_EVENT_WP_COMBINE(event) FIELD_GET(CMN_CONFIG_WP_COMBINE, (event)->attr.config)
#define CMN_EVENT_WP_DEV_SEL(event) FIELD_GET(CMN_CONFIG_WP_DEV_SEL, (event)->attr.config)
#define CMN_EVENT_WP_CHN_SEL(event) FIELD_GET(CMN_CONFIG_WP_CHN_SEL, (event)->attr.config)
#define CMN_EVENT_WP_GRP(event) FIELD_GET(CMN_CONFIG_WP_GRP, (event)->attr.config)
#define CMN_EVENT_WP_EXCLUSIVE(event) FIELD_GET(CMN_CONFIG_WP_EXCLUSIVE, (event)->attr.config)
#define CMN_EVENT_WP_VAL(event) FIELD_GET(CMN_CONFIG1_WP_VAL, (event)->attr.config1)
#define CMN_EVENT_WP_MASK(event) FIELD_GET(CMN_CONFIG2_WP_MASK, (event)->attr.config2)
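/*
 * For example, a config value with CMN_CONFIG_TYPE = 0x5 (CMN_TYPE_HNF)
 * and CMN_CONFIG_EVENTID = 0x1 selects the hnf_cache_miss event defined
 * below; the sysfs "events" entries emit exactly these type=/eventid=
 * strings so that the perf tool can assemble the config word for the user.
 */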
/* Made-up event IDs for watchpoint direction */
#define CMN_WP_UP 0
#define CMN_WP_DOWN 2
/* Internal values for encoding event support */
enum cmn_model {
CMN600 = 1,
CMN650 = 2,
CMN700 = 4,
CI700 = 8,
/* ...and then we can use bitmap tricks for commonality */
CMN_ANY = -1,
NOT_CMN600 = -2,
CMN_650ON = CMN650 | CMN700,
};
/* Actual part numbers and revision IDs defined by the hardware */
enum cmn_part {
PART_CMN600 = 0x434,
PART_CMN650 = 0x436,
PART_CMN700 = 0x43c,
PART_CI700 = 0x43a,
};
/* CMN-600 r0px shouldn't exist in silicon, thankfully */
enum cmn_revision {
REV_CMN600_R1P0,
REV_CMN600_R1P1,
REV_CMN600_R1P2,
REV_CMN600_R1P3,
REV_CMN600_R2P0,
REV_CMN600_R3P0,
REV_CMN600_R3P1,
REV_CMN650_R0P0 = 0,
REV_CMN650_R1P0,
REV_CMN650_R1P1,
REV_CMN650_R2P0,
REV_CMN650_R1P2,
REV_CMN700_R0P0 = 0,
REV_CMN700_R1P0,
REV_CMN700_R2P0,
REV_CMN700_R3P0,
REV_CI700_R0P0 = 0,
REV_CI700_R1P0,
REV_CI700_R2P0,
};
enum cmn_node_type {
CMN_TYPE_INVALID,
CMN_TYPE_DVM,
CMN_TYPE_CFG,
CMN_TYPE_DTC,
CMN_TYPE_HNI,
CMN_TYPE_HNF,
CMN_TYPE_XP,
CMN_TYPE_SBSX,
CMN_TYPE_MPAM_S,
CMN_TYPE_MPAM_NS,
CMN_TYPE_RNI,
CMN_TYPE_RND = 0xd,
CMN_TYPE_RNSAM = 0xf,
CMN_TYPE_MTSX,
CMN_TYPE_HNP,
CMN_TYPE_CXRA = 0x100,
CMN_TYPE_CXHA,
CMN_TYPE_CXLA,
CMN_TYPE_CCRA,
CMN_TYPE_CCHA,
CMN_TYPE_CCLA,
CMN_TYPE_CCLA_RNI,
CMN_TYPE_HNS = 0x200,
CMN_TYPE_HNS_MPAM_S,
CMN_TYPE_HNS_MPAM_NS,
/* Not a real node type */
CMN_TYPE_WP = 0x7770
};
enum cmn_filter_select {
SEL_NONE = -1,
SEL_OCCUP1ID,
SEL_CLASS_OCCUP_ID,
SEL_CBUSY_SNTHROTTLE_SEL,
SEL_HBT_LBT_SEL,
SEL_SN_HOME_SEL,
SEL_MAX
};
struct arm_cmn_node {
void __iomem *pmu_base;
u16 id, logid;
enum cmn_node_type type;
int dtm;
union {
/* DN/HN-F/CXHA */
struct {
u8 val : 4;
u8 count : 4;
} occupid[SEL_MAX];
/* XP */
u8 dtc;
};
union {
u8 event[4];
__le32 event_sel;
u16 event_w[4];
__le64 event_sel_w;
};
};
struct arm_cmn_dtm {
void __iomem *base;
u32 pmu_config_low;
union {
u8 input_sel[4];
__le32 pmu_config_high;
};
s8 wp_event[4];
};
struct arm_cmn_dtc {
void __iomem *base;
int irq;
int irq_friend;
bool cc_active;
struct perf_event *counters[CMN_DT_NUM_COUNTERS];
struct perf_event *cycles;
};
#define CMN_STATE_DISABLED BIT(0)
#define CMN_STATE_TXN BIT(1)
struct arm_cmn {
struct device *dev;
void __iomem *base;
unsigned int state;
enum cmn_revision rev;
enum cmn_part part;
u8 mesh_x;
u8 mesh_y;
u16 num_xps;
u16 num_dns;
bool multi_dtm;
u8 ports_used;
struct {
unsigned int rsp_vc_num : 2;
unsigned int dat_vc_num : 2;
unsigned int snp_vc_num : 2;
unsigned int req_vc_num : 2;
};
struct arm_cmn_node *xps;
struct arm_cmn_node *dns;
struct arm_cmn_dtm *dtms;
struct arm_cmn_dtc *dtc;
unsigned int num_dtcs;
int cpu;
struct hlist_node cpuhp_node;
struct pmu pmu;
struct dentry *debug;
};
#define to_cmn(p) container_of(p, struct arm_cmn, pmu)
static int arm_cmn_hp_state;
struct arm_cmn_nodeid {
u8 x;
u8 y;
u8 port;
u8 dev;
};
static int arm_cmn_xyidbits(const struct arm_cmn *cmn)
{
return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1) | 2);
}
static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id)
{
struct arm_cmn_nodeid nid;
if (cmn->num_xps == 1) {
nid.x = 0;
nid.y = 0;
nid.port = CMN_NODEID_1x1_PID(id);
nid.dev = CMN_NODEID_DEVID(id);
} else {
int bits = arm_cmn_xyidbits(cmn);
nid.x = CMN_NODEID_X(id, bits);
nid.y = CMN_NODEID_Y(id, bits);
if (cmn->ports_used & 0xc) {
nid.port = CMN_NODEID_EXT_PID(id);
nid.dev = CMN_NODEID_EXT_DEVID(id);
} else {
nid.port = CMN_NODEID_PID(id);
nid.dev = CMN_NODEID_DEVID(id);
}
}
return nid;
}
static struct arm_cmn_node *arm_cmn_node_to_xp(const struct arm_cmn *cmn,
const struct arm_cmn_node *dn)
{
struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
int xp_idx = cmn->mesh_x * nid.y + nid.x;
return cmn->xps + xp_idx;
}
static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
enum cmn_node_type type)
{
struct arm_cmn_node *dn;
for (dn = cmn->dns; dn->type; dn++)
if (dn->type == type)
return dn;
return NULL;
}
static enum cmn_model arm_cmn_model(const struct arm_cmn *cmn)
{
switch (cmn->part) {
case PART_CMN600:
return CMN600;
case PART_CMN650:
return CMN650;
case PART_CMN700:
return CMN700;
case PART_CI700:
return CI700;
default:
return 0;
};
}
static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn,
const struct arm_cmn_node *xp, int port)
{
int offset = CMN_MXP__CONNECT_INFO(port);
if (port >= 2) {
if (cmn->part == PART_CMN600 || cmn->part == PART_CMN650)
return 0;
/*
* CI-700 may have extra ports, but still has the
* mesh_port_connect_info registers in the way.
*/
if (cmn->part == PART_CI700)
offset += CI700_CONNECT_INFO_P2_5_OFFSET;
}
return readl_relaxed(xp->pmu_base - CMN_PMU_OFFSET + offset);
}
static struct dentry *arm_cmn_debugfs;
#ifdef CONFIG_DEBUG_FS
static const char *arm_cmn_device_type(u8 type)
{
switch(FIELD_GET(CMN__CONNECT_INFO_DEVICE_TYPE, type)) {
case 0x00: return " |";
case 0x01: return " RN-I |";
case 0x02: return " RN-D |";
case 0x04: return " RN-F_B |";
case 0x05: return "RN-F_B_E|";
case 0x06: return " RN-F_A |";
case 0x07: return "RN-F_A_E|";
case 0x08: return " HN-T |";
case 0x09: return " HN-I |";
case 0x0a: return " HN-D |";
case 0x0b: return " HN-P |";
case 0x0c: return " SN-F |";
case 0x0d: return " SBSX |";
case 0x0e: return " HN-F |";
case 0x0f: return " SN-F_E |";
case 0x10: return " SN-F_D |";
case 0x11: return " CXHA |";
case 0x12: return " CXRA |";
case 0x13: return " CXRH |";
case 0x14: return " RN-F_D |";
case 0x15: return "RN-F_D_E|";
case 0x16: return " RN-F_C |";
case 0x17: return "RN-F_C_E|";
case 0x18: return " RN-F_E |";
case 0x19: return "RN-F_E_E|";
case 0x1c: return " MTSX |";
case 0x1d: return " HN-V |";
case 0x1e: return " CCG |";
default: return " ???? |";
}
}
static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d)
{
struct arm_cmn *cmn = s->private;
struct arm_cmn_node *dn;
for (dn = cmn->dns; dn->type; dn++) {
struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
if (dn->type == CMN_TYPE_XP)
continue;
/* Ignore the extra components that will overlap on some ports */
if (dn->type < CMN_TYPE_HNI)
continue;
if (nid.x != x || nid.y != y || nid.port != p || nid.dev != d)
continue;
seq_printf(s, " #%-2d |", dn->logid);
return;
}
seq_puts(s, " |");
}
static int arm_cmn_map_show(struct seq_file *s, void *data)
{
struct arm_cmn *cmn = s->private;
int x, y, p, pmax = fls(cmn->ports_used);
seq_puts(s, " X");
for (x = 0; x < cmn->mesh_x; x++)
seq_printf(s, " %d ", x);
seq_puts(s, "\nY P D+");
y = cmn->mesh_y;
while (y--) {
int xp_base = cmn->mesh_x * y;
u8 port[CMN_MAX_PORTS][CMN_MAX_DIMENSION];
for (x = 0; x < cmn->mesh_x; x++)
seq_puts(s, "--------+");
seq_printf(s, "\n%d |", y);
for (x = 0; x < cmn->mesh_x; x++) {
struct arm_cmn_node *xp = cmn->xps + xp_base + x;
for (p = 0; p < CMN_MAX_PORTS; p++)
port[p][x] = arm_cmn_device_connect_info(cmn, xp, p);
seq_printf(s, " XP #%-2d |", xp_base + x);
}
seq_puts(s, "\n |");
for (x = 0; x < cmn->mesh_x; x++) {
u8 dtc = cmn->xps[xp_base + x].dtc;
if (dtc & (dtc - 1))
seq_puts(s, " DTC ?? |");
else
seq_printf(s, " DTC %ld |", __ffs(dtc));
}
seq_puts(s, "\n |");
for (x = 0; x < cmn->mesh_x; x++)
seq_puts(s, "........|");
for (p = 0; p < pmax; p++) {
seq_printf(s, "\n %d |", p);
for (x = 0; x < cmn->mesh_x; x++)
seq_puts(s, arm_cmn_device_type(port[p][x]));
seq_puts(s, "\n 0|");
for (x = 0; x < cmn->mesh_x; x++)
arm_cmn_show_logid(s, x, y, p, 0);
seq_puts(s, "\n 1|");
for (x = 0; x < cmn->mesh_x; x++)
arm_cmn_show_logid(s, x, y, p, 1);
}
seq_puts(s, "\n-----+");
}
for (x = 0; x < cmn->mesh_x; x++)
seq_puts(s, "--------+");
seq_puts(s, "\n");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(arm_cmn_map);
static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id)
{
const char *name = "map";
if (id > 0)
name = devm_kasprintf(cmn->dev, GFP_KERNEL, "map_%d", id);
if (!name)
return;
cmn->debug = debugfs_create_file(name, 0444, arm_cmn_debugfs, cmn, &arm_cmn_map_fops);
}
#else
static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id) {}
#endif
struct arm_cmn_hw_event {
struct arm_cmn_node *dn;
u64 dtm_idx[4];
unsigned int dtc_idx;
u8 dtcs_used;
u8 num_dns;
u8 dtm_offset;
bool wide_sel;
enum cmn_filter_select filter_sel;
};
#define for_each_hw_dn(hw, dn, i) \
for (i = 0, dn = hw->dn; i < hw->num_dns; i++, dn++)
static struct arm_cmn_hw_event *to_cmn_hw(struct perf_event *event)
{
BUILD_BUG_ON(sizeof(struct arm_cmn_hw_event) > offsetof(struct hw_perf_event, target));
return (struct arm_cmn_hw_event *)&event->hw;
}
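/*
 * arm_cmn_set_index()/arm_cmn_get_index() pack and unpack 2-bit values:
 * position 'pos' lives at bit offset (pos % 32) * 2 within x[pos / 32],
 * giving 32 two-bit slots per u64.
 */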
static void arm_cmn_set_index(u64 x[], unsigned int pos, unsigned int val)
{
x[pos / 32] |= (u64)val << ((pos % 32) * 2);
}
static unsigned int arm_cmn_get_index(u64 x[], unsigned int pos)
{
return (x[pos / 32] >> ((pos % 32) * 2)) & 3;
}
struct arm_cmn_event_attr {
struct device_attribute attr;
enum cmn_model model;
enum cmn_node_type type;
enum cmn_filter_select fsel;
u16 eventid;
u8 occupid;
};
struct arm_cmn_format_attr {
struct device_attribute attr;
u64 field;
int config;
};
#define _CMN_EVENT_ATTR(_model, _name, _type, _eventid, _occupid, _fsel)\
(&((struct arm_cmn_event_attr[]) {{ \
.attr = __ATTR(_name, 0444, arm_cmn_event_show, NULL), \
.model = _model, \
.type = _type, \
.eventid = _eventid, \
.occupid = _occupid, \
.fsel = _fsel, \
}})[0].attr.attr)
#define CMN_EVENT_ATTR(_model, _name, _type, _eventid) \
_CMN_EVENT_ATTR(_model, _name, _type, _eventid, 0, SEL_NONE)
static ssize_t arm_cmn_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct arm_cmn_event_attr *eattr;
eattr = container_of(attr, typeof(*eattr), attr);
if (eattr->type == CMN_TYPE_DTC)
return sysfs_emit(buf, "type=0x%x\n", eattr->type);
if (eattr->type == CMN_TYPE_WP)
return sysfs_emit(buf,
"type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n",
eattr->type, eattr->eventid);
if (eattr->fsel > SEL_NONE)
return sysfs_emit(buf, "type=0x%x,eventid=0x%x,occupid=0x%x\n",
eattr->type, eattr->eventid, eattr->occupid);
return sysfs_emit(buf, "type=0x%x,eventid=0x%x\n", eattr->type,
eattr->eventid);
}
static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
struct attribute *attr,
int unused)
{
struct device *dev = kobj_to_dev(kobj);
struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));
struct arm_cmn_event_attr *eattr;
enum cmn_node_type type;
u16 eventid;
eattr = container_of(attr, typeof(*eattr), attr.attr);
if (!(eattr->model & arm_cmn_model(cmn)))
return 0;
type = eattr->type;
eventid = eattr->eventid;
/* Watchpoints aren't nodes, so avoid confusion */
if (type == CMN_TYPE_WP)
return attr->mode;
/* Hide XP events for unused interfaces/channels */
if (type == CMN_TYPE_XP) {
unsigned int intf = (eventid >> 2) & 7;
unsigned int chan = eventid >> 5;
if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3)))
return 0;
if (chan == 4 && cmn->part == PART_CMN600)
return 0;
if ((chan == 5 && cmn->rsp_vc_num < 2) ||
(chan == 6 && cmn->dat_vc_num < 2) ||
(chan == 7 && cmn->snp_vc_num < 2) ||
(chan == 8 && cmn->req_vc_num < 2))
return 0;
}
/* Revision-specific differences */
if (cmn->part == PART_CMN600) {
if (cmn->rev < REV_CMN600_R1P3) {
if (type == CMN_TYPE_CXRA && eventid > 0x10)
return 0;
}
if (cmn->rev < REV_CMN600_R1P2) {
if (type == CMN_TYPE_HNF && eventid == 0x1b)
return 0;
if (type == CMN_TYPE_CXRA || type == CMN_TYPE_CXHA)
return 0;
}
} else if (cmn->part == PART_CMN650) {
if (cmn->rev < REV_CMN650_R2P0 || cmn->rev == REV_CMN650_R1P2) {
if (type == CMN_TYPE_HNF && eventid > 0x22)
return 0;
if (type == CMN_TYPE_SBSX && eventid == 0x17)
return 0;
if (type == CMN_TYPE_RNI && eventid > 0x10)
return 0;
}
} else if (cmn->part == PART_CMN700) {
if (cmn->rev < REV_CMN700_R2P0) {
if (type == CMN_TYPE_HNF && eventid > 0x2c)
return 0;
if (type == CMN_TYPE_CCHA && eventid > 0x74)
return 0;
if (type == CMN_TYPE_CCLA && eventid > 0x27)
return 0;
}
if (cmn->rev < REV_CMN700_R1P0) {
if (type == CMN_TYPE_HNF && eventid > 0x2b)
return 0;
}
}
if (!arm_cmn_node(cmn, type))
return 0;
return attr->mode;
}
#define _CMN_EVENT_DVM(_model, _name, _event, _occup, _fsel) \
_CMN_EVENT_ATTR(_model, dn_##_name, CMN_TYPE_DVM, _event, _occup, _fsel)
#define CMN_EVENT_DTC(_name) \
CMN_EVENT_ATTR(CMN_ANY, dtc_##_name, CMN_TYPE_DTC, 0)
#define CMN_EVENT_HNF(_model, _name, _event) \
CMN_EVENT_ATTR(_model, hnf_##_name, CMN_TYPE_HNF, _event)
#define CMN_EVENT_HNI(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, hni_##_name, CMN_TYPE_HNI, _event)
#define CMN_EVENT_HNP(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, hnp_##_name, CMN_TYPE_HNP, _event)
#define __CMN_EVENT_XP(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, mxp_##_name, CMN_TYPE_XP, _event)
#define CMN_EVENT_SBSX(_model, _name, _event) \
CMN_EVENT_ATTR(_model, sbsx_##_name, CMN_TYPE_SBSX, _event)
#define CMN_EVENT_RNID(_model, _name, _event) \
CMN_EVENT_ATTR(_model, rnid_##_name, CMN_TYPE_RNI, _event)
#define CMN_EVENT_MTSX(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, mtsx_##_name, CMN_TYPE_MTSX, _event)
#define CMN_EVENT_CXRA(_model, _name, _event) \
CMN_EVENT_ATTR(_model, cxra_##_name, CMN_TYPE_CXRA, _event)
#define CMN_EVENT_CXHA(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, cxha_##_name, CMN_TYPE_CXHA, _event)
#define CMN_EVENT_CCRA(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, ccra_##_name, CMN_TYPE_CCRA, _event)
#define CMN_EVENT_CCHA(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, ccha_##_name, CMN_TYPE_CCHA, _event)
#define CMN_EVENT_CCLA(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, ccla_##_name, CMN_TYPE_CCLA, _event)
#define CMN_EVENT_CCLA_RNI(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, ccla_rni_##_name, CMN_TYPE_CCLA_RNI, _event)
#define CMN_EVENT_HNS(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event)
#define CMN_EVENT_DVM(_model, _name, _event) \
_CMN_EVENT_DVM(_model, _name, _event, 0, SEL_NONE)
#define CMN_EVENT_DVM_OCC(_model, _name, _event) \
_CMN_EVENT_DVM(_model, _name##_all, _event, 0, SEL_OCCUP1ID), \
_CMN_EVENT_DVM(_model, _name##_dvmop, _event, 1, SEL_OCCUP1ID), \
_CMN_EVENT_DVM(_model, _name##_dvmsync, _event, 2, SEL_OCCUP1ID)
#define CMN_EVENT_HN_OCC(_model, _name, _type, _event) \
_CMN_EVENT_ATTR(_model, _name##_all, _type, _event, 0, SEL_OCCUP1ID), \
_CMN_EVENT_ATTR(_model, _name##_read, _type, _event, 1, SEL_OCCUP1ID), \
_CMN_EVENT_ATTR(_model, _name##_write, _type, _event, 2, SEL_OCCUP1ID), \
_CMN_EVENT_ATTR(_model, _name##_atomic, _type, _event, 3, SEL_OCCUP1ID), \
_CMN_EVENT_ATTR(_model, _name##_stash, _type, _event, 4, SEL_OCCUP1ID)
#define CMN_EVENT_HN_CLS(_model, _name, _type, _event) \
_CMN_EVENT_ATTR(_model, _name##_class0, _type, _event, 0, SEL_CLASS_OCCUP_ID), \
_CMN_EVENT_ATTR(_model, _name##_class1, _type, _event, 1, SEL_CLASS_OCCUP_ID), \
_CMN_EVENT_ATTR(_model, _name##_class2, _type, _event, 2, SEL_CLASS_OCCUP_ID), \
_CMN_EVENT_ATTR(_model, _name##_class3, _type, _event, 3, SEL_CLASS_OCCUP_ID)
#define CMN_EVENT_HN_SNT(_model, _name, _type, _event) \
_CMN_EVENT_ATTR(_model, _name##_all, _type, _event, 0, SEL_CBUSY_SNTHROTTLE_SEL), \
_CMN_EVENT_ATTR(_model, _name##_group0_read, _type, _event, 1, SEL_CBUSY_SNTHROTTLE_SEL), \
_CMN_EVENT_ATTR(_model, _name##_group0_write, _type, _event, 2, SEL_CBUSY_SNTHROTTLE_SEL), \
_CMN_EVENT_ATTR(_model, _name##_group1_read, _type, _event, 3, SEL_CBUSY_SNTHROTTLE_SEL), \
_CMN_EVENT_ATTR(_model, _name##_group1_write, _type, _event, 4, SEL_CBUSY_SNTHROTTLE_SEL), \
_CMN_EVENT_ATTR(_model, _name##_read, _type, _event, 5, SEL_CBUSY_SNTHROTTLE_SEL), \
_CMN_EVENT_ATTR(_model, _name##_write, _type, _event, 6, SEL_CBUSY_SNTHROTTLE_SEL)
#define CMN_EVENT_HNF_OCC(_model, _name, _event) \
CMN_EVENT_HN_OCC(_model, hnf_##_name, CMN_TYPE_HNF, _event)
#define CMN_EVENT_HNF_CLS(_model, _name, _event) \
CMN_EVENT_HN_CLS(_model, hnf_##_name, CMN_TYPE_HNS, _event)
#define CMN_EVENT_HNF_SNT(_model, _name, _event) \
CMN_EVENT_HN_SNT(_model, hnf_##_name, CMN_TYPE_HNF, _event)
#define CMN_EVENT_HNS_OCC(_name, _event) \
CMN_EVENT_HN_OCC(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event), \
_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_rxsnp, CMN_TYPE_HNS, _event, 5, SEL_OCCUP1ID), \
_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_lbt, CMN_TYPE_HNS, _event, 6, SEL_OCCUP1ID), \
_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_hbt, CMN_TYPE_HNS, _event, 7, SEL_OCCUP1ID)
#define CMN_EVENT_HNS_CLS( _name, _event) \
CMN_EVENT_HN_CLS(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event)
#define CMN_EVENT_HNS_SNT(_name, _event) \
CMN_EVENT_HN_SNT(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event)
#define CMN_EVENT_HNS_HBT(_name, _event) \
_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_all, CMN_TYPE_HNS, _event, 0, SEL_HBT_LBT_SEL), \
_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_hbt, CMN_TYPE_HNS, _event, 1, SEL_HBT_LBT_SEL), \
_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_lbt, CMN_TYPE_HNS, _event, 2, SEL_HBT_LBT_SEL)
#define CMN_EVENT_HNS_SNH(_name, _event) \
_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_all, CMN_TYPE_HNS, _event, 0, SEL_SN_HOME_SEL), \
_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_sn, CMN_TYPE_HNS, _event, 1, SEL_SN_HOME_SEL), \
_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_home, CMN_TYPE_HNS, _event, 2, SEL_SN_HOME_SEL)
#define _CMN_EVENT_XP_MESH(_name, _event) \
__CMN_EVENT_XP(e_##_name, (_event) | (0 << 2)), \
__CMN_EVENT_XP(w_##_name, (_event) | (1 << 2)), \
__CMN_EVENT_XP(n_##_name, (_event) | (2 << 2)), \
__CMN_EVENT_XP(s_##_name, (_event) | (3 << 2))
#define _CMN_EVENT_XP_PORT(_name, _event) \
__CMN_EVENT_XP(p0_##_name, (_event) | (4 << 2)), \
__CMN_EVENT_XP(p1_##_name, (_event) | (5 << 2)), \
__CMN_EVENT_XP(p2_##_name, (_event) | (6 << 2)), \
__CMN_EVENT_XP(p3_##_name, (_event) | (7 << 2))
#define _CMN_EVENT_XP(_name, _event) \
_CMN_EVENT_XP_MESH(_name, _event), \
_CMN_EVENT_XP_PORT(_name, _event)
/* Good thing there are only 3 fundamental XP events... */
#define CMN_EVENT_XP(_name, _event) \
_CMN_EVENT_XP(req_##_name, (_event) | (0 << 5)), \
_CMN_EVENT_XP(rsp_##_name, (_event) | (1 << 5)), \
_CMN_EVENT_XP(snp_##_name, (_event) | (2 << 5)), \
_CMN_EVENT_XP(dat_##_name, (_event) | (3 << 5)), \
_CMN_EVENT_XP(pub_##_name, (_event) | (4 << 5)), \
_CMN_EVENT_XP(rsp2_##_name, (_event) | (5 << 5)), \
_CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5)), \
_CMN_EVENT_XP(snp2_##_name, (_event) | (7 << 5)), \
_CMN_EVENT_XP(req2_##_name, (_event) | (8 << 5))
#define CMN_EVENT_XP_DAT(_name, _event) \
_CMN_EVENT_XP_PORT(dat_##_name, (_event) | (3 << 5)), \
_CMN_EVENT_XP_PORT(dat2_##_name, (_event) | (6 << 5))
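/*
 * Worked example of the XP encoding built up above: the fundamental event
 * number sits in the low bits, the interface select is ORed in at bit 2
 * (0-3 for the mesh directions, 4-7 for device ports p0-p3), and the channel
 * select at bit 5. So p1_dat_txflit_stall is 0x02 | (5 << 2) | (3 << 5) =
 * 0x76, while e_req_txflit_valid is simply 0x01.
 */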
static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_DTC(cycles),
/*
* DVM node events conflict with HN-I events in the equivalent PMU
* slot, but our lazy short-cut of using the DTM counter index for
* the PMU index as well happens to avoid that by construction.
*/
CMN_EVENT_DVM(CMN600, rxreq_dvmop, 0x01),
CMN_EVENT_DVM(CMN600, rxreq_dvmsync, 0x02),
CMN_EVENT_DVM(CMN600, rxreq_dvmop_vmid_filtered, 0x03),
CMN_EVENT_DVM(CMN600, rxreq_retried, 0x04),
CMN_EVENT_DVM_OCC(CMN600, rxreq_trk_occupancy, 0x05),
CMN_EVENT_DVM(NOT_CMN600, dvmop_tlbi, 0x01),
CMN_EVENT_DVM(NOT_CMN600, dvmop_bpi, 0x02),
CMN_EVENT_DVM(NOT_CMN600, dvmop_pici, 0x03),
CMN_EVENT_DVM(NOT_CMN600, dvmop_vici, 0x04),
CMN_EVENT_DVM(NOT_CMN600, dvmsync, 0x05),
CMN_EVENT_DVM(NOT_CMN600, vmid_filtered, 0x06),
CMN_EVENT_DVM(NOT_CMN600, rndop_filtered, 0x07),
CMN_EVENT_DVM(NOT_CMN600, retry, 0x08),
CMN_EVENT_DVM(NOT_CMN600, txsnp_flitv, 0x09),
CMN_EVENT_DVM(NOT_CMN600, txsnp_stall, 0x0a),
CMN_EVENT_DVM(NOT_CMN600, trkfull, 0x0b),
CMN_EVENT_DVM_OCC(NOT_CMN600, trk_occupancy, 0x0c),
CMN_EVENT_DVM_OCC(CMN700, trk_occupancy_cxha, 0x0d),
CMN_EVENT_DVM_OCC(CMN700, trk_occupancy_pdn, 0x0e),
CMN_EVENT_DVM(CMN700, trk_alloc, 0x0f),
CMN_EVENT_DVM(CMN700, trk_cxha_alloc, 0x10),
CMN_EVENT_DVM(CMN700, trk_pdn_alloc, 0x11),
CMN_EVENT_DVM(CMN700, txsnp_stall_limit, 0x12),
CMN_EVENT_DVM(CMN700, rxsnp_stall_starv, 0x13),
CMN_EVENT_DVM(CMN700, txsnp_sync_stall_op, 0x14),
CMN_EVENT_HNF(CMN_ANY, cache_miss, 0x01),
CMN_EVENT_HNF(CMN_ANY, slc_sf_cache_access, 0x02),
CMN_EVENT_HNF(CMN_ANY, cache_fill, 0x03),
CMN_EVENT_HNF(CMN_ANY, pocq_retry, 0x04),
CMN_EVENT_HNF(CMN_ANY, pocq_reqs_recvd, 0x05),
CMN_EVENT_HNF(CMN_ANY, sf_hit, 0x06),
CMN_EVENT_HNF(CMN_ANY, sf_evictions, 0x07),
CMN_EVENT_HNF(CMN_ANY, dir_snoops_sent, 0x08),
CMN_EVENT_HNF(CMN_ANY, brd_snoops_sent, 0x09),
CMN_EVENT_HNF(CMN_ANY, slc_eviction, 0x0a),
CMN_EVENT_HNF(CMN_ANY, slc_fill_invalid_way, 0x0b),
CMN_EVENT_HNF(CMN_ANY, mc_retries, 0x0c),
CMN_EVENT_HNF(CMN_ANY, mc_reqs, 0x0d),
CMN_EVENT_HNF(CMN_ANY, qos_hh_retry, 0x0e),
CMN_EVENT_HNF_OCC(CMN_ANY, qos_pocq_occupancy, 0x0f),
CMN_EVENT_HNF(CMN_ANY, pocq_addrhaz, 0x10),
CMN_EVENT_HNF(CMN_ANY, pocq_atomic_addrhaz, 0x11),
CMN_EVENT_HNF(CMN_ANY, ld_st_swp_adq_full, 0x12),
CMN_EVENT_HNF(CMN_ANY, cmp_adq_full, 0x13),
CMN_EVENT_HNF(CMN_ANY, txdat_stall, 0x14),
CMN_EVENT_HNF(CMN_ANY, txrsp_stall, 0x15),
CMN_EVENT_HNF(CMN_ANY, seq_full, 0x16),
CMN_EVENT_HNF(CMN_ANY, seq_hit, 0x17),
CMN_EVENT_HNF(CMN_ANY, snp_sent, 0x18),
CMN_EVENT_HNF(CMN_ANY, sfbi_dir_snp_sent, 0x19),
CMN_EVENT_HNF(CMN_ANY, sfbi_brd_snp_sent, 0x1a),
CMN_EVENT_HNF(CMN_ANY, snp_sent_untrk, 0x1b),
CMN_EVENT_HNF(CMN_ANY, intv_dirty, 0x1c),
CMN_EVENT_HNF(CMN_ANY, stash_snp_sent, 0x1d),
CMN_EVENT_HNF(CMN_ANY, stash_data_pull, 0x1e),
CMN_EVENT_HNF(CMN_ANY, snp_fwded, 0x1f),
CMN_EVENT_HNF(NOT_CMN600, atomic_fwd, 0x20),
CMN_EVENT_HNF(NOT_CMN600, mpam_hardlim, 0x21),
CMN_EVENT_HNF(NOT_CMN600, mpam_softlim, 0x22),
CMN_EVENT_HNF(CMN_650ON, snp_sent_cluster, 0x23),
CMN_EVENT_HNF(CMN_650ON, sf_imprecise_evict, 0x24),
CMN_EVENT_HNF(CMN_650ON, sf_evict_shared_line, 0x25),
CMN_EVENT_HNF_CLS(CMN700, pocq_class_occup, 0x26),
CMN_EVENT_HNF_CLS(CMN700, pocq_class_retry, 0x27),
CMN_EVENT_HNF_CLS(CMN700, class_mc_reqs, 0x28),
CMN_EVENT_HNF_CLS(CMN700, class_cgnt_cmin, 0x29),
CMN_EVENT_HNF_SNT(CMN700, sn_throttle, 0x2a),
CMN_EVENT_HNF_SNT(CMN700, sn_throttle_min, 0x2b),
CMN_EVENT_HNF(CMN700, sf_precise_to_imprecise, 0x2c),
CMN_EVENT_HNF(CMN700, snp_intv_cln, 0x2d),
CMN_EVENT_HNF(CMN700, nc_excl, 0x2e),
CMN_EVENT_HNF(CMN700, excl_mon_ovfl, 0x2f),
CMN_EVENT_HNI(rrt_rd_occ_cnt_ovfl, 0x20),
CMN_EVENT_HNI(rrt_wr_occ_cnt_ovfl, 0x21),
CMN_EVENT_HNI(rdt_rd_occ_cnt_ovfl, 0x22),
CMN_EVENT_HNI(rdt_wr_occ_cnt_ovfl, 0x23),
CMN_EVENT_HNI(wdb_occ_cnt_ovfl, 0x24),
CMN_EVENT_HNI(rrt_rd_alloc, 0x25),
CMN_EVENT_HNI(rrt_wr_alloc, 0x26),
CMN_EVENT_HNI(rdt_rd_alloc, 0x27),
CMN_EVENT_HNI(rdt_wr_alloc, 0x28),
CMN_EVENT_HNI(wdb_alloc, 0x29),
CMN_EVENT_HNI(txrsp_retryack, 0x2a),
CMN_EVENT_HNI(arvalid_no_arready, 0x2b),
CMN_EVENT_HNI(arready_no_arvalid, 0x2c),
CMN_EVENT_HNI(awvalid_no_awready, 0x2d),
CMN_EVENT_HNI(awready_no_awvalid, 0x2e),
CMN_EVENT_HNI(wvalid_no_wready, 0x2f),
CMN_EVENT_HNI(txdat_stall, 0x30),
CMN_EVENT_HNI(nonpcie_serialization, 0x31),
CMN_EVENT_HNI(pcie_serialization, 0x32),
/*
* HN-P events squat on top of the HN-I similarly to DVM events, except
* for being crammed into the same physical node as well. And of course
* where would the fun be if the same events were in the same order...
*/
CMN_EVENT_HNP(rrt_wr_occ_cnt_ovfl, 0x01),
CMN_EVENT_HNP(rdt_wr_occ_cnt_ovfl, 0x02),
CMN_EVENT_HNP(wdb_occ_cnt_ovfl, 0x03),
CMN_EVENT_HNP(rrt_wr_alloc, 0x04),
CMN_EVENT_HNP(rdt_wr_alloc, 0x05),
CMN_EVENT_HNP(wdb_alloc, 0x06),
CMN_EVENT_HNP(awvalid_no_awready, 0x07),
CMN_EVENT_HNP(awready_no_awvalid, 0x08),
CMN_EVENT_HNP(wvalid_no_wready, 0x09),
CMN_EVENT_HNP(rrt_rd_occ_cnt_ovfl, 0x11),
CMN_EVENT_HNP(rdt_rd_occ_cnt_ovfl, 0x12),
CMN_EVENT_HNP(rrt_rd_alloc, 0x13),
CMN_EVENT_HNP(rdt_rd_alloc, 0x14),
CMN_EVENT_HNP(arvalid_no_arready, 0x15),
CMN_EVENT_HNP(arready_no_arvalid, 0x16),
CMN_EVENT_XP(txflit_valid, 0x01),
CMN_EVENT_XP(txflit_stall, 0x02),
CMN_EVENT_XP_DAT(partial_dat_flit, 0x03),
/* We treat watchpoints as a special made-up class of XP events */
CMN_EVENT_ATTR(CMN_ANY, watchpoint_up, CMN_TYPE_WP, CMN_WP_UP),
CMN_EVENT_ATTR(CMN_ANY, watchpoint_down, CMN_TYPE_WP, CMN_WP_DOWN),
CMN_EVENT_SBSX(CMN_ANY, rd_req, 0x01),
CMN_EVENT_SBSX(CMN_ANY, wr_req, 0x02),
CMN_EVENT_SBSX(CMN_ANY, cmo_req, 0x03),
CMN_EVENT_SBSX(CMN_ANY, txrsp_retryack, 0x04),
CMN_EVENT_SBSX(CMN_ANY, txdat_flitv, 0x05),
CMN_EVENT_SBSX(CMN_ANY, txrsp_flitv, 0x06),
CMN_EVENT_SBSX(CMN_ANY, rd_req_trkr_occ_cnt_ovfl, 0x11),
CMN_EVENT_SBSX(CMN_ANY, wr_req_trkr_occ_cnt_ovfl, 0x12),
CMN_EVENT_SBSX(CMN_ANY, cmo_req_trkr_occ_cnt_ovfl, 0x13),
CMN_EVENT_SBSX(CMN_ANY, wdb_occ_cnt_ovfl, 0x14),
CMN_EVENT_SBSX(CMN_ANY, rd_axi_trkr_occ_cnt_ovfl, 0x15),
CMN_EVENT_SBSX(CMN_ANY, cmo_axi_trkr_occ_cnt_ovfl, 0x16),
CMN_EVENT_SBSX(NOT_CMN600, rdb_occ_cnt_ovfl, 0x17),
CMN_EVENT_SBSX(CMN_ANY, arvalid_no_arready, 0x21),
CMN_EVENT_SBSX(CMN_ANY, awvalid_no_awready, 0x22),
CMN_EVENT_SBSX(CMN_ANY, wvalid_no_wready, 0x23),
CMN_EVENT_SBSX(CMN_ANY, txdat_stall, 0x24),
CMN_EVENT_SBSX(CMN_ANY, txrsp_stall, 0x25),
CMN_EVENT_RNID(CMN_ANY, s0_rdata_beats, 0x01),
CMN_EVENT_RNID(CMN_ANY, s1_rdata_beats, 0x02),
CMN_EVENT_RNID(CMN_ANY, s2_rdata_beats, 0x03),
CMN_EVENT_RNID(CMN_ANY, rxdat_flits, 0x04),
CMN_EVENT_RNID(CMN_ANY, txdat_flits, 0x05),
CMN_EVENT_RNID(CMN_ANY, txreq_flits_total, 0x06),
CMN_EVENT_RNID(CMN_ANY, txreq_flits_retried, 0x07),
CMN_EVENT_RNID(CMN_ANY, rrt_occ_ovfl, 0x08),
CMN_EVENT_RNID(CMN_ANY, wrt_occ_ovfl, 0x09),
CMN_EVENT_RNID(CMN_ANY, txreq_flits_replayed, 0x0a),
CMN_EVENT_RNID(CMN_ANY, wrcancel_sent, 0x0b),
CMN_EVENT_RNID(CMN_ANY, s0_wdata_beats, 0x0c),
CMN_EVENT_RNID(CMN_ANY, s1_wdata_beats, 0x0d),
CMN_EVENT_RNID(CMN_ANY, s2_wdata_beats, 0x0e),
CMN_EVENT_RNID(CMN_ANY, rrt_alloc, 0x0f),
CMN_EVENT_RNID(CMN_ANY, wrt_alloc, 0x10),
CMN_EVENT_RNID(CMN600, rdb_unord, 0x11),
CMN_EVENT_RNID(CMN600, rdb_replay, 0x12),
CMN_EVENT_RNID(CMN600, rdb_hybrid, 0x13),
CMN_EVENT_RNID(CMN600, rdb_ord, 0x14),
CMN_EVENT_RNID(NOT_CMN600, padb_occ_ovfl, 0x11),
CMN_EVENT_RNID(NOT_CMN600, rpdb_occ_ovfl, 0x12),
CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice1, 0x13),
CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice2, 0x14),
CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice3, 0x15),
CMN_EVENT_RNID(NOT_CMN600, wrt_throttled, 0x16),
CMN_EVENT_RNID(CMN700, ldb_full, 0x17),
CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice0, 0x18),
CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice1, 0x19),
CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice2, 0x1a),
CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice3, 0x1b),
CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice0, 0x1c),
CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice1, 0x1d),
CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice2, 0x1e),
CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice3, 0x1f),
CMN_EVENT_RNID(CMN700, rrt_burst_alloc, 0x20),
CMN_EVENT_RNID(CMN700, awid_hash, 0x21),
CMN_EVENT_RNID(CMN700, atomic_alloc, 0x22),
CMN_EVENT_RNID(CMN700, atomic_occ_ovfl, 0x23),
CMN_EVENT_MTSX(tc_lookup, 0x01),
CMN_EVENT_MTSX(tc_fill, 0x02),
CMN_EVENT_MTSX(tc_miss, 0x03),
CMN_EVENT_MTSX(tdb_forward, 0x04),
CMN_EVENT_MTSX(tcq_hazard, 0x05),
CMN_EVENT_MTSX(tcq_rd_alloc, 0x06),
CMN_EVENT_MTSX(tcq_wr_alloc, 0x07),
CMN_EVENT_MTSX(tcq_cmo_alloc, 0x08),
CMN_EVENT_MTSX(axi_rd_req, 0x09),
CMN_EVENT_MTSX(axi_wr_req, 0x0a),
CMN_EVENT_MTSX(tcq_occ_cnt_ovfl, 0x0b),
CMN_EVENT_MTSX(tdb_occ_cnt_ovfl, 0x0c),
CMN_EVENT_CXRA(CMN_ANY, rht_occ, 0x01),
CMN_EVENT_CXRA(CMN_ANY, sht_occ, 0x02),
CMN_EVENT_CXRA(CMN_ANY, rdb_occ, 0x03),
CMN_EVENT_CXRA(CMN_ANY, wdb_occ, 0x04),
CMN_EVENT_CXRA(CMN_ANY, ssb_occ, 0x05),
CMN_EVENT_CXRA(CMN_ANY, snp_bcasts, 0x06),
CMN_EVENT_CXRA(CMN_ANY, req_chains, 0x07),
CMN_EVENT_CXRA(CMN_ANY, req_chain_avglen, 0x08),
CMN_EVENT_CXRA(CMN_ANY, chirsp_stalls, 0x09),
CMN_EVENT_CXRA(CMN_ANY, chidat_stalls, 0x0a),
CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link0, 0x0b),
CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link1, 0x0c),
CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link2, 0x0d),
CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link0, 0x0e),
CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link1, 0x0f),
CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link2, 0x10),
CMN_EVENT_CXRA(CMN_ANY, external_chirsp_stalls, 0x11),
CMN_EVENT_CXRA(CMN_ANY, external_chidat_stalls, 0x12),
CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link0, 0x13),
CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link1, 0x14),
CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link2, 0x15),
CMN_EVENT_CXHA(rddatbyp, 0x21),
CMN_EVENT_CXHA(chirsp_up_stall, 0x22),
CMN_EVENT_CXHA(chidat_up_stall, 0x23),
CMN_EVENT_CXHA(snppcrd_link0_stall, 0x24),
CMN_EVENT_CXHA(snppcrd_link1_stall, 0x25),
CMN_EVENT_CXHA(snppcrd_link2_stall, 0x26),
CMN_EVENT_CXHA(reqtrk_occ, 0x27),
CMN_EVENT_CXHA(rdb_occ, 0x28),
CMN_EVENT_CXHA(rdbyp_occ, 0x29),
CMN_EVENT_CXHA(wdb_occ, 0x2a),
CMN_EVENT_CXHA(snptrk_occ, 0x2b),
CMN_EVENT_CXHA(sdb_occ, 0x2c),
CMN_EVENT_CXHA(snphaz_occ, 0x2d),
CMN_EVENT_CCRA(rht_occ, 0x41),
CMN_EVENT_CCRA(sht_occ, 0x42),
CMN_EVENT_CCRA(rdb_occ, 0x43),
CMN_EVENT_CCRA(wdb_occ, 0x44),
CMN_EVENT_CCRA(ssb_occ, 0x45),
CMN_EVENT_CCRA(snp_bcasts, 0x46),
CMN_EVENT_CCRA(req_chains, 0x47),
CMN_EVENT_CCRA(req_chain_avglen, 0x48),
CMN_EVENT_CCRA(chirsp_stalls, 0x49),
CMN_EVENT_CCRA(chidat_stalls, 0x4a),
CMN_EVENT_CCRA(cxreq_pcrd_stalls_link0, 0x4b),
CMN_EVENT_CCRA(cxreq_pcrd_stalls_link1, 0x4c),
CMN_EVENT_CCRA(cxreq_pcrd_stalls_link2, 0x4d),
CMN_EVENT_CCRA(cxdat_pcrd_stalls_link0, 0x4e),
CMN_EVENT_CCRA(cxdat_pcrd_stalls_link1, 0x4f),
CMN_EVENT_CCRA(cxdat_pcrd_stalls_link2, 0x50),
CMN_EVENT_CCRA(external_chirsp_stalls, 0x51),
CMN_EVENT_CCRA(external_chidat_stalls, 0x52),
CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link0, 0x53),
CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link1, 0x54),
CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link2, 0x55),
CMN_EVENT_CCRA(rht_alloc, 0x56),
CMN_EVENT_CCRA(sht_alloc, 0x57),
CMN_EVENT_CCRA(rdb_alloc, 0x58),
CMN_EVENT_CCRA(wdb_alloc, 0x59),
CMN_EVENT_CCRA(ssb_alloc, 0x5a),
CMN_EVENT_CCHA(rddatbyp, 0x61),
CMN_EVENT_CCHA(chirsp_up_stall, 0x62),
CMN_EVENT_CCHA(chidat_up_stall, 0x63),
CMN_EVENT_CCHA(snppcrd_link0_stall, 0x64),
CMN_EVENT_CCHA(snppcrd_link1_stall, 0x65),
CMN_EVENT_CCHA(snppcrd_link2_stall, 0x66),
CMN_EVENT_CCHA(reqtrk_occ, 0x67),
CMN_EVENT_CCHA(rdb_occ, 0x68),
CMN_EVENT_CCHA(rdbyp_occ, 0x69),
CMN_EVENT_CCHA(wdb_occ, 0x6a),
CMN_EVENT_CCHA(snptrk_occ, 0x6b),
CMN_EVENT_CCHA(sdb_occ, 0x6c),
CMN_EVENT_CCHA(snphaz_occ, 0x6d),
CMN_EVENT_CCHA(reqtrk_alloc, 0x6e),
CMN_EVENT_CCHA(rdb_alloc, 0x6f),
CMN_EVENT_CCHA(rdbyp_alloc, 0x70),
CMN_EVENT_CCHA(wdb_alloc, 0x71),
CMN_EVENT_CCHA(snptrk_alloc, 0x72),
CMN_EVENT_CCHA(sdb_alloc, 0x73),
CMN_EVENT_CCHA(snphaz_alloc, 0x74),
CMN_EVENT_CCHA(pb_rhu_req_occ, 0x75),
CMN_EVENT_CCHA(pb_rhu_req_alloc, 0x76),
CMN_EVENT_CCHA(pb_rhu_pcie_req_occ, 0x77),
CMN_EVENT_CCHA(pb_rhu_pcie_req_alloc, 0x78),
CMN_EVENT_CCHA(pb_pcie_wr_req_occ, 0x79),
CMN_EVENT_CCHA(pb_pcie_wr_req_alloc, 0x7a),
CMN_EVENT_CCHA(pb_pcie_reg_req_occ, 0x7b),
CMN_EVENT_CCHA(pb_pcie_reg_req_alloc, 0x7c),
CMN_EVENT_CCHA(pb_pcie_rsvd_req_occ, 0x7d),
CMN_EVENT_CCHA(pb_pcie_rsvd_req_alloc, 0x7e),
CMN_EVENT_CCHA(pb_rhu_dat_occ, 0x7f),
CMN_EVENT_CCHA(pb_rhu_dat_alloc, 0x80),
CMN_EVENT_CCHA(pb_rhu_pcie_dat_occ, 0x81),
CMN_EVENT_CCHA(pb_rhu_pcie_dat_alloc, 0x82),
CMN_EVENT_CCHA(pb_pcie_wr_dat_occ, 0x83),
CMN_EVENT_CCHA(pb_pcie_wr_dat_alloc, 0x84),
CMN_EVENT_CCLA(rx_cxs, 0x21),
CMN_EVENT_CCLA(tx_cxs, 0x22),
CMN_EVENT_CCLA(rx_cxs_avg_size, 0x23),
CMN_EVENT_CCLA(tx_cxs_avg_size, 0x24),
CMN_EVENT_CCLA(tx_cxs_lcrd_backpressure, 0x25),
CMN_EVENT_CCLA(link_crdbuf_occ, 0x26),
CMN_EVENT_CCLA(link_crdbuf_alloc, 0x27),
CMN_EVENT_CCLA(pfwd_rcvr_cxs, 0x28),
CMN_EVENT_CCLA(pfwd_sndr_num_flits, 0x29),
CMN_EVENT_CCLA(pfwd_sndr_stalls_static_crd, 0x2a),
CMN_EVENT_CCLA(pfwd_sndr_stalls_dynmaic_crd, 0x2b),
CMN_EVENT_HNS_HBT(cache_miss, 0x01),
CMN_EVENT_HNS_HBT(slc_sf_cache_access, 0x02),
CMN_EVENT_HNS_HBT(cache_fill, 0x03),
CMN_EVENT_HNS_HBT(pocq_retry, 0x04),
CMN_EVENT_HNS_HBT(pocq_reqs_recvd, 0x05),
CMN_EVENT_HNS_HBT(sf_hit, 0x06),
CMN_EVENT_HNS_HBT(sf_evictions, 0x07),
CMN_EVENT_HNS(dir_snoops_sent, 0x08),
CMN_EVENT_HNS(brd_snoops_sent, 0x09),
CMN_EVENT_HNS_HBT(slc_eviction, 0x0a),
CMN_EVENT_HNS_HBT(slc_fill_invalid_way, 0x0b),
CMN_EVENT_HNS(mc_retries_local, 0x0c),
CMN_EVENT_HNS_SNH(mc_reqs_local, 0x0d),
CMN_EVENT_HNS(qos_hh_retry, 0x0e),
CMN_EVENT_HNS_OCC(qos_pocq_occupancy, 0x0f),
CMN_EVENT_HNS(pocq_addrhaz, 0x10),
CMN_EVENT_HNS(pocq_atomic_addrhaz, 0x11),
CMN_EVENT_HNS(ld_st_swp_adq_full, 0x12),
CMN_EVENT_HNS(cmp_adq_full, 0x13),
CMN_EVENT_HNS(txdat_stall, 0x14),
CMN_EVENT_HNS(txrsp_stall, 0x15),
CMN_EVENT_HNS(seq_full, 0x16),
CMN_EVENT_HNS(seq_hit, 0x17),
CMN_EVENT_HNS(snp_sent, 0x18),
CMN_EVENT_HNS(sfbi_dir_snp_sent, 0x19),
CMN_EVENT_HNS(sfbi_brd_snp_sent, 0x1a),
CMN_EVENT_HNS(intv_dirty, 0x1c),
CMN_EVENT_HNS(stash_snp_sent, 0x1d),
CMN_EVENT_HNS(stash_data_pull, 0x1e),
CMN_EVENT_HNS(snp_fwded, 0x1f),
CMN_EVENT_HNS(atomic_fwd, 0x20),
CMN_EVENT_HNS(mpam_hardlim, 0x21),
CMN_EVENT_HNS(mpam_softlim, 0x22),
CMN_EVENT_HNS(snp_sent_cluster, 0x23),
CMN_EVENT_HNS(sf_imprecise_evict, 0x24),
CMN_EVENT_HNS(sf_evict_shared_line, 0x25),
CMN_EVENT_HNS_CLS(pocq_class_occup, 0x26),
CMN_EVENT_HNS_CLS(pocq_class_retry, 0x27),
CMN_EVENT_HNS_CLS(class_mc_reqs_local, 0x28),
CMN_EVENT_HNS_CLS(class_cgnt_cmin, 0x29),
CMN_EVENT_HNS_SNT(sn_throttle, 0x2a),
CMN_EVENT_HNS_SNT(sn_throttle_min, 0x2b),
CMN_EVENT_HNS(sf_precise_to_imprecise, 0x2c),
CMN_EVENT_HNS(snp_intv_cln, 0x2d),
CMN_EVENT_HNS(nc_excl, 0x2e),
CMN_EVENT_HNS(excl_mon_ovfl, 0x2f),
CMN_EVENT_HNS(snp_req_recvd, 0x30),
CMN_EVENT_HNS(snp_req_byp_pocq, 0x31),
CMN_EVENT_HNS(dir_ccgha_snp_sent, 0x32),
CMN_EVENT_HNS(brd_ccgha_snp_sent, 0x33),
CMN_EVENT_HNS(ccgha_snp_stall, 0x34),
CMN_EVENT_HNS(lbt_req_hardlim, 0x35),
CMN_EVENT_HNS(hbt_req_hardlim, 0x36),
CMN_EVENT_HNS(sf_reupdate, 0x37),
CMN_EVENT_HNS(excl_sf_imprecise, 0x38),
CMN_EVENT_HNS(snp_pocq_addrhaz, 0x39),
CMN_EVENT_HNS(mc_retries_remote, 0x3a),
CMN_EVENT_HNS_SNH(mc_reqs_remote, 0x3b),
CMN_EVENT_HNS_CLS(class_mc_reqs_remote, 0x3c),
NULL
};
static const struct attribute_group arm_cmn_event_attrs_group = {
.name = "events",
.attrs = arm_cmn_event_attrs,
.is_visible = arm_cmn_event_attr_is_visible,
};
static ssize_t arm_cmn_format_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct arm_cmn_format_attr *fmt = container_of(attr, typeof(*fmt), attr);
int lo = __ffs(fmt->field), hi = __fls(fmt->field);
if (lo == hi)
return sysfs_emit(buf, "config:%d\n", lo);
if (!fmt->config)
return sysfs_emit(buf, "config:%d-%d\n", lo, hi);
return sysfs_emit(buf, "config%d:%d-%d\n", fmt->config, lo, hi);
}
#define _CMN_FORMAT_ATTR(_name, _cfg, _fld) \
(&((struct arm_cmn_format_attr[]) {{ \
.attr = __ATTR(_name, 0444, arm_cmn_format_show, NULL), \
.config = _cfg, \
.field = _fld, \
}})[0].attr.attr)
#define CMN_FORMAT_ATTR(_name, _fld) _CMN_FORMAT_ATTR(_name, 0, _fld)
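/*
 * _CMN_FORMAT_ATTR() uses the compound-literal trick common to perf drivers:
 * it instantiates a one-element anonymous array of struct arm_cmn_format_attr
 * and yields a pointer to the embedded struct attribute, so each format field
 * can be described inline in the arm_cmn_format_attrs[] table below without
 * a separately named variable.
 */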
static struct attribute *arm_cmn_format_attrs[] = {
CMN_FORMAT_ATTR(type, CMN_CONFIG_TYPE),
CMN_FORMAT_ATTR(eventid, CMN_CONFIG_EVENTID),
CMN_FORMAT_ATTR(occupid, CMN_CONFIG_OCCUPID),
CMN_FORMAT_ATTR(bynodeid, CMN_CONFIG_BYNODEID),
CMN_FORMAT_ATTR(nodeid, CMN_CONFIG_NODEID),
CMN_FORMAT_ATTR(wp_dev_sel, CMN_CONFIG_WP_DEV_SEL),
CMN_FORMAT_ATTR(wp_chn_sel, CMN_CONFIG_WP_CHN_SEL),
CMN_FORMAT_ATTR(wp_grp, CMN_CONFIG_WP_GRP),
CMN_FORMAT_ATTR(wp_exclusive, CMN_CONFIG_WP_EXCLUSIVE),
CMN_FORMAT_ATTR(wp_combine, CMN_CONFIG_WP_COMBINE),
_CMN_FORMAT_ATTR(wp_val, 1, CMN_CONFIG1_WP_VAL),
_CMN_FORMAT_ATTR(wp_mask, 2, CMN_CONFIG2_WP_MASK),
NULL
};
static const struct attribute_group arm_cmn_format_attrs_group = {
.name = "format",
.attrs = arm_cmn_format_attrs,
};
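/*
 * A rough usage sketch (the event name assumes the hnf_ prefix applied by
 * CMN_EVENT_HNF() earlier in this file, and the bracketed values are
 * placeholders): with the "events" and "format" groups above, the perf tool
 * can open events either by name or by raw fields, e.g.
 *
 *   perf stat -e arm_cmn_0/hnf_cache_miss/ ...
 *   perf stat -e arm_cmn_0/type=<node type>,eventid=0x01,bynodeid=1,nodeid=<id>/ ...
 *
 * where the field names map onto the config bits advertised by
 * arm_cmn_format_show() above.
 */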
static ssize_t arm_cmn_cpumask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));
return cpumap_print_to_pagebuf(true, buf, cpumask_of(cmn->cpu));
}
static struct device_attribute arm_cmn_cpumask_attr =
__ATTR(cpumask, 0444, arm_cmn_cpumask_show, NULL);
static ssize_t arm_cmn_identifier_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));
return sysfs_emit(buf, "%03x%02x\n", cmn->part, cmn->rev);
}
static struct device_attribute arm_cmn_identifier_attr =
__ATTR(identifier, 0444, arm_cmn_identifier_show, NULL);
static struct attribute *arm_cmn_other_attrs[] = {
&arm_cmn_cpumask_attr.attr,
&arm_cmn_identifier_attr.attr,
NULL,
};
static const struct attribute_group arm_cmn_other_attrs_group = {
.attrs = arm_cmn_other_attrs,
};
static const struct attribute_group *arm_cmn_attr_groups[] = {
&arm_cmn_event_attrs_group,
&arm_cmn_format_attrs_group,
&arm_cmn_other_attrs_group,
NULL
};
static int arm_cmn_wp_idx(struct perf_event *event)
{
return CMN_EVENT_EVENTID(event) + CMN_EVENT_WP_GRP(event);
}
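/*
 * A sketch of the mapping, assuming CMN_WP_UP and CMN_WP_DOWN take the
 * values 0 and 2: the sum above yields watchpoint indices 0/1 for the up
 * direction (groups 0/1) and 2/3 for down, matching the per-DTM wp_event[]
 * and WPn_* register indexing used elsewhere in this driver.
 */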
static u32 arm_cmn_wp_config(struct perf_event *event)
{
u32 config;
u32 dev = CMN_EVENT_WP_DEV_SEL(event);
u32 chn = CMN_EVENT_WP_CHN_SEL(event);
u32 grp = CMN_EVENT_WP_GRP(event);
u32 exc = CMN_EVENT_WP_EXCLUSIVE(event);
u32 combine = CMN_EVENT_WP_COMBINE(event);
bool is_cmn600 = to_cmn(event->pmu)->part == PART_CMN600;
config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) |
FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) |
FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_GRP, grp) |
FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL2, dev >> 1);
if (exc)
config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_EXCLUSIVE :
CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE;
if (combine && !grp)
config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_COMBINE :
CMN_DTM_WPn_CONFIG_WP_COMBINE;
return config;
}
static void arm_cmn_set_state(struct arm_cmn *cmn, u32 state)
{
if (!cmn->state)
writel_relaxed(0, cmn->dtc[0].base + CMN_DT_PMCR);
cmn->state |= state;
}
static void arm_cmn_clear_state(struct arm_cmn *cmn, u32 state)
{
cmn->state &= ~state;
if (!cmn->state)
writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN,
cmn->dtc[0].base + CMN_DT_PMCR);
}
static void arm_cmn_pmu_enable(struct pmu *pmu)
{
arm_cmn_clear_state(to_cmn(pmu), CMN_STATE_DISABLED);
}
static void arm_cmn_pmu_disable(struct pmu *pmu)
{
arm_cmn_set_state(to_cmn(pmu), CMN_STATE_DISABLED);
}
static u64 arm_cmn_read_dtm(struct arm_cmn *cmn, struct arm_cmn_hw_event *hw,
bool snapshot)
{
struct arm_cmn_dtm *dtm = NULL;
struct arm_cmn_node *dn;
unsigned int i, offset, dtm_idx;
u64 reg, count = 0;
offset = snapshot ? CMN_DTM_PMEVCNTSR : CMN_DTM_PMEVCNT;
for_each_hw_dn(hw, dn, i) {
if (dtm != &cmn->dtms[dn->dtm]) {
dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset;
reg = readq_relaxed(dtm->base + offset);
}
dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
count += (u16)(reg >> (dtm_idx * 16));
}
return count;
}
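/*
 * The cycle counter is (re)seeded to CMN_CC_INIT, and the mask below implies
 * the counter spans 2 * CMN_CC_INIT values, so the subtraction recovers the
 * elapsed count modulo the counter width even across one wrap. Scaled-down
 * illustration with a hypothetical 4-bit counter and CMN_CC_INIT == 0x8:
 * a raw read of 0x3 gives (0x3 - 0x8) & 0xf == 0xb, i.e. 11 elapsed cycles.
 */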
static u64 arm_cmn_read_cc(struct arm_cmn_dtc *dtc)
{
u64 val = readq_relaxed(dtc->base + CMN_DT_PMCCNTR);
writeq_relaxed(CMN_CC_INIT, dtc->base + CMN_DT_PMCCNTR);
return (val - CMN_CC_INIT) & ((CMN_CC_INIT << 1) - 1);
}
static u32 arm_cmn_read_counter(struct arm_cmn_dtc *dtc, int idx)
{
u32 val, pmevcnt = CMN_DT_PMEVCNT(idx);
val = readl_relaxed(dtc->base + pmevcnt);
writel_relaxed(CMN_COUNTER_INIT, dtc->base + pmevcnt);
return val - CMN_COUNTER_INIT;
}
static void arm_cmn_init_counter(struct perf_event *event)
{
struct arm_cmn *cmn = to_cmn(event->pmu);
struct arm_cmn_hw_event *hw = to_cmn_hw(event);
unsigned int i, pmevcnt = CMN_DT_PMEVCNT(hw->dtc_idx);
u64 count;
for (i = 0; hw->dtcs_used & (1U << i); i++) {
writel_relaxed(CMN_COUNTER_INIT, cmn->dtc[i].base + pmevcnt);
cmn->dtc[i].counters[hw->dtc_idx] = event;
}
count = arm_cmn_read_dtm(cmn, hw, false);
local64_set(&event->hw.prev_count, count);
}
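/*
 * Counts are accumulated in two halves: each DTM counter lane is effectively
 * 16 bits wide and feeds its overflows into the paired global DTC counter,
 * so a total reading is dtm_count + (dtc_count << 16) - hence the << 16
 * below and in the overflow interrupt handler.
 */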
static void arm_cmn_event_read(struct perf_event *event)
{
struct arm_cmn *cmn = to_cmn(event->pmu);
struct arm_cmn_hw_event *hw = to_cmn_hw(event);
u64 delta, new, prev;
unsigned long flags;
unsigned int i;
if (hw->dtc_idx == CMN_DT_NUM_COUNTERS) {
i = __ffs(hw->dtcs_used);
delta = arm_cmn_read_cc(cmn->dtc + i);
local64_add(delta, &event->count);
return;
}
new = arm_cmn_read_dtm(cmn, hw, false);
prev = local64_xchg(&event->hw.prev_count, new);
delta = new - prev;
local_irq_save(flags);
for (i = 0; hw->dtcs_used & (1U << i); i++) {
new = arm_cmn_read_counter(cmn->dtc + i, hw->dtc_idx);
delta += new << 16;
}
local_irq_restore(flags);
local64_add(delta, &event->count);
}
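/*
 * The occupancy/filter selectors live in a single per-node event-select
 * register shared by all counters on that node, so each selected value is
 * refcounted: the first user programs the field, later users must request
 * the same value or get -EBUSY, and arm_cmn_event_clear() drops the count.
 */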
static int arm_cmn_set_event_sel_hi(struct arm_cmn_node *dn,
enum cmn_filter_select fsel, u8 occupid)
{
u64 reg;
if (fsel == SEL_NONE)
return 0;
if (!dn->occupid[fsel].count) {
dn->occupid[fsel].val = occupid;
reg = FIELD_PREP(CMN__PMU_CBUSY_SNTHROTTLE_SEL,
dn->occupid[SEL_CBUSY_SNTHROTTLE_SEL].val) |
FIELD_PREP(CMN__PMU_SN_HOME_SEL,
dn->occupid[SEL_SN_HOME_SEL].val) |
FIELD_PREP(CMN__PMU_HBT_LBT_SEL,
dn->occupid[SEL_HBT_LBT_SEL].val) |
FIELD_PREP(CMN__PMU_CLASS_OCCUP_ID,
dn->occupid[SEL_CLASS_OCCUP_ID].val) |
FIELD_PREP(CMN__PMU_OCCUP1_ID,
dn->occupid[SEL_OCCUP1ID].val);
writel_relaxed(reg >> 32, dn->pmu_base + CMN_PMU_EVENT_SEL + 4);
} else if (dn->occupid[fsel].val != occupid) {
return -EBUSY;
}
dn->occupid[fsel].count++;
return 0;
}
static void arm_cmn_set_event_sel_lo(struct arm_cmn_node *dn, int dtm_idx,
int eventid, bool wide_sel)
{
if (wide_sel) {
dn->event_w[dtm_idx] = eventid;
writeq_relaxed(le64_to_cpu(dn->event_sel_w), dn->pmu_base + CMN_PMU_EVENT_SEL);
} else {
dn->event[dtm_idx] = eventid;
writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL);
}
}
static void arm_cmn_event_start(struct perf_event *event, int flags)
{
struct arm_cmn *cmn = to_cmn(event->pmu);
struct arm_cmn_hw_event *hw = to_cmn_hw(event);
struct arm_cmn_node *dn;
enum cmn_node_type type = CMN_EVENT_TYPE(event);
int i;
if (type == CMN_TYPE_DTC) {
i = __ffs(hw->dtcs_used);
writeq_relaxed(CMN_CC_INIT, cmn->dtc[i].base + CMN_DT_PMCCNTR);
cmn->dtc[i].cc_active = true;
} else if (type == CMN_TYPE_WP) {
int wp_idx = arm_cmn_wp_idx(event);
u64 val = CMN_EVENT_WP_VAL(event);
u64 mask = CMN_EVENT_WP_MASK(event);
for_each_hw_dn(hw, dn, i) {
void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);
writeq_relaxed(val, base + CMN_DTM_WPn_VAL(wp_idx));
writeq_relaxed(mask, base + CMN_DTM_WPn_MASK(wp_idx));
}
} else for_each_hw_dn(hw, dn, i) {
int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
arm_cmn_set_event_sel_lo(dn, dtm_idx, CMN_EVENT_EVENTID(event),
hw->wide_sel);
}
}
static void arm_cmn_event_stop(struct perf_event *event, int flags)
{
struct arm_cmn *cmn = to_cmn(event->pmu);
struct arm_cmn_hw_event *hw = to_cmn_hw(event);
struct arm_cmn_node *dn;
enum cmn_node_type type = CMN_EVENT_TYPE(event);
int i;
if (type == CMN_TYPE_DTC) {
i = __ffs(hw->dtcs_used);
cmn->dtc[i].cc_active = false;
} else if (type == CMN_TYPE_WP) {
int wp_idx = arm_cmn_wp_idx(event);
for_each_hw_dn(hw, dn, i) {
void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);
writeq_relaxed(0, base + CMN_DTM_WPn_MASK(wp_idx));
writeq_relaxed(~0ULL, base + CMN_DTM_WPn_VAL(wp_idx));
}
} else for_each_hw_dn(hw, dn, i) {
int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
arm_cmn_set_event_sel_lo(dn, dtm_idx, 0, hw->wide_sel);
}
arm_cmn_event_read(event);
}
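/*
 * Scratch bookkeeping for group validation: arm_cmn_validate_group() dry-runs
 * the would-be group through this structure to check that the DTC and DTM
 * counters, watchpoint slots and filter values it needs could all be
 * allocated without conflicting with the leader and its siblings.
 */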
struct arm_cmn_val {
u8 dtm_count[CMN_MAX_DTMS];
u8 occupid[CMN_MAX_DTMS][SEL_MAX];
u8 wp[CMN_MAX_DTMS][4];
int dtc_count;
bool cycles;
};
static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val,
struct perf_event *event)
{
struct arm_cmn_hw_event *hw = to_cmn_hw(event);
struct arm_cmn_node *dn;
enum cmn_node_type type;
int i;
if (is_software_event(event))
return;
type = CMN_EVENT_TYPE(event);
if (type == CMN_TYPE_DTC) {
val->cycles = true;
return;
}
val->dtc_count++;
for_each_hw_dn(hw, dn, i) {
int wp_idx, dtm = dn->dtm, sel = hw->filter_sel;
val->dtm_count[dtm]++;
if (sel > SEL_NONE)
val->occupid[dtm][sel] = CMN_EVENT_OCCUPID(event) + 1;
if (type != CMN_TYPE_WP)
continue;
wp_idx = arm_cmn_wp_idx(event);
val->wp[dtm][wp_idx] = CMN_EVENT_WP_COMBINE(event) + 1;
}
}
static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event)
{
struct arm_cmn_hw_event *hw = to_cmn_hw(event);
struct arm_cmn_node *dn;
struct perf_event *sibling, *leader = event->group_leader;
enum cmn_node_type type;
struct arm_cmn_val *val;
int i, ret = -EINVAL;
if (leader == event)
return 0;
if (event->pmu != leader->pmu && !is_software_event(leader))
return -EINVAL;
val = kzalloc(sizeof(*val), GFP_KERNEL);
if (!val)
return -ENOMEM;
arm_cmn_val_add_event(cmn, val, leader);
for_each_sibling_event(sibling, leader)
arm_cmn_val_add_event(cmn, val, sibling);
type = CMN_EVENT_TYPE(event);
if (type == CMN_TYPE_DTC) {
ret = val->cycles ? -EINVAL : 0;
goto done;
}
if (val->dtc_count == CMN_DT_NUM_COUNTERS)
goto done;
for_each_hw_dn(hw, dn, i) {
int wp_idx, wp_cmb, dtm = dn->dtm, sel = hw->filter_sel;
if (val->dtm_count[dtm] == CMN_DTM_NUM_COUNTERS)
goto done;
if (sel > SEL_NONE && val->occupid[dtm][sel] &&
val->occupid[dtm][sel] != CMN_EVENT_OCCUPID(event) + 1)
goto done;
if (type != CMN_TYPE_WP)
continue;
wp_idx = arm_cmn_wp_idx(event);
if (val->wp[dtm][wp_idx])
goto done;
wp_cmb = val->wp[dtm][wp_idx ^ 1];
if (wp_cmb && wp_cmb != CMN_EVENT_WP_COMBINE(event) + 1)
goto done;
}
ret = 0;
done:
kfree(val);
return ret;
}
static enum cmn_filter_select arm_cmn_filter_sel(const struct arm_cmn *cmn,
enum cmn_node_type type,
unsigned int eventid)
{
struct arm_cmn_event_attr *e;
enum cmn_model model = arm_cmn_model(cmn);
for (int i = 0; i < ARRAY_SIZE(arm_cmn_event_attrs) - 1; i++) {
e = container_of(arm_cmn_event_attrs[i], typeof(*e), attr.attr);
if (e->model & model && e->type == type && e->eventid == eventid)
return e->fsel;
}
return SEL_NONE;
}
static int arm_cmn_event_init(struct perf_event *event)
{
struct arm_cmn *cmn = to_cmn(event->pmu);
struct arm_cmn_hw_event *hw = to_cmn_hw(event);
struct arm_cmn_node *dn;
enum cmn_node_type type;
bool bynodeid;
u16 nodeid, eventid;
if (event->attr.type != event->pmu->type)
return -ENOENT;
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EINVAL;
event->cpu = cmn->cpu;
if (event->cpu < 0)
return -EINVAL;
type = CMN_EVENT_TYPE(event);
/* DTC events (i.e. cycles) already have everything they need */
if (type == CMN_TYPE_DTC)
return arm_cmn_validate_group(cmn, event);
eventid = CMN_EVENT_EVENTID(event);
/* For watchpoints we need the actual XP node here */
if (type == CMN_TYPE_WP) {
type = CMN_TYPE_XP;
/* ...and we need a "real" direction */
if (eventid != CMN_WP_UP && eventid != CMN_WP_DOWN)
return -EINVAL;
/* ...but the DTM may depend on which port we're watching */
if (cmn->multi_dtm)
hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2;
} else if (type == CMN_TYPE_XP && cmn->part == PART_CMN700) {
hw->wide_sel = true;
}
/* This is sufficiently annoying to recalculate, so cache it */
hw->filter_sel = arm_cmn_filter_sel(cmn, type, eventid);
bynodeid = CMN_EVENT_BYNODEID(event);
nodeid = CMN_EVENT_NODEID(event);
hw->dn = arm_cmn_node(cmn, type);
if (!hw->dn)
return -EINVAL;
for (dn = hw->dn; dn->type == type; dn++) {
if (bynodeid && dn->id != nodeid) {
hw->dn++;
continue;
}
hw->num_dns++;
if (bynodeid)
break;
}
if (!hw->num_dns) {
struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, nodeid);
dev_dbg(cmn->dev, "invalid node 0x%x (%d,%d,%d,%d) type 0x%x\n",
nodeid, nid.x, nid.y, nid.port, nid.dev, type);
return -EINVAL;
}
/*
* Keep assuming non-cycles events count in all DTC domains; turns out
* it's hard to make a worthwhile optimisation around this, short of
* going all-in with domain-local counter allocation as well.
*/
hw->dtcs_used = (1U << cmn->num_dtcs) - 1;
return arm_cmn_validate_group(cmn, event);
}
static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event,
int i)
{
struct arm_cmn_hw_event *hw = to_cmn_hw(event);
enum cmn_node_type type = CMN_EVENT_TYPE(event);
while (i--) {
struct arm_cmn_dtm *dtm = &cmn->dtms[hw->dn[i].dtm] + hw->dtm_offset;
unsigned int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
if (type == CMN_TYPE_WP)
dtm->wp_event[arm_cmn_wp_idx(event)] = -1;
if (hw->filter_sel > SEL_NONE)
hw->dn[i].occupid[hw->filter_sel].count--;
dtm->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx);
writel_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG);
}
memset(hw->dtm_idx, 0, sizeof(hw->dtm_idx));
for (i = 0; hw->dtcs_used & (1U << i); i++)
cmn->dtc[i].counters[hw->dtc_idx] = NULL;
}
static int arm_cmn_event_add(struct perf_event *event, int flags)
{
struct arm_cmn *cmn = to_cmn(event->pmu);
struct arm_cmn_hw_event *hw = to_cmn_hw(event);
struct arm_cmn_dtc *dtc = &cmn->dtc[0];
struct arm_cmn_node *dn;
enum cmn_node_type type = CMN_EVENT_TYPE(event);
unsigned int i, dtc_idx, input_sel;
if (type == CMN_TYPE_DTC) {
i = 0;
while (cmn->dtc[i].cycles)
if (++i == cmn->num_dtcs)
return -ENOSPC;
cmn->dtc[i].cycles = event;
hw->dtc_idx = CMN_DT_NUM_COUNTERS;
hw->dtcs_used = 1U << i;
if (flags & PERF_EF_START)
arm_cmn_event_start(event, 0);
return 0;
}
/* Grab a free global counter first... */
dtc_idx = 0;
while (dtc->counters[dtc_idx])
if (++dtc_idx == CMN_DT_NUM_COUNTERS)
return -ENOSPC;
hw->dtc_idx = dtc_idx;
/* ...then the local counters to feed it. */
for_each_hw_dn(hw, dn, i) {
struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset;
unsigned int dtm_idx, shift;
u64 reg;
dtm_idx = 0;
while (dtm->pmu_config_low & CMN__PMEVCNT_PAIRED(dtm_idx))
if (++dtm_idx == CMN_DTM_NUM_COUNTERS)
goto free_dtms;
if (type == CMN_TYPE_XP) {
input_sel = CMN__PMEVCNT0_INPUT_SEL_XP + dtm_idx;
} else if (type == CMN_TYPE_WP) {
int tmp, wp_idx = arm_cmn_wp_idx(event);
u32 cfg = arm_cmn_wp_config(event);
if (dtm->wp_event[wp_idx] >= 0)
goto free_dtms;
tmp = dtm->wp_event[wp_idx ^ 1];
if (tmp >= 0 && CMN_EVENT_WP_COMBINE(event) !=
CMN_EVENT_WP_COMBINE(dtc->counters[tmp]))
goto free_dtms;
input_sel = CMN__PMEVCNT0_INPUT_SEL_WP + wp_idx;
dtm->wp_event[wp_idx] = dtc_idx;
writel_relaxed(cfg, dtm->base + CMN_DTM_WPn_CONFIG(wp_idx));
} else {
struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
if (cmn->multi_dtm)
nid.port %= 2;
input_sel = CMN__PMEVCNT0_INPUT_SEL_DEV + dtm_idx +
(nid.port << 4) + (nid.dev << 2);
if (arm_cmn_set_event_sel_hi(dn, hw->filter_sel, CMN_EVENT_OCCUPID(event)))
goto free_dtms;
}
arm_cmn_set_index(hw->dtm_idx, i, dtm_idx);
dtm->input_sel[dtm_idx] = input_sel;
shift = CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(dtm_idx);
dtm->pmu_config_low &= ~(CMN__PMEVCNT0_GLOBAL_NUM << shift);
dtm->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, dtc_idx) << shift;
dtm->pmu_config_low |= CMN__PMEVCNT_PAIRED(dtm_idx);
reg = (u64)le32_to_cpu(dtm->pmu_config_high) << 32 | dtm->pmu_config_low;
writeq_relaxed(reg, dtm->base + CMN_DTM_PMU_CONFIG);
}
/* Go go go! */
arm_cmn_init_counter(event);
if (flags & PERF_EF_START)
arm_cmn_event_start(event, 0);
return 0;
free_dtms:
arm_cmn_event_clear(cmn, event, i);
return -ENOSPC;
}
static void arm_cmn_event_del(struct perf_event *event, int flags)
{
struct arm_cmn *cmn = to_cmn(event->pmu);
struct arm_cmn_hw_event *hw = to_cmn_hw(event);
enum cmn_node_type type = CMN_EVENT_TYPE(event);
arm_cmn_event_stop(event, PERF_EF_UPDATE);
if (type == CMN_TYPE_DTC)
cmn->dtc[__ffs(hw->dtcs_used)].cycles = NULL;
else
arm_cmn_event_clear(cmn, event, hw->num_dns);
}
/*
* We stop the PMU for both add and read, to avoid skew across DTM counters.
* In theory we could use snapshots to read without stopping, but then it
 * becomes a lot trickier to deal with overflow and racing against interrupts,
* plus it seems they don't work properly on some hardware anyway :(
*/
static void arm_cmn_start_txn(struct pmu *pmu, unsigned int flags)
{
arm_cmn_set_state(to_cmn(pmu), CMN_STATE_TXN);
}
static void arm_cmn_end_txn(struct pmu *pmu)
{
arm_cmn_clear_state(to_cmn(pmu), CMN_STATE_TXN);
}
static int arm_cmn_commit_txn(struct pmu *pmu)
{
arm_cmn_end_txn(pmu);
return 0;
}
static void arm_cmn_migrate(struct arm_cmn *cmn, unsigned int cpu)
{
unsigned int i;
perf_pmu_migrate_context(&cmn->pmu, cmn->cpu, cpu);
for (i = 0; i < cmn->num_dtcs; i++)
irq_set_affinity(cmn->dtc[i].irq, cpumask_of(cpu));
cmn->cpu = cpu;
}
static int arm_cmn_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
{
struct arm_cmn *cmn;
int node;
cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node);
node = dev_to_node(cmn->dev);
if (node != NUMA_NO_NODE && cpu_to_node(cmn->cpu) != node && cpu_to_node(cpu) == node)
arm_cmn_migrate(cmn, cpu);
return 0;
}
static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
{
struct arm_cmn *cmn;
unsigned int target;
int node;
cpumask_t mask;
cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node);
if (cpu != cmn->cpu)
return 0;
node = dev_to_node(cmn->dev);
if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) &&
cpumask_andnot(&mask, &mask, cpumask_of(cpu)))
target = cpumask_any(&mask);
else
target = cpumask_any_but(cpu_online_mask, cpu);
if (target < nr_cpu_ids)
arm_cmn_migrate(cmn, target);
return 0;
}
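/*
 * DTCs which share an IRQ are chained via their irq_friend offsets (set up
 * in arm_cmn_init_irqs() below): the handler services its own overflow
 * status, then hops forward by irq_friend to the next DTC on the same line
 * until the offset is 0.
 */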
static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id)
{
struct arm_cmn_dtc *dtc = dev_id;
irqreturn_t ret = IRQ_NONE;
for (;;) {
u32 status = readl_relaxed(dtc->base + CMN_DT_PMOVSR);
u64 delta;
int i;
		/* Check all DT counters, not just one DTM's worth */
		for (i = 0; i < CMN_DT_NUM_COUNTERS; i++) {
if (status & (1U << i)) {
ret = IRQ_HANDLED;
if (WARN_ON(!dtc->counters[i]))
continue;
delta = (u64)arm_cmn_read_counter(dtc, i) << 16;
local64_add(delta, &dtc->counters[i]->count);
}
}
if (status & (1U << CMN_DT_NUM_COUNTERS)) {
ret = IRQ_HANDLED;
if (dtc->cc_active && !WARN_ON(!dtc->cycles)) {
delta = arm_cmn_read_cc(dtc);
local64_add(delta, &dtc->cycles->count);
}
}
writel_relaxed(status, dtc->base + CMN_DT_PMOVSR_CLR);
if (!dtc->irq_friend)
return ret;
dtc += dtc->irq_friend;
}
}
/* We can reasonably accommodate DTCs of the same CMN sharing IRQs */
static int arm_cmn_init_irqs(struct arm_cmn *cmn)
{
int i, j, irq, err;
for (i = 0; i < cmn->num_dtcs; i++) {
irq = cmn->dtc[i].irq;
for (j = i; j--; ) {
if (cmn->dtc[j].irq == irq) {
cmn->dtc[j].irq_friend = i - j;
goto next;
}
}
err = devm_request_irq(cmn->dev, irq, arm_cmn_handle_irq,
IRQF_NOBALANCING | IRQF_NO_THREAD,
dev_name(cmn->dev), &cmn->dtc[i]);
if (err)
return err;
err = irq_set_affinity(irq, cpumask_of(cmn->cpu));
if (err)
return err;
next:
; /* isn't C great? */
}
return 0;
}
static void arm_cmn_init_dtm(struct arm_cmn_dtm *dtm, struct arm_cmn_node *xp, int idx)
{
int i;
dtm->base = xp->pmu_base + CMN_DTM_OFFSET(idx);
dtm->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN;
writeq_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG);
for (i = 0; i < 4; i++) {
dtm->wp_event[i] = -1;
writeq_relaxed(0, dtm->base + CMN_DTM_WPn_MASK(i));
writeq_relaxed(~0ULL, dtm->base + CMN_DTM_WPn_VAL(i));
}
}
static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int idx)
{
struct arm_cmn_dtc *dtc = cmn->dtc + idx;
dtc->base = dn->pmu_base - CMN_PMU_OFFSET;
dtc->irq = platform_get_irq(to_platform_device(cmn->dev), idx);
if (dtc->irq < 0)
return dtc->irq;
writel_relaxed(CMN_DT_DTC_CTL_DT_EN, dtc->base + CMN_DT_DTC_CTL);
writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR);
writeq_relaxed(0, dtc->base + CMN_DT_PMCCNTR);
writel_relaxed(0x1ff, dtc->base + CMN_DT_PMOVSR_CLR);
return 0;
}
static int arm_cmn_node_cmp(const void *a, const void *b)
{
const struct arm_cmn_node *dna = a, *dnb = b;
int cmp;
cmp = dna->type - dnb->type;
if (!cmp)
cmp = dna->logid - dnb->logid;
return cmp;
}
static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
{
struct arm_cmn_node *dn, *xp;
int dtc_idx = 0;
u8 dtcs_present = (1 << cmn->num_dtcs) - 1;
cmn->dtc = devm_kcalloc(cmn->dev, cmn->num_dtcs, sizeof(cmn->dtc[0]), GFP_KERNEL);
if (!cmn->dtc)
return -ENOMEM;
sort(cmn->dns, cmn->num_dns, sizeof(cmn->dns[0]), arm_cmn_node_cmp, NULL);
cmn->xps = arm_cmn_node(cmn, CMN_TYPE_XP);
for (dn = cmn->dns; dn->type; dn++) {
if (dn->type == CMN_TYPE_XP) {
dn->dtc &= dtcs_present;
continue;
}
xp = arm_cmn_node_to_xp(cmn, dn);
dn->dtm = xp->dtm;
if (cmn->multi_dtm)
dn->dtm += arm_cmn_nid(cmn, dn->id).port / 2;
if (dn->type == CMN_TYPE_DTC) {
int err;
/* We do at least know that a DTC's XP must be in that DTC's domain */
if (xp->dtc == 0xf)
xp->dtc = 1 << dtc_idx;
err = arm_cmn_init_dtc(cmn, dn, dtc_idx++);
if (err)
return err;
}
/* To the PMU, RN-Ds don't add anything over RN-Is, so smoosh them together */
if (dn->type == CMN_TYPE_RND)
dn->type = CMN_TYPE_RNI;
/* We split the RN-I off already, so let the CCLA part match CCLA events */
if (dn->type == CMN_TYPE_CCLA_RNI)
dn->type = CMN_TYPE_CCLA;
}
arm_cmn_set_state(cmn, CMN_STATE_DISABLED);
return 0;
}
static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_cmn_node *node)
{
int level;
u64 reg = readq_relaxed(cmn->base + offset + CMN_NODE_INFO);
node->type = FIELD_GET(CMN_NI_NODE_TYPE, reg);
node->id = FIELD_GET(CMN_NI_NODE_ID, reg);
node->logid = FIELD_GET(CMN_NI_LOGICAL_ID, reg);
node->pmu_base = cmn->base + offset + CMN_PMU_OFFSET;
if (node->type == CMN_TYPE_CFG)
level = 0;
else if (node->type == CMN_TYPE_XP)
level = 1;
else
level = 2;
dev_dbg(cmn->dev, "node%*c%#06hx%*ctype:%-#6x id:%-4hd off:%#x\n",
(level * 2) + 1, ' ', node->id, 5 - (level * 2), ' ',
node->type, node->logid, offset);
}
static enum cmn_node_type arm_cmn_subtype(enum cmn_node_type type)
{
switch (type) {
case CMN_TYPE_HNP:
return CMN_TYPE_HNI;
case CMN_TYPE_CCLA_RNI:
return CMN_TYPE_RNI;
default:
return CMN_TYPE_INVALID;
}
}
static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
{
void __iomem *cfg_region;
struct arm_cmn_node cfg, *dn;
struct arm_cmn_dtm *dtm;
enum cmn_part part;
u16 child_count, child_poff;
u32 xp_offset[CMN_MAX_XPS];
u64 reg;
int i, j;
size_t sz;
arm_cmn_init_node_info(cmn, rgn_offset, &cfg);
if (cfg.type != CMN_TYPE_CFG)
return -ENODEV;
cfg_region = cmn->base + rgn_offset;
reg = readq_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_01);
part = FIELD_GET(CMN_CFGM_PID0_PART_0, reg);
part |= FIELD_GET(CMN_CFGM_PID1_PART_1, reg) << 8;
if (cmn->part && cmn->part != part)
dev_warn(cmn->dev,
"Firmware binding mismatch: expected part number 0x%x, found 0x%x\n",
cmn->part, part);
cmn->part = part;
if (!arm_cmn_model(cmn))
dev_warn(cmn->dev, "Unknown part number: 0x%x\n", part);
reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_23);
cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg);
reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL);
cmn->multi_dtm = reg & CMN_INFO_MULTIPLE_DTM_EN;
cmn->rsp_vc_num = FIELD_GET(CMN_INFO_RSP_VC_NUM, reg);
cmn->dat_vc_num = FIELD_GET(CMN_INFO_DAT_VC_NUM, reg);
reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL_1);
cmn->snp_vc_num = FIELD_GET(CMN_INFO_SNP_VC_NUM, reg);
cmn->req_vc_num = FIELD_GET(CMN_INFO_REQ_VC_NUM, reg);
reg = readq_relaxed(cfg_region + CMN_CHILD_INFO);
child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg);
child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg);
cmn->num_xps = child_count;
cmn->num_dns = cmn->num_xps;
/* Pass 1: visit the XPs, enumerate their children */
for (i = 0; i < cmn->num_xps; i++) {
reg = readq_relaxed(cfg_region + child_poff + i * 8);
xp_offset[i] = reg & CMN_CHILD_NODE_ADDR;
reg = readq_relaxed(cmn->base + xp_offset[i] + CMN_CHILD_INFO);
cmn->num_dns += FIELD_GET(CMN_CI_CHILD_COUNT, reg);
}
/*
* Some nodes effectively have two separate types, which we'll handle
* by creating one of each internally. For a (very) safe initial upper
* bound, account for double the number of non-XP nodes.
*/
dn = devm_kcalloc(cmn->dev, cmn->num_dns * 2 - cmn->num_xps,
sizeof(*dn), GFP_KERNEL);
if (!dn)
return -ENOMEM;
/* Initial safe upper bound on DTMs for any possible mesh layout */
i = cmn->num_xps;
if (cmn->multi_dtm)
i += cmn->num_xps + 1;
dtm = devm_kcalloc(cmn->dev, i, sizeof(*dtm), GFP_KERNEL);
if (!dtm)
return -ENOMEM;
/* Pass 2: now we can actually populate the nodes */
cmn->dns = dn;
cmn->dtms = dtm;
for (i = 0; i < cmn->num_xps; i++) {
void __iomem *xp_region = cmn->base + xp_offset[i];
struct arm_cmn_node *xp = dn++;
unsigned int xp_ports = 0;
arm_cmn_init_node_info(cmn, xp_offset[i], xp);
/*
* Thanks to the order in which XP logical IDs seem to be
* assigned, we can handily infer the mesh X dimension by
* looking out for the XP at (0,1) without needing to know
* the exact node ID format, which we can later derive.
*/
if (xp->id == (1 << 3))
cmn->mesh_x = xp->logid;
if (cmn->part == PART_CMN600)
xp->dtc = 0xf;
else
xp->dtc = 1 << readl_relaxed(xp_region + CMN_DTM_UNIT_INFO);
xp->dtm = dtm - cmn->dtms;
arm_cmn_init_dtm(dtm++, xp, 0);
/*
* Keeping track of connected ports will let us filter out
* unnecessary XP events easily. We can also reliably infer the
* "extra device ports" configuration for the node ID format
* from this, since in that case we will see at least one XP
* with port 2 connected, for the HN-D.
*/
for (int p = 0; p < CMN_MAX_PORTS; p++)
if (arm_cmn_device_connect_info(cmn, xp, p))
xp_ports |= BIT(p);
if (cmn->multi_dtm && (xp_ports & 0xc))
arm_cmn_init_dtm(dtm++, xp, 1);
if (cmn->multi_dtm && (xp_ports & 0x30))
arm_cmn_init_dtm(dtm++, xp, 2);
cmn->ports_used |= xp_ports;
reg = readq_relaxed(xp_region + CMN_CHILD_INFO);
child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg);
child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg);
for (j = 0; j < child_count; j++) {
reg = readq_relaxed(xp_region + child_poff + j * 8);
/*
* Don't even try to touch anything external, since in general
* we haven't a clue how to power up arbitrary CHI requesters.
* As of CMN-600r1 these could only be RN-SAMs or CXLAs,
* neither of which have any PMU events anyway.
* (Actually, CXLAs do seem to have grown some events in r1p2,
* but they don't go to regular XP DTMs, and they depend on
* secure configuration which we can't easily deal with)
*/
if (reg & CMN_CHILD_NODE_EXTERNAL) {
dev_dbg(cmn->dev, "ignoring external node %llx\n", reg);
continue;
}
arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn);
switch (dn->type) {
case CMN_TYPE_DTC:
cmn->num_dtcs++;
dn++;
break;
/* These guys have PMU events */
case CMN_TYPE_DVM:
case CMN_TYPE_HNI:
case CMN_TYPE_HNF:
case CMN_TYPE_SBSX:
case CMN_TYPE_RNI:
case CMN_TYPE_RND:
case CMN_TYPE_MTSX:
case CMN_TYPE_CXRA:
case CMN_TYPE_CXHA:
case CMN_TYPE_CCRA:
case CMN_TYPE_CCHA:
case CMN_TYPE_CCLA:
case CMN_TYPE_HNS:
dn++;
break;
/* Nothing to see here */
case CMN_TYPE_MPAM_S:
case CMN_TYPE_MPAM_NS:
case CMN_TYPE_RNSAM:
case CMN_TYPE_CXLA:
case CMN_TYPE_HNS_MPAM_S:
case CMN_TYPE_HNS_MPAM_NS:
break;
/*
* Split "optimised" combination nodes into separate
* types for the different event sets. Offsetting the
* base address lets us handle the second pmu_event_sel
* register via the normal mechanism later.
*/
case CMN_TYPE_HNP:
case CMN_TYPE_CCLA_RNI:
dn[1] = dn[0];
dn[0].pmu_base += CMN_HNP_PMU_EVENT_SEL;
dn[1].type = arm_cmn_subtype(dn->type);
dn += 2;
break;
/* Something has gone horribly wrong */
default:
dev_err(cmn->dev, "invalid device node type: 0x%x\n", dn->type);
return -ENODEV;
}
}
}
/* Correct for any nodes we added or skipped */
cmn->num_dns = dn - cmn->dns;
/* Cheeky +1 to help terminate pointer-based iteration later */
sz = (void *)(dn + 1) - (void *)cmn->dns;
dn = devm_krealloc(cmn->dev, cmn->dns, sz, GFP_KERNEL);
if (dn)
cmn->dns = dn;
sz = (void *)dtm - (void *)cmn->dtms;
dtm = devm_krealloc(cmn->dev, cmn->dtms, sz, GFP_KERNEL);
if (dtm)
cmn->dtms = dtm;
/*
* If mesh_x wasn't set during discovery then we never saw
* an XP at (0,1), thus we must have an Nx1 configuration.
*/
if (!cmn->mesh_x)
cmn->mesh_x = cmn->num_xps;
cmn->mesh_y = cmn->num_xps / cmn->mesh_x;
/* 1x1 config plays havoc with XP event encodings */
if (cmn->num_xps == 1)
dev_warn(cmn->dev, "1x1 config not fully supported, translate XP events manually\n");
dev_dbg(cmn->dev, "periph_id part 0x%03x revision %d\n", cmn->part, cmn->rev);
reg = cmn->ports_used;
dev_dbg(cmn->dev, "mesh %dx%d, ID width %d, ports %6pbl%s\n",
		cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn), &reg,
cmn->multi_dtm ? ", multi-DTM" : "");
return 0;
}
static int arm_cmn600_acpi_probe(struct platform_device *pdev, struct arm_cmn *cmn)
{
struct resource *cfg, *root;
cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!cfg)
return -EINVAL;
root = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!root)
return -EINVAL;
if (!resource_contains(cfg, root))
swap(cfg, root);
/*
* Note that devm_ioremap_resource() is dumb and won't let the platform
* device claim cfg when the ACPI companion device has already claimed
* root within it. But since they *are* already both claimed in the
* appropriate name, we don't really need to do it again here anyway.
*/
cmn->base = devm_ioremap(cmn->dev, cfg->start, resource_size(cfg));
if (!cmn->base)
return -ENOMEM;
return root->start - cfg->start;
}
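/*
 * Like arm_cmn600_acpi_probe(), this returns either a negative errno or the
 * offset of the CMN-600 root (configuration) node within the mapped region,
 * which arm_cmn_probe() then hands to arm_cmn_discover() as rgn_offset.
 */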
static int arm_cmn600_of_probe(struct device_node *np)
{
u32 rootnode;
return of_property_read_u32(np, "arm,root-node", &rootnode) ?: rootnode;
}
static int arm_cmn_probe(struct platform_device *pdev)
{
struct arm_cmn *cmn;
const char *name;
static atomic_t id;
int err, rootnode, this_id;
cmn = devm_kzalloc(&pdev->dev, sizeof(*cmn), GFP_KERNEL);
if (!cmn)
return -ENOMEM;
cmn->dev = &pdev->dev;
cmn->part = (unsigned long)device_get_match_data(cmn->dev);
platform_set_drvdata(pdev, cmn);
if (cmn->part == PART_CMN600 && has_acpi_companion(cmn->dev)) {
rootnode = arm_cmn600_acpi_probe(pdev, cmn);
} else {
rootnode = 0;
cmn->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cmn->base))
return PTR_ERR(cmn->base);
if (cmn->part == PART_CMN600)
rootnode = arm_cmn600_of_probe(pdev->dev.of_node);
}
if (rootnode < 0)
return rootnode;
err = arm_cmn_discover(cmn, rootnode);
if (err)
return err;
err = arm_cmn_init_dtcs(cmn);
if (err)
return err;
err = arm_cmn_init_irqs(cmn);
if (err)
return err;
cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev));
cmn->pmu = (struct pmu) {
.module = THIS_MODULE,
.attr_groups = arm_cmn_attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = arm_cmn_pmu_enable,
.pmu_disable = arm_cmn_pmu_disable,
.event_init = arm_cmn_event_init,
.add = arm_cmn_event_add,
.del = arm_cmn_event_del,
.start = arm_cmn_event_start,
.stop = arm_cmn_event_stop,
.read = arm_cmn_event_read,
.start_txn = arm_cmn_start_txn,
.commit_txn = arm_cmn_commit_txn,
.cancel_txn = arm_cmn_end_txn,
};
this_id = atomic_fetch_inc(&id);
name = devm_kasprintf(cmn->dev, GFP_KERNEL, "arm_cmn_%d", this_id);
if (!name)
return -ENOMEM;
err = cpuhp_state_add_instance(arm_cmn_hp_state, &cmn->cpuhp_node);
if (err)
return err;
err = perf_pmu_register(&cmn->pmu, name, -1);
if (err)
cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node);
else
arm_cmn_debugfs_init(cmn, this_id);
return err;
}
static int arm_cmn_remove(struct platform_device *pdev)
{
struct arm_cmn *cmn = platform_get_drvdata(pdev);
writel_relaxed(0, cmn->dtc[0].base + CMN_DT_DTC_CTL);
perf_pmu_unregister(&cmn->pmu);
cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node);
debugfs_remove(cmn->debug);
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id arm_cmn_of_match[] = {
{ .compatible = "arm,cmn-600", .data = (void *)PART_CMN600 },
{ .compatible = "arm,cmn-650" },
{ .compatible = "arm,cmn-700" },
{ .compatible = "arm,ci-700" },
{}
};
MODULE_DEVICE_TABLE(of, arm_cmn_of_match);
#endif
#ifdef CONFIG_ACPI
static const struct acpi_device_id arm_cmn_acpi_match[] = {
{ "ARMHC600", PART_CMN600 },
{ "ARMHC650" },
{ "ARMHC700" },
{}
};
MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match);
#endif
static struct platform_driver arm_cmn_driver = {
.driver = {
.name = "arm-cmn",
.of_match_table = of_match_ptr(arm_cmn_of_match),
.acpi_match_table = ACPI_PTR(arm_cmn_acpi_match),
},
.probe = arm_cmn_probe,
.remove = arm_cmn_remove,
};
static int __init arm_cmn_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
"perf/arm/cmn:online",
arm_cmn_pmu_online_cpu,
arm_cmn_pmu_offline_cpu);
if (ret < 0)
return ret;
arm_cmn_hp_state = ret;
arm_cmn_debugfs = debugfs_create_dir("arm-cmn", NULL);
ret = platform_driver_register(&arm_cmn_driver);
if (ret) {
cpuhp_remove_multi_state(arm_cmn_hp_state);
debugfs_remove(arm_cmn_debugfs);
}
return ret;
}
static void __exit arm_cmn_exit(void)
{
platform_driver_unregister(&arm_cmn_driver);
cpuhp_remove_multi_state(arm_cmn_hp_state);
debugfs_remove(arm_cmn_debugfs);
}
module_init(arm_cmn_init);
module_exit(arm_cmn_exit);
MODULE_AUTHOR("Robin Murphy <[email protected]>");
MODULE_DESCRIPTION("Arm CMN-600 PMU driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/perf/arm-cmn.c |
// SPDX-License-Identifier: GPL-2.0
// CCI Cache Coherent Interconnect PMU driver
// Copyright (C) 2013-2018 Arm Ltd.
// Author: Punit Agrawal <[email protected]>, Suzuki Poulose <[email protected]>
#include <linux/arm-cci.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#define DRIVER_NAME "ARM-CCI PMU"
#define CCI_PMCR 0x0100
#define CCI_PID2 0x0fe8
#define CCI_PMCR_CEN 0x00000001
#define CCI_PMCR_NCNT_MASK 0x0000f800
#define CCI_PMCR_NCNT_SHIFT 11
#define CCI_PID2_REV_MASK 0xf0
#define CCI_PID2_REV_SHIFT 4
#define CCI_PMU_EVT_SEL 0x000
#define CCI_PMU_CNTR 0x004
#define CCI_PMU_CNTR_CTRL 0x008
#define CCI_PMU_OVRFLW 0x00c
#define CCI_PMU_OVRFLW_FLAG 1
#define CCI_PMU_CNTR_SIZE(model) ((model)->cntr_size)
#define CCI_PMU_CNTR_BASE(model, idx) ((idx) * CCI_PMU_CNTR_SIZE(model))
#define CCI_PMU_CNTR_MASK ((1ULL << 32) - 1)
#define CCI_PMU_CNTR_LAST(cci_pmu) (cci_pmu->num_cntrs - 1)
#define CCI_PMU_MAX_HW_CNTRS(model) \
((model)->num_hw_cntrs + (model)->fixed_hw_cntrs)
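/*
 * Each counter occupies its own register window of cntr_size bytes, laid out
 * identically: CCI_PMU_CNTR_BASE() gives the window base, and the EVT_SEL,
 * CNTR, CNTR_CTRL and OVRFLW offsets above are applied within it. E.g. with
 * a hypothetical cntr_size of 0x1000, counter 2's overflow flag would sit at
 * 0x2000 + 0x00c relative to the counter region base.
 */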
/* Types of interfaces that can generate events */
enum {
CCI_IF_SLAVE,
CCI_IF_MASTER,
#ifdef CONFIG_ARM_CCI5xx_PMU
CCI_IF_GLOBAL,
#endif
CCI_IF_MAX,
};
#define NUM_HW_CNTRS_CII_4XX 4
#define NUM_HW_CNTRS_CII_5XX 8
#define NUM_HW_CNTRS_MAX NUM_HW_CNTRS_CII_5XX
#define FIXED_HW_CNTRS_CII_4XX 1
#define FIXED_HW_CNTRS_CII_5XX 0
#define FIXED_HW_CNTRS_MAX FIXED_HW_CNTRS_CII_4XX
#define HW_CNTRS_MAX (NUM_HW_CNTRS_MAX + FIXED_HW_CNTRS_MAX)
struct event_range {
u32 min;
u32 max;
};
struct cci_pmu_hw_events {
struct perf_event **events;
unsigned long *used_mask;
raw_spinlock_t pmu_lock;
};
struct cci_pmu;
/*
* struct cci_pmu_model:
* @fixed_hw_cntrs - Number of fixed event counters
* @num_hw_cntrs - Maximum number of programmable event counters
* @cntr_size - Size of an event counter mapping
*/
struct cci_pmu_model {
char *name;
u32 fixed_hw_cntrs;
u32 num_hw_cntrs;
u32 cntr_size;
struct attribute **format_attrs;
struct attribute **event_attrs;
struct event_range event_ranges[CCI_IF_MAX];
int (*validate_hw_event)(struct cci_pmu *, unsigned long);
int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
void (*write_counters)(struct cci_pmu *, unsigned long *);
};
static struct cci_pmu_model cci_pmu_models[];
struct cci_pmu {
void __iomem *base;
void __iomem *ctrl_base;
struct pmu pmu;
int cpu;
int nr_irqs;
int *irqs;
unsigned long active_irqs;
const struct cci_pmu_model *model;
struct cci_pmu_hw_events hw_events;
struct platform_device *plat_device;
int num_cntrs;
atomic_t active_events;
struct mutex reserve_mutex;
};
#define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))
static struct cci_pmu *g_cci_pmu;
enum cci_models {
#ifdef CONFIG_ARM_CCI400_PMU
CCI400_R0,
CCI400_R1,
#endif
#ifdef CONFIG_ARM_CCI5xx_PMU
CCI500_R0,
CCI550_R0,
#endif
CCI_MODEL_MAX
};
static void pmu_write_counters(struct cci_pmu *cci_pmu,
unsigned long *mask);
static ssize_t __maybe_unused cci_pmu_format_show(struct device *dev,
struct device_attribute *attr, char *buf);
static ssize_t __maybe_unused cci_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *buf);
#define CCI_EXT_ATTR_ENTRY(_name, _func, _config) \
&((struct dev_ext_attribute[]) { \
{ __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config } \
})[0].attr.attr
#define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \
CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config)
#define CCI_EVENT_EXT_ATTR_ENTRY(_name, _config) \
CCI_EXT_ATTR_ENTRY(_name, cci_pmu_event_show, (unsigned long)_config)
/* CCI400 PMU Specific definitions */
#ifdef CONFIG_ARM_CCI400_PMU
/* Port ids */
#define CCI400_PORT_S0 0
#define CCI400_PORT_S1 1
#define CCI400_PORT_S2 2
#define CCI400_PORT_S3 3
#define CCI400_PORT_S4 4
#define CCI400_PORT_M0 5
#define CCI400_PORT_M1 6
#define CCI400_PORT_M2 7
#define CCI400_R1_PX 5
/*
* Instead of an event id to monitor CCI cycles, a dedicated counter is
* provided. Use 0xff to represent CCI cycles and hope that no future revisions
* make use of this event in hardware.
*/
enum cci400_perf_events {
CCI400_PMU_CYCLES = 0xff
};
#define CCI400_PMU_CYCLE_CNTR_IDX 0
#define CCI400_PMU_CNTR0_IDX 1
/*
* CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
* ports and bits 4:0 are event codes. There are different event codes
* associated with each port type.
*
* Additionally, the range of events associated with the port types changed
* between Rev0 and Rev1.
*
* The constants below define the range of valid codes for each port type for
* the different revisions and are used to validate the event to be monitored.
*/
#define CCI400_PMU_EVENT_MASK 0xffUL
#define CCI400_PMU_EVENT_SOURCE_SHIFT 5
#define CCI400_PMU_EVENT_SOURCE_MASK 0x7
#define CCI400_PMU_EVENT_CODE_SHIFT 0
#define CCI400_PMU_EVENT_CODE_MASK 0x1f
#define CCI400_PMU_EVENT_SOURCE(event) \
((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \
CCI400_PMU_EVENT_SOURCE_MASK)
#define CCI400_PMU_EVENT_CODE(event) \
((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK)
#define CCI400_R0_SLAVE_PORT_MIN_EV 0x00
#define CCI400_R0_SLAVE_PORT_MAX_EV 0x13
#define CCI400_R0_MASTER_PORT_MIN_EV 0x14
#define CCI400_R0_MASTER_PORT_MAX_EV 0x1a
#define CCI400_R1_SLAVE_PORT_MIN_EV 0x00
#define CCI400_R1_SLAVE_PORT_MAX_EV 0x14
#define CCI400_R1_MASTER_PORT_MIN_EV 0x00
#define CCI400_R1_MASTER_PORT_MAX_EV 0x11
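/*
 * Worked example of the encoding above: an event value of 0xd1 decodes as
 * source = (0xd1 >> 5) & 0x7 = 0x6 (CCI400_PORT_M1) and code = 0x11. On
 * Rev0 that code falls outside the 0x14-0x1a master-port range and would be
 * rejected, while on Rev1 it is the last valid master-port event (0x11).
 */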
#define CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(_name, _config) \
CCI_EXT_ATTR_ENTRY(_name, cci400_pmu_cycle_event_show, \
(unsigned long)_config)
static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
struct device_attribute *attr, char *buf);
static struct attribute *cci400_pmu_format_attrs[] = {
CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"),
NULL
};
static struct attribute *cci400_r0_pmu_event_attrs[] = {
/* Slave events */
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
/* Master events */
CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x14),
CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_addr_hazard, 0x15),
CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_id_hazard, 0x16),
CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_tt_full, 0x17),
CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x18),
CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x19),
CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A),
/* Special event for cycles counter */
CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
NULL
};
static struct attribute *cci400_r1_pmu_event_attrs[] = {
/* Slave events */
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_slave_id_hazard, 0x14),
/* Master events */
CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x0),
CCI_EVENT_EXT_ATTR_ENTRY(mi_stall_cycle_addr_hazard, 0x1),
CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_master_id_hazard, 0x2),
CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_hi_prio_rtq_full, 0x3),
CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x4),
CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x5),
CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_wtq_full, 0x6),
CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_low_prio_rtq_full, 0x7),
CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_mid_prio_rtq_full, 0x8),
CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn0, 0x9),
CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn1, 0xA),
CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn2, 0xB),
CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn3, 0xC),
CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn0, 0xD),
CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn1, 0xE),
CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn2, 0xF),
CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn3, 0x10),
CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11),
/* Special event for cycles counter */
CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
NULL
};
static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
return sysfs_emit(buf, "config=0x%lx\n", (unsigned long)eattr->var);
}
static int cci400_get_event_idx(struct cci_pmu *cci_pmu,
struct cci_pmu_hw_events *hw,
unsigned long cci_event)
{
int idx;
/* cycles event idx is fixed */
if (cci_event == CCI400_PMU_CYCLES) {
if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask))
return -EAGAIN;
return CCI400_PMU_CYCLE_CNTR_IDX;
}
for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
if (!test_and_set_bit(idx, hw->used_mask))
return idx;
/* No counters available */
return -EAGAIN;
}
static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event)
{
u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event);
u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event);
int if_type;
if (hw_event & ~CCI400_PMU_EVENT_MASK)
return -ENOENT;
if (hw_event == CCI400_PMU_CYCLES)
return hw_event;
switch (ev_source) {
case CCI400_PORT_S0:
case CCI400_PORT_S1:
case CCI400_PORT_S2:
case CCI400_PORT_S3:
case CCI400_PORT_S4:
/* Slave Interface */
if_type = CCI_IF_SLAVE;
break;
case CCI400_PORT_M0:
case CCI400_PORT_M1:
case CCI400_PORT_M2:
/* Master Interface */
if_type = CCI_IF_MASTER;
break;
default:
return -ENOENT;
}
if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
ev_code <= cci_pmu->model->event_ranges[if_type].max)
return hw_event;
return -ENOENT;
}
static int probe_cci400_revision(struct cci_pmu *cci_pmu)
{
int rev;
rev = readl_relaxed(cci_pmu->ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
rev >>= CCI_PID2_REV_SHIFT;
if (rev < CCI400_R1_PX)
return CCI400_R0;
else
return CCI400_R1;
}
static const struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu)
{
if (platform_has_secure_cci_access())
return &cci_pmu_models[probe_cci400_revision(cci_pmu)];
return NULL;
}
#else /* !CONFIG_ARM_CCI400_PMU */
static inline struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu)
{
return NULL;
}
#endif /* CONFIG_ARM_CCI400_PMU */
#ifdef CONFIG_ARM_CCI5xx_PMU
/*
 * CCI5xx PMU event id is a 9-bit value made of two parts:
 *	bits [8:5] - Source for the event
 *	bits [4:0] - Event code (specific to type of interface)
*/
/* Port ids */
#define CCI5xx_PORT_S0 0x0
#define CCI5xx_PORT_S1 0x1
#define CCI5xx_PORT_S2 0x2
#define CCI5xx_PORT_S3 0x3
#define CCI5xx_PORT_S4 0x4
#define CCI5xx_PORT_S5 0x5
#define CCI5xx_PORT_S6 0x6
#define CCI5xx_PORT_M0 0x8
#define CCI5xx_PORT_M1 0x9
#define CCI5xx_PORT_M2 0xa
#define CCI5xx_PORT_M3 0xb
#define CCI5xx_PORT_M4 0xc
#define CCI5xx_PORT_M5 0xd
#define CCI5xx_PORT_M6 0xe
#define CCI5xx_PORT_GLOBAL 0xf
#define CCI5xx_PMU_EVENT_MASK 0x1ffUL
#define CCI5xx_PMU_EVENT_SOURCE_SHIFT 0x5
#define CCI5xx_PMU_EVENT_SOURCE_MASK 0xf
#define CCI5xx_PMU_EVENT_CODE_SHIFT 0x0
#define CCI5xx_PMU_EVENT_CODE_MASK 0x1f
#define CCI5xx_PMU_EVENT_SOURCE(event) \
((event >> CCI5xx_PMU_EVENT_SOURCE_SHIFT) & CCI5xx_PMU_EVENT_SOURCE_MASK)
#define CCI5xx_PMU_EVENT_CODE(event) \
((event >> CCI5xx_PMU_EVENT_CODE_SHIFT) & CCI5xx_PMU_EVENT_CODE_MASK)
#define CCI5xx_SLAVE_PORT_MIN_EV 0x00
#define CCI5xx_SLAVE_PORT_MAX_EV 0x1f
#define CCI5xx_MASTER_PORT_MIN_EV 0x00
#define CCI5xx_MASTER_PORT_MAX_EV 0x06
#define CCI5xx_GLOBAL_PORT_MIN_EV 0x00
#define CCI5xx_GLOBAL_PORT_MAX_EV 0x0f
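/*
 * Worked example (for illustration only): a perf event config of 0x1e2
 * decodes as source = (0x1e2 >> 5) & 0xf = 0xf (CCI5xx_PORT_GLOBAL) and
 * code = 0x1e2 & 0x1f = 0x2, i.e. the cci_snoop_access_filter_bank_4_5
 * global event.
 */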
#define CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \
CCI_EXT_ATTR_ENTRY(_name, cci5xx_pmu_global_event_show, \
(unsigned long) _config)
static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
struct device_attribute *attr, char *buf);
static struct attribute *cci5xx_pmu_format_attrs[] = {
CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"),
NULL,
};
static struct attribute *cci5xx_pmu_event_attrs[] = {
/* Slave events */
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid, 0x0),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev, 0x1),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_nonshareable, 0x2),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_non_alloc, 0x3),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_alloc, 0x4),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_invalidate, 0x5),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maint, 0x6),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rval, 0x8),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rlast_snoop, 0x9),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_awalid, 0xA),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_dev, 0xB),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_non_shareable, 0xC),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wb, 0xD),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wlu, 0xE),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wunique, 0xF),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_evict, 0x10),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_wrevict, 0x11),
CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_beat, 0x12),
CCI_EVENT_EXT_ATTR_ENTRY(si_srq_acvalid, 0x13),
CCI_EVENT_EXT_ATTR_ENTRY(si_srq_read, 0x14),
CCI_EVENT_EXT_ATTR_ENTRY(si_srq_clean, 0x15),
CCI_EVENT_EXT_ATTR_ENTRY(si_srq_data_transfer_low, 0x16),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_arvalid, 0x17),
CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall, 0x18),
CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall, 0x19),
CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_stall, 0x1A),
CCI_EVENT_EXT_ATTR_ENTRY(si_w_resp_stall, 0x1B),
CCI_EVENT_EXT_ATTR_ENTRY(si_srq_stall, 0x1C),
CCI_EVENT_EXT_ATTR_ENTRY(si_s_data_stall, 0x1D),
CCI_EVENT_EXT_ATTR_ENTRY(si_rq_stall_ot_limit, 0x1E),
CCI_EVENT_EXT_ATTR_ENTRY(si_r_stall_arbit, 0x1F),
/* Master events */
CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_beat_any, 0x0),
CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_beat_any, 0x1),
CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall, 0x2),
CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_stall, 0x3),
CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall, 0x4),
CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_stall, 0x5),
CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall, 0x6),
/* Global events */
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_stall_tt_full, 0xE),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
NULL
};
static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
	/* Global events have a single fixed source (CCI5xx_PORT_GLOBAL) */
return sysfs_emit(buf, "event=0x%lx,source=0x%x\n",
(unsigned long)eattr->var, CCI5xx_PORT_GLOBAL);
}
/*
* CCI500 provides 8 independent event counters that can count
* any of the events available.
* CCI500 PMU event source ids
* 0x0-0x6 - Slave interfaces
* 0x8-0xD - Master interfaces
* 0xf - Global Events
* 0x7,0xe - Reserved
*/
static int cci500_validate_hw_event(struct cci_pmu *cci_pmu,
unsigned long hw_event)
{
u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event);
u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event);
int if_type;
if (hw_event & ~CCI5xx_PMU_EVENT_MASK)
return -ENOENT;
switch (ev_source) {
case CCI5xx_PORT_S0:
case CCI5xx_PORT_S1:
case CCI5xx_PORT_S2:
case CCI5xx_PORT_S3:
case CCI5xx_PORT_S4:
case CCI5xx_PORT_S5:
case CCI5xx_PORT_S6:
if_type = CCI_IF_SLAVE;
break;
case CCI5xx_PORT_M0:
case CCI5xx_PORT_M1:
case CCI5xx_PORT_M2:
case CCI5xx_PORT_M3:
case CCI5xx_PORT_M4:
case CCI5xx_PORT_M5:
if_type = CCI_IF_MASTER;
break;
case CCI5xx_PORT_GLOBAL:
if_type = CCI_IF_GLOBAL;
break;
default:
return -ENOENT;
}
if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
ev_code <= cci_pmu->model->event_ranges[if_type].max)
return hw_event;
return -ENOENT;
}
/*
* CCI550 provides 8 independent event counters that can count
* any of the events available.
* CCI550 PMU event source ids
* 0x0-0x6 - Slave interfaces
* 0x8-0xe - Master interfaces
* 0xf - Global Events
* 0x7 - Reserved
*/
static int cci550_validate_hw_event(struct cci_pmu *cci_pmu,
unsigned long hw_event)
{
u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event);
u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event);
int if_type;
if (hw_event & ~CCI5xx_PMU_EVENT_MASK)
return -ENOENT;
switch (ev_source) {
case CCI5xx_PORT_S0:
case CCI5xx_PORT_S1:
case CCI5xx_PORT_S2:
case CCI5xx_PORT_S3:
case CCI5xx_PORT_S4:
case CCI5xx_PORT_S5:
case CCI5xx_PORT_S6:
if_type = CCI_IF_SLAVE;
break;
case CCI5xx_PORT_M0:
case CCI5xx_PORT_M1:
case CCI5xx_PORT_M2:
case CCI5xx_PORT_M3:
case CCI5xx_PORT_M4:
case CCI5xx_PORT_M5:
case CCI5xx_PORT_M6:
if_type = CCI_IF_MASTER;
break;
case CCI5xx_PORT_GLOBAL:
if_type = CCI_IF_GLOBAL;
break;
default:
return -ENOENT;
}
if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
ev_code <= cci_pmu->model->event_ranges[if_type].max)
return hw_event;
return -ENOENT;
}
#endif /* CONFIG_ARM_CCI5xx_PMU */
/*
* Program the CCI PMU counters which have PERF_HES_ARCH set
 * with the event period and mark them ready before we enable the PMU.
*/
static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
{
int i;
struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
DECLARE_BITMAP(mask, HW_CNTRS_MAX);
bitmap_zero(mask, HW_CNTRS_MAX);
for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) {
struct perf_event *event = cci_hw->events[i];
if (WARN_ON(!event))
continue;
/* Leave the events which are not counting */
if (event->hw.state & PERF_HES_STOPPED)
continue;
if (event->hw.state & PERF_HES_ARCH) {
__set_bit(i, mask);
event->hw.state &= ~PERF_HES_ARCH;
}
}
pmu_write_counters(cci_pmu, mask);
}
/* Should be called with cci_pmu->hw_events->pmu_lock held */
static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu)
{
u32 val;
/* Enable all the PMU counters. */
val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
writel(val, cci_pmu->ctrl_base + CCI_PMCR);
}
/* Should be called with cci_pmu->hw_events->pmu_lock held */
static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu)
{
cci_pmu_sync_counters(cci_pmu);
__cci_pmu_enable_nosync(cci_pmu);
}
/* Should be called with cci_pmu->hw_events->pmu_lock held */
static void __cci_pmu_disable(struct cci_pmu *cci_pmu)
{
u32 val;
/* Disable all the PMU counters. */
val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
writel(val, cci_pmu->ctrl_base + CCI_PMCR);
}
static ssize_t cci_pmu_format_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}
static ssize_t cci_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
/* source parameter is mandatory for normal PMU events */
return sysfs_emit(buf, "source=?,event=0x%lx\n",
(unsigned long)eattr->var);
}
static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
{
return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu);
}
static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset)
{
return readl_relaxed(cci_pmu->base +
CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
}
static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
int idx, unsigned int offset)
{
writel_relaxed(value, cci_pmu->base +
CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
}
static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx)
{
pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL);
}
static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx)
{
pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL);
}
static bool __maybe_unused
pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx)
{
return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0;
}
static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event)
{
pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL);
}
/*
 * For all counters on the CCI-PMU, disable any 'enabled' counters,
 * saving the changed counters in the mask, so that we can restore
 * them later using pmu_restore_counters(). The mask is private to the
 * caller. We cannot rely on the used_mask maintained by the CCI PMU
 * as it only tells us whether the counter is assigned to a perf_event.
 * The state of the perf_event cannot be locked by the PMU layer, hence
 * we check the individual counter status (which can be locked by
 * cci_pmu->hw_events->pmu_lock).
*
* @mask should be initialised to empty by the caller.
*/
static void __maybe_unused
pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
int i;
for (i = 0; i < cci_pmu->num_cntrs; i++) {
if (pmu_counter_is_enabled(cci_pmu, i)) {
set_bit(i, mask);
pmu_disable_counter(cci_pmu, i);
}
}
}
/*
* Restore the status of the counters. Reversal of the pmu_save_counters().
* For each counter set in the mask, enable the counter back.
*/
static void __maybe_unused
pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
int i;
for_each_set_bit(i, mask, cci_pmu->num_cntrs)
pmu_enable_counter(cci_pmu, i);
}
/*
* Returns the number of programmable counters actually implemented
 * by the CCI.
*/
static u32 pmu_get_max_counters(struct cci_pmu *cci_pmu)
{
return (readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) &
CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;
}
static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
{
struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
unsigned long cci_event = event->hw.config_base;
int idx;
if (cci_pmu->model->get_event_idx)
return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event);
/* Generic code to find an unused idx from the mask */
for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
if (!test_and_set_bit(idx, hw->used_mask))
return idx;
/* No counters available */
return -EAGAIN;
}
static int pmu_map_event(struct perf_event *event)
{
struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
if (event->attr.type < PERF_TYPE_MAX ||
!cci_pmu->model->validate_hw_event)
return -ENOENT;
return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config);
}
static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
{
int i;
struct platform_device *pmu_device = cci_pmu->plat_device;
if (unlikely(!pmu_device))
return -ENODEV;
if (cci_pmu->nr_irqs < 1) {
dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
return -ENODEV;
}
/*
 * Register all available CCI PMU interrupts. In the interrupt handler
 * we iterate over the counters, checking for the interrupt source (the
 * overflowing counter) and clearing it.
 *
 * This should allow handling of non-unique interrupts for the counters.
*/
for (i = 0; i < cci_pmu->nr_irqs; i++) {
int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED,
"arm-cci-pmu", cci_pmu);
if (err) {
dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
cci_pmu->irqs[i]);
return err;
}
set_bit(i, &cci_pmu->active_irqs);
}
return 0;
}
static void pmu_free_irq(struct cci_pmu *cci_pmu)
{
int i;
for (i = 0; i < cci_pmu->nr_irqs; i++) {
if (!test_and_clear_bit(i, &cci_pmu->active_irqs))
continue;
free_irq(cci_pmu->irqs[i], cci_pmu);
}
}
static u32 pmu_read_counter(struct perf_event *event)
{
struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
struct hw_perf_event *hw_counter = &event->hw;
int idx = hw_counter->idx;
u32 value;
if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
return 0;
}
value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR);
return value;
}
static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx)
{
pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
}
static void __pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
int i;
struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
struct perf_event *event = cci_hw->events[i];
if (WARN_ON(!event))
continue;
pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
}
}
static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
if (cci_pmu->model->write_counters)
cci_pmu->model->write_counters(cci_pmu, mask);
else
__pmu_write_counters(cci_pmu, mask);
}
#ifdef CONFIG_ARM_CCI5xx_PMU
/*
 * CCI-500/CCI-550 have advanced power saving policies that can gate the
 * clocks to the PMU counters, making writes to them ineffective.
* The only way to write to those counters is when the global counters
* are enabled and the particular counter is enabled.
*
* So we do the following :
*
* 1) Disable all the PMU counters, saving their current state
* 2) Enable the global PMU profiling, now that all counters are
* disabled.
*
* For each counter to be programmed, repeat steps 3-7:
*
* 3) Write an invalid event code to the event control register for the
 *    counter, so that the counters are not modified.
* 4) Enable the counter control for the counter.
* 5) Set the counter value
* 6) Disable the counter
* 7) Restore the event in the target counter
*
* 8) Disable the global PMU.
* 9) Restore the status of the rest of the counters.
*
* We choose an event which for CCI-5xx is guaranteed not to count.
* We use the highest possible event code (0x1f) for the master interface 0.
*/
#define CCI5xx_INVALID_EVENT ((CCI5xx_PORT_M0 << CCI5xx_PMU_EVENT_SOURCE_SHIFT) | \
(CCI5xx_PMU_EVENT_CODE_MASK << CCI5xx_PMU_EVENT_CODE_SHIFT))
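/*
 * With the definitions above this expands to 0x11f: event code 0x1f on
 * master interface 0, which is outside the implemented master event
 * range (0x00-0x06) and therefore never counts.
 */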
static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
int i;
DECLARE_BITMAP(saved_mask, HW_CNTRS_MAX);
bitmap_zero(saved_mask, cci_pmu->num_cntrs);
pmu_save_counters(cci_pmu, saved_mask);
/*
* Now that all the counters are disabled, we can safely turn the PMU on,
* without syncing the status of the counters
*/
__cci_pmu_enable_nosync(cci_pmu);
for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
struct perf_event *event = cci_pmu->hw_events.events[i];
if (WARN_ON(!event))
continue;
pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT);
pmu_enable_counter(cci_pmu, i);
pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
pmu_disable_counter(cci_pmu, i);
pmu_set_event(cci_pmu, i, event->hw.config_base);
}
__cci_pmu_disable(cci_pmu);
pmu_restore_counters(cci_pmu, saved_mask);
}
#endif /* CONFIG_ARM_CCI5xx_PMU */
static u64 pmu_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
u64 delta, prev_raw_count, new_raw_count;
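	/*
	 * Read the hardware counter and publish it as the new prev_count.
	 * If prev_count was updated concurrently (e.g. from the overflow
	 * handler), the cmpxchg fails and we retry. The delta is taken
	 * modulo the counter width (CCI_PMU_CNTR_MASK) so that a wrap
	 * between the two reads is accounted for correctly.
	 */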
do {
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = pmu_read_counter(event);
} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count);
delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;
local64_add(delta, &event->count);
return new_raw_count;
}
static void pmu_read(struct perf_event *event)
{
pmu_event_update(event);
}
static void pmu_event_set_period(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
/*
* The CCI PMU counters have a period of 2^32. To account for the
 * possibility of extreme interrupt latency we program for a period of
* half that. Hopefully we can handle the interrupt before another 2^31
* events occur and the counter overtakes its previous value.
*/
u64 val = 1ULL << 31;
local64_set(&hwc->prev_count, val);
/*
 * The CCI PMU uses PERF_HES_ARCH to keep track of the counters whose
 * values need to be synced with the software state before the PMU is
 * enabled.
* Mark this counter for sync.
*/
hwc->state |= PERF_HES_ARCH;
}
static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
{
struct cci_pmu *cci_pmu = dev;
struct cci_pmu_hw_events *events = &cci_pmu->hw_events;
int idx, handled = IRQ_NONE;
raw_spin_lock(&events->pmu_lock);
/* Disable the PMU while we walk through the counters */
__cci_pmu_disable(cci_pmu);
/*
* Iterate over counters and update the corresponding perf events.
 * This should work regardless of whether we have a per-counter overflow
 * interrupt or a combined overflow interrupt.
*/
for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
struct perf_event *event = events->events[idx];
if (!event)
continue;
/* Did this counter overflow? */
if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
CCI_PMU_OVRFLW_FLAG))
continue;
pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx,
CCI_PMU_OVRFLW);
pmu_event_update(event);
pmu_event_set_period(event);
handled = IRQ_HANDLED;
}
/* Enable the PMU and sync possibly overflowed counters */
__cci_pmu_enable_sync(cci_pmu);
raw_spin_unlock(&events->pmu_lock);
return IRQ_RETVAL(handled);
}
static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
{
int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
if (ret) {
pmu_free_irq(cci_pmu);
return ret;
}
return 0;
}
static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
{
pmu_free_irq(cci_pmu);
}
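/*
 * Called via event->destroy when the last reference to an event goes
 * away. atomic_dec_and_mutex_lock() takes reserve_mutex only when the
 * active-event count drops to zero, at which point the overflow IRQs
 * are released.
 */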
static void hw_perf_event_destroy(struct perf_event *event)
{
struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
atomic_t *active_events = &cci_pmu->active_events;
struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;
if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
cci_pmu_put_hw(cci_pmu);
mutex_unlock(reserve_mutex);
}
}
static void cci_pmu_enable(struct pmu *pmu)
{
struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
bool enabled = !bitmap_empty(hw_events->used_mask, cci_pmu->num_cntrs);
unsigned long flags;
if (!enabled)
return;
raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
__cci_pmu_enable_sync(cci_pmu);
raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}
static void cci_pmu_disable(struct pmu *pmu)
{
struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
unsigned long flags;
raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
__cci_pmu_disable(cci_pmu);
raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}
/*
* Check if the idx represents a non-programmable counter.
* All the fixed event counters are mapped before the programmable
* counters.
*/
static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx)
{
return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs);
}
static void cci_pmu_start(struct perf_event *event, int pmu_flags)
{
struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
unsigned long flags;
/*
* To handle interrupt latency, we always reprogram the period
* regardless of PERF_EF_RELOAD.
*/
if (pmu_flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
return;
}
raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
	/* Configure the counter unless we are counting a fixed event */
if (!pmu_fixed_hw_idx(cci_pmu, idx))
pmu_set_event(cci_pmu, idx, hwc->config_base);
pmu_event_set_period(event);
pmu_enable_counter(cci_pmu, idx);
raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}
static void cci_pmu_stop(struct perf_event *event, int pmu_flags)
{
struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
if (hwc->state & PERF_HES_STOPPED)
return;
if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
return;
}
/*
* We always reprogram the counter, so ignore PERF_EF_UPDATE. See
* cci_pmu_start()
*/
pmu_disable_counter(cci_pmu, idx);
pmu_event_update(event);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
static int cci_pmu_add(struct perf_event *event, int flags)
{
struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
struct hw_perf_event *hwc = &event->hw;
int idx;
	/* If we don't have space for the counter then finish early. */
idx = pmu_get_event_idx(hw_events, event);
if (idx < 0)
return idx;
event->hw.idx = idx;
hw_events->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (flags & PERF_EF_START)
cci_pmu_start(event, PERF_EF_RELOAD);
/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);
return 0;
}
static void cci_pmu_del(struct perf_event *event, int flags)
{
struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
cci_pmu_stop(event, PERF_EF_UPDATE);
hw_events->events[idx] = NULL;
clear_bit(idx, hw_events->used_mask);
perf_event_update_userpage(event);
}
static int validate_event(struct pmu *cci_pmu,
struct cci_pmu_hw_events *hw_events,
struct perf_event *event)
{
if (is_software_event(event))
return 1;
/*
* Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
* core perf code won't check that the pmu->ctx == leader->ctx
* until after pmu->event_init(event).
*/
if (event->pmu != cci_pmu)
return 0;
if (event->state < PERF_EVENT_STATE_OFF)
return 1;
if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
return 1;
return pmu_get_event_idx(hw_events, event) >= 0;
}
static int validate_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
unsigned long mask[BITS_TO_LONGS(HW_CNTRS_MAX)];
struct cci_pmu_hw_events fake_pmu = {
/*
* Initialise the fake PMU. We only need to populate the
* used_mask for the purposes of validation.
*/
.used_mask = mask,
};
bitmap_zero(mask, cci_pmu->num_cntrs);
if (!validate_event(event->pmu, &fake_pmu, leader))
return -EINVAL;
for_each_sibling_event(sibling, leader) {
if (!validate_event(event->pmu, &fake_pmu, sibling))
return -EINVAL;
}
if (!validate_event(event->pmu, &fake_pmu, event))
return -EINVAL;
return 0;
}
static int __hw_perf_event_init(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int mapping;
mapping = pmu_map_event(event);
if (mapping < 0) {
pr_debug("event %x:%llx not supported\n", event->attr.type,
event->attr.config);
return mapping;
}
/*
* We don't assign an index until we actually place the event onto
* hardware. Use -1 to signify that we haven't decided where to put it
* yet.
*/
hwc->idx = -1;
hwc->config_base = 0;
hwc->config = 0;
hwc->event_base = 0;
/*
* Store the event encoding into the config_base field.
*/
hwc->config_base |= (unsigned long)mapping;
if (event->group_leader != event) {
if (validate_group(event) != 0)
return -EINVAL;
}
return 0;
}
static int cci_pmu_event_init(struct perf_event *event)
{
struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
atomic_t *active_events = &cci_pmu->active_events;
int err = 0;
if (event->attr.type != event->pmu->type)
return -ENOENT;
/* Shared by all CPUs, no meaningful state to sample */
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EOPNOTSUPP;
/*
* Following the example set by other "uncore" PMUs, we accept any CPU
* and rewrite its affinity dynamically rather than having perf core
* handle cpu == -1 and pid == -1 for this case.
*
* The perf core will pin online CPUs for the duration of this call and
* the event being installed into its context, so the PMU's CPU can't
* change under our feet.
*/
if (event->cpu < 0)
return -EINVAL;
event->cpu = cci_pmu->cpu;
event->destroy = hw_perf_event_destroy;
if (!atomic_inc_not_zero(active_events)) {
mutex_lock(&cci_pmu->reserve_mutex);
if (atomic_read(active_events) == 0)
err = cci_pmu_get_hw(cci_pmu);
if (!err)
atomic_inc(active_events);
mutex_unlock(&cci_pmu->reserve_mutex);
}
if (err)
return err;
err = __hw_perf_event_init(event);
if (err)
hw_perf_event_destroy(event);
return err;
}
static ssize_t pmu_cpumask_attr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pmu *pmu = dev_get_drvdata(dev);
struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
return cpumap_print_to_pagebuf(true, buf, cpumask_of(cci_pmu->cpu));
}
static struct device_attribute pmu_cpumask_attr =
__ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL);
static struct attribute *pmu_attrs[] = {
&pmu_cpumask_attr.attr,
NULL,
};
static const struct attribute_group pmu_attr_group = {
.attrs = pmu_attrs,
};
static struct attribute_group pmu_format_attr_group = {
.name = "format",
	.attrs = NULL, /* Filled in cci_pmu_init() */
};
static struct attribute_group pmu_event_attr_group = {
.name = "events",
	.attrs = NULL, /* Filled in cci_pmu_init() */
};
static const struct attribute_group *pmu_attr_groups[] = {
&pmu_attr_group,
&pmu_format_attr_group,
&pmu_event_attr_group,
NULL
};
static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
{
const struct cci_pmu_model *model = cci_pmu->model;
char *name = model->name;
u32 num_cntrs;
if (WARN_ON(model->num_hw_cntrs > NUM_HW_CNTRS_MAX))
return -EINVAL;
if (WARN_ON(model->fixed_hw_cntrs > FIXED_HW_CNTRS_MAX))
return -EINVAL;
pmu_event_attr_group.attrs = model->event_attrs;
pmu_format_attr_group.attrs = model->format_attrs;
cci_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
.name = cci_pmu->model->name,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = cci_pmu_enable,
.pmu_disable = cci_pmu_disable,
.event_init = cci_pmu_event_init,
.add = cci_pmu_add,
.del = cci_pmu_del,
.start = cci_pmu_start,
.stop = cci_pmu_stop,
.read = pmu_read,
.attr_groups = pmu_attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
cci_pmu->plat_device = pdev;
num_cntrs = pmu_get_max_counters(cci_pmu);
if (num_cntrs > cci_pmu->model->num_hw_cntrs) {
dev_warn(&pdev->dev,
"PMU implements more counters(%d) than supported by"
" the model(%d), truncated.",
num_cntrs, cci_pmu->model->num_hw_cntrs);
num_cntrs = cci_pmu->model->num_hw_cntrs;
}
cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs;
return perf_pmu_register(&cci_pmu->pmu, name, -1);
}
static int cci_pmu_offline_cpu(unsigned int cpu)
{
int target;
if (!g_cci_pmu || cpu != g_cci_pmu->cpu)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&g_cci_pmu->pmu, cpu, target);
g_cci_pmu->cpu = target;
return 0;
}
static __maybe_unused struct cci_pmu_model cci_pmu_models[] = {
#ifdef CONFIG_ARM_CCI400_PMU
[CCI400_R0] = {
.name = "CCI_400",
.fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */
.num_hw_cntrs = NUM_HW_CNTRS_CII_4XX,
.cntr_size = SZ_4K,
.format_attrs = cci400_pmu_format_attrs,
.event_attrs = cci400_r0_pmu_event_attrs,
.event_ranges = {
[CCI_IF_SLAVE] = {
CCI400_R0_SLAVE_PORT_MIN_EV,
CCI400_R0_SLAVE_PORT_MAX_EV,
},
[CCI_IF_MASTER] = {
CCI400_R0_MASTER_PORT_MIN_EV,
CCI400_R0_MASTER_PORT_MAX_EV,
},
},
.validate_hw_event = cci400_validate_hw_event,
.get_event_idx = cci400_get_event_idx,
},
[CCI400_R1] = {
.name = "CCI_400_r1",
.fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */
.num_hw_cntrs = NUM_HW_CNTRS_CII_4XX,
.cntr_size = SZ_4K,
.format_attrs = cci400_pmu_format_attrs,
.event_attrs = cci400_r1_pmu_event_attrs,
.event_ranges = {
[CCI_IF_SLAVE] = {
CCI400_R1_SLAVE_PORT_MIN_EV,
CCI400_R1_SLAVE_PORT_MAX_EV,
},
[CCI_IF_MASTER] = {
CCI400_R1_MASTER_PORT_MIN_EV,
CCI400_R1_MASTER_PORT_MAX_EV,
},
},
.validate_hw_event = cci400_validate_hw_event,
.get_event_idx = cci400_get_event_idx,
},
#endif
#ifdef CONFIG_ARM_CCI5xx_PMU
[CCI500_R0] = {
.name = "CCI_500",
.fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX,
.num_hw_cntrs = NUM_HW_CNTRS_CII_5XX,
.cntr_size = SZ_64K,
.format_attrs = cci5xx_pmu_format_attrs,
.event_attrs = cci5xx_pmu_event_attrs,
.event_ranges = {
[CCI_IF_SLAVE] = {
CCI5xx_SLAVE_PORT_MIN_EV,
CCI5xx_SLAVE_PORT_MAX_EV,
},
[CCI_IF_MASTER] = {
CCI5xx_MASTER_PORT_MIN_EV,
CCI5xx_MASTER_PORT_MAX_EV,
},
[CCI_IF_GLOBAL] = {
CCI5xx_GLOBAL_PORT_MIN_EV,
CCI5xx_GLOBAL_PORT_MAX_EV,
},
},
.validate_hw_event = cci500_validate_hw_event,
.write_counters = cci5xx_pmu_write_counters,
},
[CCI550_R0] = {
.name = "CCI_550",
.fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX,
.num_hw_cntrs = NUM_HW_CNTRS_CII_5XX,
.cntr_size = SZ_64K,
.format_attrs = cci5xx_pmu_format_attrs,
.event_attrs = cci5xx_pmu_event_attrs,
.event_ranges = {
[CCI_IF_SLAVE] = {
CCI5xx_SLAVE_PORT_MIN_EV,
CCI5xx_SLAVE_PORT_MAX_EV,
},
[CCI_IF_MASTER] = {
CCI5xx_MASTER_PORT_MIN_EV,
CCI5xx_MASTER_PORT_MAX_EV,
},
[CCI_IF_GLOBAL] = {
CCI5xx_GLOBAL_PORT_MIN_EV,
CCI5xx_GLOBAL_PORT_MAX_EV,
},
},
.validate_hw_event = cci550_validate_hw_event,
.write_counters = cci5xx_pmu_write_counters,
},
#endif
};
static const struct of_device_id arm_cci_pmu_matches[] = {
#ifdef CONFIG_ARM_CCI400_PMU
{
.compatible = "arm,cci-400-pmu",
.data = NULL,
},
{
.compatible = "arm,cci-400-pmu,r0",
.data = &cci_pmu_models[CCI400_R0],
},
{
.compatible = "arm,cci-400-pmu,r1",
.data = &cci_pmu_models[CCI400_R1],
},
#endif
#ifdef CONFIG_ARM_CCI5xx_PMU
{
.compatible = "arm,cci-500-pmu,r0",
.data = &cci_pmu_models[CCI500_R0],
},
{
.compatible = "arm,cci-550-pmu,r0",
.data = &cci_pmu_models[CCI550_R0],
},
#endif
{},
};
MODULE_DEVICE_TABLE(of, arm_cci_pmu_matches);
static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
{
int i;
for (i = 0; i < nr_irqs; i++)
if (irq == irqs[i])
return true;
return false;
}
static struct cci_pmu *cci_pmu_alloc(struct device *dev)
{
struct cci_pmu *cci_pmu;
const struct cci_pmu_model *model;
/*
 * All allocations are devm_*, hence we don't have to free
 * them explicitly on an error path; they are released automatically
 * on driver detach.
*/
cci_pmu = devm_kzalloc(dev, sizeof(*cci_pmu), GFP_KERNEL);
if (!cci_pmu)
return ERR_PTR(-ENOMEM);
cci_pmu->ctrl_base = *(void __iomem **)dev->platform_data;
model = of_device_get_match_data(dev);
if (!model) {
dev_warn(dev,
"DEPRECATED compatible property, requires secure access to CCI registers");
model = probe_cci_model(cci_pmu);
}
if (!model) {
dev_warn(dev, "CCI PMU version not supported\n");
return ERR_PTR(-ENODEV);
}
cci_pmu->model = model;
cci_pmu->irqs = devm_kcalloc(dev, CCI_PMU_MAX_HW_CNTRS(model),
sizeof(*cci_pmu->irqs), GFP_KERNEL);
if (!cci_pmu->irqs)
return ERR_PTR(-ENOMEM);
cci_pmu->hw_events.events = devm_kcalloc(dev,
CCI_PMU_MAX_HW_CNTRS(model),
sizeof(*cci_pmu->hw_events.events),
GFP_KERNEL);
if (!cci_pmu->hw_events.events)
return ERR_PTR(-ENOMEM);
cci_pmu->hw_events.used_mask = devm_bitmap_zalloc(dev,
CCI_PMU_MAX_HW_CNTRS(model),
GFP_KERNEL);
if (!cci_pmu->hw_events.used_mask)
return ERR_PTR(-ENOMEM);
return cci_pmu;
}
static int cci_pmu_probe(struct platform_device *pdev)
{
struct cci_pmu *cci_pmu;
int i, ret, irq;
cci_pmu = cci_pmu_alloc(&pdev->dev);
if (IS_ERR(cci_pmu))
return PTR_ERR(cci_pmu);
cci_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cci_pmu->base))
return -ENOMEM;
/*
 * The CCI PMU has one overflow interrupt per counter, but some may be tied
 * together to a common interrupt.
*/
cci_pmu->nr_irqs = 0;
for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) {
irq = platform_get_irq(pdev, i);
if (irq < 0)
break;
if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs))
continue;
cci_pmu->irqs[cci_pmu->nr_irqs++] = irq;
}
/*
* Ensure that the device tree has as many interrupts as the number
* of counters.
*/
if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) {
dev_warn(&pdev->dev, "In-correct number of interrupts: %d, should be %d\n",
i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model));
return -EINVAL;
}
raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
mutex_init(&cci_pmu->reserve_mutex);
atomic_set(&cci_pmu->active_events, 0);
cci_pmu->cpu = raw_smp_processor_id();
g_cci_pmu = cci_pmu;
cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
"perf/arm/cci:online", NULL,
cci_pmu_offline_cpu);
ret = cci_pmu_init(cci_pmu, pdev);
if (ret)
goto error_pmu_init;
pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
return 0;
error_pmu_init:
cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
g_cci_pmu = NULL;
return ret;
}
static int cci_pmu_remove(struct platform_device *pdev)
{
if (!g_cci_pmu)
return 0;
cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
perf_pmu_unregister(&g_cci_pmu->pmu);
g_cci_pmu = NULL;
return 0;
}
static struct platform_driver cci_pmu_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = arm_cci_pmu_matches,
.suppress_bind_attrs = true,
},
.probe = cci_pmu_probe,
.remove = cci_pmu_remove,
};
module_platform_driver(cci_pmu_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ARM CCI PMU support");
| linux-master | drivers/perf/arm-cci.c |
// SPDX-License-Identifier: GPL-2.0-only
#undef DEBUG
/*
* ARM performance counter support.
*
* Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
* Copyright (C) 2010 ARM Ltd., Will Deacon <[email protected]>
*
* This code is based on the sparc64 perf event code, which is in turn based
* on the x86 code.
*/
#define pr_fmt(fmt) "hw perfevents: " fmt
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <asm/irq_regs.h>
static int armpmu_count_irq_users(const int irq);
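/*
 * Callbacks for managing a PMU overflow interrupt once it has been
 * requested. The set installed for a CPU depends on whether the line is
 * a per-CPU interrupt and on whether it could be requested as an NMI
 * (see armpmu_request_irq()).
 */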
struct pmu_irq_ops {
void (*enable_pmuirq)(unsigned int irq);
void (*disable_pmuirq)(unsigned int irq);
void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid);
};
static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid)
{
free_irq(irq, per_cpu_ptr(devid, cpu));
}
static const struct pmu_irq_ops pmuirq_ops = {
.enable_pmuirq = enable_irq,
.disable_pmuirq = disable_irq_nosync,
.free_pmuirq = armpmu_free_pmuirq
};
static void armpmu_free_pmunmi(unsigned int irq, int cpu, void __percpu *devid)
{
free_nmi(irq, per_cpu_ptr(devid, cpu));
}
static const struct pmu_irq_ops pmunmi_ops = {
.enable_pmuirq = enable_nmi,
.disable_pmuirq = disable_nmi_nosync,
.free_pmuirq = armpmu_free_pmunmi
};
static void armpmu_enable_percpu_pmuirq(unsigned int irq)
{
enable_percpu_irq(irq, IRQ_TYPE_NONE);
}
static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
void __percpu *devid)
{
if (armpmu_count_irq_users(irq) == 1)
free_percpu_irq(irq, devid);
}
static const struct pmu_irq_ops percpu_pmuirq_ops = {
.enable_pmuirq = armpmu_enable_percpu_pmuirq,
.disable_pmuirq = disable_percpu_irq,
.free_pmuirq = armpmu_free_percpu_pmuirq
};
static void armpmu_enable_percpu_pmunmi(unsigned int irq)
{
if (!prepare_percpu_nmi(irq))
enable_percpu_nmi(irq, IRQ_TYPE_NONE);
}
static void armpmu_disable_percpu_pmunmi(unsigned int irq)
{
disable_percpu_nmi(irq);
teardown_percpu_nmi(irq);
}
static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu,
void __percpu *devid)
{
if (armpmu_count_irq_users(irq) == 1)
free_percpu_nmi(irq, devid);
}
static const struct pmu_irq_ops percpu_pmunmi_ops = {
.enable_pmuirq = armpmu_enable_percpu_pmunmi,
.disable_pmuirq = armpmu_disable_percpu_pmunmi,
.free_pmuirq = armpmu_free_percpu_pmunmi
};
static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);
static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);
static bool has_nmi;
static inline u64 arm_pmu_event_max_period(struct perf_event *event)
{
if (event->hw.flags & ARMPMU_EVT_64BIT)
return GENMASK_ULL(63, 0);
else if (event->hw.flags & ARMPMU_EVT_63BIT)
return GENMASK_ULL(62, 0);
else if (event->hw.flags & ARMPMU_EVT_47BIT)
return GENMASK_ULL(46, 0);
else
return GENMASK_ULL(31, 0);
}
static int
armpmu_map_cache_event(const unsigned (*cache_map)
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX],
u64 config)
{
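	/*
	 * The cache event config packs three fields, one per byte: cache
	 * type in bits 7:0, operation in bits 15:8 and result in bits
	 * 23:16. For example, assuming the standard perf cache encoding,
	 * config 0x10000 (L1D=0, READ=0, MISS=1) selects L1 data cache
	 * read misses.
	 */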
unsigned int cache_type, cache_op, cache_result, ret;
cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return -EINVAL;
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return -EINVAL;
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
if (!cache_map)
return -ENOENT;
ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
if (ret == CACHE_OP_UNSUPPORTED)
return -ENOENT;
return ret;
}
static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
int mapping;
if (config >= PERF_COUNT_HW_MAX)
return -EINVAL;
if (!event_map)
return -ENOENT;
mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
return (int)(config & raw_event_mask);
}
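/*
 * Map a perf event to a hardware event number. Events created against
 * this PMU's dynamic type are treated as raw; otherwise the generic
 * hardware and cache maps are consulted.
 */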
int
armpmu_map_event(struct perf_event *event,
const unsigned (*event_map)[PERF_COUNT_HW_MAX],
const unsigned (*cache_map)
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX],
u32 raw_event_mask)
{
u64 config = event->attr.config;
int type = event->attr.type;
if (type == event->pmu->type)
return armpmu_map_raw_event(raw_event_mask, config);
switch (type) {
case PERF_TYPE_HARDWARE:
return armpmu_map_hw_event(event_map, config);
case PERF_TYPE_HW_CACHE:
return armpmu_map_cache_event(cache_map, config);
case PERF_TYPE_RAW:
return armpmu_map_raw_event(raw_event_mask, config);
}
return -ENOENT;
}
int armpmu_event_set_period(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
u64 max_period;
int ret = 0;
max_period = arm_pmu_event_max_period(event);
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
/*
* Limit the maximum period to prevent the counter value
* from overtaking the one we are about to program. In
* effect we are reducing max_period to account for
* interrupt latency (and we are being very conservative).
*/
if (left > (max_period >> 1))
left = (max_period >> 1);
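	/*
	 * Program the counter with -left so that it overflows after 'left'
	 * increments. For example, with a 32-bit counter and left == 0x1000
	 * the counter is written as 0xfffff000.
	 */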
local64_set(&hwc->prev_count, (u64)-left);
armpmu->write_counter(event, (u64)(-left) & max_period);
perf_event_update_userpage(event);
return ret;
}
u64 armpmu_event_update(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u64 delta, prev_raw_count, new_raw_count;
u64 max_period = arm_pmu_event_max_period(event);
again:
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = armpmu->read_counter(event);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
delta = (new_raw_count - prev_raw_count) & max_period;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
return new_raw_count;
}
static void
armpmu_read(struct perf_event *event)
{
armpmu_event_update(event);
}
static void
armpmu_stop(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
/*
* ARM pmu always has to update the counter, so ignore
* PERF_EF_UPDATE, see comments in armpmu_start().
*/
if (!(hwc->state & PERF_HES_STOPPED)) {
armpmu->disable(event);
armpmu_event_update(event);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
static void armpmu_start(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
/*
* ARM pmu always has to reprogram the period, so ignore
* PERF_EF_RELOAD, see the comment below.
*/
if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
/*
* Set the period again. Some counters can't be stopped, so when we
* were stopped we simply disabled the IRQ source and the counter
* may have been left counting. If we don't do this step then we may
* get an interrupt too soon or *way* too late if the overflow has
* happened since disabling.
*/
armpmu_event_set_period(event);
armpmu->enable(event);
}
static void
armpmu_del(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
armpmu_stop(event, PERF_EF_UPDATE);
hw_events->events[idx] = NULL;
armpmu->clear_event_idx(hw_events, event);
perf_event_update_userpage(event);
/* Clear the allocated counter */
hwc->idx = -1;
}
static int
armpmu_add(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx;
/* An event following a process won't be stopped earlier */
if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
return -ENOENT;
	/* If we don't have space for the counter then finish early. */
idx = armpmu->get_event_idx(hw_events, event);
if (idx < 0)
return idx;
/*
* If there is an event in the counter we are going to use then make
* sure it is disabled.
*/
event->hw.idx = idx;
armpmu->disable(event);
hw_events->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (flags & PERF_EF_START)
armpmu_start(event, PERF_EF_RELOAD);
/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);
return 0;
}
static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
struct perf_event *event)
{
struct arm_pmu *armpmu;
if (is_software_event(event))
return 1;
/*
* Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
* core perf code won't check that the pmu->ctx == leader->ctx
* until after pmu->event_init(event).
*/
if (event->pmu != pmu)
return 0;
if (event->state < PERF_EVENT_STATE_OFF)
return 1;
if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
return 1;
armpmu = to_arm_pmu(event->pmu);
return armpmu->get_event_idx(hw_events, event) >= 0;
}
static int
validate_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
struct pmu_hw_events fake_pmu;
/*
* Initialise the fake PMU. We only need to populate the
* used_mask for the purposes of validation.
*/
memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
if (!validate_event(event->pmu, &fake_pmu, leader))
return -EINVAL;
if (event == leader)
return 0;
for_each_sibling_event(sibling, leader) {
if (!validate_event(event->pmu, &fake_pmu, sibling))
return -EINVAL;
}
if (!validate_event(event->pmu, &fake_pmu, event))
return -EINVAL;
return 0;
}
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
struct arm_pmu *armpmu;
int ret;
u64 start_clock, finish_clock;
/*
* we request the IRQ with a (possibly percpu) struct arm_pmu**, but
* the handlers expect a struct arm_pmu*. The percpu_irq framework will
 * do any necessary shifting; we just need to perform the first
 * dereference.
*/
armpmu = *(void **)dev;
if (WARN_ON_ONCE(!armpmu))
return IRQ_NONE;
start_clock = sched_clock();
ret = armpmu->handle_irq(armpmu);
finish_clock = sched_clock();
perf_sample_event_took(finish_clock - start_clock);
return ret;
}
static int
__hw_perf_event_init(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int mapping;
hwc->flags = 0;
mapping = armpmu->map_event(event);
if (mapping < 0) {
pr_debug("event %x:%llx not supported\n", event->attr.type,
event->attr.config);
return mapping;
}
/*
* We don't assign an index until we actually place the event onto
* hardware. Use -1 to signify that we haven't decided where to put it
 * yet. For SMP systems, each core has its own PMU so we can't do any
* clever allocation or constraints checking at this point.
*/
hwc->idx = -1;
hwc->config_base = 0;
hwc->config = 0;
hwc->event_base = 0;
/*
* Check whether we need to exclude the counter from certain modes.
*/
if (armpmu->set_event_filter &&
armpmu->set_event_filter(hwc, &event->attr)) {
pr_debug("ARM performance counters do not support "
"mode exclusion\n");
return -EOPNOTSUPP;
}
/*
* Store the event encoding into the config_base field.
*/
hwc->config_base |= (unsigned long)mapping;
if (!is_sampling_event(event)) {
/*
* For non-sampling runs, limit the sample_period to half
* of the counter width. That way, the new counter value
* is far less likely to overtake the previous one unless
* you have some serious IRQ latency issues.
*/
hwc->sample_period = arm_pmu_event_max_period(event) >> 1;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
return validate_group(event);
}
static int armpmu_event_init(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
/*
* Reject CPU-affine events for CPUs that are of a different class to
* that which this PMU handles. Process-following events (where
* event->cpu == -1) can be migrated between CPUs, and thus we have to
* reject them later (in armpmu_add) if they're scheduled on a
* different class of CPU.
*/
if (event->cpu != -1 &&
!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
return -ENOENT;
/* does not support taken branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
return __hw_perf_event_init(event);
}
static void armpmu_enable(struct pmu *pmu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events);
/* For task-bound events we may be called on other CPUs */
if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
return;
if (enabled)
armpmu->start(armpmu);
}
static void armpmu_disable(struct pmu *pmu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
/* For task-bound events we may be called on other CPUs */
if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
return;
armpmu->stop(armpmu);
}
/*
* In heterogeneous systems, events are specific to a particular
* microarchitecture, and aren't suitable for another. Thus, only match CPUs of
* the same microarchitecture.
*/
static bool armpmu_filter(struct pmu *pmu, int cpu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
return !cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}
static ssize_t cpus_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}
static DEVICE_ATTR_RO(cpus);
static struct attribute *armpmu_common_attrs[] = {
&dev_attr_cpus.attr,
NULL,
};
static const struct attribute_group armpmu_common_attr_group = {
.attrs = armpmu_common_attrs,
};
static int armpmu_count_irq_users(const int irq)
{
int cpu, count = 0;
for_each_possible_cpu(cpu) {
if (per_cpu(cpu_irq, cpu) == irq)
count++;
}
return count;
}
static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
{
const struct pmu_irq_ops *ops = NULL;
int cpu;
for_each_possible_cpu(cpu) {
if (per_cpu(cpu_irq, cpu) != irq)
continue;
ops = per_cpu(cpu_irq_ops, cpu);
if (ops)
break;
}
return ops;
}
void armpmu_free_irq(int irq, int cpu)
{
if (per_cpu(cpu_irq, cpu) == 0)
return;
if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
return;
per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);
per_cpu(cpu_irq, cpu) = 0;
per_cpu(cpu_irq_ops, cpu) = NULL;
}
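/*
 * Request the PMU overflow interrupt for @cpu. We try to get the line as
 * an NMI first and fall back to a regular (possibly per-CPU) interrupt,
 * recording the matching pmu_irq_ops so that later enable/disable/free
 * operations use the right flavour.
 */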
int armpmu_request_irq(int irq, int cpu)
{
int err = 0;
const irq_handler_t handler = armpmu_dispatch_irq;
const struct pmu_irq_ops *irq_ops;
if (!irq)
return 0;
if (!irq_is_percpu_devid(irq)) {
unsigned long irq_flags;
err = irq_force_affinity(irq, cpumask_of(cpu));
if (err && num_possible_cpus() > 1) {
pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
irq, cpu);
goto err_out;
}
irq_flags = IRQF_PERCPU |
IRQF_NOBALANCING | IRQF_NO_AUTOEN |
IRQF_NO_THREAD;
err = request_nmi(irq, handler, irq_flags, "arm-pmu",
per_cpu_ptr(&cpu_armpmu, cpu));
		/* If we cannot get an NMI, fall back to a normal interrupt */
if (err) {
err = request_irq(irq, handler, irq_flags, "arm-pmu",
per_cpu_ptr(&cpu_armpmu, cpu));
irq_ops = &pmuirq_ops;
} else {
has_nmi = true;
irq_ops = &pmunmi_ops;
}
} else if (armpmu_count_irq_users(irq) == 0) {
err = request_percpu_nmi(irq, handler, "arm-pmu", &cpu_armpmu);
		/* If we cannot get an NMI, fall back to a normal interrupt */
if (err) {
err = request_percpu_irq(irq, handler, "arm-pmu",
&cpu_armpmu);
irq_ops = &percpu_pmuirq_ops;
} else {
has_nmi = true;
irq_ops = &percpu_pmunmi_ops;
}
} else {
		/* The per-CPU devid irq was already requested by another CPU */
irq_ops = armpmu_find_irq_ops(irq);
if (WARN_ON(!irq_ops))
err = -EINVAL;
}
if (err)
goto err_out;
per_cpu(cpu_irq, cpu) = irq;
per_cpu(cpu_irq_ops, cpu) = irq_ops;
return 0;
err_out:
pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
return err;
}
static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
return per_cpu(hw_events->irq, cpu);
}
bool arm_pmu_irq_is_nmi(void)
{
return has_nmi;
}
/*
* PMU hardware loses all context when a CPU goes offline.
* When a CPU is hotplugged back in, since some hardware registers are
* UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
* junk values out of them.
*/
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
int irq;
if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
return 0;
if (pmu->reset)
pmu->reset(pmu);
per_cpu(cpu_armpmu, cpu) = pmu;
irq = armpmu_get_cpu_irq(pmu, cpu);
if (irq)
per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
return 0;
}
static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
{
struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
int irq;
if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
return 0;
irq = armpmu_get_cpu_irq(pmu, cpu);
if (irq)
per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
per_cpu(cpu_armpmu, cpu) = NULL;
return 0;
}
#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
struct perf_event *event;
int idx;
for (idx = 0; idx < armpmu->num_events; idx++) {
event = hw_events->events[idx];
if (!event)
continue;
switch (cmd) {
case CPU_PM_ENTER:
/*
* Stop and update the counter
*/
armpmu_stop(event, PERF_EF_UPDATE);
break;
case CPU_PM_EXIT:
case CPU_PM_ENTER_FAILED:
/*
* Restore and enable the counter.
*/
armpmu_start(event, PERF_EF_RELOAD);
break;
default:
break;
}
}
}
static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
void *v)
{
struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events);
if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
return NOTIFY_DONE;
/*
* Always reset the PMU registers on power-up even if
* there are no events running.
*/
if (cmd == CPU_PM_EXIT && armpmu->reset)
armpmu->reset(armpmu);
if (!enabled)
return NOTIFY_OK;
switch (cmd) {
case CPU_PM_ENTER:
armpmu->stop(armpmu);
cpu_pm_pmu_setup(armpmu, cmd);
break;
case CPU_PM_EXIT:
case CPU_PM_ENTER_FAILED:
cpu_pm_pmu_setup(armpmu, cmd);
armpmu->start(armpmu);
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}
static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
int err;
err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
&cpu_pmu->node);
if (err)
goto out;
err = cpu_pm_pmu_register(cpu_pmu);
if (err)
goto out_unregister;
return 0;
out_unregister:
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
&cpu_pmu->node);
out:
return err;
}
static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
cpu_pm_pmu_unregister(cpu_pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
&cpu_pmu->node);
}
struct arm_pmu *armpmu_alloc(void)
{
struct arm_pmu *pmu;
int cpu;
pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
if (!pmu)
goto out;
pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, GFP_KERNEL);
if (!pmu->hw_events) {
pr_info("failed to allocate per-cpu PMU data.\n");
goto out_free_pmu;
}
pmu->pmu = (struct pmu) {
.pmu_enable = armpmu_enable,
.pmu_disable = armpmu_disable,
.event_init = armpmu_event_init,
.add = armpmu_add,
.del = armpmu_del,
.start = armpmu_start,
.stop = armpmu_stop,
.read = armpmu_read,
.filter = armpmu_filter,
.attr_groups = pmu->attr_groups,
/*
* This is a CPU PMU potentially in a heterogeneous
* configuration (e.g. big.LITTLE) so
* PERF_PMU_CAP_EXTENDED_HW_TYPE is required to open
* PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE events on a
* specific PMU.
*/
.capabilities = PERF_PMU_CAP_EXTENDED_REGS |
PERF_PMU_CAP_EXTENDED_HW_TYPE,
};
pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
&armpmu_common_attr_group;
for_each_possible_cpu(cpu) {
struct pmu_hw_events *events;
events = per_cpu_ptr(pmu->hw_events, cpu);
raw_spin_lock_init(&events->pmu_lock);
events->percpu_pmu = pmu;
}
return pmu;
out_free_pmu:
kfree(pmu);
out:
return NULL;
}
void armpmu_free(struct arm_pmu *pmu)
{
free_percpu(pmu->hw_events);
kfree(pmu);
}
int armpmu_register(struct arm_pmu *pmu)
{
int ret;
ret = cpu_pmu_init(pmu);
if (ret)
return ret;
if (!pmu->set_event_filter)
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
if (ret)
goto out_destroy;
pr_info("enabled with %s PMU driver, %d counters available%s\n",
pmu->name, pmu->num_events,
has_nmi ? ", using NMIs" : "");
kvm_host_pmu_init(pmu);
return 0;
out_destroy:
cpu_pmu_destroy(pmu);
return ret;
}
static int arm_pmu_hp_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
"perf/arm/pmu:starting",
arm_perf_starting_cpu,
arm_perf_teardown_cpu);
if (ret)
pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
ret);
return ret;
}
subsys_initcall(arm_pmu_hp_init);
| linux-master | drivers/perf/arm_pmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Perf support for the Statistical Profiling Extension, introduced as
* part of ARMv8.2.
*
* Copyright (C) 2016 ARM Limited
*
* Author: Will Deacon <[email protected]>
*/
#define PMUNAME "arm_spe"
#define DRVNAME PMUNAME "_pmu"
#define pr_fmt(fmt) DRVNAME ": " fmt
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/capability.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/sysreg.h>
/*
* Cache if the event is allowed to trace Context information.
* This allows us to perform the check, i.e. perfmon_capable(),
* in the context of the event owner, once, during the event_init().
*/
#define SPE_PMU_HW_FLAGS_CX 0x00001
static_assert((PERF_EVENT_FLAG_ARCH & SPE_PMU_HW_FLAGS_CX) == SPE_PMU_HW_FLAGS_CX);
static void set_spe_event_has_cx(struct perf_event *event)
{
if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
event->hw.flags |= SPE_PMU_HW_FLAGS_CX;
}
static bool get_spe_event_has_cx(struct perf_event *event)
{
return !!(event->hw.flags & SPE_PMU_HW_FLAGS_CX);
}
#define ARM_SPE_BUF_PAD_BYTE 0
struct arm_spe_pmu_buf {
int nr_pages;
bool snapshot;
void *base;
};
struct arm_spe_pmu {
struct pmu pmu;
struct platform_device *pdev;
cpumask_t supported_cpus;
struct hlist_node hotplug_node;
int irq; /* PPI */
u16 pmsver;
u16 min_period;
u16 counter_sz;
#define SPE_PMU_FEAT_FILT_EVT (1UL << 0)
#define SPE_PMU_FEAT_FILT_TYP (1UL << 1)
#define SPE_PMU_FEAT_FILT_LAT (1UL << 2)
#define SPE_PMU_FEAT_ARCH_INST (1UL << 3)
#define SPE_PMU_FEAT_LDS (1UL << 4)
#define SPE_PMU_FEAT_ERND (1UL << 5)
#define SPE_PMU_FEAT_INV_FILT_EVT (1UL << 6)
#define SPE_PMU_FEAT_DEV_PROBED (1UL << 63)
u64 features;
u16 max_record_sz;
u16 align;
struct perf_output_handle __percpu *handle;
};
#define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu))
/* Convert a free-running index from perf into an SPE buffer offset */
#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
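/*
* E.g. with nr_pages = 4 and 4KiB pages the buffer spans 0x4000 bytes,
* so a free-running index of 0x5000 maps to offset 0x1000.
*/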
/* Keep track of our dynamic hotplug state */
static enum cpuhp_state arm_spe_pmu_online;
enum arm_spe_pmu_buf_fault_action {
SPE_PMU_BUF_FAULT_ACT_SPURIOUS,
SPE_PMU_BUF_FAULT_ACT_FATAL,
SPE_PMU_BUF_FAULT_ACT_OK,
};
/* This sysfs gunk was really good fun to write. */
enum arm_spe_pmu_capabilities {
SPE_PMU_CAP_ARCH_INST = 0,
SPE_PMU_CAP_ERND,
SPE_PMU_CAP_FEAT_MAX,
SPE_PMU_CAP_CNT_SZ = SPE_PMU_CAP_FEAT_MAX,
SPE_PMU_CAP_MIN_IVAL,
};
static int arm_spe_pmu_feat_caps[SPE_PMU_CAP_FEAT_MAX] = {
[SPE_PMU_CAP_ARCH_INST] = SPE_PMU_FEAT_ARCH_INST,
[SPE_PMU_CAP_ERND] = SPE_PMU_FEAT_ERND,
};
static u32 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap)
{
if (cap < SPE_PMU_CAP_FEAT_MAX)
return !!(spe_pmu->features & arm_spe_pmu_feat_caps[cap]);
switch (cap) {
case SPE_PMU_CAP_CNT_SZ:
return spe_pmu->counter_sz;
case SPE_PMU_CAP_MIN_IVAL:
return spe_pmu->min_period;
default:
WARN(1, "unknown cap %d\n", cap);
}
return 0;
}
static ssize_t arm_spe_pmu_cap_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
struct dev_ext_attribute *ea =
container_of(attr, struct dev_ext_attribute, attr);
int cap = (long)ea->var;
return sysfs_emit(buf, "%u\n", arm_spe_pmu_cap_get(spe_pmu, cap));
}
#define SPE_EXT_ATTR_ENTRY(_name, _func, _var) \
&((struct dev_ext_attribute[]) { \
{ __ATTR(_name, S_IRUGO, _func, NULL), (void *)_var } \
})[0].attr.attr
#define SPE_CAP_EXT_ATTR_ENTRY(_name, _var) \
SPE_EXT_ATTR_ENTRY(_name, arm_spe_pmu_cap_show, _var)
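/*
* The compound literal above creates an anonymous one-element array of
* struct dev_ext_attribute so that the attribute and its ->var payload
* can be initialised, and the embedded struct attribute's address taken,
* in a single expression.
*/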
static struct attribute *arm_spe_pmu_cap_attr[] = {
SPE_CAP_EXT_ATTR_ENTRY(arch_inst, SPE_PMU_CAP_ARCH_INST),
SPE_CAP_EXT_ATTR_ENTRY(ernd, SPE_PMU_CAP_ERND),
SPE_CAP_EXT_ATTR_ENTRY(count_size, SPE_PMU_CAP_CNT_SZ),
SPE_CAP_EXT_ATTR_ENTRY(min_interval, SPE_PMU_CAP_MIN_IVAL),
NULL,
};
static const struct attribute_group arm_spe_pmu_cap_group = {
.name = "caps",
.attrs = arm_spe_pmu_cap_attr,
};
/* User ABI */
#define ATTR_CFG_FLD_ts_enable_CFG config /* PMSCR_EL1.TS */
#define ATTR_CFG_FLD_ts_enable_LO 0
#define ATTR_CFG_FLD_ts_enable_HI 0
#define ATTR_CFG_FLD_pa_enable_CFG config /* PMSCR_EL1.PA */
#define ATTR_CFG_FLD_pa_enable_LO 1
#define ATTR_CFG_FLD_pa_enable_HI 1
#define ATTR_CFG_FLD_pct_enable_CFG config /* PMSCR_EL1.PCT */
#define ATTR_CFG_FLD_pct_enable_LO 2
#define ATTR_CFG_FLD_pct_enable_HI 2
#define ATTR_CFG_FLD_jitter_CFG config /* PMSIRR_EL1.RND */
#define ATTR_CFG_FLD_jitter_LO 16
#define ATTR_CFG_FLD_jitter_HI 16
#define ATTR_CFG_FLD_branch_filter_CFG config /* PMSFCR_EL1.B */
#define ATTR_CFG_FLD_branch_filter_LO 32
#define ATTR_CFG_FLD_branch_filter_HI 32
#define ATTR_CFG_FLD_load_filter_CFG config /* PMSFCR_EL1.LD */
#define ATTR_CFG_FLD_load_filter_LO 33
#define ATTR_CFG_FLD_load_filter_HI 33
#define ATTR_CFG_FLD_store_filter_CFG config /* PMSFCR_EL1.ST */
#define ATTR_CFG_FLD_store_filter_LO 34
#define ATTR_CFG_FLD_store_filter_HI 34
#define ATTR_CFG_FLD_event_filter_CFG config1 /* PMSEVFR_EL1 */
#define ATTR_CFG_FLD_event_filter_LO 0
#define ATTR_CFG_FLD_event_filter_HI 63
#define ATTR_CFG_FLD_min_latency_CFG config2 /* PMSLATFR_EL1.MINLAT */
#define ATTR_CFG_FLD_min_latency_LO 0
#define ATTR_CFG_FLD_min_latency_HI 11
#define ATTR_CFG_FLD_inv_event_filter_CFG config3 /* PMSNEVFR_EL1 */
#define ATTR_CFG_FLD_inv_event_filter_LO 0
#define ATTR_CFG_FLD_inv_event_filter_HI 63
/* Why does everything I do descend into this? */
#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
(lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
__GEN_PMU_FORMAT_ATTR(cfg, lo, hi)
#define GEN_PMU_FORMAT_ATTR(name) \
PMU_FORMAT_ATTR(name, \
_GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \
ATTR_CFG_FLD_##name##_LO, \
ATTR_CFG_FLD_##name##_HI))
#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \
((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))
#define ATTR_CFG_GET_FLD(attr, name) \
_ATTR_CFG_GET_FLD(attr, \
ATTR_CFG_FLD_##name##_CFG, \
ATTR_CFG_FLD_##name##_LO, \
ATTR_CFG_FLD_##name##_HI)
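/*
* E.g. GEN_PMU_FORMAT_ATTR(min_latency) publishes "config2:0-11" under
* the sysfs "format" group, and ATTR_CFG_GET_FLD(attr, min_latency)
* extracts bits 11:0 of attr->config2; single-bit fields (lo == hi)
* come out as e.g. "config:16" for jitter.
*/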
GEN_PMU_FORMAT_ATTR(ts_enable);
GEN_PMU_FORMAT_ATTR(pa_enable);
GEN_PMU_FORMAT_ATTR(pct_enable);
GEN_PMU_FORMAT_ATTR(jitter);
GEN_PMU_FORMAT_ATTR(branch_filter);
GEN_PMU_FORMAT_ATTR(load_filter);
GEN_PMU_FORMAT_ATTR(store_filter);
GEN_PMU_FORMAT_ATTR(event_filter);
GEN_PMU_FORMAT_ATTR(inv_event_filter);
GEN_PMU_FORMAT_ATTR(min_latency);
static struct attribute *arm_spe_pmu_formats_attr[] = {
&format_attr_ts_enable.attr,
&format_attr_pa_enable.attr,
&format_attr_pct_enable.attr,
&format_attr_jitter.attr,
&format_attr_branch_filter.attr,
&format_attr_load_filter.attr,
&format_attr_store_filter.attr,
&format_attr_event_filter.attr,
&format_attr_inv_event_filter.attr,
&format_attr_min_latency.attr,
NULL,
};
static umode_t arm_spe_pmu_format_attr_is_visible(struct kobject *kobj,
struct attribute *attr,
int unused)
{
struct device *dev = kobj_to_dev(kobj);
struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
if (attr == &format_attr_inv_event_filter.attr && !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
return 0;
return attr->mode;
}
static const struct attribute_group arm_spe_pmu_format_group = {
.name = "format",
.is_visible = arm_spe_pmu_format_attr_is_visible,
.attrs = arm_spe_pmu_formats_attr,
};
static ssize_t cpumask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
return cpumap_print_to_pagebuf(true, buf, &spe_pmu->supported_cpus);
}
static DEVICE_ATTR_RO(cpumask);
static struct attribute *arm_spe_pmu_attrs[] = {
&dev_attr_cpumask.attr,
NULL,
};
static const struct attribute_group arm_spe_pmu_group = {
.attrs = arm_spe_pmu_attrs,
};
static const struct attribute_group *arm_spe_pmu_attr_groups[] = {
&arm_spe_pmu_group,
&arm_spe_pmu_cap_group,
&arm_spe_pmu_format_group,
NULL,
};
/* Convert between user ABI and register values */
static u64 arm_spe_event_to_pmscr(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
u64 reg = 0;
reg |= FIELD_PREP(PMSCR_EL1_TS, ATTR_CFG_GET_FLD(attr, ts_enable));
reg |= FIELD_PREP(PMSCR_EL1_PA, ATTR_CFG_GET_FLD(attr, pa_enable));
reg |= FIELD_PREP(PMSCR_EL1_PCT, ATTR_CFG_GET_FLD(attr, pct_enable));
if (!attr->exclude_user)
reg |= PMSCR_EL1_E0SPE;
if (!attr->exclude_kernel)
reg |= PMSCR_EL1_E1SPE;
if (get_spe_event_has_cx(event))
reg |= PMSCR_EL1_CX;
return reg;
}
static void arm_spe_event_sanitise_period(struct perf_event *event)
{
struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
u64 period = event->hw.sample_period;
u64 max_period = PMSIRR_EL1_INTERVAL_MASK;
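/*
* Clamp the period to the PMSIRR interval field's range; the masking
* in the final branch drops any bits the field cannot hold.
*/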
if (period < spe_pmu->min_period)
period = spe_pmu->min_period;
else if (period > max_period)
period = max_period;
else
period &= max_period;
event->hw.sample_period = period;
}
static u64 arm_spe_event_to_pmsirr(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
u64 reg = 0;
arm_spe_event_sanitise_period(event);
reg |= FIELD_PREP(PMSIRR_EL1_RND, ATTR_CFG_GET_FLD(attr, jitter));
reg |= event->hw.sample_period;
return reg;
}
static u64 arm_spe_event_to_pmsfcr(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
u64 reg = 0;
reg |= FIELD_PREP(PMSFCR_EL1_LD, ATTR_CFG_GET_FLD(attr, load_filter));
reg |= FIELD_PREP(PMSFCR_EL1_ST, ATTR_CFG_GET_FLD(attr, store_filter));
reg |= FIELD_PREP(PMSFCR_EL1_B, ATTR_CFG_GET_FLD(attr, branch_filter));
if (reg)
reg |= PMSFCR_EL1_FT;
if (ATTR_CFG_GET_FLD(attr, event_filter))
reg |= PMSFCR_EL1_FE;
if (ATTR_CFG_GET_FLD(attr, inv_event_filter))
reg |= PMSFCR_EL1_FnE;
if (ATTR_CFG_GET_FLD(attr, min_latency))
reg |= PMSFCR_EL1_FL;
return reg;
}
static u64 arm_spe_event_to_pmsevfr(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
return ATTR_CFG_GET_FLD(attr, event_filter);
}
static u64 arm_spe_event_to_pmsnevfr(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
return ATTR_CFG_GET_FLD(attr, inv_event_filter);
}
static u64 arm_spe_event_to_pmslatfr(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
return FIELD_PREP(PMSLATFR_EL1_MINLAT, ATTR_CFG_GET_FLD(attr, min_latency));
}
static void arm_spe_pmu_pad_buf(struct perf_output_handle *handle, int len)
{
struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
u64 head = PERF_IDX2OFF(handle->head, buf);
memset(buf->base + head, ARM_SPE_BUF_PAD_BYTE, len);
if (!buf->snapshot)
perf_aux_output_skip(handle, len);
}
static u64 arm_spe_pmu_next_snapshot_off(struct perf_output_handle *handle)
{
struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
u64 head = PERF_IDX2OFF(handle->head, buf);
u64 limit = buf->nr_pages * PAGE_SIZE;
/*
* The trace format isn't parseable in reverse, so clamp
* the limit to half of the buffer size in snapshot mode
* so that the worst case is half a buffer of records, as
* opposed to a single record.
*/
if (head < limit >> 1)
limit >>= 1;
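/*
* E.g. with an 8-page buffer (32KiB with 4KiB pages), the limit stays
* at 16KiB until the head has crossed the halfway point.
*/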
/*
* If we're within max_record_sz of the limit, we must
* pad, move the head index and recompute the limit.
*/
if (limit - head < spe_pmu->max_record_sz) {
arm_spe_pmu_pad_buf(handle, limit - head);
handle->head = PERF_IDX2OFF(limit, buf);
limit = ((buf->nr_pages * PAGE_SIZE) >> 1) + handle->head;
}
return limit;
}
static u64 __arm_spe_pmu_next_off(struct perf_output_handle *handle)
{
struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
const u64 bufsize = buf->nr_pages * PAGE_SIZE;
u64 limit = bufsize;
u64 head, tail, wakeup;
/*
* The head can be misaligned for two reasons:
*
* 1. The hardware left PMBPTR pointing to the first byte after
* a record when generating a buffer management event.
*
* 2. We used perf_aux_output_skip to consume handle->size bytes
* and CIRC_SPACE was used to compute the size, which always
* leaves one entry free.
*
* Deal with this by padding to the next alignment boundary and
* moving the head index. If we run out of buffer space, we'll
* reduce handle->size to zero and end up reporting truncation.
*/
head = PERF_IDX2OFF(handle->head, buf);
if (!IS_ALIGNED(head, spe_pmu->align)) {
unsigned long delta = roundup(head, spe_pmu->align) - head;
delta = min(delta, handle->size);
arm_spe_pmu_pad_buf(handle, delta);
head = PERF_IDX2OFF(handle->head, buf);
}
/* If we've run out of free space, then nothing more to do */
if (!handle->size)
goto no_space;
/* Compute the tail and wakeup indices now that we've aligned head */
tail = PERF_IDX2OFF(handle->head + handle->size, buf);
wakeup = PERF_IDX2OFF(handle->wakeup, buf);
/*
* Avoid clobbering unconsumed data. We know we have space, so
* if we see head == tail we know that the buffer is empty. If
* head > tail, then there's nothing to clobber prior to
* wrapping.
*/
if (head < tail)
limit = round_down(tail, PAGE_SIZE);
/*
* Wakeup may be arbitrarily far into the future. If it's not in
* the current generation, either we'll wrap before hitting it,
* or it's in the past and has been handled already.
*
* If there's a wakeup before we wrap, arrange to be woken up by
* the page boundary following it. Keep the tail boundary if
* that's lower.
*/
if (handle->wakeup < (handle->head + handle->size) && head <= wakeup)
limit = min(limit, round_up(wakeup, PAGE_SIZE));
if (limit > head)
return limit;
arm_spe_pmu_pad_buf(handle, handle->size);
no_space:
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
perf_aux_output_end(handle, 0);
return 0;
}
static u64 arm_spe_pmu_next_off(struct perf_output_handle *handle)
{
struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
u64 limit = __arm_spe_pmu_next_off(handle);
u64 head = PERF_IDX2OFF(handle->head, buf);
/*
* If the head has come too close to the end of the buffer,
* then pad to the end and recompute the limit.
*/
if (limit && (limit - head < spe_pmu->max_record_sz)) {
arm_spe_pmu_pad_buf(handle, limit - head);
limit = __arm_spe_pmu_next_off(handle);
}
return limit;
}
static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle,
struct perf_event *event)
{
u64 base, limit;
struct arm_spe_pmu_buf *buf;
/* Start a new aux session */
buf = perf_aux_output_begin(handle, event);
if (!buf) {
event->hw.state |= PERF_HES_STOPPED;
/*
* We still need to clear the limit pointer, since the
* profiler might only be disabled by virtue of a fault.
*/
limit = 0;
goto out_write_limit;
}
limit = buf->snapshot ? arm_spe_pmu_next_snapshot_off(handle)
: arm_spe_pmu_next_off(handle);
if (limit)
limit |= PMBLIMITR_EL1_E;
limit += (u64)buf->base;
base = (u64)buf->base + PERF_IDX2OFF(handle->head, buf);
write_sysreg_s(base, SYS_PMBPTR_EL1);
out_write_limit:
write_sysreg_s(limit, SYS_PMBLIMITR_EL1);
}
static void arm_spe_perf_aux_output_end(struct perf_output_handle *handle)
{
struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
u64 offset, size;
offset = read_sysreg_s(SYS_PMBPTR_EL1) - (u64)buf->base;
size = offset - PERF_IDX2OFF(handle->head, buf);
if (buf->snapshot)
handle->head = offset;
perf_aux_output_end(handle, size);
}
static void arm_spe_pmu_disable_and_drain_local(void)
{
/* Disable profiling at EL0 and EL1 */
write_sysreg_s(0, SYS_PMSCR_EL1);
isb();
/* Drain any buffered data */
psb_csync();
dsb(nsh);
/* Disable the profiling buffer */
write_sysreg_s(0, SYS_PMBLIMITR_EL1);
isb();
}
/* IRQ handling */
static enum arm_spe_pmu_buf_fault_action
arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle)
{
const char *err_str;
u64 pmbsr;
enum arm_spe_pmu_buf_fault_action ret;
/*
* Ensure new profiling data is visible to the CPU and any external
* aborts have been resolved.
*/
psb_csync();
dsb(nsh);
/* Ensure hardware updates to PMBPTR_EL1 are visible */
isb();
/* Service required? */
pmbsr = read_sysreg_s(SYS_PMBSR_EL1);
if (!FIELD_GET(PMBSR_EL1_S, pmbsr))
return SPE_PMU_BUF_FAULT_ACT_SPURIOUS;
/*
* If we've lost data, disable profiling and also set the PARTIAL
* flag to indicate that the last record is corrupted.
*/
if (FIELD_GET(PMBSR_EL1_DL, pmbsr))
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED |
PERF_AUX_FLAG_PARTIAL);
/* Report collisions to userspace so that it can up the period */
if (FIELD_GET(PMBSR_EL1_COLL, pmbsr))
perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION);
/* We only expect buffer management events */
switch (FIELD_GET(PMBSR_EL1_EC, pmbsr)) {
case PMBSR_EL1_EC_BUF:
/* Handled below */
break;
case PMBSR_EL1_EC_FAULT_S1:
case PMBSR_EL1_EC_FAULT_S2:
err_str = "Unexpected buffer fault";
goto out_err;
default:
err_str = "Unknown error code";
goto out_err;
}
/* Buffer management event */
switch (FIELD_GET(PMBSR_EL1_BUF_BSC_MASK, pmbsr)) {
case PMBSR_EL1_BUF_BSC_FULL:
ret = SPE_PMU_BUF_FAULT_ACT_OK;
goto out_stop;
default:
err_str = "Unknown buffer status code";
}
out_err:
pr_err_ratelimited("%s on CPU %d [PMBSR=0x%016llx, PMBPTR=0x%016llx, PMBLIMITR=0x%016llx]\n",
err_str, smp_processor_id(), pmbsr,
read_sysreg_s(SYS_PMBPTR_EL1),
read_sysreg_s(SYS_PMBLIMITR_EL1));
ret = SPE_PMU_BUF_FAULT_ACT_FATAL;
out_stop:
arm_spe_perf_aux_output_end(handle);
return ret;
}
static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev)
{
struct perf_output_handle *handle = dev;
struct perf_event *event = handle->event;
enum arm_spe_pmu_buf_fault_action act;
if (!perf_get_aux(handle))
return IRQ_NONE;
act = arm_spe_pmu_buf_get_fault_act(handle);
if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
return IRQ_NONE;
/*
* Ensure perf callbacks have completed, which may disable the
* profiling buffer in response to a TRUNCATION flag.
*/
irq_work_run();
switch (act) {
case SPE_PMU_BUF_FAULT_ACT_FATAL:
/*
* If a fatal exception occurred then leaving the profiling
* buffer enabled is a recipe for disaster. Since
* fatal faults don't always imply truncation, make sure
* that the profiling buffer is disabled explicitly before
* clearing the syndrome register.
*/
arm_spe_pmu_disable_and_drain_local();
break;
case SPE_PMU_BUF_FAULT_ACT_OK:
/*
* We handled the fault (the buffer was full), so resume
* profiling as long as we didn't detect truncation.
* PMBPTR might be misaligned, but we'll burn that bridge
* when we get to it.
*/
if (!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)) {
arm_spe_perf_aux_output_begin(handle, event);
isb();
}
break;
case SPE_PMU_BUF_FAULT_ACT_SPURIOUS:
/* We've seen you before, but GCC has the memory of a sieve. */
break;
}
/* The buffer pointers are now sane, so resume profiling. */
write_sysreg_s(0, SYS_PMBSR_EL1);
return IRQ_HANDLED;
}
static u64 arm_spe_pmsevfr_res0(u16 pmsver)
{
switch (pmsver) {
case ID_AA64DFR0_EL1_PMSVer_IMP:
return PMSEVFR_EL1_RES0_IMP;
case ID_AA64DFR0_EL1_PMSVer_V1P1:
return PMSEVFR_EL1_RES0_V1P1;
case ID_AA64DFR0_EL1_PMSVer_V1P2:
/* For unknown newer versions, return the highest version we support */
default:
return PMSEVFR_EL1_RES0_V1P2;
}
}
/* Perf callbacks */
static int arm_spe_pmu_event_init(struct perf_event *event)
{
u64 reg;
struct perf_event_attr *attr = &event->attr;
struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
/* This is, of course, deeply driver-specific */
if (attr->type != event->pmu->type)
return -ENOENT;
if (event->cpu >= 0 &&
!cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus))
return -ENOENT;
if (arm_spe_event_to_pmsevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
return -EOPNOTSUPP;
if (arm_spe_event_to_pmsnevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
return -EOPNOTSUPP;
if (attr->exclude_idle)
return -EOPNOTSUPP;
/*
* Feedback-directed frequency throttling doesn't work when we
* have a buffer of samples. We'd need to manually count the
* samples in the buffer when it fills up and adjust the event
* count to reflect that. Instead, just force the user to specify
* a sample period.
*/
if (attr->freq)
return -EINVAL;
reg = arm_spe_event_to_pmsfcr(event);
if ((FIELD_GET(PMSFCR_EL1_FE, reg)) &&
!(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT))
return -EOPNOTSUPP;
if ((FIELD_GET(PMSFCR_EL1_FnE, reg)) &&
!(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
return -EOPNOTSUPP;
if ((FIELD_GET(PMSFCR_EL1_FT, reg)) &&
!(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP))
return -EOPNOTSUPP;
if ((FIELD_GET(PMSFCR_EL1_FL, reg)) &&
!(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
return -EOPNOTSUPP;
set_spe_event_has_cx(event);
reg = arm_spe_event_to_pmscr(event);
if (!perfmon_capable() &&
(reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT)))
return -EACCES;
return 0;
}
static void arm_spe_pmu_start(struct perf_event *event, int flags)
{
u64 reg;
struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);
hwc->state = 0;
arm_spe_perf_aux_output_begin(handle, event);
if (hwc->state)
return;
reg = arm_spe_event_to_pmsfcr(event);
write_sysreg_s(reg, SYS_PMSFCR_EL1);
reg = arm_spe_event_to_pmsevfr(event);
write_sysreg_s(reg, SYS_PMSEVFR_EL1);
if (spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT) {
reg = arm_spe_event_to_pmsnevfr(event);
write_sysreg_s(reg, SYS_PMSNEVFR_EL1);
}
reg = arm_spe_event_to_pmslatfr(event);
write_sysreg_s(reg, SYS_PMSLATFR_EL1);
if (flags & PERF_EF_RELOAD) {
reg = arm_spe_event_to_pmsirr(event);
write_sysreg_s(reg, SYS_PMSIRR_EL1);
isb();
reg = local64_read(&hwc->period_left);
write_sysreg_s(reg, SYS_PMSICR_EL1);
}
reg = arm_spe_event_to_pmscr(event);
isb();
write_sysreg_s(reg, SYS_PMSCR_EL1);
}
static void arm_spe_pmu_stop(struct perf_event *event, int flags)
{
struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);
/* If we're already stopped, then nothing to do */
if (hwc->state & PERF_HES_STOPPED)
return;
/* Stop all trace generation */
arm_spe_pmu_disable_and_drain_local();
if (flags & PERF_EF_UPDATE) {
/*
* If there's a fault pending then ensure we contain it
* to this buffer, since we might be on the context-switch
* path.
*/
if (perf_get_aux(handle)) {
enum arm_spe_pmu_buf_fault_action act;
act = arm_spe_pmu_buf_get_fault_act(handle);
if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
arm_spe_perf_aux_output_end(handle);
else
write_sysreg_s(0, SYS_PMBSR_EL1);
}
/*
* This may also contain ECOUNT, but nobody else should
* be looking at period_left, since we forbid frequency
* based sampling.
*/
local64_set(&hwc->period_left, read_sysreg_s(SYS_PMSICR_EL1));
hwc->state |= PERF_HES_UPTODATE;
}
hwc->state |= PERF_HES_STOPPED;
}
static int arm_spe_pmu_add(struct perf_event *event, int flags)
{
int ret = 0;
struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int cpu = event->cpu == -1 ? smp_processor_id() : event->cpu;
if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
return -ENOENT;
hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
if (flags & PERF_EF_START) {
arm_spe_pmu_start(event, PERF_EF_RELOAD);
if (hwc->state & PERF_HES_STOPPED)
ret = -EINVAL;
}
return ret;
}
static void arm_spe_pmu_del(struct perf_event *event, int flags)
{
arm_spe_pmu_stop(event, PERF_EF_UPDATE);
}
static void arm_spe_pmu_read(struct perf_event *event)
{
}
static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages,
int nr_pages, bool snapshot)
{
int i, cpu = event->cpu;
struct page **pglist;
struct arm_spe_pmu_buf *buf;
/* We need at least two pages for this to work. */
if (nr_pages < 2)
return NULL;
/*
* We require an even number of pages for snapshot mode, so that
* we can effectively treat the buffer as consisting of two equal
* parts and give userspace a fighting chance of getting some
* useful data out of it.
*/
if (snapshot && (nr_pages & 1))
return NULL;
if (cpu == -1)
cpu = raw_smp_processor_id();
buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, cpu_to_node(cpu));
if (!buf)
return NULL;
pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
if (!pglist)
goto out_free_buf;
for (i = 0; i < nr_pages; ++i)
pglist[i] = virt_to_page(pages[i]);
buf->base = vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
if (!buf->base)
goto out_free_pglist;
buf->nr_pages = nr_pages;
buf->snapshot = snapshot;
kfree(pglist);
return buf;
out_free_pglist:
kfree(pglist);
out_free_buf:
kfree(buf);
return NULL;
}
static void arm_spe_pmu_free_aux(void *aux)
{
struct arm_spe_pmu_buf *buf = aux;
vunmap(buf->base);
kfree(buf);
}
/* Initialisation and teardown functions */
static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu)
{
static atomic_t pmu_idx = ATOMIC_INIT(-1);
int idx;
char *name;
struct device *dev = &spe_pmu->pdev->dev;
spe_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
.capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE,
.attr_groups = arm_spe_pmu_attr_groups,
/*
* We hitch a ride on the software context here, so that
* we can support per-task profiling (which is not possible
* with the invalid context as it doesn't get sched callbacks).
* This requires that userspace either uses a dummy event for
* perf_event_open, since the aux buffer is not setup until
* a subsequent mmap, or creates the profiling event in a
* disabled state and explicitly PERF_EVENT_IOC_ENABLEs it
* once the buffer has been created.
*/
.task_ctx_nr = perf_sw_context,
.event_init = arm_spe_pmu_event_init,
.add = arm_spe_pmu_add,
.del = arm_spe_pmu_del,
.start = arm_spe_pmu_start,
.stop = arm_spe_pmu_stop,
.read = arm_spe_pmu_read,
.setup_aux = arm_spe_pmu_setup_aux,
.free_aux = arm_spe_pmu_free_aux,
};
idx = atomic_inc_return(&pmu_idx);
name = devm_kasprintf(dev, GFP_KERNEL, "%s_%d", PMUNAME, idx);
if (!name) {
dev_err(dev, "failed to allocate name for pmu %d\n", idx);
return -ENOMEM;
}
return perf_pmu_register(&spe_pmu->pmu, name, -1);
}
static void arm_spe_pmu_perf_destroy(struct arm_spe_pmu *spe_pmu)
{
perf_pmu_unregister(&spe_pmu->pmu);
}
static void __arm_spe_pmu_dev_probe(void *info)
{
int fld;
u64 reg;
struct arm_spe_pmu *spe_pmu = info;
struct device *dev = &spe_pmu->pdev->dev;
fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64DFR0_EL1),
ID_AA64DFR0_EL1_PMSVer_SHIFT);
if (!fld) {
dev_err(dev,
"unsupported ID_AA64DFR0_EL1.PMSVer [%d] on CPU %d\n",
fld, smp_processor_id());
return;
}
spe_pmu->pmsver = (u16)fld;
/* Read PMBIDR first to determine whether or not we have access */
reg = read_sysreg_s(SYS_PMBIDR_EL1);
if (FIELD_GET(PMBIDR_EL1_P, reg)) {
dev_err(dev,
"profiling buffer owned by higher exception level\n");
return;
}
/* Minimum alignment. If it's out-of-range, then fail the probe */
fld = FIELD_GET(PMBIDR_EL1_ALIGN, reg);
spe_pmu->align = 1 << fld;
if (spe_pmu->align > SZ_2K) {
dev_err(dev, "unsupported PMBIDR.Align [%d] on CPU %d\n",
fld, smp_processor_id());
return;
}
/* It's now safe to read PMSIDR and figure out what we've got */
reg = read_sysreg_s(SYS_PMSIDR_EL1);
if (FIELD_GET(PMSIDR_EL1_FE, reg))
spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT;
if (FIELD_GET(PMSIDR_EL1_FnE, reg))
spe_pmu->features |= SPE_PMU_FEAT_INV_FILT_EVT;
if (FIELD_GET(PMSIDR_EL1_FT, reg))
spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP;
if (FIELD_GET(PMSIDR_EL1_FL, reg))
spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT;
if (FIELD_GET(PMSIDR_EL1_ARCHINST, reg))
spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST;
if (FIELD_GET(PMSIDR_EL1_LDS, reg))
spe_pmu->features |= SPE_PMU_FEAT_LDS;
if (FIELD_GET(PMSIDR_EL1_ERND, reg))
spe_pmu->features |= SPE_PMU_FEAT_ERND;
/* This field has a spaced out encoding, so just use a look-up */
fld = FIELD_GET(PMSIDR_EL1_INTERVAL, reg);
switch (fld) {
case PMSIDR_EL1_INTERVAL_256:
spe_pmu->min_period = 256;
break;
case PMSIDR_EL1_INTERVAL_512:
spe_pmu->min_period = 512;
break;
case PMSIDR_EL1_INTERVAL_768:
spe_pmu->min_period = 768;
break;
case PMSIDR_EL1_INTERVAL_1024:
spe_pmu->min_period = 1024;
break;
case PMSIDR_EL1_INTERVAL_1536:
spe_pmu->min_period = 1536;
break;
case PMSIDR_EL1_INTERVAL_2048:
spe_pmu->min_period = 2048;
break;
case PMSIDR_EL1_INTERVAL_3072:
spe_pmu->min_period = 3072;
break;
default:
dev_warn(dev, "unknown PMSIDR_EL1.Interval [%d]; assuming 8\n",
fld);
fallthrough;
case PMSIDR_EL1_INTERVAL_4096:
spe_pmu->min_period = 4096;
}
/* Maximum record size. If it's out-of-range, then fail the probe */
fld = FIELD_GET(PMSIDR_EL1_MAXSIZE, reg);
spe_pmu->max_record_sz = 1 << fld;
if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) {
dev_err(dev, "unsupported PMSIDR_EL1.MaxSize [%d] on CPU %d\n",
fld, smp_processor_id());
return;
}
fld = FIELD_GET(PMSIDR_EL1_COUNTSIZE, reg);
switch (fld) {
default:
dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n",
fld);
fallthrough;
case PMSIDR_EL1_COUNTSIZE_12_BIT_SAT:
spe_pmu->counter_sz = 12;
break;
case PMSIDR_EL1_COUNTSIZE_16_BIT_SAT:
spe_pmu->counter_sz = 16;
}
dev_info(dev,
"probed SPEv1.%d for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n",
spe_pmu->pmsver - 1, cpumask_pr_args(&spe_pmu->supported_cpus),
spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features);
spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED;
}
static void __arm_spe_pmu_reset_local(void)
{
/*
* This is probably overkill, as we have no idea where we're
* draining any buffered data to...
*/
arm_spe_pmu_disable_and_drain_local();
/* Reset the buffer base pointer */
write_sysreg_s(0, SYS_PMBPTR_EL1);
isb();
/* Clear any pending management interrupts */
write_sysreg_s(0, SYS_PMBSR_EL1);
isb();
}
static void __arm_spe_pmu_setup_one(void *info)
{
struct arm_spe_pmu *spe_pmu = info;
__arm_spe_pmu_reset_local();
enable_percpu_irq(spe_pmu->irq, IRQ_TYPE_NONE);
}
static void __arm_spe_pmu_stop_one(void *info)
{
struct arm_spe_pmu *spe_pmu = info;
disable_percpu_irq(spe_pmu->irq);
__arm_spe_pmu_reset_local();
}
static int arm_spe_pmu_cpu_startup(unsigned int cpu, struct hlist_node *node)
{
struct arm_spe_pmu *spe_pmu;
spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
return 0;
__arm_spe_pmu_setup_one(spe_pmu);
return 0;
}
static int arm_spe_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
struct arm_spe_pmu *spe_pmu;
spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
return 0;
__arm_spe_pmu_stop_one(spe_pmu);
return 0;
}
static int arm_spe_pmu_dev_init(struct arm_spe_pmu *spe_pmu)
{
int ret;
cpumask_t *mask = &spe_pmu->supported_cpus;
/* Make sure we probe the hardware on a relevant CPU */
ret = smp_call_function_any(mask, __arm_spe_pmu_dev_probe, spe_pmu, 1);
if (ret || !(spe_pmu->features & SPE_PMU_FEAT_DEV_PROBED))
return -ENXIO;
/* Request our PPIs (note that the IRQ is still disabled) */
ret = request_percpu_irq(spe_pmu->irq, arm_spe_pmu_irq_handler, DRVNAME,
spe_pmu->handle);
if (ret)
return ret;
/*
* Register our hotplug notifier now so we don't miss any events.
* This will enable the IRQ for any supported CPUs that are already
* up.
*/
ret = cpuhp_state_add_instance(arm_spe_pmu_online,
&spe_pmu->hotplug_node);
if (ret)
free_percpu_irq(spe_pmu->irq, spe_pmu->handle);
return ret;
}
static void arm_spe_pmu_dev_teardown(struct arm_spe_pmu *spe_pmu)
{
cpuhp_state_remove_instance(arm_spe_pmu_online, &spe_pmu->hotplug_node);
free_percpu_irq(spe_pmu->irq, spe_pmu->handle);
}
/* Driver and device probing */
static int arm_spe_pmu_irq_probe(struct arm_spe_pmu *spe_pmu)
{
struct platform_device *pdev = spe_pmu->pdev;
int irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -ENXIO;
if (!irq_is_percpu(irq)) {
dev_err(&pdev->dev, "expected PPI but got SPI (%d)\n", irq);
return -EINVAL;
}
if (irq_get_percpu_devid_partition(irq, &spe_pmu->supported_cpus)) {
dev_err(&pdev->dev, "failed to get PPI partition (%d)\n", irq);
return -EINVAL;
}
spe_pmu->irq = irq;
return 0;
}
static const struct of_device_id arm_spe_pmu_of_match[] = {
{ .compatible = "arm,statistical-profiling-extension-v1", .data = (void *)1 },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, arm_spe_pmu_of_match);
static const struct platform_device_id arm_spe_match[] = {
{ ARMV8_SPE_PDEV_NAME, 0},
{ }
};
MODULE_DEVICE_TABLE(platform, arm_spe_match);
static int arm_spe_pmu_device_probe(struct platform_device *pdev)
{
int ret;
struct arm_spe_pmu *spe_pmu;
struct device *dev = &pdev->dev;
/*
* If kernelspace is unmapped when running at EL0, then the SPE
* buffer will fault and prematurely terminate the AUX session.
*/
if (arm64_kernel_unmapped_at_el0()) {
dev_warn_once(dev, "profiling buffer inaccessible. Try passing \"kpti=off\" on the kernel command line\n");
return -EPERM;
}
spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL);
if (!spe_pmu)
return -ENOMEM;
spe_pmu->handle = alloc_percpu(typeof(*spe_pmu->handle));
if (!spe_pmu->handle)
return -ENOMEM;
spe_pmu->pdev = pdev;
platform_set_drvdata(pdev, spe_pmu);
ret = arm_spe_pmu_irq_probe(spe_pmu);
if (ret)
goto out_free_handle;
ret = arm_spe_pmu_dev_init(spe_pmu);
if (ret)
goto out_free_handle;
ret = arm_spe_pmu_perf_init(spe_pmu);
if (ret)
goto out_teardown_dev;
return 0;
out_teardown_dev:
arm_spe_pmu_dev_teardown(spe_pmu);
out_free_handle:
free_percpu(spe_pmu->handle);
return ret;
}
static int arm_spe_pmu_device_remove(struct platform_device *pdev)
{
struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);
arm_spe_pmu_perf_destroy(spe_pmu);
arm_spe_pmu_dev_teardown(spe_pmu);
free_percpu(spe_pmu->handle);
return 0;
}
static struct platform_driver arm_spe_pmu_driver = {
.id_table = arm_spe_match,
.driver = {
.name = DRVNAME,
.of_match_table = of_match_ptr(arm_spe_pmu_of_match),
.suppress_bind_attrs = true,
},
.probe = arm_spe_pmu_device_probe,
.remove = arm_spe_pmu_device_remove,
};
static int __init arm_spe_pmu_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
arm_spe_pmu_cpu_startup,
arm_spe_pmu_cpu_teardown);
if (ret < 0)
return ret;
arm_spe_pmu_online = ret;
ret = platform_driver_register(&arm_spe_pmu_driver);
if (ret)
cpuhp_remove_multi_state(arm_spe_pmu_online);
return ret;
}
static void __exit arm_spe_pmu_exit(void)
{
platform_driver_unregister(&arm_spe_pmu_driver);
cpuhp_remove_multi_state(arm_spe_pmu_online);
}
module_init(arm_spe_pmu_init);
module_exit(arm_spe_pmu_exit);
MODULE_DESCRIPTION("Perf driver for the ARMv8.2 Statistical Profiling Extension");
MODULE_AUTHOR("Will Deacon <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/perf/arm_spe_pmu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* CPU PMU driver for the Apple M1 and derivatives
*
* Copyright (C) 2021 Google LLC
*
* Author: Marc Zyngier <[email protected]>
*
* Most of the information used in this driver was provided by the
* Asahi Linux project. The rest was experimentally discovered.
*/
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <asm/apple_m1_pmu.h>
#include <asm/irq_regs.h>
#include <asm/perf_event.h>
#define M1_PMU_NR_COUNTERS 10
#define M1_PMU_CFG_EVENT GENMASK(7, 0)
#define ANY_BUT_0_1 GENMASK(9, 2)
#define ONLY_2_TO_7 GENMASK(7, 2)
#define ONLY_2_4_6 (BIT(2) | BIT(4) | BIT(6))
#define ONLY_5_6_7 (BIT(5) | BIT(6) | BIT(7))
/*
* Description of the events we actually know about, as well as those with
* a specific counter affinity. Yes, this is a grand total of two known
* counters, and the rest is anybody's guess.
*
* Not all counters can count all events. Counters #0 and #1 are wired to
* count cycles and instructions respectively, and some events have
* bizarre mappings (every other counter, or even *one* counter). These
* restrictions equally apply to both P and E cores.
*
* It is worth noting that the PMUs attached to P and E cores are likely
* to be different because the underlying uarches are different. At the
* moment, we don't really need to distinguish between the two because we
* know next to nothing about the events themselves, and we already have
* per cpu-type PMU abstractions.
*
* If we eventually find out that the events are different across
* implementations, we'll have to introduce per cpu-type tables.
*/
enum m1_pmu_events {
M1_PMU_PERFCTR_UNKNOWN_01 = 0x01,
M1_PMU_PERFCTR_CPU_CYCLES = 0x02,
M1_PMU_PERFCTR_INSTRUCTIONS = 0x8c,
M1_PMU_PERFCTR_UNKNOWN_8d = 0x8d,
M1_PMU_PERFCTR_UNKNOWN_8e = 0x8e,
M1_PMU_PERFCTR_UNKNOWN_8f = 0x8f,
M1_PMU_PERFCTR_UNKNOWN_90 = 0x90,
M1_PMU_PERFCTR_UNKNOWN_93 = 0x93,
M1_PMU_PERFCTR_UNKNOWN_94 = 0x94,
M1_PMU_PERFCTR_UNKNOWN_95 = 0x95,
M1_PMU_PERFCTR_UNKNOWN_96 = 0x96,
M1_PMU_PERFCTR_UNKNOWN_97 = 0x97,
M1_PMU_PERFCTR_UNKNOWN_98 = 0x98,
M1_PMU_PERFCTR_UNKNOWN_99 = 0x99,
M1_PMU_PERFCTR_UNKNOWN_9a = 0x9a,
M1_PMU_PERFCTR_UNKNOWN_9b = 0x9b,
M1_PMU_PERFCTR_UNKNOWN_9c = 0x9c,
M1_PMU_PERFCTR_UNKNOWN_9f = 0x9f,
M1_PMU_PERFCTR_UNKNOWN_bf = 0xbf,
M1_PMU_PERFCTR_UNKNOWN_c0 = 0xc0,
M1_PMU_PERFCTR_UNKNOWN_c1 = 0xc1,
M1_PMU_PERFCTR_UNKNOWN_c4 = 0xc4,
M1_PMU_PERFCTR_UNKNOWN_c5 = 0xc5,
M1_PMU_PERFCTR_UNKNOWN_c6 = 0xc6,
M1_PMU_PERFCTR_UNKNOWN_c8 = 0xc8,
M1_PMU_PERFCTR_UNKNOWN_ca = 0xca,
M1_PMU_PERFCTR_UNKNOWN_cb = 0xcb,
M1_PMU_PERFCTR_UNKNOWN_f5 = 0xf5,
M1_PMU_PERFCTR_UNKNOWN_f6 = 0xf6,
M1_PMU_PERFCTR_UNKNOWN_f7 = 0xf7,
M1_PMU_PERFCTR_UNKNOWN_f8 = 0xf8,
M1_PMU_PERFCTR_UNKNOWN_fd = 0xfd,
M1_PMU_PERFCTR_LAST = M1_PMU_CFG_EVENT,
/*
* From this point onwards, these are not actual HW events,
* but attributes that get stored in hw->config_base.
*/
M1_PMU_CFG_COUNT_USER = BIT(8),
M1_PMU_CFG_COUNT_KERNEL = BIT(9),
};
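/*
* The event number itself lives in bits 7:0 (M1_PMU_CFG_EVENT), so the
* two filter flags above can share hw->config_base with it: see
* m1_pmu_set_event_filter() and m1_pmu_enable_event().
*/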
/*
* Per-event affinity table. Most events can be installed on counter
* 2-9, but there are a number of exceptions. Note that this table
* has been created experimentally, and I wouldn't be surprised if more
* counters had strange affinities.
*/
static const u16 m1_pmu_event_affinity[M1_PMU_PERFCTR_LAST + 1] = {
[0 ... M1_PMU_PERFCTR_LAST] = ANY_BUT_0_1,
[M1_PMU_PERFCTR_UNKNOWN_01] = BIT(7),
[M1_PMU_PERFCTR_CPU_CYCLES] = ANY_BUT_0_1 | BIT(0),
[M1_PMU_PERFCTR_INSTRUCTIONS] = BIT(7) | BIT(1),
[M1_PMU_PERFCTR_UNKNOWN_8d] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_8e] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_8f] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_90] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_93] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_94] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_95] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_96] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_97] = BIT(7),
[M1_PMU_PERFCTR_UNKNOWN_98] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_99] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_9a] = BIT(7),
[M1_PMU_PERFCTR_UNKNOWN_9b] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_9c] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_9f] = BIT(7),
[M1_PMU_PERFCTR_UNKNOWN_bf] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_c0] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_c1] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_c4] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_c5] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_c6] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_c8] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_ca] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_cb] = ONLY_5_6_7,
[M1_PMU_PERFCTR_UNKNOWN_f5] = ONLY_2_4_6,
[M1_PMU_PERFCTR_UNKNOWN_f6] = ONLY_2_4_6,
[M1_PMU_PERFCTR_UNKNOWN_f7] = ONLY_2_4_6,
[M1_PMU_PERFCTR_UNKNOWN_f8] = ONLY_2_TO_7,
[M1_PMU_PERFCTR_UNKNOWN_fd] = ONLY_2_4_6,
};
static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = {
PERF_MAP_ALL_UNSUPPORTED,
[PERF_COUNT_HW_CPU_CYCLES] = M1_PMU_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = M1_PMU_PERFCTR_INSTRUCTIONS,
/* No idea about the rest yet */
};
/* sysfs definitions */
static ssize_t m1_pmu_events_sysfs_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}
#define M1_PMU_EVENT_ATTR(name, config) \
PMU_EVENT_ATTR_ID(name, m1_pmu_events_sysfs_show, config)
static struct attribute *m1_pmu_event_attrs[] = {
M1_PMU_EVENT_ATTR(cycles, M1_PMU_PERFCTR_CPU_CYCLES),
M1_PMU_EVENT_ATTR(instructions, M1_PMU_PERFCTR_INSTRUCTIONS),
NULL,
};
static const struct attribute_group m1_pmu_events_attr_group = {
.name = "events",
.attrs = m1_pmu_event_attrs,
};
PMU_FORMAT_ATTR(event, "config:0-7");
static struct attribute *m1_pmu_format_attrs[] = {
&format_attr_event.attr,
NULL,
};
static const struct attribute_group m1_pmu_format_attr_group = {
.name = "format",
.attrs = m1_pmu_format_attrs,
};
/* Low level accessors. No synchronisation. */
#define PMU_READ_COUNTER(_idx) \
case _idx: return read_sysreg_s(SYS_IMP_APL_PMC## _idx ##_EL1)
#define PMU_WRITE_COUNTER(_val, _idx) \
case _idx: \
write_sysreg_s(_val, SYS_IMP_APL_PMC## _idx ##_EL1); \
return
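/*
* System register accessors need a literal register name, so these
* macros paste the counter index into SYS_IMP_APL_PMC<n>_EL1 at
* compile time; hence the explicit switch over all ten counters
* below instead of an indexed access.
*/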
static u64 m1_pmu_read_hw_counter(unsigned int index)
{
switch (index) {
PMU_READ_COUNTER(0);
PMU_READ_COUNTER(1);
PMU_READ_COUNTER(2);
PMU_READ_COUNTER(3);
PMU_READ_COUNTER(4);
PMU_READ_COUNTER(5);
PMU_READ_COUNTER(6);
PMU_READ_COUNTER(7);
PMU_READ_COUNTER(8);
PMU_READ_COUNTER(9);
}
BUG();
}
static void m1_pmu_write_hw_counter(u64 val, unsigned int index)
{
switch (index) {
PMU_WRITE_COUNTER(val, 0);
PMU_WRITE_COUNTER(val, 1);
PMU_WRITE_COUNTER(val, 2);
PMU_WRITE_COUNTER(val, 3);
PMU_WRITE_COUNTER(val, 4);
PMU_WRITE_COUNTER(val, 5);
PMU_WRITE_COUNTER(val, 6);
PMU_WRITE_COUNTER(val, 7);
PMU_WRITE_COUNTER(val, 8);
PMU_WRITE_COUNTER(val, 9);
}
BUG();
}
#define get_bit_offset(index, mask) (__ffs(mask) + (index))
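/*
* __ffs(mask) is the position of the field's lowest set bit, so counter
* @index lands at that base plus index; for instance, a field occupying
* bits 19:12 puts counter 3 at bit 15.
*/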
static void __m1_pmu_enable_counter(unsigned int index, bool en)
{
u64 val, bit;
switch (index) {
case 0 ... 7:
bit = BIT(get_bit_offset(index, PMCR0_CNT_ENABLE_0_7));
break;
case 8 ... 9:
bit = BIT(get_bit_offset(index - 8, PMCR0_CNT_ENABLE_8_9));
break;
default:
BUG();
}
val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
if (en)
val |= bit;
else
val &= ~bit;
write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
}
static void m1_pmu_enable_counter(unsigned int index)
{
__m1_pmu_enable_counter(index, true);
}
static void m1_pmu_disable_counter(unsigned int index)
{
__m1_pmu_enable_counter(index, false);
}
static void __m1_pmu_enable_counter_interrupt(unsigned int index, bool en)
{
u64 val, bit;
switch (index) {
case 0 ... 7:
bit = BIT(get_bit_offset(index, PMCR0_PMI_ENABLE_0_7));
break;
case 8 ... 9:
bit = BIT(get_bit_offset(index - 8, PMCR0_PMI_ENABLE_8_9));
break;
default:
BUG();
}
val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
if (en)
val |= bit;
else
val &= ~bit;
write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
}
static void m1_pmu_enable_counter_interrupt(unsigned int index)
{
__m1_pmu_enable_counter_interrupt(index, true);
}
static void m1_pmu_disable_counter_interrupt(unsigned int index)
{
__m1_pmu_enable_counter_interrupt(index, false);
}
static void m1_pmu_configure_counter(unsigned int index, u8 event,
bool user, bool kernel)
{
u64 val, user_bit, kernel_bit;
int shift;
switch (index) {
case 0 ... 7:
user_bit = BIT(get_bit_offset(index, PMCR1_COUNT_A64_EL0_0_7));
kernel_bit = BIT(get_bit_offset(index, PMCR1_COUNT_A64_EL1_0_7));
break;
case 8 ... 9:
user_bit = BIT(get_bit_offset(index - 8, PMCR1_COUNT_A64_EL0_8_9));
kernel_bit = BIT(get_bit_offset(index - 8, PMCR1_COUNT_A64_EL1_8_9));
break;
default:
BUG();
}
val = read_sysreg_s(SYS_IMP_APL_PMCR1_EL1);
if (user)
val |= user_bit;
else
val &= ~user_bit;
if (kernel)
val |= kernel_bit;
else
val &= ~kernel_bit;
write_sysreg_s(val, SYS_IMP_APL_PMCR1_EL1);
/*
* Counters 0 and 1 have fixed events. For anything else,
* place the event at the expected location in the relevant
* register (PMESR0 holds the event configuration for counters
* 2-5, resp. PMESR1 for counters 6-9).
*/
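/* For example, counter 7 uses shift (7 - 6) * 8 = 8, i.e. bits 15:8 of PMESR1. */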
switch (index) {
case 0 ... 1:
break;
case 2 ... 5:
shift = (index - 2) * 8;
val = read_sysreg_s(SYS_IMP_APL_PMESR0_EL1);
val &= ~((u64)0xff << shift);
val |= (u64)event << shift;
write_sysreg_s(val, SYS_IMP_APL_PMESR0_EL1);
break;
case 6 ... 9:
shift = (index - 6) * 8;
val = read_sysreg_s(SYS_IMP_APL_PMESR1_EL1);
val &= ~((u64)0xff << shift);
val |= (u64)event << shift;
write_sysreg_s(val, SYS_IMP_APL_PMESR1_EL1);
break;
}
}
/* arm_pmu backend */
static void m1_pmu_enable_event(struct perf_event *event)
{
bool user, kernel;
u8 evt;
evt = event->hw.config_base & M1_PMU_CFG_EVENT;
user = event->hw.config_base & M1_PMU_CFG_COUNT_USER;
kernel = event->hw.config_base & M1_PMU_CFG_COUNT_KERNEL;
m1_pmu_disable_counter_interrupt(event->hw.idx);
m1_pmu_disable_counter(event->hw.idx);
isb();
m1_pmu_configure_counter(event->hw.idx, evt, user, kernel);
m1_pmu_enable_counter(event->hw.idx);
m1_pmu_enable_counter_interrupt(event->hw.idx);
isb();
}
static void m1_pmu_disable_event(struct perf_event *event)
{
m1_pmu_disable_counter_interrupt(event->hw.idx);
m1_pmu_disable_counter(event->hw.idx);
isb();
}
static irqreturn_t m1_pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
u64 overflow, state;
int idx;
overflow = read_sysreg_s(SYS_IMP_APL_PMSR_EL1);
if (!overflow) {
/* Spurious interrupt? */
state = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
state &= ~PMCR0_IACT;
write_sysreg_s(state, SYS_IMP_APL_PMCR0_EL1);
isb();
return IRQ_NONE;
}
cpu_pmu->stop(cpu_pmu);
regs = get_irq_regs();
for (idx = 0; idx < cpu_pmu->num_events; idx++) {
struct perf_event *event = cpuc->events[idx];
struct perf_sample_data data;
if (!event)
continue;
armpmu_event_update(event);
perf_sample_data_init(&data, 0, event->hw.last_period);
if (!armpmu_event_set_period(event))
continue;
if (perf_event_overflow(event, &data, regs))
m1_pmu_disable_event(event);
}
cpu_pmu->start(cpu_pmu);
return IRQ_HANDLED;
}
static u64 m1_pmu_read_counter(struct perf_event *event)
{
return m1_pmu_read_hw_counter(event->hw.idx);
}
static void m1_pmu_write_counter(struct perf_event *event, u64 value)
{
m1_pmu_write_hw_counter(value, event->hw.idx);
isb();
}
static int m1_pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct perf_event *event)
{
unsigned long evtype = event->hw.config_base & M1_PMU_CFG_EVENT;
unsigned long affinity = m1_pmu_event_affinity[evtype];
int idx;
/*
* Place the event on the first free counter that can count
* this event.
*
* We could do a better job if we had a view of all the events
* counting on the PMU at any given time, and by placing the
* most constraining events first.
*/
for_each_set_bit(idx, &affinity, M1_PMU_NR_COUNTERS) {
if (!test_and_set_bit(idx, cpuc->used_mask))
return idx;
}
return -EAGAIN;
}
static void m1_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
struct perf_event *event)
{
clear_bit(event->hw.idx, cpuc->used_mask);
}
static void __m1_pmu_set_mode(u8 mode)
{
u64 val;
val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
val &= ~(PMCR0_IMODE | PMCR0_IACT);
val |= FIELD_PREP(PMCR0_IMODE, mode);
write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
isb();
}
static void m1_pmu_start(struct arm_pmu *cpu_pmu)
{
__m1_pmu_set_mode(PMCR0_IMODE_FIQ);
}
static void m1_pmu_stop(struct arm_pmu *cpu_pmu)
{
__m1_pmu_set_mode(PMCR0_IMODE_OFF);
}
static int m1_pmu_map_event(struct perf_event *event)
{
/*
* Although the counters are 48bit wide, bit 47 is what
* triggers the overflow interrupt. Advertise the counters
* being 47bit wide to mimic the behaviour of the ARM PMU.
*/
event->hw.flags |= ARMPMU_EVT_47BIT;
return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
}
static int m2_pmu_map_event(struct perf_event *event)
{
/*
* Same deal as the above, except that M2 has 64bit counters.
* Which, as far as we're concerned, actually means 63 bits.
* Yes, this is getting awkward.
*/
event->hw.flags |= ARMPMU_EVT_63BIT;
return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
}
static void m1_pmu_reset(void *info)
{
int i;
__m1_pmu_set_mode(PMCR0_IMODE_OFF);
for (i = 0; i < M1_PMU_NR_COUNTERS; i++) {
m1_pmu_disable_counter(i);
m1_pmu_disable_counter_interrupt(i);
m1_pmu_write_hw_counter(0, i);
}
isb();
}
static int m1_pmu_set_event_filter(struct hw_perf_event *event,
struct perf_event_attr *attr)
{
unsigned long config_base = 0;
if (!attr->exclude_guest)
return -EINVAL;
if (!attr->exclude_kernel)
config_base |= M1_PMU_CFG_COUNT_KERNEL;
if (!attr->exclude_user)
config_base |= M1_PMU_CFG_COUNT_USER;
event->config_base = config_base;
return 0;
}
static int m1_pmu_init(struct arm_pmu *cpu_pmu, u32 flags)
{
cpu_pmu->handle_irq = m1_pmu_handle_irq;
cpu_pmu->enable = m1_pmu_enable_event;
cpu_pmu->disable = m1_pmu_disable_event;
cpu_pmu->read_counter = m1_pmu_read_counter;
cpu_pmu->write_counter = m1_pmu_write_counter;
cpu_pmu->get_event_idx = m1_pmu_get_event_idx;
cpu_pmu->clear_event_idx = m1_pmu_clear_event_idx;
cpu_pmu->start = m1_pmu_start;
cpu_pmu->stop = m1_pmu_stop;
if (flags & ARMPMU_EVT_47BIT)
cpu_pmu->map_event = m1_pmu_map_event;
else if (flags & ARMPMU_EVT_63BIT)
cpu_pmu->map_event = m2_pmu_map_event;
else
return WARN_ON(-EINVAL);
cpu_pmu->reset = m1_pmu_reset;
cpu_pmu->set_event_filter = m1_pmu_set_event_filter;
cpu_pmu->num_events = M1_PMU_NR_COUNTERS;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &m1_pmu_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &m1_pmu_format_attr_group;
return 0;
}
/* Device driver gunk */
static int m1_pmu_ice_init(struct arm_pmu *cpu_pmu)
{
cpu_pmu->name = "apple_icestorm_pmu";
return m1_pmu_init(cpu_pmu, ARMPMU_EVT_47BIT);
}
static int m1_pmu_fire_init(struct arm_pmu *cpu_pmu)
{
cpu_pmu->name = "apple_firestorm_pmu";
return m1_pmu_init(cpu_pmu, ARMPMU_EVT_47BIT);
}
static int m2_pmu_avalanche_init(struct arm_pmu *cpu_pmu)
{
cpu_pmu->name = "apple_avalanche_pmu";
return m1_pmu_init(cpu_pmu, ARMPMU_EVT_63BIT);
}
static int m2_pmu_blizzard_init(struct arm_pmu *cpu_pmu)
{
cpu_pmu->name = "apple_blizzard_pmu";
return m1_pmu_init(cpu_pmu, ARMPMU_EVT_63BIT);
}
static const struct of_device_id m1_pmu_of_device_ids[] = {
{ .compatible = "apple,avalanche-pmu", .data = m2_pmu_avalanche_init, },
{ .compatible = "apple,blizzard-pmu", .data = m2_pmu_blizzard_init, },
{ .compatible = "apple,icestorm-pmu", .data = m1_pmu_ice_init, },
{ .compatible = "apple,firestorm-pmu", .data = m1_pmu_fire_init, },
{ },
};
MODULE_DEVICE_TABLE(of, m1_pmu_of_device_ids);
static int m1_pmu_device_probe(struct platform_device *pdev)
{
return arm_pmu_device_probe(pdev, m1_pmu_of_device_ids, NULL);
}
static struct platform_driver m1_pmu_driver = {
.driver = {
.name = "apple-m1-cpu-pmu",
.of_match_table = m1_pmu_of_device_ids,
.suppress_bind_attrs = true,
},
.probe = m1_pmu_device_probe,
};
module_platform_driver(m1_pmu_driver);
| linux-master | drivers/perf/apple_m1_cpu_pmu.c |
// SPDX-License-Identifier: GPL-2.0
/* Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver
*
* Copyright (C) 2021 Marvell.
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/hrtimer.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
/* Performance Counters Operating Mode Control Registers */
#define DDRC_PERF_CNT_OP_MODE_CTRL 0x8020
#define OP_MODE_CTRL_VAL_MANNUAL 0x1
/* Performance Counters Start Operation Control Registers */
#define DDRC_PERF_CNT_START_OP_CTRL 0x8028
#define START_OP_CTRL_VAL_START 0x1ULL
#define START_OP_CTRL_VAL_ACTIVE 0x2
/* Performance Counters End Operation Control Registers */
#define DDRC_PERF_CNT_END_OP_CTRL 0x8030
#define END_OP_CTRL_VAL_END 0x1ULL
/* Performance Counters End Status Registers */
#define DDRC_PERF_CNT_END_STATUS 0x8038
#define END_STATUS_VAL_END_TIMER_MODE_END 0x1
/* Performance Counters Configuration Registers */
#define DDRC_PERF_CFG_BASE 0x8040
/* 8 Generic event counter + 2 fixed event counters */
#define DDRC_PERF_NUM_GEN_COUNTERS 8
#define DDRC_PERF_NUM_FIX_COUNTERS 2
#define DDRC_PERF_READ_COUNTER_IDX DDRC_PERF_NUM_GEN_COUNTERS
#define DDRC_PERF_WRITE_COUNTER_IDX (DDRC_PERF_NUM_GEN_COUNTERS + 1)
#define DDRC_PERF_NUM_COUNTERS (DDRC_PERF_NUM_GEN_COUNTERS + \
DDRC_PERF_NUM_FIX_COUNTERS)
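/*
* The two fixed counters follow the eight generic ones: index 8 counts
* DDR reads and index 9 counts DDR writes.
*/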
/* Generic event counter registers */
#define DDRC_PERF_CFG(n) (DDRC_PERF_CFG_BASE + 8 * (n))
#define EVENT_ENABLE BIT_ULL(63)
/* Two dedicated event counters for DDR reads and writes */
#define EVENT_DDR_READS 101
#define EVENT_DDR_WRITES 100
/*
 * Programmable event IDs for the programmable event counters.
 * DO NOT change these event-id numbers; they are used to
 * program the event bitmap in h/w.
 */
#define EVENT_OP_IS_ZQLATCH 55
#define EVENT_OP_IS_ZQSTART 54
#define EVENT_OP_IS_TCR_MRR 53
#define EVENT_OP_IS_DQSOSC_MRR 52
#define EVENT_OP_IS_DQSOSC_MPC 51
#define EVENT_VISIBLE_WIN_LIMIT_REACHED_WR 50
#define EVENT_VISIBLE_WIN_LIMIT_REACHED_RD 49
#define EVENT_BSM_STARVATION 48
#define EVENT_BSM_ALLOC 47
#define EVENT_LPR_REQ_WITH_NOCREDIT 46
#define EVENT_HPR_REQ_WITH_NOCREDIT 45
#define EVENT_OP_IS_ZQCS 44
#define EVENT_OP_IS_ZQCL 43
#define EVENT_OP_IS_LOAD_MODE 42
#define EVENT_OP_IS_SPEC_REF 41
#define EVENT_OP_IS_CRIT_REF 40
#define EVENT_OP_IS_REFRESH 39
#define EVENT_OP_IS_ENTER_MPSM 35
#define EVENT_OP_IS_ENTER_POWERDOWN 31
#define EVENT_OP_IS_ENTER_SELFREF 27
#define EVENT_WAW_HAZARD 26
#define EVENT_RAW_HAZARD 25
#define EVENT_WAR_HAZARD 24
#define EVENT_WRITE_COMBINE 23
#define EVENT_RDWR_TRANSITIONS 22
#define EVENT_PRECHARGE_FOR_OTHER 21
#define EVENT_PRECHARGE_FOR_RDWR 20
#define EVENT_OP_IS_PRECHARGE 19
#define EVENT_OP_IS_MWR 18
#define EVENT_OP_IS_WR 17
#define EVENT_OP_IS_RD 16
#define EVENT_OP_IS_RD_ACTIVATE 15
#define EVENT_OP_IS_RD_OR_WR 14
#define EVENT_OP_IS_ACTIVATE 13
#define EVENT_WR_XACT_WHEN_CRITICAL 12
#define EVENT_LPR_XACT_WHEN_CRITICAL 11
#define EVENT_HPR_XACT_WHEN_CRITICAL 10
#define EVENT_DFI_RD_DATA_CYCLES 9
#define EVENT_DFI_WR_DATA_CYCLES 8
#define EVENT_ACT_BYPASS 7
#define EVENT_READ_BYPASS 6
#define EVENT_HIF_HI_PRI_RD 5
#define EVENT_HIF_RMW 4
#define EVENT_HIF_RD 3
#define EVENT_HIF_WR 2
#define EVENT_HIF_RD_OR_WR 1
/* Event counter value registers */
#define DDRC_PERF_CNT_VALUE_BASE 0x8080
#define DDRC_PERF_CNT_VALUE(n) (DDRC_PERF_CNT_VALUE_BASE + 8 * (n))
/* Fixed event counter enable/disable register */
#define DDRC_PERF_CNT_FREERUN_EN 0x80C0
#define DDRC_PERF_FREERUN_WRITE_EN 0x1
#define DDRC_PERF_FREERUN_READ_EN 0x2
/* Fixed event counter control register */
#define DDRC_PERF_CNT_FREERUN_CTRL 0x80C8
#define DDRC_FREERUN_WRITE_CNT_CLR 0x1
#define DDRC_FREERUN_READ_CNT_CLR 0x2
/* Fixed event counter value register */
#define DDRC_PERF_CNT_VALUE_WR_OP 0x80D0
#define DDRC_PERF_CNT_VALUE_RD_OP 0x80D8
#define DDRC_PERF_CNT_VALUE_OVERFLOW BIT_ULL(48)
#define DDRC_PERF_CNT_MAX_VALUE GENMASK_ULL(48, 0)
struct cn10k_ddr_pmu {
struct pmu pmu;
void __iomem *base;
unsigned int cpu;
struct device *dev;
int active_events;
struct perf_event *events[DDRC_PERF_NUM_COUNTERS];
struct hrtimer hrtimer;
struct hlist_node node;
};
#define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)
static ssize_t cn10k_ddr_pmu_event_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
#define CN10K_DDR_PMU_EVENT_ATTR(_name, _id) \
PMU_EVENT_ATTR_ID(_name, cn10k_ddr_pmu_event_show, _id)
static struct attribute *cn10k_ddr_perf_events_attrs[] = {
CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_or_wr_access, EVENT_HIF_RD_OR_WR),
CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_wr_access, EVENT_HIF_WR),
CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_access, EVENT_HIF_RD),
CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rmw_access, EVENT_HIF_RMW),
CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_pri_rdaccess, EVENT_HIF_HI_PRI_RD),
CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_bypass_access, EVENT_READ_BYPASS),
CN10K_DDR_PMU_EVENT_ATTR(ddr_act_bypass_access, EVENT_ACT_BYPASS),
CN10K_DDR_PMU_EVENT_ATTR(ddr_dif_wr_data_access, EVENT_DFI_WR_DATA_CYCLES),
CN10K_DDR_PMU_EVENT_ATTR(ddr_dif_rd_data_access, EVENT_DFI_RD_DATA_CYCLES),
CN10K_DDR_PMU_EVENT_ATTR(ddr_hpri_sched_rd_crit_access,
EVENT_HPR_XACT_WHEN_CRITICAL),
CN10K_DDR_PMU_EVENT_ATTR(ddr_lpri_sched_rd_crit_access,
EVENT_LPR_XACT_WHEN_CRITICAL),
CN10K_DDR_PMU_EVENT_ATTR(ddr_wr_trxn_crit_access,
EVENT_WR_XACT_WHEN_CRITICAL),
CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_active_access, EVENT_OP_IS_ACTIVATE),
CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_or_wr_access, EVENT_OP_IS_RD_OR_WR),
CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_active_access, EVENT_OP_IS_RD_ACTIVATE),
CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_read, EVENT_OP_IS_RD),
CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_write, EVENT_OP_IS_WR),
CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_mwr, EVENT_OP_IS_MWR),
CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge, EVENT_OP_IS_PRECHARGE),
CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_rdwr, EVENT_PRECHARGE_FOR_RDWR),
CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_other,
EVENT_PRECHARGE_FOR_OTHER),
CN10K_DDR_PMU_EVENT_ATTR(ddr_rdwr_transitions, EVENT_RDWR_TRANSITIONS),
CN10K_DDR_PMU_EVENT_ATTR(ddr_write_combine, EVENT_WRITE_COMBINE),
CN10K_DDR_PMU_EVENT_ATTR(ddr_war_hazard, EVENT_WAR_HAZARD),
CN10K_DDR_PMU_EVENT_ATTR(ddr_raw_hazard, EVENT_RAW_HAZARD),
CN10K_DDR_PMU_EVENT_ATTR(ddr_waw_hazard, EVENT_WAW_HAZARD),
CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_selfref, EVENT_OP_IS_ENTER_SELFREF),
CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_powerdown, EVENT_OP_IS_ENTER_POWERDOWN),
CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_mpsm, EVENT_OP_IS_ENTER_MPSM),
CN10K_DDR_PMU_EVENT_ATTR(ddr_refresh, EVENT_OP_IS_REFRESH),
CN10K_DDR_PMU_EVENT_ATTR(ddr_crit_ref, EVENT_OP_IS_CRIT_REF),
CN10K_DDR_PMU_EVENT_ATTR(ddr_spec_ref, EVENT_OP_IS_SPEC_REF),
CN10K_DDR_PMU_EVENT_ATTR(ddr_load_mode, EVENT_OP_IS_LOAD_MODE),
CN10K_DDR_PMU_EVENT_ATTR(ddr_zqcl, EVENT_OP_IS_ZQCL),
CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_wr_access, EVENT_OP_IS_ZQCS),
CN10K_DDR_PMU_EVENT_ATTR(ddr_hpr_req_with_nocredit,
EVENT_HPR_REQ_WITH_NOCREDIT),
CN10K_DDR_PMU_EVENT_ATTR(ddr_lpr_req_with_nocredit,
EVENT_LPR_REQ_WITH_NOCREDIT),
CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_alloc, EVENT_BSM_ALLOC),
CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_starvation, EVENT_BSM_STARVATION),
CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_rd,
EVENT_VISIBLE_WIN_LIMIT_REACHED_RD),
CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_wr,
EVENT_VISIBLE_WIN_LIMIT_REACHED_WR),
CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mpc, EVENT_OP_IS_DQSOSC_MPC),
CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mrr, EVENT_OP_IS_DQSOSC_MRR),
CN10K_DDR_PMU_EVENT_ATTR(ddr_tcr_mrr, EVENT_OP_IS_TCR_MRR),
CN10K_DDR_PMU_EVENT_ATTR(ddr_zqstart, EVENT_OP_IS_ZQSTART),
CN10K_DDR_PMU_EVENT_ATTR(ddr_zqlatch, EVENT_OP_IS_ZQLATCH),
/* Free run event counters */
CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_reads, EVENT_DDR_READS),
CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_writes, EVENT_DDR_WRITES),
NULL
};
static struct attribute_group cn10k_ddr_perf_events_attr_group = {
.name = "events",
.attrs = cn10k_ddr_perf_events_attrs,
};
PMU_FORMAT_ATTR(event, "config:0-8");
static struct attribute *cn10k_ddr_perf_format_attrs[] = {
&format_attr_event.attr,
NULL,
};
static struct attribute_group cn10k_ddr_perf_format_attr_group = {
.name = "format",
.attrs = cn10k_ddr_perf_format_attrs,
};
static ssize_t cn10k_ddr_perf_cpumask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct cn10k_ddr_pmu *pmu = dev_get_drvdata(dev);
return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}
static struct device_attribute cn10k_ddr_perf_cpumask_attr =
__ATTR(cpumask, 0444, cn10k_ddr_perf_cpumask_show, NULL);
static struct attribute *cn10k_ddr_perf_cpumask_attrs[] = {
&cn10k_ddr_perf_cpumask_attr.attr,
NULL,
};
static struct attribute_group cn10k_ddr_perf_cpumask_attr_group = {
.attrs = cn10k_ddr_perf_cpumask_attrs,
};
static const struct attribute_group *cn10k_attr_groups[] = {
&cn10k_ddr_perf_events_attr_group,
&cn10k_ddr_perf_format_attr_group,
&cn10k_ddr_perf_cpumask_attr_group,
NULL,
};
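/*
 * The groups above appear under
 * /sys/bus/event_source/devices/mrvl_ddr_pmu_<addr>/ once the PMU is
 * registered.  A hedged usage sketch (the <addr> suffix is the DDRC
 * base address and therefore platform specific):
 *   perf stat -a -e mrvl_ddr_pmu_<addr>/ddr_ddr_reads/ sleep 1
 */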
/* The default poll period is 100 seconds, which is more than
 * sufficient: a 48-bit counter incrementing at a maximum of 5.6 GT/s
 * takes many hours to overflow.
 */
static unsigned long cn10k_ddr_pmu_poll_period_sec = 100;
module_param_named(poll_period_sec, cn10k_ddr_pmu_poll_period_sec, ulong, 0644);
static ktime_t cn10k_ddr_pmu_timer_period(void)
{
return ms_to_ktime((u64)cn10k_ddr_pmu_poll_period_sec * USEC_PER_SEC);
}
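/*
 * Translate an event id into the bitmap value programmed into the
 * generic counter's config register: most events map to a single bit
 * (eventid - 1), while the self-refresh, power-down and MPSM entry
 * events program a 4-bit field starting at that position.
 */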
static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap)
{
switch (eventid) {
case EVENT_HIF_RD_OR_WR ... EVENT_WAW_HAZARD:
case EVENT_OP_IS_REFRESH ... EVENT_OP_IS_ZQLATCH:
*event_bitmap = (1ULL << (eventid - 1));
break;
case EVENT_OP_IS_ENTER_SELFREF:
case EVENT_OP_IS_ENTER_POWERDOWN:
case EVENT_OP_IS_ENTER_MPSM:
*event_bitmap = (0xFULL << (eventid - 1));
break;
default:
pr_err("%s Invalid eventid %d\n", __func__, eventid);
return -EINVAL;
}
return 0;
}
static int cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu,
struct perf_event *event)
{
u8 config = event->attr.config;
int i;
/* DDR read free-run counter index */
if (config == EVENT_DDR_READS) {
pmu->events[DDRC_PERF_READ_COUNTER_IDX] = event;
return DDRC_PERF_READ_COUNTER_IDX;
}
/* DDR write free-run counter index */
if (config == EVENT_DDR_WRITES) {
pmu->events[DDRC_PERF_WRITE_COUNTER_IDX] = event;
return DDRC_PERF_WRITE_COUNTER_IDX;
}
/* Allocate DDR generic counters */
for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
if (pmu->events[i] == NULL) {
pmu->events[i] = event;
return i;
}
}
return -ENOENT;
}
static void cn10k_ddr_perf_free_counter(struct cn10k_ddr_pmu *pmu, int counter)
{
pmu->events[counter] = NULL;
}
static int cn10k_ddr_perf_event_init(struct perf_event *event)
{
struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
if (event->attr.type != event->pmu->type)
return -ENOENT;
if (is_sampling_event(event)) {
dev_info(pmu->dev, "Sampling not supported!\n");
return -EOPNOTSUPP;
}
if (event->cpu < 0) {
dev_warn(pmu->dev, "Can't provide per-task data!\n");
return -EOPNOTSUPP;
}
/* We must NOT create groups containing mixed PMUs */
if (event->group_leader->pmu != event->pmu &&
!is_software_event(event->group_leader))
return -EINVAL;
/* Set ownership of the event to one CPU; the same event cannot be
 * observed on multiple CPUs at the same time.
 */
event->cpu = pmu->cpu;
hwc->idx = -1;
return 0;
}
static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
int counter, bool enable)
{
u32 reg;
u64 val;
if (counter > DDRC_PERF_NUM_COUNTERS) {
pr_err("Error: unsupported counter %d\n", counter);
return;
}
if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
reg = DDRC_PERF_CFG(counter);
val = readq_relaxed(pmu->base + reg);
if (enable)
val |= EVENT_ENABLE;
else
val &= ~EVENT_ENABLE;
writeq_relaxed(val, pmu->base + reg);
} else {
val = readq_relaxed(pmu->base + DDRC_PERF_CNT_FREERUN_EN);
if (enable) {
if (counter == DDRC_PERF_READ_COUNTER_IDX)
val |= DDRC_PERF_FREERUN_READ_EN;
else
val |= DDRC_PERF_FREERUN_WRITE_EN;
} else {
if (counter == DDRC_PERF_READ_COUNTER_IDX)
val &= ~DDRC_PERF_FREERUN_READ_EN;
else
val &= ~DDRC_PERF_FREERUN_WRITE_EN;
}
writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_EN);
}
}
static u64 cn10k_ddr_perf_read_counter(struct cn10k_ddr_pmu *pmu, int counter)
{
u64 val;
if (counter == DDRC_PERF_READ_COUNTER_IDX)
return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_RD_OP);
if (counter == DDRC_PERF_WRITE_COUNTER_IDX)
return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_WR_OP);
val = readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE(counter));
return val;
}
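/*
 * Accumulate the delta since the previous read into event->count.
 * The subtraction is masked with DDRC_PERF_CNT_MAX_VALUE so a single
 * wrap of the hardware counter is still accounted correctly.
 */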
static void cn10k_ddr_perf_event_update(struct perf_event *event)
{
struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u64 prev_count, new_count, mask;
do {
prev_count = local64_read(&hwc->prev_count);
new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
} while (local64_xchg(&hwc->prev_count, new_count) != prev_count);
mask = DDRC_PERF_CNT_MAX_VALUE;
local64_add((new_count - prev_count) & mask, &event->count);
}
static void cn10k_ddr_perf_event_start(struct perf_event *event, int flags)
{
struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
local64_set(&hwc->prev_count, 0);
cn10k_ddr_perf_counter_enable(pmu, counter, true);
hwc->state = 0;
}
static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
{
struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u8 config = event->attr.config;
int counter, ret;
u32 reg_offset;
u64 val;
counter = cn10k_ddr_perf_alloc_counter(pmu, event);
if (counter < 0)
return -EAGAIN;
pmu->active_events++;
hwc->idx = counter;
if (pmu->active_events == 1)
hrtimer_start(&pmu->hrtimer, cn10k_ddr_pmu_timer_period(),
HRTIMER_MODE_REL_PINNED);
if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
/* Generic counters, configure event id */
reg_offset = DDRC_PERF_CFG(counter);
ret = ddr_perf_get_event_bitmap(config, &val);
if (ret)
return ret;
writeq_relaxed(val, pmu->base + reg_offset);
} else {
/* fixed event counter, clear counter value */
if (counter == DDRC_PERF_READ_COUNTER_IDX)
val = DDRC_FREERUN_READ_CNT_CLR;
else
val = DDRC_FREERUN_WRITE_CNT_CLR;
writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_CTRL);
}
hwc->state |= PERF_HES_STOPPED;
if (flags & PERF_EF_START)
cn10k_ddr_perf_event_start(event, flags);
return 0;
}
static void cn10k_ddr_perf_event_stop(struct perf_event *event, int flags)
{
struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
cn10k_ddr_perf_counter_enable(pmu, counter, false);
if (flags & PERF_EF_UPDATE)
cn10k_ddr_perf_event_update(event);
hwc->state |= PERF_HES_STOPPED;
}
static void cn10k_ddr_perf_event_del(struct perf_event *event, int flags)
{
struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
cn10k_ddr_perf_event_stop(event, PERF_EF_UPDATE);
cn10k_ddr_perf_free_counter(pmu, counter);
pmu->active_events--;
hwc->idx = -1;
/* Cancel timer when no events to capture */
if (pmu->active_events == 0)
hrtimer_cancel(&pmu->hrtimer);
}
static void cn10k_ddr_perf_pmu_enable(struct pmu *pmu)
{
struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);
writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base +
DDRC_PERF_CNT_START_OP_CTRL);
}
static void cn10k_ddr_perf_pmu_disable(struct pmu *pmu)
{
struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);
writeq_relaxed(END_OP_CTRL_VAL_END, ddr_pmu->base +
DDRC_PERF_CNT_END_OP_CTRL);
}
static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
{
struct hw_perf_event *hwc;
int i;
for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
if (pmu->events[i] == NULL)
continue;
cn10k_ddr_perf_event_update(pmu->events[i]);
}
/* Reset the previous count as the h/w counters are reset */
for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
if (pmu->events[i] == NULL)
continue;
hwc = &pmu->events[i]->hw;
local64_set(&hwc->prev_count, 0);
}
}
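/*
 * Polled "overflow" handling, driven by the hrtimer: the free-running
 * read/write counters are folded in when their value goes backwards,
 * and a generic counter hitting the maximum value forces an update of
 * all events followed by a stop/start of the PMU, which restarts the
 * hardware counters.
 */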
static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
{
struct perf_event *event;
struct hw_perf_event *hwc;
u64 prev_count, new_count;
u64 value;
int i;
event = pmu->events[DDRC_PERF_READ_COUNTER_IDX];
if (event) {
hwc = &event->hw;
prev_count = local64_read(&hwc->prev_count);
new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
/* The overflow condition is the new count being less
 * than the previous count.
 */
if (new_count < prev_count)
cn10k_ddr_perf_event_update(event);
}
event = pmu->events[DDRC_PERF_WRITE_COUNTER_IDX];
if (event) {
hwc = &event->hw;
prev_count = local64_read(&hwc->prev_count);
new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
/* The overflow condition is the new count being less
 * than the previous count.
 */
if (new_count < prev_count)
cn10k_ddr_perf_event_update(event);
}
for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
if (pmu->events[i] == NULL)
continue;
value = cn10k_ddr_perf_read_counter(pmu, i);
if (value == DDRC_PERF_CNT_MAX_VALUE) {
pr_info("Counter-(%d) reached max value\n", i);
cn10k_ddr_perf_event_update_all(pmu);
cn10k_ddr_perf_pmu_disable(&pmu->pmu);
cn10k_ddr_perf_pmu_enable(&pmu->pmu);
}
}
return IRQ_HANDLED;
}
static enum hrtimer_restart cn10k_ddr_pmu_timer_handler(struct hrtimer *hrtimer)
{
struct cn10k_ddr_pmu *pmu = container_of(hrtimer, struct cn10k_ddr_pmu,
hrtimer);
unsigned long flags;
local_irq_save(flags);
cn10k_ddr_pmu_overflow_handler(pmu);
local_irq_restore(flags);
hrtimer_forward_now(hrtimer, cn10k_ddr_pmu_timer_period());
return HRTIMER_RESTART;
}
static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct cn10k_ddr_pmu *pmu = hlist_entry_safe(node, struct cn10k_ddr_pmu,
node);
unsigned int target;
if (cpu != pmu->cpu)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&pmu->pmu, cpu, target);
pmu->cpu = target;
return 0;
}
static int cn10k_ddr_perf_probe(struct platform_device *pdev)
{
struct cn10k_ddr_pmu *ddr_pmu;
struct resource *res;
void __iomem *base;
char *name;
int ret;
ddr_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddr_pmu), GFP_KERNEL);
if (!ddr_pmu)
return -ENOMEM;
ddr_pmu->dev = &pdev->dev;
platform_set_drvdata(pdev, ddr_pmu);
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
ddr_pmu->base = base;
/* Setup the PMU counter to work in manual mode */
writeq_relaxed(OP_MODE_CTRL_VAL_MANNUAL, ddr_pmu->base +
DDRC_PERF_CNT_OP_MODE_CTRL);
ddr_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
.attr_groups = cn10k_attr_groups,
.event_init = cn10k_ddr_perf_event_init,
.add = cn10k_ddr_perf_event_add,
.del = cn10k_ddr_perf_event_del,
.start = cn10k_ddr_perf_event_start,
.stop = cn10k_ddr_perf_event_stop,
.read = cn10k_ddr_perf_event_update,
.pmu_enable = cn10k_ddr_perf_pmu_enable,
.pmu_disable = cn10k_ddr_perf_pmu_disable,
};
/* Choose this CPU to collect perf data */
ddr_pmu->cpu = raw_smp_processor_id();
name = devm_kasprintf(ddr_pmu->dev, GFP_KERNEL, "mrvl_ddr_pmu_%llx",
res->start);
if (!name)
return -ENOMEM;
hrtimer_init(&ddr_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ddr_pmu->hrtimer.function = cn10k_ddr_pmu_timer_handler;
cpuhp_state_add_instance_nocalls(
CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
&ddr_pmu->node);
ret = perf_pmu_register(&ddr_pmu->pmu, name, -1);
if (ret)
goto error;
pr_info("CN10K DDR PMU Driver for ddrc@%llx\n", res->start);
return 0;
error:
cpuhp_state_remove_instance_nocalls(
CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
&ddr_pmu->node);
return ret;
}
static int cn10k_ddr_perf_remove(struct platform_device *pdev)
{
struct cn10k_ddr_pmu *ddr_pmu = platform_get_drvdata(pdev);
cpuhp_state_remove_instance_nocalls(
CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
&ddr_pmu->node);
perf_pmu_unregister(&ddr_pmu->pmu);
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id cn10k_ddr_pmu_of_match[] = {
{ .compatible = "marvell,cn10k-ddr-pmu", },
{ },
};
MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match);
#endif
#ifdef CONFIG_ACPI
static const struct acpi_device_id cn10k_ddr_pmu_acpi_match[] = {
{"MRVL000A", 0},
{},
};
MODULE_DEVICE_TABLE(acpi, cn10k_ddr_pmu_acpi_match);
#endif
static struct platform_driver cn10k_ddr_pmu_driver = {
.driver = {
.name = "cn10k-ddr-pmu",
.of_match_table = of_match_ptr(cn10k_ddr_pmu_of_match),
.acpi_match_table = ACPI_PTR(cn10k_ddr_pmu_acpi_match),
.suppress_bind_attrs = true,
},
.probe = cn10k_ddr_perf_probe,
.remove = cn10k_ddr_perf_remove,
};
static int __init cn10k_ddr_pmu_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(
CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
"perf/marvell/cn10k/ddr:online", NULL,
cn10k_ddr_pmu_offline_cpu);
if (ret)
return ret;
ret = platform_driver_register(&cn10k_ddr_pmu_driver);
if (ret)
cpuhp_remove_multi_state(
CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE);
return ret;
}
static void __exit cn10k_ddr_pmu_exit(void)
{
platform_driver_unregister(&cn10k_ddr_pmu_driver);
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE);
}
module_init(cn10k_ddr_pmu_init);
module_exit(cn10k_ddr_pmu_exit);
MODULE_AUTHOR("Bharat Bhushan <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/perf/marvell_cn10k_ddr_pmu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022 Amlogic, Inc. All rights reserved.
*/
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/types.h>
#include <soc/amlogic/meson_ddr_pmu.h>
#define PORT_MAJOR 32
#define DEFAULT_XTAL_FREQ 24000000UL
#define DMC_QOS_IRQ BIT(30)
/* DMC bandwidth monitor register address offset */
#define DMC_MON_G12_CTRL0 (0x0 << 2)
#define DMC_MON_G12_CTRL1 (0x1 << 2)
#define DMC_MON_G12_CTRL2 (0x2 << 2)
#define DMC_MON_G12_CTRL3 (0x3 << 2)
#define DMC_MON_G12_CTRL4 (0x4 << 2)
#define DMC_MON_G12_CTRL5 (0x5 << 2)
#define DMC_MON_G12_CTRL6 (0x6 << 2)
#define DMC_MON_G12_CTRL7 (0x7 << 2)
#define DMC_MON_G12_CTRL8 (0x8 << 2)
#define DMC_MON_G12_ALL_REQ_CNT (0x9 << 2)
#define DMC_MON_G12_ALL_GRANT_CNT (0xa << 2)
#define DMC_MON_G12_ONE_GRANT_CNT (0xb << 2)
#define DMC_MON_G12_SEC_GRANT_CNT (0xc << 2)
#define DMC_MON_G12_THD_GRANT_CNT (0xd << 2)
#define DMC_MON_G12_FOR_GRANT_CNT (0xe << 2)
#define DMC_MON_G12_TIMER (0xf << 2)
/* Each bit represents an AXI line */
PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(arm, "config1:0");
PMU_FORMAT_ATTR(gpu, "config1:1");
PMU_FORMAT_ATTR(pcie, "config1:2");
PMU_FORMAT_ATTR(hdcp, "config1:3");
PMU_FORMAT_ATTR(hevc_front, "config1:4");
PMU_FORMAT_ATTR(usb3_0, "config1:6");
PMU_FORMAT_ATTR(device, "config1:7");
PMU_FORMAT_ATTR(hevc_back, "config1:8");
PMU_FORMAT_ATTR(h265enc, "config1:9");
PMU_FORMAT_ATTR(vpu_read1, "config1:16");
PMU_FORMAT_ATTR(vpu_read2, "config1:17");
PMU_FORMAT_ATTR(vpu_read3, "config1:18");
PMU_FORMAT_ATTR(vpu_write1, "config1:19");
PMU_FORMAT_ATTR(vpu_write2, "config1:20");
PMU_FORMAT_ATTR(vdec, "config1:21");
PMU_FORMAT_ATTR(hcodec, "config1:22");
PMU_FORMAT_ATTR(ge2d, "config1:23");
PMU_FORMAT_ATTR(spicc1, "config1:32");
PMU_FORMAT_ATTR(usb0, "config1:33");
PMU_FORMAT_ATTR(dma, "config1:34");
PMU_FORMAT_ATTR(arb0, "config1:35");
PMU_FORMAT_ATTR(sd_emmc_b, "config1:36");
PMU_FORMAT_ATTR(usb1, "config1:37");
PMU_FORMAT_ATTR(audio, "config1:38");
PMU_FORMAT_ATTR(aififo, "config1:39");
PMU_FORMAT_ATTR(parser, "config1:41");
PMU_FORMAT_ATTR(ao_cpu, "config1:42");
PMU_FORMAT_ATTR(sd_emmc_c, "config1:43");
PMU_FORMAT_ATTR(spicc2, "config1:44");
PMU_FORMAT_ATTR(ethernet, "config1:45");
PMU_FORMAT_ATTR(sana, "config1:46");
/* for sm1 and g12b */
PMU_FORMAT_ATTR(nna, "config1:10");
/* for g12b only */
PMU_FORMAT_ATTR(gdc, "config1:11");
PMU_FORMAT_ATTR(mipi_isp, "config1:12");
PMU_FORMAT_ATTR(arm1, "config1:13");
PMU_FORMAT_ATTR(sd_emmc_a, "config1:40");
static struct attribute *g12_pmu_format_attrs[] = {
&format_attr_event.attr,
&format_attr_arm.attr,
&format_attr_gpu.attr,
&format_attr_nna.attr,
&format_attr_gdc.attr,
&format_attr_arm1.attr,
&format_attr_mipi_isp.attr,
&format_attr_sd_emmc_a.attr,
&format_attr_pcie.attr,
&format_attr_hdcp.attr,
&format_attr_hevc_front.attr,
&format_attr_usb3_0.attr,
&format_attr_device.attr,
&format_attr_hevc_back.attr,
&format_attr_h265enc.attr,
&format_attr_vpu_read1.attr,
&format_attr_vpu_read2.attr,
&format_attr_vpu_read3.attr,
&format_attr_vpu_write1.attr,
&format_attr_vpu_write2.attr,
&format_attr_vdec.attr,
&format_attr_hcodec.attr,
&format_attr_ge2d.attr,
&format_attr_spicc1.attr,
&format_attr_usb0.attr,
&format_attr_dma.attr,
&format_attr_arb0.attr,
&format_attr_sd_emmc_b.attr,
&format_attr_usb1.attr,
&format_attr_audio.attr,
&format_attr_aififo.attr,
&format_attr_parser.attr,
&format_attr_ao_cpu.attr,
&format_attr_sd_emmc_c.attr,
&format_attr_spicc2.attr,
&format_attr_ethernet.attr,
&format_attr_sana.attr,
NULL,
};
/* Calculate the DDR clock frequency in Hz */
static unsigned long dmc_g12_get_freq_quick(struct dmc_info *info)
{
unsigned int val;
unsigned int n, m, od1;
unsigned int od_div = 0xfff;
unsigned long freq = 0;
val = readl(info->pll_reg);
val = val & 0xfffff;
switch ((val >> 16) & 7) {
case 0:
od_div = 2;
break;
case 1:
od_div = 3;
break;
case 2:
od_div = 4;
break;
case 3:
od_div = 6;
break;
case 4:
od_div = 8;
break;
default:
break;
}
m = val & 0x1ff;
n = ((val >> 10) & 0x1f);
od1 = (((val >> 19) & 0x1)) == 1 ? 2 : 1;
freq = DEFAULT_XTAL_FREQ / 1000; /* avoid overflow */
if (n)
freq = ((((freq * m) / n) >> od1) / od_div) * 1000;
return freq;
}
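/*
 * Illustrative example (hypothetical register values): with m = 128,
 * n = 1, bit 19 clear (so the shift is 1) and od_div = 2 on the default
 * 24 MHz crystal, the formula above yields 768 MHz.
 */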
#ifdef DEBUG
static void g12_dump_reg(struct dmc_info *db)
{
int s = 0, i;
unsigned int r;
for (i = 0; i < 9; i++) {
r = readl(db->ddr_reg[0] + (DMC_MON_G12_CTRL0 + (i << 2)));
pr_notice("DMC_MON_CTRL%d: %08x\n", i, r);
}
r = readl(db->ddr_reg[0] + DMC_MON_G12_ALL_REQ_CNT);
pr_notice("DMC_MON_ALL_REQ_CNT: %08x\n", r);
r = readl(db->ddr_reg[0] + DMC_MON_G12_ALL_GRANT_CNT);
pr_notice("DMC_MON_ALL_GRANT_CNT:%08x\n", r);
r = readl(db->ddr_reg[0] + DMC_MON_G12_ONE_GRANT_CNT);
pr_notice("DMC_MON_ONE_GRANT_CNT:%08x\n", r);
r = readl(db->ddr_reg[0] + DMC_MON_G12_SEC_GRANT_CNT);
pr_notice("DMC_MON_SEC_GRANT_CNT:%08x\n", r);
r = readl(db->ddr_reg[0] + DMC_MON_G12_THD_GRANT_CNT);
pr_notice("DMC_MON_THD_GRANT_CNT:%08x\n", r);
r = readl(db->ddr_reg[0] + DMC_MON_G12_FOR_GRANT_CNT);
pr_notice("DMC_MON_FOR_GRANT_CNT:%08x\n", r);
r = readl(db->ddr_reg[0] + DMC_MON_G12_TIMER);
pr_notice("DMC_MON_TIMER: %08x\n", r);
}
#endif
static void dmc_g12_counter_enable(struct dmc_info *info)
{
unsigned int val;
unsigned long clock_count = dmc_g12_get_freq_quick(info) / 10; /* 100ms */
writel(clock_count, info->ddr_reg[0] + DMC_MON_G12_TIMER);
val = readl(info->ddr_reg[0] + DMC_MON_G12_CTRL0);
/* enable all channels */
val = BIT(31) | /* enable bit */
BIT(20) | /* use timer */
0x0f; /* 4 channels */
writel(val, info->ddr_reg[0] + DMC_MON_G12_CTRL0);
#ifdef DEBUG
g12_dump_reg(info);
#endif
}
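/*
 * Program the port filter for one monitor channel.  Ports below
 * PORT_MAJOR (32) are selected directly in the port register, with the
 * subport register opened up to 0xffff; ports of 32 and above select
 * the "device" bit in the port register and the individual subport bit
 * in the subport register.  A negative port clears both masks.
 */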
static void dmc_g12_config_fiter(struct dmc_info *info,
int port, int channel)
{
u32 val;
u32 rp[MAX_CHANNEL_NUM] = {DMC_MON_G12_CTRL1, DMC_MON_G12_CTRL3,
DMC_MON_G12_CTRL5, DMC_MON_G12_CTRL7};
u32 rs[MAX_CHANNEL_NUM] = {DMC_MON_G12_CTRL2, DMC_MON_G12_CTRL4,
DMC_MON_G12_CTRL6, DMC_MON_G12_CTRL8};
int subport = -1;
/* clear all port masks */
if (port < 0) {
writel(0, info->ddr_reg[0] + rp[channel]);
writel(0, info->ddr_reg[0] + rs[channel]);
return;
}
if (port >= PORT_MAJOR)
subport = port - PORT_MAJOR;
if (subport < 0) {
val = readl(info->ddr_reg[0] + rp[channel]);
val |= (1 << port);
writel(val, info->ddr_reg[0] + rp[channel]);
val = 0xffff;
writel(val, info->ddr_reg[0] + rs[channel]);
} else {
val = BIT(23); /* select device */
writel(val, info->ddr_reg[0] + rp[channel]);
val = readl(info->ddr_reg[0] + rs[channel]);
val |= (1 << subport);
writel(val, info->ddr_reg[0] + rs[channel]);
}
}
static void dmc_g12_set_axi_filter(struct dmc_info *info, int axi_id, int channel)
{
if (channel > info->hw_info->chann_nr)
return;
dmc_g12_config_fiter(info, axi_id, channel);
}
static void dmc_g12_counter_disable(struct dmc_info *info)
{
int i;
/* clear timer */
writel(0, info->ddr_reg[0] + DMC_MON_G12_CTRL0);
writel(0, info->ddr_reg[0] + DMC_MON_G12_TIMER);
writel(0, info->ddr_reg[0] + DMC_MON_G12_ALL_REQ_CNT);
writel(0, info->ddr_reg[0] + DMC_MON_G12_ALL_GRANT_CNT);
writel(0, info->ddr_reg[0] + DMC_MON_G12_ONE_GRANT_CNT);
writel(0, info->ddr_reg[0] + DMC_MON_G12_SEC_GRANT_CNT);
writel(0, info->ddr_reg[0] + DMC_MON_G12_THD_GRANT_CNT);
writel(0, info->ddr_reg[0] + DMC_MON_G12_FOR_GRANT_CNT);
/* clear port channel mapping */
for (i = 0; i < info->hw_info->chann_nr; i++)
dmc_g12_config_fiter(info, -1, i);
}
static void dmc_g12_get_counters(struct dmc_info *info,
struct dmc_counter *counter)
{
int i;
unsigned int reg;
counter->all_cnt = readl(info->ddr_reg[0] + DMC_MON_G12_ALL_GRANT_CNT);
counter->all_req = readl(info->ddr_reg[0] + DMC_MON_G12_ALL_REQ_CNT);
for (i = 0; i < info->hw_info->chann_nr; i++) {
reg = DMC_MON_G12_ONE_GRANT_CNT + (i << 2);
counter->channel_cnt[i] = readl(info->ddr_reg[0] + reg);
}
}
static int dmc_g12_irq_handler(struct dmc_info *info,
struct dmc_counter *counter)
{
unsigned int val;
int ret = -EINVAL;
val = readl(info->ddr_reg[0] + DMC_MON_G12_CTRL0);
if (val & DMC_QOS_IRQ) {
dmc_g12_get_counters(info, counter);
/* clear irq flags */
writel(val, info->ddr_reg[0] + DMC_MON_G12_CTRL0);
ret = 0;
}
return ret;
}
static const struct dmc_hw_info g12a_dmc_info = {
.enable = dmc_g12_counter_enable,
.disable = dmc_g12_counter_disable,
.irq_handler = dmc_g12_irq_handler,
.get_counters = dmc_g12_get_counters,
.set_axi_filter = dmc_g12_set_axi_filter,
.dmc_nr = 1,
.chann_nr = 4,
.capability = {0X7EFF00FF03DF, 0},
.fmt_attr = g12_pmu_format_attrs,
};
static const struct dmc_hw_info g12b_dmc_info = {
.enable = dmc_g12_counter_enable,
.disable = dmc_g12_counter_disable,
.irq_handler = dmc_g12_irq_handler,
.get_counters = dmc_g12_get_counters,
.set_axi_filter = dmc_g12_set_axi_filter,
.dmc_nr = 1,
.chann_nr = 4,
.capability = {0X7FFF00FF3FDF, 0},
.fmt_attr = g12_pmu_format_attrs,
};
static const struct dmc_hw_info sm1_dmc_info = {
.enable = dmc_g12_counter_enable,
.disable = dmc_g12_counter_disable,
.irq_handler = dmc_g12_irq_handler,
.get_counters = dmc_g12_get_counters,
.set_axi_filter = dmc_g12_set_axi_filter,
.dmc_nr = 1,
.chann_nr = 4,
.capability = {0X7EFF00FF07DF, 0},
.fmt_attr = g12_pmu_format_attrs,
};
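/*
 * The capability bitmaps above gate which config1/config2 format
 * attributes are visible in sysfs; the core driver's is_visible
 * callback tests the bit that matches each attribute.
 */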
static int g12_ddr_pmu_probe(struct platform_device *pdev)
{
return meson_ddr_pmu_create(pdev);
}
static int g12_ddr_pmu_remove(struct platform_device *pdev)
{
meson_ddr_pmu_remove(pdev);
return 0;
}
static const struct of_device_id meson_ddr_pmu_dt_match[] = {
{
.compatible = "amlogic,g12a-ddr-pmu",
.data = &g12a_dmc_info,
},
{
.compatible = "amlogic,g12b-ddr-pmu",
.data = &g12b_dmc_info,
},
{
.compatible = "amlogic,sm1-ddr-pmu",
.data = &sm1_dmc_info,
},
{}
};
static struct platform_driver g12_ddr_pmu_driver = {
.probe = g12_ddr_pmu_probe,
.remove = g12_ddr_pmu_remove,
.driver = {
.name = "meson-g12-ddr-pmu",
.of_match_table = meson_ddr_pmu_dt_match,
},
};
module_platform_driver(g12_ddr_pmu_driver);
MODULE_AUTHOR("Jiucheng Xu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Amlogic G12 series SoC DDR PMU");
| linux-master | drivers/perf/amlogic/meson_g12_ddr_pmu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022 Amlogic, Inc. All rights reserved.
*/
#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <soc/amlogic/meson_ddr_pmu.h>
struct ddr_pmu {
struct pmu pmu;
struct dmc_info info;
struct dmc_counter counters; /* save counters from hw */
bool pmu_enabled;
struct device *dev;
char *name;
struct hlist_node node;
enum cpuhp_state cpuhp_state;
int cpu; /* for cpu hotplug */
};
#define DDR_PERF_DEV_NAME "meson_ddr_bw"
#define MAX_AXI_PORTS_OF_CHANNEL 4 /* A DMC channel can monitor max 4 axi ports */
#define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
#define dmc_info_to_pmu(p) container_of(p, struct ddr_pmu, info)
static void dmc_pmu_enable(struct ddr_pmu *pmu)
{
if (!pmu->pmu_enabled)
pmu->info.hw_info->enable(&pmu->info);
pmu->pmu_enabled = true;
}
static void dmc_pmu_disable(struct ddr_pmu *pmu)
{
if (pmu->pmu_enabled)
pmu->info.hw_info->disable(&pmu->info);
pmu->pmu_enabled = false;
}
static void meson_ddr_set_axi_filter(struct perf_event *event, u8 axi_id)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
int chann;
if (event->attr.config > ALL_CHAN_COUNTER_ID &&
event->attr.config < COUNTER_MAX_ID) {
chann = event->attr.config - CHAN1_COUNTER_ID;
pmu->info.hw_info->set_axi_filter(&pmu->info, axi_id, chann);
}
}
static void ddr_cnt_addition(struct dmc_counter *sum,
struct dmc_counter *add1,
struct dmc_counter *add2,
int chann_nr)
{
int i;
u64 cnt1, cnt2;
sum->all_cnt = add1->all_cnt + add2->all_cnt;
sum->all_req = add1->all_req + add2->all_req;
for (i = 0; i < chann_nr; i++) {
cnt1 = add1->channel_cnt[i];
cnt2 = add2->channel_cnt[i];
sum->channel_cnt[i] = cnt1 + cnt2;
}
}
static void meson_ddr_perf_event_update(struct perf_event *event)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
u64 new_raw_count = 0;
struct dmc_counter dc = {0}, sum_dc = {0};
int idx;
int chann_nr = pmu->info.hw_info->chann_nr;
/* Get the remaining counts from the hardware registers. */
pmu->info.hw_info->get_counters(&pmu->info, &dc);
ddr_cnt_addition(&sum_dc, &pmu->counters, &dc, chann_nr);
switch (event->attr.config) {
case ALL_CHAN_COUNTER_ID:
new_raw_count = sum_dc.all_cnt;
break;
case CHAN1_COUNTER_ID:
case CHAN2_COUNTER_ID:
case CHAN3_COUNTER_ID:
case CHAN4_COUNTER_ID:
case CHAN5_COUNTER_ID:
case CHAN6_COUNTER_ID:
case CHAN7_COUNTER_ID:
case CHAN8_COUNTER_ID:
idx = event->attr.config - CHAN1_COUNTER_ID;
new_raw_count = sum_dc.channel_cnt[idx];
break;
}
local64_set(&event->count, new_raw_count);
}
static int meson_ddr_perf_event_init(struct perf_event *event)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
u64 config1 = event->attr.config1;
u64 config2 = event->attr.config2;
if (event->attr.type != event->pmu->type)
return -ENOENT;
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EOPNOTSUPP;
if (event->cpu < 0)
return -EOPNOTSUPP;
/* Check that not too many AXI ports are selected for one channel */
if (event->attr.config != ALL_CHAN_COUNTER_ID &&
hweight64(config1) + hweight64(config2) > MAX_AXI_PORTS_OF_CHANNEL)
return -EOPNOTSUPP;
event->cpu = pmu->cpu;
return 0;
}
static void meson_ddr_perf_event_start(struct perf_event *event, int flags)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
memset(&pmu->counters, 0, sizeof(pmu->counters));
dmc_pmu_enable(pmu);
}
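/*
 * config1 selects AXI ports 0-63 and config2 selects ports 64-127;
 * every set bit is programmed as an AXI filter on the channel implied
 * by the event id before the event is (optionally) started.
 */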
static int meson_ddr_perf_event_add(struct perf_event *event, int flags)
{
u64 config1 = event->attr.config1;
u64 config2 = event->attr.config2;
int i;
for_each_set_bit(i,
(const unsigned long *)&config1,
BITS_PER_TYPE(config1))
meson_ddr_set_axi_filter(event, i);
for_each_set_bit(i,
(const unsigned long *)&config2,
BITS_PER_TYPE(config2))
meson_ddr_set_axi_filter(event, i + 64);
if (flags & PERF_EF_START)
meson_ddr_perf_event_start(event, flags);
return 0;
}
static void meson_ddr_perf_event_stop(struct perf_event *event, int flags)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
if (flags & PERF_EF_UPDATE)
meson_ddr_perf_event_update(event);
dmc_pmu_disable(pmu);
}
static void meson_ddr_perf_event_del(struct perf_event *event, int flags)
{
meson_ddr_perf_event_stop(event, PERF_EF_UPDATE);
}
static ssize_t meson_ddr_perf_cpumask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ddr_pmu *pmu = dev_get_drvdata(dev);
return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}
static struct device_attribute meson_ddr_perf_cpumask_attr =
__ATTR(cpumask, 0444, meson_ddr_perf_cpumask_show, NULL);
static struct attribute *meson_ddr_perf_cpumask_attrs[] = {
&meson_ddr_perf_cpumask_attr.attr,
NULL,
};
static const struct attribute_group ddr_perf_cpumask_attr_group = {
.attrs = meson_ddr_perf_cpumask_attrs,
};
static ssize_t
pmu_event_show(struct device *dev, struct device_attribute *attr,
char *page)
{
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
static ssize_t
event_show_unit(struct device *dev, struct device_attribute *attr,
char *page)
{
return sysfs_emit(page, "MB\n");
}
static ssize_t
event_show_scale(struct device *dev, struct device_attribute *attr,
char *page)
{
/* one count = 16 bytes = 1.52587890625e-05 MB */
return sysfs_emit(page, "1.52587890625e-05\n");
}
#define AML_DDR_PMU_EVENT_ATTR(_name, _id) \
{ \
.attr = __ATTR(_name, 0444, pmu_event_show, NULL), \
.id = _id, \
}
#define AML_DDR_PMU_EVENT_UNIT_ATTR(_name) \
__ATTR(_name.unit, 0444, event_show_unit, NULL)
#define AML_DDR_PMU_EVENT_SCALE_ATTR(_name) \
__ATTR(_name.scale, 0444, event_show_scale, NULL)
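/*
 * Each event is published together with a matching .unit ("MB") and
 * .scale attribute so perf can report the counts as megabytes.
 */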
static struct device_attribute event_unit_attrs[] = {
AML_DDR_PMU_EVENT_UNIT_ATTR(total_rw_bytes),
AML_DDR_PMU_EVENT_UNIT_ATTR(chan_1_rw_bytes),
AML_DDR_PMU_EVENT_UNIT_ATTR(chan_2_rw_bytes),
AML_DDR_PMU_EVENT_UNIT_ATTR(chan_3_rw_bytes),
AML_DDR_PMU_EVENT_UNIT_ATTR(chan_4_rw_bytes),
AML_DDR_PMU_EVENT_UNIT_ATTR(chan_5_rw_bytes),
AML_DDR_PMU_EVENT_UNIT_ATTR(chan_6_rw_bytes),
AML_DDR_PMU_EVENT_UNIT_ATTR(chan_7_rw_bytes),
AML_DDR_PMU_EVENT_UNIT_ATTR(chan_8_rw_bytes),
};
static struct device_attribute event_scale_attrs[] = {
AML_DDR_PMU_EVENT_SCALE_ATTR(total_rw_bytes),
AML_DDR_PMU_EVENT_SCALE_ATTR(chan_1_rw_bytes),
AML_DDR_PMU_EVENT_SCALE_ATTR(chan_2_rw_bytes),
AML_DDR_PMU_EVENT_SCALE_ATTR(chan_3_rw_bytes),
AML_DDR_PMU_EVENT_SCALE_ATTR(chan_4_rw_bytes),
AML_DDR_PMU_EVENT_SCALE_ATTR(chan_5_rw_bytes),
AML_DDR_PMU_EVENT_SCALE_ATTR(chan_6_rw_bytes),
AML_DDR_PMU_EVENT_SCALE_ATTR(chan_7_rw_bytes),
AML_DDR_PMU_EVENT_SCALE_ATTR(chan_8_rw_bytes),
};
static struct perf_pmu_events_attr event_attrs[] = {
AML_DDR_PMU_EVENT_ATTR(total_rw_bytes, ALL_CHAN_COUNTER_ID),
AML_DDR_PMU_EVENT_ATTR(chan_1_rw_bytes, CHAN1_COUNTER_ID),
AML_DDR_PMU_EVENT_ATTR(chan_2_rw_bytes, CHAN2_COUNTER_ID),
AML_DDR_PMU_EVENT_ATTR(chan_3_rw_bytes, CHAN3_COUNTER_ID),
AML_DDR_PMU_EVENT_ATTR(chan_4_rw_bytes, CHAN4_COUNTER_ID),
AML_DDR_PMU_EVENT_ATTR(chan_5_rw_bytes, CHAN5_COUNTER_ID),
AML_DDR_PMU_EVENT_ATTR(chan_6_rw_bytes, CHAN6_COUNTER_ID),
AML_DDR_PMU_EVENT_ATTR(chan_7_rw_bytes, CHAN7_COUNTER_ID),
AML_DDR_PMU_EVENT_ATTR(chan_8_rw_bytes, CHAN8_COUNTER_ID),
};
/* three attrs combine to describe one event */
static struct attribute *ddr_perf_events_attrs[COUNTER_MAX_ID * 3];
static struct attribute_group ddr_perf_events_attr_group = {
.name = "events",
.attrs = ddr_perf_events_attrs,
};
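/*
 * A format attribute is only visible when the matching bit is set in
 * the SoC's capability mask.  The attribute's own show() output
 * ("config1:<bit>" or "config2:<bit>") is parsed to find which bit to
 * test.
 */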
static umode_t meson_ddr_perf_format_attr_visible(struct kobject *kobj,
struct attribute *attr,
int n)
{
struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
const u64 *capability = ddr_pmu->info.hw_info->capability;
struct device_attribute *dev_attr;
int id;
char value[20]; // config1:xxx, 20 is enough
dev_attr = container_of(attr, struct device_attribute, attr);
dev_attr->show(NULL, NULL, value);
if (sscanf(value, "config1:%d", &id) == 1)
return capability[0] & (1ULL << id) ? attr->mode : 0;
if (sscanf(value, "config2:%d", &id) == 1)
return capability[1] & (1ULL << id) ? attr->mode : 0;
return attr->mode;
}
static struct attribute_group ddr_perf_format_attr_group = {
.name = "format",
.is_visible = meson_ddr_perf_format_attr_visible,
};
static ssize_t meson_ddr_perf_identifier_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
struct ddr_pmu *pmu = dev_get_drvdata(dev);
return sysfs_emit(page, "%s\n", pmu->name);
}
static struct device_attribute meson_ddr_perf_identifier_attr =
__ATTR(identifier, 0444, meson_ddr_perf_identifier_show, NULL);
static struct attribute *meson_ddr_perf_identifier_attrs[] = {
&meson_ddr_perf_identifier_attr.attr,
NULL,
};
static const struct attribute_group ddr_perf_identifier_attr_group = {
.attrs = meson_ddr_perf_identifier_attrs,
};
static const struct attribute_group *attr_groups[] = {
&ddr_perf_events_attr_group,
&ddr_perf_format_attr_group,
&ddr_perf_cpumask_attr_group,
&ddr_perf_identifier_attr_group,
NULL,
};
static irqreturn_t dmc_irq_handler(int irq, void *dev_id)
{
struct dmc_info *info = dev_id;
struct ddr_pmu *pmu;
struct dmc_counter counters, *sum_cnter;
int i;
pmu = dmc_info_to_pmu(info);
if (info->hw_info->irq_handler(info, &counters) != 0)
goto out;
sum_cnter = &pmu->counters;
sum_cnter->all_cnt += counters.all_cnt;
sum_cnter->all_req += counters.all_req;
for (i = 0; i < pmu->info.hw_info->chann_nr; i++)
sum_cnter->channel_cnt[i] += counters.channel_cnt[i];
if (pmu->pmu_enabled)
/*
 * The timer interrupt only supports
 * one-shot mode; we have to re-enable
 * it in the ISR to support continuous mode.
 */
info->hw_info->enable(info);
dev_dbg(pmu->dev, "counts: %llu %llu %llu, %llu, %llu, %llu\t\t"
"sum: %llu %llu %llu, %llu, %llu, %llu\n",
counters.all_req,
counters.all_cnt,
counters.channel_cnt[0],
counters.channel_cnt[1],
counters.channel_cnt[2],
counters.channel_cnt[3],
pmu->counters.all_req,
pmu->counters.all_cnt,
pmu->counters.channel_cnt[0],
pmu->counters.channel_cnt[1],
pmu->counters.channel_cnt[2],
pmu->counters.channel_cnt[3]);
out:
return IRQ_HANDLED;
}
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
int target;
if (cpu != pmu->cpu)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&pmu->pmu, cpu, target);
pmu->cpu = target;
WARN_ON(irq_set_affinity(pmu->info.irq_num, cpumask_of(pmu->cpu)));
return 0;
}
static void fill_event_attr(struct ddr_pmu *pmu)
{
int i, j, k;
struct attribute **dst = ddr_perf_events_attrs;
j = 0;
k = 0;
/* fill ALL_CHAN_COUNTER_ID event */
dst[j++] = &event_attrs[k].attr.attr;
dst[j++] = &event_unit_attrs[k].attr;
dst[j++] = &event_scale_attrs[k].attr;
k++;
/* fill each channel event */
for (i = 0; i < pmu->info.hw_info->chann_nr; i++, k++) {
dst[j++] = &event_attrs[k].attr.attr;
dst[j++] = &event_unit_attrs[k].attr;
dst[j++] = &event_scale_attrs[k].attr;
}
dst[j] = NULL; /* mark end */
}
static void fmt_attr_fill(struct attribute **fmt_attr)
{
ddr_perf_format_attr_group.attrs = fmt_attr;
}
static int ddr_pmu_parse_dt(struct platform_device *pdev,
struct dmc_info *info)
{
void __iomem *base;
int i, ret;
info->hw_info = of_device_get_match_data(&pdev->dev);
for (i = 0; i < info->hw_info->dmc_nr; i++) {
/* resource i is the i-th DDR register base */
base = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(base))
return PTR_ERR(base);
info->ddr_reg[i] = base;
}
/* resource i for pll register base */
base = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(base))
return PTR_ERR(base);
info->pll_reg = base;
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
info->irq_num = ret;
ret = devm_request_irq(&pdev->dev, info->irq_num, dmc_irq_handler,
IRQF_NOBALANCING, dev_name(&pdev->dev),
(void *)info);
if (ret < 0)
return ret;
return 0;
}
int meson_ddr_pmu_create(struct platform_device *pdev)
{
int ret;
char *name;
struct ddr_pmu *pmu;
pmu = devm_kzalloc(&pdev->dev, sizeof(struct ddr_pmu), GFP_KERNEL);
if (!pmu)
return -ENOMEM;
*pmu = (struct ddr_pmu) {
.pmu = {
.module = THIS_MODULE,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
.attr_groups = attr_groups,
.event_init = meson_ddr_perf_event_init,
.add = meson_ddr_perf_event_add,
.del = meson_ddr_perf_event_del,
.start = meson_ddr_perf_event_start,
.stop = meson_ddr_perf_event_stop,
.read = meson_ddr_perf_event_update,
},
};
ret = ddr_pmu_parse_dt(pdev, &pmu->info);
if (ret < 0)
return ret;
fmt_attr_fill(pmu->info.hw_info->fmt_attr);
pmu->cpu = smp_processor_id();
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME);
if (!name)
return -ENOMEM;
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, name, NULL,
ddr_perf_offline_cpu);
if (ret < 0)
return ret;
pmu->cpuhp_state = ret;
/* Register the pmu instance for cpu hotplug */
ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
if (ret)
goto cpuhp_instance_err;
fill_event_attr(pmu);
ret = perf_pmu_register(&pmu->pmu, name, -1);
if (ret)
goto pmu_register_err;
pmu->name = name;
pmu->dev = &pdev->dev;
pmu->pmu_enabled = false;
platform_set_drvdata(pdev, pmu);
return 0;
pmu_register_err:
cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_instance_err:
cpuhp_remove_state(pmu->cpuhp_state);
return ret;
}
int meson_ddr_pmu_remove(struct platform_device *pdev)
{
struct ddr_pmu *pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&pmu->pmu);
cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_remove_state(pmu->cpuhp_state);
return 0;
}
| linux-master | drivers/perf/amlogic/meson_ddr_pmu_core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ARM CoreSight Architecture PMU driver.
*
* This driver adds support for uncore PMU based on ARM CoreSight Performance
* Monitoring Unit Architecture. The PMU is accessible via MMIO registers and
* like other uncore PMUs, it does not support process specific events and
* cannot be used in sampling mode.
*
* This code is based on other uncore PMUs like ARM DSU PMU. It provides a
* generic implementation to operate the PMU according to CoreSight PMU
* architecture and ACPI ARM PMU table (APMT) documents below:
* - ARM CoreSight PMU architecture document number: ARM IHI 0091 A.a-00bet0.
* - APMT document number: ARM DEN0117.
*
* The user should refer to the vendor technical documentation to get details
* about the supported events.
*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
*/
#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/ctype.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include "arm_cspmu.h"
#include "nvidia_cspmu.h"
#define PMUNAME "arm_cspmu"
#define DRVNAME "arm-cs-arch-pmu"
#define ARM_CSPMU_CPUMASK_ATTR(_name, _config) \
ARM_CSPMU_EXT_ATTR(_name, arm_cspmu_cpumask_show, \
(unsigned long)_config)
/*
* CoreSight PMU Arch register offsets.
*/
#define PMEVCNTR_LO 0x0
#define PMEVCNTR_HI 0x4
#define PMEVTYPER 0x400
#define PMCCFILTR 0x47C
#define PMEVFILTR 0xA00
#define PMCNTENSET 0xC00
#define PMCNTENCLR 0xC20
#define PMINTENSET 0xC40
#define PMINTENCLR 0xC60
#define PMOVSCLR 0xC80
#define PMOVSSET 0xCC0
#define PMCFGR 0xE00
#define PMCR 0xE04
#define PMIIDR 0xE08
/* PMCFGR register field */
#define PMCFGR_NCG GENMASK(31, 28)
#define PMCFGR_HDBG BIT(24)
#define PMCFGR_TRO BIT(23)
#define PMCFGR_SS BIT(22)
#define PMCFGR_FZO BIT(21)
#define PMCFGR_MSI BIT(20)
#define PMCFGR_UEN BIT(19)
#define PMCFGR_NA BIT(17)
#define PMCFGR_EX BIT(16)
#define PMCFGR_CCD BIT(15)
#define PMCFGR_CC BIT(14)
#define PMCFGR_SIZE GENMASK(13, 8)
#define PMCFGR_N GENMASK(7, 0)
/* PMCR register field */
#define PMCR_TRO BIT(11)
#define PMCR_HDBG BIT(10)
#define PMCR_FZO BIT(9)
#define PMCR_NA BIT(8)
#define PMCR_DP BIT(5)
#define PMCR_X BIT(4)
#define PMCR_D BIT(3)
#define PMCR_C BIT(2)
#define PMCR_P BIT(1)
#define PMCR_E BIT(0)
/* Each SET/CLR register supports up to 32 counters. */
#define ARM_CSPMU_SET_CLR_COUNTER_SHIFT 5
#define ARM_CSPMU_SET_CLR_COUNTER_NUM \
(1 << ARM_CSPMU_SET_CLR_COUNTER_SHIFT)
/* Convert counter idx into SET/CLR register number. */
#define COUNTER_TO_SET_CLR_ID(idx) \
(idx >> ARM_CSPMU_SET_CLR_COUNTER_SHIFT)
/* Convert counter idx into SET/CLR register bit. */
#define COUNTER_TO_SET_CLR_BIT(idx) \
(idx & (ARM_CSPMU_SET_CLR_COUNTER_NUM - 1))
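/*
 * For example, counter index 37 maps to SET/CLR register 1, bit 5
 * (37 >> 5 == 1, 37 & 31 == 5).
 */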
#define ARM_CSPMU_ACTIVE_CPU_MASK 0x0
#define ARM_CSPMU_ASSOCIATED_CPU_MASK 0x1
/* Check and use default if implementer doesn't provide attribute callback */
#define CHECK_DEFAULT_IMPL_OPS(ops, callback) \
do { \
if (!ops->callback) \
ops->callback = arm_cspmu_ ## callback; \
} while (0)
/*
* Maximum poll count for reading counter value using high-low-high sequence.
*/
#define HILOHI_MAX_POLL 1000
/* JEDEC-assigned JEP106 identification code */
#define ARM_CSPMU_IMPL_ID_NVIDIA 0x36B
static unsigned long arm_cspmu_cpuhp_state;
static struct acpi_apmt_node *arm_cspmu_apmt_node(struct device *dev)
{
return *(struct acpi_apmt_node **)dev_get_platdata(dev);
}
/*
 * In the CoreSight PMU architecture, all of the MMIO registers are 32-bit
 * except the counter registers. A counter register can be implemented as a
 * 32-bit or a 64-bit register depending on the value of the PMCFGR.SIZE
 * field. For 64-bit access, single-copy 64-bit atomic support is
 * implementation defined. An APMT node flag identifies whether the PMU
 * supports 64-bit single-copy atomics. If it does not, the driver treats
 * the register as a pair of 32-bit registers.
 */
/*
* Read 64-bit register as a pair of 32-bit registers using hi-lo-hi sequence.
*/
static u64 read_reg64_hilohi(const void __iomem *addr, u32 max_poll_count)
{
u32 val_lo, val_hi;
u64 val;
/* Use high-low-high sequence to avoid tearing */
do {
if (max_poll_count-- == 0) {
pr_err("ARM CSPMU: timeout hi-low-high sequence\n");
return 0;
}
val_hi = readl(addr + 4);
val_lo = readl(addr);
} while (val_hi != readl(addr + 4));
val = (((u64)val_hi << 32) | val_lo);
return val;
}
/* Check if cycle counter is supported. */
static inline bool supports_cycle_counter(const struct arm_cspmu *cspmu)
{
return (cspmu->pmcfgr & PMCFGR_CC);
}
/* Get counter size, which is (PMCFGR_SIZE + 1). */
static inline u32 counter_size(const struct arm_cspmu *cspmu)
{
return FIELD_GET(PMCFGR_SIZE, cspmu->pmcfgr) + 1;
}
/* Get counter mask. */
static inline u64 counter_mask(const struct arm_cspmu *cspmu)
{
return GENMASK_ULL(counter_size(cspmu) - 1, 0);
}
/* Check if counter is implemented as 64-bit register. */
static inline bool use_64b_counter_reg(const struct arm_cspmu *cspmu)
{
return (counter_size(cspmu) > 32);
}
ssize_t arm_cspmu_sysfs_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, typeof(*pmu_attr), attr);
return sysfs_emit(buf, "event=0x%llx\n", pmu_attr->id);
}
EXPORT_SYMBOL_GPL(arm_cspmu_sysfs_event_show);
/* Default event list. */
static struct attribute *arm_cspmu_event_attrs[] = {
ARM_CSPMU_EVENT_ATTR(cycles, ARM_CSPMU_EVT_CYCLES_DEFAULT),
NULL,
};
static struct attribute **
arm_cspmu_get_event_attrs(const struct arm_cspmu *cspmu)
{
struct attribute **attrs;
attrs = devm_kmemdup(cspmu->dev, arm_cspmu_event_attrs,
sizeof(arm_cspmu_event_attrs), GFP_KERNEL);
return attrs;
}
static umode_t
arm_cspmu_event_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int unused)
{
struct device *dev = kobj_to_dev(kobj);
struct arm_cspmu *cspmu = to_arm_cspmu(dev_get_drvdata(dev));
struct perf_pmu_events_attr *eattr;
eattr = container_of(attr, typeof(*eattr), attr.attr);
/* Hide cycle event if not supported */
if (!supports_cycle_counter(cspmu) &&
eattr->id == ARM_CSPMU_EVT_CYCLES_DEFAULT)
return 0;
return attr->mode;
}
ssize_t arm_cspmu_sysfs_format_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct dev_ext_attribute *eattr =
container_of(attr, struct dev_ext_attribute, attr);
return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}
EXPORT_SYMBOL_GPL(arm_cspmu_sysfs_format_show);
static struct attribute *arm_cspmu_format_attrs[] = {
ARM_CSPMU_FORMAT_EVENT_ATTR,
ARM_CSPMU_FORMAT_FILTER_ATTR,
NULL,
};
static struct attribute **
arm_cspmu_get_format_attrs(const struct arm_cspmu *cspmu)
{
struct attribute **attrs;
attrs = devm_kmemdup(cspmu->dev, arm_cspmu_format_attrs,
sizeof(arm_cspmu_format_attrs), GFP_KERNEL);
return attrs;
}
static u32 arm_cspmu_event_type(const struct perf_event *event)
{
return event->attr.config & ARM_CSPMU_EVENT_MASK;
}
static bool arm_cspmu_is_cycle_counter_event(const struct perf_event *event)
{
return (event->attr.config == ARM_CSPMU_EVT_CYCLES_DEFAULT);
}
static u32 arm_cspmu_event_filter(const struct perf_event *event)
{
return event->attr.config1 & ARM_CSPMU_FILTER_MASK;
}
static ssize_t arm_cspmu_identifier_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
struct arm_cspmu *cspmu = to_arm_cspmu(dev_get_drvdata(dev));
return sysfs_emit(page, "%s\n", cspmu->identifier);
}
static struct device_attribute arm_cspmu_identifier_attr =
__ATTR(identifier, 0444, arm_cspmu_identifier_show, NULL);
static struct attribute *arm_cspmu_identifier_attrs[] = {
&arm_cspmu_identifier_attr.attr,
NULL,
};
static struct attribute_group arm_cspmu_identifier_attr_group = {
.attrs = arm_cspmu_identifier_attrs,
};
static const char *arm_cspmu_get_identifier(const struct arm_cspmu *cspmu)
{
const char *identifier =
devm_kasprintf(cspmu->dev, GFP_KERNEL, "%x",
cspmu->impl.pmiidr);
return identifier;
}
static const char *arm_cspmu_type_str[ACPI_APMT_NODE_TYPE_COUNT] = {
"mc",
"smmu",
"pcie",
"acpi",
"cache",
};
static const char *arm_cspmu_get_name(const struct arm_cspmu *cspmu)
{
struct device *dev;
struct acpi_apmt_node *apmt_node;
u8 pmu_type;
char *name;
char acpi_hid_string[ACPI_ID_LEN] = { 0 };
static atomic_t pmu_idx[ACPI_APMT_NODE_TYPE_COUNT] = { 0 };
dev = cspmu->dev;
apmt_node = arm_cspmu_apmt_node(dev);
pmu_type = apmt_node->type;
if (pmu_type >= ACPI_APMT_NODE_TYPE_COUNT) {
dev_err(dev, "unsupported PMU type-%u\n", pmu_type);
return NULL;
}
if (pmu_type == ACPI_APMT_NODE_TYPE_ACPI) {
memcpy(acpi_hid_string,
&apmt_node->inst_primary,
sizeof(apmt_node->inst_primary));
name = devm_kasprintf(dev, GFP_KERNEL, "%s_%s_%s_%u", PMUNAME,
arm_cspmu_type_str[pmu_type],
acpi_hid_string,
apmt_node->inst_secondary);
} else {
name = devm_kasprintf(dev, GFP_KERNEL, "%s_%s_%d", PMUNAME,
arm_cspmu_type_str[pmu_type],
atomic_fetch_inc(&pmu_idx[pmu_type]));
}
return name;
}
static ssize_t arm_cspmu_cpumask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct pmu *pmu = dev_get_drvdata(dev);
struct arm_cspmu *cspmu = to_arm_cspmu(pmu);
struct dev_ext_attribute *eattr =
container_of(attr, struct dev_ext_attribute, attr);
unsigned long mask_id = (unsigned long)eattr->var;
const cpumask_t *cpumask;
switch (mask_id) {
case ARM_CSPMU_ACTIVE_CPU_MASK:
cpumask = &cspmu->active_cpu;
break;
case ARM_CSPMU_ASSOCIATED_CPU_MASK:
cpumask = &cspmu->associated_cpus;
break;
default:
return 0;
}
return cpumap_print_to_pagebuf(true, buf, cpumask);
}
static struct attribute *arm_cspmu_cpumask_attrs[] = {
ARM_CSPMU_CPUMASK_ATTR(cpumask, ARM_CSPMU_ACTIVE_CPU_MASK),
ARM_CSPMU_CPUMASK_ATTR(associated_cpus, ARM_CSPMU_ASSOCIATED_CPU_MASK),
NULL,
};
static struct attribute_group arm_cspmu_cpumask_attr_group = {
.attrs = arm_cspmu_cpumask_attrs,
};
struct impl_match {
u32 pmiidr;
u32 mask;
int (*impl_init_ops)(struct arm_cspmu *cspmu);
};
static const struct impl_match impl_match[] = {
{
.pmiidr = ARM_CSPMU_IMPL_ID_NVIDIA,
.mask = ARM_CSPMU_PMIIDR_IMPLEMENTER,
.impl_init_ops = nv_cspmu_init_ops
},
{}
};
static int arm_cspmu_init_impl_ops(struct arm_cspmu *cspmu)
{
int ret;
struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;
struct acpi_apmt_node *apmt_node = arm_cspmu_apmt_node(cspmu->dev);
const struct impl_match *match = impl_match;
/*
* Get PMU implementer and product id from APMT node.
* If APMT node doesn't have implementer/product id, try get it
* from PMIIDR.
*/
cspmu->impl.pmiidr =
(apmt_node->impl_id) ? apmt_node->impl_id :
readl(cspmu->base0 + PMIIDR);
/* Find implementer specific attribute ops. */
for (; match->pmiidr; match++) {
const u32 mask = match->mask;
if ((match->pmiidr & mask) == (cspmu->impl.pmiidr & mask)) {
ret = match->impl_init_ops(cspmu);
if (ret)
return ret;
break;
}
}
/* Use default callbacks if implementer doesn't provide one. */
CHECK_DEFAULT_IMPL_OPS(impl_ops, get_event_attrs);
CHECK_DEFAULT_IMPL_OPS(impl_ops, get_format_attrs);
CHECK_DEFAULT_IMPL_OPS(impl_ops, get_identifier);
CHECK_DEFAULT_IMPL_OPS(impl_ops, get_name);
CHECK_DEFAULT_IMPL_OPS(impl_ops, is_cycle_counter_event);
CHECK_DEFAULT_IMPL_OPS(impl_ops, event_type);
CHECK_DEFAULT_IMPL_OPS(impl_ops, event_filter);
CHECK_DEFAULT_IMPL_OPS(impl_ops, event_attr_is_visible);
return 0;
}
static struct attribute_group *
arm_cspmu_alloc_event_attr_group(struct arm_cspmu *cspmu)
{
struct attribute_group *event_group;
struct device *dev = cspmu->dev;
const struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;
event_group =
devm_kzalloc(dev, sizeof(struct attribute_group), GFP_KERNEL);
if (!event_group)
return NULL;
event_group->name = "events";
event_group->is_visible = impl_ops->event_attr_is_visible;
event_group->attrs = impl_ops->get_event_attrs(cspmu);
if (!event_group->attrs)
return NULL;
return event_group;
}
static struct attribute_group *
arm_cspmu_alloc_format_attr_group(struct arm_cspmu *cspmu)
{
struct attribute_group *format_group;
struct device *dev = cspmu->dev;
format_group =
devm_kzalloc(dev, sizeof(struct attribute_group), GFP_KERNEL);
if (!format_group)
return NULL;
format_group->name = "format";
format_group->attrs = cspmu->impl.ops.get_format_attrs(cspmu);
if (!format_group->attrs)
return NULL;
return format_group;
}
static struct attribute_group **
arm_cspmu_alloc_attr_group(struct arm_cspmu *cspmu)
{
struct attribute_group **attr_groups = NULL;
struct device *dev = cspmu->dev;
const struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;
int ret;
ret = arm_cspmu_init_impl_ops(cspmu);
if (ret)
return NULL;
cspmu->identifier = impl_ops->get_identifier(cspmu);
cspmu->name = impl_ops->get_name(cspmu);
if (!cspmu->identifier || !cspmu->name)
return NULL;
attr_groups = devm_kcalloc(dev, 5, sizeof(struct attribute_group *),
GFP_KERNEL);
if (!attr_groups)
return NULL;
attr_groups[0] = arm_cspmu_alloc_event_attr_group(cspmu);
attr_groups[1] = arm_cspmu_alloc_format_attr_group(cspmu);
attr_groups[2] = &arm_cspmu_identifier_attr_group;
attr_groups[3] = &arm_cspmu_cpumask_attr_group;
if (!attr_groups[0] || !attr_groups[1])
return NULL;
return attr_groups;
}
static inline void arm_cspmu_reset_counters(struct arm_cspmu *cspmu)
{
u32 pmcr = 0;
pmcr |= PMCR_P;
pmcr |= PMCR_C;
writel(pmcr, cspmu->base0 + PMCR);
}
static inline void arm_cspmu_start_counters(struct arm_cspmu *cspmu)
{
writel(PMCR_E, cspmu->base0 + PMCR);
}
static inline void arm_cspmu_stop_counters(struct arm_cspmu *cspmu)
{
writel(0, cspmu->base0 + PMCR);
}
static void arm_cspmu_enable(struct pmu *pmu)
{
bool disabled;
struct arm_cspmu *cspmu = to_arm_cspmu(pmu);
disabled = bitmap_empty(cspmu->hw_events.used_ctrs,
cspmu->num_logical_ctrs);
if (disabled)
return;
arm_cspmu_start_counters(cspmu);
}
static void arm_cspmu_disable(struct pmu *pmu)
{
struct arm_cspmu *cspmu = to_arm_cspmu(pmu);
arm_cspmu_stop_counters(cspmu);
}
static int arm_cspmu_get_event_idx(struct arm_cspmu_hw_events *hw_events,
struct perf_event *event)
{
int idx;
struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
if (supports_cycle_counter(cspmu)) {
if (cspmu->impl.ops.is_cycle_counter_event(event)) {
/* Search for available cycle counter. */
if (test_and_set_bit(cspmu->cycle_counter_logical_idx,
hw_events->used_ctrs))
return -EAGAIN;
return cspmu->cycle_counter_logical_idx;
}
/*
 * Search for a regular counter in the used counter bitmap.
* The cycle counter divides the bitmap into two parts. Search
* the first then second half to exclude the cycle counter bit.
*/
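/*
 * Illustrative example, assuming ARM_CSPMU_CYCLE_CNTR_IDX is 31: with
 * 33 logical counters, the cycle counter occupies logical index 31, so
 * regular events are allocated from indices 0-30 first and then from
 * index 32 upward, skipping the cycle counter bit.
 */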
idx = find_first_zero_bit(hw_events->used_ctrs,
cspmu->cycle_counter_logical_idx);
if (idx >= cspmu->cycle_counter_logical_idx) {
idx = find_next_zero_bit(
hw_events->used_ctrs,
cspmu->num_logical_ctrs,
cspmu->cycle_counter_logical_idx + 1);
}
} else {
idx = find_first_zero_bit(hw_events->used_ctrs,
cspmu->num_logical_ctrs);
}
if (idx >= cspmu->num_logical_ctrs)
return -EAGAIN;
set_bit(idx, hw_events->used_ctrs);
return idx;
}
static bool arm_cspmu_validate_event(struct pmu *pmu,
struct arm_cspmu_hw_events *hw_events,
struct perf_event *event)
{
if (is_software_event(event))
return true;
/* Reject groups spanning multiple HW PMUs. */
if (event->pmu != pmu)
return false;
return (arm_cspmu_get_event_idx(hw_events, event) >= 0);
}
/*
* Make sure the group of events can be scheduled at once
* on the PMU.
*/
static bool arm_cspmu_validate_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
struct arm_cspmu_hw_events fake_hw_events;
if (event->group_leader == event)
return true;
memset(&fake_hw_events, 0, sizeof(fake_hw_events));
if (!arm_cspmu_validate_event(event->pmu, &fake_hw_events, leader))
return false;
for_each_sibling_event(sibling, leader) {
if (!arm_cspmu_validate_event(event->pmu, &fake_hw_events,
sibling))
return false;
}
return arm_cspmu_validate_event(event->pmu, &fake_hw_events, event);
}
static int arm_cspmu_event_init(struct perf_event *event)
{
struct arm_cspmu *cspmu;
struct hw_perf_event *hwc = &event->hw;
cspmu = to_arm_cspmu(event->pmu);
/*
* Following other "uncore" PMUs, we do not support sampling mode or
* attach to a task (per-process mode).
*/
if (is_sampling_event(event)) {
dev_dbg(cspmu->pmu.dev,
"Can't support sampling events\n");
return -EOPNOTSUPP;
}
if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) {
dev_dbg(cspmu->pmu.dev,
"Can't support per-task counters\n");
return -EINVAL;
}
/*
* Make sure the CPU assignment is on one of the CPUs associated with
* this PMU.
*/
if (!cpumask_test_cpu(event->cpu, &cspmu->associated_cpus)) {
dev_dbg(cspmu->pmu.dev,
"Requested cpu is not associated with the PMU\n");
return -EINVAL;
}
/* Enforce the current active CPU to handle the events in this PMU. */
event->cpu = cpumask_first(&cspmu->active_cpu);
if (event->cpu >= nr_cpu_ids)
return -EINVAL;
if (!arm_cspmu_validate_group(event))
return -EINVAL;
/*
* The logical counter id is tracked with hw_perf_event.extra_reg.idx.
* The physical counter id is tracked with hw_perf_event.idx.
* We don't assign an index until we actually place the event onto
* hardware. Use -1 to signify that we haven't decided where to put it
* yet.
*/
hwc->idx = -1;
hwc->extra_reg.idx = -1;
hwc->config = cspmu->impl.ops.event_type(event);
return 0;
}
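/*
 * Byte offset of a counter register within the counter page (base1),
 * e.g. with 64-bit counter registers, counter index 2 is at
 * PMEVCNTR_LO + 16.
 */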
static inline u32 counter_offset(u32 reg_sz, u32 ctr_idx)
{
return (PMEVCNTR_LO + (reg_sz * ctr_idx));
}
static void arm_cspmu_write_counter(struct perf_event *event, u64 val)
{
u32 offset;
struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
if (use_64b_counter_reg(cspmu)) {
offset = counter_offset(sizeof(u64), event->hw.idx);
writeq(val, cspmu->base1 + offset);
} else {
offset = counter_offset(sizeof(u32), event->hw.idx);
writel(lower_32_bits(val), cspmu->base1 + offset);
}
}
static u64 arm_cspmu_read_counter(struct perf_event *event)
{
u32 offset;
const void __iomem *counter_addr;
struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
if (use_64b_counter_reg(cspmu)) {
offset = counter_offset(sizeof(u64), event->hw.idx);
counter_addr = cspmu->base1 + offset;
return cspmu->has_atomic_dword ?
readq(counter_addr) :
read_reg64_hilohi(counter_addr, HILOHI_MAX_POLL);
}
offset = counter_offset(sizeof(u32), event->hw.idx);
return readl(cspmu->base1 + offset);
}
/*
* arm_cspmu_set_event_period: Set the period for the counter.
*
* To handle cases of extreme interrupt latency, we program
* the counter with half of the max count for the counters.
*/
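/*
 * For example, if counter_mask() is 0xffffffff (32-bit counters), the
 * counter is programmed to 0x7fffffff, leaving half of the counter
 * range before the next overflow interrupt.
 */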
static void arm_cspmu_set_event_period(struct perf_event *event)
{
struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
u64 val = counter_mask(cspmu) >> 1ULL;
local64_set(&event->hw.prev_count, val);
arm_cspmu_write_counter(event, val);
}
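/*
 * The counter-enable and interrupt-enable bits are banked across 32-bit
 * SET/CLR registers: COUNTER_TO_SET_CLR_ID() selects the register and
 * COUNTER_TO_SET_CLR_BIT() the bit within it. Assuming 32 counters per
 * register, counter 35 would map to register 1, bit 3.
 */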
static void arm_cspmu_enable_counter(struct arm_cspmu *cspmu, int idx)
{
u32 reg_id, reg_bit, inten_off, cnten_off;
reg_id = COUNTER_TO_SET_CLR_ID(idx);
reg_bit = COUNTER_TO_SET_CLR_BIT(idx);
inten_off = PMINTENSET + (4 * reg_id);
cnten_off = PMCNTENSET + (4 * reg_id);
writel(BIT(reg_bit), cspmu->base0 + inten_off);
writel(BIT(reg_bit), cspmu->base0 + cnten_off);
}
static void arm_cspmu_disable_counter(struct arm_cspmu *cspmu, int idx)
{
u32 reg_id, reg_bit, inten_off, cnten_off;
reg_id = COUNTER_TO_SET_CLR_ID(idx);
reg_bit = COUNTER_TO_SET_CLR_BIT(idx);
inten_off = PMINTENCLR + (4 * reg_id);
cnten_off = PMCNTENCLR + (4 * reg_id);
writel(BIT(reg_bit), cspmu->base0 + cnten_off);
writel(BIT(reg_bit), cspmu->base0 + inten_off);
}
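/*
 * Fold the latest hardware count into event->count. The cmpxchg loop
 * tolerates concurrent updates (e.g. from the overflow handler), and
 * masking the delta with counter_mask() handles wrap-around for
 * counters narrower than 64 bits.
 */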
static void arm_cspmu_event_update(struct perf_event *event)
{
struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u64 delta, prev, now;
do {
prev = local64_read(&hwc->prev_count);
now = arm_cspmu_read_counter(event);
} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
delta = (now - prev) & counter_mask(cspmu);
local64_add(delta, &event->count);
}
static inline void arm_cspmu_set_event(struct arm_cspmu *cspmu,
struct hw_perf_event *hwc)
{
u32 offset = PMEVTYPER + (4 * hwc->idx);
writel(hwc->config, cspmu->base0 + offset);
}
static inline void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
struct hw_perf_event *hwc,
u32 filter)
{
u32 offset = PMEVFILTR + (4 * hwc->idx);
writel(filter, cspmu->base0 + offset);
}
static inline void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu, u32 filter)
{
u32 offset = PMCCFILTR;
writel(filter, cspmu->base0 + offset);
}
static void arm_cspmu_start(struct perf_event *event, int pmu_flags)
{
struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u32 filter;
/* We always reprogram the counter */
if (pmu_flags & PERF_EF_RELOAD)
WARN_ON(!(hwc->state & PERF_HES_UPTODATE));
arm_cspmu_set_event_period(event);
filter = cspmu->impl.ops.event_filter(event);
if (event->hw.extra_reg.idx == cspmu->cycle_counter_logical_idx) {
arm_cspmu_set_cc_filter(cspmu, filter);
} else {
arm_cspmu_set_event(cspmu, hwc);
arm_cspmu_set_ev_filter(cspmu, hwc, filter);
}
hwc->state = 0;
arm_cspmu_enable_counter(cspmu, hwc->idx);
}
static void arm_cspmu_stop(struct perf_event *event, int pmu_flags)
{
struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
if (hwc->state & PERF_HES_STOPPED)
return;
arm_cspmu_disable_counter(cspmu, hwc->idx);
arm_cspmu_event_update(event);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
static inline u32 to_phys_idx(struct arm_cspmu *cspmu, u32 idx)
{
return (idx == cspmu->cycle_counter_logical_idx) ?
ARM_CSPMU_CYCLE_CNTR_IDX : idx;
}
static int arm_cspmu_add(struct perf_event *event, int flags)
{
struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
struct arm_cspmu_hw_events *hw_events = &cspmu->hw_events;
struct hw_perf_event *hwc = &event->hw;
int idx;
if (WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(),
&cspmu->associated_cpus)))
return -ENOENT;
idx = arm_cspmu_get_event_idx(hw_events, event);
if (idx < 0)
return idx;
hw_events->events[idx] = event;
hwc->idx = to_phys_idx(cspmu, idx);
hwc->extra_reg.idx = idx;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (flags & PERF_EF_START)
arm_cspmu_start(event, PERF_EF_RELOAD);
/* Propagate changes to the userspace mapping. */
perf_event_update_userpage(event);
return 0;
}
static void arm_cspmu_del(struct perf_event *event, int flags)
{
struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
struct arm_cspmu_hw_events *hw_events = &cspmu->hw_events;
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->extra_reg.idx;
arm_cspmu_stop(event, PERF_EF_UPDATE);
hw_events->events[idx] = NULL;
clear_bit(idx, hw_events->used_ctrs);
perf_event_update_userpage(event);
}
static void arm_cspmu_read(struct perf_event *event)
{
arm_cspmu_event_update(event);
}
static struct arm_cspmu *arm_cspmu_alloc(struct platform_device *pdev)
{
struct acpi_apmt_node *apmt_node;
struct arm_cspmu *cspmu;
struct device *dev = &pdev->dev;
cspmu = devm_kzalloc(dev, sizeof(*cspmu), GFP_KERNEL);
if (!cspmu)
return NULL;
cspmu->dev = dev;
platform_set_drvdata(pdev, cspmu);
apmt_node = arm_cspmu_apmt_node(dev);
cspmu->has_atomic_dword = apmt_node->flags & ACPI_APMT_FLAGS_ATOMIC;
return cspmu;
}
static int arm_cspmu_init_mmio(struct arm_cspmu *cspmu)
{
struct device *dev;
struct platform_device *pdev;
dev = cspmu->dev;
pdev = to_platform_device(dev);
/* Base address for page 0. */
cspmu->base0 = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cspmu->base0)) {
dev_err(dev, "ioremap failed for page-0 resource\n");
return PTR_ERR(cspmu->base0);
}
/* Base address for page 1 if supported. Otherwise point to page 0. */
cspmu->base1 = cspmu->base0;
if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) {
cspmu->base1 = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(cspmu->base1)) {
dev_err(dev, "ioremap failed for page-1 resource\n");
return PTR_ERR(cspmu->base1);
}
}
cspmu->pmcfgr = readl(cspmu->base0 + PMCFGR);
cspmu->num_logical_ctrs = FIELD_GET(PMCFGR_N, cspmu->pmcfgr) + 1;
cspmu->cycle_counter_logical_idx = ARM_CSPMU_MAX_HW_CNTRS;
if (supports_cycle_counter(cspmu)) {
/*
 * If there is a gap between the regular counters and the fixed
 * cycle counter position, map the cycle counter to the last
 * logical index. Otherwise, logical and physical indices have a
 * 1-to-1 mapping.
*/
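/*
 * E.g. assuming ARM_CSPMU_CYCLE_CNTR_IDX is 31: a PMU with 8 counters
 * maps the cycle counter to logical index 7, while a PMU with 40
 * counters keeps it at logical index 31.
 */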
cspmu->cycle_counter_logical_idx =
(cspmu->num_logical_ctrs <= ARM_CSPMU_CYCLE_CNTR_IDX) ?
cspmu->num_logical_ctrs - 1 :
ARM_CSPMU_CYCLE_CNTR_IDX;
}
cspmu->num_set_clr_reg =
DIV_ROUND_UP(cspmu->num_logical_ctrs,
ARM_CSPMU_SET_CLR_COUNTER_NUM);
cspmu->hw_events.events =
devm_kcalloc(dev, cspmu->num_logical_ctrs,
sizeof(*cspmu->hw_events.events), GFP_KERNEL);
if (!cspmu->hw_events.events)
return -ENOMEM;
return 0;
}
static inline int arm_cspmu_get_reset_overflow(struct arm_cspmu *cspmu,
u32 *pmovs)
{
int i;
u32 pmovclr_offset = PMOVSCLR;
u32 has_overflowed = 0;
for (i = 0; i < cspmu->num_set_clr_reg; ++i) {
pmovs[i] = readl(cspmu->base1 + pmovclr_offset);
has_overflowed |= pmovs[i];
writel(pmovs[i], cspmu->base1 + pmovclr_offset);
pmovclr_offset += sizeof(u32);
}
return has_overflowed != 0;
}
static irqreturn_t arm_cspmu_handle_irq(int irq_num, void *dev)
{
int idx, has_overflowed;
struct perf_event *event;
struct arm_cspmu *cspmu = dev;
DECLARE_BITMAP(pmovs, ARM_CSPMU_MAX_HW_CNTRS);
bool handled = false;
arm_cspmu_stop_counters(cspmu);
has_overflowed = arm_cspmu_get_reset_overflow(cspmu, (u32 *)pmovs);
if (!has_overflowed)
goto done;
for_each_set_bit(idx, cspmu->hw_events.used_ctrs,
cspmu->num_logical_ctrs) {
event = cspmu->hw_events.events[idx];
if (!event)
continue;
if (!test_bit(event->hw.idx, pmovs))
continue;
arm_cspmu_event_update(event);
arm_cspmu_set_event_period(event);
handled = true;
}
done:
arm_cspmu_start_counters(cspmu);
return IRQ_RETVAL(handled);
}
static int arm_cspmu_request_irq(struct arm_cspmu *cspmu)
{
int irq, ret;
struct device *dev;
struct platform_device *pdev;
dev = cspmu->dev;
pdev = to_platform_device(dev);
/* Skip IRQ request if the PMU does not support overflow interrupt. */
irq = platform_get_irq_optional(pdev, 0);
if (irq < 0)
return irq == -ENXIO ? 0 : irq;
ret = devm_request_irq(dev, irq, arm_cspmu_handle_irq,
IRQF_NOBALANCING | IRQF_NO_THREAD, dev_name(dev),
cspmu);
if (ret) {
dev_err(dev, "Could not request IRQ %d\n", irq);
return ret;
}
cspmu->irq = irq;
return 0;
}
#if defined(CONFIG_ACPI) && defined(CONFIG_ARM64)
#include <acpi/processor.h>
static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid)
{
u32 acpi_uid;
struct device *cpu_dev;
struct acpi_device *acpi_dev;
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev)
return -ENODEV;
acpi_dev = ACPI_COMPANION(cpu_dev);
while (acpi_dev) {
if (!strcmp(acpi_device_hid(acpi_dev),
ACPI_PROCESSOR_CONTAINER_HID) &&
!kstrtouint(acpi_device_uid(acpi_dev), 0, &acpi_uid) &&
acpi_uid == container_uid)
return 0;
acpi_dev = acpi_dev_parent(acpi_dev);
}
return -ENODEV;
}
static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
{
struct acpi_apmt_node *apmt_node;
int affinity_flag;
int cpu;
apmt_node = arm_cspmu_apmt_node(cspmu->dev);
affinity_flag = apmt_node->flags & ACPI_APMT_FLAGS_AFFINITY;
if (affinity_flag == ACPI_APMT_FLAGS_AFFINITY_PROC) {
for_each_possible_cpu(cpu) {
if (apmt_node->proc_affinity ==
get_acpi_id_for_cpu(cpu)) {
cpumask_set_cpu(cpu, &cspmu->associated_cpus);
break;
}
}
} else {
for_each_possible_cpu(cpu) {
if (arm_cspmu_find_cpu_container(
cpu, apmt_node->proc_affinity))
continue;
cpumask_set_cpu(cpu, &cspmu->associated_cpus);
}
}
if (cpumask_empty(&cspmu->associated_cpus)) {
dev_dbg(cspmu->dev, "No cpu associated with the PMU\n");
return -ENODEV;
}
return 0;
}
#else
static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
{
return -ENODEV;
}
#endif
static int arm_cspmu_get_cpus(struct arm_cspmu *cspmu)
{
return arm_cspmu_acpi_get_cpus(cspmu);
}
static int arm_cspmu_register_pmu(struct arm_cspmu *cspmu)
{
int ret, capabilities;
struct attribute_group **attr_groups;
attr_groups = arm_cspmu_alloc_attr_group(cspmu);
if (!attr_groups)
return -ENOMEM;
ret = cpuhp_state_add_instance(arm_cspmu_cpuhp_state,
&cspmu->cpuhp_node);
if (ret)
return ret;
capabilities = PERF_PMU_CAP_NO_EXCLUDE;
if (cspmu->irq == 0)
capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
cspmu->pmu = (struct pmu){
.task_ctx_nr = perf_invalid_context,
.module = THIS_MODULE,
.pmu_enable = arm_cspmu_enable,
.pmu_disable = arm_cspmu_disable,
.event_init = arm_cspmu_event_init,
.add = arm_cspmu_add,
.del = arm_cspmu_del,
.start = arm_cspmu_start,
.stop = arm_cspmu_stop,
.read = arm_cspmu_read,
.attr_groups = (const struct attribute_group **)attr_groups,
.capabilities = capabilities,
};
/* Hardware counter init */
arm_cspmu_stop_counters(cspmu);
arm_cspmu_reset_counters(cspmu);
ret = perf_pmu_register(&cspmu->pmu, cspmu->name, -1);
if (ret) {
cpuhp_state_remove_instance(arm_cspmu_cpuhp_state,
&cspmu->cpuhp_node);
}
return ret;
}
static int arm_cspmu_device_probe(struct platform_device *pdev)
{
int ret;
struct arm_cspmu *cspmu;
cspmu = arm_cspmu_alloc(pdev);
if (!cspmu)
return -ENOMEM;
ret = arm_cspmu_init_mmio(cspmu);
if (ret)
return ret;
ret = arm_cspmu_request_irq(cspmu);
if (ret)
return ret;
ret = arm_cspmu_get_cpus(cspmu);
if (ret)
return ret;
ret = arm_cspmu_register_pmu(cspmu);
if (ret)
return ret;
return 0;
}
static int arm_cspmu_device_remove(struct platform_device *pdev)
{
struct arm_cspmu *cspmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&cspmu->pmu);
cpuhp_state_remove_instance(arm_cspmu_cpuhp_state, &cspmu->cpuhp_node);
return 0;
}
static const struct platform_device_id arm_cspmu_id[] = {
{DRVNAME, 0},
{ },
};
MODULE_DEVICE_TABLE(platform, arm_cspmu_id);
static struct platform_driver arm_cspmu_driver = {
.driver = {
.name = DRVNAME,
.suppress_bind_attrs = true,
},
.probe = arm_cspmu_device_probe,
.remove = arm_cspmu_device_remove,
.id_table = arm_cspmu_id,
};
static void arm_cspmu_set_active_cpu(int cpu, struct arm_cspmu *cspmu)
{
cpumask_set_cpu(cpu, &cspmu->active_cpu);
if (cspmu->irq)
WARN_ON(irq_set_affinity(cspmu->irq, &cspmu->active_cpu));
}
static int arm_cspmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
struct arm_cspmu *cspmu =
hlist_entry_safe(node, struct arm_cspmu, cpuhp_node);
if (!cpumask_test_cpu(cpu, &cspmu->associated_cpus))
return 0;
/* If the PMU is already managed, there is nothing to do */
if (!cpumask_empty(&cspmu->active_cpu))
return 0;
/* Use this CPU for event counting */
arm_cspmu_set_active_cpu(cpu, cspmu);
return 0;
}
static int arm_cspmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
int dst;
struct cpumask online_supported;
struct arm_cspmu *cspmu =
hlist_entry_safe(node, struct arm_cspmu, cpuhp_node);
/* Nothing to do if this CPU doesn't own the PMU */
if (!cpumask_test_and_clear_cpu(cpu, &cspmu->active_cpu))
return 0;
/* Choose a new CPU to migrate ownership of the PMU to */
cpumask_and(&online_supported, &cspmu->associated_cpus,
cpu_online_mask);
dst = cpumask_any_but(&online_supported, cpu);
if (dst >= nr_cpu_ids)
return 0;
/* Use this CPU for event counting */
perf_pmu_migrate_context(&cspmu->pmu, cpu, dst);
arm_cspmu_set_active_cpu(dst, cspmu);
return 0;
}
static int __init arm_cspmu_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
"perf/arm/cspmu:online",
arm_cspmu_cpu_online,
arm_cspmu_cpu_teardown);
if (ret < 0)
return ret;
arm_cspmu_cpuhp_state = ret;
return platform_driver_register(&arm_cspmu_driver);
}
static void __exit arm_cspmu_exit(void)
{
platform_driver_unregister(&arm_cspmu_driver);
cpuhp_remove_multi_state(arm_cspmu_cpuhp_state);
}
module_init(arm_cspmu_init);
module_exit(arm_cspmu_exit);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/perf/arm_cspmu/arm_cspmu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
*/
/* Support for NVIDIA specific attributes. */
#include <linux/topology.h>
#include "nvidia_cspmu.h"
#define NV_PCIE_PORT_COUNT 10ULL
#define NV_PCIE_FILTER_ID_MASK GENMASK_ULL(NV_PCIE_PORT_COUNT - 1, 0)
#define NV_NVL_C2C_PORT_COUNT 2ULL
#define NV_NVL_C2C_FILTER_ID_MASK GENMASK_ULL(NV_NVL_C2C_PORT_COUNT - 1, 0)
#define NV_CNVL_PORT_COUNT 4ULL
#define NV_CNVL_FILTER_ID_MASK GENMASK_ULL(NV_CNVL_PORT_COUNT - 1, 0)
#define NV_GENERIC_FILTER_ID_MASK GENMASK_ULL(31, 0)
#define NV_PRODID_MASK GENMASK(31, 0)
#define NV_FORMAT_NAME_GENERIC 0
#define to_nv_cspmu_ctx(cspmu) ((struct nv_cspmu_ctx *)(cspmu->impl.ctx))
#define NV_CSPMU_EVENT_ATTR_4_INNER(_pref, _num, _suff, _config) \
ARM_CSPMU_EVENT_ATTR(_pref##_num##_suff, _config)
#define NV_CSPMU_EVENT_ATTR_4(_pref, _suff, _config) \
NV_CSPMU_EVENT_ATTR_4_INNER(_pref, _0_, _suff, _config), \
NV_CSPMU_EVENT_ATTR_4_INNER(_pref, _1_, _suff, _config + 1), \
NV_CSPMU_EVENT_ATTR_4_INNER(_pref, _2_, _suff, _config + 2), \
NV_CSPMU_EVENT_ATTR_4_INNER(_pref, _3_, _suff, _config + 3)
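/*
 * E.g. NV_CSPMU_EVENT_ATTR_4(socket, rd_data, 0x101) expands to the four
 * event attributes socket_0_rd_data .. socket_3_rd_data with config
 * values 0x101 .. 0x104.
 */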
struct nv_cspmu_ctx {
const char *name;
u32 filter_mask;
u32 filter_default_val;
struct attribute **event_attr;
struct attribute **format_attr;
};
static struct attribute *scf_pmu_event_attrs[] = {
ARM_CSPMU_EVENT_ATTR(bus_cycles, 0x1d),
ARM_CSPMU_EVENT_ATTR(scf_cache_allocate, 0xF0),
ARM_CSPMU_EVENT_ATTR(scf_cache_refill, 0xF1),
ARM_CSPMU_EVENT_ATTR(scf_cache, 0xF2),
ARM_CSPMU_EVENT_ATTR(scf_cache_wb, 0xF3),
NV_CSPMU_EVENT_ATTR_4(socket, rd_data, 0x101),
NV_CSPMU_EVENT_ATTR_4(socket, dl_rsp, 0x105),
NV_CSPMU_EVENT_ATTR_4(socket, wb_data, 0x109),
NV_CSPMU_EVENT_ATTR_4(socket, ev_rsp, 0x10d),
NV_CSPMU_EVENT_ATTR_4(socket, prb_data, 0x111),
NV_CSPMU_EVENT_ATTR_4(socket, rd_outstanding, 0x115),
NV_CSPMU_EVENT_ATTR_4(socket, dl_outstanding, 0x119),
NV_CSPMU_EVENT_ATTR_4(socket, wb_outstanding, 0x11d),
NV_CSPMU_EVENT_ATTR_4(socket, wr_outstanding, 0x121),
NV_CSPMU_EVENT_ATTR_4(socket, ev_outstanding, 0x125),
NV_CSPMU_EVENT_ATTR_4(socket, prb_outstanding, 0x129),
NV_CSPMU_EVENT_ATTR_4(socket, rd_access, 0x12d),
NV_CSPMU_EVENT_ATTR_4(socket, dl_access, 0x131),
NV_CSPMU_EVENT_ATTR_4(socket, wb_access, 0x135),
NV_CSPMU_EVENT_ATTR_4(socket, wr_access, 0x139),
NV_CSPMU_EVENT_ATTR_4(socket, ev_access, 0x13d),
NV_CSPMU_EVENT_ATTR_4(socket, prb_access, 0x141),
NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_data, 0x145),
NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_access, 0x149),
NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_access, 0x14d),
NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_outstanding, 0x151),
NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_outstanding, 0x155),
NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_data, 0x159),
NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_access, 0x15d),
NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_access, 0x161),
NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_outstanding, 0x165),
NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_outstanding, 0x169),
ARM_CSPMU_EVENT_ATTR(gmem_rd_data, 0x16d),
ARM_CSPMU_EVENT_ATTR(gmem_rd_access, 0x16e),
ARM_CSPMU_EVENT_ATTR(gmem_rd_outstanding, 0x16f),
ARM_CSPMU_EVENT_ATTR(gmem_dl_rsp, 0x170),
ARM_CSPMU_EVENT_ATTR(gmem_dl_access, 0x171),
ARM_CSPMU_EVENT_ATTR(gmem_dl_outstanding, 0x172),
ARM_CSPMU_EVENT_ATTR(gmem_wb_data, 0x173),
ARM_CSPMU_EVENT_ATTR(gmem_wb_access, 0x174),
ARM_CSPMU_EVENT_ATTR(gmem_wb_outstanding, 0x175),
ARM_CSPMU_EVENT_ATTR(gmem_ev_rsp, 0x176),
ARM_CSPMU_EVENT_ATTR(gmem_ev_access, 0x177),
ARM_CSPMU_EVENT_ATTR(gmem_ev_outstanding, 0x178),
ARM_CSPMU_EVENT_ATTR(gmem_wr_data, 0x179),
ARM_CSPMU_EVENT_ATTR(gmem_wr_outstanding, 0x17a),
ARM_CSPMU_EVENT_ATTR(gmem_wr_access, 0x17b),
NV_CSPMU_EVENT_ATTR_4(socket, wr_data, 0x17c),
NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_data, 0x180),
NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_data, 0x184),
NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_access, 0x188),
NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_outstanding, 0x18c),
NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_data, 0x190),
NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_data, 0x194),
NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_access, 0x198),
NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_outstanding, 0x19c),
ARM_CSPMU_EVENT_ATTR(gmem_wr_total_bytes, 0x1a0),
ARM_CSPMU_EVENT_ATTR(remote_socket_wr_total_bytes, 0x1a1),
ARM_CSPMU_EVENT_ATTR(remote_socket_rd_data, 0x1a2),
ARM_CSPMU_EVENT_ATTR(remote_socket_rd_outstanding, 0x1a3),
ARM_CSPMU_EVENT_ATTR(remote_socket_rd_access, 0x1a4),
ARM_CSPMU_EVENT_ATTR(cmem_rd_data, 0x1a5),
ARM_CSPMU_EVENT_ATTR(cmem_rd_access, 0x1a6),
ARM_CSPMU_EVENT_ATTR(cmem_rd_outstanding, 0x1a7),
ARM_CSPMU_EVENT_ATTR(cmem_dl_rsp, 0x1a8),
ARM_CSPMU_EVENT_ATTR(cmem_dl_access, 0x1a9),
ARM_CSPMU_EVENT_ATTR(cmem_dl_outstanding, 0x1aa),
ARM_CSPMU_EVENT_ATTR(cmem_wb_data, 0x1ab),
ARM_CSPMU_EVENT_ATTR(cmem_wb_access, 0x1ac),
ARM_CSPMU_EVENT_ATTR(cmem_wb_outstanding, 0x1ad),
ARM_CSPMU_EVENT_ATTR(cmem_ev_rsp, 0x1ae),
ARM_CSPMU_EVENT_ATTR(cmem_ev_access, 0x1af),
ARM_CSPMU_EVENT_ATTR(cmem_ev_outstanding, 0x1b0),
ARM_CSPMU_EVENT_ATTR(cmem_wr_data, 0x1b1),
ARM_CSPMU_EVENT_ATTR(cmem_wr_outstanding, 0x1b2),
NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_data, 0x1b3),
NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_access, 0x1b7),
NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_access, 0x1bb),
NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_outstanding, 0x1bf),
NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_outstanding, 0x1c3),
ARM_CSPMU_EVENT_ATTR(ocu_prb_access, 0x1c7),
ARM_CSPMU_EVENT_ATTR(ocu_prb_data, 0x1c8),
ARM_CSPMU_EVENT_ATTR(ocu_prb_outstanding, 0x1c9),
ARM_CSPMU_EVENT_ATTR(cmem_wr_access, 0x1ca),
NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_access, 0x1cb),
NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_data, 0x1cf),
NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_data, 0x1d3),
NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_outstanding, 0x1d7),
ARM_CSPMU_EVENT_ATTR(cmem_wr_total_bytes, 0x1db),
ARM_CSPMU_EVENT_ATTR(cycles, ARM_CSPMU_EVT_CYCLES_DEFAULT),
NULL,
};
static struct attribute *mcf_pmu_event_attrs[] = {
ARM_CSPMU_EVENT_ATTR(rd_bytes_loc, 0x0),
ARM_CSPMU_EVENT_ATTR(rd_bytes_rem, 0x1),
ARM_CSPMU_EVENT_ATTR(wr_bytes_loc, 0x2),
ARM_CSPMU_EVENT_ATTR(wr_bytes_rem, 0x3),
ARM_CSPMU_EVENT_ATTR(total_bytes_loc, 0x4),
ARM_CSPMU_EVENT_ATTR(total_bytes_rem, 0x5),
ARM_CSPMU_EVENT_ATTR(rd_req_loc, 0x6),
ARM_CSPMU_EVENT_ATTR(rd_req_rem, 0x7),
ARM_CSPMU_EVENT_ATTR(wr_req_loc, 0x8),
ARM_CSPMU_EVENT_ATTR(wr_req_rem, 0x9),
ARM_CSPMU_EVENT_ATTR(total_req_loc, 0xa),
ARM_CSPMU_EVENT_ATTR(total_req_rem, 0xb),
ARM_CSPMU_EVENT_ATTR(rd_cum_outs_loc, 0xc),
ARM_CSPMU_EVENT_ATTR(rd_cum_outs_rem, 0xd),
ARM_CSPMU_EVENT_ATTR(cycles, ARM_CSPMU_EVT_CYCLES_DEFAULT),
NULL,
};
static struct attribute *generic_pmu_event_attrs[] = {
ARM_CSPMU_EVENT_ATTR(cycles, ARM_CSPMU_EVT_CYCLES_DEFAULT),
NULL,
};
static struct attribute *scf_pmu_format_attrs[] = {
ARM_CSPMU_FORMAT_EVENT_ATTR,
NULL,
};
static struct attribute *pcie_pmu_format_attrs[] = {
ARM_CSPMU_FORMAT_EVENT_ATTR,
ARM_CSPMU_FORMAT_ATTR(root_port, "config1:0-9"),
NULL,
};
static struct attribute *nvlink_c2c_pmu_format_attrs[] = {
ARM_CSPMU_FORMAT_EVENT_ATTR,
NULL,
};
static struct attribute *cnvlink_pmu_format_attrs[] = {
ARM_CSPMU_FORMAT_EVENT_ATTR,
ARM_CSPMU_FORMAT_ATTR(rem_socket, "config1:0-3"),
NULL,
};
static struct attribute *generic_pmu_format_attrs[] = {
ARM_CSPMU_FORMAT_EVENT_ATTR,
ARM_CSPMU_FORMAT_FILTER_ATTR,
NULL,
};
static struct attribute **
nv_cspmu_get_event_attrs(const struct arm_cspmu *cspmu)
{
const struct nv_cspmu_ctx *ctx = to_nv_cspmu_ctx(cspmu);
return ctx->event_attr;
}
static struct attribute **
nv_cspmu_get_format_attrs(const struct arm_cspmu *cspmu)
{
const struct nv_cspmu_ctx *ctx = to_nv_cspmu_ctx(cspmu);
return ctx->format_attr;
}
static const char *
nv_cspmu_get_name(const struct arm_cspmu *cspmu)
{
const struct nv_cspmu_ctx *ctx = to_nv_cspmu_ctx(cspmu);
return ctx->name;
}
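/*
 * PMUs with a filter field (e.g. the PCIe PMU's "root_port" bits in
 * config1) pass the user-supplied value through the mask; PMUs without
 * one fall back to a fixed default. An illustrative invocation, using
 * names from the tables below, might look like:
 *   perf stat -e nvidia_pcie_pmu_0/rd_bytes_loc,root_port=0x1/
 */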
static u32 nv_cspmu_event_filter(const struct perf_event *event)
{
const struct nv_cspmu_ctx *ctx =
to_nv_cspmu_ctx(to_arm_cspmu(event->pmu));
if (ctx->filter_mask == 0)
return ctx->filter_default_val;
return event->attr.config1 & ctx->filter_mask;
}
enum nv_cspmu_name_fmt {
NAME_FMT_GENERIC,
NAME_FMT_SOCKET
};
struct nv_cspmu_match {
u32 prodid;
u32 prodid_mask;
u64 filter_mask;
u32 filter_default_val;
const char *name_pattern;
enum nv_cspmu_name_fmt name_fmt;
struct attribute **event_attr;
struct attribute **format_attr;
};
static const struct nv_cspmu_match nv_cspmu_match[] = {
{
.prodid = 0x103,
.prodid_mask = NV_PRODID_MASK,
.filter_mask = NV_PCIE_FILTER_ID_MASK,
.filter_default_val = NV_PCIE_FILTER_ID_MASK,
.name_pattern = "nvidia_pcie_pmu_%u",
.name_fmt = NAME_FMT_SOCKET,
.event_attr = mcf_pmu_event_attrs,
.format_attr = pcie_pmu_format_attrs
},
{
.prodid = 0x104,
.prodid_mask = NV_PRODID_MASK,
.filter_mask = 0x0,
.filter_default_val = NV_NVL_C2C_FILTER_ID_MASK,
.name_pattern = "nvidia_nvlink_c2c1_pmu_%u",
.name_fmt = NAME_FMT_SOCKET,
.event_attr = mcf_pmu_event_attrs,
.format_attr = nvlink_c2c_pmu_format_attrs
},
{
.prodid = 0x105,
.prodid_mask = NV_PRODID_MASK,
.filter_mask = 0x0,
.filter_default_val = NV_NVL_C2C_FILTER_ID_MASK,
.name_pattern = "nvidia_nvlink_c2c0_pmu_%u",
.name_fmt = NAME_FMT_SOCKET,
.event_attr = mcf_pmu_event_attrs,
.format_attr = nvlink_c2c_pmu_format_attrs
},
{
.prodid = 0x106,
.prodid_mask = NV_PRODID_MASK,
.filter_mask = NV_CNVL_FILTER_ID_MASK,
.filter_default_val = NV_CNVL_FILTER_ID_MASK,
.name_pattern = "nvidia_cnvlink_pmu_%u",
.name_fmt = NAME_FMT_SOCKET,
.event_attr = mcf_pmu_event_attrs,
.format_attr = cnvlink_pmu_format_attrs
},
{
.prodid = 0x2CF,
.prodid_mask = NV_PRODID_MASK,
.filter_mask = 0x0,
.filter_default_val = 0x0,
.name_pattern = "nvidia_scf_pmu_%u",
.name_fmt = NAME_FMT_SOCKET,
.event_attr = scf_pmu_event_attrs,
.format_attr = scf_pmu_format_attrs
},
{
.prodid = 0,
.prodid_mask = 0,
.filter_mask = NV_GENERIC_FILTER_ID_MASK,
.filter_default_val = NV_GENERIC_FILTER_ID_MASK,
.name_pattern = "nvidia_uncore_pmu_%u",
.name_fmt = NAME_FMT_GENERIC,
.event_attr = generic_pmu_event_attrs,
.format_attr = generic_pmu_format_attrs
},
};
static char *nv_cspmu_format_name(const struct arm_cspmu *cspmu,
const struct nv_cspmu_match *match)
{
char *name;
struct device *dev = cspmu->dev;
static atomic_t pmu_generic_idx = {0};
switch (match->name_fmt) {
case NAME_FMT_SOCKET: {
const int cpu = cpumask_first(&cspmu->associated_cpus);
const int socket = cpu_to_node(cpu);
name = devm_kasprintf(dev, GFP_KERNEL, match->name_pattern,
socket);
break;
}
case NAME_FMT_GENERIC:
name = devm_kasprintf(dev, GFP_KERNEL, match->name_pattern,
atomic_fetch_inc(&pmu_generic_idx));
break;
default:
name = NULL;
break;
}
return name;
}
int nv_cspmu_init_ops(struct arm_cspmu *cspmu)
{
u32 prodid;
struct nv_cspmu_ctx *ctx;
struct device *dev = cspmu->dev;
struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;
const struct nv_cspmu_match *match = nv_cspmu_match;
ctx = devm_kzalloc(dev, sizeof(struct nv_cspmu_ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
prodid = FIELD_GET(ARM_CSPMU_PMIIDR_PRODUCTID, cspmu->impl.pmiidr);
/* Find matching PMU. */
for (; match->prodid; match++) {
const u32 prodid_mask = match->prodid_mask;
if ((match->prodid & prodid_mask) == (prodid & prodid_mask))
break;
}
ctx->name = nv_cspmu_format_name(cspmu, match);
ctx->filter_mask = match->filter_mask;
ctx->filter_default_val = match->filter_default_val;
ctx->event_attr = match->event_attr;
ctx->format_attr = match->format_attr;
cspmu->impl.ctx = ctx;
/* NVIDIA specific callbacks. */
impl_ops->event_filter = nv_cspmu_event_filter;
impl_ops->get_event_attrs = nv_cspmu_get_event_attrs;
impl_ops->get_format_attrs = nv_cspmu_get_format_attrs;
impl_ops->get_name = nv_cspmu_get_name;
/* Set the rest to NULL to use the default callbacks. */
impl_ops->event_type = NULL;
impl_ops->event_attr_is_visible = NULL;
impl_ops->get_identifier = NULL;
impl_ops->is_cycle_counter_event = NULL;
return 0;
}
EXPORT_SYMBOL_GPL(nv_cspmu_init_ops);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/perf/arm_cspmu/nvidia_cspmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HiSilicon SoC HHA uncore Hardware event counters support
*
* Copyright (C) 2017 HiSilicon Limited
* Author: Shaokun Zhang <[email protected]>
* Anurup M <[email protected]>
*
* This code is based on the uncore PMUs like arm-cci and arm-ccn.
*/
#include <linux/acpi.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/smp.h>
#include "hisi_uncore_pmu.h"
/* HHA register definition */
#define HHA_INT_MASK 0x0804
#define HHA_INT_STATUS 0x0808
#define HHA_INT_CLEAR 0x080C
#define HHA_VERSION 0x1cf0
#define HHA_PERF_CTRL 0x1E00
#define HHA_EVENT_CTRL 0x1E04
#define HHA_SRCID_CTRL 0x1E08
#define HHA_DATSRC_CTRL 0x1BF0
#define HHA_EVENT_TYPE0 0x1E80
/*
* If the HW version only supports a 48-bit counter, then
* bits [63:48] are reserved, which are Read-As-Zero and
* Writes-Ignored.
*/
#define HHA_CNT0_LOWER 0x1F00
/* HHA PMU v1 has 16 counters and v2 only has 8 counters */
#define HHA_V1_NR_COUNTERS 0x10
#define HHA_V2_NR_COUNTERS 0x8
#define HHA_PERF_CTRL_EN 0x1
#define HHA_TRACETAG_EN BIT(31)
#define HHA_SRCID_EN BIT(2)
#define HHA_SRCID_CMD_SHIFT 6
#define HHA_SRCID_MSK_SHIFT 20
#define HHA_SRCID_CMD GENMASK(16, 6)
#define HHA_SRCID_MSK GENMASK(30, 20)
#define HHA_DATSRC_SKT_EN BIT(23)
#define HHA_EVTYPE_NONE 0xff
#define HHA_V1_NR_EVENT 0x65
#define HHA_V2_NR_EVENT 0xCE
HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 10, 0);
HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 21, 11);
HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 22, 22);
HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_skt, config1, 23, 23);
static void hisi_hha_pmu_enable_tracetag(struct perf_event *event)
{
struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
u32 tt_en = hisi_get_tracetag_en(event);
if (tt_en) {
u32 val;
val = readl(hha_pmu->base + HHA_SRCID_CTRL);
val |= HHA_TRACETAG_EN;
writel(val, hha_pmu->base + HHA_SRCID_CTRL);
}
}
static void hisi_hha_pmu_clear_tracetag(struct perf_event *event)
{
struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
u32 val;
val = readl(hha_pmu->base + HHA_SRCID_CTRL);
val &= ~HHA_TRACETAG_EN;
writel(val, hha_pmu->base + HHA_SRCID_CTRL);
}
static void hisi_hha_pmu_config_ds(struct perf_event *event)
{
struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
u32 ds_skt = hisi_get_datasrc_skt(event);
if (ds_skt) {
u32 val;
val = readl(hha_pmu->base + HHA_DATSRC_CTRL);
val |= HHA_DATSRC_SKT_EN;
writel(val, hha_pmu->base + HHA_DATSRC_CTRL);
}
}
static void hisi_hha_pmu_clear_ds(struct perf_event *event)
{
struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
u32 ds_skt = hisi_get_datasrc_skt(event);
if (ds_skt) {
u32 val;
val = readl(hha_pmu->base + HHA_DATSRC_CTRL);
val &= ~HHA_DATSRC_SKT_EN;
writel(val, hha_pmu->base + HHA_DATSRC_CTRL);
}
}
static void hisi_hha_pmu_config_srcid(struct perf_event *event)
{
struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
u32 cmd = hisi_get_srcid_cmd(event);
if (cmd) {
u32 val, msk;
msk = hisi_get_srcid_msk(event);
val = readl(hha_pmu->base + HHA_SRCID_CTRL);
val |= HHA_SRCID_EN | (cmd << HHA_SRCID_CMD_SHIFT) |
(msk << HHA_SRCID_MSK_SHIFT);
writel(val, hha_pmu->base + HHA_SRCID_CTRL);
}
}
static void hisi_hha_pmu_disable_srcid(struct perf_event *event)
{
struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
u32 cmd = hisi_get_srcid_cmd(event);
if (cmd) {
u32 val;
val = readl(hha_pmu->base + HHA_SRCID_CTRL);
val &= ~(HHA_SRCID_EN | HHA_SRCID_MSK | HHA_SRCID_CMD);
writel(val, hha_pmu->base + HHA_SRCID_CTRL);
}
}
static void hisi_hha_pmu_enable_filter(struct perf_event *event)
{
if (event->attr.config1 != 0x0) {
hisi_hha_pmu_enable_tracetag(event);
hisi_hha_pmu_config_ds(event);
hisi_hha_pmu_config_srcid(event);
}
}
static void hisi_hha_pmu_disable_filter(struct perf_event *event)
{
if (event->attr.config1 != 0x0) {
hisi_hha_pmu_disable_srcid(event);
hisi_hha_pmu_clear_ds(event);
hisi_hha_pmu_clear_tracetag(event);
}
}
/*
 * Select the counter register offset using the counter index.
 * Each counter is 48 bits wide.
*/
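/* E.g. counter index 3 is accessed at HHA_CNT0_LOWER + 0x18. */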
static u32 hisi_hha_pmu_get_counter_offset(int cntr_idx)
{
return (HHA_CNT0_LOWER + (cntr_idx * 8));
}
static u64 hisi_hha_pmu_read_counter(struct hisi_pmu *hha_pmu,
struct hw_perf_event *hwc)
{
	/* Read 64 bits; as with L3C, the top 16 bits are RAZ on 48-bit hardware */
return readq(hha_pmu->base + hisi_hha_pmu_get_counter_offset(hwc->idx));
}
static void hisi_hha_pmu_write_counter(struct hisi_pmu *hha_pmu,
struct hw_perf_event *hwc, u64 val)
{
	/* Write 64 bits; as with L3C, the top 16 bits are WI on 48-bit hardware */
writeq(val, hha_pmu->base + hisi_hha_pmu_get_counter_offset(hwc->idx));
}
static void hisi_hha_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
u32 type)
{
u32 reg, reg_idx, shift, val;
/*
 * Select the appropriate event select register (HHA_EVENT_TYPEx).
* There are 4 event select registers for the 16 hardware counters.
* Event code is 8-bits and for the first 4 hardware counters,
* HHA_EVENT_TYPE0 is chosen. For the next 4 hardware counters,
* HHA_EVENT_TYPE1 is chosen and so on.
*/
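/*
 * E.g. counter index 5 uses HHA_EVENT_TYPE0 + 4 (i.e. HHA_EVENT_TYPE1)
 * with its event code placed at bit shift 8.
 */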
reg = HHA_EVENT_TYPE0 + 4 * (idx / 4);
reg_idx = idx % 4;
shift = 8 * reg_idx;
/* Write event code to HHA_EVENT_TYPEx register */
val = readl(hha_pmu->base + reg);
val &= ~(HHA_EVTYPE_NONE << shift);
val |= (type << shift);
writel(val, hha_pmu->base + reg);
}
static void hisi_hha_pmu_start_counters(struct hisi_pmu *hha_pmu)
{
u32 val;
/*
* Set perf_enable bit in HHA_PERF_CTRL to start event
* counting for all enabled counters.
*/
val = readl(hha_pmu->base + HHA_PERF_CTRL);
val |= HHA_PERF_CTRL_EN;
writel(val, hha_pmu->base + HHA_PERF_CTRL);
}
static void hisi_hha_pmu_stop_counters(struct hisi_pmu *hha_pmu)
{
u32 val;
/*
* Clear perf_enable bit in HHA_PERF_CTRL to stop event
* counting for all enabled counters.
*/
val = readl(hha_pmu->base + HHA_PERF_CTRL);
val &= ~(HHA_PERF_CTRL_EN);
writel(val, hha_pmu->base + HHA_PERF_CTRL);
}
static void hisi_hha_pmu_enable_counter(struct hisi_pmu *hha_pmu,
struct hw_perf_event *hwc)
{
u32 val;
/* Enable counter index in HHA_EVENT_CTRL register */
val = readl(hha_pmu->base + HHA_EVENT_CTRL);
val |= (1 << hwc->idx);
writel(val, hha_pmu->base + HHA_EVENT_CTRL);
}
static void hisi_hha_pmu_disable_counter(struct hisi_pmu *hha_pmu,
struct hw_perf_event *hwc)
{
u32 val;
/* Clear counter index in HHA_EVENT_CTRL register */
val = readl(hha_pmu->base + HHA_EVENT_CTRL);
val &= ~(1 << hwc->idx);
writel(val, hha_pmu->base + HHA_EVENT_CTRL);
}
static void hisi_hha_pmu_enable_counter_int(struct hisi_pmu *hha_pmu,
struct hw_perf_event *hwc)
{
u32 val;
/* Write 0 to enable interrupt */
val = readl(hha_pmu->base + HHA_INT_MASK);
val &= ~(1 << hwc->idx);
writel(val, hha_pmu->base + HHA_INT_MASK);
}
static void hisi_hha_pmu_disable_counter_int(struct hisi_pmu *hha_pmu,
struct hw_perf_event *hwc)
{
u32 val;
/* Write 1 to mask interrupt */
val = readl(hha_pmu->base + HHA_INT_MASK);
val |= (1 << hwc->idx);
writel(val, hha_pmu->base + HHA_INT_MASK);
}
static u32 hisi_hha_pmu_get_int_status(struct hisi_pmu *hha_pmu)
{
return readl(hha_pmu->base + HHA_INT_STATUS);
}
static void hisi_hha_pmu_clear_int_status(struct hisi_pmu *hha_pmu, int idx)
{
writel(1 << idx, hha_pmu->base + HHA_INT_CLEAR);
}
static const struct acpi_device_id hisi_hha_pmu_acpi_match[] = {
{ "HISI0243", },
{ "HISI0244", },
{}
};
MODULE_DEVICE_TABLE(acpi, hisi_hha_pmu_acpi_match);
static int hisi_hha_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *hha_pmu)
{
unsigned long long id;
acpi_status status;
/*
* Use SCCL_ID and UID to identify the HHA PMU, while
* SCCL_ID is in MPIDR[aff2].
*/
if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
&hha_pmu->sccl_id)) {
dev_err(&pdev->dev, "Can not read hha sccl-id!\n");
return -EINVAL;
}
/*
 * Early versions of the BIOS exposed _UID by mistake, so support both:
 * prefer "hisilicon,idx-id" if available and fall back to _UID otherwise.
*/
if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
&hha_pmu->index_id)) {
status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
"_UID", NULL, &id);
if (ACPI_FAILURE(status)) {
dev_err(&pdev->dev, "Cannot read idx-id!\n");
return -EINVAL;
}
hha_pmu->index_id = id;
}
/* HHA PMUs share only the same SCCL; there is no CCL association */
hha_pmu->ccl_id = -1;
hha_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hha_pmu->base)) {
dev_err(&pdev->dev, "ioremap failed for hha_pmu resource\n");
return PTR_ERR(hha_pmu->base);
}
hha_pmu->identifier = readl(hha_pmu->base + HHA_VERSION);
return 0;
}
static struct attribute *hisi_hha_pmu_v1_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
NULL,
};
static const struct attribute_group hisi_hha_pmu_v1_format_group = {
.name = "format",
.attrs = hisi_hha_pmu_v1_format_attr,
};
static struct attribute *hisi_hha_pmu_v2_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
HISI_PMU_FORMAT_ATTR(srcid_cmd, "config1:0-10"),
HISI_PMU_FORMAT_ATTR(srcid_msk, "config1:11-21"),
HISI_PMU_FORMAT_ATTR(tracetag_en, "config1:22"),
HISI_PMU_FORMAT_ATTR(datasrc_skt, "config1:23"),
NULL
};
static const struct attribute_group hisi_hha_pmu_v2_format_group = {
.name = "format",
.attrs = hisi_hha_pmu_v2_format_attr,
};
static struct attribute *hisi_hha_pmu_v1_events_attr[] = {
HISI_PMU_EVENT_ATTR(rx_ops_num, 0x00),
HISI_PMU_EVENT_ATTR(rx_outer, 0x01),
HISI_PMU_EVENT_ATTR(rx_sccl, 0x02),
HISI_PMU_EVENT_ATTR(rx_ccix, 0x03),
HISI_PMU_EVENT_ATTR(rx_wbi, 0x04),
HISI_PMU_EVENT_ATTR(rx_wbip, 0x05),
HISI_PMU_EVENT_ATTR(rx_wtistash, 0x11),
HISI_PMU_EVENT_ATTR(rd_ddr_64b, 0x1c),
HISI_PMU_EVENT_ATTR(wr_ddr_64b, 0x1d),
HISI_PMU_EVENT_ATTR(rd_ddr_128b, 0x1e),
HISI_PMU_EVENT_ATTR(wr_ddr_128b, 0x1f),
HISI_PMU_EVENT_ATTR(spill_num, 0x20),
HISI_PMU_EVENT_ATTR(spill_success, 0x21),
HISI_PMU_EVENT_ATTR(bi_num, 0x23),
HISI_PMU_EVENT_ATTR(mediated_num, 0x32),
HISI_PMU_EVENT_ATTR(tx_snp_num, 0x33),
HISI_PMU_EVENT_ATTR(tx_snp_outer, 0x34),
HISI_PMU_EVENT_ATTR(tx_snp_ccix, 0x35),
HISI_PMU_EVENT_ATTR(rx_snprspdata, 0x38),
HISI_PMU_EVENT_ATTR(rx_snprsp_outer, 0x3c),
HISI_PMU_EVENT_ATTR(sdir-lookup, 0x40),
HISI_PMU_EVENT_ATTR(edir-lookup, 0x41),
HISI_PMU_EVENT_ATTR(sdir-hit, 0x42),
HISI_PMU_EVENT_ATTR(edir-hit, 0x43),
HISI_PMU_EVENT_ATTR(sdir-home-migrate, 0x4c),
HISI_PMU_EVENT_ATTR(edir-home-migrate, 0x4d),
NULL,
};
static const struct attribute_group hisi_hha_pmu_v1_events_group = {
.name = "events",
.attrs = hisi_hha_pmu_v1_events_attr,
};
static struct attribute *hisi_hha_pmu_v2_events_attr[] = {
HISI_PMU_EVENT_ATTR(rx_ops_num, 0x00),
HISI_PMU_EVENT_ATTR(rx_outer, 0x01),
HISI_PMU_EVENT_ATTR(rx_sccl, 0x02),
HISI_PMU_EVENT_ATTR(hha_retry, 0x2e),
HISI_PMU_EVENT_ATTR(cycles, 0x55),
NULL
};
static const struct attribute_group hisi_hha_pmu_v2_events_group = {
.name = "events",
.attrs = hisi_hha_pmu_v2_events_attr,
};
static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
static struct attribute *hisi_hha_pmu_cpumask_attrs[] = {
&dev_attr_cpumask.attr,
NULL,
};
static const struct attribute_group hisi_hha_pmu_cpumask_attr_group = {
.attrs = hisi_hha_pmu_cpumask_attrs,
};
static struct device_attribute hisi_hha_pmu_identifier_attr =
__ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
static struct attribute *hisi_hha_pmu_identifier_attrs[] = {
&hisi_hha_pmu_identifier_attr.attr,
NULL
};
static const struct attribute_group hisi_hha_pmu_identifier_group = {
.attrs = hisi_hha_pmu_identifier_attrs,
};
static const struct attribute_group *hisi_hha_pmu_v1_attr_groups[] = {
&hisi_hha_pmu_v1_format_group,
&hisi_hha_pmu_v1_events_group,
&hisi_hha_pmu_cpumask_attr_group,
&hisi_hha_pmu_identifier_group,
NULL,
};
static const struct attribute_group *hisi_hha_pmu_v2_attr_groups[] = {
&hisi_hha_pmu_v2_format_group,
&hisi_hha_pmu_v2_events_group,
&hisi_hha_pmu_cpumask_attr_group,
&hisi_hha_pmu_identifier_group,
NULL
};
static const struct hisi_uncore_ops hisi_uncore_hha_ops = {
.write_evtype = hisi_hha_pmu_write_evtype,
.get_event_idx = hisi_uncore_pmu_get_event_idx,
.start_counters = hisi_hha_pmu_start_counters,
.stop_counters = hisi_hha_pmu_stop_counters,
.enable_counter = hisi_hha_pmu_enable_counter,
.disable_counter = hisi_hha_pmu_disable_counter,
.enable_counter_int = hisi_hha_pmu_enable_counter_int,
.disable_counter_int = hisi_hha_pmu_disable_counter_int,
.write_counter = hisi_hha_pmu_write_counter,
.read_counter = hisi_hha_pmu_read_counter,
.get_int_status = hisi_hha_pmu_get_int_status,
.clear_int_status = hisi_hha_pmu_clear_int_status,
.enable_filter = hisi_hha_pmu_enable_filter,
.disable_filter = hisi_hha_pmu_disable_filter,
};
static int hisi_hha_pmu_dev_probe(struct platform_device *pdev,
struct hisi_pmu *hha_pmu)
{
int ret;
ret = hisi_hha_pmu_init_data(pdev, hha_pmu);
if (ret)
return ret;
ret = hisi_uncore_pmu_init_irq(hha_pmu, pdev);
if (ret)
return ret;
if (hha_pmu->identifier >= HISI_PMU_V2) {
hha_pmu->counter_bits = 64;
hha_pmu->check_event = HHA_V2_NR_EVENT;
hha_pmu->pmu_events.attr_groups = hisi_hha_pmu_v2_attr_groups;
hha_pmu->num_counters = HHA_V2_NR_COUNTERS;
} else {
hha_pmu->counter_bits = 48;
hha_pmu->check_event = HHA_V1_NR_EVENT;
hha_pmu->pmu_events.attr_groups = hisi_hha_pmu_v1_attr_groups;
hha_pmu->num_counters = HHA_V1_NR_COUNTERS;
}
hha_pmu->ops = &hisi_uncore_hha_ops;
hha_pmu->dev = &pdev->dev;
hha_pmu->on_cpu = -1;
return 0;
}
static int hisi_hha_pmu_probe(struct platform_device *pdev)
{
struct hisi_pmu *hha_pmu;
char *name;
int ret;
hha_pmu = devm_kzalloc(&pdev->dev, sizeof(*hha_pmu), GFP_KERNEL);
if (!hha_pmu)
return -ENOMEM;
platform_set_drvdata(pdev, hha_pmu);
ret = hisi_hha_pmu_dev_probe(pdev, hha_pmu);
if (ret)
return ret;
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
hha_pmu->sccl_id, hha_pmu->index_id);
if (!name)
return -ENOMEM;
ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
&hha_pmu->node);
if (ret) {
dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
return ret;
}
hisi_pmu_init(hha_pmu, THIS_MODULE);
ret = perf_pmu_register(&hha_pmu->pmu, name, -1);
if (ret) {
dev_err(hha_pmu->dev, "HHA PMU register failed!\n");
cpuhp_state_remove_instance_nocalls(
CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, &hha_pmu->node);
}
return ret;
}
static int hisi_hha_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *hha_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&hha_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
&hha_pmu->node);
return 0;
}
static struct platform_driver hisi_hha_pmu_driver = {
.driver = {
.name = "hisi_hha_pmu",
.acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match),
.suppress_bind_attrs = true,
},
.probe = hisi_hha_pmu_probe,
.remove = hisi_hha_pmu_remove,
};
static int __init hisi_hha_pmu_module_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
"AP_PERF_ARM_HISI_HHA_ONLINE",
hisi_uncore_pmu_online_cpu,
hisi_uncore_pmu_offline_cpu);
if (ret) {
pr_err("HHA PMU: Error setup hotplug, ret = %d;\n", ret);
return ret;
}
ret = platform_driver_register(&hisi_hha_pmu_driver);
if (ret)
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE);
return ret;
}
module_init(hisi_hha_pmu_module_init);
static void __exit hisi_hha_pmu_module_exit(void)
{
platform_driver_unregister(&hisi_hha_pmu_driver);
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE);
}
module_exit(hisi_hha_pmu_module_exit);
MODULE_DESCRIPTION("HiSilicon SoC HHA uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <[email protected]>");
MODULE_AUTHOR("Anurup M <[email protected]>");
| linux-master | drivers/perf/hisilicon/hisi_uncore_hha_pmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HiSilicon SoC L3C uncore Hardware event counters support
*
* Copyright (C) 2017 HiSilicon Limited
* Author: Anurup M <[email protected]>
* Shaokun Zhang <[email protected]>
*
* This code is based on the uncore PMUs like arm-cci and arm-ccn.
*/
#include <linux/acpi.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/smp.h>
#include "hisi_uncore_pmu.h"
/* L3C register definition */
#define L3C_PERF_CTRL 0x0408
#define L3C_INT_MASK 0x0800
#define L3C_INT_STATUS 0x0808
#define L3C_INT_CLEAR 0x080c
#define L3C_CORE_CTRL 0x1b04
#define L3C_TRACETAG_CTRL 0x1b20
#define L3C_DATSRC_TYPE 0x1b48
#define L3C_DATSRC_CTRL 0x1bf0
#define L3C_EVENT_CTRL 0x1c00
#define L3C_VERSION 0x1cf0
#define L3C_EVENT_TYPE0 0x1d00
/*
* If the HW version only supports a 48-bit counter, then
* bits [63:48] are reserved, which are Read-As-Zero and
* Writes-Ignored.
*/
#define L3C_CNTR0_LOWER 0x1e00
/* L3C has 8-counters */
#define L3C_NR_COUNTERS 0x8
#define L3C_PERF_CTRL_EN 0x10000
#define L3C_TRACETAG_EN BIT(31)
#define L3C_TRACETAG_REQ_SHIFT 7
#define L3C_TRACETAG_MARK_EN BIT(0)
#define L3C_TRACETAG_REQ_EN (L3C_TRACETAG_MARK_EN | BIT(2))
#define L3C_TRACETAG_CORE_EN (L3C_TRACETAG_MARK_EN | BIT(3))
#define L3C_CORE_EN BIT(20)
#define L3C_COER_NONE 0x0
#define L3C_DATSRC_MASK 0xFF
#define L3C_DATSRC_SKT_EN BIT(23)
#define L3C_DATSRC_NONE 0x0
#define L3C_EVTYPE_NONE 0xff
#define L3C_V1_NR_EVENTS 0x59
#define L3C_V2_NR_EVENTS 0xFF
HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_core, config1, 7, 0);
HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_req, config1, 10, 8);
HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_cfg, config1, 15, 11);
HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_skt, config1, 16, 16);
static void hisi_l3c_pmu_config_req_tracetag(struct perf_event *event)
{
struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
u32 tt_req = hisi_get_tt_req(event);
if (tt_req) {
u32 val;
/* Set request-type for tracetag */
val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
val |= tt_req << L3C_TRACETAG_REQ_SHIFT;
val |= L3C_TRACETAG_REQ_EN;
writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
/* Enable request-tracetag statistics */
val = readl(l3c_pmu->base + L3C_PERF_CTRL);
val |= L3C_TRACETAG_EN;
writel(val, l3c_pmu->base + L3C_PERF_CTRL);
}
}
static void hisi_l3c_pmu_clear_req_tracetag(struct perf_event *event)
{
struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
u32 tt_req = hisi_get_tt_req(event);
if (tt_req) {
u32 val;
/* Clear request-type */
val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
val &= ~(tt_req << L3C_TRACETAG_REQ_SHIFT);
val &= ~L3C_TRACETAG_REQ_EN;
writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
/* Disable request-tracetag statistics */
val = readl(l3c_pmu->base + L3C_PERF_CTRL);
val &= ~L3C_TRACETAG_EN;
writel(val, l3c_pmu->base + L3C_PERF_CTRL);
}
}
static void hisi_l3c_pmu_write_ds(struct perf_event *event, u32 ds_cfg)
{
struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u32 reg, reg_idx, shift, val;
int idx = hwc->idx;
/*
 * Select the appropriate datasource register (L3C_DATSRC_TYPE0/1).
 * There are 2 datasource control registers for the 8 hardware
 * counters. The datasource field is 8 bits wide; the first 4
 * hardware counters use L3C_DATSRC_TYPE0 and the next 4 use
 * L3C_DATSRC_TYPE1.
*/
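/*
 * E.g. counter index 6 programs its 8-bit datasource field into
 * L3C_DATSRC_TYPE + 4 (the second register) at bit shift 16.
 */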
reg = L3C_DATSRC_TYPE + (idx / 4) * 4;
reg_idx = idx % 4;
shift = 8 * reg_idx;
val = readl(l3c_pmu->base + reg);
val &= ~(L3C_DATSRC_MASK << shift);
val |= ds_cfg << shift;
writel(val, l3c_pmu->base + reg);
}
static void hisi_l3c_pmu_config_ds(struct perf_event *event)
{
struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
u32 ds_cfg = hisi_get_datasrc_cfg(event);
u32 ds_skt = hisi_get_datasrc_skt(event);
if (ds_cfg)
hisi_l3c_pmu_write_ds(event, ds_cfg);
if (ds_skt) {
u32 val;
val = readl(l3c_pmu->base + L3C_DATSRC_CTRL);
val |= L3C_DATSRC_SKT_EN;
writel(val, l3c_pmu->base + L3C_DATSRC_CTRL);
}
}
static void hisi_l3c_pmu_clear_ds(struct perf_event *event)
{
struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
u32 ds_cfg = hisi_get_datasrc_cfg(event);
u32 ds_skt = hisi_get_datasrc_skt(event);
if (ds_cfg)
hisi_l3c_pmu_write_ds(event, L3C_DATSRC_NONE);
if (ds_skt) {
u32 val;
val = readl(l3c_pmu->base + L3C_DATSRC_CTRL);
val &= ~L3C_DATSRC_SKT_EN;
writel(val, l3c_pmu->base + L3C_DATSRC_CTRL);
}
}
static void hisi_l3c_pmu_config_core_tracetag(struct perf_event *event)
{
struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
u32 core = hisi_get_tt_core(event);
if (core) {
u32 val;
/* Config and enable core information */
writel(core, l3c_pmu->base + L3C_CORE_CTRL);
val = readl(l3c_pmu->base + L3C_PERF_CTRL);
val |= L3C_CORE_EN;
writel(val, l3c_pmu->base + L3C_PERF_CTRL);
/* Enable core-tracetag statistics */
val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
val |= L3C_TRACETAG_CORE_EN;
writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
}
}
static void hisi_l3c_pmu_clear_core_tracetag(struct perf_event *event)
{
struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
u32 core = hisi_get_tt_core(event);
if (core) {
u32 val;
/* Clear core information */
writel(L3C_COER_NONE, l3c_pmu->base + L3C_CORE_CTRL);
val = readl(l3c_pmu->base + L3C_PERF_CTRL);
val &= ~L3C_CORE_EN;
writel(val, l3c_pmu->base + L3C_PERF_CTRL);
/* Disable core-tracetag statistics */
val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
val &= ~L3C_TRACETAG_CORE_EN;
writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
}
}
static void hisi_l3c_pmu_enable_filter(struct perf_event *event)
{
if (event->attr.config1 != 0x0) {
hisi_l3c_pmu_config_req_tracetag(event);
hisi_l3c_pmu_config_core_tracetag(event);
hisi_l3c_pmu_config_ds(event);
}
}
static void hisi_l3c_pmu_disable_filter(struct perf_event *event)
{
if (event->attr.config1 != 0x0) {
hisi_l3c_pmu_clear_ds(event);
hisi_l3c_pmu_clear_core_tracetag(event);
hisi_l3c_pmu_clear_req_tracetag(event);
}
}
/*
* Select the counter register offset using the counter index
*/
static u32 hisi_l3c_pmu_get_counter_offset(int cntr_idx)
{
return (L3C_CNTR0_LOWER + (cntr_idx * 8));
}
static u64 hisi_l3c_pmu_read_counter(struct hisi_pmu *l3c_pmu,
struct hw_perf_event *hwc)
{
return readq(l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(hwc->idx));
}
static void hisi_l3c_pmu_write_counter(struct hisi_pmu *l3c_pmu,
struct hw_perf_event *hwc, u64 val)
{
writeq(val, l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(hwc->idx));
}
static void hisi_l3c_pmu_write_evtype(struct hisi_pmu *l3c_pmu, int idx,
u32 type)
{
u32 reg, reg_idx, shift, val;
/*
 * Select the appropriate event select register (L3C_EVENT_TYPE0/1).
 * There are 2 event select registers for the 8 hardware counters.
 * The event code is 8 bits wide; the first 4 hardware counters use
 * L3C_EVENT_TYPE0 and the next 4 use L3C_EVENT_TYPE1.
*/
reg = L3C_EVENT_TYPE0 + (idx / 4) * 4;
reg_idx = idx % 4;
shift = 8 * reg_idx;
/* Write event code to L3C_EVENT_TYPEx Register */
val = readl(l3c_pmu->base + reg);
val &= ~(L3C_EVTYPE_NONE << shift);
val |= (type << shift);
writel(val, l3c_pmu->base + reg);
}
static void hisi_l3c_pmu_start_counters(struct hisi_pmu *l3c_pmu)
{
u32 val;
/*
* Set perf_enable bit in L3C_PERF_CTRL register to start counting
* for all enabled counters.
*/
val = readl(l3c_pmu->base + L3C_PERF_CTRL);
val |= L3C_PERF_CTRL_EN;
writel(val, l3c_pmu->base + L3C_PERF_CTRL);
}
static void hisi_l3c_pmu_stop_counters(struct hisi_pmu *l3c_pmu)
{
u32 val;
/*
* Clear perf_enable bit in L3C_PERF_CTRL register to stop counting
* for all enabled counters.
*/
val = readl(l3c_pmu->base + L3C_PERF_CTRL);
val &= ~(L3C_PERF_CTRL_EN);
writel(val, l3c_pmu->base + L3C_PERF_CTRL);
}
static void hisi_l3c_pmu_enable_counter(struct hisi_pmu *l3c_pmu,
struct hw_perf_event *hwc)
{
u32 val;
/* Enable counter index in L3C_EVENT_CTRL register */
val = readl(l3c_pmu->base + L3C_EVENT_CTRL);
val |= (1 << hwc->idx);
writel(val, l3c_pmu->base + L3C_EVENT_CTRL);
}
static void hisi_l3c_pmu_disable_counter(struct hisi_pmu *l3c_pmu,
struct hw_perf_event *hwc)
{
u32 val;
/* Clear counter index in L3C_EVENT_CTRL register */
val = readl(l3c_pmu->base + L3C_EVENT_CTRL);
val &= ~(1 << hwc->idx);
writel(val, l3c_pmu->base + L3C_EVENT_CTRL);
}
static void hisi_l3c_pmu_enable_counter_int(struct hisi_pmu *l3c_pmu,
struct hw_perf_event *hwc)
{
u32 val;
val = readl(l3c_pmu->base + L3C_INT_MASK);
/* Write 0 to enable interrupt */
val &= ~(1 << hwc->idx);
writel(val, l3c_pmu->base + L3C_INT_MASK);
}
static void hisi_l3c_pmu_disable_counter_int(struct hisi_pmu *l3c_pmu,
struct hw_perf_event *hwc)
{
u32 val;
val = readl(l3c_pmu->base + L3C_INT_MASK);
/* Write 1 to mask interrupt */
val |= (1 << hwc->idx);
writel(val, l3c_pmu->base + L3C_INT_MASK);
}
static u32 hisi_l3c_pmu_get_int_status(struct hisi_pmu *l3c_pmu)
{
return readl(l3c_pmu->base + L3C_INT_STATUS);
}
static void hisi_l3c_pmu_clear_int_status(struct hisi_pmu *l3c_pmu, int idx)
{
writel(1 << idx, l3c_pmu->base + L3C_INT_CLEAR);
}
static const struct acpi_device_id hisi_l3c_pmu_acpi_match[] = {
{ "HISI0213", },
{ "HISI0214", },
{}
};
MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match);
static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *l3c_pmu)
{
/*
* Use the SCCL_ID and CCL_ID to identify the L3C PMU, while
* SCCL_ID is in MPIDR[aff2] and CCL_ID is in MPIDR[aff1].
*/
if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
&l3c_pmu->sccl_id)) {
dev_err(&pdev->dev, "Can not read l3c sccl-id!\n");
return -EINVAL;
}
if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id",
&l3c_pmu->ccl_id)) {
dev_err(&pdev->dev, "Can not read l3c ccl-id!\n");
return -EINVAL;
}
l3c_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(l3c_pmu->base)) {
dev_err(&pdev->dev, "ioremap failed for l3c_pmu resource\n");
return PTR_ERR(l3c_pmu->base);
}
l3c_pmu->identifier = readl(l3c_pmu->base + L3C_VERSION);
return 0;
}
static struct attribute *hisi_l3c_pmu_v1_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
NULL,
};
static const struct attribute_group hisi_l3c_pmu_v1_format_group = {
.name = "format",
.attrs = hisi_l3c_pmu_v1_format_attr,
};
static struct attribute *hisi_l3c_pmu_v2_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
HISI_PMU_FORMAT_ATTR(tt_core, "config1:0-7"),
HISI_PMU_FORMAT_ATTR(tt_req, "config1:8-10"),
HISI_PMU_FORMAT_ATTR(datasrc_cfg, "config1:11-15"),
HISI_PMU_FORMAT_ATTR(datasrc_skt, "config1:16"),
NULL
};
static const struct attribute_group hisi_l3c_pmu_v2_format_group = {
.name = "format",
.attrs = hisi_l3c_pmu_v2_format_attr,
};
static struct attribute *hisi_l3c_pmu_v1_events_attr[] = {
HISI_PMU_EVENT_ATTR(rd_cpipe, 0x00),
HISI_PMU_EVENT_ATTR(wr_cpipe, 0x01),
HISI_PMU_EVENT_ATTR(rd_hit_cpipe, 0x02),
HISI_PMU_EVENT_ATTR(wr_hit_cpipe, 0x03),
HISI_PMU_EVENT_ATTR(victim_num, 0x04),
HISI_PMU_EVENT_ATTR(rd_spipe, 0x20),
HISI_PMU_EVENT_ATTR(wr_spipe, 0x21),
HISI_PMU_EVENT_ATTR(rd_hit_spipe, 0x22),
HISI_PMU_EVENT_ATTR(wr_hit_spipe, 0x23),
HISI_PMU_EVENT_ATTR(back_invalid, 0x29),
HISI_PMU_EVENT_ATTR(retry_cpu, 0x40),
HISI_PMU_EVENT_ATTR(retry_ring, 0x41),
HISI_PMU_EVENT_ATTR(prefetch_drop, 0x42),
NULL,
};
static const struct attribute_group hisi_l3c_pmu_v1_events_group = {
.name = "events",
.attrs = hisi_l3c_pmu_v1_events_attr,
};
static struct attribute *hisi_l3c_pmu_v2_events_attr[] = {
HISI_PMU_EVENT_ATTR(l3c_hit, 0x48),
HISI_PMU_EVENT_ATTR(cycles, 0x7f),
HISI_PMU_EVENT_ATTR(l3c_ref, 0xb8),
HISI_PMU_EVENT_ATTR(dat_access, 0xb9),
NULL
};
static const struct attribute_group hisi_l3c_pmu_v2_events_group = {
.name = "events",
.attrs = hisi_l3c_pmu_v2_events_attr,
};
static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
static struct attribute *hisi_l3c_pmu_cpumask_attrs[] = {
&dev_attr_cpumask.attr,
NULL,
};
static const struct attribute_group hisi_l3c_pmu_cpumask_attr_group = {
.attrs = hisi_l3c_pmu_cpumask_attrs,
};
static struct device_attribute hisi_l3c_pmu_identifier_attr =
__ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
static struct attribute *hisi_l3c_pmu_identifier_attrs[] = {
&hisi_l3c_pmu_identifier_attr.attr,
NULL
};
static const struct attribute_group hisi_l3c_pmu_identifier_group = {
.attrs = hisi_l3c_pmu_identifier_attrs,
};
static const struct attribute_group *hisi_l3c_pmu_v1_attr_groups[] = {
&hisi_l3c_pmu_v1_format_group,
&hisi_l3c_pmu_v1_events_group,
&hisi_l3c_pmu_cpumask_attr_group,
&hisi_l3c_pmu_identifier_group,
NULL,
};
static const struct attribute_group *hisi_l3c_pmu_v2_attr_groups[] = {
&hisi_l3c_pmu_v2_format_group,
&hisi_l3c_pmu_v2_events_group,
&hisi_l3c_pmu_cpumask_attr_group,
&hisi_l3c_pmu_identifier_group,
NULL
};
static const struct hisi_uncore_ops hisi_uncore_l3c_ops = {
.write_evtype = hisi_l3c_pmu_write_evtype,
.get_event_idx = hisi_uncore_pmu_get_event_idx,
.start_counters = hisi_l3c_pmu_start_counters,
.stop_counters = hisi_l3c_pmu_stop_counters,
.enable_counter = hisi_l3c_pmu_enable_counter,
.disable_counter = hisi_l3c_pmu_disable_counter,
.enable_counter_int = hisi_l3c_pmu_enable_counter_int,
.disable_counter_int = hisi_l3c_pmu_disable_counter_int,
.write_counter = hisi_l3c_pmu_write_counter,
.read_counter = hisi_l3c_pmu_read_counter,
.get_int_status = hisi_l3c_pmu_get_int_status,
.clear_int_status = hisi_l3c_pmu_clear_int_status,
.enable_filter = hisi_l3c_pmu_enable_filter,
.disable_filter = hisi_l3c_pmu_disable_filter,
};
static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev,
struct hisi_pmu *l3c_pmu)
{
int ret;
ret = hisi_l3c_pmu_init_data(pdev, l3c_pmu);
if (ret)
return ret;
ret = hisi_uncore_pmu_init_irq(l3c_pmu, pdev);
if (ret)
return ret;
if (l3c_pmu->identifier >= HISI_PMU_V2) {
l3c_pmu->counter_bits = 64;
l3c_pmu->check_event = L3C_V2_NR_EVENTS;
l3c_pmu->pmu_events.attr_groups = hisi_l3c_pmu_v2_attr_groups;
} else {
l3c_pmu->counter_bits = 48;
l3c_pmu->check_event = L3C_V1_NR_EVENTS;
l3c_pmu->pmu_events.attr_groups = hisi_l3c_pmu_v1_attr_groups;
}
l3c_pmu->num_counters = L3C_NR_COUNTERS;
l3c_pmu->ops = &hisi_uncore_l3c_ops;
l3c_pmu->dev = &pdev->dev;
l3c_pmu->on_cpu = -1;
return 0;
}
static int hisi_l3c_pmu_probe(struct platform_device *pdev)
{
struct hisi_pmu *l3c_pmu;
char *name;
int ret;
l3c_pmu = devm_kzalloc(&pdev->dev, sizeof(*l3c_pmu), GFP_KERNEL);
if (!l3c_pmu)
return -ENOMEM;
platform_set_drvdata(pdev, l3c_pmu);
ret = hisi_l3c_pmu_dev_probe(pdev, l3c_pmu);
if (ret)
return ret;
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u",
l3c_pmu->sccl_id, l3c_pmu->ccl_id);
if (!name)
return -ENOMEM;
ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
&l3c_pmu->node);
if (ret) {
dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
return ret;
}
hisi_pmu_init(l3c_pmu, THIS_MODULE);
ret = perf_pmu_register(&l3c_pmu->pmu, name, -1);
if (ret) {
dev_err(l3c_pmu->dev, "L3C PMU register failed!\n");
cpuhp_state_remove_instance_nocalls(
CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, &l3c_pmu->node);
}
return ret;
}
static int hisi_l3c_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *l3c_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&l3c_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
&l3c_pmu->node);
return 0;
}
static struct platform_driver hisi_l3c_pmu_driver = {
.driver = {
.name = "hisi_l3c_pmu",
.acpi_match_table = ACPI_PTR(hisi_l3c_pmu_acpi_match),
.suppress_bind_attrs = true,
},
.probe = hisi_l3c_pmu_probe,
.remove = hisi_l3c_pmu_remove,
};
static int __init hisi_l3c_pmu_module_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
"AP_PERF_ARM_HISI_L3_ONLINE",
hisi_uncore_pmu_online_cpu,
hisi_uncore_pmu_offline_cpu);
if (ret) {
pr_err("L3C PMU: Error setup hotplug, ret = %d\n", ret);
return ret;
}
ret = platform_driver_register(&hisi_l3c_pmu_driver);
if (ret)
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE);
return ret;
}
module_init(hisi_l3c_pmu_module_init);
static void __exit hisi_l3c_pmu_module_exit(void)
{
platform_driver_unregister(&hisi_l3c_pmu_driver);
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE);
}
module_exit(hisi_l3c_pmu_module_exit);
MODULE_DESCRIPTION("HiSilicon SoC L3C uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Anurup M <[email protected]>");
MODULE_AUTHOR("Shaokun Zhang <[email protected]>");
| linux-master | drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HiSilicon SoC UC (unified cache) uncore Hardware event counters support
*
* Copyright (C) 2023 HiSilicon Limited
*
* This code is based on the uncore PMUs like hisi_uncore_l3c_pmu.
*/
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include "hisi_uncore_pmu.h"
/* Dynamic CPU hotplug state used by UC PMU */
static enum cpuhp_state hisi_uc_pmu_online;
/* UC register definition */
#define HISI_UC_INT_MASK_REG 0x0800
#define HISI_UC_INT_STS_REG 0x0808
#define HISI_UC_INT_CLEAR_REG 0x080c
#define HISI_UC_TRACETAG_CTRL_REG 0x1b2c
#define HISI_UC_TRACETAG_REQ_MSK GENMASK(9, 7)
#define HISI_UC_TRACETAG_MARK_EN BIT(0)
#define HISI_UC_TRACETAG_REQ_EN (HISI_UC_TRACETAG_MARK_EN | BIT(2))
#define HISI_UC_TRACETAG_SRCID_EN BIT(3)
#define HISI_UC_SRCID_CTRL_REG 0x1b40
#define HISI_UC_SRCID_MSK GENMASK(14, 1)
#define HISI_UC_EVENT_CTRL_REG 0x1c00
#define HISI_UC_EVENT_TRACETAG_EN BIT(29)
#define HISI_UC_EVENT_URING_MSK GENMASK(28, 27)
#define HISI_UC_EVENT_GLB_EN BIT(26)
#define HISI_UC_VERSION_REG 0x1cf0
#define HISI_UC_EVTYPE_REGn(n) (0x1d00 + (n) * 4)
#define HISI_UC_EVTYPE_MASK GENMASK(7, 0)
#define HISI_UC_CNTR_REGn(n) (0x1e00 + (n) * 8)
#define HISI_UC_NR_COUNTERS 0x8
#define HISI_UC_V2_NR_EVENTS 0xFF
#define HISI_UC_CNTR_REG_BITS 64
#define HISI_UC_RD_REQ_TRACETAG 0x4
#define HISI_UC_URING_EVENT_MIN 0x47
#define HISI_UC_URING_EVENT_MAX 0x59
HISI_PMU_EVENT_ATTR_EXTRACTOR(rd_req_en, config1, 0, 0);
HISI_PMU_EVENT_ATTR_EXTRACTOR(uring_channel, config1, 5, 4);
HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid, config1, 19, 6);
HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_en, config1, 20, 20);
static int hisi_uc_pmu_check_filter(struct perf_event *event)
{
struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
if (hisi_get_srcid_en(event) && !hisi_get_rd_req_en(event)) {
dev_err(uc_pmu->dev,
"rcid_en depends on rd_req_en being enabled!\n");
return -EINVAL;
}
if (!hisi_get_uring_channel(event))
return 0;
if ((HISI_GET_EVENTID(event) < HISI_UC_URING_EVENT_MIN) ||
(HISI_GET_EVENTID(event) > HISI_UC_URING_EVENT_MAX))
dev_warn(uc_pmu->dev,
"Only events: [%#x ~ %#x] support channel filtering!",
HISI_UC_URING_EVENT_MIN, HISI_UC_URING_EVENT_MAX);
return 0;
}
static void hisi_uc_pmu_config_req_tracetag(struct perf_event *event)
{
struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
u32 val;
if (!hisi_get_rd_req_en(event))
return;
val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
/* The request-type has been configured */
if (FIELD_GET(HISI_UC_TRACETAG_REQ_MSK, val) == HISI_UC_RD_REQ_TRACETAG)
return;
	/* Set the request-type for tracetag; only read requests are supported */
val &= ~HISI_UC_TRACETAG_REQ_MSK;
val |= FIELD_PREP(HISI_UC_TRACETAG_REQ_MSK, HISI_UC_RD_REQ_TRACETAG);
val |= HISI_UC_TRACETAG_REQ_EN;
writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
}
static void hisi_uc_pmu_clear_req_tracetag(struct perf_event *event)
{
struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
u32 val;
if (!hisi_get_rd_req_en(event))
return;
val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
/* Do nothing, the request-type tracetag has been cleaned up */
if (FIELD_GET(HISI_UC_TRACETAG_REQ_MSK, val) == 0)
return;
/* Clear request-type */
val &= ~HISI_UC_TRACETAG_REQ_MSK;
val &= ~HISI_UC_TRACETAG_REQ_EN;
writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
}
static void hisi_uc_pmu_config_srcid_tracetag(struct perf_event *event)
{
struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
u32 val;
if (!hisi_get_srcid_en(event))
return;
val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
/* Do nothing, the source id has been configured */
if (FIELD_GET(HISI_UC_TRACETAG_SRCID_EN, val))
return;
/* Enable source id tracetag */
val |= HISI_UC_TRACETAG_SRCID_EN;
writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
val = readl(uc_pmu->base + HISI_UC_SRCID_CTRL_REG);
val &= ~HISI_UC_SRCID_MSK;
val |= FIELD_PREP(HISI_UC_SRCID_MSK, hisi_get_srcid(event));
writel(val, uc_pmu->base + HISI_UC_SRCID_CTRL_REG);
	/* Source id filtering depends on the request-type tracetag being enabled */
hisi_uc_pmu_config_req_tracetag(event);
}
static void hisi_uc_pmu_clear_srcid_tracetag(struct perf_event *event)
{
struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
u32 val;
if (!hisi_get_srcid_en(event))
return;
val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
/* Do nothing, the source id has been cleaned up */
if (FIELD_GET(HISI_UC_TRACETAG_SRCID_EN, val) == 0)
return;
hisi_uc_pmu_clear_req_tracetag(event);
/* Disable source id tracetag */
val &= ~HISI_UC_TRACETAG_SRCID_EN;
writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
val = readl(uc_pmu->base + HISI_UC_SRCID_CTRL_REG);
val &= ~HISI_UC_SRCID_MSK;
writel(val, uc_pmu->base + HISI_UC_SRCID_CTRL_REG);
}
static void hisi_uc_pmu_config_uring_channel(struct perf_event *event)
{
struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
u32 uring_channel = hisi_get_uring_channel(event);
u32 val;
	/* Do nothing if the channel is not set or is explicitly set to zero (default) */
if (uring_channel == 0)
return;
val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
/* Do nothing, the uring_channel has been configured */
if (uring_channel == FIELD_GET(HISI_UC_EVENT_URING_MSK, val))
return;
val &= ~HISI_UC_EVENT_URING_MSK;
val |= FIELD_PREP(HISI_UC_EVENT_URING_MSK, uring_channel);
writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
}
static void hisi_uc_pmu_clear_uring_channel(struct perf_event *event)
{
struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
u32 val;
	/* Do nothing if the channel is not set or is explicitly set to zero (default) */
if (hisi_get_uring_channel(event) == 0)
return;
val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
/* Do nothing, the uring_channel has been cleaned up */
if (FIELD_GET(HISI_UC_EVENT_URING_MSK, val) == 0)
return;
val &= ~HISI_UC_EVENT_URING_MSK;
writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
}
static void hisi_uc_pmu_enable_filter(struct perf_event *event)
{
if (event->attr.config1 == 0)
return;
hisi_uc_pmu_config_uring_channel(event);
hisi_uc_pmu_config_req_tracetag(event);
hisi_uc_pmu_config_srcid_tracetag(event);
}
static void hisi_uc_pmu_disable_filter(struct perf_event *event)
{
if (event->attr.config1 == 0)
return;
hisi_uc_pmu_clear_srcid_tracetag(event);
hisi_uc_pmu_clear_req_tracetag(event);
hisi_uc_pmu_clear_uring_channel(event);
}
static void hisi_uc_pmu_write_evtype(struct hisi_pmu *uc_pmu, int idx, u32 type)
{
u32 val;
/*
	 * Select the appropriate event select register.
	 * There are 2 32-bit event select registers for the
	 * 8 hardware counters; each event code is 8 bits wide.
*/
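	/* For example, idx 5 maps to HISI_UC_EVTYPE_REGn(1), bits [15:8] */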
val = readl(uc_pmu->base + HISI_UC_EVTYPE_REGn(idx / 4));
val &= ~(HISI_UC_EVTYPE_MASK << HISI_PMU_EVTYPE_SHIFT(idx));
val |= (type << HISI_PMU_EVTYPE_SHIFT(idx));
writel(val, uc_pmu->base + HISI_UC_EVTYPE_REGn(idx / 4));
}
static void hisi_uc_pmu_start_counters(struct hisi_pmu *uc_pmu)
{
u32 val;
val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
val |= HISI_UC_EVENT_GLB_EN;
writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
}
static void hisi_uc_pmu_stop_counters(struct hisi_pmu *uc_pmu)
{
u32 val;
val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
val &= ~HISI_UC_EVENT_GLB_EN;
writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
}
static void hisi_uc_pmu_enable_counter(struct hisi_pmu *uc_pmu,
struct hw_perf_event *hwc)
{
u32 val;
/* Enable counter index */
val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
val |= (1 << hwc->idx);
writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
}
static void hisi_uc_pmu_disable_counter(struct hisi_pmu *uc_pmu,
struct hw_perf_event *hwc)
{
u32 val;
/* Clear counter index */
val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
val &= ~(1 << hwc->idx);
writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
}
static u64 hisi_uc_pmu_read_counter(struct hisi_pmu *uc_pmu,
struct hw_perf_event *hwc)
{
return readq(uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx));
}
static void hisi_uc_pmu_write_counter(struct hisi_pmu *uc_pmu,
struct hw_perf_event *hwc, u64 val)
{
writeq(val, uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx));
}
static void hisi_uc_pmu_enable_counter_int(struct hisi_pmu *uc_pmu,
struct hw_perf_event *hwc)
{
u32 val;
val = readl(uc_pmu->base + HISI_UC_INT_MASK_REG);
val &= ~(1 << hwc->idx);
writel(val, uc_pmu->base + HISI_UC_INT_MASK_REG);
}
static void hisi_uc_pmu_disable_counter_int(struct hisi_pmu *uc_pmu,
struct hw_perf_event *hwc)
{
u32 val;
val = readl(uc_pmu->base + HISI_UC_INT_MASK_REG);
val |= (1 << hwc->idx);
writel(val, uc_pmu->base + HISI_UC_INT_MASK_REG);
}
static u32 hisi_uc_pmu_get_int_status(struct hisi_pmu *uc_pmu)
{
return readl(uc_pmu->base + HISI_UC_INT_STS_REG);
}
static void hisi_uc_pmu_clear_int_status(struct hisi_pmu *uc_pmu, int idx)
{
writel(1 << idx, uc_pmu->base + HISI_UC_INT_CLEAR_REG);
}
static int hisi_uc_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *uc_pmu)
{
/*
	 * Use the SCCL (Super CPU Cluster) ID and CCL (CPU Cluster) ID to
	 * identify the topology of the UC PMU devices in the chip.
	 * Each SCCL contains several CCLs, and each CCL has 4 UC PMUs.
*/
if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
&uc_pmu->sccl_id)) {
dev_err(&pdev->dev, "Can not read uc sccl-id!\n");
return -EINVAL;
}
if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id",
&uc_pmu->ccl_id)) {
dev_err(&pdev->dev, "Can not read uc ccl-id!\n");
return -EINVAL;
}
if (device_property_read_u32(&pdev->dev, "hisilicon,sub-id",
&uc_pmu->sub_id)) {
dev_err(&pdev->dev, "Can not read sub-id!\n");
return -EINVAL;
}
uc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(uc_pmu->base)) {
dev_err(&pdev->dev, "ioremap failed for uc_pmu resource\n");
return PTR_ERR(uc_pmu->base);
}
uc_pmu->identifier = readl(uc_pmu->base + HISI_UC_VERSION_REG);
return 0;
}
static struct attribute *hisi_uc_pmu_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
HISI_PMU_FORMAT_ATTR(rd_req_en, "config1:0-0"),
HISI_PMU_FORMAT_ATTR(uring_channel, "config1:4-5"),
HISI_PMU_FORMAT_ATTR(srcid, "config1:6-19"),
HISI_PMU_FORMAT_ATTR(srcid_en, "config1:20-20"),
NULL
};
static const struct attribute_group hisi_uc_pmu_format_group = {
.name = "format",
.attrs = hisi_uc_pmu_format_attr,
};
static struct attribute *hisi_uc_pmu_events_attr[] = {
HISI_PMU_EVENT_ATTR(sq_time, 0x00),
HISI_PMU_EVENT_ATTR(pq_time, 0x01),
HISI_PMU_EVENT_ATTR(hbm_time, 0x02),
HISI_PMU_EVENT_ATTR(iq_comp_time_cring, 0x03),
HISI_PMU_EVENT_ATTR(iq_comp_time_uring, 0x05),
HISI_PMU_EVENT_ATTR(cpu_rd, 0x10),
HISI_PMU_EVENT_ATTR(cpu_rd64, 0x17),
HISI_PMU_EVENT_ATTR(cpu_rs64, 0x19),
HISI_PMU_EVENT_ATTR(cpu_mru, 0x1a),
HISI_PMU_EVENT_ATTR(cycles, 0x9c),
HISI_PMU_EVENT_ATTR(spipe_hit, 0xb3),
HISI_PMU_EVENT_ATTR(hpipe_hit, 0xdb),
HISI_PMU_EVENT_ATTR(cring_rxdat_cnt, 0xfa),
HISI_PMU_EVENT_ATTR(cring_txdat_cnt, 0xfb),
HISI_PMU_EVENT_ATTR(uring_rxdat_cnt, 0xfc),
HISI_PMU_EVENT_ATTR(uring_txdat_cnt, 0xfd),
NULL
};
static const struct attribute_group hisi_uc_pmu_events_group = {
.name = "events",
.attrs = hisi_uc_pmu_events_attr,
};
static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
static struct attribute *hisi_uc_pmu_cpumask_attrs[] = {
&dev_attr_cpumask.attr,
NULL,
};
static const struct attribute_group hisi_uc_pmu_cpumask_attr_group = {
.attrs = hisi_uc_pmu_cpumask_attrs,
};
static struct device_attribute hisi_uc_pmu_identifier_attr =
__ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
static struct attribute *hisi_uc_pmu_identifier_attrs[] = {
&hisi_uc_pmu_identifier_attr.attr,
NULL
};
static const struct attribute_group hisi_uc_pmu_identifier_group = {
.attrs = hisi_uc_pmu_identifier_attrs,
};
static const struct attribute_group *hisi_uc_pmu_attr_groups[] = {
&hisi_uc_pmu_format_group,
&hisi_uc_pmu_events_group,
&hisi_uc_pmu_cpumask_attr_group,
&hisi_uc_pmu_identifier_group,
NULL
};
static const struct hisi_uncore_ops hisi_uncore_uc_pmu_ops = {
.check_filter = hisi_uc_pmu_check_filter,
.write_evtype = hisi_uc_pmu_write_evtype,
.get_event_idx = hisi_uncore_pmu_get_event_idx,
.start_counters = hisi_uc_pmu_start_counters,
.stop_counters = hisi_uc_pmu_stop_counters,
.enable_counter = hisi_uc_pmu_enable_counter,
.disable_counter = hisi_uc_pmu_disable_counter,
.enable_counter_int = hisi_uc_pmu_enable_counter_int,
.disable_counter_int = hisi_uc_pmu_disable_counter_int,
.write_counter = hisi_uc_pmu_write_counter,
.read_counter = hisi_uc_pmu_read_counter,
.get_int_status = hisi_uc_pmu_get_int_status,
.clear_int_status = hisi_uc_pmu_clear_int_status,
.enable_filter = hisi_uc_pmu_enable_filter,
.disable_filter = hisi_uc_pmu_disable_filter,
};
static int hisi_uc_pmu_dev_probe(struct platform_device *pdev,
struct hisi_pmu *uc_pmu)
{
int ret;
ret = hisi_uc_pmu_init_data(pdev, uc_pmu);
if (ret)
return ret;
ret = hisi_uncore_pmu_init_irq(uc_pmu, pdev);
if (ret)
return ret;
uc_pmu->pmu_events.attr_groups = hisi_uc_pmu_attr_groups;
uc_pmu->check_event = HISI_UC_EVTYPE_MASK;
uc_pmu->ops = &hisi_uncore_uc_pmu_ops;
uc_pmu->counter_bits = HISI_UC_CNTR_REG_BITS;
uc_pmu->num_counters = HISI_UC_NR_COUNTERS;
uc_pmu->dev = &pdev->dev;
uc_pmu->on_cpu = -1;
return 0;
}
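/*
 * The two devm actions below undo the cpuhp instance registration and the
 * PMU registration in reverse order when the device is unbound or a later
 * probe step fails.
 */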
static void hisi_uc_pmu_remove_cpuhp_instance(void *hotplug_node)
{
cpuhp_state_remove_instance_nocalls(hisi_uc_pmu_online, hotplug_node);
}
static void hisi_uc_pmu_unregister_pmu(void *pmu)
{
perf_pmu_unregister(pmu);
}
static int hisi_uc_pmu_probe(struct platform_device *pdev)
{
struct hisi_pmu *uc_pmu;
char *name;
int ret;
uc_pmu = devm_kzalloc(&pdev->dev, sizeof(*uc_pmu), GFP_KERNEL);
if (!uc_pmu)
return -ENOMEM;
platform_set_drvdata(pdev, uc_pmu);
ret = hisi_uc_pmu_dev_probe(pdev, uc_pmu);
if (ret)
return ret;
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_uc%d_%u",
uc_pmu->sccl_id, uc_pmu->ccl_id, uc_pmu->sub_id);
if (!name)
return -ENOMEM;
ret = cpuhp_state_add_instance(hisi_uc_pmu_online, &uc_pmu->node);
if (ret)
return dev_err_probe(&pdev->dev, ret, "Error registering hotplug\n");
ret = devm_add_action_or_reset(&pdev->dev,
hisi_uc_pmu_remove_cpuhp_instance,
&uc_pmu->node);
if (ret)
return ret;
hisi_pmu_init(uc_pmu, THIS_MODULE);
ret = perf_pmu_register(&uc_pmu->pmu, name, -1);
if (ret)
return ret;
return devm_add_action_or_reset(&pdev->dev,
hisi_uc_pmu_unregister_pmu,
&uc_pmu->pmu);
}
static const struct acpi_device_id hisi_uc_pmu_acpi_match[] = {
{ "HISI0291", },
{}
};
MODULE_DEVICE_TABLE(acpi, hisi_uc_pmu_acpi_match);
static struct platform_driver hisi_uc_pmu_driver = {
.driver = {
.name = "hisi_uc_pmu",
.acpi_match_table = hisi_uc_pmu_acpi_match,
/*
		 * We have not worked out a safe bind/unbind process;
		 * forcefully unbinding during sampling will lead to a
		 * kernel panic, so this is not supported yet.
*/
.suppress_bind_attrs = true,
},
.probe = hisi_uc_pmu_probe,
};
static int __init hisi_uc_pmu_module_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
"perf/hisi/uc:online",
hisi_uncore_pmu_online_cpu,
hisi_uncore_pmu_offline_cpu);
if (ret < 0) {
pr_err("UC PMU: Error setup hotplug, ret = %d\n", ret);
return ret;
}
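	/* On success cpuhp_setup_state_multi() returns the dynamically allocated state */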
hisi_uc_pmu_online = ret;
ret = platform_driver_register(&hisi_uc_pmu_driver);
if (ret)
cpuhp_remove_multi_state(hisi_uc_pmu_online);
return ret;
}
module_init(hisi_uc_pmu_module_init);
static void __exit hisi_uc_pmu_module_exit(void)
{
platform_driver_unregister(&hisi_uc_pmu_driver);
cpuhp_remove_multi_state(hisi_uc_pmu_online);
}
module_exit(hisi_uc_pmu_module_exit);
MODULE_DESCRIPTION("HiSilicon SoC UC uncore PMU driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Junhao He <[email protected]>");
| linux-master | drivers/perf/hisilicon/hisi_uncore_uc_pmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This driver adds support for PCIe PMU RCiEP device. Related
* perf events are bandwidth, latency etc.
*
* Copyright (C) 2021 HiSilicon Limited
* Author: Qi Liu <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/perf_event.h>
#define DRV_NAME "hisi_pcie_pmu"
/* Define registers */
#define HISI_PCIE_GLOBAL_CTRL 0x00
#define HISI_PCIE_EVENT_CTRL 0x010
#define HISI_PCIE_CNT 0x090
#define HISI_PCIE_EXT_CNT 0x110
#define HISI_PCIE_INT_STAT 0x150
#define HISI_PCIE_INT_MASK 0x154
#define HISI_PCIE_REG_BDF 0xfe0
#define HISI_PCIE_REG_VERSION 0xfe4
#define HISI_PCIE_REG_INFO 0xfe8
/* Define command in HISI_PCIE_GLOBAL_CTRL */
#define HISI_PCIE_GLOBAL_EN 0x01
#define HISI_PCIE_GLOBAL_NONE 0
/* Define command in HISI_PCIE_EVENT_CTRL */
#define HISI_PCIE_EVENT_EN BIT_ULL(20)
#define HISI_PCIE_RESET_CNT BIT_ULL(22)
#define HISI_PCIE_INIT_SET BIT_ULL(34)
#define HISI_PCIE_THR_EN BIT_ULL(26)
#define HISI_PCIE_TARGET_EN BIT_ULL(32)
#define HISI_PCIE_TRIG_EN BIT_ULL(52)
/* Define offsets in HISI_PCIE_EVENT_CTRL */
#define HISI_PCIE_EVENT_M GENMASK_ULL(15, 0)
#define HISI_PCIE_THR_MODE_M GENMASK_ULL(27, 27)
#define HISI_PCIE_THR_M GENMASK_ULL(31, 28)
#define HISI_PCIE_LEN_M GENMASK_ULL(35, 34)
#define HISI_PCIE_TARGET_M GENMASK_ULL(52, 36)
#define HISI_PCIE_TRIG_MODE_M GENMASK_ULL(53, 53)
#define HISI_PCIE_TRIG_M GENMASK_ULL(59, 56)
/* Default config of TLP length mode, will count both TLP headers and payloads */
#define HISI_PCIE_LEN_M_DEFAULT 3ULL
#define HISI_PCIE_MAX_COUNTERS 8
#define HISI_PCIE_REG_STEP 8
#define HISI_PCIE_THR_MAX_VAL 10
#define HISI_PCIE_TRIG_MAX_VAL 10
#define HISI_PCIE_MAX_PERIOD (GENMASK_ULL(63, 0))
#define HISI_PCIE_INIT_VAL BIT_ULL(63)
struct hisi_pcie_pmu {
struct perf_event *hw_events[HISI_PCIE_MAX_COUNTERS];
struct hlist_node node;
struct pci_dev *pdev;
struct pmu pmu;
void __iomem *base;
int irq;
u32 identifier;
/* Minimum and maximum BDF of root ports monitored by PMU */
u16 bdf_min;
u16 bdf_max;
int on_cpu;
};
struct hisi_pcie_reg_pair {
u16 lo;
u16 hi;
};
#define to_pcie_pmu(p) (container_of((p), struct hisi_pcie_pmu, pmu))
#define GET_PCI_DEVFN(bdf) ((bdf) & 0xff)
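/* A BDF packs the bus number in bits [15:8] and the devfn in bits [7:0] */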
#define HISI_PCIE_PMU_FILTER_ATTR(_name, _config, _hi, _lo) \
static u64 hisi_pcie_get_##_name(struct perf_event *event) \
{ \
return FIELD_GET(GENMASK(_hi, _lo), event->attr._config); \
	}
HISI_PCIE_PMU_FILTER_ATTR(event, config, 16, 0);
HISI_PCIE_PMU_FILTER_ATTR(thr_len, config1, 3, 0);
HISI_PCIE_PMU_FILTER_ATTR(thr_mode, config1, 4, 4);
HISI_PCIE_PMU_FILTER_ATTR(trig_len, config1, 8, 5);
HISI_PCIE_PMU_FILTER_ATTR(trig_mode, config1, 9, 9);
HISI_PCIE_PMU_FILTER_ATTR(len_mode, config1, 11, 10);
HISI_PCIE_PMU_FILTER_ATTR(port, config2, 15, 0);
HISI_PCIE_PMU_FILTER_ATTR(bdf, config2, 31, 16);
static ssize_t hisi_pcie_format_sysfs_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct dev_ext_attribute *eattr;
eattr = container_of(attr, struct dev_ext_attribute, attr);
return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}
static ssize_t hisi_pcie_event_sysfs_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct perf_pmu_events_attr *pmu_attr =
container_of(attr, struct perf_pmu_events_attr, attr);
return sysfs_emit(buf, "config=0x%llx\n", pmu_attr->id);
}
#define HISI_PCIE_PMU_FORMAT_ATTR(_name, _format) \
(&((struct dev_ext_attribute[]){ \
{ .attr = __ATTR(_name, 0444, hisi_pcie_format_sysfs_show, \
NULL), \
.var = (void *)_format } \
})[0].attr.attr)
#define HISI_PCIE_PMU_EVENT_ATTR(_name, _id) \
PMU_EVENT_ATTR_ID(_name, hisi_pcie_event_sysfs_show, _id)
static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev));
return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu));
}
static DEVICE_ATTR_RO(cpumask);
static ssize_t identifier_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev));
return sysfs_emit(buf, "%#x\n", pcie_pmu->identifier);
}
static DEVICE_ATTR_RO(identifier);
static ssize_t bus_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev));
return sysfs_emit(buf, "%#04x\n", PCI_BUS_NUM(pcie_pmu->bdf_min));
}
static DEVICE_ATTR_RO(bus);
static struct hisi_pcie_reg_pair
hisi_pcie_parse_reg_value(struct hisi_pcie_pmu *pcie_pmu, u32 reg_off)
{
u32 val = readl_relaxed(pcie_pmu->base + reg_off);
struct hisi_pcie_reg_pair regs = {
.lo = val,
.hi = val >> 16,
};
return regs;
}
/*
 * Hardware counter and ext_counter work together for bandwidth, latency, bus
 * utilization and buffer occupancy events. For example, for the RX memory
 * write latency event (index = 0x0010), the counter counts total delay cycles
 * and the ext_counter counts the number of RX memory write PCIe packets.
 *
 * As we don't want the PMU driver to post-process these two values, "delay
 * cycles" is treated as an independent event (index = 0x0010) and "RX memory
 * write packet count" as another (index = 0x10010). Bit 16 distinguishes the
 * two, and bits 0-15 are the "real" event index used to program
 * HISI_PCIE_EVENT_CTRL.
*/
#define EXT_COUNTER_IS_USED(idx) ((idx) & BIT(16))
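/*
 * Example (instance names such as hisi_pcie0_core0 depend on the platform
 * topology): counting both halves of a pair lets userspace derive averages,
 * e.g. average RX memory write latency in cycles:
 *   perf stat -e hisi_pcie0_core0/rx_mwr_latency/ -e hisi_pcie0_core0/rx_mwr_cnt/
 * and then divide rx_mwr_latency by rx_mwr_cnt.
 */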
static u32 hisi_pcie_get_real_event(struct perf_event *event)
{
return hisi_pcie_get_event(event) & GENMASK(15, 0);
}
static u32 hisi_pcie_pmu_get_offset(u32 offset, u32 idx)
{
return offset + HISI_PCIE_REG_STEP * idx;
}
static u32 hisi_pcie_pmu_readl(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset,
u32 idx)
{
u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx);
return readl_relaxed(pcie_pmu->base + offset);
}
static void hisi_pcie_pmu_writel(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx, u32 val)
{
u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx);
writel_relaxed(val, pcie_pmu->base + offset);
}
static u64 hisi_pcie_pmu_readq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx)
{
u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx);
return readq_relaxed(pcie_pmu->base + offset);
}
static void hisi_pcie_pmu_writeq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx, u64 val)
{
u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx);
writeq_relaxed(val, pcie_pmu->base + offset);
}
static void hisi_pcie_pmu_config_filter(struct perf_event *event)
{
struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u64 port, trig_len, thr_len, len_mode;
u64 reg = HISI_PCIE_INIT_SET;
/* Config HISI_PCIE_EVENT_CTRL according to event. */
reg |= FIELD_PREP(HISI_PCIE_EVENT_M, hisi_pcie_get_real_event(event));
/* Config HISI_PCIE_EVENT_CTRL according to root port or EP device. */
port = hisi_pcie_get_port(event);
if (port)
reg |= FIELD_PREP(HISI_PCIE_TARGET_M, port);
else
reg |= HISI_PCIE_TARGET_EN |
FIELD_PREP(HISI_PCIE_TARGET_M, hisi_pcie_get_bdf(event));
/* Config HISI_PCIE_EVENT_CTRL according to trigger condition. */
trig_len = hisi_pcie_get_trig_len(event);
if (trig_len) {
reg |= FIELD_PREP(HISI_PCIE_TRIG_M, trig_len);
reg |= FIELD_PREP(HISI_PCIE_TRIG_MODE_M, hisi_pcie_get_trig_mode(event));
reg |= HISI_PCIE_TRIG_EN;
}
/* Config HISI_PCIE_EVENT_CTRL according to threshold condition. */
thr_len = hisi_pcie_get_thr_len(event);
if (thr_len) {
reg |= FIELD_PREP(HISI_PCIE_THR_M, thr_len);
reg |= FIELD_PREP(HISI_PCIE_THR_MODE_M, hisi_pcie_get_thr_mode(event));
reg |= HISI_PCIE_THR_EN;
}
len_mode = hisi_pcie_get_len_mode(event);
if (len_mode)
reg |= FIELD_PREP(HISI_PCIE_LEN_M, len_mode);
else
reg |= FIELD_PREP(HISI_PCIE_LEN_M, HISI_PCIE_LEN_M_DEFAULT);
hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, hwc->idx, reg);
}
static void hisi_pcie_pmu_clear_filter(struct perf_event *event)
{
struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, hwc->idx, HISI_PCIE_INIT_SET);
}
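/*
 * Check that the requester id (BDF) used as a filter belongs to a device
 * whose root port lies in the [bdf_min, bdf_max] range monitored by this PMU.
 */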
static bool hisi_pcie_pmu_valid_requester_id(struct hisi_pcie_pmu *pcie_pmu, u32 bdf)
{
struct pci_dev *root_port, *pdev;
u16 rp_bdf;
pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pcie_pmu->pdev->bus), PCI_BUS_NUM(bdf),
GET_PCI_DEVFN(bdf));
if (!pdev)
return false;
root_port = pcie_find_root_port(pdev);
if (!root_port) {
pci_dev_put(pdev);
return false;
}
pci_dev_put(pdev);
rp_bdf = pci_dev_id(root_port);
return rp_bdf >= pcie_pmu->bdf_min && rp_bdf <= pcie_pmu->bdf_max;
}
static bool hisi_pcie_pmu_valid_filter(struct perf_event *event,
struct hisi_pcie_pmu *pcie_pmu)
{
u32 requester_id = hisi_pcie_get_bdf(event);
if (hisi_pcie_get_thr_len(event) > HISI_PCIE_THR_MAX_VAL)
return false;
if (hisi_pcie_get_trig_len(event) > HISI_PCIE_TRIG_MAX_VAL)
return false;
if (requester_id) {
if (!hisi_pcie_pmu_valid_requester_id(pcie_pmu, requester_id))
return false;
}
return true;
}
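/*
 * Events that differ only in bit 16 (the counter/ext_counter pair) share the
 * same hardware counter, so compare the "real" 16-bit event index.
 */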
static bool hisi_pcie_pmu_cmp_event(struct perf_event *target,
struct perf_event *event)
{
return hisi_pcie_get_real_event(target) == hisi_pcie_get_real_event(event);
}
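/*
 * A group is only valid if the distinct "real" events it needs can all fit
 * in the HISI_PCIE_MAX_COUNTERS hardware counters.
 */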
static bool hisi_pcie_pmu_validate_event_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
struct perf_event *event_group[HISI_PCIE_MAX_COUNTERS];
int counters = 1;
int num;
event_group[0] = leader;
if (!is_software_event(leader)) {
if (leader->pmu != event->pmu)
return false;
if (leader != event && !hisi_pcie_pmu_cmp_event(leader, event))
event_group[counters++] = event;
}
for_each_sibling_event(sibling, event->group_leader) {
if (is_software_event(sibling))
continue;
if (sibling->pmu != event->pmu)
return false;
for (num = 0; num < counters; num++) {
if (hisi_pcie_pmu_cmp_event(event_group[num], sibling))
break;
}
if (num == counters)
event_group[counters++] = sibling;
}
return counters <= HISI_PCIE_MAX_COUNTERS;
}
static int hisi_pcie_pmu_event_init(struct perf_event *event)
{
struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
event->cpu = pcie_pmu->on_cpu;
if (EXT_COUNTER_IS_USED(hisi_pcie_get_event(event)))
hwc->event_base = HISI_PCIE_EXT_CNT;
else
hwc->event_base = HISI_PCIE_CNT;
if (event->attr.type != event->pmu->type)
return -ENOENT;
/* Sampling is not supported. */
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EOPNOTSUPP;
if (!hisi_pcie_pmu_valid_filter(event, pcie_pmu))
return -EINVAL;
if (!hisi_pcie_pmu_validate_event_group(event))
return -EINVAL;
return 0;
}
static u64 hisi_pcie_pmu_read_counter(struct perf_event *event)
{
struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
u32 idx = event->hw.idx;
return hisi_pcie_pmu_readq(pcie_pmu, event->hw.event_base, idx);
}
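/*
 * Look for a counter that is already counting the same "real" event. Returns
 * its index if the events are in the same group, -EINVAL if a related event
 * is active in another group, or HISI_PCIE_MAX_COUNTERS if there is none.
 */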
static int hisi_pcie_pmu_find_related_event(struct hisi_pcie_pmu *pcie_pmu,
struct perf_event *event)
{
struct perf_event *sibling;
int idx;
for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
sibling = pcie_pmu->hw_events[idx];
if (!sibling)
continue;
if (!hisi_pcie_pmu_cmp_event(sibling, event))
continue;
		/* Related events must be used in the same group */
if (sibling->group_leader == event->group_leader)
return idx;
else
return -EINVAL;
}
return idx;
}
static int hisi_pcie_pmu_get_event_idx(struct hisi_pcie_pmu *pcie_pmu)
{
int idx;
for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
if (!pcie_pmu->hw_events[idx])
return idx;
}
return -EINVAL;
}
static void hisi_pcie_pmu_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
u64 new_cnt, prev_cnt, delta;
do {
prev_cnt = local64_read(&hwc->prev_count);
new_cnt = hisi_pcie_pmu_read_counter(event);
} while (local64_cmpxchg(&hwc->prev_count, prev_cnt,
new_cnt) != prev_cnt);
delta = (new_cnt - prev_cnt) & HISI_PCIE_MAX_PERIOD;
local64_add(delta, &event->count);
}
static void hisi_pcie_pmu_read(struct perf_event *event)
{
hisi_pcie_pmu_event_update(event);
}
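/*
 * Counters are restarted from HISI_PCIE_INIT_VAL (bit 63 set), i.e. half of
 * the 64-bit range, so the overflow interrupt fires after at most 2^63
 * events.
 */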
static void hisi_pcie_pmu_set_period(struct perf_event *event)
{
struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
local64_set(&hwc->prev_count, HISI_PCIE_INIT_VAL);
hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_CNT, idx, HISI_PCIE_INIT_VAL);
hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EXT_CNT, idx, HISI_PCIE_INIT_VAL);
}
static void hisi_pcie_pmu_enable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc)
{
u32 idx = hwc->idx;
u64 val;
val = hisi_pcie_pmu_readq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx);
val |= HISI_PCIE_EVENT_EN;
hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, val);
}
static void hisi_pcie_pmu_disable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc)
{
u32 idx = hwc->idx;
u64 val;
val = hisi_pcie_pmu_readq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx);
val &= ~HISI_PCIE_EVENT_EN;
hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, val);
}
static void hisi_pcie_pmu_enable_int(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc)
{
u32 idx = hwc->idx;
hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_MASK, idx, 0);
}
static void hisi_pcie_pmu_disable_int(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc)
{
u32 idx = hwc->idx;
hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_MASK, idx, 1);
}
static void hisi_pcie_pmu_reset_counter(struct hisi_pcie_pmu *pcie_pmu, int idx)
{
hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, HISI_PCIE_RESET_CNT);
hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, HISI_PCIE_INIT_SET);
}
static void hisi_pcie_pmu_start(struct perf_event *event, int flags)
{
struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
u64 prev_cnt;
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
return;
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
hisi_pcie_pmu_config_filter(event);
hisi_pcie_pmu_enable_counter(pcie_pmu, hwc);
hisi_pcie_pmu_enable_int(pcie_pmu, hwc);
hisi_pcie_pmu_set_period(event);
if (flags & PERF_EF_RELOAD) {
prev_cnt = local64_read(&hwc->prev_count);
hisi_pcie_pmu_writeq(pcie_pmu, hwc->event_base, idx, prev_cnt);
}
perf_event_update_userpage(event);
}
static void hisi_pcie_pmu_stop(struct perf_event *event, int flags)
{
struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
hisi_pcie_pmu_event_update(event);
hisi_pcie_pmu_disable_int(pcie_pmu, hwc);
hisi_pcie_pmu_disable_counter(pcie_pmu, hwc);
hisi_pcie_pmu_clear_filter(event);
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
if (hwc->state & PERF_HES_UPTODATE)
return;
hwc->state |= PERF_HES_UPTODATE;
}
static int hisi_pcie_pmu_add(struct perf_event *event, int flags)
{
struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
/* Check all working events to find a related event. */
idx = hisi_pcie_pmu_find_related_event(pcie_pmu, event);
if (idx < 0)
return idx;
/* Current event shares an enabled counter with the related event */
if (idx < HISI_PCIE_MAX_COUNTERS) {
hwc->idx = idx;
goto start_count;
}
idx = hisi_pcie_pmu_get_event_idx(pcie_pmu);
if (idx < 0)
return idx;
hwc->idx = idx;
pcie_pmu->hw_events[idx] = event;
	/* Reset the counter to avoid interference from previous measurements. */
hisi_pcie_pmu_reset_counter(pcie_pmu, idx);
start_count:
if (flags & PERF_EF_START)
hisi_pcie_pmu_start(event, PERF_EF_RELOAD);
return 0;
}
static void hisi_pcie_pmu_del(struct perf_event *event, int flags)
{
struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
hisi_pcie_pmu_stop(event, PERF_EF_UPDATE);
pcie_pmu->hw_events[hwc->idx] = NULL;
perf_event_update_userpage(event);
}
static void hisi_pcie_pmu_enable(struct pmu *pmu)
{
struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(pmu);
int num;
for (num = 0; num < HISI_PCIE_MAX_COUNTERS; num++) {
if (pcie_pmu->hw_events[num])
break;
}
if (num == HISI_PCIE_MAX_COUNTERS)
return;
writel(HISI_PCIE_GLOBAL_EN, pcie_pmu->base + HISI_PCIE_GLOBAL_CTRL);
}
static void hisi_pcie_pmu_disable(struct pmu *pmu)
{
struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(pmu);
writel(HISI_PCIE_GLOBAL_NONE, pcie_pmu->base + HISI_PCIE_GLOBAL_CTRL);
}
static irqreturn_t hisi_pcie_pmu_irq(int irq, void *data)
{
struct hisi_pcie_pmu *pcie_pmu = data;
irqreturn_t ret = IRQ_NONE;
struct perf_event *event;
u32 overflown;
int idx;
for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
overflown = hisi_pcie_pmu_readl(pcie_pmu, HISI_PCIE_INT_STAT, idx);
if (!overflown)
continue;
/* Clear status of interrupt. */
hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_STAT, idx, 1);
event = pcie_pmu->hw_events[idx];
if (!event)
continue;
hisi_pcie_pmu_event_update(event);
hisi_pcie_pmu_set_period(event);
ret = IRQ_HANDLED;
}
return ret;
}
static int hisi_pcie_pmu_irq_register(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu)
{
int irq, ret;
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
if (ret < 0) {
pci_err(pdev, "Failed to enable MSI vectors: %d\n", ret);
return ret;
}
irq = pci_irq_vector(pdev, 0);
ret = request_irq(irq, hisi_pcie_pmu_irq, IRQF_NOBALANCING | IRQF_NO_THREAD, DRV_NAME,
pcie_pmu);
if (ret) {
pci_err(pdev, "Failed to register IRQ: %d\n", ret);
pci_free_irq_vectors(pdev);
return ret;
}
pcie_pmu->irq = irq;
return 0;
}
static void hisi_pcie_pmu_irq_unregister(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu)
{
free_irq(pcie_pmu->irq, pcie_pmu);
pci_free_irq_vectors(pdev);
}
static int hisi_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node);
if (pcie_pmu->on_cpu == -1) {
pcie_pmu->on_cpu = cpumask_local_spread(0, dev_to_node(&pcie_pmu->pdev->dev));
WARN_ON(irq_set_affinity(pcie_pmu->irq, cpumask_of(pcie_pmu->on_cpu)));
}
return 0;
}
static int hisi_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node);
unsigned int target;
cpumask_t mask;
int numa_node;
/* Nothing to do if this CPU doesn't own the PMU */
if (pcie_pmu->on_cpu != cpu)
return 0;
pcie_pmu->on_cpu = -1;
/* Choose a local CPU from all online cpus. */
numa_node = dev_to_node(&pcie_pmu->pdev->dev);
if (cpumask_and(&mask, cpumask_of_node(numa_node), cpu_online_mask) &&
cpumask_andnot(&mask, &mask, cpumask_of(cpu)))
target = cpumask_any(&mask);
else
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids) {
pci_err(pcie_pmu->pdev, "There is no CPU to set\n");
return 0;
}
perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target);
/* Use this CPU for event counting */
pcie_pmu->on_cpu = target;
WARN_ON(irq_set_affinity(pcie_pmu->irq, cpumask_of(target)));
return 0;
}
static struct attribute *hisi_pcie_pmu_events_attr[] = {
HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_latency, 0x0010),
HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_cnt, 0x10010),
HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_latency, 0x0210),
HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_cnt, 0x10210),
HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_latency, 0x0011),
HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_cnt, 0x10011),
HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_flux, 0x0804),
HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_time, 0x10804),
HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_flux, 0x0405),
HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_time, 0x10405),
NULL
};
static struct attribute_group hisi_pcie_pmu_events_group = {
.name = "events",
.attrs = hisi_pcie_pmu_events_attr,
};
static struct attribute *hisi_pcie_pmu_format_attr[] = {
HISI_PCIE_PMU_FORMAT_ATTR(event, "config:0-16"),
HISI_PCIE_PMU_FORMAT_ATTR(thr_len, "config1:0-3"),
HISI_PCIE_PMU_FORMAT_ATTR(thr_mode, "config1:4"),
HISI_PCIE_PMU_FORMAT_ATTR(trig_len, "config1:5-8"),
HISI_PCIE_PMU_FORMAT_ATTR(trig_mode, "config1:9"),
HISI_PCIE_PMU_FORMAT_ATTR(len_mode, "config1:10-11"),
HISI_PCIE_PMU_FORMAT_ATTR(port, "config2:0-15"),
HISI_PCIE_PMU_FORMAT_ATTR(bdf, "config2:16-31"),
NULL
};
static const struct attribute_group hisi_pcie_pmu_format_group = {
.name = "format",
.attrs = hisi_pcie_pmu_format_attr,
};
static struct attribute *hisi_pcie_pmu_bus_attrs[] = {
&dev_attr_bus.attr,
NULL
};
static const struct attribute_group hisi_pcie_pmu_bus_attr_group = {
.attrs = hisi_pcie_pmu_bus_attrs,
};
static struct attribute *hisi_pcie_pmu_cpumask_attrs[] = {
&dev_attr_cpumask.attr,
NULL
};
static const struct attribute_group hisi_pcie_pmu_cpumask_attr_group = {
.attrs = hisi_pcie_pmu_cpumask_attrs,
};
static struct attribute *hisi_pcie_pmu_identifier_attrs[] = {
&dev_attr_identifier.attr,
NULL
};
static const struct attribute_group hisi_pcie_pmu_identifier_attr_group = {
.attrs = hisi_pcie_pmu_identifier_attrs,
};
static const struct attribute_group *hisi_pcie_pmu_attr_groups[] = {
&hisi_pcie_pmu_events_group,
&hisi_pcie_pmu_format_group,
&hisi_pcie_pmu_bus_attr_group,
&hisi_pcie_pmu_cpumask_attr_group,
&hisi_pcie_pmu_identifier_attr_group,
NULL
};
static int hisi_pcie_alloc_pmu(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu)
{
struct hisi_pcie_reg_pair regs;
u16 sicl_id, core_id;
char *name;
regs = hisi_pcie_parse_reg_value(pcie_pmu, HISI_PCIE_REG_BDF);
pcie_pmu->bdf_min = regs.lo;
pcie_pmu->bdf_max = regs.hi;
regs = hisi_pcie_parse_reg_value(pcie_pmu, HISI_PCIE_REG_INFO);
sicl_id = regs.hi;
core_id = regs.lo;
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_pcie%u_core%u", sicl_id, core_id);
if (!name)
return -ENOMEM;
pcie_pmu->pdev = pdev;
pcie_pmu->on_cpu = -1;
pcie_pmu->identifier = readl(pcie_pmu->base + HISI_PCIE_REG_VERSION);
pcie_pmu->pmu = (struct pmu) {
.name = name,
.module = THIS_MODULE,
.event_init = hisi_pcie_pmu_event_init,
.pmu_enable = hisi_pcie_pmu_enable,
.pmu_disable = hisi_pcie_pmu_disable,
.add = hisi_pcie_pmu_add,
.del = hisi_pcie_pmu_del,
.start = hisi_pcie_pmu_start,
.stop = hisi_pcie_pmu_stop,
.read = hisi_pcie_pmu_read,
.task_ctx_nr = perf_invalid_context,
.attr_groups = hisi_pcie_pmu_attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
return 0;
}
static int hisi_pcie_init_pmu(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu)
{
int ret;
pcie_pmu->base = pci_ioremap_bar(pdev, 2);
if (!pcie_pmu->base) {
pci_err(pdev, "Ioremap failed for pcie_pmu resource\n");
return -ENOMEM;
}
ret = hisi_pcie_alloc_pmu(pdev, pcie_pmu);
if (ret)
goto err_iounmap;
ret = hisi_pcie_pmu_irq_register(pdev, pcie_pmu);
if (ret)
goto err_iounmap;
ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node);
if (ret) {
pci_err(pdev, "Failed to register hotplug: %d\n", ret);
goto err_irq_unregister;
}
ret = perf_pmu_register(&pcie_pmu->pmu, pcie_pmu->pmu.name, -1);
if (ret) {
pci_err(pdev, "Failed to register PCIe PMU: %d\n", ret);
goto err_hotplug_unregister;
}
return ret;
err_hotplug_unregister:
cpuhp_state_remove_instance_nocalls(
CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node);
err_irq_unregister:
hisi_pcie_pmu_irq_unregister(pdev, pcie_pmu);
err_iounmap:
iounmap(pcie_pmu->base);
return ret;
}
static void hisi_pcie_uninit_pmu(struct pci_dev *pdev)
{
struct hisi_pcie_pmu *pcie_pmu = pci_get_drvdata(pdev);
perf_pmu_unregister(&pcie_pmu->pmu);
cpuhp_state_remove_instance_nocalls(
CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node);
hisi_pcie_pmu_irq_unregister(pdev, pcie_pmu);
iounmap(pcie_pmu->base);
}
static int hisi_pcie_init_dev(struct pci_dev *pdev)
{
int ret;
ret = pcim_enable_device(pdev);
if (ret) {
pci_err(pdev, "Failed to enable PCI device: %d\n", ret);
return ret;
}
ret = pcim_iomap_regions(pdev, BIT(2), DRV_NAME);
if (ret < 0) {
pci_err(pdev, "Failed to request PCI mem regions: %d\n", ret);
return ret;
}
pci_set_master(pdev);
return 0;
}
static int hisi_pcie_pmu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hisi_pcie_pmu *pcie_pmu;
int ret;
pcie_pmu = devm_kzalloc(&pdev->dev, sizeof(*pcie_pmu), GFP_KERNEL);
if (!pcie_pmu)
return -ENOMEM;
ret = hisi_pcie_init_dev(pdev);
if (ret)
return ret;
ret = hisi_pcie_init_pmu(pdev, pcie_pmu);
if (ret)
return ret;
pci_set_drvdata(pdev, pcie_pmu);
return ret;
}
static void hisi_pcie_pmu_remove(struct pci_dev *pdev)
{
hisi_pcie_uninit_pmu(pdev);
pci_set_drvdata(pdev, NULL);
}
static const struct pci_device_id hisi_pcie_pmu_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa12d) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, hisi_pcie_pmu_ids);
static struct pci_driver hisi_pcie_pmu_driver = {
.name = DRV_NAME,
.id_table = hisi_pcie_pmu_ids,
.probe = hisi_pcie_pmu_probe,
.remove = hisi_pcie_pmu_remove,
};
static int __init hisi_pcie_module_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE,
"AP_PERF_ARM_HISI_PCIE_PMU_ONLINE",
hisi_pcie_pmu_online_cpu,
hisi_pcie_pmu_offline_cpu);
if (ret) {
pr_err("Failed to setup PCIe PMU hotplug: %d\n", ret);
return ret;
}
ret = pci_register_driver(&hisi_pcie_pmu_driver);
if (ret)
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE);
return ret;
}
module_init(hisi_pcie_module_init);
static void __exit hisi_pcie_module_exit(void)
{
pci_unregister_driver(&hisi_pcie_pmu_driver);
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE);
}
module_exit(hisi_pcie_module_exit);
MODULE_DESCRIPTION("HiSilicon PCIe PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Qi Liu <[email protected]>");
| linux-master | drivers/perf/hisilicon/hisi_pcie_pmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HiSilicon SoC CPA(Coherency Protocol Agent) hardware event counters support
*
* Copyright (C) 2022 HiSilicon Limited
* Author: Qi Liu <[email protected]>
*
* This code is based on the uncore PMUs like arm-cci and arm-ccn.
*/
#define pr_fmt(fmt) "cpa pmu: " fmt
#include <linux/acpi.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/smp.h>
#include "hisi_uncore_pmu.h"
/* CPA register definition */
#define CPA_PERF_CTRL 0x1c00
#define CPA_EVENT_CTRL 0x1c04
#define CPA_INT_MASK 0x1c70
#define CPA_INT_STATUS 0x1c78
#define CPA_INT_CLEAR 0x1c7c
#define CPA_EVENT_TYPE0 0x1c80
#define CPA_VERSION 0x1cf0
#define CPA_CNT0_LOWER 0x1d00
#define CPA_CFG_REG 0x0534
/* CPA operation command */
#define CPA_PERF_CTRL_EN BIT_ULL(0)
#define CPA_EVTYPE_MASK 0xffUL
#define CPA_PM_CTRL BIT_ULL(9)
/* CPA has 8-counters */
#define CPA_NR_COUNTERS 0x8
#define CPA_COUNTER_BITS 64
#define CPA_NR_EVENTS 0xff
#define CPA_REG_OFFSET 0x8
static u32 hisi_cpa_pmu_get_counter_offset(int idx)
{
return (CPA_CNT0_LOWER + idx * CPA_REG_OFFSET);
}
static u64 hisi_cpa_pmu_read_counter(struct hisi_pmu *cpa_pmu,
struct hw_perf_event *hwc)
{
return readq(cpa_pmu->base + hisi_cpa_pmu_get_counter_offset(hwc->idx));
}
static void hisi_cpa_pmu_write_counter(struct hisi_pmu *cpa_pmu,
struct hw_perf_event *hwc, u64 val)
{
writeq(val, cpa_pmu->base + hisi_cpa_pmu_get_counter_offset(hwc->idx));
}
static void hisi_cpa_pmu_write_evtype(struct hisi_pmu *cpa_pmu, int idx,
u32 type)
{
u32 reg, reg_idx, shift, val;
/*
	 * Select the appropriate event select register (CPA_EVENT_TYPE0/1).
	 * There are 2 event select registers for the 8 hardware counters.
	 * Each event code is 8 bits wide; the first 4 hardware counters use
	 * CPA_EVENT_TYPE0 and the last 4 hardware counters use
	 * CPA_EVENT_TYPE1.
*/
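	/* e.g. idx 6 -> reg CPA_EVENT_TYPE1, reg_idx 2, shift 16 */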
reg = CPA_EVENT_TYPE0 + (idx / 4) * 4;
reg_idx = idx % 4;
shift = CPA_REG_OFFSET * reg_idx;
/* Write event code to CPA_EVENT_TYPEx Register */
val = readl(cpa_pmu->base + reg);
val &= ~(CPA_EVTYPE_MASK << shift);
val |= type << shift;
writel(val, cpa_pmu->base + reg);
}
static void hisi_cpa_pmu_start_counters(struct hisi_pmu *cpa_pmu)
{
u32 val;
val = readl(cpa_pmu->base + CPA_PERF_CTRL);
val |= CPA_PERF_CTRL_EN;
writel(val, cpa_pmu->base + CPA_PERF_CTRL);
}
static void hisi_cpa_pmu_stop_counters(struct hisi_pmu *cpa_pmu)
{
u32 val;
val = readl(cpa_pmu->base + CPA_PERF_CTRL);
val &= ~(CPA_PERF_CTRL_EN);
writel(val, cpa_pmu->base + CPA_PERF_CTRL);
}
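/*
 * Setting CPA_PM_CTRL disables the agent's power management; PM must be off
 * while the PMU is in use (see the comment in hisi_cpa_pmu_probe()) and is
 * re-enabled on the remove/error paths.
 */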
static void hisi_cpa_pmu_disable_pm(struct hisi_pmu *cpa_pmu)
{
u32 val;
val = readl(cpa_pmu->base + CPA_CFG_REG);
val |= CPA_PM_CTRL;
writel(val, cpa_pmu->base + CPA_CFG_REG);
}
static void hisi_cpa_pmu_enable_pm(struct hisi_pmu *cpa_pmu)
{
u32 val;
val = readl(cpa_pmu->base + CPA_CFG_REG);
val &= ~(CPA_PM_CTRL);
writel(val, cpa_pmu->base + CPA_CFG_REG);
}
static void hisi_cpa_pmu_enable_counter(struct hisi_pmu *cpa_pmu,
struct hw_perf_event *hwc)
{
u32 val;
/* Enable counter index in CPA_EVENT_CTRL register */
val = readl(cpa_pmu->base + CPA_EVENT_CTRL);
val |= 1 << hwc->idx;
writel(val, cpa_pmu->base + CPA_EVENT_CTRL);
}
static void hisi_cpa_pmu_disable_counter(struct hisi_pmu *cpa_pmu,
struct hw_perf_event *hwc)
{
u32 val;
/* Clear counter index in CPA_EVENT_CTRL register */
val = readl(cpa_pmu->base + CPA_EVENT_CTRL);
val &= ~(1UL << hwc->idx);
writel(val, cpa_pmu->base + CPA_EVENT_CTRL);
}
static void hisi_cpa_pmu_enable_counter_int(struct hisi_pmu *cpa_pmu,
struct hw_perf_event *hwc)
{
u32 val;
/* Write 0 to enable interrupt */
val = readl(cpa_pmu->base + CPA_INT_MASK);
val &= ~(1UL << hwc->idx);
writel(val, cpa_pmu->base + CPA_INT_MASK);
}
static void hisi_cpa_pmu_disable_counter_int(struct hisi_pmu *cpa_pmu,
struct hw_perf_event *hwc)
{
u32 val;
/* Write 1 to mask interrupt */
val = readl(cpa_pmu->base + CPA_INT_MASK);
val |= 1 << hwc->idx;
writel(val, cpa_pmu->base + CPA_INT_MASK);
}
static u32 hisi_cpa_pmu_get_int_status(struct hisi_pmu *cpa_pmu)
{
return readl(cpa_pmu->base + CPA_INT_STATUS);
}
static void hisi_cpa_pmu_clear_int_status(struct hisi_pmu *cpa_pmu, int idx)
{
writel(1 << idx, cpa_pmu->base + CPA_INT_CLEAR);
}
static const struct acpi_device_id hisi_cpa_pmu_acpi_match[] = {
{ "HISI0281", },
{}
};
MODULE_DEVICE_TABLE(acpi, hisi_cpa_pmu_acpi_match);
static int hisi_cpa_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *cpa_pmu)
{
if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
&cpa_pmu->sicl_id)) {
dev_err(&pdev->dev, "Can not read sicl-id\n");
return -EINVAL;
}
if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
&cpa_pmu->index_id)) {
dev_err(&pdev->dev, "Cannot read idx-id\n");
return -EINVAL;
}
cpa_pmu->ccl_id = -1;
cpa_pmu->sccl_id = -1;
cpa_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cpa_pmu->base))
return PTR_ERR(cpa_pmu->base);
cpa_pmu->identifier = readl(cpa_pmu->base + CPA_VERSION);
return 0;
}
static struct attribute *hisi_cpa_pmu_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-15"),
NULL
};
static const struct attribute_group hisi_cpa_pmu_format_group = {
.name = "format",
.attrs = hisi_cpa_pmu_format_attr,
};
static struct attribute *hisi_cpa_pmu_events_attr[] = {
HISI_PMU_EVENT_ATTR(cpa_cycles, 0x00),
HISI_PMU_EVENT_ATTR(cpa_p1_wr_dat, 0x61),
HISI_PMU_EVENT_ATTR(cpa_p1_rd_dat, 0x62),
HISI_PMU_EVENT_ATTR(cpa_p0_wr_dat, 0xE1),
HISI_PMU_EVENT_ATTR(cpa_p0_rd_dat, 0xE2),
NULL
};
static const struct attribute_group hisi_cpa_pmu_events_group = {
.name = "events",
.attrs = hisi_cpa_pmu_events_attr,
};
static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
static struct attribute *hisi_cpa_pmu_cpumask_attrs[] = {
&dev_attr_cpumask.attr,
NULL
};
static const struct attribute_group hisi_cpa_pmu_cpumask_attr_group = {
.attrs = hisi_cpa_pmu_cpumask_attrs,
};
static struct device_attribute hisi_cpa_pmu_identifier_attr =
__ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
static struct attribute *hisi_cpa_pmu_identifier_attrs[] = {
&hisi_cpa_pmu_identifier_attr.attr,
NULL
};
static const struct attribute_group hisi_cpa_pmu_identifier_group = {
.attrs = hisi_cpa_pmu_identifier_attrs,
};
static const struct attribute_group *hisi_cpa_pmu_attr_groups[] = {
&hisi_cpa_pmu_format_group,
&hisi_cpa_pmu_events_group,
&hisi_cpa_pmu_cpumask_attr_group,
&hisi_cpa_pmu_identifier_group,
NULL
};
static const struct hisi_uncore_ops hisi_uncore_cpa_pmu_ops = {
.write_evtype = hisi_cpa_pmu_write_evtype,
.get_event_idx = hisi_uncore_pmu_get_event_idx,
.start_counters = hisi_cpa_pmu_start_counters,
.stop_counters = hisi_cpa_pmu_stop_counters,
.enable_counter = hisi_cpa_pmu_enable_counter,
.disable_counter = hisi_cpa_pmu_disable_counter,
.enable_counter_int = hisi_cpa_pmu_enable_counter_int,
.disable_counter_int = hisi_cpa_pmu_disable_counter_int,
.write_counter = hisi_cpa_pmu_write_counter,
.read_counter = hisi_cpa_pmu_read_counter,
.get_int_status = hisi_cpa_pmu_get_int_status,
.clear_int_status = hisi_cpa_pmu_clear_int_status,
};
static int hisi_cpa_pmu_dev_probe(struct platform_device *pdev,
struct hisi_pmu *cpa_pmu)
{
int ret;
ret = hisi_cpa_pmu_init_data(pdev, cpa_pmu);
if (ret)
return ret;
ret = hisi_uncore_pmu_init_irq(cpa_pmu, pdev);
if (ret)
return ret;
cpa_pmu->counter_bits = CPA_COUNTER_BITS;
cpa_pmu->check_event = CPA_NR_EVENTS;
cpa_pmu->pmu_events.attr_groups = hisi_cpa_pmu_attr_groups;
cpa_pmu->ops = &hisi_uncore_cpa_pmu_ops;
cpa_pmu->num_counters = CPA_NR_COUNTERS;
cpa_pmu->dev = &pdev->dev;
cpa_pmu->on_cpu = -1;
return 0;
}
static int hisi_cpa_pmu_probe(struct platform_device *pdev)
{
struct hisi_pmu *cpa_pmu;
char *name;
int ret;
cpa_pmu = devm_kzalloc(&pdev->dev, sizeof(*cpa_pmu), GFP_KERNEL);
if (!cpa_pmu)
return -ENOMEM;
ret = hisi_cpa_pmu_dev_probe(pdev, cpa_pmu);
if (ret)
return ret;
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_cpa%u",
cpa_pmu->sicl_id, cpa_pmu->index_id);
if (!name)
return -ENOMEM;
hisi_pmu_init(cpa_pmu, THIS_MODULE);
/* Power Management should be disabled before using CPA PMU. */
hisi_cpa_pmu_disable_pm(cpa_pmu);
ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
&cpa_pmu->node);
if (ret) {
dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
hisi_cpa_pmu_enable_pm(cpa_pmu);
return ret;
}
ret = perf_pmu_register(&cpa_pmu->pmu, name, -1);
if (ret) {
dev_err(cpa_pmu->dev, "PMU register failed\n");
cpuhp_state_remove_instance_nocalls(
CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE, &cpa_pmu->node);
hisi_cpa_pmu_enable_pm(cpa_pmu);
return ret;
}
platform_set_drvdata(pdev, cpa_pmu);
return ret;
}
static int hisi_cpa_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *cpa_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&cpa_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
&cpa_pmu->node);
hisi_cpa_pmu_enable_pm(cpa_pmu);
return 0;
}
static struct platform_driver hisi_cpa_pmu_driver = {
.driver = {
.name = "hisi_cpa_pmu",
.acpi_match_table = ACPI_PTR(hisi_cpa_pmu_acpi_match),
.suppress_bind_attrs = true,
},
.probe = hisi_cpa_pmu_probe,
.remove = hisi_cpa_pmu_remove,
};
static int __init hisi_cpa_pmu_module_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
"AP_PERF_ARM_HISI_CPA_ONLINE",
hisi_uncore_pmu_online_cpu,
hisi_uncore_pmu_offline_cpu);
if (ret) {
pr_err("setup hotplug failed: %d\n", ret);
return ret;
}
ret = platform_driver_register(&hisi_cpa_pmu_driver);
if (ret)
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE);
return ret;
}
module_init(hisi_cpa_pmu_module_init);
static void __exit hisi_cpa_pmu_module_exit(void)
{
platform_driver_unregister(&hisi_cpa_pmu_driver);
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE);
}
module_exit(hisi_cpa_pmu_module_exit);
MODULE_DESCRIPTION("HiSilicon SoC CPA PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Qi Liu <[email protected]>");
| linux-master | drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HiSilicon SLLC uncore Hardware event counters support
*
* Copyright (C) 2020 HiSilicon Limited
* Author: Shaokun Zhang <[email protected]>
*
* This code is based on the uncore PMUs like arm-cci and arm-ccn.
*/
#include <linux/acpi.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/smp.h>
#include "hisi_uncore_pmu.h"
/* SLLC register definition */
#define SLLC_INT_MASK 0x0814
#define SLLC_INT_STATUS 0x0818
#define SLLC_INT_CLEAR 0x081c
#define SLLC_PERF_CTRL 0x1c00
#define SLLC_SRCID_CTRL 0x1c04
#define SLLC_TGTID_CTRL 0x1c08
#define SLLC_EVENT_CTRL 0x1c14
#define SLLC_EVENT_TYPE0 0x1c18
#define SLLC_VERSION 0x1cf0
#define SLLC_EVENT_CNT0_L 0x1d00
#define SLLC_EVTYPE_MASK 0xff
#define SLLC_PERF_CTRL_EN BIT(0)
#define SLLC_FILT_EN BIT(1)
#define SLLC_TRACETAG_EN BIT(2)
#define SLLC_SRCID_EN BIT(4)
#define SLLC_SRCID_NONE 0x0
#define SLLC_TGTID_EN BIT(5)
#define SLLC_TGTID_NONE 0x0
#define SLLC_TGTID_MIN_SHIFT 1
#define SLLC_TGTID_MAX_SHIFT 12
#define SLLC_SRCID_CMD_SHIFT 1
#define SLLC_SRCID_MSK_SHIFT 12
#define SLLC_NR_EVENTS 0x80
HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_min, config1, 10, 0);
HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_max, config1, 21, 11);
HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 32, 22);
HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 43, 33);
HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 44, 44);
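/*
 * Illustrative usage (hypothetical PMU instance name and filter values):
 * the fields above are passed through perf's config1, e.g.
 *   perf stat -e hisi_sccl1_sllc0/rx_req,tgtid_min=0x1,tgtid_max=0x8/
 */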
static bool tgtid_is_valid(u32 max, u32 min)
{
return max > 0 && max >= min;
}
static void hisi_sllc_pmu_enable_tracetag(struct perf_event *event)
{
struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
u32 tt_en = hisi_get_tracetag_en(event);
if (tt_en) {
u32 val;
val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
val |= SLLC_TRACETAG_EN | SLLC_FILT_EN;
writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
}
}
static void hisi_sllc_pmu_disable_tracetag(struct perf_event *event)
{
struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
u32 tt_en = hisi_get_tracetag_en(event);
if (tt_en) {
u32 val;
val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
val &= ~(SLLC_TRACETAG_EN | SLLC_FILT_EN);
writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
}
}
static void hisi_sllc_pmu_config_tgtid(struct perf_event *event)
{
struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
u32 min = hisi_get_tgtid_min(event);
u32 max = hisi_get_tgtid_max(event);
if (tgtid_is_valid(max, min)) {
u32 val = (max << SLLC_TGTID_MAX_SHIFT) | (min << SLLC_TGTID_MIN_SHIFT);
writel(val, sllc_pmu->base + SLLC_TGTID_CTRL);
/* Enable the tgtid */
val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
val |= SLLC_TGTID_EN | SLLC_FILT_EN;
writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
}
}
static void hisi_sllc_pmu_clear_tgtid(struct perf_event *event)
{
struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
u32 min = hisi_get_tgtid_min(event);
u32 max = hisi_get_tgtid_max(event);
if (tgtid_is_valid(max, min)) {
u32 val;
writel(SLLC_TGTID_NONE, sllc_pmu->base + SLLC_TGTID_CTRL);
/* Disable the tgtid */
val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
val &= ~(SLLC_TGTID_EN | SLLC_FILT_EN);
writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
}
}
static void hisi_sllc_pmu_config_srcid(struct perf_event *event)
{
struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
u32 cmd = hisi_get_srcid_cmd(event);
if (cmd) {
u32 val, msk;
msk = hisi_get_srcid_msk(event);
val = (cmd << SLLC_SRCID_CMD_SHIFT) | (msk << SLLC_SRCID_MSK_SHIFT);
writel(val, sllc_pmu->base + SLLC_SRCID_CTRL);
/* Enable the srcid */
val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
val |= SLLC_SRCID_EN | SLLC_FILT_EN;
writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
}
}
static void hisi_sllc_pmu_clear_srcid(struct perf_event *event)
{
struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
u32 cmd = hisi_get_srcid_cmd(event);
if (cmd) {
u32 val;
writel(SLLC_SRCID_NONE, sllc_pmu->base + SLLC_SRCID_CTRL);
/* Disable the srcid */
val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
val &= ~(SLLC_SRCID_EN | SLLC_FILT_EN);
writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
}
}
static void hisi_sllc_pmu_enable_filter(struct perf_event *event)
{
if (event->attr.config1 != 0x0) {
hisi_sllc_pmu_enable_tracetag(event);
hisi_sllc_pmu_config_srcid(event);
hisi_sllc_pmu_config_tgtid(event);
}
}
static void hisi_sllc_pmu_clear_filter(struct perf_event *event)
{
if (event->attr.config1 != 0x0) {
hisi_sllc_pmu_disable_tracetag(event);
hisi_sllc_pmu_clear_srcid(event);
hisi_sllc_pmu_clear_tgtid(event);
}
}
static u32 hisi_sllc_pmu_get_counter_offset(int idx)
{
return (SLLC_EVENT_CNT0_L + idx * 8);
}
static u64 hisi_sllc_pmu_read_counter(struct hisi_pmu *sllc_pmu,
struct hw_perf_event *hwc)
{
return readq(sllc_pmu->base +
hisi_sllc_pmu_get_counter_offset(hwc->idx));
}
static void hisi_sllc_pmu_write_counter(struct hisi_pmu *sllc_pmu,
struct hw_perf_event *hwc, u64 val)
{
writeq(val, sllc_pmu->base +
hisi_sllc_pmu_get_counter_offset(hwc->idx));
}
static void hisi_sllc_pmu_write_evtype(struct hisi_pmu *sllc_pmu, int idx,
u32 type)
{
u32 reg, reg_idx, shift, val;
/*
* Select the appropriate event select register (SLLC_EVENT_TYPE0/1).
* There are 2 event select registers for the 8 hardware counters.
* The event code is 8 bits wide: the first 4 hardware counters use
* SLLC_EVENT_TYPE0 and the last 4 hardware counters use
* SLLC_EVENT_TYPE1.
*/
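/*
 * For illustration: with idx = 5, reg = SLLC_EVENT_TYPE0 + 4 (i.e.
 * SLLC_EVENT_TYPE1), reg_idx = 1 and shift = 8, so the event code lands
 * in bits [15:8] of that register.
 */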
reg = SLLC_EVENT_TYPE0 + (idx / 4) * 4;
reg_idx = idx % 4;
shift = 8 * reg_idx;
/* Write event code to SLLC_EVENT_TYPEx Register */
val = readl(sllc_pmu->base + reg);
val &= ~(SLLC_EVTYPE_MASK << shift);
val |= (type << shift);
writel(val, sllc_pmu->base + reg);
}
static void hisi_sllc_pmu_start_counters(struct hisi_pmu *sllc_pmu)
{
u32 val;
val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
val |= SLLC_PERF_CTRL_EN;
writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
}
static void hisi_sllc_pmu_stop_counters(struct hisi_pmu *sllc_pmu)
{
u32 val;
val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
val &= ~(SLLC_PERF_CTRL_EN);
writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
}
static void hisi_sllc_pmu_enable_counter(struct hisi_pmu *sllc_pmu,
struct hw_perf_event *hwc)
{
u32 val;
val = readl(sllc_pmu->base + SLLC_EVENT_CTRL);
val |= 1 << hwc->idx;
writel(val, sllc_pmu->base + SLLC_EVENT_CTRL);
}
static void hisi_sllc_pmu_disable_counter(struct hisi_pmu *sllc_pmu,
struct hw_perf_event *hwc)
{
u32 val;
val = readl(sllc_pmu->base + SLLC_EVENT_CTRL);
val &= ~(1 << hwc->idx);
writel(val, sllc_pmu->base + SLLC_EVENT_CTRL);
}
static void hisi_sllc_pmu_enable_counter_int(struct hisi_pmu *sllc_pmu,
struct hw_perf_event *hwc)
{
u32 val;
val = readl(sllc_pmu->base + SLLC_INT_MASK);
/* Write 0 to enable interrupt */
val &= ~(1 << hwc->idx);
writel(val, sllc_pmu->base + SLLC_INT_MASK);
}
static void hisi_sllc_pmu_disable_counter_int(struct hisi_pmu *sllc_pmu,
struct hw_perf_event *hwc)
{
u32 val;
val = readl(sllc_pmu->base + SLLC_INT_MASK);
/* Write 1 to mask interrupt */
val |= 1 << hwc->idx;
writel(val, sllc_pmu->base + SLLC_INT_MASK);
}
static u32 hisi_sllc_pmu_get_int_status(struct hisi_pmu *sllc_pmu)
{
return readl(sllc_pmu->base + SLLC_INT_STATUS);
}
static void hisi_sllc_pmu_clear_int_status(struct hisi_pmu *sllc_pmu, int idx)
{
writel(1 << idx, sllc_pmu->base + SLLC_INT_CLEAR);
}
static const struct acpi_device_id hisi_sllc_pmu_acpi_match[] = {
{ "HISI0263", },
{}
};
MODULE_DEVICE_TABLE(acpi, hisi_sllc_pmu_acpi_match);
static int hisi_sllc_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *sllc_pmu)
{
/*
* Use the SCCL_ID and the index ID to identify the SLLC PMU;
* the SCCL_ID is read from the CPU's MPIDR_EL1.
*/
if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
&sllc_pmu->sccl_id)) {
dev_err(&pdev->dev, "Cannot read sccl-id!\n");
return -EINVAL;
}
if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
&sllc_pmu->index_id)) {
dev_err(&pdev->dev, "Cannot read idx-id!\n");
return -EINVAL;
}
/* SLLC PMUs only share the same SCCL */
sllc_pmu->ccl_id = -1;
sllc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sllc_pmu->base)) {
dev_err(&pdev->dev, "ioremap failed for sllc_pmu resource.\n");
return PTR_ERR(sllc_pmu->base);
}
sllc_pmu->identifier = readl(sllc_pmu->base + SLLC_VERSION);
return 0;
}
static struct attribute *hisi_sllc_pmu_v2_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
HISI_PMU_FORMAT_ATTR(tgtid_min, "config1:0-10"),
HISI_PMU_FORMAT_ATTR(tgtid_max, "config1:11-21"),
HISI_PMU_FORMAT_ATTR(srcid_cmd, "config1:22-32"),
HISI_PMU_FORMAT_ATTR(srcid_msk, "config1:33-43"),
HISI_PMU_FORMAT_ATTR(tracetag_en, "config1:44"),
NULL
};
static const struct attribute_group hisi_sllc_pmu_v2_format_group = {
.name = "format",
.attrs = hisi_sllc_pmu_v2_format_attr,
};
static struct attribute *hisi_sllc_pmu_v2_events_attr[] = {
HISI_PMU_EVENT_ATTR(rx_req, 0x30),
HISI_PMU_EVENT_ATTR(rx_data, 0x31),
HISI_PMU_EVENT_ATTR(tx_req, 0x34),
HISI_PMU_EVENT_ATTR(tx_data, 0x35),
HISI_PMU_EVENT_ATTR(cycles, 0x09),
NULL
};
static const struct attribute_group hisi_sllc_pmu_v2_events_group = {
.name = "events",
.attrs = hisi_sllc_pmu_v2_events_attr,
};
static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
static struct attribute *hisi_sllc_pmu_cpumask_attrs[] = {
&dev_attr_cpumask.attr,
NULL
};
static const struct attribute_group hisi_sllc_pmu_cpumask_attr_group = {
.attrs = hisi_sllc_pmu_cpumask_attrs,
};
static struct device_attribute hisi_sllc_pmu_identifier_attr =
__ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
static struct attribute *hisi_sllc_pmu_identifier_attrs[] = {
&hisi_sllc_pmu_identifier_attr.attr,
NULL
};
static const struct attribute_group hisi_sllc_pmu_identifier_group = {
.attrs = hisi_sllc_pmu_identifier_attrs,
};
static const struct attribute_group *hisi_sllc_pmu_v2_attr_groups[] = {
&hisi_sllc_pmu_v2_format_group,
&hisi_sllc_pmu_v2_events_group,
&hisi_sllc_pmu_cpumask_attr_group,
&hisi_sllc_pmu_identifier_group,
NULL
};
static const struct hisi_uncore_ops hisi_uncore_sllc_ops = {
.write_evtype = hisi_sllc_pmu_write_evtype,
.get_event_idx = hisi_uncore_pmu_get_event_idx,
.start_counters = hisi_sllc_pmu_start_counters,
.stop_counters = hisi_sllc_pmu_stop_counters,
.enable_counter = hisi_sllc_pmu_enable_counter,
.disable_counter = hisi_sllc_pmu_disable_counter,
.enable_counter_int = hisi_sllc_pmu_enable_counter_int,
.disable_counter_int = hisi_sllc_pmu_disable_counter_int,
.write_counter = hisi_sllc_pmu_write_counter,
.read_counter = hisi_sllc_pmu_read_counter,
.get_int_status = hisi_sllc_pmu_get_int_status,
.clear_int_status = hisi_sllc_pmu_clear_int_status,
.enable_filter = hisi_sllc_pmu_enable_filter,
.disable_filter = hisi_sllc_pmu_clear_filter,
};
static int hisi_sllc_pmu_dev_probe(struct platform_device *pdev,
struct hisi_pmu *sllc_pmu)
{
int ret;
ret = hisi_sllc_pmu_init_data(pdev, sllc_pmu);
if (ret)
return ret;
ret = hisi_uncore_pmu_init_irq(sllc_pmu, pdev);
if (ret)
return ret;
sllc_pmu->pmu_events.attr_groups = hisi_sllc_pmu_v2_attr_groups;
sllc_pmu->ops = &hisi_uncore_sllc_ops;
sllc_pmu->check_event = SLLC_NR_EVENTS;
sllc_pmu->counter_bits = 64;
sllc_pmu->num_counters = 8;
sllc_pmu->dev = &pdev->dev;
sllc_pmu->on_cpu = -1;
return 0;
}
static int hisi_sllc_pmu_probe(struct platform_device *pdev)
{
struct hisi_pmu *sllc_pmu;
char *name;
int ret;
sllc_pmu = devm_kzalloc(&pdev->dev, sizeof(*sllc_pmu), GFP_KERNEL);
if (!sllc_pmu)
return -ENOMEM;
ret = hisi_sllc_pmu_dev_probe(pdev, sllc_pmu);
if (ret)
return ret;
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_sllc%u",
sllc_pmu->sccl_id, sllc_pmu->index_id);
if (!name)
return -ENOMEM;
ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
&sllc_pmu->node);
if (ret) {
dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
return ret;
}
hisi_pmu_init(sllc_pmu, THIS_MODULE);
ret = perf_pmu_register(&sllc_pmu->pmu, name, -1);
if (ret) {
dev_err(sllc_pmu->dev, "PMU register failed, ret = %d\n", ret);
cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
&sllc_pmu->node);
return ret;
}
platform_set_drvdata(pdev, sllc_pmu);
return ret;
}
static int hisi_sllc_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *sllc_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&sllc_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
&sllc_pmu->node);
return 0;
}
static struct platform_driver hisi_sllc_pmu_driver = {
.driver = {
.name = "hisi_sllc_pmu",
.acpi_match_table = hisi_sllc_pmu_acpi_match,
.suppress_bind_attrs = true,
},
.probe = hisi_sllc_pmu_probe,
.remove = hisi_sllc_pmu_remove,
};
static int __init hisi_sllc_pmu_module_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
"AP_PERF_ARM_HISI_SLLC_ONLINE",
hisi_uncore_pmu_online_cpu,
hisi_uncore_pmu_offline_cpu);
if (ret) {
pr_err("SLLC PMU: cpuhp state setup failed, ret = %d\n", ret);
return ret;
}
ret = platform_driver_register(&hisi_sllc_pmu_driver);
if (ret)
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE);
return ret;
}
module_init(hisi_sllc_pmu_module_init);
static void __exit hisi_sllc_pmu_module_exit(void)
{
platform_driver_unregister(&hisi_sllc_pmu_driver);
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE);
}
module_exit(hisi_sllc_pmu_module_exit);
MODULE_DESCRIPTION("HiSilicon SLLC uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <[email protected]>");
MODULE_AUTHOR("Qi Liu <[email protected]>");
| linux-master | drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HiSilicon SoC DDRC uncore Hardware event counters support
*
* Copyright (C) 2017 HiSilicon Limited
* Author: Shaokun Zhang <[email protected]>
* Anurup M <[email protected]>
*
* This code is based on the uncore PMUs like arm-cci and arm-ccn.
*/
#include <linux/acpi.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/smp.h>
#include "hisi_uncore_pmu.h"
/* DDRC register definition in v1 */
#define DDRC_PERF_CTRL 0x010
#define DDRC_FLUX_WR 0x380
#define DDRC_FLUX_RD 0x384
#define DDRC_FLUX_WCMD 0x388
#define DDRC_FLUX_RCMD 0x38c
#define DDRC_PRE_CMD 0x3c0
#define DDRC_ACT_CMD 0x3c4
#define DDRC_RNK_CHG 0x3cc
#define DDRC_RW_CHG 0x3d0
#define DDRC_EVENT_CTRL 0x6C0
#define DDRC_INT_MASK 0x6c8
#define DDRC_INT_STATUS 0x6cc
#define DDRC_INT_CLEAR 0x6d0
#define DDRC_VERSION 0x710
/* DDRC register definition in v2 */
#define DDRC_V2_INT_MASK 0x528
#define DDRC_V2_INT_STATUS 0x52c
#define DDRC_V2_INT_CLEAR 0x530
#define DDRC_V2_EVENT_CNT 0xe00
#define DDRC_V2_EVENT_CTRL 0xe70
#define DDRC_V2_EVENT_TYPE 0xe74
#define DDRC_V2_PERF_CTRL 0xeA0
/* DDRC has 8-counters */
#define DDRC_NR_COUNTERS 0x8
#define DDRC_V1_PERF_CTRL_EN 0x2
#define DDRC_V2_PERF_CTRL_EN 0x1
#define DDRC_V1_NR_EVENTS 0x7
#define DDRC_V2_NR_EVENTS 0x90
/*
* For PMU v1 there are eight events, each mapped by hardware to a
* fixed-purpose counter whose register offset is not consistent.
* Therefore there is no event type to write, and the driver assumes that
* the event code (0 to 7) is equal to the counter index.
*/
#define GET_DDRC_EVENTID(hwc) (hwc->config_base & 0x7)
static const u32 ddrc_reg_off[] = {
DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD,
DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG
};
/*
* Select the counter register offset using the counter index.
* In PMU v1 there are no programmable counters; the count
* is read from the statistics counter register itself.
*/
static u32 hisi_ddrc_pmu_v1_get_counter_offset(int cntr_idx)
{
return ddrc_reg_off[cntr_idx];
}
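/*
 * For illustration: event code 2 (flux_wcmd) always reads
 * ddrc_reg_off[2], i.e. the DDRC_FLUX_WCMD register at offset 0x388.
 */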
static u32 hisi_ddrc_pmu_v2_get_counter_offset(int cntr_idx)
{
return DDRC_V2_EVENT_CNT + cntr_idx * 8;
}
static u64 hisi_ddrc_pmu_v1_read_counter(struct hisi_pmu *ddrc_pmu,
struct hw_perf_event *hwc)
{
return readl(ddrc_pmu->base +
hisi_ddrc_pmu_v1_get_counter_offset(hwc->idx));
}
static void hisi_ddrc_pmu_v1_write_counter(struct hisi_pmu *ddrc_pmu,
struct hw_perf_event *hwc, u64 val)
{
writel((u32)val,
ddrc_pmu->base + hisi_ddrc_pmu_v1_get_counter_offset(hwc->idx));
}
static u64 hisi_ddrc_pmu_v2_read_counter(struct hisi_pmu *ddrc_pmu,
struct hw_perf_event *hwc)
{
return readq(ddrc_pmu->base +
hisi_ddrc_pmu_v2_get_counter_offset(hwc->idx));
}
static void hisi_ddrc_pmu_v2_write_counter(struct hisi_pmu *ddrc_pmu,
struct hw_perf_event *hwc, u64 val)
{
writeq(val,
ddrc_pmu->base + hisi_ddrc_pmu_v2_get_counter_offset(hwc->idx));
}
/*
* For DDRC PMU v1, events are mapped to fixed-purpose counters by
* hardware, so there is no event type to write; in PMU v2 the counters
* are programmable.
*/
static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
u32 type)
{
u32 offset;
if (hha_pmu->identifier >= HISI_PMU_V2) {
offset = DDRC_V2_EVENT_TYPE + 4 * idx;
writel(type, hha_pmu->base + offset);
}
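/*
 * For illustration: on PMU v2 with idx = 3, the event code is written to
 * DDRC_V2_EVENT_TYPE + 0xc (one 32-bit event-type register per counter).
 */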
}
static void hisi_ddrc_pmu_v1_start_counters(struct hisi_pmu *ddrc_pmu)
{
u32 val;
/* Set perf_enable in DDRC_PERF_CTRL to start event counting */
val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
val |= DDRC_V1_PERF_CTRL_EN;
writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
}
static void hisi_ddrc_pmu_v1_stop_counters(struct hisi_pmu *ddrc_pmu)
{
u32 val;
/* Clear perf_enable in DDRC_PERF_CTRL to stop event counting */
val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
val &= ~DDRC_V1_PERF_CTRL_EN;
writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
}
static void hisi_ddrc_pmu_v1_enable_counter(struct hisi_pmu *ddrc_pmu,
struct hw_perf_event *hwc)
{
u32 val;
/* Set counter index(event code) in DDRC_EVENT_CTRL register */
val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
val |= (1 << GET_DDRC_EVENTID(hwc));
writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
}
static void hisi_ddrc_pmu_v1_disable_counter(struct hisi_pmu *ddrc_pmu,
struct hw_perf_event *hwc)
{
u32 val;
/* Clear counter index(event code) in DDRC_EVENT_CTRL register */
val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
val &= ~(1 << GET_DDRC_EVENTID(hwc));
writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
}
static int hisi_ddrc_pmu_v1_get_event_idx(struct perf_event *event)
{
struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu);
unsigned long *used_mask = ddrc_pmu->pmu_events.used_mask;
struct hw_perf_event *hwc = &event->hw;
/* For DDRC PMU, we use event code as counter index */
int idx = GET_DDRC_EVENTID(hwc);
if (test_bit(idx, used_mask))
return -EAGAIN;
set_bit(idx, used_mask);
return idx;
}
static int hisi_ddrc_pmu_v2_get_event_idx(struct perf_event *event)
{
return hisi_uncore_pmu_get_event_idx(event);
}
static void hisi_ddrc_pmu_v2_start_counters(struct hisi_pmu *ddrc_pmu)
{
u32 val;
val = readl(ddrc_pmu->base + DDRC_V2_PERF_CTRL);
val |= DDRC_V2_PERF_CTRL_EN;
writel(val, ddrc_pmu->base + DDRC_V2_PERF_CTRL);
}
static void hisi_ddrc_pmu_v2_stop_counters(struct hisi_pmu *ddrc_pmu)
{
u32 val;
val = readl(ddrc_pmu->base + DDRC_V2_PERF_CTRL);
val &= ~DDRC_V2_PERF_CTRL_EN;
writel(val, ddrc_pmu->base + DDRC_V2_PERF_CTRL);
}
static void hisi_ddrc_pmu_v2_enable_counter(struct hisi_pmu *ddrc_pmu,
struct hw_perf_event *hwc)
{
u32 val;
val = readl(ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
val |= 1 << hwc->idx;
writel(val, ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
}
static void hisi_ddrc_pmu_v2_disable_counter(struct hisi_pmu *ddrc_pmu,
struct hw_perf_event *hwc)
{
u32 val;
val = readl(ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
val &= ~(1 << hwc->idx);
writel(val, ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
}
static void hisi_ddrc_pmu_v1_enable_counter_int(struct hisi_pmu *ddrc_pmu,
struct hw_perf_event *hwc)
{
u32 val;
/* Write 0 to enable interrupt */
val = readl(ddrc_pmu->base + DDRC_INT_MASK);
val &= ~(1 << hwc->idx);
writel(val, ddrc_pmu->base + DDRC_INT_MASK);
}
static void hisi_ddrc_pmu_v1_disable_counter_int(struct hisi_pmu *ddrc_pmu,
struct hw_perf_event *hwc)
{
u32 val;
/* Write 1 to mask interrupt */
val = readl(ddrc_pmu->base + DDRC_INT_MASK);
val |= 1 << hwc->idx;
writel(val, ddrc_pmu->base + DDRC_INT_MASK);
}
static void hisi_ddrc_pmu_v2_enable_counter_int(struct hisi_pmu *ddrc_pmu,
struct hw_perf_event *hwc)
{
u32 val;
val = readl(ddrc_pmu->base + DDRC_V2_INT_MASK);
val &= ~(1 << hwc->idx);
writel(val, ddrc_pmu->base + DDRC_V2_INT_MASK);
}
static void hisi_ddrc_pmu_v2_disable_counter_int(struct hisi_pmu *ddrc_pmu,
struct hw_perf_event *hwc)
{
u32 val;
val = readl(ddrc_pmu->base + DDRC_V2_INT_MASK);
val |= 1 << hwc->idx;
writel(val, ddrc_pmu->base + DDRC_V2_INT_MASK);
}
static u32 hisi_ddrc_pmu_v1_get_int_status(struct hisi_pmu *ddrc_pmu)
{
return readl(ddrc_pmu->base + DDRC_INT_STATUS);
}
static void hisi_ddrc_pmu_v1_clear_int_status(struct hisi_pmu *ddrc_pmu,
int idx)
{
writel(1 << idx, ddrc_pmu->base + DDRC_INT_CLEAR);
}
static u32 hisi_ddrc_pmu_v2_get_int_status(struct hisi_pmu *ddrc_pmu)
{
return readl(ddrc_pmu->base + DDRC_V2_INT_STATUS);
}
static void hisi_ddrc_pmu_v2_clear_int_status(struct hisi_pmu *ddrc_pmu,
int idx)
{
writel(1 << idx, ddrc_pmu->base + DDRC_V2_INT_CLEAR);
}
static const struct acpi_device_id hisi_ddrc_pmu_acpi_match[] = {
{ "HISI0233", },
{ "HISI0234", },
{}
};
MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);
static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *ddrc_pmu)
{
/*
* Use the SCCL_ID and the DDRC channel ID to identify the
* DDRC PMU; the SCCL_ID is read from MPIDR[Aff2].
*/
if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id",
&ddrc_pmu->index_id)) {
dev_err(&pdev->dev, "Can not read ddrc channel-id!\n");
return -EINVAL;
}
if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
&ddrc_pmu->sccl_id)) {
dev_err(&pdev->dev, "Can not read ddrc sccl-id!\n");
return -EINVAL;
}
/* DDRC PMUs only share the same SCCL */
ddrc_pmu->ccl_id = -1;
ddrc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ddrc_pmu->base)) {
dev_err(&pdev->dev, "ioremap failed for ddrc_pmu resource\n");
return PTR_ERR(ddrc_pmu->base);
}
ddrc_pmu->identifier = readl(ddrc_pmu->base + DDRC_VERSION);
if (ddrc_pmu->identifier >= HISI_PMU_V2) {
if (device_property_read_u32(&pdev->dev, "hisilicon,sub-id",
&ddrc_pmu->sub_id)) {
dev_err(&pdev->dev, "Can not read sub-id!\n");
return -EINVAL;
}
}
return 0;
}
static struct attribute *hisi_ddrc_pmu_v1_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-4"),
NULL,
};
static const struct attribute_group hisi_ddrc_pmu_v1_format_group = {
.name = "format",
.attrs = hisi_ddrc_pmu_v1_format_attr,
};
static struct attribute *hisi_ddrc_pmu_v2_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
NULL
};
static const struct attribute_group hisi_ddrc_pmu_v2_format_group = {
.name = "format",
.attrs = hisi_ddrc_pmu_v2_format_attr,
};
static struct attribute *hisi_ddrc_pmu_v1_events_attr[] = {
HISI_PMU_EVENT_ATTR(flux_wr, 0x00),
HISI_PMU_EVENT_ATTR(flux_rd, 0x01),
HISI_PMU_EVENT_ATTR(flux_wcmd, 0x02),
HISI_PMU_EVENT_ATTR(flux_rcmd, 0x03),
HISI_PMU_EVENT_ATTR(pre_cmd, 0x04),
HISI_PMU_EVENT_ATTR(act_cmd, 0x05),
HISI_PMU_EVENT_ATTR(rnk_chg, 0x06),
HISI_PMU_EVENT_ATTR(rw_chg, 0x07),
NULL,
};
static const struct attribute_group hisi_ddrc_pmu_v1_events_group = {
.name = "events",
.attrs = hisi_ddrc_pmu_v1_events_attr,
};
static struct attribute *hisi_ddrc_pmu_v2_events_attr[] = {
HISI_PMU_EVENT_ATTR(cycles, 0x00),
HISI_PMU_EVENT_ATTR(flux_wr, 0x83),
HISI_PMU_EVENT_ATTR(flux_rd, 0x84),
NULL
};
static const struct attribute_group hisi_ddrc_pmu_v2_events_group = {
.name = "events",
.attrs = hisi_ddrc_pmu_v2_events_attr,
};
static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
static struct attribute *hisi_ddrc_pmu_cpumask_attrs[] = {
&dev_attr_cpumask.attr,
NULL,
};
static const struct attribute_group hisi_ddrc_pmu_cpumask_attr_group = {
.attrs = hisi_ddrc_pmu_cpumask_attrs,
};
static struct device_attribute hisi_ddrc_pmu_identifier_attr =
__ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
static struct attribute *hisi_ddrc_pmu_identifier_attrs[] = {
&hisi_ddrc_pmu_identifier_attr.attr,
NULL
};
static const struct attribute_group hisi_ddrc_pmu_identifier_group = {
.attrs = hisi_ddrc_pmu_identifier_attrs,
};
static const struct attribute_group *hisi_ddrc_pmu_v1_attr_groups[] = {
&hisi_ddrc_pmu_v1_format_group,
&hisi_ddrc_pmu_v1_events_group,
&hisi_ddrc_pmu_cpumask_attr_group,
&hisi_ddrc_pmu_identifier_group,
NULL,
};
static const struct attribute_group *hisi_ddrc_pmu_v2_attr_groups[] = {
&hisi_ddrc_pmu_v2_format_group,
&hisi_ddrc_pmu_v2_events_group,
&hisi_ddrc_pmu_cpumask_attr_group,
&hisi_ddrc_pmu_identifier_group,
NULL
};
static const struct hisi_uncore_ops hisi_uncore_ddrc_v1_ops = {
.write_evtype = hisi_ddrc_pmu_write_evtype,
.get_event_idx = hisi_ddrc_pmu_v1_get_event_idx,
.start_counters = hisi_ddrc_pmu_v1_start_counters,
.stop_counters = hisi_ddrc_pmu_v1_stop_counters,
.enable_counter = hisi_ddrc_pmu_v1_enable_counter,
.disable_counter = hisi_ddrc_pmu_v1_disable_counter,
.enable_counter_int = hisi_ddrc_pmu_v1_enable_counter_int,
.disable_counter_int = hisi_ddrc_pmu_v1_disable_counter_int,
.write_counter = hisi_ddrc_pmu_v1_write_counter,
.read_counter = hisi_ddrc_pmu_v1_read_counter,
.get_int_status = hisi_ddrc_pmu_v1_get_int_status,
.clear_int_status = hisi_ddrc_pmu_v1_clear_int_status,
};
static const struct hisi_uncore_ops hisi_uncore_ddrc_v2_ops = {
.write_evtype = hisi_ddrc_pmu_write_evtype,
.get_event_idx = hisi_ddrc_pmu_v2_get_event_idx,
.start_counters = hisi_ddrc_pmu_v2_start_counters,
.stop_counters = hisi_ddrc_pmu_v2_stop_counters,
.enable_counter = hisi_ddrc_pmu_v2_enable_counter,
.disable_counter = hisi_ddrc_pmu_v2_disable_counter,
.enable_counter_int = hisi_ddrc_pmu_v2_enable_counter_int,
.disable_counter_int = hisi_ddrc_pmu_v2_disable_counter_int,
.write_counter = hisi_ddrc_pmu_v2_write_counter,
.read_counter = hisi_ddrc_pmu_v2_read_counter,
.get_int_status = hisi_ddrc_pmu_v2_get_int_status,
.clear_int_status = hisi_ddrc_pmu_v2_clear_int_status,
};
static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev,
struct hisi_pmu *ddrc_pmu)
{
int ret;
ret = hisi_ddrc_pmu_init_data(pdev, ddrc_pmu);
if (ret)
return ret;
ret = hisi_uncore_pmu_init_irq(ddrc_pmu, pdev);
if (ret)
return ret;
if (ddrc_pmu->identifier >= HISI_PMU_V2) {
ddrc_pmu->counter_bits = 48;
ddrc_pmu->check_event = DDRC_V2_NR_EVENTS;
ddrc_pmu->pmu_events.attr_groups = hisi_ddrc_pmu_v2_attr_groups;
ddrc_pmu->ops = &hisi_uncore_ddrc_v2_ops;
} else {
ddrc_pmu->counter_bits = 32;
ddrc_pmu->check_event = DDRC_V1_NR_EVENTS;
ddrc_pmu->pmu_events.attr_groups = hisi_ddrc_pmu_v1_attr_groups;
ddrc_pmu->ops = &hisi_uncore_ddrc_v1_ops;
}
ddrc_pmu->num_counters = DDRC_NR_COUNTERS;
ddrc_pmu->dev = &pdev->dev;
ddrc_pmu->on_cpu = -1;
return 0;
}
static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
{
struct hisi_pmu *ddrc_pmu;
char *name;
int ret;
ddrc_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddrc_pmu), GFP_KERNEL);
if (!ddrc_pmu)
return -ENOMEM;
platform_set_drvdata(pdev, ddrc_pmu);
ret = hisi_ddrc_pmu_dev_probe(pdev, ddrc_pmu);
if (ret)
return ret;
if (ddrc_pmu->identifier >= HISI_PMU_V2)
name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"hisi_sccl%u_ddrc%u_%u",
ddrc_pmu->sccl_id, ddrc_pmu->index_id,
ddrc_pmu->sub_id);
else
name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id,
ddrc_pmu->index_id);
if (!name)
return -ENOMEM;
ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
&ddrc_pmu->node);
if (ret) {
dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret);
return ret;
}
hisi_pmu_init(ddrc_pmu, THIS_MODULE);
ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
if (ret) {
dev_err(ddrc_pmu->dev, "DDRC PMU register failed!\n");
cpuhp_state_remove_instance_nocalls(
CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, &ddrc_pmu->node);
}
return ret;
}
static int hisi_ddrc_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&ddrc_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
&ddrc_pmu->node);
return 0;
}
static struct platform_driver hisi_ddrc_pmu_driver = {
.driver = {
.name = "hisi_ddrc_pmu",
.acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match),
.suppress_bind_attrs = true,
},
.probe = hisi_ddrc_pmu_probe,
.remove = hisi_ddrc_pmu_remove,
};
static int __init hisi_ddrc_pmu_module_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
"AP_PERF_ARM_HISI_DDRC_ONLINE",
hisi_uncore_pmu_online_cpu,
hisi_uncore_pmu_offline_cpu);
if (ret) {
pr_err("DDRC PMU: setup hotplug, ret = %d\n", ret);
return ret;
}
ret = platform_driver_register(&hisi_ddrc_pmu_driver);
if (ret)
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
return ret;
}
module_init(hisi_ddrc_pmu_module_init);
static void __exit hisi_ddrc_pmu_module_exit(void)
{
platform_driver_unregister(&hisi_ddrc_pmu_driver);
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
}
module_exit(hisi_ddrc_pmu_module_exit);
MODULE_DESCRIPTION("HiSilicon SoC DDRC uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <[email protected]>");
MODULE_AUTHOR("Anurup M <[email protected]>");
| linux-master | drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HiSilicon SoC Hardware event counters support
*
* Copyright (C) 2017 HiSilicon Limited
* Author: Anurup M <[email protected]>
* Shaokun Zhang <[email protected]>
*
* This code is based on the uncore PMUs like arm-cci and arm-ccn.
*/
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <asm/cputype.h>
#include <asm/local64.h>
#include "hisi_uncore_pmu.h"
#define HISI_MAX_PERIOD(nr) (GENMASK_ULL((nr) - 1, 0))
/*
* PMU format attributes
*/
ssize_t hisi_format_sysfs_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *eattr;
eattr = container_of(attr, struct dev_ext_attribute, attr);
return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}
EXPORT_SYMBOL_GPL(hisi_format_sysfs_show);
/*
* PMU event attributes
*/
ssize_t hisi_event_sysfs_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct dev_ext_attribute *eattr;
eattr = container_of(attr, struct dev_ext_attribute, attr);
return sysfs_emit(page, "config=0x%lx\n", (unsigned long)eattr->var);
}
EXPORT_SYMBOL_GPL(hisi_event_sysfs_show);
/*
* sysfs cpumask attributes. For an uncore PMU, there is only a single CPU to show.
*/
ssize_t hisi_cpumask_sysfs_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
return sysfs_emit(buf, "%d\n", hisi_pmu->on_cpu);
}
EXPORT_SYMBOL_GPL(hisi_cpumask_sysfs_show);
static bool hisi_validate_event_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
/* Include count for the event */
int counters = 1;
if (!is_software_event(leader)) {
/*
* We must NOT create groups containing mixed PMUs, although
* software events are acceptable
*/
if (leader->pmu != event->pmu)
return false;
/* Increment counter for the leader */
if (leader != event)
counters++;
}
for_each_sibling_event(sibling, event->group_leader) {
if (is_software_event(sibling))
continue;
if (sibling->pmu != event->pmu)
return false;
/* Increment counter for each sibling */
counters++;
}
/* The group can not count events more than the counters in the HW */
return counters <= hisi_pmu->num_counters;
}
int hisi_uncore_pmu_get_event_idx(struct perf_event *event)
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
unsigned long *used_mask = hisi_pmu->pmu_events.used_mask;
u32 num_counters = hisi_pmu->num_counters;
int idx;
idx = find_first_zero_bit(used_mask, num_counters);
if (idx == num_counters)
return -EAGAIN;
set_bit(idx, used_mask);
return idx;
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_get_event_idx);
ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
return sysfs_emit(page, "0x%08x\n", hisi_pmu->identifier);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_identifier_attr_show);
static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx)
{
clear_bit(idx, hisi_pmu->pmu_events.used_mask);
}
static irqreturn_t hisi_uncore_pmu_isr(int irq, void *data)
{
struct hisi_pmu *hisi_pmu = data;
struct perf_event *event;
unsigned long overflown;
int idx;
overflown = hisi_pmu->ops->get_int_status(hisi_pmu);
if (!overflown)
return IRQ_NONE;
/*
* Find the counter index which overflowed if the bit was set
* and handle it.
*/
for_each_set_bit(idx, &overflown, hisi_pmu->num_counters) {
/* Write 1 to clear the IRQ status flag */
hisi_pmu->ops->clear_int_status(hisi_pmu, idx);
/* Get the corresponding event struct */
event = hisi_pmu->pmu_events.hw_events[idx];
if (!event)
continue;
hisi_uncore_pmu_event_update(event);
hisi_uncore_pmu_set_event_period(event);
}
return IRQ_HANDLED;
}
int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
struct platform_device *pdev)
{
int irq, ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = devm_request_irq(&pdev->dev, irq, hisi_uncore_pmu_isr,
IRQF_NOBALANCING | IRQF_NO_THREAD,
dev_name(&pdev->dev), hisi_pmu);
if (ret < 0) {
dev_err(&pdev->dev,
"Fail to request IRQ: %d ret: %d.\n", irq, ret);
return ret;
}
hisi_pmu->irq = irq;
return 0;
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_init_irq);
int hisi_uncore_pmu_event_init(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct hisi_pmu *hisi_pmu;
if (event->attr.type != event->pmu->type)
return -ENOENT;
/*
* We do not support sampling, as the counters are shared by
* all CPU cores in a CPU die (SCCL). We also do not support
* attaching to a task (per-process mode).
*/
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EOPNOTSUPP;
/*
* The uncore counters are not specific to any CPU, so
* per-task counting cannot be supported.
*/
if (event->cpu < 0)
return -EINVAL;
/*
* Validate that the events in the group do not exceed the
* available counters in hardware.
*/
if (!hisi_validate_event_group(event))
return -EINVAL;
hisi_pmu = to_hisi_pmu(event->pmu);
if (event->attr.config > hisi_pmu->check_event)
return -EINVAL;
if (hisi_pmu->on_cpu == -1)
return -EINVAL;
/*
* We don't assign an index until we actually place the event onto
* hardware. Use -1 to signify that we haven't decided where to put it
* yet.
*/
hwc->idx = -1;
hwc->config_base = event->attr.config;
if (hisi_pmu->ops->check_filter && hisi_pmu->ops->check_filter(event))
return -EINVAL;
/* Enforce to use the same CPU for all events in this PMU */
event->cpu = hisi_pmu->on_cpu;
return 0;
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_init);
/*
* Set the counter to count the event that we're interested in,
* and enable interrupt and counter.
*/
static void hisi_uncore_pmu_enable_event(struct perf_event *event)
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
hisi_pmu->ops->write_evtype(hisi_pmu, hwc->idx,
HISI_GET_EVENTID(event));
if (hisi_pmu->ops->enable_filter)
hisi_pmu->ops->enable_filter(event);
hisi_pmu->ops->enable_counter_int(hisi_pmu, hwc);
hisi_pmu->ops->enable_counter(hisi_pmu, hwc);
}
/*
* Disable counter and interrupt.
*/
static void hisi_uncore_pmu_disable_event(struct perf_event *event)
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
hisi_pmu->ops->disable_counter(hisi_pmu, hwc);
hisi_pmu->ops->disable_counter_int(hisi_pmu, hwc);
if (hisi_pmu->ops->disable_filter)
hisi_pmu->ops->disable_filter(event);
}
void hisi_uncore_pmu_set_event_period(struct perf_event *event)
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
/*
* The HiSilicon PMU counters support 32 bits or 48 bits, depending on
* the PMU. We reduce it to 2^(counter_bits - 1) to account for the
* extreme interrupt latency. So we could hopefully handle the overflow
* interrupt before another 2^(counter_bits - 1) events occur and the
* counter overtakes its previous value.
*/
u64 val = BIT_ULL(hisi_pmu->counter_bits - 1);
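/*
 * For illustration: with 32-bit counters val = 0x80000000 and with
 * 48-bit counters val = 0x800000000000, leaving 2^(counter_bits - 1)
 * counts of headroom before the counter overflows.
 */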
local64_set(&hwc->prev_count, val);
/* Write start value to the hardware event counter */
hisi_pmu->ops->write_counter(hisi_pmu, hwc, val);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_set_event_period);
void hisi_uncore_pmu_event_update(struct perf_event *event)
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u64 delta, prev_raw_count, new_raw_count;
do {
/* Read the count from the counter register */
new_raw_count = hisi_pmu->ops->read_counter(hisi_pmu, hwc);
prev_raw_count = local64_read(&hwc->prev_count);
} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count);
/*
* compute the delta
*/
delta = (new_raw_count - prev_raw_count) &
HISI_MAX_PERIOD(hisi_pmu->counter_bits);
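/*
 * For illustration: masking to counter_bits handles a single wrap, e.g.
 * for a 32-bit counter prev = 0xfffffffe and new = 0x00000001 yields
 * delta = 3.
 */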
local64_add(delta, &event->count);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_update);
void hisi_uncore_pmu_start(struct perf_event *event, int flags)
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
return;
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
hisi_uncore_pmu_set_event_period(event);
if (flags & PERF_EF_RELOAD) {
u64 prev_raw_count = local64_read(&hwc->prev_count);
hisi_pmu->ops->write_counter(hisi_pmu, hwc, prev_raw_count);
}
hisi_uncore_pmu_enable_event(event);
perf_event_update_userpage(event);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_start);
void hisi_uncore_pmu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
hisi_uncore_pmu_disable_event(event);
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
if (hwc->state & PERF_HES_UPTODATE)
return;
/* Read hardware counter and update the perf counter statistics */
hisi_uncore_pmu_event_update(event);
hwc->state |= PERF_HES_UPTODATE;
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_stop);
int hisi_uncore_pmu_add(struct perf_event *event, int flags)
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
/* Get an available counter index for counting */
idx = hisi_pmu->ops->get_event_idx(event);
if (idx < 0)
return idx;
event->hw.idx = idx;
hisi_pmu->pmu_events.hw_events[idx] = event;
if (flags & PERF_EF_START)
hisi_uncore_pmu_start(event, PERF_EF_RELOAD);
return 0;
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_add);
void hisi_uncore_pmu_del(struct perf_event *event, int flags)
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
hisi_uncore_pmu_stop(event, PERF_EF_UPDATE);
hisi_uncore_pmu_clear_event_idx(hisi_pmu, hwc->idx);
perf_event_update_userpage(event);
hisi_pmu->pmu_events.hw_events[hwc->idx] = NULL;
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_del);
void hisi_uncore_pmu_read(struct perf_event *event)
{
/* Read hardware counter and update the perf counter statistics */
hisi_uncore_pmu_event_update(event);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_read);
void hisi_uncore_pmu_enable(struct pmu *pmu)
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu);
bool enabled = !bitmap_empty(hisi_pmu->pmu_events.used_mask,
hisi_pmu->num_counters);
if (!enabled)
return;
hisi_pmu->ops->start_counters(hisi_pmu);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_enable);
void hisi_uncore_pmu_disable(struct pmu *pmu)
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu);
hisi_pmu->ops->stop_counters(hisi_pmu);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_disable);
/*
* The Super CPU Cluster (SCCL) and CPU Cluster (CCL) IDs can be
* determined from the MPIDR_EL1, but the encoding varies by CPU:
*
* - For MT variants of TSV110:
* SCCL is Aff2[7:3], CCL is Aff2[2:0]
*
* - For other MT parts:
* SCCL is Aff3[7:0], CCL is Aff2[7:0]
*
* - For non-MT parts:
* SCCL is Aff2[7:0], CCL is Aff1[7:0]
*/
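/*
 * For illustration (hypothetical value): on an MT TSV110 part with
 * Aff2 = 0x2b, sccl = 0x2b >> 3 = 5 and ccl = 0x2b & 0x7 = 3.
 */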
static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp)
{
u64 mpidr = read_cpuid_mpidr();
int aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3);
int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
int aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1);
bool mt = mpidr & MPIDR_MT_BITMASK;
int sccl, ccl;
if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
sccl = aff2 >> 3;
ccl = aff2 & 0x7;
} else if (mt) {
sccl = aff3;
ccl = aff2;
} else {
sccl = aff2;
ccl = aff1;
}
if (scclp)
*scclp = sccl;
if (cclp)
*cclp = ccl;
}
/*
* Check whether the CPU is associated with this uncore PMU
*/
static bool hisi_pmu_cpu_is_associated_pmu(struct hisi_pmu *hisi_pmu)
{
int sccl_id, ccl_id;
/* If SCCL_ID is -1, the PMU is in a SICL and has no CPU affinity */
if (hisi_pmu->sccl_id == -1)
return true;
if (hisi_pmu->ccl_id == -1) {
/* If CCL_ID is -1, the PMU only shares the same SCCL */
hisi_read_sccl_and_ccl_id(&sccl_id, NULL);
return sccl_id == hisi_pmu->sccl_id;
}
hisi_read_sccl_and_ccl_id(&sccl_id, &ccl_id);
return sccl_id == hisi_pmu->sccl_id && ccl_id == hisi_pmu->ccl_id;
}
int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
node);
if (!hisi_pmu_cpu_is_associated_pmu(hisi_pmu))
return 0;
cpumask_set_cpu(cpu, &hisi_pmu->associated_cpus);
/* If another CPU is already managing this PMU, simply return. */
if (hisi_pmu->on_cpu != -1)
return 0;
/* Use this CPU in cpumask for event counting */
hisi_pmu->on_cpu = cpu;
/* Overflow interrupt also should use the same CPU */
WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(cpu)));
return 0;
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_online_cpu);
int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
node);
cpumask_t pmu_online_cpus;
unsigned int target;
if (!cpumask_test_and_clear_cpu(cpu, &hisi_pmu->associated_cpus))
return 0;
/* Nothing to do if this CPU doesn't own the PMU */
if (hisi_pmu->on_cpu != cpu)
return 0;
/* Give up ownership of the PMU */
hisi_pmu->on_cpu = -1;
/* Choose a new CPU to migrate ownership of the PMU to */
cpumask_and(&pmu_online_cpus, &hisi_pmu->associated_cpus,
cpu_online_mask);
target = cpumask_any_but(&pmu_online_cpus, cpu);
if (target >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target);
/* Use this CPU for event counting */
hisi_pmu->on_cpu = target;
WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(target)));
return 0;
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu);
void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module)
{
struct pmu *pmu = &hisi_pmu->pmu;
pmu->module = module;
pmu->task_ctx_nr = perf_invalid_context;
pmu->event_init = hisi_uncore_pmu_event_init;
pmu->pmu_enable = hisi_uncore_pmu_enable;
pmu->pmu_disable = hisi_uncore_pmu_disable;
pmu->add = hisi_uncore_pmu_add;
pmu->del = hisi_uncore_pmu_del;
pmu->start = hisi_uncore_pmu_start;
pmu->stop = hisi_uncore_pmu_stop;
pmu->read = hisi_uncore_pmu_read;
pmu->attr_groups = hisi_pmu->pmu_events.attr_groups;
pmu->capabilities = PERF_PMU_CAP_NO_EXCLUDE;
}
EXPORT_SYMBOL_GPL(hisi_pmu_init);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/perf/hisilicon/hisi_uncore_pmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HiSilicon PA uncore Hardware event counters support
*
* Copyright (C) 2020 HiSilicon Limited
* Author: Shaokun Zhang <[email protected]>
*
* This code is based on the uncore PMUs like arm-cci and arm-ccn.
*/
#include <linux/acpi.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/smp.h>
#include "hisi_uncore_pmu.h"
/* PA register definition */
#define PA_PERF_CTRL 0x1c00
#define PA_EVENT_CTRL 0x1c04
#define PA_TT_CTRL 0x1c08
#define PA_TGTID_CTRL 0x1c14
#define PA_SRCID_CTRL 0x1c18
/* H32 PA interrupt registers */
#define PA_INT_MASK 0x1c70
#define PA_INT_STATUS 0x1c78
#define PA_INT_CLEAR 0x1c7c
#define H60PA_INT_STATUS 0x1c70
#define H60PA_INT_MASK 0x1c74
#define PA_EVENT_TYPE0 0x1c80
#define PA_PMU_VERSION 0x1cf0
#define PA_EVENT_CNT0_L 0x1d00
#define PA_EVTYPE_MASK 0xff
#define PA_NR_COUNTERS 0x8
#define PA_PERF_CTRL_EN BIT(0)
#define PA_TRACETAG_EN BIT(4)
#define PA_TGTID_EN BIT(11)
#define PA_SRCID_EN BIT(11)
#define PA_TGTID_NONE 0
#define PA_SRCID_NONE 0
#define PA_TGTID_MSK_SHIFT 12
#define PA_SRCID_MSK_SHIFT 12
HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_cmd, config1, 10, 0);
HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_msk, config1, 21, 11);
HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 32, 22);
HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 43, 33);
HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 44, 44);
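/*
 * Illustrative usage (hypothetical PMU instance name and filter values),
 * passed through perf's config1, e.g.
 *   perf stat -e hisi_sicl0_pa0/rx_req,srcid_cmd=0x4,srcid_msk=0x7ff/
 */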
struct hisi_pa_pmu_int_regs {
u32 mask_offset;
u32 clear_offset;
u32 status_offset;
};
static void hisi_pa_pmu_enable_tracetag(struct perf_event *event)
{
struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
u32 tt_en = hisi_get_tracetag_en(event);
if (tt_en) {
u32 val;
val = readl(pa_pmu->base + PA_TT_CTRL);
val |= PA_TRACETAG_EN;
writel(val, pa_pmu->base + PA_TT_CTRL);
}
}
static void hisi_pa_pmu_clear_tracetag(struct perf_event *event)
{
struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
u32 tt_en = hisi_get_tracetag_en(event);
if (tt_en) {
u32 val;
val = readl(pa_pmu->base + PA_TT_CTRL);
val &= ~PA_TRACETAG_EN;
writel(val, pa_pmu->base + PA_TT_CTRL);
}
}
static void hisi_pa_pmu_config_tgtid(struct perf_event *event)
{
struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
u32 cmd = hisi_get_tgtid_cmd(event);
if (cmd) {
u32 msk = hisi_get_tgtid_msk(event);
u32 val = cmd | PA_TGTID_EN | (msk << PA_TGTID_MSK_SHIFT);
writel(val, pa_pmu->base + PA_TGTID_CTRL);
}
}
static void hisi_pa_pmu_clear_tgtid(struct perf_event *event)
{
struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
u32 cmd = hisi_get_tgtid_cmd(event);
if (cmd)
writel(PA_TGTID_NONE, pa_pmu->base + PA_TGTID_CTRL);
}
static void hisi_pa_pmu_config_srcid(struct perf_event *event)
{
struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
u32 cmd = hisi_get_srcid_cmd(event);
if (cmd) {
u32 msk = hisi_get_srcid_msk(event);
u32 val = cmd | PA_SRCID_EN | (msk << PA_SRCID_MSK_SHIFT);
writel(val, pa_pmu->base + PA_SRCID_CTRL);
}
}
static void hisi_pa_pmu_clear_srcid(struct perf_event *event)
{
struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
u32 cmd = hisi_get_srcid_cmd(event);
if (cmd)
writel(PA_SRCID_NONE, pa_pmu->base + PA_SRCID_CTRL);
}
static void hisi_pa_pmu_enable_filter(struct perf_event *event)
{
if (event->attr.config1 != 0x0) {
hisi_pa_pmu_enable_tracetag(event);
hisi_pa_pmu_config_srcid(event);
hisi_pa_pmu_config_tgtid(event);
}
}
static void hisi_pa_pmu_disable_filter(struct perf_event *event)
{
if (event->attr.config1 != 0x0) {
hisi_pa_pmu_clear_tgtid(event);
hisi_pa_pmu_clear_srcid(event);
hisi_pa_pmu_clear_tracetag(event);
}
}
static u32 hisi_pa_pmu_get_counter_offset(int idx)
{
return (PA_EVENT_CNT0_L + idx * 8);
}
static u64 hisi_pa_pmu_read_counter(struct hisi_pmu *pa_pmu,
struct hw_perf_event *hwc)
{
return readq(pa_pmu->base + hisi_pa_pmu_get_counter_offset(hwc->idx));
}
static void hisi_pa_pmu_write_counter(struct hisi_pmu *pa_pmu,
struct hw_perf_event *hwc, u64 val)
{
writeq(val, pa_pmu->base + hisi_pa_pmu_get_counter_offset(hwc->idx));
}
static void hisi_pa_pmu_write_evtype(struct hisi_pmu *pa_pmu, int idx,
u32 type)
{
u32 reg, reg_idx, shift, val;
/*
* Select the appropriate event select register (PA_EVENT_TYPE0/1).
* There are 2 event select registers for the 8 hardware counters.
* The event code is 8 bits wide: the first 4 hardware counters use
* PA_EVENT_TYPE0 and the last 4 hardware counters use
* PA_EVENT_TYPE1.
*/
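/*
 * For illustration: with idx = 2, reg = PA_EVENT_TYPE0, reg_idx = 2 and
 * shift = 16, so the event code occupies bits [23:16] of PA_EVENT_TYPE0.
 */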
reg = PA_EVENT_TYPE0 + (idx / 4) * 4;
reg_idx = idx % 4;
shift = 8 * reg_idx;
/* Write event code to PA_EVENT_TYPEx Register */
val = readl(pa_pmu->base + reg);
val &= ~(PA_EVTYPE_MASK << shift);
val |= (type << shift);
writel(val, pa_pmu->base + reg);
}
static void hisi_pa_pmu_start_counters(struct hisi_pmu *pa_pmu)
{
u32 val;
val = readl(pa_pmu->base + PA_PERF_CTRL);
val |= PA_PERF_CTRL_EN;
writel(val, pa_pmu->base + PA_PERF_CTRL);
}
static void hisi_pa_pmu_stop_counters(struct hisi_pmu *pa_pmu)
{
u32 val;
val = readl(pa_pmu->base + PA_PERF_CTRL);
val &= ~(PA_PERF_CTRL_EN);
writel(val, pa_pmu->base + PA_PERF_CTRL);
}
static void hisi_pa_pmu_enable_counter(struct hisi_pmu *pa_pmu,
struct hw_perf_event *hwc)
{
u32 val;
/* Enable counter index in PA_EVENT_CTRL register */
val = readl(pa_pmu->base + PA_EVENT_CTRL);
val |= 1 << hwc->idx;
writel(val, pa_pmu->base + PA_EVENT_CTRL);
}
static void hisi_pa_pmu_disable_counter(struct hisi_pmu *pa_pmu,
struct hw_perf_event *hwc)
{
u32 val;
/* Clear counter index in PA_EVENT_CTRL register */
val = readl(pa_pmu->base + PA_EVENT_CTRL);
val &= ~(1 << hwc->idx);
writel(val, pa_pmu->base + PA_EVENT_CTRL);
}
static void hisi_pa_pmu_enable_counter_int(struct hisi_pmu *pa_pmu,
struct hw_perf_event *hwc)
{
struct hisi_pa_pmu_int_regs *regs = pa_pmu->dev_info->private;
u32 val;
/* Write 0 to enable interrupt */
val = readl(pa_pmu->base + regs->mask_offset);
val &= ~(1 << hwc->idx);
writel(val, pa_pmu->base + regs->mask_offset);
}
static void hisi_pa_pmu_disable_counter_int(struct hisi_pmu *pa_pmu,
struct hw_perf_event *hwc)
{
struct hisi_pa_pmu_int_regs *regs = pa_pmu->dev_info->private;
u32 val;
/* Write 1 to mask interrupt */
val = readl(pa_pmu->base + regs->mask_offset);
val |= 1 << hwc->idx;
writel(val, pa_pmu->base + regs->mask_offset);
}
static u32 hisi_pa_pmu_get_int_status(struct hisi_pmu *pa_pmu)
{
struct hisi_pa_pmu_int_regs *regs = pa_pmu->dev_info->private;
return readl(pa_pmu->base + regs->status_offset);
}
static void hisi_pa_pmu_clear_int_status(struct hisi_pmu *pa_pmu, int idx)
{
struct hisi_pa_pmu_int_regs *regs = pa_pmu->dev_info->private;
writel(1 << idx, pa_pmu->base + regs->clear_offset);
}
static int hisi_pa_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *pa_pmu)
{
/*
* As PA PMU is in a SICL, use the SICL_ID and the index ID
* to identify the PA PMU.
*/
if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
&pa_pmu->sicl_id)) {
dev_err(&pdev->dev, "Cannot read sicl-id!\n");
return -EINVAL;
}
if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
&pa_pmu->index_id)) {
dev_err(&pdev->dev, "Cannot read idx-id!\n");
return -EINVAL;
}
pa_pmu->ccl_id = -1;
pa_pmu->sccl_id = -1;
pa_pmu->dev_info = device_get_match_data(&pdev->dev);
if (!pa_pmu->dev_info)
return -ENODEV;
pa_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pa_pmu->base)) {
dev_err(&pdev->dev, "ioremap failed for pa_pmu resource.\n");
return PTR_ERR(pa_pmu->base);
}
pa_pmu->identifier = readl(pa_pmu->base + PA_PMU_VERSION);
return 0;
}
static struct attribute *hisi_pa_pmu_v2_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
HISI_PMU_FORMAT_ATTR(tgtid_cmd, "config1:0-10"),
HISI_PMU_FORMAT_ATTR(tgtid_msk, "config1:11-21"),
HISI_PMU_FORMAT_ATTR(srcid_cmd, "config1:22-32"),
HISI_PMU_FORMAT_ATTR(srcid_msk, "config1:33-43"),
HISI_PMU_FORMAT_ATTR(tracetag_en, "config1:44"),
NULL,
};
static const struct attribute_group hisi_pa_pmu_v2_format_group = {
.name = "format",
.attrs = hisi_pa_pmu_v2_format_attr,
};
static struct attribute *hisi_pa_pmu_v2_events_attr[] = {
HISI_PMU_EVENT_ATTR(rx_req, 0x40),
HISI_PMU_EVENT_ATTR(tx_req, 0x5c),
HISI_PMU_EVENT_ATTR(cycle, 0x78),
NULL
};
static const struct attribute_group hisi_pa_pmu_v2_events_group = {
.name = "events",
.attrs = hisi_pa_pmu_v2_events_attr,
};
static struct attribute *hisi_pa_pmu_v3_events_attr[] = {
HISI_PMU_EVENT_ATTR(tx_req, 0x0),
HISI_PMU_EVENT_ATTR(tx_dat, 0x1),
HISI_PMU_EVENT_ATTR(tx_snp, 0x2),
HISI_PMU_EVENT_ATTR(rx_req, 0x7),
HISI_PMU_EVENT_ATTR(rx_dat, 0x8),
HISI_PMU_EVENT_ATTR(rx_snp, 0x9),
NULL
};
static const struct attribute_group hisi_pa_pmu_v3_events_group = {
.name = "events",
.attrs = hisi_pa_pmu_v3_events_attr,
};
static struct attribute *hisi_h60pa_pmu_events_attr[] = {
HISI_PMU_EVENT_ATTR(rx_flit, 0x50),
HISI_PMU_EVENT_ATTR(tx_flit, 0x65),
NULL
};
static const struct attribute_group hisi_h60pa_pmu_events_group = {
.name = "events",
.attrs = hisi_h60pa_pmu_events_attr,
};
static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
static struct attribute *hisi_pa_pmu_cpumask_attrs[] = {
&dev_attr_cpumask.attr,
NULL
};
static const struct attribute_group hisi_pa_pmu_cpumask_attr_group = {
.attrs = hisi_pa_pmu_cpumask_attrs,
};
static struct device_attribute hisi_pa_pmu_identifier_attr =
__ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
static struct attribute *hisi_pa_pmu_identifier_attrs[] = {
&hisi_pa_pmu_identifier_attr.attr,
NULL
};
static const struct attribute_group hisi_pa_pmu_identifier_group = {
.attrs = hisi_pa_pmu_identifier_attrs,
};
static struct hisi_pa_pmu_int_regs hisi_pa_pmu_regs = {
.mask_offset = PA_INT_MASK,
.clear_offset = PA_INT_CLEAR,
.status_offset = PA_INT_STATUS,
};
static const struct attribute_group *hisi_pa_pmu_v2_attr_groups[] = {
&hisi_pa_pmu_v2_format_group,
&hisi_pa_pmu_v2_events_group,
&hisi_pa_pmu_cpumask_attr_group,
&hisi_pa_pmu_identifier_group,
NULL
};
static const struct hisi_pmu_dev_info hisi_h32pa_v2 = {
.name = "pa",
.attr_groups = hisi_pa_pmu_v2_attr_groups,
.private = &hisi_pa_pmu_regs,
};
static const struct attribute_group *hisi_pa_pmu_v3_attr_groups[] = {
&hisi_pa_pmu_v2_format_group,
&hisi_pa_pmu_v3_events_group,
&hisi_pa_pmu_cpumask_attr_group,
&hisi_pa_pmu_identifier_group,
NULL
};
static const struct hisi_pmu_dev_info hisi_h32pa_v3 = {
.name = "pa",
.attr_groups = hisi_pa_pmu_v3_attr_groups,
.private = &hisi_pa_pmu_regs,
};
static struct hisi_pa_pmu_int_regs hisi_h60pa_pmu_regs = {
.mask_offset = H60PA_INT_MASK,
.clear_offset = H60PA_INT_STATUS, /* Clear on write */
.status_offset = H60PA_INT_STATUS,
};
static const struct attribute_group *hisi_h60pa_pmu_attr_groups[] = {
&hisi_pa_pmu_v2_format_group,
&hisi_h60pa_pmu_events_group,
&hisi_pa_pmu_cpumask_attr_group,
&hisi_pa_pmu_identifier_group,
NULL
};
static const struct hisi_pmu_dev_info hisi_h60pa = {
.name = "h60pa",
.attr_groups = hisi_h60pa_pmu_attr_groups,
.private = &hisi_h60pa_pmu_regs,
};
static const struct hisi_uncore_ops hisi_uncore_pa_ops = {
.write_evtype = hisi_pa_pmu_write_evtype,
.get_event_idx = hisi_uncore_pmu_get_event_idx,
.start_counters = hisi_pa_pmu_start_counters,
.stop_counters = hisi_pa_pmu_stop_counters,
.enable_counter = hisi_pa_pmu_enable_counter,
.disable_counter = hisi_pa_pmu_disable_counter,
.enable_counter_int = hisi_pa_pmu_enable_counter_int,
.disable_counter_int = hisi_pa_pmu_disable_counter_int,
.write_counter = hisi_pa_pmu_write_counter,
.read_counter = hisi_pa_pmu_read_counter,
.get_int_status = hisi_pa_pmu_get_int_status,
.clear_int_status = hisi_pa_pmu_clear_int_status,
.enable_filter = hisi_pa_pmu_enable_filter,
.disable_filter = hisi_pa_pmu_disable_filter,
};
static int hisi_pa_pmu_dev_probe(struct platform_device *pdev,
struct hisi_pmu *pa_pmu)
{
int ret;
ret = hisi_pa_pmu_init_data(pdev, pa_pmu);
if (ret)
return ret;
ret = hisi_uncore_pmu_init_irq(pa_pmu, pdev);
if (ret)
return ret;
pa_pmu->pmu_events.attr_groups = pa_pmu->dev_info->attr_groups;
pa_pmu->num_counters = PA_NR_COUNTERS;
pa_pmu->ops = &hisi_uncore_pa_ops;
pa_pmu->check_event = 0xB0;
pa_pmu->counter_bits = 64;
pa_pmu->dev = &pdev->dev;
pa_pmu->on_cpu = -1;
return 0;
}
static int hisi_pa_pmu_probe(struct platform_device *pdev)
{
struct hisi_pmu *pa_pmu;
char *name;
int ret;
pa_pmu = devm_kzalloc(&pdev->dev, sizeof(*pa_pmu), GFP_KERNEL);
if (!pa_pmu)
return -ENOMEM;
ret = hisi_pa_pmu_dev_probe(pdev, pa_pmu);
if (ret)
return ret;
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_%s%u",
pa_pmu->sicl_id, pa_pmu->dev_info->name,
pa_pmu->index_id);
if (!name)
return -ENOMEM;
ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
&pa_pmu->node);
if (ret) {
dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
return ret;
}
hisi_pmu_init(pa_pmu, THIS_MODULE);
ret = perf_pmu_register(&pa_pmu->pmu, name, -1);
if (ret) {
dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
&pa_pmu->node);
return ret;
}
platform_set_drvdata(pdev, pa_pmu);
return ret;
}
static int hisi_pa_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *pa_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&pa_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
&pa_pmu->node);
return 0;
}
static const struct acpi_device_id hisi_pa_pmu_acpi_match[] = {
{ "HISI0273", (kernel_ulong_t)&hisi_h32pa_v2 },
{ "HISI0275", (kernel_ulong_t)&hisi_h32pa_v3 },
{ "HISI0274", (kernel_ulong_t)&hisi_h60pa },
{}
};
MODULE_DEVICE_TABLE(acpi, hisi_pa_pmu_acpi_match);
static struct platform_driver hisi_pa_pmu_driver = {
.driver = {
.name = "hisi_pa_pmu",
.acpi_match_table = hisi_pa_pmu_acpi_match,
.suppress_bind_attrs = true,
},
.probe = hisi_pa_pmu_probe,
.remove = hisi_pa_pmu_remove,
};
static int __init hisi_pa_pmu_module_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
"AP_PERF_ARM_HISI_PA_ONLINE",
hisi_uncore_pmu_online_cpu,
hisi_uncore_pmu_offline_cpu);
if (ret) {
pr_err("PA PMU: cpuhp state setup failed, ret = %d\n", ret);
return ret;
}
ret = platform_driver_register(&hisi_pa_pmu_driver);
if (ret)
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE);
return ret;
}
module_init(hisi_pa_pmu_module_init);
static void __exit hisi_pa_pmu_module_exit(void)
{
platform_driver_unregister(&hisi_pa_pmu_driver);
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE);
}
module_exit(hisi_pa_pmu_module_exit);
MODULE_DESCRIPTION("HiSilicon Protocol Adapter uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <[email protected]>");
MODULE_AUTHOR("Qi Liu <[email protected]>");
| linux-master | drivers/perf/hisilicon/hisi_uncore_pa_pmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This driver adds support for HNS3 PMU iEP device. Related perf events are
* bandwidth, latency, packet rate, interrupt rate etc.
*
* Copyright (C) 2022 HiSilicon Limited
*/
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-epf.h>
#include <linux/perf_event.h>
#include <linux/smp.h>
/* registers offset address */
#define HNS3_PMU_REG_GLOBAL_CTRL 0x0000
#define HNS3_PMU_REG_CLOCK_FREQ 0x0020
#define HNS3_PMU_REG_BDF 0x0fe0
#define HNS3_PMU_REG_VERSION 0x0fe4
#define HNS3_PMU_REG_DEVICE_ID 0x0fe8
#define HNS3_PMU_REG_EVENT_OFFSET 0x1000
#define HNS3_PMU_REG_EVENT_SIZE 0x1000
#define HNS3_PMU_REG_EVENT_CTRL_LOW 0x00
#define HNS3_PMU_REG_EVENT_CTRL_HIGH 0x04
#define HNS3_PMU_REG_EVENT_INTR_STATUS 0x08
#define HNS3_PMU_REG_EVENT_INTR_MASK 0x0c
#define HNS3_PMU_REG_EVENT_COUNTER 0x10
#define HNS3_PMU_REG_EVENT_EXT_COUNTER 0x18
#define HNS3_PMU_REG_EVENT_QID_CTRL 0x28
#define HNS3_PMU_REG_EVENT_QID_PARA 0x2c
#define HNS3_PMU_FILTER_SUPPORT_GLOBAL BIT(0)
#define HNS3_PMU_FILTER_SUPPORT_PORT BIT(1)
#define HNS3_PMU_FILTER_SUPPORT_PORT_TC BIT(2)
#define HNS3_PMU_FILTER_SUPPORT_FUNC BIT(3)
#define HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE BIT(4)
#define HNS3_PMU_FILTER_SUPPORT_FUNC_INTR BIT(5)
#define HNS3_PMU_FILTER_ALL_TC 0xf
#define HNS3_PMU_FILTER_ALL_QUEUE 0xffff
#define HNS3_PMU_CTRL_SUBEVENT_S 4
#define HNS3_PMU_CTRL_FILTER_MODE_S 24
#define HNS3_PMU_GLOBAL_START BIT(0)
#define HNS3_PMU_EVENT_STATUS_RESET BIT(11)
#define HNS3_PMU_EVENT_EN BIT(12)
#define HNS3_PMU_EVENT_OVERFLOW_RESTART BIT(15)
#define HNS3_PMU_QID_PARA_FUNC_S 0
#define HNS3_PMU_QID_PARA_QUEUE_S 16
#define HNS3_PMU_QID_CTRL_REQ_ENABLE BIT(0)
#define HNS3_PMU_QID_CTRL_DONE BIT(1)
#define HNS3_PMU_QID_CTRL_MISS BIT(2)
#define HNS3_PMU_INTR_MASK_OVERFLOW BIT(1)
#define HNS3_PMU_MAX_HW_EVENTS 8
/*
* Each hardware event uses two registers (counter and ext_counter) for
* bandwidth, packet rate, latency and interrupt rate. Both registers start
* counting at the same time when a hardware event is enabled. The meaning of
* counter and ext_counter depends on the event type, as shown below:
*
* +----------------+------------------+---------------+
* | event type | counter | ext_counter |
* +----------------+------------------+---------------+
* | bandwidth | byte number | cycle number |
* +----------------+------------------+---------------+
* | packet rate | packet number | cycle number |
* +----------------+------------------+---------------+
* | latency | cycle number | packet number |
* +----------------+------------------+---------------+
* | interrupt rate | interrupt number | cycle number |
* +----------------+------------------+---------------+
*
* The cycle number is the increment of the hardware timer counter; the timer
* frequency can be read from the hw_clk_freq sysfs file.
*
* Performance of each hardware event is calculated by: counter / ext_counter.
*
* Since data processing is preferably done in userspace, ext_counter is
* exposed as a separate event and bit 16 selects which register is read.
* For example, events 0x00001 and 0x10001 are a single hardware event because
* bits 0-15 are identical. If bit 16 of an event is 0, the counter register is
* read; otherwise the ext_counter register is read.
*/
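/*
* Illustrative example (the PMU instance name is an assumption, it depends on
* the platform): for the SSU->EGU bandwidth pair, event 0x00001 returns the
* byte count and event 0x10001 the cycle count, so userspace can derive
* bytes/s as
*     counter / (ext_counter / hw_clk_freq)
* e.g. with perf:
*     perf stat -e hns3_pmu_sicl_0/config=0x00001,global=1/ \
*               -e hns3_pmu_sicl_0/config=0x10001,global=1/ -I 1000
*/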
/* bandwidth events */
#define HNS3_PMU_EVT_BW_SSU_EGU_BYTE_NUM 0x00001
#define HNS3_PMU_EVT_BW_SSU_EGU_TIME 0x10001
#define HNS3_PMU_EVT_BW_SSU_RPU_BYTE_NUM 0x00002
#define HNS3_PMU_EVT_BW_SSU_RPU_TIME 0x10002
#define HNS3_PMU_EVT_BW_SSU_ROCE_BYTE_NUM 0x00003
#define HNS3_PMU_EVT_BW_SSU_ROCE_TIME 0x10003
#define HNS3_PMU_EVT_BW_ROCE_SSU_BYTE_NUM 0x00004
#define HNS3_PMU_EVT_BW_ROCE_SSU_TIME 0x10004
#define HNS3_PMU_EVT_BW_TPU_SSU_BYTE_NUM 0x00005
#define HNS3_PMU_EVT_BW_TPU_SSU_TIME 0x10005
#define HNS3_PMU_EVT_BW_RPU_RCBRX_BYTE_NUM 0x00006
#define HNS3_PMU_EVT_BW_RPU_RCBRX_TIME 0x10006
#define HNS3_PMU_EVT_BW_RCBTX_TXSCH_BYTE_NUM 0x00008
#define HNS3_PMU_EVT_BW_RCBTX_TXSCH_TIME 0x10008
#define HNS3_PMU_EVT_BW_WR_FBD_BYTE_NUM 0x00009
#define HNS3_PMU_EVT_BW_WR_FBD_TIME 0x10009
#define HNS3_PMU_EVT_BW_WR_EBD_BYTE_NUM 0x0000a
#define HNS3_PMU_EVT_BW_WR_EBD_TIME 0x1000a
#define HNS3_PMU_EVT_BW_RD_FBD_BYTE_NUM 0x0000b
#define HNS3_PMU_EVT_BW_RD_FBD_TIME 0x1000b
#define HNS3_PMU_EVT_BW_RD_EBD_BYTE_NUM 0x0000c
#define HNS3_PMU_EVT_BW_RD_EBD_TIME 0x1000c
#define HNS3_PMU_EVT_BW_RD_PAY_M0_BYTE_NUM 0x0000d
#define HNS3_PMU_EVT_BW_RD_PAY_M0_TIME 0x1000d
#define HNS3_PMU_EVT_BW_RD_PAY_M1_BYTE_NUM 0x0000e
#define HNS3_PMU_EVT_BW_RD_PAY_M1_TIME 0x1000e
#define HNS3_PMU_EVT_BW_WR_PAY_M0_BYTE_NUM 0x0000f
#define HNS3_PMU_EVT_BW_WR_PAY_M0_TIME 0x1000f
#define HNS3_PMU_EVT_BW_WR_PAY_M1_BYTE_NUM 0x00010
#define HNS3_PMU_EVT_BW_WR_PAY_M1_TIME 0x10010
/* packet rate events */
#define HNS3_PMU_EVT_PPS_IGU_SSU_PACKET_NUM 0x00100
#define HNS3_PMU_EVT_PPS_IGU_SSU_TIME 0x10100
#define HNS3_PMU_EVT_PPS_SSU_EGU_PACKET_NUM 0x00101
#define HNS3_PMU_EVT_PPS_SSU_EGU_TIME 0x10101
#define HNS3_PMU_EVT_PPS_SSU_RPU_PACKET_NUM 0x00102
#define HNS3_PMU_EVT_PPS_SSU_RPU_TIME 0x10102
#define HNS3_PMU_EVT_PPS_SSU_ROCE_PACKET_NUM 0x00103
#define HNS3_PMU_EVT_PPS_SSU_ROCE_TIME 0x10103
#define HNS3_PMU_EVT_PPS_ROCE_SSU_PACKET_NUM 0x00104
#define HNS3_PMU_EVT_PPS_ROCE_SSU_TIME 0x10104
#define HNS3_PMU_EVT_PPS_TPU_SSU_PACKET_NUM 0x00105
#define HNS3_PMU_EVT_PPS_TPU_SSU_TIME 0x10105
#define HNS3_PMU_EVT_PPS_RPU_RCBRX_PACKET_NUM 0x00106
#define HNS3_PMU_EVT_PPS_RPU_RCBRX_TIME 0x10106
#define HNS3_PMU_EVT_PPS_RCBTX_TPU_PACKET_NUM 0x00107
#define HNS3_PMU_EVT_PPS_RCBTX_TPU_TIME 0x10107
#define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_PACKET_NUM 0x00108
#define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_TIME 0x10108
#define HNS3_PMU_EVT_PPS_WR_FBD_PACKET_NUM 0x00109
#define HNS3_PMU_EVT_PPS_WR_FBD_TIME 0x10109
#define HNS3_PMU_EVT_PPS_WR_EBD_PACKET_NUM 0x0010a
#define HNS3_PMU_EVT_PPS_WR_EBD_TIME 0x1010a
#define HNS3_PMU_EVT_PPS_RD_FBD_PACKET_NUM 0x0010b
#define HNS3_PMU_EVT_PPS_RD_FBD_TIME 0x1010b
#define HNS3_PMU_EVT_PPS_RD_EBD_PACKET_NUM 0x0010c
#define HNS3_PMU_EVT_PPS_RD_EBD_TIME 0x1010c
#define HNS3_PMU_EVT_PPS_RD_PAY_M0_PACKET_NUM 0x0010d
#define HNS3_PMU_EVT_PPS_RD_PAY_M0_TIME 0x1010d
#define HNS3_PMU_EVT_PPS_RD_PAY_M1_PACKET_NUM 0x0010e
#define HNS3_PMU_EVT_PPS_RD_PAY_M1_TIME 0x1010e
#define HNS3_PMU_EVT_PPS_WR_PAY_M0_PACKET_NUM 0x0010f
#define HNS3_PMU_EVT_PPS_WR_PAY_M0_TIME 0x1010f
#define HNS3_PMU_EVT_PPS_WR_PAY_M1_PACKET_NUM 0x00110
#define HNS3_PMU_EVT_PPS_WR_PAY_M1_TIME 0x10110
#define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_PACKET_NUM 0x00111
#define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_TIME 0x10111
#define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_PACKET_NUM 0x00112
#define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_TIME 0x10112
/* latency events */
#define HNS3_PMU_EVT_DLY_TX_PUSH_TIME 0x00202
#define HNS3_PMU_EVT_DLY_TX_PUSH_PACKET_NUM 0x10202
#define HNS3_PMU_EVT_DLY_TX_TIME 0x00204
#define HNS3_PMU_EVT_DLY_TX_PACKET_NUM 0x10204
#define HNS3_PMU_EVT_DLY_SSU_TX_NIC_TIME 0x00206
#define HNS3_PMU_EVT_DLY_SSU_TX_NIC_PACKET_NUM 0x10206
#define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_TIME 0x00207
#define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_PACKET_NUM 0x10207
#define HNS3_PMU_EVT_DLY_SSU_RX_NIC_TIME 0x00208
#define HNS3_PMU_EVT_DLY_SSU_RX_NIC_PACKET_NUM 0x10208
#define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_TIME 0x00209
#define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_PACKET_NUM 0x10209
#define HNS3_PMU_EVT_DLY_RPU_TIME 0x0020e
#define HNS3_PMU_EVT_DLY_RPU_PACKET_NUM 0x1020e
#define HNS3_PMU_EVT_DLY_TPU_TIME 0x0020f
#define HNS3_PMU_EVT_DLY_TPU_PACKET_NUM 0x1020f
#define HNS3_PMU_EVT_DLY_RPE_TIME 0x00210
#define HNS3_PMU_EVT_DLY_RPE_PACKET_NUM 0x10210
#define HNS3_PMU_EVT_DLY_TPE_TIME 0x00211
#define HNS3_PMU_EVT_DLY_TPE_PACKET_NUM 0x10211
#define HNS3_PMU_EVT_DLY_TPE_PUSH_TIME 0x00212
#define HNS3_PMU_EVT_DLY_TPE_PUSH_PACKET_NUM 0x10212
#define HNS3_PMU_EVT_DLY_WR_FBD_TIME 0x00213
#define HNS3_PMU_EVT_DLY_WR_FBD_PACKET_NUM 0x10213
#define HNS3_PMU_EVT_DLY_WR_EBD_TIME 0x00214
#define HNS3_PMU_EVT_DLY_WR_EBD_PACKET_NUM 0x10214
#define HNS3_PMU_EVT_DLY_RD_FBD_TIME 0x00215
#define HNS3_PMU_EVT_DLY_RD_FBD_PACKET_NUM 0x10215
#define HNS3_PMU_EVT_DLY_RD_EBD_TIME 0x00216
#define HNS3_PMU_EVT_DLY_RD_EBD_PACKET_NUM 0x10216
#define HNS3_PMU_EVT_DLY_RD_PAY_M0_TIME 0x00217
#define HNS3_PMU_EVT_DLY_RD_PAY_M0_PACKET_NUM 0x10217
#define HNS3_PMU_EVT_DLY_RD_PAY_M1_TIME 0x00218
#define HNS3_PMU_EVT_DLY_RD_PAY_M1_PACKET_NUM 0x10218
#define HNS3_PMU_EVT_DLY_WR_PAY_M0_TIME 0x00219
#define HNS3_PMU_EVT_DLY_WR_PAY_M0_PACKET_NUM 0x10219
#define HNS3_PMU_EVT_DLY_WR_PAY_M1_TIME 0x0021a
#define HNS3_PMU_EVT_DLY_WR_PAY_M1_PACKET_NUM 0x1021a
#define HNS3_PMU_EVT_DLY_MSIX_WRITE_TIME 0x0021c
#define HNS3_PMU_EVT_DLY_MSIX_WRITE_PACKET_NUM 0x1021c
/* interrupt rate events */
#define HNS3_PMU_EVT_PPS_MSIX_NIC_INTR_NUM 0x00300
#define HNS3_PMU_EVT_PPS_MSIX_NIC_TIME 0x10300
/* filter mode supported by each bandwidth event */
#define HNS3_PMU_FILTER_BW_SSU_EGU 0x07
#define HNS3_PMU_FILTER_BW_SSU_RPU 0x1f
#define HNS3_PMU_FILTER_BW_SSU_ROCE 0x0f
#define HNS3_PMU_FILTER_BW_ROCE_SSU 0x0f
#define HNS3_PMU_FILTER_BW_TPU_SSU 0x1f
#define HNS3_PMU_FILTER_BW_RPU_RCBRX 0x11
#define HNS3_PMU_FILTER_BW_RCBTX_TXSCH 0x11
#define HNS3_PMU_FILTER_BW_WR_FBD 0x1b
#define HNS3_PMU_FILTER_BW_WR_EBD 0x11
#define HNS3_PMU_FILTER_BW_RD_FBD 0x01
#define HNS3_PMU_FILTER_BW_RD_EBD 0x1b
#define HNS3_PMU_FILTER_BW_RD_PAY_M0 0x01
#define HNS3_PMU_FILTER_BW_RD_PAY_M1 0x01
#define HNS3_PMU_FILTER_BW_WR_PAY_M0 0x01
#define HNS3_PMU_FILTER_BW_WR_PAY_M1 0x01
/* filter mode supported by each packet rate event */
#define HNS3_PMU_FILTER_PPS_IGU_SSU 0x07
#define HNS3_PMU_FILTER_PPS_SSU_EGU 0x07
#define HNS3_PMU_FILTER_PPS_SSU_RPU 0x1f
#define HNS3_PMU_FILTER_PPS_SSU_ROCE 0x0f
#define HNS3_PMU_FILTER_PPS_ROCE_SSU 0x0f
#define HNS3_PMU_FILTER_PPS_TPU_SSU 0x1f
#define HNS3_PMU_FILTER_PPS_RPU_RCBRX 0x11
#define HNS3_PMU_FILTER_PPS_RCBTX_TPU 0x1f
#define HNS3_PMU_FILTER_PPS_RCBTX_TXSCH 0x11
#define HNS3_PMU_FILTER_PPS_WR_FBD 0x1b
#define HNS3_PMU_FILTER_PPS_WR_EBD 0x11
#define HNS3_PMU_FILTER_PPS_RD_FBD 0x01
#define HNS3_PMU_FILTER_PPS_RD_EBD 0x1b
#define HNS3_PMU_FILTER_PPS_RD_PAY_M0 0x01
#define HNS3_PMU_FILTER_PPS_RD_PAY_M1 0x01
#define HNS3_PMU_FILTER_PPS_WR_PAY_M0 0x01
#define HNS3_PMU_FILTER_PPS_WR_PAY_M1 0x01
#define HNS3_PMU_FILTER_PPS_NICROH_TX_PRE 0x01
#define HNS3_PMU_FILTER_PPS_NICROH_RX_PRE 0x01
/* filter mode supported by each latency event */
#define HNS3_PMU_FILTER_DLY_TX_PUSH 0x01
#define HNS3_PMU_FILTER_DLY_TX 0x01
#define HNS3_PMU_FILTER_DLY_SSU_TX_NIC 0x07
#define HNS3_PMU_FILTER_DLY_SSU_TX_ROCE 0x07
#define HNS3_PMU_FILTER_DLY_SSU_RX_NIC 0x07
#define HNS3_PMU_FILTER_DLY_SSU_RX_ROCE 0x07
#define HNS3_PMU_FILTER_DLY_RPU 0x11
#define HNS3_PMU_FILTER_DLY_TPU 0x1f
#define HNS3_PMU_FILTER_DLY_RPE 0x01
#define HNS3_PMU_FILTER_DLY_TPE 0x0b
#define HNS3_PMU_FILTER_DLY_TPE_PUSH 0x1b
#define HNS3_PMU_FILTER_DLY_WR_FBD 0x1b
#define HNS3_PMU_FILTER_DLY_WR_EBD 0x11
#define HNS3_PMU_FILTER_DLY_RD_FBD 0x01
#define HNS3_PMU_FILTER_DLY_RD_EBD 0x1b
#define HNS3_PMU_FILTER_DLY_RD_PAY_M0 0x01
#define HNS3_PMU_FILTER_DLY_RD_PAY_M1 0x01
#define HNS3_PMU_FILTER_DLY_WR_PAY_M0 0x01
#define HNS3_PMU_FILTER_DLY_WR_PAY_M1 0x01
#define HNS3_PMU_FILTER_DLY_MSIX_WRITE 0x01
/* filter mode supported by each interrupt rate event */
#define HNS3_PMU_FILTER_INTR_MSIX_NIC 0x01
enum hns3_pmu_hw_filter_mode {
HNS3_PMU_HW_FILTER_GLOBAL,
HNS3_PMU_HW_FILTER_PORT,
HNS3_PMU_HW_FILTER_PORT_TC,
HNS3_PMU_HW_FILTER_FUNC,
HNS3_PMU_HW_FILTER_FUNC_QUEUE,
HNS3_PMU_HW_FILTER_FUNC_INTR,
};
struct hns3_pmu_event_attr {
u32 event;
u16 filter_support;
};
struct hns3_pmu {
struct perf_event *hw_events[HNS3_PMU_MAX_HW_EVENTS];
struct hlist_node node;
struct pci_dev *pdev;
struct pmu pmu;
void __iomem *base;
int irq;
int on_cpu;
u32 identifier;
u32 hw_clk_freq; /* hardware clock frequency of PMU */
/* maximum and minimum bdf allowed by PMU */
u16 bdf_min;
u16 bdf_max;
};
#define to_hns3_pmu(p) (container_of((p), struct hns3_pmu, pmu))
#define GET_PCI_DEVFN(bdf) ((bdf) & 0xff)
#define FILTER_CONDITION_PORT(port) ((1 << (port)) & 0xff)
#define FILTER_CONDITION_PORT_TC(port, tc) (((port) << 3) | ((tc) & 0x07))
#define FILTER_CONDITION_FUNC_INTR(func, intr) (((intr) << 8) | (func))
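/*
* For example, FILTER_CONDITION_PORT_TC(2, 1) encodes to (2 << 3) | 1 = 0x11
* and FILTER_CONDITION_FUNC_INTR(0x05, 0x10) encodes to (0x10 << 8) | 0x05.
*/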
#define HNS3_PMU_FILTER_ATTR(_name, _config, _start, _end) \
static inline u64 hns3_pmu_get_##_name(struct perf_event *event) \
{ \
return FIELD_GET(GENMASK_ULL(_end, _start), \
event->attr._config); \
}
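/*
* For instance, HNS3_PMU_FILTER_ATTR(subevent, config, 0, 7) expands into a
* helper hns3_pmu_get_subevent() returning bits 0-7 of event->attr.config.
*/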
HNS3_PMU_FILTER_ATTR(subevent, config, 0, 7);
HNS3_PMU_FILTER_ATTR(event_type, config, 8, 15);
HNS3_PMU_FILTER_ATTR(ext_counter_used, config, 16, 16);
HNS3_PMU_FILTER_ATTR(port, config1, 0, 3);
HNS3_PMU_FILTER_ATTR(tc, config1, 4, 7);
HNS3_PMU_FILTER_ATTR(bdf, config1, 8, 23);
HNS3_PMU_FILTER_ATTR(queue, config1, 24, 39);
HNS3_PMU_FILTER_ATTR(intr, config1, 40, 51);
HNS3_PMU_FILTER_ATTR(global, config1, 52, 52);
#define HNS3_BW_EVT_BYTE_NUM(_name) (&(struct hns3_pmu_event_attr) {\
HNS3_PMU_EVT_BW_##_name##_BYTE_NUM, \
HNS3_PMU_FILTER_BW_##_name})
#define HNS3_BW_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\
HNS3_PMU_EVT_BW_##_name##_TIME, \
HNS3_PMU_FILTER_BW_##_name})
#define HNS3_PPS_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\
HNS3_PMU_EVT_PPS_##_name##_PACKET_NUM, \
HNS3_PMU_FILTER_PPS_##_name})
#define HNS3_PPS_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\
HNS3_PMU_EVT_PPS_##_name##_TIME, \
HNS3_PMU_FILTER_PPS_##_name})
#define HNS3_DLY_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\
HNS3_PMU_EVT_DLY_##_name##_TIME, \
HNS3_PMU_FILTER_DLY_##_name})
#define HNS3_DLY_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\
HNS3_PMU_EVT_DLY_##_name##_PACKET_NUM, \
HNS3_PMU_FILTER_DLY_##_name})
#define HNS3_INTR_EVT_INTR_NUM(_name) (&(struct hns3_pmu_event_attr) {\
HNS3_PMU_EVT_PPS_##_name##_INTR_NUM, \
HNS3_PMU_FILTER_INTR_##_name})
#define HNS3_INTR_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\
HNS3_PMU_EVT_PPS_##_name##_TIME, \
HNS3_PMU_FILTER_INTR_##_name})
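/*
* E.g. HNS3_BW_EVT_BYTE_NUM(SSU_EGU) evaluates to a struct hns3_pmu_event_attr
* of { HNS3_PMU_EVT_BW_SSU_EGU_BYTE_NUM, HNS3_PMU_FILTER_BW_SSU_EGU },
* i.e. { 0x00001, 0x07 }.
*/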
static ssize_t hns3_pmu_format_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *eattr;
eattr = container_of(attr, struct dev_ext_attribute, attr);
return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}
static ssize_t hns3_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hns3_pmu_event_attr *event;
struct dev_ext_attribute *eattr;
eattr = container_of(attr, struct dev_ext_attribute, attr);
event = eattr->var;
return sysfs_emit(buf, "config=0x%x\n", event->event);
}
static ssize_t hns3_pmu_filter_mode_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct hns3_pmu_event_attr *event;
struct dev_ext_attribute *eattr;
int len;
eattr = container_of(attr, struct dev_ext_attribute, attr);
event = eattr->var;
len = sysfs_emit_at(buf, 0, "filter mode supported: ");
if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL)
len += sysfs_emit_at(buf, len, "global ");
if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT)
len += sysfs_emit_at(buf, len, "port ");
if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC)
len += sysfs_emit_at(buf, len, "port-tc ");
if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC)
len += sysfs_emit_at(buf, len, "func ");
if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE)
len += sysfs_emit_at(buf, len, "func-queue ");
if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR)
len += sysfs_emit_at(buf, len, "func-intr ");
len += sysfs_emit_at(buf, len, "\n");
return len;
}
#define HNS3_PMU_ATTR(_name, _func, _config) \
(&((struct dev_ext_attribute[]) { \
{ __ATTR(_name, 0444, _func, NULL), (void *)_config } \
})[0].attr.attr)
#define HNS3_PMU_FORMAT_ATTR(_name, _format) \
HNS3_PMU_ATTR(_name, hns3_pmu_format_show, (void *)_format)
#define HNS3_PMU_EVENT_ATTR(_name, _event) \
HNS3_PMU_ATTR(_name, hns3_pmu_event_show, (void *)_event)
#define HNS3_PMU_FLT_MODE_ATTR(_name, _event) \
HNS3_PMU_ATTR(_name, hns3_pmu_filter_mode_show, (void *)_event)
#define HNS3_PMU_BW_EVT_PAIR(_name, _macro) \
HNS3_PMU_EVENT_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \
HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro))
#define HNS3_PMU_PPS_EVT_PAIR(_name, _macro) \
HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \
HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro))
#define HNS3_PMU_DLY_EVT_PAIR(_name, _macro) \
HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \
HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro))
#define HNS3_PMU_INTR_EVT_PAIR(_name, _macro) \
HNS3_PMU_EVENT_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \
HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro))
#define HNS3_PMU_BW_FLT_MODE_PAIR(_name, _macro) \
HNS3_PMU_FLT_MODE_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \
HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro))
#define HNS3_PMU_PPS_FLT_MODE_PAIR(_name, _macro) \
HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \
HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro))
#define HNS3_PMU_DLY_FLT_MODE_PAIR(_name, _macro) \
HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \
HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro))
#define HNS3_PMU_INTR_FLT_MODE_PAIR(_name, _macro) \
HNS3_PMU_FLT_MODE_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \
HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro))
static u8 hns3_pmu_hw_filter_modes[] = {
HNS3_PMU_HW_FILTER_GLOBAL,
HNS3_PMU_HW_FILTER_PORT,
HNS3_PMU_HW_FILTER_PORT_TC,
HNS3_PMU_HW_FILTER_FUNC,
HNS3_PMU_HW_FILTER_FUNC_QUEUE,
HNS3_PMU_HW_FILTER_FUNC_INTR,
};
#define HNS3_PMU_SET_HW_FILTER(_hwc, _mode) \
((_hwc)->addr_filters = (void *)&hns3_pmu_hw_filter_modes[(_mode)])
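/*
* addr_filters is not used for address filtering by this PMU, so it is
* borrowed to stash a pointer into hns3_pmu_hw_filter_modes[]; the selected
* mode is recovered later via *(u8 *)hwc->addr_filters.
*/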
static ssize_t identifier_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
return sysfs_emit(buf, "0x%x\n", hns3_pmu->identifier);
}
static DEVICE_ATTR_RO(identifier);
static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
return sysfs_emit(buf, "%d\n", hns3_pmu->on_cpu);
}
static DEVICE_ATTR_RO(cpumask);
static ssize_t bdf_min_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
u16 bdf = hns3_pmu->bdf_min;
return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf),
PCI_SLOT(bdf), PCI_FUNC(bdf));
}
static DEVICE_ATTR_RO(bdf_min);
static ssize_t bdf_max_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
u16 bdf = hns3_pmu->bdf_max;
return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf),
PCI_SLOT(bdf), PCI_FUNC(bdf));
}
static DEVICE_ATTR_RO(bdf_max);
static ssize_t hw_clk_freq_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
return sysfs_emit(buf, "%u\n", hns3_pmu->hw_clk_freq);
}
static DEVICE_ATTR_RO(hw_clk_freq);
static struct attribute *hns3_pmu_events_attr[] = {
/* bandwidth events */
HNS3_PMU_BW_EVT_PAIR(bw_ssu_egu, SSU_EGU),
HNS3_PMU_BW_EVT_PAIR(bw_ssu_rpu, SSU_RPU),
HNS3_PMU_BW_EVT_PAIR(bw_ssu_roce, SSU_ROCE),
HNS3_PMU_BW_EVT_PAIR(bw_roce_ssu, ROCE_SSU),
HNS3_PMU_BW_EVT_PAIR(bw_tpu_ssu, TPU_SSU),
HNS3_PMU_BW_EVT_PAIR(bw_rpu_rcbrx, RPU_RCBRX),
HNS3_PMU_BW_EVT_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH),
HNS3_PMU_BW_EVT_PAIR(bw_wr_fbd, WR_FBD),
HNS3_PMU_BW_EVT_PAIR(bw_wr_ebd, WR_EBD),
HNS3_PMU_BW_EVT_PAIR(bw_rd_fbd, RD_FBD),
HNS3_PMU_BW_EVT_PAIR(bw_rd_ebd, RD_EBD),
HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m0, RD_PAY_M0),
HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m1, RD_PAY_M1),
HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m0, WR_PAY_M0),
HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m1, WR_PAY_M1),
/* packet rate events */
HNS3_PMU_PPS_EVT_PAIR(pps_igu_ssu, IGU_SSU),
HNS3_PMU_PPS_EVT_PAIR(pps_ssu_egu, SSU_EGU),
HNS3_PMU_PPS_EVT_PAIR(pps_ssu_rpu, SSU_RPU),
HNS3_PMU_PPS_EVT_PAIR(pps_ssu_roce, SSU_ROCE),
HNS3_PMU_PPS_EVT_PAIR(pps_roce_ssu, ROCE_SSU),
HNS3_PMU_PPS_EVT_PAIR(pps_tpu_ssu, TPU_SSU),
HNS3_PMU_PPS_EVT_PAIR(pps_rpu_rcbrx, RPU_RCBRX),
HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_tpu, RCBTX_TPU),
HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH),
HNS3_PMU_PPS_EVT_PAIR(pps_wr_fbd, WR_FBD),
HNS3_PMU_PPS_EVT_PAIR(pps_wr_ebd, WR_EBD),
HNS3_PMU_PPS_EVT_PAIR(pps_rd_fbd, RD_FBD),
HNS3_PMU_PPS_EVT_PAIR(pps_rd_ebd, RD_EBD),
HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m0, RD_PAY_M0),
HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m1, RD_PAY_M1),
HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m0, WR_PAY_M0),
HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m1, WR_PAY_M1),
HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE),
HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE),
/* latency events */
HNS3_PMU_DLY_EVT_PAIR(dly_tx_push_to_mac, TX_PUSH),
HNS3_PMU_DLY_EVT_PAIR(dly_tx_normal_to_mac, TX),
HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC),
HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE),
HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC),
HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE),
HNS3_PMU_DLY_EVT_PAIR(dly_rpu, RPU),
HNS3_PMU_DLY_EVT_PAIR(dly_tpu, TPU),
HNS3_PMU_DLY_EVT_PAIR(dly_rpe, RPE),
HNS3_PMU_DLY_EVT_PAIR(dly_tpe_normal, TPE),
HNS3_PMU_DLY_EVT_PAIR(dly_tpe_push, TPE_PUSH),
HNS3_PMU_DLY_EVT_PAIR(dly_wr_fbd, WR_FBD),
HNS3_PMU_DLY_EVT_PAIR(dly_wr_ebd, WR_EBD),
HNS3_PMU_DLY_EVT_PAIR(dly_rd_fbd, RD_FBD),
HNS3_PMU_DLY_EVT_PAIR(dly_rd_ebd, RD_EBD),
HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m0, RD_PAY_M0),
HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m1, RD_PAY_M1),
HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m0, WR_PAY_M0),
HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m1, WR_PAY_M1),
HNS3_PMU_DLY_EVT_PAIR(dly_msix_write, MSIX_WRITE),
/* interrupt rate events */
HNS3_PMU_INTR_EVT_PAIR(pps_intr_msix_nic, MSIX_NIC),
NULL
};
static struct attribute *hns3_pmu_filter_mode_attr[] = {
/* bandwidth events */
HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_egu, SSU_EGU),
HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_rpu, SSU_RPU),
HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_roce, SSU_ROCE),
HNS3_PMU_BW_FLT_MODE_PAIR(bw_roce_ssu, ROCE_SSU),
HNS3_PMU_BW_FLT_MODE_PAIR(bw_tpu_ssu, TPU_SSU),
HNS3_PMU_BW_FLT_MODE_PAIR(bw_rpu_rcbrx, RPU_RCBRX),
HNS3_PMU_BW_FLT_MODE_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH),
HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_fbd, WR_FBD),
HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_ebd, WR_EBD),
HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_fbd, RD_FBD),
HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_ebd, RD_EBD),
HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m0, RD_PAY_M0),
HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m1, RD_PAY_M1),
HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m0, WR_PAY_M0),
HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m1, WR_PAY_M1),
/* packet rate events */
HNS3_PMU_PPS_FLT_MODE_PAIR(pps_igu_ssu, IGU_SSU),
HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_egu, SSU_EGU),
HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_rpu, SSU_RPU),
HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_roce, SSU_ROCE),
HNS3_PMU_PPS_FLT_MODE_PAIR(pps_roce_ssu, ROCE_SSU),
HNS3_PMU_PPS_FLT_MODE_PAIR(pps_tpu_ssu, TPU_SSU),
HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rpu_rcbrx, RPU_RCBRX),
HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_tpu, RCBTX_TPU),
HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH),
HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_fbd, WR_FBD),
HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_ebd, WR_EBD),
HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_fbd, RD_FBD),
HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_ebd, RD_EBD),
HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m0, RD_PAY_M0),
HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m1, RD_PAY_M1),
HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m0, WR_PAY_M0),
HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m1, WR_PAY_M1),
HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE),
HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE),
/* latency events */
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_push_to_mac, TX_PUSH),
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_normal_to_mac, TX),
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC),
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE),
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC),
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE),
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpu, RPU),
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpu, TPU),
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpe, RPE),
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_normal, TPE),
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_push, TPE_PUSH),
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_fbd, WR_FBD),
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_ebd, WR_EBD),
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_fbd, RD_FBD),
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_ebd, RD_EBD),
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m0, RD_PAY_M0),
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m1, RD_PAY_M1),
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m0, WR_PAY_M0),
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m1, WR_PAY_M1),
HNS3_PMU_DLY_FLT_MODE_PAIR(dly_msix_write, MSIX_WRITE),
/* interrupt rate events */
HNS3_PMU_INTR_FLT_MODE_PAIR(pps_intr_msix_nic, MSIX_NIC),
NULL
};
static struct attribute_group hns3_pmu_events_group = {
.name = "events",
.attrs = hns3_pmu_events_attr,
};
static struct attribute_group hns3_pmu_filter_mode_group = {
.name = "filtermode",
.attrs = hns3_pmu_filter_mode_attr,
};
static struct attribute *hns3_pmu_format_attr[] = {
HNS3_PMU_FORMAT_ATTR(subevent, "config:0-7"),
HNS3_PMU_FORMAT_ATTR(event_type, "config:8-15"),
HNS3_PMU_FORMAT_ATTR(ext_counter_used, "config:16"),
HNS3_PMU_FORMAT_ATTR(port, "config1:0-3"),
HNS3_PMU_FORMAT_ATTR(tc, "config1:4-7"),
HNS3_PMU_FORMAT_ATTR(bdf, "config1:8-23"),
HNS3_PMU_FORMAT_ATTR(queue, "config1:24-39"),
HNS3_PMU_FORMAT_ATTR(intr, "config1:40-51"),
HNS3_PMU_FORMAT_ATTR(global, "config1:52"),
NULL
};
static struct attribute_group hns3_pmu_format_group = {
.name = "format",
.attrs = hns3_pmu_format_attr,
};
static struct attribute *hns3_pmu_cpumask_attrs[] = {
&dev_attr_cpumask.attr,
NULL
};
static struct attribute_group hns3_pmu_cpumask_attr_group = {
.attrs = hns3_pmu_cpumask_attrs,
};
static struct attribute *hns3_pmu_identifier_attrs[] = {
&dev_attr_identifier.attr,
NULL
};
static struct attribute_group hns3_pmu_identifier_attr_group = {
.attrs = hns3_pmu_identifier_attrs,
};
static struct attribute *hns3_pmu_bdf_range_attrs[] = {
&dev_attr_bdf_min.attr,
&dev_attr_bdf_max.attr,
NULL
};
static struct attribute_group hns3_pmu_bdf_range_attr_group = {
.attrs = hns3_pmu_bdf_range_attrs,
};
static struct attribute *hns3_pmu_hw_clk_freq_attrs[] = {
&dev_attr_hw_clk_freq.attr,
NULL
};
static struct attribute_group hns3_pmu_hw_clk_freq_attr_group = {
.attrs = hns3_pmu_hw_clk_freq_attrs,
};
static const struct attribute_group *hns3_pmu_attr_groups[] = {
&hns3_pmu_events_group,
&hns3_pmu_filter_mode_group,
&hns3_pmu_format_group,
&hns3_pmu_cpumask_attr_group,
&hns3_pmu_identifier_attr_group,
&hns3_pmu_bdf_range_attr_group,
&hns3_pmu_hw_clk_freq_attr_group,
NULL
};
static u32 hns3_pmu_get_event(struct perf_event *event)
{
return hns3_pmu_get_ext_counter_used(event) << 16 |
hns3_pmu_get_event_type(event) << 8 |
hns3_pmu_get_subevent(event);
}
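/*
* Example: config 0x10105 decomposes into subevent 0x05, event_type 0x01
* (packet rate) and ext_counter_used 1, i.e. the cycle counter of the
* TPU->SSU packet rate event.
*/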
static u32 hns3_pmu_get_real_event(struct perf_event *event)
{
return hns3_pmu_get_event_type(event) << 8 |
hns3_pmu_get_subevent(event);
}
static u32 hns3_pmu_get_offset(u32 offset, u32 idx)
{
return offset + HNS3_PMU_REG_EVENT_OFFSET +
HNS3_PMU_REG_EVENT_SIZE * idx;
}
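/*
* Example: the counter register of hardware event index 2 lives at
* 0x10 + 0x1000 + 0x1000 * 2 = 0x3010 from the PMU base.
*/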
static u32 hns3_pmu_readl(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx)
{
u32 offset = hns3_pmu_get_offset(reg_offset, idx);
return readl(hns3_pmu->base + offset);
}
static void hns3_pmu_writel(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx,
u32 val)
{
u32 offset = hns3_pmu_get_offset(reg_offset, idx);
writel(val, hns3_pmu->base + offset);
}
static u64 hns3_pmu_readq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx)
{
u32 offset = hns3_pmu_get_offset(reg_offset, idx);
return readq(hns3_pmu->base + offset);
}
static void hns3_pmu_writeq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx,
u64 val)
{
u32 offset = hns3_pmu_get_offset(reg_offset, idx);
writeq(val, hns3_pmu->base + offset);
}
static bool hns3_pmu_cmp_event(struct perf_event *target,
struct perf_event *event)
{
return hns3_pmu_get_real_event(target) == hns3_pmu_get_real_event(event);
}
static int hns3_pmu_find_related_event_idx(struct hns3_pmu *hns3_pmu,
struct perf_event *event)
{
struct perf_event *sibling;
int hw_event_used = 0;
int idx;
for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) {
sibling = hns3_pmu->hw_events[idx];
if (!sibling)
continue;
hw_event_used++;
if (!hns3_pmu_cmp_event(sibling, event))
continue;
/* Related event is already used in the same group */
if (sibling->group_leader == event->group_leader)
return idx;
}
/* No related event and all hardware events are used up */
if (hw_event_used >= HNS3_PMU_MAX_HW_EVENTS)
return -EBUSY;
/* No related event and spare hardware events are still available */
return -ENOENT;
}
static int hns3_pmu_get_event_idx(struct hns3_pmu *hns3_pmu)
{
int idx;
for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) {
if (!hns3_pmu->hw_events[idx])
return idx;
}
return -EBUSY;
}
static bool hns3_pmu_valid_bdf(struct hns3_pmu *hns3_pmu, u16 bdf)
{
struct pci_dev *pdev;
if (bdf < hns3_pmu->bdf_min || bdf > hns3_pmu->bdf_max) {
pci_err(hns3_pmu->pdev, "Invalid EP device: %#x!\n", bdf);
return false;
}
pdev = pci_get_domain_bus_and_slot(pci_domain_nr(hns3_pmu->pdev->bus),
PCI_BUS_NUM(bdf),
GET_PCI_DEVFN(bdf));
if (!pdev) {
pci_err(hns3_pmu->pdev, "Nonexistent EP device: %#x!\n", bdf);
return false;
}
pci_dev_put(pdev);
return true;
}
static void hns3_pmu_set_qid_para(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf,
u16 queue)
{
u32 val;
val = GET_PCI_DEVFN(bdf);
val |= (u32)queue << HNS3_PMU_QID_PARA_QUEUE_S;
hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_PARA, idx, val);
}
static bool hns3_pmu_qid_req_start(struct hns3_pmu *hns3_pmu, u32 idx)
{
bool queue_id_valid = false;
u32 reg_qid_ctrl, val;
int err;
/* enable queue id request */
hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx,
HNS3_PMU_QID_CTRL_REQ_ENABLE);
reg_qid_ctrl = hns3_pmu_get_offset(HNS3_PMU_REG_EVENT_QID_CTRL, idx);
err = readl_poll_timeout(hns3_pmu->base + reg_qid_ctrl, val,
val & HNS3_PMU_QID_CTRL_DONE, 1, 1000);
if (err == -ETIMEDOUT) {
pci_err(hns3_pmu->pdev, "QID request timeout!\n");
goto out;
}
queue_id_valid = !(val & HNS3_PMU_QID_CTRL_MISS);
out:
/* disable qid request and clear status */
hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx, 0);
return queue_id_valid;
}
static bool hns3_pmu_valid_queue(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf,
u16 queue)
{
hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue);
return hns3_pmu_qid_req_start(hns3_pmu, idx);
}
static struct hns3_pmu_event_attr *hns3_pmu_get_pmu_event(u32 event)
{
struct hns3_pmu_event_attr *pmu_event;
struct dev_ext_attribute *eattr;
struct device_attribute *dattr;
struct attribute *attr;
u32 i;
for (i = 0; i < ARRAY_SIZE(hns3_pmu_events_attr) - 1; i++) {
attr = hns3_pmu_events_attr[i];
dattr = container_of(attr, struct device_attribute, attr);
eattr = container_of(dattr, struct dev_ext_attribute, attr);
pmu_event = eattr->var;
if (event == pmu_event->event)
return pmu_event;
}
return NULL;
}
static int hns3_pmu_set_func_mode(struct perf_event *event,
struct hns3_pmu *hns3_pmu)
{
struct hw_perf_event *hwc = &event->hw;
u16 bdf = hns3_pmu_get_bdf(event);
if (!hns3_pmu_valid_bdf(hns3_pmu, bdf))
return -ENOENT;
HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC);
return 0;
}
static int hns3_pmu_set_func_queue_mode(struct perf_event *event,
struct hns3_pmu *hns3_pmu)
{
u16 queue_id = hns3_pmu_get_queue(event);
struct hw_perf_event *hwc = &event->hw;
u16 bdf = hns3_pmu_get_bdf(event);
if (!hns3_pmu_valid_bdf(hns3_pmu, bdf))
return -ENOENT;
if (!hns3_pmu_valid_queue(hns3_pmu, hwc->idx, bdf, queue_id)) {
pci_err(hns3_pmu->pdev, "Invalid queue: %u\n", queue_id);
return -ENOENT;
}
HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_QUEUE);
return 0;
}
static bool
hns3_pmu_is_enabled_global_mode(struct perf_event *event,
struct hns3_pmu_event_attr *pmu_event)
{
u8 global = hns3_pmu_get_global(event);
if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL))
return false;
return global;
}
static bool hns3_pmu_is_enabled_func_mode(struct perf_event *event,
struct hns3_pmu_event_attr *pmu_event)
{
u16 queue_id = hns3_pmu_get_queue(event);
u16 bdf = hns3_pmu_get_bdf(event);
if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC))
return false;
else if (queue_id != HNS3_PMU_FILTER_ALL_QUEUE)
return false;
return bdf;
}
static bool
hns3_pmu_is_enabled_func_queue_mode(struct perf_event *event,
struct hns3_pmu_event_attr *pmu_event)
{
u16 queue_id = hns3_pmu_get_queue(event);
u16 bdf = hns3_pmu_get_bdf(event);
if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE))
return false;
else if (queue_id == HNS3_PMU_FILTER_ALL_QUEUE)
return false;
return bdf;
}
static bool hns3_pmu_is_enabled_port_mode(struct perf_event *event,
struct hns3_pmu_event_attr *pmu_event)
{
u8 tc_id = hns3_pmu_get_tc(event);
if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT))
return false;
return tc_id == HNS3_PMU_FILTER_ALL_TC;
}
static bool
hns3_pmu_is_enabled_port_tc_mode(struct perf_event *event,
struct hns3_pmu_event_attr *pmu_event)
{
u8 tc_id = hns3_pmu_get_tc(event);
if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC))
return false;
return tc_id != HNS3_PMU_FILTER_ALL_TC;
}
static bool
hns3_pmu_is_enabled_func_intr_mode(struct perf_event *event,
struct hns3_pmu *hns3_pmu,
struct hns3_pmu_event_attr *pmu_event)
{
u16 bdf = hns3_pmu_get_bdf(event);
if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR))
return false;
return hns3_pmu_valid_bdf(hns3_pmu, bdf);
}
static int hns3_pmu_select_filter_mode(struct perf_event *event,
struct hns3_pmu *hns3_pmu)
{
u32 event_id = hns3_pmu_get_event(event);
struct hw_perf_event *hwc = &event->hw;
struct hns3_pmu_event_attr *pmu_event;
pmu_event = hns3_pmu_get_pmu_event(event_id);
if (!pmu_event) {
pci_err(hns3_pmu->pdev, "Invalid pmu event\n");
return -ENOENT;
}
if (hns3_pmu_is_enabled_global_mode(event, pmu_event)) {
HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_GLOBAL);
return 0;
}
if (hns3_pmu_is_enabled_func_mode(event, pmu_event))
return hns3_pmu_set_func_mode(event, hns3_pmu);
if (hns3_pmu_is_enabled_func_queue_mode(event, pmu_event))
return hns3_pmu_set_func_queue_mode(event, hns3_pmu);
if (hns3_pmu_is_enabled_port_mode(event, pmu_event)) {
HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT);
return 0;
}
if (hns3_pmu_is_enabled_port_tc_mode(event, pmu_event)) {
HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT_TC);
return 0;
}
if (hns3_pmu_is_enabled_func_intr_mode(event, hns3_pmu, pmu_event)) {
HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_INTR);
return 0;
}
return -ENOENT;
}
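/*
* A group may contain at most HNS3_PMU_MAX_HW_EVENTS distinct hardware
* events; events mapping to the same real event (same bits 0-15) share a
* hardware counter and do not consume an extra slot.
*/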
static bool hns3_pmu_validate_event_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
struct perf_event *event_group[HNS3_PMU_MAX_HW_EVENTS];
int counters = 1;
int num;
event_group[0] = leader;
if (!is_software_event(leader)) {
if (leader->pmu != event->pmu)
return false;
if (leader != event && !hns3_pmu_cmp_event(leader, event))
event_group[counters++] = event;
}
for_each_sibling_event(sibling, event->group_leader) {
if (is_software_event(sibling))
continue;
if (sibling->pmu != event->pmu)
return false;
for (num = 0; num < counters; num++) {
if (hns3_pmu_cmp_event(event_group[num], sibling))
break;
}
if (num == counters)
event_group[counters++] = sibling;
}
return counters <= HNS3_PMU_MAX_HW_EVENTS;
}
static u32 hns3_pmu_get_filter_condition(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
u16 intr_id = hns3_pmu_get_intr(event);
u8 port_id = hns3_pmu_get_port(event);
u16 bdf = hns3_pmu_get_bdf(event);
u8 tc_id = hns3_pmu_get_tc(event);
u8 filter_mode;
filter_mode = *(u8 *)hwc->addr_filters;
switch (filter_mode) {
case HNS3_PMU_HW_FILTER_PORT:
return FILTER_CONDITION_PORT(port_id);
case HNS3_PMU_HW_FILTER_PORT_TC:
return FILTER_CONDITION_PORT_TC(port_id, tc_id);
case HNS3_PMU_HW_FILTER_FUNC:
case HNS3_PMU_HW_FILTER_FUNC_QUEUE:
return GET_PCI_DEVFN(bdf);
case HNS3_PMU_HW_FILTER_FUNC_INTR:
return FILTER_CONDITION_FUNC_INTR(GET_PCI_DEVFN(bdf), intr_id);
default:
break;
}
return 0;
}
static void hns3_pmu_config_filter(struct perf_event *event)
{
struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
u8 event_type = hns3_pmu_get_event_type(event);
u8 subevent_id = hns3_pmu_get_subevent(event);
u16 queue_id = hns3_pmu_get_queue(event);
struct hw_perf_event *hwc = &event->hw;
u8 filter_mode = *(u8 *)hwc->addr_filters;
u16 bdf = hns3_pmu_get_bdf(event);
u32 idx = hwc->idx;
u32 val;
val = event_type;
val |= subevent_id << HNS3_PMU_CTRL_SUBEVENT_S;
val |= filter_mode << HNS3_PMU_CTRL_FILTER_MODE_S;
val |= HNS3_PMU_EVENT_OVERFLOW_RESTART;
hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
val = hns3_pmu_get_filter_condition(event);
hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_HIGH, idx, val);
if (filter_mode == HNS3_PMU_HW_FILTER_FUNC_QUEUE)
hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue_id);
}
static void hns3_pmu_enable_counter(struct hns3_pmu *hns3_pmu,
struct hw_perf_event *hwc)
{
u32 idx = hwc->idx;
u32 val;
val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
val |= HNS3_PMU_EVENT_EN;
hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
}
static void hns3_pmu_disable_counter(struct hns3_pmu *hns3_pmu,
struct hw_perf_event *hwc)
{
u32 idx = hwc->idx;
u32 val;
val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
val &= ~HNS3_PMU_EVENT_EN;
hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
}
static void hns3_pmu_enable_intr(struct hns3_pmu *hns3_pmu,
struct hw_perf_event *hwc)
{
u32 idx = hwc->idx;
u32 val;
val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx);
val &= ~HNS3_PMU_INTR_MASK_OVERFLOW;
hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val);
}
static void hns3_pmu_disable_intr(struct hns3_pmu *hns3_pmu,
struct hw_perf_event *hwc)
{
u32 idx = hwc->idx;
u32 val;
val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx);
val |= HNS3_PMU_INTR_MASK_OVERFLOW;
hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val);
}
static void hns3_pmu_clear_intr_status(struct hns3_pmu *hns3_pmu, u32 idx)
{
u32 val;
val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
val |= HNS3_PMU_EVENT_STATUS_RESET;
hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
val &= ~HNS3_PMU_EVENT_STATUS_RESET;
hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
}
static u64 hns3_pmu_read_counter(struct perf_event *event)
{
struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
return hns3_pmu_readq(hns3_pmu, event->hw.event_base, event->hw.idx);
}
static void hns3_pmu_write_counter(struct perf_event *event, u64 value)
{
struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
u32 idx = event->hw.idx;
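/* write both registers so the counter/ext_counter pair stays in sync */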
hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_COUNTER, idx, value);
hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_EXT_COUNTER, idx, value);
}
static void hns3_pmu_init_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
local64_set(&hwc->prev_count, 0);
hns3_pmu_write_counter(event, 0);
}
static int hns3_pmu_event_init(struct perf_event *event)
{
struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx;
int ret;
if (event->attr.type != event->pmu->type)
return -ENOENT;
/* Sampling is not supported */
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EOPNOTSUPP;
event->cpu = hns3_pmu->on_cpu;
idx = hns3_pmu_get_event_idx(hns3_pmu);
if (idx < 0) {
pci_err(hns3_pmu->pdev, "Up to %u events are supported!\n",
HNS3_PMU_MAX_HW_EVENTS);
return -EBUSY;
}
hwc->idx = idx;
ret = hns3_pmu_select_filter_mode(event, hns3_pmu);
if (ret) {
pci_err(hns3_pmu->pdev, "Invalid filter, ret = %d.\n", ret);
return ret;
}
if (!hns3_pmu_validate_event_group(event)) {
pci_err(hns3_pmu->pdev, "Invalid event group.\n");
return -EINVAL;
}
if (hns3_pmu_get_ext_counter_used(event))
hwc->event_base = HNS3_PMU_REG_EVENT_EXT_COUNTER;
else
hwc->event_base = HNS3_PMU_REG_EVENT_COUNTER;
return 0;
}
static void hns3_pmu_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
u64 new_cnt, prev_cnt, delta;
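/* re-read until prev_count is updated consistently against concurrent readers */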
do {
prev_cnt = local64_read(&hwc->prev_count);
new_cnt = hns3_pmu_read_counter(event);
} while (local64_cmpxchg(&hwc->prev_count, prev_cnt, new_cnt) !=
prev_cnt);
delta = new_cnt - prev_cnt;
local64_add(delta, &event->count);
}
static void hns3_pmu_start(struct perf_event *event, int flags)
{
struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
return;
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
hns3_pmu_config_filter(event);
hns3_pmu_init_counter(event);
hns3_pmu_enable_intr(hns3_pmu, hwc);
hns3_pmu_enable_counter(hns3_pmu, hwc);
perf_event_update_userpage(event);
}
static void hns3_pmu_stop(struct perf_event *event, int flags)
{
struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
hns3_pmu_disable_counter(hns3_pmu, hwc);
hns3_pmu_disable_intr(hns3_pmu, hwc);
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
if (hwc->state & PERF_HES_UPTODATE)
return;
/* Read hardware counter and update the perf counter statistics */
hns3_pmu_read(event);
hwc->state |= PERF_HES_UPTODATE;
}
static int hns3_pmu_add(struct perf_event *event, int flags)
{
struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
/* Check all working events to find a related event. */
idx = hns3_pmu_find_related_event_idx(hns3_pmu, event);
if (idx < 0 && idx != -ENOENT)
return idx;
/* Current event shares an enabled hardware event with related event */
if (idx >= 0 && idx < HNS3_PMU_MAX_HW_EVENTS) {
hwc->idx = idx;
goto start_count;
}
idx = hns3_pmu_get_event_idx(hns3_pmu);
if (idx < 0)
return idx;
hwc->idx = idx;
hns3_pmu->hw_events[idx] = event;
start_count:
if (flags & PERF_EF_START)
hns3_pmu_start(event, PERF_EF_RELOAD);
return 0;
}
static void hns3_pmu_del(struct perf_event *event, int flags)
{
struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
hns3_pmu_stop(event, PERF_EF_UPDATE);
hns3_pmu->hw_events[hwc->idx] = NULL;
perf_event_update_userpage(event);
}
static void hns3_pmu_enable(struct pmu *pmu)
{
struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu);
u32 val;
val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
val |= HNS3_PMU_GLOBAL_START;
writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
}
static void hns3_pmu_disable(struct pmu *pmu)
{
struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu);
u32 val;
val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
val &= ~HNS3_PMU_GLOBAL_START;
writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
}
static int hns3_pmu_alloc_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
{
u16 device_id;
char *name;
u32 val;
hns3_pmu->base = pcim_iomap_table(pdev)[BAR_2];
if (!hns3_pmu->base) {
pci_err(pdev, "ioremap failed\n");
return -ENOMEM;
}
hns3_pmu->hw_clk_freq = readl(hns3_pmu->base + HNS3_PMU_REG_CLOCK_FREQ);
val = readl(hns3_pmu->base + HNS3_PMU_REG_BDF);
hns3_pmu->bdf_min = val & 0xffff;
hns3_pmu->bdf_max = val >> 16;
val = readl(hns3_pmu->base + HNS3_PMU_REG_DEVICE_ID);
device_id = val & 0xffff;
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hns3_pmu_sicl_%u", device_id);
if (!name)
return -ENOMEM;
hns3_pmu->pdev = pdev;
hns3_pmu->on_cpu = -1;
hns3_pmu->identifier = readl(hns3_pmu->base + HNS3_PMU_REG_VERSION);
hns3_pmu->pmu = (struct pmu) {
.name = name,
.module = THIS_MODULE,
.event_init = hns3_pmu_event_init,
.pmu_enable = hns3_pmu_enable,
.pmu_disable = hns3_pmu_disable,
.add = hns3_pmu_add,
.del = hns3_pmu_del,
.start = hns3_pmu_start,
.stop = hns3_pmu_stop,
.read = hns3_pmu_read,
.task_ctx_nr = perf_invalid_context,
.attr_groups = hns3_pmu_attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
return 0;
}
static irqreturn_t hns3_pmu_irq(int irq, void *data)
{
struct hns3_pmu *hns3_pmu = data;
u32 intr_status, idx;
for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) {
intr_status = hns3_pmu_readl(hns3_pmu,
HNS3_PMU_REG_EVENT_INTR_STATUS,
idx);
/*
* Each counter restarts from 0 when it overflows, so no extra
* processing is needed; just clear the interrupt status.
*/
if (intr_status)
hns3_pmu_clear_intr_status(hns3_pmu, idx);
}
return IRQ_HANDLED;
}
static int hns3_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
struct hns3_pmu *hns3_pmu;
hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node);
if (!hns3_pmu)
return -ENODEV;
if (hns3_pmu->on_cpu == -1) {
hns3_pmu->on_cpu = cpu;
irq_set_affinity(hns3_pmu->irq, cpumask_of(cpu));
}
return 0;
}
static int hns3_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct hns3_pmu *hns3_pmu;
unsigned int target;
hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node);
if (!hns3_pmu)
return -ENODEV;
/* Nothing to do if this CPU doesn't own the PMU */
if (hns3_pmu->on_cpu != cpu)
return 0;
/* Choose a new CPU from all online cpus */
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&hns3_pmu->pmu, cpu, target);
hns3_pmu->on_cpu = target;
irq_set_affinity(hns3_pmu->irq, cpumask_of(target));
return 0;
}
static void hns3_pmu_free_irq(void *data)
{
struct pci_dev *pdev = data;
pci_free_irq_vectors(pdev);
}
static int hns3_pmu_irq_register(struct pci_dev *pdev,
struct hns3_pmu *hns3_pmu)
{
int irq, ret;
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
if (ret < 0) {
pci_err(pdev, "failed to enable MSI vectors, ret = %d.\n", ret);
return ret;
}
ret = devm_add_action(&pdev->dev, hns3_pmu_free_irq, pdev);
if (ret) {
pci_err(pdev, "failed to add free irq action, ret = %d.\n", ret);
return ret;
}
irq = pci_irq_vector(pdev, 0);
ret = devm_request_irq(&pdev->dev, irq, hns3_pmu_irq, 0,
hns3_pmu->pmu.name, hns3_pmu);
if (ret) {
pci_err(pdev, "failed to register irq, ret = %d.\n", ret);
return ret;
}
hns3_pmu->irq = irq;
return 0;
}
static int hns3_pmu_init_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
{
int ret;
ret = hns3_pmu_alloc_pmu(pdev, hns3_pmu);
if (ret)
return ret;
ret = hns3_pmu_irq_register(pdev, hns3_pmu);
if (ret)
return ret;
ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
&hns3_pmu->node);
if (ret) {
pci_err(pdev, "failed to register hotplug, ret = %d.\n", ret);
return ret;
}
ret = perf_pmu_register(&hns3_pmu->pmu, hns3_pmu->pmu.name, -1);
if (ret) {
pci_err(pdev, "failed to register perf PMU, ret = %d.\n", ret);
cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
&hns3_pmu->node);
}
return ret;
}
static void hns3_pmu_uninit_pmu(struct pci_dev *pdev)
{
struct hns3_pmu *hns3_pmu = pci_get_drvdata(pdev);
perf_pmu_unregister(&hns3_pmu->pmu);
cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
&hns3_pmu->node);
}
static int hns3_pmu_init_dev(struct pci_dev *pdev)
{
int ret;
ret = pcim_enable_device(pdev);
if (ret) {
pci_err(pdev, "failed to enable pci device, ret = %d.\n", ret);
return ret;
}
ret = pcim_iomap_regions(pdev, BIT(BAR_2), "hns3_pmu");
if (ret < 0) {
pci_err(pdev, "failed to request pci region, ret = %d.\n", ret);
return ret;
}
pci_set_master(pdev);
return 0;
}
static int hns3_pmu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hns3_pmu *hns3_pmu;
int ret;
hns3_pmu = devm_kzalloc(&pdev->dev, sizeof(*hns3_pmu), GFP_KERNEL);
if (!hns3_pmu)
return -ENOMEM;
ret = hns3_pmu_init_dev(pdev);
if (ret)
return ret;
ret = hns3_pmu_init_pmu(pdev, hns3_pmu);
if (ret) {
pci_clear_master(pdev);
return ret;
}
pci_set_drvdata(pdev, hns3_pmu);
return ret;
}
static void hns3_pmu_remove(struct pci_dev *pdev)
{
hns3_pmu_uninit_pmu(pdev);
pci_clear_master(pdev);
pci_set_drvdata(pdev, NULL);
}
static const struct pci_device_id hns3_pmu_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa22b) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pmu_ids);
static struct pci_driver hns3_pmu_driver = {
.name = "hns3_pmu",
.id_table = hns3_pmu_ids,
.probe = hns3_pmu_probe,
.remove = hns3_pmu_remove,
};
static int __init hns3_pmu_module_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
"AP_PERF_ARM_HNS3_PMU_ONLINE",
hns3_pmu_online_cpu,
hns3_pmu_offline_cpu);
if (ret) {
pr_err("failed to setup HNS3 PMU hotplug, ret = %d.\n", ret);
return ret;
}
ret = pci_register_driver(&hns3_pmu_driver);
if (ret) {
pr_err("failed to register pci driver, ret = %d.\n", ret);
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE);
}
return ret;
}
module_init(hns3_pmu_module_init);
static void __exit hns3_pmu_module_exit(void)
{
pci_unregister_driver(&hns3_pmu_driver);
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE);
}
module_exit(hns3_pmu_module_exit);
MODULE_DESCRIPTION("HNS3 PMU driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/perf/hisilicon/hns3_pmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core, trigger handling functions
*
* Copyright (c) 2008 Jonathan Cameron
*/
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include <linux/iio/trigger.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/trigger_consumer.h>
/* RFC - Question of approach
* Make the common case (single sensor, single trigger)
* simple by starting trigger capture when the first sensor
* is added.
*
* Complex simultaneous start requires use of 'hold' functionality
* of the trigger. (not implemented)
*
* Any other suggestions?
*/
static DEFINE_IDA(iio_trigger_ida);
/* Single list of all available triggers */
static LIST_HEAD(iio_trigger_list);
static DEFINE_MUTEX(iio_trigger_list_lock);
/**
* name_show() - retrieve useful identifying name
* @dev: device associated with the iio_trigger
* @attr: pointer to the device_attribute structure that is
* being processed
* @buf: buffer to print the name into
*
* Return: a negative number on failure or the number of written
* characters on success.
*/
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iio_trigger *trig = to_iio_trigger(dev);
return sysfs_emit(buf, "%s\n", trig->name);
}
static DEVICE_ATTR_RO(name);
static struct attribute *iio_trig_dev_attrs[] = {
&dev_attr_name.attr,
NULL,
};
ATTRIBUTE_GROUPS(iio_trig_dev);
static struct iio_trigger *__iio_trigger_find_by_name(const char *name);
int iio_trigger_register(struct iio_trigger *trig_info)
{
int ret;
trig_info->id = ida_alloc(&iio_trigger_ida, GFP_KERNEL);
if (trig_info->id < 0)
return trig_info->id;
/* Set the name used for the sysfs directory etc */
dev_set_name(&trig_info->dev, "trigger%d", trig_info->id);
ret = device_add(&trig_info->dev);
if (ret)
goto error_unregister_id;
/* Add to list of available triggers held by the IIO core */
mutex_lock(&iio_trigger_list_lock);
if (__iio_trigger_find_by_name(trig_info->name)) {
pr_err("Duplicate trigger name '%s'\n", trig_info->name);
ret = -EEXIST;
goto error_device_del;
}
list_add_tail(&trig_info->list, &iio_trigger_list);
mutex_unlock(&iio_trigger_list_lock);
return 0;
error_device_del:
mutex_unlock(&iio_trigger_list_lock);
device_del(&trig_info->dev);
error_unregister_id:
ida_free(&iio_trigger_ida, trig_info->id);
return ret;
}
EXPORT_SYMBOL(iio_trigger_register);
void iio_trigger_unregister(struct iio_trigger *trig_info)
{
mutex_lock(&iio_trigger_list_lock);
list_del(&trig_info->list);
mutex_unlock(&iio_trigger_list_lock);
ida_free(&iio_trigger_ida, trig_info->id);
/* Possible issue in here */
device_del(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);
int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig)
{
struct iio_dev_opaque *iio_dev_opaque;
if (!indio_dev || !trig)
return -EINVAL;
iio_dev_opaque = to_iio_dev_opaque(indio_dev);
mutex_lock(&iio_dev_opaque->mlock);
WARN_ON(iio_dev_opaque->trig_readonly);
indio_dev->trig = iio_trigger_get(trig);
iio_dev_opaque->trig_readonly = true;
mutex_unlock(&iio_dev_opaque->mlock);
return 0;
}
EXPORT_SYMBOL(iio_trigger_set_immutable);
/* Search for trigger by name, assuming iio_trigger_list_lock held */
static struct iio_trigger *__iio_trigger_find_by_name(const char *name)
{
struct iio_trigger *iter;
list_for_each_entry(iter, &iio_trigger_list, list)
if (!strcmp(iter->name, name))
return iter;
return NULL;
}
static struct iio_trigger *iio_trigger_acquire_by_name(const char *name)
{
struct iio_trigger *trig = NULL, *iter;
mutex_lock(&iio_trigger_list_lock);
list_for_each_entry(iter, &iio_trigger_list, list)
if (sysfs_streq(iter->name, name)) {
trig = iter;
iio_trigger_get(trig);
break;
}
mutex_unlock(&iio_trigger_list_lock);
return trig;
}
static void iio_reenable_work_fn(struct work_struct *work)
{
struct iio_trigger *trig = container_of(work, struct iio_trigger,
reenable_work);
/*
* This 'might' occur after the trigger state is set to disabled -
* in that case the driver should skip reenabling.
*/
trig->ops->reenable(trig);
}
/*
* In general, reenable callbacks may need to sleep and this path is
* not performance sensitive, so just queue up a work item
* to reenable the trigger for us.
*
* Races that can cause this.
* 1) A handler runs entirely in interrupt context, so the final
* decrement of the use counter still happens in this interrupt.
* 2) The trigger has been removed, but one last interrupt gets through.
*
* For (1) we must call reenable, but not in atomic context.
* For (2) it should be safe to call reenable, if drivers never blindly
* reenable after the state is off.
*/
static void iio_trigger_notify_done_atomic(struct iio_trigger *trig)
{
if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
trig->ops->reenable)
schedule_work(&trig->reenable_work);
}
/**
* iio_trigger_poll() - Call the IRQ trigger handler of the consumers
* @trig: trigger which occurred
*
* This function should only be called from a hard IRQ context.
*/
void iio_trigger_poll(struct iio_trigger *trig)
{
int i;
if (!atomic_read(&trig->use_count)) {
atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
if (trig->subirqs[i].enabled)
generic_handle_irq(trig->subirq_base + i);
else
iio_trigger_notify_done_atomic(trig);
}
}
}
EXPORT_SYMBOL(iio_trigger_poll);
irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
{
iio_trigger_poll(private);
return IRQ_HANDLED;
}
EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
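/*
* Typical usage (sketch, names assumed): a trigger driver wires its
* data-ready interrupt straight to this helper, passing the trigger as
* the dev_id, e.g.
*     ret = request_irq(irq, iio_trigger_generic_data_rdy_poll, 0,
*                       trig->name, trig);
* so each hardware interrupt fans out to the consumers via iio_trigger_poll().
*/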
/**
* iio_trigger_poll_nested() - Call the threaded trigger handler of the
* consumers
* @trig: trigger which occurred
*
* This function should only be called from a kernel thread context.
*/
void iio_trigger_poll_nested(struct iio_trigger *trig)
{
int i;
if (!atomic_read(&trig->use_count)) {
atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
if (trig->subirqs[i].enabled)
handle_nested_irq(trig->subirq_base + i);
else
iio_trigger_notify_done(trig);
}
}
}
EXPORT_SYMBOL(iio_trigger_poll_nested);
void iio_trigger_notify_done(struct iio_trigger *trig)
{
if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
trig->ops->reenable)
trig->ops->reenable(trig);
}
EXPORT_SYMBOL(iio_trigger_notify_done);
/* Trigger Consumer related functions */
static int iio_trigger_get_irq(struct iio_trigger *trig)
{
int ret;
mutex_lock(&trig->pool_lock);
ret = bitmap_find_free_region(trig->pool,
CONFIG_IIO_CONSUMERS_PER_TRIGGER,
ilog2(1));
mutex_unlock(&trig->pool_lock);
if (ret >= 0)
ret += trig->subirq_base;
return ret;
}
static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
{
mutex_lock(&trig->pool_lock);
clear_bit(irq - trig->subirq_base, trig->pool);
mutex_unlock(&trig->pool_lock);
}
/* Complexity in here. With certain triggers (datardy) an acknowledgement
* may be needed if the pollfuncs do not include the data read for the
* triggering device.
* This is not currently handled. An alternative of not enabling the trigger
* unless the relevant pollfunc is attached may be the best option.
*/
/* Worth protecting against double additions? */
int iio_trigger_attach_poll_func(struct iio_trigger *trig,
struct iio_poll_func *pf)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev);
bool notinuse =
bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
int ret = 0;
/* Prevent the module from being removed whilst attached to a trigger */
__module_get(iio_dev_opaque->driver_module);
/* Get irq number */
pf->irq = iio_trigger_get_irq(trig);
	if (pf->irq < 0) {
		pr_err("Could not find an available irq for trigger %s, CONFIG_IIO_CONSUMERS_PER_TRIGGER=%d limit might be exceeded\n",
		       trig->name, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
		/* Propagate the error rather than silently returning success */
		ret = pf->irq;
		goto out_put_module;
	}
/* Request irq */
ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
pf->type, pf->name,
pf);
if (ret < 0)
goto out_put_irq;
/* Enable trigger in driver */
if (trig->ops && trig->ops->set_trigger_state && notinuse) {
ret = trig->ops->set_trigger_state(trig, true);
if (ret)
goto out_free_irq;
}
/*
* Check if we just registered to our own trigger: we determine that
* this is the case if the IIO device and the trigger device share the
* same parent device.
*/
if (iio_validate_own_trigger(pf->indio_dev, trig))
trig->attached_own_device = true;
return ret;
out_free_irq:
free_irq(pf->irq, pf);
out_put_irq:
iio_trigger_put_irq(trig, pf->irq);
out_put_module:
module_put(iio_dev_opaque->driver_module);
return ret;
}
int iio_trigger_detach_poll_func(struct iio_trigger *trig,
struct iio_poll_func *pf)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev);
bool no_other_users =
bitmap_weight(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER) == 1;
int ret = 0;
if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
ret = trig->ops->set_trigger_state(trig, false);
if (ret)
return ret;
}
if (pf->indio_dev->dev.parent == trig->dev.parent)
trig->attached_own_device = false;
iio_trigger_put_irq(trig, pf->irq);
free_irq(pf->irq, pf);
module_put(iio_dev_opaque->driver_module);
return ret;
}
irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
struct iio_poll_func *pf = p;
pf->timestamp = iio_get_time_ns(pf->indio_dev);
return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);
struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
int type,
struct iio_dev *indio_dev,
const char *fmt,
...)
{
va_list vargs;
struct iio_poll_func *pf;
pf = kmalloc(sizeof(*pf), GFP_KERNEL);
if (!pf)
return NULL;
va_start(vargs, fmt);
pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
va_end(vargs);
if (pf->name == NULL) {
kfree(pf);
return NULL;
}
pf->h = h;
pf->thread = thread;
pf->type = type;
pf->indio_dev = indio_dev;
return pf;
}
EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);
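/*
 * Illustrative sketch (not part of this file): the usual consumer-side
 * pattern, as used by the triggered-buffer core, allocates a pollfunc whose
 * top half records the timestamp and whose bottom half captures the data.
 * "my_trigger_handler" is a hypothetical threaded handler.
 *
 *	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
 *						 &my_trigger_handler,
 *						 IRQF_ONESHOT,
 *						 indio_dev,
 *						 "%s_consumer%d",
 *						 indio_dev->name,
 *						 iio_device_id(indio_dev));
 *	if (!indio_dev->pollfunc)
 *		return -ENOMEM;
 */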
void iio_dealloc_pollfunc(struct iio_poll_func *pf)
{
kfree(pf->name);
kfree(pf);
}
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
/**
* current_trigger_show() - trigger consumer sysfs query current trigger
* @dev: device associated with an industrial I/O device
* @attr: pointer to the device_attribute structure that
* is being processed
* @buf: buffer where the current trigger name will be printed into
*
* For trigger consumers the current_trigger interface allows the trigger
* used by the device to be queried.
*
* Return: a negative number on failure, the number of characters written
* on success or 0 if no trigger is available
*/
static ssize_t current_trigger_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
if (indio_dev->trig)
return sysfs_emit(buf, "%s\n", indio_dev->trig->name);
return 0;
}
/**
* current_trigger_store() - trigger consumer sysfs set current trigger
* @dev: device associated with an industrial I/O device
* @attr: device attribute that is being processed
* @buf: string buffer that holds the name of the trigger
* @len: length of the trigger name held by buf
*
* For trigger consumers the current_trigger interface allows the trigger
* used for this device to be specified at run time based on the trigger's
* name.
*
* Return: negative error code on failure or length of the buffer
* on success
*/
static ssize_t current_trigger_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_trigger *oldtrig = indio_dev->trig;
struct iio_trigger *trig;
int ret;
mutex_lock(&iio_dev_opaque->mlock);
if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
mutex_unlock(&iio_dev_opaque->mlock);
return -EBUSY;
}
if (iio_dev_opaque->trig_readonly) {
mutex_unlock(&iio_dev_opaque->mlock);
return -EPERM;
}
mutex_unlock(&iio_dev_opaque->mlock);
trig = iio_trigger_acquire_by_name(buf);
if (oldtrig == trig) {
ret = len;
goto out_trigger_put;
}
if (trig && indio_dev->info->validate_trigger) {
ret = indio_dev->info->validate_trigger(indio_dev, trig);
if (ret)
goto out_trigger_put;
}
if (trig && trig->ops && trig->ops->validate_device) {
ret = trig->ops->validate_device(trig, indio_dev);
if (ret)
goto out_trigger_put;
}
indio_dev->trig = trig;
if (oldtrig) {
if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
iio_trigger_detach_poll_func(oldtrig,
indio_dev->pollfunc_event);
iio_trigger_put(oldtrig);
}
if (indio_dev->trig) {
if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
iio_trigger_attach_poll_func(indio_dev->trig,
indio_dev->pollfunc_event);
}
return len;
out_trigger_put:
if (trig)
iio_trigger_put(trig);
return ret;
}
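/*
 * Illustrative userspace usage (hypothetical device/trigger names): the
 * attribute created below is written with the name of a registered trigger;
 * writing a name that does not match any registered trigger detaches the
 * current one.
 *
 *	# cat /sys/bus/iio/devices/trigger0/name > \
 *		/sys/bus/iio/devices/iio:device0/trigger/current_trigger
 */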
static DEVICE_ATTR_RW(current_trigger);
static struct attribute *iio_trigger_consumer_attrs[] = {
&dev_attr_current_trigger.attr,
NULL,
};
static const struct attribute_group iio_trigger_consumer_attr_group = {
.name = "trigger",
.attrs = iio_trigger_consumer_attrs,
};
static void iio_trig_release(struct device *device)
{
struct iio_trigger *trig = to_iio_trigger(device);
int i;
if (trig->subirq_base) {
for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
irq_modify_status(trig->subirq_base + i,
IRQ_NOAUTOEN,
IRQ_NOREQUEST | IRQ_NOPROBE);
irq_set_chip(trig->subirq_base + i,
NULL);
irq_set_handler(trig->subirq_base + i,
NULL);
}
irq_free_descs(trig->subirq_base,
CONFIG_IIO_CONSUMERS_PER_TRIGGER);
}
kfree(trig->name);
kfree(trig);
}
static const struct device_type iio_trig_type = {
.release = iio_trig_release,
.groups = iio_trig_dev_groups,
};
static void iio_trig_subirqmask(struct irq_data *d)
{
struct irq_chip *chip = irq_data_get_irq_chip(d);
struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip);
trig->subirqs[d->irq - trig->subirq_base].enabled = false;
}
static void iio_trig_subirqunmask(struct irq_data *d)
{
struct irq_chip *chip = irq_data_get_irq_chip(d);
struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip);
trig->subirqs[d->irq - trig->subirq_base].enabled = true;
}
static __printf(3, 0)
struct iio_trigger *viio_trigger_alloc(struct device *parent,
struct module *this_mod,
const char *fmt,
va_list vargs)
{
struct iio_trigger *trig;
int i;
trig = kzalloc(sizeof(*trig), GFP_KERNEL);
if (!trig)
return NULL;
trig->dev.parent = parent;
trig->dev.type = &iio_trig_type;
trig->dev.bus = &iio_bus_type;
device_initialize(&trig->dev);
INIT_WORK(&trig->reenable_work, iio_reenable_work_fn);
mutex_init(&trig->pool_lock);
trig->subirq_base = irq_alloc_descs(-1, 0,
CONFIG_IIO_CONSUMERS_PER_TRIGGER,
0);
if (trig->subirq_base < 0)
goto free_trig;
trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
if (trig->name == NULL)
goto free_descs;
INIT_LIST_HEAD(&trig->list);
trig->owner = this_mod;
trig->subirq_chip.name = trig->name;
trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
irq_set_chip(trig->subirq_base + i, &trig->subirq_chip);
irq_set_handler(trig->subirq_base + i, &handle_simple_irq);
irq_modify_status(trig->subirq_base + i,
IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
}
return trig;
free_descs:
irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
free_trig:
kfree(trig);
return NULL;
}
/**
* __iio_trigger_alloc - Allocate a trigger
* @parent: Device to allocate iio_trigger for
* @this_mod: module allocating the trigger
* @fmt: trigger name format. If it includes format
* specifiers, the additional arguments following
* format are formatted and inserted in the resulting
* string replacing their respective specifiers.
* RETURNS:
* Pointer to allocated iio_trigger on success, NULL on failure.
*/
struct iio_trigger *__iio_trigger_alloc(struct device *parent,
struct module *this_mod,
const char *fmt, ...)
{
struct iio_trigger *trig;
va_list vargs;
va_start(vargs, fmt);
trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
va_end(vargs);
return trig;
}
EXPORT_SYMBOL(__iio_trigger_alloc);
void iio_trigger_free(struct iio_trigger *trig)
{
if (trig)
put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);
static void devm_iio_trigger_release(struct device *dev, void *res)
{
iio_trigger_free(*(struct iio_trigger **)res);
}
/**
 * __devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
 * @parent:		Device to allocate iio_trigger for
 * @this_mod:		module allocating the trigger
 * @fmt:		trigger name format. If it includes format
 *			specifiers, the additional arguments following
 *			format are formatted and inserted in the resulting
 *			string replacing their respective specifiers.
 *
 * Managed iio_trigger_alloc. iio_trigger allocated with this function is
 * automatically freed on driver detach.
 *
* RETURNS:
* Pointer to allocated iio_trigger on success, NULL on failure.
*/
struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
struct module *this_mod,
const char *fmt, ...)
{
struct iio_trigger **ptr, *trig;
va_list vargs;
ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
GFP_KERNEL);
if (!ptr)
return NULL;
/* use raw alloc_dr for kmalloc caller tracing */
va_start(vargs, fmt);
trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
va_end(vargs);
if (trig) {
*ptr = trig;
devres_add(parent, ptr);
} else {
devres_free(ptr);
}
return trig;
}
EXPORT_SYMBOL_GPL(__devm_iio_trigger_alloc);
static void devm_iio_trigger_unreg(void *trigger_info)
{
iio_trigger_unregister(trigger_info);
}
/**
* devm_iio_trigger_register - Resource-managed iio_trigger_register()
* @dev: device this trigger was allocated for
* @trig_info: trigger to register
*
* Managed iio_trigger_register(). The IIO trigger registered with this
* function is automatically unregistered on driver detach. This function
* calls iio_trigger_register() internally. Refer to that function for more
* information.
*
* RETURNS:
* 0 on success, negative error number on failure.
*/
int devm_iio_trigger_register(struct device *dev,
struct iio_trigger *trig_info)
{
int ret;
ret = iio_trigger_register(trig_info);
if (ret)
return ret;
return devm_add_action_or_reset(dev, devm_iio_trigger_unreg, trig_info);
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_register);
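/*
 * Illustrative sketch (not part of this file): the common provider-side
 * pattern in a driver probe() pairs the devm allocation and registration
 * helpers above. "my_trigger_ops" and "st" are hypothetical.
 *
 *	trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
 *				      iio_device_id(indio_dev));
 *	if (!trig)
 *		return -ENOMEM;
 *
 *	trig->ops = &my_trigger_ops;
 *	iio_trigger_set_drvdata(trig, st);
 *
 *	ret = devm_iio_trigger_register(dev, trig);
 *	if (ret)
 *		return ret;
 */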
bool iio_trigger_using_own(struct iio_dev *indio_dev)
{
return indio_dev->trig->attached_own_device;
}
EXPORT_SYMBOL(iio_trigger_using_own);
/**
* iio_validate_own_trigger - Check if a trigger and IIO device belong to
* the same device
* @idev: the IIO device to check
* @trig: the IIO trigger to check
*
* This function can be used as the validate_trigger callback for triggers that
* can only be attached to their own device.
*
* Return: 0 if both the trigger and the IIO device belong to the same
* device, -EINVAL otherwise.
*/
int iio_validate_own_trigger(struct iio_dev *idev, struct iio_trigger *trig)
{
if (idev->dev.parent != trig->dev.parent)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(iio_validate_own_trigger);
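/*
 * Illustrative sketch (not part of this file): a driver that only accepts
 * its own trigger can plug the helper above straight into its iio_info.
 * "my_info" and "my_read_raw" are hypothetical.
 *
 *	static const struct iio_info my_info = {
 *		.read_raw = my_read_raw,
 *		.validate_trigger = iio_validate_own_trigger,
 *	};
 */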
/**
* iio_trigger_validate_own_device - Check if a trigger and IIO device belong to
* the same device
* @trig: The IIO trigger to check
* @indio_dev: the IIO device to check
*
* This function can be used as the validate_device callback for triggers that
* can only be attached to their own device.
*
* Return: 0 if both the trigger and the IIO device belong to the same
* device, -EINVAL otherwise.
*/
int iio_trigger_validate_own_device(struct iio_trigger *trig,
struct iio_dev *indio_dev)
{
if (indio_dev->dev.parent != trig->dev.parent)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(iio_trigger_validate_own_device);
int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
return iio_device_register_sysfs_group(indio_dev,
&iio_trigger_consumer_attr_group);
}
void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
/* Clean up an associated but not attached trigger reference */
if (indio_dev->trig)
iio_trigger_put(indio_dev->trig);
}
| linux-master | drivers/iio/industrialio-trigger.c |
// SPDX-License-Identifier: GPL-2.0-only
/* gain-time-scale conversion helpers for IIO light sensors
*
* Copyright (c) 2023 Matti Vaittinen <[email protected]>
*/
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/types.h>
#include <linux/units.h>
#include <linux/iio/iio-gts-helper.h>
#include <linux/iio/types.h>
/**
* iio_gts_get_gain - Convert scale to total gain
*
* Internal helper for converting scale to total gain.
*
 * @max:	Maximum linearized scale. As an example, when scales are
 *		expressed in NANO magnitude and the maximum scale is 64.1,
 *		the linearized scale is 64 100 000 000.
* @scale: Linearized scale to compute the gain for.
*
* Return: (floored) gain corresponding to the scale. -EINVAL if scale
* is invalid.
*/
static int iio_gts_get_gain(const u64 max, const u64 scale)
{
u64 full = max;
int tmp = 1;
if (scale > full || !scale)
return -EINVAL;
if (U64_MAX - full < scale) {
/* Risk of overflow */
if (full - scale < scale)
return 1;
full -= scale;
tmp++;
}
while (full > scale * (u64)tmp)
tmp++;
return tmp;
}
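/*
 * Worked example (added for illustration): with a linearized maximum scale of
 * 64 100 000 000 (i.e. 64.1 in NANO magnitude) and a requested linearized
 * scale of 16 025 000 000, the loop above stops at tmp = 4 because
 * 4 * 16 025 000 000 == 64 100 000 000, so the returned total gain is 4.
 */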
/**
* gain_get_scale_fraction - get the gain or time based on scale and known one
*
 * @max:	Maximum linearized scale. As an example, when scales are
 *		expressed in NANO magnitude and the maximum scale is 64.1,
 *		the linearized scale is 64 100 000 000.
* @scale: Linearized scale to compute the gain/time for.
* @known: Either integration time or gain depending on which one is known
* @unknown: Pointer to variable where the computed gain/time is stored
*
* Internal helper for computing unknown fraction of total gain.
* Compute either gain or time based on scale and either the gain or time
* depending on which one is known.
*
* Return: 0 on success.
*/
static int gain_get_scale_fraction(const u64 max, u64 scale, int known,
int *unknown)
{
int tot_gain;
tot_gain = iio_gts_get_gain(max, scale);
if (tot_gain < 0)
return tot_gain;
*unknown = tot_gain / known;
/* We require total gain to be exact multiple of known * unknown */
if (!*unknown || *unknown * known != tot_gain)
return -EINVAL;
return 0;
}
static int iio_gts_delinearize(u64 lin_scale, unsigned long scaler,
int *scale_whole, int *scale_nano)
{
int frac;
if (scaler > NANO)
return -EOVERFLOW;
if (!scaler)
return -EINVAL;
frac = do_div(lin_scale, scaler);
*scale_whole = lin_scale;
*scale_nano = frac * (NANO / scaler);
return 0;
}
static int iio_gts_linearize(int scale_whole, int scale_nano,
unsigned long scaler, u64 *lin_scale)
{
/*
	 * Expect scale to be (mostly) NANO or MICRO. Divide the divider instead
	 * of doing a multiplication followed by a division, to avoid overflow.
*/
if (scaler > NANO || !scaler)
return -EINVAL;
*lin_scale = (u64)scale_whole * (u64)scaler +
(u64)(scale_nano / (NANO / scaler));
return 0;
}
/**
* iio_gts_total_gain_to_scale - convert gain to scale
* @gts: Gain time scale descriptor
* @total_gain: the gain to be converted
* @scale_int: Pointer to integral part of the scale (typically val1)
* @scale_nano: Pointer to fractional part of the scale (nano or ppb)
*
 * Convert the total gain value to scale. NOTE: This does not separate the gain
 * generated by HW-gain from that generated by integration time. It is up to
 * the caller to decide what part of the total gain is due to integration time
 * and what is due to HW-gain.
*
* Return: 0 on success. Negative errno on failure.
*/
int iio_gts_total_gain_to_scale(struct iio_gts *gts, int total_gain,
int *scale_int, int *scale_nano)
{
u64 tmp;
tmp = gts->max_scale;
do_div(tmp, total_gain);
return iio_gts_delinearize(tmp, NANO, scale_int, scale_nano);
}
EXPORT_SYMBOL_NS_GPL(iio_gts_total_gain_to_scale, IIO_GTS_HELPER);
/**
* iio_gts_purge_avail_scale_table - free-up the available scale tables
* @gts: Gain time scale descriptor
*
* Free the space reserved by iio_gts_build_avail_scale_table().
*/
static void iio_gts_purge_avail_scale_table(struct iio_gts *gts)
{
int i;
if (gts->per_time_avail_scale_tables) {
for (i = 0; i < gts->num_itime; i++)
kfree(gts->per_time_avail_scale_tables[i]);
kfree(gts->per_time_avail_scale_tables);
gts->per_time_avail_scale_tables = NULL;
}
kfree(gts->avail_all_scales_table);
gts->avail_all_scales_table = NULL;
gts->num_avail_all_scales = 0;
}
static int iio_gts_gain_cmp(const void *a, const void *b)
{
return *(int *)a - *(int *)b;
}
static int gain_to_scaletables(struct iio_gts *gts, int **gains, int **scales)
{
int ret, i, j, new_idx, time_idx;
int *all_gains;
size_t gain_bytes;
for (i = 0; i < gts->num_itime; i++) {
/*
* Sort the tables for nice output and for easier finding of
* unique values.
*/
sort(gains[i], gts->num_hwgain, sizeof(int), iio_gts_gain_cmp,
NULL);
/* Convert gains to scales */
for (j = 0; j < gts->num_hwgain; j++) {
ret = iio_gts_total_gain_to_scale(gts, gains[i][j],
&scales[i][2 * j],
&scales[i][2 * j + 1]);
if (ret)
return ret;
}
}
gain_bytes = array_size(gts->num_hwgain, sizeof(int));
all_gains = kcalloc(gts->num_itime, gain_bytes, GFP_KERNEL);
if (!all_gains)
return -ENOMEM;
/*
	 * We assume all the gains for the same integration time are unique.
	 * The first time table likely has the greatest time multiplier, as
	 * the times are in order of preference and greater times are
	 * usually preferred. Hence we start from the last table, which is
	 * likely to have the smallest total gains.
*/
time_idx = gts->num_itime - 1;
memcpy(all_gains, gains[time_idx], gain_bytes);
new_idx = gts->num_hwgain;
while (time_idx--) {
for (j = 0; j < gts->num_hwgain; j++) {
int candidate = gains[time_idx][j];
int chk;
if (candidate > all_gains[new_idx - 1]) {
all_gains[new_idx] = candidate;
new_idx++;
continue;
}
for (chk = 0; chk < new_idx; chk++)
if (candidate <= all_gains[chk])
break;
if (candidate == all_gains[chk])
continue;
memmove(&all_gains[chk + 1], &all_gains[chk],
(new_idx - chk) * sizeof(int));
all_gains[chk] = candidate;
new_idx++;
}
}
gts->avail_all_scales_table = kcalloc(new_idx, 2 * sizeof(int),
GFP_KERNEL);
if (!gts->avail_all_scales_table) {
ret = -ENOMEM;
goto free_out;
}
gts->num_avail_all_scales = new_idx;
for (i = 0; i < gts->num_avail_all_scales; i++) {
ret = iio_gts_total_gain_to_scale(gts, all_gains[i],
						  &gts->avail_all_scales_table[i * 2],
						  &gts->avail_all_scales_table[i * 2 + 1]);
if (ret) {
kfree(gts->avail_all_scales_table);
gts->num_avail_all_scales = 0;
goto free_out;
}
}
free_out:
kfree(all_gains);
return ret;
}
/**
* iio_gts_build_avail_scale_table - create tables of available scales
* @gts: Gain time scale descriptor
*
* Build the tables which can represent the available scales based on the
 * originally given gain and time tables. When both time and gain tables are
 * given, this results in:
* 1. A set of tables representing available scales for each supported
* integration time.
* 2. A single table listing all the unique scales that any combination of
* supported gains and times can provide.
*
* NOTE: Space allocated for the tables must be freed using
* iio_gts_purge_avail_scale_table() when the tables are no longer needed.
*
* Return: 0 on success.
*/
static int iio_gts_build_avail_scale_table(struct iio_gts *gts)
{
int **per_time_gains, **per_time_scales, i, j, ret = -ENOMEM;
per_time_gains = kcalloc(gts->num_itime, sizeof(*per_time_gains), GFP_KERNEL);
if (!per_time_gains)
return ret;
per_time_scales = kcalloc(gts->num_itime, sizeof(*per_time_scales), GFP_KERNEL);
if (!per_time_scales)
goto free_gains;
for (i = 0; i < gts->num_itime; i++) {
per_time_scales[i] = kcalloc(gts->num_hwgain, 2 * sizeof(int),
GFP_KERNEL);
if (!per_time_scales[i])
goto err_free_out;
per_time_gains[i] = kcalloc(gts->num_hwgain, sizeof(int),
GFP_KERNEL);
if (!per_time_gains[i]) {
kfree(per_time_scales[i]);
goto err_free_out;
}
for (j = 0; j < gts->num_hwgain; j++)
per_time_gains[i][j] = gts->hwgain_table[j].gain *
gts->itime_table[i].mul;
}
ret = gain_to_scaletables(gts, per_time_gains, per_time_scales);
if (ret)
goto err_free_out;
kfree(per_time_gains);
gts->per_time_avail_scale_tables = per_time_scales;
return 0;
err_free_out:
	/* Free every per-time table allocated so far, including index 0 */
	for (i--; i >= 0; i--) {
		kfree(per_time_scales[i]);
		kfree(per_time_gains[i]);
	}
kfree(per_time_scales);
free_gains:
kfree(per_time_gains);
return ret;
}
static void iio_gts_us_to_int_micro(int *time_us, int *int_micro_times,
int num_times)
{
int i;
for (i = 0; i < num_times; i++) {
int_micro_times[i * 2] = time_us[i] / 1000000;
int_micro_times[i * 2 + 1] = time_us[i] % 1000000;
}
}
/**
* iio_gts_build_avail_time_table - build table of available integration times
* @gts: Gain time scale descriptor
*
* Build the table which can represent the available times to be returned
* to users using the read_avail-callback.
*
* NOTE: Space allocated for the tables must be freed using
* iio_gts_purge_avail_time_table() when the tables are no longer needed.
*
* Return: 0 on success.
*/
static int iio_gts_build_avail_time_table(struct iio_gts *gts)
{
int *times, i, j, idx = 0, *int_micro_times;
if (!gts->num_itime)
return 0;
times = kcalloc(gts->num_itime, sizeof(int), GFP_KERNEL);
if (!times)
return -ENOMEM;
	/* Sort times from all tables into one ascending list and drop duplicates */
	for (i = gts->num_itime - 1; i >= 0; i--) {
		int new = gts->itime_table[i].time_us;

		/* Common case: new time is greater than anything seen so far */
		if (!idx || times[idx - 1] < new) {
			times[idx++] = new;
			continue;
		}

		/* Otherwise insert in order, skipping duplicates */
		for (j = 0; j < idx; j++) {
			if (times[j] == new)
				break;
			if (times[j] > new) {
				memmove(&times[j + 1], &times[j],
					(idx - j) * sizeof(int));
				times[j] = new;
				idx++;
				break;
			}
		}
	}
/* create a list of times formatted as list of IIO_VAL_INT_PLUS_MICRO */
int_micro_times = kcalloc(idx, sizeof(int) * 2, GFP_KERNEL);
if (int_micro_times) {
/*
		 * This is just to survive an unlikely corner case where times in
		 * the given time table were not unique. Otherwise we could just
		 * trust gts->num_itime.
*/
gts->num_avail_time_tables = idx;
iio_gts_us_to_int_micro(times, int_micro_times, idx);
}
gts->avail_time_tables = int_micro_times;
kfree(times);
if (!int_micro_times)
return -ENOMEM;
return 0;
}
/**
* iio_gts_purge_avail_time_table - free-up the available integration time table
* @gts: Gain time scale descriptor
*
* Free the space reserved by iio_gts_build_avail_time_table().
*/
static void iio_gts_purge_avail_time_table(struct iio_gts *gts)
{
if (gts->num_avail_time_tables) {
kfree(gts->avail_time_tables);
gts->avail_time_tables = NULL;
gts->num_avail_time_tables = 0;
}
}
/**
* iio_gts_build_avail_tables - create tables of available scales and int times
* @gts: Gain time scale descriptor
*
* Build the tables which can represent the available scales and available
* integration times. Availability tables are built based on the originally
* given gain and given time tables.
*
 * When both time and gain tables are given, this results in:
* 1. A set of sorted tables representing available scales for each supported
* integration time.
* 2. A single sorted table listing all the unique scales that any combination
* of supported gains and times can provide.
* 3. A sorted table of supported integration times
*
* After these tables are built one can use the iio_gts_all_avail_scales(),
* iio_gts_avail_scales_for_time() and iio_gts_avail_times() helpers to
* implement the read_avail operations.
*
* NOTE: Space allocated for the tables must be freed using
* iio_gts_purge_avail_tables() when the tables are no longer needed.
*
* Return: 0 on success.
*/
static int iio_gts_build_avail_tables(struct iio_gts *gts)
{
int ret;
ret = iio_gts_build_avail_scale_table(gts);
if (ret)
return ret;
ret = iio_gts_build_avail_time_table(gts);
if (ret)
iio_gts_purge_avail_scale_table(gts);
return ret;
}
/**
* iio_gts_purge_avail_tables - free-up the availability tables
* @gts: Gain time scale descriptor
*
* Free the space reserved by iio_gts_build_avail_tables(). Frees both the
* integration time and scale tables.
*/
static void iio_gts_purge_avail_tables(struct iio_gts *gts)
{
iio_gts_purge_avail_time_table(gts);
iio_gts_purge_avail_scale_table(gts);
}
static void devm_iio_gts_avail_all_drop(void *res)
{
iio_gts_purge_avail_tables(res);
}
/**
 * devm_iio_gts_build_avail_tables - managed add of availability tables
 * @dev:	Pointer to the device to whose lifetime the tables are bound
* @gts: Gain time scale descriptor
*
* Build the tables which can represent the available scales and available
* integration times. Availability tables are built based on the originally
* given gain and given time tables.
*
 * When both time and gain tables are given, this results in:
* 1. A set of sorted tables representing available scales for each supported
* integration time.
* 2. A single sorted table listing all the unique scales that any combination
* of supported gains and times can provide.
* 3. A sorted table of supported integration times
*
* After these tables are built one can use the iio_gts_all_avail_scales(),
* iio_gts_avail_scales_for_time() and iio_gts_avail_times() helpers to
* implement the read_avail operations.
*
* The tables are automatically released upon device detach.
*
* Return: 0 on success.
*/
static int devm_iio_gts_build_avail_tables(struct device *dev,
struct iio_gts *gts)
{
int ret;
ret = iio_gts_build_avail_tables(gts);
if (ret)
return ret;
return devm_add_action_or_reset(dev, devm_iio_gts_avail_all_drop, gts);
}
static int sanity_check_time(const struct iio_itime_sel_mul *t)
{
if (t->sel < 0 || t->time_us < 0 || t->mul <= 0)
return -EINVAL;
return 0;
}
static int sanity_check_gain(const struct iio_gain_sel_pair *g)
{
if (g->sel < 0 || g->gain <= 0)
return -EINVAL;
return 0;
}
static int iio_gts_sanity_check(struct iio_gts *gts)
{
int g, t, ret;
if (!gts->num_hwgain && !gts->num_itime)
return -EINVAL;
for (t = 0; t < gts->num_itime; t++) {
		ret = sanity_check_time(&gts->itime_table[t]);
if (ret)
return ret;
}
for (g = 0; g < gts->num_hwgain; g++) {
		ret = sanity_check_gain(&gts->hwgain_table[g]);
if (ret)
return ret;
}
for (g = 0; g < gts->num_hwgain; g++) {
for (t = 0; t < gts->num_itime; t++) {
int gain, mul, res;
gain = gts->hwgain_table[g].gain;
mul = gts->itime_table[t].mul;
if (check_mul_overflow(gain, mul, &res))
return -EOVERFLOW;
}
}
return 0;
}
static int iio_init_iio_gts(int max_scale_int, int max_scale_nano,
const struct iio_gain_sel_pair *gain_tbl, int num_gain,
const struct iio_itime_sel_mul *tim_tbl, int num_times,
struct iio_gts *gts)
{
int ret;
memset(gts, 0, sizeof(*gts));
ret = iio_gts_linearize(max_scale_int, max_scale_nano, NANO,
				 &gts->max_scale);
if (ret)
return ret;
gts->hwgain_table = gain_tbl;
gts->num_hwgain = num_gain;
gts->itime_table = tim_tbl;
gts->num_itime = num_times;
return iio_gts_sanity_check(gts);
}
/**
* devm_iio_init_iio_gts - Initialize the gain-time-scale helper
 * @dev:		Pointer to the device to whose lifetime the gts
 *			resources are bound
* @max_scale_int: integer part of the maximum scale value
* @max_scale_nano: fraction part of the maximum scale value
* @gain_tbl: table describing supported gains
* @num_gain: number of gains in the gain table
 * @tim_tbl:		table describing supported integration times. Provide
 *			the integration time table sorted so that the preferred
 *			integration time is in the first array index. Search
 *			functions like iio_gts_find_time_and_gain_sel_for_scale()
 *			start the search from the first provided time.
* @num_times: number of times in the time table
* @gts: pointer to the helper struct
*
* Initialize the gain-time-scale helper for use. Note, gains, times, selectors
* and multipliers must be positive. Negative values are reserved for error
* checking. The total gain (maximum gain * maximum time multiplier) must not
* overflow int. The allocated resources will be released upon device detach.
*
* Return: 0 on success.
*/
int devm_iio_init_iio_gts(struct device *dev, int max_scale_int, int max_scale_nano,
const struct iio_gain_sel_pair *gain_tbl, int num_gain,
const struct iio_itime_sel_mul *tim_tbl, int num_times,
struct iio_gts *gts)
{
int ret;
ret = iio_init_iio_gts(max_scale_int, max_scale_nano, gain_tbl,
num_gain, tim_tbl, num_times, gts);
if (ret)
return ret;
return devm_iio_gts_build_avail_tables(dev, gts);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_init_iio_gts, IIO_GTS_HELPER);
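/*
 * Illustrative sketch (not part of this file): a light-sensor driver would
 * typically describe its HW-gains and integration times with the
 * GAIN_SCALE_GAIN() and GAIN_SCALE_ITIME_US() initializers from
 * <linux/iio/iio-gts-helper.h> and hand them to the devm initializer above.
 * The selector values, the 16.0 maximum scale and the "data" struct below are
 * made up for the example.
 *
 *	static const struct iio_gain_sel_pair my_gains[] = {
 *		GAIN_SCALE_GAIN(1, 0x0),
 *		GAIN_SCALE_GAIN(4, 0x1),
 *		GAIN_SCALE_GAIN(16, 0x2),
 *	};
 *
 *	static const struct iio_itime_sel_mul my_itimes[] = {
 *		GAIN_SCALE_ITIME_US(400000, 0x1, 8),
 *		GAIN_SCALE_ITIME_US(50000, 0x0, 1),
 *	};
 *
 *	ret = devm_iio_init_iio_gts(dev, 16, 0, my_gains,
 *				    ARRAY_SIZE(my_gains), my_itimes,
 *				    ARRAY_SIZE(my_itimes), &data->gts);
 *	if (ret)
 *		return ret;
 */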
/**
* iio_gts_all_avail_scales - helper for listing all available scales
* @gts: Gain time scale descriptor
* @vals: Returned array of supported scales
* @type: Type of returned scale values
* @length: Amount of returned values in array
*
* Return: a value suitable to be returned from read_avail or a negative error.
*/
int iio_gts_all_avail_scales(struct iio_gts *gts, const int **vals, int *type,
int *length)
{
if (!gts->num_avail_all_scales)
return -EINVAL;
*vals = gts->avail_all_scales_table;
*type = IIO_VAL_INT_PLUS_NANO;
*length = gts->num_avail_all_scales * 2;
return IIO_AVAIL_LIST;
}
EXPORT_SYMBOL_NS_GPL(iio_gts_all_avail_scales, IIO_GTS_HELPER);
/**
* iio_gts_avail_scales_for_time - list scales for integration time
* @gts: Gain time scale descriptor
* @time: Integration time for which the scales are listed
* @vals: Returned array of supported scales
* @type: Type of returned scale values
* @length: Amount of returned values in array
*
* Drivers which do not allow scale setting to change integration time can
 * use this helper to list only the scales which are valid for the given integration
* time.
*
* Return: a value suitable to be returned from read_avail or a negative error.
*/
int iio_gts_avail_scales_for_time(struct iio_gts *gts, int time,
const int **vals, int *type, int *length)
{
int i;
for (i = 0; i < gts->num_itime; i++)
if (gts->itime_table[i].time_us == time)
break;
if (i == gts->num_itime)
return -EINVAL;
*vals = gts->per_time_avail_scale_tables[i];
*type = IIO_VAL_INT_PLUS_NANO;
*length = gts->num_hwgain * 2;
return IIO_AVAIL_LIST;
}
EXPORT_SYMBOL_NS_GPL(iio_gts_avail_scales_for_time, IIO_GTS_HELPER);
/**
* iio_gts_avail_times - helper for listing available integration times
* @gts: Gain time scale descriptor
* @vals: Returned array of supported times
* @type: Type of returned scale values
* @length: Amount of returned values in array
*
* Return: a value suitable to be returned from read_avail or a negative error.
*/
int iio_gts_avail_times(struct iio_gts *gts, const int **vals, int *type,
int *length)
{
if (!gts->num_avail_time_tables)
return -EINVAL;
*vals = gts->avail_time_tables;
*type = IIO_VAL_INT_PLUS_MICRO;
*length = gts->num_avail_time_tables * 2;
return IIO_AVAIL_LIST;
}
EXPORT_SYMBOL_NS_GPL(iio_gts_avail_times, IIO_GTS_HELPER);
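/*
 * Illustrative sketch (not part of this file): the two helpers above map
 * directly onto a driver's read_avail callback. "struct my_data" is a
 * hypothetical driver state holding the struct iio_gts.
 *
 *	static int my_read_avail(struct iio_dev *indio_dev,
 *				 struct iio_chan_spec const *chan,
 *				 const int **vals, int *type, int *length,
 *				 long mask)
 *	{
 *		struct my_data *data = iio_priv(indio_dev);
 *
 *		switch (mask) {
 *		case IIO_CHAN_INFO_SCALE:
 *			return iio_gts_all_avail_scales(&data->gts, vals,
 *							type, length);
 *		case IIO_CHAN_INFO_INT_TIME:
 *			return iio_gts_avail_times(&data->gts, vals, type,
 *						   length);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */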
/**
* iio_gts_find_sel_by_gain - find selector corresponding to a HW-gain
* @gts: Gain time scale descriptor
* @gain: HW-gain for which matching selector is searched for
*
* Return: a selector matching given HW-gain or -EINVAL if selector was
* not found.
*/
int iio_gts_find_sel_by_gain(struct iio_gts *gts, int gain)
{
int i;
for (i = 0; i < gts->num_hwgain; i++)
if (gts->hwgain_table[i].gain == gain)
return gts->hwgain_table[i].sel;
return -EINVAL;
}
EXPORT_SYMBOL_NS_GPL(iio_gts_find_sel_by_gain, IIO_GTS_HELPER);
/**
* iio_gts_find_gain_by_sel - find HW-gain corresponding to a selector
* @gts: Gain time scale descriptor
* @sel: selector for which matching HW-gain is searched for
*
* Return: a HW-gain matching given selector or -EINVAL if HW-gain was not
* found.
*/
int iio_gts_find_gain_by_sel(struct iio_gts *gts, int sel)
{
int i;
for (i = 0; i < gts->num_hwgain; i++)
if (gts->hwgain_table[i].sel == sel)
return gts->hwgain_table[i].gain;
return -EINVAL;
}
EXPORT_SYMBOL_NS_GPL(iio_gts_find_gain_by_sel, IIO_GTS_HELPER);
/**
* iio_gts_get_min_gain - find smallest valid HW-gain
* @gts: Gain time scale descriptor
*
 * Return: The smallest HW-gain, or -EINVAL if no HW-gains were in the tables.
*/
int iio_gts_get_min_gain(struct iio_gts *gts)
{
int i, min = -EINVAL;
for (i = 0; i < gts->num_hwgain; i++) {
int gain = gts->hwgain_table[i].gain;
if (min == -EINVAL)
min = gain;
else
min = min(min, gain);
}
return min;
}
EXPORT_SYMBOL_NS_GPL(iio_gts_get_min_gain, IIO_GTS_HELPER);
/**
* iio_find_closest_gain_low - Find the closest lower matching gain
* @gts: Gain time scale descriptor
* @gain: HW-gain for which the closest match is searched
* @in_range: indicate if the @gain was actually in the range of
* supported gains.
*
 * Search for the closest supported gain that is lower than or equal to the
 * gain given as a parameter. This is usable for drivers which do not require
 * the user to request an exactly matching gain but rather round to a supported
 * gain value which is equal or lower (setting a lower gain is typical for
 * avoiding saturation).
*
* Return: The closest matching supported gain or -EINVAL if @gain
* was smaller than the smallest supported gain.
*/
int iio_find_closest_gain_low(struct iio_gts *gts, int gain, bool *in_range)
{
int i, diff = 0;
int best = -1;
*in_range = false;
for (i = 0; i < gts->num_hwgain; i++) {
if (gain == gts->hwgain_table[i].gain) {
*in_range = true;
return gain;
}
if (gain > gts->hwgain_table[i].gain) {
if (!diff) {
diff = gain - gts->hwgain_table[i].gain;
best = i;
} else {
int tmp = gain - gts->hwgain_table[i].gain;
if (tmp < diff) {
diff = tmp;
best = i;
}
}
} else {
/*
* We found valid HW-gain which is greater than
* reference. So, unless we return a failure below we
* will have found an in-range gain
*/
*in_range = true;
}
}
/* The requested gain was smaller than anything we support */
if (!diff) {
*in_range = false;
return -EINVAL;
}
return gts->hwgain_table[best].gain;
}
EXPORT_SYMBOL_NS_GPL(iio_find_closest_gain_low, IIO_GTS_HELPER);
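/*
 * Worked example (added for illustration): with supported HW-gains
 * {1, 4, 16} and a requested gain of 8, the loop above finds 4 as the closest
 * supported gain that does not exceed the request and sets *in_range to true
 * because 8 lies between the smallest and largest supported gains.
 */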
static int iio_gts_get_int_time_gain_multiplier_by_sel(struct iio_gts *gts,
int sel)
{
const struct iio_itime_sel_mul *time;
time = iio_gts_find_itime_by_sel(gts, sel);
if (!time)
return -EINVAL;
return time->mul;
}
/**
* iio_gts_find_gain_for_scale_using_time - Find gain by time and scale
* @gts: Gain time scale descriptor
* @time_sel: Integration time selector corresponding to the time gain is
* searched for
* @scale_int: Integral part of the scale (typically val1)
* @scale_nano: Fractional part of the scale (nano or ppb)
* @gain: Pointer to value where gain is stored.
*
 * In some cases light sensor drivers may want to find a gain setting which
 * corresponds to a given scale and integration time. Drivers which fill the
 * gain and time tables may use this helper to retrieve the gain.
*
* Return: 0 on success. -EINVAL if gain matching the parameters is not
* found.
*/
static int iio_gts_find_gain_for_scale_using_time(struct iio_gts *gts, int time_sel,
int scale_int, int scale_nano,
int *gain)
{
u64 scale_linear;
int ret, mul;
ret = iio_gts_linearize(scale_int, scale_nano, NANO, &scale_linear);
if (ret)
return ret;
ret = iio_gts_get_int_time_gain_multiplier_by_sel(gts, time_sel);
if (ret < 0)
return ret;
mul = ret;
ret = gain_get_scale_fraction(gts->max_scale, scale_linear, mul, gain);
if (ret)
return ret;
if (!iio_gts_valid_gain(gts, *gain))
return -EINVAL;
return 0;
}
/**
* iio_gts_find_gain_sel_for_scale_using_time - Fetch gain selector.
* @gts: Gain time scale descriptor
* @time_sel: Integration time selector corresponding to the time gain is
* searched for
* @scale_int: Integral part of the scale (typically val1)
* @scale_nano: Fractional part of the scale (nano or ppb)
* @gain_sel: Pointer to value where gain selector is stored.
*
* See iio_gts_find_gain_for_scale_using_time() for more information
*/
int iio_gts_find_gain_sel_for_scale_using_time(struct iio_gts *gts, int time_sel,
int scale_int, int scale_nano,
int *gain_sel)
{
int gain, ret;
ret = iio_gts_find_gain_for_scale_using_time(gts, time_sel, scale_int,
scale_nano, &gain);
if (ret)
return ret;
ret = iio_gts_find_sel_by_gain(gts, gain);
if (ret < 0)
return ret;
*gain_sel = ret;
return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_gts_find_gain_sel_for_scale_using_time, IIO_GTS_HELPER);
static int iio_gts_get_total_gain(struct iio_gts *gts, int gain, int time)
{
const struct iio_itime_sel_mul *itime;
if (!iio_gts_valid_gain(gts, gain))
return -EINVAL;
if (!gts->num_itime)
return gain;
itime = iio_gts_find_itime_by_time(gts, time);
if (!itime)
return -EINVAL;
return gain * itime->mul;
}
static int iio_gts_get_scale_linear(struct iio_gts *gts, int gain, int time,
u64 *scale)
{
int total_gain;
u64 tmp;
total_gain = iio_gts_get_total_gain(gts, gain, time);
if (total_gain < 0)
return total_gain;
tmp = gts->max_scale;
do_div(tmp, total_gain);
*scale = tmp;
return 0;
}
/**
* iio_gts_get_scale - get scale based on integration time and HW-gain
* @gts: Gain time scale descriptor
* @gain: HW-gain for which the scale is computed
* @time: Integration time for which the scale is computed
* @scale_int: Integral part of the scale (typically val1)
* @scale_nano: Fractional part of the scale (nano or ppb)
*
* Compute scale matching the integration time and HW-gain given as parameter.
*
* Return: 0 on success.
*/
int iio_gts_get_scale(struct iio_gts *gts, int gain, int time, int *scale_int,
int *scale_nano)
{
u64 lin_scale;
int ret;
ret = iio_gts_get_scale_linear(gts, gain, time, &lin_scale);
if (ret)
return ret;
return iio_gts_delinearize(lin_scale, NANO, scale_int, scale_nano);
}
EXPORT_SYMBOL_NS_GPL(iio_gts_get_scale, IIO_GTS_HELPER);
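/*
 * Illustrative sketch (not part of this file): a read_raw() handler can
 * report the current scale by feeding the cached HW-gain and integration
 * time into the helper above. The "data" fields are hypothetical.
 *
 *	case IIO_CHAN_INFO_SCALE:
 *		ret = iio_gts_get_scale(&data->gts, data->gain,
 *					data->int_time_us, val, val2);
 *		if (ret)
 *			return ret;
 *		return IIO_VAL_INT_PLUS_NANO;
 */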
/**
* iio_gts_find_new_gain_sel_by_old_gain_time - compensate for time change
* @gts: Gain time scale descriptor
* @old_gain: Previously set gain
 * @old_time_sel:	Selector corresponding to the previously set time
 * @new_time_sel:	Selector corresponding to the new time to be set
* @new_gain: Pointer to value where new gain is to be written
*
* We may want to mitigate the scale change caused by setting a new integration
 * time (for a light sensor) by also updating the (HW)gain. This helper computes
 * a new gain value to maintain the scale with the new integration time.
*
* Return: 0 if an exactly matching supported new gain was found. When a
* non-zero value is returned, the @new_gain will be set to a negative or
* positive value. The negative value means that no gain could be computed.
* Positive value will be the "best possible new gain there could be". There
* can be two reasons why finding the "best possible" new gain is not deemed
* successful. 1) This new value cannot be supported by the hardware. 2) The new
* gain required to maintain the scale would not be an integer. In this case,
* the "best possible" new gain will be a floored optimal gain, which may or
* may not be supported by the hardware.
*/
int iio_gts_find_new_gain_sel_by_old_gain_time(struct iio_gts *gts,
int old_gain, int old_time_sel,
int new_time_sel, int *new_gain)
{
const struct iio_itime_sel_mul *itime_old, *itime_new;
u64 scale;
int ret;
*new_gain = -1;
itime_old = iio_gts_find_itime_by_sel(gts, old_time_sel);
if (!itime_old)
return -EINVAL;
itime_new = iio_gts_find_itime_by_sel(gts, new_time_sel);
if (!itime_new)
return -EINVAL;
ret = iio_gts_get_scale_linear(gts, old_gain, itime_old->time_us,
&scale);
if (ret)
return ret;
ret = gain_get_scale_fraction(gts->max_scale, scale, itime_new->mul,
new_gain);
if (ret)
return ret;
if (!iio_gts_valid_gain(gts, *new_gain))
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_gts_find_new_gain_sel_by_old_gain_time, IIO_GTS_HELPER);
/**
* iio_gts_find_new_gain_by_old_gain_time - compensate for time change
* @gts: Gain time scale descriptor
* @old_gain: Previously set gain
 * @old_time:		Previously set integration time, in microseconds
 * @new_time:		New integration time to be set, in microseconds
* @new_gain: Pointer to value where new gain is to be written
*
* We may want to mitigate the scale change caused by setting a new integration
 * time (for a light sensor) by also updating the (HW)gain. This helper computes
 * a new gain value to maintain the scale with the new integration time.
*
* Return: 0 if an exactly matching supported new gain was found. When a
* non-zero value is returned, the @new_gain will be set to a negative or
* positive value. The negative value means that no gain could be computed.
* Positive value will be the "best possible new gain there could be". There
* can be two reasons why finding the "best possible" new gain is not deemed
* successful. 1) This new value cannot be supported by the hardware. 2) The new
* gain required to maintain the scale would not be an integer. In this case,
* the "best possible" new gain will be a floored optimal gain, which may or
* may not be supported by the hardware.
*/
int iio_gts_find_new_gain_by_old_gain_time(struct iio_gts *gts, int old_gain,
int old_time, int new_time,
int *new_gain)
{
const struct iio_itime_sel_mul *itime_new;
u64 scale;
int ret;
*new_gain = -1;
itime_new = iio_gts_find_itime_by_time(gts, new_time);
if (!itime_new)
return -EINVAL;
ret = iio_gts_get_scale_linear(gts, old_gain, old_time, &scale);
if (ret)
return ret;
ret = gain_get_scale_fraction(gts->max_scale, scale, itime_new->mul,
new_gain);
if (ret)
return ret;
if (!iio_gts_valid_gain(gts, *new_gain))
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_gts_find_new_gain_by_old_gain_time, IIO_GTS_HELPER);
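/*
 * Worked example (added for illustration, with made-up gains/multipliers):
 * assume the maximum scale corresponds to total gain 1, the current HW-gain
 * is 8 at an integration time whose multiplier is 2 (total gain 16), and the
 * new integration time has multiplier 4. The current scale corresponds to
 * total gain 16, so the helpers above return a new HW-gain of 16 / 4 = 4,
 * which keeps the scale unchanged across the time change.
 */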
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Matti Vaittinen <[email protected]>");
MODULE_DESCRIPTION("IIO light sensor gain-time-scale helpers");
| linux-master | drivers/iio/industrialio-gts-helper.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Industrial I/O configfs bits
*
* Copyright (c) 2015 Intel Corporation
*/
#include <linux/configfs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/iio/iio.h>
#include <linux/iio/configfs.h>
static const struct config_item_type iio_root_group_type = {
.ct_owner = THIS_MODULE,
};
struct configfs_subsystem iio_configfs_subsys = {
.su_group = {
.cg_item = {
.ci_namebuf = "iio",
.ci_type = &iio_root_group_type,
},
},
.su_mutex = __MUTEX_INITIALIZER(iio_configfs_subsys.su_mutex),
};
EXPORT_SYMBOL(iio_configfs_subsys);
static int __init iio_configfs_init(void)
{
config_group_init(&iio_configfs_subsys.su_group);
return configfs_register_subsystem(&iio_configfs_subsys);
}
module_init(iio_configfs_init);
static void __exit iio_configfs_exit(void)
{
configfs_unregister_subsystem(&iio_configfs_subsys);
}
module_exit(iio_configfs_exit);
MODULE_AUTHOR("Daniel Baluta <[email protected]>");
MODULE_DESCRIPTION("Industrial I/O configfs support");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/industrialio-configfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* The Industrial I/O core, software trigger functions
*
* Copyright (c) 2015 Intel Corporation
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/iio/sw_trigger.h>
#include <linux/iio/configfs.h>
#include <linux/configfs.h>
static struct config_group *iio_triggers_group;
static const struct config_item_type iio_trigger_type_group_type;
static const struct config_item_type iio_triggers_group_type = {
.ct_owner = THIS_MODULE,
};
static LIST_HEAD(iio_trigger_types_list);
static DEFINE_MUTEX(iio_trigger_types_lock);
static
struct iio_sw_trigger_type *__iio_find_sw_trigger_type(const char *name,
unsigned int len)
{
struct iio_sw_trigger_type *t = NULL, *iter;
list_for_each_entry(iter, &iio_trigger_types_list, list)
if (!strcmp(iter->name, name)) {
t = iter;
break;
}
return t;
}
int iio_register_sw_trigger_type(struct iio_sw_trigger_type *t)
{
struct iio_sw_trigger_type *iter;
int ret = 0;
mutex_lock(&iio_trigger_types_lock);
iter = __iio_find_sw_trigger_type(t->name, strlen(t->name));
if (iter)
ret = -EBUSY;
else
list_add_tail(&t->list, &iio_trigger_types_list);
mutex_unlock(&iio_trigger_types_lock);
if (ret)
return ret;
t->group = configfs_register_default_group(iio_triggers_group, t->name,
&iio_trigger_type_group_type);
if (IS_ERR(t->group)) {
mutex_lock(&iio_trigger_types_lock);
list_del(&t->list);
mutex_unlock(&iio_trigger_types_lock);
ret = PTR_ERR(t->group);
}
return ret;
}
EXPORT_SYMBOL(iio_register_sw_trigger_type);
void iio_unregister_sw_trigger_type(struct iio_sw_trigger_type *t)
{
struct iio_sw_trigger_type *iter;
mutex_lock(&iio_trigger_types_lock);
iter = __iio_find_sw_trigger_type(t->name, strlen(t->name));
if (iter)
list_del(&t->list);
mutex_unlock(&iio_trigger_types_lock);
configfs_unregister_default_group(t->group);
}
EXPORT_SYMBOL(iio_unregister_sw_trigger_type);
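/*
 * Illustrative sketch (not part of this file): a software trigger backend
 * (such as the hrtimer or loop triggers) describes itself with a type and
 * registers it through the functions above, typically via the
 * module_iio_sw_trigger_driver() convenience macro. The "my_trig" names are
 * hypothetical.
 *
 *	static const struct iio_sw_trigger_ops my_trig_ops = {
 *		.probe = my_trig_probe,
 *		.remove = my_trig_remove,
 *	};
 *
 *	static struct iio_sw_trigger_type my_trig_type = {
 *		.name = "my-trig",
 *		.owner = THIS_MODULE,
 *		.ops = &my_trig_ops,
 *	};
 *
 *	module_iio_sw_trigger_driver(my_trig_type);
 */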
static
struct iio_sw_trigger_type *iio_get_sw_trigger_type(const char *name)
{
struct iio_sw_trigger_type *t;
mutex_lock(&iio_trigger_types_lock);
t = __iio_find_sw_trigger_type(name, strlen(name));
if (t && !try_module_get(t->owner))
t = NULL;
mutex_unlock(&iio_trigger_types_lock);
return t;
}
struct iio_sw_trigger *iio_sw_trigger_create(const char *type, const char *name)
{
struct iio_sw_trigger *t;
struct iio_sw_trigger_type *tt;
tt = iio_get_sw_trigger_type(type);
if (!tt) {
pr_err("Invalid trigger type: %s\n", type);
return ERR_PTR(-EINVAL);
}
t = tt->ops->probe(name);
if (IS_ERR(t))
goto out_module_put;
t->trigger_type = tt;
return t;
out_module_put:
module_put(tt->owner);
return t;
}
EXPORT_SYMBOL(iio_sw_trigger_create);
void iio_sw_trigger_destroy(struct iio_sw_trigger *t)
{
struct iio_sw_trigger_type *tt = t->trigger_type;
tt->ops->remove(t);
module_put(tt->owner);
}
EXPORT_SYMBOL(iio_sw_trigger_destroy);
static struct config_group *trigger_make_group(struct config_group *group,
const char *name)
{
struct iio_sw_trigger *t;
t = iio_sw_trigger_create(group->cg_item.ci_name, name);
if (IS_ERR(t))
return ERR_CAST(t);
config_item_set_name(&t->group.cg_item, "%s", name);
return &t->group;
}
static void trigger_drop_group(struct config_group *group,
struct config_item *item)
{
struct iio_sw_trigger *t = to_iio_sw_trigger(item);
iio_sw_trigger_destroy(t);
config_item_put(item);
}
static struct configfs_group_operations trigger_ops = {
.make_group = &trigger_make_group,
.drop_item = &trigger_drop_group,
};
static const struct config_item_type iio_trigger_type_group_type = {
.ct_group_ops = &trigger_ops,
.ct_owner = THIS_MODULE,
};
static int __init iio_sw_trigger_init(void)
{
iio_triggers_group =
configfs_register_default_group(&iio_configfs_subsys.su_group,
"triggers",
&iio_triggers_group_type);
return PTR_ERR_OR_ZERO(iio_triggers_group);
}
module_init(iio_sw_trigger_init);
static void __exit iio_sw_trigger_exit(void)
{
configfs_unregister_default_group(iio_triggers_group);
}
module_exit(iio_sw_trigger_exit);
MODULE_AUTHOR("Daniel Baluta <[email protected]>");
MODULE_DESCRIPTION("Industrial I/O software triggers support");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/industrialio-sw-trigger.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Industrial I/O event handling
*
* Copyright (c) 2008 Jonathan Cameron
*
* Based on elements of hwmon and input subsystems.
*/
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
/**
* struct iio_event_interface - chrdev interface for an event line
* @wait: wait queue to allow blocking reads of events
* @det_events: list of detected events
 * @dev_attr_list: list of event interface sysfs attributes
* @flags: file operations related flags including busy flag.
* @group: event interface sysfs attribute group
* @read_lock: lock to protect kfifo read operations
* @ioctl_handler: handler for event ioctl() calls
*/
struct iio_event_interface {
wait_queue_head_t wait;
DECLARE_KFIFO(det_events, struct iio_event_data, 16);
struct list_head dev_attr_list;
unsigned long flags;
struct attribute_group group;
struct mutex read_lock;
struct iio_ioctl_handler ioctl_handler;
};
bool iio_event_enabled(const struct iio_event_interface *ev_int)
{
return !!test_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
}
/**
* iio_push_event() - try to add event to the list for userspace reading
* @indio_dev: IIO device structure
* @ev_code: What event
* @timestamp: When the event occurred
*
* Note: The caller must make sure that this function is not running
* concurrently for the same indio_dev more than once.
*
* This function may be safely used as soon as a valid reference to iio_dev has
* been obtained via iio_device_alloc(), but any events that are submitted
* before iio_device_register() has successfully completed will be silently
* discarded.
**/
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
struct iio_event_data ev;
int copied;
if (!ev_int)
return 0;
/* Does anyone care? */
if (iio_event_enabled(ev_int)) {
ev.id = ev_code;
ev.timestamp = timestamp;
copied = kfifo_put(&ev_int->det_events, ev);
if (copied != 0)
wake_up_poll(&ev_int->wait, EPOLLIN);
}
return 0;
}
EXPORT_SYMBOL(iio_push_event);
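/*
 * Illustrative sketch (not part of this file): a threaded interrupt handler
 * in an event-capable driver typically pushes events like this. The channel
 * type, number and direction below are hypothetical.
 *
 *	iio_push_event(indio_dev,
 *		       IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
 *					    IIO_EV_TYPE_THRESH,
 *					    IIO_EV_DIR_RISING),
 *		       iio_get_time_ns(indio_dev));
 */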
/**
* iio_event_poll() - poll the event queue to find out if it has data
* @filep: File structure pointer to identify the device
* @wait: Poll table pointer to add the wait queue on
*
* Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
* or a negative error code on failure
*/
static __poll_t iio_event_poll(struct file *filep,
struct poll_table_struct *wait)
{
struct iio_dev *indio_dev = filep->private_data;
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
__poll_t events = 0;
if (!indio_dev->info)
return events;
poll_wait(filep, &ev_int->wait, wait);
if (!kfifo_is_empty(&ev_int->det_events))
events = EPOLLIN | EPOLLRDNORM;
return events;
}
static ssize_t iio_event_chrdev_read(struct file *filep,
char __user *buf,
size_t count,
loff_t *f_ps)
{
struct iio_dev *indio_dev = filep->private_data;
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
unsigned int copied;
int ret;
if (!indio_dev->info)
return -ENODEV;
if (count < sizeof(struct iio_event_data))
return -EINVAL;
do {
if (kfifo_is_empty(&ev_int->det_events)) {
if (filep->f_flags & O_NONBLOCK)
return -EAGAIN;
ret = wait_event_interruptible(ev_int->wait,
!kfifo_is_empty(&ev_int->det_events) ||
indio_dev->info == NULL);
if (ret)
return ret;
if (indio_dev->info == NULL)
return -ENODEV;
}
if (mutex_lock_interruptible(&ev_int->read_lock))
return -ERESTARTSYS;
ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
mutex_unlock(&ev_int->read_lock);
if (ret)
return ret;
/*
* If we couldn't read anything from the fifo (a different
* thread might have been faster) we either return -EAGAIN if
* the file descriptor is non-blocking, otherwise we go back to
* sleep and wait for more data to arrive.
*/
if (copied == 0 && (filep->f_flags & O_NONBLOCK))
return -EAGAIN;
} while (copied == 0);
return copied;
}
static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
struct iio_dev *indio_dev = filep->private_data;
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
iio_device_put(indio_dev);
return 0;
}
static const struct file_operations iio_event_chrdev_fileops = {
.read = iio_event_chrdev_read,
.poll = iio_event_poll,
.release = iio_event_chrdev_release,
.owner = THIS_MODULE,
.llseek = noop_llseek,
};
static int iio_event_getfd(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
int fd;
if (ev_int == NULL)
return -ENODEV;
fd = mutex_lock_interruptible(&iio_dev_opaque->mlock);
if (fd)
return fd;
if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
fd = -EBUSY;
goto unlock;
}
iio_device_get(indio_dev);
fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
indio_dev, O_RDONLY | O_CLOEXEC);
if (fd < 0) {
clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
iio_device_put(indio_dev);
} else {
kfifo_reset_out(&ev_int->det_events);
}
unlock:
mutex_unlock(&iio_dev_opaque->mlock);
return fd;
}
static const char * const iio_ev_type_text[] = {
[IIO_EV_TYPE_THRESH] = "thresh",
[IIO_EV_TYPE_MAG] = "mag",
[IIO_EV_TYPE_ROC] = "roc",
[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
[IIO_EV_TYPE_CHANGE] = "change",
[IIO_EV_TYPE_MAG_REFERENCED] = "mag_referenced",
[IIO_EV_TYPE_GESTURE] = "gesture",
};
static const char * const iio_ev_dir_text[] = {
[IIO_EV_DIR_EITHER] = "either",
[IIO_EV_DIR_RISING] = "rising",
[IIO_EV_DIR_FALLING] = "falling",
[IIO_EV_DIR_SINGLETAP] = "singletap",
[IIO_EV_DIR_DOUBLETAP] = "doubletap",
};
static const char * const iio_ev_info_text[] = {
[IIO_EV_INFO_ENABLE] = "en",
[IIO_EV_INFO_VALUE] = "value",
[IIO_EV_INFO_HYSTERESIS] = "hysteresis",
[IIO_EV_INFO_PERIOD] = "period",
[IIO_EV_INFO_HIGH_PASS_FILTER_3DB] = "high_pass_filter_3db",
[IIO_EV_INFO_LOW_PASS_FILTER_3DB] = "low_pass_filter_3db",
[IIO_EV_INFO_TIMEOUT] = "timeout",
[IIO_EV_INFO_RESET_TIMEOUT] = "reset_timeout",
[IIO_EV_INFO_TAP2_MIN_DELAY] = "tap2_min_delay",
[IIO_EV_INFO_RUNNING_PERIOD] = "runningperiod",
[IIO_EV_INFO_RUNNING_COUNT] = "runningcount",
};
static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr)
{
return attr->c->event_spec[attr->address & 0xffff].dir;
}
static enum iio_event_type iio_ev_attr_type(struct iio_dev_attr *attr)
{
return attr->c->event_spec[attr->address & 0xffff].type;
}
static enum iio_event_info iio_ev_attr_info(struct iio_dev_attr *attr)
{
return (attr->address >> 16) & 0xffff;
}
static ssize_t iio_ev_state_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
bool val;
ret = kstrtobool(buf, &val);
if (ret < 0)
return ret;
ret = indio_dev->info->write_event_config(indio_dev,
this_attr->c, iio_ev_attr_type(this_attr),
iio_ev_attr_dir(this_attr), val);
return (ret < 0) ? ret : len;
}
static ssize_t iio_ev_state_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int val;
val = indio_dev->info->read_event_config(indio_dev,
this_attr->c, iio_ev_attr_type(this_attr),
iio_ev_attr_dir(this_attr));
if (val < 0)
return val;
else
return sysfs_emit(buf, "%d\n", val);
}
static ssize_t iio_ev_value_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int val, val2, val_arr[2];
int ret;
ret = indio_dev->info->read_event_value(indio_dev,
this_attr->c, iio_ev_attr_type(this_attr),
iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
&val, &val2);
if (ret < 0)
return ret;
val_arr[0] = val;
val_arr[1] = val2;
return iio_format_value(buf, ret, 2, val_arr);
}
static ssize_t iio_ev_value_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int val, val2;
int ret;
if (!indio_dev->info->write_event_value)
return -EINVAL;
ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
if (ret)
return ret;
ret = indio_dev->info->write_event_value(indio_dev,
this_attr->c, iio_ev_attr_type(this_attr),
iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
val, val2);
if (ret < 0)
return ret;
return len;
}
static int iio_device_add_event(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, unsigned int spec_index,
enum iio_event_type type, enum iio_event_direction dir,
enum iio_shared_by shared_by, const unsigned long *mask)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
ssize_t (*show)(struct device *dev, struct device_attribute *attr,
char *buf);
ssize_t (*store)(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len);
unsigned int attrcount = 0;
unsigned int i;
char *postfix;
int ret;
for_each_set_bit(i, mask, sizeof(*mask)*8) {
if (i >= ARRAY_SIZE(iio_ev_info_text))
return -EINVAL;
if (dir != IIO_EV_DIR_NONE)
postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
iio_ev_type_text[type],
iio_ev_dir_text[dir],
iio_ev_info_text[i]);
else
postfix = kasprintf(GFP_KERNEL, "%s_%s",
iio_ev_type_text[type],
iio_ev_info_text[i]);
if (postfix == NULL)
return -ENOMEM;
if (i == IIO_EV_INFO_ENABLE) {
show = iio_ev_state_show;
store = iio_ev_state_store;
} else {
show = iio_ev_value_show;
store = iio_ev_value_store;
}
ret = __iio_add_chan_devattr(postfix, chan, show, store,
(i << 16) | spec_index, shared_by, &indio_dev->dev,
NULL,
&iio_dev_opaque->event_interface->dev_attr_list);
kfree(postfix);
if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
continue;
if (ret)
return ret;
attrcount++;
}
return attrcount;
}
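/*
 * Editor's note (illustrative, not from the original source): the postfix
 * built above is prefixed with the channel name by __iio_add_chan_devattr(),
 * so a rising threshold event on a voltage channel typically shows up in
 * sysfs as attributes such as:
 *
 *   in_voltage0_thresh_rising_en      (iio_ev_state_show/iio_ev_state_store)
 *   in_voltage0_thresh_rising_value   (iio_ev_value_show/iio_ev_value_store)
 *
 * The exact prefix depends on the channel type/index and the shared_by level;
 * the names above are an assumed example, not taken from a specific driver.
 */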
static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
int ret = 0, i, attrcount = 0;
enum iio_event_direction dir;
enum iio_event_type type;
for (i = 0; i < chan->num_event_specs; i++) {
type = chan->event_spec[i].type;
dir = chan->event_spec[i].dir;
ret = iio_device_add_event(indio_dev, chan, i, type, dir,
IIO_SEPARATE, &chan->event_spec[i].mask_separate);
if (ret < 0)
return ret;
attrcount += ret;
ret = iio_device_add_event(indio_dev, chan, i, type, dir,
IIO_SHARED_BY_TYPE,
&chan->event_spec[i].mask_shared_by_type);
if (ret < 0)
return ret;
attrcount += ret;
ret = iio_device_add_event(indio_dev, chan, i, type, dir,
IIO_SHARED_BY_DIR,
&chan->event_spec[i].mask_shared_by_dir);
if (ret < 0)
return ret;
attrcount += ret;
ret = iio_device_add_event(indio_dev, chan, i, type, dir,
IIO_SHARED_BY_ALL,
&chan->event_spec[i].mask_shared_by_all);
if (ret < 0)
return ret;
attrcount += ret;
}
ret = attrcount;
return ret;
}
static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
int j, ret, attrcount = 0;
/* Dynamically created from the channels array */
for (j = 0; j < indio_dev->num_channels; j++) {
ret = iio_device_add_event_sysfs(indio_dev,
&indio_dev->channels[j]);
if (ret < 0)
return ret;
attrcount += ret;
}
return attrcount;
}
static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
{
int j;
for (j = 0; j < indio_dev->num_channels; j++) {
if (indio_dev->channels[j].num_event_specs != 0)
return true;
}
return false;
}
static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
INIT_KFIFO(ev_int->det_events);
init_waitqueue_head(&ev_int->wait);
mutex_init(&ev_int->read_lock);
}
static long iio_event_ioctl(struct iio_dev *indio_dev, struct file *filp,
unsigned int cmd, unsigned long arg)
{
int __user *ip = (int __user *)arg;
int fd;
if (cmd == IIO_GET_EVENT_FD_IOCTL) {
fd = iio_event_getfd(indio_dev);
if (fd < 0)
return fd;
if (copy_to_user(ip, &fd, sizeof(fd)))
return -EFAULT;
return 0;
}
return IIO_IOCTL_UNHANDLED;
}
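/*
 * Editor's sketch (not part of the original file): minimal user-space use of
 * the IIO_GET_EVENT_FD_IOCTL handled above. The device path is an assumption
 * and error handling is abbreviated; compile separately as user-space C.
 */
#if 0	/* illustrative user-space code, never built with the kernel */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/iio/events.h>	/* struct iio_event_data, IIO_GET_EVENT_FD_IOCTL */

int main(void)
{
	int dev_fd = open("/dev/iio:device0", O_RDONLY);	/* assumed path */
	struct iio_event_data ev;
	int ev_fd;

	if (dev_fd < 0)
		return 1;
	/* Routed to iio_event_ioctl() above, which returns an anon event fd */
	if (ioctl(dev_fd, IIO_GET_EVENT_FD_IOCTL, &ev_fd) < 0)
		return 1;
	close(dev_fd);
	/* Blocking read of one queued event from the event fd */
	if (read(ev_fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("event id 0x%llx at %lld ns\n",
		       (unsigned long long)ev.id, (long long)ev.timestamp);
	close(ev_fd);
	return 0;
}
#endif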
static const char *iio_event_group_name = "events";
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_event_interface *ev_int;
struct iio_dev_attr *p;
int ret = 0, attrcount_orig = 0, attrcount, attrn;
struct attribute **attr;
if (!(indio_dev->info->event_attrs ||
iio_check_for_dynamic_events(indio_dev)))
return 0;
ev_int = kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
if (ev_int == NULL)
return -ENOMEM;
iio_dev_opaque->event_interface = ev_int;
INIT_LIST_HEAD(&ev_int->dev_attr_list);
iio_setup_ev_int(ev_int);
if (indio_dev->info->event_attrs != NULL) {
attr = indio_dev->info->event_attrs->attrs;
while (*attr++ != NULL)
attrcount_orig++;
}
attrcount = attrcount_orig;
if (indio_dev->channels) {
ret = __iio_add_event_config_attrs(indio_dev);
if (ret < 0)
goto error_free_setup_event_lines;
attrcount += ret;
}
ev_int->group.name = iio_event_group_name;
ev_int->group.attrs = kcalloc(attrcount + 1,
sizeof(ev_int->group.attrs[0]),
GFP_KERNEL);
if (ev_int->group.attrs == NULL) {
ret = -ENOMEM;
goto error_free_setup_event_lines;
}
if (indio_dev->info->event_attrs)
memcpy(ev_int->group.attrs,
indio_dev->info->event_attrs->attrs,
sizeof(ev_int->group.attrs[0]) * attrcount_orig);
attrn = attrcount_orig;
/* Add all elements from the list. */
list_for_each_entry(p, &ev_int->dev_attr_list, l)
ev_int->group.attrs[attrn++] = &p->dev_attr.attr;
ret = iio_device_register_sysfs_group(indio_dev, &ev_int->group);
if (ret)
goto error_free_group_attrs;
ev_int->ioctl_handler.ioctl = iio_event_ioctl;
iio_device_ioctl_handler_register(&iio_dev_opaque->indio_dev,
&ev_int->ioctl_handler);
return 0;
error_free_group_attrs:
kfree(ev_int->group.attrs);
error_free_setup_event_lines:
iio_free_chan_devattr_list(&ev_int->dev_attr_list);
kfree(ev_int);
iio_dev_opaque->event_interface = NULL;
return ret;
}
/**
* iio_device_wakeup_eventset - Wakes up the event waitqueue
* @indio_dev: The IIO device
*
* Wakes up the event waitqueue used for poll() and blocking read().
* Should usually be called when the device is unregistered.
*/
void iio_device_wakeup_eventset(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
if (iio_dev_opaque->event_interface == NULL)
return;
wake_up(&iio_dev_opaque->event_interface->wait);
}
void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
if (ev_int == NULL)
return;
iio_device_ioctl_handler_unregister(&ev_int->ioctl_handler);
iio_free_chan_devattr_list(&ev_int->dev_attr_list);
kfree(ev_int->group.attrs);
kfree(ev_int);
iio_dev_opaque->event_interface = NULL;
}
| linux-master | drivers/iio/industrialio-event.c |
// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core
*
* Copyright (c) 2008 Jonathan Cameron
*
* Handling of buffer allocation / resizing.
*
* Things to look at here.
* - Better memory allocation techniques?
* - Alternative access techniques?
*/
#include <linux/anon_inodes.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
static const char * const iio_endian_prefix[] = {
[IIO_BE] = "be",
[IIO_LE] = "le",
};
static bool iio_buffer_is_active(struct iio_buffer *buf)
{
return !list_empty(&buf->buffer_list);
}
static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
return buf->access->data_available(buf);
}
static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
struct iio_buffer *buf, size_t required)
{
if (!indio_dev->info->hwfifo_flush_to_buffer)
return -ENODEV;
return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}
static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
size_t to_wait, int to_flush)
{
size_t avail;
int flushed = 0;
/* wakeup if the device was unregistered */
if (!indio_dev->info)
return true;
/* drain the buffer if it was disabled */
if (!iio_buffer_is_active(buf)) {
to_wait = min_t(size_t, to_wait, 1);
to_flush = 0;
}
avail = iio_buffer_data_available(buf);
if (avail >= to_wait) {
/* force a flush for non-blocking reads */
if (!to_wait && avail < to_flush)
iio_buffer_flush_hwfifo(indio_dev, buf,
to_flush - avail);
return true;
}
if (to_flush)
flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
to_wait - avail);
if (flushed <= 0)
return false;
if (avail + flushed >= to_wait)
return true;
return false;
}
/**
* iio_buffer_read() - chrdev read for buffer access
* @filp: File structure pointer for the char device
* @buf: Destination buffer for iio buffer read
* @n: First n bytes to read
* @f_ps: Long offset provided by the user as a seek position
*
* This function relies on all buffer implementations having an
* iio_buffer as their first element.
*
* Return: a negative error code on failure, otherwise the number of bytes
* read; any non-zero return value ends the reading loop
**/
static ssize_t iio_buffer_read(struct file *filp, char __user *buf,
size_t n, loff_t *f_ps)
{
struct iio_dev_buffer_pair *ib = filp->private_data;
struct iio_buffer *rb = ib->buffer;
struct iio_dev *indio_dev = ib->indio_dev;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
size_t datum_size;
size_t to_wait;
int ret = 0;
if (!indio_dev->info)
return -ENODEV;
if (!rb || !rb->access->read)
return -EINVAL;
if (rb->direction != IIO_BUFFER_DIRECTION_IN)
return -EPERM;
datum_size = rb->bytes_per_datum;
/*
* If datum_size is 0 there will never be anything to read from the
* buffer, so signal end of file now.
*/
if (!datum_size)
return 0;
if (filp->f_flags & O_NONBLOCK)
to_wait = 0;
else
to_wait = min_t(size_t, n / datum_size, rb->watermark);
add_wait_queue(&rb->pollq, &wait);
do {
if (!indio_dev->info) {
ret = -ENODEV;
break;
}
if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
wait_woken(&wait, TASK_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
continue;
}
ret = rb->access->read(rb, n, buf);
if (ret == 0 && (filp->f_flags & O_NONBLOCK))
ret = -EAGAIN;
} while (ret == 0);
remove_wait_queue(&rb->pollq, &wait);
return ret;
}
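/*
 * Editor's sketch (not from the original file): the classic user-space flow
 * that ends up in iio_buffer_read() above. The device index, channel name
 * and buffer length are assumptions used purely for illustration.
 */
#if 0	/* illustrative user-space code, never built with the kernel */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

static void sysfs_write(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (f) {
		fputs(val, f);
		fclose(f);
	}
}

int main(void)
{
	char raw[256];
	ssize_t n;
	int fd;

	/* Enable one scan element and the buffer via the legacy sysfs groups
	 * registered by iio_buffer_register_legacy_sysfs_groups() below. */
	sysfs_write("/sys/bus/iio/devices/iio:device0/scan_elements/in_voltage0_en", "1");
	sysfs_write("/sys/bus/iio/devices/iio:device0/buffer/length", "128");
	sysfs_write("/sys/bus/iio/devices/iio:device0/buffer/enable", "1");

	fd = open("/dev/iio:device0", O_RDONLY);	/* blocking reads */
	if (fd < 0)
		return 1;
	n = read(fd, raw, sizeof(raw));	/* serviced by iio_buffer_read() */
	printf("read %zd bytes of scan data\n", n);
	close(fd);

	sysfs_write("/sys/bus/iio/devices/iio:device0/buffer/enable", "0");
	return 0;
}
#endif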
static size_t iio_buffer_space_available(struct iio_buffer *buf)
{
if (buf->access->space_available)
return buf->access->space_available(buf);
return SIZE_MAX;
}
static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
size_t n, loff_t *f_ps)
{
struct iio_dev_buffer_pair *ib = filp->private_data;
struct iio_buffer *rb = ib->buffer;
struct iio_dev *indio_dev = ib->indio_dev;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
int ret = 0;
size_t written;
if (!indio_dev->info)
return -ENODEV;
if (!rb || !rb->access->write)
return -EINVAL;
if (rb->direction != IIO_BUFFER_DIRECTION_OUT)
return -EPERM;
written = 0;
add_wait_queue(&rb->pollq, &wait);
do {
if (!indio_dev->info)
return -ENODEV;
if (!iio_buffer_space_available(rb)) {
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
if (filp->f_flags & O_NONBLOCK) {
if (!written)
ret = -EAGAIN;
break;
}
wait_woken(&wait, TASK_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
continue;
}
ret = rb->access->write(rb, n - written, buf + written);
if (ret < 0)
break;
written += ret;
} while (written != n);
remove_wait_queue(&rb->pollq, &wait);
return ret < 0 ? ret : written;
}
/**
* iio_buffer_poll() - poll the buffer to find out if it has data
* @filp: File structure pointer for device access
* @wait: Poll table structure pointer for which the driver adds
* a wait queue
*
* Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading,
* (EPOLLOUT | EPOLLWRNORM) if space is available for writing, or 0 otherwise
*/
static __poll_t iio_buffer_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct iio_dev_buffer_pair *ib = filp->private_data;
struct iio_buffer *rb = ib->buffer;
struct iio_dev *indio_dev = ib->indio_dev;
if (!indio_dev->info || !rb)
return 0;
poll_wait(filp, &rb->pollq, wait);
switch (rb->direction) {
case IIO_BUFFER_DIRECTION_IN:
if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
return EPOLLIN | EPOLLRDNORM;
break;
case IIO_BUFFER_DIRECTION_OUT:
if (iio_buffer_space_available(rb))
return EPOLLOUT | EPOLLWRNORM;
break;
}
return 0;
}
ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf,
size_t n, loff_t *f_ps)
{
struct iio_dev_buffer_pair *ib = filp->private_data;
struct iio_buffer *rb = ib->buffer;
/* check if buffer was opened through new API */
if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
return -EBUSY;
return iio_buffer_read(filp, buf, n, f_ps);
}
ssize_t iio_buffer_write_wrapper(struct file *filp, const char __user *buf,
size_t n, loff_t *f_ps)
{
struct iio_dev_buffer_pair *ib = filp->private_data;
struct iio_buffer *rb = ib->buffer;
/* check if buffer was opened through new API */
if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
return -EBUSY;
return iio_buffer_write(filp, buf, n, f_ps);
}
__poll_t iio_buffer_poll_wrapper(struct file *filp,
struct poll_table_struct *wait)
{
struct iio_dev_buffer_pair *ib = filp->private_data;
struct iio_buffer *rb = ib->buffer;
/* check if buffer was opened through new API */
if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
return 0;
return iio_buffer_poll(filp, wait);
}
/**
* iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
* @indio_dev: The IIO device
*
* Wakes up the buffer waitqueues used for poll(). Should usually
* be called when the device is unregistered.
*/
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_buffer *buffer;
unsigned int i;
for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
buffer = iio_dev_opaque->attached_buffers[i];
wake_up(&buffer->pollq);
}
}
int iio_pop_from_buffer(struct iio_buffer *buffer, void *data)
{
if (!buffer || !buffer->access || !buffer->access->remove_from)
return -EINVAL;
return buffer->access->remove_from(buffer, data);
}
EXPORT_SYMBOL_GPL(iio_pop_from_buffer);
void iio_buffer_init(struct iio_buffer *buffer)
{
INIT_LIST_HEAD(&buffer->demux_list);
INIT_LIST_HEAD(&buffer->buffer_list);
init_waitqueue_head(&buffer->pollq);
kref_init(&buffer->ref);
if (!buffer->watermark)
buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);
void iio_device_detach_buffers(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_buffer *buffer;
unsigned int i;
for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
buffer = iio_dev_opaque->attached_buffers[i];
iio_buffer_put(buffer);
}
kfree(iio_dev_opaque->attached_buffers);
}
static ssize_t iio_show_scan_index(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}
static ssize_t iio_show_fixed_type(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
u8 type = this_attr->c->scan_type.endianness;
if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
type = IIO_LE;
#else
type = IIO_BE;
#endif
}
if (this_attr->c->scan_type.repeat > 1)
return sysfs_emit(buf, "%s:%c%d/%dX%d>>%u\n",
iio_endian_prefix[type],
this_attr->c->scan_type.sign,
this_attr->c->scan_type.realbits,
this_attr->c->scan_type.storagebits,
this_attr->c->scan_type.repeat,
this_attr->c->scan_type.shift);
else
return sysfs_emit(buf, "%s:%c%d/%d>>%u\n",
iio_endian_prefix[type],
this_attr->c->scan_type.sign,
this_attr->c->scan_type.realbits,
this_attr->c->scan_type.storagebits,
this_attr->c->scan_type.shift);
}
static ssize_t iio_scan_el_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int ret;
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
/* Ensure ret is 0 or 1. */
ret = !!test_bit(to_iio_dev_attr(attr)->address,
buffer->scan_mask);
return sysfs_emit(buf, "%d\n", ret);
}
/* Note: NULL is used as the error indicator, as a NULL mask can never be a valid match. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
unsigned int masklength,
const unsigned long *mask,
bool strict)
{
if (bitmap_empty(mask, masklength))
return NULL;
while (*av_masks) {
if (strict) {
if (bitmap_equal(mask, av_masks, masklength))
return av_masks;
} else {
if (bitmap_subset(mask, av_masks, masklength))
return av_masks;
}
av_masks += BITS_TO_LONGS(masklength);
}
return NULL;
}
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
const unsigned long *mask)
{
if (!indio_dev->setup_ops->validate_scan_mask)
return true;
return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}
/**
* iio_scan_mask_set() - set particular bit in the scan mask
* @indio_dev: the iio device
* @buffer: the buffer whose scan mask we are interested in
* @bit: the bit to be set.
*
* Note that at this point we have no way of knowing what other
* buffers might request, hence this code only verifies that the
* individual buffer's request is plausible.
*/
static int iio_scan_mask_set(struct iio_dev *indio_dev,
struct iio_buffer *buffer, int bit)
{
const unsigned long *mask;
unsigned long *trialmask;
if (!indio_dev->masklength) {
WARN(1, "Trying to set scanmask prior to registering buffer\n");
return -EINVAL;
}
trialmask = bitmap_alloc(indio_dev->masklength, GFP_KERNEL);
if (!trialmask)
return -ENOMEM;
bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
set_bit(bit, trialmask);
if (!iio_validate_scan_mask(indio_dev, trialmask))
goto err_invalid_mask;
if (indio_dev->available_scan_masks) {
mask = iio_scan_mask_match(indio_dev->available_scan_masks,
indio_dev->masklength,
trialmask, false);
if (!mask)
goto err_invalid_mask;
}
bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
bitmap_free(trialmask);
return 0;
err_invalid_mask:
bitmap_free(trialmask);
return -EINVAL;
}
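/*
 * Editor's note (illustrative): for hardware that can only capture channel
 * pairs {0,1} or {2,3}, available_scan_masks holds those two bitmaps. Setting
 * bit 0 above then succeeds because {0} is a subset of {0,1}: the non-strict
 * iio_scan_mask_match() lets the device capture the superset while the demux
 * code further down strips the scan back to just the requested channels.
 */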
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
clear_bit(bit, buffer->scan_mask);
return 0;
}
static int iio_scan_mask_query(struct iio_dev *indio_dev,
struct iio_buffer *buffer, int bit)
{
if (bit > indio_dev->masklength)
return -EINVAL;
if (!buffer->scan_mask)
return 0;
/* Ensure return value is 0 or 1. */
return !!test_bit(bit, buffer->scan_mask);
};
static ssize_t iio_scan_el_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
int ret;
bool state;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_buffer *buffer = this_attr->buffer;
ret = kstrtobool(buf, &state);
if (ret < 0)
return ret;
mutex_lock(&iio_dev_opaque->mlock);
if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
goto error_ret;
}
ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
if (ret < 0)
goto error_ret;
if (!state && ret) {
ret = iio_scan_mask_clear(buffer, this_attr->address);
if (ret)
goto error_ret;
} else if (state && !ret) {
ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
if (ret)
goto error_ret;
}
error_ret:
mutex_unlock(&iio_dev_opaque->mlock);
return ret < 0 ? ret : len;
}
static ssize_t iio_scan_el_ts_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
return sysfs_emit(buf, "%d\n", buffer->scan_timestamp);
}
static ssize_t iio_scan_el_ts_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
bool state;
ret = kstrtobool(buf, &state);
if (ret < 0)
return ret;
mutex_lock(&iio_dev_opaque->mlock);
if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
goto error_ret;
}
buffer->scan_timestamp = state;
error_ret:
mutex_unlock(&iio_dev_opaque->mlock);
return ret ? ret : len;
}
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
struct iio_buffer *buffer,
const struct iio_chan_spec *chan)
{
int ret, attrcount = 0;
ret = __iio_add_chan_devattr("index",
chan,
&iio_show_scan_index,
NULL,
0,
IIO_SEPARATE,
&indio_dev->dev,
buffer,
&buffer->buffer_attr_list);
if (ret)
return ret;
attrcount++;
ret = __iio_add_chan_devattr("type",
chan,
&iio_show_fixed_type,
NULL,
0,
0,
&indio_dev->dev,
buffer,
&buffer->buffer_attr_list);
if (ret)
return ret;
attrcount++;
if (chan->type != IIO_TIMESTAMP)
ret = __iio_add_chan_devattr("en",
chan,
&iio_scan_el_show,
&iio_scan_el_store,
chan->scan_index,
0,
&indio_dev->dev,
buffer,
&buffer->buffer_attr_list);
else
ret = __iio_add_chan_devattr("en",
chan,
&iio_scan_el_ts_show,
&iio_scan_el_ts_store,
chan->scan_index,
0,
&indio_dev->dev,
buffer,
&buffer->buffer_attr_list);
if (ret)
return ret;
attrcount++;
ret = attrcount;
return ret;
}
static ssize_t length_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
return sysfs_emit(buf, "%d\n", buffer->length);
}
static ssize_t length_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
unsigned int val;
int ret;
ret = kstrtouint(buf, 10, &val);
if (ret)
return ret;
if (val == buffer->length)
return len;
mutex_lock(&iio_dev_opaque->mlock);
if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
} else {
buffer->access->set_length(buffer, val);
ret = 0;
}
if (ret)
goto out;
if (buffer->length && buffer->length < buffer->watermark)
buffer->watermark = buffer->length;
out:
mutex_unlock(&iio_dev_opaque->mlock);
return ret ? ret : len;
}
static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
return sysfs_emit(buf, "%d\n", iio_buffer_is_active(buffer));
}
static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
unsigned int scan_index)
{
const struct iio_chan_spec *ch;
unsigned int bytes;
ch = iio_find_channel_from_si(indio_dev, scan_index);
bytes = ch->scan_type.storagebits / 8;
if (ch->scan_type.repeat > 1)
bytes *= ch->scan_type.repeat;
return bytes;
}
static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
return iio_storage_bytes_for_si(indio_dev,
iio_dev_opaque->scan_index_timestamp);
}
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
const unsigned long *mask, bool timestamp)
{
unsigned int bytes = 0;
int length, i, largest = 0;
/* How much space will the demuxed element take? */
for_each_set_bit(i, mask,
indio_dev->masklength) {
length = iio_storage_bytes_for_si(indio_dev, i);
bytes = ALIGN(bytes, length);
bytes += length;
largest = max(largest, length);
}
if (timestamp) {
length = iio_storage_bytes_for_timestamp(indio_dev);
bytes = ALIGN(bytes, length);
bytes += length;
largest = max(largest, length);
}
bytes = ALIGN(bytes, largest);
return bytes;
}
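/*
 * Editor's note (illustrative worked example): with two enabled channels of
 * 2 and 4 storage bytes plus an 8-byte timestamp, the loop above places the
 * 2-byte sample at offset 0, aligns to 4 for the 4-byte sample (offsets 4-7),
 * aligns to 8 for the timestamp (offsets 8-15), and the final ALIGN rounds
 * the total up to a multiple of the largest element (8), giving 16 bytes per
 * scan. This is the layout the demux code below relies on.
 */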
static void iio_buffer_activate(struct iio_dev *indio_dev,
struct iio_buffer *buffer)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
iio_buffer_get(buffer);
list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
}
static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
list_del_init(&buffer->buffer_list);
wake_up_interruptible(&buffer->pollq);
iio_buffer_put(buffer);
}
static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_buffer *buffer, *_buffer;
list_for_each_entry_safe(buffer, _buffer,
&iio_dev_opaque->buffer_list, buffer_list)
iio_buffer_deactivate(buffer);
}
static int iio_buffer_enable(struct iio_buffer *buffer,
struct iio_dev *indio_dev)
{
if (!buffer->access->enable)
return 0;
return buffer->access->enable(buffer, indio_dev);
}
static int iio_buffer_disable(struct iio_buffer *buffer,
struct iio_dev *indio_dev)
{
if (!buffer->access->disable)
return 0;
return buffer->access->disable(buffer, indio_dev);
}
static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
struct iio_buffer *buffer)
{
unsigned int bytes;
if (!buffer->access->set_bytes_per_datum)
return;
bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
buffer->scan_timestamp);
buffer->access->set_bytes_per_datum(buffer, bytes);
}
static int iio_buffer_request_update(struct iio_dev *indio_dev,
struct iio_buffer *buffer)
{
int ret;
iio_buffer_update_bytes_per_datum(indio_dev, buffer);
if (buffer->access->request_update) {
ret = buffer->access->request_update(buffer);
if (ret) {
dev_dbg(&indio_dev->dev,
"Buffer not started: buffer parameter update failed (%d)\n",
ret);
return ret;
}
}
return 0;
}
static void iio_free_scan_mask(struct iio_dev *indio_dev,
const unsigned long *mask)
{
/* If the mask is dynamically allocated free it, otherwise do nothing */
if (!indio_dev->available_scan_masks)
bitmap_free(mask);
}
struct iio_device_config {
unsigned int mode;
unsigned int watermark;
const unsigned long *scan_mask;
unsigned int scan_bytes;
bool scan_timestamp;
};
static int iio_verify_update(struct iio_dev *indio_dev,
struct iio_buffer *insert_buffer,
struct iio_buffer *remove_buffer,
struct iio_device_config *config)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
unsigned long *compound_mask;
const unsigned long *scan_mask;
bool strict_scanmask = false;
struct iio_buffer *buffer;
bool scan_timestamp;
unsigned int modes;
if (insert_buffer &&
bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
dev_dbg(&indio_dev->dev,
"At least one scan element must be enabled first\n");
return -EINVAL;
}
memset(config, 0, sizeof(*config));
config->watermark = ~0;
/*
* If there is just one buffer and we are removing it there is nothing
* to verify.
*/
if (remove_buffer && !insert_buffer &&
list_is_singular(&iio_dev_opaque->buffer_list))
return 0;
modes = indio_dev->modes;
list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
if (buffer == remove_buffer)
continue;
modes &= buffer->access->modes;
config->watermark = min(config->watermark, buffer->watermark);
}
if (insert_buffer) {
modes &= insert_buffer->access->modes;
config->watermark = min(config->watermark,
insert_buffer->watermark);
}
/* Definitely possible for devices to support both of these. */
if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
config->mode = INDIO_BUFFER_TRIGGERED;
} else if (modes & INDIO_BUFFER_HARDWARE) {
/*
* Keep things simple for now and only allow a single buffer to
* be connected in hardware mode.
*/
if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
return -EINVAL;
config->mode = INDIO_BUFFER_HARDWARE;
strict_scanmask = true;
} else if (modes & INDIO_BUFFER_SOFTWARE) {
config->mode = INDIO_BUFFER_SOFTWARE;
} else {
/* Can only occur on first buffer */
if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
return -EINVAL;
}
/* What scan mask do we actually have? */
compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
if (!compound_mask)
return -ENOMEM;
scan_timestamp = false;
list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
if (buffer == remove_buffer)
continue;
bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
indio_dev->masklength);
scan_timestamp |= buffer->scan_timestamp;
}
if (insert_buffer) {
bitmap_or(compound_mask, compound_mask,
insert_buffer->scan_mask, indio_dev->masklength);
scan_timestamp |= insert_buffer->scan_timestamp;
}
if (indio_dev->available_scan_masks) {
scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
indio_dev->masklength,
compound_mask,
strict_scanmask);
bitmap_free(compound_mask);
if (!scan_mask)
return -EINVAL;
} else {
scan_mask = compound_mask;
}
config->scan_bytes = iio_compute_scan_bytes(indio_dev,
scan_mask, scan_timestamp);
config->scan_mask = scan_mask;
config->scan_timestamp = scan_timestamp;
return 0;
}
/**
* struct iio_demux_table - table describing demux memcpy ops
* @from: index to copy from
* @to: index to copy to
* @length: how many bytes to copy
* @l: list head used for management
*/
struct iio_demux_table {
unsigned int from;
unsigned int to;
unsigned int length;
struct list_head l;
};
static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
struct iio_demux_table *p, *q;
list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
list_del(&p->l);
kfree(p);
}
}
static int iio_buffer_add_demux(struct iio_buffer *buffer,
struct iio_demux_table **p, unsigned int in_loc,
unsigned int out_loc,
unsigned int length)
{
if (*p && (*p)->from + (*p)->length == in_loc &&
(*p)->to + (*p)->length == out_loc) {
(*p)->length += length;
} else {
*p = kmalloc(sizeof(**p), GFP_KERNEL);
if (!(*p))
return -ENOMEM;
(*p)->from = in_loc;
(*p)->to = out_loc;
(*p)->length = length;
list_add_tail(&(*p)->l, &buffer->demux_list);
}
return 0;
}
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
struct iio_buffer *buffer)
{
int ret, in_ind = -1, out_ind, length;
unsigned int in_loc = 0, out_loc = 0;
struct iio_demux_table *p = NULL;
/* Clear out any old demux */
iio_buffer_demux_free(buffer);
kfree(buffer->demux_bounce);
buffer->demux_bounce = NULL;
/* First work out which scan mode we will actually have */
if (bitmap_equal(indio_dev->active_scan_mask,
buffer->scan_mask,
indio_dev->masklength))
return 0;
/* Now we have the two masks, work from least sig and build up sizes */
for_each_set_bit(out_ind,
buffer->scan_mask,
indio_dev->masklength) {
in_ind = find_next_bit(indio_dev->active_scan_mask,
indio_dev->masklength,
in_ind + 1);
while (in_ind != out_ind) {
length = iio_storage_bytes_for_si(indio_dev, in_ind);
/* Make sure we are aligned */
in_loc = roundup(in_loc, length) + length;
in_ind = find_next_bit(indio_dev->active_scan_mask,
indio_dev->masklength,
in_ind + 1);
}
length = iio_storage_bytes_for_si(indio_dev, in_ind);
out_loc = roundup(out_loc, length);
in_loc = roundup(in_loc, length);
ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
if (ret)
goto error_clear_mux_table;
out_loc += length;
in_loc += length;
}
/* Relies on scan_timestamp being last */
if (buffer->scan_timestamp) {
length = iio_storage_bytes_for_timestamp(indio_dev);
out_loc = roundup(out_loc, length);
in_loc = roundup(in_loc, length);
ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
if (ret)
goto error_clear_mux_table;
out_loc += length;
}
buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
if (!buffer->demux_bounce) {
ret = -ENOMEM;
goto error_clear_mux_table;
}
return 0;
error_clear_mux_table:
iio_buffer_demux_free(buffer);
return ret;
}
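/*
 * Editor's note (illustrative): if active_scan_mask covers channels 0, 1 and
 * 2 but a particular buffer only enabled channels 0 and 2, the loop above
 * emits demux table entries that copy channel 0 to offset 0 of demux_bounce
 * and channel 2 to the next suitably aligned slot, so each buffer receives a
 * densely packed scan containing only the channels it requested.
 */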
static int iio_update_demux(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_buffer *buffer;
int ret;
list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
ret = iio_buffer_update_demux(indio_dev, buffer);
if (ret < 0)
goto error_clear_mux_table;
}
return 0;
error_clear_mux_table:
list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
iio_buffer_demux_free(buffer);
return ret;
}
static int iio_enable_buffers(struct iio_dev *indio_dev,
struct iio_device_config *config)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_buffer *buffer, *tmp = NULL;
int ret;
indio_dev->active_scan_mask = config->scan_mask;
indio_dev->scan_timestamp = config->scan_timestamp;
indio_dev->scan_bytes = config->scan_bytes;
iio_dev_opaque->currentmode = config->mode;
iio_update_demux(indio_dev);
/* Wind up again */
if (indio_dev->setup_ops->preenable) {
ret = indio_dev->setup_ops->preenable(indio_dev);
if (ret) {
dev_dbg(&indio_dev->dev,
"Buffer not started: buffer preenable failed (%d)\n", ret);
goto err_undo_config;
}
}
if (indio_dev->info->update_scan_mode) {
ret = indio_dev->info
->update_scan_mode(indio_dev,
indio_dev->active_scan_mask);
if (ret < 0) {
dev_dbg(&indio_dev->dev,
"Buffer not started: update scan mode failed (%d)\n",
ret);
goto err_run_postdisable;
}
}
if (indio_dev->info->hwfifo_set_watermark)
indio_dev->info->hwfifo_set_watermark(indio_dev,
config->watermark);
list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
ret = iio_buffer_enable(buffer, indio_dev);
if (ret) {
tmp = buffer;
goto err_disable_buffers;
}
}
if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
ret = iio_trigger_attach_poll_func(indio_dev->trig,
indio_dev->pollfunc);
if (ret)
goto err_disable_buffers;
}
if (indio_dev->setup_ops->postenable) {
ret = indio_dev->setup_ops->postenable(indio_dev);
if (ret) {
dev_dbg(&indio_dev->dev,
"Buffer not started: postenable failed (%d)\n", ret);
goto err_detach_pollfunc;
}
}
return 0;
err_detach_pollfunc:
if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
iio_trigger_detach_poll_func(indio_dev->trig,
indio_dev->pollfunc);
}
err_disable_buffers:
buffer = list_prepare_entry(tmp, &iio_dev_opaque->buffer_list, buffer_list);
list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
buffer_list)
iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
if (indio_dev->setup_ops->postdisable)
indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
indio_dev->active_scan_mask = NULL;
return ret;
}
static int iio_disable_buffers(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_buffer *buffer;
int ret = 0;
int ret2;
/* Wind down existing buffers - iff there are any */
if (list_empty(&iio_dev_opaque->buffer_list))
return 0;
/*
* If things go wrong at some step in disable we still need to continue
* to perform the other steps, otherwise we leave the device in an
* inconsistent state. We return the error code for the first error we
* encountered.
*/
if (indio_dev->setup_ops->predisable) {
ret2 = indio_dev->setup_ops->predisable(indio_dev);
if (ret2 && !ret)
ret = ret2;
}
if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
iio_trigger_detach_poll_func(indio_dev->trig,
indio_dev->pollfunc);
}
list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
ret2 = iio_buffer_disable(buffer, indio_dev);
if (ret2 && !ret)
ret = ret2;
}
if (indio_dev->setup_ops->postdisable) {
ret2 = indio_dev->setup_ops->postdisable(indio_dev);
if (ret2 && !ret)
ret = ret2;
}
iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
indio_dev->active_scan_mask = NULL;
iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
return ret;
}
static int __iio_update_buffers(struct iio_dev *indio_dev,
struct iio_buffer *insert_buffer,
struct iio_buffer *remove_buffer)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_device_config new_config;
int ret;
ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
&new_config);
if (ret)
return ret;
if (insert_buffer) {
ret = iio_buffer_request_update(indio_dev, insert_buffer);
if (ret)
goto err_free_config;
}
ret = iio_disable_buffers(indio_dev);
if (ret)
goto err_deactivate_all;
if (remove_buffer)
iio_buffer_deactivate(remove_buffer);
if (insert_buffer)
iio_buffer_activate(indio_dev, insert_buffer);
/* If no buffers in list, we are done */
if (list_empty(&iio_dev_opaque->buffer_list))
return 0;
ret = iio_enable_buffers(indio_dev, &new_config);
if (ret)
goto err_deactivate_all;
return 0;
err_deactivate_all:
/*
* We've already verified that the config is valid earlier. If things go
* wrong in either enable or disable the most likely reason is an IO
* error from the device. In this case there is no good recovery
* strategy. Just make sure to disable everything and leave the device
* in a sane state. With a bit of luck the device might come back to
* life again later and userspace can try again.
*/
iio_buffer_deactivate_all(indio_dev);
err_free_config:
iio_free_scan_mask(indio_dev, new_config.scan_mask);
return ret;
}
int iio_update_buffers(struct iio_dev *indio_dev,
struct iio_buffer *insert_buffer,
struct iio_buffer *remove_buffer)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
int ret;
if (insert_buffer == remove_buffer)
return 0;
if (insert_buffer &&
insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT)
return -EINVAL;
mutex_lock(&iio_dev_opaque->info_exist_lock);
mutex_lock(&iio_dev_opaque->mlock);
if (insert_buffer && iio_buffer_is_active(insert_buffer))
insert_buffer = NULL;
if (remove_buffer && !iio_buffer_is_active(remove_buffer))
remove_buffer = NULL;
if (!insert_buffer && !remove_buffer) {
ret = 0;
goto out_unlock;
}
if (!indio_dev->info) {
ret = -ENODEV;
goto out_unlock;
}
ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
out_unlock:
mutex_unlock(&iio_dev_opaque->mlock);
mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
iio_disable_buffers(indio_dev);
iio_buffer_deactivate_all(indio_dev);
}
static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
int ret;
bool requested_state;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
bool inlist;
ret = kstrtobool(buf, &requested_state);
if (ret < 0)
return ret;
mutex_lock(&iio_dev_opaque->mlock);
/* Find out if it is in the list */
inlist = iio_buffer_is_active(buffer);
/* Already in desired state */
if (inlist == requested_state)
goto done;
if (requested_state)
ret = __iio_update_buffers(indio_dev, buffer, NULL);
else
ret = __iio_update_buffers(indio_dev, NULL, buffer);
done:
mutex_unlock(&iio_dev_opaque->mlock);
return (ret < 0) ? ret : len;
}
static ssize_t watermark_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
return sysfs_emit(buf, "%u\n", buffer->watermark);
}
static ssize_t watermark_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
unsigned int val;
int ret;
ret = kstrtouint(buf, 10, &val);
if (ret)
return ret;
if (!val)
return -EINVAL;
mutex_lock(&iio_dev_opaque->mlock);
if (val > buffer->length) {
ret = -EINVAL;
goto out;
}
if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
goto out;
}
buffer->watermark = val;
out:
mutex_unlock(&iio_dev_opaque->mlock);
return ret ? ret : len;
}
static ssize_t data_available_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer));
}
static ssize_t direction_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
switch (buffer->direction) {
case IIO_BUFFER_DIRECTION_IN:
return sysfs_emit(buf, "in\n");
case IIO_BUFFER_DIRECTION_OUT:
return sysfs_emit(buf, "out\n");
default:
return -EINVAL;
}
}
static DEVICE_ATTR_RW(length);
static struct device_attribute dev_attr_length_ro = __ATTR_RO(length);
static DEVICE_ATTR_RW(enable);
static DEVICE_ATTR_RW(watermark);
static struct device_attribute dev_attr_watermark_ro = __ATTR_RO(watermark);
static DEVICE_ATTR_RO(data_available);
static DEVICE_ATTR_RO(direction);
/*
* When adding new attributes here, put them at the end, at least until
* the code that handles the length/length_ro & watermark/watermark_ro
* assignments gets cleaned up. Otherwise these can create some weird
* duplicate attributes errors under some setups.
*/
static struct attribute *iio_buffer_attrs[] = {
&dev_attr_length.attr,
&dev_attr_enable.attr,
&dev_attr_watermark.attr,
&dev_attr_data_available.attr,
&dev_attr_direction.attr,
};
#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer,
struct attribute *attr)
{
struct device_attribute *dattr = to_dev_attr(attr);
struct iio_dev_attr *iio_attr;
iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
if (!iio_attr)
return NULL;
iio_attr->buffer = buffer;
memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr));
iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL);
if (!iio_attr->dev_attr.attr.name) {
kfree(iio_attr);
return NULL;
}
sysfs_attr_init(&iio_attr->dev_attr.attr);
list_add(&iio_attr->l, &buffer->buffer_attr_list);
return &iio_attr->dev_attr.attr;
}
static int iio_buffer_register_legacy_sysfs_groups(struct iio_dev *indio_dev,
struct attribute **buffer_attrs,
int buffer_attrcount,
int scan_el_attrcount)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct attribute_group *group;
struct attribute **attrs;
int ret;
attrs = kcalloc(buffer_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
if (!attrs)
return -ENOMEM;
memcpy(attrs, buffer_attrs, buffer_attrcount * sizeof(*attrs));
group = &iio_dev_opaque->legacy_buffer_group;
group->attrs = attrs;
group->name = "buffer";
ret = iio_device_register_sysfs_group(indio_dev, group);
if (ret)
goto error_free_buffer_attrs;
attrs = kcalloc(scan_el_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
if (!attrs) {
ret = -ENOMEM;
goto error_free_buffer_attrs;
}
memcpy(attrs, &buffer_attrs[buffer_attrcount],
scan_el_attrcount * sizeof(*attrs));
group = &iio_dev_opaque->legacy_scan_el_group;
group->attrs = attrs;
group->name = "scan_elements";
ret = iio_device_register_sysfs_group(indio_dev, group);
if (ret)
goto error_free_scan_el_attrs;
return 0;
error_free_scan_el_attrs:
kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
error_free_buffer_attrs:
kfree(iio_dev_opaque->legacy_buffer_group.attrs);
return ret;
}
static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
kfree(iio_dev_opaque->legacy_buffer_group.attrs);
kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
}
static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
{
struct iio_dev_buffer_pair *ib = filep->private_data;
struct iio_dev *indio_dev = ib->indio_dev;
struct iio_buffer *buffer = ib->buffer;
wake_up(&buffer->pollq);
kfree(ib);
clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
iio_device_put(indio_dev);
return 0;
}
static const struct file_operations iio_buffer_chrdev_fileops = {
.owner = THIS_MODULE,
.llseek = noop_llseek,
.read = iio_buffer_read,
.write = iio_buffer_write,
.poll = iio_buffer_poll,
.release = iio_buffer_chrdev_release,
};
static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
int __user *ival = (int __user *)arg;
struct iio_dev_buffer_pair *ib;
struct iio_buffer *buffer;
int fd, idx, ret;
if (copy_from_user(&idx, ival, sizeof(idx)))
return -EFAULT;
if (idx >= iio_dev_opaque->attached_buffers_cnt)
return -ENODEV;
iio_device_get(indio_dev);
buffer = iio_dev_opaque->attached_buffers[idx];
if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) {
ret = -EBUSY;
goto error_iio_dev_put;
}
ib = kzalloc(sizeof(*ib), GFP_KERNEL);
if (!ib) {
ret = -ENOMEM;
goto error_clear_busy_bit;
}
ib->indio_dev = indio_dev;
ib->buffer = buffer;
fd = anon_inode_getfd("iio:buffer", &iio_buffer_chrdev_fileops,
ib, O_RDWR | O_CLOEXEC);
if (fd < 0) {
ret = fd;
goto error_free_ib;
}
if (copy_to_user(ival, &fd, sizeof(fd))) {
/*
* "Leak" the fd, as there's not much we can do about this
* anyway. 'fd' might have been closed already, as
* anon_inode_getfd() called fd_install() on it, which made
* it reachable by userland.
*
* Instead of allowing a malicious user to play tricks with
* us, rely on the process exit path to do any necessary
* cleanup, as in releasing the file, if still needed.
*/
return -EFAULT;
}
return 0;
error_free_ib:
kfree(ib);
error_clear_busy_bit:
clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
error_iio_dev_put:
iio_device_put(indio_dev);
return ret;
}
static long iio_device_buffer_ioctl(struct iio_dev *indio_dev, struct file *filp,
unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case IIO_BUFFER_GET_FD_IOCTL:
return iio_device_buffer_getfd(indio_dev, arg);
default:
return IIO_IOCTL_UNHANDLED;
}
}
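/*
 * Editor's sketch (not part of the original file): obtaining a dedicated fd
 * for buffer index 1 through the ioctl handled above. The device path, the
 * buffer index and the uapi header location are assumptions.
 */
#if 0	/* illustrative user-space code, never built with the kernel */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/iio/buffer.h>	/* IIO_BUFFER_GET_FD_IOCTL (uapi) */

int main(void)
{
	int dev_fd = open("/dev/iio:device0", O_RDONLY);
	int buf_fd = 1;		/* in: buffer index, out: new anon fd */

	if (dev_fd < 0)
		return 1;
	if (ioctl(dev_fd, IIO_BUFFER_GET_FD_IOCTL, &buf_fd) < 0)
		return 1;
	/* buf_fd now reads/polls buffer1 via the chrdev fileops above */
	close(dev_fd);
	close(buf_fd);
	return 0;
}
#endif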
static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
struct iio_dev *indio_dev,
int index)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_dev_attr *p;
const struct iio_dev_attr *id_attr;
struct attribute **attr;
int ret, i, attrn, scan_el_attrcount, buffer_attrcount;
const struct iio_chan_spec *channels;
buffer_attrcount = 0;
if (buffer->attrs) {
while (buffer->attrs[buffer_attrcount])
buffer_attrcount++;
}
buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);
scan_el_attrcount = 0;
INIT_LIST_HEAD(&buffer->buffer_attr_list);
channels = indio_dev->channels;
if (channels) {
/* new magic */
for (i = 0; i < indio_dev->num_channels; i++) {
if (channels[i].scan_index < 0)
continue;
/* Verify that sample bits fit into storage */
if (channels[i].scan_type.storagebits <
channels[i].scan_type.realbits +
channels[i].scan_type.shift) {
dev_err(&indio_dev->dev,
"Channel %d storagebits (%d) < shifted realbits (%d + %d)\n",
i, channels[i].scan_type.storagebits,
channels[i].scan_type.realbits,
channels[i].scan_type.shift);
ret = -EINVAL;
goto error_cleanup_dynamic;
}
ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
&channels[i]);
if (ret < 0)
goto error_cleanup_dynamic;
scan_el_attrcount += ret;
if (channels[i].type == IIO_TIMESTAMP)
iio_dev_opaque->scan_index_timestamp =
channels[i].scan_index;
}
if (indio_dev->masklength && !buffer->scan_mask) {
buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
GFP_KERNEL);
if (!buffer->scan_mask) {
ret = -ENOMEM;
goto error_cleanup_dynamic;
}
}
}
attrn = buffer_attrcount + scan_el_attrcount;
attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL);
if (!attr) {
ret = -ENOMEM;
goto error_free_scan_mask;
}
memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
if (!buffer->access->set_length)
attr[0] = &dev_attr_length_ro.attr;
if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
attr[2] = &dev_attr_watermark_ro.attr;
if (buffer->attrs)
for (i = 0, id_attr = buffer->attrs[i];
(id_attr = buffer->attrs[i]); i++)
attr[ARRAY_SIZE(iio_buffer_attrs) + i] =
(struct attribute *)&id_attr->dev_attr.attr;
buffer->buffer_group.attrs = attr;
for (i = 0; i < buffer_attrcount; i++) {
struct attribute *wrapped;
wrapped = iio_buffer_wrap_attr(buffer, attr[i]);
if (!wrapped) {
ret = -ENOMEM;
goto error_free_buffer_attrs;
}
attr[i] = wrapped;
}
attrn = 0;
list_for_each_entry(p, &buffer->buffer_attr_list, l)
attr[attrn++] = &p->dev_attr.attr;
buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index);
if (!buffer->buffer_group.name) {
ret = -ENOMEM;
goto error_free_buffer_attrs;
}
ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group);
if (ret)
goto error_free_buffer_attr_group_name;
/* we only need to register the legacy groups for the first buffer */
if (index > 0)
return 0;
ret = iio_buffer_register_legacy_sysfs_groups(indio_dev, attr,
buffer_attrcount,
scan_el_attrcount);
if (ret)
goto error_free_buffer_attr_group_name;
return 0;
error_free_buffer_attr_group_name:
kfree(buffer->buffer_group.name);
error_free_buffer_attrs:
kfree(buffer->buffer_group.attrs);
error_free_scan_mask:
bitmap_free(buffer->scan_mask);
error_cleanup_dynamic:
iio_free_chan_devattr_list(&buffer->buffer_attr_list);
return ret;
}
static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer,
struct iio_dev *indio_dev,
int index)
{
if (index == 0)
iio_buffer_unregister_legacy_sysfs_groups(indio_dev);
bitmap_free(buffer->scan_mask);
kfree(buffer->buffer_group.name);
kfree(buffer->buffer_group.attrs);
iio_free_chan_devattr_list(&buffer->buffer_attr_list);
}
int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
const struct iio_chan_spec *channels;
struct iio_buffer *buffer;
int ret, i, idx;
size_t sz;
channels = indio_dev->channels;
if (channels) {
int ml = indio_dev->masklength;
for (i = 0; i < indio_dev->num_channels; i++)
ml = max(ml, channels[i].scan_index + 1);
indio_dev->masklength = ml;
}
if (!iio_dev_opaque->attached_buffers_cnt)
return 0;
for (idx = 0; idx < iio_dev_opaque->attached_buffers_cnt; idx++) {
buffer = iio_dev_opaque->attached_buffers[idx];
ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, idx);
if (ret)
goto error_unwind_sysfs_and_mask;
}
sz = sizeof(*iio_dev_opaque->buffer_ioctl_handler);
iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
if (!iio_dev_opaque->buffer_ioctl_handler) {
ret = -ENOMEM;
goto error_unwind_sysfs_and_mask;
}
iio_dev_opaque->buffer_ioctl_handler->ioctl = iio_device_buffer_ioctl;
iio_device_ioctl_handler_register(indio_dev,
iio_dev_opaque->buffer_ioctl_handler);
return 0;
error_unwind_sysfs_and_mask:
while (idx--) {
buffer = iio_dev_opaque->attached_buffers[idx];
__iio_buffer_free_sysfs_and_mask(buffer, indio_dev, idx);
}
return ret;
}
void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_buffer *buffer;
int i;
if (!iio_dev_opaque->attached_buffers_cnt)
return;
iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler);
kfree(iio_dev_opaque->buffer_ioctl_handler);
for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) {
buffer = iio_dev_opaque->attached_buffers[i];
__iio_buffer_free_sysfs_and_mask(buffer, indio_dev, i);
}
}
/**
* iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
* @indio_dev: the iio device
* @mask: scan mask to be checked
*
* Return true if exactly one bit is set in the scan mask, false otherwise. It
* can be used for devices where only one channel can be active for sampling at
* a time.
*/
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
const unsigned long *mask)
{
return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
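/*
 * Editor's sketch (hypothetical driver-side code, not part of this file):
 * a device that can only sample one channel at a time plugs the helper above
 * into its buffer setup ops, which iio_validate_scan_mask() consults when a
 * scan element is enabled.
 */
#if 0	/* illustrative driver-side code */
static const struct iio_buffer_setup_ops example_setup_ops = {
	.validate_scan_mask = iio_validate_scan_mask_onehot,
};
#endif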
static const void *iio_demux(struct iio_buffer *buffer,
const void *datain)
{
struct iio_demux_table *t;
if (list_empty(&buffer->demux_list))
return datain;
list_for_each_entry(t, &buffer->demux_list, l)
memcpy(buffer->demux_bounce + t->to,
datain + t->from, t->length);
return buffer->demux_bounce;
}
static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
const void *dataout = iio_demux(buffer, data);
int ret;
ret = buffer->access->store_to(buffer, dataout);
if (ret)
return ret;
/*
* We can't just test for watermark to decide if we wake the poll queue
* because read may request less samples than the watermark.
*/
wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
return 0;
}
/**
* iio_push_to_buffers() - push to a registered buffer.
* @indio_dev: iio_dev structure for device.
* @data: Full scan.
*/
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
int ret;
struct iio_buffer *buf;
list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
ret = iio_push_to_buffer(buf, data);
if (ret < 0)
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
/**
* iio_push_to_buffers_with_ts_unaligned() - push to registered buffer,
* no alignment or space requirements.
* @indio_dev: iio_dev structure for device.
* @data: channel data excluding the timestamp.
* @data_sz: size of data.
* @timestamp: timestamp for the sample data.
*
* This special variant of iio_push_to_buffers_with_timestamp() does
* not require space for the timestamp, or 8 byte alignment of data.
* It does however require an allocation on first call and additional
* copies on all calls, so should be avoided if possible.
*/
int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
const void *data,
size_t data_sz,
int64_t timestamp)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
/*
* Conservative estimate - we can always safely copy the minimum
* of either the data provided or the length of the destination buffer.
* This relaxed limit allows the calling drivers to be lax about
* tracking the size of the data they are pushing, at the cost of
* unnecessary copying of padding.
*/
data_sz = min_t(size_t, indio_dev->scan_bytes, data_sz);
if (iio_dev_opaque->bounce_buffer_size != indio_dev->scan_bytes) {
void *bb;
bb = devm_krealloc(&indio_dev->dev,
iio_dev_opaque->bounce_buffer,
indio_dev->scan_bytes, GFP_KERNEL);
if (!bb)
return -ENOMEM;
iio_dev_opaque->bounce_buffer = bb;
iio_dev_opaque->bounce_buffer_size = indio_dev->scan_bytes;
}
memcpy(iio_dev_opaque->bounce_buffer, data, data_sz);
return iio_push_to_buffers_with_timestamp(indio_dev,
iio_dev_opaque->bounce_buffer,
timestamp);
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers_with_ts_unaligned);
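/*
 * Editor's sketch (hypothetical driver code): pushing an oddly sized sample
 * block together with a timestamp. The handler name and the 3-byte sample
 * layout are assumptions; the helper copies the data into the device's
 * bounce buffer as implemented above.
 */
#if 0	/* illustrative driver-side code */
static irqreturn_t example_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	u8 example_buf[3];	/* e.g. one 24-bit sample, not 8-byte aligned */

	/* ... fill example_buf from the hardware ... */
	iio_push_to_buffers_with_ts_unaligned(indio_dev, example_buf,
					      sizeof(example_buf),
					      iio_get_time_ns(indio_dev));
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}
#endif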
/**
* iio_buffer_release() - Free a buffer's resources
* @ref: Pointer to the kref embedded in the iio_buffer struct
*
* This function is called when the last reference to the buffer has been
* dropped. It will typically free all resources allocated by the buffer. Do not
* call this function manually; always use iio_buffer_put() when done using a
* buffer.
*/
static void iio_buffer_release(struct kref *ref)
{
struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
buffer->access->release(buffer);
}
/**
* iio_buffer_get() - Grab a reference to the buffer
* @buffer: The buffer to grab a reference for, may be NULL
*
* Returns the pointer to the buffer that was passed into the function.
*/
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
if (buffer)
kref_get(&buffer->ref);
return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);
/**
* iio_buffer_put() - Release the reference to the buffer
* @buffer: The buffer to release the reference for, may be NULL
*/
void iio_buffer_put(struct iio_buffer *buffer)
{
if (buffer)
kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
/**
* iio_device_attach_buffer - Attach a buffer to an IIO device
* @indio_dev: The device the buffer should be attached to
* @buffer: The buffer to attach to the device
*
* Return 0 if successful, negative if error.
*
* This function attaches a buffer to an IIO device. The buffer stays attached to
* the device until the device is freed. For legacy reasons, the first attached
* buffer will also be assigned to 'indio_dev->buffer'.
* The array allocated here will be freed via the iio_device_detach_buffers()
* call which is handled by the iio_device_free().
*/
int iio_device_attach_buffer(struct iio_dev *indio_dev,
struct iio_buffer *buffer)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers;
unsigned int cnt = iio_dev_opaque->attached_buffers_cnt;
cnt++;
new = krealloc(old, sizeof(*new) * cnt, GFP_KERNEL);
if (!new)
return -ENOMEM;
iio_dev_opaque->attached_buffers = new;
buffer = iio_buffer_get(buffer);
/* first buffer is legacy; attach it to the IIO device directly */
if (!indio_dev->buffer)
indio_dev->buffer = buffer;
iio_dev_opaque->attached_buffers[cnt - 1] = buffer;
iio_dev_opaque->attached_buffers_cnt = cnt;
return 0;
}
EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
| linux-master | drivers/iio/industrialio-buffer.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Cogent Embedded, Inc.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/iio/iio.h>
#include <linux/iio/triggered_event.h>
#include <linux/iio/trigger_consumer.h>
/**
* iio_triggered_event_setup() - Setup pollfunc_event for triggered event
* @indio_dev: IIO device structure
* @h: Function which will be used as pollfunc_event top half
* @thread: Function which will be used as pollfunc_event bottom half
*
* This function combines some common tasks which will normally be performed
* when setting up a triggered event. It will allocate the pollfunc_event and
* set mode to use it for triggered event.
*
* Before calling this function the indio_dev structure should already be
* completely initialized, but not yet registered. In practice this means that
* this function should be called right before iio_device_register().
*
* To free the resources allocated by this function call
* iio_triggered_event_cleanup().
*/
int iio_triggered_event_setup(struct iio_dev *indio_dev,
irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p))
{
indio_dev->pollfunc_event = iio_alloc_pollfunc(h,
thread,
IRQF_ONESHOT,
indio_dev,
"%s_consumer%d",
indio_dev->name,
iio_device_id(indio_dev));
if (indio_dev->pollfunc_event == NULL)
return -ENOMEM;
/* Flag that events polling is possible */
indio_dev->modes |= INDIO_EVENT_TRIGGERED;
return 0;
}
EXPORT_SYMBOL(iio_triggered_event_setup);
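/*
 * Editor's sketch (hypothetical probe-time usage, not part of this file):
 * wiring up triggered events right before registration, as the kerneldoc
 * above describes. Both handler names are placeholders.
 */
#if 0	/* illustrative driver-side code */
static irqreturn_t example_event_top_half(int irq, void *p);
static irqreturn_t example_event_thread(int irq, void *p);

static int example_probe_tail(struct iio_dev *indio_dev)
{
	int ret;

	ret = iio_triggered_event_setup(indio_dev, example_event_top_half,
					example_event_thread);
	if (ret)
		return ret;

	ret = iio_device_register(indio_dev);
	if (ret)
		iio_triggered_event_cleanup(indio_dev);
	return ret;
}
#endif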
/**
* iio_triggered_event_cleanup() - Free resources allocated by iio_triggered_event_setup()
* @indio_dev: IIO device structure
*/
void iio_triggered_event_cleanup(struct iio_dev *indio_dev)
{
indio_dev->modes &= ~INDIO_EVENT_TRIGGERED;
iio_dealloc_pollfunc(indio_dev->pollfunc_event);
}
EXPORT_SYMBOL(iio_triggered_event_cleanup);
MODULE_AUTHOR("Vladimir Barinov");
MODULE_DESCRIPTION("IIO helper functions for setting up triggered events");
MODULE_LICENSE("GPL");
| linux-master | drivers/iio/industrialio-triggered-event.c |
// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core in kernel channel mapping
*
* Copyright (c) 2011 Jonathan Cameron
*/
#include <linux/err.h>
#include <linux/export.h>
#include <linux/minmax.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>
struct iio_map_internal {
struct iio_dev *indio_dev;
struct iio_map *map;
struct list_head l;
};
static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);
static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
{
int ret = -ENODEV;
struct iio_map_internal *mapi, *next;
list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
if (indio_dev == mapi->indio_dev) {
list_del(&mapi->l);
kfree(mapi);
ret = 0;
}
}
return ret;
}
int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
int i = 0, ret = 0;
struct iio_map_internal *mapi;
if (!maps)
return 0;
mutex_lock(&iio_map_list_lock);
while (maps[i].consumer_dev_name) {
mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
if (!mapi) {
ret = -ENOMEM;
goto error_ret;
}
mapi->map = &maps[i];
mapi->indio_dev = indio_dev;
list_add_tail(&mapi->l, &iio_map_list);
i++;
}
error_ret:
if (ret)
iio_map_array_unregister_locked(indio_dev);
mutex_unlock(&iio_map_list_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
/*
* Remove all map entries associated with the given iio device
*/
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
int ret;
mutex_lock(&iio_map_list_lock);
ret = iio_map_array_unregister_locked(indio_dev);
mutex_unlock(&iio_map_list_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);
static void iio_map_array_unregister_cb(void *indio_dev)
{
iio_map_array_unregister(indio_dev);
}
int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev, struct iio_map *maps)
{
int ret;
ret = iio_map_array_register(indio_dev, maps);
if (ret)
return ret;
return devm_add_action_or_reset(dev, iio_map_array_unregister_cb, indio_dev);
}
EXPORT_SYMBOL_GPL(devm_iio_map_array_register);
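/*
 * Illustrative sketch (not part of this file): an ADC provider can publish a
 * map so that a consumer device named "example-consumer" finds the channel
 * labelled "VIN0" under the consumer-side name "vin".  All three names are
 * assumptions made up for the example.
 */
static struct iio_map example_adc_maps[] __maybe_unused = {
	{
		.adc_channel_label = "VIN0",
		.consumer_dev_name = "example-consumer",
		.consumer_channel = "vin",
	},
	{ /* sentinel: consumer_dev_name == NULL terminates the array */ }
};
/*
 * The provider's probe() would then register it, typically via
 *
 *	ret = devm_iio_map_array_register(dev, indio_dev, example_adc_maps);
 */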
static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
int i;
const struct iio_chan_spec *chan = NULL;
for (i = 0; i < indio_dev->num_channels; i++)
if (indio_dev->channels[i].datasheet_name &&
strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
chan = &indio_dev->channels[i];
break;
}
return chan;
}
/**
* __fwnode_iio_simple_xlate - translate iiospec to the IIO channel index
* @indio_dev: pointer to the iio_dev structure
* @iiospec: IIO specifier as found in the device tree
*
 * This is a simple translation function, suitable for most 1:1 mapped
* channels in IIO chips. This function performs only one sanity check:
* whether IIO index is less than num_channels (that is specified in the
* iio_dev).
*/
static int __fwnode_iio_simple_xlate(struct iio_dev *indio_dev,
const struct fwnode_reference_args *iiospec)
{
if (!iiospec->nargs)
return 0;
if (iiospec->args[0] >= indio_dev->num_channels) {
dev_err(&indio_dev->dev, "invalid channel index %llu\n",
iiospec->args[0]);
return -EINVAL;
}
return iiospec->args[0];
}
static int __fwnode_iio_channel_get(struct iio_channel *channel,
struct fwnode_handle *fwnode, int index)
{
struct fwnode_reference_args iiospec;
struct device *idev;
struct iio_dev *indio_dev;
int err;
err = fwnode_property_get_reference_args(fwnode, "io-channels",
"#io-channel-cells", 0,
index, &iiospec);
if (err)
return err;
idev = bus_find_device_by_fwnode(&iio_bus_type, iiospec.fwnode);
if (!idev) {
fwnode_handle_put(iiospec.fwnode);
return -EPROBE_DEFER;
}
indio_dev = dev_to_iio_dev(idev);
channel->indio_dev = indio_dev;
if (indio_dev->info->fwnode_xlate)
index = indio_dev->info->fwnode_xlate(indio_dev, &iiospec);
else
index = __fwnode_iio_simple_xlate(indio_dev, &iiospec);
fwnode_handle_put(iiospec.fwnode);
if (index < 0)
goto err_put;
channel->channel = &indio_dev->channels[index];
return 0;
err_put:
iio_device_put(indio_dev);
return index;
}
static struct iio_channel *fwnode_iio_channel_get(struct fwnode_handle *fwnode,
int index)
{
struct iio_channel *channel;
int err;
if (index < 0)
return ERR_PTR(-EINVAL);
channel = kzalloc(sizeof(*channel), GFP_KERNEL);
if (!channel)
return ERR_PTR(-ENOMEM);
err = __fwnode_iio_channel_get(channel, fwnode, index);
if (err)
goto err_free_channel;
return channel;
err_free_channel:
kfree(channel);
return ERR_PTR(err);
}
static struct iio_channel *
__fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode, const char *name)
{
struct iio_channel *chan;
int index = 0;
/*
* For named iio channels, first look up the name in the
* "io-channel-names" property. If it cannot be found, the
* index will be an error code, and fwnode_iio_channel_get()
* will fail.
*/
if (name)
index = fwnode_property_match_string(fwnode, "io-channel-names",
name);
chan = fwnode_iio_channel_get(fwnode, index);
if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
return chan;
if (name) {
if (index >= 0) {
pr_err("ERROR: could not get IIO channel %pfw:%s(%i)\n",
fwnode, name, index);
/*
			 * In this case, we found 'name' in 'io-channel-names'
			 * but the lookup still failed, so we should not
			 * proceed with any other lookup. Hence, explicitly
			 * return -EINVAL (maybe not the best error code) so
			 * that the caller won't fall back to the system-wide
			 * map lookup.
*/
return ERR_PTR(-EINVAL);
}
/*
* If index < 0, then fwnode_property_get_reference_args() fails
* with -EINVAL or -ENOENT (ACPI case) which is expected. We
* should not proceed if we get any other error.
*/
if (PTR_ERR(chan) != -EINVAL && PTR_ERR(chan) != -ENOENT)
return chan;
} else if (PTR_ERR(chan) != -ENOENT) {
/*
		 * If !name, then we should only proceed with the lookup if
		 * fwnode_property_get_reference_args() returns -ENOENT.
*/
return chan;
}
/* so we continue the lookup */
return ERR_PTR(-ENODEV);
}
struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
const char *name)
{
struct fwnode_handle *parent;
struct iio_channel *chan;
/* Walk up the tree of devices looking for a matching iio channel */
chan = __fwnode_iio_channel_get_by_name(fwnode, name);
if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV)
return chan;
/*
* No matching IIO channel found on this node.
	 * If the parent node has an "io-channel-ranges" property,
* then we can try one of its channels.
*/
fwnode_for_each_parent_node(fwnode, parent) {
if (!fwnode_property_present(parent, "io-channel-ranges")) {
fwnode_handle_put(parent);
return ERR_PTR(-ENODEV);
}
chan = __fwnode_iio_channel_get_by_name(fwnode, name);
if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV) {
fwnode_handle_put(parent);
return chan;
}
}
return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(fwnode_iio_channel_get_by_name);
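/*
 * Firmware description sketch (illustrative only): given a consumer node
 * along the lines of
 *
 *	consumer {
 *		io-channels = <&adc 1>;
 *		io-channel-names = "vbat";
 *	};
 *
 * fwnode_iio_channel_get_by_name(fwnode, "vbat") matches "vbat" against
 * "io-channel-names" (index 0 here), fetches the corresponding <&adc 1>
 * reference from "io-channels" and lets the provider's xlate callback (or
 * __fwnode_iio_simple_xlate()) turn argument 1 into a channel index.  If
 * nothing matches locally, parents carrying "io-channel-ranges" are tried.
 */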
static struct iio_channel *fwnode_iio_channel_get_all(struct device *dev)
{
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct iio_channel *chans;
int i, mapind, nummaps = 0;
int ret;
do {
ret = fwnode_property_get_reference_args(fwnode, "io-channels",
"#io-channel-cells", 0,
nummaps, NULL);
if (ret < 0)
break;
} while (++nummaps);
if (nummaps == 0)
return ERR_PTR(-ENODEV);
/* NULL terminated array to save passing size */
chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
if (!chans)
return ERR_PTR(-ENOMEM);
/* Search for FW matches */
for (mapind = 0; mapind < nummaps; mapind++) {
ret = __fwnode_iio_channel_get(&chans[mapind], fwnode, mapind);
if (ret)
goto error_free_chans;
}
return chans;
error_free_chans:
for (i = 0; i < mapind; i++)
iio_device_put(chans[i].indio_dev);
kfree(chans);
return ERR_PTR(ret);
}
static struct iio_channel *iio_channel_get_sys(const char *name,
const char *channel_name)
{
struct iio_map_internal *c_i = NULL, *c = NULL;
struct iio_channel *channel;
int err;
if (!(name || channel_name))
return ERR_PTR(-ENODEV);
	/* first find a matching entry in the channel map */
mutex_lock(&iio_map_list_lock);
list_for_each_entry(c_i, &iio_map_list, l) {
if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
(channel_name &&
strcmp(channel_name, c_i->map->consumer_channel) != 0))
continue;
c = c_i;
iio_device_get(c->indio_dev);
break;
}
mutex_unlock(&iio_map_list_lock);
if (!c)
return ERR_PTR(-ENODEV);
channel = kzalloc(sizeof(*channel), GFP_KERNEL);
if (!channel) {
err = -ENOMEM;
goto error_no_mem;
}
channel->indio_dev = c->indio_dev;
if (c->map->adc_channel_label) {
channel->channel =
iio_chan_spec_from_name(channel->indio_dev,
c->map->adc_channel_label);
if (!channel->channel) {
err = -EINVAL;
goto error_no_chan;
}
}
return channel;
error_no_chan:
kfree(channel);
error_no_mem:
iio_device_put(c->indio_dev);
return ERR_PTR(err);
}
struct iio_channel *iio_channel_get(struct device *dev,
const char *channel_name)
{
const char *name = dev ? dev_name(dev) : NULL;
struct iio_channel *channel;
if (dev) {
channel = fwnode_iio_channel_get_by_name(dev_fwnode(dev),
channel_name);
if (!IS_ERR(channel) || PTR_ERR(channel) != -ENODEV)
return channel;
}
return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);
void iio_channel_release(struct iio_channel *channel)
{
if (!channel)
return;
iio_device_put(channel->indio_dev);
kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);
static void devm_iio_channel_free(void *iio_channel)
{
iio_channel_release(iio_channel);
}
struct iio_channel *devm_iio_channel_get(struct device *dev,
const char *channel_name)
{
struct iio_channel *channel;
int ret;
channel = iio_channel_get(dev, channel_name);
if (IS_ERR(channel))
return channel;
ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
if (ret)
return ERR_PTR(ret);
return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);
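/*
 * Illustrative consumer sketch (not part of this file): look up a named
 * channel with the device-managed getter and read it back in processed
 * units.  The channel name "battery_voltage" is an assumption for the
 * example.
 */
static int __maybe_unused example_read_battery(struct device *dev, int *val)
{
	struct iio_channel *chan;

	chan = devm_iio_channel_get(dev, "battery_voltage");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Offset and scale are applied by the core, see further below. */
	return iio_read_channel_processed(chan, val);
}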
struct iio_channel *devm_fwnode_iio_channel_get_by_name(struct device *dev,
struct fwnode_handle *fwnode,
const char *channel_name)
{
struct iio_channel *channel;
int ret;
channel = fwnode_iio_channel_get_by_name(fwnode, channel_name);
if (IS_ERR(channel))
return channel;
ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
if (ret)
return ERR_PTR(ret);
return channel;
}
EXPORT_SYMBOL_GPL(devm_fwnode_iio_channel_get_by_name);
struct iio_channel *iio_channel_get_all(struct device *dev)
{
const char *name;
struct iio_channel *chans;
struct iio_map_internal *c = NULL;
int nummaps = 0;
int mapind = 0;
int i, ret;
if (!dev)
return ERR_PTR(-EINVAL);
chans = fwnode_iio_channel_get_all(dev);
/*
* We only want to carry on if the error is -ENODEV. Anything else
* should be reported up the stack.
*/
if (!IS_ERR(chans) || PTR_ERR(chans) != -ENODEV)
return chans;
name = dev_name(dev);
mutex_lock(&iio_map_list_lock);
/* first count the matching maps */
list_for_each_entry(c, &iio_map_list, l)
if (name && strcmp(name, c->map->consumer_dev_name) != 0)
continue;
else
nummaps++;
if (nummaps == 0) {
ret = -ENODEV;
goto error_ret;
}
/* NULL terminated array to save passing size */
chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
if (!chans) {
ret = -ENOMEM;
goto error_ret;
}
/* for each map fill in the chans element */
list_for_each_entry(c, &iio_map_list, l) {
if (name && strcmp(name, c->map->consumer_dev_name) != 0)
continue;
chans[mapind].indio_dev = c->indio_dev;
chans[mapind].data = c->map->consumer_data;
chans[mapind].channel =
iio_chan_spec_from_name(chans[mapind].indio_dev,
c->map->adc_channel_label);
if (!chans[mapind].channel) {
ret = -EINVAL;
goto error_free_chans;
}
iio_device_get(chans[mapind].indio_dev);
mapind++;
}
if (mapind == 0) {
ret = -ENODEV;
goto error_free_chans;
}
mutex_unlock(&iio_map_list_lock);
return chans;
error_free_chans:
for (i = 0; i < nummaps; i++)
iio_device_put(chans[i].indio_dev);
kfree(chans);
error_ret:
mutex_unlock(&iio_map_list_lock);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);
void iio_channel_release_all(struct iio_channel *channels)
{
struct iio_channel *chan = &channels[0];
while (chan->indio_dev) {
iio_device_put(chan->indio_dev);
chan++;
}
kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);
static void devm_iio_channel_free_all(void *iio_channels)
{
iio_channel_release_all(iio_channels);
}
struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
struct iio_channel *channels;
int ret;
channels = iio_channel_get_all(dev);
if (IS_ERR(channels))
return channels;
ret = devm_add_action_or_reset(dev, devm_iio_channel_free_all,
channels);
if (ret)
return ERR_PTR(ret);
return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
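/*
 * Illustrative sketch (not part of this file): the array returned by
 * iio_channel_get_all()/devm_iio_channel_get_all() is terminated by an entry
 * whose indio_dev pointer is NULL, so a consumer can walk it like this.
 */
static void __maybe_unused example_walk_channels(struct iio_channel *chans)
{
	struct iio_channel *chan;
	int val;

	for (chan = chans; chan->indio_dev; chan++)
		if (!iio_read_channel_raw(chan, &val))
			dev_dbg(&chan->indio_dev->dev, "raw value %d\n", val);
}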
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
enum iio_chan_info_enum info)
{
int unused;
int vals[INDIO_MAX_RAW_ELEMENTS];
int ret;
int val_len = 2;
if (!val2)
val2 = &unused;
if (!iio_channel_has_info(chan->channel, info))
return -EINVAL;
if (chan->indio_dev->info->read_raw_multi) {
ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
chan->channel, INDIO_MAX_RAW_ELEMENTS,
vals, &val_len, info);
*val = vals[0];
*val2 = vals[1];
} else {
ret = chan->indio_dev->info->read_raw(chan->indio_dev,
chan->channel, val, val2, info);
}
return ret;
}
int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret;
mutex_lock(&iio_dev_opaque->info_exist_lock);
if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);
int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret;
mutex_lock(&iio_dev_opaque->info_exist_lock);
if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
err_unlock:
mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
int raw, int *processed,
unsigned int scale)
{
int scale_type, scale_val, scale_val2;
int offset_type, offset_val, offset_val2;
s64 raw64 = raw;
offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
IIO_CHAN_INFO_OFFSET);
if (offset_type >= 0) {
switch (offset_type) {
case IIO_VAL_INT:
break;
case IIO_VAL_INT_PLUS_MICRO:
case IIO_VAL_INT_PLUS_NANO:
/*
* Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO
			 * implicitly truncate the offset to its integer form.
*/
break;
case IIO_VAL_FRACTIONAL:
offset_val /= offset_val2;
break;
case IIO_VAL_FRACTIONAL_LOG2:
offset_val >>= offset_val2;
break;
default:
return -EINVAL;
}
raw64 += offset_val;
}
scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
IIO_CHAN_INFO_SCALE);
if (scale_type < 0) {
/*
* If no channel scaling is available apply consumer scale to
* raw value and return.
*/
*processed = raw * scale;
return 0;
}
switch (scale_type) {
case IIO_VAL_INT:
*processed = raw64 * scale_val * scale;
break;
case IIO_VAL_INT_PLUS_MICRO:
if (scale_val2 < 0)
*processed = -raw64 * scale_val;
else
*processed = raw64 * scale_val;
*processed += div_s64(raw64 * (s64)scale_val2 * scale,
1000000LL);
break;
case IIO_VAL_INT_PLUS_NANO:
if (scale_val2 < 0)
*processed = -raw64 * scale_val;
else
*processed = raw64 * scale_val;
*processed += div_s64(raw64 * (s64)scale_val2 * scale,
1000000000LL);
break;
case IIO_VAL_FRACTIONAL:
*processed = div_s64(raw64 * (s64)scale_val * scale,
scale_val2);
break;
case IIO_VAL_FRACTIONAL_LOG2:
*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
break;
default:
return -EINVAL;
}
return 0;
}
int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
int *processed, unsigned int scale)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret;
mutex_lock(&iio_dev_opaque->info_exist_lock);
if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
scale);
err_unlock:
mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
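/*
 * Worked example with made-up numbers: for a channel reporting an
 * IIO_VAL_FRACTIONAL scale of 1200/1024 and no offset, a raw reading of 512
 * with a consumer scale of 1000 gives
 *
 *	processed = 512 * 1200 * 1000 / 1024 = 600000
 *
 * i.e. the raw value converted into the consumer's requested unit.
 */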
int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
enum iio_chan_info_enum attribute)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret;
mutex_lock(&iio_dev_opaque->info_exist_lock);
if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
ret = iio_channel_read(chan, val, val2, attribute);
err_unlock:
mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);
int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);
int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
unsigned int scale)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret;
mutex_lock(&iio_dev_opaque->info_exist_lock);
if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
ret = iio_channel_read(chan, val, NULL,
IIO_CHAN_INFO_PROCESSED);
if (ret < 0)
goto err_unlock;
*val *= scale;
} else {
ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
if (ret < 0)
goto err_unlock;
ret = iio_convert_raw_to_processed_unlocked(chan, *val, val,
scale);
}
err_unlock:
mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed_scale);
int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
/* This is just a special case with scale factor 1 */
return iio_read_channel_processed_scale(chan, val, 1);
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);
int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);
static int iio_channel_read_avail(struct iio_channel *chan,
const int **vals, int *type, int *length,
enum iio_chan_info_enum info)
{
if (!iio_channel_has_available(chan->channel, info))
return -EINVAL;
return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
vals, type, length, info);
}
int iio_read_avail_channel_attribute(struct iio_channel *chan,
const int **vals, int *type, int *length,
enum iio_chan_info_enum attribute)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret;
mutex_lock(&iio_dev_opaque->info_exist_lock);
if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
ret = iio_channel_read_avail(chan, vals, type, length, attribute);
err_unlock:
mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);
int iio_read_avail_channel_raw(struct iio_channel *chan,
const int **vals, int *length)
{
int ret;
int type;
ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
IIO_CHAN_INFO_RAW);
if (ret >= 0 && type != IIO_VAL_INT)
/* raw values are assumed to be IIO_VAL_INT */
ret = -EINVAL;
return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
static int iio_channel_read_max(struct iio_channel *chan,
int *val, int *val2, int *type,
enum iio_chan_info_enum info)
{
const int *vals;
int length;
int ret;
ret = iio_channel_read_avail(chan, &vals, type, &length, info);
if (ret < 0)
return ret;
switch (ret) {
case IIO_AVAIL_RANGE:
switch (*type) {
case IIO_VAL_INT:
*val = vals[2];
break;
default:
*val = vals[4];
if (val2)
*val2 = vals[5];
}
return 0;
case IIO_AVAIL_LIST:
if (length <= 0)
return -EINVAL;
switch (*type) {
case IIO_VAL_INT:
*val = max_array(vals, length);
break;
default:
/* TODO: learn about max for other iio values */
return -EINVAL;
}
return 0;
default:
return -EINVAL;
}
}
int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret;
int type;
mutex_lock(&iio_dev_opaque->info_exist_lock);
if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
err_unlock:
mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);
static int iio_channel_read_min(struct iio_channel *chan,
int *val, int *val2, int *type,
enum iio_chan_info_enum info)
{
const int *vals;
int length;
int ret;
ret = iio_channel_read_avail(chan, &vals, type, &length, info);
if (ret < 0)
return ret;
switch (ret) {
case IIO_AVAIL_RANGE:
switch (*type) {
case IIO_VAL_INT:
*val = vals[0];
break;
default:
*val = vals[0];
if (val2)
*val2 = vals[1];
}
return 0;
case IIO_AVAIL_LIST:
if (length <= 0)
return -EINVAL;
switch (*type) {
case IIO_VAL_INT:
*val = min_array(vals, length);
break;
default:
/* TODO: learn about min for other iio values */
return -EINVAL;
}
return 0;
default:
return -EINVAL;
}
}
int iio_read_min_channel_raw(struct iio_channel *chan, int *val)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret;
int type;
mutex_lock(&iio_dev_opaque->info_exist_lock);
if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
ret = iio_channel_read_min(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
err_unlock:
mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iio_read_min_channel_raw);
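/*
 * Illustrative sketch (not part of this file): a consumer can query the raw
 * limits a channel advertises through its "available" information and keep
 * its own writes within them.
 */
static int __maybe_unused example_write_clamped(struct iio_channel *chan,
						int val)
{
	int raw_min, raw_max, ret;

	ret = iio_read_min_channel_raw(chan, &raw_min);
	if (ret)
		return ret;
	ret = iio_read_max_channel_raw(chan, &raw_max);
	if (ret)
		return ret;

	return iio_write_channel_raw(chan, clamp(val, raw_min, raw_max));
}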
int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret = 0;
/* Need to verify underlying driver has not gone away */
mutex_lock(&iio_dev_opaque->info_exist_lock);
if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
*type = chan->channel->type;
err_unlock:
mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);
static int iio_channel_write(struct iio_channel *chan, int val, int val2,
enum iio_chan_info_enum info)
{
return chan->indio_dev->info->write_raw(chan->indio_dev,
chan->channel, val, val2, info);
}
int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
enum iio_chan_info_enum attribute)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret;
mutex_lock(&iio_dev_opaque->info_exist_lock);
if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);
int iio_write_channel_raw(struct iio_channel *chan, int val)
{
return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
{
const struct iio_chan_spec_ext_info *ext_info;
unsigned int i = 0;
if (!chan->channel->ext_info)
return i;
for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
++i;
return i;
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);
static const struct iio_chan_spec_ext_info *
iio_lookup_ext_info(const struct iio_channel *chan, const char *attr)
{
const struct iio_chan_spec_ext_info *ext_info;
if (!chan->channel->ext_info)
return NULL;
for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
if (!strcmp(attr, ext_info->name))
return ext_info;
}
return NULL;
}
ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
const char *attr, char *buf)
{
const struct iio_chan_spec_ext_info *ext_info;
ext_info = iio_lookup_ext_info(chan, attr);
if (!ext_info)
return -EINVAL;
return ext_info->read(chan->indio_dev, ext_info->private,
chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);
ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
const char *buf, size_t len)
{
const struct iio_chan_spec_ext_info *ext_info;
ext_info = iio_lookup_ext_info(chan, attr);
if (!ext_info)
return -EINVAL;
return ext_info->write(chan->indio_dev, ext_info->private,
chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
| linux-master | drivers/iio/inkern.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* The industrial I/O core
*
* Copyright (c) 2008 Jonathan Cameron
*
* Based on elements of hwmon and input subsystems.
*/
#define pr_fmt(fmt) "iio-core: " fmt
#include <linux/anon_inodes.h>
#include <linux/cdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/property.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/events.h>
#include <linux/iio/iio-opaque.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
/* IDA to assign each registered device a unique id */
static DEFINE_IDA(iio_ida);
static dev_t iio_devt;
#define IIO_DEV_MAX 256
struct bus_type iio_bus_type = {
.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);
static struct dentry *iio_debugfs_dentry;
static const char * const iio_direction[] = {
[0] = "in",
[1] = "out",
};
static const char * const iio_chan_type_name_spec[] = {
[IIO_VOLTAGE] = "voltage",
[IIO_CURRENT] = "current",
[IIO_POWER] = "power",
[IIO_ACCEL] = "accel",
[IIO_ANGL_VEL] = "anglvel",
[IIO_MAGN] = "magn",
[IIO_LIGHT] = "illuminance",
[IIO_INTENSITY] = "intensity",
[IIO_PROXIMITY] = "proximity",
[IIO_TEMP] = "temp",
[IIO_INCLI] = "incli",
[IIO_ROT] = "rot",
[IIO_ANGL] = "angl",
[IIO_TIMESTAMP] = "timestamp",
[IIO_CAPACITANCE] = "capacitance",
[IIO_ALTVOLTAGE] = "altvoltage",
[IIO_CCT] = "cct",
[IIO_PRESSURE] = "pressure",
[IIO_HUMIDITYRELATIVE] = "humidityrelative",
[IIO_ACTIVITY] = "activity",
[IIO_STEPS] = "steps",
[IIO_ENERGY] = "energy",
[IIO_DISTANCE] = "distance",
[IIO_VELOCITY] = "velocity",
[IIO_CONCENTRATION] = "concentration",
[IIO_RESISTANCE] = "resistance",
[IIO_PH] = "ph",
[IIO_UVINDEX] = "uvindex",
[IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
[IIO_COUNT] = "count",
[IIO_INDEX] = "index",
[IIO_GRAVITY] = "gravity",
[IIO_POSITIONRELATIVE] = "positionrelative",
[IIO_PHASE] = "phase",
[IIO_MASSCONCENTRATION] = "massconcentration",
};
static const char * const iio_modifier_names[] = {
[IIO_MOD_X] = "x",
[IIO_MOD_Y] = "y",
[IIO_MOD_Z] = "z",
[IIO_MOD_X_AND_Y] = "x&y",
[IIO_MOD_X_AND_Z] = "x&z",
[IIO_MOD_Y_AND_Z] = "y&z",
[IIO_MOD_X_AND_Y_AND_Z] = "x&y&z",
[IIO_MOD_X_OR_Y] = "x|y",
[IIO_MOD_X_OR_Z] = "x|z",
[IIO_MOD_Y_OR_Z] = "y|z",
[IIO_MOD_X_OR_Y_OR_Z] = "x|y|z",
[IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)",
[IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2",
[IIO_MOD_LIGHT_BOTH] = "both",
[IIO_MOD_LIGHT_IR] = "ir",
[IIO_MOD_LIGHT_CLEAR] = "clear",
[IIO_MOD_LIGHT_RED] = "red",
[IIO_MOD_LIGHT_GREEN] = "green",
[IIO_MOD_LIGHT_BLUE] = "blue",
[IIO_MOD_LIGHT_UV] = "uv",
[IIO_MOD_LIGHT_DUV] = "duv",
[IIO_MOD_QUATERNION] = "quaternion",
[IIO_MOD_TEMP_AMBIENT] = "ambient",
[IIO_MOD_TEMP_OBJECT] = "object",
[IIO_MOD_NORTH_MAGN] = "from_north_magnetic",
[IIO_MOD_NORTH_TRUE] = "from_north_true",
[IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp",
[IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp",
[IIO_MOD_RUNNING] = "running",
[IIO_MOD_JOGGING] = "jogging",
[IIO_MOD_WALKING] = "walking",
[IIO_MOD_STILL] = "still",
[IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)",
[IIO_MOD_I] = "i",
[IIO_MOD_Q] = "q",
[IIO_MOD_CO2] = "co2",
[IIO_MOD_VOC] = "voc",
[IIO_MOD_PM1] = "pm1",
[IIO_MOD_PM2P5] = "pm2p5",
[IIO_MOD_PM4] = "pm4",
[IIO_MOD_PM10] = "pm10",
[IIO_MOD_ETHANOL] = "ethanol",
[IIO_MOD_H2] = "h2",
[IIO_MOD_O2] = "o2",
[IIO_MOD_LINEAR_X] = "linear_x",
[IIO_MOD_LINEAR_Y] = "linear_y",
[IIO_MOD_LINEAR_Z] = "linear_z",
[IIO_MOD_PITCH] = "pitch",
[IIO_MOD_YAW] = "yaw",
[IIO_MOD_ROLL] = "roll",
};
/* relies on pairs of these shared then separate */
static const char * const iio_chan_info_postfix[] = {
[IIO_CHAN_INFO_RAW] = "raw",
[IIO_CHAN_INFO_PROCESSED] = "input",
[IIO_CHAN_INFO_SCALE] = "scale",
[IIO_CHAN_INFO_OFFSET] = "offset",
[IIO_CHAN_INFO_CALIBSCALE] = "calibscale",
[IIO_CHAN_INFO_CALIBBIAS] = "calibbias",
[IIO_CHAN_INFO_PEAK] = "peak_raw",
[IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale",
[IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw",
[IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw",
[IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY]
= "filter_low_pass_3db_frequency",
[IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY]
= "filter_high_pass_3db_frequency",
[IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency",
[IIO_CHAN_INFO_FREQUENCY] = "frequency",
[IIO_CHAN_INFO_PHASE] = "phase",
[IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain",
[IIO_CHAN_INFO_HYSTERESIS] = "hysteresis",
[IIO_CHAN_INFO_HYSTERESIS_RELATIVE] = "hysteresis_relative",
[IIO_CHAN_INFO_INT_TIME] = "integration_time",
[IIO_CHAN_INFO_ENABLE] = "en",
[IIO_CHAN_INFO_CALIBHEIGHT] = "calibheight",
[IIO_CHAN_INFO_CALIBWEIGHT] = "calibweight",
[IIO_CHAN_INFO_DEBOUNCE_COUNT] = "debounce_count",
[IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time",
[IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity",
[IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio",
[IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type",
[IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient",
[IIO_CHAN_INFO_ZEROPOINT] = "zeropoint",
};
/**
* iio_device_id() - query the unique ID for the device
* @indio_dev: Device structure whose ID is being queried
*
* The IIO device ID is a unique index used for example for the naming
* of the character device /dev/iio\:device[ID].
*
* Returns: Unique ID for the device.
*/
int iio_device_id(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
return iio_dev_opaque->id;
}
EXPORT_SYMBOL_GPL(iio_device_id);
/**
* iio_buffer_enabled() - helper function to test if the buffer is enabled
* @indio_dev: IIO device structure for device
*
* Returns: True, if the buffer is enabled.
*/
bool iio_buffer_enabled(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
return iio_dev_opaque->currentmode &
(INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE |
INDIO_BUFFER_TRIGGERED);
}
EXPORT_SYMBOL_GPL(iio_buffer_enabled);
#if defined(CONFIG_DEBUG_FS)
/*
* There's also a CONFIG_DEBUG_FS guard in include/linux/iio/iio.h for
* iio_get_debugfs_dentry() to make it inline if CONFIG_DEBUG_FS is undefined
*/
struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
return iio_dev_opaque->debugfs_dentry;
}
EXPORT_SYMBOL_GPL(iio_get_debugfs_dentry);
#endif
/**
* iio_find_channel_from_si() - get channel from its scan index
* @indio_dev: device
* @si: scan index to match
*
* Returns:
* Constant pointer to iio_chan_spec, if scan index matches, NULL on failure.
*/
const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si)
{
int i;
for (i = 0; i < indio_dev->num_channels; i++)
if (indio_dev->channels[i].scan_index == si)
return &indio_dev->channels[i];
return NULL;
}
/* This turns up an awful lot */
ssize_t iio_read_const_attr(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n", to_iio_const_attr(attr)->string);
}
EXPORT_SYMBOL(iio_read_const_attr);
/**
* iio_device_set_clock() - Set current timestamping clock for the device
* @indio_dev: IIO device structure containing the device
* @clock_id: timestamping clock POSIX identifier to set.
*
* Returns: 0 on success, or a negative error code.
*/
int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
{
int ret;
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
const struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
ret = mutex_lock_interruptible(&iio_dev_opaque->mlock);
if (ret)
return ret;
if ((ev_int && iio_event_enabled(ev_int)) ||
iio_buffer_enabled(indio_dev)) {
mutex_unlock(&iio_dev_opaque->mlock);
return -EBUSY;
}
iio_dev_opaque->clock_id = clock_id;
mutex_unlock(&iio_dev_opaque->mlock);
return 0;
}
EXPORT_SYMBOL(iio_device_set_clock);
/**
* iio_device_get_clock() - Retrieve current timestamping clock for the device
* @indio_dev: IIO device structure containing the device
*
* Returns: Clock ID of the current timestamping clock for the device.
*/
clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
return iio_dev_opaque->clock_id;
}
EXPORT_SYMBOL(iio_device_get_clock);
/**
* iio_get_time_ns() - utility function to get a time stamp for events etc
* @indio_dev: device
*
* Returns: Timestamp of the event in nanoseconds.
*/
s64 iio_get_time_ns(const struct iio_dev *indio_dev)
{
struct timespec64 tp;
switch (iio_device_get_clock(indio_dev)) {
case CLOCK_REALTIME:
return ktime_get_real_ns();
case CLOCK_MONOTONIC:
return ktime_get_ns();
case CLOCK_MONOTONIC_RAW:
return ktime_get_raw_ns();
case CLOCK_REALTIME_COARSE:
return ktime_to_ns(ktime_get_coarse_real());
case CLOCK_MONOTONIC_COARSE:
ktime_get_coarse_ts64(&tp);
return timespec64_to_ns(&tp);
case CLOCK_BOOTTIME:
return ktime_get_boottime_ns();
case CLOCK_TAI:
return ktime_get_clocktai_ns();
default:
BUG();
}
}
EXPORT_SYMBOL(iio_get_time_ns);
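/*
 * Usage sketch (illustrative only): drivers timestamp events and buffered
 * samples with this helper so that userspace sees the clock selected via
 * current_timestamp_clock, e.g.
 *
 *	iio_push_event(indio_dev,
 *		       IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
 *					    IIO_EV_TYPE_THRESH,
 *					    IIO_EV_DIR_RISING),
 *		       iio_get_time_ns(indio_dev));
 */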
static int __init iio_init(void)
{
int ret;
/* Register sysfs bus */
ret = bus_register(&iio_bus_type);
if (ret < 0) {
pr_err("could not register bus type\n");
goto error_nothing;
}
ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
if (ret < 0) {
pr_err("failed to allocate char dev region\n");
goto error_unregister_bus_type;
}
iio_debugfs_dentry = debugfs_create_dir("iio", NULL);
return 0;
error_unregister_bus_type:
bus_unregister(&iio_bus_type);
error_nothing:
return ret;
}
static void __exit iio_exit(void)
{
if (iio_devt)
unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
bus_unregister(&iio_bus_type);
debugfs_remove(iio_debugfs_dentry);
}
#if defined(CONFIG_DEBUG_FS)
static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct iio_dev *indio_dev = file->private_data;
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
unsigned int val = 0;
int ret;
if (*ppos > 0)
return simple_read_from_buffer(userbuf, count, ppos,
iio_dev_opaque->read_buf,
iio_dev_opaque->read_buf_len);
ret = indio_dev->info->debugfs_reg_access(indio_dev,
iio_dev_opaque->cached_reg_addr,
0, &val);
if (ret) {
dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
return ret;
}
iio_dev_opaque->read_buf_len = snprintf(iio_dev_opaque->read_buf,
sizeof(iio_dev_opaque->read_buf),
"0x%X\n", val);
return simple_read_from_buffer(userbuf, count, ppos,
iio_dev_opaque->read_buf,
iio_dev_opaque->read_buf_len);
}
static ssize_t iio_debugfs_write_reg(struct file *file,
const char __user *userbuf, size_t count, loff_t *ppos)
{
struct iio_dev *indio_dev = file->private_data;
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
unsigned int reg, val;
char buf[80];
int ret;
count = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, userbuf, count))
return -EFAULT;
buf[count] = 0;
	ret = sscanf(buf, "%i %i", &reg, &val);
switch (ret) {
case 1:
iio_dev_opaque->cached_reg_addr = reg;
break;
case 2:
iio_dev_opaque->cached_reg_addr = reg;
ret = indio_dev->info->debugfs_reg_access(indio_dev, reg,
val, NULL);
if (ret) {
dev_err(indio_dev->dev.parent, "%s: write failed\n",
__func__);
return ret;
}
break;
default:
return -EINVAL;
}
return count;
}
static const struct file_operations iio_debugfs_reg_fops = {
.open = simple_open,
.read = iio_debugfs_read_reg,
.write = iio_debugfs_write_reg,
};
static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
debugfs_remove_recursive(iio_dev_opaque->debugfs_dentry);
}
static void iio_device_register_debugfs(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque;
if (indio_dev->info->debugfs_reg_access == NULL)
return;
if (!iio_debugfs_dentry)
return;
iio_dev_opaque = to_iio_dev_opaque(indio_dev);
iio_dev_opaque->debugfs_dentry =
debugfs_create_dir(dev_name(&indio_dev->dev),
iio_debugfs_dentry);
debugfs_create_file("direct_reg_access", 0644,
iio_dev_opaque->debugfs_dentry, indio_dev,
&iio_debugfs_reg_fops);
}
#else
static void iio_device_register_debugfs(struct iio_dev *indio_dev)
{
}
static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
}
#endif /* CONFIG_DEBUG_FS */
static ssize_t iio_read_channel_ext_info(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
const struct iio_chan_spec_ext_info *ext_info;
ext_info = &this_attr->c->ext_info[this_attr->address];
return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf);
}
static ssize_t iio_write_channel_ext_info(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
const struct iio_chan_spec_ext_info *ext_info;
ext_info = &this_attr->c->ext_info[this_attr->address];
return ext_info->write(indio_dev, ext_info->private,
this_attr->c, buf, len);
}
ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
{
const struct iio_enum *e = (const struct iio_enum *)priv;
unsigned int i;
size_t len = 0;
if (!e->num_items)
return 0;
for (i = 0; i < e->num_items; ++i) {
if (!e->items[i])
continue;
len += sysfs_emit_at(buf, len, "%s ", e->items[i]);
}
/* replace last space with a newline */
buf[len - 1] = '\n';
return len;
}
EXPORT_SYMBOL_GPL(iio_enum_available_read);
ssize_t iio_enum_read(struct iio_dev *indio_dev,
uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
{
const struct iio_enum *e = (const struct iio_enum *)priv;
int i;
if (!e->get)
return -EINVAL;
i = e->get(indio_dev, chan);
if (i < 0)
return i;
if (i >= e->num_items || !e->items[i])
return -EINVAL;
return sysfs_emit(buf, "%s\n", e->items[i]);
}
EXPORT_SYMBOL_GPL(iio_enum_read);
ssize_t iio_enum_write(struct iio_dev *indio_dev,
uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
size_t len)
{
const struct iio_enum *e = (const struct iio_enum *)priv;
int ret;
if (!e->set)
return -EINVAL;
ret = __sysfs_match_string(e->items, e->num_items, buf);
if (ret < 0)
return ret;
ret = e->set(indio_dev, chan, ret);
return ret ? ret : len;
}
EXPORT_SYMBOL_GPL(iio_enum_write);
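/*
 * Usage sketch (illustrative only): a driver describes a multiple-choice
 * attribute with a struct iio_enum; the item list and callbacks below are
 * placeholders.
 *
 *	static const char * const my_modes[] = { "normal", "turbo" };
 *
 *	static const struct iio_enum my_mode_enum = {
 *		.items = my_modes,
 *		.num_items = ARRAY_SIZE(my_modes),
 *		.get = my_get_mode,
 *		.set = my_set_mode,
 *	};
 *
 * iio_enum_read(), iio_enum_write() and iio_enum_available_read() then back
 * the resulting sysfs attributes once the enum is wired into the channel's
 * ext_info array (the IIO_ENUM()/IIO_ENUM_AVAILABLE() helpers in
 * <linux/iio/sysfs.h> exist for that purpose).
 */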
static const struct iio_mount_matrix iio_mount_idmatrix = {
.rotation = {
"1", "0", "0",
"0", "1", "0",
"0", "0", "1"
}
};
static int iio_setup_mount_idmatrix(const struct device *dev,
struct iio_mount_matrix *matrix)
{
*matrix = iio_mount_idmatrix;
dev_info(dev, "mounting matrix not found: using identity...\n");
return 0;
}
ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
const struct iio_chan_spec *chan, char *buf)
{
const struct iio_mount_matrix *mtx;
mtx = ((iio_get_mount_matrix_t *)priv)(indio_dev, chan);
if (IS_ERR(mtx))
return PTR_ERR(mtx);
if (!mtx)
mtx = &iio_mount_idmatrix;
return sysfs_emit(buf, "%s, %s, %s; %s, %s, %s; %s, %s, %s\n",
mtx->rotation[0], mtx->rotation[1], mtx->rotation[2],
mtx->rotation[3], mtx->rotation[4], mtx->rotation[5],
mtx->rotation[6], mtx->rotation[7], mtx->rotation[8]);
}
EXPORT_SYMBOL_GPL(iio_show_mount_matrix);
/**
* iio_read_mount_matrix() - retrieve iio device mounting matrix from
* device "mount-matrix" property
* @dev: device the mounting matrix property is assigned to
* @matrix: where to store retrieved matrix
*
 * If the device is assigned no mounting matrix property, a default 3x3 identity
* matrix will be filled in.
*
* Returns: 0 if success, or a negative error code on failure.
*/
int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix)
{
size_t len = ARRAY_SIZE(iio_mount_idmatrix.rotation);
int err;
err = device_property_read_string_array(dev, "mount-matrix", matrix->rotation, len);
if (err == len)
return 0;
if (err >= 0)
/* Invalid number of matrix entries. */
return -EINVAL;
if (err != -EINVAL)
/* Invalid matrix declaration format. */
return err;
/* Matrix was not declared at all: fallback to identity. */
return iio_setup_mount_idmatrix(dev, matrix);
}
EXPORT_SYMBOL(iio_read_mount_matrix);
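/*
 * Usage sketch (illustrative only): a sensor driver typically calls this in
 * probe() and keeps the result in its state structure ("st->orientation" is
 * an assumption for the example):
 *
 *	ret = iio_read_mount_matrix(dev, &st->orientation);
 *	if (ret)
 *		return ret;
 *
 * with a firmware property such as
 *
 *	mount-matrix = "0", "1", "0",
 *		       "-1", "0", "0",
 *		       "0", "0", "1";
 *
 * describing a rotation of the sensor around the Z axis.
 */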
static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type,
int size, const int *vals)
{
int tmp0, tmp1;
s64 tmp2;
bool scale_db = false;
switch (type) {
case IIO_VAL_INT:
return sysfs_emit_at(buf, offset, "%d", vals[0]);
case IIO_VAL_INT_PLUS_MICRO_DB:
scale_db = true;
fallthrough;
case IIO_VAL_INT_PLUS_MICRO:
if (vals[1] < 0)
return sysfs_emit_at(buf, offset, "-%d.%06u%s",
abs(vals[0]), -vals[1],
scale_db ? " dB" : "");
else
return sysfs_emit_at(buf, offset, "%d.%06u%s", vals[0],
vals[1], scale_db ? " dB" : "");
case IIO_VAL_INT_PLUS_NANO:
if (vals[1] < 0)
return sysfs_emit_at(buf, offset, "-%d.%09u",
abs(vals[0]), -vals[1]);
else
return sysfs_emit_at(buf, offset, "%d.%09u", vals[0],
vals[1]);
case IIO_VAL_FRACTIONAL:
tmp2 = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
tmp1 = vals[1];
tmp0 = (int)div_s64_rem(tmp2, 1000000000, &tmp1);
if ((tmp2 < 0) && (tmp0 == 0))
return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
else
return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
abs(tmp1));
case IIO_VAL_FRACTIONAL_LOG2:
tmp2 = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
tmp0 = (int)div_s64_rem(tmp2, 1000000000LL, &tmp1);
if (tmp0 == 0 && tmp2 < 0)
return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
else
return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
abs(tmp1));
case IIO_VAL_INT_MULTIPLE:
{
int i;
int l = 0;
for (i = 0; i < size; ++i)
l += sysfs_emit_at(buf, offset + l, "%d ", vals[i]);
return l;
}
case IIO_VAL_CHAR:
return sysfs_emit_at(buf, offset, "%c", (char)vals[0]);
case IIO_VAL_INT_64:
tmp2 = (s64)((((u64)vals[1]) << 32) | (u32)vals[0]);
return sysfs_emit_at(buf, offset, "%lld", tmp2);
default:
return 0;
}
}
/**
 * iio_format_value() - Formats an IIO value into its string representation
* @buf: The buffer to which the formatted value gets written
* which is assumed to be big enough (i.e. PAGE_SIZE).
* @type: One of the IIO_VAL_* constants. This decides how the val
* and val2 parameters are formatted.
* @size: Number of IIO value entries contained in vals
* @vals: Pointer to the values, exact meaning depends on the
* type parameter.
*
* Returns:
* 0 by default, a negative number on failure or the total number of characters
* written for a type that belongs to the IIO_VAL_* constant.
*/
ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
{
ssize_t len;
len = __iio_format_value(buf, 0, type, size, vals);
if (len >= PAGE_SIZE - 1)
return -EFBIG;
return len + sysfs_emit_at(buf, len, "\n");
}
EXPORT_SYMBOL_GPL(iio_format_value);
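/*
 * Worked examples (illustrative): with vals = {1, 500000} and type
 * IIO_VAL_INT_PLUS_MICRO the formatted result is "1.500000\n"; with
 * vals = {1, 4} and type IIO_VAL_FRACTIONAL it is "0.250000000\n"; with
 * vals = {-1, 2} and type IIO_VAL_FRACTIONAL_LOG2 it is "-0.250000000\n".
 */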
static ssize_t iio_read_channel_label(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
if (indio_dev->info->read_label)
return indio_dev->info->read_label(indio_dev, this_attr->c, buf);
if (this_attr->c->extend_name)
return sysfs_emit(buf, "%s\n", this_attr->c->extend_name);
return -EINVAL;
}
static ssize_t iio_read_channel_info(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int vals[INDIO_MAX_RAW_ELEMENTS];
int ret;
int val_len = 2;
if (indio_dev->info->read_raw_multi)
ret = indio_dev->info->read_raw_multi(indio_dev, this_attr->c,
INDIO_MAX_RAW_ELEMENTS,
vals, &val_len,
this_attr->address);
else
ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
&vals[0], &vals[1], this_attr->address);
if (ret < 0)
return ret;
return iio_format_value(buf, ret, val_len, vals);
}
static ssize_t iio_format_list(char *buf, const int *vals, int type, int length,
const char *prefix, const char *suffix)
{
ssize_t len;
int stride;
int i;
switch (type) {
case IIO_VAL_INT:
stride = 1;
break;
default:
stride = 2;
break;
}
len = sysfs_emit(buf, prefix);
for (i = 0; i <= length - stride; i += stride) {
if (i != 0) {
len += sysfs_emit_at(buf, len, " ");
if (len >= PAGE_SIZE)
return -EFBIG;
}
len += __iio_format_value(buf, len, type, stride, &vals[i]);
if (len >= PAGE_SIZE)
return -EFBIG;
}
len += sysfs_emit_at(buf, len, "%s\n", suffix);
return len;
}
static ssize_t iio_format_avail_list(char *buf, const int *vals,
int type, int length)
{
return iio_format_list(buf, vals, type, length, "", "");
}
static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
{
int length;
/*
	 * length refers to the array size, not the number of elements.
	 * The purpose is to print the range [min, step, max], so length should
	 * be 3 in the case of int, and 6 for other types.
*/
switch (type) {
case IIO_VAL_INT:
length = 3;
break;
default:
length = 6;
break;
}
return iio_format_list(buf, vals, type, length, "[", "]");
}
static ssize_t iio_read_channel_info_avail(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
const int *vals;
int ret;
int length;
int type;
ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
&vals, &type, &length,
this_attr->address);
if (ret < 0)
return ret;
switch (ret) {
case IIO_AVAIL_LIST:
return iio_format_avail_list(buf, vals, type, length);
case IIO_AVAIL_RANGE:
return iio_format_avail_range(buf, vals, type);
default:
return -EINVAL;
}
}
/**
* __iio_str_to_fixpoint() - Parse a fixed-point number from a string
* @str: The string to parse
* @fract_mult: Multiplier for the first decimal place, should be a power of 10
* @integer: The integer part of the number
* @fract: The fractional part of the number
* @scale_db: True if this should parse as dB
*
* Returns:
* 0 on success, or a negative error code if the string could not be parsed.
*/
static int __iio_str_to_fixpoint(const char *str, int fract_mult,
int *integer, int *fract, bool scale_db)
{
int i = 0, f = 0;
bool integer_part = true, negative = false;
if (fract_mult == 0) {
*fract = 0;
return kstrtoint(str, 0, integer);
}
if (str[0] == '-') {
negative = true;
str++;
} else if (str[0] == '+') {
str++;
}
while (*str) {
if ('0' <= *str && *str <= '9') {
if (integer_part) {
i = i * 10 + *str - '0';
} else {
f += fract_mult * (*str - '0');
fract_mult /= 10;
}
} else if (*str == '\n') {
if (*(str + 1) == '\0')
break;
return -EINVAL;
} else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) {
/* Ignore the dB suffix */
str += sizeof(" dB") - 1;
continue;
} else if (!strncmp(str, "dB", sizeof("dB") - 1) && scale_db) {
/* Ignore the dB suffix */
str += sizeof("dB") - 1;
continue;
} else if (*str == '.' && integer_part) {
integer_part = false;
} else {
return -EINVAL;
}
str++;
}
if (negative) {
if (i)
i = -i;
else
f = -f;
}
*integer = i;
*fract = f;
return 0;
}
/**
* iio_str_to_fixpoint() - Parse a fixed-point number from a string
* @str: The string to parse
* @fract_mult: Multiplier for the first decimal place, should be a power of 10
* @integer: The integer part of the number
* @fract: The fractional part of the number
*
* Returns:
* 0 on success, or a negative error code if the string could not be parsed.
*/
int iio_str_to_fixpoint(const char *str, int fract_mult,
int *integer, int *fract)
{
return __iio_str_to_fixpoint(str, fract_mult, integer, fract, false);
}
EXPORT_SYMBOL_GPL(iio_str_to_fixpoint);
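/*
 * Worked example (illustrative): parsing "2.5\n" with fract_mult = 100000
 * (the IIO_VAL_INT_PLUS_MICRO case) yields *integer = 2, *fract = 500000,
 * while "-0.5\n" yields *integer = 0, *fract = -500000 because the sign is
 * carried by the fractional part when the integer part is zero.
 */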
static ssize_t iio_write_channel_info(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret, fract_mult = 100000;
int integer, fract = 0;
bool is_char = false;
bool scale_db = false;
/* Assumes decimal - precision based on number of digits */
if (!indio_dev->info->write_raw)
return -EINVAL;
if (indio_dev->info->write_raw_get_fmt)
switch (indio_dev->info->write_raw_get_fmt(indio_dev,
this_attr->c, this_attr->address)) {
case IIO_VAL_INT:
fract_mult = 0;
break;
case IIO_VAL_INT_PLUS_MICRO_DB:
scale_db = true;
fallthrough;
case IIO_VAL_INT_PLUS_MICRO:
fract_mult = 100000;
break;
case IIO_VAL_INT_PLUS_NANO:
fract_mult = 100000000;
break;
case IIO_VAL_CHAR:
is_char = true;
break;
default:
return -EINVAL;
}
if (is_char) {
char ch;
if (sscanf(buf, "%c", &ch) != 1)
return -EINVAL;
integer = ch;
} else {
ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract,
scale_db);
if (ret)
return ret;
}
ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
integer, fract, this_attr->address);
if (ret)
return ret;
return len;
}
static
int __iio_device_attr_init(struct device_attribute *dev_attr,
const char *postfix,
struct iio_chan_spec const *chan,
ssize_t (*readfunc)(struct device *dev,
struct device_attribute *attr,
char *buf),
ssize_t (*writefunc)(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len),
enum iio_shared_by shared_by)
{
int ret = 0;
char *name = NULL;
char *full_postfix;
sysfs_attr_init(&dev_attr->attr);
/* Build up postfix of <extend_name>_<modifier>_postfix */
if (chan->modified && (shared_by == IIO_SEPARATE)) {
if (chan->extend_name)
full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
iio_modifier_names[chan->channel2],
chan->extend_name,
postfix);
else
full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
iio_modifier_names[chan->channel2],
postfix);
} else {
if (chan->extend_name == NULL || shared_by != IIO_SEPARATE)
full_postfix = kstrdup(postfix, GFP_KERNEL);
else
full_postfix = kasprintf(GFP_KERNEL,
"%s_%s",
chan->extend_name,
postfix);
}
if (full_postfix == NULL)
return -ENOMEM;
if (chan->differential) { /* Differential can not have modifier */
switch (shared_by) {
case IIO_SHARED_BY_ALL:
name = kasprintf(GFP_KERNEL, "%s", full_postfix);
break;
case IIO_SHARED_BY_DIR:
name = kasprintf(GFP_KERNEL, "%s_%s",
iio_direction[chan->output],
full_postfix);
break;
case IIO_SHARED_BY_TYPE:
name = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
iio_direction[chan->output],
iio_chan_type_name_spec[chan->type],
iio_chan_type_name_spec[chan->type],
full_postfix);
break;
case IIO_SEPARATE:
if (!chan->indexed) {
WARN(1, "Differential channels must be indexed\n");
ret = -EINVAL;
goto error_free_full_postfix;
}
name = kasprintf(GFP_KERNEL,
"%s_%s%d-%s%d_%s",
iio_direction[chan->output],
iio_chan_type_name_spec[chan->type],
chan->channel,
iio_chan_type_name_spec[chan->type],
chan->channel2,
full_postfix);
break;
}
} else { /* Single ended */
switch (shared_by) {
case IIO_SHARED_BY_ALL:
name = kasprintf(GFP_KERNEL, "%s", full_postfix);
break;
case IIO_SHARED_BY_DIR:
name = kasprintf(GFP_KERNEL, "%s_%s",
iio_direction[chan->output],
full_postfix);
break;
case IIO_SHARED_BY_TYPE:
name = kasprintf(GFP_KERNEL, "%s_%s_%s",
iio_direction[chan->output],
iio_chan_type_name_spec[chan->type],
full_postfix);
break;
case IIO_SEPARATE:
if (chan->indexed)
name = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
iio_direction[chan->output],
iio_chan_type_name_spec[chan->type],
chan->channel,
full_postfix);
else
name = kasprintf(GFP_KERNEL, "%s_%s_%s",
iio_direction[chan->output],
iio_chan_type_name_spec[chan->type],
full_postfix);
break;
}
}
if (name == NULL) {
ret = -ENOMEM;
goto error_free_full_postfix;
}
dev_attr->attr.name = name;
if (readfunc) {
dev_attr->attr.mode |= 0444;
dev_attr->show = readfunc;
}
if (writefunc) {
dev_attr->attr.mode |= 0200;
dev_attr->store = writefunc;
}
error_free_full_postfix:
kfree(full_postfix);
return ret;
}
static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
{
kfree(dev_attr->attr.name);
}
int __iio_add_chan_devattr(const char *postfix,
struct iio_chan_spec const *chan,
ssize_t (*readfunc)(struct device *dev,
struct device_attribute *attr,
char *buf),
ssize_t (*writefunc)(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len),
u64 mask,
enum iio_shared_by shared_by,
struct device *dev,
struct iio_buffer *buffer,
struct list_head *attr_list)
{
int ret;
struct iio_dev_attr *iio_attr, *t;
iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
if (iio_attr == NULL)
return -ENOMEM;
ret = __iio_device_attr_init(&iio_attr->dev_attr,
postfix, chan,
readfunc, writefunc, shared_by);
if (ret)
goto error_iio_dev_attr_free;
iio_attr->c = chan;
iio_attr->address = mask;
iio_attr->buffer = buffer;
list_for_each_entry(t, attr_list, l)
if (strcmp(t->dev_attr.attr.name,
iio_attr->dev_attr.attr.name) == 0) {
if (shared_by == IIO_SEPARATE)
dev_err(dev, "tried to double register : %s\n",
t->dev_attr.attr.name);
ret = -EBUSY;
goto error_device_attr_deinit;
}
list_add(&iio_attr->l, attr_list);
return 0;
error_device_attr_deinit:
__iio_device_attr_deinit(&iio_attr->dev_attr);
error_iio_dev_attr_free:
kfree(iio_attr);
return ret;
}
static int iio_device_add_channel_label(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
int ret;
if (!indio_dev->info->read_label && !chan->extend_name)
return 0;
ret = __iio_add_chan_devattr("label",
chan,
&iio_read_channel_label,
NULL,
0,
IIO_SEPARATE,
&indio_dev->dev,
NULL,
&iio_dev_opaque->channel_attr_list);
if (ret < 0)
return ret;
return 1;
}
static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
enum iio_shared_by shared_by,
const long *infomask)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
int i, ret, attrcount = 0;
for_each_set_bit(i, infomask, sizeof(*infomask)*8) {
if (i >= ARRAY_SIZE(iio_chan_info_postfix))
return -EINVAL;
ret = __iio_add_chan_devattr(iio_chan_info_postfix[i],
chan,
&iio_read_channel_info,
&iio_write_channel_info,
i,
shared_by,
&indio_dev->dev,
NULL,
&iio_dev_opaque->channel_attr_list);
if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
continue;
if (ret < 0)
return ret;
attrcount++;
}
return attrcount;
}
static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
enum iio_shared_by shared_by,
const long *infomask)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
int i, ret, attrcount = 0;
char *avail_postfix;
for_each_set_bit(i, infomask, sizeof(*infomask) * 8) {
if (i >= ARRAY_SIZE(iio_chan_info_postfix))
return -EINVAL;
avail_postfix = kasprintf(GFP_KERNEL,
"%s_available",
iio_chan_info_postfix[i]);
if (!avail_postfix)
return -ENOMEM;
ret = __iio_add_chan_devattr(avail_postfix,
chan,
&iio_read_channel_info_avail,
NULL,
i,
shared_by,
&indio_dev->dev,
NULL,
&iio_dev_opaque->channel_attr_list);
kfree(avail_postfix);
if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
continue;
if (ret < 0)
return ret;
attrcount++;
}
return attrcount;
}
static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
int ret, attrcount = 0;
const struct iio_chan_spec_ext_info *ext_info;
if (chan->channel < 0)
return 0;
ret = iio_device_add_info_mask_type(indio_dev, chan,
IIO_SEPARATE,
&chan->info_mask_separate);
if (ret < 0)
return ret;
attrcount += ret;
ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
IIO_SEPARATE,
&chan->info_mask_separate_available);
if (ret < 0)
return ret;
attrcount += ret;
ret = iio_device_add_info_mask_type(indio_dev, chan,
IIO_SHARED_BY_TYPE,
&chan->info_mask_shared_by_type);
if (ret < 0)
return ret;
attrcount += ret;
ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
IIO_SHARED_BY_TYPE,
&chan->info_mask_shared_by_type_available);
if (ret < 0)
return ret;
attrcount += ret;
ret = iio_device_add_info_mask_type(indio_dev, chan,
IIO_SHARED_BY_DIR,
&chan->info_mask_shared_by_dir);
if (ret < 0)
return ret;
attrcount += ret;
ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
IIO_SHARED_BY_DIR,
&chan->info_mask_shared_by_dir_available);
if (ret < 0)
return ret;
attrcount += ret;
ret = iio_device_add_info_mask_type(indio_dev, chan,
IIO_SHARED_BY_ALL,
&chan->info_mask_shared_by_all);
if (ret < 0)
return ret;
attrcount += ret;
ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
IIO_SHARED_BY_ALL,
&chan->info_mask_shared_by_all_available);
if (ret < 0)
return ret;
attrcount += ret;
ret = iio_device_add_channel_label(indio_dev, chan);
if (ret < 0)
return ret;
attrcount += ret;
if (chan->ext_info) {
unsigned int i = 0;
for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
ret = __iio_add_chan_devattr(ext_info->name,
chan,
ext_info->read ?
&iio_read_channel_ext_info : NULL,
ext_info->write ?
&iio_write_channel_ext_info : NULL,
i,
ext_info->shared,
&indio_dev->dev,
NULL,
&iio_dev_opaque->channel_attr_list);
i++;
if (ret == -EBUSY && ext_info->shared)
continue;
if (ret)
return ret;
attrcount++;
}
}
return attrcount;
}
/**
* iio_free_chan_devattr_list() - Free a list of IIO device attributes
* @attr_list: List of IIO device attributes
*
* This function frees the memory allocated for each of the IIO device
* attributes in the list.
*/
void iio_free_chan_devattr_list(struct list_head *attr_list)
{
struct iio_dev_attr *p, *n;
list_for_each_entry_safe(p, n, attr_list, l) {
kfree_const(p->dev_attr.attr.name);
list_del(&p->l);
kfree(p);
}
}
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
return sysfs_emit(buf, "%s\n", indio_dev->name);
}
static DEVICE_ATTR_RO(name);
static ssize_t label_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
return sysfs_emit(buf, "%s\n", indio_dev->label);
}
static DEVICE_ATTR_RO(label);
static const char * const clock_names[] = {
[CLOCK_REALTIME] = "realtime",
[CLOCK_MONOTONIC] = "monotonic",
[CLOCK_PROCESS_CPUTIME_ID] = "process_cputime_id",
[CLOCK_THREAD_CPUTIME_ID] = "thread_cputime_id",
[CLOCK_MONOTONIC_RAW] = "monotonic_raw",
[CLOCK_REALTIME_COARSE] = "realtime_coarse",
[CLOCK_MONOTONIC_COARSE] = "monotonic_coarse",
[CLOCK_BOOTTIME] = "boottime",
[CLOCK_REALTIME_ALARM] = "realtime_alarm",
[CLOCK_BOOTTIME_ALARM] = "boottime_alarm",
[CLOCK_SGI_CYCLE] = "sgi_cycle",
[CLOCK_TAI] = "tai",
};
static ssize_t current_timestamp_clock_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
const struct iio_dev *indio_dev = dev_to_iio_dev(dev);
const clockid_t clk = iio_device_get_clock(indio_dev);
switch (clk) {
case CLOCK_REALTIME:
case CLOCK_MONOTONIC:
case CLOCK_MONOTONIC_RAW:
case CLOCK_REALTIME_COARSE:
case CLOCK_MONOTONIC_COARSE:
case CLOCK_BOOTTIME:
case CLOCK_TAI:
break;
default:
BUG();
}
return sysfs_emit(buf, "%s\n", clock_names[clk]);
}
static ssize_t current_timestamp_clock_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
clockid_t clk;
int ret;
ret = sysfs_match_string(clock_names, buf);
if (ret < 0)
return ret;
clk = ret;
switch (clk) {
case CLOCK_REALTIME:
case CLOCK_MONOTONIC:
case CLOCK_MONOTONIC_RAW:
case CLOCK_REALTIME_COARSE:
case CLOCK_MONOTONIC_COARSE:
case CLOCK_BOOTTIME:
case CLOCK_TAI:
break;
default:
return -EINVAL;
}
ret = iio_device_set_clock(dev_to_iio_dev(dev), clk);
if (ret)
return ret;
return len;
}
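/*
 * Append an attribute group to the NULL-terminated array that is assigned
 * to the device's groups pointer in __iio_device_register().
 */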
int iio_device_register_sysfs_group(struct iio_dev *indio_dev,
const struct attribute_group *group)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
const struct attribute_group **new, **old = iio_dev_opaque->groups;
unsigned int cnt = iio_dev_opaque->groupcounter;
new = krealloc_array(old, cnt + 2, sizeof(*new), GFP_KERNEL);
if (!new)
return -ENOMEM;
new[iio_dev_opaque->groupcounter++] = group;
new[iio_dev_opaque->groupcounter] = NULL;
iio_dev_opaque->groups = new;
return 0;
}
static DEVICE_ATTR_RW(current_timestamp_clock);
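/*
 * Merge the driver-supplied attribute group with the per-channel attributes
 * built above, plus the name, label and current_timestamp_clock entries,
 * into a single group registered for the device.
 */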
static int iio_device_register_sysfs(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
struct iio_dev_attr *p;
struct attribute **attr, *clk = NULL;
/* First count elements in any existing group */
if (indio_dev->info->attrs) {
attr = indio_dev->info->attrs->attrs;
while (*attr++ != NULL)
attrcount_orig++;
}
attrcount = attrcount_orig;
/*
* New channel registration method - relies on the fact that a group does
* not need to be initialized if its name is NULL.
*/
if (indio_dev->channels)
for (i = 0; i < indio_dev->num_channels; i++) {
const struct iio_chan_spec *chan =
&indio_dev->channels[i];
if (chan->type == IIO_TIMESTAMP)
clk = &dev_attr_current_timestamp_clock.attr;
ret = iio_device_add_channel_sysfs(indio_dev, chan);
if (ret < 0)
goto error_clear_attrs;
attrcount += ret;
}
if (iio_dev_opaque->event_interface)
clk = &dev_attr_current_timestamp_clock.attr;
if (indio_dev->name)
attrcount++;
if (indio_dev->label)
attrcount++;
if (clk)
attrcount++;
iio_dev_opaque->chan_attr_group.attrs =
kcalloc(attrcount + 1,
sizeof(iio_dev_opaque->chan_attr_group.attrs[0]),
GFP_KERNEL);
if (iio_dev_opaque->chan_attr_group.attrs == NULL) {
ret = -ENOMEM;
goto error_clear_attrs;
}
/* Copy across original attributes, and point to original binary attributes */
if (indio_dev->info->attrs) {
memcpy(iio_dev_opaque->chan_attr_group.attrs,
indio_dev->info->attrs->attrs,
sizeof(iio_dev_opaque->chan_attr_group.attrs[0])
*attrcount_orig);
iio_dev_opaque->chan_attr_group.is_visible =
indio_dev->info->attrs->is_visible;
iio_dev_opaque->chan_attr_group.bin_attrs =
indio_dev->info->attrs->bin_attrs;
}
attrn = attrcount_orig;
/* Add all elements from the list. */
list_for_each_entry(p, &iio_dev_opaque->channel_attr_list, l)
iio_dev_opaque->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
if (indio_dev->name)
iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
if (indio_dev->label)
iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_label.attr;
if (clk)
iio_dev_opaque->chan_attr_group.attrs[attrn++] = clk;
ret = iio_device_register_sysfs_group(indio_dev,
&iio_dev_opaque->chan_attr_group);
if (ret)
goto error_clear_attrs;
return 0;
error_clear_attrs:
iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);
return ret;
}
static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);
kfree(iio_dev_opaque->chan_attr_group.attrs);
iio_dev_opaque->chan_attr_group.attrs = NULL;
kfree(iio_dev_opaque->groups);
iio_dev_opaque->groups = NULL;
}
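/*
 * Final release callback for the struct device embedded in the iio_dev;
 * tears down the remaining state and frees the opaque structure allocated
 * in iio_device_alloc().
 */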
static void iio_dev_release(struct device *device)
{
struct iio_dev *indio_dev = dev_to_iio_dev(device);
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
iio_device_unregister_trigger_consumer(indio_dev);
iio_device_unregister_eventset(indio_dev);
iio_device_unregister_sysfs(indio_dev);
iio_device_detach_buffers(indio_dev);
lockdep_unregister_key(&iio_dev_opaque->mlock_key);
ida_free(&iio_ida, iio_dev_opaque->id);
kfree(iio_dev_opaque);
}
const struct device_type iio_device_type = {
.name = "iio_device",
.release = iio_dev_release,
};
/**
* iio_device_alloc() - allocate an iio_dev from a driver
* @parent: Parent device.
* @sizeof_priv: Space to allocate for private structure.
*
* Returns:
* Pointer to allocated iio_dev on success, NULL on failure.
*/
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
{
struct iio_dev_opaque *iio_dev_opaque;
struct iio_dev *indio_dev;
size_t alloc_size;
alloc_size = sizeof(struct iio_dev_opaque);
if (sizeof_priv) {
alloc_size = ALIGN(alloc_size, IIO_DMA_MINALIGN);
alloc_size += sizeof_priv;
}
iio_dev_opaque = kzalloc(alloc_size, GFP_KERNEL);
if (!iio_dev_opaque)
return NULL;
indio_dev = &iio_dev_opaque->indio_dev;
indio_dev->priv = (char *)iio_dev_opaque +
ALIGN(sizeof(struct iio_dev_opaque), IIO_DMA_MINALIGN);
indio_dev->dev.parent = parent;
indio_dev->dev.type = &iio_device_type;
indio_dev->dev.bus = &iio_bus_type;
device_initialize(&indio_dev->dev);
mutex_init(&iio_dev_opaque->mlock);
mutex_init(&iio_dev_opaque->info_exist_lock);
INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list);
iio_dev_opaque->id = ida_alloc(&iio_ida, GFP_KERNEL);
if (iio_dev_opaque->id < 0) {
/* cannot use a dev_err as the name isn't available */
pr_err("failed to get device id\n");
kfree(iio_dev_opaque);
return NULL;
}
if (dev_set_name(&indio_dev->dev, "iio:device%d", iio_dev_opaque->id)) {
ida_free(&iio_ida, iio_dev_opaque->id);
kfree(iio_dev_opaque);
return NULL;
}
INIT_LIST_HEAD(&iio_dev_opaque->buffer_list);
INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers);
lockdep_register_key(&iio_dev_opaque->mlock_key);
lockdep_set_class(&iio_dev_opaque->mlock, &iio_dev_opaque->mlock_key);
return indio_dev;
}
EXPORT_SYMBOL(iio_device_alloc);
/**
* iio_device_free() - free an iio_dev from a driver
* @dev: the iio_dev associated with the device
*/
void iio_device_free(struct iio_dev *dev)
{
if (dev)
put_device(&dev->dev);
}
EXPORT_SYMBOL(iio_device_free);
static void devm_iio_device_release(void *iio_dev)
{
iio_device_free(iio_dev);
}
/**
* devm_iio_device_alloc - Resource-managed iio_device_alloc()
* @parent: Device to allocate iio_dev for, and parent for this IIO device
* @sizeof_priv: Space to allocate for private structure.
*
* Managed iio_device_alloc. iio_dev allocated with this function is
* automatically freed on driver detach.
*
* Returns:
* Pointer to allocated iio_dev on success, NULL on failure.
*/
struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv)
{
struct iio_dev *iio_dev;
int ret;
iio_dev = iio_device_alloc(parent, sizeof_priv);
if (!iio_dev)
return NULL;
ret = devm_add_action_or_reset(parent, devm_iio_device_release,
iio_dev);
if (ret)
return NULL;
return iio_dev;
}
EXPORT_SYMBOL_GPL(devm_iio_device_alloc);
/**
* iio_chrdev_open() - chrdev file open for buffer access and ioctls
* @inode: Inode structure for identifying the device in the file system
* @filp: File structure for iio device used to keep and later access
* private data
*
* Returns: 0 on success or -EBUSY if the device is already opened
*/
static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
struct iio_dev_opaque *iio_dev_opaque =
container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;
struct iio_dev_buffer_pair *ib;
if (test_and_set_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags))
return -EBUSY;
iio_device_get(indio_dev);
ib = kmalloc(sizeof(*ib), GFP_KERNEL);
if (!ib) {
iio_device_put(indio_dev);
clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
return -ENOMEM;
}
ib->indio_dev = indio_dev;
ib->buffer = indio_dev->buffer;
filp->private_data = ib;
return 0;
}
/**
* iio_chrdev_release() - chrdev file close for buffer access and ioctls
* @inode: Inode structure pointer for the char device
* @filp: File structure pointer for the char device
*
* Returns: 0 for successful release.
*/
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
struct iio_dev_buffer_pair *ib = filp->private_data;
struct iio_dev_opaque *iio_dev_opaque =
container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;
kfree(ib);
clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
iio_device_put(indio_dev);
return 0;
}
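/*
 * ioctl handlers (e.g. for the buffer and event interfaces) are kept on a
 * per-device list; iio_ioctl() walks the list until one of them handles
 * the command.
 */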
void iio_device_ioctl_handler_register(struct iio_dev *indio_dev,
struct iio_ioctl_handler *h)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
list_add_tail(&h->entry, &iio_dev_opaque->ioctl_handlers);
}
void iio_device_ioctl_handler_unregister(struct iio_ioctl_handler *h)
{
list_del(&h->entry);
}
static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct iio_dev_buffer_pair *ib = filp->private_data;
struct iio_dev *indio_dev = ib->indio_dev;
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_ioctl_handler *h;
int ret = -ENODEV;
mutex_lock(&iio_dev_opaque->info_exist_lock);
/*
* The NULL check here is required to prevent crashing when a device
* is being removed while userspace would still have open file handles
* to try to access this device.
*/
if (!indio_dev->info)
goto out_unlock;
list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
ret = h->ioctl(indio_dev, filp, cmd, arg);
if (ret != IIO_IOCTL_UNHANDLED)
break;
}
if (ret == IIO_IOCTL_UNHANDLED)
ret = -ENODEV;
out_unlock:
mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret;
}
static const struct file_operations iio_buffer_fileops = {
.owner = THIS_MODULE,
.llseek = noop_llseek,
.read = iio_buffer_read_outer_addr,
.write = iio_buffer_write_outer_addr,
.poll = iio_buffer_poll_addr,
.unlocked_ioctl = iio_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = iio_chrdev_open,
.release = iio_chrdev_release,
};
static const struct file_operations iio_event_fileops = {
.owner = THIS_MODULE,
.llseek = noop_llseek,
.unlocked_ioctl = iio_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = iio_chrdev_open,
.release = iio_chrdev_release,
};
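/*
 * Buffered capture requires every channel with a valid scan_index to use a
 * unique value, otherwise demultiplexing the scan would be ambiguous.
 */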
static int iio_check_unique_scan_index(struct iio_dev *indio_dev)
{
int i, j;
const struct iio_chan_spec *channels = indio_dev->channels;
if (!(indio_dev->modes & INDIO_ALL_BUFFER_MODES))
return 0;
for (i = 0; i < indio_dev->num_channels - 1; i++) {
if (channels[i].scan_index < 0)
continue;
for (j = i + 1; j < indio_dev->num_channels; j++)
if (channels[i].scan_index == channels[j].scan_index) {
dev_err(&indio_dev->dev,
"Duplicate scan index %d\n",
channels[i].scan_index);
return -EINVAL;
}
}
return 0;
}
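/*
 * Channel labels (read_label) and extend_name are mutually exclusive ways
 * of naming a channel, so refuse to register a device using both.
 */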
static int iio_check_extended_name(const struct iio_dev *indio_dev)
{
unsigned int i;
if (!indio_dev->info->read_label)
return 0;
for (i = 0; i < indio_dev->num_channels; i++) {
if (indio_dev->channels[i].extend_name) {
dev_err(&indio_dev->dev,
"Cannot use labels and extend_name at the same time\n");
return -EINVAL;
}
}
return 0;
}
static const struct iio_buffer_setup_ops noop_ring_setup_ops;
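/*
 * Core registration path behind iio_device_register(): wire up the firmware
 * node, validate the channel specs, create the sysfs, buffer and event
 * interfaces and finally expose the character device.
 */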
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct fwnode_handle *fwnode = NULL;
int ret;
if (!indio_dev->info)
return -EINVAL;
iio_dev_opaque->driver_module = this_mod;
/* If the calling driver did not initialize the firmware node, do it here */
if (dev_fwnode(&indio_dev->dev))
fwnode = dev_fwnode(&indio_dev->dev);
/* The default dummy IIO device has no parent */
else if (indio_dev->dev.parent)
fwnode = dev_fwnode(indio_dev->dev.parent);
device_set_node(&indio_dev->dev, fwnode);
fwnode_property_read_string(fwnode, "label", &indio_dev->label);
ret = iio_check_unique_scan_index(indio_dev);
if (ret < 0)
return ret;
ret = iio_check_extended_name(indio_dev);
if (ret < 0)
return ret;
iio_device_register_debugfs(indio_dev);
ret = iio_buffers_alloc_sysfs_and_mask(indio_dev);
if (ret) {
dev_err(indio_dev->dev.parent,
"Failed to create buffer sysfs interfaces\n");
goto error_unreg_debugfs;
}
ret = iio_device_register_sysfs(indio_dev);
if (ret) {
dev_err(indio_dev->dev.parent,
"Failed to register sysfs interfaces\n");
goto error_buffer_free_sysfs;
}
ret = iio_device_register_eventset(indio_dev);
if (ret) {
dev_err(indio_dev->dev.parent,
"Failed to register event set\n");
goto error_free_sysfs;
}
if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
iio_device_register_trigger_consumer(indio_dev);
if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) &&
indio_dev->setup_ops == NULL)
indio_dev->setup_ops = &noop_ring_setup_ops;
if (iio_dev_opaque->attached_buffers_cnt)
cdev_init(&iio_dev_opaque->chrdev, &iio_buffer_fileops);
else if (iio_dev_opaque->event_interface)
cdev_init(&iio_dev_opaque->chrdev, &iio_event_fileops);
if (iio_dev_opaque->attached_buffers_cnt || iio_dev_opaque->event_interface) {
indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), iio_dev_opaque->id);
iio_dev_opaque->chrdev.owner = this_mod;
}
/* assign device groups now; they should be all registered now */
indio_dev->dev.groups = iio_dev_opaque->groups;
ret = cdev_device_add(&iio_dev_opaque->chrdev, &indio_dev->dev);
if (ret < 0)
goto error_unreg_eventset;
return 0;
error_unreg_eventset:
iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
iio_device_unregister_sysfs(indio_dev);
error_buffer_free_sysfs:
iio_buffers_free_sysfs_and_mask(indio_dev);
error_unreg_debugfs:
iio_device_unregister_debugfs(indio_dev);
return ret;
}
EXPORT_SYMBOL(__iio_device_register);
/**
* iio_device_unregister() - unregister a device from the IIO subsystem
* @indio_dev: Device structure representing the device.
*/
void iio_device_unregister(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
cdev_device_del(&iio_dev_opaque->chrdev, &indio_dev->dev);
mutex_lock(&iio_dev_opaque->info_exist_lock);
iio_device_unregister_debugfs(indio_dev);
iio_disable_all_buffers(indio_dev);
indio_dev->info = NULL;
iio_device_wakeup_eventset(indio_dev);
iio_buffer_wakeup_poll(indio_dev);
mutex_unlock(&iio_dev_opaque->info_exist_lock);
iio_buffers_free_sysfs_and_mask(indio_dev);
}
EXPORT_SYMBOL(iio_device_unregister);
static void devm_iio_device_unreg(void *indio_dev)
{
iio_device_unregister(indio_dev);
}
int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
struct module *this_mod)
{
int ret;
ret = __iio_device_register(indio_dev, this_mod);
if (ret)
return ret;
return devm_add_action_or_reset(dev, devm_iio_device_unreg, indio_dev);
}
EXPORT_SYMBOL_GPL(__devm_iio_device_register);
/**
* iio_device_claim_direct_mode - Keep device in direct mode
* @indio_dev: the iio_dev associated with the device
*
* If the device is in direct mode it is guaranteed to stay
* that way until iio_device_release_direct_mode() is called.
*
* Use with iio_device_release_direct_mode()
*
* Returns: 0 on success, -EBUSY on failure.
*/
int iio_device_claim_direct_mode(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
mutex_lock(&iio_dev_opaque->mlock);
if (iio_buffer_enabled(indio_dev)) {
mutex_unlock(&iio_dev_opaque->mlock);
return -EBUSY;
}
return 0;
}
EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode);
/**
* iio_device_release_direct_mode - releases claim on direct mode
* @indio_dev: the iio_dev associated with the device
*
* Release the claim. Device is no longer guaranteed to stay
* in direct mode.
*
* Use with iio_device_claim_direct_mode()
*/
void iio_device_release_direct_mode(struct iio_dev *indio_dev)
{
mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock);
}
EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);
/**
* iio_device_claim_buffer_mode - Keep device in buffer mode
* @indio_dev: the iio_dev associated with the device
*
* If the device is in buffer mode it is guaranteed to stay
* that way until iio_device_release_buffer_mode() is called.
*
* Use with iio_device_release_buffer_mode().
*
* Returns: 0 on success, -EBUSY on failure.
*/
int iio_device_claim_buffer_mode(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
mutex_lock(&iio_dev_opaque->mlock);
if (iio_buffer_enabled(indio_dev))
return 0;
mutex_unlock(&iio_dev_opaque->mlock);
return -EBUSY;
}
EXPORT_SYMBOL_GPL(iio_device_claim_buffer_mode);
/**
* iio_device_release_buffer_mode - releases claim on buffer mode
* @indio_dev: the iio_dev associated with the device
*
* Release the claim. Device is no longer guaranteed to stay
* in buffer mode.
*
* Use with iio_device_claim_buffer_mode().
*/
void iio_device_release_buffer_mode(struct iio_dev *indio_dev)
{
mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock);
}
EXPORT_SYMBOL_GPL(iio_device_release_buffer_mode);
/**
* iio_device_get_current_mode() - helper function providing read-only access to
* the opaque @currentmode variable
* @indio_dev: IIO device structure for device
*/
int iio_device_get_current_mode(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
return iio_dev_opaque->currentmode;
}
EXPORT_SYMBOL_GPL(iio_device_get_current_mode);
subsys_initcall(iio_init);
module_exit(iio_exit);
MODULE_AUTHOR("Jonathan Cameron <[email protected]>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");
| linux-master | drivers/iio/industrialio-core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* The Industrial I/O core, software IIO devices functions
*
* Copyright (c) 2016 Intel Corporation
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/iio/sw_device.h>
#include <linux/iio/configfs.h>
#include <linux/configfs.h>
static struct config_group *iio_devices_group;
static const struct config_item_type iio_device_type_group_type;
static const struct config_item_type iio_devices_group_type = {
.ct_owner = THIS_MODULE,
};
static LIST_HEAD(iio_device_types_list);
static DEFINE_MUTEX(iio_device_types_lock);
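/*
 * Look up a software device type by name; must be called with
 * iio_device_types_lock held. The len argument is currently unused and
 * matching is done on the full name.
 */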
static
struct iio_sw_device_type *__iio_find_sw_device_type(const char *name,
unsigned int len)
{
struct iio_sw_device_type *d = NULL, *iter;
list_for_each_entry(iter, &iio_device_types_list, list)
if (!strcmp(iter->name, name)) {
d = iter;
break;
}
return d;
}
int iio_register_sw_device_type(struct iio_sw_device_type *d)
{
struct iio_sw_device_type *iter;
int ret = 0;
mutex_lock(&iio_device_types_lock);
iter = __iio_find_sw_device_type(d->name, strlen(d->name));
if (iter)
ret = -EBUSY;
else
list_add_tail(&d->list, &iio_device_types_list);
mutex_unlock(&iio_device_types_lock);
if (ret)
return ret;
d->group = configfs_register_default_group(iio_devices_group, d->name,
&iio_device_type_group_type);
if (IS_ERR(d->group))
ret = PTR_ERR(d->group);
return ret;
}
EXPORT_SYMBOL(iio_register_sw_device_type);
void iio_unregister_sw_device_type(struct iio_sw_device_type *dt)
{
struct iio_sw_device_type *iter;
mutex_lock(&iio_device_types_lock);
iter = __iio_find_sw_device_type(dt->name, strlen(dt->name));
if (iter)
list_del(&dt->list);
mutex_unlock(&iio_device_types_lock);
configfs_unregister_default_group(dt->group);
}
EXPORT_SYMBOL(iio_unregister_sw_device_type);
static
struct iio_sw_device_type *iio_get_sw_device_type(const char *name)
{
struct iio_sw_device_type *dt;
mutex_lock(&iio_device_types_lock);
dt = __iio_find_sw_device_type(name, strlen(name));
if (dt && !try_module_get(dt->owner))
dt = NULL;
mutex_unlock(&iio_device_types_lock);
return dt;
}
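/*
 * Create a software device of the given type. A reference to the type's
 * module is taken for the lifetime of the device and dropped on probe
 * failure or in iio_sw_device_destroy().
 */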
struct iio_sw_device *iio_sw_device_create(const char *type, const char *name)
{
struct iio_sw_device *d;
struct iio_sw_device_type *dt;
dt = iio_get_sw_device_type(type);
if (!dt) {
pr_err("Invalid device type: %s\n", type);
return ERR_PTR(-EINVAL);
}
d = dt->ops->probe(name);
if (IS_ERR(d))
goto out_module_put;
d->device_type = dt;
return d;
out_module_put:
module_put(dt->owner);
return d;
}
EXPORT_SYMBOL(iio_sw_device_create);
void iio_sw_device_destroy(struct iio_sw_device *d)
{
struct iio_sw_device_type *dt = d->device_type;
dt->ops->remove(d);
module_put(dt->owner);
}
EXPORT_SYMBOL(iio_sw_device_destroy);
static struct config_group *device_make_group(struct config_group *group,
const char *name)
{
struct iio_sw_device *d;
d = iio_sw_device_create(group->cg_item.ci_name, name);
if (IS_ERR(d))
return ERR_CAST(d);
config_item_set_name(&d->group.cg_item, "%s", name);
return &d->group;
}
static void device_drop_group(struct config_group *group,
struct config_item *item)
{
struct iio_sw_device *d = to_iio_sw_device(item);
iio_sw_device_destroy(d);
config_item_put(item);
}
static struct configfs_group_operations device_ops = {
.make_group = &device_make_group,
.drop_item = &device_drop_group,
};
static const struct config_item_type iio_device_type_group_type = {
.ct_group_ops = &device_ops,
.ct_owner = THIS_MODULE,
};
static int __init iio_sw_device_init(void)
{
iio_devices_group =
configfs_register_default_group(&iio_configfs_subsys.su_group,
"devices",
&iio_devices_group_type);
return PTR_ERR_OR_ZERO(iio_devices_group);
}
module_init(iio_sw_device_init);
static void __exit iio_sw_device_exit(void)
{
configfs_unregister_default_group(iio_devices_group);
}
module_exit(iio_sw_device_exit);
MODULE_AUTHOR("Daniel Baluta <[email protected]>");
MODULE_DESCRIPTION("Industrial I/O software devices support");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/industrialio-sw-device.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HID Sensors Driver
* Copyright (c) 2013, Intel Corporation.
*/
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum incl_3d_channel {
CHANNEL_SCAN_INDEX_X,
CHANNEL_SCAN_INDEX_Y,
CHANNEL_SCAN_INDEX_Z,
INCLI_3D_CHANNEL_MAX,
};
#define CHANNEL_SCAN_INDEX_TIMESTAMP INCLI_3D_CHANNEL_MAX
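/*
 * Driver state. The scan struct matches the layout expected by
 * iio_push_to_buffers_with_timestamp(): the three tilt values followed by
 * an 8-byte aligned timestamp.
 */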
struct incl_3d_state {
struct hid_sensor_hub_callbacks callbacks;
struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info incl[INCLI_3D_CHANNEL_MAX];
struct {
u32 incl_val[INCLI_3D_CHANNEL_MAX];
u64 timestamp __aligned(8);
} scan;
int scale_pre_decml;
int scale_post_decml;
int scale_precision;
int value_offset;
s64 timestamp;
};
static const u32 incl_3d_addresses[INCLI_3D_CHANNEL_MAX] = {
HID_USAGE_SENSOR_ORIENT_TILT_X,
HID_USAGE_SENSOR_ORIENT_TILT_Y,
HID_USAGE_SENSOR_ORIENT_TILT_Z
};
static const u32 incl_3d_sensitivity_addresses[] = {
HID_USAGE_SENSOR_DATA_ORIENTATION,
HID_USAGE_SENSOR_ORIENT_TILT,
};
/* Channel definitions */
static const struct iio_chan_spec incl_3d_channels[] = {
{
.type = IIO_INCLI,
.modified = 1,
.channel2 = IIO_MOD_X,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_HYSTERESIS),
.scan_index = CHANNEL_SCAN_INDEX_X,
}, {
.type = IIO_INCLI,
.modified = 1,
.channel2 = IIO_MOD_Y,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_HYSTERESIS),
.scan_index = CHANNEL_SCAN_INDEX_Y,
}, {
.type = IIO_INCLI,
.modified = 1,
.channel2 = IIO_MOD_Z,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_HYSTERESIS),
.scan_index = CHANNEL_SCAN_INDEX_Z,
},
IIO_CHAN_SOFT_TIMESTAMP(CHANNEL_SCAN_INDEX_TIMESTAMP),
};
/* Adjust channel real bits based on report descriptor */
static void incl_3d_adjust_channel_bit_mask(struct iio_chan_spec *chan,
int size)
{
chan->scan_type.sign = 's';
/* The number of real bits changes based on the report descriptor. */
chan->scan_type.realbits = size * 8;
/* Maximum size of a sample to capture is u32 */
chan->scan_type.storagebits = sizeof(u32) * 8;
}
/* Channel read_raw handler */
static int incl_3d_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2,
long mask)
{
struct incl_3d_state *incl_state = iio_priv(indio_dev);
int report_id = -1;
u32 address;
int ret_type;
s32 min;
*val = 0;
*val2 = 0;
switch (mask) {
case IIO_CHAN_INFO_RAW:
hid_sensor_power_state(&incl_state->common_attributes, true);
report_id = incl_state->incl[chan->scan_index].report_id;
min = incl_state->incl[chan->scan_index].logical_minimum;
address = incl_3d_addresses[chan->scan_index];
if (report_id >= 0)
*val = sensor_hub_input_attr_get_raw_value(
incl_state->common_attributes.hsdev,
HID_USAGE_SENSOR_INCLINOMETER_3D, address,
report_id,
SENSOR_HUB_SYNC,
min < 0);
else {
hid_sensor_power_state(&incl_state->common_attributes,
false);
return -EINVAL;
}
hid_sensor_power_state(&incl_state->common_attributes, false);
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SCALE:
*val = incl_state->scale_pre_decml;
*val2 = incl_state->scale_post_decml;
ret_type = incl_state->scale_precision;
break;
case IIO_CHAN_INFO_OFFSET:
*val = incl_state->value_offset;
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SAMP_FREQ:
ret_type = hid_sensor_read_samp_freq_value(
&incl_state->common_attributes, val, val2);
break;
case IIO_CHAN_INFO_HYSTERESIS:
ret_type = hid_sensor_read_raw_hyst_value(
&incl_state->common_attributes, val, val2);
break;
default:
ret_type = -EINVAL;
break;
}
return ret_type;
}
/* Channel write_raw handler */
static int incl_3d_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val,
int val2,
long mask)
{
struct incl_3d_state *incl_state = iio_priv(indio_dev);
int ret;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
ret = hid_sensor_write_samp_freq_value(
&incl_state->common_attributes, val, val2);
break;
case IIO_CHAN_INFO_HYSTERESIS:
ret = hid_sensor_write_raw_hyst_value(
&incl_state->common_attributes, val, val2);
break;
default:
ret = -EINVAL;
}
return ret;
}
static const struct iio_info incl_3d_info = {
.read_raw = &incl_3d_read_raw,
.write_raw = &incl_3d_write_raw,
};
/* Callback handler to send event after all samples are received and captured */
static int incl_3d_proc_event(struct hid_sensor_hub_device *hsdev,
unsigned usage_id,
void *priv)
{
struct iio_dev *indio_dev = platform_get_drvdata(priv);
struct incl_3d_state *incl_state = iio_priv(indio_dev);
dev_dbg(&indio_dev->dev, "incl_3d_proc_event\n");
if (atomic_read(&incl_state->common_attributes.data_ready)) {
if (!incl_state->timestamp)
incl_state->timestamp = iio_get_time_ns(indio_dev);
iio_push_to_buffers_with_timestamp(indio_dev,
&incl_state->scan,
incl_state->timestamp);
incl_state->timestamp = 0;
}
return 0;
}
/* Capture samples in local storage */
static int incl_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
unsigned usage_id,
size_t raw_len, char *raw_data,
void *priv)
{
struct iio_dev *indio_dev = platform_get_drvdata(priv);
struct incl_3d_state *incl_state = iio_priv(indio_dev);
int ret = 0;
switch (usage_id) {
case HID_USAGE_SENSOR_ORIENT_TILT_X:
incl_state->scan.incl_val[CHANNEL_SCAN_INDEX_X] = *(u32 *)raw_data;
break;
case HID_USAGE_SENSOR_ORIENT_TILT_Y:
incl_state->scan.incl_val[CHANNEL_SCAN_INDEX_Y] = *(u32 *)raw_data;
break;
case HID_USAGE_SENSOR_ORIENT_TILT_Z:
incl_state->scan.incl_val[CHANNEL_SCAN_INDEX_Z] = *(u32 *)raw_data;
break;
case HID_USAGE_SENSOR_TIME_TIMESTAMP:
incl_state->timestamp =
hid_sensor_convert_timestamp(&incl_state->common_attributes,
*(s64 *)raw_data);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
/* Parse the report that is specific to a usage id */
static int incl_3d_parse_report(struct platform_device *pdev,
struct hid_sensor_hub_device *hsdev,
struct iio_chan_spec *channels,
unsigned usage_id,
struct incl_3d_state *st)
{
int ret;
ret = sensor_hub_input_get_attribute_info(hsdev,
HID_INPUT_REPORT,
usage_id,
HID_USAGE_SENSOR_ORIENT_TILT_X,
&st->incl[CHANNEL_SCAN_INDEX_X]);
if (ret)
return ret;
incl_3d_adjust_channel_bit_mask(&channels[CHANNEL_SCAN_INDEX_X],
st->incl[CHANNEL_SCAN_INDEX_X].size);
ret = sensor_hub_input_get_attribute_info(hsdev,
HID_INPUT_REPORT,
usage_id,
HID_USAGE_SENSOR_ORIENT_TILT_Y,
&st->incl[CHANNEL_SCAN_INDEX_Y]);
if (ret)
return ret;
incl_3d_adjust_channel_bit_mask(&channels[CHANNEL_SCAN_INDEX_Y],
st->incl[CHANNEL_SCAN_INDEX_Y].size);
ret = sensor_hub_input_get_attribute_info(hsdev,
HID_INPUT_REPORT,
usage_id,
HID_USAGE_SENSOR_ORIENT_TILT_Z,
&st->incl[CHANNEL_SCAN_INDEX_Z]);
if (ret)
return ret;
incl_3d_adjust_channel_bit_mask(&channels[CHANNEL_SCAN_INDEX_Z],
st->incl[CHANNEL_SCAN_INDEX_Z].size);
dev_dbg(&pdev->dev, "incl_3d %x:%x, %x:%x, %x:%x\n",
st->incl[0].index,
st->incl[0].report_id,
st->incl[1].index, st->incl[1].report_id,
st->incl[2].index, st->incl[2].report_id);
st->scale_precision = hid_sensor_format_scale(
HID_USAGE_SENSOR_INCLINOMETER_3D,
&st->incl[CHANNEL_SCAN_INDEX_X],
&st->scale_pre_decml, &st->scale_post_decml);
return ret;
}
/* Function to initialize the processing for usage id */
static int hid_incl_3d_probe(struct platform_device *pdev)
{
int ret;
static char *name = "incli_3d";
struct iio_dev *indio_dev;
struct incl_3d_state *incl_state;
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
indio_dev = devm_iio_device_alloc(&pdev->dev,
sizeof(struct incl_3d_state));
if (indio_dev == NULL)
return -ENOMEM;
platform_set_drvdata(pdev, indio_dev);
incl_state = iio_priv(indio_dev);
incl_state->common_attributes.hsdev = hsdev;
incl_state->common_attributes.pdev = pdev;
ret = hid_sensor_parse_common_attributes(hsdev,
HID_USAGE_SENSOR_INCLINOMETER_3D,
&incl_state->common_attributes,
incl_3d_sensitivity_addresses,
ARRAY_SIZE(incl_3d_sensitivity_addresses));
if (ret) {
dev_err(&pdev->dev, "failed to setup common attributes\n");
return ret;
}
indio_dev->channels = devm_kmemdup(&pdev->dev, incl_3d_channels,
sizeof(incl_3d_channels), GFP_KERNEL);
if (!indio_dev->channels) {
dev_err(&pdev->dev, "failed to duplicate channels\n");
return -ENOMEM;
}
ret = incl_3d_parse_report(pdev, hsdev,
(struct iio_chan_spec *)indio_dev->channels,
HID_USAGE_SENSOR_INCLINOMETER_3D,
incl_state);
if (ret) {
dev_err(&pdev->dev, "failed to setup attributes\n");
return ret;
}
indio_dev->num_channels = ARRAY_SIZE(incl_3d_channels);
indio_dev->info = &incl_3d_info;
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
atomic_set(&incl_state->common_attributes.data_ready, 0);
ret = hid_sensor_setup_trigger(indio_dev, name,
&incl_state->common_attributes);
if (ret) {
dev_err(&pdev->dev, "trigger setup failed\n");
return ret;
}
ret = iio_device_register(indio_dev);
if (ret) {
dev_err(&pdev->dev, "device register failed\n");
goto error_remove_trigger;
}
incl_state->callbacks.send_event = incl_3d_proc_event;
incl_state->callbacks.capture_sample = incl_3d_capture_sample;
incl_state->callbacks.pdev = pdev;
ret = sensor_hub_register_callback(hsdev,
HID_USAGE_SENSOR_INCLINOMETER_3D,
&incl_state->callbacks);
if (ret) {
dev_err(&pdev->dev, "callback reg failed\n");
goto error_iio_unreg;
}
return 0;
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
hid_sensor_remove_trigger(indio_dev, &incl_state->common_attributes);
return ret;
}
/* Function to deinitialize the processing for usage id */
static int hid_incl_3d_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct incl_3d_state *incl_state = iio_priv(indio_dev);
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_INCLINOMETER_3D);
iio_device_unregister(indio_dev);
hid_sensor_remove_trigger(indio_dev, &incl_state->common_attributes);
return 0;
}
static const struct platform_device_id hid_incl_3d_ids[] = {
{
/* Format: HID-SENSOR-usage_id_in_hex_lowercase */
.name = "HID-SENSOR-200086",
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, hid_incl_3d_ids);
static struct platform_driver hid_incl_3d_platform_driver = {
.id_table = hid_incl_3d_ids,
.driver = {
.name = KBUILD_MODNAME,
.pm = &hid_sensor_pm_ops,
},
.probe = hid_incl_3d_probe,
.remove = hid_incl_3d_remove,
};
module_platform_driver(hid_incl_3d_platform_driver);
MODULE_DESCRIPTION("HID Sensor Inclinometer 3D");
MODULE_AUTHOR("Srinivas Pandruvada <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(IIO_HID);
| linux-master | drivers/iio/orientation/hid-sensor-incl-3d.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HID Sensors Driver
* Copyright (c) 2014, Intel Corporation.
*/
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
struct dev_rot_state {
struct hid_sensor_hub_callbacks callbacks;
struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info quaternion;
struct {
s32 sampled_vals[4] __aligned(16);
u64 timestamp __aligned(8);
} scan;
int scale_pre_decml;
int scale_post_decml;
int scale_precision;
int value_offset;
s64 timestamp;
};
static const u32 rotation_sensitivity_addresses[] = {
HID_USAGE_SENSOR_DATA_ORIENTATION,
HID_USAGE_SENSOR_ORIENT_QUATERNION,
};
/* Channel definitions */
static const struct iio_chan_spec dev_rot_channels[] = {
{
.type = IIO_ROT,
.modified = 1,
.channel2 = IIO_MOD_QUATERNION,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_HYSTERESIS),
.scan_index = 0
},
IIO_CHAN_SOFT_TIMESTAMP(1)
};
/* Adjust channel real bits based on report descriptor */
static void dev_rot_adjust_channel_bit_mask(struct iio_chan_spec *chan,
int size)
{
chan->scan_type.sign = 's';
/* The number of real bits changes based on the report descriptor. */
chan->scan_type.realbits = size * 8;
/* Maximum size of a sample to capture is u32 */
chan->scan_type.storagebits = sizeof(u32) * 8;
chan->scan_type.repeat = 4;
}
/* Channel read_raw handler */
static int dev_rot_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int size, int *vals, int *val_len,
long mask)
{
struct dev_rot_state *rot_state = iio_priv(indio_dev);
int ret_type;
int i;
vals[0] = 0;
vals[1] = 0;
switch (mask) {
case IIO_CHAN_INFO_RAW:
if (size >= 4) {
for (i = 0; i < 4; ++i)
vals[i] = rot_state->scan.sampled_vals[i];
ret_type = IIO_VAL_INT_MULTIPLE;
*val_len = 4;
} else
ret_type = -EINVAL;
break;
case IIO_CHAN_INFO_SCALE:
vals[0] = rot_state->scale_pre_decml;
vals[1] = rot_state->scale_post_decml;
return rot_state->scale_precision;
case IIO_CHAN_INFO_OFFSET:
*vals = rot_state->value_offset;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ:
ret_type = hid_sensor_read_samp_freq_value(
&rot_state->common_attributes, &vals[0], &vals[1]);
break;
case IIO_CHAN_INFO_HYSTERESIS:
ret_type = hid_sensor_read_raw_hyst_value(
&rot_state->common_attributes, &vals[0], &vals[1]);
break;
default:
ret_type = -EINVAL;
break;
}
return ret_type;
}
/* Channel write_raw handler */
static int dev_rot_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val,
int val2,
long mask)
{
struct dev_rot_state *rot_state = iio_priv(indio_dev);
int ret;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
ret = hid_sensor_write_samp_freq_value(
&rot_state->common_attributes, val, val2);
break;
case IIO_CHAN_INFO_HYSTERESIS:
ret = hid_sensor_write_raw_hyst_value(
&rot_state->common_attributes, val, val2);
break;
default:
ret = -EINVAL;
}
return ret;
}
static const struct iio_info dev_rot_info = {
.read_raw_multi = &dev_rot_read_raw,
.write_raw = &dev_rot_write_raw,
};
/* Callback handler to send event after all samples are received and captured */
static int dev_rot_proc_event(struct hid_sensor_hub_device *hsdev,
unsigned usage_id,
void *priv)
{
struct iio_dev *indio_dev = platform_get_drvdata(priv);
struct dev_rot_state *rot_state = iio_priv(indio_dev);
dev_dbg(&indio_dev->dev, "dev_rot_proc_event\n");
if (atomic_read(&rot_state->common_attributes.data_ready)) {
if (!rot_state->timestamp)
rot_state->timestamp = iio_get_time_ns(indio_dev);
iio_push_to_buffers_with_timestamp(indio_dev, &rot_state->scan,
rot_state->timestamp);
rot_state->timestamp = 0;
}
return 0;
}
/* Capture samples in local storage */
static int dev_rot_capture_sample(struct hid_sensor_hub_device *hsdev,
unsigned usage_id,
size_t raw_len, char *raw_data,
void *priv)
{
struct iio_dev *indio_dev = platform_get_drvdata(priv);
struct dev_rot_state *rot_state = iio_priv(indio_dev);
if (usage_id == HID_USAGE_SENSOR_ORIENT_QUATERNION) {
if (raw_len / 4 == sizeof(s16)) {
rot_state->scan.sampled_vals[0] = ((s16 *)raw_data)[0];
rot_state->scan.sampled_vals[1] = ((s16 *)raw_data)[1];
rot_state->scan.sampled_vals[2] = ((s16 *)raw_data)[2];
rot_state->scan.sampled_vals[3] = ((s16 *)raw_data)[3];
} else {
memcpy(&rot_state->scan.sampled_vals, raw_data,
sizeof(rot_state->scan.sampled_vals));
}
dev_dbg(&indio_dev->dev, "Recd Quat len:%zu::%zu\n", raw_len,
sizeof(rot_state->scan.sampled_vals));
} else if (usage_id == HID_USAGE_SENSOR_TIME_TIMESTAMP) {
rot_state->timestamp = hid_sensor_convert_timestamp(&rot_state->common_attributes,
*(s64 *)raw_data);
}
return 0;
}
/* Parse the report that is specific to a usage id */
static int dev_rot_parse_report(struct platform_device *pdev,
struct hid_sensor_hub_device *hsdev,
struct iio_chan_spec *channels,
unsigned usage_id,
struct dev_rot_state *st)
{
int ret;
ret = sensor_hub_input_get_attribute_info(hsdev,
HID_INPUT_REPORT,
usage_id,
HID_USAGE_SENSOR_ORIENT_QUATERNION,
&st->quaternion);
if (ret)
return ret;
dev_rot_adjust_channel_bit_mask(&channels[0],
st->quaternion.size / 4);
dev_dbg(&pdev->dev, "dev_rot %x:%x\n", st->quaternion.index,
st->quaternion.report_id);
dev_dbg(&pdev->dev, "dev_rot: attrib size %d\n",
st->quaternion.size);
st->scale_precision = hid_sensor_format_scale(
hsdev->usage,
&st->quaternion,
&st->scale_pre_decml, &st->scale_post_decml);
return 0;
}
/* Function to initialize the processing for usage id */
static int hid_dev_rot_probe(struct platform_device *pdev)
{
int ret;
char *name;
struct iio_dev *indio_dev;
struct dev_rot_state *rot_state;
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
indio_dev = devm_iio_device_alloc(&pdev->dev,
sizeof(struct dev_rot_state));
if (indio_dev == NULL)
return -ENOMEM;
platform_set_drvdata(pdev, indio_dev);
rot_state = iio_priv(indio_dev);
rot_state->common_attributes.hsdev = hsdev;
rot_state->common_attributes.pdev = pdev;
switch (hsdev->usage) {
case HID_USAGE_SENSOR_DEVICE_ORIENTATION:
name = "dev_rotation";
break;
case HID_USAGE_SENSOR_RELATIVE_ORIENTATION:
name = "relative_orientation";
break;
case HID_USAGE_SENSOR_GEOMAGNETIC_ORIENTATION:
name = "geomagnetic_orientation";
break;
default:
return -EINVAL;
}
ret = hid_sensor_parse_common_attributes(hsdev,
hsdev->usage,
&rot_state->common_attributes,
rotation_sensitivity_addresses,
ARRAY_SIZE(rotation_sensitivity_addresses));
if (ret) {
dev_err(&pdev->dev, "failed to setup common attributes\n");
return ret;
}
indio_dev->channels = devm_kmemdup(&pdev->dev, dev_rot_channels,
sizeof(dev_rot_channels),
GFP_KERNEL);
if (!indio_dev->channels) {
dev_err(&pdev->dev, "failed to duplicate channels\n");
return -ENOMEM;
}
ret = dev_rot_parse_report(pdev, hsdev,
(struct iio_chan_spec *)indio_dev->channels,
hsdev->usage, rot_state);
if (ret) {
dev_err(&pdev->dev, "failed to setup attributes\n");
return ret;
}
indio_dev->num_channels = ARRAY_SIZE(dev_rot_channels);
indio_dev->info = &dev_rot_info;
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
atomic_set(&rot_state->common_attributes.data_ready, 0);
ret = hid_sensor_setup_trigger(indio_dev, name,
&rot_state->common_attributes);
if (ret) {
dev_err(&pdev->dev, "trigger setup failed\n");
return ret;
}
ret = iio_device_register(indio_dev);
if (ret) {
dev_err(&pdev->dev, "device register failed\n");
goto error_remove_trigger;
}
rot_state->callbacks.send_event = dev_rot_proc_event;
rot_state->callbacks.capture_sample = dev_rot_capture_sample;
rot_state->callbacks.pdev = pdev;
ret = sensor_hub_register_callback(hsdev, hsdev->usage,
&rot_state->callbacks);
if (ret) {
dev_err(&pdev->dev, "callback reg failed\n");
goto error_iio_unreg;
}
return 0;
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
hid_sensor_remove_trigger(indio_dev, &rot_state->common_attributes);
return ret;
}
/* Function to deinitialize the processing for usage id */
static int hid_dev_rot_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct dev_rot_state *rot_state = iio_priv(indio_dev);
sensor_hub_remove_callback(hsdev, hsdev->usage);
iio_device_unregister(indio_dev);
hid_sensor_remove_trigger(indio_dev, &rot_state->common_attributes);
return 0;
}
static const struct platform_device_id hid_dev_rot_ids[] = {
{
/* Format: HID-SENSOR-usage_id_in_hex_lowercase */
.name = "HID-SENSOR-20008a",
},
{
/* Relative orientation(AG) sensor */
.name = "HID-SENSOR-20008e",
},
{
/* Geomagnetic orientation(AM) sensor */
.name = "HID-SENSOR-2000c1",
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, hid_dev_rot_ids);
static struct platform_driver hid_dev_rot_platform_driver = {
.id_table = hid_dev_rot_ids,
.driver = {
.name = KBUILD_MODNAME,
.pm = &hid_sensor_pm_ops,
},
.probe = hid_dev_rot_probe,
.remove = hid_dev_rot_remove,
};
module_platform_driver(hid_dev_rot_platform_driver);
MODULE_DESCRIPTION("HID Sensor Device Rotation");
MODULE_AUTHOR("Srinivas Pandruvada <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(IIO_HID);
| linux-master | drivers/iio/orientation/hid-sensor-rotation.c |
// SPDX-License-Identifier: GPL-2.0
/*
* AD7746 capacitive sensor driver supporting AD7745, AD7746 and AD7747
*
* Copyright 2011 Analog Devices Inc.
*/
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include <asm/unaligned.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
/* AD7746 Register Definition */
#define AD7746_REG_STATUS 0
#define AD7746_REG_CAP_DATA_HIGH 1
#define AD7746_REG_VT_DATA_HIGH 4
#define AD7746_REG_CAP_SETUP 7
#define AD7746_REG_VT_SETUP 8
#define AD7746_REG_EXC_SETUP 9
#define AD7746_REG_CFG 10
#define AD7746_REG_CAPDACA 11
#define AD7746_REG_CAPDACB 12
#define AD7746_REG_CAP_OFFH 13
#define AD7746_REG_CAP_GAINH 15
#define AD7746_REG_VOLT_GAINH 17
/* Status Register Bit Designations (AD7746_REG_STATUS) */
#define AD7746_STATUS_EXCERR BIT(3)
#define AD7746_STATUS_RDY BIT(2)
#define AD7746_STATUS_RDYVT BIT(1)
#define AD7746_STATUS_RDYCAP BIT(0)
/* Capacitive Channel Setup Register Bit Designations (AD7746_REG_CAP_SETUP) */
#define AD7746_CAPSETUP_CAPEN BIT(7)
#define AD7746_CAPSETUP_CIN2 BIT(6) /* AD7746 only */
#define AD7746_CAPSETUP_CAPDIFF BIT(5)
#define AD7746_CAPSETUP_CACHOP BIT(0)
/* Voltage/Temperature Setup Register Bit Designations (AD7746_REG_VT_SETUP) */
#define AD7746_VTSETUP_VTEN BIT(7)
#define AD7746_VTSETUP_VTMD_MASK GENMASK(6, 5)
#define AD7746_VTSETUP_VTMD_INT_TEMP 0
#define AD7746_VTSETUP_VTMD_EXT_TEMP 1
#define AD7746_VTSETUP_VTMD_VDD_MON 2
#define AD7746_VTSETUP_VTMD_EXT_VIN 3
#define AD7746_VTSETUP_EXTREF BIT(4)
#define AD7746_VTSETUP_VTSHORT BIT(1)
#define AD7746_VTSETUP_VTCHOP BIT(0)
/* Excitation Setup Register Bit Designations (AD7746_REG_EXC_SETUP) */
#define AD7746_EXCSETUP_CLKCTRL BIT(7)
#define AD7746_EXCSETUP_EXCON BIT(6)
#define AD7746_EXCSETUP_EXCB BIT(5)
#define AD7746_EXCSETUP_NEXCB BIT(4)
#define AD7746_EXCSETUP_EXCA BIT(3)
#define AD7746_EXCSETUP_NEXCA BIT(2)
#define AD7746_EXCSETUP_EXCLVL_MASK GENMASK(1, 0)
/* Config Register Bit Designations (AD7746_REG_CFG) */
#define AD7746_CONF_VTFS_MASK GENMASK(7, 6)
#define AD7746_CONF_CAPFS_MASK GENMASK(5, 3)
#define AD7746_CONF_MODE_MASK GENMASK(2, 0)
#define AD7746_CONF_MODE_IDLE 0
#define AD7746_CONF_MODE_CONT_CONV 1
#define AD7746_CONF_MODE_SINGLE_CONV 2
#define AD7746_CONF_MODE_PWRDN 3
#define AD7746_CONF_MODE_OFFS_CAL 5
#define AD7746_CONF_MODE_GAIN_CAL 6
/* CAPDAC Register Bit Designations (AD7746_REG_CAPDACx) */
#define AD7746_CAPDAC_DACEN BIT(7)
#define AD7746_CAPDAC_DACP_MASK GENMASK(6, 0)
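/*
 * Per-device state. capdac[channel][A/B] caches the CAPDACA/CAPDACB values
 * programmed for each capacitive channel and capdac_set records which
 * channel's values are currently written to the hardware.
 */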
struct ad7746_chip_info {
struct i2c_client *client;
struct mutex lock; /* protect sensor state */
/*
* Capacitive channel digital filter setup;
* conversion time/update rate setup per channel
*/
u8 config;
u8 cap_setup;
u8 vt_setup;
u8 capdac[2][2];
s8 capdac_set;
};
enum ad7746_chan {
VIN,
VIN_VDD,
TEMP_INT,
TEMP_EXT,
CIN1,
CIN1_DIFF,
CIN2,
CIN2_DIFF,
};
struct ad7746_chan_info {
u8 addr;
union {
u8 vtmd;
struct { /* CAP SETUP fields */
unsigned int cin2 : 1;
unsigned int capdiff : 1;
};
};
};
static const struct ad7746_chan_info ad7746_chan_info[] = {
[VIN] = {
.addr = AD7746_REG_VT_DATA_HIGH,
.vtmd = AD7746_VTSETUP_VTMD_EXT_VIN,
},
[VIN_VDD] = {
.addr = AD7746_REG_VT_DATA_HIGH,
.vtmd = AD7746_VTSETUP_VTMD_VDD_MON,
},
[TEMP_INT] = {
.addr = AD7746_REG_VT_DATA_HIGH,
.vtmd = AD7746_VTSETUP_VTMD_INT_TEMP,
},
[TEMP_EXT] = {
.addr = AD7746_REG_VT_DATA_HIGH,
.vtmd = AD7746_VTSETUP_VTMD_EXT_TEMP,
},
[CIN1] = {
.addr = AD7746_REG_CAP_DATA_HIGH,
},
[CIN1_DIFF] = {
.addr = AD7746_REG_CAP_DATA_HIGH,
.capdiff = 1,
},
[CIN2] = {
.addr = AD7746_REG_CAP_DATA_HIGH,
.cin2 = 1,
},
[CIN2_DIFF] = {
.addr = AD7746_REG_CAP_DATA_HIGH,
.cin2 = 1,
.capdiff = 1,
},
};
static const struct iio_chan_spec ad7746_channels[] = {
[VIN] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 0,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = VIN,
},
[VIN_VDD] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
.extend_name = "supply",
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = VIN_VDD,
},
[TEMP_INT] = {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
.address = TEMP_INT,
},
[TEMP_EXT] = {
.type = IIO_TEMP,
.indexed = 1,
.channel = 1,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
.address = TEMP_EXT,
},
[CIN1] = {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 0,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
.info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = CIN1,
},
[CIN1_DIFF] = {
.type = IIO_CAPACITANCE,
.differential = 1,
.indexed = 1,
.channel = 0,
.channel2 = 2,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_ZEROPOINT),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
.info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = CIN1_DIFF,
},
[CIN2] = {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 1,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
.info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = CIN2,
},
[CIN2_DIFF] = {
.type = IIO_CAPACITANCE,
.differential = 1,
.indexed = 1,
.channel = 1,
.channel2 = 3,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_ZEROPOINT),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
.info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.address = CIN2_DIFF,
}
};
/* Values are Update Rate (Hz), Conversion Time (ms) + 1 */
static const unsigned char ad7746_vt_filter_rate_table[][2] = {
{ 50, 20 + 1 }, { 31, 32 + 1 }, { 16, 62 + 1 }, { 8, 122 + 1 },
};
static const unsigned char ad7746_cap_filter_rate_table[][2] = {
{ 91, 11 + 1 }, { 84, 12 + 1 }, { 50, 20 + 1 }, { 26, 38 + 1 },
{ 16, 62 + 1 }, { 13, 77 + 1 }, { 11, 92 + 1 }, { 9, 110 + 1 },
};
static int ad7746_set_capdac(struct ad7746_chip_info *chip, int channel)
{
int ret = i2c_smbus_write_byte_data(chip->client,
AD7746_REG_CAPDACA,
chip->capdac[channel][0]);
if (ret < 0)
return ret;
return i2c_smbus_write_byte_data(chip->client,
AD7746_REG_CAPDACB,
chip->capdac[channel][1]);
}
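/*
 * Enable the front end (capacitive or voltage/temperature) needed for the
 * requested channel, disable the other one and program the CAPDACs where
 * relevant. Returns the conversion delay in milliseconds to wait before
 * reading a result, or a negative error code.
 */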
static int ad7746_select_channel(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan)
{
struct ad7746_chip_info *chip = iio_priv(indio_dev);
u8 vt_setup, cap_setup;
int ret, delay, idx;
switch (chan->type) {
case IIO_CAPACITANCE:
cap_setup = FIELD_PREP(AD7746_CAPSETUP_CIN2,
ad7746_chan_info[chan->address].cin2) |
FIELD_PREP(AD7746_CAPSETUP_CAPDIFF,
ad7746_chan_info[chan->address].capdiff) |
FIELD_PREP(AD7746_CAPSETUP_CAPEN, 1);
vt_setup = chip->vt_setup & ~AD7746_VTSETUP_VTEN;
idx = FIELD_GET(AD7746_CONF_CAPFS_MASK, chip->config);
delay = ad7746_cap_filter_rate_table[idx][1];
ret = ad7746_set_capdac(chip, chan->channel);
if (ret < 0)
return ret;
chip->capdac_set = chan->channel;
break;
case IIO_VOLTAGE:
case IIO_TEMP:
vt_setup = FIELD_PREP(AD7746_VTSETUP_VTMD_MASK,
ad7746_chan_info[chan->address].vtmd) |
FIELD_PREP(AD7746_VTSETUP_VTEN, 1);
cap_setup = chip->cap_setup & ~AD7746_CAPSETUP_CAPEN;
idx = FIELD_GET(AD7746_CONF_VTFS_MASK, chip->config);
delay = ad7746_cap_filter_rate_table[idx][1];
break;
default:
return -EINVAL;
}
if (chip->cap_setup != cap_setup) {
ret = i2c_smbus_write_byte_data(chip->client,
AD7746_REG_CAP_SETUP,
cap_setup);
if (ret < 0)
return ret;
chip->cap_setup = cap_setup;
}
if (chip->vt_setup != vt_setup) {
ret = i2c_smbus_write_byte_data(chip->client,
AD7746_REG_VT_SETUP,
vt_setup);
if (ret < 0)
return ret;
chip->vt_setup = vt_setup;
}
return delay;
}
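/*
 * Common helper for the calibration attributes: start the requested
 * calibration mode and poll the configuration register until the mode bits
 * clear again or the retry budget runs out.
 */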
static inline ssize_t ad7746_start_calib(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len,
u8 regval)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7746_chip_info *chip = iio_priv(indio_dev);
int ret, timeout = 10;
bool doit;
ret = kstrtobool(buf, &doit);
if (ret < 0)
return ret;
if (!doit)
return 0;
mutex_lock(&chip->lock);
regval |= chip->config;
ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CFG, regval);
if (ret < 0)
goto unlock;
do {
msleep(20);
ret = i2c_smbus_read_byte_data(chip->client, AD7746_REG_CFG);
if (ret < 0)
goto unlock;
} while ((ret == regval) && timeout--);
mutex_unlock(&chip->lock);
return len;
unlock:
mutex_unlock(&chip->lock);
return ret;
}
static ssize_t ad7746_start_offset_calib(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
int ret = ad7746_select_channel(indio_dev,
&ad7746_channels[to_iio_dev_attr(attr)->address]);
if (ret < 0)
return ret;
return ad7746_start_calib(dev, attr, buf, len,
FIELD_PREP(AD7746_CONF_MODE_MASK,
AD7746_CONF_MODE_OFFS_CAL));
}
static ssize_t ad7746_start_gain_calib(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
int ret = ad7746_select_channel(indio_dev,
&ad7746_channels[to_iio_dev_attr(attr)->address]);
if (ret < 0)
return ret;
return ad7746_start_calib(dev, attr, buf, len,
FIELD_PREP(AD7746_CONF_MODE_MASK,
AD7746_CONF_MODE_GAIN_CAL));
}
static IIO_DEVICE_ATTR(in_capacitance0_calibbias_calibration,
0200, NULL, ad7746_start_offset_calib, CIN1);
static IIO_DEVICE_ATTR(in_capacitance1_calibbias_calibration,
0200, NULL, ad7746_start_offset_calib, CIN2);
static IIO_DEVICE_ATTR(in_capacitance0_calibscale_calibration,
0200, NULL, ad7746_start_gain_calib, CIN1);
static IIO_DEVICE_ATTR(in_capacitance1_calibscale_calibration,
0200, NULL, ad7746_start_gain_calib, CIN2);
static IIO_DEVICE_ATTR(in_voltage0_calibscale_calibration,
0200, NULL, ad7746_start_gain_calib, VIN);
static int ad7746_store_cap_filter_rate_setup(struct ad7746_chip_info *chip,
int val)
{
int i;
for (i = 0; i < ARRAY_SIZE(ad7746_cap_filter_rate_table); i++)
if (val >= ad7746_cap_filter_rate_table[i][0])
break;
if (i >= ARRAY_SIZE(ad7746_cap_filter_rate_table))
i = ARRAY_SIZE(ad7746_cap_filter_rate_table) - 1;
chip->config &= ~AD7746_CONF_CAPFS_MASK;
chip->config |= FIELD_PREP(AD7746_CONF_CAPFS_MASK, i);
return 0;
}
static int ad7746_store_vt_filter_rate_setup(struct ad7746_chip_info *chip,
int val)
{
int i;
for (i = 0; i < ARRAY_SIZE(ad7746_vt_filter_rate_table); i++)
if (val >= ad7746_vt_filter_rate_table[i][0])
break;
if (i >= ARRAY_SIZE(ad7746_vt_filter_rate_table))
i = ARRAY_SIZE(ad7746_vt_filter_rate_table) - 1;
chip->config &= ~AD7746_CONF_VTFS_MASK;
chip->config |= FIELD_PREP(AD7746_CONF_VTFS_MASK, i);
return 0;
}
static struct attribute *ad7746_attributes[] = {
&iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr,
&iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr,
&iio_dev_attr_in_capacitance1_calibscale_calibration.dev_attr.attr,
&iio_dev_attr_in_capacitance1_calibbias_calibration.dev_attr.attr,
&iio_dev_attr_in_voltage0_calibscale_calibration.dev_attr.attr,
NULL,
};
static const struct attribute_group ad7746_attribute_group = {
.attrs = ad7746_attributes,
};
static int ad7746_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val,
int val2,
long mask)
{
struct ad7746_chip_info *chip = iio_priv(indio_dev);
int ret, reg;
switch (mask) {
case IIO_CHAN_INFO_CALIBSCALE:
if (val != 1)
return -EINVAL;
val = (val2 * 1024) / 15625;
switch (chan->type) {
case IIO_CAPACITANCE:
reg = AD7746_REG_CAP_GAINH;
break;
case IIO_VOLTAGE:
reg = AD7746_REG_VOLT_GAINH;
break;
default:
return -EINVAL;
}
mutex_lock(&chip->lock);
ret = i2c_smbus_write_word_swapped(chip->client, reg, val);
mutex_unlock(&chip->lock);
if (ret < 0)
return ret;
return 0;
case IIO_CHAN_INFO_CALIBBIAS:
if (val < 0 || val > 0xFFFF)
return -EINVAL;
mutex_lock(&chip->lock);
ret = i2c_smbus_write_word_swapped(chip->client,
AD7746_REG_CAP_OFFH, val);
mutex_unlock(&chip->lock);
if (ret < 0)
return ret;
return 0;
case IIO_CHAN_INFO_OFFSET:
case IIO_CHAN_INFO_ZEROPOINT:
if (val < 0 || val > 43008000) /* 21pF */
return -EINVAL;
/*
* CAPDAC Scale = 21pF_typ / 127
* CIN Scale = 8.192pF / 2^24
* Offset Scale = CAPDAC Scale / CIN Scale = 338646
*/
val /= 338646;
mutex_lock(&chip->lock);
chip->capdac[chan->channel][chan->differential] = val > 0 ?
FIELD_PREP(AD7746_CAPDAC_DACP_MASK, val) | AD7746_CAPDAC_DACEN : 0;
ret = ad7746_set_capdac(chip, chan->channel);
if (ret < 0) {
mutex_unlock(&chip->lock);
return ret;
}
chip->capdac_set = chan->channel;
mutex_unlock(&chip->lock);
return 0;
case IIO_CHAN_INFO_SAMP_FREQ:
if (val2)
return -EINVAL;
switch (chan->type) {
case IIO_CAPACITANCE:
mutex_lock(&chip->lock);
ret = ad7746_store_cap_filter_rate_setup(chip, val);
mutex_unlock(&chip->lock);
return ret;
case IIO_VOLTAGE:
mutex_lock(&chip->lock);
ret = ad7746_store_vt_filter_rate_setup(chip, val);
mutex_unlock(&chip->lock);
return ret;
default:
return -EINVAL;
}
default:
return -EINVAL;
}
}
static const int ad7746_v_samp_freq[] = { 50, 31, 16, 8, };
static const int ad7746_cap_samp_freq[] = { 91, 84, 50, 26, 16, 13, 11, 9, };
static int ad7746_read_avail(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, const int **vals,
int *type, int *length, long mask)
{
if (mask != IIO_CHAN_INFO_SAMP_FREQ)
return -EINVAL;
switch (chan->type) {
case IIO_VOLTAGE:
*vals = ad7746_v_samp_freq;
*length = ARRAY_SIZE(ad7746_v_samp_freq);
break;
case IIO_CAPACITANCE:
*vals = ad7746_cap_samp_freq;
*length = ARRAY_SIZE(ad7746_cap_samp_freq);
break;
default:
return -EINVAL;
}
*type = IIO_VAL_INT;
return IIO_AVAIL_LIST;
}
static int ad7746_read_channel(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val)
{
struct ad7746_chip_info *chip = iio_priv(indio_dev);
int ret, delay;
u8 data[3];
u8 regval;
ret = ad7746_select_channel(indio_dev, chan);
if (ret < 0)
return ret;
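/* ad7746_select_channel() returns the delay (in ms) to wait for a single conversion. */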
delay = ret;
regval = chip->config | FIELD_PREP(AD7746_CONF_MODE_MASK,
AD7746_CONF_MODE_SINGLE_CONV);
ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CFG, regval);
if (ret < 0)
return ret;
msleep(delay);
/* Now read the actual register */
ret = i2c_smbus_read_i2c_block_data(chip->client,
ad7746_chan_info[chan->address].addr,
sizeof(data), data);
if (ret < 0)
return ret;
/*
* Offset applied internally because the _offset userspace interface is
* needed for the CAP DACs which apply a controllable offset.
*/
*val = get_unaligned_be24(data) - 0x800000;
return 0;
}
static int ad7746_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2,
long mask)
{
struct ad7746_chip_info *chip = iio_priv(indio_dev);
int ret, idx;
u8 reg;
switch (mask) {
case IIO_CHAN_INFO_RAW:
mutex_lock(&chip->lock);
ret = ad7746_read_channel(indio_dev, chan, val);
mutex_unlock(&chip->lock);
if (ret < 0)
return ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_CALIBSCALE:
switch (chan->type) {
case IIO_CAPACITANCE:
reg = AD7746_REG_CAP_GAINH;
break;
case IIO_VOLTAGE:
reg = AD7746_REG_VOLT_GAINH;
break;
default:
return -EINVAL;
}
mutex_lock(&chip->lock);
ret = i2c_smbus_read_word_swapped(chip->client, reg);
mutex_unlock(&chip->lock);
if (ret < 0)
return ret;
/* 1 + gain_val / 2^16 */
*val = 1;
*val2 = (15625 * ret) / 1024;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_CALIBBIAS:
mutex_lock(&chip->lock);
ret = i2c_smbus_read_word_swapped(chip->client,
AD7746_REG_CAP_OFFH);
mutex_unlock(&chip->lock);
if (ret < 0)
return ret;
*val = ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
case IIO_CHAN_INFO_ZEROPOINT:
*val = FIELD_GET(AD7746_CAPDAC_DACP_MASK,
chip->capdac[chan->channel][chan->differential]) * 338646;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_CAPACITANCE:
/* 8.192pf / 2^24 */
*val = 0;
*val2 = 488;
return IIO_VAL_INT_PLUS_NANO;
case IIO_VOLTAGE:
/* 1170mV / 2^23 */
*val = 1170;
if (chan->channel == 1)
*val *= 6;
*val2 = 23;
return IIO_VAL_FRACTIONAL_LOG2;
case IIO_TEMP:
*val = 125;
*val2 = 8;
return IIO_VAL_FRACTIONAL_LOG2;
default:
return -EINVAL;
}
case IIO_CHAN_INFO_SAMP_FREQ:
switch (chan->type) {
case IIO_CAPACITANCE:
idx = FIELD_GET(AD7746_CONF_CAPFS_MASK, chip->config);
*val = ad7746_cap_filter_rate_table[idx][0];
return IIO_VAL_INT;
case IIO_VOLTAGE:
idx = FIELD_GET(AD7746_CONF_VTFS_MASK, chip->config);
*val = ad7746_vt_filter_rate_table[idx][0];
return IIO_VAL_INT;
default:
return -EINVAL;
}
default:
return -EINVAL;
}
}
static const struct iio_info ad7746_info = {
.attrs = &ad7746_attribute_group,
.read_raw = ad7746_read_raw,
.read_avail = ad7746_read_avail,
.write_raw = ad7746_write_raw,
};
static int ad7746_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct device *dev = &client->dev;
struct ad7746_chip_info *chip;
struct iio_dev *indio_dev;
unsigned char regval = 0;
unsigned int vdd_permille;
int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
if (!indio_dev)
return -ENOMEM;
chip = iio_priv(indio_dev);
mutex_init(&chip->lock);
chip->client = client;
chip->capdac_set = -1;
indio_dev->name = id->name;
indio_dev->info = &ad7746_info;
indio_dev->channels = ad7746_channels;
if (id->driver_data == 7746)
indio_dev->num_channels = ARRAY_SIZE(ad7746_channels);
else
indio_dev->num_channels = ARRAY_SIZE(ad7746_channels) - 2;
indio_dev->modes = INDIO_DIRECT_MODE;
if (device_property_read_bool(dev, "adi,exca-output-en")) {
if (device_property_read_bool(dev, "adi,exca-output-invert"))
regval |= AD7746_EXCSETUP_NEXCA;
else
regval |= AD7746_EXCSETUP_EXCA;
}
if (device_property_read_bool(dev, "adi,excb-output-en")) {
if (device_property_read_bool(dev, "adi,excb-output-invert"))
regval |= AD7746_EXCSETUP_NEXCB;
else
regval |= AD7746_EXCSETUP_EXCB;
}
ret = device_property_read_u32(dev, "adi,excitation-vdd-permille",
&vdd_permille);
if (!ret) {
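/* Map the excitation level, given as a permille fraction of VDD, to the EXCLVL field. */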
switch (vdd_permille) {
case 125:
regval |= FIELD_PREP(AD7746_EXCSETUP_EXCLVL_MASK, 0);
break;
case 250:
regval |= FIELD_PREP(AD7746_EXCSETUP_EXCLVL_MASK, 1);
break;
case 375:
regval |= FIELD_PREP(AD7746_EXCSETUP_EXCLVL_MASK, 2);
break;
case 500:
regval |= FIELD_PREP(AD7746_EXCSETUP_EXCLVL_MASK, 3);
break;
default:
break;
}
}
ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_EXC_SETUP,
regval);
if (ret < 0)
return ret;
return devm_iio_device_register(indio_dev->dev.parent, indio_dev);
}
static const struct i2c_device_id ad7746_id[] = {
{ "ad7745", 7745 },
{ "ad7746", 7746 },
{ "ad7747", 7747 },
{}
};
MODULE_DEVICE_TABLE(i2c, ad7746_id);
static const struct of_device_id ad7746_of_match[] = {
{ .compatible = "adi,ad7745" },
{ .compatible = "adi,ad7746" },
{ .compatible = "adi,ad7747" },
{ },
};
MODULE_DEVICE_TABLE(of, ad7746_of_match);
static struct i2c_driver ad7746_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = ad7746_of_match,
},
.probe = ad7746_probe,
.id_table = ad7746_id,
};
module_i2c_driver(ad7746_driver);
MODULE_AUTHOR("Michael Hennerich <[email protected]>");
MODULE_DESCRIPTION("Analog Devices AD7746/5/7 capacitive sensor driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/cdc/ad7746.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* AD7150 capacitive sensor driver supporting AD7150/1/6
*
* Copyright 2010-2011 Analog Devices Inc.
* Copyright 2021 Jonathan Cameron <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
#define AD7150_STATUS_REG 0
#define AD7150_STATUS_OUT1 BIT(3)
#define AD7150_STATUS_OUT2 BIT(5)
#define AD7150_CH1_DATA_HIGH_REG 1
#define AD7150_CH2_DATA_HIGH_REG 3
#define AD7150_CH1_AVG_HIGH_REG 5
#define AD7150_CH2_AVG_HIGH_REG 7
#define AD7150_CH1_SENSITIVITY_REG 9
#define AD7150_CH1_THR_HOLD_H_REG 9
#define AD7150_CH1_TIMEOUT_REG 10
#define AD7150_CH_TIMEOUT_RECEDING GENMASK(3, 0)
#define AD7150_CH_TIMEOUT_APPROACHING GENMASK(7, 4)
#define AD7150_CH1_SETUP_REG 11
#define AD7150_CH2_SENSITIVITY_REG 12
#define AD7150_CH2_THR_HOLD_H_REG 12
#define AD7150_CH2_TIMEOUT_REG 13
#define AD7150_CH2_SETUP_REG 14
#define AD7150_CFG_REG 15
#define AD7150_CFG_FIX BIT(7)
#define AD7150_CFG_THRESHTYPE_MSK GENMASK(6, 5)
#define AD7150_CFG_TT_NEG 0x0
#define AD7150_CFG_TT_POS 0x1
#define AD7150_CFG_TT_IN_WINDOW 0x2
#define AD7150_CFG_TT_OUT_WINDOW 0x3
#define AD7150_PD_TIMER_REG 16
#define AD7150_CH1_CAPDAC_REG 17
#define AD7150_CH2_CAPDAC_REG 18
#define AD7150_SN3_REG 19
#define AD7150_SN2_REG 20
#define AD7150_SN1_REG 21
#define AD7150_SN0_REG 22
#define AD7150_ID_REG 23
enum {
AD7150,
AD7151,
};
/**
* struct ad7150_chip_info - instance specific chip data
* @client: i2c client for this device
* @threshold: thresholds for simple capacitance value events
* @thresh_sensitivity: threshold for simple capacitance offset
* from 'average' value.
* @thresh_timeout: a timeout, in samples, from the moment an
* adaptive threshold event occurs to when the average
* value jumps to the current value. Note it is made up of two fields:
* 3:0 are for timeout receding - applies if below lower threshold
* 7:4 are for timeout approaching - applies if above upper threshold
* @state_lock: ensure consistent state of this structure wrt the
* hardware.
* @interrupts: one or two interrupt numbers depending on device type.
* @int_enabled: is a given interrupt currently enabled.
* @type: threshold type
* @dir: threshold direction
*/
struct ad7150_chip_info {
struct i2c_client *client;
u16 threshold[2][2];
u8 thresh_sensitivity[2][2];
u8 thresh_timeout[2][2];
struct mutex state_lock;
int interrupts[2];
bool int_enabled[2];
enum iio_event_type type;
enum iio_event_direction dir;
};
static const u8 ad7150_addresses[][6] = {
{ AD7150_CH1_DATA_HIGH_REG, AD7150_CH1_AVG_HIGH_REG,
AD7150_CH1_SETUP_REG, AD7150_CH1_THR_HOLD_H_REG,
AD7150_CH1_SENSITIVITY_REG, AD7150_CH1_TIMEOUT_REG },
{ AD7150_CH2_DATA_HIGH_REG, AD7150_CH2_AVG_HIGH_REG,
AD7150_CH2_SETUP_REG, AD7150_CH2_THR_HOLD_H_REG,
AD7150_CH2_SENSITIVITY_REG, AD7150_CH2_TIMEOUT_REG },
};
static int ad7150_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
int *val2,
long mask)
{
struct ad7150_chip_info *chip = iio_priv(indio_dev);
int channel = chan->channel;
int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = i2c_smbus_read_word_swapped(chip->client,
ad7150_addresses[channel][0]);
if (ret < 0)
return ret;
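/* The conversion result is left-justified in the 16-bit data register, hence the shift. */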
*val = ret >> 4;
return IIO_VAL_INT;
case IIO_CHAN_INFO_AVERAGE_RAW:
ret = i2c_smbus_read_word_swapped(chip->client,
ad7150_addresses[channel][1]);
if (ret < 0)
return ret;
*val = ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
/*
* The base unit for capacitance is nanofarads and the value
* calculated from the datasheet formula is in picofarads,
* so multiply by 1000.
*/
*val = 1000;
*val2 = 40944 >> 4; /* To match shift in _RAW */
return IIO_VAL_FRACTIONAL;
case IIO_CHAN_INFO_OFFSET:
*val = -(12288 >> 4); /* To match shift in _RAW */
return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ:
/* Strangely same for both 1 and 2 chan parts */
*val = 100;
return IIO_VAL_INT;
default:
return -EINVAL;
}
}
static int ad7150_read_event_config(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir)
{
struct ad7150_chip_info *chip = iio_priv(indio_dev);
u8 threshtype;
bool thrfixed;
int ret;
ret = i2c_smbus_read_byte_data(chip->client, AD7150_CFG_REG);
if (ret < 0)
return ret;
threshtype = FIELD_GET(AD7150_CFG_THRESHTYPE_MSK, ret);
/* Check if threshold mode is fixed or adaptive. */
thrfixed = FIELD_GET(AD7150_CFG_FIX, ret);
switch (type) {
case IIO_EV_TYPE_THRESH_ADAPTIVE:
if (dir == IIO_EV_DIR_RISING)
return !thrfixed && (threshtype == AD7150_CFG_TT_POS);
return !thrfixed && (threshtype == AD7150_CFG_TT_NEG);
case IIO_EV_TYPE_THRESH:
if (dir == IIO_EV_DIR_RISING)
return thrfixed && (threshtype == AD7150_CFG_TT_POS);
return thrfixed && (threshtype == AD7150_CFG_TT_NEG);
default:
break;
}
return -EINVAL;
}
/* state_lock should be held to ensure consistent state */
static int ad7150_write_event_params(struct iio_dev *indio_dev,
unsigned int chan,
enum iio_event_type type,
enum iio_event_direction dir)
{
struct ad7150_chip_info *chip = iio_priv(indio_dev);
int rising = (dir == IIO_EV_DIR_RISING);
/* Only update value live, if parameter is in use */
if ((type != chip->type) || (dir != chip->dir))
return 0;
switch (type) {
/* Note completely different from the adaptive versions */
case IIO_EV_TYPE_THRESH: {
u16 value = chip->threshold[rising][chan];
return i2c_smbus_write_word_swapped(chip->client,
ad7150_addresses[chan][3],
value);
}
case IIO_EV_TYPE_THRESH_ADAPTIVE: {
int ret;
u8 sens, timeout;
sens = chip->thresh_sensitivity[rising][chan];
ret = i2c_smbus_write_byte_data(chip->client,
ad7150_addresses[chan][4],
sens);
if (ret)
return ret;
/*
* Single timeout register contains timeouts for both
* directions.
*/
timeout = FIELD_PREP(AD7150_CH_TIMEOUT_APPROACHING,
chip->thresh_timeout[1][chan]);
timeout |= FIELD_PREP(AD7150_CH_TIMEOUT_RECEDING,
chip->thresh_timeout[0][chan]);
return i2c_smbus_write_byte_data(chip->client,
ad7150_addresses[chan][5],
timeout);
}
default:
return -EINVAL;
}
}
static int ad7150_write_event_config(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir, int state)
{
struct ad7150_chip_info *chip = iio_priv(indio_dev);
int ret = 0;
/*
* There is only a single shared control and no on-chip
* interrupt disables for the two interrupt lines.
* Enabling therefore switches the configured event to whatever was
* most recently requested and, if necessary, calls enable_irq() for
* that channel's interrupt; any disable calls disable_irq() for that
* channel's interrupt.
*/
if (!state) {
if ((chip->int_enabled[chan->channel]) &&
(type == chip->type) && (dir == chip->dir)) {
disable_irq(chip->interrupts[chan->channel]);
chip->int_enabled[chan->channel] = false;
}
return 0;
}
mutex_lock(&chip->state_lock);
if ((type != chip->type) || (dir != chip->dir)) {
int rising = (dir == IIO_EV_DIR_RISING);
u8 thresh_type, cfg, fixed;
/*
* Need to temporarily disable both interrupts if
* enabled - this is to avoid races around changing
* config and thresholds.
* Note enable/disable_irq() are reference counted so
* no need to check if already enabled.
*/
disable_irq(chip->interrupts[0]);
disable_irq(chip->interrupts[1]);
ret = i2c_smbus_read_byte_data(chip->client, AD7150_CFG_REG);
if (ret < 0)
goto error_ret;
cfg = ret & ~(AD7150_CFG_THRESHTYPE_MSK | AD7150_CFG_FIX);
if (type == IIO_EV_TYPE_THRESH_ADAPTIVE)
fixed = 0;
else
fixed = 1;
if (rising)
thresh_type = AD7150_CFG_TT_POS;
else
thresh_type = AD7150_CFG_TT_NEG;
cfg |= FIELD_PREP(AD7150_CFG_FIX, fixed) |
FIELD_PREP(AD7150_CFG_THRESHTYPE_MSK, thresh_type);
ret = i2c_smbus_write_byte_data(chip->client, AD7150_CFG_REG,
cfg);
if (ret < 0)
goto error_ret;
/*
* There is a potential race condition here, but not easy
* to close given we can't disable the interrupt at the
* chip side of things. Rely on the status bit.
*/
chip->type = type;
chip->dir = dir;
/* update control attributes */
ret = ad7150_write_event_params(indio_dev, chan->channel, type,
dir);
if (ret)
goto error_ret;
/* Re-enable any IRQs we disabled whilst changing mode. */
enable_irq(chip->interrupts[0]);
enable_irq(chip->interrupts[1]);
}
if (!chip->int_enabled[chan->channel]) {
enable_irq(chip->interrupts[chan->channel]);
chip->int_enabled[chan->channel] = true;
}
error_ret:
mutex_unlock(&chip->state_lock);
return ret;
}
static int ad7150_read_event_value(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
enum iio_event_info info,
int *val, int *val2)
{
struct ad7150_chip_info *chip = iio_priv(indio_dev);
int rising = (dir == IIO_EV_DIR_RISING);
/* Complex register sharing going on here */
switch (info) {
case IIO_EV_INFO_VALUE:
switch (type) {
case IIO_EV_TYPE_THRESH_ADAPTIVE:
*val = chip->thresh_sensitivity[rising][chan->channel];
return IIO_VAL_INT;
case IIO_EV_TYPE_THRESH:
*val = chip->threshold[rising][chan->channel];
return IIO_VAL_INT;
default:
return -EINVAL;
}
case IIO_EV_INFO_TIMEOUT:
*val = 0;
*val2 = chip->thresh_timeout[rising][chan->channel] * 10000;
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
}
static int ad7150_write_event_value(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
enum iio_event_info info,
int val, int val2)
{
int ret;
struct ad7150_chip_info *chip = iio_priv(indio_dev);
int rising = (dir == IIO_EV_DIR_RISING);
mutex_lock(&chip->state_lock);
switch (info) {
case IIO_EV_INFO_VALUE:
switch (type) {
case IIO_EV_TYPE_THRESH_ADAPTIVE:
chip->thresh_sensitivity[rising][chan->channel] = val;
break;
case IIO_EV_TYPE_THRESH:
chip->threshold[rising][chan->channel] = val;
break;
default:
ret = -EINVAL;
goto error_ret;
}
break;
case IIO_EV_INFO_TIMEOUT: {
/*
* Raw timeout is in cycles of 10 msecs as long as both
* channels are enabled.
* In terms of INT_PLUS_MICRO, that is in units of 10,000
*/
int timeout = val2 / 10000;
if (val != 0 || timeout < 0 || timeout > 15 || val2 % 10000) {
ret = -EINVAL;
goto error_ret;
}
chip->thresh_timeout[rising][chan->channel] = timeout;
break;
}
default:
ret = -EINVAL;
goto error_ret;
}
/* write back if active */
ret = ad7150_write_event_params(indio_dev, chan->channel, type, dir);
error_ret:
mutex_unlock(&chip->state_lock);
return ret;
}
static const struct iio_event_spec ad7150_events[] = {
{
.type = IIO_EV_TYPE_THRESH,
.dir = IIO_EV_DIR_RISING,
.mask_separate = BIT(IIO_EV_INFO_VALUE) |
BIT(IIO_EV_INFO_ENABLE),
}, {
.type = IIO_EV_TYPE_THRESH,
.dir = IIO_EV_DIR_FALLING,
.mask_separate = BIT(IIO_EV_INFO_VALUE) |
BIT(IIO_EV_INFO_ENABLE),
}, {
.type = IIO_EV_TYPE_THRESH_ADAPTIVE,
.dir = IIO_EV_DIR_RISING,
.mask_separate = BIT(IIO_EV_INFO_VALUE) |
BIT(IIO_EV_INFO_ENABLE) |
BIT(IIO_EV_INFO_TIMEOUT),
}, {
.type = IIO_EV_TYPE_THRESH_ADAPTIVE,
.dir = IIO_EV_DIR_FALLING,
.mask_separate = BIT(IIO_EV_INFO_VALUE) |
BIT(IIO_EV_INFO_ENABLE) |
BIT(IIO_EV_INFO_TIMEOUT),
},
};
#define AD7150_CAPACITANCE_CHAN(_chan) { \
.type = IIO_CAPACITANCE, \
.indexed = 1, \
.channel = _chan, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_AVERAGE_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_OFFSET), \
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
.event_spec = ad7150_events, \
.num_event_specs = ARRAY_SIZE(ad7150_events), \
}
#define AD7150_CAPACITANCE_CHAN_NO_IRQ(_chan) { \
.type = IIO_CAPACITANCE, \
.indexed = 1, \
.channel = _chan, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_AVERAGE_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_OFFSET), \
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
}
static const struct iio_chan_spec ad7150_channels[] = {
AD7150_CAPACITANCE_CHAN(0),
AD7150_CAPACITANCE_CHAN(1),
};
static const struct iio_chan_spec ad7150_channels_no_irq[] = {
AD7150_CAPACITANCE_CHAN_NO_IRQ(0),
AD7150_CAPACITANCE_CHAN_NO_IRQ(1),
};
static const struct iio_chan_spec ad7151_channels[] = {
AD7150_CAPACITANCE_CHAN(0),
};
static const struct iio_chan_spec ad7151_channels_no_irq[] = {
AD7150_CAPACITANCE_CHAN_NO_IRQ(0),
};
static irqreturn_t __ad7150_event_handler(void *private, u8 status_mask,
int channel)
{
struct iio_dev *indio_dev = private;
struct ad7150_chip_info *chip = iio_priv(indio_dev);
s64 timestamp = iio_get_time_ns(indio_dev);
int int_status;
int_status = i2c_smbus_read_byte_data(chip->client, AD7150_STATUS_REG);
if (int_status < 0)
return IRQ_HANDLED;
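/* Only report an event if this channel's OUT bit is set in the status register. */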
if (!(int_status & status_mask))
return IRQ_HANDLED;
iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_CAPACITANCE, channel,
chip->type, chip->dir),
timestamp);
return IRQ_HANDLED;
}
static irqreturn_t ad7150_event_handler_ch1(int irq, void *private)
{
return __ad7150_event_handler(private, AD7150_STATUS_OUT1, 0);
}
static irqreturn_t ad7150_event_handler_ch2(int irq, void *private)
{
return __ad7150_event_handler(private, AD7150_STATUS_OUT2, 1);
}
static IIO_CONST_ATTR(in_capacitance_thresh_adaptive_timeout_available,
"[0 0.01 0.15]");
static struct attribute *ad7150_event_attributes[] = {
&iio_const_attr_in_capacitance_thresh_adaptive_timeout_available
.dev_attr.attr,
NULL,
};
static const struct attribute_group ad7150_event_attribute_group = {
.attrs = ad7150_event_attributes,
.name = "events",
};
static const struct iio_info ad7150_info = {
.event_attrs = &ad7150_event_attribute_group,
.read_raw = &ad7150_read_raw,
.read_event_config = &ad7150_read_event_config,
.write_event_config = &ad7150_write_event_config,
.read_event_value = &ad7150_read_event_value,
.write_event_value = &ad7150_write_event_value,
};
static const struct iio_info ad7150_info_no_irq = {
.read_raw = &ad7150_read_raw,
};
static int ad7150_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct ad7150_chip_info *chip;
struct iio_dev *indio_dev;
bool use_irq = true;
int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
if (!indio_dev)
return -ENOMEM;
chip = iio_priv(indio_dev);
mutex_init(&chip->state_lock);
chip->client = client;
indio_dev->name = id->name;
indio_dev->modes = INDIO_DIRECT_MODE;
ret = devm_regulator_get_enable(&client->dev, "vdd");
if (ret)
return ret;
chip->interrupts[0] = fwnode_irq_get(dev_fwnode(&client->dev), 0);
if (chip->interrupts[0] < 0)
use_irq = false;
else if (id->driver_data == AD7150) {
chip->interrupts[1] = fwnode_irq_get(dev_fwnode(&client->dev), 1);
if (chip->interrupts[1] < 0)
use_irq = false;
}
if (use_irq) {
irq_set_status_flags(chip->interrupts[0], IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(&client->dev,
chip->interrupts[0],
NULL,
&ad7150_event_handler_ch1,
IRQF_TRIGGER_RISING |
IRQF_ONESHOT,
"ad7150_irq1",
indio_dev);
if (ret)
return ret;
indio_dev->info = &ad7150_info;
switch (id->driver_data) {
case AD7150:
indio_dev->channels = ad7150_channels;
indio_dev->num_channels = ARRAY_SIZE(ad7150_channels);
irq_set_status_flags(chip->interrupts[1], IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(&client->dev,
chip->interrupts[1],
NULL,
&ad7150_event_handler_ch2,
IRQF_TRIGGER_RISING |
IRQF_ONESHOT,
"ad7150_irq2",
indio_dev);
if (ret)
return ret;
break;
case AD7151:
indio_dev->channels = ad7151_channels;
indio_dev->num_channels = ARRAY_SIZE(ad7151_channels);
break;
default:
return -EINVAL;
}
} else {
indio_dev->info = &ad7150_info_no_irq;
switch (id->driver_data) {
case AD7150:
indio_dev->channels = ad7150_channels_no_irq;
indio_dev->num_channels =
ARRAY_SIZE(ad7150_channels_no_irq);
break;
case AD7151:
indio_dev->channels = ad7151_channels_no_irq;
indio_dev->num_channels =
ARRAY_SIZE(ad7151_channels_no_irq);
break;
default:
return -EINVAL;
}
}
return devm_iio_device_register(indio_dev->dev.parent, indio_dev);
}
static const struct i2c_device_id ad7150_id[] = {
{ "ad7150", AD7150 },
{ "ad7151", AD7151 },
{ "ad7156", AD7150 },
{}
};
MODULE_DEVICE_TABLE(i2c, ad7150_id);
static const struct of_device_id ad7150_of_match[] = {
{ "adi,ad7150" },
{ "adi,ad7151" },
{ "adi,ad7156" },
{}
};
static struct i2c_driver ad7150_driver = {
.driver = {
.name = "ad7150",
.of_match_table = ad7150_of_match,
},
.probe = ad7150_probe,
.id_table = ad7150_id,
};
module_i2c_driver(ad7150_driver);
MODULE_AUTHOR("Barry Song <[email protected]>");
MODULE_DESCRIPTION("Analog Devices AD7150/1/6 capacitive sensor driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/cdc/ad7150.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Maxim Integrated MAX5432-MAX5435 digital potentiometer driver
* Copyright (C) 2019 Martin Kaiser <[email protected]>
*
* Datasheet:
* https://datasheets.maximintegrated.com/en/ds/MAX5432-MAX5435.pdf
*/
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
/* All chip variants have 32 wiper positions. */
#define MAX5432_MAX_POS 31
#define MAX5432_OHM_50K (50 * 1000)
#define MAX5432_OHM_100K (100 * 1000)
/* Update the volatile (currently active) setting. */
#define MAX5432_CMD_VREG 0x11
struct max5432_data {
struct i2c_client *client;
unsigned long ohm;
};
static const struct iio_chan_spec max5432_channels[] = {
{
.type = IIO_RESISTANCE,
.indexed = 1,
.output = 1,
.channel = 0,
.address = MAX5432_CMD_VREG,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
}
};
static int max5432_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct max5432_data *data = iio_priv(indio_dev);
if (mask != IIO_CHAN_INFO_SCALE)
return -EINVAL;
if (unlikely(data->ohm > INT_MAX))
return -ERANGE;
*val = data->ohm;
*val2 = MAX5432_MAX_POS;
return IIO_VAL_FRACTIONAL;
}
static int max5432_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct max5432_data *data = iio_priv(indio_dev);
u8 data_byte;
if (mask != IIO_CHAN_INFO_RAW)
return -EINVAL;
if (val < 0 || val > MAX5432_MAX_POS)
return -EINVAL;
if (val2 != 0)
return -EINVAL;
/* Wiper position is in bits D7-D3. (D2-D0 are don't care bits.) */
data_byte = val << 3;
return i2c_smbus_write_byte_data(data->client, chan->address,
data_byte);
}
static const struct iio_info max5432_info = {
.read_raw = max5432_read_raw,
.write_raw = max5432_write_raw,
};
static int max5432_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct iio_dev *indio_dev;
struct max5432_data *data;
indio_dev = devm_iio_device_alloc(dev, sizeof(struct max5432_data));
if (!indio_dev)
return -ENOMEM;
i2c_set_clientdata(client, indio_dev);
data = iio_priv(indio_dev);
data->client = client;
data->ohm = (unsigned long)device_get_match_data(dev);
indio_dev->info = &max5432_info;
indio_dev->channels = max5432_channels;
indio_dev->num_channels = ARRAY_SIZE(max5432_channels);
indio_dev->name = client->name;
return devm_iio_device_register(dev, indio_dev);
}
static const struct of_device_id max5432_dt_ids[] = {
{ .compatible = "maxim,max5432", .data = (void *)MAX5432_OHM_50K },
{ .compatible = "maxim,max5433", .data = (void *)MAX5432_OHM_100K },
{ .compatible = "maxim,max5434", .data = (void *)MAX5432_OHM_50K },
{ .compatible = "maxim,max5435", .data = (void *)MAX5432_OHM_100K },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, max5432_dt_ids);
static struct i2c_driver max5432_driver = {
.driver = {
.name = "max5432",
.of_match_table = max5432_dt_ids,
},
.probe = max5432_probe,
};
module_i2c_driver(max5432_driver);
MODULE_AUTHOR("Martin Kaiser <[email protected]>");
MODULE_DESCRIPTION("max5432-max5435 digital potentiometers");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/potentiometer/max5432.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Analog Devices AD5110 digital potentiometer driver
*
* Copyright (C) 2021 Mugilraj Dhavachelvan <[email protected]>
*
* Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/AD5110_5112_5114.pdf
*/
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
/* AD5110 commands */
#define AD5110_EEPROM_WR 1
#define AD5110_RDAC_WR 2
#define AD5110_SHUTDOWN 3
#define AD5110_RESET 4
#define AD5110_RDAC_RD 5
#define AD5110_EEPROM_RD 6
/* AD5110_EEPROM_RD data */
#define AD5110_WIPER_POS 0
#define AD5110_RESISTOR_TOL 1
#define AD5110_WIPER_RESISTANCE 70
struct ad5110_cfg {
int max_pos;
int kohms;
int shift;
};
enum ad5110_type {
AD5110_10,
AD5110_80,
AD5112_05,
AD5112_10,
AD5112_80,
AD5114_10,
AD5114_80,
};
static const struct ad5110_cfg ad5110_cfg[] = {
[AD5110_10] = { .max_pos = 128, .kohms = 10 },
[AD5110_80] = { .max_pos = 128, .kohms = 80 },
[AD5112_05] = { .max_pos = 64, .kohms = 5, .shift = 1 },
[AD5112_10] = { .max_pos = 64, .kohms = 10, .shift = 1 },
[AD5112_80] = { .max_pos = 64, .kohms = 80, .shift = 1 },
[AD5114_10] = { .max_pos = 32, .kohms = 10, .shift = 2 },
[AD5114_80] = { .max_pos = 32, .kohms = 80, .shift = 2 },
};
struct ad5110_data {
struct i2c_client *client;
s16 tol; /* resistor tolerance */
bool enable;
struct mutex lock;
const struct ad5110_cfg *cfg;
/*
* DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
u8 buf[2] __aligned(IIO_DMA_MINALIGN);
};
static const struct iio_chan_spec ad5110_channels[] = {
{
.type = IIO_RESISTANCE,
.output = 1,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_ENABLE),
},
};
static int ad5110_read(struct ad5110_data *data, u8 cmd, int *val)
{
int ret;
mutex_lock(&data->lock);
data->buf[0] = cmd;
data->buf[1] = *val;
ret = i2c_master_send_dmasafe(data->client, data->buf, sizeof(data->buf));
if (ret < 0) {
goto error;
} else if (ret != sizeof(data->buf)) {
ret = -EIO;
goto error;
}
ret = i2c_master_recv_dmasafe(data->client, data->buf, 1);
if (ret < 0) {
goto error;
} else if (ret != 1) {
ret = -EIO;
goto error;
}
*val = data->buf[0];
ret = 0;
error:
mutex_unlock(&data->lock);
return ret;
}
static int ad5110_write(struct ad5110_data *data, u8 cmd, u8 val)
{
int ret;
mutex_lock(&data->lock);
data->buf[0] = cmd;
data->buf[1] = val;
ret = i2c_master_send_dmasafe(data->client, data->buf, sizeof(data->buf));
if (ret < 0) {
goto error;
} else if (ret != sizeof(data->buf)) {
ret = -EIO;
goto error;
}
ret = 0;
error:
mutex_unlock(&data->lock);
return ret;
}
static int ad5110_resistor_tol(struct ad5110_data *data, u8 cmd, int val)
{
int ret;
ret = ad5110_read(data, cmd, &val);
if (ret)
return ret;
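/*
 * Bits [6:0] give the tolerance magnitude in steps of 0.125% of the
 * nominal resistance (kohms * 10 / 8 ohms per step); bit 7 is the sign.
 * The result is stored in ohms.
 */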
data->tol = data->cfg->kohms * (val & GENMASK(6, 0)) * 10 / 8;
if (!(val & BIT(7)))
data->tol *= -1;
return 0;
}
static ssize_t store_eeprom_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad5110_data *data = iio_priv(indio_dev);
int val = AD5110_WIPER_POS;
int ret;
ret = ad5110_read(data, AD5110_EEPROM_RD, &val);
if (ret)
return ret;
val = val >> data->cfg->shift;
return iio_format_value(buf, IIO_VAL_INT, 1, &val);
}
static ssize_t store_eeprom_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad5110_data *data = iio_priv(indio_dev);
int ret;
ret = ad5110_write(data, AD5110_EEPROM_WR, 0);
if (ret) {
dev_err(&data->client->dev, "RDAC to EEPROM write failed\n");
return ret;
}
/* The storing of EEPROM data takes approximately 18 ms. */
msleep(20);
return len;
}
static IIO_DEVICE_ATTR_RW(store_eeprom, 0);
static struct attribute *ad5110_attributes[] = {
&iio_dev_attr_store_eeprom.dev_attr.attr,
NULL
};
static const struct attribute_group ad5110_attribute_group = {
.attrs = ad5110_attributes,
};
static int ad5110_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct ad5110_data *data = iio_priv(indio_dev);
int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = ad5110_read(data, AD5110_RDAC_RD, val);
if (ret)
return ret;
*val = *val >> data->cfg->shift;
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
*val = AD5110_WIPER_RESISTANCE * data->cfg->max_pos;
*val2 = 1000 * data->cfg->kohms + data->tol;
return IIO_VAL_FRACTIONAL;
case IIO_CHAN_INFO_SCALE:
*val = 1000 * data->cfg->kohms + data->tol;
*val2 = data->cfg->max_pos;
return IIO_VAL_FRACTIONAL;
case IIO_CHAN_INFO_ENABLE:
*val = data->enable;
return IIO_VAL_INT;
default:
return -EINVAL;
}
}
static int ad5110_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct ad5110_data *data = iio_priv(indio_dev);
int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
if (val > data->cfg->max_pos || val < 0)
return -EINVAL;
return ad5110_write(data, AD5110_RDAC_WR, val << data->cfg->shift);
case IIO_CHAN_INFO_ENABLE:
if (val < 0 || val > 1)
return -EINVAL;
if (data->enable == val)
return 0;
ret = ad5110_write(data, AD5110_SHUTDOWN, val ? 0 : 1);
if (ret)
return ret;
data->enable = val;
return 0;
default:
return -EINVAL;
}
}
static const struct iio_info ad5110_info = {
.read_raw = ad5110_read_raw,
.write_raw = ad5110_write_raw,
.attrs = &ad5110_attribute_group,
};
#define AD5110_COMPATIBLE(of_compatible, cfg) { \
.compatible = of_compatible, \
.data = &ad5110_cfg[cfg], \
}
static const struct of_device_id ad5110_of_match[] = {
AD5110_COMPATIBLE("adi,ad5110-10", AD5110_10),
AD5110_COMPATIBLE("adi,ad5110-80", AD5110_80),
AD5110_COMPATIBLE("adi,ad5112-05", AD5112_05),
AD5110_COMPATIBLE("adi,ad5112-10", AD5112_10),
AD5110_COMPATIBLE("adi,ad5112-80", AD5112_80),
AD5110_COMPATIBLE("adi,ad5114-10", AD5114_10),
AD5110_COMPATIBLE("adi,ad5114-80", AD5114_80),
{ }
};
MODULE_DEVICE_TABLE(of, ad5110_of_match);
static const struct i2c_device_id ad5110_id[] = {
{ "ad5110-10", AD5110_10 },
{ "ad5110-80", AD5110_80 },
{ "ad5112-05", AD5112_05 },
{ "ad5112-10", AD5112_10 },
{ "ad5112-80", AD5112_80 },
{ "ad5114-10", AD5114_10 },
{ "ad5114-80", AD5114_80 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ad5110_id);
static int ad5110_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct iio_dev *indio_dev;
struct ad5110_data *data;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
data->client = client;
mutex_init(&data->lock);
data->enable = 1;
data->cfg = device_get_match_data(dev);
/* refresh RDAC register with EEPROM */
ret = ad5110_write(data, AD5110_RESET, 0);
if (ret) {
dev_err(dev, "Refresh RDAC with EEPROM failed\n");
return ret;
}
ret = ad5110_resistor_tol(data, AD5110_EEPROM_RD, AD5110_RESISTOR_TOL);
if (ret) {
dev_err(dev, "Read resistor tolerance failed\n");
return ret;
}
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &ad5110_info;
indio_dev->channels = ad5110_channels;
indio_dev->num_channels = ARRAY_SIZE(ad5110_channels);
indio_dev->name = client->name;
return devm_iio_device_register(dev, indio_dev);
}
static struct i2c_driver ad5110_driver = {
.driver = {
.name = "ad5110",
.of_match_table = ad5110_of_match,
},
.probe = ad5110_probe,
.id_table = ad5110_id,
};
module_i2c_driver(ad5110_driver);
MODULE_AUTHOR("Mugilraj Dhavachelvan <[email protected]>");
MODULE_DESCRIPTION("AD5110 digital potentiometer");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/potentiometer/ad5110.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Industrial I/O driver for Microchip digital potentiometers
* Copyright (c) 2018 Axentia Technologies AB
* Author: Peter Rosin <[email protected]>
*
* Datasheet: http://www.microchip.com/downloads/en/DeviceDoc/22147a.pdf
*
* DEVID #Wipers #Positions Resistor Opts (kOhm)
* mcp4017 1 128 5, 10, 50, 100
* mcp4018 1 128 5, 10, 50, 100
* mcp4019 1 128 5, 10, 50, 100
*/
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#define MCP4018_WIPER_MAX 127
struct mcp4018_cfg {
int kohms;
};
enum mcp4018_type {
MCP4018_502,
MCP4018_103,
MCP4018_503,
MCP4018_104,
};
static const struct mcp4018_cfg mcp4018_cfg[] = {
[MCP4018_502] = { .kohms = 5, },
[MCP4018_103] = { .kohms = 10, },
[MCP4018_503] = { .kohms = 50, },
[MCP4018_104] = { .kohms = 100, },
};
struct mcp4018_data {
struct i2c_client *client;
const struct mcp4018_cfg *cfg;
};
static const struct iio_chan_spec mcp4018_channel = {
.type = IIO_RESISTANCE,
.indexed = 1,
.output = 1,
.channel = 0,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
};
static int mcp4018_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct mcp4018_data *data = iio_priv(indio_dev);
s32 ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = i2c_smbus_read_byte(data->client);
if (ret < 0)
return ret;
*val = ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 1000 * data->cfg->kohms;
*val2 = MCP4018_WIPER_MAX;
return IIO_VAL_FRACTIONAL;
}
return -EINVAL;
}
static int mcp4018_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct mcp4018_data *data = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_RAW:
if (val > MCP4018_WIPER_MAX || val < 0)
return -EINVAL;
break;
default:
return -EINVAL;
}
return i2c_smbus_write_byte(data->client, val);
}
static const struct iio_info mcp4018_info = {
.read_raw = mcp4018_read_raw,
.write_raw = mcp4018_write_raw,
};
#define MCP4018_ID_TABLE(_name, cfg) { \
.name = _name, \
.driver_data = (kernel_ulong_t)&mcp4018_cfg[cfg], \
}
static const struct i2c_device_id mcp4018_id[] = {
MCP4018_ID_TABLE("mcp4017-502", MCP4018_502),
MCP4018_ID_TABLE("mcp4017-103", MCP4018_103),
MCP4018_ID_TABLE("mcp4017-503", MCP4018_503),
MCP4018_ID_TABLE("mcp4017-104", MCP4018_104),
MCP4018_ID_TABLE("mcp4018-502", MCP4018_502),
MCP4018_ID_TABLE("mcp4018-103", MCP4018_103),
MCP4018_ID_TABLE("mcp4018-503", MCP4018_503),
MCP4018_ID_TABLE("mcp4018-104", MCP4018_104),
MCP4018_ID_TABLE("mcp4019-502", MCP4018_502),
MCP4018_ID_TABLE("mcp4019-103", MCP4018_103),
MCP4018_ID_TABLE("mcp4019-503", MCP4018_503),
MCP4018_ID_TABLE("mcp4019-104", MCP4018_104),
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, mcp4018_id);
#define MCP4018_COMPATIBLE(of_compatible, cfg) { \
.compatible = of_compatible, \
.data = &mcp4018_cfg[cfg], \
}
static const struct of_device_id mcp4018_of_match[] = {
MCP4018_COMPATIBLE("microchip,mcp4017-502", MCP4018_502),
MCP4018_COMPATIBLE("microchip,mcp4017-103", MCP4018_103),
MCP4018_COMPATIBLE("microchip,mcp4017-503", MCP4018_503),
MCP4018_COMPATIBLE("microchip,mcp4017-104", MCP4018_104),
MCP4018_COMPATIBLE("microchip,mcp4018-502", MCP4018_502),
MCP4018_COMPATIBLE("microchip,mcp4018-103", MCP4018_103),
MCP4018_COMPATIBLE("microchip,mcp4018-503", MCP4018_503),
MCP4018_COMPATIBLE("microchip,mcp4018-104", MCP4018_104),
MCP4018_COMPATIBLE("microchip,mcp4019-502", MCP4018_502),
MCP4018_COMPATIBLE("microchip,mcp4019-103", MCP4018_103),
MCP4018_COMPATIBLE("microchip,mcp4019-503", MCP4018_503),
MCP4018_COMPATIBLE("microchip,mcp4019-104", MCP4018_104),
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mcp4018_of_match);
static int mcp4018_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct mcp4018_data *data;
struct iio_dev *indio_dev;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE)) {
dev_err(dev, "SMBUS Byte transfers not supported\n");
return -EOPNOTSUPP;
}
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
data->cfg = i2c_get_match_data(client);
indio_dev->info = &mcp4018_info;
indio_dev->channels = &mcp4018_channel;
indio_dev->num_channels = 1;
indio_dev->name = client->name;
return devm_iio_device_register(dev, indio_dev);
}
static struct i2c_driver mcp4018_driver = {
.driver = {
.name = "mcp4018",
.of_match_table = mcp4018_of_match,
},
.probe = mcp4018_probe,
.id_table = mcp4018_id,
};
module_i2c_driver(mcp4018_driver);
MODULE_AUTHOR("Peter Rosin <[email protected]>");
MODULE_DESCRIPTION("MCP4018 digital potentiometer");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/potentiometer/mcp4018.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Maxim Integrated MAX5481-MAX5484 digital potentiometer driver
* Copyright 2016 Rockwell Collins
*
* Datasheet:
* https://datasheets.maximintegrated.com/en/ds/MAX5481-MAX5484.pdf
*/
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
/* write wiper reg */
#define MAX5481_WRITE_WIPER (0 << 4)
/* copy wiper reg to NV reg */
#define MAX5481_COPY_AB_TO_NV (2 << 4)
/* copy NV reg to wiper reg */
#define MAX5481_COPY_NV_TO_AB (3 << 4)
#define MAX5481_MAX_POS 1023
enum max5481_variant {
max5481,
max5482,
max5483,
max5484,
};
struct max5481_cfg {
int kohms;
};
static const struct max5481_cfg max5481_cfg[] = {
[max5481] = { .kohms = 10, },
[max5482] = { .kohms = 50, },
[max5483] = { .kohms = 10, },
[max5484] = { .kohms = 50, },
};
struct max5481_data {
struct spi_device *spi;
const struct max5481_cfg *cfg;
u8 msg[3] __aligned(IIO_DMA_MINALIGN);
};
#define MAX5481_CHANNEL { \
.type = IIO_RESISTANCE, \
.indexed = 1, \
.output = 1, \
.channel = 0, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
}
static const struct iio_chan_spec max5481_channels[] = {
MAX5481_CHANNEL,
};
static int max5481_write_cmd(struct max5481_data *data, u8 cmd, u16 val)
{
struct spi_device *spi = data->spi;
data->msg[0] = cmd;
switch (cmd) {
case MAX5481_WRITE_WIPER:
data->msg[1] = val >> 2;
data->msg[2] = (val & 0x3) << 6;
return spi_write(spi, data->msg, 3);
case MAX5481_COPY_AB_TO_NV:
case MAX5481_COPY_NV_TO_AB:
return spi_write(spi, data->msg, 1);
default:
return -EIO;
}
}
static int max5481_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct max5481_data *data = iio_priv(indio_dev);
if (mask != IIO_CHAN_INFO_SCALE)
return -EINVAL;
*val = 1000 * data->cfg->kohms;
*val2 = MAX5481_MAX_POS;
return IIO_VAL_FRACTIONAL;
}
static int max5481_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct max5481_data *data = iio_priv(indio_dev);
if (mask != IIO_CHAN_INFO_RAW)
return -EINVAL;
if (val < 0 || val > MAX5481_MAX_POS)
return -EINVAL;
return max5481_write_cmd(data, MAX5481_WRITE_WIPER, val);
}
static const struct iio_info max5481_info = {
.read_raw = max5481_read_raw,
.write_raw = max5481_write_raw,
};
static const struct of_device_id max5481_match[] = {
{ .compatible = "maxim,max5481", .data = &max5481_cfg[max5481] },
{ .compatible = "maxim,max5482", .data = &max5481_cfg[max5482] },
{ .compatible = "maxim,max5483", .data = &max5481_cfg[max5483] },
{ .compatible = "maxim,max5484", .data = &max5481_cfg[max5484] },
{ }
};
MODULE_DEVICE_TABLE(of, max5481_match);
static void max5481_wiper_save(void *data)
{
max5481_write_cmd(data, MAX5481_COPY_AB_TO_NV, 0);
}
static int max5481_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct max5481_data *data;
const struct spi_device_id *id = spi_get_device_id(spi);
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
data->spi = spi;
data->cfg = device_get_match_data(&spi->dev);
if (!data->cfg)
data->cfg = &max5481_cfg[id->driver_data];
indio_dev->name = id->name;
indio_dev->modes = INDIO_DIRECT_MODE;
/* variant specific configuration */
indio_dev->info = &max5481_info;
indio_dev->channels = max5481_channels;
indio_dev->num_channels = ARRAY_SIZE(max5481_channels);
/* restore wiper from NV */
ret = max5481_write_cmd(data, MAX5481_COPY_NV_TO_AB, 0);
if (ret < 0)
return ret;
ret = devm_add_action(&spi->dev, max5481_wiper_save, data);
if (ret < 0)
return ret;
return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id max5481_id_table[] = {
{ "max5481", max5481 },
{ "max5482", max5482 },
{ "max5483", max5483 },
{ "max5484", max5484 },
{ }
};
MODULE_DEVICE_TABLE(spi, max5481_id_table);
static struct spi_driver max5481_driver = {
.driver = {
.name = "max5481",
.of_match_table = max5481_match,
},
.probe = max5481_probe,
.id_table = max5481_id_table,
};
module_spi_driver(max5481_driver);
MODULE_AUTHOR("Maury Anderson <[email protected]>");
MODULE_DESCRIPTION("max5481 SPI driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/potentiometer/max5481.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Maxim Integrated DS1803 and similar digital potentiometer driver
* Copyright (c) 2016 Slawomir Stepien
* Copyright (c) 2022 Jagath Jog J
*
* Datasheet: https://datasheets.maximintegrated.com/en/ds/DS1803.pdf
* Datasheet: https://datasheets.maximintegrated.com/en/ds/DS3502.pdf
*
* DEVID #Wipers #Positions Resistor Opts (kOhm) i2c address
* ds1803 2 256 10, 50, 100 0101xxx
* ds3502 1 128 10 01010xx
*/
#include <linux/err.h>
#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#define DS1803_WIPER_0 0xA9
#define DS1803_WIPER_1 0xAA
#define DS3502_WR_IVR 0x00
enum ds1803_type {
DS1803_010,
DS1803_050,
DS1803_100,
DS3502,
};
struct ds1803_cfg {
int wipers;
int avail[3];
int kohms;
const struct iio_chan_spec *channels;
u8 num_channels;
int (*read)(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val);
};
struct ds1803_data {
struct i2c_client *client;
const struct ds1803_cfg *cfg;
};
#define DS1803_CHANNEL(ch, addr) { \
.type = IIO_RESISTANCE, \
.indexed = 1, \
.output = 1, \
.channel = (ch), \
.address = (addr), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_RAW), \
}
static const struct iio_chan_spec ds1803_channels[] = {
DS1803_CHANNEL(0, DS1803_WIPER_0),
DS1803_CHANNEL(1, DS1803_WIPER_1),
};
static const struct iio_chan_spec ds3502_channels[] = {
DS1803_CHANNEL(0, DS3502_WR_IVR),
};
static int ds1803_read(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val)
{
struct ds1803_data *data = iio_priv(indio_dev);
int ret;
u8 result[ARRAY_SIZE(ds1803_channels)];
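/* A single read returns all wiper registers; pick out the requested channel. */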
ret = i2c_master_recv(data->client, result, indio_dev->num_channels);
if (ret < 0)
return ret;
*val = result[chan->channel];
return ret;
}
static int ds3502_read(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val)
{
struct ds1803_data *data = iio_priv(indio_dev);
int ret;
ret = i2c_smbus_read_byte_data(data->client, chan->address);
if (ret < 0)
return ret;
*val = ret;
return ret;
}
static const struct ds1803_cfg ds1803_cfg[] = {
[DS1803_010] = {
.wipers = 2,
.avail = { 0, 1, 255 },
.kohms = 10,
.channels = ds1803_channels,
.num_channels = ARRAY_SIZE(ds1803_channels),
.read = ds1803_read,
},
[DS1803_050] = {
.wipers = 2,
.avail = { 0, 1, 255 },
.kohms = 50,
.channels = ds1803_channels,
.num_channels = ARRAY_SIZE(ds1803_channels),
.read = ds1803_read,
},
[DS1803_100] = {
.wipers = 2,
.avail = { 0, 1, 255 },
.kohms = 100,
.channels = ds1803_channels,
.num_channels = ARRAY_SIZE(ds1803_channels),
.read = ds1803_read,
},
[DS3502] = {
.wipers = 1,
.avail = { 0, 1, 127 },
.kohms = 10,
.channels = ds3502_channels,
.num_channels = ARRAY_SIZE(ds3502_channels),
.read = ds3502_read,
},
};
static int ds1803_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct ds1803_data *data = iio_priv(indio_dev);
int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = data->cfg->read(indio_dev, chan, val);
if (ret < 0)
return ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 1000 * data->cfg->kohms;
*val2 = data->cfg->avail[2]; /* Max wiper position */
return IIO_VAL_FRACTIONAL;
}
return -EINVAL;
}
static int ds1803_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct ds1803_data *data = iio_priv(indio_dev);
u8 addr = chan->address;
int max_pos = data->cfg->avail[2];
if (val2 != 0)
return -EINVAL;
switch (mask) {
case IIO_CHAN_INFO_RAW:
if (val > max_pos || val < 0)
return -EINVAL;
break;
default:
return -EINVAL;
}
return i2c_smbus_write_byte_data(data->client, addr, val);
}
static int ds1803_read_avail(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
const int **vals, int *type,
int *length, long mask)
{
struct ds1803_data *data = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_RAW:
*vals = data->cfg->avail;
*length = ARRAY_SIZE(data->cfg->avail);
*type = IIO_VAL_INT;
return IIO_AVAIL_RANGE;
}
return -EINVAL;
}
static const struct iio_info ds1803_info = {
.read_raw = ds1803_read_raw,
.write_raw = ds1803_write_raw,
.read_avail = ds1803_read_avail,
};
static int ds1803_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct device *dev = &client->dev;
struct ds1803_data *data;
struct iio_dev *indio_dev;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
i2c_set_clientdata(client, indio_dev);
data = iio_priv(indio_dev);
data->client = client;
data->cfg = device_get_match_data(dev);
if (!data->cfg)
data->cfg = &ds1803_cfg[id->driver_data];
indio_dev->info = &ds1803_info;
indio_dev->channels = data->cfg->channels;
indio_dev->num_channels = data->cfg->num_channels;
indio_dev->name = client->name;
return devm_iio_device_register(dev, indio_dev);
}
static const struct of_device_id ds1803_dt_ids[] = {
{ .compatible = "maxim,ds1803-010", .data = &ds1803_cfg[DS1803_010] },
{ .compatible = "maxim,ds1803-050", .data = &ds1803_cfg[DS1803_050] },
{ .compatible = "maxim,ds1803-100", .data = &ds1803_cfg[DS1803_100] },
{ .compatible = "maxim,ds3502", .data = &ds1803_cfg[DS3502] },
{}
};
MODULE_DEVICE_TABLE(of, ds1803_dt_ids);
static const struct i2c_device_id ds1803_id[] = {
{ "ds1803-010", DS1803_010 },
{ "ds1803-050", DS1803_050 },
{ "ds1803-100", DS1803_100 },
{ "ds3502", DS3502 },
{}
};
MODULE_DEVICE_TABLE(i2c, ds1803_id);
static struct i2c_driver ds1803_driver = {
.driver = {
.name = "ds1803",
.of_match_table = ds1803_dt_ids,
},
.probe = ds1803_probe,
.id_table = ds1803_id,
};
module_i2c_driver(ds1803_driver);
MODULE_AUTHOR("Slawomir Stepien <[email protected]>");
MODULE_AUTHOR("Jagath Jog J <[email protected]>");
MODULE_DESCRIPTION("DS1803 digital potentiometer");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/potentiometer/ds1803.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Analog Devices AD5272 digital potentiometer driver
* Copyright (C) 2018 Phil Reid <[email protected]>
*
* Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/AD5272_5274.pdf
*
* DEVID #Wipers #Positions Resistor Opts (kOhm) i2c address
* ad5272 1 1024 20, 50, 100 01011xx
* ad5274 1 256 20, 100 01011xx
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#define AD5272_RDAC_WR 1
#define AD5272_RDAC_RD 2
#define AD5272_RESET 4
#define AD5272_CTL 7
#define AD5272_RDAC_WR_EN BIT(1)
struct ad5272_cfg {
int max_pos;
int kohms;
int shift;
};
enum ad5272_type {
AD5272_020,
AD5272_050,
AD5272_100,
AD5274_020,
AD5274_100,
};
static const struct ad5272_cfg ad5272_cfg[] = {
[AD5272_020] = { .max_pos = 1024, .kohms = 20 },
[AD5272_050] = { .max_pos = 1024, .kohms = 50 },
[AD5272_100] = { .max_pos = 1024, .kohms = 100 },
[AD5274_020] = { .max_pos = 256, .kohms = 20, .shift = 2 },
[AD5274_100] = { .max_pos = 256, .kohms = 100, .shift = 2 },
};
struct ad5272_data {
struct i2c_client *client;
struct mutex lock;
const struct ad5272_cfg *cfg;
u8 buf[2] __aligned(IIO_DMA_MINALIGN);
};
static const struct iio_chan_spec ad5272_channel = {
.type = IIO_RESISTANCE,
.output = 1,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
};
static int ad5272_write(struct ad5272_data *data, int reg, int val)
{
int ret;
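/*
 * First byte: command in bits [5:2], data bits [9:8] in bits [1:0].
 * Second byte: data bits [7:0].
 */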
data->buf[0] = (reg << 2) | ((val >> 8) & 0x3);
data->buf[1] = (u8)val;
mutex_lock(&data->lock);
ret = i2c_master_send(data->client, data->buf, sizeof(data->buf));
mutex_unlock(&data->lock);
return ret < 0 ? ret : 0;
}
static int ad5272_read(struct ad5272_data *data, int reg, int *val)
{
int ret;
data->buf[0] = reg << 2;
data->buf[1] = 0;
mutex_lock(&data->lock);
ret = i2c_master_send(data->client, data->buf, sizeof(data->buf));
if (ret < 0)
goto error;
ret = i2c_master_recv(data->client, data->buf, sizeof(data->buf));
if (ret < 0)
goto error;
*val = ((data->buf[0] & 0x3) << 8) | data->buf[1];
ret = 0;
error:
mutex_unlock(&data->lock);
return ret;
}
static int ad5272_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct ad5272_data *data = iio_priv(indio_dev);
int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW: {
ret = ad5272_read(data, AD5272_RDAC_RD, val);
*val = *val >> data->cfg->shift;
return ret ? ret : IIO_VAL_INT;
}
case IIO_CHAN_INFO_SCALE:
*val = 1000 * data->cfg->kohms;
*val2 = data->cfg->max_pos;
return IIO_VAL_FRACTIONAL;
}
return -EINVAL;
}
static int ad5272_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct ad5272_data *data = iio_priv(indio_dev);
if (mask != IIO_CHAN_INFO_RAW)
return -EINVAL;
if (val >= data->cfg->max_pos || val < 0 || val2)
return -EINVAL;
return ad5272_write(data, AD5272_RDAC_WR, val << data->cfg->shift);
}
static const struct iio_info ad5272_info = {
.read_raw = ad5272_read_raw,
.write_raw = ad5272_write_raw,
};
static int ad5272_reset(struct ad5272_data *data)
{
struct gpio_desc *reset_gpio;
reset_gpio = devm_gpiod_get_optional(&data->client->dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(reset_gpio))
return PTR_ERR(reset_gpio);
if (reset_gpio) {
udelay(1);
gpiod_set_value(reset_gpio, 0);
} else {
ad5272_write(data, AD5272_RESET, 0);
}
usleep_range(1000, 2000);
return 0;
}
static int ad5272_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct device *dev = &client->dev;
struct iio_dev *indio_dev;
struct ad5272_data *data;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
i2c_set_clientdata(client, indio_dev);
data = iio_priv(indio_dev);
data->client = client;
mutex_init(&data->lock);
data->cfg = &ad5272_cfg[id->driver_data];
ret = ad5272_reset(data);
if (ret)
return ret;
ret = ad5272_write(data, AD5272_CTL, AD5272_RDAC_WR_EN);
if (ret < 0)
return -ENODEV;
indio_dev->info = &ad5272_info;
indio_dev->channels = &ad5272_channel;
indio_dev->num_channels = 1;
indio_dev->name = client->name;
return devm_iio_device_register(dev, indio_dev);
}
static const struct of_device_id ad5272_dt_ids[] = {
{ .compatible = "adi,ad5272-020", .data = (void *)AD5272_020 },
{ .compatible = "adi,ad5272-050", .data = (void *)AD5272_050 },
{ .compatible = "adi,ad5272-100", .data = (void *)AD5272_100 },
{ .compatible = "adi,ad5274-020", .data = (void *)AD5274_020 },
{ .compatible = "adi,ad5274-100", .data = (void *)AD5274_100 },
{}
};
MODULE_DEVICE_TABLE(of, ad5272_dt_ids);
static const struct i2c_device_id ad5272_id[] = {
{ "ad5272-020", AD5272_020 },
{ "ad5272-050", AD5272_050 },
{ "ad5272-100", AD5272_100 },
{ "ad5274-020", AD5274_020 },
{ "ad5274-100", AD5274_100 },
{}
};
MODULE_DEVICE_TABLE(i2c, ad5272_id);
static struct i2c_driver ad5272_driver = {
.driver = {
.name = "ad5272",
.of_match_table = ad5272_dt_ids,
},
.probe = ad5272_probe,
.id_table = ad5272_id,
};
module_i2c_driver(ad5272_driver);
MODULE_AUTHOR("Phil Reid <[email protected]>");
MODULE_DESCRIPTION("AD5272 digital potentiometer");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/potentiometer/ad5272.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Industrial I/O driver for Microchip digital potentiometers
* Copyright (c) 2015 Axentia Technologies AB
* Author: Peter Rosin <[email protected]>
*
* Datasheet: http://www.microchip.com/downloads/en/DeviceDoc/22096b.pdf
*
* DEVID #Wipers #Positions Resistor Opts (kOhm) i2c address
* mcp4531 1 129 5, 10, 50, 100 010111x
* mcp4532 1 129 5, 10, 50, 100 01011xx
* mcp4541 1 129 5, 10, 50, 100 010111x
* mcp4542 1 129 5, 10, 50, 100 01011xx
* mcp4551 1 257 5, 10, 50, 100 010111x
* mcp4552 1 257 5, 10, 50, 100 01011xx
* mcp4561 1 257 5, 10, 50, 100 010111x
* mcp4562 1 257 5, 10, 50, 100 01011xx
* mcp4631 2 129 5, 10, 50, 100 0101xxx
* mcp4632 2 129 5, 10, 50, 100 01011xx
* mcp4641 2 129 5, 10, 50, 100 0101xxx
* mcp4642 2 129 5, 10, 50, 100 01011xx
* mcp4651 2 257 5, 10, 50, 100 0101xxx
* mcp4652 2 257 5, 10, 50, 100 01011xx
* mcp4661 2 257 5, 10, 50, 100 0101xxx
* mcp4662 2 257 5, 10, 50, 100 01011xx
*/
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/iio/iio.h>
struct mcp4531_cfg {
int wipers;
int avail[3];
int kohms;
};
enum mcp4531_type {
MCP453x_502,
MCP453x_103,
MCP453x_503,
MCP453x_104,
MCP454x_502,
MCP454x_103,
MCP454x_503,
MCP454x_104,
MCP455x_502,
MCP455x_103,
MCP455x_503,
MCP455x_104,
MCP456x_502,
MCP456x_103,
MCP456x_503,
MCP456x_104,
MCP463x_502,
MCP463x_103,
MCP463x_503,
MCP463x_104,
MCP464x_502,
MCP464x_103,
MCP464x_503,
MCP464x_104,
MCP465x_502,
MCP465x_103,
MCP465x_503,
MCP465x_104,
MCP466x_502,
MCP466x_103,
MCP466x_503,
MCP466x_104,
};
static const struct mcp4531_cfg mcp4531_cfg[] = {
[MCP453x_502] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 5, },
[MCP453x_103] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 10, },
[MCP453x_503] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 50, },
[MCP453x_104] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 100, },
[MCP454x_502] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 5, },
[MCP454x_103] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 10, },
[MCP454x_503] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 50, },
[MCP454x_104] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 100, },
[MCP455x_502] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 5, },
[MCP455x_103] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 10, },
[MCP455x_503] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 50, },
[MCP455x_104] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 100, },
[MCP456x_502] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 5, },
[MCP456x_103] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 10, },
[MCP456x_503] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 50, },
[MCP456x_104] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 100, },
[MCP463x_502] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 5, },
[MCP463x_103] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 10, },
[MCP463x_503] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 50, },
[MCP463x_104] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 100, },
[MCP464x_502] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 5, },
[MCP464x_103] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 10, },
[MCP464x_503] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 50, },
[MCP464x_104] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 100, },
[MCP465x_502] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 5, },
[MCP465x_103] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 10, },
[MCP465x_503] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 50, },
[MCP465x_104] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 100, },
[MCP466x_502] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 5, },
[MCP466x_103] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 10, },
[MCP466x_503] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 50, },
[MCP466x_104] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 100, },
};
#define MCP4531_WRITE (0 << 2)
#define MCP4531_INCR (1 << 2)
#define MCP4531_DECR (2 << 2)
#define MCP4531_READ (3 << 2)
#define MCP4531_WIPER_SHIFT (4)
struct mcp4531_data {
struct i2c_client *client;
const struct mcp4531_cfg *cfg;
};
#define MCP4531_CHANNEL(ch) { \
.type = IIO_RESISTANCE, \
.indexed = 1, \
.output = 1, \
.channel = (ch), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_RAW), \
}
static const struct iio_chan_spec mcp4531_channels[] = {
MCP4531_CHANNEL(0),
MCP4531_CHANNEL(1),
};
static int mcp4531_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct mcp4531_data *data = iio_priv(indio_dev);
int address = chan->channel << MCP4531_WIPER_SHIFT;
s32 ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = i2c_smbus_read_word_swapped(data->client,
MCP4531_READ | address);
if (ret < 0)
return ret;
*val = ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 1000 * data->cfg->kohms;
*val2 = data->cfg->avail[2];
return IIO_VAL_FRACTIONAL;
}
return -EINVAL;
}
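/*
 * Illustrative sketch only (not part of the original driver): how the
 * IIO_CHAN_INFO_SCALE fraction returned above is applied. Userspace
 * computes resistance in ohms as raw * val / val2, i.e.
 * raw * (1000 * kohms) / avail[2]; a 10 kohm part with avail[2] == 256
 * read at mid-scale (raw = 128) gives 128 * 10000 / 256 = 5000 ohms.
 * The helper name below is invented for this example only.
 */
static inline unsigned long mcp4531_example_raw_to_ohms(const struct mcp4531_cfg *cfg,
							unsigned int raw)
{
	return (unsigned long)raw * 1000 * cfg->kohms / cfg->avail[2];
}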
static int mcp4531_read_avail(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
const int **vals, int *type, int *length,
long mask)
{
struct mcp4531_data *data = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_RAW:
*length = ARRAY_SIZE(data->cfg->avail);
*vals = data->cfg->avail;
*type = IIO_VAL_INT;
return IIO_AVAIL_RANGE;
}
return -EINVAL;
}
static int mcp4531_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct mcp4531_data *data = iio_priv(indio_dev);
int address = chan->channel << MCP4531_WIPER_SHIFT;
switch (mask) {
case IIO_CHAN_INFO_RAW:
if (val > data->cfg->avail[2] || val < 0)
return -EINVAL;
break;
default:
return -EINVAL;
}
return i2c_smbus_write_byte_data(data->client,
MCP4531_WRITE | address | (val >> 8),
val & 0xff);
}
static const struct iio_info mcp4531_info = {
.read_raw = mcp4531_read_raw,
.read_avail = mcp4531_read_avail,
.write_raw = mcp4531_write_raw,
};
#define MCP4531_ID_TABLE(_name, cfg) { \
.name = _name, \
.driver_data = (kernel_ulong_t)&mcp4531_cfg[cfg], \
}
static const struct i2c_device_id mcp4531_id[] = {
MCP4531_ID_TABLE("mcp4531-502", MCP453x_502),
MCP4531_ID_TABLE("mcp4531-103", MCP453x_103),
MCP4531_ID_TABLE("mcp4531-503", MCP453x_503),
MCP4531_ID_TABLE("mcp4531-104", MCP453x_104),
MCP4531_ID_TABLE("mcp4532-502", MCP453x_502),
MCP4531_ID_TABLE("mcp4532-103", MCP453x_103),
MCP4531_ID_TABLE("mcp4532-503", MCP453x_503),
MCP4531_ID_TABLE("mcp4532-104", MCP453x_104),
MCP4531_ID_TABLE("mcp4541-502", MCP454x_502),
MCP4531_ID_TABLE("mcp4541-103", MCP454x_103),
MCP4531_ID_TABLE("mcp4541-503", MCP454x_503),
MCP4531_ID_TABLE("mcp4541-104", MCP454x_104),
MCP4531_ID_TABLE("mcp4542-502", MCP454x_502),
MCP4531_ID_TABLE("mcp4542-103", MCP454x_103),
MCP4531_ID_TABLE("mcp4542-503", MCP454x_503),
MCP4531_ID_TABLE("mcp4542-104", MCP454x_104),
MCP4531_ID_TABLE("mcp4551-502", MCP455x_502),
MCP4531_ID_TABLE("mcp4551-103", MCP455x_103),
MCP4531_ID_TABLE("mcp4551-503", MCP455x_503),
MCP4531_ID_TABLE("mcp4551-104", MCP455x_104),
MCP4531_ID_TABLE("mcp4552-502", MCP455x_502),
MCP4531_ID_TABLE("mcp4552-103", MCP455x_103),
MCP4531_ID_TABLE("mcp4552-503", MCP455x_503),
MCP4531_ID_TABLE("mcp4552-104", MCP455x_104),
MCP4531_ID_TABLE("mcp4561-502", MCP456x_502),
MCP4531_ID_TABLE("mcp4561-103", MCP456x_103),
MCP4531_ID_TABLE("mcp4561-503", MCP456x_503),
MCP4531_ID_TABLE("mcp4561-104", MCP456x_104),
MCP4531_ID_TABLE("mcp4562-502", MCP456x_502),
MCP4531_ID_TABLE("mcp4562-103", MCP456x_103),
MCP4531_ID_TABLE("mcp4562-503", MCP456x_503),
MCP4531_ID_TABLE("mcp4562-104", MCP456x_104),
MCP4531_ID_TABLE("mcp4631-502", MCP463x_502),
MCP4531_ID_TABLE("mcp4631-103", MCP463x_103),
MCP4531_ID_TABLE("mcp4631-503", MCP463x_503),
MCP4531_ID_TABLE("mcp4631-104", MCP463x_104),
MCP4531_ID_TABLE("mcp4632-502", MCP463x_502),
MCP4531_ID_TABLE("mcp4632-103", MCP463x_103),
MCP4531_ID_TABLE("mcp4632-503", MCP463x_503),
MCP4531_ID_TABLE("mcp4632-104", MCP463x_104),
MCP4531_ID_TABLE("mcp4641-502", MCP464x_502),
MCP4531_ID_TABLE("mcp4641-103", MCP464x_103),
MCP4531_ID_TABLE("mcp4641-503", MCP464x_503),
MCP4531_ID_TABLE("mcp4641-104", MCP464x_104),
MCP4531_ID_TABLE("mcp4642-502", MCP464x_502),
MCP4531_ID_TABLE("mcp4642-103", MCP464x_103),
MCP4531_ID_TABLE("mcp4642-503", MCP464x_503),
MCP4531_ID_TABLE("mcp4642-104", MCP464x_104),
MCP4531_ID_TABLE("mcp4651-502", MCP465x_502),
MCP4531_ID_TABLE("mcp4651-103", MCP465x_103),
MCP4531_ID_TABLE("mcp4651-503", MCP465x_503),
MCP4531_ID_TABLE("mcp4651-104", MCP465x_104),
MCP4531_ID_TABLE("mcp4652-502", MCP465x_502),
MCP4531_ID_TABLE("mcp4652-103", MCP465x_103),
MCP4531_ID_TABLE("mcp4652-503", MCP465x_503),
MCP4531_ID_TABLE("mcp4652-104", MCP465x_104),
MCP4531_ID_TABLE("mcp4661-502", MCP466x_502),
MCP4531_ID_TABLE("mcp4661-103", MCP466x_103),
MCP4531_ID_TABLE("mcp4661-503", MCP466x_503),
MCP4531_ID_TABLE("mcp4661-104", MCP466x_104),
MCP4531_ID_TABLE("mcp4662-502", MCP466x_502),
MCP4531_ID_TABLE("mcp4662-103", MCP466x_103),
MCP4531_ID_TABLE("mcp4662-503", MCP466x_503),
MCP4531_ID_TABLE("mcp4662-104", MCP466x_104),
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, mcp4531_id);
#define MCP4531_COMPATIBLE(of_compatible, cfg) { \
.compatible = of_compatible, \
.data = &mcp4531_cfg[cfg], \
}
static const struct of_device_id mcp4531_of_match[] = {
MCP4531_COMPATIBLE("microchip,mcp4531-502", MCP453x_502),
MCP4531_COMPATIBLE("microchip,mcp4531-103", MCP453x_103),
MCP4531_COMPATIBLE("microchip,mcp4531-503", MCP453x_503),
MCP4531_COMPATIBLE("microchip,mcp4531-104", MCP453x_104),
MCP4531_COMPATIBLE("microchip,mcp4532-502", MCP453x_502),
MCP4531_COMPATIBLE("microchip,mcp4532-103", MCP453x_103),
MCP4531_COMPATIBLE("microchip,mcp4532-503", MCP453x_503),
MCP4531_COMPATIBLE("microchip,mcp4532-104", MCP453x_104),
MCP4531_COMPATIBLE("microchip,mcp4541-502", MCP454x_502),
MCP4531_COMPATIBLE("microchip,mcp4541-103", MCP454x_103),
MCP4531_COMPATIBLE("microchip,mcp4541-503", MCP454x_503),
MCP4531_COMPATIBLE("microchip,mcp4541-104", MCP454x_104),
MCP4531_COMPATIBLE("microchip,mcp4542-502", MCP454x_502),
MCP4531_COMPATIBLE("microchip,mcp4542-103", MCP454x_103),
MCP4531_COMPATIBLE("microchip,mcp4542-503", MCP454x_503),
MCP4531_COMPATIBLE("microchip,mcp4542-104", MCP454x_104),
MCP4531_COMPATIBLE("microchip,mcp4551-502", MCP455x_502),
MCP4531_COMPATIBLE("microchip,mcp4551-103", MCP455x_103),
MCP4531_COMPATIBLE("microchip,mcp4551-503", MCP455x_503),
MCP4531_COMPATIBLE("microchip,mcp4551-104", MCP455x_104),
MCP4531_COMPATIBLE("microchip,mcp4552-502", MCP455x_502),
MCP4531_COMPATIBLE("microchip,mcp4552-103", MCP455x_103),
MCP4531_COMPATIBLE("microchip,mcp4552-503", MCP455x_503),
MCP4531_COMPATIBLE("microchip,mcp4552-104", MCP455x_104),
MCP4531_COMPATIBLE("microchip,mcp4561-502", MCP456x_502),
MCP4531_COMPATIBLE("microchip,mcp4561-103", MCP456x_103),
MCP4531_COMPATIBLE("microchip,mcp4561-503", MCP456x_503),
MCP4531_COMPATIBLE("microchip,mcp4561-104", MCP456x_104),
MCP4531_COMPATIBLE("microchip,mcp4562-502", MCP456x_502),
MCP4531_COMPATIBLE("microchip,mcp4562-103", MCP456x_103),
MCP4531_COMPATIBLE("microchip,mcp4562-503", MCP456x_503),
MCP4531_COMPATIBLE("microchip,mcp4562-104", MCP456x_104),
MCP4531_COMPATIBLE("microchip,mcp4631-502", MCP463x_502),
MCP4531_COMPATIBLE("microchip,mcp4631-103", MCP463x_103),
MCP4531_COMPATIBLE("microchip,mcp4631-503", MCP463x_503),
MCP4531_COMPATIBLE("microchip,mcp4631-104", MCP463x_104),
MCP4531_COMPATIBLE("microchip,mcp4632-502", MCP463x_502),
MCP4531_COMPATIBLE("microchip,mcp4632-103", MCP463x_103),
MCP4531_COMPATIBLE("microchip,mcp4632-503", MCP463x_503),
MCP4531_COMPATIBLE("microchip,mcp4632-104", MCP463x_104),
MCP4531_COMPATIBLE("microchip,mcp4641-502", MCP464x_502),
MCP4531_COMPATIBLE("microchip,mcp4641-103", MCP464x_103),
MCP4531_COMPATIBLE("microchip,mcp4641-503", MCP464x_503),
MCP4531_COMPATIBLE("microchip,mcp4641-104", MCP464x_104),
MCP4531_COMPATIBLE("microchip,mcp4642-502", MCP464x_502),
MCP4531_COMPATIBLE("microchip,mcp4642-103", MCP464x_103),
MCP4531_COMPATIBLE("microchip,mcp4642-503", MCP464x_503),
MCP4531_COMPATIBLE("microchip,mcp4642-104", MCP464x_104),
MCP4531_COMPATIBLE("microchip,mcp4651-502", MCP465x_502),
MCP4531_COMPATIBLE("microchip,mcp4651-103", MCP465x_103),
MCP4531_COMPATIBLE("microchip,mcp4651-503", MCP465x_503),
MCP4531_COMPATIBLE("microchip,mcp4651-104", MCP465x_104),
MCP4531_COMPATIBLE("microchip,mcp4652-502", MCP465x_502),
MCP4531_COMPATIBLE("microchip,mcp4652-103", MCP465x_103),
MCP4531_COMPATIBLE("microchip,mcp4652-503", MCP465x_503),
MCP4531_COMPATIBLE("microchip,mcp4652-104", MCP465x_104),
MCP4531_COMPATIBLE("microchip,mcp4661-502", MCP466x_502),
MCP4531_COMPATIBLE("microchip,mcp4661-103", MCP466x_103),
MCP4531_COMPATIBLE("microchip,mcp4661-503", MCP466x_503),
MCP4531_COMPATIBLE("microchip,mcp4661-104", MCP466x_104),
MCP4531_COMPATIBLE("microchip,mcp4662-502", MCP466x_502),
MCP4531_COMPATIBLE("microchip,mcp4662-103", MCP466x_103),
MCP4531_COMPATIBLE("microchip,mcp4662-503", MCP466x_503),
MCP4531_COMPATIBLE("microchip,mcp4662-104", MCP466x_104),
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mcp4531_of_match);
static int mcp4531_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct mcp4531_data *data;
struct iio_dev *indio_dev;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WORD_DATA)) {
dev_err(dev, "SMBUS Word Data not supported\n");
return -EOPNOTSUPP;
}
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
data->cfg = i2c_get_match_data(client);
indio_dev->info = &mcp4531_info;
indio_dev->channels = mcp4531_channels;
indio_dev->num_channels = data->cfg->wipers;
indio_dev->name = client->name;
return devm_iio_device_register(dev, indio_dev);
}
static struct i2c_driver mcp4531_driver = {
.driver = {
.name = "mcp4531",
.of_match_table = mcp4531_of_match,
},
.probe = mcp4531_probe,
.id_table = mcp4531_id,
};
module_i2c_driver(mcp4531_driver);
MODULE_AUTHOR("Peter Rosin <[email protected]>");
MODULE_DESCRIPTION("MCP4531 digital potentiometer");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/potentiometer/mcp4531.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* max5487.c - Support for MAX5487, MAX5488, MAX5489 digital potentiometers
*
* Copyright (C) 2016 Cristina-Gabriela Moraru <[email protected]>
*/
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/acpi.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/iio.h>
#define MAX5487_WRITE_WIPER_A (0x01 << 8)
#define MAX5487_WRITE_WIPER_B (0x02 << 8)
/* copy both wiper regs to NV regs */
#define MAX5487_COPY_AB_TO_NV (0x23 << 8)
/* copy both NV regs to wiper regs */
#define MAX5487_COPY_NV_TO_AB (0x33 << 8)
#define MAX5487_MAX_POS 255
struct max5487_data {
struct spi_device *spi;
int kohms;
};
#define MAX5487_CHANNEL(ch, addr) { \
.type = IIO_RESISTANCE, \
.indexed = 1, \
.output = 1, \
.channel = ch, \
.address = addr, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
}
static const struct iio_chan_spec max5487_channels[] = {
MAX5487_CHANNEL(0, MAX5487_WRITE_WIPER_A),
MAX5487_CHANNEL(1, MAX5487_WRITE_WIPER_B),
};
static int max5487_write_cmd(struct spi_device *spi, u16 cmd)
{
return spi_write(spi, (const void *) &cmd, sizeof(u16));
}
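/*
 * Illustrative sketch only (not part of the original driver): the 16-bit
 * command word passed to max5487_write_cmd() carries the command in its
 * upper byte and the wiper position in its lower byte, so setting wiper A
 * to mid-scale is MAX5487_WRITE_WIPER_A | 128 == 0x0180. The helper name
 * is invented for this example only.
 */
static inline int max5487_example_set_wiper_a_mid(struct spi_device *spi)
{
	return max5487_write_cmd(spi, MAX5487_WRITE_WIPER_A | 128);
}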
static int max5487_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct max5487_data *data = iio_priv(indio_dev);
if (mask != IIO_CHAN_INFO_SCALE)
return -EINVAL;
*val = 1000 * data->kohms;
*val2 = MAX5487_MAX_POS;
return IIO_VAL_FRACTIONAL;
}
static int max5487_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct max5487_data *data = iio_priv(indio_dev);
if (mask != IIO_CHAN_INFO_RAW)
return -EINVAL;
if (val < 0 || val > MAX5487_MAX_POS)
return -EINVAL;
return max5487_write_cmd(data->spi, chan->address | val);
}
static const struct iio_info max5487_info = {
.read_raw = max5487_read_raw,
.write_raw = max5487_write_raw,
};
static int max5487_spi_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct max5487_data *data;
const struct spi_device_id *id = spi_get_device_id(spi);
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
spi_set_drvdata(spi, indio_dev);
data = iio_priv(indio_dev);
data->spi = spi;
data->kohms = id->driver_data;
indio_dev->info = &max5487_info;
indio_dev->name = id->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = max5487_channels;
indio_dev->num_channels = ARRAY_SIZE(max5487_channels);
/* restore both wiper regs from NV regs */
ret = max5487_write_cmd(data->spi, MAX5487_COPY_NV_TO_AB);
if (ret < 0)
return ret;
return iio_device_register(indio_dev);
}
static void max5487_spi_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
int ret;
iio_device_unregister(indio_dev);
/* save both wiper regs to NV regs */
ret = max5487_write_cmd(spi, MAX5487_COPY_AB_TO_NV);
if (ret)
dev_warn(&spi->dev, "Failed to save wiper regs to NV regs\n");
}
static const struct spi_device_id max5487_id[] = {
{ "MAX5487", 10 },
{ "MAX5488", 50 },
{ "MAX5489", 100 },
{ }
};
MODULE_DEVICE_TABLE(spi, max5487_id);
static const struct acpi_device_id max5487_acpi_match[] = {
{ "MAX5487", 10 },
{ "MAX5488", 50 },
{ "MAX5489", 100 },
{ },
};
MODULE_DEVICE_TABLE(acpi, max5487_acpi_match);
static struct spi_driver max5487_driver = {
.driver = {
.name = "max5487",
.acpi_match_table = ACPI_PTR(max5487_acpi_match),
},
.id_table = max5487_id,
.probe = max5487_spi_probe,
.remove = max5487_spi_remove
};
module_spi_driver(max5487_driver);
MODULE_AUTHOR("Cristina-Gabriela Moraru <[email protected]>");
MODULE_DESCRIPTION("max5487 SPI driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/potentiometer/max5487.c |
// SPDX-License-Identifier: GPL-2.0
/*
*
* x9250.c -- Renesas X9250 potentiometers IIO driver
*
* Copyright 2023 CS GROUP France
*
* Author: Herve Codina <[email protected]>
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/iio/iio.h>
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
struct x9250_cfg {
const char *name;
int kohms;
};
struct x9250 {
struct spi_device *spi;
const struct x9250_cfg *cfg;
struct gpio_desc *wp_gpio;
};
#define X9250_ID 0x50
#define X9250_CMD_RD_WCR(_p) (0x90 | (_p))
#define X9250_CMD_WR_WCR(_p) (0xa0 | (_p))
static int x9250_write8(struct x9250 *x9250, u8 cmd, u8 val)
{
u8 txbuf[3];
txbuf[0] = X9250_ID;
txbuf[1] = cmd;
txbuf[2] = val;
return spi_write_then_read(x9250->spi, txbuf, ARRAY_SIZE(txbuf), NULL, 0);
}
static int x9250_read8(struct x9250 *x9250, u8 cmd, u8 *val)
{
u8 txbuf[2];
txbuf[0] = X9250_ID;
txbuf[1] = cmd;
return spi_write_then_read(x9250->spi, txbuf, ARRAY_SIZE(txbuf), val, 1);
}
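/*
 * Illustrative sketch only (not part of the original driver): programming
 * all four wiper control registers to the same code with the helpers
 * above. Write-protect (wp) GPIO handling is omitted here; the real write
 * path below toggles it around each write. The helper name is invented
 * for this example only.
 */
static inline int x9250_example_set_all_wipers(struct x9250 *x9250, u8 val)
{
	int ch, ret;

	for (ch = 0; ch < 4; ch++) {
		ret = x9250_write8(x9250, X9250_CMD_WR_WCR(ch), val);
		if (ret)
			return ret;
	}
	return 0;
}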
#define X9250_CHANNEL(ch) { \
.type = IIO_RESISTANCE, \
.indexed = 1, \
.output = 1, \
.channel = (ch), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_RAW), \
}
static const struct iio_chan_spec x9250_channels[] = {
X9250_CHANNEL(0),
X9250_CHANNEL(1),
X9250_CHANNEL(2),
X9250_CHANNEL(3),
};
static int x9250_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct x9250 *x9250 = iio_priv(indio_dev);
int ch = chan->channel;
int ret;
u8 v;
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = x9250_read8(x9250, X9250_CMD_RD_WCR(ch), &v);
if (ret)
return ret;
*val = v;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 1000 * x9250->cfg->kohms;
*val2 = U8_MAX;
return IIO_VAL_FRACTIONAL;
}
return -EINVAL;
}
static int x9250_read_avail(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
const int **vals, int *type, int *length, long mask)
{
static const int range[] = {0, 1, 255}; /* min, step, max */
switch (mask) {
case IIO_CHAN_INFO_RAW:
*length = ARRAY_SIZE(range);
*vals = range;
*type = IIO_VAL_INT;
return IIO_AVAIL_RANGE;
}
return -EINVAL;
}
static int x9250_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct x9250 *x9250 = iio_priv(indio_dev);
int ch = chan->channel;
int ret;
if (mask != IIO_CHAN_INFO_RAW)
return -EINVAL;
if (val > U8_MAX || val < 0)
return -EINVAL;
gpiod_set_value_cansleep(x9250->wp_gpio, 0);
ret = x9250_write8(x9250, X9250_CMD_WR_WCR(ch), val);
gpiod_set_value_cansleep(x9250->wp_gpio, 1);
return ret;
}
static const struct iio_info x9250_info = {
.read_raw = x9250_read_raw,
.read_avail = x9250_read_avail,
.write_raw = x9250_write_raw,
};
enum x9250_type {
X9250T,
X9250U,
};
static const struct x9250_cfg x9250_cfg[] = {
[X9250T] = { .name = "x9250t", .kohms = 100, },
[X9250U] = { .name = "x9250u", .kohms = 50, },
};
static const char *const x9250_regulator_names[] = {
"vcc",
"avp",
"avn",
};
static int x9250_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct x9250 *x9250;
int ret;
ret = devm_regulator_bulk_get_enable(&spi->dev, ARRAY_SIZE(x9250_regulator_names),
x9250_regulator_names);
if (ret)
return dev_err_probe(&spi->dev, ret, "Failed to get regulators\n");
/*
	 * The x9250 needs up to 5 ms after the power supplies are enabled
	 * before the first write can be performed (up to 1 ms for the first read).
*/
usleep_range(5000, 6000);
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*x9250));
if (!indio_dev)
return -ENOMEM;
x9250 = iio_priv(indio_dev);
x9250->spi = spi;
x9250->cfg = spi_get_device_match_data(spi);
x9250->wp_gpio = devm_gpiod_get_optional(&spi->dev, "wp", GPIOD_OUT_LOW);
if (IS_ERR(x9250->wp_gpio))
return dev_err_probe(&spi->dev, PTR_ERR(x9250->wp_gpio),
"failed to get wp gpio\n");
indio_dev->info = &x9250_info;
indio_dev->channels = x9250_channels;
indio_dev->num_channels = ARRAY_SIZE(x9250_channels);
indio_dev->name = x9250->cfg->name;
return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct of_device_id x9250_of_match[] = {
{ .compatible = "renesas,x9250t", .data = &x9250_cfg[X9250T]},
{ .compatible = "renesas,x9250u", .data = &x9250_cfg[X9250U]},
{ }
};
MODULE_DEVICE_TABLE(of, x9250_of_match);
static const struct spi_device_id x9250_id_table[] = {
{ "x9250t", (kernel_ulong_t)&x9250_cfg[X9250T] },
{ "x9250u", (kernel_ulong_t)&x9250_cfg[X9250U] },
{ }
};
MODULE_DEVICE_TABLE(spi, x9250_id_table);
static struct spi_driver x9250_spi_driver = {
.driver = {
.name = "x9250",
.of_match_table = x9250_of_match,
},
.id_table = x9250_id_table,
.probe = x9250_probe,
};
module_spi_driver(x9250_spi_driver);
MODULE_AUTHOR("Herve Codina <[email protected]>");
MODULE_DESCRIPTION("X9250 ALSA SoC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/iio/potentiometer/x9250.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Industrial I/O driver for Microchip digital potentiometers
*
* Copyright (c) 2016 Slawomir Stepien
* Based on: Peter Rosin's code from mcp4531.c
*
* Datasheet: https://ww1.microchip.com/downloads/en/DeviceDoc/22060b.pdf
*
* DEVID #Wipers #Positions Resistor Opts (kOhm)
* mcp4131 1 129 5, 10, 50, 100
* mcp4132 1 129 5, 10, 50, 100
* mcp4141 1 129 5, 10, 50, 100
* mcp4142 1 129 5, 10, 50, 100
* mcp4151 1 257 5, 10, 50, 100
* mcp4152 1 257 5, 10, 50, 100
* mcp4161 1 257 5, 10, 50, 100
* mcp4162 1 257 5, 10, 50, 100
* mcp4231 2 129 5, 10, 50, 100
* mcp4232 2 129 5, 10, 50, 100
* mcp4241 2 129 5, 10, 50, 100
* mcp4242 2 129 5, 10, 50, 100
* mcp4251 2 257 5, 10, 50, 100
* mcp4252 2 257 5, 10, 50, 100
* mcp4261 2 257 5, 10, 50, 100
* mcp4262 2 257 5, 10, 50, 100
*/
/*
* TODO:
* 1. Write wiper setting to EEPROM for EEPROM capable models.
*/
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/iio/iio.h>
#include <linux/iio/types.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
#define MCP4131_WRITE (0x00 << 2)
#define MCP4131_READ (0x03 << 2)
#define MCP4131_WIPER_SHIFT 4
#define MCP4131_CMDERR(r) ((r[0]) & 0x02)
#define MCP4131_RAW(r) ((r[0]) == 0xff ? 0x100 : (r[1]))
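/*
 * Note on the helper macros above (wording is ours, derived from the code
 * rather than the datasheet): MCP4131_CMDERR() tests bit 1 of the first
 * byte clocked back during a read, and MCP4131_RAW() treats an all-ones
 * first byte as the full-scale code 0x100, otherwise the second byte
 * holds the low 8 bits of the wiper value.
 */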
struct mcp4131_cfg {
int wipers;
int max_pos;
int kohms;
};
enum mcp4131_type {
MCP413x_502 = 0,
MCP413x_103,
MCP413x_503,
MCP413x_104,
MCP414x_502,
MCP414x_103,
MCP414x_503,
MCP414x_104,
MCP415x_502,
MCP415x_103,
MCP415x_503,
MCP415x_104,
MCP416x_502,
MCP416x_103,
MCP416x_503,
MCP416x_104,
MCP423x_502,
MCP423x_103,
MCP423x_503,
MCP423x_104,
MCP424x_502,
MCP424x_103,
MCP424x_503,
MCP424x_104,
MCP425x_502,
MCP425x_103,
MCP425x_503,
MCP425x_104,
MCP426x_502,
MCP426x_103,
MCP426x_503,
MCP426x_104,
};
static const struct mcp4131_cfg mcp4131_cfg[] = {
[MCP413x_502] = { .wipers = 1, .max_pos = 128, .kohms = 5, },
[MCP413x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, },
[MCP413x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, },
[MCP413x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
[MCP414x_502] = { .wipers = 1, .max_pos = 128, .kohms = 5, },
[MCP414x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, },
[MCP414x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, },
[MCP414x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
[MCP415x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, },
[MCP415x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, },
[MCP415x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, },
[MCP415x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
[MCP416x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, },
[MCP416x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, },
[MCP416x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, },
[MCP416x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
[MCP423x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, },
[MCP423x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, },
[MCP423x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, },
[MCP423x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
[MCP424x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, },
[MCP424x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, },
[MCP424x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, },
[MCP424x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
[MCP425x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, },
[MCP425x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, },
[MCP425x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, },
[MCP425x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
[MCP426x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, },
[MCP426x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, },
[MCP426x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, },
[MCP426x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
};
struct mcp4131_data {
struct spi_device *spi;
const struct mcp4131_cfg *cfg;
struct mutex lock;
u8 buf[2] __aligned(IIO_DMA_MINALIGN);
};
#define MCP4131_CHANNEL(ch) { \
.type = IIO_RESISTANCE, \
.indexed = 1, \
.output = 1, \
.channel = (ch), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
}
static const struct iio_chan_spec mcp4131_channels[] = {
MCP4131_CHANNEL(0),
MCP4131_CHANNEL(1),
};
static int mcp4131_read(struct spi_device *spi, void *buf, size_t len)
{
struct spi_transfer t = {
.tx_buf = buf, /* We need to send addr, cmd and 12 bits */
.rx_buf = buf,
.len = len,
};
struct spi_message m;
spi_message_init(&m);
spi_message_add_tail(&t, &m);
return spi_sync(spi, &m);
}
static int mcp4131_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
int err;
struct mcp4131_data *data = iio_priv(indio_dev);
int address = chan->channel;
switch (mask) {
case IIO_CHAN_INFO_RAW:
mutex_lock(&data->lock);
data->buf[0] = (address << MCP4131_WIPER_SHIFT) | MCP4131_READ;
data->buf[1] = 0;
err = mcp4131_read(data->spi, data->buf, 2);
if (err) {
mutex_unlock(&data->lock);
return err;
}
/* Error, bad address/command combination */
if (!MCP4131_CMDERR(data->buf)) {
mutex_unlock(&data->lock);
return -EIO;
}
*val = MCP4131_RAW(data->buf);
mutex_unlock(&data->lock);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 1000 * data->cfg->kohms;
*val2 = data->cfg->max_pos;
return IIO_VAL_FRACTIONAL;
}
return -EINVAL;
}
static int mcp4131_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
int err;
struct mcp4131_data *data = iio_priv(indio_dev);
int address = chan->channel << MCP4131_WIPER_SHIFT;
switch (mask) {
case IIO_CHAN_INFO_RAW:
if (val > data->cfg->max_pos || val < 0)
return -EINVAL;
break;
default:
return -EINVAL;
}
mutex_lock(&data->lock);
	/* address already holds the wiper field shifted into place */
	data->buf[0] = address | MCP4131_WRITE | (val >> 8);
data->buf[1] = val & 0xFF; /* 8 bits here */
err = spi_write(data->spi, data->buf, 2);
mutex_unlock(&data->lock);
return err;
}
static const struct iio_info mcp4131_info = {
.read_raw = mcp4131_read_raw,
.write_raw = mcp4131_write_raw,
};
static int mcp4131_probe(struct spi_device *spi)
{
int err;
struct device *dev = &spi->dev;
unsigned long devid;
struct mcp4131_data *data;
struct iio_dev *indio_dev;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
spi_set_drvdata(spi, indio_dev);
data->spi = spi;
data->cfg = device_get_match_data(&spi->dev);
if (!data->cfg) {
devid = spi_get_device_id(spi)->driver_data;
data->cfg = &mcp4131_cfg[devid];
}
mutex_init(&data->lock);
indio_dev->info = &mcp4131_info;
indio_dev->channels = mcp4131_channels;
indio_dev->num_channels = data->cfg->wipers;
indio_dev->name = spi_get_device_id(spi)->name;
err = devm_iio_device_register(dev, indio_dev);
if (err) {
dev_info(&spi->dev, "Unable to register %s\n", indio_dev->name);
return err;
}
return 0;
}
static const struct of_device_id mcp4131_dt_ids[] = {
{ .compatible = "microchip,mcp4131-502",
.data = &mcp4131_cfg[MCP413x_502] },
{ .compatible = "microchip,mcp4131-103",
.data = &mcp4131_cfg[MCP413x_103] },
{ .compatible = "microchip,mcp4131-503",
.data = &mcp4131_cfg[MCP413x_503] },
{ .compatible = "microchip,mcp4131-104",
.data = &mcp4131_cfg[MCP413x_104] },
{ .compatible = "microchip,mcp4132-502",
.data = &mcp4131_cfg[MCP413x_502] },
{ .compatible = "microchip,mcp4132-103",
.data = &mcp4131_cfg[MCP413x_103] },
{ .compatible = "microchip,mcp4132-503",
.data = &mcp4131_cfg[MCP413x_503] },
{ .compatible = "microchip,mcp4132-104",
.data = &mcp4131_cfg[MCP413x_104] },
{ .compatible = "microchip,mcp4141-502",
.data = &mcp4131_cfg[MCP414x_502] },
{ .compatible = "microchip,mcp4141-103",
.data = &mcp4131_cfg[MCP414x_103] },
{ .compatible = "microchip,mcp4141-503",
.data = &mcp4131_cfg[MCP414x_503] },
{ .compatible = "microchip,mcp4141-104",
.data = &mcp4131_cfg[MCP414x_104] },
{ .compatible = "microchip,mcp4142-502",
.data = &mcp4131_cfg[MCP414x_502] },
{ .compatible = "microchip,mcp4142-103",
.data = &mcp4131_cfg[MCP414x_103] },
{ .compatible = "microchip,mcp4142-503",
.data = &mcp4131_cfg[MCP414x_503] },
{ .compatible = "microchip,mcp4142-104",
.data = &mcp4131_cfg[MCP414x_104] },
{ .compatible = "microchip,mcp4151-502",
.data = &mcp4131_cfg[MCP415x_502] },
{ .compatible = "microchip,mcp4151-103",
.data = &mcp4131_cfg[MCP415x_103] },
{ .compatible = "microchip,mcp4151-503",
.data = &mcp4131_cfg[MCP415x_503] },
{ .compatible = "microchip,mcp4151-104",
.data = &mcp4131_cfg[MCP415x_104] },
{ .compatible = "microchip,mcp4152-502",
.data = &mcp4131_cfg[MCP415x_502] },
{ .compatible = "microchip,mcp4152-103",
.data = &mcp4131_cfg[MCP415x_103] },
{ .compatible = "microchip,mcp4152-503",
.data = &mcp4131_cfg[MCP415x_503] },
{ .compatible = "microchip,mcp4152-104",
.data = &mcp4131_cfg[MCP415x_104] },
{ .compatible = "microchip,mcp4161-502",
.data = &mcp4131_cfg[MCP416x_502] },
{ .compatible = "microchip,mcp4161-103",
.data = &mcp4131_cfg[MCP416x_103] },
{ .compatible = "microchip,mcp4161-503",
.data = &mcp4131_cfg[MCP416x_503] },
{ .compatible = "microchip,mcp4161-104",
.data = &mcp4131_cfg[MCP416x_104] },
{ .compatible = "microchip,mcp4162-502",
.data = &mcp4131_cfg[MCP416x_502] },
{ .compatible = "microchip,mcp4162-103",
.data = &mcp4131_cfg[MCP416x_103] },
{ .compatible = "microchip,mcp4162-503",
.data = &mcp4131_cfg[MCP416x_503] },
{ .compatible = "microchip,mcp4162-104",
.data = &mcp4131_cfg[MCP416x_104] },
{ .compatible = "microchip,mcp4231-502",
.data = &mcp4131_cfg[MCP423x_502] },
{ .compatible = "microchip,mcp4231-103",
.data = &mcp4131_cfg[MCP423x_103] },
{ .compatible = "microchip,mcp4231-503",
.data = &mcp4131_cfg[MCP423x_503] },
{ .compatible = "microchip,mcp4231-104",
.data = &mcp4131_cfg[MCP423x_104] },
{ .compatible = "microchip,mcp4232-502",
.data = &mcp4131_cfg[MCP423x_502] },
{ .compatible = "microchip,mcp4232-103",
.data = &mcp4131_cfg[MCP423x_103] },
{ .compatible = "microchip,mcp4232-503",
.data = &mcp4131_cfg[MCP423x_503] },
{ .compatible = "microchip,mcp4232-104",
.data = &mcp4131_cfg[MCP423x_104] },
{ .compatible = "microchip,mcp4241-502",
.data = &mcp4131_cfg[MCP424x_502] },
{ .compatible = "microchip,mcp4241-103",
.data = &mcp4131_cfg[MCP424x_103] },
{ .compatible = "microchip,mcp4241-503",
.data = &mcp4131_cfg[MCP424x_503] },
{ .compatible = "microchip,mcp4241-104",
.data = &mcp4131_cfg[MCP424x_104] },
{ .compatible = "microchip,mcp4242-502",
.data = &mcp4131_cfg[MCP424x_502] },
{ .compatible = "microchip,mcp4242-103",
.data = &mcp4131_cfg[MCP424x_103] },
{ .compatible = "microchip,mcp4242-503",
.data = &mcp4131_cfg[MCP424x_503] },
{ .compatible = "microchip,mcp4242-104",
.data = &mcp4131_cfg[MCP424x_104] },
{ .compatible = "microchip,mcp4251-502",
.data = &mcp4131_cfg[MCP425x_502] },
{ .compatible = "microchip,mcp4251-103",
.data = &mcp4131_cfg[MCP425x_103] },
{ .compatible = "microchip,mcp4251-503",
.data = &mcp4131_cfg[MCP425x_503] },
{ .compatible = "microchip,mcp4251-104",
.data = &mcp4131_cfg[MCP425x_104] },
{ .compatible = "microchip,mcp4252-502",
.data = &mcp4131_cfg[MCP425x_502] },
{ .compatible = "microchip,mcp4252-103",
.data = &mcp4131_cfg[MCP425x_103] },
{ .compatible = "microchip,mcp4252-503",
.data = &mcp4131_cfg[MCP425x_503] },
{ .compatible = "microchip,mcp4252-104",
.data = &mcp4131_cfg[MCP425x_104] },
{ .compatible = "microchip,mcp4261-502",
.data = &mcp4131_cfg[MCP426x_502] },
{ .compatible = "microchip,mcp4261-103",
.data = &mcp4131_cfg[MCP426x_103] },
{ .compatible = "microchip,mcp4261-503",
.data = &mcp4131_cfg[MCP426x_503] },
{ .compatible = "microchip,mcp4261-104",
.data = &mcp4131_cfg[MCP426x_104] },
{ .compatible = "microchip,mcp4262-502",
.data = &mcp4131_cfg[MCP426x_502] },
{ .compatible = "microchip,mcp4262-103",
.data = &mcp4131_cfg[MCP426x_103] },
{ .compatible = "microchip,mcp4262-503",
.data = &mcp4131_cfg[MCP426x_503] },
{ .compatible = "microchip,mcp4262-104",
.data = &mcp4131_cfg[MCP426x_104] },
{}
};
MODULE_DEVICE_TABLE(of, mcp4131_dt_ids);
static const struct spi_device_id mcp4131_id[] = {
{ "mcp4131-502", MCP413x_502 },
{ "mcp4131-103", MCP413x_103 },
{ "mcp4131-503", MCP413x_503 },
{ "mcp4131-104", MCP413x_104 },
{ "mcp4132-502", MCP413x_502 },
{ "mcp4132-103", MCP413x_103 },
{ "mcp4132-503", MCP413x_503 },
{ "mcp4132-104", MCP413x_104 },
{ "mcp4141-502", MCP414x_502 },
{ "mcp4141-103", MCP414x_103 },
{ "mcp4141-503", MCP414x_503 },
{ "mcp4141-104", MCP414x_104 },
{ "mcp4142-502", MCP414x_502 },
{ "mcp4142-103", MCP414x_103 },
{ "mcp4142-503", MCP414x_503 },
{ "mcp4142-104", MCP414x_104 },
{ "mcp4151-502", MCP415x_502 },
{ "mcp4151-103", MCP415x_103 },
{ "mcp4151-503", MCP415x_503 },
{ "mcp4151-104", MCP415x_104 },
{ "mcp4152-502", MCP415x_502 },
{ "mcp4152-103", MCP415x_103 },
{ "mcp4152-503", MCP415x_503 },
{ "mcp4152-104", MCP415x_104 },
{ "mcp4161-502", MCP416x_502 },
{ "mcp4161-103", MCP416x_103 },
{ "mcp4161-503", MCP416x_503 },
{ "mcp4161-104", MCP416x_104 },
{ "mcp4162-502", MCP416x_502 },
{ "mcp4162-103", MCP416x_103 },
{ "mcp4162-503", MCP416x_503 },
{ "mcp4162-104", MCP416x_104 },
{ "mcp4231-502", MCP423x_502 },
{ "mcp4231-103", MCP423x_103 },
{ "mcp4231-503", MCP423x_503 },
{ "mcp4231-104", MCP423x_104 },
{ "mcp4232-502", MCP423x_502 },
{ "mcp4232-103", MCP423x_103 },
{ "mcp4232-503", MCP423x_503 },
{ "mcp4232-104", MCP423x_104 },
{ "mcp4241-502", MCP424x_502 },
{ "mcp4241-103", MCP424x_103 },
{ "mcp4241-503", MCP424x_503 },
{ "mcp4241-104", MCP424x_104 },
{ "mcp4242-502", MCP424x_502 },
{ "mcp4242-103", MCP424x_103 },
{ "mcp4242-503", MCP424x_503 },
{ "mcp4242-104", MCP424x_104 },
{ "mcp4251-502", MCP425x_502 },
{ "mcp4251-103", MCP425x_103 },
{ "mcp4251-503", MCP425x_503 },
{ "mcp4251-104", MCP425x_104 },
{ "mcp4252-502", MCP425x_502 },
{ "mcp4252-103", MCP425x_103 },
{ "mcp4252-503", MCP425x_503 },
{ "mcp4252-104", MCP425x_104 },
{ "mcp4261-502", MCP426x_502 },
{ "mcp4261-103", MCP426x_103 },
{ "mcp4261-503", MCP426x_503 },
{ "mcp4261-104", MCP426x_104 },
{ "mcp4262-502", MCP426x_502 },
{ "mcp4262-103", MCP426x_103 },
{ "mcp4262-503", MCP426x_503 },
{ "mcp4262-104", MCP426x_104 },
{}
};
MODULE_DEVICE_TABLE(spi, mcp4131_id);
static struct spi_driver mcp4131_driver = {
.driver = {
.name = "mcp4131",
.of_match_table = mcp4131_dt_ids,
},
.probe = mcp4131_probe,
.id_table = mcp4131_id,
};
module_spi_driver(mcp4131_driver);
MODULE_AUTHOR("Slawomir Stepien <[email protected]>");
MODULE_DESCRIPTION("MCP4131 digital potentiometer");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/potentiometer/mcp4131.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Industrial I/O driver for Microchip digital potentiometers
*
* Copyright (c) 2018 Chris Coffey <[email protected]>
* Based on: Slawomir Stepien's code from mcp4131.c
*
* Datasheet: https://ww1.microchip.com/downloads/en/devicedoc/11195c.pdf
*
* DEVID #Wipers #Positions Resistance (kOhm)
* mcp41010 1 256 10
* mcp41050 1 256 50
* mcp41100 1 256 100
* mcp42010 2 256 10
* mcp42050 2 256 50
* mcp42100 2 256 100
*/
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/iio/iio.h>
#include <linux/iio/types.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
#define MCP41010_MAX_WIPERS 2
#define MCP41010_WRITE BIT(4)
#define MCP41010_WIPER_MAX 255
#define MCP41010_WIPER_CHANNEL BIT(0)
struct mcp41010_cfg {
char name[16];
int wipers;
int kohms;
};
enum mcp41010_type {
MCP41010,
MCP41050,
MCP41100,
MCP42010,
MCP42050,
MCP42100,
};
static const struct mcp41010_cfg mcp41010_cfg[] = {
[MCP41010] = { .name = "mcp41010", .wipers = 1, .kohms = 10, },
[MCP41050] = { .name = "mcp41050", .wipers = 1, .kohms = 50, },
[MCP41100] = { .name = "mcp41100", .wipers = 1, .kohms = 100, },
[MCP42010] = { .name = "mcp42010", .wipers = 2, .kohms = 10, },
[MCP42050] = { .name = "mcp42050", .wipers = 2, .kohms = 50, },
[MCP42100] = { .name = "mcp42100", .wipers = 2, .kohms = 100, },
};
struct mcp41010_data {
struct spi_device *spi;
const struct mcp41010_cfg *cfg;
struct mutex lock; /* Protect write sequences */
unsigned int value[MCP41010_MAX_WIPERS]; /* Cache wiper values */
u8 buf[2] __aligned(IIO_DMA_MINALIGN);
};
#define MCP41010_CHANNEL(ch) { \
.type = IIO_RESISTANCE, \
.indexed = 1, \
.output = 1, \
.channel = (ch), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
}
static const struct iio_chan_spec mcp41010_channels[] = {
MCP41010_CHANNEL(0),
MCP41010_CHANNEL(1),
};
static int mcp41010_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct mcp41010_data *data = iio_priv(indio_dev);
int channel = chan->channel;
switch (mask) {
case IIO_CHAN_INFO_RAW:
*val = data->value[channel];
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 1000 * data->cfg->kohms;
*val2 = MCP41010_WIPER_MAX;
return IIO_VAL_FRACTIONAL;
}
return -EINVAL;
}
static int mcp41010_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
int err;
struct mcp41010_data *data = iio_priv(indio_dev);
int channel = chan->channel;
if (mask != IIO_CHAN_INFO_RAW)
return -EINVAL;
if (val > MCP41010_WIPER_MAX || val < 0)
return -EINVAL;
mutex_lock(&data->lock);
data->buf[0] = MCP41010_WIPER_CHANNEL << channel;
data->buf[0] |= MCP41010_WRITE;
data->buf[1] = val & 0xff;
err = spi_write(data->spi, data->buf, sizeof(data->buf));
if (!err)
data->value[channel] = val;
mutex_unlock(&data->lock);
return err;
}
static const struct iio_info mcp41010_info = {
.read_raw = mcp41010_read_raw,
.write_raw = mcp41010_write_raw,
};
static int mcp41010_probe(struct spi_device *spi)
{
int err;
struct device *dev = &spi->dev;
struct mcp41010_data *data;
struct iio_dev *indio_dev;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
spi_set_drvdata(spi, indio_dev);
data->spi = spi;
data->cfg = device_get_match_data(&spi->dev);
if (!data->cfg)
data->cfg = &mcp41010_cfg[spi_get_device_id(spi)->driver_data];
mutex_init(&data->lock);
indio_dev->info = &mcp41010_info;
indio_dev->channels = mcp41010_channels;
indio_dev->num_channels = data->cfg->wipers;
indio_dev->name = data->cfg->name;
err = devm_iio_device_register(dev, indio_dev);
if (err)
dev_info(&spi->dev, "Unable to register %s\n", indio_dev->name);
return err;
}
static const struct of_device_id mcp41010_match[] = {
{ .compatible = "microchip,mcp41010", .data = &mcp41010_cfg[MCP41010] },
{ .compatible = "microchip,mcp41050", .data = &mcp41010_cfg[MCP41050] },
{ .compatible = "microchip,mcp41100", .data = &mcp41010_cfg[MCP41100] },
{ .compatible = "microchip,mcp42010", .data = &mcp41010_cfg[MCP42010] },
{ .compatible = "microchip,mcp42050", .data = &mcp41010_cfg[MCP42050] },
{ .compatible = "microchip,mcp42100", .data = &mcp41010_cfg[MCP42100] },
{}
};
MODULE_DEVICE_TABLE(of, mcp41010_match);
static const struct spi_device_id mcp41010_id[] = {
{ "mcp41010", MCP41010 },
{ "mcp41050", MCP41050 },
{ "mcp41100", MCP41100 },
{ "mcp42010", MCP42010 },
{ "mcp42050", MCP42050 },
{ "mcp42100", MCP42100 },
{}
};
MODULE_DEVICE_TABLE(spi, mcp41010_id);
static struct spi_driver mcp41010_driver = {
.driver = {
.name = "mcp41010",
.of_match_table = mcp41010_match,
},
.probe = mcp41010_probe,
.id_table = mcp41010_id,
};
module_spi_driver(mcp41010_driver);
MODULE_AUTHOR("Chris Coffey <[email protected]>");
MODULE_DESCRIPTION("MCP41010 digital potentiometer");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/potentiometer/mcp41010.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* tpl0102.c - Support for Texas Instruments digital potentiometers
*
* Copyright (C) 2016, 2018
* Author: Matt Ranostay <[email protected]>
*
* TODO: enable/disable hi-z output control
*/
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/iio/iio.h>
struct tpl0102_cfg {
int wipers;
	int avail[3];	/* raw wiper range: min, step, max */
int kohms;
};
enum tpl0102_type {
CAT5140_503,
CAT5140_104,
TPL0102_104,
TPL0401_103,
};
static const struct tpl0102_cfg tpl0102_cfg[] = {
/* on-semiconductor parts */
[CAT5140_503] = { .wipers = 1, .avail = { 0, 1, 255 }, .kohms = 50, },
[CAT5140_104] = { .wipers = 1, .avail = { 0, 1, 255 }, .kohms = 100, },
/* ti parts */
[TPL0102_104] = { .wipers = 2, .avail = { 0, 1, 255 }, .kohms = 100 },
[TPL0401_103] = { .wipers = 1, .avail = { 0, 1, 127 }, .kohms = 10, },
};
struct tpl0102_data {
struct regmap *regmap;
const struct tpl0102_cfg *cfg;
};
static const struct regmap_config tpl0102_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
#define TPL0102_CHANNEL(ch) { \
.type = IIO_RESISTANCE, \
.indexed = 1, \
.output = 1, \
.channel = (ch), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW), \
}
static const struct iio_chan_spec tpl0102_channels[] = {
TPL0102_CHANNEL(0),
TPL0102_CHANNEL(1),
};
static int tpl0102_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct tpl0102_data *data = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_RAW: {
int ret = regmap_read(data->regmap, chan->channel, val);
return ret ? ret : IIO_VAL_INT;
}
case IIO_CHAN_INFO_SCALE:
*val = 1000 * data->cfg->kohms;
*val2 = data->cfg->avail[2] + 1;
return IIO_VAL_FRACTIONAL;
}
return -EINVAL;
}
static int tpl0102_read_avail(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
const int **vals, int *type, int *length,
long mask)
{
struct tpl0102_data *data = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_RAW:
*length = ARRAY_SIZE(data->cfg->avail);
*vals = data->cfg->avail;
*type = IIO_VAL_INT;
return IIO_AVAIL_RANGE;
}
return -EINVAL;
}
static int tpl0102_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct tpl0102_data *data = iio_priv(indio_dev);
if (mask != IIO_CHAN_INFO_RAW)
return -EINVAL;
if (val > data->cfg->avail[2] || val < 0)
return -EINVAL;
return regmap_write(data->regmap, chan->channel, val);
}
static const struct iio_info tpl0102_info = {
.read_raw = tpl0102_read_raw,
.read_avail = tpl0102_read_avail,
.write_raw = tpl0102_write_raw,
};
static int tpl0102_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct device *dev = &client->dev;
struct tpl0102_data *data;
struct iio_dev *indio_dev;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->cfg = &tpl0102_cfg[id->driver_data];
data->regmap = devm_regmap_init_i2c(client, &tpl0102_regmap_config);
if (IS_ERR(data->regmap)) {
dev_err(dev, "regmap initialization failed\n");
return PTR_ERR(data->regmap);
}
indio_dev->info = &tpl0102_info;
indio_dev->channels = tpl0102_channels;
indio_dev->num_channels = data->cfg->wipers;
indio_dev->name = client->name;
return devm_iio_device_register(dev, indio_dev);
}
static const struct i2c_device_id tpl0102_id[] = {
{ "cat5140-503", CAT5140_503 },
{ "cat5140-104", CAT5140_104 },
{ "tpl0102-104", TPL0102_104 },
{ "tpl0401-103", TPL0401_103 },
{}
};
MODULE_DEVICE_TABLE(i2c, tpl0102_id);
static struct i2c_driver tpl0102_driver = {
.driver = {
.name = "tpl0102",
},
.probe = tpl0102_probe,
.id_table = tpl0102_id,
};
module_i2c_driver(tpl0102_driver);
MODULE_AUTHOR("Matt Ranostay <[email protected]>");
MODULE_DESCRIPTION("TPL0102 digital potentiometer");
MODULE_LICENSE("GPL");
| linux-master | drivers/iio/potentiometer/tpl0102.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2011 Jonathan Cameron
*
* Buffer handling elements of industrial I/O reference driver.
* Uses the kfifo buffer.
*
* To test without hardware use the sysfs trigger.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/bitmap.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include "iio_simple_dummy.h"
/* Some fake data */
static const s16 fakedata[] = {
[DUMMY_INDEX_VOLTAGE_0] = 7,
[DUMMY_INDEX_DIFFVOLTAGE_1M2] = -33,
[DUMMY_INDEX_DIFFVOLTAGE_3M4] = -2,
[DUMMY_INDEX_ACCELX] = 344,
};
/**
* iio_simple_dummy_trigger_h() - the trigger handler function
* @irq: the interrupt number
* @p: private data - always a pointer to the poll func.
*
 * This is the guts of buffered capture. When a trigger event occurs,
* if the pollfunc is attached then this handler is called as a threaded
* interrupt (and hence may sleep). It is responsible for grabbing data
* from the device and pushing it into the associated buffer.
*/
static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
int i = 0, j;
u16 *data;
data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
if (!data)
goto done;
/*
* Three common options here:
* hardware scans:
* certain combinations of channels make up a fast read. The capture
* will consist of all of them. Hence we just call the grab data
* function and fill the buffer without processing.
* software scans:
* can be considered to be random access so efficient reading is just
* a case of minimal bus transactions.
* software culled hardware scans:
* occasionally a driver may process the nearest hardware scan to avoid
* storing elements that are not desired. This is the fiddliest option
* by far.
* Here let's pretend we have random access. And the values are in the
* constant table fakedata.
*/
for_each_set_bit(j, indio_dev->active_scan_mask, indio_dev->masklength)
data[i++] = fakedata[j];
iio_push_to_buffers_with_timestamp(indio_dev, data,
iio_get_time_ns(indio_dev));
kfree(data);
done:
/*
* Tell the core we are done with this trigger and ready for the
* next one.
*/
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
}
static const struct iio_buffer_setup_ops iio_simple_dummy_buffer_setup_ops = {
};
int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
{
return iio_triggered_buffer_setup(indio_dev, NULL,
iio_simple_dummy_trigger_h,
&iio_simple_dummy_buffer_setup_ops);
}
/**
* iio_simple_dummy_unconfigure_buffer() - release buffer resources
* @indio_dev: device instance state
*/
void iio_simple_dummy_unconfigure_buffer(struct iio_dev *indio_dev)
{
iio_triggered_buffer_cleanup(indio_dev);
}
| linux-master | drivers/iio/dummy/iio_simple_dummy_buffer.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2011 Jonathan Cameron
*
* Companion module to the iio simple dummy example driver.
* The purpose of this is to generate 'fake' event interrupts thus
* allowing that driver's code to be as close as possible to that of
* a normal driver talking to hardware. The approach used here
* is not intended to be general and just happens to work for this
* particular use case.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/sysfs.h>
#include "iio_dummy_evgen.h"
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/irq_sim.h>
/* Fiddly bit of faking an irq without hardware */
#define IIO_EVENTGEN_NO 10
/**
* struct iio_dummy_eventgen - event generator specific state
* @regs: irq regs we are faking
* @lock: protect the evgen state
* @inuse: mask of which irqs are connected
* @irq_sim: interrupt simulator
* @base: base of irq range
* @irq_sim_domain: irq simulator domain
*/
struct iio_dummy_eventgen {
struct iio_dummy_regs regs[IIO_EVENTGEN_NO];
struct mutex lock;
bool inuse[IIO_EVENTGEN_NO];
struct irq_domain *irq_sim_domain;
};
/* We can only ever have one instance of this 'device' */
static struct iio_dummy_eventgen *iio_evgen;
static int iio_dummy_evgen_create(void)
{
int ret;
iio_evgen = kzalloc(sizeof(*iio_evgen), GFP_KERNEL);
if (!iio_evgen)
return -ENOMEM;
iio_evgen->irq_sim_domain = irq_domain_create_sim(NULL,
IIO_EVENTGEN_NO);
if (IS_ERR(iio_evgen->irq_sim_domain)) {
ret = PTR_ERR(iio_evgen->irq_sim_domain);
kfree(iio_evgen);
return ret;
}
mutex_init(&iio_evgen->lock);
return 0;
}
/**
* iio_dummy_evgen_get_irq() - get an evgen provided irq for a device
*
* This function will give a free allocated irq to a client device.
* That irq can then be caused to 'fire' by using the associated sysfs file.
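 * For example, with the device name registered later in this file, writing
 * a value to /sys/bus/iio/devices/iio_evgen/poke_ev0 fires the irq at
 * offset 0.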
*/
int iio_dummy_evgen_get_irq(void)
{
int i, ret = 0;
if (!iio_evgen)
return -ENODEV;
mutex_lock(&iio_evgen->lock);
for (i = 0; i < IIO_EVENTGEN_NO; i++) {
if (!iio_evgen->inuse[i]) {
ret = irq_create_mapping(iio_evgen->irq_sim_domain, i);
iio_evgen->inuse[i] = true;
break;
}
}
mutex_unlock(&iio_evgen->lock);
if (i == IIO_EVENTGEN_NO)
return -ENOMEM;
return ret;
}
EXPORT_SYMBOL_GPL(iio_dummy_evgen_get_irq);
/**
* iio_dummy_evgen_release_irq() - give the irq back.
* @irq: irq being returned to the pool
*
* Used by client driver instances to give the irqs back when they disconnect
*/
void iio_dummy_evgen_release_irq(int irq)
{
struct irq_data *irqd = irq_get_irq_data(irq);
mutex_lock(&iio_evgen->lock);
iio_evgen->inuse[irqd_to_hwirq(irqd)] = false;
irq_dispose_mapping(irq);
mutex_unlock(&iio_evgen->lock);
}
EXPORT_SYMBOL_GPL(iio_dummy_evgen_release_irq);
struct iio_dummy_regs *iio_dummy_evgen_get_regs(int irq)
{
struct irq_data *irqd = irq_get_irq_data(irq);
return &iio_evgen->regs[irqd_to_hwirq(irqd)];
}
EXPORT_SYMBOL_GPL(iio_dummy_evgen_get_regs);
static void iio_dummy_evgen_free(void)
{
irq_domain_remove_sim(iio_evgen->irq_sim_domain);
kfree(iio_evgen);
}
static void iio_evgen_release(struct device *dev)
{
iio_dummy_evgen_free();
}
static ssize_t iio_evgen_poke(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
unsigned long event;
int ret, irq;
ret = kstrtoul(buf, 10, &event);
if (ret)
return ret;
iio_evgen->regs[this_attr->address].reg_id = this_attr->address;
iio_evgen->regs[this_attr->address].reg_data = event;
irq = irq_find_mapping(iio_evgen->irq_sim_domain, this_attr->address);
ret = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true);
if (ret)
return ret;
return len;
}
static IIO_DEVICE_ATTR(poke_ev0, S_IWUSR, NULL, &iio_evgen_poke, 0);
static IIO_DEVICE_ATTR(poke_ev1, S_IWUSR, NULL, &iio_evgen_poke, 1);
static IIO_DEVICE_ATTR(poke_ev2, S_IWUSR, NULL, &iio_evgen_poke, 2);
static IIO_DEVICE_ATTR(poke_ev3, S_IWUSR, NULL, &iio_evgen_poke, 3);
static IIO_DEVICE_ATTR(poke_ev4, S_IWUSR, NULL, &iio_evgen_poke, 4);
static IIO_DEVICE_ATTR(poke_ev5, S_IWUSR, NULL, &iio_evgen_poke, 5);
static IIO_DEVICE_ATTR(poke_ev6, S_IWUSR, NULL, &iio_evgen_poke, 6);
static IIO_DEVICE_ATTR(poke_ev7, S_IWUSR, NULL, &iio_evgen_poke, 7);
static IIO_DEVICE_ATTR(poke_ev8, S_IWUSR, NULL, &iio_evgen_poke, 8);
static IIO_DEVICE_ATTR(poke_ev9, S_IWUSR, NULL, &iio_evgen_poke, 9);
static struct attribute *iio_evgen_attrs[] = {
&iio_dev_attr_poke_ev0.dev_attr.attr,
&iio_dev_attr_poke_ev1.dev_attr.attr,
&iio_dev_attr_poke_ev2.dev_attr.attr,
&iio_dev_attr_poke_ev3.dev_attr.attr,
&iio_dev_attr_poke_ev4.dev_attr.attr,
&iio_dev_attr_poke_ev5.dev_attr.attr,
&iio_dev_attr_poke_ev6.dev_attr.attr,
&iio_dev_attr_poke_ev7.dev_attr.attr,
&iio_dev_attr_poke_ev8.dev_attr.attr,
&iio_dev_attr_poke_ev9.dev_attr.attr,
NULL,
};
static const struct attribute_group iio_evgen_group = {
.attrs = iio_evgen_attrs,
};
static const struct attribute_group *iio_evgen_groups[] = {
&iio_evgen_group,
NULL
};
static struct device iio_evgen_dev = {
.bus = &iio_bus_type,
.groups = iio_evgen_groups,
.release = &iio_evgen_release,
};
static __init int iio_dummy_evgen_init(void)
{
int ret = iio_dummy_evgen_create();
if (ret < 0)
return ret;
device_initialize(&iio_evgen_dev);
dev_set_name(&iio_evgen_dev, "iio_evgen");
ret = device_add(&iio_evgen_dev);
if (ret)
put_device(&iio_evgen_dev);
return ret;
}
module_init(iio_dummy_evgen_init);
static __exit void iio_dummy_evgen_exit(void)
{
device_unregister(&iio_evgen_dev);
}
module_exit(iio_dummy_evgen_exit);
MODULE_AUTHOR("Jonathan Cameron <[email protected]>");
MODULE_DESCRIPTION("IIO dummy driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/dummy/iio_dummy_evgen.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2011 Jonathan Cameron
*
* A reference industrial I/O driver to illustrate the functionality available.
*
* There are numerous real drivers to illustrate the finer points.
* The purpose of this driver is to provide a driver with far more comments
* and explanatory notes than any 'real' driver would have.
* Anyone starting out writing an IIO driver should first make sure they
* understand all of this driver except those bits specifically marked
* as being present to allow us to 'fake' the presence of hardware.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
#include <linux/iio/buffer.h>
#include <linux/iio/sw_device.h>
#include "iio_simple_dummy.h"
static const struct config_item_type iio_dummy_type = {
.ct_owner = THIS_MODULE,
};
/**
* struct iio_dummy_accel_calibscale - realworld to register mapping
* @val: first value in read_raw - here integer part.
* @val2: second value in read_raw etc - here micro part.
* @regval: register value - magic device specific numbers.
*/
struct iio_dummy_accel_calibscale {
int val;
int val2;
int regval; /* what would be written to hardware */
};
static const struct iio_dummy_accel_calibscale dummy_scales[] = {
{ 0, 100, 0x8 }, /* 0.000100 */
{ 0, 133, 0x7 }, /* 0.000133 */
{ 733, 13, 0x9 }, /* 733.000013 */
};
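/*
 * Illustrative helper only (not used by the driver): map a register value
 * back to the {val, val2} pair reported through IIO_CHAN_INFO_CALIBSCALE.
 * Returns NULL if the register holds none of the magic numbers above.
 */
static inline const struct iio_dummy_accel_calibscale *
iio_dummy_example_find_calibscale(int regval)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dummy_scales); i++)
		if (dummy_scales[i].regval == regval)
			return &dummy_scales[i];
	return NULL;
}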
#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
/*
* simple event - triggered when value rises above
* a threshold
*/
static const struct iio_event_spec iio_dummy_event = {
.type = IIO_EV_TYPE_THRESH,
.dir = IIO_EV_DIR_RISING,
.mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
};
/*
* simple step detect event - triggered when a step is detected
*/
static const struct iio_event_spec step_detect_event = {
.type = IIO_EV_TYPE_CHANGE,
.dir = IIO_EV_DIR_NONE,
.mask_separate = BIT(IIO_EV_INFO_ENABLE),
};
/*
* simple transition event - triggered when the reported running confidence
* value rises above a threshold value
*/
static const struct iio_event_spec iio_running_event = {
.type = IIO_EV_TYPE_THRESH,
.dir = IIO_EV_DIR_RISING,
.mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
};
/*
* simple transition event - triggered when the reported walking confidence
* value falls under a threshold value
*/
static const struct iio_event_spec iio_walking_event = {
.type = IIO_EV_TYPE_THRESH,
.dir = IIO_EV_DIR_FALLING,
.mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
};
#endif
/*
* iio_dummy_channels - Description of available channels
*
* This array of structures tells the IIO core about what the device
* actually provides for a given channel.
*/
static const struct iio_chan_spec iio_dummy_channels[] = {
/* indexed ADC channel in_voltage0_raw etc */
{
.type = IIO_VOLTAGE,
/* Channel has a numeric index of 0 */
.indexed = 1,
.channel = 0,
/* What other information is available? */
.info_mask_separate =
/*
* in_voltage0_raw
* Raw (unscaled no bias removal etc) measurement
* from the device.
*/
BIT(IIO_CHAN_INFO_RAW) |
/*
* in_voltage0_offset
* Offset for userspace to apply prior to scale
		 * when converting to standard units (millivolts)
*/
BIT(IIO_CHAN_INFO_OFFSET) |
/*
* in_voltage0_scale
		 * Multiplier for userspace to apply post offset
		 * when converting to standard units (millivolts)
*/
BIT(IIO_CHAN_INFO_SCALE),
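		/*
		 * (Userspace combines these as:
		 *  value in standard units = (raw + offset) * scale.)
		 */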
/*
* sampling_frequency
* The frequency in Hz at which the channels are sampled
*/
.info_mask_shared_by_dir = BIT(IIO_CHAN_INFO_SAMP_FREQ),
/* The ordering of elements in the buffer via an enum */
.scan_index = DUMMY_INDEX_VOLTAGE_0,
.scan_type = { /* Description of storage in buffer */
.sign = 'u', /* unsigned */
.realbits = 13, /* 13 bits */
.storagebits = 16, /* 16 bits used for storage */
.shift = 0, /* zero shift */
},
#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
.event_spec = &iio_dummy_event,
.num_event_specs = 1,
#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
},
/* Differential ADC channel in_voltage1-voltage2_raw etc*/
{
.type = IIO_VOLTAGE,
.differential = 1,
/*
* Indexing for differential channels uses channel
* for the positive part, channel2 for the negative.
*/
.indexed = 1,
.channel = 1,
.channel2 = 2,
/*
* in_voltage1-voltage2_raw
* Raw (unscaled no bias removal etc) measurement
* from the device.
*/
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
/*
* in_voltage-voltage_scale
* Shared version of scale - shared by differential
* input channels of type IIO_VOLTAGE.
*/
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
/*
* sampling_frequency
* The frequency in Hz at which the channels are sampled
		 */
		.info_mask_shared_by_dir = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.scan_index = DUMMY_INDEX_DIFFVOLTAGE_1M2,
.scan_type = { /* Description of storage in buffer */
.sign = 's', /* signed */
.realbits = 12, /* 12 bits */
.storagebits = 16, /* 16 bits used for storage */
.shift = 0, /* zero shift */
},
},
	/* Differential ADC channel in_voltage3-voltage4_raw etc */
{
.type = IIO_VOLTAGE,
.differential = 1,
.indexed = 1,
.channel = 3,
.channel2 = 4,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
.info_mask_shared_by_dir = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.scan_index = DUMMY_INDEX_DIFFVOLTAGE_3M4,
.scan_type = {
.sign = 's',
.realbits = 11,
.storagebits = 16,
.shift = 0,
},
},
/*
* 'modified' (i.e. axis specified) acceleration channel
	 * in_accel_x_raw
*/
{
.type = IIO_ACCEL,
.modified = 1,
		/* Channel 2 is used for modifiers */
.channel2 = IIO_MOD_X,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
/*
* Internal bias and gain correction values. Applied
* by the hardware or driver prior to userspace
* seeing the readings. Typically part of hardware
* calibration.
*/
BIT(IIO_CHAN_INFO_CALIBSCALE) |
BIT(IIO_CHAN_INFO_CALIBBIAS),
.info_mask_shared_by_dir = BIT(IIO_CHAN_INFO_SAMP_FREQ),
.scan_index = DUMMY_INDEX_ACCELX,
.scan_type = { /* Description of storage in buffer */
.sign = 's', /* signed */
.realbits = 16, /* 16 bits */
.storagebits = 16, /* 16 bits used for storage */
.shift = 0, /* zero shift */
},
},
/*
* Convenience macro for timestamps. 4 is the index in
* the buffer.
*/
IIO_CHAN_SOFT_TIMESTAMP(4),
/* DAC channel out_voltage0_raw */
{
.type = IIO_VOLTAGE,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.scan_index = -1, /* No buffer support */
.output = 1,
.indexed = 1,
.channel = 0,
},
{
.type = IIO_STEPS,
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_ENABLE) |
BIT(IIO_CHAN_INFO_CALIBHEIGHT),
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
.scan_index = -1, /* No buffer support */
#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
.event_spec = &step_detect_event,
.num_event_specs = 1,
#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
},
{
.type = IIO_ACTIVITY,
.modified = 1,
.channel2 = IIO_MOD_RUNNING,
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
.scan_index = -1, /* No buffer support */
#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
.event_spec = &iio_running_event,
.num_event_specs = 1,
#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
},
{
.type = IIO_ACTIVITY,
.modified = 1,
.channel2 = IIO_MOD_WALKING,
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
.scan_index = -1, /* No buffer support */
#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
.event_spec = &iio_walking_event,
.num_event_specs = 1,
#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
},
};
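/*
 * Illustrative sketch (assumption, not from the original source): with the
 * channel array above, the IIO core typically exposes sysfs attributes such
 * as
 *
 *	in_voltage0_raw, in_voltage0_offset, in_voltage0_scale
 *	in_voltage1-voltage2_raw, in_voltage-voltage_scale
 *	in_accel_x_raw, in_accel_x_calibbias, in_accel_x_calibscale
 *	in_steps_input, in_activity_running_input, in_activity_walking_input
 *	out_voltage0_raw, sampling_frequency
 *
 * Treat this list as an example of the naming scheme rather than a
 * definitive enumeration; the exact set depends on the core version.
 */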
/**
* iio_dummy_read_raw() - data read function.
* @indio_dev: the struct iio_dev associated with this device instance
* @chan: the channel whose data is to be read
* @val: first element of returned value (typically INT)
* @val2: second element of returned value (typically MICRO)
* @mask: what we actually want to read as per the info_mask_*
* in iio_chan_spec.
*/
static int iio_dummy_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
int *val2,
long mask)
{
struct iio_dummy_state *st = iio_priv(indio_dev);
int ret = -EINVAL;
mutex_lock(&st->lock);
switch (mask) {
case IIO_CHAN_INFO_RAW: /* magic value - channel value read */
switch (chan->type) {
case IIO_VOLTAGE:
if (chan->output) {
/* Set integer part to cached value */
*val = st->dac_val;
ret = IIO_VAL_INT;
} else if (chan->differential) {
if (chan->channel == 1)
*val = st->differential_adc_val[0];
else
*val = st->differential_adc_val[1];
ret = IIO_VAL_INT;
} else {
*val = st->single_ended_adc_val;
ret = IIO_VAL_INT;
}
break;
case IIO_ACCEL:
*val = st->accel_val;
ret = IIO_VAL_INT;
break;
default:
break;
}
break;
case IIO_CHAN_INFO_PROCESSED:
switch (chan->type) {
case IIO_STEPS:
*val = st->steps;
ret = IIO_VAL_INT;
break;
case IIO_ACTIVITY:
switch (chan->channel2) {
case IIO_MOD_RUNNING:
*val = st->activity_running;
ret = IIO_VAL_INT;
break;
case IIO_MOD_WALKING:
*val = st->activity_walking;
ret = IIO_VAL_INT;
break;
default:
break;
}
break;
default:
break;
}
break;
case IIO_CHAN_INFO_OFFSET:
/* only single ended adc -> 7 */
*val = 7;
ret = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
switch (chan->differential) {
case 0:
/* only single ended adc -> 0.001333 */
*val = 0;
*val2 = 1333;
ret = IIO_VAL_INT_PLUS_MICRO;
break;
case 1:
/* all differential adc -> 0.000001344 */
*val = 0;
*val2 = 1344;
ret = IIO_VAL_INT_PLUS_NANO;
}
break;
default:
break;
}
break;
case IIO_CHAN_INFO_CALIBBIAS:
/* only the acceleration axis - read from cache */
*val = st->accel_calibbias;
ret = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_CALIBSCALE:
*val = st->accel_calibscale->val;
*val2 = st->accel_calibscale->val2;
ret = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_SAMP_FREQ:
*val = 3;
*val2 = 33;
ret = IIO_VAL_INT_PLUS_NANO;
break;
case IIO_CHAN_INFO_ENABLE:
switch (chan->type) {
case IIO_STEPS:
*val = st->steps_enabled;
ret = IIO_VAL_INT;
break;
default:
break;
}
break;
case IIO_CHAN_INFO_CALIBHEIGHT:
switch (chan->type) {
case IIO_STEPS:
*val = st->height;
ret = IIO_VAL_INT;
break;
default:
break;
}
break;
default:
break;
}
mutex_unlock(&st->lock);
return ret;
}
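/*
 * Worked example (illustrative, not part of the original driver): using the
 * defaults set in iio_dummy_init_device() below, userspace converts the
 * single ended ADC reading to standard units (millivolts per the IIO ABI
 * for voltage channels) as
 *
 *	(raw + offset) * scale = (73 + 7) * 0.001333 ~= 0.107 mV
 */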
/**
* iio_dummy_write_raw() - data write function.
* @indio_dev: the struct iio_dev associated with this device instance
* @chan: the channel whose data is to be written
* @val: first element of value to set (typically INT)
* @val2: second element of value to set (typically MICRO)
* @mask: what we actually want to write as per the info_mask_*
* in iio_chan_spec.
*
* Note that all raw writes are assumed IIO_VAL_INT and info mask elements
 * are assumed to be IIO_VAL_INT_PLUS_MICRO unless the callback write_raw_get_fmt
* in struct iio_info is provided by the driver.
*/
static int iio_dummy_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val,
int val2,
long mask)
{
int i;
int ret = 0;
struct iio_dummy_state *st = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_RAW:
switch (chan->type) {
case IIO_VOLTAGE:
if (chan->output == 0)
return -EINVAL;
			/* Cache the single DAC value under the lock */
mutex_lock(&st->lock);
st->dac_val = val;
mutex_unlock(&st->lock);
return 0;
default:
return -EINVAL;
}
case IIO_CHAN_INFO_PROCESSED:
switch (chan->type) {
case IIO_STEPS:
mutex_lock(&st->lock);
st->steps = val;
mutex_unlock(&st->lock);
return 0;
case IIO_ACTIVITY:
if (val < 0)
val = 0;
if (val > 100)
val = 100;
switch (chan->channel2) {
case IIO_MOD_RUNNING:
st->activity_running = val;
return 0;
case IIO_MOD_WALKING:
st->activity_walking = val;
return 0;
default:
return -EINVAL;
}
break;
default:
return -EINVAL;
}
case IIO_CHAN_INFO_CALIBSCALE:
mutex_lock(&st->lock);
/* Compare against table - hard matching here */
for (i = 0; i < ARRAY_SIZE(dummy_scales); i++)
if (val == dummy_scales[i].val &&
val2 == dummy_scales[i].val2)
break;
if (i == ARRAY_SIZE(dummy_scales))
ret = -EINVAL;
else
st->accel_calibscale = &dummy_scales[i];
mutex_unlock(&st->lock);
return ret;
case IIO_CHAN_INFO_CALIBBIAS:
mutex_lock(&st->lock);
st->accel_calibbias = val;
mutex_unlock(&st->lock);
return 0;
case IIO_CHAN_INFO_ENABLE:
switch (chan->type) {
case IIO_STEPS:
mutex_lock(&st->lock);
st->steps_enabled = val;
mutex_unlock(&st->lock);
return 0;
default:
return -EINVAL;
}
case IIO_CHAN_INFO_CALIBHEIGHT:
switch (chan->type) {
case IIO_STEPS:
st->height = val;
return 0;
default:
return -EINVAL;
}
default:
return -EINVAL;
}
}
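/*
 * Hedged sketch (assumption, not in the original driver): if a driver
 * wanted writes parsed with nano precision instead of the
 * IIO_VAL_INT_PLUS_MICRO default mentioned above, it could supply
 * something like
 *
 *	static int iio_dummy_write_raw_get_fmt(struct iio_dev *indio_dev,
 *					       struct iio_chan_spec const *chan,
 *					       long mask)
 *	{
 *		return IIO_VAL_INT_PLUS_NANO;
 *	}
 *
 * and wire it up through .write_raw_get_fmt in struct iio_info.
 */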
/*
* Device type specific information.
*/
static const struct iio_info iio_dummy_info = {
.read_raw = &iio_dummy_read_raw,
.write_raw = &iio_dummy_write_raw,
#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
.read_event_config = &iio_simple_dummy_read_event_config,
.write_event_config = &iio_simple_dummy_write_event_config,
.read_event_value = &iio_simple_dummy_read_event_value,
.write_event_value = &iio_simple_dummy_write_event_value,
#endif /* CONFIG_IIO_SIMPLE_DUMMY_EVENTS */
};
/**
* iio_dummy_init_device() - device instance specific init
* @indio_dev: the iio device structure
*
* Most drivers have one of these to set up default values,
* reset the device to known state etc.
*/
static int iio_dummy_init_device(struct iio_dev *indio_dev)
{
struct iio_dummy_state *st = iio_priv(indio_dev);
st->dac_val = 0;
st->single_ended_adc_val = 73;
st->differential_adc_val[0] = 33;
st->differential_adc_val[1] = -34;
st->accel_val = 34;
st->accel_calibbias = -7;
st->accel_calibscale = &dummy_scales[0];
st->steps = 47;
st->activity_running = 98;
st->activity_walking = 4;
return 0;
}
/**
* iio_dummy_probe() - device instance probe
* @name: name of this instance.
*
* Arguments are bus type specific.
* I2C: iio_dummy_probe(struct i2c_client *client,
* const struct i2c_device_id *id)
* SPI: iio_dummy_probe(struct spi_device *spi)
*/
static struct iio_sw_device *iio_dummy_probe(const char *name)
{
int ret;
struct iio_dev *indio_dev;
struct iio_dummy_state *st;
struct iio_sw_device *swd;
struct device *parent = NULL;
/*
* With hardware: Set the parent device.
* parent = &spi->dev;
* parent = &client->dev;
*/
swd = kzalloc(sizeof(*swd), GFP_KERNEL);
if (!swd)
return ERR_PTR(-ENOMEM);
/*
* Allocate an IIO device.
*
* This structure contains all generic state
* information about the device instance.
	 * It also has a region (accessed by iio_priv())
	 * for chip specific state information.
*/
indio_dev = iio_device_alloc(parent, sizeof(*st));
if (!indio_dev) {
ret = -ENOMEM;
goto error_free_swd;
}
st = iio_priv(indio_dev);
mutex_init(&st->lock);
iio_dummy_init_device(indio_dev);
/*
* Make the iio_dev struct available to remove function.
* Bus equivalents
* i2c_set_clientdata(client, indio_dev);
* spi_set_drvdata(spi, indio_dev);
*/
swd->device = indio_dev;
/*
* Set the device name.
*
* This is typically a part number and obtained from the module
* id table.
* e.g. for i2c and spi:
* indio_dev->name = id->name;
* indio_dev->name = spi_get_device_id(spi)->name;
*/
indio_dev->name = kstrdup(name, GFP_KERNEL);
if (!indio_dev->name) {
ret = -ENOMEM;
goto error_free_device;
}
/* Provide description of available channels */
indio_dev->channels = iio_dummy_channels;
indio_dev->num_channels = ARRAY_SIZE(iio_dummy_channels);
/*
* Provide device type specific interface functions and
* constant data.
*/
indio_dev->info = &iio_dummy_info;
/* Specify that device provides sysfs type interfaces */
indio_dev->modes = INDIO_DIRECT_MODE;
ret = iio_simple_dummy_events_register(indio_dev);
if (ret < 0)
goto error_free_name;
ret = iio_simple_dummy_configure_buffer(indio_dev);
if (ret < 0)
goto error_unregister_events;
ret = iio_device_register(indio_dev);
if (ret < 0)
goto error_unconfigure_buffer;
iio_swd_group_init_type_name(swd, name, &iio_dummy_type);
return swd;
error_unconfigure_buffer:
iio_simple_dummy_unconfigure_buffer(indio_dev);
error_unregister_events:
iio_simple_dummy_events_unregister(indio_dev);
error_free_name:
kfree(indio_dev->name);
error_free_device:
iio_device_free(indio_dev);
error_free_swd:
kfree(swd);
return ERR_PTR(ret);
}
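/*
 * Illustrative sketch only (hypothetical names, and the probe prototype
 * varies between kernel versions): with real hardware the allocation above
 * would normally live inside a bus probe, e.g. for I2C
 *
 *	static int foo_i2c_probe(struct i2c_client *client)
 *	{
 *		struct iio_dev *indio_dev;
 *
 *		indio_dev = devm_iio_device_alloc(&client->dev,
 *						  sizeof(struct iio_dummy_state));
 *		if (!indio_dev)
 *			return -ENOMEM;
 *		i2c_set_clientdata(client, indio_dev);
 *		...
 *		return devm_iio_device_register(&client->dev, indio_dev);
 *	}
 */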
/**
* iio_dummy_remove() - device instance removal function
* @swd: pointer to software IIO device abstraction
*
* Parameters follow those of iio_dummy_probe for buses.
*/
static int iio_dummy_remove(struct iio_sw_device *swd)
{
/*
* Get a pointer to the device instance iio_dev structure
* from the bus subsystem. E.g.
* struct iio_dev *indio_dev = i2c_get_clientdata(client);
* struct iio_dev *indio_dev = spi_get_drvdata(spi);
*/
struct iio_dev *indio_dev = swd->device;
/* Unregister the device */
iio_device_unregister(indio_dev);
/* Device specific code to power down etc */
/* Buffered capture related cleanup */
iio_simple_dummy_unconfigure_buffer(indio_dev);
iio_simple_dummy_events_unregister(indio_dev);
/* Free all structures */
kfree(indio_dev->name);
iio_device_free(indio_dev);
return 0;
}
/*
* module_iio_sw_device_driver() - device driver registration
*
* Varies depending on bus type of the device. As there is no device
* here, call probe directly. For information on device registration
* i2c:
* Documentation/i2c/writing-clients.rst
* spi:
* Documentation/spi/spi-summary.rst
*/
static const struct iio_sw_device_ops iio_dummy_device_ops = {
.probe = iio_dummy_probe,
.remove = iio_dummy_remove,
};
static struct iio_sw_device_type iio_dummy_device = {
.name = "dummy",
.owner = THIS_MODULE,
.ops = &iio_dummy_device_ops,
};
module_iio_sw_device_driver(iio_dummy_device);
MODULE_AUTHOR("Jonathan Cameron <[email protected]>");
MODULE_DESCRIPTION("IIO dummy driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/dummy/iio_simple_dummy.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2011 Jonathan Cameron
*
* Event handling elements of industrial I/O reference driver.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
#include "iio_simple_dummy.h"
/* Evgen 'fakes' interrupt events for this example */
#include "iio_dummy_evgen.h"
/**
* iio_simple_dummy_read_event_config() - is event enabled?
* @indio_dev: the device instance data
* @chan: channel for the event whose state is being queried
* @type: type of the event whose state is being queried
 * @dir: direction of the event whose state is being queried
*
* This function would normally query the relevant registers or a cache to
* discover if the event generation is enabled on the device.
*/
int iio_simple_dummy_read_event_config(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir)
{
struct iio_dummy_state *st = iio_priv(indio_dev);
return st->event_en;
}
/**
* iio_simple_dummy_write_event_config() - set whether event is enabled
* @indio_dev: the device instance data
* @chan: channel for the event whose state is being set
* @type: type of the event whose state is being set
 * @dir: direction of the event whose state is being set
* @state: whether to enable or disable the device.
*
 * This function would normally set the relevant registers on the device
* so that it generates the specified event. Here it just sets up a cached
* value.
*/
int iio_simple_dummy_write_event_config(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
int state)
{
struct iio_dummy_state *st = iio_priv(indio_dev);
/*
* Deliberately over the top code splitting to illustrate
* how this is done when multiple events exist.
*/
switch (chan->type) {
case IIO_VOLTAGE:
switch (type) {
case IIO_EV_TYPE_THRESH:
if (dir == IIO_EV_DIR_RISING)
st->event_en = state;
else
return -EINVAL;
break;
default:
return -EINVAL;
}
break;
case IIO_ACTIVITY:
switch (type) {
case IIO_EV_TYPE_THRESH:
st->event_en = state;
break;
default:
return -EINVAL;
}
break;
case IIO_STEPS:
switch (type) {
case IIO_EV_TYPE_CHANGE:
st->event_en = state;
break;
default:
return -EINVAL;
}
break;
default:
return -EINVAL;
}
return 0;
}
/**
* iio_simple_dummy_read_event_value() - get value associated with event
* @indio_dev: device instance specific data
* @chan: channel for the event whose value is being read
* @type: type of the event whose value is being read
 * @dir: direction of the event whose value is being read
* @info: info type of the event whose value is being read
* @val: value for the event code.
* @val2: unused
*
* Many devices provide a large set of events of which only a subset may
* be enabled at a time, with value registers whose meaning changes depending
* on the event enabled. This often means that the driver must cache the values
 * associated with each possible event so that the right value is in place when
* the enabled event is changed.
*/
int iio_simple_dummy_read_event_value(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
enum iio_event_info info,
int *val, int *val2)
{
struct iio_dummy_state *st = iio_priv(indio_dev);
*val = st->event_val;
return IIO_VAL_INT;
}
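/*
 * Sketch (assumption, not part of the original example): a driver with more
 * than one event would typically replace the single cached event_val with a
 * small per-event cache, for instance
 *
 *	struct foo_event_cache {
 *		int thresh_rising_val;
 *		int thresh_falling_val;
 *	};
 *
 * and select the right member here based on @chan, @type, @dir and @info.
 */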
/**
* iio_simple_dummy_write_event_value() - set value associate with event
* @indio_dev: device instance specific data
* @chan: channel for the event whose value is being set
* @type: type of the event whose value is being set
 * @dir: direction of the event whose value is being set
* @info: info type of the event whose value is being set
* @val: the value to be set.
* @val2: unused
*/
int iio_simple_dummy_write_event_value(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
enum iio_event_info info,
int val, int val2)
{
struct iio_dummy_state *st = iio_priv(indio_dev);
st->event_val = val;
return 0;
}
static irqreturn_t iio_simple_dummy_get_timestamp(int irq, void *private)
{
struct iio_dev *indio_dev = private;
struct iio_dummy_state *st = iio_priv(indio_dev);
st->event_timestamp = iio_get_time_ns(indio_dev);
return IRQ_WAKE_THREAD;
}
/**
* iio_simple_dummy_event_handler() - identify and pass on event
* @irq: irq of event line
* @private: pointer to device instance state.
*
* This handler is responsible for querying the device to find out what
* event occurred and for then pushing that event towards userspace.
* Here only one event occurs so we push that directly on with locally
* grabbed timestamp.
*/
static irqreturn_t iio_simple_dummy_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
struct iio_dummy_state *st = iio_priv(indio_dev);
dev_dbg(&indio_dev->dev, "id %x event %x\n",
st->regs->reg_id, st->regs->reg_data);
switch (st->regs->reg_data) {
case 0:
iio_push_event(indio_dev,
IIO_EVENT_CODE(IIO_VOLTAGE, 0, 0,
IIO_EV_DIR_RISING,
IIO_EV_TYPE_THRESH, 0, 0, 0),
st->event_timestamp);
break;
case 1:
if (st->activity_running > st->event_val)
iio_push_event(indio_dev,
IIO_EVENT_CODE(IIO_ACTIVITY, 0,
IIO_MOD_RUNNING,
IIO_EV_DIR_RISING,
IIO_EV_TYPE_THRESH,
0, 0, 0),
st->event_timestamp);
break;
case 2:
if (st->activity_walking < st->event_val)
iio_push_event(indio_dev,
IIO_EVENT_CODE(IIO_ACTIVITY, 0,
IIO_MOD_WALKING,
IIO_EV_DIR_FALLING,
IIO_EV_TYPE_THRESH,
0, 0, 0),
st->event_timestamp);
break;
case 3:
iio_push_event(indio_dev,
IIO_EVENT_CODE(IIO_STEPS, 0, IIO_NO_MOD,
IIO_EV_DIR_NONE,
IIO_EV_TYPE_CHANGE, 0, 0, 0),
st->event_timestamp);
break;
default:
break;
}
return IRQ_HANDLED;
}
/**
* iio_simple_dummy_events_register() - setup interrupt handling for events
* @indio_dev: device instance data
*
* This function requests the threaded interrupt to handle the events.
* Normally the irq is a hardware interrupt and the number comes
* from board configuration files. Here we get it from a companion
 * module that fakes the interrupt for us. Note that the companion module
 * forms no part of this example itself. Just assume that events magically
* appear via the provided interrupt.
*/
int iio_simple_dummy_events_register(struct iio_dev *indio_dev)
{
struct iio_dummy_state *st = iio_priv(indio_dev);
int ret;
/* Fire up event source - normally not present */
st->event_irq = iio_dummy_evgen_get_irq();
if (st->event_irq < 0) {
ret = st->event_irq;
goto error_ret;
}
st->regs = iio_dummy_evgen_get_regs(st->event_irq);
ret = request_threaded_irq(st->event_irq,
&iio_simple_dummy_get_timestamp,
&iio_simple_dummy_event_handler,
IRQF_ONESHOT,
"iio_simple_event",
indio_dev);
if (ret < 0)
goto error_free_evgen;
return 0;
error_free_evgen:
iio_dummy_evgen_release_irq(st->event_irq);
error_ret:
return ret;
}
/**
* iio_simple_dummy_events_unregister() - tidy up interrupt handling on remove
* @indio_dev: device instance data
*/
void iio_simple_dummy_events_unregister(struct iio_dev *indio_dev)
{
struct iio_dummy_state *st = iio_priv(indio_dev);
free_irq(st->event_irq, indio_dev);
/* Not part of normal driver */
iio_dummy_evgen_release_irq(st->event_irq);
}
| linux-master | drivers/iio/dummy/iio_simple_dummy_events.c |
// SPDX-License-Identifier: GPL-2.0
/*
* STM32 Low-Power Timer Trigger driver
*
* Copyright (C) STMicroelectronics 2017
*
* Author: Fabrice Gasnier <[email protected]>.
*
* Inspired by Benjamin Gaignard's stm32-timer-trigger driver
*/
#include <linux/iio/timer/stm32-lptim-trigger.h>
#include <linux/mfd/stm32-lptimer.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
/* List Low-Power Timer triggers */
static const char * const stm32_lptim_triggers[] = {
LPTIM1_OUT,
LPTIM2_OUT,
LPTIM3_OUT,
};
struct stm32_lptim_trigger {
struct device *dev;
const char *trg;
};
static int stm32_lptim_validate_device(struct iio_trigger *trig,
struct iio_dev *indio_dev)
{
if (indio_dev->modes & INDIO_HARDWARE_TRIGGERED)
return 0;
return -EINVAL;
}
static const struct iio_trigger_ops stm32_lptim_trigger_ops = {
.validate_device = stm32_lptim_validate_device,
};
/**
* is_stm32_lptim_trigger
* @trig: trigger to be checked
*
 * Return true if the trigger is a valid STM32 IIO Low-Power Timer Trigger,
 * otherwise return false.
*/
bool is_stm32_lptim_trigger(struct iio_trigger *trig)
{
return (trig->ops == &stm32_lptim_trigger_ops);
}
EXPORT_SYMBOL(is_stm32_lptim_trigger);
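/*
 * Illustrative consumer-side sketch (hypothetical names, not part of this
 * driver): a device driver that accepts this trigger can use the helper
 * above in its struct iio_info validate_trigger callback, e.g.
 *
 *	static int foo_validate_trigger(struct iio_dev *indio_dev,
 *					struct iio_trigger *trig)
 *	{
 *		return is_stm32_lptim_trigger(trig) ? 0 : -EINVAL;
 *	}
 */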
static int stm32_lptim_setup_trig(struct stm32_lptim_trigger *priv)
{
struct iio_trigger *trig;
trig = devm_iio_trigger_alloc(priv->dev, "%s", priv->trg);
if (!trig)
return -ENOMEM;
trig->dev.parent = priv->dev->parent;
trig->ops = &stm32_lptim_trigger_ops;
iio_trigger_set_drvdata(trig, priv);
return devm_iio_trigger_register(priv->dev, trig);
}
static int stm32_lptim_trigger_probe(struct platform_device *pdev)
{
struct stm32_lptim_trigger *priv;
u32 index;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
if (device_property_read_u32(&pdev->dev, "reg", &index))
return -EINVAL;
if (index >= ARRAY_SIZE(stm32_lptim_triggers))
return -EINVAL;
priv->dev = &pdev->dev;
priv->trg = stm32_lptim_triggers[index];
return stm32_lptim_setup_trig(priv);
}
static const struct of_device_id stm32_lptim_trig_of_match[] = {
{ .compatible = "st,stm32-lptimer-trigger", },
{},
};
MODULE_DEVICE_TABLE(of, stm32_lptim_trig_of_match);
static struct platform_driver stm32_lptim_trigger_driver = {
.probe = stm32_lptim_trigger_probe,
.driver = {
.name = "stm32-lptimer-trigger",
.of_match_table = stm32_lptim_trig_of_match,
},
};
module_platform_driver(stm32_lptim_trigger_driver);
MODULE_AUTHOR("Fabrice Gasnier <[email protected]>");
MODULE_ALIAS("platform:stm32-lptimer-trigger");
MODULE_DESCRIPTION("STMicroelectronics STM32 LPTIM trigger driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/trigger/stm32-lptimer-trigger.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Industrial I/O - generic interrupt based trigger support
*
* Copyright (c) 2008-2013 Jonathan Cameron
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
struct iio_interrupt_trigger_info {
unsigned int irq;
};
static irqreturn_t iio_interrupt_trigger_poll(int irq, void *private)
{
iio_trigger_poll(private);
return IRQ_HANDLED;
}
static int iio_interrupt_trigger_probe(struct platform_device *pdev)
{
struct iio_interrupt_trigger_info *trig_info;
struct iio_trigger *trig;
unsigned long irqflags;
struct resource *irq_res;
int irq, ret = 0;
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (irq_res == NULL)
return -ENODEV;
irqflags = (irq_res->flags & IRQF_TRIGGER_MASK) | IRQF_SHARED;
irq = irq_res->start;
trig = iio_trigger_alloc(NULL, "irqtrig%d", irq);
if (!trig) {
ret = -ENOMEM;
goto error_ret;
}
trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL);
if (!trig_info) {
ret = -ENOMEM;
goto error_free_trigger;
}
iio_trigger_set_drvdata(trig, trig_info);
trig_info->irq = irq;
ret = request_irq(irq, iio_interrupt_trigger_poll,
irqflags, trig->name, trig);
if (ret) {
dev_err(&pdev->dev,
"request IRQ-%d failed", irq);
goto error_free_trig_info;
}
ret = iio_trigger_register(trig);
if (ret)
goto error_release_irq;
platform_set_drvdata(pdev, trig);
return 0;
/* First clean up the partly allocated trigger */
error_release_irq:
free_irq(irq, trig);
error_free_trig_info:
kfree(trig_info);
error_free_trigger:
iio_trigger_free(trig);
error_ret:
return ret;
}
static int iio_interrupt_trigger_remove(struct platform_device *pdev)
{
struct iio_trigger *trig;
struct iio_interrupt_trigger_info *trig_info;
trig = platform_get_drvdata(pdev);
trig_info = iio_trigger_get_drvdata(trig);
iio_trigger_unregister(trig);
free_irq(trig_info->irq, trig);
kfree(trig_info);
iio_trigger_free(trig);
return 0;
}
static struct platform_driver iio_interrupt_trigger_driver = {
.probe = iio_interrupt_trigger_probe,
.remove = iio_interrupt_trigger_remove,
.driver = {
.name = "iio_interrupt_trigger",
},
};
module_platform_driver(iio_interrupt_trigger_driver);
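/*
 * Hedged usage sketch (assumption, not from the original file): a board or
 * platform setup file could bind this driver by registering a platform
 * device carrying the interrupt as an IORESOURCE_IRQ resource, e.g.
 *
 *	static struct resource foo_trig_resources[] = {
 *		DEFINE_RES_IRQ(42),
 *	};
 *
 *	static struct platform_device foo_trig_pdev = {
 *		.name		= "iio_interrupt_trigger",
 *		.resource	= foo_trig_resources,
 *		.num_resources	= ARRAY_SIZE(foo_trig_resources),
 *	};
 *
 * where the IRQ number 42 and the "foo" names are purely illustrative.
 */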
MODULE_AUTHOR("Jonathan Cameron <[email protected]>");
MODULE_DESCRIPTION("Interrupt trigger for the iio subsystem");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/trigger/iio-trig-interrupt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics 2016
*
* Author: Benjamin Gaignard <[email protected]>
*
*/
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/timer/stm32-timer-trigger.h>
#include <linux/iio/trigger.h>
#include <linux/mfd/stm32-timers.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#define MAX_TRIGGERS 7
#define MAX_VALIDS 5
/* List the triggers created by each timer */
static const void *triggers_table[][MAX_TRIGGERS] = {
{ TIM1_TRGO, TIM1_TRGO2, TIM1_CH1, TIM1_CH2, TIM1_CH3, TIM1_CH4,},
{ TIM2_TRGO, TIM2_CH1, TIM2_CH2, TIM2_CH3, TIM2_CH4,},
{ TIM3_TRGO, TIM3_CH1, TIM3_CH2, TIM3_CH3, TIM3_CH4,},
{ TIM4_TRGO, TIM4_CH1, TIM4_CH2, TIM4_CH3, TIM4_CH4,},
{ TIM5_TRGO, TIM5_CH1, TIM5_CH2, TIM5_CH3, TIM5_CH4,},
{ TIM6_TRGO,},
{ TIM7_TRGO,},
{ TIM8_TRGO, TIM8_TRGO2, TIM8_CH1, TIM8_CH2, TIM8_CH3, TIM8_CH4,},
{ TIM9_TRGO, TIM9_CH1, TIM9_CH2,},
{ TIM10_OC1,},
{ TIM11_OC1,},
{ TIM12_TRGO, TIM12_CH1, TIM12_CH2,},
{ TIM13_OC1,},
{ TIM14_OC1,},
{ TIM15_TRGO,},
{ TIM16_OC1,},
{ TIM17_OC1,},
};
/* List the triggers accepted by each timer */
static const void *valids_table[][MAX_VALIDS] = {
{ TIM5_TRGO, TIM2_TRGO, TIM3_TRGO, TIM4_TRGO,},
{ TIM1_TRGO, TIM8_TRGO, TIM3_TRGO, TIM4_TRGO,},
{ TIM1_TRGO, TIM2_TRGO, TIM5_TRGO, TIM4_TRGO,},
{ TIM1_TRGO, TIM2_TRGO, TIM3_TRGO, TIM8_TRGO,},
{ TIM2_TRGO, TIM3_TRGO, TIM4_TRGO, TIM8_TRGO,},
{ }, /* timer 6 */
{ }, /* timer 7 */
{ TIM1_TRGO, TIM2_TRGO, TIM4_TRGO, TIM5_TRGO,},
{ TIM2_TRGO, TIM3_TRGO, TIM10_OC1, TIM11_OC1,},
{ }, /* timer 10 */
{ }, /* timer 11 */
{ TIM4_TRGO, TIM5_TRGO, TIM13_OC1, TIM14_OC1,},
};
static const void *stm32h7_valids_table[][MAX_VALIDS] = {
{ TIM15_TRGO, TIM2_TRGO, TIM3_TRGO, TIM4_TRGO,},
{ TIM1_TRGO, TIM8_TRGO, TIM3_TRGO, TIM4_TRGO,},
{ TIM1_TRGO, TIM2_TRGO, TIM15_TRGO, TIM4_TRGO,},
{ TIM1_TRGO, TIM2_TRGO, TIM3_TRGO, TIM8_TRGO,},
{ TIM1_TRGO, TIM8_TRGO, TIM3_TRGO, TIM4_TRGO,},
{ }, /* timer 6 */
{ }, /* timer 7 */
{ TIM1_TRGO, TIM2_TRGO, TIM4_TRGO, TIM5_TRGO,},
{ }, /* timer 9 */
{ }, /* timer 10 */
{ }, /* timer 11 */
{ TIM4_TRGO, TIM5_TRGO, TIM13_OC1, TIM14_OC1,},
{ }, /* timer 13 */
{ }, /* timer 14 */
{ TIM1_TRGO, TIM3_TRGO, TIM16_OC1, TIM17_OC1,},
{ }, /* timer 16 */
{ }, /* timer 17 */
};
struct stm32_timer_trigger_regs {
u32 cr1;
u32 cr2;
u32 psc;
u32 arr;
u32 cnt;
u32 smcr;
};
struct stm32_timer_trigger {
struct device *dev;
struct regmap *regmap;
struct clk *clk;
bool enabled;
u32 max_arr;
const void *triggers;
const void *valids;
bool has_trgo2;
struct mutex lock; /* concurrent sysfs configuration */
struct list_head tr_list;
struct stm32_timer_trigger_regs bak;
};
struct stm32_timer_trigger_cfg {
const void *(*valids_table)[MAX_VALIDS];
const unsigned int num_valids_table;
};
static bool stm32_timer_is_trgo2_name(const char *name)
{
return !!strstr(name, "trgo2");
}
static bool stm32_timer_is_trgo_name(const char *name)
{
return (!!strstr(name, "trgo") && !strstr(name, "trgo2"));
}
static int stm32_timer_start(struct stm32_timer_trigger *priv,
struct iio_trigger *trig,
unsigned int frequency)
{
unsigned long long prd, div;
int prescaler = 0;
u32 ccer;
	/* Period and prescaler values depend on the clock rate */
div = (unsigned long long)clk_get_rate(priv->clk);
do_div(div, frequency);
prd = div;
/*
	 * Increase the prescaler value until we get a result that fits
	 * within the auto-reload register maximum value.
*/
while (div > priv->max_arr) {
prescaler++;
div = prd;
do_div(div, (prescaler + 1));
}
prd = div;
if (prescaler > MAX_TIM_PSC) {
dev_err(priv->dev, "prescaler exceeds the maximum value\n");
return -EINVAL;
}
	/* Check that nobody else is using the timer */
regmap_read(priv->regmap, TIM_CCER, &ccer);
if (ccer & TIM_CCER_CCXE)
return -EBUSY;
mutex_lock(&priv->lock);
if (!priv->enabled) {
priv->enabled = true;
clk_enable(priv->clk);
}
regmap_write(priv->regmap, TIM_PSC, prescaler);
regmap_write(priv->regmap, TIM_ARR, prd - 1);
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, TIM_CR1_ARPE);
/* Force master mode to update mode */
if (stm32_timer_is_trgo2_name(trig->name))
regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2,
0x2 << TIM_CR2_MMS2_SHIFT);
else
regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS,
0x2 << TIM_CR2_MMS_SHIFT);
/* Make sure that registers are updated */
regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG);
/* Enable controller */
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, TIM_CR1_CEN);
mutex_unlock(&priv->lock);
return 0;
}
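/*
 * Worked example (illustrative figures, not from the original source): with
 * a 90 MHz timer clock, a requested frequency of 1 kHz and max_arr = 0xffff,
 * the loop above first computes prd = 90000, which exceeds max_arr, so the
 * prescaler is bumped to 1 and the reload value becomes 45000 - 1, giving
 * 90 MHz / ((1 + 1) * 45000) = 1 kHz.
 */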
static void stm32_timer_stop(struct stm32_timer_trigger *priv,
struct iio_trigger *trig)
{
u32 ccer;
regmap_read(priv->regmap, TIM_CCER, &ccer);
if (ccer & TIM_CCER_CCXE)
return;
mutex_lock(&priv->lock);
/* Stop timer */
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
regmap_write(priv->regmap, TIM_PSC, 0);
regmap_write(priv->regmap, TIM_ARR, 0);
/* Force disable master mode */
if (stm32_timer_is_trgo2_name(trig->name))
regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2, 0);
else
regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS, 0);
/* Make sure that registers are updated */
regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG);
if (priv->enabled) {
priv->enabled = false;
clk_disable(priv->clk);
}
mutex_unlock(&priv->lock);
}
static ssize_t stm32_tt_store_frequency(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct iio_trigger *trig = to_iio_trigger(dev);
struct stm32_timer_trigger *priv = iio_trigger_get_drvdata(trig);
unsigned int freq;
int ret;
ret = kstrtouint(buf, 10, &freq);
if (ret)
return ret;
if (freq == 0) {
stm32_timer_stop(priv, trig);
} else {
ret = stm32_timer_start(priv, trig, freq);
if (ret)
return ret;
}
return len;
}
static ssize_t stm32_tt_read_frequency(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_trigger *trig = to_iio_trigger(dev);
struct stm32_timer_trigger *priv = iio_trigger_get_drvdata(trig);
u32 psc, arr, cr1;
unsigned long long freq = 0;
regmap_read(priv->regmap, TIM_CR1, &cr1);
regmap_read(priv->regmap, TIM_PSC, &psc);
regmap_read(priv->regmap, TIM_ARR, &arr);
if (cr1 & TIM_CR1_CEN) {
freq = (unsigned long long)clk_get_rate(priv->clk);
do_div(freq, psc + 1);
do_div(freq, arr + 1);
}
return sprintf(buf, "%d\n", (unsigned int)freq);
}
static IIO_DEV_ATTR_SAMP_FREQ(0660,
stm32_tt_read_frequency,
stm32_tt_store_frequency);
#define MASTER_MODE_MAX 7
#define MASTER_MODE2_MAX 15
static char *master_mode_table[] = {
"reset",
"enable",
"update",
"compare_pulse",
"OC1REF",
"OC2REF",
"OC3REF",
"OC4REF",
/* Master mode selection 2 only */
"OC5REF",
"OC6REF",
"compare_pulse_OC4REF",
"compare_pulse_OC6REF",
"compare_pulse_OC4REF_r_or_OC6REF_r",
"compare_pulse_OC4REF_r_or_OC6REF_f",
"compare_pulse_OC5REF_r_or_OC6REF_r",
"compare_pulse_OC5REF_r_or_OC6REF_f",
};
static ssize_t stm32_tt_show_master_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct stm32_timer_trigger *priv = dev_get_drvdata(dev);
struct iio_trigger *trig = to_iio_trigger(dev);
u32 cr2;
regmap_read(priv->regmap, TIM_CR2, &cr2);
if (stm32_timer_is_trgo2_name(trig->name))
cr2 = (cr2 & TIM_CR2_MMS2) >> TIM_CR2_MMS2_SHIFT;
else
cr2 = (cr2 & TIM_CR2_MMS) >> TIM_CR2_MMS_SHIFT;
return sysfs_emit(buf, "%s\n", master_mode_table[cr2]);
}
static ssize_t stm32_tt_store_master_mode(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct stm32_timer_trigger *priv = dev_get_drvdata(dev);
struct iio_trigger *trig = to_iio_trigger(dev);
u32 mask, shift, master_mode_max;
int i;
if (stm32_timer_is_trgo2_name(trig->name)) {
mask = TIM_CR2_MMS2;
shift = TIM_CR2_MMS2_SHIFT;
master_mode_max = MASTER_MODE2_MAX;
} else {
mask = TIM_CR2_MMS;
shift = TIM_CR2_MMS_SHIFT;
master_mode_max = MASTER_MODE_MAX;
}
for (i = 0; i <= master_mode_max; i++) {
if (!strncmp(master_mode_table[i], buf,
strlen(master_mode_table[i]))) {
mutex_lock(&priv->lock);
if (!priv->enabled) {
/* Clock should be enabled first */
priv->enabled = true;
clk_enable(priv->clk);
}
regmap_update_bits(priv->regmap, TIM_CR2, mask,
i << shift);
mutex_unlock(&priv->lock);
return len;
}
}
return -EINVAL;
}
static ssize_t stm32_tt_show_master_mode_avail(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_trigger *trig = to_iio_trigger(dev);
unsigned int i, master_mode_max;
size_t len = 0;
if (stm32_timer_is_trgo2_name(trig->name))
master_mode_max = MASTER_MODE2_MAX;
else
master_mode_max = MASTER_MODE_MAX;
for (i = 0; i <= master_mode_max; i++)
len += scnprintf(buf + len, PAGE_SIZE - len,
"%s ", master_mode_table[i]);
	/* replace trailing space with a newline */
buf[len - 1] = '\n';
return len;
}
static IIO_DEVICE_ATTR(master_mode_available, 0444,
stm32_tt_show_master_mode_avail, NULL, 0);
static IIO_DEVICE_ATTR(master_mode, 0660,
stm32_tt_show_master_mode,
stm32_tt_store_master_mode,
0);
static struct attribute *stm32_trigger_attrs[] = {
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_dev_attr_master_mode.dev_attr.attr,
&iio_dev_attr_master_mode_available.dev_attr.attr,
NULL,
};
static const struct attribute_group stm32_trigger_attr_group = {
.attrs = stm32_trigger_attrs,
};
static const struct attribute_group *stm32_trigger_attr_groups[] = {
&stm32_trigger_attr_group,
NULL,
};
static const struct iio_trigger_ops timer_trigger_ops = {
};
static void stm32_unregister_iio_triggers(struct stm32_timer_trigger *priv)
{
struct iio_trigger *tr;
list_for_each_entry(tr, &priv->tr_list, alloc_list)
iio_trigger_unregister(tr);
}
static int stm32_register_iio_triggers(struct stm32_timer_trigger *priv)
{
int ret;
const char * const *cur = priv->triggers;
INIT_LIST_HEAD(&priv->tr_list);
while (cur && *cur) {
struct iio_trigger *trig;
bool cur_is_trgo = stm32_timer_is_trgo_name(*cur);
bool cur_is_trgo2 = stm32_timer_is_trgo2_name(*cur);
if (cur_is_trgo2 && !priv->has_trgo2) {
cur++;
continue;
}
trig = devm_iio_trigger_alloc(priv->dev, "%s", *cur);
if (!trig)
return -ENOMEM;
trig->dev.parent = priv->dev->parent;
trig->ops = &timer_trigger_ops;
/*
* sampling frequency and master mode attributes
* should only be available on trgo/trgo2 triggers
*/
if (cur_is_trgo || cur_is_trgo2)
trig->dev.groups = stm32_trigger_attr_groups;
iio_trigger_set_drvdata(trig, priv);
ret = iio_trigger_register(trig);
if (ret) {
stm32_unregister_iio_triggers(priv);
return ret;
}
list_add_tail(&trig->alloc_list, &priv->tr_list);
cur++;
}
return 0;
}
static int stm32_counter_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
u32 dat;
switch (mask) {
case IIO_CHAN_INFO_RAW:
regmap_read(priv->regmap, TIM_CNT, &dat);
*val = dat;
return IIO_VAL_INT;
case IIO_CHAN_INFO_ENABLE:
regmap_read(priv->regmap, TIM_CR1, &dat);
*val = (dat & TIM_CR1_CEN) ? 1 : 0;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
regmap_read(priv->regmap, TIM_SMCR, &dat);
dat &= TIM_SMCR_SMS;
*val = 1;
*val2 = 0;
/* in quadrature case scale = 0.25 */
if (dat == 3)
*val2 = 2;
return IIO_VAL_FRACTIONAL_LOG2;
}
return -EINVAL;
}
static int stm32_counter_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_RAW:
return regmap_write(priv->regmap, TIM_CNT, val);
case IIO_CHAN_INFO_SCALE:
/* fixed scale */
return -EINVAL;
case IIO_CHAN_INFO_ENABLE:
mutex_lock(&priv->lock);
if (val) {
if (!priv->enabled) {
priv->enabled = true;
clk_enable(priv->clk);
}
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
TIM_CR1_CEN);
} else {
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
0);
if (priv->enabled) {
priv->enabled = false;
clk_disable(priv->clk);
}
}
mutex_unlock(&priv->lock);
return 0;
}
return -EINVAL;
}
static int stm32_counter_validate_trigger(struct iio_dev *indio_dev,
struct iio_trigger *trig)
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
const char * const *cur = priv->valids;
unsigned int i = 0;
if (!is_stm32_timer_trigger(trig))
return -EINVAL;
while (cur && *cur) {
if (!strncmp(trig->name, *cur, strlen(trig->name))) {
regmap_update_bits(priv->regmap,
TIM_SMCR, TIM_SMCR_TS,
i << TIM_SMCR_TS_SHIFT);
return 0;
}
cur++;
i++;
}
return -EINVAL;
}
static const struct iio_info stm32_trigger_info = {
.validate_trigger = stm32_counter_validate_trigger,
.read_raw = stm32_counter_read_raw,
.write_raw = stm32_counter_write_raw
};
static const char *const stm32_trigger_modes[] = {
"trigger",
};
static int stm32_set_trigger_mode(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
unsigned int mode)
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, TIM_SMCR_SMS);
return 0;
}
static int stm32_get_trigger_mode(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan)
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
u32 smcr;
regmap_read(priv->regmap, TIM_SMCR, &smcr);
return (smcr & TIM_SMCR_SMS) == TIM_SMCR_SMS ? 0 : -EINVAL;
}
static const struct iio_enum stm32_trigger_mode_enum = {
.items = stm32_trigger_modes,
.num_items = ARRAY_SIZE(stm32_trigger_modes),
.set = stm32_set_trigger_mode,
.get = stm32_get_trigger_mode
};
static const char *const stm32_enable_modes[] = {
"always",
"gated",
"triggered",
};
static int stm32_enable_mode2sms(int mode)
{
switch (mode) {
case 0:
return 0;
case 1:
return 5;
case 2:
return 6;
}
return -EINVAL;
}
static int stm32_set_enable_mode(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
unsigned int mode)
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
int sms = stm32_enable_mode2sms(mode);
if (sms < 0)
return sms;
/*
	 * Triggered mode sets the CEN bit automatically in hardware, so enable
	 * the counter clock first so it is available, keeping it in sync with CEN.
*/
mutex_lock(&priv->lock);
if (sms == 6 && !priv->enabled) {
clk_enable(priv->clk);
priv->enabled = true;
}
mutex_unlock(&priv->lock);
regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms);
return 0;
}
static int stm32_sms2enable_mode(int mode)
{
switch (mode) {
case 0:
return 0;
case 5:
return 1;
case 6:
return 2;
}
return -EINVAL;
}
static int stm32_get_enable_mode(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan)
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
u32 smcr;
regmap_read(priv->regmap, TIM_SMCR, &smcr);
smcr &= TIM_SMCR_SMS;
return stm32_sms2enable_mode(smcr);
}
static const struct iio_enum stm32_enable_mode_enum = {
.items = stm32_enable_modes,
.num_items = ARRAY_SIZE(stm32_enable_modes),
.set = stm32_set_enable_mode,
.get = stm32_get_enable_mode
};
static ssize_t stm32_count_get_preset(struct iio_dev *indio_dev,
uintptr_t private,
const struct iio_chan_spec *chan,
char *buf)
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
u32 arr;
regmap_read(priv->regmap, TIM_ARR, &arr);
return snprintf(buf, PAGE_SIZE, "%u\n", arr);
}
static ssize_t stm32_count_set_preset(struct iio_dev *indio_dev,
uintptr_t private,
const struct iio_chan_spec *chan,
const char *buf, size_t len)
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
unsigned int preset;
int ret;
ret = kstrtouint(buf, 0, &preset);
if (ret)
return ret;
/* TIMx_ARR register shouldn't be buffered (ARPE=0) */
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
regmap_write(priv->regmap, TIM_ARR, preset);
return len;
}
static const struct iio_chan_spec_ext_info stm32_trigger_count_info[] = {
{
.name = "preset",
.shared = IIO_SEPARATE,
.read = stm32_count_get_preset,
.write = stm32_count_set_preset
},
IIO_ENUM("enable_mode", IIO_SEPARATE, &stm32_enable_mode_enum),
IIO_ENUM_AVAILABLE("enable_mode", IIO_SHARED_BY_TYPE, &stm32_enable_mode_enum),
IIO_ENUM("trigger_mode", IIO_SEPARATE, &stm32_trigger_mode_enum),
IIO_ENUM_AVAILABLE("trigger_mode", IIO_SHARED_BY_TYPE, &stm32_trigger_mode_enum),
{}
};
static const struct iio_chan_spec stm32_trigger_channel = {
.type = IIO_COUNT,
.channel = 0,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_ENABLE) |
BIT(IIO_CHAN_INFO_SCALE),
.ext_info = stm32_trigger_count_info,
.indexed = 1
};
static struct stm32_timer_trigger *stm32_setup_counter_device(struct device *dev)
{
struct iio_dev *indio_dev;
int ret;
indio_dev = devm_iio_device_alloc(dev,
sizeof(struct stm32_timer_trigger));
if (!indio_dev)
return NULL;
indio_dev->name = dev_name(dev);
indio_dev->info = &stm32_trigger_info;
indio_dev->modes = INDIO_HARDWARE_TRIGGERED;
indio_dev->num_channels = 1;
indio_dev->channels = &stm32_trigger_channel;
ret = devm_iio_device_register(dev, indio_dev);
if (ret)
return NULL;
return iio_priv(indio_dev);
}
/**
* is_stm32_timer_trigger
* @trig: trigger to be checked
*
 * Return true if the trigger is a valid stm32 iio timer trigger,
 * otherwise return false.
*/
bool is_stm32_timer_trigger(struct iio_trigger *trig)
{
return (trig->ops == &timer_trigger_ops);
}
EXPORT_SYMBOL(is_stm32_timer_trigger);
static void stm32_timer_detect_trgo2(struct stm32_timer_trigger *priv)
{
u32 val;
/*
	 * Master mode selection 2 bits can only be written and read back when
	 * the timer supports it.
*/
regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2, TIM_CR2_MMS2);
regmap_read(priv->regmap, TIM_CR2, &val);
regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2, 0);
priv->has_trgo2 = !!val;
}
static int stm32_timer_trigger_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct stm32_timer_trigger *priv;
struct stm32_timers *ddata = dev_get_drvdata(pdev->dev.parent);
const struct stm32_timer_trigger_cfg *cfg;
unsigned int index;
int ret;
ret = device_property_read_u32(dev, "reg", &index);
if (ret)
return ret;
cfg = device_get_match_data(dev);
if (index >= ARRAY_SIZE(triggers_table) ||
index >= cfg->num_valids_table)
return -EINVAL;
/* Create an IIO device only if we have triggers to be validated */
if (*cfg->valids_table[index])
priv = stm32_setup_counter_device(dev);
else
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
priv->regmap = ddata->regmap;
priv->clk = ddata->clk;
priv->max_arr = ddata->max_arr;
priv->triggers = triggers_table[index];
priv->valids = cfg->valids_table[index];
stm32_timer_detect_trgo2(priv);
mutex_init(&priv->lock);
ret = stm32_register_iio_triggers(priv);
if (ret)
return ret;
platform_set_drvdata(pdev, priv);
return 0;
}
static int stm32_timer_trigger_remove(struct platform_device *pdev)
{
struct stm32_timer_trigger *priv = platform_get_drvdata(pdev);
u32 val;
/* Unregister triggers before everything can be safely turned off */
stm32_unregister_iio_triggers(priv);
	/* Check that nobody else is using the timer, then disable it */
regmap_read(priv->regmap, TIM_CCER, &val);
if (!(val & TIM_CCER_CCXE))
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
if (priv->enabled)
clk_disable(priv->clk);
return 0;
}
static int stm32_timer_trigger_suspend(struct device *dev)
{
struct stm32_timer_trigger *priv = dev_get_drvdata(dev);
	/* Only take care of an enabled timer: don't disturb other MFD children */
if (priv->enabled) {
/* Backup registers that may get lost in low power mode */
regmap_read(priv->regmap, TIM_CR1, &priv->bak.cr1);
regmap_read(priv->regmap, TIM_CR2, &priv->bak.cr2);
regmap_read(priv->regmap, TIM_PSC, &priv->bak.psc);
regmap_read(priv->regmap, TIM_ARR, &priv->bak.arr);
regmap_read(priv->regmap, TIM_CNT, &priv->bak.cnt);
regmap_read(priv->regmap, TIM_SMCR, &priv->bak.smcr);
/* Disable the timer */
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
clk_disable(priv->clk);
}
return 0;
}
static int stm32_timer_trigger_resume(struct device *dev)
{
struct stm32_timer_trigger *priv = dev_get_drvdata(dev);
int ret;
if (priv->enabled) {
ret = clk_enable(priv->clk);
if (ret)
return ret;
/* restore master/slave modes */
regmap_write(priv->regmap, TIM_SMCR, priv->bak.smcr);
regmap_write(priv->regmap, TIM_CR2, priv->bak.cr2);
/* restore sampling_frequency (trgo / trgo2 triggers) */
regmap_write(priv->regmap, TIM_PSC, priv->bak.psc);
regmap_write(priv->regmap, TIM_ARR, priv->bak.arr);
regmap_write(priv->regmap, TIM_CNT, priv->bak.cnt);
/* Also re-enables the timer */
regmap_write(priv->regmap, TIM_CR1, priv->bak.cr1);
}
return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(stm32_timer_trigger_pm_ops,
stm32_timer_trigger_suspend,
stm32_timer_trigger_resume);
static const struct stm32_timer_trigger_cfg stm32_timer_trg_cfg = {
.valids_table = valids_table,
.num_valids_table = ARRAY_SIZE(valids_table),
};
static const struct stm32_timer_trigger_cfg stm32h7_timer_trg_cfg = {
.valids_table = stm32h7_valids_table,
.num_valids_table = ARRAY_SIZE(stm32h7_valids_table),
};
static const struct of_device_id stm32_trig_of_match[] = {
{
.compatible = "st,stm32-timer-trigger",
.data = (void *)&stm32_timer_trg_cfg,
}, {
.compatible = "st,stm32h7-timer-trigger",
.data = (void *)&stm32h7_timer_trg_cfg,
},
{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, stm32_trig_of_match);
static struct platform_driver stm32_timer_trigger_driver = {
.probe = stm32_timer_trigger_probe,
.remove = stm32_timer_trigger_remove,
.driver = {
.name = "stm32-timer-trigger",
.of_match_table = stm32_trig_of_match,
.pm = pm_sleep_ptr(&stm32_timer_trigger_pm_ops),
},
};
module_platform_driver(stm32_timer_trigger_driver);
MODULE_ALIAS("platform:stm32-timer-trigger");
MODULE_DESCRIPTION("STMicroelectronics STM32 Timer Trigger driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/trigger/stm32-timer-trigger.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2011 Analog Devices Inc.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/irq_work.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
struct iio_sysfs_trig {
struct iio_trigger *trig;
struct irq_work work;
int id;
struct list_head l;
};
static LIST_HEAD(iio_sysfs_trig_list);
static DEFINE_MUTEX(iio_sysfs_trig_list_mut);
static int iio_sysfs_trigger_probe(int id);
static ssize_t iio_sysfs_trig_add(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
int ret;
unsigned long input;
ret = kstrtoul(buf, 10, &input);
if (ret)
return ret;
ret = iio_sysfs_trigger_probe(input);
if (ret)
return ret;
return len;
}
static DEVICE_ATTR(add_trigger, S_IWUSR, NULL, &iio_sysfs_trig_add);
static int iio_sysfs_trigger_remove(int id);
static ssize_t iio_sysfs_trig_remove(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t len)
{
int ret;
unsigned long input;
ret = kstrtoul(buf, 10, &input);
if (ret)
return ret;
ret = iio_sysfs_trigger_remove(input);
if (ret)
return ret;
return len;
}
static DEVICE_ATTR(remove_trigger, S_IWUSR, NULL, &iio_sysfs_trig_remove);
static struct attribute *iio_sysfs_trig_attrs[] = {
&dev_attr_add_trigger.attr,
&dev_attr_remove_trigger.attr,
NULL,
};
static const struct attribute_group iio_sysfs_trig_group = {
.attrs = iio_sysfs_trig_attrs,
};
static const struct attribute_group *iio_sysfs_trig_groups[] = {
&iio_sysfs_trig_group,
NULL
};
/* Nothing to actually do upon release */
static void iio_trigger_sysfs_release(struct device *dev)
{
}
static struct device iio_sysfs_trig_dev = {
.bus = &iio_bus_type,
.groups = iio_sysfs_trig_groups,
.release = &iio_trigger_sysfs_release,
};
static void iio_sysfs_trigger_work(struct irq_work *work)
{
struct iio_sysfs_trig *trig = container_of(work, struct iio_sysfs_trig,
work);
iio_trigger_poll(trig->trig);
}
static ssize_t iio_sysfs_trigger_poll(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct iio_trigger *trig = to_iio_trigger(dev);
struct iio_sysfs_trig *sysfs_trig = iio_trigger_get_drvdata(trig);
irq_work_queue(&sysfs_trig->work);
return count;
}
static DEVICE_ATTR(trigger_now, S_IWUSR, NULL, iio_sysfs_trigger_poll);
static struct attribute *iio_sysfs_trigger_attrs[] = {
&dev_attr_trigger_now.attr,
NULL,
};
static const struct attribute_group iio_sysfs_trigger_attr_group = {
.attrs = iio_sysfs_trigger_attrs,
};
static const struct attribute_group *iio_sysfs_trigger_attr_groups[] = {
&iio_sysfs_trigger_attr_group,
NULL
};
static int iio_sysfs_trigger_probe(int id)
{
struct iio_sysfs_trig *t;
int ret;
bool foundit = false;
mutex_lock(&iio_sysfs_trig_list_mut);
list_for_each_entry(t, &iio_sysfs_trig_list, l)
if (id == t->id) {
foundit = true;
break;
}
if (foundit) {
ret = -EINVAL;
goto err_unlock;
}
t = kmalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
ret = -ENOMEM;
goto err_unlock;
}
t->id = id;
t->trig = iio_trigger_alloc(&iio_sysfs_trig_dev, "sysfstrig%d", id);
if (!t->trig) {
ret = -ENOMEM;
goto err_free_sys_trig;
}
t->trig->dev.groups = iio_sysfs_trigger_attr_groups;
iio_trigger_set_drvdata(t->trig, t);
t->work = IRQ_WORK_INIT_HARD(iio_sysfs_trigger_work);
ret = iio_trigger_register(t->trig);
if (ret)
goto err_free_trig;
list_add(&t->l, &iio_sysfs_trig_list);
__module_get(THIS_MODULE);
mutex_unlock(&iio_sysfs_trig_list_mut);
return 0;
err_free_trig:
iio_trigger_free(t->trig);
err_free_sys_trig:
kfree(t);
err_unlock:
mutex_unlock(&iio_sysfs_trig_list_mut);
return ret;
}
static int iio_sysfs_trigger_remove(int id)
{
struct iio_sysfs_trig *t = NULL, *iter;
mutex_lock(&iio_sysfs_trig_list_mut);
list_for_each_entry(iter, &iio_sysfs_trig_list, l)
if (id == iter->id) {
t = iter;
break;
}
if (!t) {
mutex_unlock(&iio_sysfs_trig_list_mut);
return -EINVAL;
}
iio_trigger_unregister(t->trig);
irq_work_sync(&t->work);
iio_trigger_free(t->trig);
list_del(&t->l);
kfree(t);
module_put(THIS_MODULE);
mutex_unlock(&iio_sysfs_trig_list_mut);
return 0;
}
static int __init iio_sysfs_trig_init(void)
{
int ret;
device_initialize(&iio_sysfs_trig_dev);
dev_set_name(&iio_sysfs_trig_dev, "iio_sysfs_trigger");
ret = device_add(&iio_sysfs_trig_dev);
if (ret)
put_device(&iio_sysfs_trig_dev);
return ret;
}
module_init(iio_sysfs_trig_init);
static void __exit iio_sysfs_trig_exit(void)
{
device_unregister(&iio_sysfs_trig_dev);
}
module_exit(iio_sysfs_trig_exit);
MODULE_AUTHOR("Michael Hennerich <[email protected]>");
MODULE_DESCRIPTION("Sysfs based trigger for the iio subsystem");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:iio-trig-sysfs");
| linux-master | drivers/iio/trigger/iio-trig-sysfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* The industrial I/O periodic hrtimer trigger driver
*
* Copyright (C) Intuitive Aerial AB
* Written by Marten Svanfeldt, [email protected]
* Copyright (C) 2012, Analog Devices Inc.
* Author: Lars-Peter Clausen <[email protected]>
* Copyright (C) 2015, Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hrtimer.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/iio/sw_trigger.h>
/* Defined locally, not in time64.h yet. */
#define PSEC_PER_SEC 1000000000000LL
/* default sampling frequency - 100Hz */
#define HRTIMER_DEFAULT_SAMPLING_FREQUENCY 100
struct iio_hrtimer_info {
struct iio_sw_trigger swt;
struct hrtimer timer;
int sampling_frequency[2];
ktime_t period;
};
static const struct config_item_type iio_hrtimer_type = {
.ct_owner = THIS_MODULE,
};
static
ssize_t iio_hrtimer_show_sampling_frequency(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_trigger *trig = to_iio_trigger(dev);
struct iio_hrtimer_info *info = iio_trigger_get_drvdata(trig);
return iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO,
ARRAY_SIZE(info->sampling_frequency),
info->sampling_frequency);
}
static
ssize_t iio_hrtimer_store_sampling_frequency(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct iio_trigger *trig = to_iio_trigger(dev);
struct iio_hrtimer_info *info = iio_trigger_get_drvdata(trig);
unsigned long long val;
u64 period;
int integer, fract, ret;
ret = iio_str_to_fixpoint(buf, 100, &integer, &fract);
if (ret)
return ret;
if (integer < 0 || fract < 0)
return -ERANGE;
val = fract + 1000ULL * integer; /* mHz */
if (!val || val > UINT_MAX)
return -EINVAL;
info->sampling_frequency[0] = integer; /* Hz */
info->sampling_frequency[1] = fract * 1000; /* uHz */
period = PSEC_PER_SEC;
do_div(period, val);
	info->period = period; /* ns */
return len;
}
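/*
 * Worked example (illustrative): writing "25.5" to sampling_frequency yields
 * integer = 25 and fract = 500 (in mHz, since fract_mult is 100), so
 * val = 25500 mHz and period = PSEC_PER_SEC / 25500 ~= 39215686 ns, i.e.
 * roughly 39.2 ms between trigger polls. Dividing picoseconds-per-second by
 * a millihertz value is what makes the result come out in nanoseconds.
 */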
static DEVICE_ATTR(sampling_frequency, S_IRUGO | S_IWUSR,
iio_hrtimer_show_sampling_frequency,
iio_hrtimer_store_sampling_frequency);
static struct attribute *iio_hrtimer_attrs[] = {
&dev_attr_sampling_frequency.attr,
NULL
};
static const struct attribute_group iio_hrtimer_attr_group = {
.attrs = iio_hrtimer_attrs,
};
static const struct attribute_group *iio_hrtimer_attr_groups[] = {
&iio_hrtimer_attr_group,
NULL
};
static enum hrtimer_restart iio_hrtimer_trig_handler(struct hrtimer *timer)
{
struct iio_hrtimer_info *info;
info = container_of(timer, struct iio_hrtimer_info, timer);
hrtimer_forward_now(timer, info->period);
iio_trigger_poll(info->swt.trigger);
return HRTIMER_RESTART;
}
static int iio_trig_hrtimer_set_state(struct iio_trigger *trig, bool state)
{
struct iio_hrtimer_info *trig_info;
trig_info = iio_trigger_get_drvdata(trig);
if (state)
hrtimer_start(&trig_info->timer, trig_info->period,
HRTIMER_MODE_REL_HARD);
else
hrtimer_cancel(&trig_info->timer);
return 0;
}
static const struct iio_trigger_ops iio_hrtimer_trigger_ops = {
.set_trigger_state = iio_trig_hrtimer_set_state,
};
static struct iio_sw_trigger *iio_trig_hrtimer_probe(const char *name)
{
struct iio_hrtimer_info *trig_info;
int ret;
trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL);
if (!trig_info)
return ERR_PTR(-ENOMEM);
trig_info->swt.trigger = iio_trigger_alloc(NULL, "%s", name);
if (!trig_info->swt.trigger) {
ret = -ENOMEM;
goto err_free_trig_info;
}
iio_trigger_set_drvdata(trig_info->swt.trigger, trig_info);
trig_info->swt.trigger->ops = &iio_hrtimer_trigger_ops;
trig_info->swt.trigger->dev.groups = iio_hrtimer_attr_groups;
hrtimer_init(&trig_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
trig_info->timer.function = iio_hrtimer_trig_handler;
trig_info->sampling_frequency[0] = HRTIMER_DEFAULT_SAMPLING_FREQUENCY;
trig_info->period = NSEC_PER_SEC / trig_info->sampling_frequency[0];
ret = iio_trigger_register(trig_info->swt.trigger);
if (ret)
goto err_free_trigger;
iio_swt_group_init_type_name(&trig_info->swt, name, &iio_hrtimer_type);
return &trig_info->swt;
err_free_trigger:
iio_trigger_free(trig_info->swt.trigger);
err_free_trig_info:
kfree(trig_info);
return ERR_PTR(ret);
}
static int iio_trig_hrtimer_remove(struct iio_sw_trigger *swt)
{
struct iio_hrtimer_info *trig_info;
trig_info = iio_trigger_get_drvdata(swt->trigger);
iio_trigger_unregister(swt->trigger);
/* cancel the timer after unreg to make sure no one rearms it */
hrtimer_cancel(&trig_info->timer);
iio_trigger_free(swt->trigger);
kfree(trig_info);
return 0;
}
static const struct iio_sw_trigger_ops iio_trig_hrtimer_ops = {
.probe = iio_trig_hrtimer_probe,
.remove = iio_trig_hrtimer_remove,
};
static struct iio_sw_trigger_type iio_trig_hrtimer = {
.name = "hrtimer",
.owner = THIS_MODULE,
.ops = &iio_trig_hrtimer_ops,
};
module_iio_sw_trigger_driver(iio_trig_hrtimer);
MODULE_AUTHOR("Marten Svanfeldt <[email protected]>");
MODULE_AUTHOR("Daniel Baluta <[email protected]>");
MODULE_DESCRIPTION("Periodic hrtimer trigger for the IIO subsystem");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/trigger/iio-trig-hrtimer.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2016 Jonathan Cameron <[email protected]>
*
* Based on a mashup of the hrtimer trigger and continuous sampling proposal of
* Gregor Boirie <[email protected]>
*
* Note this is still rather experimental and may eat babies.
*
* Todo
* * Protect against connection of devices that 'need' the top half
* handler.
* * Work out how to run top half handlers in this context if it is
* safe to do so (timestamp grabbing for example)
*
* Tested against a max1363. Used about 33% cpu for the thread and 20%
* for generic_buffer piping to /dev/null. Watermark set at 64 on a 128
* element kfifo buffer.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/irq_work.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/iio/sw_trigger.h>
struct iio_loop_info {
struct iio_sw_trigger swt;
struct task_struct *task;
};
static const struct config_item_type iio_loop_type = {
.ct_owner = THIS_MODULE,
};
static int iio_loop_thread(void *data)
{
struct iio_trigger *trig = data;
set_freezable();
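	/*
	 * Busy loop: poll the trigger as fast as the consumers can drain it.
	 * kthread_freezable_should_stop() handles freezing and is the only
	 * exit condition, hit when the trigger is disabled.
	 */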
do {
iio_trigger_poll_nested(trig);
} while (likely(!kthread_freezable_should_stop(NULL)));
return 0;
}
static int iio_loop_trigger_set_state(struct iio_trigger *trig, bool state)
{
struct iio_loop_info *loop_trig = iio_trigger_get_drvdata(trig);
if (state) {
loop_trig->task = kthread_run(iio_loop_thread,
trig, trig->name);
if (IS_ERR(loop_trig->task)) {
dev_err(&trig->dev,
"failed to create trigger loop thread\n");
return PTR_ERR(loop_trig->task);
}
} else {
kthread_stop(loop_trig->task);
}
return 0;
}
static const struct iio_trigger_ops iio_loop_trigger_ops = {
.set_trigger_state = iio_loop_trigger_set_state,
};
static struct iio_sw_trigger *iio_trig_loop_probe(const char *name)
{
struct iio_loop_info *trig_info;
int ret;
trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL);
if (!trig_info)
return ERR_PTR(-ENOMEM);
trig_info->swt.trigger = iio_trigger_alloc(NULL, "%s", name);
if (!trig_info->swt.trigger) {
ret = -ENOMEM;
goto err_free_trig_info;
}
iio_trigger_set_drvdata(trig_info->swt.trigger, trig_info);
trig_info->swt.trigger->ops = &iio_loop_trigger_ops;
ret = iio_trigger_register(trig_info->swt.trigger);
if (ret)
goto err_free_trigger;
iio_swt_group_init_type_name(&trig_info->swt, name, &iio_loop_type);
return &trig_info->swt;
err_free_trigger:
iio_trigger_free(trig_info->swt.trigger);
err_free_trig_info:
kfree(trig_info);
return ERR_PTR(ret);
}
static int iio_trig_loop_remove(struct iio_sw_trigger *swt)
{
struct iio_loop_info *trig_info;
trig_info = iio_trigger_get_drvdata(swt->trigger);
iio_trigger_unregister(swt->trigger);
iio_trigger_free(swt->trigger);
kfree(trig_info);
return 0;
}
static const struct iio_sw_trigger_ops iio_trig_loop_ops = {
.probe = iio_trig_loop_probe,
.remove = iio_trig_loop_remove,
};
static struct iio_sw_trigger_type iio_trig_loop = {
.name = "loop",
.owner = THIS_MODULE,
.ops = &iio_trig_loop_ops,
};
module_iio_sw_trigger_driver(iio_trig_loop);
MODULE_AUTHOR("Jonathan Cameron <[email protected]>");
MODULE_DESCRIPTION("Loop based trigger for the iio subsystem");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:iio-trig-loop");
| linux-master | drivers/iio/trigger/iio-trig-loop.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* lmp91000.c - Support for Texas Instruments digital potentiostats
*
* Copyright (C) 2016, 2018
* Author: Matt Ranostay <[email protected]>
*
* TODO: bias voltage + polarity control, and multiple chip support
*/
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/mod_devicetable.h>
#include <linux/regmap.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/consumer.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#define LMP91000_REG_LOCK 0x01
#define LMP91000_REG_TIACN 0x10
#define LMP91000_REG_TIACN_GAIN_SHIFT 2
#define LMP91000_REG_REFCN 0x11
#define LMP91000_REG_REFCN_EXT_REF 0x20
#define LMP91000_REG_REFCN_50_ZERO 0x80
#define LMP91000_REG_MODECN 0x12
#define LMP91000_REG_MODECN_3LEAD 0x03
#define LMP91000_REG_MODECN_TEMP 0x07
#define LMP91000_DRV_NAME "lmp91000"
static const int lmp91000_tia_gain[] = { 0, 2750, 3500, 7000, 14000, 35000,
120000, 350000 };
static const int lmp91000_rload[] = { 10, 33, 50, 100 };
#define LMP91000_TEMP_BASE -40
static const u16 lmp91000_temp_lut[] = {
1875, 1867, 1860, 1852, 1844, 1836, 1828, 1821, 1813, 1805,
1797, 1789, 1782, 1774, 1766, 1758, 1750, 1742, 1734, 1727,
1719, 1711, 1703, 1695, 1687, 1679, 1671, 1663, 1656, 1648,
1640, 1632, 1624, 1616, 1608, 1600, 1592, 1584, 1576, 1568,
1560, 1552, 1544, 1536, 1528, 1520, 1512, 1504, 1496, 1488,
1480, 1472, 1464, 1456, 1448, 1440, 1432, 1424, 1415, 1407,
1399, 1391, 1383, 1375, 1367, 1359, 1351, 1342, 1334, 1326,
1318, 1310, 1302, 1293, 1285, 1277, 1269, 1261, 1253, 1244,
1236, 1228, 1220, 1212, 1203, 1195, 1187, 1179, 1170, 1162,
1154, 1146, 1137, 1129, 1121, 1112, 1104, 1096, 1087, 1079,
1071, 1063, 1054, 1046, 1038, 1029, 1021, 1012, 1004, 996,
987, 979, 971, 962, 954, 945, 937, 929, 920, 912,
903, 895, 886, 878, 870, 861 };
static const struct regmap_config lmp91000_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
struct lmp91000_data {
struct regmap *regmap;
struct device *dev;
struct iio_trigger *trig;
struct iio_cb_buffer *cb_buffer;
struct iio_channel *adc_chan;
struct completion completion;
u8 chan_select;
/* 64-bit data + 64-bit naturally aligned timestamp */
u32 buffer[4] __aligned(8);
};
static const struct iio_chan_spec lmp91000_channels[] = {
{ /* chemical channel mV */
.type = IIO_VOLTAGE,
.channel = 0,
.address = LMP91000_REG_MODECN_3LEAD,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE),
.scan_index = 0,
.scan_type = {
.sign = 's',
.realbits = 32,
.storagebits = 32,
},
},
IIO_CHAN_SOFT_TIMESTAMP(1),
{ /* temperature channel mV */
.type = IIO_TEMP,
.channel = 1,
.address = LMP91000_REG_MODECN_TEMP,
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
.scan_index = -1,
},
};
static int lmp91000_read(struct lmp91000_data *data, int channel, int *val)
{
int state, ret;
ret = regmap_read(data->regmap, LMP91000_REG_MODECN, &state);
if (ret)
return -EINVAL;
ret = regmap_write(data->regmap, LMP91000_REG_MODECN, channel);
if (ret)
return -EINVAL;
/* delay till first temperature reading is complete */
if (state != channel && channel == LMP91000_REG_MODECN_TEMP)
usleep_range(3000, 4000);
data->chan_select = channel != LMP91000_REG_MODECN_3LEAD;
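	/*
	 * Kick our own trigger so the companion ADC samples the selected
	 * output; lmp91000_buffer_cb() stores the result and signals the
	 * completion, with a one second timeout in case no sample arrives.
	 */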
iio_trigger_poll_nested(data->trig);
ret = wait_for_completion_timeout(&data->completion, HZ);
reinit_completion(&data->completion);
if (!ret)
return -ETIMEDOUT;
*val = data->buffer[data->chan_select];
return 0;
}
static irqreturn_t lmp91000_buffer_handler(int irq, void *private)
{
struct iio_poll_func *pf = private;
struct iio_dev *indio_dev = pf->indio_dev;
struct lmp91000_data *data = iio_priv(indio_dev);
int ret, val;
memset(data->buffer, 0, sizeof(data->buffer));
ret = lmp91000_read(data, LMP91000_REG_MODECN_3LEAD, &val);
if (!ret) {
data->buffer[0] = val;
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
iio_get_time_ns(indio_dev));
}
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
}
static int lmp91000_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct lmp91000_data *data = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_RAW:
case IIO_CHAN_INFO_PROCESSED: {
int ret = iio_channel_start_all_cb(data->cb_buffer);
if (ret)
return ret;
ret = lmp91000_read(data, chan->address, val);
iio_channel_stop_all_cb(data->cb_buffer);
if (ret)
return ret;
if (mask == IIO_CHAN_INFO_PROCESSED) {
int tmp, i;
ret = iio_convert_raw_to_processed(data->adc_chan,
*val, &tmp, 1);
if (ret)
return ret;
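			/*
			 * The LUT holds descending mV readings, one entry per
			 * degree Celsius starting at LMP91000_TEMP_BASE, so
			 * the first entry below the measurement selects the
			 * temperature, reported in millidegrees.
			 */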
for (i = 0; i < ARRAY_SIZE(lmp91000_temp_lut); i++)
if (lmp91000_temp_lut[i] < tmp)
break;
*val = (LMP91000_TEMP_BASE + i) * 1000;
}
return IIO_VAL_INT;
}
case IIO_CHAN_INFO_OFFSET:
return iio_read_channel_offset(data->adc_chan, val, val2);
case IIO_CHAN_INFO_SCALE:
return iio_read_channel_scale(data->adc_chan, val, val2);
}
return -EINVAL;
}
static const struct iio_info lmp91000_info = {
.read_raw = lmp91000_read_raw,
};
static int lmp91000_read_config(struct lmp91000_data *data)
{
struct device *dev = data->dev;
unsigned int reg, val;
int i, ret;
ret = device_property_read_u32(dev, "ti,tia-gain-ohm", &val);
if (ret) {
if (!device_property_read_bool(dev, "ti,external-tia-resistor")) {
dev_err(dev, "no ti,tia-gain-ohm defined and external resistor not specified\n");
return ret;
}
val = 0;
}
ret = -EINVAL;
for (i = 0; i < ARRAY_SIZE(lmp91000_tia_gain); i++) {
if (lmp91000_tia_gain[i] == val) {
reg = i << LMP91000_REG_TIACN_GAIN_SHIFT;
ret = 0;
break;
}
}
if (ret) {
dev_err(dev, "invalid ti,tia-gain-ohm %d\n", val);
return ret;
}
ret = device_property_read_u32(dev, "ti,rload-ohm", &val);
if (ret) {
val = 100;
dev_info(dev, "no ti,rload-ohm defined, default to %d\n", val);
}
ret = -EINVAL;
for (i = 0; i < ARRAY_SIZE(lmp91000_rload); i++) {
if (lmp91000_rload[i] == val) {
reg |= i;
ret = 0;
break;
}
}
if (ret) {
dev_err(dev, "invalid ti,rload-ohm %d\n", val);
return ret;
}
regmap_write(data->regmap, LMP91000_REG_LOCK, 0);
regmap_write(data->regmap, LMP91000_REG_TIACN, reg);
regmap_write(data->regmap, LMP91000_REG_REFCN,
LMP91000_REG_REFCN_EXT_REF | LMP91000_REG_REFCN_50_ZERO);
regmap_write(data->regmap, LMP91000_REG_LOCK, 1);
return 0;
}
static int lmp91000_buffer_cb(const void *val, void *private)
{
struct iio_dev *indio_dev = private;
struct lmp91000_data *data = iio_priv(indio_dev);
data->buffer[data->chan_select] = *((int *)val);
complete_all(&data->completion);
return 0;
}
static int lmp91000_buffer_postenable(struct iio_dev *indio_dev)
{
struct lmp91000_data *data = iio_priv(indio_dev);
return iio_channel_start_all_cb(data->cb_buffer);
}
static int lmp91000_buffer_predisable(struct iio_dev *indio_dev)
{
struct lmp91000_data *data = iio_priv(indio_dev);
iio_channel_stop_all_cb(data->cb_buffer);
return 0;
}
static const struct iio_buffer_setup_ops lmp91000_buffer_setup_ops = {
.postenable = lmp91000_buffer_postenable,
.predisable = lmp91000_buffer_predisable,
};
static int lmp91000_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct lmp91000_data *data;
struct iio_dev *indio_dev;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
indio_dev->info = &lmp91000_info;
indio_dev->channels = lmp91000_channels;
indio_dev->num_channels = ARRAY_SIZE(lmp91000_channels);
indio_dev->name = LMP91000_DRV_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
i2c_set_clientdata(client, indio_dev);
data = iio_priv(indio_dev);
data->dev = dev;
data->regmap = devm_regmap_init_i2c(client, &lmp91000_regmap_config);
if (IS_ERR(data->regmap)) {
dev_err(dev, "regmap initialization failed.\n");
return PTR_ERR(data->regmap);
}
data->trig = devm_iio_trigger_alloc(dev, "%s-mux%d",
indio_dev->name,
iio_device_id(indio_dev));
if (!data->trig) {
dev_err(dev, "cannot allocate iio trigger.\n");
return -ENOMEM;
}
init_completion(&data->completion);
ret = lmp91000_read_config(data);
if (ret)
return ret;
ret = iio_trigger_set_immutable(iio_channel_cb_get_iio_dev(data->cb_buffer),
data->trig);
if (ret) {
dev_err(dev, "cannot set immutable trigger.\n");
return ret;
}
ret = iio_trigger_register(data->trig);
if (ret) {
dev_err(dev, "cannot register iio trigger.\n");
return ret;
}
ret = iio_triggered_buffer_setup(indio_dev, NULL,
&lmp91000_buffer_handler,
&lmp91000_buffer_setup_ops);
if (ret)
goto error_unreg_trigger;
data->cb_buffer = iio_channel_get_all_cb(dev, &lmp91000_buffer_cb,
indio_dev);
if (IS_ERR(data->cb_buffer)) {
if (PTR_ERR(data->cb_buffer) == -ENODEV)
ret = -EPROBE_DEFER;
else
ret = PTR_ERR(data->cb_buffer);
goto error_unreg_buffer;
}
data->adc_chan = iio_channel_cb_get_channels(data->cb_buffer);
ret = iio_device_register(indio_dev);
if (ret)
goto error_unreg_cb_buffer;
return 0;
error_unreg_cb_buffer:
iio_channel_release_all_cb(data->cb_buffer);
error_unreg_buffer:
iio_triggered_buffer_cleanup(indio_dev);
error_unreg_trigger:
iio_trigger_unregister(data->trig);
return ret;
}
static void lmp91000_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct lmp91000_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
iio_channel_stop_all_cb(data->cb_buffer);
iio_channel_release_all_cb(data->cb_buffer);
iio_triggered_buffer_cleanup(indio_dev);
iio_trigger_unregister(data->trig);
}
static const struct of_device_id lmp91000_of_match[] = {
{ .compatible = "ti,lmp91000", },
{ .compatible = "ti,lmp91002", },
{ },
};
MODULE_DEVICE_TABLE(of, lmp91000_of_match);
static const struct i2c_device_id lmp91000_id[] = {
{ "lmp91000", 0 },
{ "lmp91002", 0 },
{}
};
MODULE_DEVICE_TABLE(i2c, lmp91000_id);
static struct i2c_driver lmp91000_driver = {
.driver = {
.name = LMP91000_DRV_NAME,
.of_match_table = lmp91000_of_match,
},
.probe = lmp91000_probe,
.remove = lmp91000_remove,
.id_table = lmp91000_id,
};
module_i2c_driver(lmp91000_driver);
MODULE_AUTHOR("Matt Ranostay <[email protected]>");
MODULE_DESCRIPTION("LMP91000 digital potentiostat");
MODULE_LICENSE("GPL");
| linux-master | drivers/iio/potentiostat/lmp91000.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Kunit tests for IIO rescale conversions
*
* Copyright (c) 2021 Liam Beguin <[email protected]>
*/
#include <linux/gcd.h>
#include <linux/overflow.h>
#include <linux/iio/afe/rescale.h>
#include <linux/iio/iio.h>
#include <kunit/test.h>
struct rescale_tc_data {
const char *name;
const s32 numerator;
const s32 denominator;
const s32 offset;
const int schan_val;
const int schan_val2;
const int schan_off;
const int schan_scale_type;
const char *expected;
const char *expected_off;
};
static const struct rescale_tc_data scale_cases[] = {
/*
* Typical use cases
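	 *
	 * Each entry feeds the source-channel scale through
	 * rescale_process_scale() and expects roughly
	 * schan_scale * numerator / denominator, e.g. the first case:
	 * 42 * 1000000 / 8060 ~= 5210.918114143.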
*/
{
.name = "typical IIO_VAL_INT, positive",
.numerator = 1000000,
.denominator = 8060,
.schan_scale_type = IIO_VAL_INT,
.schan_val = 42,
.expected = "5210.918114143",
},
{
.name = "typical IIO_VAL_INT, negative",
.numerator = -1000000,
.denominator = 8060,
.schan_scale_type = IIO_VAL_INT,
.schan_val = 42,
.expected = "-5210.918114143",
},
{
.name = "typical IIO_VAL_FRACTIONAL, positive",
.numerator = 1000000,
.denominator = 8060,
.schan_scale_type = IIO_VAL_FRACTIONAL,
.schan_val = 42,
.schan_val2 = 20,
.expected = "260.545905707",
},
{
.name = "typical IIO_VAL_FRACTIONAL, negative",
.numerator = -1000000,
.denominator = 8060,
.schan_scale_type = IIO_VAL_FRACTIONAL,
.schan_val = 42,
.schan_val2 = 20,
.expected = "-260.545905707",
},
{
.name = "typical IIO_VAL_FRACTIONAL_LOG2, positive",
.numerator = 42,
.denominator = 53,
.schan_scale_type = IIO_VAL_FRACTIONAL_LOG2,
.schan_val = 4096,
.schan_val2 = 16,
.expected = "0.049528301",
},
{
.name = "typical IIO_VAL_FRACTIONAL_LOG2, negative",
.numerator = -42,
.denominator = 53,
.schan_scale_type = IIO_VAL_FRACTIONAL_LOG2,
.schan_val = 4096,
.schan_val2 = 16,
.expected = "-0.049528301",
},
{
.name = "typical IIO_VAL_INT_PLUS_NANO, positive",
.numerator = 1000000,
.denominator = 8060,
.schan_scale_type = IIO_VAL_INT_PLUS_NANO,
.schan_val = 10,
.schan_val2 = 123456,
.expected = "1240.710106203",
},
{
.name = "typical IIO_VAL_INT_PLUS_NANO, negative",
.numerator = -1000000,
.denominator = 8060,
.schan_scale_type = IIO_VAL_INT_PLUS_NANO,
.schan_val = 10,
.schan_val2 = 123456,
.expected = "-1240.710106203",
},
{
.name = "typical IIO_VAL_INT_PLUS_MICRO, positive",
.numerator = 1000000,
.denominator = 8060,
.schan_scale_type = IIO_VAL_INT_PLUS_MICRO,
.schan_val = 10,
.schan_val2 = 1234,
.expected = "1240.84789",
},
{
.name = "typical IIO_VAL_INT_PLUS_MICRO, negative",
.numerator = -1000000,
.denominator = 8060,
.schan_scale_type = IIO_VAL_INT_PLUS_MICRO,
.schan_val = 10,
.schan_val2 = 1234,
.expected = "-1240.84789",
},
/*
* Use cases with small scales involving divisions
*/
{
.name = "small IIO_VAL_FRACTIONAL, 261/509 scaled by 90/1373754273",
.numerator = 261,
.denominator = 509,
.schan_scale_type = IIO_VAL_FRACTIONAL,
.schan_val = 90,
.schan_val2 = 1373754273,
.expected = "0.000000033594",
},
{
.name = "small IIO_VAL_FRACTIONAL, 90/1373754273 scaled by 261/509",
.numerator = 90,
.denominator = 1373754273,
.schan_scale_type = IIO_VAL_FRACTIONAL,
.schan_val = 261,
.schan_val2 = 509,
.expected = "0.000000033594",
},
{
.name = "small IIO_VAL_FRACTIONAL, 760/1373754273 scaled by 427/2727",
.numerator = 760,
.denominator = 1373754273,
.schan_scale_type = IIO_VAL_FRACTIONAL,
.schan_val = 427,
.schan_val2 = 2727,
.expected = "0.000000086626",
},
{
.name = "small IIO_VAL_FRACTIONAL, 761/1373754273 scaled by 427/2727",
.numerator = 761,
.denominator = 1373754273,
.schan_scale_type = IIO_VAL_FRACTIONAL,
.schan_val = 427,
.schan_val2 = 2727,
.expected = "0.000000086740",
},
{
.name = "small IIO_VAL_FRACTIONAL, 5/32768 scaled by 3/10000",
.numerator = 5,
.denominator = 32768,
.schan_scale_type = IIO_VAL_FRACTIONAL,
.schan_val = 3,
.schan_val2 = 10000,
.expected = "0.0000000457763671875",
},
{
.name = "small IIO_VAL_FRACTIONAL, 0 < scale < 1",
.numerator = 6,
.denominator = 6,
.schan_scale_type = IIO_VAL_FRACTIONAL,
.schan_val = 1,
.schan_val2 = 3,
.expected = "0.3333333333333333",
},
{
.name = "small IIO_VAL_FRACTIONAL, -1 < scale < 0",
.numerator = -6,
.denominator = 6,
.schan_scale_type = IIO_VAL_FRACTIONAL,
.schan_val = 1,
.schan_val2 = 3,
.expected = "-0.3333333333333333",
},
{
.name = "small IIO_VAL_FRACTIONAL, 0 < scale < 2",
.numerator = 8,
.denominator = 2,
.schan_scale_type = IIO_VAL_FRACTIONAL,
.schan_val = 1,
.schan_val2 = 3,
.expected = "1.3333333333333333",
},
{
.name = "small IIO_VAL_FRACTIONAL, -2 < scale < 0",
.numerator = -8,
.denominator = 2,
.schan_scale_type = IIO_VAL_FRACTIONAL,
.schan_val = 1,
.schan_val2 = 3,
.expected = "-1.3333333333333333",
},
{
.name = "small IIO_VAL_FRACTIONAL_LOG2, 760/32768 scaled by 15/22",
.numerator = 760,
.denominator = 32768,
.schan_scale_type = IIO_VAL_FRACTIONAL_LOG2,
.schan_val = 15,
.schan_val2 = 22,
.expected = "0.000000082946",
},
{
.name = "small IIO_VAL_FRACTIONAL_LOG2, 761/32768 scaled by 15/22",
.numerator = 761,
.denominator = 32768,
.schan_scale_type = IIO_VAL_FRACTIONAL_LOG2,
.schan_val = 15,
.schan_val2 = 22,
.expected = "0.000000083055",
},
{
.name = "small IIO_VAL_FRACTIONAL_LOG2, 0 < scale < 1",
.numerator = 16,
.denominator = 3,
.schan_scale_type = IIO_VAL_FRACTIONAL_LOG2,
.schan_val = 1,
.schan_val2 = 4,
.expected = "0.3333333333333333",
},
{
.name = "small IIO_VAL_FRACTIONAL_LOG2, -1 < scale < 0",
.numerator = -16,
.denominator = 3,
.schan_scale_type = IIO_VAL_FRACTIONAL_LOG2,
.schan_val = 1,
.schan_val2 = 4,
.expected = "-0.3333333333333333",
},
{
.name = "small IIO_VAL_FRACTIONAL_LOG2, 0 < scale < 2",
.numerator = 8,
.denominator = 3,
.schan_scale_type = IIO_VAL_FRACTIONAL_LOG2,
.schan_val = 1,
.schan_val2 = 1,
.expected = "1.3333333333333333",
},
{
.name = "small IIO_VAL_FRACTIONAL_LOG2, -2 < scale < 0",
.numerator = -8,
.denominator = 3,
.schan_scale_type = IIO_VAL_FRACTIONAL_LOG2,
.schan_val = 1,
.schan_val2 = 1,
.expected = "-1.3333333333333333",
},
{
.name = "small IIO_VAL_INT_PLUS_MICRO, positive",
.numerator = 1,
.denominator = 2,
.schan_scale_type = IIO_VAL_INT_PLUS_MICRO,
.schan_val = 5,
.schan_val2 = 1234,
.expected = "2.500617",
},
{
.name = "small IIO_VAL_INT_PLUS_MICRO, negative",
.numerator = -1,
.denominator = 2,
.schan_scale_type = IIO_VAL_INT_PLUS_MICRO,
.schan_val = 5,
.schan_val2 = 1234,
.expected = "-2.500617",
},
/*
* INT_PLUS_{MICRO,NANO} positive/negative corner cases
*/
{
.name = "negative IIO_VAL_INT_PLUS_NANO, negative schan",
.numerator = 1000000,
.denominator = 8060,
.schan_scale_type = IIO_VAL_INT_PLUS_NANO,
.schan_val = -10,
.schan_val2 = 123456,
.expected = "-1240.710106203",
},
{
.name = "negative IIO_VAL_INT_PLUS_NANO, both negative",
.numerator = -1000000,
.denominator = 8060,
.schan_scale_type = IIO_VAL_INT_PLUS_NANO,
.schan_val = -10,
.schan_val2 = 123456,
.expected = "1240.710106203",
},
{
.name = "negative IIO_VAL_INT_PLUS_NANO, 3 negative",
.numerator = -1000000,
.denominator = -8060,
.schan_scale_type = IIO_VAL_INT_PLUS_NANO,
.schan_val = -10,
.schan_val2 = 123456,
.expected = "-1240.710106203",
},
{
.name = "negative IIO_VAL_INT_PLUS_NANO, 4 negative",
.numerator = -1000000,
.denominator = -8060,
.schan_scale_type = IIO_VAL_INT_PLUS_NANO,
.schan_val = -10,
.schan_val2 = -123456,
.expected = "-1240.710106203",
},
{
.name = "negative IIO_VAL_INT_PLUS_NANO, negative, *val = 0",
.numerator = 1,
.denominator = -10,
.schan_scale_type = IIO_VAL_INT_PLUS_NANO,
.schan_val = 0,
.schan_val2 = 123456789,
.expected = "-0.012345678",
},
/*
* INT_PLUS_{MICRO,NANO} decimal part overflow
*/
{
.name = "decimal overflow IIO_VAL_INT_PLUS_NANO, positive",
.numerator = 1000000,
.denominator = 8060,
.schan_scale_type = IIO_VAL_INT_PLUS_NANO,
.schan_val = 10,
.schan_val2 = 123456789,
.expected = "1256.01200856",
},
{
.name = "decimal overflow IIO_VAL_INT_PLUS_NANO, negative",
.numerator = -1000000,
.denominator = 8060,
.schan_scale_type = IIO_VAL_INT_PLUS_NANO,
.schan_val = 10,
.schan_val2 = 123456789,
.expected = "-1256.01200856",
},
{
.name = "decimal overflow IIO_VAL_INT_PLUS_NANO, negative schan",
.numerator = 1000000,
.denominator = 8060,
.schan_scale_type = IIO_VAL_INT_PLUS_NANO,
.schan_val = -10,
.schan_val2 = 123456789,
.expected = "-1256.01200856",
},
{
.name = "decimal overflow IIO_VAL_INT_PLUS_MICRO, positive",
.numerator = 1000000,
.denominator = 8060,
.schan_scale_type = IIO_VAL_INT_PLUS_MICRO,
.schan_val = 10,
.schan_val2 = 123456789,
.expected = "16557.914267",
},
{
.name = "decimal overflow IIO_VAL_INT_PLUS_MICRO, negative",
.numerator = -1000000,
.denominator = 8060,
.schan_scale_type = IIO_VAL_INT_PLUS_MICRO,
.schan_val = 10,
.schan_val2 = 123456789,
.expected = "-16557.914267",
},
{
.name = "decimal overflow IIO_VAL_INT_PLUS_MICRO, negative schan",
.numerator = 1000000,
.denominator = 8060,
.schan_scale_type = IIO_VAL_INT_PLUS_MICRO,
.schan_val = -10,
.schan_val2 = 123456789,
.expected = "-16557.914267",
},
/*
* 32-bit overflow conditions
*/
{
.name = "overflow IIO_VAL_FRACTIONAL, positive",
.numerator = 2,
.denominator = 20,
.schan_scale_type = IIO_VAL_FRACTIONAL,
.schan_val = S32_MAX,
.schan_val2 = 1,
.expected = "214748364.7",
},
{
.name = "overflow IIO_VAL_FRACTIONAL, negative",
.numerator = -2,
.denominator = 20,
.schan_scale_type = IIO_VAL_FRACTIONAL,
.schan_val = S32_MAX,
.schan_val2 = 1,
.expected = "-214748364.7",
},
{
.name = "overflow IIO_VAL_FRACTIONAL_LOG2, positive",
.numerator = S32_MAX,
.denominator = 4096,
.schan_scale_type = IIO_VAL_FRACTIONAL_LOG2,
.schan_val = 4096,
.schan_val2 = 16,
.expected = "32767.99998474121",
},
{
.name = "overflow IIO_VAL_FRACTIONAL_LOG2, negative",
.numerator = S32_MAX,
.denominator = 4096,
.schan_scale_type = IIO_VAL_FRACTIONAL_LOG2,
.schan_val = -4096,
.schan_val2 = 16,
.expected = "-32767.99998474121",
},
{
.name = "overflow IIO_VAL_INT_PLUS_NANO, positive",
.numerator = 2,
.denominator = 20,
.schan_scale_type = IIO_VAL_INT_PLUS_NANO,
.schan_val = 10,
.schan_val2 = S32_MAX,
.expected = "1.214748364",
},
{
.name = "overflow IIO_VAL_INT_PLUS_NANO, negative",
.numerator = -2,
.denominator = 20,
.schan_scale_type = IIO_VAL_INT_PLUS_NANO,
.schan_val = 10,
.schan_val2 = S32_MAX,
.expected = "-1.214748364",
},
{
.name = "overflow IIO_VAL_INT_PLUS_NANO, negative schan",
.numerator = 2,
.denominator = 20,
.schan_scale_type = IIO_VAL_INT_PLUS_NANO,
.schan_val = -10,
.schan_val2 = S32_MAX,
.expected = "-1.214748364",
},
{
.name = "overflow IIO_VAL_INT_PLUS_MICRO, positive",
.numerator = 2,
.denominator = 20,
.schan_scale_type = IIO_VAL_INT_PLUS_MICRO,
.schan_val = 10,
.schan_val2 = S32_MAX,
.expected = "215.748364",
},
{
.name = "overflow IIO_VAL_INT_PLUS_MICRO, negative",
.numerator = -2,
.denominator = 20,
.schan_scale_type = IIO_VAL_INT_PLUS_MICRO,
.schan_val = 10,
.schan_val2 = S32_MAX,
.expected = "-215.748364",
},
{
.name = "overflow IIO_VAL_INT_PLUS_MICRO, negative schan",
.numerator = 2,
.denominator = 20,
.schan_scale_type = IIO_VAL_INT_PLUS_MICRO,
.schan_val = -10,
.schan_val2 = S32_MAX,
.expected = "-215.748364",
},
};
static const struct rescale_tc_data offset_cases[] = {
/*
* Typical use cases
*/
{
.name = "typical IIO_VAL_INT, positive",
.offset = 1234,
.schan_scale_type = IIO_VAL_INT,
.schan_val = 123,
.schan_val2 = 0,
.schan_off = 14,
.expected_off = "24", /* 23.872 */
},
{
.name = "typical IIO_VAL_INT, negative",
.offset = -1234,
.schan_scale_type = IIO_VAL_INT,
.schan_val = 12,
.schan_val2 = 0,
.schan_off = 14,
.expected_off = "-88", /* -88.83333333333333 */
},
{
.name = "typical IIO_VAL_FRACTIONAL, positive",
.offset = 1234,
.schan_scale_type = IIO_VAL_FRACTIONAL,
.schan_val = 12,
.schan_val2 = 34,
.schan_off = 14,
.expected_off = "3510", /* 3510.333333333333 */
},
{
.name = "typical IIO_VAL_FRACTIONAL, negative",
.offset = -1234,
.schan_scale_type = IIO_VAL_FRACTIONAL,
.schan_val = 12,
.schan_val2 = 34,
.schan_off = 14,
.expected_off = "-3482", /* -3482.333333333333 */
},
{
.name = "typical IIO_VAL_FRACTIONAL_LOG2, positive",
.offset = 1234,
.schan_scale_type = IIO_VAL_FRACTIONAL_LOG2,
.schan_val = 12,
.schan_val2 = 16,
.schan_off = 14,
.expected_off = "6739299", /* 6739299.333333333 */
},
{
.name = "typical IIO_VAL_FRACTIONAL_LOG2, negative",
.offset = -1234,
.schan_scale_type = IIO_VAL_FRACTIONAL_LOG2,
.schan_val = 12,
.schan_val2 = 16,
.schan_off = 14,
.expected_off = "-6739271", /* -6739271.333333333 */
},
{
.name = "typical IIO_VAL_INT_PLUS_NANO, positive",
.offset = 1234,
.schan_scale_type = IIO_VAL_INT_PLUS_NANO,
.schan_val = 10,
.schan_val2 = 123456789,
.schan_off = 14,
.expected_off = "135", /* 135.8951219647469 */
},
{
.name = "typical IIO_VAL_INT_PLUS_NANO, negative",
.offset = -1234,
.schan_scale_type = IIO_VAL_INT_PLUS_NANO,
.schan_val = 10,
.schan_val2 = 123456789,
.schan_off = 14,
.expected_off = "-107", /* -107.89512196474689 */
},
{
.name = "typical IIO_VAL_INT_PLUS_MICRO, positive",
.offset = 1234,
.schan_scale_type = IIO_VAL_INT_PLUS_MICRO,
.schan_val = 10,
.schan_val2 = 123456789,
.schan_off = 14,
.expected_off = "23", /* 23.246438560723952 */
},
{
.name = "typical IIO_VAL_INT_PLUS_MICRO, negative",
.offset = -12345,
.schan_scale_type = IIO_VAL_INT_PLUS_MICRO,
.schan_val = 10,
.schan_val2 = 123456789,
.schan_off = 14,
.expected_off = "-78", /* -78.50185091745313 */
},
};
static void case_to_desc(const struct rescale_tc_data *t, char *desc)
{
strcpy(desc, t->name);
}
KUNIT_ARRAY_PARAM(iio_rescale_scale, scale_cases, case_to_desc);
KUNIT_ARRAY_PARAM(iio_rescale_offset, offset_cases, case_to_desc);
/**
* iio_str_to_nano() - Parse a fixed-point string to get an
* IIO_VAL_INT_PLUS_NANO value
* @str: The string to parse
 * @nano: The parsed value, expressed in billionths (nano units)
*
 * Returns 0 on success, or a negative error code if the string could not be
* parsed.
*/
static int iio_str_to_nano(const char *str, s64 *nano)
{
int tmp, tmp2;
int ret = 0;
/*
* iio_str_to_fixpoint() uses 10^8 here instead of 10^9 as fract_mult is
* the multiplier for the first decimal place.
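	 * For example, "1.5" parses to tmp = 1, tmp2 = 500000000, giving
	 * *nano = 1500000000.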
*/
ret = iio_str_to_fixpoint(str, 100000000, &tmp, &tmp2);
if (ret < 0)
return ret;
if (tmp < 0)
tmp2 *= -1;
*nano = (s64)tmp * 1000000000UL + tmp2;
return ret;
}
/**
* iio_test_relative_error_ppm() - Compute relative error (in parts-per-million)
* between two fixed-point strings
* @real_str: The real value as a string
* @exp_str: The expected value as a string
*
 * Returns a negative error code if the strings could not be parsed, or the
* relative error in parts-per-million.
*/
static int iio_test_relative_error_ppm(const char *real_str, const char *exp_str)
{
s64 real, exp, err;
int ret;
ret = iio_str_to_nano(real_str, &real);
if (ret < 0)
return ret;
ret = iio_str_to_nano(exp_str, &exp);
if (ret < 0)
return ret;
if (!exp) {
pr_err("Expected value is null, relative error is undefined\n");
return -EINVAL;
}
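	/* Relative error in ppm: 10^6 * |expected - real| / |expected| */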
err = 1000000UL * abs(exp - real);
return (int)div64_u64(err, abs(exp));
}
static void iio_rescale_test_scale(struct kunit *test)
{
struct rescale_tc_data *t = (struct rescale_tc_data *)test->param_value;
char *buff = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
struct rescale rescale;
int values[2];
int rel_ppm;
int ret;
rescale.numerator = t->numerator;
rescale.denominator = t->denominator;
rescale.offset = t->offset;
values[0] = t->schan_val;
values[1] = t->schan_val2;
ret = rescale_process_scale(&rescale, t->schan_scale_type,
&values[0], &values[1]);
ret = iio_format_value(buff, ret, 2, values);
KUNIT_EXPECT_EQ(test, (int)strlen(buff), ret);
rel_ppm = iio_test_relative_error_ppm(buff, t->expected);
KUNIT_EXPECT_GE_MSG(test, rel_ppm, 0, "failed to compute ppm\n");
KUNIT_EXPECT_EQ_MSG(test, rel_ppm, 0,
"\t real=%s"
"\texpected=%s\n",
buff, t->expected);
}
static void iio_rescale_test_offset(struct kunit *test)
{
struct rescale_tc_data *t = (struct rescale_tc_data *)test->param_value;
char *buff_off = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
struct rescale rescale;
int values[2];
int ret;
rescale.numerator = t->numerator;
rescale.denominator = t->denominator;
rescale.offset = t->offset;
values[0] = t->schan_val;
values[1] = t->schan_val2;
ret = rescale_process_offset(&rescale, t->schan_scale_type,
t->schan_val, t->schan_val2, t->schan_off,
&values[0], &values[1]);
ret = iio_format_value(buff_off, ret, 2, values);
KUNIT_EXPECT_EQ(test, (int)strlen(buff_off), ret);
KUNIT_EXPECT_STREQ(test, strim(buff_off), t->expected_off);
}
static struct kunit_case iio_rescale_test_cases[] = {
KUNIT_CASE_PARAM(iio_rescale_test_scale, iio_rescale_scale_gen_params),
KUNIT_CASE_PARAM(iio_rescale_test_offset, iio_rescale_offset_gen_params),
{}
};
static struct kunit_suite iio_rescale_test_suite = {
.name = "iio-rescale",
.test_cases = iio_rescale_test_cases,
};
kunit_test_suite(iio_rescale_test_suite);
MODULE_AUTHOR("Liam Beguin <[email protected]>");
MODULE_DESCRIPTION("Test IIO rescale conversion functions");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(IIO_RESCALE);
| linux-master | drivers/iio/test/iio-test-rescale.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Unit tests for IIO formatting functions
*
* Copyright (c) 2020 Lars-Peter Clausen <[email protected]>
*/
#include <kunit/test.h>
#include <linux/iio/iio.h>
#define IIO_TEST_FORMAT_EXPECT_EQ(_test, _buf, _ret, _val) do { \
KUNIT_EXPECT_EQ(_test, strlen(_buf), _ret); \
KUNIT_EXPECT_STREQ(_test, (_buf), (_val)); \
} while (0)
static void iio_test_iio_format_value_integer(struct kunit *test)
{
char *buf;
int val;
int ret;
buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
val = 42;
ret = iio_format_value(buf, IIO_VAL_INT, 1, &val);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "42\n");
val = -23;
ret = iio_format_value(buf, IIO_VAL_INT, 1, &val);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-23\n");
val = 0;
ret = iio_format_value(buf, IIO_VAL_INT, 1, &val);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0\n");
val = INT_MAX;
ret = iio_format_value(buf, IIO_VAL_INT, 1, &val);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "2147483647\n");
val = INT_MIN;
ret = iio_format_value(buf, IIO_VAL_INT, 1, &val);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-2147483648\n");
}
static void iio_test_iio_format_value_fixedpoint(struct kunit *test)
{
int values[2];
char *buf;
int ret;
buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
/* positive >= 1 */
values[0] = 1;
values[1] = 10;
ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "1.000010\n");
ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "1.000010 dB\n");
ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "1.000000010\n");
/* positive < 1 */
values[0] = 0;
values[1] = 12;
ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.000012\n");
ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.000012 dB\n");
ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.000000012\n");
/* negative <= -1 */
values[0] = -1;
values[1] = 10;
ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-1.000010\n");
ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-1.000010 dB\n");
ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-1.000000010\n");
/* negative > -1 */
values[0] = 0;
values[1] = -123;
ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-0.000123\n");
ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-0.000123 dB\n");
ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-0.000000123\n");
}
static void iio_test_iio_format_value_fractional(struct kunit *test)
{
int values[2];
char *buf;
int ret;
buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
/* positive < 1 */
values[0] = 1;
values[1] = 10;
ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.100000000\n");
/* positive >= 1 */
values[0] = 100;
values[1] = 3;
ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "33.333333333\n");
/* negative > -1 */
values[0] = -1;
values[1] = 1000000000;
ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-0.000000001\n");
/* negative <= -1 */
values[0] = -200;
values[1] = 3;
ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-66.666666666\n");
/* Zero */
values[0] = 0;
values[1] = -10;
ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.000000000\n");
}
static void iio_test_iio_format_value_fractional_log2(struct kunit *test)
{
int values[2];
char *buf;
int ret;
buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
/* positive < 1 */
values[0] = 123;
values[1] = 10;
ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.120117187\n");
/* positive >= 1 */
values[0] = 1234567;
values[1] = 10;
ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "1205.631835937\n");
/* negative > -1 */
values[0] = -123;
values[1] = 10;
ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-0.120117187\n");
/* negative <= -1 */
values[0] = -1234567;
values[1] = 10;
ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-1205.631835937\n");
/* Zero */
values[0] = 0;
values[1] = 10;
ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.000000000\n");
}
static void iio_test_iio_format_value_multiple(struct kunit *test)
{
int values[] = {1, -2, 3, -4, 5};
char *buf;
int ret;
buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
ret = iio_format_value(buf, IIO_VAL_INT_MULTIPLE,
ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "1 -2 3 -4 5 \n");
}
static void iio_test_iio_format_value_integer_64(struct kunit *test)
{
int values[2];
s64 value;
char *buf;
int ret;
buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
value = 24;
values[0] = lower_32_bits(value);
values[1] = upper_32_bits(value);
ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "24\n");
value = -24;
values[0] = lower_32_bits(value);
values[1] = upper_32_bits(value);
ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-24\n");
value = 0;
values[0] = lower_32_bits(value);
values[1] = upper_32_bits(value);
ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0\n");
value = UINT_MAX;
values[0] = lower_32_bits(value);
values[1] = upper_32_bits(value);
ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "4294967295\n");
value = -((s64)UINT_MAX);
values[0] = lower_32_bits(value);
values[1] = upper_32_bits(value);
ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-4294967295\n");
value = LLONG_MAX;
values[0] = lower_32_bits(value);
values[1] = upper_32_bits(value);
ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "9223372036854775807\n");
value = LLONG_MIN;
values[0] = lower_32_bits(value);
values[1] = upper_32_bits(value);
ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-9223372036854775808\n");
}
static struct kunit_case iio_format_test_cases[] = {
KUNIT_CASE(iio_test_iio_format_value_integer),
KUNIT_CASE(iio_test_iio_format_value_fixedpoint),
KUNIT_CASE(iio_test_iio_format_value_fractional),
KUNIT_CASE(iio_test_iio_format_value_fractional_log2),
KUNIT_CASE(iio_test_iio_format_value_multiple),
KUNIT_CASE(iio_test_iio_format_value_integer_64),
{}
};
static struct kunit_suite iio_format_test_suite = {
.name = "iio-format",
.test_cases = iio_format_test_cases,
};
kunit_test_suite(iio_format_test_suite);
MODULE_AUTHOR("Lars-Peter Clausen <[email protected]>");
MODULE_DESCRIPTION("Test IIO formatting functions");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/test/iio-test-format.c |
// SPDX-License-Identifier: GPL-2.0
/*
* IIO multiplexer driver
*
* Copyright (C) 2017 Axentia Technologies AB
*
* Author: Peter Rosin <[email protected]>
*/
#include <linux/err.h>
#include <linux/iio/consumer.h>
#include <linux/iio/iio.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/mux/consumer.h>
#include <linux/platform_device.h>
#include <linux/property.h>
struct mux_ext_info_cache {
char *data;
ssize_t size;
};
struct mux_child {
struct mux_ext_info_cache *ext_info_cache;
};
struct mux {
int cached_state;
struct mux_control *control;
struct iio_channel *parent;
struct iio_dev *indio_dev;
struct iio_chan_spec *chan;
struct iio_chan_spec_ext_info *ext_info;
struct mux_child *child;
u32 delay_us;
};
static int iio_mux_select(struct mux *mux, int idx)
{
struct mux_child *child = &mux->child[idx];
struct iio_chan_spec const *chan = &mux->chan[idx];
int ret;
int i;
ret = mux_control_select_delay(mux->control, chan->channel,
mux->delay_us);
if (ret < 0) {
mux->cached_state = -1;
return ret;
}
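	/*
	 * The mux hardware is now switched; if this child was already the
	 * active one, the cached ext_info values are still current, so skip
	 * replaying them.
	 */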
if (mux->cached_state == chan->channel)
return 0;
if (chan->ext_info) {
for (i = 0; chan->ext_info[i].name; ++i) {
const char *attr = chan->ext_info[i].name;
struct mux_ext_info_cache *cache;
cache = &child->ext_info_cache[i];
if (cache->size < 0)
continue;
ret = iio_write_channel_ext_info(mux->parent, attr,
cache->data,
cache->size);
if (ret < 0) {
mux_control_deselect(mux->control);
mux->cached_state = -1;
return ret;
}
}
}
mux->cached_state = chan->channel;
return 0;
}
static void iio_mux_deselect(struct mux *mux)
{
mux_control_deselect(mux->control);
}
static int mux_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct mux *mux = iio_priv(indio_dev);
int idx = chan - mux->chan;
int ret;
ret = iio_mux_select(mux, idx);
if (ret < 0)
return ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = iio_read_channel_raw(mux->parent, val);
break;
case IIO_CHAN_INFO_SCALE:
ret = iio_read_channel_scale(mux->parent, val, val2);
break;
default:
ret = -EINVAL;
}
iio_mux_deselect(mux);
return ret;
}
static int mux_read_avail(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
const int **vals, int *type, int *length,
long mask)
{
struct mux *mux = iio_priv(indio_dev);
int idx = chan - mux->chan;
int ret;
ret = iio_mux_select(mux, idx);
if (ret < 0)
return ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
*type = IIO_VAL_INT;
ret = iio_read_avail_channel_raw(mux->parent, vals, length);
break;
default:
ret = -EINVAL;
}
iio_mux_deselect(mux);
return ret;
}
static int mux_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct mux *mux = iio_priv(indio_dev);
int idx = chan - mux->chan;
int ret;
ret = iio_mux_select(mux, idx);
if (ret < 0)
return ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = iio_write_channel_raw(mux->parent, val);
break;
default:
ret = -EINVAL;
}
iio_mux_deselect(mux);
return ret;
}
static const struct iio_info mux_info = {
.read_raw = mux_read_raw,
.read_avail = mux_read_avail,
.write_raw = mux_write_raw,
};
static ssize_t mux_read_ext_info(struct iio_dev *indio_dev, uintptr_t private,
struct iio_chan_spec const *chan, char *buf)
{
struct mux *mux = iio_priv(indio_dev);
int idx = chan - mux->chan;
ssize_t ret;
ret = iio_mux_select(mux, idx);
if (ret < 0)
return ret;
ret = iio_read_channel_ext_info(mux->parent,
mux->ext_info[private].name,
buf);
iio_mux_deselect(mux);
return ret;
}
static ssize_t mux_write_ext_info(struct iio_dev *indio_dev, uintptr_t private,
struct iio_chan_spec const *chan,
const char *buf, size_t len)
{
struct device *dev = indio_dev->dev.parent;
struct mux *mux = iio_priv(indio_dev);
int idx = chan - mux->chan;
char *new;
ssize_t ret;
if (len >= PAGE_SIZE)
return -EINVAL;
ret = iio_mux_select(mux, idx);
if (ret < 0)
return ret;
new = devm_kmemdup(dev, buf, len + 1, GFP_KERNEL);
if (!new) {
iio_mux_deselect(mux);
return -ENOMEM;
}
new[len] = 0;
ret = iio_write_channel_ext_info(mux->parent,
mux->ext_info[private].name,
buf, len);
if (ret < 0) {
iio_mux_deselect(mux);
devm_kfree(dev, new);
return ret;
}
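	/*
	 * Remember the value that was written so iio_mux_select() can replay
	 * it the next time this child channel becomes active.
	 */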
devm_kfree(dev, mux->child[idx].ext_info_cache[private].data);
mux->child[idx].ext_info_cache[private].data = new;
mux->child[idx].ext_info_cache[private].size = len;
iio_mux_deselect(mux);
return ret;
}
static int mux_configure_channel(struct device *dev, struct mux *mux,
u32 state, const char *label, int idx)
{
struct mux_child *child = &mux->child[idx];
struct iio_chan_spec *chan = &mux->chan[idx];
struct iio_chan_spec const *pchan = mux->parent->channel;
char *page = NULL;
int num_ext_info;
int i;
int ret;
chan->indexed = 1;
chan->output = pchan->output;
chan->datasheet_name = label;
chan->ext_info = mux->ext_info;
ret = iio_get_channel_type(mux->parent, &chan->type);
if (ret < 0) {
dev_err(dev, "failed to get parent channel type\n");
return ret;
}
if (iio_channel_has_info(pchan, IIO_CHAN_INFO_RAW))
chan->info_mask_separate |= BIT(IIO_CHAN_INFO_RAW);
if (iio_channel_has_info(pchan, IIO_CHAN_INFO_SCALE))
chan->info_mask_separate |= BIT(IIO_CHAN_INFO_SCALE);
if (iio_channel_has_available(pchan, IIO_CHAN_INFO_RAW))
chan->info_mask_separate_available |= BIT(IIO_CHAN_INFO_RAW);
if (state >= mux_control_states(mux->control)) {
dev_err(dev, "too many channels\n");
return -EINVAL;
}
chan->channel = state;
num_ext_info = iio_get_channel_ext_info_count(mux->parent);
if (num_ext_info) {
page = devm_kzalloc(dev, PAGE_SIZE, GFP_KERNEL);
if (!page)
return -ENOMEM;
}
child->ext_info_cache = devm_kcalloc(dev,
num_ext_info,
sizeof(*child->ext_info_cache),
GFP_KERNEL);
if (!child->ext_info_cache)
return -ENOMEM;
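	/*
	 * Prime the cache with the parent's current value for every ext_info
	 * attribute that is both readable and writable; size = -1 marks
	 * entries that are never replayed on channel switches.
	 */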
for (i = 0; i < num_ext_info; ++i) {
child->ext_info_cache[i].size = -1;
if (!pchan->ext_info[i].write)
continue;
if (!pchan->ext_info[i].read)
continue;
ret = iio_read_channel_ext_info(mux->parent,
mux->ext_info[i].name,
page);
if (ret < 0) {
dev_err(dev, "failed to get ext_info '%s'\n",
pchan->ext_info[i].name);
return ret;
}
if (ret >= PAGE_SIZE) {
dev_err(dev, "too large ext_info '%s'\n",
pchan->ext_info[i].name);
return -EINVAL;
}
child->ext_info_cache[i].data = devm_kmemdup(dev, page, ret + 1,
GFP_KERNEL);
if (!child->ext_info_cache[i].data)
return -ENOMEM;
child->ext_info_cache[i].data[ret] = 0;
child->ext_info_cache[i].size = ret;
}
if (page)
devm_kfree(dev, page);
return 0;
}
static int mux_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct iio_dev *indio_dev;
struct iio_channel *parent;
struct mux *mux;
const char **labels;
int all_children;
int children;
u32 state;
int sizeof_ext_info;
int sizeof_priv;
int i;
int ret;
parent = devm_iio_channel_get(dev, "parent");
if (IS_ERR(parent))
return dev_err_probe(dev, PTR_ERR(parent),
"failed to get parent channel\n");
sizeof_ext_info = iio_get_channel_ext_info_count(parent);
if (sizeof_ext_info) {
sizeof_ext_info += 1; /* one extra entry for the sentinel */
sizeof_ext_info *= sizeof(*mux->ext_info);
}
all_children = device_property_string_array_count(dev, "channels");
if (all_children < 0)
return all_children;
labels = devm_kmalloc_array(dev, all_children, sizeof(*labels), GFP_KERNEL);
if (!labels)
return -ENOMEM;
ret = device_property_read_string_array(dev, "channels", labels, all_children);
if (ret < 0)
return ret;
children = 0;
for (state = 0; state < all_children; state++) {
if (*labels[state])
children++;
}
if (children <= 0) {
dev_err(dev, "not even a single child\n");
return -EINVAL;
}
sizeof_priv = sizeof(*mux);
sizeof_priv += sizeof(*mux->child) * children;
sizeof_priv += sizeof(*mux->chan) * children;
sizeof_priv += sizeof_ext_info;
indio_dev = devm_iio_device_alloc(dev, sizeof_priv);
if (!indio_dev)
return -ENOMEM;
mux = iio_priv(indio_dev);
mux->child = (struct mux_child *)(mux + 1);
mux->chan = (struct iio_chan_spec *)(mux->child + children);
platform_set_drvdata(pdev, indio_dev);
mux->parent = parent;
mux->cached_state = -1;
mux->delay_us = 0;
device_property_read_u32(dev, "settle-time-us", &mux->delay_us);
indio_dev->name = dev_name(dev);
indio_dev->info = &mux_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = mux->chan;
indio_dev->num_channels = children;
if (sizeof_ext_info) {
mux->ext_info = devm_kmemdup(dev,
parent->channel->ext_info,
sizeof_ext_info, GFP_KERNEL);
if (!mux->ext_info)
return -ENOMEM;
for (i = 0; mux->ext_info[i].name; ++i) {
if (parent->channel->ext_info[i].read)
mux->ext_info[i].read = mux_read_ext_info;
if (parent->channel->ext_info[i].write)
mux->ext_info[i].write = mux_write_ext_info;
mux->ext_info[i].private = i;
}
}
mux->control = devm_mux_control_get(dev, NULL);
if (IS_ERR(mux->control))
return dev_err_probe(dev, PTR_ERR(mux->control),
"failed to get control-mux\n");
i = 0;
for (state = 0; state < all_children; state++) {
if (!*labels[state])
continue;
ret = mux_configure_channel(dev, mux, state, labels[state], i++);
if (ret < 0)
return ret;
}
ret = devm_iio_device_register(dev, indio_dev);
if (ret) {
dev_err(dev, "failed to register iio device\n");
return ret;
}
return 0;
}
static const struct of_device_id mux_match[] = {
{ .compatible = "io-channel-mux" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mux_match);
static struct platform_driver mux_driver = {
.probe = mux_probe,
.driver = {
.name = "iio-mux",
.of_match_table = mux_match,
},
};
module_platform_driver(mux_driver);
MODULE_DESCRIPTION("IIO multiplexer driver");
MODULE_AUTHOR("Peter Rosin <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/multiplexer/iio-mux.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ADF4350/ADF4351 SPI Wideband Synthesizer driver
*
* Copyright 2012-2013 Analog Devices Inc.
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <linux/gcd.h>
#include <linux/gpio/consumer.h>
#include <asm/div64.h>
#include <linux/clk.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/frequency/adf4350.h>
enum {
ADF4350_FREQ,
ADF4350_FREQ_REFIN,
ADF4350_FREQ_RESOLUTION,
ADF4350_PWRDOWN,
};
struct adf4350_state {
struct spi_device *spi;
struct regulator *reg;
struct gpio_desc *lock_detect_gpiod;
struct adf4350_platform_data *pdata;
struct clk *clk;
unsigned long clkin;
unsigned long chspc; /* Channel Spacing */
unsigned long fpfd; /* Phase Frequency Detector */
unsigned long min_out_freq;
unsigned r0_fract;
unsigned r0_int;
unsigned r1_mod;
unsigned r4_rf_div_sel;
unsigned long regs[6];
unsigned long regs_hw[6];
unsigned long long freq_req;
/*
* Lock to protect the state of the device from potential concurrent
* writes. The device is configured via a sequence of SPI writes,
* and this lock is meant to prevent the start of another sequence
	 * before the previous one has finished.
*/
struct mutex lock;
/*
* DMA (thus cache coherency maintenance) may require that
* transfer buffers live in their own cache lines.
*/
__be32 val __aligned(IIO_DMA_MINALIGN);
};
static struct adf4350_platform_data default_pdata = {
.channel_spacing = 10000,
.r2_user_settings = ADF4350_REG2_PD_POLARITY_POS |
ADF4350_REG2_CHARGE_PUMP_CURR_uA(2500),
.r3_user_settings = ADF4350_REG3_12BIT_CLKDIV_MODE(0),
.r4_user_settings = ADF4350_REG4_OUTPUT_PWR(3) |
ADF4350_REG4_MUTE_TILL_LOCK_EN,
};
static int adf4350_sync_config(struct adf4350_state *st)
{
int ret, i, doublebuf = 0;
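	/*
	 * Write from R5 down to R0; R0 is forced out whenever R1 or R4
	 * changed so that their double-buffered fields are latched by the
	 * final R0 write.
	 */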
for (i = ADF4350_REG5; i >= ADF4350_REG0; i--) {
if ((st->regs_hw[i] != st->regs[i]) ||
((i == ADF4350_REG0) && doublebuf)) {
switch (i) {
case ADF4350_REG1:
case ADF4350_REG4:
doublebuf = 1;
break;
}
st->val = cpu_to_be32(st->regs[i] | i);
ret = spi_write(st->spi, &st->val, 4);
if (ret < 0)
return ret;
st->regs_hw[i] = st->regs[i];
dev_dbg(&st->spi->dev, "[%d] 0x%X\n",
i, (u32)st->regs[i] | i);
}
}
return 0;
}
static int adf4350_reg_access(struct iio_dev *indio_dev,
unsigned reg, unsigned writeval,
unsigned *readval)
{
struct adf4350_state *st = iio_priv(indio_dev);
int ret;
if (reg > ADF4350_REG5)
return -EINVAL;
mutex_lock(&st->lock);
if (readval == NULL) {
st->regs[reg] = writeval & ~(BIT(0) | BIT(1) | BIT(2));
ret = adf4350_sync_config(st);
} else {
*readval = st->regs_hw[reg];
ret = 0;
}
mutex_unlock(&st->lock);
return ret;
}
static int adf4350_tune_r_cnt(struct adf4350_state *st, unsigned short r_cnt)
{
struct adf4350_platform_data *pdata = st->pdata;
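	/*
	 * Bump the 10-bit R counter until the phase frequency detector rate
	 * drops to ADF4350_MAX_FREQ_PFD or below, taking the optional
	 * reference doubler and divide-by-2 into account.
	 */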
do {
r_cnt++;
st->fpfd = (st->clkin * (pdata->ref_doubler_en ? 2 : 1)) /
(r_cnt * (pdata->ref_div2_en ? 2 : 1));
} while (st->fpfd > ADF4350_MAX_FREQ_PFD);
return r_cnt;
}
static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
{
struct adf4350_platform_data *pdata = st->pdata;
u64 tmp;
u32 div_gcd, prescaler, chspc;
u16 mdiv, r_cnt = 0;
u8 band_sel_div;
if (freq > ADF4350_MAX_OUT_FREQ || freq < st->min_out_freq)
return -EINVAL;
if (freq > ADF4350_MAX_FREQ_45_PRESC) {
prescaler = ADF4350_REG1_PRESCALER;
mdiv = 75;
} else {
prescaler = 0;
mdiv = 23;
}
st->r4_rf_div_sel = 0;
while (freq < ADF4350_MIN_VCO_FREQ) {
freq <<= 1;
st->r4_rf_div_sel++;
}
/*
* Allow a predefined reference division factor
* if not set, compute our own
*/
if (pdata->ref_div_factor)
r_cnt = pdata->ref_div_factor - 1;
chspc = st->chspc;
do {
do {
do {
r_cnt = adf4350_tune_r_cnt(st, r_cnt);
st->r1_mod = st->fpfd / chspc;
if (r_cnt > ADF4350_MAX_R_CNT) {
/* try higher spacing values */
chspc++;
r_cnt = 0;
}
} while ((st->r1_mod > ADF4350_MAX_MODULUS) && r_cnt);
} while (r_cnt == 0);
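		/*
		 * The VCO runs at f_PFD * (INT + FRACT / MOD); freq has
		 * already been shifted into the VCO range above, so derive
		 * INT and FRACT with round-to-nearest division.
		 */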
tmp = freq * (u64)st->r1_mod + (st->fpfd >> 1);
do_div(tmp, st->fpfd); /* Div round closest (n + d/2)/d */
st->r0_fract = do_div(tmp, st->r1_mod);
st->r0_int = tmp;
} while (mdiv > st->r0_int);
band_sel_div = DIV_ROUND_UP(st->fpfd, ADF4350_MAX_BANDSEL_CLK);
if (st->r0_fract && st->r1_mod) {
div_gcd = gcd(st->r1_mod, st->r0_fract);
st->r1_mod /= div_gcd;
st->r0_fract /= div_gcd;
} else {
st->r0_fract = 0;
st->r1_mod = 1;
}
dev_dbg(&st->spi->dev, "VCO: %llu Hz, PFD %lu Hz\n"
"REF_DIV %d, R0_INT %d, R0_FRACT %d\n"
"R1_MOD %d, RF_DIV %d\nPRESCALER %s, BAND_SEL_DIV %d\n",
freq, st->fpfd, r_cnt, st->r0_int, st->r0_fract, st->r1_mod,
1 << st->r4_rf_div_sel, prescaler ? "8/9" : "4/5",
band_sel_div);
st->regs[ADF4350_REG0] = ADF4350_REG0_INT(st->r0_int) |
ADF4350_REG0_FRACT(st->r0_fract);
st->regs[ADF4350_REG1] = ADF4350_REG1_PHASE(1) |
ADF4350_REG1_MOD(st->r1_mod) |
prescaler;
st->regs[ADF4350_REG2] =
ADF4350_REG2_10BIT_R_CNT(r_cnt) |
ADF4350_REG2_DOUBLE_BUFF_EN |
(pdata->ref_doubler_en ? ADF4350_REG2_RMULT2_EN : 0) |
(pdata->ref_div2_en ? ADF4350_REG2_RDIV2_EN : 0) |
(pdata->r2_user_settings & (ADF4350_REG2_PD_POLARITY_POS |
ADF4350_REG2_LDP_6ns | ADF4350_REG2_LDF_INT_N |
ADF4350_REG2_CHARGE_PUMP_CURR_uA(5000) |
ADF4350_REG2_MUXOUT(0x7) | ADF4350_REG2_NOISE_MODE(0x3)));
st->regs[ADF4350_REG3] = pdata->r3_user_settings &
(ADF4350_REG3_12BIT_CLKDIV(0xFFF) |
ADF4350_REG3_12BIT_CLKDIV_MODE(0x3) |
ADF4350_REG3_12BIT_CSR_EN |
ADF4351_REG3_CHARGE_CANCELLATION_EN |
ADF4351_REG3_ANTI_BACKLASH_3ns_EN |
ADF4351_REG3_BAND_SEL_CLOCK_MODE_HIGH);
st->regs[ADF4350_REG4] =
ADF4350_REG4_FEEDBACK_FUND |
ADF4350_REG4_RF_DIV_SEL(st->r4_rf_div_sel) |
ADF4350_REG4_8BIT_BAND_SEL_CLKDIV(band_sel_div) |
ADF4350_REG4_RF_OUT_EN |
(pdata->r4_user_settings &
(ADF4350_REG4_OUTPUT_PWR(0x3) |
ADF4350_REG4_AUX_OUTPUT_PWR(0x3) |
ADF4350_REG4_AUX_OUTPUT_EN |
ADF4350_REG4_AUX_OUTPUT_FUND |
ADF4350_REG4_MUTE_TILL_LOCK_EN));
st->regs[ADF4350_REG5] = ADF4350_REG5_LD_PIN_MODE_DIGITAL;
st->freq_req = freq;
return adf4350_sync_config(st);
}
static ssize_t adf4350_write(struct iio_dev *indio_dev,
uintptr_t private,
const struct iio_chan_spec *chan,
const char *buf, size_t len)
{
struct adf4350_state *st = iio_priv(indio_dev);
unsigned long long readin;
unsigned long tmp;
int ret;
ret = kstrtoull(buf, 10, &readin);
if (ret)
return ret;
mutex_lock(&st->lock);
switch ((u32)private) {
case ADF4350_FREQ:
ret = adf4350_set_freq(st, readin);
break;
case ADF4350_FREQ_REFIN:
if (readin > ADF4350_MAX_FREQ_REFIN) {
ret = -EINVAL;
break;
}
if (st->clk) {
tmp = clk_round_rate(st->clk, readin);
if (tmp != readin) {
ret = -EINVAL;
break;
}
ret = clk_set_rate(st->clk, tmp);
if (ret < 0)
break;
}
st->clkin = readin;
ret = adf4350_set_freq(st, st->freq_req);
break;
case ADF4350_FREQ_RESOLUTION:
if (readin == 0)
ret = -EINVAL;
else
st->chspc = readin;
break;
case ADF4350_PWRDOWN:
if (readin)
st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN;
else
st->regs[ADF4350_REG2] &= ~ADF4350_REG2_POWER_DOWN_EN;
adf4350_sync_config(st);
break;
default:
ret = -EINVAL;
}
mutex_unlock(&st->lock);
return ret ? ret : len;
}
static ssize_t adf4350_read(struct iio_dev *indio_dev,
uintptr_t private,
const struct iio_chan_spec *chan,
char *buf)
{
struct adf4350_state *st = iio_priv(indio_dev);
unsigned long long val;
int ret = 0;
mutex_lock(&st->lock);
switch ((u32)private) {
case ADF4350_FREQ:
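		/*
		 * Reconstruct the programmed output frequency:
		 * f_PFD * (INT + FRACT / MOD) / 2^RF_DIV_SEL.
		 */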
val = (u64)((st->r0_int * st->r1_mod) + st->r0_fract) *
(u64)st->fpfd;
do_div(val, st->r1_mod * (1 << st->r4_rf_div_sel));
/* PLL unlocked? return error */
if (st->lock_detect_gpiod)
if (!gpiod_get_value(st->lock_detect_gpiod)) {
dev_dbg(&st->spi->dev, "PLL un-locked\n");
ret = -EBUSY;
}
break;
case ADF4350_FREQ_REFIN:
if (st->clk)
st->clkin = clk_get_rate(st->clk);
val = st->clkin;
break;
case ADF4350_FREQ_RESOLUTION:
val = st->chspc;
break;
case ADF4350_PWRDOWN:
val = !!(st->regs[ADF4350_REG2] & ADF4350_REG2_POWER_DOWN_EN);
break;
default:
ret = -EINVAL;
val = 0;
}
mutex_unlock(&st->lock);
return ret < 0 ? ret : sprintf(buf, "%llu\n", val);
}
#define _ADF4350_EXT_INFO(_name, _ident) { \
.name = _name, \
.read = adf4350_read, \
.write = adf4350_write, \
.private = _ident, \
.shared = IIO_SEPARATE, \
}
static const struct iio_chan_spec_ext_info adf4350_ext_info[] = {
	/* Ideally we would use IIO_CHAN_INFO_FREQUENCY, but covering the
	 * entire output frequency range in Hz needs values > 2^32, which the
	 * core value types cannot hold. Working around that with a scale
	 * factor would be a bit ugly, so extended attributes are used instead.
*/
_ADF4350_EXT_INFO("frequency", ADF4350_FREQ),
_ADF4350_EXT_INFO("frequency_resolution", ADF4350_FREQ_RESOLUTION),
_ADF4350_EXT_INFO("refin_frequency", ADF4350_FREQ_REFIN),
_ADF4350_EXT_INFO("powerdown", ADF4350_PWRDOWN),
{ },
};
static const struct iio_chan_spec adf4350_chan = {
.type = IIO_ALTVOLTAGE,
.indexed = 1,
.output = 1,
.ext_info = adf4350_ext_info,
};
static const struct iio_info adf4350_info = {
.debugfs_reg_access = &adf4350_reg_access,
};
static struct adf4350_platform_data *adf4350_parse_dt(struct device *dev)
{
struct adf4350_platform_data *pdata;
unsigned int tmp;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
snprintf(pdata->name, sizeof(pdata->name), "%pfw", dev_fwnode(dev));
tmp = 10000;
device_property_read_u32(dev, "adi,channel-spacing", &tmp);
pdata->channel_spacing = tmp;
tmp = 0;
device_property_read_u32(dev, "adi,power-up-frequency", &tmp);
pdata->power_up_frequency = tmp;
tmp = 0;
device_property_read_u32(dev, "adi,reference-div-factor", &tmp);
pdata->ref_div_factor = tmp;
pdata->ref_doubler_en = device_property_read_bool(dev, "adi,reference-doubler-enable");
pdata->ref_div2_en = device_property_read_bool(dev, "adi,reference-div2-enable");
/* r2_user_settings */
pdata->r2_user_settings = 0;
if (device_property_read_bool(dev, "adi,phase-detector-polarity-positive-enable"))
pdata->r2_user_settings |= ADF4350_REG2_PD_POLARITY_POS;
if (device_property_read_bool(dev, "adi,lock-detect-precision-6ns-enable"))
pdata->r2_user_settings |= ADF4350_REG2_LDP_6ns;
if (device_property_read_bool(dev, "adi,lock-detect-function-integer-n-enable"))
pdata->r2_user_settings |= ADF4350_REG2_LDF_INT_N;
tmp = 2500;
device_property_read_u32(dev, "adi,charge-pump-current", &tmp);
pdata->r2_user_settings |= ADF4350_REG2_CHARGE_PUMP_CURR_uA(tmp);
tmp = 0;
device_property_read_u32(dev, "adi,muxout-select", &tmp);
pdata->r2_user_settings |= ADF4350_REG2_MUXOUT(tmp);
if (device_property_read_bool(dev, "adi,low-spur-mode-enable"))
pdata->r2_user_settings |= ADF4350_REG2_NOISE_MODE(0x3);
/* r3_user_settings */
pdata->r3_user_settings = 0;
if (device_property_read_bool(dev, "adi,cycle-slip-reduction-enable"))
pdata->r3_user_settings |= ADF4350_REG3_12BIT_CSR_EN;
if (device_property_read_bool(dev, "adi,charge-cancellation-enable"))
pdata->r3_user_settings |= ADF4351_REG3_CHARGE_CANCELLATION_EN;
if (device_property_read_bool(dev, "adi,anti-backlash-3ns-enable"))
pdata->r3_user_settings |= ADF4351_REG3_ANTI_BACKLASH_3ns_EN;
if (device_property_read_bool(dev, "adi,band-select-clock-mode-high-enable"))
pdata->r3_user_settings |= ADF4351_REG3_BAND_SEL_CLOCK_MODE_HIGH;
tmp = 0;
device_property_read_u32(dev, "adi,12bit-clk-divider", &tmp);
pdata->r3_user_settings |= ADF4350_REG3_12BIT_CLKDIV(tmp);
tmp = 0;
device_property_read_u32(dev, "adi,clk-divider-mode", &tmp);
pdata->r3_user_settings |= ADF4350_REG3_12BIT_CLKDIV_MODE(tmp);
/* r4_user_settings */
pdata->r4_user_settings = 0;
if (device_property_read_bool(dev, "adi,aux-output-enable"))
pdata->r4_user_settings |= ADF4350_REG4_AUX_OUTPUT_EN;
if (device_property_read_bool(dev, "adi,aux-output-fundamental-enable"))
pdata->r4_user_settings |= ADF4350_REG4_AUX_OUTPUT_FUND;
if (device_property_read_bool(dev, "adi,mute-till-lock-enable"))
pdata->r4_user_settings |= ADF4350_REG4_MUTE_TILL_LOCK_EN;
tmp = 0;
device_property_read_u32(dev, "adi,output-power", &tmp);
pdata->r4_user_settings |= ADF4350_REG4_OUTPUT_PWR(tmp);
tmp = 0;
device_property_read_u32(dev, "adi,aux-output-power", &tmp);
pdata->r4_user_settings |= ADF4350_REG4_AUX_OUTPUT_PWR(tmp);
return pdata;
}
static int adf4350_probe(struct spi_device *spi)
{
struct adf4350_platform_data *pdata;
struct iio_dev *indio_dev;
struct adf4350_state *st;
struct clk *clk = NULL;
int ret;
if (dev_fwnode(&spi->dev)) {
pdata = adf4350_parse_dt(&spi->dev);
if (pdata == NULL)
return -EINVAL;
} else {
pdata = spi->dev.platform_data;
}
if (!pdata) {
dev_warn(&spi->dev, "no platform data? using default\n");
pdata = &default_pdata;
}
if (!pdata->clkin) {
clk = devm_clk_get(&spi->dev, "clkin");
if (IS_ERR(clk))
return -EPROBE_DEFER;
ret = clk_prepare_enable(clk);
if (ret < 0)
return ret;
}
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL) {
ret = -ENOMEM;
goto error_disable_clk;
}
st = iio_priv(indio_dev);
st->reg = devm_regulator_get(&spi->dev, "vcc");
if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
if (ret)
goto error_disable_clk;
}
spi_set_drvdata(spi, indio_dev);
st->spi = spi;
st->pdata = pdata;
indio_dev->name = (pdata->name[0] != 0) ? pdata->name :
spi_get_device_id(spi)->name;
indio_dev->info = &adf4350_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = &adf4350_chan;
indio_dev->num_channels = 1;
mutex_init(&st->lock);
st->chspc = pdata->channel_spacing;
if (clk) {
st->clk = clk;
st->clkin = clk_get_rate(clk);
} else {
st->clkin = pdata->clkin;
}
st->min_out_freq = spi_get_device_id(spi)->driver_data == 4351 ?
ADF4351_MIN_OUT_FREQ : ADF4350_MIN_OUT_FREQ;
memset(st->regs_hw, 0xFF, sizeof(st->regs_hw));
st->lock_detect_gpiod = devm_gpiod_get_optional(&spi->dev, NULL,
GPIOD_IN);
if (IS_ERR(st->lock_detect_gpiod)) {
ret = PTR_ERR(st->lock_detect_gpiod);
goto error_disable_reg;
}
if (pdata->power_up_frequency) {
ret = adf4350_set_freq(st, pdata->power_up_frequency);
if (ret)
goto error_disable_reg;
}
ret = iio_device_register(indio_dev);
if (ret)
goto error_disable_reg;
return 0;
error_disable_reg:
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
error_disable_clk:
clk_disable_unprepare(clk);
return ret;
}
static void adf4350_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct adf4350_state *st = iio_priv(indio_dev);
struct regulator *reg = st->reg;
st->regs[ADF4350_REG2] |= ADF4350_REG2_POWER_DOWN_EN;
adf4350_sync_config(st);
iio_device_unregister(indio_dev);
clk_disable_unprepare(st->clk);
if (!IS_ERR(reg))
regulator_disable(reg);
}
static const struct of_device_id adf4350_of_match[] = {
{ .compatible = "adi,adf4350", },
{ .compatible = "adi,adf4351", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, adf4350_of_match);
static const struct spi_device_id adf4350_id[] = {
{"adf4350", 4350},
{"adf4351", 4351},
{}
};
MODULE_DEVICE_TABLE(spi, adf4350_id);
static struct spi_driver adf4350_driver = {
.driver = {
.name = "adf4350",
.of_match_table = adf4350_of_match,
},
.probe = adf4350_probe,
.remove = adf4350_remove,
.id_table = adf4350_id,
};
module_spi_driver(adf4350_driver);
MODULE_AUTHOR("Michael Hennerich <[email protected]>");
MODULE_DESCRIPTION("Analog Devices ADF4350/ADF4351 PLL");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/frequency/adf4350.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ADRF6780 driver
*
* Copyright 2021 Analog Devices Inc.
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <asm/unaligned.h>
/* ADRF6780 Register Map */
#define ADRF6780_REG_CONTROL 0x00
#define ADRF6780_REG_ALARM_READBACK 0x01
#define ADRF6780_REG_ALARM_MASKS 0x02
#define ADRF6780_REG_ENABLE 0x03
#define ADRF6780_REG_LINEARIZE 0x04
#define ADRF6780_REG_LO_PATH 0x05
#define ADRF6780_REG_ADC_CONTROL 0x06
#define ADRF6780_REG_ADC_OUTPUT 0x0C
/* ADRF6780_REG_CONTROL Map */
#define ADRF6780_PARITY_EN_MSK BIT(15)
#define ADRF6780_SOFT_RESET_MSK BIT(14)
#define ADRF6780_CHIP_ID_MSK GENMASK(11, 4)
#define ADRF6780_CHIP_ID 0xA
#define ADRF6780_CHIP_REVISION_MSK GENMASK(3, 0)
/* ADRF6780_REG_ALARM_READBACK Map */
#define ADRF6780_PARITY_ERROR_MSK BIT(15)
#define ADRF6780_TOO_FEW_ERRORS_MSK BIT(14)
#define ADRF6780_TOO_MANY_ERRORS_MSK BIT(13)
#define ADRF6780_ADDRESS_RANGE_ERROR_MSK BIT(12)
/* ADRF6780_REG_ENABLE Map */
#define ADRF6780_VGA_BUFFER_EN_MSK BIT(8)
#define ADRF6780_DETECTOR_EN_MSK BIT(7)
#define ADRF6780_LO_BUFFER_EN_MSK BIT(6)
#define ADRF6780_IF_MODE_EN_MSK BIT(5)
#define ADRF6780_IQ_MODE_EN_MSK BIT(4)
#define ADRF6780_LO_X2_EN_MSK BIT(3)
#define ADRF6780_LO_PPF_EN_MSK BIT(2)
#define ADRF6780_LO_EN_MSK BIT(1)
#define ADRF6780_UC_BIAS_EN_MSK BIT(0)
/* ADRF6780_REG_LINEARIZE Map */
#define ADRF6780_RDAC_LINEARIZE_MSK GENMASK(7, 0)
/* ADRF6780_REG_LO_PATH Map */
#define ADRF6780_LO_SIDEBAND_MSK BIT(10)
#define ADRF6780_Q_PATH_PHASE_ACCURACY_MSK GENMASK(7, 4)
#define ADRF6780_I_PATH_PHASE_ACCURACY_MSK GENMASK(3, 0)
/* ADRF6780_REG_ADC_CONTROL Map */
#define ADRF6780_VDET_OUTPUT_SELECT_MSK BIT(3)
#define ADRF6780_ADC_START_MSK BIT(2)
#define ADRF6780_ADC_EN_MSK BIT(1)
#define ADRF6780_ADC_CLOCK_EN_MSK BIT(0)
/* ADRF6780_REG_ADC_OUTPUT Map */
#define ADRF6780_ADC_STATUS_MSK BIT(8)
#define ADRF6780_ADC_VALUE_MSK GENMASK(7, 0)
struct adrf6780_state {
struct spi_device *spi;
struct clk *clkin;
/* Protect against concurrent accesses to the device */
struct mutex lock;
bool vga_buff_en;
bool lo_buff_en;
bool if_mode_en;
bool iq_mode_en;
bool lo_x2_en;
bool lo_ppf_en;
bool lo_en;
bool uc_bias_en;
bool lo_sideband;
bool vdet_out_en;
u8 data[3] __aligned(IIO_DMA_MINALIGN);
};
static int __adrf6780_spi_read(struct adrf6780_state *st, unsigned int reg,
unsigned int *val)
{
int ret;
struct spi_transfer t = {0};
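/*
 * 24-bit frame, as implied by the shifts used here: bit 23 is the R/W
 * flag (set for a read), bits 22:17 hold the register address, bits
 * 16:1 carry the 16-bit data word and bit 0 is discarded on readback.
 */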
st->data[0] = 0x80 | (reg << 1);
st->data[1] = 0x0;
st->data[2] = 0x0;
t.rx_buf = &st->data[0];
t.tx_buf = &st->data[0];
t.len = 3;
ret = spi_sync_transfer(st->spi, &t, 1);
if (ret)
return ret;
*val = (get_unaligned_be24(&st->data[0]) >> 1) & GENMASK(15, 0);
return ret;
}
static int adrf6780_spi_read(struct adrf6780_state *st, unsigned int reg,
unsigned int *val)
{
int ret;
mutex_lock(&st->lock);
ret = __adrf6780_spi_read(st, reg, val);
mutex_unlock(&st->lock);
return ret;
}
static int __adrf6780_spi_write(struct adrf6780_state *st,
unsigned int reg,
unsigned int val)
{
put_unaligned_be24((val << 1) | (reg << 17), &st->data[0]);
return spi_write(st->spi, &st->data[0], 3);
}
static int adrf6780_spi_write(struct adrf6780_state *st, unsigned int reg,
unsigned int val)
{
int ret;
mutex_lock(&st->lock);
ret = __adrf6780_spi_write(st, reg, val);
mutex_unlock(&st->lock);
return ret;
}
static int __adrf6780_spi_update_bits(struct adrf6780_state *st,
unsigned int reg, unsigned int mask,
unsigned int val)
{
int ret;
unsigned int data, temp;
ret = __adrf6780_spi_read(st, reg, &data);
if (ret)
return ret;
temp = (data & ~mask) | (val & mask);
return __adrf6780_spi_write(st, reg, temp);
}
static int adrf6780_spi_update_bits(struct adrf6780_state *st, unsigned int reg,
unsigned int mask, unsigned int val)
{
int ret;
mutex_lock(&st->lock);
ret = __adrf6780_spi_update_bits(st, reg, mask, val);
mutex_unlock(&st->lock);
return ret;
}
static int adrf6780_read_adc_raw(struct adrf6780_state *st, unsigned int *read_val)
{
int ret;
mutex_lock(&st->lock);
ret = __adrf6780_spi_update_bits(st, ADRF6780_REG_ADC_CONTROL,
ADRF6780_ADC_EN_MSK |
ADRF6780_ADC_CLOCK_EN_MSK |
ADRF6780_ADC_START_MSK,
FIELD_PREP(ADRF6780_ADC_EN_MSK, 1) |
FIELD_PREP(ADRF6780_ADC_CLOCK_EN_MSK, 1) |
FIELD_PREP(ADRF6780_ADC_START_MSK, 1));
if (ret)
goto exit;
/* Recommended delay for the ADC to be ready */
usleep_range(200, 250);
ret = __adrf6780_spi_read(st, ADRF6780_REG_ADC_OUTPUT, read_val);
if (ret)
goto exit;
if (!(*read_val & ADRF6780_ADC_STATUS_MSK)) {
ret = -EINVAL;
goto exit;
}
ret = __adrf6780_spi_update_bits(st, ADRF6780_REG_ADC_CONTROL,
ADRF6780_ADC_START_MSK,
FIELD_PREP(ADRF6780_ADC_START_MSK, 0));
if (ret)
goto exit;
ret = __adrf6780_spi_read(st, ADRF6780_REG_ADC_OUTPUT, read_val);
exit:
mutex_unlock(&st->lock);
return ret;
}
static int adrf6780_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long info)
{
struct adrf6780_state *dev = iio_priv(indio_dev);
unsigned int data;
int ret;
switch (info) {
case IIO_CHAN_INFO_RAW:
ret = adrf6780_read_adc_raw(dev, &data);
if (ret)
return ret;
*val = data & ADRF6780_ADC_VALUE_MSK;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
ret = adrf6780_spi_read(dev, ADRF6780_REG_LINEARIZE, &data);
if (ret)
return ret;
*val = data & ADRF6780_RDAC_LINEARIZE_MSK;
return IIO_VAL_INT;
case IIO_CHAN_INFO_PHASE:
ret = adrf6780_spi_read(dev, ADRF6780_REG_LO_PATH, &data);
if (ret)
return ret;
switch (chan->channel2) {
case IIO_MOD_I:
*val = data & ADRF6780_I_PATH_PHASE_ACCURACY_MSK;
return IIO_VAL_INT;
case IIO_MOD_Q:
*val = FIELD_GET(ADRF6780_Q_PATH_PHASE_ACCURACY_MSK,
data);
return IIO_VAL_INT;
default:
return -EINVAL;
}
default:
return -EINVAL;
}
}
static int adrf6780_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long info)
{
struct adrf6780_state *st = iio_priv(indio_dev);
switch (info) {
case IIO_CHAN_INFO_SCALE:
return adrf6780_spi_write(st, ADRF6780_REG_LINEARIZE, val);
case IIO_CHAN_INFO_PHASE:
switch (chan->channel2) {
case IIO_MOD_I:
return adrf6780_spi_update_bits(st,
ADRF6780_REG_LO_PATH,
ADRF6780_I_PATH_PHASE_ACCURACY_MSK,
FIELD_PREP(ADRF6780_I_PATH_PHASE_ACCURACY_MSK, val));
case IIO_MOD_Q:
return adrf6780_spi_update_bits(st,
ADRF6780_REG_LO_PATH,
ADRF6780_Q_PATH_PHASE_ACCURACY_MSK,
FIELD_PREP(ADRF6780_Q_PATH_PHASE_ACCURACY_MSK, val));
default:
return -EINVAL;
}
default:
return -EINVAL;
}
}
static int adrf6780_reg_access(struct iio_dev *indio_dev,
unsigned int reg,
unsigned int write_val,
unsigned int *read_val)
{
struct adrf6780_state *st = iio_priv(indio_dev);
if (read_val)
return adrf6780_spi_read(st, reg, read_val);
else
return adrf6780_spi_write(st, reg, write_val);
}
static const struct iio_info adrf6780_info = {
.read_raw = adrf6780_read_raw,
.write_raw = adrf6780_write_raw,
.debugfs_reg_access = &adrf6780_reg_access,
};
#define ADRF6780_CHAN_ADC(_channel) { \
.type = IIO_ALTVOLTAGE, \
.output = 0, \
.indexed = 1, \
.channel = _channel, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) \
}
#define ADRF6780_CHAN_RDAC(_channel) { \
.type = IIO_ALTVOLTAGE, \
.output = 1, \
.indexed = 1, \
.channel = _channel, \
.info_mask_separate = BIT(IIO_CHAN_INFO_SCALE) \
}
#define ADRF6780_CHAN_IQ_PHASE(_channel, rf_comp) { \
.type = IIO_ALTVOLTAGE, \
.modified = 1, \
.output = 1, \
.indexed = 1, \
.channel2 = IIO_MOD_##rf_comp, \
.channel = _channel, \
.info_mask_separate = BIT(IIO_CHAN_INFO_PHASE) \
}
static const struct iio_chan_spec adrf6780_channels[] = {
ADRF6780_CHAN_ADC(0),
ADRF6780_CHAN_RDAC(0),
ADRF6780_CHAN_IQ_PHASE(0, I),
ADRF6780_CHAN_IQ_PHASE(0, Q),
};
static int adrf6780_reset(struct adrf6780_state *st)
{
int ret;
struct spi_device *spi = st->spi;
ret = __adrf6780_spi_update_bits(st, ADRF6780_REG_CONTROL,
ADRF6780_SOFT_RESET_MSK,
FIELD_PREP(ADRF6780_SOFT_RESET_MSK, 1));
if (ret) {
dev_err(&spi->dev, "ADRF6780 SPI software reset failed.\n");
return ret;
}
ret = __adrf6780_spi_update_bits(st, ADRF6780_REG_CONTROL,
ADRF6780_SOFT_RESET_MSK,
FIELD_PREP(ADRF6780_SOFT_RESET_MSK, 0));
if (ret) {
dev_err(&spi->dev, "ADRF6780 SPI software reset disable failed.\n");
return ret;
}
return 0;
}
static int adrf6780_init(struct adrf6780_state *st)
{
int ret;
unsigned int chip_id, enable_reg, enable_reg_msk;
struct spi_device *spi = st->spi;
/* Perform a software reset */
ret = adrf6780_reset(st);
if (ret)
return ret;
ret = __adrf6780_spi_read(st, ADRF6780_REG_CONTROL, &chip_id);
if (ret)
return ret;
chip_id = FIELD_GET(ADRF6780_CHIP_ID_MSK, chip_id);
if (chip_id != ADRF6780_CHIP_ID) {
dev_err(&spi->dev, "ADRF6780 Invalid Chip ID.\n");
return -EINVAL;
}
enable_reg_msk = ADRF6780_VGA_BUFFER_EN_MSK |
ADRF6780_DETECTOR_EN_MSK |
ADRF6780_LO_BUFFER_EN_MSK |
ADRF6780_IF_MODE_EN_MSK |
ADRF6780_IQ_MODE_EN_MSK |
ADRF6780_LO_X2_EN_MSK |
ADRF6780_LO_PPF_EN_MSK |
ADRF6780_LO_EN_MSK |
ADRF6780_UC_BIAS_EN_MSK;
enable_reg = FIELD_PREP(ADRF6780_VGA_BUFFER_EN_MSK, st->vga_buff_en) |
FIELD_PREP(ADRF6780_DETECTOR_EN_MSK, 1) |
FIELD_PREP(ADRF6780_LO_BUFFER_EN_MSK, st->lo_buff_en) |
FIELD_PREP(ADRF6780_IF_MODE_EN_MSK, st->if_mode_en) |
FIELD_PREP(ADRF6780_IQ_MODE_EN_MSK, st->iq_mode_en) |
FIELD_PREP(ADRF6780_LO_X2_EN_MSK, st->lo_x2_en) |
FIELD_PREP(ADRF6780_LO_PPF_EN_MSK, st->lo_ppf_en) |
FIELD_PREP(ADRF6780_LO_EN_MSK, st->lo_en) |
FIELD_PREP(ADRF6780_UC_BIAS_EN_MSK, st->uc_bias_en);
ret = __adrf6780_spi_update_bits(st, ADRF6780_REG_ENABLE,
enable_reg_msk, enable_reg);
if (ret)
return ret;
ret = __adrf6780_spi_update_bits(st, ADRF6780_REG_LO_PATH,
ADRF6780_LO_SIDEBAND_MSK,
FIELD_PREP(ADRF6780_LO_SIDEBAND_MSK, st->lo_sideband));
if (ret)
return ret;
return __adrf6780_spi_update_bits(st, ADRF6780_REG_ADC_CONTROL,
ADRF6780_VDET_OUTPUT_SELECT_MSK,
FIELD_PREP(ADRF6780_VDET_OUTPUT_SELECT_MSK, st->vdet_out_en));
}
static void adrf6780_properties_parse(struct adrf6780_state *st)
{
struct spi_device *spi = st->spi;
st->vga_buff_en = device_property_read_bool(&spi->dev, "adi,vga-buff-en");
st->lo_buff_en = device_property_read_bool(&spi->dev, "adi,lo-buff-en");
st->if_mode_en = device_property_read_bool(&spi->dev, "adi,if-mode-en");
st->iq_mode_en = device_property_read_bool(&spi->dev, "adi,iq-mode-en");
st->lo_x2_en = device_property_read_bool(&spi->dev, "adi,lo-x2-en");
st->lo_ppf_en = device_property_read_bool(&spi->dev, "adi,lo-ppf-en");
st->lo_en = device_property_read_bool(&spi->dev, "adi,lo-en");
st->uc_bias_en = device_property_read_bool(&spi->dev, "adi,uc-bias-en");
st->lo_sideband = device_property_read_bool(&spi->dev, "adi,lo-sideband");
st->vdet_out_en = device_property_read_bool(&spi->dev, "adi,vdet-out-en");
}
static void adrf6780_powerdown(void *data)
{
/* Disable all components in the Enable Register */
adrf6780_spi_write(data, ADRF6780_REG_ENABLE, 0x0);
}
static int adrf6780_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct adrf6780_state *st;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
st = iio_priv(indio_dev);
indio_dev->info = &adrf6780_info;
indio_dev->name = "adrf6780";
indio_dev->channels = adrf6780_channels;
indio_dev->num_channels = ARRAY_SIZE(adrf6780_channels);
st->spi = spi;
adrf6780_properties_parse(st);
st->clkin = devm_clk_get_enabled(&spi->dev, "lo_in");
if (IS_ERR(st->clkin))
return dev_err_probe(&spi->dev, PTR_ERR(st->clkin),
"failed to get the LO input clock\n");
mutex_init(&st->lock);
ret = adrf6780_init(st);
if (ret)
return ret;
ret = devm_add_action_or_reset(&spi->dev, adrf6780_powerdown, st);
if (ret)
return ret;
return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id adrf6780_id[] = {
{ "adrf6780", 0 },
{}
};
MODULE_DEVICE_TABLE(spi, adrf6780_id);
static const struct of_device_id adrf6780_of_match[] = {
{ .compatible = "adi,adrf6780" },
{}
};
MODULE_DEVICE_TABLE(of, adrf6780_of_match);
static struct spi_driver adrf6780_driver = {
.driver = {
.name = "adrf6780",
.of_match_table = adrf6780_of_match,
},
.probe = adrf6780_probe,
.id_table = adrf6780_id,
};
module_spi_driver(adrf6780_driver);
MODULE_AUTHOR("Antoniu Miclaus <[email protected]");
MODULE_DESCRIPTION("Analog Devices ADRF6780");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/frequency/adrf6780.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Analog Devices ADF4371 SPI Wideband Synthesizer driver
*
* Copyright 2019 Analog Devices Inc.
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gcd.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/sysfs.h>
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>
/* Registers address macro */
#define ADF4371_REG(x) (x)
/* ADF4371_REG0 */
#define ADF4371_ADDR_ASC_MSK BIT(2)
#define ADF4371_ADDR_ASC(x) FIELD_PREP(ADF4371_ADDR_ASC_MSK, x)
#define ADF4371_ADDR_ASC_R_MSK BIT(5)
#define ADF4371_ADDR_ASC_R(x) FIELD_PREP(ADF4371_ADDR_ASC_R_MSK, x)
#define ADF4371_RESET_CMD 0x81
/* ADF4371_REG17 */
#define ADF4371_FRAC2WORD_L_MSK GENMASK(7, 1)
#define ADF4371_FRAC2WORD_L(x) FIELD_PREP(ADF4371_FRAC2WORD_L_MSK, x)
#define ADF4371_FRAC1WORD_MSK BIT(0)
#define ADF4371_FRAC1WORD(x) FIELD_PREP(ADF4371_FRAC1WORD_MSK, x)
/* ADF4371_REG18 */
#define ADF4371_FRAC2WORD_H_MSK GENMASK(6, 0)
#define ADF4371_FRAC2WORD_H(x) FIELD_PREP(ADF4371_FRAC2WORD_H_MSK, x)
/* ADF4371_REG1A */
#define ADF4371_MOD2WORD_MSK GENMASK(5, 0)
#define ADF4371_MOD2WORD(x) FIELD_PREP(ADF4371_MOD2WORD_MSK, x)
/* ADF4371_REG24 */
#define ADF4371_RF_DIV_SEL_MSK GENMASK(6, 4)
#define ADF4371_RF_DIV_SEL(x) FIELD_PREP(ADF4371_RF_DIV_SEL_MSK, x)
/* ADF4371_REG25 */
#define ADF4371_MUTE_LD_MSK BIT(7)
#define ADF4371_MUTE_LD(x) FIELD_PREP(ADF4371_MUTE_LD_MSK, x)
/* ADF4371_REG32 */
#define ADF4371_TIMEOUT_MSK GENMASK(1, 0)
#define ADF4371_TIMEOUT(x) FIELD_PREP(ADF4371_TIMEOUT_MSK, x)
/* ADF4371_REG34 */
#define ADF4371_VCO_ALC_TOUT_MSK GENMASK(4, 0)
#define ADF4371_VCO_ALC_TOUT(x) FIELD_PREP(ADF4371_VCO_ALC_TOUT_MSK, x)
/* Specifications */
#define ADF4371_MIN_VCO_FREQ 4000000000ULL /* 4000 MHz */
#define ADF4371_MAX_VCO_FREQ 8000000000ULL /* 8000 MHz */
#define ADF4371_MAX_OUT_RF8_FREQ ADF4371_MAX_VCO_FREQ /* Hz */
#define ADF4371_MIN_OUT_RF8_FREQ (ADF4371_MIN_VCO_FREQ / 64) /* Hz */
#define ADF4371_MAX_OUT_RF16_FREQ (ADF4371_MAX_VCO_FREQ * 2) /* Hz */
#define ADF4371_MIN_OUT_RF16_FREQ (ADF4371_MIN_VCO_FREQ * 2) /* Hz */
#define ADF4371_MAX_OUT_RF32_FREQ (ADF4371_MAX_VCO_FREQ * 4) /* Hz */
#define ADF4371_MIN_OUT_RF32_FREQ (ADF4371_MIN_VCO_FREQ * 4) /* Hz */
#define ADF4371_MAX_FREQ_PFD 250000000UL /* Hz */
#define ADF4371_MAX_FREQ_REFIN 600000000UL /* Hz */
/* MOD1 is the primary modulus, with a fixed value of 2^25 */
#define ADF4371_MODULUS1 33554432ULL
/* MOD2 is the programmable, 14-bit auxiliary fractional modulus */
#define ADF4371_MAX_MODULUS2 BIT(14)
#define ADF4371_CHECK_RANGE(freq, range) \
((freq > ADF4371_MAX_ ## range) || (freq < ADF4371_MIN_ ## range))
enum {
ADF4371_FREQ,
ADF4371_POWER_DOWN,
ADF4371_CHANNEL_NAME
};
enum {
ADF4371_CH_RF8,
ADF4371_CH_RFAUX8,
ADF4371_CH_RF16,
ADF4371_CH_RF32
};
enum adf4371_variant {
ADF4371,
ADF4372
};
struct adf4371_pwrdown {
unsigned int reg;
unsigned int bit;
};
static const char * const adf4371_ch_names[] = {
"RF8x", "RFAUX8x", "RF16x", "RF32x"
};
static const struct adf4371_pwrdown adf4371_pwrdown_ch[4] = {
[ADF4371_CH_RF8] = { ADF4371_REG(0x25), 2 },
[ADF4371_CH_RFAUX8] = { ADF4371_REG(0x72), 3 },
[ADF4371_CH_RF16] = { ADF4371_REG(0x25), 3 },
[ADF4371_CH_RF32] = { ADF4371_REG(0x25), 4 },
};
static const struct reg_sequence adf4371_reg_defaults[] = {
{ ADF4371_REG(0x0), 0x18 },
{ ADF4371_REG(0x12), 0x40 },
{ ADF4371_REG(0x1E), 0x48 },
{ ADF4371_REG(0x20), 0x14 },
{ ADF4371_REG(0x22), 0x00 },
{ ADF4371_REG(0x23), 0x00 },
{ ADF4371_REG(0x24), 0x80 },
{ ADF4371_REG(0x25), 0x07 },
{ ADF4371_REG(0x27), 0xC5 },
{ ADF4371_REG(0x28), 0x83 },
{ ADF4371_REG(0x2C), 0x44 },
{ ADF4371_REG(0x2D), 0x11 },
{ ADF4371_REG(0x2E), 0x12 },
{ ADF4371_REG(0x2F), 0x94 },
{ ADF4371_REG(0x32), 0x04 },
{ ADF4371_REG(0x35), 0xFA },
{ ADF4371_REG(0x36), 0x30 },
{ ADF4371_REG(0x39), 0x07 },
{ ADF4371_REG(0x3A), 0x55 },
{ ADF4371_REG(0x3E), 0x0C },
{ ADF4371_REG(0x3F), 0x80 },
{ ADF4371_REG(0x40), 0x50 },
{ ADF4371_REG(0x41), 0x28 },
{ ADF4371_REG(0x47), 0xC0 },
{ ADF4371_REG(0x52), 0xF4 },
{ ADF4371_REG(0x70), 0x03 },
{ ADF4371_REG(0x71), 0x60 },
{ ADF4371_REG(0x72), 0x32 },
};
static const struct regmap_config adf4371_regmap_config = {
.reg_bits = 16,
.val_bits = 8,
.read_flag_mask = BIT(7),
};
struct adf4371_chip_info {
unsigned int num_channels;
const struct iio_chan_spec *channels;
};
struct adf4371_state {
struct spi_device *spi;
struct regmap *regmap;
struct clk *clkin;
/*
* Lock for accessing device registers. Some operations require
* multiple consecutive R/W operations, during which the device
* shouldn't be interrupted. The buffers are also shared across
* all operations so need to be protected on stand alone reads and
* writes.
*/
struct mutex lock;
const struct adf4371_chip_info *chip_info;
unsigned long clkin_freq;
unsigned long fpfd;
unsigned int integer;
unsigned int fract1;
unsigned int fract2;
unsigned int mod2;
unsigned int rf_div_sel;
unsigned int ref_div_factor;
u8 buf[10] __aligned(IIO_DMA_MINALIGN);
};
static unsigned long long adf4371_pll_fract_n_get_rate(struct adf4371_state *st,
u32 channel)
{
unsigned long long val, tmp;
unsigned int ref_div_sel;
val = (((u64)st->integer * ADF4371_MODULUS1) + st->fract1) * st->fpfd;
tmp = (u64)st->fract2 * st->fpfd;
do_div(tmp, st->mod2);
val += tmp + ADF4371_MODULUS1 / 2;
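/*
 * Adding MOD1 / 2 before the division below rounds the result to the
 * nearest Hz instead of truncating.
 */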
if (channel == ADF4371_CH_RF8 || channel == ADF4371_CH_RFAUX8)
ref_div_sel = st->rf_div_sel;
else
ref_div_sel = 0;
do_div(val, ADF4371_MODULUS1 * (1 << ref_div_sel));
if (channel == ADF4371_CH_RF16)
val <<= 1;
else if (channel == ADF4371_CH_RF32)
val <<= 2;
return val;
}
static void adf4371_pll_fract_n_compute(unsigned long long vco,
unsigned long long pfd,
unsigned int *integer,
unsigned int *fract1,
unsigned int *fract2,
unsigned int *mod2)
{
unsigned long long tmp;
u32 gcd_div;
tmp = do_div(vco, pfd);
tmp = tmp * ADF4371_MODULUS1;
*fract2 = do_div(tmp, pfd);
*integer = vco;
*fract1 = tmp;
*mod2 = pfd;
while (*mod2 > ADF4371_MAX_MODULUS2) {
*mod2 >>= 1;
*fract2 >>= 1;
}
gcd_div = gcd(*fract2, *mod2);
*mod2 /= gcd_div;
*fract2 /= gcd_div;
}
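/*
 * Worked example with illustrative numbers (not taken from a datasheet):
 * vco = 6 GHz and pfd = 122.88 MHz give integer = 48 with a 101.76 MHz
 * remainder; scaling by MOD1 = 2^25 and dividing by pfd yields
 * fract1 = 27787264 exactly, so fract2 = 0 and mod2 reduces to 1, i.e.
 * 122.88 MHz * (48 + 27787264 / 2^25) = 6 GHz.
 */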
static int adf4371_set_freq(struct adf4371_state *st, unsigned long long freq,
unsigned int channel)
{
u32 cp_bleed;
u8 int_mode = 0;
int ret;
switch (channel) {
case ADF4371_CH_RF8:
case ADF4371_CH_RFAUX8:
if (ADF4371_CHECK_RANGE(freq, OUT_RF8_FREQ))
return -EINVAL;
st->rf_div_sel = 0;
while (freq < ADF4371_MIN_VCO_FREQ) {
freq <<= 1;
st->rf_div_sel++;
}
break;
case ADF4371_CH_RF16:
/* ADF4371 RF16 8000...16000 MHz */
if (ADF4371_CHECK_RANGE(freq, OUT_RF16_FREQ))
return -EINVAL;
freq >>= 1;
break;
case ADF4371_CH_RF32:
/* ADF4371 RF32 16000...32000 MHz */
if (ADF4371_CHECK_RANGE(freq, OUT_RF32_FREQ))
return -EINVAL;
freq >>= 2;
break;
default:
return -EINVAL;
}
adf4371_pll_fract_n_compute(freq, st->fpfd, &st->integer, &st->fract1,
&st->fract2, &st->mod2);
st->buf[0] = st->integer >> 8;
st->buf[1] = 0x40; /* REG12 default */
st->buf[2] = 0x00;
st->buf[3] = st->fract1 & 0xFF;
st->buf[4] = st->fract1 >> 8;
st->buf[5] = st->fract1 >> 16;
st->buf[6] = ADF4371_FRAC2WORD_L(st->fract2 & 0x7F) |
ADF4371_FRAC1WORD(st->fract1 >> 24);
st->buf[7] = ADF4371_FRAC2WORD_H(st->fract2 >> 7);
st->buf[8] = st->mod2 & 0xFF;
st->buf[9] = ADF4371_MOD2WORD(st->mod2 >> 8);
ret = regmap_bulk_write(st->regmap, ADF4371_REG(0x11), st->buf, 10);
if (ret < 0)
return ret;
/*
* The R counter allows the input reference frequency to be
* divided down to produce the reference clock to the PFD
*/
ret = regmap_write(st->regmap, ADF4371_REG(0x1F), st->ref_div_factor);
if (ret < 0)
return ret;
ret = regmap_update_bits(st->regmap, ADF4371_REG(0x24),
ADF4371_RF_DIV_SEL_MSK,
ADF4371_RF_DIV_SEL(st->rf_div_sel));
if (ret < 0)
return ret;
cp_bleed = DIV_ROUND_UP(400 * 1750, st->integer * 375);
cp_bleed = clamp(cp_bleed, 1U, 255U);
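/*
 * e.g. st->integer = 48 (illustrative) gives
 * DIV_ROUND_UP(700000, 18000) = 39 before clamping.
 */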
ret = regmap_write(st->regmap, ADF4371_REG(0x26), cp_bleed);
if (ret < 0)
return ret;
/*
* Set to 1 when in INT mode (when FRAC1 = FRAC2 = 0),
* and set to 0 when in FRAC mode.
*/
if (st->fract1 == 0 && st->fract2 == 0)
int_mode = 0x01;
ret = regmap_write(st->regmap, ADF4371_REG(0x2B), int_mode);
if (ret < 0)
return ret;
return regmap_write(st->regmap, ADF4371_REG(0x10), st->integer & 0xFF);
}
static ssize_t adf4371_read(struct iio_dev *indio_dev,
uintptr_t private,
const struct iio_chan_spec *chan,
char *buf)
{
struct adf4371_state *st = iio_priv(indio_dev);
unsigned long long val = 0;
unsigned int readval, reg, bit;
int ret;
switch ((u32)private) {
case ADF4371_FREQ:
val = adf4371_pll_fract_n_get_rate(st, chan->channel);
ret = regmap_read(st->regmap, ADF4371_REG(0x7C), &readval);
if (ret < 0)
break;
if (readval == 0x00) {
dev_dbg(&st->spi->dev, "PLL un-locked\n");
ret = -EBUSY;
}
break;
case ADF4371_POWER_DOWN:
reg = adf4371_pwrdown_ch[chan->channel].reg;
bit = adf4371_pwrdown_ch[chan->channel].bit;
ret = regmap_read(st->regmap, reg, &readval);
if (ret < 0)
break;
val = !(readval & BIT(bit));
break;
case ADF4371_CHANNEL_NAME:
return sprintf(buf, "%s\n", adf4371_ch_names[chan->channel]);
default:
ret = -EINVAL;
val = 0;
break;
}
return ret < 0 ? ret : sprintf(buf, "%llu\n", val);
}
static ssize_t adf4371_write(struct iio_dev *indio_dev,
uintptr_t private,
const struct iio_chan_spec *chan,
const char *buf, size_t len)
{
struct adf4371_state *st = iio_priv(indio_dev);
unsigned long long freq;
bool power_down;
unsigned int bit, readval, reg;
int ret;
mutex_lock(&st->lock);
switch ((u32)private) {
case ADF4371_FREQ:
ret = kstrtoull(buf, 10, &freq);
if (ret)
break;
ret = adf4371_set_freq(st, freq, chan->channel);
break;
case ADF4371_POWER_DOWN:
ret = kstrtobool(buf, &power_down);
if (ret)
break;
reg = adf4371_pwrdown_ch[chan->channel].reg;
bit = adf4371_pwrdown_ch[chan->channel].bit;
ret = regmap_read(st->regmap, reg, &readval);
if (ret < 0)
break;
readval &= ~BIT(bit);
readval |= (!power_down << bit);
ret = regmap_write(st->regmap, reg, readval);
break;
default:
ret = -EINVAL;
break;
}
mutex_unlock(&st->lock);
return ret ? ret : len;
}
#define _ADF4371_EXT_INFO(_name, _ident) { \
.name = _name, \
.read = adf4371_read, \
.write = adf4371_write, \
.private = _ident, \
.shared = IIO_SEPARATE, \
}
static const struct iio_chan_spec_ext_info adf4371_ext_info[] = {
/*
* Ideally we would use IIO_CHAN_INFO_FREQUENCY, but the values needed
* to cover the entire frequency range in Hz exceed 2^32. Working
* around that with a scale factor would be a bit ugly.
*/
_ADF4371_EXT_INFO("frequency", ADF4371_FREQ),
_ADF4371_EXT_INFO("powerdown", ADF4371_POWER_DOWN),
_ADF4371_EXT_INFO("name", ADF4371_CHANNEL_NAME),
{ },
};
#define ADF4371_CHANNEL(index) { \
.type = IIO_ALTVOLTAGE, \
.output = 1, \
.channel = index, \
.ext_info = adf4371_ext_info, \
.indexed = 1, \
}
static const struct iio_chan_spec adf4371_chan[] = {
ADF4371_CHANNEL(ADF4371_CH_RF8),
ADF4371_CHANNEL(ADF4371_CH_RFAUX8),
ADF4371_CHANNEL(ADF4371_CH_RF16),
ADF4371_CHANNEL(ADF4371_CH_RF32),
};
static const struct adf4371_chip_info adf4371_chip_info[] = {
[ADF4371] = {
.channels = adf4371_chan,
.num_channels = 4,
},
[ADF4372] = {
.channels = adf4371_chan,
.num_channels = 3,
}
};
static int adf4371_reg_access(struct iio_dev *indio_dev,
unsigned int reg,
unsigned int writeval,
unsigned int *readval)
{
struct adf4371_state *st = iio_priv(indio_dev);
if (readval)
return regmap_read(st->regmap, reg, readval);
else
return regmap_write(st->regmap, reg, writeval);
}
static const struct iio_info adf4371_info = {
.debugfs_reg_access = &adf4371_reg_access,
};
static int adf4371_setup(struct adf4371_state *st)
{
unsigned int synth_timeout = 2, timeout = 1, vco_alc_timeout = 1;
unsigned int vco_band_div, tmp;
int ret;
/* Perform a software reset */
ret = regmap_write(st->regmap, ADF4371_REG(0x0), ADF4371_RESET_CMD);
if (ret < 0)
return ret;
ret = regmap_multi_reg_write(st->regmap, adf4371_reg_defaults,
ARRAY_SIZE(adf4371_reg_defaults));
if (ret < 0)
return ret;
/* Mute to Lock Detect */
if (device_property_read_bool(&st->spi->dev, "adi,mute-till-lock-en")) {
ret = regmap_update_bits(st->regmap, ADF4371_REG(0x25),
ADF4371_MUTE_LD_MSK,
ADF4371_MUTE_LD(1));
if (ret < 0)
return ret;
}
/* Set address in ascending order, so the bulk_write() will work */
ret = regmap_update_bits(st->regmap, ADF4371_REG(0x0),
ADF4371_ADDR_ASC_MSK | ADF4371_ADDR_ASC_R_MSK,
ADF4371_ADDR_ASC(1) | ADF4371_ADDR_ASC_R(1));
if (ret < 0)
return ret;
/*
* Calculate and maximize PFD frequency
* fPFD = REFIN × ((1 + D)/(R × (1 + T)))
* Where D is the REFIN doubler bit, T is the reference divide by 2,
* R is the reference division factor
* TODO: it is assumed D and T equal 0.
*/
do {
st->ref_div_factor++;
st->fpfd = st->clkin_freq / st->ref_div_factor;
} while (st->fpfd > ADF4371_MAX_FREQ_PFD);
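/*
 * Illustrative examples: a 500 MHz reference needs ref_div_factor = 2
 * (fPFD = 250 MHz), while a 122.88 MHz reference is used as-is (R = 1).
 */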
/* Calculate Timeouts */
vco_band_div = DIV_ROUND_UP(st->fpfd, 2400000U);
tmp = DIV_ROUND_CLOSEST(st->fpfd, 1000000U);
do {
timeout++;
if (timeout > 1023) {
timeout = 2;
synth_timeout++;
}
} while (synth_timeout * 1024 + timeout <= 20 * tmp);
do {
vco_alc_timeout++;
} while (vco_alc_timeout * 1024 - timeout <= 50 * tmp);
st->buf[0] = vco_band_div;
st->buf[1] = timeout & 0xFF;
st->buf[2] = ADF4371_TIMEOUT(timeout >> 8) | 0x04;
st->buf[3] = synth_timeout;
st->buf[4] = ADF4371_VCO_ALC_TOUT(vco_alc_timeout);
return regmap_bulk_write(st->regmap, ADF4371_REG(0x30), st->buf, 5);
}
static int adf4371_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
struct iio_dev *indio_dev;
struct adf4371_state *st;
struct regmap *regmap;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
regmap = devm_regmap_init_spi(spi, &adf4371_regmap_config);
if (IS_ERR(regmap)) {
dev_err(&spi->dev, "Error initializing spi regmap: %ld\n",
PTR_ERR(regmap));
return PTR_ERR(regmap);
}
st = iio_priv(indio_dev);
spi_set_drvdata(spi, indio_dev);
st->spi = spi;
st->regmap = regmap;
mutex_init(&st->lock);
st->chip_info = &adf4371_chip_info[id->driver_data];
indio_dev->name = id->name;
indio_dev->info = &adf4371_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
st->clkin = devm_clk_get_enabled(&spi->dev, "clkin");
if (IS_ERR(st->clkin))
return PTR_ERR(st->clkin);
st->clkin_freq = clk_get_rate(st->clkin);
ret = adf4371_setup(st);
if (ret < 0) {
dev_err(&spi->dev, "ADF4371 setup failed\n");
return ret;
}
return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id adf4371_id_table[] = {
{ "adf4371", ADF4371 },
{ "adf4372", ADF4372 },
{}
};
MODULE_DEVICE_TABLE(spi, adf4371_id_table);
static const struct of_device_id adf4371_of_match[] = {
{ .compatible = "adi,adf4371" },
{ .compatible = "adi,adf4372" },
{ },
};
MODULE_DEVICE_TABLE(of, adf4371_of_match);
static struct spi_driver adf4371_driver = {
.driver = {
.name = "adf4371",
.of_match_table = adf4371_of_match,
},
.probe = adf4371_probe,
.id_table = adf4371_id_table,
};
module_spi_driver(adf4371_driver);
MODULE_AUTHOR("Stefan Popa <[email protected]>");
MODULE_DESCRIPTION("Analog Devices ADF4371 SPI PLL");
MODULE_LICENSE("GPL");
| linux-master | drivers/iio/frequency/adf4371.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ADMV1014 driver
*
* Copyright 2022 Analog Devices Inc.
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/device.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/notifier.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <linux/units.h>
#include <asm/unaligned.h>
/* ADMV1014 Register Map */
#define ADMV1014_REG_SPI_CONTROL 0x00
#define ADMV1014_REG_ALARM 0x01
#define ADMV1014_REG_ALARM_MASKS 0x02
#define ADMV1014_REG_ENABLE 0x03
#define ADMV1014_REG_QUAD 0x04
#define ADMV1014_REG_LO_AMP_PHASE_ADJUST1 0x05
#define ADMV1014_REG_MIXER 0x07
#define ADMV1014_REG_IF_AMP 0x08
#define ADMV1014_REG_IF_AMP_BB_AMP 0x09
#define ADMV1014_REG_BB_AMP_AGC 0x0A
#define ADMV1014_REG_VVA_TEMP_COMP 0x0B
/* ADMV1014_REG_SPI_CONTROL Map */
#define ADMV1014_PARITY_EN_MSK BIT(15)
#define ADMV1014_SPI_SOFT_RESET_MSK BIT(14)
#define ADMV1014_CHIP_ID_MSK GENMASK(11, 4)
#define ADMV1014_CHIP_ID 0x9
#define ADMV1014_REVISION_ID_MSK GENMASK(3, 0)
/* ADMV1014_REG_ALARM Map */
#define ADMV1014_PARITY_ERROR_MSK BIT(15)
#define ADMV1014_TOO_FEW_ERRORS_MSK BIT(14)
#define ADMV1014_TOO_MANY_ERRORS_MSK BIT(13)
#define ADMV1014_ADDRESS_RANGE_ERROR_MSK BIT(12)
/* ADMV1014_REG_ENABLE Map */
#define ADMV1014_IBIAS_PD_MSK BIT(14)
#define ADMV1014_P1DB_COMPENSATION_MSK GENMASK(13, 12)
#define ADMV1014_IF_AMP_PD_MSK BIT(11)
#define ADMV1014_QUAD_BG_PD_MSK BIT(9)
#define ADMV1014_BB_AMP_PD_MSK BIT(8)
#define ADMV1014_QUAD_IBIAS_PD_MSK BIT(7)
#define ADMV1014_DET_EN_MSK BIT(6)
#define ADMV1014_BG_PD_MSK BIT(5)
/* ADMV1014_REG_QUAD Map */
#define ADMV1014_QUAD_SE_MODE_MSK GENMASK(9, 6)
#define ADMV1014_QUAD_FILTERS_MSK GENMASK(3, 0)
/* ADMV1014_REG_LO_AMP_PHASE_ADJUST1 Map */
#define ADMV1014_LOAMP_PH_ADJ_I_FINE_MSK GENMASK(15, 9)
#define ADMV1014_LOAMP_PH_ADJ_Q_FINE_MSK GENMASK(8, 2)
/* ADMV1014_REG_MIXER Map */
#define ADMV1014_MIXER_VGATE_MSK GENMASK(15, 9)
#define ADMV1014_DET_PROG_MSK GENMASK(6, 0)
/* ADMV1014_REG_IF_AMP Map */
#define ADMV1014_IF_AMP_COARSE_GAIN_I_MSK GENMASK(11, 8)
#define ADMV1014_IF_AMP_FINE_GAIN_Q_MSK GENMASK(7, 4)
#define ADMV1014_IF_AMP_FINE_GAIN_I_MSK GENMASK(3, 0)
/* ADMV1014_REG_IF_AMP_BB_AMP Map */
#define ADMV1014_IF_AMP_COARSE_GAIN_Q_MSK GENMASK(15, 12)
#define ADMV1014_BB_AMP_OFFSET_Q_MSK GENMASK(9, 5)
#define ADMV1014_BB_AMP_OFFSET_I_MSK GENMASK(4, 0)
/* ADMV1014_REG_BB_AMP_AGC Map */
#define ADMV1014_BB_AMP_REF_GEN_MSK GENMASK(6, 3)
#define ADMV1014_BB_AMP_GAIN_CTRL_MSK GENMASK(2, 1)
#define ADMV1014_BB_SWITCH_HIGH_LOW_CM_MSK BIT(0)
/* ADMV1014_REG_VVA_TEMP_COMP Map */
#define ADMV1014_VVA_TEMP_COMP_MSK GENMASK(15, 0)
/* ADMV1014 Miscellaneous Defines */
#define ADMV1014_READ BIT(7)
#define ADMV1014_REG_ADDR_READ_MSK GENMASK(6, 1)
#define ADMV1014_REG_ADDR_WRITE_MSK GENMASK(22, 17)
#define ADMV1014_REG_DATA_MSK GENMASK(16, 1)
#define ADMV1014_NUM_REGULATORS 9
enum {
ADMV1014_IQ_MODE,
ADMV1014_IF_MODE,
};
enum {
ADMV1014_SE_MODE_POS = 6,
ADMV1014_SE_MODE_NEG = 9,
ADMV1014_SE_MODE_DIFF = 12,
};
enum {
ADMV1014_CALIBSCALE_COARSE,
ADMV1014_CALIBSCALE_FINE,
};
static const int detector_table[] = {0, 1, 2, 4, 8, 16, 32, 64};
static const char * const input_mode_names[] = { "iq", "if" };
static const char * const quad_se_mode_names[] = { "se-pos", "se-neg", "diff" };
struct admv1014_state {
struct spi_device *spi;
struct clk *clkin;
struct notifier_block nb;
/* Protect against concurrent accesses to the device and to data*/
struct mutex lock;
struct regulator_bulk_data regulators[ADMV1014_NUM_REGULATORS];
unsigned int input_mode;
unsigned int quad_se_mode;
unsigned int p1db_comp;
bool det_en;
u8 data[3] __aligned(IIO_DMA_MINALIGN);
};
static const int mixer_vgate_table[] = {106, 107, 108, 110, 111, 112, 113, 114,
117, 118, 119, 120, 122, 123, 44, 45};
static int __admv1014_spi_read(struct admv1014_state *st, unsigned int reg,
unsigned int *val)
{
struct spi_transfer t = {};
int ret;
st->data[0] = ADMV1014_READ | FIELD_PREP(ADMV1014_REG_ADDR_READ_MSK, reg);
st->data[1] = 0;
st->data[2] = 0;
t.rx_buf = &st->data[0];
t.tx_buf = &st->data[0];
t.len = sizeof(st->data);
ret = spi_sync_transfer(st->spi, &t, 1);
if (ret)
return ret;
*val = FIELD_GET(ADMV1014_REG_DATA_MSK, get_unaligned_be24(&st->data[0]));
return ret;
}
static int admv1014_spi_read(struct admv1014_state *st, unsigned int reg,
unsigned int *val)
{
int ret;
mutex_lock(&st->lock);
ret = __admv1014_spi_read(st, reg, val);
mutex_unlock(&st->lock);
return ret;
}
static int __admv1014_spi_write(struct admv1014_state *st,
unsigned int reg,
unsigned int val)
{
put_unaligned_be24(FIELD_PREP(ADMV1014_REG_DATA_MSK, val) |
FIELD_PREP(ADMV1014_REG_ADDR_WRITE_MSK, reg), &st->data[0]);
return spi_write(st->spi, &st->data[0], 3);
}
static int admv1014_spi_write(struct admv1014_state *st, unsigned int reg,
unsigned int val)
{
int ret;
mutex_lock(&st->lock);
ret = __admv1014_spi_write(st, reg, val);
mutex_unlock(&st->lock);
return ret;
}
static int __admv1014_spi_update_bits(struct admv1014_state *st, unsigned int reg,
unsigned int mask, unsigned int val)
{
unsigned int data, temp;
int ret;
ret = __admv1014_spi_read(st, reg, &data);
if (ret)
return ret;
temp = (data & ~mask) | (val & mask);
return __admv1014_spi_write(st, reg, temp);
}
static int admv1014_spi_update_bits(struct admv1014_state *st, unsigned int reg,
unsigned int mask, unsigned int val)
{
int ret;
mutex_lock(&st->lock);
ret = __admv1014_spi_update_bits(st, reg, mask, val);
mutex_unlock(&st->lock);
return ret;
}
static int admv1014_update_quad_filters(struct admv1014_state *st)
{
unsigned int filt_raw;
u64 rate = clk_get_rate(st->clkin);
if (rate >= (5400 * HZ_PER_MHZ) && rate <= (7000 * HZ_PER_MHZ))
filt_raw = 15;
else if (rate > (7000 * HZ_PER_MHZ) && rate <= (8000 * HZ_PER_MHZ))
filt_raw = 10;
else if (rate > (8000 * HZ_PER_MHZ) && rate <= (9200 * HZ_PER_MHZ))
filt_raw = 5;
else
filt_raw = 0;
return __admv1014_spi_update_bits(st, ADMV1014_REG_QUAD,
ADMV1014_QUAD_FILTERS_MSK,
FIELD_PREP(ADMV1014_QUAD_FILTERS_MSK, filt_raw));
}
static int admv1014_update_vcm_settings(struct admv1014_state *st)
{
unsigned int i, vcm_mv, vcm_comp, bb_sw_hl_cm;
int ret;
vcm_mv = regulator_get_voltage(st->regulators[0].consumer) / 1000;
for (i = 0; i < ARRAY_SIZE(mixer_vgate_table); i++) {
vcm_comp = 1050 + mult_frac(i, 450, 8);
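/*
 * Table entries step up from 1050 mV in 450/8 = 56.25 mV increments;
 * e.g. index 8 corresponds to a 1500 mV vcm supply (illustrative value).
 */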
if (vcm_mv != vcm_comp)
continue;
ret = __admv1014_spi_update_bits(st, ADMV1014_REG_MIXER,
ADMV1014_MIXER_VGATE_MSK,
FIELD_PREP(ADMV1014_MIXER_VGATE_MSK,
mixer_vgate_table[i]));
if (ret)
return ret;
bb_sw_hl_cm = ~(i / 8);
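/*
 * i / 8 picks the table half; the bitwise NOT, masked down to a single
 * bit by FIELD_PREP() below, maps the lower half (i < 8) to 1 and the
 * upper half to 0 for the high/low common-mode switch.
 */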
bb_sw_hl_cm = FIELD_PREP(ADMV1014_BB_SWITCH_HIGH_LOW_CM_MSK, bb_sw_hl_cm);
return __admv1014_spi_update_bits(st, ADMV1014_REG_BB_AMP_AGC,
ADMV1014_BB_AMP_REF_GEN_MSK |
ADMV1014_BB_SWITCH_HIGH_LOW_CM_MSK,
FIELD_PREP(ADMV1014_BB_AMP_REF_GEN_MSK, i) |
bb_sw_hl_cm);
}
return -EINVAL;
}
static int admv1014_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long info)
{
struct admv1014_state *st = iio_priv(indio_dev);
unsigned int data;
int ret;
switch (info) {
case IIO_CHAN_INFO_OFFSET:
ret = admv1014_spi_read(st, ADMV1014_REG_IF_AMP_BB_AMP, &data);
if (ret)
return ret;
if (chan->channel2 == IIO_MOD_I)
*val = FIELD_GET(ADMV1014_BB_AMP_OFFSET_I_MSK, data);
else
*val = FIELD_GET(ADMV1014_BB_AMP_OFFSET_Q_MSK, data);
return IIO_VAL_INT;
case IIO_CHAN_INFO_PHASE:
ret = admv1014_spi_read(st, ADMV1014_REG_LO_AMP_PHASE_ADJUST1, &data);
if (ret)
return ret;
if (chan->channel2 == IIO_MOD_I)
*val = FIELD_GET(ADMV1014_LOAMP_PH_ADJ_I_FINE_MSK, data);
else
*val = FIELD_GET(ADMV1014_LOAMP_PH_ADJ_Q_FINE_MSK, data);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
ret = admv1014_spi_read(st, ADMV1014_REG_MIXER, &data);
if (ret)
return ret;
*val = FIELD_GET(ADMV1014_DET_PROG_MSK, data);
return IIO_VAL_INT;
case IIO_CHAN_INFO_CALIBSCALE:
ret = admv1014_spi_read(st, ADMV1014_REG_BB_AMP_AGC, &data);
if (ret)
return ret;
*val = FIELD_GET(ADMV1014_BB_AMP_GAIN_CTRL_MSK, data);
return IIO_VAL_INT;
default:
return -EINVAL;
}
}
static int admv1014_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long info)
{
int data;
unsigned int msk;
struct admv1014_state *st = iio_priv(indio_dev);
switch (info) {
case IIO_CHAN_INFO_OFFSET:
if (chan->channel2 == IIO_MOD_I) {
msk = ADMV1014_BB_AMP_OFFSET_I_MSK;
data = FIELD_PREP(ADMV1014_BB_AMP_OFFSET_I_MSK, val);
} else {
msk = ADMV1014_BB_AMP_OFFSET_Q_MSK;
data = FIELD_PREP(ADMV1014_BB_AMP_OFFSET_Q_MSK, val);
}
return admv1014_spi_update_bits(st, ADMV1014_REG_IF_AMP_BB_AMP, msk, data);
case IIO_CHAN_INFO_PHASE:
if (chan->channel2 == IIO_MOD_I) {
msk = ADMV1014_LOAMP_PH_ADJ_I_FINE_MSK;
data = FIELD_PREP(ADMV1014_LOAMP_PH_ADJ_I_FINE_MSK, val);
} else {
msk = ADMV1014_LOAMP_PH_ADJ_Q_FINE_MSK;
data = FIELD_PREP(ADMV1014_LOAMP_PH_ADJ_Q_FINE_MSK, val);
}
return admv1014_spi_update_bits(st, ADMV1014_REG_LO_AMP_PHASE_ADJUST1, msk, data);
case IIO_CHAN_INFO_SCALE:
return admv1014_spi_update_bits(st, ADMV1014_REG_MIXER,
ADMV1014_DET_PROG_MSK,
FIELD_PREP(ADMV1014_DET_PROG_MSK, val));
case IIO_CHAN_INFO_CALIBSCALE:
return admv1014_spi_update_bits(st, ADMV1014_REG_BB_AMP_AGC,
ADMV1014_BB_AMP_GAIN_CTRL_MSK,
FIELD_PREP(ADMV1014_BB_AMP_GAIN_CTRL_MSK, val));
default:
return -EINVAL;
}
}
static ssize_t admv1014_read(struct iio_dev *indio_dev,
uintptr_t private,
const struct iio_chan_spec *chan,
char *buf)
{
struct admv1014_state *st = iio_priv(indio_dev);
unsigned int data;
int ret;
switch (private) {
case ADMV1014_CALIBSCALE_COARSE:
if (chan->channel2 == IIO_MOD_I) {
ret = admv1014_spi_read(st, ADMV1014_REG_IF_AMP, &data);
if (ret)
return ret;
data = FIELD_GET(ADMV1014_IF_AMP_COARSE_GAIN_I_MSK, data);
} else {
ret = admv1014_spi_read(st, ADMV1014_REG_IF_AMP_BB_AMP, &data);
if (ret)
return ret;
data = FIELD_GET(ADMV1014_IF_AMP_COARSE_GAIN_Q_MSK, data);
}
break;
case ADMV1014_CALIBSCALE_FINE:
ret = admv1014_spi_read(st, ADMV1014_REG_IF_AMP, &data);
if (ret)
return ret;
if (chan->channel2 == IIO_MOD_I)
data = FIELD_GET(ADMV1014_IF_AMP_FINE_GAIN_I_MSK, data);
else
data = FIELD_GET(ADMV1014_IF_AMP_FINE_GAIN_Q_MSK, data);
break;
default:
return -EINVAL;
}
return sysfs_emit(buf, "%u\n", data);
}
static ssize_t admv1014_write(struct iio_dev *indio_dev,
uintptr_t private,
const struct iio_chan_spec *chan,
const char *buf, size_t len)
{
struct admv1014_state *st = iio_priv(indio_dev);
unsigned int data, addr, msk;
int ret;
ret = kstrtouint(buf, 10, &data);
if (ret)
return ret;
switch (private) {
case ADMV1014_CALIBSCALE_COARSE:
if (chan->channel2 == IIO_MOD_I) {
addr = ADMV1014_REG_IF_AMP;
msk = ADMV1014_IF_AMP_COARSE_GAIN_I_MSK;
data = FIELD_PREP(ADMV1014_IF_AMP_COARSE_GAIN_I_MSK, data);
} else {
addr = ADMV1014_REG_IF_AMP_BB_AMP;
msk = ADMV1014_IF_AMP_COARSE_GAIN_Q_MSK;
data = FIELD_PREP(ADMV1014_IF_AMP_COARSE_GAIN_Q_MSK, data);
}
break;
case ADMV1014_CALIBSCALE_FINE:
addr = ADMV1014_REG_IF_AMP;
if (chan->channel2 == IIO_MOD_I) {
msk = ADMV1014_IF_AMP_FINE_GAIN_I_MSK;
data = FIELD_PREP(ADMV1014_IF_AMP_FINE_GAIN_I_MSK, data);
} else {
msk = ADMV1014_IF_AMP_FINE_GAIN_Q_MSK;
data = FIELD_PREP(ADMV1014_IF_AMP_FINE_GAIN_Q_MSK, data);
}
break;
default:
return -EINVAL;
}
ret = admv1014_spi_update_bits(st, addr, msk, data);
return ret ? ret : len;
}
static int admv1014_read_avail(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
const int **vals, int *type, int *length,
long info)
{
switch (info) {
case IIO_CHAN_INFO_SCALE:
*vals = detector_table;
*type = IIO_VAL_INT;
*length = ARRAY_SIZE(detector_table);
return IIO_AVAIL_LIST;
default:
return -EINVAL;
}
}
static int admv1014_reg_access(struct iio_dev *indio_dev,
unsigned int reg,
unsigned int write_val,
unsigned int *read_val)
{
struct admv1014_state *st = iio_priv(indio_dev);
if (read_val)
return admv1014_spi_read(st, reg, read_val);
else
return admv1014_spi_write(st, reg, write_val);
}
static const struct iio_info admv1014_info = {
.read_raw = admv1014_read_raw,
.write_raw = admv1014_write_raw,
.read_avail = &admv1014_read_avail,
.debugfs_reg_access = &admv1014_reg_access,
};
static const char * const admv1014_reg_name[] = {
"vcm", "vcc-if-bb", "vcc-vga", "vcc-vva", "vcc-lna-3p3",
"vcc-lna-1p5", "vcc-bg", "vcc-quad", "vcc-mixer"
};
static int admv1014_freq_change(struct notifier_block *nb, unsigned long action, void *data)
{
struct admv1014_state *st = container_of(nb, struct admv1014_state, nb);
int ret;
if (action == POST_RATE_CHANGE) {
mutex_lock(&st->lock);
ret = notifier_from_errno(admv1014_update_quad_filters(st));
mutex_unlock(&st->lock);
return ret;
}
return NOTIFY_OK;
}
#define _ADMV1014_EXT_INFO(_name, _shared, _ident) { \
.name = _name, \
.read = admv1014_read, \
.write = admv1014_write, \
.private = _ident, \
.shared = _shared, \
}
static const struct iio_chan_spec_ext_info admv1014_ext_info[] = {
_ADMV1014_EXT_INFO("calibscale_coarse", IIO_SEPARATE, ADMV1014_CALIBSCALE_COARSE),
_ADMV1014_EXT_INFO("calibscale_fine", IIO_SEPARATE, ADMV1014_CALIBSCALE_FINE),
{ }
};
#define ADMV1014_CHAN_IQ(_channel, rf_comp) { \
.type = IIO_ALTVOLTAGE, \
.modified = 1, \
.output = 0, \
.indexed = 1, \
.channel2 = IIO_MOD_##rf_comp, \
.channel = _channel, \
.info_mask_separate = BIT(IIO_CHAN_INFO_PHASE) | \
BIT(IIO_CHAN_INFO_OFFSET), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBSCALE), \
}
#define ADMV1014_CHAN_IF(_channel, rf_comp) { \
.type = IIO_ALTVOLTAGE, \
.modified = 1, \
.output = 0, \
.indexed = 1, \
.channel2 = IIO_MOD_##rf_comp, \
.channel = _channel, \
.info_mask_separate = BIT(IIO_CHAN_INFO_PHASE) | \
BIT(IIO_CHAN_INFO_OFFSET), \
}
#define ADMV1014_CHAN_POWER(_channel) { \
.type = IIO_POWER, \
.output = 0, \
.indexed = 1, \
.channel = _channel, \
.info_mask_separate = BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE), \
}
#define ADMV1014_CHAN_CALIBSCALE(_channel, rf_comp, _admv1014_ext_info) { \
.type = IIO_ALTVOLTAGE, \
.modified = 1, \
.output = 0, \
.indexed = 1, \
.channel2 = IIO_MOD_##rf_comp, \
.channel = _channel, \
.ext_info = _admv1014_ext_info, \
}
static const struct iio_chan_spec admv1014_channels_iq[] = {
ADMV1014_CHAN_IQ(0, I),
ADMV1014_CHAN_IQ(0, Q),
ADMV1014_CHAN_POWER(0),
};
static const struct iio_chan_spec admv1014_channels_if[] = {
ADMV1014_CHAN_IF(0, I),
ADMV1014_CHAN_IF(0, Q),
ADMV1014_CHAN_CALIBSCALE(0, I, admv1014_ext_info),
ADMV1014_CHAN_CALIBSCALE(0, Q, admv1014_ext_info),
ADMV1014_CHAN_POWER(0),
};
static void admv1014_clk_disable(void *data)
{
clk_disable_unprepare(data);
}
static void admv1014_reg_disable(void *data)
{
regulator_bulk_disable(ADMV1014_NUM_REGULATORS, data);
}
static void admv1014_powerdown(void *data)
{
unsigned int enable_reg, enable_reg_msk;
/* Disable all components in the Enable Register */
enable_reg_msk = ADMV1014_IBIAS_PD_MSK |
ADMV1014_IF_AMP_PD_MSK |
ADMV1014_QUAD_BG_PD_MSK |
ADMV1014_BB_AMP_PD_MSK |
ADMV1014_QUAD_IBIAS_PD_MSK |
ADMV1014_BG_PD_MSK;
enable_reg = FIELD_PREP(ADMV1014_IBIAS_PD_MSK, 1) |
FIELD_PREP(ADMV1014_IF_AMP_PD_MSK, 1) |
FIELD_PREP(ADMV1014_QUAD_BG_PD_MSK, 1) |
FIELD_PREP(ADMV1014_BB_AMP_PD_MSK, 1) |
FIELD_PREP(ADMV1014_QUAD_IBIAS_PD_MSK, 1) |
FIELD_PREP(ADMV1014_BG_PD_MSK, 1);
admv1014_spi_update_bits(data, ADMV1014_REG_ENABLE,
enable_reg_msk, enable_reg);
}
static int admv1014_init(struct admv1014_state *st)
{
unsigned int chip_id, enable_reg, enable_reg_msk;
struct spi_device *spi = st->spi;
int ret;
ret = regulator_bulk_enable(ADMV1014_NUM_REGULATORS, st->regulators);
if (ret) {
dev_err(&spi->dev, "Failed to enable regulators");
return ret;
}
ret = devm_add_action_or_reset(&spi->dev, admv1014_reg_disable, st->regulators);
if (ret)
return ret;
ret = clk_prepare_enable(st->clkin);
if (ret)
return ret;
ret = devm_add_action_or_reset(&spi->dev, admv1014_clk_disable, st->clkin);
if (ret)
return ret;
st->nb.notifier_call = admv1014_freq_change;
ret = devm_clk_notifier_register(&spi->dev, st->clkin, &st->nb);
if (ret)
return ret;
ret = devm_add_action_or_reset(&spi->dev, admv1014_powerdown, st);
if (ret)
return ret;
/* Perform a software reset */
ret = __admv1014_spi_update_bits(st, ADMV1014_REG_SPI_CONTROL,
ADMV1014_SPI_SOFT_RESET_MSK,
FIELD_PREP(ADMV1014_SPI_SOFT_RESET_MSK, 1));
if (ret) {
dev_err(&spi->dev, "ADMV1014 SPI software reset failed.\n");
return ret;
}
ret = __admv1014_spi_update_bits(st, ADMV1014_REG_SPI_CONTROL,
ADMV1014_SPI_SOFT_RESET_MSK,
FIELD_PREP(ADMV1014_SPI_SOFT_RESET_MSK, 0));
if (ret) {
dev_err(&spi->dev, "ADMV1014 SPI software reset disable failed.\n");
return ret;
}
ret = __admv1014_spi_write(st, ADMV1014_REG_VVA_TEMP_COMP, 0x727C);
if (ret) {
dev_err(&spi->dev, "Writing default Temperature Compensation value failed.\n");
return ret;
}
ret = __admv1014_spi_read(st, ADMV1014_REG_SPI_CONTROL, &chip_id);
if (ret)
return ret;
chip_id = FIELD_GET(ADMV1014_CHIP_ID_MSK, chip_id);
if (chip_id != ADMV1014_CHIP_ID) {
dev_err(&spi->dev, "Invalid Chip ID.\n");
return -EINVAL;
}
ret = __admv1014_spi_update_bits(st, ADMV1014_REG_QUAD,
ADMV1014_QUAD_SE_MODE_MSK,
FIELD_PREP(ADMV1014_QUAD_SE_MODE_MSK,
st->quad_se_mode));
if (ret) {
dev_err(&spi->dev, "Writing Quad SE Mode failed.\n");
return ret;
}
ret = admv1014_update_quad_filters(st);
if (ret) {
dev_err(&spi->dev, "Update Quad Filters failed.\n");
return ret;
}
ret = admv1014_update_vcm_settings(st);
if (ret) {
dev_err(&spi->dev, "Update VCM Settings failed.\n");
return ret;
}
enable_reg_msk = ADMV1014_P1DB_COMPENSATION_MSK |
ADMV1014_IF_AMP_PD_MSK |
ADMV1014_BB_AMP_PD_MSK |
ADMV1014_DET_EN_MSK;
enable_reg = FIELD_PREP(ADMV1014_P1DB_COMPENSATION_MSK, st->p1db_comp ? 3 : 0) |
FIELD_PREP(ADMV1014_IF_AMP_PD_MSK,
(st->input_mode == ADMV1014_IF_MODE) ? 0 : 1) |
FIELD_PREP(ADMV1014_BB_AMP_PD_MSK,
(st->input_mode == ADMV1014_IF_MODE) ? 1 : 0) |
FIELD_PREP(ADMV1014_DET_EN_MSK, st->det_en);
return __admv1014_spi_update_bits(st, ADMV1014_REG_ENABLE, enable_reg_msk, enable_reg);
}
static int admv1014_properties_parse(struct admv1014_state *st)
{
const char *str;
unsigned int i;
struct spi_device *spi = st->spi;
int ret;
st->det_en = device_property_read_bool(&spi->dev, "adi,detector-enable");
st->p1db_comp = device_property_read_bool(&spi->dev, "adi,p1db-compensation-enable");
ret = device_property_read_string(&spi->dev, "adi,input-mode", &str);
if (ret) {
st->input_mode = ADMV1014_IQ_MODE;
} else {
ret = match_string(input_mode_names, ARRAY_SIZE(input_mode_names), str);
if (ret < 0)
return ret;
st->input_mode = ret;
}
ret = device_property_read_string(&spi->dev, "adi,quad-se-mode", &str);
if (ret) {
st->quad_se_mode = ADMV1014_SE_MODE_POS;
} else {
ret = match_string(quad_se_mode_names, ARRAY_SIZE(quad_se_mode_names), str);
if (ret < 0)
return ret;
st->quad_se_mode = ADMV1014_SE_MODE_POS + (ret * 3);
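/* match_string() index 0/1/2 maps to the SE_MODE_* values 6/9/12 above */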
}
for (i = 0; i < ADMV1014_NUM_REGULATORS; ++i)
st->regulators[i].supply = admv1014_reg_name[i];
ret = devm_regulator_bulk_get(&st->spi->dev, ADMV1014_NUM_REGULATORS,
st->regulators);
if (ret) {
dev_err(&spi->dev, "Failed to request regulators");
return ret;
}
st->clkin = devm_clk_get(&spi->dev, "lo_in");
if (IS_ERR(st->clkin))
return dev_err_probe(&spi->dev, PTR_ERR(st->clkin),
"failed to get the LO input clock\n");
return 0;
}
static int admv1014_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct admv1014_state *st;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
st = iio_priv(indio_dev);
ret = admv1014_properties_parse(st);
if (ret)
return ret;
indio_dev->info = &admv1014_info;
indio_dev->name = "admv1014";
if (st->input_mode == ADMV1014_IQ_MODE) {
indio_dev->channels = admv1014_channels_iq;
indio_dev->num_channels = ARRAY_SIZE(admv1014_channels_iq);
} else {
indio_dev->channels = admv1014_channels_if;
indio_dev->num_channels = ARRAY_SIZE(admv1014_channels_if);
}
st->spi = spi;
mutex_init(&st->lock);
ret = admv1014_init(st);
if (ret)
return ret;
return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id admv1014_id[] = {
{ "admv1014", 0 },
{}
};
MODULE_DEVICE_TABLE(spi, admv1014_id);
static const struct of_device_id admv1014_of_match[] = {
{ .compatible = "adi,admv1014" },
{}
};
MODULE_DEVICE_TABLE(of, admv1014_of_match);
static struct spi_driver admv1014_driver = {
.driver = {
.name = "admv1014",
.of_match_table = admv1014_of_match,
},
.probe = admv1014_probe,
.id_table = admv1014_id,
};
module_spi_driver(admv1014_driver);
MODULE_AUTHOR("Antoniu Miclaus <[email protected]");
MODULE_DESCRIPTION("Analog Devices ADMV1014");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/frequency/admv1014.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AD9523 SPI Low Jitter Clock Generator
*
* Copyright 2012 Analog Devices Inc.
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/frequency/ad9523.h>
#define AD9523_READ (1 << 15)
#define AD9523_WRITE (0 << 15)
#define AD9523_CNT(x) (((x) - 1) << 13)
#define AD9523_ADDR(x) ((x) & 0xFFF)
#define AD9523_R1B (1 << 16)
#define AD9523_R2B (2 << 16)
#define AD9523_R3B (3 << 16)
#define AD9523_TRANSF_LEN(x) ((x) >> 16)
#define AD9523_SERIAL_PORT_CONFIG (AD9523_R1B | 0x0)
#define AD9523_VERSION_REGISTER (AD9523_R1B | 0x2)
#define AD9523_PART_REGISTER (AD9523_R1B | 0x3)
#define AD9523_READBACK_CTRL (AD9523_R1B | 0x4)
#define AD9523_EEPROM_CUSTOMER_VERSION_ID (AD9523_R2B | 0x6)
#define AD9523_PLL1_REF_A_DIVIDER (AD9523_R2B | 0x11)
#define AD9523_PLL1_REF_B_DIVIDER (AD9523_R2B | 0x13)
#define AD9523_PLL1_REF_TEST_DIVIDER (AD9523_R1B | 0x14)
#define AD9523_PLL1_FEEDBACK_DIVIDER (AD9523_R2B | 0x17)
#define AD9523_PLL1_CHARGE_PUMP_CTRL (AD9523_R2B | 0x19)
#define AD9523_PLL1_INPUT_RECEIVERS_CTRL (AD9523_R1B | 0x1A)
#define AD9523_PLL1_REF_CTRL (AD9523_R1B | 0x1B)
#define AD9523_PLL1_MISC_CTRL (AD9523_R1B | 0x1C)
#define AD9523_PLL1_LOOP_FILTER_CTRL (AD9523_R1B | 0x1D)
#define AD9523_PLL2_CHARGE_PUMP (AD9523_R1B | 0xF0)
#define AD9523_PLL2_FEEDBACK_DIVIDER_AB (AD9523_R1B | 0xF1)
#define AD9523_PLL2_CTRL (AD9523_R1B | 0xF2)
#define AD9523_PLL2_VCO_CTRL (AD9523_R1B | 0xF3)
#define AD9523_PLL2_VCO_DIVIDER (AD9523_R1B | 0xF4)
#define AD9523_PLL2_LOOP_FILTER_CTRL (AD9523_R2B | 0xF6)
#define AD9523_PLL2_R2_DIVIDER (AD9523_R1B | 0xF7)
#define AD9523_CHANNEL_CLOCK_DIST(ch) (AD9523_R3B | (0x192 + 3 * (ch)))
#define AD9523_PLL1_OUTPUT_CTRL (AD9523_R1B | 0x1BA)
#define AD9523_PLL1_OUTPUT_CHANNEL_CTRL (AD9523_R1B | 0x1BB)
#define AD9523_READBACK_0 (AD9523_R1B | 0x22C)
#define AD9523_READBACK_1 (AD9523_R1B | 0x22D)
#define AD9523_STATUS_SIGNALS (AD9523_R3B | 0x232)
#define AD9523_POWER_DOWN_CTRL (AD9523_R1B | 0x233)
#define AD9523_IO_UPDATE (AD9523_R1B | 0x234)
#define AD9523_EEPROM_DATA_XFER_STATUS (AD9523_R1B | 0xB00)
#define AD9523_EEPROM_ERROR_READBACK (AD9523_R1B | 0xB01)
#define AD9523_EEPROM_CTRL1 (AD9523_R1B | 0xB02)
#define AD9523_EEPROM_CTRL2 (AD9523_R1B | 0xB03)
/* AD9523_SERIAL_PORT_CONFIG */
#define AD9523_SER_CONF_SDO_ACTIVE (1 << 7)
#define AD9523_SER_CONF_SOFT_RESET (1 << 5)
/* AD9523_READBACK_CTRL */
#define AD9523_READBACK_CTRL_READ_BUFFERED (1 << 0)
/* AD9523_PLL1_CHARGE_PUMP_CTRL */
#define AD9523_PLL1_CHARGE_PUMP_CURRENT_nA(x) (((x) / 500) & 0x7F)
#define AD9523_PLL1_CHARGE_PUMP_TRISTATE (1 << 7)
#define AD9523_PLL1_CHARGE_PUMP_MODE_NORMAL (3 << 8)
#define AD9523_PLL1_CHARGE_PUMP_MODE_PUMP_DOWN (2 << 8)
#define AD9523_PLL1_CHARGE_PUMP_MODE_PUMP_UP (1 << 8)
#define AD9523_PLL1_CHARGE_PUMP_MODE_TRISTATE (0 << 8)
#define AD9523_PLL1_BACKLASH_PW_MIN (0 << 10)
#define AD9523_PLL1_BACKLASH_PW_LOW (1 << 10)
#define AD9523_PLL1_BACKLASH_PW_HIGH (2 << 10)
#define AD9523_PLL1_BACKLASH_PW_MAX (3 << 10)
/* AD9523_PLL1_INPUT_RECEIVERS_CTRL */
#define AD9523_PLL1_REF_TEST_RCV_EN (1 << 7)
#define AD9523_PLL1_REFB_DIFF_RCV_EN (1 << 6)
#define AD9523_PLL1_REFA_DIFF_RCV_EN (1 << 5)
#define AD9523_PLL1_REFB_RCV_EN (1 << 4)
#define AD9523_PLL1_REFA_RCV_EN (1 << 3)
#define AD9523_PLL1_REFA_REFB_PWR_CTRL_EN (1 << 2)
#define AD9523_PLL1_OSC_IN_CMOS_NEG_INP_EN (1 << 1)
#define AD9523_PLL1_OSC_IN_DIFF_EN (1 << 0)
/* AD9523_PLL1_REF_CTRL */
#define AD9523_PLL1_BYPASS_REF_TEST_DIV_EN (1 << 7)
#define AD9523_PLL1_BYPASS_FEEDBACK_DIV_EN (1 << 6)
#define AD9523_PLL1_ZERO_DELAY_MODE_INT (1 << 5)
#define AD9523_PLL1_ZERO_DELAY_MODE_EXT (0 << 5)
#define AD9523_PLL1_OSC_IN_PLL_FEEDBACK_EN (1 << 4)
#define AD9523_PLL1_ZD_IN_CMOS_NEG_INP_EN (1 << 3)
#define AD9523_PLL1_ZD_IN_DIFF_EN (1 << 2)
#define AD9523_PLL1_REFB_CMOS_NEG_INP_EN (1 << 1)
#define AD9523_PLL1_REFA_CMOS_NEG_INP_EN (1 << 0)
/* AD9523_PLL1_MISC_CTRL */
#define AD9523_PLL1_REFB_INDEP_DIV_CTRL_EN (1 << 7)
#define AD9523_PLL1_OSC_CTRL_FAIL_VCC_BY2_EN (1 << 6)
#define AD9523_PLL1_REF_MODE(x) ((x) << 2)
#define AD9523_PLL1_BYPASS_REFB_DIV (1 << 1)
#define AD9523_PLL1_BYPASS_REFA_DIV (1 << 0)
/* AD9523_PLL1_LOOP_FILTER_CTRL */
#define AD9523_PLL1_LOOP_FILTER_RZERO(x) ((x) & 0xF)
/* AD9523_PLL2_CHARGE_PUMP */
#define AD9523_PLL2_CHARGE_PUMP_CURRENT_nA(x) ((x) / 3500)
/* AD9523_PLL2_FEEDBACK_DIVIDER_AB */
#define AD9523_PLL2_FB_NDIV_A_CNT(x) (((x) & 0x3) << 6)
#define AD9523_PLL2_FB_NDIV_B_CNT(x) (((x) & 0x3F) << 0)
#define AD9523_PLL2_FB_NDIV(a, b) (4 * (b) + (a))
/* AD9523_PLL2_CTRL */
#define AD9523_PLL2_CHARGE_PUMP_MODE_NORMAL (3 << 0)
#define AD9523_PLL2_CHARGE_PUMP_MODE_PUMP_DOWN (2 << 0)
#define AD9523_PLL2_CHARGE_PUMP_MODE_PUMP_UP (1 << 0)
#define AD9523_PLL2_CHARGE_PUMP_MODE_TRISTATE (0 << 0)
#define AD9523_PLL2_BACKLASH_PW_MIN (0 << 2)
#define AD9523_PLL2_BACKLASH_PW_LOW (1 << 2)
#define AD9523_PLL2_BACKLASH_PW_HIGH (2 << 2)
#define AD9523_PLL2_BACKLASH_PW_MAX (3 << 2)
#define AD9523_PLL2_BACKLASH_CTRL_EN (1 << 4)
#define AD9523_PLL2_FREQ_DOUBLER_EN (1 << 5)
#define AD9523_PLL2_LOCK_DETECT_PWR_DOWN_EN (1 << 7)
/* AD9523_PLL2_VCO_CTRL */
#define AD9523_PLL2_VCO_CALIBRATE (1 << 1)
#define AD9523_PLL2_FORCE_VCO_MIDSCALE (1 << 2)
#define AD9523_PLL2_FORCE_REFERENCE_VALID (1 << 3)
#define AD9523_PLL2_FORCE_RELEASE_SYNC (1 << 4)
/* AD9523_PLL2_VCO_DIVIDER */
#define AD9523_PLL2_VCO_DIV_M1(x) ((((x) - 3) & 0x3) << 0)
#define AD9523_PLL2_VCO_DIV_M2(x) ((((x) - 3) & 0x3) << 4)
#define AD9523_PLL2_VCO_DIV_M1_PWR_DOWN_EN (1 << 2)
#define AD9523_PLL2_VCO_DIV_M2_PWR_DOWN_EN (1 << 6)
/* AD9523_PLL2_LOOP_FILTER_CTRL */
#define AD9523_PLL2_LOOP_FILTER_CPOLE1(x) (((x) & 0x7) << 0)
#define AD9523_PLL2_LOOP_FILTER_RZERO(x) (((x) & 0x7) << 3)
#define AD9523_PLL2_LOOP_FILTER_RPOLE2(x) (((x) & 0x7) << 6)
#define AD9523_PLL2_LOOP_FILTER_RZERO_BYPASS_EN (1 << 8)
/* AD9523_PLL2_R2_DIVIDER */
#define AD9523_PLL2_R2_DIVIDER_VAL(x) (((x) & 0x1F) << 0)
/* AD9523_CHANNEL_CLOCK_DIST */
#define AD9523_CLK_DIST_DIV_PHASE(x) (((x) & 0x3F) << 18)
#define AD9523_CLK_DIST_DIV_PHASE_REV(x) (((x) >> 18) & 0x3F)
#define AD9523_CLK_DIST_DIV(x) ((((x) - 1) & 0x3FF) << 8)
#define AD9523_CLK_DIST_DIV_REV(x) ((((x) >> 8) & 0x3FF) + 1)
#define AD9523_CLK_DIST_INV_DIV_OUTPUT_EN (1 << 7)
#define AD9523_CLK_DIST_IGNORE_SYNC_EN (1 << 6)
#define AD9523_CLK_DIST_PWR_DOWN_EN (1 << 5)
#define AD9523_CLK_DIST_LOW_PWR_MODE_EN (1 << 4)
#define AD9523_CLK_DIST_DRIVER_MODE(x) (((x) & 0xF) << 0)
/* AD9523_PLL1_OUTPUT_CTRL */
#define AD9523_PLL1_OUTP_CTRL_VCO_DIV_SEL_CH6_M2 (1 << 7)
#define AD9523_PLL1_OUTP_CTRL_VCO_DIV_SEL_CH5_M2 (1 << 6)
#define AD9523_PLL1_OUTP_CTRL_VCO_DIV_SEL_CH4_M2 (1 << 5)
#define AD9523_PLL1_OUTP_CTRL_CMOS_DRV_WEAK (1 << 4)
#define AD9523_PLL1_OUTP_CTRL_OUTPUT_DIV_1 (0 << 0)
#define AD9523_PLL1_OUTP_CTRL_OUTPUT_DIV_2 (1 << 0)
#define AD9523_PLL1_OUTP_CTRL_OUTPUT_DIV_4 (2 << 0)
#define AD9523_PLL1_OUTP_CTRL_OUTPUT_DIV_8 (4 << 0)
#define AD9523_PLL1_OUTP_CTRL_OUTPUT_DIV_16 (8 << 0)
/* AD9523_PLL1_OUTPUT_CHANNEL_CTRL */
#define AD9523_PLL1_OUTP_CH_CTRL_OUTPUT_PWR_DOWN_EN (1 << 7)
#define AD9523_PLL1_OUTP_CH_CTRL_VCO_DIV_SEL_CH9_M2 (1 << 6)
#define AD9523_PLL1_OUTP_CH_CTRL_VCO_DIV_SEL_CH8_M2 (1 << 5)
#define AD9523_PLL1_OUTP_CH_CTRL_VCO_DIV_SEL_CH7_M2 (1 << 4)
#define AD9523_PLL1_OUTP_CH_CTRL_VCXO_SRC_SEL_CH3 (1 << 3)
#define AD9523_PLL1_OUTP_CH_CTRL_VCXO_SRC_SEL_CH2 (1 << 2)
#define AD9523_PLL1_OUTP_CH_CTRL_VCXO_SRC_SEL_CH1 (1 << 1)
#define AD9523_PLL1_OUTP_CH_CTRL_VCXO_SRC_SEL_CH0 (1 << 0)
/* AD9523_READBACK_0 */
#define AD9523_READBACK_0_STAT_PLL2_REF_CLK (1 << 7)
#define AD9523_READBACK_0_STAT_PLL2_FB_CLK (1 << 6)
#define AD9523_READBACK_0_STAT_VCXO (1 << 5)
#define AD9523_READBACK_0_STAT_REF_TEST (1 << 4)
#define AD9523_READBACK_0_STAT_REFB (1 << 3)
#define AD9523_READBACK_0_STAT_REFA (1 << 2)
#define AD9523_READBACK_0_STAT_PLL2_LD (1 << 1)
#define AD9523_READBACK_0_STAT_PLL1_LD (1 << 0)
/* AD9523_READBACK_1 */
#define AD9523_READBACK_1_HOLDOVER_ACTIVE (1 << 3)
#define AD9523_READBACK_1_AUTOMODE_SEL_REFB (1 << 2)
#define AD9523_READBACK_1_VCO_CALIB_IN_PROGRESS (1 << 0)
/* AD9523_STATUS_SIGNALS */
#define AD9523_STATUS_SIGNALS_SYNC_MAN_CTRL (1 << 16)
#define AD9523_STATUS_MONITOR_01_PLL12_LOCKED (0x302)
/* AD9523_POWER_DOWN_CTRL */
#define AD9523_POWER_DOWN_CTRL_PLL1_PWR_DOWN (1 << 2)
#define AD9523_POWER_DOWN_CTRL_PLL2_PWR_DOWN (1 << 1)
#define AD9523_POWER_DOWN_CTRL_DIST_PWR_DOWN (1 << 0)
/* AD9523_IO_UPDATE */
#define AD9523_IO_UPDATE_EN (1 << 0)
/* AD9523_EEPROM_DATA_XFER_STATUS */
#define AD9523_EEPROM_DATA_XFER_IN_PROGRESS (1 << 0)
/* AD9523_EEPROM_ERROR_READBACK */
#define AD9523_EEPROM_ERROR_READBACK_FAIL (1 << 0)
/* AD9523_EEPROM_CTRL1 */
#define AD9523_EEPROM_CTRL1_SOFT_EEPROM (1 << 1)
#define AD9523_EEPROM_CTRL1_EEPROM_WRITE_PROT_DIS (1 << 0)
/* AD9523_EEPROM_CTRL2 */
#define AD9523_EEPROM_CTRL2_REG2EEPROM (1 << 0)
#define AD9523_NUM_CHAN 14
#define AD9523_NUM_CHAN_ALT_CLK_SRC 10
/* Helpers to avoid excess line breaks */
#define AD_IFE(_pde, _a, _b) ((pdata->_pde) ? _a : _b)
#define AD_IF(_pde, _a) AD_IFE(_pde, _a, 0)
enum {
AD9523_STAT_PLL1_LD,
AD9523_STAT_PLL2_LD,
AD9523_STAT_REFA,
AD9523_STAT_REFB,
AD9523_STAT_REF_TEST,
AD9523_STAT_VCXO,
AD9523_STAT_PLL2_FB_CLK,
AD9523_STAT_PLL2_REF_CLK,
AD9523_SYNC,
AD9523_EEPROM,
};
enum {
AD9523_VCO1,
AD9523_VCO2,
AD9523_VCXO,
AD9523_NUM_CLK_SRC,
};
struct ad9523_state {
struct spi_device *spi;
struct ad9523_platform_data *pdata;
struct iio_chan_spec ad9523_channels[AD9523_NUM_CHAN];
struct gpio_desc *pwrdown_gpio;
struct gpio_desc *reset_gpio;
struct gpio_desc *sync_gpio;
unsigned long vcxo_freq;
unsigned long vco_freq;
unsigned long vco_out_freq[AD9523_NUM_CLK_SRC];
unsigned char vco_out_map[AD9523_NUM_CHAN_ALT_CLK_SRC];
/*
* Lock for accessing device registers. Some operations require
* multiple consecutive R/W operations, during which the device
* shouldn't be interrupted. The buffers are also shared across
* all operations so need to be protected on stand alone reads and
* writes.
*/
struct mutex lock;
/*
* DMA (thus cache coherency maintenance) may require that
* transfer buffers live in their own cache lines.
*/
union {
__be32 d32;
u8 d8[4];
} data[2] __aligned(IIO_DMA_MINALIGN);
};
static int ad9523_read(struct iio_dev *indio_dev, unsigned int addr)
{
struct ad9523_state *st = iio_priv(indio_dev);
int ret;
/* We encode the register size 1..3 bytes into the register address.
* On transfer we get the size from the register datum, and make sure
* the result is properly aligned.
*/
struct spi_transfer t[] = {
{
.tx_buf = &st->data[0].d8[2],
.len = 2,
}, {
.rx_buf = &st->data[1].d8[4 - AD9523_TRANSF_LEN(addr)],
.len = AD9523_TRANSF_LEN(addr),
},
};
st->data[0].d32 = cpu_to_be32(AD9523_READ |
AD9523_CNT(AD9523_TRANSF_LEN(addr)) |
AD9523_ADDR(addr));
ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
if (ret < 0)
dev_err(&indio_dev->dev, "read failed (%d)", ret);
else
ret = be32_to_cpu(st->data[1].d32) & (0xFFFFFF >>
(8 * (3 - AD9523_TRANSF_LEN(addr))));
return ret;
};
static int ad9523_write(struct iio_dev *indio_dev,
unsigned int addr, unsigned int val)
{
struct ad9523_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer t[] = {
{
.tx_buf = &st->data[0].d8[2],
.len = 2,
}, {
.tx_buf = &st->data[1].d8[4 - AD9523_TRANSF_LEN(addr)],
.len = AD9523_TRANSF_LEN(addr),
},
};
st->data[0].d32 = cpu_to_be32(AD9523_WRITE |
AD9523_CNT(AD9523_TRANSF_LEN(addr)) |
AD9523_ADDR(addr));
st->data[1].d32 = cpu_to_be32(val);
ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
if (ret < 0)
dev_err(&indio_dev->dev, "write failed (%d)", ret);
return ret;
}
static int ad9523_io_update(struct iio_dev *indio_dev)
{
return ad9523_write(indio_dev, AD9523_IO_UPDATE, AD9523_IO_UPDATE_EN);
}
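/*
 * Route an output channel to its alternate clock source: channels 0..3
 * can take the VCXO directly (cached as AD9523_VCXO), channels 4..9
 * select between the two PLL2 VCO dividers (M1/M2). The selection is
 * cached in st->vco_out_map so rate read-back uses the right base
 * frequency.
 */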
static int ad9523_vco_out_map(struct iio_dev *indio_dev,
unsigned int ch, unsigned int out)
{
struct ad9523_state *st = iio_priv(indio_dev);
int ret;
unsigned int mask;
switch (ch) {
case 0 ... 3:
ret = ad9523_read(indio_dev, AD9523_PLL1_OUTPUT_CHANNEL_CTRL);
if (ret < 0)
break;
mask = AD9523_PLL1_OUTP_CH_CTRL_VCXO_SRC_SEL_CH0 << ch;
if (out) {
ret |= mask;
out = AD9523_VCXO;
} else {
ret &= ~mask;
}
ret = ad9523_write(indio_dev,
AD9523_PLL1_OUTPUT_CHANNEL_CTRL, ret);
break;
case 4 ... 6:
ret = ad9523_read(indio_dev, AD9523_PLL1_OUTPUT_CTRL);
if (ret < 0)
break;
mask = AD9523_PLL1_OUTP_CTRL_VCO_DIV_SEL_CH4_M2 << (ch - 4);
if (out)
ret |= mask;
else
ret &= ~mask;
ret = ad9523_write(indio_dev, AD9523_PLL1_OUTPUT_CTRL, ret);
break;
case 7 ... 9:
ret = ad9523_read(indio_dev, AD9523_PLL1_OUTPUT_CHANNEL_CTRL);
if (ret < 0)
break;
mask = AD9523_PLL1_OUTP_CH_CTRL_VCO_DIV_SEL_CH7_M2 << (ch - 7);
if (out)
ret |= mask;
else
ret &= ~mask;
ret = ad9523_write(indio_dev,
AD9523_PLL1_OUTPUT_CHANNEL_CTRL, ret);
break;
default:
return 0;
}
st->vco_out_map[ch] = out;
return ret;
}
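/*
 * Decide whether a channel should use its alternate clock source for the
 * requested rate: channels 0..3 switch to the VCXO only on an exact
 * match, channels 4..9 compare how well the requested rate can be
 * derived from each VCO divider output and pick the better one.
 */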
static int ad9523_set_clock_provider(struct iio_dev *indio_dev,
unsigned int ch, unsigned long freq)
{
struct ad9523_state *st = iio_priv(indio_dev);
long tmp1, tmp2;
bool use_alt_clk_src;
switch (ch) {
case 0 ... 3:
use_alt_clk_src = (freq == st->vco_out_freq[AD9523_VCXO]);
break;
case 4 ... 9:
tmp1 = st->vco_out_freq[AD9523_VCO1] / freq;
tmp2 = st->vco_out_freq[AD9523_VCO2] / freq;
tmp1 *= freq;
tmp2 *= freq;
use_alt_clk_src = (abs(tmp1 - freq) > abs(tmp2 - freq));
break;
default:
/* Ch 10..13: no action required, return success */
return 0;
}
return ad9523_vco_out_map(indio_dev, ch, use_alt_clk_src);
}
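/*
 * Save the current register configuration to the on-chip EEPROM:
 * disable write protection, start the register-to-EEPROM transfer,
 * poll the transfer status, re-enable protection and check the error
 * readback.
 */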
static int ad9523_store_eeprom(struct iio_dev *indio_dev)
{
int ret, tmp;
ret = ad9523_write(indio_dev, AD9523_EEPROM_CTRL1,
AD9523_EEPROM_CTRL1_EEPROM_WRITE_PROT_DIS);
if (ret < 0)
return ret;
ret = ad9523_write(indio_dev, AD9523_EEPROM_CTRL2,
AD9523_EEPROM_CTRL2_REG2EEPROM);
if (ret < 0)
return ret;
tmp = 4;
do {
msleep(20);
ret = ad9523_read(indio_dev,
AD9523_EEPROM_DATA_XFER_STATUS);
if (ret < 0)
return ret;
} while ((ret & AD9523_EEPROM_DATA_XFER_IN_PROGRESS) && tmp--);
ret = ad9523_write(indio_dev, AD9523_EEPROM_CTRL1, 0);
if (ret < 0)
return ret;
ret = ad9523_read(indio_dev, AD9523_EEPROM_ERROR_READBACK);
if (ret < 0)
return ret;
if (ret & AD9523_EEPROM_ERROR_READBACK_FAIL) {
dev_err(&indio_dev->dev, "Verify EEPROM failed");
ret = -EIO;
}
return ret;
}
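/*
 * Issue a manual divider sync by pulsing the SYNC control bit: set it,
 * IO update, clear it, IO update again.
 */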
static int ad9523_sync(struct iio_dev *indio_dev)
{
int ret, tmp;
ret = ad9523_read(indio_dev, AD9523_STATUS_SIGNALS);
if (ret < 0)
return ret;
tmp = ret;
tmp |= AD9523_STATUS_SIGNALS_SYNC_MAN_CTRL;
ret = ad9523_write(indio_dev, AD9523_STATUS_SIGNALS, tmp);
if (ret < 0)
return ret;
ad9523_io_update(indio_dev);
tmp &= ~AD9523_STATUS_SIGNALS_SYNC_MAN_CTRL;
ret = ad9523_write(indio_dev, AD9523_STATUS_SIGNALS, tmp);
if (ret < 0)
return ret;
return ad9523_io_update(indio_dev);
}
static ssize_t ad9523_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct ad9523_state *st = iio_priv(indio_dev);
bool state;
int ret;
ret = kstrtobool(buf, &state);
if (ret < 0)
return ret;
if (!state)
return len;
mutex_lock(&st->lock);
switch ((u32)this_attr->address) {
case AD9523_SYNC:
ret = ad9523_sync(indio_dev);
break;
case AD9523_EEPROM:
ret = ad9523_store_eeprom(indio_dev);
break;
default:
ret = -ENODEV;
}
mutex_unlock(&st->lock);
return ret ? ret : len;
}
static ssize_t ad9523_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct ad9523_state *st = iio_priv(indio_dev);
int ret;
mutex_lock(&st->lock);
ret = ad9523_read(indio_dev, AD9523_READBACK_0);
if (ret >= 0) {
ret = sysfs_emit(buf, "%d\n", !!(ret & (1 <<
(u32)this_attr->address)));
}
mutex_unlock(&st->lock);
return ret;
}
static IIO_DEVICE_ATTR(pll1_locked, S_IRUGO,
ad9523_show,
NULL,
AD9523_STAT_PLL1_LD);
static IIO_DEVICE_ATTR(pll2_locked, S_IRUGO,
ad9523_show,
NULL,
AD9523_STAT_PLL2_LD);
static IIO_DEVICE_ATTR(pll1_reference_clk_a_present, S_IRUGO,
ad9523_show,
NULL,
AD9523_STAT_REFA);
static IIO_DEVICE_ATTR(pll1_reference_clk_b_present, S_IRUGO,
ad9523_show,
NULL,
AD9523_STAT_REFB);
static IIO_DEVICE_ATTR(pll1_reference_clk_test_present, S_IRUGO,
ad9523_show,
NULL,
AD9523_STAT_REF_TEST);
static IIO_DEVICE_ATTR(vcxo_clk_present, S_IRUGO,
ad9523_show,
NULL,
AD9523_STAT_VCXO);
static IIO_DEVICE_ATTR(pll2_feedback_clk_present, S_IRUGO,
ad9523_show,
NULL,
AD9523_STAT_PLL2_FB_CLK);
static IIO_DEVICE_ATTR(pll2_reference_clk_present, S_IRUGO,
ad9523_show,
NULL,
AD9523_STAT_PLL2_REF_CLK);
static IIO_DEVICE_ATTR(sync_dividers, S_IWUSR,
NULL,
ad9523_store,
AD9523_SYNC);
static IIO_DEVICE_ATTR(store_eeprom, S_IWUSR,
NULL,
ad9523_store,
AD9523_EEPROM);
static struct attribute *ad9523_attributes[] = {
&iio_dev_attr_sync_dividers.dev_attr.attr,
&iio_dev_attr_store_eeprom.dev_attr.attr,
&iio_dev_attr_pll2_feedback_clk_present.dev_attr.attr,
&iio_dev_attr_pll2_reference_clk_present.dev_attr.attr,
&iio_dev_attr_pll1_reference_clk_a_present.dev_attr.attr,
&iio_dev_attr_pll1_reference_clk_b_present.dev_attr.attr,
&iio_dev_attr_pll1_reference_clk_test_present.dev_attr.attr,
&iio_dev_attr_vcxo_clk_present.dev_attr.attr,
&iio_dev_attr_pll1_locked.dev_attr.attr,
&iio_dev_attr_pll2_locked.dev_attr.attr,
NULL,
};
static const struct attribute_group ad9523_attribute_group = {
.attrs = ad9523_attributes,
};
static int ad9523_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
int *val2,
long m)
{
struct ad9523_state *st = iio_priv(indio_dev);
unsigned int code;
int ret;
mutex_lock(&st->lock);
ret = ad9523_read(indio_dev, AD9523_CHANNEL_CLOCK_DIST(chan->channel));
mutex_unlock(&st->lock);
if (ret < 0)
return ret;
switch (m) {
case IIO_CHAN_INFO_RAW:
*val = !(ret & AD9523_CLK_DIST_PWR_DOWN_EN);
return IIO_VAL_INT;
case IIO_CHAN_INFO_FREQUENCY:
*val = st->vco_out_freq[st->vco_out_map[chan->channel]] /
AD9523_CLK_DIST_DIV_REV(ret);
return IIO_VAL_INT;
case IIO_CHAN_INFO_PHASE:
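/*
 * The register stores the phase offset in divider steps; 3141592 is
 * pi * 10^6, so the result below is the offset in radians scaled for
 * IIO_VAL_INT_PLUS_MICRO.
 */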
code = (AD9523_CLK_DIST_DIV_PHASE_REV(ret) * 3141592) /
AD9523_CLK_DIST_DIV_REV(ret);
*val = code / 1000000;
*val2 = code % 1000000;
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
};
static int ad9523_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val,
int val2,
long mask)
{
struct ad9523_state *st = iio_priv(indio_dev);
unsigned int reg;
int ret, tmp, code;
mutex_lock(&st->lock);
ret = ad9523_read(indio_dev, AD9523_CHANNEL_CLOCK_DIST(chan->channel));
if (ret < 0)
goto out;
reg = ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
if (val)
reg &= ~AD9523_CLK_DIST_PWR_DOWN_EN;
else
reg |= AD9523_CLK_DIST_PWR_DOWN_EN;
break;
case IIO_CHAN_INFO_FREQUENCY:
if (val <= 0) {
ret = -EINVAL;
goto out;
}
ret = ad9523_set_clock_provider(indio_dev, chan->channel, val);
if (ret < 0)
goto out;
tmp = st->vco_out_freq[st->vco_out_map[chan->channel]] / val;
tmp = clamp(tmp, 1, 1024);
reg &= ~(0x3FF << 8);
reg |= AD9523_CLK_DIST_DIV(tmp);
break;
case IIO_CHAN_INFO_PHASE:
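/* Inverse of the read_raw() conversion: radians back into divider steps. */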
code = val * 1000000 + val2 % 1000000;
tmp = (code * AD9523_CLK_DIST_DIV_REV(ret)) / 3141592;
tmp = clamp(tmp, 0, 63);
reg &= ~AD9523_CLK_DIST_DIV_PHASE(~0);
reg |= AD9523_CLK_DIST_DIV_PHASE(tmp);
break;
default:
ret = -EINVAL;
goto out;
}
ret = ad9523_write(indio_dev, AD9523_CHANNEL_CLOCK_DIST(chan->channel),
reg);
if (ret < 0)
goto out;
ad9523_io_update(indio_dev);
out:
mutex_unlock(&st->lock);
return ret;
}
static int ad9523_reg_access(struct iio_dev *indio_dev,
unsigned int reg, unsigned int writeval,
unsigned int *readval)
{
struct ad9523_state *st = iio_priv(indio_dev);
int ret;
mutex_lock(&st->lock);
if (readval == NULL) {
ret = ad9523_write(indio_dev, reg | AD9523_R1B, writeval);
ad9523_io_update(indio_dev);
} else {
ret = ad9523_read(indio_dev, reg | AD9523_R1B);
if (ret < 0)
goto out_unlock;
*readval = ret;
ret = 0;
}
out_unlock:
mutex_unlock(&st->lock);
return ret;
}
static const struct iio_info ad9523_info = {
.read_raw = &ad9523_read_raw,
.write_raw = &ad9523_write_raw,
.debugfs_reg_access = &ad9523_reg_access,
.attrs = &ad9523_attribute_group,
};
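/*
 * Full device bring-up from platform data: soft reset, PLL1 and PLL2
 * configuration, VCO divider setup, per-channel distribution settings
 * and clock-source mapping, then tristate/power-down of unused channels
 * before the final power-up and IO update.
 */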
static int ad9523_setup(struct iio_dev *indio_dev)
{
struct ad9523_state *st = iio_priv(indio_dev);
struct ad9523_platform_data *pdata = st->pdata;
struct ad9523_channel_spec *chan;
unsigned long active_mask = 0;
int ret, i;
ret = ad9523_write(indio_dev, AD9523_SERIAL_PORT_CONFIG,
AD9523_SER_CONF_SOFT_RESET |
(st->spi->mode & SPI_3WIRE ? 0 :
AD9523_SER_CONF_SDO_ACTIVE));
if (ret < 0)
return ret;
ret = ad9523_write(indio_dev, AD9523_READBACK_CTRL,
AD9523_READBACK_CTRL_READ_BUFFERED);
if (ret < 0)
return ret;
ret = ad9523_io_update(indio_dev);
if (ret < 0)
return ret;
/*
* PLL1 Setup
*/
ret = ad9523_write(indio_dev, AD9523_PLL1_REF_A_DIVIDER,
pdata->refa_r_div);
if (ret < 0)
return ret;
ret = ad9523_write(indio_dev, AD9523_PLL1_REF_B_DIVIDER,
pdata->refb_r_div);
if (ret < 0)
return ret;
ret = ad9523_write(indio_dev, AD9523_PLL1_FEEDBACK_DIVIDER,
pdata->pll1_feedback_div);
if (ret < 0)
return ret;
ret = ad9523_write(indio_dev, AD9523_PLL1_CHARGE_PUMP_CTRL,
AD9523_PLL1_CHARGE_PUMP_CURRENT_nA(pdata->
pll1_charge_pump_current_nA) |
AD9523_PLL1_CHARGE_PUMP_MODE_NORMAL |
AD9523_PLL1_BACKLASH_PW_MIN);
if (ret < 0)
return ret;
ret = ad9523_write(indio_dev, AD9523_PLL1_INPUT_RECEIVERS_CTRL,
AD_IF(refa_diff_rcv_en, AD9523_PLL1_REFA_RCV_EN) |
AD_IF(refb_diff_rcv_en, AD9523_PLL1_REFB_RCV_EN) |
AD_IF(osc_in_diff_en, AD9523_PLL1_OSC_IN_DIFF_EN) |
AD_IF(osc_in_cmos_neg_inp_en,
AD9523_PLL1_OSC_IN_CMOS_NEG_INP_EN) |
AD_IF(refa_diff_rcv_en, AD9523_PLL1_REFA_DIFF_RCV_EN) |
AD_IF(refb_diff_rcv_en, AD9523_PLL1_REFB_DIFF_RCV_EN));
if (ret < 0)
return ret;
ret = ad9523_write(indio_dev, AD9523_PLL1_REF_CTRL,
AD_IF(zd_in_diff_en, AD9523_PLL1_ZD_IN_DIFF_EN) |
AD_IF(zd_in_cmos_neg_inp_en,
AD9523_PLL1_ZD_IN_CMOS_NEG_INP_EN) |
AD_IF(zero_delay_mode_internal_en,
AD9523_PLL1_ZERO_DELAY_MODE_INT) |
AD_IF(osc_in_feedback_en, AD9523_PLL1_OSC_IN_PLL_FEEDBACK_EN) |
AD_IF(refa_cmos_neg_inp_en, AD9523_PLL1_REFA_CMOS_NEG_INP_EN) |
AD_IF(refb_cmos_neg_inp_en, AD9523_PLL1_REFB_CMOS_NEG_INP_EN));
if (ret < 0)
return ret;
ret = ad9523_write(indio_dev, AD9523_PLL1_MISC_CTRL,
AD9523_PLL1_REFB_INDEP_DIV_CTRL_EN |
AD9523_PLL1_REF_MODE(pdata->ref_mode));
if (ret < 0)
return ret;
ret = ad9523_write(indio_dev, AD9523_PLL1_LOOP_FILTER_CTRL,
AD9523_PLL1_LOOP_FILTER_RZERO(pdata->pll1_loop_filter_rzero));
if (ret < 0)
return ret;
/*
* PLL2 Setup
*/
ret = ad9523_write(indio_dev, AD9523_PLL2_CHARGE_PUMP,
AD9523_PLL2_CHARGE_PUMP_CURRENT_nA(pdata->
pll2_charge_pump_current_nA));
if (ret < 0)
return ret;
ret = ad9523_write(indio_dev, AD9523_PLL2_FEEDBACK_DIVIDER_AB,
AD9523_PLL2_FB_NDIV_A_CNT(pdata->pll2_ndiv_a_cnt) |
AD9523_PLL2_FB_NDIV_B_CNT(pdata->pll2_ndiv_b_cnt));
if (ret < 0)
return ret;
ret = ad9523_write(indio_dev, AD9523_PLL2_CTRL,
AD9523_PLL2_CHARGE_PUMP_MODE_NORMAL |
AD9523_PLL2_BACKLASH_CTRL_EN |
AD_IF(pll2_freq_doubler_en, AD9523_PLL2_FREQ_DOUBLER_EN));
if (ret < 0)
return ret;
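/*
 * VCO rate: f_vcxo * (2 if the doubler is enabled) * N / R2, where
 * N = 4 * B + A from the PLL2 feedback divider settings.
 */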
st->vco_freq = div_u64((unsigned long long)pdata->vcxo_freq *
(pdata->pll2_freq_doubler_en ? 2 : 1) *
AD9523_PLL2_FB_NDIV(pdata->pll2_ndiv_a_cnt,
pdata->pll2_ndiv_b_cnt),
pdata->pll2_r2_div);
ret = ad9523_write(indio_dev, AD9523_PLL2_VCO_CTRL,
AD9523_PLL2_VCO_CALIBRATE);
if (ret < 0)
return ret;
ret = ad9523_write(indio_dev, AD9523_PLL2_VCO_DIVIDER,
AD9523_PLL2_VCO_DIV_M1(pdata->pll2_vco_div_m1) |
AD9523_PLL2_VCO_DIV_M2(pdata->pll2_vco_div_m2) |
AD_IFE(pll2_vco_div_m1, 0,
AD9523_PLL2_VCO_DIV_M1_PWR_DOWN_EN) |
AD_IFE(pll2_vco_div_m2, 0,
AD9523_PLL2_VCO_DIV_M2_PWR_DOWN_EN));
if (ret < 0)
return ret;
if (pdata->pll2_vco_div_m1)
st->vco_out_freq[AD9523_VCO1] =
st->vco_freq / pdata->pll2_vco_div_m1;
if (pdata->pll2_vco_div_m2)
st->vco_out_freq[AD9523_VCO2] =
st->vco_freq / pdata->pll2_vco_div_m2;
st->vco_out_freq[AD9523_VCXO] = pdata->vcxo_freq;
ret = ad9523_write(indio_dev, AD9523_PLL2_R2_DIVIDER,
AD9523_PLL2_R2_DIVIDER_VAL(pdata->pll2_r2_div));
if (ret < 0)
return ret;
ret = ad9523_write(indio_dev, AD9523_PLL2_LOOP_FILTER_CTRL,
AD9523_PLL2_LOOP_FILTER_CPOLE1(pdata->cpole1) |
AD9523_PLL2_LOOP_FILTER_RZERO(pdata->rzero) |
AD9523_PLL2_LOOP_FILTER_RPOLE2(pdata->rpole2) |
AD_IF(rzero_bypass_en,
AD9523_PLL2_LOOP_FILTER_RZERO_BYPASS_EN));
if (ret < 0)
return ret;
for (i = 0; i < pdata->num_channels; i++) {
chan = &pdata->channels[i];
if (chan->channel_num < AD9523_NUM_CHAN) {
__set_bit(chan->channel_num, &active_mask);
ret = ad9523_write(indio_dev,
AD9523_CHANNEL_CLOCK_DIST(chan->channel_num),
AD9523_CLK_DIST_DRIVER_MODE(chan->driver_mode) |
AD9523_CLK_DIST_DIV(chan->channel_divider) |
AD9523_CLK_DIST_DIV_PHASE(chan->divider_phase) |
(chan->sync_ignore_en ?
AD9523_CLK_DIST_IGNORE_SYNC_EN : 0) |
(chan->divider_output_invert_en ?
AD9523_CLK_DIST_INV_DIV_OUTPUT_EN : 0) |
(chan->low_power_mode_en ?
AD9523_CLK_DIST_LOW_PWR_MODE_EN : 0) |
(chan->output_dis ?
AD9523_CLK_DIST_PWR_DOWN_EN : 0));
if (ret < 0)
return ret;
ret = ad9523_vco_out_map(indio_dev, chan->channel_num,
chan->use_alt_clock_src);
if (ret < 0)
return ret;
st->ad9523_channels[i].type = IIO_ALTVOLTAGE;
st->ad9523_channels[i].output = 1;
st->ad9523_channels[i].indexed = 1;
st->ad9523_channels[i].channel = chan->channel_num;
st->ad9523_channels[i].extend_name =
chan->extended_name;
st->ad9523_channels[i].info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_PHASE) |
BIT(IIO_CHAN_INFO_FREQUENCY);
}
}
for_each_clear_bit(i, &active_mask, AD9523_NUM_CHAN) {
ret = ad9523_write(indio_dev,
AD9523_CHANNEL_CLOCK_DIST(i),
AD9523_CLK_DIST_DRIVER_MODE(TRISTATE) |
AD9523_CLK_DIST_PWR_DOWN_EN);
if (ret < 0)
return ret;
}
ret = ad9523_write(indio_dev, AD9523_POWER_DOWN_CTRL, 0);
if (ret < 0)
return ret;
ret = ad9523_write(indio_dev, AD9523_STATUS_SIGNALS,
AD9523_STATUS_MONITOR_01_PLL12_LOCKED);
if (ret < 0)
return ret;
ret = ad9523_io_update(indio_dev);
if (ret < 0)
return ret;
return 0;
}
static int ad9523_probe(struct spi_device *spi)
{
struct ad9523_platform_data *pdata = spi->dev.platform_data;
struct iio_dev *indio_dev;
struct ad9523_state *st;
int ret;
if (!pdata) {
dev_err(&spi->dev, "no platform data?\n");
return -EINVAL;
}
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
st = iio_priv(indio_dev);
mutex_init(&st->lock);
ret = devm_regulator_get_enable(&spi->dev, "vcc");
if (ret)
return ret;
st->pwrdown_gpio = devm_gpiod_get_optional(&spi->dev, "powerdown",
GPIOD_OUT_HIGH);
if (IS_ERR(st->pwrdown_gpio))
return PTR_ERR(st->pwrdown_gpio);
st->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(st->reset_gpio))
return PTR_ERR(st->reset_gpio);
if (st->reset_gpio) {
udelay(1);
gpiod_direction_output(st->reset_gpio, 1);
}
st->sync_gpio = devm_gpiod_get_optional(&spi->dev, "sync",
GPIOD_OUT_HIGH);
if (IS_ERR(st->sync_gpio))
return PTR_ERR(st->sync_gpio);
spi_set_drvdata(spi, indio_dev);
st->spi = spi;
st->pdata = pdata;
indio_dev->name = (pdata->name[0] != 0) ? pdata->name :
spi_get_device_id(spi)->name;
indio_dev->info = &ad9523_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = st->ad9523_channels;
indio_dev->num_channels = pdata->num_channels;
ret = ad9523_setup(indio_dev);
if (ret < 0)
return ret;
return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id ad9523_id[] = {
{"ad9523-1", 9523},
{}
};
MODULE_DEVICE_TABLE(spi, ad9523_id);
static struct spi_driver ad9523_driver = {
.driver = {
.name = "ad9523",
},
.probe = ad9523_probe,
.id_table = ad9523_id,
};
module_spi_driver(ad9523_driver);
MODULE_AUTHOR("Michael Hennerich <[email protected]>");
MODULE_DESCRIPTION("Analog Devices AD9523 CLOCKDIST/PLL");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/frequency/ad9523.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ADF4377 driver
*
* Copyright 2022 Analog Devices Inc.
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>
#include <linux/regmap.h>
#include <linux/units.h>
#include <asm/unaligned.h>
/* ADF4377 REG0000 Map */
#define ADF4377_0000_SOFT_RESET_R_MSK BIT(7)
#define ADF4377_0000_LSB_FIRST_R_MSK BIT(6)
#define ADF4377_0000_ADDRESS_ASC_R_MSK BIT(5)
#define ADF4377_0000_SDO_ACTIVE_R_MSK BIT(4)
#define ADF4377_0000_SDO_ACTIVE_MSK BIT(3)
#define ADF4377_0000_ADDRESS_ASC_MSK BIT(2)
#define ADF4377_0000_LSB_FIRST_MSK BIT(1)
#define ADF4377_0000_SOFT_RESET_MSK BIT(0)
/* ADF4377 REG0000 Bit Definition */
#define ADF4377_0000_SDO_ACTIVE_SPI_3W 0x0
#define ADF4377_0000_SDO_ACTIVE_SPI_4W 0x1
#define ADF4377_0000_ADDR_ASC_AUTO_DECR 0x0
#define ADF4377_0000_ADDR_ASC_AUTO_INCR 0x1
#define ADF4377_0000_LSB_FIRST_MSB 0x0
#define ADF4377_0000_LSB_FIRST_LSB 0x1
#define ADF4377_0000_SOFT_RESET_N_OP 0x0
#define ADF4377_0000_SOFT_RESET_EN 0x1
/* ADF4377 REG0001 Map */
#define ADF4377_0001_SINGLE_INSTR_MSK BIT(7)
#define ADF4377_0001_MASTER_RB_CTRL_MSK BIT(5)
/* ADF4377 REG0003 Bit Definition */
#define ADF4377_0003_CHIP_TYPE 0x06
/* ADF4377 REG0004 Bit Definition */
#define ADF4377_0004_PRODUCT_ID_LSB 0x0005
/* ADF4377 REG0005 Bit Definition */
#define ADF4377_0005_PRODUCT_ID_MSB 0x0005
/* ADF4377 REG000A Map */
#define ADF4377_000A_SCRATCHPAD_MSK GENMASK(7, 0)
/* ADF4377 REG000C Bit Definition */
#define ADF4377_000C_VENDOR_ID_LSB 0x56
/* ADF4377 REG000D Bit Definition */
#define ADF4377_000D_VENDOR_ID_MSB 0x04
/* ADF4377 REG000F Bit Definition */
#define ADF4377_000F_R00F_RSV1_MSK GENMASK(7, 0)
/* ADF4377 REG0010 Map*/
#define ADF4377_0010_N_INT_LSB_MSK GENMASK(7, 0)
/* ADF4377 REG0011 Map*/
#define ADF4377_0011_EN_AUTOCAL_MSK BIT(7)
#define ADF4377_0011_EN_RDBLR_MSK BIT(6)
#define ADF4377_0011_DCLK_DIV2_MSK GENMASK(5, 4)
#define ADF4377_0011_N_INT_MSB_MSK GENMASK(3, 0)
/* ADF4377 REG0011 Bit Definition */
#define ADF4377_0011_DCLK_DIV2_1 0x0
#define ADF4377_0011_DCLK_DIV2_2 0x1
#define ADF4377_0011_DCLK_DIV2_4 0x2
#define ADF4377_0011_DCLK_DIV2_8 0x3
/* ADF4377 REG0012 Map*/
#define ADF4377_0012_CLKOUT_DIV_MSK GENMASK(7, 6)
#define ADF4377_0012_R_DIV_MSK GENMASK(5, 0)
/* ADF4377 REG0012 Bit Definition */
#define ADF4377_0012_CLKOUT_DIV_1 0x0
#define ADF4377_0012_CLKOUT_DIV_2 0x1
#define ADF4377_0012_CLKOUT_DIV_4 0x2
#define ADF4377_0012_CLKOUT_DIV_8 0x3
/* ADF4377 REG0013 Map */
#define ADF4377_0013_M_VCO_CORE_MSK GENMASK(5, 4)
#define ADF4377_0013_VCO_BIAS_MSK GENMASK(3, 0)
/* ADF4377 REG0013 Bit Definition */
#define ADF4377_0013_M_VCO_0 0x0
#define ADF4377_0013_M_VCO_1 0x1
#define ADF4377_0013_M_VCO_2 0x2
#define ADF4377_0013_M_VCO_3 0x3
/* ADF4377 REG0014 Map */
#define ADF4377_0014_M_VCO_BAND_MSK GENMASK(7, 0)
/* ADF4377 REG0015 Map */
#define ADF4377_0015_BLEED_I_LSB_MSK GENMASK(7, 6)
#define ADF4377_0015_BLEED_POL_MSK BIT(5)
#define ADF4377_0015_EN_BLEED_MSK BIT(4)
#define ADF4377_0015_CP_I_MSK GENMASK(3, 0)
/* ADF4377 REG0015 Bit Definition */
#define ADF4377_CURRENT_SINK 0x0
#define ADF4377_CURRENT_SOURCE 0x1
#define ADF4377_0015_CP_0MA7 0x0
#define ADF4377_0015_CP_0MA9 0x1
#define ADF4377_0015_CP_1MA1 0x2
#define ADF4377_0015_CP_1MA3 0x3
#define ADF4377_0015_CP_1MA4 0x4
#define ADF4377_0015_CP_1MA8 0x5
#define ADF4377_0015_CP_2MA2 0x6
#define ADF4377_0015_CP_2MA5 0x7
#define ADF4377_0015_CP_2MA9 0x8
#define ADF4377_0015_CP_3MA6 0x9
#define ADF4377_0015_CP_4MA3 0xA
#define ADF4377_0015_CP_5MA0 0xB
#define ADF4377_0015_CP_5MA7 0xC
#define ADF4377_0015_CP_7MA2 0xD
#define ADF4377_0015_CP_8MA6 0xE
#define ADF4377_0015_CP_10MA1 0xF
/* ADF4377 REG0016 Map */
#define ADF4377_0016_BLEED_I_MSB_MSK GENMASK(7, 0)
/* ADF4377 REG0017 Map */
#define ADF4377_0016_INV_CLKOUT_MSK BIT(7)
#define ADF4377_0016_N_DEL_MSK GENMASK(6, 0)
/* ADF4377 REG0018 Map */
#define ADF4377_0018_CMOS_OV_MSK BIT(7)
#define ADF4377_0018_R_DEL_MSK GENMASK(6, 0)
/* ADF4377 REG0018 Bit Definition */
#define ADF4377_0018_1V8_LOGIC 0x0
#define ADF4377_0018_3V3_LOGIC 0x1
/* ADF4377 REG0019 Map */
#define ADF4377_0019_CLKOUT2_OP_MSK GENMASK(7, 6)
#define ADF4377_0019_CLKOUT1_OP_MSK GENMASK(5, 4)
#define ADF4377_0019_PD_CLK_MSK BIT(3)
#define ADF4377_0019_PD_RDET_MSK BIT(2)
#define ADF4377_0019_PD_ADC_MSK BIT(1)
#define ADF4377_0019_PD_CALADC_MSK BIT(0)
/* ADF4377 REG0019 Bit Definition */
#define ADF4377_0019_CLKOUT_320MV 0x0
#define ADF4377_0019_CLKOUT_420MV 0x1
#define ADF4377_0019_CLKOUT_530MV 0x2
#define ADF4377_0019_CLKOUT_640MV 0x3
/* ADF4377 REG001A Map */
#define ADF4377_001A_PD_ALL_MSK BIT(7)
#define ADF4377_001A_PD_RDIV_MSK BIT(6)
#define ADF4377_001A_PD_NDIV_MSK BIT(5)
#define ADF4377_001A_PD_VCO_MSK BIT(4)
#define ADF4377_001A_PD_LD_MSK BIT(3)
#define ADF4377_001A_PD_PFDCP_MSK BIT(2)
#define ADF4377_001A_PD_CLKOUT1_MSK BIT(1)
#define ADF4377_001A_PD_CLKOUT2_MSK BIT(0)
/* ADF4377 REG001B Map */
#define ADF4377_001B_EN_LOL_MSK BIT(7)
#define ADF4377_001B_LDWIN_PW_MSK BIT(6)
#define ADF4377_001B_EN_LDWIN_MSK BIT(5)
#define ADF4377_001B_LD_COUNT_MSK GENMASK(4, 0)
/* ADF4377 REG001B Bit Definition */
#define ADF4377_001B_LDWIN_PW_NARROW 0x0
#define ADF4377_001B_LDWIN_PW_WIDE 0x1
/* ADF4377 REG001C Map */
#define ADF4377_001C_EN_DNCLK_MSK BIT(7)
#define ADF4377_001C_EN_DRCLK_MSK BIT(6)
#define ADF4377_001C_RST_LD_MSK BIT(2)
#define ADF4377_001C_R01C_RSV1_MSK BIT(0)
/* ADF4377 REG001C Bit Definition */
#define ADF4377_001C_RST_LD_INACTIVE 0x0
#define ADF4377_001C_RST_LD_ACTIVE 0x1
#define ADF4377_001C_R01C_RSV1 0x1
/* ADF4377 REG001D Map */
#define ADF4377_001D_MUXOUT_MSK GENMASK(7, 4)
#define ADF4377_001D_EN_CPTEST_MSK BIT(2)
#define ADF4377_001D_CP_DOWN_MSK BIT(1)
#define ADF4377_001D_CP_UP_MSK BIT(0)
#define ADF4377_001D_EN_CPTEST_OFF 0x0
#define ADF4377_001D_EN_CPTEST_ON 0x1
#define ADF4377_001D_CP_DOWN_OFF 0x0
#define ADF4377_001D_CP_DOWN_ON 0x1
#define ADF4377_001D_CP_UP_OFF 0x0
#define ADF4377_001D_CP_UP_ON 0x1
/* ADF4377 REG001F Map */
#define ADF4377_001F_BST_REF_MSK BIT(7)
#define ADF4377_001F_FILT_REF_MSK BIT(6)
#define ADF4377_001F_REF_SEL_MSK BIT(5)
#define ADF4377_001F_R01F_RSV1_MSK GENMASK(4, 0)
/* ADF4377 REG001F Bit Definition */
#define ADF4377_001F_BST_LARGE_REF_IN 0x0
#define ADF4377_001F_BST_SMALL_REF_IN 0x1
#define ADF4377_001F_FILT_REF_OFF 0x0
#define ADF4377_001F_FILT_REF_ON 0x1
#define ADF4377_001F_REF_SEL_DMA 0x0
#define ADF4377_001F_REF_SEL_LNA 0x1
#define ADF4377_001F_R01F_RSV1 0x7
/* ADF4377 REG0020 Map */
#define ADF4377_0020_RST_SYS_MSK BIT(4)
#define ADF4377_0020_EN_ADC_CLK_MSK BIT(3)
#define ADF4377_0020_R020_RSV1_MSK BIT(0)
/* ADF4377 REG0021 Bit Definition */
#define ADF4377_0021_R021_RSV1 0xD3
/* ADF4377 REG0022 Bit Definition */
#define ADF4377_0022_R022_RSV1 0x32
/* ADF4377 REG0023 Map */
#define ADF4377_0023_CAT_CT_SEL BIT(7)
#define ADF4377_0023_R023_RSV1_MSK GENMASK(6, 0)
/* ADF4377 REG0023 Bit Definition */
#define ADF4377_0023_R023_RSV1 0x18
/* ADF4377 REG0024 Map */
#define ADF4377_0024_DCLK_MODE_MSK BIT(2)
/* ADF4377 REG0025 Map */
#define ADF4377_0025_CLKODIV_DB_MSK BIT(7)
#define ADF4377_0025_DCLK_DB_MSK BIT(6)
#define ADF4377_0025_R025_RSV1_MSK GENMASK(5, 0)
/* ADF4377 REG0025 Bit Definition */
#define ADF4377_0025_R025_RSV1 0x16
/* ADF4377 REG0026 Map */
#define ADF4377_0026_VCO_BAND_DIV_MSK GENMASK(7, 0)
/* ADF4377 REG0027 Map */
#define ADF4377_0027_SYNTH_LOCK_TO_LSB_MSK GENMASK(7, 0)
/* ADF4377 REG0028 Map */
#define ADF4377_0028_O_VCO_DB_MSK BIT(7)
#define ADF4377_0028_SYNTH_LOCK_TO_MSB_MSK GENMASK(6, 0)
/* ADF4377 REG0029 Map */
#define ADF4377_0029_VCO_ALC_TO_LSB_MSK GENMASK(7, 0)
/* ADF4377 REG002A Map */
#define ADF4377_002A_DEL_CTRL_DB_MSK BIT(7)
#define ADF4377_002A_VCO_ALC_TO_MSB_MSK GENMASK(6, 0)
/* ADF4377 REG002C Map */
#define ADF4377_002C_R02C_RSV1 0xC0
/* ADF4377 REG002D Map */
#define ADF4377_002D_ADC_CLK_DIV_MSK GENMASK(7, 0)
/* ADF4377 REG002E Map */
#define ADF4377_002E_EN_ADC_CNV_MSK BIT(7)
#define ADF4377_002E_EN_ADC_MSK BIT(1)
#define ADF4377_002E_ADC_A_CONV_MSK BIT(0)
/* ADF4377 REG002E Bit Definition */
#define ADF4377_002E_ADC_A_CONV_ADC_ST_CNV 0x0
#define ADF4377_002E_ADC_A_CONV_VCO_CALIB 0x1
/* ADF4377 REG002F Map */
#define ADF4377_002F_DCLK_DIV1_MSK GENMASK(1, 0)
/* ADF4377 REG002F Bit Definition */
#define ADF4377_002F_DCLK_DIV1_1 0x0
#define ADF4377_002F_DCLK_DIV1_2 0x1
#define ADF4377_002F_DCLK_DIV1_8 0x2
#define ADF4377_002F_DCLK_DIV1_32 0x3
/* ADF4377 REG0031 Bit Definition */
#define ADF4377_0031_R031_RSV1 0x09
/* ADF4377 REG0032 Map */
#define ADF4377_0032_ADC_CLK_SEL_MSK BIT(6)
#define ADF4377_0032_R032_RSV1_MSK GENMASK(5, 0)
/* ADF4377 REG0032 Bit Definition */
#define ADF4377_0032_ADC_CLK_SEL_N_OP 0x0
#define ADF4377_0032_ADC_CLK_SEL_SPI_CLK 0x1
#define ADF4377_0032_R032_RSV1 0x9
/* ADF4377 REG0033 Bit Definition */
#define ADF4377_0033_R033_RSV1 0x18
/* ADF4377 REG0034 Bit Definition */
#define ADF4377_0034_R034_RSV1 0x08
/* ADF4377 REG003A Bit Definition */
#define ADF4377_003A_R03A_RSV1 0x5D
/* ADF4377 REG003B Bit Definition */
#define ADF4377_003B_R03B_RSV1 0x2B
/* ADF4377 REG003D Map */
#define ADF4377_003D_O_VCO_BAND_MSK BIT(3)
#define ADF4377_003D_O_VCO_CORE_MSK BIT(2)
#define ADF4377_003D_O_VCO_BIAS_MSK BIT(1)
/* ADF4377 REG003D Bit Definition */
#define ADF4377_003D_O_VCO_BAND_VCO_CALIB 0x0
#define ADF4377_003D_O_VCO_BAND_M_VCO 0x1
#define ADF4377_003D_O_VCO_CORE_VCO_CALIB 0x0
#define ADF4377_003D_O_VCO_CORE_M_VCO 0x1
#define ADF4377_003D_O_VCO_BIAS_VCO_CALIB 0x0
#define ADF4377_003D_O_VCO_BIAS_M_VCO 0x1
/* ADF4377 REG0042 Map */
#define ADF4377_0042_R042_RSV1 0x05
/* ADF4377 REG0045 Map */
#define ADF4377_0045_ADC_ST_CNV_MSK BIT(0)
/* ADF4377 REG0049 Map */
#define ADF4377_0049_EN_CLK2_MSK BIT(7)
#define ADF4377_0049_EN_CLK1_MSK BIT(6)
#define ADF4377_0049_REF_OK_MSK BIT(3)
#define ADF4377_0049_ADC_BUSY_MSK BIT(2)
#define ADF4377_0049_FSM_BUSY_MSK BIT(1)
#define ADF4377_0049_LOCKED_MSK BIT(0)
/* ADF4377 REG004B Map */
#define ADF4377_004B_VCO_CORE_MSK GENMASK(1, 0)
/* ADF4377 REG004C Map */
#define ADF4377_004C_CHIP_TEMP_LSB_MSK GENMASK(7, 0)
/* ADF4377 REG004D Map */
#define ADF4377_004D_CHIP_TEMP_MSB_MSK BIT(0)
/* ADF4377 REG004F Map */
#define ADF4377_004F_VCO_BAND_MSK GENMASK(7, 0)
/* ADF4377 REG0051 Map */
#define ADF4377_0051_VCO_BIAS_MSK GENMASK(3, 0)
/* ADF4377 REG0054 Map */
#define ADF4377_0054_CHIP_VERSION_MSK GENMASK(7, 0)
/* Specifications */
#define ADF4377_SPI_READ_CMD BIT(7)
#define ADF4377_MAX_VCO_FREQ (12800ULL * HZ_PER_MHZ)
#define ADF4377_MIN_VCO_FREQ (6400ULL * HZ_PER_MHZ)
#define ADF4377_MAX_REFIN_FREQ (1000 * HZ_PER_MHZ)
#define ADF4377_MIN_REFIN_FREQ (10 * HZ_PER_MHZ)
#define ADF4377_MAX_FREQ_PFD (500 * HZ_PER_MHZ)
#define ADF4377_MIN_FREQ_PFD (3 * HZ_PER_MHZ)
#define ADF4377_MAX_CLKPN_FREQ ADF4377_MAX_VCO_FREQ
#define ADF4377_MIN_CLKPN_FREQ (ADF4377_MIN_VCO_FREQ / 8)
#define ADF4377_FREQ_PFD_80MHZ (80 * HZ_PER_MHZ)
#define ADF4377_FREQ_PFD_125MHZ (125 * HZ_PER_MHZ)
#define ADF4377_FREQ_PFD_160MHZ (160 * HZ_PER_MHZ)
#define ADF4377_FREQ_PFD_250MHZ (250 * HZ_PER_MHZ)
#define ADF4377_FREQ_PFD_320MHZ (320 * HZ_PER_MHZ)
enum {
ADF4377_FREQ,
};
enum muxout_select_mode {
ADF4377_MUXOUT_HIGH_Z = 0x0,
ADF4377_MUXOUT_LKDET = 0x1,
ADF4377_MUXOUT_LOW = 0x2,
ADF4377_MUXOUT_DIV_RCLK_2 = 0x4,
ADF4377_MUXOUT_DIV_NCLK_2 = 0x5,
ADF4377_MUXOUT_HIGH = 0x8,
};
struct adf4377_state {
struct spi_device *spi;
struct regmap *regmap;
struct clk *clkin;
/* Protect against concurrent accesses to the device and data content */
struct mutex lock;
struct notifier_block nb;
/* Reference Divider */
unsigned int ref_div_factor;
/* PFD Frequency */
unsigned int f_pfd;
/* Input Reference Clock */
unsigned int clkin_freq;
/* CLKOUT Divider */
u8 clkout_div_sel;
/* Feedback Divider (N) */
u16 n_int;
u16 synth_lock_timeout;
u16 vco_alc_timeout;
u16 adc_clk_div;
u16 vco_band_div;
u8 dclk_div1;
u8 dclk_div2;
u8 dclk_mode;
unsigned int f_div_rclk;
enum muxout_select_mode muxout_select;
struct gpio_desc *gpio_ce;
struct gpio_desc *gpio_enclk1;
struct gpio_desc *gpio_enclk2;
u8 buf[2] __aligned(IIO_DMA_MINALIGN);
};
static const char * const adf4377_muxout_modes[] = {
[ADF4377_MUXOUT_HIGH_Z] = "high_z",
[ADF4377_MUXOUT_LKDET] = "lock_detect",
[ADF4377_MUXOUT_LOW] = "muxout_low",
[ADF4377_MUXOUT_DIV_RCLK_2] = "f_div_rclk_2",
[ADF4377_MUXOUT_DIV_NCLK_2] = "f_div_nclk_2",
[ADF4377_MUXOUT_HIGH] = "muxout_high",
};
static const struct reg_sequence adf4377_reg_defaults[] = {
{ 0x42, ADF4377_0042_R042_RSV1 },
{ 0x3B, ADF4377_003B_R03B_RSV1 },
{ 0x3A, ADF4377_003A_R03A_RSV1 },
{ 0x34, ADF4377_0034_R034_RSV1 },
{ 0x33, ADF4377_0033_R033_RSV1 },
{ 0x32, ADF4377_0032_R032_RSV1 },
{ 0x31, ADF4377_0031_R031_RSV1 },
{ 0x2C, ADF4377_002C_R02C_RSV1 },
{ 0x25, ADF4377_0025_R025_RSV1 },
{ 0x23, ADF4377_0023_R023_RSV1 },
{ 0x22, ADF4377_0022_R022_RSV1 },
{ 0x21, ADF4377_0021_R021_RSV1 },
{ 0x1f, ADF4377_001F_R01F_RSV1 },
{ 0x1c, ADF4377_001C_R01C_RSV1 },
};
static const struct regmap_config adf4377_regmap_config = {
.reg_bits = 16,
.val_bits = 8,
.read_flag_mask = BIT(7),
.max_register = 0x54,
};
static int adf4377_reg_access(struct iio_dev *indio_dev,
unsigned int reg,
unsigned int write_val,
unsigned int *read_val)
{
struct adf4377_state *st = iio_priv(indio_dev);
if (read_val)
return regmap_read(st->regmap, reg, read_val);
return regmap_write(st->regmap, reg, write_val);
}
static const struct iio_info adf4377_info = {
.debugfs_reg_access = &adf4377_reg_access,
};
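/*
 * Request a soft reset through the SOFT_RESET and SOFT_RESET_R bits and
 * poll register 0x0 until the device clears them again.
 */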
static int adf4377_soft_reset(struct adf4377_state *st)
{
unsigned int read_val;
int ret;
ret = regmap_update_bits(st->regmap, 0x0, ADF4377_0000_SOFT_RESET_MSK |
ADF4377_0000_SOFT_RESET_R_MSK,
FIELD_PREP(ADF4377_0000_SOFT_RESET_MSK, 1) |
FIELD_PREP(ADF4377_0000_SOFT_RESET_R_MSK, 1));
if (ret)
return ret;
return regmap_read_poll_timeout(st->regmap, 0x0, read_val,
!(read_val & (ADF4377_0000_SOFT_RESET_MSK |
ADF4377_0000_SOFT_RESET_R_MSK)), 200, 200 * 100);
}
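/*
 * Read back the synthesizer frequency as (f_clkin / R) * N, with the R
 * divider taken from register 0x12 and the 12-bit integer N assembled
 * from registers 0x10/0x11.
 */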
static int adf4377_get_freq(struct adf4377_state *st, u64 *freq)
{
unsigned int ref_div_factor, n_int;
u64 clkin_freq;
int ret;
mutex_lock(&st->lock);
ret = regmap_read(st->regmap, 0x12, &ref_div_factor);
if (ret)
goto exit;
ret = regmap_bulk_read(st->regmap, 0x10, st->buf, sizeof(st->buf));
if (ret)
goto exit;
clkin_freq = clk_get_rate(st->clkin);
ref_div_factor = FIELD_GET(ADF4377_0012_R_DIV_MSK, ref_div_factor);
n_int = FIELD_GET(ADF4377_0010_N_INT_LSB_MSK | ADF4377_0011_N_INT_MSB_MSK,
get_unaligned_le16(&st->buf));
*freq = div_u64(clkin_freq, ref_div_factor) * n_int;
exit:
mutex_unlock(&st->lock);
return ret;
}
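/*
 * Program a new output frequency: enable the ADC/calibration clocks,
 * load the timeout and divider values computed in adf4377_init(),
 * derive CLKOUT_DIV by doubling the target until it reaches the VCO
 * range and N from freq / f_pfd, then wait for FSM_BUSY to clear,
 * disable the calibration clocks again and set the output amplitude.
 */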
static int adf4377_set_freq(struct adf4377_state *st, u64 freq)
{
unsigned int read_val;
u64 f_vco;
int ret;
mutex_lock(&st->lock);
if (freq > ADF4377_MAX_CLKPN_FREQ || freq < ADF4377_MIN_CLKPN_FREQ) {
ret = -EINVAL;
goto exit;
}
ret = regmap_update_bits(st->regmap, 0x1C, ADF4377_001C_EN_DNCLK_MSK |
ADF4377_001C_EN_DRCLK_MSK,
FIELD_PREP(ADF4377_001C_EN_DNCLK_MSK, 1) |
FIELD_PREP(ADF4377_001C_EN_DRCLK_MSK, 1));
if (ret)
goto exit;
ret = regmap_update_bits(st->regmap, 0x11, ADF4377_0011_EN_AUTOCAL_MSK |
ADF4377_0011_DCLK_DIV2_MSK,
FIELD_PREP(ADF4377_0011_EN_AUTOCAL_MSK, 1) |
FIELD_PREP(ADF4377_0011_DCLK_DIV2_MSK, st->dclk_div2));
if (ret)
goto exit;
ret = regmap_update_bits(st->regmap, 0x2E, ADF4377_002E_EN_ADC_CNV_MSK |
ADF4377_002E_EN_ADC_MSK |
ADF4377_002E_ADC_A_CONV_MSK,
FIELD_PREP(ADF4377_002E_EN_ADC_CNV_MSK, 1) |
FIELD_PREP(ADF4377_002E_EN_ADC_MSK, 1) |
FIELD_PREP(ADF4377_002E_ADC_A_CONV_MSK,
ADF4377_002E_ADC_A_CONV_VCO_CALIB));
if (ret)
goto exit;
ret = regmap_update_bits(st->regmap, 0x20, ADF4377_0020_EN_ADC_CLK_MSK,
FIELD_PREP(ADF4377_0020_EN_ADC_CLK_MSK, 1));
if (ret)
goto exit;
ret = regmap_update_bits(st->regmap, 0x2F, ADF4377_002F_DCLK_DIV1_MSK,
FIELD_PREP(ADF4377_002F_DCLK_DIV1_MSK, st->dclk_div1));
if (ret)
goto exit;
ret = regmap_update_bits(st->regmap, 0x24, ADF4377_0024_DCLK_MODE_MSK,
FIELD_PREP(ADF4377_0024_DCLK_MODE_MSK, st->dclk_mode));
if (ret)
goto exit;
ret = regmap_write(st->regmap, 0x27,
FIELD_PREP(ADF4377_0027_SYNTH_LOCK_TO_LSB_MSK,
st->synth_lock_timeout));
if (ret)
goto exit;
ret = regmap_update_bits(st->regmap, 0x28, ADF4377_0028_SYNTH_LOCK_TO_MSB_MSK,
FIELD_PREP(ADF4377_0028_SYNTH_LOCK_TO_MSB_MSK,
st->synth_lock_timeout >> 8));
if (ret)
goto exit;
ret = regmap_write(st->regmap, 0x29,
FIELD_PREP(ADF4377_0029_VCO_ALC_TO_LSB_MSK,
st->vco_alc_timeout));
if (ret)
goto exit;
ret = regmap_update_bits(st->regmap, 0x2A, ADF4377_002A_VCO_ALC_TO_MSB_MSK,
FIELD_PREP(ADF4377_002A_VCO_ALC_TO_MSB_MSK,
st->vco_alc_timeout >> 8));
if (ret)
goto exit;
ret = regmap_write(st->regmap, 0x26,
FIELD_PREP(ADF4377_0026_VCO_BAND_DIV_MSK, st->vco_band_div));
if (ret)
goto exit;
ret = regmap_write(st->regmap, 0x2D,
FIELD_PREP(ADF4377_002D_ADC_CLK_DIV_MSK, st->adc_clk_div));
if (ret)
goto exit;
st->clkout_div_sel = 0;
f_vco = freq;
while (f_vco < ADF4377_MIN_VCO_FREQ) {
f_vco <<= 1;
st->clkout_div_sel++;
}
st->n_int = div_u64(freq, st->f_pfd);
ret = regmap_update_bits(st->regmap, 0x11, ADF4377_0011_EN_RDBLR_MSK |
ADF4377_0011_N_INT_MSB_MSK,
FIELD_PREP(ADF4377_0011_EN_RDBLR_MSK, 0) |
FIELD_PREP(ADF4377_0011_N_INT_MSB_MSK, st->n_int >> 8));
if (ret)
goto exit;
ret = regmap_update_bits(st->regmap, 0x12, ADF4377_0012_R_DIV_MSK |
ADF4377_0012_CLKOUT_DIV_MSK,
FIELD_PREP(ADF4377_0012_CLKOUT_DIV_MSK, st->clkout_div_sel) |
FIELD_PREP(ADF4377_0012_R_DIV_MSK, st->ref_div_factor));
if (ret)
goto exit;
ret = regmap_write(st->regmap, 0x10,
FIELD_PREP(ADF4377_0010_N_INT_LSB_MSK, st->n_int));
if (ret)
goto exit;
ret = regmap_read_poll_timeout(st->regmap, 0x49, read_val,
!(read_val & (ADF4377_0049_FSM_BUSY_MSK)), 200, 200 * 100);
if (ret)
goto exit;
/* Disable EN_DNCLK, EN_DRCLK */
ret = regmap_update_bits(st->regmap, 0x1C, ADF4377_001C_EN_DNCLK_MSK |
ADF4377_001C_EN_DRCLK_MSK,
FIELD_PREP(ADF4377_001C_EN_DNCLK_MSK, 0) |
FIELD_PREP(ADF4377_001C_EN_DRCLK_MSK, 0));
if (ret)
goto exit;
/* Disable EN_ADC_CLK */
ret = regmap_update_bits(st->regmap, 0x20, ADF4377_0020_EN_ADC_CLK_MSK,
FIELD_PREP(ADF4377_0020_EN_ADC_CLK_MSK, 0));
if (ret)
goto exit;
/* Set output Amplitude */
ret = regmap_update_bits(st->regmap, 0x19, ADF4377_0019_CLKOUT2_OP_MSK |
ADF4377_0019_CLKOUT1_OP_MSK,
FIELD_PREP(ADF4377_0019_CLKOUT1_OP_MSK,
ADF4377_0019_CLKOUT_420MV) |
FIELD_PREP(ADF4377_0019_CLKOUT2_OP_MSK,
ADF4377_0019_CLKOUT_420MV));
exit:
mutex_unlock(&st->lock);
return ret;
}
static void adf4377_gpio_init(struct adf4377_state *st)
{
if (st->gpio_ce) {
gpiod_set_value(st->gpio_ce, 1);
/* Delay for SPI register bits to settle to their power-on reset state */
fsleep(200);
}
if (st->gpio_enclk1)
gpiod_set_value(st->gpio_enclk1, 1);
if (st->gpio_enclk2)
gpiod_set_value(st->gpio_enclk2, 1);
}
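/*
 * Bring-up sequence: toggle the enable GPIOs, soft reset, write the
 * reserved-register defaults, switch to 4-wire SPI, power up all
 * blocks, select MUXOUT, then derive the R divider, PFD rate and the
 * divider and timeout values later used by adf4377_set_freq().
 */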
static int adf4377_init(struct adf4377_state *st)
{
struct spi_device *spi = st->spi;
int ret;
adf4377_gpio_init(st);
ret = adf4377_soft_reset(st);
if (ret) {
dev_err(&spi->dev, "Failed to soft reset.\n");
return ret;
}
ret = regmap_multi_reg_write(st->regmap, adf4377_reg_defaults,
ARRAY_SIZE(adf4377_reg_defaults));
if (ret) {
dev_err(&spi->dev, "Failed to set default registers.\n");
return ret;
}
ret = regmap_update_bits(st->regmap, 0x00,
ADF4377_0000_SDO_ACTIVE_MSK | ADF4377_0000_SDO_ACTIVE_R_MSK,
FIELD_PREP(ADF4377_0000_SDO_ACTIVE_MSK,
ADF4377_0000_SDO_ACTIVE_SPI_4W) |
FIELD_PREP(ADF4377_0000_SDO_ACTIVE_R_MSK,
ADF4377_0000_SDO_ACTIVE_SPI_4W));
if (ret) {
dev_err(&spi->dev, "Failed to set 4-Wire Operation.\n");
return ret;
}
st->clkin_freq = clk_get_rate(st->clkin);
/* Power Up */
ret = regmap_write(st->regmap, 0x1a,
FIELD_PREP(ADF4377_001A_PD_ALL_MSK, 0) |
FIELD_PREP(ADF4377_001A_PD_RDIV_MSK, 0) |
FIELD_PREP(ADF4377_001A_PD_NDIV_MSK, 0) |
FIELD_PREP(ADF4377_001A_PD_VCO_MSK, 0) |
FIELD_PREP(ADF4377_001A_PD_LD_MSK, 0) |
FIELD_PREP(ADF4377_001A_PD_PFDCP_MSK, 0) |
FIELD_PREP(ADF4377_001A_PD_CLKOUT1_MSK, 0) |
FIELD_PREP(ADF4377_001A_PD_CLKOUT2_MSK, 0));
if (ret) {
dev_err(&spi->dev, "Failed to set power down registers.\n");
return ret;
}
/* Set Mux Output */
ret = regmap_update_bits(st->regmap, 0x1D,
ADF4377_001D_MUXOUT_MSK,
FIELD_PREP(ADF4377_001D_MUXOUT_MSK, st->muxout_select));
if (ret)
return ret;
/* Compute PFD */
st->ref_div_factor = 0;
do {
st->ref_div_factor++;
st->f_pfd = st->clkin_freq / st->ref_div_factor;
} while (st->f_pfd > ADF4377_MAX_FREQ_PFD);
if (st->f_pfd > ADF4377_MAX_FREQ_PFD || st->f_pfd < ADF4377_MIN_FREQ_PFD)
return -EINVAL;
st->f_div_rclk = st->f_pfd;
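/*
 * Choose DCLK_DIV1/DCLK_DIV2/DCLK_MODE from the PFD frequency band;
 * f_div_rclk tracks the divided reference rate and feeds the timeout
 * and divider calculations below.
 */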
if (st->f_pfd <= ADF4377_FREQ_PFD_80MHZ) {
st->dclk_div1 = ADF4377_002F_DCLK_DIV1_1;
st->dclk_div2 = ADF4377_0011_DCLK_DIV2_1;
st->dclk_mode = 0;
} else if (st->f_pfd <= ADF4377_FREQ_PFD_125MHZ) {
st->dclk_div1 = ADF4377_002F_DCLK_DIV1_1;
st->dclk_div2 = ADF4377_0011_DCLK_DIV2_1;
st->dclk_mode = 1;
} else if (st->f_pfd <= ADF4377_FREQ_PFD_160MHZ) {
st->dclk_div1 = ADF4377_002F_DCLK_DIV1_2;
st->dclk_div2 = ADF4377_0011_DCLK_DIV2_1;
st->dclk_mode = 0;
st->f_div_rclk /= 2;
} else if (st->f_pfd <= ADF4377_FREQ_PFD_250MHZ) {
st->dclk_div1 = ADF4377_002F_DCLK_DIV1_2;
st->dclk_div2 = ADF4377_0011_DCLK_DIV2_1;
st->dclk_mode = 1;
st->f_div_rclk /= 2;
} else if (st->f_pfd <= ADF4377_FREQ_PFD_320MHZ) {
st->dclk_div1 = ADF4377_002F_DCLK_DIV1_2;
st->dclk_div2 = ADF4377_0011_DCLK_DIV2_2;
st->dclk_mode = 0;
st->f_div_rclk /= 4;
} else {
st->dclk_div1 = ADF4377_002F_DCLK_DIV1_2;
st->dclk_div2 = ADF4377_0011_DCLK_DIV2_2;
st->dclk_mode = 1;
st->f_div_rclk /= 4;
}
st->synth_lock_timeout = DIV_ROUND_UP(st->f_div_rclk, 50000);
st->vco_alc_timeout = DIV_ROUND_UP(st->f_div_rclk, 20000);
st->vco_band_div = DIV_ROUND_UP(st->f_div_rclk, 150000 * 16 * (1 << st->dclk_mode));
st->adc_clk_div = DIV_ROUND_UP((st->f_div_rclk / 400000 - 2), 4);
return 0;
}
static ssize_t adf4377_read(struct iio_dev *indio_dev, uintptr_t private,
const struct iio_chan_spec *chan, char *buf)
{
struct adf4377_state *st = iio_priv(indio_dev);
u64 val = 0;
int ret;
switch ((u32)private) {
case ADF4377_FREQ:
ret = adf4377_get_freq(st, &val);
if (ret)
return ret;
return sysfs_emit(buf, "%llu\n", val);
default:
return -EINVAL;
}
}
static ssize_t adf4377_write(struct iio_dev *indio_dev, uintptr_t private,
const struct iio_chan_spec *chan, const char *buf,
size_t len)
{
struct adf4377_state *st = iio_priv(indio_dev);
unsigned long long freq;
int ret;
switch ((u32)private) {
case ADF4377_FREQ:
ret = kstrtoull(buf, 10, &freq);
if (ret)
return ret;
ret = adf4377_set_freq(st, freq);
if (ret)
return ret;
return len;
default:
return -EINVAL;
}
}
#define _ADF4377_EXT_INFO(_name, _shared, _ident) { \
.name = _name, \
.read = adf4377_read, \
.write = adf4377_write, \
.private = _ident, \
.shared = _shared, \
}
static const struct iio_chan_spec_ext_info adf4377_ext_info[] = {
/*
 * Ordinarily IIO_CHAN_INFO_FREQUENCY would be used here, but the
 * frequency range in Hz needs values larger than 2^32, so the
 * frequency is exposed through ext_info as a 64-bit value instead.
 */
_ADF4377_EXT_INFO("frequency", IIO_SEPARATE, ADF4377_FREQ),
{ }
};
static const struct iio_chan_spec adf4377_channels[] = {
{
.type = IIO_ALTVOLTAGE,
.indexed = 1,
.output = 1,
.channel = 0,
.ext_info = adf4377_ext_info,
},
};
static int adf4377_properties_parse(struct adf4377_state *st)
{
struct spi_device *spi = st->spi;
const char *str;
int ret;
st->clkin = devm_clk_get_enabled(&spi->dev, "ref_in");
if (IS_ERR(st->clkin))
return dev_err_probe(&spi->dev, PTR_ERR(st->clkin),
"failed to get the reference input clock\n");
st->gpio_ce = devm_gpiod_get_optional(&st->spi->dev, "chip-enable",
GPIOD_OUT_LOW);
if (IS_ERR(st->gpio_ce))
return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_ce),
"failed to get the CE GPIO\n");
st->gpio_enclk1 = devm_gpiod_get_optional(&st->spi->dev, "clk1-enable",
GPIOD_OUT_LOW);
if (IS_ERR(st->gpio_enclk1))
return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_enclk1),
"failed to get the CE GPIO\n");
st->gpio_enclk2 = devm_gpiod_get_optional(&st->spi->dev, "clk2-enable",
GPIOD_OUT_LOW);
if (IS_ERR(st->gpio_enclk2))
return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_enclk2),
"failed to get the CE GPIO\n");
ret = device_property_read_string(&spi->dev, "adi,muxout-select", &str);
if (ret) {
st->muxout_select = ADF4377_MUXOUT_HIGH_Z;
} else {
ret = match_string(adf4377_muxout_modes, ARRAY_SIZE(adf4377_muxout_modes), str);
if (ret < 0)
return ret;
st->muxout_select = ret;
}
return 0;
}
static int adf4377_freq_change(struct notifier_block *nb, unsigned long action, void *data)
{
struct adf4377_state *st = container_of(nb, struct adf4377_state, nb);
int ret;
if (action == POST_RATE_CHANGE) {
mutex_lock(&st->lock);
ret = notifier_from_errno(adf4377_init(st));
mutex_unlock(&st->lock);
return ret;
}
return NOTIFY_OK;
}
static int adf4377_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct regmap *regmap;
struct adf4377_state *st;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
regmap = devm_regmap_init_spi(spi, &adf4377_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
st = iio_priv(indio_dev);
indio_dev->info = &adf4377_info;
indio_dev->name = "adf4377";
indio_dev->channels = adf4377_channels;
indio_dev->num_channels = ARRAY_SIZE(adf4377_channels);
st->regmap = regmap;
st->spi = spi;
mutex_init(&st->lock);
ret = adf4377_properties_parse(st);
if (ret)
return ret;
st->nb.notifier_call = adf4377_freq_change;
ret = devm_clk_notifier_register(&spi->dev, st->clkin, &st->nb);
if (ret)
return ret;
ret = adf4377_init(st);
if (ret)
return ret;
return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id adf4377_id[] = {
{ "adf4377", 0 },
{}
};
MODULE_DEVICE_TABLE(spi, adf4377_id);
static const struct of_device_id adf4377_of_match[] = {
{ .compatible = "adi,adf4377" },
{}
};
MODULE_DEVICE_TABLE(of, adf4377_of_match);
static struct spi_driver adf4377_driver = {
.driver = {
.name = "adf4377",
.of_match_table = adf4377_of_match,
},
.probe = adf4377_probe,
.id_table = adf4377_id,
};
module_spi_driver(adf4377_driver);
MODULE_AUTHOR("Antoniu Miclaus <[email protected]>");
MODULE_DESCRIPTION("Analog Devices ADF4377");
MODULE_LICENSE("GPL");
| linux-master | drivers/iio/frequency/adf4377.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ADMV1013 driver
*
* Copyright 2021 Analog Devices Inc.
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/notifier.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <linux/units.h>
#include <asm/unaligned.h>
/* ADMV1013 Register Map */
#define ADMV1013_REG_SPI_CONTROL 0x00
#define ADMV1013_REG_ALARM 0x01
#define ADMV1013_REG_ALARM_MASKS 0x02
#define ADMV1013_REG_ENABLE 0x03
#define ADMV1013_REG_LO_AMP_I 0x05
#define ADMV1013_REG_LO_AMP_Q 0x06
#define ADMV1013_REG_OFFSET_ADJUST_I 0x07
#define ADMV1013_REG_OFFSET_ADJUST_Q 0x08
#define ADMV1013_REG_QUAD 0x09
#define ADMV1013_REG_VVA_TEMP_COMP 0x0A
/* ADMV1013_REG_SPI_CONTROL Map */
#define ADMV1013_PARITY_EN_MSK BIT(15)
#define ADMV1013_SPI_SOFT_RESET_MSK BIT(14)
#define ADMV1013_CHIP_ID_MSK GENMASK(11, 4)
#define ADMV1013_CHIP_ID 0xA
#define ADMV1013_REVISION_ID_MSK GENMASK(3, 0)
/* ADMV1013_REG_ALARM Map */
#define ADMV1013_PARITY_ERROR_MSK BIT(15)
#define ADMV1013_TOO_FEW_ERRORS_MSK BIT(14)
#define ADMV1013_TOO_MANY_ERRORS_MSK BIT(13)
#define ADMV1013_ADDRESS_RANGE_ERROR_MSK BIT(12)
/* ADMV1013_REG_ENABLE Map */
#define ADMV1013_VGA_PD_MSK BIT(15)
#define ADMV1013_MIXER_PD_MSK BIT(14)
#define ADMV1013_QUAD_PD_MSK GENMASK(13, 11)
#define ADMV1013_BG_PD_MSK BIT(10)
#define ADMV1013_MIXER_IF_EN_MSK BIT(7)
#define ADMV1013_DET_EN_MSK BIT(5)
/* ADMV1013_REG_LO_AMP Map */
#define ADMV1013_LOAMP_PH_ADJ_FINE_MSK GENMASK(13, 7)
#define ADMV1013_MIXER_VGATE_MSK GENMASK(6, 0)
/* ADMV1013_REG_OFFSET_ADJUST Map */
#define ADMV1013_MIXER_OFF_ADJ_P_MSK GENMASK(15, 9)
#define ADMV1013_MIXER_OFF_ADJ_N_MSK GENMASK(8, 2)
/* ADMV1013_REG_QUAD Map */
#define ADMV1013_QUAD_SE_MODE_MSK GENMASK(9, 6)
#define ADMV1013_QUAD_FILTERS_MSK GENMASK(3, 0)
/* ADMV1013_REG_VVA_TEMP_COMP Map */
#define ADMV1013_VVA_TEMP_COMP_MSK GENMASK(15, 0)
/* ADMV1013 Miscellaneous Defines */
#define ADMV1013_READ BIT(7)
#define ADMV1013_REG_ADDR_READ_MSK GENMASK(6, 1)
#define ADMV1013_REG_ADDR_WRITE_MSK GENMASK(22, 17)
#define ADMV1013_REG_DATA_MSK GENMASK(16, 1)
enum {
ADMV1013_IQ_MODE,
ADMV1013_IF_MODE
};
enum {
ADMV1013_RFMOD_I_CALIBPHASE,
ADMV1013_RFMOD_Q_CALIBPHASE,
};
enum {
ADMV1013_SE_MODE_POS = 6,
ADMV1013_SE_MODE_NEG = 9,
ADMV1013_SE_MODE_DIFF = 12
};
struct admv1013_state {
struct spi_device *spi;
struct clk *clkin;
/* Protect against concurrent accesses to the device and to data */
struct mutex lock;
struct regulator *reg;
struct notifier_block nb;
unsigned int input_mode;
unsigned int quad_se_mode;
bool det_en;
u8 data[3] __aligned(IIO_DMA_MINALIGN);
};
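/*
 * Register access uses 24-bit frames: a read/write flag and the 6-bit
 * register address in the top bits, with the 16-bit data word below
 * (ADMV1013_REG_DATA_MSK). The __-prefixed helpers expect st->lock to
 * be held by the caller; the plain wrappers take the lock themselves.
 */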
static int __admv1013_spi_read(struct admv1013_state *st, unsigned int reg,
unsigned int *val)
{
int ret;
struct spi_transfer t = {0};
st->data[0] = ADMV1013_READ | FIELD_PREP(ADMV1013_REG_ADDR_READ_MSK, reg);
st->data[1] = 0x0;
st->data[2] = 0x0;
t.rx_buf = &st->data[0];
t.tx_buf = &st->data[0];
t.len = 3;
ret = spi_sync_transfer(st->spi, &t, 1);
if (ret)
return ret;
*val = FIELD_GET(ADMV1013_REG_DATA_MSK, get_unaligned_be24(&st->data[0]));
return ret;
}
static int admv1013_spi_read(struct admv1013_state *st, unsigned int reg,
unsigned int *val)
{
int ret;
mutex_lock(&st->lock);
ret = __admv1013_spi_read(st, reg, val);
mutex_unlock(&st->lock);
return ret;
}
static int __admv1013_spi_write(struct admv1013_state *st,
unsigned int reg,
unsigned int val)
{
put_unaligned_be24(FIELD_PREP(ADMV1013_REG_DATA_MSK, val) |
FIELD_PREP(ADMV1013_REG_ADDR_WRITE_MSK, reg), &st->data[0]);
return spi_write(st->spi, &st->data[0], 3);
}
static int admv1013_spi_write(struct admv1013_state *st, unsigned int reg,
unsigned int val)
{
int ret;
mutex_lock(&st->lock);
ret = __admv1013_spi_write(st, reg, val);
mutex_unlock(&st->lock);
return ret;
}
static int __admv1013_spi_update_bits(struct admv1013_state *st, unsigned int reg,
unsigned int mask, unsigned int val)
{
int ret;
unsigned int data, temp;
ret = __admv1013_spi_read(st, reg, &data);
if (ret)
return ret;
temp = (data & ~mask) | (val & mask);
return __admv1013_spi_write(st, reg, temp);
}
static int admv1013_spi_update_bits(struct admv1013_state *st, unsigned int reg,
unsigned int mask, unsigned int val)
{
int ret;
mutex_lock(&st->lock);
ret = __admv1013_spi_update_bits(st, reg, mask, val);
mutex_unlock(&st->lock);
return ret;
}
static int admv1013_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long info)
{
struct admv1013_state *st = iio_priv(indio_dev);
unsigned int data, addr;
int ret;
switch (info) {
case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->channel) {
case IIO_MOD_I:
addr = ADMV1013_REG_OFFSET_ADJUST_I;
break;
case IIO_MOD_Q:
addr = ADMV1013_REG_OFFSET_ADJUST_Q;
break;
default:
return -EINVAL;
}
ret = admv1013_spi_read(st, addr, &data);
if (ret)
return ret;
if (!chan->channel)
*val = FIELD_GET(ADMV1013_MIXER_OFF_ADJ_P_MSK, data);
else
*val = FIELD_GET(ADMV1013_MIXER_OFF_ADJ_N_MSK, data);
return IIO_VAL_INT;
default:
return -EINVAL;
}
}
static int admv1013_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long info)
{
struct admv1013_state *st = iio_priv(indio_dev);
unsigned int addr, data, msk;
switch (info) {
case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->channel2) {
case IIO_MOD_I:
addr = ADMV1013_REG_OFFSET_ADJUST_I;
break;
case IIO_MOD_Q:
addr = ADMV1013_REG_OFFSET_ADJUST_Q;
break;
default:
return -EINVAL;
}
if (!chan->channel) {
msk = ADMV1013_MIXER_OFF_ADJ_P_MSK;
data = FIELD_PREP(ADMV1013_MIXER_OFF_ADJ_P_MSK, val);
} else {
msk = ADMV1013_MIXER_OFF_ADJ_N_MSK;
data = FIELD_PREP(ADMV1013_MIXER_OFF_ADJ_N_MSK, val);
}
return admv1013_spi_update_bits(st, addr, msk, data);
default:
return -EINVAL;
}
}
static ssize_t admv1013_read(struct iio_dev *indio_dev,
uintptr_t private,
const struct iio_chan_spec *chan,
char *buf)
{
struct admv1013_state *st = iio_priv(indio_dev);
unsigned int data, addr;
int ret;
switch ((u32)private) {
case ADMV1013_RFMOD_I_CALIBPHASE:
addr = ADMV1013_REG_LO_AMP_I;
break;
case ADMV1013_RFMOD_Q_CALIBPHASE:
addr = ADMV1013_REG_LO_AMP_Q;
break;
default:
return -EINVAL;
}
ret = admv1013_spi_read(st, addr, &data);
if (ret)
return ret;
data = FIELD_GET(ADMV1013_LOAMP_PH_ADJ_FINE_MSK, data);
return sysfs_emit(buf, "%u\n", data);
}
static ssize_t admv1013_write(struct iio_dev *indio_dev,
uintptr_t private,
const struct iio_chan_spec *chan,
const char *buf, size_t len)
{
struct admv1013_state *st = iio_priv(indio_dev);
unsigned int data;
int ret;
ret = kstrtou32(buf, 10, &data);
if (ret)
return ret;
data = FIELD_PREP(ADMV1013_LOAMP_PH_ADJ_FINE_MSK, data);
switch ((u32)private) {
case ADMV1013_RFMOD_I_CALIBPHASE:
ret = admv1013_spi_update_bits(st, ADMV1013_REG_LO_AMP_I,
ADMV1013_LOAMP_PH_ADJ_FINE_MSK,
data);
if (ret)
return ret;
break;
case ADMV1013_RFMOD_Q_CALIBPHASE:
ret = admv1013_spi_update_bits(st, ADMV1013_REG_LO_AMP_Q,
ADMV1013_LOAMP_PH_ADJ_FINE_MSK,
data);
if (ret)
return ret;
break;
default:
return -EINVAL;
}
return ret ? ret : len;
}
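/*
 * Select the quadrature filter band from the current LO (clkin) rate:
 * each overlapping frequency range below maps to a raw code for the
 * QUAD_FILTERS field, and anything outside those ranges clears it.
 */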
static int admv1013_update_quad_filters(struct admv1013_state *st)
{
unsigned int filt_raw;
u64 rate = clk_get_rate(st->clkin);
if (rate >= (5400 * HZ_PER_MHZ) && rate <= (7000 * HZ_PER_MHZ))
filt_raw = 15;
else if (rate >= (5400 * HZ_PER_MHZ) && rate <= (8000 * HZ_PER_MHZ))
filt_raw = 10;
else if (rate >= (6600 * HZ_PER_MHZ) && rate <= (9200 * HZ_PER_MHZ))
filt_raw = 5;
else
filt_raw = 0;
return __admv1013_spi_update_bits(st, ADMV1013_REG_QUAD,
ADMV1013_QUAD_FILTERS_MSK,
FIELD_PREP(ADMV1013_QUAD_FILTERS_MSK, filt_raw));
}
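/*
 * Derive the MIXER_VGATE code from the common-mode (VCM) regulator
 * voltage, reported in microvolts, using two linear segments (presumably
 * taken from the datasheet). For example, vcm = 800000 uV gives
 * (2389 * 800000 / 1000000 + 8100) / 100 = (1911 + 8100) / 100 = 100.
 */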
static int admv1013_update_mixer_vgate(struct admv1013_state *st)
{
unsigned int mixer_vgate;
int vcm;
vcm = regulator_get_voltage(st->reg);
if (vcm < 0)
return vcm;
if (vcm < 1800000)
mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100;
else if (vcm > 1800000 && vcm < 2600000)
mixer_vgate = (2375 * vcm / 1000000 + 125) / 100;
else
return -EINVAL;
return __admv1013_spi_update_bits(st, ADMV1013_REG_LO_AMP_I,
ADMV1013_MIXER_VGATE_MSK,
FIELD_PREP(ADMV1013_MIXER_VGATE_MSK, mixer_vgate));
}
static int admv1013_reg_access(struct iio_dev *indio_dev,
unsigned int reg,
unsigned int write_val,
unsigned int *read_val)
{
struct admv1013_state *st = iio_priv(indio_dev);
if (read_val)
return admv1013_spi_read(st, reg, read_val);
else
return admv1013_spi_write(st, reg, write_val);
}
static const struct iio_info admv1013_info = {
.read_raw = admv1013_read_raw,
.write_raw = admv1013_write_raw,
.debugfs_reg_access = &admv1013_reg_access,
};
static const char * const admv1013_vcc_regs[] = {
"vcc-drv", "vcc2-drv", "vcc-vva", "vcc-amp1", "vcc-amp2",
"vcc-env", "vcc-bg", "vcc-bg2", "vcc-mixer", "vcc-quad"
};
static int admv1013_freq_change(struct notifier_block *nb, unsigned long action, void *data)
{
struct admv1013_state *st = container_of(nb, struct admv1013_state, nb);
int ret;
if (action == POST_RATE_CHANGE) {
mutex_lock(&st->lock);
ret = notifier_from_errno(admv1013_update_quad_filters(st));
mutex_unlock(&st->lock);
return ret;
}
return NOTIFY_OK;
}
#define _ADMV1013_EXT_INFO(_name, _shared, _ident) { \
.name = _name, \
.read = admv1013_read, \
.write = admv1013_write, \
.private = _ident, \
.shared = _shared, \
}
static const struct iio_chan_spec_ext_info admv1013_ext_info[] = {
_ADMV1013_EXT_INFO("i_calibphase", IIO_SEPARATE, ADMV1013_RFMOD_I_CALIBPHASE),
_ADMV1013_EXT_INFO("q_calibphase", IIO_SEPARATE, ADMV1013_RFMOD_Q_CALIBPHASE),
{ },
};
#define ADMV1013_CHAN_PHASE(_channel, _channel2, _admv1013_ext_info) { \
.type = IIO_ALTVOLTAGE, \
.output = 0, \
.indexed = 1, \
.channel2 = _channel2, \
.channel = _channel, \
.differential = 1, \
.ext_info = _admv1013_ext_info, \
}
#define ADMV1013_CHAN_CALIB(_channel, rf_comp) { \
.type = IIO_ALTVOLTAGE, \
.output = 0, \
.indexed = 1, \
.channel = _channel, \
.channel2 = IIO_MOD_##rf_comp, \
.info_mask_separate = BIT(IIO_CHAN_INFO_CALIBBIAS), \
}
static const struct iio_chan_spec admv1013_channels[] = {
ADMV1013_CHAN_PHASE(0, 1, admv1013_ext_info),
ADMV1013_CHAN_CALIB(0, I),
ADMV1013_CHAN_CALIB(0, Q),
ADMV1013_CHAN_CALIB(1, I),
ADMV1013_CHAN_CALIB(1, Q),
};
static int admv1013_init(struct admv1013_state *st)
{
int ret;
unsigned int data;
struct spi_device *spi = st->spi;
/* Perform a software reset */
ret = __admv1013_spi_update_bits(st, ADMV1013_REG_SPI_CONTROL,
ADMV1013_SPI_SOFT_RESET_MSK,
FIELD_PREP(ADMV1013_SPI_SOFT_RESET_MSK, 1));
if (ret)
return ret;
ret = __admv1013_spi_update_bits(st, ADMV1013_REG_SPI_CONTROL,
ADMV1013_SPI_SOFT_RESET_MSK,
FIELD_PREP(ADMV1013_SPI_SOFT_RESET_MSK, 0));
if (ret)
return ret;
ret = __admv1013_spi_read(st, ADMV1013_REG_SPI_CONTROL, &data);
if (ret)
return ret;
data = FIELD_GET(ADMV1013_CHIP_ID_MSK, data);
if (data != ADMV1013_CHIP_ID) {
dev_err(&spi->dev, "Invalid Chip ID.\n");
return -EINVAL;
}
ret = __admv1013_spi_write(st, ADMV1013_REG_VVA_TEMP_COMP, 0xE700);
if (ret)
return ret;
data = FIELD_PREP(ADMV1013_QUAD_SE_MODE_MSK, st->quad_se_mode);
ret = __admv1013_spi_update_bits(st, ADMV1013_REG_QUAD,
ADMV1013_QUAD_SE_MODE_MSK, data);
if (ret)
return ret;
ret = admv1013_update_mixer_vgate(st);
if (ret)
return ret;
ret = admv1013_update_quad_filters(st);
if (ret)
return ret;
return __admv1013_spi_update_bits(st, ADMV1013_REG_ENABLE,
ADMV1013_DET_EN_MSK |
ADMV1013_MIXER_IF_EN_MSK,
st->det_en |
st->input_mode);
}
static void admv1013_reg_disable(void *data)
{
regulator_disable(data);
}
static void admv1013_powerdown(void *data)
{
unsigned int enable_reg, enable_reg_msk;
/* Disable all components in the Enable Register */
enable_reg_msk = ADMV1013_VGA_PD_MSK |
ADMV1013_MIXER_PD_MSK |
ADMV1013_QUAD_PD_MSK |
ADMV1013_BG_PD_MSK |
ADMV1013_MIXER_IF_EN_MSK |
ADMV1013_DET_EN_MSK;
enable_reg = FIELD_PREP(ADMV1013_VGA_PD_MSK, 1) |
FIELD_PREP(ADMV1013_MIXER_PD_MSK, 1) |
FIELD_PREP(ADMV1013_QUAD_PD_MSK, 7) |
FIELD_PREP(ADMV1013_BG_PD_MSK, 1) |
FIELD_PREP(ADMV1013_MIXER_IF_EN_MSK, 0) |
FIELD_PREP(ADMV1013_DET_EN_MSK, 0);
admv1013_spi_update_bits(data, ADMV1013_REG_ENABLE, enable_reg_msk, enable_reg);
}
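/*
 * Firmware properties consumed below: "adi,detector-enable" (boolean),
 * "adi,input-mode" ("iq" or "if", default "iq"), "adi,quad-se-mode"
 * ("diff", "se-pos" or "se-neg", default "diff"), the "vcm" regulator and
 * the bulk VCC supplies. An illustrative (not authoritative) device tree
 * node could look like:
 *
 *	upconverter@0 {
 *		compatible = "adi,admv1013";
 *		reg = <0>;
 *		clocks = <&lo_source>;
 *		clock-names = "lo_in";
 *		vcm-supply = <&vcm_reg>;
 *		adi,input-mode = "iq";
 *		adi,quad-se-mode = "diff";
 *		adi,detector-enable;
 *	};
 *
 * See the dt-binding document for the canonical schema.
 */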
static int admv1013_properties_parse(struct admv1013_state *st)
{
int ret;
const char *str;
struct spi_device *spi = st->spi;
st->det_en = device_property_read_bool(&spi->dev, "adi,detector-enable");
ret = device_property_read_string(&spi->dev, "adi,input-mode", &str);
if (ret)
str = "iq";
if (!strcmp(str, "iq"))
st->input_mode = ADMV1013_IQ_MODE;
else if (!strcmp(str, "if"))
st->input_mode = ADMV1013_IF_MODE;
else
return -EINVAL;
ret = device_property_read_string(&spi->dev, "adi,quad-se-mode", &str);
if (ret)
str = "diff";
if (!strcmp(str, "diff"))
st->quad_se_mode = ADMV1013_SE_MODE_DIFF;
else if (!strcmp(str, "se-pos"))
st->quad_se_mode = ADMV1013_SE_MODE_POS;
else if (!strcmp(str, "se-neg"))
st->quad_se_mode = ADMV1013_SE_MODE_NEG;
else
return -EINVAL;
st->reg = devm_regulator_get(&spi->dev, "vcm");
if (IS_ERR(st->reg))
return dev_err_probe(&spi->dev, PTR_ERR(st->reg),
"failed to get the common-mode voltage\n");
ret = devm_regulator_bulk_get_enable(&st->spi->dev,
ARRAY_SIZE(admv1013_vcc_regs),
admv1013_vcc_regs);
if (ret) {
dev_err_probe(&spi->dev, ret,
"Failed to request VCC regulators\n");
return ret;
}
return 0;
}
static int admv1013_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct admv1013_state *st;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
st = iio_priv(indio_dev);
indio_dev->info = &admv1013_info;
indio_dev->name = "admv1013";
indio_dev->channels = admv1013_channels;
indio_dev->num_channels = ARRAY_SIZE(admv1013_channels);
st->spi = spi;
ret = admv1013_properties_parse(st);
if (ret)
return ret;
ret = regulator_enable(st->reg);
if (ret) {
dev_err(&spi->dev, "Failed to enable specified Common-Mode Voltage!\n");
return ret;
}
ret = devm_add_action_or_reset(&spi->dev, admv1013_reg_disable,
st->reg);
if (ret)
return ret;
st->clkin = devm_clk_get_enabled(&spi->dev, "lo_in");
if (IS_ERR(st->clkin))
return dev_err_probe(&spi->dev, PTR_ERR(st->clkin),
"failed to get the LO input clock\n");
st->nb.notifier_call = admv1013_freq_change;
ret = devm_clk_notifier_register(&spi->dev, st->clkin, &st->nb);
if (ret)
return ret;
mutex_init(&st->lock);
ret = admv1013_init(st);
if (ret) {
dev_err(&spi->dev, "admv1013 init failed\n");
return ret;
}
ret = devm_add_action_or_reset(&spi->dev, admv1013_powerdown, st);
if (ret)
return ret;
return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id admv1013_id[] = {
{ "admv1013", 0 },
{}
};
MODULE_DEVICE_TABLE(spi, admv1013_id);
static const struct of_device_id admv1013_of_match[] = {
{ .compatible = "adi,admv1013" },
{},
};
MODULE_DEVICE_TABLE(of, admv1013_of_match);
static struct spi_driver admv1013_driver = {
.driver = {
.name = "admv1013",
.of_match_table = admv1013_of_match,
},
.probe = admv1013_probe,
.id_table = admv1013_id,
};
module_spi_driver(admv1013_driver);
MODULE_AUTHOR("Antoniu Miclaus <[email protected]");
MODULE_DESCRIPTION("Analog Devices ADMV1013");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/frequency/admv1013.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* ADMV4420
*
* Copyright 2021 Analog Devices Inc.
*/
#include <linux/bitfield.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/units.h>
#include <asm/unaligned.h>
/* ADMV4420 Register Map */
#define ADMV4420_SPI_CONFIG_1 0x00
#define ADMV4420_SPI_CONFIG_2 0x01
#define ADMV4420_CHIPTYPE 0x03
#define ADMV4420_PRODUCT_ID_L 0x04
#define ADMV4420_PRODUCT_ID_H 0x05
#define ADMV4420_SCRATCHPAD 0x0A
#define ADMV4420_SPI_REV 0x0B
#define ADMV4420_ENABLES 0x103
#define ADMV4420_SDO_LEVEL 0x108
#define ADMV4420_INT_L 0x200
#define ADMV4420_INT_H 0x201
#define ADMV4420_FRAC_L 0x202
#define ADMV4420_FRAC_M 0x203
#define ADMV4420_FRAC_H 0x204
#define ADMV4420_MOD_L 0x208
#define ADMV4420_MOD_M 0x209
#define ADMV4420_MOD_H 0x20A
#define ADMV4420_R_DIV_L 0x20C
#define ADMV4420_R_DIV_H 0x20D
#define ADMV4420_REFERENCE 0x20E
#define ADMV4420_VCO_DATA_READBACK1 0x211
#define ADMV4420_VCO_DATA_READBACK2 0x212
#define ADMV4420_PLL_MUX_SEL 0x213
#define ADMV4420_LOCK_DETECT 0x214
#define ADMV4420_BAND_SELECT 0x215
#define ADMV4420_VCO_ALC_TIMEOUT 0x216
#define ADMV4420_VCO_MANUAL 0x217
#define ADMV4420_ALC 0x219
#define ADMV4420_VCO_TIMEOUT1 0x21C
#define ADMV4420_VCO_TIMEOUT2 0x21D
#define ADMV4420_VCO_BAND_DIV 0x21E
#define ADMV4420_VCO_READBACK_SEL 0x21F
#define ADMV4420_AUTOCAL 0x226
#define ADMV4420_CP_STATE 0x22C
#define ADMV4420_CP_BLEED_EN 0x22D
#define ADMV4420_CP_CURRENT 0x22E
#define ADMV4420_CP_BLEED 0x22F
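/*
 * SPI_CONFIG_1 appears to be a bit-mirrored register (a common ADI
 * convention), hence each setting below is encoded in both halves of the
 * byte so it takes effect regardless of the current bit ordering.
 */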
#define ADMV4420_SPI_CONFIG_1_SDOACTIVE (BIT(4) | BIT(3))
#define ADMV4420_SPI_CONFIG_1_ENDIAN (BIT(5) | BIT(2))
#define ADMV4420_SPI_CONFIG_1_SOFTRESET (BIT(7) | BIT(1))
#define ADMV4420_REFERENCE_DIVIDE_BY_2_MASK BIT(0)
#define ADMV4420_REFERENCE_MODE_MASK BIT(1)
#define ADMV4420_REFERENCE_DOUBLER_MASK BIT(2)
#define ADMV4420_REF_DIVIDER_MAX_VAL GENMASK(9, 0)
#define ADMV4420_N_COUNTER_INT_MAX GENMASK(15, 0)
#define ADMV4420_N_COUNTER_FRAC_MAX GENMASK(23, 0)
#define ADMV4420_N_COUNTER_MOD_MAX GENMASK(23, 0)
#define ENABLE_PLL BIT(6)
#define ENABLE_LO BIT(5)
#define ENABLE_VCO BIT(3)
#define ENABLE_IFAMP BIT(2)
#define ENABLE_MIXER BIT(1)
#define ENABLE_LNA BIT(0)
#define ADMV4420_SCRATCH_PAD_VAL_1 0xAD
#define ADMV4420_SCRATCH_PAD_VAL_2 0xEA
#define ADMV4420_REF_FREQ_HZ 50000000
#define MAX_N_COUNTER 655360UL
#define MAX_R_DIVIDER 1024
#define ADMV4420_DEFAULT_LO_FREQ_HZ 16750000000ULL
enum admv4420_mux_sel {
ADMV4420_LOW = 0,
ADMV4420_LOCK_DTCT = 1,
ADMV4420_R_COUNTER_PER_2 = 4,
ADMV4420_N_CONUTER_PER_2 = 5,
ADMV4420_HIGH = 8,
};
struct admv4420_reference_block {
bool doubler_en;
bool divide_by_2_en;
bool ref_single_ended;
u32 divider;
};
struct admv4420_n_counter {
u32 int_val;
u32 frac_val;
u32 mod_val;
u32 n_counter;
};
struct admv4420_state {
struct spi_device *spi;
struct regmap *regmap;
u64 vco_freq_hz;
u64 lo_freq_hz;
struct admv4420_reference_block ref_block;
struct admv4420_n_counter n_counter;
enum admv4420_mux_sel mux_sel;
struct mutex lock;
u8 transf_buf[4] __aligned(IIO_DMA_MINALIGN);
};
static const struct regmap_config admv4420_regmap_config = {
.reg_bits = 16,
.val_bits = 8,
.read_flag_mask = BIT(7),
};
static int admv4420_reg_access(struct iio_dev *indio_dev,
u32 reg, u32 writeval,
u32 *readval)
{
struct admv4420_state *st = iio_priv(indio_dev);
if (readval)
return regmap_read(st->regmap, reg, readval);
else
return regmap_write(st->regmap, reg, writeval);
}
static int admv4420_set_n_counter(struct admv4420_state *st, u32 int_val,
u32 frac_val, u32 mod_val)
{
int ret;
put_unaligned_le32(frac_val, st->transf_buf);
ret = regmap_bulk_write(st->regmap, ADMV4420_FRAC_L, st->transf_buf, 3);
if (ret)
return ret;
put_unaligned_le32(mod_val, st->transf_buf);
ret = regmap_bulk_write(st->regmap, ADMV4420_MOD_L, st->transf_buf, 3);
if (ret)
return ret;
put_unaligned_le32(int_val, st->transf_buf);
return regmap_bulk_write(st->regmap, ADMV4420_INT_L, st->transf_buf, 2);
}
static int admv4420_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long info)
{
struct admv4420_state *st = iio_priv(indio_dev);
switch (info) {
case IIO_CHAN_INFO_FREQUENCY:
*val = div_u64_rem(st->lo_freq_hz, MICRO, val2);
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
}
static const struct iio_info admv4420_info = {
.read_raw = admv4420_read_raw,
.debugfs_reg_access = &admv4420_reg_access,
};
static const struct iio_chan_spec admv4420_channels[] = {
{
.type = IIO_ALTVOLTAGE,
.output = 0,
.indexed = 1,
.channel = 0,
.info_mask_separate = BIT(IIO_CHAN_INFO_FREQUENCY),
},
};
static void admv4420_fw_parse(struct admv4420_state *st)
{
struct device *dev = &st->spi->dev;
u32 tmp;
int ret;
ret = device_property_read_u32(dev, "adi,lo-freq-khz", &tmp);
if (!ret)
st->lo_freq_hz = (u64)tmp * KILO;
st->ref_block.ref_single_ended = device_property_read_bool(dev,
"adi,ref-ext-single-ended-en");
}
static inline uint64_t admv4420_calc_pfd_vco(struct admv4420_state *st)
{
return div_u64(st->vco_freq_hz * 10, st->n_counter.n_counter);
}
static inline uint32_t admv4420_calc_pfd_ref(struct admv4420_state *st)
{
uint32_t tmp;
u8 doubler, divide_by_2;
doubler = st->ref_block.doubler_en ? 2 : 1;
divide_by_2 = st->ref_block.divide_by_2_en ? 2 : 1;
tmp = ADMV4420_REF_FREQ_HZ * doubler;
return (tmp / (st->ref_block.divider * divide_by_2));
}
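/*
 * Brute-force search for a (reference divider, N counter) pair whose PFD
 * rates match. The VCO runs at half the requested LO frequency and the
 * N counter is kept in tenths (MOD is fixed at 10), hence the factor of
 * 10 in admv4420_calc_pfd_vco(). Worked example for the 16.75 GHz
 * default: VCO = 8.375 GHz, reference = 50 MHz, divider = 1 gives a
 * 50 MHz PFD and N = 83750000000 / 50000000 = 1675, i.e. INT = 167,
 * FRAC = 5, MOD = 10.
 */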
static int admv4420_calc_parameters(struct admv4420_state *st)
{
u64 pfd_ref, pfd_vco;
bool sol_found = false;
st->ref_block.doubler_en = false;
st->ref_block.divide_by_2_en = false;
st->vco_freq_hz = div_u64(st->lo_freq_hz, 2);
for (st->ref_block.divider = 1; st->ref_block.divider < MAX_R_DIVIDER;
st->ref_block.divider++) {
pfd_ref = admv4420_calc_pfd_ref(st);
for (st->n_counter.n_counter = 1; st->n_counter.n_counter < MAX_N_COUNTER;
st->n_counter.n_counter++) {
pfd_vco = admv4420_calc_pfd_vco(st);
if (pfd_ref == pfd_vco) {
sol_found = true;
break;
}
}
if (sol_found)
break;
st->n_counter.n_counter = 1;
}
if (!sol_found)
return -1;
st->n_counter.int_val = div_u64_rem(st->n_counter.n_counter, 10, &st->n_counter.frac_val);
st->n_counter.mod_val = 10;
return 0;
}
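/*
 * Bring-up sequence: soft reset, restore the SDO and endian settings,
 * verify SPI communication with two scratchpad patterns, apply firmware
 * properties, compute the divider and N-counter values, then program the
 * reference, N counter, mux selection and block enables.
 */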
static int admv4420_setup(struct iio_dev *indio_dev)
{
struct admv4420_state *st = iio_priv(indio_dev);
struct device *dev = indio_dev->dev.parent;
u32 val;
int ret;
ret = regmap_write(st->regmap, ADMV4420_SPI_CONFIG_1,
ADMV4420_SPI_CONFIG_1_SOFTRESET);
if (ret)
return ret;
ret = regmap_write(st->regmap, ADMV4420_SPI_CONFIG_1,
ADMV4420_SPI_CONFIG_1_SDOACTIVE |
ADMV4420_SPI_CONFIG_1_ENDIAN);
if (ret)
return ret;
ret = regmap_write(st->regmap,
ADMV4420_SCRATCHPAD,
ADMV4420_SCRATCH_PAD_VAL_1);
if (ret)
return ret;
ret = regmap_read(st->regmap, ADMV4420_SCRATCHPAD, &val);
if (ret)
return ret;
if (val != ADMV4420_SCRATCH_PAD_VAL_1) {
dev_err(dev, "Failed ADMV4420 to read/write scratchpad %x ", val);
return -EIO;
}
ret = regmap_write(st->regmap,
ADMV4420_SCRATCHPAD,
ADMV4420_SCRATCH_PAD_VAL_2);
if (ret)
return ret;
ret = regmap_read(st->regmap, ADMV4420_SCRATCHPAD, &val);
if (ret)
return ret;
if (val != ADMV4420_SCRATCH_PAD_VAL_2) {
dev_err(dev, "Failed to read/write scratchpad %x ", val);
return -EIO;
}
st->mux_sel = ADMV4420_LOCK_DTCT;
st->lo_freq_hz = ADMV4420_DEFAULT_LO_FREQ_HZ;
admv4420_fw_parse(st);
ret = admv4420_calc_parameters(st);
if (ret) {
dev_err(dev, "Failed calc parameters for %lld ", st->vco_freq_hz);
return ret;
}
ret = regmap_write(st->regmap, ADMV4420_R_DIV_L,
FIELD_GET(0xFF, st->ref_block.divider));
if (ret)
return ret;
ret = regmap_write(st->regmap, ADMV4420_R_DIV_H,
FIELD_GET(0xFF00, st->ref_block.divider));
if (ret)
return ret;
ret = regmap_write(st->regmap, ADMV4420_REFERENCE,
st->ref_block.divide_by_2_en |
FIELD_PREP(ADMV4420_REFERENCE_MODE_MASK, st->ref_block.ref_single_ended) |
FIELD_PREP(ADMV4420_REFERENCE_DOUBLER_MASK, st->ref_block.doubler_en));
if (ret)
return ret;
ret = admv4420_set_n_counter(st, st->n_counter.int_val,
st->n_counter.frac_val,
st->n_counter.mod_val);
if (ret)
return ret;
ret = regmap_write(st->regmap, ADMV4420_PLL_MUX_SEL, st->mux_sel);
if (ret)
return ret;
return regmap_write(st->regmap, ADMV4420_ENABLES,
ENABLE_PLL | ENABLE_LO | ENABLE_VCO |
ENABLE_IFAMP | ENABLE_MIXER | ENABLE_LNA);
}
static int admv4420_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct admv4420_state *st;
struct regmap *regmap;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
regmap = devm_regmap_init_spi(spi, &admv4420_regmap_config);
if (IS_ERR(regmap))
return dev_err_probe(&spi->dev, PTR_ERR(regmap),
"Failed to initializing spi regmap\n");
st = iio_priv(indio_dev);
st->spi = spi;
st->regmap = regmap;
indio_dev->name = "admv4420";
indio_dev->info = &admv4420_info;
indio_dev->channels = admv4420_channels;
indio_dev->num_channels = ARRAY_SIZE(admv4420_channels);
ret = admv4420_setup(indio_dev);
if (ret) {
dev_err(&spi->dev, "Setup ADMV4420 failed (%d)\n", ret);
return ret;
}
return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct of_device_id admv4420_of_match[] = {
{ .compatible = "adi,admv4420" },
{ }
};
MODULE_DEVICE_TABLE(of, admv4420_of_match);
static struct spi_driver admv4420_driver = {
.driver = {
.name = "admv4420",
.of_match_table = admv4420_of_match,
},
.probe = admv4420_probe,
};
module_spi_driver(admv4420_driver);
MODULE_AUTHOR("Cristian Pop <[email protected]>");
MODULE_DESCRIPTION("Analog Devices ADMV44200 K Band Downconverter");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/iio/frequency/admv4420.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* hdc2010.c - Support for the TI HDC2010 and HDC2080
* temperature + relative humidity sensors
*
* Copyright (C) 2020 Norphonic AS
* Author: Eugene Zaikonnikov <[email protected]>
*
* Datasheet: https://www.ti.com/product/HDC2010/datasheet
* Datasheet: https://www.ti.com/product/HDC2080/datasheet
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/bitops.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#define HDC2010_REG_TEMP_LOW 0x00
#define HDC2010_REG_TEMP_HIGH 0x01
#define HDC2010_REG_HUMIDITY_LOW 0x02
#define HDC2010_REG_HUMIDITY_HIGH 0x03
#define HDC2010_REG_INTERRUPT_DRDY 0x04
#define HDC2010_REG_TEMP_MAX 0x05
#define HDC2010_REG_HUMIDITY_MAX 0x06
#define HDC2010_REG_INTERRUPT_EN 0x07
#define HDC2010_REG_TEMP_OFFSET_ADJ 0x08
#define HDC2010_REG_HUMIDITY_OFFSET_ADJ 0x09
#define HDC2010_REG_TEMP_THR_L 0x0a
#define HDC2010_REG_TEMP_THR_H 0x0b
#define HDC2010_REG_RH_THR_L 0x0c
#define HDC2010_REG_RH_THR_H 0x0d
#define HDC2010_REG_RESET_DRDY_INT_CONF 0x0e
#define HDC2010_REG_MEASUREMENT_CONF 0x0f
#define HDC2010_MEAS_CONF GENMASK(2, 1)
#define HDC2010_MEAS_TRIG BIT(0)
#define HDC2010_HEATER_EN BIT(3)
#define HDC2010_AMM GENMASK(6, 4)
struct hdc2010_data {
struct i2c_client *client;
struct mutex lock;
u8 measurement_config;
u8 interrupt_config;
u8 drdy_config;
};
enum hdc2010_addr_groups {
HDC2010_GROUP_TEMP = 0,
HDC2010_GROUP_HUMIDITY,
};
struct hdc2010_reg_record {
unsigned long primary;
unsigned long peak;
};
static const struct hdc2010_reg_record hdc2010_reg_translation[] = {
[HDC2010_GROUP_TEMP] = {
.primary = HDC2010_REG_TEMP_LOW,
.peak = HDC2010_REG_TEMP_MAX,
},
[HDC2010_GROUP_HUMIDITY] = {
.primary = HDC2010_REG_HUMIDITY_LOW,
.peak = HDC2010_REG_HUMIDITY_MAX,
},
};
static IIO_CONST_ATTR(out_current_heater_raw_available, "0 1");
static struct attribute *hdc2010_attributes[] = {
&iio_const_attr_out_current_heater_raw_available.dev_attr.attr,
NULL
};
static const struct attribute_group hdc2010_attribute_group = {
.attrs = hdc2010_attributes,
};
static const struct iio_chan_spec hdc2010_channels[] = {
{
.type = IIO_TEMP,
.address = HDC2010_GROUP_TEMP,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_PEAK) |
BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE),
},
{
.type = IIO_HUMIDITYRELATIVE,
.address = HDC2010_GROUP_HUMIDITY,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_PEAK) |
BIT(IIO_CHAN_INFO_SCALE),
},
{
.type = IIO_CURRENT,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.extend_name = "heater",
.output = 1,
},
};
static int hdc2010_update_drdy_config(struct hdc2010_data *data,
char mask, char val)
{
u8 tmp = (~mask & data->drdy_config) | val;
int ret;
ret = i2c_smbus_write_byte_data(data->client,
HDC2010_REG_RESET_DRDY_INT_CONF, tmp);
if (ret)
return ret;
data->drdy_config = tmp;
return 0;
}
static int hdc2010_get_prim_measurement_word(struct hdc2010_data *data,
struct iio_chan_spec const *chan)
{
struct i2c_client *client = data->client;
s32 ret;
ret = i2c_smbus_read_word_data(client,
hdc2010_reg_translation[chan->address].primary);
if (ret < 0)
dev_err(&client->dev, "Could not read sensor measurement word\n");
return ret;
}
static int hdc2010_get_peak_measurement_byte(struct hdc2010_data *data,
struct iio_chan_spec const *chan)
{
struct i2c_client *client = data->client;
s32 ret;
ret = i2c_smbus_read_byte_data(client,
hdc2010_reg_translation[chan->address].peak);
if (ret < 0)
dev_err(&client->dev, "Could not read sensor measurement byte\n");
return ret;
}
static int hdc2010_get_heater_status(struct hdc2010_data *data)
{
return !!(data->drdy_config & HDC2010_HEATER_EN);
}
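/*
 * Raw-to-processed conversion as exposed through IIO: the temperature
 * scale is 165000/65536 (milli-degrees C per LSB) with an offset of
 * -15887.515151 raw counts, i.e. -40 degrees C expressed in LSBs
 * (40000 * 65536 / 165000); the humidity scale is 100000/65536
 * (milli-percent per LSB). Peak registers are only 8 bits wide, so their
 * value is multiplied by 256 to reuse the same scale and offset.
 */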
static int hdc2010_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val,
int *val2, long mask)
{
struct hdc2010_data *data = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_RAW: {
int ret;
if (chan->type == IIO_CURRENT) {
*val = hdc2010_get_heater_status(data);
return IIO_VAL_INT;
}
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
return ret;
mutex_lock(&data->lock);
ret = hdc2010_get_prim_measurement_word(data, chan);
mutex_unlock(&data->lock);
iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
*val = ret;
return IIO_VAL_INT;
}
case IIO_CHAN_INFO_PEAK: {
int ret;
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
return ret;
mutex_lock(&data->lock);
ret = hdc2010_get_peak_measurement_byte(data, chan);
mutex_unlock(&data->lock);
iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
/* Scaling up the value so we can use same offset as RAW */
*val = ret * 256;
return IIO_VAL_INT;
}
case IIO_CHAN_INFO_SCALE:
*val2 = 65536;
if (chan->type == IIO_TEMP)
*val = 165000;
else
*val = 100000;
return IIO_VAL_FRACTIONAL;
case IIO_CHAN_INFO_OFFSET:
*val = -15887;
*val2 = 515151;
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
}
static int hdc2010_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct hdc2010_data *data = iio_priv(indio_dev);
int new, ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
if (chan->type != IIO_CURRENT || val2 != 0)
return -EINVAL;
switch (val) {
case 1:
new = HDC2010_HEATER_EN;
break;
case 0:
new = 0;
break;
default:
return -EINVAL;
}
mutex_lock(&data->lock);
ret = hdc2010_update_drdy_config(data, HDC2010_HEATER_EN, new);
mutex_unlock(&data->lock);
return ret;
default:
return -EINVAL;
}
}
static const struct iio_info hdc2010_info = {
.read_raw = hdc2010_read_raw,
.write_raw = hdc2010_write_raw,
.attrs = &hdc2010_attribute_group,
};
static int hdc2010_probe(struct i2c_client *client)
{
struct iio_dev *indio_dev;
struct hdc2010_data *data;
u8 tmp;
int ret;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
return -EOPNOTSUPP;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
mutex_init(&data->lock);
/*
* As DEVICE ID register does not differentiate between
* HDC2010 and HDC2080, we have the name hardcoded
*/
indio_dev->name = "hdc2010";
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &hdc2010_info;
indio_dev->channels = hdc2010_channels;
indio_dev->num_channels = ARRAY_SIZE(hdc2010_channels);
/* Enable Automatic Measurement Mode at 5Hz */
ret = hdc2010_update_drdy_config(data, HDC2010_AMM, HDC2010_AMM);
if (ret)
return ret;
/*
* We enable both temp and humidity measurement.
* However the measurement won't start even in AMM until triggered.
*/
tmp = (data->measurement_config & ~HDC2010_MEAS_CONF) |
HDC2010_MEAS_TRIG;
ret = i2c_smbus_write_byte_data(client, HDC2010_REG_MEASUREMENT_CONF, tmp);
if (ret) {
dev_warn(&client->dev, "Unable to set up measurement\n");
if (hdc2010_update_drdy_config(data, HDC2010_AMM, 0))
dev_warn(&client->dev, "Unable to restore default AMM\n");
return ret;
}
data->measurement_config = tmp;
return iio_device_register(indio_dev);
}
static void hdc2010_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct hdc2010_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
/* Disable Automatic Measurement Mode */
if (hdc2010_update_drdy_config(data, HDC2010_AMM, 0))
dev_warn(&client->dev, "Unable to restore default AMM\n");
}
static const struct i2c_device_id hdc2010_id[] = {
{ "hdc2010" },
{ "hdc2080" },
{ }
};
MODULE_DEVICE_TABLE(i2c, hdc2010_id);
static const struct of_device_id hdc2010_dt_ids[] = {
{ .compatible = "ti,hdc2010" },
{ .compatible = "ti,hdc2080" },
{ }
};
MODULE_DEVICE_TABLE(of, hdc2010_dt_ids);
static struct i2c_driver hdc2010_driver = {
.driver = {
.name = "hdc2010",
.of_match_table = hdc2010_dt_ids,
},
.probe = hdc2010_probe,
.remove = hdc2010_remove,
.id_table = hdc2010_id,
};
module_i2c_driver(hdc2010_driver);
MODULE_AUTHOR("Eugene Zaikonnikov <[email protected]>");
MODULE_DESCRIPTION("TI HDC2010 humidity and temperature sensor driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/iio/humidity/hdc2010.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Aosong AM2315 relative humidity and temperature
*
* Copyright (c) 2016, Intel Corporation.
*
* 7-bit I2C address: 0x5C.
*/
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#define AM2315_REG_HUM_MSB 0x00
#define AM2315_REG_HUM_LSB 0x01
#define AM2315_REG_TEMP_MSB 0x02
#define AM2315_REG_TEMP_LSB 0x03
#define AM2315_FUNCTION_READ 0x03
#define AM2315_HUM_OFFSET 2
#define AM2315_TEMP_OFFSET 4
#define AM2315_ALL_CHANNEL_MASK GENMASK(1, 0)
#define AM2315_DRIVER_NAME "am2315"
struct am2315_data {
struct i2c_client *client;
struct mutex lock;
/* Ensure timestamp is naturally aligned */
struct {
s16 chans[2];
s64 timestamp __aligned(8);
} scan;
};
struct am2315_sensor_data {
s16 hum_data;
s16 temp_data;
};
static const struct iio_chan_spec am2315_channels[] = {
{
.type = IIO_HUMIDITYRELATIVE,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE),
.scan_index = 0,
.scan_type = {
.sign = 's',
.realbits = 16,
.storagebits = 16,
.endianness = IIO_CPU,
},
},
{
.type = IIO_TEMP,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE),
.scan_index = 1,
.scan_type = {
.sign = 's',
.realbits = 16,
.storagebits = 16,
.endianness = IIO_CPU,
},
},
IIO_CHAN_SOFT_TIMESTAMP(2),
};
/* CRC calculation algorithm, as specified in the datasheet (page 13). */
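/* This is the Modbus CRC-16: poly 0x8005 (0xA001 reflected), init 0xffff. */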
static u16 am2315_crc(u8 *data, u8 nr_bytes)
{
int i;
u16 crc = 0xffff;
while (nr_bytes--) {
crc ^= *data++;
for (i = 0; i < 8; i++) {
if (crc & 0x01) {
crc >>= 1;
crc ^= 0xA001;
} else {
crc >>= 1;
}
}
}
return crc;
}
/* Simple function that sends a few bytes to the device to wake it up. */
static void am2315_ping(struct i2c_client *client)
{
i2c_smbus_read_byte_data(client, AM2315_REG_HUM_MSB);
}
static int am2315_read_data(struct am2315_data *data,
struct am2315_sensor_data *sensor_data)
{
int ret;
/* tx_buf format: <function code> <start addr> <nr of regs to read> */
u8 tx_buf[3] = { AM2315_FUNCTION_READ, AM2315_REG_HUM_MSB, 4 };
/*
* rx_buf format:
* <function code> <number of registers read>
* <humidity MSB> <humidity LSB> <temp MSB> <temp LSB>
* <CRC LSB> <CRC MSB>
*/
u8 rx_buf[8];
u16 crc;
/* First wake up the device. */
am2315_ping(data->client);
mutex_lock(&data->lock);
ret = i2c_master_send(data->client, tx_buf, sizeof(tx_buf));
if (ret < 0) {
dev_err(&data->client->dev, "failed to send read request\n");
goto exit_unlock;
}
/* Wait 2-3 ms, then read back the data sent by the device. */
usleep_range(2000, 3000);
/* Do a bulk data read, then pick out what we need. */
ret = i2c_master_recv(data->client, rx_buf, sizeof(rx_buf));
if (ret < 0) {
dev_err(&data->client->dev, "failed to read sensor data\n");
goto exit_unlock;
}
mutex_unlock(&data->lock);
/*
* Do a CRC check on the data and compare it to the value
* calculated by the device.
*/
crc = am2315_crc(rx_buf, sizeof(rx_buf) - 2);
if ((crc & 0xff) != rx_buf[6] || (crc >> 8) != rx_buf[7]) {
dev_err(&data->client->dev, "failed to verify sensor data\n");
return -EIO;
}
sensor_data->hum_data = (rx_buf[AM2315_HUM_OFFSET] << 8) |
rx_buf[AM2315_HUM_OFFSET + 1];
sensor_data->temp_data = (rx_buf[AM2315_TEMP_OFFSET] << 8) |
rx_buf[AM2315_TEMP_OFFSET + 1];
return ret;
exit_unlock:
mutex_unlock(&data->lock);
return ret;
}
static irqreturn_t am2315_trigger_handler(int irq, void *p)
{
int i;
int ret;
int bit;
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct am2315_data *data = iio_priv(indio_dev);
struct am2315_sensor_data sensor_data;
ret = am2315_read_data(data, &sensor_data);
if (ret < 0)
goto err;
mutex_lock(&data->lock);
if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) {
data->scan.chans[0] = sensor_data.hum_data;
data->scan.chans[1] = sensor_data.temp_data;
} else {
i = 0;
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
data->scan.chans[i] = (bit ? sensor_data.temp_data :
sensor_data.hum_data);
i++;
}
}
mutex_unlock(&data->lock);
iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
pf->timestamp);
err:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
}
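/*
 * The sensor reports both quantities in tenths of a unit, so a constant
 * scale of 100 yields the milli-units (milli-percent RH, milli-degrees C)
 * expected by the IIO core.
 */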
static int am2315_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
int ret;
struct am2315_sensor_data sensor_data;
struct am2315_data *data = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = am2315_read_data(data, &sensor_data);
if (ret < 0)
return ret;
*val = (chan->type == IIO_HUMIDITYRELATIVE) ?
sensor_data.hum_data : sensor_data.temp_data;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 100;
return IIO_VAL_INT;
}
return -EINVAL;
}
static const struct iio_info am2315_info = {
.read_raw = am2315_read_raw,
};
static int am2315_probe(struct i2c_client *client)
{
int ret;
struct iio_dev *indio_dev;
struct am2315_data *data;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev) {
dev_err(&client->dev, "iio allocation failed!\n");
return -ENOMEM;
}
data = iio_priv(indio_dev);
data->client = client;
i2c_set_clientdata(client, indio_dev);
mutex_init(&data->lock);
indio_dev->info = &am2315_info;
indio_dev->name = AM2315_DRIVER_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = am2315_channels;
indio_dev->num_channels = ARRAY_SIZE(am2315_channels);
ret = devm_iio_triggered_buffer_setup(&client->dev,
indio_dev, iio_pollfunc_store_time,
am2315_trigger_handler, NULL);
if (ret < 0) {
dev_err(&client->dev, "iio triggered buffer setup failed\n");
return ret;
}
return devm_iio_device_register(&client->dev, indio_dev);
}
static const struct i2c_device_id am2315_i2c_id[] = {
{"am2315", 0},
{}
};
MODULE_DEVICE_TABLE(i2c, am2315_i2c_id);
static struct i2c_driver am2315_driver = {
.driver = {
.name = "am2315",
},
.probe = am2315_probe,
.id_table = am2315_i2c_id,
};
module_i2c_driver(am2315_driver);
MODULE_AUTHOR("Tiberiu Breana <[email protected]>");
MODULE_DESCRIPTION("Aosong AM2315 relative humidity and temperature");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/iio/humidity/am2315.c |