// SPDX-License-Identifier: GPL-2.0-only
/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
*
* Author: Frank Haverkamp <[email protected]>
* Author: Joerg-Stephan Vogt <[email protected]>
* Author: Michael Jung <[email protected]>
* Author: Michael Ruettger <[email protected]>
*/
/*
* Miscellaneous functionality used in the other GenWQE driver parts.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/page-flags.h>
#include <linux/scatterlist.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/pgtable.h>
#include "genwqe_driver.h"
#include "card_base.h"
#include "card_ddcb.h"
/**
* __genwqe_writeq() - Write 64-bit register
* @cd: genwqe device descriptor
* @byte_offs: byte offset within BAR
* @val: 64-bit value
*
* Return: 0 if success; < 0 if error
*/
int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val)
{
struct pci_dev *pci_dev = cd->pci_dev;
if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
return -EIO;
if (cd->mmio == NULL)
return -EIO;
if (pci_channel_offline(pci_dev))
return -EIO;
__raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs);
return 0;
}
/**
* __genwqe_readq() - Read 64-bit register
* @cd: genwqe device descriptor
* @byte_offs: offset within BAR
*
* Return: value from register
*/
u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs)
{
if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
return 0xffffffffffffffffull;
if ((cd->err_inject & GENWQE_INJECT_GFIR_FATAL) &&
(byte_offs == IO_SLC_CFGREG_GFIR))
return 0x000000000000ffffull;
if ((cd->err_inject & GENWQE_INJECT_GFIR_INFO) &&
(byte_offs == IO_SLC_CFGREG_GFIR))
return 0x00000000ffff0000ull;
if (cd->mmio == NULL)
return 0xffffffffffffffffull;
return be64_to_cpu((__force __be64)__raw_readq(cd->mmio + byte_offs));
}
/**
* __genwqe_writel() - Write 32-bit register
* @cd: genwqe device descriptor
* @byte_offs: byte offset within BAR
* @val: 32-bit value
*
* Return: 0 if success; < 0 if error
*/
int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val)
{
struct pci_dev *pci_dev = cd->pci_dev;
if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
return -EIO;
if (cd->mmio == NULL)
return -EIO;
if (pci_channel_offline(pci_dev))
return -EIO;
__raw_writel((__force u32)cpu_to_be32(val), cd->mmio + byte_offs);
return 0;
}
/**
* __genwqe_readl() - Read 32-bit register
* @cd: genwqe device descriptor
* @byte_offs: offset within BAR
*
* Return: Value from register
*/
u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs)
{
if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
return 0xffffffff;
if (cd->mmio == NULL)
return 0xffffffff;
return be32_to_cpu((__force __be32)__raw_readl(cd->mmio + byte_offs));
}
/**
* genwqe_read_app_id() - Extract app_id
* @cd: genwqe device descriptor
* @app_name: carrier used to pass-back name
* @len: length of data for name
*
* app_unitcfg needs to be filled with valid data first
*/
int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len)
{
int i, j;
u32 app_id = (u32)cd->app_unitcfg;
memset(app_name, 0, len);
for (i = 0, j = 0; j < min(len, 4); j++) {
char ch = (char)((app_id >> (24 - j*8)) & 0xff);
if (ch == ' ')
continue;
app_name[i++] = isprint(ch) ? ch : 'X';
}
return i;
}
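/*
 * Decoding sketch (hypothetical value, not from real hardware): if the
 * low 32 bits of app_unitcfg were 0x475A4950, the bytes decode to
 * 'G', 'Z', 'I', 'P' and genwqe_read_app_id() returns "GZIP" with i = 4.
 * Spaces are skipped, non-printable bytes show up as 'X'.
 */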
#define CRC32_POLYNOMIAL 0x20044009
static u32 crc32_tab[256]; /* crc32 lookup table */
/**
* genwqe_init_crc32() - Prepare a lookup table for fast crc32 calculations
*
* The existing kernel CRC functions use a different polynomial,
* therefore we could not use them here.
*
* Genwqe's Polynomial = 0x20044009
*/
void genwqe_init_crc32(void)
{
int i, j;
u32 crc;
for (i = 0; i < 256; i++) {
crc = i << 24;
for (j = 0; j < 8; j++) {
if (crc & 0x80000000)
crc = (crc << 1) ^ CRC32_POLYNOMIAL;
else
crc = (crc << 1);
}
crc32_tab[i] = crc;
}
}
/**
* genwqe_crc32() - Generate 32-bit crc as required for DDCBs
* @buff: pointer to data buffer
* @len: length of data for calculation
* @init: initial crc (0xffffffff at start)
*
* polynomial = x^32 + x^29 + x^18 + x^14 + x^3 + 1 (0x20044009)
*
* Example: 4 bytes 0x01 0x02 0x03 0x04 with init=0xffffffff should
* result in a crc32 of 0xf33cb7d3.
*
* The existing kernel crc functions did not cover this polynomial yet.
*
* Return: crc32 checksum.
*/
u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
{
int i;
u32 crc;
crc = init;
while (len--) {
i = ((crc >> 24) ^ *buff++) & 0xFF;
crc = (crc << 8) ^ crc32_tab[i];
}
return crc;
}
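/*
 * Usage sketch, based on the test vector documented above:
 *
 *   u8 buf[4] = { 0x01, 0x02, 0x03, 0x04 };
 *   u32 crc;
 *
 *   genwqe_init_crc32();    // fill crc32_tab once, e.g. at module load
 *   crc = genwqe_crc32(buf, sizeof(buf), 0xffffffff);
 *   // crc == 0xf33cb7d3
 */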
void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
dma_addr_t *dma_handle)
{
if (get_order(size) > MAX_ORDER)
return NULL;
return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
GFP_KERNEL);
}
void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
if (vaddr == NULL)
return;
dma_free_coherent(&cd->pci_dev->dev, size, vaddr, dma_handle);
}
static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list,
int num_pages)
{
int i;
struct pci_dev *pci_dev = cd->pci_dev;
for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) {
dma_unmap_page(&pci_dev->dev, dma_list[i], PAGE_SIZE,
DMA_BIDIRECTIONAL);
dma_list[i] = 0x0;
}
}
static int genwqe_map_pages(struct genwqe_dev *cd,
struct page **page_list, int num_pages,
dma_addr_t *dma_list)
{
int i;
struct pci_dev *pci_dev = cd->pci_dev;
/* establish DMA mapping for requested pages */
for (i = 0; i < num_pages; i++) {
dma_addr_t daddr;
dma_list[i] = 0x0;
daddr = dma_map_page(&pci_dev->dev, page_list[i],
0, /* map_offs */
PAGE_SIZE,
DMA_BIDIRECTIONAL); /* FIXME rd/rw */
if (dma_mapping_error(&pci_dev->dev, daddr)) {
dev_err(&pci_dev->dev,
"[%s] err: no dma addr daddr=%016llx!\n",
__func__, (long long)daddr);
goto err;
}
dma_list[i] = daddr;
}
return 0;
err:
genwqe_unmap_pages(cd, dma_list, num_pages);
return -EIO;
}
static int genwqe_sgl_size(int num_pages)
{
int len, num_tlb = num_pages / 7;
len = sizeof(struct sg_entry) * (num_pages + num_tlb + 1);
return roundup(len, PAGE_SIZE);
}
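/*
 * Sizing sketch: each 8-entry block holds 1 chaining entry plus 7 data
 * entries, hence the extra num_pages / 7 entries; the + 1 is for the
 * terminating SG_END_LIST entry. Assuming sizeof(struct sg_entry) == 16
 * (one __be64 address plus two __be32 words, see genwqe_setup_sgl()
 * below) and a 4 KiB PAGE_SIZE, num_pages = 20 gives
 * len = 16 * (20 + 2 + 1) = 368, rounded up to 4096.
 */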
/*
* genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages
*
* Allocates memory for sgl and overlapping pages. Pages which might
* overlap other user-space memory blocks are being cached for DMAs,
* such that we do not run into synchronization issues. Data is copied
* from user-space into the cached pages.
*/
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
void __user *user_addr, size_t user_size, int write)
{
int ret = -ENOMEM;
struct pci_dev *pci_dev = cd->pci_dev;
sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
sgl->fpage_size = min_t(size_t, PAGE_SIZE-sgl->fpage_offs, user_size);
sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE);
sgl->lpage_size = (user_size - sgl->fpage_size) % PAGE_SIZE;
dev_dbg(&pci_dev->dev, "[%s] uaddr=%p usize=%8ld nr_pages=%ld fpage_offs=%lx fpage_size=%ld lpage_size=%ld\n",
__func__, user_addr, user_size, sgl->nr_pages,
sgl->fpage_offs, sgl->fpage_size, sgl->lpage_size);
sgl->user_addr = user_addr;
sgl->user_size = user_size;
sgl->write = write;
sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);
if (get_order(sgl->sgl_size) > MAX_ORDER) {
dev_err(&pci_dev->dev,
"[%s] err: too much memory requested!\n", __func__);
return ret;
}
sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
&sgl->sgl_dma_addr);
if (sgl->sgl == NULL) {
dev_err(&pci_dev->dev,
"[%s] err: no memory available!\n", __func__);
return ret;
}
/* Only use buffering on incomplete pages */
if ((sgl->fpage_size != 0) && (sgl->fpage_size != PAGE_SIZE)) {
sgl->fpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
&sgl->fpage_dma_addr);
if (sgl->fpage == NULL)
goto err_out;
/* Sync with user memory */
if (copy_from_user(sgl->fpage + sgl->fpage_offs,
user_addr, sgl->fpage_size)) {
ret = -EFAULT;
goto err_out;
}
}
if (sgl->lpage_size != 0) {
sgl->lpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
&sgl->lpage_dma_addr);
if (sgl->lpage == NULL)
goto err_out1;
/* Sync with user memory */
if (copy_from_user(sgl->lpage, user_addr + user_size -
sgl->lpage_size, sgl->lpage_size)) {
ret = -EFAULT;
goto err_out2;
}
}
return 0;
err_out2:
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
sgl->lpage_dma_addr);
sgl->lpage = NULL;
sgl->lpage_dma_addr = 0;
err_out1:
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
sgl->fpage_dma_addr);
sgl->fpage = NULL;
sgl->fpage_dma_addr = 0;
err_out:
__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
sgl->sgl_dma_addr);
sgl->sgl = NULL;
sgl->sgl_dma_addr = 0;
sgl->sgl_size = 0;
return ret;
}
int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
dma_addr_t *dma_list)
{
int i = 0, j = 0, p;
unsigned long dma_offs, map_offs;
dma_addr_t prev_daddr = 0;
struct sg_entry *s, *last_s = NULL;
size_t size = sgl->user_size;
dma_offs = 128; /* next block if needed/dma_offset */
map_offs = sgl->fpage_offs; /* offset in first page */
s = &sgl->sgl[0]; /* first set of 8 entries */
p = 0; /* page */
while (p < sgl->nr_pages) {
dma_addr_t daddr;
unsigned int size_to_map;
/* always write the chaining entry, cleanup is done later */
j = 0;
s[j].target_addr = cpu_to_be64(sgl->sgl_dma_addr + dma_offs);
s[j].len = cpu_to_be32(128);
s[j].flags = cpu_to_be32(SG_CHAINED);
j++;
while (j < 8) {
/* DMA mapping for requested page, offs, size */
size_to_map = min(size, PAGE_SIZE - map_offs);
if ((p == 0) && (sgl->fpage != NULL)) {
daddr = sgl->fpage_dma_addr + map_offs;
} else if ((p == sgl->nr_pages - 1) &&
(sgl->lpage != NULL)) {
daddr = sgl->lpage_dma_addr;
} else {
daddr = dma_list[p] + map_offs;
}
size -= size_to_map;
map_offs = 0;
if (prev_daddr == daddr) {
u32 prev_len = be32_to_cpu(last_s->len);
/* pr_info("daddr combining: "
"%016llx/%08x -> %016llx\n",
prev_daddr, prev_len, daddr); */
last_s->len = cpu_to_be32(prev_len +
size_to_map);
p++; /* process next page */
if (p == sgl->nr_pages)
goto fixup; /* nothing to do */
prev_daddr = daddr + size_to_map;
continue;
}
/* start new entry */
s[j].target_addr = cpu_to_be64(daddr);
s[j].len = cpu_to_be32(size_to_map);
s[j].flags = cpu_to_be32(SG_DATA);
prev_daddr = daddr + size_to_map;
last_s = &s[j];
j++;
p++; /* process next page */
if (p == sgl->nr_pages)
goto fixup; /* nothing to do */
}
dma_offs += 128;
s += 8; /* continue 8 elements further */
}
fixup:
if (j == 1) { /* combining happened on last entry! */
s -= 8; /* full shift needed on previous sgl block */
j = 7; /* shift all elements */
}
for (i = 0; i < j; i++) /* move elements 1 up */
s[i] = s[i + 1];
s[i].target_addr = cpu_to_be64(0);
s[i].len = cpu_to_be32(0);
s[i].flags = cpu_to_be32(SG_END_LIST);
return 0;
}
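/*
 * Resulting list layout (illustration): every block of 8 entries starts
 * with a chaining entry of length 128 pointing at the next block,
 * followed by up to 7 data entries:
 *
 *   block 0: [CHAIN -> block 1][DATA][DATA] ... [DATA]
 *   block 1: [CHAIN -> block 2][DATA][DATA] ... [DATA]
 *   ...
 *   last:            [DATA] ... [DATA][SG_END_LIST]
 *
 * The fixup above shifts the entries of the final block one slot up so
 * that the now-superfluous chaining entry disappears and the list is
 * terminated with an SG_END_LIST marker.
 */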
/**
* genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages
* @cd: genwqe device descriptor
* @sgl: scatter gather list describing user-space memory
*
* After the DMA transfer has been completed we free the memory for
* the sgl and the cached pages. Data is being transferred from cached
* pages into user-space buffers.
*/
int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
{
int rc = 0;
size_t offset;
unsigned long res;
struct pci_dev *pci_dev = cd->pci_dev;
if (sgl->fpage) {
if (sgl->write) {
res = copy_to_user(sgl->user_addr,
sgl->fpage + sgl->fpage_offs, sgl->fpage_size);
if (res) {
dev_err(&pci_dev->dev,
"[%s] err: copying fpage! (res=%lu)\n",
__func__, res);
rc = -EFAULT;
}
}
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
sgl->fpage_dma_addr);
sgl->fpage = NULL;
sgl->fpage_dma_addr = 0;
}
if (sgl->lpage) {
if (sgl->write) {
offset = sgl->user_size - sgl->lpage_size;
res = copy_to_user(sgl->user_addr + offset, sgl->lpage,
sgl->lpage_size);
if (res) {
dev_err(&pci_dev->dev,
"[%s] err: copying lpage! (res=%lu)\n",
__func__, res);
rc = -EFAULT;
}
}
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
sgl->lpage_dma_addr);
sgl->lpage = NULL;
sgl->lpage_dma_addr = 0;
}
__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
sgl->sgl_dma_addr);
sgl->sgl = NULL;
sgl->sgl_dma_addr = 0x0;
sgl->sgl_size = 0;
return rc;
}
/**
* genwqe_user_vmap() - Map user-space memory to virtual kernel memory
* @cd: pointer to genwqe device
* @m: mapping params
* @uaddr: user virtual address
* @size: size of memory to be mapped
*
* We need to think about how we could speed this up. Of course it is
* not a good idea to do this over and over again, like we are
* currently doing it. Nevertheless, I am curious where on the path
* the performance is spend. Most probably within the memory
* allocation functions, but maybe also in the DMA mapping code.
*
* Restrictions: The maximum size of the possible mapping currently depends
* on the amount of memory we can get using kzalloc() for the
* page_list and dma_alloc_coherent() for the sg_list.
* The sg_list is currently itself not scattered, which could
* be fixed with some effort. The page_list must be split into
* PAGE_SIZE chunks too. All that will make the complicated
* code more complicated.
*
* Return: 0 if success
*/
int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
unsigned long size)
{
int rc = -EINVAL;
unsigned long data, offs;
struct pci_dev *pci_dev = cd->pci_dev;
if ((uaddr == NULL) || (size == 0)) {
m->size = 0; /* mark unused and not added */
return -EINVAL;
}
m->u_vaddr = uaddr;
m->size = size;
/* determine space needed for page_list. */
data = (unsigned long)uaddr;
offs = offset_in_page(data);
if (size > ULONG_MAX - PAGE_SIZE - offs) {
m->size = 0; /* mark unused and not added */
return -EINVAL;
}
m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);
m->page_list = kcalloc(m->nr_pages,
sizeof(struct page *) + sizeof(dma_addr_t),
GFP_KERNEL);
if (!m->page_list) {
dev_err(&pci_dev->dev, "err: alloc page_list failed\n");
m->nr_pages = 0;
m->u_vaddr = NULL;
m->size = 0; /* mark unused and not added */
return -ENOMEM;
}
m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages);
/* pin user pages in memory */
rc = pin_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
m->nr_pages,
m->write ? FOLL_WRITE : 0, /* readable/writable */
m->page_list); /* ptrs to pages */
if (rc < 0)
goto fail_pin_user_pages;
/* assumption: pin_user_pages can be killed by signals. */
if (rc < m->nr_pages) {
unpin_user_pages_dirty_lock(m->page_list, rc, m->write);
rc = -EFAULT;
goto fail_pin_user_pages;
}
rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list);
if (rc != 0)
goto fail_free_user_pages;
return 0;
fail_free_user_pages:
unpin_user_pages_dirty_lock(m->page_list, m->nr_pages, m->write);
fail_pin_user_pages:
kfree(m->page_list);
m->page_list = NULL;
m->dma_list = NULL;
m->nr_pages = 0;
m->u_vaddr = NULL;
m->size = 0; /* mark unused and not added */
return rc;
}
/**
* genwqe_user_vunmap() - Undo mapping of user-space mem to virtual kernel
* memory
* @cd: pointer to genwqe device
* @m: mapping params
*/
int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m)
{
struct pci_dev *pci_dev = cd->pci_dev;
if (!dma_mapping_used(m)) {
dev_err(&pci_dev->dev, "[%s] err: mapping %p not used!\n",
__func__, m);
return -EINVAL;
}
if (m->dma_list)
genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);
if (m->page_list) {
unpin_user_pages_dirty_lock(m->page_list, m->nr_pages,
m->write);
kfree(m->page_list);
m->page_list = NULL;
m->dma_list = NULL;
m->nr_pages = 0;
}
m->u_vaddr = NULL;
m->size = 0; /* mark as unused and not added */
return 0;
}
/**
* genwqe_card_type() - Get chip type SLU Configuration Register
* @cd: pointer to the genwqe device descriptor
* Return: 0: Altera Stratix-IV 230
* 1: Altera Stratix-IV 530
* 2: Altera Stratix-V A4
* 3: Altera Stratix-V A7
*/
u8 genwqe_card_type(struct genwqe_dev *cd)
{
u64 card_type = cd->slu_unitcfg;
return (u8)((card_type & IO_SLU_UNITCFG_TYPE_MASK) >> 20);
}
/**
* genwqe_card_reset() - Reset the card
* @cd: pointer to the genwqe device descriptor
*/
int genwqe_card_reset(struct genwqe_dev *cd)
{
u64 softrst;
struct pci_dev *pci_dev = cd->pci_dev;
if (!genwqe_is_privileged(cd))
return -ENODEV;
/* new SL */
__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, 0x1ull);
msleep(1000);
__genwqe_readq(cd, IO_HSU_FIR_CLR);
__genwqe_readq(cd, IO_APP_FIR_CLR);
__genwqe_readq(cd, IO_SLU_FIR_CLR);
/*
* Read-modify-write to preserve the stealth bits
*
* For SL >= 039, the Stealth WE bit allows removing
* the read-modify-write.
* r-m-w may require a mask 0x3C to avoid hitting hard
* reset again for error reset (should be 0, chicken).
*/
softrst = __genwqe_readq(cd, IO_SLC_CFGREG_SOFTRESET) & 0x3cull;
__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, softrst | 0x2ull);
/* give ERRORRESET some time to finish */
msleep(50);
if (genwqe_need_err_masking(cd)) {
dev_info(&pci_dev->dev,
"[%s] masking errors for old bitstreams\n", __func__);
__genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
}
return 0;
}
int genwqe_read_softreset(struct genwqe_dev *cd)
{
u64 bitstream;
if (!genwqe_is_privileged(cd))
return -ENODEV;
bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1;
cd->softreset = (bitstream == 0) ? 0x8ull : 0xcull;
return 0;
}
/**
* genwqe_set_interrupt_capability() - Configure MSI capability structure
* @cd: pointer to the device
* @count: number of vectors to allocate
* Return: 0 if no error
*/
int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
{
int rc;
rc = pci_alloc_irq_vectors(cd->pci_dev, 1, count, PCI_IRQ_MSI);
if (rc < 0)
return rc;
return 0;
}
/**
* genwqe_reset_interrupt_capability() - Undo genwqe_set_interrupt_capability()
* @cd: pointer to the device
*/
void genwqe_reset_interrupt_capability(struct genwqe_dev *cd)
{
pci_free_irq_vectors(cd->pci_dev);
}
/**
* set_reg_idx() - Fill array with data. Ignore illegal offsets.
* @cd: card device
* @r: debug register array
* @i: index to desired entry
* @m: maximum possible entries
* @addr: addr which is read
* @idx: index in debug array
* @val: read value
*/
static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r,
unsigned int *i, unsigned int m, u32 addr, u32 idx,
u64 val)
{
if (WARN_ON_ONCE(*i >= m))
return -EFAULT;
r[*i].addr = addr;
r[*i].idx = idx;
r[*i].val = val;
++*i;
return 0;
}
static int set_reg(struct genwqe_dev *cd, struct genwqe_reg *r,
unsigned int *i, unsigned int m, u32 addr, u64 val)
{
return set_reg_idx(cd, r, i, m, addr, 0, val);
}
int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
unsigned int max_regs, int all)
{
unsigned int i, j, idx = 0;
u32 ufir_addr, ufec_addr, sfir_addr, sfec_addr;
u64 gfir, sluid, appid, ufir, ufec, sfir, sfec;
/* Global FIR */
gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
set_reg(cd, regs, &idx, max_regs, IO_SLC_CFGREG_GFIR, gfir);
/* UnitCfg for SLU */
sluid = __genwqe_readq(cd, IO_SLU_UNITCFG); /* 0x00000000 */
set_reg(cd, regs, &idx, max_regs, IO_SLU_UNITCFG, sluid);
/* UnitCfg for APP */
appid = __genwqe_readq(cd, IO_APP_UNITCFG); /* 0x02000000 */
set_reg(cd, regs, &idx, max_regs, IO_APP_UNITCFG, appid);
/* Check all chip Units */
for (i = 0; i < GENWQE_MAX_UNITS; i++) {
/* Unit FIR */
ufir_addr = (i << 24) | 0x008;
ufir = __genwqe_readq(cd, ufir_addr);
set_reg(cd, regs, &idx, max_regs, ufir_addr, ufir);
/* Unit FEC */
ufec_addr = (i << 24) | 0x018;
ufec = __genwqe_readq(cd, ufec_addr);
set_reg(cd, regs, &idx, max_regs, ufec_addr, ufec);
for (j = 0; j < 64; j++) {
/* wherever there is a primary 1, read the 2ndary */
if (!all && (!(ufir & (1ull << j))))
continue;
sfir_addr = (i << 24) | (0x100 + 8 * j);
sfir = __genwqe_readq(cd, sfir_addr);
set_reg(cd, regs, &idx, max_regs, sfir_addr, sfir);
sfec_addr = (i << 24) | (0x300 + 8 * j);
sfec = __genwqe_readq(cd, sfec_addr);
set_reg(cd, regs, &idx, max_regs, sfec_addr, sfec);
}
}
/* fill with invalid data until end */
for (i = idx; i < max_regs; i++) {
regs[i].addr = 0xffffffff;
regs[i].val = 0xffffffffffffffffull;
}
return idx;
}
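/*
 * Address layout sketch: bits 31:24 select the chip unit, the low bits
 * the register within it. Unit 2's FIR, for example, lives at
 * (2 << 24) | 0x008 = 0x02000008, its j-th secondary FIR at
 * 0x02000100 + 8 * j and the matching FEC at 0x02000300 + 8 * j.
 */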
/**
* genwqe_ffdc_buff_size() - Calculates the number of dump registers
* @cd: genwqe device descriptor
* @uid: unit ID
*/
int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
{
int entries = 0, ring, traps, traces, trace_entries;
u32 eevptr_addr, l_addr, d_len, d_type;
u64 eevptr, val, addr;
eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
eevptr = __genwqe_readq(cd, eevptr_addr);
if ((eevptr != 0x0) && (eevptr != -1ull)) {
l_addr = GENWQE_UID_OFFS(uid) | eevptr;
while (1) {
val = __genwqe_readq(cd, l_addr);
if ((val == 0x0) || (val == -1ull))
break;
/* 38:24 */
d_len = (val & 0x0000007fff000000ull) >> 24;
/* 39 */
d_type = (val & 0x0000008000000000ull) >> 36;
if (d_type) { /* repeat */
entries += d_len;
} else { /* size in bytes! */
entries += d_len >> 3;
}
l_addr += 8;
}
}
for (ring = 0; ring < 8; ring++) {
addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
val = __genwqe_readq(cd, addr);
if ((val == 0x0ull) || (val == -1ull))
continue;
traps = (val >> 24) & 0xff;
traces = (val >> 16) & 0xff;
trace_entries = val & 0xffff;
entries += traps + (traces * trace_entries);
}
return entries;
}
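/*
 * Decode sketch for one extended-error list word (hypothetical value):
 *
 *   val    = 0x0000008010000000ull
 *   d_len  = (val >> 24) & 0x7fff  -> 16
 *   d_type = bit 39 set            -> d_len is a repeat count
 *   => contributes 16 entries
 *
 * With bit 39 clear, d_len is a byte count instead and contributes
 * d_len / 8 (64-bit registers) entries.
 */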
/**
* genwqe_ffdc_buff_read() - Implements LogoutExtendedErrorRegisters procedure
* @cd: genwqe device descriptor
* @uid: unit ID
* @regs: register information
* @max_regs: number of register entries
*/
int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
struct genwqe_reg *regs, unsigned int max_regs)
{
int i, traps, traces, trace, trace_entries, trace_entry, ring;
unsigned int idx = 0;
u32 eevptr_addr, l_addr, d_addr, d_len, d_type;
u64 eevptr, e, val, addr;
eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
eevptr = __genwqe_readq(cd, eevptr_addr);
if ((eevptr != 0x0) && (eevptr != 0xffffffffffffffffull)) {
l_addr = GENWQE_UID_OFFS(uid) | eevptr;
while (1) {
e = __genwqe_readq(cd, l_addr);
if ((e == 0x0) || (e == 0xffffffffffffffffull))
break;
d_addr = (e & 0x0000000000ffffffull); /* 23:0 */
d_len = (e & 0x0000007fff000000ull) >> 24; /* 38:24 */
d_type = (e & 0x0000008000000000ull) >> 36; /* 39 */
d_addr |= GENWQE_UID_OFFS(uid);
if (d_type) {
for (i = 0; i < (int)d_len; i++) {
val = __genwqe_readq(cd, d_addr);
set_reg_idx(cd, regs, &idx, max_regs,
d_addr, i, val);
}
} else {
d_len >>= 3; /* Size in bytes! */
for (i = 0; i < (int)d_len; i++, d_addr += 8) {
val = __genwqe_readq(cd, d_addr);
set_reg_idx(cd, regs, &idx, max_regs,
d_addr, 0, val);
}
}
l_addr += 8;
}
}
/*
* To save time, there are only 6 traces populated on Uid=2,
* Ring=1, each with iters=512.
*/
for (ring = 0; ring < 8; ring++) { /* 0 is fls, 1 is fds,
2...7 are ASI rings */
addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
val = __genwqe_readq(cd, addr);
if ((val == 0x0ull) || (val == -1ull))
continue;
traps = (val >> 24) & 0xff; /* Number of Traps */
traces = (val >> 16) & 0xff; /* Number of Traces */
trace_entries = val & 0xffff; /* Entries per trace */
/* Note: This is a combined loop that dumps both the traps */
/* (for the trace == 0 case) as well as the traces 1 to */
/* 'traces'. */
for (trace = 0; trace <= traces; trace++) {
u32 diag_sel =
GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace);
addr = (GENWQE_UID_OFFS(uid) |
IO_EXTENDED_DIAG_SELECTOR);
__genwqe_writeq(cd, addr, diag_sel);
for (trace_entry = 0;
trace_entry < (trace ? trace_entries : traps);
trace_entry++) {
addr = (GENWQE_UID_OFFS(uid) |
IO_EXTENDED_DIAG_READ_MBX);
val = __genwqe_readq(cd, addr);
set_reg_idx(cd, regs, &idx, max_regs, addr,
(diag_sel<<16) | trace_entry, val);
}
}
}
return 0;
}
/**
* genwqe_write_vreg() - Write register in virtual window
* @cd: genwqe device descriptor
* @reg: register (byte) offset within BAR
* @val: value to write
* @func: PCI virtual function
*
* Note: these registers are only accessible to the PF through the
* VF window; they are not intended for VF access.
*/
int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func)
{
__genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
__genwqe_writeq(cd, reg, val);
return 0;
}
/**
* genwqe_read_vreg() - Read register in virtual window
* @cd: genwqe device descriptor
* @reg: register (byte) offset within BAR
* @func: PCI virtual function
*
* Note: these registers are only accessible to the PF through the
* VF window; they are not intended for VF access.
*/
u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func)
{
__genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
return __genwqe_readq(cd, reg);
}
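/*
 * Usage sketch (PF only), with 'reg' standing in for any per-VF
 * register offset within the BAR:
 *
 *   u64 val = genwqe_read_vreg(cd, reg, 2);    // read reg of VF 2
 *   genwqe_write_vreg(cd, reg, val | 0x1, 2);  // write it back
 */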
/**
* genwqe_base_clock_frequency() - Determine base clock frequency of the card
* @cd: genwqe device descriptor
*
* Note: From a design perspective it turned out to be a bad idea to
* use codes here to specify the frequency/speed values. An old
* driver cannot understand new codes and is therefore always a
* problem. Better is to measure out the value or put the
* speed/frequency directly into a register which is always a valid
* value for old as well as for new software.
*
* Return: Card clock in MHz
*/
int genwqe_base_clock_frequency(struct genwqe_dev *cd)
{
u16 speed; /* MHz MHz MHz MHz */
static const int speed_grade[] = { 250, 200, 166, 175 };
speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
if (speed >= ARRAY_SIZE(speed_grade))
return 0; /* illegal value */
return speed_grade[speed];
}
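/*
 * Decode sketch: bits 31:28 of slu_unitcfg select the speed grade. A
 * (hypothetical) slu_unitcfg of 0x0000000010000000ull yields speed = 1
 * and therefore 200 MHz; values >= 4 are rejected as illegal.
 */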
/**
* genwqe_stop_traps() - Stop traps
* @cd: genwqe device descriptor
*
* Before reading out the analysis data, we need to stop the traps.
*/
void genwqe_stop_traps(struct genwqe_dev *cd)
{
__genwqe_writeq(cd, IO_SLC_MISC_DEBUG_SET, 0xcull);
}
/**
* genwqe_start_traps() - Start traps
* @cd: genwqe device descriptor
*
* After having read the data, we can/must enable the traps again.
*/
void genwqe_start_traps(struct genwqe_dev *cd)
{
__genwqe_writeq(cd, IO_SLC_MISC_DEBUG_CLR, 0xcull);
if (genwqe_need_err_masking(cd))
__genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
}
/* linux-master: drivers/misc/genwqe/card_utils.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
*
* Author: Frank Haverkamp <[email protected]>
* Author: Joerg-Stephan Vogt <[email protected]>
* Author: Michael Jung <[email protected]>
* Author: Michael Ruettger <[email protected]>
*/
/*
* Character device representation of the GenWQE device. This allows
* user-space applications to communicate with the card.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "card_base.h"
#include "card_ddcb.h"
static int genwqe_open_files(struct genwqe_dev *cd)
{
int rc;
unsigned long flags;
spin_lock_irqsave(&cd->file_lock, flags);
rc = list_empty(&cd->file_list);
spin_unlock_irqrestore(&cd->file_lock, flags);
return !rc;
}
static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
unsigned long flags;
cfile->opener = get_pid(task_tgid(current));
spin_lock_irqsave(&cd->file_lock, flags);
list_add(&cfile->list, &cd->file_list);
spin_unlock_irqrestore(&cd->file_lock, flags);
}
static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
unsigned long flags;
spin_lock_irqsave(&cd->file_lock, flags);
list_del(&cfile->list);
spin_unlock_irqrestore(&cd->file_lock, flags);
put_pid(cfile->opener);
return 0;
}
static void genwqe_add_pin(struct genwqe_file *cfile, struct dma_mapping *m)
{
unsigned long flags;
spin_lock_irqsave(&cfile->pin_lock, flags);
list_add(&m->pin_list, &cfile->pin_list);
spin_unlock_irqrestore(&cfile->pin_lock, flags);
}
static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m)
{
unsigned long flags;
spin_lock_irqsave(&cfile->pin_lock, flags);
list_del(&m->pin_list);
spin_unlock_irqrestore(&cfile->pin_lock, flags);
return 0;
}
/**
* genwqe_search_pin() - Search for the mapping for a userspace address
* @cfile: Descriptor of opened file
* @u_addr: User virtual address
* @size: Size of buffer
* @virt_addr: Virtual address to be updated
*
* Return: Pointer to the corresponding mapping; NULL if not found
*/
static struct dma_mapping *genwqe_search_pin(struct genwqe_file *cfile,
unsigned long u_addr,
unsigned int size,
void **virt_addr)
{
unsigned long flags;
struct dma_mapping *m;
spin_lock_irqsave(&cfile->pin_lock, flags);
list_for_each_entry(m, &cfile->pin_list, pin_list) {
if ((((u64)m->u_vaddr) <= (u_addr)) &&
(((u64)m->u_vaddr + m->size) >= (u_addr + size))) {
if (virt_addr)
*virt_addr = m->k_vaddr +
(u_addr - (u64)m->u_vaddr);
spin_unlock_irqrestore(&cfile->pin_lock, flags);
return m;
}
}
spin_unlock_irqrestore(&cfile->pin_lock, flags);
return NULL;
}
static void __genwqe_add_mapping(struct genwqe_file *cfile,
struct dma_mapping *dma_map)
{
unsigned long flags;
spin_lock_irqsave(&cfile->map_lock, flags);
list_add(&dma_map->card_list, &cfile->map_list);
spin_unlock_irqrestore(&cfile->map_lock, flags);
}
static void __genwqe_del_mapping(struct genwqe_file *cfile,
struct dma_mapping *dma_map)
{
unsigned long flags;
spin_lock_irqsave(&cfile->map_lock, flags);
list_del(&dma_map->card_list);
spin_unlock_irqrestore(&cfile->map_lock, flags);
}
/**
* __genwqe_search_mapping() - Search for the mapping for a userspace address
* @cfile: descriptor of opened file
* @u_addr: user virtual address
* @size: size of buffer
* @dma_addr: DMA address to be updated
* @virt_addr: Virtual address to be updated
* Return: Pointer to the corresponding mapping; NULL if not found
*/
static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile,
unsigned long u_addr,
unsigned int size,
dma_addr_t *dma_addr,
void **virt_addr)
{
unsigned long flags;
struct dma_mapping *m;
struct pci_dev *pci_dev = cfile->cd->pci_dev;
spin_lock_irqsave(&cfile->map_lock, flags);
list_for_each_entry(m, &cfile->map_list, card_list) {
if ((((u64)m->u_vaddr) <= (u_addr)) &&
(((u64)m->u_vaddr + m->size) >= (u_addr + size))) {
/* match found: current is as expected and
addr is in range */
if (dma_addr)
*dma_addr = m->dma_addr +
(u_addr - (u64)m->u_vaddr);
if (virt_addr)
*virt_addr = m->k_vaddr +
(u_addr - (u64)m->u_vaddr);
spin_unlock_irqrestore(&cfile->map_lock, flags);
return m;
}
}
spin_unlock_irqrestore(&cfile->map_lock, flags);
dev_err(&pci_dev->dev,
"[%s] Entry not found: u_addr=%lx, size=%x\n",
__func__, u_addr, size);
return NULL;
}
static void genwqe_remove_mappings(struct genwqe_file *cfile)
{
int i = 0;
struct list_head *node, *next;
struct dma_mapping *dma_map;
struct genwqe_dev *cd = cfile->cd;
struct pci_dev *pci_dev = cfile->cd->pci_dev;
list_for_each_safe(node, next, &cfile->map_list) {
dma_map = list_entry(node, struct dma_mapping, card_list);
list_del_init(&dma_map->card_list);
/*
* This is really a bug, because those things should
* have been already tidied up.
*
* GENWQE_MAPPING_RAW should have been removed via munmap().
* GENWQE_MAPPING_SGL_TEMP should be removed by tidy up code.
*/
dev_err(&pci_dev->dev,
"[%s] %d. cleanup mapping: u_vaddr=%p u_kaddr=%016lx dma_addr=%lx\n",
__func__, i++, dma_map->u_vaddr,
(unsigned long)dma_map->k_vaddr,
(unsigned long)dma_map->dma_addr);
if (dma_map->type == GENWQE_MAPPING_RAW) {
/* we allocated this dynamically */
__genwqe_free_consistent(cd, dma_map->size,
dma_map->k_vaddr,
dma_map->dma_addr);
kfree(dma_map);
} else if (dma_map->type == GENWQE_MAPPING_SGL_TEMP) {
/* we use dma_map statically from the request */
genwqe_user_vunmap(cd, dma_map);
}
}
}
static void genwqe_remove_pinnings(struct genwqe_file *cfile)
{
struct list_head *node, *next;
struct dma_mapping *dma_map;
struct genwqe_dev *cd = cfile->cd;
list_for_each_safe(node, next, &cfile->pin_list) {
dma_map = list_entry(node, struct dma_mapping, pin_list);
/*
* This is not a bug, because a killed process might
* not call the unpin ioctl, which is supposed to free
* the resources.
*
* Pinnings are dynamically allocated and need to be
* deleted.
*/
list_del_init(&dma_map->pin_list);
genwqe_user_vunmap(cd, dma_map);
kfree(dma_map);
}
}
/**
* genwqe_kill_fasync() - Send signal to all processes with open GenWQE files
* @cd: GenWQE device information
* @sig: Signal to send out
*
* E.g. genwqe_kill_fasync(cd, SIGIO);
*/
static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig)
{
unsigned int files = 0;
unsigned long flags;
struct genwqe_file *cfile;
spin_lock_irqsave(&cd->file_lock, flags);
list_for_each_entry(cfile, &cd->file_list, list) {
if (cfile->async_queue)
kill_fasync(&cfile->async_queue, sig, POLL_HUP);
files++;
}
spin_unlock_irqrestore(&cd->file_lock, flags);
return files;
}
static int genwqe_terminate(struct genwqe_dev *cd)
{
unsigned int files = 0;
unsigned long flags;
struct genwqe_file *cfile;
spin_lock_irqsave(&cd->file_lock, flags);
list_for_each_entry(cfile, &cd->file_list, list) {
kill_pid(cfile->opener, SIGKILL, 1);
files++;
}
spin_unlock_irqrestore(&cd->file_lock, flags);
return files;
}
/**
* genwqe_open() - file open
* @inode: file system information
* @filp: file handle
*
* This function is executed whenever an application calls
* open("/dev/genwqe",..).
*
* Return: 0 if successful or <0 if errors
*/
static int genwqe_open(struct inode *inode, struct file *filp)
{
struct genwqe_dev *cd;
struct genwqe_file *cfile;
cfile = kzalloc(sizeof(*cfile), GFP_KERNEL);
if (cfile == NULL)
return -ENOMEM;
cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe);
cfile->cd = cd;
cfile->filp = filp;
cfile->client = NULL;
spin_lock_init(&cfile->map_lock); /* list of raw memory allocations */
INIT_LIST_HEAD(&cfile->map_list);
spin_lock_init(&cfile->pin_lock); /* list of user pinned memory */
INIT_LIST_HEAD(&cfile->pin_list);
filp->private_data = cfile;
genwqe_add_file(cd, cfile);
return 0;
}
/**
* genwqe_fasync() - Setup process to receive SIGIO.
* @fd: file descriptor
* @filp: file handle
* @mode: file mode
*
* Sending a signal is working as following:
*
* if (cdev->async_queue)
* kill_fasync(&cdev->async_queue, SIGIO, POLL_IN);
*
* Some devices also implement asynchronous notification to indicate
* when the device can be written; in this case, of course,
* kill_fasync must be called with a mode of POLL_OUT.
*/
static int genwqe_fasync(int fd, struct file *filp, int mode)
{
struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data;
return fasync_helper(fd, filp, mode, &cdev->async_queue);
}
/**
* genwqe_release() - file close
* @inode: file system information
* @filp: file handle
*
* This function is executed whenever an application calls 'close(fd_genwqe)'
*
* Return: always 0
*/
static int genwqe_release(struct inode *inode, struct file *filp)
{
struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
struct genwqe_dev *cd = cfile->cd;
/* there must be no entries in these lists! */
genwqe_remove_mappings(cfile);
genwqe_remove_pinnings(cfile);
/* remove this filp from the asynchronously notified filp's */
genwqe_fasync(-1, filp, 0);
/*
* For this to work we must not release cd when this cfile is
* not yet released, otherwise the list entry is invalid,
* because the list itself gets reinstantiated!
*/
genwqe_del_file(cd, cfile);
kfree(cfile);
return 0;
}
static void genwqe_vma_open(struct vm_area_struct *vma)
{
/* nothing ... */
}
/**
* genwqe_vma_close() - Called each time when vma is unmapped
* @vma: VMA area to close
*
* Free memory which got allocated by GenWQE mmap().
*/
static void genwqe_vma_close(struct vm_area_struct *vma)
{
unsigned long vsize = vma->vm_end - vma->vm_start;
struct inode *inode = file_inode(vma->vm_file);
struct dma_mapping *dma_map;
struct genwqe_dev *cd = container_of(inode->i_cdev, struct genwqe_dev,
cdev_genwqe);
struct pci_dev *pci_dev = cd->pci_dev;
dma_addr_t d_addr = 0;
struct genwqe_file *cfile = vma->vm_private_data;
dma_map = __genwqe_search_mapping(cfile, vma->vm_start, vsize,
&d_addr, NULL);
if (dma_map == NULL) {
dev_err(&pci_dev->dev,
" [%s] err: mapping not found: v=%lx, p=%lx s=%lx\n",
__func__, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
vsize);
return;
}
__genwqe_del_mapping(cfile, dma_map);
__genwqe_free_consistent(cd, dma_map->size, dma_map->k_vaddr,
dma_map->dma_addr);
kfree(dma_map);
}
static const struct vm_operations_struct genwqe_vma_ops = {
.open = genwqe_vma_open,
.close = genwqe_vma_close,
};
/**
* genwqe_mmap() - Provide contiguous buffers to userspace
* @filp: File pointer (unused)
* @vma: VMA area to map
*
* We use mmap() to allocate contiguous buffers used for DMA
* transfers. After the buffer is allocated we remap it to user-space
* and remember a reference to our dma_mapping data structure, where
* we store the associated DMA address and allocated size.
*
* When we receive a DDCB execution request with the ATS bits set to
* plain buffer, we look up our dma_mapping list to find the
* corresponding DMA address for the associated user-space address.
*/
static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
{
int rc;
unsigned long pfn, vsize = vma->vm_end - vma->vm_start;
struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
struct genwqe_dev *cd = cfile->cd;
struct dma_mapping *dma_map;
if (vsize == 0)
return -EINVAL;
if (get_order(vsize) > MAX_ORDER)
return -ENOMEM;
dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
if (dma_map == NULL)
return -ENOMEM;
genwqe_mapping_init(dma_map, GENWQE_MAPPING_RAW);
dma_map->u_vaddr = (void *)vma->vm_start;
dma_map->size = vsize;
dma_map->nr_pages = DIV_ROUND_UP(vsize, PAGE_SIZE);
dma_map->k_vaddr = __genwqe_alloc_consistent(cd, vsize,
&dma_map->dma_addr);
if (dma_map->k_vaddr == NULL) {
rc = -ENOMEM;
goto free_dma_map;
}
if (capable(CAP_SYS_ADMIN) && (vsize > sizeof(dma_addr_t)))
*(dma_addr_t *)dma_map->k_vaddr = dma_map->dma_addr;
pfn = virt_to_phys(dma_map->k_vaddr) >> PAGE_SHIFT;
rc = remap_pfn_range(vma,
vma->vm_start,
pfn,
vsize,
vma->vm_page_prot);
if (rc != 0) {
rc = -EFAULT;
goto free_dma_mem;
}
vma->vm_private_data = cfile;
vma->vm_ops = &genwqe_vma_ops;
__genwqe_add_mapping(cfile, dma_map);
return 0;
free_dma_mem:
__genwqe_free_consistent(cd, dma_map->size,
dma_map->k_vaddr,
dma_map->dma_addr);
free_dma_map:
kfree(dma_map);
return rc;
}
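/*
 * User-space usage sketch (illustrative; assumes the default device
 * naming, see genwqe_device_create()):
 *
 *   int fd = open("/dev/genwqe0_card", O_RDWR);
 *   void *buf = mmap(NULL, 64 * 1024, PROT_READ | PROT_WRITE,
 *                    MAP_SHARED, fd, 0);
 *   // with CAP_SYS_ADMIN the first 8 bytes hold the DMA address
 *   munmap(buf, 64 * 1024);  // ends up in genwqe_vma_close()
 */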
#define FLASH_BLOCK 0x40000 /* we use 256k blocks */
/**
* do_flash_update() - Execute flash update (write image or CVPD)
* @cfile: Descriptor of opened file
* @load: details about image load
*
* Return: 0 if successful
*/
static int do_flash_update(struct genwqe_file *cfile,
struct genwqe_bitstream *load)
{
int rc = 0;
int blocks_to_flash;
dma_addr_t dma_addr;
u64 flash = 0;
size_t tocopy = 0;
u8 __user *buf;
u8 *xbuf;
u32 crc;
u8 cmdopts;
struct genwqe_dev *cd = cfile->cd;
struct file *filp = cfile->filp;
struct pci_dev *pci_dev = cd->pci_dev;
if ((load->size & 0x3) != 0)
return -EINVAL;
if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
return -EINVAL;
/* FIXME Bits have changed for new service layer! */
switch ((char)load->partition) {
case '0':
cmdopts = 0x14;
break; /* download/erase_first/part_0 */
case '1':
cmdopts = 0x1C;
break; /* download/erase_first/part_1 */
case 'v':
cmdopts = 0x0C;
break; /* download/erase_first/vpd */
default:
return -EINVAL;
}
buf = (u8 __user *)load->data_addr;
xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
if (xbuf == NULL)
return -ENOMEM;
blocks_to_flash = load->size / FLASH_BLOCK;
while (load->size) {
struct genwqe_ddcb_cmd *req;
/*
* We must be 4 byte aligned. Buffer must be zero-padded
* to have defined values when calculating CRC.
*/
tocopy = min_t(size_t, load->size, FLASH_BLOCK);
rc = copy_from_user(xbuf, buf, tocopy);
if (rc) {
rc = -EFAULT;
goto free_buffer;
}
crc = genwqe_crc32(xbuf, tocopy, 0xffffffff);
dev_dbg(&pci_dev->dev,
"[%s] DMA: %lx CRC: %08x SZ: %ld %d\n",
__func__, (unsigned long)dma_addr, crc, tocopy,
blocks_to_flash);
/* prepare DDCB for SLU process */
req = ddcb_requ_alloc();
if (req == NULL) {
rc = -ENOMEM;
goto free_buffer;
}
req->cmd = SLCMD_MOVE_FLASH;
req->cmdopts = cmdopts;
/* prepare invariant values */
if (genwqe_get_slu_id(cd) <= 0x2) {
*(__be64 *)&req->__asiv[0] = cpu_to_be64(dma_addr);
*(__be64 *)&req->__asiv[8] = cpu_to_be64(tocopy);
*(__be64 *)&req->__asiv[16] = cpu_to_be64(flash);
*(__be32 *)&req->__asiv[24] = cpu_to_be32(0);
req->__asiv[24] = load->uid;
*(__be32 *)&req->__asiv[28] = cpu_to_be32(crc);
/* for simulation only */
*(__be64 *)&req->__asiv[88] = cpu_to_be64(load->slu_id);
*(__be64 *)&req->__asiv[96] = cpu_to_be64(load->app_id);
req->asiv_length = 32; /* bytes included in crc calc */
} else { /* setup DDCB for ATS architecture */
*(__be64 *)&req->asiv[0] = cpu_to_be64(dma_addr);
*(__be32 *)&req->asiv[8] = cpu_to_be32(tocopy);
*(__be32 *)&req->asiv[12] = cpu_to_be32(0); /* resvd */
*(__be64 *)&req->asiv[16] = cpu_to_be64(flash);
*(__be32 *)&req->asiv[24] = cpu_to_be32(load->uid<<24);
*(__be32 *)&req->asiv[28] = cpu_to_be32(crc);
/* for simulation only */
*(__be64 *)&req->asiv[80] = cpu_to_be64(load->slu_id);
*(__be64 *)&req->asiv[88] = cpu_to_be64(load->app_id);
/* Rd only */
req->ats = 0x4ULL << 44;
req->asiv_length = 40; /* bytes included in crc calc */
}
req->asv_length = 8;
/* For Genwqe5 we get back the calculated CRC */
*(u64 *)&req->asv[0] = 0ULL; /* 0x80 */
rc = __genwqe_execute_raw_ddcb(cd, req, filp->f_flags);
load->retc = req->retc;
load->attn = req->attn;
load->progress = req->progress;
if (rc < 0) {
ddcb_requ_free(req);
goto free_buffer;
}
if (req->retc != DDCB_RETC_COMPLETE) {
rc = -EIO;
ddcb_requ_free(req);
goto free_buffer;
}
load->size -= tocopy;
flash += tocopy;
buf += tocopy;
blocks_to_flash--;
ddcb_requ_free(req);
}
free_buffer:
__genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
return rc;
}
static int do_flash_read(struct genwqe_file *cfile,
struct genwqe_bitstream *load)
{
int rc, blocks_to_flash;
dma_addr_t dma_addr;
u64 flash = 0;
size_t tocopy = 0;
u8 __user *buf;
u8 *xbuf;
u8 cmdopts;
struct genwqe_dev *cd = cfile->cd;
struct file *filp = cfile->filp;
struct pci_dev *pci_dev = cd->pci_dev;
struct genwqe_ddcb_cmd *cmd;
if ((load->size & 0x3) != 0)
return -EINVAL;
if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
return -EINVAL;
/* FIXME Bits have changed for new service layer! */
switch ((char)load->partition) {
case '0':
cmdopts = 0x12;
break; /* upload/part_0 */
case '1':
cmdopts = 0x1A;
break; /* upload/part_1 */
case 'v':
cmdopts = 0x0A;
break; /* upload/vpd */
default:
return -EINVAL;
}
buf = (u8 __user *)load->data_addr;
xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
if (xbuf == NULL)
return -ENOMEM;
blocks_to_flash = load->size / FLASH_BLOCK;
while (load->size) {
/*
* We must be 4 byte aligned. Buffer must be zero-padded
* to have defined values when calculating CRC.
*/
tocopy = min_t(size_t, load->size, FLASH_BLOCK);
dev_dbg(&pci_dev->dev,
"[%s] DMA: %lx SZ: %ld %d\n",
__func__, (unsigned long)dma_addr, tocopy,
blocks_to_flash);
/* prepare DDCB for SLU process */
cmd = ddcb_requ_alloc();
if (cmd == NULL) {
rc = -ENOMEM;
goto free_buffer;
}
cmd->cmd = SLCMD_MOVE_FLASH;
cmd->cmdopts = cmdopts;
/* prepare invariant values */
if (genwqe_get_slu_id(cd) <= 0x2) {
*(__be64 *)&cmd->__asiv[0] = cpu_to_be64(dma_addr);
*(__be64 *)&cmd->__asiv[8] = cpu_to_be64(tocopy);
*(__be64 *)&cmd->__asiv[16] = cpu_to_be64(flash);
*(__be32 *)&cmd->__asiv[24] = cpu_to_be32(0);
cmd->__asiv[24] = load->uid;
*(__be32 *)&cmd->__asiv[28] = cpu_to_be32(0) /* CRC */;
cmd->asiv_length = 32; /* bytes included in crc calc */
} else { /* setup DDCB for ATS architecture */
*(__be64 *)&cmd->asiv[0] = cpu_to_be64(dma_addr);
*(__be32 *)&cmd->asiv[8] = cpu_to_be32(tocopy);
*(__be32 *)&cmd->asiv[12] = cpu_to_be32(0); /* resvd */
*(__be64 *)&cmd->asiv[16] = cpu_to_be64(flash);
*(__be32 *)&cmd->asiv[24] = cpu_to_be32(load->uid<<24);
*(__be32 *)&cmd->asiv[28] = cpu_to_be32(0); /* CRC */
/* rd/wr */
cmd->ats = 0x5ULL << 44;
cmd->asiv_length = 40; /* bytes included in crc calc */
}
cmd->asv_length = 8;
/* we only get back the calculated CRC */
*(u64 *)&cmd->asv[0] = 0ULL; /* 0x80 */
rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);
load->retc = cmd->retc;
load->attn = cmd->attn;
load->progress = cmd->progress;
if ((rc < 0) && (rc != -EBADMSG)) {
ddcb_requ_free(cmd);
goto free_buffer;
}
rc = copy_to_user(buf, xbuf, tocopy);
if (rc) {
rc = -EFAULT;
ddcb_requ_free(cmd);
goto free_buffer;
}
/* We know that we can get retc 0x104 with CRC err */
if (((cmd->retc == DDCB_RETC_FAULT) &&
(cmd->attn != 0x02)) || /* Normally ignore CRC error */
((cmd->retc == DDCB_RETC_COMPLETE) &&
(cmd->attn != 0x00))) { /* Everything was fine */
rc = -EIO;
ddcb_requ_free(cmd);
goto free_buffer;
}
load->size -= tocopy;
flash += tocopy;
buf += tocopy;
blocks_to_flash--;
ddcb_requ_free(cmd);
}
rc = 0;
free_buffer:
__genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
return rc;
}
static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
{
int rc;
struct genwqe_dev *cd = cfile->cd;
struct pci_dev *pci_dev = cfile->cd->pci_dev;
struct dma_mapping *dma_map;
unsigned long map_addr;
unsigned long map_size;
if ((m->addr == 0x0) || (m->size == 0))
return -EINVAL;
if (m->size > ULONG_MAX - PAGE_SIZE - (m->addr & ~PAGE_MASK))
return -EINVAL;
map_addr = (m->addr & PAGE_MASK);
map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
if (dma_map == NULL)
return -ENOMEM;
genwqe_mapping_init(dma_map, GENWQE_MAPPING_SGL_PINNED);
rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size);
if (rc != 0) {
dev_err(&pci_dev->dev,
"[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
kfree(dma_map);
return rc;
}
genwqe_add_pin(cfile, dma_map);
return 0;
}
static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
{
struct genwqe_dev *cd = cfile->cd;
struct dma_mapping *dma_map;
unsigned long map_addr;
unsigned long map_size;
if (m->addr == 0x0)
return -EINVAL;
map_addr = (m->addr & PAGE_MASK);
map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
dma_map = genwqe_search_pin(cfile, map_addr, map_size, NULL);
if (dma_map == NULL)
return -ENOENT;
genwqe_del_pin(cfile, dma_map);
genwqe_user_vunmap(cd, dma_map);
kfree(dma_map);
return 0;
}
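/*
 * User-space pin/unpin sketch (illustrative; further struct genwqe_mem
 * fields omitted):
 *
 *   struct genwqe_mem m = {
 *           .addr = (unsigned long)buf,
 *           .size = buf_size,
 *   };
 *   ioctl(fd, GENWQE_PIN_MEM, &m);
 *   // ... issue DDCBs referencing buf ...
 *   ioctl(fd, GENWQE_UNPIN_MEM, &m);
 */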
/**
* ddcb_cmd_cleanup() - Remove dynamically created fixup entries
* @cfile: Descriptor of opened file
* @req: DDCB work request
*
* Only if there are any. Pinnings are not removed.
*/
static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
{
unsigned int i;
struct dma_mapping *dma_map;
struct genwqe_dev *cd = cfile->cd;
for (i = 0; i < DDCB_FIXUPS; i++) {
dma_map = &req->dma_mappings[i];
if (dma_mapping_used(dma_map)) {
__genwqe_del_mapping(cfile, dma_map);
genwqe_user_vunmap(cd, dma_map);
}
if (req->sgls[i].sgl != NULL)
genwqe_free_sync_sgl(cd, &req->sgls[i]);
}
return 0;
}
/**
* ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references
* @cfile: Descriptor of opened file
* @req: DDCB work request
*
* Before the DDCB gets executed we need to handle the fixups. We
* replace the user-space addresses with DMA addresses or do
* additional setup work e.g. generating a scatter-gather list which
* is used to describe the memory referred to in the fixup.
*/
static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
{
int rc;
unsigned int asiv_offs, i;
struct genwqe_dev *cd = cfile->cd;
struct genwqe_ddcb_cmd *cmd = &req->cmd;
struct dma_mapping *m;
for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58;
i++, asiv_offs += 0x08) {
u64 u_addr;
dma_addr_t d_addr;
u32 u_size = 0;
u64 ats_flags;
ats_flags = ATS_GET_FLAGS(cmd->ats, asiv_offs);
switch (ats_flags) {
case ATS_TYPE_DATA:
break; /* nothing to do here */
case ATS_TYPE_FLAT_RDWR:
case ATS_TYPE_FLAT_RD: {
u_addr = be64_to_cpu(*((__be64 *)&cmd->
asiv[asiv_offs]));
u_size = be32_to_cpu(*((__be32 *)&cmd->
asiv[asiv_offs + 0x08]));
/*
* No data available. Ignore u_addr in this
* case and set addr to 0. Hardware must not
* fetch the buffer.
*/
if (u_size == 0x0) {
*((__be64 *)&cmd->asiv[asiv_offs]) =
cpu_to_be64(0x0);
break;
}
m = __genwqe_search_mapping(cfile, u_addr, u_size,
&d_addr, NULL);
if (m == NULL) {
rc = -EFAULT;
goto err_out;
}
*((__be64 *)&cmd->asiv[asiv_offs]) =
cpu_to_be64(d_addr);
break;
}
case ATS_TYPE_SGL_RDWR:
case ATS_TYPE_SGL_RD: {
int page_offs;
u_addr = be64_to_cpu(*((__be64 *)
&cmd->asiv[asiv_offs]));
u_size = be32_to_cpu(*((__be32 *)
&cmd->asiv[asiv_offs + 0x08]));
/*
* No data available. Ignore u_addr in this
* case and set addr to 0. Hardware must not
* fetch the empty sgl.
*/
if (u_size == 0x0) {
*((__be64 *)&cmd->asiv[asiv_offs]) =
cpu_to_be64(0x0);
break;
}
m = genwqe_search_pin(cfile, u_addr, u_size, NULL);
if (m != NULL) {
page_offs = (u_addr -
(u64)m->u_vaddr)/PAGE_SIZE;
} else {
m = &req->dma_mappings[i];
genwqe_mapping_init(m,
GENWQE_MAPPING_SGL_TEMP);
if (ats_flags == ATS_TYPE_SGL_RD)
m->write = 0;
rc = genwqe_user_vmap(cd, m, (void *)u_addr,
u_size);
if (rc != 0)
goto err_out;
__genwqe_add_mapping(cfile, m);
page_offs = 0;
}
/* create genwqe style scatter gather list */
rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i],
(void __user *)u_addr,
u_size, m->write);
if (rc != 0)
goto err_out;
genwqe_setup_sgl(cd, &req->sgls[i],
&m->dma_list[page_offs]);
*((__be64 *)&cmd->asiv[asiv_offs]) =
cpu_to_be64(req->sgls[i].sgl_dma_addr);
break;
}
default:
rc = -EINVAL;
goto err_out;
}
}
return 0;
err_out:
ddcb_cmd_cleanup(cfile, req);
return rc;
}
/**
* genwqe_execute_ddcb() - Execute DDCB using userspace address fixups
* @cfile: Descriptor of opened file
* @cmd: Command identifier (passed from user)
*
* The code will build up the translation tables or look up the
* contiguous memory allocation table to find the right translations
* and DMA addresses.
*/
static int genwqe_execute_ddcb(struct genwqe_file *cfile,
struct genwqe_ddcb_cmd *cmd)
{
int rc;
struct genwqe_dev *cd = cfile->cd;
struct file *filp = cfile->filp;
struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
rc = ddcb_cmd_fixups(cfile, req);
if (rc != 0)
return rc;
rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);
ddcb_cmd_cleanup(cfile, req);
return rc;
}
static int do_execute_ddcb(struct genwqe_file *cfile,
unsigned long arg, int raw)
{
int rc;
struct genwqe_ddcb_cmd *cmd;
struct genwqe_dev *cd = cfile->cd;
struct file *filp = cfile->filp;
cmd = ddcb_requ_alloc();
if (cmd == NULL)
return -ENOMEM;
if (copy_from_user(cmd, (void __user *)arg, sizeof(*cmd))) {
ddcb_requ_free(cmd);
return -EFAULT;
}
if (!raw)
rc = genwqe_execute_ddcb(cfile, cmd);
else
rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);
/* Copy back only the modified fields. Do not copy ASIV
back since the copy got modified by the driver. */
if (copy_to_user((void __user *)arg, cmd,
sizeof(*cmd) - DDCB_ASIV_LENGTH)) {
ddcb_requ_free(cmd);
return -EFAULT;
}
ddcb_requ_free(cmd);
return rc;
}
/**
* genwqe_ioctl() - IO control
* @filp: file handle
* @cmd: command identifier (passed from user)
* @arg: argument (passed from user)
*
* Return: 0 success
*/
static long genwqe_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
int rc = 0;
struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
struct genwqe_dev *cd = cfile->cd;
struct pci_dev *pci_dev = cd->pci_dev;
struct genwqe_reg_io __user *io;
u64 val;
u32 reg_offs;
/* Return -EIO if card hit EEH */
if (pci_channel_offline(pci_dev))
return -EIO;
if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE)
return -EINVAL;
switch (cmd) {
case GENWQE_GET_CARD_STATE:
put_user(cd->card_state, (enum genwqe_card_state __user *)arg);
return 0;
/* Register access */
case GENWQE_READ_REG64: {
io = (struct genwqe_reg_io __user *)arg;
if (get_user(reg_offs, &io->num))
return -EFAULT;
if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
return -EINVAL;
val = __genwqe_readq(cd, reg_offs);
put_user(val, &io->val64);
return 0;
}
case GENWQE_WRITE_REG64: {
io = (struct genwqe_reg_io __user *)arg;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
return -EPERM;
if (get_user(reg_offs, &io->num))
return -EFAULT;
if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
return -EINVAL;
if (get_user(val, &io->val64))
return -EFAULT;
__genwqe_writeq(cd, reg_offs, val);
return 0;
}
case GENWQE_READ_REG32: {
io = (struct genwqe_reg_io __user *)arg;
if (get_user(reg_offs, &io->num))
return -EFAULT;
if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
return -EINVAL;
val = __genwqe_readl(cd, reg_offs);
put_user(val, &io->val64);
return 0;
}
case GENWQE_WRITE_REG32: {
io = (struct genwqe_reg_io __user *)arg;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
return -EPERM;
if (get_user(reg_offs, &io->num))
return -EFAULT;
if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
return -EINVAL;
if (get_user(val, &io->val64))
return -EFAULT;
__genwqe_writel(cd, reg_offs, val);
return 0;
}
/* Flash update/reading */
case GENWQE_SLU_UPDATE: {
struct genwqe_bitstream load;
if (!genwqe_is_privileged(cd))
return -EPERM;
if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
return -EPERM;
if (copy_from_user(&load, (void __user *)arg,
sizeof(load)))
return -EFAULT;
rc = do_flash_update(cfile, &load);
if (copy_to_user((void __user *)arg, &load, sizeof(load)))
return -EFAULT;
return rc;
}
case GENWQE_SLU_READ: {
struct genwqe_bitstream load;
if (!genwqe_is_privileged(cd))
return -EPERM;
if (genwqe_flash_readback_fails(cd))
return -ENOSPC; /* known to fail for old versions */
if (copy_from_user(&load, (void __user *)arg, sizeof(load)))
return -EFAULT;
rc = do_flash_read(cfile, &load);
if (copy_to_user((void __user *)arg, &load, sizeof(load)))
return -EFAULT;
return rc;
}
/* memory pinning and unpinning */
case GENWQE_PIN_MEM: {
struct genwqe_mem m;
if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
return -EFAULT;
return genwqe_pin_mem(cfile, &m);
}
case GENWQE_UNPIN_MEM: {
struct genwqe_mem m;
if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
return -EFAULT;
return genwqe_unpin_mem(cfile, &m);
}
/* launch a DDCB and wait for completion */
case GENWQE_EXECUTE_DDCB:
return do_execute_ddcb(cfile, arg, 0);
case GENWQE_EXECUTE_RAW_DDCB: {
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return do_execute_ddcb(cfile, arg, 1);
}
default:
return -EINVAL;
}
return rc;
}
static const struct file_operations genwqe_fops = {
.owner = THIS_MODULE,
.open = genwqe_open,
.fasync = genwqe_fasync,
.mmap = genwqe_mmap,
.unlocked_ioctl = genwqe_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.release = genwqe_release,
};
static int genwqe_device_initialized(struct genwqe_dev *cd)
{
return cd->dev != NULL;
}
/**
* genwqe_device_create() - Create and configure genwqe char device
* @cd: genwqe device descriptor
*
* This function must be called before we create any more genwqe
* character devices, because it is allocating the major and minor
* number which are supposed to be used by the client drivers.
*/
int genwqe_device_create(struct genwqe_dev *cd)
{
int rc;
struct pci_dev *pci_dev = cd->pci_dev;
/*
* Here starts the individual setup per client. It must
* initialize its own cdev data structure with its own fops.
* The appropriate devnum needs to be created. The ranges must
* not overlap.
*/
rc = alloc_chrdev_region(&cd->devnum_genwqe, 0,
GENWQE_MAX_MINOR, GENWQE_DEVNAME);
if (rc < 0) {
dev_err(&pci_dev->dev, "err: alloc_chrdev_region failed\n");
goto err_dev;
}
cdev_init(&cd->cdev_genwqe, &genwqe_fops);
cd->cdev_genwqe.owner = THIS_MODULE;
rc = cdev_add(&cd->cdev_genwqe, cd->devnum_genwqe, 1);
if (rc < 0) {
dev_err(&pci_dev->dev, "err: cdev_add failed\n");
goto err_add;
}
/*
* Finally the device in /dev/... must be created. The rule is
* to use card%d_clientname for each created device.
*/
cd->dev = device_create_with_groups(cd->class_genwqe,
&cd->pci_dev->dev,
cd->devnum_genwqe, cd,
genwqe_attribute_groups,
GENWQE_DEVNAME "%u_card",
cd->card_idx);
if (IS_ERR(cd->dev)) {
rc = PTR_ERR(cd->dev);
goto err_cdev;
}
genwqe_init_debugfs(cd);
return 0;
err_cdev:
cdev_del(&cd->cdev_genwqe);
err_add:
unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
err_dev:
cd->dev = NULL;
return rc;
}
static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
{
int rc;
unsigned int i;
struct pci_dev *pci_dev = cd->pci_dev;
if (!genwqe_open_files(cd))
return 0;
dev_warn(&pci_dev->dev, "[%s] send SIGIO and wait ...\n", __func__);
rc = genwqe_kill_fasync(cd, SIGIO);
if (rc > 0) {
/* give kill_timeout seconds to close file descriptors ... */
for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
genwqe_open_files(cd); i++) {
dev_info(&pci_dev->dev, " %d sec ...", i);
cond_resched();
msleep(1000);
}
/* if no open files we can safely continue, else ... */
if (!genwqe_open_files(cd))
return 0;
dev_warn(&pci_dev->dev,
"[%s] send SIGKILL and wait ...\n", __func__);
rc = genwqe_terminate(cd);
if (rc) {
/* Give kill_timeout more seconds to end processes */
for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
genwqe_open_files(cd); i++) {
dev_warn(&pci_dev->dev, " %d sec ...", i);
cond_resched();
msleep(1000);
}
}
}
return 0;
}
/**
* genwqe_device_remove() - Remove genwqe's char device
* @cd: GenWQE device information
*
* This function must be called after the client devices are removed
* because it will free the major/minor number range for the genwqe
* drivers.
*
* This function must be robust enough to be called twice.
*/
int genwqe_device_remove(struct genwqe_dev *cd)
{
int rc;
struct pci_dev *pci_dev = cd->pci_dev;
if (!genwqe_device_initialized(cd))
return 1;
genwqe_inform_and_stop_processes(cd);
/*
* We currently wait until all file descriptors are
* closed. Aborting the application instead would decrease
* this reference from 1/unused to 0/illegal, and not from
* 2/used to 1/empty.
*/
rc = kref_read(&cd->cdev_genwqe.kobj.kref);
if (rc != 1) {
dev_err(&pci_dev->dev,
"[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc);
panic("Fatal err: cannot free resources with pending references!");
}
genqwe_exit_debugfs(cd);
device_destroy(cd->class_genwqe, cd->devnum_genwqe);
cdev_del(&cd->cdev_genwqe);
unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
cd->dev = NULL;
return 0;
}
| linux-master | drivers/misc/genwqe/card_dev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
*
* Author: Frank Haverkamp <[email protected]>
* Author: Joerg-Stephan Vogt <[email protected]>
* Author: Michael Jung <[email protected]>
* Author: Michael Ruettger <[email protected]>
*/
/*
* Debugfs interfaces for the GenWQE card. Help to debug potential
* problems. Dump internal chip state for debugging and failure
* determination.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include "card_base.h"
#include "card_ddcb.h"
static void dbg_uidn_show(struct seq_file *s, struct genwqe_reg *regs,
int entries)
{
unsigned int i;
u32 v_hi, v_lo;
for (i = 0; i < entries; i++) {
v_hi = (regs[i].val >> 32) & 0xffffffff;
v_lo = (regs[i].val) & 0xffffffff;
seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x EXT_ERR_REC\n",
regs[i].addr, regs[i].idx, v_hi, v_lo);
}
}
static int curr_dbg_uidn_show(struct seq_file *s, void *unused, int uid)
{
struct genwqe_dev *cd = s->private;
int entries;
struct genwqe_reg *regs;
entries = genwqe_ffdc_buff_size(cd, uid);
if (entries < 0)
return -EINVAL;
if (entries == 0)
return 0;
regs = kcalloc(entries, sizeof(*regs), GFP_KERNEL);
if (regs == NULL)
return -ENOMEM;
genwqe_stop_traps(cd); /* halt the traps while dumping data */
genwqe_ffdc_buff_read(cd, uid, regs, entries);
genwqe_start_traps(cd);
dbg_uidn_show(s, regs, entries);
kfree(regs);
return 0;
}
static int curr_dbg_uid0_show(struct seq_file *s, void *unused)
{
return curr_dbg_uidn_show(s, unused, 0);
}
DEFINE_SHOW_ATTRIBUTE(curr_dbg_uid0);
static int curr_dbg_uid1_show(struct seq_file *s, void *unused)
{
return curr_dbg_uidn_show(s, unused, 1);
}
DEFINE_SHOW_ATTRIBUTE(curr_dbg_uid1);
static int curr_dbg_uid2_show(struct seq_file *s, void *unused)
{
return curr_dbg_uidn_show(s, unused, 2);
}
DEFINE_SHOW_ATTRIBUTE(curr_dbg_uid2);
static int prev_dbg_uidn_show(struct seq_file *s, void *unused, int uid)
{
struct genwqe_dev *cd = s->private;
dbg_uidn_show(s, cd->ffdc[uid].regs, cd->ffdc[uid].entries);
return 0;
}
static int prev_dbg_uid0_show(struct seq_file *s, void *unused)
{
return prev_dbg_uidn_show(s, unused, 0);
}
DEFINE_SHOW_ATTRIBUTE(prev_dbg_uid0);
static int prev_dbg_uid1_show(struct seq_file *s, void *unused)
{
return prev_dbg_uidn_show(s, unused, 1);
}
DEFINE_SHOW_ATTRIBUTE(prev_dbg_uid1);
static int prev_dbg_uid2_show(struct seq_file *s, void *unused)
{
return prev_dbg_uidn_show(s, unused, 2);
}
DEFINE_SHOW_ATTRIBUTE(prev_dbg_uid2);
static int curr_regs_show(struct seq_file *s, void *unused)
{
struct genwqe_dev *cd = s->private;
unsigned int i;
struct genwqe_reg *regs;
regs = kcalloc(GENWQE_FFDC_REGS, sizeof(*regs), GFP_KERNEL);
if (regs == NULL)
return -ENOMEM;
genwqe_stop_traps(cd);
genwqe_read_ffdc_regs(cd, regs, GENWQE_FFDC_REGS, 1);
genwqe_start_traps(cd);
for (i = 0; i < GENWQE_FFDC_REGS; i++) {
if (regs[i].addr == 0xffffffff)
break; /* invalid entries */
if (regs[i].val == 0x0ull)
continue; /* do not print 0x0 FIRs */
seq_printf(s, " 0x%08x 0x%016llx\n",
regs[i].addr, regs[i].val);
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(curr_regs);
static int prev_regs_show(struct seq_file *s, void *unused)
{
struct genwqe_dev *cd = s->private;
unsigned int i;
struct genwqe_reg *regs = cd->ffdc[GENWQE_DBG_REGS].regs;
if (regs == NULL)
return -EINVAL;
for (i = 0; i < GENWQE_FFDC_REGS; i++) {
if (regs[i].addr == 0xffffffff)
break; /* invalid entries */
if (regs[i].val == 0x0ull)
continue; /* do not print 0x0 FIRs */
seq_printf(s, " 0x%08x 0x%016llx\n",
regs[i].addr, regs[i].val);
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(prev_regs);
static int jtimer_show(struct seq_file *s, void *unused)
{
struct genwqe_dev *cd = s->private;
unsigned int vf_num;
u64 jtimer;
jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, 0);
seq_printf(s, " PF 0x%016llx %d msec\n", jtimer,
GENWQE_PF_JOBTIMEOUT_MSEC);
for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) {
jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
vf_num + 1);
seq_printf(s, " VF%-2d 0x%016llx %d msec\n", vf_num, jtimer,
cd->vf_jobtimeout_msec[vf_num]);
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(jtimer);
static int queue_working_time_show(struct seq_file *s, void *unused)
{
struct genwqe_dev *cd = s->private;
unsigned int vf_num;
u64 t;
t = genwqe_read_vreg(cd, IO_SLC_VF_QUEUE_WTIME, 0);
seq_printf(s, " PF 0x%016llx\n", t);
for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) {
t = genwqe_read_vreg(cd, IO_SLC_VF_QUEUE_WTIME, vf_num + 1);
seq_printf(s, " VF%-2d 0x%016llx\n", vf_num, t);
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(queue_working_time);
static int ddcb_info_show(struct seq_file *s, void *unused)
{
struct genwqe_dev *cd = s->private;
unsigned int i;
struct ddcb_queue *queue;
struct ddcb *pddcb;
queue = &cd->queue;
seq_puts(s, "DDCB QUEUE:\n");
seq_printf(s, " ddcb_max: %d\n"
" ddcb_daddr: %016llx - %016llx\n"
" ddcb_vaddr: %p\n"
" ddcbs_in_flight: %u\n"
" ddcbs_max_in_flight: %u\n"
" ddcbs_completed: %u\n"
" return_on_busy: %u\n"
" wait_on_busy: %u\n"
" irqs_processed: %u\n",
queue->ddcb_max, (long long)queue->ddcb_daddr,
(long long)queue->ddcb_daddr +
(queue->ddcb_max * DDCB_LENGTH),
queue->ddcb_vaddr, queue->ddcbs_in_flight,
queue->ddcbs_max_in_flight, queue->ddcbs_completed,
queue->return_on_busy, queue->wait_on_busy,
cd->irqs_processed);
/* Hardware State */
seq_printf(s, " 0x%08x 0x%016llx IO_QUEUE_CONFIG\n"
" 0x%08x 0x%016llx IO_QUEUE_STATUS\n"
" 0x%08x 0x%016llx IO_QUEUE_SEGMENT\n"
" 0x%08x 0x%016llx IO_QUEUE_INITSQN\n"
" 0x%08x 0x%016llx IO_QUEUE_WRAP\n"
" 0x%08x 0x%016llx IO_QUEUE_OFFSET\n"
" 0x%08x 0x%016llx IO_QUEUE_WTIME\n"
" 0x%08x 0x%016llx IO_QUEUE_ERRCNTS\n"
" 0x%08x 0x%016llx IO_QUEUE_LRW\n",
queue->IO_QUEUE_CONFIG,
__genwqe_readq(cd, queue->IO_QUEUE_CONFIG),
queue->IO_QUEUE_STATUS,
__genwqe_readq(cd, queue->IO_QUEUE_STATUS),
queue->IO_QUEUE_SEGMENT,
__genwqe_readq(cd, queue->IO_QUEUE_SEGMENT),
queue->IO_QUEUE_INITSQN,
__genwqe_readq(cd, queue->IO_QUEUE_INITSQN),
queue->IO_QUEUE_WRAP,
__genwqe_readq(cd, queue->IO_QUEUE_WRAP),
queue->IO_QUEUE_OFFSET,
__genwqe_readq(cd, queue->IO_QUEUE_OFFSET),
queue->IO_QUEUE_WTIME,
__genwqe_readq(cd, queue->IO_QUEUE_WTIME),
queue->IO_QUEUE_ERRCNTS,
__genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS),
queue->IO_QUEUE_LRW,
__genwqe_readq(cd, queue->IO_QUEUE_LRW));
seq_printf(s, "DDCB list (ddcb_act=%d/ddcb_next=%d):\n",
queue->ddcb_act, queue->ddcb_next);
pddcb = queue->ddcb_vaddr;
for (i = 0; i < queue->ddcb_max; i++) {
seq_printf(s, " %-3d: RETC=%03x SEQ=%04x HSI/SHI=%02x/%02x ",
i, be16_to_cpu(pddcb->retc_16),
be16_to_cpu(pddcb->seqnum_16),
pddcb->hsi, pddcb->shi);
seq_printf(s, "PRIV=%06llx CMD=%02x\n",
be64_to_cpu(pddcb->priv_64), pddcb->cmd);
pddcb++;
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ddcb_info);
static int info_show(struct seq_file *s, void *unused)
{
struct genwqe_dev *cd = s->private;
u64 app_id, slu_id, bitstream = -1;
struct pci_dev *pci_dev = cd->pci_dev;
slu_id = __genwqe_readq(cd, IO_SLU_UNITCFG);
app_id = __genwqe_readq(cd, IO_APP_UNITCFG);
if (genwqe_is_privileged(cd))
bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM);
seq_printf(s, "%s driver version: %s\n"
" Device Name/Type: %s %s CardIdx: %d\n"
" SLU/APP Config : 0x%016llx/0x%016llx\n"
" Build Date : %u/%x/%u\n"
" Base Clock : %u MHz\n"
" Arch/SVN Release: %u/%llx\n"
" Bitstream : %llx\n",
GENWQE_DEVNAME, DRV_VERSION, dev_name(&pci_dev->dev),
genwqe_is_privileged(cd) ?
"Physical" : "Virtual or no SR-IOV",
cd->card_idx, slu_id, app_id,
(u16)((slu_id >> 12) & 0x0fLLU), /* month */
(u16)((slu_id >> 4) & 0xffLLU), /* day */
(u16)((slu_id >> 16) & 0x0fLLU) + 2010, /* year */
genwqe_base_clock_frequency(cd),
(u16)((slu_id >> 32) & 0xffLLU), slu_id >> 40,
bitstream);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(info);
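/*
 * Build-date decoding example (illustrative): for an slu_id whose
 * bits are [19:16] = 0x3, [15:12] = 0x7 and [11:4] = 0x15, the
 * seq_printf() above emits "Build Date : 7/15/2013"; month and
 * year print as decimal, the BCD-style day via %x.
 */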
void genwqe_init_debugfs(struct genwqe_dev *cd)
{
struct dentry *root;
char card_name[64];
char name[64];
unsigned int i;
sprintf(card_name, "%s%d_card", GENWQE_DEVNAME, cd->card_idx);
root = debugfs_create_dir(card_name, cd->debugfs_genwqe);
/* non privileged interfaces are done here */
debugfs_create_file("ddcb_info", S_IRUGO, root, cd, &ddcb_info_fops);
debugfs_create_file("info", S_IRUGO, root, cd, &info_fops);
debugfs_create_x64("err_inject", 0666, root, &cd->err_inject);
debugfs_create_u32("ddcb_software_timeout", 0666, root,
&cd->ddcb_software_timeout);
debugfs_create_u32("kill_timeout", 0666, root, &cd->kill_timeout);
/* privileged interfaces follow here */
if (!genwqe_is_privileged(cd)) {
cd->debugfs_root = root;
return;
}
debugfs_create_file("curr_regs", S_IRUGO, root, cd, &curr_regs_fops);
debugfs_create_file("curr_dbg_uid0", S_IRUGO, root, cd,
&curr_dbg_uid0_fops);
debugfs_create_file("curr_dbg_uid1", S_IRUGO, root, cd,
&curr_dbg_uid1_fops);
debugfs_create_file("curr_dbg_uid2", S_IRUGO, root, cd,
&curr_dbg_uid2_fops);
debugfs_create_file("prev_regs", S_IRUGO, root, cd, &prev_regs_fops);
debugfs_create_file("prev_dbg_uid0", S_IRUGO, root, cd,
&prev_dbg_uid0_fops);
debugfs_create_file("prev_dbg_uid1", S_IRUGO, root, cd,
&prev_dbg_uid1_fops);
debugfs_create_file("prev_dbg_uid2", S_IRUGO, root, cd,
&prev_dbg_uid2_fops);
for (i = 0; i < GENWQE_MAX_VFS; i++) {
sprintf(name, "vf%u_jobtimeout_msec", i);
debugfs_create_u32(name, 0666, root,
&cd->vf_jobtimeout_msec[i]);
}
debugfs_create_file("jobtimer", S_IRUGO, root, cd, &jtimer_fops);
debugfs_create_file("queue_working_time", S_IRUGO, root, cd,
&queue_working_time_fops);
debugfs_create_u32("skip_recovery", 0666, root, &cd->skip_recovery);
debugfs_create_u32("use_platform_recovery", 0666, root,
&cd->use_platform_recovery);
cd->debugfs_root = root;
}
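/*
 * Resulting debugfs layout sketch (illustrative; assumes debugfs is
 * mounted at /sys/kernel/debug and that cd->debugfs_genwqe is the
 * driver's top-level directory named after GENWQE_DEVNAME):
 *
 *	/sys/kernel/debug/genwqe/genwqe0_card/info
 *	/sys/kernel/debug/genwqe/genwqe0_card/ddcb_info
 *	/sys/kernel/debug/genwqe/genwqe0_card/err_inject
 *	/sys/kernel/debug/genwqe/genwqe0_card/curr_regs            (PF only)
 *	/sys/kernel/debug/genwqe/genwqe0_card/vf0_jobtimeout_msec  (PF only)
 */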
void genqwe_exit_debugfs(struct genwqe_dev *cd)
{
debugfs_remove_recursive(cd->debugfs_root);
}
| linux-master | drivers/misc/genwqe/card_debugfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
*
* Author: Frank Haverkamp <[email protected]>
* Author: Joerg-Stephan Vogt <[email protected]>
* Author: Michael Jung <[email protected]>
* Author: Michael Ruettger <[email protected]>
*/
/*
* Device Driver Control Block (DDCB) queue support. Definition of
* interrupt handlers for queue support as well as triggering the
* health monitor code in case of problems. The current hardware uses
* an MSI interrupt which is shared between error handling and
* functional code.
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/crc-itu-t.h>
#include "card_base.h"
#include "card_ddcb.h"
/*
* N: next DDCB, this is where the next DDCB will be put.
* A: active DDCB, this is where the code will look for the next completion.
* x: DDCB is enqueued, we are waiting for its completion.
* Situation (1): Empty queue
* +---+---+---+---+---+---+---+---+
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
* | | | | | | | | |
* +---+---+---+---+---+---+---+---+
* A/N
* enqueued_ddcbs = A - N = 2 - 2 = 0
*
* Situation (2): Wrapped, N > A
* +---+---+---+---+---+---+---+---+
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
* | | | x | x | | | | |
* +---+---+---+---+---+---+---+---+
* A N
* enqueued_ddcbs = N - A = 4 - 2 = 2
*
* Situation (3): Queue wrapped, A > N
* +---+---+---+---+---+---+---+---+
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
* | x | x | | | x | x | x | x |
* +---+---+---+---+---+---+---+---+
* N A
* enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 2) = 6
*
* Situation (4a): Queue full N > A
* +---+---+---+---+---+---+---+---+
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
* | x | x | x | x | x | x | x | |
* +---+---+---+---+---+---+---+---+
* A N
*
* enqueued_ddcbs = N - A = 7 - 0 = 7
*
* Situation (4b): Queue full A > N
* +---+---+---+---+---+---+---+---+
* | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
* | x | x | x | | x | x | x | x |
* +---+---+---+---+---+---+---+---+
* N A
* enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 3) = 7
*/
static int queue_empty(struct ddcb_queue *queue)
{
return queue->ddcb_next == queue->ddcb_act;
}
static int queue_enqueued_ddcbs(struct ddcb_queue *queue)
{
if (queue->ddcb_next >= queue->ddcb_act)
return queue->ddcb_next - queue->ddcb_act;
return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next);
}
static int queue_free_ddcbs(struct ddcb_queue *queue)
{
int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1;
if (WARN_ON_ONCE(free_ddcbs < 0)) { /* must never ever happen! */
return 0;
}
return free_ddcbs;
}
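/*
 * Worked example (illustrative) for the arithmetic above: with
 * ddcb_max = 8, ddcb_act = 4 and ddcb_next = 2 (situation 3):
 *
 *	enqueued = 8 - (4 - 2) = 6
 *	free     = 8 - 6 - 1   = 1
 *
 * One slot is always held back, so that ddcb_next == ddcb_act can
 * unambiguously mean "queue empty" rather than "queue full".
 */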
/*
* Use of the PRIV field in the DDCB for queue debugging:
*
* (1) Trying to get rid of a DDCB which saw a timeout:
* pddcb->priv[6] = 0xcc; # cleared
*
* (2) Append a DDCB via NEXT bit:
* pddcb->priv[7] = 0xaa; # appended
*
* (3) DDCB needed tapping:
* pddcb->priv[7] = 0xbb; # tapped
*
* (4) DDCB marked as correctly finished:
* pddcb->priv[6] = 0xff; # finished
*/
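/*
 * Example (illustrative): a DDCB that was appended via the NEXT bit
 * and later finished normally ends up with priv[7] = 0xaa and
 * priv[6] = 0xff. Since enqueue_ddcb() zeroes priv_64 first, such a
 * DDCB shows up as PRIV=00ffaa in the %06llx dumps produced by
 * print_ddcb_info() and the debugfs ddcb_info file.
 */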
static inline void ddcb_mark_tapped(struct ddcb *pddcb)
{
pddcb->priv[7] = 0xbb; /* tapped */
}
static inline void ddcb_mark_appended(struct ddcb *pddcb)
{
pddcb->priv[7] = 0xaa; /* appended */
}
static inline void ddcb_mark_cleared(struct ddcb *pddcb)
{
pddcb->priv[6] = 0xcc; /* cleared */
}
static inline void ddcb_mark_finished(struct ddcb *pddcb)
{
pddcb->priv[6] = 0xff; /* finished */
}
static inline void ddcb_mark_unused(struct ddcb *pddcb)
{
pddcb->priv_64 = cpu_to_be64(0); /* not tapped */
}
/**
* genwqe_crc16() - Generate 16-bit crc as required for DDCBs
* @buff: pointer to data buffer
* @len: length of data for calculation
* @init: initial crc (0xffff at start)
*
* Polynomial = x^16 + x^12 + x^5 + 1 (0x1021)
* Example: 4 bytes 0x01 0x02 0x03 0x04 with init = 0xffff
* should result in a crc16 of 0x89c3
*
* Return: crc16 checksum in big endian format !
*/
static inline u16 genwqe_crc16(const u8 *buff, size_t len, u16 init)
{
return crc_itu_t(init, buff, len);
}
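/*
 * Self-check sketch (illustrative, not part of the driver): verifies
 * the documented test vector from the comment above.
 *
 *	static const u8 tv[] = { 0x01, 0x02, 0x03, 0x04 };
 *
 *	WARN_ON(genwqe_crc16(tv, sizeof(tv), 0xffff) != 0x89c3);
 */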
static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue)
{
int i;
struct ddcb *pddcb;
unsigned long flags;
struct pci_dev *pci_dev = cd->pci_dev;
spin_lock_irqsave(&cd->print_lock, flags);
dev_info(&pci_dev->dev,
"DDCB list for card #%d (ddcb_act=%d / ddcb_next=%d):\n",
cd->card_idx, queue->ddcb_act, queue->ddcb_next);
pddcb = queue->ddcb_vaddr;
for (i = 0; i < queue->ddcb_max; i++) {
dev_err(&pci_dev->dev,
" %c %-3d: RETC=%03x SEQ=%04x HSI=%02X SHI=%02x PRIV=%06llx CMD=%03x\n",
i == queue->ddcb_act ? '>' : ' ',
i,
be16_to_cpu(pddcb->retc_16),
be16_to_cpu(pddcb->seqnum_16),
pddcb->hsi,
pddcb->shi,
be64_to_cpu(pddcb->priv_64),
pddcb->cmd);
pddcb++;
}
spin_unlock_irqrestore(&cd->print_lock, flags);
}
struct genwqe_ddcb_cmd *ddcb_requ_alloc(void)
{
struct ddcb_requ *req;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return NULL;
return &req->cmd;
}
void ddcb_requ_free(struct genwqe_ddcb_cmd *cmd)
{
struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
kfree(req);
}
static inline enum genwqe_requ_state ddcb_requ_get_state(struct ddcb_requ *req)
{
return req->req_state;
}
static inline void ddcb_requ_set_state(struct ddcb_requ *req,
enum genwqe_requ_state new_state)
{
req->req_state = new_state;
}
static inline int ddcb_requ_collect_debug_data(struct ddcb_requ *req)
{
return req->cmd.ddata_addr != 0x0;
}
/**
* ddcb_requ_finished() - Returns the hardware state of the associated DDCB
* @cd: pointer to genwqe device descriptor
* @req: DDCB work request
*
* The status of the ddcb_requ mirrors this hardware state, and is
* copied into the ddcb_requ by the interrupt/polling function. The
* low-level code should check the hardware state directly; the
* higher-level code should check the copy.
*
* This function will also return true if the state of the queue is
* not GENWQE_CARD_USED. This enables us to purge all DDCBs in the
* shutdown case.
*/
static int ddcb_requ_finished(struct genwqe_dev *cd, struct ddcb_requ *req)
{
return (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED) ||
(cd->card_state != GENWQE_CARD_USED);
}
#define RET_DDCB_APPENDED 1
#define RET_DDCB_TAPPED 2
/**
* enqueue_ddcb() - Enqueue a DDCB
* @cd: pointer to genwqe device descriptor
* @queue: queue this operation should be done on
* @pddcb: pointer to ddcb structure
* @ddcb_no: number of the DDCB being enqueued/tapped
*
* Start execution of DDCB by tapping or append to queue via NEXT
* bit. This is done by an atomic 'compare and swap' instruction and
* checking SHI and HSI of the previous DDCB.
*
* This function must only be called with ddcb_lock held.
*
* Return: 1 if new DDCB is appended to previous
* 2 if DDCB queue is tapped via register/simulation
*/
static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
struct ddcb *pddcb, int ddcb_no)
{
unsigned int try;
int prev_no;
struct ddcb *prev_ddcb;
__be32 old, new, icrc_hsi_shi;
u64 num;
/*
* For performance checks a Dispatch Timestamp can be put into
* the DDCB. It is supposed to use the SLU's free running counter,
* but this requires PCIe cycles.
*/
ddcb_mark_unused(pddcb);
/* check previous DDCB if already fetched */
prev_no = (ddcb_no == 0) ? queue->ddcb_max - 1 : ddcb_no - 1;
prev_ddcb = &queue->ddcb_vaddr[prev_no];
/*
* The HSI.FETCHED bit may already be set in the previous
* DDCB. Retry in that case; hence we expect at most two
* attempts.
*/
ddcb_mark_appended(pddcb);
for (try = 0; try < 2; try++) {
old = prev_ddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */
/* try to append via NEXT bit if prev DDCB is not completed */
if ((old & DDCB_COMPLETED_BE32) != 0x00000000)
break;
new = (old | DDCB_NEXT_BE32);
wmb(); /* need to ensure write ordering */
icrc_hsi_shi = cmpxchg(&prev_ddcb->icrc_hsi_shi_32, old, new);
if (icrc_hsi_shi == old)
return RET_DDCB_APPENDED; /* appended to queue */
}
/* Queue must be re-started by updating QUEUE_OFFSET */
ddcb_mark_tapped(pddcb);
num = (u64)ddcb_no << 8;
wmb(); /* need to ensure write ordering */
__genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */
return RET_DDCB_TAPPED;
}
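/*
 * Example (illustrative): tapping DDCB #5 writes (5 << 8) = 0x500 to
 * IO_QUEUE_OFFSET, i.e. the hardware takes the DDCB index from bits
 * [15:8]. This matches the lastptr encoding written to IO_QUEUE_WRAP
 * in setup_ddcb_queue() below.
 */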
/**
* copy_ddcb_results() - Copy output state from real DDCB to request
* @req: pointer to requested DDCB parameters
* @ddcb_no: number of the DDCB within the queue
*
* Copy DDCB ASV to request struct. There is no endian
* conversion made, since data structure in ASV is still
* unknown here.
*
* This is needed by:
* - genwqe_purge_ddcb()
* - genwqe_check_ddcb_queue()
*/
static void copy_ddcb_results(struct ddcb_requ *req, int ddcb_no)
{
struct ddcb_queue *queue = req->queue;
struct ddcb *pddcb = &queue->ddcb_vaddr[req->num];
memcpy(&req->cmd.asv[0], &pddcb->asv[0], DDCB_ASV_LENGTH);
/* copy status flags of the variant part */
req->cmd.vcrc = be16_to_cpu(pddcb->vcrc_16);
req->cmd.deque_ts = be64_to_cpu(pddcb->deque_ts_64);
req->cmd.cmplt_ts = be64_to_cpu(pddcb->cmplt_ts_64);
req->cmd.attn = be16_to_cpu(pddcb->attn_16);
req->cmd.progress = be32_to_cpu(pddcb->progress_32);
req->cmd.retc = be16_to_cpu(pddcb->retc_16);
if (ddcb_requ_collect_debug_data(req)) {
int prev_no = (ddcb_no == 0) ?
queue->ddcb_max - 1 : ddcb_no - 1;
struct ddcb *prev_pddcb = &queue->ddcb_vaddr[prev_no];
memcpy(&req->debug_data.ddcb_finished, pddcb,
sizeof(req->debug_data.ddcb_finished));
memcpy(&req->debug_data.ddcb_prev, prev_pddcb,
sizeof(req->debug_data.ddcb_prev));
}
}
/**
* genwqe_check_ddcb_queue() - Checks DDCB queue for completed work requests.
* @cd: pointer to genwqe device descriptor
* @queue: queue to be checked
*
* Return: Number of DDCBs which were finished
*/
static int genwqe_check_ddcb_queue(struct genwqe_dev *cd,
struct ddcb_queue *queue)
{
unsigned long flags;
int ddcbs_finished = 0;
struct pci_dev *pci_dev = cd->pci_dev;
spin_lock_irqsave(&queue->ddcb_lock, flags);
/* FIXME avoid soft locking CPU */
while (!queue_empty(queue) && (ddcbs_finished < queue->ddcb_max)) {
struct ddcb *pddcb;
struct ddcb_requ *req;
u16 vcrc, vcrc_16, retc_16;
pddcb = &queue->ddcb_vaddr[queue->ddcb_act];
if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) ==
0x00000000)
goto go_home; /* not completed, continue waiting */
wmb(); /* Add sync to decouple prev. read operations */
/* Note: DDCB could be purged */
req = queue->ddcb_req[queue->ddcb_act];
if (req == NULL) {
/* this occurs if DDCB is purged, not an error */
/* Move active DDCB further; Nothing to do anymore. */
goto pick_next_one;
}
/*
* HSI=0x44 (fetched and completed), but RETC is
* 0x101, or even worse 0x000.
*
* In case of seeing the queue in inconsistent state
* we read the errcnts and the queue status to provide
* a trigger for our PCIe analyzer stop capturing.
*/
retc_16 = be16_to_cpu(pddcb->retc_16);
if ((pddcb->hsi == 0x44) && (retc_16 <= 0x101)) {
u64 errcnts, status;
u64 ddcb_offs = (u64)pddcb - (u64)queue->ddcb_vaddr;
errcnts = __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS);
status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);
dev_err(&pci_dev->dev,
"[%s] SEQN=%04x HSI=%02x RETC=%03x Q_ERRCNTS=%016llx Q_STATUS=%016llx DDCB_DMA_ADDR=%016llx\n",
__func__, be16_to_cpu(pddcb->seqnum_16),
pddcb->hsi, retc_16, errcnts, status,
queue->ddcb_daddr + ddcb_offs);
}
copy_ddcb_results(req, queue->ddcb_act);
queue->ddcb_req[queue->ddcb_act] = NULL; /* take from queue */
dev_dbg(&pci_dev->dev, "FINISHED DDCB#%d\n", req->num);
genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
ddcb_mark_finished(pddcb);
/* calculate CRC_16 to see if VCRC is correct */
vcrc = genwqe_crc16(pddcb->asv,
VCRC_LENGTH(req->cmd.asv_length),
0xffff);
vcrc_16 = be16_to_cpu(pddcb->vcrc_16);
if (vcrc != vcrc_16) {
printk_ratelimited(KERN_ERR
"%s %s: err: wrong VCRC pre=%02x vcrc_len=%d bytes vcrc_data=%04x is not vcrc_card=%04x\n",
GENWQE_DEVNAME, dev_name(&pci_dev->dev),
pddcb->pre, VCRC_LENGTH(req->cmd.asv_length),
vcrc, vcrc_16);
}
ddcb_requ_set_state(req, GENWQE_REQU_FINISHED);
queue->ddcbs_completed++;
queue->ddcbs_in_flight--;
/* wake up the process waiting for this DDCB, and
processes on the busy queue */
wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]);
wake_up_interruptible(&queue->busy_waitq);
pick_next_one:
queue->ddcb_act = (queue->ddcb_act + 1) % queue->ddcb_max;
ddcbs_finished++;
}
go_home:
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
return ddcbs_finished;
}
/**
* __genwqe_wait_ddcb(): Waits until DDCB is completed
* @cd: pointer to genwqe device descriptor
* @req: pointer to requested DDCB parameters
*
* The Service Layer will update the RETC in DDCB when processing is
* pending or done.
*
* Return: > 0 remaining jiffies, DDCB completed
* -ETIMEDOUT when timeout
* -ERESTARTSYS when ^C
* -EINVAL when unknown error condition
*
* When an error is returned, the caller needs to ensure that
* purge_ddcb() is being called to get the &req removed from the
* queue.
*/
int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
{
int rc;
unsigned int ddcb_no;
struct ddcb_queue *queue;
struct pci_dev *pci_dev = cd->pci_dev;
if (req == NULL)
return -EINVAL;
queue = req->queue;
if (queue == NULL)
return -EINVAL;
ddcb_no = req->num;
if (ddcb_no >= queue->ddcb_max)
return -EINVAL;
rc = wait_event_interruptible_timeout(queue->ddcb_waitqs[ddcb_no],
ddcb_requ_finished(cd, req),
GENWQE_DDCB_SOFTWARE_TIMEOUT * HZ);
/*
* We need to distinguish 3 cases here:
* 1. rc == 0 timeout occurred
* 2. rc == -ERESTARTSYS signal received
* 3. rc > 0 remaining jiffies condition is true
*/
if (rc == 0) {
struct ddcb_queue *queue = req->queue;
struct ddcb *pddcb;
/*
* Timeout may be caused by long task switching time.
* When timeout happens, check if the request has
* meanwhile completed.
*/
genwqe_check_ddcb_queue(cd, req->queue);
if (ddcb_requ_finished(cd, req))
return rc;
dev_err(&pci_dev->dev,
"[%s] err: DDCB#%d timeout rc=%d state=%d req @ %p\n",
__func__, req->num, rc, ddcb_requ_get_state(req),
req);
dev_err(&pci_dev->dev,
"[%s] IO_QUEUE_STATUS=0x%016llx\n", __func__,
__genwqe_readq(cd, queue->IO_QUEUE_STATUS));
pddcb = &queue->ddcb_vaddr[req->num];
genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
print_ddcb_info(cd, req->queue);
return -ETIMEDOUT;
} else if (rc == -ERESTARTSYS) {
return rc;
/*
* EINTR: Stops the application
* ERESTARTSYS: Restartable system call; called again
*/
} else if (rc < 0) {
dev_err(&pci_dev->dev,
"[%s] err: DDCB#%d unknown result (rc=%d) %d!\n",
__func__, req->num, rc, ddcb_requ_get_state(req));
return -EINVAL;
}
/* Severe error occurred. Driver is forced to stop operation */
if (cd->card_state != GENWQE_CARD_USED) {
dev_err(&pci_dev->dev,
"[%s] err: DDCB#%d forced to stop (rc=%d)\n",
__func__, req->num, rc);
return -EIO;
}
return rc;
}
/**
* get_next_ddcb() - Get next available DDCB
* @cd: pointer to genwqe device descriptor
* @queue: DDCB queue
* @num: internal DDCB number
*
* DDCB's content is completely cleared but presets for PRE and
* SEQNUM. This function must only be called when ddcb_lock is held.
*
* Return: NULL if no empty DDCB available otherwise ptr to next DDCB.
*/
static struct ddcb *get_next_ddcb(struct genwqe_dev *cd,
struct ddcb_queue *queue,
int *num)
{
u64 *pu64;
struct ddcb *pddcb;
if (queue_free_ddcbs(queue) == 0) /* queue is full */
return NULL;
/* find new ddcb */
pddcb = &queue->ddcb_vaddr[queue->ddcb_next];
/* if it is not completed, we are not allowed to use it */
/* barrier(); */
if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) == 0x00000000)
return NULL;
*num = queue->ddcb_next; /* internal DDCB number */
queue->ddcb_next = (queue->ddcb_next + 1) % queue->ddcb_max;
/* clear important DDCB fields */
pu64 = (u64 *)pddcb;
pu64[0] = 0ULL; /* offs 0x00 (ICRC,HSI,SHI,...) */
pu64[1] = 0ULL; /* offs 0x01 (ACFUNC,CMD...) */
/* destroy previous results in ASV */
pu64[0x80/8] = 0ULL; /* offs 0x80 (ASV + 0) */
pu64[0x88/8] = 0ULL; /* offs 0x88 (ASV + 0x08) */
pu64[0x90/8] = 0ULL; /* offs 0x90 (ASV + 0x10) */
pu64[0x98/8] = 0ULL; /* offs 0x98 (ASV + 0x18) */
pu64[0xd0/8] = 0ULL; /* offs 0xd0 (RETC,ATTN...) */
pddcb->pre = DDCB_PRESET_PRE; /* 128 */
pddcb->seqnum_16 = cpu_to_be16(queue->ddcb_seq++);
return pddcb;
}
/**
* __genwqe_purge_ddcb() - Remove a DDCB from the workqueue
* @cd: genwqe device descriptor
* @req: DDCB request
*
* This will fail when the request was already FETCHED. In this case
* we need to wait until it is finished. Else the DDCB can be
* reused. This function also ensures that the request data structure
* is removed from ddcb_req[].
*
* Do not forget to call this function when genwqe_wait_ddcb() fails,
* such that the request gets really removed from ddcb_req[].
*
* Return: 0 success
*/
int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
{
struct ddcb *pddcb = NULL;
unsigned int t;
unsigned long flags;
struct ddcb_queue *queue = req->queue;
struct pci_dev *pci_dev = cd->pci_dev;
u64 queue_status;
__be32 icrc_hsi_shi = 0x0000;
__be32 old, new;
if (GENWQE_DDCB_SOFTWARE_TIMEOUT <= 0) {
dev_err(&pci_dev->dev,
"[%s] err: software timeout is not set!\n", __func__);
return -EFAULT;
}
pddcb = &queue->ddcb_vaddr[req->num];
for (t = 0; t < GENWQE_DDCB_SOFTWARE_TIMEOUT * 10; t++) {
spin_lock_irqsave(&queue->ddcb_lock, flags);
/* Check if req was meanwhile finished */
if (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED)
goto go_home;
/* try to set PURGE bit if FETCHED/COMPLETED are not set */
old = pddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */
if ((old & DDCB_FETCHED_BE32) == 0x00000000) {
new = (old | DDCB_PURGE_BE32);
icrc_hsi_shi = cmpxchg(&pddcb->icrc_hsi_shi_32,
old, new);
if (icrc_hsi_shi == old)
goto finish_ddcb;
}
/* normal finish with HSI bit */
barrier();
icrc_hsi_shi = pddcb->icrc_hsi_shi_32;
if (icrc_hsi_shi & DDCB_COMPLETED_BE32)
goto finish_ddcb;
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
/*
* Here the check_ddcb() function will most likely
* discover this DDCB to be finished some point in
* time. It will mark the req finished and free it up
* in the list.
*/
copy_ddcb_results(req, req->num); /* for the failing case */
msleep(100); /* sleep for 1/10 second and try again */
continue;
finish_ddcb:
copy_ddcb_results(req, req->num);
ddcb_requ_set_state(req, GENWQE_REQU_FINISHED);
queue->ddcbs_in_flight--;
queue->ddcb_req[req->num] = NULL; /* delete from array */
ddcb_mark_cleared(pddcb);
/* Move active DDCB further; Nothing to do here anymore. */
/*
* We need to ensure that there is at least one free
* DDCB in the queue. To do that, we must update
* ddcb_act only if the COMPLETED bit is set for the
* DDCB we are working on; otherwise we treat that DDCB,
* even though we PURGED it, as still occupied (the
* hardware has not set the COMPLETED bit yet!).
*/
icrc_hsi_shi = pddcb->icrc_hsi_shi_32;
if ((icrc_hsi_shi & DDCB_COMPLETED_BE32) &&
(queue->ddcb_act == req->num)) {
queue->ddcb_act = ((queue->ddcb_act + 1) %
queue->ddcb_max);
}
go_home:
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
return 0;
}
/*
* If the card is dead and the queue is forced to stop, we
* might see this in the queue status register.
*/
queue_status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);
dev_dbg(&pci_dev->dev, "UN/FINISHED DDCB#%d\n", req->num);
genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
dev_err(&pci_dev->dev,
"[%s] err: DDCB#%d not purged and not completed after %d seconds QSTAT=%016llx!!\n",
__func__, req->num, GENWQE_DDCB_SOFTWARE_TIMEOUT,
queue_status);
print_ddcb_info(cd, req->queue);
return -EFAULT;
}
int genwqe_init_debug_data(struct genwqe_dev *cd, struct genwqe_debug_data *d)
{
int len;
struct pci_dev *pci_dev = cd->pci_dev;
if (d == NULL) {
dev_err(&pci_dev->dev,
"[%s] err: invalid memory for debug data!\n",
__func__);
return -EFAULT;
}
len = sizeof(d->driver_version);
snprintf(d->driver_version, len, "%s", DRV_VERSION);
d->slu_unitcfg = cd->slu_unitcfg;
d->app_unitcfg = cd->app_unitcfg;
return 0;
}
/**
* __genwqe_enqueue_ddcb() - Enqueue a DDCB
* @cd: pointer to genwqe device descriptor
* @req: pointer to DDCB execution request
* @f_flags: file mode: blocking, non-blocking
*
* Return: 0 if enqueuing succeeded
* -EIO if card is unusable/PCIe problems
* -EBUSY if enqueuing failed
*/
int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req,
unsigned int f_flags)
{
struct ddcb *pddcb;
unsigned long flags;
struct ddcb_queue *queue;
struct pci_dev *pci_dev = cd->pci_dev;
u16 icrc;
retry:
if (cd->card_state != GENWQE_CARD_USED) {
printk_ratelimited(KERN_ERR
"%s %s: [%s] Card is unusable/PCIe problem Req#%d\n",
GENWQE_DEVNAME, dev_name(&pci_dev->dev),
__func__, req->num);
return -EIO;
}
queue = req->queue = &cd->queue;
/* FIXME circumvention to improve performance when no irq is
* there.
*/
if (GENWQE_POLLING_ENABLED)
genwqe_check_ddcb_queue(cd, queue);
/*
* It must be ensured to process all DDCBs in successive
* order. Use a lock here in order to prevent nested DDCB
* enqueuing.
*/
spin_lock_irqsave(&queue->ddcb_lock, flags);
pddcb = get_next_ddcb(cd, queue, &req->num); /* get ptr and num */
if (pddcb == NULL) {
int rc;
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
if (f_flags & O_NONBLOCK) {
queue->return_on_busy++;
return -EBUSY;
}
queue->wait_on_busy++;
rc = wait_event_interruptible(queue->busy_waitq,
queue_free_ddcbs(queue) != 0);
dev_dbg(&pci_dev->dev, "[%s] waiting for free DDCB: rc=%d\n",
__func__, rc);
if (rc == -ERESTARTSYS)
return rc; /* interrupted by a signal */
goto retry;
}
if (queue->ddcb_req[req->num] != NULL) {
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
dev_err(&pci_dev->dev,
"[%s] picked DDCB %d with req=%p still in use!!\n",
__func__, req->num, req);
return -EFAULT;
}
ddcb_requ_set_state(req, GENWQE_REQU_ENQUEUED);
queue->ddcb_req[req->num] = req;
pddcb->cmdopts_16 = cpu_to_be16(req->cmd.cmdopts);
pddcb->cmd = req->cmd.cmd;
pddcb->acfunc = req->cmd.acfunc; /* functional unit */
/*
* We know that we can get retc 0x104 with CRC error, do not
* stop the queue in those cases for this command. XDIR = 1
* does not work for old SLU versions.
*
* Last bitstream with the old XDIR behavior had SLU_ID
* 0x34199.
*/
if ((cd->slu_unitcfg & 0xFFFF0ull) > 0x34199ull)
pddcb->xdir = 0x1;
else
pddcb->xdir = 0x0;
pddcb->psp = (((req->cmd.asiv_length / 8) << 4) |
((req->cmd.asv_length / 8)));
pddcb->disp_ts_64 = cpu_to_be64(req->cmd.disp_ts);
/*
* If copying the whole DDCB_ASIV_LENGTH is impacting
* performance we need to change it to
* req->cmd.asiv_length. But simulation benefits from some
* non-architectured bits behind the architectured content.
*
* How much data is copied depends on the availability of the
* ATS field, which was introduced late. If the ATS field is
* supported ASIV is 8 bytes shorter than it used to be. Since
* the ATS field is copied too, the code should do exactly
* what it did before, but I wanted to make copying of the ATS
* field very explicit.
*/
if (genwqe_get_slu_id(cd) <= 0x2) {
memcpy(&pddcb->__asiv[0], /* destination */
&req->cmd.__asiv[0], /* source */
DDCB_ASIV_LENGTH); /* req->cmd.asiv_length */
} else {
pddcb->n.ats_64 = cpu_to_be64(req->cmd.ats);
memcpy(&pddcb->n.asiv[0], /* destination */
&req->cmd.asiv[0], /* source */
DDCB_ASIV_LENGTH_ATS); /* req->cmd.asiv_length */
}
pddcb->icrc_hsi_shi_32 = cpu_to_be32(0x00000000); /* for crc */
/*
* Calculate CRC_16 for corresponding range PSP(7:4). Include
* empty 4 bytes prior to the data.
*/
icrc = genwqe_crc16((const u8 *)pddcb,
ICRC_LENGTH(req->cmd.asiv_length), 0xffff);
pddcb->icrc_hsi_shi_32 = cpu_to_be32((u32)icrc << 16);
/* enable DDCB completion irq */
if (!GENWQE_POLLING_ENABLED)
pddcb->icrc_hsi_shi_32 |= DDCB_INTR_BE32;
dev_dbg(&pci_dev->dev, "INPUT DDCB#%d\n", req->num);
genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
if (ddcb_requ_collect_debug_data(req)) {
/* use the kernel copy of debug data. copying back to
user buffer happens later */
genwqe_init_debug_data(cd, &req->debug_data);
memcpy(&req->debug_data.ddcb_before, pddcb,
sizeof(req->debug_data.ddcb_before));
}
enqueue_ddcb(cd, queue, pddcb, req->num);
queue->ddcbs_in_flight++;
if (queue->ddcbs_in_flight > queue->ddcbs_max_in_flight)
queue->ddcbs_max_in_flight = queue->ddcbs_in_flight;
ddcb_requ_set_state(req, GENWQE_REQU_TAPPED);
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
wake_up_interruptible(&cd->queue_waitq);
return 0;
}
/**
* __genwqe_execute_raw_ddcb() - Setup and execute DDCB
* @cd: pointer to genwqe device descriptor
* @cmd: user provided DDCB command
* @f_flags: file mode: blocking, non-blocking
*/
int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
struct genwqe_ddcb_cmd *cmd,
unsigned int f_flags)
{
int rc = 0;
struct pci_dev *pci_dev = cd->pci_dev;
struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
if (cmd->asiv_length > DDCB_ASIV_LENGTH) {
dev_err(&pci_dev->dev, "[%s] err: wrong asiv_length of %d\n",
__func__, cmd->asiv_length);
return -EINVAL;
}
if (cmd->asv_length > DDCB_ASV_LENGTH) {
dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n",
__func__, cmd->asv_length);
return -EINVAL;
}
rc = __genwqe_enqueue_ddcb(cd, req, f_flags);
if (rc != 0)
return rc;
rc = __genwqe_wait_ddcb(cd, req);
if (rc < 0) /* error or signal interrupt */
goto err_exit;
if (ddcb_requ_collect_debug_data(req)) {
if (copy_to_user((struct genwqe_debug_data __user *)
(unsigned long)cmd->ddata_addr,
&req->debug_data,
sizeof(struct genwqe_debug_data)))
return -EFAULT;
}
/*
* Higher values than 0x102 indicate completion with faults,
* lower values than 0x102 indicate processing faults. Note
* that the DDCB might have been purged, e.g. on Ctrl+C.
*/
if (cmd->retc != DDCB_RETC_COMPLETE) {
/* This might happen e.g. on a flash read and needs to be
handled by the upper-layer code. */
rc = -EBADMSG; /* not processed/error retc */
}
return rc;
err_exit:
__genwqe_purge_ddcb(cd, req);
if (ddcb_requ_collect_debug_data(req)) {
if (copy_to_user((struct genwqe_debug_data __user *)
(unsigned long)cmd->ddata_addr,
&req->debug_data,
sizeof(struct genwqe_debug_data)))
return -EFAULT;
}
return rc;
}
/**
* genwqe_next_ddcb_ready() - Figure out if the next DDCB is already finished
* @cd: pointer to genwqe device descriptor
*
* We use this as condition for our wait-queue code.
*/
static int genwqe_next_ddcb_ready(struct genwqe_dev *cd)
{
unsigned long flags;
struct ddcb *pddcb;
struct ddcb_queue *queue = &cd->queue;
spin_lock_irqsave(&queue->ddcb_lock, flags);
if (queue_empty(queue)) { /* empty queue */
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
return 0;
}
pddcb = &queue->ddcb_vaddr[queue->ddcb_act];
if (pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) { /* ddcb ready */
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
return 1;
}
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
return 0;
}
/**
* genwqe_ddcbs_in_flight() - Check how many DDCBs are in flight
* @cd: pointer to genwqe device descriptor
*
* Keeps track of the number of DDCBs currently in the
* queue. This is needed for statistics as well as for deciding
* whether to wait or rather poll when no interrupts are available.
*/
int genwqe_ddcbs_in_flight(struct genwqe_dev *cd)
{
unsigned long flags;
int ddcbs_in_flight = 0;
struct ddcb_queue *queue = &cd->queue;
spin_lock_irqsave(&queue->ddcb_lock, flags);
ddcbs_in_flight += queue->ddcbs_in_flight;
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
return ddcbs_in_flight;
}
static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
{
int rc, i;
struct ddcb *pddcb;
u64 val64;
unsigned int queue_size;
struct pci_dev *pci_dev = cd->pci_dev;
if (GENWQE_DDCB_MAX < 2)
return -EINVAL;
queue_size = roundup(GENWQE_DDCB_MAX * sizeof(struct ddcb), PAGE_SIZE);
queue->ddcbs_in_flight = 0; /* statistics */
queue->ddcbs_max_in_flight = 0;
queue->ddcbs_completed = 0;
queue->return_on_busy = 0;
queue->wait_on_busy = 0;
queue->ddcb_seq = 0x100; /* start sequence number */
queue->ddcb_max = GENWQE_DDCB_MAX;
queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size,
&queue->ddcb_daddr);
if (queue->ddcb_vaddr == NULL) {
dev_err(&pci_dev->dev,
"[%s] **err: could not allocate DDCB **\n", __func__);
return -ENOMEM;
}
queue->ddcb_req = kcalloc(queue->ddcb_max, sizeof(struct ddcb_requ *),
GFP_KERNEL);
if (!queue->ddcb_req) {
rc = -ENOMEM;
goto free_ddcbs;
}
queue->ddcb_waitqs = kcalloc(queue->ddcb_max,
sizeof(wait_queue_head_t),
GFP_KERNEL);
if (!queue->ddcb_waitqs) {
rc = -ENOMEM;
goto free_requs;
}
for (i = 0; i < queue->ddcb_max; i++) {
pddcb = &queue->ddcb_vaddr[i]; /* DDCBs */
pddcb->icrc_hsi_shi_32 = DDCB_COMPLETED_BE32;
pddcb->retc_16 = cpu_to_be16(0xfff);
queue->ddcb_req[i] = NULL; /* requests */
init_waitqueue_head(&queue->ddcb_waitqs[i]); /* waitqueues */
}
queue->ddcb_act = 0;
queue->ddcb_next = 0; /* queue is empty */
spin_lock_init(&queue->ddcb_lock);
init_waitqueue_head(&queue->busy_waitq);
val64 = ((u64)(queue->ddcb_max - 1) << 8); /* lastptr */
__genwqe_writeq(cd, queue->IO_QUEUE_CONFIG, 0x07); /* iCRC/vCRC */
__genwqe_writeq(cd, queue->IO_QUEUE_SEGMENT, queue->ddcb_daddr);
__genwqe_writeq(cd, queue->IO_QUEUE_INITSQN, queue->ddcb_seq);
__genwqe_writeq(cd, queue->IO_QUEUE_WRAP, val64);
return 0;
free_requs:
kfree(queue->ddcb_req);
queue->ddcb_req = NULL;
free_ddcbs:
__genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,
queue->ddcb_daddr);
queue->ddcb_vaddr = NULL;
queue->ddcb_daddr = 0ull;
return rc;
}
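/*
 * Sizing example (illustrative; assumes GENWQE_DDCB_MAX = 32 and a
 * 256-byte struct ddcb, both assumptions here): queue_size =
 * roundup(32 * 256, PAGE_SIZE) = 8 KiB, i.e. two 4 KiB pages of
 * DMA-coherent memory for the whole DDCB ring, and a lastptr of
 * (32 - 1) << 8 = 0x1f00 written to IO_QUEUE_WRAP.
 */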
static int ddcb_queue_initialized(struct ddcb_queue *queue)
{
return queue->ddcb_vaddr != NULL;
}
static void free_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
{
unsigned int queue_size;
queue_size = roundup(queue->ddcb_max * sizeof(struct ddcb), PAGE_SIZE);
kfree(queue->ddcb_req);
queue->ddcb_req = NULL;
if (queue->ddcb_vaddr) {
__genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,
queue->ddcb_daddr);
queue->ddcb_vaddr = NULL;
queue->ddcb_daddr = 0ull;
}
}
static irqreturn_t genwqe_pf_isr(int irq, void *dev_id)
{
u64 gfir;
struct genwqe_dev *cd = (struct genwqe_dev *)dev_id;
struct pci_dev *pci_dev = cd->pci_dev;
/*
* In case of fatal FIR error the queue is stopped, such that
* we can safely check it without risking anything.
*/
cd->irqs_processed++;
wake_up_interruptible(&cd->queue_waitq);
/*
* Checking for errors before kicking the queue might be
* safer, but slower for the good-case ... See above.
*/
gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
if (((gfir & GFIR_ERR_TRIGGER) != 0x0) &&
!pci_channel_offline(pci_dev)) {
if (cd->use_platform_recovery) {
/*
* Since we use raw accessors, EEH errors won't be
* detected by the platform until we do a non-raw
* MMIO or config space read
*/
readq(cd->mmio + IO_SLC_CFGREG_GFIR);
/* Don't do anything if the PCI channel is frozen */
if (pci_channel_offline(pci_dev))
goto exit;
}
wake_up_interruptible(&cd->health_waitq);
/*
* By default GFIRs causes recovery actions. This
* count is just for debug when recovery is masked.
*/
dev_err_ratelimited(&pci_dev->dev,
"[%s] GFIR=%016llx\n",
__func__, gfir);
}
exit:
return IRQ_HANDLED;
}
static irqreturn_t genwqe_vf_isr(int irq, void *dev_id)
{
struct genwqe_dev *cd = (struct genwqe_dev *)dev_id;
cd->irqs_processed++;
wake_up_interruptible(&cd->queue_waitq);
return IRQ_HANDLED;
}
/**
* genwqe_card_thread() - Work thread for the DDCB queue
* @data: pointer to genwqe device descriptor
*
* The idea is to check if there are DDCBs in processing. If there are
* some finished DDCBs, we process them and wake up the
* requestors. Otherwise we give other processes time using
* cond_resched().
*/
static int genwqe_card_thread(void *data)
{
int should_stop = 0;
struct genwqe_dev *cd = (struct genwqe_dev *)data;
while (!kthread_should_stop()) {
genwqe_check_ddcb_queue(cd, &cd->queue);
if (GENWQE_POLLING_ENABLED) {
wait_event_interruptible_timeout(
cd->queue_waitq,
genwqe_ddcbs_in_flight(cd) ||
(should_stop = kthread_should_stop()), 1);
} else {
wait_event_interruptible_timeout(
cd->queue_waitq,
genwqe_next_ddcb_ready(cd) ||
(should_stop = kthread_should_stop()), HZ);
}
if (should_stop)
break;
/*
* Avoid soft lockups on heavy loads; we do not want
* to disable our interrupts.
*/
cond_resched();
}
return 0;
}
/**
* genwqe_setup_service_layer() - Setup DDCB queue
* @cd: pointer to genwqe device descriptor
*
* Allocate DDCBs. Configure Service Layer Controller (SLC).
*
* Return: 0 success
*/
int genwqe_setup_service_layer(struct genwqe_dev *cd)
{
int rc;
struct ddcb_queue *queue;
struct pci_dev *pci_dev = cd->pci_dev;
if (genwqe_is_privileged(cd)) {
rc = genwqe_card_reset(cd);
if (rc < 0) {
dev_err(&pci_dev->dev,
"[%s] err: reset failed.\n", __func__);
return rc;
}
genwqe_read_softreset(cd);
}
queue = &cd->queue;
queue->IO_QUEUE_CONFIG = IO_SLC_QUEUE_CONFIG;
queue->IO_QUEUE_STATUS = IO_SLC_QUEUE_STATUS;
queue->IO_QUEUE_SEGMENT = IO_SLC_QUEUE_SEGMENT;
queue->IO_QUEUE_INITSQN = IO_SLC_QUEUE_INITSQN;
queue->IO_QUEUE_OFFSET = IO_SLC_QUEUE_OFFSET;
queue->IO_QUEUE_WRAP = IO_SLC_QUEUE_WRAP;
queue->IO_QUEUE_WTIME = IO_SLC_QUEUE_WTIME;
queue->IO_QUEUE_ERRCNTS = IO_SLC_QUEUE_ERRCNTS;
queue->IO_QUEUE_LRW = IO_SLC_QUEUE_LRW;
rc = setup_ddcb_queue(cd, queue);
if (rc != 0) {
rc = -ENODEV;
goto err_out;
}
init_waitqueue_head(&cd->queue_waitq);
cd->card_thread = kthread_run(genwqe_card_thread, cd,
GENWQE_DEVNAME "%d_thread",
cd->card_idx);
if (IS_ERR(cd->card_thread)) {
rc = PTR_ERR(cd->card_thread);
cd->card_thread = NULL;
goto stop_free_queue;
}
rc = genwqe_set_interrupt_capability(cd, GENWQE_MSI_IRQS);
if (rc)
goto stop_kthread;
/*
* We must have all wait-queues initialized when we enable the
* interrupts. Otherwise we might crash if we get an early
* irq.
*/
init_waitqueue_head(&cd->health_waitq);
if (genwqe_is_privileged(cd)) {
rc = request_irq(pci_dev->irq, genwqe_pf_isr, IRQF_SHARED,
GENWQE_DEVNAME, cd);
} else {
rc = request_irq(pci_dev->irq, genwqe_vf_isr, IRQF_SHARED,
GENWQE_DEVNAME, cd);
}
if (rc < 0) {
dev_err(&pci_dev->dev, "irq %d not free.\n", pci_dev->irq);
goto stop_irq_cap;
}
cd->card_state = GENWQE_CARD_USED;
return 0;
stop_irq_cap:
genwqe_reset_interrupt_capability(cd);
stop_kthread:
kthread_stop(cd->card_thread);
cd->card_thread = NULL;
stop_free_queue:
free_ddcb_queue(cd, queue);
err_out:
return rc;
}
/**
* queue_wake_up_all() - Handles fatal error case
* @cd: pointer to genwqe device descriptor
*
* The PCI device got unusable and we have to stop all pending
* requests as fast as we can. The code after this must purge the
* DDCBs in question and ensure that all mappings are freed.
*/
static int queue_wake_up_all(struct genwqe_dev *cd)
{
unsigned int i;
unsigned long flags;
struct ddcb_queue *queue = &cd->queue;
spin_lock_irqsave(&queue->ddcb_lock, flags);
for (i = 0; i < queue->ddcb_max; i++)
wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]);
wake_up_interruptible(&queue->busy_waitq);
spin_unlock_irqrestore(&queue->ddcb_lock, flags);
return 0;
}
/**
* genwqe_finish_queue() - Remove any genwqe devices and user-interfaces
* @cd: pointer to genwqe device descriptor
*
* Relies on the pre-condition that there are no users of the card
* device anymore e.g. with open file-descriptors.
*
* This function must be robust enough to be called twice.
*/
int genwqe_finish_queue(struct genwqe_dev *cd)
{
int i, rc = 0, in_flight;
int waitmax = GENWQE_DDCB_SOFTWARE_TIMEOUT;
struct pci_dev *pci_dev = cd->pci_dev;
struct ddcb_queue *queue = &cd->queue;
if (!ddcb_queue_initialized(queue))
return 0;
/* Do not wipe out the error state. */
if (cd->card_state == GENWQE_CARD_USED)
cd->card_state = GENWQE_CARD_UNUSED;
/* Wake up all requests in the DDCB queue such that they
should be removed nicely. */
queue_wake_up_all(cd);
/* We must wait to get rid of the DDCBs in flight */
for (i = 0; i < waitmax; i++) {
in_flight = genwqe_ddcbs_in_flight(cd);
if (in_flight == 0)
break;
dev_dbg(&pci_dev->dev,
" DEBUG [%d/%d] waiting for queue to get empty: %d requests!\n",
i, waitmax, in_flight);
/*
* Severe error situation: the card itself has
* 16 DDCB queues, each queue has e.g. 32 entries,
* each DDCB has a hardware timeout of currently 250
* msec, but the PFs have a hardware timeout of 8 sec
* ... so we pick something generously large.
*/
msleep(1000);
}
if (i == waitmax) {
dev_err(&pci_dev->dev, " [%s] err: queue is not empty!!\n",
__func__);
rc = -EIO;
}
return rc;
}
/**
* genwqe_release_service_layer() - Shutdown DDCB queue
* @cd: genwqe device descriptor
*
* This function must be robust enough to be called twice.
*/
int genwqe_release_service_layer(struct genwqe_dev *cd)
{
struct pci_dev *pci_dev = cd->pci_dev;
if (!ddcb_queue_initialized(&cd->queue))
return 1;
free_irq(pci_dev->irq, cd);
genwqe_reset_interrupt_capability(cd);
if (cd->card_thread != NULL) {
kthread_stop(cd->card_thread);
cd->card_thread = NULL;
}
free_ddcb_queue(cd, &cd->queue);
return 0;
}
| linux-master | drivers/misc/genwqe/card_ddcb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
*
* Author: Frank Haverkamp <[email protected]>
* Author: Joerg-Stephan Vogt <[email protected]>
* Author: Michael Jung <[email protected]>
* Author: Michael Ruettger <[email protected]>
*/
/*
* Sysfs interfaces for the GenWQE card. There are attributes to query
* the version of the bitstream as well as some for the driver. For
* debugging, please also see the debugfs interfaces of this driver.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include "card_base.h"
#include "card_ddcb.h"
static const char * const genwqe_types[] = {
[GENWQE_TYPE_ALTERA_230] = "GenWQE4-230",
[GENWQE_TYPE_ALTERA_530] = "GenWQE4-530",
[GENWQE_TYPE_ALTERA_A4] = "GenWQE5-A4",
[GENWQE_TYPE_ALTERA_A7] = "GenWQE5-A7",
};
static ssize_t status_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct genwqe_dev *cd = dev_get_drvdata(dev);
const char *cs[GENWQE_CARD_STATE_MAX] = { "unused", "used", "error" };
return sprintf(buf, "%s\n", cs[cd->card_state]);
}
static DEVICE_ATTR_RO(status);
static ssize_t appid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
char app_name[5];
struct genwqe_dev *cd = dev_get_drvdata(dev);
genwqe_read_app_id(cd, app_name, sizeof(app_name));
return sprintf(buf, "%s\n", app_name);
}
static DEVICE_ATTR_RO(appid);
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
u64 slu_id, app_id;
struct genwqe_dev *cd = dev_get_drvdata(dev);
slu_id = __genwqe_readq(cd, IO_SLU_UNITCFG);
app_id = __genwqe_readq(cd, IO_APP_UNITCFG);
return sprintf(buf, "%016llx.%016llx\n", slu_id, app_id);
}
static DEVICE_ATTR_RO(version);
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
u8 card_type;
struct genwqe_dev *cd = dev_get_drvdata(dev);
card_type = genwqe_card_type(cd);
return sprintf(buf, "%s\n", (card_type >= ARRAY_SIZE(genwqe_types)) ?
"invalid" : genwqe_types[card_type]);
}
static DEVICE_ATTR_RO(type);
static ssize_t tempsens_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
u64 tempsens;
struct genwqe_dev *cd = dev_get_drvdata(dev);
tempsens = __genwqe_readq(cd, IO_SLU_TEMPERATURE_SENSOR);
return sprintf(buf, "%016llx\n", tempsens);
}
static DEVICE_ATTR_RO(tempsens);
static ssize_t freerunning_timer_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
u64 t;
struct genwqe_dev *cd = dev_get_drvdata(dev);
t = __genwqe_readq(cd, IO_SLC_FREE_RUNNING_TIMER);
return sprintf(buf, "%016llx\n", t);
}
static DEVICE_ATTR_RO(freerunning_timer);
static ssize_t queue_working_time_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
u64 t;
struct genwqe_dev *cd = dev_get_drvdata(dev);
t = __genwqe_readq(cd, IO_SLC_QUEUE_WTIME);
return sprintf(buf, "%016llx\n", t);
}
static DEVICE_ATTR_RO(queue_working_time);
static ssize_t base_clock_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
u64 base_clock;
struct genwqe_dev *cd = dev_get_drvdata(dev);
base_clock = genwqe_base_clock_frequency(cd);
return sprintf(buf, "%lld\n", base_clock);
}
static DEVICE_ATTR_RO(base_clock);
/*
* curr_bitstream_show() - Show the current bitstream id
*
* There is a bug in some old versions of the CPLD which selects the
* bitstream, which causes the IO_SLU_BITSTREAM register to report
* unreliable data in very rare cases. This makes this sysfs
* entry unreliable until a newer CPLD version is in use.
*
* Unfortunately there is no automatic way yet to query the CPLD
* version, such that you need to manually ensure via programming
* tools that you have a recent version of the CPLD software.
*
* The proposed circumvention is to use a special recovery bitstream
* on the backup partition (0) to identify problems while loading the
* image.
*/
static ssize_t curr_bitstream_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int curr_bitstream;
struct genwqe_dev *cd = dev_get_drvdata(dev);
curr_bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1;
return sprintf(buf, "%d\n", curr_bitstream);
}
static DEVICE_ATTR_RO(curr_bitstream);
/*
* next_bitstream_show() - Show the next activated bitstream
*
* IO_SLC_CFGREG_SOFTRESET: This register can only be accessed by the PF.
*/
static ssize_t next_bitstream_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int next_bitstream;
struct genwqe_dev *cd = dev_get_drvdata(dev);
switch ((cd->softreset & 0xc) >> 2) {
case 0x2:
next_bitstream = 0;
break;
case 0x3:
next_bitstream = 1;
break;
default:
next_bitstream = -1;
break; /* error */
}
return sprintf(buf, "%d\n", next_bitstream);
}
static ssize_t next_bitstream_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int partition;
struct genwqe_dev *cd = dev_get_drvdata(dev);
if (kstrtoint(buf, 0, &partition) < 0)
return -EINVAL;
switch (partition) {
case 0x0:
cd->softreset = 0x78;
break;
case 0x1:
cd->softreset = 0x7c;
break;
default:
return -EINVAL;
}
__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, cd->softreset);
return count;
}
static DEVICE_ATTR_RW(next_bitstream);
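/*
 * Userspace sketch (illustrative; assumes the class device appears
 * under /sys/class/genwqe, which is an assumption about the class
 * name): select the backup partition (0) for the next bitstream.
 *
 *	int fd = open("/sys/class/genwqe/genwqe0_card/next_bitstream",
 *		      O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "0", 1);
 *		close(fd);
 *	}
 */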
static ssize_t reload_bitstream_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int reload;
struct genwqe_dev *cd = dev_get_drvdata(dev);
if (kstrtoint(buf, 0, &reload) < 0)
return -EINVAL;
if (reload == 0x1) {
if (cd->card_state == GENWQE_CARD_UNUSED ||
cd->card_state == GENWQE_CARD_USED)
cd->card_state = GENWQE_CARD_RELOAD_BITSTREAM;
else
return -EIO;
} else {
return -EINVAL;
}
return count;
}
static DEVICE_ATTR_WO(reload_bitstream);
/*
* Create device_attribute structures / params: name, mode, show, store
* additional flag if valid in VF
*/
static struct attribute *genwqe_attributes[] = {
&dev_attr_tempsens.attr,
&dev_attr_next_bitstream.attr,
&dev_attr_curr_bitstream.attr,
&dev_attr_base_clock.attr,
&dev_attr_type.attr,
&dev_attr_version.attr,
&dev_attr_appid.attr,
&dev_attr_status.attr,
&dev_attr_freerunning_timer.attr,
&dev_attr_queue_working_time.attr,
&dev_attr_reload_bitstream.attr,
NULL,
};
static struct attribute *genwqe_normal_attributes[] = {
&dev_attr_type.attr,
&dev_attr_version.attr,
&dev_attr_appid.attr,
&dev_attr_status.attr,
&dev_attr_freerunning_timer.attr,
&dev_attr_queue_working_time.attr,
NULL,
};
/*
* genwqe_is_visible() - Determine if sysfs attribute should be visible or not
*
* VFs have restricted mmio capabilities, so not all sysfs entries
* are allowed in VFs.
*/
static umode_t genwqe_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
unsigned int j;
struct device *dev = kobj_to_dev(kobj);
struct genwqe_dev *cd = dev_get_drvdata(dev);
umode_t mode = attr->mode;
if (genwqe_is_privileged(cd))
return mode;
for (j = 0; genwqe_normal_attributes[j] != NULL; j++)
if (genwqe_normal_attributes[j] == attr)
return mode;
return 0;
}
static struct attribute_group genwqe_attribute_group = {
.is_visible = genwqe_is_visible,
.attrs = genwqe_attributes,
};
const struct attribute_group *genwqe_attribute_groups[] = {
&genwqe_attribute_group,
NULL,
};
| linux-master | drivers/misc/genwqe/card_sysfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* cb710/core.c
*
* Copyright by Michał Mirosław, 2008-2009
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/cb710.h>
#include <linux/gfp.h>
static DEFINE_IDA(cb710_ida);
void cb710_pci_update_config_reg(struct pci_dev *pdev,
int reg, uint32_t mask, uint32_t xor)
{
u32 rval;
pci_read_config_dword(pdev, reg, &rval);
rval = (rval & mask) ^ xor;
pci_write_config_dword(pdev, reg, rval);
}
EXPORT_SYMBOL_GPL(cb710_pci_update_config_reg);
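/*
 * Semantics example (illustrative): the helper computes
 * new = (old & mask) ^ xor. The call
 *
 *	cb710_pci_update_config_reg(pdev, 0x48, ~0x000000FF, 0x0000003F);
 *
 * as used in cb710_pci_configure() below therefore clears the low
 * byte of config register 0x48 and then sets bits 0-5.
 */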
/* Some magic writes based on Windows driver init code */
static int cb710_pci_configure(struct pci_dev *pdev)
{
unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
struct pci_dev *pdev0;
u32 val;
cb710_pci_update_config_reg(pdev, 0x48,
~0x000000FF, 0x0000003F);
pci_read_config_dword(pdev, 0x48, &val);
if (val & 0x80000000)
return 0;
pdev0 = pci_get_slot(pdev->bus, devfn);
if (!pdev0)
return -ENODEV;
if (pdev0->vendor == PCI_VENDOR_ID_ENE
&& pdev0->device == PCI_DEVICE_ID_ENE_720) {
cb710_pci_update_config_reg(pdev0, 0x8C,
~0x00F00000, 0x00100000);
cb710_pci_update_config_reg(pdev0, 0xB0,
~0x08000000, 0x08000000);
}
cb710_pci_update_config_reg(pdev0, 0x8C,
~0x00000F00, 0x00000200);
cb710_pci_update_config_reg(pdev0, 0x90,
~0x00060000, 0x00040000);
pci_dev_put(pdev0);
return 0;
}
static irqreturn_t cb710_irq_handler(int irq, void *data)
{
struct cb710_chip *chip = data;
struct cb710_slot *slot = &chip->slot[0];
irqreturn_t handled = IRQ_NONE;
unsigned nr;
spin_lock(&chip->irq_lock); /* incl. smp_rmb() */
for (nr = chip->slots; nr; ++slot, --nr) {
cb710_irq_handler_t handler_func = slot->irq_handler;
if (handler_func && handler_func(slot))
handled = IRQ_HANDLED;
}
spin_unlock(&chip->irq_lock);
return handled;
}
static void cb710_release_slot(struct device *dev)
{
#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
struct cb710_slot *slot = cb710_pdev_to_slot(to_platform_device(dev));
struct cb710_chip *chip = cb710_slot_to_chip(slot);
/* slot struct can be freed now */
atomic_dec(&chip->slot_refs_count);
#endif
}
static int cb710_register_slot(struct cb710_chip *chip,
unsigned slot_mask, unsigned io_offset, const char *name)
{
int nr = chip->slots;
struct cb710_slot *slot = &chip->slot[nr];
int err;
dev_dbg(cb710_chip_dev(chip),
"register: %s.%d; slot %d; mask %d; IO offset: 0x%02X\n",
name, chip->platform_id, nr, slot_mask, io_offset);
/* slot->irq_handler == NULL here; this needs to be
* seen before platform_device_register() */
++chip->slots;
smp_wmb();
slot->iobase = chip->iobase + io_offset;
slot->pdev.name = name;
slot->pdev.id = chip->platform_id;
slot->pdev.dev.parent = &chip->pdev->dev;
slot->pdev.dev.release = cb710_release_slot;
err = platform_device_register(&slot->pdev);
#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
atomic_inc(&chip->slot_refs_count);
#endif
if (err) {
/* device_initialize() called from platform_device_register()
* wants this on error path */
platform_device_put(&slot->pdev);
/* slot->irq_handler == NULL here anyway, so no lock needed */
--chip->slots;
return err;
}
chip->slot_mask |= slot_mask;
return 0;
}
static void cb710_unregister_slot(struct cb710_chip *chip,
unsigned slot_mask)
{
int nr = chip->slots - 1;
if (!(chip->slot_mask & slot_mask))
return;
platform_device_unregister(&chip->slot[nr].pdev);
/* complementary to spin_unlock() in cb710_set_irq_handler() */
smp_rmb();
BUG_ON(chip->slot[nr].irq_handler != NULL);
/* slot->irq_handler == NULL here, so no lock needed */
--chip->slots;
chip->slot_mask &= ~slot_mask;
}
void cb710_set_irq_handler(struct cb710_slot *slot,
cb710_irq_handler_t handler)
{
struct cb710_chip *chip = cb710_slot_to_chip(slot);
unsigned long flags;
spin_lock_irqsave(&chip->irq_lock, flags);
slot->irq_handler = handler;
spin_unlock_irqrestore(&chip->irq_lock, flags);
}
EXPORT_SYMBOL_GPL(cb710_set_irq_handler);
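/*
 * Usage sketch (hypothetical slot driver, not part of this file): a child
 * driver bound to one of the platform devices registered by
 * cb710_register_slot() above installs a per-slot handler, which
 * cb710_irq_handler() then calls under chip->irq_lock:
 *
 *	static int my_slot_irq(struct cb710_slot *slot)
 *	{
 *		// return nonzero if this slot raised the interrupt
 *	}
 *
 *	cb710_set_irq_handler(slot, my_slot_irq);	// on probe
 *	cb710_set_irq_handler(slot, NULL);		// on remove
 */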
static int __maybe_unused cb710_suspend(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct cb710_chip *chip = pci_get_drvdata(pdev);
devm_free_irq(&pdev->dev, pdev->irq, chip);
return 0;
}
static int __maybe_unused cb710_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct cb710_chip *chip = pci_get_drvdata(pdev);
return devm_request_irq(&pdev->dev, pdev->irq,
cb710_irq_handler, IRQF_SHARED, KBUILD_MODNAME, chip);
}
static int cb710_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct cb710_chip *chip;
u32 val;
int err;
int n = 0;
err = cb710_pci_configure(pdev);
if (err)
return err;
/* this is actually magic... */
pci_read_config_dword(pdev, 0x48, &val);
if (!(val & 0x80000000)) {
pci_write_config_dword(pdev, 0x48, val|0x71000000);
pci_read_config_dword(pdev, 0x48, &val);
}
dev_dbg(&pdev->dev, "PCI config[0x48] = 0x%08X\n", val);
if (!(val & 0x70000000))
return -ENODEV;
val = (val >> 28) & 7;
if (val & CB710_SLOT_MMC)
++n;
if (val & CB710_SLOT_MS)
++n;
if (val & CB710_SLOT_SM)
++n;
chip = devm_kzalloc(&pdev->dev, struct_size(chip, slot, n),
GFP_KERNEL);
if (!chip)
return -ENOMEM;
err = pcim_enable_device(pdev);
if (err)
return err;
err = pcim_iomap_regions(pdev, 0x0001, KBUILD_MODNAME);
if (err)
return err;
spin_lock_init(&chip->irq_lock);
chip->pdev = pdev;
chip->iobase = pcim_iomap_table(pdev)[0];
pci_set_drvdata(pdev, chip);
err = devm_request_irq(&pdev->dev, pdev->irq,
cb710_irq_handler, IRQF_SHARED, KBUILD_MODNAME, chip);
if (err)
return err;
err = ida_alloc(&cb710_ida, GFP_KERNEL);
if (err < 0)
return err;
chip->platform_id = err;
dev_info(&pdev->dev, "id %d, IO 0x%p, IRQ %d\n",
chip->platform_id, chip->iobase, pdev->irq);
if (val & CB710_SLOT_MMC) { /* MMC/SD slot */
err = cb710_register_slot(chip,
CB710_SLOT_MMC, 0x00, "cb710-mmc");
if (err)
return err;
}
if (val & CB710_SLOT_MS) { /* MemoryStick slot */
err = cb710_register_slot(chip,
CB710_SLOT_MS, 0x40, "cb710-ms");
if (err)
goto unreg_mmc;
}
if (val & CB710_SLOT_SM) { /* SmartMedia slot */
err = cb710_register_slot(chip,
CB710_SLOT_SM, 0x60, "cb710-sm");
if (err)
goto unreg_ms;
}
return 0;
unreg_ms:
cb710_unregister_slot(chip, CB710_SLOT_MS);
unreg_mmc:
cb710_unregister_slot(chip, CB710_SLOT_MMC);
#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
BUG_ON(atomic_read(&chip->slot_refs_count) != 0);
#endif
return err;
}
static void cb710_remove_one(struct pci_dev *pdev)
{
struct cb710_chip *chip = pci_get_drvdata(pdev);
cb710_unregister_slot(chip, CB710_SLOT_SM);
cb710_unregister_slot(chip, CB710_SLOT_MS);
cb710_unregister_slot(chip, CB710_SLOT_MMC);
#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
BUG_ON(atomic_read(&chip->slot_refs_count) != 0);
#endif
ida_free(&cb710_ida, chip->platform_id);
}
static const struct pci_device_id cb710_pci_tbl[] = {
{ PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_CB710_FLASH,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }
};
static SIMPLE_DEV_PM_OPS(cb710_pm_ops, cb710_suspend, cb710_resume);
static struct pci_driver cb710_driver = {
.name = KBUILD_MODNAME,
.id_table = cb710_pci_tbl,
.probe = cb710_probe,
.remove = cb710_remove_one,
.driver.pm = &cb710_pm_ops,
};
static int __init cb710_init_module(void)
{
return pci_register_driver(&cb710_driver);
}
static void __exit cb710_cleanup_module(void)
{
pci_unregister_driver(&cb710_driver);
ida_destroy(&cb710_ida);
}
module_init(cb710_init_module);
module_exit(cb710_cleanup_module);
MODULE_AUTHOR("Michał Mirosław <[email protected]>");
MODULE_DESCRIPTION("ENE CB710 memory card reader driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cb710_pci_tbl);
| linux-master | drivers/misc/cb710/core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* cb710/debug.c
*
* Copyright by Michał Mirosław, 2008-2009
*/
#include <linux/cb710.h>
#include <linux/kernel.h>
#include <linux/module.h>
#define CB710_REG_COUNT 0x80
static const u16 allow[CB710_REG_COUNT/16] = {
0xFFF0, 0xFFFF, 0xFFFF, 0xFFFF,
0xFFF0, 0xFFFF, 0xFFFF, 0xFFFF,
};
static const char *const prefix[ARRAY_SIZE(allow)] = {
"MMC", "MMC", "MMC", "MMC",
"MS?", "MS?", "SM?", "SM?"
};
static inline int allow_reg_read(unsigned block, unsigned offset, unsigned bits)
{
unsigned mask = (1 << bits/8) - 1;
offset *= bits/8;
return ((allow[block] >> offset) & mask) == mask;
}
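/*
 * Worked example (illustrative): each u16 in allow[] covers one 16-byte
 * register block, one bit per byte.  allow[0] = 0xFFF0 marks bytes 4..15
 * of block 0 readable and bytes 0..3 off limits.  For a 32-bit access to
 * block 0 (mask = 0xF, offset scaled by 4 bytes):
 *
 *	allow_reg_read(0, 0, 32)  ->  ((0xFFF0 >> 0) & 0xF) == 0xF  ->  false
 *	allow_reg_read(0, 1, 32)  ->  ((0xFFF0 >> 4) & 0xF) == 0xF  ->  true
 *
 * A read is permitted only if every byte it touches is marked readable.
 */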
#define CB710_READ_REGS_TEMPLATE(t) \
static void cb710_read_regs_##t(void __iomem *iobase, \
u##t *reg, unsigned select) \
{ \
unsigned i, j; \
\
for (i = 0; i < ARRAY_SIZE(allow); ++i, reg += 16/(t/8)) { \
if (!(select & (1 << i))) \
continue; \
\
for (j = 0; j < 0x10/(t/8); ++j) { \
if (!allow_reg_read(i, j, t)) \
continue; \
reg[j] = ioread##t(iobase \
+ (i << 4) + (j * (t/8))); \
} \
} \
}
static const char cb710_regf_8[] = "%02X";
static const char cb710_regf_16[] = "%04X";
static const char cb710_regf_32[] = "%08X";
static const char cb710_xes[] = "xxxxxxxx";
#define CB710_DUMP_REGS_TEMPLATE(t) \
static void cb710_dump_regs_##t(struct device *dev, \
const u##t *reg, unsigned select) \
{ \
const char *const xp = &cb710_xes[8 - t/4]; \
const char *const format = cb710_regf_##t; \
\
char msg[100], *p; \
unsigned i, j; \
\
for (i = 0; i < ARRAY_SIZE(allow); ++i, reg += 16/(t/8)) { \
if (!(select & (1 << i))) \
continue; \
p = msg; \
for (j = 0; j < 0x10/(t/8); ++j) { \
*p++ = ' '; \
if (j == 8/(t/8)) \
*p++ = ' '; \
if (allow_reg_read(i, j, t)) \
p += sprintf(p, format, reg[j]); \
else \
p += sprintf(p, "%s", xp); \
} \
dev_dbg(dev, "%s 0x%02X %s\n", prefix[i], i << 4, msg); \
} \
}
#define CB710_READ_AND_DUMP_REGS_TEMPLATE(t) \
static void cb710_read_and_dump_regs_##t(struct cb710_chip *chip, \
unsigned select) \
{ \
u##t regs[CB710_REG_COUNT/sizeof(u##t)]; \
\
	memset(&regs, 0, sizeof(regs)); \
cb710_read_regs_##t(chip->iobase, regs, select); \
cb710_dump_regs_##t(cb710_chip_dev(chip), regs, select); \
}
#define CB710_REG_ACCESS_TEMPLATES(t) \
CB710_READ_REGS_TEMPLATE(t) \
CB710_DUMP_REGS_TEMPLATE(t) \
CB710_READ_AND_DUMP_REGS_TEMPLATE(t)
CB710_REG_ACCESS_TEMPLATES(8)
CB710_REG_ACCESS_TEMPLATES(16)
CB710_REG_ACCESS_TEMPLATES(32)
void cb710_dump_regs(struct cb710_chip *chip, unsigned select)
{
if (!(select & CB710_DUMP_REGS_MASK))
select = CB710_DUMP_REGS_ALL;
if (!(select & CB710_DUMP_ACCESS_MASK))
select |= CB710_DUMP_ACCESS_8;
if (select & CB710_DUMP_ACCESS_32)
cb710_read_and_dump_regs_32(chip, select);
if (select & CB710_DUMP_ACCESS_16)
cb710_read_and_dump_regs_16(chip, select);
if (select & CB710_DUMP_ACCESS_8)
cb710_read_and_dump_regs_8(chip, select);
}
EXPORT_SYMBOL_GPL(cb710_dump_regs);
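/*
 * Usage sketch (assuming the CB710_DUMP_* flags from <linux/cb710.h>): a
 * caller picks which 16-byte blocks to dump and at what access width, e.g.
 *
 *	cb710_dump_regs(chip, CB710_DUMP_REGS_MMC | CB710_DUMP_ACCESS_16);
 *
 * Defaults fill in the rest: with no REGS bits set all blocks are dumped,
 * and with no ACCESS bits set 8-bit reads are used.
 */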
| linux-master | drivers/misc/cb710/debug.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* cb710/sgbuf2.c
*
* Copyright by Michał Mirosław, 2008-2009
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cb710.h>
static bool sg_dwiter_next(struct sg_mapping_iter *miter)
{
if (sg_miter_next(miter)) {
miter->consumed = 0;
return true;
} else
return false;
}
static bool sg_dwiter_is_at_end(struct sg_mapping_iter *miter)
{
return miter->length == miter->consumed && !sg_dwiter_next(miter);
}
static uint32_t sg_dwiter_read_buffer(struct sg_mapping_iter *miter)
{
size_t len, left = 4;
uint32_t data;
void *addr = &data;
do {
len = min(miter->length - miter->consumed, left);
memcpy(addr, miter->addr + miter->consumed, len);
miter->consumed += len;
left -= len;
if (!left)
return data;
addr += len;
} while (sg_dwiter_next(miter));
memset(addr, 0, left);
return data;
}
static inline bool needs_unaligned_copy(const void *ptr)
{
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
return false;
#else
return ((uintptr_t)ptr & 3) != 0;
#endif
}
static bool sg_dwiter_get_next_block(struct sg_mapping_iter *miter, uint32_t **ptr)
{
size_t len;
if (sg_dwiter_is_at_end(miter))
return true;
len = miter->length - miter->consumed;
if (likely(len >= 4 && !needs_unaligned_copy(
miter->addr + miter->consumed))) {
*ptr = miter->addr + miter->consumed;
miter->consumed += 4;
return true;
}
return false;
}
/**
* cb710_sg_dwiter_read_next_block() - get next 32-bit word from sg buffer
* @miter: sg mapping iterator used for reading
*
* Description:
* Returns 32-bit word starting at byte pointed to by @miter@
* handling any alignment issues. Bytes past the buffer's end
* are not accessed (read) but are returned as zeroes. @miter@
* is advanced by 4 bytes or to the end of buffer whichever is
* closer.
*
* Context:
* Same requirements as in sg_miter_next().
*
* Returns:
* 32-bit word just read.
*/
uint32_t cb710_sg_dwiter_read_next_block(struct sg_mapping_iter *miter)
{
uint32_t *ptr = NULL;
if (likely(sg_dwiter_get_next_block(miter, &ptr)))
return ptr ? *ptr : 0;
return sg_dwiter_read_buffer(miter);
}
EXPORT_SYMBOL_GPL(cb710_sg_dwiter_read_next_block);
static void sg_dwiter_write_slow(struct sg_mapping_iter *miter, uint32_t data)
{
size_t len, left = 4;
void *addr = &data;
do {
len = min(miter->length - miter->consumed, left);
memcpy(miter->addr, addr, len);
miter->consumed += len;
left -= len;
if (!left)
return;
addr += len;
} while (sg_dwiter_next(miter));
}
/**
* cb710_sg_dwiter_write_next_block() - write next 32-bit word to sg buffer
* @miter: sg mapping iterator used for writing
* @data: data to write to sg buffer
*
* Description:
* Writes 32-bit word starting at byte pointed to by @miter@
* handling any alignment issues. Bytes which would be written
* past the buffer's end are silently discarded. @miter@ is
* advanced by 4 bytes or to the end of buffer whichever is closer.
*
* Context:
* Same requirements as in sg_miter_next().
*/
void cb710_sg_dwiter_write_next_block(struct sg_mapping_iter *miter, uint32_t data)
{
uint32_t *ptr = NULL;
if (likely(sg_dwiter_get_next_block(miter, &ptr))) {
if (ptr)
*ptr = data;
else
return;
} else
sg_dwiter_write_slow(miter, data);
}
EXPORT_SYMBOL_GPL(cb710_sg_dwiter_write_next_block);
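/*
 * Usage sketch (illustrative; sg/nents/count come from the caller): both
 * helpers ride on a plain sg_mapping_iter, so streaming 32-bit words out
 * of a scatterlist looks like
 *
 *	struct sg_mapping_iter miter;
 *
 *	sg_miter_start(&miter, sg, nents, SG_MITER_FROM_SG);
 *	while (count--)
 *		data = cb710_sg_dwiter_read_next_block(&miter);
 *	sg_miter_stop(&miter);
 *
 * The write direction is symmetric: start with SG_MITER_TO_SG and call
 * cb710_sg_dwiter_write_next_block(&miter, data) instead.
 */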
| linux-master | drivers/misc/cb710/sgbuf2.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SN Platform GRU Driver
*
* MMUOPS callbacks + TLB flushing
*
 * This file handles mmu notifier callbacks from the core kernel. The callbacks
* are used to update the TLB in the GRU as a result of changes in the
* state of a process address space. This file also handles TLB invalidates
* from the GRU driver.
*
* Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/hugetlb.h>
#include <linux/delay.h>
#include <linux/timex.h>
#include <linux/srcu.h>
#include <asm/processor.h>
#include "gru.h"
#include "grutables.h"
#include <asm/uv/uv_hub.h>
#define gru_random() get_cycles()
/* ---------------------------------- TLB Invalidation functions --------
* get_tgh_handle
*
* Find a TGH to use for issuing a TLB invalidate. For GRUs that are on the
* local blade, use a fixed TGH that is a function of the blade-local cpu
* number. Normally, this TGH is private to the cpu & no contention occurs for
* the TGH. For offblade GRUs, select a random TGH in the range above the
* private TGHs. A spinlock is required to access this TGH & the lock must be
 * private TGHs. A spinlock is required to access this TGH & the lock must be
 * released when the invalidate completes. This sucks, but it is the best we
* can do.
*
* Note that the spinlock is IN the TGH handle so locking does not involve
* additional cache lines.
*
*/
static inline int get_off_blade_tgh(struct gru_state *gru)
{
int n;
n = GRU_NUM_TGH - gru->gs_tgh_first_remote;
n = gru_random() % n;
n += gru->gs_tgh_first_remote;
return n;
}
static inline int get_on_blade_tgh(struct gru_state *gru)
{
return uv_blade_processor_id() >> gru->gs_tgh_local_shift;
}
static struct gru_tlb_global_handle *get_lock_tgh_handle(struct gru_state
*gru)
{
struct gru_tlb_global_handle *tgh;
int n;
preempt_disable();
if (uv_numa_blade_id() == gru->gs_blade_id)
n = get_on_blade_tgh(gru);
else
n = get_off_blade_tgh(gru);
tgh = get_tgh_by_index(gru, n);
lock_tgh_handle(tgh);
return tgh;
}
static void get_unlock_tgh_handle(struct gru_tlb_global_handle *tgh)
{
unlock_tgh_handle(tgh);
preempt_enable();
}
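/*
 * Worked example (illustrative): with gs_tgh_local_shift == 1 (a blade
 * with 17..32 cpus), blade-local cpu 5 uses TGH 5 >> 1 == 2, shared only
 * with its partner cpu 4.  Off-blade flushes instead pick a random handle
 * at or above gs_tgh_first_remote and serialize on the lock embedded in
 * the handle itself:
 *
 *	tgh = get_lock_tgh_handle(gru);
 *	tgh_invalidate(tgh, ...);
 *	get_unlock_tgh_handle(tgh);
 */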
/*
* gru_flush_tlb_range
*
* General purpose TLB invalidation function. This function scans every GRU in
* the ENTIRE system (partition) looking for GRUs where the specified MM has
* been accessed by the GRU. For each GRU found, the TLB must be invalidated OR
* the ASID invalidated. Invalidating an ASID causes a new ASID to be assigned
* on the next fault. This effectively flushes the ENTIRE TLB for the MM at the
* cost of (possibly) a large number of future TLBmisses.
*
* The current algorithm is optimized based on the following (somewhat true)
* assumptions:
* - GRU contexts are not loaded into a GRU unless a reference is made to
* the data segment or control block (this is true, not an assumption).
* If a DS/CB is referenced, the user will also issue instructions that
* cause TLBmisses. It is not necessary to optimize for the case where
* contexts are loaded but no instructions cause TLB misses. (I know
* this will happen but I'm not optimizing for it).
* - GRU instructions to invalidate TLB entries are SLOOOOWWW - normally
* a few usec but in unusual cases, it could be longer. Avoid if
* possible.
* - intrablade process migration between cpus is not frequent but is
* common.
* - a GRU context is not typically migrated to a different GRU on the
* blade because of intrablade migration
* - interblade migration is rare. Processes migrate their GRU context to
* the new blade.
* - if interblade migration occurs, migration back to the original blade
* is very very rare (ie., no optimization for this case)
 * 	- most GRU instructions operate on a subset of the user REGIONS. Code
* & shared library regions are not likely targets of GRU instructions.
*
* To help improve the efficiency of TLB invalidation, the GMS data
* structure is maintained for EACH address space (MM struct). The GMS is
* also the structure that contains the pointer to the mmu callout
* functions. This structure is linked to the mm_struct for the address space
* using the mmu "register" function. The mmu interfaces are used to
* provide the callbacks for TLB invalidation. The GMS contains:
*
* - asid[maxgrus] array. ASIDs are assigned to a GRU when a context is
* loaded into the GRU.
* - asidmap[maxgrus]. bitmap to make it easier to find non-zero asids in
* the above array
* - ctxbitmap[maxgrus]. Indicates the contexts that are currently active
* in the GRU for the address space. This bitmap must be passed to the
* GRU to do an invalidate.
*
* The current algorithm for invalidating TLBs is:
* - scan the asidmap for GRUs where the context has been loaded, ie,
* asid is non-zero.
* - for each gru found:
* - if the ctxtmap is non-zero, there are active contexts in the
* GRU. TLB invalidate instructions must be issued to the GRU.
* - if the ctxtmap is zero, no context is active. Set the ASID to
* zero to force a full TLB invalidation. This is fast but will
* cause a lot of TLB misses if the context is reloaded onto the
* GRU
*
*/
void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
unsigned long len)
{
struct gru_state *gru;
struct gru_mm_tracker *asids;
struct gru_tlb_global_handle *tgh;
unsigned long num;
int grupagesize, pagesize, pageshift, gid, asid;
/* ZZZ TODO - handle huge pages */
pageshift = PAGE_SHIFT;
pagesize = (1UL << pageshift);
grupagesize = GRU_PAGESIZE(pageshift);
num = min(((len + pagesize - 1) >> pageshift), GRUMAXINVAL);
STAT(flush_tlb);
gru_dbg(grudev, "gms %p, start 0x%lx, len 0x%lx, asidmap 0x%lx\n", gms,
start, len, gms->ms_asidmap[0]);
spin_lock(&gms->ms_asid_lock);
for_each_gru_in_bitmap(gid, gms->ms_asidmap) {
STAT(flush_tlb_gru);
gru = GID_TO_GRU(gid);
asids = gms->ms_asids + gid;
asid = asids->mt_asid;
if (asids->mt_ctxbitmap && asid) {
STAT(flush_tlb_gru_tgh);
asid = GRUASID(asid, start);
gru_dbg(grudev,
" FLUSH gruid %d, asid 0x%x, vaddr 0x%lx, vamask 0x%x, num %ld, cbmap 0x%x\n",
gid, asid, start, grupagesize, num, asids->mt_ctxbitmap);
tgh = get_lock_tgh_handle(gru);
tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0,
num - 1, asids->mt_ctxbitmap);
get_unlock_tgh_handle(tgh);
} else {
STAT(flush_tlb_gru_zero_asid);
asids->mt_asid = 0;
__clear_bit(gru->gs_gid, gms->ms_asidmap);
gru_dbg(grudev,
" CLEARASID gruid %d, asid 0x%x, cbtmap 0x%x, asidmap 0x%lx\n",
gid, asid, asids->mt_ctxbitmap,
gms->ms_asidmap[0]);
}
}
spin_unlock(&gms->ms_asid_lock);
}
/*
* Flush the entire TLB on a chiplet.
*/
void gru_flush_all_tlb(struct gru_state *gru)
{
struct gru_tlb_global_handle *tgh;
gru_dbg(grudev, "gid %d\n", gru->gs_gid);
tgh = get_lock_tgh_handle(gru);
tgh_invalidate(tgh, 0, ~0, 0, 1, 1, GRUMAXINVAL - 1, 0xffff);
get_unlock_tgh_handle(tgh);
}
/*
* MMUOPS notifier callout functions
*/
static int gru_invalidate_range_start(struct mmu_notifier *mn,
const struct mmu_notifier_range *range)
{
struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
ms_notifier);
STAT(mmu_invalidate_range);
atomic_inc(&gms->ms_range_active);
gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx, act %d\n", gms,
range->start, range->end, atomic_read(&gms->ms_range_active));
gru_flush_tlb_range(gms, range->start, range->end - range->start);
return 0;
}
static void gru_invalidate_range_end(struct mmu_notifier *mn,
const struct mmu_notifier_range *range)
{
struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
ms_notifier);
/* ..._and_test() provides needed barrier */
(void)atomic_dec_and_test(&gms->ms_range_active);
wake_up_all(&gms->ms_wait_queue);
gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n",
gms, range->start, range->end);
}
static struct mmu_notifier *gru_alloc_notifier(struct mm_struct *mm)
{
struct gru_mm_struct *gms;
gms = kzalloc(sizeof(*gms), GFP_KERNEL);
if (!gms)
return ERR_PTR(-ENOMEM);
STAT(gms_alloc);
spin_lock_init(&gms->ms_asid_lock);
init_waitqueue_head(&gms->ms_wait_queue);
return &gms->ms_notifier;
}
static void gru_free_notifier(struct mmu_notifier *mn)
{
kfree(container_of(mn, struct gru_mm_struct, ms_notifier));
STAT(gms_free);
}
static const struct mmu_notifier_ops gru_mmuops = {
.invalidate_range_start = gru_invalidate_range_start,
.invalidate_range_end = gru_invalidate_range_end,
.alloc_notifier = gru_alloc_notifier,
.free_notifier = gru_free_notifier,
};
struct gru_mm_struct *gru_register_mmu_notifier(void)
{
struct mmu_notifier *mn;
mn = mmu_notifier_get_locked(&gru_mmuops, current->mm);
if (IS_ERR(mn))
return ERR_CAST(mn);
return container_of(mn, struct gru_mm_struct, ms_notifier);
}
void gru_drop_mmu_notifier(struct gru_mm_struct *gms)
{
mmu_notifier_put(&gms->ms_notifier);
}
/*
* Setup TGH parameters. There are:
* - 24 TGH handles per GRU chiplet
* - a portion (MAX_LOCAL_TGH) of the handles are reserved for
* use by blade-local cpus
* - the rest are used by off-blade cpus. This usage is
* less frequent than blade-local usage.
*
* For now, use 16 handles for local flushes, 8 for remote flushes. If the blade
 * has less than or equal to 16 cpus, each cpu has a unique handle that it can
* use.
*/
#define MAX_LOCAL_TGH 16
void gru_tgh_flush_init(struct gru_state *gru)
{
int cpus, shift = 0, n;
cpus = uv_blade_nr_possible_cpus(gru->gs_blade_id);
/* n = cpus rounded up to next power of 2 */
if (cpus) {
n = 1 << fls(cpus - 1);
/*
* shift count for converting local cpu# to TGH index
* 0 if cpus <= MAX_LOCAL_TGH,
* 1 if cpus <= 2*MAX_LOCAL_TGH,
* etc
*/
shift = max(0, fls(n - 1) - fls(MAX_LOCAL_TGH - 1));
}
gru->gs_tgh_local_shift = shift;
/* first starting TGH index to use for remote purges */
gru->gs_tgh_first_remote = (cpus + (1 << shift) - 1) >> shift;
}
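/*
 * Worked example (illustrative): for a blade with 24 possible cpus,
 * n = 1 << fls(23) = 32, so shift = fls(31) - fls(15) = 5 - 4 = 1.  Local
 * cpus then map pairwise onto TGHs 0..11, and gs_tgh_first_remote =
 * (24 + 1) >> 1 = 12, leaving handles 12..23 for off-blade flushes.
 */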
| linux-master | drivers/misc/sgi-gru/grutlbpurge.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SN Platform GRU Driver
*
* PROC INTERFACES
*
* This file supports the /proc interfaces for the GRU driver
*
* Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/proc_fs.h>
#include <linux/device.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
#define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
{
unsigned long val = atomic_long_read(v);
seq_printf(s, "%16lu %s\n", val, id);
}
static int statistics_show(struct seq_file *s, void *p)
{
printstat(s, vdata_alloc);
printstat(s, vdata_free);
printstat(s, gts_alloc);
printstat(s, gts_free);
printstat(s, gms_alloc);
printstat(s, gms_free);
printstat(s, gts_double_allocate);
printstat(s, assign_context);
printstat(s, assign_context_failed);
printstat(s, free_context);
printstat(s, load_user_context);
printstat(s, load_kernel_context);
printstat(s, lock_kernel_context);
printstat(s, unlock_kernel_context);
printstat(s, steal_user_context);
printstat(s, steal_kernel_context);
printstat(s, steal_context_failed);
printstat(s, nopfn);
printstat(s, asid_new);
printstat(s, asid_next);
printstat(s, asid_wrap);
printstat(s, asid_reuse);
printstat(s, intr);
printstat(s, intr_cbr);
printstat(s, intr_tfh);
printstat(s, intr_spurious);
printstat(s, intr_mm_lock_failed);
printstat(s, call_os);
printstat(s, call_os_wait_queue);
printstat(s, user_flush_tlb);
printstat(s, user_unload_context);
printstat(s, user_exception);
printstat(s, set_context_option);
printstat(s, check_context_retarget_intr);
printstat(s, check_context_unload);
printstat(s, tlb_dropin);
printstat(s, tlb_preload_page);
printstat(s, tlb_dropin_fail_no_asid);
printstat(s, tlb_dropin_fail_upm);
printstat(s, tlb_dropin_fail_invalid);
printstat(s, tlb_dropin_fail_range_active);
printstat(s, tlb_dropin_fail_idle);
printstat(s, tlb_dropin_fail_fmm);
printstat(s, tlb_dropin_fail_no_exception);
printstat(s, tfh_stale_on_fault);
printstat(s, mmu_invalidate_range);
printstat(s, mmu_invalidate_page);
printstat(s, flush_tlb);
printstat(s, flush_tlb_gru);
printstat(s, flush_tlb_gru_tgh);
printstat(s, flush_tlb_gru_zero_asid);
printstat(s, copy_gpa);
printstat(s, read_gpa);
printstat(s, mesq_receive);
printstat(s, mesq_receive_none);
printstat(s, mesq_send);
printstat(s, mesq_send_failed);
printstat(s, mesq_noop);
printstat(s, mesq_send_unexpected_error);
printstat(s, mesq_send_lb_overflow);
printstat(s, mesq_send_qlimit_reached);
printstat(s, mesq_send_amo_nacked);
printstat(s, mesq_send_put_nacked);
printstat(s, mesq_qf_locked);
printstat(s, mesq_qf_noop_not_full);
printstat(s, mesq_qf_switch_head_failed);
printstat(s, mesq_qf_unexpected_error);
printstat(s, mesq_noop_unexpected_error);
printstat(s, mesq_noop_lb_overflow);
printstat(s, mesq_noop_qlimit_reached);
printstat(s, mesq_noop_amo_nacked);
printstat(s, mesq_noop_put_nacked);
printstat(s, mesq_noop_page_overflow);
return 0;
}
static ssize_t statistics_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *data)
{
memset(&gru_stats, 0, sizeof(gru_stats));
return count;
}
static int mcs_statistics_show(struct seq_file *s, void *p)
{
int op;
unsigned long total, count, max;
static char *id[] = {"cch_allocate", "cch_start", "cch_interrupt",
"cch_interrupt_sync", "cch_deallocate", "tfh_write_only",
"tfh_write_restart", "tgh_invalidate"};
seq_puts(s, "#id count aver-clks max-clks\n");
for (op = 0; op < mcsop_last; op++) {
count = atomic_long_read(&mcs_op_statistics[op].count);
total = atomic_long_read(&mcs_op_statistics[op].total);
max = mcs_op_statistics[op].max;
seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
count ? total / count : 0, max);
}
return 0;
}
static ssize_t mcs_statistics_write(struct file *file,
const char __user *userbuf, size_t count, loff_t *data)
{
memset(mcs_op_statistics, 0, sizeof(mcs_op_statistics));
return count;
}
static int options_show(struct seq_file *s, void *p)
{
seq_printf(s, "#bitmask: 1=trace, 2=statistics\n");
seq_printf(s, "0x%lx\n", gru_options);
return 0;
}
static ssize_t options_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *data)
{
int ret;
ret = kstrtoul_from_user(userbuf, count, 0, &gru_options);
if (ret)
return ret;
return count;
}
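/*
 * Usage sketch (from a shell; the paths are created by gru_proc_init()
 * below):
 *
 *	# echo 2 > /proc/sgi_uv/gru/debug_options	(enable statistics)
 *	# cat /proc/sgi_uv/gru/statistics
 *	# echo > /proc/sgi_uv/gru/statistics		(reset the counters)
 *
 * Any write to the statistics files clears them; debug_options takes the
 * bitmask documented by options_show().
 */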
static int cch_seq_show(struct seq_file *file, void *data)
{
long gid = *(long *)data;
int i;
struct gru_state *gru = GID_TO_GRU(gid);
struct gru_thread_state *ts;
const char *mode[] = { "??", "UPM", "INTR", "OS_POLL" };
if (gid == 0)
seq_puts(file, "# gid bid ctx# asid pid cbrs dsbytes mode\n");
if (gru)
for (i = 0; i < GRU_NUM_CCH; i++) {
ts = gru->gs_gts[i];
if (!ts)
continue;
seq_printf(file, " %5d%5d%6d%7d%9d%6d%8d%8s\n",
gru->gs_gid, gru->gs_blade_id, i,
is_kernel_context(ts) ? 0 : ts->ts_gms->ms_asids[gid].mt_asid,
is_kernel_context(ts) ? 0 : ts->ts_tgid_owner,
ts->ts_cbr_au_count * GRU_CBR_AU_SIZE,
			ts->ts_dsr_au_count * GRU_DSR_AU_BYTES,
mode[ts->ts_user_options &
GRU_OPT_MISS_MASK]);
}
return 0;
}
static int gru_seq_show(struct seq_file *file, void *data)
{
long gid = *(long *)data, ctxfree, cbrfree, dsrfree;
struct gru_state *gru = GID_TO_GRU(gid);
if (gid == 0) {
seq_puts(file, "# gid nid ctx cbr dsr ctx cbr dsr\n");
seq_puts(file, "# busy busy busy free free free\n");
}
if (gru) {
ctxfree = GRU_NUM_CCH - gru->gs_active_contexts;
cbrfree = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
dsrfree = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
seq_printf(file, " %5d%5d%7ld%6ld%6ld%8ld%6ld%6ld\n",
gru->gs_gid, gru->gs_blade_id, GRU_NUM_CCH - ctxfree,
GRU_NUM_CBE - cbrfree, GRU_NUM_DSR_BYTES - dsrfree,
ctxfree, cbrfree, dsrfree);
}
return 0;
}
static void seq_stop(struct seq_file *file, void *data)
{
}
static void *seq_start(struct seq_file *file, loff_t *gid)
{
if (*gid < gru_max_gids)
return gid;
return NULL;
}
static void *seq_next(struct seq_file *file, void *data, loff_t *gid)
{
(*gid)++;
if (*gid < gru_max_gids)
return gid;
return NULL;
}
static const struct seq_operations cch_seq_ops = {
.start = seq_start,
.next = seq_next,
.stop = seq_stop,
.show = cch_seq_show
};
static const struct seq_operations gru_seq_ops = {
.start = seq_start,
.next = seq_next,
.stop = seq_stop,
.show = gru_seq_show
};
static int statistics_open(struct inode *inode, struct file *file)
{
return single_open(file, statistics_show, NULL);
}
static int mcs_statistics_open(struct inode *inode, struct file *file)
{
return single_open(file, mcs_statistics_show, NULL);
}
static int options_open(struct inode *inode, struct file *file)
{
return single_open(file, options_show, NULL);
}
/* *INDENT-OFF* */
static const struct proc_ops statistics_proc_ops = {
.proc_open = statistics_open,
.proc_read = seq_read,
.proc_write = statistics_write,
.proc_lseek = seq_lseek,
.proc_release = single_release,
};
static const struct proc_ops mcs_statistics_proc_ops = {
.proc_open = mcs_statistics_open,
.proc_read = seq_read,
.proc_write = mcs_statistics_write,
.proc_lseek = seq_lseek,
.proc_release = single_release,
};
static const struct proc_ops options_proc_ops = {
.proc_open = options_open,
.proc_read = seq_read,
.proc_write = options_write,
.proc_lseek = seq_lseek,
.proc_release = single_release,
};
static struct proc_dir_entry *proc_gru __read_mostly;
int gru_proc_init(void)
{
proc_gru = proc_mkdir("sgi_uv/gru", NULL);
if (!proc_gru)
return -1;
if (!proc_create("statistics", 0644, proc_gru, &statistics_proc_ops))
goto err;
if (!proc_create("mcs_statistics", 0644, proc_gru, &mcs_statistics_proc_ops))
goto err;
if (!proc_create("debug_options", 0644, proc_gru, &options_proc_ops))
goto err;
if (!proc_create_seq("cch_status", 0444, proc_gru, &cch_seq_ops))
goto err;
if (!proc_create_seq("gru_status", 0444, proc_gru, &gru_seq_ops))
goto err;
return 0;
err:
remove_proc_subtree("sgi_uv/gru", NULL);
return -1;
}
void gru_proc_exit(void)
{
remove_proc_subtree("sgi_uv/gru", NULL);
}
| linux-master | drivers/misc/sgi-gru/gruprocfs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* GRU KERNEL MCS INSTRUCTIONS
*
* Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/kernel.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
/* 10 sec */
#ifdef CONFIG_IA64
#include <asm/processor.h>
#define GRU_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
#define CLKS2NSEC(c) ((c) *1000000000 / local_cpu_data->itc_freq)
#else
#include <linux/sync_core.h>
#include <asm/tsc.h>
#define GRU_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
#define CLKS2NSEC(c) ((c) * 1000000 / tsc_khz)
#endif
/* Extract the status field from a kernel handle */
#define GET_MSEG_HANDLE_STATUS(h) (((*(unsigned long *)(h)) >> 16) & 3)
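/*
 * Worked example (illustrative): kernel handles keep a 2-bit status field
 * at bits 17:16 of word 0.  start_instruction() below ORs in 0x20001 (the
 * go bit plus bit 17), so immediately afterwards
 * GET_MSEG_HANDLE_STATUS(h) == ((0x20001 >> 16) & 3) == 2, and
 * wait_instruction_complete() spins until the hardware rewrites the field
 * to something other than CCHSTATUS_ACTIVE.
 */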
struct mcs_op_statistic mcs_op_statistics[mcsop_last];
static void update_mcs_stats(enum mcs_op op, unsigned long clks)
{
unsigned long nsec;
nsec = CLKS2NSEC(clks);
atomic_long_inc(&mcs_op_statistics[op].count);
atomic_long_add(nsec, &mcs_op_statistics[op].total);
if (mcs_op_statistics[op].max < nsec)
mcs_op_statistics[op].max = nsec;
}
static void start_instruction(void *h)
{
unsigned long *w0 = h;
wmb(); /* setting CMD/STATUS bits must be last */
*w0 = *w0 | 0x20001;
gru_flush_cache(h);
}
static void report_instruction_timeout(void *h)
{
unsigned long goff = GSEGPOFF((unsigned long)h);
char *id = "???";
if (TYPE_IS(CCH, goff))
id = "CCH";
else if (TYPE_IS(TGH, goff))
id = "TGH";
else if (TYPE_IS(TFH, goff))
id = "TFH";
	panic("GRU %p (%s) is malfunctioning\n", h, id);
}
static int wait_instruction_complete(void *h, enum mcs_op opc)
{
int status;
unsigned long start_time = get_cycles();
while (1) {
cpu_relax();
status = GET_MSEG_HANDLE_STATUS(h);
if (status != CCHSTATUS_ACTIVE)
break;
if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) {
report_instruction_timeout(h);
start_time = get_cycles();
}
}
if (gru_options & OPT_STATS)
update_mcs_stats(opc, get_cycles() - start_time);
return status;
}
int cch_allocate(struct gru_context_configuration_handle *cch)
{
int ret;
cch->opc = CCHOP_ALLOCATE;
start_instruction(cch);
ret = wait_instruction_complete(cch, cchop_allocate);
/*
* Stop speculation into the GSEG being mapped by the previous ALLOCATE.
* The GSEG memory does not exist until the ALLOCATE completes.
*/
sync_core();
return ret;
}
int cch_start(struct gru_context_configuration_handle *cch)
{
cch->opc = CCHOP_START;
start_instruction(cch);
return wait_instruction_complete(cch, cchop_start);
}
int cch_interrupt(struct gru_context_configuration_handle *cch)
{
cch->opc = CCHOP_INTERRUPT;
start_instruction(cch);
return wait_instruction_complete(cch, cchop_interrupt);
}
int cch_deallocate(struct gru_context_configuration_handle *cch)
{
int ret;
cch->opc = CCHOP_DEALLOCATE;
start_instruction(cch);
ret = wait_instruction_complete(cch, cchop_deallocate);
/*
* Stop speculation into the GSEG being unmapped by the previous
* DEALLOCATE.
*/
sync_core();
return ret;
}
int cch_interrupt_sync(struct gru_context_configuration_handle
*cch)
{
cch->opc = CCHOP_INTERRUPT_SYNC;
start_instruction(cch);
return wait_instruction_complete(cch, cchop_interrupt_sync);
}
int tgh_invalidate(struct gru_tlb_global_handle *tgh,
unsigned long vaddr, unsigned long vaddrmask,
int asid, int pagesize, int global, int n,
unsigned short ctxbitmap)
{
tgh->vaddr = vaddr;
tgh->asid = asid;
tgh->pagesize = pagesize;
tgh->n = n;
tgh->global = global;
tgh->vaddrmask = vaddrmask;
tgh->ctxbitmap = ctxbitmap;
tgh->opc = TGHOP_TLBINV;
start_instruction(tgh);
return wait_instruction_complete(tgh, tghop_invalidate);
}
int tfh_write_only(struct gru_tlb_fault_handle *tfh,
unsigned long paddr, int gaa,
unsigned long vaddr, int asid, int dirty,
int pagesize)
{
tfh->fillasid = asid;
tfh->fillvaddr = vaddr;
tfh->pfn = paddr >> GRU_PADDR_SHIFT;
tfh->gaa = gaa;
tfh->dirty = dirty;
tfh->pagesize = pagesize;
tfh->opc = TFHOP_WRITE_ONLY;
start_instruction(tfh);
return wait_instruction_complete(tfh, tfhop_write_only);
}
void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
unsigned long paddr, int gaa,
unsigned long vaddr, int asid, int dirty,
int pagesize)
{
tfh->fillasid = asid;
tfh->fillvaddr = vaddr;
tfh->pfn = paddr >> GRU_PADDR_SHIFT;
tfh->gaa = gaa;
tfh->dirty = dirty;
tfh->pagesize = pagesize;
tfh->opc = TFHOP_WRITE_RESTART;
start_instruction(tfh);
}
void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh)
{
tfh->opc = TFHOP_USER_POLLING_MODE;
start_instruction(tfh);
}
void tfh_exception(struct gru_tlb_fault_handle *tfh)
{
tfh->opc = TFHOP_EXCEPTION;
start_instruction(tfh);
}
| linux-master | drivers/misc/sgi-gru/gruhandles.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SN Platform GRU Driver
*
* FILE OPERATIONS & DRIVER INITIALIZATION
*
* This file supports the user system call for file open, close, mmap, etc.
 * This also includes the driver initialization code.
*
* (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (c) 2008-2014 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
#ifdef CONFIG_X86_64
#include <asm/uv/uv_irq.h>
#endif
#include <asm/uv/uv.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_mmrs.h>
struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly;
unsigned long gru_start_paddr __read_mostly;
void *gru_start_vaddr __read_mostly;
unsigned long gru_end_paddr __read_mostly;
unsigned int gru_max_gids __read_mostly;
struct gru_stats_s gru_stats;
/* Guaranteed user available resources on each node */
static int max_user_cbrs, max_user_dsr_bytes;
static struct miscdevice gru_miscdev;
static int gru_supported(void)
{
return is_uv_system() &&
(uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE);
}
/*
* gru_vma_close
*
* Called when unmapping a device mapping. Frees all gru resources
* and tables belonging to the vma.
*/
static void gru_vma_close(struct vm_area_struct *vma)
{
struct gru_vma_data *vdata;
struct gru_thread_state *gts;
struct list_head *entry, *next;
if (!vma->vm_private_data)
return;
vdata = vma->vm_private_data;
vma->vm_private_data = NULL;
gru_dbg(grudev, "vma %p, file %p, vdata %p\n", vma, vma->vm_file,
vdata);
list_for_each_safe(entry, next, &vdata->vd_head) {
gts =
list_entry(entry, struct gru_thread_state, ts_next);
		list_del(&gts->ts_next);
		mutex_lock(&gts->ts_ctxlock);
		if (gts->ts_gru)
			gru_unload_context(gts, 0);
		mutex_unlock(&gts->ts_ctxlock);
gts_drop(gts);
}
kfree(vdata);
STAT(vdata_free);
}
/*
* gru_file_mmap
*
* Called when mmapping the device. Initializes the vma with a fault handler
* and private data structure necessary to allocate, track, and free the
* underlying pages.
*/
static int gru_file_mmap(struct file *file, struct vm_area_struct *vma)
{
if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) != (VM_SHARED | VM_WRITE))
return -EPERM;
if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) ||
vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
return -EINVAL;
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_LOCKED |
VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = PAGE_SHARED;
vma->vm_ops = &gru_vm_ops;
vma->vm_private_data = gru_alloc_vma_data(vma, 0);
if (!vma->vm_private_data)
return -ENOMEM;
gru_dbg(grudev, "file %p, vaddr 0x%lx, vma %p, vdata %p\n",
file, vma->vm_start, vma, vma->vm_private_data);
return 0;
}
/*
* Create a new GRU context
*/
static int gru_create_new_context(unsigned long arg)
{
struct gru_create_context_req req;
struct vm_area_struct *vma;
struct gru_vma_data *vdata;
int ret = -EINVAL;
if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
return -EFAULT;
if (req.data_segment_bytes > max_user_dsr_bytes)
return -EINVAL;
if (req.control_blocks > max_user_cbrs || !req.maximum_thread_count)
return -EINVAL;
if (!(req.options & GRU_OPT_MISS_MASK))
req.options |= GRU_OPT_MISS_FMM_INTR;
mmap_write_lock(current->mm);
vma = gru_find_vma(req.gseg);
if (vma) {
vdata = vma->vm_private_data;
vdata->vd_user_options = req.options;
vdata->vd_dsr_au_count =
GRU_DS_BYTES_TO_AU(req.data_segment_bytes);
vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks);
vdata->vd_tlb_preload_count = req.tlb_preload_count;
ret = 0;
}
mmap_write_unlock(current->mm);
return ret;
}
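/*
 * Userspace usage sketch (illustrative only; field names follow the
 * struct gru_create_context_req used above, and a real caller would go
 * through the user library, which also takes care of aligning the
 * mapping to GRU_GSEG_PAGESIZE):
 *
 *	fd = open("/dev/gru", O_RDWR);
 *	gseg = mmap(NULL, GRU_GSEG_PAGESIZE, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 *	req.gseg = (unsigned long)gseg;
 *	req.data_segment_bytes = bytes;		/- <= max_user_dsr_bytes -/
 *	req.control_blocks = cbrs;		/- <= max_user_cbrs -/
 *	req.maximum_thread_count = 1;
 *	req.options = GRU_OPT_MISS_FMM_INTR;
 *	ioctl(fd, GRU_CREATE_CONTEXT, &req);
 *
 * The GSEG must already be mmapped when the ioctl runs so that
 * gru_find_vma() above can find it.
 */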
/*
* Get GRU configuration info (temp - for emulator testing)
*/
static long gru_get_config_info(unsigned long arg)
{
struct gru_config_info info;
int nodesperblade;
if (num_online_nodes() > 1 &&
(uv_node_to_blade_id(1) == uv_node_to_blade_id(0)))
nodesperblade = 2;
else
nodesperblade = 1;
memset(&info, 0, sizeof(info));
info.cpus = num_online_cpus();
info.nodes = num_online_nodes();
info.blades = info.nodes / nodesperblade;
info.chiplets = GRU_CHIPLETS_PER_BLADE * info.blades;
if (copy_to_user((void __user *)arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
/*
* gru_file_unlocked_ioctl
*
* Called to update file attributes via IOCTL calls.
*/
static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
unsigned long arg)
{
int err = -EBADRQC;
gru_dbg(grudev, "file %p, req 0x%x, 0x%lx\n", file, req, arg);
switch (req) {
case GRU_CREATE_CONTEXT:
err = gru_create_new_context(arg);
break;
case GRU_SET_CONTEXT_OPTION:
err = gru_set_context_option(arg);
break;
case GRU_USER_GET_EXCEPTION_DETAIL:
err = gru_get_exception_detail(arg);
break;
case GRU_USER_UNLOAD_CONTEXT:
err = gru_user_unload_context(arg);
break;
case GRU_USER_FLUSH_TLB:
err = gru_user_flush_tlb(arg);
break;
case GRU_USER_CALL_OS:
err = gru_handle_user_call_os(arg);
break;
case GRU_GET_GSEG_STATISTICS:
err = gru_get_gseg_statistics(arg);
break;
case GRU_KTEST:
err = gru_ktest(arg);
break;
case GRU_GET_CONFIG_INFO:
err = gru_get_config_info(arg);
break;
case GRU_DUMP_CHIPLET_STATE:
err = gru_dump_chiplet_request(arg);
break;
}
return err;
}
/*
* Called at init time to build tables for all GRUs that are present in the
* system.
*/
static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
void *vaddr, int blade_id, int chiplet_id)
{
spin_lock_init(&gru->gs_lock);
spin_lock_init(&gru->gs_asid_lock);
gru->gs_gru_base_paddr = paddr;
gru->gs_gru_base_vaddr = vaddr;
gru->gs_gid = blade_id * GRU_CHIPLETS_PER_BLADE + chiplet_id;
gru->gs_blade = gru_base[blade_id];
gru->gs_blade_id = blade_id;
gru->gs_chiplet_id = chiplet_id;
gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1;
gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1;
gru->gs_asid_limit = MAX_ASID;
gru_tgh_flush_init(gru);
if (gru->gs_gid >= gru_max_gids)
gru_max_gids = gru->gs_gid + 1;
gru_dbg(grudev, "bid %d, gid %d, vaddr %p (0x%lx)\n",
blade_id, gru->gs_gid, gru->gs_gru_base_vaddr,
gru->gs_gru_base_paddr);
}
static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
{
int pnode, nid, bid, chip;
int cbrs, dsrbytes, n;
int order = get_order(sizeof(struct gru_blade_state));
struct page *page;
struct gru_state *gru;
unsigned long paddr;
void *vaddr;
max_user_cbrs = GRU_NUM_CB;
max_user_dsr_bytes = GRU_NUM_DSR_BYTES;
for_each_possible_blade(bid) {
pnode = uv_blade_to_pnode(bid);
nid = uv_blade_to_memory_nid(bid);/* -1 if no memory on blade */
page = alloc_pages_node(nid, GFP_KERNEL, order);
if (!page)
goto fail;
gru_base[bid] = page_address(page);
memset(gru_base[bid], 0, sizeof(struct gru_blade_state));
gru_base[bid]->bs_lru_gru = &gru_base[bid]->bs_grus[0];
spin_lock_init(&gru_base[bid]->bs_lock);
init_rwsem(&gru_base[bid]->bs_kgts_sema);
dsrbytes = 0;
cbrs = 0;
for (gru = gru_base[bid]->bs_grus, chip = 0;
chip < GRU_CHIPLETS_PER_BLADE;
chip++, gru++) {
paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
gru_init_chiplet(gru, paddr, vaddr, bid, chip);
n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
cbrs = max(cbrs, n);
n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
dsrbytes = max(dsrbytes, n);
}
max_user_cbrs = min(max_user_cbrs, cbrs);
max_user_dsr_bytes = min(max_user_dsr_bytes, dsrbytes);
}
return 0;
fail:
for (bid--; bid >= 0; bid--)
free_pages((unsigned long)gru_base[bid], order);
return -ENOMEM;
}
static void gru_free_tables(void)
{
int bid;
int order = get_order(sizeof(struct gru_state) *
GRU_CHIPLETS_PER_BLADE);
for (bid = 0; bid < GRU_MAX_BLADES; bid++)
free_pages((unsigned long)gru_base[bid], order);
}
static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
{
unsigned long mmr = 0;
int core;
/*
* We target the cores of a blade and not the hyperthreads themselves.
* There is a max of 8 cores per socket and 2 sockets per blade,
* making for a max total of 16 cores (i.e., 16 CPUs without
* hyperthreading and 32 CPUs with hyperthreading).
*/
core = uv_cpu_core_number(cpu) + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
if (core >= GRU_NUM_TFM || uv_cpu_ht_number(cpu))
return 0;
if (chiplet == 0) {
mmr = UVH_GR0_TLB_INT0_CONFIG +
core * (UVH_GR0_TLB_INT1_CONFIG - UVH_GR0_TLB_INT0_CONFIG);
} else if (chiplet == 1) {
mmr = UVH_GR1_TLB_INT0_CONFIG +
core * (UVH_GR1_TLB_INT1_CONFIG - UVH_GR1_TLB_INT0_CONFIG);
} else {
BUG();
}
*corep = core;
return mmr;
}
#ifdef CONFIG_IA64
static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];
static void gru_noop(struct irq_data *d)
{
}
static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = {
[0 ... GRU_CHIPLETS_PER_BLADE - 1] {
.irq_mask = gru_noop,
.irq_unmask = gru_noop,
.irq_ack = gru_noop
}
};
static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
irq_handler_t irq_handler, int cpu, int blade)
{
unsigned long mmr;
int irq = IRQ_GRU + chiplet;
int ret, core;
mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
if (mmr == 0)
return 0;
if (gru_irq_count[chiplet] == 0) {
gru_chip[chiplet].name = irq_name;
ret = irq_set_chip(irq, &gru_chip[chiplet]);
if (ret) {
printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n",
GRU_DRIVER_ID_STR, -ret);
return ret;
}
ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
if (ret) {
printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
GRU_DRIVER_ID_STR, -ret);
return ret;
}
}
gru_irq_count[chiplet]++;
return 0;
}
static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
{
unsigned long mmr;
int core, irq = IRQ_GRU + chiplet;
if (gru_irq_count[chiplet] == 0)
return;
mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
if (mmr == 0)
return;
if (--gru_irq_count[chiplet] == 0)
free_irq(irq, NULL);
}
#elif defined CONFIG_X86_64
static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
irq_handler_t irq_handler, int cpu, int blade)
{
unsigned long mmr;
int irq, core;
int ret;
mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
if (mmr == 0)
return 0;
irq = uv_setup_irq(irq_name, cpu, blade, mmr, UV_AFFINITY_CPU);
if (irq < 0) {
printk(KERN_ERR "%s: uv_setup_irq failed, errno=%d\n",
GRU_DRIVER_ID_STR, -irq);
return irq;
}
ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
if (ret) {
uv_teardown_irq(irq);
printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
GRU_DRIVER_ID_STR, -ret);
return ret;
}
gru_base[blade]->bs_grus[chiplet].gs_irq[core] = irq;
return 0;
}
static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
{
int irq, core;
unsigned long mmr;
mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
if (mmr) {
irq = gru_base[blade]->bs_grus[chiplet].gs_irq[core];
if (irq) {
free_irq(irq, NULL);
uv_teardown_irq(irq);
}
}
}
#endif
static void gru_teardown_tlb_irqs(void)
{
int blade;
int cpu;
for_each_online_cpu(cpu) {
blade = uv_cpu_to_blade_id(cpu);
gru_chiplet_teardown_tlb_irq(0, cpu, blade);
gru_chiplet_teardown_tlb_irq(1, cpu, blade);
}
for_each_possible_blade(blade) {
if (uv_blade_nr_possible_cpus(blade))
continue;
gru_chiplet_teardown_tlb_irq(0, 0, blade);
gru_chiplet_teardown_tlb_irq(1, 0, blade);
}
}
static int gru_setup_tlb_irqs(void)
{
int blade;
int cpu;
int ret;
for_each_online_cpu(cpu) {
blade = uv_cpu_to_blade_id(cpu);
ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru0_intr, cpu, blade);
if (ret != 0)
goto exit1;
ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru1_intr, cpu, blade);
if (ret != 0)
goto exit1;
}
for_each_possible_blade(blade) {
if (uv_blade_nr_possible_cpus(blade))
continue;
ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru_intr_mblade, 0, blade);
if (ret != 0)
goto exit1;
ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru_intr_mblade, 0, blade);
if (ret != 0)
goto exit1;
}
return 0;
exit1:
gru_teardown_tlb_irqs();
return ret;
}
/*
* gru_init
*
* Called at boot or module load time to initialize the GRUs.
*/
static int __init gru_init(void)
{
int ret;
if (!gru_supported())
return 0;
#if defined CONFIG_IA64
gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */
#else
gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG) &
0x7fffffffffffUL;
#endif
gru_start_vaddr = __va(gru_start_paddr);
gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE;
printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
gru_start_paddr, gru_end_paddr);
ret = misc_register(&gru_miscdev);
if (ret) {
printk(KERN_ERR "%s: misc_register failed\n",
GRU_DRIVER_ID_STR);
goto exit0;
}
ret = gru_proc_init();
if (ret) {
printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR);
goto exit1;
}
ret = gru_init_tables(gru_start_paddr, gru_start_vaddr);
if (ret) {
printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR);
goto exit2;
}
ret = gru_setup_tlb_irqs();
if (ret != 0)
goto exit3;
gru_kservices_init();
printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR,
GRU_DRIVER_VERSION_STR);
return 0;
exit3:
gru_free_tables();
exit2:
gru_proc_exit();
exit1:
misc_deregister(&gru_miscdev);
exit0:
return ret;
}
static void __exit gru_exit(void)
{
if (!gru_supported())
return;
gru_teardown_tlb_irqs();
gru_kservices_exit();
gru_free_tables();
misc_deregister(&gru_miscdev);
gru_proc_exit();
mmu_notifier_synchronize();
}
static const struct file_operations gru_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = gru_file_unlocked_ioctl,
.mmap = gru_file_mmap,
.llseek = noop_llseek,
};
static struct miscdevice gru_miscdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "gru",
.fops = &gru_fops,
};
const struct vm_operations_struct gru_vm_ops = {
.close = gru_vma_close,
.fault = gru_fault,
};
#ifndef MODULE
fs_initcall(gru_init);
#else
module_init(gru_init);
#endif
module_exit(gru_exit);
module_param(gru_options, ulong, 0644);
MODULE_PARM_DESC(gru_options, "Various debug options");
MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(GRU_DRIVER_ID_STR GRU_DRIVER_VERSION_STR);
MODULE_VERSION(GRU_DRIVER_VERSION_STR);
| linux-master | drivers/misc/sgi-gru/grufile.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SN Platform GRU Driver
*
* Dump GRU State
*
* Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uv/uv_hub.h>
#include <linux/nospec.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
#include "grulib.h"
#define CCH_LOCK_ATTEMPTS 10
static int gru_user_copy_handle(void __user **dp, void *s)
{
if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
return -1;
*dp += GRU_HANDLE_BYTES;
return 0;
}
static int gru_dump_context_data(void *grubase,
struct gru_context_configuration_handle *cch,
void __user *ubuf, int ctxnum, int dsrcnt,
int flush_cbrs)
{
void *cb, *cbe, *tfh, *gseg;
int i, scr;
gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
cb = gseg + GRU_CB_BASE;
cbe = grubase + GRU_CBE_BASE;
tfh = grubase + GRU_TFH_BASE;
for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
if (flush_cbrs)
gru_flush_cache(cb);
if (gru_user_copy_handle(&ubuf, cb))
goto fail;
if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
goto fail;
if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
goto fail;
cb += GRU_HANDLE_STRIDE;
}
if (dsrcnt)
memcpy(ubuf, gseg + GRU_DS_BASE, dsrcnt * GRU_HANDLE_STRIDE);
return 0;
fail:
return -EFAULT;
}
static int gru_dump_tfm(struct gru_state *gru,
void __user *ubuf, void __user *ubufend)
{
struct gru_tlb_fault_map *tfm;
int i;
if (GRU_NUM_TFM * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
return -EFBIG;
for (i = 0; i < GRU_NUM_TFM; i++) {
tfm = get_tfm(gru->gs_gru_base_vaddr, i);
if (gru_user_copy_handle(&ubuf, tfm))
goto fail;
}
return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;
fail:
return -EFAULT;
}
static int gru_dump_tgh(struct gru_state *gru,
void __user *ubuf, void __user *ubufend)
{
struct gru_tlb_global_handle *tgh;
int i;
if (GRU_NUM_TGH * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
return -EFBIG;
for (i = 0; i < GRU_NUM_TGH; i++) {
tgh = get_tgh(gru->gs_gru_base_vaddr, i);
if (gru_user_copy_handle(&ubuf, tgh))
goto fail;
}
return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;
fail:
return -EFAULT;
}
static int gru_dump_context(struct gru_state *gru, int ctxnum,
void __user *ubuf, void __user *ubufend, char data_opt,
char lock_cch, char flush_cbrs)
{
struct gru_dump_context_header hdr;
struct gru_dump_context_header __user *uhdr = ubuf;
struct gru_context_configuration_handle *cch, *ubufcch;
struct gru_thread_state *gts;
int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
void *grubase;
memset(&hdr, 0, sizeof(hdr));
grubase = gru->gs_gru_base_vaddr;
cch = get_cch(grubase, ctxnum);
for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
cch_locked = trylock_cch_handle(cch);
if (cch_locked)
break;
msleep(1);
}
ubuf += sizeof(hdr);
ubufcch = ubuf;
if (gru_user_copy_handle(&ubuf, cch)) {
if (cch_locked)
unlock_cch_handle(cch);
return -EFAULT;
}
if (cch_locked)
ubufcch->delresp = 0;
bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;
if (cch_locked || !lock_cch) {
gts = gru->gs_gts[ctxnum];
if (gts && gts->ts_vma) {
hdr.pid = gts->ts_tgid_owner;
hdr.vaddr = gts->ts_vma->vm_start;
}
if (cch->state != CCHSTATE_INACTIVE) {
cbrcnt = hweight64(cch->cbr_allocation_map) *
GRU_CBR_AU_SIZE;
dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
GRU_DSR_AU_CL : 0;
}
bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
if (bytes > ubufend - ubuf)
ret = -EFBIG;
else
ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
dsrcnt, flush_cbrs);
}
if (cch_locked)
unlock_cch_handle(cch);
if (ret)
return ret;
hdr.magic = GRU_DUMP_MAGIC;
hdr.gid = gru->gs_gid;
hdr.ctxnum = ctxnum;
hdr.cbrcnt = cbrcnt;
hdr.dsrcnt = dsrcnt;
hdr.cch_locked = cch_locked;
if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
return -EFAULT;
return bytes;
}
int gru_dump_chiplet_request(unsigned long arg)
{
struct gru_state *gru;
struct gru_dump_chiplet_state_req req;
void __user *ubuf;
void __user *ubufend;
int ctxnum, ret, cnt = 0;
if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
return -EFAULT;
/* Currently, only dump by gid is implemented */
if (req.gid >= gru_max_gids)
return -EINVAL;
req.gid = array_index_nospec(req.gid, gru_max_gids);
gru = GID_TO_GRU(req.gid);
ubuf = req.buf;
ubufend = req.buf + req.buflen;
ret = gru_dump_tfm(gru, ubuf, ubufend);
if (ret < 0)
goto fail;
ubuf += ret;
ret = gru_dump_tgh(gru, ubuf, ubufend);
if (ret < 0)
goto fail;
ubuf += ret;
for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
if (req.ctxnum == ctxnum || req.ctxnum < 0) {
ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
req.data_opt, req.lock_cch,
req.flush_cbrs);
if (ret < 0)
goto fail;
ubuf += ret;
cnt++;
}
}
if (copy_to_user((void __user *)arg, &req, sizeof(req)))
return -EFAULT;
return cnt;
fail:
return ret;
}
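/*
 * Usage sketch (illustrative): a debugging tool fills a struct
 * gru_dump_chiplet_state_req with the target gid, a user buffer/length,
 * and ctxnum (negative for all contexts), then issues
 *
 *	ioctl(fd, GRU_DUMP_CHIPLET_STATE, &req);
 *
 * On success the return value is the number of contexts dumped.  The
 * buffer is laid out as the TFMs, then the TGHs, then per context a
 * gru_dump_context_header followed by the CCH, the CBR/TFH/CBE handles
 * and, if data_opt was set, the data segment lines.
 */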
| linux-master | drivers/misc/sgi-gru/grukdump.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SN Platform GRU Driver
*
* FAULT HANDLER FOR GRU DETECTED TLB MISSES
*
* This file contains code that handles TLB misses within the GRU.
* These misses are reported either via interrupts or user polling of
* the user CB.
*
* Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <linux/sync_core.h>
#include <linux/prefetch.h>
#include "gru.h"
#include "grutables.h"
#include "grulib.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>
/* Return codes for vtop functions */
#define VTOP_SUCCESS 0
#define VTOP_INVALID -1
#define VTOP_RETRY -2
/*
* Test if a physical address is a valid GRU GSEG address
*/
static inline int is_gru_paddr(unsigned long paddr)
{
return paddr >= gru_start_paddr && paddr < gru_end_paddr;
}
/*
* Find the vma of a GRU segment. Caller must hold mmap_lock.
*/
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
struct vm_area_struct *vma;
vma = vma_lookup(current->mm, vaddr);
if (vma && vma->vm_ops == &gru_vm_ops)
return vma;
return NULL;
}
/*
* Find and lock the gts that contains the specified user vaddr.
*
* Returns:
* - *gts with the mmap_lock locked for read and the GTS locked.
* - NULL if vaddr invalid OR is not a valid GSEG vaddr.
*/
static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct gru_thread_state *gts = NULL;
mmap_read_lock(mm);
vma = gru_find_vma(vaddr);
if (vma)
gts = gru_find_thread_state(vma, TSID(vaddr, vma));
if (gts)
		mutex_lock(&gts->ts_ctxlock);
else
mmap_read_unlock(mm);
return gts;
}
static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct gru_thread_state *gts = ERR_PTR(-EINVAL);
mmap_write_lock(mm);
vma = gru_find_vma(vaddr);
if (!vma)
goto err;
gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
if (IS_ERR(gts))
goto err;
	mutex_lock(&gts->ts_ctxlock);
mmap_write_downgrade(mm);
return gts;
err:
mmap_write_unlock(mm);
return gts;
}
/*
* Unlock a GTS that was previously locked with gru_find_lock_gts().
*/
static void gru_unlock_gts(struct gru_thread_state *gts)
{
	mutex_unlock(&gts->ts_ctxlock);
mmap_read_unlock(current->mm);
}
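/*
 * Usage sketch (illustrative): callers bracket GSEG fault processing with
 * the find/unlock pair, e.g.
 *
 *	gts = gru_find_lock_gts(vaddr);
 *	if (!gts)
 *		return -EINVAL;
 *	... handle the fault, possibly loading the context ...
 *	gru_unlock_gts(gts);
 *
 * which keeps both the mmap_lock (read) and ts_ctxlock held across the
 * operation.
 */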
/*
* Set a CB.istatus to active using a user virtual address. This must be done
* just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
* If the line is evicted, the status may be lost. The in-cache update
* is necessary to prevent the user from seeing a stale cb.istatus that will
* change as soon as the TFH restart is complete. Races may cause an
* occasional failure to clear the cb.istatus, but that is ok.
*/
static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
{
if (cbk) {
cbk->istatus = CBS_ACTIVE;
}
}
/*
* Read & clear a TFM
*
* The GRU has an array of fault maps. A map is private to a cpu
* Only one cpu will be accessing a cpu's fault map.
*
* This function scans the cpu-private fault map & clears all bits that
* are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
* the GRU, atomic operations must be used to clear bits.
*/
static void get_clear_fault_map(struct gru_state *gru,
struct gru_tlb_fault_map *imap,
struct gru_tlb_fault_map *dmap)
{
unsigned long i, k;
struct gru_tlb_fault_map *tfm;
tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
prefetchw(tfm); /* Helps on hardware, required for emulator */
for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
k = tfm->fault_bits[i];
if (k)
k = xchg(&tfm->fault_bits[i], 0UL);
imap->fault_bits[i] = k;
k = tfm->done_bits[i];
if (k)
k = xchg(&tfm->done_bits[i], 0UL);
dmap->fault_bits[i] = k;
}
/*
* Not functionally required but helps performance. (Required
* on emulator)
*/
gru_flush_cache(tfm);
}
/*
* Atomic (interrupt context) & non-atomic (user context) functions to
* convert a vaddr into a physical address. The size of the page
* is returned in pageshift.
* returns:
* 0 - successful
* < 0 - error code
* 1 - (atomic only) try again in non-atomic context
*/
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
unsigned long vaddr, int write,
unsigned long *paddr, int *pageshift)
{
struct page *page;
#ifdef CONFIG_HUGETLB_PAGE
*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
*pageshift = PAGE_SHIFT;
#endif
if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page) <= 0)
return -EFAULT;
*paddr = page_to_phys(page);
put_page(page);
return 0;
}
/*
* atomic_pte_lookup
*
* Convert a user virtual address to a physical address
* Only supports Intel large pages (2MB only) on x86_64.
* ZZZ - hugepage support is incomplete
*
* NOTE: mmap_lock is already held on entry to this function. This
* guarantees existence of the page tables.
*/
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
int write, unsigned long *paddr, int *pageshift)
{
pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t pte;
pgdp = pgd_offset(vma->vm_mm, vaddr);
if (unlikely(pgd_none(*pgdp)))
goto err;
p4dp = p4d_offset(pgdp, vaddr);
if (unlikely(p4d_none(*p4dp)))
goto err;
pudp = pud_offset(p4dp, vaddr);
if (unlikely(pud_none(*pudp)))
goto err;
pmdp = pmd_offset(pudp, vaddr);
if (unlikely(pmd_none(*pmdp)))
goto err;
#ifdef CONFIG_X86_64
if (unlikely(pmd_large(*pmdp)))
pte = ptep_get((pte_t *)pmdp);
else
#endif
pte = *pte_offset_kernel(pmdp, vaddr);
if (unlikely(!pte_present(pte) ||
(write && (!pte_write(pte) || !pte_dirty(pte)))))
return 1;
*paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
*pageshift = PAGE_SHIFT;
#endif
return 0;
err:
return 1;
}
static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
int write, int atomic, unsigned long *gpa, int *pageshift)
{
struct mm_struct *mm = gts->ts_mm;
struct vm_area_struct *vma;
unsigned long paddr;
int ret, ps;
vma = find_vma(mm, vaddr);
if (!vma)
goto inval;
/*
* Atomic lookup is faster & usually works even if called in non-atomic
* context.
*/
rmb(); /* Must check ms_range_active before loading PTEs */
ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
if (ret) {
if (atomic)
goto upm;
if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
goto inval;
}
if (is_gru_paddr(paddr))
goto inval;
paddr = paddr & ~((1UL << ps) - 1);
*gpa = uv_soc_phys_ram_to_gpa(paddr);
*pageshift = ps;
return VTOP_SUCCESS;
inval:
return VTOP_INVALID;
upm:
return VTOP_RETRY;
}
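/*
* Worked example of the alignment step above (illustrative values):
* with ps == 21 (a 2MB page) and paddr == 0x40212345,
*
* paddr & ~((1UL << 21) - 1) == paddr & ~0x1fffffUL == 0x40200000
*
* i.e. the physical address is rounded down to the base of its page
* before being converted to a global physical address.
*/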
/*
* Flush a CBE from cache. The CBE is clean in the cache. Dirty the
* CBE cacheline so that the line will be written back to the home agent.
* Otherwise the line may be silently dropped. This has no impact
* except on performance.
*/
static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
{
if (unlikely(cbe)) {
cbe->cbrexecstatus = 0; /* make CL dirty */
gru_flush_cache(cbe);
}
}
/*
* Preload the TLB with entries that may be required. Currently, preloading
* is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
* the end of the bcopy transfer, whichever is smaller.
*/
static void gru_preload_tlb(struct gru_state *gru,
struct gru_thread_state *gts, int atomic,
unsigned long fault_vaddr, int asid, int write,
unsigned char tlb_preload_count,
struct gru_tlb_fault_handle *tfh,
struct gru_control_block_extended *cbe)
{
unsigned long vaddr = 0, gpa;
int ret, pageshift;
if (cbe->opccpy != OP_BCOPY)
return;
if (fault_vaddr == cbe->cbe_baddr0)
vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
else if (fault_vaddr == cbe->cbe_baddr1)
vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;
fault_vaddr &= PAGE_MASK;
vaddr &= PAGE_MASK;
vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);
while (vaddr > fault_vaddr) {
ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
GRU_PAGESIZE(pageshift)))
return;
gru_dbg(grudev,
"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
vaddr, asid, write, pageshift, gpa);
vaddr -= PAGE_SIZE;
STAT(tlb_preload_page);
}
}
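/*
* Worked example (illustrative values, 4K pages): with fault_vaddr ==
* 0x10000, tlb_preload_count == 3 and a BCOPY that ends well beyond the
* window, vaddr is clamped to 0x10000 + 3 * 0x1000 == 0x13000 and the
* loop above drops in 0x13000, 0x12000 and 0x11000 - the three pages
* following the faulting page. The faulting page itself is dropped in
* afterwards by gru_try_dropin().
*/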
/*
* Drop a TLB entry into the GRU. The fault is described by info in a TFH.
* Input:
* cb Address of user CBR. Null if not running in user context
* Return:
* 0 = dropin, exception, or switch to UPM successful
* 1 = range invalidate active
* < 0 = error code
*
*/
static int gru_try_dropin(struct gru_state *gru,
struct gru_thread_state *gts,
struct gru_tlb_fault_handle *tfh,
struct gru_instruction_bits *cbk)
{
struct gru_control_block_extended *cbe = NULL;
unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
unsigned long gpa = 0, vaddr = 0;
/*
* NOTE: The GRU contains magic hardware that eliminates races between
* TLB invalidates and TLB dropins. If an invalidate occurs
* in the window between reading the TFH and the subsequent TLB dropin,
* the dropin is ignored. This eliminates the need for additional locks.
*/
/*
* Prefetch the CBE if doing TLB preloading
*/
if (unlikely(tlb_preload_count)) {
cbe = gru_tfh_to_cbe(tfh);
prefetchw(cbe);
}
/*
* Error if TFH state is IDLE or FMM mode & the user is issuing a UPM call.
* Might be a hardware race OR a stupid user. Ignore FMM because FMM
* is a transient state.
*/
if (tfh->status != TFHSTATUS_EXCEPTION) {
gru_flush_cache(tfh);
sync_core();
if (tfh->status != TFHSTATUS_EXCEPTION)
goto failnoexception;
STAT(tfh_stale_on_fault);
}
if (tfh->state == TFHSTATE_IDLE)
goto failidle;
if (tfh->state == TFHSTATE_MISS_FMM && cbk)
goto failfmm;
write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
vaddr = tfh->missvaddr;
asid = tfh->missasid;
indexway = tfh->indexway;
if (asid == 0)
goto failnoasid;
rmb(); /* TFH must be cache resident before reading ms_range_active */
/*
* TFH is cache resident - at least briefly. Fail the dropin
* if a range invalidate is active.
*/
if (atomic_read(&gts->ts_gms->ms_range_active))
goto failactive;
ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
if (ret == VTOP_INVALID)
goto failinval;
if (ret == VTOP_RETRY)
goto failupm;
if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
if (atomic || !gru_update_cch(gts)) {
gts->ts_force_cch_reload = 1;
goto failupm;
}
}
if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
gru_flush_cache_cbe(cbe);
}
gru_cb_set_istatus_active(cbk);
gts->ustats.tlbdropin++;
tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
GRU_PAGESIZE(pageshift));
gru_dbg(grudev,
"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
" rw %d, ps %d, gpa 0x%lx\n",
atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
indexway, write, pageshift, gpa);
STAT(tlb_dropin);
return 0;
failnoasid:
/* No asid (delayed unload). */
STAT(tlb_dropin_fail_no_asid);
gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
if (!cbk)
tfh_user_polling_mode(tfh);
else
gru_flush_cache(tfh);
gru_flush_cache_cbe(cbe);
return -EAGAIN;
failupm:
/* Atomic failure switch CBR to UPM */
tfh_user_polling_mode(tfh);
gru_flush_cache_cbe(cbe);
STAT(tlb_dropin_fail_upm);
gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
return 1;
failfmm:
/* FMM state on UPM call */
gru_flush_cache(tfh);
gru_flush_cache_cbe(cbe);
STAT(tlb_dropin_fail_fmm);
gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
return 0;
failnoexception:
/* TFH status did not show exception pending */
gru_flush_cache(tfh);
gru_flush_cache_cbe(cbe);
if (cbk)
gru_flush_cache(cbk);
STAT(tlb_dropin_fail_no_exception);
gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
tfh, tfh->status, tfh->state);
return 0;
failidle:
/* TFH state was idle - no miss pending */
gru_flush_cache(tfh);
gru_flush_cache_cbe(cbe);
if (cbk)
gru_flush_cache(cbk);
STAT(tlb_dropin_fail_idle);
gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
return 0;
failinval:
/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
tfh_exception(tfh);
gru_flush_cache_cbe(cbe);
STAT(tlb_dropin_fail_invalid);
gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
return -EFAULT;
failactive:
/* Range invalidate active. Switch to UPM iff atomic */
if (!cbk)
tfh_user_polling_mode(tfh);
else
gru_flush_cache(tfh);
gru_flush_cache_cbe(cbe);
STAT(tlb_dropin_fail_range_active);
gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
tfh, vaddr);
return 1;
}
/*
* Process an external interrupt from the GRU. This interrupt is
* caused by a TLB miss.
* Note that this is the interrupt handler that is registered with the
* Linux interrupt subsystem.
*/
static irqreturn_t gru_intr(int chiplet, int blade)
{
struct gru_state *gru;
struct gru_tlb_fault_map imap, dmap;
struct gru_thread_state *gts;
struct gru_tlb_fault_handle *tfh = NULL;
struct completion *cmp;
int cbrnum, ctxnum;
STAT(intr);
gru = &gru_base[blade]->bs_grus[chiplet];
if (!gru) {
dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
raw_smp_processor_id(), chiplet);
return IRQ_NONE;
}
get_clear_fault_map(gru, &imap, &dmap);
gru_dbg(grudev,
"cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
smp_processor_id(), chiplet, gru->gs_gid,
imap.fault_bits[0], imap.fault_bits[1],
dmap.fault_bits[0], dmap.fault_bits[1]);
for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
STAT(intr_cbr);
cmp = gru->gs_blade->bs_async_wq;
if (cmp)
complete(cmp);
gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
gru->gs_gid, cbrnum, cmp ? cmp->done : -1);
}
for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
STAT(intr_tfh);
tfh = get_tfh_by_index(gru, cbrnum);
prefetchw(tfh); /* Helps on hdw, required for emulator */
/*
* When hardware sets a bit in the faultmap, it implicitly
* locks the GRU context so that it cannot be unloaded.
* The gts cannot change until a TFH start/writestart command
* is issued.
*/
ctxnum = tfh->ctxnum;
gts = gru->gs_gts[ctxnum];
/* Spurious interrupts can cause this. Ignore. */
if (!gts) {
STAT(intr_spurious);
continue;
}
/*
* This is running in interrupt context. Trylock the mmap_lock.
* If it fails, retry the fault in user context.
*/
gts->ustats.fmm_tlbmiss++;
if (!gts->ts_force_cch_reload &&
mmap_read_trylock(gts->ts_mm)) {
gru_try_dropin(gru, gts, tfh, NULL);
mmap_read_unlock(gts->ts_mm);
} else {
tfh_user_polling_mode(tfh);
STAT(intr_mm_lock_failed);
}
}
return IRQ_HANDLED;
}
irqreturn_t gru0_intr(int irq, void *dev_id)
{
return gru_intr(0, uv_numa_blade_id());
}
irqreturn_t gru1_intr(int irq, void *dev_id)
{
return gru_intr(1, uv_numa_blade_id());
}
irqreturn_t gru_intr_mblade(int irq, void *dev_id)
{
int blade;
for_each_possible_blade(blade) {
if (uv_blade_nr_possible_cpus(blade))
continue;
gru_intr(0, blade);
gru_intr(1, blade);
}
return IRQ_HANDLED;
}
static int gru_user_dropin(struct gru_thread_state *gts,
struct gru_tlb_fault_handle *tfh,
void *cb)
{
struct gru_mm_struct *gms = gts->ts_gms;
int ret;
gts->ustats.upm_tlbmiss++;
while (1) {
wait_event(gms->ms_wait_queue,
atomic_read(&gms->ms_range_active) == 0);
prefetchw(tfh); /* Helps on hdw, required for emulator */
ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
if (ret <= 0)
return ret;
STAT(call_os_wait_queue);
}
}
/*
* This interface is called as a result of a user detecting a "call OS" bit
* in a user CB. Normally this means that a TLB fault has occurred.
* cb - user virtual address of the CB
*/
int gru_handle_user_call_os(unsigned long cb)
{
struct gru_tlb_fault_handle *tfh;
struct gru_thread_state *gts;
void *cbk;
int ucbnum, cbrnum, ret = -EINVAL;
STAT(call_os);
/* sanity check the cb pointer */
ucbnum = get_cb_number((void *)cb);
if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
return -EINVAL;
again:
gts = gru_find_lock_gts(cb);
if (!gts)
return -EINVAL;
gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
goto exit;
if (gru_check_context_placement(gts)) {
gru_unlock_gts(gts);
gru_unload_context(gts, 1);
goto again;
}
/*
* CCH may contain stale data if ts_force_cch_reload is set.
*/
if (gts->ts_gru && gts->ts_force_cch_reload) {
gts->ts_force_cch_reload = 0;
gru_update_cch(gts);
}
ret = -EAGAIN;
cbrnum = thread_cbr_number(gts, ucbnum);
if (gts->ts_gru) {
tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
gts->ts_ctxnum, ucbnum);
ret = gru_user_dropin(gts, tfh, cbk);
}
exit:
gru_unlock_gts(gts);
return ret;
}
/*
* Fetch the exception detail information for a CB that terminated with
* an exception.
*/
int gru_get_exception_detail(unsigned long arg)
{
struct control_block_extended_exc_detail excdet;
struct gru_control_block_extended *cbe;
struct gru_thread_state *gts;
int ucbnum, cbrnum, ret;
STAT(user_exception);
if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
return -EFAULT;
gts = gru_find_lock_gts(excdet.cb);
if (!gts)
return -EINVAL;
gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
ucbnum = get_cb_number((void *)excdet.cb);
if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
ret = -EINVAL;
} else if (gts->ts_gru) {
cbrnum = thread_cbr_number(gts, ucbnum);
cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
gru_flush_cache(cbe); /* CBE not coherent */
sync_core(); /* make sure we have current data */
excdet.opc = cbe->opccpy;
excdet.exopc = cbe->exopccpy;
excdet.ecause = cbe->ecause;
excdet.exceptdet0 = cbe->idef1upd;
excdet.exceptdet1 = cbe->idef3upd;
excdet.cbrstate = cbe->cbrstate;
excdet.cbrexecstatus = cbe->cbrexecstatus;
gru_flush_cache_cbe(cbe);
ret = 0;
} else {
ret = -EAGAIN;
}
gru_unlock_gts(gts);
gru_dbg(grudev,
"cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, "
"exdet0 0x%lx, exdet1 0x%x\n",
excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus,
excdet.ecause, excdet.exceptdet0, excdet.exceptdet1);
if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
ret = -EFAULT;
return ret;
}
/*
* User request to unload a context. Content is saved for possible reload.
*/
static int gru_unload_all_contexts(void)
{
struct gru_thread_state *gts;
struct gru_state *gru;
int gid, ctxnum;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
foreach_gid(gid) {
gru = GID_TO_GRU(gid);
spin_lock(&gru->gs_lock);
for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
gts = gru->gs_gts[ctxnum];
if (gts && mutex_trylock(&gts->ts_ctxlock)) {
spin_unlock(&gru->gs_lock);
gru_unload_context(gts, 1);
mutex_unlock(&gts->ts_ctxlock);
spin_lock(&gru->gs_lock);
}
}
spin_unlock(&gru->gs_lock);
}
return 0;
}
int gru_user_unload_context(unsigned long arg)
{
struct gru_thread_state *gts;
struct gru_unload_context_req req;
STAT(user_unload_context);
if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
return -EFAULT;
gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);
if (!req.gseg)
return gru_unload_all_contexts();
gts = gru_find_lock_gts(req.gseg);
if (!gts)
return -EINVAL;
if (gts->ts_gru)
gru_unload_context(gts, 1);
gru_unlock_gts(gts);
return 0;
}
/*
* User request to flush a range of virtual addresses from the GRU TLB
* (Mainly for testing).
*/
int gru_user_flush_tlb(unsigned long arg)
{
struct gru_thread_state *gts;
struct gru_flush_tlb_req req;
struct gru_mm_struct *gms;
STAT(user_flush_tlb);
if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
return -EFAULT;
gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
req.vaddr, req.len);
gts = gru_find_lock_gts(req.gseg);
if (!gts)
return -EINVAL;
gms = gts->ts_gms;
gru_unlock_gts(gts);
gru_flush_tlb_range(gms, req.vaddr, req.len);
return 0;
}
/*
* Fetch GSEG statistics
*/
long gru_get_gseg_statistics(unsigned long arg)
{
struct gru_thread_state *gts;
struct gru_get_gseg_statistics_req req;
if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
return -EFAULT;
/*
* The library creates arrays of contexts for threaded programs.
* If no gts exists in the array, the context has never been used & all
* statistics are implicitly 0.
*/
gts = gru_find_lock_gts(req.gseg);
if (gts) {
memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
gru_unlock_gts(gts);
} else {
memset(&req.stats, 0, sizeof(gts->ustats));
}
if (copy_to_user((void __user *)arg, &req, sizeof(req)))
return -EFAULT;
return 0;
}
/*
* Register the current task as the user of the GSEG slice.
* Needed for TLB fault interrupt targeting.
*/
int gru_set_context_option(unsigned long arg)
{
struct gru_thread_state *gts;
struct gru_set_context_option_req req;
int ret = 0;
STAT(set_context_option);
if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
return -EFAULT;
gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);
gts = gru_find_lock_gts(req.gseg);
if (!gts) {
gts = gru_alloc_locked_gts(req.gseg);
if (IS_ERR(gts))
return PTR_ERR(gts);
}
switch (req.op) {
case sco_blade_chiplet:
/* Select blade/chiplet for GRU context */
if (req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB ||
req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
(req.val1 >= 0 && !gru_base[req.val1])) {
ret = -EINVAL;
} else {
gts->ts_user_blade_id = req.val1;
gts->ts_user_chiplet_id = req.val0;
if (gru_check_context_placement(gts)) {
gru_unlock_gts(gts);
gru_unload_context(gts, 1);
return ret;
}
}
break;
case sco_gseg_owner:
/* Register the current task as the GSEG owner */
gts->ts_tgid_owner = current->tgid;
break;
case sco_cch_req_slice:
/* Set the CCH slice option */
gts->ts_cch_req_slice = req.val1 & 3;
break;
default:
ret = -EINVAL;
}
gru_unlock_gts(gts);
return ret;
}
| linux-master | drivers/misc/sgi-gru/grufault.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SN Platform GRU Driver
*
* DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
*
* Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/prefetch.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
unsigned long gru_options __read_mostly;
static struct device_driver gru_driver = {
.name = "gru"
};
static struct device gru_device = {
.init_name = "",
.driver = &gru_driver,
};
struct device *grudev = &gru_device;
/*
* Select a gru fault map to be used by the current cpu. Note that
* multiple cpus may be using the same map.
* ZZZ should be inline but did not work on emulator
*/
int gru_cpu_fault_map_id(void)
{
#ifdef CONFIG_IA64
return uv_blade_processor_id() % GRU_NUM_TFM;
#else
int cpu = smp_processor_id();
int id, core;
core = uv_cpu_core_number(cpu);
id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
return id;
#endif
}
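/*
* Illustrative example for the non-IA64 path above, assuming
* UV_MAX_INT_CORES is 8 (see uv_hub.h for the real value): a cpu on
* socket 1, core 2 maps to id = 2 + 8 * 1 = 10. Cores on the same
* socket get consecutive map ids, and sockets are spaced
* UV_MAX_INT_CORES ids apart.
*/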
/*--------- ASID Management -------------------------------------------
*
* Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
* Once MAX is reached, flush the TLB & start over. However,
* some asids may still be in use. There won't be many (percentage wise) still
* in use. Search active contexts & determine the value of the first
* asid in use ("x"s below). Set "limit" to this value.
* This defines a block of assignable asids.
*
* When "limit" is reached, search forward from limit+1 and determine the
* next block of assignable asids.
*
* Repeat until MAX_ASID is reached, then start over again.
*
* Each time MAX_ASID is reached, increment the asid generation. Since
* the search for in-use asids only checks contexts with GRUs currently
* assigned, asids in some contexts will be missed. Prior to loading
* a context, the asid generation of the GTS asid is rechecked. If it
* doesn't match the current generation, a new asid will be assigned.
*
* 0---------------x------------x---------------------x----|
* ^-next ^-limit ^-MAX_ASID
*
* All asid manipulation & context loading/unloading is protected by the
* gs_lock.
*/
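/*
* Worked example (illustrative values): assume MIN_ASID is 0x10 and two
* loaded contexts currently hold asids 0x100 and 0x300. Starting a new
* block at 0x10, gru_reset_asid_limit() finds 0x100 as the first in-use
* asid above it, so the assignable block is roughly 0x10..0xff with
* gs_asid_limit = 0x100. Once that block is exhausted, the next scan
* starts above 0x100 and finds 0x300, yielding the next block, and so
* on until MAX_ASID forces a wrap and a TLB flush.
*/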
/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{
gru_dbg(grudev, "gid %d\n", gru->gs_gid);
STAT(asid_wrap);
gru->gs_asid_gen++;
return MIN_ASID;
}
/* Find the next chunk of unused asids */
static int gru_reset_asid_limit(struct gru_state *gru, int asid)
{
int i, gid, inuse_asid, limit;
gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
STAT(asid_next);
limit = MAX_ASID;
if (asid >= limit)
asid = gru_wrap_asid(gru);
gru_flush_all_tlb(gru);
gid = gru->gs_gid;
again:
for (i = 0; i < GRU_NUM_CCH; i++) {
if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
continue;
inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
inuse_asid, i);
if (inuse_asid == asid) {
asid += ASID_INC;
if (asid >= limit) {
/*
* empty range: reset the range limit and
* start over
*/
limit = MAX_ASID;
if (asid >= MAX_ASID)
asid = gru_wrap_asid(gru);
goto again;
}
}
if ((inuse_asid > asid) && (inuse_asid < limit))
limit = inuse_asid;
}
gru->gs_asid_limit = limit;
gru->gs_asid = asid;
gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
asid, limit);
return asid;
}
/* Assign a new ASID to a thread context. */
static int gru_assign_asid(struct gru_state *gru)
{
int asid;
gru->gs_asid += ASID_INC;
asid = gru->gs_asid;
if (asid >= gru->gs_asid_limit)
asid = gru_reset_asid_limit(gru, asid);
gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
return asid;
}
/*
* Clear n bits in a word. Return a word indicating the bits that were cleared.
* Optionally, build an array of chars that contain the bit numbers allocated.
*/
static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
signed char *idx)
{
unsigned long bits = 0;
int i;
while (n--) {
i = find_first_bit(p, mmax);
if (i == mmax)
BUG();
__clear_bit(i, p);
__set_bit(i, &bits);
if (idx)
*idx++ = i;
}
return bits;
}
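/*
* Illustrative example: with *p == 0b1011 (resource units 0, 1 and 3
* free) and n == 2, the loop clears bits 0 and 1 in the free map,
* returns the mask 0b0011, and stores the unit numbers {0, 1} in idx
* if it is non-NULL. The returned mask becomes the context's CBR/DSR
* allocation map (see the callers below).
*/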
unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
signed char *cbmap)
{
return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
cbmap);
}
unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
signed char *dsmap)
{
return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
dsmap);
}
static void reserve_gru_resources(struct gru_state *gru,
struct gru_thread_state *gts)
{
gru->gs_active_contexts++;
gts->ts_cbr_map =
gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
gts->ts_cbr_idx);
gts->ts_dsr_map =
gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
}
static void free_gru_resources(struct gru_state *gru,
struct gru_thread_state *gts)
{
gru->gs_active_contexts--;
gru->gs_cbr_map |= gts->ts_cbr_map;
gru->gs_dsr_map |= gts->ts_dsr_map;
}
/*
* Check if a GRU has sufficient free resources to satisfy an allocation
* request. Note: GRU locks may or may not be held when this is called. If
* not held, recheck after acquiring the appropriate locks.
*
* Returns 1 if sufficient resources, 0 if not
*/
static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
int dsr_au_count, int max_active_contexts)
{
return hweight64(gru->gs_cbr_map) >= cbr_au_count
&& hweight64(gru->gs_dsr_map) >= dsr_au_count
&& gru->gs_active_contexts < max_active_contexts;
}
/*
* TLB management requires tracking all GRU chiplets that have loaded a GSEG
* context.
*/
static int gru_load_mm_tracker(struct gru_state *gru,
struct gru_thread_state *gts)
{
struct gru_mm_struct *gms = gts->ts_gms;
struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
int asid;
spin_lock(&gms->ms_asid_lock);
asid = asids->mt_asid;
spin_lock(&gru->gs_asid_lock);
if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
gru->gs_asid_gen)) {
asid = gru_assign_asid(gru);
asids->mt_asid = asid;
asids->mt_asid_gen = gru->gs_asid_gen;
STAT(asid_new);
} else {
STAT(asid_reuse);
}
spin_unlock(&gru->gs_asid_lock);
BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
asids->mt_ctxbitmap |= ctxbitmap;
if (!test_bit(gru->gs_gid, gms->ms_asidmap))
__set_bit(gru->gs_gid, gms->ms_asidmap);
spin_unlock(&gms->ms_asid_lock);
gru_dbg(grudev,
"gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
gms->ms_asidmap[0]);
return asid;
}
static void gru_unload_mm_tracker(struct gru_state *gru,
struct gru_thread_state *gts)
{
struct gru_mm_struct *gms = gts->ts_gms;
struct gru_mm_tracker *asids;
unsigned short ctxbitmap;
asids = &gms->ms_asids[gru->gs_gid];
ctxbitmap = (1 << gts->ts_ctxnum);
spin_lock(&gms->ms_asid_lock);
spin_lock(&gru->gs_asid_lock);
BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
asids->mt_ctxbitmap ^= ctxbitmap;
gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
spin_unlock(&gru->gs_asid_lock);
spin_unlock(&gms->ms_asid_lock);
}
/*
* Decrement the reference count on a GTS structure. Free the structure
* if the reference count goes to zero.
*/
void gts_drop(struct gru_thread_state *gts)
{
if (gts && refcount_dec_and_test(&gts->ts_refcnt)) {
if (gts->ts_gms)
gru_drop_mmu_notifier(gts->ts_gms);
kfree(gts);
STAT(gts_free);
}
}
/*
* Locate the GTS structure for the current thread.
*/
static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
*vdata, int tsid)
{
struct gru_thread_state *gts;
list_for_each_entry(gts, &vdata->vd_head, ts_next)
if (gts->ts_tsid == tsid)
return gts;
return NULL;
}
/*
* Allocate a thread state structure.
*/
struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
int cbr_au_count, int dsr_au_count,
unsigned char tlb_preload_count, int options, int tsid)
{
struct gru_thread_state *gts;
struct gru_mm_struct *gms;
int bytes;
bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
bytes += sizeof(struct gru_thread_state);
gts = kmalloc(bytes, GFP_KERNEL);
if (!gts)
return ERR_PTR(-ENOMEM);
STAT(gts_alloc);
memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
refcount_set(&gts->ts_refcnt, 1);
mutex_init(&gts->ts_ctxlock);
gts->ts_cbr_au_count = cbr_au_count;
gts->ts_dsr_au_count = dsr_au_count;
gts->ts_tlb_preload_count = tlb_preload_count;
gts->ts_user_options = options;
gts->ts_user_blade_id = -1;
gts->ts_user_chiplet_id = -1;
gts->ts_tsid = tsid;
gts->ts_ctxnum = NULLCTX;
gts->ts_tlb_int_select = -1;
gts->ts_cch_req_slice = -1;
gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
if (vma) {
gts->ts_mm = current->mm;
gts->ts_vma = vma;
gms = gru_register_mmu_notifier();
if (IS_ERR(gms))
goto err;
gts->ts_gms = gms;
}
gru_dbg(grudev, "alloc gts %p\n", gts);
return gts;
err:
gts_drop(gts);
return ERR_CAST(gms);
}
/*
* Allocate a vma private data structure.
*/
struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
{
struct gru_vma_data *vdata = NULL;
vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
if (!vdata)
return NULL;
STAT(vdata_alloc);
INIT_LIST_HEAD(&vdata->vd_head);
spin_lock_init(&vdata->vd_lock);
gru_dbg(grudev, "alloc vdata %p\n", vdata);
return vdata;
}
/*
* Find the thread state structure for the current thread.
*/
struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
int tsid)
{
struct gru_vma_data *vdata = vma->vm_private_data;
struct gru_thread_state *gts;
spin_lock(&vdata->vd_lock);
gts = gru_find_current_gts_nolock(vdata, tsid);
spin_unlock(&vdata->vd_lock);
gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
return gts;
}
/*
* Allocate a new thread state for a GSEG. Note that races may allow
* another thread to create a gts first.
*/
struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
int tsid)
{
struct gru_vma_data *vdata = vma->vm_private_data;
struct gru_thread_state *gts, *ngts;
gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count,
vdata->vd_dsr_au_count,
vdata->vd_tlb_preload_count,
vdata->vd_user_options, tsid);
if (IS_ERR(gts))
return gts;
spin_lock(&vdata->vd_lock);
ngts = gru_find_current_gts_nolock(vdata, tsid);
if (ngts) {
gts_drop(gts);
gts = ngts;
STAT(gts_double_allocate);
} else {
list_add(&gts->ts_next, &vdata->vd_head);
}
spin_unlock(&vdata->vd_lock);
gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
return gts;
}
/*
* Free the GRU context assigned to the thread state.
*/
static void gru_free_gru_context(struct gru_thread_state *gts)
{
struct gru_state *gru;
gru = gts->ts_gru;
gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);
spin_lock(&gru->gs_lock);
gru->gs_gts[gts->ts_ctxnum] = NULL;
free_gru_resources(gru, gts);
BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
__clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
gts->ts_ctxnum = NULLCTX;
gts->ts_gru = NULL;
gts->ts_blade = -1;
spin_unlock(&gru->gs_lock);
gts_drop(gts);
STAT(free_context);
}
/*
* Prefetching cachelines helps hardware performance.
* (Strictly a performance enhancement. Not functionally required).
*/
static void prefetch_data(void *p, int num, int stride)
{
while (num-- > 0) {
prefetchw(p);
p += stride;
}
}
static inline long gru_copy_handle(void *d, void *s)
{
memcpy(d, s, GRU_HANDLE_BYTES);
return GRU_HANDLE_BYTES;
}
static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
unsigned long cbrmap, unsigned long length)
{
int i, scr;
prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
GRU_CACHE_LINE_BYTES);
for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
GRU_CACHE_LINE_BYTES);
cb += GRU_HANDLE_STRIDE;
}
}
static void gru_load_context_data(void *save, void *grubase, int ctxnum,
unsigned long cbrmap, unsigned long dsrmap,
int data_valid)
{
void *gseg, *cb, *cbe;
unsigned long length;
int i, scr;
gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
cb = gseg + GRU_CB_BASE;
cbe = grubase + GRU_CBE_BASE;
length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
gru_prefetch_context(gseg, cb, cbe, cbrmap, length);
for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
if (data_valid) {
save += gru_copy_handle(cb, save);
save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE,
save);
} else {
memset(cb, 0, GRU_CACHE_LINE_BYTES);
memset(cbe + i * GRU_HANDLE_STRIDE, 0,
GRU_CACHE_LINE_BYTES);
}
/* Flush CBE to hide race in context restart */
mb();
gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
cb += GRU_HANDLE_STRIDE;
}
if (data_valid)
memcpy(gseg + GRU_DS_BASE, save, length);
else
memset(gseg + GRU_DS_BASE, 0, length);
}
static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
unsigned long cbrmap, unsigned long dsrmap)
{
void *gseg, *cb, *cbe;
unsigned long length;
int i, scr;
gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
cb = gseg + GRU_CB_BASE;
cbe = grubase + GRU_CBE_BASE;
length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
/* CBEs may not be coherent. Flush them from cache */
for_each_cbr_in_allocation_map(i, &cbrmap, scr)
gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
mb(); /* Let the CL flush complete */
gru_prefetch_context(gseg, cb, cbe, cbrmap, length);
for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
save += gru_copy_handle(save, cb);
save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
cb += GRU_HANDLE_STRIDE;
}
memcpy(save, gseg + GRU_DS_BASE, length);
}
void gru_unload_context(struct gru_thread_state *gts, int savestate)
{
struct gru_state *gru = gts->ts_gru;
struct gru_context_configuration_handle *cch;
int ctxnum = gts->ts_ctxnum;
if (!is_kernel_context(gts))
zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n",
gts, gts->ts_cbr_map, gts->ts_dsr_map);
lock_cch_handle(cch);
if (cch_interrupt_sync(cch))
BUG();
if (!is_kernel_context(gts))
gru_unload_mm_tracker(gru, gts);
if (savestate) {
gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
ctxnum, gts->ts_cbr_map,
gts->ts_dsr_map);
gts->ts_data_valid = 1;
}
if (cch_deallocate(cch))
BUG();
unlock_cch_handle(cch);
gru_free_gru_context(gts);
}
/*
* Load a GRU context by copying it from the thread data structure in memory
* to the GRU.
*/
void gru_load_context(struct gru_thread_state *gts)
{
struct gru_state *gru = gts->ts_gru;
struct gru_context_configuration_handle *cch;
int i, err, asid, ctxnum = gts->ts_ctxnum;
cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
lock_cch_handle(cch);
cch->tfm_fault_bit_enable =
(gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
|| gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
if (cch->tlb_int_enable) {
gts->ts_tlb_int_select = gru_cpu_fault_map_id();
cch->tlb_int_select = gts->ts_tlb_int_select;
}
if (gts->ts_cch_req_slice >= 0) {
cch->req_slice_set_enable = 1;
cch->req_slice = gts->ts_cch_req_slice;
} else {
cch->req_slice_set_enable = 0;
}
cch->tfm_done_bit_enable = 0;
cch->dsr_allocation_map = gts->ts_dsr_map;
cch->cbr_allocation_map = gts->ts_cbr_map;
if (is_kernel_context(gts)) {
cch->unmap_enable = 1;
cch->tfm_done_bit_enable = 1;
cch->cb_int_enable = 1;
cch->tlb_int_select = 0; /* For now, ints go to cpu 0 */
} else {
cch->unmap_enable = 0;
cch->tfm_done_bit_enable = 0;
cch->cb_int_enable = 0;
asid = gru_load_mm_tracker(gru, gts);
for (i = 0; i < 8; i++) {
cch->asid[i] = asid + i;
cch->sizeavail[i] = gts->ts_sizeavail;
}
}
err = cch_allocate(cch);
if (err) {
gru_dbg(grudev,
"err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
BUG();
}
gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid);
if (cch_start(cch))
BUG();
unlock_cch_handle(cch);
gru_dbg(grudev, "gid %d, gts %p, cbrmap 0x%lx, dsrmap 0x%lx, tie %d, tis %d\n",
gts->ts_gru->gs_gid, gts, gts->ts_cbr_map, gts->ts_dsr_map,
(gts->ts_user_options == GRU_OPT_MISS_FMM_INTR), gts->ts_tlb_int_select);
}
/*
* Update fields in an active CCH:
* - retarget interrupts on local blade
* - update sizeavail mask
*/
int gru_update_cch(struct gru_thread_state *gts)
{
struct gru_context_configuration_handle *cch;
struct gru_state *gru = gts->ts_gru;
int i, ctxnum = gts->ts_ctxnum, ret = 0;
cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
lock_cch_handle(cch);
if (cch->state == CCHSTATE_ACTIVE) {
if (gru->gs_gts[gts->ts_ctxnum] != gts)
goto exit;
if (cch_interrupt(cch))
BUG();
for (i = 0; i < 8; i++)
cch->sizeavail[i] = gts->ts_sizeavail;
gts->ts_tlb_int_select = gru_cpu_fault_map_id();
cch->tlb_int_select = gru_cpu_fault_map_id();
cch->tfm_fault_bit_enable =
(gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
|| gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
if (cch_start(cch))
BUG();
ret = 1;
}
exit:
unlock_cch_handle(cch);
return ret;
}
/*
* Update CCH tlb interrupt select. Required when all the following is true:
* - task's GRU context is loaded into a GRU
* - task is using interrupt notification for TLB faults
* - task has migrated to a different cpu on the same blade where
* it was previously running.
*/
static int gru_retarget_intr(struct gru_thread_state *gts)
{
if (gts->ts_tlb_int_select < 0
|| gts->ts_tlb_int_select == gru_cpu_fault_map_id())
return 0;
gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
gru_cpu_fault_map_id());
return gru_update_cch(gts);
}
/*
* Check if a GRU context is allowed to use a specific chiplet. By default
* a context is assigned to any blade-local chiplet. However, users can
* override this.
* Returns 1 if assignment allowed, 0 otherwise
*/
static int gru_check_chiplet_assignment(struct gru_state *gru,
struct gru_thread_state *gts)
{
int blade_id;
int chiplet_id;
blade_id = gts->ts_user_blade_id;
if (blade_id < 0)
blade_id = uv_numa_blade_id();
chiplet_id = gts->ts_user_chiplet_id;
return gru->gs_blade_id == blade_id &&
(chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id);
}
/*
* Unload the gru context if it is not assigned to the correct blade or
* chiplet. Misassignment can occur if the process migrates to a different
* blade or if the user changes the selected blade/chiplet.
*/
int gru_check_context_placement(struct gru_thread_state *gts)
{
struct gru_state *gru;
int ret = 0;
/*
* If the current task is the context owner, verify that the
* context is correctly placed. This test is skipped for non-owner
* references. Pthread apps use non-owner references to the CBRs.
*/
gru = gts->ts_gru;
/*
* If gru or gts->ts_tgid_owner isn't initialized properly, return
* success to indicate that the caller does not need to unload the
* gru context. The caller is responsible for inspecting and
* reinitializing them if needed.
*/
if (!gru || gts->ts_tgid_owner != current->tgid)
return ret;
if (!gru_check_chiplet_assignment(gru, gts)) {
STAT(check_context_unload);
ret = -EINVAL;
} else if (gru_retarget_intr(gts)) {
STAT(check_context_retarget_intr);
}
return ret;
}
/*
* Insufficient GRU resources available on the local blade. Steal a context from
* a process. This is a hack until a _real_ resource scheduler is written....
*/
#define next_ctxnum(n) ((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(b, g) (((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \
((g)+1) : &(b)->bs_grus[0])
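/*
* Illustrative example of the round-robin scan order, assuming
* GRU_NUM_CCH == 16 and GRU_CHIPLETS_PER_BLADE == 2: next_ctxnum()
* cycles 0, 1, ..., 14, 0, ... and each time the context number wraps
* to 0 the scan advances to the other chiplet via next_gru(). The LRU
* position (bs_lru_gru/bs_lru_ctxnum) is remembered across calls so
* successive steals spread across contexts instead of always evicting
* the same victim.
*/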
static int is_gts_stealable(struct gru_thread_state *gts,
struct gru_blade_state *bs)
{
if (is_kernel_context(gts))
return down_write_trylock(&bs->bs_kgts_sema);
else
return mutex_trylock(&gts->ts_ctxlock);
}
static void gts_stolen(struct gru_thread_state *gts,
struct gru_blade_state *bs)
{
if (is_kernel_context(gts)) {
up_write(&bs->bs_kgts_sema);
STAT(steal_kernel_context);
} else {
mutex_unlock(&gts->ts_ctxlock);
STAT(steal_user_context);
}
}
void gru_steal_context(struct gru_thread_state *gts)
{
struct gru_blade_state *blade;
struct gru_state *gru, *gru0;
struct gru_thread_state *ngts = NULL;
int ctxnum, ctxnum0, flag = 0, cbr, dsr;
int blade_id;
blade_id = gts->ts_user_blade_id;
if (blade_id < 0)
blade_id = uv_numa_blade_id();
cbr = gts->ts_cbr_au_count;
dsr = gts->ts_dsr_au_count;
blade = gru_base[blade_id];
spin_lock(&blade->bs_lock);
ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
gru = blade->bs_lru_gru;
if (ctxnum == 0)
gru = next_gru(blade, gru);
blade->bs_lru_gru = gru;
blade->bs_lru_ctxnum = ctxnum;
ctxnum0 = ctxnum;
gru0 = gru;
while (1) {
if (gru_check_chiplet_assignment(gru, gts)) {
if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
break;
spin_lock(&gru->gs_lock);
for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
if (flag && gru == gru0 && ctxnum == ctxnum0)
break;
ngts = gru->gs_gts[ctxnum];
/*
* We are grabbing locks out of order, so trylock is
* needed. GTSs are usually not locked, so the odds of
* success are high. If trylock fails, try to steal a
* different GSEG.
*/
if (ngts && is_gts_stealable(ngts, blade))
break;
ngts = NULL;
}
spin_unlock(&gru->gs_lock);
if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
break;
}
if (flag && gru == gru0)
break;
flag = 1;
ctxnum = 0;
gru = next_gru(blade, gru);
}
spin_unlock(&blade->bs_lock);
if (ngts) {
gts->ustats.context_stolen++;
ngts->ts_steal_jiffies = jiffies;
gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1);
gts_stolen(ngts, blade);
} else {
STAT(steal_context_failed);
}
gru_dbg(grudev,
"stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
" avail cb %ld, ds %ld\n",
gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
hweight64(gru->gs_dsr_map));
}
/*
* Assign a gru context.
*/
static int gru_assign_context_number(struct gru_state *gru)
{
int ctxnum;
ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
__set_bit(ctxnum, &gru->gs_context_map);
return ctxnum;
}
/*
* Scan the GRUs on the local blade & assign a GRU context.
*/
struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
{
struct gru_state *gru, *grux;
int i, max_active_contexts;
int blade_id = gts->ts_user_blade_id;
if (blade_id < 0)
blade_id = uv_numa_blade_id();
again:
gru = NULL;
max_active_contexts = GRU_NUM_CCH;
for_each_gru_on_blade(grux, blade_id, i) {
if (!gru_check_chiplet_assignment(grux, gts))
continue;
if (check_gru_resources(grux, gts->ts_cbr_au_count,
gts->ts_dsr_au_count,
max_active_contexts)) {
gru = grux;
max_active_contexts = grux->gs_active_contexts;
if (max_active_contexts == 0)
break;
}
}
if (gru) {
spin_lock(&gru->gs_lock);
if (!check_gru_resources(gru, gts->ts_cbr_au_count,
gts->ts_dsr_au_count, GRU_NUM_CCH)) {
spin_unlock(&gru->gs_lock);
goto again;
}
reserve_gru_resources(gru, gts);
gts->ts_gru = gru;
gts->ts_blade = gru->gs_blade_id;
gts->ts_ctxnum = gru_assign_context_number(gru);
refcount_inc(&gts->ts_refcnt);
gru->gs_gts[gts->ts_ctxnum] = gts;
spin_unlock(&gru->gs_lock);
STAT(assign_context);
gru_dbg(grudev,
"gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
gts->ts_gru->gs_gid, gts->ts_ctxnum,
gts->ts_cbr_au_count, gts->ts_dsr_au_count);
} else {
gru_dbg(grudev, "failed to allocate a GTS %s\n", "");
STAT(assign_context_failed);
}
return gru;
}
/*
* gru_nopage
*
* Map the user's GRU segment
*
* Note: gru segments are always mmapped on GRU_GSEG_PAGESIZE boundaries.
*/
vm_fault_t gru_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct gru_thread_state *gts;
unsigned long paddr, vaddr;
unsigned long expires;
vaddr = vmf->address;
gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
vma, vaddr, GSEG_BASE(vaddr));
STAT(nopfn);
/* The following check ensures vaddr is a valid address in the VMA */
gts = gru_find_thread_state(vma, TSID(vaddr, vma));
if (!gts)
return VM_FAULT_SIGBUS;
again:
mutex_lock(&gts->ts_ctxlock);
preempt_disable();
if (gru_check_context_placement(gts)) {
preempt_enable();
mutex_unlock(&gts->ts_ctxlock);
gru_unload_context(gts, 1);
return VM_FAULT_NOPAGE;
}
if (!gts->ts_gru) {
STAT(load_user_context);
if (!gru_assign_gru_context(gts)) {
preempt_enable();
mutex_unlock(&gts->ts_ctxlock);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */
expires = gts->ts_steal_jiffies + GRU_STEAL_DELAY;
if (time_before(expires, jiffies))
gru_steal_context(gts);
goto again;
}
gru_load_context(gts);
paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
vma->vm_page_prot);
}
preempt_enable();
mutex_unlock(&gts->ts_ctxlock);
return VM_FAULT_NOPAGE;
}
| linux-master | drivers/misc/sgi-gru/grumain.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SN Platform GRU Driver
*
* KERNEL SERVICES THAT USE THE GRU
*
* Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/sync_core.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <asm/io_apic.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
#include "grukservices.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>
/*
* Kernel GRU Usage
*
* The following is an interim algorithm for management of kernel GRU
* resources. This will likely be replaced when we better understand the
* kernel/user requirements.
*
* Blade percpu resources reserved for kernel use. These resources are
* reserved whenever the kernel context for the blade is loaded. Note
* that the kernel context is not guaranteed to be always available. It is
* loaded on demand & can be stolen by a user if the user demand exceeds the
* kernel demand. The kernel can always reload the kernel context but
* a SLEEP may be required!
*
* Async Overview:
*
* Each blade has one "kernel context" that owns GRU kernel resources
* located on the blade. Kernel drivers use GRU resources in this context
* for sending messages, zeroing memory, etc.
*
* The kernel context is dynamically loaded on demand. If it is not in
* use by the kernel, the kernel context can be unloaded & given to a user.
* The kernel context will be reloaded when needed. This may require that
* a context be stolen from a user.
* NOTE: frequent unloading/reloading of the kernel context is
* expensive. We are depending on batch schedulers, cpusets, sane
* drivers or some other mechanism to prevent the need for frequent
* stealing/reloading.
*
* The kernel context consists of two parts:
* - 1 CB & a few DSRs that are reserved for each cpu on the blade.
* Each cpu has its own private resources & does not share them
* with other cpus. These resources are used serially, i.e.,
* locked, used & unlocked on each call to a function in
* grukservices.
* (Now that we have dynamic loading of kernel contexts, I
* may rethink this & allow sharing between cpus....)
*
* - Additional resources can be reserved long term & used directly
* by UV drivers located in the kernel. Drivers using these GRU
* resources can use asynchronous GRU instructions that send
* interrupts on completion.
* - these resources must be explicitly locked/unlocked
* - locked resources prevent (obviously) the kernel
* context from being unloaded.
* - drivers using these resource directly issue their own
* GRU instruction and must wait/check completion.
*
* When these resources are reserved, the caller can optionally
* associate a wait_queue with the resources and use asynchronous
* GRU instructions. When an async GRU instruction completes, the
* driver will do a wakeup on the event.
*
*/
#define ASYNC_HAN_TO_BID(h) ((h) - 1)
#define ASYNC_BID_TO_HAN(b) ((b) + 1)
#define ASYNC_HAN_TO_BS(h) gru_base[ASYNC_HAN_TO_BID(h)]
#define GRU_NUM_KERNEL_CBR 1
#define GRU_NUM_KERNEL_DSR_BYTES 256
#define GRU_NUM_KERNEL_DSR_CL (GRU_NUM_KERNEL_DSR_BYTES / \
GRU_CACHE_LINE_BYTES)
/* GRU instruction attributes for all instructions */
#define IMA IMA_CB_DELAY
/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */
#define __gru_cacheline_aligned__ \
__attribute__((__aligned__(GRU_CACHE_LINE_BYTES)))
#define MAGIC 0x1234567887654321UL
/* Default retry count for GRU errors on kernel instructions */
#define EXCEPTION_RETRY_LIMIT 3
/* Status of message queue sections */
#define MQS_EMPTY 0
#define MQS_FULL 1
#define MQS_NOOP 2
/*----------------- RESOURCE MANAGEMENT -------------------------------------*/
/* optimized for x86_64 */
struct message_queue {
union gru_mesqhead head __gru_cacheline_aligned__; /* CL 0 */
int qlines; /* DW 1 */
long hstatus[2];
void *next __gru_cacheline_aligned__;/* CL 1 */
void *limit;
void *start;
void *start2;
char data ____cacheline_aligned; /* CL 2 */
};
/* First word in every message - used by mesq interface */
struct message_header {
char present;
char present2;
char lines;
char fill;
};
#define HSTATUS(mq, h) ((mq) + offsetof(struct message_queue, hstatus[h]))
/*
* Reload the blade's kernel context into a GRU chiplet. Called holding
* the bs_kgts_sema for READ. Will steal user contexts if necessary.
*/
static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
{
struct gru_state *gru;
struct gru_thread_state *kgts;
void *vaddr;
int ctxnum, ncpus;
up_read(&bs->bs_kgts_sema);
down_write(&bs->bs_kgts_sema);
if (!bs->bs_kgts) {
do {
bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0, 0);
if (!IS_ERR(bs->bs_kgts))
break;
msleep(1);
} while (true);
bs->bs_kgts->ts_user_blade_id = blade_id;
}
kgts = bs->bs_kgts;
if (!kgts->ts_gru) {
STAT(load_kernel_context);
ncpus = uv_blade_nr_possible_cpus(blade_id);
kgts->ts_cbr_au_count = GRU_CB_COUNT_TO_AU(
GRU_NUM_KERNEL_CBR * ncpus + bs->bs_async_cbrs);
kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
GRU_NUM_KERNEL_DSR_BYTES * ncpus +
bs->bs_async_dsr_bytes);
while (!gru_assign_gru_context(kgts)) {
msleep(1);
gru_steal_context(kgts);
}
gru_load_context(kgts);
gru = bs->bs_kgts->ts_gru;
vaddr = gru->gs_gru_base_vaddr;
ctxnum = kgts->ts_ctxnum;
bs->kernel_cb = get_gseg_base_address_cb(vaddr, ctxnum, 0);
bs->kernel_dsr = get_gseg_base_address_ds(vaddr, ctxnum, 0);
}
downgrade_write(&bs->bs_kgts_sema);
}
/*
* Free all kernel contexts that are not currently in use.
* Returns 0 if all freed, else the number of in-use contexts.
*/
static int gru_free_kernel_contexts(void)
{
struct gru_blade_state *bs;
struct gru_thread_state *kgts;
int bid, ret = 0;
for (bid = 0; bid < GRU_MAX_BLADES; bid++) {
bs = gru_base[bid];
if (!bs)
continue;
/* Ignore busy contexts. Don't want to block here. */
if (down_write_trylock(&bs->bs_kgts_sema)) {
kgts = bs->bs_kgts;
if (kgts && kgts->ts_gru)
gru_unload_context(kgts, 0);
bs->bs_kgts = NULL;
up_write(&bs->bs_kgts_sema);
kfree(kgts);
} else {
ret++;
}
}
return ret;
}
/*
* Lock & load the kernel context for the specified blade.
*/
static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
{
struct gru_blade_state *bs;
int bid;
STAT(lock_kernel_context);
again:
bid = blade_id < 0 ? uv_numa_blade_id() : blade_id;
bs = gru_base[bid];
/* Handle the case where migration occurred while waiting for the sema */
down_read(&bs->bs_kgts_sema);
if (blade_id < 0 && bid != uv_numa_blade_id()) {
up_read(&bs->bs_kgts_sema);
goto again;
}
if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
gru_load_kernel_context(bs, bid);
return bs;
}
/*
* Unlock the kernel context for the specified blade. Context is not
* unloaded but may be stolen before next use.
*/
static void gru_unlock_kernel_context(int blade_id)
{
struct gru_blade_state *bs;
bs = gru_base[blade_id];
up_read(&bs->bs_kgts_sema);
STAT(unlock_kernel_context);
}
/*
* Reserve & get pointers to the DSR/CBRs reserved for the current cpu.
* - returns with preemption disabled
*/
static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
{
struct gru_blade_state *bs;
int lcpu;
BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
preempt_disable();
bs = gru_lock_kernel_context(-1);
lcpu = uv_blade_processor_id();
*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
return 0;
}
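/*
* Illustrative example of the per-cpu carve-out above, assuming
* GRU_HANDLE_STRIDE is 256 bytes: blade-local cpu 3 gets the CB at
* kernel_cb + 3 * 256 and the 256-byte DSR window at
* kernel_dsr + 3 * GRU_NUM_KERNEL_DSR_BYTES == kernel_dsr + 768.
* Because the resources are indexed by uv_blade_processor_id() and
* preemption is disabled, no locking is needed for these per-cpu areas.
*/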
/*
* Free the current cpu's reserved DSR/CBR resources.
*/
static void gru_free_cpu_resources(void *cb, void *dsr)
{
gru_unlock_kernel_context(uv_numa_blade_id());
preempt_enable();
}
/*
* Reserve GRU resources to be used asynchronously.
* Note: currently supports only 1 reservation per blade.
*
* input:
* blade_id - blade on which resources should be reserved
* cbrs - number of CBRs
* dsr_bytes - number of DSR bytes needed
* output:
* handle to identify resource
* (0 = async resources already reserved)
*/
unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes,
struct completion *cmp)
{
struct gru_blade_state *bs;
struct gru_thread_state *kgts;
int ret = 0;
bs = gru_base[blade_id];
down_write(&bs->bs_kgts_sema);
/* Verify no resources already reserved */
if (bs->bs_async_dsr_bytes + bs->bs_async_cbrs)
goto done;
bs->bs_async_dsr_bytes = dsr_bytes;
bs->bs_async_cbrs = cbrs;
bs->bs_async_wq = cmp;
kgts = bs->bs_kgts;
/* Resources changed. Unload context if already loaded */
if (kgts && kgts->ts_gru)
gru_unload_context(kgts, 0);
ret = ASYNC_BID_TO_HAN(blade_id);
done:
up_write(&bs->bs_kgts_sema);
return ret;
}
/*
* Release async resources previously reserved.
*
* input:
* han - handle to identify resources
*/
void gru_release_async_resources(unsigned long han)
{
struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
down_write(&bs->bs_kgts_sema);
bs->bs_async_dsr_bytes = 0;
bs->bs_async_cbrs = 0;
bs->bs_async_wq = NULL;
up_write(&bs->bs_kgts_sema);
}
/*
* Wait for async GRU instructions to complete.
*
* input:
* han - handle to identify resources
*/
void gru_wait_async_cbr(unsigned long han)
{
struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
wait_for_completion(bs->bs_async_wq);
mb();
}
/*
* Lock previously reserved async GRU resources
*
* input:
* han - handle to identify resources
* output:
* cb - pointer to first CBR
* dsr - pointer to first DSR
*/
void gru_lock_async_resource(unsigned long han, void **cb, void **dsr)
{
struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
int blade_id = ASYNC_HAN_TO_BID(han);
int ncpus;
gru_lock_kernel_context(blade_id);
ncpus = uv_blade_nr_possible_cpus(blade_id);
if (cb)
*cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE;
if (dsr)
*dsr = bs->kernel_dsr + ncpus * GRU_NUM_KERNEL_DSR_BYTES;
}
/*
* Unlock previously reserved async GRU resources
*
* input:
* han - handle to identify resources
*/
void gru_unlock_async_resource(unsigned long han)
{
int blade_id = ASYNC_HAN_TO_BID(han);
gru_unlock_kernel_context(blade_id);
}
/*----------------------------------------------------------------------*/
int gru_get_cb_exception_detail(void *cb,
struct control_block_extended_exc_detail *excdet)
{
struct gru_control_block_extended *cbe;
struct gru_thread_state *kgts = NULL;
unsigned long off;
int cbrnum, bid;
/*
* Locate kgts for cb. This algorithm is SLOW but
* this function is rarely called (i.e., almost never).
* Performance does not matter.
*/
for_each_possible_blade(bid) {
if (!gru_base[bid])
break;
kgts = gru_base[bid]->bs_kgts;
if (!kgts || !kgts->ts_gru)
continue;
off = cb - kgts->ts_gru->gs_gru_base_vaddr;
if (off < GRU_SIZE)
break;
kgts = NULL;
}
BUG_ON(!kgts);
cbrnum = thread_cbr_number(kgts, get_cb_number(cb));
cbe = get_cbe(GRUBASE(cb), cbrnum);
gru_flush_cache(cbe); /* CBE not coherent */
sync_core();
excdet->opc = cbe->opccpy;
excdet->exopc = cbe->exopccpy;
excdet->ecause = cbe->ecause;
excdet->exceptdet0 = cbe->idef1upd;
excdet->exceptdet1 = cbe->idef3upd;
gru_flush_cache(cbe);
return 0;
}
static char *gru_get_cb_exception_detail_str(int ret, void *cb,
char *buf, int size)
{
struct gru_control_block_status *gen = cb;
struct control_block_extended_exc_detail excdet;
if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
gru_get_cb_exception_detail(cb, &excdet);
snprintf(buf, size,
"GRU:%d exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
"excdet0 0x%lx, excdet1 0x%x", smp_processor_id(),
gen, excdet.opc, excdet.exopc, excdet.ecause,
excdet.exceptdet0, excdet.exceptdet1);
} else {
snprintf(buf, size, "No exception");
}
return buf;
}
static int gru_wait_idle_or_exception(struct gru_control_block_status *gen)
{
while (gen->istatus >= CBS_ACTIVE) {
cpu_relax();
barrier();
}
return gen->istatus;
}
static int gru_retry_exception(void *cb)
{
struct gru_control_block_status *gen = cb;
struct control_block_extended_exc_detail excdet;
int retry = EXCEPTION_RETRY_LIMIT;
while (1) {
if (gru_wait_idle_or_exception(gen) == CBS_IDLE)
return CBS_IDLE;
if (gru_get_cb_message_queue_substatus(cb))
return CBS_EXCEPTION;
gru_get_cb_exception_detail(cb, &excdet);
if ((excdet.ecause & ~EXCEPTION_RETRY_BITS) ||
(excdet.cbrexecstatus & CBR_EXS_ABORT_OCC))
break;
if (retry-- == 0)
break;
gen->icmd = 1;
gru_flush_cache(gen);
}
return CBS_EXCEPTION;
}
int gru_check_status_proc(void *cb)
{
struct gru_control_block_status *gen = cb;
int ret;
ret = gen->istatus;
if (ret == CBS_EXCEPTION)
ret = gru_retry_exception(cb);
rmb();
return ret;
}
int gru_wait_proc(void *cb)
{
struct gru_control_block_status *gen = cb;
int ret;
ret = gru_wait_idle_or_exception(gen);
if (ret == CBS_EXCEPTION)
ret = gru_retry_exception(cb);
rmb();
return ret;
}
static void gru_abort(int ret, void *cb, char *str)
{
char buf[GRU_EXC_STR_SIZE];
panic("GRU FATAL ERROR: %s - %s\n", str,
gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf)));
}
void gru_wait_abort_proc(void *cb)
{
int ret;
ret = gru_wait_proc(cb);
if (ret)
gru_abort(ret, cb, "gru_wait_abort");
}
/*------------------------------ MESSAGE QUEUES -----------------------------*/
/* Internal status. These are NOT returned to the user. */
#define MQIE_AGAIN -1 /* try again */
/*
* Save/restore the "present" flag that is in the second line of 2-line
* messages
*/
static inline int get_present2(void *p)
{
struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
return mhdr->present;
}
static inline void restore_present2(void *p, int val)
{
struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
mhdr->present = val;
}
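/*
* Illustrative note: a 2-line message occupies two consecutive cache
* lines, each beginning with its own "present" byte. The helpers above
* let callers temporarily clear the second line's flag (so a receiver
* never sees a half-written 2-line message) and restore it afterwards.
*/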
/*
* Create a message queue.
* qlines - message queue size in cache lines. Includes 2-line header.
*/
int gru_create_message_queue(struct gru_message_queue_desc *mqd,
void *p, unsigned int bytes, int nasid, int vector, int apicid)
{
struct message_queue *mq = p;
unsigned int qlines;
qlines = bytes / GRU_CACHE_LINE_BYTES - 2;
memset(mq, 0, bytes);
mq->start = &mq->data;
mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
mq->next = &mq->data;
mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
mq->qlines = qlines;
mq->hstatus[0] = 0;
mq->hstatus[1] = 1;
mq->head = gru_mesq_head(2, qlines / 2 + 1);
mqd->mq = mq;
mqd->mq_gpa = uv_gpa(mq);
mqd->qlines = qlines;
mqd->interrupt_pnode = nasid >> 1;
mqd->interrupt_vector = vector;
mqd->interrupt_apicid = apicid;
return 0;
}
EXPORT_SYMBOL_GPL(gru_create_message_queue);
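/*
* Worked example (illustrative values): with bytes == 4096 the queue
* has 64 cache lines, so qlines == 62 after subtracting the 2-line
* header. Then:
*
* start = &mq->data (first line after the header)
* start2 = &mq->data + 30 * 64 (first line of the second half)
* limit = &mq->data + 60 * 64
* head = gru_mesq_head(2, 32) (fill the first half first)
*
* Managing the queue as two halves lets the receiver drain one half
* while senders fill the other.
*/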
/*
* Send a NOOP message to a message queue
* Returns:
* 0 - if queue is full after the send. This is the normal case
* but various races can change this.
* -1 - if mesq sent successfully but queue not full
* >0 - unexpected error. MQE_xxx returned
*/
static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
void *mesg)
{
const struct message_header noop_header = {
.present = MQS_NOOP, .lines = 1};
unsigned long m;
int substatus, ret;
struct message_header save_mhdr, *mhdr = mesg;
STAT(mesq_noop);
save_mhdr = *mhdr;
*mhdr = noop_header;
gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), 1, IMA);
ret = gru_wait(cb);
if (ret) {
substatus = gru_get_cb_message_queue_substatus(cb);
switch (substatus) {
case CBSS_NO_ERROR:
STAT(mesq_noop_unexpected_error);
ret = MQE_UNEXPECTED_CB_ERR;
break;
case CBSS_LB_OVERFLOWED:
STAT(mesq_noop_lb_overflow);
ret = MQE_CONGESTION;
break;
case CBSS_QLIMIT_REACHED:
STAT(mesq_noop_qlimit_reached);
ret = 0;
break;
case CBSS_AMO_NACKED:
STAT(mesq_noop_amo_nacked);
ret = MQE_CONGESTION;
break;
case CBSS_PUT_NACKED:
STAT(mesq_noop_put_nacked);
m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
IMA);
if (gru_wait(cb) == CBS_IDLE)
ret = MQIE_AGAIN;
else
ret = MQE_UNEXPECTED_CB_ERR;
break;
case CBSS_PAGE_OVERFLOW:
STAT(mesq_noop_page_overflow);
fallthrough;
default:
BUG();
}
}
*mhdr = save_mhdr;
return ret;
}
/*
* Handle a gru_mesq full.
*/
static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd,
void *mesg, int lines)
{
union gru_mesqhead mqh;
unsigned int limit, head;
unsigned long avalue;
int half, qlines;
/* Determine if switching to first/second half of q */
avalue = gru_get_amo_value(cb);
head = gru_get_amo_value_head(cb);
limit = gru_get_amo_value_limit(cb);
qlines = mqd->qlines;
half = (limit != qlines);
if (half)
mqh = gru_mesq_head(qlines / 2 + 1, qlines);
else
mqh = gru_mesq_head(2, qlines / 2 + 1);
/* Try to get lock for switching head pointer */
gru_gamir(cb, EOP_IR_CLR, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, IMA);
if (gru_wait(cb) != CBS_IDLE)
goto cberr;
if (!gru_get_amo_value(cb)) {
STAT(mesq_qf_locked);
return MQE_QUEUE_FULL;
}
/* Got the lock. Send optional NOOP if queue not full. */
if (head != limit) {
if (send_noop_message(cb, mqd, mesg)) {
gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half),
XTYPE_DW, IMA);
if (gru_wait(cb) != CBS_IDLE)
goto cberr;
STAT(mesq_qf_noop_not_full);
return MQIE_AGAIN;
}
avalue++;
}
/* Then flip queuehead to other half of queue. */
gru_gamer(cb, EOP_ERR_CSWAP, mqd->mq_gpa, XTYPE_DW, mqh.val, avalue,
IMA);
if (gru_wait(cb) != CBS_IDLE)
goto cberr;
/* If the queue head swap was not successful, clear the hstatus lock */
if (gru_get_amo_value(cb) != avalue) {
STAT(mesq_qf_switch_head_failed);
gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), XTYPE_DW,
IMA);
if (gru_wait(cb) != CBS_IDLE)
goto cberr;
}
return MQIE_AGAIN;
cberr:
STAT(mesq_qf_unexpected_error);
return MQE_UNEXPECTED_CB_ERR;
}
/*
 * Handle a PUT failure. Note: if the message was a 2-line message, one of
 * the lines might have been successfully written. Before sending the
* message, "present" must be cleared in BOTH lines to prevent the receiver
* from prematurely seeing the full message.
*/
static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
void *mesg, int lines)
{
unsigned long m;
int ret, loops = 200; /* experimentally determined */
m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
if (lines == 2) {
gru_vset(cb, m, 0, XTYPE_CL, lines, 1, IMA);
if (gru_wait(cb) != CBS_IDLE)
return MQE_UNEXPECTED_CB_ERR;
}
gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
if (gru_wait(cb) != CBS_IDLE)
return MQE_UNEXPECTED_CB_ERR;
if (!mqd->interrupt_vector)
return MQE_OK;
/*
* Send a noop message in order to deliver a cross-partition interrupt
* to the SSI that contains the target message queue. Normally, the
* interrupt is automatically delivered by hardware following mesq
* operations, but some error conditions require explicit delivery.
* The noop message will trigger delivery. Otherwise partition failures
* could cause unrecovered errors.
*/
do {
ret = send_noop_message(cb, mqd, mesg);
} while ((ret == MQIE_AGAIN || ret == MQE_CONGESTION) && (loops-- > 0));
if (ret == MQIE_AGAIN || ret == MQE_CONGESTION) {
/*
* Don't indicate to the app to resend the message, as it's
* already been successfully sent. We simply send an OK
* (rather than fail the send with MQE_UNEXPECTED_CB_ERR),
* assuming that the other side is receiving enough
* interrupts to get this message processed anyway.
*/
ret = MQE_OK;
}
return ret;
}
/*
* Handle a gru_mesq failure. Some of these failures are software recoverable
* or retryable.
*/
static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
void *mesg, int lines)
{
int substatus, ret = 0;
substatus = gru_get_cb_message_queue_substatus(cb);
switch (substatus) {
case CBSS_NO_ERROR:
STAT(mesq_send_unexpected_error);
ret = MQE_UNEXPECTED_CB_ERR;
break;
case CBSS_LB_OVERFLOWED:
STAT(mesq_send_lb_overflow);
ret = MQE_CONGESTION;
break;
case CBSS_QLIMIT_REACHED:
STAT(mesq_send_qlimit_reached);
ret = send_message_queue_full(cb, mqd, mesg, lines);
break;
case CBSS_AMO_NACKED:
STAT(mesq_send_amo_nacked);
ret = MQE_CONGESTION;
break;
case CBSS_PUT_NACKED:
STAT(mesq_send_put_nacked);
ret = send_message_put_nacked(cb, mqd, mesg, lines);
break;
case CBSS_PAGE_OVERFLOW:
STAT(mesq_page_overflow);
fallthrough;
default:
BUG();
}
return ret;
}
/*
* Send a message to a message queue
* mqd message queue descriptor
 * mesg	message. Must be vaddr within a GSEG
* bytes message size (<= 2 CL)
*/
int gru_send_message_gpa(struct gru_message_queue_desc *mqd, void *mesg,
unsigned int bytes)
{
struct message_header *mhdr;
void *cb;
void *dsr;
int istatus, clines, ret;
STAT(mesq_send);
BUG_ON(bytes < sizeof(int) || bytes > 2 * GRU_CACHE_LINE_BYTES);
clines = DIV_ROUND_UP(bytes, GRU_CACHE_LINE_BYTES);
if (gru_get_cpu_resources(bytes, &cb, &dsr))
return MQE_BUG_NO_RESOURCES;
memcpy(dsr, mesg, bytes);
mhdr = dsr;
mhdr->present = MQS_FULL;
mhdr->lines = clines;
if (clines == 2) {
mhdr->present2 = get_present2(mhdr);
restore_present2(mhdr, MQS_FULL);
}
do {
ret = MQE_OK;
gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), clines, IMA);
istatus = gru_wait(cb);
if (istatus != CBS_IDLE)
ret = send_message_failure(cb, mqd, dsr, clines);
} while (ret == MQIE_AGAIN);
gru_free_cpu_resources(cb, dsr);
if (ret)
STAT(mesq_send_failed);
return ret;
}
EXPORT_SYMBOL_GPL(gru_send_message_gpa);
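/*
 * Illustrative only (a sketch, not in-tree code): sending a one-cacheline
 * message on a queue created as above, retrying while the fabric reports
 * congestion; handling of the other MQE_xxx codes is elided.
 *
 *	char msg[GRU_CACHE_LINE_BYTES] = { 0 };
 *	int ret;
 *
 *	do {
 *		ret = gru_send_message_gpa(&mqd, msg, sizeof(msg));
 *	} while (ret == MQE_CONGESTION);
 */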
/*
* Advance the receive pointer for the queue to the next message.
*/
void gru_free_message(struct gru_message_queue_desc *mqd, void *mesg)
{
struct message_queue *mq = mqd->mq;
struct message_header *mhdr = mq->next;
void *next, *pnext;
int half = -1;
int lines = mhdr->lines;
if (lines == 2)
restore_present2(mhdr, MQS_EMPTY);
mhdr->present = MQS_EMPTY;
pnext = mq->next;
next = pnext + GRU_CACHE_LINE_BYTES * lines;
if (next == mq->limit) {
next = mq->start;
half = 1;
} else if (pnext < mq->start2 && next >= mq->start2) {
half = 0;
}
if (half >= 0)
mq->hstatus[half] = 1;
mq->next = next;
}
EXPORT_SYMBOL_GPL(gru_free_message);
/*
* Get next message from message queue. Return NULL if no message
 * present. User must call gru_free_message() to move to the next message.
 * mqd	message queue descriptor
*/
void *gru_get_next_message(struct gru_message_queue_desc *mqd)
{
struct message_queue *mq = mqd->mq;
struct message_header *mhdr = mq->next;
int present = mhdr->present;
/* skip NOOP messages */
while (present == MQS_NOOP) {
gru_free_message(mqd, mhdr);
mhdr = mq->next;
present = mhdr->present;
}
/* Wait for both halves of 2 line messages */
if (present == MQS_FULL && mhdr->lines == 2 &&
get_present2(mhdr) == MQS_EMPTY)
present = MQS_EMPTY;
if (!present) {
STAT(mesq_receive_none);
return NULL;
}
if (mhdr->lines == 2)
restore_present2(mhdr, mhdr->present2);
STAT(mesq_receive);
return mhdr;
}
EXPORT_SYMBOL_GPL(gru_get_next_message);
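/*
 * Illustrative receive loop (a sketch, not in-tree code): drain the queue,
 * advancing past each message with gru_free_message(); example_process()
 * is a hypothetical consumer.
 *
 *	void *m;
 *
 *	while ((m = gru_get_next_message(&mqd)) != NULL) {
 *		example_process(m);
 *		gru_free_message(&mqd, m);
 *	}
 */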
/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/
/*
* Load a DW from a global GPA. The GPA can be a memory or MMR address.
*/
int gru_read_gpa(unsigned long *value, unsigned long gpa)
{
void *cb;
void *dsr;
int ret, iaa;
STAT(read_gpa);
if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
return MQE_BUG_NO_RESOURCES;
iaa = gpa >> 62;
gru_vload_phys(cb, gpa, gru_get_tri(dsr), iaa, IMA);
ret = gru_wait(cb);
if (ret == CBS_IDLE)
*value = *(unsigned long *)dsr;
gru_free_cpu_resources(cb, dsr);
return ret;
}
EXPORT_SYMBOL_GPL(gru_read_gpa);
/*
* Copy a block of data using the GRU resources
*/
int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
unsigned int bytes)
{
void *cb;
void *dsr;
int ret;
STAT(copy_gpa);
if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
return MQE_BUG_NO_RESOURCES;
gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_CL, IMA);
ret = gru_wait(cb);
gru_free_cpu_resources(cb, dsr);
return ret;
}
EXPORT_SYMBOL_GPL(gru_copy_gpa);
/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
/* Temp - will delete after we gain confidence in the GRU */
static int quicktest0(unsigned long arg)
{
unsigned long word0;
unsigned long word1;
void *cb;
void *dsr;
unsigned long *p;
int ret = -EIO;
if (gru_get_cpu_resources(GRU_CACHE_LINE_BYTES, &cb, &dsr))
return MQE_BUG_NO_RESOURCES;
p = dsr;
word0 = MAGIC;
word1 = 0;
gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
if (gru_wait(cb) != CBS_IDLE) {
printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 1\n", smp_processor_id());
goto done;
}
if (*p != MAGIC) {
printk(KERN_DEBUG "GRU:%d quicktest0 bad magic 0x%lx\n", smp_processor_id(), *p);
goto done;
}
gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
if (gru_wait(cb) != CBS_IDLE) {
printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 2\n", smp_processor_id());
goto done;
}
if (word0 != word1 || word1 != MAGIC) {
printk(KERN_DEBUG
"GRU:%d quicktest0 err: found 0x%lx, expected 0x%lx\n",
smp_processor_id(), word1, MAGIC);
goto done;
}
ret = 0;
done:
gru_free_cpu_resources(cb, dsr);
return ret;
}
#define ALIGNUP(p, q) ((void *)(((unsigned long)(p) + (q) - 1) & ~((q) - 1)))
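/*
 * Worked example (illustrative): ALIGNUP(p, 1024) rounds p up to the next
 * 1 KB boundary, e.g. 0x1234 -> 0x1400; q must be a power of two.
 */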
static int quicktest1(unsigned long arg)
{
struct gru_message_queue_desc mqd;
void *p, *mq;
int i, ret = -EIO;
char mes[GRU_CACHE_LINE_BYTES], *m;
/* Need 1K cacheline aligned that does not cross page boundary */
p = kmalloc(4096, GFP_KERNEL);
if (p == NULL)
return -ENOMEM;
mq = ALIGNUP(p, 1024);
memset(mes, 0xee, sizeof(mes));
gru_create_message_queue(&mqd, mq, 8 * GRU_CACHE_LINE_BYTES, 0, 0, 0);
for (i = 0; i < 6; i++) {
mes[8] = i;
do {
ret = gru_send_message_gpa(&mqd, mes, sizeof(mes));
} while (ret == MQE_CONGESTION);
if (ret)
break;
}
if (ret != MQE_QUEUE_FULL || i != 4) {
printk(KERN_DEBUG "GRU:%d quicktest1: unexpected status %d, i %d\n",
smp_processor_id(), ret, i);
goto done;
}
for (i = 0; i < 6; i++) {
m = gru_get_next_message(&mqd);
if (!m || m[8] != i)
break;
gru_free_message(&mqd, m);
}
if (i != 4) {
printk(KERN_DEBUG "GRU:%d quicktest2: bad message, i %d, m %p, m8 %d\n",
smp_processor_id(), i, m, m ? m[8] : -1);
goto done;
}
ret = 0;
done:
kfree(p);
return ret;
}
static int quicktest2(unsigned long arg)
{
static DECLARE_COMPLETION(cmp);
unsigned long han;
int blade_id = 0;
int numcb = 4;
int ret = 0;
unsigned long *buf;
void *cb0, *cb;
struct gru_control_block_status *gen;
int i, k, istatus, bytes;
bytes = numcb * 4 * 8;
buf = kmalloc(bytes, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = -EBUSY;
han = gru_reserve_async_resources(blade_id, numcb, 0, &cmp);
if (!han)
goto done;
gru_lock_async_resource(han, &cb0, NULL);
memset(buf, 0xee, bytes);
for (i = 0; i < numcb; i++)
gru_vset(cb0 + i * GRU_HANDLE_STRIDE, uv_gpa(&buf[i * 4]), 0,
XTYPE_DW, 4, 1, IMA_INTERRUPT);
ret = 0;
k = numcb;
do {
gru_wait_async_cbr(han);
for (i = 0; i < numcb; i++) {
cb = cb0 + i * GRU_HANDLE_STRIDE;
istatus = gru_check_status(cb);
if (istatus != CBS_ACTIVE && istatus != CBS_CALL_OS)
break;
}
if (i == numcb)
continue;
if (istatus != CBS_IDLE) {
printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, exception\n", smp_processor_id(), i);
ret = -EFAULT;
} else if (buf[4 * i] || buf[4 * i + 1] || buf[4 * i + 2] ||
buf[4 * i + 3]) {
printk(KERN_DEBUG "GRU:%d quicktest2:cb %d, buf 0x%lx, 0x%lx, 0x%lx, 0x%lx\n",
smp_processor_id(), i, buf[4 * i], buf[4 * i + 1], buf[4 * i + 2], buf[4 * i + 3]);
ret = -EIO;
}
k--;
gen = cb;
gen->istatus = CBS_CALL_OS; /* don't handle this CBR again */
} while (k);
BUG_ON(cmp.done);
gru_unlock_async_resource(han);
gru_release_async_resources(han);
done:
kfree(buf);
return ret;
}
#define BUFSIZE 200
static int quicktest3(unsigned long arg)
{
char buf1[BUFSIZE], buf2[BUFSIZE];
int ret = 0;
memset(buf2, 0, sizeof(buf2));
memset(buf1, get_cycles() & 255, sizeof(buf1));
gru_copy_gpa(uv_gpa(buf2), uv_gpa(buf1), BUFSIZE);
if (memcmp(buf1, buf2, BUFSIZE)) {
printk(KERN_DEBUG "GRU:%d quicktest3 error\n", smp_processor_id());
ret = -EIO;
}
return ret;
}
/*
* Debugging only. User hook for various kernel tests
* of driver & gru.
*/
int gru_ktest(unsigned long arg)
{
int ret = -EINVAL;
switch (arg & 0xff) {
case 0:
ret = quicktest0(arg);
break;
case 1:
ret = quicktest1(arg);
break;
case 2:
ret = quicktest2(arg);
break;
case 3:
ret = quicktest3(arg);
break;
case 99:
ret = gru_free_kernel_contexts();
break;
}
return ret;
}
int gru_kservices_init(void)
{
return 0;
}
void gru_kservices_exit(void)
{
if (gru_free_kernel_contexts())
BUG();
}
| linux-master | drivers/misc/sgi-gru/grukservices.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* at24.c - handle most I2C EEPROMs
*
* Copyright (C) 2005-2007 David Brownell
* Copyright (C) 2008 Wolfram Sang, Pengutronix
*/
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nvmem-provider.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
/* Address pointer is 16 bit. */
#define AT24_FLAG_ADDR16 BIT(7)
/* sysfs-entry will be read-only. */
#define AT24_FLAG_READONLY BIT(6)
/* sysfs-entry will be world-readable. */
#define AT24_FLAG_IRUGO BIT(5)
/* Always takes 8 addresses (24c00). */
#define AT24_FLAG_TAKE8ADDR BIT(4)
/* Factory-programmed serial number. */
#define AT24_FLAG_SERIAL BIT(3)
/* Factory-programmed mac address. */
#define AT24_FLAG_MAC BIT(2)
/* Does not auto-rollover reads to the next slave address. */
#define AT24_FLAG_NO_RDROL BIT(1)
/*
* I2C EEPROMs from most vendors are inexpensive and mostly interchangeable.
* Differences between different vendor product lines (like Atmel AT24C or
* MicroChip 24LC, etc) won't much matter for typical read/write access.
* There are also I2C RAM chips, likewise interchangeable. One example
* would be the PCF8570, which acts like a 24c02 EEPROM (256 bytes).
*
 * However, misconfiguration can lose data. Sending a "set 16-bit memory
 * address" command to a part with 8-bit addressing will overwrite data.
 * Writing with too big a page size also loses data. And it's not safe to
 * assume that the
* conventional addresses 0x50..0x57 only hold eeproms; a PCF8563 RTC
* uses 0x51, for just one example.
*
* Accordingly, explicit board-specific configuration data should be used
* in almost all cases. (One partial exception is an SMBus used to access
* "SPD" data for DRAM sticks. Those only use 24c02 EEPROMs.)
*
* So this driver uses "new style" I2C driver binding, expecting to be
* told what devices exist. That may be in arch/X/mach-Y/board-Z.c or
* similar kernel-resident tables; or, configuration data coming from
* a bootloader.
*
* Other than binding model, current differences from "eeprom" driver are
* that this one handles write access and isn't restricted to 24c02 devices.
* It also handles larger devices (32 kbit and up) with two-byte addresses,
* which won't work on pure SMBus systems.
*/
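/*
 * A minimal sketch of such board-specific configuration (illustrative
 * only; the chip type, I2C address and bus number are assumptions):
 *
 *	static struct i2c_board_info example_eeprom_info __initdata = {
 *		I2C_BOARD_INFO("24c32", 0x50),
 *	};
 *
 * registered during board setup with something like
 * i2c_register_board_info(1, &example_eeprom_info, 1), or declared as an
 * equivalent "atmel,24c32" node in the devicetree.
 */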
struct at24_data {
/*
* Lock protects against activities from other Linux tasks,
* but not from changes by other I2C masters.
*/
struct mutex lock;
unsigned int write_max;
unsigned int num_addresses;
unsigned int offset_adj;
u32 byte_len;
u16 page_size;
u8 flags;
struct nvmem_device *nvmem;
struct regulator *vcc_reg;
void (*read_post)(unsigned int off, char *buf, size_t count);
/*
* Some chips tie up multiple I2C addresses; dummy devices reserve
* them for us.
*/
u8 bank_addr_shift;
struct regmap *client_regmaps[];
};
/*
* This parameter is to help this driver avoid blocking other drivers out
* of I2C for potentially troublesome amounts of time. With a 100 kHz I2C
* clock, one 256 byte read takes about 1/43 second which is excessive;
* but the 1/170 second it takes at 400 kHz may be quite reasonable; and
* at 1 MHz (Fm+) a 1/430 second delay could easily be invisible.
*
* This value is forced to be a power of two so that writes align on pages.
*/
static unsigned int at24_io_limit = 128;
module_param_named(io_limit, at24_io_limit, uint, 0);
MODULE_PARM_DESC(io_limit, "Maximum bytes per I/O (default 128)");
/*
* Specs often allow 5 msec for a page write, sometimes 20 msec;
* it's important to recover from write timeouts.
*/
static unsigned int at24_write_timeout = 25;
module_param_named(write_timeout, at24_write_timeout, uint, 0);
MODULE_PARM_DESC(write_timeout, "Time (in ms) to try writes (default 25)");
struct at24_chip_data {
u32 byte_len;
u8 flags;
u8 bank_addr_shift;
void (*read_post)(unsigned int off, char *buf, size_t count);
};
#define AT24_CHIP_DATA(_name, _len, _flags) \
static const struct at24_chip_data _name = { \
.byte_len = _len, .flags = _flags, \
}
#define AT24_CHIP_DATA_CB(_name, _len, _flags, _read_post) \
static const struct at24_chip_data _name = { \
.byte_len = _len, .flags = _flags, \
.read_post = _read_post, \
}
#define AT24_CHIP_DATA_BS(_name, _len, _flags, _bank_addr_shift) \
static const struct at24_chip_data _name = { \
.byte_len = _len, .flags = _flags, \
.bank_addr_shift = _bank_addr_shift \
}
static void at24_read_post_vaio(unsigned int off, char *buf, size_t count)
{
int i;
if (capable(CAP_SYS_ADMIN))
return;
/*
* Hide VAIO private settings to regular users:
* - BIOS passwords: bytes 0x00 to 0x0f
* - UUID: bytes 0x10 to 0x1f
* - Serial number: 0xc0 to 0xdf
*/
for (i = 0; i < count; i++) {
if ((off + i <= 0x1f) ||
(off + i >= 0xc0 && off + i <= 0xdf))
buf[i] = 0;
}
}
/* needs 8 addresses as A0-A2 are ignored */
AT24_CHIP_DATA(at24_data_24c00, 128 / 8, AT24_FLAG_TAKE8ADDR);
/* old variants can't be handled with this generic entry! */
AT24_CHIP_DATA(at24_data_24c01, 1024 / 8, 0);
AT24_CHIP_DATA(at24_data_24cs01, 16,
AT24_FLAG_SERIAL | AT24_FLAG_READONLY);
AT24_CHIP_DATA(at24_data_24c02, 2048 / 8, 0);
AT24_CHIP_DATA(at24_data_24cs02, 16,
AT24_FLAG_SERIAL | AT24_FLAG_READONLY);
AT24_CHIP_DATA(at24_data_24mac402, 48 / 8,
AT24_FLAG_MAC | AT24_FLAG_READONLY);
AT24_CHIP_DATA(at24_data_24mac602, 64 / 8,
AT24_FLAG_MAC | AT24_FLAG_READONLY);
/* spd is a 24c02 in memory DIMMs */
AT24_CHIP_DATA(at24_data_spd, 2048 / 8,
AT24_FLAG_READONLY | AT24_FLAG_IRUGO);
/* 24c02_vaio is a 24c02 on some Sony laptops */
AT24_CHIP_DATA_CB(at24_data_24c02_vaio, 2048 / 8,
AT24_FLAG_READONLY | AT24_FLAG_IRUGO,
at24_read_post_vaio);
AT24_CHIP_DATA(at24_data_24c04, 4096 / 8, 0);
AT24_CHIP_DATA(at24_data_24cs04, 16,
AT24_FLAG_SERIAL | AT24_FLAG_READONLY);
/* 24rf08 quirk is handled at i2c-core */
AT24_CHIP_DATA(at24_data_24c08, 8192 / 8, 0);
AT24_CHIP_DATA(at24_data_24cs08, 16,
AT24_FLAG_SERIAL | AT24_FLAG_READONLY);
AT24_CHIP_DATA(at24_data_24c16, 16384 / 8, 0);
AT24_CHIP_DATA(at24_data_24cs16, 16,
AT24_FLAG_SERIAL | AT24_FLAG_READONLY);
AT24_CHIP_DATA(at24_data_24c32, 32768 / 8, AT24_FLAG_ADDR16);
AT24_CHIP_DATA(at24_data_24cs32, 16,
AT24_FLAG_ADDR16 | AT24_FLAG_SERIAL | AT24_FLAG_READONLY);
AT24_CHIP_DATA(at24_data_24c64, 65536 / 8, AT24_FLAG_ADDR16);
AT24_CHIP_DATA(at24_data_24cs64, 16,
AT24_FLAG_ADDR16 | AT24_FLAG_SERIAL | AT24_FLAG_READONLY);
AT24_CHIP_DATA(at24_data_24c128, 131072 / 8, AT24_FLAG_ADDR16);
AT24_CHIP_DATA(at24_data_24c256, 262144 / 8, AT24_FLAG_ADDR16);
AT24_CHIP_DATA(at24_data_24c512, 524288 / 8, AT24_FLAG_ADDR16);
AT24_CHIP_DATA(at24_data_24c1024, 1048576 / 8, AT24_FLAG_ADDR16);
AT24_CHIP_DATA_BS(at24_data_24c1025, 1048576 / 8, AT24_FLAG_ADDR16, 2);
AT24_CHIP_DATA(at24_data_24c2048, 2097152 / 8, AT24_FLAG_ADDR16);
/* identical to 24c08 ? */
AT24_CHIP_DATA(at24_data_INT3499, 8192 / 8, 0);
static const struct i2c_device_id at24_ids[] = {
{ "24c00", (kernel_ulong_t)&at24_data_24c00 },
{ "24c01", (kernel_ulong_t)&at24_data_24c01 },
{ "24cs01", (kernel_ulong_t)&at24_data_24cs01 },
{ "24c02", (kernel_ulong_t)&at24_data_24c02 },
{ "24cs02", (kernel_ulong_t)&at24_data_24cs02 },
{ "24mac402", (kernel_ulong_t)&at24_data_24mac402 },
{ "24mac602", (kernel_ulong_t)&at24_data_24mac602 },
{ "spd", (kernel_ulong_t)&at24_data_spd },
{ "24c02-vaio", (kernel_ulong_t)&at24_data_24c02_vaio },
{ "24c04", (kernel_ulong_t)&at24_data_24c04 },
{ "24cs04", (kernel_ulong_t)&at24_data_24cs04 },
{ "24c08", (kernel_ulong_t)&at24_data_24c08 },
{ "24cs08", (kernel_ulong_t)&at24_data_24cs08 },
{ "24c16", (kernel_ulong_t)&at24_data_24c16 },
{ "24cs16", (kernel_ulong_t)&at24_data_24cs16 },
{ "24c32", (kernel_ulong_t)&at24_data_24c32 },
{ "24cs32", (kernel_ulong_t)&at24_data_24cs32 },
{ "24c64", (kernel_ulong_t)&at24_data_24c64 },
{ "24cs64", (kernel_ulong_t)&at24_data_24cs64 },
{ "24c128", (kernel_ulong_t)&at24_data_24c128 },
{ "24c256", (kernel_ulong_t)&at24_data_24c256 },
{ "24c512", (kernel_ulong_t)&at24_data_24c512 },
{ "24c1024", (kernel_ulong_t)&at24_data_24c1024 },
{ "24c1025", (kernel_ulong_t)&at24_data_24c1025 },
{ "24c2048", (kernel_ulong_t)&at24_data_24c2048 },
{ "at24", 0 },
{ /* END OF LIST */ }
};
MODULE_DEVICE_TABLE(i2c, at24_ids);
static const struct of_device_id at24_of_match[] = {
{ .compatible = "atmel,24c00", .data = &at24_data_24c00 },
{ .compatible = "atmel,24c01", .data = &at24_data_24c01 },
{ .compatible = "atmel,24cs01", .data = &at24_data_24cs01 },
{ .compatible = "atmel,24c02", .data = &at24_data_24c02 },
{ .compatible = "atmel,24cs02", .data = &at24_data_24cs02 },
{ .compatible = "atmel,24mac402", .data = &at24_data_24mac402 },
{ .compatible = "atmel,24mac602", .data = &at24_data_24mac602 },
{ .compatible = "atmel,spd", .data = &at24_data_spd },
{ .compatible = "atmel,24c04", .data = &at24_data_24c04 },
{ .compatible = "atmel,24cs04", .data = &at24_data_24cs04 },
{ .compatible = "atmel,24c08", .data = &at24_data_24c08 },
{ .compatible = "atmel,24cs08", .data = &at24_data_24cs08 },
{ .compatible = "atmel,24c16", .data = &at24_data_24c16 },
{ .compatible = "atmel,24cs16", .data = &at24_data_24cs16 },
{ .compatible = "atmel,24c32", .data = &at24_data_24c32 },
{ .compatible = "atmel,24cs32", .data = &at24_data_24cs32 },
{ .compatible = "atmel,24c64", .data = &at24_data_24c64 },
{ .compatible = "atmel,24cs64", .data = &at24_data_24cs64 },
{ .compatible = "atmel,24c128", .data = &at24_data_24c128 },
{ .compatible = "atmel,24c256", .data = &at24_data_24c256 },
{ .compatible = "atmel,24c512", .data = &at24_data_24c512 },
{ .compatible = "atmel,24c1024", .data = &at24_data_24c1024 },
{ .compatible = "atmel,24c1025", .data = &at24_data_24c1025 },
{ .compatible = "atmel,24c2048", .data = &at24_data_24c2048 },
{ /* END OF LIST */ },
};
MODULE_DEVICE_TABLE(of, at24_of_match);
static const struct acpi_device_id __maybe_unused at24_acpi_ids[] = {
{ "INT3499", (kernel_ulong_t)&at24_data_INT3499 },
{ "TPF0001", (kernel_ulong_t)&at24_data_24c1024 },
{ /* END OF LIST */ }
};
MODULE_DEVICE_TABLE(acpi, at24_acpi_ids);
/*
* This routine supports chips which consume multiple I2C addresses. It
* computes the addressing information to be used for a given r/w request.
 * Assumes that sanity checks for the offset happened at the sysfs layer.
*
* Slave address and byte offset derive from the offset. Always
* set the byte address; on a multi-master board, another master
* may have changed the chip's "current" address pointer.
*/
static struct regmap *at24_translate_offset(struct at24_data *at24,
unsigned int *offset)
{
unsigned int i;
if (at24->flags & AT24_FLAG_ADDR16) {
i = *offset >> 16;
*offset &= 0xffff;
} else {
i = *offset >> 8;
*offset &= 0xff;
}
return at24->client_regmaps[i];
}
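/*
 * Worked example (illustrative): for a chip with 8-bit addressing that
 * spans several slave addresses, *offset == 0x1a5 selects
 * client_regmaps[1] and leaves *offset == 0xa5; with AT24_FLAG_ADDR16 the
 * split happens at 64 KiB boundaries instead.
 */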
static struct device *at24_base_client_dev(struct at24_data *at24)
{
return regmap_get_device(at24->client_regmaps[0]);
}
static size_t at24_adjust_read_count(struct at24_data *at24,
unsigned int offset, size_t count)
{
unsigned int bits;
size_t remainder;
/*
* In case of multi-address chips that don't rollover reads to
* the next slave address: truncate the count to the slave boundary,
* so that the read never straddles slaves.
*/
if (at24->flags & AT24_FLAG_NO_RDROL) {
bits = (at24->flags & AT24_FLAG_ADDR16) ? 16 : 8;
remainder = BIT(bits) - offset;
if (count > remainder)
count = remainder;
}
if (count > at24_io_limit)
count = at24_io_limit;
return count;
}
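/*
 * Worked example (illustrative): with AT24_FLAG_NO_RDROL on an 8-bit
 * address chip, a read of 0x30 bytes at offset 0xf0 is truncated to
 * 0x10 bytes so it stops at the slave boundary; the loop in at24_read()
 * then issues a follow-up read for the rest.
 */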
static ssize_t at24_regmap_read(struct at24_data *at24, char *buf,
unsigned int offset, size_t count)
{
unsigned long timeout, read_time;
struct regmap *regmap;
int ret;
regmap = at24_translate_offset(at24, &offset);
count = at24_adjust_read_count(at24, offset, count);
/* adjust offset for mac and serial read ops */
offset += at24->offset_adj;
timeout = jiffies + msecs_to_jiffies(at24_write_timeout);
do {
/*
* The timestamp shall be taken before the actual operation
* to avoid a premature timeout in case of high CPU load.
*/
read_time = jiffies;
ret = regmap_bulk_read(regmap, offset, buf, count);
dev_dbg(regmap_get_device(regmap), "read %zu@%d --> %d (%ld)\n",
count, offset, ret, jiffies);
if (!ret)
return count;
usleep_range(1000, 1500);
} while (time_before(read_time, timeout));
return -ETIMEDOUT;
}
/*
* Note that if the hardware write-protect pin is pulled high, the whole
* chip is normally write protected. But there are plenty of product
* variants here, including OTP fuses and partial chip protect.
*
* We only use page mode writes; the alternative is sloooow. These routines
* write at most one page.
*/
static size_t at24_adjust_write_count(struct at24_data *at24,
unsigned int offset, size_t count)
{
unsigned int next_page;
/* write_max is at most a page */
if (count > at24->write_max)
count = at24->write_max;
/* Never roll over backwards, to the start of this page */
next_page = roundup(offset + 1, at24->page_size);
if (offset + count > next_page)
count = next_page - offset;
return count;
}
static ssize_t at24_regmap_write(struct at24_data *at24, const char *buf,
unsigned int offset, size_t count)
{
unsigned long timeout, write_time;
struct regmap *regmap;
int ret;
regmap = at24_translate_offset(at24, &offset);
count = at24_adjust_write_count(at24, offset, count);
timeout = jiffies + msecs_to_jiffies(at24_write_timeout);
do {
/*
* The timestamp shall be taken before the actual operation
* to avoid a premature timeout in case of high CPU load.
*/
write_time = jiffies;
ret = regmap_bulk_write(regmap, offset, buf, count);
dev_dbg(regmap_get_device(regmap), "write %zu@%d --> %d (%ld)\n",
count, offset, ret, jiffies);
if (!ret)
return count;
usleep_range(1000, 1500);
} while (time_before(write_time, timeout));
return -ETIMEDOUT;
}
static int at24_read(void *priv, unsigned int off, void *val, size_t count)
{
struct at24_data *at24;
struct device *dev;
char *buf = val;
int i, ret;
at24 = priv;
dev = at24_base_client_dev(at24);
if (unlikely(!count))
return count;
if (off + count > at24->byte_len)
return -EINVAL;
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(dev);
return ret;
}
/*
* Read data from chip, protecting against concurrent updates
* from this host, but not from other I2C masters.
*/
mutex_lock(&at24->lock);
for (i = 0; count; i += ret, count -= ret) {
ret = at24_regmap_read(at24, buf + i, off + i, count);
if (ret < 0) {
mutex_unlock(&at24->lock);
pm_runtime_put(dev);
return ret;
}
}
mutex_unlock(&at24->lock);
pm_runtime_put(dev);
if (unlikely(at24->read_post))
at24->read_post(off, buf, i);
return 0;
}
static int at24_write(void *priv, unsigned int off, void *val, size_t count)
{
struct at24_data *at24;
struct device *dev;
char *buf = val;
int ret;
at24 = priv;
dev = at24_base_client_dev(at24);
if (unlikely(!count))
return -EINVAL;
if (off + count > at24->byte_len)
return -EINVAL;
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(dev);
return ret;
}
/*
* Write data to chip, protecting against concurrent updates
* from this host, but not from other I2C masters.
*/
mutex_lock(&at24->lock);
while (count) {
ret = at24_regmap_write(at24, buf, off, count);
if (ret < 0) {
mutex_unlock(&at24->lock);
pm_runtime_put(dev);
return ret;
}
buf += ret;
off += ret;
count -= ret;
}
mutex_unlock(&at24->lock);
pm_runtime_put(dev);
return 0;
}
static const struct at24_chip_data *at24_get_chip_data(struct device *dev)
{
struct device_node *of_node = dev->of_node;
const struct at24_chip_data *cdata;
const struct i2c_device_id *id;
id = i2c_match_id(at24_ids, to_i2c_client(dev));
/*
* The I2C core allows OF nodes compatibles to match against the
* I2C device ID table as a fallback, so check not only if an OF
* node is present but also if it matches an OF device ID entry.
*/
if (of_node && of_match_device(at24_of_match, dev))
cdata = of_device_get_match_data(dev);
else if (id)
cdata = (void *)id->driver_data;
else
cdata = acpi_device_get_match_data(dev);
if (!cdata)
return ERR_PTR(-ENODEV);
return cdata;
}
static int at24_make_dummy_client(struct at24_data *at24, unsigned int index,
struct i2c_client *base_client,
struct regmap_config *regmap_config)
{
struct i2c_client *dummy_client;
struct regmap *regmap;
dummy_client = devm_i2c_new_dummy_device(&base_client->dev,
base_client->adapter,
base_client->addr +
(index << at24->bank_addr_shift));
if (IS_ERR(dummy_client))
return PTR_ERR(dummy_client);
regmap = devm_regmap_init_i2c(dummy_client, regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
at24->client_regmaps[index] = regmap;
return 0;
}
static unsigned int at24_get_offset_adj(u8 flags, unsigned int byte_len)
{
if (flags & AT24_FLAG_MAC) {
/* EUI-48 starts from 0x9a, EUI-64 from 0x98 */
return 0xa0 - byte_len;
} else if (flags & AT24_FLAG_SERIAL && flags & AT24_FLAG_ADDR16) {
/*
* For 16 bit address pointers, the word address must contain
* a '10' sequence in bits 11 and 10 regardless of the
* intended position of the address pointer.
*/
return 0x0800;
} else if (flags & AT24_FLAG_SERIAL) {
/*
* Otherwise the word address must begin with a '10' sequence,
* regardless of the intended address.
*/
return 0x0080;
} else {
return 0;
}
}
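/*
 * Worked example (illustrative): a 24mac402 has byte_len = 6 and
 * AT24_FLAG_MAC set, so offset_adj = 0xa0 - 6 = 0x9a, matching the note
 * above that its EUI-48 starts at 0x9a.
 */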
static int at24_probe(struct i2c_client *client)
{
struct regmap_config regmap_config = { };
struct nvmem_config nvmem_config = { };
u32 byte_len, page_size, flags, addrw;
const struct at24_chip_data *cdata;
struct device *dev = &client->dev;
bool i2c_fn_i2c, i2c_fn_block;
unsigned int i, num_addresses;
struct at24_data *at24;
bool full_power;
struct regmap *regmap;
bool writable;
u8 test_byte;
int err;
i2c_fn_i2c = i2c_check_functionality(client->adapter, I2C_FUNC_I2C);
i2c_fn_block = i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WRITE_I2C_BLOCK);
cdata = at24_get_chip_data(dev);
if (IS_ERR(cdata))
return PTR_ERR(cdata);
err = device_property_read_u32(dev, "pagesize", &page_size);
if (err)
/*
* This is slow, but we can't know all eeproms, so we better
* play safe. Specifying custom eeprom-types via device tree
* or properties is recommended anyhow.
*/
page_size = 1;
flags = cdata->flags;
if (device_property_present(dev, "read-only"))
flags |= AT24_FLAG_READONLY;
if (device_property_present(dev, "no-read-rollover"))
flags |= AT24_FLAG_NO_RDROL;
err = device_property_read_u32(dev, "address-width", &addrw);
if (!err) {
switch (addrw) {
case 8:
if (flags & AT24_FLAG_ADDR16)
dev_warn(dev,
"Override address width to be 8, while default is 16\n");
flags &= ~AT24_FLAG_ADDR16;
break;
case 16:
flags |= AT24_FLAG_ADDR16;
break;
default:
dev_warn(dev, "Bad \"address-width\" property: %u\n",
addrw);
}
}
err = device_property_read_u32(dev, "size", &byte_len);
if (err)
byte_len = cdata->byte_len;
if (!i2c_fn_i2c && !i2c_fn_block)
page_size = 1;
if (!page_size) {
dev_err(dev, "page_size must not be 0!\n");
return -EINVAL;
}
if (!is_power_of_2(page_size))
dev_warn(dev, "page_size looks suspicious (no power of 2)!\n");
err = device_property_read_u32(dev, "num-addresses", &num_addresses);
if (err) {
if (flags & AT24_FLAG_TAKE8ADDR)
num_addresses = 8;
else
num_addresses = DIV_ROUND_UP(byte_len,
(flags & AT24_FLAG_ADDR16) ? 65536 : 256);
}
if ((flags & AT24_FLAG_SERIAL) && (flags & AT24_FLAG_MAC)) {
dev_err(dev,
"invalid device data - cannot have both AT24_FLAG_SERIAL & AT24_FLAG_MAC.");
return -EINVAL;
}
regmap_config.val_bits = 8;
regmap_config.reg_bits = (flags & AT24_FLAG_ADDR16) ? 16 : 8;
regmap_config.disable_locking = true;
regmap = devm_regmap_init_i2c(client, &regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
at24 = devm_kzalloc(dev, struct_size(at24, client_regmaps, num_addresses),
GFP_KERNEL);
if (!at24)
return -ENOMEM;
mutex_init(&at24->lock);
at24->byte_len = byte_len;
at24->page_size = page_size;
at24->flags = flags;
at24->read_post = cdata->read_post;
at24->bank_addr_shift = cdata->bank_addr_shift;
at24->num_addresses = num_addresses;
at24->offset_adj = at24_get_offset_adj(flags, byte_len);
at24->client_regmaps[0] = regmap;
at24->vcc_reg = devm_regulator_get(dev, "vcc");
if (IS_ERR(at24->vcc_reg))
return PTR_ERR(at24->vcc_reg);
writable = !(flags & AT24_FLAG_READONLY);
if (writable) {
at24->write_max = min_t(unsigned int,
page_size, at24_io_limit);
if (!i2c_fn_i2c && at24->write_max > I2C_SMBUS_BLOCK_MAX)
at24->write_max = I2C_SMBUS_BLOCK_MAX;
}
/* use dummy devices for multiple-address chips */
for (i = 1; i < num_addresses; i++) {
err = at24_make_dummy_client(at24, i, client, &regmap_config);
if (err)
return err;
}
/*
 * We initialize nvmem_config.id to NVMEM_DEVID_AUTO even if the
 * label property is set, as some platforms can have multiple EEPROMs
 * with the same label and we cannot register each of those under the
 * same label. Failing to register those EEPROMs triggers a cascade
 * failure on such platforms.
*/
nvmem_config.id = NVMEM_DEVID_AUTO;
if (device_property_present(dev, "label")) {
err = device_property_read_string(dev, "label",
&nvmem_config.name);
if (err)
return err;
} else {
nvmem_config.name = dev_name(dev);
}
nvmem_config.type = NVMEM_TYPE_EEPROM;
nvmem_config.dev = dev;
nvmem_config.read_only = !writable;
nvmem_config.root_only = !(flags & AT24_FLAG_IRUGO);
nvmem_config.owner = THIS_MODULE;
nvmem_config.compat = true;
nvmem_config.base_dev = dev;
nvmem_config.reg_read = at24_read;
nvmem_config.reg_write = at24_write;
nvmem_config.priv = at24;
nvmem_config.stride = 1;
nvmem_config.word_size = 1;
nvmem_config.size = byte_len;
i2c_set_clientdata(client, at24);
full_power = acpi_dev_state_d0(&client->dev);
if (full_power) {
err = regulator_enable(at24->vcc_reg);
if (err) {
dev_err(dev, "Failed to enable vcc regulator\n");
return err;
}
pm_runtime_set_active(dev);
}
pm_runtime_enable(dev);
at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
if (IS_ERR(at24->nvmem)) {
pm_runtime_disable(dev);
if (!pm_runtime_status_suspended(dev))
regulator_disable(at24->vcc_reg);
return dev_err_probe(dev, PTR_ERR(at24->nvmem),
"failed to register nvmem\n");
}
/*
* Perform a one-byte test read to verify that the chip is functional,
* unless powering on the device is to be avoided during probe (i.e.
* it's powered off right now).
*/
if (full_power) {
err = at24_read(at24, 0, &test_byte, 1);
if (err) {
pm_runtime_disable(dev);
if (!pm_runtime_status_suspended(dev))
regulator_disable(at24->vcc_reg);
return -ENODEV;
}
}
pm_runtime_idle(dev);
if (writable)
dev_info(dev, "%u byte %s EEPROM, writable, %u bytes/write\n",
byte_len, client->name, at24->write_max);
else
dev_info(dev, "%u byte %s EEPROM, read-only\n",
byte_len, client->name);
return 0;
}
static void at24_remove(struct i2c_client *client)
{
struct at24_data *at24 = i2c_get_clientdata(client);
pm_runtime_disable(&client->dev);
if (acpi_dev_state_d0(&client->dev)) {
if (!pm_runtime_status_suspended(&client->dev))
regulator_disable(at24->vcc_reg);
pm_runtime_set_suspended(&client->dev);
}
}
static int __maybe_unused at24_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct at24_data *at24 = i2c_get_clientdata(client);
return regulator_disable(at24->vcc_reg);
}
static int __maybe_unused at24_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct at24_data *at24 = i2c_get_clientdata(client);
return regulator_enable(at24->vcc_reg);
}
static const struct dev_pm_ops at24_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(at24_suspend, at24_resume, NULL)
};
static struct i2c_driver at24_driver = {
.driver = {
.name = "at24",
.pm = &at24_pm_ops,
.of_match_table = at24_of_match,
.acpi_match_table = ACPI_PTR(at24_acpi_ids),
},
.probe = at24_probe,
.remove = at24_remove,
.id_table = at24_ids,
.flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
};
static int __init at24_init(void)
{
if (!at24_io_limit) {
pr_err("at24: at24_io_limit must not be 0!\n");
return -EINVAL;
}
at24_io_limit = rounddown_pow_of_two(at24_io_limit);
return i2c_add_driver(&at24_driver);
}
module_init(at24_init);
static void __exit at24_exit(void)
{
i2c_del_driver(&at24_driver);
}
module_exit(at24_exit);
MODULE_DESCRIPTION("Driver for most I2C EEPROMs");
MODULE_AUTHOR("David Brownell and Wolfram Sang");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/eeprom/at24.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 T-Platforms. All Rights Reserved.
*
* IDT PCIe-switch NTB Linux driver
*
* Contact Information:
* Serge Semin <[email protected]>, <[email protected]>
*/
/*
 * NOTE on the IDT 89HPESx SMBus-slave interface driver
 * This driver is primarily developed to provide access to the EEPROM device
 * of IDT PCIe-switches. IDT provides a simple SMBus interface to perform IO-
 * operations from/to EEPROM, which is located at the private (so called
 * Master) SMBus of the switches. Using that interface the driver creates a
 * simple binary sysfs-file in the device directory:
 * /sys/bus/i2c/devices/<bus>-<devaddr>/eeprom
 * If the read-only flag is specified in the dts-node of the device
 * description, user-space applications won't be able to write to the EEPROM
 * sysfs-node.
 * Additionally the IDT 89HPESx SMBus interface has the ability to write/read
 * data of the device CSRs. This driver exposes a debugfs-file to perform
 * simple IO operations using that ability for basic debug purposes. In
 * particular, the following file is created in the specific
 * debugfs-directory:
 * /sys/kernel/debug/idt_csr/
 * Format of the debugfs-node is:
 * $ cat /sys/kernel/debug/idt_csr/<bus>-<devaddr>/<devname>;
 * <CSR address>:<CSR value>
 * So reading the content of the file gives the current CSR address and its
 * value. If a user-space application wishes to change the current CSR
 * address, it can just write a proper value to the sysfs-file:
 * $ echo "<CSR address>" > /sys/kernel/debug/idt_csr/<bus>-<devaddr>/<devname>
 * If it wants to change the CSR value as well, the format of the write
 * operation is:
 * $ echo "<CSR address>:<CSR value>" > \
 * /sys/kernel/debug/idt_csr/<bus>-<devaddr>/<devname>;
 * CSR address and value can be in hexadecimal, decimal or octal format.
*/
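/*
 * For example, the first 16 bytes of EEPROM could be dumped from user-space
 * with (illustrative; the bus and device address depend on the board):
 * $ dd if=/sys/bus/i2c/devices/1-0060/eeprom bs=1 count=16 | xxd
 */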
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/i2c.h>
#include <linux/pci_ids.h>
#include <linux/delay.h>
#define IDT_NAME "89hpesx"
#define IDT_89HPESX_DESC "IDT 89HPESx SMBus-slave interface driver"
#define IDT_89HPESX_VER "1.0"
MODULE_DESCRIPTION(IDT_89HPESX_DESC);
MODULE_VERSION(IDT_89HPESX_VER);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("T-platforms");
/*
* csr_dbgdir - CSR read/write operations Debugfs directory
*/
static struct dentry *csr_dbgdir;
/*
* struct idt_89hpesx_dev - IDT 89HPESx device data structure
* @eesize: Size of EEPROM in bytes (calculated from "idt,eecompatible")
* @eero: EEPROM Read-only flag
* @eeaddr: EEPROM custom address
*
* @inieecmd: Initial cmd value for EEPROM read/write operations
* @inicsrcmd: Initial cmd value for CSR read/write operations
 * @iniccode:	Initial command code value for IO-operations
*
* @csr: CSR address to perform read operation
*
* @smb_write: SMBus write method
* @smb_read: SMBus read method
* @smb_mtx: SMBus mutex
*
* @client: i2c client used to perform IO operations
*
 * @ee_file:	EEPROM read/write sysfs-file
 * @csr_dir:	CSR read/write operations debugfs directory
 */
struct idt_smb_seq;
struct idt_89hpesx_dev {
u32 eesize;
bool eero;
u8 eeaddr;
u8 inieecmd;
u8 inicsrcmd;
u8 iniccode;
u16 csr;
int (*smb_write)(struct idt_89hpesx_dev *, const struct idt_smb_seq *);
int (*smb_read)(struct idt_89hpesx_dev *, struct idt_smb_seq *);
struct mutex smb_mtx;
struct i2c_client *client;
struct bin_attribute *ee_file;
struct dentry *csr_dir;
};
/*
* struct idt_smb_seq - sequence of data to be read/written from/to IDT 89HPESx
* @ccode: SMBus command code
* @bytecnt: Byte count of operation
 * @data:	Data to be written
*/
struct idt_smb_seq {
u8 ccode;
u8 bytecnt;
u8 *data;
};
/*
* struct idt_eeprom_seq - sequence of data to be read/written from/to EEPROM
* @cmd: Transaction CMD
* @eeaddr: EEPROM custom address
* @memaddr: Internal memory address of EEPROM
* @data: Data to be written at the memory address
*/
struct idt_eeprom_seq {
u8 cmd;
u8 eeaddr;
u16 memaddr;
u8 data;
} __packed;
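/*
 * Illustrative wire layout (assuming cmd = EEPROM_OP_READ | EEPROM_USA,
 * eeaddr = 0x50, memaddr = 0x0123): since the structure is packed and
 * memaddr is sent little-endian, the bytes on the bus are
 * { 0x03, 0x50, 0x23, 0x01, <data> }.
 */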
/*
* struct idt_csr_seq - sequence of data to be read/written from/to CSR
* @cmd: Transaction CMD
* @csraddr: Internal IDT device CSR address
* @data: Data to be read/written from/to the CSR address
*/
struct idt_csr_seq {
u8 cmd;
u16 csraddr;
u32 data;
} __packed;
/*
* SMBus command code macros
* @CCODE_END: Indicates the end of transaction
* @CCODE_START: Indicates the start of transaction
* @CCODE_CSR: CSR read/write transaction
* @CCODE_EEPROM: EEPROM read/write transaction
* @CCODE_BYTE: Supplied data has BYTE length
* @CCODE_WORD: Supplied data has WORD length
 * @CCODE_BLOCK:	Supplied data has variable length, passed in the bytecnt
 *			byte right after the CCODE byte
*/
#define CCODE_END ((u8)0x01)
#define CCODE_START ((u8)0x02)
#define CCODE_CSR ((u8)0x00)
#define CCODE_EEPROM ((u8)0x04)
#define CCODE_BYTE ((u8)0x00)
#define CCODE_WORD ((u8)0x20)
#define CCODE_BLOCK ((u8)0x40)
#define CCODE_PEC ((u8)0x80)
/*
* EEPROM command macros
* @EEPROM_OP_WRITE: EEPROM write operation
* @EEPROM_OP_READ: EEPROM read operation
* @EEPROM_USA: Use specified address of EEPROM
* @EEPROM_NAERR: EEPROM device is not ready to respond
* @EEPROM_LAERR: EEPROM arbitration loss error
* @EEPROM_MSS: EEPROM misplace start & stop bits error
* @EEPROM_WR_CNT: Bytes count to perform write operation
* @EEPROM_WRRD_CNT: Bytes count to write before reading
* @EEPROM_RD_CNT: Bytes count to perform read operation
 * @EEPROM_DEF_SIZE:	Fallback size of EEPROM
 * @EEPROM_DEF_ADDR:	Default EEPROM address
 * @EEPROM_TOUT:	Timeout in ms before retrying a read if EEPROM is busy
*/
#define EEPROM_OP_WRITE ((u8)0x00)
#define EEPROM_OP_READ ((u8)0x01)
#define EEPROM_USA ((u8)0x02)
#define EEPROM_NAERR ((u8)0x08)
#define EEPROM_LAERR ((u8)0x10)
#define EEPROM_MSS ((u8)0x20)
#define EEPROM_WR_CNT ((u8)5)
#define EEPROM_WRRD_CNT ((u8)4)
#define EEPROM_RD_CNT ((u8)5)
#define EEPROM_DEF_SIZE ((u16)4096)
#define EEPROM_DEF_ADDR ((u8)0x50)
#define EEPROM_TOUT (100)
/*
* CSR command macros
* @CSR_DWE: Enable all four bytes of the operation
* @CSR_OP_WRITE: CSR write operation
* @CSR_OP_READ: CSR read operation
* @CSR_RERR: Read operation error
* @CSR_WERR: Write operation error
* @CSR_WR_CNT: Bytes count to perform write operation
* @CSR_WRRD_CNT: Bytes count to write before reading
* @CSR_RD_CNT: Bytes count to perform read operation
* @CSR_MAX: Maximum CSR address
* @CSR_DEF: Default CSR address
* @CSR_REAL_ADDR: CSR real unshifted address
*/
#define CSR_DWE ((u8)0x0F)
#define CSR_OP_WRITE ((u8)0x00)
#define CSR_OP_READ ((u8)0x10)
#define CSR_RERR ((u8)0x40)
#define CSR_WERR ((u8)0x80)
#define CSR_WR_CNT ((u8)7)
#define CSR_WRRD_CNT ((u8)3)
#define CSR_RD_CNT ((u8)7)
#define CSR_MAX ((u32)0x3FFFF)
#define CSR_DEF ((u16)0x0000)
#define CSR_REAL_ADDR(val) ((unsigned int)(val) << 2)
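/* For example, CSR_REAL_ADDR(0x0004) == 0x10: CSR addresses count DWORDs. */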
/*
* IDT 89HPESx basic register
* @IDT_VIDDID_CSR: PCIe VID and DID of IDT 89HPESx
* @IDT_VID_MASK: Mask of VID
*/
#define IDT_VIDDID_CSR ((u32)0x0000)
#define IDT_VID_MASK ((u32)0xFFFF)
/*
 * IDT 89HPESx can send a NACK when a new command is sent before the
 * previous one has finished execution. In this case the driver retries
 * the operation a certain number of times.
* @RETRY_CNT: Number of retries before giving up and fail
* @idt_smb_safe: Generate a retry loop on corresponding SMBus method
*/
#define RETRY_CNT (128)
#define idt_smb_safe(ops, args...) ({ \
int __retry = RETRY_CNT; \
s32 __sts; \
do { \
__sts = i2c_smbus_ ## ops ## _data(args); \
} while (__retry-- && __sts < 0); \
__sts; \
})
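/*
 * For example (illustrative), idt_smb_safe(write_byte, client, ccode, val)
 * expands to a loop that retries i2c_smbus_write_byte_data(client, ccode,
 * val) up to RETRY_CNT times while it keeps returning a negative errno.
 */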
/*===========================================================================
* i2c bus level IO-operations
*===========================================================================
*/
/*
* idt_smb_write_byte() - SMBus write method when I2C_SMBUS_BYTE_DATA operation
* is only available
* @pdev: Pointer to the driver data
* @seq: Sequence of data to be written
*/
static int idt_smb_write_byte(struct idt_89hpesx_dev *pdev,
const struct idt_smb_seq *seq)
{
s32 sts;
u8 ccode;
int idx;
/* Loop over the supplied data sending bytes one-by-one */
for (idx = 0; idx < seq->bytecnt; idx++) {
/* Collect the command code byte */
ccode = seq->ccode | CCODE_BYTE;
if (idx == 0)
ccode |= CCODE_START;
if (idx == seq->bytecnt - 1)
ccode |= CCODE_END;
/* Send data to the device */
sts = idt_smb_safe(write_byte, pdev->client, ccode,
seq->data[idx]);
if (sts != 0)
return (int)sts;
}
return 0;
}
/*
* idt_smb_read_byte() - SMBus read method when I2C_SMBUS_BYTE_DATA operation
* is only available
* @pdev: Pointer to the driver data
* @seq: Buffer to read data to
*/
static int idt_smb_read_byte(struct idt_89hpesx_dev *pdev,
struct idt_smb_seq *seq)
{
s32 sts;
u8 ccode;
int idx;
/* Loop over the supplied buffer receiving bytes one-by-one */
for (idx = 0; idx < seq->bytecnt; idx++) {
/* Collect the command code byte */
ccode = seq->ccode | CCODE_BYTE;
if (idx == 0)
ccode |= CCODE_START;
if (idx == seq->bytecnt - 1)
ccode |= CCODE_END;
/* Read data from the device */
sts = idt_smb_safe(read_byte, pdev->client, ccode);
if (sts < 0)
return (int)sts;
seq->data[idx] = (u8)sts;
}
return 0;
}
/*
* idt_smb_write_word() - SMBus write method when I2C_SMBUS_BYTE_DATA and
* I2C_FUNC_SMBUS_WORD_DATA operations are available
* @pdev: Pointer to the driver data
* @seq: Sequence of data to be written
*/
static int idt_smb_write_word(struct idt_89hpesx_dev *pdev,
const struct idt_smb_seq *seq)
{
s32 sts;
u8 ccode;
int idx, evencnt;
/* Calculate the even count of data to send */
evencnt = seq->bytecnt - (seq->bytecnt % 2);
/* Loop over the supplied data sending two bytes at a time */
for (idx = 0; idx < evencnt; idx += 2) {
/* Collect the command code byte */
ccode = seq->ccode | CCODE_WORD;
if (idx == 0)
ccode |= CCODE_START;
if (idx == evencnt - 2)
ccode |= CCODE_END;
/* Send word data to the device */
sts = idt_smb_safe(write_word, pdev->client, ccode,
*(u16 *)&seq->data[idx]);
if (sts != 0)
return (int)sts;
}
/* If there is an odd number of bytes then send just one last byte */
if (seq->bytecnt != evencnt) {
/* Collect the command code byte */
ccode = seq->ccode | CCODE_BYTE | CCODE_END;
if (idx == 0)
ccode |= CCODE_START;
/* Send byte data to the device */
sts = idt_smb_safe(write_byte, pdev->client, ccode,
seq->data[idx]);
if (sts != 0)
return (int)sts;
}
return 0;
}
/*
* idt_smb_read_word() - SMBus read method when I2C_SMBUS_BYTE_DATA and
* I2C_FUNC_SMBUS_WORD_DATA operations are available
* @pdev: Pointer to the driver data
* @seq: Buffer to read data to
*/
static int idt_smb_read_word(struct idt_89hpesx_dev *pdev,
struct idt_smb_seq *seq)
{
s32 sts;
u8 ccode;
int idx, evencnt;
/* Calculate the even count of data to receive */
evencnt = seq->bytecnt - (seq->bytecnt % 2);
/* Loop over the supplied data reading two bytes at a time */
for (idx = 0; idx < evencnt; idx += 2) {
/* Collect the command code byte */
ccode = seq->ccode | CCODE_WORD;
if (idx == 0)
ccode |= CCODE_START;
if (idx == evencnt - 2)
ccode |= CCODE_END;
/* Read word data from the device */
sts = idt_smb_safe(read_word, pdev->client, ccode);
if (sts < 0)
return (int)sts;
*(u16 *)&seq->data[idx] = (u16)sts;
}
/* If there is an odd number of bytes then receive just one last byte */
if (seq->bytecnt != evencnt) {
/* Collect the command code byte */
ccode = seq->ccode | CCODE_BYTE | CCODE_END;
if (idx == 0)
ccode |= CCODE_START;
/* Read last data byte from the device */
sts = idt_smb_safe(read_byte, pdev->client, ccode);
if (sts < 0)
return (int)sts;
seq->data[idx] = (u8)sts;
}
return 0;
}
/*
* idt_smb_write_block() - SMBus write method when I2C_SMBUS_BLOCK_DATA
* operation is available
* @pdev: Pointer to the driver data
* @seq: Sequence of data to be written
*/
static int idt_smb_write_block(struct idt_89hpesx_dev *pdev,
const struct idt_smb_seq *seq)
{
u8 ccode;
/* Return error if too much data is passed to send */
if (seq->bytecnt > I2C_SMBUS_BLOCK_MAX)
return -EINVAL;
/* Collect the command code byte */
ccode = seq->ccode | CCODE_BLOCK | CCODE_START | CCODE_END;
/* Send block of data to the device */
return idt_smb_safe(write_block, pdev->client, ccode, seq->bytecnt,
seq->data);
}
/*
* idt_smb_read_block() - SMBus read method when I2C_SMBUS_BLOCK_DATA
* operation is available
* @pdev: Pointer to the driver data
* @seq: Buffer to read data to
*/
static int idt_smb_read_block(struct idt_89hpesx_dev *pdev,
struct idt_smb_seq *seq)
{
s32 sts;
u8 ccode;
/* Return error if too much data is requested to be read */
if (seq->bytecnt > I2C_SMBUS_BLOCK_MAX)
return -EINVAL;
/* Collect the command code byte */
ccode = seq->ccode | CCODE_BLOCK | CCODE_START | CCODE_END;
/* Read block of data from the device */
sts = idt_smb_safe(read_block, pdev->client, ccode, seq->data);
if (sts != seq->bytecnt)
return (sts < 0 ? sts : -ENODATA);
return 0;
}
/*
* idt_smb_write_i2c_block() - SMBus write method when I2C_SMBUS_I2C_BLOCK_DATA
* operation is available
* @pdev: Pointer to the driver data
* @seq: Sequence of data to be written
*
 * NOTE: It's the usual SMBus block write operation, except the actual data
 * length is sent as the first byte of data
*/
static int idt_smb_write_i2c_block(struct idt_89hpesx_dev *pdev,
const struct idt_smb_seq *seq)
{
u8 ccode, buf[I2C_SMBUS_BLOCK_MAX + 1];
/* Return error if too much data is passed to send */
if (seq->bytecnt > I2C_SMBUS_BLOCK_MAX)
return -EINVAL;
/* Collect the data to send. Length byte must be added prior the data */
buf[0] = seq->bytecnt;
memcpy(&buf[1], seq->data, seq->bytecnt);
/* Collect the command code byte */
ccode = seq->ccode | CCODE_BLOCK | CCODE_START | CCODE_END;
/* Send length and block of data to the device */
return idt_smb_safe(write_i2c_block, pdev->client, ccode,
seq->bytecnt + 1, buf);
}
/*
* idt_smb_read_i2c_block() - SMBus read method when I2C_SMBUS_I2C_BLOCK_DATA
* operation is available
* @pdev: Pointer to the driver data
* @seq: Buffer to read data to
*
 * NOTE: It's the usual SMBus block read operation, except the actual data
 * length is retrieved as the first byte of data
*/
static int idt_smb_read_i2c_block(struct idt_89hpesx_dev *pdev,
struct idt_smb_seq *seq)
{
u8 ccode, buf[I2C_SMBUS_BLOCK_MAX + 1];
s32 sts;
/* Return error if too much data is requested to be read */
if (seq->bytecnt > I2C_SMBUS_BLOCK_MAX)
return -EINVAL;
/* Collect the command code byte */
ccode = seq->ccode | CCODE_BLOCK | CCODE_START | CCODE_END;
/* Read length and block of data from the device */
sts = idt_smb_safe(read_i2c_block, pdev->client, ccode,
seq->bytecnt + 1, buf);
if (sts != seq->bytecnt + 1)
return (sts < 0 ? sts : -ENODATA);
if (buf[0] != seq->bytecnt)
return -ENODATA;
/* Copy retrieved data to the output data buffer */
memcpy(seq->data, &buf[1], seq->bytecnt);
return 0;
}
/*===========================================================================
* EEPROM IO-operations
*===========================================================================
*/
/*
* idt_eeprom_read_byte() - read just one byte from EEPROM
* @pdev: Pointer to the driver data
 * @memaddr:	EEPROM memory address to read from
 * @data:	Pointer to the location to store the byte read from EEPROM
*/
static int idt_eeprom_read_byte(struct idt_89hpesx_dev *pdev, u16 memaddr,
u8 *data)
{
struct device *dev = &pdev->client->dev;
struct idt_eeprom_seq eeseq;
struct idt_smb_seq smbseq;
int ret, retry;
/* Initialize SMBus sequence fields */
smbseq.ccode = pdev->iniccode | CCODE_EEPROM;
smbseq.data = (u8 *)&eeseq;
/*
 * Sometimes the EEPROM may respond with a NACK if it's busy with a
 * previous operation, so we need to perform a few attempts of the read
 * cycle
*/
retry = RETRY_CNT;
do {
/* Send EEPROM memory address to read data from */
smbseq.bytecnt = EEPROM_WRRD_CNT;
eeseq.cmd = pdev->inieecmd | EEPROM_OP_READ;
eeseq.eeaddr = pdev->eeaddr;
eeseq.memaddr = cpu_to_le16(memaddr);
ret = pdev->smb_write(pdev, &smbseq);
if (ret != 0) {
dev_err(dev, "Failed to init eeprom addr 0x%02x",
memaddr);
break;
}
/* Perform read operation */
smbseq.bytecnt = EEPROM_RD_CNT;
ret = pdev->smb_read(pdev, &smbseq);
if (ret != 0) {
dev_err(dev, "Failed to read eeprom data 0x%02x",
memaddr);
break;
}
/* Restart read operation if the device is busy */
if (retry && (eeseq.cmd & EEPROM_NAERR)) {
dev_dbg(dev, "EEPROM busy, retry reading after %d ms",
EEPROM_TOUT);
msleep(EEPROM_TOUT);
continue;
}
/* Check whether IDT successfully read data from EEPROM */
if (eeseq.cmd & (EEPROM_NAERR | EEPROM_LAERR | EEPROM_MSS)) {
dev_err(dev,
"Communication with eeprom failed, cmd 0x%hhx",
eeseq.cmd);
ret = -EREMOTEIO;
break;
}
/* Save retrieved data and exit the loop */
*data = eeseq.data;
break;
} while (retry--);
/* Return the status of operation */
return ret;
}
/*
* idt_eeprom_write() - EEPROM write operation
* @pdev: Pointer to the driver data
* @memaddr: Start EEPROM memory address
* @len: Length of data to be written
* @data: Data to be written to EEPROM
*/
static int idt_eeprom_write(struct idt_89hpesx_dev *pdev, u16 memaddr, u16 len,
const u8 *data)
{
struct device *dev = &pdev->client->dev;
struct idt_eeprom_seq eeseq;
struct idt_smb_seq smbseq;
int ret;
u16 idx;
/* Initialize SMBus sequence fields */
smbseq.ccode = pdev->iniccode | CCODE_EEPROM;
smbseq.data = (u8 *)&eeseq;
/* Send data byte-by-byte, checking if it is successfully written */
for (idx = 0; idx < len; idx++, memaddr++) {
/* Lock IDT SMBus device */
mutex_lock(&pdev->smb_mtx);
/* Perform write operation */
smbseq.bytecnt = EEPROM_WR_CNT;
eeseq.cmd = pdev->inieecmd | EEPROM_OP_WRITE;
eeseq.eeaddr = pdev->eeaddr;
eeseq.memaddr = cpu_to_le16(memaddr);
eeseq.data = data[idx];
ret = pdev->smb_write(pdev, &smbseq);
if (ret != 0) {
dev_err(dev,
"Failed to write 0x%04hx:0x%02hhx to eeprom",
memaddr, data[idx]);
goto err_mutex_unlock;
}
/*
* Check whether the data is successfully written by reading
* from the same EEPROM memory address.
*/
eeseq.data = ~data[idx];
ret = idt_eeprom_read_byte(pdev, memaddr, &eeseq.data);
if (ret != 0)
goto err_mutex_unlock;
/* Check whether the read byte is the same as written one */
if (eeseq.data != data[idx]) {
dev_err(dev, "Values don't match 0x%02hhx != 0x%02hhx",
eeseq.data, data[idx]);
ret = -EREMOTEIO;
goto err_mutex_unlock;
}
/* Unlock IDT SMBus device */
err_mutex_unlock:
mutex_unlock(&pdev->smb_mtx);
if (ret != 0)
return ret;
}
return 0;
}
/*
* idt_eeprom_read() - EEPROM read operation
* @pdev: Pointer to the driver data
* @memaddr: Start EEPROM memory address
* @len: Length of data to read
* @buf: Buffer to read data to
*/
static int idt_eeprom_read(struct idt_89hpesx_dev *pdev, u16 memaddr, u16 len,
u8 *buf)
{
int ret;
u16 idx;
/* Read data byte-by-byte, retrying if it wasn't successful */
for (idx = 0; idx < len; idx++, memaddr++) {
/* Lock IDT SMBus device */
mutex_lock(&pdev->smb_mtx);
/* Just read the byte to the buffer */
ret = idt_eeprom_read_byte(pdev, memaddr, &buf[idx]);
/* Unlock IDT SMBus device */
mutex_unlock(&pdev->smb_mtx);
/* Return error if read operation failed */
if (ret != 0)
return ret;
}
return 0;
}
/*===========================================================================
* CSR IO-operations
*===========================================================================
*/
/*
* idt_csr_write() - CSR write operation
* @pdev: Pointer to the driver data
 * @csraddr:	CSR address (without the two LS bits)
* @data: Data to be written to CSR
*/
static int idt_csr_write(struct idt_89hpesx_dev *pdev, u16 csraddr,
const u32 data)
{
struct device *dev = &pdev->client->dev;
struct idt_csr_seq csrseq;
struct idt_smb_seq smbseq;
int ret;
/* Initialize SMBus sequence fields */
smbseq.ccode = pdev->iniccode | CCODE_CSR;
smbseq.data = (u8 *)&csrseq;
/* Lock IDT SMBus device */
mutex_lock(&pdev->smb_mtx);
/* Perform write operation */
smbseq.bytecnt = CSR_WR_CNT;
csrseq.cmd = pdev->inicsrcmd | CSR_OP_WRITE;
csrseq.csraddr = cpu_to_le16(csraddr);
csrseq.data = cpu_to_le32(data);
ret = pdev->smb_write(pdev, &smbseq);
if (ret != 0) {
dev_err(dev, "Failed to write 0x%04x: 0x%04x to csr",
CSR_REAL_ADDR(csraddr), data);
goto err_mutex_unlock;
}
/* Send CSR address to read data from */
smbseq.bytecnt = CSR_WRRD_CNT;
csrseq.cmd = pdev->inicsrcmd | CSR_OP_READ;
ret = pdev->smb_write(pdev, &smbseq);
if (ret != 0) {
dev_err(dev, "Failed to init csr address 0x%04x",
CSR_REAL_ADDR(csraddr));
goto err_mutex_unlock;
}
/* Perform read operation */
smbseq.bytecnt = CSR_RD_CNT;
ret = pdev->smb_read(pdev, &smbseq);
if (ret != 0) {
dev_err(dev, "Failed to read csr 0x%04x",
CSR_REAL_ADDR(csraddr));
goto err_mutex_unlock;
}
/* Check whether IDT successfully retrieved CSR data */
if (csrseq.cmd & (CSR_RERR | CSR_WERR)) {
dev_err(dev, "IDT failed to perform CSR r/w");
ret = -EREMOTEIO;
goto err_mutex_unlock;
}
/* Unlock IDT SMBus device */
err_mutex_unlock:
mutex_unlock(&pdev->smb_mtx);
return ret;
}
/*
* idt_csr_read() - CSR read operation
* @pdev: Pointer to the driver data
* @csraddr: CSR address (with the two LS bits dropped)
* @data: Pointer to the location the CSR value is read into
*/
static int idt_csr_read(struct idt_89hpesx_dev *pdev, u16 csraddr, u32 *data)
{
struct device *dev = &pdev->client->dev;
struct idt_csr_seq csrseq;
struct idt_smb_seq smbseq;
int ret;
/* Initialize SMBus sequence fields */
smbseq.ccode = pdev->iniccode | CCODE_CSR;
smbseq.data = (u8 *)&csrseq;
/* Lock IDT SMBus device */
mutex_lock(&pdev->smb_mtx);
/* Send CSR register address before reading it */
smbseq.bytecnt = CSR_WRRD_CNT;
csrseq.cmd = pdev->inicsrcmd | CSR_OP_READ;
csrseq.csraddr = cpu_to_le16(csraddr);
ret = pdev->smb_write(pdev, &smbseq);
if (ret != 0) {
dev_err(dev, "Failed to init csr address 0x%04x",
CSR_REAL_ADDR(csraddr));
goto err_mutex_unlock;
}
/* Perform read operation */
smbseq.bytecnt = CSR_RD_CNT;
ret = pdev->smb_read(pdev, &smbseq);
if (ret != 0) {
dev_err(dev, "Failed to read csr 0x%04x",
CSR_REAL_ADDR(csraddr));
goto err_mutex_unlock;
}
/* Check whether IDT successfully retrieved CSR data */
if (csrseq.cmd & (CSR_RERR | CSR_WERR)) {
dev_err(dev, "IDT failed to perform CSR r/w");
ret = -EREMOTEIO;
goto err_mutex_unlock;
}
/* Save data retrieved from IDT */
*data = le32_to_cpu(csrseq.data);
/* Unlock IDT SMBus device */
err_mutex_unlock:
mutex_unlock(&pdev->smb_mtx);
return ret;
}
/*===========================================================================
* Sysfs/debugfs-nodes IO-operations
*===========================================================================
*/
/*
* eeprom_write() - EEPROM sysfs-node write callback
* @filp: Pointer to the file system node
* @kobj: Pointer to the kernel object related to the sysfs-node
* @attr: Attributes of the file
* @buf: Buffer with the data to be written
* @off: Offset at which the data should be written
* @count: Number of bytes to write
*/
static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct idt_89hpesx_dev *pdev;
int ret;
/* Retrieve driver data */
pdev = dev_get_drvdata(kobj_to_dev(kobj));
/* Perform EEPROM write operation */
ret = idt_eeprom_write(pdev, (u16)off, (u16)count, (u8 *)buf);
return (ret != 0 ? ret : count);
}
/*
* eeprom_read() - EEPROM sysfs-node read callback
* @filp: Pointer to the file system node
* @kobj: Pointer to the kernel object related to the sysfs-node
* @attr: Attributes of the file
* @buf: Buffer the read data is copied into
* @off: Offset to read the data from
* @count: Number of bytes to read
*/
static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct idt_89hpesx_dev *pdev;
int ret;
/* Retrieve driver data */
pdev = dev_get_drvdata(kobj_to_dev(kobj));
/* Perform EEPROM read operation */
ret = idt_eeprom_read(pdev, (u16)off, (u16)count, (u8 *)buf);
return (ret != 0 ? ret : count);
}
/*
* idt_dbgfs_csr_write() - CSR debugfs-node write callback
* @filep: Pointer to the file system file descriptor
* @ubuf: User-space buffer to read the data from
* @count: Size of the buffer
* @offp: Offset within the file
*
* It accepts either "0x<reg addr>:0x<value>" for saving register address
* and writing value to specified DWORD register or "0x<reg addr>" for
* just saving register address in order to perform next read operation.
*
* WARNING No spaces are allowed. The incoming string must be strictly
* formatted as "<reg addr>:<value>". The register address must be aligned
* within 4 bytes (one DWORD).
*/
static ssize_t idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf,
size_t count, loff_t *offp)
{
struct idt_89hpesx_dev *pdev = filep->private_data;
char *colon_ch, *csraddr_str, *csrval_str;
int ret, csraddr_len;
u32 csraddr, csrval;
char *buf;
if (*offp)
return 0;
/* Copy data from User-space */
buf = memdup_user_nul(ubuf, count);
if (IS_ERR(buf))
return PTR_ERR(buf);
/* Find position of colon in the buffer */
colon_ch = strnchr(buf, count, ':');
/*
* If a colon is present, the new CSR value must be parsed as well, so
* allocate a buffer for the CSR address substring. If no colon is
* found, the string must contain just one number and no new CSR value.
*/
if (colon_ch != NULL) {
csraddr_len = colon_ch - buf;
csraddr_str =
kmalloc(csraddr_len + 1, GFP_KERNEL);
if (csraddr_str == NULL) {
ret = -ENOMEM;
goto free_buf;
}
/* Copy the register address to the substring buffer */
strncpy(csraddr_str, buf, csraddr_len);
csraddr_str[csraddr_len] = '\0';
/* Register value must follow the colon */
csrval_str = colon_ch + 1;
} else /* if (str_colon == NULL) */ {
csraddr_str = (char *)buf; /* Just to silence a compiler warning */
csraddr_len = strnlen(csraddr_str, count);
csrval_str = NULL;
}
/* Convert CSR address to u32 value */
ret = kstrtou32(csraddr_str, 0, &csraddr);
if (ret != 0)
goto free_csraddr_str;
/* Check whether passed register address is valid */
if (csraddr > CSR_MAX || !IS_ALIGNED(csraddr, SZ_4)) {
ret = -EINVAL;
goto free_csraddr_str;
}
/* Shift the register address right by two bits so it fits into a u16 */
pdev->csr = (csraddr >> 2);
/* Parse new CSR value and send it to IDT, if colon has been found */
if (colon_ch != NULL) {
ret = kstrtou32(csrval_str, 0, &csrval);
if (ret != 0)
goto free_csraddr_str;
ret = idt_csr_write(pdev, pdev->csr, csrval);
if (ret != 0)
goto free_csraddr_str;
}
/* Free memory only if colon has been found */
free_csraddr_str:
if (colon_ch != NULL)
kfree(csraddr_str);
/* Free buffer allocated for data retrieved from User-space */
free_buf:
kfree(buf);
return (ret != 0 ? ret : count);
}
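/*
* Illustrative user-space sketch (not part of the driver): driving the
* CSR debugfs node described above. The node path and the register
* address 0x3e8 are hypothetical; the real path depends on the i2c bus
* number and client address used under the "idt_csr" directory.
*/
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int csr_poke(const char *node)
{
	/* Select DWORD register 0x3e8 and write 0x12345678 to it */
	static const char cmd[] = "0x3e8:0x12345678";
	int fd = open(node, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, cmd, strlen(cmd));
	close(fd);
	return n < 0 ? -1 : 0;
}
#endif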
/*
* idt_dbgfs_csr_read() - CSR debugfs-node read callback
* @filep: Pointer to the file system file descriptor
* @ubuf: User-space buffer to write the data to
* @count: Size of the buffer
* @offp: Offset within the file
*
* It just prints the pair "0x<reg addr>:0x<value>" to passed buffer.
*/
#define CSRBUF_SIZE ((size_t)32)
static ssize_t idt_dbgfs_csr_read(struct file *filep, char __user *ubuf,
size_t count, loff_t *offp)
{
struct idt_89hpesx_dev *pdev = filep->private_data;
u32 csraddr, csrval;
char buf[CSRBUF_SIZE];
int ret, size;
/* Perform CSR read operation */
ret = idt_csr_read(pdev, pdev->csr, &csrval);
if (ret != 0)
return ret;
/* Shift the register address left by two bits to restore the real address */
csraddr = ((u32)pdev->csr << 2);
/* Print the "0x<reg addr>:0x<value>" to buffer */
size = snprintf(buf, CSRBUF_SIZE, "0x%05x:0x%08x\n",
(unsigned int)csraddr, (unsigned int)csrval);
/* Copy data to User-space */
return simple_read_from_buffer(ubuf, count, offp, buf, size);
}
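/*
* Companion sketch (illustrative): reading the node back returns one
* "0x<reg addr>:0x<value>" line for the last selected register, exactly
* as printed by idt_dbgfs_csr_read() above.
*/
#if 0
#include <fcntl.h>
#include <unistd.h>

static int csr_peek(const char *node, char *line, size_t len)
{
	int fd = open(node, O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = read(fd, line, len - 1);
	close(fd);
	if (n < 0)
		return -1;
	line[n] = '\0';		/* e.g. "0x003e8:0x12345678\n" */
	return 0;
}
#endif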
/*
* eeprom_attribute - EEPROM sysfs-node attributes
*
* NOTE Size will be changed in compliance with OF node. EEPROM attribute will
* be read-only as well if the corresponding flag is specified in OF node.
*/
static BIN_ATTR_RW(eeprom, EEPROM_DEF_SIZE);
/*
* csr_dbgfs_ops - CSR debugfs-node read/write operations
*/
static const struct file_operations csr_dbgfs_ops = {
.owner = THIS_MODULE,
.open = simple_open,
.write = idt_dbgfs_csr_write,
.read = idt_dbgfs_csr_read
};
/*===========================================================================
* Driver init/deinit methods
*===========================================================================
*/
/*
* idt_set_defval() - disable EEPROM access by default
* @pdev: Pointer to the driver data
*/
static void idt_set_defval(struct idt_89hpesx_dev *pdev)
{
/* If OF info is missing, use the following defaults */
pdev->eesize = 0;
pdev->eero = true;
pdev->inieecmd = 0;
pdev->eeaddr = 0;
}
static const struct i2c_device_id ee_ids[];
/*
* idt_ee_match_id() - check whether the node belongs to compatible EEPROMs
*/
static const struct i2c_device_id *idt_ee_match_id(struct fwnode_handle *fwnode)
{
const struct i2c_device_id *id = ee_ids;
const char *compatible, *p;
char devname[I2C_NAME_SIZE];
int ret;
ret = fwnode_property_read_string(fwnode, "compatible", &compatible);
if (ret)
return NULL;
p = strchr(compatible, ',');
strscpy(devname, p ? p + 1 : compatible, sizeof(devname));
/* Search through the device name */
while (id->name[0]) {
if (strcmp(devname, id->name) == 0)
return id;
id++;
}
return NULL;
}
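/*
* Example (illustrative): a child node with compatible = "atmel,24c32"
* is trimmed past the vendor prefix to "24c32", which matches the
* ee_ids[] entry below carrying driver_data = 4096, i.e. the EEPROM
* size in bytes.
*/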
/*
* idt_get_fw_data() - get IDT i2c-device parameters from device tree
* @pdev: Pointer to the driver data
*/
static void idt_get_fw_data(struct idt_89hpesx_dev *pdev)
{
struct device *dev = &pdev->client->dev;
struct fwnode_handle *fwnode;
const struct i2c_device_id *ee_id = NULL;
u32 eeprom_addr;
int ret;
device_for_each_child_node(dev, fwnode) {
ee_id = idt_ee_match_id(fwnode);
if (ee_id)
break;
dev_warn(dev, "Skip unsupported EEPROM device %pfw\n", fwnode);
}
/* If there is no fwnode EEPROM device, then set zero size */
if (!ee_id) {
dev_warn(dev, "No fwnode, EEPROM access disabled");
idt_set_defval(pdev);
return;
}
/* Retrieve EEPROM size */
pdev->eesize = (u32)ee_id->driver_data;
/* Get the custom EEPROM address from the 'reg' property */
ret = fwnode_property_read_u32(fwnode, "reg", &eeprom_addr);
if (ret || (eeprom_addr == 0)) {
dev_warn(dev, "No EEPROM reg found, use default address 0x%x",
EEPROM_DEF_ADDR);
pdev->inieecmd = 0;
pdev->eeaddr = EEPROM_DEF_ADDR << 1;
} else {
pdev->inieecmd = EEPROM_USA;
pdev->eeaddr = eeprom_addr << 1;
}
/* Check EEPROM 'read-only' flag */
if (fwnode_property_read_bool(fwnode, "read-only"))
pdev->eero = true;
else /* if (!fwnode_property_read_bool(node, "read-only")) */
pdev->eero = false;
fwnode_handle_put(fwnode);
dev_info(dev, "EEPROM of %d bytes found by 0x%x",
pdev->eesize, pdev->eeaddr);
}
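/*
* Hypothetical device-tree fragment matching the lookups above (node
* names, unit addresses and the EEPROM model are illustrative only):
*
*	idt@74 {
*		compatible = "idt,89hpes32nt8ag2";
*		reg = <0x74>;
*
*		eeprom@50 {
*			compatible = "atmel,24c64";
*			reg = <0x50>;
*			read-only;
*		};
*	};
*/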
/*
* idt_create_pdev() - create and init data structure of the driver
* @client: i2c client of IDT PCIe-switch device
*/
static struct idt_89hpesx_dev *idt_create_pdev(struct i2c_client *client)
{
struct idt_89hpesx_dev *pdev;
/* Allocate memory for driver data */
pdev = devm_kmalloc(&client->dev, sizeof(struct idt_89hpesx_dev),
GFP_KERNEL);
if (pdev == NULL)
return ERR_PTR(-ENOMEM);
/* Initialize basic fields of the data */
pdev->client = client;
i2c_set_clientdata(client, pdev);
/* Read firmware nodes information */
idt_get_fw_data(pdev);
/* Initialize basic CSR CMD field - use full DWORD-sized r/w ops */
pdev->inicsrcmd = CSR_DWE;
pdev->csr = CSR_DEF;
/* Enable Packet Error Checking if it's supported by adapter */
if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_PEC)) {
pdev->iniccode = CCODE_PEC;
client->flags |= I2C_CLIENT_PEC;
} else /* PEC is unsupported */ {
pdev->iniccode = 0;
}
return pdev;
}
/*
* idt_free_pdev() - free data structure of the driver
* @pdev: Pointer to the driver data
*/
static void idt_free_pdev(struct idt_89hpesx_dev *pdev)
{
/* Clear driver data from device private field */
i2c_set_clientdata(pdev->client, NULL);
}
/*
* idt_set_smbus_ops() - set supported SMBus operations
* @pdev: Pointer to the driver data
* Return status of smbus check operations
*/
static int idt_set_smbus_ops(struct idt_89hpesx_dev *pdev)
{
struct i2c_adapter *adapter = pdev->client->adapter;
struct device *dev = &pdev->client->dev;
/* Check i2c adapter read functionality */
if (i2c_check_functionality(adapter,
I2C_FUNC_SMBUS_READ_BLOCK_DATA)) {
pdev->smb_read = idt_smb_read_block;
dev_dbg(dev, "SMBus block-read op chosen");
} else if (i2c_check_functionality(adapter,
I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
pdev->smb_read = idt_smb_read_i2c_block;
dev_dbg(dev, "SMBus i2c-block-read op chosen");
} else if (i2c_check_functionality(adapter,
I2C_FUNC_SMBUS_READ_WORD_DATA) &&
i2c_check_functionality(adapter,
I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
pdev->smb_read = idt_smb_read_word;
dev_warn(dev, "Use slow word/byte SMBus read ops");
} else if (i2c_check_functionality(adapter,
I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
pdev->smb_read = idt_smb_read_byte;
dev_warn(dev, "Use slow byte SMBus read op");
} else /* no supported smbus read operations */ {
dev_err(dev, "No supported SMBus read op");
return -EPFNOSUPPORT;
}
/* Check i2c adapter write functionality */
if (i2c_check_functionality(adapter,
I2C_FUNC_SMBUS_WRITE_BLOCK_DATA)) {
pdev->smb_write = idt_smb_write_block;
dev_dbg(dev, "SMBus block-write op chosen");
} else if (i2c_check_functionality(adapter,
I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)) {
pdev->smb_write = idt_smb_write_i2c_block;
dev_dbg(dev, "SMBus i2c-block-write op chosen");
} else if (i2c_check_functionality(adapter,
I2C_FUNC_SMBUS_WRITE_WORD_DATA) &&
i2c_check_functionality(adapter,
I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) {
pdev->smb_write = idt_smb_write_word;
dev_warn(dev, "Use slow word/byte SMBus write op");
} else if (i2c_check_functionality(adapter,
I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) {
pdev->smb_write = idt_smb_write_byte;
dev_warn(dev, "Use slow byte SMBus write op");
} else /* no supported smbus write operations */ {
dev_err(dev, "No supported SMBus write op");
return -EPFNOSUPPORT;
}
/* Initialize IDT SMBus slave interface mutex */
mutex_init(&pdev->smb_mtx);
return 0;
}
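/*
* The ladder above deliberately prefers block transfers, which move a
* whole SMBus sequence in a single transaction, and only falls back to
* the word/byte accessors, which need one transaction per word or byte
* of the sequence; hence the "slow" warnings on the fallback paths.
*/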
/*
* idt_check_dev() - check whether it's really an IDT 89HPESx device
* @pdev: Pointer to the driver data
* Return status of i2c adapter check operation
*/
static int idt_check_dev(struct idt_89hpesx_dev *pdev)
{
struct device *dev = &pdev->client->dev;
u32 viddid;
int ret;
/* Read VID and DID directly from IDT memory space */
ret = idt_csr_read(pdev, IDT_VIDDID_CSR, &viddid);
if (ret != 0) {
dev_err(dev, "Failed to read VID/DID");
return ret;
}
/* Check whether it's an IDT device */
if ((viddid & IDT_VID_MASK) != PCI_VENDOR_ID_IDT) {
dev_err(dev, "Got unsupported VID/DID: 0x%08x", viddid);
return -ENODEV;
}
dev_info(dev, "Found IDT 89HPES device VID:0x%04x, DID:0x%04x",
(viddid & IDT_VID_MASK), (viddid >> 16));
return 0;
}
/*
* idt_create_sysfs_files() - create sysfs attribute files
* @pdev: Pointer to the driver data
* Return status of operation
*/
static int idt_create_sysfs_files(struct idt_89hpesx_dev *pdev)
{
struct device *dev = &pdev->client->dev;
int ret;
/* Don't do anything if EEPROM isn't accessible */
if (pdev->eesize == 0) {
dev_dbg(dev, "Skip creating sysfs-files");
return 0;
}
/*
* Allocate memory for the attribute file and copy the declared EEPROM
* attr structure so some of its fields can be changed
*/
pdev->ee_file = devm_kmemdup(dev, &bin_attr_eeprom,
sizeof(*pdev->ee_file), GFP_KERNEL);
if (!pdev->ee_file)
return -ENOMEM;
/* In case of read-only EEPROM get rid of write ability */
if (pdev->eero) {
pdev->ee_file->attr.mode &= ~0200;
pdev->ee_file->write = NULL;
}
/* Create EEPROM sysfs file */
pdev->ee_file->size = pdev->eesize;
ret = sysfs_create_bin_file(&dev->kobj, pdev->ee_file);
if (ret != 0) {
dev_err(dev, "Failed to create EEPROM sysfs-node");
return ret;
}
return 0;
}
/*
* idt_remove_sysfs_files() - remove sysfs attribute files
* @pdev: Pointer to the driver data
*/
static void idt_remove_sysfs_files(struct idt_89hpesx_dev *pdev)
{
struct device *dev = &pdev->client->dev;
/* Don't do anything if EEPROM wasn't accessible */
if (pdev->eesize == 0)
return;
/* Remove EEPROM sysfs file */
sysfs_remove_bin_file(&dev->kobj, pdev->ee_file);
}
/*
* idt_create_dbgfs_files() - create debugfs files
* @pdev: Pointer to the driver data
*/
#define CSRNAME_LEN ((size_t)32)
static void idt_create_dbgfs_files(struct idt_89hpesx_dev *pdev)
{
struct i2c_client *cli = pdev->client;
char fname[CSRNAME_LEN];
/* Create Debugfs directory for CSR file */
snprintf(fname, CSRNAME_LEN, "%d-%04hx", cli->adapter->nr, cli->addr);
pdev->csr_dir = debugfs_create_dir(fname, csr_dbgdir);
/* Create Debugfs file for CSR read/write operations */
debugfs_create_file(cli->name, 0600, pdev->csr_dir, pdev,
&csr_dbgfs_ops);
}
/*
* idt_remove_dbgfs_files() - remove debugfs files
* @pdev: Pointer to the driver data
*/
static void idt_remove_dbgfs_files(struct idt_89hpesx_dev *pdev)
{
/* Remove the CSR directory and its debugfs node */
debugfs_remove_recursive(pdev->csr_dir);
}
/*
* idt_probe() - IDT 89HPESx driver probe() callback method
*/
static int idt_probe(struct i2c_client *client)
{
struct idt_89hpesx_dev *pdev;
int ret;
/* Create driver data */
pdev = idt_create_pdev(client);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
/* Set SMBus operations */
ret = idt_set_smbus_ops(pdev);
if (ret != 0)
goto err_free_pdev;
/* Check whether it is truly an IDT 89HPESx device */
ret = idt_check_dev(pdev);
if (ret != 0)
goto err_free_pdev;
/* Create sysfs files */
ret = idt_create_sysfs_files(pdev);
if (ret != 0)
goto err_free_pdev;
/* Create debugfs files */
idt_create_dbgfs_files(pdev);
return 0;
err_free_pdev:
idt_free_pdev(pdev);
return ret;
}
/*
* idt_remove() - IDT 89HPESx driver remove() callback method
*/
static void idt_remove(struct i2c_client *client)
{
struct idt_89hpesx_dev *pdev = i2c_get_clientdata(client);
/* Remove debugfs files first */
idt_remove_dbgfs_files(pdev);
/* Remove sysfs files */
idt_remove_sysfs_files(pdev);
/* Discard driver data structure */
idt_free_pdev(pdev);
}
/*
* ee_ids - array of supported EEPROMs
*/
static const struct i2c_device_id ee_ids[] = {
{ "24c32", 4096},
{ "24c64", 8192},
{ "24c128", 16384},
{ "24c256", 32768},
{ "24c512", 65536},
{}
};
MODULE_DEVICE_TABLE(i2c, ee_ids);
/*
* idt_ids - supported IDT 89HPESx devices
*/
static const struct i2c_device_id idt_ids[] = {
{ "89hpes8nt2", 0 },
{ "89hpes12nt3", 0 },
{ "89hpes24nt6ag2", 0 },
{ "89hpes32nt8ag2", 0 },
{ "89hpes32nt8bg2", 0 },
{ "89hpes12nt12g2", 0 },
{ "89hpes16nt16g2", 0 },
{ "89hpes24nt24g2", 0 },
{ "89hpes32nt24ag2", 0 },
{ "89hpes32nt24bg2", 0 },
{ "89hpes12n3", 0 },
{ "89hpes12n3a", 0 },
{ "89hpes24n3", 0 },
{ "89hpes24n3a", 0 },
{ "89hpes32h8", 0 },
{ "89hpes32h8g2", 0 },
{ "89hpes48h12", 0 },
{ "89hpes48h12g2", 0 },
{ "89hpes48h12ag2", 0 },
{ "89hpes16h16", 0 },
{ "89hpes22h16", 0 },
{ "89hpes22h16g2", 0 },
{ "89hpes34h16", 0 },
{ "89hpes34h16g2", 0 },
{ "89hpes64h16", 0 },
{ "89hpes64h16g2", 0 },
{ "89hpes64h16ag2", 0 },
/* { "89hpes3t3", 0 }, // No SMBus-slave iface */
{ "89hpes12t3g2", 0 },
{ "89hpes24t3g2", 0 },
/* { "89hpes4t4", 0 }, // No SMBus-slave iface */
{ "89hpes16t4", 0 },
{ "89hpes4t4g2", 0 },
{ "89hpes10t4g2", 0 },
{ "89hpes16t4g2", 0 },
{ "89hpes16t4ag2", 0 },
{ "89hpes5t5", 0 },
{ "89hpes6t5", 0 },
{ "89hpes8t5", 0 },
{ "89hpes8t5a", 0 },
{ "89hpes24t6", 0 },
{ "89hpes6t6g2", 0 },
{ "89hpes24t6g2", 0 },
{ "89hpes16t7", 0 },
{ "89hpes32t8", 0 },
{ "89hpes32t8g2", 0 },
{ "89hpes48t12", 0 },
{ "89hpes48t12g2", 0 },
{ /* END OF LIST */ }
};
MODULE_DEVICE_TABLE(i2c, idt_ids);
static const struct of_device_id idt_of_match[] = {
{ .compatible = "idt,89hpes8nt2", },
{ .compatible = "idt,89hpes12nt3", },
{ .compatible = "idt,89hpes24nt6ag2", },
{ .compatible = "idt,89hpes32nt8ag2", },
{ .compatible = "idt,89hpes32nt8bg2", },
{ .compatible = "idt,89hpes12nt12g2", },
{ .compatible = "idt,89hpes16nt16g2", },
{ .compatible = "idt,89hpes24nt24g2", },
{ .compatible = "idt,89hpes32nt24ag2", },
{ .compatible = "idt,89hpes32nt24bg2", },
{ .compatible = "idt,89hpes12n3", },
{ .compatible = "idt,89hpes12n3a", },
{ .compatible = "idt,89hpes24n3", },
{ .compatible = "idt,89hpes24n3a", },
{ .compatible = "idt,89hpes32h8", },
{ .compatible = "idt,89hpes32h8g2", },
{ .compatible = "idt,89hpes48h12", },
{ .compatible = "idt,89hpes48h12g2", },
{ .compatible = "idt,89hpes48h12ag2", },
{ .compatible = "idt,89hpes16h16", },
{ .compatible = "idt,89hpes22h16", },
{ .compatible = "idt,89hpes22h16g2", },
{ .compatible = "idt,89hpes34h16", },
{ .compatible = "idt,89hpes34h16g2", },
{ .compatible = "idt,89hpes64h16", },
{ .compatible = "idt,89hpes64h16g2", },
{ .compatible = "idt,89hpes64h16ag2", },
{ .compatible = "idt,89hpes12t3g2", },
{ .compatible = "idt,89hpes24t3g2", },
{ .compatible = "idt,89hpes16t4", },
{ .compatible = "idt,89hpes4t4g2", },
{ .compatible = "idt,89hpes10t4g2", },
{ .compatible = "idt,89hpes16t4g2", },
{ .compatible = "idt,89hpes16t4ag2", },
{ .compatible = "idt,89hpes5t5", },
{ .compatible = "idt,89hpes6t5", },
{ .compatible = "idt,89hpes8t5", },
{ .compatible = "idt,89hpes8t5a", },
{ .compatible = "idt,89hpes24t6", },
{ .compatible = "idt,89hpes6t6g2", },
{ .compatible = "idt,89hpes24t6g2", },
{ .compatible = "idt,89hpes16t7", },
{ .compatible = "idt,89hpes32t8", },
{ .compatible = "idt,89hpes32t8g2", },
{ .compatible = "idt,89hpes48t12", },
{ .compatible = "idt,89hpes48t12g2", },
{ },
};
MODULE_DEVICE_TABLE(of, idt_of_match);
/*
* idt_driver - IDT 89HPESx driver structure
*/
static struct i2c_driver idt_driver = {
.driver = {
.name = IDT_NAME,
.of_match_table = idt_of_match,
},
.probe = idt_probe,
.remove = idt_remove,
.id_table = idt_ids,
};
/*
* idt_init() - IDT 89HPESx driver init() callback method
*/
static int __init idt_init(void)
{
int ret;
/* Create Debugfs directory first */
if (debugfs_initialized())
csr_dbgdir = debugfs_create_dir("idt_csr", NULL);
/* Add new i2c-device driver */
ret = i2c_add_driver(&idt_driver);
if (ret) {
debugfs_remove_recursive(csr_dbgdir);
return ret;
}
return 0;
}
module_init(idt_init);
/*
* idt_exit() - IDT 89HPESx driver exit() callback method
*/
static void __exit idt_exit(void)
{
/* Discard debugfs directory and all files if any */
debugfs_remove_recursive(csr_dbgdir);
/* Unregister i2c-device driver */
i2c_del_driver(&idt_driver);
}
module_exit(idt_exit);
| linux-master | drivers/misc/eeprom/idt_89hpesx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for 93xx46 EEPROMs
*
* (C) 2011 DENX Software Engineering, Anatolij Gustschin <[email protected]>
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/nvmem-provider.h>
#include <linux/eeprom_93xx46.h>
#define OP_START 0x4
#define OP_WRITE (OP_START | 0x1)
#define OP_READ (OP_START | 0x2)
#define ADDR_EWDS 0x00
#define ADDR_ERAL 0x20
#define ADDR_EWEN 0x30
struct eeprom_93xx46_devtype_data {
unsigned int quirks;
unsigned char flags;
};
static const struct eeprom_93xx46_devtype_data at93c46_data = {
.flags = EE_SIZE1K,
};
static const struct eeprom_93xx46_devtype_data at93c56_data = {
.flags = EE_SIZE2K,
};
static const struct eeprom_93xx46_devtype_data at93c66_data = {
.flags = EE_SIZE4K,
};
static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = {
.flags = EE_SIZE1K,
.quirks = EEPROM_93XX46_QUIRK_SINGLE_WORD_READ |
EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH,
};
static const struct eeprom_93xx46_devtype_data microchip_93lc46b_data = {
.flags = EE_SIZE1K,
.quirks = EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE,
};
struct eeprom_93xx46_dev {
struct spi_device *spi;
struct eeprom_93xx46_platform_data *pdata;
struct mutex lock;
struct nvmem_config nvmem_config;
struct nvmem_device *nvmem;
int addrlen;
int size;
};
static inline bool has_quirk_single_word_read(struct eeprom_93xx46_dev *edev)
{
return edev->pdata->quirks & EEPROM_93XX46_QUIRK_SINGLE_WORD_READ;
}
static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev)
{
return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH;
}
static inline bool has_quirk_extra_read_cycle(struct eeprom_93xx46_dev *edev)
{
return edev->pdata->quirks & EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE;
}
static int eeprom_93xx46_read(void *priv, unsigned int off,
void *val, size_t count)
{
struct eeprom_93xx46_dev *edev = priv;
char *buf = val;
int err = 0;
int bits;
if (unlikely(off >= edev->size))
return 0;
if ((off + count) > edev->size)
count = edev->size - off;
if (unlikely(!count))
return count;
mutex_lock(&edev->lock);
if (edev->pdata->prepare)
edev->pdata->prepare(edev);
/* The opcode in front of the address is three bits. */
bits = edev->addrlen + 3;
while (count) {
struct spi_message m;
struct spi_transfer t[2] = { { 0 } };
u16 cmd_addr = OP_READ << edev->addrlen;
size_t nbytes = count;
if (edev->pdata->flags & EE_ADDR8) {
cmd_addr |= off;
if (has_quirk_single_word_read(edev))
nbytes = 1;
} else {
cmd_addr |= (off >> 1);
if (has_quirk_single_word_read(edev))
nbytes = 2;
}
dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
cmd_addr, edev->spi->max_speed_hz);
if (has_quirk_extra_read_cycle(edev)) {
cmd_addr <<= 1;
bits += 1;
}
spi_message_init(&m);
t[0].tx_buf = (char *)&cmd_addr;
t[0].len = 2;
t[0].bits_per_word = bits;
spi_message_add_tail(&t[0], &m);
t[1].rx_buf = buf;
t[1].len = count;
t[1].bits_per_word = 8;
spi_message_add_tail(&t[1], &m);
err = spi_sync(edev->spi, &m);
/* have to wait at least Tcsl ns */
ndelay(250);
if (err) {
dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n",
nbytes, (int)off, err);
break;
}
buf += nbytes;
off += nbytes;
count -= nbytes;
}
if (edev->pdata->finish)
edev->pdata->finish(edev);
mutex_unlock(&edev->lock);
return err;
}
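/*
* Worked example (illustrative): a 93c46 part configured for 16-bit
* data has edev->addrlen = 6, so bits = 9. Reading the word at byte
* offset 10 therefore sends (OP_READ << 6) | (10 >> 1) = 0x185,
* clocked out as 9 bits ahead of the data phase.
*/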
static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on)
{
struct spi_message m;
struct spi_transfer t;
int bits, ret;
u16 cmd_addr;
/* The opcode in front of the address is three bits. */
bits = edev->addrlen + 3;
cmd_addr = OP_START << edev->addrlen;
if (edev->pdata->flags & EE_ADDR8)
cmd_addr |= (is_on ? ADDR_EWEN : ADDR_EWDS) << 1;
else
cmd_addr |= (is_on ? ADDR_EWEN : ADDR_EWDS);
if (has_quirk_instruction_length(edev)) {
cmd_addr <<= 2;
bits += 2;
}
dev_dbg(&edev->spi->dev, "ew%s cmd 0x%04x, %d bits\n",
is_on ? "en" : "ds", cmd_addr, bits);
spi_message_init(&m);
memset(&t, 0, sizeof(t));
t.tx_buf = &cmd_addr;
t.len = 2;
t.bits_per_word = bits;
spi_message_add_tail(&t, &m);
mutex_lock(&edev->lock);
if (edev->pdata->prepare)
edev->pdata->prepare(edev);
ret = spi_sync(edev->spi, &m);
/* have to wait at least Tcsl ns */
ndelay(250);
if (ret)
dev_err(&edev->spi->dev, "erase/write %sable error %d\n",
is_on ? "en" : "dis", ret);
if (edev->pdata->finish)
edev->pdata->finish(edev);
mutex_unlock(&edev->lock);
return ret;
}
static ssize_t
eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev,
const char *buf, unsigned off)
{
struct spi_message m;
struct spi_transfer t[2];
int bits, data_len, ret;
u16 cmd_addr;
if (unlikely(off >= edev->size))
return -EINVAL;
/* The opcode in front of the address is three bits. */
bits = edev->addrlen + 3;
cmd_addr = OP_WRITE << edev->addrlen;
if (edev->pdata->flags & EE_ADDR8) {
cmd_addr |= off;
data_len = 1;
} else {
cmd_addr |= (off >> 1);
data_len = 2;
}
dev_dbg(&edev->spi->dev, "write cmd 0x%x\n", cmd_addr);
spi_message_init(&m);
memset(t, 0, sizeof(t));
t[0].tx_buf = (char *)&cmd_addr;
t[0].len = 2;
t[0].bits_per_word = bits;
spi_message_add_tail(&t[0], &m);
t[1].tx_buf = buf;
t[1].len = data_len;
t[1].bits_per_word = 8;
spi_message_add_tail(&t[1], &m);
ret = spi_sync(edev->spi, &m);
/* have to wait program cycle time Twc ms */
mdelay(6);
return ret;
}
static int eeprom_93xx46_write(void *priv, unsigned int off,
void *val, size_t count)
{
struct eeprom_93xx46_dev *edev = priv;
char *buf = val;
int i, ret, step = 1;
if (unlikely(off >= edev->size))
return -EFBIG;
if ((off + count) > edev->size)
count = edev->size - off;
if (unlikely(!count))
return count;
/* only write an even number of bytes on 16-bit devices */
if (edev->pdata->flags & EE_ADDR16) {
step = 2;
count &= ~1;
}
/* erase/write enable */
ret = eeprom_93xx46_ew(edev, 1);
if (ret)
return ret;
mutex_lock(&edev->lock);
if (edev->pdata->prepare)
edev->pdata->prepare(edev);
for (i = 0; i < count; i += step) {
ret = eeprom_93xx46_write_word(edev, &buf[i], off + i);
if (ret) {
dev_err(&edev->spi->dev, "write failed at %d: %d\n",
(int)off + i, ret);
break;
}
}
if (edev->pdata->finish)
edev->pdata->finish(edev);
mutex_unlock(&edev->lock);
/* erase/write disable */
eeprom_93xx46_ew(edev, 0);
return ret;
}
static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev)
{
struct eeprom_93xx46_platform_data *pd = edev->pdata;
struct spi_message m;
struct spi_transfer t;
int bits, ret;
u16 cmd_addr;
/* The opcode in front of the address is three bits. */
bits = edev->addrlen + 3;
cmd_addr = OP_START << edev->addrlen;
if (edev->pdata->flags & EE_ADDR8)
cmd_addr |= ADDR_ERAL << 1;
else
cmd_addr |= ADDR_ERAL;
if (has_quirk_instruction_length(edev)) {
cmd_addr <<= 2;
bits += 2;
}
dev_dbg(&edev->spi->dev, "eral cmd 0x%04x, %d bits\n", cmd_addr, bits);
spi_message_init(&m);
memset(&t, 0, sizeof(t));
t.tx_buf = &cmd_addr;
t.len = 2;
t.bits_per_word = bits;
spi_message_add_tail(&t, &m);
mutex_lock(&edev->lock);
if (edev->pdata->prepare)
edev->pdata->prepare(edev);
ret = spi_sync(edev->spi, &m);
if (ret)
dev_err(&edev->spi->dev, "erase error %d\n", ret);
/* have to wait erase cycle time Tec ms */
mdelay(6);
if (pd->finish)
pd->finish(edev);
mutex_unlock(&edev->lock);
return ret;
}
static ssize_t eeprom_93xx46_store_erase(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct eeprom_93xx46_dev *edev = dev_get_drvdata(dev);
int erase = 0, ret;
sscanf(buf, "%d", &erase);
if (erase) {
ret = eeprom_93xx46_ew(edev, 1);
if (ret)
return ret;
ret = eeprom_93xx46_eral(edev);
if (ret)
return ret;
ret = eeprom_93xx46_ew(edev, 0);
if (ret)
return ret;
}
return count;
}
static DEVICE_ATTR(erase, S_IWUSR, NULL, eeprom_93xx46_store_erase);
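/*
* Usage note (illustrative): writing any non-zero value to this
* attribute, e.g.
*
*	echo 1 > /sys/bus/spi/devices/.../erase
*
* (path shortened; it depends on the SPI bus topology) runs the
* EWEN/ERAL/EWDS sequence above and wipes the whole chip.
*/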
static void select_assert(void *context)
{
struct eeprom_93xx46_dev *edev = context;
gpiod_set_value_cansleep(edev->pdata->select, 1);
}
static void select_deassert(void *context)
{
struct eeprom_93xx46_dev *edev = context;
gpiod_set_value_cansleep(edev->pdata->select, 0);
}
static const struct of_device_id eeprom_93xx46_of_table[] = {
{ .compatible = "eeprom-93xx46", .data = &at93c46_data, },
{ .compatible = "atmel,at93c46", .data = &at93c46_data, },
{ .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, },
{ .compatible = "atmel,at93c56", .data = &at93c56_data, },
{ .compatible = "atmel,at93c66", .data = &at93c66_data, },
{ .compatible = "microchip,93lc46b", .data = µchip_93lc46b_data, },
{}
};
MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table);
static const struct spi_device_id eeprom_93xx46_spi_ids[] = {
{ .name = "eeprom-93xx46",
.driver_data = (kernel_ulong_t)&at93c46_data, },
{ .name = "at93c46",
.driver_data = (kernel_ulong_t)&at93c46_data, },
{ .name = "at93c46d",
.driver_data = (kernel_ulong_t)&atmel_at93c46d_data, },
{ .name = "at93c56",
.driver_data = (kernel_ulong_t)&at93c56_data, },
{ .name = "at93c66",
.driver_data = (kernel_ulong_t)&at93c66_data, },
{ .name = "93lc46b",
.driver_data = (kernel_ulong_t)µchip_93lc46b_data, },
{}
};
MODULE_DEVICE_TABLE(spi, eeprom_93xx46_spi_ids);
static int eeprom_93xx46_probe_dt(struct spi_device *spi)
{
const struct of_device_id *of_id =
of_match_device(eeprom_93xx46_of_table, &spi->dev);
struct device_node *np = spi->dev.of_node;
struct eeprom_93xx46_platform_data *pd;
u32 tmp;
int ret;
pd = devm_kzalloc(&spi->dev, sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
ret = of_property_read_u32(np, "data-size", &tmp);
if (ret < 0) {
dev_err(&spi->dev, "data-size property not found\n");
return ret;
}
if (tmp == 8) {
pd->flags |= EE_ADDR8;
} else if (tmp == 16) {
pd->flags |= EE_ADDR16;
} else {
dev_err(&spi->dev, "invalid data-size (%d)\n", tmp);
return -EINVAL;
}
if (of_property_read_bool(np, "read-only"))
pd->flags |= EE_READONLY;
pd->select = devm_gpiod_get_optional(&spi->dev, "select",
GPIOD_OUT_LOW);
if (IS_ERR(pd->select))
return PTR_ERR(pd->select);
pd->prepare = select_assert;
pd->finish = select_deassert;
gpiod_direction_output(pd->select, 0);
if (of_id->data) {
const struct eeprom_93xx46_devtype_data *data = of_id->data;
pd->quirks = data->quirks;
pd->flags |= data->flags;
}
spi->dev.platform_data = pd;
return 0;
}
static int eeprom_93xx46_probe(struct spi_device *spi)
{
struct eeprom_93xx46_platform_data *pd;
struct eeprom_93xx46_dev *edev;
int err;
if (spi->dev.of_node) {
err = eeprom_93xx46_probe_dt(spi);
if (err < 0)
return err;
}
pd = spi->dev.platform_data;
if (!pd) {
dev_err(&spi->dev, "missing platform data\n");
return -ENODEV;
}
edev = devm_kzalloc(&spi->dev, sizeof(*edev), GFP_KERNEL);
if (!edev)
return -ENOMEM;
if (pd->flags & EE_SIZE1K)
edev->size = 128;
else if (pd->flags & EE_SIZE2K)
edev->size = 256;
else if (pd->flags & EE_SIZE4K)
edev->size = 512;
else {
dev_err(&spi->dev, "unspecified size\n");
return -EINVAL;
}
if (pd->flags & EE_ADDR8)
edev->addrlen = ilog2(edev->size);
else if (pd->flags & EE_ADDR16)
edev->addrlen = ilog2(edev->size) - 1;
else {
dev_err(&spi->dev, "unspecified address type\n");
return -EINVAL;
}
mutex_init(&edev->lock);
edev->spi = spi;
edev->pdata = pd;
edev->nvmem_config.type = NVMEM_TYPE_EEPROM;
edev->nvmem_config.name = dev_name(&spi->dev);
edev->nvmem_config.dev = &spi->dev;
edev->nvmem_config.read_only = pd->flags & EE_READONLY;
edev->nvmem_config.root_only = true;
edev->nvmem_config.owner = THIS_MODULE;
edev->nvmem_config.compat = true;
edev->nvmem_config.base_dev = &spi->dev;
edev->nvmem_config.reg_read = eeprom_93xx46_read;
edev->nvmem_config.reg_write = eeprom_93xx46_write;
edev->nvmem_config.priv = edev;
edev->nvmem_config.stride = 4;
edev->nvmem_config.word_size = 1;
edev->nvmem_config.size = edev->size;
edev->nvmem = devm_nvmem_register(&spi->dev, &edev->nvmem_config);
if (IS_ERR(edev->nvmem))
return PTR_ERR(edev->nvmem);
dev_info(&spi->dev, "%d-bit eeprom containing %d bytes %s\n",
(pd->flags & EE_ADDR8) ? 8 : 16,
edev->size,
(pd->flags & EE_READONLY) ? "(readonly)" : "");
if (!(pd->flags & EE_READONLY)) {
if (device_create_file(&spi->dev, &dev_attr_erase))
dev_err(&spi->dev, "can't create erase interface\n");
}
spi_set_drvdata(spi, edev);
return 0;
}
static void eeprom_93xx46_remove(struct spi_device *spi)
{
struct eeprom_93xx46_dev *edev = spi_get_drvdata(spi);
if (!(edev->pdata->flags & EE_READONLY))
device_remove_file(&spi->dev, &dev_attr_erase);
}
static struct spi_driver eeprom_93xx46_driver = {
.driver = {
.name = "93xx46",
.of_match_table = of_match_ptr(eeprom_93xx46_of_table),
},
.probe = eeprom_93xx46_probe,
.remove = eeprom_93xx46_remove,
.id_table = eeprom_93xx46_spi_ids,
};
module_spi_driver(eeprom_93xx46_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs");
MODULE_AUTHOR("Anatolij Gustschin <[email protected]>");
MODULE_ALIAS("spi:93xx46");
MODULE_ALIAS("spi:eeprom-93xx46");
MODULE_ALIAS("spi:93lc46b");
| linux-master | drivers/misc/eeprom/eeprom_93xx46.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* max6875.c - driver for MAX6874/MAX6875
*
* Copyright (C) 2005 Ben Gardner <[email protected]>
*
* Based on eeprom.c
*
* The MAX6875 has a bank of registers and two banks of EEPROM.
* Address ranges are defined as follows:
* * 0x0000 - 0x0046 = configuration registers
* * 0x8000 - 0x8046 = configuration EEPROM
* * 0x8100 - 0x82FF = user EEPROM
*
* This driver makes the user EEPROM available for read.
*
* The registers & config EEPROM should be accessed via i2c-dev.
*
* The MAX6875 ignores the lowest address bit, so each chip responds to
* two addresses - 0x50/0x51 and 0x52/0x53.
*
* Note that the MAX6875 uses i2c_smbus_write_byte_data() to set the read
* address, so this driver is destructive if loaded for the wrong EEPROM chip.
*/
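/*
* Illustrative user-space sketch (not part of the driver): the user
* EEPROM exported below can be read through the "eeprom" sysfs file.
* The sysfs path is hypothetical; it depends on the adapter number and
* chip address (bus 0, address 0x50 assumed here).
*/
#if 0
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static ssize_t read_user_eeprom(uint8_t *buf, size_t len, off_t off)
{
	int fd = open("/sys/bus/i2c/devices/0-0050/eeprom", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = pread(fd, buf, len, off);	/* off within the 512-byte user area */
	close(fd);
	return n;
}
#endif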
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
/* The MAX6875 can only read/write 16 bytes at a time */
#define SLICE_SIZE 16
#define SLICE_BITS 4
/* USER EEPROM is at addresses 0x8100 - 0x82FF */
#define USER_EEPROM_BASE 0x8100
#define USER_EEPROM_SIZE 0x0200
#define USER_EEPROM_SLICES 32
/* MAX6875 commands */
#define MAX6875_CMD_BLK_READ 0x84
/* Each client has this additional data */
struct max6875_data {
struct i2c_client *fake_client;
struct mutex update_lock;
u32 valid;
u8 data[USER_EEPROM_SIZE];
unsigned long last_updated[USER_EEPROM_SLICES];
};
static void max6875_update_slice(struct i2c_client *client, int slice)
{
struct max6875_data *data = i2c_get_clientdata(client);
int i, j, addr;
u8 *buf;
if (slice >= USER_EEPROM_SLICES)
return;
mutex_lock(&data->update_lock);
buf = &data->data[slice << SLICE_BITS];
if (!(data->valid & (1 << slice)) ||
time_after(jiffies, data->last_updated[slice])) {
dev_dbg(&client->dev, "Starting update of slice %u\n", slice);
data->valid &= ~(1 << slice);
addr = USER_EEPROM_BASE + (slice << SLICE_BITS);
/* select the eeprom address */
if (i2c_smbus_write_byte_data(client, addr >> 8, addr & 0xFF)) {
dev_err(&client->dev, "address set failed\n");
goto exit_up;
}
if (i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
if (i2c_smbus_read_i2c_block_data(client,
MAX6875_CMD_BLK_READ,
SLICE_SIZE,
buf) != SLICE_SIZE) {
goto exit_up;
}
} else {
for (i = 0; i < SLICE_SIZE; i++) {
j = i2c_smbus_read_byte(client);
if (j < 0) {
goto exit_up;
}
buf[i] = j;
}
}
data->last_updated[slice] = jiffies;
data->valid |= (1 << slice);
}
exit_up:
mutex_unlock(&data->update_lock);
}
static ssize_t max6875_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
struct max6875_data *data = i2c_get_clientdata(client);
int slice, max_slice;
/* refresh slices which contain requested bytes */
max_slice = (off + count - 1) >> SLICE_BITS;
for (slice = (off >> SLICE_BITS); slice <= max_slice; slice++)
max6875_update_slice(client, slice);
memcpy(buf, &data->data[off], count);
return count;
}
static const struct bin_attribute user_eeprom_attr = {
.attr = {
.name = "eeprom",
.mode = S_IRUGO,
},
.size = USER_EEPROM_SIZE,
.read = max6875_read,
};
static int max6875_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct max6875_data *data;
int err;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA
| I2C_FUNC_SMBUS_READ_BYTE))
return -ENODEV;
/* Only bind to even addresses */
if (client->addr & 1)
return -ENODEV;
data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
/* A fake client is created on the odd address */
data->fake_client = i2c_new_dummy_device(client->adapter, client->addr + 1);
if (IS_ERR(data->fake_client)) {
err = PTR_ERR(data->fake_client);
goto exit_kfree;
}
/* Init real i2c_client */
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
err = sysfs_create_bin_file(&client->dev.kobj, &user_eeprom_attr);
if (err)
goto exit_remove_fake;
return 0;
exit_remove_fake:
i2c_unregister_device(data->fake_client);
exit_kfree:
kfree(data);
return err;
}
static void max6875_remove(struct i2c_client *client)
{
struct max6875_data *data = i2c_get_clientdata(client);
i2c_unregister_device(data->fake_client);
sysfs_remove_bin_file(&client->dev.kobj, &user_eeprom_attr);
kfree(data);
}
static const struct i2c_device_id max6875_id[] = {
{ "max6875", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max6875_id);
static struct i2c_driver max6875_driver = {
.driver = {
.name = "max6875",
},
.probe = max6875_probe,
.remove = max6875_remove,
.id_table = max6875_id,
};
module_i2c_driver(max6875_driver);
MODULE_AUTHOR("Ben Gardner <[email protected]>");
MODULE_DESCRIPTION("MAX6875 driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/eeprom/max6875.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2004 - 2006 rt2x00 SourceForge Project
* <http://rt2x00.serialmonkey.com>
*
* Module: eeprom_93cx6
* Abstract: EEPROM reader routines for 93cx6 chipsets.
* Supported chipsets: 93c46 & 93c66.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/eeprom_93cx6.h>
MODULE_AUTHOR("http://rt2x00.serialmonkey.com");
MODULE_VERSION("1.0");
MODULE_DESCRIPTION("EEPROM 93cx6 chip driver");
MODULE_LICENSE("GPL");
static inline void eeprom_93cx6_pulse_high(struct eeprom_93cx6 *eeprom)
{
eeprom->reg_data_clock = 1;
eeprom->register_write(eeprom);
/*
* Add a short delay for the pulse to work.
* According to the specifications the "maximum minimum"
* time should be 450ns.
*/
ndelay(450);
}
static inline void eeprom_93cx6_pulse_low(struct eeprom_93cx6 *eeprom)
{
eeprom->reg_data_clock = 0;
eeprom->register_write(eeprom);
/*
* Add a short delay for the pulse to work.
* According to the specifications the "maximum minimum"
* time should be 450ns.
*/
ndelay(450);
}
static void eeprom_93cx6_startup(struct eeprom_93cx6 *eeprom)
{
/*
* Clear all flags, and enable chip select.
*/
eeprom->register_read(eeprom);
eeprom->reg_data_in = 0;
eeprom->reg_data_out = 0;
eeprom->reg_data_clock = 0;
eeprom->reg_chip_select = 1;
eeprom->drive_data = 1;
eeprom->register_write(eeprom);
/*
* kick a pulse.
*/
eeprom_93cx6_pulse_high(eeprom);
eeprom_93cx6_pulse_low(eeprom);
}
static void eeprom_93cx6_cleanup(struct eeprom_93cx6 *eeprom)
{
/*
* Clear chip_select and data_in flags.
*/
eeprom->register_read(eeprom);
eeprom->reg_data_in = 0;
eeprom->reg_chip_select = 0;
eeprom->register_write(eeprom);
/*
* kick a pulse.
*/
eeprom_93cx6_pulse_high(eeprom);
eeprom_93cx6_pulse_low(eeprom);
}
static void eeprom_93cx6_write_bits(struct eeprom_93cx6 *eeprom,
const u16 data, const u16 count)
{
unsigned int i;
eeprom->register_read(eeprom);
/*
* Clear data flags.
*/
eeprom->reg_data_in = 0;
eeprom->reg_data_out = 0;
eeprom->drive_data = 1;
/*
* Start writing all bits.
*/
for (i = count; i > 0; i--) {
/*
* Check if this bit needs to be set.
*/
eeprom->reg_data_in = !!(data & (1 << (i - 1)));
/*
* Write the bit to the eeprom register.
*/
eeprom->register_write(eeprom);
/*
* Kick a pulse.
*/
eeprom_93cx6_pulse_high(eeprom);
eeprom_93cx6_pulse_low(eeprom);
}
eeprom->reg_data_in = 0;
eeprom->register_write(eeprom);
}
static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom,
u16 *data, const u16 count)
{
unsigned int i;
u16 buf = 0;
eeprom->register_read(eeprom);
/*
* Clear data flags.
*/
eeprom->reg_data_in = 0;
eeprom->reg_data_out = 0;
eeprom->drive_data = 0;
/*
* Start reading all bits.
*/
for (i = count; i > 0; i--) {
eeprom_93cx6_pulse_high(eeprom);
eeprom->register_read(eeprom);
/*
* Clear data_in flag.
*/
eeprom->reg_data_in = 0;
/*
* Read if the bit has been set.
*/
if (eeprom->reg_data_out)
buf |= (1 << (i - 1));
eeprom_93cx6_pulse_low(eeprom);
}
*data = buf;
}
/**
* eeprom_93cx6_read - Read a word from eeprom
* @eeprom: Pointer to eeprom structure
* @word: Word index from where we should start reading
* @data: target pointer where the information will have to be stored
*
* This function will read the eeprom data as host-endian word
* into the given data pointer.
*/
void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, const u8 word,
u16 *data)
{
u16 command;
/*
* Initialize the eeprom register
*/
eeprom_93cx6_startup(eeprom);
/*
* Select the read opcode and the word to be read.
*/
command = (PCI_EEPROM_READ_OPCODE << eeprom->width) | word;
eeprom_93cx6_write_bits(eeprom, command,
PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
/*
* Read the requested 16 bits.
*/
eeprom_93cx6_read_bits(eeprom, data, 16);
/*
* Cleanup eeprom register.
*/
eeprom_93cx6_cleanup(eeprom);
}
EXPORT_SYMBOL_GPL(eeprom_93cx6_read);
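/*
* Minimal caller sketch (illustrative): a driver supplies the two
* register accessors and then reads word 0. All my_* names and the
* MY_EE_* bit masks are hypothetical; real users (e.g. rt2x00) map the
* four signal flags onto bits of their EEPROM control register.
*/
#if 0
static void my_register_read(struct eeprom_93cx6 *eeprom)
{
	u32 reg = my_read_ctrl(eeprom->data);	/* hypothetical MMIO read */

	eeprom->reg_data_in = !!(reg & MY_EE_DI);
	eeprom->reg_data_out = !!(reg & MY_EE_DO);
	eeprom->reg_data_clock = !!(reg & MY_EE_CLK);
	eeprom->reg_chip_select = !!(reg & MY_EE_CS);
}

static void my_register_write(struct eeprom_93cx6 *eeprom)
{
	u32 reg = 0;

	reg |= eeprom->reg_data_in ? MY_EE_DI : 0;
	reg |= eeprom->reg_data_clock ? MY_EE_CLK : 0;
	reg |= eeprom->reg_chip_select ? MY_EE_CS : 0;
	my_write_ctrl(eeprom->data, reg);	/* hypothetical MMIO write */
}

static u16 my_read_word0(void *priv)
{
	struct eeprom_93cx6 eeprom = {
		.data = priv,
		.register_read = my_register_read,
		.register_write = my_register_write,
		.width = PCI_EEPROM_WIDTH_93C46,
	};
	u16 word;

	eeprom_93cx6_read(&eeprom, 0, &word);
	return word;
}
#endif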
/**
* eeprom_93cx6_multiread - Read multiple words from eeprom
* @eeprom: Pointer to eeprom structure
* @word: Word index from where we should start reading
* @data: target pointer where the information will have to be stored
* @words: Number of words that should be read.
*
* This function will read all requested words from the eeprom,
* this is done by calling eeprom_93cx6_read() multiple times.
* The difference is that while eeprom_93cx6_read() returns words in
* host byte order, this function returns little-endian words.
*/
void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word,
__le16 *data, const u16 words)
{
unsigned int i;
u16 tmp;
for (i = 0; i < words; i++) {
tmp = 0;
eeprom_93cx6_read(eeprom, word + i, &tmp);
data[i] = cpu_to_le16(tmp);
}
}
EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread);
/**
* eeprom_93cx6_readb - Read a byte from eeprom
* @eeprom: Pointer to eeprom structure
* @byte: Byte index from where we should start reading
* @data: target pointer where the information will have to be stored
*
* This function will read a byte of the eeprom data
* into the given data pointer.
*/
void eeprom_93cx6_readb(struct eeprom_93cx6 *eeprom, const u8 byte,
u8 *data)
{
u16 command;
u16 tmp;
/*
* Initialize the eeprom register
*/
eeprom_93cx6_startup(eeprom);
/*
* Select the read opcode and the byte to be read.
*/
command = (PCI_EEPROM_READ_OPCODE << (eeprom->width + 1)) | byte;
eeprom_93cx6_write_bits(eeprom, command,
PCI_EEPROM_WIDTH_OPCODE + eeprom->width + 1);
/*
* Read the requested 8 bits.
*/
eeprom_93cx6_read_bits(eeprom, &tmp, 8);
*data = tmp & 0xff;
/*
* Cleanup eeprom register.
*/
eeprom_93cx6_cleanup(eeprom);
}
EXPORT_SYMBOL_GPL(eeprom_93cx6_readb);
/**
* eeprom_93cx6_multireadb - Read multiple bytes from eeprom
* @eeprom: Pointer to eeprom structure
* @byte: Index from where we should start reading
* @data: target pointer where the information will have to be stored
* @bytes: Number of bytes that should be read.
*
* This function will read all requested bytes from the eeprom,
* this is done by calling eeprom_93cx6_readb() multiple times.
*/
void eeprom_93cx6_multireadb(struct eeprom_93cx6 *eeprom, const u8 byte,
u8 *data, const u16 bytes)
{
unsigned int i;
for (i = 0; i < bytes; i++)
eeprom_93cx6_readb(eeprom, byte + i, &data[i]);
}
EXPORT_SYMBOL_GPL(eeprom_93cx6_multireadb);
/**
* eeprom_93cx6_wren - set the write enable state
* @eeprom: Pointer to eeprom structure
* @enable: true to enable writes, otherwise disable writes
*
* Set the EEPROM write enable state to either allow or deny
* writes depending on the @enable value.
*/
void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable)
{
u16 command;
/* start the command */
eeprom_93cx6_startup(eeprom);
/* create command to enable/disable */
command = enable ? PCI_EEPROM_EWEN_OPCODE : PCI_EEPROM_EWDS_OPCODE;
command <<= (eeprom->width - 2);
eeprom_93cx6_write_bits(eeprom, command,
PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
eeprom_93cx6_cleanup(eeprom);
}
EXPORT_SYMBOL_GPL(eeprom_93cx6_wren);
/**
* eeprom_93cx6_write - write data to the EEPROM
* @eeprom: Pointer to eeprom structure
* @addr: Address to write data to.
* @data: The data to write to address @addr.
*
* Write @data to the specified @addr in the EEPROM and wait for the
* device to finish writing.
*
* Note: since we do not expect a large number of write operations,
* we delay between parts of the operation to avoid using excessive
* amounts of CPU time busy-waiting.
*/
void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, u8 addr, u16 data)
{
int timeout = 100;
u16 command;
/* start the command */
eeprom_93cx6_startup(eeprom);
command = PCI_EEPROM_WRITE_OPCODE << eeprom->width;
command |= addr;
/* send write command */
eeprom_93cx6_write_bits(eeprom, command,
PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
/* send data */
eeprom_93cx6_write_bits(eeprom, data, 16);
/* get ready to check for busy */
eeprom->drive_data = 0;
eeprom->reg_chip_select = 1;
eeprom->register_write(eeprom);
/* wait at least 250 ns for DO to become the busy signal */
usleep_range(1000, 2000);
/* wait for DO to go high to signify finish */
while (true) {
eeprom->register_read(eeprom);
if (eeprom->reg_data_out)
break;
usleep_range(1000, 2000);
if (--timeout <= 0) {
printk(KERN_ERR "%s: timeout\n", __func__);
break;
}
}
eeprom_93cx6_cleanup(eeprom);
}
EXPORT_SYMBOL_GPL(eeprom_93cx6_write);
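/*
* Usage note (illustrative): writes must be bracketed by the
* write-enable latch, e.g.
*
*	eeprom_93cx6_wren(&eeprom, true);
*	eeprom_93cx6_write(&eeprom, addr, value);
*	eeprom_93cx6_wren(&eeprom, false);
*/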
| linux-master | drivers/misc/eeprom/eeprom_93cx6.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 1998, 1999 Frodo Looijaard <[email protected]> and
* Philip Edelbrock <[email protected]>
* Copyright (C) 2003 Greg Kroah-Hartman <[email protected]>
* Copyright (C) 2003 IBM Corp.
* Copyright (C) 2004 Jean Delvare <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/capability.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54,
0x55, 0x56, 0x57, I2C_CLIENT_END };
/* Size of EEPROM in bytes */
#define EEPROM_SIZE 256
/* possible types of eeprom devices */
enum eeprom_nature {
UNKNOWN,
VAIO,
};
/* Each client has this additional data */
struct eeprom_data {
struct mutex update_lock;
u8 valid; /* bitfield, bit!=0 if slice is valid */
unsigned long last_updated[8]; /* In jiffies, 8 slices */
u8 data[EEPROM_SIZE]; /* Register values */
enum eeprom_nature nature;
};
static void eeprom_update_client(struct i2c_client *client, u8 slice)
{
struct eeprom_data *data = i2c_get_clientdata(client);
int i;
mutex_lock(&data->update_lock);
if (!(data->valid & (1 << slice)) ||
time_after(jiffies, data->last_updated[slice] + 300 * HZ)) {
dev_dbg(&client->dev, "Starting eeprom update, slice %u\n", slice);
if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
for (i = slice << 5; i < (slice + 1) << 5; i += 32)
if (i2c_smbus_read_i2c_block_data(client, i,
32, data->data + i)
!= 32)
goto exit;
} else {
for (i = slice << 5; i < (slice + 1) << 5; i += 2) {
int word = i2c_smbus_read_word_data(client, i);
if (word < 0)
goto exit;
data->data[i] = word & 0xff;
data->data[i + 1] = word >> 8;
}
}
data->last_updated[slice] = jiffies;
data->valid |= (1 << slice);
}
exit:
mutex_unlock(&data->update_lock);
}
static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
struct eeprom_data *data = i2c_get_clientdata(client);
u8 slice;
/* Only refresh slices which contain requested bytes */
for (slice = off >> 5; slice <= (off + count - 1) >> 5; slice++)
eeprom_update_client(client, slice);
/* Hide Vaio private settings from regular users:
- BIOS passwords: bytes 0x00 to 0x0f
- UUID: bytes 0x10 to 0x1f
- Serial number: 0xc0 to 0xdf */
if (data->nature == VAIO && !capable(CAP_SYS_ADMIN)) {
int i;
for (i = 0; i < count; i++) {
if ((off + i <= 0x1f) ||
(off + i >= 0xc0 && off + i <= 0xdf))
buf[i] = 0;
else
buf[i] = data->data[off + i];
}
} else {
memcpy(buf, &data->data[off], count);
}
return count;
}
static const struct bin_attribute eeprom_attr = {
.attr = {
.name = "eeprom",
.mode = S_IRUGO,
},
.size = EEPROM_SIZE,
.read = eeprom_read,
};
/* Return 0 if detection is successful, -ENODEV otherwise */
static int eeprom_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
/* EDID EEPROMs are often 24C00 EEPROMs, which answer to all
addresses 0x50-0x57, but we only care about 0x50. So decline
attaching to addresses >= 0x51 on DDC buses */
if (!(adapter->class & I2C_CLASS_SPD) && client->addr >= 0x51)
return -ENODEV;
/* There are four ways we can read the EEPROM data:
(1) I2C block reads (faster, but unsupported by most adapters)
(2) Word reads (128% overhead)
(3) Consecutive byte reads (88% overhead, unsafe)
(4) Regular byte data reads (265% overhead)
The third and fourth methods are not implemented by this driver
because all known adapters support one of the first two. */
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_WORD_DATA)
&& !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK))
return -ENODEV;
strscpy(info->type, "eeprom", I2C_NAME_SIZE);
return 0;
}
static int eeprom_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct eeprom_data *data;
data = devm_kzalloc(&client->dev, sizeof(struct eeprom_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
memset(data->data, 0xff, EEPROM_SIZE);
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
data->nature = UNKNOWN;
/* Detect the Vaio nature of EEPROMs.
We use the "PCG-" or "VGN-" prefix as the signature. */
if (client->addr == 0x57
&& i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
char name[4];
name[0] = i2c_smbus_read_byte_data(client, 0x80);
name[1] = i2c_smbus_read_byte_data(client, 0x81);
name[2] = i2c_smbus_read_byte_data(client, 0x82);
name[3] = i2c_smbus_read_byte_data(client, 0x83);
if (!memcmp(name, "PCG-", 4) || !memcmp(name, "VGN-", 4)) {
dev_info(&client->dev, "Vaio EEPROM detected, "
"enabling privacy protection\n");
data->nature = VAIO;
}
}
/* Let the users know they are using deprecated driver */
dev_notice(&client->dev,
"eeprom driver is deprecated, please use at24 instead\n");
/* create the sysfs eeprom file */
return sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr);
}
static void eeprom_remove(struct i2c_client *client)
{
sysfs_remove_bin_file(&client->dev.kobj, &eeprom_attr);
}
static const struct i2c_device_id eeprom_id[] = {
{ "eeprom", 0 },
{ }
};
static struct i2c_driver eeprom_driver = {
.driver = {
.name = "eeprom",
},
.probe = eeprom_probe,
.remove = eeprom_remove,
.id_table = eeprom_id,
.class = I2C_CLASS_DDC | I2C_CLASS_SPD,
.detect = eeprom_detect,
.address_list = normal_i2c,
};
module_i2c_driver(eeprom_driver);
MODULE_AUTHOR("Frodo Looijaard <[email protected]> and "
"Philip Edelbrock <[email protected]> and "
"Greg Kroah-Hartman <[email protected]>");
MODULE_DESCRIPTION("I2C EEPROM driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/eeprom/eeprom.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ee1004 - driver for DDR4 SPD EEPROMs
*
* Copyright (C) 2017-2019 Jean Delvare
*
* Based on the at24 driver:
* Copyright (C) 2005-2007 David Brownell
* Copyright (C) 2008 Wolfram Sang, Pengutronix
*/
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
/*
* DDR4 memory modules use special EEPROMs following the Jedec EE1004
* specification. These are 512-byte EEPROMs using a single I2C address
* in the 0x50-0x57 range for data. One of two 256-byte pages is selected
* by writing a command to I2C address 0x36 or 0x37 on the same I2C bus.
*
* Therefore we need to request these 2 additional addresses, and serialize
* access to all such EEPROMs with a single mutex.
*
* We assume it is safe to read up to 32 bytes at once from these EEPROMs.
* We use SMBus access even if I2C is available, these EEPROMs are small
* enough, and reading from them infrequent enough, that we favor simplicity
* over performance.
*/
#define EE1004_ADDR_SET_PAGE 0x36
#define EE1004_NUM_PAGES 2
#define EE1004_PAGE_SIZE 256
#define EE1004_PAGE_SHIFT 8
#define EE1004_EEPROM_SIZE (EE1004_PAGE_SIZE * EE1004_NUM_PAGES)
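/*
* Worked example (illustrative): byte 0x1f3 lives on page
* 0x1f3 >> EE1004_PAGE_SHIFT = 1 at in-page offset 0xf3, so reading it
* first issues the page-select command to address 0x37 (0x36 + 1).
*/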
/*
* Mutex protects ee1004_set_page and ee1004_dev_count, and must be held
* from page selection to end of read.
*/
static DEFINE_MUTEX(ee1004_bus_lock);
static struct i2c_client *ee1004_set_page[EE1004_NUM_PAGES];
static unsigned int ee1004_dev_count;
static int ee1004_current_page;
static const struct i2c_device_id ee1004_ids[] = {
{ "ee1004", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ee1004_ids);
/*-------------------------------------------------------------------------*/
static int ee1004_get_current_page(void)
{
int err;
err = i2c_smbus_read_byte(ee1004_set_page[0]);
if (err == -ENXIO) {
/* Nack means page 1 is selected */
return 1;
}
if (err < 0) {
/* Anything else is a real error, bail out */
return err;
}
/* Ack means page 0 is selected, returned value meaningless */
return 0;
}
static int ee1004_set_current_page(struct device *dev, int page)
{
int ret;
if (page == ee1004_current_page)
return 0;
/* Data is ignored */
ret = i2c_smbus_write_byte(ee1004_set_page[page], 0x00);
/*
* Don't give up just yet. Some memory modules will select the page
* but not ack the command. Check which page is selected now.
*/
if (ret == -ENXIO && ee1004_get_current_page() == page)
ret = 0;
if (ret < 0) {
dev_err(dev, "Failed to select page %d (%d)\n", page, ret);
return ret;
}
dev_dbg(dev, "Selected page %d\n", page);
ee1004_current_page = page;
return 0;
}
static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf,
unsigned int offset, size_t count)
{
int status, page;
page = offset >> EE1004_PAGE_SHIFT;
offset &= (1 << EE1004_PAGE_SHIFT) - 1;
status = ee1004_set_current_page(&client->dev, page);
if (status)
return status;
/* Can't cross page boundaries */
if (offset + count > EE1004_PAGE_SIZE)
count = EE1004_PAGE_SIZE - offset;
if (count > I2C_SMBUS_BLOCK_MAX)
count = I2C_SMBUS_BLOCK_MAX;
return i2c_smbus_read_i2c_block_data_or_emulated(client, offset, count, buf);
}
static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
size_t requested = count;
int ret = 0;
/*
* Read data from chip, protecting against concurrent access to
* other EE1004 SPD EEPROMs on the same adapter.
*/
mutex_lock(&ee1004_bus_lock);
while (count) {
ret = ee1004_eeprom_read(client, buf, off, count);
if (ret < 0)
goto out;
buf += ret;
off += ret;
count -= ret;
}
out:
mutex_unlock(&ee1004_bus_lock);
return ret < 0 ? ret : requested;
}
static BIN_ATTR_RO(eeprom, EE1004_EEPROM_SIZE);
static struct bin_attribute *ee1004_attrs[] = {
&bin_attr_eeprom,
NULL
};
BIN_ATTRIBUTE_GROUPS(ee1004);
static void ee1004_cleanup(int idx)
{
if (--ee1004_dev_count == 0)
while (--idx >= 0) {
i2c_unregister_device(ee1004_set_page[idx]);
ee1004_set_page[idx] = NULL;
}
}
static int ee1004_probe(struct i2c_client *client)
{
int err, cnr = 0;
/* Make sure we can operate on this adapter */
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_READ_I2C_BLOCK) &&
!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_READ_BYTE_DATA))
return -EPFNOSUPPORT;
/* Use 2 dummy devices for page select command */
mutex_lock(&ee1004_bus_lock);
if (++ee1004_dev_count == 1) {
for (cnr = 0; cnr < EE1004_NUM_PAGES; cnr++) {
struct i2c_client *cl;
cl = i2c_new_dummy_device(client->adapter, EE1004_ADDR_SET_PAGE + cnr);
if (IS_ERR(cl)) {
err = PTR_ERR(cl);
goto err_clients;
}
ee1004_set_page[cnr] = cl;
}
/* Remember current page to avoid unneeded page select */
err = ee1004_get_current_page();
if (err < 0)
goto err_clients;
dev_dbg(&client->dev, "Currently selected page: %d\n", err);
ee1004_current_page = err;
} else if (client->adapter != ee1004_set_page[0]->adapter) {
dev_err(&client->dev,
"Driver only supports devices on a single I2C bus\n");
err = -EOPNOTSUPP;
goto err_clients;
}
mutex_unlock(&ee1004_bus_lock);
dev_info(&client->dev,
"%u byte EE1004-compliant SPD EEPROM, read-only\n",
EE1004_EEPROM_SIZE);
return 0;
err_clients:
ee1004_cleanup(cnr);
mutex_unlock(&ee1004_bus_lock);
return err;
}
static void ee1004_remove(struct i2c_client *client)
{
/* Remove page select clients if this is the last device */
mutex_lock(&ee1004_bus_lock);
ee1004_cleanup(EE1004_NUM_PAGES);
mutex_unlock(&ee1004_bus_lock);
}
/*-------------------------------------------------------------------------*/
static struct i2c_driver ee1004_driver = {
.driver = {
.name = "ee1004",
.dev_groups = ee1004_groups,
},
.probe = ee1004_probe,
.remove = ee1004_remove,
.id_table = ee1004_ids,
};
module_i2c_driver(ee1004_driver);
MODULE_DESCRIPTION("Driver for EE1004-compliant DDR4 SPD EEPROMs");
MODULE_AUTHOR("Jean Delvare");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/eeprom/ee1004.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* EEPROMs access control driver for display configuration EEPROMs
* on DigsyMTC board.
*
* (C) 2011 DENX Software Engineering, Anatolij Gustschin <[email protected]>
*
* FIXME: this driver is used on a device-tree probed platform: it
* should be defined as a bit-banged SPI device and probed from the device
* tree and not like this with static grabbing of a few numbered GPIO
* lines at random.
*
* Add proper SPI and EEPROM in arch/powerpc/boot/dts/digsy_mtc.dts
* and delete this driver.
*/
#include <linux/gpio.h>
#include <linux/gpio/machine.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_gpio.h>
#include <linux/eeprom_93xx46.h>
#define GPIO_EEPROM_CLK 216
#define GPIO_EEPROM_CS 210
#define GPIO_EEPROM_DI 217
#define GPIO_EEPROM_DO 249
#define GPIO_EEPROM_OE 255
#define EE_SPI_BUS_NUM 1
static void digsy_mtc_op_prepare(void *p)
{
/* enable */
gpio_set_value(GPIO_EEPROM_OE, 0);
}
static void digsy_mtc_op_finish(void *p)
{
/* disable */
gpio_set_value(GPIO_EEPROM_OE, 1);
}
struct eeprom_93xx46_platform_data digsy_mtc_eeprom_data = {
.flags = EE_ADDR8,
.prepare = digsy_mtc_op_prepare,
.finish = digsy_mtc_op_finish,
};
static struct spi_gpio_platform_data eeprom_spi_gpio_data = {
.num_chipselect = 1,
};
static struct platform_device digsy_mtc_eeprom = {
.name = "spi_gpio",
.id = EE_SPI_BUS_NUM,
.dev = {
.platform_data = &eeprom_spi_gpio_data,
},
};
static struct gpiod_lookup_table eeprom_spi_gpiod_table = {
.dev_id = "spi_gpio",
.table = {
GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_CLK,
"sck", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_DI,
"mosi", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_DO,
"miso", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_CS,
"cs", GPIO_ACTIVE_HIGH),
{ },
},
};
static struct spi_board_info digsy_mtc_eeprom_info[] __initdata = {
{
.modalias = "93xx46",
.max_speed_hz = 1000000,
.bus_num = EE_SPI_BUS_NUM,
.chip_select = 0,
.mode = SPI_MODE_0,
.platform_data = &digsy_mtc_eeprom_data,
},
};
static int __init digsy_mtc_eeprom_devices_init(void)
{
int ret;
ret = gpio_request_one(GPIO_EEPROM_OE, GPIOF_OUT_INIT_HIGH,
"93xx46 EEPROMs OE");
if (ret) {
pr_err("can't request gpio %d\n", GPIO_EEPROM_OE);
return ret;
}
gpiod_add_lookup_table(&eeprom_spi_gpiod_table);
spi_register_board_info(digsy_mtc_eeprom_info,
ARRAY_SIZE(digsy_mtc_eeprom_info));
return platform_device_register(&digsy_mtc_eeprom);
}
device_initcall(digsy_mtc_eeprom_devices_init);
| linux-master | drivers/misc/eeprom/digsy_mtc_eeprom.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for most of the SPI EEPROMs, such as Atmel AT25 models
* and Cypress FRAMs FM25 models.
*
* Copyright (C) 2006 David Brownell
*/
#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spi/eeprom.h>
#include <linux/spi/spi.h>
#include <linux/nvmem-provider.h>
/*
* NOTE: this is an *EEPROM* driver. The vagaries of product naming
* mean that some AT25 products are EEPROMs, and others are FLASH.
* Handle FLASH chips with the drivers/mtd/devices/m25p80.c driver,
* not this one!
*
* EEPROMs that can be used with this driver include, for example:
* AT25M02, AT25128B
*/
#define FM25_SN_LEN 8 /* serial number length */
#define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */
struct at25_data {
struct spi_eeprom chip;
struct spi_device *spi;
struct mutex lock;
unsigned addrlen;
struct nvmem_config nvmem_config;
struct nvmem_device *nvmem;
u8 sernum[FM25_SN_LEN];
u8 command[EE_MAXADDRLEN + 1];
};
#define AT25_WREN 0x06 /* latch the write enable */
#define AT25_WRDI 0x04 /* reset the write enable */
#define AT25_RDSR 0x05 /* read status register */
#define AT25_WRSR 0x01 /* write status register */
#define AT25_READ 0x03 /* read byte(s) */
#define AT25_WRITE 0x02 /* write byte(s)/sector */
#define FM25_SLEEP 0xb9 /* enter sleep mode */
#define FM25_RDID 0x9f /* read device ID */
#define FM25_RDSN 0xc3 /* read S/N */
#define AT25_SR_nRDY 0x01 /* nRDY = write-in-progress */
#define AT25_SR_WEN 0x02 /* write enable (latched) */
#define AT25_SR_BP0 0x04 /* BP for software writeprotect */
#define AT25_SR_BP1 0x08
#define AT25_SR_WPEN 0x80 /* writeprotect enable */
#define AT25_INSTR_BIT3 0x08 /* additional address bit in instr */
#define FM25_ID_LEN 9 /* ID length */
/*
* Specs often allow 5ms for a page write, sometimes 20ms;
* it's important to recover from write timeouts.
*/
#define EE_TIMEOUT 25
/*-------------------------------------------------------------------------*/
#define io_limit PAGE_SIZE /* bytes */
static int at25_ee_read(void *priv, unsigned int offset,
void *val, size_t count)
{
struct at25_data *at25 = priv;
char *buf = val;
size_t max_chunk = spi_max_transfer_size(at25->spi);
unsigned int msg_offset = offset;
size_t bytes_left = count;
size_t segment;
u8 *cp;
ssize_t status;
struct spi_transfer t[2];
struct spi_message m;
u8 instr;
if (unlikely(offset >= at25->chip.byte_len))
return -EINVAL;
if ((offset + count) > at25->chip.byte_len)
count = at25->chip.byte_len - offset;
if (unlikely(!count))
return -EINVAL;
do {
segment = min(bytes_left, max_chunk);
cp = at25->command;
instr = AT25_READ;
if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR)
if (msg_offset >= BIT(at25->addrlen * 8))
instr |= AT25_INSTR_BIT3;
mutex_lock(&at25->lock);
*cp++ = instr;
/* 8/16/24-bit address is written MSB first */
switch (at25->addrlen) {
default: /* case 3 */
*cp++ = msg_offset >> 16;
fallthrough;
case 2:
*cp++ = msg_offset >> 8;
fallthrough;
case 1:
case 0: /* can't happen: for better code generation */
*cp++ = msg_offset >> 0;
}
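/*
 * Illustrative example: with addrlen == 3, a msg_offset of 0x012345 goes
 * on the wire as the bytes 0x01, 0x23, 0x45, immediately after the READ
 * opcode written above.
 */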
spi_message_init(&m);
memset(t, 0, sizeof(t));
t[0].tx_buf = at25->command;
t[0].len = at25->addrlen + 1;
spi_message_add_tail(&t[0], &m);
t[1].rx_buf = buf;
t[1].len = segment;
spi_message_add_tail(&t[1], &m);
status = spi_sync(at25->spi, &m);
mutex_unlock(&at25->lock);
if (status)
return status;
msg_offset += segment;
buf += segment;
bytes_left -= segment;
} while (bytes_left > 0);
dev_dbg(&at25->spi->dev, "read %zu bytes at %d\n",
count, offset);
return 0;
}
/* Read extra registers as ID or serial number */
static int fm25_aux_read(struct at25_data *at25, u8 *buf, uint8_t command,
int len)
{
int status;
struct spi_transfer t[2];
struct spi_message m;
spi_message_init(&m);
memset(t, 0, sizeof(t));
t[0].tx_buf = at25->command;
t[0].len = 1;
spi_message_add_tail(&t[0], &m);
t[1].rx_buf = buf;
t[1].len = len;
spi_message_add_tail(&t[1], &m);
mutex_lock(&at25->lock);
at25->command[0] = command;
status = spi_sync(at25->spi, &m);
dev_dbg(&at25->spi->dev, "read %d aux bytes --> %d\n", len, status);
mutex_unlock(&at25->lock);
return status;
}
static ssize_t sernum_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct at25_data *at25;
at25 = dev_get_drvdata(dev);
return sysfs_emit(buf, "%*ph\n", (int)sizeof(at25->sernum), at25->sernum);
}
static DEVICE_ATTR_RO(sernum);
static struct attribute *sernum_attrs[] = {
&dev_attr_sernum.attr,
NULL,
};
ATTRIBUTE_GROUPS(sernum);
static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
{
struct at25_data *at25 = priv;
size_t maxsz = spi_max_transfer_size(at25->spi);
const char *buf = val;
int status = 0;
unsigned buf_size;
u8 *bounce;
if (unlikely(off >= at25->chip.byte_len))
return -EFBIG;
if ((off + count) > at25->chip.byte_len)
count = at25->chip.byte_len - off;
if (unlikely(!count))
return -EINVAL;
/* Temp buffer starts with command and address */
buf_size = at25->chip.page_size;
if (buf_size > io_limit)
buf_size = io_limit;
bounce = kmalloc(buf_size + at25->addrlen + 1, GFP_KERNEL);
if (!bounce)
return -ENOMEM;
/*
* For write, rollover is within the page ... so we write at
* most one page, then manually roll over to the next page.
*/
mutex_lock(&at25->lock);
do {
unsigned long timeout, retries;
unsigned segment;
unsigned offset = off;
u8 *cp = bounce;
int sr;
u8 instr;
*cp = AT25_WREN;
status = spi_write(at25->spi, cp, 1);
if (status < 0) {
dev_dbg(&at25->spi->dev, "WREN --> %d\n", status);
break;
}
instr = AT25_WRITE;
if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR)
if (offset >= BIT(at25->addrlen * 8))
instr |= AT25_INSTR_BIT3;
*cp++ = instr;
/* 8/16/24-bit address is written MSB first */
switch (at25->addrlen) {
default: /* case 3 */
*cp++ = offset >> 16;
fallthrough;
case 2:
*cp++ = offset >> 8;
fallthrough;
case 1:
case 0: /* can't happen: for better code generation */
*cp++ = offset >> 0;
}
/* Write as much of a page as we can */
segment = buf_size - (offset % buf_size);
if (segment > count)
segment = count;
if (segment > maxsz)
segment = maxsz;
memcpy(cp, buf, segment);
status = spi_write(at25->spi, bounce,
segment + at25->addrlen + 1);
dev_dbg(&at25->spi->dev, "write %u bytes at %u --> %d\n",
segment, offset, status);
if (status < 0)
break;
/*
* REVISIT this should detect (or prevent) failed writes
* to read-only sections of the EEPROM...
*/
/* Wait for non-busy status */
timeout = jiffies + msecs_to_jiffies(EE_TIMEOUT);
retries = 0;
do {
sr = spi_w8r8(at25->spi, AT25_RDSR);
if (sr < 0 || (sr & AT25_SR_nRDY)) {
dev_dbg(&at25->spi->dev,
"rdsr --> %d (%02x)\n", sr, sr);
/* at HZ=100, this is sloooow */
msleep(1);
continue;
}
if (!(sr & AT25_SR_nRDY))
break;
} while (retries++ < 3 || time_before_eq(jiffies, timeout));
if ((sr < 0) || (sr & AT25_SR_nRDY)) {
dev_err(&at25->spi->dev,
"write %u bytes offset %u, timeout after %u msecs\n",
segment, offset,
jiffies_to_msecs(jiffies -
(timeout - EE_TIMEOUT)));
status = -ETIMEDOUT;
break;
}
off += segment;
buf += segment;
count -= segment;
} while (count > 0);
mutex_unlock(&at25->lock);
kfree(bounce);
return status;
}
/*-------------------------------------------------------------------------*/
static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip)
{
u32 val;
int err;
strscpy(chip->name, "at25", sizeof(chip->name));
err = device_property_read_u32(dev, "size", &val);
if (err)
err = device_property_read_u32(dev, "at25,byte-len", &val);
if (err) {
dev_err(dev, "Error: missing \"size\" property\n");
return err;
}
chip->byte_len = val;
err = device_property_read_u32(dev, "pagesize", &val);
if (err)
err = device_property_read_u32(dev, "at25,page-size", &val);
if (err) {
dev_err(dev, "Error: missing \"pagesize\" property\n");
return err;
}
chip->page_size = val;
err = device_property_read_u32(dev, "address-width", &val);
if (err) {
err = device_property_read_u32(dev, "at25,addr-mode", &val);
if (err) {
dev_err(dev, "Error: missing \"address-width\" property\n");
return err;
}
chip->flags = (u16)val;
} else {
switch (val) {
case 9:
chip->flags |= EE_INSTR_BIT3_IS_ADDR;
fallthrough;
case 8:
chip->flags |= EE_ADDR1;
break;
case 16:
chip->flags |= EE_ADDR2;
break;
case 24:
chip->flags |= EE_ADDR3;
break;
default:
dev_err(dev,
"Error: bad \"address-width\" property: %u\n",
val);
return -ENODEV;
}
if (device_property_present(dev, "read-only"))
chip->flags |= EE_READONLY;
}
return 0;
}
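/*
 * Illustrative DT fragment (a hypothetical board snippet, not part of this
 * file) that would satisfy the properties parsed above:
 *
 *	eeprom@0 {
 *		compatible = "atmel,at25";
 *		reg = <0>;
 *		spi-max-frequency = <1000000>;
 *		size = <32768>;
 *		pagesize = <64>;
 *		address-width = <16>;
 *	};
 */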
static int at25_fram_to_chip(struct device *dev, struct spi_eeprom *chip)
{
struct at25_data *at25 = container_of(chip, struct at25_data, chip);
u8 sernum[FM25_SN_LEN];
u8 id[FM25_ID_LEN];
int i;
strscpy(chip->name, "fm25", sizeof(chip->name));
/* Get ID of chip */
fm25_aux_read(at25, id, FM25_RDID, FM25_ID_LEN);
if (id[6] != 0xc2) {
dev_err(dev, "Error: no Cypress FRAM (id %02x)\n", id[6]);
return -ENODEV;
}
/* Set size found in ID */
if (id[7] < 0x21 || id[7] > 0x26) {
dev_err(dev, "Error: unsupported size (id %02x)\n", id[7]);
return -ENODEV;
}
chip->byte_len = BIT(id[7] - 0x21 + 4) * 1024;
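/*
 * Example (illustrative): id[7] == 0x24 gives BIT(0x24 - 0x21 + 4) ==
 * BIT(7) == 128, i.e. a 128 KByte part, which then needs EE_ADDR3 below.
 */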
if (chip->byte_len > 64 * 1024)
chip->flags |= EE_ADDR3;
else
chip->flags |= EE_ADDR2;
if (id[8]) {
fm25_aux_read(at25, sernum, FM25_RDSN, FM25_SN_LEN);
/* Swap byte order */
for (i = 0; i < FM25_SN_LEN; i++)
at25->sernum[i] = sernum[FM25_SN_LEN - 1 - i];
}
chip->page_size = PAGE_SIZE;
return 0;
}
static const struct of_device_id at25_of_match[] = {
{ .compatible = "atmel,at25" },
{ .compatible = "cypress,fm25" },
{ }
};
MODULE_DEVICE_TABLE(of, at25_of_match);
static const struct spi_device_id at25_spi_ids[] = {
{ .name = "at25" },
{ .name = "fm25" },
{ }
};
MODULE_DEVICE_TABLE(spi, at25_spi_ids);
static int at25_probe(struct spi_device *spi)
{
struct at25_data *at25 = NULL;
int err;
int sr;
struct spi_eeprom *pdata;
bool is_fram;
/*
* Ping the chip ... the status register is pretty portable,
* unlike probing manufacturer IDs. We do expect that system
* firmware didn't write it in the past few milliseconds!
*/
sr = spi_w8r8(spi, AT25_RDSR);
if (sr < 0 || sr & AT25_SR_nRDY) {
dev_dbg(&spi->dev, "rdsr --> %d (%02x)\n", sr, sr);
return -ENXIO;
}
at25 = devm_kzalloc(&spi->dev, sizeof(*at25), GFP_KERNEL);
if (!at25)
return -ENOMEM;
mutex_init(&at25->lock);
at25->spi = spi;
spi_set_drvdata(spi, at25);
is_fram = fwnode_device_is_compatible(dev_fwnode(&spi->dev), "cypress,fm25");
/* Chip description */
pdata = dev_get_platdata(&spi->dev);
if (pdata) {
at25->chip = *pdata;
} else {
if (is_fram)
err = at25_fram_to_chip(&spi->dev, &at25->chip);
else
err = at25_fw_to_chip(&spi->dev, &at25->chip);
if (err)
return err;
}
/* For now we only support 8/16/24 bit addressing */
if (at25->chip.flags & EE_ADDR1)
at25->addrlen = 1;
else if (at25->chip.flags & EE_ADDR2)
at25->addrlen = 2;
else if (at25->chip.flags & EE_ADDR3)
at25->addrlen = 3;
else {
dev_dbg(&spi->dev, "unsupported address type\n");
return -EINVAL;
}
at25->nvmem_config.type = is_fram ? NVMEM_TYPE_FRAM : NVMEM_TYPE_EEPROM;
at25->nvmem_config.name = dev_name(&spi->dev);
at25->nvmem_config.dev = &spi->dev;
at25->nvmem_config.read_only = at25->chip.flags & EE_READONLY;
at25->nvmem_config.root_only = true;
at25->nvmem_config.owner = THIS_MODULE;
at25->nvmem_config.compat = true;
at25->nvmem_config.base_dev = &spi->dev;
at25->nvmem_config.reg_read = at25_ee_read;
at25->nvmem_config.reg_write = at25_ee_write;
at25->nvmem_config.priv = at25;
at25->nvmem_config.stride = 1;
at25->nvmem_config.word_size = 1;
at25->nvmem_config.size = at25->chip.byte_len;
at25->nvmem = devm_nvmem_register(&spi->dev, &at25->nvmem_config);
if (IS_ERR(at25->nvmem))
return PTR_ERR(at25->nvmem);
dev_info(&spi->dev, "%d %s %s %s%s, pagesize %u\n",
(at25->chip.byte_len < 1024) ?
at25->chip.byte_len : (at25->chip.byte_len / 1024),
(at25->chip.byte_len < 1024) ? "Byte" : "KByte",
at25->chip.name, is_fram ? "fram" : "eeprom",
(at25->chip.flags & EE_READONLY) ? " (readonly)" : "",
at25->chip.page_size);
return 0;
}
/*-------------------------------------------------------------------------*/
static struct spi_driver at25_driver = {
.driver = {
.name = "at25",
.of_match_table = at25_of_match,
.dev_groups = sernum_groups,
},
.probe = at25_probe,
.id_table = at25_spi_ids,
};
module_spi_driver(at25_driver);
MODULE_DESCRIPTION("Driver for most SPI EEPROMs");
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:at25");
| linux-master | drivers/misc/eeprom/at25.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uacce.h>
static struct class *uacce_class;
static dev_t uacce_devt;
static DEFINE_XARRAY_ALLOC(uacce_xa);
/*
* If the parent driver or the device disappears, the queue state is invalid and
* ops are not usable anymore.
*/
static bool uacce_queue_is_valid(struct uacce_queue *q)
{
return q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED;
}
static int uacce_start_queue(struct uacce_queue *q)
{
int ret;
if (q->state != UACCE_Q_INIT)
return -EINVAL;
if (q->uacce->ops->start_queue) {
ret = q->uacce->ops->start_queue(q);
if (ret < 0)
return ret;
}
q->state = UACCE_Q_STARTED;
return 0;
}
static int uacce_put_queue(struct uacce_queue *q)
{
struct uacce_device *uacce = q->uacce;
if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
uacce->ops->stop_queue(q);
if ((q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED) &&
uacce->ops->put_queue)
uacce->ops->put_queue(q);
q->state = UACCE_Q_ZOMBIE;
return 0;
}
static long uacce_fops_unl_ioctl(struct file *filep,
unsigned int cmd, unsigned long arg)
{
struct uacce_queue *q = filep->private_data;
struct uacce_device *uacce = q->uacce;
long ret = -ENXIO;
/*
* uacce->ops->ioctl() may take the mmap_lock when copying arg to/from
* user. Avoid a circular lock dependency with uacce_fops_mmap(), which
* gets called with mmap_lock held, by taking uacce->mutex instead of
* q->mutex. Doing this in uacce_fops_mmap() is not possible because
* uacce_fops_open() calls iommu_sva_bind_device(), which takes
* mmap_lock, while holding uacce->mutex.
*/
mutex_lock(&uacce->mutex);
if (!uacce_queue_is_valid(q))
goto out_unlock;
switch (cmd) {
case UACCE_CMD_START_Q:
ret = uacce_start_queue(q);
break;
case UACCE_CMD_PUT_Q:
ret = uacce_put_queue(q);
break;
default:
if (uacce->ops->ioctl)
ret = uacce->ops->ioctl(q, cmd, arg);
else
ret = -EINVAL;
}
out_unlock:
mutex_unlock(&uacce->mutex);
return ret;
}
#ifdef CONFIG_COMPAT
static long uacce_fops_compat_ioctl(struct file *filep,
unsigned int cmd, unsigned long arg)
{
arg = (unsigned long)compat_ptr(arg);
return uacce_fops_unl_ioctl(filep, cmd, arg);
}
#endif
static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
{
u32 pasid;
struct iommu_sva *handle;
if (!(uacce->flags & UACCE_DEV_SVA))
return 0;
handle = iommu_sva_bind_device(uacce->parent, current->mm);
if (IS_ERR(handle))
return PTR_ERR(handle);
pasid = iommu_sva_get_pasid(handle);
if (pasid == IOMMU_PASID_INVALID) {
iommu_sva_unbind_device(handle);
return -ENODEV;
}
q->handle = handle;
q->pasid = pasid;
return 0;
}
static void uacce_unbind_queue(struct uacce_queue *q)
{
if (!q->handle)
return;
iommu_sva_unbind_device(q->handle);
q->handle = NULL;
}
static int uacce_fops_open(struct inode *inode, struct file *filep)
{
struct uacce_device *uacce;
struct uacce_queue *q;
int ret;
uacce = xa_load(&uacce_xa, iminor(inode));
if (!uacce)
return -ENODEV;
q = kzalloc(sizeof(struct uacce_queue), GFP_KERNEL);
if (!q)
return -ENOMEM;
mutex_lock(&uacce->mutex);
if (!uacce->parent) {
ret = -EINVAL;
goto out_with_mem;
}
ret = uacce_bind_queue(uacce, q);
if (ret)
goto out_with_mem;
q->uacce = uacce;
if (uacce->ops->get_queue) {
ret = uacce->ops->get_queue(uacce, q->pasid, q);
if (ret < 0)
goto out_with_bond;
}
init_waitqueue_head(&q->wait);
filep->private_data = q;
q->state = UACCE_Q_INIT;
q->mapping = filep->f_mapping;
mutex_init(&q->mutex);
list_add(&q->list, &uacce->queues);
mutex_unlock(&uacce->mutex);
return 0;
out_with_bond:
uacce_unbind_queue(q);
out_with_mem:
kfree(q);
mutex_unlock(&uacce->mutex);
return ret;
}
static int uacce_fops_release(struct inode *inode, struct file *filep)
{
struct uacce_queue *q = filep->private_data;
struct uacce_device *uacce = q->uacce;
mutex_lock(&uacce->mutex);
uacce_put_queue(q);
uacce_unbind_queue(q);
list_del(&q->list);
mutex_unlock(&uacce->mutex);
kfree(q);
return 0;
}
static void uacce_vma_close(struct vm_area_struct *vma)
{
struct uacce_queue *q = vma->vm_private_data;
if (vma->vm_pgoff < UACCE_MAX_REGION) {
struct uacce_qfile_region *qfr = q->qfrs[vma->vm_pgoff];
mutex_lock(&q->mutex);
q->qfrs[vma->vm_pgoff] = NULL;
mutex_unlock(&q->mutex);
kfree(qfr);
}
}
static const struct vm_operations_struct uacce_vm_ops = {
.close = uacce_vma_close,
};
static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
struct uacce_queue *q = filep->private_data;
struct uacce_device *uacce = q->uacce;
struct uacce_qfile_region *qfr;
enum uacce_qfrt type = UACCE_MAX_REGION;
int ret = 0;
if (vma->vm_pgoff < UACCE_MAX_REGION)
type = vma->vm_pgoff;
else
return -EINVAL;
qfr = kzalloc(sizeof(*qfr), GFP_KERNEL);
if (!qfr)
return -ENOMEM;
vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK);
vma->vm_ops = &uacce_vm_ops;
vma->vm_private_data = q;
qfr->type = type;
mutex_lock(&q->mutex);
if (!uacce_queue_is_valid(q)) {
ret = -ENXIO;
goto out_with_lock;
}
if (q->qfrs[type]) {
ret = -EEXIST;
goto out_with_lock;
}
switch (type) {
case UACCE_QFRT_MMIO:
case UACCE_QFRT_DUS:
if (!uacce->ops->mmap) {
ret = -EINVAL;
goto out_with_lock;
}
ret = uacce->ops->mmap(q, vma, qfr);
if (ret)
goto out_with_lock;
break;
default:
ret = -EINVAL;
goto out_with_lock;
}
q->qfrs[type] = qfr;
mutex_unlock(&q->mutex);
return ret;
out_with_lock:
mutex_unlock(&q->mutex);
kfree(qfr);
return ret;
}
static __poll_t uacce_fops_poll(struct file *file, poll_table *wait)
{
struct uacce_queue *q = file->private_data;
struct uacce_device *uacce = q->uacce;
__poll_t ret = 0;
mutex_lock(&q->mutex);
if (!uacce_queue_is_valid(q))
goto out_unlock;
poll_wait(file, &q->wait, wait);
if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
ret = EPOLLIN | EPOLLRDNORM;
out_unlock:
mutex_unlock(&q->mutex);
return ret;
}
static const struct file_operations uacce_fops = {
.owner = THIS_MODULE,
.open = uacce_fops_open,
.release = uacce_fops_release,
.unlocked_ioctl = uacce_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = uacce_fops_compat_ioctl,
#endif
.mmap = uacce_fops_mmap,
.poll = uacce_fops_poll,
};
#define to_uacce_device(dev) container_of(dev, struct uacce_device, dev)
static ssize_t api_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uacce_device *uacce = to_uacce_device(dev);
return sysfs_emit(buf, "%s\n", uacce->api_ver);
}
static ssize_t flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uacce_device *uacce = to_uacce_device(dev);
return sysfs_emit(buf, "%u\n", uacce->flags);
}
static ssize_t available_instances_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct uacce_device *uacce = to_uacce_device(dev);
if (!uacce->ops->get_available_instances)
return -ENODEV;
return sysfs_emit(buf, "%d\n",
uacce->ops->get_available_instances(uacce));
}
static ssize_t algorithms_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uacce_device *uacce = to_uacce_device(dev);
return sysfs_emit(buf, "%s\n", uacce->algs);
}
static ssize_t region_mmio_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uacce_device *uacce = to_uacce_device(dev);
return sysfs_emit(buf, "%lu\n",
uacce->qf_pg_num[UACCE_QFRT_MMIO] << PAGE_SHIFT);
}
static ssize_t region_dus_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uacce_device *uacce = to_uacce_device(dev);
return sysfs_emit(buf, "%lu\n",
uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT);
}
static ssize_t isolate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uacce_device *uacce = to_uacce_device(dev);
return sysfs_emit(buf, "%d\n", uacce->ops->get_isolate_state(uacce));
}
static ssize_t isolate_strategy_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct uacce_device *uacce = to_uacce_device(dev);
u32 val;
val = uacce->ops->isolate_err_threshold_read(uacce);
return sysfs_emit(buf, "%u\n", val);
}
static ssize_t isolate_strategy_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct uacce_device *uacce = to_uacce_device(dev);
unsigned long val;
int ret;
if (kstrtoul(buf, 0, &val) < 0)
return -EINVAL;
if (val > UACCE_MAX_ERR_THRESHOLD)
return -EINVAL;
ret = uacce->ops->isolate_err_threshold_write(uacce, val);
if (ret)
return ret;
return count;
}
static DEVICE_ATTR_RO(api);
static DEVICE_ATTR_RO(flags);
static DEVICE_ATTR_RO(available_instances);
static DEVICE_ATTR_RO(algorithms);
static DEVICE_ATTR_RO(region_mmio_size);
static DEVICE_ATTR_RO(region_dus_size);
static DEVICE_ATTR_RO(isolate);
static DEVICE_ATTR_RW(isolate_strategy);
static struct attribute *uacce_dev_attrs[] = {
&dev_attr_api.attr,
&dev_attr_flags.attr,
&dev_attr_available_instances.attr,
&dev_attr_algorithms.attr,
&dev_attr_region_mmio_size.attr,
&dev_attr_region_dus_size.attr,
&dev_attr_isolate.attr,
&dev_attr_isolate_strategy.attr,
NULL,
};
static umode_t uacce_dev_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct uacce_device *uacce = to_uacce_device(dev);
if (((attr == &dev_attr_region_mmio_size.attr) &&
(!uacce->qf_pg_num[UACCE_QFRT_MMIO])) ||
((attr == &dev_attr_region_dus_size.attr) &&
(!uacce->qf_pg_num[UACCE_QFRT_DUS])))
return 0;
if (attr == &dev_attr_isolate_strategy.attr &&
(!uacce->ops->isolate_err_threshold_read &&
!uacce->ops->isolate_err_threshold_write))
return 0;
if (attr == &dev_attr_isolate.attr && !uacce->ops->get_isolate_state)
return 0;
return attr->mode;
}
static struct attribute_group uacce_dev_group = {
.is_visible = uacce_dev_is_visible,
.attrs = uacce_dev_attrs,
};
__ATTRIBUTE_GROUPS(uacce_dev);
static void uacce_release(struct device *dev)
{
struct uacce_device *uacce = to_uacce_device(dev);
kfree(uacce);
}
static unsigned int uacce_enable_sva(struct device *parent, unsigned int flags)
{
int ret;
if (!(flags & UACCE_DEV_SVA))
return flags;
flags &= ~UACCE_DEV_SVA;
ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_IOPF);
if (ret) {
dev_err(parent, "failed to enable IOPF feature! ret = %pe\n", ERR_PTR(ret));
return flags;
}
ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
if (ret) {
dev_err(parent, "failed to enable SVA feature! ret = %pe\n", ERR_PTR(ret));
iommu_dev_disable_feature(parent, IOMMU_DEV_FEAT_IOPF);
return flags;
}
return flags | UACCE_DEV_SVA;
}
static void uacce_disable_sva(struct uacce_device *uacce)
{
if (!(uacce->flags & UACCE_DEV_SVA))
return;
iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_IOPF);
}
/**
* uacce_alloc() - alloc an accelerator
* @parent: pointer of uacce parent device
* @interface: pointer of uacce_interface for register
*
 * Returns a uacce pointer on success and ERR_PTR on failure.
 * Callers need to check the returned, negotiated uacce->flags.
*/
struct uacce_device *uacce_alloc(struct device *parent,
struct uacce_interface *interface)
{
unsigned int flags = interface->flags;
struct uacce_device *uacce;
int ret;
uacce = kzalloc(sizeof(struct uacce_device), GFP_KERNEL);
if (!uacce)
return ERR_PTR(-ENOMEM);
flags = uacce_enable_sva(parent, flags);
uacce->parent = parent;
uacce->flags = flags;
uacce->ops = interface->ops;
ret = xa_alloc(&uacce_xa, &uacce->dev_id, uacce, xa_limit_32b,
GFP_KERNEL);
if (ret < 0)
goto err_with_uacce;
INIT_LIST_HEAD(&uacce->queues);
mutex_init(&uacce->mutex);
device_initialize(&uacce->dev);
uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
uacce->dev.class = uacce_class;
uacce->dev.groups = uacce_dev_groups;
uacce->dev.parent = uacce->parent;
uacce->dev.release = uacce_release;
dev_set_name(&uacce->dev, "%s-%d", interface->name, uacce->dev_id);
return uacce;
err_with_uacce:
uacce_disable_sva(uacce);
kfree(uacce);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(uacce_alloc);
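/*
 * Minimal usage sketch (illustrative; "demo" and demo_ops are hypothetical
 * names, not part of this file): a parent driver allocates, checks the
 * negotiated flags, then registers.
 *
 *	struct uacce_interface ifc = {
 *		.name	= "demo",
 *		.flags	= UACCE_DEV_SVA,
 *		.ops	= &demo_ops,
 *	};
 *	struct uacce_device *uacce;
 *
 *	uacce = uacce_alloc(parent, &ifc);
 *	if (IS_ERR(uacce))
 *		return PTR_ERR(uacce);
 *	if (!(uacce->flags & UACCE_DEV_SVA))
 *		dev_warn(parent, "SVA not negotiated\n");
 *	return uacce_register(uacce);
 */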
/**
* uacce_register() - add the accelerator to cdev and export to user space
* @uacce: The initialized uacce device
*
* Return 0 if register succeeded, or an error.
*/
int uacce_register(struct uacce_device *uacce)
{
if (!uacce)
return -ENODEV;
uacce->cdev = cdev_alloc();
if (!uacce->cdev)
return -ENOMEM;
uacce->cdev->ops = &uacce_fops;
uacce->cdev->owner = THIS_MODULE;
return cdev_device_add(uacce->cdev, &uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_register);
/**
* uacce_remove() - remove the accelerator
* @uacce: the accelerator to remove
*/
void uacce_remove(struct uacce_device *uacce)
{
struct uacce_queue *q, *next_q;
if (!uacce)
return;
/*
* uacce_fops_open() may be running concurrently, even after we remove
* the cdev. Holding uacce->mutex ensures that open() does not obtain a
* removed uacce device.
*/
mutex_lock(&uacce->mutex);
/* ensure no open queue remains */
list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
/*
* Taking q->mutex ensures that fops do not use the defunct
* uacce->ops after the queue is disabled.
*/
mutex_lock(&q->mutex);
uacce_put_queue(q);
mutex_unlock(&q->mutex);
uacce_unbind_queue(q);
/*
 * Unmap remaining mappings from user space, preventing users from
 * still accessing the mmapped area while the parent device is
 * already removed.
*/
unmap_mapping_range(q->mapping, 0, 0, 1);
}
/* disable SVA now since there are no open queues */
uacce_disable_sva(uacce);
if (uacce->cdev)
cdev_device_del(uacce->cdev, &uacce->dev);
xa_erase(&uacce_xa, uacce->dev_id);
/*
* uacce exists as long as there are open fds, but ops will be freed
* now. Ensure that bugs cause NULL deref rather than use-after-free.
*/
uacce->ops = NULL;
uacce->parent = NULL;
mutex_unlock(&uacce->mutex);
put_device(&uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_remove);
static int __init uacce_init(void)
{
int ret;
uacce_class = class_create(UACCE_NAME);
if (IS_ERR(uacce_class))
return PTR_ERR(uacce_class);
ret = alloc_chrdev_region(&uacce_devt, 0, MINORMASK, UACCE_NAME);
if (ret)
class_destroy(uacce_class);
return ret;
}
static __exit void uacce_exit(void)
{
unregister_chrdev_region(uacce_devt, MINORMASK);
class_destroy(uacce_class);
}
subsys_initcall(uacce_init);
module_exit(uacce_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("HiSilicon Tech. Co., Ltd.");
MODULE_DESCRIPTION("Accelerator interface for Userland applications");
| linux-master | drivers/misc/uacce/uacce.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* lis3lv02d.c - ST LIS3LV02DL accelerometer driver
*
* Copyright (C) 2007-2008 Yan Burman
* Copyright (C) 2008 Eric Piel
* Copyright (C) 2008-2009 Pavel Machek
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/pm_runtime.h>
#include <linux/atomic.h>
#include <linux/of.h>
#include "lis3lv02d.h"
#define DRIVER_NAME "lis3lv02d"
/* joystick device poll interval in milliseconds */
#define MDPS_POLL_INTERVAL 50
#define MDPS_POLL_MIN 0
#define MDPS_POLL_MAX 2000
#define LIS3_SYSFS_POWERDOWN_DELAY 5000 /* In milliseconds */
#define SELFTEST_OK 0
#define SELFTEST_FAIL -1
#define SELFTEST_IRQ -2
#define IRQ_LINE0 0
#define IRQ_LINE1 1
/*
* The sensor can also generate interrupts (DRDY) but it's pretty pointless
* because they are generated even if the data do not change. So it's better
* to keep the interrupt for the free-fall event. The values are updated at
 * 40Hz (at the lowest frequency), but as reading them can be pretty time
 * consuming on some slow processors, we poll the sensor only at 20Hz...
 * enough for the joystick.
*/
#define LIS3_PWRON_DELAY_WAI_12B (5000)
#define LIS3_PWRON_DELAY_WAI_8B (3000)
/*
 * LIS3LV02D spec says 1024 LSBs correspond to 1 G -> 1 LSB is 1000/1024 mG
* LIS302D spec says: 18 mG / digit
* LIS3_ACCURACY is used to increase accuracy of the intermediate
* calculation results.
*/
#define LIS3_ACCURACY 1024
/* Sensitivity values for -2G +2G scale */
#define LIS3_SENSITIVITY_12B ((LIS3_ACCURACY * 1000) / 1024)
#define LIS3_SENSITIVITY_8B (18 * LIS3_ACCURACY)
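/*
 * Worked example (illustrative): with a 12-bit sensor the driver scales by
 * LIS3_SENSITIVITY_12B (== 1000), so a raw reading of 512 LSB becomes
 * (512 * 1000) / LIS3_ACCURACY == 500 mG -- the same arithmetic used in
 * lis3lv02d_get_xyz() below.
 */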
/*
 * LIS331DLH spec says 1 LSB corresponds to 4G/4096 -> 1 LSB is 1000/1024 mG.
 * The macros below define sensitivity values for the +/-2G range. Data-out
 * for the +/-2G range is 12 bits, so a 4-bit adjustment must be done to get
 * 12-bit data from the 16-bit value. Currently this driver supports only the
 * 2G range.
*/
#define LIS3DLH_SENSITIVITY_2G ((LIS3_ACCURACY * 1000) / 1024)
#define SHIFT_ADJ_2G 4
#define LIS3_DEFAULT_FUZZ_12B 3
#define LIS3_DEFAULT_FLAT_12B 3
#define LIS3_DEFAULT_FUZZ_8B 1
#define LIS3_DEFAULT_FLAT_8B 1
struct lis3lv02d lis3_dev = {
.misc_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lis3_dev.misc_wait),
};
EXPORT_SYMBOL_GPL(lis3_dev);
/*
 * Just like param_set_int(), but with a sanity check so that the value
 * cannot index past the end of the axis array.
 */
static int param_set_axis(const char *val, const struct kernel_param *kp)
{
int ret = param_set_int(val, kp);
if (!ret) {
int val = *(int *)kp->arg;
if (val < 0)
val = -val;
if (!val || val > 3)
return -EINVAL;
}
return ret;
}
static const struct kernel_param_ops param_ops_axis = {
.set = param_set_axis,
.get = param_get_int,
};
#define param_check_axis(name, p) param_check_int(name, p)
module_param_array_named(axes, lis3_dev.ac.as_array, axis, NULL, 0644);
MODULE_PARM_DESC(axes, "Axis-mapping for x,y,z directions");
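/*
 * Illustrative usage (hypothetical command line): booting with
 * lis3lv02d.axes=-1,2,3 keeps Y and Z as-is but flips the X axis; values
 * of 0 or outside the 1..3 range are rejected by param_set_axis() above.
 */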
static s16 lis3lv02d_read_8(struct lis3lv02d *lis3, int reg)
{
s8 lo;
if (lis3->read(lis3, reg, &lo) < 0)
return 0;
return lo;
}
static s16 lis3lv02d_read_12(struct lis3lv02d *lis3, int reg)
{
u8 lo, hi;
lis3->read(lis3, reg - 1, &lo);
lis3->read(lis3, reg, &hi);
/* In "12 bit right justified" mode, bit 6, bit 7, bit 8 = bit 5 */
return (s16)((hi << 8) | lo);
}
/* 12bits for 2G range, 13 bits for 4G range and 14 bits for 8G range */
static s16 lis331dlh_read_data(struct lis3lv02d *lis3, int reg)
{
u8 lo, hi;
int v;
lis3->read(lis3, reg - 1, &lo);
lis3->read(lis3, reg, &hi);
v = (int) ((hi << 8) | lo);
return (s16) v >> lis3->shift_adj;
}
/**
* lis3lv02d_get_axis - For the given axis, give the value converted
* @axis: 1,2,3 - can also be negative
* @hw_values: raw values returned by the hardware
*
* Returns the converted value.
*/
static inline int lis3lv02d_get_axis(s8 axis, int hw_values[3])
{
if (axis > 0)
return hw_values[axis - 1];
else
return -hw_values[-axis - 1];
}
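/*
 * Example (illustrative): with hw_values = {10, 20, 30}, axis 2 yields 20
 * while axis -2 yields -20, which is the sign-plus-index remap described
 * in the kernel-doc above.
 */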
/**
* lis3lv02d_get_xyz - Get X, Y and Z axis values from the accelerometer
* @lis3: pointer to the device struct
* @x: where to store the X axis value
* @y: where to store the Y axis value
* @z: where to store the Z axis value
*
 * Note that a 40Hz input device can eat up about 10% CPU at 800MHz
*/
static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z)
{
int position[3];
int i;
if (lis3->blkread) {
if (lis3->whoami == WAI_12B) {
u16 data[3];
lis3->blkread(lis3, OUTX_L, 6, (u8 *)data);
for (i = 0; i < 3; i++)
position[i] = (s16)le16_to_cpu(data[i]);
} else {
u8 data[5];
/* Data: x, dummy, y, dummy, z */
lis3->blkread(lis3, OUTX, 5, data);
for (i = 0; i < 3; i++)
position[i] = (s8)data[i * 2];
}
} else {
position[0] = lis3->read_data(lis3, OUTX);
position[1] = lis3->read_data(lis3, OUTY);
position[2] = lis3->read_data(lis3, OUTZ);
}
for (i = 0; i < 3; i++)
position[i] = (position[i] * lis3->scale) / LIS3_ACCURACY;
*x = lis3lv02d_get_axis(lis3->ac.x, position);
*y = lis3lv02d_get_axis(lis3->ac.y, position);
*z = lis3lv02d_get_axis(lis3->ac.z, position);
}
/* Conversion between sampling rate and the register values */
static int lis3_12_rates[4] = {40, 160, 640, 2560};
static int lis3_8_rates[2] = {100, 400};
static int lis3_3dc_rates[16] = {0, 1, 10, 25, 50, 100, 200, 400, 1600, 5000};
static int lis3_3dlh_rates[4] = {50, 100, 400, 1000};
/* ODR is Output Data Rate */
static int lis3lv02d_get_odr_index(struct lis3lv02d *lis3)
{
u8 ctrl;
int shift;
lis3->read(lis3, CTRL_REG1, &ctrl);
ctrl &= lis3->odr_mask;
shift = ffs(lis3->odr_mask) - 1;
return (ctrl >> shift);
}
static int lis3lv02d_get_pwron_wait(struct lis3lv02d *lis3)
{
int odr_idx = lis3lv02d_get_odr_index(lis3);
int div = lis3->odrs[odr_idx];
if (div == 0) {
if (odr_idx == 0) {
/* Power-down mode, not sampling, so no need to sleep */
return 0;
}
dev_err(&lis3->pdev->dev, "Error: unknown odr index: %d\n", odr_idx);
return -ENXIO;
}
/* LIS3 power on delay is quite long */
msleep(lis3->pwron_delay / div);
return 0;
}
static int lis3lv02d_set_odr(struct lis3lv02d *lis3, int rate)
{
u8 ctrl;
int i, len, shift;
if (!rate)
return -EINVAL;
lis3->read(lis3, CTRL_REG1, &ctrl);
ctrl &= ~lis3->odr_mask;
len = 1 << hweight_long(lis3->odr_mask); /* # of possible values */
shift = ffs(lis3->odr_mask) - 1;
for (i = 0; i < len; i++)
if (lis3->odrs[i] == rate) {
lis3->write(lis3, CTRL_REG1,
ctrl | (i << shift));
return 0;
}
return -EINVAL;
}
static int lis3lv02d_selftest(struct lis3lv02d *lis3, s16 results[3])
{
u8 ctlreg, reg;
s16 x, y, z;
u8 selftest;
int ret;
u8 ctrl_reg_data;
unsigned char irq_cfg;
mutex_lock(&lis3->mutex);
irq_cfg = lis3->irq_cfg;
if (lis3->whoami == WAI_8B) {
lis3->data_ready_count[IRQ_LINE0] = 0;
lis3->data_ready_count[IRQ_LINE1] = 0;
/* Change interrupt cfg to data ready for selftest */
atomic_inc(&lis3->wake_thread);
lis3->irq_cfg = LIS3_IRQ1_DATA_READY | LIS3_IRQ2_DATA_READY;
lis3->read(lis3, CTRL_REG3, &ctrl_reg_data);
lis3->write(lis3, CTRL_REG3, (ctrl_reg_data &
~(LIS3_IRQ1_MASK | LIS3_IRQ2_MASK)) |
(LIS3_IRQ1_DATA_READY | LIS3_IRQ2_DATA_READY));
}
if ((lis3->whoami == WAI_3DC) || (lis3->whoami == WAI_3DLH)) {
ctlreg = CTRL_REG4;
selftest = CTRL4_ST0;
} else {
ctlreg = CTRL_REG1;
if (lis3->whoami == WAI_12B)
selftest = CTRL1_ST;
else
selftest = CTRL1_STP;
}
lis3->read(lis3, ctlreg, ®);
lis3->write(lis3, ctlreg, (reg | selftest));
ret = lis3lv02d_get_pwron_wait(lis3);
if (ret)
goto fail;
/* Read directly to avoid axis remap */
x = lis3->read_data(lis3, OUTX);
y = lis3->read_data(lis3, OUTY);
z = lis3->read_data(lis3, OUTZ);
/* back to normal settings */
lis3->write(lis3, ctlreg, reg);
ret = lis3lv02d_get_pwron_wait(lis3);
if (ret)
goto fail;
results[0] = x - lis3->read_data(lis3, OUTX);
results[1] = y - lis3->read_data(lis3, OUTY);
results[2] = z - lis3->read_data(lis3, OUTZ);
ret = 0;
if (lis3->whoami == WAI_8B) {
/* Restore original interrupt configuration */
atomic_dec(&lis3->wake_thread);
lis3->write(lis3, CTRL_REG3, ctrl_reg_data);
lis3->irq_cfg = irq_cfg;
if ((irq_cfg & LIS3_IRQ1_MASK) &&
lis3->data_ready_count[IRQ_LINE0] < 2) {
ret = SELFTEST_IRQ;
goto fail;
}
if ((irq_cfg & LIS3_IRQ2_MASK) &&
lis3->data_ready_count[IRQ_LINE1] < 2) {
ret = SELFTEST_IRQ;
goto fail;
}
}
if (lis3->pdata) {
int i;
for (i = 0; i < 3; i++) {
/* Check against selftest acceptance limits */
if ((results[i] < lis3->pdata->st_min_limits[i]) ||
(results[i] > lis3->pdata->st_max_limits[i])) {
ret = SELFTEST_FAIL;
goto fail;
}
}
}
/* test passed */
fail:
mutex_unlock(&lis3->mutex);
return ret;
}
/*
 * The order of registers in the list affects the order of the restore
 * process. It is probably a good idea to set the interrupt enable register
 * last, after all other configuration.
*/
static u8 lis3_wai8_regs[] = { FF_WU_CFG_1, FF_WU_THS_1, FF_WU_DURATION_1,
FF_WU_CFG_2, FF_WU_THS_2, FF_WU_DURATION_2,
CLICK_CFG, CLICK_SRC, CLICK_THSY_X, CLICK_THSZ,
CLICK_TIMELIMIT, CLICK_LATENCY, CLICK_WINDOW,
CTRL_REG1, CTRL_REG2, CTRL_REG3};
static u8 lis3_wai12_regs[] = {FF_WU_CFG, FF_WU_THS_L, FF_WU_THS_H,
FF_WU_DURATION, DD_CFG, DD_THSI_L, DD_THSI_H,
DD_THSE_L, DD_THSE_H,
CTRL_REG1, CTRL_REG3, CTRL_REG2};
static inline void lis3_context_save(struct lis3lv02d *lis3)
{
int i;
for (i = 0; i < lis3->regs_size; i++)
lis3->read(lis3, lis3->regs[i], &lis3->reg_cache[i]);
lis3->regs_stored = true;
}
static inline void lis3_context_restore(struct lis3lv02d *lis3)
{
int i;
if (lis3->regs_stored)
for (i = 0; i < lis3->regs_size; i++)
lis3->write(lis3, lis3->regs[i], lis3->reg_cache[i]);
}
void lis3lv02d_poweroff(struct lis3lv02d *lis3)
{
if (lis3->reg_ctrl)
lis3_context_save(lis3);
/* disable X,Y,Z axis and power down */
lis3->write(lis3, CTRL_REG1, 0x00);
if (lis3->reg_ctrl)
lis3->reg_ctrl(lis3, LIS3_REG_OFF);
}
EXPORT_SYMBOL_GPL(lis3lv02d_poweroff);
int lis3lv02d_poweron(struct lis3lv02d *lis3)
{
int err;
u8 reg;
lis3->init(lis3);
/*
* Common configuration
 * BDU: (12-bit sensors only) LSB and MSB values are not updated until
* both have been read. So the value read will always be correct.
* Set BOOT bit to refresh factory tuning values.
*/
if (lis3->pdata) {
lis3->read(lis3, CTRL_REG2, ®);
if (lis3->whoami == WAI_12B)
reg |= CTRL2_BDU | CTRL2_BOOT;
else if (lis3->whoami == WAI_3DLH)
reg |= CTRL2_BOOT_3DLH;
else
reg |= CTRL2_BOOT_8B;
lis3->write(lis3, CTRL_REG2, reg);
if (lis3->whoami == WAI_3DLH) {
lis3->read(lis3, CTRL_REG4, ®);
reg |= CTRL4_BDU;
lis3->write(lis3, CTRL_REG4, reg);
}
}
err = lis3lv02d_get_pwron_wait(lis3);
if (err)
return err;
if (lis3->reg_ctrl)
lis3_context_restore(lis3);
return 0;
}
EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
static void lis3lv02d_joystick_poll(struct input_dev *input)
{
struct lis3lv02d *lis3 = input_get_drvdata(input);
int x, y, z;
mutex_lock(&lis3->mutex);
lis3lv02d_get_xyz(lis3, &x, &y, &z);
input_report_abs(input, ABS_X, x);
input_report_abs(input, ABS_Y, y);
input_report_abs(input, ABS_Z, z);
input_sync(input);
mutex_unlock(&lis3->mutex);
}
static int lis3lv02d_joystick_open(struct input_dev *input)
{
struct lis3lv02d *lis3 = input_get_drvdata(input);
if (lis3->pm_dev)
pm_runtime_get_sync(lis3->pm_dev);
if (lis3->pdata && lis3->whoami == WAI_8B && lis3->idev)
atomic_set(&lis3->wake_thread, 1);
/*
 * Update coordinates for the case where the poll interval is 0 and
 * the chip is running purely under interrupt control.
*/
lis3lv02d_joystick_poll(input);
return 0;
}
static void lis3lv02d_joystick_close(struct input_dev *input)
{
struct lis3lv02d *lis3 = input_get_drvdata(input);
atomic_set(&lis3->wake_thread, 0);
if (lis3->pm_dev)
pm_runtime_put(lis3->pm_dev);
}
static irqreturn_t lis302dl_interrupt(int irq, void *data)
{
struct lis3lv02d *lis3 = data;
if (!test_bit(0, &lis3->misc_opened))
goto out;
/*
 * Be careful: on some HP laptops the BIOS forces DD when on battery
 * and the lid is closed. This leads to interrupts as soon as a small
 * move is made.
*/
atomic_inc(&lis3->count);
wake_up_interruptible(&lis3->misc_wait);
kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
out:
if (atomic_read(&lis3->wake_thread))
return IRQ_WAKE_THREAD;
return IRQ_HANDLED;
}
static void lis302dl_interrupt_handle_click(struct lis3lv02d *lis3)
{
struct input_dev *dev = lis3->idev;
u8 click_src;
mutex_lock(&lis3->mutex);
lis3->read(lis3, CLICK_SRC, &click_src);
if (click_src & CLICK_SINGLE_X) {
input_report_key(dev, lis3->mapped_btns[0], 1);
input_report_key(dev, lis3->mapped_btns[0], 0);
}
if (click_src & CLICK_SINGLE_Y) {
input_report_key(dev, lis3->mapped_btns[1], 1);
input_report_key(dev, lis3->mapped_btns[1], 0);
}
if (click_src & CLICK_SINGLE_Z) {
input_report_key(dev, lis3->mapped_btns[2], 1);
input_report_key(dev, lis3->mapped_btns[2], 0);
}
input_sync(dev);
mutex_unlock(&lis3->mutex);
}
static inline void lis302dl_data_ready(struct lis3lv02d *lis3, int index)
{
int dummy;
/* Dummy read to ack interrupt */
lis3lv02d_get_xyz(lis3, &dummy, &dummy, &dummy);
lis3->data_ready_count[index]++;
}
static irqreturn_t lis302dl_interrupt_thread1_8b(int irq, void *data)
{
struct lis3lv02d *lis3 = data;
u8 irq_cfg = lis3->irq_cfg & LIS3_IRQ1_MASK;
if (irq_cfg == LIS3_IRQ1_CLICK)
lis302dl_interrupt_handle_click(lis3);
else if (unlikely(irq_cfg == LIS3_IRQ1_DATA_READY))
lis302dl_data_ready(lis3, IRQ_LINE0);
else
lis3lv02d_joystick_poll(lis3->idev);
return IRQ_HANDLED;
}
static irqreturn_t lis302dl_interrupt_thread2_8b(int irq, void *data)
{
struct lis3lv02d *lis3 = data;
u8 irq_cfg = lis3->irq_cfg & LIS3_IRQ2_MASK;
if (irq_cfg == LIS3_IRQ2_CLICK)
lis302dl_interrupt_handle_click(lis3);
else if (unlikely(irq_cfg == LIS3_IRQ2_DATA_READY))
lis302dl_data_ready(lis3, IRQ_LINE1);
else
lis3lv02d_joystick_poll(lis3->idev);
return IRQ_HANDLED;
}
static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
{
struct lis3lv02d *lis3 = container_of(file->private_data,
struct lis3lv02d, miscdev);
if (test_and_set_bit(0, &lis3->misc_opened))
return -EBUSY; /* already open */
if (lis3->pm_dev)
pm_runtime_get_sync(lis3->pm_dev);
atomic_set(&lis3->count, 0);
return 0;
}
static int lis3lv02d_misc_release(struct inode *inode, struct file *file)
{
struct lis3lv02d *lis3 = container_of(file->private_data,
struct lis3lv02d, miscdev);
clear_bit(0, &lis3->misc_opened); /* release the device */
if (lis3->pm_dev)
pm_runtime_put(lis3->pm_dev);
return 0;
}
static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
struct lis3lv02d *lis3 = container_of(file->private_data,
struct lis3lv02d, miscdev);
DECLARE_WAITQUEUE(wait, current);
u32 data;
unsigned char byte_data;
ssize_t retval = 1;
if (count < 1)
return -EINVAL;
add_wait_queue(&lis3->misc_wait, &wait);
while (true) {
set_current_state(TASK_INTERRUPTIBLE);
data = atomic_xchg(&lis3->count, 0);
if (data)
break;
if (file->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
goto out;
}
if (signal_pending(current)) {
retval = -ERESTARTSYS;
goto out;
}
schedule();
}
if (data < 255)
byte_data = data;
else
byte_data = 255;
/*
 * Make sure we are not going into copy_to_user() with
 * TASK_INTERRUPTIBLE state.
 */
set_current_state(TASK_RUNNING);
if (copy_to_user(buf, &byte_data, sizeof(byte_data)))
retval = -EFAULT;
out:
__set_current_state(TASK_RUNNING);
remove_wait_queue(&lis3->misc_wait, &wait);
return retval;
}
static __poll_t lis3lv02d_misc_poll(struct file *file, poll_table *wait)
{
struct lis3lv02d *lis3 = container_of(file->private_data,
struct lis3lv02d, miscdev);
poll_wait(file, &lis3->misc_wait, wait);
if (atomic_read(&lis3->count))
return EPOLLIN | EPOLLRDNORM;
return 0;
}
static int lis3lv02d_misc_fasync(int fd, struct file *file, int on)
{
struct lis3lv02d *lis3 = container_of(file->private_data,
struct lis3lv02d, miscdev);
return fasync_helper(fd, file, on, &lis3->async_queue);
}
static const struct file_operations lis3lv02d_misc_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = lis3lv02d_misc_read,
.open = lis3lv02d_misc_open,
.release = lis3lv02d_misc_release,
.poll = lis3lv02d_misc_poll,
.fasync = lis3lv02d_misc_fasync,
};
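/*
 * Illustrative userspace view (a sketch, not kernel code; the misc device
 * name is registered elsewhere and elided here): each blocking read() on
 * this device waits for at least one interrupt, then returns a single byte
 * holding the number of events since the previous read, saturated at 255 --
 * see lis3lv02d_misc_read() above.
 *
 *	unsigned char n;
 *	int fd = open("/dev/...", O_RDONLY);
 *
 *	if (fd >= 0 && read(fd, &n, 1) == 1)
 *		printf("%u events\n", n);
 */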
int lis3lv02d_joystick_enable(struct lis3lv02d *lis3)
{
struct input_dev *input_dev;
int err;
int max_val, fuzz, flat;
int btns[] = {BTN_X, BTN_Y, BTN_Z};
if (lis3->idev)
return -EINVAL;
input_dev = input_allocate_device();
if (!input_dev)
return -ENOMEM;
input_dev->name = "ST LIS3LV02DL Accelerometer";
input_dev->phys = DRIVER_NAME "/input0";
input_dev->id.bustype = BUS_HOST;
input_dev->id.vendor = 0;
input_dev->dev.parent = &lis3->pdev->dev;
input_dev->open = lis3lv02d_joystick_open;
input_dev->close = lis3lv02d_joystick_close;
max_val = (lis3->mdps_max_val * lis3->scale) / LIS3_ACCURACY;
if (lis3->whoami == WAI_12B) {
fuzz = LIS3_DEFAULT_FUZZ_12B;
flat = LIS3_DEFAULT_FLAT_12B;
} else {
fuzz = LIS3_DEFAULT_FUZZ_8B;
flat = LIS3_DEFAULT_FLAT_8B;
}
fuzz = (fuzz * lis3->scale) / LIS3_ACCURACY;
flat = (flat * lis3->scale) / LIS3_ACCURACY;
input_set_abs_params(input_dev, ABS_X, -max_val, max_val, fuzz, flat);
input_set_abs_params(input_dev, ABS_Y, -max_val, max_val, fuzz, flat);
input_set_abs_params(input_dev, ABS_Z, -max_val, max_val, fuzz, flat);
input_set_drvdata(input_dev, lis3);
lis3->idev = input_dev;
err = input_setup_polling(input_dev, lis3lv02d_joystick_poll);
if (err)
goto err_free_input;
input_set_poll_interval(input_dev, MDPS_POLL_INTERVAL);
input_set_min_poll_interval(input_dev, MDPS_POLL_MIN);
input_set_max_poll_interval(input_dev, MDPS_POLL_MAX);
lis3->mapped_btns[0] = lis3lv02d_get_axis(abs(lis3->ac.x), btns);
lis3->mapped_btns[1] = lis3lv02d_get_axis(abs(lis3->ac.y), btns);
lis3->mapped_btns[2] = lis3lv02d_get_axis(abs(lis3->ac.z), btns);
err = input_register_device(lis3->idev);
if (err)
goto err_free_input;
return 0;
err_free_input:
input_free_device(input_dev);
lis3->idev = NULL;
return err;
}
EXPORT_SYMBOL_GPL(lis3lv02d_joystick_enable);
void lis3lv02d_joystick_disable(struct lis3lv02d *lis3)
{
if (lis3->irq)
free_irq(lis3->irq, lis3);
if (lis3->pdata && lis3->pdata->irq2)
free_irq(lis3->pdata->irq2, lis3);
if (!lis3->idev)
return;
if (lis3->irq)
misc_deregister(&lis3->miscdev);
input_unregister_device(lis3->idev);
lis3->idev = NULL;
}
EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable);
/* Sysfs stuff */
static void lis3lv02d_sysfs_poweron(struct lis3lv02d *lis3)
{
/*
 * Sysfs functions are fast visitors, so issue the put call
 * immediately after the get call. However, keep the chip running
 * for a while and schedule a delayed suspend. This way periodic
 * sysfs calls don't suffer from the relatively long power-up time.
*/
if (lis3->pm_dev) {
pm_runtime_get_sync(lis3->pm_dev);
pm_runtime_put_noidle(lis3->pm_dev);
pm_schedule_suspend(lis3->pm_dev, LIS3_SYSFS_POWERDOWN_DELAY);
}
}
static ssize_t lis3lv02d_selftest_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lis3lv02d *lis3 = dev_get_drvdata(dev);
s16 values[3];
static const char ok[] = "OK";
static const char fail[] = "FAIL";
static const char irq[] = "FAIL_IRQ";
const char *res;
lis3lv02d_sysfs_poweron(lis3);
switch (lis3lv02d_selftest(lis3, values)) {
case SELFTEST_FAIL:
res = fail;
break;
case SELFTEST_IRQ:
res = irq;
break;
case SELFTEST_OK:
default:
res = ok;
break;
}
return sprintf(buf, "%s %d %d %d\n", res,
values[0], values[1], values[2]);
}
static ssize_t lis3lv02d_position_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lis3lv02d *lis3 = dev_get_drvdata(dev);
int x, y, z;
lis3lv02d_sysfs_poweron(lis3);
mutex_lock(&lis3->mutex);
lis3lv02d_get_xyz(lis3, &x, &y, &z);
mutex_unlock(&lis3->mutex);
return sprintf(buf, "(%d,%d,%d)\n", x, y, z);
}
static ssize_t lis3lv02d_rate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lis3lv02d *lis3 = dev_get_drvdata(dev);
int odr_idx;
lis3lv02d_sysfs_poweron(lis3);
odr_idx = lis3lv02d_get_odr_index(lis3);
return sprintf(buf, "%d\n", lis3->odrs[odr_idx]);
}
static ssize_t lis3lv02d_rate_set(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct lis3lv02d *lis3 = dev_get_drvdata(dev);
unsigned long rate;
int ret;
ret = kstrtoul(buf, 0, &rate);
if (ret)
return ret;
lis3lv02d_sysfs_poweron(lis3);
if (lis3lv02d_set_odr(lis3, rate))
return -EINVAL;
return count;
}
static DEVICE_ATTR(selftest, S_IRUSR, lis3lv02d_selftest_show, NULL);
static DEVICE_ATTR(position, S_IRUGO, lis3lv02d_position_show, NULL);
static DEVICE_ATTR(rate, S_IRUGO | S_IWUSR, lis3lv02d_rate_show,
lis3lv02d_rate_set);
static struct attribute *lis3lv02d_attributes[] = {
&dev_attr_selftest.attr,
&dev_attr_position.attr,
&dev_attr_rate.attr,
NULL
};
static const struct attribute_group lis3lv02d_attribute_group = {
.attrs = lis3lv02d_attributes
};
static int lis3lv02d_add_fs(struct lis3lv02d *lis3)
{
lis3->pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
if (IS_ERR(lis3->pdev))
return PTR_ERR(lis3->pdev);
platform_set_drvdata(lis3->pdev, lis3);
return sysfs_create_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group);
}
void lis3lv02d_remove_fs(struct lis3lv02d *lis3)
{
sysfs_remove_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group);
platform_device_unregister(lis3->pdev);
if (lis3->pm_dev) {
/* Barrier after the sysfs remove */
pm_runtime_barrier(lis3->pm_dev);
/* SYSFS may have left chip running. Turn off if necessary */
if (!pm_runtime_suspended(lis3->pm_dev))
lis3lv02d_poweroff(lis3);
pm_runtime_disable(lis3->pm_dev);
pm_runtime_set_suspended(lis3->pm_dev);
}
kfree(lis3->reg_cache);
}
EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs);
static void lis3lv02d_8b_configure(struct lis3lv02d *lis3,
struct lis3lv02d_platform_data *p)
{
int err;
int ctrl2 = p->hipass_ctrl;
if (p->click_flags) {
lis3->write(lis3, CLICK_CFG, p->click_flags);
lis3->write(lis3, CLICK_TIMELIMIT, p->click_time_limit);
lis3->write(lis3, CLICK_LATENCY, p->click_latency);
lis3->write(lis3, CLICK_WINDOW, p->click_window);
lis3->write(lis3, CLICK_THSZ, p->click_thresh_z & 0xf);
lis3->write(lis3, CLICK_THSY_X,
(p->click_thresh_x & 0xf) |
(p->click_thresh_y << 4));
if (lis3->idev) {
input_set_capability(lis3->idev, EV_KEY, BTN_X);
input_set_capability(lis3->idev, EV_KEY, BTN_Y);
input_set_capability(lis3->idev, EV_KEY, BTN_Z);
}
}
if (p->wakeup_flags) {
lis3->write(lis3, FF_WU_CFG_1, p->wakeup_flags);
lis3->write(lis3, FF_WU_THS_1, p->wakeup_thresh & 0x7f);
/* pdata value + 1 to keep this backward compatible */
lis3->write(lis3, FF_WU_DURATION_1, p->duration1 + 1);
ctrl2 ^= HP_FF_WU1; /* XOR to keep compatible with old pdata */
}
if (p->wakeup_flags2) {
lis3->write(lis3, FF_WU_CFG_2, p->wakeup_flags2);
lis3->write(lis3, FF_WU_THS_2, p->wakeup_thresh2 & 0x7f);
/* pdata value + 1 to keep this backward compatible */
lis3->write(lis3, FF_WU_DURATION_2, p->duration2 + 1);
ctrl2 ^= HP_FF_WU2; /* XOR to keep compatible with old pdata */
}
/* Configure hipass filters */
lis3->write(lis3, CTRL_REG2, ctrl2);
if (p->irq2) {
err = request_threaded_irq(p->irq2,
NULL,
lis302dl_interrupt_thread2_8b,
IRQF_TRIGGER_RISING | IRQF_ONESHOT |
(p->irq_flags2 & IRQF_TRIGGER_MASK),
DRIVER_NAME, lis3);
if (err < 0)
pr_err("No second IRQ. Limited functionality\n");
}
}
#ifdef CONFIG_OF
int lis3lv02d_init_dt(struct lis3lv02d *lis3)
{
struct lis3lv02d_platform_data *pdata;
struct device_node *np = lis3->of_node;
u32 val;
s32 sval;
if (!lis3->of_node)
return 0;
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
if (of_property_read_bool(np, "st,click-single-x"))
pdata->click_flags |= LIS3_CLICK_SINGLE_X;
if (of_property_read_bool(np, "st,click-double-x"))
pdata->click_flags |= LIS3_CLICK_DOUBLE_X;
if (of_property_read_bool(np, "st,click-single-y"))
pdata->click_flags |= LIS3_CLICK_SINGLE_Y;
if (of_property_read_bool(np, "st,click-double-y"))
pdata->click_flags |= LIS3_CLICK_DOUBLE_Y;
if (of_property_read_bool(np, "st,click-single-z"))
pdata->click_flags |= LIS3_CLICK_SINGLE_Z;
if (of_property_read_bool(np, "st,click-double-z"))
pdata->click_flags |= LIS3_CLICK_DOUBLE_Z;
if (!of_property_read_u32(np, "st,click-threshold-x", &val))
pdata->click_thresh_x = val;
if (!of_property_read_u32(np, "st,click-threshold-y", &val))
pdata->click_thresh_y = val;
if (!of_property_read_u32(np, "st,click-threshold-z", &val))
pdata->click_thresh_z = val;
if (!of_property_read_u32(np, "st,click-time-limit", &val))
pdata->click_time_limit = val;
if (!of_property_read_u32(np, "st,click-latency", &val))
pdata->click_latency = val;
if (!of_property_read_u32(np, "st,click-window", &val))
pdata->click_window = val;
if (of_property_read_bool(np, "st,irq1-disable"))
pdata->irq_cfg |= LIS3_IRQ1_DISABLE;
if (of_property_read_bool(np, "st,irq1-ff-wu-1"))
pdata->irq_cfg |= LIS3_IRQ1_FF_WU_1;
if (of_property_read_bool(np, "st,irq1-ff-wu-2"))
pdata->irq_cfg |= LIS3_IRQ1_FF_WU_2;
if (of_property_read_bool(np, "st,irq1-data-ready"))
pdata->irq_cfg |= LIS3_IRQ1_DATA_READY;
if (of_property_read_bool(np, "st,irq1-click"))
pdata->irq_cfg |= LIS3_IRQ1_CLICK;
if (of_property_read_bool(np, "st,irq2-disable"))
pdata->irq_cfg |= LIS3_IRQ2_DISABLE;
if (of_property_read_bool(np, "st,irq2-ff-wu-1"))
pdata->irq_cfg |= LIS3_IRQ2_FF_WU_1;
if (of_property_read_bool(np, "st,irq2-ff-wu-2"))
pdata->irq_cfg |= LIS3_IRQ2_FF_WU_2;
if (of_property_read_bool(np, "st,irq2-data-ready"))
pdata->irq_cfg |= LIS3_IRQ2_DATA_READY;
if (of_property_read_bool(np, "st,irq2-click"))
pdata->irq_cfg |= LIS3_IRQ2_CLICK;
if (of_property_read_bool(np, "st,irq-open-drain"))
pdata->irq_cfg |= LIS3_IRQ_OPEN_DRAIN;
if (of_property_read_bool(np, "st,irq-active-low"))
pdata->irq_cfg |= LIS3_IRQ_ACTIVE_LOW;
if (!of_property_read_u32(np, "st,wu-duration-1", &val))
pdata->duration1 = val;
if (!of_property_read_u32(np, "st,wu-duration-2", &val))
pdata->duration2 = val;
if (of_property_read_bool(np, "st,wakeup-x-lo"))
pdata->wakeup_flags |= LIS3_WAKEUP_X_LO;
if (of_property_read_bool(np, "st,wakeup-x-hi"))
pdata->wakeup_flags |= LIS3_WAKEUP_X_HI;
if (of_property_read_bool(np, "st,wakeup-y-lo"))
pdata->wakeup_flags |= LIS3_WAKEUP_Y_LO;
if (of_property_read_bool(np, "st,wakeup-y-hi"))
pdata->wakeup_flags |= LIS3_WAKEUP_Y_HI;
if (of_property_read_bool(np, "st,wakeup-z-lo"))
pdata->wakeup_flags |= LIS3_WAKEUP_Z_LO;
if (of_property_read_bool(np, "st,wakeup-z-hi"))
pdata->wakeup_flags |= LIS3_WAKEUP_Z_HI;
if (of_get_property(np, "st,wakeup-threshold", &val))
pdata->wakeup_thresh = val;
if (of_property_read_bool(np, "st,wakeup2-x-lo"))
pdata->wakeup_flags2 |= LIS3_WAKEUP_X_LO;
if (of_property_read_bool(np, "st,wakeup2-x-hi"))
pdata->wakeup_flags2 |= LIS3_WAKEUP_X_HI;
if (of_property_read_bool(np, "st,wakeup2-y-lo"))
pdata->wakeup_flags2 |= LIS3_WAKEUP_Y_LO;
if (of_property_read_bool(np, "st,wakeup2-y-hi"))
pdata->wakeup_flags2 |= LIS3_WAKEUP_Y_HI;
if (of_property_read_bool(np, "st,wakeup2-z-lo"))
pdata->wakeup_flags2 |= LIS3_WAKEUP_Z_LO;
if (of_property_read_bool(np, "st,wakeup2-z-hi"))
pdata->wakeup_flags2 |= LIS3_WAKEUP_Z_HI;
if (of_get_property(np, "st,wakeup2-threshold", &val))
pdata->wakeup_thresh2 = val;
if (!of_property_read_u32(np, "st,highpass-cutoff-hz", &val)) {
switch (val) {
case 1:
pdata->hipass_ctrl = LIS3_HIPASS_CUTFF_1HZ;
break;
case 2:
pdata->hipass_ctrl = LIS3_HIPASS_CUTFF_2HZ;
break;
case 4:
pdata->hipass_ctrl = LIS3_HIPASS_CUTFF_4HZ;
break;
case 8:
pdata->hipass_ctrl = LIS3_HIPASS_CUTFF_8HZ;
break;
}
}
if (of_property_read_bool(np, "st,hipass1-disable"))
pdata->hipass_ctrl |= LIS3_HIPASS1_DISABLE;
if (of_property_read_bool(np, "st,hipass2-disable"))
pdata->hipass_ctrl |= LIS3_HIPASS2_DISABLE;
if (of_property_read_s32(np, "st,axis-x", &sval) == 0)
pdata->axis_x = sval;
if (of_property_read_s32(np, "st,axis-y", &sval) == 0)
pdata->axis_y = sval;
if (of_property_read_s32(np, "st,axis-z", &sval) == 0)
pdata->axis_z = sval;
if (of_property_read_u32(np, "st,default-rate", &val) == 0)
pdata->default_rate = val;
if (of_property_read_s32(np, "st,min-limit-x", &sval) == 0)
pdata->st_min_limits[0] = sval;
if (of_property_read_s32(np, "st,min-limit-y", &sval) == 0)
pdata->st_min_limits[1] = sval;
if (of_property_read_s32(np, "st,min-limit-z", &sval) == 0)
pdata->st_min_limits[2] = sval;
if (of_property_read_s32(np, "st,max-limit-x", &sval) == 0)
pdata->st_max_limits[0] = sval;
if (of_property_read_s32(np, "st,max-limit-y", &sval) == 0)
pdata->st_max_limits[1] = sval;
if (of_property_read_s32(np, "st,max-limit-z", &sval) == 0)
pdata->st_max_limits[2] = sval;
lis3->pdata = pdata;
return 0;
}
#else
int lis3lv02d_init_dt(struct lis3lv02d *lis3)
{
return 0;
}
#endif
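/*
 * Illustrative (hypothetical) device tree node matching the parser
 * above; the property names mirror the of_property_read_*() calls and
 * the values are examples only:
 *
 *	lis302dl@0 {
 *		compatible = "st,lis302dl-spi";
 *		reg = <0>;
 *		st,click-single-x;
 *		st,click-threshold-x = <10>;
 *		st,wakeup-x-hi;
 *		st,wu-duration-1 = <20>;
 *		st,highpass-cutoff-hz = <2>;
 *		st,axis-x = <1>;
 *		st,axis-y = <(-2)>;
 *		st,axis-z = <3>;
 *	};
 */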
EXPORT_SYMBOL_GPL(lis3lv02d_init_dt);
/*
* Initialise the accelerometer and the various subsystems.
* Should be rather independent of the bus system.
*/
int lis3lv02d_init_device(struct lis3lv02d *lis3)
{
int err;
irq_handler_t thread_fn;
int irq_flags = 0;
lis3->whoami = lis3lv02d_read_8(lis3, WHO_AM_I);
switch (lis3->whoami) {
case WAI_12B:
pr_info("12 bits sensor found\n");
lis3->read_data = lis3lv02d_read_12;
lis3->mdps_max_val = 2048;
lis3->pwron_delay = LIS3_PWRON_DELAY_WAI_12B;
lis3->odrs = lis3_12_rates;
lis3->odr_mask = CTRL1_DF0 | CTRL1_DF1;
lis3->scale = LIS3_SENSITIVITY_12B;
lis3->regs = lis3_wai12_regs;
lis3->regs_size = ARRAY_SIZE(lis3_wai12_regs);
break;
case WAI_8B:
pr_info("8 bits sensor found\n");
lis3->read_data = lis3lv02d_read_8;
lis3->mdps_max_val = 128;
lis3->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
lis3->odrs = lis3_8_rates;
lis3->odr_mask = CTRL1_DR;
lis3->scale = LIS3_SENSITIVITY_8B;
lis3->regs = lis3_wai8_regs;
lis3->regs_size = ARRAY_SIZE(lis3_wai8_regs);
break;
case WAI_3DC:
pr_info("8 bits 3DC sensor found\n");
lis3->read_data = lis3lv02d_read_8;
lis3->mdps_max_val = 128;
lis3->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
lis3->odrs = lis3_3dc_rates;
lis3->odr_mask = CTRL1_ODR0|CTRL1_ODR1|CTRL1_ODR2|CTRL1_ODR3;
lis3->scale = LIS3_SENSITIVITY_8B;
break;
case WAI_3DLH:
pr_info("16 bits lis331dlh sensor found\n");
lis3->read_data = lis331dlh_read_data;
lis3->mdps_max_val = 2048; /* 12 bits for 2G */
lis3->shift_adj = SHIFT_ADJ_2G;
lis3->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
lis3->odrs = lis3_3dlh_rates;
lis3->odr_mask = CTRL1_DR0 | CTRL1_DR1;
lis3->scale = LIS3DLH_SENSITIVITY_2G;
break;
default:
pr_err("unknown sensor type 0x%X\n", lis3->whoami);
return -ENODEV;
}
lis3->reg_cache = kzalloc(max(sizeof(lis3_wai8_regs),
sizeof(lis3_wai12_regs)), GFP_KERNEL);
if (lis3->reg_cache == NULL)
return -ENOMEM;
mutex_init(&lis3->mutex);
atomic_set(&lis3->wake_thread, 0);
lis3lv02d_add_fs(lis3);
err = lis3lv02d_poweron(lis3);
if (err) {
lis3lv02d_remove_fs(lis3);
return err;
}
if (lis3->pm_dev) {
pm_runtime_set_active(lis3->pm_dev);
pm_runtime_enable(lis3->pm_dev);
}
if (lis3lv02d_joystick_enable(lis3))
pr_err("joystick initialization failed\n");
/* passing in platform specific data is purely optional and only
* used by the SPI transport layer at the moment */
if (lis3->pdata) {
struct lis3lv02d_platform_data *p = lis3->pdata;
if (lis3->whoami == WAI_8B)
lis3lv02d_8b_configure(lis3, p);
irq_flags = p->irq_flags1 & IRQF_TRIGGER_MASK;
lis3->irq_cfg = p->irq_cfg;
if (p->irq_cfg)
lis3->write(lis3, CTRL_REG3, p->irq_cfg);
if (p->default_rate)
lis3lv02d_set_odr(lis3, p->default_rate);
}
/* bail if we did not get an IRQ from the bus layer */
if (!lis3->irq) {
pr_debug("No IRQ. Disabling /dev/freefall\n");
goto out;
}
/*
* The sensor can generate interrupts for free-fall and direction
* detection (distinguishable with FF_WU_SRC and DD_SRC) but to keep
* things simple and _fast_ we activate it only for free-fall, so there
* is no need to read the source register (very slow with ACPI). For the
* same reason, we forbid shared interrupts.
*
* IRQF_TRIGGER_RISING seems pointless on HP laptops because the
* io-apic is not configurable (and generates a warning) but I keep it
* in case of support for other hardware.
*/
if (lis3->pdata && lis3->whoami == WAI_8B)
thread_fn = lis302dl_interrupt_thread1_8b;
else
thread_fn = NULL;
err = request_threaded_irq(lis3->irq, lis302dl_interrupt,
thread_fn,
IRQF_TRIGGER_RISING | IRQF_ONESHOT |
irq_flags,
DRIVER_NAME, lis3);
if (err < 0) {
pr_err("Cannot get IRQ\n");
goto out;
}
lis3->miscdev.minor = MISC_DYNAMIC_MINOR;
lis3->miscdev.name = "freefall";
lis3->miscdev.fops = &lis3lv02d_misc_fops;
if (misc_register(&lis3->miscdev))
pr_err("misc_register failed\n");
out:
return 0;
}
EXPORT_SYMBOL_GPL(lis3lv02d_init_device);
MODULE_DESCRIPTION("ST LIS3LV02Dx three-axis digital accelerometer driver");
MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/lis3lv02d/lis3lv02d.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* lis3lv02d_spi - SPI glue layer for lis3lv02d
*
* Copyright (c) 2009 Daniel Mack <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spi/spi.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include "lis3lv02d.h"
#define DRV_NAME "lis3lv02d_spi"
#define LIS3_SPI_READ 0x80
static int lis3_spi_read(struct lis3lv02d *lis3, int reg, u8 *v)
{
struct spi_device *spi = lis3->bus_priv;
int ret = spi_w8r8(spi, reg | LIS3_SPI_READ);
if (ret < 0)
return -EINVAL;
*v = (u8) ret;
return 0;
}
static int lis3_spi_write(struct lis3lv02d *lis3, int reg, u8 val)
{
u8 tmp[2] = { reg, val };
struct spi_device *spi = lis3->bus_priv;
return spi_write(spi, tmp, sizeof(tmp));
}
static int lis3_spi_init(struct lis3lv02d *lis3)
{
u8 reg;
int ret;
/* power up the device */
ret = lis3->read(lis3, CTRL_REG1, &reg);
if (ret < 0)
return ret;
reg |= CTRL1_PD0 | CTRL1_Xen | CTRL1_Yen | CTRL1_Zen;
return lis3->write(lis3, CTRL_REG1, reg);
}
static union axis_conversion lis3lv02d_axis_normal =
{ .as_array = { 1, 2, 3 } };
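/*
 * Axis mapping sketch: each as_array entry selects the hardware axis
 * (1 = x, 2 = y, 3 = z) reported for that logical axis, and a negative
 * value inverts it; e.g. { .as_array = { -1, 2, 3 } } would mirror the
 * x axis. The identity mapping above leaves the readings untouched.
 */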
#ifdef CONFIG_OF
static const struct of_device_id lis302dl_spi_dt_ids[] = {
{ .compatible = "st,lis302dl-spi" },
{}
};
MODULE_DEVICE_TABLE(of, lis302dl_spi_dt_ids);
#endif
static int lis302dl_spi_probe(struct spi_device *spi)
{
int ret;
spi->bits_per_word = 8;
spi->mode = SPI_MODE_0;
ret = spi_setup(spi);
if (ret < 0)
return ret;
lis3_dev.bus_priv = spi;
lis3_dev.init = lis3_spi_init;
lis3_dev.read = lis3_spi_read;
lis3_dev.write = lis3_spi_write;
lis3_dev.irq = spi->irq;
lis3_dev.ac = lis3lv02d_axis_normal;
lis3_dev.pdata = spi->dev.platform_data;
#ifdef CONFIG_OF
if (of_match_device(lis302dl_spi_dt_ids, &spi->dev)) {
lis3_dev.of_node = spi->dev.of_node;
ret = lis3lv02d_init_dt(&lis3_dev);
if (ret)
return ret;
}
#endif
spi_set_drvdata(spi, &lis3_dev);
return lis3lv02d_init_device(&lis3_dev);
}
static void lis302dl_spi_remove(struct spi_device *spi)
{
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
lis3lv02d_joystick_disable(lis3);
lis3lv02d_poweroff(lis3);
lis3lv02d_remove_fs(&lis3_dev);
}
#ifdef CONFIG_PM_SLEEP
static int lis3lv02d_spi_suspend(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
if (!lis3->pdata || !lis3->pdata->wakeup_flags)
lis3lv02d_poweroff(&lis3_dev);
return 0;
}
static int lis3lv02d_spi_resume(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
if (!lis3->pdata || !lis3->pdata->wakeup_flags)
lis3lv02d_poweron(lis3);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(lis3lv02d_spi_pm, lis3lv02d_spi_suspend,
lis3lv02d_spi_resume);
static struct spi_driver lis302dl_spi_driver = {
.driver = {
.name = DRV_NAME,
.pm = &lis3lv02d_spi_pm,
.of_match_table = of_match_ptr(lis302dl_spi_dt_ids),
},
.probe = lis302dl_spi_probe,
.remove = lis302dl_spi_remove,
};
module_spi_driver(lis302dl_spi_driver);
MODULE_AUTHOR("Daniel Mack <[email protected]>");
MODULE_DESCRIPTION("lis3lv02d SPI glue layer");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:" DRV_NAME);
| linux-master | drivers/misc/lis3lv02d/lis3lv02d_spi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/hwmon/lis3lv02d_i2c.c
*
* Implements I2C interface for lis3lv02d (STMicroelectronics) accelerometer.
* Driver is based on corresponding SPI driver written by Daniel Mack
* (lis3lv02d_spi.c (C) 2009 Daniel Mack <[email protected]> ).
*
* Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
*
* Contact: Samu Onkalo <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include "lis3lv02d.h"
#define DRV_NAME "lis3lv02d_i2c"
static const char reg_vdd[] = "Vdd";
static const char reg_vdd_io[] = "Vdd_IO";
static int lis3_reg_ctrl(struct lis3lv02d *lis3, bool state)
{
int ret;
if (state == LIS3_REG_OFF) {
ret = regulator_bulk_disable(ARRAY_SIZE(lis3->regulators),
lis3->regulators);
} else {
ret = regulator_bulk_enable(ARRAY_SIZE(lis3->regulators),
lis3->regulators);
/* Chip needs time to wake up. Not mentioned in datasheet */
usleep_range(10000, 20000);
}
return ret;
}
static inline s32 lis3_i2c_write(struct lis3lv02d *lis3, int reg, u8 value)
{
struct i2c_client *c = lis3->bus_priv;
return i2c_smbus_write_byte_data(c, reg, value);
}
static inline s32 lis3_i2c_read(struct lis3lv02d *lis3, int reg, u8 *v)
{
struct i2c_client *c = lis3->bus_priv;
*v = i2c_smbus_read_byte_data(c, reg);
return 0;
}
static inline s32 lis3_i2c_blockread(struct lis3lv02d *lis3, int reg, int len,
u8 *v)
{
struct i2c_client *c = lis3->bus_priv;
reg |= (1 << 7); /* setting bit 7 enables register address auto-increment */
return i2c_smbus_read_i2c_block_data(c, reg, len, v);
}
static int lis3_i2c_init(struct lis3lv02d *lis3)
{
u8 reg;
int ret;
lis3_reg_ctrl(lis3, LIS3_REG_ON);
lis3->read(lis3, WHO_AM_I, &reg);
if (reg != lis3->whoami)
printk(KERN_ERR "lis3: power on failure\n");
/* power up the device */
ret = lis3->read(lis3, CTRL_REG1, &reg);
if (ret < 0)
return ret;
if (lis3->whoami == WAI_3DLH)
reg |= CTRL1_PM0 | CTRL1_Xen | CTRL1_Yen | CTRL1_Zen;
else
reg |= CTRL1_PD0 | CTRL1_Xen | CTRL1_Yen | CTRL1_Zen;
return lis3->write(lis3, CTRL_REG1, reg);
}
/* Default axis mapping but it can be overwritten by platform data */
static union axis_conversion lis3lv02d_axis_map =
{ .as_array = { LIS3_DEV_X, LIS3_DEV_Y, LIS3_DEV_Z } };
#ifdef CONFIG_OF
static const struct of_device_id lis3lv02d_i2c_dt_ids[] = {
{ .compatible = "st,lis3lv02d" },
{}
};
MODULE_DEVICE_TABLE(of, lis3lv02d_i2c_dt_ids);
#endif
static int lis3lv02d_i2c_probe(struct i2c_client *client)
{
int ret = 0;
struct lis3lv02d_platform_data *pdata = client->dev.platform_data;
#ifdef CONFIG_OF
if (of_match_device(lis3lv02d_i2c_dt_ids, &client->dev)) {
lis3_dev.of_node = client->dev.of_node;
ret = lis3lv02d_init_dt(&lis3_dev);
if (ret)
return ret;
pdata = lis3_dev.pdata;
}
#endif
if (pdata) {
if ((pdata->driver_features & LIS3_USE_BLOCK_READ) &&
(i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_I2C_BLOCK)))
lis3_dev.blkread = lis3_i2c_blockread;
if (pdata->axis_x)
lis3lv02d_axis_map.x = pdata->axis_x;
if (pdata->axis_y)
lis3lv02d_axis_map.y = pdata->axis_y;
if (pdata->axis_z)
lis3lv02d_axis_map.z = pdata->axis_z;
if (pdata->setup_resources)
ret = pdata->setup_resources();
if (ret)
goto fail;
}
lis3_dev.regulators[0].supply = reg_vdd;
lis3_dev.regulators[1].supply = reg_vdd_io;
ret = regulator_bulk_get(&client->dev,
ARRAY_SIZE(lis3_dev.regulators),
lis3_dev.regulators);
if (ret < 0)
goto fail;
lis3_dev.pdata = pdata;
lis3_dev.bus_priv = client;
lis3_dev.init = lis3_i2c_init;
lis3_dev.read = lis3_i2c_read;
lis3_dev.write = lis3_i2c_write;
lis3_dev.irq = client->irq;
lis3_dev.ac = lis3lv02d_axis_map;
lis3_dev.pm_dev = &client->dev;
i2c_set_clientdata(client, &lis3_dev);
/* Provide power over the init call */
lis3_reg_ctrl(&lis3_dev, LIS3_REG_ON);
ret = lis3lv02d_init_device(&lis3_dev);
lis3_reg_ctrl(&lis3_dev, LIS3_REG_OFF);
if (ret)
goto fail2;
return 0;
fail2:
regulator_bulk_free(ARRAY_SIZE(lis3_dev.regulators),
lis3_dev.regulators);
fail:
if (pdata && pdata->release_resources)
pdata->release_resources();
return ret;
}
static void lis3lv02d_i2c_remove(struct i2c_client *client)
{
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
struct lis3lv02d_platform_data *pdata = client->dev.platform_data;
if (pdata && pdata->release_resources)
pdata->release_resources();
lis3lv02d_joystick_disable(lis3);
lis3lv02d_remove_fs(&lis3_dev);
regulator_bulk_free(ARRAY_SIZE(lis3->regulators),
lis3_dev.regulators);
}
#ifdef CONFIG_PM_SLEEP
static int lis3lv02d_i2c_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
if (!lis3->pdata || !lis3->pdata->wakeup_flags)
lis3lv02d_poweroff(lis3);
return 0;
}
static int lis3lv02d_i2c_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
/*
* The runtime PM documentation says that devices should always be
* powered on at resume. Runtime PM turns them off again once the
* system-wide resume has completed.
*/
if (!lis3->pdata || !lis3->pdata->wakeup_flags ||
pm_runtime_suspended(dev))
lis3lv02d_poweron(lis3);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int lis3_i2c_runtime_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
lis3lv02d_poweroff(lis3);
return 0;
}
static int lis3_i2c_runtime_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
lis3lv02d_poweron(lis3);
return 0;
}
#endif /* CONFIG_PM */
static const struct i2c_device_id lis3lv02d_id[] = {
{"lis3lv02d", LIS3LV02D},
{"lis331dlh", LIS331DLH},
{}
};
MODULE_DEVICE_TABLE(i2c, lis3lv02d_id);
static const struct dev_pm_ops lis3_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(lis3lv02d_i2c_suspend,
lis3lv02d_i2c_resume)
SET_RUNTIME_PM_OPS(lis3_i2c_runtime_suspend,
lis3_i2c_runtime_resume,
NULL)
};
static struct i2c_driver lis3lv02d_i2c_driver = {
.driver = {
.name = DRV_NAME,
.pm = &lis3_pm_ops,
.of_match_table = of_match_ptr(lis3lv02d_i2c_dt_ids),
},
.probe = lis3lv02d_i2c_probe,
.remove = lis3lv02d_i2c_remove,
.id_table = lis3lv02d_id,
};
module_i2c_driver(lis3lv02d_i2c_driver);
MODULE_AUTHOR("Nokia Corporation");
MODULE_DESCRIPTION("lis3lv02d I2C interface");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/lis3lv02d/lis3lv02d_i2c.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Pvpanic Device Support
*
* Copyright (C) 2013 Fujitsu.
* Copyright (C) 2018 ZTE.
* Copyright (C) 2021 Oracle.
*/
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/panic_notifier.h>
#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <uapi/misc/pvpanic.h>
#include "pvpanic.h"
MODULE_AUTHOR("Mihai Carabas <[email protected]>");
MODULE_DESCRIPTION("pvpanic device driver");
MODULE_LICENSE("GPL");
static struct list_head pvpanic_list;
static spinlock_t pvpanic_lock;
static void
pvpanic_send_event(unsigned int event)
{
struct pvpanic_instance *pi_cur;
if (!spin_trylock(&pvpanic_lock))
return;
list_for_each_entry(pi_cur, &pvpanic_list, list) {
if (event & pi_cur->capability & pi_cur->events)
iowrite8(event, pi_cur->base);
}
spin_unlock(&pvpanic_lock);
}
static int
pvpanic_panic_notify(struct notifier_block *nb, unsigned long code, void *unused)
{
unsigned int event = PVPANIC_PANICKED;
if (kexec_crash_loaded())
event = PVPANIC_CRASH_LOADED;
pvpanic_send_event(event);
return NOTIFY_DONE;
}
/*
* Call our notifier very early on panic, deferring the
* action taken to the hypervisor.
*/
static struct notifier_block pvpanic_panic_nb = {
.notifier_call = pvpanic_panic_notify,
.priority = INT_MAX,
};
static void pvpanic_remove(void *param)
{
struct pvpanic_instance *pi_cur, *pi_next;
struct pvpanic_instance *pi = param;
spin_lock(&pvpanic_lock);
list_for_each_entry_safe(pi_cur, pi_next, &pvpanic_list, list) {
if (pi_cur == pi) {
list_del(&pi_cur->list);
break;
}
}
spin_unlock(&pvpanic_lock);
}
int devm_pvpanic_probe(struct device *dev, struct pvpanic_instance *pi)
{
if (!pi || !pi->base)
return -EINVAL;
spin_lock(&pvpanic_lock);
list_add(&pi->list, &pvpanic_list);
spin_unlock(&pvpanic_lock);
dev_set_drvdata(dev, pi);
return devm_add_action_or_reset(dev, pvpanic_remove, pi);
}
EXPORT_SYMBOL_GPL(devm_pvpanic_probe);
static int pvpanic_init(void)
{
INIT_LIST_HEAD(&pvpanic_list);
spin_lock_init(&pvpanic_lock);
atomic_notifier_chain_register(&panic_notifier_list, &pvpanic_panic_nb);
return 0;
}
module_init(pvpanic_init);
static void pvpanic_exit(void)
{
atomic_notifier_chain_unregister(&panic_notifier_list, &pvpanic_panic_nb);
}
module_exit(pvpanic_exit);
| linux-master | drivers/misc/pvpanic/pvpanic.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Pvpanic PCI Device Support
*
* Copyright (C) 2021 Oracle.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <uapi/misc/pvpanic.h>
#include "pvpanic.h"
#define PCI_VENDOR_ID_REDHAT 0x1b36
#define PCI_DEVICE_ID_REDHAT_PVPANIC 0x0011
MODULE_AUTHOR("Mihai Carabas <[email protected]>");
MODULE_DESCRIPTION("pvpanic device driver");
MODULE_LICENSE("GPL");
static ssize_t capability_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pvpanic_instance *pi = dev_get_drvdata(dev);
return sysfs_emit(buf, "%x\n", pi->capability);
}
static DEVICE_ATTR_RO(capability);
static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pvpanic_instance *pi = dev_get_drvdata(dev);
return sysfs_emit(buf, "%x\n", pi->events);
}
static ssize_t events_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct pvpanic_instance *pi = dev_get_drvdata(dev);
unsigned int tmp;
int err;
err = kstrtouint(buf, 16, &tmp);
if (err)
return err;
if ((tmp & pi->capability) != tmp)
return -EINVAL;
pi->events = tmp;
return count;
}
static DEVICE_ATTR_RW(events);
static struct attribute *pvpanic_pci_dev_attrs[] = {
&dev_attr_capability.attr,
&dev_attr_events.attr,
NULL
};
ATTRIBUTE_GROUPS(pvpanic_pci_dev);
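/*
 * Userspace usage sketch (the sysfs path is illustrative): restrict the
 * device to the PVPANIC_PANICKED event, which is bit 0 of the
 * capability mask. A device advertising both events reports "3":
 *
 *   # cat /sys/bus/pci/devices/<bdf>/capability
 *   3
 *   # echo 1 > /sys/bus/pci/devices/<bdf>/events
 */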
static int pvpanic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct pvpanic_instance *pi;
void __iomem *base;
int ret;
ret = pcim_enable_device(pdev);
if (ret < 0)
return ret;
base = pcim_iomap(pdev, 0, 0);
if (!base)
return -ENOMEM;
pi = devm_kmalloc(&pdev->dev, sizeof(*pi), GFP_KERNEL);
if (!pi)
return -ENOMEM;
pi->base = base;
pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
/* initialize capability by RDPT */
pi->capability &= ioread8(base);
pi->events = pi->capability;
return devm_pvpanic_probe(&pdev->dev, pi);
}
static const struct pci_device_id pvpanic_pci_id_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT, PCI_DEVICE_ID_REDHAT_PVPANIC)},
{}
};
MODULE_DEVICE_TABLE(pci, pvpanic_pci_id_tbl);
static struct pci_driver pvpanic_pci_driver = {
.name = "pvpanic-pci",
.id_table = pvpanic_pci_id_tbl,
.probe = pvpanic_pci_probe,
.driver = {
.dev_groups = pvpanic_pci_dev_groups,
},
};
module_pci_driver(pvpanic_pci_driver);
| linux-master | drivers/misc/pvpanic/pvpanic-pci.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Pvpanic MMIO Device Support
*
* Copyright (C) 2013 Fujitsu.
* Copyright (C) 2018 ZTE.
* Copyright (C) 2021 Oracle.
*/
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <uapi/misc/pvpanic.h>
#include "pvpanic.h"
MODULE_AUTHOR("Hu Tao <[email protected]>");
MODULE_DESCRIPTION("pvpanic-mmio device driver");
MODULE_LICENSE("GPL");
static ssize_t capability_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pvpanic_instance *pi = dev_get_drvdata(dev);
return sysfs_emit(buf, "%x\n", pi->capability);
}
static DEVICE_ATTR_RO(capability);
static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pvpanic_instance *pi = dev_get_drvdata(dev);
return sysfs_emit(buf, "%x\n", pi->events);
}
static ssize_t events_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct pvpanic_instance *pi = dev_get_drvdata(dev);
unsigned int tmp;
int err;
err = kstrtouint(buf, 16, &tmp);
if (err)
return err;
if ((tmp & pi->capability) != tmp)
return -EINVAL;
pi->events = tmp;
return count;
}
static DEVICE_ATTR_RW(events);
static struct attribute *pvpanic_mmio_dev_attrs[] = {
&dev_attr_capability.attr,
&dev_attr_events.attr,
NULL
};
ATTRIBUTE_GROUPS(pvpanic_mmio_dev);
static int pvpanic_mmio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct pvpanic_instance *pi;
struct resource *res;
void __iomem *base;
res = platform_get_mem_or_io(pdev, 0);
if (!res)
return -EINVAL;
switch (resource_type(res)) {
case IORESOURCE_IO:
base = devm_ioport_map(dev, res->start, resource_size(res));
if (!base)
return -ENOMEM;
break;
case IORESOURCE_MEM:
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
break;
default:
return -EINVAL;
}
pi = devm_kmalloc(dev, sizeof(*pi), GFP_KERNEL);
if (!pi)
return -ENOMEM;
pi->base = base;
pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
/* initialize capability by RDPT */
pi->capability &= ioread8(base);
pi->events = pi->capability;
return devm_pvpanic_probe(dev, pi);
}
static const struct of_device_id pvpanic_mmio_match[] = {
{ .compatible = "qemu,pvpanic-mmio", },
{}
};
MODULE_DEVICE_TABLE(of, pvpanic_mmio_match);
static const struct acpi_device_id pvpanic_device_ids[] = {
{ "QEMU0001", 0 },
{ "", 0 }
};
MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids);
static struct platform_driver pvpanic_mmio_driver = {
.driver = {
.name = "pvpanic-mmio",
.of_match_table = pvpanic_mmio_match,
.acpi_match_table = pvpanic_device_ids,
.dev_groups = pvpanic_mmio_dev_groups,
},
.probe = pvpanic_mmio_probe,
};
module_platform_driver(pvpanic_mmio_driver);
| linux-master | drivers/misc/pvpanic/pvpanic-mmio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* usb.c - Hardware dependent module for USB
*
* Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/sysfs.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
#include <linux/most.h>
#define USB_MTU 512
#define NO_ISOCHRONOUS_URB 0
#define AV_PACKETS_PER_XACT 2
#define BUF_CHAIN_SIZE 0xFFFF
#define MAX_NUM_ENDPOINTS 30
#define MAX_SUFFIX_LEN 10
#define MAX_STRING_LEN 80
#define MAX_BUF_SIZE 0xFFFF
#define USB_VENDOR_ID_SMSC 0x0424 /* VID: SMSC */
#define USB_DEV_ID_BRDG 0xC001 /* PID: USB Bridge */
#define USB_DEV_ID_OS81118 0xCF18 /* PID: USB OS81118 */
#define USB_DEV_ID_OS81119 0xCF19 /* PID: USB OS81119 */
#define USB_DEV_ID_OS81210 0xCF30 /* PID: USB OS81210 */
/* DRCI Addresses */
#define DRCI_REG_NI_STATE 0x0100
#define DRCI_REG_PACKET_BW 0x0101
#define DRCI_REG_NODE_ADDR 0x0102
#define DRCI_REG_NODE_POS 0x0103
#define DRCI_REG_MEP_FILTER 0x0140
#define DRCI_REG_HASH_TBL0 0x0141
#define DRCI_REG_HASH_TBL1 0x0142
#define DRCI_REG_HASH_TBL2 0x0143
#define DRCI_REG_HASH_TBL3 0x0144
#define DRCI_REG_HW_ADDR_HI 0x0145
#define DRCI_REG_HW_ADDR_MI 0x0146
#define DRCI_REG_HW_ADDR_LO 0x0147
#define DRCI_REG_BASE 0x1100
#define DRCI_COMMAND 0x02
#define DRCI_READ_REQ 0xA0
#define DRCI_WRITE_REQ 0xA1
/**
* struct most_dci_obj - Direct Communication Interface
* @dev: device representing the DCI object in sysfs
* @usb_device: pointer to the usb device
* @reg_addr: register address for arbitrary DCI access
*/
struct most_dci_obj {
struct device dev;
struct usb_device *usb_device;
u16 reg_addr;
};
#define to_dci_obj(p) container_of(p, struct most_dci_obj, dev)
struct most_dev;
struct clear_hold_work {
struct work_struct ws;
struct most_dev *mdev;
unsigned int channel;
int pipe;
};
#define to_clear_hold_work(w) container_of(w, struct clear_hold_work, ws)
/**
* struct most_dev - holds all usb interface specific stuff
* @dev: device structure
* @usb_device: pointer to usb device
* @iface: hardware interface
* @cap: channel capabilities
* @conf: channel configuration
* @dci: direct communication interface of hardware
* @ep_address: endpoint address table
* @description: device description
* @suffix: suffix for channel name
* @channel_lock: synchronize channel access
* @padding_active: indicates channel uses padding
* @is_channel_healthy: health status table of each channel
* @clear_work: deferred work used to clear stalled endpoints
* @busy_urbs: list of anchored items
* @io_mutex: synchronize I/O with disconnect
* @link_stat_timer: timer for link status reports
* @poll_work_obj: work for polling link status
* @on_netinfo: callback that delivers link state and MAC address to the core
*/
struct most_dev {
struct device dev;
struct usb_device *usb_device;
struct most_interface iface;
struct most_channel_capability *cap;
struct most_channel_config *conf;
struct most_dci_obj *dci;
u8 *ep_address;
char description[MAX_STRING_LEN];
char suffix[MAX_NUM_ENDPOINTS][MAX_SUFFIX_LEN];
spinlock_t channel_lock[MAX_NUM_ENDPOINTS]; /* sync channel access */
bool padding_active[MAX_NUM_ENDPOINTS];
bool is_channel_healthy[MAX_NUM_ENDPOINTS];
struct clear_hold_work clear_work[MAX_NUM_ENDPOINTS];
struct usb_anchor *busy_urbs;
struct mutex io_mutex;
struct timer_list link_stat_timer;
struct work_struct poll_work_obj;
void (*on_netinfo)(struct most_interface *most_iface,
unsigned char link_state, unsigned char *addrs);
};
#define to_mdev(d) container_of(d, struct most_dev, iface)
#define to_mdev_from_dev(d) container_of(d, struct most_dev, dev)
#define to_mdev_from_work(w) container_of(w, struct most_dev, poll_work_obj)
static void wq_clear_halt(struct work_struct *wq_obj);
static void wq_netinfo(struct work_struct *wq_obj);
/**
* drci_rd_reg - read a DCI register
* @dev: usb device
* @reg: register address
* @buf: buffer to store data
*
* This reads data from the INIC's direct register communication interface
*/
static inline int drci_rd_reg(struct usb_device *dev, u16 reg, u16 *buf)
{
int retval;
__le16 *dma_buf;
u8 req_type = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
if (!dma_buf)
return -ENOMEM;
retval = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
DRCI_READ_REQ, req_type,
0x0000,
reg, dma_buf, sizeof(*dma_buf),
USB_CTRL_GET_TIMEOUT);
*buf = le16_to_cpu(*dma_buf);
kfree(dma_buf);
if (retval < 0)
return retval;
return 0;
}
/**
* drci_wr_reg - write a DCI register
* @dev: usb device
* @reg: register address
* @data: data to write
*
* This writes data to the INIC's direct register communication interface
*/
static inline int drci_wr_reg(struct usb_device *dev, u16 reg, u16 data)
{
return usb_control_msg(dev,
usb_sndctrlpipe(dev, 0),
DRCI_WRITE_REQ,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
data,
reg,
NULL,
0,
USB_CTRL_SET_TIMEOUT);
}
static inline int start_sync_ep(struct usb_device *usb_dev, u16 ep)
{
return drci_wr_reg(usb_dev, DRCI_REG_BASE + DRCI_COMMAND + ep * 16, 1);
}
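/*
 * Minimal usage sketch (not part of the driver; the helper name is
 * hypothetical): fetch the INIC's node address through the DCI read
 * request implemented above.
 */
static inline int __maybe_unused drci_rd_node_addr(struct usb_device *dev,
						   u16 *addr)
{
	return drci_rd_reg(dev, DRCI_REG_NODE_ADDR, addr);
}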
/**
* get_stream_frame_size - calculate frame size of current configuration
* @dev: device structure
* @cfg: channel configuration
*/
static unsigned int get_stream_frame_size(struct device *dev,
struct most_channel_config *cfg)
{
unsigned int frame_size;
unsigned int sub_size = cfg->subbuffer_size;
if (!sub_size) {
dev_warn(dev, "Misconfig: Subbuffer size zero.\n");
return 0;
}
switch (cfg->data_type) {
case MOST_CH_ISOC:
frame_size = AV_PACKETS_PER_XACT * sub_size;
break;
case MOST_CH_SYNC:
if (cfg->packets_per_xact == 0) {
dev_warn(dev, "Misconfig: Packets per XACT zero\n");
frame_size = 0;
} else if (cfg->packets_per_xact == 0xFF) {
frame_size = (USB_MTU / sub_size) * sub_size;
} else {
frame_size = cfg->packets_per_xact * sub_size;
}
break;
default:
dev_warn(dev, "Query frame size of non-streaming channel\n");
frame_size = 0;
break;
}
return frame_size;
}
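/*
 * Worked example (illustrative numbers): for a synchronous channel with
 * subbuffer_size = 4 and packets_per_xact = 0xFF the code above yields
 * frame_size = (512 / 4) * 4 = 512, i.e. one full USB_MTU per
 * transaction; with packets_per_xact = 2 it would be 2 * 4 = 8 bytes.
 */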
/**
* hdm_poison_channel - mark buffers of this channel as invalid
* @iface: pointer to the interface
* @channel: channel ID
*
* This unlinks all URBs submitted to the HCD,
* calls the associated completion function of the core and removes
* them from the list.
*
* Returns 0 on success or an error code otherwise.
*/
static int hdm_poison_channel(struct most_interface *iface, int channel)
{
struct most_dev *mdev = to_mdev(iface);
unsigned long flags;
spinlock_t *lock; /* temp. lock */
if (channel < 0 || channel >= iface->num_channels) {
dev_warn(&mdev->usb_device->dev, "Channel ID out of range.\n");
return -ECHRNG;
}
lock = mdev->channel_lock + channel;
spin_lock_irqsave(lock, flags);
mdev->is_channel_healthy[channel] = false;
spin_unlock_irqrestore(lock, flags);
cancel_work_sync(&mdev->clear_work[channel].ws);
mutex_lock(&mdev->io_mutex);
usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
if (mdev->padding_active[channel])
mdev->padding_active[channel] = false;
if (mdev->conf[channel].data_type == MOST_CH_ASYNC) {
del_timer_sync(&mdev->link_stat_timer);
cancel_work_sync(&mdev->poll_work_obj);
}
mutex_unlock(&mdev->io_mutex);
return 0;
}
/**
* hdm_add_padding - add padding bytes
* @mdev: most device
* @channel: channel ID
* @mbo: buffer object
*
* This inserts the INIC hardware specific padding bytes into a streaming
* channel's buffer
*/
static int hdm_add_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
{
struct most_channel_config *conf = &mdev->conf[channel];
unsigned int frame_size = get_stream_frame_size(&mdev->dev, conf);
unsigned int j, num_frames;
if (!frame_size)
return -EINVAL;
num_frames = mbo->buffer_length / frame_size;
if (num_frames < 1) {
dev_err(&mdev->usb_device->dev,
"Missed minimal transfer unit.\n");
return -EINVAL;
}
for (j = num_frames - 1; j > 0; j--)
memmove(mbo->virt_address + j * USB_MTU,
mbo->virt_address + j * frame_size,
frame_size);
mbo->buffer_length = num_frames * USB_MTU;
return 0;
}
/**
* hdm_remove_padding - remove padding bytes
* @mdev: most device
* @channel: channel ID
* @mbo: buffer object
*
* This takes the INIC hardware specific padding bytes off a streaming
* channel's buffer.
*/
static int hdm_remove_padding(struct most_dev *mdev, int channel,
struct mbo *mbo)
{
struct most_channel_config *const conf = &mdev->conf[channel];
unsigned int frame_size = get_stream_frame_size(&mdev->dev, conf);
unsigned int j, num_frames;
if (!frame_size)
return -EINVAL;
num_frames = mbo->processed_length / USB_MTU;
for (j = 1; j < num_frames; j++)
memmove(mbo->virt_address + frame_size * j,
mbo->virt_address + USB_MTU * j,
frame_size);
mbo->processed_length = frame_size * num_frames;
return 0;
}
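/*
 * Padding layout sketch (frame_size < USB_MTU): on the USB wire each
 * frame occupies a full USB_MTU slot, so hdm_add_padding() spreads the
 * frames apart before transmission and hdm_remove_padding() packs them
 * back together after reception:
 *
 *	core buffer:	|frame0|frame1|frame2|		(frame_size each)
 *	USB wire:	|frame0 pad...|frame1 pad...|	(USB_MTU each)
 */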
/**
* hdm_write_completion - completion function for submitted Tx URBs
* @urb: the URB that has been completed
*
* This checks the status of the completed URB. In case the URB has been
* unlinked before, it is immediately freed. On any other error the MBO
* transfer flag is set. On success it frees allocated resources and calls
* the completion function.
*
* Context: interrupt!
*/
static void hdm_write_completion(struct urb *urb)
{
struct mbo *mbo = urb->context;
struct most_dev *mdev = to_mdev(mbo->ifp);
unsigned int channel = mbo->hdm_channel_id;
spinlock_t *lock = mdev->channel_lock + channel;
unsigned long flags;
spin_lock_irqsave(lock, flags);
mbo->processed_length = 0;
mbo->status = MBO_E_INVAL;
if (likely(mdev->is_channel_healthy[channel])) {
switch (urb->status) {
case 0:
case -ESHUTDOWN:
mbo->processed_length = urb->actual_length;
mbo->status = MBO_SUCCESS;
break;
case -EPIPE:
dev_warn(&mdev->usb_device->dev,
"Broken pipe on ep%02x\n",
mdev->ep_address[channel]);
mdev->is_channel_healthy[channel] = false;
mdev->clear_work[channel].pipe = urb->pipe;
schedule_work(&mdev->clear_work[channel].ws);
break;
case -ENODEV:
case -EPROTO:
mbo->status = MBO_E_CLOSE;
break;
}
}
spin_unlock_irqrestore(lock, flags);
if (likely(mbo->complete))
mbo->complete(mbo);
usb_free_urb(urb);
}
/**
* hdm_read_completion - completion function for submitted Rx URBs
* @urb: the URB that has been completed
*
* This checks the status of the completed URB. In case the URB has been
* unlinked before, it is immediately freed. On any other error the MBO transfer
* flag is set. On success it frees allocated resources, removes
* padding bytes -if necessary- and calls the completion function.
*
* Context: interrupt!
*/
static void hdm_read_completion(struct urb *urb)
{
struct mbo *mbo = urb->context;
struct most_dev *mdev = to_mdev(mbo->ifp);
unsigned int channel = mbo->hdm_channel_id;
struct device *dev = &mdev->usb_device->dev;
spinlock_t *lock = mdev->channel_lock + channel;
unsigned long flags;
spin_lock_irqsave(lock, flags);
mbo->processed_length = 0;
mbo->status = MBO_E_INVAL;
if (likely(mdev->is_channel_healthy[channel])) {
switch (urb->status) {
case 0:
case -ESHUTDOWN:
mbo->processed_length = urb->actual_length;
mbo->status = MBO_SUCCESS;
if (mdev->padding_active[channel] &&
hdm_remove_padding(mdev, channel, mbo)) {
mbo->processed_length = 0;
mbo->status = MBO_E_INVAL;
}
break;
case -EPIPE:
dev_warn(dev, "Broken pipe on ep%02x\n",
mdev->ep_address[channel]);
mdev->is_channel_healthy[channel] = false;
mdev->clear_work[channel].pipe = urb->pipe;
schedule_work(&mdev->clear_work[channel].ws);
break;
case -ENODEV:
case -EPROTO:
mbo->status = MBO_E_CLOSE;
break;
case -EOVERFLOW:
dev_warn(dev, "Babble on ep%02x\n",
mdev->ep_address[channel]);
break;
}
}
spin_unlock_irqrestore(lock, flags);
if (likely(mbo->complete))
mbo->complete(mbo);
usb_free_urb(urb);
}
/**
* hdm_enqueue - receive a buffer to be used for data transfer
* @iface: interface to enqueue to
* @channel: ID of the channel
* @mbo: pointer to the buffer object
*
* This allocates a new URB and fills it according to the channel
* that is being used for transmission of data. Before the URB is
* submitted it is stored in the private anchor list.
*
* Returns 0 on success. On any error the URB is freed and an error code
* is returned.
*
* Context: Could in _some_ cases be interrupt!
*/
static int hdm_enqueue(struct most_interface *iface, int channel,
struct mbo *mbo)
{
struct most_dev *mdev = to_mdev(iface);
struct most_channel_config *conf;
int retval = 0;
struct urb *urb;
unsigned long length;
void *virt_address;
if (!mbo)
return -EINVAL;
if (iface->num_channels <= channel || channel < 0)
return -ECHRNG;
urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_KERNEL);
if (!urb)
return -ENOMEM;
conf = &mdev->conf[channel];
mutex_lock(&mdev->io_mutex);
if (!mdev->usb_device) {
retval = -ENODEV;
goto err_free_urb;
}
if ((conf->direction & MOST_CH_TX) && mdev->padding_active[channel] &&
hdm_add_padding(mdev, channel, mbo)) {
retval = -EINVAL;
goto err_free_urb;
}
urb->transfer_dma = mbo->bus_address;
virt_address = mbo->virt_address;
length = mbo->buffer_length;
if (conf->direction & MOST_CH_TX) {
usb_fill_bulk_urb(urb, mdev->usb_device,
usb_sndbulkpipe(mdev->usb_device,
mdev->ep_address[channel]),
virt_address,
length,
hdm_write_completion,
mbo);
if (conf->data_type != MOST_CH_ISOC &&
conf->data_type != MOST_CH_SYNC)
urb->transfer_flags |= URB_ZERO_PACKET;
} else {
usb_fill_bulk_urb(urb, mdev->usb_device,
usb_rcvbulkpipe(mdev->usb_device,
mdev->ep_address[channel]),
virt_address,
length + conf->extra_len,
hdm_read_completion,
mbo);
}
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &mdev->busy_urbs[channel]);
retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval) {
dev_err(&mdev->usb_device->dev,
"URB submit failed with error %d.\n", retval);
goto err_unanchor_urb;
}
mutex_unlock(&mdev->io_mutex);
return 0;
err_unanchor_urb:
usb_unanchor_urb(urb);
err_free_urb:
usb_free_urb(urb);
mutex_unlock(&mdev->io_mutex);
return retval;
}
static void *hdm_dma_alloc(struct mbo *mbo, u32 size)
{
struct most_dev *mdev = to_mdev(mbo->ifp);
return usb_alloc_coherent(mdev->usb_device, size, GFP_KERNEL,
&mbo->bus_address);
}
static void hdm_dma_free(struct mbo *mbo, u32 size)
{
struct most_dev *mdev = to_mdev(mbo->ifp);
usb_free_coherent(mdev->usb_device, size, mbo->virt_address,
mbo->bus_address);
}
/**
* hdm_configure_channel - receive channel configuration from core
* @iface: interface
* @channel: channel ID
* @conf: structure that holds the configuration information
*
* The attached network interface controller (NIC) supports a padding mode
* to avoid short packets on USB, hence increasing the performance due to a
* lower interrupt load. This mode is the default for synchronous data and can
* be switched on for isochronous data. In case padding is active the
* driver needs to know the frame size of the payload in order to calculate
* the number of bytes it needs to pad when transmitting or to cut off when
* receiving data.
*
*/
static int hdm_configure_channel(struct most_interface *iface, int channel,
struct most_channel_config *conf)
{
unsigned int num_frames;
unsigned int frame_size;
struct most_dev *mdev = to_mdev(iface);
struct device *dev = &mdev->usb_device->dev;
if (!conf) {
dev_err(dev, "Bad config pointer.\n");
return -EINVAL;
}
if (channel < 0 || channel >= iface->num_channels) {
dev_err(dev, "Channel ID out of range.\n");
return -EINVAL;
}
mdev->is_channel_healthy[channel] = true;
mdev->clear_work[channel].channel = channel;
mdev->clear_work[channel].mdev = mdev;
INIT_WORK(&mdev->clear_work[channel].ws, wq_clear_halt);
if (!conf->num_buffers || !conf->buffer_size) {
dev_err(dev, "Misconfig: buffer size or #buffers zero.\n");
return -EINVAL;
}
if (conf->data_type != MOST_CH_SYNC &&
!(conf->data_type == MOST_CH_ISOC &&
conf->packets_per_xact != 0xFF)) {
mdev->padding_active[channel] = false;
/*
* Since the NIC's padding mode is not going to be
* used, we can skip the frame size calculations and
* move directly on to exit.
*/
goto exit;
}
mdev->padding_active[channel] = true;
frame_size = get_stream_frame_size(&mdev->dev, conf);
if (frame_size == 0 || frame_size > USB_MTU) {
dev_warn(dev, "Misconfig: frame size wrong\n");
return -EINVAL;
}
num_frames = conf->buffer_size / frame_size;
if (conf->buffer_size % frame_size) {
u16 old_size = conf->buffer_size;
conf->buffer_size = num_frames * frame_size;
dev_warn(dev, "%s: fixed buffer size (%d -> %d)\n",
mdev->suffix[channel], old_size, conf->buffer_size);
}
/* calculate extra length to comply w/ HW padding */
conf->extra_len = num_frames * (USB_MTU - frame_size);
exit:
mdev->conf[channel] = *conf;
if (conf->data_type == MOST_CH_ASYNC) {
u16 ep = mdev->ep_address[channel];
if (start_sync_ep(mdev->usb_device, ep) < 0)
dev_warn(dev, "sync for ep%02x failed", ep);
}
return 0;
}
/**
* hdm_request_netinfo - request network information
* @iface: pointer to interface
* @channel: channel ID
* @on_netinfo: callback to deliver link state and hardware address
*
* This is used as trigger to set up the link status timer that
* polls for the NI state of the INIC every 2 seconds.
*
*/
static void hdm_request_netinfo(struct most_interface *iface, int channel,
void (*on_netinfo)(struct most_interface *,
unsigned char,
unsigned char *))
{
struct most_dev *mdev = to_mdev(iface);
mdev->on_netinfo = on_netinfo;
if (!on_netinfo)
return;
mdev->link_stat_timer.expires = jiffies + HZ;
mod_timer(&mdev->link_stat_timer, mdev->link_stat_timer.expires);
}
/**
* link_stat_timer_handler - schedule work obtaining mac address and link status
* @t: pointer to timer_list which holds a pointer to the USB device instance
*
* The handler runs in interrupt context. That's why we need to defer the
* tasks to a work queue.
*/
static void link_stat_timer_handler(struct timer_list *t)
{
struct most_dev *mdev = from_timer(mdev, t, link_stat_timer);
schedule_work(&mdev->poll_work_obj);
mdev->link_stat_timer.expires = jiffies + (2 * HZ);
add_timer(&mdev->link_stat_timer);
}
/**
* wq_netinfo - work queue function to deliver latest networking information
* @wq_obj: object that holds data for our deferred work to do
*
* This retrieves the network interface status of the USB INIC
*/
static void wq_netinfo(struct work_struct *wq_obj)
{
struct most_dev *mdev = to_mdev_from_work(wq_obj);
struct usb_device *usb_device = mdev->usb_device;
struct device *dev = &usb_device->dev;
u16 hi, mi, lo, link;
u8 hw_addr[6];
if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi)) {
dev_err(dev, "Vendor request 'hw_addr_hi' failed\n");
return;
}
if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi)) {
dev_err(dev, "Vendor request 'hw_addr_mid' failed\n");
return;
}
if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo)) {
dev_err(dev, "Vendor request 'hw_addr_low' failed\n");
return;
}
if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link)) {
dev_err(dev, "Vendor request 'link status' failed\n");
return;
}
hw_addr[0] = hi >> 8;
hw_addr[1] = hi;
hw_addr[2] = mi >> 8;
hw_addr[3] = mi;
hw_addr[4] = lo >> 8;
hw_addr[5] = lo;
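/* e.g. hi = 0x0102, mi = 0x0304, lo = 0x0506 -> 01:02:03:04:05:06 */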
if (mdev->on_netinfo)
mdev->on_netinfo(&mdev->iface, link, hw_addr);
}
/**
* wq_clear_halt - work queue function
* @wq_obj: work_struct object to execute
*
* This sends a clear_halt to the given USB pipe.
*/
static void wq_clear_halt(struct work_struct *wq_obj)
{
struct clear_hold_work *clear_work = to_clear_hold_work(wq_obj);
struct most_dev *mdev = clear_work->mdev;
unsigned int channel = clear_work->channel;
int pipe = clear_work->pipe;
int snd_pipe;
int peer;
mutex_lock(&mdev->io_mutex);
most_stop_enqueue(&mdev->iface, channel);
usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
if (usb_clear_halt(mdev->usb_device, pipe))
dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");
/* If the functional Stall condition has been set on an
* asynchronous rx channel, we need to clear the tx channel
* too, since the hardware runs its clean-up sequence on both
* channels, as they are physically one on the network.
*
* The USB interface that exposes the asynchronous channels
* always contains exactly two endpoints.
*/
if (mdev->conf[channel].data_type == MOST_CH_ASYNC &&
mdev->conf[channel].direction == MOST_CH_RX) {
if (channel == 0)
peer = 1;
else
peer = 0;
snd_pipe = usb_sndbulkpipe(mdev->usb_device,
mdev->ep_address[peer]);
usb_clear_halt(mdev->usb_device, snd_pipe);
}
mdev->is_channel_healthy[channel] = true;
most_resume_enqueue(&mdev->iface, channel);
mutex_unlock(&mdev->io_mutex);
}
/*
* hdm_usb_fops - file operation table for USB driver
*/
static const struct file_operations hdm_usb_fops = {
.owner = THIS_MODULE,
};
/*
* usb_device_id - ID table for HCD device probing
*/
static const struct usb_device_id usbid[] = {
{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_BRDG), },
{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81118), },
{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81119), },
{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81210), },
{ } /* Terminating entry */
};
struct regs {
const char *name;
u16 reg;
};
static const struct regs ro_regs[] = {
{ "ni_state", DRCI_REG_NI_STATE },
{ "packet_bandwidth", DRCI_REG_PACKET_BW },
{ "node_address", DRCI_REG_NODE_ADDR },
{ "node_position", DRCI_REG_NODE_POS },
};
static const struct regs rw_regs[] = {
{ "mep_filter", DRCI_REG_MEP_FILTER },
{ "mep_hash0", DRCI_REG_HASH_TBL0 },
{ "mep_hash1", DRCI_REG_HASH_TBL1 },
{ "mep_hash2", DRCI_REG_HASH_TBL2 },
{ "mep_hash3", DRCI_REG_HASH_TBL3 },
{ "mep_eui48_hi", DRCI_REG_HW_ADDR_HI },
{ "mep_eui48_mi", DRCI_REG_HW_ADDR_MI },
{ "mep_eui48_lo", DRCI_REG_HW_ADDR_LO },
};
static int get_stat_reg_addr(const struct regs *regs, int size,
const char *name, u16 *reg_addr)
{
int i;
for (i = 0; i < size; i++) {
if (sysfs_streq(name, regs[i].name)) {
*reg_addr = regs[i].reg;
return 0;
}
}
return -EINVAL;
}
#define get_static_reg_addr(regs, name, reg_addr) \
get_stat_reg_addr(regs, ARRAY_SIZE(regs), name, reg_addr)
static ssize_t value_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
const char *name = attr->attr.name;
struct most_dci_obj *dci_obj = to_dci_obj(dev);
u16 val;
u16 reg_addr;
int err;
if (sysfs_streq(name, "arb_address"))
return sysfs_emit(buf, "%04x\n", dci_obj->reg_addr);
if (sysfs_streq(name, "arb_value"))
reg_addr = dci_obj->reg_addr;
else if (get_static_reg_addr(ro_regs, name, &reg_addr) &&
get_static_reg_addr(rw_regs, name, &reg_addr))
return -EINVAL;
err = drci_rd_reg(dci_obj->usb_device, reg_addr, &val);
if (err < 0)
return err;
return sysfs_emit(buf, "%04x\n", val);
}
static ssize_t value_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u16 val;
u16 reg_addr;
const char *name = attr->attr.name;
struct most_dci_obj *dci_obj = to_dci_obj(dev);
struct usb_device *usb_dev = dci_obj->usb_device;
int err;
err = kstrtou16(buf, 16, &val);
if (err)
return err;
if (sysfs_streq(name, "arb_address")) {
dci_obj->reg_addr = val;
return count;
}
if (sysfs_streq(name, "arb_value"))
err = drci_wr_reg(usb_dev, dci_obj->reg_addr, val);
else if (sysfs_streq(name, "sync_ep"))
err = start_sync_ep(usb_dev, val);
else if (!get_static_reg_addr(rw_regs, name, &reg_addr))
err = drci_wr_reg(usb_dev, reg_addr, val);
else
return -EINVAL;
if (err < 0)
return err;
return count;
}
static DEVICE_ATTR(ni_state, 0444, value_show, NULL);
static DEVICE_ATTR(packet_bandwidth, 0444, value_show, NULL);
static DEVICE_ATTR(node_address, 0444, value_show, NULL);
static DEVICE_ATTR(node_position, 0444, value_show, NULL);
static DEVICE_ATTR(sync_ep, 0200, NULL, value_store);
static DEVICE_ATTR(mep_filter, 0644, value_show, value_store);
static DEVICE_ATTR(mep_hash0, 0644, value_show, value_store);
static DEVICE_ATTR(mep_hash1, 0644, value_show, value_store);
static DEVICE_ATTR(mep_hash2, 0644, value_show, value_store);
static DEVICE_ATTR(mep_hash3, 0644, value_show, value_store);
static DEVICE_ATTR(mep_eui48_hi, 0644, value_show, value_store);
static DEVICE_ATTR(mep_eui48_mi, 0644, value_show, value_store);
static DEVICE_ATTR(mep_eui48_lo, 0644, value_show, value_store);
static DEVICE_ATTR(arb_address, 0644, value_show, value_store);
static DEVICE_ATTR(arb_value, 0644, value_show, value_store);
static struct attribute *dci_attrs[] = {
&dev_attr_ni_state.attr,
&dev_attr_packet_bandwidth.attr,
&dev_attr_node_address.attr,
&dev_attr_node_position.attr,
&dev_attr_sync_ep.attr,
&dev_attr_mep_filter.attr,
&dev_attr_mep_hash0.attr,
&dev_attr_mep_hash1.attr,
&dev_attr_mep_hash2.attr,
&dev_attr_mep_hash3.attr,
&dev_attr_mep_eui48_hi.attr,
&dev_attr_mep_eui48_mi.attr,
&dev_attr_mep_eui48_lo.attr,
&dev_attr_arb_address.attr,
&dev_attr_arb_value.attr,
NULL,
};
ATTRIBUTE_GROUPS(dci);
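/*
 * Userspace usage sketch (the sysfs path is illustrative): arbitrary
 * DCI register access via the arb_address/arb_value pair declared
 * above, here reading DRCI_REG_NODE_ADDR (0x0102):
 *
 *   # echo 0102 > /sys/bus/usb/devices/<dev>/dci/arb_address
 *   # cat /sys/bus/usb/devices/<dev>/dci/arb_value
 */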
static void release_dci(struct device *dev)
{
struct most_dci_obj *dci = to_dci_obj(dev);
put_device(dev->parent);
kfree(dci);
}
static void release_mdev(struct device *dev)
{
struct most_dev *mdev = to_mdev_from_dev(dev);
kfree(mdev);
}
/**
* hdm_probe - probe function of USB device driver
* @interface: Interface of the attached USB device
* @id: Pointer to the USB ID table.
*
* This allocates and initializes the device instance, adds the new
* entry to the internal list, scans the USB descriptors and registers
* the interface with the core.
* Additionally, the DCI objects are created and the hardware is sync'd.
*
* Returns 0 on success. In case of an error a negative number is returned.
*/
static int
hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
struct usb_host_interface *usb_iface_desc = interface->cur_altsetting;
struct usb_device *usb_dev = interface_to_usbdev(interface);
struct device *dev = &usb_dev->dev;
struct most_dev *mdev;
unsigned int i;
unsigned int num_endpoints;
struct most_channel_capability *tmp_cap;
struct usb_endpoint_descriptor *ep_desc;
int ret = -ENOMEM;
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return -ENOMEM;
usb_set_intfdata(interface, mdev);
num_endpoints = usb_iface_desc->desc.bNumEndpoints;
if (num_endpoints > MAX_NUM_ENDPOINTS) {
kfree(mdev);
return -EINVAL;
}
mutex_init(&mdev->io_mutex);
INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
timer_setup(&mdev->link_stat_timer, link_stat_timer_handler, 0);
mdev->usb_device = usb_dev;
mdev->link_stat_timer.expires = jiffies + (2 * HZ);
mdev->iface.mod = hdm_usb_fops.owner;
mdev->iface.dev = &mdev->dev;
mdev->iface.driver_dev = &interface->dev;
mdev->iface.interface = ITYPE_USB;
mdev->iface.configure = hdm_configure_channel;
mdev->iface.request_netinfo = hdm_request_netinfo;
mdev->iface.enqueue = hdm_enqueue;
mdev->iface.poison_channel = hdm_poison_channel;
mdev->iface.dma_alloc = hdm_dma_alloc;
mdev->iface.dma_free = hdm_dma_free;
mdev->iface.description = mdev->description;
mdev->iface.num_channels = num_endpoints;
snprintf(mdev->description, sizeof(mdev->description),
"%d-%s:%d.%d",
usb_dev->bus->busnum,
usb_dev->devpath,
usb_dev->config->desc.bConfigurationValue,
usb_iface_desc->desc.bInterfaceNumber);
mdev->dev.init_name = mdev->description;
mdev->dev.parent = &interface->dev;
mdev->dev.release = release_mdev;
mdev->conf = kcalloc(num_endpoints, sizeof(*mdev->conf), GFP_KERNEL);
if (!mdev->conf)
goto err_free_mdev;
mdev->cap = kcalloc(num_endpoints, sizeof(*mdev->cap), GFP_KERNEL);
if (!mdev->cap)
goto err_free_conf;
mdev->iface.channel_vector = mdev->cap;
mdev->ep_address =
kcalloc(num_endpoints, sizeof(*mdev->ep_address), GFP_KERNEL);
if (!mdev->ep_address)
goto err_free_cap;
mdev->busy_urbs =
kcalloc(num_endpoints, sizeof(*mdev->busy_urbs), GFP_KERNEL);
if (!mdev->busy_urbs)
goto err_free_ep_address;
tmp_cap = mdev->cap;
for (i = 0; i < num_endpoints; i++) {
ep_desc = &usb_iface_desc->endpoint[i].desc;
mdev->ep_address[i] = ep_desc->bEndpointAddress;
mdev->padding_active[i] = false;
mdev->is_channel_healthy[i] = true;
snprintf(&mdev->suffix[i][0], MAX_SUFFIX_LEN, "ep%02x",
mdev->ep_address[i]);
tmp_cap->name_suffix = &mdev->suffix[i][0];
tmp_cap->buffer_size_packet = MAX_BUF_SIZE;
tmp_cap->buffer_size_streaming = MAX_BUF_SIZE;
tmp_cap->num_buffers_packet = BUF_CHAIN_SIZE;
tmp_cap->num_buffers_streaming = BUF_CHAIN_SIZE;
tmp_cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
MOST_CH_ISOC | MOST_CH_SYNC;
if (usb_endpoint_dir_in(ep_desc))
tmp_cap->direction = MOST_CH_RX;
else
tmp_cap->direction = MOST_CH_TX;
tmp_cap++;
init_usb_anchor(&mdev->busy_urbs[i]);
spin_lock_init(&mdev->channel_lock[i]);
}
dev_dbg(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
le16_to_cpu(usb_dev->descriptor.idVendor),
le16_to_cpu(usb_dev->descriptor.idProduct),
usb_dev->bus->busnum,
usb_dev->devnum);
dev_dbg(dev, "device path: /sys/bus/usb/devices/%d-%s:%d.%d\n",
usb_dev->bus->busnum,
usb_dev->devpath,
usb_dev->config->desc.bConfigurationValue,
usb_iface_desc->desc.bInterfaceNumber);
ret = most_register_interface(&mdev->iface);
if (ret)
goto err_free_busy_urbs;
mutex_lock(&mdev->io_mutex);
if (le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81118 ||
le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81119 ||
le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81210) {
mdev->dci = kzalloc(sizeof(*mdev->dci), GFP_KERNEL);
if (!mdev->dci) {
mutex_unlock(&mdev->io_mutex);
most_deregister_interface(&mdev->iface);
ret = -ENOMEM;
goto err_free_busy_urbs;
}
mdev->dci->dev.init_name = "dci";
mdev->dci->dev.parent = get_device(mdev->iface.dev);
mdev->dci->dev.groups = dci_groups;
mdev->dci->dev.release = release_dci;
if (device_register(&mdev->dci->dev)) {
mutex_unlock(&mdev->io_mutex);
most_deregister_interface(&mdev->iface);
ret = -ENOMEM;
goto err_free_dci;
}
mdev->dci->usb_device = mdev->usb_device;
}
mutex_unlock(&mdev->io_mutex);
return 0;
err_free_dci:
put_device(&mdev->dci->dev);
err_free_busy_urbs:
kfree(mdev->busy_urbs);
err_free_ep_address:
kfree(mdev->ep_address);
err_free_cap:
kfree(mdev->cap);
err_free_conf:
kfree(mdev->conf);
err_free_mdev:
put_device(&mdev->dev);
return ret;
}
/**
* hdm_disconnect - disconnect function of USB device driver
* @interface: Interface of the attached USB device
*
* This deregisters the interface with the core, removes the kernel timer
* and frees resources.
*
* Context: hub kernel thread
*/
static void hdm_disconnect(struct usb_interface *interface)
{
struct most_dev *mdev = usb_get_intfdata(interface);
mutex_lock(&mdev->io_mutex);
usb_set_intfdata(interface, NULL);
mdev->usb_device = NULL;
mutex_unlock(&mdev->io_mutex);
del_timer_sync(&mdev->link_stat_timer);
cancel_work_sync(&mdev->poll_work_obj);
if (mdev->dci)
device_unregister(&mdev->dci->dev);
most_deregister_interface(&mdev->iface);
kfree(mdev->busy_urbs);
kfree(mdev->cap);
kfree(mdev->conf);
kfree(mdev->ep_address);
put_device(&mdev->dev);
}
static int hdm_suspend(struct usb_interface *interface, pm_message_t message)
{
struct most_dev *mdev = usb_get_intfdata(interface);
int i;
mutex_lock(&mdev->io_mutex);
for (i = 0; i < mdev->iface.num_channels; i++) {
most_stop_enqueue(&mdev->iface, i);
usb_kill_anchored_urbs(&mdev->busy_urbs[i]);
}
mutex_unlock(&mdev->io_mutex);
return 0;
}
static int hdm_resume(struct usb_interface *interface)
{
struct most_dev *mdev = usb_get_intfdata(interface);
int i;
mutex_lock(&mdev->io_mutex);
for (i = 0; i < mdev->iface.num_channels; i++)
most_resume_enqueue(&mdev->iface, i);
mutex_unlock(&mdev->io_mutex);
return 0;
}
static struct usb_driver hdm_usb = {
.name = "hdm_usb",
.id_table = usbid,
.probe = hdm_probe,
.disconnect = hdm_disconnect,
.resume = hdm_resume,
.suspend = hdm_suspend,
};
module_usb_driver(hdm_usb);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Gromm <[email protected]>");
MODULE_DESCRIPTION("HDM_4_USB");
| linux-master | drivers/most/most_usb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* configfs.c - Implementation of configfs interface to the driver stack
*
* Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/configfs.h>
#include <linux/most.h>
#define MAX_STRING_SIZE 80
struct mdev_link {
struct config_item item;
struct list_head list;
bool create_link;
bool destroy_link;
u16 num_buffers;
u16 buffer_size;
u16 subbuffer_size;
u16 packets_per_xact;
u16 dbr_size;
char datatype[MAX_STRING_SIZE];
char direction[MAX_STRING_SIZE];
char name[MAX_STRING_SIZE];
char device[MAX_STRING_SIZE];
char channel[MAX_STRING_SIZE];
char comp[MAX_STRING_SIZE];
char comp_params[MAX_STRING_SIZE];
};
static struct list_head mdev_link_list;
static int set_cfg_buffer_size(struct mdev_link *link)
{
return most_set_cfg_buffer_size(link->device, link->channel,
link->buffer_size);
}
static int set_cfg_subbuffer_size(struct mdev_link *link)
{
return most_set_cfg_subbuffer_size(link->device, link->channel,
link->subbuffer_size);
}
static int set_cfg_dbr_size(struct mdev_link *link)
{
return most_set_cfg_dbr_size(link->device, link->channel,
link->dbr_size);
}
static int set_cfg_num_buffers(struct mdev_link *link)
{
return most_set_cfg_num_buffers(link->device, link->channel,
link->num_buffers);
}
static int set_cfg_packets_xact(struct mdev_link *link)
{
return most_set_cfg_packets_xact(link->device, link->channel,
link->packets_per_xact);
}
static int set_cfg_direction(struct mdev_link *link)
{
return most_set_cfg_direction(link->device, link->channel,
link->direction);
}
static int set_cfg_datatype(struct mdev_link *link)
{
return most_set_cfg_datatype(link->device, link->channel,
link->datatype);
}
static int (*set_config_val[])(struct mdev_link *link) = {
set_cfg_buffer_size,
set_cfg_subbuffer_size,
set_cfg_dbr_size,
set_cfg_num_buffers,
set_cfg_packets_xact,
set_cfg_direction,
set_cfg_datatype,
};
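/*
 * Note: set_config_and_add_link() below applies these setters in array
 * order. -ENODEV returns are tolerated on purpose, so that a link can be
 * fully described in configfs before the matching device has been
 * registered; most_interface_register_notify() replays the configuration
 * once the device shows up.
 */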
static struct mdev_link *to_mdev_link(struct config_item *item)
{
return container_of(item, struct mdev_link, item);
}
static int set_config_and_add_link(struct mdev_link *mdev_link)
{
int i;
int ret;
for (i = 0; i < ARRAY_SIZE(set_config_val); i++) {
ret = set_config_val[i](mdev_link);
if (ret < 0 && ret != -ENODEV) {
pr_err("Config failed\n");
return ret;
}
}
return most_add_link(mdev_link->device, mdev_link->channel,
mdev_link->comp, mdev_link->name,
mdev_link->comp_params);
}
static ssize_t mdev_link_create_link_store(struct config_item *item,
const char *page, size_t count)
{
struct mdev_link *mdev_link = to_mdev_link(item);
bool tmp;
int ret;
ret = kstrtobool(page, &tmp);
if (ret)
return ret;
if (!tmp)
return count;
ret = set_config_and_add_link(mdev_link);
if (ret && ret != -ENODEV)
return ret;
list_add_tail(&mdev_link->list, &mdev_link_list);
mdev_link->create_link = tmp;
mdev_link->destroy_link = false;
return count;
}
static ssize_t mdev_link_destroy_link_store(struct config_item *item,
const char *page, size_t count)
{
struct mdev_link *mdev_link = to_mdev_link(item);
bool tmp;
int ret;
ret = kstrtobool(page, &tmp);
if (ret)
return ret;
if (!tmp)
return count;
ret = most_remove_link(mdev_link->device, mdev_link->channel,
mdev_link->comp);
if (ret)
return ret;
if (!list_empty(&mdev_link_list))
list_del(&mdev_link->list);
mdev_link->destroy_link = tmp;
return count;
}
static ssize_t mdev_link_direction_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "%s\n", to_mdev_link(item)->direction);
}
static ssize_t mdev_link_direction_store(struct config_item *item,
const char *page, size_t count)
{
struct mdev_link *mdev_link = to_mdev_link(item);
if (!sysfs_streq(page, "dir_rx") && !sysfs_streq(page, "rx") &&
!sysfs_streq(page, "dir_tx") && !sysfs_streq(page, "tx"))
return -EINVAL;
strcpy(mdev_link->direction, page);
strim(mdev_link->direction);
return count;
}
static ssize_t mdev_link_datatype_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "%s\n", to_mdev_link(item)->datatype);
}
static ssize_t mdev_link_datatype_store(struct config_item *item,
const char *page, size_t count)
{
struct mdev_link *mdev_link = to_mdev_link(item);
if (!sysfs_streq(page, "control") && !sysfs_streq(page, "async") &&
!sysfs_streq(page, "sync") && !sysfs_streq(page, "isoc") &&
!sysfs_streq(page, "isoc_avp"))
return -EINVAL;
strcpy(mdev_link->datatype, page);
strim(mdev_link->datatype);
return count;
}
static ssize_t mdev_link_device_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "%s\n", to_mdev_link(item)->device);
}
static ssize_t mdev_link_device_store(struct config_item *item,
const char *page, size_t count)
{
struct mdev_link *mdev_link = to_mdev_link(item);
strscpy(mdev_link->device, page, sizeof(mdev_link->device));
strim(mdev_link->device);
return count;
}
static ssize_t mdev_link_channel_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "%s\n", to_mdev_link(item)->channel);
}
static ssize_t mdev_link_channel_store(struct config_item *item,
const char *page, size_t count)
{
struct mdev_link *mdev_link = to_mdev_link(item);
strscpy(mdev_link->channel, page, sizeof(mdev_link->channel));
strim(mdev_link->channel);
return count;
}
static ssize_t mdev_link_comp_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "%s\n", to_mdev_link(item)->comp);
}
static ssize_t mdev_link_comp_store(struct config_item *item,
const char *page, size_t count)
{
struct mdev_link *mdev_link = to_mdev_link(item);
strscpy(mdev_link->comp, page, sizeof(mdev_link->comp));
strim(mdev_link->comp);
return count;
}
static ssize_t mdev_link_comp_params_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "%s\n",
to_mdev_link(item)->comp_params);
}
static ssize_t mdev_link_comp_params_store(struct config_item *item,
const char *page, size_t count)
{
struct mdev_link *mdev_link = to_mdev_link(item);
strscpy(mdev_link->comp_params, page, sizeof(mdev_link->comp_params));
strim(mdev_link->comp_params);
return count;
}
static ssize_t mdev_link_num_buffers_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "%d\n",
to_mdev_link(item)->num_buffers);
}
static ssize_t mdev_link_num_buffers_store(struct config_item *item,
const char *page, size_t count)
{
struct mdev_link *mdev_link = to_mdev_link(item);
int ret;
ret = kstrtou16(page, 0, &mdev_link->num_buffers);
if (ret)
return ret;
return count;
}
static ssize_t mdev_link_buffer_size_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "%d\n",
to_mdev_link(item)->buffer_size);
}
static ssize_t mdev_link_buffer_size_store(struct config_item *item,
const char *page, size_t count)
{
struct mdev_link *mdev_link = to_mdev_link(item);
int ret;
ret = kstrtou16(page, 0, &mdev_link->buffer_size);
if (ret)
return ret;
return count;
}
static ssize_t mdev_link_subbuffer_size_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%d\n",
to_mdev_link(item)->subbuffer_size);
}
static ssize_t mdev_link_subbuffer_size_store(struct config_item *item,
const char *page, size_t count)
{
struct mdev_link *mdev_link = to_mdev_link(item);
int ret;
ret = kstrtou16(page, 0, &mdev_link->subbuffer_size);
if (ret)
return ret;
return count;
}
static ssize_t mdev_link_packets_per_xact_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%d\n",
to_mdev_link(item)->packets_per_xact);
}
static ssize_t mdev_link_packets_per_xact_store(struct config_item *item,
const char *page, size_t count)
{
struct mdev_link *mdev_link = to_mdev_link(item);
int ret;
ret = kstrtou16(page, 0, &mdev_link->packets_per_xact);
if (ret)
return ret;
return count;
}
static ssize_t mdev_link_dbr_size_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "%d\n", to_mdev_link(item)->dbr_size);
}
static ssize_t mdev_link_dbr_size_store(struct config_item *item,
const char *page, size_t count)
{
struct mdev_link *mdev_link = to_mdev_link(item);
int ret;
ret = kstrtou16(page, 0, &mdev_link->dbr_size);
if (ret)
return ret;
return count;
}
CONFIGFS_ATTR_WO(mdev_link_, create_link);
CONFIGFS_ATTR_WO(mdev_link_, destroy_link);
CONFIGFS_ATTR(mdev_link_, device);
CONFIGFS_ATTR(mdev_link_, channel);
CONFIGFS_ATTR(mdev_link_, comp);
CONFIGFS_ATTR(mdev_link_, comp_params);
CONFIGFS_ATTR(mdev_link_, num_buffers);
CONFIGFS_ATTR(mdev_link_, buffer_size);
CONFIGFS_ATTR(mdev_link_, subbuffer_size);
CONFIGFS_ATTR(mdev_link_, packets_per_xact);
CONFIGFS_ATTR(mdev_link_, datatype);
CONFIGFS_ATTR(mdev_link_, direction);
CONFIGFS_ATTR(mdev_link_, dbr_size);
static struct configfs_attribute *mdev_link_attrs[] = {
&mdev_link_attr_create_link,
&mdev_link_attr_destroy_link,
&mdev_link_attr_device,
&mdev_link_attr_channel,
&mdev_link_attr_comp,
&mdev_link_attr_comp_params,
&mdev_link_attr_num_buffers,
&mdev_link_attr_buffer_size,
&mdev_link_attr_subbuffer_size,
&mdev_link_attr_packets_per_xact,
&mdev_link_attr_datatype,
&mdev_link_attr_direction,
&mdev_link_attr_dbr_size,
NULL,
};
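/*
 * Illustrative configfs usage (assuming configfs is mounted at
 * /sys/kernel/config): a link item is created with mkdir and configured
 * through the attribute files declared above, e.g.
 *
 *	mkdir /sys/kernel/config/most_cdev/mylink
 *	echo mdev0 > .../mylink/device
 *	echo ep8f  > .../mylink/channel
 *	echo 2     > .../mylink/num_buffers
 *	echo 1     > .../mylink/create_link
 *
 * The device and channel names shown are examples only.
 */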
static void mdev_link_release(struct config_item *item)
{
struct mdev_link *mdev_link = to_mdev_link(item);
int ret;
if (mdev_link->destroy_link)
goto free_item;
ret = most_remove_link(mdev_link->device, mdev_link->channel,
mdev_link->comp);
if (ret) {
pr_err("Removing link failed.\n");
goto free_item;
}
if (!list_empty(&mdev_link_list))
list_del(&mdev_link->list);
free_item:
kfree(to_mdev_link(item));
}
static struct configfs_item_operations mdev_link_item_ops = {
.release = mdev_link_release,
};
static const struct config_item_type mdev_link_type = {
.ct_item_ops = &mdev_link_item_ops,
.ct_attrs = mdev_link_attrs,
.ct_owner = THIS_MODULE,
};
struct most_common {
struct config_group group;
struct module *mod;
struct configfs_subsystem subsys;
};
static struct most_common *to_most_common(struct configfs_subsystem *subsys)
{
return container_of(subsys, struct most_common, subsys);
}
static struct config_item *most_common_make_item(struct config_group *group,
const char *name)
{
struct mdev_link *mdev_link;
struct most_common *mc = to_most_common(group->cg_subsys);
mdev_link = kzalloc(sizeof(*mdev_link), GFP_KERNEL);
if (!mdev_link)
return ERR_PTR(-ENOMEM);
if (!try_module_get(mc->mod)) {
kfree(mdev_link);
return ERR_PTR(-ENOLCK);
}
config_item_init_type_name(&mdev_link->item, name,
&mdev_link_type);
if (!strcmp(group->cg_item.ci_namebuf, "most_cdev"))
strcpy(mdev_link->comp, "cdev");
else if (!strcmp(group->cg_item.ci_namebuf, "most_net"))
strcpy(mdev_link->comp, "net");
else if (!strcmp(group->cg_item.ci_namebuf, "most_video"))
strcpy(mdev_link->comp, "video");
strcpy(mdev_link->name, name);
return &mdev_link->item;
}
static void most_common_release(struct config_item *item)
{
struct config_group *group = to_config_group(item);
kfree(to_most_common(group->cg_subsys));
}
static struct configfs_item_operations most_common_item_ops = {
.release = most_common_release,
};
static void most_common_disconnect(struct config_group *group,
struct config_item *item)
{
struct most_common *mc = to_most_common(group->cg_subsys);
module_put(mc->mod);
}
static struct configfs_group_operations most_common_group_ops = {
.make_item = most_common_make_item,
.disconnect_notify = most_common_disconnect,
};
static const struct config_item_type most_common_type = {
.ct_item_ops = &most_common_item_ops,
.ct_group_ops = &most_common_group_ops,
.ct_owner = THIS_MODULE,
};
static struct most_common most_cdev = {
.subsys = {
.su_group = {
.cg_item = {
.ci_namebuf = "most_cdev",
.ci_type = &most_common_type,
},
},
},
};
static struct most_common most_net = {
.subsys = {
.su_group = {
.cg_item = {
.ci_namebuf = "most_net",
.ci_type = &most_common_type,
},
},
},
};
static struct most_common most_video = {
.subsys = {
.su_group = {
.cg_item = {
.ci_namebuf = "most_video",
.ci_type = &most_common_type,
},
},
},
};
struct most_snd_grp {
struct config_group group;
bool create_card;
struct list_head list;
};
static struct most_snd_grp *to_most_snd_grp(struct config_item *item)
{
return container_of(to_config_group(item), struct most_snd_grp, group);
}
static struct config_item *most_snd_grp_make_item(struct config_group *group,
const char *name)
{
struct mdev_link *mdev_link;
mdev_link = kzalloc(sizeof(*mdev_link), GFP_KERNEL);
if (!mdev_link)
return ERR_PTR(-ENOMEM);
config_item_init_type_name(&mdev_link->item, name, &mdev_link_type);
mdev_link->create_link = false;
strcpy(mdev_link->name, name);
strcpy(mdev_link->comp, "sound");
return &mdev_link->item;
}
static ssize_t most_snd_grp_create_card_store(struct config_item *item,
const char *page, size_t count)
{
struct most_snd_grp *snd_grp = to_most_snd_grp(item);
int ret;
bool tmp;
ret = kstrtobool(page, &tmp);
if (ret)
return ret;
if (tmp) {
ret = most_cfg_complete("sound");
if (ret)
return ret;
}
snd_grp->create_card = tmp;
return count;
}
CONFIGFS_ATTR_WO(most_snd_grp_, create_card);
static struct configfs_attribute *most_snd_grp_attrs[] = {
&most_snd_grp_attr_create_card,
NULL,
};
static void most_snd_grp_release(struct config_item *item)
{
struct most_snd_grp *group = to_most_snd_grp(item);
list_del(&group->list);
kfree(group);
}
static struct configfs_item_operations most_snd_grp_item_ops = {
.release = most_snd_grp_release,
};
static struct configfs_group_operations most_snd_grp_group_ops = {
.make_item = most_snd_grp_make_item,
};
static const struct config_item_type most_snd_grp_type = {
.ct_item_ops = &most_snd_grp_item_ops,
.ct_group_ops = &most_snd_grp_group_ops,
.ct_attrs = most_snd_grp_attrs,
.ct_owner = THIS_MODULE,
};
struct most_sound {
struct configfs_subsystem subsys;
struct list_head soundcard_list;
struct module *mod;
};
static struct config_group *most_sound_make_group(struct config_group *group,
const char *name)
{
struct most_snd_grp *most;
struct most_sound *ms = container_of(group->cg_subsys,
struct most_sound, subsys);
list_for_each_entry(most, &ms->soundcard_list, list) {
if (!most->create_card) {
pr_info("adapter configuration still in progress.\n");
return ERR_PTR(-EPROTO);
}
}
if (!try_module_get(ms->mod))
return ERR_PTR(-ENOLCK);
most = kzalloc(sizeof(*most), GFP_KERNEL);
if (!most) {
module_put(ms->mod);
return ERR_PTR(-ENOMEM);
}
config_group_init_type_name(&most->group, name, &most_snd_grp_type);
list_add_tail(&most->list, &ms->soundcard_list);
return &most->group;
}
static void most_sound_disconnect(struct config_group *group,
struct config_item *item)
{
struct most_sound *ms = container_of(group->cg_subsys,
struct most_sound, subsys);
module_put(ms->mod);
}
static struct configfs_group_operations most_sound_group_ops = {
.make_group = most_sound_make_group,
.disconnect_notify = most_sound_disconnect,
};
static const struct config_item_type most_sound_type = {
.ct_group_ops = &most_sound_group_ops,
.ct_owner = THIS_MODULE,
};
static struct most_sound most_sound_subsys = {
.subsys = {
.su_group = {
.cg_item = {
.ci_namebuf = "most_sound",
.ci_type = &most_sound_type,
},
},
},
};
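/*
 * The sound subsystem adds one level of nesting compared to the groups
 * above: a directory made under most_sound represents a sound card and
 * the link items are made inside it. A sketch of the expected flow
 * (names are examples only):
 *
 *	mkdir /sys/kernel/config/most_sound/card0
 *	mkdir /sys/kernel/config/most_sound/card0/ch0
 *	... configure the ch0 attributes and set its create_link ...
 *	echo 1 > /sys/kernel/config/most_sound/card0/create_card
 */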
int most_register_configfs_subsys(struct most_component *c)
{
int ret;
if (!strcmp(c->name, "cdev")) {
most_cdev.mod = c->mod;
ret = configfs_register_subsystem(&most_cdev.subsys);
} else if (!strcmp(c->name, "net")) {
most_net.mod = c->mod;
ret = configfs_register_subsystem(&most_net.subsys);
} else if (!strcmp(c->name, "video")) {
most_video.mod = c->mod;
ret = configfs_register_subsystem(&most_video.subsys);
} else if (!strcmp(c->name, "sound")) {
most_sound_subsys.mod = c->mod;
ret = configfs_register_subsystem(&most_sound_subsys.subsys);
} else {
return -ENODEV;
}
if (ret) {
pr_err("Error %d while registering subsystem %s\n",
ret, c->name);
}
return ret;
}
EXPORT_SYMBOL_GPL(most_register_configfs_subsys);
void most_interface_register_notify(const char *mdev)
{
bool register_snd_card = false;
struct mdev_link *mdev_link;
list_for_each_entry(mdev_link, &mdev_link_list, list) {
if (!strcmp(mdev_link->device, mdev)) {
set_config_and_add_link(mdev_link);
if (!strcmp(mdev_link->comp, "sound"))
register_snd_card = true;
}
}
if (register_snd_card)
most_cfg_complete("sound");
}
void most_deregister_configfs_subsys(struct most_component *c)
{
if (!strcmp(c->name, "cdev"))
configfs_unregister_subsystem(&most_cdev.subsys);
else if (!strcmp(c->name, "net"))
configfs_unregister_subsystem(&most_net.subsys);
else if (!strcmp(c->name, "video"))
configfs_unregister_subsystem(&most_video.subsys);
else if (!strcmp(c->name, "sound"))
configfs_unregister_subsystem(&most_sound_subsys.subsys);
}
EXPORT_SYMBOL_GPL(most_deregister_configfs_subsys);
int __init configfs_init(void)
{
config_group_init(&most_cdev.subsys.su_group);
mutex_init(&most_cdev.subsys.su_mutex);
config_group_init(&most_net.subsys.su_group);
mutex_init(&most_net.subsys.su_mutex);
config_group_init(&most_video.subsys.su_group);
mutex_init(&most_video.subsys.su_mutex);
config_group_init(&most_sound_subsys.subsys.su_group);
mutex_init(&most_sound_subsys.subsys.su_mutex);
INIT_LIST_HEAD(&most_sound_subsys.soundcard_list);
INIT_LIST_HEAD(&mdev_link_list);
return 0;
}
| linux-master | drivers/most/configfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* sound.c - Sound component for Mostcore
*
* Copyright (C) 2015 Microchip Technology Germany II GmbH & Co. KG
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/most.h>
#define DRIVER_NAME "sound"
#define STRING_SIZE 80
static struct most_component comp;
/**
* struct channel - private structure to keep channel specific data
* @substream: stores the substream structure
* @pcm_hardware: low-level hardware description
 * @iface: interface to which the channel belongs
* @cfg: channel configuration
* @card: registered sound card
* @list: list for private use
* @id: channel index
* @period_pos: current period position (ring buffer)
* @buffer_pos: current buffer position (ring buffer)
* @is_stream_running: identifies whether a stream is running or not
* @playback_task: playback thread
* @playback_waitq: waitq used by playback thread
* @copy_fn: copy function for PCM-specific format and width
*/
struct channel {
struct snd_pcm_substream *substream;
struct snd_pcm_hardware pcm_hardware;
struct most_interface *iface;
struct most_channel_config *cfg;
struct snd_card *card;
struct list_head list;
int id;
unsigned int period_pos;
unsigned int buffer_pos;
bool is_stream_running;
struct task_struct *playback_task;
wait_queue_head_t playback_waitq;
void (*copy_fn)(void *alsa, void *most, unsigned int bytes);
};
struct sound_adapter {
struct list_head dev_list;
struct most_interface *iface;
struct snd_card *card;
struct list_head list;
bool registered;
int pcm_dev_idx;
};
static struct list_head adpt_list;
#define MOST_PCM_INFO (SNDRV_PCM_INFO_MMAP | \
SNDRV_PCM_INFO_MMAP_VALID | \
SNDRV_PCM_INFO_BATCH | \
SNDRV_PCM_INFO_INTERLEAVED | \
SNDRV_PCM_INFO_BLOCK_TRANSFER)
static void swap_copy16(u16 *dest, const u16 *source, unsigned int bytes)
{
unsigned int i = 0;
while (i < (bytes / 2)) {
dest[i] = swab16(source[i]);
i++;
}
}
static void swap_copy24(u8 *dest, const u8 *source, unsigned int bytes)
{
unsigned int i = 0;
if (bytes < 2)
return;
while (i < bytes - 2) {
dest[i] = source[i + 2];
dest[i + 1] = source[i + 1];
dest[i + 2] = source[i];
i += 3;
}
}
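/*
 * Example: for one 24-bit sample the bytes {b0, b1, b2} come out as
 * {b2, b1, b0}, i.e. endianness is swapped per 3-byte group. @bytes is
 * expected to be a multiple of 3; one or two trailing bytes are not
 * copied. The bytes < 2 guard above keeps the unsigned expression
 * bytes - 2 from wrapping around.
 */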
static void swap_copy32(u32 *dest, const u32 *source, unsigned int bytes)
{
unsigned int i = 0;
while (i < bytes / 4) {
dest[i] = swab32(source[i]);
i++;
}
}
static void alsa_to_most_memcpy(void *alsa, void *most, unsigned int bytes)
{
memcpy(most, alsa, bytes);
}
static void alsa_to_most_copy16(void *alsa, void *most, unsigned int bytes)
{
swap_copy16(most, alsa, bytes);
}
static void alsa_to_most_copy24(void *alsa, void *most, unsigned int bytes)
{
swap_copy24(most, alsa, bytes);
}
static void alsa_to_most_copy32(void *alsa, void *most, unsigned int bytes)
{
swap_copy32(most, alsa, bytes);
}
static void most_to_alsa_memcpy(void *alsa, void *most, unsigned int bytes)
{
memcpy(alsa, most, bytes);
}
static void most_to_alsa_copy16(void *alsa, void *most, unsigned int bytes)
{
swap_copy16(alsa, most, bytes);
}
static void most_to_alsa_copy24(void *alsa, void *most, unsigned int bytes)
{
swap_copy24(alsa, most, bytes);
}
static void most_to_alsa_copy32(void *alsa, void *most, unsigned int bytes)
{
swap_copy32(alsa, most, bytes);
}
/**
* get_channel - get pointer to channel
* @iface: interface structure
* @channel_id: channel ID
*
* This traverses the channel list and returns the channel matching the
* ID and interface.
*
* Returns pointer to channel on success or NULL otherwise.
*/
static struct channel *get_channel(struct most_interface *iface,
int channel_id)
{
struct sound_adapter *adpt = iface->priv;
struct channel *channel;
list_for_each_entry(channel, &adpt->dev_list, list) {
if ((channel->iface == iface) && (channel->id == channel_id))
return channel;
}
return NULL;
}
/**
* copy_data - implements data copying function
* @channel: channel
* @mbo: MBO from core
*
 * Copies data between the ring buffer and the MBO (the direction depends on
 * the channel's copy function) and updates the buffer and period positions.
 * Returns true if a period boundary was crossed.
*/
static bool copy_data(struct channel *channel, struct mbo *mbo)
{
struct snd_pcm_runtime *const runtime = channel->substream->runtime;
unsigned int const frame_bytes = channel->cfg->subbuffer_size;
unsigned int const buffer_size = runtime->buffer_size;
unsigned int frames;
unsigned int fr0;
if (channel->cfg->direction & MOST_CH_RX)
frames = mbo->processed_length / frame_bytes;
else
frames = mbo->buffer_length / frame_bytes;
fr0 = min(buffer_size - channel->buffer_pos, frames);
channel->copy_fn(runtime->dma_area + channel->buffer_pos * frame_bytes,
mbo->virt_address,
fr0 * frame_bytes);
if (frames > fr0) {
/* wrap around at end of ring buffer */
channel->copy_fn(runtime->dma_area,
mbo->virt_address + fr0 * frame_bytes,
(frames - fr0) * frame_bytes);
}
channel->buffer_pos += frames;
if (channel->buffer_pos >= buffer_size)
channel->buffer_pos -= buffer_size;
channel->period_pos += frames;
if (channel->period_pos >= runtime->period_size) {
channel->period_pos -= runtime->period_size;
return true;
}
return false;
}
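/*
 * Worked example of the wrap-around handling above: with a ring buffer of
 * 96 frames, buffer_pos == 90 and an MBO carrying 10 frames, fr0 is
 * min(96 - 90, 10) == 6, so 6 frames go to the tail of the ring buffer and
 * the remaining 4 are copied to its start; buffer_pos ends up at
 * (90 + 10) - 96 == 4.
 */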
/**
* playback_thread - function implements the playback thread
* @data: private data
*
 * Thread that implements the playback functionality in a loop. It waits for
 * a free MBO from mostcore for its channel, copies the data from the ring
 * buffer into the MBO and then submits the MBO back to mostcore.
*
* Returns 0 on success or error code otherwise.
*/
static int playback_thread(void *data)
{
struct channel *const channel = data;
while (!kthread_should_stop()) {
struct mbo *mbo = NULL;
bool period_elapsed = false;
wait_event_interruptible(
channel->playback_waitq,
kthread_should_stop() ||
(channel->is_stream_running &&
(mbo = most_get_mbo(channel->iface, channel->id,
&comp))));
if (!mbo)
continue;
if (channel->is_stream_running)
period_elapsed = copy_data(channel, mbo);
else
memset(mbo->virt_address, 0, mbo->buffer_length);
most_submit_mbo(mbo);
if (period_elapsed)
snd_pcm_period_elapsed(channel->substream);
}
return 0;
}
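/*
 * Note the assignment inside the wait condition above: fetching a buffer
 * via most_get_mbo() is part of the wakeup predicate, so mbo is only
 * non-NULL if an MBO was actually acquired. When the thread is woken by
 * kthread_should_stop(), mbo stays NULL and the loop terminates.
 */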
/**
* pcm_open - implements open callback function for PCM middle layer
* @substream: pointer to ALSA PCM substream
*
 * This is called when a PCM substream is opened. At a minimum, the function
 * initializes the runtime->hw record.
*
* Returns 0 on success or error code otherwise.
*/
static int pcm_open(struct snd_pcm_substream *substream)
{
struct channel *channel = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
struct most_channel_config *cfg = channel->cfg;
int ret;
channel->substream = substream;
if (cfg->direction == MOST_CH_TX) {
channel->playback_task = kthread_run(playback_thread, channel,
"most_audio_playback");
if (IS_ERR(channel->playback_task)) {
pr_err("Couldn't start thread\n");
return PTR_ERR(channel->playback_task);
}
}
ret = most_start_channel(channel->iface, channel->id, &comp);
if (ret) {
pr_err("most_start_channel() failed!\n");
if (cfg->direction == MOST_CH_TX)
kthread_stop(channel->playback_task);
return ret;
}
runtime->hw = channel->pcm_hardware;
return 0;
}
/**
* pcm_close - implements close callback function for PCM middle layer
* @substream: sub-stream pointer
*
 * This is called when a PCM substream is closed. Any private resources
 * allocated for the substream in the open callback are released here.
*
* Returns 0 on success or error code otherwise.
*/
static int pcm_close(struct snd_pcm_substream *substream)
{
struct channel *channel = substream->private_data;
if (channel->cfg->direction == MOST_CH_TX)
kthread_stop(channel->playback_task);
most_stop_channel(channel->iface, channel->id, &comp);
return 0;
}
/**
* pcm_prepare - implements prepare callback function for PCM middle layer
* @substream: substream pointer
*
* This callback is called when the PCM is "prepared". Format rate, sample rate,
* etc., can be set here. This callback can be called many times at each setup.
*
* Returns 0 on success or error code otherwise.
*/
static int pcm_prepare(struct snd_pcm_substream *substream)
{
struct channel *channel = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
struct most_channel_config *cfg = channel->cfg;
int width = snd_pcm_format_physical_width(runtime->format);
channel->copy_fn = NULL;
if (cfg->direction == MOST_CH_TX) {
if (snd_pcm_format_big_endian(runtime->format) || width == 8)
channel->copy_fn = alsa_to_most_memcpy;
else if (width == 16)
channel->copy_fn = alsa_to_most_copy16;
else if (width == 24)
channel->copy_fn = alsa_to_most_copy24;
else if (width == 32)
channel->copy_fn = alsa_to_most_copy32;
} else {
if (snd_pcm_format_big_endian(runtime->format) || width == 8)
channel->copy_fn = most_to_alsa_memcpy;
else if (width == 16)
channel->copy_fn = most_to_alsa_copy16;
else if (width == 24)
channel->copy_fn = most_to_alsa_copy24;
else if (width == 32)
channel->copy_fn = most_to_alsa_copy32;
}
if (!channel->copy_fn)
return -EINVAL;
channel->period_pos = 0;
channel->buffer_pos = 0;
return 0;
}
/**
* pcm_trigger - implements trigger callback function for PCM middle layer
* @substream: substream pointer
* @cmd: action to perform
*
 * This is called when the PCM is started, stopped or paused. The action is
 * specified in the second argument, SNDRV_PCM_TRIGGER_XXX.
*
* Returns 0 on success or error code otherwise.
*/
static int pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct channel *channel = substream->private_data;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
channel->is_stream_running = true;
wake_up_interruptible(&channel->playback_waitq);
return 0;
case SNDRV_PCM_TRIGGER_STOP:
channel->is_stream_running = false;
return 0;
default:
return -EINVAL;
}
}
/**
* pcm_pointer - implements pointer callback function for PCM middle layer
* @substream: substream pointer
*
* This callback is called when the PCM middle layer inquires the current
* hardware position on the buffer. The position must be returned in frames,
* ranging from 0 to buffer_size-1.
*/
static snd_pcm_uframes_t pcm_pointer(struct snd_pcm_substream *substream)
{
struct channel *channel = substream->private_data;
return channel->buffer_pos;
}
/*
* Initialization of struct snd_pcm_ops
*/
static const struct snd_pcm_ops pcm_ops = {
.open = pcm_open,
.close = pcm_close,
.prepare = pcm_prepare,
.trigger = pcm_trigger,
.pointer = pcm_pointer,
};
static int split_arg_list(char *buf, u16 *ch_num, char **sample_res)
{
char *num;
int ret;
num = strsep(&buf, "x");
if (!num)
goto err;
ret = kstrtou16(num, 0, ch_num);
if (ret)
goto err;
*sample_res = strsep(&buf, ".\n");
if (!*sample_res)
goto err;
return 0;
err:
pr_err("Bad PCM format\n");
return -EINVAL;
}
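/*
 * Example: the string "2x16" is split at the 'x' into the channel count 2
 * and the sample resolution "16". A trailing '.' or newline, as in
 * "2x16\n", is stripped by the second strsep().
 */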
static const struct sample_resolution_info {
const char *sample_res;
int bytes;
u64 formats;
} sinfo[] = {
{ "8", 1, SNDRV_PCM_FMTBIT_S8 },
{ "16", 2, SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE },
{ "24", 3, SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_3BE },
{ "32", 4, SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE },
};
static int audio_set_hw_params(struct snd_pcm_hardware *pcm_hw,
u16 ch_num, char *sample_res,
struct most_channel_config *cfg)
{
int i;
for (i = 0; i < ARRAY_SIZE(sinfo); i++) {
if (!strcmp(sample_res, sinfo[i].sample_res))
goto found;
}
pr_err("Unsupported PCM format\n");
return -EINVAL;
found:
if (!ch_num) {
pr_err("Bad number of channels\n");
return -EINVAL;
}
if (cfg->subbuffer_size != ch_num * sinfo[i].bytes) {
pr_err("Audio resolution doesn't fit subbuffer size\n");
return -EINVAL;
}
pcm_hw->info = MOST_PCM_INFO;
pcm_hw->rates = SNDRV_PCM_RATE_48000;
pcm_hw->rate_min = 48000;
pcm_hw->rate_max = 48000;
pcm_hw->buffer_bytes_max = cfg->num_buffers * cfg->buffer_size;
pcm_hw->period_bytes_min = cfg->buffer_size;
pcm_hw->period_bytes_max = cfg->buffer_size;
pcm_hw->periods_min = 1;
pcm_hw->periods_max = cfg->num_buffers;
pcm_hw->channels_min = ch_num;
pcm_hw->channels_max = ch_num;
pcm_hw->formats = sinfo[i].formats;
return 0;
}
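/*
 * Example: for the argument "2x16" a PCM frame is 2 channels * 2 bytes ==
 * 4 bytes, so the linked channel must have been configured with a
 * subbuffer_size of 4, otherwise probing fails with -EINVAL.
 */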
static void release_adapter(struct sound_adapter *adpt)
{
struct channel *channel, *tmp;
list_for_each_entry_safe(channel, tmp, &adpt->dev_list, list) {
list_del(&channel->list);
kfree(channel);
}
if (adpt->card)
snd_card_free(adpt->card);
list_del(&adpt->list);
kfree(adpt);
}
/**
* audio_probe_channel - probe function of the driver module
* @iface: pointer to interface instance
* @channel_id: channel index/ID
* @cfg: pointer to actual channel configuration
* @device_name: name of the device to be created in /dev
* @arg_list: string that provides the desired audio resolution
*
 * Creates the sound card and PCM device and sets the PCM ops. The card
 * itself is registered later, once the configuration is complete (see
 * audio_create_sound_card()).
*
* Returns 0 on success or error code otherwise.
*/
static int audio_probe_channel(struct most_interface *iface, int channel_id,
struct most_channel_config *cfg,
char *device_name, char *arg_list)
{
struct channel *channel;
struct sound_adapter *adpt;
struct snd_pcm *pcm;
int playback_count = 0;
int capture_count = 0;
int ret;
int direction;
u16 ch_num;
char *sample_res;
char arg_list_cpy[STRING_SIZE];
if (cfg->data_type != MOST_CH_SYNC) {
pr_err("Incompatible channel type\n");
return -EINVAL;
}
strscpy(arg_list_cpy, arg_list, STRING_SIZE);
ret = split_arg_list(arg_list_cpy, &ch_num, &sample_res);
if (ret < 0)
return ret;
list_for_each_entry(adpt, &adpt_list, list) {
if (adpt->iface != iface)
continue;
if (adpt->registered)
return -ENOSPC;
adpt->pcm_dev_idx++;
goto skip_adpt_alloc;
}
adpt = kzalloc(sizeof(*adpt), GFP_KERNEL);
if (!adpt)
return -ENOMEM;
adpt->iface = iface;
INIT_LIST_HEAD(&adpt->dev_list);
iface->priv = adpt;
list_add_tail(&adpt->list, &adpt_list);
ret = snd_card_new(iface->driver_dev, -1, "INIC", THIS_MODULE,
sizeof(*channel), &adpt->card);
if (ret < 0)
goto err_free_adpt;
snprintf(adpt->card->driver, sizeof(adpt->card->driver),
"%s", DRIVER_NAME);
snprintf(adpt->card->shortname, sizeof(adpt->card->shortname),
"Microchip INIC");
snprintf(adpt->card->longname, sizeof(adpt->card->longname),
"%s at %s", adpt->card->shortname, iface->description);
skip_adpt_alloc:
if (get_channel(iface, channel_id)) {
pr_err("channel (%s:%d) is already linked\n",
iface->description, channel_id);
return -EEXIST;
}
if (cfg->direction == MOST_CH_TX) {
playback_count = 1;
direction = SNDRV_PCM_STREAM_PLAYBACK;
} else {
capture_count = 1;
direction = SNDRV_PCM_STREAM_CAPTURE;
}
channel = kzalloc(sizeof(*channel), GFP_KERNEL);
if (!channel) {
ret = -ENOMEM;
goto err_free_adpt;
}
channel->card = adpt->card;
channel->cfg = cfg;
channel->iface = iface;
channel->id = channel_id;
init_waitqueue_head(&channel->playback_waitq);
list_add_tail(&channel->list, &adpt->dev_list);
ret = audio_set_hw_params(&channel->pcm_hardware, ch_num, sample_res,
cfg);
if (ret)
goto err_free_adpt;
ret = snd_pcm_new(adpt->card, device_name, adpt->pcm_dev_idx,
playback_count, capture_count, &pcm);
if (ret < 0)
goto err_free_adpt;
pcm->private_data = channel;
strscpy(pcm->name, device_name, sizeof(pcm->name));
snd_pcm_set_ops(pcm, direction, &pcm_ops);
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0);
return 0;
err_free_adpt:
release_adapter(adpt);
return ret;
}
static int audio_create_sound_card(void)
{
int ret;
struct sound_adapter *adpt;
list_for_each_entry(adpt, &adpt_list, list) {
if (!adpt->registered)
goto adpt_alloc;
}
return -ENODEV;
adpt_alloc:
ret = snd_card_register(adpt->card);
if (ret < 0) {
release_adapter(adpt);
return ret;
}
adpt->registered = true;
return 0;
}
/**
* audio_disconnect_channel - function to disconnect a channel
* @iface: pointer to interface instance
* @channel_id: channel index
*
* This frees allocated memory and removes the sound card from ALSA
*
* Returns 0 on success or error code otherwise.
*/
static int audio_disconnect_channel(struct most_interface *iface,
int channel_id)
{
struct channel *channel;
struct sound_adapter *adpt = iface->priv;
channel = get_channel(iface, channel_id);
if (!channel)
return -EINVAL;
list_del(&channel->list);
kfree(channel);
if (list_empty(&adpt->dev_list))
release_adapter(adpt);
return 0;
}
/**
* audio_rx_completion - completion handler for rx channels
* @mbo: pointer to buffer object that has completed
*
 * This searches for the channel this MBO belongs to and copies the data
 * from the MBO to the ring buffer.
*
* Returns 0 on success or error code otherwise.
*/
static int audio_rx_completion(struct mbo *mbo)
{
struct channel *channel = get_channel(mbo->ifp, mbo->hdm_channel_id);
bool period_elapsed = false;
if (!channel)
return -EINVAL;
if (channel->is_stream_running)
period_elapsed = copy_data(channel, mbo);
most_put_mbo(mbo);
if (period_elapsed)
snd_pcm_period_elapsed(channel->substream);
return 0;
}
/**
* audio_tx_completion - completion handler for tx channels
* @iface: pointer to interface instance
* @channel_id: channel index/ID
*
 * This searches for the channel that matches this combination of interface
 * pointer and channel ID and wakes up any process waiting on this channel's
 * wait queue.
*
* Returns 0 on success or error code otherwise.
*/
static int audio_tx_completion(struct most_interface *iface, int channel_id)
{
struct channel *channel = get_channel(iface, channel_id);
if (!channel)
return -EINVAL;
wake_up_interruptible(&channel->playback_waitq);
return 0;
}
/*
* Initialization of the struct most_component
*/
static struct most_component comp = {
.mod = THIS_MODULE,
.name = DRIVER_NAME,
.probe_channel = audio_probe_channel,
.disconnect_channel = audio_disconnect_channel,
.rx_completion = audio_rx_completion,
.tx_completion = audio_tx_completion,
.cfg_complete = audio_create_sound_card,
};
static int __init audio_init(void)
{
int ret;
INIT_LIST_HEAD(&adpt_list);
ret = most_register_component(&comp);
if (ret) {
pr_err("Failed to register %s\n", comp.name);
return ret;
}
ret = most_register_configfs_subsys(&comp);
if (ret) {
pr_err("Failed to register %s configfs subsys\n", comp.name);
most_deregister_component(&comp);
}
return ret;
}
static void __exit audio_exit(void)
{
most_deregister_configfs_subsys(&comp);
most_deregister_component(&comp);
}
module_init(audio_init);
module_exit(audio_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Gromm <[email protected]>");
MODULE_DESCRIPTION("Sound Component Module for Mostcore");
| linux-master | drivers/most/most_snd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* core.c - Implementation of core module of MOST Linux driver stack
*
* Copyright (C) 2013-2020 Microchip Technology Germany II GmbH & Co. KG
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/sysfs.h>
#include <linux/kthread.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/most.h>
#define MAX_CHANNELS 64
#define STRING_SIZE 80
static struct ida mdev_id;
static int dummy_num_buffers;
static struct list_head comp_list;
struct pipe {
struct most_component *comp;
int refs;
int num_buffers;
};
struct most_channel {
struct device dev;
struct completion cleanup;
atomic_t mbo_ref;
atomic_t mbo_nq_level;
u16 channel_id;
char name[STRING_SIZE];
bool is_poisoned;
struct mutex start_mutex; /* channel activation synchronization */
struct mutex nq_mutex; /* nq thread synchronization */
int is_starving;
struct most_interface *iface;
struct most_channel_config cfg;
bool keep_mbo;
bool enqueue_halt;
struct list_head fifo;
spinlock_t fifo_lock; /* fifo access synchronization */
struct list_head halt_fifo;
struct list_head list;
struct pipe pipe0;
struct pipe pipe1;
struct list_head trash_fifo;
struct task_struct *hdm_enqueue_task;
wait_queue_head_t hdm_fifo_wq;
};
#define to_channel(d) container_of(d, struct most_channel, dev)
struct interface_private {
int dev_id;
char name[STRING_SIZE];
struct most_channel *channel[MAX_CHANNELS];
struct list_head channel_list;
};
static const struct {
int most_ch_data_type;
const char *name;
} ch_data_type[] = {
{ MOST_CH_CONTROL, "control" },
{ MOST_CH_ASYNC, "async" },
{ MOST_CH_SYNC, "sync" },
{ MOST_CH_ISOC, "isoc"},
{ MOST_CH_ISOC, "isoc_avp"},
};
/**
* list_pop_mbo - retrieves the first MBO of the list and removes it
* @ptr: the list head to grab the MBO from.
*/
#define list_pop_mbo(ptr) \
({ \
struct mbo *_mbo = list_first_entry(ptr, struct mbo, list); \
list_del(&_mbo->list); \
_mbo; \
})
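/*
 * Callers must hold the channel's fifo_lock and must have checked that the
 * list is not empty, since list_first_entry() on an empty list yields an
 * invalid pointer. A minimal usage sketch:
 *
 *	spin_lock_irqsave(&c->fifo_lock, flags);
 *	if (!list_empty(&c->fifo))
 *		mbo = list_pop_mbo(&c->fifo);
 *	spin_unlock_irqrestore(&c->fifo_lock, flags);
 */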
/**
* most_free_mbo_coherent - free an MBO and its coherent buffer
* @mbo: most buffer
*/
static void most_free_mbo_coherent(struct mbo *mbo)
{
struct most_channel *c = mbo->context;
u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
if (c->iface->dma_free)
c->iface->dma_free(mbo, coherent_buf_size);
else
kfree(mbo->virt_address);
kfree(mbo);
if (atomic_sub_and_test(1, &c->mbo_ref))
complete(&c->cleanup);
}
/**
* flush_channel_fifos - clear the channel fifos
* @c: pointer to channel object
*/
static void flush_channel_fifos(struct most_channel *c)
{
unsigned long flags, hf_flags;
struct mbo *mbo, *tmp;
if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
return;
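	/*
	 * The lock is dropped around each most_free_mbo_coherent() call,
	 * presumably because freeing the DMA-coherent buffer must not run
	 * with interrupts disabled. list_del() happens under the lock, so
	 * the list_for_each_entry_safe() traversal stays consistent.
	 */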
spin_lock_irqsave(&c->fifo_lock, flags);
list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
list_del(&mbo->list);
spin_unlock_irqrestore(&c->fifo_lock, flags);
most_free_mbo_coherent(mbo);
spin_lock_irqsave(&c->fifo_lock, flags);
}
spin_unlock_irqrestore(&c->fifo_lock, flags);
spin_lock_irqsave(&c->fifo_lock, hf_flags);
list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
list_del(&mbo->list);
spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
most_free_mbo_coherent(mbo);
spin_lock_irqsave(&c->fifo_lock, hf_flags);
}
spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
dev_warn(&c->dev, "Channel or trash fifo not empty\n");
}
/**
* flush_trash_fifo - clear the trash fifo
* @c: pointer to channel object
*/
static int flush_trash_fifo(struct most_channel *c)
{
struct mbo *mbo, *tmp;
unsigned long flags;
spin_lock_irqsave(&c->fifo_lock, flags);
list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
list_del(&mbo->list);
spin_unlock_irqrestore(&c->fifo_lock, flags);
most_free_mbo_coherent(mbo);
spin_lock_irqsave(&c->fifo_lock, flags);
}
spin_unlock_irqrestore(&c->fifo_lock, flags);
return 0;
}
static ssize_t available_directions_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct most_channel *c = to_channel(dev);
unsigned int i = c->channel_id;
strcpy(buf, "");
if (c->iface->channel_vector[i].direction & MOST_CH_RX)
strcat(buf, "rx ");
if (c->iface->channel_vector[i].direction & MOST_CH_TX)
strcat(buf, "tx ");
strcat(buf, "\n");
return strlen(buf);
}
static ssize_t available_datatypes_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct most_channel *c = to_channel(dev);
unsigned int i = c->channel_id;
strcpy(buf, "");
if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
strcat(buf, "control ");
if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
strcat(buf, "async ");
if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
strcat(buf, "sync ");
if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
strcat(buf, "isoc ");
strcat(buf, "\n");
return strlen(buf);
}
static ssize_t number_of_packet_buffers_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct most_channel *c = to_channel(dev);
unsigned int i = c->channel_id;
return snprintf(buf, PAGE_SIZE, "%d\n",
c->iface->channel_vector[i].num_buffers_packet);
}
static ssize_t number_of_stream_buffers_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct most_channel *c = to_channel(dev);
unsigned int i = c->channel_id;
return snprintf(buf, PAGE_SIZE, "%d\n",
c->iface->channel_vector[i].num_buffers_streaming);
}
static ssize_t size_of_packet_buffer_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct most_channel *c = to_channel(dev);
unsigned int i = c->channel_id;
return snprintf(buf, PAGE_SIZE, "%d\n",
c->iface->channel_vector[i].buffer_size_packet);
}
static ssize_t size_of_stream_buffer_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct most_channel *c = to_channel(dev);
unsigned int i = c->channel_id;
return snprintf(buf, PAGE_SIZE, "%d\n",
c->iface->channel_vector[i].buffer_size_streaming);
}
static ssize_t channel_starving_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct most_channel *c = to_channel(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
}
static ssize_t set_number_of_buffers_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct most_channel *c = to_channel(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
}
static ssize_t set_buffer_size_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct most_channel *c = to_channel(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
}
static ssize_t set_direction_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct most_channel *c = to_channel(dev);
if (c->cfg.direction & MOST_CH_TX)
return snprintf(buf, PAGE_SIZE, "tx\n");
else if (c->cfg.direction & MOST_CH_RX)
return snprintf(buf, PAGE_SIZE, "rx\n");
return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}
static ssize_t set_datatype_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int i;
struct most_channel *c = to_channel(dev);
for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
return snprintf(buf, PAGE_SIZE, "%s",
ch_data_type[i].name);
}
return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}
static ssize_t set_subbuffer_size_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct most_channel *c = to_channel(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
}
static ssize_t set_packets_per_xact_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct most_channel *c = to_channel(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
}
static ssize_t set_dbr_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct most_channel *c = to_channel(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.dbr_size);
}
#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
static umode_t channel_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device_attribute *dev_attr = to_dev_attr(attr);
struct device *dev = kobj_to_dev(kobj);
struct most_channel *c = to_channel(dev);
if (!strcmp(dev_attr->attr.name, "set_dbr_size") &&
(c->iface->interface != ITYPE_MEDIALB_DIM2))
return 0;
if (!strcmp(dev_attr->attr.name, "set_packets_per_xact") &&
(c->iface->interface != ITYPE_USB))
return 0;
return attr->mode;
}
#define DEV_ATTR(_name) (&dev_attr_##_name.attr)
static DEVICE_ATTR_RO(available_directions);
static DEVICE_ATTR_RO(available_datatypes);
static DEVICE_ATTR_RO(number_of_packet_buffers);
static DEVICE_ATTR_RO(number_of_stream_buffers);
static DEVICE_ATTR_RO(size_of_stream_buffer);
static DEVICE_ATTR_RO(size_of_packet_buffer);
static DEVICE_ATTR_RO(channel_starving);
static DEVICE_ATTR_RO(set_buffer_size);
static DEVICE_ATTR_RO(set_number_of_buffers);
static DEVICE_ATTR_RO(set_direction);
static DEVICE_ATTR_RO(set_datatype);
static DEVICE_ATTR_RO(set_subbuffer_size);
static DEVICE_ATTR_RO(set_packets_per_xact);
static DEVICE_ATTR_RO(set_dbr_size);
static struct attribute *channel_attrs[] = {
DEV_ATTR(available_directions),
DEV_ATTR(available_datatypes),
DEV_ATTR(number_of_packet_buffers),
DEV_ATTR(number_of_stream_buffers),
DEV_ATTR(size_of_stream_buffer),
DEV_ATTR(size_of_packet_buffer),
DEV_ATTR(channel_starving),
DEV_ATTR(set_buffer_size),
DEV_ATTR(set_number_of_buffers),
DEV_ATTR(set_direction),
DEV_ATTR(set_datatype),
DEV_ATTR(set_subbuffer_size),
DEV_ATTR(set_packets_per_xact),
DEV_ATTR(set_dbr_size),
NULL,
};
static const struct attribute_group channel_attr_group = {
.attrs = channel_attrs,
.is_visible = channel_attr_is_visible,
};
static const struct attribute_group *channel_attr_groups[] = {
&channel_attr_group,
NULL,
};
static ssize_t description_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct most_interface *iface = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%s\n", iface->description);
}
static ssize_t interface_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct most_interface *iface = dev_get_drvdata(dev);
switch (iface->interface) {
case ITYPE_LOOPBACK:
return snprintf(buf, PAGE_SIZE, "loopback\n");
case ITYPE_I2C:
return snprintf(buf, PAGE_SIZE, "i2c\n");
case ITYPE_I2S:
return snprintf(buf, PAGE_SIZE, "i2s\n");
case ITYPE_TSI:
return snprintf(buf, PAGE_SIZE, "tsi\n");
case ITYPE_HBI:
return snprintf(buf, PAGE_SIZE, "hbi\n");
case ITYPE_MEDIALB_DIM:
return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
case ITYPE_MEDIALB_DIM2:
return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
case ITYPE_USB:
return snprintf(buf, PAGE_SIZE, "usb\n");
case ITYPE_PCIE:
return snprintf(buf, PAGE_SIZE, "pcie\n");
}
return snprintf(buf, PAGE_SIZE, "unknown\n");
}
static DEVICE_ATTR_RO(description);
static DEVICE_ATTR_RO(interface);
static struct attribute *interface_attrs[] = {
DEV_ATTR(description),
DEV_ATTR(interface),
NULL,
};
static const struct attribute_group interface_attr_group = {
.attrs = interface_attrs,
};
static const struct attribute_group *interface_attr_groups[] = {
&interface_attr_group,
NULL,
};
static struct most_component *match_component(char *name)
{
struct most_component *comp;
list_for_each_entry(comp, &comp_list, list) {
if (!strcmp(comp->name, name))
return comp;
}
return NULL;
}
struct show_links_data {
int offs;
char *buf;
};
static int print_links(struct device *dev, void *data)
{
struct show_links_data *d = data;
int offs = d->offs;
char *buf = d->buf;
struct most_channel *c;
struct most_interface *iface = dev_get_drvdata(dev);
list_for_each_entry(c, &iface->p->channel_list, list) {
if (c->pipe0.comp) {
offs += scnprintf(buf + offs,
PAGE_SIZE - offs,
"%s:%s:%s\n",
c->pipe0.comp->name,
dev_name(iface->dev),
dev_name(&c->dev));
}
if (c->pipe1.comp) {
offs += scnprintf(buf + offs,
PAGE_SIZE - offs,
"%s:%s:%s\n",
c->pipe1.comp->name,
dev_name(iface->dev),
dev_name(&c->dev));
}
}
d->offs = offs;
return 0;
}
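/*
 * Returning 0 for the device named "most" keeps the core driver from
 * binding to the bus root device itself; every other device on the bus
 * is matched.
 */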
static int most_match(struct device *dev, struct device_driver *drv)
{
if (!strcmp(dev_name(dev), "most"))
return 0;
else
return 1;
}
static struct bus_type mostbus = {
.name = "most",
.match = most_match,
};
static ssize_t links_show(struct device_driver *drv, char *buf)
{
struct show_links_data d = { .buf = buf };
bus_for_each_dev(&mostbus, NULL, &d, print_links);
return d.offs;
}
static ssize_t components_show(struct device_driver *drv, char *buf)
{
struct most_component *comp;
int offs = 0;
list_for_each_entry(comp, &comp_list, list) {
offs += scnprintf(buf + offs, PAGE_SIZE - offs, "%s\n",
comp->name);
}
return offs;
}
/**
* get_channel - get pointer to channel
* @mdev: name of the device interface
* @mdev_ch: name of channel
*/
static struct most_channel *get_channel(char *mdev, char *mdev_ch)
{
struct device *dev = NULL;
struct most_interface *iface;
struct most_channel *c, *tmp;
dev = bus_find_device_by_name(&mostbus, NULL, mdev);
if (!dev)
return NULL;
put_device(dev);
iface = dev_get_drvdata(dev);
list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
if (!strcmp(dev_name(&c->dev), mdev_ch))
return c;
}
return NULL;
}
static
inline int link_channel_to_component(struct most_channel *c,
struct most_component *comp,
char *name,
char *comp_param)
{
int ret;
struct most_component **comp_ptr;
if (!c->pipe0.comp)
comp_ptr = &c->pipe0.comp;
else if (!c->pipe1.comp)
comp_ptr = &c->pipe1.comp;
else
return -ENOSPC;
*comp_ptr = comp;
ret = comp->probe_channel(c->iface, c->channel_id, &c->cfg, name,
comp_param);
if (ret) {
*comp_ptr = NULL;
return ret;
}
return 0;
}
int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val)
{
struct most_channel *c = get_channel(mdev, mdev_ch);
if (!c)
return -ENODEV;
c->cfg.buffer_size = val;
return 0;
}
int most_set_cfg_subbuffer_size(char *mdev, char *mdev_ch, u16 val)
{
struct most_channel *c = get_channel(mdev, mdev_ch);
if (!c)
return -ENODEV;
c->cfg.subbuffer_size = val;
return 0;
}
int most_set_cfg_dbr_size(char *mdev, char *mdev_ch, u16 val)
{
struct most_channel *c = get_channel(mdev, mdev_ch);
if (!c)
return -ENODEV;
c->cfg.dbr_size = val;
return 0;
}
int most_set_cfg_num_buffers(char *mdev, char *mdev_ch, u16 val)
{
struct most_channel *c = get_channel(mdev, mdev_ch);
if (!c)
return -ENODEV;
c->cfg.num_buffers = val;
return 0;
}
int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf)
{
int i;
struct most_channel *c = get_channel(mdev, mdev_ch);
if (!c)
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
if (!strcmp(buf, ch_data_type[i].name)) {
c->cfg.data_type = ch_data_type[i].most_ch_data_type;
break;
}
}
if (i == ARRAY_SIZE(ch_data_type))
dev_warn(&c->dev, "Invalid attribute settings\n");
return 0;
}
int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf)
{
struct most_channel *c = get_channel(mdev, mdev_ch);
if (!c)
return -ENODEV;
if (!strcmp(buf, "dir_rx")) {
c->cfg.direction = MOST_CH_RX;
} else if (!strcmp(buf, "rx")) {
c->cfg.direction = MOST_CH_RX;
} else if (!strcmp(buf, "dir_tx")) {
c->cfg.direction = MOST_CH_TX;
} else if (!strcmp(buf, "tx")) {
c->cfg.direction = MOST_CH_TX;
} else {
dev_err(&c->dev, "Invalid direction\n");
return -ENODATA;
}
return 0;
}
int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val)
{
struct most_channel *c = get_channel(mdev, mdev_ch);
if (!c)
return -ENODEV;
c->cfg.packets_per_xact = val;
return 0;
}
int most_cfg_complete(char *comp_name)
{
struct most_component *comp;
comp = match_component(comp_name);
if (!comp)
return -ENODEV;
return comp->cfg_complete();
}
int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
char *comp_param)
{
struct most_channel *c = get_channel(mdev, mdev_ch);
struct most_component *comp = match_component(comp_name);
if (!c || !comp)
return -ENODEV;
return link_channel_to_component(c, comp, link_name, comp_param);
}
int most_remove_link(char *mdev, char *mdev_ch, char *comp_name)
{
struct most_channel *c;
struct most_component *comp;
comp = match_component(comp_name);
if (!comp)
return -ENODEV;
c = get_channel(mdev, mdev_ch);
if (!c)
return -ENODEV;
if (comp->disconnect_channel(c->iface, c->channel_id))
return -EIO;
if (c->pipe0.comp == comp)
c->pipe0.comp = NULL;
if (c->pipe1.comp == comp)
c->pipe1.comp = NULL;
return 0;
}
#define DRV_ATTR(_name) (&driver_attr_##_name.attr)
static DRIVER_ATTR_RO(links);
static DRIVER_ATTR_RO(components);
static struct attribute *mc_attrs[] = {
DRV_ATTR(links),
DRV_ATTR(components),
NULL,
};
static const struct attribute_group mc_attr_group = {
.attrs = mc_attrs,
};
static const struct attribute_group *mc_attr_groups[] = {
&mc_attr_group,
NULL,
};
static struct device_driver mostbus_driver = {
.name = "most_core",
.bus = &mostbus,
.groups = mc_attr_groups,
};
static inline void trash_mbo(struct mbo *mbo)
{
unsigned long flags;
struct most_channel *c = mbo->context;
spin_lock_irqsave(&c->fifo_lock, flags);
list_add(&mbo->list, &c->trash_fifo);
spin_unlock_irqrestore(&c->fifo_lock, flags);
}
static bool hdm_mbo_ready(struct most_channel *c)
{
bool empty;
if (c->enqueue_halt)
return false;
spin_lock_irq(&c->fifo_lock);
empty = list_empty(&c->halt_fifo);
spin_unlock_irq(&c->fifo_lock);
return !empty;
}
static void nq_hdm_mbo(struct mbo *mbo)
{
unsigned long flags;
struct most_channel *c = mbo->context;
spin_lock_irqsave(&c->fifo_lock, flags);
list_add_tail(&mbo->list, &c->halt_fifo);
spin_unlock_irqrestore(&c->fifo_lock, flags);
wake_up_interruptible(&c->hdm_fifo_wq);
}
static int hdm_enqueue_thread(void *data)
{
struct most_channel *c = data;
struct mbo *mbo;
int ret;
typeof(c->iface->enqueue) enqueue = c->iface->enqueue;
while (likely(!kthread_should_stop())) {
wait_event_interruptible(c->hdm_fifo_wq,
hdm_mbo_ready(c) ||
kthread_should_stop());
mutex_lock(&c->nq_mutex);
spin_lock_irq(&c->fifo_lock);
if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
spin_unlock_irq(&c->fifo_lock);
mutex_unlock(&c->nq_mutex);
continue;
}
mbo = list_pop_mbo(&c->halt_fifo);
spin_unlock_irq(&c->fifo_lock);
if (c->cfg.direction == MOST_CH_RX)
mbo->buffer_length = c->cfg.buffer_size;
ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
mutex_unlock(&c->nq_mutex);
if (unlikely(ret)) {
dev_err(&c->dev, "Buffer enqueue failed\n");
nq_hdm_mbo(mbo);
c->hdm_enqueue_task = NULL;
return 0;
}
}
return 0;
}
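/*
 * Note on the error path above: if the HDM rejects a buffer, the MBO is
 * pushed back onto the halt fifo and the thread exits after clearing
 * c->hdm_enqueue_task, so that a later channel start can spawn a fresh
 * enqueue thread.
 */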
static int run_enqueue_thread(struct most_channel *c, int channel_id)
{
struct task_struct *task =
kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
channel_id);
if (IS_ERR(task))
return PTR_ERR(task);
c->hdm_enqueue_task = task;
return 0;
}
/**
* arm_mbo - recycle MBO for further usage
* @mbo: most buffer
*
 * This puts an MBO back on the list to have it ready for upcoming
 * tx transactions.
*
* In case the MBO belongs to a channel that recently has been
* poisoned, the MBO is scheduled to be trashed.
* Calls the completion handler of an attached component.
*/
static void arm_mbo(struct mbo *mbo)
{
unsigned long flags;
struct most_channel *c;
c = mbo->context;
if (c->is_poisoned) {
trash_mbo(mbo);
return;
}
spin_lock_irqsave(&c->fifo_lock, flags);
++*mbo->num_buffers_ptr;
list_add_tail(&mbo->list, &c->fifo);
spin_unlock_irqrestore(&c->fifo_lock, flags);
if (c->pipe0.refs && c->pipe0.comp->tx_completion)
c->pipe0.comp->tx_completion(c->iface, c->channel_id);
if (c->pipe1.refs && c->pipe1.comp->tx_completion)
c->pipe1.comp->tx_completion(c->iface, c->channel_id);
}
/**
* arm_mbo_chain - helper function that arms an MBO chain for the HDM
* @c: pointer to interface channel
* @dir: direction of the channel
* @compl: pointer to completion function
*
 * This allocates buffer objects, including their DMA-coherent
 * buffers, and puts them in the fifo.
 * Buffers of Rx channels are put in the kthread fifo and are hence
 * immediately submitted to the HDM.
*
* Returns the number of allocated and enqueued MBOs.
*/
static int arm_mbo_chain(struct most_channel *c, int dir,
void (*compl)(struct mbo *))
{
unsigned int i;
struct mbo *mbo;
unsigned long flags;
u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
atomic_set(&c->mbo_nq_level, 0);
for (i = 0; i < c->cfg.num_buffers; i++) {
mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
if (!mbo)
goto flush_fifos;
mbo->context = c;
mbo->ifp = c->iface;
mbo->hdm_channel_id = c->channel_id;
if (c->iface->dma_alloc) {
mbo->virt_address =
c->iface->dma_alloc(mbo, coherent_buf_size);
} else {
mbo->virt_address =
kzalloc(coherent_buf_size, GFP_KERNEL);
}
if (!mbo->virt_address)
goto release_mbo;
mbo->complete = compl;
mbo->num_buffers_ptr = &dummy_num_buffers;
if (dir == MOST_CH_RX) {
nq_hdm_mbo(mbo);
atomic_inc(&c->mbo_nq_level);
} else {
spin_lock_irqsave(&c->fifo_lock, flags);
list_add_tail(&mbo->list, &c->fifo);
spin_unlock_irqrestore(&c->fifo_lock, flags);
}
}
return c->cfg.num_buffers;
release_mbo:
kfree(mbo);
flush_fifos:
flush_channel_fifos(c);
return 0;
}
/**
* most_submit_mbo - submits an MBO to fifo
* @mbo: most buffer
*/
void most_submit_mbo(struct mbo *mbo)
{
if (WARN_ONCE(!mbo || !mbo->context,
"Bad buffer or missing channel reference\n"))
return;
nq_hdm_mbo(mbo);
}
EXPORT_SYMBOL_GPL(most_submit_mbo);
/**
* most_write_completion - write completion handler
* @mbo: most buffer
*
* This recycles the MBO for further usage. In case the channel has been
* poisoned, the MBO is scheduled to be trashed.
*/
static void most_write_completion(struct mbo *mbo)
{
struct most_channel *c;
c = mbo->context;
if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
trash_mbo(mbo);
else
arm_mbo(mbo);
}
int channel_has_mbo(struct most_interface *iface, int id,
struct most_component *comp)
{
struct most_channel *c = iface->p->channel[id];
unsigned long flags;
int empty;
if (unlikely(!c))
return -EINVAL;
if (c->pipe0.refs && c->pipe1.refs &&
((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
(comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
return 0;
spin_lock_irqsave(&c->fifo_lock, flags);
empty = list_empty(&c->fifo);
spin_unlock_irqrestore(&c->fifo_lock, flags);
return !empty;
}
EXPORT_SYMBOL_GPL(channel_has_mbo);
/**
* most_get_mbo - get pointer to an MBO of pool
* @iface: pointer to interface instance
* @id: channel ID
* @comp: driver component
*
* This attempts to get a free buffer out of the channel fifo.
* Returns a pointer to MBO on success or NULL otherwise.
*/
struct mbo *most_get_mbo(struct most_interface *iface, int id,
struct most_component *comp)
{
struct mbo *mbo;
struct most_channel *c;
unsigned long flags;
int *num_buffers_ptr;
c = iface->p->channel[id];
if (unlikely(!c))
return NULL;
if (c->pipe0.refs && c->pipe1.refs &&
((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
(comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
return NULL;
if (comp == c->pipe0.comp)
num_buffers_ptr = &c->pipe0.num_buffers;
else if (comp == c->pipe1.comp)
num_buffers_ptr = &c->pipe1.num_buffers;
else
num_buffers_ptr = &dummy_num_buffers;
spin_lock_irqsave(&c->fifo_lock, flags);
if (list_empty(&c->fifo)) {
spin_unlock_irqrestore(&c->fifo_lock, flags);
return NULL;
}
mbo = list_pop_mbo(&c->fifo);
--*num_buffers_ptr;
spin_unlock_irqrestore(&c->fifo_lock, flags);
mbo->num_buffers_ptr = num_buffers_ptr;
mbo->buffer_length = c->cfg.buffer_size;
return mbo;
}
EXPORT_SYMBOL_GPL(most_get_mbo);
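/*
 * Illustrative TX sequence for a component (a sketch, not taken from a
 * specific in-tree user; "data" and "len" stand in for the component's
 * payload): obtain a buffer, fill it, and hand it back to the core for
 * transmission.
 *
 *	mbo = most_get_mbo(iface, id, comp);
 *	if (mbo) {
 *		memcpy(mbo->virt_address, data, len);
 *		mbo->buffer_length = len;
 *		most_submit_mbo(mbo);
 *	}
 */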
/**
* most_put_mbo - return buffer to pool
* @mbo: most buffer
*/
void most_put_mbo(struct mbo *mbo)
{
struct most_channel *c = mbo->context;
if (c->cfg.direction == MOST_CH_TX) {
arm_mbo(mbo);
return;
}
nq_hdm_mbo(mbo);
atomic_inc(&c->mbo_nq_level);
}
EXPORT_SYMBOL_GPL(most_put_mbo);
/**
* most_read_completion - read completion handler
* @mbo: most buffer
*
* This function is called by the HDM when data has been received from the
* hardware and copied to the buffer of the MBO.
*
* In case the channel has been poisoned it puts the buffer in the trash queue.
 * Otherwise, it passes the buffer to a component for further processing.
*/
static void most_read_completion(struct mbo *mbo)
{
struct most_channel *c = mbo->context;
if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
trash_mbo(mbo);
return;
}
if (mbo->status == MBO_E_INVAL) {
nq_hdm_mbo(mbo);
atomic_inc(&c->mbo_nq_level);
return;
}
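/* The HDM's receive queue has just run dry; mark the channel as starving. */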
if (atomic_sub_and_test(1, &c->mbo_nq_level))
c->is_starving = 1;
if (c->pipe0.refs && c->pipe0.comp->rx_completion &&
c->pipe0.comp->rx_completion(mbo) == 0)
return;
if (c->pipe1.refs && c->pipe1.comp->rx_completion &&
c->pipe1.comp->rx_completion(mbo) == 0)
return;
most_put_mbo(mbo);
}
/**
* most_start_channel - prepares a channel for communication
* @iface: pointer to interface instance
* @id: channel ID
* @comp: driver component
*
* This prepares the channel for usage. Cross-checks whether the
* channel's been properly configured.
*
* Returns 0 on success or error code otherwise.
*/
int most_start_channel(struct most_interface *iface, int id,
struct most_component *comp)
{
int num_buffer;
int ret;
struct most_channel *c = iface->p->channel[id];
if (unlikely(!c))
return -EINVAL;
mutex_lock(&c->start_mutex);
if (c->pipe0.refs + c->pipe1.refs > 0)
goto out; /* already started by another component */
if (!try_module_get(iface->mod)) {
dev_err(&c->dev, "Failed to acquire HDM lock\n");
mutex_unlock(&c->start_mutex);
return -ENOLCK;
}
c->cfg.extra_len = 0;
if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
dev_err(&c->dev, "Channel configuration failed. Go check settings...\n");
ret = -EINVAL;
goto err_put_module;
}
init_waitqueue_head(&c->hdm_fifo_wq);
if (c->cfg.direction == MOST_CH_RX)
num_buffer = arm_mbo_chain(c, c->cfg.direction,
most_read_completion);
else
num_buffer = arm_mbo_chain(c, c->cfg.direction,
most_write_completion);
if (unlikely(!num_buffer)) {
ret = -ENOMEM;
goto err_put_module;
}
ret = run_enqueue_thread(c, id);
if (ret)
goto err_put_module;
c->is_starving = 0;
c->pipe0.num_buffers = c->cfg.num_buffers / 2;
c->pipe1.num_buffers = c->cfg.num_buffers - c->pipe0.num_buffers;
atomic_set(&c->mbo_ref, num_buffer);
out:
if (comp == c->pipe0.comp)
c->pipe0.refs++;
if (comp == c->pipe1.comp)
c->pipe1.refs++;
mutex_unlock(&c->start_mutex);
return 0;
err_put_module:
module_put(iface->mod);
mutex_unlock(&c->start_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(most_start_channel);
/**
* most_stop_channel - stops a running channel
* @iface: pointer to interface instance
* @id: channel ID
* @comp: driver component
*/
int most_stop_channel(struct most_interface *iface, int id,
struct most_component *comp)
{
struct most_channel *c;
if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
pr_err("Bad interface or index out of range\n");
return -EINVAL;
}
c = iface->p->channel[id];
if (unlikely(!c))
return -EINVAL;
mutex_lock(&c->start_mutex);
if (c->pipe0.refs + c->pipe1.refs >= 2)
goto out;
if (c->hdm_enqueue_task)
kthread_stop(c->hdm_enqueue_task);
c->hdm_enqueue_task = NULL;
if (iface->mod)
module_put(iface->mod);
c->is_poisoned = true;
if (c->iface->poison_channel(c->iface, c->channel_id)) {
dev_err(&c->dev, "Failed to stop channel %d of interface %s\n", c->channel_id,
c->iface->description);
mutex_unlock(&c->start_mutex);
return -EAGAIN;
}
flush_trash_fifo(c);
flush_channel_fifos(c);
#ifdef CMPL_INTERRUPTIBLE
if (wait_for_completion_interruptible(&c->cleanup)) {
dev_err(&c->dev, "Interrupted while cleaning up channel %d\n", c->channel_id);
mutex_unlock(&c->start_mutex);
return -EINTR;
}
#else
wait_for_completion(&c->cleanup);
#endif
c->is_poisoned = false;
out:
if (comp == c->pipe0.comp)
c->pipe0.refs--;
if (comp == c->pipe1.comp)
c->pipe1.refs--;
mutex_unlock(&c->start_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(most_stop_channel);
/**
* most_register_component - registers a driver component with the core
* @comp: driver component
*/
int most_register_component(struct most_component *comp)
{
if (!comp) {
pr_err("Bad component\n");
return -EINVAL;
}
list_add_tail(&comp->list, &comp_list);
return 0;
}
EXPORT_SYMBOL_GPL(most_register_component);
static int disconnect_channels(struct device *dev, void *data)
{
struct most_interface *iface;
struct most_channel *c, *tmp;
struct most_component *comp = data;
iface = dev_get_drvdata(dev);
list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
if (c->pipe0.comp == comp || c->pipe1.comp == comp)
comp->disconnect_channel(c->iface, c->channel_id);
if (c->pipe0.comp == comp)
c->pipe0.comp = NULL;
if (c->pipe1.comp == comp)
c->pipe1.comp = NULL;
}
return 0;
}
/**
 * most_deregister_component - deregisters a driver component from the core
* @comp: driver component
*/
int most_deregister_component(struct most_component *comp)
{
if (!comp) {
pr_err("Bad component\n");
return -EINVAL;
}
bus_for_each_dev(&mostbus, NULL, comp, disconnect_channels);
list_del(&comp->list);
return 0;
}
EXPORT_SYMBOL_GPL(most_deregister_component);
static void release_channel(struct device *dev)
{
struct most_channel *c = to_channel(dev);
kfree(c);
}
/**
 * most_register_interface - registers an interface with the core
* @iface: device interface
*
* Allocates and initializes a new interface instance and all of its channels.
 * Returns 0 on success or a negative error code otherwise.
*/
int most_register_interface(struct most_interface *iface)
{
unsigned int i;
int id;
struct most_channel *c;
if (!iface || !iface->enqueue || !iface->configure ||
!iface->poison_channel || (iface->num_channels > MAX_CHANNELS))
return -EINVAL;
id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
if (id < 0) {
dev_err(iface->dev, "Failed to allocate device ID\n");
return id;
}
iface->p = kzalloc(sizeof(*iface->p), GFP_KERNEL);
if (!iface->p) {
ida_simple_remove(&mdev_id, id);
return -ENOMEM;
}
INIT_LIST_HEAD(&iface->p->channel_list);
iface->p->dev_id = id;
strscpy(iface->p->name, iface->description, sizeof(iface->p->name));
iface->dev->bus = &mostbus;
iface->dev->groups = interface_attr_groups;
dev_set_drvdata(iface->dev, iface);
if (device_register(iface->dev)) {
dev_err(iface->dev, "Failed to register interface device\n");
kfree(iface->p);
put_device(iface->dev);
ida_simple_remove(&mdev_id, id);
return -ENOMEM;
}
for (i = 0; i < iface->num_channels; i++) {
const char *name_suffix = iface->channel_vector[i].name_suffix;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
goto err_free_resources;
if (!name_suffix)
snprintf(c->name, STRING_SIZE, "ch%d", i);
else
snprintf(c->name, STRING_SIZE, "%s", name_suffix);
c->dev.init_name = c->name;
c->dev.parent = iface->dev;
c->dev.groups = channel_attr_groups;
c->dev.release = release_channel;
iface->p->channel[i] = c;
c->is_starving = 0;
c->iface = iface;
c->channel_id = i;
c->keep_mbo = false;
c->enqueue_halt = false;
c->is_poisoned = false;
c->cfg.direction = 0;
c->cfg.data_type = 0;
c->cfg.num_buffers = 0;
c->cfg.buffer_size = 0;
c->cfg.subbuffer_size = 0;
c->cfg.packets_per_xact = 0;
spin_lock_init(&c->fifo_lock);
INIT_LIST_HEAD(&c->fifo);
INIT_LIST_HEAD(&c->trash_fifo);
INIT_LIST_HEAD(&c->halt_fifo);
init_completion(&c->cleanup);
atomic_set(&c->mbo_ref, 0);
mutex_init(&c->start_mutex);
mutex_init(&c->nq_mutex);
list_add_tail(&c->list, &iface->p->channel_list);
if (device_register(&c->dev)) {
dev_err(&c->dev, "Failed to register channel device\n");
goto err_free_most_channel;
}
}
most_interface_register_notify(iface->description);
return 0;
err_free_most_channel:
put_device(&c->dev);
err_free_resources:
while (i > 0) {
c = iface->p->channel[--i];
device_unregister(&c->dev);
}
kfree(iface->p);
device_unregister(iface->dev);
ida_simple_remove(&mdev_id, id);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(most_register_interface);
/**
 * most_deregister_interface - deregisters an interface from the core
* @iface: device interface
*
* Before removing an interface instance from the list, all running
* channels are stopped and poisoned.
*/
void most_deregister_interface(struct most_interface *iface)
{
int i;
struct most_channel *c;
for (i = 0; i < iface->num_channels; i++) {
c = iface->p->channel[i];
if (c->pipe0.comp)
c->pipe0.comp->disconnect_channel(c->iface,
c->channel_id);
if (c->pipe1.comp)
c->pipe1.comp->disconnect_channel(c->iface,
c->channel_id);
c->pipe0.comp = NULL;
c->pipe1.comp = NULL;
list_del(&c->list);
device_unregister(&c->dev);
}
ida_simple_remove(&mdev_id, iface->p->dev_id);
kfree(iface->p);
device_unregister(iface->dev);
}
EXPORT_SYMBOL_GPL(most_deregister_interface);
/**
* most_stop_enqueue - prevents core from enqueueing MBOs
* @iface: pointer to interface
* @id: channel id
*
 * This is called by an HDM that _cannot_ currently attend to its
 * duties and is about to be overrun by the core. The core will not
 * enqueue any further packets until the flagging HDM calls
 * most_resume_enqueue().
*/
void most_stop_enqueue(struct most_interface *iface, int id)
{
struct most_channel *c = iface->p->channel[id];
if (!c)
return;
mutex_lock(&c->nq_mutex);
c->enqueue_halt = true;
mutex_unlock(&c->nq_mutex);
}
EXPORT_SYMBOL_GPL(most_stop_enqueue);
/**
* most_resume_enqueue - allow core to enqueue MBOs again
* @iface: pointer to interface
* @id: channel id
*
* This clears the enqueue halt flag and enqueues all MBOs currently
* sitting in the wait fifo.
*/
void most_resume_enqueue(struct most_interface *iface, int id)
{
struct most_channel *c = iface->p->channel[id];
if (!c)
return;
mutex_lock(&c->nq_mutex);
c->enqueue_halt = false;
mutex_unlock(&c->nq_mutex);
wake_up_interruptible(&c->hdm_fifo_wq);
}
EXPORT_SYMBOL_GPL(most_resume_enqueue);
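/*
 * Illustrative pairing (a sketch only): an HDM that must quiesce the
 * core, for example around a hardware reconfiguration, would bracket
 * the critical section like this:
 *
 *	most_stop_enqueue(iface, channel_id);
 *	... reconfigure or suspend the hardware ...
 *	most_resume_enqueue(iface, channel_id);
 */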
static int __init most_init(void)
{
int err;
INIT_LIST_HEAD(&comp_list);
ida_init(&mdev_id);
err = bus_register(&mostbus);
if (err) {
pr_err("Failed to register most bus\n");
return err;
}
err = driver_register(&mostbus_driver);
if (err) {
pr_err("Failed to register core driver\n");
goto err_unregister_bus;
}
configfs_init();
return 0;
err_unregister_bus:
bus_unregister(&mostbus);
return err;
}
static void __exit most_exit(void)
{
driver_unregister(&mostbus_driver);
bus_unregister(&mostbus);
ida_destroy(&mdev_id);
}
subsys_initcall(most_init);
module_exit(most_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Gromm <[email protected]>");
MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");
| linux-master | drivers/most/core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* cdev.c - Character device component for Mostcore
*
* Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
#include <linux/most.h>
#define CHRDEV_REGION_SIZE 50
static struct cdev_component {
dev_t devno;
struct ida minor_id;
unsigned int major;
struct class *class;
struct most_component cc;
} comp;
struct comp_channel {
wait_queue_head_t wq;
spinlock_t unlink; /* synchronization lock to unlink channels */
struct cdev cdev;
struct device *dev;
struct mutex io_mutex;
struct most_interface *iface;
struct most_channel_config *cfg;
unsigned int channel_id;
dev_t devno;
size_t mbo_offs;
DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
int access_ref;
struct list_head list;
};
#define to_channel(d) container_of(d, struct comp_channel, cdev)
static LIST_HEAD(channel_list);
static DEFINE_SPINLOCK(ch_list_lock);
static inline bool ch_has_mbo(struct comp_channel *c)
{
return channel_has_mbo(c->iface, c->channel_id, &comp.cc) > 0;
}
static inline struct mbo *ch_get_mbo(struct comp_channel *c, struct mbo **mbo)
{
if (!kfifo_peek(&c->fifo, mbo)) {
*mbo = most_get_mbo(c->iface, c->channel_id, &comp.cc);
if (*mbo)
kfifo_in(&c->fifo, mbo, 1);
}
return *mbo;
}
static struct comp_channel *get_channel(struct most_interface *iface, int id)
{
struct comp_channel *c, *tmp;
unsigned long flags;
spin_lock_irqsave(&ch_list_lock, flags);
list_for_each_entry_safe(c, tmp, &channel_list, list) {
if ((c->iface == iface) && (c->channel_id == id)) {
spin_unlock_irqrestore(&ch_list_lock, flags);
return c;
}
}
spin_unlock_irqrestore(&ch_list_lock, flags);
return NULL;
}
static void stop_channel(struct comp_channel *c)
{
struct mbo *mbo;
while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
most_put_mbo(mbo);
most_stop_channel(c->iface, c->channel_id, &comp.cc);
}
static void destroy_cdev(struct comp_channel *c)
{
unsigned long flags;
device_destroy(comp.class, c->devno);
cdev_del(&c->cdev);
spin_lock_irqsave(&ch_list_lock, flags);
list_del(&c->list);
spin_unlock_irqrestore(&ch_list_lock, flags);
}
static void destroy_channel(struct comp_channel *c)
{
ida_simple_remove(&comp.minor_id, MINOR(c->devno));
kfifo_free(&c->fifo);
kfree(c);
}
/**
* comp_open - implements the syscall to open the device
* @inode: inode pointer
* @filp: file pointer
*
* This stores the channel pointer in the private data field of
* the file structure and activates the channel within the core.
*/
static int comp_open(struct inode *inode, struct file *filp)
{
struct comp_channel *c;
int ret;
c = to_channel(inode->i_cdev);
filp->private_data = c;
if (((c->cfg->direction == MOST_CH_RX) &&
((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
((c->cfg->direction == MOST_CH_TX) &&
((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
return -EACCES;
}
mutex_lock(&c->io_mutex);
if (!c->dev) {
mutex_unlock(&c->io_mutex);
return -ENODEV;
}
if (c->access_ref) {
mutex_unlock(&c->io_mutex);
return -EBUSY;
}
c->mbo_offs = 0;
ret = most_start_channel(c->iface, c->channel_id, &comp.cc);
if (!ret)
c->access_ref = 1;
mutex_unlock(&c->io_mutex);
return ret;
}
/**
* comp_close - implements the syscall to close the device
* @inode: inode pointer
* @filp: file pointer
*
* This stops the channel within the core.
*/
static int comp_close(struct inode *inode, struct file *filp)
{
struct comp_channel *c = to_channel(inode->i_cdev);
mutex_lock(&c->io_mutex);
spin_lock(&c->unlink);
c->access_ref = 0;
spin_unlock(&c->unlink);
if (c->dev) {
stop_channel(c);
mutex_unlock(&c->io_mutex);
} else {
mutex_unlock(&c->io_mutex);
destroy_channel(c);
}
return 0;
}
/**
* comp_write - implements the syscall to write to the device
* @filp: file pointer
* @buf: pointer to user buffer
* @count: number of bytes to write
* @offset: offset from where to start writing
*/
static ssize_t comp_write(struct file *filp, const char __user *buf,
size_t count, loff_t *offset)
{
int ret;
size_t to_copy, left;
struct mbo *mbo = NULL;
struct comp_channel *c = filp->private_data;
mutex_lock(&c->io_mutex);
while (c->dev && !ch_get_mbo(c, &mbo)) {
mutex_unlock(&c->io_mutex);
if ((filp->f_flags & O_NONBLOCK))
return -EAGAIN;
if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
return -ERESTARTSYS;
mutex_lock(&c->io_mutex);
}
if (unlikely(!c->dev)) {
ret = -ENODEV;
goto unlock;
}
to_copy = min(count, c->cfg->buffer_size - c->mbo_offs);
left = copy_from_user(mbo->virt_address + c->mbo_offs, buf, to_copy);
if (left == to_copy) {
ret = -EFAULT;
goto unlock;
}
c->mbo_offs += to_copy - left;
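/*
 * Submit the MBO once the buffer is full; control and asynchronous
 * channels are submitted on every write.
 */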
if (c->mbo_offs >= c->cfg->buffer_size ||
c->cfg->data_type == MOST_CH_CONTROL ||
c->cfg->data_type == MOST_CH_ASYNC) {
kfifo_skip(&c->fifo);
mbo->buffer_length = c->mbo_offs;
c->mbo_offs = 0;
most_submit_mbo(mbo);
}
ret = to_copy - left;
unlock:
mutex_unlock(&c->io_mutex);
return ret;
}
/**
* comp_read - implements the syscall to read from the device
* @filp: file pointer
* @buf: pointer to user buffer
* @count: number of bytes to read
* @offset: offset from where to start reading
*/
static ssize_t
comp_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
{
size_t to_copy, not_copied, copied;
struct mbo *mbo = NULL;
struct comp_channel *c = filp->private_data;
mutex_lock(&c->io_mutex);
while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
mutex_unlock(&c->io_mutex);
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
if (wait_event_interruptible(c->wq,
(!kfifo_is_empty(&c->fifo) ||
(!c->dev))))
return -ERESTARTSYS;
mutex_lock(&c->io_mutex);
}
/* make sure we don't submit to gone devices */
if (unlikely(!c->dev)) {
mutex_unlock(&c->io_mutex);
return -ENODEV;
}
to_copy = min_t(size_t,
count,
mbo->processed_length - c->mbo_offs);
not_copied = copy_to_user(buf,
mbo->virt_address + c->mbo_offs,
to_copy);
copied = to_copy - not_copied;
c->mbo_offs += copied;
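/* Recycle the MBO once user space has consumed its entire payload. */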
if (c->mbo_offs >= mbo->processed_length) {
kfifo_skip(&c->fifo);
most_put_mbo(mbo);
c->mbo_offs = 0;
}
mutex_unlock(&c->io_mutex);
return copied;
}
static __poll_t comp_poll(struct file *filp, poll_table *wait)
{
struct comp_channel *c = filp->private_data;
__poll_t mask = 0;
poll_wait(filp, &c->wq, wait);
mutex_lock(&c->io_mutex);
if (c->cfg->direction == MOST_CH_RX) {
if (!c->dev || !kfifo_is_empty(&c->fifo))
mask |= EPOLLIN | EPOLLRDNORM;
} else {
if (!c->dev || !kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
mask |= EPOLLOUT | EPOLLWRNORM;
}
mutex_unlock(&c->io_mutex);
return mask;
}
/*
* Initialization of struct file_operations
*/
static const struct file_operations channel_fops = {
.owner = THIS_MODULE,
.read = comp_read,
.write = comp_write,
.open = comp_open,
.release = comp_close,
.poll = comp_poll,
};
/**
* comp_disconnect_channel - disconnect a channel
* @iface: pointer to interface instance
* @channel_id: channel index
*
* This frees allocated memory and removes the cdev that represents this
* channel in user space.
*/
static int comp_disconnect_channel(struct most_interface *iface, int channel_id)
{
struct comp_channel *c;
c = get_channel(iface, channel_id);
if (!c)
return -EINVAL;
mutex_lock(&c->io_mutex);
spin_lock(&c->unlink);
c->dev = NULL;
spin_unlock(&c->unlink);
destroy_cdev(c);
if (c->access_ref) {
stop_channel(c);
wake_up_interruptible(&c->wq);
mutex_unlock(&c->io_mutex);
} else {
mutex_unlock(&c->io_mutex);
destroy_channel(c);
}
return 0;
}
/**
* comp_rx_completion - completion handler for rx channels
* @mbo: pointer to buffer object that has completed
*
 * This searches for the channel linked to this MBO and stores the MBO
 * in that channel's local fifo buffer.
*/
static int comp_rx_completion(struct mbo *mbo)
{
struct comp_channel *c;
if (!mbo)
return -EINVAL;
c = get_channel(mbo->ifp, mbo->hdm_channel_id);
if (!c)
return -EINVAL;
spin_lock(&c->unlink);
if (!c->access_ref || !c->dev) {
spin_unlock(&c->unlink);
return -ENODEV;
}
kfifo_in(&c->fifo, &mbo, 1);
spin_unlock(&c->unlink);
#ifdef DEBUG_MESG
if (kfifo_is_full(&c->fifo))
dev_warn(c->dev, "Fifo is full\n");
#endif
wake_up_interruptible(&c->wq);
return 0;
}
/**
* comp_tx_completion - completion handler for tx channels
* @iface: pointer to interface instance
* @channel_id: channel index/ID
*
* This wakes sleeping processes in the wait-queue.
*/
static int comp_tx_completion(struct most_interface *iface, int channel_id)
{
struct comp_channel *c;
c = get_channel(iface, channel_id);
if (!c)
return -EINVAL;
if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
dev_warn(c->dev, "Channel ID out of range\n");
return -EINVAL;
}
wake_up_interruptible(&c->wq);
return 0;
}
/**
* comp_probe - probe function of the driver module
* @iface: pointer to interface instance
* @channel_id: channel index/ID
* @cfg: pointer to actual channel configuration
* @name: name of the device to be created
* @args: pointer to array of component parameters (from configfs)
*
* This allocates a channel object and creates the device node in /dev
*
* Returns 0 on success or error code otherwise.
*/
static int comp_probe(struct most_interface *iface, int channel_id,
struct most_channel_config *cfg, char *name, char *args)
{
struct comp_channel *c;
unsigned long cl_flags;
int retval;
int current_minor;
if (!cfg || !name)
return -EINVAL;
c = get_channel(iface, channel_id);
if (c)
return -EEXIST;
current_minor = ida_simple_get(&comp.minor_id, 0, 0, GFP_KERNEL);
if (current_minor < 0)
return current_minor;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c) {
retval = -ENOMEM;
goto err_remove_ida;
}
c->devno = MKDEV(comp.major, current_minor);
cdev_init(&c->cdev, &channel_fops);
c->cdev.owner = THIS_MODULE;
retval = cdev_add(&c->cdev, c->devno, 1);
if (retval < 0)
goto err_free_c;
c->iface = iface;
c->cfg = cfg;
c->channel_id = channel_id;
c->access_ref = 0;
spin_lock_init(&c->unlink);
INIT_KFIFO(c->fifo);
retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
if (retval)
goto err_del_cdev_and_free_channel;
init_waitqueue_head(&c->wq);
mutex_init(&c->io_mutex);
spin_lock_irqsave(&ch_list_lock, cl_flags);
list_add_tail(&c->list, &channel_list);
spin_unlock_irqrestore(&ch_list_lock, cl_flags);
c->dev = device_create(comp.class, NULL, c->devno, NULL, "%s", name);
if (IS_ERR(c->dev)) {
retval = PTR_ERR(c->dev);
goto err_free_kfifo_and_del_list;
}
kobject_uevent(&c->dev->kobj, KOBJ_ADD);
return 0;
err_free_kfifo_and_del_list:
kfifo_free(&c->fifo);
list_del(&c->list);
err_del_cdev_and_free_channel:
cdev_del(&c->cdev);
err_free_c:
kfree(c);
err_remove_ida:
ida_simple_remove(&comp.minor_id, current_minor);
return retval;
}
static struct cdev_component comp = {
.cc = {
.mod = THIS_MODULE,
.name = "cdev",
.probe_channel = comp_probe,
.disconnect_channel = comp_disconnect_channel,
.rx_completion = comp_rx_completion,
.tx_completion = comp_tx_completion,
},
};
static int __init most_cdev_init(void)
{
int err;
comp.class = class_create("most_cdev");
if (IS_ERR(comp.class))
return PTR_ERR(comp.class);
ida_init(&comp.minor_id);
err = alloc_chrdev_region(&comp.devno, 0, CHRDEV_REGION_SIZE, "cdev");
if (err < 0)
goto dest_ida;
comp.major = MAJOR(comp.devno);
err = most_register_component(&comp.cc);
if (err)
goto free_cdev;
err = most_register_configfs_subsys(&comp.cc);
if (err)
goto deregister_comp;
return 0;
deregister_comp:
most_deregister_component(&comp.cc);
free_cdev:
unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
dest_ida:
ida_destroy(&comp.minor_id);
class_destroy(comp.class);
return err;
}
static void __exit most_cdev_exit(void)
{
struct comp_channel *c, *tmp;
most_deregister_configfs_subsys(&comp.cc);
most_deregister_component(&comp.cc);
list_for_each_entry_safe(c, tmp, &channel_list, list) {
destroy_cdev(c);
destroy_channel(c);
}
unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
ida_destroy(&comp.minor_id);
class_destroy(comp.class);
}
module_init(most_cdev_init);
module_exit(most_cdev_exit);
MODULE_AUTHOR("Christian Gromm <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("character device component for mostcore");
| linux-master | drivers/most/most_cdev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* CPUFreq support for Armada 370/XP platforms.
*
* Copyright (C) 2012-2016 Marvell
*
* Yehuda Yitschak <[email protected]>
* Gregory Clement <[email protected]>
* Thomas Petazzoni <[email protected]>
*/
#define pr_fmt(fmt) "mvebu-pmsu: " fmt
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/resource.h>
static int __init armada_xp_pmsu_cpufreq_init(void)
{
struct device_node *np;
struct resource res;
int ret, cpu;
if (!of_machine_is_compatible("marvell,armadaxp"))
return 0;
/*
* In order to have proper cpufreq handling, we need to ensure
* that the Device Tree description of the CPU clock includes
* the definition of the PMU DFS registers. If not, we do not
* register the clock notifier and the cpufreq driver. This
* piece of code is only for compatibility with old Device
* Trees.
*/
np = of_find_compatible_node(NULL, NULL, "marvell,armada-xp-cpu-clock");
if (!np)
return 0;
ret = of_address_to_resource(np, 1, &res);
if (ret) {
pr_warn(FW_WARN "not enabling cpufreq, deprecated armada-xp-cpu-clock binding\n");
of_node_put(np);
return 0;
}
of_node_put(np);
/*
* For each CPU, this loop registers the operating points
* supported (which are the nominal CPU frequency and half of
* it), and registers the clock notifier that will take care
* of doing the PMSU part of a frequency transition.
*/
for_each_possible_cpu(cpu) {
struct device *cpu_dev;
struct clk *clk;
int ret;
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
pr_err("Cannot get CPU %d\n", cpu);
continue;
}
clk = clk_get(cpu_dev, NULL);
if (IS_ERR(clk)) {
pr_err("Cannot get clock for CPU %d\n", cpu);
return PTR_ERR(clk);
}
ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
if (ret) {
clk_put(clk);
return ret;
}
ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
if (ret) {
dev_pm_opp_remove(cpu_dev, clk_get_rate(clk));
clk_put(clk);
dev_err(cpu_dev, "Failed to register OPPs\n");
return ret;
}
ret = dev_pm_opp_set_sharing_cpus(cpu_dev,
cpumask_of(cpu_dev->id));
if (ret)
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
clk_put(clk);
}
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
return 0;
}
device_initcall(armada_xp_pmsu_cpufreq_init);
| linux-master | drivers/cpufreq/mvebu-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AMD K7 Powernow driver.
* (C) 2003 Dave Jones on behalf of SuSE Labs.
*
* Based upon datasheets & sample CPUs kindly provided by AMD.
*
* Errata 5:
* CPU may fail to execute a FID/VID change in presence of interrupt.
* - We cli/sti on stepping A0 CPUs around the FID/VID transition.
* Errata 15:
* CPU with half frequency multipliers may hang upon wakeup from disconnect.
* - We disable half multipliers if ACPI is used on A0 stepping CPUs.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dmi.h>
#include <linux/timex.h>
#include <linux/io.h>
#include <asm/timer.h> /* Needed for recalibrate_cpu_khz() */
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
#include <linux/acpi.h>
#include <acpi/processor.h>
#endif
#include "powernow-k7.h"
struct psb_s {
u8 signature[10];
u8 tableversion;
u8 flags;
u16 settlingtime;
u8 reserved1;
u8 numpst;
};
struct pst_s {
u32 cpuid;
u8 fsbspeed;
u8 maxfid;
u8 startvid;
u8 numpstates;
};
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
union powernow_acpi_control_t {
struct {
unsigned long fid:5,
vid:5,
sgtc:20,
res1:2;
} bits;
unsigned long val;
};
#endif
/* divide by 1000 to get VCore voltage in V. */
static const int mobile_vid_table[32] = {
2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650,
1600, 1550, 1500, 1450, 1400, 1350, 1300, 0,
1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100,
1075, 1050, 1025, 1000, 975, 950, 925, 0,
};
/* divide by 10 to get FID. */
static const int fid_codes[32] = {
110, 115, 120, 125, 50, 55, 60, 65,
70, 75, 80, 85, 90, 95, 100, 105,
30, 190, 40, 200, 130, 135, 140, 210,
150, 225, 160, 165, 170, 180, -1, -1,
};
/* This parameter is used to force ACPI instead of the legacy method for
 * configuration purposes.
 */
static int acpi_force;
static struct cpufreq_frequency_table *powernow_table;
static unsigned int can_scale_bus;
static unsigned int can_scale_vid;
static unsigned int minimum_speed = -1;
static unsigned int maximum_speed;
static unsigned int number_scales;
static unsigned int fsb;
static unsigned int latency;
static char have_a0;
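/*
 * Compare the PST entry's FSB speed (in MHz) against the measured bus
 * clock, allowing up to 5 MHz of deviation.
 */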
static int check_fsb(unsigned int fsbspeed)
{
int delta;
unsigned int f = fsb / 1000;
delta = (fsbspeed > f) ? fsbspeed - f : f - fsbspeed;
return delta < 5;
}
static const struct x86_cpu_id powernow_k7_cpuids[] = {
X86_MATCH_VENDOR_FAM(AMD, 6, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, powernow_k7_cpuids);
static int check_powernow(void)
{
struct cpuinfo_x86 *c = &cpu_data(0);
unsigned int maxei, eax, ebx, ecx, edx;
if (!x86_match_cpu(powernow_k7_cpuids))
return 0;
/* Get maximum capabilities */
maxei = cpuid_eax(0x80000000);
if (maxei < 0x80000007) { /* Any powernow info ? */
#ifdef MODULE
pr_info("No powernow capabilities detected\n");
#endif
return 0;
}
if ((c->x86_model == 6) && (c->x86_stepping == 0)) {
pr_info("K7 660[A0] core detected, enabling errata workarounds\n");
have_a0 = 1;
}
cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
/* Check we can actually do something before we say anything.*/
if (!(edx & (1 << 1 | 1 << 2)))
return 0;
pr_info("PowerNOW! Technology present. Can scale: ");
if (edx & 1 << 1) {
pr_cont("frequency");
can_scale_bus = 1;
}
if ((edx & (1 << 1 | 1 << 2)) == 0x6)
pr_cont(" and ");
if (edx & 1 << 2) {
pr_cont("voltage");
can_scale_vid = 1;
}
pr_cont("\n");
return 1;
}
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
static void invalidate_entry(unsigned int entry)
{
powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
}
#endif
static int get_ranges(unsigned char *pst)
{
unsigned int j;
unsigned int speed;
u8 fid, vid;
powernow_table = kzalloc((sizeof(*powernow_table) *
(number_scales + 1)), GFP_KERNEL);
if (!powernow_table)
return -ENOMEM;
for (j = 0 ; j < number_scales; j++) {
fid = *pst++;
powernow_table[j].frequency = (fsb * fid_codes[fid]) / 10;
powernow_table[j].driver_data = fid; /* lower 8 bits */
speed = powernow_table[j].frequency;
if ((fid_codes[fid] % 10) == 5) {
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
if (have_a0 == 1)
invalidate_entry(j);
#endif
}
if (speed < minimum_speed)
minimum_speed = speed;
if (speed > maximum_speed)
maximum_speed = speed;
vid = *pst++;
powernow_table[j].driver_data |= (vid << 8); /* upper 8 bits */
pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) "
"VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
fid_codes[fid] % 10, speed/1000, vid,
mobile_vid_table[vid]/1000,
mobile_vid_table[vid]%1000);
}
powernow_table[number_scales].frequency = CPUFREQ_TABLE_END;
powernow_table[number_scales].driver_data = 0;
return 0;
}
static void change_FID(int fid)
{
union msr_fidvidctl fidvidctl;
rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
if (fidvidctl.bits.FID != fid) {
fidvidctl.bits.SGTC = latency;
fidvidctl.bits.FID = fid;
fidvidctl.bits.VIDC = 0;
fidvidctl.bits.FIDC = 1;
wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
}
}
static void change_VID(int vid)
{
union msr_fidvidctl fidvidctl;
rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
if (fidvidctl.bits.VID != vid) {
fidvidctl.bits.SGTC = latency;
fidvidctl.bits.VID = vid;
fidvidctl.bits.FIDC = 0;
fidvidctl.bits.VIDC = 1;
wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
}
}
static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
{
u8 fid, vid;
struct cpufreq_freqs freqs;
union msr_fidvidstatus fidvidstatus;
int cfid;
/* fid is the lower 8 bits of the index we stored into
 * the cpufreq frequency table in powernow_decode_bios,
 * vid is the upper 8 bits.
 */
fid = powernow_table[index].driver_data & 0xFF;
vid = (powernow_table[index].driver_data & 0xFF00) >> 8;
rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
cfid = fidvidstatus.bits.CFID;
freqs.old = fsb * fid_codes[cfid] / 10;
freqs.new = powernow_table[index].frequency;
/* Now do the magic poking into the MSRs. */
if (have_a0 == 1) /* A0 errata 5 */
local_irq_disable();
if (freqs.old > freqs.new) {
/* Going down, so change FID first */
change_FID(fid);
change_VID(vid);
} else {
/* Going up, so change VID first */
change_VID(vid);
change_FID(fid);
}
if (have_a0 == 1)
local_irq_enable();
return 0;
}
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
static struct acpi_processor_performance *acpi_processor_perf;
static int powernow_acpi_init(void)
{
int i;
int retval = 0;
union powernow_acpi_control_t pc;
if (acpi_processor_perf != NULL && powernow_table != NULL) {
retval = -EINVAL;
goto err0;
}
acpi_processor_perf = kzalloc(sizeof(*acpi_processor_perf), GFP_KERNEL);
if (!acpi_processor_perf) {
retval = -ENOMEM;
goto err0;
}
if (!zalloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
GFP_KERNEL)) {
retval = -ENOMEM;
goto err05;
}
if (acpi_processor_register_performance(acpi_processor_perf, 0)) {
retval = -EIO;
goto err1;
}
if (acpi_processor_perf->control_register.space_id !=
ACPI_ADR_SPACE_FIXED_HARDWARE) {
retval = -ENODEV;
goto err2;
}
if (acpi_processor_perf->status_register.space_id !=
ACPI_ADR_SPACE_FIXED_HARDWARE) {
retval = -ENODEV;
goto err2;
}
number_scales = acpi_processor_perf->state_count;
if (number_scales < 2) {
retval = -ENODEV;
goto err2;
}
powernow_table = kzalloc((sizeof(*powernow_table) *
(number_scales + 1)), GFP_KERNEL);
if (!powernow_table) {
retval = -ENOMEM;
goto err2;
}
pc.val = (unsigned long) acpi_processor_perf->states[0].control;
for (i = 0; i < number_scales; i++) {
u8 fid, vid;
struct acpi_processor_px *state =
&acpi_processor_perf->states[i];
unsigned int speed, speed_mhz;
pc.val = (unsigned long) state->control;
pr_debug("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n",
i,
(u32) state->core_frequency,
(u32) state->power,
(u32) state->transition_latency,
(u32) state->control,
pc.bits.sgtc);
vid = pc.bits.vid;
fid = pc.bits.fid;
powernow_table[i].frequency = fsb * fid_codes[fid] / 10;
powernow_table[i].driver_data = fid; /* lower 8 bits */
powernow_table[i].driver_data |= (vid << 8); /* upper 8 bits */
speed = powernow_table[i].frequency;
speed_mhz = speed / 1000;
/* processor_perflib will multiply the MHz value by 1000 to
* get a KHz value (e.g. 1266000). However, powernow-k7 works
* with true KHz values (e.g. 1266768). To ensure that all
* powernow frequencies are available, we must ensure that
* ACPI doesn't restrict them, so we round up the MHz value
* to ensure that perflib's computed KHz value is greater than
* or equal to powernow's KHz value.
*/
if (speed % 1000 > 0)
speed_mhz++;
if ((fid_codes[fid] % 10) == 5) {
if (have_a0 == 1)
invalidate_entry(i);
}
pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) "
"VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
fid_codes[fid] % 10, speed_mhz, vid,
mobile_vid_table[vid]/1000,
mobile_vid_table[vid]%1000);
if (state->core_frequency != speed_mhz) {
state->core_frequency = speed_mhz;
pr_debug(" Corrected ACPI frequency to %d\n",
speed_mhz);
}
if (latency < pc.bits.sgtc)
latency = pc.bits.sgtc;
if (speed < minimum_speed)
minimum_speed = speed;
if (speed > maximum_speed)
maximum_speed = speed;
}
powernow_table[i].frequency = CPUFREQ_TABLE_END;
powernow_table[i].driver_data = 0;
/* notify BIOS that we exist */
acpi_processor_notify_smm(THIS_MODULE);
return 0;
err2:
acpi_processor_unregister_performance(0);
err1:
free_cpumask_var(acpi_processor_perf->shared_cpu_map);
err05:
kfree(acpi_processor_perf);
err0:
pr_warn("ACPI perflib can not be used on this platform\n");
acpi_processor_perf = NULL;
return retval;
}
#else
static int powernow_acpi_init(void)
{
pr_info("no support for ACPI processor found - please recompile your kernel with ACPI processor\n");
return -EINVAL;
}
#endif
static void print_pst_entry(struct pst_s *pst, unsigned int j)
{
pr_debug("PST:%d (@%p)\n", j, pst);
pr_debug(" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n",
pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid);
}
static int powernow_decode_bios(int maxfid, int startvid)
{
struct psb_s *psb;
struct pst_s *pst;
unsigned int i, j;
unsigned char *p;
unsigned int etuple;
unsigned int ret;
etuple = cpuid_eax(0x80000001);
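/* Scan the BIOS area (0xC0000-0xFFFF0) in 16-byte steps for the
 * "AMDK7PNOW!" PSB signature.
 */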
for (i = 0xC0000; i < 0xffff0 ; i += 16) {
p = phys_to_virt(i);
if (memcmp(p, "AMDK7PNOW!", 10) == 0) {
pr_debug("Found PSB header at %p\n", p);
psb = (struct psb_s *) p;
pr_debug("Table version: 0x%x\n", psb->tableversion);
if (psb->tableversion != 0x12) {
pr_info("Sorry, only v1.2 tables supported right now\n");
return -ENODEV;
}
pr_debug("Flags: 0x%x\n", psb->flags);
if ((psb->flags & 1) == 0)
pr_debug("Mobile voltage regulator\n");
else
pr_debug("Desktop voltage regulator\n");
latency = psb->settlingtime;
if (latency < 100) {
pr_info("BIOS set settling time to %d microseconds. Should be at least 100. Correcting.\n",
latency);
latency = 100;
}
pr_debug("Settling Time: %d microseconds.\n",
psb->settlingtime);
pr_debug("Has %d PST tables. (Only dumping ones "
"relevant to this CPU).\n",
psb->numpst);
p += sizeof(*psb);
pst = (struct pst_s *) p;
for (j = 0; j < psb->numpst; j++) {
pst = (struct pst_s *) p;
number_scales = pst->numpstates;
if ((etuple == pst->cpuid) &&
check_fsb(pst->fsbspeed) &&
(maxfid == pst->maxfid) &&
(startvid == pst->startvid)) {
print_pst_entry(pst, j);
p = (char *)pst + sizeof(*pst);
ret = get_ranges(p);
return ret;
} else {
unsigned int k;
p = (char *)pst + sizeof(*pst);
for (k = 0; k < number_scales; k++)
p += 2;
}
}
pr_info("No PST tables match this cpuid (0x%x)\n",
etuple);
pr_info("This is indicative of a broken BIOS\n");
return -EINVAL;
}
p++;
}
return -ENODEV;
}
/*
 * We exploit the fact that the bus frequency is roughly a multiple
 * of 100000/3 kHz, and compute sgtc in units of that multiple.
 * That more closely matches how AMD intended this to work, giving
 * the same behaviour already tested under the "well-known" other OS.
 */
static int fixup_sgtc(void)
{
unsigned int sgtc;
unsigned int m;
m = fsb / 3333;
if ((m % 10) >= 5)
m += 5;
m /= 10;
sgtc = 100 * m * latency;
sgtc = sgtc / 3;
if (sgtc > 0xfffff) {
pr_warn("SGTC too large %d\n", sgtc);
sgtc = 0xfffff;
}
return sgtc;
}
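/*
 * Worked example of the arithmetic above: with a 100 MHz FSB
 * (fsb = 100000) and latency = 100, m = 100000/3333 = 30, which
 * rounds and divides down to 3, so sgtc = 100 * 3 * 100 / 3 = 10000
 * bus clocks -- i.e. 100 us at 100 MHz, as expected.
 */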
static unsigned int powernow_get(unsigned int cpu)
{
union msr_fidvidstatus fidvidstatus;
unsigned int cfid;
if (cpu)
return 0;
rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
cfid = fidvidstatus.bits.CFID;
return fsb * fid_codes[cfid] / 10;
}
static int acer_cpufreq_pst(const struct dmi_system_id *d)
{
pr_warn("%s laptop with broken PST tables in BIOS detected\n",
d->ident);
pr_warn("You need to downgrade to 3A21 (09/09/2002), or try a newer BIOS than 3A71 (01/20/2003)\n");
pr_warn("cpufreq scaling has been disabled as a result of this\n");
return 0;
}
/*
* Some Athlon laptops have really fucked PST tables.
* A BIOS update is all that can save them.
* Mention this, and disable cpufreq.
*/
static const struct dmi_system_id powernow_dmi_table[] = {
{
.callback = acer_cpufreq_pst,
.ident = "Acer Aspire",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde Software"),
DMI_MATCH(DMI_BIOS_VERSION, "3A71"),
},
},
{ }
};
static int powernow_cpu_init(struct cpufreq_policy *policy)
{
union msr_fidvidstatus fidvidstatus;
int result;
if (policy->cpu != 0)
return -ENODEV;
rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
recalibrate_cpu_khz();
fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.CFID];
if (!fsb) {
pr_warn("can not determine bus frequency\n");
return -EINVAL;
}
pr_debug("FSB: %3dMHz\n", fsb/1000);
if (dmi_check_system(powernow_dmi_table) || acpi_force) {
pr_info("PSB/PST known to be broken - trying ACPI instead\n");
result = powernow_acpi_init();
} else {
result = powernow_decode_bios(fidvidstatus.bits.MFID,
fidvidstatus.bits.SVID);
if (result) {
pr_info("Trying ACPI perflib\n");
maximum_speed = 0;
minimum_speed = -1;
latency = 0;
result = powernow_acpi_init();
if (result) {
pr_info("ACPI and legacy methods failed\n");
}
} else {
/* SGTC use the bus clock as timer */
latency = fixup_sgtc();
pr_info("SGTC: %d\n", latency);
}
}
if (result)
return result;
pr_info("Minimum speed %d MHz - Maximum speed %d MHz\n",
minimum_speed/1000, maximum_speed/1000);
policy->cpuinfo.transition_latency =
cpufreq_scale(2000000UL, fsb, latency);
policy->freq_table = powernow_table;
return 0;
}
static int powernow_cpu_exit(struct cpufreq_policy *policy)
{
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
if (acpi_processor_perf) {
acpi_processor_unregister_performance(0);
free_cpumask_var(acpi_processor_perf->shared_cpu_map);
kfree(acpi_processor_perf);
}
#endif
kfree(powernow_table);
return 0;
}
static struct cpufreq_driver powernow_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = powernow_target,
.get = powernow_get,
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
.bios_limit = acpi_processor_get_bios_limit,
#endif
.init = powernow_cpu_init,
.exit = powernow_cpu_exit,
.name = "powernow-k7",
.attr = cpufreq_generic_attr,
};
static int __init powernow_init(void)
{
if (check_powernow() == 0)
return -ENODEV;
return cpufreq_register_driver(&powernow_driver);
}
static void __exit powernow_exit(void)
{
cpufreq_unregister_driver(&powernow_driver);
}
module_param(acpi_force, int, 0444);
MODULE_PARM_DESC(acpi_force, "Force ACPI to be used.");
MODULE_AUTHOR("Dave Jones");
MODULE_DESCRIPTION("Powernow driver for AMD K7 processors.");
MODULE_LICENSE("GPL");
late_initcall(powernow_init);
module_exit(powernow_exit);
| linux-master | drivers/cpufreq/powernow-k7.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2008 Marvell International Ltd.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/soc/pxa/cpu.h>
#include <linux/clk/pxa.h>
#include <linux/slab.h>
#include <linux/io.h>
#define HSS_104M (0)
#define HSS_156M (1)
#define HSS_208M (2)
#define HSS_312M (3)
#define SMCFS_78M (0)
#define SMCFS_104M (2)
#define SMCFS_208M (5)
#define SFLFS_104M (0)
#define SFLFS_156M (1)
#define SFLFS_208M (2)
#define SFLFS_312M (3)
#define XSPCLK_156M (0)
#define XSPCLK_NONE (3)
#define DMCFS_26M (0)
#define DMCFS_260M (3)
#define ACCR_XPDIS (1 << 31) /* Core PLL Output Disable */
#define ACCR_SPDIS (1 << 30) /* System PLL Output Disable */
#define ACCR_D0CS (1 << 26) /* D0 Mode Clock Select */
#define ACCR_PCCE (1 << 11) /* Power Mode Change Clock Enable */
#define ACCR_DDR_D0CS (1 << 7) /* DDR SDRAM clock frequency in D0CS (PXA31x only) */
#define ACCR_SMCFS_MASK (0x7 << 23) /* Static Memory Controller Frequency Select */
#define ACCR_SFLFS_MASK (0x3 << 18) /* Frequency Select for Internal Memory Controller */
#define ACCR_XSPCLK_MASK (0x3 << 16) /* Core Frequency during Frequency Change */
#define ACCR_HSS_MASK (0x3 << 14) /* System Bus-Clock Frequency Select */
#define ACCR_DMCFS_MASK (0x3 << 12) /* Dynamic Memory Controller Clock Frequency Select */
#define ACCR_XN_MASK (0x7 << 8) /* Core PLL Turbo-Mode-to-Run-Mode Ratio */
#define ACCR_XL_MASK (0x1f) /* Core PLL Run-Mode-to-Oscillator Ratio */
#define ACCR_SMCFS(x) (((x) & 0x7) << 23)
#define ACCR_SFLFS(x) (((x) & 0x3) << 18)
#define ACCR_XSPCLK(x) (((x) & 0x3) << 16)
#define ACCR_HSS(x) (((x) & 0x3) << 14)
#define ACCR_DMCFS(x) (((x) & 0x3) << 12)
#define ACCR_XN(x) (((x) & 0x7) << 8)
#define ACCR_XL(x) ((x) & 0x1f)
struct pxa3xx_freq_info {
unsigned int cpufreq_mhz;
unsigned int core_xl : 5;
unsigned int core_xn : 3;
unsigned int hss : 2;
unsigned int dmcfs : 2;
unsigned int smcfs : 3;
unsigned int sflfs : 2;
unsigned int df_clkdiv : 3;
int vcc_core; /* in mV */
int vcc_sram; /* in mV */
};
#define OP(cpufreq, _xl, _xn, _hss, _dmc, _smc, _sfl, _dfi, vcore, vsram) \
{ \
.cpufreq_mhz = cpufreq, \
.core_xl = _xl, \
.core_xn = _xn, \
.hss = HSS_##_hss##M, \
.dmcfs = DMCFS_##_dmc##M, \
.smcfs = SMCFS_##_smc##M, \
.sflfs = SFLFS_##_sfl##M, \
.df_clkdiv = _dfi, \
.vcc_core = vcore, \
.vcc_sram = vsram, \
}
static struct pxa3xx_freq_info pxa300_freqs[] = {
/* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */
OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */
OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */
OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */
OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */
};
static struct pxa3xx_freq_info pxa320_freqs[] = {
/* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */
OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */
OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */
OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */
OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */
OP(806, 31, 2, 208, 260, 208, 312, 3, 1400, 1400), /* 806MHz */
};
static unsigned int pxa3xx_freqs_num;
static struct pxa3xx_freq_info *pxa3xx_freqs;
static struct cpufreq_frequency_table *pxa3xx_freqs_table;
static int setup_freqs_table(struct cpufreq_policy *policy,
struct pxa3xx_freq_info *freqs, int num)
{
struct cpufreq_frequency_table *table;
int i;
table = kcalloc(num + 1, sizeof(*table), GFP_KERNEL);
if (table == NULL)
return -ENOMEM;
for (i = 0; i < num; i++) {
table[i].driver_data = i;
table[i].frequency = freqs[i].cpufreq_mhz * 1000;
}
table[num].driver_data = i;
table[num].frequency = CPUFREQ_TABLE_END;
pxa3xx_freqs = freqs;
pxa3xx_freqs_num = num;
pxa3xx_freqs_table = table;
policy->freq_table = table;
return 0;
}
static void __update_core_freq(struct pxa3xx_freq_info *info)
{
u32 mask, disable, enable, xclkcfg;
mask = ACCR_XN_MASK | ACCR_XL_MASK;
disable = mask | ACCR_XSPCLK_MASK;
enable = ACCR_XN(info->core_xn) | ACCR_XL(info->core_xl);
/* No clock until core PLL is re-locked */
enable |= ACCR_XSPCLK(XSPCLK_NONE);
xclkcfg = (info->core_xn == 2) ? 0x3 : 0x2; /* turbo bit */
pxa3xx_clk_update_accr(disable, enable, xclkcfg, mask);
}
static void __update_bus_freq(struct pxa3xx_freq_info *info)
{
u32 mask, disable, enable;
mask = ACCR_SMCFS_MASK | ACCR_SFLFS_MASK | ACCR_HSS_MASK |
ACCR_DMCFS_MASK;
disable = mask;
enable = ACCR_SMCFS(info->smcfs) | ACCR_SFLFS(info->sflfs) |
ACCR_HSS(info->hss) | ACCR_DMCFS(info->dmcfs);
pxa3xx_clk_update_accr(disable, enable, 0, mask);
}
static unsigned int pxa3xx_cpufreq_get(unsigned int cpu)
{
return pxa3xx_get_clk_frequency_khz(0);
}
static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, unsigned int index)
{
struct pxa3xx_freq_info *next;
unsigned long flags;
if (policy->cpu != 0)
return -EINVAL;
next = &pxa3xx_freqs[index];
local_irq_save(flags);
__update_core_freq(next);
__update_bus_freq(next);
local_irq_restore(flags);
return 0;
}
static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
{
int ret = -EINVAL;
/* set default policy and cpuinfo */
policy->min = policy->cpuinfo.min_freq = 104000;
policy->max = policy->cpuinfo.max_freq =
(cpu_is_pxa320()) ? 806000 : 624000;
policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
if (cpu_is_pxa300() || cpu_is_pxa310())
ret = setup_freqs_table(policy, pxa300_freqs,
ARRAY_SIZE(pxa300_freqs));
if (cpu_is_pxa320())
ret = setup_freqs_table(policy, pxa320_freqs,
ARRAY_SIZE(pxa320_freqs));
if (ret) {
pr_err("failed to setup frequency table\n");
return ret;
}
pr_info("CPUFREQ support for PXA3xx initialized\n");
return 0;
}
static struct cpufreq_driver pxa3xx_cpufreq_driver = {
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = pxa3xx_cpufreq_set,
.init = pxa3xx_cpufreq_init,
.get = pxa3xx_cpufreq_get,
.name = "pxa3xx-cpufreq",
};
static int __init cpufreq_init(void)
{
if (cpu_is_pxa3xx())
return cpufreq_register_driver(&pxa3xx_cpufreq_driver);
return 0;
}
module_init(cpufreq_init);
static void __exit cpufreq_exit(void)
{
cpufreq_unregister_driver(&pxa3xx_cpufreq_driver);
}
module_exit(cpufreq_exit);
MODULE_DESCRIPTION("CPU frequency scaling driver for PXA3xx");
MODULE_LICENSE("GPL");
| linux-master | drivers/cpufreq/pxa3xx-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* CPU frequency scaling for S5PC110/S5PV210
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/cpufreq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regulator/consumer.h>
static void __iomem *clk_base;
static void __iomem *dmc_base[2];
#define S5P_CLKREG(x) (clk_base + (x))
#define S5P_APLL_LOCK S5P_CLKREG(0x00)
#define S5P_APLL_CON S5P_CLKREG(0x100)
#define S5P_CLK_SRC0 S5P_CLKREG(0x200)
#define S5P_CLK_SRC2 S5P_CLKREG(0x208)
#define S5P_CLK_DIV0 S5P_CLKREG(0x300)
#define S5P_CLK_DIV2 S5P_CLKREG(0x308)
#define S5P_CLK_DIV6 S5P_CLKREG(0x318)
#define S5P_CLKDIV_STAT0 S5P_CLKREG(0x1000)
#define S5P_CLKDIV_STAT1 S5P_CLKREG(0x1004)
#define S5P_CLKMUX_STAT0 S5P_CLKREG(0x1100)
#define S5P_CLKMUX_STAT1 S5P_CLKREG(0x1104)
#define S5P_ARM_MCS_CON S5P_CLKREG(0x6100)
/* CLKSRC0 */
#define S5P_CLKSRC0_MUX200_SHIFT (16)
#define S5P_CLKSRC0_MUX200_MASK (0x1 << S5P_CLKSRC0_MUX200_SHIFT)
#define S5P_CLKSRC0_MUX166_MASK (0x1<<20)
#define S5P_CLKSRC0_MUX133_MASK (0x1<<24)
/* CLKSRC2 */
#define S5P_CLKSRC2_G3D_SHIFT (0)
#define S5P_CLKSRC2_G3D_MASK (0x3 << S5P_CLKSRC2_G3D_SHIFT)
#define S5P_CLKSRC2_MFC_SHIFT (4)
#define S5P_CLKSRC2_MFC_MASK (0x3 << S5P_CLKSRC2_MFC_SHIFT)
/* CLKDIV0 */
#define S5P_CLKDIV0_APLL_SHIFT (0)
#define S5P_CLKDIV0_APLL_MASK (0x7 << S5P_CLKDIV0_APLL_SHIFT)
#define S5P_CLKDIV0_A2M_SHIFT (4)
#define S5P_CLKDIV0_A2M_MASK (0x7 << S5P_CLKDIV0_A2M_SHIFT)
#define S5P_CLKDIV0_HCLK200_SHIFT (8)
#define S5P_CLKDIV0_HCLK200_MASK (0x7 << S5P_CLKDIV0_HCLK200_SHIFT)
#define S5P_CLKDIV0_PCLK100_SHIFT (12)
#define S5P_CLKDIV0_PCLK100_MASK (0x7 << S5P_CLKDIV0_PCLK100_SHIFT)
#define S5P_CLKDIV0_HCLK166_SHIFT (16)
#define S5P_CLKDIV0_HCLK166_MASK (0xF << S5P_CLKDIV0_HCLK166_SHIFT)
#define S5P_CLKDIV0_PCLK83_SHIFT (20)
#define S5P_CLKDIV0_PCLK83_MASK (0x7 << S5P_CLKDIV0_PCLK83_SHIFT)
#define S5P_CLKDIV0_HCLK133_SHIFT (24)
#define S5P_CLKDIV0_HCLK133_MASK (0xF << S5P_CLKDIV0_HCLK133_SHIFT)
#define S5P_CLKDIV0_PCLK66_SHIFT (28)
#define S5P_CLKDIV0_PCLK66_MASK (0x7 << S5P_CLKDIV0_PCLK66_SHIFT)
/* CLKDIV2 */
#define S5P_CLKDIV2_G3D_SHIFT (0)
#define S5P_CLKDIV2_G3D_MASK (0xF << S5P_CLKDIV2_G3D_SHIFT)
#define S5P_CLKDIV2_MFC_SHIFT (4)
#define S5P_CLKDIV2_MFC_MASK (0xF << S5P_CLKDIV2_MFC_SHIFT)
/* CLKDIV6 */
#define S5P_CLKDIV6_ONEDRAM_SHIFT (28)
#define S5P_CLKDIV6_ONEDRAM_MASK (0xF << S5P_CLKDIV6_ONEDRAM_SHIFT)
static struct clk *dmc0_clk;
static struct clk *dmc1_clk;
static DEFINE_MUTEX(set_freq_lock);
/* APLL M,P,S values for 1G/800Mhz */
#define APLL_VAL_1000 ((1 << 31) | (125 << 16) | (3 << 8) | 1)
#define APLL_VAL_800 ((1 << 31) | (100 << 16) | (3 << 8) | 1)
/* Use 800MHz when entering sleep mode */
#define SLEEP_FREQ (800 * 1000)
/* Tracks whether the CPU frequency may still be updated */
static bool no_cpufreq_access;
/*
* DRAM configurations to calculate refresh counter for changing
* frequency of memory.
*/
struct dram_conf {
unsigned long freq; /* HZ */
unsigned long refresh; /* DRAM refresh counter * 1000 */
};
/* DRAM configuration (DMC0 and DMC1) */
static struct dram_conf s5pv210_dram_conf[2];
enum perf_level {
L0, L1, L2, L3, L4,
};
enum s5pv210_mem_type {
LPDDR = 0x1,
LPDDR2 = 0x2,
DDR2 = 0x4,
};
enum s5pv210_dmc_port {
DMC0 = 0,
DMC1,
};
static struct cpufreq_frequency_table s5pv210_freq_table[] = {
{0, L0, 1000*1000},
{0, L1, 800*1000},
{0, L2, 400*1000},
{0, L3, 200*1000},
{0, L4, 100*1000},
{0, 0, CPUFREQ_TABLE_END},
};
static struct regulator *arm_regulator;
static struct regulator *int_regulator;
struct s5pv210_dvs_conf {
int arm_volt; /* uV */
int int_volt; /* uV */
};
static const int arm_volt_max = 1350000;
static const int int_volt_max = 1250000;
static struct s5pv210_dvs_conf dvs_conf[] = {
[L0] = {
.arm_volt = 1250000,
.int_volt = 1100000,
},
[L1] = {
.arm_volt = 1200000,
.int_volt = 1100000,
},
[L2] = {
.arm_volt = 1050000,
.int_volt = 1100000,
},
[L3] = {
.arm_volt = 950000,
.int_volt = 1100000,
},
[L4] = {
.arm_volt = 950000,
.int_volt = 1000000,
},
};
static u32 clkdiv_val[5][11] = {
/*
* Clock divider value for following
* { APLL, A2M, HCLK_MSYS, PCLK_MSYS,
* HCLK_DSYS, PCLK_DSYS, HCLK_PSYS, PCLK_PSYS,
* ONEDRAM, MFC, G3D }
*/
/* L0 : [1000/200/100][166/83][133/66][200/200] */
{0, 4, 4, 1, 3, 1, 4, 1, 3, 0, 0},
/* L1 : [800/200/100][166/83][133/66][200/200] */
{0, 3, 3, 1, 3, 1, 4, 1, 3, 0, 0},
/* L2 : [400/200/100][166/83][133/66][200/200] */
{1, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},
/* L3 : [200/200/100][166/83][133/66][200/200] */
{3, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},
/* L4 : [100/100/100][83/83][66/66][100/100] */
{7, 7, 0, 0, 7, 0, 9, 0, 7, 0, 0},
};
/*
 * This function sets the DRAM refresh counter
 * according to the operating frequency of the DRAM.
 * ch: DMC port number, 0 or 1
 * freq: operating frequency of the DRAM (kHz)
 */
static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
{
unsigned long tmp, tmp1;
void __iomem *reg = NULL;
if (ch == DMC0) {
reg = (dmc_base[0] + 0x30);
} else if (ch == DMC1) {
reg = (dmc_base[1] + 0x30);
} else {
pr_err("Cannot find DMC port\n");
return;
}
/* Find current DRAM frequency */
tmp = s5pv210_dram_conf[ch].freq;
tmp /= freq;
tmp1 = s5pv210_dram_conf[ch].refresh;
tmp1 /= tmp;
writel_relaxed(tmp1, reg);
}
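/*
 * Example of the scaling above: with a DMC1 base configuration of
 * 200 MHz and a stored refresh value of R * 1000, requesting 100000
 * (100 MHz) halves the programmed counter, keeping the absolute
 * refresh period constant across frequency changes.
 */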
static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned long reg;
unsigned int priv_index;
unsigned int pll_changing = 0;
unsigned int bus_speed_changing = 0;
unsigned int old_freq, new_freq;
int arm_volt, int_volt;
int ret = 0;
mutex_lock(&set_freq_lock);
if (no_cpufreq_access) {
pr_err("Denied access to %s as it is disabled temporarily\n",
__func__);
ret = -EINVAL;
goto exit;
}
old_freq = policy->cur;
new_freq = s5pv210_freq_table[index].frequency;
/* Finding current running level index */
priv_index = cpufreq_table_find_index_h(policy, old_freq, false);
arm_volt = dvs_conf[index].arm_volt;
int_volt = dvs_conf[index].int_volt;
if (new_freq > old_freq) {
ret = regulator_set_voltage(arm_regulator,
arm_volt, arm_volt_max);
if (ret)
goto exit;
ret = regulator_set_voltage(int_regulator,
int_volt, int_volt_max);
if (ret)
goto exit;
}
/* Check if the PLL needs to be changed */
if ((index == L0) || (priv_index == L0))
pll_changing = 1;
/* Check if the system bus clock needs to be changed */
if ((index == L4) || (priv_index == L4))
bus_speed_changing = 1;
if (bus_speed_changing) {
/*
 * Reconfigure the DRAM refresh counter value for the minimum
 * temporary clock while changing the divider.
 * The expected clock is 83MHz: 7.8us / (1/83MHz) = 0x287
 */
if (pll_changing)
s5pv210_set_refresh(DMC1, 83000);
else
s5pv210_set_refresh(DMC1, 100000);
s5pv210_set_refresh(DMC0, 83000);
}
/*
 * The APLL should be changed at this level:
 * APLL -> MPLL (for a stable transition) -> APLL
 * The clock API for some clock sources is not ready,
 * so do not use the clock API in the code below.
 */
if (pll_changing) {
/*
* 1. Temporary Change divider for MFC and G3D
* SCLKA2M(200/1=200)->(200/4=50)Mhz
*/
reg = readl_relaxed(S5P_CLK_DIV2);
reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
reg |= (3 << S5P_CLKDIV2_G3D_SHIFT) |
(3 << S5P_CLKDIV2_MFC_SHIFT);
writel_relaxed(reg, S5P_CLK_DIV2);
/* For MFC, G3D dividing */
do {
reg = readl_relaxed(S5P_CLKDIV_STAT0);
} while (reg & ((1 << 16) | (1 << 17)));
/*
* 2. Change SCLKA2M(200Mhz)to SCLKMPLL in MFC_MUX, G3D MUX
* (200/4=50)->(667/4=166)Mhz
*/
reg = readl_relaxed(S5P_CLK_SRC2);
reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
reg |= (1 << S5P_CLKSRC2_G3D_SHIFT) |
(1 << S5P_CLKSRC2_MFC_SHIFT);
writel_relaxed(reg, S5P_CLK_SRC2);
do {
reg = readl_relaxed(S5P_CLKMUX_STAT1);
} while (reg & ((1 << 7) | (1 << 3)));
		/*
		 * 3. Set the DMC1 refresh count for 133Mhz. If (index == L4)
		 * is true, the refresh counter was already programmed
		 * above (0x287 @ 83Mhz).
		 */
if (!bus_speed_changing)
s5pv210_set_refresh(DMC1, 133000);
/* 4. SCLKAPLL -> SCLKMPLL */
reg = readl_relaxed(S5P_CLK_SRC0);
reg &= ~(S5P_CLKSRC0_MUX200_MASK);
reg |= (0x1 << S5P_CLKSRC0_MUX200_SHIFT);
writel_relaxed(reg, S5P_CLK_SRC0);
do {
reg = readl_relaxed(S5P_CLKMUX_STAT0);
} while (reg & (0x1 << 18));
}
/* Change divider */
reg = readl_relaxed(S5P_CLK_DIV0);
reg &= ~(S5P_CLKDIV0_APLL_MASK | S5P_CLKDIV0_A2M_MASK |
S5P_CLKDIV0_HCLK200_MASK | S5P_CLKDIV0_PCLK100_MASK |
S5P_CLKDIV0_HCLK166_MASK | S5P_CLKDIV0_PCLK83_MASK |
S5P_CLKDIV0_HCLK133_MASK | S5P_CLKDIV0_PCLK66_MASK);
reg |= ((clkdiv_val[index][0] << S5P_CLKDIV0_APLL_SHIFT) |
(clkdiv_val[index][1] << S5P_CLKDIV0_A2M_SHIFT) |
(clkdiv_val[index][2] << S5P_CLKDIV0_HCLK200_SHIFT) |
(clkdiv_val[index][3] << S5P_CLKDIV0_PCLK100_SHIFT) |
(clkdiv_val[index][4] << S5P_CLKDIV0_HCLK166_SHIFT) |
(clkdiv_val[index][5] << S5P_CLKDIV0_PCLK83_SHIFT) |
(clkdiv_val[index][6] << S5P_CLKDIV0_HCLK133_SHIFT) |
(clkdiv_val[index][7] << S5P_CLKDIV0_PCLK66_SHIFT));
writel_relaxed(reg, S5P_CLK_DIV0);
do {
reg = readl_relaxed(S5P_CLKDIV_STAT0);
} while (reg & 0xff);
/* ARM MCS value changed */
reg = readl_relaxed(S5P_ARM_MCS_CON);
reg &= ~0x3;
if (index >= L3)
reg |= 0x3;
else
reg |= 0x1;
writel_relaxed(reg, S5P_ARM_MCS_CON);
if (pll_changing) {
/* 5. Set Lock time = 30us*24Mhz = 0x2cf */
writel_relaxed(0x2cf, S5P_APLL_LOCK);
/*
* 6. Turn on APLL
* 6-1. Set PMS values
* 6-2. Wait until the PLL is locked
*/
if (index == L0)
writel_relaxed(APLL_VAL_1000, S5P_APLL_CON);
else
writel_relaxed(APLL_VAL_800, S5P_APLL_CON);
do {
reg = readl_relaxed(S5P_APLL_CON);
} while (!(reg & (0x1 << 29)));
/*
* 7. Change source clock from SCLKMPLL(667Mhz)
* to SCLKA2M(200Mhz) in MFC_MUX and G3D MUX
* (667/4=166)->(200/4=50)Mhz
*/
reg = readl_relaxed(S5P_CLK_SRC2);
reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
reg |= (0 << S5P_CLKSRC2_G3D_SHIFT) |
(0 << S5P_CLKSRC2_MFC_SHIFT);
writel_relaxed(reg, S5P_CLK_SRC2);
do {
reg = readl_relaxed(S5P_CLKMUX_STAT1);
} while (reg & ((1 << 7) | (1 << 3)));
/*
* 8. Change divider for MFC and G3D
* (200/4=50)->(200/1=200)Mhz
*/
reg = readl_relaxed(S5P_CLK_DIV2);
reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
reg |= (clkdiv_val[index][10] << S5P_CLKDIV2_G3D_SHIFT) |
(clkdiv_val[index][9] << S5P_CLKDIV2_MFC_SHIFT);
writel_relaxed(reg, S5P_CLK_DIV2);
/* For MFC, G3D dividing */
do {
reg = readl_relaxed(S5P_CLKDIV_STAT0);
} while (reg & ((1 << 16) | (1 << 17)));
/* 9. Change MPLL to APLL in MSYS_MUX */
reg = readl_relaxed(S5P_CLK_SRC0);
reg &= ~(S5P_CLKSRC0_MUX200_MASK);
reg |= (0x0 << S5P_CLKSRC0_MUX200_SHIFT);
writel_relaxed(reg, S5P_CLK_SRC0);
do {
reg = readl_relaxed(S5P_CLKMUX_STAT0);
} while (reg & (0x1 << 18));
/*
* 10. DMC1 refresh counter
* L4 : DMC1 = 100Mhz 7.8us/(1/100) = 0x30c
* Others : DMC1 = 200Mhz 7.8us/(1/200) = 0x618
*/
if (!bus_speed_changing)
s5pv210_set_refresh(DMC1, 200000);
}
/*
* L4 level needs to change memory bus speed, hence ONEDRAM clock
* divider and memory refresh parameter should be changed
*/
if (bus_speed_changing) {
reg = readl_relaxed(S5P_CLK_DIV6);
reg &= ~S5P_CLKDIV6_ONEDRAM_MASK;
reg |= (clkdiv_val[index][8] << S5P_CLKDIV6_ONEDRAM_SHIFT);
writel_relaxed(reg, S5P_CLK_DIV6);
do {
reg = readl_relaxed(S5P_CLKDIV_STAT1);
} while (reg & (1 << 15));
/* Reconfigure DRAM refresh counter value */
if (index != L4) {
/*
* DMC0 : 166Mhz
* DMC1 : 200Mhz
*/
s5pv210_set_refresh(DMC0, 166000);
s5pv210_set_refresh(DMC1, 200000);
} else {
/*
* DMC0 : 83Mhz
* DMC1 : 100Mhz
*/
s5pv210_set_refresh(DMC0, 83000);
s5pv210_set_refresh(DMC1, 100000);
}
}
if (new_freq < old_freq) {
regulator_set_voltage(int_regulator,
int_volt, int_volt_max);
regulator_set_voltage(arm_regulator,
arm_volt, arm_volt_max);
}
pr_debug("Perf changed[L%d]\n", index);
exit:
mutex_unlock(&set_freq_lock);
return ret;
}
static int check_mem_type(void __iomem *dmc_reg)
{
unsigned long val;
val = readl_relaxed(dmc_reg + 0x4);
val = (val & (0xf << 8));
return val >> 8;
}
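/*
 * check_mem_type() returns bits [11:8] of the DMC memory-control register.
 * For example (value assumed from the driver's memory-type enum, which is
 * defined earlier in this file and not shown in this excerpt), a field
 * value of 0x2 would be reported as LPDDR2.
 */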
static int s5pv210_cpu_init(struct cpufreq_policy *policy)
{
unsigned long mem_type;
int ret;
policy->clk = clk_get(NULL, "armclk");
if (IS_ERR(policy->clk))
return PTR_ERR(policy->clk);
dmc0_clk = clk_get(NULL, "sclk_dmc0");
if (IS_ERR(dmc0_clk)) {
ret = PTR_ERR(dmc0_clk);
goto out_dmc0;
}
dmc1_clk = clk_get(NULL, "hclk_msys");
if (IS_ERR(dmc1_clk)) {
ret = PTR_ERR(dmc1_clk);
goto out_dmc1;
}
if (policy->cpu != 0) {
ret = -EINVAL;
goto out_dmc1;
}
	/*
	 * check_mem_type : This driver only supports LPDDR & LPDDR2;
	 * other memory types are not supported.
	 */
mem_type = check_mem_type(dmc_base[0]);
if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
pr_err("CPUFreq doesn't support this memory type\n");
ret = -EINVAL;
goto out_dmc1;
}
	/* Find the current refresh counter and frequency of each DMC */
s5pv210_dram_conf[0].refresh = (readl_relaxed(dmc_base[0] + 0x30) * 1000);
s5pv210_dram_conf[0].freq = clk_get_rate(dmc0_clk);
s5pv210_dram_conf[1].refresh = (readl_relaxed(dmc_base[1] + 0x30) * 1000);
s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
policy->suspend_freq = SLEEP_FREQ;
cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
return 0;
out_dmc1:
clk_put(dmc0_clk);
out_dmc0:
clk_put(policy->clk);
return ret;
}
static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
int ret;
struct cpufreq_policy *policy;
policy = cpufreq_cpu_get(0);
if (!policy) {
pr_debug("cpufreq: get no policy for cpu0\n");
return NOTIFY_BAD;
}
ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0);
cpufreq_cpu_put(policy);
if (ret < 0)
return NOTIFY_BAD;
no_cpufreq_access = true;
return NOTIFY_DONE;
}
static struct cpufreq_driver s5pv210_driver = {
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = s5pv210_target,
.get = cpufreq_generic_get,
.init = s5pv210_cpu_init,
.name = "s5pv210",
.suspend = cpufreq_generic_suspend,
.resume = cpufreq_generic_suspend, /* We need to set SLEEP FREQ again */
};
static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
.notifier_call = s5pv210_cpufreq_reboot_notifier_event,
};
static int s5pv210_cpufreq_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np;
int id, result = 0;
/*
* HACK: This is a temporary workaround to get access to clock
* and DMC controller registers directly and remove static mappings
* and dependencies on platform headers. It is necessary to enable
* S5PV210 multi-platform support and will be removed together with
* this whole driver as soon as S5PV210 gets migrated to use
* cpufreq-dt driver.
*/
arm_regulator = regulator_get(NULL, "vddarm");
if (IS_ERR(arm_regulator))
return dev_err_probe(dev, PTR_ERR(arm_regulator),
"failed to get regulator vddarm\n");
int_regulator = regulator_get(NULL, "vddint");
if (IS_ERR(int_regulator)) {
result = dev_err_probe(dev, PTR_ERR(int_regulator),
"failed to get regulator vddint\n");
goto err_int_regulator;
}
np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock");
if (!np) {
dev_err(dev, "failed to find clock controller DT node\n");
result = -ENODEV;
goto err_clock;
}
clk_base = of_iomap(np, 0);
of_node_put(np);
if (!clk_base) {
dev_err(dev, "failed to map clock registers\n");
result = -EFAULT;
goto err_clock;
}
for_each_compatible_node(np, NULL, "samsung,s5pv210-dmc") {
id = of_alias_get_id(np, "dmc");
if (id < 0 || id >= ARRAY_SIZE(dmc_base)) {
dev_err(dev, "failed to get alias of dmc node '%pOFn'\n", np);
of_node_put(np);
result = id;
goto err_clk_base;
}
dmc_base[id] = of_iomap(np, 0);
if (!dmc_base[id]) {
dev_err(dev, "failed to map dmc%d registers\n", id);
of_node_put(np);
result = -EFAULT;
goto err_dmc;
}
}
for (id = 0; id < ARRAY_SIZE(dmc_base); ++id) {
if (!dmc_base[id]) {
dev_err(dev, "failed to find dmc%d node\n", id);
result = -ENODEV;
goto err_dmc;
}
}
register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier);
return cpufreq_register_driver(&s5pv210_driver);
err_dmc:
for (id = 0; id < ARRAY_SIZE(dmc_base); ++id)
if (dmc_base[id]) {
iounmap(dmc_base[id]);
dmc_base[id] = NULL;
}
err_clk_base:
iounmap(clk_base);
err_clock:
regulator_put(int_regulator);
err_int_regulator:
regulator_put(arm_regulator);
return result;
}
static struct platform_driver s5pv210_cpufreq_platdrv = {
.driver = {
.name = "s5pv210-cpufreq",
},
.probe = s5pv210_cpufreq_probe,
};
builtin_platform_driver(s5pv210_cpufreq_platdrv);
| linux-master | drivers/cpufreq/s5pv210-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* acpi-cpufreq.c - ACPI Processor P-States Driver
*
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
* Copyright (C) 2002 - 2004 Dominik Brodowski <[email protected]>
* Copyright (C) 2006 Denis Sadykov <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");
enum {
UNDEFINED_CAPABLE = 0,
SYSTEM_INTEL_MSR_CAPABLE,
SYSTEM_AMD_MSR_CAPABLE,
SYSTEM_IO_CAPABLE,
};
#define INTEL_MSR_RANGE (0xffff)
#define AMD_MSR_RANGE (0x7)
#define HYGON_MSR_RANGE (0x7)
#define MSR_K7_HWCR_CPB_DIS (1ULL << 25)
struct acpi_cpufreq_data {
unsigned int resume;
unsigned int cpu_feature;
unsigned int acpi_perf_cpu;
cpumask_var_t freqdomain_cpus;
void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
};
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;
static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
{
return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
}
static struct cpufreq_driver acpi_cpufreq_driver;
static unsigned int acpi_pstate_strict;
static bool boost_state(unsigned int cpu)
{
u32 lo, hi;
u64 msr;
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
case X86_VENDOR_CENTAUR:
case X86_VENDOR_ZHAOXIN:
rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
msr = lo | ((u64)hi << 32);
return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
msr = lo | ((u64)hi << 32);
return !(msr & MSR_K7_HWCR_CPB_DIS);
}
return false;
}
static int boost_set_msr(bool enable)
{
u32 msr_addr;
u64 msr_mask, val;
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
case X86_VENDOR_CENTAUR:
case X86_VENDOR_ZHAOXIN:
msr_addr = MSR_IA32_MISC_ENABLE;
msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
break;
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
msr_addr = MSR_K7_HWCR;
msr_mask = MSR_K7_HWCR_CPB_DIS;
break;
default:
return -EINVAL;
}
rdmsrl(msr_addr, val);
if (enable)
val &= ~msr_mask;
else
val |= msr_mask;
wrmsrl(msr_addr, val);
return 0;
}
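/*
 * Concretely (bit positions stated here for illustration, per msr-index.h):
 * enabling boost clears MSR_IA32_MISC_ENABLE_TURBO_DISABLE (bit 38 of
 * MSR_IA32_MISC_ENABLE) on Intel-like vendors, and clears
 * MSR_K7_HWCR_CPB_DIS (bit 25 of MSR_K7_HWCR, defined above) on AMD/Hygon.
 * Both vendors expose a *disable* bit, which is why "enable" clears the
 * mask and "disable" sets it in boost_set_msr() above.
 */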
static void boost_set_msr_each(void *p_en)
{
bool enable = (bool) p_en;
boost_set_msr(enable);
}
static int set_boost(struct cpufreq_policy *policy, int val)
{
on_each_cpu_mask(policy->cpus, boost_set_msr_each,
(void *)(long)val, 1);
pr_debug("CPU %*pbl: Core Boosting %s.\n",
cpumask_pr_args(policy->cpus), str_enabled_disabled(val));
return 0;
}
static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
struct acpi_cpufreq_data *data = policy->driver_data;
if (unlikely(!data))
return -ENODEV;
return cpufreq_show_cpus(data->freqdomain_cpus, buf);
}
cpufreq_freq_attr_ro(freqdomain_cpus);
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
size_t count)
{
int ret;
unsigned int val = 0;
if (!acpi_cpufreq_driver.set_boost)
return -EINVAL;
ret = kstrtouint(buf, 10, &val);
if (ret || val > 1)
return -EINVAL;
cpus_read_lock();
set_boost(policy, val);
cpus_read_unlock();
return count;
}
static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled);
}
cpufreq_freq_attr_rw(cpb);
#endif
static int check_est_cpu(unsigned int cpuid)
{
struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
return cpu_has(cpu, X86_FEATURE_EST);
}
static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}
static unsigned extract_io(struct cpufreq_policy *policy, u32 value)
{
struct acpi_cpufreq_data *data = policy->driver_data;
struct acpi_processor_performance *perf;
int i;
perf = to_perf_data(data);
for (i = 0; i < perf->state_count; i++) {
if (value == perf->states[i].status)
return policy->freq_table[i].frequency;
}
return 0;
}
static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
{
struct acpi_cpufreq_data *data = policy->driver_data;
struct cpufreq_frequency_table *pos;
struct acpi_processor_performance *perf;
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
msr &= AMD_MSR_RANGE;
else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
msr &= HYGON_MSR_RANGE;
else
msr &= INTEL_MSR_RANGE;
perf = to_perf_data(data);
cpufreq_for_each_entry(pos, policy->freq_table)
if (msr == perf->states[pos->driver_data].status)
return pos->frequency;
return policy->freq_table[0].frequency;
}
static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
{
struct acpi_cpufreq_data *data = policy->driver_data;
switch (data->cpu_feature) {
case SYSTEM_INTEL_MSR_CAPABLE:
case SYSTEM_AMD_MSR_CAPABLE:
return extract_msr(policy, val);
case SYSTEM_IO_CAPABLE:
return extract_io(policy, val);
default:
return 0;
}
}
static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
{
u32 val, dummy __always_unused;
rdmsr(MSR_IA32_PERF_CTL, val, dummy);
return val;
}
static void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
{
u32 lo, hi;
rdmsr(MSR_IA32_PERF_CTL, lo, hi);
lo = (lo & ~INTEL_MSR_RANGE) | (val & INTEL_MSR_RANGE);
wrmsr(MSR_IA32_PERF_CTL, lo, hi);
}
static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
{
u32 val, dummy __always_unused;
rdmsr(MSR_AMD_PERF_CTL, val, dummy);
return val;
}
static void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
{
wrmsr(MSR_AMD_PERF_CTL, val, 0);
}
static u32 cpu_freq_read_io(struct acpi_pct_register *reg)
{
u32 val;
acpi_os_read_port(reg->address, &val, reg->bit_width);
return val;
}
static void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
{
acpi_os_write_port(reg->address, val, reg->bit_width);
}
struct drv_cmd {
struct acpi_pct_register *reg;
u32 val;
union {
void (*write)(struct acpi_pct_register *reg, u32 val);
u32 (*read)(struct acpi_pct_register *reg);
} func;
};
/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
struct drv_cmd *cmd = _cmd;
cmd->val = cmd->func.read(cmd->reg);
}
static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
{
struct acpi_processor_performance *perf = to_perf_data(data);
struct drv_cmd cmd = {
.reg = &perf->control_register,
.func.read = data->cpu_freq_read,
};
int err;
err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
WARN_ON_ONCE(err); /* smp_call_function_any() was buggy? */
return cmd.val;
}
/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
struct drv_cmd *cmd = _cmd;
cmd->func.write(cmd->reg, cmd->val);
}
static void drv_write(struct acpi_cpufreq_data *data,
const struct cpumask *mask, u32 val)
{
struct acpi_processor_performance *perf = to_perf_data(data);
struct drv_cmd cmd = {
.reg = &perf->control_register,
.val = val,
.func.write = data->cpu_freq_write,
};
int this_cpu;
this_cpu = get_cpu();
if (cpumask_test_cpu(this_cpu, mask))
do_drv_write(&cmd);
smp_call_function_many(mask, do_drv_write, &cmd, 1);
put_cpu();
}
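/*
 * Note: smp_call_function_many() does not run the callback on the calling
 * CPU, which is why drv_write() above invokes do_drv_write() directly when
 * the local CPU is part of the mask; get_cpu()/put_cpu() disables
 * preemption so this_cpu stays valid across both steps.
 */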
static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
{
u32 val;
if (unlikely(cpumask_empty(mask)))
return 0;
val = drv_read(data, mask);
pr_debug("%s = %u\n", __func__, val);
return val;
}
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
struct acpi_cpufreq_data *data;
struct cpufreq_policy *policy;
unsigned int freq;
unsigned int cached_freq;
pr_debug("%s (%d)\n", __func__, cpu);
policy = cpufreq_cpu_get_raw(cpu);
if (unlikely(!policy))
return 0;
data = policy->driver_data;
if (unlikely(!data || !policy->freq_table))
return 0;
cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
if (freq != cached_freq) {
/*
* The dreaded BIOS frequency change behind our back.
* Force set the frequency on next target call.
*/
data->resume = 1;
}
pr_debug("cur freq = %u\n", freq);
return freq;
}
static unsigned int check_freqs(struct cpufreq_policy *policy,
const struct cpumask *mask, unsigned int freq)
{
struct acpi_cpufreq_data *data = policy->driver_data;
unsigned int cur_freq;
unsigned int i;
for (i = 0; i < 100; i++) {
cur_freq = extract_freq(policy, get_cur_val(mask, data));
if (cur_freq == freq)
return 1;
udelay(10);
}
return 0;
}
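/*
 * Note on the strict check above: check_freqs() polls up to 100 times with
 * a 10us delay, so a strict transition check fails if the new frequency is
 * not observed within roughly 1ms.
 */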
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
unsigned int index)
{
struct acpi_cpufreq_data *data = policy->driver_data;
struct acpi_processor_performance *perf;
const struct cpumask *mask;
unsigned int next_perf_state = 0; /* Index into perf table */
int result = 0;
if (unlikely(!data)) {
return -ENODEV;
}
perf = to_perf_data(data);
next_perf_state = policy->freq_table[index].driver_data;
if (perf->state == next_perf_state) {
if (unlikely(data->resume)) {
pr_debug("Called after resume, resetting to P%d\n",
next_perf_state);
data->resume = 0;
} else {
pr_debug("Already at target state (P%d)\n",
next_perf_state);
return 0;
}
}
/*
* The core won't allow CPUs to go away until the governor has been
* stopped, so we can rely on the stability of policy->cpus.
*/
mask = policy->shared_type == CPUFREQ_SHARED_TYPE_ANY ?
cpumask_of(policy->cpu) : policy->cpus;
drv_write(data, mask, perf->states[next_perf_state].control);
if (acpi_pstate_strict) {
if (!check_freqs(policy, mask,
policy->freq_table[index].frequency)) {
pr_debug("%s (%d)\n", __func__, policy->cpu);
result = -EAGAIN;
}
}
if (!result)
perf->state = next_perf_state;
return result;
}
static unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct acpi_cpufreq_data *data = policy->driver_data;
struct acpi_processor_performance *perf;
struct cpufreq_frequency_table *entry;
unsigned int next_perf_state, next_freq, index;
/*
* Find the closest frequency above target_freq.
*/
if (policy->cached_target_freq == target_freq)
index = policy->cached_resolved_idx;
else
index = cpufreq_table_find_index_dl(policy, target_freq,
false);
entry = &policy->freq_table[index];
next_freq = entry->frequency;
next_perf_state = entry->driver_data;
perf = to_perf_data(data);
if (perf->state == next_perf_state) {
if (unlikely(data->resume))
data->resume = 0;
else
return next_freq;
}
data->cpu_freq_write(&perf->control_register,
perf->states[next_perf_state].control);
perf->state = next_perf_state;
return next_freq;
}
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
struct acpi_processor_performance *perf;
perf = to_perf_data(data);
if (cpu_khz) {
/* search the closest match to cpu_khz */
unsigned int i;
unsigned long freq;
unsigned long freqn = perf->states[0].core_frequency * 1000;
for (i = 0; i < (perf->state_count-1); i++) {
freq = freqn;
freqn = perf->states[i+1].core_frequency * 1000;
if ((2 * cpu_khz) > (freqn + freq)) {
perf->state = i;
return freq;
}
}
perf->state = perf->state_count-1;
return freqn;
} else {
/* assume CPU is at P0... */
perf->state = 0;
return perf->states[0].core_frequency * 1000;
}
}
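/*
 * Worked example for acpi_cpufreq_guess_freq() (numbers assumed): with
 * P-states at 2000MHz and 1600MHz and cpu_khz = 1994999, the midpoint test
 * at i = 0 compares 2 * 1994999 = 3989998 against
 * 1600000 + 2000000 = 3600000; since it is larger, perf->state is set to 0
 * and 2000000 is returned - i.e. the state closest to cpu_khz.
 */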
static void free_acpi_perf_data(void)
{
unsigned int i;
/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
for_each_possible_cpu(i)
free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
->shared_cpu_map);
free_percpu(acpi_perf_data);
}
static int cpufreq_boost_down_prep(unsigned int cpu)
{
/*
* Clear the boost-disable bit on the CPU_DOWN path so that
* this cpu cannot block the remaining ones from boosting.
*/
return boost_set_msr(1);
}
/*
* acpi_cpufreq_early_init - initialize ACPI P-States library
*
* Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
* in order to determine correct frequency and voltage pairings. We can
* do _PDC and _PSD and find out the processor dependency for the
* actual init that will happen later...
*/
static int __init acpi_cpufreq_early_init(void)
{
unsigned int i;
pr_debug("%s\n", __func__);
acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
if (!acpi_perf_data) {
pr_debug("Memory allocation error for acpi_perf_data.\n");
return -ENOMEM;
}
for_each_possible_cpu(i) {
if (!zalloc_cpumask_var_node(
&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
GFP_KERNEL, cpu_to_node(i))) {
/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
free_acpi_perf_data();
return -ENOMEM;
}
}
/* Do initialization in ACPI core */
acpi_processor_preregister_performance(acpi_perf_data);
return 0;
}
#ifdef CONFIG_SMP
/*
 * Some BIOSes do SW_ANY coordination internally, either setting it up
 * in hardware or handling it in firmware, without informing the OS.
 * If this goes undetected, it has the side effect of making the CPU
 * run at a speed different from the one the OS intended. Detect it
 * and handle it cleanly.
 */
static int bios_with_sw_any_bug;
static int sw_any_bug_found(const struct dmi_system_id *d)
{
bios_with_sw_any_bug = 1;
return 0;
}
static const struct dmi_system_id sw_any_bug_dmi_table[] = {
{
.callback = sw_any_bug_found,
.ident = "Supermicro Server X6DLP",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
DMI_MATCH(DMI_BIOS_VERSION, "080010"),
DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
},
},
{ }
};
static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
/* Intel Xeon Processor 7100 Series Specification Update
* https://www.intel.com/Assets/PDF/specupdate/314554.pdf
* AL30: A Machine Check Exception (MCE) Occurring during an
* Enhanced Intel SpeedStep Technology Ratio Change May Cause
* Both Processor Cores to Lock Up. */
if (c->x86_vendor == X86_VENDOR_INTEL) {
if ((c->x86 == 15) &&
(c->x86_model == 6) &&
(c->x86_stepping == 8)) {
pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
return -ENODEV;
}
}
return 0;
}
#endif
#ifdef CONFIG_ACPI_CPPC_LIB
static u64 get_max_boost_ratio(unsigned int cpu)
{
struct cppc_perf_caps perf_caps;
u64 highest_perf, nominal_perf;
int ret;
if (acpi_pstate_strict)
return 0;
ret = cppc_get_perf_caps(cpu, &perf_caps);
if (ret) {
pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
cpu, ret);
return 0;
}
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
highest_perf = amd_get_highest_perf();
else
highest_perf = perf_caps.highest_perf;
nominal_perf = perf_caps.nominal_perf;
if (!highest_perf || !nominal_perf) {
pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
return 0;
}
if (highest_perf < nominal_perf) {
pr_debug("CPU%d: nominal performance above highest\n", cpu);
return 0;
}
return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
}
#else
static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
#endif
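/*
 * Worked example for get_max_boost_ratio() (CPPC values assumed): with
 * highest_perf = 228 and nominal_perf = 166, the ratio is
 * (228 << 10) / 166 = 1406. acpi_cpufreq_cpu_init() below then scales the
 * nominal table frequency by it, e.g. for a 2000000 KHz table top:
 * 2000000 * 1406 >> 10 = 2746093 KHz as cpuinfo.max_freq.
 */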
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *freq_table;
struct acpi_processor_performance *perf;
struct acpi_cpufreq_data *data;
unsigned int cpu = policy->cpu;
struct cpuinfo_x86 *c = &cpu_data(cpu);
unsigned int valid_states = 0;
unsigned int result = 0;
u64 max_boost_ratio;
unsigned int i;
#ifdef CONFIG_SMP
static int blacklisted;
#endif
pr_debug("%s\n", __func__);
#ifdef CONFIG_SMP
if (blacklisted)
return blacklisted;
blacklisted = acpi_cpufreq_blacklist(c);
if (blacklisted)
return blacklisted;
#endif
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
result = -ENOMEM;
goto err_free;
}
perf = per_cpu_ptr(acpi_perf_data, cpu);
data->acpi_perf_cpu = cpu;
policy->driver_data = data;
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
result = acpi_processor_register_performance(perf, cpu);
if (result)
goto err_free_mask;
policy->shared_type = perf->shared_type;
	/*
	 * Let policy->cpus reflect the dependency only when software
	 * coordination is required.
	 */
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
cpumask_copy(policy->cpus, perf->shared_cpu_map);
}
cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);
#ifdef CONFIG_SMP
dmi_check_system(sw_any_bug_dmi_table);
if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
}
if (check_amd_hwpstate_cpu(cpu) && boot_cpu_data.x86 < 0x19 &&
!acpi_pstate_strict) {
cpumask_clear(policy->cpus);
cpumask_set_cpu(cpu, policy->cpus);
cpumask_copy(data->freqdomain_cpus,
topology_sibling_cpumask(cpu));
policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
pr_info_once("overriding BIOS provided _PSD data\n");
}
#endif
/* capability check */
if (perf->state_count <= 1) {
pr_debug("No P-States\n");
result = -ENODEV;
goto err_unreg;
}
if (perf->control_register.space_id != perf->status_register.space_id) {
result = -ENODEV;
goto err_unreg;
}
switch (perf->control_register.space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
boot_cpu_data.x86 == 0xf) {
pr_debug("AMD K8 systems must use native drivers.\n");
result = -ENODEV;
goto err_unreg;
}
pr_debug("SYSTEM IO addr space\n");
data->cpu_feature = SYSTEM_IO_CAPABLE;
data->cpu_freq_read = cpu_freq_read_io;
data->cpu_freq_write = cpu_freq_write_io;
break;
case ACPI_ADR_SPACE_FIXED_HARDWARE:
pr_debug("HARDWARE addr space\n");
if (check_est_cpu(cpu)) {
data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
data->cpu_freq_read = cpu_freq_read_intel;
data->cpu_freq_write = cpu_freq_write_intel;
break;
}
if (check_amd_hwpstate_cpu(cpu)) {
data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
data->cpu_freq_read = cpu_freq_read_amd;
data->cpu_freq_write = cpu_freq_write_amd;
break;
}
result = -ENODEV;
goto err_unreg;
default:
pr_debug("Unknown addr space %d\n",
(u32) (perf->control_register.space_id));
result = -ENODEV;
goto err_unreg;
}
freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
GFP_KERNEL);
if (!freq_table) {
result = -ENOMEM;
goto err_unreg;
}
/* detect transition latency */
policy->cpuinfo.transition_latency = 0;
for (i = 0; i < perf->state_count; i++) {
if ((perf->states[i].transition_latency * 1000) >
policy->cpuinfo.transition_latency)
policy->cpuinfo.transition_latency =
perf->states[i].transition_latency * 1000;
}
/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
policy->cpuinfo.transition_latency > 20 * 1000) {
policy->cpuinfo.transition_latency = 20 * 1000;
pr_info_once("P-state transition latency capped at 20 uS\n");
}
/* table init */
for (i = 0; i < perf->state_count; i++) {
if (i > 0 && perf->states[i].core_frequency >=
freq_table[valid_states-1].frequency / 1000)
continue;
freq_table[valid_states].driver_data = i;
freq_table[valid_states].frequency =
perf->states[i].core_frequency * 1000;
valid_states++;
}
freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
max_boost_ratio = get_max_boost_ratio(cpu);
if (max_boost_ratio) {
unsigned int freq = freq_table[0].frequency;
/*
* Because the loop above sorts the freq_table entries in the
* descending order, freq is the maximum frequency in the table.
* Assume that it corresponds to the CPPC nominal frequency and
* use it to set cpuinfo.max_freq.
*/
policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
} else {
/*
* If the maximum "boost" frequency is unknown, ask the arch
* scale-invariance code to use the "nominal" performance for
* CPU utilization scaling so as to prevent the schedutil
* governor from selecting inadequate CPU frequencies.
*/
arch_set_max_freq_ratio(true);
}
policy->freq_table = freq_table;
perf->state = 0;
switch (perf->control_register.space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
/*
* The core will not set policy->cur, because
* cpufreq_driver->get is NULL, so we need to set it here.
* However, we have to guess it, because the current speed is
* unknown and not detectable via IO ports.
*/
policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
break;
case ACPI_ADR_SPACE_FIXED_HARDWARE:
acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
break;
default:
break;
}
/* notify BIOS that we exist */
acpi_processor_notify_smm(THIS_MODULE);
pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
for (i = 0; i < perf->state_count; i++)
pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n",
(i == perf->state ? '*' : ' '), i,
(u32) perf->states[i].core_frequency,
(u32) perf->states[i].power,
(u32) perf->states[i].transition_latency);
/*
* the first call to ->target() should result in us actually
* writing something to the appropriate registers.
*/
data->resume = 1;
policy->fast_switch_possible = !acpi_pstate_strict &&
!(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY);
if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
if (acpi_cpufreq_driver.set_boost)
set_boost(policy, acpi_cpufreq_driver.boost_enabled);
return result;
err_unreg:
acpi_processor_unregister_performance(cpu);
err_free_mask:
free_cpumask_var(data->freqdomain_cpus);
err_free:
kfree(data);
policy->driver_data = NULL;
return result;
}
static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct acpi_cpufreq_data *data = policy->driver_data;
pr_debug("%s\n", __func__);
cpufreq_boost_down_prep(policy->cpu);
policy->fast_switch_possible = false;
policy->driver_data = NULL;
acpi_processor_unregister_performance(data->acpi_perf_cpu);
free_cpumask_var(data->freqdomain_cpus);
kfree(policy->freq_table);
kfree(data);
return 0;
}
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
struct acpi_cpufreq_data *data = policy->driver_data;
pr_debug("%s\n", __func__);
data->resume = 1;
return 0;
}
static struct freq_attr *acpi_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
&freqdomain_cpus,
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
&cpb,
#endif
NULL,
};
static struct cpufreq_driver acpi_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = acpi_cpufreq_target,
.fast_switch = acpi_cpufreq_fast_switch,
.bios_limit = acpi_processor_get_bios_limit,
.init = acpi_cpufreq_cpu_init,
.exit = acpi_cpufreq_cpu_exit,
.resume = acpi_cpufreq_resume,
.name = "acpi-cpufreq",
.attr = acpi_cpufreq_attr,
};
static void __init acpi_cpufreq_boost_init(void)
{
if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
pr_debug("Boost capabilities not present in the processor\n");
return;
}
acpi_cpufreq_driver.set_boost = set_boost;
acpi_cpufreq_driver.boost_enabled = boost_state(0);
}
static int __init acpi_cpufreq_probe(struct platform_device *pdev)
{
int ret;
if (acpi_disabled)
return -ENODEV;
/* don't keep reloading if cpufreq_driver exists */
if (cpufreq_get_current_driver())
return -ENODEV;
pr_debug("%s\n", __func__);
ret = acpi_cpufreq_early_init();
if (ret)
return ret;
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/* This is a sysfs file with a strange name and even stranger
	 * semantics - per-CPU instantiation, but system-global effect.
	 * Let's enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
if (!check_amd_hwpstate_cpu(0)) {
struct freq_attr **attr;
pr_debug("CPB unsupported, do not expose it\n");
for (attr = acpi_cpufreq_attr; *attr; attr++)
if (*attr == &cpb) {
*attr = NULL;
break;
}
}
#endif
acpi_cpufreq_boost_init();
ret = cpufreq_register_driver(&acpi_cpufreq_driver);
if (ret) {
free_acpi_perf_data();
}
return ret;
}
static void acpi_cpufreq_remove(struct platform_device *pdev)
{
pr_debug("%s\n", __func__);
cpufreq_unregister_driver(&acpi_cpufreq_driver);
free_acpi_perf_data();
}
static struct platform_driver acpi_cpufreq_platdrv = {
.driver = {
.name = "acpi-cpufreq",
},
.remove_new = acpi_cpufreq_remove,
};
static int __init acpi_cpufreq_init(void)
{
return platform_driver_probe(&acpi_cpufreq_platdrv, acpi_cpufreq_probe);
}
static void __exit acpi_cpufreq_exit(void)
{
platform_driver_unregister(&acpi_cpufreq_platdrv);
}
module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
"value 0 or non-zero. non-zero -> strict ACPI checks are "
"performed during frequency changes.");
late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);
MODULE_ALIAS("platform:acpi-cpufreq");
| linux-master | drivers/cpufreq/acpi-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sc520_freq.c: cpufreq driver for the AMD Elan sc520
*
* Copyright (C) 2005 Sean Young <[email protected]>
*
* Based on elanfreq.c
*
* 2005-03-30: - initial revision
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/cpufreq.h>
#include <linux/timex.h>
#include <linux/io.h>
#include <asm/cpu_device_id.h>
#include <asm/msr.h>
#define MMCR_BASE 0xfffef000 /* The default base address */
#define OFFS_CPUCTL 0x2 /* CPU Control Register */
static __u8 __iomem *cpuctl;
static struct cpufreq_frequency_table sc520_freq_table[] = {
{0, 0x01, 100000},
{0, 0x02, 133000},
{0, 0, CPUFREQ_TABLE_END},
};
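/*
 * driver_data in sc520_freq_table[] holds the two-bit clock-speed field of
 * the CPUCTL register: sc520_freq_target() writes it into CPUCTL's low
 * bits, and sc520_freq_get_cpu_frequency() maps the same values
 * (0x01 -> 100000 KHz, 0x02 -> 133000 KHz) back to frequencies.
 */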
static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
{
u8 clockspeed_reg = *cpuctl;
switch (clockspeed_reg & 0x03) {
default:
pr_err("error: cpuctl register has unexpected value %02x\n",
clockspeed_reg);
fallthrough;
case 0x01:
return 100000;
case 0x02:
return 133000;
}
}
static int sc520_freq_target(struct cpufreq_policy *policy, unsigned int state)
{
u8 clockspeed_reg;
local_irq_disable();
clockspeed_reg = *cpuctl & ~0x03;
*cpuctl = clockspeed_reg | sc520_freq_table[state].driver_data;
local_irq_enable();
return 0;
}
/*
* Module init and exit code
*/
static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = &cpu_data(0);
/* capability check */
if (c->x86_vendor != X86_VENDOR_AMD ||
c->x86 != 4 || c->x86_model != 9)
return -ENODEV;
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = 1000000; /* 1ms */
policy->freq_table = sc520_freq_table;
return 0;
}
static struct cpufreq_driver sc520_freq_driver = {
.get = sc520_freq_get_cpu_frequency,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = sc520_freq_target,
.init = sc520_freq_cpu_init,
.name = "sc520_freq",
.attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id sc520_ids[] = {
X86_MATCH_VENDOR_FAM_MODEL(AMD, 4, 9, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, sc520_ids);
static int __init sc520_freq_init(void)
{
int err;
if (!x86_match_cpu(sc520_ids))
return -ENODEV;
cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1);
if (!cpuctl) {
pr_err("sc520_freq: error: failed to remap memory\n");
return -ENOMEM;
}
err = cpufreq_register_driver(&sc520_freq_driver);
if (err)
iounmap(cpuctl);
return err;
}
static void __exit sc520_freq_exit(void)
{
cpufreq_unregister_driver(&sc520_freq_driver);
iounmap(cpuctl);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <[email protected]>");
MODULE_DESCRIPTION("cpufreq driver for AMD's Elan sc520 CPU");
module_init(sc520_freq_init);
module_exit(sc520_freq_exit);
| linux-master | drivers/cpufreq/sc520_freq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* amd-pstate.c - AMD Processor P-state Frequency Driver
*
* Copyright (C) 2021 Advanced Micro Devices, Inc. All Rights Reserved.
*
* Author: Huang Rui <[email protected]>
*
* AMD P-State introduces a new CPU performance scaling design for AMD
* processors using the ACPI Collaborative Performance and Power Control (CPPC)
* feature which works with the AMD SMU firmware providing a finer grained
 * frequency control range. It replaces the legacy ACPI P-States control and
 * provides a flexible, low-latency interface for the Linux kernel to
 * communicate performance hints directly to the hardware.
 *
 * AMD P-State is supported on recent AMD Zen-based CPU series, including some
 * Zen2 and Zen3 processors. _CPC needs to be present in the ACPI tables of an
 * AMD P-State supported system. There are two types of hardware implementations
 * for AMD P-State: 1) Full MSR Solution and 2) Shared Memory Solution.
 * The X86_FEATURE_CPPC CPU feature flag is used to distinguish between them.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/static_call.h>
#include <linux/amd-pstate.h>
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
#include "amd-pstate-trace.h"
#define AMD_PSTATE_TRANSITION_LATENCY 20000
#define AMD_PSTATE_TRANSITION_DELAY 1000
/*
 * TODO: We need more time to fine-tune processors with the shared memory
 * solution together with the community.
 *
 * There are some performance drops in CPU benchmarks reported by SUSE.
 * We are working with them to fine-tune the shared memory solution, so it
 * is disabled by default (falling back to acpi-cpufreq on these processors)
 * and a module parameter is provided to enable it manually for debugging.
 */
static struct cpufreq_driver *current_pstate_driver;
static struct cpufreq_driver amd_pstate_driver;
static struct cpufreq_driver amd_pstate_epp_driver;
static int cppc_state = AMD_PSTATE_UNDEFINED;
static bool cppc_enabled;
/*
 * AMD Energy Preference Performance (EPP)
 * The EPP is used in the CCLK DPM controller to drive the frequency
 * at which a core operates during short periods of activity.
 * EPP values are utilized for different OS profiles (balanced,
 * performance, power savings). The display strings corresponding to
 * each EPP index are held in energy_perf_strings[]:
 *	index		String
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */
enum energy_perf_value_index {
EPP_INDEX_DEFAULT = 0,
EPP_INDEX_PERFORMANCE,
EPP_INDEX_BALANCE_PERFORMANCE,
EPP_INDEX_BALANCE_POWERSAVE,
EPP_INDEX_POWERSAVE,
};
static const char * const energy_perf_strings[] = {
[EPP_INDEX_DEFAULT] = "default",
[EPP_INDEX_PERFORMANCE] = "performance",
[EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
[EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
[EPP_INDEX_POWERSAVE] = "power",
NULL
};
static unsigned int epp_values[] = {
[EPP_INDEX_DEFAULT] = 0,
[EPP_INDEX_PERFORMANCE] = AMD_CPPC_EPP_PERFORMANCE,
[EPP_INDEX_BALANCE_PERFORMANCE] = AMD_CPPC_EPP_BALANCE_PERFORMANCE,
[EPP_INDEX_BALANCE_POWERSAVE] = AMD_CPPC_EPP_BALANCE_POWERSAVE,
[EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE,
};
typedef int (*cppc_mode_transition_fn)(int);
static inline int get_mode_idx_from_str(const char *str, size_t size)
{
int i;
	for (i = 0; i < AMD_PSTATE_MAX; i++) {
if (!strncmp(str, amd_pstate_mode_string[i], size))
return i;
}
return -EINVAL;
}
static DEFINE_MUTEX(amd_pstate_limits_lock);
static DEFINE_MUTEX(amd_pstate_driver_lock);
static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
{
u64 epp;
int ret;
if (boot_cpu_has(X86_FEATURE_CPPC)) {
if (!cppc_req_cached) {
epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
&cppc_req_cached);
if (epp)
return epp;
}
epp = (cppc_req_cached >> 24) & 0xFF;
} else {
ret = cppc_get_epp_perf(cpudata->cpu, &epp);
if (ret < 0) {
pr_debug("Could not retrieve energy perf value (%d)\n", ret);
return -EIO;
}
}
return (s16)(epp & 0xff);
}
static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
{
s16 epp;
int index = -EINVAL;
epp = amd_pstate_get_epp(cpudata, 0);
if (epp < 0)
return epp;
switch (epp) {
case AMD_CPPC_EPP_PERFORMANCE:
index = EPP_INDEX_PERFORMANCE;
break;
case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
index = EPP_INDEX_BALANCE_PERFORMANCE;
break;
case AMD_CPPC_EPP_BALANCE_POWERSAVE:
index = EPP_INDEX_BALANCE_POWERSAVE;
break;
case AMD_CPPC_EPP_POWERSAVE:
index = EPP_INDEX_POWERSAVE;
break;
default:
break;
}
return index;
}
static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
{
int ret;
struct cppc_perf_ctrls perf_ctrls;
if (boot_cpu_has(X86_FEATURE_CPPC)) {
u64 value = READ_ONCE(cpudata->cppc_req_cached);
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
WRITE_ONCE(cpudata->cppc_req_cached, value);
ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
if (!ret)
cpudata->epp_cached = epp;
} else {
perf_ctrls.energy_perf = epp;
ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
if (ret) {
pr_debug("failed to set energy perf value (%d)\n", ret);
return ret;
}
cpudata->epp_cached = epp;
}
return ret;
}
static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
int pref_index)
{
int epp = -EINVAL;
int ret;
if (!pref_index) {
pr_debug("EPP pref_index is invalid\n");
return -EINVAL;
}
if (epp == -EINVAL)
epp = epp_values[pref_index];
if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
pr_debug("EPP cannot be set under performance policy\n");
return -EBUSY;
}
ret = amd_pstate_set_epp(cpudata, epp);
return ret;
}
static inline int pstate_enable(bool enable)
{
int ret, cpu;
unsigned long logical_proc_id_mask = 0;
if (enable == cppc_enabled)
return 0;
for_each_present_cpu(cpu) {
unsigned long logical_id = topology_logical_die_id(cpu);
if (test_bit(logical_id, &logical_proc_id_mask))
continue;
set_bit(logical_id, &logical_proc_id_mask);
ret = wrmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_ENABLE,
enable);
if (ret)
return ret;
}
cppc_enabled = enable;
return 0;
}
static int cppc_enable(bool enable)
{
int cpu, ret = 0;
struct cppc_perf_ctrls perf_ctrls;
if (enable == cppc_enabled)
return 0;
for_each_present_cpu(cpu) {
ret = cppc_set_enable(cpu, enable);
if (ret)
return ret;
/* Enable autonomous mode for EPP */
if (cppc_state == AMD_PSTATE_ACTIVE) {
/* Set desired perf as zero to allow EPP firmware control */
perf_ctrls.desired_perf = 0;
ret = cppc_set_perf(cpu, &perf_ctrls);
if (ret)
return ret;
}
}
cppc_enabled = enable;
return ret;
}
DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);
static inline int amd_pstate_enable(bool enable)
{
return static_call(amd_pstate_enable)(enable);
}
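/*
 * DEFINE_STATIC_CALL() above defaults the dispatch target to
 * pstate_enable() (the full-MSR path). On shared-memory systems the driver
 * is expected to retarget it during init (outside this excerpt), e.g.
 * static_call_update(amd_pstate_enable, cppc_enable), so that
 * amd_pstate_enable() dispatches without an indirect branch.
 */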
static int pstate_init_perf(struct amd_cpudata *cpudata)
{
u64 cap1;
u32 highest_perf;
int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
&cap1);
if (ret)
return ret;
/*
* TODO: Introduce AMD specific power feature.
*
* CPPC entry doesn't indicate the highest performance in some ASICs.
*/
highest_perf = amd_get_highest_perf();
if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
WRITE_ONCE(cpudata->highest_perf, highest_perf);
WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
return 0;
}
static int cppc_init_perf(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
u32 highest_perf;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
highest_perf = amd_get_highest_perf();
if (highest_perf > cppc_perf.highest_perf)
highest_perf = cppc_perf.highest_perf;
WRITE_ONCE(cpudata->highest_perf, highest_perf);
WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
WRITE_ONCE(cpudata->lowest_nonlinear_perf,
cppc_perf.lowest_nonlinear_perf);
WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
if (cppc_state == AMD_PSTATE_ACTIVE)
return 0;
ret = cppc_get_auto_sel_caps(cpudata->cpu, &cppc_perf);
if (ret) {
pr_warn("failed to get auto_sel, ret: %d\n", ret);
return 0;
}
ret = cppc_set_auto_sel(cpudata->cpu,
(cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
if (ret)
pr_warn("failed to set auto_sel, ret: %d\n", ret);
return ret;
}
DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);
static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
{
return static_call(amd_pstate_init_perf)(cpudata);
}
static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
u32 des_perf, u32 max_perf, bool fast_switch)
{
if (fast_switch)
wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
else
wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
READ_ONCE(cpudata->cppc_req_cached));
}
static void cppc_update_perf(struct amd_cpudata *cpudata,
u32 min_perf, u32 des_perf,
u32 max_perf, bool fast_switch)
{
struct cppc_perf_ctrls perf_ctrls;
perf_ctrls.max_perf = max_perf;
perf_ctrls.min_perf = min_perf;
perf_ctrls.desired_perf = des_perf;
cppc_set_perf(cpudata->cpu, &perf_ctrls);
}
DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
u32 min_perf, u32 des_perf,
u32 max_perf, bool fast_switch)
{
static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
max_perf, fast_switch);
}
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
{
u64 aperf, mperf, tsc;
unsigned long flags;
local_irq_save(flags);
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
tsc = rdtsc();
if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
local_irq_restore(flags);
return false;
}
local_irq_restore(flags);
cpudata->cur.aperf = aperf;
cpudata->cur.mperf = mperf;
cpudata->cur.tsc = tsc;
cpudata->cur.aperf -= cpudata->prev.aperf;
cpudata->cur.mperf -= cpudata->prev.mperf;
cpudata->cur.tsc -= cpudata->prev.tsc;
cpudata->prev.aperf = aperf;
cpudata->prev.mperf = mperf;
cpudata->prev.tsc = tsc;
cpudata->freq = div64_u64((cpudata->cur.aperf * cpu_khz), cpudata->cur.mperf);
return true;
}
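/*
 * Worked example for amd_pstate_sample() (deltas assumed): with an APERF
 * delta of 1000000, an MPERF delta of 2000000 and cpu_khz = 3000000,
 * cpudata->freq = (1000000 * 3000000) / 2000000 = 1500000 KHz - the CPU
 * averaged half of its base clock over the sampling window.
 */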
static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags)
{
u64 prev = READ_ONCE(cpudata->cppc_req_cached);
u64 value = prev;
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
min_perf = des_perf;
des_perf = 0;
}
value &= ~AMD_CPPC_MIN_PERF(~0L);
value |= AMD_CPPC_MIN_PERF(min_perf);
value &= ~AMD_CPPC_DES_PERF(~0L);
value |= AMD_CPPC_DES_PERF(des_perf);
value &= ~AMD_CPPC_MAX_PERF(~0L);
value |= AMD_CPPC_MAX_PERF(max_perf);
if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
cpudata->cpu, (value != prev), fast_switch);
}
if (value == prev)
return;
WRITE_ONCE(cpudata->cppc_req_cached, value);
amd_pstate_update_perf(cpudata, min_perf, des_perf,
max_perf, fast_switch);
}
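/*
 * Worked example of the CPPC_REQ packing above (field layout assumed per
 * the AMD_CPPC_*_PERF() macros in msr-index.h: max_perf in bits [7:0],
 * min_perf [15:8], des_perf [23:16], EPP [31:24] - consistent with the
 * ">> 24" EPP extraction in amd_pstate_get_epp() earlier in this file):
 * min_perf = 0x32, des_perf = 0x64, max_perf = 0xc8 and EPP = 0 assemble
 * to cppc_req_cached = 0x006432c8.
 */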
static int amd_pstate_verify(struct cpufreq_policy_data *policy)
{
cpufreq_verify_within_cpu_limits(policy);
return 0;
}
static int amd_pstate_update_freq(struct cpufreq_policy *policy,
unsigned int target_freq, bool fast_switch)
{
struct cpufreq_freqs freqs;
struct amd_cpudata *cpudata = policy->driver_data;
unsigned long max_perf, min_perf, des_perf, cap_perf;
if (!cpudata->max_freq)
return -ENODEV;
cap_perf = READ_ONCE(cpudata->highest_perf);
min_perf = READ_ONCE(cpudata->lowest_perf);
max_perf = cap_perf;
freqs.old = policy->cur;
freqs.new = target_freq;
des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
cpudata->max_freq);
WARN_ON(fast_switch && !policy->fast_switch_enabled);
/*
* If fast_switch is desired, then there aren't any registered
* transition notifiers. See comment for
* cpufreq_enable_fast_switch().
*/
if (!fast_switch)
cpufreq_freq_transition_begin(policy, &freqs);
amd_pstate_update(cpudata, min_perf, des_perf,
max_perf, fast_switch, policy->governor->flags);
if (!fast_switch)
cpufreq_freq_transition_end(policy, &freqs, false);
return 0;
}
static int amd_pstate_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
return amd_pstate_update_freq(policy, target_freq, false);
}
static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
return amd_pstate_update_freq(policy, target_freq, true);
}
static void amd_pstate_adjust_perf(unsigned int cpu,
unsigned long _min_perf,
unsigned long target_perf,
unsigned long capacity)
{
unsigned long max_perf, min_perf, des_perf,
cap_perf, lowest_nonlinear_perf, max_freq;
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata = policy->driver_data;
unsigned int target_freq;
cap_perf = READ_ONCE(cpudata->highest_perf);
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
max_freq = READ_ONCE(cpudata->max_freq);
des_perf = cap_perf;
if (target_perf < capacity)
des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);
min_perf = READ_ONCE(cpudata->highest_perf);
if (_min_perf < capacity)
min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);
if (min_perf < lowest_nonlinear_perf)
min_perf = lowest_nonlinear_perf;
max_perf = cap_perf;
if (max_perf < min_perf)
max_perf = min_perf;
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
target_freq = div_u64(des_perf * max_freq, max_perf);
policy->cur = target_freq;
amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true,
policy->governor->flags);
cpufreq_cpu_put(policy);
}
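/*
 * Worked example for amd_pstate_adjust_perf() (numbers assumed): with
 * capacity = 1024, target_perf = 512 and cap_perf = 200,
 * des_perf = DIV_ROUND_UP(200 * 512, 1024) = 100, i.e. the scheduler's
 * utilization ratio is mapped linearly onto the CPPC performance scale
 * before being clamped to [min_perf, max_perf].
 */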
static int amd_get_min_freq(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
/* Switch to khz */
return cppc_perf.lowest_freq * 1000;
}
static int amd_get_max_freq(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
u32 max_perf, max_freq, nominal_freq, nominal_perf;
u64 boost_ratio;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
nominal_freq = cppc_perf.nominal_freq;
nominal_perf = READ_ONCE(cpudata->nominal_perf);
max_perf = READ_ONCE(cpudata->highest_perf);
boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT,
nominal_perf);
max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT;
/* Switch to khz */
return max_freq * 1000;
}
static int amd_get_nominal_freq(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
/* Switch to khz */
return cppc_perf.nominal_freq * 1000;
}
static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
u32 lowest_nonlinear_freq, lowest_nonlinear_perf,
nominal_freq, nominal_perf;
u64 lowest_nonlinear_ratio;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
nominal_freq = cppc_perf.nominal_freq;
nominal_perf = READ_ONCE(cpudata->nominal_perf);
lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
nominal_perf);
lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT;
/* Switch to khz */
return lowest_nonlinear_freq * 1000;
}
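/*
 * Worked example for amd_get_lowest_nonlinear_freq() (CPPC values
 * assumed): with lowest_nonlinear_perf = 85, nominal_perf = 166 and
 * nominal_freq = 2600 (MHz), the ratio is (85 << 10) / 166 = 524 and the
 * frequency is 2600 * 524 >> 10 = 1330 MHz, returned as 1330000 KHz.
 */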
static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
{
struct amd_cpudata *cpudata = policy->driver_data;
int ret;
if (!cpudata->boost_supported) {
pr_err("Boost mode is not supported by this processor or SBIOS\n");
return -EINVAL;
}
if (state)
policy->cpuinfo.max_freq = cpudata->max_freq;
else
policy->cpuinfo.max_freq = cpudata->nominal_freq;
policy->max = policy->cpuinfo.max_freq;
ret = freq_qos_update_request(&cpudata->req[1],
policy->cpuinfo.max_freq);
if (ret < 0)
return ret;
return 0;
}
static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
{
u32 highest_perf, nominal_perf;
highest_perf = READ_ONCE(cpudata->highest_perf);
nominal_perf = READ_ONCE(cpudata->nominal_perf);
if (highest_perf <= nominal_perf)
return;
cpudata->boost_supported = true;
current_pstate_driver->boost_enabled = true;
}
static void amd_perf_ctl_reset(unsigned int cpu)
{
wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
}
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
struct device *dev;
struct amd_cpudata *cpudata;
/*
* Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
* which is ideal for initialization process.
*/
amd_perf_ctl_reset(policy->cpu);
dev = get_cpu_device(policy->cpu);
if (!dev)
return -ENODEV;
cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
if (!cpudata)
return -ENOMEM;
cpudata->cpu = policy->cpu;
ret = amd_pstate_init_perf(cpudata);
if (ret)
goto free_cpudata1;
min_freq = amd_get_min_freq(cpudata);
max_freq = amd_get_max_freq(cpudata);
nominal_freq = amd_get_nominal_freq(cpudata);
lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
min_freq, max_freq);
ret = -EINVAL;
goto free_cpudata1;
}
policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;
policy->min = min_freq;
policy->max = max_freq;
policy->cpuinfo.min_freq = min_freq;
policy->cpuinfo.max_freq = max_freq;
/* It will be updated by governor */
policy->cur = policy->cpuinfo.min_freq;
if (boot_cpu_has(X86_FEATURE_CPPC))
policy->fast_switch_possible = true;
ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
FREQ_QOS_MIN, policy->cpuinfo.min_freq);
if (ret < 0) {
dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
goto free_cpudata1;
}
ret = freq_qos_add_request(&policy->constraints, &cpudata->req[1],
FREQ_QOS_MAX, policy->cpuinfo.max_freq);
if (ret < 0) {
dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
goto free_cpudata2;
}
/* Initial processor data capability frequencies */
cpudata->max_freq = max_freq;
cpudata->min_freq = min_freq;
cpudata->nominal_freq = nominal_freq;
cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
policy->driver_data = cpudata;
amd_pstate_boost_init(cpudata);
if (!current_pstate_driver->adjust_perf)
current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
return 0;
free_cpudata2:
freq_qos_remove_request(&cpudata->req[0]);
free_cpudata1:
kfree(cpudata);
return ret;
}
static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
freq_qos_remove_request(&cpudata->req[1]);
freq_qos_remove_request(&cpudata->req[0]);
policy->fast_switch_possible = false;
kfree(cpudata);
return 0;
}
static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
{
int ret;
ret = amd_pstate_enable(true);
if (ret)
pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
return ret;
}
static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
{
int ret;
ret = amd_pstate_enable(false);
if (ret)
pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
return ret;
}
/* Sysfs attributes */
/*
 * This frequency indicates the maximum hardware frequency.
 * If boost is supported but not active, this frequency will be larger
 * than the one in cpuinfo.
 */
static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
char *buf)
{
int max_freq;
struct amd_cpudata *cpudata = policy->driver_data;
max_freq = amd_get_max_freq(cpudata);
if (max_freq < 0)
return max_freq;
return sysfs_emit(buf, "%u\n", max_freq);
}
static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
char *buf)
{
int freq;
struct amd_cpudata *cpudata = policy->driver_data;
freq = amd_get_lowest_nonlinear_freq(cpudata);
if (freq < 0)
return freq;
return sysfs_emit(buf, "%u\n", freq);
}
/*
 * On some ASICs, the highest_perf is not the one in the _CPC table, so we
 * need to expose it through sysfs.
 */
static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
char *buf)
{
u32 perf;
struct amd_cpudata *cpudata = policy->driver_data;
perf = READ_ONCE(cpudata->highest_perf);
return sysfs_emit(buf, "%u\n", perf);
}
static ssize_t show_energy_performance_available_preferences(
struct cpufreq_policy *policy, char *buf)
{
int i = 0;
int offset = 0;
while (energy_perf_strings[i] != NULL)
offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
sysfs_emit_at(buf, offset, "\n");
return offset;
}
static ssize_t store_energy_performance_preference(
struct cpufreq_policy *policy, const char *buf, size_t count)
{
struct amd_cpudata *cpudata = policy->driver_data;
char str_preference[21];
ssize_t ret;
ret = sscanf(buf, "%20s", str_preference);
if (ret != 1)
return -EINVAL;
ret = match_string(energy_perf_strings, -1, str_preference);
if (ret < 0)
return -EINVAL;
mutex_lock(&amd_pstate_limits_lock);
ret = amd_pstate_set_energy_pref_index(cpudata, ret);
mutex_unlock(&amd_pstate_limits_lock);
return ret ?: count;
}
static ssize_t show_energy_performance_preference(
struct cpufreq_policy *policy, char *buf)
{
struct amd_cpudata *cpudata = policy->driver_data;
int preference;
preference = amd_pstate_get_energy_pref_index(cpudata);
if (preference < 0)
return preference;
return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
}
static void amd_pstate_driver_cleanup(void)
{
amd_pstate_enable(false);
cppc_state = AMD_PSTATE_DISABLE;
current_pstate_driver = NULL;
}
static int amd_pstate_register_driver(int mode)
{
int ret;
if (mode == AMD_PSTATE_PASSIVE || mode == AMD_PSTATE_GUIDED)
current_pstate_driver = &amd_pstate_driver;
else if (mode == AMD_PSTATE_ACTIVE)
current_pstate_driver = &amd_pstate_epp_driver;
else
return -EINVAL;
cppc_state = mode;
ret = cpufreq_register_driver(current_pstate_driver);
if (ret) {
amd_pstate_driver_cleanup();
return ret;
}
return 0;
}
static int amd_pstate_unregister_driver(int dummy)
{
cpufreq_unregister_driver(current_pstate_driver);
amd_pstate_driver_cleanup();
return 0;
}
static int amd_pstate_change_mode_without_dvr_change(int mode)
{
int cpu = 0;
cppc_state = mode;
if (boot_cpu_has(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
return 0;
for_each_present_cpu(cpu) {
cppc_set_auto_sel(cpu, (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
}
return 0;
}
static int amd_pstate_change_driver_mode(int mode)
{
int ret;
ret = amd_pstate_unregister_driver(0);
if (ret)
return ret;
ret = amd_pstate_register_driver(mode);
if (ret)
return ret;
return 0;
}
static cppc_mode_transition_fn mode_state_machine[AMD_PSTATE_MAX][AMD_PSTATE_MAX] = {
[AMD_PSTATE_DISABLE] = {
[AMD_PSTATE_DISABLE] = NULL,
[AMD_PSTATE_PASSIVE] = amd_pstate_register_driver,
[AMD_PSTATE_ACTIVE] = amd_pstate_register_driver,
[AMD_PSTATE_GUIDED] = amd_pstate_register_driver,
},
[AMD_PSTATE_PASSIVE] = {
[AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver,
[AMD_PSTATE_PASSIVE] = NULL,
[AMD_PSTATE_ACTIVE] = amd_pstate_change_driver_mode,
[AMD_PSTATE_GUIDED] = amd_pstate_change_mode_without_dvr_change,
},
[AMD_PSTATE_ACTIVE] = {
[AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver,
[AMD_PSTATE_PASSIVE] = amd_pstate_change_driver_mode,
[AMD_PSTATE_ACTIVE] = NULL,
[AMD_PSTATE_GUIDED] = amd_pstate_change_driver_mode,
},
[AMD_PSTATE_GUIDED] = {
[AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver,
[AMD_PSTATE_PASSIVE] = amd_pstate_change_mode_without_dvr_change,
[AMD_PSTATE_ACTIVE] = amd_pstate_change_driver_mode,
[AMD_PSTATE_GUIDED] = NULL,
},
};
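/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * The 2-D table above is a plain table-driven state machine: the row is
 * the current cppc_state, the column the requested mode, and a NULL cell
 * means "nothing to do". A standalone userspace mirror of the dispatch,
 * with hypothetical stub handlers:
 */
#include <stdio.h>
enum { DISABLE, PASSIVE, ACTIVE, GUIDED, MODE_MAX };
typedef int (*transition_fn)(int);
static int stub_register(int mode)   { printf("register driver for mode %d\n", mode); return 0; }
static int stub_unregister(int mode) { printf("unregister driver\n"); return 0; }
static transition_fn machine[MODE_MAX][MODE_MAX] = {
	[DISABLE] = { [PASSIVE] = stub_register, [ACTIVE] = stub_register },
	[PASSIVE] = { [DISABLE] = stub_unregister },
	/* cells left out default to NULL, i.e. no-op transitions */
};
static int change_mode(int cur, int next)
{
	if (machine[cur][next])
		return machine[cur][next](next);
	return 0; /* same mode or unhandled pair: nothing to do */
}
int main(void)
{
	return change_mode(DISABLE, ACTIVE);
}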
static ssize_t amd_pstate_show_status(char *buf)
{
if (!current_pstate_driver)
return sysfs_emit(buf, "disable\n");
return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
}
static int amd_pstate_update_status(const char *buf, size_t size)
{
int mode_idx;
if (size > strlen("passive") || size < strlen("active"))
return -EINVAL;
mode_idx = get_mode_idx_from_str(buf, size);
if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX)
return -EINVAL;
if (mode_state_machine[cppc_state][mode_idx])
return mode_state_machine[cppc_state][mode_idx](mode_idx);
return 0;
}
static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t ret;
mutex_lock(&amd_pstate_driver_lock);
ret = amd_pstate_show_status(buf);
mutex_unlock(&amd_pstate_driver_lock);
return ret;
}
static ssize_t status_store(struct device *a, struct device_attribute *b,
const char *buf, size_t count)
{
char *p = memchr(buf, '\n', count);
int ret;
mutex_lock(&amd_pstate_driver_lock);
ret = amd_pstate_update_status(buf, p ? p - buf : count);
mutex_unlock(&amd_pstate_driver_lock);
return ret < 0 ? ret : count;
}
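/*
 * Editor's note: illustrative remark, not part of the upstream driver.
 * memchr() above trims an optional trailing newline, so both
 * "echo -n passive" and "echo passive" into the status attribute pass
 * the same 7-byte token to amd_pstate_update_status().
 */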
cpufreq_freq_attr_ro(amd_pstate_max_freq);
cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
cpufreq_freq_attr_ro(amd_pstate_highest_perf);
cpufreq_freq_attr_rw(energy_performance_preference);
cpufreq_freq_attr_ro(energy_performance_available_preferences);
static DEVICE_ATTR_RW(status);
static struct freq_attr *amd_pstate_attr[] = {
&amd_pstate_max_freq,
&amd_pstate_lowest_nonlinear_freq,
&amd_pstate_highest_perf,
NULL,
};
static struct freq_attr *amd_pstate_epp_attr[] = {
&amd_pstate_max_freq,
&amd_pstate_lowest_nonlinear_freq,
&amd_pstate_highest_perf,
&energy_performance_preference,
&energy_performance_available_preferences,
NULL,
};
static struct attribute *pstate_global_attributes[] = {
&dev_attr_status.attr,
NULL
};
static const struct attribute_group amd_pstate_global_attr_group = {
.name = "amd_pstate",
.attrs = pstate_global_attributes,
};
static bool amd_pstate_acpi_pm_profile_server(void)
{
switch (acpi_gbl_FADT.preferred_profile) {
case PM_ENTERPRISE_SERVER:
case PM_SOHO_SERVER:
case PM_PERFORMANCE_SERVER:
return true;
}
return false;
}
static bool amd_pstate_acpi_pm_profile_undefined(void)
{
if (acpi_gbl_FADT.preferred_profile == PM_UNSPECIFIED)
return true;
if (acpi_gbl_FADT.preferred_profile >= NR_PM_PROFILES)
return true;
return false;
}
static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
{
int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
struct amd_cpudata *cpudata;
struct device *dev;
u64 value;
/*
* Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
 * which is ideal for the initialization process.
*/
amd_perf_ctl_reset(policy->cpu);
dev = get_cpu_device(policy->cpu);
if (!dev)
return -ENODEV;
cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
if (!cpudata)
return -ENOMEM;
cpudata->cpu = policy->cpu;
cpudata->epp_policy = 0;
ret = amd_pstate_init_perf(cpudata);
if (ret)
goto free_cpudata1;
min_freq = amd_get_min_freq(cpudata);
max_freq = amd_get_max_freq(cpudata);
nominal_freq = amd_get_nominal_freq(cpudata);
lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
min_freq, max_freq);
ret = -EINVAL;
goto free_cpudata1;
}
policy->cpuinfo.min_freq = min_freq;
policy->cpuinfo.max_freq = max_freq;
/* It will be updated by governor */
policy->cur = policy->cpuinfo.min_freq;
/* Initial processor data capability frequencies */
cpudata->max_freq = max_freq;
cpudata->min_freq = min_freq;
cpudata->nominal_freq = nominal_freq;
cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
policy->driver_data = cpudata;
cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0);
policy->min = policy->cpuinfo.min_freq;
policy->max = policy->cpuinfo.max_freq;
/*
* Set the policy to provide a valid fallback value in case
* the default cpufreq governor is neither powersave nor performance.
*/
if (amd_pstate_acpi_pm_profile_server() ||
amd_pstate_acpi_pm_profile_undefined())
policy->policy = CPUFREQ_POLICY_PERFORMANCE;
else
policy->policy = CPUFREQ_POLICY_POWERSAVE;
if (boot_cpu_has(X86_FEATURE_CPPC)) {
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
if (ret)
return ret;
WRITE_ONCE(cpudata->cppc_req_cached, value);
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &value);
if (ret)
return ret;
WRITE_ONCE(cpudata->cppc_cap1_cached, value);
}
amd_pstate_boost_init(cpudata);
return 0;
free_cpudata1:
kfree(cpudata);
return ret;
}
static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
{
pr_debug("CPU %d exiting\n", policy->cpu);
return 0;
}
static void amd_pstate_epp_init(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata = policy->driver_data;
u32 max_perf, min_perf;
u64 value;
s16 epp;
max_perf = READ_ONCE(cpudata->highest_perf);
min_perf = READ_ONCE(cpudata->lowest_perf);
value = READ_ONCE(cpudata->cppc_req_cached);
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
min_perf = max_perf;
/* Initial min/max values for CPPC Performance Controls Register */
value &= ~AMD_CPPC_MIN_PERF(~0L);
value |= AMD_CPPC_MIN_PERF(min_perf);
value &= ~AMD_CPPC_MAX_PERF(~0L);
value |= AMD_CPPC_MAX_PERF(max_perf);
	/* The CPPC EPP feature requires the desired perf field to be set to zero */
value &= ~AMD_CPPC_DES_PERF(~0L);
value |= AMD_CPPC_DES_PERF(0);
if (cpudata->epp_policy == cpudata->policy)
goto skip_epp;
cpudata->epp_policy = cpudata->policy;
/* Get BIOS pre-defined epp value */
epp = amd_pstate_get_epp(cpudata, value);
if (epp < 0) {
		/*
		 * This return value can only be negative for shared-memory
		 * systems where the EPP register read/write is not supported.
		 */
goto skip_epp;
}
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
epp = 0;
/* Set initial EPP value */
if (boot_cpu_has(X86_FEATURE_CPPC)) {
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
}
WRITE_ONCE(cpudata->cppc_req_cached, value);
amd_pstate_set_epp(cpudata, epp);
skip_epp:
cpufreq_cpu_put(policy);
}
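/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * amd_pstate_epp_init() above assembles the CPPC request value from four
 * 8-bit fields; the EPP field sits in bits 31:24 (see the
 * GENMASK_ULL(31, 24) update above), while the other field positions
 * shown here are inferred from the AMD_CPPC_*_PERF() accessors and
 * should be treated as an assumption. Standalone encoder:
 */
#include <stdint.h>
static uint64_t cppc_req_pack(uint8_t max_perf, uint8_t min_perf,
			      uint8_t des_perf, uint8_t epp)
{
	uint64_t v = 0;
	v |= (uint64_t)max_perf;       /* bits  7:0  - highest allowed perf */
	v |= (uint64_t)min_perf << 8;  /* bits 15:8  - lowest allowed perf */
	v |= (uint64_t)des_perf << 16; /* bits 23:16 - zero when EPP governs */
	v |= (uint64_t)epp << 24;      /* bits 31:24 - energy/perf preference */
	return v;
}
/* e.g. a performance policy: min == max == highest_perf, des == 0, epp == 0 */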
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
if (!policy->cpuinfo.max_freq)
return -ENODEV;
pr_debug("set_policy: cpuinfo.max %u policy->max %u\n",
policy->cpuinfo.max_freq, policy->max);
cpudata->policy = policy->policy;
amd_pstate_epp_init(policy->cpu);
return 0;
}
static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
{
struct cppc_perf_ctrls perf_ctrls;
u64 value, max_perf;
int ret;
ret = amd_pstate_enable(true);
if (ret)
pr_err("failed to enable amd pstate during resume, return %d\n", ret);
value = READ_ONCE(cpudata->cppc_req_cached);
max_perf = READ_ONCE(cpudata->highest_perf);
if (boot_cpu_has(X86_FEATURE_CPPC)) {
wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
} else {
perf_ctrls.max_perf = max_perf;
perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
cppc_set_perf(cpudata->cpu, &perf_ctrls);
}
}
static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
if (cppc_state == AMD_PSTATE_ACTIVE) {
amd_pstate_epp_reenable(cpudata);
cpudata->suspended = false;
}
return 0;
}
static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
struct cppc_perf_ctrls perf_ctrls;
int min_perf;
u64 value;
min_perf = READ_ONCE(cpudata->lowest_perf);
value = READ_ONCE(cpudata->cppc_req_cached);
mutex_lock(&amd_pstate_limits_lock);
if (boot_cpu_has(X86_FEATURE_CPPC)) {
cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
/* Set max perf same as min perf */
value &= ~AMD_CPPC_MAX_PERF(~0L);
value |= AMD_CPPC_MAX_PERF(min_perf);
value &= ~AMD_CPPC_MIN_PERF(~0L);
value |= AMD_CPPC_MIN_PERF(min_perf);
wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
} else {
perf_ctrls.desired_perf = 0;
perf_ctrls.max_perf = min_perf;
perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
cppc_set_perf(cpudata->cpu, &perf_ctrls);
}
mutex_unlock(&amd_pstate_limits_lock);
}
static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
if (cpudata->suspended)
return 0;
if (cppc_state == AMD_PSTATE_ACTIVE)
amd_pstate_epp_offline(policy);
return 0;
}
static int amd_pstate_epp_verify_policy(struct cpufreq_policy_data *policy)
{
cpufreq_verify_within_cpu_limits(policy);
pr_debug("policy_max =%d, policy_min=%d\n", policy->max, policy->min);
return 0;
}
static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
int ret;
/* avoid suspending when EPP is not enabled */
if (cppc_state != AMD_PSTATE_ACTIVE)
return 0;
	/* set this flag to avoid taking the core offline */
cpudata->suspended = true;
	/* disable CPPC in low-level firmware */
ret = amd_pstate_enable(false);
if (ret)
pr_err("failed to suspend, return %d\n", ret);
return 0;
}
static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
if (cpudata->suspended) {
mutex_lock(&amd_pstate_limits_lock);
		/* re-enable amd-pstate when coming out of the suspend state */
amd_pstate_epp_reenable(cpudata);
mutex_unlock(&amd_pstate_limits_lock);
cpudata->suspended = false;
}
return 0;
}
static struct cpufreq_driver amd_pstate_driver = {
.flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
.verify = amd_pstate_verify,
.target = amd_pstate_target,
.fast_switch = amd_pstate_fast_switch,
.init = amd_pstate_cpu_init,
.exit = amd_pstate_cpu_exit,
.suspend = amd_pstate_cpu_suspend,
.resume = amd_pstate_cpu_resume,
.set_boost = amd_pstate_set_boost,
.name = "amd-pstate",
.attr = amd_pstate_attr,
};
static struct cpufreq_driver amd_pstate_epp_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = amd_pstate_epp_verify_policy,
.setpolicy = amd_pstate_epp_set_policy,
.init = amd_pstate_epp_cpu_init,
.exit = amd_pstate_epp_cpu_exit,
.offline = amd_pstate_epp_cpu_offline,
.online = amd_pstate_epp_cpu_online,
.suspend = amd_pstate_epp_suspend,
.resume = amd_pstate_epp_resume,
.name = "amd-pstate-epp",
.attr = amd_pstate_epp_attr,
};
static int __init amd_pstate_set_driver(int mode_idx)
{
if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
cppc_state = mode_idx;
if (cppc_state == AMD_PSTATE_DISABLE)
pr_info("driver is explicitly disabled\n");
if (cppc_state == AMD_PSTATE_ACTIVE)
current_pstate_driver = &amd_pstate_epp_driver;
if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
current_pstate_driver = &amd_pstate_driver;
return 0;
}
return -EINVAL;
}
static int __init amd_pstate_init(void)
{
struct device *dev_root;
int ret;
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return -ENODEV;
if (!acpi_cpc_valid()) {
pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
return -ENODEV;
}
/* don't keep reloading if cpufreq_driver exists */
if (cpufreq_get_current_driver())
return -EEXIST;
switch (cppc_state) {
case AMD_PSTATE_UNDEFINED:
/* Disable on the following configs by default:
* 1. Undefined platforms
* 2. Server platforms
* 3. Shared memory designs
*/
if (amd_pstate_acpi_pm_profile_undefined() ||
amd_pstate_acpi_pm_profile_server() ||
!boot_cpu_has(X86_FEATURE_CPPC)) {
pr_info("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
}
ret = amd_pstate_set_driver(CONFIG_X86_AMD_PSTATE_DEFAULT_MODE);
if (ret)
return ret;
break;
case AMD_PSTATE_DISABLE:
return -ENODEV;
case AMD_PSTATE_PASSIVE:
case AMD_PSTATE_ACTIVE:
case AMD_PSTATE_GUIDED:
break;
default:
return -EINVAL;
}
/* capability check */
if (boot_cpu_has(X86_FEATURE_CPPC)) {
pr_debug("AMD CPPC MSR based functionality is supported\n");
if (cppc_state != AMD_PSTATE_ACTIVE)
current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
} else {
pr_debug("AMD CPPC shared memory based functionality is supported\n");
static_call_update(amd_pstate_enable, cppc_enable);
static_call_update(amd_pstate_init_perf, cppc_init_perf);
static_call_update(amd_pstate_update_perf, cppc_update_perf);
}
/* enable amd pstate feature */
ret = amd_pstate_enable(true);
if (ret) {
pr_err("failed to enable with return %d\n", ret);
return ret;
}
ret = cpufreq_register_driver(current_pstate_driver);
if (ret)
pr_err("failed to register with return %d\n", ret);
dev_root = bus_get_dev_root(&cpu_subsys);
if (dev_root) {
ret = sysfs_create_group(&dev_root->kobj, &amd_pstate_global_attr_group);
put_device(dev_root);
if (ret) {
pr_err("sysfs attribute export failed with error %d.\n", ret);
goto global_attr_free;
}
}
return ret;
global_attr_free:
cpufreq_unregister_driver(current_pstate_driver);
return ret;
}
device_initcall(amd_pstate_init);
static int __init amd_pstate_param(char *str)
{
size_t size;
int mode_idx;
if (!str)
return -EINVAL;
size = strlen(str);
mode_idx = get_mode_idx_from_str(str, size);
return amd_pstate_set_driver(mode_idx);
}
early_param("amd_pstate", amd_pstate_param);
MODULE_AUTHOR("Huang Rui <[email protected]>");
MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
| linux-master | drivers/cpufreq/amd-pstate.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP
*/
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include "cpufreq-dt.h"
#define OCOTP_CFG3_SPEED_GRADE_SHIFT 8
#define OCOTP_CFG3_SPEED_GRADE_MASK (0x3 << 8)
#define IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK (0xf << 8)
#define OCOTP_CFG3_MKT_SEGMENT_SHIFT 6
#define OCOTP_CFG3_MKT_SEGMENT_MASK (0x3 << 6)
#define IMX8MP_OCOTP_CFG3_MKT_SEGMENT_SHIFT 5
#define IMX8MP_OCOTP_CFG3_MKT_SEGMENT_MASK (0x3 << 5)
#define IMX7ULP_MAX_RUN_FREQ 528000
/* cpufreq-dt device registered by imx-cpufreq-dt */
static struct platform_device *cpufreq_dt_pdev;
static struct device *cpu_dev;
static int cpufreq_opp_token;
enum IMX7ULP_CPUFREQ_CLKS {
ARM,
CORE,
SCS_SEL,
HSRUN_CORE,
HSRUN_SCS_SEL,
FIRC,
};
static struct clk_bulk_data imx7ulp_clks[] = {
{ .id = "arm" },
{ .id = "core" },
{ .id = "scs_sel" },
{ .id = "hsrun_core" },
{ .id = "hsrun_scs_sel" },
{ .id = "firc" },
};
static unsigned int imx7ulp_get_intermediate(struct cpufreq_policy *policy,
unsigned int index)
{
return clk_get_rate(imx7ulp_clks[FIRC].clk);
}
static int imx7ulp_target_intermediate(struct cpufreq_policy *policy,
unsigned int index)
{
unsigned int newfreq = policy->freq_table[index].frequency;
clk_set_parent(imx7ulp_clks[SCS_SEL].clk, imx7ulp_clks[FIRC].clk);
clk_set_parent(imx7ulp_clks[HSRUN_SCS_SEL].clk, imx7ulp_clks[FIRC].clk);
if (newfreq > IMX7ULP_MAX_RUN_FREQ)
clk_set_parent(imx7ulp_clks[ARM].clk,
imx7ulp_clks[HSRUN_CORE].clk);
else
clk_set_parent(imx7ulp_clks[ARM].clk, imx7ulp_clks[CORE].clk);
return 0;
}
static struct cpufreq_dt_platform_data imx7ulp_data = {
.target_intermediate = imx7ulp_target_intermediate,
.get_intermediate = imx7ulp_get_intermediate,
};
static int imx_cpufreq_dt_probe(struct platform_device *pdev)
{
struct platform_device *dt_pdev;
u32 cell_value, supported_hw[2];
int speed_grade, mkt_segment;
int ret;
cpu_dev = get_cpu_device(0);
if (!of_property_present(cpu_dev->of_node, "cpu-supply"))
return -ENODEV;
if (of_machine_is_compatible("fsl,imx7ulp")) {
ret = clk_bulk_get(cpu_dev, ARRAY_SIZE(imx7ulp_clks),
imx7ulp_clks);
if (ret)
return ret;
dt_pdev = platform_device_register_data(NULL, "cpufreq-dt",
-1, &imx7ulp_data,
sizeof(imx7ulp_data));
if (IS_ERR(dt_pdev)) {
clk_bulk_put(ARRAY_SIZE(imx7ulp_clks), imx7ulp_clks);
ret = PTR_ERR(dt_pdev);
dev_err(&pdev->dev, "Failed to register cpufreq-dt: %d\n", ret);
return ret;
}
cpufreq_dt_pdev = dt_pdev;
return 0;
}
ret = nvmem_cell_read_u32(cpu_dev, "speed_grade", &cell_value);
if (ret)
return ret;
if (of_machine_is_compatible("fsl,imx8mn") ||
of_machine_is_compatible("fsl,imx8mp"))
speed_grade = (cell_value & IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK)
>> OCOTP_CFG3_SPEED_GRADE_SHIFT;
else
speed_grade = (cell_value & OCOTP_CFG3_SPEED_GRADE_MASK)
>> OCOTP_CFG3_SPEED_GRADE_SHIFT;
if (of_machine_is_compatible("fsl,imx8mp"))
mkt_segment = (cell_value & IMX8MP_OCOTP_CFG3_MKT_SEGMENT_MASK)
>> IMX8MP_OCOTP_CFG3_MKT_SEGMENT_SHIFT;
else
mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK)
>> OCOTP_CFG3_MKT_SEGMENT_SHIFT;
/*
* Early samples without fuses written report "0 0" which may NOT
* match any OPP defined in DT. So clamp to minimum OPP defined in
* DT to avoid warning for "no OPPs".
*
* Applies to i.MX8M series SoCs.
*/
if (mkt_segment == 0 && speed_grade == 0) {
if (of_machine_is_compatible("fsl,imx8mm") ||
of_machine_is_compatible("fsl,imx8mq"))
speed_grade = 1;
if (of_machine_is_compatible("fsl,imx8mn") ||
of_machine_is_compatible("fsl,imx8mp"))
speed_grade = 0xb;
}
supported_hw[0] = BIT(speed_grade);
supported_hw[1] = BIT(mkt_segment);
dev_info(&pdev->dev, "cpu speed grade %d mkt segment %d supported-hw %#x %#x\n",
speed_grade, mkt_segment, supported_hw[0], supported_hw[1]);
cpufreq_opp_token = dev_pm_opp_set_supported_hw(cpu_dev, supported_hw, 2);
if (cpufreq_opp_token < 0) {
ret = cpufreq_opp_token;
dev_err(&pdev->dev, "Failed to set supported opp: %d\n", ret);
return ret;
}
cpufreq_dt_pdev = platform_device_register_data(
&pdev->dev, "cpufreq-dt", -1, NULL, 0);
if (IS_ERR(cpufreq_dt_pdev)) {
dev_pm_opp_put_supported_hw(cpufreq_opp_token);
ret = PTR_ERR(cpufreq_dt_pdev);
dev_err(&pdev->dev, "Failed to register cpufreq-dt: %d\n", ret);
return ret;
}
return 0;
}
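/*
 * Editor's note: illustrative sketch, not part of the upstream driver.
 * Decoding a hypothetical OCOTP_CFG3 word with the masks defined at the
 * top of this file (i.MX8MN/MP speed-grade field, common market-segment
 * field). Standalone userspace C:
 */
#include <stdio.h>
#include <stdint.h>
static void imx8m_fuse_decode_example(void)
{
	uint32_t cell_value = 0x00000b40;                 /* hypothetical fuse word */
	int speed_grade = (cell_value & (0xf << 8)) >> 8; /* -> 0xb */
	int mkt_segment = (cell_value & (0x3 << 6)) >> 6; /* -> 1 */
	printf("speed grade %d, market segment %d\n", speed_grade, mkt_segment);
	/* the probe then sets supported_hw[0] = BIT(speed_grade) and
	 * supported_hw[1] = BIT(mkt_segment) for OPP matching */
}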
static void imx_cpufreq_dt_remove(struct platform_device *pdev)
{
platform_device_unregister(cpufreq_dt_pdev);
if (!of_machine_is_compatible("fsl,imx7ulp"))
dev_pm_opp_put_supported_hw(cpufreq_opp_token);
else
clk_bulk_put(ARRAY_SIZE(imx7ulp_clks), imx7ulp_clks);
}
static struct platform_driver imx_cpufreq_dt_driver = {
.probe = imx_cpufreq_dt_probe,
.remove_new = imx_cpufreq_dt_remove,
.driver = {
.name = "imx-cpufreq-dt",
},
};
module_platform_driver(imx_cpufreq_dt_driver);
MODULE_ALIAS("platform:imx-cpufreq-dt");
MODULE_DESCRIPTION("Freescale i.MX cpufreq speed grading driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/cpufreq/imx-cpufreq-dt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
/*
 * In certain QCOM SoCs like apq8096 and msm8996 that have KRYO processors,
 * the CPU frequency subset and the voltage value of each OPP vary
 * based on the silicon variant in use. Qualcomm Process Voltage Scaling
 * Tables define the voltage and frequency values based on the msm-id in
 * SMEM and the speedbin blown into the efuse.
 * The qcom-cpufreq-nvmem driver reads the msm-id and efuse value from the
 * SoC to provide the OPP framework with the required information.
 * This is used to determine the voltage and frequency value for each OPP of
 * the operating-points-v2 table when it is parsed by the OPP framework.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>
#include <dt-bindings/arm/qcom,ids.h>
struct qcom_cpufreq_drv;
struct qcom_cpufreq_match_data {
int (*get_version)(struct device *cpu_dev,
struct nvmem_cell *speedbin_nvmem,
char **pvs_name,
struct qcom_cpufreq_drv *drv);
const char **genpd_names;
};
struct qcom_cpufreq_drv {
int *opp_tokens;
u32 versions;
const struct qcom_cpufreq_match_data *data;
};
static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev;
static void get_krait_bin_format_a(struct device *cpu_dev,
int *speed, int *pvs, int *pvs_ver,
u8 *buf)
{
u32 pte_efuse;
pte_efuse = *((u32 *)buf);
*speed = pte_efuse & 0xf;
if (*speed == 0xf)
*speed = (pte_efuse >> 4) & 0xf;
if (*speed == 0xf) {
*speed = 0;
dev_warn(cpu_dev, "Speed bin: Defaulting to %d\n", *speed);
} else {
dev_dbg(cpu_dev, "Speed bin: %d\n", *speed);
}
*pvs = (pte_efuse >> 10) & 0x7;
if (*pvs == 0x7)
*pvs = (pte_efuse >> 13) & 0x7;
if (*pvs == 0x7) {
*pvs = 0;
dev_warn(cpu_dev, "PVS bin: Defaulting to %d\n", *pvs);
} else {
dev_dbg(cpu_dev, "PVS bin: %d\n", *pvs);
}
}
static void get_krait_bin_format_b(struct device *cpu_dev,
int *speed, int *pvs, int *pvs_ver,
u8 *buf)
{
u32 pte_efuse, redundant_sel;
pte_efuse = *((u32 *)buf);
redundant_sel = (pte_efuse >> 24) & 0x7;
*pvs_ver = (pte_efuse >> 4) & 0x3;
switch (redundant_sel) {
case 1:
*pvs = ((pte_efuse >> 28) & 0x8) | ((pte_efuse >> 6) & 0x7);
*speed = (pte_efuse >> 27) & 0xf;
break;
case 2:
*pvs = (pte_efuse >> 27) & 0xf;
*speed = pte_efuse & 0x7;
break;
default:
/* 4 bits of PVS are in efuse register bits 31, 8-6. */
*pvs = ((pte_efuse >> 28) & 0x8) | ((pte_efuse >> 6) & 0x7);
*speed = pte_efuse & 0x7;
}
/* Check SPEED_BIN_BLOW_STATUS */
if (pte_efuse & BIT(3)) {
dev_dbg(cpu_dev, "Speed bin: %d\n", *speed);
} else {
dev_warn(cpu_dev, "Speed bin not set. Defaulting to 0!\n");
*speed = 0;
}
/* Check PVS_BLOW_STATUS */
pte_efuse = *(((u32 *)buf) + 1);
pte_efuse &= BIT(21);
if (pte_efuse) {
dev_dbg(cpu_dev, "PVS bin: %d\n", *pvs);
} else {
dev_warn(cpu_dev, "PVS bin not set. Defaulting to 0!\n");
*pvs = 0;
}
dev_dbg(cpu_dev, "PVS version: %d\n", *pvs_ver);
}
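/*
 * Editor's note: illustrative worked example, not part of the upstream
 * driver. Decoding the default (redundant_sel == 0) layout above for a
 * hypothetical first fuse word 0x000000da:
 *   speed   = 0xda & 0x7                                 = 2
 *   pvs     = ((0xda >> 28) & 0x8) | ((0xda >> 6) & 0x7) = 0 | 3 = 3
 *   pvs_ver = (0xda >> 4) & 0x3                          = 1
 *   bit 3 is set -> the speed bin is valid, no fallback to 0
 */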
static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
struct nvmem_cell *speedbin_nvmem,
char **pvs_name,
struct qcom_cpufreq_drv *drv)
{
size_t len;
u32 msm_id;
u8 *speedbin;
int ret;
*pvs_name = NULL;
ret = qcom_smem_get_soc_id(&msm_id);
if (ret)
return ret;
speedbin = nvmem_cell_read(speedbin_nvmem, &len);
if (IS_ERR(speedbin))
return PTR_ERR(speedbin);
switch (msm_id) {
case QCOM_ID_MSM8996:
case QCOM_ID_APQ8096:
drv->versions = 1 << (unsigned int)(*speedbin);
break;
case QCOM_ID_MSM8996SG:
case QCOM_ID_APQ8096SG:
drv->versions = 1 << ((unsigned int)(*speedbin) + 4);
break;
default:
BUG();
break;
}
kfree(speedbin);
return 0;
}
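/*
 * Editor's note: illustrative remark, not part of the upstream driver.
 * The versions bitmap feeds the OPP framework's supported-hw matching:
 * plain MSM8996/APQ8096 parts occupy bits 0..3 and the SG variants
 * bits 4..7, so e.g. speedbin 1 on an APQ8096SG yields
 * versions == BIT(5) == 0x20, and only OPP entries whose supported-hw
 * mask contains that bit are enabled.
 */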
static int qcom_cpufreq_krait_name_version(struct device *cpu_dev,
struct nvmem_cell *speedbin_nvmem,
char **pvs_name,
struct qcom_cpufreq_drv *drv)
{
int speed = 0, pvs = 0, pvs_ver = 0;
u8 *speedbin;
size_t len;
int ret = 0;
speedbin = nvmem_cell_read(speedbin_nvmem, &len);
if (IS_ERR(speedbin))
return PTR_ERR(speedbin);
switch (len) {
case 4:
get_krait_bin_format_a(cpu_dev, &speed, &pvs, &pvs_ver,
speedbin);
break;
case 8:
get_krait_bin_format_b(cpu_dev, &speed, &pvs, &pvs_ver,
speedbin);
break;
default:
dev_err(cpu_dev, "Unable to read nvmem data. Defaulting to 0!\n");
ret = -ENODEV;
goto len_error;
}
snprintf(*pvs_name, sizeof("speedXX-pvsXX-vXX"), "speed%d-pvs%d-v%d",
speed, pvs, pvs_ver);
drv->versions = (1 << speed);
len_error:
kfree(speedbin);
return ret;
}
static const struct qcom_cpufreq_match_data match_data_kryo = {
.get_version = qcom_cpufreq_kryo_name_version,
};
static const struct qcom_cpufreq_match_data match_data_krait = {
.get_version = qcom_cpufreq_krait_name_version,
};
static const char *qcs404_genpd_names[] = { "cpr", NULL };
static const struct qcom_cpufreq_match_data match_data_qcs404 = {
.genpd_names = qcs404_genpd_names,
};
static int qcom_cpufreq_probe(struct platform_device *pdev)
{
struct qcom_cpufreq_drv *drv;
struct nvmem_cell *speedbin_nvmem;
struct device_node *np;
struct device *cpu_dev;
char pvs_name_buffer[] = "speedXX-pvsXX-vXX";
char *pvs_name = pvs_name_buffer;
unsigned cpu;
const struct of_device_id *match;
int ret;
cpu_dev = get_cpu_device(0);
if (!cpu_dev)
return -ENODEV;
np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
if (!np)
return -ENOENT;
ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
if (!ret) {
of_node_put(np);
return -ENOENT;
}
drv = kzalloc(sizeof(*drv), GFP_KERNEL);
if (!drv)
return -ENOMEM;
match = pdev->dev.platform_data;
drv->data = match->data;
if (!drv->data) {
ret = -ENODEV;
goto free_drv;
}
if (drv->data->get_version) {
speedbin_nvmem = of_nvmem_cell_get(np, NULL);
if (IS_ERR(speedbin_nvmem)) {
ret = dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
"Could not get nvmem cell\n");
goto free_drv;
}
ret = drv->data->get_version(cpu_dev,
speedbin_nvmem, &pvs_name, drv);
if (ret) {
nvmem_cell_put(speedbin_nvmem);
goto free_drv;
}
nvmem_cell_put(speedbin_nvmem);
}
of_node_put(np);
drv->opp_tokens = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tokens),
GFP_KERNEL);
if (!drv->opp_tokens) {
ret = -ENOMEM;
goto free_drv;
}
for_each_possible_cpu(cpu) {
struct dev_pm_opp_config config = {
.supported_hw = NULL,
};
cpu_dev = get_cpu_device(cpu);
if (NULL == cpu_dev) {
ret = -ENODEV;
goto free_opp;
}
if (drv->data->get_version) {
config.supported_hw = &drv->versions;
config.supported_hw_count = 1;
if (pvs_name)
config.prop_name = pvs_name;
}
if (drv->data->genpd_names) {
config.genpd_names = drv->data->genpd_names;
config.virt_devs = NULL;
}
if (config.supported_hw || config.genpd_names) {
drv->opp_tokens[cpu] = dev_pm_opp_set_config(cpu_dev, &config);
if (drv->opp_tokens[cpu] < 0) {
ret = drv->opp_tokens[cpu];
dev_err(cpu_dev, "Failed to set OPP config\n");
goto free_opp;
}
}
}
cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
NULL, 0);
if (!IS_ERR(cpufreq_dt_pdev)) {
platform_set_drvdata(pdev, drv);
return 0;
}
ret = PTR_ERR(cpufreq_dt_pdev);
dev_err(cpu_dev, "Failed to register platform device\n");
free_opp:
for_each_possible_cpu(cpu)
dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
kfree(drv->opp_tokens);
free_drv:
kfree(drv);
return ret;
}
static void qcom_cpufreq_remove(struct platform_device *pdev)
{
struct qcom_cpufreq_drv *drv = platform_get_drvdata(pdev);
unsigned int cpu;
platform_device_unregister(cpufreq_dt_pdev);
for_each_possible_cpu(cpu)
dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
kfree(drv->opp_tokens);
kfree(drv);
}
static struct platform_driver qcom_cpufreq_driver = {
.probe = qcom_cpufreq_probe,
.remove_new = qcom_cpufreq_remove,
.driver = {
.name = "qcom-cpufreq-nvmem",
},
};
static const struct of_device_id qcom_cpufreq_match_list[] __initconst = {
{ .compatible = "qcom,apq8096", .data = &match_data_kryo },
{ .compatible = "qcom,msm8996", .data = &match_data_kryo },
{ .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
{ .compatible = "qcom,ipq8064", .data = &match_data_krait },
{ .compatible = "qcom,apq8064", .data = &match_data_krait },
{ .compatible = "qcom,msm8974", .data = &match_data_krait },
{ .compatible = "qcom,msm8960", .data = &match_data_krait },
{},
};
MODULE_DEVICE_TABLE(of, qcom_cpufreq_match_list);
/*
 * Since the driver depends on the smem and nvmem drivers, which may
 * return EPROBE_DEFER, all the real activity is done in the probe,
 * which may be deferred as well. The init here only registers
 * the driver and the platform device.
 */
static int __init qcom_cpufreq_init(void)
{
struct device_node *np = of_find_node_by_path("/");
const struct of_device_id *match;
int ret;
if (!np)
return -ENODEV;
match = of_match_node(qcom_cpufreq_match_list, np);
of_node_put(np);
if (!match)
return -ENODEV;
ret = platform_driver_register(&qcom_cpufreq_driver);
if (unlikely(ret < 0))
return ret;
cpufreq_pdev = platform_device_register_data(NULL, "qcom-cpufreq-nvmem",
-1, match, sizeof(*match));
ret = PTR_ERR_OR_ZERO(cpufreq_pdev);
if (0 == ret)
return 0;
platform_driver_unregister(&qcom_cpufreq_driver);
return ret;
}
module_init(qcom_cpufreq_init);
static void __exit qcom_cpufreq_exit(void)
{
platform_device_unregister(cpufreq_pdev);
platform_driver_unregister(&qcom_cpufreq_driver);
}
module_exit(qcom_cpufreq_exit);
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. CPUfreq driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/cpufreq/qcom-cpufreq-nvmem.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Raspberry Pi cpufreq driver
*
* Copyright (C) 2019, Nicolas Saenz Julienne <[email protected]>
*/
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#define RASPBERRYPI_FREQ_INTERVAL 100000000
static struct platform_device *cpufreq_dt;
static int raspberrypi_cpufreq_probe(struct platform_device *pdev)
{
struct device *cpu_dev;
unsigned long min, max;
unsigned long rate;
struct clk *clk;
int ret;
cpu_dev = get_cpu_device(0);
if (!cpu_dev) {
pr_err("Cannot get CPU for cpufreq driver\n");
return -ENODEV;
}
clk = clk_get(cpu_dev, NULL);
if (IS_ERR(clk)) {
dev_err(cpu_dev, "Cannot get clock for CPU0\n");
return PTR_ERR(clk);
}
/*
* The max and min frequencies are configurable in the Raspberry Pi
* firmware, so we query them at runtime.
*/
min = roundup(clk_round_rate(clk, 0), RASPBERRYPI_FREQ_INTERVAL);
max = roundup(clk_round_rate(clk, ULONG_MAX), RASPBERRYPI_FREQ_INTERVAL);
clk_put(clk);
for (rate = min; rate <= max; rate += RASPBERRYPI_FREQ_INTERVAL) {
ret = dev_pm_opp_add(cpu_dev, rate, 0);
if (ret)
goto remove_opp;
}
cpufreq_dt = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
ret = PTR_ERR_OR_ZERO(cpufreq_dt);
if (ret) {
dev_err(cpu_dev, "Failed to create platform device, %d\n", ret);
goto remove_opp;
}
return 0;
remove_opp:
dev_pm_opp_remove_all_dynamic(cpu_dev);
return ret;
}
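/*
 * Editor's note: illustrative worked example, not part of the upstream
 * driver. With hypothetical firmware limits of 533 MHz and 1.5 GHz:
 *   min = roundup(533000000, 100000000)  = 600000000
 *   max = roundup(1500000000, 100000000) = 1500000000
 * so the probe registers the ten OPPs 600, 700, ..., 1500 MHz.
 */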
static void raspberrypi_cpufreq_remove(struct platform_device *pdev)
{
struct device *cpu_dev;
cpu_dev = get_cpu_device(0);
if (cpu_dev)
dev_pm_opp_remove_all_dynamic(cpu_dev);
platform_device_unregister(cpufreq_dt);
}
/*
 * Since the driver depends on clk-raspberrypi, which may return EPROBE_DEFER,
 * all the activity is performed in the probe, which may be deferred as well.
 */
static struct platform_driver raspberrypi_cpufreq_driver = {
.driver = {
.name = "raspberrypi-cpufreq",
},
.probe = raspberrypi_cpufreq_probe,
.remove_new = raspberrypi_cpufreq_remove,
};
module_platform_driver(raspberrypi_cpufreq_driver);
MODULE_AUTHOR("Nicolas Saenz Julienne <[email protected]");
MODULE_DESCRIPTION("Raspberry Pi cpufreq driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:raspberrypi-cpufreq");
| linux-master | drivers/cpufreq/raspberrypi-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved
*/
#include <linux/cpufreq.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#define TEGRA186_NUM_CLUSTERS 2
#define EDVD_OFFSET_A57(core) ((SZ_64K * 6) + (0x20 + (core) * 0x4))
#define EDVD_OFFSET_DENVER(core) ((SZ_64K * 7) + (0x20 + (core) * 0x4))
#define EDVD_CORE_VOLT_FREQ_F_SHIFT 0
#define EDVD_CORE_VOLT_FREQ_F_MASK 0xffff
#define EDVD_CORE_VOLT_FREQ_V_SHIFT 16
struct tegra186_cpufreq_cpu {
unsigned int bpmp_cluster_id;
unsigned int edvd_offset;
};
static const struct tegra186_cpufreq_cpu tegra186_cpus[] = {
/* CPU0 - A57 Cluster */
{
.bpmp_cluster_id = 1,
.edvd_offset = EDVD_OFFSET_A57(0)
},
/* CPU1 - Denver Cluster */
{
.bpmp_cluster_id = 0,
.edvd_offset = EDVD_OFFSET_DENVER(0)
},
/* CPU2 - Denver Cluster */
{
.bpmp_cluster_id = 0,
.edvd_offset = EDVD_OFFSET_DENVER(1)
},
/* CPU3 - A57 Cluster */
{
.bpmp_cluster_id = 1,
.edvd_offset = EDVD_OFFSET_A57(1)
},
/* CPU4 - A57 Cluster */
{
.bpmp_cluster_id = 1,
.edvd_offset = EDVD_OFFSET_A57(2)
},
/* CPU5 - A57 Cluster */
{
.bpmp_cluster_id = 1,
.edvd_offset = EDVD_OFFSET_A57(3)
},
};
struct tegra186_cpufreq_cluster {
struct cpufreq_frequency_table *table;
u32 ref_clk_khz;
u32 div;
};
struct tegra186_cpufreq_data {
void __iomem *regs;
const struct tegra186_cpufreq_cpu *cpus;
struct tegra186_cpufreq_cluster clusters[];
};
static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;
policy->freq_table = data->clusters[cluster].table;
policy->cpuinfo.transition_latency = 300 * 1000;
policy->driver_data = NULL;
return 0;
}
static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
struct cpufreq_frequency_table *tbl = policy->freq_table + index;
unsigned int edvd_offset = data->cpus[policy->cpu].edvd_offset;
u32 edvd_val = tbl->driver_data;
writel(edvd_val, data->regs + edvd_offset);
return 0;
}
static unsigned int tegra186_cpufreq_get(unsigned int cpu)
{
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
struct tegra186_cpufreq_cluster *cluster;
struct cpufreq_policy *policy;
unsigned int edvd_offset, cluster_id;
u32 ndiv;
policy = cpufreq_cpu_get(cpu);
if (!policy)
return 0;
edvd_offset = data->cpus[policy->cpu].edvd_offset;
ndiv = readl(data->regs + edvd_offset) & EDVD_CORE_VOLT_FREQ_F_MASK;
cluster_id = data->cpus[policy->cpu].bpmp_cluster_id;
cluster = &data->clusters[cluster_id];
cpufreq_cpu_put(policy);
return (cluster->ref_clk_khz * ndiv) / cluster->div;
}
static struct cpufreq_driver tegra186_cpufreq_driver = {
.name = "tegra186",
.flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.get = tegra186_cpufreq_get,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = tegra186_cpufreq_set_target,
.init = tegra186_cpufreq_init,
.attr = cpufreq_generic_attr,
};
static struct cpufreq_frequency_table *init_vhint_table(
struct platform_device *pdev, struct tegra_bpmp *bpmp,
struct tegra186_cpufreq_cluster *cluster, unsigned int cluster_id)
{
struct cpufreq_frequency_table *table;
struct mrq_cpu_vhint_request req;
struct tegra_bpmp_message msg;
struct cpu_vhint_data *data;
int err, i, j, num_rates = 0;
dma_addr_t phys;
void *virt;
virt = dma_alloc_coherent(bpmp->dev, sizeof(*data), &phys,
GFP_KERNEL);
if (!virt)
return ERR_PTR(-ENOMEM);
data = (struct cpu_vhint_data *)virt;
memset(&req, 0, sizeof(req));
req.addr = phys;
req.cluster_id = cluster_id;
memset(&msg, 0, sizeof(msg));
msg.mrq = MRQ_CPU_VHINT;
msg.tx.data = &req;
msg.tx.size = sizeof(req);
err = tegra_bpmp_transfer(bpmp, &msg);
if (err) {
table = ERR_PTR(err);
goto free;
}
if (msg.rx.ret) {
table = ERR_PTR(-EINVAL);
goto free;
}
for (i = data->vfloor; i <= data->vceil; i++) {
u16 ndiv = data->ndiv[i];
if (ndiv < data->ndiv_min || ndiv > data->ndiv_max)
continue;
/* Only store lowest voltage index for each rate */
if (i > 0 && ndiv == data->ndiv[i - 1])
continue;
num_rates++;
}
table = devm_kcalloc(&pdev->dev, num_rates + 1, sizeof(*table),
GFP_KERNEL);
if (!table) {
table = ERR_PTR(-ENOMEM);
goto free;
}
cluster->ref_clk_khz = data->ref_clk_hz / 1000;
cluster->div = data->pdiv * data->mdiv;
for (i = data->vfloor, j = 0; i <= data->vceil; i++) {
struct cpufreq_frequency_table *point;
u16 ndiv = data->ndiv[i];
u32 edvd_val = 0;
if (ndiv < data->ndiv_min || ndiv > data->ndiv_max)
continue;
/* Only store lowest voltage index for each rate */
if (i > 0 && ndiv == data->ndiv[i - 1])
continue;
edvd_val |= i << EDVD_CORE_VOLT_FREQ_V_SHIFT;
edvd_val |= ndiv << EDVD_CORE_VOLT_FREQ_F_SHIFT;
point = &table[j++];
point->driver_data = edvd_val;
point->frequency = (cluster->ref_clk_khz * ndiv) / cluster->div;
}
table[j].frequency = CPUFREQ_TABLE_END;
free:
dma_free_coherent(bpmp->dev, sizeof(*data), virt, phys);
return table;
}
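/*
 * Editor's note: illustrative worked example, not part of the upstream
 * driver. init_vhint_table() above packs the voltage index into bits
 * 31:16 of the EDVD word and ndiv into bits 15:0, and derives the rate
 * from the BPMP-provided dividers. With hypothetical values
 * ref_clk_hz = 408000000, pdiv = 1, mdiv = 8, voltage index 10 and
 * ndiv = 40:
 *   edvd_val  = (10 << 16) | 40 = 0x000a0028
 *   frequency = (408000 kHz * 40) / (1 * 8) = 2040000 kHz, i.e. 2.04 GHz
 */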
static int tegra186_cpufreq_probe(struct platform_device *pdev)
{
struct tegra186_cpufreq_data *data;
struct tegra_bpmp *bpmp;
	unsigned int i = 0;
	int err;
data = devm_kzalloc(&pdev->dev,
struct_size(data, clusters, TEGRA186_NUM_CLUSTERS),
GFP_KERNEL);
if (!data)
return -ENOMEM;
data->cpus = tegra186_cpus;
bpmp = tegra_bpmp_get(&pdev->dev);
if (IS_ERR(bpmp))
return PTR_ERR(bpmp);
data->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->regs)) {
err = PTR_ERR(data->regs);
goto put_bpmp;
}
for (i = 0; i < TEGRA186_NUM_CLUSTERS; i++) {
struct tegra186_cpufreq_cluster *cluster = &data->clusters[i];
cluster->table = init_vhint_table(pdev, bpmp, cluster, i);
if (IS_ERR(cluster->table)) {
err = PTR_ERR(cluster->table);
goto put_bpmp;
}
}
tegra186_cpufreq_driver.driver_data = data;
err = cpufreq_register_driver(&tegra186_cpufreq_driver);
put_bpmp:
tegra_bpmp_put(bpmp);
return err;
}
static void tegra186_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&tegra186_cpufreq_driver);
}
static const struct of_device_id tegra186_cpufreq_of_match[] = {
{ .compatible = "nvidia,tegra186-ccplex-cluster", },
{ }
};
MODULE_DEVICE_TABLE(of, tegra186_cpufreq_of_match);
static struct platform_driver tegra186_cpufreq_platform_driver = {
.driver = {
.name = "tegra186-cpufreq",
.of_match_table = tegra186_cpufreq_of_match,
},
.probe = tegra186_cpufreq_probe,
.remove_new = tegra186_cpufreq_remove,
};
module_platform_driver(tegra186_cpufreq_platform_driver);
MODULE_AUTHOR("Mikko Perttunen <[email protected]>");
MODULE_DESCRIPTION("NVIDIA Tegra186 cpufreq driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/cpufreq/tegra186-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* CPU frequency scaling for DaVinci
*
* Copyright (C) 2009 Texas Instruments Incorporated - https://www.ti.com/
*
* Based on linux/arch/arm/plat-omap/cpu-omap.c. Original Copyright follows:
*
* Copyright (C) 2005 Nokia Corporation
* Written by Tony Lindgren <[email protected]>
*
* Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
*
* Copyright (C) 2007-2008 Texas Instruments, Inc.
* Updated to support OMAP3
* Rajendra Nayak <[email protected]>
*/
#include <linux/types.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/platform_data/davinci-cpufreq.h>
#include <linux/platform_device.h>
#include <linux/export.h>
struct davinci_cpufreq {
struct device *dev;
struct clk *armclk;
struct clk *asyncclk;
unsigned long asyncrate;
};
static struct davinci_cpufreq cpufreq;
static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
{
struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
struct clk *armclk = cpufreq.armclk;
unsigned int old_freq, new_freq;
int ret = 0;
old_freq = policy->cur;
new_freq = pdata->freq_table[idx].frequency;
/* if moving to higher frequency, up the voltage beforehand */
if (pdata->set_voltage && new_freq > old_freq) {
ret = pdata->set_voltage(idx);
if (ret)
return ret;
}
ret = clk_set_rate(armclk, new_freq * 1000);
if (ret)
return ret;
if (cpufreq.asyncclk) {
ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate);
if (ret)
return ret;
}
/* if moving to lower freq, lower the voltage after lowering freq */
if (pdata->set_voltage && new_freq < old_freq)
pdata->set_voltage(idx);
return 0;
}
static int davinci_cpu_init(struct cpufreq_policy *policy)
{
int result = 0;
struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
struct cpufreq_frequency_table *freq_table = pdata->freq_table;
if (policy->cpu != 0)
return -EINVAL;
/* Finish platform specific initialization */
if (pdata->init) {
result = pdata->init();
if (result)
return result;
}
policy->clk = cpufreq.armclk;
/*
* Time measurement across the target() function yields ~1500-1800us
* time taken with no drivers on notification list.
* Setting the latency to 2000 us to accommodate addition of drivers
* to pre/post change notification list.
*/
cpufreq_generic_init(policy, freq_table, 2000 * 1000);
return 0;
}
static struct cpufreq_driver davinci_driver = {
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = davinci_target,
.get = cpufreq_generic_get,
.init = davinci_cpu_init,
.name = "davinci",
.attr = cpufreq_generic_attr,
};
static int __init davinci_cpufreq_probe(struct platform_device *pdev)
{
struct davinci_cpufreq_config *pdata = pdev->dev.platform_data;
struct clk *asyncclk;
if (!pdata)
return -EINVAL;
if (!pdata->freq_table)
return -EINVAL;
cpufreq.dev = &pdev->dev;
cpufreq.armclk = clk_get(NULL, "arm");
if (IS_ERR(cpufreq.armclk)) {
dev_err(cpufreq.dev, "Unable to get ARM clock\n");
return PTR_ERR(cpufreq.armclk);
}
asyncclk = clk_get(cpufreq.dev, "async");
if (!IS_ERR(asyncclk)) {
cpufreq.asyncclk = asyncclk;
cpufreq.asyncrate = clk_get_rate(asyncclk);
}
return cpufreq_register_driver(&davinci_driver);
}
static void __exit davinci_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&davinci_driver);
clk_put(cpufreq.armclk);
if (cpufreq.asyncclk)
clk_put(cpufreq.asyncclk);
}
static struct platform_driver davinci_cpufreq_driver = {
.driver = {
.name = "cpufreq-davinci",
},
.remove_new = __exit_p(davinci_cpufreq_remove),
};
int __init davinci_cpufreq_init(void)
{
return platform_driver_probe(&davinci_cpufreq_driver,
davinci_cpufreq_probe);
}
| linux-master | drivers/cpufreq/davinci-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support
*
* Copyright (C) 2003 David S. Miller ([email protected])
*
* Many thanks to Dominik Brodowski for fixing up the cpufreq
* infrastructure in order to make this driver easier to implement.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/threads.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <asm/asi.h>
#include <asm/timer.h>
struct us2e_freq_percpu_info {
struct cpufreq_frequency_table table[6];
};
/* Indexed by cpu number. */
static struct us2e_freq_percpu_info *us2e_freq_table;
#define HBIRD_MEM_CNTL0_ADDR 0x1fe0000f010UL
#define HBIRD_ESTAR_MODE_ADDR 0x1fe0000f080UL
/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8. These are controlled
* in the ESTAR mode control register.
*/
#define ESTAR_MODE_DIV_1 0x0000000000000000UL
#define ESTAR_MODE_DIV_2 0x0000000000000001UL
#define ESTAR_MODE_DIV_4 0x0000000000000003UL
#define ESTAR_MODE_DIV_6 0x0000000000000002UL
#define ESTAR_MODE_DIV_8 0x0000000000000004UL
#define ESTAR_MODE_DIV_MASK 0x0000000000000007UL
#define MCTRL0_SREFRESH_ENAB 0x0000000000010000UL
#define MCTRL0_REFR_COUNT_MASK 0x0000000000007f00UL
#define MCTRL0_REFR_COUNT_SHIFT 8
#define MCTRL0_REFR_INTERVAL 7800
#define MCTRL0_REFR_CLKS_P_CNT 64
static unsigned long read_hbreg(unsigned long addr)
{
unsigned long ret;
__asm__ __volatile__("ldxa [%1] %2, %0"
: "=&r" (ret)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
return ret;
}
static void write_hbreg(unsigned long addr, unsigned long val)
{
__asm__ __volatile__("stxa %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
: "memory");
if (addr == HBIRD_ESTAR_MODE_ADDR) {
/* Need to wait 16 clock cycles for the PLL to lock. */
udelay(1);
}
}
static void self_refresh_ctl(int enable)
{
unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
if (enable)
mctrl |= MCTRL0_SREFRESH_ENAB;
else
mctrl &= ~MCTRL0_SREFRESH_ENAB;
write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
(void) read_hbreg(HBIRD_MEM_CNTL0_ADDR);
}
static void frob_mem_refresh(int cpu_slowing_down,
unsigned long clock_tick,
unsigned long old_divisor, unsigned long divisor)
{
unsigned long old_refr_count, refr_count, mctrl;
refr_count = (clock_tick * MCTRL0_REFR_INTERVAL);
refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);
mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK)
>> MCTRL0_REFR_COUNT_SHIFT;
mctrl &= ~MCTRL0_REFR_COUNT_MASK;
mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT;
write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) {
unsigned long usecs;
/* We have to wait for both refresh counts (old
* and new) to go to zero.
*/
usecs = (MCTRL0_REFR_CLKS_P_CNT *
(refr_count + old_refr_count) *
1000000UL *
old_divisor) / clock_tick;
udelay(usecs + 1UL);
}
}
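/*
 * Editor's note: illustrative worked example, not part of the upstream
 * driver. The refresh count above is
 *   refr_count = clock_tick * 7800 / (64 * divisor * 10^9)
 * (clock_tick in Hz). For a hypothetical 500 MHz part:
 *   divisor 1: 5*10^8 * 7800 / (64 * 1 * 10^9) = 60
 *   divisor 8: 5*10^8 * 7800 / (64 * 8 * 10^9) = 7
 * both of which fit the 7-bit MCTRL0_REFR_COUNT_MASK field.
 */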
static void us2e_transition(unsigned long estar, unsigned long new_bits,
unsigned long clock_tick,
unsigned long old_divisor, unsigned long divisor)
{
estar &= ~ESTAR_MODE_DIV_MASK;
/* This is based upon the state transition diagram in the IIe manual. */
if (old_divisor == 2 && divisor == 1) {
self_refresh_ctl(0);
write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
frob_mem_refresh(0, clock_tick, old_divisor, divisor);
} else if (old_divisor == 1 && divisor == 2) {
frob_mem_refresh(1, clock_tick, old_divisor, divisor);
write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
self_refresh_ctl(1);
} else if (old_divisor == 1 && divisor > 2) {
us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
1, 2);
us2e_transition(estar, new_bits, clock_tick,
2, divisor);
} else if (old_divisor > 2 && divisor == 1) {
us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
old_divisor, 2);
us2e_transition(estar, new_bits, clock_tick,
2, divisor);
} else if (old_divisor < divisor) {
frob_mem_refresh(0, clock_tick, old_divisor, divisor);
write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
} else if (old_divisor > divisor) {
write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
frob_mem_refresh(1, clock_tick, old_divisor, divisor);
} else {
BUG();
}
}
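/*
 * Editor's note: illustrative remark, not part of the upstream driver.
 * The recursion above means every transition that crosses divisor 1
 * takes an intermediate hop through divisor 2, e.g.:
 *   1 -> 6: first 1 -> 2 (enter memory self-refresh), then 2 -> 6
 *   8 -> 1: first 8 -> 2, then 2 -> 1 (leave memory self-refresh)
 * Transitions that stay at divisor >= 2 only reprogram ESTAR and the
 * refresh interval, in the order dictated by the direction of change.
 */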
static unsigned long index_to_estar_mode(unsigned int index)
{
switch (index) {
case 0:
return ESTAR_MODE_DIV_1;
case 1:
return ESTAR_MODE_DIV_2;
case 2:
return ESTAR_MODE_DIV_4;
case 3:
return ESTAR_MODE_DIV_6;
case 4:
return ESTAR_MODE_DIV_8;
default:
BUG();
}
}
static unsigned long index_to_divisor(unsigned int index)
{
switch (index) {
case 0:
return 1;
case 1:
return 2;
case 2:
return 4;
case 3:
return 6;
case 4:
return 8;
default:
BUG();
}
}
static unsigned long estar_to_divisor(unsigned long estar)
{
unsigned long ret;
switch (estar & ESTAR_MODE_DIV_MASK) {
case ESTAR_MODE_DIV_1:
ret = 1;
break;
case ESTAR_MODE_DIV_2:
ret = 2;
break;
case ESTAR_MODE_DIV_4:
ret = 4;
break;
case ESTAR_MODE_DIV_6:
ret = 6;
break;
case ESTAR_MODE_DIV_8:
ret = 8;
break;
default:
BUG();
}
return ret;
}
static void __us2e_freq_get(void *arg)
{
unsigned long *estar = arg;
*estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
}
static unsigned int us2e_freq_get(unsigned int cpu)
{
unsigned long clock_tick, estar;
clock_tick = sparc64_get_clock_tick(cpu) / 1000;
if (smp_call_function_single(cpu, __us2e_freq_get, &estar, 1))
return 0;
return clock_tick / estar_to_divisor(estar);
}
static void __us2e_freq_target(void *arg)
{
unsigned int cpu = smp_processor_id();
unsigned int *index = arg;
unsigned long new_bits, new_freq;
unsigned long clock_tick, divisor, old_divisor, estar;
new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
new_bits = index_to_estar_mode(*index);
divisor = index_to_divisor(*index);
new_freq /= divisor;
estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
old_divisor = estar_to_divisor(estar);
if (old_divisor != divisor) {
us2e_transition(estar, new_bits, clock_tick * 1000,
old_divisor, divisor);
}
}
static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned int cpu = policy->cpu;
return smp_call_function_single(cpu, __us2e_freq_target, &index, 1);
}
static int us2e_freq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
struct cpufreq_frequency_table *table =
&us2e_freq_table[cpu].table[0];
	table[0].driver_data = 0;
	table[0].frequency = clock_tick / 1;
	table[1].driver_data = 1;
	table[1].frequency = clock_tick / 2;
	table[2].driver_data = 2;
	table[2].frequency = clock_tick / 4;
	table[3].driver_data = 3;
	table[3].frequency = clock_tick / 6;
	table[4].driver_data = 4;
	table[4].frequency = clock_tick / 8;
	table[5].driver_data = 5;
	table[5].frequency = CPUFREQ_TABLE_END;
policy->cpuinfo.transition_latency = 0;
policy->cur = clock_tick;
policy->freq_table = table;
return 0;
}
static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
{
us2e_freq_target(policy, 0);
return 0;
}
static struct cpufreq_driver cpufreq_us2e_driver = {
.name = "UltraSPARC-IIe",
.init = us2e_freq_cpu_init,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = us2e_freq_target,
.get = us2e_freq_get,
.exit = us2e_freq_cpu_exit,
};
static int __init us2e_freq_init(void)
{
unsigned long manuf, impl, ver;
int ret;
if (tlb_type != spitfire)
return -ENODEV;
__asm__("rdpr %%ver, %0" : "=r" (ver));
manuf = ((ver >> 48) & 0xffff);
impl = ((ver >> 32) & 0xffff);
if (manuf == 0x17 && impl == 0x13) {
		us2e_freq_table = kcalloc(NR_CPUS, sizeof(*us2e_freq_table),
					  GFP_KERNEL);
if (!us2e_freq_table)
return -ENOMEM;
ret = cpufreq_register_driver(&cpufreq_us2e_driver);
if (ret)
kfree(us2e_freq_table);
return ret;
}
return -ENODEV;
}
static void __exit us2e_freq_exit(void)
{
cpufreq_unregister_driver(&cpufreq_us2e_driver);
kfree(us2e_freq_table);
}
MODULE_AUTHOR("David S. Miller <[email protected]>");
MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe");
MODULE_LICENSE("GPL");
module_init(us2e_freq_init);
module_exit(us2e_freq_exit);
| linux-master | drivers/cpufreq/sparc-us2e-cpufreq.c |
/*
* drivers/cpufreq/spear-cpufreq.c
*
* CPU Frequency Scaling for SPEAr platform
*
* Copyright (C) 2012 ST Microelectronics
* Deepak Sikri <[email protected]>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
/* SPEAr CPUFreq driver data structure */
static struct {
struct clk *clk;
unsigned int transition_latency;
struct cpufreq_frequency_table *freq_tbl;
u32 cnt;
} spear_cpufreq;
static struct clk *spear1340_cpu_get_possible_parent(unsigned long newfreq)
{
struct clk *sys_pclk;
int pclk;
	/*
	 * In SPEAr1340, the cpu clk's parent sys clk can take input from
	 * the following sources
	 */
static const char * const sys_clk_src[] = {
"sys_syn_clk",
"pll1_clk",
"pll2_clk",
"pll3_clk",
};
	/*
	 * As sys clk can have multiple sources, each with its own range
	 * limitation, we choose the possible source accordingly
	 */
if (newfreq <= 300000000)
pclk = 0; /* src is sys_syn_clk */
else if (newfreq > 300000000 && newfreq <= 500000000)
pclk = 3; /* src is pll3_clk */
else if (newfreq == 600000000)
pclk = 1; /* src is pll1_clk */
else
return ERR_PTR(-EINVAL);
/* Get parent to sys clock */
sys_pclk = clk_get(NULL, sys_clk_src[pclk]);
if (IS_ERR(sys_pclk))
pr_err("Failed to get %s clock\n", sys_clk_src[pclk]);
return sys_pclk;
}
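/*
 * Editor's note: illustrative worked example, not part of the upstream
 * driver. With the ranges above, a requested 400 MHz cpu clock falls in
 * the (300, 500] MHz window and selects "pll3_clk"; exactly 600 MHz
 * selects "pll1_clk"; 250 MHz selects "sys_syn_clk"; any other rate
 * above 500 MHz is rejected with -EINVAL.
 */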
/*
 * In SPEAr1340, we cannot use newfreq directly because we need to actually
 * access a source clock (clk) which might not be an ancestor of the cpu
 * clock at present. Hence in SPEAr1340 we operate on the source clock
 * directly before switching the cpu clock to it.
 */
static int spear1340_set_cpu_rate(struct clk *sys_pclk, unsigned long newfreq)
{
struct clk *sys_clk;
int ret = 0;
sys_clk = clk_get_parent(spear_cpufreq.clk);
if (IS_ERR(sys_clk)) {
pr_err("failed to get cpu's parent (sys) clock\n");
return PTR_ERR(sys_clk);
}
/* Set the rate of the source clock before changing the parent */
ret = clk_set_rate(sys_pclk, newfreq);
if (ret) {
pr_err("Failed to set sys clk rate to %lu\n", newfreq);
return ret;
}
ret = clk_set_parent(sys_clk, sys_pclk);
if (ret) {
pr_err("Failed to set sys clk parent\n");
return ret;
}
return 0;
}
static int spear_cpufreq_target(struct cpufreq_policy *policy,
unsigned int index)
{
long newfreq;
struct clk *srcclk;
int ret, mult = 1;
newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000;
if (of_machine_is_compatible("st,spear1340")) {
		/*
		 * SPEAr1340 is special in the sense that, due to the
		 * possibility of multiple clock sources for the cpu clk's
		 * parent, a different clock source may be used for different
		 * cpu clk frequencies. Hence we need to choose one from
		 * amongst these possible clock sources.
		 */
srcclk = spear1340_cpu_get_possible_parent(newfreq);
if (IS_ERR(srcclk)) {
pr_err("Failed to get src clk\n");
return PTR_ERR(srcclk);
}
/* SPEAr1340: src clk is always 2 * intended cpu clk */
mult = 2;
} else {
		/*
		 * The src clock to be altered is an ancestor of the cpu
		 * clock. Hence we can directly work on the cpu clk.
		 */
srcclk = spear_cpufreq.clk;
}
newfreq = clk_round_rate(srcclk, newfreq * mult);
if (newfreq <= 0) {
pr_err("clk_round_rate failed for cpu src clock\n");
return newfreq;
}
if (mult == 2)
ret = spear1340_set_cpu_rate(srcclk, newfreq);
else
ret = clk_set_rate(spear_cpufreq.clk, newfreq);
if (ret)
pr_err("CPU Freq: cpu clk_set_rate failed: %d\n", ret);
return ret;
}
static int spear_cpufreq_init(struct cpufreq_policy *policy)
{
policy->clk = spear_cpufreq.clk;
cpufreq_generic_init(policy, spear_cpufreq.freq_tbl,
spear_cpufreq.transition_latency);
return 0;
}
static struct cpufreq_driver spear_cpufreq_driver = {
.name = "cpufreq-spear",
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = spear_cpufreq_target,
.get = cpufreq_generic_get,
.init = spear_cpufreq_init,
.attr = cpufreq_generic_attr,
};
static int spear_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
const struct property *prop;
struct cpufreq_frequency_table *freq_tbl;
const __be32 *val;
int cnt, i, ret;
np = of_cpu_device_node_get(0);
if (!np) {
pr_err("No cpu node found\n");
return -ENODEV;
}
if (of_property_read_u32(np, "clock-latency",
&spear_cpufreq.transition_latency))
spear_cpufreq.transition_latency = CPUFREQ_ETERNAL;
prop = of_find_property(np, "cpufreq_tbl", NULL);
if (!prop || !prop->value) {
pr_err("Invalid cpufreq_tbl\n");
ret = -ENODEV;
goto out_put_node;
}
cnt = prop->length / sizeof(u32);
val = prop->value;
freq_tbl = kcalloc(cnt + 1, sizeof(*freq_tbl), GFP_KERNEL);
if (!freq_tbl) {
ret = -ENOMEM;
goto out_put_node;
}
for (i = 0; i < cnt; i++)
freq_tbl[i].frequency = be32_to_cpup(val++);
freq_tbl[i].frequency = CPUFREQ_TABLE_END;
spear_cpufreq.freq_tbl = freq_tbl;
of_node_put(np);
spear_cpufreq.clk = clk_get(NULL, "cpu_clk");
if (IS_ERR(spear_cpufreq.clk)) {
pr_err("Unable to get CPU clock\n");
ret = PTR_ERR(spear_cpufreq.clk);
goto out_put_mem;
}
ret = cpufreq_register_driver(&spear_cpufreq_driver);
if (!ret)
return 0;
pr_err("failed register driver: %d\n", ret);
clk_put(spear_cpufreq.clk);
out_put_mem:
kfree(freq_tbl);
return ret;
out_put_node:
of_node_put(np);
return ret;
}
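/*
* For illustration, a hypothetical cpu node that this probe routine
* could parse might look as follows (the property values below are
* assumptions; cpufreq_tbl entries are frequencies in kHz):
*
*	cpu@0 {
*		...
*		clock-latency = <250000>;
*		cpufreq_tbl = <166000 200000 250000 332000 500000>;
*	};
*/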
static struct platform_driver spear_cpufreq_platdrv = {
.driver = {
.name = "spear-cpufreq",
},
.probe = spear_cpufreq_probe,
};
module_platform_driver(spear_cpufreq_platdrv);
MODULE_AUTHOR("Deepak Sikri <[email protected]>");
MODULE_DESCRIPTION("SPEAr CPUFreq driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/cpufreq/spear-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020 MediaTek Inc.
*/
#include <linux/bitfield.h>
#include <linux/cpufreq.h>
#include <linux/energy_model.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define LUT_MAX_ENTRIES 32U
#define LUT_FREQ GENMASK(11, 0)
#define LUT_ROW_SIZE 0x4
#define CPUFREQ_HW_STATUS BIT(0)
#define SVS_HW_STATUS BIT(1)
#define POLL_USEC 1000
#define TIMEOUT_USEC 300000
enum {
REG_FREQ_LUT_TABLE,
REG_FREQ_ENABLE,
REG_FREQ_PERF_STATE,
REG_FREQ_HW_STATE,
REG_EM_POWER_TBL,
REG_FREQ_LATENCY,
REG_ARRAY_SIZE,
};
struct mtk_cpufreq_data {
struct cpufreq_frequency_table *table;
void __iomem *reg_bases[REG_ARRAY_SIZE];
struct resource *res;
void __iomem *base;
int nr_opp;
};
static const u16 cpufreq_mtk_offsets[REG_ARRAY_SIZE] = {
[REG_FREQ_LUT_TABLE] = 0x0,
[REG_FREQ_ENABLE] = 0x84,
[REG_FREQ_PERF_STATE] = 0x88,
[REG_FREQ_HW_STATE] = 0x8c,
[REG_EM_POWER_TBL] = 0x90,
[REG_FREQ_LATENCY] = 0x110,
};
static int __maybe_unused
mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
unsigned long *KHz)
{
struct mtk_cpufreq_data *data;
struct cpufreq_policy *policy;
int i;
policy = cpufreq_cpu_get_raw(cpu_dev->id);
if (!policy)
return 0;
data = policy->driver_data;
for (i = 0; i < data->nr_opp; i++) {
if (data->table[i].frequency < *KHz)
break;
}
i--;
*KHz = data->table[i].frequency;
/* Provide micro-Watts value to the Energy Model */
*uW = readl_relaxed(data->reg_bases[REG_EM_POWER_TBL] +
i * LUT_ROW_SIZE);
return 0;
}
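/*
* Worked example (illustrative values): with a descending table of
* { 2000000, 1500000, 1000000 } kHz and *KHz = 1600000, the loop breaks
* at i = 1 (1500000 < 1600000), i is decremented back to 0, and *KHz is
* rounded up to 2000000 kHz; *uW is then read from the power table row
* at index 0.
*/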
static int mtk_cpufreq_hw_target_index(struct cpufreq_policy *policy,
unsigned int index)
{
struct mtk_cpufreq_data *data = policy->driver_data;
writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
return 0;
}
static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
{
struct mtk_cpufreq_data *data;
struct cpufreq_policy *policy;
unsigned int index;
policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
data = policy->driver_data;
index = readl_relaxed(data->reg_bases[REG_FREQ_PERF_STATE]);
index = min(index, LUT_MAX_ENTRIES - 1);
return data->table[index].frequency;
}
static unsigned int mtk_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct mtk_cpufreq_data *data = policy->driver_data;
unsigned int index;
index = cpufreq_table_find_index_dl(policy, target_freq, false);
writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
return policy->freq_table[index].frequency;
}
static int mtk_cpu_create_freq_table(struct platform_device *pdev,
struct mtk_cpufreq_data *data)
{
struct device *dev = &pdev->dev;
u32 temp, i, freq, prev_freq = 0;
void __iomem *base_table;
data->table = devm_kcalloc(dev, LUT_MAX_ENTRIES + 1,
sizeof(*data->table), GFP_KERNEL);
if (!data->table)
return -ENOMEM;
base_table = data->reg_bases[REG_FREQ_LUT_TABLE];
for (i = 0; i < LUT_MAX_ENTRIES; i++) {
temp = readl_relaxed(base_table + (i * LUT_ROW_SIZE));
freq = FIELD_GET(LUT_FREQ, temp) * 1000;
if (freq == prev_freq)
break;
data->table[i].frequency = freq;
dev_dbg(dev, "index=%d freq=%d\n", i, data->table[i].frequency);
prev_freq = freq;
}
data->table[i].frequency = CPUFREQ_TABLE_END;
data->nr_opp = i;
return 0;
}
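/*
* Worked example (register value is an assumption): if a LUT row reads
* back as 0x000007d0, FIELD_GET(LUT_FREQ, ...) extracts bits [11:0] =
* 2000, which (taken as MHz) becomes 2000 * 1000 = 2000000 kHz, i.e.
* 2 GHz. A row repeating the previous frequency terminates the table
* early.
*/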
static int mtk_cpu_resources_init(struct platform_device *pdev,
struct cpufreq_policy *policy,
const u16 *offsets)
{
struct mtk_cpufreq_data *data;
struct device *dev = &pdev->dev;
struct resource *res;
struct of_phandle_args args;
void __iomem *base;
int ret, i;
int index;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
ret = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains",
"#performance-domain-cells",
policy->cpus, &args);
if (ret < 0)
return ret;
index = args.args[0];
of_node_put(args.np);
res = platform_get_resource(pdev, IORESOURCE_MEM, index);
if (!res) {
dev_err(dev, "failed to get mem resource %d\n", index);
return -ENODEV;
}
if (!request_mem_region(res->start, resource_size(res), res->name)) {
dev_err(dev, "failed to request resource %pR\n", res);
return -EBUSY;
}
base = ioremap(res->start, resource_size(res));
if (!base) {
dev_err(dev, "failed to map resource %pR\n", res);
ret = -ENOMEM;
goto release_region;
}
data->base = base;
data->res = res;
for (i = REG_FREQ_LUT_TABLE; i < REG_ARRAY_SIZE; i++)
data->reg_bases[i] = base + offsets[i];
ret = mtk_cpu_create_freq_table(pdev, data);
if (ret) {
dev_info(dev, "Domain-%d failed to create freq table\n", index);
goto unmap_base;
}
policy->freq_table = data->table;
policy->driver_data = data;
return 0;
unmap_base:
iounmap(base);
release_region:
release_mem_region(res->start, resource_size(res));
return ret;
}
static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
struct platform_device *pdev = cpufreq_get_driver_data();
int sig, pwr_hw = CPUFREQ_HW_STATUS | SVS_HW_STATUS;
struct mtk_cpufreq_data *data;
unsigned int latency;
int ret;
/* Get the bases of cpufreq for domains */
ret = mtk_cpu_resources_init(pdev, policy, platform_get_drvdata(pdev));
if (ret) {
dev_info(&pdev->dev, "CPUFreq resource init failed\n");
return ret;
}
data = policy->driver_data;
latency = readl_relaxed(data->reg_bases[REG_FREQ_LATENCY]) * 1000;
if (!latency)
latency = CPUFREQ_ETERNAL;
policy->cpuinfo.transition_latency = latency;
policy->fast_switch_possible = true;
/* HW should be in enabled state to proceed now */
writel_relaxed(0x1, data->reg_bases[REG_FREQ_ENABLE]);
if (readl_poll_timeout(data->reg_bases[REG_FREQ_HW_STATE], sig,
(sig & pwr_hw) == pwr_hw, POLL_USEC,
TIMEOUT_USEC)) {
if (!(sig & CPUFREQ_HW_STATUS)) {
pr_info("cpufreq hardware of CPU%d is not enabled\n",
policy->cpu);
return -ENODEV;
}
pr_info("SVS of CPU%d is not enabled\n", policy->cpu);
}
return 0;
}
static int mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
struct mtk_cpufreq_data *data = policy->driver_data;
struct resource *res = data->res;
void __iomem *base = data->base;
/* HW should be in paused state now */
writel_relaxed(0x0, data->reg_bases[REG_FREQ_ENABLE]);
iounmap(base);
release_mem_region(res->start, resource_size(res));
return 0;
}
static void mtk_cpufreq_register_em(struct cpufreq_policy *policy)
{
struct em_data_callback em_cb = EM_DATA_CB(mtk_cpufreq_get_cpu_power);
struct mtk_cpufreq_data *data = policy->driver_data;
em_dev_register_perf_domain(get_cpu_device(policy->cpu), data->nr_opp,
&em_cb, policy->cpus, true);
}
static struct cpufreq_driver cpufreq_mtk_hw_driver = {
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = mtk_cpufreq_hw_target_index,
.get = mtk_cpufreq_hw_get,
.init = mtk_cpufreq_hw_cpu_init,
.exit = mtk_cpufreq_hw_cpu_exit,
.register_em = mtk_cpufreq_register_em,
.fast_switch = mtk_cpufreq_hw_fast_switch,
.name = "mtk-cpufreq-hw",
.attr = cpufreq_generic_attr,
};
static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
const void *data;
int ret;
data = of_device_get_match_data(&pdev->dev);
if (!data)
return -EINVAL;
platform_set_drvdata(pdev, (void *) data);
cpufreq_mtk_hw_driver.driver_data = pdev;
ret = cpufreq_register_driver(&cpufreq_mtk_hw_driver);
if (ret)
dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n");
return ret;
}
static void mtk_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&cpufreq_mtk_hw_driver);
}
static const struct of_device_id mtk_cpufreq_hw_match[] = {
{ .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_offsets },
{}
};
MODULE_DEVICE_TABLE(of, mtk_cpufreq_hw_match);
static struct platform_driver mtk_cpufreq_hw_driver = {
.probe = mtk_cpufreq_hw_driver_probe,
.remove_new = mtk_cpufreq_hw_driver_remove,
.driver = {
.name = "mtk-cpufreq-hw",
.of_match_table = mtk_cpufreq_hw_match,
},
};
module_platform_driver(mtk_cpufreq_hw_driver);
MODULE_AUTHOR("Hector Yuan <[email protected]>");
MODULE_DESCRIPTION("Mediatek cpufreq-hw driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/cpufreq/mediatek-cpufreq-hw.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/cpufreq/cpufreq_conservative.c
*
* Copyright (C) 2001 Russell King
* (C) 2003 Venkatesh Pallipadi <[email protected]>.
* Jun Nakajima <[email protected]>
* (C) 2009 Alexander Clouter <[email protected]>
*/
#include <linux/slab.h>
#include "cpufreq_governor.h"
struct cs_policy_dbs_info {
struct policy_dbs_info policy_dbs;
unsigned int down_skip;
unsigned int requested_freq;
};
static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
{
return container_of(policy_dbs, struct cs_policy_dbs_info, policy_dbs);
}
struct cs_dbs_tuners {
unsigned int down_threshold;
unsigned int freq_step;
};
/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
#define DEF_FREQUENCY_STEP (5)
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (10)
static inline unsigned int get_freq_step(struct cs_dbs_tuners *cs_tuners,
struct cpufreq_policy *policy)
{
unsigned int freq_step = (cs_tuners->freq_step * policy->max) / 100;
/* max freq cannot be less than 100. But who knows... */
if (unlikely(freq_step == 0))
freq_step = DEF_FREQUENCY_STEP;
return freq_step;
}
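/*
* Worked example (illustrative values): with freq_step = 5 (the
* default) and policy->max = 2000000 kHz, the step is
* (5 * 2000000) / 100 = 100000 kHz, so each adjustment moves the
* requested frequency by 100 MHz.
*/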
/*
* Every sampling_rate we check whether the current idle time is less
* than 20% (default); if it is, we try to increase the frequency. Every
* sampling_rate * sampling_down_factor we check whether the current
* idle time is more than 80% (default); if it is, we try to decrease
* the frequency.
*
* Frequency updates happen in minimum steps of 5% (default) of the
* maximum frequency.
*/
static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
unsigned int requested_freq = dbs_info->requested_freq;
struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int load = dbs_update(policy);
unsigned int freq_step;
/*
* break out if we 'cannot' reduce the speed as the user might
* want freq_step to be zero
*/
if (cs_tuners->freq_step == 0)
goto out;
/*
* If requested_freq is out of range, it is likely that the limits
* changed in the meantime, so fall back to current frequency in that
* case.
*/
if (requested_freq > policy->max || requested_freq < policy->min) {
requested_freq = policy->cur;
dbs_info->requested_freq = requested_freq;
}
freq_step = get_freq_step(cs_tuners, policy);
/*
* Decrease requested_freq one freq_step for each idle period that
* we didn't update the frequency.
*/
if (policy_dbs->idle_periods < UINT_MAX) {
unsigned int freq_steps = policy_dbs->idle_periods * freq_step;
if (requested_freq > policy->min + freq_steps)
requested_freq -= freq_steps;
else
requested_freq = policy->min;
policy_dbs->idle_periods = UINT_MAX;
}
/* Check for frequency increase */
if (load > dbs_data->up_threshold) {
dbs_info->down_skip = 0;
/* if we are already at full speed then break out early */
if (requested_freq == policy->max)
goto out;
requested_freq += freq_step;
if (requested_freq > policy->max)
requested_freq = policy->max;
__cpufreq_driver_target(policy, requested_freq,
CPUFREQ_RELATION_HE);
dbs_info->requested_freq = requested_freq;
goto out;
}
/* if sampling_down_factor is active break out early */
if (++dbs_info->down_skip < dbs_data->sampling_down_factor)
goto out;
dbs_info->down_skip = 0;
/* Check for frequency decrease */
if (load < cs_tuners->down_threshold) {
/*
* if we cannot reduce the frequency anymore, break out early
*/
if (requested_freq == policy->min)
goto out;
if (requested_freq > freq_step)
requested_freq -= freq_step;
else
requested_freq = policy->min;
__cpufreq_driver_target(policy, requested_freq,
CPUFREQ_RELATION_LE);
dbs_info->requested_freq = requested_freq;
}
out:
return dbs_data->sampling_rate;
}
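/*
* Worked example (illustrative values): with up_threshold = 80,
* down_threshold = 20 and the 100 MHz step from above, a load of 90
* raises requested_freq by one step (clamped to policy->max), a load
* of 10 lowers it by one step once sampling_down_factor sampling
* periods have elapsed, and any load in between leaves the requested
* frequency untouched.
*/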
/************************** sysfs interface ************************/
static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
dbs_data->sampling_down_factor = input;
return count;
}
static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
return -EINVAL;
dbs_data->up_threshold = input;
return count;
}
static ssize_t down_threshold_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
/* cannot be lower than 1 otherwise freq will not fall */
if (ret != 1 || input < 1 || input > 100 ||
input >= dbs_data->up_threshold)
return -EINVAL;
cs_tuners->down_threshold = input;
return count;
}
static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
if (input > 1)
input = 1;
if (input == dbs_data->ignore_nice_load) /* nothing to do */
return count;
dbs_data->ignore_nice_load = input;
/* we need to re-evaluate prev_cpu_idle */
gov_update_cpu_data(dbs_data);
return count;
}
static ssize_t freq_step_store(struct gov_attr_set *attr_set, const char *buf,
size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
if (input > 100)
input = 100;
/*
* no need to test here if freq_step is zero as the user might actually
* want this, they would be crazy though :)
*/
cs_tuners->freq_step = input;
return count;
}
gov_show_one_common(sampling_rate);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(up_threshold);
gov_show_one_common(ignore_nice_load);
gov_show_one(cs, down_threshold);
gov_show_one(cs, freq_step);
gov_attr_rw(sampling_rate);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(up_threshold);
gov_attr_rw(ignore_nice_load);
gov_attr_rw(down_threshold);
gov_attr_rw(freq_step);
static struct attribute *cs_attrs[] = {
&sampling_rate.attr,
&sampling_down_factor.attr,
&up_threshold.attr,
&down_threshold.attr,
&ignore_nice_load.attr,
&freq_step.attr,
NULL
};
ATTRIBUTE_GROUPS(cs);
/************************** sysfs end ************************/
static struct policy_dbs_info *cs_alloc(void)
{
struct cs_policy_dbs_info *dbs_info;
dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
return dbs_info ? &dbs_info->policy_dbs : NULL;
}
static void cs_free(struct policy_dbs_info *policy_dbs)
{
kfree(to_dbs_info(policy_dbs));
}
static int cs_init(struct dbs_data *dbs_data)
{
struct cs_dbs_tuners *tuners;
tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
if (!tuners)
return -ENOMEM;
tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
tuners->freq_step = DEF_FREQUENCY_STEP;
dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
dbs_data->ignore_nice_load = 0;
dbs_data->tuners = tuners;
return 0;
}
static void cs_exit(struct dbs_data *dbs_data)
{
kfree(dbs_data->tuners);
}
static void cs_start(struct cpufreq_policy *policy)
{
struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
dbs_info->down_skip = 0;
dbs_info->requested_freq = policy->cur;
}
static struct dbs_governor cs_governor = {
.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"),
.kobj_type = { .default_groups = cs_groups },
.gov_dbs_update = cs_dbs_update,
.alloc = cs_alloc,
.free = cs_free,
.init = cs_init,
.exit = cs_exit,
.start = cs_start,
};
#define CPU_FREQ_GOV_CONSERVATIVE (cs_governor.gov)
MODULE_AUTHOR("Alexander Clouter <[email protected]>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
"Low Latency Frequency Transition capable processors "
"optimised for use in a battery environment");
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
struct cpufreq_governor *cpufreq_default_governor(void)
{
return &CPU_FREQ_GOV_CONSERVATIVE;
}
#endif
cpufreq_governor_init(CPU_FREQ_GOV_CONSERVATIVE);
cpufreq_governor_exit(CPU_FREQ_GOV_CONSERVATIVE);
| linux-master | drivers/cpufreq/cpufreq_conservative.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020 - 2022, NVIDIA CORPORATION. All rights reserved
*/
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/units.h>
#include <asm/smp_plat.h>
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#define KHZ 1000
#define REF_CLK_MHZ 408 /* 408 MHz */
#define US_DELAY 500
#define CPUFREQ_TBL_STEP_HZ (50 * KHZ * KHZ)
#define MAX_CNT ~0U
#define NDIV_MASK 0x1FF
#define CORE_OFFSET(cpu) ((cpu) * 8)
#define CMU_CLKS_BASE 0x2000
#define SCRATCH_FREQ_CORE_REG(data, cpu) (data->regs + CMU_CLKS_BASE + CORE_OFFSET(cpu))
#define MMCRAB_CLUSTER_BASE(cl) (0x30000 + ((cl) * 0x10000))
#define CLUSTER_ACTMON_BASE(data, cl) \
(data->regs + (MMCRAB_CLUSTER_BASE(cl) + data->soc->actmon_cntr_base))
#define CORE_ACTMON_CNTR_REG(data, cl, cpu) (CLUSTER_ACTMON_BASE(data, cl) + CORE_OFFSET(cpu))
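/*
* Worked example (illustrative values): for mpidr_id 5, the per-core
* frequency request register is regs + 0x2000 + 5 * 8 = regs + 0x2028.
* For cluster 1 with actmon_cntr_base = 0x9000, the activity-monitor
* counter of core 2 sits at regs + 0x30000 + 0x10000 + 0x9000 + 2 * 8
* = regs + 0x49010.
*/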
/* cpufreq transition latency */
#define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */
struct tegra_cpu_ctr {
u32 cpu;
u32 coreclk_cnt, last_coreclk_cnt;
u32 refclk_cnt, last_refclk_cnt;
};
struct read_counters_work {
struct work_struct work;
struct tegra_cpu_ctr c;
};
struct tegra_cpufreq_ops {
void (*read_counters)(struct tegra_cpu_ctr *c);
void (*set_cpu_ndiv)(struct cpufreq_policy *policy, u64 ndiv);
void (*get_cpu_cluster_id)(u32 cpu, u32 *cpuid, u32 *clusterid);
int (*get_cpu_ndiv)(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv);
};
struct tegra_cpufreq_soc {
struct tegra_cpufreq_ops *ops;
int maxcpus_per_cluster;
unsigned int num_clusters;
phys_addr_t actmon_cntr_base;
};
struct tegra194_cpufreq_data {
void __iomem *regs;
struct cpufreq_frequency_table **bpmp_luts;
const struct tegra_cpufreq_soc *soc;
bool icc_dram_bw_scaling;
};
static struct workqueue_struct *read_counters_wq;
static int tegra_cpufreq_set_bw(struct cpufreq_policy *policy, unsigned long freq_khz)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
struct dev_pm_opp *opp;
struct device *dev;
int ret;
dev = get_cpu_device(policy->cpu);
if (!dev)
return -ENODEV;
opp = dev_pm_opp_find_freq_exact(dev, freq_khz * KHZ, true);
if (IS_ERR(opp))
return PTR_ERR(opp);
ret = dev_pm_opp_set_opp(dev, opp);
if (ret)
data->icc_dram_bw_scaling = false;
dev_pm_opp_put(opp);
return ret;
}
static void tegra_get_cpu_mpidr(void *mpidr)
{
*((u64 *)mpidr) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
}
static void tegra234_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
{
u64 mpidr;
smp_call_function_single(cpu, tegra_get_cpu_mpidr, &mpidr, true);
if (cpuid)
*cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
if (clusterid)
*clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 2);
}
static int tegra234_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
void __iomem *freq_core_reg;
u64 mpidr_id;
/* use physical id to get address of per core frequency register */
mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);
*ndiv = readl(freq_core_reg) & NDIV_MASK;
return 0;
}
static void tegra234_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
void __iomem *freq_core_reg;
u32 cpu, cpuid, clusterid;
u64 mpidr_id;
for_each_cpu_and(cpu, policy->cpus, cpu_online_mask) {
data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);
/* use physical id to get address of per core frequency register */
mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);
writel(ndiv, freq_core_reg);
}
}
/*
* This register provides access to two counter values with a single
* 64-bit read. The counter values are used to determine the average
* actual frequency a core has run at over a period of time.
* [63:32] PLLP counter: Counts at fixed frequency (408 MHz)
* [31:0] Core clock counter: Counts on every core clock cycle
*/
static void tegra234_read_counters(struct tegra_cpu_ctr *c)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
void __iomem *actmon_reg;
u32 cpuid, clusterid;
u64 val;
data->soc->ops->get_cpu_cluster_id(c->cpu, &cpuid, &clusterid);
actmon_reg = CORE_ACTMON_CNTR_REG(data, clusterid, cpuid);
val = readq(actmon_reg);
c->last_refclk_cnt = upper_32_bits(val);
c->last_coreclk_cnt = lower_32_bits(val);
udelay(US_DELAY);
val = readq(actmon_reg);
c->refclk_cnt = upper_32_bits(val);
c->coreclk_cnt = lower_32_bits(val);
}
static struct tegra_cpufreq_ops tegra234_cpufreq_ops = {
.read_counters = tegra234_read_counters,
.get_cpu_cluster_id = tegra234_get_cpu_cluster_id,
.get_cpu_ndiv = tegra234_get_cpu_ndiv,
.set_cpu_ndiv = tegra234_set_cpu_ndiv,
};
static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
.ops = &tegra234_cpufreq_ops,
.actmon_cntr_base = 0x9000,
.maxcpus_per_cluster = 4,
.num_clusters = 3,
};
static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = {
.ops = &tegra234_cpufreq_ops,
.actmon_cntr_base = 0x4000,
.maxcpus_per_cluster = 8,
.num_clusters = 1,
};
static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
{
u64 mpidr;
smp_call_function_single(cpu, tegra_get_cpu_mpidr, &mpidr, true);
if (cpuid)
*cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
if (clusterid)
*clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
}
/*
* Read the per-core, read-only system register NVFREQ_FEEDBACK_EL1.
* The register provides frequency feedback information used to
* determine the average actual frequency a core has run at over
* a period of time.
* [31:0] PLLP counter: counts at fixed frequency (408 MHz)
* [63:32] Core clock counter: counts on every core clock cycle
* where the core is architecturally clocking
*/
static u64 read_freq_feedback(void)
{
u64 val = 0;
asm volatile("mrs %0, s3_0_c15_c0_5" : "=r" (val) : );
return val;
}
static inline u32 map_ndiv_to_freq(struct mrq_cpu_ndiv_limits_response
*nltbl, u16 ndiv)
{
return nltbl->ref_clk_hz / KHZ * ndiv / (nltbl->pdiv * nltbl->mdiv);
}
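/*
* Worked example (response values are assumptions): with ref_clk_hz =
* 408000000, pdiv = 1 and mdiv = 1, an ndiv of 4 maps to
* 408000000 / 1000 * 4 / 1 = 1632000 kHz, i.e. 1.632 GHz.
*/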
static void tegra194_read_counters(struct tegra_cpu_ctr *c)
{
u64 val;
val = read_freq_feedback();
c->last_refclk_cnt = lower_32_bits(val);
c->last_coreclk_cnt = upper_32_bits(val);
udelay(US_DELAY);
val = read_freq_feedback();
c->refclk_cnt = lower_32_bits(val);
c->coreclk_cnt = upper_32_bits(val);
}
static void tegra_read_counters(struct work_struct *work)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
struct read_counters_work *read_counters_work;
struct tegra_cpu_ctr *c;
/*
* The ref_clk_counter (32-bit counter) runs on a constant clock,
* pll_p (408 MHz).
* It takes 2 ^ 32 / 408 MHz to overflow the ref clk counter,
* i.e. 10526880 usec = ~10.527 sec.
*
* Likewise, the core_clk_counter (32-bit counter) runs on the core
* clock. It is synchronized to crab_clk (cpu_crab_clk), which runs
* at the cluster frequency. Assuming a max cluster clock of
* ~2000 MHz, it takes 2 ^ 32 / 2000 MHz = ~2.147 sec to overflow.
*/
read_counters_work = container_of(work, struct read_counters_work,
work);
c = &read_counters_work->c;
data->soc->ops->read_counters(c);
}
/*
* Return instantaneous cpu speed.
* The instantaneous frequency is calculated as follows:
* - Take a sample on every query of the frequency:
*   - Read the core and ref clock counters;
*   - Delay for X us;
*   - Read both cycle counters again;
* - Compute the frequency as the delta of the cycle counters divided
*   by the delay time, or equivalently by the delta of
*   ref_clk_counter converted to time.
* - Return Kcycles/second, i.e. the frequency in KHz.
*
* delta time period = x sec
*                   = delta ref_clk_counter / (408 * 10^6) sec
* freq in Hz = delta cycles / x sec
*            = (delta cycles * 408 * 10^6) / delta ref_clk_counter
* in KHz     = (delta cycles * 408 * 10^3) / delta ref_clk_counter
*
* @cpu - logical cpu whose freq is to be updated
* Returns freq in KHz on success, 0 if cpu is offline
*/
static unsigned int tegra194_calculate_speed(u32 cpu)
{
struct read_counters_work read_counters_work;
struct tegra_cpu_ctr c;
u32 delta_refcnt;
u32 delta_ccnt;
u32 rate_mhz;
/*
* udelay() is required to reconstruct cpu frequency over an
* observation window. Using workqueue to call udelay() with
* interrupts enabled.
*/
read_counters_work.c.cpu = cpu;
INIT_WORK_ONSTACK(&read_counters_work.work, tegra_read_counters);
queue_work_on(cpu, read_counters_wq, &read_counters_work.work);
flush_work(&read_counters_work.work);
c = read_counters_work.c;
if (c.coreclk_cnt < c.last_coreclk_cnt)
delta_ccnt = c.coreclk_cnt + (MAX_CNT - c.last_coreclk_cnt);
else
delta_ccnt = c.coreclk_cnt - c.last_coreclk_cnt;
if (!delta_ccnt)
return 0;
/* ref clock is 32 bits */
if (c.refclk_cnt < c.last_refclk_cnt)
delta_refcnt = c.refclk_cnt + (MAX_CNT - c.last_refclk_cnt);
else
delta_refcnt = c.refclk_cnt - c.last_refclk_cnt;
if (!delta_refcnt) {
pr_debug("cpufreq: %d is idle, delta_refcnt: 0\n", cpu);
return 0;
}
/* do the multiply in 64 bits: delta_ccnt * 408 can overflow 32 bits */
rate_mhz = ((u64)delta_ccnt * REF_CLK_MHZ) / delta_refcnt;
return (rate_mhz * KHZ); /* in KHz */
}
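/*
* Worked example (illustrative values): a 500 us window at 408 MHz
* gives delta_refcnt = 204000. If delta_ccnt = 1000000 over the same
* window, rate_mhz = (1000000 * 408) / 204000 = 2000, so the function
* returns 2000000 kHz.
*/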
static void tegra194_get_cpu_ndiv_sysreg(void *ndiv)
{
u64 ndiv_val;
asm volatile("mrs %0, s3_0_c15_c0_4" : "=r" (ndiv_val) : );
*(u64 *)ndiv = ndiv_val;
}
static int tegra194_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
{
return smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, ndiv, true);
}
static void tegra194_set_cpu_ndiv_sysreg(void *data)
{
u64 ndiv_val = *(u64 *)data;
asm volatile("msr s3_0_c15_c0_4, %0" : : "r" (ndiv_val));
}
static void tegra194_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv)
{
on_each_cpu_mask(policy->cpus, tegra194_set_cpu_ndiv_sysreg, &ndiv, true);
}
static unsigned int tegra194_get_speed(u32 cpu)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
struct cpufreq_frequency_table *pos;
u32 cpuid, clusterid;
unsigned int rate;
u64 ndiv;
int ret;
data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);
/* reconstruct actual cpu freq using counters */
rate = tegra194_calculate_speed(cpu);
/* get last written ndiv value */
ret = data->soc->ops->get_cpu_ndiv(cpu, cpuid, clusterid, &ndiv);
if (WARN_ON_ONCE(ret))
return rate;
/*
* If the reconstructed frequency has an acceptable delta from
* the last written value, then return the freq corresponding
* to the last written ndiv value from freq_table. This is
* done to return a consistent value.
*/
cpufreq_for_each_valid_entry(pos, data->bpmp_luts[clusterid]) {
if (pos->driver_data != ndiv)
continue;
if (abs(pos->frequency - rate) > 115200) {
pr_warn("cpufreq: cpu%d,cur:%u,set:%u,set ndiv:%llu\n",
cpu, rate, pos->frequency, ndiv);
} else {
rate = pos->frequency;
}
break;
}
return rate;
}
static int tegra_cpufreq_init_cpufreq_table(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *bpmp_lut,
struct cpufreq_frequency_table **opp_table)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
struct cpufreq_frequency_table *freq_table = NULL;
struct cpufreq_frequency_table *pos;
struct device *cpu_dev;
struct dev_pm_opp *opp;
unsigned long rate;
int ret, max_opps;
int j = 0;
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
pr_err("%s: failed to get cpu%d device\n", __func__, policy->cpu);
return -ENODEV;
}
/* Initialize OPP table mentioned in operating-points-v2 property in DT */
ret = dev_pm_opp_of_add_table_indexed(cpu_dev, 0);
if (!ret) {
max_opps = dev_pm_opp_get_opp_count(cpu_dev);
if (max_opps <= 0) {
dev_err(cpu_dev, "Failed to add OPPs\n");
return max_opps;
}
/* Disable all opps and cross-validate against LUT later */
for (rate = 0; ; rate++) {
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
if (IS_ERR(opp))
break;
dev_pm_opp_put(opp);
dev_pm_opp_disable(cpu_dev, rate);
}
} else {
dev_err(cpu_dev, "Invalid or empty opp table in device tree\n");
data->icc_dram_bw_scaling = false;
return ret;
}
freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_KERNEL);
if (!freq_table)
return -ENOMEM;
/*
* Cross-check the frequencies from the BPMP-FW LUT against the OPPs
* present in DT. Enable only those DT OPPs which are also present in
* the LUT.
*/
cpufreq_for_each_valid_entry(pos, bpmp_lut) {
opp = dev_pm_opp_find_freq_exact(cpu_dev, pos->frequency * KHZ, false);
if (IS_ERR(opp))
continue;
ret = dev_pm_opp_enable(cpu_dev, pos->frequency * KHZ);
if (ret < 0)
return ret;
freq_table[j].driver_data = pos->driver_data;
freq_table[j].frequency = pos->frequency;
j++;
}
freq_table[j].driver_data = pos->driver_data;
freq_table[j].frequency = CPUFREQ_TABLE_END;
*opp_table = &freq_table[0];
dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
return ret;
}
static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
int maxcpus_per_cluster = data->soc->maxcpus_per_cluster;
struct cpufreq_frequency_table *freq_table;
struct cpufreq_frequency_table *bpmp_lut;
u32 start_cpu, cpu;
u32 clusterid;
int ret;
data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid);
if (clusterid >= data->soc->num_clusters || !data->bpmp_luts[clusterid])
return -EINVAL;
start_cpu = rounddown(policy->cpu, maxcpus_per_cluster);
/* set same policy for all cpus in a cluster */
for (cpu = start_cpu; cpu < (start_cpu + maxcpus_per_cluster); cpu++) {
if (cpu_possible(cpu))
cpumask_set_cpu(cpu, policy->cpus);
}
policy->cpuinfo.transition_latency = TEGRA_CPUFREQ_TRANSITION_LATENCY;
bpmp_lut = data->bpmp_luts[clusterid];
if (data->icc_dram_bw_scaling) {
ret = tegra_cpufreq_init_cpufreq_table(policy, bpmp_lut, &freq_table);
if (!ret) {
policy->freq_table = freq_table;
return 0;
}
}
data->icc_dram_bw_scaling = false;
policy->freq_table = bpmp_lut;
pr_info("OPP tables missing from DT, EMC frequency scaling disabled\n");
return 0;
}
static int tegra194_cpufreq_online(struct cpufreq_policy *policy)
{
/* We did light-weight tear down earlier, nothing to do here */
return 0;
}
static int tegra194_cpufreq_offline(struct cpufreq_policy *policy)
{
/*
* Preserve policy->driver_data and don't free resources on light-weight
* tear down.
*/
return 0;
}
static int tegra194_cpufreq_exit(struct cpufreq_policy *policy)
{
struct device *cpu_dev = get_cpu_device(policy->cpu);
dev_pm_opp_remove_all_dynamic(cpu_dev);
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
return 0;
}
static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
struct cpufreq_frequency_table *tbl = policy->freq_table + index;
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
/*
* Each core writes its frequency request into a per-core register.
* All cores in a cluster then run at the same frequency, which is
* the maximum of the frequencies requested by the cores in that
* cluster.
*/
data->soc->ops->set_cpu_ndiv(policy, (u64)tbl->driver_data);
if (data->icc_dram_bw_scaling)
tegra_cpufreq_set_bw(policy, tbl->frequency);
return 0;
}
static struct cpufreq_driver tegra194_cpufreq_driver = {
.name = "tegra194",
.flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = tegra194_cpufreq_set_target,
.get = tegra194_get_speed,
.init = tegra194_cpufreq_init,
.exit = tegra194_cpufreq_exit,
.online = tegra194_cpufreq_online,
.offline = tegra194_cpufreq_offline,
.attr = cpufreq_generic_attr,
};
static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
.read_counters = tegra194_read_counters,
.get_cpu_cluster_id = tegra194_get_cpu_cluster_id,
.get_cpu_ndiv = tegra194_get_cpu_ndiv,
.set_cpu_ndiv = tegra194_set_cpu_ndiv,
};
static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
.ops = &tegra194_cpufreq_ops,
.maxcpus_per_cluster = 2,
.num_clusters = 4,
};
static void tegra194_cpufreq_free_resources(void)
{
destroy_workqueue(read_counters_wq);
}
static struct cpufreq_frequency_table *
tegra_cpufreq_bpmp_read_lut(struct platform_device *pdev, struct tegra_bpmp *bpmp,
unsigned int cluster_id)
{
struct cpufreq_frequency_table *freq_table;
struct mrq_cpu_ndiv_limits_response resp;
unsigned int num_freqs, ndiv, delta_ndiv;
struct mrq_cpu_ndiv_limits_request req;
struct tegra_bpmp_message msg;
u16 freq_table_step_size;
int err, index;
memset(&req, 0, sizeof(req));
req.cluster_id = cluster_id;
memset(&msg, 0, sizeof(msg));
msg.mrq = MRQ_CPU_NDIV_LIMITS;
msg.tx.data = &req;
msg.tx.size = sizeof(req);
msg.rx.data = &resp;
msg.rx.size = sizeof(resp);
err = tegra_bpmp_transfer(bpmp, &msg);
if (err)
return ERR_PTR(err);
if (msg.rx.ret == -BPMP_EINVAL) {
/* Cluster not available */
return NULL;
}
if (msg.rx.ret)
return ERR_PTR(-EINVAL);
/*
* Make sure frequency table step is a multiple of mdiv to match
* vhint table granularity.
*/
freq_table_step_size = resp.mdiv *
DIV_ROUND_UP(CPUFREQ_TBL_STEP_HZ, resp.ref_clk_hz);
dev_dbg(&pdev->dev, "cluster %d: frequency table step size: %d\n",
cluster_id, freq_table_step_size);
delta_ndiv = resp.ndiv_max - resp.ndiv_min;
if (unlikely(delta_ndiv == 0)) {
num_freqs = 1;
} else {
/* We store both ndiv_min and ndiv_max hence the +1 */
num_freqs = delta_ndiv / freq_table_step_size + 1;
}
num_freqs += (delta_ndiv % freq_table_step_size) ? 1 : 0;
freq_table = devm_kcalloc(&pdev->dev, num_freqs + 1,
sizeof(*freq_table), GFP_KERNEL);
if (!freq_table)
return ERR_PTR(-ENOMEM);
for (index = 0, ndiv = resp.ndiv_min;
ndiv < resp.ndiv_max;
index++, ndiv += freq_table_step_size) {
freq_table[index].driver_data = ndiv;
freq_table[index].frequency = map_ndiv_to_freq(&resp, ndiv);
}
freq_table[index].driver_data = resp.ndiv_max;
freq_table[index++].frequency = map_ndiv_to_freq(&resp, resp.ndiv_max);
freq_table[index].frequency = CPUFREQ_TABLE_END;
return freq_table;
}
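/*
* Worked example (response values are assumptions): with ref_clk_hz =
* 408000000 and mdiv = 1, freq_table_step_size = 1 *
* DIV_ROUND_UP(50 MHz, 408 MHz) = 1. For ndiv_min = 20 and ndiv_max =
* 100, delta_ndiv = 80, so num_freqs = 80 / 1 + 1 = 81 entries plus
* the CPUFREQ_TABLE_END sentinel.
*/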
static int tegra194_cpufreq_probe(struct platform_device *pdev)
{
const struct tegra_cpufreq_soc *soc;
struct tegra194_cpufreq_data *data;
struct tegra_bpmp *bpmp;
struct device *cpu_dev;
int err, i;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
soc = of_device_get_match_data(&pdev->dev);
if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters) {
data->soc = soc;
} else {
dev_err(&pdev->dev, "soc data missing\n");
return -EINVAL;
}
data->bpmp_luts = devm_kcalloc(&pdev->dev, data->soc->num_clusters,
sizeof(*data->bpmp_luts), GFP_KERNEL);
if (!data->bpmp_luts)
return -ENOMEM;
if (soc->actmon_cntr_base) {
/* mmio registers are used for frequency request and re-construction */
data->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->regs))
return PTR_ERR(data->regs);
}
platform_set_drvdata(pdev, data);
bpmp = tegra_bpmp_get(&pdev->dev);
if (IS_ERR(bpmp))
return PTR_ERR(bpmp);
read_counters_wq = alloc_workqueue("read_counters_wq", __WQ_LEGACY, 1);
if (!read_counters_wq) {
dev_err(&pdev->dev, "fail to create_workqueue\n");
err = -EINVAL;
goto put_bpmp;
}
for (i = 0; i < data->soc->num_clusters; i++) {
data->bpmp_luts[i] = tegra_cpufreq_bpmp_read_lut(pdev, bpmp, i);
if (IS_ERR(data->bpmp_luts[i])) {
err = PTR_ERR(data->bpmp_luts[i]);
goto err_free_res;
}
}
tegra194_cpufreq_driver.driver_data = data;
/* Check for optional OPPv2 and interconnect paths on CPU0 to enable ICC scaling */
cpu_dev = get_cpu_device(0);
if (!cpu_dev) {
err = -EPROBE_DEFER;
goto err_free_res;
}
if (dev_pm_opp_of_get_opp_desc_node(cpu_dev)) {
err = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
if (!err)
data->icc_dram_bw_scaling = true;
}
err = cpufreq_register_driver(&tegra194_cpufreq_driver);
if (!err)
goto put_bpmp;
err_free_res:
tegra194_cpufreq_free_resources();
put_bpmp:
tegra_bpmp_put(bpmp);
return err;
}
static void tegra194_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&tegra194_cpufreq_driver);
tegra194_cpufreq_free_resources();
}
static const struct of_device_id tegra194_cpufreq_of_match[] = {
{ .compatible = "nvidia,tegra194-ccplex", .data = &tegra194_cpufreq_soc },
{ .compatible = "nvidia,tegra234-ccplex-cluster", .data = &tegra234_cpufreq_soc },
{ .compatible = "nvidia,tegra239-ccplex-cluster", .data = &tegra239_cpufreq_soc },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra194_cpufreq_of_match);
static struct platform_driver tegra194_ccplex_driver = {
.driver = {
.name = "tegra194-cpufreq",
.of_match_table = tegra194_cpufreq_of_match,
},
.probe = tegra194_cpufreq_probe,
.remove_new = tegra194_cpufreq_remove,
};
module_platform_driver(tegra194_ccplex_driver);
MODULE_AUTHOR("Mikko Perttunen <[email protected]>");
MODULE_AUTHOR("Sumit Gupta <[email protected]>");
MODULE_DESCRIPTION("NVIDIA Tegra194 cpufreq driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/cpufreq/tegra194-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Match running platform with pre-defined OPP values for CPUFreq
*
* Author: Ajit Pal Singh <[email protected]>
* Lee Jones <[email protected]>
*
* Copyright (C) 2015 STMicroelectronics (R&D) Limited
*/
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regmap.h>
#define VERSION_ELEMENTS 3
#define MAX_PCODE_NAME_LEN 7
#define VERSION_SHIFT 28
#define HW_INFO_INDEX 1
#define MAJOR_ID_INDEX 1
#define MINOR_ID_INDEX 2
/*
* Only match on "suitable for ALL versions" entries
*
* This will be used with the BIT() macro. It sets the
* top bit of a 32bit value and is equal to 0x80000000.
*/
#define DEFAULT_VERSION 31
enum {
PCODE = 0,
SUBSTRATE,
DVFS_MAX_REGFIELDS,
};
/**
* struct sti_cpufreq_ddata - ST CPUFreq Driver Data
*
* @cpu: CPU's OF node
* @syscfg_eng: Engineering Syscon register map
* @syscfg: Syscon register map
*/
static struct sti_cpufreq_ddata {
struct device *cpu;
struct regmap *syscfg_eng;
struct regmap *syscfg;
} ddata;
static int sti_cpufreq_fetch_major(void)
{
struct device_node *np = ddata.cpu->of_node;
struct device *dev = ddata.cpu;
unsigned int major_offset;
unsigned int socid;
int ret;
ret = of_property_read_u32_index(np, "st,syscfg",
MAJOR_ID_INDEX, &major_offset);
if (ret) {
dev_err(dev, "No major number offset provided in %pOF [%d]\n",
np, ret);
return ret;
}
ret = regmap_read(ddata.syscfg, major_offset, &socid);
if (ret) {
dev_err(dev, "Failed to read major number from syscon [%d]\n",
ret);
return ret;
}
return ((socid >> VERSION_SHIFT) & 0xf) + 1;
}
static int sti_cpufreq_fetch_minor(void)
{
struct device *dev = ddata.cpu;
struct device_node *np = dev->of_node;
unsigned int minor_offset;
unsigned int minid;
int ret;
ret = of_property_read_u32_index(np, "st,syscfg-eng",
MINOR_ID_INDEX, &minor_offset);
if (ret) {
dev_err(dev,
"No minor number offset provided %pOF [%d]\n",
np, ret);
return ret;
}
ret = regmap_read(ddata.syscfg_eng, minor_offset, &minid);
if (ret) {
dev_err(dev,
"Failed to read the minor number from syscon [%d]\n",
ret);
return ret;
}
return minid & 0xf;
}
static int sti_cpufreq_fetch_regmap_field(const struct reg_field *reg_fields,
int hw_info_offset, int field)
{
struct regmap_field *regmap_field;
struct reg_field reg_field = reg_fields[field];
struct device *dev = ddata.cpu;
unsigned int value;
int ret;
reg_field.reg = hw_info_offset;
regmap_field = devm_regmap_field_alloc(dev,
ddata.syscfg_eng,
reg_field);
if (IS_ERR(regmap_field)) {
dev_err(dev, "Failed to allocate reg field\n");
return PTR_ERR(regmap_field);
}
ret = regmap_field_read(regmap_field, &value);
if (ret) {
dev_err(dev, "Failed to read %s code\n",
field ? "SUBSTRATE" : "PCODE");
return ret;
}
return value;
}
static const struct reg_field sti_stih407_dvfs_regfields[DVFS_MAX_REGFIELDS] = {
[PCODE] = REG_FIELD(0, 16, 19),
[SUBSTRATE] = REG_FIELD(0, 0, 2),
};
static const struct reg_field *sti_cpufreq_match(void)
{
if (of_machine_is_compatible("st,stih407") ||
of_machine_is_compatible("st,stih410") ||
of_machine_is_compatible("st,stih418"))
return sti_stih407_dvfs_regfields;
return NULL;
}
static int sti_cpufreq_set_opp_info(void)
{
struct device *dev = ddata.cpu;
struct device_node *np = dev->of_node;
const struct reg_field *reg_fields;
unsigned int hw_info_offset;
unsigned int version[VERSION_ELEMENTS];
int pcode, substrate, major, minor;
int opp_token, ret;
char name[MAX_PCODE_NAME_LEN];
struct dev_pm_opp_config config = {
.supported_hw = version,
.supported_hw_count = ARRAY_SIZE(version),
.prop_name = name,
};
reg_fields = sti_cpufreq_match();
if (!reg_fields) {
dev_err(dev, "This SoC doesn't support voltage scaling\n");
return -ENODEV;
}
ret = of_property_read_u32_index(np, "st,syscfg-eng",
HW_INFO_INDEX, &hw_info_offset);
if (ret) {
dev_warn(dev, "Failed to read HW info offset from DT\n");
substrate = DEFAULT_VERSION;
pcode = 0;
goto use_defaults;
}
pcode = sti_cpufreq_fetch_regmap_field(reg_fields,
hw_info_offset,
PCODE);
if (pcode < 0) {
dev_warn(dev, "Failed to obtain process code\n");
/* Use default pcode */
pcode = 0;
}
substrate = sti_cpufreq_fetch_regmap_field(reg_fields,
hw_info_offset,
SUBSTRATE);
if (substrate) {
dev_warn(dev, "Failed to obtain substrate code\n");
/* Use default substrate */
substrate = DEFAULT_VERSION;
}
use_defaults:
major = sti_cpufreq_fetch_major();
if (major < 0) {
dev_err(dev, "Failed to obtain major version\n");
/* Use default major number */
major = DEFAULT_VERSION;
}
minor = sti_cpufreq_fetch_minor();
if (minor < 0) {
dev_err(dev, "Failed to obtain minor version\n");
/* Use default minor number */
minor = DEFAULT_VERSION;
}
snprintf(name, MAX_PCODE_NAME_LEN, "pcode%d", pcode);
version[0] = BIT(major);
version[1] = BIT(minor);
version[2] = BIT(substrate);
opp_token = dev_pm_opp_set_config(dev, &config);
if (opp_token < 0) {
dev_err(dev, "Failed to set OPP config\n");
return opp_token;
}
dev_dbg(dev, "pcode: %d major: %d minor: %d substrate: %d\n",
pcode, major, minor, substrate);
dev_dbg(dev, "version[0]: %x version[1]: %x version[2]: %x\n",
version[0], version[1], version[2]);
return 0;
}
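/*
* Worked example (illustrative values): for pcode 0, major 2, minor 3
* and a failed substrate read, the driver matches OPPs against
* prop_name "pcode0" with version[0] = BIT(2) = 0x4, version[1] =
* BIT(3) = 0x8 and version[2] = BIT(31) = 0x80000000, i.e. the
* "suitable for ALL versions" substrate entry.
*/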
static int sti_cpufreq_fetch_syscon_registers(void)
{
struct device *dev = ddata.cpu;
struct device_node *np = dev->of_node;
ddata.syscfg = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
if (IS_ERR(ddata.syscfg)) {
dev_err(dev, "\"st,syscfg\" not supplied\n");
return PTR_ERR(ddata.syscfg);
}
ddata.syscfg_eng = syscon_regmap_lookup_by_phandle(np, "st,syscfg-eng");
if (IS_ERR(ddata.syscfg_eng)) {
dev_err(dev, "\"st,syscfg-eng\" not supplied\n");
return PTR_ERR(ddata.syscfg_eng);
}
return 0;
}
static int __init sti_cpufreq_init(void)
{
int ret;
if ((!of_machine_is_compatible("st,stih407")) &&
(!of_machine_is_compatible("st,stih410")) &&
(!of_machine_is_compatible("st,stih418")))
return -ENODEV;
ddata.cpu = get_cpu_device(0);
if (!ddata.cpu) {
dev_err(ddata.cpu, "Failed to get device for CPU0\n");
goto skip_voltage_scaling;
}
if (!of_get_property(ddata.cpu->of_node, "operating-points-v2", NULL)) {
dev_err(ddata.cpu, "OPP-v2 not supported\n");
goto skip_voltage_scaling;
}
ret = sti_cpufreq_fetch_syscon_registers();
if (ret)
goto skip_voltage_scaling;
ret = sti_cpufreq_set_opp_info();
if (!ret)
goto register_cpufreq_dt;
skip_voltage_scaling:
dev_err(ddata.cpu, "Not doing voltage scaling\n");
register_cpufreq_dt:
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
return 0;
}
module_init(sti_cpufreq_init);
static const struct of_device_id __maybe_unused sti_cpufreq_of_match[] = {
{ .compatible = "st,stih407" },
{ .compatible = "st,stih410" },
{ },
};
MODULE_DEVICE_TABLE(of, sti_cpufreq_of_match);
MODULE_DESCRIPTION("STMicroelectronics CPUFreq/OPP driver");
MODULE_AUTHOR("Ajitpal Singh <[email protected]>");
MODULE_AUTHOR("Lee Jones <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/cpufreq/sti-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2002 - 2003 Dominik Brodowski <[email protected]>
*
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/timex.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpu_device_id.h>
static struct cpufreq_driver longrun_driver;
/**
* longrun_{low,high}_freq is needed for the conversion of cpufreq kHz
* values into per cent values. In TMTA microcode, the following is valid:
* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
*/
static unsigned int longrun_low_freq, longrun_high_freq;
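/*
* Worked example (illustrative values): with longrun_low_freq = 300000
* kHz and longrun_high_freq = 1000000 kHz, a current frequency of
* 650000 kHz corresponds to a performance percentage of
* (650000 - 300000) / ((1000000 - 300000) / 100) = 50.
*/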
/**
* longrun_get_policy - get the current LongRun policy
* @policy: struct cpufreq_policy where current policy is written into
*
* Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS
* and MSR_TMTA_LONGRUN_CTRL
*/
static void longrun_get_policy(struct cpufreq_policy *policy)
{
u32 msr_lo, msr_hi;
rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
pr_debug("longrun flags are %x - %x\n", msr_lo, msr_hi);
if (msr_lo & 0x01)
policy->policy = CPUFREQ_POLICY_PERFORMANCE;
else
policy->policy = CPUFREQ_POLICY_POWERSAVE;
rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
pr_debug("longrun ctrl is %x - %x\n", msr_lo, msr_hi);
msr_lo &= 0x0000007F;
msr_hi &= 0x0000007F;
if (longrun_high_freq <= longrun_low_freq) {
/* Assume degenerate Longrun table */
policy->min = policy->max = longrun_high_freq;
} else {
policy->min = longrun_low_freq + msr_lo *
((longrun_high_freq - longrun_low_freq) / 100);
policy->max = longrun_low_freq + msr_hi *
((longrun_high_freq - longrun_low_freq) / 100);
}
policy->cpu = 0;
}
/**
* longrun_set_policy - sets a new CPUFreq policy
* @policy: new policy
*
* Sets a new CPUFreq policy on LongRun-capable processors. This function
* has to be called with cpufreq_driver locked.
*/
static int longrun_set_policy(struct cpufreq_policy *policy)
{
u32 msr_lo, msr_hi;
u32 pctg_lo, pctg_hi;
if (!policy)
return -EINVAL;
if (longrun_high_freq <= longrun_low_freq) {
/* Assume degenerate Longrun table */
pctg_lo = pctg_hi = 100;
} else {
pctg_lo = (policy->min - longrun_low_freq) /
((longrun_high_freq - longrun_low_freq) / 100);
pctg_hi = (policy->max - longrun_low_freq) /
((longrun_high_freq - longrun_low_freq) / 100);
}
if (pctg_hi > 100)
pctg_hi = 100;
if (pctg_lo > pctg_hi)
pctg_lo = pctg_hi;
/* performance or economy mode */
rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
msr_lo &= 0xFFFFFFFE;
switch (policy->policy) {
case CPUFREQ_POLICY_PERFORMANCE:
msr_lo |= 0x00000001;
break;
case CPUFREQ_POLICY_POWERSAVE:
break;
}
wrmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
/* lower and upper boundary */
rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
msr_lo &= 0xFFFFFF80;
msr_hi &= 0xFFFFFF80;
msr_lo |= pctg_lo;
msr_hi |= pctg_hi;
wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
return 0;
}
/**
* longrun_verify_policy - verifies a new CPUFreq policy
* @policy: the policy to verify
*
* Validates a new CPUFreq policy. This function has to be called with
* cpufreq_driver locked.
*/
static int longrun_verify_policy(struct cpufreq_policy_data *policy)
{
if (!policy)
return -EINVAL;
policy->cpu = 0;
cpufreq_verify_within_cpu_limits(policy);
return 0;
}
static unsigned int longrun_get(unsigned int cpu)
{
u32 eax, ebx, ecx, edx;
if (cpu)
return 0;
cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
pr_debug("cpuid eax is %u\n", eax);
return eax * 1000;
}
/**
* longrun_determine_freqs - determines the lowest and highest possible core frequency
* @low_freq: an int to put the lowest frequency into
* @high_freq: an int to put the highest frequency into
*
* Determines the lowest and highest possible core frequencies on this CPU.
* This is necessary to calculate the performance percentage according to
* TMTA rules:
* performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
*/
static int longrun_determine_freqs(unsigned int *low_freq,
unsigned int *high_freq)
{
u32 msr_lo, msr_hi;
u32 save_lo, save_hi;
u32 eax, ebx, ecx, edx;
u32 try_hi;
struct cpuinfo_x86 *c = &cpu_data(0);
if (!low_freq || !high_freq)
return -EINVAL;
if (cpu_has(c, X86_FEATURE_LRTI)) {
/* if the LongRun Table Interface is present, the
* detection is a bit easier:
* For minimum frequency, read out the maximum
* level (msr_hi), write that into "currently
* selected level", and read out the frequency.
* For maximum frequency, read out level zero.
*/
/* minimum */
rdmsr(MSR_TMTA_LRTI_READOUT, msr_lo, msr_hi);
wrmsr(MSR_TMTA_LRTI_READOUT, msr_hi, msr_hi);
rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
*low_freq = msr_lo * 1000; /* to kHz */
/* maximum */
wrmsr(MSR_TMTA_LRTI_READOUT, 0, msr_hi);
rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
*high_freq = msr_lo * 1000; /* to kHz */
pr_debug("longrun table interface told %u - %u kHz\n",
*low_freq, *high_freq);
if (*low_freq > *high_freq)
*low_freq = *high_freq;
return 0;
}
/* set the upper border to the value determined during TSC init */
*high_freq = (cpu_khz / 1000);
*high_freq = *high_freq * 1000;
pr_debug("high frequency is %u kHz\n", *high_freq);
/* get current borders */
rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
save_lo = msr_lo & 0x0000007F;
save_hi = msr_hi & 0x0000007F;
/* if current perf_pctg is larger than 90%, we need to decrease the
* upper limit to make the calculation more accurate.
*/
cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
/* try decreasing in 10% steps; some processors react only
* at certain threshold values */
for (try_hi = 80; try_hi > 0 && ecx > 90; try_hi -= 10) {
/* set to 0 to try_hi perf_pctg */
msr_lo &= 0xFFFFFF80;
msr_hi &= 0xFFFFFF80;
msr_hi |= try_hi;
wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
/* read out current core MHz and current perf_pctg */
cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
/* restore values */
wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi);
}
pr_debug("percentage is %u %%, freq is %u MHz\n", ecx, eax);
/* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
* equals
* low_freq * (1 - perf_pctg) = (cur_freq - high_freq * perf_pctg)
*
* high_freq * perf_pctg is stored temporarily into "ebx".
*/
ebx = (((cpu_khz / 1000) * ecx) / 100); /* to MHz */
if ((ecx > 95) || (ecx == 0) || (eax < ebx))
return -EIO;
edx = ((eax - ebx) * 100) / (100 - ecx);
*low_freq = edx * 1000; /* back to kHz */
pr_debug("low frequency is %u kHz\n", *low_freq);
if (*low_freq > *high_freq)
*low_freq = *high_freq;
return 0;
}
static int longrun_cpu_init(struct cpufreq_policy *policy)
{
int result = 0;
/* capability check */
if (policy->cpu != 0)
return -ENODEV;
/* detect low and high frequency */
result = longrun_determine_freqs(&longrun_low_freq, &longrun_high_freq);
if (result)
return result;
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = longrun_low_freq;
policy->cpuinfo.max_freq = longrun_high_freq;
longrun_get_policy(policy);
return 0;
}
static struct cpufreq_driver longrun_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = longrun_verify_policy,
.setpolicy = longrun_set_policy,
.get = longrun_get,
.init = longrun_cpu_init,
.name = "longrun",
};
static const struct x86_cpu_id longrun_ids[] = {
X86_MATCH_VENDOR_FEATURE(TRANSMETA, X86_FEATURE_LONGRUN, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, longrun_ids);
/**
* longrun_init - initializes the Transmeta Crusoe LongRun CPUFreq driver
*
* Initializes the LongRun support.
*/
static int __init longrun_init(void)
{
if (!x86_match_cpu(longrun_ids))
return -ENODEV;
return cpufreq_register_driver(&longrun_driver);
}
/**
* longrun_exit - unregisters LongRun support
*/
static void __exit longrun_exit(void)
{
cpufreq_unregister_driver(&longrun_driver);
}
MODULE_AUTHOR("Dominik Brodowski <[email protected]>");
MODULE_DESCRIPTION("LongRun driver for Transmeta Crusoe and "
"Efficeon processors.");
MODULE_LICENSE("GPL");
module_init(longrun_init);
module_exit(longrun_exit);
| linux-master | drivers/cpufreq/longrun.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/cpufreq/cpufreq_userspace.c
*
* Copyright (C) 2001 Russell King
* (C) 2002 - 2004 Dominik Brodowski <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
static DEFINE_MUTEX(userspace_mutex);
/**
* cpufreq_set - set the CPU frequency
* @policy: pointer to policy struct where freq is being set
* @freq: target frequency in kHz
*
* Sets the CPU frequency to freq.
*/
static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
{
int ret = -EINVAL;
unsigned int *setspeed = policy->governor_data;
pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
mutex_lock(&userspace_mutex);
if (!per_cpu(cpu_is_managed, policy->cpu))
goto err;
*setspeed = freq;
ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
err:
mutex_unlock(&userspace_mutex);
return ret;
}
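/*
* For illustration, once this governor is active the target frequency
* is driven entirely from user space through sysfs (the 1400000 kHz
* value below is an example):
*
*	# echo userspace > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
*	# echo 1400000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed
*
* The second write reaches cpufreq_set() via the store_setspeed hook.
*/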
static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
{
return sprintf(buf, "%u\n", policy->cur);
}
static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
{
unsigned int *setspeed;
setspeed = kzalloc(sizeof(*setspeed), GFP_KERNEL);
if (!setspeed)
return -ENOMEM;
policy->governor_data = setspeed;
return 0;
}
static void cpufreq_userspace_policy_exit(struct cpufreq_policy *policy)
{
mutex_lock(&userspace_mutex);
kfree(policy->governor_data);
policy->governor_data = NULL;
mutex_unlock(&userspace_mutex);
}
static int cpufreq_userspace_policy_start(struct cpufreq_policy *policy)
{
unsigned int *setspeed = policy->governor_data;
BUG_ON(!policy->cur);
pr_debug("started managing cpu %u\n", policy->cpu);
mutex_lock(&userspace_mutex);
per_cpu(cpu_is_managed, policy->cpu) = 1;
*setspeed = policy->cur;
mutex_unlock(&userspace_mutex);
return 0;
}
static void cpufreq_userspace_policy_stop(struct cpufreq_policy *policy)
{
unsigned int *setspeed = policy->governor_data;
pr_debug("managing cpu %u stopped\n", policy->cpu);
mutex_lock(&userspace_mutex);
per_cpu(cpu_is_managed, policy->cpu) = 0;
*setspeed = 0;
mutex_unlock(&userspace_mutex);
}
static void cpufreq_userspace_policy_limits(struct cpufreq_policy *policy)
{
unsigned int *setspeed = policy->governor_data;
mutex_lock(&userspace_mutex);
pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
policy->cpu, policy->min, policy->max, policy->cur, *setspeed);
if (policy->max < *setspeed)
__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
else if (policy->min > *setspeed)
__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
else
__cpufreq_driver_target(policy, *setspeed, CPUFREQ_RELATION_L);
mutex_unlock(&userspace_mutex);
}
static struct cpufreq_governor cpufreq_gov_userspace = {
.name = "userspace",
.init = cpufreq_userspace_policy_init,
.exit = cpufreq_userspace_policy_exit,
.start = cpufreq_userspace_policy_start,
.stop = cpufreq_userspace_policy_stop,
.limits = cpufreq_userspace_policy_limits,
.store_setspeed = cpufreq_set,
.show_setspeed = show_speed,
.owner = THIS_MODULE,
};
MODULE_AUTHOR("Dominik Brodowski <[email protected]>, "
"Russell King <[email protected]>");
MODULE_DESCRIPTION("CPUfreq policy governor 'userspace'");
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE
struct cpufreq_governor *cpufreq_default_governor(void)
{
return &cpufreq_gov_userspace;
}
#endif
cpufreq_governor_init(cpufreq_gov_userspace);
cpufreq_governor_exit(cpufreq_gov_userspace);
| linux-master | drivers/cpufreq/cpufreq_userspace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/cpufreq/cpufreq_governor.c
*
* CPUFREQ governors common code
*
* Copyright (C) 2001 Russell King
* (C) 2003 Venkatesh Pallipadi <[email protected]>.
* (C) 2003 Jun Nakajima <[email protected]>
* (C) 2009 Alexander Clouter <[email protected]>
* (c) 2012 Viresh Kumar <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>
#include "cpufreq_governor.h"
#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL (2 * TICK_NSEC / NSEC_PER_USEC)
static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);
static DEFINE_MUTEX(gov_dbs_data_mutex);
/* Common sysfs tunables */
/*
 * sampling_rate_store - update the sampling rate, effective immediately if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs.sampling_rate might not be enough. For example, if the original
 * sampling_rate was 1 second and the requested new rate is 10 ms because
 * the user wants an immediate reaction from the ondemand governor, the
 * governor might not pick up the change for up to 1 second. Thus, when
 * reducing the sampling rate, we need to make the new value effective
 * immediately.
*
* This must be called with dbs_data->mutex held, otherwise traversing
* policy_dbs_list isn't safe.
*/
ssize_t sampling_rate_store(struct gov_attr_set *attr_set, const char *buf,
size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct policy_dbs_info *policy_dbs;
unsigned int sampling_interval;
int ret;
ret = sscanf(buf, "%u", &sampling_interval);
if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
return -EINVAL;
dbs_data->sampling_rate = sampling_interval;
/*
* We are operating under dbs_data->mutex and so the list and its
* entries can't be freed concurrently.
*/
list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
mutex_lock(&policy_dbs->update_mutex);
/*
* On 32-bit architectures this may race with the
* sample_delay_ns read in dbs_update_util_handler(), but that
* really doesn't matter. If the read returns a value that's
* too big, the sample will be skipped, but the next invocation
* of dbs_update_util_handler() (when the update has been
* completed) will take a sample.
*
* If this runs in parallel with dbs_work_handler(), we may end
* up overwriting the sample_delay_ns value that it has just
* written, but it will be corrected next time a sample is
* taken, so it shouldn't be significant.
*/
gov_update_sample_delay(policy_dbs, 0);
mutex_unlock(&policy_dbs->update_mutex);
}
return count;
}
EXPORT_SYMBOL_GPL(sampling_rate_store);
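/*
 * Illustrative usage (a sketch, not part of this file): the handler above is
 * what makes a write to the governor's sysfs tunable take effect at the next
 * possible sample, e.g. for ondemand with system-wide tunables:
 *
 *   # echo 10000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
 *
 * The value is in microseconds, so this selects a 10 ms sampling interval.
 */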
/**
* gov_update_cpu_data - Update CPU load data.
* @dbs_data: Top-level governor data pointer.
*
* Update CPU load data for all CPUs in the domain governed by @dbs_data
* (that may be a single policy or a bunch of them if governor tunables are
* system-wide).
*
* Call under the @dbs_data mutex.
*/
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
struct policy_dbs_info *policy_dbs;
list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
unsigned int j;
for_each_cpu(j, policy_dbs->policy->cpus) {
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
dbs_data->io_is_busy);
if (dbs_data->ignore_nice_load)
j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
}
}
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);
unsigned int dbs_update(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct dbs_data *dbs_data = policy_dbs->dbs_data;
unsigned int ignore_nice = dbs_data->ignore_nice_load;
unsigned int max_load = 0, idle_periods = UINT_MAX;
unsigned int sampling_rate, io_busy, j;
/*
* Sometimes governors may use an additional multiplier to increase
* sample delays temporarily. Apply that multiplier to sampling_rate
* so as to keep the wake-up-from-idle detection logic a bit
* conservative.
*/
sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
/*
 * For the purposes of ondemand, waiting for disk IO is an indication
 * that the system is performance-critical rather than actually idle,
 * so iowait time is not added to the CPU idle time in that case.
*/
io_busy = dbs_data->io_is_busy;
/* Get Absolute Load */
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
u64 update_time, cur_idle_time;
unsigned int idle_time, time_elapsed;
unsigned int load;
cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);
time_elapsed = update_time - j_cdbs->prev_update_time;
j_cdbs->prev_update_time = update_time;
idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
j_cdbs->prev_cpu_idle = cur_idle_time;
if (ignore_nice) {
u64 cur_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
j_cdbs->prev_cpu_nice = cur_nice;
}
if (unlikely(!time_elapsed)) {
/*
* That can only happen when this function is called
* twice in a row with a very short interval between the
* calls, so the previous load value can be used then.
*/
load = j_cdbs->prev_load;
} else if (unlikely((int)idle_time > 2 * sampling_rate &&
j_cdbs->prev_load)) {
/*
* If the CPU had gone completely idle and a task has
* just woken up on this CPU now, it would be unfair to
* calculate 'load' the usual way for this elapsed
* time-window, because it would show near-zero load,
* irrespective of how CPU intensive that task actually
* was. This is undesirable for latency-sensitive bursty
* workloads.
*
* To avoid this, reuse the 'load' from the previous
* time-window and give this task a chance to start with
* a reasonably high CPU frequency. However, that
* shouldn't be over-done, lest we get stuck at a high
* load (high frequency) for too long, even when the
* current system load has actually dropped down, so
* clear prev_load to guarantee that the load will be
* computed again next time.
*
* Detecting this situation is easy: an unusually large
* 'idle_time' (as compared to the sampling rate)
* indicates this scenario.
*/
load = j_cdbs->prev_load;
j_cdbs->prev_load = 0;
} else {
if (time_elapsed >= idle_time) {
load = 100 * (time_elapsed - idle_time) / time_elapsed;
} else {
/*
* That can happen if idle_time is returned by
* get_cpu_idle_time_jiffy(). In that case
* idle_time is roughly equal to the difference
* between time_elapsed and "busy time" obtained
* from CPU statistics. Then, the "busy time"
* can end up being greater than time_elapsed
* (for example, if jiffies_64 and the CPU
* statistics are updated by different CPUs),
* so idle_time may in fact be negative. That
* means, though, that the CPU was busy all
* the time (on the rough average) during the
* last sampling interval and 100 can be
* returned as the load.
*/
load = (int)idle_time < 0 ? 100 : 0;
}
j_cdbs->prev_load = load;
}
if (unlikely((int)idle_time > 2 * sampling_rate)) {
unsigned int periods = idle_time / sampling_rate;
if (periods < idle_periods)
idle_periods = periods;
}
if (load > max_load)
max_load = load;
}
policy_dbs->idle_periods = idle_periods;
return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);
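/*
 * Worked example for the load computation above (illustrative numbers only):
 * with time_elapsed = 10000 us and idle_time = 2500 us,
 *
 *   load = 100 * (10000 - 2500) / 10000 = 75
 *
 * i.e. the CPU was busy for 75% of the elapsed window, and 75 is the value
 * that competes for max_load.
 */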
static void dbs_work_handler(struct work_struct *work)
{
struct policy_dbs_info *policy_dbs;
struct cpufreq_policy *policy;
struct dbs_governor *gov;
policy_dbs = container_of(work, struct policy_dbs_info, work);
policy = policy_dbs->policy;
gov = dbs_governor_of(policy);
/*
* Make sure cpufreq_governor_limits() isn't evaluating load or the
* ondemand governor isn't updating the sampling rate in parallel.
*/
mutex_lock(&policy_dbs->update_mutex);
gov_update_sample_delay(policy_dbs, gov->gov_dbs_update(policy));
mutex_unlock(&policy_dbs->update_mutex);
/* Allow the utilization update handler to queue up more work. */
atomic_set(&policy_dbs->work_count, 0);
/*
* If the update below is reordered with respect to the sample delay
* modification, the utilization update handler may end up using a stale
* sample delay value.
*/
smp_wmb();
policy_dbs->work_in_progress = false;
}
static void dbs_irq_work(struct irq_work *irq_work)
{
struct policy_dbs_info *policy_dbs;
policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
schedule_work_on(smp_processor_id(), &policy_dbs->work);
}
static void dbs_update_util_handler(struct update_util_data *data, u64 time,
unsigned int flags)
{
struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
u64 delta_ns, lst;
if (!cpufreq_this_cpu_can_update(policy_dbs->policy))
return;
/*
* The work may not be allowed to be queued up right now.
* Possible reasons:
* - Work has already been queued up or is in progress.
* - It is too early (too little time from the previous sample).
*/
if (policy_dbs->work_in_progress)
return;
/*
* If the reads below are reordered before the check above, the value
* of sample_delay_ns used in the computation may be stale.
*/
smp_rmb();
lst = READ_ONCE(policy_dbs->last_sample_time);
delta_ns = time - lst;
if ((s64)delta_ns < policy_dbs->sample_delay_ns)
return;
/*
* If the policy is not shared, the irq_work may be queued up right away
* at this point. Otherwise, we need to ensure that only one of the
* CPUs sharing the policy will do that.
*/
if (policy_dbs->is_shared) {
if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
return;
/*
* If another CPU updated last_sample_time in the meantime, we
* shouldn't be here, so clear the work counter and bail out.
*/
if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
atomic_set(&policy_dbs->work_count, 0);
return;
}
}
policy_dbs->last_sample_time = time;
policy_dbs->work_in_progress = true;
irq_work_queue(&policy_dbs->irq_work);
}
static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
unsigned int delay_us)
{
struct cpufreq_policy *policy = policy_dbs->policy;
int cpu;
gov_update_sample_delay(policy_dbs, delay_us);
policy_dbs->last_sample_time = 0;
for_each_cpu(cpu, policy->cpus) {
struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
dbs_update_util_handler);
}
}
static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
int i;
for_each_cpu(i, policy->cpus)
cpufreq_remove_update_util_hook(i);
synchronize_rcu();
}
static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
struct dbs_governor *gov)
{
struct policy_dbs_info *policy_dbs;
int j;
/* Allocate memory for per-policy governor data. */
policy_dbs = gov->alloc();
if (!policy_dbs)
return NULL;
policy_dbs->policy = policy;
mutex_init(&policy_dbs->update_mutex);
atomic_set(&policy_dbs->work_count, 0);
init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
INIT_WORK(&policy_dbs->work, dbs_work_handler);
/* Set policy_dbs for all CPUs, online+offline */
for_each_cpu(j, policy->related_cpus) {
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
j_cdbs->policy_dbs = policy_dbs;
}
return policy_dbs;
}
static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
struct dbs_governor *gov)
{
int j;
mutex_destroy(&policy_dbs->update_mutex);
for_each_cpu(j, policy_dbs->policy->related_cpus) {
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
j_cdbs->policy_dbs = NULL;
j_cdbs->update_util.func = NULL;
}
gov->free(policy_dbs);
}
static void cpufreq_dbs_data_release(struct kobject *kobj)
{
struct dbs_data *dbs_data = to_dbs_data(to_gov_attr_set(kobj));
struct dbs_governor *gov = dbs_data->gov;
gov->exit(dbs_data);
kfree(dbs_data);
}
int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
{
struct dbs_governor *gov = dbs_governor_of(policy);
struct dbs_data *dbs_data;
struct policy_dbs_info *policy_dbs;
int ret = 0;
/* State should be equivalent to EXIT */
if (policy->governor_data)
return -EBUSY;
policy_dbs = alloc_policy_dbs_info(policy, gov);
if (!policy_dbs)
return -ENOMEM;
/* Protect gov->gdbs_data against concurrent updates. */
mutex_lock(&gov_dbs_data_mutex);
dbs_data = gov->gdbs_data;
if (dbs_data) {
if (WARN_ON(have_governor_per_policy())) {
ret = -EINVAL;
goto free_policy_dbs_info;
}
policy_dbs->dbs_data = dbs_data;
policy->governor_data = policy_dbs;
gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
goto out;
}
dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
if (!dbs_data) {
ret = -ENOMEM;
goto free_policy_dbs_info;
}
dbs_data->gov = gov;
gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);
ret = gov->init(dbs_data);
if (ret)
goto free_dbs_data;
/*
* The sampling interval should not be less than the transition latency
* of the CPU and it also cannot be too small for dbs_update() to work
* correctly.
*/
dbs_data->sampling_rate = max_t(unsigned int,
CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
cpufreq_policy_transition_delay_us(policy));
if (!have_governor_per_policy())
gov->gdbs_data = dbs_data;
policy_dbs->dbs_data = dbs_data;
policy->governor_data = policy_dbs;
gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
gov->kobj_type.release = cpufreq_dbs_data_release;
ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
get_governor_parent_kobj(policy),
"%s", gov->gov.name);
if (!ret)
goto out;
/* Failure, so roll back. */
pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);
kobject_put(&dbs_data->attr_set.kobj);
policy->governor_data = NULL;
if (!have_governor_per_policy())
gov->gdbs_data = NULL;
gov->exit(dbs_data);
free_dbs_data:
kfree(dbs_data);
free_policy_dbs_info:
free_policy_dbs_info(policy_dbs, gov);
out:
mutex_unlock(&gov_dbs_data_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);
void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
{
struct dbs_governor *gov = dbs_governor_of(policy);
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct dbs_data *dbs_data = policy_dbs->dbs_data;
unsigned int count;
/* Protect gov->gdbs_data against concurrent updates. */
mutex_lock(&gov_dbs_data_mutex);
count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);
policy->governor_data = NULL;
if (!count && !have_governor_per_policy())
gov->gdbs_data = NULL;
free_policy_dbs_info(policy_dbs, gov);
mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);
int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
{
struct dbs_governor *gov = dbs_governor_of(policy);
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct dbs_data *dbs_data = policy_dbs->dbs_data;
unsigned int sampling_rate, ignore_nice, j;
unsigned int io_busy;
if (!policy->cur)
return -EINVAL;
policy_dbs->is_shared = policy_is_shared(policy);
policy_dbs->rate_mult = 1;
sampling_rate = dbs_data->sampling_rate;
ignore_nice = dbs_data->ignore_nice_load;
io_busy = dbs_data->io_is_busy;
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
/*
* Make the first invocation of dbs_update() compute the load.
*/
j_cdbs->prev_load = 0;
if (ignore_nice)
j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
}
gov->start(policy);
gov_set_update_util(policy_dbs, sampling_rate);
return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);
void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
gov_clear_update_util(policy_dbs->policy);
irq_work_sync(&policy_dbs->irq_work);
cancel_work_sync(&policy_dbs->work);
atomic_set(&policy_dbs->work_count, 0);
policy_dbs->work_in_progress = false;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs;
/* Protect gov->gdbs_data against cpufreq_dbs_governor_exit() */
mutex_lock(&gov_dbs_data_mutex);
policy_dbs = policy->governor_data;
if (!policy_dbs)
goto out;
mutex_lock(&policy_dbs->update_mutex);
cpufreq_policy_apply_limits(policy);
gov_update_sample_delay(policy_dbs, 0);
mutex_unlock(&policy_dbs->update_mutex);
out:
mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
| linux-master | drivers/cpufreq/cpufreq_governor.c |
/*
* CPU frequency scaling for Broadcom SoCs with AVS firmware that
* supports DVS or DVFS
*
* Copyright (c) 2016 Broadcom
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* "AVS" is the name of a firmware developed at Broadcom. It derives
* its name from the technique called "Adaptive Voltage Scaling".
* Adaptive voltage scaling was the original purpose of this firmware.
* The AVS firmware still supports "AVS mode", where all it does is
* adaptive voltage scaling. However, on some newer Broadcom SoCs, the
* AVS Firmware, despite its unchanged name, also supports DFS mode and
* DVFS mode.
*
* In the context of this document and the related driver, "AVS" by
* itself always means the Broadcom firmware and never refers to the
* technique called "Adaptive Voltage Scaling".
*
* The Broadcom STB AVS CPUfreq driver provides voltage and frequency
* scaling on Broadcom SoCs using AVS firmware with support for DFS and
* DVFS. The AVS firmware is running on its own co-processor. The
* driver supports both uniprocessor (UP) and symmetric multiprocessor
* (SMP) systems which share clock and voltage across all CPUs.
*
* Actual voltage and frequency scaling is done solely by the AVS
* firmware. This driver does not change frequency or voltage itself.
* It provides a standard CPUfreq interface to the rest of the kernel
* and to userland. It interfaces with the AVS firmware to effect the
* requested changes and to report back the current system status in a
* way that is expected by existing tools.
*/
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/semaphore.h>
/* Max number of arguments AVS calls take */
#define AVS_MAX_CMD_ARGS 4
/*
* This macro is used to generate AVS parameter register offsets. For
* x >= AVS_MAX_CMD_ARGS, it returns 0 to protect against accidental memory
* access outside of the parameter range. (Offset 0 is the first parameter.)
*/
#define AVS_PARAM_MULT(x) ((x) < AVS_MAX_CMD_ARGS ? (x) : 0)
/* AVS Mailbox Register offsets */
#define AVS_MBOX_COMMAND 0x00
#define AVS_MBOX_STATUS 0x04
#define AVS_MBOX_VOLTAGE0 0x08
#define AVS_MBOX_TEMP0 0x0c
#define AVS_MBOX_PV0 0x10
#define AVS_MBOX_MV0 0x14
#define AVS_MBOX_PARAM(x) (0x18 + AVS_PARAM_MULT(x) * sizeof(u32))
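/*
 * For illustration: with AVS_MAX_CMD_ARGS = 4, AVS_MBOX_PARAM(x) evaluates
 * to 0x18, 0x1c, 0x20 and 0x24 for x = 0..3. Any out-of-range x falls back
 * to offset 0x18 because AVS_PARAM_MULT() clamps it to 0.
 */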
#define AVS_MBOX_REVISION 0x28
#define AVS_MBOX_PSTATE 0x2c
#define AVS_MBOX_HEARTBEAT 0x30
#define AVS_MBOX_MAGIC 0x34
#define AVS_MBOX_SIGMA_HVT 0x38
#define AVS_MBOX_SIGMA_SVT 0x3c
#define AVS_MBOX_VOLTAGE1 0x40
#define AVS_MBOX_TEMP1 0x44
#define AVS_MBOX_PV1 0x48
#define AVS_MBOX_MV1 0x4c
#define AVS_MBOX_FREQUENCY 0x50
/* AVS Commands */
#define AVS_CMD_AVAILABLE 0x00
#define AVS_CMD_DISABLE 0x10
#define AVS_CMD_ENABLE 0x11
#define AVS_CMD_S2_ENTER 0x12
#define AVS_CMD_S2_EXIT 0x13
#define AVS_CMD_BBM_ENTER 0x14
#define AVS_CMD_BBM_EXIT 0x15
#define AVS_CMD_S3_ENTER 0x16
#define AVS_CMD_S3_EXIT 0x17
#define AVS_CMD_BALANCE 0x18
/* PMAP and P-STATE commands */
#define AVS_CMD_GET_PMAP 0x30
#define AVS_CMD_SET_PMAP 0x31
#define AVS_CMD_GET_PSTATE 0x40
#define AVS_CMD_SET_PSTATE 0x41
/* Different modes AVS supports (for GET_PMAP/SET_PMAP) */
#define AVS_MODE_AVS 0x0
#define AVS_MODE_DFS 0x1
#define AVS_MODE_DVS 0x2
#define AVS_MODE_DVFS 0x3
/*
* PMAP parameter p1
* unused:31-24, mdiv_p0:23-16, unused:15-14, pdiv:13-10 , ndiv_int:9-0
*/
#define NDIV_INT_SHIFT 0
#define NDIV_INT_MASK 0x3ff
#define PDIV_SHIFT 10
#define PDIV_MASK 0xf
#define MDIV_P0_SHIFT 16
#define MDIV_P0_MASK 0xff
/*
* PMAP parameter p2
* mdiv_p4:31-24, mdiv_p3:23-16, mdiv_p2:15:8, mdiv_p1:7:0
*/
#define MDIV_P1_SHIFT 0
#define MDIV_P1_MASK 0xff
#define MDIV_P2_SHIFT 8
#define MDIV_P2_MASK 0xff
#define MDIV_P3_SHIFT 16
#define MDIV_P3_MASK 0xff
#define MDIV_P4_SHIFT 24
#define MDIV_P4_MASK 0xff
/* Different P-STATES AVS supports (for GET_PSTATE/SET_PSTATE) */
#define AVS_PSTATE_P0 0x0
#define AVS_PSTATE_P1 0x1
#define AVS_PSTATE_P2 0x2
#define AVS_PSTATE_P3 0x3
#define AVS_PSTATE_P4 0x4
#define AVS_PSTATE_MAX AVS_PSTATE_P4
/* CPU L2 Interrupt Controller Registers */
#define AVS_CPU_L2_SET0 0x04
#define AVS_CPU_L2_INT_MASK BIT(31)
/* AVS Command Status Values */
#define AVS_STATUS_CLEAR 0x00
/* Command/notification accepted */
#define AVS_STATUS_SUCCESS 0xf0
/* Command/notification rejected */
#define AVS_STATUS_FAILURE 0xff
/* Invalid command/notification (unknown) */
#define AVS_STATUS_INVALID 0xf1
/* Non-AVS modes are not supported */
#define AVS_STATUS_NO_SUPP 0xf2
/* Cannot set P-State until P-Map supplied */
#define AVS_STATUS_NO_MAP 0xf3
/* Cannot change P-Map after initial P-Map set */
#define AVS_STATUS_MAP_SET 0xf4
/* Max AVS status; higher numbers are used for debugging */
#define AVS_STATUS_MAX 0xff
/* Other AVS related constants */
#define AVS_LOOP_LIMIT 10000
#define AVS_TIMEOUT 300 /* in ms; expected completion is < 10ms */
#define AVS_FIRMWARE_MAGIC 0xa11600d1
#define BRCM_AVS_CPUFREQ_PREFIX "brcmstb-avs"
#define BRCM_AVS_CPUFREQ_NAME BRCM_AVS_CPUFREQ_PREFIX "-cpufreq"
#define BRCM_AVS_CPU_DATA "brcm,avs-cpu-data-mem"
#define BRCM_AVS_CPU_INTR "brcm,avs-cpu-l2-intr"
#define BRCM_AVS_HOST_INTR "sw_intr"
struct pmap {
unsigned int mode;
unsigned int p1;
unsigned int p2;
unsigned int state;
};
struct private_data {
void __iomem *base;
void __iomem *avs_intr_base;
struct device *dev;
struct completion done;
struct semaphore sem;
struct pmap pmap;
int host_irq;
};
static void __iomem *__map_region(const char *name)
{
struct device_node *np;
void __iomem *ptr;
np = of_find_compatible_node(NULL, NULL, name);
if (!np)
return NULL;
ptr = of_iomap(np, 0);
of_node_put(np);
return ptr;
}
static unsigned long wait_for_avs_command(struct private_data *priv,
unsigned long timeout)
{
unsigned long time_left = 0;
u32 val;
/* Event driven, wait for the command interrupt */
if (priv->host_irq >= 0)
return wait_for_completion_timeout(&priv->done,
msecs_to_jiffies(timeout));
/* Polling for command completion */
do {
time_left = timeout;
val = readl(priv->base + AVS_MBOX_STATUS);
if (val)
break;
usleep_range(1000, 2000);
} while (--timeout);
return time_left;
}
static int __issue_avs_command(struct private_data *priv, unsigned int cmd,
unsigned int num_in, unsigned int num_out,
u32 args[])
{
void __iomem *base = priv->base;
unsigned long time_left;
unsigned int i;
int ret;
u32 val;
ret = down_interruptible(&priv->sem);
if (ret)
return ret;
/*
* Make sure no other command is currently running: cmd is 0 if AVS
* co-processor is idle. Due to the guard above, we should almost never
* have to wait here.
*/
for (i = 0, val = 1; val != 0 && i < AVS_LOOP_LIMIT; i++)
val = readl(base + AVS_MBOX_COMMAND);
/* Give the caller a chance to retry if AVS is busy. */
if (i == AVS_LOOP_LIMIT) {
ret = -EAGAIN;
goto out;
}
/* Clear status before we begin. */
writel(AVS_STATUS_CLEAR, base + AVS_MBOX_STATUS);
/* Provide input parameters */
for (i = 0; i < num_in; i++)
writel(args[i], base + AVS_MBOX_PARAM(i));
/* Protect from spurious interrupts. */
reinit_completion(&priv->done);
/* Now issue the command & tell firmware to wake up to process it. */
writel(cmd, base + AVS_MBOX_COMMAND);
writel(AVS_CPU_L2_INT_MASK, priv->avs_intr_base + AVS_CPU_L2_SET0);
/* Wait for AVS co-processor to finish processing the command. */
time_left = wait_for_avs_command(priv, AVS_TIMEOUT);
/*
* If the AVS status is not in the expected range, it means AVS didn't
* complete our command in time, and we return an error. Also, if there
* is no "time left", we timed out waiting for the interrupt.
*/
val = readl(base + AVS_MBOX_STATUS);
if (time_left == 0 || val == 0 || val > AVS_STATUS_MAX) {
dev_err(priv->dev, "AVS command %#x didn't complete in time\n",
cmd);
dev_err(priv->dev, " Time left: %u ms, AVS status: %#x\n",
jiffies_to_msecs(time_left), val);
ret = -ETIMEDOUT;
goto out;
}
/* Process returned values */
for (i = 0; i < num_out; i++)
args[i] = readl(base + AVS_MBOX_PARAM(i));
/* Clear status to tell AVS co-processor we are done. */
writel(AVS_STATUS_CLEAR, base + AVS_MBOX_STATUS);
/* Convert firmware errors to errno's as much as possible. */
switch (val) {
case AVS_STATUS_INVALID:
ret = -EINVAL;
break;
case AVS_STATUS_NO_SUPP:
ret = -ENOTSUPP;
break;
case AVS_STATUS_NO_MAP:
ret = -ENOENT;
break;
case AVS_STATUS_MAP_SET:
ret = -EEXIST;
break;
case AVS_STATUS_FAILURE:
ret = -EIO;
break;
}
out:
up(&priv->sem);
return ret;
}
static irqreturn_t irq_handler(int irq, void *data)
{
struct private_data *priv = data;
/* AVS command completed execution. Wake up __issue_avs_command(). */
complete(&priv->done);
return IRQ_HANDLED;
}
static char *brcm_avs_mode_to_string(unsigned int mode)
{
switch (mode) {
case AVS_MODE_AVS:
return "AVS";
case AVS_MODE_DFS:
return "DFS";
case AVS_MODE_DVS:
return "DVS";
case AVS_MODE_DVFS:
return "DVFS";
}
return NULL;
}
static void brcm_avs_parse_p1(u32 p1, unsigned int *mdiv_p0, unsigned int *pdiv,
unsigned int *ndiv)
{
*mdiv_p0 = (p1 >> MDIV_P0_SHIFT) & MDIV_P0_MASK;
*pdiv = (p1 >> PDIV_SHIFT) & PDIV_MASK;
*ndiv = (p1 >> NDIV_INT_SHIFT) & NDIV_INT_MASK;
}
static void brcm_avs_parse_p2(u32 p2, unsigned int *mdiv_p1,
unsigned int *mdiv_p2, unsigned int *mdiv_p3,
unsigned int *mdiv_p4)
{
*mdiv_p4 = (p2 >> MDIV_P4_SHIFT) & MDIV_P4_MASK;
*mdiv_p3 = (p2 >> MDIV_P3_SHIFT) & MDIV_P3_MASK;
*mdiv_p2 = (p2 >> MDIV_P2_SHIFT) & MDIV_P2_MASK;
*mdiv_p1 = (p2 >> MDIV_P1_SHIFT) & MDIV_P1_MASK;
}
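/*
 * Example decode (illustrative value only): p1 = 0x00120c34 yields
 * mdiv_p0 = 0x12, pdiv = 0x3 and ndiv = 0x34, following the PMAP p1 bit
 * layout documented with the *_SHIFT/*_MASK definitions above.
 */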
static int brcm_avs_get_pmap(struct private_data *priv, struct pmap *pmap)
{
u32 args[AVS_MAX_CMD_ARGS];
int ret;
ret = __issue_avs_command(priv, AVS_CMD_GET_PMAP, 0, 4, args);
if (ret || !pmap)
return ret;
pmap->mode = args[0];
pmap->p1 = args[1];
pmap->p2 = args[2];
pmap->state = args[3];
return 0;
}
static int brcm_avs_set_pmap(struct private_data *priv, struct pmap *pmap)
{
u32 args[AVS_MAX_CMD_ARGS];
args[0] = pmap->mode;
args[1] = pmap->p1;
args[2] = pmap->p2;
args[3] = pmap->state;
return __issue_avs_command(priv, AVS_CMD_SET_PMAP, 4, 0, args);
}
static int brcm_avs_get_pstate(struct private_data *priv, unsigned int *pstate)
{
u32 args[AVS_MAX_CMD_ARGS];
int ret;
ret = __issue_avs_command(priv, AVS_CMD_GET_PSTATE, 0, 1, args);
if (ret)
return ret;
*pstate = args[0];
return 0;
}
static int brcm_avs_set_pstate(struct private_data *priv, unsigned int pstate)
{
u32 args[AVS_MAX_CMD_ARGS];
args[0] = pstate;
return __issue_avs_command(priv, AVS_CMD_SET_PSTATE, 1, 0, args);
}
static u32 brcm_avs_get_voltage(void __iomem *base)
{
return readl(base + AVS_MBOX_VOLTAGE1);
}
static u32 brcm_avs_get_frequency(void __iomem *base)
{
return readl(base + AVS_MBOX_FREQUENCY) * 1000; /* in kHz */
}
/*
* We determine which frequencies are supported by cycling through all P-states
* and reading back what frequency we are running at for each P-state.
*/
static struct cpufreq_frequency_table *
brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
{
struct cpufreq_frequency_table *table;
unsigned int pstate;
int i, ret;
/* Remember P-state for later */
ret = brcm_avs_get_pstate(priv, &pstate);
if (ret)
return ERR_PTR(ret);
/*
 * We allocate space for the 5 different P-states AVS supports,
 * plus extra space for a terminating element.
*/
table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1 + 1, sizeof(*table),
GFP_KERNEL);
if (!table)
return ERR_PTR(-ENOMEM);
for (i = AVS_PSTATE_P0; i <= AVS_PSTATE_MAX; i++) {
ret = brcm_avs_set_pstate(priv, i);
if (ret)
return ERR_PTR(ret);
table[i].frequency = brcm_avs_get_frequency(priv->base);
table[i].driver_data = i;
}
table[i].frequency = CPUFREQ_TABLE_END;
/* Restore P-state */
ret = brcm_avs_set_pstate(priv, pstate);
if (ret)
return ERR_PTR(ret);
return table;
}
/*
* To ensure the right firmware is running we need to
* - check the MAGIC matches what we expect
* - brcm_avs_get_pmap() doesn't return -ENOTSUPP or -EINVAL
* We need to set up our interrupt handling before calling brcm_avs_get_pmap()!
*/
static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
{
u32 magic;
int rc;
rc = brcm_avs_get_pmap(priv, NULL);
magic = readl(priv->base + AVS_MBOX_MAGIC);
	return (magic == AVS_FIRMWARE_MAGIC) && (rc != -ENOTSUPP) &&
	       (rc != -EINVAL);
}
static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct private_data *priv = policy->driver_data;
cpufreq_cpu_put(policy);
return brcm_avs_get_frequency(priv->base);
}
static int brcm_avs_target_index(struct cpufreq_policy *policy,
unsigned int index)
{
return brcm_avs_set_pstate(policy->driver_data,
policy->freq_table[index].driver_data);
}
static int brcm_avs_suspend(struct cpufreq_policy *policy)
{
struct private_data *priv = policy->driver_data;
int ret;
ret = brcm_avs_get_pmap(priv, &priv->pmap);
if (ret)
return ret;
/*
* We can't use the P-state returned by brcm_avs_get_pmap(), since
* that's the initial P-state from when the P-map was downloaded to the
* AVS co-processor, not necessarily the P-state we are running at now.
* So, we get the current P-state explicitly.
*/
ret = brcm_avs_get_pstate(priv, &priv->pmap.state);
if (ret)
return ret;
/* This is best effort. Nothing to do if it fails. */
(void)__issue_avs_command(priv, AVS_CMD_S2_ENTER, 0, 0, NULL);
return 0;
}
static int brcm_avs_resume(struct cpufreq_policy *policy)
{
struct private_data *priv = policy->driver_data;
int ret;
/* This is best effort. Nothing to do if it fails. */
(void)__issue_avs_command(priv, AVS_CMD_S2_EXIT, 0, 0, NULL);
ret = brcm_avs_set_pmap(priv, &priv->pmap);
if (ret == -EEXIST) {
struct platform_device *pdev = cpufreq_get_driver_data();
struct device *dev = &pdev->dev;
dev_warn(dev, "PMAP was already set\n");
ret = 0;
}
return ret;
}
/*
* All initialization code that we only want to execute once goes here. Setup
* code that can be re-tried on every core (if it failed before) can go into
* brcm_avs_cpufreq_init().
*/
static int brcm_avs_prepare_init(struct platform_device *pdev)
{
struct private_data *priv;
struct device *dev;
int ret;
dev = &pdev->dev;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
sema_init(&priv->sem, 1);
init_completion(&priv->done);
platform_set_drvdata(pdev, priv);
priv->base = __map_region(BRCM_AVS_CPU_DATA);
if (!priv->base) {
dev_err(dev, "Couldn't find property %s in device tree.\n",
BRCM_AVS_CPU_DATA);
return -ENOENT;
}
priv->avs_intr_base = __map_region(BRCM_AVS_CPU_INTR);
if (!priv->avs_intr_base) {
dev_err(dev, "Couldn't find property %s in device tree.\n",
BRCM_AVS_CPU_INTR);
ret = -ENOENT;
goto unmap_base;
}
priv->host_irq = platform_get_irq_byname(pdev, BRCM_AVS_HOST_INTR);
ret = devm_request_irq(dev, priv->host_irq, irq_handler,
IRQF_TRIGGER_RISING,
BRCM_AVS_HOST_INTR, priv);
if (ret && priv->host_irq >= 0) {
dev_err(dev, "IRQ request failed: %s (%d) -- %d\n",
BRCM_AVS_HOST_INTR, priv->host_irq, ret);
goto unmap_intr_base;
}
if (brcm_avs_is_firmware_loaded(priv))
return 0;
dev_err(dev, "AVS firmware is not loaded or doesn't support DVFS\n");
ret = -ENODEV;
unmap_intr_base:
iounmap(priv->avs_intr_base);
unmap_base:
iounmap(priv->base);
return ret;
}
static void brcm_avs_prepare_uninit(struct platform_device *pdev)
{
struct private_data *priv;
priv = platform_get_drvdata(pdev);
iounmap(priv->avs_intr_base);
iounmap(priv->base);
}
static int brcm_avs_cpufreq_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *freq_table;
struct platform_device *pdev;
struct private_data *priv;
struct device *dev;
int ret;
pdev = cpufreq_get_driver_data();
priv = platform_get_drvdata(pdev);
policy->driver_data = priv;
dev = &pdev->dev;
freq_table = brcm_avs_get_freq_table(dev, priv);
if (IS_ERR(freq_table)) {
ret = PTR_ERR(freq_table);
dev_err(dev, "Couldn't determine frequency table (%d).\n", ret);
return ret;
}
policy->freq_table = freq_table;
/* All cores share the same clock and thus the same policy. */
cpumask_setall(policy->cpus);
ret = __issue_avs_command(priv, AVS_CMD_ENABLE, 0, 0, NULL);
if (!ret) {
unsigned int pstate;
ret = brcm_avs_get_pstate(priv, &pstate);
if (!ret) {
policy->cur = freq_table[pstate].frequency;
dev_info(dev, "registered\n");
return 0;
}
}
dev_err(dev, "couldn't initialize driver (%d)\n", ret);
return ret;
}
static ssize_t show_brcm_avs_pstate(struct cpufreq_policy *policy, char *buf)
{
struct private_data *priv = policy->driver_data;
unsigned int pstate;
if (brcm_avs_get_pstate(priv, &pstate))
return sprintf(buf, "<unknown>\n");
return sprintf(buf, "%u\n", pstate);
}
static ssize_t show_brcm_avs_mode(struct cpufreq_policy *policy, char *buf)
{
struct private_data *priv = policy->driver_data;
struct pmap pmap;
if (brcm_avs_get_pmap(priv, &pmap))
return sprintf(buf, "<unknown>\n");
return sprintf(buf, "%s %u\n", brcm_avs_mode_to_string(pmap.mode),
pmap.mode);
}
static ssize_t show_brcm_avs_pmap(struct cpufreq_policy *policy, char *buf)
{
unsigned int mdiv_p0, mdiv_p1, mdiv_p2, mdiv_p3, mdiv_p4;
struct private_data *priv = policy->driver_data;
unsigned int ndiv, pdiv;
struct pmap pmap;
if (brcm_avs_get_pmap(priv, &pmap))
return sprintf(buf, "<unknown>\n");
brcm_avs_parse_p1(pmap.p1, &mdiv_p0, &pdiv, &ndiv);
brcm_avs_parse_p2(pmap.p2, &mdiv_p1, &mdiv_p2, &mdiv_p3, &mdiv_p4);
return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u %u %u\n",
pmap.p1, pmap.p2, ndiv, pdiv, mdiv_p0, mdiv_p1, mdiv_p2,
mdiv_p3, mdiv_p4, pmap.mode, pmap.state);
}
static ssize_t show_brcm_avs_voltage(struct cpufreq_policy *policy, char *buf)
{
struct private_data *priv = policy->driver_data;
return sprintf(buf, "0x%08x\n", brcm_avs_get_voltage(priv->base));
}
static ssize_t show_brcm_avs_frequency(struct cpufreq_policy *policy, char *buf)
{
struct private_data *priv = policy->driver_data;
return sprintf(buf, "0x%08x\n", brcm_avs_get_frequency(priv->base));
}
cpufreq_freq_attr_ro(brcm_avs_pstate);
cpufreq_freq_attr_ro(brcm_avs_mode);
cpufreq_freq_attr_ro(brcm_avs_pmap);
cpufreq_freq_attr_ro(brcm_avs_voltage);
cpufreq_freq_attr_ro(brcm_avs_frequency);
static struct freq_attr *brcm_avs_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
&brcm_avs_pstate,
&brcm_avs_mode,
&brcm_avs_pmap,
&brcm_avs_voltage,
&brcm_avs_frequency,
NULL
};
static struct cpufreq_driver brcm_avs_driver = {
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = brcm_avs_target_index,
.get = brcm_avs_cpufreq_get,
.suspend = brcm_avs_suspend,
.resume = brcm_avs_resume,
.init = brcm_avs_cpufreq_init,
.attr = brcm_avs_cpufreq_attr,
.name = BRCM_AVS_CPUFREQ_PREFIX,
};
static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
{
int ret;
ret = brcm_avs_prepare_init(pdev);
if (ret)
return ret;
brcm_avs_driver.driver_data = pdev;
ret = cpufreq_register_driver(&brcm_avs_driver);
if (ret)
brcm_avs_prepare_uninit(pdev);
return ret;
}
static void brcm_avs_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&brcm_avs_driver);
brcm_avs_prepare_uninit(pdev);
}
static const struct of_device_id brcm_avs_cpufreq_match[] = {
{ .compatible = BRCM_AVS_CPU_DATA },
{ }
};
MODULE_DEVICE_TABLE(of, brcm_avs_cpufreq_match);
static struct platform_driver brcm_avs_cpufreq_platdrv = {
.driver = {
.name = BRCM_AVS_CPUFREQ_NAME,
.of_match_table = brcm_avs_cpufreq_match,
},
.probe = brcm_avs_cpufreq_probe,
.remove_new = brcm_avs_cpufreq_remove,
};
module_platform_driver(brcm_avs_cpufreq_platdrv);
MODULE_AUTHOR("Markus Mayer <[email protected]>");
MODULE_DESCRIPTION("CPUfreq driver for Broadcom STB AVS");
MODULE_LICENSE("GPL");
| linux-master | drivers/cpufreq/brcmstb-avs-cpufreq.c |
#define CREATE_TRACE_POINTS
#include "amd-pstate-trace.h"
| linux-master | drivers/cpufreq/amd-pstate-trace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file provides ACPI-based P-state support. This
 * module works with the generic cpufreq infrastructure. Most of
 * the code is based on the i386 version
* (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c)
*
* Copyright (C) 2005 Intel Corp
* Venkatesh Pallipadi <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/pal.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
MODULE_AUTHOR("Venkatesh Pallipadi");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");
struct cpufreq_acpi_io {
struct acpi_processor_performance acpi_data;
unsigned int resume;
};
struct cpufreq_acpi_req {
unsigned int cpu;
unsigned int state;
};
static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS];
static struct cpufreq_driver acpi_cpufreq_driver;
static int
processor_set_pstate (
u32 value)
{
s64 retval;
pr_debug("processor_set_pstate\n");
retval = ia64_pal_set_pstate((u64)value);
if (retval) {
pr_debug("Failed to set freq to 0x%x, with error 0x%llx\n",
value, retval);
return -ENODEV;
}
return (int)retval;
}
static int
processor_get_pstate (
u32 *value)
{
u64 pstate_index = 0;
s64 retval;
pr_debug("processor_get_pstate\n");
retval = ia64_pal_get_pstate(&pstate_index,
PAL_GET_PSTATE_TYPE_INSTANT);
*value = (u32) pstate_index;
if (retval)
pr_debug("Failed to get current freq with "
"error 0x%llx, idx 0x%x\n", retval, *value);
return (int)retval;
}
/* To be used only after data->acpi_data is initialized */
static unsigned
extract_clock (
struct cpufreq_acpi_io *data,
unsigned value)
{
unsigned long i;
pr_debug("extract_clock\n");
for (i = 0; i < data->acpi_data.state_count; i++) {
if (value == data->acpi_data.states[i].status)
return data->acpi_data.states[i].core_frequency;
}
return data->acpi_data.states[i-1].core_frequency;
}
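/*
 * Illustrative example: if acpi_data.states[] maps status 0x0 to 1600 MHz
 * and status 0x1 to 1200 MHz, extract_clock(data, 0x1) returns 1200; the
 * caller then multiplies by 1000 to report kHz.
 */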
static long
processor_get_freq (
void *arg)
{
struct cpufreq_acpi_req *req = arg;
unsigned int cpu = req->cpu;
struct cpufreq_acpi_io *data = acpi_io_data[cpu];
u32 value;
int ret;
pr_debug("processor_get_freq\n");
if (smp_processor_id() != cpu)
return -EAGAIN;
/* processor_get_pstate gets the instantaneous frequency */
ret = processor_get_pstate(&value);
if (ret) {
pr_warn("get performance failed with error %d\n", ret);
return ret;
}
return 1000 * extract_clock(data, value);
}
static long
processor_set_freq (
void *arg)
{
struct cpufreq_acpi_req *req = arg;
unsigned int cpu = req->cpu;
struct cpufreq_acpi_io *data = acpi_io_data[cpu];
int ret, state = req->state;
u32 value;
pr_debug("processor_set_freq\n");
if (smp_processor_id() != cpu)
return -EAGAIN;
if (state == data->acpi_data.state) {
if (unlikely(data->resume)) {
pr_debug("Called after resume, resetting to P%d\n", state);
data->resume = 0;
} else {
pr_debug("Already at target state (P%d)\n", state);
return 0;
}
}
pr_debug("Transitioning from P%d to P%d\n",
data->acpi_data.state, state);
/*
* First we write the target state's 'control' value to the
* control_register.
*/
value = (u32) data->acpi_data.states[state].control;
pr_debug("Transitioning to state: 0x%08x\n", value);
ret = processor_set_pstate(value);
if (ret) {
pr_warn("Transition failed with error %d\n", ret);
return -ENODEV;
}
data->acpi_data.state = state;
return 0;
}
static unsigned int
acpi_cpufreq_get (
unsigned int cpu)
{
struct cpufreq_acpi_req req;
long ret;
req.cpu = cpu;
ret = work_on_cpu(cpu, processor_get_freq, &req);
return ret > 0 ? (unsigned int) ret : 0;
}
static int
acpi_cpufreq_target (
struct cpufreq_policy *policy,
unsigned int index)
{
struct cpufreq_acpi_req req;
req.cpu = policy->cpu;
req.state = index;
return work_on_cpu(req.cpu, processor_set_freq, &req);
}
static int
acpi_cpufreq_cpu_init (
struct cpufreq_policy *policy)
{
unsigned int i;
unsigned int cpu = policy->cpu;
struct cpufreq_acpi_io *data;
unsigned int result = 0;
struct cpufreq_frequency_table *freq_table;
pr_debug("acpi_cpufreq_cpu_init\n");
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return (-ENOMEM);
acpi_io_data[cpu] = data;
result = acpi_processor_register_performance(&data->acpi_data, cpu);
if (result)
goto err_free;
/* capability check */
if (data->acpi_data.state_count <= 1) {
pr_debug("No P-States\n");
result = -ENODEV;
goto err_unreg;
}
if ((data->acpi_data.control_register.space_id !=
ACPI_ADR_SPACE_FIXED_HARDWARE) ||
(data->acpi_data.status_register.space_id !=
ACPI_ADR_SPACE_FIXED_HARDWARE)) {
pr_debug("Unsupported address space [%d, %d]\n",
(u32) (data->acpi_data.control_register.space_id),
(u32) (data->acpi_data.status_register.space_id));
result = -ENODEV;
goto err_unreg;
}
/* alloc freq_table */
freq_table = kcalloc(data->acpi_data.state_count + 1,
sizeof(*freq_table),
GFP_KERNEL);
if (!freq_table) {
result = -ENOMEM;
goto err_unreg;
}
/* detect transition latency */
policy->cpuinfo.transition_latency = 0;
for (i=0; i<data->acpi_data.state_count; i++) {
if ((data->acpi_data.states[i].transition_latency * 1000) >
policy->cpuinfo.transition_latency) {
policy->cpuinfo.transition_latency =
data->acpi_data.states[i].transition_latency * 1000;
}
}
/* table init */
for (i = 0; i <= data->acpi_data.state_count; i++)
{
if (i < data->acpi_data.state_count) {
freq_table[i].frequency =
data->acpi_data.states[i].core_frequency * 1000;
} else {
freq_table[i].frequency = CPUFREQ_TABLE_END;
}
}
policy->freq_table = freq_table;
/* notify BIOS that we exist */
acpi_processor_notify_smm(THIS_MODULE);
pr_info("CPU%u - ACPI performance management activated\n", cpu);
for (i = 0; i < data->acpi_data.state_count; i++)
pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
(i == data->acpi_data.state?'*':' '), i,
(u32) data->acpi_data.states[i].core_frequency,
(u32) data->acpi_data.states[i].power,
(u32) data->acpi_data.states[i].transition_latency,
(u32) data->acpi_data.states[i].bus_master_latency,
(u32) data->acpi_data.states[i].status,
(u32) data->acpi_data.states[i].control);
/* the first call to ->target() should result in us actually
* writing something to the appropriate registers. */
data->resume = 1;
return (result);
err_unreg:
acpi_processor_unregister_performance(cpu);
err_free:
kfree(data);
acpi_io_data[cpu] = NULL;
return (result);
}
static int
acpi_cpufreq_cpu_exit (
struct cpufreq_policy *policy)
{
struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
pr_debug("acpi_cpufreq_cpu_exit\n");
if (data) {
acpi_io_data[policy->cpu] = NULL;
acpi_processor_unregister_performance(policy->cpu);
kfree(policy->freq_table);
kfree(data);
}
return (0);
}
static struct cpufreq_driver acpi_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = acpi_cpufreq_target,
.get = acpi_cpufreq_get,
.init = acpi_cpufreq_cpu_init,
.exit = acpi_cpufreq_cpu_exit,
.name = "acpi-cpufreq",
.attr = cpufreq_generic_attr,
};
static int __init
acpi_cpufreq_init (void)
{
pr_debug("acpi_cpufreq_init\n");
return cpufreq_register_driver(&acpi_cpufreq_driver);
}
static void __exit
acpi_cpufreq_exit (void)
{
pr_debug("acpi_cpufreq_exit\n");
cpufreq_unregister_driver(&acpi_cpufreq_driver);
}
late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);
| linux-master | drivers/cpufreq/ia64-acpi-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* CPUFreq support for Armada 8K
*
* Copyright (C) 2018 Marvell
*
* Omri Itach <[email protected]>
* Gregory Clement <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
static const struct of_device_id __maybe_unused armada_8k_cpufreq_of_match[] = {
{ .compatible = "marvell,ap806-cpu-clock" },
{ .compatible = "marvell,ap807-cpu-clock" },
{ },
};
MODULE_DEVICE_TABLE(of, armada_8k_cpufreq_of_match);
/*
* Setup the opps list with the divider for the max frequency, that
* will be filled at runtime.
*/
static const int opps_div[] __initconst = {1, 2, 3, 4};
static struct platform_device *armada_8k_pdev;
struct freq_table {
struct device *cpu_dev;
unsigned int freq[ARRAY_SIZE(opps_div)];
};
/* If the CPUs share the same clock, then they are in the same cluster. */
static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk,
struct cpumask *cpumask)
{
int cpu;
for_each_possible_cpu(cpu) {
struct device *cpu_dev;
struct clk *clk;
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
pr_warn("Failed to get cpu%d device\n", cpu);
continue;
}
clk = clk_get(cpu_dev, 0);
if (IS_ERR(clk)) {
pr_warn("Cannot get clock for CPU %d\n", cpu);
} else {
if (clk_is_match(clk, cur_clk))
cpumask_set_cpu(cpu, cpumask);
clk_put(clk);
}
}
}
static int __init armada_8k_add_opp(struct clk *clk, struct device *cpu_dev,
struct freq_table *freq_tables,
int opps_index)
{
unsigned int cur_frequency;
unsigned int freq;
int i, ret;
/* Get nominal (current) CPU frequency. */
cur_frequency = clk_get_rate(clk);
if (!cur_frequency) {
dev_err(cpu_dev, "Failed to get clock rate for this CPU\n");
return -EINVAL;
}
freq_tables[opps_index].cpu_dev = cpu_dev;
for (i = 0; i < ARRAY_SIZE(opps_div); i++) {
freq = cur_frequency / opps_div[i];
ret = dev_pm_opp_add(cpu_dev, freq, 0);
if (ret)
return ret;
freq_tables[opps_index].freq[i] = freq;
}
return 0;
}
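/*
 * For example (illustrative rate): with a nominal clock of 1600000000 Hz,
 * the loop above registers OPPs at 1600 MHz, 800 MHz, ~533 MHz and 400 MHz,
 * i.e. the nominal rate divided by each entry of opps_div[].
 */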
static void armada_8k_cpufreq_free_table(struct freq_table *freq_tables)
{
int opps_index, nb_cpus = num_possible_cpus();
for (opps_index = 0 ; opps_index < nb_cpus; opps_index++) {
int i;
/* If cpu_dev is NULL then we reached the end of the array */
if (!freq_tables[opps_index].cpu_dev)
break;
for (i = 0; i < ARRAY_SIZE(opps_div); i++) {
/*
 * A 0 Hz frequency is not valid; it means the
 * entry was never initialized, so there are no
 * more OPPs to free.
*/
if (freq_tables[opps_index].freq[i] == 0)
break;
dev_pm_opp_remove(freq_tables[opps_index].cpu_dev,
freq_tables[opps_index].freq[i]);
}
}
kfree(freq_tables);
}
static int __init armada_8k_cpufreq_init(void)
{
int ret = 0, opps_index = 0, cpu, nb_cpus;
struct freq_table *freq_tables;
struct device_node *node;
struct cpumask cpus;
node = of_find_matching_node_and_match(NULL, armada_8k_cpufreq_of_match,
NULL);
if (!node || !of_device_is_available(node)) {
of_node_put(node);
return -ENODEV;
}
of_node_put(node);
nb_cpus = num_possible_cpus();
freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
if (!freq_tables)
return -ENOMEM;
cpumask_copy(&cpus, cpu_possible_mask);
/*
* For each CPU, this loop registers the operating points
* supported (which are the nominal CPU frequency and full integer
* divisions of it).
*/
for_each_cpu(cpu, &cpus) {
struct cpumask shared_cpus;
struct device *cpu_dev;
struct clk *clk;
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
pr_err("Cannot get CPU %d\n", cpu);
continue;
}
clk = clk_get(cpu_dev, 0);
if (IS_ERR(clk)) {
pr_err("Cannot get clock for CPU %d\n", cpu);
ret = PTR_ERR(clk);
goto remove_opp;
}
ret = armada_8k_add_opp(clk, cpu_dev, freq_tables, opps_index);
if (ret) {
clk_put(clk);
goto remove_opp;
}
opps_index++;
cpumask_clear(&shared_cpus);
armada_8k_get_sharing_cpus(clk, &shared_cpus);
dev_pm_opp_set_sharing_cpus(cpu_dev, &shared_cpus);
cpumask_andnot(&cpus, &cpus, &shared_cpus);
clk_put(clk);
}
armada_8k_pdev = platform_device_register_simple("cpufreq-dt", -1,
NULL, 0);
ret = PTR_ERR_OR_ZERO(armada_8k_pdev);
if (ret)
goto remove_opp;
platform_set_drvdata(armada_8k_pdev, freq_tables);
return 0;
remove_opp:
armada_8k_cpufreq_free_table(freq_tables);
return ret;
}
module_init(armada_8k_cpufreq_init);
static void __exit armada_8k_cpufreq_exit(void)
{
struct freq_table *freq_tables = platform_get_drvdata(armada_8k_pdev);
platform_device_unregister(armada_8k_pdev);
armada_8k_cpufreq_free_table(freq_tables);
}
module_exit(armada_8k_cpufreq_exit);
MODULE_AUTHOR("Gregory Clement <[email protected]>");
MODULE_DESCRIPTION("Armada 8K cpufreq driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/cpufreq/armada-8k-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/cpufreq/freq_table.c
*
* Copyright (C) 2002 - 2003 Dominik Brodowski
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpufreq.h>
#include <linux/module.h>
/*********************************************************************
* FREQUENCY TABLE HELPERS *
*********************************************************************/
bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
if (!table)
return false;
cpufreq_for_each_valid_entry(pos, table)
if (pos->flags & CPUFREQ_BOOST_FREQ)
return true;
return false;
}
EXPORT_SYMBOL_GPL(policy_has_boost_freq);
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
{
struct cpufreq_frequency_table *pos;
unsigned int min_freq = ~0;
unsigned int max_freq = 0;
unsigned int freq;
cpufreq_for_each_valid_entry(pos, table) {
freq = pos->frequency;
if (!cpufreq_boost_enabled()
&& (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
pr_debug("table entry %u: %u kHz\n", (int)(pos - table), freq);
if (freq < min_freq)
min_freq = freq;
if (freq > max_freq)
max_freq = freq;
}
policy->min = policy->cpuinfo.min_freq = min_freq;
policy->max = max_freq;
/*
* If the driver has set its own cpuinfo.max_freq above max_freq, leave
* it as is.
*/
if (policy->cpuinfo.max_freq < max_freq)
policy->max = policy->cpuinfo.max_freq = max_freq;
if (policy->min == ~0)
return -EINVAL;
else
return 0;
}
int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
struct cpufreq_frequency_table *table)
{
struct cpufreq_frequency_table *pos;
unsigned int freq, next_larger = ~0;
bool found = false;
pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
policy->min, policy->max, policy->cpu);
cpufreq_verify_within_cpu_limits(policy);
cpufreq_for_each_valid_entry(pos, table) {
freq = pos->frequency;
if ((freq >= policy->min) && (freq <= policy->max)) {
found = true;
break;
}
if ((next_larger > freq) && (freq > policy->max))
next_larger = freq;
}
if (!found) {
policy->max = next_larger;
cpufreq_verify_within_cpu_limits(policy);
}
pr_debug("verification lead to (%u - %u kHz) for cpu %u\n",
policy->min, policy->max, policy->cpu);
return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
/*
* Generic routine to verify policy & frequency table, requires driver to set
* policy->freq_table prior to it.
*/
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
{
if (!policy->freq_table)
return -ENODEV;
return cpufreq_frequency_table_verify(policy, policy->freq_table);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
struct cpufreq_frequency_table optimal = {
.driver_data = ~0,
.frequency = 0,
};
struct cpufreq_frequency_table suboptimal = {
.driver_data = ~0,
.frequency = 0,
};
struct cpufreq_frequency_table *pos;
struct cpufreq_frequency_table *table = policy->freq_table;
unsigned int freq, diff, i = 0;
int index;
pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
target_freq, relation, policy->cpu);
switch (relation) {
case CPUFREQ_RELATION_H:
suboptimal.frequency = ~0;
break;
case CPUFREQ_RELATION_L:
case CPUFREQ_RELATION_C:
optimal.frequency = ~0;
break;
}
cpufreq_for_each_valid_entry_idx(pos, table, i) {
freq = pos->frequency;
if ((freq < policy->min) || (freq > policy->max))
continue;
if (freq == target_freq) {
optimal.driver_data = i;
break;
}
switch (relation) {
case CPUFREQ_RELATION_H:
if (freq < target_freq) {
if (freq >= optimal.frequency) {
optimal.frequency = freq;
optimal.driver_data = i;
}
} else {
if (freq <= suboptimal.frequency) {
suboptimal.frequency = freq;
suboptimal.driver_data = i;
}
}
break;
case CPUFREQ_RELATION_L:
if (freq > target_freq) {
if (freq <= optimal.frequency) {
optimal.frequency = freq;
optimal.driver_data = i;
}
} else {
if (freq >= suboptimal.frequency) {
suboptimal.frequency = freq;
suboptimal.driver_data = i;
}
}
break;
case CPUFREQ_RELATION_C:
diff = abs(freq - target_freq);
if (diff < optimal.frequency ||
(diff == optimal.frequency &&
freq > table[optimal.driver_data].frequency)) {
optimal.frequency = diff;
optimal.driver_data = i;
}
break;
}
}
if (optimal.driver_data > i) {
if (suboptimal.driver_data > i) {
WARN(1, "Invalid frequency table: %d\n", policy->cpu);
return 0;
}
index = suboptimal.driver_data;
} else
index = optimal.driver_data;
pr_debug("target index is %u, freq is:%u kHz\n", index,
table[index].frequency);
return index;
}
EXPORT_SYMBOL_GPL(cpufreq_table_index_unsorted);
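/*
 * Illustrative example: for a table { 400000, 800000, 1200000 } kHz and
 * target_freq = 900000, CPUFREQ_RELATION_L picks 1200000 (lowest frequency
 * at or above the target), CPUFREQ_RELATION_H picks 800000 (highest at or
 * below it) and CPUFREQ_RELATION_C picks 800000 (closest to it).
 */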
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
unsigned int freq)
{
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
int idx;
if (unlikely(!table)) {
pr_debug("%s: Unable to find frequency table\n", __func__);
return -ENOENT;
}
cpufreq_for_each_valid_entry_idx(pos, table, idx)
if (pos->frequency == freq)
return idx;
return -EINVAL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_index);
/*
* show_available_freqs - show available frequencies for the specified CPU
*/
static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
bool show_boost)
{
ssize_t count = 0;
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
if (!table)
return -ENODEV;
cpufreq_for_each_valid_entry(pos, table) {
/*
* show_boost = true and driver_data = BOOST freq
* display BOOST freqs
*
* show_boost = false and driver_data = BOOST freq
* show_boost = true and driver_data != BOOST freq
* continue - do not display anything
*
* show_boost = false and driver_data != BOOST freq
* display NON BOOST freqs
*/
if (show_boost ^ (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
count += sprintf(&buf[count], "%d ", pos->frequency);
}
count += sprintf(&buf[count], "\n");
return count;
}
#define cpufreq_attr_available_freq(_name) \
struct freq_attr cpufreq_freq_attr_##_name##_freqs = \
__ATTR_RO(_name##_frequencies)
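/*
 * For example (illustrative expansion):
 * cpufreq_attr_available_freq(scaling_available) defines a read-only
 * attribute named "scaling_available_frequencies" whose ->show() callback
 * is scaling_available_frequencies_show(), courtesy of __ATTR_RO().
 */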
/*
* scaling_available_frequencies_show - show available normal frequencies for
* the specified CPU
*/
static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
char *buf)
{
return show_available_freqs(policy, buf, false);
}
cpufreq_attr_available_freq(scaling_available);
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
/*
* scaling_boost_frequencies_show - show available boost frequencies for
* the specified CPU
*/
static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
char *buf)
{
return show_available_freqs(policy, buf, true);
}
cpufreq_attr_available_freq(scaling_boost);
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs);
struct freq_attr *cpufreq_generic_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
static int set_freq_table_sorted(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
struct cpufreq_frequency_table *prev = NULL;
int ascending = 0;
policy->freq_table_sorted = CPUFREQ_TABLE_UNSORTED;
cpufreq_for_each_valid_entry(pos, table) {
if (!prev) {
prev = pos;
continue;
}
if (pos->frequency == prev->frequency) {
pr_warn("Duplicate freq-table entries: %u\n",
pos->frequency);
return -EINVAL;
}
/* Frequency increased from prev to pos */
if (pos->frequency > prev->frequency) {
/* But frequency was decreasing earlier */
if (ascending < 0) {
pr_debug("Freq table is unsorted\n");
return 0;
}
ascending++;
} else {
/* Frequency decreased from prev to pos */
/* But frequency was increasing earlier */
if (ascending > 0) {
pr_debug("Freq table is unsorted\n");
return 0;
}
ascending--;
}
prev = pos;
}
if (ascending > 0)
policy->freq_table_sorted = CPUFREQ_TABLE_SORTED_ASCENDING;
else
policy->freq_table_sorted = CPUFREQ_TABLE_SORTED_DESCENDING;
pr_debug("Freq table is sorted in %s order\n",
ascending > 0 ? "ascending" : "descending");
return 0;
}
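/*
 * E.g. (illustrative tables): { 400000, 800000, 1200000 } is detected as
 * ascending, { 1200000, 800000, 400000 } as descending, and a mixed table
 * such as { 400000, 1200000, 800000 } stays marked CPUFREQ_TABLE_UNSORTED.
 */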
int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy)
{
int ret;
if (!policy->freq_table) {
/* Freq table must be passed by drivers with target_index() */
if (has_target_index())
return -EINVAL;
return 0;
}
ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
if (ret)
return ret;
return set_freq_table_sorted(policy);
}
MODULE_AUTHOR("Dominik Brodowski <[email protected]>");
MODULE_DESCRIPTION("CPUfreq frequency table helpers");
| linux-master | drivers/cpufreq/freq_table.c |
/*
* Cpufreq driver for the loongson-2 processors
*
* The 2E revision of the Loongson processor does not support this feature.
*
* Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
* Author: Yanhua, [email protected]
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <asm/idle.h>
#include <asm/mach-loongson2ef/loongson.h>
static uint nowait;
static void (*saved_cpu_wait) (void);
static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
unsigned long val, void *data);
static struct notifier_block loongson2_cpufreq_notifier_block = {
.notifier_call = loongson2_cpu_freq_notifier
};
static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
unsigned long val, void *data)
{
if (val == CPUFREQ_POSTCHANGE)
current_cpu_data.udelay_val = loops_per_jiffy;
return 0;
}
/*
* Here we notify other drivers of the proposed change and the final change.
*/
static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
unsigned int index)
{
unsigned int freq;
freq =
((cpu_clock_freq / 1000) *
loongson2_clockmod_table[index].driver_data) / 8;
/* setting the cpu frequency */
loongson2_cpu_set_rate(freq);
return 0;
}
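/*
 * Worked example (illustrative values): with cpu_clock_freq = 800000000 Hz
 * and driver_data = 4, freq = (800000000 / 1000) * 4 / 8 = 400000 kHz,
 * i.e. the core runs at 4/8 of the 800 MHz input clock.
 */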
static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
int i;
unsigned long rate;
int ret;
rate = cpu_clock_freq / 1000;
if (!rate)
return -EINVAL;
/* clock table init */
for (i = 2;
(loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END);
i++)
loongson2_clockmod_table[i].frequency = (rate * i) / 8;
ret = loongson2_cpu_set_rate(rate);
if (ret)
return ret;
cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0);
return 0;
}
static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
{
return 0;
}
static struct cpufreq_driver loongson2_cpufreq_driver = {
.name = "loongson2",
.init = loongson2_cpufreq_cpu_init,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = loongson2_cpufreq_target,
.get = cpufreq_generic_get,
.exit = loongson2_cpufreq_exit,
.attr = cpufreq_generic_attr,
};
static const struct platform_device_id platform_device_ids[] = {
{
.name = "loongson2_cpufreq",
},
{}
};
MODULE_DEVICE_TABLE(platform, platform_device_ids);
static struct platform_driver platform_driver = {
.driver = {
.name = "loongson2_cpufreq",
},
.id_table = platform_device_ids,
};
/*
* This is the simple version of the Loongson-2 wait. Maybe we need to do
* this in an interrupt-disabled context.
*/
static DEFINE_SPINLOCK(loongson2_wait_lock);
static void loongson2_cpu_wait(void)
{
unsigned long flags;
u32 cpu_freq;
spin_lock_irqsave(&loongson2_wait_lock, flags);
cpu_freq = readl(LOONGSON_CHIPCFG);
/* Put CPU into wait mode */
writel(readl(LOONGSON_CHIPCFG) & ~0x7, LOONGSON_CHIPCFG);
/* Restore CPU state */
writel(cpu_freq, LOONGSON_CHIPCFG);
spin_unlock_irqrestore(&loongson2_wait_lock, flags);
local_irq_enable();
}
static int __init cpufreq_init(void)
{
int ret;
/* Register platform stuff */
ret = platform_driver_register(&platform_driver);
if (ret)
return ret;
pr_info("Loongson-2F CPU frequency driver\n");
cpufreq_register_notifier(&loongson2_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
ret = cpufreq_register_driver(&loongson2_cpufreq_driver);
if (!ret && !nowait) {
saved_cpu_wait = cpu_wait;
cpu_wait = loongson2_cpu_wait;
}
return ret;
}
static void __exit cpufreq_exit(void)
{
if (!nowait && saved_cpu_wait)
cpu_wait = saved_cpu_wait;
cpufreq_unregister_driver(&loongson2_cpufreq_driver);
cpufreq_unregister_notifier(&loongson2_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
platform_driver_unregister(&platform_driver);
}
module_init(cpufreq_init);
module_exit(cpufreq_exit);
module_param(nowait, uint, 0644);
MODULE_PARM_DESC(nowait, "Disable Loongson-2F specific wait");
MODULE_AUTHOR("Yanhua <[email protected]>");
MODULE_DESCRIPTION("cpufreq driver for Loongson2F");
MODULE_LICENSE("GPL");
| linux-master | drivers/cpufreq/loongson2_cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* CPU frequency scaling for OMAP using OPP information
*
* Copyright (C) 2005 Nokia Corporation
* Written by Tony Lindgren <[email protected]>
*
* Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
*
* Copyright (C) 2007-2011 Texas Instruments, Inc.
* - OMAP3/4 support by Rajendra Nayak, Santosh Shilimkar
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/pm_opp.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <asm/smp_plat.h>
#include <asm/cpu.h>
/* OPP tolerance in percentage */
#define OPP_TOLERANCE 4
static struct cpufreq_frequency_table *freq_table;
static atomic_t freq_table_users = ATOMIC_INIT(0);
static struct device *mpu_dev;
static struct regulator *mpu_reg;
static int omap_target(struct cpufreq_policy *policy, unsigned int index)
{
int r, ret;
struct dev_pm_opp *opp;
unsigned long freq, volt = 0, volt_old = 0, tol = 0;
unsigned int old_freq, new_freq;
old_freq = policy->cur;
new_freq = freq_table[index].frequency;
freq = new_freq * 1000;
ret = clk_round_rate(policy->clk, freq);
if (ret < 0) {
dev_warn(mpu_dev,
"CPUfreq: Cannot find matching frequency for %lu\n",
freq);
return ret;
}
freq = ret;
if (mpu_reg) {
opp = dev_pm_opp_find_freq_ceil(mpu_dev, &freq);
if (IS_ERR(opp)) {
dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",
__func__, new_freq);
return -EINVAL;
}
volt = dev_pm_opp_get_voltage(opp);
dev_pm_opp_put(opp);
tol = volt * OPP_TOLERANCE / 100;
volt_old = regulator_get_voltage(mpu_reg);
}
dev_dbg(mpu_dev, "cpufreq-omap: %u MHz, %ld mV --> %u MHz, %ld mV\n",
old_freq / 1000, volt_old ? volt_old / 1000 : -1,
new_freq / 1000, volt ? volt / 1000 : -1);
/* scaling up? scale voltage before frequency */
if (mpu_reg && (new_freq > old_freq)) {
r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol);
if (r < 0) {
dev_warn(mpu_dev, "%s: unable to scale voltage up.\n",
__func__);
return r;
}
}
ret = clk_set_rate(policy->clk, new_freq * 1000);
/* scaling down? scale voltage after frequency */
if (mpu_reg && (new_freq < old_freq)) {
r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol);
if (r < 0) {
dev_warn(mpu_dev, "%s: unable to scale voltage down.\n",
__func__);
clk_set_rate(policy->clk, old_freq * 1000);
return r;
}
}
return ret;
}
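/*
 * The ordering above keeps the core within its safe operating envelope at
 * every intermediate step: raise the voltage before the frequency when
 * scaling up, and lower it after the frequency when scaling down. Worked
 * tolerance example: for an OPP voltage of 1200000 uV,
 * tol = 1200000 * OPP_TOLERANCE / 100 = 48000 uV, so the regulator may
 * settle anywhere in [1152000, 1248000] uV.
 */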
static inline void freq_table_free(void)
{
if (atomic_dec_and_test(&freq_table_users))
dev_pm_opp_free_cpufreq_table(mpu_dev, &freq_table);
}
static int omap_cpu_init(struct cpufreq_policy *policy)
{
int result;
policy->clk = clk_get(NULL, "cpufreq_ck");
if (IS_ERR(policy->clk))
return PTR_ERR(policy->clk);
if (!freq_table) {
result = dev_pm_opp_init_cpufreq_table(mpu_dev, &freq_table);
if (result) {
dev_err(mpu_dev,
"%s: cpu%d: failed creating freq table[%d]\n",
__func__, policy->cpu, result);
clk_put(policy->clk);
return result;
}
}
atomic_inc_return(&freq_table_users);
/* FIXME: what's the actual transition time? */
cpufreq_generic_init(policy, freq_table, 300 * 1000);
return 0;
}
static int omap_cpu_exit(struct cpufreq_policy *policy)
{
freq_table_free();
clk_put(policy->clk);
return 0;
}
static struct cpufreq_driver omap_driver = {
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = omap_target,
.get = cpufreq_generic_get,
.init = omap_cpu_init,
.exit = omap_cpu_exit,
.register_em = cpufreq_register_em_with_opp,
.name = "omap",
.attr = cpufreq_generic_attr,
};
static int omap_cpufreq_probe(struct platform_device *pdev)
{
mpu_dev = get_cpu_device(0);
if (!mpu_dev) {
pr_warn("%s: unable to get the MPU device\n", __func__);
return -EINVAL;
}
mpu_reg = regulator_get(mpu_dev, "vcc");
if (IS_ERR(mpu_reg)) {
pr_warn("%s: unable to get MPU regulator\n", __func__);
mpu_reg = NULL;
} else {
/*
* Ensure physical regulator is present.
* (e.g. could be dummy regulator.)
*/
if (regulator_get_voltage(mpu_reg) < 0) {
pr_warn("%s: physical regulator not present for MPU\n",
__func__);
regulator_put(mpu_reg);
mpu_reg = NULL;
}
}
return cpufreq_register_driver(&omap_driver);
}
static void omap_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&omap_driver);
}
static struct platform_driver omap_cpufreq_platdrv = {
.driver = {
.name = "omap-cpufreq",
},
.probe = omap_cpufreq_probe,
.remove_new = omap_cpufreq_remove,
};
module_platform_driver(omap_cpufreq_platdrv);
MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs");
MODULE_LICENSE("GPL");
| linux-master | drivers/cpufreq/omap-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on documentation provided by Dave Jones. Thanks!
*
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/timex.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <asm/cpu_device_id.h>
#include <asm/msr.h>
#include <asm/tsc.h>
#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
#include <linux/acpi.h>
#include <acpi/processor.h>
#endif
#define EPS_BRAND_C7M 0
#define EPS_BRAND_C7 1
#define EPS_BRAND_EDEN 2
#define EPS_BRAND_C3 3
#define EPS_BRAND_C7D 4
struct eps_cpu_data {
u32 fsb;
#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
u32 bios_limit;
#endif
struct cpufreq_frequency_table freq_table[];
};
static struct eps_cpu_data *eps_cpu[NR_CPUS];
/* Module parameters */
static int freq_failsafe_off;
static int voltage_failsafe_off;
static int set_max_voltage;
#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
static int ignore_acpi_limit;
static struct acpi_processor_performance *eps_acpi_cpu_perf;
/* Minimum necessary to get acpi_processor_get_bios_limit() working */
static int eps_acpi_init(void)
{
eps_acpi_cpu_perf = kzalloc(sizeof(*eps_acpi_cpu_perf),
GFP_KERNEL);
if (!eps_acpi_cpu_perf)
return -ENOMEM;
if (!zalloc_cpumask_var(&eps_acpi_cpu_perf->shared_cpu_map,
GFP_KERNEL)) {
kfree(eps_acpi_cpu_perf);
eps_acpi_cpu_perf = NULL;
return -ENOMEM;
}
if (acpi_processor_register_performance(eps_acpi_cpu_perf, 0)) {
free_cpumask_var(eps_acpi_cpu_perf->shared_cpu_map);
kfree(eps_acpi_cpu_perf);
eps_acpi_cpu_perf = NULL;
return -EIO;
}
return 0;
}
static int eps_acpi_exit(struct cpufreq_policy *policy)
{
if (eps_acpi_cpu_perf) {
acpi_processor_unregister_performance(0);
free_cpumask_var(eps_acpi_cpu_perf->shared_cpu_map);
kfree(eps_acpi_cpu_perf);
eps_acpi_cpu_perf = NULL;
}
return 0;
}
#endif
static unsigned int eps_get(unsigned int cpu)
{
struct eps_cpu_data *centaur;
u32 lo, hi;
if (cpu)
return 0;
centaur = eps_cpu[cpu];
if (centaur == NULL)
return 0;
/* Return current frequency */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
return centaur->fsb * ((lo >> 8) & 0xff);
}
static int eps_set_state(struct eps_cpu_data *centaur,
struct cpufreq_policy *policy,
u32 dest_state)
{
u32 lo, hi;
int i;
/* Wait while CPU is busy */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
i = 0;
while (lo & ((1 << 16) | (1 << 17))) {
udelay(16);
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
i++;
if (unlikely(i > 64)) {
return -ENODEV;
}
}
/* Set new multiplier and voltage */
wrmsr(MSR_IA32_PERF_CTL, dest_state & 0xffff, 0);
/* Wait until transition end */
i = 0;
do {
udelay(16);
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
i++;
if (unlikely(i > 64)) {
return -ENODEV;
}
} while (lo & ((1 << 16) | (1 << 17)));
#ifdef DEBUG
{
u8 current_multiplier, current_voltage;
/* Print voltage and multiplier */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
current_voltage = lo & 0xff;
pr_info("Current voltage = %dmV\n", current_voltage * 16 + 700);
current_multiplier = (lo >> 8) & 0xff;
pr_info("Current multiplier = %d\n", current_multiplier);
}
#endif
return 0;
}
static int eps_target(struct cpufreq_policy *policy, unsigned int index)
{
struct eps_cpu_data *centaur;
unsigned int cpu = policy->cpu;
unsigned int dest_state;
int ret;
if (unlikely(eps_cpu[cpu] == NULL))
return -ENODEV;
centaur = eps_cpu[cpu];
/* Make frequency transition */
dest_state = centaur->freq_table[index].driver_data & 0xffff;
ret = eps_set_state(centaur, policy, dest_state);
if (ret)
pr_err("Timeout!\n");
return ret;
}
static int eps_cpu_init(struct cpufreq_policy *policy)
{
unsigned int i;
u32 lo, hi;
u64 val;
u8 current_multiplier, current_voltage;
u8 max_multiplier, max_voltage;
u8 min_multiplier, min_voltage;
u8 brand = 0;
u32 fsb;
struct eps_cpu_data *centaur;
struct cpuinfo_x86 *c = &cpu_data(0);
struct cpufreq_frequency_table *f_table;
int k, step, voltage;
int states;
#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
unsigned int limit;
#endif
if (policy->cpu != 0)
return -ENODEV;
/* Check brand */
pr_info("Detected VIA ");
switch (c->x86_model) {
case 10:
rdmsr(0x1153, lo, hi);
brand = (((lo >> 2) ^ lo) >> 18) & 3;
pr_cont("Model A ");
break;
case 13:
rdmsr(0x1154, lo, hi);
brand = (((lo >> 4) ^ (lo >> 2))) & 0x000000ff;
pr_cont("Model D ");
break;
}
switch (brand) {
case EPS_BRAND_C7M:
pr_cont("C7-M\n");
break;
case EPS_BRAND_C7:
pr_cont("C7\n");
break;
case EPS_BRAND_EDEN:
pr_cont("Eden\n");
break;
case EPS_BRAND_C7D:
pr_cont("C7-D\n");
break;
case EPS_BRAND_C3:
pr_cont("C3\n");
return -ENODEV;
}
/* Enable Enhanced PowerSaver */
rdmsrl(MSR_IA32_MISC_ENABLE, val);
if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
wrmsrl(MSR_IA32_MISC_ENABLE, val);
/* Can be locked at 0 */
rdmsrl(MSR_IA32_MISC_ENABLE, val);
if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
pr_info("Can't enable Enhanced PowerSaver\n");
return -ENODEV;
}
}
/* Print voltage and multiplier */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
current_voltage = lo & 0xff;
pr_info("Current voltage = %dmV\n", current_voltage * 16 + 700);
current_multiplier = (lo >> 8) & 0xff;
pr_info("Current multiplier = %d\n", current_multiplier);
/* Print limits */
max_voltage = hi & 0xff;
pr_info("Highest voltage = %dmV\n", max_voltage * 16 + 700);
max_multiplier = (hi >> 8) & 0xff;
pr_info("Highest multiplier = %d\n", max_multiplier);
min_voltage = (hi >> 16) & 0xff;
pr_info("Lowest voltage = %dmV\n", min_voltage * 16 + 700);
min_multiplier = (hi >> 24) & 0xff;
pr_info("Lowest multiplier = %d\n", min_multiplier);
/* Sanity checks */
if (current_multiplier == 0 || max_multiplier == 0
|| min_multiplier == 0)
return -EINVAL;
if (current_multiplier > max_multiplier
|| max_multiplier <= min_multiplier)
return -EINVAL;
if (current_voltage > 0x1f || max_voltage > 0x1f)
return -EINVAL;
if (max_voltage < min_voltage
|| current_voltage < min_voltage
|| current_voltage > max_voltage)
return -EINVAL;
/* Check for systems using underclocked CPU */
if (!freq_failsafe_off && max_multiplier != current_multiplier) {
pr_info("Your processor is running at different frequency then its maximum. Aborting.\n");
pr_info("You can use freq_failsafe_off option to disable this check.\n");
return -EINVAL;
}
if (!voltage_failsafe_off && max_voltage != current_voltage) {
pr_info("Your processor is running at different voltage then its maximum. Aborting.\n");
pr_info("You can use voltage_failsafe_off option to disable this check.\n");
return -EINVAL;
}
/* Calc FSB speed */
fsb = cpu_khz / current_multiplier;
#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
/* Check for ACPI processor speed limit */
if (!ignore_acpi_limit && !eps_acpi_init()) {
if (!acpi_processor_get_bios_limit(policy->cpu, &limit)) {
pr_info("ACPI limit %u.%uGHz\n",
limit/1000000,
(limit%1000000)/10000);
eps_acpi_exit(policy);
/* Check if max_multiplier is in BIOS limits */
if (limit && max_multiplier * fsb > limit) {
pr_info("Aborting\n");
return -EINVAL;
}
}
}
#endif
/* Allow the user to set a lower maximum voltage than that reported
* by the processor */
if (brand == EPS_BRAND_C7M && set_max_voltage) {
u32 v;
/* Change mV to something hardware can use */
v = (set_max_voltage - 700) / 16;
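/*
 * Worked example: set_max_voltage = 1004 mV encodes as
 * v = (1004 - 700) / 16 = 19; decoding with v * 16 + 700 (the same
 * formula used for the printouts above) gives back 1004 mV.
 */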
/* Check if voltage is within limits */
if (v >= min_voltage && v <= max_voltage) {
pr_info("Setting %dmV as maximum\n", v * 16 + 700);
max_voltage = v;
}
}
/* Calc number of p-states supported */
if (brand == EPS_BRAND_C7M)
states = max_multiplier - min_multiplier + 1;
else
states = 2;
/* Allocate private data and frequency table for current cpu */
centaur = kzalloc(struct_size(centaur, freq_table, states + 1),
GFP_KERNEL);
if (!centaur)
return -ENOMEM;
eps_cpu[0] = centaur;
/* Copy basic values */
centaur->fsb = fsb;
#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
centaur->bios_limit = limit;
#endif
/* Fill frequency and MSR value table */
f_table = &centaur->freq_table[0];
if (brand != EPS_BRAND_C7M) {
f_table[0].frequency = fsb * min_multiplier;
f_table[0].driver_data = (min_multiplier << 8) | min_voltage;
f_table[1].frequency = fsb * max_multiplier;
f_table[1].driver_data = (max_multiplier << 8) | max_voltage;
f_table[2].frequency = CPUFREQ_TABLE_END;
} else {
k = 0;
step = ((max_voltage - min_voltage) * 256)
/ (max_multiplier - min_multiplier);
for (i = min_multiplier; i <= max_multiplier; i++) {
voltage = (k * step) / 256 + min_voltage;
f_table[k].frequency = fsb * i;
f_table[k].driver_data = (i << 8) | voltage;
k++;
}
f_table[k].frequency = CPUFREQ_TABLE_END;
}
policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */
policy->freq_table = &centaur->freq_table[0];
return 0;
}
static int eps_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
/* Bye */
kfree(eps_cpu[cpu]);
eps_cpu[cpu] = NULL;
return 0;
}
static struct cpufreq_driver eps_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = eps_target,
.init = eps_cpu_init,
.exit = eps_cpu_exit,
.get = eps_get,
.name = "e_powersaver",
.attr = cpufreq_generic_attr,
};
/* This driver will work only on Centaur C7 processors with
* Enhanced SpeedStep/PowerSaver registers */
static const struct x86_cpu_id eps_cpu_id[] = {
X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 6, X86_FEATURE_EST, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, eps_cpu_id);
static int __init eps_init(void)
{
if (!x86_match_cpu(eps_cpu_id) || boot_cpu_data.x86_model < 10)
return -ENODEV;
if (cpufreq_register_driver(&eps_driver))
return -EINVAL;
return 0;
}
static void __exit eps_exit(void)
{
cpufreq_unregister_driver(&eps_driver);
}
/* Allow the user to overclock the machine or to raise the frequency
* after unloading the module */
module_param(freq_failsafe_off, int, 0644);
MODULE_PARM_DESC(freq_failsafe_off, "Disable current vs max frequency check");
module_param(voltage_failsafe_off, int, 0644);
MODULE_PARM_DESC(voltage_failsafe_off, "Disable current vs max voltage check");
#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
module_param(ignore_acpi_limit, int, 0644);
MODULE_PARM_DESC(ignore_acpi_limit, "Don't check ACPI's processor speed limit");
#endif
module_param(set_max_voltage, int, 0644);
MODULE_PARM_DESC(set_max_voltage, "Set maximum CPU voltage (mV) C7-M only");
MODULE_AUTHOR("Rafal Bilski <[email protected]>");
MODULE_DESCRIPTION("Enhanced PowerSaver driver for VIA C7 CPU's.");
MODULE_LICENSE("GPL");
module_init(eps_init);
module_exit(eps_exit);
| linux-master | drivers/cpufreq/e_powersaver.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/cpufreq/cpufreq_ondemand.c
*
* Copyright (C) 2001 Russell King
* (C) 2003 Venkatesh Pallipadi <[email protected]>.
* Jun Nakajima <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpu.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/sched/cpufreq.h>
#include "cpufreq_ondemand.h"
/* On-demand governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (100000)
#define MICRO_FREQUENCY_UP_THRESHOLD (95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
#define MIN_FREQUENCY_UP_THRESHOLD (1)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
static struct od_ops od_ops;
static unsigned int default_powersave_bias;
/*
* Not all CPUs want IO time to be accounted as busy; this depends on how
* efficient idling at a higher frequency/voltage is.
* Pavel Machek says this is not so for various generations of AMD and old
* Intel systems.
* Mike Chan (android.com) claims this is also not true for ARM.
* Because of this, whitelist specific known CPU series by default, and
* leave all others up to the user.
*/
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
/*
* For Intel, Core 2 (model 15) and later have an efficient idle.
*/
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
boot_cpu_data.x86 == 6 &&
boot_cpu_data.x86_model >= 15)
return 1;
#endif
return 0;
}
/*
* Find right freq to be set now with powersave_bias on.
* Returns the freq_hi to be used right now and will set freq_hi_delay_us,
* freq_lo, and freq_lo_delay_us in percpu area for averaging freqs.
*/
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
unsigned int freq_next, unsigned int relation)
{
unsigned int freq_req, freq_reduc, freq_avg;
unsigned int freq_hi, freq_lo;
unsigned int index;
unsigned int delay_hi_us;
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
struct cpufreq_frequency_table *freq_table = policy->freq_table;
if (!freq_table) {
dbs_info->freq_lo = 0;
dbs_info->freq_lo_delay_us = 0;
return freq_next;
}
index = cpufreq_frequency_table_target(policy, freq_next, relation);
freq_req = freq_table[index].frequency;
freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
freq_avg = freq_req - freq_reduc;
/* Find freq bounds for freq_avg in freq_table */
index = cpufreq_table_find_index_h(policy, freq_avg,
relation & CPUFREQ_RELATION_E);
freq_lo = freq_table[index].frequency;
index = cpufreq_table_find_index_l(policy, freq_avg,
relation & CPUFREQ_RELATION_E);
freq_hi = freq_table[index].frequency;
/* Find out how long we have to be in hi and lo freqs */
if (freq_hi == freq_lo) {
dbs_info->freq_lo = 0;
dbs_info->freq_lo_delay_us = 0;
return freq_lo;
}
delay_hi_us = (freq_avg - freq_lo) * dbs_data->sampling_rate;
delay_hi_us += (freq_hi - freq_lo) / 2;
delay_hi_us /= freq_hi - freq_lo;
dbs_info->freq_hi_delay_us = delay_hi_us;
dbs_info->freq_lo = freq_lo;
dbs_info->freq_lo_delay_us = dbs_data->sampling_rate - delay_hi_us;
return freq_hi;
}
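/*
 * Worked example (illustrative numbers): with freq_lo = 1000000 kHz,
 * freq_hi = 2000000 kHz, freq_avg = 1250000 kHz and a sampling rate of
 * 10000 us, delay_hi_us = (250000 * 10000 + 500000) / 1000000 = 2500.
 * Spending 2500 us at freq_hi and 7500 us at freq_lo yields a
 * time-weighted average of exactly freq_avg:
 * (2500 * 2000000 + 7500 * 1000000) / 10000 = 1250000 kHz.
 */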
static void ondemand_powersave_bias_init(struct cpufreq_policy *policy)
{
struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
dbs_info->freq_lo = 0;
}
static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
if (od_tuners->powersave_bias)
freq = od_ops.powersave_bias_target(policy, freq,
CPUFREQ_RELATION_HE);
else if (policy->cur == policy->max)
return;
__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
CPUFREQ_RELATION_LE : CPUFREQ_RELATION_HE);
}
/*
* Every sampling_rate, we check, if current idle time is less than 20%
* (default), then we try to increase frequency. Else, we adjust the frequency
* proportional to load.
*/
static void od_update(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
unsigned int load = dbs_update(policy);
dbs_info->freq_lo = 0;
/* Check for frequency increase */
if (load > dbs_data->up_threshold) {
/* If switching to max speed, apply sampling_down_factor */
if (policy->cur < policy->max)
policy_dbs->rate_mult = dbs_data->sampling_down_factor;
dbs_freq_increase(policy, policy->max);
} else {
/* Calculate the next frequency proportional to load */
unsigned int freq_next, min_f, max_f;
min_f = policy->cpuinfo.min_freq;
max_f = policy->cpuinfo.max_freq;
freq_next = min_f + load * (max_f - min_f) / 100;
/* No longer fully busy, reset rate_mult */
policy_dbs->rate_mult = 1;
if (od_tuners->powersave_bias)
freq_next = od_ops.powersave_bias_target(policy,
freq_next,
CPUFREQ_RELATION_LE);
__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_CE);
}
}
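/*
 * Worked example for the proportional branch above: with
 * min_f = 800000 kHz, max_f = 2400000 kHz and load = 50,
 * freq_next = 800000 + 50 * (2400000 - 800000) / 100 = 1600000 kHz.
 */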
static unsigned int od_dbs_update(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
int sample_type = dbs_info->sample_type;
/* Common NORMAL_SAMPLE setup */
dbs_info->sample_type = OD_NORMAL_SAMPLE;
/*
* OD_SUB_SAMPLE doesn't make sense if sample_delay_ns is 0, so ignore
* it then.
*/
if (sample_type == OD_SUB_SAMPLE && policy_dbs->sample_delay_ns > 0) {
__cpufreq_driver_target(policy, dbs_info->freq_lo,
CPUFREQ_RELATION_HE);
return dbs_info->freq_lo_delay_us;
}
od_update(policy);
if (dbs_info->freq_lo) {
/* Setup SUB_SAMPLE */
dbs_info->sample_type = OD_SUB_SAMPLE;
return dbs_info->freq_hi_delay_us;
}
return dbs_data->sampling_rate * policy_dbs->rate_mult;
}
/************************** sysfs interface ************************/
static struct dbs_governor od_dbs_gov;
static ssize_t io_is_busy_store(struct gov_attr_set *attr_set, const char *buf,
size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
dbs_data->io_is_busy = !!input;
/* we need to re-evaluate prev_cpu_idle */
gov_update_cpu_data(dbs_data);
return count;
}
static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
input < MIN_FREQUENCY_UP_THRESHOLD) {
return -EINVAL;
}
dbs_data->up_threshold = input;
return count;
}
static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct policy_dbs_info *policy_dbs;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
dbs_data->sampling_down_factor = input;
/* Reset down sampling multiplier in case it was active */
list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
/*
* Doing this without locking might lead to using different
* rate_mult values in od_update() and od_dbs_update().
*/
mutex_lock(&policy_dbs->update_mutex);
policy_dbs->rate_mult = 1;
mutex_unlock(&policy_dbs->update_mutex);
}
return count;
}
static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
if (input > 1)
input = 1;
if (input == dbs_data->ignore_nice_load) { /* nothing to do */
return count;
}
dbs_data->ignore_nice_load = input;
/* we need to re-evaluate prev_cpu_idle */
gov_update_cpu_data(dbs_data);
return count;
}
static ssize_t powersave_bias_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
struct policy_dbs_info *policy_dbs;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
if (input > 1000)
input = 1000;
od_tuners->powersave_bias = input;
list_for_each_entry(policy_dbs, &attr_set->policy_list, list)
ondemand_powersave_bias_init(policy_dbs->policy);
return count;
}
gov_show_one_common(sampling_rate);
gov_show_one_common(up_threshold);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(ignore_nice_load);
gov_show_one_common(io_is_busy);
gov_show_one(od, powersave_bias);
gov_attr_rw(sampling_rate);
gov_attr_rw(io_is_busy);
gov_attr_rw(up_threshold);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(ignore_nice_load);
gov_attr_rw(powersave_bias);
static struct attribute *od_attrs[] = {
&sampling_rate.attr,
&up_threshold.attr,
&sampling_down_factor.attr,
&ignore_nice_load.attr,
&powersave_bias.attr,
&io_is_busy.attr,
NULL
};
ATTRIBUTE_GROUPS(od);
/************************** sysfs end ************************/
static struct policy_dbs_info *od_alloc(void)
{
struct od_policy_dbs_info *dbs_info;
dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
return dbs_info ? &dbs_info->policy_dbs : NULL;
}
static void od_free(struct policy_dbs_info *policy_dbs)
{
kfree(to_dbs_info(policy_dbs));
}
static int od_init(struct dbs_data *dbs_data)
{
struct od_dbs_tuners *tuners;
u64 idle_time;
int cpu;
tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
if (!tuners)
return -ENOMEM;
cpu = get_cpu();
idle_time = get_cpu_idle_time_us(cpu, NULL);
put_cpu();
if (idle_time != -1ULL) {
/* Idle micro accounting is supported. Use finer thresholds */
dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
} else {
dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
}
dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
dbs_data->ignore_nice_load = 0;
tuners->powersave_bias = default_powersave_bias;
dbs_data->io_is_busy = should_io_be_busy();
dbs_data->tuners = tuners;
return 0;
}
static void od_exit(struct dbs_data *dbs_data)
{
kfree(dbs_data->tuners);
}
static void od_start(struct cpufreq_policy *policy)
{
struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
dbs_info->sample_type = OD_NORMAL_SAMPLE;
ondemand_powersave_bias_init(policy);
}
static struct od_ops od_ops = {
.powersave_bias_target = generic_powersave_bias_target,
};
static struct dbs_governor od_dbs_gov = {
.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
.kobj_type = { .default_groups = od_groups },
.gov_dbs_update = od_dbs_update,
.alloc = od_alloc,
.free = od_free,
.init = od_init,
.exit = od_exit,
.start = od_start,
};
#define CPU_FREQ_GOV_ONDEMAND (od_dbs_gov.gov)
static void od_set_powersave_bias(unsigned int powersave_bias)
{
unsigned int cpu;
cpumask_var_t done;
if (!alloc_cpumask_var(&done, GFP_KERNEL))
return;
default_powersave_bias = powersave_bias;
cpumask_clear(done);
cpus_read_lock();
for_each_online_cpu(cpu) {
struct cpufreq_policy *policy;
struct policy_dbs_info *policy_dbs;
struct dbs_data *dbs_data;
struct od_dbs_tuners *od_tuners;
if (cpumask_test_cpu(cpu, done))
continue;
policy = cpufreq_cpu_get_raw(cpu);
if (!policy || policy->governor != &CPU_FREQ_GOV_ONDEMAND)
continue;
policy_dbs = policy->governor_data;
if (!policy_dbs)
continue;
cpumask_or(done, done, policy->cpus);
dbs_data = policy_dbs->dbs_data;
od_tuners = dbs_data->tuners;
od_tuners->powersave_bias = default_powersave_bias;
}
cpus_read_unlock();
free_cpumask_var(done);
}
void od_register_powersave_bias_handler(unsigned int (*f)
(struct cpufreq_policy *, unsigned int, unsigned int),
unsigned int powersave_bias)
{
od_ops.powersave_bias_target = f;
od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
void od_unregister_powersave_bias_handler(void)
{
od_ops.powersave_bias_target = generic_powersave_bias_target;
od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
MODULE_AUTHOR("Venkatesh Pallipadi <[email protected]>");
MODULE_AUTHOR("Alexey Starikovskiy <[email protected]>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
struct cpufreq_governor *cpufreq_default_governor(void)
{
return &CPU_FREQ_GOV_ONDEMAND;
}
#endif
cpufreq_governor_init(CPU_FREQ_GOV_ONDEMAND);
cpufreq_governor_exit(CPU_FREQ_GOV_ONDEMAND);
| linux-master | drivers/cpufreq/cpufreq_ondemand.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2007 PA Semi, Inc
*
* Authors: Egor Martovetsky <[email protected]>
* Olof Johansson <[email protected]>
*
* Maintained by: Olof Johansson <[email protected]>
*
* Based on arch/powerpc/platforms/cell/cbe_cpufreq.c:
* (C) Copyright IBM Deutschland Entwicklung GmbH 2005
*/
#include <linux/cpufreq.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <asm/hw_irq.h>
#include <asm/io.h>
#include <asm/time.h>
#include <asm/smp.h>
#include <platforms/pasemi/pasemi.h>
#define SDCASR_REG 0x0100
#define SDCASR_REG_STRIDE 0x1000
#define SDCPWR_CFGA0_REG 0x0100
#define SDCPWR_PWST0_REG 0x0000
#define SDCPWR_GIZTIME_REG 0x0440
/* SDCPWR_GIZTIME_REG fields */
#define SDCPWR_GIZTIME_GR 0x80000000
#define SDCPWR_GIZTIME_LONGLOCK 0x000000ff
/* Offset of ASR registers from SDC base */
#define SDCASR_OFFSET 0x120000
static void __iomem *sdcpwr_mapbase;
static void __iomem *sdcasr_mapbase;
/* Current astate; used when waking up from power savings on
* one core, in case the other core has switched states during
* the idle time.
*/
static int current_astate;
/* We support 5(A0-A4) power states excluding turbo(A5-A6) modes */
static struct cpufreq_frequency_table pas_freqs[] = {
{0, 0, 0},
{0, 1, 0},
{0, 2, 0},
{0, 3, 0},
{0, 4, 0},
{0, 0, CPUFREQ_TABLE_END},
};
/*
* hardware specific functions
*/
static int get_astate_freq(int astate)
{
u32 ret;
ret = in_le32(sdcpwr_mapbase + SDCPWR_CFGA0_REG + (astate * 0x10));
return ret & 0x3f;
}
static int get_cur_astate(int cpu)
{
u32 ret;
ret = in_le32(sdcpwr_mapbase + SDCPWR_PWST0_REG);
ret = (ret >> (cpu * 4)) & 0x7;
return ret;
}
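/*
 * Worked example: PWST0 packs one 4-bit field per core, of which the low
 * 3 bits encode the astate. For cpu = 1 and a register value of 0x42,
 * (0x42 >> 4) & 0x7 = 4, i.e. core 1 is currently in astate A4.
 */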
static int get_gizmo_latency(void)
{
u32 giztime, ret;
giztime = in_le32(sdcpwr_mapbase + SDCPWR_GIZTIME_REG);
/* just provide the upper bound */
if (giztime & SDCPWR_GIZTIME_GR)
ret = (giztime & SDCPWR_GIZTIME_LONGLOCK) * 128000;
else
ret = (giztime & SDCPWR_GIZTIME_LONGLOCK) * 1000;
return ret;
}
static void set_astate(int cpu, unsigned int astate)
{
unsigned long flags;
/* Return if called before init has run */
if (unlikely(!sdcasr_mapbase))
return;
local_irq_save(flags);
out_le32(sdcasr_mapbase + SDCASR_REG + SDCASR_REG_STRIDE*cpu, astate);
local_irq_restore(flags);
}
int check_astate(void)
{
return get_cur_astate(hard_smp_processor_id());
}
void restore_astate(int cpu)
{
set_astate(cpu, current_astate);
}
/*
* cpufreq functions
*/
static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *pos;
const u32 *max_freqp;
u32 max_freq;
int cur_astate, idx;
struct resource res;
struct device_node *cpu, *dn;
int err = -ENODEV;
cpu = of_get_cpu_node(policy->cpu, NULL);
if (!cpu)
goto out;
max_freqp = of_get_property(cpu, "clock-frequency", NULL);
of_node_put(cpu);
if (!max_freqp) {
err = -EINVAL;
goto out;
}
/* we need the freq in kHz */
max_freq = *max_freqp / 1000;
dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
if (!dn)
dn = of_find_compatible_node(NULL, NULL,
"pasemi,pwrficient-sdc");
if (!dn)
goto out;
err = of_address_to_resource(dn, 0, &res);
of_node_put(dn);
if (err)
goto out;
sdcasr_mapbase = ioremap(res.start + SDCASR_OFFSET, 0x2000);
if (!sdcasr_mapbase) {
err = -EINVAL;
goto out;
}
dn = of_find_compatible_node(NULL, NULL, "1682m-gizmo");
if (!dn)
dn = of_find_compatible_node(NULL, NULL,
"pasemi,pwrficient-gizmo");
if (!dn) {
err = -ENODEV;
goto out_unmap_sdcasr;
}
err = of_address_to_resource(dn, 0, &res);
of_node_put(dn);
if (err)
goto out_unmap_sdcasr;
sdcpwr_mapbase = ioremap(res.start, 0x1000);
if (!sdcpwr_mapbase) {
err = -EINVAL;
goto out_unmap_sdcasr;
}
pr_debug("init cpufreq on CPU %d\n", policy->cpu);
pr_debug("max clock-frequency is at %u kHz\n", max_freq);
pr_debug("initializing frequency table\n");
/* initialize frequency table */
cpufreq_for_each_entry_idx(pos, pas_freqs, idx) {
pos->frequency = get_astate_freq(pos->driver_data) * 100000;
pr_debug("%d: %d\n", idx, pos->frequency);
}
cur_astate = get_cur_astate(policy->cpu);
pr_debug("current astate is at %d\n",cur_astate);
policy->cur = pas_freqs[cur_astate].frequency;
ppc_proc_freq = policy->cur * 1000ul;
cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
return 0;
out_unmap_sdcasr:
iounmap(sdcasr_mapbase);
out:
return err;
}
static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
/*
* We don't support CPU hotplug. Don't unmap after the system
* has already made it to a running state.
*/
if (system_state >= SYSTEM_RUNNING)
return 0;
if (sdcasr_mapbase)
iounmap(sdcasr_mapbase);
if (sdcpwr_mapbase)
iounmap(sdcpwr_mapbase);
return 0;
}
static int pas_cpufreq_target(struct cpufreq_policy *policy,
unsigned int pas_astate_new)
{
int i;
pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
policy->cpu,
pas_freqs[pas_astate_new].frequency,
pas_freqs[pas_astate_new].driver_data);
current_astate = pas_astate_new;
for_each_online_cpu(i)
set_astate(i, pas_astate_new);
ppc_proc_freq = pas_freqs[pas_astate_new].frequency * 1000ul;
return 0;
}
static struct cpufreq_driver pas_cpufreq_driver = {
.name = "pas-cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
.init = pas_cpufreq_cpu_init,
.exit = pas_cpufreq_cpu_exit,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = pas_cpufreq_target,
.attr = cpufreq_generic_attr,
};
/*
* module init and destroy
*/
static int __init pas_cpufreq_init(void)
{
if (!of_machine_is_compatible("PA6T-1682M") &&
!of_machine_is_compatible("pasemi,pwrficient"))
return -ENODEV;
return cpufreq_register_driver(&pas_cpufreq_driver);
}
static void __exit pas_cpufreq_exit(void)
{
cpufreq_unregister_driver(&pas_cpufreq_driver);
}
module_init(pas_cpufreq_init);
module_exit(pas_cpufreq_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Egor Martovetsky <[email protected]>, Olof Johansson <[email protected]>");
| linux-master | drivers/cpufreq/pasemi-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* System Control and Power Interface (SCPI) based CPUFreq Interface driver
*
* Copyright (C) 2015 ARM Ltd.
* Sudeep Holla <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/scpi_protocol.h>
#include <linux/slab.h>
#include <linux/types.h>
struct scpi_data {
struct clk *clk;
struct device *cpu_dev;
};
static struct scpi_ops *scpi_ops;
static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
struct scpi_data *priv = policy->driver_data;
unsigned long rate = clk_get_rate(priv->clk);
return rate / 1000;
}
static int
scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
u64 rate = policy->freq_table[index].frequency * 1000;
struct scpi_data *priv = policy->driver_data;
int ret;
ret = clk_set_rate(priv->clk, rate);
if (ret)
return ret;
if (clk_get_rate(priv->clk) != rate)
return -EIO;
return 0;
}
static int
scpi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
int cpu, domain, tdomain;
struct device *tcpu_dev;
domain = scpi_ops->device_domain_id(cpu_dev);
if (domain < 0)
return domain;
for_each_possible_cpu(cpu) {
if (cpu == cpu_dev->id)
continue;
tcpu_dev = get_cpu_device(cpu);
if (!tcpu_dev)
continue;
tdomain = scpi_ops->device_domain_id(tcpu_dev);
if (tdomain == domain)
cpumask_set_cpu(cpu, cpumask);
}
return 0;
}
static int scpi_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
unsigned int latency;
struct device *cpu_dev;
struct scpi_data *priv;
struct cpufreq_frequency_table *freq_table;
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
pr_err("failed to get cpu%d device\n", policy->cpu);
return -ENODEV;
}
ret = scpi_ops->add_opps_to_device(cpu_dev);
if (ret) {
dev_warn(cpu_dev, "failed to add opps to the device\n");
return ret;
}
ret = scpi_get_sharing_cpus(cpu_dev, policy->cpus);
if (ret) {
dev_warn(cpu_dev, "failed to get sharing cpumask\n");
return ret;
}
ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
if (ret) {
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
return ret;
}
ret = dev_pm_opp_get_opp_count(cpu_dev);
if (ret <= 0) {
dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
ret = -EPROBE_DEFER;
goto out_free_opp;
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
goto out_free_opp;
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
goto out_free_priv;
}
priv->cpu_dev = cpu_dev;
priv->clk = clk_get(cpu_dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d\n",
__func__, cpu_dev->id);
ret = PTR_ERR(priv->clk);
goto out_free_cpufreq_table;
}
policy->driver_data = priv;
policy->freq_table = freq_table;
/* scpi allows DVFS request for any domain from any CPU */
policy->dvfs_possible_from_any_cpu = true;
latency = scpi_ops->get_transition_latency(cpu_dev);
if (!latency)
latency = CPUFREQ_ETERNAL;
policy->cpuinfo.transition_latency = latency;
policy->fast_switch_possible = false;
return 0;
out_free_cpufreq_table:
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_priv:
kfree(priv);
out_free_opp:
dev_pm_opp_remove_all_dynamic(cpu_dev);
return ret;
}
static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
{
struct scpi_data *priv = policy->driver_data;
clk_put(priv->clk);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
kfree(priv);
return 0;
}
static struct cpufreq_driver scpi_cpufreq_driver = {
.name = "scpi-cpufreq",
.flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.attr = cpufreq_generic_attr,
.get = scpi_cpufreq_get_rate,
.init = scpi_cpufreq_init,
.exit = scpi_cpufreq_exit,
.target_index = scpi_cpufreq_set_target,
.register_em = cpufreq_register_em_with_opp,
};
static int scpi_cpufreq_probe(struct platform_device *pdev)
{
int ret;
scpi_ops = get_scpi_ops();
if (!scpi_ops)
return -EIO;
ret = cpufreq_register_driver(&scpi_cpufreq_driver);
if (ret)
dev_err(&pdev->dev, "%s: registering cpufreq failed, err: %d\n",
__func__, ret);
return ret;
}
static void scpi_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&scpi_cpufreq_driver);
scpi_ops = NULL;
}
static struct platform_driver scpi_cpufreq_platdrv = {
.driver = {
.name = "scpi-cpufreq",
},
.probe = scpi_cpufreq_probe,
.remove_new = scpi_cpufreq_remove,
};
module_platform_driver(scpi_cpufreq_platdrv);
MODULE_ALIAS("platform:scpi-cpufreq");
MODULE_AUTHOR("Sudeep Holla <[email protected]>");
MODULE_DESCRIPTION("ARM SCPI CPUFreq interface driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/cpufreq/scpi-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* drivers/cpufreq/cpufreq_stats.c
*
* Copyright (C) 2003-2004 Venkatesh Pallipadi <[email protected]>.
* (C) 2004 Zou Nan hai <[email protected]>.
*/
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
struct cpufreq_stats {
unsigned int total_trans;
unsigned long long last_time;
unsigned int max_state;
unsigned int state_num;
unsigned int last_index;
u64 *time_in_state;
unsigned int *freq_table;
unsigned int *trans_table;
/* Deferred reset */
unsigned int reset_pending;
unsigned long long reset_time;
};
static void cpufreq_stats_update(struct cpufreq_stats *stats,
unsigned long long time)
{
unsigned long long cur_time = local_clock();
stats->time_in_state[stats->last_index] += cur_time - time;
stats->last_time = cur_time;
}
static void cpufreq_stats_reset_table(struct cpufreq_stats *stats)
{
unsigned int count = stats->max_state;
memset(stats->time_in_state, 0, count * sizeof(u64));
memset(stats->trans_table, 0, count * count * sizeof(int));
stats->last_time = local_clock();
stats->total_trans = 0;
/* Adjust for the time elapsed since reset was requested */
WRITE_ONCE(stats->reset_pending, 0);
/*
* Prevent the reset_time read from being reordered before the
* reset_pending accesses in cpufreq_stats_record_transition().
*/
smp_rmb();
cpufreq_stats_update(stats, READ_ONCE(stats->reset_time));
}
static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
struct cpufreq_stats *stats = policy->stats;
if (READ_ONCE(stats->reset_pending))
return sprintf(buf, "%d\n", 0);
else
return sprintf(buf, "%u\n", stats->total_trans);
}
cpufreq_freq_attr_ro(total_trans);
static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
struct cpufreq_stats *stats = policy->stats;
bool pending = READ_ONCE(stats->reset_pending);
unsigned long long time;
ssize_t len = 0;
int i;
for (i = 0; i < stats->state_num; i++) {
if (pending) {
if (i == stats->last_index) {
/*
* Prevent the reset_time read from occurring
* before the reset_pending read above.
*/
smp_rmb();
time = local_clock() - READ_ONCE(stats->reset_time);
} else {
time = 0;
}
} else {
time = stats->time_in_state[i];
if (i == stats->last_index)
time += local_clock() - stats->last_time;
}
len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
nsec_to_clock_t(time));
}
return len;
}
cpufreq_freq_attr_ro(time_in_state);
/* We don't care what is written to the attribute */
static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
size_t count)
{
struct cpufreq_stats *stats = policy->stats;
/*
* Defer resetting of stats to cpufreq_stats_record_transition() to
* avoid races.
*/
WRITE_ONCE(stats->reset_time, local_clock());
/*
* The memory barrier below is to prevent the readers of reset_time from
* seeing a stale or partially updated value.
*/
smp_wmb();
WRITE_ONCE(stats->reset_pending, 1);
return count;
}
cpufreq_freq_attr_wo(reset);
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
struct cpufreq_stats *stats = policy->stats;
bool pending = READ_ONCE(stats->reset_pending);
ssize_t len = 0;
int i, j, count;
len += sysfs_emit_at(buf, len, " From : To\n");
len += sysfs_emit_at(buf, len, " : ");
for (i = 0; i < stats->state_num; i++) {
if (len >= PAGE_SIZE)
break;
len += sysfs_emit_at(buf, len, "%9u ", stats->freq_table[i]);
}
if (len >= PAGE_SIZE)
return PAGE_SIZE;
len += sysfs_emit_at(buf, len, "\n");
for (i = 0; i < stats->state_num; i++) {
if (len >= PAGE_SIZE)
break;
len += sysfs_emit_at(buf, len, "%9u: ", stats->freq_table[i]);
for (j = 0; j < stats->state_num; j++) {
if (len >= PAGE_SIZE)
break;
if (pending)
count = 0;
else
count = stats->trans_table[i * stats->max_state + j];
len += sysfs_emit_at(buf, len, "%9u ", count);
}
if (len >= PAGE_SIZE)
break;
len += sysfs_emit_at(buf, len, "\n");
}
if (len >= PAGE_SIZE) {
pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
return -EFBIG;
}
return len;
}
cpufreq_freq_attr_ro(trans_table);
static struct attribute *default_attrs[] = {
&total_trans.attr,
&time_in_state.attr,
&reset.attr,
&trans_table.attr,
NULL
};
static const struct attribute_group stats_attr_group = {
.attrs = default_attrs,
.name = "stats"
};
static int freq_table_get_index(struct cpufreq_stats *stats, unsigned int freq)
{
int index;
for (index = 0; index < stats->max_state; index++)
if (stats->freq_table[index] == freq)
return index;
return -1;
}
void cpufreq_stats_free_table(struct cpufreq_policy *policy)
{
struct cpufreq_stats *stats = policy->stats;
/* Already freed */
if (!stats)
return;
pr_debug("%s: Free stats table\n", __func__);
sysfs_remove_group(&policy->kobj, &stats_attr_group);
kfree(stats->time_in_state);
kfree(stats);
policy->stats = NULL;
}
void cpufreq_stats_create_table(struct cpufreq_policy *policy)
{
unsigned int i = 0, count;
struct cpufreq_stats *stats;
unsigned int alloc_size;
struct cpufreq_frequency_table *pos;
count = cpufreq_table_count_valid_entries(policy);
if (!count)
return;
/* stats already initialized */
if (policy->stats)
return;
stats = kzalloc(sizeof(*stats), GFP_KERNEL);
if (!stats)
return;
alloc_size = count * sizeof(int) + count * sizeof(u64);
alloc_size += count * count * sizeof(int);
/* Allocate memory for time_in_state/freq_table/trans_table in one go */
stats->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
if (!stats->time_in_state)
goto free_stat;
stats->freq_table = (unsigned int *)(stats->time_in_state + count);
stats->trans_table = stats->freq_table + count;
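/*
 * Layout of the single allocation above (count == max_state):
 *   time_in_state: count * sizeof(u64)
 *   freq_table:    count * sizeof(unsigned int)
 *   trans_table:   count * count * sizeof(unsigned int)
 * so the one kfree(stats->time_in_state) in the error/free paths releases
 * all three arrays at once.
 */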
stats->max_state = count;
/* Find valid-unique entries */
cpufreq_for_each_valid_entry(pos, policy->freq_table)
if (policy->freq_table_sorted != CPUFREQ_TABLE_UNSORTED ||
freq_table_get_index(stats, pos->frequency) == -1)
stats->freq_table[i++] = pos->frequency;
stats->state_num = i;
stats->last_time = local_clock();
stats->last_index = freq_table_get_index(stats, policy->cur);
policy->stats = stats;
if (!sysfs_create_group(&policy->kobj, &stats_attr_group))
return;
/* We failed, release resources */
policy->stats = NULL;
kfree(stats->time_in_state);
free_stat:
kfree(stats);
}
void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
unsigned int new_freq)
{
struct cpufreq_stats *stats = policy->stats;
int old_index, new_index;
if (unlikely(!stats))
return;
if (unlikely(READ_ONCE(stats->reset_pending)))
cpufreq_stats_reset_table(stats);
old_index = stats->last_index;
new_index = freq_table_get_index(stats, new_freq);
/* We can't do stats->time_in_state[-1]= .. */
if (unlikely(old_index == -1 || new_index == -1 || old_index == new_index))
return;
cpufreq_stats_update(stats, stats->last_time);
stats->last_index = new_index;
stats->trans_table[old_index * stats->max_state + new_index]++;
stats->total_trans++;
}
| linux-master | drivers/cpufreq/cpufreq_stats.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Abstract code for CPUFreq governor tunable sysfs attributes.
*
* Copyright (C) 2016, Intel Corporation
* Author: Rafael J. Wysocki <[email protected]>
*/
#include "cpufreq_governor.h"
static inline struct governor_attr *to_gov_attr(struct attribute *attr)
{
return container_of(attr, struct governor_attr, attr);
}
static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct governor_attr *gattr = to_gov_attr(attr);
return gattr->show(to_gov_attr_set(kobj), buf);
}
static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
struct gov_attr_set *attr_set = to_gov_attr_set(kobj);
struct governor_attr *gattr = to_gov_attr(attr);
int ret;
mutex_lock(&attr_set->update_lock);
ret = attr_set->usage_count ? gattr->store(attr_set, buf, count) : -EBUSY;
mutex_unlock(&attr_set->update_lock);
return ret;
}
const struct sysfs_ops governor_sysfs_ops = {
.show = governor_show,
.store = governor_store,
};
EXPORT_SYMBOL_GPL(governor_sysfs_ops);
void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node)
{
INIT_LIST_HEAD(&attr_set->policy_list);
mutex_init(&attr_set->update_lock);
attr_set->usage_count = 1;
list_add(list_node, &attr_set->policy_list);
}
EXPORT_SYMBOL_GPL(gov_attr_set_init);
void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node)
{
mutex_lock(&attr_set->update_lock);
attr_set->usage_count++;
list_add(list_node, &attr_set->policy_list);
mutex_unlock(&attr_set->update_lock);
}
EXPORT_SYMBOL_GPL(gov_attr_set_get);
unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node)
{
unsigned int count;
mutex_lock(&attr_set->update_lock);
list_del(list_node);
count = --attr_set->usage_count;
mutex_unlock(&attr_set->update_lock);
if (count)
return count;
mutex_destroy(&attr_set->update_lock);
kobject_put(&attr_set->kobj);
return 0;
}
EXPORT_SYMBOL_GPL(gov_attr_set_put);
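/*
 * Illustrative usage sketch (hypothetical governor code, not part of this
 * file): the first policy using a tunable set initializes it, later
 * policies sharing the same tunables take references, and the backing
 * kobject is released once the last reference is dropped.
 */
struct my_tunables {
	struct gov_attr_set attr_set;
	unsigned int sampling_rate;	/* example tunable */
};

struct my_policy_data {
	struct my_tunables *tunables;
	struct list_head list;		/* node on attr_set.policy_list */
};

static void my_attach(struct my_policy_data *pd, bool first)
{
	if (first)
		gov_attr_set_init(&pd->tunables->attr_set, &pd->list);
	else
		gov_attr_set_get(&pd->tunables->attr_set, &pd->list);
}

static void my_detach(struct my_policy_data *pd)
{
	/* Returns the remaining usage count; zero means the set is gone */
	if (!gov_attr_set_put(&pd->tunables->attr_set, &pd->list))
		pd->tunables = NULL;
}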
| linux-master | drivers/cpufreq/cpufreq_governor_attr_set.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* intel_pstate.c: Native P state management for Intel processors
*
* (C) Copyright 2012 Intel Corporation
* Author: Dirk Brandewie <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched/cpufreq.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <linux/pm_qos.h>
#include <trace/events/power.h>
#include <asm/cpu.h>
#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
#include "../drivers/thermal/intel/thermal_interrupt.h"
#define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC)
#define INTEL_CPUFREQ_TRANSITION_LATENCY 20000
#define INTEL_CPUFREQ_TRANSITION_DELAY_HWP 5000
#define INTEL_CPUFREQ_TRANSITION_DELAY 500
#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
#endif
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))
#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)
static inline int32_t mul_fp(int32_t x, int32_t y)
{
return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}
static inline int32_t div_fp(s64 x, s64 y)
{
return div64_s64((int64_t)x << FRAC_BITS, y);
}
static inline int ceiling_fp(int32_t x)
{
int mask, ret;
ret = fp_toint(x);
mask = (1 << FRAC_BITS) - 1;
if (x & mask)
ret += 1;
return ret;
}
static inline u64 mul_ext_fp(u64 x, u64 y)
{
return (x * y) >> EXT_FRAC_BITS;
}
static inline u64 div_ext_fp(u64 x, u64 y)
{
return div64_u64(x << EXT_FRAC_BITS, y);
}
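/*
 * Worked examples for the FRAC_BITS == 8 fixed-point helpers above:
 * int_tofp(3) == 768 (0x300) and fp_toint(768) == 3;
 * mul_fp(int_tofp(3), int_tofp(2)) == int_tofp(6) == 1536;
 * div_fp(1, 4) == 64, i.e. 0.25 in fixed point;
 * ceiling_fp(int_tofp(2) + 1) == 3, since the fractional bits are nonzero.
 */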
/**
* struct sample - Store performance sample
* @core_avg_perf: Ratio of APERF/MPERF which is the actual average
* performance during last sample period
* @busy_scaled: Scaled busy value which is used to calculate next
* P state. This can be different than core_avg_perf
* to account for cpu idle period
* @aperf: Difference of actual performance frequency clock count
* read from APERF MSR between last and current sample
* @mperf: Difference of maximum performance frequency clock count
* read from MPERF MSR between last and current sample
* @tsc: Difference of time stamp counter between last and
* current sample
* @time: Current time from scheduler
*
* This structure is used in the cpudata structure to store performance sample
* data for choosing next P State.
*/
struct sample {
int32_t core_avg_perf;
int32_t busy_scaled;
u64 aperf;
u64 mperf;
u64 tsc;
u64 time;
};
/**
* struct pstate_data - Store P state data
* @current_pstate: Current requested P state
* @min_pstate: Min P state possible for this platform
* @max_pstate: Max P state possible for this platform
* @max_pstate_physical:This is physical Max P state for a processor
* This can be higher than the max_pstate which can
* be limited by platform thermal design power limits
* @perf_ctl_scaling: PERF_CTL P-state to frequency scaling factor
* @scaling: Scaling factor between performance and frequency
* @turbo_pstate: Max Turbo P state possible for this platform
* @min_freq: @min_pstate frequency in cpufreq units
* @max_freq: @max_pstate frequency in cpufreq units
* @turbo_freq: @turbo_pstate frequency in cpufreq units
*
* Stores the per cpu model P state limits and current P state.
*/
struct pstate_data {
int current_pstate;
int min_pstate;
int max_pstate;
int max_pstate_physical;
int perf_ctl_scaling;
int scaling;
int turbo_pstate;
unsigned int min_freq;
unsigned int max_freq;
unsigned int turbo_freq;
};
/**
* struct vid_data - Stores voltage information data
* @min: VID data for this platform corresponding to
* the lowest P state
* @max: VID data corresponding to the highest P State.
* @turbo: VID data for turbo P state
* @ratio: Ratio of (vid max - vid min) /
* (max P state - Min P State)
*
* Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling)
* This data is used in Atom platforms, where in addition to target P state,
* the voltage data needs to be specified to select next P State.
*/
struct vid_data {
int min;
int max;
int turbo;
int32_t ratio;
};
/**
* struct global_params - Global parameters, mostly tunable via sysfs.
* @no_turbo: Whether or not to use turbo P-states.
* @turbo_disabled: Whether or not turbo P-states are available at all,
* based on the MSR_IA32_MISC_ENABLE value and whether or
* not the maximum reported turbo P-state is different from
* the maximum reported non-turbo one.
* @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq.
* @min_perf_pct: Minimum capacity limit in percent of the maximum turbo
* P-state capacity.
* @max_perf_pct: Maximum capacity limit in percent of the maximum turbo
* P-state capacity.
*/
struct global_params {
bool no_turbo;
bool turbo_disabled;
bool turbo_disabled_mf;
int max_perf_pct;
int min_perf_pct;
};
/**
* struct cpudata - Per CPU instance data storage
* @cpu: CPU number for this instance data
* @policy: CPUFreq policy value
* @update_util: CPUFreq utility callback information
* @update_util_set: CPUFreq utility callback is set
* @iowait_boost: iowait-related boost fraction
* @last_update: Time of the last update.
* @pstate: Stores P state limits for this CPU
* @vid: Stores VID limits for this CPU
* @last_sample_time: Last Sample time
* @aperf_mperf_shift: APERF vs MPERF counting frequency difference
* @prev_aperf: Last APERF value read from APERF MSR
* @prev_mperf: Last MPERF value read from MPERF MSR
* @prev_tsc: Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: IO wait time difference between the last and
 *			current sample
* @sample: Storage for storing last Sample data
* @min_perf_ratio: Minimum capacity in terms of PERF or HWP ratios
* @max_perf_ratio: Maximum capacity in terms of PERF or HWP ratios
* @acpi_perf_data: Stores ACPI perf information read from _PSS
* @valid_pss_table: Set to true for valid ACPI _PSS entries found
* @epp_powersave: Last saved HWP energy performance preference
* (EPP) or energy performance bias (EPB),
* when policy switched to performance
* @epp_policy: Last saved policy used to set EPP/EPB
* @epp_default: Power on default HWP energy performance
* preference/bias
 * @epp_cached:		Cached HWP energy-performance preference value
* @hwp_req_cached: Cached value of the last HWP Request MSR
* @hwp_cap_cached: Cached value of the last HWP Capabilities MSR
* @last_io_update: Last time when IO wake flag was set
* @sched_flags: Store scheduler flags for possible cross CPU update
* @hwp_boost_min: Last HWP boosted min performance
* @suspended: Whether or not the driver has been suspended.
* @hwp_notify_work: workqueue for HWP notifications.
*
* This structure stores per CPU instance data for all CPUs.
*/
struct cpudata {
int cpu;
unsigned int policy;
struct update_util_data update_util;
bool update_util_set;
struct pstate_data pstate;
struct vid_data vid;
u64 last_update;
u64 last_sample_time;
u64 aperf_mperf_shift;
u64 prev_aperf;
u64 prev_mperf;
u64 prev_tsc;
u64 prev_cummulative_iowait;
struct sample sample;
int32_t min_perf_ratio;
int32_t max_perf_ratio;
#ifdef CONFIG_ACPI
struct acpi_processor_performance acpi_perf_data;
bool valid_pss_table;
#endif
unsigned int iowait_boost;
s16 epp_powersave;
s16 epp_policy;
s16 epp_default;
s16 epp_cached;
u64 hwp_req_cached;
u64 hwp_cap_cached;
u64 last_io_update;
unsigned int sched_flags;
u32 hwp_boost_min;
bool suspended;
struct delayed_work hwp_notify_work;
};
static struct cpudata **all_cpu_data;
/**
* struct pstate_funcs - Per CPU model specific callbacks
* @get_max: Callback to get maximum non turbo effective P state
* @get_max_physical: Callback to get maximum non turbo physical P state
* @get_min: Callback to get minimum P state
* @get_turbo: Callback to get turbo P state
* @get_scaling: Callback to get frequency scaling factor
* @get_cpu_scaling: Get frequency scaling factor for a given cpu
* @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference
* @get_val: Callback to convert P state to actual MSR write value
* @get_vid: Callback to get VID data for Atom platforms
*
 * Core and Atom CPU models have different ways to get P state limits. This
 * structure is used to store those callbacks.
*/
struct pstate_funcs {
int (*get_max)(int cpu);
int (*get_max_physical)(int cpu);
int (*get_min)(int cpu);
int (*get_turbo)(int cpu);
int (*get_scaling)(void);
int (*get_cpu_scaling)(int cpu);
int (*get_aperf_mperf_shift)(void);
u64 (*get_val)(struct cpudata*, int pstate);
void (*get_vid)(struct cpudata *);
};
static struct pstate_funcs pstate_funcs __read_mostly;
static int hwp_active __read_mostly;
static int hwp_mode_bdw __read_mostly;
static bool per_cpu_limits __read_mostly;
static bool hwp_boost __read_mostly;
static bool hwp_forced __read_mostly;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
#define HYBRID_SCALING_FACTOR 78741
static inline int core_get_scaling(void)
{
return 100000;
}
#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif
static struct global_params global;
static DEFINE_MUTEX(intel_pstate_driver_lock);
static DEFINE_MUTEX(intel_pstate_limits_lock);
#ifdef CONFIG_ACPI
static bool intel_pstate_acpi_pm_profile_server(void)
{
if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
return true;
return false;
}
static bool intel_pstate_get_ppc_enable_status(void)
{
if (intel_pstate_acpi_pm_profile_server())
return true;
return acpi_ppc;
}
#ifdef CONFIG_ACPI_CPPC_LIB
/* The work item is needed to avoid CPU hotplug locking issues */
static void intel_pstate_sched_itmt_work_fn(struct work_struct *work)
{
sched_set_itmt_support();
}
static DECLARE_WORK(sched_itmt_work, intel_pstate_sched_itmt_work_fn);
#define CPPC_MAX_PERF U8_MAX
static void intel_pstate_set_itmt_prio(int cpu)
{
struct cppc_perf_caps cppc_perf;
static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
int ret;
ret = cppc_get_perf_caps(cpu, &cppc_perf);
if (ret)
return;
	/*
	 * On some systems with overclocking enabled, CPPC.highest_perf is
	 * hardcoded to 0xff. In that case it cannot be used to enable ITMT,
	 * so look at MSR_HWP_CAPABILITIES bits [8:0] instead.
	 */
if (cppc_perf.highest_perf == CPPC_MAX_PERF)
cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
/*
* The priorities can be set regardless of whether or not
* sched_set_itmt_support(true) has been called and it is valid to
* update them at any time after it has been called.
*/
sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);
if (max_highest_perf <= min_highest_perf) {
if (cppc_perf.highest_perf > max_highest_perf)
max_highest_perf = cppc_perf.highest_perf;
if (cppc_perf.highest_perf < min_highest_perf)
min_highest_perf = cppc_perf.highest_perf;
if (max_highest_perf > min_highest_perf) {
/*
* This code can be run during CPU online under the
* CPU hotplug locks, so sched_set_itmt_support()
* cannot be called from here. Queue up a work item
* to invoke it.
*/
schedule_work(&sched_itmt_work);
}
}
}
static int intel_pstate_get_cppc_guaranteed(int cpu)
{
struct cppc_perf_caps cppc_perf;
int ret;
ret = cppc_get_perf_caps(cpu, &cppc_perf);
if (ret)
return ret;
if (cppc_perf.guaranteed_perf)
return cppc_perf.guaranteed_perf;
return cppc_perf.nominal_perf;
}
static int intel_pstate_cppc_get_scaling(int cpu)
{
struct cppc_perf_caps cppc_perf;
int ret;
ret = cppc_get_perf_caps(cpu, &cppc_perf);
/*
* If the nominal frequency and the nominal performance are not
* zero and the ratio between them is not 100, return the hybrid
* scaling factor.
*/
if (!ret && cppc_perf.nominal_perf && cppc_perf.nominal_freq &&
cppc_perf.nominal_perf * 100 != cppc_perf.nominal_freq)
return HYBRID_SCALING_FACTOR;
return core_get_scaling();
}
#else /* CONFIG_ACPI_CPPC_LIB */
static inline void intel_pstate_set_itmt_prio(int cpu)
{
}
#endif /* CONFIG_ACPI_CPPC_LIB */
static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
struct cpudata *cpu;
int ret;
int i;
if (hwp_active) {
intel_pstate_set_itmt_prio(policy->cpu);
return;
}
if (!intel_pstate_get_ppc_enable_status())
return;
cpu = all_cpu_data[policy->cpu];
ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
policy->cpu);
if (ret)
return;
/*
* Check if the control value in _PSS is for PERF_CTL MSR, which should
* guarantee that the states returned by it map to the states in our
* list directly.
*/
if (cpu->acpi_perf_data.control_register.space_id !=
ACPI_ADR_SPACE_FIXED_HARDWARE)
goto err;
	/*
	 * If there is only one entry in _PSS, ignore it and continue as
	 * usual without taking it into account.
	 */
if (cpu->acpi_perf_data.state_count < 2)
goto err;
pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n",
(i == cpu->acpi_perf_data.state ? '*' : ' '), i,
(u32) cpu->acpi_perf_data.states[i].core_frequency,
(u32) cpu->acpi_perf_data.states[i].power,
(u32) cpu->acpi_perf_data.states[i].control);
}
cpu->valid_pss_table = true;
pr_debug("_PPC limits will be enforced\n");
return;
err:
cpu->valid_pss_table = false;
acpi_processor_unregister_performance(policy->cpu);
}
static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
struct cpudata *cpu;
cpu = all_cpu_data[policy->cpu];
if (!cpu->valid_pss_table)
return;
acpi_processor_unregister_performance(policy->cpu);
}
#else /* CONFIG_ACPI */
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}
static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}
static inline bool intel_pstate_acpi_pm_profile_server(void)
{
return false;
}
#endif /* CONFIG_ACPI */
#ifndef CONFIG_ACPI_CPPC_LIB
static inline int intel_pstate_get_cppc_guaranteed(int cpu)
{
return -ENOTSUPP;
}
static int intel_pstate_cppc_get_scaling(int cpu)
{
return core_get_scaling();
}
#endif /* CONFIG_ACPI_CPPC_LIB */
/**
* intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels.
* @cpu: Target CPU.
*
* On hybrid processors, HWP may expose more performance levels than there are
* P-states accessible through the PERF_CTL interface. If that happens, the
* scaling factor between HWP performance levels and CPU frequency will be less
* than the scaling factor between P-state values and CPU frequency.
*
* In that case, adjust the CPU parameters used in computations accordingly.
*/
static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
{
int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
int scaling = cpu->pstate.scaling;
pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling,
perf_ctl_scaling);
cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
perf_ctl_scaling);
cpu->pstate.max_pstate_physical =
DIV_ROUND_UP(perf_ctl_max_phys * perf_ctl_scaling,
scaling);
cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
	/*
	 * Convert the min P-state value retrieved via pstate_funcs.get_min()
	 * to the effective range of HWP performance levels.
	 */
cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling);
}
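/*
 * Illustrative sketch, not part of the driver: with the hybrid scaling
 * factor of 78741 (78.741 MHz per HWP level) and a PERF_CTL scaling of
 * 100000, an HWP highest-performance level of 57 maps to
 * rounddown(57 * 78741, 100000) == 4400000 kHz, i.e. the 4.4 GHz PERF_CTL
 * grid point just below 57 * 78741 kHz.
 */
static inline unsigned int hybrid_turbo_freq_example(void)
{
	return rounddown(57 * HYBRID_SCALING_FACTOR, 100000);	/* 4400000 */
}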
static inline void update_turbo_state(void)
{
u64 misc_en;
struct cpudata *cpu;
cpu = all_cpu_data[0];
rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
global.turbo_disabled =
(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}
static int min_perf_pct_min(void)
{
struct cpudata *cpu = all_cpu_data[0];
int turbo_pstate = cpu->pstate.turbo_pstate;
return turbo_pstate ?
(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
}
static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
{
u64 epb;
int ret;
if (!boot_cpu_has(X86_FEATURE_EPB))
return -ENXIO;
ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
if (ret)
return (s16)ret;
return (s16)(epb & 0x0f);
}
static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
s16 epp;
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * When hwp_req_data is 0, the caller did not read
		 * MSR_HWP_REQUEST, so read it here to get the EPP.
		 */
if (!hwp_req_data) {
epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
&hwp_req_data);
if (epp)
return epp;
}
epp = (hwp_req_data >> 24) & 0xff;
} else {
/* When there is no EPP present, HWP uses EPB settings */
epp = intel_pstate_get_epb(cpu_data);
}
return epp;
}
static int intel_pstate_set_epb(int cpu, s16 pref)
{
u64 epb;
int ret;
if (!boot_cpu_has(X86_FEATURE_EPB))
return -ENXIO;
ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
if (ret)
return ret;
epb = (epb & ~0x0f) | pref;
wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
return 0;
}
/*
* EPP/EPB display strings corresponding to EPP index in the
* energy_perf_strings[]
* index String
*-------------------------------------
* 0 default
* 1 performance
* 2 balance_performance
* 3 balance_power
* 4 power
*/
enum energy_perf_value_index {
EPP_INDEX_DEFAULT = 0,
EPP_INDEX_PERFORMANCE,
EPP_INDEX_BALANCE_PERFORMANCE,
EPP_INDEX_BALANCE_POWERSAVE,
EPP_INDEX_POWERSAVE,
};
static const char * const energy_perf_strings[] = {
[EPP_INDEX_DEFAULT] = "default",
[EPP_INDEX_PERFORMANCE] = "performance",
[EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
[EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
[EPP_INDEX_POWERSAVE] = "power",
NULL
};
static unsigned int epp_values[] = {
[EPP_INDEX_DEFAULT] = 0, /* Unused index */
[EPP_INDEX_PERFORMANCE] = HWP_EPP_PERFORMANCE,
[EPP_INDEX_BALANCE_PERFORMANCE] = HWP_EPP_BALANCE_PERFORMANCE,
[EPP_INDEX_BALANCE_POWERSAVE] = HWP_EPP_BALANCE_POWERSAVE,
[EPP_INDEX_POWERSAVE] = HWP_EPP_POWERSAVE,
};
static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
{
s16 epp;
int index = -EINVAL;
*raw_epp = 0;
epp = intel_pstate_get_epp(cpu_data, 0);
if (epp < 0)
return epp;
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
if (epp == epp_values[EPP_INDEX_PERFORMANCE])
return EPP_INDEX_PERFORMANCE;
if (epp == epp_values[EPP_INDEX_BALANCE_PERFORMANCE])
return EPP_INDEX_BALANCE_PERFORMANCE;
if (epp == epp_values[EPP_INDEX_BALANCE_POWERSAVE])
return EPP_INDEX_BALANCE_POWERSAVE;
if (epp == epp_values[EPP_INDEX_POWERSAVE])
return EPP_INDEX_POWERSAVE;
*raw_epp = epp;
return 0;
} else if (boot_cpu_has(X86_FEATURE_EPB)) {
/*
* Range:
* 0x00-0x03 : Performance
* 0x04-0x07 : Balance performance
* 0x08-0x0B : Balance power
* 0x0C-0x0F : Power
		 * The EPB is a 4-bit value, but these ranges restrict the
		 * values that can be set; effectively only the top two
		 * bits are used here.
*/
index = (epp >> 2) + 1;
}
return index;
}
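/*
 * Illustrative sketch, not part of the driver: the EPB mapping above keeps
 * only the top two bits of the 4-bit value, so e.g. an EPB of 0x06 yields
 * (0x06 >> 2) + 1 == 2, i.e. EPP_INDEX_BALANCE_PERFORMANCE.
 */
static inline int epb_to_pref_index_example(void)
{
	return (0x06 >> 2) + 1;		/* EPP_INDEX_BALANCE_PERFORMANCE */
}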
static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
{
int ret;
/*
* Use the cached HWP Request MSR value, because in the active mode the
* register itself may be updated by intel_pstate_hwp_boost_up() or
* intel_pstate_hwp_boost_down() at any time.
*/
u64 value = READ_ONCE(cpu->hwp_req_cached);
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
/*
* The only other updater of hwp_req_cached in the active mode,
* intel_pstate_hwp_set(), is called under the same lock as this
* function, so it cannot run in parallel with the update below.
*/
WRITE_ONCE(cpu->hwp_req_cached, value);
ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
if (!ret)
cpu->epp_cached = epp;
return ret;
}
static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
int pref_index, bool use_raw,
u32 raw_epp)
{
int epp = -EINVAL;
int ret;
if (!pref_index)
epp = cpu_data->epp_default;
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
if (use_raw)
epp = raw_epp;
else if (epp == -EINVAL)
epp = epp_values[pref_index];
/*
* To avoid confusion, refuse to set EPP to any values different
* from 0 (performance) if the current policy is "performance",
* because those values would be overridden.
*/
if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
return -EBUSY;
ret = intel_pstate_set_epp(cpu_data, epp);
} else {
if (epp == -EINVAL)
epp = (pref_index - 1) << 2;
ret = intel_pstate_set_epb(cpu_data->cpu, epp);
}
return ret;
}
static ssize_t show_energy_performance_available_preferences(
struct cpufreq_policy *policy, char *buf)
{
int i = 0;
int ret = 0;
while (energy_perf_strings[i] != NULL)
ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);
ret += sprintf(&buf[ret], "\n");
return ret;
}
cpufreq_freq_attr_ro(energy_performance_available_preferences);
static struct cpufreq_driver intel_pstate;
static ssize_t store_energy_performance_preference(
struct cpufreq_policy *policy, const char *buf, size_t count)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
char str_preference[21];
bool raw = false;
ssize_t ret;
u32 epp = 0;
ret = sscanf(buf, "%20s", str_preference);
if (ret != 1)
return -EINVAL;
ret = match_string(energy_perf_strings, -1, str_preference);
if (ret < 0) {
if (!boot_cpu_has(X86_FEATURE_HWP_EPP))
return ret;
ret = kstrtouint(buf, 10, &epp);
if (ret)
return ret;
if (epp > 255)
return -EINVAL;
raw = true;
}
/*
* This function runs with the policy R/W semaphore held, which
* guarantees that the driver pointer will not change while it is
* running.
*/
if (!intel_pstate_driver)
return -EAGAIN;
mutex_lock(&intel_pstate_limits_lock);
if (intel_pstate_driver == &intel_pstate) {
ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp);
} else {
/*
* In the passive mode the governor needs to be stopped on the
* target CPU before the EPP update and restarted after it,
* which is super-heavy-weight, so make sure it is worth doing
* upfront.
*/
if (!raw)
epp = ret ? epp_values[ret] : cpu->epp_default;
if (cpu->epp_cached != epp) {
int err;
cpufreq_stop_governor(policy);
ret = intel_pstate_set_epp(cpu, epp);
err = cpufreq_start_governor(policy);
if (!ret)
ret = err;
} else {
ret = 0;
}
}
mutex_unlock(&intel_pstate_limits_lock);
return ret ?: count;
}
static ssize_t show_energy_performance_preference(
struct cpufreq_policy *policy, char *buf)
{
struct cpudata *cpu_data = all_cpu_data[policy->cpu];
int preference, raw_epp;
preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp);
if (preference < 0)
return preference;
if (raw_epp)
return sprintf(buf, "%d\n", raw_epp);
else
return sprintf(buf, "%s\n", energy_perf_strings[preference]);
}
cpufreq_freq_attr_rw(energy_performance_preference);
static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
int ratio, freq;
ratio = intel_pstate_get_cppc_guaranteed(policy->cpu);
if (ratio <= 0) {
u64 cap;
rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
ratio = HWP_GUARANTEED_PERF(cap);
}
freq = ratio * cpu->pstate.scaling;
if (cpu->pstate.scaling != cpu->pstate.perf_ctl_scaling)
freq = rounddown(freq, cpu->pstate.perf_ctl_scaling);
return sprintf(buf, "%d\n", freq);
}
cpufreq_freq_attr_ro(base_frequency);
static struct freq_attr *hwp_cpufreq_attrs[] = {
&energy_performance_preference,
&energy_performance_available_preferences,
&base_frequency,
NULL,
};
static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
u64 cap;
rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
WRITE_ONCE(cpu->hwp_cap_cached, cap);
cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap);
cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap);
}
static void intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
int scaling = cpu->pstate.scaling;
__intel_pstate_get_hwp_cap(cpu);
cpu->pstate.max_freq = cpu->pstate.max_pstate * scaling;
cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
if (scaling != cpu->pstate.perf_ctl_scaling) {
int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
cpu->pstate.max_freq = rounddown(cpu->pstate.max_freq,
perf_ctl_scaling);
cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_freq,
perf_ctl_scaling);
}
}
static void intel_pstate_hwp_set(unsigned int cpu)
{
struct cpudata *cpu_data = all_cpu_data[cpu];
int max, min;
u64 value;
s16 epp;
max = cpu_data->max_perf_ratio;
min = cpu_data->min_perf_ratio;
if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
min = max;
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
value &= ~HWP_MIN_PERF(~0L);
value |= HWP_MIN_PERF(min);
value &= ~HWP_MAX_PERF(~0L);
value |= HWP_MAX_PERF(max);
if (cpu_data->epp_policy == cpu_data->policy)
goto skip_epp;
cpu_data->epp_policy = cpu_data->policy;
if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
epp = intel_pstate_get_epp(cpu_data, value);
cpu_data->epp_powersave = epp;
		/* If the EPP read failed, don't try to write */
if (epp < 0)
goto skip_epp;
epp = 0;
} else {
		/* Skip setting EPP when the saved value is invalid */
if (cpu_data->epp_powersave < 0)
goto skip_epp;
		/*
		 * No need to restore EPP when it is not zero. This means:
		 * - the policy has not changed
		 * - the user has changed it manually
		 * - there was an error reading the EPB
		 */
epp = intel_pstate_get_epp(cpu_data, value);
if (epp)
goto skip_epp;
epp = cpu_data->epp_powersave;
}
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
} else {
intel_pstate_set_epb(cpu, epp);
}
skip_epp:
WRITE_ONCE(cpu_data->hwp_req_cached, value);
wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata);
static void intel_pstate_hwp_offline(struct cpudata *cpu)
{
u64 value = READ_ONCE(cpu->hwp_req_cached);
int min_perf;
intel_pstate_disable_hwp_interrupt(cpu);
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
/*
* In case the EPP has been set to "performance" by the
* active mode "performance" scaling algorithm, replace that
* temporary value with the cached EPP one.
*/
value &= ~GENMASK_ULL(31, 24);
value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
/*
* However, make sure that EPP will be set to "performance" when
* the CPU is brought back online again and the "performance"
* scaling algorithm is still in effect.
*/
cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
}
/*
* Clear the desired perf field in the cached HWP request value to
* prevent nonzero desired values from being leaked into the active
* mode.
*/
value &= ~HWP_DESIRED_PERF(~0L);
WRITE_ONCE(cpu->hwp_req_cached, value);
value &= ~GENMASK_ULL(31, 0);
min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached));
/* Set hwp_max = hwp_min */
value |= HWP_MAX_PERF(min_perf);
value |= HWP_MIN_PERF(min_perf);
/* Set EPP to min */
if (boot_cpu_has(X86_FEATURE_HWP_EPP))
value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}
#define POWER_CTL_EE_ENABLE 1
#define POWER_CTL_EE_DISABLE 2
static int power_ctl_ee_state;
static void set_power_ctl_ee_state(bool input)
{
u64 power_ctl;
mutex_lock(&intel_pstate_driver_lock);
rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
if (input) {
power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
power_ctl_ee_state = POWER_CTL_EE_ENABLE;
} else {
power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
power_ctl_ee_state = POWER_CTL_EE_DISABLE;
}
wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
mutex_unlock(&intel_pstate_driver_lock);
}
static void intel_pstate_hwp_enable(struct cpudata *cpudata);
static void intel_pstate_hwp_reenable(struct cpudata *cpu)
{
intel_pstate_hwp_enable(cpu);
wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
}
static int intel_pstate_suspend(struct cpufreq_policy *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
pr_debug("CPU %d suspending\n", cpu->cpu);
cpu->suspended = true;
/* disable HWP interrupt and cancel any pending work */
intel_pstate_disable_hwp_interrupt(cpu);
return 0;
}
static int intel_pstate_resume(struct cpufreq_policy *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
pr_debug("CPU %d resuming\n", cpu->cpu);
	/* Only restore if the system default has been changed */
if (power_ctl_ee_state == POWER_CTL_EE_ENABLE)
set_power_ctl_ee_state(true);
else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE)
set_power_ctl_ee_state(false);
if (cpu->suspended && hwp_active) {
mutex_lock(&intel_pstate_limits_lock);
/* Re-enable HWP, because "online" has not done that. */
intel_pstate_hwp_reenable(cpu);
mutex_unlock(&intel_pstate_limits_lock);
}
cpu->suspended = false;
return 0;
}
static void intel_pstate_update_policies(void)
{
int cpu;
for_each_possible_cpu(cpu)
cpufreq_update_policy(cpu);
}
static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
struct cpufreq_policy *policy)
{
policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
refresh_frequency_limits(policy);
}
static void intel_pstate_update_max_freq(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
if (!policy)
return;
__intel_pstate_update_max_freq(all_cpu_data[cpu], policy);
cpufreq_cpu_release(policy);
}
static void intel_pstate_update_limits(unsigned int cpu)
{
mutex_lock(&intel_pstate_driver_lock);
update_turbo_state();
/*
* If turbo has been turned on or off globally, policy limits for
* all CPUs need to be updated to reflect that.
*/
if (global.turbo_disabled_mf != global.turbo_disabled) {
global.turbo_disabled_mf = global.turbo_disabled;
arch_set_max_freq_ratio(global.turbo_disabled);
for_each_possible_cpu(cpu)
intel_pstate_update_max_freq(cpu);
} else {
cpufreq_update_policy(cpu);
}
mutex_unlock(&intel_pstate_driver_lock);
}
/************************** sysfs begin ************************/
#define show_one(file_name, object) \
static ssize_t show_##file_name \
(struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
{ \
return sprintf(buf, "%u\n", global.object); \
}
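/*
 * For reference, an illustrative expansion of the macro above (not extra
 * driver code): show_one(max_perf_pct, max_perf_pct) produces
 *
 *	static ssize_t show_max_perf_pct
 *	(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", global.max_perf_pct);
 *	}
 */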
static ssize_t intel_pstate_show_status(char *buf);
static int intel_pstate_update_status(const char *buf, size_t size);
static ssize_t show_status(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
ssize_t ret;
mutex_lock(&intel_pstate_driver_lock);
ret = intel_pstate_show_status(buf);
mutex_unlock(&intel_pstate_driver_lock);
return ret;
}
static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
char *p = memchr(buf, '\n', count);
int ret;
mutex_lock(&intel_pstate_driver_lock);
ret = intel_pstate_update_status(buf, p ? p - buf : count);
mutex_unlock(&intel_pstate_driver_lock);
return ret < 0 ? ret : count;
}
static ssize_t show_turbo_pct(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct cpudata *cpu;
int total, no_turbo, turbo_pct;
uint32_t turbo_fp;
mutex_lock(&intel_pstate_driver_lock);
if (!intel_pstate_driver) {
mutex_unlock(&intel_pstate_driver_lock);
return -EAGAIN;
}
cpu = all_cpu_data[0];
total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
turbo_fp = div_fp(no_turbo, total);
turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
mutex_unlock(&intel_pstate_driver_lock);
return sprintf(buf, "%u\n", turbo_pct);
}
static ssize_t show_num_pstates(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct cpudata *cpu;
int total;
mutex_lock(&intel_pstate_driver_lock);
if (!intel_pstate_driver) {
mutex_unlock(&intel_pstate_driver_lock);
return -EAGAIN;
}
cpu = all_cpu_data[0];
total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
mutex_unlock(&intel_pstate_driver_lock);
return sprintf(buf, "%u\n", total);
}
static ssize_t show_no_turbo(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
ssize_t ret;
mutex_lock(&intel_pstate_driver_lock);
if (!intel_pstate_driver) {
mutex_unlock(&intel_pstate_driver_lock);
return -EAGAIN;
}
update_turbo_state();
if (global.turbo_disabled)
ret = sprintf(buf, "%u\n", global.turbo_disabled);
else
ret = sprintf(buf, "%u\n", global.no_turbo);
mutex_unlock(&intel_pstate_driver_lock);
return ret;
}
static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
mutex_lock(&intel_pstate_driver_lock);
if (!intel_pstate_driver) {
mutex_unlock(&intel_pstate_driver_lock);
return -EAGAIN;
}
mutex_lock(&intel_pstate_limits_lock);
update_turbo_state();
if (global.turbo_disabled) {
pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
mutex_unlock(&intel_pstate_limits_lock);
mutex_unlock(&intel_pstate_driver_lock);
return -EPERM;
}
global.no_turbo = clamp_t(int, input, 0, 1);
if (global.no_turbo) {
struct cpudata *cpu = all_cpu_data[0];
int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
/* Squash the global minimum into the permitted range. */
if (global.min_perf_pct > pct)
global.min_perf_pct = pct;
}
mutex_unlock(&intel_pstate_limits_lock);
intel_pstate_update_policies();
arch_set_max_freq_ratio(global.no_turbo);
mutex_unlock(&intel_pstate_driver_lock);
return count;
}
static void update_qos_request(enum freq_qos_req_type type)
{
struct freq_qos_request *req;
struct cpufreq_policy *policy;
int i;
for_each_possible_cpu(i) {
struct cpudata *cpu = all_cpu_data[i];
unsigned int freq, perf_pct;
policy = cpufreq_cpu_get(i);
if (!policy)
continue;
req = policy->driver_data;
cpufreq_cpu_put(policy);
if (!req)
continue;
if (hwp_active)
intel_pstate_get_hwp_cap(cpu);
if (type == FREQ_QOS_MIN) {
perf_pct = global.min_perf_pct;
} else {
req++;
perf_pct = global.max_perf_pct;
}
freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100);
if (freq_qos_update_request(req, freq) < 0)
pr_warn("Failed to update freq constraint: CPU%d\n", i);
}
}
static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
mutex_lock(&intel_pstate_driver_lock);
if (!intel_pstate_driver) {
mutex_unlock(&intel_pstate_driver_lock);
return -EAGAIN;
}
mutex_lock(&intel_pstate_limits_lock);
global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100);
mutex_unlock(&intel_pstate_limits_lock);
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
update_qos_request(FREQ_QOS_MAX);
mutex_unlock(&intel_pstate_driver_lock);
return count;
}
static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
mutex_lock(&intel_pstate_driver_lock);
if (!intel_pstate_driver) {
mutex_unlock(&intel_pstate_driver_lock);
return -EAGAIN;
}
mutex_lock(&intel_pstate_limits_lock);
global.min_perf_pct = clamp_t(int, input,
min_perf_pct_min(), global.max_perf_pct);
mutex_unlock(&intel_pstate_limits_lock);
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
update_qos_request(FREQ_QOS_MIN);
mutex_unlock(&intel_pstate_driver_lock);
return count;
}
static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", hwp_boost);
}
static ssize_t store_hwp_dynamic_boost(struct kobject *a,
struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
int ret;
ret = kstrtouint(buf, 10, &input);
if (ret)
return ret;
mutex_lock(&intel_pstate_driver_lock);
hwp_boost = !!input;
intel_pstate_update_policies();
mutex_unlock(&intel_pstate_driver_lock);
return count;
}
static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
u64 power_ctl;
int enable;
rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
return sprintf(buf, "%d\n", !enable);
}
static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
bool input;
int ret;
ret = kstrtobool(buf, &input);
if (ret)
return ret;
set_power_ctl_ee_state(input);
return count;
}
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);
define_one_global_rw(status);
define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);
define_one_global_rw(hwp_dynamic_boost);
define_one_global_rw(energy_efficiency);
static struct attribute *intel_pstate_attributes[] = {
&status.attr,
&no_turbo.attr,
NULL
};
static const struct attribute_group intel_pstate_attr_group = {
.attrs = intel_pstate_attributes,
};
static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[];
static struct kobject *intel_pstate_kobject;
static void __init intel_pstate_sysfs_expose_params(void)
{
struct device *dev_root = bus_get_dev_root(&cpu_subsys);
int rc;
if (dev_root) {
intel_pstate_kobject = kobject_create_and_add("intel_pstate", &dev_root->kobj);
put_device(dev_root);
}
if (WARN_ON(!intel_pstate_kobject))
return;
rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
if (WARN_ON(rc))
return;
if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
rc = sysfs_create_file(intel_pstate_kobject, &turbo_pct.attr);
WARN_ON(rc);
rc = sysfs_create_file(intel_pstate_kobject, &num_pstates.attr);
WARN_ON(rc);
}
	/*
	 * If per-CPU limits are enforced there are no global limits, so
	 * return without creating the max/min_perf_pct attributes.
	 */
if (per_cpu_limits)
return;
rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
WARN_ON(rc);
rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
WARN_ON(rc);
if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) {
rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr);
WARN_ON(rc);
}
}
static void __init intel_pstate_sysfs_remove(void)
{
if (!intel_pstate_kobject)
return;
sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group);
if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
sysfs_remove_file(intel_pstate_kobject, &num_pstates.attr);
sysfs_remove_file(intel_pstate_kobject, &turbo_pct.attr);
}
if (!per_cpu_limits) {
sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr);
sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr);
if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids))
sysfs_remove_file(intel_pstate_kobject, &energy_efficiency.attr);
}
kobject_put(intel_pstate_kobject);
}
static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void)
{
int rc;
if (!hwp_active)
return;
rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
WARN_ON_ONCE(rc);
}
static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
{
if (!hwp_active)
return;
sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
}
/************************** sysfs end ************************/
static void intel_pstate_notify_work(struct work_struct *work)
{
struct cpudata *cpudata =
container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
if (policy) {
intel_pstate_get_hwp_cap(cpudata);
__intel_pstate_update_max_freq(cpudata, policy);
cpufreq_cpu_release(policy);
}
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}
static DEFINE_SPINLOCK(hwp_notify_lock);
static cpumask_t hwp_intr_enable_mask;
void notify_hwp_interrupt(void)
{
unsigned int this_cpu = smp_processor_id();
struct cpudata *cpudata;
unsigned long flags;
u64 value;
if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
return;
rdmsrl_safe(MSR_HWP_STATUS, &value);
if (!(value & 0x01))
return;
spin_lock_irqsave(&hwp_notify_lock, flags);
if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
goto ack_intr;
	/*
	 * Currently, all_cpu_data is never freed and this point cannot be
	 * reached without it being allocated, but check anyway for safety
	 * against future changes.
	 */
if (unlikely(!READ_ONCE(all_cpu_data)))
goto ack_intr;
	/*
	 * The per-CPU data is freed during cleanup when cpufreq registration
	 * fails, and this point cannot be reached if that happens on init or
	 * on a status switch, but check anyway for safety against future
	 * changes.
	 */
cpudata = READ_ONCE(all_cpu_data[this_cpu]);
if (unlikely(!cpudata))
goto ack_intr;
schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10));
spin_unlock_irqrestore(&hwp_notify_lock, flags);
return;
ack_intr:
wrmsrl_safe(MSR_HWP_STATUS, 0);
spin_unlock_irqrestore(&hwp_notify_lock, flags);
}
static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
{
unsigned long flags;
if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
return;
	/* wrmsrl_on_cpu has to be outside spinlock as this can result in an IPI */
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
spin_lock_irqsave(&hwp_notify_lock, flags);
if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask))
cancel_delayed_work(&cpudata->hwp_notify_work);
spin_unlock_irqrestore(&hwp_notify_lock, flags);
}
static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
{
/* Enable HWP notification interrupt for guaranteed performance change */
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
unsigned long flags;
spin_lock_irqsave(&hwp_notify_lock, flags);
INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
spin_unlock_irqrestore(&hwp_notify_lock, flags);
		/* wrmsrl_on_cpu has to be outside spinlock as this can result in an IPI */
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}
}
static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
{
cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
	/*
	 * If this CPU generation doesn't call for a change in the
	 * balance_perf EPP, return.
	 */
if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
return;
	/*
	 * If the EPP was set by firmware (which means that firmware enabled
	 * HWP) and it is:
	 * - less than or equal to 0x80 (the default balance_perf EPP)
	 * - but less performance-oriented than the performance EPP (i.e.
	 *   greater than it)
	 * then use it as the new balance_perf EPP.
	 */
if (hwp_forced && cpudata->epp_default <= HWP_EPP_BALANCE_PERFORMANCE &&
cpudata->epp_default > HWP_EPP_PERFORMANCE) {
epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = cpudata->epp_default;
return;
}
	/*
	 * Use the hard-coded per-generation value to update the balance_perf
	 * and default EPP.
	 */
cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE];
intel_pstate_set_epp(cpudata, cpudata->epp_default);
}
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable the HWP notification interrupt until we activate it again */
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
intel_pstate_enable_hwp_interrupt(cpudata);
if (cpudata->epp_default >= 0)
return;
intel_pstate_update_epp_defaults(cpudata);
}
static int atom_get_min_pstate(int not_used)
{
u64 value;
rdmsrl(MSR_ATOM_CORE_RATIOS, value);
return (value >> 8) & 0x7F;
}
static int atom_get_max_pstate(int not_used)
{
u64 value;
rdmsrl(MSR_ATOM_CORE_RATIOS, value);
return (value >> 16) & 0x7F;
}
static int atom_get_turbo_pstate(int not_used)
{
u64 value;
rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
return value & 0x7F;
}
static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
u64 val;
int32_t vid_fp;
u32 vid;
val = (u64)pstate << 8;
if (global.no_turbo && !global.turbo_disabled)
val |= (u64)1 << 32;
vid_fp = cpudata->vid.min + mul_fp(
int_tofp(pstate - cpudata->pstate.min_pstate),
cpudata->vid.ratio);
vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
vid = ceiling_fp(vid_fp);
if (pstate > cpudata->pstate.max_pstate)
vid = cpudata->vid.turbo;
return val | vid;
}
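/*
 * Illustrative sketch, not part of the driver: the VID selected by
 * atom_get_val() is linearly interpolated between vid.min and vid.max.
 * With hypothetical endpoints of 20 and 40 over P-states 10..30, a
 * requested P-state of 20 lands exactly halfway, i.e. VID 30.
 */
static inline u32 atom_vid_interpolation_example(void)
{
	int32_t ratio = div_fp(40 - 20, 30 - 10);	/* slope: 1.0 */
	int32_t vid_fp = int_tofp(20) + mul_fp(int_tofp(20 - 10), ratio);

	return ceiling_fp(vid_fp);			/* 30 */
}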
static int silvermont_get_scaling(void)
{
u64 value;
int i;
/* Defined in Table 35-6 from SDM (Sept 2015) */
static int silvermont_freq_table[] = {
83300, 100000, 133300, 116700, 80000};
rdmsrl(MSR_FSB_FREQ, value);
i = value & 0x7;
WARN_ON(i > 4);
return silvermont_freq_table[i];
}
static int airmont_get_scaling(void)
{
u64 value;
int i;
/* Defined in Table 35-10 from SDM (Sept 2015) */
static int airmont_freq_table[] = {
83300, 100000, 133300, 116700, 80000,
93300, 90000, 88900, 87500};
rdmsrl(MSR_FSB_FREQ, value);
i = value & 0xF;
WARN_ON(i > 8);
return airmont_freq_table[i];
}
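/*
 * Illustrative sketch, not part of the driver: MSR_FSB_FREQ encodes the
 * bus-clock index in its low bits, so e.g. an index of 1 selects 100000
 * from the tables above, i.e. a 100 MHz bus and P-state steps of 100 MHz.
 * The bounds check here is illustrative; the driver relies on WARN_ON().
 */
static inline int fsb_freq_decode_example(u64 msr_fsb_freq_value)
{
	static const int freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};	/* kHz, as in airmont */
	unsigned int i = msr_fsb_freq_value & 0xF;

	return i < ARRAY_SIZE(freq_table) ? freq_table[i] : 0;
}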
static void atom_get_vid(struct cpudata *cpudata)
{
u64 value;
rdmsrl(MSR_ATOM_CORE_VIDS, value);
cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
cpudata->vid.ratio = div_fp(
cpudata->vid.max - cpudata->vid.min,
int_tofp(cpudata->pstate.max_pstate -
cpudata->pstate.min_pstate));
rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
cpudata->vid.turbo = value & 0x7f;
}
static int core_get_min_pstate(int cpu)
{
u64 value;
rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
return (value >> 40) & 0xFF;
}
static int core_get_max_pstate_physical(int cpu)
{
u64 value;
rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
return (value >> 8) & 0xFF;
}
static int core_get_tdp_ratio(int cpu, u64 plat_info)
{
	/* Check how many TDP levels are present */
if (plat_info & 0x600000000) {
u64 tdp_ctrl;
u64 tdp_ratio;
int tdp_msr;
int err;
/* Get the TDP level (0, 1, 2) to get ratios */
err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
if (err)
return err;
		/* TDP MSRs are contiguous, starting at 0x648 */
tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
if (err)
return err;
/* For level 1 and 2, bits[23:16] contain the ratio */
if (tdp_ctrl & 0x03)
tdp_ratio >>= 16;
tdp_ratio &= 0xff; /* ratios are only 8 bits long */
pr_debug("tdp_ratio %x\n", (int)tdp_ratio);
return (int)tdp_ratio;
}
return -ENXIO;
}
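/*
 * Illustrative sketch, not part of the driver: the config-TDP MSRs are
 * contiguous, so level selection above is plain arithmetic on MSR numbers.
 * Level 2 selects MSR_CONFIG_TDP_NOMINAL (0x648) + 2 == 0x64a.
 */
static inline int tdp_level_to_msr_example(u64 tdp_ctrl)
{
	return MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
}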
static int core_get_max_pstate(int cpu)
{
u64 tar;
u64 plat_info;
int max_pstate;
int tdp_ratio;
int err;
rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
max_pstate = (plat_info >> 8) & 0xFF;
tdp_ratio = core_get_tdp_ratio(cpu, plat_info);
if (tdp_ratio <= 0)
return max_pstate;
if (hwp_active) {
/* Turbo activation ratio is not used on HWP platforms */
return tdp_ratio;
}
err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
if (!err) {
int tar_levels;
/* Do some sanity checking for safety */
tar_levels = tar & 0xff;
if (tdp_ratio - 1 == tar_levels) {
max_pstate = tar_levels;
pr_debug("max_pstate=TAC %x\n", max_pstate);
}
}
return max_pstate;
}
static int core_get_turbo_pstate(int cpu)
{
u64 value;
int nont, ret;
rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
nont = core_get_max_pstate(cpu);
ret = (value) & 255;
if (ret <= nont)
ret = nont;
return ret;
}
static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
u64 val;
val = (u64)pstate << 8;
if (global.no_turbo && !global.turbo_disabled)
val |= (u64)1 << 32;
return val;
}
static int knl_get_aperf_mperf_shift(void)
{
return 10;
}
static int knl_get_turbo_pstate(int cpu)
{
u64 value;
int nont, ret;
rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
nont = core_get_max_pstate(cpu);
ret = (((value) >> 8) & 0xFF);
if (ret <= nont)
ret = nont;
return ret;
}
static void hybrid_get_type(void *data)
{
u8 *cpu_type = data;
*cpu_type = get_this_hybrid_cpu_type();
}
static int hwp_get_cpu_scaling(int cpu)
{
u8 cpu_type = 0;
smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
	/* P-cores have a smaller perf level-to-frequency scaling factor. */
if (cpu_type == 0x40)
return HYBRID_SCALING_FACTOR;
/* Use default core scaling for E-cores */
if (cpu_type == 0x20)
return core_get_scaling();
/*
* If reached here, this system is either non-hybrid (like Tiger
* Lake) or hybrid-capable (like Alder Lake or Raptor Lake) with
* no E cores (in which case CPUID for hybrid support is 0).
*
* The CPPC nominal_frequency field is 0 for non-hybrid systems,
* so the default core scaling will be used for them.
*/
return intel_pstate_cppc_get_scaling(cpu);
}
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
cpu->pstate.current_pstate = pstate;
/*
* Generally, there is no guarantee that this code will always run on
* the CPU being updated, so force the register update to run on the
* right CPU.
*/
wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, pstate));
}
static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}
static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
update_turbo_state();
intel_pstate_set_pstate(cpu, pstate);
}
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
int perf_ctl_scaling = pstate_funcs.get_scaling();
cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu);
cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;
if (hwp_active && !hwp_mode_bdw) {
__intel_pstate_get_hwp_cap(cpu);
if (pstate_funcs.get_cpu_scaling) {
cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);
if (cpu->pstate.scaling != perf_ctl_scaling)
intel_pstate_hybrid_hwp_adjust(cpu);
} else {
cpu->pstate.scaling = perf_ctl_scaling;
}
} else {
cpu->pstate.scaling = perf_ctl_scaling;
cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu);
cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(cpu->cpu);
}
if (cpu->pstate.scaling == perf_ctl_scaling) {
cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
cpu->pstate.max_freq = cpu->pstate.max_pstate * perf_ctl_scaling;
cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * perf_ctl_scaling;
}
if (pstate_funcs.get_aperf_mperf_shift)
cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
if (pstate_funcs.get_vid)
pstate_funcs.get_vid(cpu);
intel_pstate_set_min_pstate(cpu);
}
/*
 * A long hold time keeps high perf limits in place for a long time, which
 * negatively impacts perf/watt for some workloads, like specpower. 3 ms is
 * based on experiments with some workloads.
 */
static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;
static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
{
u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
u32 max_limit = (hwp_req & 0xff00) >> 8;
u32 min_limit = (hwp_req & 0xff);
u32 boost_level1;
/*
* Cases to consider (User changes via sysfs or boot time):
* If, P0 (Turbo max) = P1 (Guaranteed max) = min:
* No boost, return.
* If, P0 (Turbo max) > P1 (Guaranteed max) = min:
* Should result in one level boost only for P0.
* If, P0 (Turbo max) = P1 (Guaranteed max) > min:
* Should result in two level boost:
* (min + p1)/2 and P1.
* If, P0 (Turbo max) > P1 (Guaranteed max) > min:
* Should result in three level boost:
* (min + p1)/2, P1 and P0.
*/
/* If max and min are equal or already at max, nothing to boost */
if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit)
return;
if (!cpu->hwp_boost_min)
cpu->hwp_boost_min = min_limit;
	/* Level at the halfway mark between min and guaranteed */
boost_level1 = (HWP_GUARANTEED_PERF(hwp_cap) + min_limit) >> 1;
if (cpu->hwp_boost_min < boost_level1)
cpu->hwp_boost_min = boost_level1;
else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap))
cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap);
else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) &&
max_limit != HWP_GUARANTEED_PERF(hwp_cap))
cpu->hwp_boost_min = max_limit;
else
return;
hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
wrmsrl(MSR_HWP_REQUEST, hwp_req);
cpu->last_update = cpu->sample.time;
}
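/*
 * Illustrative walk of the boost ladder above, not part of the driver:
 * with min == 20, guaranteed (P1) == 30 and max (P0) == 40, successive
 * boost-up calls move hwp_boost_min through 25 ((20 + 30) / 2), then 30,
 * then 40, matching the three-level case described in the comment above.
 */
static inline u32 hwp_boost_level1_example(u32 min_limit, u32 guaranteed)
{
	return (guaranteed + min_limit) >> 1;	/* 25 for min 20, P1 30 */
}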
static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
{
if (cpu->hwp_boost_min) {
bool expired;
		/* Check if we have been idle for the hold time before boosting down */
expired = time_after64(cpu->sample.time, cpu->last_update +
hwp_boost_hold_time_ns);
if (expired) {
wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
cpu->hwp_boost_min = 0;
}
}
cpu->last_update = cpu->sample.time;
}
static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
u64 time)
{
cpu->sample.time = time;
if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) {
bool do_io = false;
cpu->sched_flags = 0;
		/*
		 * Set the iowait_boost flag and update the time. Since the
		 * IOWAIT flag is set all the time, a single occurrence is not
		 * enough to conclude that some IO-bound activity is scheduled
		 * on this CPU. Only if we receive at least two of them within
		 * two consecutive ticks do we treat the CPU as a boost
		 * candidate.
		 */
if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC))
do_io = true;
cpu->last_io_update = time;
if (do_io)
intel_pstate_hwp_boost_up(cpu);
} else {
intel_pstate_hwp_boost_down(cpu);
}
}
static inline void intel_pstate_update_util_hwp(struct update_util_data *data,
u64 time, unsigned int flags)
{
struct cpudata *cpu = container_of(data, struct cpudata, update_util);
cpu->sched_flags |= flags;
if (smp_processor_id() == cpu->cpu)
intel_pstate_update_util_hwp_local(cpu, time);
}
static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
{
struct sample *sample = &cpu->sample;
sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
}
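/*
 * Illustrative sketch, not part of the driver: with an APERF delta of
 * 3,000,000 and an MPERF delta of 2,000,000, core_avg_perf becomes 1.5 in
 * extended fixed point, so mul_ext_fp() scales values by exactly 1.5.
 */
static inline u64 avg_perf_example(void)
{
	u64 ratio = div_ext_fp(3000000, 2000000);	/* 1.5 */

	return mul_ext_fp(ratio, 2000000);		/* 3000000 */
}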
static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
u64 aperf, mperf;
unsigned long flags;
u64 tsc;
local_irq_save(flags);
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
tsc = rdtsc();
if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
local_irq_restore(flags);
return false;
}
local_irq_restore(flags);
cpu->last_sample_time = cpu->sample.time;
cpu->sample.time = time;
cpu->sample.aperf = aperf;
cpu->sample.mperf = mperf;
cpu->sample.tsc = tsc;
cpu->sample.aperf -= cpu->prev_aperf;
cpu->sample.mperf -= cpu->prev_mperf;
cpu->sample.tsc -= cpu->prev_tsc;
cpu->prev_aperf = aperf;
cpu->prev_mperf = mperf;
cpu->prev_tsc = tsc;
/*
* First time this function is invoked in a given cycle, all of the
* previous sample data fields are equal to zero or stale and they must
* be populated with meaningful numbers for things to work, so assume
* that sample.time will always be reset before setting the utilization
* update hook and make the caller skip the sample then.
*/
if (cpu->last_sample_time) {
intel_pstate_calc_avg_perf(cpu);
return true;
}
return false;
}
static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
}
static inline int32_t get_avg_pstate(struct cpudata *cpu)
{
return mul_ext_fp(cpu->pstate.max_pstate_physical,
cpu->sample.core_avg_perf);
}
static inline int32_t get_target_pstate(struct cpudata *cpu)
{
struct sample *sample = &cpu->sample;
int32_t busy_frac;
int target, avg_pstate;
busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
sample->tsc);
if (busy_frac < cpu->iowait_boost)
busy_frac = cpu->iowait_boost;
sample->busy_scaled = busy_frac * 100;
target = global.no_turbo || global.turbo_disabled ?
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
target += target >> 2;
target = mul_fp(target, busy_frac);
if (target < cpu->pstate.min_pstate)
target = cpu->pstate.min_pstate;
/*
* If the average P-state during the previous cycle was higher than the
* current target, add 50% of the difference to the target to reduce
* possible performance oscillations and offset possible performance
* loss related to moving the workload from one CPU to another within
* a package/module.
*/
avg_pstate = get_avg_pstate(cpu);
if (avg_pstate > target)
target += (avg_pstate - target) >> 1;
return target;
}
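/*
 * Illustrative sketch, not part of the driver: the proportional law above
 * is target = 1.25 * P_max * busy_frac (before the average-P-state
 * correction). E.g. with a turbo P-state of 40 and the CPU 50% busy,
 * target = 50 * 0.5 = 25.
 */
static inline int target_pstate_example(void)
{
	int target = 40 + (40 >> 2);	/* 1.25 * P_max == 50 */

	return fp_toint(mul_fp(int_tofp(target), div_fp(1, 2)));	/* 25 */
}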
static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
{
int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
int max_pstate = max(min_pstate, cpu->max_perf_ratio);
return clamp_t(int, pstate, min_pstate, max_pstate);
}
static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{
if (pstate == cpu->pstate.current_pstate)
return;
cpu->pstate.current_pstate = pstate;
wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}
static void intel_pstate_adjust_pstate(struct cpudata *cpu)
{
int from = cpu->pstate.current_pstate;
struct sample *sample;
int target_pstate;
update_turbo_state();
target_pstate = get_target_pstate(cpu);
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
intel_pstate_update_pstate(cpu, target_pstate);
sample = &cpu->sample;
trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
fp_toint(sample->busy_scaled),
from,
cpu->pstate.current_pstate,
sample->mperf,
sample->aperf,
sample->tsc,
get_avg_frequency(cpu),
fp_toint(cpu->iowait_boost * 100));
}
static void intel_pstate_update_util(struct update_util_data *data, u64 time,
unsigned int flags)
{
struct cpudata *cpu = container_of(data, struct cpudata, update_util);
u64 delta_ns;
/* Don't allow remote callbacks */
if (smp_processor_id() != cpu->cpu)
return;
delta_ns = time - cpu->last_update;
if (flags & SCHED_CPUFREQ_IOWAIT) {
/* Start over if the CPU may have been idle. */
if (delta_ns > TICK_NSEC) {
cpu->iowait_boost = ONE_EIGHTH_FP;
} else if (cpu->iowait_boost >= ONE_EIGHTH_FP) {
cpu->iowait_boost <<= 1;
if (cpu->iowait_boost > int_tofp(1))
cpu->iowait_boost = int_tofp(1);
} else {
cpu->iowait_boost = ONE_EIGHTH_FP;
}
} else if (cpu->iowait_boost) {
/* Clear iowait_boost if the CPU may have been idle. */
if (delta_ns > TICK_NSEC)
cpu->iowait_boost = 0;
else
cpu->iowait_boost >>= 1;
}
cpu->last_update = time;
delta_ns = time - cpu->sample.time;
if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
return;
if (intel_pstate_sample(cpu, time))
intel_pstate_adjust_pstate(cpu);
}
static struct pstate_funcs core_funcs = {
.get_max = core_get_max_pstate,
.get_max_physical = core_get_max_pstate_physical,
.get_min = core_get_min_pstate,
.get_turbo = core_get_turbo_pstate,
.get_scaling = core_get_scaling,
.get_val = core_get_val,
};
static const struct pstate_funcs silvermont_funcs = {
.get_max = atom_get_max_pstate,
.get_max_physical = atom_get_max_pstate,
.get_min = atom_get_min_pstate,
.get_turbo = atom_get_turbo_pstate,
.get_val = atom_get_val,
.get_scaling = silvermont_get_scaling,
.get_vid = atom_get_vid,
};
static const struct pstate_funcs airmont_funcs = {
.get_max = atom_get_max_pstate,
.get_max_physical = atom_get_max_pstate,
.get_min = atom_get_min_pstate,
.get_turbo = atom_get_turbo_pstate,
.get_val = atom_get_val,
.get_scaling = airmont_get_scaling,
.get_vid = atom_get_vid,
};
static const struct pstate_funcs knl_funcs = {
.get_max = core_get_max_pstate,
.get_max_physical = core_get_max_pstate_physical,
.get_min = core_get_min_pstate,
.get_turbo = knl_get_turbo_pstate,
.get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
.get_scaling = core_get_scaling,
.get_val = core_get_val,
};
#define X86_MATCH(model, policy) \
X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
X86_FEATURE_APERFMPERF, &policy)
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
X86_MATCH(SANDYBRIDGE, core_funcs),
X86_MATCH(SANDYBRIDGE_X, core_funcs),
X86_MATCH(ATOM_SILVERMONT, silvermont_funcs),
X86_MATCH(IVYBRIDGE, core_funcs),
X86_MATCH(HASWELL, core_funcs),
X86_MATCH(BROADWELL, core_funcs),
X86_MATCH(IVYBRIDGE_X, core_funcs),
X86_MATCH(HASWELL_X, core_funcs),
X86_MATCH(HASWELL_L, core_funcs),
X86_MATCH(HASWELL_G, core_funcs),
X86_MATCH(BROADWELL_G, core_funcs),
X86_MATCH(ATOM_AIRMONT, airmont_funcs),
X86_MATCH(SKYLAKE_L, core_funcs),
X86_MATCH(BROADWELL_X, core_funcs),
X86_MATCH(SKYLAKE, core_funcs),
X86_MATCH(BROADWELL_D, core_funcs),
X86_MATCH(XEON_PHI_KNL, knl_funcs),
X86_MATCH(XEON_PHI_KNM, knl_funcs),
X86_MATCH(ATOM_GOLDMONT, core_funcs),
X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs),
X86_MATCH(SKYLAKE_X, core_funcs),
X86_MATCH(COMETLAKE, core_funcs),
X86_MATCH(ICELAKE_X, core_funcs),
X86_MATCH(TIGERLAKE, core_funcs),
X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(BROADWELL_D, core_funcs),
X86_MATCH(BROADWELL_X, core_funcs),
X86_MATCH(SKYLAKE_X, core_funcs),
X86_MATCH(ICELAKE_X, core_funcs),
X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
{}
};
static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
X86_MATCH(KABYLAKE, core_funcs),
{}
};
static int intel_pstate_init_cpu(unsigned int cpunum)
{
struct cpudata *cpu;
cpu = all_cpu_data[cpunum];
if (!cpu) {
cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
if (!cpu)
return -ENOMEM;
WRITE_ONCE(all_cpu_data[cpunum], cpu);
cpu->cpu = cpunum;
cpu->epp_default = -EINVAL;
if (hwp_active) {
intel_pstate_hwp_enable(cpu);
if (intel_pstate_acpi_pm_profile_server())
hwp_boost = true;
}
} else if (hwp_active) {
/*
* Re-enable HWP in case this happens after a resume from ACPI
* S3 if the CPU was offline during the whole system/resume
* cycle.
*/
intel_pstate_hwp_reenable(cpu);
}
cpu->epp_powersave = -EINVAL;
cpu->epp_policy = 0;
intel_pstate_get_cpu_pstates(cpu);
pr_debug("controlling: cpu %d\n", cpunum);
return 0;
}
static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
struct cpudata *cpu = all_cpu_data[cpu_num];
if (hwp_active && !hwp_boost)
return;
if (cpu->update_util_set)
return;
/* Prevent intel_pstate_update_util() from using stale data. */
cpu->sample.time = 0;
cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
(hwp_active ?
intel_pstate_update_util_hwp :
intel_pstate_update_util));
cpu->update_util_set = true;
}
static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
struct cpudata *cpu_data = all_cpu_data[cpu];
if (!cpu_data->update_util_set)
return;
cpufreq_remove_update_util_hook(cpu);
cpu_data->update_util_set = false;
synchronize_rcu();
}
static int intel_pstate_get_max_freq(struct cpudata *cpu)
{
return global.turbo_disabled || global.no_turbo ?
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
}
static void intel_pstate_update_perf_limits(struct cpudata *cpu,
unsigned int policy_min,
unsigned int policy_max)
{
int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
int32_t max_policy_perf, min_policy_perf;
max_policy_perf = policy_max / perf_ctl_scaling;
if (policy_max == policy_min) {
min_policy_perf = max_policy_perf;
} else {
min_policy_perf = policy_min / perf_ctl_scaling;
min_policy_perf = clamp_t(int32_t, min_policy_perf,
0, max_policy_perf);
}
/*
* HWP needs some special consideration, because HWP_REQUEST uses
* abstract values to represent performance rather than pure ratios.
*/
if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) {
int scaling = cpu->pstate.scaling;
int freq;
freq = max_policy_perf * perf_ctl_scaling;
max_policy_perf = DIV_ROUND_UP(freq, scaling);
freq = min_policy_perf * perf_ctl_scaling;
min_policy_perf = DIV_ROUND_UP(freq, scaling);
}
pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
cpu->cpu, min_policy_perf, max_policy_perf);
/* Normalize user input to [min_perf, max_perf] */
if (per_cpu_limits) {
cpu->min_perf_ratio = min_policy_perf;
cpu->max_perf_ratio = max_policy_perf;
} else {
int turbo_max = cpu->pstate.turbo_pstate;
int32_t global_min, global_max;
/* Global limits are in percent of the maximum turbo P-state. */
global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
global_min = clamp_t(int32_t, global_min, 0, global_max);
pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu,
global_min, global_max);
cpu->min_perf_ratio = max(min_policy_perf, global_min);
cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
cpu->max_perf_ratio = min(max_policy_perf, global_max);
cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);
/* Make sure min_perf <= max_perf */
cpu->min_perf_ratio = min(cpu->min_perf_ratio,
cpu->max_perf_ratio);
}
pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu,
cpu->max_perf_ratio,
cpu->min_perf_ratio);
}
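/*
 * Worked example for the global limits above (illustrative numbers, not
 * taken from any particular CPU): with turbo_pstate = 40,
 * global.max_perf_pct = 50 and global.min_perf_pct = 20, the limits
 * become global_max = DIV_ROUND_UP(40 * 50, 100) = 20 and
 * global_min = DIV_ROUND_UP(40 * 20, 100) = 8, which then clamp the
 * policy-derived min/max performance ratios.
 */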
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
struct cpudata *cpu;
if (!policy->cpuinfo.max_freq)
return -ENODEV;
pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
policy->cpuinfo.max_freq, policy->max);
cpu = all_cpu_data[policy->cpu];
cpu->policy = policy->policy;
mutex_lock(&intel_pstate_limits_lock);
intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
/*
* NOHZ_FULL CPUs need this as the governor callback may not
* be invoked on them.
*/
intel_pstate_clear_update_util_hook(policy->cpu);
intel_pstate_max_within_limits(cpu);
} else {
intel_pstate_set_update_util_hook(policy->cpu);
}
if (hwp_active) {
/*
* If hwp_boost was active before and has been turned off
* dynamically in the meantime, the update util hook needs to
* be cleared.
*/
if (!hwp_boost)
intel_pstate_clear_update_util_hook(policy->cpu);
intel_pstate_hwp_set(policy->cpu);
}
/*
* policy->cur is never updated with the intel_pstate driver, but it
* is used as a stale frequency value. So, keep it within limits.
*/
policy->cur = policy->min;
mutex_unlock(&intel_pstate_limits_lock);
return 0;
}
static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
struct cpufreq_policy_data *policy)
{
if (!hwp_active &&
cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
policy->max < policy->cpuinfo.max_freq &&
policy->max > cpu->pstate.max_freq) {
pr_debug("policy->max > max non turbo frequency\n");
policy->max = policy->cpuinfo.max_freq;
}
}
static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
struct cpufreq_policy_data *policy)
{
int max_freq;
update_turbo_state();
if (hwp_active) {
intel_pstate_get_hwp_cap(cpu);
max_freq = global.no_turbo || global.turbo_disabled ?
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
} else {
max_freq = intel_pstate_get_max_freq(cpu);
}
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq);
intel_pstate_adjust_policy_max(cpu, policy);
}
static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
{
intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy);
return 0;
}
static int intel_cpufreq_cpu_offline(struct cpufreq_policy *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
pr_debug("CPU %d going offline\n", cpu->cpu);
if (cpu->suspended)
return 0;
/*
* If the CPU is an SMT thread and it goes offline with the performance
* settings different from the minimum, it will prevent its sibling
* from getting to lower performance levels, so force the minimum
* performance on CPU offline to prevent that from happening.
*/
if (hwp_active)
intel_pstate_hwp_offline(cpu);
else
intel_pstate_set_min_pstate(cpu);
intel_pstate_exit_perf_limits(policy);
return 0;
}
static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
pr_debug("CPU %d going online\n", cpu->cpu);
intel_pstate_init_acpi_perf_limits(policy);
if (hwp_active) {
/*
* Re-enable HWP and clear the "suspended" flag to let "resume"
* know that it need not do that.
*/
intel_pstate_hwp_reenable(cpu);
cpu->suspended = false;
}
return 0;
}
static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
{
intel_pstate_clear_update_util_hook(policy->cpu);
return intel_cpufreq_cpu_offline(policy);
}
static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
pr_debug("CPU %d exiting\n", policy->cpu);
policy->fast_switch_possible = false;
return 0;
}
static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
struct cpudata *cpu;
int rc;
rc = intel_pstate_init_cpu(policy->cpu);
if (rc)
return rc;
cpu = all_cpu_data[policy->cpu];
cpu->max_perf_ratio = 0xFF;
cpu->min_perf_ratio = 0;
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = cpu->pstate.min_freq;
update_turbo_state();
global.turbo_disabled_mf = global.turbo_disabled;
policy->cpuinfo.max_freq = global.turbo_disabled ?
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
policy->min = policy->cpuinfo.min_freq;
policy->max = policy->cpuinfo.max_freq;
intel_pstate_init_acpi_perf_limits(policy);
policy->fast_switch_possible = true;
return 0;
}
static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
int ret = __intel_pstate_cpu_init(policy);
if (ret)
return ret;
/*
* Set the policy to powersave to provide a valid fallback value in case
* the default cpufreq governor is neither powersave nor performance.
*/
policy->policy = CPUFREQ_POLICY_POWERSAVE;
if (hwp_active) {
struct cpudata *cpu = all_cpu_data[policy->cpu];
cpu->epp_cached = intel_pstate_get_epp(cpu, 0);
}
return 0;
}
static struct cpufreq_driver intel_pstate = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = intel_pstate_verify_policy,
.setpolicy = intel_pstate_set_policy,
.suspend = intel_pstate_suspend,
.resume = intel_pstate_resume,
.init = intel_pstate_cpu_init,
.exit = intel_pstate_cpu_exit,
.offline = intel_pstate_cpu_offline,
.online = intel_pstate_cpu_online,
.update_limits = intel_pstate_update_limits,
.name = "intel_pstate",
};
static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
intel_pstate_verify_cpu_policy(cpu, policy);
intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
return 0;
}
/* Use of trace in passive mode:
*
* In passive mode the trace core_busy field (also known as the
* performance field, and labelled as such on the graphs; also known as
* core_avg_perf) is not needed and so is re-assigned to indicate if the
* driver call was via the normal or fast switch path. Various graphs
* output from the intel_pstate_tracer.py utility that include core_busy
* (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%,
* so we use 10 to indicate the normal path through the driver, and
* 90 to indicate the fast switch path through the driver.
* The scaled_busy field is not used, and is set to 0.
*/
#define INTEL_PSTATE_TRACE_TARGET 10
#define INTEL_PSTATE_TRACE_FAST_SWITCH 90
static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate)
{
struct sample *sample;
if (!trace_pstate_sample_enabled())
return;
if (!intel_pstate_sample(cpu, ktime_get()))
return;
sample = &cpu->sample;
trace_pstate_sample(trace_type,
0,
old_pstate,
cpu->pstate.current_pstate,
sample->mperf,
sample->aperf,
sample->tsc,
get_avg_frequency(cpu),
fp_toint(cpu->iowait_boost * 100));
}
static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max,
u32 desired, bool fast_switch)
{
u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
value &= ~HWP_MIN_PERF(~0L);
value |= HWP_MIN_PERF(min);
value &= ~HWP_MAX_PERF(~0L);
value |= HWP_MAX_PERF(max);
value &= ~HWP_DESIRED_PERF(~0L);
value |= HWP_DESIRED_PERF(desired);
if (value == prev)
return;
WRITE_ONCE(cpu->hwp_req_cached, value);
if (fast_switch)
wrmsrl(MSR_HWP_REQUEST, value);
else
wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}
static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu,
u32 target_pstate, bool fast_switch)
{
if (fast_switch)
wrmsrl(MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, target_pstate));
else
wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, target_pstate));
}
static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
int target_pstate, bool fast_switch)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
int old_pstate = cpu->pstate.current_pstate;
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
if (hwp_active) {
int max_pstate = policy->strict_target ?
target_pstate : cpu->max_perf_ratio;
intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0,
fast_switch);
} else if (target_pstate != old_pstate) {
intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch);
}
cpu->pstate.current_pstate = target_pstate;
intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH :
INTEL_PSTATE_TRACE_TARGET, old_pstate);
return target_pstate;
}
static int intel_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
struct cpufreq_freqs freqs;
int target_pstate;
update_turbo_state();
freqs.old = policy->cur;
freqs.new = target_freq;
cpufreq_freq_transition_begin(policy, &freqs);
switch (relation) {
case CPUFREQ_RELATION_L:
target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
break;
case CPUFREQ_RELATION_H:
target_pstate = freqs.new / cpu->pstate.scaling;
break;
default:
target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
break;
}
target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
freqs.new = target_pstate * cpu->pstate.scaling;
cpufreq_freq_transition_end(policy, &freqs, false);
return 0;
}
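/*
 * Illustrative rounding example for the relation handling above: with
 * pstate.scaling = 100000 (100 MHz per P-state step) and a request for
 * 2240000 kHz, CPUFREQ_RELATION_L rounds up to P-state 23 (at or above
 * the request), CPUFREQ_RELATION_H rounds down to 22 (at or below it),
 * and the default case picks the closest step, 22 here.
 */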
static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
int target_pstate;
update_turbo_state();
target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
return target_pstate * cpu->pstate.scaling;
}
static void intel_cpufreq_adjust_perf(unsigned int cpunum,
unsigned long min_perf,
unsigned long target_perf,
unsigned long capacity)
{
struct cpudata *cpu = all_cpu_data[cpunum];
u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
int old_pstate = cpu->pstate.current_pstate;
int cap_pstate, min_pstate, max_pstate, target_pstate;
update_turbo_state();
cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
HWP_HIGHEST_PERF(hwp_cap);
/* Optimization: Avoid unnecessary divisions. */
target_pstate = cap_pstate;
if (target_perf < capacity)
target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity);
min_pstate = cap_pstate;
if (min_perf < capacity)
min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity);
if (min_pstate < cpu->pstate.min_pstate)
min_pstate = cpu->pstate.min_pstate;
if (min_pstate < cpu->min_perf_ratio)
min_pstate = cpu->min_perf_ratio;
max_pstate = min(cap_pstate, cpu->max_perf_ratio);
if (max_pstate < min_pstate)
max_pstate = min_pstate;
target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate);
intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true);
cpu->pstate.current_pstate = target_pstate;
intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
}
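/*
 * Illustrative scaling example for the code above: if the HWP highest
 * performance level gives cap_pstate = 40 and the scheduler passes
 * target_perf = 512 with capacity = 1024, the requested P-state is
 * DIV_ROUND_UP(40 * 512, 1024) = 20, i.e. half of the capped level,
 * before clamping to the [min_pstate, max_pstate] window.
 */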
static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
struct freq_qos_request *req;
struct cpudata *cpu;
struct device *dev;
int ret, freq;
dev = get_cpu_device(policy->cpu);
if (!dev)
return -ENODEV;
ret = __intel_pstate_cpu_init(policy);
if (ret)
return ret;
policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
/* This reflects the intel_pstate_get_cpu_pstates() setting. */
policy->cur = policy->cpuinfo.min_freq;
req = kcalloc(2, sizeof(*req), GFP_KERNEL);
if (!req) {
ret = -ENOMEM;
goto pstate_exit;
}
cpu = all_cpu_data[policy->cpu];
if (hwp_active) {
u64 value;
policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;
intel_pstate_get_hwp_cap(cpu);
rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
WRITE_ONCE(cpu->hwp_req_cached, value);
cpu->epp_cached = intel_pstate_get_epp(cpu, value);
} else {
policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
}
freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.min_perf_pct, 100);
ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN,
freq);
if (ret < 0) {
dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
goto free_req;
}
freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.max_perf_pct, 100);
ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX,
freq);
if (ret < 0) {
dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
goto remove_min_req;
}
policy->driver_data = req;
return 0;
remove_min_req:
freq_qos_remove_request(req);
free_req:
kfree(req);
pstate_exit:
intel_pstate_exit_perf_limits(policy);
return ret;
}
static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct freq_qos_request *req;
req = policy->driver_data;
freq_qos_remove_request(req + 1);
freq_qos_remove_request(req);
kfree(req);
return intel_pstate_cpu_exit(policy);
}
static int intel_cpufreq_suspend(struct cpufreq_policy *policy)
{
intel_pstate_suspend(policy);
if (hwp_active) {
struct cpudata *cpu = all_cpu_data[policy->cpu];
u64 value = READ_ONCE(cpu->hwp_req_cached);
/*
* Clear the desired perf field in MSR_HWP_REQUEST in case
* intel_cpufreq_adjust_perf() is in use and the last value
* written by it may not be suitable.
*/
value &= ~HWP_DESIRED_PERF(~0L);
wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
WRITE_ONCE(cpu->hwp_req_cached, value);
}
return 0;
}
static struct cpufreq_driver intel_cpufreq = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = intel_cpufreq_verify_policy,
.target = intel_cpufreq_target,
.fast_switch = intel_cpufreq_fast_switch,
.init = intel_cpufreq_cpu_init,
.exit = intel_cpufreq_cpu_exit,
.offline = intel_cpufreq_cpu_offline,
.online = intel_pstate_cpu_online,
.suspend = intel_cpufreq_suspend,
.resume = intel_pstate_resume,
.update_limits = intel_pstate_update_limits,
.name = "intel_cpufreq",
};
static struct cpufreq_driver *default_driver;
static void intel_pstate_driver_cleanup(void)
{
unsigned int cpu;
cpus_read_lock();
for_each_online_cpu(cpu) {
if (all_cpu_data[cpu]) {
if (intel_pstate_driver == &intel_pstate)
intel_pstate_clear_update_util_hook(cpu);
spin_lock(&hwp_notify_lock);
kfree(all_cpu_data[cpu]);
WRITE_ONCE(all_cpu_data[cpu], NULL);
spin_unlock(&hwp_notify_lock);
}
}
cpus_read_unlock();
intel_pstate_driver = NULL;
}
static int intel_pstate_register_driver(struct cpufreq_driver *driver)
{
int ret;
if (driver == &intel_pstate)
intel_pstate_sysfs_expose_hwp_dynamic_boost();
memset(&global, 0, sizeof(global));
global.max_perf_pct = 100;
intel_pstate_driver = driver;
ret = cpufreq_register_driver(intel_pstate_driver);
if (ret) {
intel_pstate_driver_cleanup();
return ret;
}
global.min_perf_pct = min_perf_pct_min();
return 0;
}
static ssize_t intel_pstate_show_status(char *buf)
{
if (!intel_pstate_driver)
return sprintf(buf, "off\n");
return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
"active" : "passive");
}
static int intel_pstate_update_status(const char *buf, size_t size)
{
if (size == 3 && !strncmp(buf, "off", size)) {
if (!intel_pstate_driver)
return -EINVAL;
if (hwp_active)
return -EBUSY;
cpufreq_unregister_driver(intel_pstate_driver);
intel_pstate_driver_cleanup();
return 0;
}
if (size == 6 && !strncmp(buf, "active", size)) {
if (intel_pstate_driver) {
if (intel_pstate_driver == &intel_pstate)
return 0;
cpufreq_unregister_driver(intel_pstate_driver);
}
return intel_pstate_register_driver(&intel_pstate);
}
if (size == 7 && !strncmp(buf, "passive", size)) {
if (intel_pstate_driver) {
if (intel_pstate_driver == &intel_cpufreq)
return 0;
cpufreq_unregister_driver(intel_pstate_driver);
intel_pstate_sysfs_hide_hwp_dynamic_boost();
}
return intel_pstate_register_driver(&intel_cpufreq);
}
return -EINVAL;
}
static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
static unsigned int force_load __initdata;
static int __init intel_pstate_msrs_not_valid(void)
{
if (!pstate_funcs.get_max(0) ||
!pstate_funcs.get_min(0) ||
!pstate_funcs.get_turbo(0))
return -ENODEV;
return 0;
}
static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
pstate_funcs.get_max = funcs->get_max;
pstate_funcs.get_max_physical = funcs->get_max_physical;
pstate_funcs.get_min = funcs->get_min;
pstate_funcs.get_turbo = funcs->get_turbo;
pstate_funcs.get_scaling = funcs->get_scaling;
pstate_funcs.get_val = funcs->get_val;
pstate_funcs.get_vid = funcs->get_vid;
pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
}
#ifdef CONFIG_ACPI
static bool __init intel_pstate_no_acpi_pss(void)
{
int i;
for_each_possible_cpu(i) {
acpi_status status;
union acpi_object *pss;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_processor *pr = per_cpu(processors, i);
if (!pr)
continue;
status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
if (ACPI_FAILURE(status))
continue;
pss = buffer.pointer;
if (pss && pss->type == ACPI_TYPE_PACKAGE) {
kfree(pss);
return false;
}
kfree(pss);
}
pr_debug("ACPI _PSS not found\n");
return true;
}
static bool __init intel_pstate_no_acpi_pcch(void)
{
acpi_status status;
acpi_handle handle;
status = acpi_get_handle(NULL, "\\_SB", &handle);
if (ACPI_FAILURE(status))
goto not_found;
if (acpi_has_method(handle, "PCCH"))
return false;
not_found:
pr_debug("ACPI PCCH not found\n");
return true;
}
static bool __init intel_pstate_has_acpi_ppc(void)
{
int i;
for_each_possible_cpu(i) {
struct acpi_processor *pr = per_cpu(processors, i);
if (!pr)
continue;
if (acpi_has_method(pr->handle, "_PPC"))
return true;
}
pr_debug("ACPI _PPC not found\n");
return false;
}
enum {
PSS,
PPC,
};
/* Hardware vendor-specific info that has its own power management modes */
static struct acpi_platform_list plat_info[] __initdata = {
{"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS},
{"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
{"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
{"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
{"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
{"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
{"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
{"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
{"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
{"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
{"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
{"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
{"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
{"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
{"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
{ } /* End */
};
#define BITMASK_OOB (BIT(8) | BIT(18))
static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
const struct x86_cpu_id *id;
u64 misc_pwr;
int idx;
id = x86_match_cpu(intel_pstate_cpu_oob_ids);
if (id) {
rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
if (misc_pwr & BITMASK_OOB) {
pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
return true;
}
}
idx = acpi_match_platform_list(plat_info);
if (idx < 0)
return false;
switch (plat_info[idx].data) {
case PSS:
if (!intel_pstate_no_acpi_pss())
return false;
return intel_pstate_no_acpi_pcch();
case PPC:
return intel_pstate_has_acpi_ppc() && !force_load;
}
return false;
}
static void intel_pstate_request_control_from_smm(void)
{
/*
* It may be unsafe to request P-states control from SMM if _PPC support
* has not been enabled.
*/
if (acpi_ppc)
acpi_processor_pstate_control();
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */
#define INTEL_PSTATE_HWP_BROADWELL 0x01
#define X86_MATCH_HWP(model, hwp_mode) \
X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
X86_FEATURE_HWP, hwp_mode)
static const struct x86_cpu_id hwp_support_ids[] __initconst = {
X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL),
X86_MATCH_HWP(ANY, 0),
{}
};
static bool intel_pstate_hwp_is_enabled(void)
{
u64 value;
rdmsrl(MSR_PM_ENABLE, value);
return !!(value & 0x1);
}
static const struct x86_cpu_id intel_epp_balance_perf[] = {
/*
* Set the EPP value to 102, the maximum suggested EPP that
* can still result in one-core turbo frequency for
* Alder Lake mobile CPUs.
*/
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 102),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 32),
{}
};
static int __init intel_pstate_init(void)
{
static struct cpudata **_all_cpu_data;
const struct x86_cpu_id *id;
int rc;
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return -ENODEV;
id = x86_match_cpu(hwp_support_ids);
if (id) {
hwp_forced = intel_pstate_hwp_is_enabled();
if (hwp_forced)
pr_info("HWP enabled by BIOS\n");
else if (no_load)
return -ENODEV;
copy_cpu_funcs(&core_funcs);
/*
* Avoid enabling HWP for processors without EPP support,
* because that means an incomplete HWP implementation, which is
* a corner case and generally problematic to support.
*
* If HWP is enabled already, though, there is no choice but to
* deal with it.
*/
if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
WRITE_ONCE(hwp_active, 1);
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
intel_cpufreq.attr = hwp_cpufreq_attrs;
intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf;
if (!default_driver)
default_driver = &intel_pstate;
pstate_funcs.get_cpu_scaling = hwp_get_cpu_scaling;
goto hwp_cpu_matched;
}
pr_info("HWP not enabled\n");
} else {
if (no_load)
return -ENODEV;
id = x86_match_cpu(intel_pstate_cpu_ids);
if (!id) {
pr_info("CPU model not supported\n");
return -ENODEV;
}
copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
}
if (intel_pstate_msrs_not_valid()) {
pr_info("Invalid MSRs\n");
return -ENODEV;
}
/* Without HWP start in the passive mode. */
if (!default_driver)
default_driver = &intel_cpufreq;
hwp_cpu_matched:
/*
* The Intel pstate driver will be ignored if the platform
* firmware has its own power management modes.
*/
if (intel_pstate_platform_pwr_mgmt_exists()) {
pr_info("P-states controlled by the platform\n");
return -ENODEV;
}
if (!hwp_active && hwp_only)
return -ENOTSUPP;
pr_info("Intel P-state driver initializing\n");
_all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus()));
if (!_all_cpu_data)
return -ENOMEM;
WRITE_ONCE(all_cpu_data, _all_cpu_data);
intel_pstate_request_control_from_smm();
intel_pstate_sysfs_expose_params();
if (hwp_active) {
const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf);
if (id)
epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data;
}
mutex_lock(&intel_pstate_driver_lock);
rc = intel_pstate_register_driver(default_driver);
mutex_unlock(&intel_pstate_driver_lock);
if (rc) {
intel_pstate_sysfs_remove();
return rc;
}
if (hwp_active) {
const struct x86_cpu_id *id;
id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
if (id) {
set_power_ctl_ee_state(false);
pr_info("Disabling energy efficiency optimization\n");
}
pr_info("HWP enabled\n");
} else if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
pr_warn("Problematic setup: Hybrid processor with disabled HWP\n");
}
return 0;
}
device_initcall(intel_pstate_init);
static int __init intel_pstate_setup(char *str)
{
if (!str)
return -EINVAL;
if (!strcmp(str, "disable"))
no_load = 1;
else if (!strcmp(str, "active"))
default_driver = &intel_pstate;
else if (!strcmp(str, "passive"))
default_driver = &intel_cpufreq;
if (!strcmp(str, "no_hwp"))
no_hwp = 1;
if (!strcmp(str, "force"))
force_load = 1;
if (!strcmp(str, "hwp_only"))
hwp_only = 1;
if (!strcmp(str, "per_cpu_perf_limits"))
per_cpu_limits = true;
#ifdef CONFIG_ACPI
if (!strcmp(str, "support_acpi_ppc"))
acpi_ppc = true;
#endif
return 0;
}
early_param("intel_pstate", intel_pstate_setup);
MODULE_AUTHOR("Dirk Brandewie <[email protected]>");
MODULE_DESCRIPTION("'intel_pstate' - P-state driver for Intel Core processors");
| linux-master | drivers/cpufreq/intel_pstate.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Cyrix MediaGX and NatSemi Geode Suspend Modulation
* (C) 2002 Zwane Mwaikambo <[email protected]>
* (C) 2002 Hiroshi Miura <[email protected]>
* All Rights Reserved
*
* The author(s) of this software shall not be held liable for damages
* of any nature resulting from the use of this software. This
* software is provided AS-IS with no warranties.
*
* Theoretical note:
*
* (see Geode(tm) CS5530 manual (rev.4.1) page.56)
*
* CPU frequency control on NatSemi Geode GX1/GXLV processor and CS55x0
* are based on Suspend Modulation.
*
* Suspend Modulation works by asserting and de-asserting the SUSP# pin
* to the CPU (GX1/GXLV) for configurable durations. While SUSP# is
* asserted the CPU enters an idle state: the GX1 stops its core clock,
* so power consumption is reduced.
*
* Suspend Modulation's OFF/ON durations are configurable
* with the 'Suspend Modulation OFF Count Register'
* and the 'Suspend Modulation ON Count Register'.
* These registers are 8-bit counters that represent the number of
* 32us intervals for which the SUSP# pin is asserted (ON) /
* de-asserted (OFF) to the processor.
*
* These counters define a ratio which is the effective frequency
* of operation of the system.
*
* OFF Count
* F_eff = Fgx * ----------------------
* OFF Count + ON Count
*
* 0 <= On Count, Off Count <= 255
*
* From these limits, we can get register values
*
* off_duration + on_duration <= MAX_DURATION
* on_duration = off_duration * (stock_freq - freq) / freq
*
* off_duration = (freq * DURATION) / stock_freq
* on_duration = DURATION - off_duration
*
*---------------------------------------------------------------------------
*
* ChangeLog:
* Dec. 12, 2003 Hiroshi Miura <[email protected]>
* - fix on/off register mistake
* - fix cpu_khz calc when it stops cpu modulation.
*
* Dec. 11, 2002 Hiroshi Miura <[email protected]>
* - rewrite for Cyrix MediaGX Cx5510/5520 and
* NatSemi Geode Cs5530(A).
*
* Jul. ??, 2002 Zwane Mwaikambo <[email protected]>
* - cs5530_mod patch for 2.4.19-rc1.
*
*---------------------------------------------------------------------------
*
* Todo
* Test on machines with 5510, 5530, 5530A
*/
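/*
 * Worked example of the duration formulas above (illustrative numbers;
 * gx_validate_speed() below performs the actual search, which also
 * honours the 8-bit register limits): with stock_freq = 200000 kHz,
 * DURATION = 255 and a requested freq of 100000 kHz,
 *
 *	off_duration = (100000 * 255) / 200000 = 127
 *	on_duration  = 255 - 127 = 128
 *
 * giving F_eff = 200000 * 127 / 255 ~= 99608 kHz, i.e. roughly half the
 * stock clock.
 */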
/************************************************************************
* Suspend Modulation - Definitions *
************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include <asm/processor-cyrix.h>
/* PCI config registers, all at F0 */
#define PCI_PMER1 0x80 /* power management enable register 1 */
#define PCI_PMER2 0x81 /* power management enable register 2 */
#define PCI_PMER3 0x82 /* power management enable register 3 */
#define PCI_IRQTC 0x8c /* irq speedup timer counter register:typical 2 to 4ms */
#define PCI_VIDTC 0x8d /* video speedup timer counter register: typical 50 to 100ms */
#define PCI_MODOFF 0x94 /* suspend modulation OFF counter register, 1 = 32us */
#define PCI_MODON 0x95 /* suspend modulation ON counter register */
#define PCI_SUSCFG 0x96 /* suspend configuration register */
/* PMER1 bits */
#define GPM (1<<0) /* global power management */
#define GIT (1<<1) /* globally enable PM device idle timers */
#define GTR (1<<2) /* globally enable IO traps */
#define IRQ_SPDUP (1<<3) /* disable clock throttle during interrupt handling */
#define VID_SPDUP (1<<4) /* disable clock throttle during vga video handling */
/* SUSCFG bits */
#define SUSMOD (1<<0) /* enable/disable suspend modulation */
/* the below is supported only with cs5530 (after rev.1.2)/cs5530A */
#define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */
/* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */
#define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */
/* the below is supported only with cs5530A */
#define PWRSVE_ISA (1<<3) /* stop ISA clock */
#define PWRSVE (1<<4) /* active idle */
struct gxfreq_params {
u8 on_duration;
u8 off_duration;
u8 pci_suscfg;
u8 pci_pmer1;
u8 pci_pmer2;
struct pci_dev *cs55x0;
};
static struct gxfreq_params *gx_params;
static int stock_freq;
/* PCI bus clock, in kHz - defaults to 30000 (30 MHz) if cpu_khz is not available */
static int pci_busclk;
module_param(pci_busclk, int, 0444);
/* maximum duration for which the cpu may be suspended
* (32us * max_duration). If no parameter is given, this defaults
* to 255.
* Note that this leads to a maximum of 8 ms(!) where the CPU clock
* is suspended -- processing power is just 0.39% of what it used to be,
* though. 781.25 kHz(!) for a 200 MHz processor -- wow. */
static int max_duration = 255;
module_param(max_duration, int, 0444);
/* For the default policy, we want at least some processing power
* - let's say 5%. (min = maxfreq / POLICY_MIN_DIV)
*/
#define POLICY_MIN_DIV 20
/**
* we can detect a core multiplier from dir0_lsb
* from GX1 datasheet p.56,
* MULT[3:0]:
* 0000 = SYSCLK multiplied by 4 (test only)
* 0001 = SYSCLK multiplied by 10
* 0010 = SYSCLK multiplied by 4
* 0011 = SYSCLK multiplied by 6
* 0100 = SYSCLK multiplied by 9
* 0101 = SYSCLK multiplied by 5
* 0110 = SYSCLK multiplied by 7
* 0111 = SYSCLK multiplied by 8
* of 33.3MHz
**/
static int gx_freq_mult[16] = {
4, 10, 4, 6, 9, 5, 7, 8,
0, 0, 0, 0, 0, 0, 0, 0
};
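/*
 * Example decode (assuming the nominal 33.3 MHz SYSCLK): a low nibble of
 * 0111 in the DIR register read by cpufreq_gx_cpu_init() selects
 * gx_freq_mult[7] = 8, i.e. a core clock of about 266 MHz.
 */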
/****************************************************************
* Low Level chipset interface *
****************************************************************/
static struct pci_device_id gx_chipset_tbl[] __initdata = {
{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY), },
{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), },
{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, gx_chipset_tbl);
static void gx_write_byte(int reg, int value)
{
pci_write_config_byte(gx_params->cs55x0, reg, value);
}
/**
* gx_detect_chipset:
*
**/
static struct pci_dev * __init gx_detect_chipset(void)
{
struct pci_dev *gx_pci = NULL;
/* detect which companion chip is used */
for_each_pci_dev(gx_pci) {
if ((pci_match_id(gx_chipset_tbl, gx_pci)) != NULL)
return gx_pci;
}
pr_debug("error: no supported chipset found!\n");
return NULL;
}
/**
* gx_get_cpuspeed:
*
* Finds the effective frequency at which the Cyrix MediaGX/NatSemi
* Geode CPU currently runs.
*/
static unsigned int gx_get_cpuspeed(unsigned int cpu)
{
if ((gx_params->pci_suscfg & SUSMOD) == 0)
return stock_freq;
return (stock_freq * gx_params->off_duration)
/ (gx_params->on_duration + gx_params->off_duration);
}
/**
* gx_validate_speed:
* find the closest supported speed to a requested frequency
*
**/
static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration,
u8 *off_duration)
{
unsigned int i;
u8 tmp_on, tmp_off;
int old_tmp_freq = stock_freq;
int tmp_freq;
*off_duration = 1;
*on_duration = 0;
for (i = max_duration; i > 0; i--) {
tmp_off = ((khz * i) / stock_freq) & 0xff;
tmp_on = i - tmp_off;
tmp_freq = (stock_freq * tmp_off) / i;
/* if this relation is closer to khz, use this. If it's equal,
* prefer it, too - lower latency */
if (abs(tmp_freq - khz) <= abs(old_tmp_freq - khz)) {
*on_duration = tmp_on;
*off_duration = tmp_off;
old_tmp_freq = tmp_freq;
}
}
return old_tmp_freq;
}
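/*
 * Worked example (illustrative): with stock_freq = 200000 kHz,
 * max_duration = 255 and khz = 50000, the search above settles on
 * off_duration = 1, on_duration = 3 (i = 4), which reproduces exactly
 * 200000 * 1 / 4 = 50000 kHz; among equally close matches the smaller
 * total duration wins, giving lower modulation latency.
 */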
/**
* gx_set_cpuspeed:
* set cpu speed in khz.
**/
static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
{
u8 suscfg, pmer1;
unsigned int new_khz;
unsigned long flags;
struct cpufreq_freqs freqs;
freqs.old = gx_get_cpuspeed(0);
new_khz = gx_validate_speed(khz, &gx_params->on_duration,
&gx_params->off_duration);
freqs.new = new_khz;
cpufreq_freq_transition_begin(policy, &freqs);
local_irq_save(flags);
if (new_khz != stock_freq) {
/* new_khz == stock_freq (100% CPU speed) is a special case, handled in the else branch below */
switch (gx_params->cs55x0->device) {
case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
pmer1 = gx_params->pci_pmer1 | IRQ_SPDUP | VID_SPDUP;
/* FIXME: need to test other values -- Zwane,Miura */
/* typical 2 to 4ms */
gx_write_byte(PCI_IRQTC, 4);
/* typical 50 to 100ms */
gx_write_byte(PCI_VIDTC, 100);
gx_write_byte(PCI_PMER1, pmer1);
if (gx_params->cs55x0->revision < 0x10) {
/* CS5530(rev 1.2, 1.3) */
suscfg = gx_params->pci_suscfg|SUSMOD;
} else {
/* CS5530A,B.. */
suscfg = gx_params->pci_suscfg|SUSMOD|PWRSVE;
}
break;
case PCI_DEVICE_ID_CYRIX_5520:
case PCI_DEVICE_ID_CYRIX_5510:
suscfg = gx_params->pci_suscfg | SUSMOD;
break;
default:
local_irq_restore(flags);
pr_debug("fatal: try to set unknown chipset.\n");
return;
}
} else {
suscfg = gx_params->pci_suscfg & ~(SUSMOD);
gx_params->off_duration = 0;
gx_params->on_duration = 0;
pr_debug("suspend modulation disabled: cpu runs 100%% speed.\n");
}
gx_write_byte(PCI_MODOFF, gx_params->off_duration);
gx_write_byte(PCI_MODON, gx_params->on_duration);
gx_write_byte(PCI_SUSCFG, suscfg);
pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg);
local_irq_restore(flags);
gx_params->pci_suscfg = suscfg;
cpufreq_freq_transition_end(policy, &freqs, 0);
pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
gx_params->on_duration * 32, gx_params->off_duration * 32);
pr_debug("suspend modulation w/ clock speed: %d kHz.\n", freqs.new);
}
/****************************************************************
* High level functions *
****************************************************************/
/*
* cpufreq_gx_verify: test if frequency range is valid
*
* This function checks if a given frequency range in kHz is valid
* for the hardware supported by the driver.
*/
static int cpufreq_gx_verify(struct cpufreq_policy_data *policy)
{
unsigned int tmp_freq = 0;
u8 tmp1, tmp2;
if (!stock_freq || !policy)
return -EINVAL;
policy->cpu = 0;
cpufreq_verify_within_limits(policy, (stock_freq / max_duration),
stock_freq);
/* It must be ensured that at least one supported frequency is
* within policy->min and policy->max. If it is not, policy->max
* needs to be increased until one frequency is supported.
* policy->min may not be decreased, though. This way we guarantee a
* specific processing capacity.
*/
tmp_freq = gx_validate_speed(policy->min, &tmp1, &tmp2);
if (tmp_freq < policy->min)
tmp_freq += stock_freq / max_duration;
policy->min = tmp_freq;
if (policy->min > policy->max)
policy->max = tmp_freq;
tmp_freq = gx_validate_speed(policy->max, &tmp1, &tmp2);
if (tmp_freq > policy->max)
tmp_freq -= stock_freq / max_duration;
policy->max = tmp_freq;
if (policy->max < policy->min)
policy->max = policy->min;
cpufreq_verify_within_limits(policy, (stock_freq / max_duration),
stock_freq);
return 0;
}
/*
* cpufreq_gx_target:
*
*/
static int cpufreq_gx_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
u8 tmp1, tmp2;
unsigned int tmp_freq;
if (!stock_freq || !policy)
return -EINVAL;
policy->cpu = 0;
tmp_freq = gx_validate_speed(target_freq, &tmp1, &tmp2);
while (tmp_freq < policy->min) {
tmp_freq += stock_freq / max_duration;
tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2);
}
while (tmp_freq > policy->max) {
tmp_freq -= stock_freq / max_duration;
tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2);
}
gx_set_cpuspeed(policy, tmp_freq);
return 0;
}
static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
{
unsigned int maxfreq;
if (!policy || policy->cpu != 0)
return -ENODEV;
/* determine maximum frequency */
if (pci_busclk)
maxfreq = pci_busclk * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
else if (cpu_khz)
maxfreq = cpu_khz;
else
maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
stock_freq = maxfreq;
pr_debug("cpu max frequency is %d.\n", maxfreq);
/* setup basic struct for cpufreq API */
policy->cpu = 0;
if (max_duration < POLICY_MIN_DIV)
policy->min = maxfreq / max_duration;
else
policy->min = maxfreq / POLICY_MIN_DIV;
policy->max = maxfreq;
policy->cpuinfo.min_freq = maxfreq / max_duration;
policy->cpuinfo.max_freq = maxfreq;
return 0;
}
/*
* cpufreq_gx_init:
* MediaGX/Geode GX cpufreq driver initialization
*/
static struct cpufreq_driver gx_suspmod_driver = {
.flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.get = gx_get_cpuspeed,
.verify = cpufreq_gx_verify,
.target = cpufreq_gx_target,
.init = cpufreq_gx_cpu_init,
.name = "gx-suspmod",
};
static int __init cpufreq_gx_init(void)
{
int ret;
struct gxfreq_params *params;
struct pci_dev *gx_pci;
/* Test if we have the right hardware */
gx_pci = gx_detect_chipset();
if (gx_pci == NULL)
return -ENODEV;
/* check whether module parameters are sane */
if (max_duration > 0xff)
max_duration = 0xff;
pr_debug("geode suspend modulation available.\n");
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (params == NULL)
return -ENOMEM;
params->cs55x0 = gx_pci;
gx_params = params;
/* keep cs55x0 configurations */
pci_read_config_byte(params->cs55x0, PCI_SUSCFG, &(params->pci_suscfg));
pci_read_config_byte(params->cs55x0, PCI_PMER1, &(params->pci_pmer1));
pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2));
pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration));
pci_read_config_byte(params->cs55x0, PCI_MODOFF,
&(params->off_duration));
ret = cpufreq_register_driver(&gx_suspmod_driver);
if (ret) {
kfree(params);
return ret; /* register error! */
}
return 0;
}
static void __exit cpufreq_gx_exit(void)
{
cpufreq_unregister_driver(&gx_suspmod_driver);
pci_dev_put(gx_params->cs55x0);
kfree(gx_params);
}
MODULE_AUTHOR("Hiroshi Miura <[email protected]>");
MODULE_DESCRIPTION("Cpufreq driver for Cyrix MediaGX and NatSemi Geode");
MODULE_LICENSE("GPL");
module_init(cpufreq_gx_init);
module_exit(cpufreq_gx_exit);
| linux-master | drivers/cpufreq/gx-suspmod.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* cpufreq driver for Enhanced SpeedStep, as found in Intel's Pentium
* M (part of the Centrino chipset).
*
* Since the original Pentium M, most new Intel CPUs support Enhanced
* SpeedStep.
*
* Despite the "SpeedStep" in the name, this is almost entirely unlike
* traditional SpeedStep.
*
* Modelled on speedstep.c
*
* Copyright (C) 2003 Jeremy Fitzhardinge <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/sched.h> /* current */
#include <linux/delay.h>
#include <linux/compiler.h>
#include <linux/gfp.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
#define MAINTAINER "[email protected]"
#define INTEL_MSR_RANGE (0xffff)
struct cpu_id
{
__u8 x86; /* CPU family */
__u8 x86_model; /* model */
__u8 x86_stepping; /* stepping */
};
enum {
CPU_BANIAS,
CPU_DOTHAN_A1,
CPU_DOTHAN_A2,
CPU_DOTHAN_B0,
CPU_MP4HT_D0,
CPU_MP4HT_E0,
};
static const struct cpu_id cpu_ids[] = {
[CPU_BANIAS] = { 6, 9, 5 },
[CPU_DOTHAN_A1] = { 6, 13, 1 },
[CPU_DOTHAN_A2] = { 6, 13, 2 },
[CPU_DOTHAN_B0] = { 6, 13, 6 },
[CPU_MP4HT_D0] = {15, 3, 4 },
[CPU_MP4HT_E0] = {15, 4, 1 },
};
#define N_IDS ARRAY_SIZE(cpu_ids)
struct cpu_model
{
const struct cpu_id *cpu_id;
const char *model_name;
unsigned max_freq; /* max clock in kHz */
struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */
};
static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
const struct cpu_id *x);
/* Operating points for current CPU */
static DEFINE_PER_CPU(struct cpu_model *, centrino_model);
static DEFINE_PER_CPU(const struct cpu_id *, centrino_cpu);
static struct cpufreq_driver centrino_driver;
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE
/* Computes the correct form for IA32_PERF_CTL MSR for a particular
frequency/voltage operating point; frequency in MHz, voltage in mV.
This is stored as "driver_data" in the structure. */
#define OP(mhz, mv) \
{ \
.frequency = (mhz) * 1000, \
.driver_data = (((mhz)/100) << 8) | ((mv - 700) / 16) \
}
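/*
 * For reference, the inverse mapping (hypothetical helper formulas, not
 * used by the driver): given a PERF_CTL value encoded by OP() above,
 *
 *	mhz = ((val >> 8) & 0xff) * 100;
 *	mv  = (val & 0xff) * 16 + 700;
 *
 * e.g. OP(600, 844) encodes ((6 << 8) | 9) = 0x609, which decodes back
 * to 600 MHz at 844 mV.
 */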
/*
* These voltage tables were derived from the Intel Pentium M
* datasheet, document 25261202.pdf, Table 5. I have verified they
* are consistent with my IBM ThinkPad X31, which has a 1.3GHz Pentium
* M.
*/
/* Ultra Low Voltage Intel Pentium M processor 900MHz (Banias) */
static struct cpufreq_frequency_table banias_900[] =
{
OP(600, 844),
OP(800, 988),
OP(900, 1004),
{ .frequency = CPUFREQ_TABLE_END }
};
/* Ultra Low Voltage Intel Pentium M processor 1000MHz (Banias) */
static struct cpufreq_frequency_table banias_1000[] =
{
OP(600, 844),
OP(800, 972),
OP(900, 988),
OP(1000, 1004),
{ .frequency = CPUFREQ_TABLE_END }
};
/* Low Voltage Intel Pentium M processor 1.10GHz (Banias) */
static struct cpufreq_frequency_table banias_1100[] =
{
OP( 600, 956),
OP( 800, 1020),
OP( 900, 1100),
OP(1000, 1164),
OP(1100, 1180),
{ .frequency = CPUFREQ_TABLE_END }
};
/* Low Voltage Intel Pentium M processor 1.20GHz (Banias) */
static struct cpufreq_frequency_table banias_1200[] =
{
OP( 600, 956),
OP( 800, 1004),
OP( 900, 1020),
OP(1000, 1100),
OP(1100, 1164),
OP(1200, 1180),
{ .frequency = CPUFREQ_TABLE_END }
};
/* Intel Pentium M processor 1.30GHz (Banias) */
static struct cpufreq_frequency_table banias_1300[] =
{
OP( 600, 956),
OP( 800, 1260),
OP(1000, 1292),
OP(1200, 1356),
OP(1300, 1388),
{ .frequency = CPUFREQ_TABLE_END }
};
/* Intel Pentium M processor 1.40GHz (Banias) */
static struct cpufreq_frequency_table banias_1400[] =
{
OP( 600, 956),
OP( 800, 1180),
OP(1000, 1308),
OP(1200, 1436),
OP(1400, 1484),
{ .frequency = CPUFREQ_TABLE_END }
};
/* Intel Pentium M processor 1.50GHz (Banias) */
static struct cpufreq_frequency_table banias_1500[] =
{
OP( 600, 956),
OP( 800, 1116),
OP(1000, 1228),
OP(1200, 1356),
OP(1400, 1452),
OP(1500, 1484),
{ .frequency = CPUFREQ_TABLE_END }
};
/* Intel Pentium M processor 1.60GHz (Banias) */
static struct cpufreq_frequency_table banias_1600[] =
{
OP( 600, 956),
OP( 800, 1036),
OP(1000, 1164),
OP(1200, 1276),
OP(1400, 1420),
OP(1600, 1484),
{ .frequency = CPUFREQ_TABLE_END }
};
/* Intel Pentium M processor 1.70GHz (Banias) */
static struct cpufreq_frequency_table banias_1700[] =
{
OP( 600, 956),
OP( 800, 1004),
OP(1000, 1116),
OP(1200, 1228),
OP(1400, 1308),
OP(1700, 1484),
{ .frequency = CPUFREQ_TABLE_END }
};
#undef OP
#define _BANIAS(cpuid, max, name) \
{ .cpu_id = cpuid, \
.model_name = "Intel(R) Pentium(R) M processor " name "MHz", \
.max_freq = (max)*1000, \
.op_points = banias_##max, \
}
#define BANIAS(max) _BANIAS(&cpu_ids[CPU_BANIAS], max, #max)
/* CPU models, their operating frequency range, and freq/voltage
operating points */
static struct cpu_model models[] =
{
_BANIAS(&cpu_ids[CPU_BANIAS], 900, " 900"),
BANIAS(1000),
BANIAS(1100),
BANIAS(1200),
BANIAS(1300),
BANIAS(1400),
BANIAS(1500),
BANIAS(1600),
BANIAS(1700),
/* NULL model_name is a wildcard */
{ &cpu_ids[CPU_DOTHAN_A1], NULL, 0, NULL },
{ &cpu_ids[CPU_DOTHAN_A2], NULL, 0, NULL },
{ &cpu_ids[CPU_DOTHAN_B0], NULL, 0, NULL },
{ &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL },
{ &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL },
{ NULL, }
};
#undef _BANIAS
#undef BANIAS
static int centrino_cpu_init_table(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
struct cpu_model *model;
for(model = models; model->cpu_id != NULL; model++)
if (centrino_verify_cpu_id(cpu, model->cpu_id) &&
(model->model_name == NULL ||
strcmp(cpu->x86_model_id, model->model_name) == 0))
break;
if (model->cpu_id == NULL) {
/* No match at all */
pr_debug("no support for CPU model \"%s\": "
"send /proc/cpuinfo to " MAINTAINER "\n",
cpu->x86_model_id);
return -ENOENT;
}
if (model->op_points == NULL) {
/* Matched a wildcard entry that has no operating-point table */
pr_debug("no table support for CPU model \"%s\"\n",
cpu->x86_model_id);
pr_debug("try using the acpi-cpufreq driver\n");
return -ENOENT;
}
per_cpu(centrino_model, policy->cpu) = model;
pr_debug("found \"%s\": max frequency: %dkHz\n",
model->model_name, model->max_freq);
return 0;
}
#else
static inline int centrino_cpu_init_table(struct cpufreq_policy *policy)
{
return -ENODEV;
}
#endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */
static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
const struct cpu_id *x)
{
if ((c->x86 == x->x86) &&
(c->x86_model == x->x86_model) &&
(c->x86_stepping == x->x86_stepping))
return 1;
return 0;
}
/* To be called only after centrino_model is initialized */
static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)
{
int i;
/*
* Extract clock in kHz from PERF_CTL value
* for centrino, as some DSDTs are buggy.
* Ideally, this can be done using the acpi_data structure.
*/
if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||
(per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||
(per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {
msr = (msr >> 8) & 0xff;
return msr * 100000;
}
if ((!per_cpu(centrino_model, cpu)) ||
(!per_cpu(centrino_model, cpu)->op_points))
return 0;
msr &= 0xffff;
for (i = 0;
per_cpu(centrino_model, cpu)->op_points[i].frequency
!= CPUFREQ_TABLE_END;
i++) {
if (msr == per_cpu(centrino_model, cpu)->op_points[i].driver_data)
return per_cpu(centrino_model, cpu)->
op_points[i].frequency;
}
if (failsafe)
return per_cpu(centrino_model, cpu)->op_points[i-1].frequency;
else
return 0;
}
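/*
 * Illustrative decode for the Banias/Dothan-A1/B0 fast path above: a
 * PERF_STATUS/PERF_CTL value of 0x0609 yields ((0x0609 >> 8) & 0xff) = 6,
 * i.e. 600000 kHz; other models fall through to the op_points lookup.
 */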
/* Return the current CPU frequency in kHz */
static unsigned int get_cur_freq(unsigned int cpu)
{
unsigned l, h;
unsigned clock_freq;
rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &l, &h);
clock_freq = extract_clock(l, cpu, 0);
if (unlikely(clock_freq == 0)) {
/*
* On some CPUs, we can see transient MSR values (which are
* not present in _PSS), while CPU is doing some automatic
* P-state transition (like TM2). Get the last freq set
* in PERF_CTL.
*/
rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &l, &h);
clock_freq = extract_clock(l, cpu, 1);
}
return clock_freq;
}
static int centrino_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
unsigned l, h;
int i;
/* Only Intel makes Enhanced Speedstep-capable CPUs */
if (cpu->x86_vendor != X86_VENDOR_INTEL ||
!cpu_has(cpu, X86_FEATURE_EST))
return -ENODEV;
if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
if (policy->cpu != 0)
return -ENODEV;
for (i = 0; i < N_IDS; i++)
if (centrino_verify_cpu_id(cpu, &cpu_ids[i]))
break;
if (i != N_IDS)
per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i];
if (!per_cpu(centrino_cpu, policy->cpu)) {
pr_debug("found unsupported CPU with "
"Enhanced SpeedStep: send /proc/cpuinfo to "
MAINTAINER "\n");
return -ENODEV;
}
if (centrino_cpu_init_table(policy))
return -ENODEV;
/* Check to see if Enhanced SpeedStep is enabled, and try to
enable it if not. */
rdmsr(MSR_IA32_MISC_ENABLE, l, h);
if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
pr_debug("trying to enable Enhanced SpeedStep (%x)\n", l);
wrmsr(MSR_IA32_MISC_ENABLE, l, h);
/* check to see if it stuck */
rdmsr(MSR_IA32_MISC_ENABLE, l, h);
if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
pr_info("couldn't enable Enhanced SpeedStep\n");
return -ENODEV;
}
}
policy->cpuinfo.transition_latency = 10000;
/* 10uS transition latency */
policy->freq_table = per_cpu(centrino_model, policy->cpu)->op_points;
return 0;
}
static int centrino_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
if (!per_cpu(centrino_model, cpu))
return -ENODEV;
per_cpu(centrino_model, cpu) = NULL;
return 0;
}
/**
* centrino_target - switch to a new frequency
* @policy: cpufreq policy
* @index: index of the target frequency
*
* Sets the CPU to the operating point selected by @index.
*/
static int centrino_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
int retval = 0;
unsigned int j, first_cpu;
struct cpufreq_frequency_table *op_points;
cpumask_var_t covered_cpus;
if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)))
return -ENOMEM;
if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
retval = -ENODEV;
goto out;
}
first_cpu = 1;
op_points = &per_cpu(centrino_model, cpu)->op_points[index];
for_each_cpu(j, policy->cpus) {
int good_cpu;
/*
* Support for SMP systems.
* Make sure we are running on a CPU that wants to change the frequency.
*/
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
good_cpu = cpumask_any_and(policy->cpus,
cpu_online_mask);
else
good_cpu = j;
if (good_cpu >= nr_cpu_ids) {
pr_debug("couldn't limit to CPUs in this domain\n");
retval = -EAGAIN;
if (first_cpu) {
/* We haven't started the transition yet. */
goto out;
}
break;
}
msr = op_points->driver_data;
if (first_cpu) {
rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h);
if (msr == (oldmsr & 0xffff)) {
pr_debug("no change needed - msr was and needs "
"to be %x\n", oldmsr);
retval = 0;
goto out;
}
first_cpu = 0;
/* all but 16 LSB are reserved, treat them with care */
oldmsr &= ~0xffff;
msr &= 0xffff;
oldmsr |= msr;
}
wrmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, oldmsr, h);
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
break;
cpumask_set_cpu(j, covered_cpus);
}
if (unlikely(retval)) {
/*
* We have failed halfway through the frequency change.
* We have sent callbacks to policy->cpus and
* MSRs have already been written on covered_cpus.
* Best-effort undo.
*/
for_each_cpu(j, covered_cpus)
wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h);
}
retval = 0;
out:
free_cpumask_var(covered_cpus);
return retval;
}
static struct cpufreq_driver centrino_driver = {
.name = "centrino", /* should be speedstep-centrino,
but there's a 16 char limit */
.init = centrino_cpu_init,
.exit = centrino_cpu_exit,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = centrino_target,
.get = get_cur_freq,
.attr = cpufreq_generic_attr,
};
/*
* This doesn't replace the detailed checks above because
* the generic CPU IDs don't have a way to match for steppings
* or ASCII model IDs.
*/
static const struct x86_cpu_id centrino_ids[] = {
X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, 9, X86_FEATURE_EST, NULL),
X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, 13, X86_FEATURE_EST, NULL),
X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15, 3, X86_FEATURE_EST, NULL),
X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15, 4, X86_FEATURE_EST, NULL),
{}
};
/**
* centrino_init - initializes the Enhanced SpeedStep CPUFreq driver
*
* Initializes the Enhanced SpeedStep support. Returns -ENODEV on
* unsupported devices, -ENOENT if there's no voltage table for this
* particular CPU model, -EINVAL on problems during initialization,
* and zero on success.
*
* This is quite picky. Not only does the CPU have to advertise the
* "est" flag in the cpuid capability flags, we look for a specific
* CPU model and stepping, and we need to have the exact model name in
* our voltage tables. That is, be paranoid about not releasing
* someone's valuable magic smoke.
*/
static int __init centrino_init(void)
{
if (!x86_match_cpu(centrino_ids))
return -ENODEV;
return cpufreq_register_driver(¢rino_driver);
}
static void __exit centrino_exit(void)
{
cpufreq_unregister_driver(¢rino_driver);
}
MODULE_AUTHOR ("Jeremy Fitzhardinge <[email protected]>");
MODULE_DESCRIPTION ("Enhanced SpeedStep driver for Intel Pentium M processors.");
MODULE_LICENSE ("GPL");
late_initcall(centrino_init);
module_exit(centrino_exit);
| linux-master | drivers/cpufreq/speedstep-centrino.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/cpufreq/cpufreq_powersave.c
*
* Copyright (C) 2002 - 2003 Dominik Brodowski <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/module.h>
static void cpufreq_gov_powersave_limits(struct cpufreq_policy *policy)
{
pr_debug("setting to %u kHz\n", policy->min);
__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
}
static struct cpufreq_governor cpufreq_gov_powersave = {
.name = "powersave",
.limits = cpufreq_gov_powersave_limits,
.owner = THIS_MODULE,
.flags = CPUFREQ_GOV_STRICT_TARGET,
};
MODULE_AUTHOR("Dominik Brodowski <[email protected]>");
MODULE_DESCRIPTION("CPUfreq policy governor 'powersave'");
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE
struct cpufreq_governor *cpufreq_default_governor(void)
{
return &cpufreq_gov_powersave;
}
#endif
cpufreq_governor_init(cpufreq_gov_powersave);
cpufreq_governor_exit(cpufreq_gov_powersave);
| linux-master | drivers/cpufreq/cpufreq_powersave.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* kirkwood-cpufreq.c: cpufreq driver for the Marvell Kirkwood
*
* Copyright (C) 2013 Andrew Lunn <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <asm/proc-fns.h>
#define CPU_SW_INT_BLK BIT(28)
static struct priv
{
struct clk *cpu_clk;
struct clk *ddr_clk;
struct clk *powersave_clk;
struct device *dev;
void __iomem *base;
} priv;
#define STATE_CPU_FREQ 0x01
#define STATE_DDR_FREQ 0x02
/*
* Kirkwood can swap the clock to the CPU between two clocks:
*
* - cpu clk
* - ddr clk
*
* The frequencies are set at runtime before registering this table.
*/
static struct cpufreq_frequency_table kirkwood_freq_table[] = {
{0, STATE_CPU_FREQ, 0}, /* CPU uses cpuclk */
{0, STATE_DDR_FREQ, 0}, /* CPU uses ddrclk */
{0, 0, CPUFREQ_TABLE_END},
};
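/*
 * Illustrative values (actual rates are board-specific): with cpu_clk at
 * 1200 MHz and ddrclk at 400 MHz, kirkwood_cpufreq_probe() fills in
 * .frequency = 1200000 for the STATE_CPU_FREQ entry and 400000 for the
 * STATE_DDR_FREQ entry, so a frequency switch reduces to re-parenting
 * the powersave clock.
 */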
static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu)
{
return clk_get_rate(priv.powersave_clk) / 1000;
}
static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
unsigned int index)
{
unsigned int state = kirkwood_freq_table[index].driver_data;
unsigned long reg;
local_irq_disable();
/* Disable interrupts to the CPU */
reg = readl_relaxed(priv.base);
reg |= CPU_SW_INT_BLK;
writel_relaxed(reg, priv.base);
switch (state) {
case STATE_CPU_FREQ:
clk_set_parent(priv.powersave_clk, priv.cpu_clk);
break;
case STATE_DDR_FREQ:
clk_set_parent(priv.powersave_clk, priv.ddr_clk);
break;
}
/* Wait-for-Interrupt, while the hardware changes frequency */
cpu_do_idle();
/* Enable interrupts to the CPU */
reg = readl_relaxed(priv.base);
reg &= ~CPU_SW_INT_BLK;
writel_relaxed(reg, priv.base);
local_irq_enable();
return 0;
}
/* Module init and exit code */
static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
cpufreq_generic_init(policy, kirkwood_freq_table, 5000);
return 0;
}
static struct cpufreq_driver kirkwood_cpufreq_driver = {
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.get = kirkwood_cpufreq_get_cpu_frequency,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = kirkwood_cpufreq_target,
.init = kirkwood_cpufreq_cpu_init,
.name = "kirkwood-cpufreq",
.attr = cpufreq_generic_attr,
};
static int kirkwood_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
int err;
priv.dev = &pdev->dev;
priv.base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv.base))
return PTR_ERR(priv.base);
np = of_cpu_device_node_get(0);
if (!np) {
dev_err(&pdev->dev, "failed to get cpu device node\n");
return -ENODEV;
}
priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
if (IS_ERR(priv.cpu_clk)) {
dev_err(priv.dev, "Unable to get cpuclk\n");
err = PTR_ERR(priv.cpu_clk);
goto out_node;
}
err = clk_prepare_enable(priv.cpu_clk);
if (err) {
dev_err(priv.dev, "Unable to prepare cpuclk\n");
goto out_node;
}
kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
priv.ddr_clk = of_clk_get_by_name(np, "ddrclk");
if (IS_ERR(priv.ddr_clk)) {
dev_err(priv.dev, "Unable to get ddrclk\n");
err = PTR_ERR(priv.ddr_clk);
goto out_cpu;
}
err = clk_prepare_enable(priv.ddr_clk);
if (err) {
dev_err(priv.dev, "Unable to prepare ddrclk\n");
goto out_cpu;
}
kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000;
priv.powersave_clk = of_clk_get_by_name(np, "powersave");
if (IS_ERR(priv.powersave_clk)) {
dev_err(priv.dev, "Unable to get powersave\n");
err = PTR_ERR(priv.powersave_clk);
goto out_ddr;
}
err = clk_prepare_enable(priv.powersave_clk);
if (err) {
dev_err(priv.dev, "Unable to prepare powersave clk\n");
goto out_ddr;
}
err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
if (err) {
dev_err(priv.dev, "Failed to register cpufreq driver\n");
goto out_powersave;
}
of_node_put(np);
return 0;
out_powersave:
clk_disable_unprepare(priv.powersave_clk);
out_ddr:
clk_disable_unprepare(priv.ddr_clk);
out_cpu:
clk_disable_unprepare(priv.cpu_clk);
out_node:
of_node_put(np);
return err;
}
static void kirkwood_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&kirkwood_cpufreq_driver);
clk_disable_unprepare(priv.powersave_clk);
clk_disable_unprepare(priv.ddr_clk);
clk_disable_unprepare(priv.cpu_clk);
}
static struct platform_driver kirkwood_cpufreq_platform_driver = {
.probe = kirkwood_cpufreq_probe,
.remove_new = kirkwood_cpufreq_remove,
.driver = {
.name = "kirkwood-cpufreq",
},
};
module_platform_driver(kirkwood_cpufreq_platform_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Andrew Lunn <[email protected]");
MODULE_DESCRIPTION("cpufreq driver for Marvell's kirkwood CPU");
MODULE_ALIAS("platform:kirkwood-cpufreq");
| linux-master | drivers/cpufreq/kirkwood-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* amd_freq_sensitivity.c: AMD frequency sensitivity feedback powersave bias
* for the ondemand governor.
*
* Copyright (C) 2013 Advanced Micro Devices, Inc.
*
* Author: Jacob Shin <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/percpu-defs.h>
#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <asm/msr.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
#include "cpufreq_ondemand.h"
#define MSR_AMD64_FREQ_SENSITIVITY_ACTUAL 0xc0010080
#define MSR_AMD64_FREQ_SENSITIVITY_REFERENCE 0xc0010081
#define CLASS_CODE_SHIFT 56
#define POWERSAVE_BIAS_MAX 1000
#define POWERSAVE_BIAS_DEF 400
struct cpu_data_t {
u64 actual;
u64 reference;
unsigned int freq_prev;
};
static DEFINE_PER_CPU(struct cpu_data_t, cpu_data);
static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
unsigned int freq_next,
unsigned int relation)
{
int sensitivity;
long d_actual, d_reference;
struct msr actual, reference;
struct cpu_data_t *data = &per_cpu(cpu_data, policy->cpu);
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct dbs_data *od_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = od_data->tuners;
if (!policy->freq_table)
return freq_next;
rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_ACTUAL,
&actual.l, &actual.h);
rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_REFERENCE,
&reference.l, &reference.h);
actual.h &= 0x00ffffff;
reference.h &= 0x00ffffff;
/* counter wrapped around, so stay on current frequency */
if (actual.q < data->actual || reference.q < data->reference) {
freq_next = policy->cur;
goto out;
}
d_actual = actual.q - data->actual;
d_reference = reference.q - data->reference;
	/* would divide by 0, so stay on the current frequency as well */
if (d_reference == 0) {
freq_next = policy->cur;
goto out;
}
sensitivity = POWERSAVE_BIAS_MAX -
(POWERSAVE_BIAS_MAX * (d_reference - d_actual) / d_reference);
	sensitivity = clamp(sensitivity, 0, POWERSAVE_BIAS_MAX);
/* this workload is not CPU bound, so choose a lower freq */
if (sensitivity < od_tuners->powersave_bias) {
if (data->freq_prev == policy->cur)
freq_next = policy->cur;
if (freq_next > policy->cur)
freq_next = policy->cur;
else if (freq_next < policy->cur)
freq_next = policy->min;
else {
unsigned int index;
index = cpufreq_table_find_index_h(policy,
policy->cur - 1,
relation & CPUFREQ_RELATION_E);
freq_next = policy->freq_table[index].frequency;
}
data->freq_prev = freq_next;
} else
data->freq_prev = 0;
out:
data->actual = actual.q;
data->reference = reference.q;
return freq_next;
}
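/*
 * Worked example (illustrative counter deltas, not measured values):
 * with d_actual = 300 and d_reference = 1000, the formula above gives
 * sensitivity = 1000 - (1000 * (1000 - 300) / 1000) = 300. As 300 is
 * below POWERSAVE_BIAS_DEF (400), the workload is treated as not
 * CPU-bound and a lower frequency is selected.
 */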
static int __init amd_freq_sensitivity_init(void)
{
u64 val;
struct pci_dev *pcidev;
unsigned int pci_vendor;
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
pci_vendor = PCI_VENDOR_ID_AMD;
else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
pci_vendor = PCI_VENDOR_ID_HYGON;
else
return -ENODEV;
pcidev = pci_get_device(pci_vendor,
PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
if (!pcidev) {
if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK))
return -ENODEV;
} else {
pci_dev_put(pcidev);
}
if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
return -ENODEV;
if (!(val >> CLASS_CODE_SHIFT))
return -ENODEV;
od_register_powersave_bias_handler(amd_powersave_bias_target,
POWERSAVE_BIAS_DEF);
return 0;
}
late_initcall(amd_freq_sensitivity_init);
static void __exit amd_freq_sensitivity_exit(void)
{
od_unregister_powersave_bias_handler();
}
module_exit(amd_freq_sensitivity_exit);
static const struct x86_cpu_id __maybe_unused amd_freq_sensitivity_ids[] = {
X86_MATCH_FEATURE(X86_FEATURE_PROC_FEEDBACK, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, amd_freq_sensitivity_ids);
MODULE_AUTHOR("Jacob Shin <[email protected]>");
MODULE_DESCRIPTION("AMD frequency sensitivity feedback powersave bias for "
"the ondemand governor.");
MODULE_LICENSE("GPL");
| linux-master | drivers/cpufreq/amd_freq_sensitivity.c |
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Power Interface (SCMI) based CPUFreq Interface driver
*
* Copyright (C) 2018-2021 ARM Ltd.
* Sudeep Holla <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>
#include <linux/units.h>
struct scmi_data {
int domain_id;
int nr_opp;
struct device *cpu_dev;
cpumask_var_t opp_shared_cpus;
};
static struct scmi_protocol_handle *ph;
static const struct scmi_perf_proto_ops *perf_ops;
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
struct scmi_data *priv = policy->driver_data;
unsigned long rate;
int ret;
ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false);
if (ret)
return 0;
return rate / 1000;
}
/*
* perf_ops->freq_set is not a synchronous, the actual OPP change will
* happen asynchronously and can get notified if the events are
* subscribed for by the SCMI firmware
*/
static int
scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
struct scmi_data *priv = policy->driver_data;
u64 freq = policy->freq_table[index].frequency;
return perf_ops->freq_set(ph, priv->domain_id, freq * 1000, false);
}
static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct scmi_data *priv = policy->driver_data;
if (!perf_ops->freq_set(ph, priv->domain_id,
target_freq * 1000, true))
return target_freq;
return 0;
}
static int
scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
int cpu, domain, tdomain;
struct device *tcpu_dev;
domain = perf_ops->device_domain_id(cpu_dev);
if (domain < 0)
return domain;
for_each_possible_cpu(cpu) {
if (cpu == cpu_dev->id)
continue;
tcpu_dev = get_cpu_device(cpu);
if (!tcpu_dev)
continue;
tdomain = perf_ops->device_domain_id(tcpu_dev);
if (tdomain == domain)
cpumask_set_cpu(cpu, cpumask);
}
return 0;
}
static int __maybe_unused
scmi_get_cpu_power(struct device *cpu_dev, unsigned long *power,
unsigned long *KHz)
{
enum scmi_power_scale power_scale = perf_ops->power_scale_get(ph);
unsigned long Hz;
int ret, domain;
domain = perf_ops->device_domain_id(cpu_dev);
if (domain < 0)
return domain;
/* Get the power cost of the performance domain. */
Hz = *KHz * 1000;
ret = perf_ops->est_power_get(ph, domain, &Hz, power);
if (ret)
return ret;
/* Convert the power to uW if it is mW (ignore bogoW) */
if (power_scale == SCMI_POWER_MILLIWATTS)
*power *= MICROWATT_PER_MILLIWATT;
/* The EM framework specifies the frequency in KHz. */
*KHz = Hz / 1000;
return 0;
}
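/*
 * Example conversion (hypothetical firmware figures): if est_power_get()
 * reports 1500 in milliwatt scale for Hz = 1200000000, the caller sees
 * *power = 1500 * MICROWATT_PER_MILLIWATT = 1500000 uW and
 * *KHz = 1200000000 / 1000 = 1200000.
 */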
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
int ret, nr_opp;
unsigned int latency;
struct device *cpu_dev;
struct scmi_data *priv;
struct cpufreq_frequency_table *freq_table;
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
pr_err("failed to get cpu%d device\n", policy->cpu);
return -ENODEV;
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
if (!zalloc_cpumask_var(&priv->opp_shared_cpus, GFP_KERNEL)) {
ret = -ENOMEM;
goto out_free_priv;
}
/* Obtain CPUs that share SCMI performance controls */
ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
if (ret) {
dev_warn(cpu_dev, "failed to get sharing cpumask\n");
goto out_free_cpumask;
}
/*
* Obtain CPUs that share performance levels.
* The OPP 'sharing cpus' info may come from DT through an empty opp
* table and opp-shared.
*/
ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
if (ret || cpumask_empty(priv->opp_shared_cpus)) {
/*
* Either opp-table is not set or no opp-shared was found.
* Use the CPU mask from SCMI to designate CPUs sharing an OPP
* table.
*/
cpumask_copy(priv->opp_shared_cpus, policy->cpus);
}
/*
* A previous CPU may have marked OPPs as shared for a few CPUs, based on
* what OPP core provided. If the current CPU is part of those few, then
* there is no need to add OPPs again.
*/
nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
if (nr_opp <= 0) {
ret = perf_ops->device_opps_add(ph, cpu_dev);
if (ret) {
dev_warn(cpu_dev, "failed to add opps to the device\n");
goto out_free_cpumask;
}
nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
if (nr_opp <= 0) {
dev_err(cpu_dev, "%s: No OPPs for this device: %d\n",
__func__, nr_opp);
ret = -ENODEV;
goto out_free_opp;
}
ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
if (ret) {
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
goto out_free_opp;
}
priv->nr_opp = nr_opp;
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
goto out_free_opp;
}
priv->cpu_dev = cpu_dev;
priv->domain_id = perf_ops->device_domain_id(cpu_dev);
policy->driver_data = priv;
policy->freq_table = freq_table;
/* SCMI allows DVFS request for any domain from any CPU */
policy->dvfs_possible_from_any_cpu = true;
latency = perf_ops->transition_latency_get(ph, cpu_dev);
if (!latency)
latency = CPUFREQ_ETERNAL;
policy->cpuinfo.transition_latency = latency;
policy->fast_switch_possible =
perf_ops->fast_switch_possible(ph, cpu_dev);
return 0;
out_free_opp:
dev_pm_opp_remove_all_dynamic(cpu_dev);
out_free_cpumask:
free_cpumask_var(priv->opp_shared_cpus);
out_free_priv:
kfree(priv);
return ret;
}
static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
struct scmi_data *priv = policy->driver_data;
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
free_cpumask_var(priv->opp_shared_cpus);
kfree(priv);
return 0;
}
static void scmi_cpufreq_register_em(struct cpufreq_policy *policy)
{
struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
enum scmi_power_scale power_scale = perf_ops->power_scale_get(ph);
struct scmi_data *priv = policy->driver_data;
bool em_power_scale = false;
/*
* This callback will be called for each policy, but we don't need to
* register with EM every time. Despite not being part of the same
* policy, some CPUs may still share their perf-domains, and a CPU from
* another policy may already have registered with EM on behalf of CPUs
* of this policy.
*/
if (!priv->nr_opp)
return;
if (power_scale == SCMI_POWER_MILLIWATTS
|| power_scale == SCMI_POWER_MICROWATTS)
em_power_scale = true;
em_dev_register_perf_domain(get_cpu_device(policy->cpu), priv->nr_opp,
&em_cb, priv->opp_shared_cpus,
em_power_scale);
}
static struct cpufreq_driver scmi_cpufreq_driver = {
.name = "scmi",
.flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.attr = cpufreq_generic_attr,
.target_index = scmi_cpufreq_set_target,
.fast_switch = scmi_cpufreq_fast_switch,
.get = scmi_cpufreq_get_rate,
.init = scmi_cpufreq_init,
.exit = scmi_cpufreq_exit,
.register_em = scmi_cpufreq_register_em,
};
static int scmi_cpufreq_probe(struct scmi_device *sdev)
{
int ret;
struct device *dev = &sdev->dev;
const struct scmi_handle *handle;
handle = sdev->handle;
if (!handle)
return -ENODEV;
perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph);
if (IS_ERR(perf_ops))
return PTR_ERR(perf_ops);
#ifdef CONFIG_COMMON_CLK
/* dummy clock provider as needed by OPP if clocks property is used */
if (of_property_present(dev->of_node, "#clock-cells"))
devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
#endif
ret = cpufreq_register_driver(&scmi_cpufreq_driver);
if (ret) {
dev_err(dev, "%s: registering cpufreq failed, err: %d\n",
__func__, ret);
}
return ret;
}
static void scmi_cpufreq_remove(struct scmi_device *sdev)
{
cpufreq_unregister_driver(&scmi_cpufreq_driver);
}
static const struct scmi_device_id scmi_id_table[] = {
{ SCMI_PROTOCOL_PERF, "cpufreq" },
{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
static struct scmi_driver scmi_cpufreq_drv = {
.name = "scmi-cpufreq",
.probe = scmi_cpufreq_probe,
.remove = scmi_cpufreq_remove,
.id_table = scmi_id_table,
};
module_scmi_driver(scmi_cpufreq_drv);
MODULE_AUTHOR("Sudeep Holla <[email protected]>");
MODULE_DESCRIPTION("ARM SCMI CPUFreq interface driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/cpufreq/scmi-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* elanfreq: cpufreq driver for the AMD ELAN family
*
* (c) Copyright 2002 Robert Schwebel <[email protected]>
*
* Parts of this code are (c) Sven Geggus <[email protected]>
*
* All Rights Reserved.
*
* 2002-02-13: - initial revision for 2.4.18-pre9 by Robert Schwebel
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/cpufreq.h>
#include <asm/cpu_device_id.h>
#include <asm/msr.h>
#include <linux/timex.h>
#include <linux/io.h>
#define REG_CSCIR 0x22 /* Chip Setup and Control Index Register */
#define REG_CSCDR 0x23 /* Chip Setup and Control Data Register */
/* Module parameter */
static int max_freq;
struct s_elan_multiplier {
int clock; /* frequency in kHz */
int val40h; /* PMU Force Mode register */
int val80h; /* CPU Clock Speed Register */
};
/*
* It is important that the frequencies
* are listed in ascending order here!
*/
static struct s_elan_multiplier elan_multiplier[] = {
{1000, 0x02, 0x18},
{2000, 0x02, 0x10},
{4000, 0x02, 0x08},
{8000, 0x00, 0x00},
{16000, 0x00, 0x02},
{33000, 0x00, 0x04},
{66000, 0x01, 0x04},
{99000, 0x01, 0x05}
};
static struct cpufreq_frequency_table elanfreq_table[] = {
{0, 0, 1000},
{0, 1, 2000},
{0, 2, 4000},
{0, 3, 8000},
{0, 4, 16000},
{0, 5, 33000},
{0, 6, 66000},
{0, 7, 99000},
{0, 0, CPUFREQ_TABLE_END},
};
/**
* elanfreq_get_cpu_frequency: determine current cpu speed
*
* Finds out at which frequency the CPU of the Elan SOC runs
* at the moment. Frequencies from 1 to 33 MHz are generated
* the normal way, 66 and 99 MHz are called "Hyperspeed Mode"
* and have the rest of the chip running with 33 MHz.
*/
static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
{
u8 clockspeed_reg; /* Clock Speed Register */
local_irq_disable();
outb_p(0x80, REG_CSCIR);
clockspeed_reg = inb_p(REG_CSCDR);
local_irq_enable();
if ((clockspeed_reg & 0xE0) == 0xE0)
return 0;
/* Are we in CPU clock multiplied mode (66/99 MHz)? */
if ((clockspeed_reg & 0xE0) == 0xC0) {
if ((clockspeed_reg & 0x01) == 0)
return 66000;
else
return 99000;
}
/* 33 MHz is not 32 MHz... */
if ((clockspeed_reg & 0xE0) == 0xA0)
return 33000;
return (1<<((clockspeed_reg & 0xE0) >> 5)) * 1000;
}
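/*
 * Decode examples (read off the code above): a clock speed register
 * value of 0x60 yields (1 << ((0x60 & 0xE0) >> 5)) * 1000 =
 * (1 << 3) * 1000 = 8000 kHz; 0xA0 is special-cased to 33000 kHz; and
 * 0xC0/0xC1 select the 66 MHz and 99 MHz hyperspeed modes respectively.
 */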
static int elanfreq_target(struct cpufreq_policy *policy,
unsigned int state)
{
/*
* Access to the Elan's internal registers is indexed via
* 0x22: Chip Setup & Control Register Index Register (CSCI)
* 0x23: Chip Setup & Control Register Data Register (CSCD)
*
*/
/*
* 0x40 is the Power Management Unit's Force Mode Register.
* Bit 6 enables Hyperspeed Mode (66/100 MHz core frequency)
*/
local_irq_disable();
outb_p(0x40, REG_CSCIR); /* Disable hyperspeed mode */
outb_p(0x00, REG_CSCDR);
local_irq_enable(); /* wait till internal pipelines and */
udelay(1000); /* buffers have cleaned up */
local_irq_disable();
/* now, set the CPU clock speed register (0x80) */
outb_p(0x80, REG_CSCIR);
outb_p(elan_multiplier[state].val80h, REG_CSCDR);
/* now, the hyperspeed bit in PMU Force Mode Register (0x40) */
outb_p(0x40, REG_CSCIR);
outb_p(elan_multiplier[state].val40h, REG_CSCDR);
udelay(10000);
local_irq_enable();
return 0;
}
/*
* Module init and exit code
*/
static int elanfreq_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = &cpu_data(0);
struct cpufreq_frequency_table *pos;
/* capability check */
if ((c->x86_vendor != X86_VENDOR_AMD) ||
(c->x86 != 4) || (c->x86_model != 10))
return -ENODEV;
/* max freq */
if (!max_freq)
max_freq = elanfreq_get_cpu_frequency(0);
/* table init */
cpufreq_for_each_entry(pos, elanfreq_table)
if (pos->frequency > max_freq)
pos->frequency = CPUFREQ_ENTRY_INVALID;
policy->freq_table = elanfreq_table;
return 0;
}
#ifndef MODULE
/**
* elanfreq_setup - elanfreq command line parameter parsing
*
* elanfreq command line parameter. Use:
* elanfreq=66000
* to set the maximum CPU frequency to 66 MHz. Note that in
* case you do not give this boot parameter, the maximum
* frequency will fall back to _current_ CPU frequency which
* might be lower. If you build this as a module, use the
* max_freq module parameter instead.
*/
static int __init elanfreq_setup(char *str)
{
max_freq = simple_strtoul(str, &str, 0);
pr_warn("You're using the deprecated elanfreq command line option. Use elanfreq.max_freq instead, please!\n");
return 1;
}
__setup("elanfreq=", elanfreq_setup);
#endif
static struct cpufreq_driver elanfreq_driver = {
.get = elanfreq_get_cpu_frequency,
.flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = elanfreq_target,
.init = elanfreq_cpu_init,
.name = "elanfreq",
.attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id elan_id[] = {
X86_MATCH_VENDOR_FAM_MODEL(AMD, 4, 10, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, elan_id);
static int __init elanfreq_init(void)
{
if (!x86_match_cpu(elan_id))
return -ENODEV;
return cpufreq_register_driver(&elanfreq_driver);
}
static void __exit elanfreq_exit(void)
{
cpufreq_unregister_driver(&elanfreq_driver);
}
module_param(max_freq, int, 0444);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Schwebel <[email protected]>, "
"Sven Geggus <[email protected]>");
MODULE_DESCRIPTION("cpufreq driver for AMD's Elan CPUs");
module_init(elanfreq_init);
module_exit(elanfreq_exit);
| linux-master | drivers/cpufreq/elanfreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2001 Dave Jones, Arjan van de ven.
* (C) 2002 - 2003 Dominik Brodowski <[email protected]>
*
* Based upon reverse engineered information, and on Intel documentation
* for chipsets ICH2-M and ICH3-M.
*
* Many thanks to Ducrot Bruno for finding and fixing the last
* "missing link" for ICH2-M/ICH3-M support, and to Thomas Winkler
* for extensive testing.
*
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
/*********************************************************************
* SPEEDSTEP - DEFINITIONS *
*********************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <asm/cpu_device_id.h>
#include "speedstep-lib.h"
/* speedstep_chipset:
* It is necessary to know which chipset is used. As accesses to
* this device occur at various places in this module, we need a
* static struct pci_dev * pointing to that device.
*/
static struct pci_dev *speedstep_chipset_dev;
/* speedstep_processor
*/
static enum speedstep_processor speedstep_processor;
static u32 pmbase;
/*
* There are only two frequency states for each processor. Values
* are in kHz for the time being.
*/
static struct cpufreq_frequency_table speedstep_freqs[] = {
{0, SPEEDSTEP_HIGH, 0},
{0, SPEEDSTEP_LOW, 0},
{0, 0, CPUFREQ_TABLE_END},
};
/**
* speedstep_find_register - read the PMBASE address
*
* Returns: -ENODEV if no register could be found
*/
static int speedstep_find_register(void)
{
if (!speedstep_chipset_dev)
return -ENODEV;
/* get PMBASE */
pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase);
if (!(pmbase & 0x01)) {
pr_err("could not find speedstep register\n");
return -ENODEV;
}
pmbase &= 0xFFFFFFFE;
if (!pmbase) {
pr_err("could not find speedstep register\n");
return -ENODEV;
}
pr_debug("pmbase is 0x%x\n", pmbase);
return 0;
}
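/*
 * Decode example (hypothetical BIOS value): a config dword read of
 * 0x00001001 has bit 0 set (the I/O space indicator checked above), so
 * pmbase becomes 0x1000 and the SpeedStep state byte lives at 0x1050.
 */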
/**
* speedstep_set_state - set the SpeedStep state
* @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
*
* Tries to change the SpeedStep state. Can be called from
* smp_call_function_single.
*/
static void speedstep_set_state(unsigned int state)
{
u8 pm2_blk;
u8 value;
unsigned long flags;
if (state > 0x1)
return;
/* Disable IRQs */
local_irq_save(flags);
/* read state */
value = inb(pmbase + 0x50);
pr_debug("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value);
/* write new state */
value &= 0xFE;
value |= state;
pr_debug("writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase);
/* Disable bus master arbitration */
pm2_blk = inb(pmbase + 0x20);
pm2_blk |= 0x01;
outb(pm2_blk, (pmbase + 0x20));
/* Actual transition */
outb(value, (pmbase + 0x50));
/* Restore bus master arbitration */
pm2_blk &= 0xfe;
outb(pm2_blk, (pmbase + 0x20));
/* check if transition was successful */
value = inb(pmbase + 0x50);
/* Enable IRQs */
local_irq_restore(flags);
pr_debug("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value);
if (state == (value & 0x1))
pr_debug("change to %u MHz succeeded\n",
speedstep_get_frequency(speedstep_processor) / 1000);
else
pr_err("change failed - I/O error\n");
return;
}
/* Wrapper for smp_call_function_single. */
static void _speedstep_set_state(void *_state)
{
speedstep_set_state(*(unsigned int *)_state);
}
/**
* speedstep_activate - activate SpeedStep control in the chipset
*
* Tries to activate the SpeedStep status and control registers.
* Returns -EINVAL on an unsupported chipset, and zero on success.
*/
static int speedstep_activate(void)
{
u16 value = 0;
if (!speedstep_chipset_dev)
return -EINVAL;
pci_read_config_word(speedstep_chipset_dev, 0x00A0, &value);
if (!(value & 0x08)) {
value |= 0x08;
pr_debug("activating SpeedStep (TM) registers\n");
pci_write_config_word(speedstep_chipset_dev, 0x00A0, value);
}
return 0;
}
/**
* speedstep_detect_chipset - detect the Southbridge which contains SpeedStep logic
*
* Detects ICH2-M, ICH3-M and ICH4-M so far. The pci_dev points to
* the LPC bridge / PM module which contains all power-management
* functions. Returns the SPEEDSTEP_CHIPSET_-number for the detected
* chipset, or zero on failure.
*/
static unsigned int speedstep_detect_chipset(void)
{
speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82801DB_12,
PCI_ANY_ID, PCI_ANY_ID,
NULL);
if (speedstep_chipset_dev)
return 4; /* 4-M */
speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82801CA_12,
PCI_ANY_ID, PCI_ANY_ID,
NULL);
if (speedstep_chipset_dev)
return 3; /* 3-M */
speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82801BA_10,
PCI_ANY_ID, PCI_ANY_ID,
NULL);
if (speedstep_chipset_dev) {
		/* speedstep.c causes lockups on Dell Inspiron 8000 and
		 * 8100 machines, which use a pretty old revision of the
		 * 82815 host bridge. Abort on these systems.
		 */
*/
struct pci_dev *hostbridge;
hostbridge = pci_get_subsys(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82815_MC,
PCI_ANY_ID, PCI_ANY_ID,
NULL);
if (!hostbridge)
return 2; /* 2-M */
if (hostbridge->revision < 5) {
pr_debug("hostbridge does not support speedstep\n");
speedstep_chipset_dev = NULL;
pci_dev_put(hostbridge);
return 0;
}
pci_dev_put(hostbridge);
return 2; /* 2-M */
}
return 0;
}
static void get_freq_data(void *_speed)
{
unsigned int *speed = _speed;
*speed = speedstep_get_frequency(speedstep_processor);
}
static unsigned int speedstep_get(unsigned int cpu)
{
unsigned int speed;
/* You're supposed to ensure CPU is online. */
BUG_ON(smp_call_function_single(cpu, get_freq_data, &speed, 1));
pr_debug("detected %u kHz as current frequency\n", speed);
return speed;
}
/**
* speedstep_target - set a new CPUFreq policy
* @policy: new policy
* @index: index of target frequency
*
* Sets a new CPUFreq policy.
*/
static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned int policy_cpu;
policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
smp_call_function_single(policy_cpu, _speedstep_set_state, &index,
true);
return 0;
}
struct get_freqs {
struct cpufreq_policy *policy;
int ret;
};
static void get_freqs_on_cpu(void *_get_freqs)
{
struct get_freqs *get_freqs = _get_freqs;
get_freqs->ret =
speedstep_get_freqs(speedstep_processor,
&speedstep_freqs[SPEEDSTEP_LOW].frequency,
&speedstep_freqs[SPEEDSTEP_HIGH].frequency,
&get_freqs->policy->cpuinfo.transition_latency,
&speedstep_set_state);
}
static int speedstep_cpu_init(struct cpufreq_policy *policy)
{
unsigned int policy_cpu;
struct get_freqs gf;
/* only run on CPU to be set, or on its sibling */
#ifdef CONFIG_SMP
cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu));
#endif
policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
/* detect low and high frequency and transition latency */
gf.policy = policy;
smp_call_function_single(policy_cpu, get_freqs_on_cpu, &gf, 1);
if (gf.ret)
return gf.ret;
policy->freq_table = speedstep_freqs;
return 0;
}
static struct cpufreq_driver speedstep_driver = {
.name = "speedstep-ich",
.verify = cpufreq_generic_frequency_table_verify,
.target_index = speedstep_target,
.init = speedstep_cpu_init,
.get = speedstep_get,
.attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id ss_smi_ids[] = {
X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, 0x8, 0),
X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, 0xb, 0),
X86_MATCH_VENDOR_FAM_MODEL(INTEL, 15, 0x2, 0),
{}
};
/**
* speedstep_init - initializes the SpeedStep CPUFreq driver
*
* Initializes the SpeedStep support. Returns -ENODEV on unsupported
 * devices, -EINVAL on problems during initialization, and zero on
* success.
*/
static int __init speedstep_init(void)
{
if (!x86_match_cpu(ss_smi_ids))
return -ENODEV;
/* detect processor */
speedstep_processor = speedstep_detect_processor();
if (!speedstep_processor) {
pr_debug("Intel(R) SpeedStep(TM) capable processor "
"not found\n");
return -ENODEV;
}
/* detect chipset */
if (!speedstep_detect_chipset()) {
pr_debug("Intel(R) SpeedStep(TM) for this chipset not "
"(yet) available.\n");
return -ENODEV;
}
/* activate speedstep support */
if (speedstep_activate()) {
pci_dev_put(speedstep_chipset_dev);
return -EINVAL;
}
if (speedstep_find_register())
return -ENODEV;
return cpufreq_register_driver(&speedstep_driver);
}
/**
* speedstep_exit - unregisters SpeedStep support
*
* Unregisters SpeedStep support.
*/
static void __exit speedstep_exit(void)
{
pci_dev_put(speedstep_chipset_dev);
cpufreq_unregister_driver(&speedstep_driver);
}
MODULE_AUTHOR("Dave Jones, Dominik Brodowski <[email protected]>");
MODULE_DESCRIPTION("Speedstep driver for Intel mobile processors on chipsets "
"with ICH-M southbridges.");
MODULE_LICENSE("GPL");
module_init(speedstep_init);
module_exit(speedstep_exit);
| linux-master | drivers/cpufreq/speedstep-ich.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* cpufreq driver for the cell processor
*
* (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
*
* Author: Christian Krafft <[email protected]>
*/
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/cell-regs.h>
#include "ppc_cbe_cpufreq.h"
/* the CBE supports an 8 step frequency scaling */
static struct cpufreq_frequency_table cbe_freqs[] = {
{0, 1, 0},
{0, 2, 0},
{0, 3, 0},
{0, 4, 0},
{0, 5, 0},
{0, 6, 0},
{0, 8, 0},
{0, 10, 0},
{0, 0, CPUFREQ_TABLE_END},
};
/*
* hardware specific functions
*/
static int set_pmode(unsigned int cpu, unsigned int slow_mode)
{
int rc;
if (cbe_cpufreq_has_pmi)
rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode);
else
rc = cbe_cpufreq_set_pmode(cpu, slow_mode);
pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu));
return rc;
}
/*
* cpufreq functions
*/
static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *pos;
const u32 *max_freqp;
u32 max_freq;
int cur_pmode;
struct device_node *cpu;
cpu = of_get_cpu_node(policy->cpu, NULL);
if (!cpu)
return -ENODEV;
pr_debug("init cpufreq on CPU %d\n", policy->cpu);
/*
* Let's check we can actually get to the CELL regs
*/
if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
!cbe_get_cpu_mic_tm_regs(policy->cpu)) {
pr_info("invalid CBE regs pointers for cpufreq\n");
of_node_put(cpu);
return -EINVAL;
}
max_freqp = of_get_property(cpu, "clock-frequency", NULL);
of_node_put(cpu);
if (!max_freqp)
return -EINVAL;
/* we need the freq in kHz */
max_freq = *max_freqp / 1000;
pr_debug("max clock-frequency is at %u kHz\n", max_freq);
pr_debug("initializing frequency table\n");
/* initialize frequency table */
cpufreq_for_each_entry(pos, cbe_freqs) {
pos->frequency = max_freq / pos->driver_data;
pr_debug("%d: %d\n", (int)(pos - cbe_freqs), pos->frequency);
}
/* if DEBUG is enabled set_pmode() measures the latency
* of a transition */
policy->cpuinfo.transition_latency = 25000;
cur_pmode = cbe_cpufreq_get_pmode(policy->cpu);
pr_debug("current pmode is at %d\n",cur_pmode);
policy->cur = cbe_freqs[cur_pmode].frequency;
#ifdef CONFIG_SMP
cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
#endif
policy->freq_table = cbe_freqs;
cbe_cpufreq_pmi_policy_init(policy);
return 0;
}
static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
cbe_cpufreq_pmi_policy_exit(policy);
return 0;
}
static int cbe_cpufreq_target(struct cpufreq_policy *policy,
unsigned int cbe_pmode_new)
{
pr_debug("setting frequency for cpu %d to %d kHz, " \
"1/%d of max frequency\n",
policy->cpu,
cbe_freqs[cbe_pmode_new].frequency,
cbe_freqs[cbe_pmode_new].driver_data);
return set_pmode(policy->cpu, cbe_pmode_new);
}
static struct cpufreq_driver cbe_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = cbe_cpufreq_target,
.init = cbe_cpufreq_cpu_init,
.exit = cbe_cpufreq_cpu_exit,
.name = "cbe-cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
};
/*
 * module init and destroy
*/
static int __init cbe_cpufreq_init(void)
{
int ret;
if (!machine_is(cell))
return -ENODEV;
cbe_cpufreq_pmi_init();
ret = cpufreq_register_driver(&cbe_cpufreq_driver);
if (ret)
cbe_cpufreq_pmi_exit();
return ret;
}
static void __exit cbe_cpufreq_exit(void)
{
cpufreq_unregister_driver(&cbe_cpufreq_driver);
cbe_cpufreq_pmi_exit();
}
module_init(cbe_cpufreq_init);
module_exit(cbe_cpufreq_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Krafft <[email protected]>");
| linux-master | drivers/cpufreq/ppc_cbe_cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mach-sa1100/cpu-sa1110.c
*
* Copyright (C) 2001 Russell King
*
 * Note: there are two errata that apply to the SA1110 here:
* 7 - SDRAM auto-power-up failure (rev A0)
* 13 - Corruption of internal register reads/writes following
* SDRAM reads (rev A0, B0, B1)
*
* We ignore rev. A0 and B0 devices; I don't think they're worth supporting.
*
* The SDRAM type can be passed on the command line as cpu_sa1110.sdram=type
*/
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <asm/cputype.h>
#include <asm/mach-types.h>
#include <mach/generic.h>
#include <mach/hardware.h>
#undef DEBUG
struct sdram_params {
const char name[20];
u_char rows; /* bits */
u_char cas_latency; /* cycles */
u_char tck; /* clock cycle time (ns) */
u_char trcd; /* activate to r/w (ns) */
u_char trp; /* precharge to activate (ns) */
u_char twr; /* write recovery time (ns) */
u_short refresh; /* refresh time for array (us) */
};
struct sdram_info {
u_int mdcnfg;
u_int mdrefr;
u_int mdcas[3];
};
static struct sdram_params sdram_tbl[] __initdata = {
{ /* Toshiba TC59SM716 CL2 */
.name = "TC59SM716-CL2",
.rows = 12,
.tck = 10,
.trcd = 20,
.trp = 20,
.twr = 10,
.refresh = 64000,
.cas_latency = 2,
}, { /* Toshiba TC59SM716 CL3 */
.name = "TC59SM716-CL3",
.rows = 12,
.tck = 8,
.trcd = 20,
.trp = 20,
.twr = 8,
.refresh = 64000,
.cas_latency = 3,
}, { /* Samsung K4S641632D TC75 */
.name = "K4S641632D",
.rows = 14,
.tck = 9,
.trcd = 27,
.trp = 20,
.twr = 9,
.refresh = 64000,
.cas_latency = 3,
}, { /* Samsung K4S281632B-1H */
.name = "K4S281632B-1H",
.rows = 12,
.tck = 10,
.trp = 20,
.twr = 10,
.refresh = 64000,
.cas_latency = 3,
}, { /* Samsung KM416S4030CT */
.name = "KM416S4030CT",
.rows = 13,
.tck = 8,
.trcd = 24, /* 3 CLKs */
.trp = 24, /* 3 CLKs */
.twr = 16, /* Trdl: 2 CLKs */
.refresh = 64000,
.cas_latency = 3,
}, { /* Winbond W982516AH75L CL3 */
.name = "W982516AH75L",
.rows = 16,
.tck = 8,
.trcd = 20,
.trp = 20,
.twr = 8,
.refresh = 64000,
.cas_latency = 3,
}, { /* Micron MT48LC8M16A2TG-75 */
.name = "MT48LC8M16A2TG-75",
.rows = 12,
.tck = 8,
.trcd = 20,
.trp = 20,
.twr = 8,
.refresh = 64000,
.cas_latency = 3,
},
};
static struct sdram_params sdram_params;
/*
* Given a period in ns and frequency in khz, calculate the number of
* cycles of frequency in period. Note that we round up to the next
* cycle, even if we are only slightly over.
*/
static inline u_int ns_to_cycles(u_int ns, u_int khz)
{
return (ns * khz + 999999) / 1000000;
}
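/*
 * Worked example: ns_to_cycles(20, 103000) = (20 * 103000 + 999999) /
 * 1000000 = 3, i.e. a 20ns parameter costs 3 cycles of a 103MHz memory
 * clock (2.06 cycles, rounded up to the next whole cycle).
 */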
/*
* Create the MDCAS register bit pattern.
*/
static inline void set_mdcas(u_int *mdcas, int delayed, u_int rcd)
{
u_int shift;
rcd = 2 * rcd - 1;
shift = delayed + 1 + rcd;
mdcas[0] = (1 << rcd) - 1;
mdcas[0] |= 0x55555555 << shift;
mdcas[1] = mdcas[2] = 0x55555555 << (shift & 1);
}
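/*
 * Worked example: set_mdcas(mdcas, 1, 3) computes rcd = 2 * 3 - 1 = 5
 * and shift = 1 + 1 + 5 = 7, so mdcas[0] = ((1 << 5) - 1) |
 * (0x55555555 << 7) = 0xaaaaaa9f and mdcas[1] = mdcas[2] =
 * 0x55555555 << 1 = 0xaaaaaaaa, matching the values quoted in the
 * #if 0 block of sa1110_target() below.
 */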
static void
sdram_calculate_timing(struct sdram_info *sd, u_int cpu_khz,
struct sdram_params *sdram)
{
u_int mem_khz, sd_khz, trp, twr;
mem_khz = cpu_khz / 2;
sd_khz = mem_khz;
/*
* If SDCLK would invalidate the SDRAM timings,
* run SDCLK at half speed.
*
* CPU steppings prior to B2 must either run the memory at
* half speed or use delayed read latching (errata 13).
*/
if ((ns_to_cycles(sdram->tck, sd_khz) > 1) ||
(read_cpuid_revision() < ARM_CPU_REV_SA1110_B2 && sd_khz < 62000))
sd_khz /= 2;
sd->mdcnfg = MDCNFG & 0x007f007f;
twr = ns_to_cycles(sdram->twr, mem_khz);
/* trp should always be >1 */
trp = ns_to_cycles(sdram->trp, mem_khz) - 1;
if (trp < 1)
trp = 1;
sd->mdcnfg |= trp << 8;
sd->mdcnfg |= trp << 24;
sd->mdcnfg |= sdram->cas_latency << 12;
sd->mdcnfg |= sdram->cas_latency << 28;
sd->mdcnfg |= twr << 14;
sd->mdcnfg |= twr << 30;
sd->mdrefr = MDREFR & 0xffbffff0;
sd->mdrefr |= 7;
if (sd_khz != mem_khz)
sd->mdrefr |= MDREFR_K1DB2;
/* initial number of '1's in MDCAS + 1 */
set_mdcas(sd->mdcas, sd_khz >= 62000,
ns_to_cycles(sdram->trcd, mem_khz));
#ifdef DEBUG
printk(KERN_DEBUG "MDCNFG: %08x MDREFR: %08x MDCAS0: %08x MDCAS1: %08x MDCAS2: %08x\n",
sd->mdcnfg, sd->mdrefr, sd->mdcas[0], sd->mdcas[1],
sd->mdcas[2]);
#endif
}
/*
* Set the SDRAM refresh rate.
*/
static inline void sdram_set_refresh(u_int dri)
{
MDREFR = (MDREFR & 0xffff000f) | (dri << 4);
(void) MDREFR;
}
/*
* Update the refresh period. We do this such that we always refresh
* the SDRAMs within their permissible period. The refresh period is
* always a multiple of the memory clock (fixed at cpu_clock / 2).
*
* FIXME: we don't currently take account of burst accesses here,
 * but neither do Intel's DM nor Angel.
*/
static void
sdram_update_refresh(u_int cpu_khz, struct sdram_params *sdram)
{
u_int ns_row = (sdram->refresh * 1000) >> sdram->rows;
u_int dri = ns_to_cycles(ns_row, cpu_khz / 2) / 32;
#ifdef DEBUG
mdelay(250);
printk(KERN_DEBUG "new dri value = %d\n", dri);
#endif
sdram_set_refresh(dri);
}
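/*
 * Worked example (TC59SM716-CL3 parameters): refresh = 64000us and
 * rows = 12 give ns_row = (64000 * 1000) >> 12 = 15625ns per row; at
 * cpu_khz = 206000 (103MHz memory clock) this works out to
 * dri = ns_to_cycles(15625, 103000) / 32 = 1610 / 32 = 50.
 */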
/*
* Ok, set the CPU frequency.
*/
static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr)
{
struct sdram_params *sdram = &sdram_params;
struct sdram_info sd;
unsigned long flags;
unsigned int unused;
sdram_calculate_timing(&sd, sa11x0_freq_table[ppcr].frequency, sdram);
#if 0
/*
* These values are wrong according to the SA1110 documentation
* and errata, but they seem to work. Need to get a storage
* scope on to the SDRAM signals to work out why.
*/
if (policy->max < 147500) {
sd.mdrefr |= MDREFR_K1DB2;
sd.mdcas[0] = 0xaaaaaa7f;
} else {
sd.mdrefr &= ~MDREFR_K1DB2;
sd.mdcas[0] = 0xaaaaaa9f;
}
sd.mdcas[1] = 0xaaaaaaaa;
sd.mdcas[2] = 0xaaaaaaaa;
#endif
/*
* The clock could be going away for some time. Set the SDRAMs
* to refresh rapidly (every 64 memory clock cycles). To get
* through the whole array, we need to wait 262144 mclk cycles.
* We wait 20ms to be safe.
*/
sdram_set_refresh(2);
if (!irqs_disabled())
msleep(20);
else
mdelay(20);
/*
* Reprogram the DRAM timings with interrupts disabled, and
* ensure that we are doing this within a complete cache line.
* This means that we won't access SDRAM for the duration of
* the programming.
*/
local_irq_save(flags);
asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
udelay(10);
__asm__ __volatile__("\n\
b 2f \n\
.align 5 \n\
1: str %3, [%1, #0] @ MDCNFG \n\
str %4, [%1, #28] @ MDREFR \n\
str %5, [%1, #4] @ MDCAS0 \n\
str %6, [%1, #8] @ MDCAS1 \n\
str %7, [%1, #12] @ MDCAS2 \n\
str %8, [%2, #0] @ PPCR \n\
ldr %0, [%1, #0] \n\
b 3f \n\
2: b 1b \n\
3: nop \n\
nop"
: "=&r" (unused)
: "r" (&MDCNFG), "r" (&PPCR), "0" (sd.mdcnfg),
"r" (sd.mdrefr), "r" (sd.mdcas[0]),
"r" (sd.mdcas[1]), "r" (sd.mdcas[2]), "r" (ppcr));
local_irq_restore(flags);
/*
* Now, return the SDRAM refresh back to normal.
*/
sdram_update_refresh(sa11x0_freq_table[ppcr].frequency, sdram);
return 0;
}
static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
{
cpufreq_generic_init(policy, sa11x0_freq_table, 0);
return 0;
}
/* sa1110_driver needs __refdata because it must remain valid after the
 * __init code that registers it with cpufreq_register_driver() is freed */
static struct cpufreq_driver sa1110_driver __refdata = {
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = sa1110_target,
.get = sa11x0_getspeed,
.init = sa1110_cpu_init,
.name = "sa1110",
};
static struct sdram_params *sa1110_find_sdram(const char *name)
{
struct sdram_params *sdram;
for (sdram = sdram_tbl; sdram < sdram_tbl + ARRAY_SIZE(sdram_tbl);
sdram++)
if (strcmp(name, sdram->name) == 0)
return sdram;
return NULL;
}
static char sdram_name[16];
static int __init sa1110_clk_init(void)
{
struct sdram_params *sdram;
const char *name = sdram_name;
if (!cpu_is_sa1110())
return -ENODEV;
if (!name[0]) {
if (machine_is_assabet())
name = "TC59SM716-CL3";
if (machine_is_jornada720() || machine_is_h3600())
name = "K4S281632B-1H";
}
sdram = sa1110_find_sdram(name);
if (sdram) {
printk(KERN_DEBUG "SDRAM: tck: %d trcd: %d trp: %d"
" twr: %d refresh: %d cas_latency: %d\n",
sdram->tck, sdram->trcd, sdram->trp,
sdram->twr, sdram->refresh, sdram->cas_latency);
memcpy(&sdram_params, sdram, sizeof(sdram_params));
return cpufreq_register_driver(&sa1110_driver);
}
return 0;
}
module_param_string(sdram, sdram_name, sizeof(sdram_name), 0);
arch_initcall(sa1110_clk_init);
| linux-master | drivers/cpufreq/sa1110-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/* us3_cpufreq.c: UltraSPARC-III cpu frequency support
*
* Copyright (C) 2003 David S. Miller ([email protected])
*
* Many thanks to Dominik Brodowski for fixing up the cpufreq
* infrastructure in order to make this driver easier to implement.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/threads.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <asm/head.h>
#include <asm/timer.h>
struct us3_freq_percpu_info {
struct cpufreq_frequency_table table[4];
};
/* Indexed by cpu number. */
static struct us3_freq_percpu_info *us3_freq_table;
/* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
* in the Safari config register.
*/
#define SAFARI_CFG_DIV_1 0x0000000000000000UL
#define SAFARI_CFG_DIV_2 0x0000000040000000UL
#define SAFARI_CFG_DIV_32 0x0000000080000000UL
#define SAFARI_CFG_DIV_MASK 0x00000000C0000000UL
static void read_safari_cfg(void *arg)
{
unsigned long ret, *val = arg;
__asm__ __volatile__("ldxa [%%g0] %1, %0"
: "=&r" (ret)
: "i" (ASI_SAFARI_CONFIG));
*val = ret;
}
static void update_safari_cfg(void *arg)
{
unsigned long reg, *new_bits = arg;
	read_safari_cfg(&reg);
reg &= ~SAFARI_CFG_DIV_MASK;
reg |= *new_bits;
__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
"membar #Sync"
: /* no outputs */
: "r" (reg), "i" (ASI_SAFARI_CONFIG)
: "memory");
}
static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg)
{
unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
unsigned long ret;
switch (safari_cfg & SAFARI_CFG_DIV_MASK) {
case SAFARI_CFG_DIV_1:
ret = clock_tick / 1;
break;
case SAFARI_CFG_DIV_2:
ret = clock_tick / 2;
break;
case SAFARI_CFG_DIV_32:
ret = clock_tick / 32;
break;
default:
BUG();
}
return ret;
}
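/*
 * Example (hypothetical 900MHz UltraSPARC-III): clock_tick is
 * 900000 kHz, so the three selectable rates are 900000 (DIV_1),
 * 450000 (DIV_2) and 28125 kHz (DIV_32).
 */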
static unsigned int us3_freq_get(unsigned int cpu)
{
unsigned long reg;
if (smp_call_function_single(cpu, read_safari_cfg, ®, 1))
return 0;
return get_current_freq(cpu, reg);
}
static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned int cpu = policy->cpu;
unsigned long new_bits, new_freq;
new_freq = sparc64_get_clock_tick(cpu) / 1000;
switch (index) {
case 0:
new_bits = SAFARI_CFG_DIV_1;
new_freq /= 1;
break;
case 1:
new_bits = SAFARI_CFG_DIV_2;
new_freq /= 2;
break;
case 2:
new_bits = SAFARI_CFG_DIV_32;
new_freq /= 32;
break;
default:
BUG();
}
return smp_call_function_single(cpu, update_safari_cfg, &new_bits, 1);
}
static int us3_freq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
struct cpufreq_frequency_table *table =
&us3_freq_table[cpu].table[0];
table[0].driver_data = 0;
table[0].frequency = clock_tick / 1;
table[1].driver_data = 1;
table[1].frequency = clock_tick / 2;
table[2].driver_data = 2;
table[2].frequency = clock_tick / 32;
table[3].driver_data = 0;
table[3].frequency = CPUFREQ_TABLE_END;
policy->cpuinfo.transition_latency = 0;
policy->cur = clock_tick;
policy->freq_table = table;
return 0;
}
static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
{
us3_freq_target(policy, 0);
return 0;
}
static struct cpufreq_driver cpufreq_us3_driver = {
.name = "UltraSPARC-III",
.init = us3_freq_cpu_init,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = us3_freq_target,
.get = us3_freq_get,
.exit = us3_freq_cpu_exit,
};
static int __init us3_freq_init(void)
{
unsigned long manuf, impl, ver;
int ret;
if (tlb_type != cheetah && tlb_type != cheetah_plus)
return -ENODEV;
__asm__("rdpr %%ver, %0" : "=r" (ver));
manuf = ((ver >> 48) & 0xffff);
impl = ((ver >> 32) & 0xffff);
if (manuf == CHEETAH_MANUF &&
(impl == CHEETAH_IMPL ||
impl == CHEETAH_PLUS_IMPL ||
impl == JAGUAR_IMPL ||
impl == PANTHER_IMPL)) {
us3_freq_table = kzalloc(NR_CPUS * sizeof(*us3_freq_table),
GFP_KERNEL);
if (!us3_freq_table)
return -ENOMEM;
ret = cpufreq_register_driver(&cpufreq_us3_driver);
if (ret)
kfree(us3_freq_table);
return ret;
}
return -ENODEV;
}
static void __exit us3_freq_exit(void)
{
cpufreq_unregister_driver(&cpufreq_us3_driver);
kfree(us3_freq_table);
}
MODULE_AUTHOR("David S. Miller <[email protected]>");
MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-III");
MODULE_LICENSE("GPL");
module_init(us3_freq_init);
module_exit(us3_freq_exit);
| linux-master | drivers/cpufreq/sparc-us3-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2009 Wolfson Microelectronics plc
*
* S3C64xx CPUfreq Support
*/
#define pr_fmt(fmt) "cpufreq: " fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
static struct regulator *vddarm;
static unsigned long regulator_latency;
struct s3c64xx_dvfs {
unsigned int vddarm_min;
unsigned int vddarm_max;
};
static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
[0] = { 1000000, 1150000 },
[1] = { 1050000, 1150000 },
[2] = { 1100000, 1150000 },
[3] = { 1200000, 1350000 },
[4] = { 1300000, 1350000 },
};
static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
{ 0, 0, 66000 },
{ 0, 0, 100000 },
{ 0, 0, 133000 },
{ 0, 1, 200000 },
{ 0, 1, 222000 },
{ 0, 1, 266000 },
{ 0, 2, 333000 },
{ 0, 2, 400000 },
{ 0, 2, 532000 },
{ 0, 2, 533000 },
{ 0, 3, 667000 },
{ 0, 4, 800000 },
{ 0, 0, CPUFREQ_TABLE_END },
};
static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
struct s3c64xx_dvfs *dvfs;
unsigned int old_freq, new_freq;
int ret;
old_freq = clk_get_rate(policy->clk) / 1000;
new_freq = s3c64xx_freq_table[index].frequency;
dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data];
#ifdef CONFIG_REGULATOR
if (vddarm && new_freq > old_freq) {
ret = regulator_set_voltage(vddarm,
dvfs->vddarm_min,
dvfs->vddarm_max);
if (ret != 0) {
pr_err("Failed to set VDDARM for %dkHz: %d\n",
new_freq, ret);
return ret;
}
}
#endif
ret = clk_set_rate(policy->clk, new_freq * 1000);
if (ret < 0) {
pr_err("Failed to set rate %dkHz: %d\n",
new_freq, ret);
return ret;
}
#ifdef CONFIG_REGULATOR
if (vddarm && new_freq < old_freq) {
ret = regulator_set_voltage(vddarm,
dvfs->vddarm_min,
dvfs->vddarm_max);
if (ret != 0) {
pr_err("Failed to set VDDARM for %dkHz: %d\n",
new_freq, ret);
if (clk_set_rate(policy->clk, old_freq * 1000) < 0)
pr_err("Failed to restore original clock rate\n");
return ret;
}
}
#endif
pr_debug("Set actual frequency %lukHz\n",
clk_get_rate(policy->clk) / 1000);
return 0;
}
#ifdef CONFIG_REGULATOR
static void s3c64xx_cpufreq_config_regulator(void)
{
int count, v, i, found;
struct cpufreq_frequency_table *freq;
struct s3c64xx_dvfs *dvfs;
	count = regulator_count_voltages(vddarm);
	if (count < 0)
		pr_err("Unable to check supported voltages\n");
	if (count <= 0)
		goto out;
cpufreq_for_each_valid_entry(freq, s3c64xx_freq_table) {
dvfs = &s3c64xx_dvfs_table[freq->driver_data];
found = 0;
for (i = 0; i < count; i++) {
v = regulator_list_voltage(vddarm, i);
if (v >= dvfs->vddarm_min && v <= dvfs->vddarm_max)
found = 1;
}
if (!found) {
pr_debug("%dkHz unsupported by regulator\n",
freq->frequency);
freq->frequency = CPUFREQ_ENTRY_INVALID;
}
}
out:
/* Guess based on having to do an I2C/SPI write; in future we
* will be able to query the regulator performance here. */
regulator_latency = 1 * 1000 * 1000;
}
#endif
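#if 0
/*
 * Illustrative sketch only (never built): how a frequency table entry
 * maps to a VDDARM window via the two tables above, assuming the
 * 800000 kHz entry at index 11 with driver_data 4.
 */
static void s3c64xx_example_lookup(void)
{
	const struct cpufreq_frequency_table *freq = &s3c64xx_freq_table[11];
	const struct s3c64xx_dvfs *dvfs = &s3c64xx_dvfs_table[freq->driver_data];

	/* Prints "800000 kHz wants VDDARM 1300000..1350000 uV" */
	pr_info("%u kHz wants VDDARM %u..%u uV\n",
		freq->frequency, dvfs->vddarm_min, dvfs->vddarm_max);
}
#endif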
static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *freq;
if (policy->cpu != 0)
return -EINVAL;
policy->clk = clk_get(NULL, "armclk");
if (IS_ERR(policy->clk)) {
pr_err("Unable to obtain ARMCLK: %ld\n",
PTR_ERR(policy->clk));
return PTR_ERR(policy->clk);
}
#ifdef CONFIG_REGULATOR
vddarm = regulator_get(NULL, "vddarm");
if (IS_ERR(vddarm)) {
pr_err("Failed to obtain VDDARM: %ld\n", PTR_ERR(vddarm));
pr_err("Only frequency scaling available\n");
vddarm = NULL;
} else {
s3c64xx_cpufreq_config_regulator();
}
#endif
cpufreq_for_each_entry(freq, s3c64xx_freq_table) {
unsigned long r;
/* Check for frequencies we can generate */
r = clk_round_rate(policy->clk, freq->frequency * 1000);
r /= 1000;
if (r != freq->frequency) {
pr_debug("%dkHz unsupported by clock\n",
freq->frequency);
freq->frequency = CPUFREQ_ENTRY_INVALID;
}
/* If we have no regulator then assume startup
* frequency is the maximum we can support. */
if (!vddarm && freq->frequency > clk_get_rate(policy->clk) / 1000)
freq->frequency = CPUFREQ_ENTRY_INVALID;
}
	/* Datasheet says PLL stabilisation time (if we were to use
* the PLLs, which we don't currently) is ~300us worst case,
* but add some fudge.
*/
cpufreq_generic_init(policy, s3c64xx_freq_table,
(500 * 1000) + regulator_latency);
return 0;
}
static struct cpufreq_driver s3c64xx_cpufreq_driver = {
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = s3c64xx_cpufreq_set_target,
.get = cpufreq_generic_get,
.init = s3c64xx_cpufreq_driver_init,
.name = "s3c",
};
static int __init s3c64xx_cpufreq_init(void)
{
return cpufreq_register_driver(&s3c64xx_cpufreq_driver);
}
module_init(s3c64xx_cpufreq_init);
| linux-master | drivers/cpufreq/s3c64xx-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pervasive backend for the cbe_cpufreq driver
*
* This driver makes use of the pervasive unit to
* engage the desired frequency.
*
* (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
*
* Author: Christian Krafft <[email protected]>
*/
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <asm/machdep.h>
#include <asm/hw_irq.h>
#include <asm/cell-regs.h>
#include "ppc_cbe_cpufreq.h"
/* to write to MIC register */
static u64 MIC_Slow_Fast_Timer_table[] = {
[0 ... 7] = 0x007fc00000000000ull,
};
/* more values for the MIC */
static u64 MIC_Slow_Next_Timer_table[] = {
0x0000240000000000ull,
0x0000268000000000ull,
0x000029C000000000ull,
0x00002D0000000000ull,
0x0000300000000000ull,
0x0000334000000000ull,
0x000039C000000000ull,
0x00003FC000000000ull,
};
int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
{
struct cbe_pmd_regs __iomem *pmd_regs;
struct cbe_mic_tm_regs __iomem *mic_tm_regs;
unsigned long flags;
u64 value;
#ifdef DEBUG
long time;
#endif
local_irq_save(flags);
mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
pmd_regs = cbe_get_cpu_pmd_regs(cpu);
#ifdef DEBUG
time = jiffies;
#endif
out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
value = in_be64(&pmd_regs->pmcr);
/* set bits to zero */
value &= 0xFFFFFFFFFFFFFFF8ull;
/* set bits to next pmode */
value |= pmode;
out_be64(&pmd_regs->pmcr, value);
#ifdef DEBUG
/* wait until new pmode appears in status register */
value = in_be64(&pmd_regs->pmsr) & 0x07;
while (value != pmode) {
cpu_relax();
value = in_be64(&pmd_regs->pmsr) & 0x07;
}
time = jiffies - time;
time = jiffies_to_msecs(time);
pr_debug("had to wait %lu ms for a transition using " \
"pervasive unit\n", time);
#endif
local_irq_restore(flags);
return 0;
}
int cbe_cpufreq_get_pmode(int cpu)
{
int ret;
struct cbe_pmd_regs __iomem *pmd_regs;
pmd_regs = cbe_get_cpu_pmd_regs(cpu);
ret = in_be64(&pmd_regs->pmsr) & 0x07;
return ret;
}
| linux-master | drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* CPPC (Collaborative Processor Performance Control) driver for
* interfacing with the CPUfreq layer and governors. See
* cppc_acpi.c for CPPC specific methods.
*
* (C) Copyright 2014, 2015 Linaro Ltd.
* Author: Ashwin Chaugule <[email protected]>
*/
#define pr_fmt(fmt) "CPPC Cpufreq: " fmt
#include <linux/arch_topology.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/irq_work.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <uapi/linux/sched/types.h>
#include <asm/unaligned.h>
#include <acpi/cppc_acpi.h>
/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48
/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED 0x14
/*
* This list contains information parsed from per CPU ACPI _CPC and _PSD
* structures: e.g. the highest and lowest supported performance, capabilities,
* desired performance, level requested etc. Depending on the share_type, not
* all CPUs will have an entry in the list.
*/
static LIST_HEAD(cpu_data_list);
static bool boost_supported;
struct cppc_workaround_oem_info {
char oem_id[ACPI_OEM_ID_SIZE + 1];
char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
u32 oem_revision;
};
static struct cppc_workaround_oem_info wa_info[] = {
{
.oem_id = "HISI ",
.oem_table_id = "HIP07 ",
.oem_revision = 0,
}, {
.oem_id = "HISI ",
.oem_table_id = "HIP08 ",
.oem_revision = 0,
}
};
static struct cpufreq_driver cppc_cpufreq_driver;
static enum {
FIE_UNSET = -1,
FIE_ENABLED,
FIE_DISABLED
} fie_disabled = FIE_UNSET;
#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
module_param(fie_disabled, int, 0444);
MODULE_PARM_DESC(fie_disabled, "Disable Frequency Invariance Engine (FIE)");
/* Frequency invariance support */
struct cppc_freq_invariance {
int cpu;
struct irq_work irq_work;
struct kthread_work work;
struct cppc_perf_fb_ctrs prev_perf_fb_ctrs;
struct cppc_cpudata *cpu_data;
};
static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
static struct kthread_worker *kworker_fie;
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1);
/**
* cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
* @work: The work item.
*
 * The CPPC driver registers itself with the topology core to provide its own
* implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick() which
* gets called by the scheduler on every tick.
*
* Note that the arch specific counters have higher priority than CPPC counters,
* if available, though the CPPC driver doesn't need to have any special
* handling for that.
*
* On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
* reach here from hard-irq context), which then schedules a normal work item
* and cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable
* based on the counter updates since the last tick.
*/
static void cppc_scale_freq_workfn(struct kthread_work *work)
{
struct cppc_freq_invariance *cppc_fi;
struct cppc_perf_fb_ctrs fb_ctrs = {0};
struct cppc_cpudata *cpu_data;
unsigned long local_freq_scale;
u64 perf;
cppc_fi = container_of(work, struct cppc_freq_invariance, work);
cpu_data = cppc_fi->cpu_data;
if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
pr_warn("%s: failed to read perf counters\n", __func__);
return;
}
perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
&fb_ctrs);
cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
perf <<= SCHED_CAPACITY_SHIFT;
local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf);
/* This can happen due to counter's overflow */
if (unlikely(local_freq_scale > 1024))
local_freq_scale = 1024;
per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
}
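/*
 * Scale example (illustrative counter-derived values): if the delivered
 * performance works out to 800 against a highest_perf of 1000, then
 * local_freq_scale = (800 << SCHED_CAPACITY_SHIFT) / 1000 = 819, i.e.
 * arch_freq_scale reports roughly 80% of maximum capacity for that CPU.
 */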
static void cppc_irq_work(struct irq_work *irq_work)
{
struct cppc_freq_invariance *cppc_fi;
cppc_fi = container_of(irq_work, struct cppc_freq_invariance, irq_work);
kthread_queue_work(kworker_fie, &cppc_fi->work);
}
static void cppc_scale_freq_tick(void)
{
struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());
/*
* cppc_get_perf_ctrs() can potentially sleep, call that from the right
* context.
*/
irq_work_queue(&cppc_fi->irq_work);
}
static struct scale_freq_data cppc_sftd = {
.source = SCALE_FREQ_SOURCE_CPPC,
.set_freq_scale = cppc_scale_freq_tick,
};
static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
{
struct cppc_freq_invariance *cppc_fi;
int cpu, ret;
if (fie_disabled)
return;
for_each_cpu(cpu, policy->cpus) {
cppc_fi = &per_cpu(cppc_freq_inv, cpu);
cppc_fi->cpu = cpu;
cppc_fi->cpu_data = policy->driver_data;
kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs);
if (ret) {
pr_warn("%s: failed to read perf counters for cpu:%d: %d\n",
__func__, cpu, ret);
/*
* Don't abort if the CPU was offline while the driver
* was getting registered.
*/
if (cpu_online(cpu))
return;
}
}
/* Register for freq-invariance */
topology_set_scale_freq_source(&cppc_sftd, policy->cpus);
}
/*
 * We free all the resources on policy removal and not on CPU removal, as the
 * irq-work items are per-CPU and the hotplug core takes care of flushing the
 * pending irq-works (hint: smpcfd_dying_cpu()) on CPU hotplug. Even if the
 * kthread-work fires on another CPU after the concerned CPU is removed, it
 * does no harm.
*
* We just need to make sure to remove them all on policy->exit().
*/
static void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
{
struct cppc_freq_invariance *cppc_fi;
int cpu;
if (fie_disabled)
return;
/* policy->cpus will be empty here, use related_cpus instead */
topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, policy->related_cpus);
for_each_cpu(cpu, policy->related_cpus) {
cppc_fi = &per_cpu(cppc_freq_inv, cpu);
irq_work_sync(&cppc_fi->irq_work);
kthread_cancel_work_sync(&cppc_fi->work);
}
}
static void __init cppc_freq_invariance_init(void)
{
struct sched_attr attr = {
.size = sizeof(struct sched_attr),
.sched_policy = SCHED_DEADLINE,
.sched_nice = 0,
.sched_priority = 0,
/*
* Fake (unused) bandwidth; workaround to "fix"
* priority inheritance.
*/
.sched_runtime = 1000000,
.sched_deadline = 10000000,
.sched_period = 10000000,
};
int ret;
if (fie_disabled != FIE_ENABLED && fie_disabled != FIE_DISABLED) {
fie_disabled = FIE_ENABLED;
if (cppc_perf_ctrs_in_pcc()) {
pr_info("FIE not enabled on systems with registers in PCC\n");
fie_disabled = FIE_DISABLED;
}
}
if (fie_disabled)
return;
kworker_fie = kthread_create_worker(0, "cppc_fie");
if (IS_ERR(kworker_fie)) {
pr_warn("%s: failed to create kworker_fie: %ld\n", __func__,
PTR_ERR(kworker_fie));
fie_disabled = FIE_DISABLED;
return;
}
ret = sched_setattr_nocheck(kworker_fie->task, &attr);
if (ret) {
pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
ret);
kthread_destroy_worker(kworker_fie);
fie_disabled = FIE_DISABLED;
}
}
static void cppc_freq_invariance_exit(void)
{
if (fie_disabled)
return;
kthread_destroy_worker(kworker_fie);
}
#else
static inline void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
{
}
static inline void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
{
}
static inline void cppc_freq_invariance_init(void)
{
}
static inline void cppc_freq_invariance_exit(void)
{
}
#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */
/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
const u8 *dmi_data = (const u8 *)dm;
u16 *mhz = (u16 *)private;
if (dm->type == DMI_ENTRY_PROCESSOR &&
dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
u16 val = (u16)get_unaligned((const u16 *)
(dmi_data + DMI_PROCESSOR_MAX_SPEED));
*mhz = val > *mhz ? val : *mhz;
}
}
/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
u16 mhz = 0;
dmi_walk(cppc_find_dmi_mhz, &mhz);
/*
* Real stupid fallback value, just in case there is no
* actual value set.
*/
mhz = mhz ? mhz : 1;
return (1000 * mhz);
}
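/*
 * Worked example (illustrative): if DMI reports a maximum speed of
 * 2400 MHz, cppc_get_dmi_max_khz() returns 1000 * 2400 = 2400000 kHz;
 * if DMI reports nothing, the fallback of 1 MHz yields 1000 kHz.
 */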
/*
* If CPPC lowest_freq and nominal_freq registers are exposed then we can
* use them to convert perf to freq and vice versa. The conversion is
 * extrapolated as an affine function passing through the two points:
 * - (Lowest perf, Lowest freq)
 * - (Nominal perf, Nominal freq)
*/
static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
unsigned int perf)
{
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
s64 retval, offset = 0;
static u64 max_khz;
u64 mul, div;
if (caps->lowest_freq && caps->nominal_freq) {
mul = caps->nominal_freq - caps->lowest_freq;
div = caps->nominal_perf - caps->lowest_perf;
offset = caps->nominal_freq - div64_u64(caps->nominal_perf * mul, div);
} else {
if (!max_khz)
max_khz = cppc_get_dmi_max_khz();
mul = max_khz;
div = caps->highest_perf;
}
retval = offset + div64_u64(perf * mul, div);
if (retval >= 0)
return retval;
return 0;
}
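/*
 * Worked example with hypothetical capability values: assume
 * lowest_perf = 100 at lowest_freq = 1000000 kHz and nominal_perf = 200
 * at nominal_freq = 2000000 kHz. Then mul = 1000000, div = 100 and
 * offset = 2000000 - 200 * (1000000 / 100) = 0, so perf 150 maps to
 * 0 + 150 * 1000000 / 100 = 1500000 kHz, halfway between the two anchor
 * points as expected.
 */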
static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
unsigned int freq)
{
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
s64 retval, offset = 0;
static u64 max_khz;
u64 mul, div;
if (caps->lowest_freq && caps->nominal_freq) {
mul = caps->nominal_perf - caps->lowest_perf;
div = caps->nominal_freq - caps->lowest_freq;
offset = caps->nominal_perf - div64_u64(caps->nominal_freq * mul, div);
} else {
if (!max_khz)
max_khz = cppc_get_dmi_max_khz();
mul = caps->highest_perf;
div = max_khz;
}
retval = offset + div64_u64(freq * mul, div);
if (retval >= 0)
return retval;
return 0;
}
static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
struct cppc_cpudata *cpu_data = policy->driver_data;
unsigned int cpu = policy->cpu;
struct cpufreq_freqs freqs;
u32 desired_perf;
int ret = 0;
desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
/* Return if it is exactly the same perf */
if (desired_perf == cpu_data->perf_ctrls.desired_perf)
return ret;
cpu_data->perf_ctrls.desired_perf = desired_perf;
freqs.old = policy->cur;
freqs.new = target_freq;
cpufreq_freq_transition_begin(policy, &freqs);
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
cpufreq_freq_transition_end(policy, &freqs, ret != 0);
if (ret)
pr_debug("Failed to set target on CPU:%d. ret:%d\n",
cpu, ret);
return ret;
}
static unsigned int cppc_cpufreq_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cppc_cpudata *cpu_data = policy->driver_data;
unsigned int cpu = policy->cpu;
u32 desired_perf;
int ret;
desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
cpu_data->perf_ctrls.desired_perf = desired_perf;
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
if (ret) {
pr_debug("Failed to set target on CPU:%d. ret:%d\n",
cpu, ret);
return 0;
}
return target_freq;
}
static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
cpufreq_verify_within_cpu_limits(policy);
return 0;
}
/*
 * The PCC subspace describes the rate at which the platform can accept
 * commands on the shared PCC channel (including READs which do not count
 * towards freq transition requests), so ideally we need to use the PCC values
 * as a fallback if we don't have a platform-specific transition_delay_us.
*/
#ifdef CONFIG_ARM64
#include <asm/cputype.h>
static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
unsigned long implementor = read_cpuid_implementor();
unsigned long part_num = read_cpuid_part_number();
switch (implementor) {
case ARM_CPU_IMP_QCOM:
switch (part_num) {
case QCOM_CPU_PART_FALKOR_V1:
case QCOM_CPU_PART_FALKOR:
return 10000;
}
}
return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
#else
static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
#endif
#if defined(CONFIG_ARM64) && defined(CONFIG_ENERGY_MODEL)
static DEFINE_PER_CPU(unsigned int, efficiency_class);
static void cppc_cpufreq_register_em(struct cpufreq_policy *policy);
/* Create an artificial performance state every CPPC_EM_CAP_STEP capacity unit. */
#define CPPC_EM_CAP_STEP (20)
/* Increase the cost value by CPPC_EM_COST_STEP every performance state. */
#define CPPC_EM_COST_STEP (1)
/* Add a cost gap corresponding to the energy of 4 CPUs. */
#define CPPC_EM_COST_GAP (4 * SCHED_CAPACITY_SCALE * CPPC_EM_COST_STEP \
/ CPPC_EM_CAP_STEP)
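/*
 * With the default values above, CPPC_EM_COST_GAP evaluates to
 * 4 * 1024 * 1 / 20 = 204 cost units per efficiency class, so e.g. a CPU
 * in (squeezed) efficiency class 1 at step 5 gets
 * cost = 204 * 1 + 5 * 1 = 209 from compute_cost() below.
 */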
static unsigned int get_perf_level_count(struct cpufreq_policy *policy)
{
struct cppc_perf_caps *perf_caps;
unsigned int min_cap, max_cap;
struct cppc_cpudata *cpu_data;
int cpu = policy->cpu;
cpu_data = policy->driver_data;
perf_caps = &cpu_data->perf_caps;
max_cap = arch_scale_cpu_capacity(cpu);
min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
perf_caps->highest_perf);
if ((min_cap == 0) || (max_cap < min_cap))
return 0;
return 1 + max_cap / CPPC_EM_CAP_STEP - min_cap / CPPC_EM_CAP_STEP;
}
/*
* The cost is defined as:
* cost = power * max_frequency / frequency
*/
static inline unsigned long compute_cost(int cpu, int step)
{
return CPPC_EM_COST_GAP * per_cpu(efficiency_class, cpu) +
step * CPPC_EM_COST_STEP;
}
static int cppc_get_cpu_power(struct device *cpu_dev,
unsigned long *power, unsigned long *KHz)
{
unsigned long perf_step, perf_prev, perf, perf_check;
unsigned int min_step, max_step, step, step_check;
unsigned long prev_freq = *KHz;
unsigned int min_cap, max_cap;
struct cpufreq_policy *policy;
struct cppc_perf_caps *perf_caps;
struct cppc_cpudata *cpu_data;
policy = cpufreq_cpu_get_raw(cpu_dev->id);
cpu_data = policy->driver_data;
perf_caps = &cpu_data->perf_caps;
max_cap = arch_scale_cpu_capacity(cpu_dev->id);
min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
perf_caps->highest_perf);
perf_step = div_u64((u64)CPPC_EM_CAP_STEP * perf_caps->highest_perf,
max_cap);
min_step = min_cap / CPPC_EM_CAP_STEP;
max_step = max_cap / CPPC_EM_CAP_STEP;
perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
step = perf_prev / perf_step;
if (step > max_step)
return -EINVAL;
if (min_step == max_step) {
step = max_step;
perf = perf_caps->highest_perf;
} else if (step < min_step) {
step = min_step;
perf = perf_caps->lowest_perf;
} else {
step++;
if (step == max_step)
perf = perf_caps->highest_perf;
else
perf = step * perf_step;
}
*KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
step_check = perf_check / perf_step;
/*
	 * To avoid bad integer approximation, check that the new frequency
	 * value has increased and that it converts back to the desired step
	 * value.
*/
while ((*KHz == prev_freq) || (step_check != step)) {
perf++;
*KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
step_check = perf_check / perf_step;
}
/*
	 * With an artificial EM, only the cost value is used. Still, the power
	 * is populated such that 0 < power < EM_MAX_POWER, which gives the
	 * artificial performance states more meaning.
*/
*power = compute_cost(cpu_dev->id, step);
return 0;
}
static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
unsigned long *cost)
{
unsigned long perf_step, perf_prev;
struct cppc_perf_caps *perf_caps;
struct cpufreq_policy *policy;
struct cppc_cpudata *cpu_data;
unsigned int max_cap;
int step;
policy = cpufreq_cpu_get_raw(cpu_dev->id);
cpu_data = policy->driver_data;
perf_caps = &cpu_data->perf_caps;
max_cap = arch_scale_cpu_capacity(cpu_dev->id);
perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, KHz);
perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
step = perf_prev / perf_step;
*cost = compute_cost(cpu_dev->id, step);
return 0;
}
static int populate_efficiency_class(void)
{
struct acpi_madt_generic_interrupt *gicc;
DECLARE_BITMAP(used_classes, 256) = {};
int class, cpu, index;
for_each_possible_cpu(cpu) {
gicc = acpi_cpu_get_madt_gicc(cpu);
class = gicc->efficiency_class;
bitmap_set(used_classes, class, 1);
}
if (bitmap_weight(used_classes, 256) <= 1) {
pr_debug("Efficiency classes are all equal (=%d). "
"No EM registered", class);
return -EINVAL;
}
/*
	 * Squeeze the efficiency class values into [0:#efficiency_class-1].
	 * Per the spec, values are in [0:255].
*/
index = 0;
for_each_set_bit(class, used_classes, 256) {
for_each_possible_cpu(cpu) {
gicc = acpi_cpu_get_madt_gicc(cpu);
if (gicc->efficiency_class == class)
per_cpu(efficiency_class, cpu) = index;
}
index++;
}
cppc_cpufreq_driver.register_em = cppc_cpufreq_register_em;
return 0;
}
static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
{
struct cppc_cpudata *cpu_data;
struct em_data_callback em_cb =
EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);
cpu_data = policy->driver_data;
em_dev_register_perf_domain(get_cpu_device(policy->cpu),
get_perf_level_count(policy), &em_cb,
cpu_data->shared_cpu_map, 0);
}
#else
static int populate_efficiency_class(void)
{
return 0;
}
#endif
static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
{
struct cppc_cpudata *cpu_data;
int ret;
cpu_data = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
if (!cpu_data)
goto out;
if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
goto free_cpu;
ret = acpi_get_psd_map(cpu, cpu_data);
if (ret) {
pr_debug("Err parsing CPU%d PSD data: ret:%d\n", cpu, ret);
goto free_mask;
}
ret = cppc_get_perf_caps(cpu, &cpu_data->perf_caps);
if (ret) {
pr_debug("Err reading CPU%d perf caps: ret:%d\n", cpu, ret);
goto free_mask;
}
/* Convert the lowest and nominal freq from MHz to KHz */
cpu_data->perf_caps.lowest_freq *= 1000;
cpu_data->perf_caps.nominal_freq *= 1000;
list_add(&cpu_data->node, &cpu_data_list);
return cpu_data;
free_mask:
free_cpumask_var(cpu_data->shared_cpu_map);
free_cpu:
kfree(cpu_data);
out:
return NULL;
}
static void cppc_cpufreq_put_cpu_data(struct cpufreq_policy *policy)
{
struct cppc_cpudata *cpu_data = policy->driver_data;
list_del(&cpu_data->node);
free_cpumask_var(cpu_data->shared_cpu_map);
kfree(cpu_data);
policy->driver_data = NULL;
}
static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
struct cppc_cpudata *cpu_data;
struct cppc_perf_caps *caps;
int ret;
cpu_data = cppc_cpufreq_get_cpu_data(cpu);
if (!cpu_data) {
pr_err("Error in acquiring _CPC/_PSD data for CPU%d.\n", cpu);
return -ENODEV;
}
caps = &cpu_data->perf_caps;
policy->driver_data = cpu_data;
/*
* Set min to lowest nonlinear perf to avoid any efficiency penalty (see
* Section 8.4.7.1.1.5 of ACPI 6.1 spec)
*/
policy->min = cppc_cpufreq_perf_to_khz(cpu_data,
caps->lowest_nonlinear_perf);
policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
caps->nominal_perf);
/*
* Set cpuinfo.min_freq to Lowest to make the full range of performance
* available if userspace wants to use any perf between lowest & lowest
* nonlinear perf
*/
policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data,
caps->lowest_perf);
policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data,
caps->nominal_perf);
policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
policy->shared_type = cpu_data->shared_type;
switch (policy->shared_type) {
case CPUFREQ_SHARED_TYPE_HW:
case CPUFREQ_SHARED_TYPE_NONE:
/* Nothing to be done - we'll have a policy for each CPU */
break;
case CPUFREQ_SHARED_TYPE_ANY:
/*
* All CPUs in the domain will share a policy and all cpufreq
* operations will use a single cppc_cpudata structure stored
* in policy->driver_data.
*/
cpumask_copy(policy->cpus, cpu_data->shared_cpu_map);
break;
default:
pr_debug("Unsupported CPU co-ord type: %d\n",
policy->shared_type);
ret = -EFAULT;
goto out;
}
policy->fast_switch_possible = cppc_allow_fast_switch();
policy->dvfs_possible_from_any_cpu = true;
/*
* If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
* is supported.
*/
if (caps->highest_perf > caps->nominal_perf)
boost_supported = true;
/* Set policy->cur to max now. The governors will adjust later. */
policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf);
cpu_data->perf_ctrls.desired_perf = caps->highest_perf;
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
if (ret) {
pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
caps->highest_perf, cpu, ret);
goto out;
}
cppc_cpufreq_cpu_fie_init(policy);
return 0;
out:
cppc_cpufreq_put_cpu_data(policy);
return ret;
}
static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct cppc_cpudata *cpu_data = policy->driver_data;
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
unsigned int cpu = policy->cpu;
int ret;
cppc_cpufreq_cpu_fie_exit(policy);
cpu_data->perf_ctrls.desired_perf = caps->lowest_perf;
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
if (ret)
pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
caps->lowest_perf, cpu, ret);
cppc_cpufreq_put_cpu_data(policy);
return 0;
}
static inline u64 get_delta(u64 t1, u64 t0)
{
if (t1 > t0 || t0 > ~(u32)0)
return t1 - t0;
return (u32)t1 - (u32)t0;
}
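/*
 * Illustrative wraparound case: with 32-bit feedback counters,
 * t0 = 0xfffffff0 and t1 = 0x10 gives (u32)t1 - (u32)t0 = 0x20, the true
 * elapsed count, whereas a plain t1 - t0 would underflow.
 */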
static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1)
{
u64 delta_reference, delta_delivered;
u64 reference_perf;
reference_perf = fb_ctrs_t0->reference_perf;
delta_reference = get_delta(fb_ctrs_t1->reference,
fb_ctrs_t0->reference);
delta_delivered = get_delta(fb_ctrs_t1->delivered,
fb_ctrs_t0->delivered);
	/* Check to avoid divide-by-zero and invalid delivered_perf */
if (!delta_reference || !delta_delivered)
return cpu_data->perf_ctrls.desired_perf;
return (reference_perf * delta_delivered) / delta_reference;
}
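/*
 * Worked example (hypothetical counter deltas): with reference_perf = 100,
 * delta_reference = 1000 and delta_delivered = 1500, the CPU ran 1.5x
 * faster than the reference clock, so the delivered perf is
 * 100 * 1500 / 1000 = 150.
 */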
static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cppc_cpudata *cpu_data;
	u64 delivered_perf;
	int ret;
	/* The policy may already be gone, e.g. during driver removal */
	if (!policy)
		return 0;
	cpu_data = policy->driver_data;
	cpufreq_cpu_put(policy);
ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
if (ret)
return 0;
udelay(2); /* 2usec delay between sampling */
ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
if (ret)
return 0;
delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
&fb_ctrs_t1);
return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
}
static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
{
struct cppc_cpudata *cpu_data = policy->driver_data;
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
int ret;
if (!boost_supported) {
pr_err("BOOST not supported by CPU or firmware\n");
return -EINVAL;
}
if (state)
policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
caps->highest_perf);
else
policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
caps->nominal_perf);
policy->cpuinfo.max_freq = policy->max;
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
if (ret < 0)
return ret;
return 0;
}
static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
struct cppc_cpudata *cpu_data = policy->driver_data;
return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
}
cpufreq_freq_attr_ro(freqdomain_cpus);
static struct freq_attr *cppc_cpufreq_attr[] = {
&freqdomain_cpus,
NULL,
};
static struct cpufreq_driver cppc_cpufreq_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = cppc_verify_policy,
.target = cppc_cpufreq_set_target,
.get = cppc_cpufreq_get_rate,
.fast_switch = cppc_cpufreq_fast_switch,
.init = cppc_cpufreq_cpu_init,
.exit = cppc_cpufreq_cpu_exit,
.set_boost = cppc_cpufreq_set_boost,
.attr = cppc_cpufreq_attr,
.name = "cppc_cpufreq",
};
/*
 * The HiSilicon platform does not support the delivered performance counter
 * or the reference performance counter. It calculates the performance using a
 * platform-specific mechanism. We reuse the desired performance register to
 * store the real performance calculated by the platform.
*/
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cppc_cpudata *cpu_data;
	u64 desired_perf;
	int ret;
	/* The policy may already be gone, e.g. during driver removal */
	if (!policy)
		return 0;
	cpu_data = policy->driver_data;
	cpufreq_cpu_put(policy);
	ret = cppc_get_desired_perf(cpu, &desired_perf);
	/* Return 0 (unknown rate) on error: the return type is unsigned */
	if (ret < 0)
		return 0;
return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf);
}
static void cppc_check_hisi_workaround(void)
{
struct acpi_table_header *tbl;
acpi_status status = AE_OK;
int i;
status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
if (ACPI_FAILURE(status) || !tbl)
return;
for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
!memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
wa_info[i].oem_revision == tbl->oem_revision) {
/* Overwrite the get() callback */
cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate;
fie_disabled = FIE_DISABLED;
break;
}
}
acpi_put_table(tbl);
}
static int __init cppc_cpufreq_init(void)
{
int ret;
if (!acpi_cpc_valid())
return -ENODEV;
cppc_check_hisi_workaround();
cppc_freq_invariance_init();
populate_efficiency_class();
ret = cpufreq_register_driver(&cppc_cpufreq_driver);
if (ret)
cppc_freq_invariance_exit();
return ret;
}
static inline void free_cpu_data(void)
{
struct cppc_cpudata *iter, *tmp;
list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
free_cpumask_var(iter->shared_cpu_map);
list_del(&iter->node);
kfree(iter);
}
}
static void __exit cppc_cpufreq_exit(void)
{
cpufreq_unregister_driver(&cppc_cpufreq_driver);
cppc_freq_invariance_exit();
free_cpu_data();
}
module_exit(cppc_cpufreq_exit);
MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");
late_initcall(cppc_cpufreq_init);
static const struct acpi_device_id cppc_acpi_ids[] __used = {
{ACPI_PROCESSOR_DEVICE_HID, },
{}
};
MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);
| linux-master | drivers/cpufreq/cppc_cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/cpufreq/cpufreq.c
*
* Copyright (C) 2001 Russell King
* (C) 2002 - 2003 Dominik Brodowski <[email protected]>
* (C) 2013 Viresh Kumar <[email protected]>
*
* Oct 2005 - Ashok Raj <[email protected]>
* Added handling for CPU hotplug
* Feb 2006 - Jacob Shin <[email protected]>
* Fix handling for CPU hotplug -- affected CPUs
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <linux/units.h>
#include <trace/events/power.h>
static LIST_HEAD(cpufreq_policy_list);
/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active) \
list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
if ((__active) == !policy_is_inactive(__policy))
#define for_each_active_policy(__policy) \
for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy) \
for_each_suitable_policy(__policy, false)
/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor) \
list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
static char default_governor[CPUFREQ_NAME_LEN];
/*
* The "cpufreq driver" - the arch- or hardware-dependent low
* level driver of CPUFreq support, and its spinlock. This lock
* also protects the cpufreq_cpu_data array.
*/
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);
static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
bool cpufreq_supports_freq_invariance(void)
{
return static_branch_likely(&cpufreq_freq_invariance);
}
/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;
static inline bool has_target(void)
{
return cpufreq_driver->target_index || cpufreq_driver->target;
}
bool has_target_index(void)
{
return !!cpufreq_driver->target_index;
}
/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_governor *new_gov,
unsigned int new_pol);
static bool cpufreq_boost_supported(void);
/*
* Two notifier lists: the "policy" list is involved in the
* validation process for a new CPU frequency policy; the
* "transition" list for kernel code that needs to handle
* changes to devices when the CPU clock speed changes.
* The mutex locks both lists.
*/
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);
static int off __read_mostly;
static int cpufreq_disabled(void)
{
return off;
}
void disable_cpufreq(void)
{
off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);
bool have_governor_per_policy(void)
{
return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
static struct kobject *cpufreq_global_kobject;
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
if (have_governor_per_policy())
return &policy->kobj;
else
return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
struct kernel_cpustat kcpustat;
u64 cur_wall_time;
u64 idle_time;
u64 busy_time;
cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
kcpustat_cpu_fetch(&kcpustat, cpu);
busy_time = kcpustat.cpustat[CPUTIME_USER];
busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
busy_time += kcpustat.cpustat[CPUTIME_IRQ];
busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
busy_time += kcpustat.cpustat[CPUTIME_STEAL];
busy_time += kcpustat.cpustat[CPUTIME_NICE];
idle_time = cur_wall_time - busy_time;
if (wall)
*wall = div_u64(cur_wall_time, NSEC_PER_USEC);
return div_u64(idle_time, NSEC_PER_USEC);
}
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
if (idle_time == -1ULL)
return get_cpu_idle_time_jiffy(cpu, wall);
else if (!io_busy)
idle_time += get_cpu_iowait_time_us(cpu, wall);
return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
/*
* This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - set the frequency table passed in
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
*/
void cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int transition_latency)
{
policy->freq_table = table;
policy->cpuinfo.transition_latency = transition_latency;
/*
* The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
*/
cpumask_setall(policy->cpus);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
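/*
 * Minimal usage sketch (hypothetical driver, for illustration only):
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_generic_init(policy, foo_freq_table, 300 * 1000);
 *		return 0;
 *	}
 *
 * where foo_freq_table is an assumed CPUFREQ_TABLE_END-terminated table
 * and 300 * 1000 is an assumed transition latency of 300 us, in ns.
 */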
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
unsigned int cpufreq_generic_get(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
if (!policy || IS_ERR(policy->clk)) {
pr_err("%s: No %s associated to cpu: %d\n",
__func__, policy ? "clk" : "policy", cpu);
return 0;
}
return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
/**
* cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
* @cpu: CPU to find the policy for.
*
* Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
* the kobject reference counter of that policy. Return a valid policy on
* success or NULL on failure.
*
* The policy returned by this function has to be released with the help of
* cpufreq_cpu_put() to balance its kobject reference counter properly.
*/
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
struct cpufreq_policy *policy = NULL;
unsigned long flags;
if (WARN_ON(cpu >= nr_cpu_ids))
return NULL;
/* get the cpufreq driver */
read_lock_irqsave(&cpufreq_driver_lock, flags);
if (cpufreq_driver) {
/* get the CPU */
policy = cpufreq_cpu_get_raw(cpu);
if (policy)
kobject_get(&policy->kobj);
}
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
/**
* cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
* @policy: cpufreq policy returned by cpufreq_cpu_get().
*/
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
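/*
 * Typical get/put pairing (illustrative sketch): every successful
 * cpufreq_cpu_get() must be balanced by a cpufreq_cpu_put():
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("CPU%u max freq: %u kHz\n", cpu,
 *			policy->cpuinfo.max_freq);
 *		cpufreq_cpu_put(policy);
 *	}
 */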
/**
* cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
* @policy: cpufreq policy returned by cpufreq_cpu_acquire().
*/
void cpufreq_cpu_release(struct cpufreq_policy *policy)
{
if (WARN_ON(!policy))
return;
lockdep_assert_held(&policy->rwsem);
up_write(&policy->rwsem);
cpufreq_cpu_put(policy);
}
/**
* cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
* @cpu: CPU to find the policy for.
*
* Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
* if the policy returned by it is not NULL, acquire its rwsem for writing.
* Return the policy if it is active or release it and return NULL otherwise.
*
* The policy returned by this function has to be released with the help of
* cpufreq_cpu_release() in order to release its rwsem and balance its usage
* counter properly.
*/
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
if (!policy)
return NULL;
down_write(&policy->rwsem);
if (policy_is_inactive(policy)) {
cpufreq_cpu_release(policy);
return NULL;
}
return policy;
}
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
/**
* adjust_jiffies - Adjust the system "loops_per_jiffy".
* @val: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
* @ci: Frequency change information.
*
* This function alters the system "loops_per_jiffy" for the clock
* speed change. Note that loops_per_jiffy cannot be updated on SMP
* systems as each CPU might be scaled differently. So, use the arch
* per-CPU loops_per_jiffy value wherever possible.
*/
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;
if (ci->flags & CPUFREQ_CONST_LOOPS)
return;
if (!l_p_j_ref_freq) {
l_p_j_ref = loops_per_jiffy;
l_p_j_ref_freq = ci->old;
pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
l_p_j_ref, l_p_j_ref_freq);
}
if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
ci->new);
pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
loops_per_jiffy, ci->new);
}
#endif
}
/**
* cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
 * @policy: cpufreq policy the transition applies to.
 * @freqs: details of the frequency update.
* @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
*
* This function calls the transition notifiers and adjust_jiffies().
*
* It is called twice on all CPU frequency changes that have external effects.
*/
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs,
unsigned int state)
{
int cpu;
BUG_ON(irqs_disabled());
if (cpufreq_disabled())
return;
freqs->policy = policy;
freqs->flags = cpufreq_driver->flags;
pr_debug("notification %u of frequency transition to %u kHz\n",
state, freqs->new);
switch (state) {
case CPUFREQ_PRECHANGE:
/*
* Detect if the driver reported a value as "old frequency"
* which is not equal to what the cpufreq core thinks is
* "old frequency".
*/
if (policy->cur && policy->cur != freqs->old) {
pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
freqs->old, policy->cur);
freqs->old = policy->cur;
}
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_PRECHANGE, freqs);
adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
break;
case CPUFREQ_POSTCHANGE:
adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
cpumask_pr_args(policy->cpus));
for_each_cpu(cpu, policy->cpus)
trace_cpu_frequency(freqs->new, cpu);
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_POSTCHANGE, freqs);
cpufreq_stats_record_transition(policy, freqs->new);
policy->cur = freqs->new;
}
}
/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, int transition_failed)
{
cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
if (!transition_failed)
return;
swap(freqs->old, freqs->new);
cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs)
{
/*
* Catch double invocations of _begin() which lead to self-deadlock.
* ASYNC_NOTIFICATION drivers are left out because the cpufreq core
* doesn't invoke _begin() on their behalf, and hence the chances of
* double invocations are very low. Moreover, there are scenarios
* where these checks can emit false-positive warnings in these
* drivers; so we avoid that by skipping them altogether.
*/
WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
&& current == policy->transition_task);
wait:
wait_event(policy->transition_wait, !policy->transition_ongoing);
spin_lock(&policy->transition_lock);
if (unlikely(policy->transition_ongoing)) {
spin_unlock(&policy->transition_lock);
goto wait;
}
policy->transition_ongoing = true;
policy->transition_task = current;
spin_unlock(&policy->transition_lock);
cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, int transition_failed)
{
if (WARN_ON(!policy->transition_ongoing))
return;
cpufreq_notify_post_transition(policy, freqs, transition_failed);
arch_set_freq_scale(policy->related_cpus,
policy->cur,
policy->cpuinfo.max_freq);
spin_lock(&policy->transition_lock);
policy->transition_ongoing = false;
policy->transition_task = NULL;
spin_unlock(&policy->transition_lock);
wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
/*
* Fast frequency switching status count. Positive means "enabled", negative
* means "disabled" and 0 means "not decided yet".
*/
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);
static void cpufreq_list_transition_notifiers(void)
{
struct notifier_block *nb;
pr_info("Registered transition notifiers:\n");
mutex_lock(&cpufreq_transition_notifier_list.mutex);
for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
pr_info("%pS\n", nb->notifier_call);
mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}
/**
* cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
* @policy: cpufreq policy to enable fast frequency switching for.
*
* Try to enable fast frequency switching for @policy.
*
* The attempt will fail if there is at least one transition notifier registered
* at this point, as fast frequency switching is quite fundamentally at odds
* with transition notifiers. Thus if successful, it will make registration of
* transition notifiers fail going forward.
*/
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
lockdep_assert_held(&policy->rwsem);
if (!policy->fast_switch_possible)
return;
mutex_lock(&cpufreq_fast_switch_lock);
if (cpufreq_fast_switch_count >= 0) {
cpufreq_fast_switch_count++;
policy->fast_switch_enabled = true;
} else {
pr_warn("CPU%u: Fast frequency switching not enabled\n",
policy->cpu);
cpufreq_list_transition_notifiers();
}
mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
/**
* cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
* @policy: cpufreq policy to disable fast frequency switching for.
*/
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
mutex_lock(&cpufreq_fast_switch_lock);
if (policy->fast_switch_enabled) {
policy->fast_switch_enabled = false;
if (!WARN_ON(cpufreq_fast_switch_count <= 0))
cpufreq_fast_switch_count--;
}
mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
static unsigned int __resolve_freq(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
unsigned int idx;
target_freq = clamp_val(target_freq, policy->min, policy->max);
if (!policy->freq_table)
return target_freq;
idx = cpufreq_frequency_table_target(policy, target_freq, relation);
policy->cached_resolved_idx = idx;
policy->cached_target_freq = target_freq;
return policy->freq_table[idx].frequency;
}
/**
* cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
* one.
* @policy: associated policy to interrogate
* @target_freq: target frequency to resolve.
*
* The target to driver frequency mapping is cached in the policy.
*
* Return: Lowest driver-supported frequency greater than or equal to the
* given target_freq, subject to policy (min/max) and driver limitations.
*/
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
unsigned int target_freq)
{
return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
{
unsigned int latency;
if (policy->transition_delay_us)
return policy->transition_delay_us;
latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
if (latency) {
/*
* For platforms that can change the frequency very fast (< 10
* us), the above formula gives a decent transition delay. But
* for platforms where transition_latency is in milliseconds, it
* ends up giving unrealistic values.
*
* Cap the default transition delay to 10 ms, which seems to be
* a reasonable amount of time after which we should reevaluate
* the frequency.
*/
return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
}
return LATENCY_MULTIPLIER;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
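/*
 * Worked example (illustrative): a driver reporting a transition_latency
 * of 500000 ns yields latency = 500 us, so the default transition delay
 * is min(500 * LATENCY_MULTIPLIER, 10000), i.e. capped at 10000 us
 * (10 ms); a fast platform reporting 5000 ns gets 5 * LATENCY_MULTIPLIER
 * instead.
 */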
/*********************************************************************
* SYSFS INTERFACE *
*********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}
static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int ret, enable;
ret = sscanf(buf, "%d", &enable);
if (ret != 1 || enable < 0 || enable > 1)
return -EINVAL;
if (cpufreq_boost_trigger_state(enable)) {
pr_err("%s: Cannot %s BOOST!\n",
__func__, enable ? "enable" : "disable");
return -EINVAL;
}
pr_debug("%s: cpufreq BOOST %s\n",
__func__, enable ? "enabled" : "disabled");
return count;
}
define_one_global_rw(boost);
static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
{
return sysfs_emit(buf, "%d\n", policy->boost_enabled);
}
static ssize_t store_local_boost(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
int ret, enable;
ret = kstrtoint(buf, 10, &enable);
if (ret || enable < 0 || enable > 1)
return -EINVAL;
if (!cpufreq_driver->boost_enabled)
return -EINVAL;
if (policy->boost_enabled == enable)
return count;
cpus_read_lock();
ret = cpufreq_driver->set_boost(policy, enable);
cpus_read_unlock();
if (ret)
return ret;
policy->boost_enabled = enable;
return count;
}
static struct freq_attr local_boost = __ATTR(boost, 0644, show_local_boost, store_local_boost);
static struct cpufreq_governor *find_governor(const char *str_governor)
{
struct cpufreq_governor *t;
for_each_governor(t)
if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
return t;
return NULL;
}
static struct cpufreq_governor *get_governor(const char *str_governor)
{
struct cpufreq_governor *t;
mutex_lock(&cpufreq_governor_mutex);
t = find_governor(str_governor);
if (!t)
goto unlock;
if (!try_module_get(t->owner))
t = NULL;
unlock:
mutex_unlock(&cpufreq_governor_mutex);
return t;
}
static unsigned int cpufreq_parse_policy(char *str_governor)
{
if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
return CPUFREQ_POLICY_PERFORMANCE;
if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
return CPUFREQ_POLICY_POWERSAVE;
return CPUFREQ_POLICY_UNKNOWN;
}
/**
* cpufreq_parse_governor - parse a governor string only for has_target()
* @str_governor: Governor name.
*/
static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
{
struct cpufreq_governor *t;
t = get_governor(str_governor);
if (t)
return t;
if (request_module("cpufreq_%s", str_governor))
return NULL;
return get_governor(str_governor);
}
/*
* cpufreq_per_cpu_attr_read() / show_##file_name() -
* print out cpufreq information
*
* Write out information from cpufreq_driver->policy[cpu]; object must be
* "unsigned int".
*/
#define show_one(file_name, object) \
static ssize_t show_##file_name \
(struct cpufreq_policy *policy, char *buf) \
{ \
return sprintf(buf, "%u\n", policy->object); \
}
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
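/*
 * For instance, show_one(scaling_min_freq, min) above expands to a
 * show_scaling_min_freq() helper that prints policy->min; the sysfs
 * machinery below exposes it as the per-policy scaling_min_freq file.
 */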
__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
return 0;
}
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
ssize_t ret;
unsigned int freq;
freq = arch_freq_get_on_cpu(policy->cpu);
if (freq)
ret = sprintf(buf, "%u\n", freq);
else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
else
ret = sprintf(buf, "%u\n", policy->cur);
return ret;
}
/*
* cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
*/
#define store_one(file_name, object) \
static ssize_t store_##file_name \
(struct cpufreq_policy *policy, const char *buf, size_t count) \
{ \
unsigned long val; \
int ret; \
\
ret = kstrtoul(buf, 0, &val); \
if (ret) \
return ret; \
\
ret = freq_qos_update_request(policy->object##_freq_req, val);\
return ret >= 0 ? count : ret; \
}
store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
/*
* show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
*/
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
char *buf)
{
unsigned int cur_freq = __cpufreq_get(policy);
if (cur_freq)
return sprintf(buf, "%u\n", cur_freq);
return sprintf(buf, "<unknown>\n");
}
/*
* show_scaling_governor - show the current policy for the specified CPU
*/
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
return sprintf(buf, "powersave\n");
else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
return sprintf(buf, "performance\n");
else if (policy->governor)
return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
policy->governor->name);
return -EINVAL;
}
/*
* store_scaling_governor - store policy for the specified CPU
*/
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
char str_governor[16];
int ret;
ret = sscanf(buf, "%15s", str_governor);
if (ret != 1)
return -EINVAL;
if (cpufreq_driver->setpolicy) {
unsigned int new_pol;
new_pol = cpufreq_parse_policy(str_governor);
if (!new_pol)
return -EINVAL;
ret = cpufreq_set_policy(policy, NULL, new_pol);
} else {
struct cpufreq_governor *new_gov;
new_gov = cpufreq_parse_governor(str_governor);
if (!new_gov)
return -EINVAL;
ret = cpufreq_set_policy(policy, new_gov,
CPUFREQ_POLICY_UNKNOWN);
module_put(new_gov->owner);
}
return ret ? ret : count;
}
/*
* show_scaling_driver - show the cpufreq driver currently loaded
*/
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
/*
* show_scaling_available_governors - show the available CPUfreq governors
*/
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
char *buf)
{
ssize_t i = 0;
struct cpufreq_governor *t;
if (!has_target()) {
i += sprintf(buf, "performance powersave");
goto out;
}
mutex_lock(&cpufreq_governor_mutex);
for_each_governor(t) {
if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
- (CPUFREQ_NAME_LEN + 2)))
break;
i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
}
mutex_unlock(&cpufreq_governor_mutex);
out:
i += sprintf(&buf[i], "\n");
return i;
}
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
ssize_t i = 0;
unsigned int cpu;
for_each_cpu(cpu, mask) {
i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u ", cpu);
if (i >= (PAGE_SIZE - 5))
break;
}
/* Remove the extra space at the end */
i--;
i += sprintf(&buf[i], "\n");
return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
/*
* show_related_cpus - show the CPUs affected by each transition even if
* hw coordination is in use
*/
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
return cpufreq_show_cpus(policy->related_cpus, buf);
}
/*
* show_affected_cpus - show the CPUs affected by each transition
*/
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
return cpufreq_show_cpus(policy->cpus, buf);
}
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
unsigned int freq = 0;
unsigned int ret;
if (!policy->governor || !policy->governor->store_setspeed)
return -EINVAL;
ret = sscanf(buf, "%u", &freq);
if (ret != 1)
return -EINVAL;
policy->governor->store_setspeed(policy, freq);
return count;
}
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
if (!policy->governor || !policy->governor->show_setspeed)
return sprintf(buf, "<unsupported>\n");
return policy->governor->show_setspeed(policy, buf);
}
/*
* show_bios_limit - show the current cpufreq HW/BIOS limitation
*/
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
unsigned int limit;
int ret;
ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
if (!ret)
return sprintf(buf, "%u\n", limit);
return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
static struct attribute *cpufreq_attrs[] = {
&cpuinfo_min_freq.attr,
&cpuinfo_max_freq.attr,
&cpuinfo_transition_latency.attr,
&scaling_min_freq.attr,
&scaling_max_freq.attr,
&affected_cpus.attr,
&related_cpus.attr,
&scaling_governor.attr,
&scaling_driver.attr,
&scaling_available_governors.attr,
&scaling_setspeed.attr,
NULL
};
ATTRIBUTE_GROUPS(cpufreq);
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EBUSY;
if (!fattr->show)
return -EIO;
down_read(&policy->rwsem);
if (likely(!policy_is_inactive(policy)))
ret = fattr->show(policy, buf);
up_read(&policy->rwsem);
return ret;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EBUSY;
if (!fattr->store)
return -EIO;
down_write(&policy->rwsem);
if (likely(!policy_is_inactive(policy)))
ret = fattr->store(policy, buf, count);
up_write(&policy->rwsem);
return ret;
}
static void cpufreq_sysfs_release(struct kobject *kobj)
{
struct cpufreq_policy *policy = to_policy(kobj);
pr_debug("last reference is dropped\n");
complete(&policy->kobj_unregister);
}
static const struct sysfs_ops sysfs_ops = {
.show = show,
.store = store,
};
static const struct kobj_type ktype_cpufreq = {
.sysfs_ops = &sysfs_ops,
.default_groups = cpufreq_groups,
.release = cpufreq_sysfs_release,
};
static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
struct device *dev)
{
if (unlikely(!dev))
return;
if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
return;
dev_dbg(dev, "%s: Adding symlink\n", __func__);
if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
dev_err(dev, "cpufreq symlink creation failed\n");
}
static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu,
struct device *dev)
{
dev_dbg(dev, "%s: Removing symlink\n", __func__);
sysfs_remove_link(&dev->kobj, "cpufreq");
cpumask_clear_cpu(cpu, policy->real_cpus);
}
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
struct freq_attr **drv_attr;
int ret = 0;
/* set up files for this cpu device */
drv_attr = cpufreq_driver->attr;
while (drv_attr && *drv_attr) {
ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
if (ret)
return ret;
drv_attr++;
}
if (cpufreq_driver->get) {
ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
if (ret)
return ret;
}
ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
if (ret)
return ret;
if (cpufreq_driver->bios_limit) {
ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
if (ret)
return ret;
}
if (cpufreq_boost_supported()) {
ret = sysfs_create_file(&policy->kobj, &local_boost.attr);
if (ret)
return ret;
}
return 0;
}
static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
struct cpufreq_governor *gov = NULL;
unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
int ret;
if (has_target()) {
/* Update policy governor to the one used before hotplug. */
gov = get_governor(policy->last_governor);
if (gov) {
pr_debug("Restoring governor %s for cpu %d\n",
gov->name, policy->cpu);
} else {
gov = get_governor(default_governor);
}
if (!gov) {
gov = cpufreq_default_governor();
__module_get(gov->owner);
}
} else {
/* Use the default policy if there is no last_policy. */
if (policy->last_policy) {
pol = policy->last_policy;
} else {
pol = cpufreq_parse_policy(default_governor);
/*
* In case the default governor is neither "performance"
* nor "powersave", fall back to the initial policy
* value set by the driver.
*/
if (pol == CPUFREQ_POLICY_UNKNOWN)
pol = policy->policy;
}
if (pol != CPUFREQ_POLICY_PERFORMANCE &&
pol != CPUFREQ_POLICY_POWERSAVE)
return -ENODATA;
}
ret = cpufreq_set_policy(policy, gov, pol);
if (gov)
module_put(gov->owner);
return ret;
}
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
int ret = 0;
/* Has this CPU been taken care of already? */
if (cpumask_test_cpu(cpu, policy->cpus))
return 0;
down_write(&policy->rwsem);
if (has_target())
cpufreq_stop_governor(policy);
cpumask_set_cpu(cpu, policy->cpus);
if (has_target()) {
ret = cpufreq_start_governor(policy);
if (ret)
pr_err("%s: Failed to start governor\n", __func__);
}
up_write(&policy->rwsem);
return ret;
}
void refresh_frequency_limits(struct cpufreq_policy *policy)
{
if (!policy_is_inactive(policy)) {
pr_debug("updating policy for CPU %u\n", policy->cpu);
cpufreq_set_policy(policy, policy->governor, policy->policy);
}
}
EXPORT_SYMBOL(refresh_frequency_limits);
static void handle_update(struct work_struct *work)
{
struct cpufreq_policy *policy =
container_of(work, struct cpufreq_policy, update);
pr_debug("handle_update for cpu %u called\n", policy->cpu);
down_write(&policy->rwsem);
refresh_frequency_limits(policy);
up_write(&policy->rwsem);
}
static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
void *data)
{
struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);
schedule_work(&policy->update);
return 0;
}
static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
void *data)
{
struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);
schedule_work(&policy->update);
return 0;
}
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
struct kobject *kobj;
struct completion *cmp;
down_write(&policy->rwsem);
cpufreq_stats_free_table(policy);
kobj = &policy->kobj;
cmp = &policy->kobj_unregister;
up_write(&policy->rwsem);
kobject_put(kobj);
/*
	 * We need to make sure that the underlying kobj is actually no longer
	 * referenced by anybody before we proceed with unloading.
*/
pr_debug("waiting for dropping of refcount\n");
wait_for_completion(cmp);
pr_debug("wait complete\n");
}
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
struct cpufreq_policy *policy;
struct device *dev = get_cpu_device(cpu);
int ret;
if (!dev)
return NULL;
policy = kzalloc(sizeof(*policy), GFP_KERNEL);
if (!policy)
return NULL;
if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
goto err_free_policy;
if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
goto err_free_cpumask;
if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
goto err_free_rcpumask;
init_completion(&policy->kobj_unregister);
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
cpufreq_global_kobject, "policy%u", cpu);
if (ret) {
dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
/*
* The entire policy object will be freed below, but the extra
* memory allocated for the kobject name needs to be freed by
* releasing the kobject.
*/
kobject_put(&policy->kobj);
goto err_free_real_cpus;
}
freq_constraints_init(&policy->constraints);
policy->nb_min.notifier_call = cpufreq_notifier_min;
policy->nb_max.notifier_call = cpufreq_notifier_max;
ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
&policy->nb_min);
if (ret) {
dev_err(dev, "Failed to register MIN QoS notifier: %d (CPU%u)\n",
ret, cpu);
goto err_kobj_remove;
}
ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
&policy->nb_max);
if (ret) {
dev_err(dev, "Failed to register MAX QoS notifier: %d (CPU%u)\n",
ret, cpu);
goto err_min_qos_notifier;
}
INIT_LIST_HEAD(&policy->policy_list);
init_rwsem(&policy->rwsem);
spin_lock_init(&policy->transition_lock);
init_waitqueue_head(&policy->transition_wait);
INIT_WORK(&policy->update, handle_update);
policy->cpu = cpu;
return policy;
err_min_qos_notifier:
freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
&policy->nb_min);
err_kobj_remove:
cpufreq_policy_put_kobj(policy);
err_free_real_cpus:
free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
free_cpumask_var(policy->related_cpus);
err_free_cpumask:
free_cpumask_var(policy->cpus);
err_free_policy:
kfree(policy);
return NULL;
}
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
unsigned long flags;
int cpu;
/*
* The callers must ensure the policy is inactive by now, to avoid any
* races with show()/store() callbacks.
*/
if (unlikely(!policy_is_inactive(policy)))
pr_warn("%s: Freeing active policy\n", __func__);
/* Remove policy from list */
write_lock_irqsave(&cpufreq_driver_lock, flags);
list_del(&policy->policy_list);
for_each_cpu(cpu, policy->related_cpus)
per_cpu(cpufreq_cpu_data, cpu) = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
&policy->nb_max);
freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
&policy->nb_min);
/* Cancel any pending policy->update work before freeing the policy. */
cancel_work_sync(&policy->update);
if (policy->max_freq_req) {
/*
* Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
* notification, since CPUFREQ_CREATE_POLICY notification was
* sent after adding max_freq_req earlier.
*/
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_REMOVE_POLICY, policy);
freq_qos_remove_request(policy->max_freq_req);
}
freq_qos_remove_request(policy->min_freq_req);
kfree(policy->min_freq_req);
cpufreq_policy_put_kobj(policy);
free_cpumask_var(policy->real_cpus);
free_cpumask_var(policy->related_cpus);
free_cpumask_var(policy->cpus);
kfree(policy);
}
static int cpufreq_online(unsigned int cpu)
{
struct cpufreq_policy *policy;
bool new_policy;
unsigned long flags;
unsigned int j;
int ret;
pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
/* Check if this CPU already has a policy to manage it */
policy = per_cpu(cpufreq_cpu_data, cpu);
if (policy) {
WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
if (!policy_is_inactive(policy))
return cpufreq_add_policy_cpu(policy, cpu);
/* This is the only online CPU for the policy. Start over. */
new_policy = false;
down_write(&policy->rwsem);
policy->cpu = cpu;
policy->governor = NULL;
} else {
new_policy = true;
policy = cpufreq_policy_alloc(cpu);
if (!policy)
return -ENOMEM;
down_write(&policy->rwsem);
}
if (!new_policy && cpufreq_driver->online) {
/* Recover policy->cpus using related_cpus */
cpumask_copy(policy->cpus, policy->related_cpus);
ret = cpufreq_driver->online(policy);
if (ret) {
pr_debug("%s: %d: initialization failed\n", __func__,
__LINE__);
goto out_exit_policy;
}
} else {
cpumask_copy(policy->cpus, cpumask_of(cpu));
/*
		 * Call the driver. From then on the driver must be able to
		 * accept all calls to ->verify and ->setpolicy for this CPU.
*/
ret = cpufreq_driver->init(policy);
if (ret) {
pr_debug("%s: %d: initialization failed\n", __func__,
__LINE__);
goto out_free_policy;
}
/*
* The initialization has succeeded and the policy is online.
* If there is a problem with its frequency table, take it
* offline and drop it.
*/
ret = cpufreq_table_validate_and_sort(policy);
if (ret)
goto out_offline_policy;
/* related_cpus should at least include policy->cpus. */
cpumask_copy(policy->related_cpus, policy->cpus);
}
	/*
	 * The affected CPUs must always be the ones that are online; we
	 * aren't managing offline CPUs here.
	 */
cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
if (new_policy) {
for_each_cpu(j, policy->related_cpus) {
per_cpu(cpufreq_cpu_data, j) = policy;
add_cpu_dev_symlink(policy, j, get_cpu_device(j));
}
policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
GFP_KERNEL);
if (!policy->min_freq_req) {
ret = -ENOMEM;
goto out_destroy_policy;
}
ret = freq_qos_add_request(&policy->constraints,
policy->min_freq_req, FREQ_QOS_MIN,
FREQ_QOS_MIN_DEFAULT_VALUE);
if (ret < 0) {
/*
* So we don't call freq_qos_remove_request() for an
* uninitialized request.
*/
kfree(policy->min_freq_req);
policy->min_freq_req = NULL;
goto out_destroy_policy;
}
/*
* This must be initialized right here to avoid calling
* freq_qos_remove_request() on uninitialized request in case
* of errors.
*/
policy->max_freq_req = policy->min_freq_req + 1;
ret = freq_qos_add_request(&policy->constraints,
policy->max_freq_req, FREQ_QOS_MAX,
FREQ_QOS_MAX_DEFAULT_VALUE);
if (ret < 0) {
policy->max_freq_req = NULL;
goto out_destroy_policy;
}
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_CREATE_POLICY, policy);
}
if (cpufreq_driver->get && has_target()) {
policy->cur = cpufreq_driver->get(policy->cpu);
if (!policy->cur) {
ret = -EIO;
pr_err("%s: ->get() failed\n", __func__);
goto out_destroy_policy;
}
}
	/*
	 * Sometimes boot loaders set CPU frequency to a value outside of
	 * the frequency table present with the cpufreq core. In such cases
	 * the CPU might be unstable if it has to run on that frequency for a
	 * long duration, so it's better to set it to a frequency which is
	 * specified in the freq-table. This also makes cpufreq stats
	 * inconsistent, as cpufreq-stats would fail to register because the
	 * current frequency of the CPU isn't found in the freq-table.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, otherwise we would end up setting the freq to the lowest
	 * entry of the table, as 'cur' is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" because otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur would be
	 * equal to target-freq.
	 */
if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
&& has_target()) {
unsigned int old_freq = policy->cur;
		/* Are we running at an unknown frequency? */
ret = cpufreq_frequency_table_get_index(policy, old_freq);
if (ret == -EINVAL) {
ret = __cpufreq_driver_target(policy, old_freq - 1,
CPUFREQ_RELATION_L);
			/*
			 * Reaching here within a few seconds after boot does
			 * not mean that the system will remain stable at the
			 * "unknown" frequency for a longer duration. Hence,
			 * the BUG_ON().
			 */
			BUG_ON(ret);
			pr_info("%s: CPU%d: Running at unlisted initial frequency: %u kHz, changing to: %u kHz\n",
				__func__, policy->cpu, old_freq, policy->cur);
}
}
if (new_policy) {
ret = cpufreq_add_dev_interface(policy);
if (ret)
goto out_destroy_policy;
cpufreq_stats_create_table(policy);
write_lock_irqsave(&cpufreq_driver_lock, flags);
list_add(&policy->policy_list, &cpufreq_policy_list);
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
/*
* Register with the energy model before
* sched_cpufreq_governor_change() is called, which will result
* in rebuilding of the sched domains, which should only be done
* once the energy model is properly initialized for the policy
* first.
*
* Also, this should be called before the policy is registered
* with cooling framework.
*/
if (cpufreq_driver->register_em)
cpufreq_driver->register_em(policy);
}
ret = cpufreq_init_policy(policy);
if (ret) {
pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
__func__, cpu, ret);
goto out_destroy_policy;
}
up_write(&policy->rwsem);
kobject_uevent(&policy->kobj, KOBJ_ADD);
	/* Callback for the driver to do any post-initialization work once the policy is ready */
if (cpufreq_driver->ready)
cpufreq_driver->ready(policy);
if (cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
pr_debug("initialization complete\n");
return 0;
out_destroy_policy:
for_each_cpu(j, policy->real_cpus)
remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
out_offline_policy:
if (cpufreq_driver->offline)
cpufreq_driver->offline(policy);
out_exit_policy:
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
out_free_policy:
cpumask_clear(policy->cpus);
up_write(&policy->rwsem);
cpufreq_policy_free(policy);
return ret;
}
/**
* cpufreq_add_dev - the cpufreq interface for a CPU device.
* @dev: CPU device.
* @sif: Subsystem interface structure pointer (not used)
*/
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
struct cpufreq_policy *policy;
unsigned cpu = dev->id;
int ret;
dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
if (cpu_online(cpu)) {
ret = cpufreq_online(cpu);
if (ret)
return ret;
}
/* Create sysfs link on CPU registration */
policy = per_cpu(cpufreq_cpu_data, cpu);
if (policy)
add_cpu_dev_symlink(policy, cpu, dev);
return 0;
}
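/*
 * __cpufreq_offline - Take @cpu out of @policy.
 *
 * Stop the governor and drop @cpu from the policy's mask. If the policy is
 * still active, nominate a new leading CPU and restart the governor;
 * otherwise tear the policy down, preferring the light-weight ->offline()
 * callback over a full ->exit() when the driver provides one. Must be called
 * with policy->rwsem held for writing.
 */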
static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
{
int ret;
if (has_target())
cpufreq_stop_governor(policy);
cpumask_clear_cpu(cpu, policy->cpus);
if (!policy_is_inactive(policy)) {
/* Nominate a new CPU if necessary. */
if (cpu == policy->cpu)
policy->cpu = cpumask_any(policy->cpus);
/* Start the governor again for the active policy. */
if (has_target()) {
ret = cpufreq_start_governor(policy);
if (ret)
pr_err("%s: Failed to start governor\n", __func__);
}
return;
}
if (has_target())
strncpy(policy->last_governor, policy->governor->name,
CPUFREQ_NAME_LEN);
else
policy->last_policy = policy->policy;
if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
cpufreq_cooling_unregister(policy->cdev);
policy->cdev = NULL;
}
if (has_target())
cpufreq_exit_governor(policy);
/*
* Perform the ->offline() during light-weight tear-down, as
* that allows fast recovery when the CPU comes back.
*/
if (cpufreq_driver->offline) {
cpufreq_driver->offline(policy);
} else if (cpufreq_driver->exit) {
cpufreq_driver->exit(policy);
policy->freq_table = NULL;
}
}
static int cpufreq_offline(unsigned int cpu)
{
struct cpufreq_policy *policy;
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
policy = cpufreq_cpu_get_raw(cpu);
if (!policy) {
pr_debug("%s: No cpu_data found\n", __func__);
return 0;
}
down_write(&policy->rwsem);
__cpufreq_offline(cpu, policy);
up_write(&policy->rwsem);
return 0;
}
/*
* cpufreq_remove_dev - remove a CPU device
*
* Removes the cpufreq interface for a CPU device.
*/
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
unsigned int cpu = dev->id;
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
if (!policy)
return;
down_write(&policy->rwsem);
if (cpu_online(cpu))
__cpufreq_offline(cpu, policy);
remove_cpu_dev_symlink(policy, cpu, dev);
if (!cpumask_empty(policy->real_cpus)) {
up_write(&policy->rwsem);
return;
}
/* We did light-weight exit earlier, do full tear down now */
if (cpufreq_driver->offline)
cpufreq_driver->exit(policy);
up_write(&policy->rwsem);
cpufreq_policy_free(policy);
}
/**
* cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
* @policy: Policy managing CPUs.
* @new_freq: New CPU frequency.
*
* Adjust to the current frequency first and clean up later by either calling
* cpufreq_update_policy(), or scheduling handle_update().
*/
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
unsigned int new_freq)
{
struct cpufreq_freqs freqs;
pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
policy->cur, new_freq);
freqs.old = policy->cur;
freqs.new = new_freq;
cpufreq_freq_transition_begin(policy, &freqs);
cpufreq_freq_transition_end(policy, &freqs, 0);
}
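/*
 * Ask the driver for the current hardware frequency and, unless fast
 * switching is enabled, resynchronize policy->cur with it. Deviations of
 * less than 1 MHz are ignored, since some platforms report frequencies
 * slightly off the values in the frequency table.
 */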
static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
{
unsigned int new_freq;
new_freq = cpufreq_driver->get(policy->cpu);
if (!new_freq)
return 0;
/*
* If fast frequency switching is used with the given policy, the check
* against policy->cur is pointless, so skip it in that case.
*/
if (policy->fast_switch_enabled || !has_target())
return new_freq;
if (policy->cur != new_freq) {
/*
* For some platforms, the frequency returned by hardware may be
* slightly different from what is provided in the frequency
* table, for example hardware may return 499 MHz instead of 500
* MHz. In such cases it is better to avoid getting into
* unnecessary frequency updates.
*/
if (abs(policy->cur - new_freq) < KHZ_PER_MHZ)
return policy->cur;
cpufreq_out_of_sync(policy, new_freq);
if (update)
schedule_work(&policy->update);
}
return new_freq;
}
/**
* cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
* @cpu: CPU number
*
 * This is the last known freq, without actually getting it from the driver.
 * The return value will be the same as what is shown in scaling_cur_freq in
 * sysfs.
*/
unsigned int cpufreq_quick_get(unsigned int cpu)
{
struct cpufreq_policy *policy;
unsigned int ret_freq = 0;
unsigned long flags;
read_lock_irqsave(&cpufreq_driver_lock, flags);
if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
ret_freq = cpufreq_driver->get(cpu);
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
return ret_freq;
}
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
policy = cpufreq_cpu_get(cpu);
if (policy) {
ret_freq = policy->cur;
cpufreq_cpu_put(policy);
}
return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
/**
* cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
* @cpu: CPU number
*
* Just return the max possible frequency for a given CPU.
*/
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
unsigned int ret_freq = 0;
if (policy) {
ret_freq = policy->max;
cpufreq_cpu_put(policy);
}
return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
/**
* cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
* @cpu: CPU number
*
* The default return value is the max_freq field of cpuinfo.
*/
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
unsigned int ret_freq = 0;
if (policy) {
ret_freq = policy->cpuinfo.max_freq;
cpufreq_cpu_put(policy);
}
return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
if (unlikely(policy_is_inactive(policy)))
return 0;
return cpufreq_verify_current_freq(policy, true);
}
/**
* cpufreq_get - get the current CPU frequency (in kHz)
* @cpu: CPU number
*
 * Get the current frequency of the CPU.
*/
unsigned int cpufreq_get(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
unsigned int ret_freq = 0;
if (policy) {
down_read(&policy->rwsem);
if (cpufreq_driver->get)
ret_freq = __cpufreq_get(policy);
up_read(&policy->rwsem);
cpufreq_cpu_put(policy);
}
return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
static struct subsys_interface cpufreq_interface = {
.name = "cpufreq",
.subsys = &cpu_subsys,
.add_dev = cpufreq_add_dev,
.remove_dev = cpufreq_remove_dev,
};
/*
 * In case the platform wants some specific frequency to be configured
 * during suspend.
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
int ret;
if (!policy->suspend_freq) {
pr_debug("%s: suspend_freq not defined\n", __func__);
return 0;
}
pr_debug("%s: Setting suspend-freq: %u\n", __func__,
policy->suspend_freq);
ret = __cpufreq_driver_target(policy, policy->suspend_freq,
CPUFREQ_RELATION_H);
if (ret)
pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
__func__, policy->suspend_freq, ret);
return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
/**
* cpufreq_suspend() - Suspend CPUFreq governors.
*
 * Called during system-wide suspend/hibernate cycles for suspending governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle, because some of the devices (e.g. i2c, regulators) they use for
 * changing frequency are suspended shortly after this point.
*/
void cpufreq_suspend(void)
{
struct cpufreq_policy *policy;
if (!cpufreq_driver)
return;
if (!has_target() && !cpufreq_driver->suspend)
goto suspend;
pr_debug("%s: Suspending Governors\n", __func__);
for_each_active_policy(policy) {
if (has_target()) {
down_write(&policy->rwsem);
cpufreq_stop_governor(policy);
up_write(&policy->rwsem);
}
if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
pr_err("%s: Failed to suspend driver: %s\n", __func__,
cpufreq_driver->name);
}
suspend:
cpufreq_suspended = true;
}
/**
* cpufreq_resume() - Resume CPUFreq governors.
*
 * Called during system-wide suspend/hibernate cycles for resuming governors
 * that were suspended with cpufreq_suspend().
*/
void cpufreq_resume(void)
{
struct cpufreq_policy *policy;
int ret;
if (!cpufreq_driver)
return;
if (unlikely(!cpufreq_suspended))
return;
cpufreq_suspended = false;
if (!has_target() && !cpufreq_driver->resume)
return;
pr_debug("%s: Resuming Governors\n", __func__);
for_each_active_policy(policy) {
if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
pr_err("%s: Failed to resume driver: %s\n", __func__,
cpufreq_driver->name);
} else if (has_target()) {
down_write(&policy->rwsem);
ret = cpufreq_start_governor(policy);
up_write(&policy->rwsem);
if (ret)
pr_err("%s: Failed to start governor for CPU%u's policy\n",
__func__, policy->cpu);
}
}
}
/**
* cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
* @flags: Flags to test against the current cpufreq driver's flags.
*
* Assumes that the driver is there, so callers must ensure that this is the
* case.
*/
bool cpufreq_driver_test_flags(u16 flags)
{
return !!(cpufreq_driver->flags & flags);
}
/**
* cpufreq_get_current_driver - Return the current driver's name.
*
* Return the name string of the currently registered cpufreq driver or NULL if
* none.
*/
const char *cpufreq_get_current_driver(void)
{
if (cpufreq_driver)
return cpufreq_driver->name;
return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
/**
* cpufreq_get_driver_data - Return current driver data.
*
* Return the private data of the currently registered cpufreq driver, or NULL
* if no cpufreq driver has been registered.
*/
void *cpufreq_get_driver_data(void)
{
if (cpufreq_driver)
return cpufreq_driver->driver_data;
return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
/*********************************************************************
* NOTIFIER LISTS INTERFACE *
*********************************************************************/
/**
* cpufreq_register_notifier - Register a notifier with cpufreq.
* @nb: notifier function to register.
* @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
*
 * Add a notifier to one of two lists: either a list of notifiers that run on
 * clock rate changes (once before and once after every transition), or a list
 * of notifiers that run on cpufreq policy changes.
*
* This function may sleep and it has the same return values as
* blocking_notifier_chain_register().
*/
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
int ret;
if (cpufreq_disabled())
return -EINVAL;
switch (list) {
case CPUFREQ_TRANSITION_NOTIFIER:
mutex_lock(&cpufreq_fast_switch_lock);
if (cpufreq_fast_switch_count > 0) {
mutex_unlock(&cpufreq_fast_switch_lock);
return -EBUSY;
}
ret = srcu_notifier_chain_register(
&cpufreq_transition_notifier_list, nb);
if (!ret)
cpufreq_fast_switch_count--;
mutex_unlock(&cpufreq_fast_switch_lock);
break;
case CPUFREQ_POLICY_NOTIFIER:
ret = blocking_notifier_chain_register(
&cpufreq_policy_notifier_list, nb);
break;
default:
ret = -EINVAL;
}
return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
/**
* cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
* @nb: notifier block to be unregistered.
* @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
*
* Remove a notifier from one of the cpufreq notifier lists.
*
* This function may sleep and it has the same return values as
* blocking_notifier_chain_unregister().
*/
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
int ret;
if (cpufreq_disabled())
return -EINVAL;
switch (list) {
case CPUFREQ_TRANSITION_NOTIFIER:
mutex_lock(&cpufreq_fast_switch_lock);
ret = srcu_notifier_chain_unregister(
&cpufreq_transition_notifier_list, nb);
if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
cpufreq_fast_switch_count++;
mutex_unlock(&cpufreq_fast_switch_lock);
break;
case CPUFREQ_POLICY_NOTIFIER:
ret = blocking_notifier_chain_unregister(
&cpufreq_policy_notifier_list, nb);
break;
default:
ret = -EINVAL;
}
return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
/*********************************************************************
* GOVERNORS *
*********************************************************************/
/**
* cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
* @policy: cpufreq policy to switch the frequency for.
* @target_freq: New frequency to set (may be approximate).
*
* Carry out a fast frequency switch without sleeping.
*
* The driver's ->fast_switch() callback invoked by this function must be
* suitable for being called from within RCU-sched read-side critical sections
* and it is expected to select the minimum available frequency greater than or
* equal to @target_freq (CPUFREQ_RELATION_L).
*
* This function must not be called if policy->fast_switch_enabled is unset.
*
* Governors calling this function must guarantee that it will never be invoked
* twice in parallel for the same policy and that it will never be called in
* parallel with either ->target() or ->target_index() for the same policy.
*
* Returns the actual frequency set for the CPU.
*
* If 0 is returned by the driver's ->fast_switch() callback to indicate an
* error condition, the hardware configuration must be preserved.
*/
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
unsigned int freq;
int cpu;
target_freq = clamp_val(target_freq, policy->min, policy->max);
freq = cpufreq_driver->fast_switch(policy, target_freq);
if (!freq)
return 0;
policy->cur = freq;
arch_set_freq_scale(policy->related_cpus, freq,
policy->cpuinfo.max_freq);
cpufreq_stats_record_transition(policy, freq);
if (trace_cpu_frequency_enabled()) {
for_each_cpu(cpu, policy->cpus)
trace_cpu_frequency(freq, cpu);
}
return freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
/**
* cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
* @cpu: Target CPU.
* @min_perf: Minimum (required) performance level (units of @capacity).
* @target_perf: Target (desired) performance level (units of @capacity).
* @capacity: Capacity of the target CPU.
*
* Carry out a fast performance level switch of @cpu without sleeping.
*
* The driver's ->adjust_perf() callback invoked by this function must be
* suitable for being called from within RCU-sched read-side critical sections
* and it is expected to select a suitable performance level equal to or above
* @min_perf and preferably equal to or below @target_perf.
*
* This function must not be called if policy->fast_switch_enabled is unset.
*
* Governors calling this function must guarantee that it will never be invoked
* twice in parallel for the same CPU and that it will never be called in
* parallel with either ->target() or ->target_index() or ->fast_switch() for
* the same CPU.
*/
void cpufreq_driver_adjust_perf(unsigned int cpu,
unsigned long min_perf,
unsigned long target_perf,
unsigned long capacity)
{
cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
}
/**
* cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
*
* Return 'true' if the ->adjust_perf callback is present for the
* current driver or 'false' otherwise.
*/
bool cpufreq_driver_has_adjust_perf(void)
{
return !!cpufreq_driver->adjust_perf;
}
/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, int index)
{
int ret;
freqs->new = cpufreq_driver->get_intermediate(policy, index);
/* We don't need to switch to intermediate freq */
if (!freqs->new)
return 0;
pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
__func__, policy->cpu, freqs->old, freqs->new);
cpufreq_freq_transition_begin(policy, freqs);
ret = cpufreq_driver->target_intermediate(policy, index);
cpufreq_freq_transition_end(policy, freqs, ret);
if (ret)
pr_err("%s: Failed to change to intermediate frequency: %d\n",
__func__, ret);
return ret;
}
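/*
 * Switch @policy to the frequency at @index in its frequency table, going
 * through the driver's intermediate frequency first when one is provided and
 * issuing transition notifications unless the driver sends them
 * asynchronously.
 */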
static int __target_index(struct cpufreq_policy *policy, int index)
{
struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
unsigned int restore_freq, intermediate_freq = 0;
unsigned int newfreq = policy->freq_table[index].frequency;
int retval = -EINVAL;
bool notify;
if (newfreq == policy->cur)
return 0;
/* Save last value to restore later on errors */
restore_freq = policy->cur;
notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
if (notify) {
/* Handle switching to intermediate frequency */
if (cpufreq_driver->get_intermediate) {
retval = __target_intermediate(policy, &freqs, index);
if (retval)
return retval;
intermediate_freq = freqs.new;
/* Set old freq to intermediate */
if (intermediate_freq)
freqs.old = freqs.new;
}
freqs.new = newfreq;
pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
__func__, policy->cpu, freqs.old, freqs.new);
cpufreq_freq_transition_begin(policy, &freqs);
}
retval = cpufreq_driver->target_index(policy, index);
if (retval)
pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
retval);
if (notify) {
cpufreq_freq_transition_end(policy, &freqs, retval);
/*
* Failed after setting to intermediate freq? Driver should have
* reverted back to initial frequency and so should we. Check
* here for intermediate_freq instead of get_intermediate, in
* case we haven't switched to intermediate freq at all.
*/
if (unlikely(retval && intermediate_freq)) {
freqs.old = intermediate_freq;
freqs.new = restore_freq;
cpufreq_freq_transition_begin(policy, &freqs);
cpufreq_freq_transition_end(policy, &freqs, 0);
}
}
return retval;
}
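/* Carry out a target frequency change; callers must hold policy->rwsem. */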
int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
unsigned int old_target_freq = target_freq;
if (cpufreq_disabled())
return -ENODEV;
target_freq = __resolve_freq(policy, target_freq, relation);
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
policy->cpu, target_freq, relation, old_target_freq);
	/*
	 * This might look like a redundant call, as we are checking it again
	 * after finding the index. But it is left intentionally for cases
	 * where exactly the same frequency is requested again, so that we can
	 * save a few function calls.
	 */
if (target_freq == policy->cur &&
!(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
return 0;
if (cpufreq_driver->target) {
/*
* If the driver hasn't setup a single inefficient frequency,
* it's unlikely it knows how to decode CPUFREQ_RELATION_E.
*/
if (!policy->efficiencies_available)
relation &= ~CPUFREQ_RELATION_E;
return cpufreq_driver->target(policy, target_freq, relation);
}
if (!cpufreq_driver->target_index)
return -EINVAL;
return __target_index(policy, policy->cached_resolved_idx);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
int ret;
down_write(&policy->rwsem);
ret = __cpufreq_driver_target(policy, target_freq, relation);
up_write(&policy->rwsem);
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
return NULL;
}
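/*
 * Take a reference on policy->governor's module and run the governor's
 * ->init() callback, switching to the platform's fallback governor first
 * when the driver disallows dynamic frequency switching.
 */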
static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
int ret;
/* Don't start any governor operations if we are entering suspend */
if (cpufreq_suspended)
return 0;
	/*
	 * The governor might not be initialized here if an ACPI _PPC change
	 * notification happened, so check it.
	 */
if (!policy->governor)
return -EINVAL;
	/* Platform doesn't want dynamic frequency switching? */
if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
struct cpufreq_governor *gov = cpufreq_fallback_governor();
if (gov) {
pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
policy->governor->name, gov->name);
policy->governor = gov;
} else {
return -EINVAL;
}
}
if (!try_module_get(policy->governor->owner))
return -EINVAL;
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
if (policy->governor->init) {
ret = policy->governor->init(policy);
if (ret) {
module_put(policy->governor->owner);
return ret;
}
}
policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);
return 0;
}
static void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
if (cpufreq_suspended || !policy->governor)
return;
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
if (policy->governor->exit)
policy->governor->exit(policy);
module_put(policy->governor->owner);
}
int cpufreq_start_governor(struct cpufreq_policy *policy)
{
int ret;
if (cpufreq_suspended)
return 0;
if (!policy->governor)
return -EINVAL;
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
if (cpufreq_driver->get)
cpufreq_verify_current_freq(policy, false);
if (policy->governor->start) {
ret = policy->governor->start(policy);
if (ret)
return ret;
}
if (policy->governor->limits)
policy->governor->limits(policy);
return 0;
}
void cpufreq_stop_governor(struct cpufreq_policy *policy)
{
if (cpufreq_suspended || !policy->governor)
return;
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
if (policy->governor->stop)
policy->governor->stop(policy);
}
static void cpufreq_governor_limits(struct cpufreq_policy *policy)
{
if (cpufreq_suspended || !policy->governor)
return;
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
if (policy->governor->limits)
policy->governor->limits(policy);
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
int err;
if (!governor)
return -EINVAL;
if (cpufreq_disabled())
return -ENODEV;
mutex_lock(&cpufreq_governor_mutex);
err = -EBUSY;
if (!find_governor(governor->name)) {
err = 0;
list_add(&governor->governor_list, &cpufreq_governor_list);
}
mutex_unlock(&cpufreq_governor_mutex);
return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
struct cpufreq_policy *policy;
unsigned long flags;
if (!governor)
return;
if (cpufreq_disabled())
return;
/* clear last_governor for all inactive policies */
read_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_inactive_policy(policy) {
if (!strcmp(policy->last_governor, governor->name)) {
policy->governor = NULL;
strcpy(policy->last_governor, "\0");
}
}
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
mutex_lock(&cpufreq_governor_mutex);
list_del(&governor->governor_list);
mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
* POLICY INTERFACE *
*********************************************************************/
/**
* cpufreq_get_policy - get the current cpufreq_policy
* @policy: struct cpufreq_policy into which the current cpufreq_policy
* is written
* @cpu: CPU to find the policy for
*
* Reads the current cpufreq policy.
*/
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
struct cpufreq_policy *cpu_policy;
if (!policy)
return -EINVAL;
cpu_policy = cpufreq_cpu_get(cpu);
if (!cpu_policy)
return -EINVAL;
memcpy(policy, cpu_policy, sizeof(*policy));
cpufreq_cpu_put(cpu_policy);
return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/**
* cpufreq_set_policy - Modify cpufreq policy parameters.
* @policy: Policy object to modify.
* @new_gov: Policy governor pointer.
* @new_pol: Policy value (for drivers with built-in governors).
*
* Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
* limits to be set for the policy, update @policy with the verified limits
* values and either invoke the driver's ->setpolicy() callback (if present) or
* carry out a governor update for @policy. That is, run the current governor's
* ->limits() callback (if @new_gov points to the same object as the one in
* @policy) or replace the governor for @policy with @new_gov.
*
* The cpuinfo part of @policy is not updated by this function.
*/
static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_governor *new_gov,
unsigned int new_pol)
{
struct cpufreq_policy_data new_data;
struct cpufreq_governor *old_gov;
int ret;
memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
new_data.freq_table = policy->freq_table;
new_data.cpu = policy->cpu;
	/*
	 * The PM QoS framework collects all the requests from users and
	 * provides us with the final aggregated value here.
	 */
new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
new_data.cpu, new_data.min, new_data.max);
/*
* Verify that the CPU speed can be set within these limits and make sure
* that min <= max.
*/
ret = cpufreq_driver->verify(&new_data);
if (ret)
return ret;
	/*
	 * Resolve policy min/max to available frequencies. This ensures that
	 * the resolved frequency will neither overshoot the requested maximum
	 * nor undershoot the requested minimum.
	 */
policy->min = new_data.min;
policy->max = new_data.max;
policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
trace_cpu_frequency_limits(policy);
policy->cached_target_freq = UINT_MAX;
pr_debug("new min and max freqs are %u - %u kHz\n",
policy->min, policy->max);
if (cpufreq_driver->setpolicy) {
policy->policy = new_pol;
pr_debug("setting range\n");
return cpufreq_driver->setpolicy(policy);
}
if (new_gov == policy->governor) {
pr_debug("governor limits update\n");
cpufreq_governor_limits(policy);
return 0;
}
pr_debug("governor switch\n");
/* save old, working values */
old_gov = policy->governor;
/* end old governor */
if (old_gov) {
cpufreq_stop_governor(policy);
cpufreq_exit_governor(policy);
}
/* start new governor */
policy->governor = new_gov;
ret = cpufreq_init_governor(policy);
if (!ret) {
ret = cpufreq_start_governor(policy);
if (!ret) {
pr_debug("governor change\n");
sched_cpufreq_governor_change(policy, old_gov);
return 0;
}
cpufreq_exit_governor(policy);
}
/* new governor failed, so re-start old one */
pr_debug("starting governor %s failed\n", policy->governor->name);
if (old_gov) {
policy->governor = old_gov;
if (cpufreq_init_governor(policy))
policy->governor = NULL;
else
cpufreq_start_governor(policy);
}
return ret;
}
/**
* cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
* @cpu: CPU to re-evaluate the policy for.
*
* Update the current frequency for the cpufreq policy of @cpu and use
* cpufreq_set_policy() to re-apply the min and max limits, which triggers the
* evaluation of policy notifiers and the cpufreq driver's ->verify() callback
* for the policy in question, among other things.
*/
void cpufreq_update_policy(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
if (!policy)
return;
/*
* BIOS might change freq behind our back
* -> ask driver for current freq and notify governors about a change
*/
if (cpufreq_driver->get && has_target() &&
(cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
goto unlock;
refresh_frequency_limits(policy);
unlock:
cpufreq_cpu_release(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
/**
* cpufreq_update_limits - Update policy limits for a given CPU.
* @cpu: CPU to update the policy limits for.
*
* Invoke the driver's ->update_limits callback if present or call
* cpufreq_update_policy() for @cpu.
*/
void cpufreq_update_limits(unsigned int cpu)
{
if (cpufreq_driver->update_limits)
cpufreq_driver->update_limits(cpu);
else
cpufreq_update_policy(cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
/*********************************************************************
* BOOST *
*********************************************************************/
static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
int ret;
if (!policy->freq_table)
return -ENXIO;
ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
if (ret) {
pr_err("%s: Policy frequency update failed\n", __func__);
return ret;
}
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
if (ret < 0)
return ret;
return 0;
}
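/*
 * Propagate the boost @state to every active policy, rolling the
 * driver-wide flag back if any per-policy update fails.
 */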
int cpufreq_boost_trigger_state(int state)
{
struct cpufreq_policy *policy;
unsigned long flags;
int ret = 0;
if (cpufreq_driver->boost_enabled == state)
return 0;
write_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver->boost_enabled = state;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
cpus_read_lock();
for_each_active_policy(policy) {
ret = cpufreq_driver->set_boost(policy, state);
if (ret)
goto err_reset_state;
policy->boost_enabled = state;
}
cpus_read_unlock();
return 0;
err_reset_state:
cpus_read_unlock();
write_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver->boost_enabled = !state;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
pr_err("%s: Cannot %s BOOST\n",
__func__, state ? "enable" : "disable");
return ret;
}
static bool cpufreq_boost_supported(void)
{
return cpufreq_driver->set_boost;
}
static int create_boost_sysfs_file(void)
{
int ret;
ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
if (ret)
pr_err("%s: cannot register global BOOST sysfs file\n",
__func__);
return ret;
}
static void remove_boost_sysfs_file(void)
{
if (cpufreq_boost_supported())
sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}
int cpufreq_enable_boost_support(void)
{
if (!cpufreq_driver)
return -EINVAL;
if (cpufreq_boost_supported())
return 0;
cpufreq_driver->set_boost = cpufreq_boost_set_sw;
/* This will get removed on driver unregister */
return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
int cpufreq_boost_enabled(void)
{
return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
/*********************************************************************
* REGISTER / UNREGISTER CPUFREQ DRIVER *
*********************************************************************/
static enum cpuhp_state hp_online;
static int cpuhp_cpufreq_online(unsigned int cpu)
{
cpufreq_online(cpu);
return 0;
}
static int cpuhp_cpufreq_offline(unsigned int cpu)
{
cpufreq_offline(cpu);
return 0;
}
/**
* cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
* submitted by the CPU Frequency driver.
*
* Registers a CPU Frequency driver to this core code. This code
* returns zero on success, -EEXIST when another driver got here first
* (and isn't unregistered in the meantime).
*
*/
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
unsigned long flags;
int ret;
if (cpufreq_disabled())
return -ENODEV;
	/*
	 * The cpufreq core depends heavily on the availability of device
	 * structures, so make sure they are available before proceeding
	 * further.
	 */
if (!get_cpu_device(0))
return -EPROBE_DEFER;
if (!driver_data || !driver_data->verify || !driver_data->init ||
!(driver_data->setpolicy || driver_data->target_index ||
driver_data->target) ||
(driver_data->setpolicy && (driver_data->target_index ||
driver_data->target)) ||
(!driver_data->get_intermediate != !driver_data->target_intermediate) ||
(!driver_data->online != !driver_data->offline) ||
(driver_data->adjust_perf && !driver_data->fast_switch))
return -EINVAL;
pr_debug("trying to register driver %s\n", driver_data->name);
/* Protect against concurrent CPU online/offline. */
cpus_read_lock();
write_lock_irqsave(&cpufreq_driver_lock, flags);
if (cpufreq_driver) {
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
ret = -EEXIST;
goto out;
}
cpufreq_driver = driver_data;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
/*
* Mark support for the scheduler's frequency invariance engine for
* drivers that implement target(), target_index() or fast_switch().
*/
if (!cpufreq_driver->setpolicy) {
static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
pr_debug("supports frequency invariance");
}
if (driver_data->setpolicy)
driver_data->flags |= CPUFREQ_CONST_LOOPS;
if (cpufreq_boost_supported()) {
ret = create_boost_sysfs_file();
if (ret)
goto err_null_driver;
}
ret = subsys_interface_register(&cpufreq_interface);
if (ret)
goto err_boost_unreg;
if (unlikely(list_empty(&cpufreq_policy_list))) {
/* if all ->init() calls failed, unregister */
ret = -ENODEV;
pr_debug("%s: No CPU initialized for driver %s\n", __func__,
driver_data->name);
goto err_if_unreg;
}
ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
"cpufreq:online",
cpuhp_cpufreq_online,
cpuhp_cpufreq_offline);
if (ret < 0)
goto err_if_unreg;
hp_online = ret;
ret = 0;
pr_debug("driver %s up and running\n", driver_data->name);
goto out;
err_if_unreg:
subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
remove_boost_sysfs_file();
err_null_driver:
write_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
cpus_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
/*
* cpufreq_unregister_driver - unregister the current CPUFreq driver
*
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * The call is ignored (with a warning) if @driver does not match the
 * currently registered driver.
*/
void cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
unsigned long flags;
if (WARN_ON(!cpufreq_driver || (driver != cpufreq_driver)))
return;
pr_debug("unregistering driver %s\n", driver->name);
/* Protect against concurrent cpu hotplug */
cpus_read_lock();
subsys_interface_unregister(&cpufreq_interface);
remove_boost_sysfs_file();
static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
cpuhp_remove_state_nocalls_cpuslocked(hp_online);
write_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
static int __init cpufreq_core_init(void)
{
struct cpufreq_governor *gov = cpufreq_default_governor();
struct device *dev_root;
if (cpufreq_disabled())
return -ENODEV;
dev_root = bus_get_dev_root(&cpu_subsys);
if (dev_root) {
cpufreq_global_kobject = kobject_create_and_add("cpufreq", &dev_root->kobj);
put_device(dev_root);
}
BUG_ON(!cpufreq_global_kobject);
if (!strlen(default_governor))
strncpy(default_governor, gov->name, CPUFREQ_NAME_LEN);
return 0;
}
module_param(off, int, 0444);
module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
core_initcall(cpufreq_core_init);
| linux-master | drivers/cpufreq/cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2013 Freescale Semiconductor, Inc.
*
* CPU Frequency Scaling driver for Freescale QorIQ SoCs.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/cpufreq.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/platform_device.h>
/**
* struct cpu_data
 * @pclk: the parent clocks of the cpu clock
* @table: frequency table
*/
struct cpu_data {
struct clk **pclk;
struct cpufreq_frequency_table *table;
};
/**
* struct soc_data - SoC specific data
* @flags: SOC_xxx
*/
struct soc_data {
u32 flags;
};
static u32 get_bus_freq(void)
{
struct device_node *soc;
u32 sysfreq;
struct clk *pltclk;
int ret;
/* get platform freq by searching bus-frequency property */
soc = of_find_node_by_type(NULL, "soc");
if (soc) {
ret = of_property_read_u32(soc, "bus-frequency", &sysfreq);
of_node_put(soc);
if (!ret)
return sysfreq;
}
/* get platform freq by its clock name */
pltclk = clk_get(NULL, "cg-pll0-div1");
if (IS_ERR(pltclk)) {
pr_err("%s: can't get bus frequency %ld\n",
__func__, PTR_ERR(pltclk));
return PTR_ERR(pltclk);
}
return clk_get_rate(pltclk);
}
static struct clk *cpu_to_clk(int cpu)
{
struct device_node *np;
struct clk *clk;
if (!cpu_present(cpu))
return NULL;
np = of_get_cpu_node(cpu, NULL);
if (!np)
return NULL;
clk = of_clk_get(np, 0);
of_node_put(np);
return clk;
}
/* traverse cpu nodes to get the mask of cpus sharing the same clock wire */
static void set_affected_cpus(struct cpufreq_policy *policy)
{
struct cpumask *dstp = policy->cpus;
struct clk *clk;
int i;
for_each_present_cpu(i) {
clk = cpu_to_clk(i);
if (IS_ERR(clk)) {
pr_err("%s: no clock for cpu %d\n", __func__, i);
continue;
}
if (clk_is_match(policy->clk, clk))
cpumask_set_cpu(i, dstp);
}
}
/* mark duplicated frequencies in the frequency table as invalid */
static void freq_table_redup(struct cpufreq_frequency_table *freq_table,
int count)
{
int i, j;
for (i = 1; i < count; i++) {
for (j = 0; j < i; j++) {
if (freq_table[j].frequency == CPUFREQ_ENTRY_INVALID ||
freq_table[j].frequency !=
freq_table[i].frequency)
continue;
freq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
break;
}
}
}
/* sort the frequencies in the frequency table in descending order */
static void freq_table_sort(struct cpufreq_frequency_table *freq_table,
int count)
{
int i, j, ind;
unsigned int freq, max_freq;
struct cpufreq_frequency_table table;
for (i = 0; i < count - 1; i++) {
max_freq = freq_table[i].frequency;
ind = i;
for (j = i + 1; j < count; j++) {
freq = freq_table[j].frequency;
if (freq == CPUFREQ_ENTRY_INVALID ||
freq <= max_freq)
continue;
ind = j;
max_freq = freq;
}
if (ind != i) {
/* exchange the frequencies */
table.driver_data = freq_table[i].driver_data;
table.frequency = freq_table[i].frequency;
freq_table[i].driver_data = freq_table[ind].driver_data;
freq_table[i].frequency = freq_table[ind].frequency;
freq_table[ind].driver_data = table.driver_data;
freq_table[ind].frequency = table.frequency;
}
}
}
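/*
 * Build the policy's frequency table from the parent clocks of the CPU's
 * clock mux, de-duplicate and sort it, and derive the transition latency
 * (12 platform clocks) from the bus frequency.
 */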
static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
struct device_node *np;
int i, count;
u32 freq;
struct clk *clk;
const struct clk_hw *hwclk;
struct cpufreq_frequency_table *table;
struct cpu_data *data;
unsigned int cpu = policy->cpu;
u64 u64temp;
np = of_get_cpu_node(cpu, NULL);
if (!np)
return -ENODEV;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
goto err_np;
policy->clk = of_clk_get(np, 0);
if (IS_ERR(policy->clk)) {
pr_err("%s: no clock information\n", __func__);
goto err_nomem2;
}
hwclk = __clk_get_hw(policy->clk);
count = clk_hw_get_num_parents(hwclk);
data->pclk = kcalloc(count, sizeof(struct clk *), GFP_KERNEL);
if (!data->pclk)
goto err_nomem2;
table = kcalloc(count + 1, sizeof(*table), GFP_KERNEL);
if (!table)
goto err_pclk;
for (i = 0; i < count; i++) {
clk = clk_hw_get_parent_by_index(hwclk, i)->clk;
data->pclk[i] = clk;
freq = clk_get_rate(clk);
table[i].frequency = freq / 1000;
table[i].driver_data = i;
}
freq_table_redup(table, count);
freq_table_sort(table, count);
table[i].frequency = CPUFREQ_TABLE_END;
policy->freq_table = table;
data->table = table;
	/* update ->cpus if we have a cluster; no harm if not */
set_affected_cpus(policy);
policy->driver_data = data;
/* Minimum transition latency is 12 platform clocks */
u64temp = 12ULL * NSEC_PER_SEC;
do_div(u64temp, get_bus_freq());
policy->cpuinfo.transition_latency = u64temp + 1;
of_node_put(np);
return 0;
err_pclk:
kfree(data->pclk);
err_nomem2:
kfree(data);
err_np:
of_node_put(np);
return -ENODEV;
}
static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct cpu_data *data = policy->driver_data;
kfree(data->pclk);
kfree(data->table);
kfree(data);
policy->driver_data = NULL;
return 0;
}
static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
unsigned int index)
{
struct clk *parent;
struct cpu_data *data = policy->driver_data;
parent = data->pclk[data->table[index].driver_data];
return clk_set_parent(policy->clk, parent);
}
static struct cpufreq_driver qoriq_cpufreq_driver = {
.name = "qoriq_cpufreq",
.flags = CPUFREQ_CONST_LOOPS |
CPUFREQ_IS_COOLING_DEV,
.init = qoriq_cpufreq_cpu_init,
.exit = qoriq_cpufreq_cpu_exit,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = qoriq_cpufreq_target,
.get = cpufreq_generic_get,
.attr = cpufreq_generic_attr,
};
static const struct of_device_id qoriq_cpufreq_blacklist[] = {
/* e6500 cannot use cpufreq due to erratum A-008083 */
{ .compatible = "fsl,b4420-clockgen", },
{ .compatible = "fsl,b4860-clockgen", },
{ .compatible = "fsl,t2080-clockgen", },
{ .compatible = "fsl,t4240-clockgen", },
{}
};
static int qoriq_cpufreq_probe(struct platform_device *pdev)
{
int ret;
struct device_node *np;
np = of_find_matching_node(NULL, qoriq_cpufreq_blacklist);
if (np) {
of_node_put(np);
dev_info(&pdev->dev, "Disabling due to erratum A-008083");
return -ENODEV;
}
ret = cpufreq_register_driver(&qoriq_cpufreq_driver);
if (ret)
return ret;
dev_info(&pdev->dev, "Freescale QorIQ CPU frequency scaling driver\n");
return 0;
}
static void qoriq_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&qoriq_cpufreq_driver);
}
static struct platform_driver qoriq_cpufreq_platform_driver = {
.driver = {
.name = "qoriq-cpufreq",
},
.probe = qoriq_cpufreq_probe,
.remove_new = qoriq_cpufreq_remove,
};
module_platform_driver(qoriq_cpufreq_platform_driver);
MODULE_ALIAS("platform:qoriq-cpufreq");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tang Yuantian <[email protected]>");
MODULE_DESCRIPTION("cpufreq driver for Freescale QorIQ series SoCs");
| linux-master | drivers/cpufreq/qoriq-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2010 Google, Inc.
*
* Author:
* Colin Cross <[email protected]>
* Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
*/
#include <linux/bits.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/types.h>
#include <soc/tegra/common.h>
#include <soc/tegra/fuse.h>
static bool cpu0_node_has_opp_v2_prop(void)
{
struct device_node *np = of_cpu_device_node_get(0);
bool ret = false;
if (of_property_present(np, "operating-points-v2"))
ret = true;
of_node_put(np);
return ret;
}
static void tegra20_cpufreq_put_supported_hw(void *opp_token)
{
dev_pm_opp_put_supported_hw((unsigned long) opp_token);
}
static void tegra20_cpufreq_dt_unregister(void *cpufreq_dt)
{
platform_device_unregister(cpufreq_dt);
}
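/*
 * Select the supported-hw bitmasks from the fused process/speedo IDs, attach
 * them to CPU0's OPP table and spawn the generic cpufreq-dt device that
 * performs the actual frequency scaling.
 */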
static int tegra20_cpufreq_probe(struct platform_device *pdev)
{
struct platform_device *cpufreq_dt;
struct device *cpu_dev;
u32 versions[2];
int err;
if (!cpu0_node_has_opp_v2_prop()) {
dev_err(&pdev->dev, "operating points not found\n");
dev_err(&pdev->dev, "please update your device tree\n");
return -ENODEV;
}
if (of_machine_is_compatible("nvidia,tegra20")) {
versions[0] = BIT(tegra_sku_info.cpu_process_id);
versions[1] = BIT(tegra_sku_info.soc_speedo_id);
} else {
versions[0] = BIT(tegra_sku_info.cpu_process_id);
versions[1] = BIT(tegra_sku_info.cpu_speedo_id);
}
dev_info(&pdev->dev, "hardware version 0x%x 0x%x\n",
versions[0], versions[1]);
cpu_dev = get_cpu_device(0);
if (WARN_ON(!cpu_dev))
return -ENODEV;
err = dev_pm_opp_set_supported_hw(cpu_dev, versions, 2);
if (err < 0) {
dev_err(&pdev->dev, "failed to set supported hw: %d\n", err);
return err;
}
err = devm_add_action_or_reset(&pdev->dev,
tegra20_cpufreq_put_supported_hw,
(void *)((unsigned long) err));
if (err)
return err;
cpufreq_dt = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
err = PTR_ERR_OR_ZERO(cpufreq_dt);
if (err) {
dev_err(&pdev->dev,
"failed to create cpufreq-dt device: %d\n", err);
return err;
}
err = devm_add_action_or_reset(&pdev->dev,
tegra20_cpufreq_dt_unregister,
cpufreq_dt);
if (err)
return err;
return 0;
}
static struct platform_driver tegra20_cpufreq_driver = {
.probe = tegra20_cpufreq_probe,
.driver = {
.name = "tegra20-cpufreq",
},
};
module_platform_driver(tegra20_cpufreq_driver);
MODULE_ALIAS("platform:tegra20-cpufreq");
MODULE_AUTHOR("Colin Cross <[email protected]>");
MODULE_DESCRIPTION("NVIDIA Tegra20 cpufreq driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/cpufreq/tegra20-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Versatile Express SPC CPUFreq Interface driver
*
* Copyright (C) 2013 - 2019 ARM Ltd.
* Sudeep Holla <[email protected]>
*
* Copyright (C) 2013 Linaro.
* Viresh Kumar <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>
/* Currently we support only two clusters */
#define A15_CLUSTER 0
#define A7_CLUSTER 1
#define MAX_CLUSTERS 2
#ifdef CONFIG_BL_SWITCHER
#include <asm/bL_switcher.h>
static bool bL_switching_enabled;
#define is_bL_switching_enabled() bL_switching_enabled
#define set_switching_enabled(x) (bL_switching_enabled = (x))
#else
#define is_bL_switching_enabled() false
#define set_switching_enabled(x) do { } while (0)
#define bL_switch_request(...) do { } while (0)
#define bL_switcher_put_enabled() do { } while (0)
#define bL_switcher_get_enabled() do { } while (0)
#endif
#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
static atomic_t cluster_usage[MAX_CLUSTERS + 1];
static unsigned int clk_big_min;	/* Minimum clock frequency (Big) */
static unsigned int clk_little_max; /* Maximum clock frequency (Little) */
static DEFINE_PER_CPU(unsigned int, physical_cluster);
static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
static struct mutex cluster_lock[MAX_CLUSTERS];
static inline int raw_cpu_to_cluster(int cpu)
{
return topology_physical_package_id(cpu);
}
static inline int cpu_to_cluster(int cpu)
{
return is_bL_switching_enabled() ?
MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
}
static unsigned int find_cluster_maxfreq(int cluster)
{
int j;
u32 max_freq = 0, cpu_freq;
for_each_online_cpu(j) {
cpu_freq = per_cpu(cpu_last_req_freq, j);
if (cluster == per_cpu(physical_cluster, j) &&
max_freq < cpu_freq)
max_freq = cpu_freq;
}
return max_freq;
}
static unsigned int clk_get_cpu_rate(unsigned int cpu)
{
u32 cur_cluster = per_cpu(physical_cluster, cpu);
u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
/* For switcher we use virtual A7 clock rates */
if (is_bL_switching_enabled())
rate = VIRT_FREQ(cur_cluster, rate);
return rate;
}
static unsigned int ve_spc_cpufreq_get_rate(unsigned int cpu)
{
if (is_bL_switching_enabled())
return per_cpu(cpu_last_req_freq, cpu);
else
return clk_get_cpu_rate(cpu);
}
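/*
 * Set the clock rate of @new_cluster. With the big.LITTLE switcher enabled,
 * record the per-CPU request, program the maximum rate requested across the
 * cluster, and rebalance the old cluster's rate after a switch.
 */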
static unsigned int
ve_spc_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
{
u32 new_rate, prev_rate;
int ret;
bool bLs = is_bL_switching_enabled();
mutex_lock(&cluster_lock[new_cluster]);
if (bLs) {
prev_rate = per_cpu(cpu_last_req_freq, cpu);
per_cpu(cpu_last_req_freq, cpu) = rate;
per_cpu(physical_cluster, cpu) = new_cluster;
new_rate = find_cluster_maxfreq(new_cluster);
new_rate = ACTUAL_FREQ(new_cluster, new_rate);
} else {
new_rate = rate;
}
ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
if (!ret) {
/*
* FIXME: clk_set_rate hasn't returned an error here however it
* may be that clk_change_rate failed due to hardware or
* firmware issues and wasn't able to report that due to the
* current design of the clk core layer. To work around this
* problem we will read back the clock rate and check it is
* correct. This needs to be removed once clk core is fixed.
*/
if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
ret = -EIO;
}
if (WARN_ON(ret)) {
if (bLs) {
per_cpu(cpu_last_req_freq, cpu) = prev_rate;
per_cpu(physical_cluster, cpu) = old_cluster;
}
mutex_unlock(&cluster_lock[new_cluster]);
return ret;
}
mutex_unlock(&cluster_lock[new_cluster]);
/* Recalc freq for old cluster when switching clusters */
if (old_cluster != new_cluster) {
/* Switch cluster */
bL_switch_request(cpu, new_cluster);
mutex_lock(&cluster_lock[old_cluster]);
/* Set freq of old cluster if there are cpus left on it */
new_rate = find_cluster_maxfreq(old_cluster);
new_rate = ACTUAL_FREQ(old_cluster, new_rate);
if (new_rate &&
clk_set_rate(clk[old_cluster], new_rate * 1000)) {
pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
__func__, ret, old_cluster);
}
mutex_unlock(&cluster_lock[old_cluster]);
}
return 0;
}
/* Set clock frequency */
static int ve_spc_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
unsigned int freqs_new;
cur_cluster = cpu_to_cluster(cpu);
new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
freqs_new = freq_table[cur_cluster][index].frequency;
if (is_bL_switching_enabled()) {
if (actual_cluster == A15_CLUSTER && freqs_new < clk_big_min)
new_cluster = A7_CLUSTER;
else if (actual_cluster == A7_CLUSTER &&
freqs_new > clk_little_max)
new_cluster = A15_CLUSTER;
}
return ve_spc_cpufreq_set_rate(cpu, actual_cluster, new_cluster,
freqs_new);
}
static inline u32 get_table_count(struct cpufreq_frequency_table *table)
{
int count;
for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
;
return count;
}
/* get the minimum frequency in the cpufreq_frequency_table */
static inline u32 get_table_min(struct cpufreq_frequency_table *table)
{
struct cpufreq_frequency_table *pos;
u32 min_freq = ~0;
cpufreq_for_each_entry(pos, table)
if (pos->frequency < min_freq)
min_freq = pos->frequency;
return min_freq;
}
/* get the maximum frequency in the cpufreq_frequency_table */
static inline u32 get_table_max(struct cpufreq_frequency_table *table)
{
struct cpufreq_frequency_table *pos;
u32 max_freq = 0;
cpufreq_for_each_entry(pos, table)
if (pos->frequency > max_freq)
max_freq = pos->frequency;
return max_freq;
}
static bool search_frequency(struct cpufreq_frequency_table *table, int size,
unsigned int freq)
{
int count;
for (count = 0; count < size; count++) {
if (table[count].frequency == freq)
return true;
}
return false;
}
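/*
 * Build the virtual cluster's frequency table by merging both physical
 * clusters' tables, halving the A7 frequencies and skipping duplicate
 * entries.
 */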
static int merge_cluster_tables(void)
{
int i, j, k = 0, count = 1;
struct cpufreq_frequency_table *table;
for (i = 0; i < MAX_CLUSTERS; i++)
count += get_table_count(freq_table[i]);
table = kcalloc(count, sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
freq_table[MAX_CLUSTERS] = table;
/* Add in reverse order to get freqs in increasing order */
for (i = MAX_CLUSTERS - 1; i >= 0; i--, count = k) {
for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
j++) {
if (i == A15_CLUSTER &&
search_frequency(table, count, freq_table[i][j].frequency))
continue; /* skip duplicates */
table[k++].frequency =
VIRT_FREQ(i, freq_table[i][j].frequency);
}
}
table[k].driver_data = k;
table[k].frequency = CPUFREQ_TABLE_END;
return 0;
}
static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
const struct cpumask *cpumask)
{
u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
if (!freq_table[cluster])
return;
clk_put(clk[cluster]);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
}
static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
const struct cpumask *cpumask)
{
u32 cluster = cpu_to_cluster(cpu_dev->id);
int i;
if (atomic_dec_return(&cluster_usage[cluster]))
return;
if (cluster < MAX_CLUSTERS)
return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);
for_each_present_cpu(i) {
struct device *cdev = get_cpu_device(i);
if (!cdev)
return;
_put_cluster_clk_and_freq_table(cdev, cpumask);
}
/* free virtual table */
kfree(freq_table[cluster]);
}
static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
const struct cpumask *cpumask)
{
u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
int ret;
if (freq_table[cluster])
return 0;
	/*
	 * The platform-specific SPC code must initialise the OPP table, so
	 * just check that the OPP count is non-zero.
	 */
ret = dev_pm_opp_get_opp_count(cpu_dev) <= 0;
if (ret)
goto out;
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
if (ret)
goto out;
clk[cluster] = clk_get(cpu_dev, NULL);
if (!IS_ERR(clk[cluster]))
return 0;
dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
__func__, cpu_dev->id, cluster);
ret = PTR_ERR(clk[cluster]);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
out:
dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
cluster);
return ret;
}
static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
const struct cpumask *cpumask)
{
u32 cluster = cpu_to_cluster(cpu_dev->id);
int i, ret;
if (atomic_inc_return(&cluster_usage[cluster]) != 1)
return 0;
if (cluster < MAX_CLUSTERS) {
ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
if (ret)
atomic_dec(&cluster_usage[cluster]);
return ret;
}
/*
* Get data for all clusters and fill virtual cluster with a merge of
* both
*/
for_each_present_cpu(i) {
struct device *cdev = get_cpu_device(i);
if (!cdev)
return -ENODEV;
ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
if (ret)
goto put_clusters;
}
ret = merge_cluster_tables();
if (ret)
goto put_clusters;
/* Assuming 2 clusters, set clk_big_min and clk_little_max */
clk_big_min = get_table_min(freq_table[A15_CLUSTER]);
clk_little_max = VIRT_FREQ(A7_CLUSTER,
get_table_max(freq_table[A7_CLUSTER]));
return 0;
put_clusters:
for_each_present_cpu(i) {
struct device *cdev = get_cpu_device(i);
if (!cdev)
return -ENODEV;
_put_cluster_clk_and_freq_table(cdev, cpumask);
}
atomic_dec(&cluster_usage[cluster]);
return ret;
}
/* Per-CPU initialization */
static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
{
u32 cur_cluster = cpu_to_cluster(policy->cpu);
struct device *cpu_dev;
int ret;
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
pr_err("%s: failed to get cpu%d device\n", __func__,
policy->cpu);
return -ENODEV;
}
if (cur_cluster < MAX_CLUSTERS) {
int cpu;
dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);
for_each_cpu(cpu, policy->cpus)
per_cpu(physical_cluster, cpu) = cur_cluster;
} else {
/* Assumption: during init, we are always running on A15 */
per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
}
ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
if (ret)
return ret;
policy->freq_table = freq_table[cur_cluster];
policy->cpuinfo.transition_latency = 1000000; /* 1 ms */
if (is_bL_switching_enabled())
per_cpu(cpu_last_req_freq, policy->cpu) =
clk_get_cpu_rate(policy->cpu);
dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
return 0;
}
static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
{
struct device *cpu_dev;
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
pr_err("%s: failed to get cpu%d device\n", __func__,
policy->cpu);
return -ENODEV;
}
put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
return 0;
}
static struct cpufreq_driver ve_spc_cpufreq_driver = {
.name = "vexpress-spc",
.flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = ve_spc_cpufreq_set_target,
.get = ve_spc_cpufreq_get_rate,
.init = ve_spc_cpufreq_init,
.exit = ve_spc_cpufreq_exit,
.register_em = cpufreq_register_em_with_opp,
.attr = cpufreq_generic_attr,
};
#ifdef CONFIG_BL_SWITCHER
static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
unsigned long action, void *_arg)
{
pr_debug("%s: action: %ld\n", __func__, action);
switch (action) {
case BL_NOTIFY_PRE_ENABLE:
case BL_NOTIFY_PRE_DISABLE:
cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
break;
case BL_NOTIFY_POST_ENABLE:
set_switching_enabled(true);
cpufreq_register_driver(&ve_spc_cpufreq_driver);
break;
case BL_NOTIFY_POST_DISABLE:
set_switching_enabled(false);
cpufreq_register_driver(&ve_spc_cpufreq_driver);
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
static struct notifier_block bL_switcher_notifier = {
.notifier_call = bL_cpufreq_switcher_notifier,
};
static int __bLs_register_notifier(void)
{
return bL_switcher_register_notifier(&bL_switcher_notifier);
}
static int __bLs_unregister_notifier(void)
{
return bL_switcher_unregister_notifier(&bL_switcher_notifier);
}
#else
static int __bLs_register_notifier(void) { return 0; }
static int __bLs_unregister_notifier(void) { return 0; }
#endif
static int ve_spc_cpufreq_probe(struct platform_device *pdev)
{
int ret, i;
set_switching_enabled(bL_switcher_get_enabled());
for (i = 0; i < MAX_CLUSTERS; i++)
mutex_init(&cluster_lock[i]);
if (!is_bL_switching_enabled())
ve_spc_cpufreq_driver.flags |= CPUFREQ_IS_COOLING_DEV;
ret = cpufreq_register_driver(&ve_spc_cpufreq_driver);
if (ret) {
pr_info("%s: Failed registering platform driver: %s, err: %d\n",
__func__, ve_spc_cpufreq_driver.name, ret);
} else {
ret = __bLs_register_notifier();
if (ret)
cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
else
pr_info("%s: Registered platform driver: %s\n",
__func__, ve_spc_cpufreq_driver.name);
}
bL_switcher_put_enabled();
return ret;
}
static void ve_spc_cpufreq_remove(struct platform_device *pdev)
{
bL_switcher_get_enabled();
__bLs_unregister_notifier();
cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
bL_switcher_put_enabled();
pr_info("%s: Un-registered platform driver: %s\n", __func__,
ve_spc_cpufreq_driver.name);
}
static struct platform_driver ve_spc_cpufreq_platdrv = {
.driver = {
.name = "vexpress-spc-cpufreq",
},
.probe = ve_spc_cpufreq_probe,
.remove_new = ve_spc_cpufreq_remove,
};
module_platform_driver(ve_spc_cpufreq_platdrv);
MODULE_ALIAS("platform:vexpress-spc-cpufreq");
MODULE_AUTHOR("Viresh Kumar <[email protected]>");
MODULE_AUTHOR("Sudeep Holla <[email protected]>");
MODULE_DESCRIPTION("Vexpress SPC ARM big LITTLE cpufreq driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/cpufreq/vexpress-spc-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <[email protected]>
* and Markus Demleitner <[email protected]>
*
* This driver adds basic cpufreq support for SMU & 970FX based G5 Macs,
* that is, the iMac G5 and the latest single-CPU desktops.
*/
#undef DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/sections.h>
#include <asm/cputable.h>
#include <asm/time.h>
#include <asm/smu.h>
#include <asm/pmac_pfunc.h>
#define DBG(fmt...) pr_debug(fmt)
/* see 970FX user manual */
#define SCOM_PCR 0x0aa001 /* PCR scom addr */
#define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */
#define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */
#define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */
#define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */
#define PCR_SPEED_MASK 0x000e0000U /* speed mask */
#define PCR_SPEED_SHIFT 17
#define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */
#define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */
#define PCR_TARGET_TIME_MASK 0x00006000U /* target time */
#define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */
#define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */
#define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */
#define SCOM_PSR 0x408001 /* PSR scom addr */
/* warning: PSR is a 64-bit register */
#define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */
#define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */
#define PSR_CUR_SPEED_MASK 0x0300000000000000U /* current speed */
#define PSR_CUR_SPEED_SHIFT (56)
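/*
 * Illustrative use of the bits above (editor's sketch; values assumed,
 * not taken from the 970FX manual): a half-speed request could be
 * composed as
 *
 *   u32 pcr = PCR_SPEED_HALF | PCR_FREQ_REQ_VALID;
 *
 * and written to the low PCR word with PCR_HILO_SELECT set, as
 * g5_scom_switch_freq() does below; the current 2-bit speed code is
 * then read back from the PSR via PSR_CUR_SPEED_SHIFT.
 */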
/*
* The G5 only supports two frequencies (Quarter speed is not supported)
*/
#define CPUFREQ_HIGH 0
#define CPUFREQ_LOW 1
static struct cpufreq_frequency_table g5_cpu_freqs[] = {
{0, CPUFREQ_HIGH, 0},
{0, CPUFREQ_LOW, 0},
{0, 0, CPUFREQ_TABLE_END},
};
/* Power mode data is an array of the 32-bit PCR values to use for
* the various frequencies, retrieved from the device-tree.
*/
static int g5_pmode_cur;
static void (*g5_switch_volt)(int speed_mode);
static int (*g5_switch_freq)(int speed_mode);
static int (*g5_query_freq)(void);
static unsigned long transition_latency;
#ifdef CONFIG_PMAC_SMU
static const u32 *g5_pmode_data;
static int g5_pmode_max;
static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. points */
static int g5_fvt_count; /* number of op. points */
static int g5_fvt_cur; /* current op. point */
/*
* SMU based voltage switching for Neo2 platforms
*/
static void g5_smu_switch_volt(int speed_mode)
{
struct smu_simple_cmd cmd;
DECLARE_COMPLETION_ONSTACK(comp);
smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, smu_done_complete,
&comp, 'V', 'S', 'L', 'E', 'W',
0xff, g5_fvt_cur+1, speed_mode);
wait_for_completion(&comp);
}
/*
* Platform function based voltage/vdnap switching for Neo2
*/
static struct pmf_function *pfunc_set_vdnap0;
static struct pmf_function *pfunc_vdnap0_complete;
static void g5_vdnap_switch_volt(int speed_mode)
{
struct pmf_args args;
u32 slew, done = 0;
unsigned long timeout;
slew = (speed_mode == CPUFREQ_LOW) ? 1 : 0;
args.count = 1;
args.u[0].p = &slew;
pmf_call_one(pfunc_set_vdnap0, &args);
/* It's an irq GPIO so we should be able to just block here,
* I'll do that later after I've properly tested the IRQ code for
* platform functions
*/
timeout = jiffies + HZ/10;
while (!time_after(jiffies, timeout)) {
args.count = 1;
args.u[0].p = &done;
pmf_call_one(pfunc_vdnap0_complete, &args);
if (done)
break;
usleep_range(1000, 1000);
}
if (done == 0)
pr_warn("Timeout in clock slewing !\n");
}
/*
* SCOM based frequency switching for 970FX rev3
*/
static int g5_scom_switch_freq(int speed_mode)
{
unsigned long flags;
int to;
/* If frequency is going up, first ramp up the voltage */
if (speed_mode < g5_pmode_cur)
g5_switch_volt(speed_mode);
local_irq_save(flags);
/* Clear PCR high */
scom970_write(SCOM_PCR, 0);
/* Clear PCR low */
scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0);
/* Set PCR low */
scom970_write(SCOM_PCR, PCR_HILO_SELECT |
g5_pmode_data[speed_mode]);
/* Wait for completion */
for (to = 0; to < 10; to++) {
unsigned long psr = scom970_read(SCOM_PSR);
if ((psr & PSR_CMD_RECEIVED) == 0 &&
(((psr >> PSR_CUR_SPEED_SHIFT) ^
(g5_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3)
== 0)
break;
if (psr & PSR_CMD_COMPLETED)
break;
udelay(100);
}
local_irq_restore(flags);
/* If frequency is going down, last ramp the voltage */
if (speed_mode > g5_pmode_cur)
g5_switch_volt(speed_mode);
g5_pmode_cur = speed_mode;
ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul;
return 0;
}
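/*
 * Editor's note on the completion test above: both the PSR speed field
 * and the PCR speed code are shifted down and compared as 2-bit values.
 * For example, if g5_pmode_data[speed_mode] encodes PCR_SPEED_HALF,
 * (pmode >> PCR_SPEED_SHIFT) & 0x3 == 1, so the loop exits once
 * (psr >> PSR_CUR_SPEED_SHIFT) & 0x3 also reads 1 and PSR_CMD_RECEIVED
 * has cleared.
 */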
static int g5_scom_query_freq(void)
{
unsigned long psr = scom970_read(SCOM_PSR);
int i;
for (i = 0; i <= g5_pmode_max; i++)
if ((((psr >> PSR_CUR_SPEED_SHIFT) ^
(g5_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0)
break;
return i;
}
/*
* Fake voltage switching for platforms with missing support
*/
static void g5_dummy_switch_volt(int speed_mode)
{
}
#endif /* CONFIG_PMAC_SMU */
/*
* Platform function based voltage switching for PowerMac7,2 & 7,3
*/
static struct pmf_function *pfunc_cpu0_volt_high;
static struct pmf_function *pfunc_cpu0_volt_low;
static struct pmf_function *pfunc_cpu1_volt_high;
static struct pmf_function *pfunc_cpu1_volt_low;
static void g5_pfunc_switch_volt(int speed_mode)
{
if (speed_mode == CPUFREQ_HIGH) {
if (pfunc_cpu0_volt_high)
pmf_call_one(pfunc_cpu0_volt_high, NULL);
if (pfunc_cpu1_volt_high)
pmf_call_one(pfunc_cpu1_volt_high, NULL);
} else {
if (pfunc_cpu0_volt_low)
pmf_call_one(pfunc_cpu0_volt_low, NULL);
if (pfunc_cpu1_volt_low)
pmf_call_one(pfunc_cpu1_volt_low, NULL);
}
usleep_range(10000, 10000); /* should be faster, to fix */
}
/*
* Platform function based frequency switching for PowerMac7,2 & 7,3
*/
static struct pmf_function *pfunc_cpu_setfreq_high;
static struct pmf_function *pfunc_cpu_setfreq_low;
static struct pmf_function *pfunc_cpu_getfreq;
static struct pmf_function *pfunc_slewing_done;
static int g5_pfunc_switch_freq(int speed_mode)
{
struct pmf_args args;
u32 done = 0;
unsigned long timeout;
int rc;
DBG("g5_pfunc_switch_freq(%d)\n", speed_mode);
/* If frequency is going up, first ramp up the voltage */
if (speed_mode < g5_pmode_cur)
g5_switch_volt(speed_mode);
/* Do it */
if (speed_mode == CPUFREQ_HIGH)
rc = pmf_call_one(pfunc_cpu_setfreq_high, NULL);
else
rc = pmf_call_one(pfunc_cpu_setfreq_low, NULL);
if (rc)
pr_warn("pfunc switch error %d\n", rc);
/* It's an irq GPIO so we should be able to just block here,
* I'll do that later after I've properly tested the IRQ code for
* platform functions
*/
timeout = jiffies + HZ/10;
while (!time_after(jiffies, timeout)) {
args.count = 1;
args.u[0].p = &done;
pmf_call_one(pfunc_slewing_done, &args);
if (done)
break;
usleep_range(500, 500);
}
if (done == 0)
pr_warn("Timeout in clock slewing !\n");
/* If frequency is going down, last ramp the voltage */
if (speed_mode > g5_pmode_cur)
g5_switch_volt(speed_mode);
g5_pmode_cur = speed_mode;
ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul;
return 0;
}
static int g5_pfunc_query_freq(void)
{
struct pmf_args args;
u32 val = 0;
args.count = 1;
args.u[0].p = &val;
pmf_call_one(pfunc_cpu_getfreq, &args);
return val ? CPUFREQ_HIGH : CPUFREQ_LOW;
}
/*
* Common interface to the cpufreq core
*/
static int g5_cpufreq_target(struct cpufreq_policy *policy, unsigned int index)
{
return g5_switch_freq(index);
}
static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
{
return g5_cpu_freqs[g5_pmode_cur].frequency;
}
static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
cpufreq_generic_init(policy, g5_cpu_freqs, transition_latency);
return 0;
}
static struct cpufreq_driver g5_cpufreq_driver = {
.name = "powermac",
.flags = CPUFREQ_CONST_LOOPS,
.init = g5_cpufreq_cpu_init,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = g5_cpufreq_target,
.get = g5_cpufreq_get_speed,
.attr = cpufreq_generic_attr,
};
#ifdef CONFIG_PMAC_SMU
static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
{
unsigned int psize, ssize;
unsigned long max_freq;
char *freq_method, *volt_method;
const u32 *valp;
u32 pvr_hi;
int use_volts_vdnap = 0;
int use_volts_smu = 0;
int rc = -ENODEV;
/* Check supported platforms */
if (of_machine_is_compatible("PowerMac8,1") ||
of_machine_is_compatible("PowerMac8,2") ||
of_machine_is_compatible("PowerMac9,1") ||
of_machine_is_compatible("PowerMac12,1"))
use_volts_smu = 1;
else if (of_machine_is_compatible("PowerMac11,2"))
use_volts_vdnap = 1;
else
return -ENODEV;
/* Check 970FX for now */
valp = of_get_property(cpunode, "cpu-version", NULL);
if (!valp) {
DBG("No cpu-version property !\n");
goto bail_noprops;
}
pvr_hi = (*valp) >> 16;
if (pvr_hi != 0x3c && pvr_hi != 0x44) {
pr_err("Unsupported CPU version\n");
goto bail_noprops;
}
/* Look for the powertune data in the device-tree */
g5_pmode_data = of_get_property(cpunode, "power-mode-data", &psize);
if (!g5_pmode_data) {
DBG("No power-mode-data!\n");
goto bail_noprops;
}
g5_pmode_max = psize / sizeof(u32) - 1;
if (use_volts_smu) {
const struct smu_sdbp_header *shdr;
/* Look for the FVT table */
shdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL);
if (!shdr)
goto bail_noprops;
g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1];
ssize = (shdr->len * sizeof(u32)) - sizeof(*shdr);
g5_fvt_count = ssize / sizeof(*g5_fvt_table);
g5_fvt_cur = 0;
/* Sanity checking */
if (g5_fvt_count < 1 || g5_pmode_max < 1)
goto bail_noprops;
g5_switch_volt = g5_smu_switch_volt;
volt_method = "SMU";
} else if (use_volts_vdnap) {
struct device_node *root;
root = of_find_node_by_path("/");
if (root == NULL) {
pr_err("Can't find root of device tree\n");
goto bail_noprops;
}
pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0");
pfunc_vdnap0_complete =
pmf_find_function(root, "slewing-done");
of_node_put(root);
if (pfunc_set_vdnap0 == NULL ||
pfunc_vdnap0_complete == NULL) {
pr_err("Can't find required platform function\n");
goto bail_noprops;
}
g5_switch_volt = g5_vdnap_switch_volt;
volt_method = "GPIO";
} else {
g5_switch_volt = g5_dummy_switch_volt;
volt_method = "none";
}
/*
* From what I see, clock-frequency is always the maximal frequency.
* The current driver cannot slew sysclk yet, so we really only deal
* with powertune steps for now. We also only implement full freq and
* half freq in this version. So far, I haven't yet seen a machine
* supporting anything else.
*/
valp = of_get_property(cpunode, "clock-frequency", NULL);
if (!valp)
return -ENODEV;
max_freq = (*valp)/1000;
g5_cpu_freqs[0].frequency = max_freq;
g5_cpu_freqs[1].frequency = max_freq/2;
/* Set callbacks */
transition_latency = 12000;
g5_switch_freq = g5_scom_switch_freq;
g5_query_freq = g5_scom_query_freq;
freq_method = "SCOM";
/* Force apply current frequency to make sure everything is in
* sync (voltage is right for example). Firmware may leave us with
* a strange setting ...
*/
g5_switch_volt(CPUFREQ_HIGH);
msleep(10);
g5_pmode_cur = -1;
g5_switch_freq(g5_query_freq());
pr_info("Registering G5 CPU frequency driver\n");
pr_info("Frequency method: %s, Voltage method: %s\n",
freq_method, volt_method);
pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
g5_cpu_freqs[1].frequency/1000,
g5_cpu_freqs[0].frequency/1000,
g5_cpu_freqs[g5_pmode_cur].frequency/1000);
rc = cpufreq_register_driver(&g5_cpufreq_driver);
/* We keep the CPU node on hold... hopefully, Apple G5s don't have
* hotplug CPUs with a dynamic device-tree ...
*/
return rc;
bail_noprops:
of_node_put(cpunode);
return rc;
}
#endif /* CONFIG_PMAC_SMU */
static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
{
struct device_node *cpuid = NULL, *hwclock = NULL;
const u8 *eeprom = NULL;
const u32 *valp;
u64 max_freq, min_freq, ih, il;
int has_volt = 1, rc = 0;
DBG("cpufreq: Initializing for PowerMac7,2, PowerMac7,3 and"
" RackMac3,1...\n");
/* Lookup the cpuid eeprom node */
cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0");
if (cpuid != NULL)
eeprom = of_get_property(cpuid, "cpuid", NULL);
if (eeprom == NULL) {
pr_err("Can't find cpuid EEPROM !\n");
rc = -ENODEV;
goto bail;
}
/* Lookup the i2c hwclock */
for_each_node_by_name(hwclock, "i2c-hwclock") {
const char *loc = of_get_property(hwclock,
"hwctrl-location", NULL);
if (loc == NULL)
continue;
if (strcmp(loc, "CPU CLOCK"))
continue;
if (!of_get_property(hwclock, "platform-get-frequency", NULL))
continue;
break;
}
if (hwclock == NULL) {
pr_err("Can't find i2c clock chip !\n");
rc = -ENODEV;
goto bail;
}
DBG("cpufreq: i2c clock chip found: %pOF\n", hwclock);
/* Now get all the platform functions */
pfunc_cpu_getfreq =
pmf_find_function(hwclock, "get-frequency");
pfunc_cpu_setfreq_high =
pmf_find_function(hwclock, "set-frequency-high");
pfunc_cpu_setfreq_low =
pmf_find_function(hwclock, "set-frequency-low");
pfunc_slewing_done =
pmf_find_function(hwclock, "slewing-done");
pfunc_cpu0_volt_high =
pmf_find_function(hwclock, "set-voltage-high-0");
pfunc_cpu0_volt_low =
pmf_find_function(hwclock, "set-voltage-low-0");
pfunc_cpu1_volt_high =
pmf_find_function(hwclock, "set-voltage-high-1");
pfunc_cpu1_volt_low =
pmf_find_function(hwclock, "set-voltage-low-1");
/* Check we have minimum requirements */
if (pfunc_cpu_getfreq == NULL || pfunc_cpu_setfreq_high == NULL ||
pfunc_cpu_setfreq_low == NULL || pfunc_slewing_done == NULL) {
pr_err("Can't find platform functions !\n");
rc = -ENODEV;
goto bail;
}
/* Check that we have complete sets */
if (pfunc_cpu0_volt_high == NULL || pfunc_cpu0_volt_low == NULL) {
pmf_put_function(pfunc_cpu0_volt_high);
pmf_put_function(pfunc_cpu0_volt_low);
pfunc_cpu0_volt_high = pfunc_cpu0_volt_low = NULL;
has_volt = 0;
}
if (!has_volt ||
pfunc_cpu1_volt_high == NULL || pfunc_cpu1_volt_low == NULL) {
pmf_put_function(pfunc_cpu1_volt_high);
pmf_put_function(pfunc_cpu1_volt_low);
pfunc_cpu1_volt_high = pfunc_cpu1_volt_low = NULL;
}
/* Note: The device tree also contains a "platform-set-values"
* function for which I haven't quite figured out the usage. It
* might have to be called on init and/or wakeup, I'm not too sure
* but things seem to work fine without it so far ...
*/
/* Get max frequency from device-tree */
valp = of_get_property(cpunode, "clock-frequency", NULL);
if (!valp) {
pr_err("Can't find CPU frequency !\n");
rc = -ENODEV;
goto bail;
}
max_freq = (*valp)/1000;
/* Now calculate reduced frequency by using the cpuid input freq
* ratio. This requires 64-bit math unless we are willing to lose
* some precision
*/
ih = *((u32 *)(eeprom + 0x10));
il = *((u32 *)(eeprom + 0x20));
/* Check for machines with no useful settings */
if (il == ih) {
pr_warn("No low frequency mode available on this model !\n");
rc = -ENODEV;
goto bail;
}
min_freq = 0;
if (ih != 0 && il != 0)
min_freq = (max_freq * il) / ih;
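/*
 * Worked example with hypothetical EEPROM values (editor's
 * illustration, not real hardware data): max_freq = 2000000 kHz,
 * ih = 6000, il = 3900 gives min_freq = (2000000 * 3900) / 6000 =
 * 1300000 kHz, i.e. a 1.3 GHz low speed on a 2 GHz machine.
 */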
/* Sanity check */
if (min_freq >= max_freq || min_freq < 1000) {
pr_err("Can't calculate low frequency !\n");
rc = -ENXIO;
goto bail;
}
g5_cpu_freqs[0].frequency = max_freq;
g5_cpu_freqs[1].frequency = min_freq;
/* Based on a measurement on Xserve G5, rounded up. */
transition_latency = 10 * NSEC_PER_MSEC;
/* Set callbacks */
g5_switch_volt = g5_pfunc_switch_volt;
g5_switch_freq = g5_pfunc_switch_freq;
g5_query_freq = g5_pfunc_query_freq;
/* Force apply current frequency to make sure everything is in
* sync (voltage is right for example). Firmware may leave us with
* a strange setting ...
*/
g5_switch_volt(CPUFREQ_HIGH);
msleep(10);
g5_pmode_cur = -1;
g5_switch_freq(g5_query_freq());
pr_info("Registering G5 CPU frequency driver\n");
pr_info("Frequency method: i2c/pfunc, Voltage method: %s\n",
has_volt ? "i2c/pfunc" : "none");
pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
g5_cpu_freqs[1].frequency/1000,
g5_cpu_freqs[0].frequency/1000,
g5_cpu_freqs[g5_pmode_cur].frequency/1000);
rc = cpufreq_register_driver(&g5_cpufreq_driver);
bail:
if (rc != 0) {
pmf_put_function(pfunc_cpu_getfreq);
pmf_put_function(pfunc_cpu_setfreq_high);
pmf_put_function(pfunc_cpu_setfreq_low);
pmf_put_function(pfunc_slewing_done);
pmf_put_function(pfunc_cpu0_volt_high);
pmf_put_function(pfunc_cpu0_volt_low);
pmf_put_function(pfunc_cpu1_volt_high);
pmf_put_function(pfunc_cpu1_volt_low);
}
of_node_put(hwclock);
of_node_put(cpuid);
of_node_put(cpunode);
return rc;
}
static int __init g5_cpufreq_init(void)
{
struct device_node *cpunode;
int rc = 0;
/* Get first CPU node */
cpunode = of_cpu_device_node_get(0);
if (cpunode == NULL) {
pr_err("Can't find any CPU node\n");
return -ENODEV;
}
if (of_machine_is_compatible("PowerMac7,2") ||
of_machine_is_compatible("PowerMac7,3") ||
of_machine_is_compatible("RackMac3,1"))
rc = g5_pm72_cpufreq_init(cpunode);
#ifdef CONFIG_PMAC_SMU
else
rc = g5_neo2_cpufreq_init(cpunode);
#endif /* CONFIG_PMAC_SMU */
return rc;
}
module_init(g5_cpufreq_init);
MODULE_LICENSE("GPL");
| linux-master | drivers/cpufreq/pmac64-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <[email protected]>
* Copyright (C) 2004 John Steele Scott <[email protected]>
*
* TODO: Need a big cleanup here. Basically, we need to have different
* cpufreq_driver structures for the different type of HW instead of the
* current mess. We also need to better deal with the detection of the
* type of machine.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/hardirq.h>
#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/pmac_feature.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/cputable.h>
#include <asm/time.h>
#include <asm/mpic.h>
#include <asm/keylargo.h>
#include <asm/switch_to.h>
/* WARNING!!! This will cause calibrate_delay() to be called,
* but this is an __init function! So you MUST go edit
* init/main.c to make it non-init before enabling DEBUG_FREQ.
*/
#undef DEBUG_FREQ
extern void low_choose_7447a_dfs(int dfs);
extern void low_choose_750fx_pll(int pll);
extern void low_sleep_handler(void);
/*
* Currently, PowerMac cpufreq supports only high & low frequencies
* that are set by the firmware
*/
static unsigned int low_freq;
static unsigned int hi_freq;
static unsigned int cur_freq;
static unsigned int sleep_freq;
static unsigned long transition_latency;
/*
* Different models use different mechanisms to switch the frequency
*/
static int (*set_speed_proc)(int low_speed);
static unsigned int (*get_speed_proc)(void);
/*
* Some definitions used by the various speedprocs
*/
static u32 voltage_gpio;
static u32 frequency_gpio;
static u32 slew_done_gpio;
static int no_schedule;
static int has_cpu_l2lve;
static int is_pmu_based;
/* There are only two frequency states for each processor. Values
* are in kHz for the time being.
*/
#define CPUFREQ_HIGH 0
#define CPUFREQ_LOW 1
static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
{0, CPUFREQ_HIGH, 0},
{0, CPUFREQ_LOW, 0},
{0, 0, CPUFREQ_TABLE_END},
};
static inline void local_delay(unsigned long ms)
{
if (no_schedule)
mdelay(ms);
else
msleep(ms);
}
#ifdef DEBUG_FREQ
static inline void debug_calc_bogomips(void)
{
/* This will cause a recalc of bogomips and display the
* result. We backup/restore the value to avoid affecting the
* core cpufreq framework's own calculation.
*/
unsigned long save_lpj = loops_per_jiffy;
calibrate_delay();
loops_per_jiffy = save_lpj;
}
#endif /* DEBUG_FREQ */
/* Switch CPU speed under 750FX CPU control
*/
static int cpu_750fx_cpu_speed(int low_speed)
{
u32 hid2;
if (low_speed == 0) {
/* ramping up, set voltage first */
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
/* Make sure we sleep for at least 1ms */
local_delay(10);
/* tweak L2 for high voltage */
if (has_cpu_l2lve) {
hid2 = mfspr(SPRN_HID2);
hid2 &= ~0x2000;
mtspr(SPRN_HID2, hid2);
}
}
#ifdef CONFIG_PPC_BOOK3S_32
low_choose_750fx_pll(low_speed);
#endif
if (low_speed == 1) {
/* tweak L2 for low voltage */
if (has_cpu_l2lve) {
hid2 = mfspr(SPRN_HID2);
hid2 |= 0x2000;
mtspr(SPRN_HID2, hid2);
}
/* ramping down, set voltage last */
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
local_delay(10);
}
return 0;
}
static unsigned int cpu_750fx_get_cpu_speed(void)
{
if (mfspr(SPRN_HID1) & HID1_PS)
return low_freq;
else
return hi_freq;
}
/* Switch CPU speed using DFS */
static int dfs_set_cpu_speed(int low_speed)
{
if (low_speed == 0) {
/* ramping up, set voltage first */
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
/* Make sure we sleep for at least 1ms */
local_delay(1);
}
/* set frequency */
#ifdef CONFIG_PPC_BOOK3S_32
low_choose_7447a_dfs(low_speed);
#endif
udelay(100);
if (low_speed == 1) {
/* ramping down, set voltage last */
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
local_delay(1);
}
return 0;
}
static unsigned int dfs_get_cpu_speed(void)
{
if (mfspr(SPRN_HID1) & HID1_DFS)
return low_freq;
else
return hi_freq;
}
/* Switch CPU speed using slewing GPIOs
*/
static int gpios_set_cpu_speed(int low_speed)
{
int gpio, timeout = 0;
/* If ramping up, set voltage first */
if (low_speed == 0) {
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
/* Delay is way too big but it's ok, we schedule */
local_delay(10);
}
/* Set frequency */
gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
if (low_speed == ((gpio & 0x01) == 0))
goto skip;
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, frequency_gpio,
low_speed ? 0x04 : 0x05);
udelay(200);
do {
if (++timeout > 100)
break;
local_delay(1);
gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, slew_done_gpio, 0);
} while ((gpio & 0x02) == 0);
skip:
/* If ramping down, set voltage last */
if (low_speed == 1) {
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
/* Delay is way too big but it's ok, we schedule */
local_delay(10);
}
#ifdef DEBUG_FREQ
debug_calc_bogomips();
#endif
return 0;
}
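/*
 * Editor's note on the GPIO test above: bit 0 of the frequency GPIO
 * reads 1 at high speed and 0 at low speed (see how
 * pmac_cpufreq_init_MacRISC3() derives cur_freq from it), so
 * "low_speed == ((gpio & 0x01) == 0)" is true when the CPU is already
 * at the requested speed and the slew can be skipped.
 */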
/* Switch CPU speed under PMU control
*/
static int pmu_set_cpu_speed(int low_speed)
{
struct adb_request req;
unsigned long save_l2cr;
unsigned long save_l3cr;
unsigned int pic_prio;
unsigned long flags;
preempt_disable();
#ifdef DEBUG_FREQ
printk(KERN_DEBUG "HID1, before: %x\n", mfspr(SPRN_HID1));
#endif
pmu_suspend();
/* Disable all interrupt sources on openpic */
pic_prio = mpic_cpu_get_priority();
mpic_cpu_set_priority(0xf);
/* Make sure the decrementer won't interrupt us */
asm volatile("mtdec %0" : : "r" (0x7fffffff));
/* Make sure any pending DEC interrupt occurring while we did
* the above didn't re-enable the DEC */
mb();
asm volatile("mtdec %0" : : "r" (0x7fffffff));
/* We can now disable MSR_EE */
local_irq_save(flags);
/* Give up the FPU & vec */
enable_kernel_fp();
#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC))
enable_kernel_altivec();
#endif /* CONFIG_ALTIVEC */
/* Save & disable L2 and L3 caches */
save_l3cr = _get_L3CR(); /* (returns -1 if not available) */
save_l2cr = _get_L2CR(); /* (returns -1 if not available) */
/* Send the new speed command. My assumption is that this command
* will cause PLL_CFG[0..3] to be changed the next time the CPU goes to sleep
*/
pmu_request(&req, NULL, 6, PMU_CPU_SPEED, 'W', 'O', 'O', 'F', low_speed);
while (!req.complete)
pmu_poll();
/* Prepare the northbridge for the speed transition */
pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 1, 1);
/* Call low level code to backup CPU state and recover from
* hardware reset
*/
low_sleep_handler();
/* Restore the northbridge */
pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 1, 0);
/* Restore L2 cache */
if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
_set_L2CR(save_l2cr);
/* Restore L3 cache */
if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0)
_set_L3CR(save_l3cr);
/* Restore userland MMU context */
switch_mmu_context(NULL, current->active_mm, NULL);
#ifdef DEBUG_FREQ
printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
#endif
/* Restore low level PMU operations */
pmu_unlock();
/*
* Restore decrementer; we'll take a decrementer interrupt
* as soon as interrupts are re-enabled and the generic
* clockevents code will reprogram it with the right value.
*/
set_dec(1);
/* Restore interrupts */
mpic_cpu_set_priority(pic_prio);
/* Let interrupts flow again ... */
local_irq_restore(flags);
#ifdef DEBUG_FREQ
debug_calc_bogomips();
#endif
pmu_resume();
preempt_enable();
return 0;
}
static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode)
{
unsigned long l3cr;
static unsigned long prev_l3cr;
if (speed_mode == CPUFREQ_LOW &&
cpu_has_feature(CPU_FTR_L3CR)) {
l3cr = _get_L3CR();
if (l3cr & L3CR_L3E) {
prev_l3cr = l3cr;
_set_L3CR(0);
}
}
set_speed_proc(speed_mode == CPUFREQ_LOW);
if (speed_mode == CPUFREQ_HIGH &&
cpu_has_feature(CPU_FTR_L3CR)) {
l3cr = _get_L3CR();
if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr)
_set_L3CR(prev_l3cr);
}
cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
return 0;
}
static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
{
return cur_freq;
}
static int pmac_cpufreq_target( struct cpufreq_policy *policy,
unsigned int index)
{
int rc;
rc = do_set_cpu_speed(policy, index);
ppc_proc_freq = cur_freq * 1000ul;
return rc;
}
static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
cpufreq_generic_init(policy, pmac_cpu_freqs, transition_latency);
return 0;
}
static u32 read_gpio(struct device_node *np)
{
const u32 *reg = of_get_property(np, "reg", NULL);
u32 offset;
if (reg == NULL)
return 0;
/* That works for all keylargos but shall be fixed properly
* some day... The problem is that it seems we can't rely
* on the "reg" property of the GPIO nodes, they are either
* relative to the base of KeyLargo or to the base of the
* GPIO space, and the device-tree doesn't help.
*/
offset = *reg;
if (offset < KEYLARGO_GPIO_LEVELS0)
offset += KEYLARGO_GPIO_LEVELS0;
return offset;
}
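/*
 * Editor's note: the function simply rebases small "reg" values. A reg
 * below KEYLARGO_GPIO_LEVELS0 is assumed to be relative to the KeyLargo
 * base and gets the GPIO-levels offset added; anything at or above it
 * is returned unchanged.
 */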
static int pmac_cpufreq_suspend(struct cpufreq_policy *policy)
{
/* Ok, this could be made a bit smarter, but let's be robust for now. We
* always force a speed change to high speed before sleep, to make sure
* we have appropriate voltage and/or bus speed for the wakeup process,
* and to make sure our loops_per_jiffy is "good enough", that is, will
* not cause too-short delays if we sleep at low speed and wake at high
* speed...
*/
no_schedule = 1;
sleep_freq = cur_freq;
if (cur_freq == low_freq && !is_pmu_based)
do_set_cpu_speed(policy, CPUFREQ_HIGH);
return 0;
}
static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
{
/* If we resume, first check if we have a get() function */
if (get_speed_proc)
cur_freq = get_speed_proc();
else
cur_freq = 0;
/* We don't, hrm... we don't really know our speed here, so the
* best we can do is force a switch to whatever it was, which is
* probably high speed due to our suspend() routine
*/
do_set_cpu_speed(policy, sleep_freq == low_freq ?
CPUFREQ_LOW : CPUFREQ_HIGH);
ppc_proc_freq = cur_freq * 1000ul;
no_schedule = 0;
return 0;
}
static struct cpufreq_driver pmac_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = pmac_cpufreq_target,
.get = pmac_cpufreq_get_speed,
.init = pmac_cpufreq_cpu_init,
.suspend = pmac_cpufreq_suspend,
.resume = pmac_cpufreq_resume,
.flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.attr = cpufreq_generic_attr,
.name = "powermac",
};
static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
{
struct device_node *volt_gpio_np = of_find_node_by_name(NULL,
"voltage-gpio");
struct device_node *freq_gpio_np = of_find_node_by_name(NULL,
"frequency-gpio");
struct device_node *slew_done_gpio_np = of_find_node_by_name(NULL,
"slewing-done");
const u32 *value;
/*
* Check to see if it's GPIO driven or PMU only
*
* The way we extract the GPIO address is slightly hackish, but it
* works well enough for now. We need to abstract the whole GPIO
* stuff sooner or later anyway
*/
if (volt_gpio_np)
voltage_gpio = read_gpio(volt_gpio_np);
if (freq_gpio_np)
frequency_gpio = read_gpio(freq_gpio_np);
if (slew_done_gpio_np)
slew_done_gpio = read_gpio(slew_done_gpio_np);
of_node_put(volt_gpio_np);
of_node_put(freq_gpio_np);
of_node_put(slew_done_gpio_np);
/* If we use the frequency GPIOs, calculate the min/max speeds based
* on the bus frequencies
*/
if (frequency_gpio && slew_done_gpio) {
int lenp, rc;
const u32 *freqs, *ratio;
freqs = of_get_property(cpunode, "bus-frequencies", &lenp);
lenp /= sizeof(u32);
if (freqs == NULL || lenp != 2) {
pr_err("bus-frequencies incorrect or missing\n");
return 1;
}
ratio = of_get_property(cpunode, "processor-to-bus-ratio*2",
NULL);
if (ratio == NULL) {
pr_err("processor-to-bus-ratio*2 missing\n");
return 1;
}
/* Get the min/max bus frequencies */
low_freq = min(freqs[0], freqs[1]);
hi_freq = max(freqs[0], freqs[1]);
/* Grrrr... It _seems_ that the device-tree is lying about the low bus
* frequency: it claims it to be around 84 MHz on some models while
* it appears to be approx. 101 MHz on all. Let's hack around here...
* fortunately, we don't need to be too precise.
*/
if (low_freq < 98000000)
low_freq = 101000000;
/* Convert those to CPU core clocks */
low_freq = (low_freq * (*ratio)) / 2000;
hi_freq = (hi_freq * (*ratio)) / 2000;
/* Now that we have the frequencies, we read the GPIO to see what
* our current speed is.
*/
rc = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
cur_freq = (rc & 0x01) ? hi_freq : low_freq;
set_speed_proc = gpios_set_cpu_speed;
return 1;
}
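/*
 * Worked example for the GPIO path above (editor's illustration with
 * assumed device-tree values): bus-frequencies = <66000000 133000000>
 * and processor-to-bus-ratio*2 = 15 give, after the 101 MHz workaround,
 * low_freq = (101000000 * 15) / 2000 = 757500 kHz and
 * hi_freq = (133000000 * 15) / 2000 = 997500 kHz.
 */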
/* If we use the PMU, look for the min & max frequencies in the
* device-tree
*/
value = of_get_property(cpunode, "min-clock-frequency", NULL);
if (!value)
return 1;
low_freq = (*value) / 1000;
/* The PowerBook G4 12" (PowerBook6,1) has an error in the device-tree
* here */
if (low_freq < 100000)
low_freq *= 10;
value = of_get_property(cpunode, "max-clock-frequency", NULL);
if (!value)
return 1;
hi_freq = (*value) / 1000;
set_speed_proc = pmu_set_cpu_speed;
is_pmu_based = 1;
return 0;
}
static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
{
struct device_node *volt_gpio_np;
if (!of_property_read_bool(cpunode, "dynamic-power-step"))
return 1;
volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
if (volt_gpio_np)
voltage_gpio = read_gpio(volt_gpio_np);
of_node_put(volt_gpio_np);
if (!voltage_gpio) {
pr_err("missing cpu-vcore-select gpio\n");
return 1;
}
/* OF only reports the high frequency */
hi_freq = cur_freq;
low_freq = cur_freq/2;
/* Read actual frequency from CPU */
cur_freq = dfs_get_cpu_speed();
set_speed_proc = dfs_set_cpu_speed;
get_speed_proc = dfs_get_cpu_speed;
return 0;
}
static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
{
struct device_node *volt_gpio_np;
u32 pvr;
const u32 *value;
if (!of_property_read_bool(cpunode, "dynamic-power-step"))
return 1;
hi_freq = cur_freq;
value = of_get_property(cpunode, "reduced-clock-frequency", NULL);
if (!value)
return 1;
low_freq = (*value) / 1000;
volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
if (volt_gpio_np)
voltage_gpio = read_gpio(volt_gpio_np);
of_node_put(volt_gpio_np);
pvr = mfspr(SPRN_PVR);
has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
set_speed_proc = cpu_750fx_cpu_speed;
get_speed_proc = cpu_750fx_get_cpu_speed;
cur_freq = cpu_750fx_get_cpu_speed();
return 0;
}
/* Currently, we support the following machines:
*
* - Titanium PowerBook 1Ghz (PMU based, 667Mhz & 1Ghz)
* - Titanium PowerBook 800 (PMU based, 667Mhz & 800Mhz)
* - Titanium PowerBook 400 (PMU based, 300Mhz & 400Mhz)
* - Titanium PowerBook 500 (PMU based, 300Mhz & 500Mhz)
* - iBook2 500/600 (PMU based, 400Mhz & 500/600Mhz)
* - iBook2 700 (CPU based, 400Mhz & 700Mhz, support low voltage)
* - Recent MacRISC3 laptops
* - All new machines with 7447A CPUs
*/
static int __init pmac_cpufreq_setup(void)
{
struct device_node *cpunode;
const u32 *value;
if (strstr(boot_command_line, "nocpufreq"))
return 0;
/* Get first CPU node */
cpunode = of_cpu_device_node_get(0);
if (!cpunode)
goto out;
/* Get current cpu clock freq */
value = of_get_property(cpunode, "clock-frequency", NULL);
if (!value)
goto out;
cur_freq = (*value) / 1000;
/* Check for 7447A based MacRISC3 */
if (of_machine_is_compatible("MacRISC3") &&
of_property_read_bool(cpunode, "dynamic-power-step") &&
PVR_VER(mfspr(SPRN_PVR)) == 0x8003) {
pmac_cpufreq_init_7447A(cpunode);
/* Allow dynamic switching */
transition_latency = 8000000;
pmac_cpufreq_driver.flags &= ~CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING;
/* Check for other MacRISC3 machines */
} else if (of_machine_is_compatible("PowerBook3,4") ||
of_machine_is_compatible("PowerBook3,5") ||
of_machine_is_compatible("MacRISC3")) {
pmac_cpufreq_init_MacRISC3(cpunode);
/* Else check for iBook2 500/600 */
} else if (of_machine_is_compatible("PowerBook4,1")) {
hi_freq = cur_freq;
low_freq = 400000;
set_speed_proc = pmu_set_cpu_speed;
is_pmu_based = 1;
}
/* Else check for TiPb 550 */
else if (of_machine_is_compatible("PowerBook3,3") && cur_freq == 550000) {
hi_freq = cur_freq;
low_freq = 500000;
set_speed_proc = pmu_set_cpu_speed;
is_pmu_based = 1;
}
/* Else check for TiPb 400 & 500 */
else if (of_machine_is_compatible("PowerBook3,2")) {
/* We only know about the 400 MHz and the 500 MHz models;
* they both have 300 MHz as the low frequency.
*/
if (cur_freq < 350000 || cur_freq > 550000)
goto out;
hi_freq = cur_freq;
low_freq = 300000;
set_speed_proc = pmu_set_cpu_speed;
is_pmu_based = 1;
}
/* Else check for 750FX */
else if (PVR_VER(mfspr(SPRN_PVR)) == 0x7000)
pmac_cpufreq_init_750FX(cpunode);
out:
of_node_put(cpunode);
if (set_speed_proc == NULL)
return -ENODEV;
pmac_cpu_freqs[CPUFREQ_LOW].frequency = low_freq;
pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq;
ppc_proc_freq = cur_freq * 1000ul;
pr_info("Registering PowerMac CPU frequency driver\n");
pr_info("Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
low_freq/1000, hi_freq/1000, cur_freq/1000);
return cpufreq_register_driver(&pmac_cpufreq_driver);
}
module_init(pmac_cpufreq_setup);
| linux-master | drivers/cpufreq/pmac32-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Pentium 4/Xeon CPU on demand clock modulation/speed scaling
* (C) 2002 - 2003 Dominik Brodowski <[email protected]>
* (C) 2002 Zwane Mwaikambo <[email protected]>
* (C) 2002 Arjan van de Ven <[email protected]>
* (C) 2002 Tora T. Engstad
* All Rights Reserved
*
* The author(s) of this software shall not be held liable for damages
* of any nature resulting due to the use of this software. This
* software is provided AS-IS with no warranties.
*
* Date Errata Description
* 20020525 N44, O17 12.5% or 25% DC causes lockup
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/timex.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/timer.h>
#include <asm/cpu_device_id.h>
#include "speedstep-lib.h"
/*
* Duty Cycle (3 bits); note DC_DISABLE is not specified in
* the Intel docs, I just use it to mean disable
*/
enum {
DC_RESV, DC_DFLT, DC_25PT, DC_38PT, DC_50PT,
DC_64PT, DC_75PT, DC_88PT, DC_DISABLE
};
#define DC_ENTRIES 8
static int has_N44_O17_errata[NR_CPUS];
static unsigned int stock_freq;
static struct cpufreq_driver p4clockmod_driver;
static unsigned int cpufreq_p4_get(unsigned int cpu);
static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
{
u32 l, h;
if ((newstate > DC_DISABLE) || (newstate == DC_RESV))
return -EINVAL;
rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);
if (l & 0x01)
pr_debug("CPU#%d currently thermal throttled\n", cpu);
if (has_N44_O17_errata[cpu] &&
(newstate == DC_25PT || newstate == DC_DFLT))
newstate = DC_38PT;
rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
if (newstate == DC_DISABLE) {
pr_debug("CPU#%d disabling modulation\n", cpu);
wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
} else {
pr_debug("CPU#%d setting duty cycle to %d%%\n",
cpu, ((125 * newstate) / 10));
/* bits 63 - 5 : reserved
* bit 4 : enable/disable
* bits 3-1 : duty cycle
* bit 0 : reserved
*/
l = (l & ~14);
l = l | (1<<4) | ((newstate & 0x7)<<1);
wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h);
}
return 0;
}
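/*
 * Worked example for the MSR update above (editor's sketch): for
 * newstate = DC_50PT (4), l is first masked with ~14 to clear the
 * duty-cycle bits 3-1, then ORed with (1<<4) | (4<<1), i.e. modulation
 * enabled at a 50% duty cycle; DC_DISABLE instead clears bit 4 and
 * leaves the duty-cycle field alone.
 */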
static struct cpufreq_frequency_table p4clockmod_table[] = {
{0, DC_RESV, CPUFREQ_ENTRY_INVALID},
{0, DC_DFLT, 0},
{0, DC_25PT, 0},
{0, DC_38PT, 0},
{0, DC_50PT, 0},
{0, DC_64PT, 0},
{0, DC_75PT, 0},
{0, DC_88PT, 0},
{0, DC_DISABLE, 0},
{0, DC_RESV, CPUFREQ_TABLE_END},
};
static int cpufreq_p4_target(struct cpufreq_policy *policy, unsigned int index)
{
int i;
/* run on each logical CPU,
* see section 13.15.3 of IA32 Intel Architecture Software
* Developer's Manual, Volume 3
*/
for_each_cpu(i, policy->cpus)
cpufreq_p4_setdc(i, p4clockmod_table[index].driver_data);
return 0;
}
static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
{
if (c->x86 == 0x06) {
if (cpu_has(c, X86_FEATURE_EST))
pr_warn_once("Warning: EST-capable CPU detected. The acpi-cpufreq module offers voltage scaling in addition to frequency scaling. You should use that instead of p4-clockmod, if possible.\n");
switch (c->x86_model) {
case 0x0E: /* Core */
case 0x0F: /* Core Duo */
case 0x16: /* Celeron Core */
case 0x1C: /* Atom */
p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
case 0x0D: /* Pentium M (Dothan) */
p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
fallthrough;
case 0x09: /* Pentium M (Banias) */
return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
}
}
if (c->x86 != 0xF)
return 0;
/* on P-4s, the TSC runs at a constant frequency independently of
* whether throttling is active or not. */
p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
pr_warn("Warning: Pentium 4-M detected. The speedstep-ich or acpi cpufreq modules offer voltage scaling in addition of frequency scaling. You should use either one instead of p4-clockmod, if possible.\n");
return speedstep_get_frequency(SPEEDSTEP_CPU_P4M);
}
return speedstep_get_frequency(SPEEDSTEP_CPU_P4D);
}
static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
int cpuid = 0;
unsigned int i;
#ifdef CONFIG_SMP
cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu));
#endif
/* Errata workaround */
cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping;
switch (cpuid) {
case 0x0f07:
case 0x0f0a:
case 0x0f11:
case 0x0f12:
has_N44_O17_errata[policy->cpu] = 1;
pr_debug("has errata -- disabling low frequencies\n");
}
if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D &&
c->x86_model < 2) {
/* switch to maximum frequency and measure result */
cpufreq_p4_setdc(policy->cpu, DC_DISABLE);
recalibrate_cpu_khz();
}
/* get max frequency */
stock_freq = cpufreq_p4_get_frequency(c);
if (!stock_freq)
return -EINVAL;
/* table init */
for (i = 1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
if ((i < 2) && (has_N44_O17_errata[policy->cpu]))
p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
else
p4clockmod_table[i].frequency = (stock_freq * i)/8;
}
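/*
 * Example resulting table (editor's illustration, hypothetical
 * stock_freq of 2400000 kHz): the entries become 300000, 600000, ...,
 * 2400000 kHz, i.e. multiples of 12.5% of the stock frequency. On
 * N44/O17 parts the 12.5% entry is marked CPUFREQ_ENTRY_INVALID here,
 * and cpufreq_p4_setdc() above additionally remaps 25% requests to 38%.
 */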
/* cpuinfo and default policy values */
/* the transition latency is set to be 1 higher than the maximum
* transition latency of the ondemand governor */
policy->cpuinfo.transition_latency = 10000001;
policy->freq_table = &p4clockmod_table[0];
return 0;
}
static unsigned int cpufreq_p4_get(unsigned int cpu)
{
u32 l, h;
rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
if (l & 0x10) {
l = l >> 1;
l &= 0x7;
} else
l = DC_DISABLE;
if (l != DC_DISABLE)
return stock_freq * l / 8;
return stock_freq;
}
static struct cpufreq_driver p4clockmod_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = cpufreq_p4_target,
.init = cpufreq_p4_cpu_init,
.get = cpufreq_p4_get,
.name = "p4-clockmod",
.attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id cpufreq_p4_id[] = {
X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_ACC, NULL),
{}
};
/*
* Intentionally no MODULE_DEVICE_TABLE here: this driver should not
* be auto loaded. Please don't add one.
*/
static int __init cpufreq_p4_init(void)
{
int ret;
/*
* THERM_CONTROL is architectural for IA32 now, so
* we can rely on the capability checks
*/
if (!x86_match_cpu(cpufreq_p4_id) || !boot_cpu_has(X86_FEATURE_ACPI))
return -ENODEV;
ret = cpufreq_register_driver(&p4clockmod_driver);
if (!ret)
pr_info("P4/Xeon(TM) CPU On-Demand Clock Modulation available\n");
return ret;
}
static void __exit cpufreq_p4_exit(void)
{
cpufreq_unregister_driver(&p4clockmod_driver);
}
MODULE_AUTHOR("Zwane Mwaikambo <[email protected]>");
MODULE_DESCRIPTION("cpufreq driver for Pentium(TM) 4/Xeon(TM)");
MODULE_LICENSE("GPL");
late_initcall(cpufreq_p4_init);
module_exit(cpufreq_p4_exit);
| linux-master | drivers/cpufreq/p4-clockmod.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2002,2003 Intrinsyc Software
*
* History:
* 31-Jul-2002 : Initial version [FB]
* 29-Jan-2003 : added PXA255 support [FB]
* 20-Apr-2003 : ported to v2.5 (Dustin McIntire, Sensoria Corp.)
*
* Note:
* This driver may change the memory bus clock rate, but will not do any
* platform specific access timing changes... for example if you have flash
* memory connected to CS0, you will need to register a platform specific
* notifier which will adjust the memory access strobes to maintain a
* minimum strobe width.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/soc/pxa/cpu.h>
#include <linux/io.h>
#ifdef DEBUG
static unsigned int freq_debug;
module_param(freq_debug, uint, 0);
MODULE_PARM_DESC(freq_debug, "Set the debug messages to on=1/off=0");
#else
#define freq_debug 0
#endif
static struct regulator *vcc_core;
static unsigned int pxa27x_maxfreq;
module_param(pxa27x_maxfreq, uint, 0);
MODULE_PARM_DESC(pxa27x_maxfreq, "Set the pxa27x maxfreq in MHz "
"(typically 624=>pxa270, 416=>pxa271, 520=>pxa272)");
struct pxa_cpufreq_data {
struct clk *clk_core;
};
static struct pxa_cpufreq_data pxa_cpufreq_data;
struct pxa_freqs {
unsigned int khz;
int vmin;
int vmax;
};
/*
* PXA255 definitions
*/
static const struct pxa_freqs pxa255_run_freqs[] =
{
/* CPU MEMBUS run turbo PXbus SDRAM */
{ 99500, -1, -1}, /* 99, 99, 50, 50 */
{132700, -1, -1}, /* 133, 133, 66, 66 */
{199100, -1, -1}, /* 199, 199, 99, 99 */
{265400, -1, -1}, /* 265, 265, 133, 66 */
{331800, -1, -1}, /* 331, 331, 166, 83 */
{398100, -1, -1}, /* 398, 398, 196, 99 */
};
/* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */
static const struct pxa_freqs pxa255_turbo_freqs[] =
{
/* CPU run turbo PXbus SDRAM */
{ 99500, -1, -1}, /* 99, 99, 50, 50 */
{199100, -1, -1}, /* 99, 199, 50, 99 */
{298500, -1, -1}, /* 99, 287, 50, 99 */
{298600, -1, -1}, /* 199, 287, 99, 99 */
{398100, -1, -1}, /* 199, 398, 99, 99 */
};
#define NUM_PXA25x_RUN_FREQS ARRAY_SIZE(pxa255_run_freqs)
#define NUM_PXA25x_TURBO_FREQS ARRAY_SIZE(pxa255_turbo_freqs)
static struct cpufreq_frequency_table
pxa255_run_freq_table[NUM_PXA25x_RUN_FREQS+1];
static struct cpufreq_frequency_table
pxa255_turbo_freq_table[NUM_PXA25x_TURBO_FREQS+1];
static unsigned int pxa255_turbo_table;
module_param(pxa255_turbo_table, uint, 0);
MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table, !0 = turbo table)");
static struct pxa_freqs pxa27x_freqs[] = {
{104000, 900000, 1705000 },
{156000, 1000000, 1705000 },
{208000, 1180000, 1705000 },
{312000, 1250000, 1705000 },
{416000, 1350000, 1705000 },
{520000, 1450000, 1705000 },
{624000, 1550000, 1705000 }
};
#define NUM_PXA27x_FREQS ARRAY_SIZE(pxa27x_freqs)
static struct cpufreq_frequency_table
pxa27x_freq_table[NUM_PXA27x_FREQS+1];
#ifdef CONFIG_REGULATOR
static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
{
int ret = 0;
int vmin, vmax;
if (!cpu_is_pxa27x())
return 0;
vmin = pxa_freq->vmin;
vmax = pxa_freq->vmax;
if ((vmin == -1) || (vmax == -1))
return 0;
ret = regulator_set_voltage(vcc_core, vmin, vmax);
if (ret)
pr_err("Failed to set vcc_core in [%dmV..%dmV]\n", vmin, vmax);
return ret;
}
static void pxa_cpufreq_init_voltages(void)
{
vcc_core = regulator_get(NULL, "vcc_core");
if (IS_ERR(vcc_core)) {
pr_info("Didn't find vcc_core regulator\n");
vcc_core = NULL;
} else {
pr_info("Found vcc_core regulator\n");
}
}
#else
static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
{
return 0;
}
static void pxa_cpufreq_init_voltages(void) { }
#endif
static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
const struct pxa_freqs **pxa_freqs)
{
if (cpu_is_pxa25x()) {
if (!pxa255_turbo_table) {
*pxa_freqs = pxa255_run_freqs;
*freq_table = pxa255_run_freq_table;
} else {
*pxa_freqs = pxa255_turbo_freqs;
*freq_table = pxa255_turbo_freq_table;
}
} else if (cpu_is_pxa27x()) {
*pxa_freqs = pxa27x_freqs;
*freq_table = pxa27x_freq_table;
} else {
BUG();
}
}
static void pxa27x_guess_max_freq(void)
{
if (!pxa27x_maxfreq) {
pxa27x_maxfreq = 416000;
pr_info("PXA CPU 27x max frequency not defined (pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n",
pxa27x_maxfreq);
} else {
pxa27x_maxfreq *= 1000;
}
}
static unsigned int pxa_cpufreq_get(unsigned int cpu)
{
struct pxa_cpufreq_data *data = cpufreq_get_driver_data();
return (unsigned int) clk_get_rate(data->clk_core) / 1000;
}
static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
{
struct cpufreq_frequency_table *pxa_freqs_table;
const struct pxa_freqs *pxa_freq_settings;
struct pxa_cpufreq_data *data = cpufreq_get_driver_data();
unsigned int new_freq_cpu;
int ret = 0;
/* Get the current policy */
find_freq_tables(&pxa_freqs_table, &pxa_freq_settings);
new_freq_cpu = pxa_freq_settings[idx].khz;
if (freq_debug)
pr_debug("Changing CPU frequency from %d Mhz to %d Mhz\n",
policy->cur / 1000, new_freq_cpu / 1000);
if (vcc_core && new_freq_cpu > policy->cur) {
ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
if (ret)
return ret;
}
clk_set_rate(data->clk_core, new_freq_cpu * 1000);
/*
* Even if voltage setting fails, we don't report it, as the frequency
* change succeeded. The voltage reduction is not a critical failure,
* only power savings will suffer from this.
*
* Note: if the voltage change fails and an error value is returned, a
* bug is triggered (seemingly a deadlock). Should anybody find out
* where, the "return 0" should become a "return ret".
*/
if (vcc_core && new_freq_cpu < policy->cur)
ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
return 0;
}
static int pxa_cpufreq_init(struct cpufreq_policy *policy)
{
int i;
unsigned int freq;
struct cpufreq_frequency_table *pxa255_freq_table;
const struct pxa_freqs *pxa255_freqs;
/* try to guess pxa27x cpu */
if (cpu_is_pxa27x())
pxa27x_guess_max_freq();
pxa_cpufreq_init_voltages();
/* set default policy and cpuinfo */
policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
/* Generate the pxa25x run cpufreq_frequency_table struct */
for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) {
pxa255_run_freq_table[i].frequency = pxa255_run_freqs[i].khz;
pxa255_run_freq_table[i].driver_data = i;
}
pxa255_run_freq_table[i].frequency = CPUFREQ_TABLE_END;
/* Generate the pxa25x turbo cpufreq_frequency_table struct */
for (i = 0; i < NUM_PXA25x_TURBO_FREQS; i++) {
pxa255_turbo_freq_table[i].frequency =
pxa255_turbo_freqs[i].khz;
pxa255_turbo_freq_table[i].driver_data = i;
}
pxa255_turbo_freq_table[i].frequency = CPUFREQ_TABLE_END;
pxa255_turbo_table = !!pxa255_turbo_table;
/* Generate the pxa27x cpufreq_frequency_table struct */
for (i = 0; i < NUM_PXA27x_FREQS; i++) {
freq = pxa27x_freqs[i].khz;
if (freq > pxa27x_maxfreq)
break;
pxa27x_freq_table[i].frequency = freq;
pxa27x_freq_table[i].driver_data = i;
}
pxa27x_freq_table[i].driver_data = i;
pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END;
/*
* Set the policy's minimum and maximum frequencies from the tables
* just constructed. This sets cpuinfo.min_freq/max_freq, min and max.
*/
if (cpu_is_pxa25x()) {
find_freq_tables(&pxa255_freq_table, &pxa255_freqs);
pr_info("using %s frequency table\n",
pxa255_turbo_table ? "turbo" : "run");
policy->freq_table = pxa255_freq_table;
}
else if (cpu_is_pxa27x()) {
policy->freq_table = pxa27x_freq_table;
}
pr_info("frequency change support initialized\n");
return 0;
}
static struct cpufreq_driver pxa_cpufreq_driver = {
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = pxa_set_target,
.init = pxa_cpufreq_init,
.get = pxa_cpufreq_get,
.name = "PXA2xx",
.driver_data = &pxa_cpufreq_data,
};
static int __init pxa_cpu_init(void)
{
int ret = -ENODEV;
pxa_cpufreq_data.clk_core = clk_get_sys(NULL, "core");
if (IS_ERR(pxa_cpufreq_data.clk_core))
return PTR_ERR(pxa_cpufreq_data.clk_core);
if (cpu_is_pxa25x() || cpu_is_pxa27x())
ret = cpufreq_register_driver(&pxa_cpufreq_driver);
return ret;
}
static void __exit pxa_cpu_exit(void)
{
cpufreq_unregister_driver(&pxa_cpufreq_driver);
}
MODULE_AUTHOR("Intrinsyc Software Inc.");
MODULE_DESCRIPTION("CPU frequency changing driver for the PXA architecture");
MODULE_LICENSE("GPL");
module_init(pxa_cpu_init);
module_exit(pxa_cpu_exit);
| linux-master | drivers/cpufreq/pxa2xx-cpufreq.c |
/*
* pcc-cpufreq.c - Processor Clocking Control firmware cpufreq interface
*
* Copyright (C) 2009 Red Hat, Matthew Garrett <[email protected]>
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
* Nagananda Chumbalkar <[email protected]>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or NON
* INFRINGEMENT. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <acpi/processor.h>
#define PCC_VERSION "1.10.00"
#define POLL_LOOPS 300
#define CMD_COMPLETE 0x1
#define CMD_GET_FREQ 0x0
#define CMD_SET_FREQ 0x1
#define BUF_SZ 4
struct pcc_register_resource {
u8 descriptor;
u16 length;
u8 space_id;
u8 bit_width;
u8 bit_offset;
u8 access_size;
u64 address;
} __attribute__ ((packed));
struct pcc_memory_resource {
u8 descriptor;
u16 length;
u8 space_id;
u8 resource_usage;
u8 type_specific;
u64 granularity;
u64 minimum;
u64 maximum;
u64 translation_offset;
u64 address_length;
} __attribute__ ((packed));
static struct cpufreq_driver pcc_cpufreq_driver;
struct pcc_header {
u32 signature;
u16 length;
u8 major;
u8 minor;
u32 features;
u16 command;
u16 status;
u32 latency;
u32 minimum_time;
u32 maximum_time;
u32 nominal;
u32 throttled_frequency;
u32 minimum_frequency;
};
static void __iomem *pcch_virt_addr;
static struct pcc_header __iomem *pcch_hdr;
static DEFINE_SPINLOCK(pcc_lock);
static struct acpi_generic_address doorbell;
static u64 doorbell_preserve;
static u64 doorbell_write;
static u8 OSC_UUID[16] = {0x9F, 0x2C, 0x9B, 0x63, 0x91, 0x70, 0x1f, 0x49,
0xBB, 0x4F, 0xA5, 0x98, 0x2F, 0xA1, 0xB5, 0x46};
struct pcc_cpu {
u32 input_offset;
u32 output_offset;
};
static struct pcc_cpu __percpu *pcc_cpu_info;
static int pcc_cpufreq_verify(struct cpufreq_policy_data *policy)
{
cpufreq_verify_within_cpu_limits(policy);
return 0;
}
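/*
 * Ring the PCC doorbell and wait for the command to finish. The protocol,
 * as implemented below: read the doorbell register, write back the bits
 * the platform asked us to preserve OR-ed with the doorbell-write value,
 * then poll the PCCH status word until the firmware sets CMD_COMPLETE or
 * POLL_LOOPS iterations elapse.
 */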
static inline void pcc_cmd(void)
{
u64 doorbell_value;
int i;
acpi_read(&doorbell_value, &doorbell);
acpi_write((doorbell_value & doorbell_preserve) | doorbell_write,
&doorbell);
for (i = 0; i < POLL_LOOPS; i++) {
if (ioread16(&pcch_hdr->status) & CMD_COMPLETE)
break;
}
}
static inline void pcc_clear_mapping(void)
{
if (pcch_virt_addr)
iounmap(pcch_virt_addr);
pcch_virt_addr = NULL;
}
static unsigned int pcc_get_freq(unsigned int cpu)
{
struct pcc_cpu *pcc_cpu_data;
unsigned int curr_freq;
unsigned int freq_limit;
u16 status;
u32 input_buffer;
u32 output_buffer;
spin_lock(&pcc_lock);
pr_debug("get: get_freq for CPU %d\n", cpu);
pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
input_buffer = 0x1;
iowrite32(input_buffer,
(pcch_virt_addr + pcc_cpu_data->input_offset));
iowrite16(CMD_GET_FREQ, &pcch_hdr->command);
pcc_cmd();
output_buffer =
ioread32(pcch_virt_addr + pcc_cpu_data->output_offset);
/* Clear the input buffer - we are done with the current command */
memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
status = ioread16(&pcch_hdr->status);
if (status != CMD_COMPLETE) {
pr_debug("get: FAILED: for CPU %d, status is %d\n",
cpu, status);
goto cmd_incomplete;
}
iowrite16(0, &pcch_hdr->status);
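/*
 * Byte 0 of the output buffer holds the delivered frequency as a
 * percentage of the nominal frequency, so the result is converted to
 * kHz. Illustrative numbers: nominal = 2000 (MHz) and a percentage of
 * 50 yield 2000 * 50 / 100 * 1000 = 1000000 kHz.
 */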
curr_freq = (((ioread32(&pcch_hdr->nominal) * (output_buffer & 0xff))
/ 100) * 1000);
pr_debug("get: SUCCESS: (virtual) output_offset for cpu %d is "
"0x%p, contains a value of: 0x%x. Speed is: %d MHz\n",
cpu, (pcch_virt_addr + pcc_cpu_data->output_offset),
output_buffer, curr_freq);
freq_limit = (output_buffer >> 8) & 0xff;
if (freq_limit != 0xff) {
pr_debug("get: frequency for cpu %d is being temporarily"
" capped at %d\n", cpu, curr_freq);
}
spin_unlock(&pcc_lock);
return curr_freq;
cmd_incomplete:
iowrite16(0, &pcch_hdr->status);
spin_unlock(&pcc_lock);
return 0;
}
static int pcc_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
struct pcc_cpu *pcc_cpu_data;
struct cpufreq_freqs freqs;
u16 status;
u32 input_buffer;
int cpu;
cpu = policy->cpu;
pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
pr_debug("target: CPU %d should go to target freq: %d "
"(virtual) input_offset is 0x%p\n",
cpu, target_freq,
(pcch_virt_addr + pcc_cpu_data->input_offset));
freqs.old = policy->cur;
freqs.new = target_freq;
cpufreq_freq_transition_begin(policy, &freqs);
spin_lock(&pcc_lock);
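/*
 * Input buffer encoding, mirroring the decode in pcc_get_freq(): bit 0
 * marks the command valid and byte 1 carries the requested frequency as
 * a percentage of nominal. E.g. target_freq = 1000000 kHz with a
 * 2000 MHz nominal encodes 50 in byte 1.
 */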
input_buffer = 0x1 | (((target_freq * 100)
/ (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
iowrite32(input_buffer,
(pcch_virt_addr + pcc_cpu_data->input_offset));
iowrite16(CMD_SET_FREQ, &pcch_hdr->command);
pcc_cmd();
/* Clear the input buffer - we are done with the current command */
memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
status = ioread16(&pcch_hdr->status);
iowrite16(0, &pcch_hdr->status);
spin_unlock(&pcc_lock);
cpufreq_freq_transition_end(policy, &freqs, status != CMD_COMPLETE);
if (status != CMD_COMPLETE) {
pr_debug("target: FAILED for cpu %d, with status: 0x%x\n",
cpu, status);
return -EINVAL;
}
pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu);
return 0;
}
static int pcc_get_offset(int cpu)
{
acpi_status status;
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *pccp, *offset;
struct pcc_cpu *pcc_cpu_data;
struct acpi_processor *pr;
int ret = 0;
pr = per_cpu(processors, cpu);
pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
if (!pr)
return -ENODEV;
status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer);
if (ACPI_FAILURE(status))
return -ENODEV;
pccp = buffer.pointer;
if (!pccp || pccp->type != ACPI_TYPE_PACKAGE) {
ret = -ENODEV;
goto out_free;
}
offset = &(pccp->package.elements[0]);
if (!offset || offset->type != ACPI_TYPE_INTEGER) {
ret = -ENODEV;
goto out_free;
}
pcc_cpu_data->input_offset = offset->integer.value;
offset = &(pccp->package.elements[1]);
if (!offset || offset->type != ACPI_TYPE_INTEGER) {
ret = -ENODEV;
goto out_free;
}
pcc_cpu_data->output_offset = offset->integer.value;
memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
memset_io((pcch_virt_addr + pcc_cpu_data->output_offset), 0, BUF_SZ);
pr_debug("pcc_get_offset: for CPU %d: pcc_cpu_data "
"input_offset: 0x%x, pcc_cpu_data output_offset: 0x%x\n",
cpu, pcc_cpu_data->input_offset, pcc_cpu_data->output_offset);
out_free:
kfree(buffer.pointer);
return ret;
}
static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
{
acpi_status status;
struct acpi_object_list input;
struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object in_params[4];
union acpi_object *out_obj;
u32 capabilities[2];
u32 errors;
u32 supported;
int ret = 0;
input.count = 4;
input.pointer = in_params;
in_params[0].type = ACPI_TYPE_BUFFER;
in_params[0].buffer.length = 16;
in_params[0].buffer.pointer = OSC_UUID;
in_params[1].type = ACPI_TYPE_INTEGER;
in_params[1].integer.value = 1;
in_params[2].type = ACPI_TYPE_INTEGER;
in_params[2].integer.value = 2;
in_params[3].type = ACPI_TYPE_BUFFER;
in_params[3].buffer.length = 8;
in_params[3].buffer.pointer = (u8 *)&capabilities;
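/* First pass: only query support for the capability (query bit set). */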
capabilities[0] = OSC_QUERY_ENABLE;
capabilities[1] = 0x1;
status = acpi_evaluate_object(*handle, "_OSC", &input, &output);
if (ACPI_FAILURE(status))
return -ENODEV;
if (!output.length)
return -ENODEV;
out_obj = output.pointer;
if (out_obj->type != ACPI_TYPE_BUFFER) {
ret = -ENODEV;
goto out_free;
}
errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
if (errors) {
ret = -ENODEV;
goto out_free;
}
supported = *((u32 *)(out_obj->buffer.pointer + 4));
if (!(supported & 0x1)) {
ret = -ENODEV;
goto out_free;
}
kfree(output.pointer);
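/* Second pass: actually request control (query bit cleared). */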
capabilities[0] = 0x0;
capabilities[1] = 0x1;
status = acpi_evaluate_object(*handle, "_OSC", &input, &output);
if (ACPI_FAILURE(status))
return -ENODEV;
if (!output.length)
return -ENODEV;
out_obj = output.pointer;
if (out_obj->type != ACPI_TYPE_BUFFER) {
ret = -ENODEV;
goto out_free;
}
errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
if (errors) {
ret = -ENODEV;
goto out_free;
}
supported = *((u32 *)(out_obj->buffer.pointer + 4));
if (!(supported & 0x1)) {
ret = -ENODEV;
goto out_free;
}
out_free:
kfree(output.pointer);
return ret;
}
static int __init pcc_cpufreq_evaluate(void)
{
acpi_status status;
struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
struct pcc_memory_resource *mem_resource;
struct pcc_register_resource *reg_resource;
union acpi_object *out_obj, *member;
acpi_handle handle, osc_handle;
int ret = 0;
status = acpi_get_handle(NULL, "\\_SB", &handle);
if (ACPI_FAILURE(status))
return -ENODEV;
if (!acpi_has_method(handle, "PCCH"))
return -ENODEV;
status = acpi_get_handle(handle, "_OSC", &osc_handle);
if (ACPI_SUCCESS(status)) {
ret = pcc_cpufreq_do_osc(&osc_handle);
if (ret)
pr_debug("probe: _OSC evaluation did not succeed\n");
/* Firmware's use of _OSC is optional */
ret = 0;
}
status = acpi_evaluate_object(handle, "PCCH", NULL, &output);
if (ACPI_FAILURE(status))
return -ENODEV;
out_obj = output.pointer;
if (out_obj->type != ACPI_TYPE_PACKAGE) {
ret = -ENODEV;
goto out_free;
}
member = &out_obj->package.elements[0];
if (member->type != ACPI_TYPE_BUFFER) {
ret = -ENODEV;
goto out_free;
}
mem_resource = (struct pcc_memory_resource *)member->buffer.pointer;
pr_debug("probe: mem_resource descriptor: 0x%x,"
" length: %d, space_id: %d, resource_usage: %d,"
" type_specific: %d, granularity: 0x%llx,"
" minimum: 0x%llx, maximum: 0x%llx,"
" translation_offset: 0x%llx, address_length: 0x%llx\n",
mem_resource->descriptor, mem_resource->length,
mem_resource->space_id, mem_resource->resource_usage,
mem_resource->type_specific, mem_resource->granularity,
mem_resource->minimum, mem_resource->maximum,
mem_resource->translation_offset,
mem_resource->address_length);
if (mem_resource->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
ret = -ENODEV;
goto out_free;
}
pcch_virt_addr = ioremap(mem_resource->minimum,
mem_resource->address_length);
if (pcch_virt_addr == NULL) {
pr_debug("probe: could not map shared mem region\n");
ret = -ENOMEM;
goto out_free;
}
pcch_hdr = pcch_virt_addr;
pr_debug("probe: PCCH header (virtual) addr: 0x%p\n", pcch_hdr);
pr_debug("probe: PCCH header is at physical address: 0x%llx,"
" signature: 0x%x, length: %d bytes, major: %d, minor: %d,"
" supported features: 0x%x, command field: 0x%x,"
" status field: 0x%x, nominal latency: %d us\n",
mem_resource->minimum, ioread32(&pcch_hdr->signature),
ioread16(&pcch_hdr->length), ioread8(&pcch_hdr->major),
ioread8(&pcch_hdr->minor), ioread32(&pcch_hdr->features),
ioread16(&pcch_hdr->command), ioread16(&pcch_hdr->status),
ioread32(&pcch_hdr->latency));
pr_debug("probe: min time between commands: %d us,"
" max time between commands: %d us,"
" nominal CPU frequency: %d MHz,"
" minimum CPU frequency: %d MHz,"
" minimum CPU frequency without throttling: %d MHz\n",
ioread32(&pcch_hdr->minimum_time),
ioread32(&pcch_hdr->maximum_time),
ioread32(&pcch_hdr->nominal),
ioread32(&pcch_hdr->throttled_frequency),
ioread32(&pcch_hdr->minimum_frequency));
member = &out_obj->package.elements[1];
if (member->type != ACPI_TYPE_BUFFER) {
ret = -ENODEV;
goto pcch_free;
}
reg_resource = (struct pcc_register_resource *)member->buffer.pointer;
doorbell.space_id = reg_resource->space_id;
doorbell.bit_width = reg_resource->bit_width;
doorbell.bit_offset = reg_resource->bit_offset;
doorbell.access_width = 4;
doorbell.address = reg_resource->address;
pr_debug("probe: doorbell: space_id is %d, bit_width is %d, "
"bit_offset is %d, access_width is %d, address is 0x%llx\n",
doorbell.space_id, doorbell.bit_width, doorbell.bit_offset,
doorbell.access_width, reg_resource->address);
member = &out_obj->package.elements[2];
if (member->type != ACPI_TYPE_INTEGER) {
ret = -ENODEV;
goto pcch_free;
}
doorbell_preserve = member->integer.value;
member = &out_obj->package.elements[3];
if (member->type != ACPI_TYPE_INTEGER) {
ret = -ENODEV;
goto pcch_free;
}
doorbell_write = member->integer.value;
pr_debug("probe: doorbell_preserve: 0x%llx,"
" doorbell_write: 0x%llx\n",
doorbell_preserve, doorbell_write);
pcc_cpu_info = alloc_percpu(struct pcc_cpu);
if (!pcc_cpu_info) {
ret = -ENOMEM;
goto pcch_free;
}
printk(KERN_DEBUG "pcc-cpufreq: (v%s) driver loaded with frequency"
" limits: %d MHz, %d MHz\n", PCC_VERSION,
ioread32(&pcch_hdr->minimum_frequency),
ioread32(&pcch_hdr->nominal));
kfree(output.pointer);
return ret;
pcch_free:
pcc_clear_mapping();
out_free:
kfree(output.pointer);
return ret;
}
static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
unsigned int result = 0;
if (!pcch_virt_addr) {
result = -1;
goto out;
}
result = pcc_get_offset(cpu);
if (result) {
pr_debug("init: PCCP evaluation failed\n");
goto out;
}
policy->max = policy->cpuinfo.max_freq =
ioread32(&pcch_hdr->nominal) * 1000;
policy->min = policy->cpuinfo.min_freq =
ioread32(&pcch_hdr->minimum_frequency) * 1000;
pr_debug("init: policy->max is %d, policy->min is %d\n",
policy->max, policy->min);
out:
return result;
}
static int pcc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
return 0;
}
static struct cpufreq_driver pcc_cpufreq_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.get = pcc_get_freq,
.verify = pcc_cpufreq_verify,
.target = pcc_cpufreq_target,
.init = pcc_cpufreq_cpu_init,
.exit = pcc_cpufreq_cpu_exit,
.name = "pcc-cpufreq",
};
static int __init pcc_cpufreq_probe(struct platform_device *pdev)
{
int ret;
/* Skip initialization if another cpufreq driver is there. */
if (cpufreq_get_current_driver())
return -ENODEV;
if (acpi_disabled)
return -ENODEV;
ret = pcc_cpufreq_evaluate();
if (ret) {
pr_debug("pcc_cpufreq_probe: PCCH evaluation failed\n");
return ret;
}
if (num_present_cpus() > 4) {
pcc_cpufreq_driver.flags |= CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING;
pr_err("%s: Too many CPUs, dynamic performance scaling disabled\n",
__func__);
pr_err("%s: Try to enable another scaling driver through BIOS settings\n",
__func__);
pr_err("%s: and complain to the system vendor\n", __func__);
}
ret = cpufreq_register_driver(&pcc_cpufreq_driver);
return ret;
}
static void pcc_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&pcc_cpufreq_driver);
pcc_clear_mapping();
free_percpu(pcc_cpu_info);
}
static struct platform_driver pcc_cpufreq_platdrv = {
.driver = {
.name = "pcc-cpufreq",
},
.remove_new = pcc_cpufreq_remove,
};
static int __init pcc_cpufreq_init(void)
{
return platform_driver_probe(&pcc_cpufreq_platdrv, pcc_cpufreq_probe);
}
static void __exit pcc_cpufreq_exit(void)
{
platform_driver_unregister(&pcc_cpufreq_platdrv);
}
MODULE_ALIAS("platform:pcc-cpufreq");
MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar");
MODULE_VERSION(PCC_VERSION);
MODULE_DESCRIPTION("Processor Clocking Control interface driver");
MODULE_LICENSE("GPL");
late_initcall(pcc_cpufreq_init);
module_exit(pcc_cpufreq_exit);
| linux-master | drivers/cpufreq/pcc-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* CPU frequency scaling support for Armada 37xx platform.
*
* Copyright (C) 2017 Marvell
*
* Gregory CLEMENT <[email protected]>
*/
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include "cpufreq-dt.h"
/* Clk register set */
#define ARMADA_37XX_CLK_TBG_SEL 0
#define ARMADA_37XX_CLK_TBG_SEL_CPU_OFF 22
/* Power management in North Bridge register set */
#define ARMADA_37XX_NB_L0L1 0x18
#define ARMADA_37XX_NB_L2L3 0x1C
#define ARMADA_37XX_NB_TBG_DIV_OFF 13
#define ARMADA_37XX_NB_TBG_DIV_MASK 0x7
#define ARMADA_37XX_NB_CLK_SEL_OFF 11
#define ARMADA_37XX_NB_CLK_SEL_MASK 0x1
#define ARMADA_37XX_NB_CLK_SEL_TBG 0x1
#define ARMADA_37XX_NB_TBG_SEL_OFF 9
#define ARMADA_37XX_NB_TBG_SEL_MASK 0x3
#define ARMADA_37XX_NB_VDD_SEL_OFF 6
#define ARMADA_37XX_NB_VDD_SEL_MASK 0x3
#define ARMADA_37XX_NB_CONFIG_SHIFT 16
#define ARMADA_37XX_NB_DYN_MOD 0x24
#define ARMADA_37XX_NB_CLK_SEL_EN BIT(26)
#define ARMADA_37XX_NB_TBG_EN BIT(28)
#define ARMADA_37XX_NB_DIV_EN BIT(29)
#define ARMADA_37XX_NB_VDD_EN BIT(30)
#define ARMADA_37XX_NB_DFS_EN BIT(31)
#define ARMADA_37XX_NB_CPU_LOAD 0x30
#define ARMADA_37XX_NB_CPU_LOAD_MASK 0x3
#define ARMADA_37XX_DVFS_LOAD_0 0
#define ARMADA_37XX_DVFS_LOAD_1 1
#define ARMADA_37XX_DVFS_LOAD_2 2
#define ARMADA_37XX_DVFS_LOAD_3 3
/* AVS register set */
#define ARMADA_37XX_AVS_CTL0 0x0
#define ARMADA_37XX_AVS_ENABLE BIT(30)
#define ARMADA_37XX_AVS_HIGH_VDD_LIMIT 16
#define ARMADA_37XX_AVS_LOW_VDD_LIMIT 22
#define ARMADA_37XX_AVS_VDD_MASK 0x3F
#define ARMADA_37XX_AVS_CTL2 0x8
#define ARMADA_37XX_AVS_LOW_VDD_EN BIT(6)
#define ARMADA_37XX_AVS_VSET(x) (0x1C + 4 * (x))
/*
* On Armada 37xx the power management unit manages 4 levels of CPU load;
* each level can be associated with a CPU clock source, a CPU
* divider, a VDD level, etc.
*/
#define LOAD_LEVEL_NR 4
#define MIN_VOLT_MV 1000
#define MIN_VOLT_MV_FOR_L1_1000MHZ 1108
#define MIN_VOLT_MV_FOR_L1_1200MHZ 1155
/* AVS value for the corresponding voltage (in mV) */
static int avs_map[] = {
747, 758, 770, 782, 793, 805, 817, 828, 840, 852, 863, 875, 887, 898,
910, 922, 933, 945, 957, 968, 980, 992, 1003, 1015, 1027, 1038, 1050,
1062, 1073, 1085, 1097, 1108, 1120, 1132, 1143, 1155, 1167, 1178, 1190,
1202, 1213, 1225, 1237, 1248, 1260, 1272, 1283, 1295, 1307, 1318, 1330,
1342
};
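/*
 * Each table entry is one AVS step of roughly 11-12 mV; e.g. index 31
 * maps to 1108 mV, the L1 minimum used below for the 1000 MHz variant.
 */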
struct armada37xx_cpufreq_state {
struct platform_device *pdev;
struct device *cpu_dev;
struct regmap *regmap;
u32 nb_l0l1;
u32 nb_l2l3;
u32 nb_dyn_mod;
u32 nb_cpu_load;
};
static struct armada37xx_cpufreq_state *armada37xx_cpufreq_state;
struct armada_37xx_dvfs {
u32 cpu_freq_max;
u8 divider[LOAD_LEVEL_NR];
u32 avs[LOAD_LEVEL_NR];
};
static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
/*
* The cpufreq scaling for 1.2 GHz variant of the SOC is currently
* unstable because we do not know how to configure it properly.
*/
/* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */
{.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
{.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
{.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },
};
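/*
 * The frequency of a load level is the base (maximum) CPU frequency
 * divided by the per-level divider, so the 1 GHz entry above yields
 * load levels of 1000, 500, 250 and 200 MHz.
 */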
static struct armada_37xx_dvfs *armada_37xx_cpu_freq_info_get(u32 freq)
{
int i;
for (i = 0; i < ARRAY_SIZE(armada_37xx_dvfs); i++) {
if (freq == armada_37xx_dvfs[i].cpu_freq_max)
return &armada_37xx_dvfs[i];
}
pr_err("Unsupported CPU frequency %d MHz\n", freq/1000000);
return NULL;
}
/*
* Set up the four load levels managed by the hardware. Once the four
* levels are configured, DVFS can be enabled.
*/
static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
struct regmap *clk_base, u8 *divider)
{
u32 cpu_tbg_sel;
int load_lvl;
/* Determine to which TBG clock is CPU connected */
regmap_read(clk_base, ARMADA_37XX_CLK_TBG_SEL, &cpu_tbg_sel);
cpu_tbg_sel >>= ARMADA_37XX_CLK_TBG_SEL_CPU_OFF;
cpu_tbg_sel &= ARMADA_37XX_NB_TBG_SEL_MASK;
for (load_lvl = 0; load_lvl < LOAD_LEVEL_NR; load_lvl++) {
unsigned int reg, mask, val, offset = 0;
if (load_lvl <= ARMADA_37XX_DVFS_LOAD_1)
reg = ARMADA_37XX_NB_L0L1;
else
reg = ARMADA_37XX_NB_L2L3;
if (load_lvl == ARMADA_37XX_DVFS_LOAD_0 ||
load_lvl == ARMADA_37XX_DVFS_LOAD_2)
offset += ARMADA_37XX_NB_CONFIG_SHIFT;
/* Set cpu clock source, for all the level we use TBG */
val = ARMADA_37XX_NB_CLK_SEL_TBG << ARMADA_37XX_NB_CLK_SEL_OFF;
mask = (ARMADA_37XX_NB_CLK_SEL_MASK
<< ARMADA_37XX_NB_CLK_SEL_OFF);
/* Set TBG index, for all levels we use the same TBG */
val |= cpu_tbg_sel << ARMADA_37XX_NB_TBG_SEL_OFF;
mask |= (ARMADA_37XX_NB_TBG_SEL_MASK
<< ARMADA_37XX_NB_TBG_SEL_OFF);
/*
* Set cpu divider based on the pre-computed array in
* order to have balanced step.
*/
val |= divider[load_lvl] << ARMADA_37XX_NB_TBG_DIV_OFF;
mask |= (ARMADA_37XX_NB_TBG_DIV_MASK
<< ARMADA_37XX_NB_TBG_DIV_OFF);
/* Set VDD divider which is actually the load level. */
val |= load_lvl << ARMADA_37XX_NB_VDD_SEL_OFF;
mask |= (ARMADA_37XX_NB_VDD_SEL_MASK
<< ARMADA_37XX_NB_VDD_SEL_OFF);
val <<= offset;
mask <<= offset;
regmap_update_bits(base, reg, mask, val);
}
}
/*
* Find the Armada 37xx supported AVS value whose voltage is the
* closest one at or above the target voltage.
*/
static u32 armada_37xx_avs_val_match(int target_vm)
{
u32 avs;
/* Find out the round-up closest supported voltage value */
for (avs = 0; avs < ARRAY_SIZE(avs_map); avs++)
if (avs_map[avs] >= target_vm)
break;
/*
* If all supported voltages are smaller than target one,
* choose the largest supported voltage
*/
if (avs == ARRAY_SIZE(avs_map))
avs = ARRAY_SIZE(avs_map) - 1;
return avs;
}
/*
* For the Armada 37xx SoC, the L0 (VSET0) VDD AVS value is set to the SVC
* revision value, or to a default value when SVC is not supported.
* - L0 can be read out from the AVS_CTRL_0 register and the L0 voltage
* can be obtained from the avs_map table.
* - The L1 voltage should be about 100mV smaller than the L0 voltage.
* - The L2 & L3 voltages should be about 150mV smaller than the L0 voltage.
* This function calculates the L1, L2 and L3 AVS values dynamically based
* on the L0 voltage and fills all AVS values into the AVS value table.
* When the base CPU frequency is 1000 or 1200 MHz, there is an additional
* minimal AVS value for load L1.
*/
static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
struct armada_37xx_dvfs *dvfs)
{
unsigned int target_vm;
int load_level = 0;
u32 l0_vdd_min;
if (base == NULL)
return;
/* Get L0 VDD min value */
regmap_read(base, ARMADA_37XX_AVS_CTL0, &l0_vdd_min);
l0_vdd_min = (l0_vdd_min >> ARMADA_37XX_AVS_LOW_VDD_LIMIT) &
ARMADA_37XX_AVS_VDD_MASK;
if (l0_vdd_min >= ARRAY_SIZE(avs_map)) {
pr_err("L0 VDD MIN %d is not correct.\n", l0_vdd_min);
return;
}
dvfs->avs[0] = l0_vdd_min;
if (avs_map[l0_vdd_min] <= MIN_VOLT_MV) {
/*
* If L0 voltage is smaller than 1000mv, then all VDD sets
* use L0 voltage;
*/
u32 avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV);
for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++)
dvfs->avs[load_level] = avs_min;
/*
* Set the avs values for load L0 and L1 when base CPU frequency
* is 1000/1200 MHz to its typical initial values according to
* the Armada 3700 Hardware Specifications.
*/
if (dvfs->cpu_freq_max >= 1000*1000*1000) {
if (dvfs->cpu_freq_max >= 1200*1000*1000)
avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
else
avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
dvfs->avs[0] = dvfs->avs[1] = avs_min;
}
return;
}
/*
* L1 voltage is equal to L0 voltage - 100mv and it must be
* larger than 1000mv
*/
target_vm = avs_map[l0_vdd_min] - 100;
target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
dvfs->avs[1] = armada_37xx_avs_val_match(target_vm);
/*
* L2 & L3 voltage is equal to L0 voltage - 150mv and it must
* be larger than 1000mv
*/
target_vm = avs_map[l0_vdd_min] - 150;
target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
/*
* Fix the avs value for load L1 when base CPU frequency is 1000/1200 MHz,
* otherwise the CPU gets stuck when switching from load L1 to load L0.
* Also ensure that avs value for load L1 is not higher than for L0.
*/
if (dvfs->cpu_freq_max >= 1000*1000*1000) {
u32 avs_min_l1;
if (dvfs->cpu_freq_max >= 1200*1000*1000)
avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
else
avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
if (avs_min_l1 > dvfs->avs[0])
avs_min_l1 = dvfs->avs[0];
if (dvfs->avs[1] < avs_min_l1)
dvfs->avs[1] = avs_min_l1;
}
}
static void __init armada37xx_cpufreq_avs_setup(struct regmap *base,
struct armada_37xx_dvfs *dvfs)
{
unsigned int avs_val = 0;
int load_level = 0;
if (base == NULL)
return;
/* Disable AVS before the configuration */
regmap_update_bits(base, ARMADA_37XX_AVS_CTL0,
ARMADA_37XX_AVS_ENABLE, 0);
/* Enable low voltage mode */
regmap_update_bits(base, ARMADA_37XX_AVS_CTL2,
ARMADA_37XX_AVS_LOW_VDD_EN,
ARMADA_37XX_AVS_LOW_VDD_EN);
for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++) {
avs_val = dvfs->avs[load_level];
regmap_update_bits(base, ARMADA_37XX_AVS_VSET(load_level-1),
ARMADA_37XX_AVS_VDD_MASK << ARMADA_37XX_AVS_HIGH_VDD_LIMIT |
ARMADA_37XX_AVS_VDD_MASK << ARMADA_37XX_AVS_LOW_VDD_LIMIT,
avs_val << ARMADA_37XX_AVS_HIGH_VDD_LIMIT |
avs_val << ARMADA_37XX_AVS_LOW_VDD_LIMIT);
}
/* Enable AVS after the configuration */
regmap_update_bits(base, ARMADA_37XX_AVS_CTL0,
ARMADA_37XX_AVS_ENABLE,
ARMADA_37XX_AVS_ENABLE);
}
static void armada37xx_cpufreq_disable_dvfs(struct regmap *base)
{
unsigned int reg = ARMADA_37XX_NB_DYN_MOD,
mask = ARMADA_37XX_NB_DFS_EN;
regmap_update_bits(base, reg, mask, 0);
}
static void __init armada37xx_cpufreq_enable_dvfs(struct regmap *base)
{
unsigned int val, reg = ARMADA_37XX_NB_CPU_LOAD,
mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
/* Start with the highest load (0) */
val = ARMADA_37XX_DVFS_LOAD_0;
regmap_update_bits(base, reg, mask, val);
/* Now enable DVFS for the CPUs */
reg = ARMADA_37XX_NB_DYN_MOD;
mask = ARMADA_37XX_NB_CLK_SEL_EN | ARMADA_37XX_NB_TBG_EN |
ARMADA_37XX_NB_DIV_EN | ARMADA_37XX_NB_VDD_EN |
ARMADA_37XX_NB_DFS_EN;
regmap_update_bits(base, reg, mask, mask);
}
static int armada37xx_cpufreq_suspend(struct cpufreq_policy *policy)
{
struct armada37xx_cpufreq_state *state = armada37xx_cpufreq_state;
regmap_read(state->regmap, ARMADA_37XX_NB_L0L1, &state->nb_l0l1);
regmap_read(state->regmap, ARMADA_37XX_NB_L2L3, &state->nb_l2l3);
regmap_read(state->regmap, ARMADA_37XX_NB_CPU_LOAD,
&state->nb_cpu_load);
regmap_read(state->regmap, ARMADA_37XX_NB_DYN_MOD, &state->nb_dyn_mod);
return 0;
}
static int armada37xx_cpufreq_resume(struct cpufreq_policy *policy)
{
struct armada37xx_cpufreq_state *state = armada37xx_cpufreq_state;
/* Ensure DVFS is disabled otherwise the following registers are RO */
armada37xx_cpufreq_disable_dvfs(state->regmap);
regmap_write(state->regmap, ARMADA_37XX_NB_L0L1, state->nb_l0l1);
regmap_write(state->regmap, ARMADA_37XX_NB_L2L3, state->nb_l2l3);
regmap_write(state->regmap, ARMADA_37XX_NB_CPU_LOAD,
state->nb_cpu_load);
/*
* The NB_DYN_MOD register is the one that actually re-enables DVFS if it
* was enabled before the suspend operation. This must be done last,
* otherwise the other registers are not writable.
*/
regmap_write(state->regmap, ARMADA_37XX_NB_DYN_MOD, state->nb_dyn_mod);
return 0;
}
static int __init armada37xx_cpufreq_driver_init(void)
{
struct cpufreq_dt_platform_data pdata;
struct armada_37xx_dvfs *dvfs;
struct platform_device *pdev;
unsigned long freq;
unsigned int base_frequency;
struct regmap *nb_clk_base, *nb_pm_base, *avs_base;
struct device *cpu_dev;
int load_lvl, ret;
struct clk *clk, *parent;
nb_clk_base =
syscon_regmap_lookup_by_compatible("marvell,armada-3700-periph-clock-nb");
if (IS_ERR(nb_clk_base))
return -ENODEV;
nb_pm_base =
syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
if (IS_ERR(nb_pm_base))
return -ENODEV;
avs_base =
syscon_regmap_lookup_by_compatible("marvell,armada-3700-avs");
/* If AVS is not present, don't use it but still try to set up DVFS */
if (IS_ERR(avs_base)) {
pr_info("Syscon failed for Adapting Voltage Scaling: skip it\n");
avs_base = NULL;
}
/* Before doing any configuration on the DVFS first, disable it */
armada37xx_cpufreq_disable_dvfs(nb_pm_base);
/*
* On CPU 0 register the operating points supported (which are
* the nominal CPU frequency and full integer divisions of
* it).
*/
cpu_dev = get_cpu_device(0);
if (!cpu_dev) {
pr_err("Cannot get CPU\n");
return -ENODEV;
}
clk = clk_get(cpu_dev, NULL);
if (IS_ERR(clk)) {
dev_err(cpu_dev, "Cannot get clock for CPU0\n");
return PTR_ERR(clk);
}
parent = clk_get_parent(clk);
if (IS_ERR(parent)) {
dev_err(cpu_dev, "Cannot get parent clock for CPU0\n");
clk_put(clk);
return PTR_ERR(parent);
}
/* Get parent CPU frequency */
base_frequency = clk_get_rate(parent);
if (!base_frequency) {
dev_err(cpu_dev, "Failed to get parent clock rate for CPU\n");
clk_put(clk);
return -EINVAL;
}
dvfs = armada_37xx_cpu_freq_info_get(base_frequency);
if (!dvfs) {
clk_put(clk);
return -EINVAL;
}
armada37xx_cpufreq_state = kmalloc(sizeof(*armada37xx_cpufreq_state),
GFP_KERNEL);
if (!armada37xx_cpufreq_state) {
clk_put(clk);
return -ENOMEM;
}
armada37xx_cpufreq_state->regmap = nb_pm_base;
armada37xx_cpufreq_avs_configure(avs_base, dvfs);
armada37xx_cpufreq_avs_setup(avs_base, dvfs);
armada37xx_cpufreq_dvfs_setup(nb_pm_base, nb_clk_base, dvfs->divider);
clk_put(clk);
for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
load_lvl++) {
unsigned long u_volt = avs_map[dvfs->avs[load_lvl]] * 1000;
freq = base_frequency / dvfs->divider[load_lvl];
ret = dev_pm_opp_add(cpu_dev, freq, u_volt);
if (ret)
goto remove_opp;
}
/* Now that everything is setup, enable the DVFS at hardware level */
armada37xx_cpufreq_enable_dvfs(nb_pm_base);
memset(&pdata, 0, sizeof(pdata));
pdata.suspend = armada37xx_cpufreq_suspend;
pdata.resume = armada37xx_cpufreq_resume;
pdev = platform_device_register_data(NULL, "cpufreq-dt", -1, &pdata,
sizeof(pdata));
ret = PTR_ERR_OR_ZERO(pdev);
if (ret)
goto disable_dvfs;
armada37xx_cpufreq_state->cpu_dev = cpu_dev;
armada37xx_cpufreq_state->pdev = pdev;
platform_set_drvdata(pdev, dvfs);
return 0;
disable_dvfs:
armada37xx_cpufreq_disable_dvfs(nb_pm_base);
remove_opp:
/* clean-up the already added opp before leaving */
while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
freq = base_frequency / dvfs->divider[load_lvl];
dev_pm_opp_remove(cpu_dev, freq);
}
kfree(armada37xx_cpufreq_state);
return ret;
}
/* late_initcall, to guarantee the driver is loaded after A37xx clock driver */
late_initcall(armada37xx_cpufreq_driver_init);
static void __exit armada37xx_cpufreq_driver_exit(void)
{
struct platform_device *pdev = armada37xx_cpufreq_state->pdev;
struct armada_37xx_dvfs *dvfs = platform_get_drvdata(pdev);
unsigned long freq;
int load_lvl;
platform_device_unregister(pdev);
armada37xx_cpufreq_disable_dvfs(armada37xx_cpufreq_state->regmap);
for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR; load_lvl++) {
freq = dvfs->cpu_freq_max / dvfs->divider[load_lvl];
dev_pm_opp_remove(armada37xx_cpufreq_state->cpu_dev, freq);
}
kfree(armada37xx_cpufreq_state);
}
module_exit(armada37xx_cpufreq_driver_exit);
static const struct of_device_id __maybe_unused armada37xx_cpufreq_of_match[] = {
{ .compatible = "marvell,armada-3700-nb-pm" },
{ },
};
MODULE_DEVICE_TABLE(of, armada37xx_cpufreq_of_match);
MODULE_AUTHOR("Gregory CLEMENT <[email protected]>");
MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/cpufreq/armada-37xx-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file was based upon code in Powertweak Linux (http://powertweak.sf.net)
* (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä,
* Dominik Brodowski.
*
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/ioport.h>
#include <linux/timex.h>
#include <linux/io.h>
#include <asm/cpu_device_id.h>
#include <asm/msr.h>
#define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long
as it is unused */
static unsigned int busfreq; /* FSB, in 10 kHz */
static unsigned int max_multiplier;
static unsigned int param_busfreq = 0;
static unsigned int param_max_multiplier = 0;
module_param_named(max_multiplier, param_max_multiplier, uint, S_IRUGO);
MODULE_PARM_DESC(max_multiplier, "Maximum multiplier (allowed values: 20 30 35 40 45 50 55 60)");
module_param_named(bus_frequency, param_busfreq, uint, S_IRUGO);
MODULE_PARM_DESC(bus_frequency, "Bus frequency in kHz");
/* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */
static struct cpufreq_frequency_table clock_ratio[] = {
{0, 60, /* 110 -> 6.0x */ 0},
{0, 55, /* 011 -> 5.5x */ 0},
{0, 50, /* 001 -> 5.0x */ 0},
{0, 45, /* 000 -> 4.5x */ 0},
{0, 40, /* 010 -> 4.0x */ 0},
{0, 35, /* 111 -> 3.5x */ 0},
{0, 30, /* 101 -> 3.0x */ 0},
{0, 20, /* 100 -> 2.0x */ 0},
{0, 0, CPUFREQ_TABLE_END}
};
static const u8 index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 };
static const u8 register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 };
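/*
 * index_to_register maps a clock_ratio[] index to the 3-bit BVC register
 * encoding (shown in binary in the clock_ratio comments above);
 * register_to_index is its inverse.
 */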
static const struct {
unsigned freq;
unsigned mult;
} usual_frequency_table[] = {
{ 350000, 35 }, // 100 * 3.5
{ 400000, 40 }, // 100 * 4
{ 450000, 45 }, // 100 * 4.5
{ 475000, 50 }, // 95 * 5
{ 500000, 50 }, // 100 * 5
{ 506250, 45 }, // 112.5 * 4.5
{ 533500, 55 }, // 97 * 5.5
{ 550000, 55 }, // 100 * 5.5
{ 562500, 50 }, // 112.5 * 5
{ 570000, 60 }, // 95 * 6
{ 600000, 60 }, // 100 * 6
{ 618750, 55 }, // 112.5 * 5.5
{ 660000, 55 }, // 120 * 5.5
{ 675000, 60 }, // 112.5 * 6
{ 720000, 60 }, // 120 * 6
};
#define FREQ_RANGE 3000
/**
* powernow_k6_get_cpu_multiplier - returns the current FSB multiplier
*
* Returns the current setting of the frequency multiplier. Core clock
* speed is frequency of the Front-Side Bus multiplied with this value.
*/
static int powernow_k6_get_cpu_multiplier(void)
{
unsigned long invalue = 0;
u32 msrval;
local_irq_disable();
msrval = POWERNOW_IOPORT + 0x1;
wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
invalue = inl(POWERNOW_IOPORT + 0x8);
msrval = POWERNOW_IOPORT + 0x0;
wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
local_irq_enable();
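/* Bits 5..7 of the port value hold the multiplier in register encoding;
 * register_to_index maps it back to a clock_ratio[] index. */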
return clock_ratio[register_to_index[(invalue >> 5)&7]].driver_data;
}
static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
{
unsigned long outvalue, invalue;
unsigned long msrval;
unsigned long cr0;
/* we now need to transform best_i to the BVC format, see AMD#23446 */
/*
* The processor doesn't respond to inquiry cycles while changing the
* frequency, so we must disable cache.
*/
local_irq_disable();
cr0 = read_cr0();
write_cr0(cr0 | X86_CR0_CD);
wbinvd();
outvalue = (1<<12) | (1<<10) | (1<<9) | (index_to_register[best_i]<<5);
msrval = POWERNOW_IOPORT + 0x1;
wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
invalue = inl(POWERNOW_IOPORT + 0x8);
invalue = invalue & 0x1f;
outvalue = outvalue | invalue;
outl(outvalue, (POWERNOW_IOPORT + 0x8));
msrval = POWERNOW_IOPORT + 0x0;
wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
write_cr0(cr0);
local_irq_enable();
}
/**
* powernow_k6_target - set the PowerNow! multiplier
* @policy: cpufreq policy
* @best_i: clock_ratio[best_i] is the target multiplier
*
* Tries to change the PowerNow! multiplier
*/
static int powernow_k6_target(struct cpufreq_policy *policy,
unsigned int best_i)
{
if (clock_ratio[best_i].driver_data > max_multiplier) {
pr_err("invalid target frequency\n");
return -EINVAL;
}
powernow_k6_set_cpu_multiplier(best_i);
return 0;
}
static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *pos;
unsigned int i, f;
unsigned khz;
if (policy->cpu != 0)
return -ENODEV;
max_multiplier = 0;
khz = cpu_khz;
for (i = 0; i < ARRAY_SIZE(usual_frequency_table); i++) {
if (khz >= usual_frequency_table[i].freq - FREQ_RANGE &&
khz <= usual_frequency_table[i].freq + FREQ_RANGE) {
khz = usual_frequency_table[i].freq;
max_multiplier = usual_frequency_table[i].mult;
break;
}
}
if (param_max_multiplier) {
cpufreq_for_each_entry(pos, clock_ratio)
if (pos->driver_data == param_max_multiplier) {
max_multiplier = param_max_multiplier;
goto have_max_multiplier;
}
pr_err("invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
return -EINVAL;
}
if (!max_multiplier) {
pr_warn("unknown frequency %u, cannot determine current multiplier\n",
khz);
pr_warn("use module parameters max_multiplier and bus_frequency\n");
return -EOPNOTSUPP;
}
have_max_multiplier:
param_max_multiplier = max_multiplier;
if (param_busfreq) {
if (param_busfreq >= 50000 && param_busfreq <= 150000) {
busfreq = param_busfreq / 10;
goto have_busfreq;
}
pr_err("invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
return -EINVAL;
}
busfreq = khz / max_multiplier;
have_busfreq:
param_busfreq = busfreq * 10;
/* table init */
cpufreq_for_each_entry(pos, clock_ratio) {
f = pos->driver_data;
if (f > max_multiplier)
pos->frequency = CPUFREQ_ENTRY_INVALID;
else
pos->frequency = busfreq * f;
}
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = 500000;
policy->freq_table = clock_ratio;
return 0;
}
static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int i;
for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
if (clock_ratio[i].driver_data == max_multiplier) {
struct cpufreq_freqs freqs;
freqs.old = policy->cur;
freqs.new = clock_ratio[i].frequency;
freqs.flags = 0;
cpufreq_freq_transition_begin(policy, &freqs);
powernow_k6_target(policy, i);
cpufreq_freq_transition_end(policy, &freqs, 0);
break;
}
}
return 0;
}
static unsigned int powernow_k6_get(unsigned int cpu)
{
unsigned int ret;
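/* busfreq is in 10 kHz units and the multiplier is scaled by 10,
 * so their product is already in kHz. */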
ret = (busfreq * powernow_k6_get_cpu_multiplier());
return ret;
}
static struct cpufreq_driver powernow_k6_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = powernow_k6_target,
.init = powernow_k6_cpu_init,
.exit = powernow_k6_cpu_exit,
.get = powernow_k6_get,
.name = "powernow-k6",
.attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id powernow_k6_ids[] = {
X86_MATCH_VENDOR_FAM_MODEL(AMD, 5, 12, NULL),
X86_MATCH_VENDOR_FAM_MODEL(AMD, 5, 13, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, powernow_k6_ids);
/**
* powernow_k6_init - initializes the k6 PowerNow! CPUFreq driver
*
* Initializes the K6 PowerNow! support. Returns -ENODEV on unsupported
* devices, -EINVAL or -ENOMEM on problems during initialization, and zero
* on success.
*/
static int __init powernow_k6_init(void)
{
if (!x86_match_cpu(powernow_k6_ids))
return -ENODEV;
if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) {
pr_info("PowerNow IOPORT region already used\n");
return -EIO;
}
if (cpufreq_register_driver(&powernow_k6_driver)) {
release_region(POWERNOW_IOPORT, 16);
return -EINVAL;
}
return 0;
}
/**
* powernow_k6_exit - unregisters AMD K6-2+/3+ PowerNow! support
*
* Unregisters AMD K6-2+ / K6-3+ PowerNow! support.
*/
static void __exit powernow_k6_exit(void)
{
cpufreq_unregister_driver(&powernow_k6_driver);
release_region(POWERNOW_IOPORT, 16);
}
MODULE_AUTHOR("Arjan van de Ven, Dave Jones, "
"Dominik Brodowski <[email protected]>");
MODULE_DESCRIPTION("PowerNow! driver for AMD K6-2+ / K6-3+ processors.");
MODULE_LICENSE("GPL");
module_init(powernow_k6_init);
module_exit(powernow_k6_exit);
| linux-master | drivers/cpufreq/powernow-k6.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2001-2004 Dave Jones.
* (C) 2002 Padraig Brady. <[email protected]>
*
* Based upon datasheets & sample CPUs kindly provided by VIA.
*
* VIA have currently 3 different versions of Longhaul.
* Version 1 (Longhaul) uses the BCR2 MSR at 0x1147.
* It is present only in Samuel 1 (C5A), Samuel 2 (C5B) stepping 0.
* Version 2 of longhaul is backward compatible with v1, but adds
* LONGHAUL MSR for purpose of both frequency and voltage scaling.
* Present in Samuel 2 (steppings 1-7 only) (C5B), and Ezra (C5C).
* Version 3 of longhaul got renamed to Powersaver and redesigned
* to use only the POWERSAVER MSR at 0x110a.
* It is present in Ezra-T (C5M), Nehemiah (C5X) and above.
* It's feature-wise pretty much the same as longhaul v2, though
* there is provision for scaling the FSB too, but this doesn't work
* too well in practice, so we don't even try to use it.
*
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timex.h>
#include <linux/io.h>
#include <linux/acpi.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <acpi/processor.h>
#include "longhaul.h"
#define TYPE_LONGHAUL_V1 1
#define TYPE_LONGHAUL_V2 2
#define TYPE_POWERSAVER 3
#define CPU_SAMUEL 1
#define CPU_SAMUEL2 2
#define CPU_EZRA 3
#define CPU_EZRA_T 4
#define CPU_NEHEMIAH 5
#define CPU_NEHEMIAH_C 6
/* Flags */
#define USE_ACPI_C3 (1 << 1)
#define USE_NORTHBRIDGE (1 << 2)
static int cpu_model;
static unsigned int numscales = 16;
static unsigned int fsb;
static const struct mV_pos *vrm_mV_table;
static const unsigned char *mV_vrm_table;
static unsigned int highest_speed, lowest_speed; /* kHz */
static unsigned int minmult, maxmult;
static int can_scale_voltage;
static struct acpi_processor *pr;
static struct acpi_processor_cx *cx;
static u32 acpi_regs_addr;
static u8 longhaul_flags;
static unsigned int longhaul_index;
/* Module parameters */
static int scale_voltage;
static int disable_acpi_c3;
static int revid_errata;
static int enable;
/* Clock ratios multiplied by 10 */
static int mults[32];
static int eblcr[32];
static int longhaul_version;
static struct cpufreq_frequency_table *longhaul_table;
static char speedbuffer[8];
static char *print_speed(int speed)
{
if (speed < 1000) {
snprintf(speedbuffer, sizeof(speedbuffer), "%dMHz", speed);
return speedbuffer;
}
if (speed%1000 == 0)
snprintf(speedbuffer, sizeof(speedbuffer),
"%dGHz", speed/1000);
else
snprintf(speedbuffer, sizeof(speedbuffer),
"%d.%dGHz", speed/1000, (speed%1000)/100);
return speedbuffer;
}
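/*
 * mult is the clock ratio multiplied by 10 and fsb is in MHz, so e.g.
 * mult = 45 with fsb = 100 gives (4 * 100 + 50) * 1000 = 450000 kHz.
 */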
static unsigned int calc_speed(int mult)
{
int khz;
khz = (mult/10)*fsb;
if (mult%10)
khz += fsb/2;
khz *= 1000;
return khz;
}
static int longhaul_get_cpu_mult(void)
{
unsigned long invalue = 0, lo, hi;
rdmsr(MSR_IA32_EBL_CR_POWERON, lo, hi);
invalue = (lo & (1<<22|1<<23|1<<24|1<<25))>>22;
if (longhaul_version == TYPE_LONGHAUL_V2 ||
longhaul_version == TYPE_POWERSAVER) {
if (lo & (1<<27))
invalue += 16;
}
return eblcr[invalue];
}
/* For processor with BCR2 MSR */
static void do_longhaul1(unsigned int mults_index)
{
union msr_bcr2 bcr2;
rdmsrl(MSR_VIA_BCR2, bcr2.val);
/* Enable software clock multiplier */
bcr2.bits.ESOFTBF = 1;
bcr2.bits.CLOCKMUL = mults_index & 0xff;
/* Sync to timer tick */
safe_halt();
/* Change frequency on next halt or sleep */
wrmsrl(MSR_VIA_BCR2, bcr2.val);
/* Invoke transition */
ACPI_FLUSH_CPU_CACHE();
halt();
/* Disable software clock multiplier */
local_irq_disable();
rdmsrl(MSR_VIA_BCR2, bcr2.val);
bcr2.bits.ESOFTBF = 0;
wrmsrl(MSR_VIA_BCR2, bcr2.val);
}
/* For processor with Longhaul MSR */
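/*
 * @dir: 1 raises the voltage before the frequency transition (scaling
 * up), 0 lowers it afterwards (scaling down), so the CPU presumably
 * stays within a safe voltage/frequency combination at every step.
 */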
static void do_powersaver(int cx_address, unsigned int mults_index,
unsigned int dir)
{
union msr_longhaul longhaul;
u32 t;
rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
/* Setup new frequency */
if (!revid_errata)
longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
else
longhaul.bits.RevisionKey = 0;
longhaul.bits.SoftBusRatio = mults_index & 0xf;
longhaul.bits.SoftBusRatio4 = (mults_index & 0x10) >> 4;
/* Setup new voltage */
if (can_scale_voltage)
longhaul.bits.SoftVID = (mults_index >> 8) & 0x1f;
/* Sync to timer tick */
safe_halt();
/* Raise voltage if necessary */
if (can_scale_voltage && dir) {
longhaul.bits.EnableSoftVID = 1;
wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
/* Change voltage */
if (!cx_address) {
ACPI_FLUSH_CPU_CACHE();
halt();
} else {
ACPI_FLUSH_CPU_CACHE();
/* Invoke C3 */
inb(cx_address);
/* Dummy op - must do something useless after P_LVL3
* read */
t = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
longhaul.bits.EnableSoftVID = 0;
wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
}
/* Change frequency on next halt or sleep */
longhaul.bits.EnableSoftBusRatio = 1;
wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
if (!cx_address) {
ACPI_FLUSH_CPU_CACHE();
halt();
} else {
ACPI_FLUSH_CPU_CACHE();
/* Invoke C3 */
inb(cx_address);
/* Dummy op - must do something useless after P_LVL3 read */
t = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
/* Disable bus ratio bit */
longhaul.bits.EnableSoftBusRatio = 0;
wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
/* Reduce voltage if necessary */
if (can_scale_voltage && !dir) {
longhaul.bits.EnableSoftVID = 1;
wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
/* Change voltage */
if (!cx_address) {
ACPI_FLUSH_CPU_CACHE();
halt();
} else {
ACPI_FLUSH_CPU_CACHE();
/* Invoke C3 */
inb(cx_address);
/* Dummy op - must do something useless after P_LVL3
* read */
t = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
longhaul.bits.EnableSoftVID = 0;
wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
}
}
/**
* longhaul_setstate() - set a new PowerNow! state
* @policy: cpufreq policy
* @table_index: index into longhaul_table of the target state
*
* Sets a new clock ratio (and, when voltage scaling is enabled, a new
* voltage).
*/
static int longhaul_setstate(struct cpufreq_policy *policy,
unsigned int table_index)
{
unsigned int mults_index;
int speed, mult;
struct cpufreq_freqs freqs;
unsigned long flags;
unsigned int pic1_mask, pic2_mask;
u16 bm_status = 0;
u32 bm_timeout = 1000;
unsigned int dir = 0;
mults_index = longhaul_table[table_index].driver_data;
/* Safety precautions */
mult = mults[mults_index & 0x1f];
if (mult == -1)
return -EINVAL;
speed = calc_speed(mult);
if ((speed > highest_speed) || (speed < lowest_speed))
return -EINVAL;
/* Voltage transition before frequency transition? */
if (can_scale_voltage && longhaul_index < table_index)
dir = 1;
freqs.old = calc_speed(longhaul_get_cpu_mult());
freqs.new = speed;
pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
fsb, mult/10, mult%10, print_speed(speed/1000));
retry_loop:
preempt_disable();
local_irq_save(flags);
pic2_mask = inb(0xA1);
pic1_mask = inb(0x21); /* works on C3. save mask. */
outb(0xFF, 0xA1); /* Overkill */
outb(0xFE, 0x21); /* TMR0 only */
/* Wait while PCI bus is busy. */
if (acpi_regs_addr && (longhaul_flags & USE_NORTHBRIDGE
|| ((pr != NULL) && pr->flags.bm_control))) {
bm_status = inw(acpi_regs_addr);
bm_status &= 1 << 4;
while (bm_status && bm_timeout) {
outw(1 << 4, acpi_regs_addr);
bm_timeout--;
bm_status = inw(acpi_regs_addr);
bm_status &= 1 << 4;
}
}
if (longhaul_flags & USE_NORTHBRIDGE) {
/* Disable AGP and PCI arbiters */
outb(3, 0x22);
} else if ((pr != NULL) && pr->flags.bm_control) {
/* Disable bus master arbitration */
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
}
switch (longhaul_version) {
/*
* Longhaul v1. (Samuel[C5A] and Samuel2 stepping 0[C5B])
* Software controlled multipliers only.
*/
case TYPE_LONGHAUL_V1:
do_longhaul1(mults_index);
break;
/*
* Longhaul v2 appears in Samuel2 Steppings 1->7 [C5B] and Ezra [C5C]
*
* Longhaul v3 (aka Powersaver). (Ezra-T [C5M] & Nehemiah [C5N])
* Nehemiah can do FSB scaling too, but this has never been proven
* to work in practice.
*/
case TYPE_LONGHAUL_V2:
case TYPE_POWERSAVER:
if (longhaul_flags & USE_ACPI_C3) {
/* Don't allow wakeup */
acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
do_powersaver(cx->address, mults_index, dir);
} else {
do_powersaver(0, mults_index, dir);
}
break;
}
if (longhaul_flags & USE_NORTHBRIDGE) {
/* Enable arbiters */
outb(0, 0x22);
} else if ((pr != NULL) && pr->flags.bm_control) {
/* Enable bus master arbitration */
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
}
outb(pic2_mask, 0xA1); /* restore mask */
outb(pic1_mask, 0x21);
local_irq_restore(flags);
preempt_enable();
freqs.new = calc_speed(longhaul_get_cpu_mult());
/* Check if requested frequency is set. */
if (unlikely(freqs.new != speed)) {
pr_info("Failed to set requested frequency!\n");
/* Revision ID = 1 but processor is expecting revision key
* equal to 0. Jumpers at the bottom of processor will change
* multiplier and FSB, but will not change bits in Longhaul
* MSR nor enable voltage scaling. */
if (!revid_errata) {
pr_info("Enabling \"Ignore Revision ID\" option\n");
revid_errata = 1;
msleep(200);
goto retry_loop;
}
/* Why ACPI C3 sometimes doesn't work is a mystery to me.
* But it does happen. The processor is entering the ACPI C3
* state, but it doesn't change frequency. I tried poking various
* bits in northbridge registers, but without success. */
if (longhaul_flags & USE_ACPI_C3) {
pr_info("Disabling ACPI C3 support\n");
longhaul_flags &= ~USE_ACPI_C3;
if (revid_errata) {
pr_info("Disabling \"Ignore Revision ID\" option\n");
revid_errata = 0;
}
msleep(200);
goto retry_loop;
}
/* This shouldn't happen. Longhaul ver. 2 was reported not
* working on processors without voltage scaling, but with
* RevID = 1. RevID errata will make things right. Just
* to be 100% sure. */
if (longhaul_version == TYPE_LONGHAUL_V2) {
pr_info("Switching to Longhaul ver. 1\n");
longhaul_version = TYPE_LONGHAUL_V1;
msleep(200);
goto retry_loop;
}
}
if (!bm_timeout) {
pr_info("Warning: Timeout while waiting for idle PCI bus\n");
return -EBUSY;
}
return 0;
}
/*
* Centaur decided to make life a little more tricky.
* Only longhaul v1 is allowed to read EBLCR BSEL[0:1].
* Samuel2 and above have to try and guess what the FSB is.
* We do this by assuming we booted at maximum multiplier, and interpolate
* between that value multiplied by possible FSBs and cpu_mhz which
* was calculated at boot time. Really ugly, but no other way to do this.
*/
#define ROUNDING 0xf
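/*
 * Worked example (illustrative numbers): with mult = 100 (a 10.0x
 * multiplier) and cpu_khz around 1333000, speeds[2] = 1333 gives
 * f_max = (1333 * 100 + 50) / 100 + 7 = 1340 and f_min = 1325; the
 * measured 1333 MHz falls in that window, so 133 (MHz) is returned.
 */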
static int guess_fsb(int mult)
{
int speed = cpu_khz / 1000;
int i;
static const int speeds[] = { 666, 1000, 1333, 2000 };
int f_max, f_min;
for (i = 0; i < ARRAY_SIZE(speeds); i++) {
f_max = ((speeds[i] * mult) + 50) / 100;
f_max += (ROUNDING / 2);
f_min = f_max - ROUNDING;
if ((speed <= f_max) && (speed >= f_min))
return speeds[i] / 10;
}
return 0;
}
static int longhaul_get_ranges(void)
{
unsigned int i, j, k = 0;
unsigned int ratio;
int mult;
/* Get current frequency */
mult = longhaul_get_cpu_mult();
if (mult == -1) {
pr_info("Invalid (reserved) multiplier!\n");
return -EINVAL;
}
fsb = guess_fsb(mult);
if (fsb == 0) {
pr_info("Invalid (reserved) FSB!\n");
return -EINVAL;
}
/* Get max multiplier - as we always did.
* Longhaul MSR is useful only when voltage scaling is enabled.
* C3 is booting at max anyway. */
maxmult = mult;
/* Get min multiplier */
switch (cpu_model) {
case CPU_NEHEMIAH:
minmult = 50;
break;
case CPU_NEHEMIAH_C:
minmult = 40;
break;
default:
minmult = 30;
break;
}
pr_debug("MinMult:%d.%dx MaxMult:%d.%dx\n",
minmult/10, minmult%10, maxmult/10, maxmult%10);
highest_speed = calc_speed(maxmult);
lowest_speed = calc_speed(minmult);
pr_debug("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb,
print_speed(lowest_speed/1000),
print_speed(highest_speed/1000));
if (lowest_speed == highest_speed) {
pr_info("highestspeed == lowest, aborting\n");
return -EINVAL;
}
if (lowest_speed > highest_speed) {
pr_info("nonsense! lowest (%d > %d) !\n",
lowest_speed, highest_speed);
return -EINVAL;
}
longhaul_table = kcalloc(numscales + 1, sizeof(*longhaul_table),
GFP_KERNEL);
if (!longhaul_table)
return -ENOMEM;
for (j = 0; j < numscales; j++) {
ratio = mults[j];
if (ratio == -1)
continue;
if (ratio > maxmult || ratio < minmult)
continue;
longhaul_table[k].frequency = calc_speed(ratio);
longhaul_table[k].driver_data = j;
k++;
}
if (k <= 1) {
kfree(longhaul_table);
return -ENODEV;
}
/* Sort */
for (j = 0; j < k - 1; j++) {
unsigned int min_f, min_i;
min_f = longhaul_table[j].frequency;
min_i = j;
for (i = j + 1; i < k; i++) {
if (longhaul_table[i].frequency < min_f) {
min_f = longhaul_table[i].frequency;
min_i = i;
}
}
if (min_i != j) {
swap(longhaul_table[j].frequency,
longhaul_table[min_i].frequency);
swap(longhaul_table[j].driver_data,
longhaul_table[min_i].driver_data);
}
}
longhaul_table[k].frequency = CPUFREQ_TABLE_END;
/* Find index we are running on */
for (j = 0; j < k; j++) {
if (mults[longhaul_table[j].driver_data & 0x1f] == mult) {
longhaul_index = j;
break;
}
}
return 0;
}
static void longhaul_setup_voltagescaling(void)
{
struct cpufreq_frequency_table *freq_pos;
union msr_longhaul longhaul;
struct mV_pos minvid, maxvid, vid;
unsigned int j, speed, pos, kHz_step, numvscales;
int min_vid_speed;
rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
if (!(longhaul.bits.RevisionID & 1)) {
pr_info("Voltage scaling not supported by CPU\n");
return;
}
if (!longhaul.bits.VRMRev) {
pr_info("VRM 8.5\n");
vrm_mV_table = &vrm85_mV[0];
mV_vrm_table = &mV_vrm85[0];
} else {
pr_info("Mobile VRM\n");
if (cpu_model < CPU_NEHEMIAH)
return;
vrm_mV_table = &mobilevrm_mV[0];
mV_vrm_table = &mV_mobilevrm[0];
}
minvid = vrm_mV_table[longhaul.bits.MinimumVID];
maxvid = vrm_mV_table[longhaul.bits.MaximumVID];
if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) {
pr_info("Bogus values Min:%d.%03d Max:%d.%03d - Voltage scaling disabled\n",
minvid.mV/1000, minvid.mV%1000,
maxvid.mV/1000, maxvid.mV%1000);
return;
}
if (minvid.mV == maxvid.mV) {
pr_info("Claims to support voltage scaling but min & max are both %d.%03d - Voltage scaling disabled\n",
maxvid.mV/1000, maxvid.mV%1000);
return;
}
/* How many voltage steps */
numvscales = maxvid.pos - minvid.pos + 1;
pr_info("Max VID=%d.%03d Min VID=%d.%03d, %d possible voltage scales\n",
maxvid.mV/1000, maxvid.mV%1000,
minvid.mV/1000, minvid.mV%1000,
numvscales);
/* Calculate max frequency at min voltage */
j = longhaul.bits.MinMHzBR;
if (longhaul.bits.MinMHzBR4)
j += 16;
min_vid_speed = eblcr[j];
if (min_vid_speed == -1)
return;
switch (longhaul.bits.MinMHzFSB) {
case 0:
min_vid_speed *= 13333;
break;
case 1:
min_vid_speed *= 10000;
break;
case 3:
min_vid_speed *= 6666;
break;
default:
return;
}
if (min_vid_speed >= highest_speed)
return;
/* Calculate kHz for one voltage step */
kHz_step = (highest_speed - min_vid_speed) / numvscales;
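/*
 * Illustrative numbers: highest_speed = 1200000 kHz, min_vid_speed =
 * 800000 kHz and numvscales = 16 give a 25000 kHz step; the loop below
 * then assigns every frequency above min_vid_speed a proportionally
 * higher VID position.
 */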
cpufreq_for_each_entry_idx(freq_pos, longhaul_table, j) {
speed = freq_pos->frequency;
if (speed > min_vid_speed)
pos = (speed - min_vid_speed) / kHz_step + minvid.pos;
else
pos = minvid.pos;
freq_pos->driver_data |= mV_vrm_table[pos] << 8;
vid = vrm_mV_table[mV_vrm_table[pos]];
pr_info("f: %d kHz, index: %d, vid: %d mV\n",
speed, j, vid.mV);
}
can_scale_voltage = 1;
pr_info("Voltage scaling enabled\n");
}
static int longhaul_target(struct cpufreq_policy *policy,
unsigned int table_index)
{
unsigned int i;
unsigned int dir = 0;
u8 vid, current_vid;
int retval = 0;
if (!can_scale_voltage)
retval = longhaul_setstate(policy, table_index);
else {
/* On test system voltage transitions exceeding single
* step up or down were turning motherboard off. Both
* "ondemand" and "userspace" are unsafe. C7 is doing
* this in hardware, C3 is old and we need to do this
* in software. */
i = longhaul_index;
current_vid = (longhaul_table[longhaul_index].driver_data >> 8);
current_vid &= 0x1f;
if (table_index > longhaul_index)
dir = 1;
while (i != table_index) {
vid = (longhaul_table[i].driver_data >> 8) & 0x1f;
if (vid != current_vid) {
retval = longhaul_setstate(policy, i);
current_vid = vid;
msleep(200);
}
if (dir)
i++;
else
i--;
}
retval = longhaul_setstate(policy, table_index);
}
longhaul_index = table_index;
return retval;
}
static unsigned int longhaul_get(unsigned int cpu)
{
if (cpu)
return 0;
return calc_speed(longhaul_get_cpu_mult());
}
static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
u32 nesting_level,
void *context, void **return_value)
{
struct acpi_device *d = acpi_fetch_acpi_dev(obj_handle);
if (!d)
return 0;
*return_value = acpi_driver_data(d);
return 1;
}
/* VIA doesn't support the PM2 register, but has something similar */
static int enable_arbiter_disable(void)
{
struct pci_dev *dev;
int status = 1;
int reg;
u8 pci_cmd;
/* Find PLE133 host bridge */
reg = 0x78;
dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0,
NULL);
/* Find PM133/VT8605 host bridge */
if (dev == NULL)
dev = pci_get_device(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_8605_0, NULL);
/* Find CLE266 host bridge */
if (dev == NULL) {
reg = 0x76;
dev = pci_get_device(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_862X_0, NULL);
/* Find CN400 V-Link host bridge */
if (dev == NULL)
dev = pci_get_device(PCI_VENDOR_ID_VIA, 0x7259, NULL);
}
if (dev != NULL) {
/* Enable access to port 0x22 */
pci_read_config_byte(dev, reg, &pci_cmd);
if (!(pci_cmd & 1<<7)) {
pci_cmd |= 1<<7;
pci_write_config_byte(dev, reg, pci_cmd);
pci_read_config_byte(dev, reg, &pci_cmd);
if (!(pci_cmd & 1<<7)) {
pr_err("Can't enable access to port 0x22\n");
status = 0;
}
}
pci_dev_put(dev);
return status;
}
return 0;
}
static int longhaul_setup_southbridge(void)
{
struct pci_dev *dev;
u8 pci_cmd;
/* Find VT8235 southbridge */
dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL);
if (dev == NULL)
/* Find VT8237 southbridge */
dev = pci_get_device(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_8237, NULL);
if (dev != NULL) {
/* Set transition time to max */
pci_read_config_byte(dev, 0xec, &pci_cmd);
pci_cmd &= ~(1 << 2);
pci_write_config_byte(dev, 0xec, pci_cmd);
pci_read_config_byte(dev, 0xe4, &pci_cmd);
pci_cmd &= ~(1 << 7);
pci_write_config_byte(dev, 0xe4, pci_cmd);
pci_read_config_byte(dev, 0xe5, &pci_cmd);
pci_cmd |= 1 << 7;
pci_write_config_byte(dev, 0xe5, pci_cmd);
/* Get address of ACPI registers block*/
pci_read_config_byte(dev, 0x81, &pci_cmd);
if (pci_cmd & 1 << 7) {
pci_read_config_dword(dev, 0x88, &acpi_regs_addr);
acpi_regs_addr &= 0xff00;
pr_info("ACPI I/O at 0x%x\n", acpi_regs_addr);
}
pci_dev_put(dev);
return 1;
}
return 0;
}
static int longhaul_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = &cpu_data(0);
char *cpuname = NULL;
int ret;
u32 lo, hi;
/* Check what we have on this motherboard */
switch (c->x86_model) {
case 6:
cpu_model = CPU_SAMUEL;
cpuname = "C3 'Samuel' [C5A]";
longhaul_version = TYPE_LONGHAUL_V1;
memcpy(mults, samuel1_mults, sizeof(samuel1_mults));
memcpy(eblcr, samuel1_eblcr, sizeof(samuel1_eblcr));
break;
case 7:
switch (c->x86_stepping) {
case 0:
longhaul_version = TYPE_LONGHAUL_V1;
cpu_model = CPU_SAMUEL2;
cpuname = "C3 'Samuel 2' [C5B]";
/* Note, this is not a typo, early Samuel2's had
* Samuel1 ratios. */
memcpy(mults, samuel1_mults, sizeof(samuel1_mults));
memcpy(eblcr, samuel2_eblcr, sizeof(samuel2_eblcr));
break;
case 1 ... 15:
longhaul_version = TYPE_LONGHAUL_V2;
if (c->x86_stepping < 8) {
cpu_model = CPU_SAMUEL2;
cpuname = "C3 'Samuel 2' [C5B]";
} else {
cpu_model = CPU_EZRA;
cpuname = "C3 'Ezra' [C5C]";
}
memcpy(mults, ezra_mults, sizeof(ezra_mults));
memcpy(eblcr, ezra_eblcr, sizeof(ezra_eblcr));
break;
}
break;
case 8:
cpu_model = CPU_EZRA_T;
cpuname = "C3 'Ezra-T' [C5M]";
longhaul_version = TYPE_POWERSAVER;
numscales = 32;
memcpy(mults, ezrat_mults, sizeof(ezrat_mults));
memcpy(eblcr, ezrat_eblcr, sizeof(ezrat_eblcr));
break;
case 9:
longhaul_version = TYPE_POWERSAVER;
numscales = 32;
memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults));
memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr));
switch (c->x86_stepping) {
case 0 ... 1:
cpu_model = CPU_NEHEMIAH;
cpuname = "C3 'Nehemiah A' [C5XLOE]";
break;
case 2 ... 4:
cpu_model = CPU_NEHEMIAH;
cpuname = "C3 'Nehemiah B' [C5XLOH]";
break;
case 5 ... 15:
cpu_model = CPU_NEHEMIAH_C;
cpuname = "C3 'Nehemiah C' [C5P]";
break;
}
break;
default:
cpuname = "Unknown";
break;
}
/* Check Longhaul ver. 2 */
if (longhaul_version == TYPE_LONGHAUL_V2) {
rdmsr(MSR_VIA_LONGHAUL, lo, hi);
if (lo == 0 && hi == 0)
/* Looks like MSR isn't present */
longhaul_version = TYPE_LONGHAUL_V1;
}
pr_info("VIA %s CPU detected. ", cpuname);
switch (longhaul_version) {
case TYPE_LONGHAUL_V1:
case TYPE_LONGHAUL_V2:
pr_cont("Longhaul v%d supported\n", longhaul_version);
break;
case TYPE_POWERSAVER:
pr_cont("Powersaver supported\n");
break;
}
/* Doesn't hurt */
longhaul_setup_southbridge();
/* Find ACPI data for processor */
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, &longhaul_walk_callback, NULL,
NULL, (void *)&pr);
/* Check ACPI support for C3 state */
if (pr != NULL && longhaul_version == TYPE_POWERSAVER) {
cx = &pr->power.states[ACPI_STATE_C3];
if (cx->address > 0 && cx->latency <= 1000)
longhaul_flags |= USE_ACPI_C3;
}
/* Disable if it isn't working */
if (disable_acpi_c3)
longhaul_flags &= ~USE_ACPI_C3;
/* Check if northbridge is friendly */
if (enable_arbiter_disable())
longhaul_flags |= USE_NORTHBRIDGE;
/* Check ACPI support for bus master arbiter disable */
if (!(longhaul_flags & USE_ACPI_C3
|| longhaul_flags & USE_NORTHBRIDGE)
&& ((pr == NULL) || !(pr->flags.bm_control))) {
pr_err("No ACPI support: Unsupported northbridge\n");
return -ENODEV;
}
if (longhaul_flags & USE_NORTHBRIDGE)
pr_info("Using northbridge support\n");
if (longhaul_flags & USE_ACPI_C3)
pr_info("Using ACPI support\n");
ret = longhaul_get_ranges();
if (ret != 0)
return ret;
if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0))
longhaul_setup_voltagescaling();
policy->transition_delay_us = 200000; /* usec */
policy->freq_table = longhaul_table;
return 0;
}
static struct cpufreq_driver longhaul_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = longhaul_target,
.get = longhaul_get,
.init = longhaul_cpu_init,
.name = "longhaul",
.attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id longhaul_id[] = {
X86_MATCH_VENDOR_FAM(CENTAUR, 6, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, longhaul_id);
static int __init longhaul_init(void)
{
struct cpuinfo_x86 *c = &cpu_data(0);
if (!x86_match_cpu(longhaul_id))
return -ENODEV;
if (!enable) {
pr_err("Option \"enable\" not set - Aborting\n");
return -ENODEV;
}
#ifdef CONFIG_SMP
if (num_online_cpus() > 1) {
pr_err("More than 1 CPU detected, longhaul disabled\n");
return -ENODEV;
}
#endif
#ifdef CONFIG_X86_IO_APIC
if (boot_cpu_has(X86_FEATURE_APIC)) {
pr_err("APIC detected. Longhaul is currently broken in this configuration.\n");
return -ENODEV;
}
#endif
switch (c->x86_model) {
case 6 ... 9:
return cpufreq_register_driver(&longhaul_driver);
case 10:
pr_err("Use acpi-cpufreq driver for VIA C7\n");
}
return -ENODEV;
}
static void __exit longhaul_exit(void)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(0);
int i;
for (i = 0; i < numscales; i++) {
if (mults[i] == maxmult) {
struct cpufreq_freqs freqs;
freqs.old = policy->cur;
freqs.new = longhaul_table[i].frequency;
freqs.flags = 0;
cpufreq_freq_transition_begin(policy, &freqs);
longhaul_setstate(policy, i);
cpufreq_freq_transition_end(policy, &freqs, 0);
break;
}
}
cpufreq_cpu_put(policy);
cpufreq_unregister_driver(&longhaul_driver);
kfree(longhaul_table);
}
/* Even if the BIOS exports an ACPI C3 state, and it is used
* successfully while the CPU is idle, this state doesn't
* trigger a frequency transition in some cases. */
module_param(disable_acpi_c3, int, 0644);
MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support");
/* Change CPU voltage with frequency. Very useful to save
* power, but most VIA C3 processors don't support it. */
module_param(scale_voltage, int, 0644);
MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
/* Force the revision key to 0 for processors that don't
* support voltage scaling but advertise themselves as if
* they do. */
module_param(revid_errata, int, 0644);
MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
/* The driver is disabled by default to prevent freezes
* on incompatible systems. */
module_param(enable, int, 0644);
MODULE_PARM_DESC(enable, "Enable driver");
MODULE_AUTHOR("Dave Jones");
MODULE_DESCRIPTION("Longhaul driver for VIA Cyrix processors.");
MODULE_LICENSE("GPL");
late_initcall(longhaul_init);
module_exit(longhaul_exit);
| linux-master | drivers/cpufreq/longhaul.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* POWERNV cpufreq driver for the IBM POWER processors
*
* (C) Copyright IBM 2014
*
* Author: Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>
*/
#define pr_fmt(fmt) "powernv-cpufreq: " fmt
#include <linux/kernel.h>
#include <linux/sysfs.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/smp.h>
#include <linux/of.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/hashtable.h>
#include <trace/events/power.h>
#include <asm/cputhreads.h>
#include <asm/firmware.h>
#include <asm/reg.h>
#include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
#include <asm/opal.h>
#include <linux/timer.h>
#define POWERNV_MAX_PSTATES_ORDER 8
#define POWERNV_MAX_PSTATES (1UL << (POWERNV_MAX_PSTATES_ORDER))
#define PMSR_PSAFE_ENABLE (1UL << 30)
#define PMSR_SPR_EM_DISABLE (1UL << 31)
#define MAX_PSTATE_SHIFT 32
#define LPSTATE_SHIFT 48
#define GPSTATE_SHIFT 56
#define MAX_NR_CHIPS 32
#define MAX_RAMP_DOWN_TIME 5120
/*
* On an idle system we want the global pstate to ramp-down from max value to
* min over a span of ~5 secs. Also we want it to initially ramp-down slowly and
* then ramp-down rapidly later on.
*
* This gives a percentage rampdown for time elapsed in milliseconds.
* ramp_down_percentage = ((ms * ms) >> 18)
* ~= 3.8 * (sec * sec)
*
* At 0 ms ramp_down_percent = 0
* At 5120 ms ramp_down_percent = 100
*/
#define ramp_down_percent(time) ((time * time) >> 18)
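/*
* Worked example: with time = 2560 ms (half-way through the window),
* ramp_down_percent(2560) = (2560 * 2560) >> 18 = 6553600 / 262144 = 25,
* i.e. only 25% of the way down. The quadratic curve back-loads most of
* the ramp-down into the second half of the 5120 ms window.
*/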
/* Interval after which the timer is queued to bring down global pstate */
#define GPSTATE_TIMER_INTERVAL 2000
/**
* struct global_pstate_info - Per policy data structure to maintain history of
* global pstates
* @highest_lpstate_idx: The local pstate index from which we are
* ramping down
* @elapsed_time: Time in ms spent in ramping down from
* highest_lpstate_idx
* @last_sampled_time: Time from boot in ms when global pstates were
* last set
* @last_lpstate_idx: Last set local pstate, as an index into
* the cpufreq frequency table
* @last_gpstate_idx: Last set global pstate, as an index into
* the cpufreq frequency table
* @timer: Is used for ramping down if cpu goes idle for
* a long time with global pstate held high
* @gpstate_lock: A spinlock to maintain synchronization between
* routines called by the timer handler and
* governor's target_index calls
* @policy: Associated CPUFreq policy
*/
struct global_pstate_info {
int highest_lpstate_idx;
unsigned int elapsed_time;
unsigned int last_sampled_time;
int last_lpstate_idx;
int last_gpstate_idx;
spinlock_t gpstate_lock;
struct timer_list timer;
struct cpufreq_policy *policy;
};
static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
static DEFINE_HASHTABLE(pstate_revmap, POWERNV_MAX_PSTATES_ORDER);
/**
* struct pstate_idx_revmap_data: Entry in the hashmap pstate_revmap
* indexed by a function of pstate id.
*
* @pstate_id: pstate id for this entry.
*
* @cpufreq_table_idx: Index into the powernv_freqs
* cpufreq_frequency_table for frequency
* corresponding to pstate_id.
*
* @hentry: hlist_node that hooks this entry into the pstate_revmap
* hashtable
*/
struct pstate_idx_revmap_data {
u8 pstate_id;
unsigned int cpufreq_table_idx;
struct hlist_node hentry;
};
static bool rebooting, throttled, occ_reset;
static const char * const throttle_reason[] = {
"No throttling",
"Power Cap",
"Processor Over Temperature",
"Power Supply Failure",
"Over Current",
"OCC Reset"
};
enum throttle_reason_type {
NO_THROTTLE = 0,
POWERCAP,
CPU_OVERTEMP,
POWER_SUPPLY_FAILURE,
OVERCURRENT,
OCC_RESET_THROTTLE,
OCC_MAX_REASON
};
static struct chip {
unsigned int id;
bool throttled;
bool restore;
u8 throttle_reason;
cpumask_t mask;
struct work_struct throttle;
int throttle_turbo;
int throttle_sub_turbo;
int reason[OCC_MAX_REASON];
} *chips;
static int nr_chips;
static DEFINE_PER_CPU(struct chip *, chip_info);
/*
* Note:
* The set of pstates consists of contiguous integers.
* powernv_pstate_info stores the index of the frequency table for
* max, min and nominal frequencies. It also stores number of
* available frequencies.
*
* powernv_pstate_info.nominal indicates the index to the highest
* non-turbo frequency.
*/
static struct powernv_pstate_info {
unsigned int min;
unsigned int max;
unsigned int nominal;
unsigned int nr_pstates;
bool wof_enabled;
} powernv_pstate_info;
static inline u8 extract_pstate(u64 pmsr_val, unsigned int shift)
{
return ((pmsr_val >> shift) & 0xFF);
}
#define extract_local_pstate(x) extract_pstate(x, LPSTATE_SHIFT)
#define extract_global_pstate(x) extract_pstate(x, GPSTATE_SHIFT)
#define extract_max_pstate(x) extract_pstate(x, MAX_PSTATE_SHIFT)
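/*
* Worked example for the extractors above, using a hypothetical
* pmsr_val = 0x203000FF00000000ULL:
*   extract_global_pstate() -> (val >> 56) & 0xFF = 0x20
*   extract_local_pstate()  -> (val >> 48) & 0xFF = 0x30
*   extract_max_pstate()    -> (val >> 32) & 0xFF = 0xFF
*/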
/* Use following functions for conversions between pstate_id and index */
/*
* idx_to_pstate : Returns the pstate id corresponding to the
* frequency in the cpufreq frequency table
* powernv_freqs indexed by @i.
*
* If @i is out of bounds, this will return the pstate
* corresponding to the nominal frequency.
*/
static inline u8 idx_to_pstate(unsigned int i)
{
if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
pr_warn_once("idx_to_pstate: index %u is out of bound\n", i);
return powernv_freqs[powernv_pstate_info.nominal].driver_data;
}
return powernv_freqs[i].driver_data;
}
/*
* pstate_to_idx : Returns the index in the cpufreq frequency table
* powernv_freqs for the frequency whose corresponding
* pstate id is @pstate.
*
* If no frequency corresponding to @pstate is found,
* this will return the index of the nominal
* frequency.
*/
static unsigned int pstate_to_idx(u8 pstate)
{
unsigned int key = pstate % POWERNV_MAX_PSTATES;
struct pstate_idx_revmap_data *revmap_data;
hash_for_each_possible(pstate_revmap, revmap_data, hentry, key) {
if (revmap_data->pstate_id == pstate)
return revmap_data->cpufreq_table_idx;
}
pr_warn_once("pstate_to_idx: pstate 0x%x not found\n", pstate);
return powernv_pstate_info.nominal;
}
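/*
* Note on the revmap lookup above: pstate ids are u8 and
* POWERNV_MAX_PSTATES is 256, so "pstate % POWERNV_MAX_PSTATES" is
* effectively the identity today. Distinct keys can still land in the
* same hash bucket, which is why the loop compares
* revmap_data->pstate_id before trusting an entry.
*/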
static inline void reset_gpstates(struct cpufreq_policy *policy)
{
struct global_pstate_info *gpstates = policy->driver_data;
gpstates->highest_lpstate_idx = 0;
gpstates->elapsed_time = 0;
gpstates->last_sampled_time = 0;
gpstates->last_lpstate_idx = 0;
gpstates->last_gpstate_idx = 0;
}
/*
* Initialize the freq table based on data obtained
* from the firmware passed via device-tree
*/
static int init_powernv_pstates(void)
{
struct device_node *power_mgt;
int i, nr_pstates = 0;
const __be32 *pstate_ids, *pstate_freqs;
u32 len_ids, len_freqs;
u32 pstate_min, pstate_max, pstate_nominal;
u32 pstate_turbo, pstate_ultra_turbo;
int rc = -ENODEV;
power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
if (!power_mgt) {
pr_warn("power-mgt node not found\n");
return -ENODEV;
}
if (of_property_read_u32(power_mgt, "ibm,pstate-min", &pstate_min)) {
pr_warn("ibm,pstate-min node not found\n");
goto out;
}
if (of_property_read_u32(power_mgt, "ibm,pstate-max", &pstate_max)) {
pr_warn("ibm,pstate-max node not found\n");
goto out;
}
if (of_property_read_u32(power_mgt, "ibm,pstate-nominal",
&pstate_nominal)) {
pr_warn("ibm,pstate-nominal not found\n");
goto out;
}
if (of_property_read_u32(power_mgt, "ibm,pstate-ultra-turbo",
&pstate_ultra_turbo)) {
powernv_pstate_info.wof_enabled = false;
goto next;
}
if (of_property_read_u32(power_mgt, "ibm,pstate-turbo",
&pstate_turbo)) {
powernv_pstate_info.wof_enabled = false;
goto next;
}
if (pstate_turbo == pstate_ultra_turbo)
powernv_pstate_info.wof_enabled = false;
else
powernv_pstate_info.wof_enabled = true;
next:
pr_info("cpufreq pstate min 0x%x nominal 0x%x max 0x%x\n", pstate_min,
pstate_nominal, pstate_max);
pr_info("Workload Optimized Frequency is %s in the platform\n",
(powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");
pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
if (!pstate_ids) {
pr_warn("ibm,pstate-ids not found\n");
goto out;
}
pstate_freqs = of_get_property(power_mgt, "ibm,pstate-frequencies-mhz",
&len_freqs);
if (!pstate_freqs) {
pr_warn("ibm,pstate-frequencies-mhz not found\n");
goto out;
}
if (len_ids != len_freqs) {
pr_warn("Entries in ibm,pstate-ids and "
"ibm,pstate-frequencies-mhz does not match\n");
}
nr_pstates = min(len_ids, len_freqs) / sizeof(u32);
if (!nr_pstates) {
pr_warn("No PStates found\n");
goto out;
}
powernv_pstate_info.nr_pstates = nr_pstates;
pr_debug("NR PStates %d\n", nr_pstates);
for (i = 0; i < nr_pstates; i++) {
u32 id = be32_to_cpu(pstate_ids[i]);
u32 freq = be32_to_cpu(pstate_freqs[i]);
struct pstate_idx_revmap_data *revmap_data;
unsigned int key;
pr_debug("PState id %d freq %d MHz\n", id, freq);
powernv_freqs[i].frequency = freq * 1000; /* kHz */
powernv_freqs[i].driver_data = id & 0xFF;
revmap_data = kmalloc(sizeof(*revmap_data), GFP_KERNEL);
if (!revmap_data) {
rc = -ENOMEM;
goto out;
}
revmap_data->pstate_id = id & 0xFF;
revmap_data->cpufreq_table_idx = i;
key = (revmap_data->pstate_id) % POWERNV_MAX_PSTATES;
hash_add(pstate_revmap, &revmap_data->hentry, key);
if (id == pstate_max)
powernv_pstate_info.max = i;
if (id == pstate_nominal)
powernv_pstate_info.nominal = i;
if (id == pstate_min)
powernv_pstate_info.min = i;
if (powernv_pstate_info.wof_enabled && id == pstate_turbo) {
int j;
for (j = i - 1; j >= (int)powernv_pstate_info.max; j--)
powernv_freqs[j].flags = CPUFREQ_BOOST_FREQ;
}
}
/* End of list marker entry */
powernv_freqs[i].frequency = CPUFREQ_TABLE_END;
of_node_put(power_mgt);
return 0;
out:
of_node_put(power_mgt);
return rc;
}
/* Returns the CPU frequency corresponding to the pstate_id. */
static unsigned int pstate_id_to_freq(u8 pstate_id)
{
int i;
i = pstate_to_idx(pstate_id);
if (i >= powernv_pstate_info.nr_pstates || i < 0) {
pr_warn("PState id 0x%x outside of PState table, reporting nominal id 0x%x instead\n",
pstate_id, idx_to_pstate(powernv_pstate_info.nominal));
i = powernv_pstate_info.nominal;
}
return powernv_freqs[i].frequency;
}
/*
* cpuinfo_nominal_freq_show - Show the nominal CPU frequency as indicated by
* the firmware
*/
static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
char *buf)
{
return sprintf(buf, "%u\n",
powernv_freqs[powernv_pstate_info.nominal].frequency);
}
static struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
__ATTR_RO(cpuinfo_nominal_freq);
#define SCALING_BOOST_FREQS_ATTR_INDEX 2
static struct freq_attr *powernv_cpu_freq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
&cpufreq_freq_attr_cpuinfo_nominal_freq,
&cpufreq_freq_attr_scaling_boost_freqs,
NULL,
};
#define throttle_attr(name, member) \
static ssize_t name##_show(struct cpufreq_policy *policy, char *buf) \
{ \
struct chip *chip = per_cpu(chip_info, policy->cpu); \
\
return sprintf(buf, "%u\n", chip->member); \
} \
\
static struct freq_attr throttle_attr_##name = __ATTR_RO(name)
throttle_attr(unthrottle, reason[NO_THROTTLE]);
throttle_attr(powercap, reason[POWERCAP]);
throttle_attr(overtemp, reason[CPU_OVERTEMP]);
throttle_attr(supply_fault, reason[POWER_SUPPLY_FAILURE]);
throttle_attr(overcurrent, reason[OVERCURRENT]);
throttle_attr(occ_reset, reason[OCC_RESET_THROTTLE]);
throttle_attr(turbo_stat, throttle_turbo);
throttle_attr(sub_turbo_stat, throttle_sub_turbo);
static struct attribute *throttle_attrs[] = {
&throttle_attr_unthrottle.attr,
&throttle_attr_powercap.attr,
&throttle_attr_overtemp.attr,
&throttle_attr_supply_fault.attr,
&throttle_attr_overcurrent.attr,
&throttle_attr_occ_reset.attr,
&throttle_attr_turbo_stat.attr,
&throttle_attr_sub_turbo_stat.attr,
NULL,
};
static const struct attribute_group throttle_attr_grp = {
.name = "throttle_stats",
.attrs = throttle_attrs,
};
/* Helper routines */
/* Access helpers to power mgt SPR */
static inline unsigned long get_pmspr(unsigned long sprn)
{
switch (sprn) {
case SPRN_PMCR:
return mfspr(SPRN_PMCR);
case SPRN_PMICR:
return mfspr(SPRN_PMICR);
case SPRN_PMSR:
return mfspr(SPRN_PMSR);
}
BUG();
}
static inline void set_pmspr(unsigned long sprn, unsigned long val)
{
switch (sprn) {
case SPRN_PMCR:
mtspr(SPRN_PMCR, val);
return;
case SPRN_PMICR:
mtspr(SPRN_PMICR, val);
return;
}
BUG();
}
/*
* Use objects of this type to query/update
* pstates on a remote CPU via smp_call_function.
*/
struct powernv_smp_call_data {
unsigned int freq;
u8 pstate_id;
u8 gpstate_id;
};
/*
* powernv_read_cpu_freq: Reads the current frequency on this CPU.
*
* Called via smp_call_function.
*
* Note: The caller of the smp_call_function should pass an argument of
* the type 'struct powernv_smp_call_data *' along with this function.
*
* The current frequency on this CPU will be returned via
* ((struct powernv_smp_call_data *)arg)->freq;
*/
static void powernv_read_cpu_freq(void *arg)
{
unsigned long pmspr_val;
struct powernv_smp_call_data *freq_data = arg;
pmspr_val = get_pmspr(SPRN_PMSR);
freq_data->pstate_id = extract_local_pstate(pmspr_val);
freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);
pr_debug("cpu %d pmsr %016lX pstate_id 0x%x frequency %d kHz\n",
raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
freq_data->freq);
}
/*
* powernv_cpufreq_get: Returns the CPU frequency as reported by the
* firmware for CPU 'cpu'. This value is reported through the sysfs
* file cpuinfo_cur_freq.
*/
static unsigned int powernv_cpufreq_get(unsigned int cpu)
{
struct powernv_smp_call_data freq_data;
smp_call_function_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq,
&freq_data, 1);
return freq_data.freq;
}
/*
* set_pstate: Sets the pstate on this CPU.
*
* This is called via an smp_call_function.
*
* The caller must ensure that freq_data is of the type
* (struct powernv_smp_call_data *) and the pstate_id which needs to be set
* on this CPU should be present in freq_data->pstate_id.
*/
static void set_pstate(void *data)
{
unsigned long val;
struct powernv_smp_call_data *freq_data = data;
unsigned long pstate_ul = freq_data->pstate_id;
unsigned long gpstate_ul = freq_data->gpstate_id;
val = get_pmspr(SPRN_PMCR);
val = val & 0x0000FFFFFFFFFFFFULL;
pstate_ul = pstate_ul & 0xFF;
gpstate_ul = gpstate_ul & 0xFF;
/* Set both global(bits 56..63) and local(bits 48..55) PStates */
val = val | (gpstate_ul << 56) | (pstate_ul << 48);
pr_debug("Setting cpu %d pmcr to %016lX\n",
raw_smp_processor_id(), val);
set_pmspr(SPRN_PMCR, val);
}
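/*
* Worked example for the PMCR composition above: with pstate_id = 0x30
* and gpstate_id = 0x20, the preserved low 48 bits of PMCR are OR-ed
* with (0x20 << 56) | (0x30 << 48), yielding the 0x2030xxxxxxxxxxxx
* layout that extract_global_pstate()/extract_local_pstate() decode.
*/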
/*
* get_nominal_index: Returns the index corresponding to the nominal
* pstate in the cpufreq table
*/
static inline unsigned int get_nominal_index(void)
{
return powernv_pstate_info.nominal;
}
static void powernv_cpufreq_throttle_check(void *data)
{
struct chip *chip;
unsigned int cpu = smp_processor_id();
unsigned long pmsr;
u8 pmsr_pmax;
unsigned int pmsr_pmax_idx;
pmsr = get_pmspr(SPRN_PMSR);
chip = this_cpu_read(chip_info);
/* Check for Pmax Capping */
pmsr_pmax = extract_max_pstate(pmsr);
pmsr_pmax_idx = pstate_to_idx(pmsr_pmax);
if (pmsr_pmax_idx != powernv_pstate_info.max) {
if (chip->throttled)
goto next;
chip->throttled = true;
if (pmsr_pmax_idx > powernv_pstate_info.nominal) {
pr_warn_once("CPU %d on Chip %u has Pmax(0x%x) reduced below that of nominal frequency(0x%x)\n",
cpu, chip->id, pmsr_pmax,
idx_to_pstate(powernv_pstate_info.nominal));
chip->throttle_sub_turbo++;
} else {
chip->throttle_turbo++;
}
trace_powernv_throttle(chip->id,
throttle_reason[chip->throttle_reason],
pmsr_pmax);
} else if (chip->throttled) {
chip->throttled = false;
trace_powernv_throttle(chip->id,
throttle_reason[chip->throttle_reason],
pmsr_pmax);
}
/* Check if Psafe_mode_active is set in PMSR. */
next:
if (pmsr & PMSR_PSAFE_ENABLE) {
throttled = true;
pr_info("Pstate set to safe frequency\n");
}
/* Check if SPR_EM_DISABLE is set in PMSR */
if (pmsr & PMSR_SPR_EM_DISABLE) {
throttled = true;
pr_info("Frequency Control disabled from OS\n");
}
if (throttled) {
pr_info("PMSR = %16lx\n", pmsr);
pr_warn("CPU Frequency could be throttled\n");
}
}
/**
* calc_global_pstate - Calculate global pstate
* @elapsed_time: Elapsed time in milliseconds
* @local_pstate_idx: New local pstate
* @highest_lpstate_idx: pstate index from which it is ramping down
*
* Finds the appropriate global pstate based on the pstate from which it is
* ramping down and the time elapsed while ramping down. It follows a
* quadratic equation which ensures that the ramp-down reaches pmin within
* 5 seconds.
*/
static inline int calc_global_pstate(unsigned int elapsed_time,
int highest_lpstate_idx,
int local_pstate_idx)
{
int index_diff;
/*
* Using ramp_down_percent we get the percentage of rampdown
* that we are expecting to drop. The difference between
* highest_lpstate_idx and powernv_pstate_info.min gives the absolute
* number of pstates we will eventually drop by the end of 5 seconds;
* scaling it by that percentage gives the number of pstates to drop
* at this point in time.
*/
index_diff = ((int)ramp_down_percent(elapsed_time) *
(powernv_pstate_info.min - highest_lpstate_idx)) / 100;
/* Ensure that the global pstate is >= the local pstate */
if (highest_lpstate_idx + index_diff >= local_pstate_idx)
return local_pstate_idx;
else
return highest_lpstate_idx + index_diff;
}
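/*
* Worked example, assuming powernv_pstate_info.min = 40 (the lowest
* frequency has the highest table index) and highest_lpstate_idx = 0:
* at elapsed_time = 2560 ms, ramp_down_percent() yields 25, so
* index_diff = 25 * (40 - 0) / 100 = 10. If local_pstate_idx = 30 the
* function returns 10 (global still above local); if local_pstate_idx
* = 5 it returns 5, clamping the global pstate to the local one.
*/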
static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
{
unsigned int timer_interval;
/*
* Set up the timer to fire after GPSTATE_TIMER_INTERVAL ms. But if
* that would exceed MAX_RAMP_DOWN_TIME ms of total ramp-down time,
* set the timer so that it fires exactly when MAX_RAMP_DOWN_TIME ms
* of ramp-down time have elapsed.
*/
if ((gpstates->elapsed_time + GPSTATE_TIMER_INTERVAL)
> MAX_RAMP_DOWN_TIME)
timer_interval = MAX_RAMP_DOWN_TIME - gpstates->elapsed_time;
else
timer_interval = GPSTATE_TIMER_INTERVAL;
mod_timer(&gpstates->timer, jiffies + msecs_to_jiffies(timer_interval));
}
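/*
* Worked example: with elapsed_time = 4000 ms, another full
* GPSTATE_TIMER_INTERVAL (2000 ms) would overshoot MAX_RAMP_DOWN_TIME
* (5120 ms), so timer_interval becomes 5120 - 4000 = 1120 ms and the
* final firing lands exactly on the 5120 ms boundary.
*/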
/**
* gpstate_timer_handler - Timer handler that ramps down the global pstate
*
* @t: Timer context used to fetch global pstate info struct
*
* This handler brings the global pstate down closer to the local pstate
* according to a quadratic equation. It queues a new timer if the global
* pstate is still not equal to the local pstate.
*/
static void gpstate_timer_handler(struct timer_list *t)
{
struct global_pstate_info *gpstates = from_timer(gpstates, t, timer);
struct cpufreq_policy *policy = gpstates->policy;
int gpstate_idx, lpstate_idx;
unsigned long val;
unsigned int time_diff = jiffies_to_msecs(jiffies)
- gpstates->last_sampled_time;
struct powernv_smp_call_data freq_data;
if (!spin_trylock(&gpstates->gpstate_lock))
return;
/*
* If the timer has migrated to a different cpu then bring
* it back to one of the policy->cpus
*/
if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) {
gpstates->timer.expires = jiffies + msecs_to_jiffies(1);
add_timer_on(&gpstates->timer, cpumask_first(policy->cpus));
spin_unlock(&gpstates->gpstate_lock);
return;
}
/*
* If PMCR was last updated using fast_switch then
* gpstates->last_lpstate_idx may hold a stale value.
* Hence, read from PMCR to get correct data.
*/
val = get_pmspr(SPRN_PMCR);
freq_data.gpstate_id = extract_global_pstate(val);
freq_data.pstate_id = extract_local_pstate(val);
if (freq_data.gpstate_id == freq_data.pstate_id) {
reset_gpstates(policy);
spin_unlock(&gpstates->gpstate_lock);
return;
}
gpstates->last_sampled_time += time_diff;
gpstates->elapsed_time += time_diff;
if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
gpstate_idx = pstate_to_idx(freq_data.pstate_id);
lpstate_idx = gpstate_idx;
reset_gpstates(policy);
gpstates->highest_lpstate_idx = gpstate_idx;
} else {
lpstate_idx = pstate_to_idx(freq_data.pstate_id);
gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
gpstates->highest_lpstate_idx,
lpstate_idx);
}
freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
gpstates->last_gpstate_idx = gpstate_idx;
gpstates->last_lpstate_idx = lpstate_idx;
/*
* If local pstate is equal to global pstate, rampdown is over
* So timer is not required to be queued.
*/
if (gpstate_idx != gpstates->last_lpstate_idx)
queue_gpstate_timer(gpstates);
set_pstate(&freq_data);
spin_unlock(&gpstates->gpstate_lock);
}
/*
* powernv_cpufreq_target_index: Sets the frequency corresponding to
* the cpufreq table entry indexed by new_index on the cpus in the
* mask policy->cpus
*/
static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
unsigned int new_index)
{
struct powernv_smp_call_data freq_data;
unsigned int cur_msec, gpstate_idx;
struct global_pstate_info *gpstates = policy->driver_data;
if (unlikely(rebooting) && new_index != get_nominal_index())
return 0;
if (!throttled) {
/* we don't want to be preempted while
* checking if the CPU frequency has been throttled
*/
preempt_disable();
powernv_cpufreq_throttle_check(NULL);
preempt_enable();
}
cur_msec = jiffies_to_msecs(get_jiffies_64());
freq_data.pstate_id = idx_to_pstate(new_index);
if (!gpstates) {
freq_data.gpstate_id = freq_data.pstate_id;
goto no_gpstate;
}
spin_lock(&gpstates->gpstate_lock);
if (!gpstates->last_sampled_time) {
gpstate_idx = new_index;
gpstates->highest_lpstate_idx = new_index;
goto gpstates_done;
}
if (gpstates->last_gpstate_idx < new_index) {
gpstates->elapsed_time += cur_msec -
gpstates->last_sampled_time;
/*
* If it has been ramping down for more than MAX_RAMP_DOWN_TIME,
* we should reset all global pstate related data and set it
* equal to the local pstate to start fresh.
*/
if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
reset_gpstates(policy);
gpstates->highest_lpstate_idx = new_index;
gpstate_idx = new_index;
} else {
/* Elapsed time is less than 5 seconds; continue to ramp down */
gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
gpstates->highest_lpstate_idx,
new_index);
}
} else {
reset_gpstates(policy);
gpstates->highest_lpstate_idx = new_index;
gpstate_idx = new_index;
}
/*
* If local pstate is equal to global pstate, rampdown is over
* So timer is not required to be queued.
*/
if (gpstate_idx != new_index)
queue_gpstate_timer(gpstates);
else
del_timer_sync(&gpstates->timer);
gpstates_done:
freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
gpstates->last_sampled_time = cur_msec;
gpstates->last_gpstate_idx = gpstate_idx;
gpstates->last_lpstate_idx = new_index;
spin_unlock(&gpstates->gpstate_lock);
no_gpstate:
/*
* Use smp_call_function to send IPI and execute the
* mtspr on target CPU. We could do that without IPI
* if current CPU is within policy->cpus (core)
*/
smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
return 0;
}
static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
int base, i;
struct kernfs_node *kn;
struct global_pstate_info *gpstates;
base = cpu_first_thread_sibling(policy->cpu);
for (i = 0; i < threads_per_core; i++)
cpumask_set_cpu(base + i, policy->cpus);
kn = kernfs_find_and_get(policy->kobj.sd, throttle_attr_grp.name);
if (!kn) {
int ret;
ret = sysfs_create_group(&policy->kobj, &throttle_attr_grp);
if (ret) {
pr_info("Failed to create throttle stats directory for cpu %d\n",
policy->cpu);
return ret;
}
} else {
kernfs_put(kn);
}
policy->freq_table = powernv_freqs;
policy->fast_switch_possible = true;
if (pvr_version_is(PVR_POWER9))
return 0;
/* Initialise Gpstate ramp-down timer only on POWER8 */
gpstates = kzalloc(sizeof(*gpstates), GFP_KERNEL);
if (!gpstates)
return -ENOMEM;
policy->driver_data = gpstates;
/* initialize timer */
gpstates->policy = policy;
timer_setup(&gpstates->timer, gpstate_timer_handler,
TIMER_PINNED | TIMER_DEFERRABLE);
gpstates->timer.expires = jiffies +
msecs_to_jiffies(GPSTATE_TIMER_INTERVAL);
spin_lock_init(&gpstates->gpstate_lock);
return 0;
}
static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct powernv_smp_call_data freq_data;
struct global_pstate_info *gpstates = policy->driver_data;
freq_data.pstate_id = idx_to_pstate(powernv_pstate_info.min);
freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min);
smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
if (gpstates)
del_timer_sync(&gpstates->timer);
kfree(policy->driver_data);
return 0;
}
static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
unsigned long action, void *unused)
{
int cpu;
struct cpufreq_policy *cpu_policy;
rebooting = true;
for_each_online_cpu(cpu) {
cpu_policy = cpufreq_cpu_get(cpu);
if (!cpu_policy)
continue;
powernv_cpufreq_target_index(cpu_policy, get_nominal_index());
cpufreq_cpu_put(cpu_policy);
}
return NOTIFY_DONE;
}
static struct notifier_block powernv_cpufreq_reboot_nb = {
.notifier_call = powernv_cpufreq_reboot_notifier,
};
static void powernv_cpufreq_work_fn(struct work_struct *work)
{
struct chip *chip = container_of(work, struct chip, throttle);
struct cpufreq_policy *policy;
unsigned int cpu;
cpumask_t mask;
cpus_read_lock();
cpumask_and(&mask, &chip->mask, cpu_online_mask);
smp_call_function_any(&mask,
powernv_cpufreq_throttle_check, NULL, 0);
if (!chip->restore)
goto out;
chip->restore = false;
for_each_cpu(cpu, &mask) {
int index;
policy = cpufreq_cpu_get(cpu);
if (!policy)
continue;
index = cpufreq_table_find_index_c(policy, policy->cur, false);
powernv_cpufreq_target_index(policy, index);
cpumask_andnot(&mask, &mask, policy->cpus);
cpufreq_cpu_put(policy);
}
out:
cpus_read_unlock();
}
static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
unsigned long msg_type, void *_msg)
{
struct opal_msg *msg = _msg;
struct opal_occ_msg omsg;
int i;
if (msg_type != OPAL_MSG_OCC)
return 0;
omsg.type = be64_to_cpu(msg->params[0]);
switch (omsg.type) {
case OCC_RESET:
occ_reset = true;
pr_info("OCC (On Chip Controller - enforces hard thermal/power limits) Resetting\n");
/*
* powernv_cpufreq_throttle_check() is called in
* target() callback which can detect the throttle state
* for governors like ondemand.
* But static governors will not call target() often thus
* report throttling here.
*/
if (!throttled) {
throttled = true;
pr_warn("CPU frequency is throttled for duration\n");
}
break;
case OCC_LOAD:
pr_info("OCC Loading, CPU frequency is throttled until OCC is started\n");
break;
case OCC_THROTTLE:
omsg.chip = be64_to_cpu(msg->params[1]);
omsg.throttle_status = be64_to_cpu(msg->params[2]);
if (occ_reset) {
occ_reset = false;
throttled = false;
pr_info("OCC Active, CPU frequency is no longer throttled\n");
for (i = 0; i < nr_chips; i++) {
chips[i].restore = true;
schedule_work(&chips[i].throttle);
}
return 0;
}
for (i = 0; i < nr_chips; i++)
if (chips[i].id == omsg.chip)
break;
if (omsg.throttle_status >= 0 &&
omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS) {
chips[i].throttle_reason = omsg.throttle_status;
chips[i].reason[omsg.throttle_status]++;
}
if (!omsg.throttle_status)
chips[i].restore = true;
schedule_work(&chips[i].throttle);
}
return 0;
}
static struct notifier_block powernv_cpufreq_opal_nb = {
.notifier_call = powernv_cpufreq_occ_msg,
.next = NULL,
.priority = 0,
};
static unsigned int powernv_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
int index;
struct powernv_smp_call_data freq_data;
index = cpufreq_table_find_index_dl(policy, target_freq, false);
freq_data.pstate_id = powernv_freqs[index].driver_data;
freq_data.gpstate_id = powernv_freqs[index].driver_data;
set_pstate(&freq_data);
return powernv_freqs[index].frequency;
}
static struct cpufreq_driver powernv_cpufreq_driver = {
.name = "powernv-cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
.init = powernv_cpufreq_cpu_init,
.exit = powernv_cpufreq_cpu_exit,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = powernv_cpufreq_target_index,
.fast_switch = powernv_fast_switch,
.get = powernv_cpufreq_get,
.attr = powernv_cpu_freq_attr,
};
static int init_chip_info(void)
{
unsigned int *chip;
unsigned int cpu, i;
unsigned int prev_chip_id = UINT_MAX;
cpumask_t *chip_cpu_mask;
int ret = 0;
chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
/* Allocate a chip cpu mask large enough to fit mask for all chips */
chip_cpu_mask = kcalloc(MAX_NR_CHIPS, sizeof(cpumask_t), GFP_KERNEL);
if (!chip_cpu_mask) {
ret = -ENOMEM;
goto free_and_return;
}
for_each_possible_cpu(cpu) {
unsigned int id = cpu_to_chip_id(cpu);
if (prev_chip_id != id) {
prev_chip_id = id;
chip[nr_chips++] = id;
}
cpumask_set_cpu(cpu, &chip_cpu_mask[nr_chips-1]);
}
chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
if (!chips) {
ret = -ENOMEM;
goto out_free_chip_cpu_mask;
}
for (i = 0; i < nr_chips; i++) {
chips[i].id = chip[i];
cpumask_copy(&chips[i].mask, &chip_cpu_mask[i]);
INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
for_each_cpu(cpu, &chips[i].mask)
per_cpu(chip_info, cpu) = &chips[i];
}
out_free_chip_cpu_mask:
kfree(chip_cpu_mask);
free_and_return:
kfree(chip);
return ret;
}
static inline void clean_chip_info(void)
{
int i;
/* flush any pending work items */
if (chips)
for (i = 0; i < nr_chips; i++)
cancel_work_sync(&chips[i].throttle);
kfree(chips);
}
static inline void unregister_all_notifiers(void)
{
opal_message_notifier_unregister(OPAL_MSG_OCC,
&powernv_cpufreq_opal_nb);
unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
}
static int __init powernv_cpufreq_init(void)
{
int rc = 0;
/* Don't probe on pseries (guest) platforms */
if (!firmware_has_feature(FW_FEATURE_OPAL))
return -ENODEV;
/* Discover pstates from device tree and init */
rc = init_powernv_pstates();
if (rc)
goto out;
/* Populate chip info */
rc = init_chip_info();
if (rc)
goto out;
if (powernv_pstate_info.wof_enabled)
powernv_cpufreq_driver.boost_enabled = true;
else
powernv_cpu_freq_attr[SCALING_BOOST_FREQS_ATTR_INDEX] = NULL;
rc = cpufreq_register_driver(&powernv_cpufreq_driver);
if (rc) {
pr_info("Failed to register the cpufreq driver (%d)\n", rc);
goto cleanup;
}
if (powernv_pstate_info.wof_enabled)
cpufreq_enable_boost_support();
register_reboot_notifier(&powernv_cpufreq_reboot_nb);
opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
return 0;
cleanup:
clean_chip_info();
out:
pr_info("Platform driver disabled. System does not support PState control\n");
return rc;
}
module_init(powernv_cpufreq_init);
static void __exit powernv_cpufreq_exit(void)
{
cpufreq_unregister_driver(&powernv_cpufreq_driver);
unregister_all_notifiers();
clean_chip_info();
}
module_exit(powernv_cpufreq_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>");
| linux-master | drivers/cpufreq/powernv-cpufreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Freescale Semiconductor, Inc.
*/
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#define PU_SOC_VOLTAGE_NORMAL 1250000
#define PU_SOC_VOLTAGE_HIGH 1275000
#define FREQ_1P2_GHZ 1200000000
static struct regulator *arm_reg;
static struct regulator *pu_reg;
static struct regulator *soc_reg;
enum IMX6_CPUFREQ_CLKS {
ARM,
PLL1_SYS,
STEP,
PLL1_SW,
PLL2_PFD2_396M,
/* MX6UL requires two more clks */
PLL2_BUS,
SECONDARY_SEL,
};
#define IMX6Q_CPUFREQ_CLK_NUM 5
#define IMX6UL_CPUFREQ_CLK_NUM 7
static int num_clks;
static struct clk_bulk_data clks[] = {
{ .id = "arm" },
{ .id = "pll1_sys" },
{ .id = "step" },
{ .id = "pll1_sw" },
{ .id = "pll2_pfd2_396m" },
{ .id = "pll2_bus" },
{ .id = "secondary_sel" },
};
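/*
* Note on the array above: clk_bulk_get() in the probe path fetches the
* first num_clks entries, so the ordering must match
* enum IMX6_CPUFREQ_CLKS -- the five clocks common to all i.MX6 parts
* first, then the two extras (pll2_bus, secondary_sel) that only the
* i.MX6UL/ULL frequency-change path consumes.
*/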
static struct device *cpu_dev;
static struct cpufreq_frequency_table *freq_table;
static unsigned int max_freq;
static unsigned int transition_latency;
static u32 *imx6_soc_volt;
static u32 soc_opp_count;
static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
{
struct dev_pm_opp *opp;
unsigned long freq_hz, volt, volt_old;
unsigned int old_freq, new_freq;
bool pll1_sys_temp_enabled = false;
int ret;
new_freq = freq_table[index].frequency;
freq_hz = new_freq * 1000;
old_freq = clk_get_rate(clks[ARM].clk) / 1000;
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
if (IS_ERR(opp)) {
dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz);
return PTR_ERR(opp);
}
volt = dev_pm_opp_get_voltage(opp);
dev_pm_opp_put(opp);
volt_old = regulator_get_voltage(arm_reg);
dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
old_freq / 1000, volt_old / 1000,
new_freq / 1000, volt / 1000);
/* scaling up? scale voltage before frequency */
if (new_freq > old_freq) {
if (!IS_ERR(pu_reg)) {
ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
if (ret) {
dev_err(cpu_dev, "failed to scale vddpu up: %d\n", ret);
return ret;
}
}
ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
if (ret) {
dev_err(cpu_dev, "failed to scale vddsoc up: %d\n", ret);
return ret;
}
ret = regulator_set_voltage_tol(arm_reg, volt, 0);
if (ret) {
dev_err(cpu_dev,
"failed to scale vddarm up: %d\n", ret);
return ret;
}
}
/*
* The setpoints are selected per PLL/PDF frequencies, so we need to
* reprogram PLL for frequency scaling. The procedure of reprogramming
* PLL1 is as below.
* The i.MX6UL has a secondary clk mux, so its cpu frequency change
* flow is slightly different from that of other i.MX6 SoCs.
* The cpu frequency change flow for i.MX6 (except i.MX6UL) is as below:
* - Enable pll2_pfd2_396m_clk and reparent pll1_sw_clk to it
* - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
* - Disable pll2_pfd2_396m_clk
*/
if (of_machine_is_compatible("fsl,imx6ul") ||
of_machine_is_compatible("fsl,imx6ull")) {
/*
* When changing pll1_sw_clk's parent to pll1_sys_clk,
* CPU may run at higher than 528MHz, this will lead to
* the system unstable if the voltage is lower than the
* voltage of 528MHz, so lower the CPU frequency to one
* half before changing CPU frequency.
*/
clk_set_rate(clks[ARM].clk, (old_freq >> 1) * 1000);
clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
if (freq_hz > clk_get_rate(clks[PLL2_PFD2_396M].clk))
clk_set_parent(clks[SECONDARY_SEL].clk,
clks[PLL2_BUS].clk);
else
clk_set_parent(clks[SECONDARY_SEL].clk,
clks[PLL2_PFD2_396M].clk);
clk_set_parent(clks[STEP].clk, clks[SECONDARY_SEL].clk);
clk_set_parent(clks[PLL1_SW].clk, clks[STEP].clk);
if (freq_hz > clk_get_rate(clks[PLL2_BUS].clk)) {
clk_set_rate(clks[PLL1_SYS].clk, new_freq * 1000);
clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
}
} else {
clk_set_parent(clks[STEP].clk, clks[PLL2_PFD2_396M].clk);
clk_set_parent(clks[PLL1_SW].clk, clks[STEP].clk);
if (freq_hz > clk_get_rate(clks[PLL2_PFD2_396M].clk)) {
clk_set_rate(clks[PLL1_SYS].clk, new_freq * 1000);
clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
} else {
/* pll1_sys needs to be enabled for divider rate change to work. */
pll1_sys_temp_enabled = true;
clk_prepare_enable(clks[PLL1_SYS].clk);
}
}
/* Ensure the arm clock divider is what we expect */
ret = clk_set_rate(clks[ARM].clk, new_freq * 1000);
if (ret) {
int ret1;
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
ret1 = regulator_set_voltage_tol(arm_reg, volt_old, 0);
if (ret1)
dev_warn(cpu_dev,
"failed to restore vddarm voltage: %d\n", ret1);
return ret;
}
/* PLL1 is only needed until after ARM-PODF is set. */
if (pll1_sys_temp_enabled)
clk_disable_unprepare(clks[PLL1_SYS].clk);
/* scaling down? scale voltage after frequency */
if (new_freq < old_freq) {
ret = regulator_set_voltage_tol(arm_reg, volt, 0);
if (ret)
dev_warn(cpu_dev,
"failed to scale vddarm down: %d\n", ret);
ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
if (ret)
dev_warn(cpu_dev, "failed to scale vddsoc down: %d\n", ret);
if (!IS_ERR(pu_reg)) {
ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
if (ret)
dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret);
}
}
return 0;
}
static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
policy->clk = clks[ARM].clk;
cpufreq_generic_init(policy, freq_table, transition_latency);
policy->suspend_freq = max_freq;
return 0;
}
static struct cpufreq_driver imx6q_cpufreq_driver = {
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = imx6q_set_target,
.get = cpufreq_generic_get,
.init = imx6q_cpufreq_init,
.register_em = cpufreq_register_em_with_opp,
.name = "imx6q-cpufreq",
.attr = cpufreq_generic_attr,
.suspend = cpufreq_generic_suspend,
};
static void imx6x_disable_freq_in_opp(struct device *dev, unsigned long freq)
{
int ret = dev_pm_opp_disable(dev, freq);
if (ret < 0 && ret != -ENODEV)
dev_warn(dev, "failed to disable %ldMHz OPP\n", freq / 1000000);
}
#define OCOTP_CFG3 0x440
#define OCOTP_CFG3_SPEED_SHIFT 16
#define OCOTP_CFG3_SPEED_1P2GHZ 0x3
#define OCOTP_CFG3_SPEED_996MHZ 0x2
#define OCOTP_CFG3_SPEED_852MHZ 0x1
static int imx6q_opp_check_speed_grading(struct device *dev)
{
struct device_node *np;
void __iomem *base;
u32 val;
int ret;
if (of_property_present(dev->of_node, "nvmem-cells")) {
ret = nvmem_cell_read_u32(dev, "speed_grade", &val);
if (ret)
return ret;
} else {
np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp");
if (!np)
return -ENOENT;
base = of_iomap(np, 0);
of_node_put(np);
if (!base) {
dev_err(dev, "failed to map ocotp\n");
return -EFAULT;
}
/*
* SPEED_GRADING[1:0] defines the max speed of ARM:
* 2b'11: 1200000000Hz;
* 2b'10: 996000000Hz;
* 2b'01: 852000000Hz; -- i.MX6Q Only, exclusive with 996MHz.
* 2b'00: 792000000Hz;
* We need to set the max speed of ARM according to fuse map.
*/
val = readl_relaxed(base + OCOTP_CFG3);
iounmap(base);
}
val >>= OCOTP_CFG3_SPEED_SHIFT;
val &= 0x3;
if (val < OCOTP_CFG3_SPEED_996MHZ)
imx6x_disable_freq_in_opp(dev, 996000000);
if (of_machine_is_compatible("fsl,imx6q") ||
of_machine_is_compatible("fsl,imx6qp")) {
if (val != OCOTP_CFG3_SPEED_852MHZ)
imx6x_disable_freq_in_opp(dev, 852000000);
if (val != OCOTP_CFG3_SPEED_1P2GHZ)
imx6x_disable_freq_in_opp(dev, 1200000000);
}
return 0;
}
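/*
* Worked example for the fuse decode above, on an i.MX6Q with a
* hypothetical OCOTP_CFG3 value of 0x00020000: val >> 16 = 0x2
* (the 996 MHz grade), so the 996 MHz OPP stays enabled while both the
* 852 MHz OPP (val != 0x1) and the 1.2 GHz OPP (val != 0x3) are
* disabled via imx6x_disable_freq_in_opp().
*/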
#define OCOTP_CFG3_6UL_SPEED_696MHZ 0x2
#define OCOTP_CFG3_6ULL_SPEED_792MHZ 0x2
#define OCOTP_CFG3_6ULL_SPEED_900MHZ 0x3
static int imx6ul_opp_check_speed_grading(struct device *dev)
{
u32 val;
int ret = 0;
if (of_property_present(dev->of_node, "nvmem-cells")) {
ret = nvmem_cell_read_u32(dev, "speed_grade", &val);
if (ret)
return ret;
} else {
struct device_node *np;
void __iomem *base;
np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
if (!np)
np = of_find_compatible_node(NULL, NULL,
"fsl,imx6ull-ocotp");
if (!np)
return -ENOENT;
base = of_iomap(np, 0);
of_node_put(np);
if (!base) {
dev_err(dev, "failed to map ocotp\n");
return -EFAULT;
}
val = readl_relaxed(base + OCOTP_CFG3);
iounmap(base);
}
/*
* Speed GRADING[1:0] defines the max speed of ARM:
* 2b'00: Reserved;
* 2b'01: 528000000Hz;
* 2b'10: 696000000Hz on i.MX6UL, 792000000Hz on i.MX6ULL;
* 2b'11: 900000000Hz on i.MX6ULL only;
* We need to set the max speed of ARM according to fuse map.
*/
val >>= OCOTP_CFG3_SPEED_SHIFT;
val &= 0x3;
if (of_machine_is_compatible("fsl,imx6ul"))
if (val != OCOTP_CFG3_6UL_SPEED_696MHZ)
imx6x_disable_freq_in_opp(dev, 696000000);
if (of_machine_is_compatible("fsl,imx6ull")) {
if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ)
imx6x_disable_freq_in_opp(dev, 792000000);
if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ)
imx6x_disable_freq_in_opp(dev, 900000000);
}
return ret;
}
static int imx6q_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
struct dev_pm_opp *opp;
unsigned long min_volt, max_volt;
int num, ret;
const struct property *prop;
const __be32 *val;
u32 nr, i, j;
cpu_dev = get_cpu_device(0);
if (!cpu_dev) {
pr_err("failed to get cpu0 device\n");
return -ENODEV;
}
np = of_node_get(cpu_dev->of_node);
if (!np) {
dev_err(cpu_dev, "failed to find cpu0 node\n");
return -ENOENT;
}
if (of_machine_is_compatible("fsl,imx6ul") ||
of_machine_is_compatible("fsl,imx6ull"))
num_clks = IMX6UL_CPUFREQ_CLK_NUM;
else
num_clks = IMX6Q_CPUFREQ_CLK_NUM;
ret = clk_bulk_get(cpu_dev, num_clks, clks);
if (ret)
goto put_node;
arm_reg = regulator_get(cpu_dev, "arm");
pu_reg = regulator_get_optional(cpu_dev, "pu");
soc_reg = regulator_get(cpu_dev, "soc");
if (PTR_ERR(arm_reg) == -EPROBE_DEFER ||
PTR_ERR(soc_reg) == -EPROBE_DEFER ||
PTR_ERR(pu_reg) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
dev_dbg(cpu_dev, "regulators not ready, defer\n");
goto put_reg;
}
if (IS_ERR(arm_reg) || IS_ERR(soc_reg)) {
dev_err(cpu_dev, "failed to get regulators\n");
ret = -ENOENT;
goto put_reg;
}
ret = dev_pm_opp_of_add_table(cpu_dev);
if (ret < 0) {
dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
goto put_reg;
}
if (of_machine_is_compatible("fsl,imx6ul") ||
of_machine_is_compatible("fsl,imx6ull")) {
ret = imx6ul_opp_check_speed_grading(cpu_dev);
} else {
ret = imx6q_opp_check_speed_grading(cpu_dev);
}
if (ret) {
dev_err_probe(cpu_dev, ret, "failed to read ocotp\n");
goto out_free_opp;
}
num = dev_pm_opp_get_opp_count(cpu_dev);
if (num < 0) {
ret = num;
dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
goto out_free_opp;
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
goto out_free_opp;
}
/* Make the imx6_soc_volt array the same size as the number of ARM OPPs */
imx6_soc_volt = devm_kcalloc(cpu_dev, num, sizeof(*imx6_soc_volt),
GFP_KERNEL);
if (imx6_soc_volt == NULL) {
ret = -ENOMEM;
goto free_freq_table;
}
prop = of_find_property(np, "fsl,soc-operating-points", NULL);
if (!prop || !prop->value)
goto soc_opp_out;
/*
* Each OPP is a set of tuples consisting of frequency and
* voltage like <freq-kHz vol-uV>.
*/
nr = prop->length / sizeof(u32);
if (nr % 2 || (nr / 2) < num)
goto soc_opp_out;
for (j = 0; j < num; j++) {
val = prop->value;
for (i = 0; i < nr / 2; i++) {
unsigned long freq = be32_to_cpup(val++);
unsigned long volt = be32_to_cpup(val++);
if (freq_table[j].frequency == freq) {
imx6_soc_volt[soc_opp_count++] = volt;
break;
}
}
}
soc_opp_out:
/* use fixed soc opp volt if no valid soc opp info found in dtb */
if (soc_opp_count != num) {
dev_warn(cpu_dev, "can NOT find valid fsl,soc-operating-points property in dtb, use default value!\n");
for (j = 0; j < num; j++)
imx6_soc_volt[j] = PU_SOC_VOLTAGE_NORMAL;
if (freq_table[num - 1].frequency * 1000 == FREQ_1P2_GHZ)
imx6_soc_volt[num - 1] = PU_SOC_VOLTAGE_HIGH;
}
if (of_property_read_u32(np, "clock-latency", &transition_latency))
transition_latency = CPUFREQ_ETERNAL;
/*
* Calculate the ramp time for max voltage change in the
* VDDSOC and VDDPU regulators.
*/
ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
if (ret > 0)
transition_latency += ret * 1000;
if (!IS_ERR(pu_reg)) {
ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
if (ret > 0)
transition_latency += ret * 1000;
}
/*
* OPP is maintained in order of increasing frequency, and
* freq_table initialised from OPP is therefore sorted in the
* same order.
*/
max_freq = freq_table[--num].frequency;
opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[0].frequency * 1000, true);
min_volt = dev_pm_opp_get_voltage(opp);
dev_pm_opp_put(opp);
opp = dev_pm_opp_find_freq_exact(cpu_dev, max_freq * 1000, true);
max_volt = dev_pm_opp_get_voltage(opp);
dev_pm_opp_put(opp);
ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
if (ret > 0)
transition_latency += ret * 1000;
ret = cpufreq_register_driver(&imx6q_cpufreq_driver);
if (ret) {
dev_err(cpu_dev, "failed register driver: %d\n", ret);
goto free_freq_table;
}
of_node_put(np);
return 0;
free_freq_table:
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_opp:
dev_pm_opp_of_remove_table(cpu_dev);
put_reg:
if (!IS_ERR(arm_reg))
regulator_put(arm_reg);
if (!IS_ERR(pu_reg))
regulator_put(pu_reg);
if (!IS_ERR(soc_reg))
regulator_put(soc_reg);
clk_bulk_put(num_clks, clks);
put_node:
of_node_put(np);
return ret;
}
static void imx6q_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&imx6q_cpufreq_driver);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
dev_pm_opp_of_remove_table(cpu_dev);
regulator_put(arm_reg);
if (!IS_ERR(pu_reg))
regulator_put(pu_reg);
regulator_put(soc_reg);
clk_bulk_put(num_clks, clks);
}
static struct platform_driver imx6q_cpufreq_platdrv = {
.driver = {
.name = "imx6q-cpufreq",
},
.probe = imx6q_cpufreq_probe,
.remove_new = imx6q_cpufreq_remove,
};
module_platform_driver(imx6q_cpufreq_platdrv);
MODULE_ALIAS("platform:imx6q-cpufreq");
MODULE_AUTHOR("Shawn Guo <[email protected]>");
MODULE_DESCRIPTION("Freescale i.MX6Q cpufreq driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/cpufreq/imx6q-cpufreq.c |