Columns: python_code (string, lengths 0 to 1.8M), repo_name (string, 7 classes), file_path (string, lengths 5 to 99)
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2023 Cai Huoqing * Synopsys DesignWare HDMA v0 core */ #include <linux/bitfield.h> #include <linux/irqreturn.h> #include <linux/io-64-nonatomic-lo-hi.h> #include "dw-edma-core.h" #include "dw-hdma-v0-core.h" #include "dw-hdma-v0-regs.h" #include "dw-hdma-v0-debugfs.h" enum dw_hdma_control { DW_HDMA_V0_CB = BIT(0), DW_HDMA_V0_TCB = BIT(1), DW_HDMA_V0_LLP = BIT(2), DW_HDMA_V0_LIE = BIT(3), DW_HDMA_V0_RIE = BIT(4), DW_HDMA_V0_CCS = BIT(8), DW_HDMA_V0_LLE = BIT(9), }; static inline struct dw_hdma_v0_regs __iomem *__dw_regs(struct dw_edma *dw) { return dw->chip->reg_base; } static inline struct dw_hdma_v0_ch_regs __iomem * __dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch) { if (dir == EDMA_DIR_WRITE) return &(__dw_regs(dw)->ch[ch].wr); else return &(__dw_regs(dw)->ch[ch].rd); } #define SET_CH_32(dw, dir, ch, name, value) \ writel(value, &(__dw_ch_regs(dw, dir, ch)->name)) #define GET_CH_32(dw, dir, ch, name) \ readl(&(__dw_ch_regs(dw, dir, ch)->name)) #define SET_BOTH_CH_32(dw, ch, name, value) \ do { \ writel(value, &(__dw_ch_regs(dw, EDMA_DIR_WRITE, ch)->name)); \ writel(value, &(__dw_ch_regs(dw, EDMA_DIR_READ, ch)->name)); \ } while (0) /* HDMA management callbacks */ static void dw_hdma_v0_core_off(struct dw_edma *dw) { int id; for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) { SET_BOTH_CH_32(dw, id, int_setup, HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK); SET_BOTH_CH_32(dw, id, int_clear, HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK); SET_BOTH_CH_32(dw, id, ch_en, 0); } } static u16 dw_hdma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir) { u32 num_ch = 0; int id; for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) { if (GET_CH_32(dw, id, dir, ch_en) & BIT(0)) num_ch++; } if (num_ch > HDMA_V0_MAX_NR_CH) num_ch = HDMA_V0_MAX_NR_CH; return (u16)num_ch; } static enum dma_status dw_hdma_v0_core_ch_status(struct dw_edma_chan *chan) { struct dw_edma *dw = chan->dw; u32 tmp; tmp = FIELD_GET(HDMA_V0_CH_STATUS_MASK, GET_CH_32(dw, chan->id, chan->dir, ch_stat)); if (tmp == 1) return DMA_IN_PROGRESS; else if (tmp == 3) return DMA_COMPLETE; else return DMA_ERROR; } static void dw_hdma_v0_core_clear_done_int(struct dw_edma_chan *chan) { struct dw_edma *dw = chan->dw; SET_CH_32(dw, chan->dir, chan->id, int_clear, HDMA_V0_STOP_INT_MASK); } static void dw_hdma_v0_core_clear_abort_int(struct dw_edma_chan *chan) { struct dw_edma *dw = chan->dw; SET_CH_32(dw, chan->dir, chan->id, int_clear, HDMA_V0_ABORT_INT_MASK); } static u32 dw_hdma_v0_core_status_int(struct dw_edma_chan *chan) { struct dw_edma *dw = chan->dw; return GET_CH_32(dw, chan->dir, chan->id, int_stat); } static irqreturn_t dw_hdma_v0_core_handle_int(struct dw_edma_irq *dw_irq, enum dw_edma_dir dir, dw_edma_handler_t done, dw_edma_handler_t abort) { struct dw_edma *dw = dw_irq->dw; unsigned long total, pos, val; irqreturn_t ret = IRQ_NONE; struct dw_edma_chan *chan; unsigned long off, mask; if (dir == EDMA_DIR_WRITE) { total = dw->wr_ch_cnt; off = 0; mask = dw_irq->wr_mask; } else { total = dw->rd_ch_cnt; off = dw->wr_ch_cnt; mask = dw_irq->rd_mask; } for_each_set_bit(pos, &mask, total) { chan = &dw->chan[pos + off]; val = dw_hdma_v0_core_status_int(chan); if (FIELD_GET(HDMA_V0_STOP_INT_MASK, val)) { dw_hdma_v0_core_clear_done_int(chan); done(chan); ret = IRQ_HANDLED; } if (FIELD_GET(HDMA_V0_ABORT_INT_MASK, val)) { dw_hdma_v0_core_clear_abort_int(chan); abort(chan); ret = IRQ_HANDLED; } } return ret; } static void dw_hdma_v0_write_ll_data(struct dw_edma_chunk *chunk, int i, u32 
control, u32 size, u64 sar, u64 dar) { ptrdiff_t ofs = i * sizeof(struct dw_hdma_v0_lli); if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) { struct dw_hdma_v0_lli *lli = chunk->ll_region.vaddr.mem + ofs; lli->control = control; lli->transfer_size = size; lli->sar.reg = sar; lli->dar.reg = dar; } else { struct dw_hdma_v0_lli __iomem *lli = chunk->ll_region.vaddr.io + ofs; writel(control, &lli->control); writel(size, &lli->transfer_size); writeq(sar, &lli->sar.reg); writeq(dar, &lli->dar.reg); } } static void dw_hdma_v0_write_ll_link(struct dw_edma_chunk *chunk, int i, u32 control, u64 pointer) { ptrdiff_t ofs = i * sizeof(struct dw_hdma_v0_lli); if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) { struct dw_hdma_v0_llp *llp = chunk->ll_region.vaddr.mem + ofs; llp->control = control; llp->llp.reg = pointer; } else { struct dw_hdma_v0_llp __iomem *llp = chunk->ll_region.vaddr.io + ofs; writel(control, &llp->control); writeq(pointer, &llp->llp.reg); } } static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk) { struct dw_edma_burst *child; struct dw_edma_chan *chan = chunk->chan; u32 control = 0, i = 0; int j; if (chunk->cb) control = DW_HDMA_V0_CB; j = chunk->bursts_alloc; list_for_each_entry(child, &chunk->burst->list, list) { j--; if (!j) { control |= DW_HDMA_V0_LIE; if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL)) control |= DW_HDMA_V0_RIE; } dw_hdma_v0_write_ll_data(chunk, i++, control, child->sz, child->sar, child->dar); } control = DW_HDMA_V0_LLP | DW_HDMA_V0_TCB; if (!chunk->cb) control |= DW_HDMA_V0_CB; dw_hdma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr); } static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first) { struct dw_edma_chan *chan = chunk->chan; struct dw_edma *dw = chan->dw; u32 tmp; dw_hdma_v0_core_write_chunk(chunk); if (first) { /* Enable engine */ SET_CH_32(dw, chan->dir, chan->id, ch_en, BIT(0)); /* Interrupt enable&unmask - done, abort */ tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) | HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK | HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_STOP_INT_EN; SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp); /* Channel control */ SET_CH_32(dw, chan->dir, chan->id, control1, HDMA_V0_LINKLIST_EN); /* Linked list */ /* llp is not aligned on 64bit -> keep 32bit accesses */ SET_CH_32(dw, chan->dir, chan->id, llp.lsb, lower_32_bits(chunk->ll_region.paddr)); SET_CH_32(dw, chan->dir, chan->id, llp.msb, upper_32_bits(chunk->ll_region.paddr)); } /* Set consumer cycle */ SET_CH_32(dw, chan->dir, chan->id, cycle_sync, HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT); /* Doorbell */ SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START); } static void dw_hdma_v0_core_ch_config(struct dw_edma_chan *chan) { struct dw_edma *dw = chan->dw; /* MSI done addr - low, high */ SET_CH_32(dw, chan->dir, chan->id, msi_stop.lsb, chan->msi.address_lo); SET_CH_32(dw, chan->dir, chan->id, msi_stop.msb, chan->msi.address_hi); /* MSI abort addr - low, high */ SET_CH_32(dw, chan->dir, chan->id, msi_abort.lsb, chan->msi.address_lo); SET_CH_32(dw, chan->dir, chan->id, msi_abort.msb, chan->msi.address_hi); /* config MSI data */ SET_CH_32(dw, chan->dir, chan->id, msi_msgdata, chan->msi.data); } /* HDMA debugfs callbacks */ static void dw_hdma_v0_core_debugfs_on(struct dw_edma *dw) { dw_hdma_v0_debugfs_on(dw); } static const struct dw_edma_core_ops dw_hdma_v0_core = { .off = dw_hdma_v0_core_off, .ch_count = dw_hdma_v0_core_ch_count, .ch_status = dw_hdma_v0_core_ch_status, .handle_int = 
dw_hdma_v0_core_handle_int, .start = dw_hdma_v0_core_start, .ch_config = dw_hdma_v0_core_ch_config, .debugfs_on = dw_hdma_v0_core_debugfs_on, }; void dw_hdma_v0_core_register(struct dw_edma *dw) { dw->core = &dw_hdma_v0_core; }
linux-master
drivers/dma/dw-edma/dw-hdma-v0-core.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. * Synopsys DesignWare eDMA v0 core * * Author: Gustavo Pimentel <[email protected]> */ #include <linux/debugfs.h> #include <linux/bitfield.h> #include "dw-edma-v0-debugfs.h" #include "dw-edma-v0-regs.h" #include "dw-edma-core.h" #define REGS_ADDR(dw, name) \ ({ \ struct dw_edma_v0_regs __iomem *__regs = (dw)->chip->reg_base; \ \ (void __iomem *)&__regs->name; \ }) #define REGS_CH_ADDR(dw, name, _dir, _ch) \ ({ \ struct dw_edma_v0_ch_regs __iomem *__ch_regs; \ \ if ((dw)->chip->mf == EDMA_MF_EDMA_LEGACY) \ __ch_regs = REGS_ADDR(dw, type.legacy.ch); \ else if (_dir == EDMA_DIR_READ) \ __ch_regs = REGS_ADDR(dw, type.unroll.ch[_ch].rd); \ else \ __ch_regs = REGS_ADDR(dw, type.unroll.ch[_ch].wr); \ \ (void __iomem *)&__ch_regs->name; \ }) #define REGISTER(dw, name) \ { dw, #name, REGS_ADDR(dw, name) } #define CTX_REGISTER(dw, name, dir, ch) \ { dw, #name, REGS_CH_ADDR(dw, name, dir, ch), dir, ch } #define WR_REGISTER(dw, name) \ { dw, #name, REGS_ADDR(dw, wr_##name) } #define RD_REGISTER(dw, name) \ { dw, #name, REGS_ADDR(dw, rd_##name) } #define WR_REGISTER_LEGACY(dw, name) \ { dw, #name, REGS_ADDR(dw, type.legacy.wr_##name) } #define RD_REGISTER_LEGACY(name) \ { dw, #name, REGS_ADDR(dw, type.legacy.rd_##name) } #define WR_REGISTER_UNROLL(dw, name) \ { dw, #name, REGS_ADDR(dw, type.unroll.wr_##name) } #define RD_REGISTER_UNROLL(dw, name) \ { dw, #name, REGS_ADDR(dw, type.unroll.rd_##name) } #define WRITE_STR "write" #define READ_STR "read" #define CHANNEL_STR "channel" #define REGISTERS_STR "registers" struct dw_edma_debugfs_entry { struct dw_edma *dw; const char *name; void __iomem *reg; enum dw_edma_dir dir; u16 ch; }; static int dw_edma_debugfs_u32_get(void *data, u64 *val) { struct dw_edma_debugfs_entry *entry = data; struct dw_edma *dw = entry->dw; void __iomem *reg = entry->reg; if (dw->chip->mf == EDMA_MF_EDMA_LEGACY && reg >= REGS_ADDR(dw, type.legacy.ch)) { unsigned long flags; u32 viewport_sel; viewport_sel = entry->dir == EDMA_DIR_READ ? 
BIT(31) : 0; viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, entry->ch); raw_spin_lock_irqsave(&dw->lock, flags); writel(viewport_sel, REGS_ADDR(dw, type.legacy.viewport_sel)); *val = readl(reg); raw_spin_unlock_irqrestore(&dw->lock, flags); } else { *val = readl(reg); } return 0; } DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_edma_debugfs_u32_get, NULL, "0x%08llx\n"); static void dw_edma_debugfs_create_x32(struct dw_edma *dw, const struct dw_edma_debugfs_entry ini[], int nr_entries, struct dentry *dent) { struct dw_edma_debugfs_entry *entries; int i; entries = devm_kcalloc(dw->chip->dev, nr_entries, sizeof(*entries), GFP_KERNEL); if (!entries) return; for (i = 0; i < nr_entries; i++) { entries[i] = ini[i]; debugfs_create_file_unsafe(entries[i].name, 0444, dent, &entries[i], &fops_x32); } } static void dw_edma_debugfs_regs_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, struct dentry *dent) { struct dw_edma_debugfs_entry debugfs_regs[] = { CTX_REGISTER(dw, ch_control1, dir, ch), CTX_REGISTER(dw, ch_control2, dir, ch), CTX_REGISTER(dw, transfer_size, dir, ch), CTX_REGISTER(dw, sar.lsb, dir, ch), CTX_REGISTER(dw, sar.msb, dir, ch), CTX_REGISTER(dw, dar.lsb, dir, ch), CTX_REGISTER(dw, dar.msb, dir, ch), CTX_REGISTER(dw, llp.lsb, dir, ch), CTX_REGISTER(dw, llp.msb, dir, ch), }; int nr_entries; nr_entries = ARRAY_SIZE(debugfs_regs); dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, dent); } static noinline_for_stack void dw_edma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent) { const struct dw_edma_debugfs_entry debugfs_regs[] = { /* eDMA global registers */ WR_REGISTER(dw, engine_en), WR_REGISTER(dw, doorbell), WR_REGISTER(dw, ch_arb_weight.lsb), WR_REGISTER(dw, ch_arb_weight.msb), /* eDMA interrupts registers */ WR_REGISTER(dw, int_status), WR_REGISTER(dw, int_mask), WR_REGISTER(dw, int_clear), WR_REGISTER(dw, err_status), WR_REGISTER(dw, done_imwr.lsb), WR_REGISTER(dw, done_imwr.msb), WR_REGISTER(dw, abort_imwr.lsb), WR_REGISTER(dw, abort_imwr.msb), WR_REGISTER(dw, ch01_imwr_data), WR_REGISTER(dw, ch23_imwr_data), WR_REGISTER(dw, ch45_imwr_data), WR_REGISTER(dw, ch67_imwr_data), WR_REGISTER(dw, linked_list_err_en), }; const struct dw_edma_debugfs_entry debugfs_unroll_regs[] = { /* eDMA channel context grouping */ WR_REGISTER_UNROLL(dw, engine_chgroup), WR_REGISTER_UNROLL(dw, engine_hshake_cnt.lsb), WR_REGISTER_UNROLL(dw, engine_hshake_cnt.msb), WR_REGISTER_UNROLL(dw, ch0_pwr_en), WR_REGISTER_UNROLL(dw, ch1_pwr_en), WR_REGISTER_UNROLL(dw, ch2_pwr_en), WR_REGISTER_UNROLL(dw, ch3_pwr_en), WR_REGISTER_UNROLL(dw, ch4_pwr_en), WR_REGISTER_UNROLL(dw, ch5_pwr_en), WR_REGISTER_UNROLL(dw, ch6_pwr_en), WR_REGISTER_UNROLL(dw, ch7_pwr_en), }; struct dentry *regs_dent, *ch_dent; int nr_entries, i; char name[16]; regs_dent = debugfs_create_dir(WRITE_STR, dent); nr_entries = ARRAY_SIZE(debugfs_regs); dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent); if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) { nr_entries = ARRAY_SIZE(debugfs_unroll_regs); dw_edma_debugfs_create_x32(dw, debugfs_unroll_regs, nr_entries, regs_dent); } for (i = 0; i < dw->wr_ch_cnt; i++) { snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i); ch_dent = debugfs_create_dir(name, regs_dent); dw_edma_debugfs_regs_ch(dw, EDMA_DIR_WRITE, i, ch_dent); } } static noinline_for_stack void dw_edma_debugfs_regs_rd(struct dw_edma *dw, struct dentry *dent) { const struct dw_edma_debugfs_entry debugfs_regs[] = { /* eDMA global registers */ RD_REGISTER(dw, engine_en), RD_REGISTER(dw, doorbell), RD_REGISTER(dw, 
ch_arb_weight.lsb), RD_REGISTER(dw, ch_arb_weight.msb), /* eDMA interrupts registers */ RD_REGISTER(dw, int_status), RD_REGISTER(dw, int_mask), RD_REGISTER(dw, int_clear), RD_REGISTER(dw, err_status.lsb), RD_REGISTER(dw, err_status.msb), RD_REGISTER(dw, linked_list_err_en), RD_REGISTER(dw, done_imwr.lsb), RD_REGISTER(dw, done_imwr.msb), RD_REGISTER(dw, abort_imwr.lsb), RD_REGISTER(dw, abort_imwr.msb), RD_REGISTER(dw, ch01_imwr_data), RD_REGISTER(dw, ch23_imwr_data), RD_REGISTER(dw, ch45_imwr_data), RD_REGISTER(dw, ch67_imwr_data), }; const struct dw_edma_debugfs_entry debugfs_unroll_regs[] = { /* eDMA channel context grouping */ RD_REGISTER_UNROLL(dw, engine_chgroup), RD_REGISTER_UNROLL(dw, engine_hshake_cnt.lsb), RD_REGISTER_UNROLL(dw, engine_hshake_cnt.msb), RD_REGISTER_UNROLL(dw, ch0_pwr_en), RD_REGISTER_UNROLL(dw, ch1_pwr_en), RD_REGISTER_UNROLL(dw, ch2_pwr_en), RD_REGISTER_UNROLL(dw, ch3_pwr_en), RD_REGISTER_UNROLL(dw, ch4_pwr_en), RD_REGISTER_UNROLL(dw, ch5_pwr_en), RD_REGISTER_UNROLL(dw, ch6_pwr_en), RD_REGISTER_UNROLL(dw, ch7_pwr_en), }; struct dentry *regs_dent, *ch_dent; int nr_entries, i; char name[16]; regs_dent = debugfs_create_dir(READ_STR, dent); nr_entries = ARRAY_SIZE(debugfs_regs); dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent); if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) { nr_entries = ARRAY_SIZE(debugfs_unroll_regs); dw_edma_debugfs_create_x32(dw, debugfs_unroll_regs, nr_entries, regs_dent); } for (i = 0; i < dw->rd_ch_cnt; i++) { snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i); ch_dent = debugfs_create_dir(name, regs_dent); dw_edma_debugfs_regs_ch(dw, EDMA_DIR_READ, i, ch_dent); } } static void dw_edma_debugfs_regs(struct dw_edma *dw) { const struct dw_edma_debugfs_entry debugfs_regs[] = { REGISTER(dw, ctrl_data_arb_prior), REGISTER(dw, ctrl), }; struct dentry *regs_dent; int nr_entries; regs_dent = debugfs_create_dir(REGISTERS_STR, dw->dma.dbg_dev_root); nr_entries = ARRAY_SIZE(debugfs_regs); dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent); dw_edma_debugfs_regs_wr(dw, regs_dent); dw_edma_debugfs_regs_rd(dw, regs_dent); } void dw_edma_v0_debugfs_on(struct dw_edma *dw) { if (!debugfs_initialized()) return; debugfs_create_u32("mf", 0444, dw->dma.dbg_dev_root, &dw->chip->mf); debugfs_create_u16("wr_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->wr_ch_cnt); debugfs_create_u16("rd_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->rd_ch_cnt); dw_edma_debugfs_regs(dw); }
linux-master
drivers/dma/dw-edma/dw-edma-v0-debugfs.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. * Synopsys DesignWare eDMA core driver * * Author: Gustavo Pimentel <[email protected]> */ #include <linux/module.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/dmaengine.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/dma/edma.h> #include <linux/dma-mapping.h> #include "dw-edma-core.h" #include "dw-edma-v0-core.h" #include "dw-hdma-v0-core.h" #include "../dmaengine.h" #include "../virt-dma.h" static inline struct device *dchan2dev(struct dma_chan *dchan) { return &dchan->dev->device; } static inline struct device *chan2dev(struct dw_edma_chan *chan) { return &chan->vc.chan.dev->device; } static inline struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd) { return container_of(vd, struct dw_edma_desc, vd); } static inline u64 dw_edma_get_pci_address(struct dw_edma_chan *chan, phys_addr_t cpu_addr) { struct dw_edma_chip *chip = chan->dw->chip; if (chip->ops->pci_address) return chip->ops->pci_address(chip->dev, cpu_addr); return cpu_addr; } static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk) { struct dw_edma_burst *burst; burst = kzalloc(sizeof(*burst), GFP_NOWAIT); if (unlikely(!burst)) return NULL; INIT_LIST_HEAD(&burst->list); if (chunk->burst) { /* Create and add new element into the linked list */ chunk->bursts_alloc++; list_add_tail(&burst->list, &chunk->burst->list); } else { /* List head */ chunk->bursts_alloc = 0; chunk->burst = burst; } return burst; } static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc) { struct dw_edma_chip *chip = desc->chan->dw->chip; struct dw_edma_chan *chan = desc->chan; struct dw_edma_chunk *chunk; chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT); if (unlikely(!chunk)) return NULL; INIT_LIST_HEAD(&chunk->list); chunk->chan = chan; /* Toggling change bit (CB) in each chunk, this is a mechanism to * inform the eDMA HW block that this is a new linked list ready * to be consumed. 
* - Odd chunks originate CB equal to 0 * - Even chunks originate CB equal to 1 */ chunk->cb = !(desc->chunks_alloc % 2); if (chan->dir == EDMA_DIR_WRITE) { chunk->ll_region.paddr = chip->ll_region_wr[chan->id].paddr; chunk->ll_region.vaddr = chip->ll_region_wr[chan->id].vaddr; } else { chunk->ll_region.paddr = chip->ll_region_rd[chan->id].paddr; chunk->ll_region.vaddr = chip->ll_region_rd[chan->id].vaddr; } if (desc->chunk) { /* Create and add new element into the linked list */ if (!dw_edma_alloc_burst(chunk)) { kfree(chunk); return NULL; } desc->chunks_alloc++; list_add_tail(&chunk->list, &desc->chunk->list); } else { /* List head */ chunk->burst = NULL; desc->chunks_alloc = 0; desc->chunk = chunk; } return chunk; } static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan) { struct dw_edma_desc *desc; desc = kzalloc(sizeof(*desc), GFP_NOWAIT); if (unlikely(!desc)) return NULL; desc->chan = chan; if (!dw_edma_alloc_chunk(desc)) { kfree(desc); return NULL; } return desc; } static void dw_edma_free_burst(struct dw_edma_chunk *chunk) { struct dw_edma_burst *child, *_next; /* Remove all the list elements */ list_for_each_entry_safe(child, _next, &chunk->burst->list, list) { list_del(&child->list); kfree(child); chunk->bursts_alloc--; } /* Remove the list head */ kfree(child); chunk->burst = NULL; } static void dw_edma_free_chunk(struct dw_edma_desc *desc) { struct dw_edma_chunk *child, *_next; if (!desc->chunk) return; /* Remove all the list elements */ list_for_each_entry_safe(child, _next, &desc->chunk->list, list) { dw_edma_free_burst(child); list_del(&child->list); kfree(child); desc->chunks_alloc--; } /* Remove the list head */ kfree(child); desc->chunk = NULL; } static void dw_edma_free_desc(struct dw_edma_desc *desc) { dw_edma_free_chunk(desc); kfree(desc); } static void vchan_free_desc(struct virt_dma_desc *vdesc) { dw_edma_free_desc(vd2dw_edma_desc(vdesc)); } static int dw_edma_start_transfer(struct dw_edma_chan *chan) { struct dw_edma *dw = chan->dw; struct dw_edma_chunk *child; struct dw_edma_desc *desc; struct virt_dma_desc *vd; vd = vchan_next_desc(&chan->vc); if (!vd) return 0; desc = vd2dw_edma_desc(vd); if (!desc) return 0; child = list_first_entry_or_null(&desc->chunk->list, struct dw_edma_chunk, list); if (!child) return 0; dw_edma_core_start(dw, child, !desc->xfer_sz); desc->xfer_sz += child->ll_region.sz; dw_edma_free_burst(child); list_del(&child->list); kfree(child); desc->chunks_alloc--; return 1; } static void dw_edma_device_caps(struct dma_chan *dchan, struct dma_slave_caps *caps) { struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) { if (chan->dir == EDMA_DIR_READ) caps->directions = BIT(DMA_DEV_TO_MEM); else caps->directions = BIT(DMA_MEM_TO_DEV); } else { if (chan->dir == EDMA_DIR_WRITE) caps->directions = BIT(DMA_DEV_TO_MEM); else caps->directions = BIT(DMA_MEM_TO_DEV); } } static int dw_edma_device_config(struct dma_chan *dchan, struct dma_slave_config *config) { struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); memcpy(&chan->config, config, sizeof(*config)); chan->configured = true; return 0; } static int dw_edma_device_pause(struct dma_chan *dchan) { struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); int err = 0; if (!chan->configured) err = -EPERM; else if (chan->status != EDMA_ST_BUSY) err = -EPERM; else if (chan->request != EDMA_REQ_NONE) err = -EPERM; else chan->request = EDMA_REQ_PAUSE; return err; } static int dw_edma_device_resume(struct dma_chan *dchan) { struct dw_edma_chan 
*chan = dchan2dw_edma_chan(dchan); int err = 0; if (!chan->configured) { err = -EPERM; } else if (chan->status != EDMA_ST_PAUSE) { err = -EPERM; } else if (chan->request != EDMA_REQ_NONE) { err = -EPERM; } else { chan->status = EDMA_ST_BUSY; dw_edma_start_transfer(chan); } return err; } static int dw_edma_device_terminate_all(struct dma_chan *dchan) { struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); int err = 0; if (!chan->configured) { /* Do nothing */ } else if (chan->status == EDMA_ST_PAUSE) { chan->status = EDMA_ST_IDLE; chan->configured = false; } else if (chan->status == EDMA_ST_IDLE) { chan->configured = false; } else if (dw_edma_core_ch_status(chan) == DMA_COMPLETE) { /* * The channel is in a false BUSY state, probably didn't * receive or lost an interrupt */ chan->status = EDMA_ST_IDLE; chan->configured = false; } else if (chan->request > EDMA_REQ_PAUSE) { err = -EPERM; } else { chan->request = EDMA_REQ_STOP; } return err; } static void dw_edma_device_issue_pending(struct dma_chan *dchan) { struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); unsigned long flags; if (!chan->configured) return; spin_lock_irqsave(&chan->vc.lock, flags); if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE && chan->status == EDMA_ST_IDLE) { chan->status = EDMA_ST_BUSY; dw_edma_start_transfer(chan); } spin_unlock_irqrestore(&chan->vc.lock, flags); } static enum dma_status dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); struct dw_edma_desc *desc; struct virt_dma_desc *vd; unsigned long flags; enum dma_status ret; u32 residue = 0; ret = dma_cookie_status(dchan, cookie, txstate); if (ret == DMA_COMPLETE) return ret; if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE) ret = DMA_PAUSED; if (!txstate) goto ret_residue; spin_lock_irqsave(&chan->vc.lock, flags); vd = vchan_find_desc(&chan->vc, cookie); if (vd) { desc = vd2dw_edma_desc(vd); if (desc) residue = desc->alloc_sz - desc->xfer_sz; } spin_unlock_irqrestore(&chan->vc.lock, flags); ret_residue: dma_set_residue(txstate, residue); return ret; } static struct dma_async_tx_descriptor * dw_edma_device_transfer(struct dw_edma_transfer *xfer) { struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan); enum dma_transfer_direction dir = xfer->direction; struct scatterlist *sg = NULL; struct dw_edma_chunk *chunk; struct dw_edma_burst *burst; struct dw_edma_desc *desc; u64 src_addr, dst_addr; size_t fsz = 0; u32 cnt = 0; int i; if (!chan->configured) return NULL; /* * Local Root Port/End-point Remote End-point * +-----------------------+ PCIe bus +----------------------+ * | | +-+ | | * | DEV_TO_MEM Rx Ch <----+ +---+ Tx Ch DEV_TO_MEM | * | | | | | | * | MEM_TO_DEV Tx Ch +----+ +---> Rx Ch MEM_TO_DEV | * | | +-+ | | * +-----------------------+ +----------------------+ * * 1. Normal logic: * If eDMA is embedded into the DW PCIe RP/EP and controlled from the * CPU/Application side, the Rx channel (EDMA_DIR_READ) will be used * for the device read operations (DEV_TO_MEM) and the Tx channel * (EDMA_DIR_WRITE) - for the write operations (MEM_TO_DEV). * * 2. Inverted logic: * If eDMA is embedded into a Remote PCIe EP and is controlled by the * MWr/MRd TLPs sent from the CPU's PCIe host controller, the Tx * channel (EDMA_DIR_WRITE) will be used for the device read operations * (DEV_TO_MEM) and the Rx channel (EDMA_DIR_READ) - for the write * operations (MEM_TO_DEV). 
* * It is the client driver responsibility to choose a proper channel * for the DMA transfers. */ if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) { if ((chan->dir == EDMA_DIR_READ && dir != DMA_DEV_TO_MEM) || (chan->dir == EDMA_DIR_WRITE && dir != DMA_MEM_TO_DEV)) return NULL; } else { if ((chan->dir == EDMA_DIR_WRITE && dir != DMA_DEV_TO_MEM) || (chan->dir == EDMA_DIR_READ && dir != DMA_MEM_TO_DEV)) return NULL; } if (xfer->type == EDMA_XFER_CYCLIC) { if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt) return NULL; } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) { if (xfer->xfer.sg.len < 1) return NULL; } else if (xfer->type == EDMA_XFER_INTERLEAVED) { if (!xfer->xfer.il->numf || xfer->xfer.il->frame_size < 1) return NULL; if (!xfer->xfer.il->src_inc || !xfer->xfer.il->dst_inc) return NULL; } else { return NULL; } desc = dw_edma_alloc_desc(chan); if (unlikely(!desc)) goto err_alloc; chunk = dw_edma_alloc_chunk(desc); if (unlikely(!chunk)) goto err_alloc; if (xfer->type == EDMA_XFER_INTERLEAVED) { src_addr = xfer->xfer.il->src_start; dst_addr = xfer->xfer.il->dst_start; } else { src_addr = chan->config.src_addr; dst_addr = chan->config.dst_addr; } if (dir == DMA_DEV_TO_MEM) src_addr = dw_edma_get_pci_address(chan, (phys_addr_t)src_addr); else dst_addr = dw_edma_get_pci_address(chan, (phys_addr_t)dst_addr); if (xfer->type == EDMA_XFER_CYCLIC) { cnt = xfer->xfer.cyclic.cnt; } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) { cnt = xfer->xfer.sg.len; sg = xfer->xfer.sg.sgl; } else if (xfer->type == EDMA_XFER_INTERLEAVED) { cnt = xfer->xfer.il->numf * xfer->xfer.il->frame_size; fsz = xfer->xfer.il->frame_size; } for (i = 0; i < cnt; i++) { if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg) break; if (chunk->bursts_alloc == chan->ll_max) { chunk = dw_edma_alloc_chunk(desc); if (unlikely(!chunk)) goto err_alloc; } burst = dw_edma_alloc_burst(chunk); if (unlikely(!burst)) goto err_alloc; if (xfer->type == EDMA_XFER_CYCLIC) burst->sz = xfer->xfer.cyclic.len; else if (xfer->type == EDMA_XFER_SCATTER_GATHER) burst->sz = sg_dma_len(sg); else if (xfer->type == EDMA_XFER_INTERLEAVED) burst->sz = xfer->xfer.il->sgl[i % fsz].size; chunk->ll_region.sz += burst->sz; desc->alloc_sz += burst->sz; if (dir == DMA_DEV_TO_MEM) { burst->sar = src_addr; if (xfer->type == EDMA_XFER_CYCLIC) { burst->dar = xfer->xfer.cyclic.paddr; } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) { src_addr += sg_dma_len(sg); burst->dar = sg_dma_address(sg); /* Unlike the typical assumption by other * drivers/IPs the peripheral memory isn't * a FIFO memory, in this case, it's a * linear memory and that why the source * and destination addresses are increased * by the same portion (data length) */ } else if (xfer->type == EDMA_XFER_INTERLEAVED) { burst->dar = dst_addr; } } else { burst->dar = dst_addr; if (xfer->type == EDMA_XFER_CYCLIC) { burst->sar = xfer->xfer.cyclic.paddr; } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) { dst_addr += sg_dma_len(sg); burst->sar = sg_dma_address(sg); /* Unlike the typical assumption by other * drivers/IPs the peripheral memory isn't * a FIFO memory, in this case, it's a * linear memory and that why the source * and destination addresses are increased * by the same portion (data length) */ } else if (xfer->type == EDMA_XFER_INTERLEAVED) { burst->sar = src_addr; } } if (xfer->type == EDMA_XFER_SCATTER_GATHER) { sg = sg_next(sg); } else if (xfer->type == EDMA_XFER_INTERLEAVED) { struct dma_interleaved_template *il = xfer->xfer.il; struct data_chunk *dc = &il->sgl[i % fsz]; src_addr += 
burst->sz; if (il->src_sgl) src_addr += dmaengine_get_src_icg(il, dc); dst_addr += burst->sz; if (il->dst_sgl) dst_addr += dmaengine_get_dst_icg(il, dc); } } return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags); err_alloc: if (desc) dw_edma_free_desc(desc); return NULL; } static struct dma_async_tx_descriptor * dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, unsigned int len, enum dma_transfer_direction direction, unsigned long flags, void *context) { struct dw_edma_transfer xfer; xfer.dchan = dchan; xfer.direction = direction; xfer.xfer.sg.sgl = sgl; xfer.xfer.sg.len = len; xfer.flags = flags; xfer.type = EDMA_XFER_SCATTER_GATHER; return dw_edma_device_transfer(&xfer); } static struct dma_async_tx_descriptor * dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr, size_t len, size_t count, enum dma_transfer_direction direction, unsigned long flags) { struct dw_edma_transfer xfer; xfer.dchan = dchan; xfer.direction = direction; xfer.xfer.cyclic.paddr = paddr; xfer.xfer.cyclic.len = len; xfer.xfer.cyclic.cnt = count; xfer.flags = flags; xfer.type = EDMA_XFER_CYCLIC; return dw_edma_device_transfer(&xfer); } static struct dma_async_tx_descriptor * dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan, struct dma_interleaved_template *ilt, unsigned long flags) { struct dw_edma_transfer xfer; xfer.dchan = dchan; xfer.direction = ilt->dir; xfer.xfer.il = ilt; xfer.flags = flags; xfer.type = EDMA_XFER_INTERLEAVED; return dw_edma_device_transfer(&xfer); } static void dw_edma_done_interrupt(struct dw_edma_chan *chan) { struct dw_edma_desc *desc; struct virt_dma_desc *vd; unsigned long flags; spin_lock_irqsave(&chan->vc.lock, flags); vd = vchan_next_desc(&chan->vc); if (vd) { switch (chan->request) { case EDMA_REQ_NONE: desc = vd2dw_edma_desc(vd); if (!desc->chunks_alloc) { list_del(&vd->node); vchan_cookie_complete(vd); } /* Continue transferring if there are remaining chunks or issued requests. */ chan->status = dw_edma_start_transfer(chan) ? 
EDMA_ST_BUSY : EDMA_ST_IDLE; break; case EDMA_REQ_STOP: list_del(&vd->node); vchan_cookie_complete(vd); chan->request = EDMA_REQ_NONE; chan->status = EDMA_ST_IDLE; break; case EDMA_REQ_PAUSE: chan->request = EDMA_REQ_NONE; chan->status = EDMA_ST_PAUSE; break; default: break; } } spin_unlock_irqrestore(&chan->vc.lock, flags); } static void dw_edma_abort_interrupt(struct dw_edma_chan *chan) { struct virt_dma_desc *vd; unsigned long flags; spin_lock_irqsave(&chan->vc.lock, flags); vd = vchan_next_desc(&chan->vc); if (vd) { list_del(&vd->node); vchan_cookie_complete(vd); } spin_unlock_irqrestore(&chan->vc.lock, flags); chan->request = EDMA_REQ_NONE; chan->status = EDMA_ST_IDLE; } static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data) { struct dw_edma_irq *dw_irq = data; return dw_edma_core_handle_int(dw_irq, EDMA_DIR_WRITE, dw_edma_done_interrupt, dw_edma_abort_interrupt); } static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data) { struct dw_edma_irq *dw_irq = data; return dw_edma_core_handle_int(dw_irq, EDMA_DIR_READ, dw_edma_done_interrupt, dw_edma_abort_interrupt); } static irqreturn_t dw_edma_interrupt_common(int irq, void *data) { irqreturn_t ret = IRQ_NONE; ret |= dw_edma_interrupt_write(irq, data); ret |= dw_edma_interrupt_read(irq, data); return ret; } static int dw_edma_alloc_chan_resources(struct dma_chan *dchan) { struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); if (chan->status != EDMA_ST_IDLE) return -EBUSY; return 0; } static void dw_edma_free_chan_resources(struct dma_chan *dchan) { unsigned long timeout = jiffies + msecs_to_jiffies(5000); int ret; while (time_before(jiffies, timeout)) { ret = dw_edma_device_terminate_all(dchan); if (!ret) break; if (time_after_eq(jiffies, timeout)) return; cpu_relax(); } } static int dw_edma_channel_setup(struct dw_edma *dw, u32 wr_alloc, u32 rd_alloc) { struct dw_edma_chip *chip = dw->chip; struct device *dev = chip->dev; struct dw_edma_chan *chan; struct dw_edma_irq *irq; struct dma_device *dma; u32 i, ch_cnt; u32 pos; ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt; dma = &dw->dma; INIT_LIST_HEAD(&dma->channels); for (i = 0; i < ch_cnt; i++) { chan = &dw->chan[i]; chan->dw = dw; if (i < dw->wr_ch_cnt) { chan->id = i; chan->dir = EDMA_DIR_WRITE; } else { chan->id = i - dw->wr_ch_cnt; chan->dir = EDMA_DIR_READ; } chan->configured = false; chan->request = EDMA_REQ_NONE; chan->status = EDMA_ST_IDLE; if (chan->dir == EDMA_DIR_WRITE) chan->ll_max = (chip->ll_region_wr[chan->id].sz / EDMA_LL_SZ); else chan->ll_max = (chip->ll_region_rd[chan->id].sz / EDMA_LL_SZ); chan->ll_max -= 1; dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n", chan->dir == EDMA_DIR_WRITE ? "write" : "read", chan->id, chan->ll_max); if (dw->nr_irqs == 1) pos = 0; else if (chan->dir == EDMA_DIR_WRITE) pos = chan->id % wr_alloc; else pos = wr_alloc + chan->id % rd_alloc; irq = &dw->irq[pos]; if (chan->dir == EDMA_DIR_WRITE) irq->wr_mask |= BIT(chan->id); else irq->rd_mask |= BIT(chan->id); irq->dw = dw; memcpy(&chan->msi, &irq->msi, sizeof(chan->msi)); dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n", chan->dir == EDMA_DIR_WRITE ? "write" : "read", chan->id, chan->msi.address_hi, chan->msi.address_lo, chan->msi.data); chan->vc.desc_free = vchan_free_desc; chan->vc.chan.private = chan->dir == EDMA_DIR_WRITE ? 
&dw->chip->dt_region_wr[chan->id] : &dw->chip->dt_region_rd[chan->id]; vchan_init(&chan->vc, dma); dw_edma_core_ch_config(chan); } /* Set DMA channel capabilities */ dma_cap_zero(dma->cap_mask); dma_cap_set(DMA_SLAVE, dma->cap_mask); dma_cap_set(DMA_CYCLIC, dma->cap_mask); dma_cap_set(DMA_PRIVATE, dma->cap_mask); dma_cap_set(DMA_INTERLEAVE, dma->cap_mask); dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; /* Set DMA channel callbacks */ dma->dev = chip->dev; dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources; dma->device_free_chan_resources = dw_edma_free_chan_resources; dma->device_caps = dw_edma_device_caps; dma->device_config = dw_edma_device_config; dma->device_pause = dw_edma_device_pause; dma->device_resume = dw_edma_device_resume; dma->device_terminate_all = dw_edma_device_terminate_all; dma->device_issue_pending = dw_edma_device_issue_pending; dma->device_tx_status = dw_edma_device_tx_status; dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg; dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic; dma->device_prep_interleaved_dma = dw_edma_device_prep_interleaved_dma; dma_set_max_seg_size(dma->dev, U32_MAX); /* Register DMA device */ return dma_async_device_register(dma); } static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt) { if (*nr_irqs && *alloc < cnt) { (*alloc)++; (*nr_irqs)--; } } static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt) { while (*mask * alloc < cnt) (*mask)++; } static int dw_edma_irq_request(struct dw_edma *dw, u32 *wr_alloc, u32 *rd_alloc) { struct dw_edma_chip *chip = dw->chip; struct device *dev = dw->chip->dev; u32 wr_mask = 1; u32 rd_mask = 1; int i, err = 0; u32 ch_cnt; int irq; ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt; if (chip->nr_irqs < 1 || !chip->ops->irq_vector) return -EINVAL; dw->irq = devm_kcalloc(dev, chip->nr_irqs, sizeof(*dw->irq), GFP_KERNEL); if (!dw->irq) return -ENOMEM; if (chip->nr_irqs == 1) { /* Common IRQ shared among all channels */ irq = chip->ops->irq_vector(dev, 0); err = request_irq(irq, dw_edma_interrupt_common, IRQF_SHARED, dw->name, &dw->irq[0]); if (err) { dw->nr_irqs = 0; return err; } if (irq_get_msi_desc(irq)) get_cached_msi_msg(irq, &dw->irq[0].msi); dw->nr_irqs = 1; } else { /* Distribute IRQs equally among all channels */ int tmp = chip->nr_irqs; while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) { dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt); dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt); } dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt); dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt); for (i = 0; i < (*wr_alloc + *rd_alloc); i++) { irq = chip->ops->irq_vector(dev, i); err = request_irq(irq, i < *wr_alloc ? 
dw_edma_interrupt_write : dw_edma_interrupt_read, IRQF_SHARED, dw->name, &dw->irq[i]); if (err) goto err_irq_free; if (irq_get_msi_desc(irq)) get_cached_msi_msg(irq, &dw->irq[i].msi); } dw->nr_irqs = i; } return 0; err_irq_free: for (i--; i >= 0; i--) { irq = chip->ops->irq_vector(dev, i); free_irq(irq, &dw->irq[i]); } return err; } int dw_edma_probe(struct dw_edma_chip *chip) { struct device *dev; struct dw_edma *dw; u32 wr_alloc = 0; u32 rd_alloc = 0; int i, err; if (!chip) return -EINVAL; dev = chip->dev; if (!dev || !chip->ops) return -EINVAL; dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL); if (!dw) return -ENOMEM; dw->chip = chip; if (dw->chip->mf == EDMA_MF_HDMA_NATIVE) dw_hdma_v0_core_register(dw); else dw_edma_v0_core_register(dw); raw_spin_lock_init(&dw->lock); dw->wr_ch_cnt = min_t(u16, chip->ll_wr_cnt, dw_edma_core_ch_count(dw, EDMA_DIR_WRITE)); dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH); dw->rd_ch_cnt = min_t(u16, chip->ll_rd_cnt, dw_edma_core_ch_count(dw, EDMA_DIR_READ)); dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH); if (!dw->wr_ch_cnt && !dw->rd_ch_cnt) return -EINVAL; dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n", dw->wr_ch_cnt, dw->rd_ch_cnt); /* Allocate channels */ dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt, sizeof(*dw->chan), GFP_KERNEL); if (!dw->chan) return -ENOMEM; snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%s", dev_name(chip->dev)); /* Disable eDMA, only to establish the ideal initial conditions */ dw_edma_core_off(dw); /* Request IRQs */ err = dw_edma_irq_request(dw, &wr_alloc, &rd_alloc); if (err) return err; /* Setup write/read channels */ err = dw_edma_channel_setup(dw, wr_alloc, rd_alloc); if (err) goto err_irq_free; /* Turn debugfs on */ dw_edma_core_debugfs_on(dw); chip->dw = dw; return 0; err_irq_free: for (i = (dw->nr_irqs - 1); i >= 0; i--) free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]); return err; } EXPORT_SYMBOL_GPL(dw_edma_probe); int dw_edma_remove(struct dw_edma_chip *chip) { struct dw_edma_chan *chan, *_chan; struct device *dev = chip->dev; struct dw_edma *dw = chip->dw; int i; /* Skip removal if no private data found */ if (!dw) return -ENODEV; /* Disable eDMA */ dw_edma_core_off(dw); /* Free irqs */ for (i = (dw->nr_irqs - 1); i >= 0; i--) free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]); /* Deregister eDMA device */ dma_async_device_unregister(&dw->dma); list_for_each_entry_safe(chan, _chan, &dw->dma.channels, vc.chan.device_node) { tasklet_kill(&chan->vc.task); list_del(&chan->vc.chan.device_node); } return 0; } EXPORT_SYMBOL_GPL(dw_edma_remove); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver"); MODULE_AUTHOR("Gustavo Pimentel <[email protected]>");
linux-master
drivers/dma/dw-edma/dw-edma-core.c
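The core driver above registers a standard dmaengine provider, and its channel-direction rules (the "normal" vs. "inverted" logic discussed in dw_edma_device_transfer) are easiest to see from the consumer side. The fragment below is a minimal, hypothetical client sketch using only generic dmaengine calls; the channel lookup name "rx", the example_edma_read() helper, and the synchronous wait are illustrative assumptions, not part of the driver above.

/*
 * Hypothetical consumer sketch: request a channel served by the eDMA
 * provider registered in dw-edma-core.c and run one DEV_TO_MEM
 * scatter-gather transfer. The "rx" lookup name and the synchronous
 * completion wait are assumptions for illustration only.
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int example_edma_read(struct device *dev, struct scatterlist *sgl,
			     unsigned int nents, dma_addr_t dev_addr)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = dev_addr,	/* bus address of the source */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret;

	chan = dma_request_chan(dev, "rx");	/* lookup name is an assumption */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg); /* lands in dw_edma_device_config() */
	if (ret)
		goto out;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -EINVAL;
		goto out;
	}

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);	/* kicks dw_edma_device_issue_pending() */
	ret = dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
out:
	dma_release_channel(chan);
	return ret;
}

In the local (DW_EDMA_CHIP_LOCAL) case the same DMA_DEV_TO_MEM request must land on an EDMA_DIR_READ channel, per dw_edma_device_caps() above; in the remote case the mapping is inverted.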
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2023 Cai Huoqing * Synopsys DesignWare HDMA v0 debugfs * * Author: Cai Huoqing <[email protected]> */ #include <linux/debugfs.h> #include <linux/bitfield.h> #include "dw-hdma-v0-debugfs.h" #include "dw-hdma-v0-regs.h" #include "dw-edma-core.h" #define REGS_ADDR(dw, name) \ ({ \ struct dw_hdma_v0_regs __iomem *__regs = (dw)->chip->reg_base; \ \ (void __iomem *)&__regs->name; \ }) #define REGS_CH_ADDR(dw, name, _dir, _ch) \ ({ \ struct dw_hdma_v0_ch_regs __iomem *__ch_regs; \ \ if (_dir == EDMA_DIR_READ) \ __ch_regs = REGS_ADDR(dw, ch[_ch].rd); \ else \ __ch_regs = REGS_ADDR(dw, ch[_ch].wr); \ \ (void __iomem *)&__ch_regs->name; \ }) #define CTX_REGISTER(dw, name, dir, ch) \ {#name, REGS_CH_ADDR(dw, name, dir, ch)} #define WRITE_STR "write" #define READ_STR "read" #define CHANNEL_STR "channel" #define REGISTERS_STR "registers" struct dw_hdma_debugfs_entry { const char *name; void __iomem *reg; }; static int dw_hdma_debugfs_u32_get(void *data, u64 *val) { struct dw_hdma_debugfs_entry *entry = data; void __iomem *reg = entry->reg; *val = readl(reg); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_hdma_debugfs_u32_get, NULL, "0x%08llx\n"); static void dw_hdma_debugfs_create_x32(struct dw_edma *dw, const struct dw_hdma_debugfs_entry ini[], int nr_entries, struct dentry *dent) { struct dw_hdma_debugfs_entry *entries; int i; entries = devm_kcalloc(dw->chip->dev, nr_entries, sizeof(*entries), GFP_KERNEL); if (!entries) return; for (i = 0; i < nr_entries; i++) { entries[i] = ini[i]; debugfs_create_file_unsafe(entries[i].name, 0444, dent, &entries[i], &fops_x32); } } static void dw_hdma_debugfs_regs_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, struct dentry *dent) { const struct dw_hdma_debugfs_entry debugfs_regs[] = { CTX_REGISTER(dw, ch_en, dir, ch), CTX_REGISTER(dw, doorbell, dir, ch), CTX_REGISTER(dw, prefetch, dir, ch), CTX_REGISTER(dw, handshake, dir, ch), CTX_REGISTER(dw, llp.lsb, dir, ch), CTX_REGISTER(dw, llp.msb, dir, ch), CTX_REGISTER(dw, cycle_sync, dir, ch), CTX_REGISTER(dw, transfer_size, dir, ch), CTX_REGISTER(dw, sar.lsb, dir, ch), CTX_REGISTER(dw, sar.msb, dir, ch), CTX_REGISTER(dw, dar.lsb, dir, ch), CTX_REGISTER(dw, dar.msb, dir, ch), CTX_REGISTER(dw, watermark_en, dir, ch), CTX_REGISTER(dw, control1, dir, ch), CTX_REGISTER(dw, func_num, dir, ch), CTX_REGISTER(dw, qos, dir, ch), CTX_REGISTER(dw, ch_stat, dir, ch), CTX_REGISTER(dw, int_stat, dir, ch), CTX_REGISTER(dw, int_setup, dir, ch), CTX_REGISTER(dw, int_clear, dir, ch), CTX_REGISTER(dw, msi_stop.lsb, dir, ch), CTX_REGISTER(dw, msi_stop.msb, dir, ch), CTX_REGISTER(dw, msi_watermark.lsb, dir, ch), CTX_REGISTER(dw, msi_watermark.msb, dir, ch), CTX_REGISTER(dw, msi_abort.lsb, dir, ch), CTX_REGISTER(dw, msi_abort.msb, dir, ch), CTX_REGISTER(dw, msi_msgdata, dir, ch), }; int nr_entries = ARRAY_SIZE(debugfs_regs); dw_hdma_debugfs_create_x32(dw, debugfs_regs, nr_entries, dent); } static void dw_hdma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent) { struct dentry *regs_dent, *ch_dent; char name[16]; int i; regs_dent = debugfs_create_dir(WRITE_STR, dent); for (i = 0; i < dw->wr_ch_cnt; i++) { snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i); ch_dent = debugfs_create_dir(name, regs_dent); dw_hdma_debugfs_regs_ch(dw, EDMA_DIR_WRITE, i, ch_dent); } } static void dw_hdma_debugfs_regs_rd(struct dw_edma *dw, struct dentry *dent) { struct dentry *regs_dent, *ch_dent; char name[16]; int i; regs_dent = debugfs_create_dir(READ_STR, dent); for (i = 0; i < dw->rd_ch_cnt; 
i++) { snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i); ch_dent = debugfs_create_dir(name, regs_dent); dw_hdma_debugfs_regs_ch(dw, EDMA_DIR_READ, i, ch_dent); } } static void dw_hdma_debugfs_regs(struct dw_edma *dw) { struct dentry *regs_dent; regs_dent = debugfs_create_dir(REGISTERS_STR, dw->dma.dbg_dev_root); dw_hdma_debugfs_regs_wr(dw, regs_dent); dw_hdma_debugfs_regs_rd(dw, regs_dent); } void dw_hdma_v0_debugfs_on(struct dw_edma *dw) { if (!debugfs_initialized()) return; debugfs_create_u32("mf", 0444, dw->dma.dbg_dev_root, &dw->chip->mf); debugfs_create_u16("wr_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->wr_ch_cnt); debugfs_create_u16("rd_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->rd_ch_cnt); dw_hdma_debugfs_regs(dw); }
linux-master
drivers/dma/dw-edma/dw-hdma-v0-debugfs.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. * Synopsys DesignWare eDMA PCIe driver * * Author: Gustavo Pimentel <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/dma/edma.h> #include <linux/pci-epf.h> #include <linux/msi.h> #include <linux/bitfield.h> #include "dw-edma-core.h" #define DW_PCIE_VSEC_DMA_ID 0x6 #define DW_PCIE_VSEC_DMA_BAR GENMASK(10, 8) #define DW_PCIE_VSEC_DMA_MAP GENMASK(2, 0) #define DW_PCIE_VSEC_DMA_WR_CH GENMASK(9, 0) #define DW_PCIE_VSEC_DMA_RD_CH GENMASK(25, 16) #define DW_BLOCK(a, b, c) \ { \ .bar = a, \ .off = b, \ .sz = c, \ }, struct dw_edma_block { enum pci_barno bar; off_t off; size_t sz; }; struct dw_edma_pcie_data { /* eDMA registers location */ struct dw_edma_block rg; /* eDMA memory linked list location */ struct dw_edma_block ll_wr[EDMA_MAX_WR_CH]; struct dw_edma_block ll_rd[EDMA_MAX_RD_CH]; /* eDMA memory data location */ struct dw_edma_block dt_wr[EDMA_MAX_WR_CH]; struct dw_edma_block dt_rd[EDMA_MAX_RD_CH]; /* Other */ enum dw_edma_map_format mf; u8 irqs; u16 wr_ch_cnt; u16 rd_ch_cnt; }; static const struct dw_edma_pcie_data snps_edda_data = { /* eDMA registers location */ .rg.bar = BAR_0, .rg.off = 0x00001000, /* 4 Kbytes */ .rg.sz = 0x00002000, /* 8 Kbytes */ /* eDMA memory linked list location */ .ll_wr = { /* Channel 0 - BAR 2, offset 0 Mbytes, size 2 Kbytes */ DW_BLOCK(BAR_2, 0x00000000, 0x00000800) /* Channel 1 - BAR 2, offset 2 Mbytes, size 2 Kbytes */ DW_BLOCK(BAR_2, 0x00200000, 0x00000800) }, .ll_rd = { /* Channel 0 - BAR 2, offset 4 Mbytes, size 2 Kbytes */ DW_BLOCK(BAR_2, 0x00400000, 0x00000800) /* Channel 1 - BAR 2, offset 6 Mbytes, size 2 Kbytes */ DW_BLOCK(BAR_2, 0x00600000, 0x00000800) }, /* eDMA memory data location */ .dt_wr = { /* Channel 0 - BAR 2, offset 8 Mbytes, size 2 Kbytes */ DW_BLOCK(BAR_2, 0x00800000, 0x00000800) /* Channel 1 - BAR 2, offset 9 Mbytes, size 2 Kbytes */ DW_BLOCK(BAR_2, 0x00900000, 0x00000800) }, .dt_rd = { /* Channel 0 - BAR 2, offset 10 Mbytes, size 2 Kbytes */ DW_BLOCK(BAR_2, 0x00a00000, 0x00000800) /* Channel 1 - BAR 2, offset 11 Mbytes, size 2 Kbytes */ DW_BLOCK(BAR_2, 0x00b00000, 0x00000800) }, /* Other */ .mf = EDMA_MF_EDMA_UNROLL, .irqs = 1, .wr_ch_cnt = 2, .rd_ch_cnt = 2, }; static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr) { return pci_irq_vector(to_pci_dev(dev), nr); } static u64 dw_edma_pcie_address(struct device *dev, phys_addr_t cpu_addr) { struct pci_dev *pdev = to_pci_dev(dev); struct pci_bus_region region; struct resource res = { .flags = IORESOURCE_MEM, .start = cpu_addr, .end = cpu_addr, }; pcibios_resource_to_bus(pdev->bus, &region, &res); return region.start; } static const struct dw_edma_plat_ops dw_edma_pcie_plat_ops = { .irq_vector = dw_edma_pcie_irq_vector, .pci_address = dw_edma_pcie_address, }; static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev, struct dw_edma_pcie_data *pdata) { u32 val, map; u16 vsec; u64 off; vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_SYNOPSYS, DW_PCIE_VSEC_DMA_ID); if (!vsec) return; pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val); if (PCI_VNDR_HEADER_REV(val) != 0x00 || PCI_VNDR_HEADER_LEN(val) != 0x18) return; pci_dbg(pdev, "Detected PCIe Vendor-Specific Extended Capability DMA\n"); pci_read_config_dword(pdev, vsec + 0x8, &val); map = FIELD_GET(DW_PCIE_VSEC_DMA_MAP, val); if (map != EDMA_MF_EDMA_LEGACY && map != EDMA_MF_EDMA_UNROLL && map != EDMA_MF_HDMA_COMPAT) 
return; pdata->mf = map; pdata->rg.bar = FIELD_GET(DW_PCIE_VSEC_DMA_BAR, val); pci_read_config_dword(pdev, vsec + 0xc, &val); pdata->wr_ch_cnt = min_t(u16, pdata->wr_ch_cnt, FIELD_GET(DW_PCIE_VSEC_DMA_WR_CH, val)); pdata->rd_ch_cnt = min_t(u16, pdata->rd_ch_cnt, FIELD_GET(DW_PCIE_VSEC_DMA_RD_CH, val)); pci_read_config_dword(pdev, vsec + 0x14, &val); off = val; pci_read_config_dword(pdev, vsec + 0x10, &val); off <<= 32; off |= val; pdata->rg.off = off; } static int dw_edma_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *pid) { struct dw_edma_pcie_data *pdata = (void *)pid->driver_data; struct dw_edma_pcie_data vsec_data; struct device *dev = &pdev->dev; struct dw_edma_chip *chip; int err, nr_irqs; int i, mask; /* Enable PCI device */ err = pcim_enable_device(pdev); if (err) { pci_err(pdev, "enabling device failed\n"); return err; } memcpy(&vsec_data, pdata, sizeof(struct dw_edma_pcie_data)); /* * Tries to find if exists a PCIe Vendor-Specific Extended Capability * for the DMA, if one exists, then reconfigures it. */ dw_edma_pcie_get_vsec_dma_data(pdev, &vsec_data); /* Mapping PCI BAR regions */ mask = BIT(vsec_data.rg.bar); for (i = 0; i < vsec_data.wr_ch_cnt; i++) { mask |= BIT(vsec_data.ll_wr[i].bar); mask |= BIT(vsec_data.dt_wr[i].bar); } for (i = 0; i < vsec_data.rd_ch_cnt; i++) { mask |= BIT(vsec_data.ll_rd[i].bar); mask |= BIT(vsec_data.dt_rd[i].bar); } err = pcim_iomap_regions(pdev, mask, pci_name(pdev)); if (err) { pci_err(pdev, "eDMA BAR I/O remapping failed\n"); return err; } pci_set_master(pdev); /* DMA configuration */ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { pci_err(pdev, "DMA mask 64 set failed\n"); return err; } /* Data structure allocation */ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; /* IRQs allocation */ nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs, PCI_IRQ_MSI | PCI_IRQ_MSIX); if (nr_irqs < 1) { pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n", nr_irqs); return -EPERM; } /* Data structure initialization */ chip->dev = dev; chip->mf = vsec_data.mf; chip->nr_irqs = nr_irqs; chip->ops = &dw_edma_pcie_plat_ops; chip->ll_wr_cnt = vsec_data.wr_ch_cnt; chip->ll_rd_cnt = vsec_data.rd_ch_cnt; chip->reg_base = pcim_iomap_table(pdev)[vsec_data.rg.bar]; if (!chip->reg_base) return -ENOMEM; for (i = 0; i < chip->ll_wr_cnt; i++) { struct dw_edma_region *ll_region = &chip->ll_region_wr[i]; struct dw_edma_region *dt_region = &chip->dt_region_wr[i]; struct dw_edma_block *ll_block = &vsec_data.ll_wr[i]; struct dw_edma_block *dt_block = &vsec_data.dt_wr[i]; ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar]; if (!ll_region->vaddr.io) return -ENOMEM; ll_region->vaddr.io += ll_block->off; ll_region->paddr = pci_bus_address(pdev, ll_block->bar); ll_region->paddr += ll_block->off; ll_region->sz = ll_block->sz; dt_region->vaddr.io = pcim_iomap_table(pdev)[dt_block->bar]; if (!dt_region->vaddr.io) return -ENOMEM; dt_region->vaddr.io += dt_block->off; dt_region->paddr = pci_bus_address(pdev, dt_block->bar); dt_region->paddr += dt_block->off; dt_region->sz = dt_block->sz; } for (i = 0; i < chip->ll_rd_cnt; i++) { struct dw_edma_region *ll_region = &chip->ll_region_rd[i]; struct dw_edma_region *dt_region = &chip->dt_region_rd[i]; struct dw_edma_block *ll_block = &vsec_data.ll_rd[i]; struct dw_edma_block *dt_block = &vsec_data.dt_rd[i]; ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar]; if (!ll_region->vaddr.io) return -ENOMEM; ll_region->vaddr.io += ll_block->off; 
ll_region->paddr = pci_bus_address(pdev, ll_block->bar); ll_region->paddr += ll_block->off; ll_region->sz = ll_block->sz; dt_region->vaddr.io = pcim_iomap_table(pdev)[dt_block->bar]; if (!dt_region->vaddr.io) return -ENOMEM; dt_region->vaddr.io += dt_block->off; dt_region->paddr = pci_bus_address(pdev, dt_block->bar); dt_region->paddr += dt_block->off; dt_region->sz = dt_block->sz; } /* Debug info */ if (chip->mf == EDMA_MF_EDMA_LEGACY) pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", chip->mf); else if (chip->mf == EDMA_MF_EDMA_UNROLL) pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", chip->mf); else if (chip->mf == EDMA_MF_HDMA_COMPAT) pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", chip->mf); else pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", chip->mf); pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p)\n", vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz, chip->reg_base); for (i = 0; i < chip->ll_wr_cnt; i++) { pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", i, vsec_data.ll_wr[i].bar, vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz, chip->ll_region_wr[i].vaddr.io, &chip->ll_region_wr[i].paddr); pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", i, vsec_data.dt_wr[i].bar, vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz, chip->dt_region_wr[i].vaddr.io, &chip->dt_region_wr[i].paddr); } for (i = 0; i < chip->ll_rd_cnt; i++) { pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", i, vsec_data.ll_rd[i].bar, vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz, chip->ll_region_rd[i].vaddr.io, &chip->ll_region_rd[i].paddr); pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", i, vsec_data.dt_rd[i].bar, vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz, chip->dt_region_rd[i].vaddr.io, &chip->dt_region_rd[i].paddr); } pci_dbg(pdev, "Nr. IRQs:\t%u\n", chip->nr_irqs); /* Validating if PCI interrupts were enabled */ if (!pci_dev_msi_enabled(pdev)) { pci_err(pdev, "enable interrupt failed\n"); return -EPERM; } /* Starting eDMA driver */ err = dw_edma_probe(chip); if (err) { pci_err(pdev, "eDMA probe failed\n"); return err; } /* Saving data structure reference */ pci_set_drvdata(pdev, chip); return 0; } static void dw_edma_pcie_remove(struct pci_dev *pdev) { struct dw_edma_chip *chip = pci_get_drvdata(pdev); int err; /* Stopping eDMA driver */ err = dw_edma_remove(chip); if (err) pci_warn(pdev, "can't remove device properly: %d\n", err); /* Freeing IRQs */ pci_free_irq_vectors(pdev); } static const struct pci_device_id dw_edma_pcie_id_table[] = { { PCI_DEVICE_DATA(SYNOPSYS, EDDA, &snps_edda_data) }, { } }; MODULE_DEVICE_TABLE(pci, dw_edma_pcie_id_table); static struct pci_driver dw_edma_pcie_driver = { .name = "dw-edma-pcie", .id_table = dw_edma_pcie_id_table, .probe = dw_edma_pcie_probe, .remove = dw_edma_pcie_remove, }; module_pci_driver(dw_edma_pcie_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Synopsys DesignWare eDMA PCIe driver"); MODULE_AUTHOR("Gustavo Pimentel <[email protected]>");
linux-master
drivers/dma/dw-edma/dw-edma-pcie.c
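The PCIe glue above only covers the remote (host-driven) configuration; the DW_EDMA_CHIP_LOCAL flag tested in dw-edma-core.c and dw-hdma-v0-core.c implies an embedded registration path as well. The sketch below shows what such a registration might look like, assuming a platform that already owns its register mapping, IRQ, and linked-list memory; everything prefixed my_ is a hypothetical placeholder, and only chip fields that actually appear in the sources above are populated.

/*
 * Hedged sketch: registering an embedded (locally controlled) HDMA
 * instance with dw_edma_probe(). struct my_edma_res and its contents
 * are placeholders for platform-specific resources.
 */
#include <linux/device.h>
#include <linux/dma/edma.h>
#include <linux/sizes.h>

struct my_edma_res {			/* hypothetical platform resources */
	void __iomem *regs;
	void *ll_wr_virt, *ll_rd_virt;
	dma_addr_t ll_wr_phys, ll_rd_phys;
	int irq;
};

static struct my_edma_res *my_res;	/* sketch simplification: single instance */

static int my_irq_vector(struct device *dev, unsigned int nr)
{
	return my_res->irq;		/* one shared IRQ in this sketch */
}

static const struct dw_edma_plat_ops my_ops = {
	.irq_vector = my_irq_vector,	/* no .pci_address: CPU address == bus address */
};

static int my_register_edma(struct device *dev, struct my_edma_res *res)
{
	struct dw_edma_chip *chip;

	my_res = res;
	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = dev;
	chip->ops = &my_ops;
	chip->reg_base = res->regs;
	chip->mf = EDMA_MF_HDMA_NATIVE;		/* routes to dw_hdma_v0_core_register() */
	chip->flags = DW_EDMA_CHIP_LOCAL;	/* linked lists live in CPU memory */
	chip->nr_irqs = 1;
	chip->ll_wr_cnt = 1;
	chip->ll_rd_cnt = 1;

	/* One write and one read linked-list region, platform-provided. */
	chip->ll_region_wr[0].vaddr.mem = res->ll_wr_virt;
	chip->ll_region_wr[0].paddr = res->ll_wr_phys;
	chip->ll_region_wr[0].sz = SZ_4K;
	chip->ll_region_rd[0].vaddr.mem = res->ll_rd_virt;
	chip->ll_region_rd[0].paddr = res->ll_rd_phys;
	chip->ll_region_rd[0].sz = SZ_4K;

	return dw_edma_probe(chip);
}

With DW_EDMA_CHIP_LOCAL set, dw_hdma_v0_write_ll_data() above writes the descriptors through vaddr.mem with plain stores instead of writel()/writeq() to the io mapping.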
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2017-2018 MediaTek Inc. /* * Driver for MediaTek High-Speed DMA Controller * * Author: Sean Wang <[email protected]> * */ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/iopoll.h> #include <linux/list.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_dma.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/refcount.h> #include <linux/slab.h> #include "../virt-dma.h" #define MTK_HSDMA_USEC_POLL 20 #define MTK_HSDMA_TIMEOUT_POLL 200000 #define MTK_HSDMA_DMA_BUSWIDTHS BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) /* The default number of virtual channel */ #define MTK_HSDMA_NR_VCHANS 3 /* Only one physical channel supported */ #define MTK_HSDMA_NR_MAX_PCHANS 1 /* Macro for physical descriptor (PD) manipulation */ /* The number of PD which must be 2 of power */ #define MTK_DMA_SIZE 64 #define MTK_HSDMA_NEXT_DESP_IDX(x, y) (((x) + 1) & ((y) - 1)) #define MTK_HSDMA_LAST_DESP_IDX(x, y) (((x) - 1) & ((y) - 1)) #define MTK_HSDMA_MAX_LEN 0x3f80 #define MTK_HSDMA_ALIGN_SIZE 4 #define MTK_HSDMA_PLEN_MASK 0x3fff #define MTK_HSDMA_DESC_PLEN(x) (((x) & MTK_HSDMA_PLEN_MASK) << 16) #define MTK_HSDMA_DESC_PLEN_GET(x) (((x) >> 16) & MTK_HSDMA_PLEN_MASK) /* Registers for underlying ring manipulation */ #define MTK_HSDMA_TX_BASE 0x0 #define MTK_HSDMA_TX_CNT 0x4 #define MTK_HSDMA_TX_CPU 0x8 #define MTK_HSDMA_TX_DMA 0xc #define MTK_HSDMA_RX_BASE 0x100 #define MTK_HSDMA_RX_CNT 0x104 #define MTK_HSDMA_RX_CPU 0x108 #define MTK_HSDMA_RX_DMA 0x10c /* Registers for global setup */ #define MTK_HSDMA_GLO 0x204 #define MTK_HSDMA_GLO_MULTI_DMA BIT(10) #define MTK_HSDMA_TX_WB_DDONE BIT(6) #define MTK_HSDMA_BURST_64BYTES (0x2 << 4) #define MTK_HSDMA_GLO_RX_BUSY BIT(3) #define MTK_HSDMA_GLO_RX_DMA BIT(2) #define MTK_HSDMA_GLO_TX_BUSY BIT(1) #define MTK_HSDMA_GLO_TX_DMA BIT(0) #define MTK_HSDMA_GLO_DMA (MTK_HSDMA_GLO_TX_DMA | \ MTK_HSDMA_GLO_RX_DMA) #define MTK_HSDMA_GLO_BUSY (MTK_HSDMA_GLO_RX_BUSY | \ MTK_HSDMA_GLO_TX_BUSY) #define MTK_HSDMA_GLO_DEFAULT (MTK_HSDMA_GLO_TX_DMA | \ MTK_HSDMA_GLO_RX_DMA | \ MTK_HSDMA_TX_WB_DDONE | \ MTK_HSDMA_BURST_64BYTES | \ MTK_HSDMA_GLO_MULTI_DMA) /* Registers for reset */ #define MTK_HSDMA_RESET 0x208 #define MTK_HSDMA_RST_TX BIT(0) #define MTK_HSDMA_RST_RX BIT(16) /* Registers for interrupt control */ #define MTK_HSDMA_DLYINT 0x20c #define MTK_HSDMA_RXDLY_INT_EN BIT(15) /* Interrupt fires when the pending number's more than the specified */ #define MTK_HSDMA_RXMAX_PINT(x) (((x) & 0x7f) << 8) /* Interrupt fires when the pending time's more than the specified in 20 us */ #define MTK_HSDMA_RXMAX_PTIME(x) ((x) & 0x7f) #define MTK_HSDMA_DLYINT_DEFAULT (MTK_HSDMA_RXDLY_INT_EN | \ MTK_HSDMA_RXMAX_PINT(20) | \ MTK_HSDMA_RXMAX_PTIME(20)) #define MTK_HSDMA_INT_STATUS 0x220 #define MTK_HSDMA_INT_ENABLE 0x228 #define MTK_HSDMA_INT_RXDONE BIT(16) enum mtk_hsdma_vdesc_flag { MTK_HSDMA_VDESC_FINISHED = 0x01, }; #define IS_MTK_HSDMA_VDESC_FINISHED(x) ((x) == MTK_HSDMA_VDESC_FINISHED) /** * struct mtk_hsdma_pdesc - This is the struct holding info describing physical * descriptor (PD) and its placement must be kept at * 4-bytes alignment in little endian order. * @desc1: | The control pad used to indicate hardware how to * @desc2: | deal with the descriptor such as source and * @desc3: | destination address and data length. 
The maximum * @desc4: | data length each pdesc can handle is 0x3f80 bytes */ struct mtk_hsdma_pdesc { __le32 desc1; __le32 desc2; __le32 desc3; __le32 desc4; } __packed __aligned(4); /** * struct mtk_hsdma_vdesc - This is the struct holding info describing virtual * descriptor (VD) * @vd: An instance for struct virt_dma_desc * @len: The total data size device wants to move * @residue: The remaining data size device will move * @dest: The destination address device wants to move to * @src: The source address device wants to move from */ struct mtk_hsdma_vdesc { struct virt_dma_desc vd; size_t len; size_t residue; dma_addr_t dest; dma_addr_t src; }; /** * struct mtk_hsdma_cb - This is the struct holding extra info required for RX * ring to know what relevant VD the PD is being * mapped to. * @vd: Pointer to the relevant VD. * @flag: Flag indicating what action should be taken when VD * is completed. */ struct mtk_hsdma_cb { struct virt_dma_desc *vd; enum mtk_hsdma_vdesc_flag flag; }; /** * struct mtk_hsdma_ring - This struct holds info describing underlying ring * space * @txd: The descriptor TX ring which describes DMA source * information * @rxd: The descriptor RX ring which describes DMA * destination information * @cb: The extra information pointed at by RX ring * @tphys: The physical addr of TX ring * @rphys: The physical addr of RX ring * @cur_tptr: Pointer to the next free descriptor used by the host * @cur_rptr: Pointer to the last done descriptor by the device */ struct mtk_hsdma_ring { struct mtk_hsdma_pdesc *txd; struct mtk_hsdma_pdesc *rxd; struct mtk_hsdma_cb *cb; dma_addr_t tphys; dma_addr_t rphys; u16 cur_tptr; u16 cur_rptr; }; /** * struct mtk_hsdma_pchan - This is the struct holding info describing physical * channel (PC) * @ring: An instance for the underlying ring * @sz_ring: Total size allocated for the ring * @nr_free: Total number of free rooms in the ring. It would * be accessed and updated frequently between IRQ * context and user context to reflect whether ring * can accept requests from VD. 
*/ struct mtk_hsdma_pchan { struct mtk_hsdma_ring ring; size_t sz_ring; atomic_t nr_free; }; /** * struct mtk_hsdma_vchan - This is the struct holding info describing virtual * channel (VC) * @vc: An instance for struct virt_dma_chan * @issue_completion: The wait for all issued descriptors completited * @issue_synchronize: Bool indicating channel synchronization starts * @desc_hw_processing: List those descriptors the hardware is processing, * which is protected by vc.lock */ struct mtk_hsdma_vchan { struct virt_dma_chan vc; struct completion issue_completion; bool issue_synchronize; struct list_head desc_hw_processing; }; /** * struct mtk_hsdma_soc - This is the struct holding differences among SoCs * @ddone: Bit mask for DDONE * @ls0: Bit mask for LS0 */ struct mtk_hsdma_soc { __le32 ddone; __le32 ls0; }; /** * struct mtk_hsdma_device - This is the struct holding info describing HSDMA * device * @ddev: An instance for struct dma_device * @base: The mapped register I/O base * @clk: The clock that device internal is using * @irq: The IRQ that device are using * @dma_requests: The number of VCs the device supports to * @vc: The pointer to all available VCs * @pc: The pointer to the underlying PC * @pc_refcnt: Track how many VCs are using the PC * @lock: Lock protect agaisting multiple VCs access PC * @soc: The pointer to area holding differences among * vaious platform */ struct mtk_hsdma_device { struct dma_device ddev; void __iomem *base; struct clk *clk; u32 irq; u32 dma_requests; struct mtk_hsdma_vchan *vc; struct mtk_hsdma_pchan *pc; refcount_t pc_refcnt; /* Lock used to protect against multiple VCs access PC */ spinlock_t lock; const struct mtk_hsdma_soc *soc; }; static struct mtk_hsdma_device *to_hsdma_dev(struct dma_chan *chan) { return container_of(chan->device, struct mtk_hsdma_device, ddev); } static inline struct mtk_hsdma_vchan *to_hsdma_vchan(struct dma_chan *chan) { return container_of(chan, struct mtk_hsdma_vchan, vc.chan); } static struct mtk_hsdma_vdesc *to_hsdma_vdesc(struct virt_dma_desc *vd) { return container_of(vd, struct mtk_hsdma_vdesc, vd); } static struct device *hsdma2dev(struct mtk_hsdma_device *hsdma) { return hsdma->ddev.dev; } static u32 mtk_dma_read(struct mtk_hsdma_device *hsdma, u32 reg) { return readl(hsdma->base + reg); } static void mtk_dma_write(struct mtk_hsdma_device *hsdma, u32 reg, u32 val) { writel(val, hsdma->base + reg); } static void mtk_dma_rmw(struct mtk_hsdma_device *hsdma, u32 reg, u32 mask, u32 set) { u32 val; val = mtk_dma_read(hsdma, reg); val &= ~mask; val |= set; mtk_dma_write(hsdma, reg, val); } static void mtk_dma_set(struct mtk_hsdma_device *hsdma, u32 reg, u32 val) { mtk_dma_rmw(hsdma, reg, 0, val); } static void mtk_dma_clr(struct mtk_hsdma_device *hsdma, u32 reg, u32 val) { mtk_dma_rmw(hsdma, reg, val, 0); } static void mtk_hsdma_vdesc_free(struct virt_dma_desc *vd) { kfree(container_of(vd, struct mtk_hsdma_vdesc, vd)); } static int mtk_hsdma_busy_wait(struct mtk_hsdma_device *hsdma) { u32 status = 0; return readl_poll_timeout(hsdma->base + MTK_HSDMA_GLO, status, !(status & MTK_HSDMA_GLO_BUSY), MTK_HSDMA_USEC_POLL, MTK_HSDMA_TIMEOUT_POLL); } static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma, struct mtk_hsdma_pchan *pc) { struct mtk_hsdma_ring *ring = &pc->ring; int err; memset(pc, 0, sizeof(*pc)); /* * Allocate ring space where [0 ... MTK_DMA_SIZE - 1] is for TX ring * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring. 
*/ pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd); ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring, &ring->tphys, GFP_NOWAIT); if (!ring->txd) return -ENOMEM; ring->rxd = &ring->txd[MTK_DMA_SIZE]; ring->rphys = ring->tphys + MTK_DMA_SIZE * sizeof(*ring->txd); ring->cur_tptr = 0; ring->cur_rptr = MTK_DMA_SIZE - 1; ring->cb = kcalloc(MTK_DMA_SIZE, sizeof(*ring->cb), GFP_NOWAIT); if (!ring->cb) { err = -ENOMEM; goto err_free_dma; } atomic_set(&pc->nr_free, MTK_DMA_SIZE - 1); /* Disable HSDMA and wait for the completion */ mtk_dma_clr(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA); err = mtk_hsdma_busy_wait(hsdma); if (err) goto err_free_cb; /* Reset */ mtk_dma_set(hsdma, MTK_HSDMA_RESET, MTK_HSDMA_RST_TX | MTK_HSDMA_RST_RX); mtk_dma_clr(hsdma, MTK_HSDMA_RESET, MTK_HSDMA_RST_TX | MTK_HSDMA_RST_RX); /* Setup HSDMA initial pointer in the ring */ mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, ring->tphys); mtk_dma_write(hsdma, MTK_HSDMA_TX_CNT, MTK_DMA_SIZE); mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr); mtk_dma_write(hsdma, MTK_HSDMA_TX_DMA, 0); mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, ring->rphys); mtk_dma_write(hsdma, MTK_HSDMA_RX_CNT, MTK_DMA_SIZE); mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, ring->cur_rptr); mtk_dma_write(hsdma, MTK_HSDMA_RX_DMA, 0); /* Enable HSDMA */ mtk_dma_set(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA); /* Setup delayed interrupt */ mtk_dma_write(hsdma, MTK_HSDMA_DLYINT, MTK_HSDMA_DLYINT_DEFAULT); /* Enable interrupt */ mtk_dma_set(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE); return 0; err_free_cb: kfree(ring->cb); err_free_dma: dma_free_coherent(hsdma2dev(hsdma), pc->sz_ring, ring->txd, ring->tphys); return err; } static void mtk_hsdma_free_pchan(struct mtk_hsdma_device *hsdma, struct mtk_hsdma_pchan *pc) { struct mtk_hsdma_ring *ring = &pc->ring; /* Disable HSDMA and then wait for the completion */ mtk_dma_clr(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA); mtk_hsdma_busy_wait(hsdma); /* Reset pointer in the ring */ mtk_dma_clr(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE); mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, 0); mtk_dma_write(hsdma, MTK_HSDMA_TX_CNT, 0); mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, 0); mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, 0); mtk_dma_write(hsdma, MTK_HSDMA_RX_CNT, 0); mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, MTK_DMA_SIZE - 1); kfree(ring->cb); dma_free_coherent(hsdma2dev(hsdma), pc->sz_ring, ring->txd, ring->tphys); } static int mtk_hsdma_issue_pending_vdesc(struct mtk_hsdma_device *hsdma, struct mtk_hsdma_pchan *pc, struct mtk_hsdma_vdesc *hvd) { struct mtk_hsdma_ring *ring = &pc->ring; struct mtk_hsdma_pdesc *txd, *rxd; u16 reserved, prev, tlen, num_sgs; unsigned long flags; /* Protect against PC is accessed by multiple VCs simultaneously */ spin_lock_irqsave(&hsdma->lock, flags); /* * Reserve rooms, where pc->nr_free is used to track how many free * rooms in the ring being updated in user and IRQ context. */ num_sgs = DIV_ROUND_UP(hvd->len, MTK_HSDMA_MAX_LEN); reserved = min_t(u16, num_sgs, atomic_read(&pc->nr_free)); if (!reserved) { spin_unlock_irqrestore(&hsdma->lock, flags); return -ENOSPC; } atomic_sub(reserved, &pc->nr_free); while (reserved--) { /* Limit size by PD capability for valid data moving */ tlen = (hvd->len > MTK_HSDMA_MAX_LEN) ? MTK_HSDMA_MAX_LEN : hvd->len; /* * Setup PDs using the remaining VD info mapped on those * reserved rooms. 
And since RXD is shared memory between the * host and the device allocated by dma_alloc_coherent call, * the helper macro WRITE_ONCE can ensure the data written to * RAM would really happens. */ txd = &ring->txd[ring->cur_tptr]; WRITE_ONCE(txd->desc1, hvd->src); WRITE_ONCE(txd->desc2, hsdma->soc->ls0 | MTK_HSDMA_DESC_PLEN(tlen)); rxd = &ring->rxd[ring->cur_tptr]; WRITE_ONCE(rxd->desc1, hvd->dest); WRITE_ONCE(rxd->desc2, MTK_HSDMA_DESC_PLEN(tlen)); /* Associate VD, the PD belonged to */ ring->cb[ring->cur_tptr].vd = &hvd->vd; /* Move forward the pointer of TX ring */ ring->cur_tptr = MTK_HSDMA_NEXT_DESP_IDX(ring->cur_tptr, MTK_DMA_SIZE); /* Update VD with remaining data */ hvd->src += tlen; hvd->dest += tlen; hvd->len -= tlen; } /* * Tagging flag for the last PD for VD will be responsible for * completing VD. */ if (!hvd->len) { prev = MTK_HSDMA_LAST_DESP_IDX(ring->cur_tptr, MTK_DMA_SIZE); ring->cb[prev].flag = MTK_HSDMA_VDESC_FINISHED; } /* Ensure all changes indeed done before we're going on */ wmb(); /* * Updating into hardware the pointer of TX ring lets HSDMA to take * action for those pending PDs. */ mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr); spin_unlock_irqrestore(&hsdma->lock, flags); return 0; } static void mtk_hsdma_issue_vchan_pending(struct mtk_hsdma_device *hsdma, struct mtk_hsdma_vchan *hvc) { struct virt_dma_desc *vd, *vd2; int err; lockdep_assert_held(&hvc->vc.lock); list_for_each_entry_safe(vd, vd2, &hvc->vc.desc_issued, node) { struct mtk_hsdma_vdesc *hvd; hvd = to_hsdma_vdesc(vd); /* Map VD into PC and all VCs shares a single PC */ err = mtk_hsdma_issue_pending_vdesc(hsdma, hsdma->pc, hvd); /* * Move VD from desc_issued to desc_hw_processing when entire * VD is fit into available PDs. Otherwise, the uncompleted * VDs would stay in list desc_issued and then restart the * processing as soon as possible once underlying ring space * got freed. */ if (err == -ENOSPC || hvd->len > 0) break; /* * The extra list desc_hw_processing is used because * hardware can't provide sufficient information allowing us * to know what VDs are still working on the underlying ring. * Through the additional list, it can help us to implement * terminate_all, residue calculation and such thing needed * to know detail descriptor status on the hardware. */ list_move_tail(&vd->node, &hvc->desc_hw_processing); } } static void mtk_hsdma_free_rooms_in_ring(struct mtk_hsdma_device *hsdma) { struct mtk_hsdma_vchan *hvc; struct mtk_hsdma_pdesc *rxd; struct mtk_hsdma_vdesc *hvd; struct mtk_hsdma_pchan *pc; struct mtk_hsdma_cb *cb; int i = MTK_DMA_SIZE; __le32 desc2; u32 status; u16 next; /* Read IRQ status */ status = mtk_dma_read(hsdma, MTK_HSDMA_INT_STATUS); if (unlikely(!(status & MTK_HSDMA_INT_RXDONE))) goto rx_done; pc = hsdma->pc; /* * Using a fail-safe loop with iterations of up to MTK_DMA_SIZE to * reclaim these finished descriptors: The most number of PDs the ISR * can handle at one time shouldn't be more than MTK_DMA_SIZE so we * take it as limited count instead of just using a dangerous infinite * poll. */ while (i--) { next = MTK_HSDMA_NEXT_DESP_IDX(pc->ring.cur_rptr, MTK_DMA_SIZE); rxd = &pc->ring.rxd[next]; /* * If MTK_HSDMA_DESC_DDONE is no specified, that means data * moving for the PD is still under going. 
*/ desc2 = READ_ONCE(rxd->desc2); if (!(desc2 & hsdma->soc->ddone)) break; cb = &pc->ring.cb[next]; if (unlikely(!cb->vd)) { dev_err(hsdma2dev(hsdma), "cb->vd cannot be null\n"); break; } /* Update residue of VD the associated PD belonged to */ hvd = to_hsdma_vdesc(cb->vd); hvd->residue -= MTK_HSDMA_DESC_PLEN_GET(rxd->desc2); /* Complete VD until the relevant last PD is finished */ if (IS_MTK_HSDMA_VDESC_FINISHED(cb->flag)) { hvc = to_hsdma_vchan(cb->vd->tx.chan); spin_lock(&hvc->vc.lock); /* Remove VD from list desc_hw_processing */ list_del(&cb->vd->node); /* Add VD into list desc_completed */ vchan_cookie_complete(cb->vd); if (hvc->issue_synchronize && list_empty(&hvc->desc_hw_processing)) { complete(&hvc->issue_completion); hvc->issue_synchronize = false; } spin_unlock(&hvc->vc.lock); cb->flag = 0; } cb->vd = NULL; /* * Recycle the RXD with the helper WRITE_ONCE that can ensure * data written into RAM would really happens. */ WRITE_ONCE(rxd->desc1, 0); WRITE_ONCE(rxd->desc2, 0); pc->ring.cur_rptr = next; /* Release rooms */ atomic_inc(&pc->nr_free); } /* Ensure all changes indeed done before we're going on */ wmb(); /* Update CPU pointer for those completed PDs */ mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, pc->ring.cur_rptr); /* * Acking the pending IRQ allows hardware no longer to keep the used * IRQ line in certain trigger state when software has completed all * the finished physical descriptors. */ if (atomic_read(&pc->nr_free) >= MTK_DMA_SIZE - 1) mtk_dma_write(hsdma, MTK_HSDMA_INT_STATUS, status); /* ASAP handles pending VDs in all VCs after freeing some rooms */ for (i = 0; i < hsdma->dma_requests; i++) { hvc = &hsdma->vc[i]; spin_lock(&hvc->vc.lock); mtk_hsdma_issue_vchan_pending(hsdma, hvc); spin_unlock(&hvc->vc.lock); } rx_done: /* All completed PDs are cleaned up, so enable interrupt again */ mtk_dma_set(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE); } static irqreturn_t mtk_hsdma_irq(int irq, void *devid) { struct mtk_hsdma_device *hsdma = devid; /* * Disable interrupt until all completed PDs are cleaned up in * mtk_hsdma_free_rooms call. 
*/ mtk_dma_clr(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE); mtk_hsdma_free_rooms_in_ring(hsdma); return IRQ_HANDLED; } static struct virt_dma_desc *mtk_hsdma_find_active_desc(struct dma_chan *c, dma_cookie_t cookie) { struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c); struct virt_dma_desc *vd; list_for_each_entry(vd, &hvc->desc_hw_processing, node) if (vd->tx.cookie == cookie) return vd; list_for_each_entry(vd, &hvc->vc.desc_issued, node) if (vd->tx.cookie == cookie) return vd; return NULL; } static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c); struct mtk_hsdma_vdesc *hvd; struct virt_dma_desc *vd; enum dma_status ret; unsigned long flags; size_t bytes = 0; ret = dma_cookie_status(c, cookie, txstate); if (ret == DMA_COMPLETE || !txstate) return ret; spin_lock_irqsave(&hvc->vc.lock, flags); vd = mtk_hsdma_find_active_desc(c, cookie); spin_unlock_irqrestore(&hvc->vc.lock, flags); if (vd) { hvd = to_hsdma_vdesc(vd); bytes = hvd->residue; } dma_set_residue(txstate, bytes); return ret; } static void mtk_hsdma_issue_pending(struct dma_chan *c) { struct mtk_hsdma_device *hsdma = to_hsdma_dev(c); struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c); unsigned long flags; spin_lock_irqsave(&hvc->vc.lock, flags); if (vchan_issue_pending(&hvc->vc)) mtk_hsdma_issue_vchan_pending(hsdma, hvc); spin_unlock_irqrestore(&hvc->vc.lock, flags); } static struct dma_async_tx_descriptor * mtk_hsdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { struct mtk_hsdma_vdesc *hvd; hvd = kzalloc(sizeof(*hvd), GFP_NOWAIT); if (!hvd) return NULL; hvd->len = len; hvd->residue = len; hvd->src = src; hvd->dest = dest; return vchan_tx_prep(to_virt_chan(c), &hvd->vd, flags); } static int mtk_hsdma_free_inactive_desc(struct dma_chan *c) { struct virt_dma_chan *vc = to_virt_chan(c); unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&vc->lock, flags); list_splice_tail_init(&vc->desc_allocated, &head); list_splice_tail_init(&vc->desc_submitted, &head); list_splice_tail_init(&vc->desc_issued, &head); spin_unlock_irqrestore(&vc->lock, flags); /* At the point, we don't expect users put descriptor into VC again */ vchan_dma_desc_free_list(vc, &head); return 0; } static void mtk_hsdma_free_active_desc(struct dma_chan *c) { struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c); bool sync_needed = false; /* * Once issue_synchronize is being set, which means once the hardware * consumes all descriptors for the channel in the ring, the * synchronization must be notified immediately it is completed. */ spin_lock(&hvc->vc.lock); if (!list_empty(&hvc->desc_hw_processing)) { hvc->issue_synchronize = true; sync_needed = true; } spin_unlock(&hvc->vc.lock); if (sync_needed) wait_for_completion(&hvc->issue_completion); /* * At the point, we expect that all remaining descriptors in the ring * for the channel should be all processing done. */ WARN_ONCE(!list_empty(&hvc->desc_hw_processing), "Desc pending still in list desc_hw_processing\n"); /* Free all descriptors in list desc_completed */ vchan_synchronize(&hvc->vc); WARN_ONCE(!list_empty(&hvc->vc.desc_completed), "Desc pending still in list desc_completed\n"); } static int mtk_hsdma_terminate_all(struct dma_chan *c) { /* * Free pending descriptors not processed yet by hardware that have * previously been submitted to the channel. 
*/ mtk_hsdma_free_inactive_desc(c); /* * However, the DMA engine doesn't provide any way to stop these * descriptors being processed currently by hardware. The only way is * to just waiting until these descriptors are all processed completely * through mtk_hsdma_free_active_desc call. */ mtk_hsdma_free_active_desc(c); return 0; } static int mtk_hsdma_alloc_chan_resources(struct dma_chan *c) { struct mtk_hsdma_device *hsdma = to_hsdma_dev(c); int err; /* * Since HSDMA has only one PC, the resource for PC is being allocated * when the first VC is being created and the other VCs would run on * the same PC. */ if (!refcount_read(&hsdma->pc_refcnt)) { err = mtk_hsdma_alloc_pchan(hsdma, hsdma->pc); if (err) return err; /* * refcount_inc would complain increment on 0; use-after-free. * Thus, we need to explicitly set it as 1 initially. */ refcount_set(&hsdma->pc_refcnt, 1); } else { refcount_inc(&hsdma->pc_refcnt); } return 0; } static void mtk_hsdma_free_chan_resources(struct dma_chan *c) { struct mtk_hsdma_device *hsdma = to_hsdma_dev(c); /* Free all descriptors in all lists on the VC */ mtk_hsdma_terminate_all(c); /* The resource for PC is not freed until all the VCs are destroyed */ if (!refcount_dec_and_test(&hsdma->pc_refcnt)) return; mtk_hsdma_free_pchan(hsdma, hsdma->pc); } static int mtk_hsdma_hw_init(struct mtk_hsdma_device *hsdma) { int err; pm_runtime_enable(hsdma2dev(hsdma)); pm_runtime_get_sync(hsdma2dev(hsdma)); err = clk_prepare_enable(hsdma->clk); if (err) return err; mtk_dma_write(hsdma, MTK_HSDMA_INT_ENABLE, 0); mtk_dma_write(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DEFAULT); return 0; } static int mtk_hsdma_hw_deinit(struct mtk_hsdma_device *hsdma) { mtk_dma_write(hsdma, MTK_HSDMA_GLO, 0); clk_disable_unprepare(hsdma->clk); pm_runtime_put_sync(hsdma2dev(hsdma)); pm_runtime_disable(hsdma2dev(hsdma)); return 0; } static const struct mtk_hsdma_soc mt7623_soc = { .ddone = BIT(31), .ls0 = BIT(30), }; static const struct mtk_hsdma_soc mt7622_soc = { .ddone = BIT(15), .ls0 = BIT(14), }; static const struct of_device_id mtk_hsdma_match[] = { { .compatible = "mediatek,mt7623-hsdma", .data = &mt7623_soc}, { .compatible = "mediatek,mt7622-hsdma", .data = &mt7622_soc}, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mtk_hsdma_match); static int mtk_hsdma_probe(struct platform_device *pdev) { struct mtk_hsdma_device *hsdma; struct mtk_hsdma_vchan *vc; struct dma_device *dd; int i, err; hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL); if (!hsdma) return -ENOMEM; dd = &hsdma->ddev; hsdma->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(hsdma->base)) return PTR_ERR(hsdma->base); hsdma->soc = of_device_get_match_data(&pdev->dev); if (!hsdma->soc) { dev_err(&pdev->dev, "No device match found\n"); return -ENODEV; } hsdma->clk = devm_clk_get(&pdev->dev, "hsdma"); if (IS_ERR(hsdma->clk)) { dev_err(&pdev->dev, "No clock for %s\n", dev_name(&pdev->dev)); return PTR_ERR(hsdma->clk); } err = platform_get_irq(pdev, 0); if (err < 0) return err; hsdma->irq = err; refcount_set(&hsdma->pc_refcnt, 0); spin_lock_init(&hsdma->lock); dma_cap_set(DMA_MEMCPY, dd->cap_mask); dd->copy_align = MTK_HSDMA_ALIGN_SIZE; dd->device_alloc_chan_resources = mtk_hsdma_alloc_chan_resources; dd->device_free_chan_resources = mtk_hsdma_free_chan_resources; dd->device_tx_status = mtk_hsdma_tx_status; dd->device_issue_pending = mtk_hsdma_issue_pending; dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy; dd->device_terminate_all = mtk_hsdma_terminate_all; dd->src_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS; 
dd->dst_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS; dd->directions = BIT(DMA_MEM_TO_MEM); dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; dd->dev = &pdev->dev; INIT_LIST_HEAD(&dd->channels); hsdma->dma_requests = MTK_HSDMA_NR_VCHANS; if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node, "dma-requests", &hsdma->dma_requests)) { dev_info(&pdev->dev, "Using %u as missing dma-requests property\n", MTK_HSDMA_NR_VCHANS); } hsdma->pc = devm_kcalloc(&pdev->dev, MTK_HSDMA_NR_MAX_PCHANS, sizeof(*hsdma->pc), GFP_KERNEL); if (!hsdma->pc) return -ENOMEM; hsdma->vc = devm_kcalloc(&pdev->dev, hsdma->dma_requests, sizeof(*hsdma->vc), GFP_KERNEL); if (!hsdma->vc) return -ENOMEM; for (i = 0; i < hsdma->dma_requests; i++) { vc = &hsdma->vc[i]; vc->vc.desc_free = mtk_hsdma_vdesc_free; vchan_init(&vc->vc, dd); init_completion(&vc->issue_completion); INIT_LIST_HEAD(&vc->desc_hw_processing); } err = dma_async_device_register(dd); if (err) return err; err = of_dma_controller_register(pdev->dev.of_node, of_dma_xlate_by_chan_id, hsdma); if (err) { dev_err(&pdev->dev, "MediaTek HSDMA OF registration failed %d\n", err); goto err_unregister; } mtk_hsdma_hw_init(hsdma); err = devm_request_irq(&pdev->dev, hsdma->irq, mtk_hsdma_irq, 0, dev_name(&pdev->dev), hsdma); if (err) { dev_err(&pdev->dev, "request_irq failed with err %d\n", err); goto err_free; } platform_set_drvdata(pdev, hsdma); dev_info(&pdev->dev, "MediaTek HSDMA driver registered\n"); return 0; err_free: mtk_hsdma_hw_deinit(hsdma); of_dma_controller_free(pdev->dev.of_node); err_unregister: dma_async_device_unregister(dd); return err; } static int mtk_hsdma_remove(struct platform_device *pdev) { struct mtk_hsdma_device *hsdma = platform_get_drvdata(pdev); struct mtk_hsdma_vchan *vc; int i; /* Kill VC task */ for (i = 0; i < hsdma->dma_requests; i++) { vc = &hsdma->vc[i]; list_del(&vc->vc.chan.device_node); tasklet_kill(&vc->vc.task); } /* Disable DMA interrupt */ mtk_dma_write(hsdma, MTK_HSDMA_INT_ENABLE, 0); /* Waits for any pending IRQ handlers to complete */ synchronize_irq(hsdma->irq); /* Disable hardware */ mtk_hsdma_hw_deinit(hsdma); dma_async_device_unregister(&hsdma->ddev); of_dma_controller_free(pdev->dev.of_node); return 0; } static struct platform_driver mtk_hsdma_driver = { .probe = mtk_hsdma_probe, .remove = mtk_hsdma_remove, .driver = { .name = KBUILD_MODNAME, .of_match_table = mtk_hsdma_match, }, }; module_platform_driver(mtk_hsdma_driver); MODULE_DESCRIPTION("MediaTek High-Speed DMA Controller Driver"); MODULE_AUTHOR("Sean Wang <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/dma/mediatek/mtk-hsdma.c
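mtk-hsdma (above) sizes its descriptor rings as a power of two so the next/previous slot index can be computed with a mask rather than a modulo, and it initializes nr_free and cur_rptr to MTK_DMA_SIZE - 1, presumably so a completely full ring is never indistinguishable from an empty one. The stand-alone sketch below reuses only the two index macros from the driver; the starting pointer and the short walk around the ring are made up for illustration.

/*
 * Illustration of the masked ring-index arithmetic used by mtk-hsdma.
 * Only the two macros are taken from the driver; the walk is hypothetical.
 */
#include <stdio.h>

#define MTK_DMA_SIZE                    64      /* must be a power of two */
#define MTK_HSDMA_NEXT_DESP_IDX(x, y)   (((x) + 1) & ((y) - 1))
#define MTK_HSDMA_LAST_DESP_IDX(x, y)   (((x) - 1) & ((y) - 1))

int main(void)
{
    unsigned int tptr = MTK_DMA_SIZE - 2;       /* arbitrary starting slot */
    int i;

    for (i = 0; i < 4; i++) {
        unsigned int next = MTK_HSDMA_NEXT_DESP_IDX(tptr, MTK_DMA_SIZE);
        unsigned int prev = MTK_HSDMA_LAST_DESP_IDX(next, MTK_DMA_SIZE);

        /* prev always recovers tptr, even across the 63 -> 0 wrap */
        printf("tptr=%2u next=%2u (prev of next=%2u)\n", tptr, next, prev);
        tptr = next;
    }
    return 0;
}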
// SPDX-License-Identifier: GPL-2.0 /* * MediaTek UART APDMA driver. * * Copyright (c) 2019 MediaTek Inc. * Author: Long Cheng <[email protected]> */ #include <linux/clk.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/of_dma.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/spinlock.h> #include "../virt-dma.h" /* The default number of virtual channel */ #define MTK_UART_APDMA_NR_VCHANS 8 #define VFF_EN_B BIT(0) #define VFF_STOP_B BIT(0) #define VFF_FLUSH_B BIT(0) #define VFF_4G_EN_B BIT(0) /* rx valid size >= vff thre */ #define VFF_RX_INT_EN_B (BIT(0) | BIT(1)) /* tx left size >= vff thre */ #define VFF_TX_INT_EN_B BIT(0) #define VFF_WARM_RST_B BIT(0) #define VFF_RX_INT_CLR_B (BIT(0) | BIT(1)) #define VFF_TX_INT_CLR_B 0 #define VFF_STOP_CLR_B 0 #define VFF_EN_CLR_B 0 #define VFF_INT_EN_CLR_B 0 #define VFF_4G_SUPPORT_CLR_B 0 /* * interrupt trigger level for tx * if threshold is n, no polling is required to start tx. * otherwise need polling VFF_FLUSH. */ #define VFF_TX_THRE(n) (n) /* interrupt trigger level for rx */ #define VFF_RX_THRE(n) ((n) * 3 / 4) #define VFF_RING_SIZE 0xffff /* invert this bit when wrap ring head again */ #define VFF_RING_WRAP 0x10000 #define VFF_INT_FLAG 0x00 #define VFF_INT_EN 0x04 #define VFF_EN 0x08 #define VFF_RST 0x0c #define VFF_STOP 0x10 #define VFF_FLUSH 0x14 #define VFF_ADDR 0x1c #define VFF_LEN 0x24 #define VFF_THRE 0x28 #define VFF_WPT 0x2c #define VFF_RPT 0x30 /* TX: the buffer size HW can read. RX: the buffer size SW can read. */ #define VFF_VALID_SIZE 0x3c /* TX: the buffer size SW can write. RX: the buffer size HW can write. 
*/ #define VFF_LEFT_SIZE 0x40 #define VFF_DEBUG_STATUS 0x50 #define VFF_4G_SUPPORT 0x54 struct mtk_uart_apdmadev { struct dma_device ddev; struct clk *clk; bool support_33bits; unsigned int dma_requests; }; struct mtk_uart_apdma_desc { struct virt_dma_desc vd; dma_addr_t addr; unsigned int avail_len; }; struct mtk_chan { struct virt_dma_chan vc; struct dma_slave_config cfg; struct mtk_uart_apdma_desc *desc; enum dma_transfer_direction dir; void __iomem *base; unsigned int irq; unsigned int rx_status; }; static inline struct mtk_uart_apdmadev * to_mtk_uart_apdma_dev(struct dma_device *d) { return container_of(d, struct mtk_uart_apdmadev, ddev); } static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c) { return container_of(c, struct mtk_chan, vc.chan); } static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc (struct dma_async_tx_descriptor *t) { return container_of(t, struct mtk_uart_apdma_desc, vd.tx); } static void mtk_uart_apdma_write(struct mtk_chan *c, unsigned int reg, unsigned int val) { writel(val, c->base + reg); } static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg) { return readl(c->base + reg); } static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd) { kfree(container_of(vd, struct mtk_uart_apdma_desc, vd)); } static void mtk_uart_apdma_start_tx(struct mtk_chan *c) { struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(c->vc.chan.device); struct mtk_uart_apdma_desc *d = c->desc; unsigned int wpt, vff_sz; vff_sz = c->cfg.dst_port_window_size; if (!mtk_uart_apdma_read(c, VFF_LEN)) { mtk_uart_apdma_write(c, VFF_ADDR, d->addr); mtk_uart_apdma_write(c, VFF_LEN, vff_sz); mtk_uart_apdma_write(c, VFF_THRE, VFF_TX_THRE(vff_sz)); mtk_uart_apdma_write(c, VFF_WPT, 0); mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); if (mtkd->support_33bits) mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B); } mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B); if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B) dev_err(c->vc.chan.device->dev, "Enable TX fail\n"); if (!mtk_uart_apdma_read(c, VFF_LEFT_SIZE)) { mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B); return; } wpt = mtk_uart_apdma_read(c, VFF_WPT); wpt += c->desc->avail_len; if ((wpt & VFF_RING_SIZE) == vff_sz) wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP; /* Let DMA start moving data */ mtk_uart_apdma_write(c, VFF_WPT, wpt); /* HW auto set to 0 when left size >= threshold */ mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B); if (!mtk_uart_apdma_read(c, VFF_FLUSH)) mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B); } static void mtk_uart_apdma_start_rx(struct mtk_chan *c) { struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(c->vc.chan.device); struct mtk_uart_apdma_desc *d = c->desc; unsigned int vff_sz; vff_sz = c->cfg.src_port_window_size; if (!mtk_uart_apdma_read(c, VFF_LEN)) { mtk_uart_apdma_write(c, VFF_ADDR, d->addr); mtk_uart_apdma_write(c, VFF_LEN, vff_sz); mtk_uart_apdma_write(c, VFF_THRE, VFF_RX_THRE(vff_sz)); mtk_uart_apdma_write(c, VFF_RPT, 0); mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B); if (mtkd->support_33bits) mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B); } mtk_uart_apdma_write(c, VFF_INT_EN, VFF_RX_INT_EN_B); mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B); if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B) dev_err(c->vc.chan.device->dev, "Enable RX fail\n"); } static void mtk_uart_apdma_tx_handler(struct mtk_chan *c) { mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); mtk_uart_apdma_write(c, 
VFF_EN, VFF_EN_CLR_B); } static void mtk_uart_apdma_rx_handler(struct mtk_chan *c) { struct mtk_uart_apdma_desc *d = c->desc; unsigned int len, wg, rg; int cnt; mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B); if (!mtk_uart_apdma_read(c, VFF_VALID_SIZE)) return; mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B); mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); len = c->cfg.src_port_window_size; rg = mtk_uart_apdma_read(c, VFF_RPT); wg = mtk_uart_apdma_read(c, VFF_WPT); cnt = (wg & VFF_RING_SIZE) - (rg & VFF_RING_SIZE); /* * The buffer is ring buffer. If wrap bit different, * represents the start of the next cycle for WPT */ if ((rg ^ wg) & VFF_RING_WRAP) cnt += len; c->rx_status = d->avail_len - cnt; mtk_uart_apdma_write(c, VFF_RPT, wg); } static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c) { struct mtk_uart_apdma_desc *d = c->desc; if (d) { list_del(&d->vd.node); vchan_cookie_complete(&d->vd); c->desc = NULL; } } static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id) { struct dma_chan *chan = (struct dma_chan *)dev_id; struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); unsigned long flags; spin_lock_irqsave(&c->vc.lock, flags); if (c->dir == DMA_DEV_TO_MEM) mtk_uart_apdma_rx_handler(c); else if (c->dir == DMA_MEM_TO_DEV) mtk_uart_apdma_tx_handler(c); mtk_uart_apdma_chan_complete_handler(c); spin_unlock_irqrestore(&c->vc.lock, flags); return IRQ_HANDLED; } static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan) { struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device); struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); unsigned int status; int ret; ret = pm_runtime_resume_and_get(mtkd->ddev.dev); if (ret < 0) { pm_runtime_put_noidle(chan->device->dev); return ret; } mtk_uart_apdma_write(c, VFF_ADDR, 0); mtk_uart_apdma_write(c, VFF_THRE, 0); mtk_uart_apdma_write(c, VFF_LEN, 0); mtk_uart_apdma_write(c, VFF_RST, VFF_WARM_RST_B); ret = readx_poll_timeout(readl, c->base + VFF_EN, status, !status, 10, 100); if (ret) goto err_pm; ret = request_irq(c->irq, mtk_uart_apdma_irq_handler, IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan); if (ret < 0) { dev_err(chan->device->dev, "Can't request dma IRQ\n"); ret = -EINVAL; goto err_pm; } if (mtkd->support_33bits) mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B); err_pm: pm_runtime_put_noidle(mtkd->ddev.dev); return ret; } static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan) { struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device); struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); free_irq(c->irq, chan); tasklet_kill(&c->vc.task); vchan_free_chan_resources(&c->vc); pm_runtime_put_sync(mtkd->ddev.dev); } static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); if (!txstate) return ret; dma_set_residue(txstate, c->rx_status); return ret; } /* * dmaengine_prep_slave_single will call the function. and sglen is 1. * 8250 uart using one ring buffer, and deal with one sg. 
*/ static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg (struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen, enum dma_transfer_direction dir, unsigned long tx_flags, void *context) { struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); struct mtk_uart_apdma_desc *d; if (!is_slave_direction(dir) || sglen != 1) return NULL; /* Now allocate and setup the descriptor */ d = kzalloc(sizeof(*d), GFP_NOWAIT); if (!d) return NULL; d->avail_len = sg_dma_len(sgl); d->addr = sg_dma_address(sgl); c->dir = dir; return vchan_tx_prep(&c->vc, &d->vd, tx_flags); } static void mtk_uart_apdma_issue_pending(struct dma_chan *chan) { struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); struct virt_dma_desc *vd; unsigned long flags; spin_lock_irqsave(&c->vc.lock, flags); if (vchan_issue_pending(&c->vc) && !c->desc) { vd = vchan_next_desc(&c->vc); c->desc = to_mtk_uart_apdma_desc(&vd->tx); if (c->dir == DMA_DEV_TO_MEM) mtk_uart_apdma_start_rx(c); else if (c->dir == DMA_MEM_TO_DEV) mtk_uart_apdma_start_tx(c); } spin_unlock_irqrestore(&c->vc.lock, flags); } static int mtk_uart_apdma_slave_config(struct dma_chan *chan, struct dma_slave_config *config) { struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); memcpy(&c->cfg, config, sizeof(*config)); return 0; } static int mtk_uart_apdma_terminate_all(struct dma_chan *chan) { struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); unsigned long flags; unsigned int status; LIST_HEAD(head); int ret; mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B); ret = readx_poll_timeout(readl, c->base + VFF_FLUSH, status, status != VFF_FLUSH_B, 10, 100); if (ret) dev_err(c->vc.chan.device->dev, "flush: fail, status=0x%x\n", mtk_uart_apdma_read(c, VFF_DEBUG_STATUS)); /* * Stop need 3 steps. * 1. set stop to 1 * 2. wait en to 0 * 3. 
set stop as 0 */ mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_B); ret = readx_poll_timeout(readl, c->base + VFF_EN, status, !status, 10, 100); if (ret) dev_err(c->vc.chan.device->dev, "stop: fail, status=0x%x\n", mtk_uart_apdma_read(c, VFF_DEBUG_STATUS)); mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_CLR_B); mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); if (c->dir == DMA_DEV_TO_MEM) mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B); else if (c->dir == DMA_MEM_TO_DEV) mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); synchronize_irq(c->irq); spin_lock_irqsave(&c->vc.lock, flags); vchan_get_all_descriptors(&c->vc, &head); spin_unlock_irqrestore(&c->vc.lock, flags); vchan_dma_desc_free_list(&c->vc, &head); return 0; } static int mtk_uart_apdma_device_pause(struct dma_chan *chan) { struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); unsigned long flags; spin_lock_irqsave(&c->vc.lock, flags); mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B); mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); synchronize_irq(c->irq); spin_unlock_irqrestore(&c->vc.lock, flags); return 0; } static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd) { while (!list_empty(&mtkd->ddev.channels)) { struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels, struct mtk_chan, vc.chan.device_node); list_del(&c->vc.chan.device_node); tasklet_kill(&c->vc.task); } } static const struct of_device_id mtk_uart_apdma_match[] = { { .compatible = "mediatek,mt6577-uart-dma", }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match); static int mtk_uart_apdma_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct mtk_uart_apdmadev *mtkd; int bit_mask = 32, rc; struct mtk_chan *c; unsigned int i; mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL); if (!mtkd) return -ENOMEM; mtkd->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(mtkd->clk)) { dev_err(&pdev->dev, "No clock specified\n"); rc = PTR_ERR(mtkd->clk); return rc; } if (of_property_read_bool(np, "mediatek,dma-33bits")) mtkd->support_33bits = true; if (mtkd->support_33bits) bit_mask = 33; rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bit_mask)); if (rc) return rc; dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask); mtkd->ddev.device_alloc_chan_resources = mtk_uart_apdma_alloc_chan_resources; mtkd->ddev.device_free_chan_resources = mtk_uart_apdma_free_chan_resources; mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status; mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending; mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg; mtkd->ddev.device_config = mtk_uart_apdma_slave_config; mtkd->ddev.device_pause = mtk_uart_apdma_device_pause; mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all; mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE); mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE); mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; mtkd->ddev.dev = &pdev->dev; INIT_LIST_HEAD(&mtkd->ddev.channels); mtkd->dma_requests = MTK_UART_APDMA_NR_VCHANS; if (of_property_read_u32(np, "dma-requests", &mtkd->dma_requests)) { dev_info(&pdev->dev, "Using %u as missing dma-requests property\n", MTK_UART_APDMA_NR_VCHANS); } for (i = 0; i < mtkd->dma_requests; i++) { c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL); if (!c) { rc = -ENODEV; goto err_no_dma; } c->base = devm_platform_ioremap_resource(pdev, i); if (IS_ERR(c->base)) { rc = PTR_ERR(c->base); goto err_no_dma; } 
c->vc.desc_free = mtk_uart_apdma_desc_free; vchan_init(&c->vc, &mtkd->ddev); rc = platform_get_irq(pdev, i); if (rc < 0) goto err_no_dma; c->irq = rc; } pm_runtime_enable(&pdev->dev); rc = dma_async_device_register(&mtkd->ddev); if (rc) goto rpm_disable; platform_set_drvdata(pdev, mtkd); /* Device-tree DMA controller registration */ rc = of_dma_controller_register(np, of_dma_xlate_by_chan_id, mtkd); if (rc) goto dma_remove; return rc; dma_remove: dma_async_device_unregister(&mtkd->ddev); rpm_disable: pm_runtime_disable(&pdev->dev); err_no_dma: mtk_uart_apdma_free(mtkd); return rc; } static int mtk_uart_apdma_remove(struct platform_device *pdev) { struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev); of_dma_controller_free(pdev->dev.of_node); mtk_uart_apdma_free(mtkd); dma_async_device_unregister(&mtkd->ddev); pm_runtime_disable(&pdev->dev); return 0; } #ifdef CONFIG_PM_SLEEP static int mtk_uart_apdma_suspend(struct device *dev) { struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev); if (!pm_runtime_suspended(dev)) clk_disable_unprepare(mtkd->clk); return 0; } static int mtk_uart_apdma_resume(struct device *dev) { int ret; struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev); if (!pm_runtime_suspended(dev)) { ret = clk_prepare_enable(mtkd->clk); if (ret) return ret; } return 0; } #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_PM static int mtk_uart_apdma_runtime_suspend(struct device *dev) { struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev); clk_disable_unprepare(mtkd->clk); return 0; } static int mtk_uart_apdma_runtime_resume(struct device *dev) { struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev); return clk_prepare_enable(mtkd->clk); } #endif /* CONFIG_PM */ static const struct dev_pm_ops mtk_uart_apdma_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume) SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend, mtk_uart_apdma_runtime_resume, NULL) }; static struct platform_driver mtk_uart_apdma_driver = { .probe = mtk_uart_apdma_probe, .remove = mtk_uart_apdma_remove, .driver = { .name = KBUILD_MODNAME, .pm = &mtk_uart_apdma_pm_ops, .of_match_table = of_match_ptr(mtk_uart_apdma_match), }, }; module_platform_driver(mtk_uart_apdma_driver); MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver"); MODULE_AUTHOR("Long Cheng <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/dma/mediatek/mtk-uart-apdma.c
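mtk-uart-apdma (above) tracks its virtual FIFO with pointers that hold a 16-bit offset (masked by VFF_RING_SIZE) plus a wrap-toggle bit (VFF_RING_WRAP) that is inverted each time the pointer passes the end of the window; this is how the RX handler tells a full window from an empty one and how start_tx advances WPT. The sketch below replays that pointer arithmetic in isolation using the driver's two constants; the window size and the sample pointer values are hypothetical.

/*
 * Illustration of the VFF pointer layout used by mtk-uart-apdma:
 * low 16 bits are the offset in the ring, bit 16 toggles on each wrap.
 * The two constants match the driver; the sample values do not.
 */
#include <stdio.h>

#define VFF_RING_SIZE   0xffff          /* offset mask          */
#define VFF_RING_WRAP   0x10000         /* toggled on each wrap */

/* Bytes available to the reader: write offset minus read offset,
 * plus one full window when the wrap bits differ (writer is one lap ahead).
 */
static unsigned int rx_valid(unsigned int rpt, unsigned int wpt,
                             unsigned int vff_sz)
{
    int cnt = (int)(wpt & VFF_RING_SIZE) - (int)(rpt & VFF_RING_SIZE);

    if ((rpt ^ wpt) & VFF_RING_WRAP)
        cnt += vff_sz;
    return (unsigned int)cnt;
}

/* Advance the write pointer the way mtk_uart_apdma_start_tx() does:
 * when the offset reaches the window size, zero the offset and flip
 * the wrap bit instead of running past the window.
 */
static unsigned int tx_advance(unsigned int wpt, unsigned int len,
                               unsigned int vff_sz)
{
    wpt += len;
    if ((wpt & VFF_RING_SIZE) == vff_sz)
        wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP;
    return wpt;
}

int main(void)
{
    unsigned int vff_sz = 0x1000;               /* hypothetical 4 KiB window  */
    unsigned int rpt = 0x0f00;                  /* reader near end of window  */
    unsigned int wpt = VFF_RING_WRAP | 0x0100;  /* writer has already wrapped */

    printf("valid=%#x\n", rx_valid(rpt, wpt, vff_sz));          /* 0x200 */
    printf("wpt after 0xf00 more bytes: %#x\n",
           tx_advance(wpt, 0x0f00, vff_sz));    /* offset 0, wrap bit cleared */
    return 0;
}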
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2018-2019 MediaTek Inc. /* * Driver for MediaTek Command-Queue DMA Controller * * Author: Shun-Chih Yu <[email protected]> * */ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/iopoll.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_dma.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/refcount.h> #include <linux/slab.h> #include "../virt-dma.h" #define MTK_CQDMA_USEC_POLL 10 #define MTK_CQDMA_TIMEOUT_POLL 1000 #define MTK_CQDMA_DMA_BUSWIDTHS BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) #define MTK_CQDMA_ALIGN_SIZE 1 /* The default number of virtual channel */ #define MTK_CQDMA_NR_VCHANS 32 /* The default number of physical channel */ #define MTK_CQDMA_NR_PCHANS 3 /* Registers for underlying dma manipulation */ #define MTK_CQDMA_INT_FLAG 0x0 #define MTK_CQDMA_INT_EN 0x4 #define MTK_CQDMA_EN 0x8 #define MTK_CQDMA_RESET 0xc #define MTK_CQDMA_FLUSH 0x14 #define MTK_CQDMA_SRC 0x1c #define MTK_CQDMA_DST 0x20 #define MTK_CQDMA_LEN1 0x24 #define MTK_CQDMA_LEN2 0x28 #define MTK_CQDMA_SRC2 0x60 #define MTK_CQDMA_DST2 0x64 /* Registers setting */ #define MTK_CQDMA_EN_BIT BIT(0) #define MTK_CQDMA_INT_FLAG_BIT BIT(0) #define MTK_CQDMA_INT_EN_BIT BIT(0) #define MTK_CQDMA_FLUSH_BIT BIT(0) #define MTK_CQDMA_WARM_RST_BIT BIT(0) #define MTK_CQDMA_HARD_RST_BIT BIT(1) #define MTK_CQDMA_MAX_LEN GENMASK(27, 0) #define MTK_CQDMA_ADDR_LIMIT GENMASK(31, 0) #define MTK_CQDMA_ADDR2_SHFIT (32) /** * struct mtk_cqdma_vdesc - The struct holding info describing virtual * descriptor (CVD) * @vd: An instance for struct virt_dma_desc * @len: The total data size device wants to move * @residue: The remaining data size device will move * @dest: The destination address device wants to move to * @src: The source address device wants to move from * @ch: The pointer to the corresponding dma channel * @node: The lise_head struct to build link-list for VDs * @parent: The pointer to the parent CVD */ struct mtk_cqdma_vdesc { struct virt_dma_desc vd; size_t len; size_t residue; dma_addr_t dest; dma_addr_t src; struct dma_chan *ch; struct list_head node; struct mtk_cqdma_vdesc *parent; }; /** * struct mtk_cqdma_pchan - The struct holding info describing physical * channel (PC) * @queue: Queue for the PDs issued to this PC * @base: The mapped register I/O base of this PC * @irq: The IRQ that this PC are using * @refcnt: Track how many VCs are using this PC * @tasklet: Tasklet for this PC * @lock: Lock protect agaisting multiple VCs access PC */ struct mtk_cqdma_pchan { struct list_head queue; void __iomem *base; u32 irq; refcount_t refcnt; struct tasklet_struct tasklet; /* lock to protect PC */ spinlock_t lock; }; /** * struct mtk_cqdma_vchan - The struct holding info describing virtual * channel (VC) * @vc: An instance for struct virt_dma_chan * @pc: The pointer to the underlying PC * @issue_completion: The wait for all issued descriptors completited * @issue_synchronize: Bool indicating channel synchronization starts */ struct mtk_cqdma_vchan { struct virt_dma_chan vc; struct mtk_cqdma_pchan *pc; struct completion issue_completion; bool issue_synchronize; }; /** * struct mtk_cqdma_device - The struct holding info describing CQDMA * device * @ddev: An instance for struct dma_device * @clk: The clock that device internal is using * @dma_requests: The number of VCs the device supports to * 
@dma_channels: The number of PCs the device supports to * @vc: The pointer to all available VCs * @pc: The pointer to all the underlying PCs */ struct mtk_cqdma_device { struct dma_device ddev; struct clk *clk; u32 dma_requests; u32 dma_channels; struct mtk_cqdma_vchan *vc; struct mtk_cqdma_pchan **pc; }; static struct mtk_cqdma_device *to_cqdma_dev(struct dma_chan *chan) { return container_of(chan->device, struct mtk_cqdma_device, ddev); } static struct mtk_cqdma_vchan *to_cqdma_vchan(struct dma_chan *chan) { return container_of(chan, struct mtk_cqdma_vchan, vc.chan); } static struct mtk_cqdma_vdesc *to_cqdma_vdesc(struct virt_dma_desc *vd) { return container_of(vd, struct mtk_cqdma_vdesc, vd); } static struct device *cqdma2dev(struct mtk_cqdma_device *cqdma) { return cqdma->ddev.dev; } static u32 mtk_dma_read(struct mtk_cqdma_pchan *pc, u32 reg) { return readl(pc->base + reg); } static void mtk_dma_write(struct mtk_cqdma_pchan *pc, u32 reg, u32 val) { writel_relaxed(val, pc->base + reg); } static void mtk_dma_rmw(struct mtk_cqdma_pchan *pc, u32 reg, u32 mask, u32 set) { u32 val; val = mtk_dma_read(pc, reg); val &= ~mask; val |= set; mtk_dma_write(pc, reg, val); } static void mtk_dma_set(struct mtk_cqdma_pchan *pc, u32 reg, u32 val) { mtk_dma_rmw(pc, reg, 0, val); } static void mtk_dma_clr(struct mtk_cqdma_pchan *pc, u32 reg, u32 val) { mtk_dma_rmw(pc, reg, val, 0); } static void mtk_cqdma_vdesc_free(struct virt_dma_desc *vd) { kfree(to_cqdma_vdesc(vd)); } static int mtk_cqdma_poll_engine_done(struct mtk_cqdma_pchan *pc, bool atomic) { u32 status = 0; if (!atomic) return readl_poll_timeout(pc->base + MTK_CQDMA_EN, status, !(status & MTK_CQDMA_EN_BIT), MTK_CQDMA_USEC_POLL, MTK_CQDMA_TIMEOUT_POLL); return readl_poll_timeout_atomic(pc->base + MTK_CQDMA_EN, status, !(status & MTK_CQDMA_EN_BIT), MTK_CQDMA_USEC_POLL, MTK_CQDMA_TIMEOUT_POLL); } static int mtk_cqdma_hard_reset(struct mtk_cqdma_pchan *pc) { mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT); mtk_dma_clr(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT); return mtk_cqdma_poll_engine_done(pc, true); } static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc, struct mtk_cqdma_vdesc *cvd) { /* wait for the previous transaction done */ if (mtk_cqdma_poll_engine_done(pc, true) < 0) dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)), "cqdma wait transaction timeout\n"); /* warm reset the dma engine for the new transaction */ mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_WARM_RST_BIT); if (mtk_cqdma_poll_engine_done(pc, true) < 0) dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)), "cqdma warm reset timeout\n"); /* setup the source */ mtk_dma_set(pc, MTK_CQDMA_SRC, cvd->src & MTK_CQDMA_ADDR_LIMIT); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT mtk_dma_set(pc, MTK_CQDMA_SRC2, cvd->src >> MTK_CQDMA_ADDR2_SHFIT); #else mtk_dma_set(pc, MTK_CQDMA_SRC2, 0); #endif /* setup the destination */ mtk_dma_set(pc, MTK_CQDMA_DST, cvd->dest & MTK_CQDMA_ADDR_LIMIT); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT); #else mtk_dma_set(pc, MTK_CQDMA_DST2, 0); #endif /* setup the length */ mtk_dma_set(pc, MTK_CQDMA_LEN1, cvd->len); /* start dma engine */ mtk_dma_set(pc, MTK_CQDMA_EN, MTK_CQDMA_EN_BIT); } static void mtk_cqdma_issue_vchan_pending(struct mtk_cqdma_vchan *cvc) { struct virt_dma_desc *vd, *vd2; struct mtk_cqdma_pchan *pc = cvc->pc; struct mtk_cqdma_vdesc *cvd; bool trigger_engine = false; lockdep_assert_held(&cvc->vc.lock); lockdep_assert_held(&pc->lock); list_for_each_entry_safe(vd, vd2, &cvc->vc.desc_issued, node) { /* 
need to trigger dma engine if PC's queue is empty */ if (list_empty(&pc->queue)) trigger_engine = true; cvd = to_cqdma_vdesc(vd); /* add VD into PC's queue */ list_add_tail(&cvd->node, &pc->queue); /* start the dma engine */ if (trigger_engine) mtk_cqdma_start(pc, cvd); /* remove VD from list desc_issued */ list_del(&vd->node); } } /* * return true if this VC is active, * meaning that there are VDs under processing by the PC */ static bool mtk_cqdma_is_vchan_active(struct mtk_cqdma_vchan *cvc) { struct mtk_cqdma_vdesc *cvd; list_for_each_entry(cvd, &cvc->pc->queue, node) if (cvc == to_cqdma_vchan(cvd->ch)) return true; return false; } /* * return the pointer of the CVD that is just consumed by the PC */ static struct mtk_cqdma_vdesc *mtk_cqdma_consume_work_queue(struct mtk_cqdma_pchan *pc) { struct mtk_cqdma_vchan *cvc; struct mtk_cqdma_vdesc *cvd, *ret = NULL; /* consume a CVD from PC's queue */ cvd = list_first_entry_or_null(&pc->queue, struct mtk_cqdma_vdesc, node); if (unlikely(!cvd || !cvd->parent)) return NULL; cvc = to_cqdma_vchan(cvd->ch); ret = cvd; /* update residue of the parent CVD */ cvd->parent->residue -= cvd->len; /* delete CVD from PC's queue */ list_del(&cvd->node); spin_lock(&cvc->vc.lock); /* check whether all the child CVDs completed */ if (!cvd->parent->residue) { /* add the parent VD into list desc_completed */ vchan_cookie_complete(&cvd->parent->vd); /* setup completion if this VC is under synchronization */ if (cvc->issue_synchronize && !mtk_cqdma_is_vchan_active(cvc)) { complete(&cvc->issue_completion); cvc->issue_synchronize = false; } } spin_unlock(&cvc->vc.lock); /* start transaction for next CVD in the queue */ cvd = list_first_entry_or_null(&pc->queue, struct mtk_cqdma_vdesc, node); if (cvd) mtk_cqdma_start(pc, cvd); return ret; } static void mtk_cqdma_tasklet_cb(struct tasklet_struct *t) { struct mtk_cqdma_pchan *pc = from_tasklet(pc, t, tasklet); struct mtk_cqdma_vdesc *cvd = NULL; unsigned long flags; spin_lock_irqsave(&pc->lock, flags); /* consume the queue */ cvd = mtk_cqdma_consume_work_queue(pc); spin_unlock_irqrestore(&pc->lock, flags); /* submit the next CVD */ if (cvd) { dma_run_dependencies(&cvd->vd.tx); /* * free child CVD after completion. * the parent CVD would be freed with desc_free by user. 
*/ if (cvd->parent != cvd) kfree(cvd); } /* re-enable interrupt before leaving tasklet */ enable_irq(pc->irq); } static irqreturn_t mtk_cqdma_irq(int irq, void *devid) { struct mtk_cqdma_device *cqdma = devid; irqreturn_t ret = IRQ_NONE; bool schedule_tasklet = false; u32 i; /* clear interrupt flags for each PC */ for (i = 0; i < cqdma->dma_channels; ++i, schedule_tasklet = false) { spin_lock(&cqdma->pc[i]->lock); if (mtk_dma_read(cqdma->pc[i], MTK_CQDMA_INT_FLAG) & MTK_CQDMA_INT_FLAG_BIT) { /* clear interrupt */ mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_FLAG, MTK_CQDMA_INT_FLAG_BIT); schedule_tasklet = true; ret = IRQ_HANDLED; } spin_unlock(&cqdma->pc[i]->lock); if (schedule_tasklet) { /* disable interrupt */ disable_irq_nosync(cqdma->pc[i]->irq); /* schedule the tasklet to handle the transactions */ tasklet_schedule(&cqdma->pc[i]->tasklet); } } return ret; } static struct virt_dma_desc *mtk_cqdma_find_active_desc(struct dma_chan *c, dma_cookie_t cookie) { struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); struct virt_dma_desc *vd; unsigned long flags; spin_lock_irqsave(&cvc->pc->lock, flags); list_for_each_entry(vd, &cvc->pc->queue, node) if (vd->tx.cookie == cookie) { spin_unlock_irqrestore(&cvc->pc->lock, flags); return vd; } spin_unlock_irqrestore(&cvc->pc->lock, flags); list_for_each_entry(vd, &cvc->vc.desc_issued, node) if (vd->tx.cookie == cookie) return vd; return NULL; } static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); struct mtk_cqdma_vdesc *cvd; struct virt_dma_desc *vd; enum dma_status ret; unsigned long flags; size_t bytes = 0; ret = dma_cookie_status(c, cookie, txstate); if (ret == DMA_COMPLETE || !txstate) return ret; spin_lock_irqsave(&cvc->vc.lock, flags); vd = mtk_cqdma_find_active_desc(c, cookie); spin_unlock_irqrestore(&cvc->vc.lock, flags); if (vd) { cvd = to_cqdma_vdesc(vd); bytes = cvd->residue; } dma_set_residue(txstate, bytes); return ret; } static void mtk_cqdma_issue_pending(struct dma_chan *c) { struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); unsigned long pc_flags; unsigned long vc_flags; /* acquire PC's lock before VS's lock for lock dependency in tasklet */ spin_lock_irqsave(&cvc->pc->lock, pc_flags); spin_lock_irqsave(&cvc->vc.lock, vc_flags); if (vchan_issue_pending(&cvc->vc)) mtk_cqdma_issue_vchan_pending(cvc); spin_unlock_irqrestore(&cvc->vc.lock, vc_flags); spin_unlock_irqrestore(&cvc->pc->lock, pc_flags); } static struct dma_async_tx_descriptor * mtk_cqdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { struct mtk_cqdma_vdesc **cvd; struct dma_async_tx_descriptor *tx = NULL, *prev_tx = NULL; size_t i, tlen, nr_vd; /* * In the case that trsanction length is larger than the * DMA engine supports, a single memcpy transaction needs * to be separated into several DMA transactions. * Each DMA transaction would be described by a CVD, * and the first one is referred as the parent CVD, * while the others are child CVDs. * The parent CVD's tx descriptor is the only tx descriptor * returned to the DMA user, and it should not be completed * until all the child CVDs completed. 
*/ nr_vd = DIV_ROUND_UP(len, MTK_CQDMA_MAX_LEN); cvd = kcalloc(nr_vd, sizeof(*cvd), GFP_NOWAIT); if (!cvd) return NULL; for (i = 0; i < nr_vd; ++i) { cvd[i] = kzalloc(sizeof(*cvd[i]), GFP_NOWAIT); if (!cvd[i]) { for (; i > 0; --i) kfree(cvd[i - 1]); return NULL; } /* setup dma channel */ cvd[i]->ch = c; /* setup sourece, destination, and length */ tlen = (len > MTK_CQDMA_MAX_LEN) ? MTK_CQDMA_MAX_LEN : len; cvd[i]->len = tlen; cvd[i]->src = src; cvd[i]->dest = dest; /* setup tx descriptor */ tx = vchan_tx_prep(to_virt_chan(c), &cvd[i]->vd, flags); tx->next = NULL; if (!i) { cvd[0]->residue = len; } else { prev_tx->next = tx; cvd[i]->residue = tlen; } cvd[i]->parent = cvd[0]; /* update the src, dest, len, prev_tx for the next CVD */ src += tlen; dest += tlen; len -= tlen; prev_tx = tx; } return &cvd[0]->vd.tx; } static void mtk_cqdma_free_inactive_desc(struct dma_chan *c) { struct virt_dma_chan *vc = to_virt_chan(c); unsigned long flags; LIST_HEAD(head); /* * set desc_allocated, desc_submitted, * and desc_issued as the candicates to be freed */ spin_lock_irqsave(&vc->lock, flags); list_splice_tail_init(&vc->desc_allocated, &head); list_splice_tail_init(&vc->desc_submitted, &head); list_splice_tail_init(&vc->desc_issued, &head); spin_unlock_irqrestore(&vc->lock, flags); /* free descriptor lists */ vchan_dma_desc_free_list(vc, &head); } static void mtk_cqdma_free_active_desc(struct dma_chan *c) { struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); bool sync_needed = false; unsigned long pc_flags; unsigned long vc_flags; /* acquire PC's lock first due to lock dependency in dma ISR */ spin_lock_irqsave(&cvc->pc->lock, pc_flags); spin_lock_irqsave(&cvc->vc.lock, vc_flags); /* synchronization is required if this VC is active */ if (mtk_cqdma_is_vchan_active(cvc)) { cvc->issue_synchronize = true; sync_needed = true; } spin_unlock_irqrestore(&cvc->vc.lock, vc_flags); spin_unlock_irqrestore(&cvc->pc->lock, pc_flags); /* waiting for the completion of this VC */ if (sync_needed) wait_for_completion(&cvc->issue_completion); /* free all descriptors in list desc_completed */ vchan_synchronize(&cvc->vc); WARN_ONCE(!list_empty(&cvc->vc.desc_completed), "Desc pending still in list desc_completed\n"); } static int mtk_cqdma_terminate_all(struct dma_chan *c) { /* free descriptors not processed yet by hardware */ mtk_cqdma_free_inactive_desc(c); /* free descriptors being processed by hardware */ mtk_cqdma_free_active_desc(c); return 0; } static int mtk_cqdma_alloc_chan_resources(struct dma_chan *c) { struct mtk_cqdma_device *cqdma = to_cqdma_dev(c); struct mtk_cqdma_vchan *vc = to_cqdma_vchan(c); struct mtk_cqdma_pchan *pc = NULL; u32 i, min_refcnt = U32_MAX, refcnt; unsigned long flags; /* allocate PC with the minimun refcount */ for (i = 0; i < cqdma->dma_channels; ++i) { refcnt = refcount_read(&cqdma->pc[i]->refcnt); if (refcnt < min_refcnt) { pc = cqdma->pc[i]; min_refcnt = refcnt; } } if (!pc) return -ENOSPC; spin_lock_irqsave(&pc->lock, flags); if (!refcount_read(&pc->refcnt)) { /* allocate PC when the refcount is zero */ mtk_cqdma_hard_reset(pc); /* enable interrupt for this PC */ mtk_dma_set(pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT); /* * refcount_inc would complain increment on 0; use-after-free. * Thus, we need to explicitly set it as 1 initially. 
*/ refcount_set(&pc->refcnt, 1); } else { refcount_inc(&pc->refcnt); } spin_unlock_irqrestore(&pc->lock, flags); vc->pc = pc; return 0; } static void mtk_cqdma_free_chan_resources(struct dma_chan *c) { struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); unsigned long flags; /* free all descriptors in all lists on the VC */ mtk_cqdma_terminate_all(c); spin_lock_irqsave(&cvc->pc->lock, flags); /* PC is not freed until there is no VC mapped to it */ if (refcount_dec_and_test(&cvc->pc->refcnt)) { /* start the flush operation and stop the engine */ mtk_dma_set(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT); /* wait for the completion of flush operation */ if (mtk_cqdma_poll_engine_done(cvc->pc, true) < 0) dev_err(cqdma2dev(to_cqdma_dev(c)), "cqdma flush timeout\n"); /* clear the flush bit and interrupt flag */ mtk_dma_clr(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT); mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_FLAG, MTK_CQDMA_INT_FLAG_BIT); /* disable interrupt for this PC */ mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT); } spin_unlock_irqrestore(&cvc->pc->lock, flags); } static int mtk_cqdma_hw_init(struct mtk_cqdma_device *cqdma) { unsigned long flags; int err; u32 i; pm_runtime_enable(cqdma2dev(cqdma)); pm_runtime_get_sync(cqdma2dev(cqdma)); err = clk_prepare_enable(cqdma->clk); if (err) { pm_runtime_put_sync(cqdma2dev(cqdma)); pm_runtime_disable(cqdma2dev(cqdma)); return err; } /* reset all PCs */ for (i = 0; i < cqdma->dma_channels; ++i) { spin_lock_irqsave(&cqdma->pc[i]->lock, flags); if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) { dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n"); spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); clk_disable_unprepare(cqdma->clk); pm_runtime_put_sync(cqdma2dev(cqdma)); pm_runtime_disable(cqdma2dev(cqdma)); return -EINVAL; } spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); } return 0; } static void mtk_cqdma_hw_deinit(struct mtk_cqdma_device *cqdma) { unsigned long flags; u32 i; /* reset all PCs */ for (i = 0; i < cqdma->dma_channels; ++i) { spin_lock_irqsave(&cqdma->pc[i]->lock, flags); if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n"); spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); } clk_disable_unprepare(cqdma->clk); pm_runtime_put_sync(cqdma2dev(cqdma)); pm_runtime_disable(cqdma2dev(cqdma)); } static const struct of_device_id mtk_cqdma_match[] = { { .compatible = "mediatek,mt6765-cqdma" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mtk_cqdma_match); static int mtk_cqdma_probe(struct platform_device *pdev) { struct mtk_cqdma_device *cqdma; struct mtk_cqdma_vchan *vc; struct dma_device *dd; int err; u32 i; cqdma = devm_kzalloc(&pdev->dev, sizeof(*cqdma), GFP_KERNEL); if (!cqdma) return -ENOMEM; dd = &cqdma->ddev; cqdma->clk = devm_clk_get(&pdev->dev, "cqdma"); if (IS_ERR(cqdma->clk)) { dev_err(&pdev->dev, "No clock for %s\n", dev_name(&pdev->dev)); return PTR_ERR(cqdma->clk); } dma_cap_set(DMA_MEMCPY, dd->cap_mask); dd->copy_align = MTK_CQDMA_ALIGN_SIZE; dd->device_alloc_chan_resources = mtk_cqdma_alloc_chan_resources; dd->device_free_chan_resources = mtk_cqdma_free_chan_resources; dd->device_tx_status = mtk_cqdma_tx_status; dd->device_issue_pending = mtk_cqdma_issue_pending; dd->device_prep_dma_memcpy = mtk_cqdma_prep_dma_memcpy; dd->device_terminate_all = mtk_cqdma_terminate_all; dd->src_addr_widths = MTK_CQDMA_DMA_BUSWIDTHS; dd->dst_addr_widths = MTK_CQDMA_DMA_BUSWIDTHS; dd->directions = BIT(DMA_MEM_TO_MEM); dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; dd->dev = 
&pdev->dev; INIT_LIST_HEAD(&dd->channels); if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node, "dma-requests", &cqdma->dma_requests)) { dev_info(&pdev->dev, "Using %u as missing dma-requests property\n", MTK_CQDMA_NR_VCHANS); cqdma->dma_requests = MTK_CQDMA_NR_VCHANS; } if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node, "dma-channels", &cqdma->dma_channels)) { dev_info(&pdev->dev, "Using %u as missing dma-channels property\n", MTK_CQDMA_NR_PCHANS); cqdma->dma_channels = MTK_CQDMA_NR_PCHANS; } cqdma->pc = devm_kcalloc(&pdev->dev, cqdma->dma_channels, sizeof(*cqdma->pc), GFP_KERNEL); if (!cqdma->pc) return -ENOMEM; /* initialization for PCs */ for (i = 0; i < cqdma->dma_channels; ++i) { cqdma->pc[i] = devm_kcalloc(&pdev->dev, 1, sizeof(**cqdma->pc), GFP_KERNEL); if (!cqdma->pc[i]) return -ENOMEM; INIT_LIST_HEAD(&cqdma->pc[i]->queue); spin_lock_init(&cqdma->pc[i]->lock); refcount_set(&cqdma->pc[i]->refcnt, 0); cqdma->pc[i]->base = devm_platform_ioremap_resource(pdev, i); if (IS_ERR(cqdma->pc[i]->base)) return PTR_ERR(cqdma->pc[i]->base); /* allocate IRQ resource */ err = platform_get_irq(pdev, i); if (err < 0) return err; cqdma->pc[i]->irq = err; err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq, mtk_cqdma_irq, 0, dev_name(&pdev->dev), cqdma); if (err) { dev_err(&pdev->dev, "request_irq failed with err %d\n", err); return -EINVAL; } } /* allocate resource for VCs */ cqdma->vc = devm_kcalloc(&pdev->dev, cqdma->dma_requests, sizeof(*cqdma->vc), GFP_KERNEL); if (!cqdma->vc) return -ENOMEM; for (i = 0; i < cqdma->dma_requests; i++) { vc = &cqdma->vc[i]; vc->vc.desc_free = mtk_cqdma_vdesc_free; vchan_init(&vc->vc, dd); init_completion(&vc->issue_completion); } err = dma_async_device_register(dd); if (err) return err; err = of_dma_controller_register(pdev->dev.of_node, of_dma_xlate_by_chan_id, cqdma); if (err) { dev_err(&pdev->dev, "MediaTek CQDMA OF registration failed %d\n", err); goto err_unregister; } err = mtk_cqdma_hw_init(cqdma); if (err) { dev_err(&pdev->dev, "MediaTek CQDMA HW initialization failed %d\n", err); goto err_unregister; } platform_set_drvdata(pdev, cqdma); /* initialize tasklet for each PC */ for (i = 0; i < cqdma->dma_channels; ++i) tasklet_setup(&cqdma->pc[i]->tasklet, mtk_cqdma_tasklet_cb); dev_info(&pdev->dev, "MediaTek CQDMA driver registered\n"); return 0; err_unregister: dma_async_device_unregister(dd); return err; } static int mtk_cqdma_remove(struct platform_device *pdev) { struct mtk_cqdma_device *cqdma = platform_get_drvdata(pdev); struct mtk_cqdma_vchan *vc; unsigned long flags; int i; /* kill VC task */ for (i = 0; i < cqdma->dma_requests; i++) { vc = &cqdma->vc[i]; list_del(&vc->vc.chan.device_node); tasklet_kill(&vc->vc.task); } /* disable interrupt */ for (i = 0; i < cqdma->dma_channels; i++) { spin_lock_irqsave(&cqdma->pc[i]->lock, flags); mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT); spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); /* Waits for any pending IRQ handlers to complete */ synchronize_irq(cqdma->pc[i]->irq); tasklet_kill(&cqdma->pc[i]->tasklet); } /* disable hardware */ mtk_cqdma_hw_deinit(cqdma); dma_async_device_unregister(&cqdma->ddev); of_dma_controller_free(pdev->dev.of_node); return 0; } static struct platform_driver mtk_cqdma_driver = { .probe = mtk_cqdma_probe, .remove = mtk_cqdma_remove, .driver = { .name = KBUILD_MODNAME, .of_match_table = mtk_cqdma_match, }, }; module_platform_driver(mtk_cqdma_driver); MODULE_DESCRIPTION("MediaTek CQDMA Controller Driver"); MODULE_AUTHOR("Shun-Chih Yu 
<[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/dma/mediatek/mtk-cqdma.c
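The channel-allocation path in the CQDMA driver above spells out a small policy worth isolating: a physical channel (PC) is chosen by lowest refcount, and the very first user must refcount_set() the count to 1 (refcount_inc() from 0 would warn), while later users just increment it. The following is a minimal userspace sketch of that policy only; the fake_pc struct, the plain integer counter, and the absence of locking are simplifications for illustration, not the driver's actual types.

/*
 * Sketch of the "least-referenced PC, init hardware on first user" policy.
 * A plain unsigned int stands in for refcount_t; no locking is shown.
 */
#include <limits.h>
#include <stdio.h>

#define NR_PCHANS 3

struct fake_pc {
	unsigned int refcnt;
	int hw_enabled;
};

static struct fake_pc *pick_least_used(struct fake_pc *pc, int n)
{
	unsigned int min = UINT_MAX;
	struct fake_pc *best = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (pc[i].refcnt < min) {
			min = pc[i].refcnt;
			best = &pc[i];
		}
	}
	return best;
}

static void claim(struct fake_pc *pc)
{
	if (pc->refcnt == 0) {
		/* first user: reset/enable the hardware, then set count to 1 */
		pc->hw_enabled = 1;
		pc->refcnt = 1;		/* mirrors refcount_set(&pc->refcnt, 1) */
	} else {
		pc->refcnt++;		/* mirrors refcount_inc(&pc->refcnt) */
	}
}

int main(void)
{
	struct fake_pc pcs[NR_PCHANS] = { 0 };
	int i;

	for (i = 0; i < 5; i++)
		claim(pick_least_used(pcs, NR_PCHANS));

	for (i = 0; i < NR_PCHANS; i++)
		printf("pc%d: refcnt=%u enabled=%d\n",
		       i, pcs[i].refcnt, pcs[i].hw_enabled);
	return 0;
}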
// SPDX-License-Identifier: GPL-2.0-or-later /* * SiFive FU540 Platform DMA driver * Copyright (C) 2019 SiFive * * Based partially on: * - drivers/dma/fsl-edma.c * - drivers/dma/dw-edma/ * - drivers/dma/pxa-dma.c * * See the following sources for further documentation: * - Chapter 12 "Platform DMA Engine (PDMA)" of * SiFive FU540-C000 v1.0 * https://static.dev.sifive.com/FU540-C000-v1.0.pdf */ #include <linux/module.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/mod_devicetable.h> #include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/slab.h> #include "sf-pdma.h" #ifndef readq static inline unsigned long long readq(void __iomem *addr) { return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL); } #endif #ifndef writeq static inline void writeq(unsigned long long v, void __iomem *addr) { writel(lower_32_bits(v), addr); writel(upper_32_bits(v), addr + 4); } #endif static inline struct sf_pdma_chan *to_sf_pdma_chan(struct dma_chan *dchan) { return container_of(dchan, struct sf_pdma_chan, vchan.chan); } static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd) { return container_of(vd, struct sf_pdma_desc, vdesc); } static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan) { struct sf_pdma_desc *desc; desc = kzalloc(sizeof(*desc), GFP_NOWAIT); if (!desc) return NULL; desc->chan = chan; return desc; } static void sf_pdma_fill_desc(struct sf_pdma_desc *desc, u64 dst, u64 src, u64 size) { desc->xfer_type = PDMA_FULL_SPEED; desc->xfer_size = size; desc->dst_addr = dst; desc->src_addr = src; } static void sf_pdma_disclaim_chan(struct sf_pdma_chan *chan) { struct pdma_regs *regs = &chan->regs; writel(PDMA_CLEAR_CTRL, regs->ctrl); } static struct dma_async_tx_descriptor * sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan); struct sf_pdma_desc *desc; unsigned long iflags; if (chan && (!len || !dest || !src)) { dev_err(chan->pdma->dma_dev.dev, "Please check dma len, dest, src!\n"); return NULL; } desc = sf_pdma_alloc_desc(chan); if (!desc) return NULL; desc->dirn = DMA_MEM_TO_MEM; desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); spin_lock_irqsave(&chan->vchan.lock, iflags); sf_pdma_fill_desc(desc, dest, src, len); spin_unlock_irqrestore(&chan->vchan.lock, iflags); return desc->async_tx; } static int sf_pdma_slave_config(struct dma_chan *dchan, struct dma_slave_config *cfg) { struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan); memcpy(&chan->cfg, cfg, sizeof(*cfg)); return 0; } static int sf_pdma_alloc_chan_resources(struct dma_chan *dchan) { struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan); struct pdma_regs *regs = &chan->regs; dma_cookie_init(dchan); writel(PDMA_CLAIM_MASK, regs->ctrl); return 0; } static void sf_pdma_disable_request(struct sf_pdma_chan *chan) { struct pdma_regs *regs = &chan->regs; writel(readl(regs->ctrl) & ~PDMA_RUN_MASK, regs->ctrl); } static void sf_pdma_free_chan_resources(struct dma_chan *dchan) { struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan); unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&chan->vchan.lock, flags); sf_pdma_disable_request(chan); kfree(chan->desc); chan->desc = NULL; vchan_get_all_descriptors(&chan->vchan, &head); sf_pdma_disclaim_chan(chan); spin_unlock_irqrestore(&chan->vchan.lock, flags); vchan_dma_desc_free_list(&chan->vchan, &head); } static size_t sf_pdma_desc_residue(struct sf_pdma_chan 
*chan, dma_cookie_t cookie) { struct virt_dma_desc *vd = NULL; struct pdma_regs *regs = &chan->regs; unsigned long flags; u64 residue = 0; struct sf_pdma_desc *desc; struct dma_async_tx_descriptor *tx = NULL; spin_lock_irqsave(&chan->vchan.lock, flags); list_for_each_entry(vd, &chan->vchan.desc_submitted, node) if (vd->tx.cookie == cookie) tx = &vd->tx; if (!tx) goto out; if (cookie == tx->chan->completed_cookie) goto out; if (cookie == tx->cookie) { residue = readq(regs->residue); } else { vd = vchan_find_desc(&chan->vchan, cookie); if (!vd) goto out; desc = to_sf_pdma_desc(vd); residue = desc->xfer_size; } out: spin_unlock_irqrestore(&chan->vchan.lock, flags); return residue; } static enum dma_status sf_pdma_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan); enum dma_status status; status = dma_cookie_status(dchan, cookie, txstate); if (txstate && status != DMA_ERROR) dma_set_residue(txstate, sf_pdma_desc_residue(chan, cookie)); return status; } static int sf_pdma_terminate_all(struct dma_chan *dchan) { struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan); unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&chan->vchan.lock, flags); sf_pdma_disable_request(chan); kfree(chan->desc); chan->desc = NULL; chan->xfer_err = false; vchan_get_all_descriptors(&chan->vchan, &head); spin_unlock_irqrestore(&chan->vchan.lock, flags); vchan_dma_desc_free_list(&chan->vchan, &head); return 0; } static void sf_pdma_enable_request(struct sf_pdma_chan *chan) { struct pdma_regs *regs = &chan->regs; u32 v; v = PDMA_CLAIM_MASK | PDMA_ENABLE_DONE_INT_MASK | PDMA_ENABLE_ERR_INT_MASK | PDMA_RUN_MASK; writel(v, regs->ctrl); } static struct sf_pdma_desc *sf_pdma_get_first_pending_desc(struct sf_pdma_chan *chan) { struct virt_dma_chan *vchan = &chan->vchan; struct virt_dma_desc *vdesc; if (list_empty(&vchan->desc_issued)) return NULL; vdesc = list_first_entry(&vchan->desc_issued, struct virt_dma_desc, node); return container_of(vdesc, struct sf_pdma_desc, vdesc); } static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan) { struct sf_pdma_desc *desc = chan->desc; struct pdma_regs *regs = &chan->regs; if (!desc) { dev_err(chan->pdma->dma_dev.dev, "NULL desc.\n"); return; } writel(desc->xfer_type, regs->xfer_type); writeq(desc->xfer_size, regs->xfer_size); writeq(desc->dst_addr, regs->dst_addr); writeq(desc->src_addr, regs->src_addr); chan->desc = desc; chan->status = DMA_IN_PROGRESS; sf_pdma_enable_request(chan); } static void sf_pdma_issue_pending(struct dma_chan *dchan) { struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan); unsigned long flags; spin_lock_irqsave(&chan->vchan.lock, flags); if (!chan->desc && vchan_issue_pending(&chan->vchan)) { /* vchan_issue_pending has made a check that desc in not NULL */ chan->desc = sf_pdma_get_first_pending_desc(chan); sf_pdma_xfer_desc(chan); } spin_unlock_irqrestore(&chan->vchan.lock, flags); } static void sf_pdma_free_desc(struct virt_dma_desc *vdesc) { struct sf_pdma_desc *desc; desc = to_sf_pdma_desc(vdesc); kfree(desc); } static void sf_pdma_donebh_tasklet(struct tasklet_struct *t) { struct sf_pdma_chan *chan = from_tasklet(chan, t, done_tasklet); unsigned long flags; spin_lock_irqsave(&chan->lock, flags); if (chan->xfer_err) { chan->retries = MAX_RETRY; chan->status = DMA_COMPLETE; chan->xfer_err = false; } spin_unlock_irqrestore(&chan->lock, flags); spin_lock_irqsave(&chan->vchan.lock, flags); list_del(&chan->desc->vdesc.node); vchan_cookie_complete(&chan->desc->vdesc); chan->desc 
= sf_pdma_get_first_pending_desc(chan); if (chan->desc) sf_pdma_xfer_desc(chan); spin_unlock_irqrestore(&chan->vchan.lock, flags); } static void sf_pdma_errbh_tasklet(struct tasklet_struct *t) { struct sf_pdma_chan *chan = from_tasklet(chan, t, err_tasklet); struct sf_pdma_desc *desc = chan->desc; unsigned long flags; spin_lock_irqsave(&chan->lock, flags); if (chan->retries <= 0) { /* fail to recover */ spin_unlock_irqrestore(&chan->lock, flags); dmaengine_desc_get_callback_invoke(desc->async_tx, NULL); } else { /* retry */ chan->retries--; chan->xfer_err = true; chan->status = DMA_ERROR; sf_pdma_enable_request(chan); spin_unlock_irqrestore(&chan->lock, flags); } } static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id) { struct sf_pdma_chan *chan = dev_id; struct pdma_regs *regs = &chan->regs; u64 residue; spin_lock(&chan->vchan.lock); writel((readl(regs->ctrl)) & ~PDMA_DONE_STATUS_MASK, regs->ctrl); residue = readq(regs->residue); if (!residue) { tasklet_hi_schedule(&chan->done_tasklet); } else { /* submit next trascatioin if possible */ struct sf_pdma_desc *desc = chan->desc; desc->src_addr += desc->xfer_size - residue; desc->dst_addr += desc->xfer_size - residue; desc->xfer_size = residue; sf_pdma_xfer_desc(chan); } spin_unlock(&chan->vchan.lock); return IRQ_HANDLED; } static irqreturn_t sf_pdma_err_isr(int irq, void *dev_id) { struct sf_pdma_chan *chan = dev_id; struct pdma_regs *regs = &chan->regs; spin_lock(&chan->lock); writel((readl(regs->ctrl)) & ~PDMA_ERR_STATUS_MASK, regs->ctrl); spin_unlock(&chan->lock); tasklet_schedule(&chan->err_tasklet); return IRQ_HANDLED; } /** * sf_pdma_irq_init() - Init PDMA IRQ Handlers * @pdev: pointer of platform_device * @pdma: pointer of PDMA engine. Caller should check NULL * * Initialize DONE and ERROR interrupt handler for 4 channels. Caller should * make sure the pointer passed in are non-NULL. This function should be called * only one time during the device probe. * * Context: Any context. * * Return: * * 0 - OK to init all IRQ handlers * * -EINVAL - Fail to request IRQ */ static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma) { int irq, r, i; struct sf_pdma_chan *chan; for (i = 0; i < pdma->n_chans; i++) { chan = &pdma->chans[i]; irq = platform_get_irq(pdev, i * 2); if (irq < 0) return -EINVAL; r = devm_request_irq(&pdev->dev, irq, sf_pdma_done_isr, 0, dev_name(&pdev->dev), (void *)chan); if (r) { dev_err(&pdev->dev, "Fail to attach done ISR: %d\n", r); return -EINVAL; } chan->txirq = irq; irq = platform_get_irq(pdev, (i * 2) + 1); if (irq < 0) return -EINVAL; r = devm_request_irq(&pdev->dev, irq, sf_pdma_err_isr, 0, dev_name(&pdev->dev), (void *)chan); if (r) { dev_err(&pdev->dev, "Fail to attach err ISR: %d\n", r); return -EINVAL; } chan->errirq = irq; } return 0; } /** * sf_pdma_setup_chans() - Init settings of each channel * @pdma: pointer of PDMA engine. Caller should check NULL * * Initialize all data structure and register base. Caller should make sure * the pointer passed in are non-NULL. This function should be called only * one time during the device probe. * * Context: Any context. 
* * Return: none */ static void sf_pdma_setup_chans(struct sf_pdma *pdma) { int i; struct sf_pdma_chan *chan; INIT_LIST_HEAD(&pdma->dma_dev.channels); for (i = 0; i < pdma->n_chans; i++) { chan = &pdma->chans[i]; chan->regs.ctrl = SF_PDMA_REG_BASE(i) + PDMA_CTRL; chan->regs.xfer_type = SF_PDMA_REG_BASE(i) + PDMA_XFER_TYPE; chan->regs.xfer_size = SF_PDMA_REG_BASE(i) + PDMA_XFER_SIZE; chan->regs.dst_addr = SF_PDMA_REG_BASE(i) + PDMA_DST_ADDR; chan->regs.src_addr = SF_PDMA_REG_BASE(i) + PDMA_SRC_ADDR; chan->regs.act_type = SF_PDMA_REG_BASE(i) + PDMA_ACT_TYPE; chan->regs.residue = SF_PDMA_REG_BASE(i) + PDMA_REMAINING_BYTE; chan->regs.cur_dst_addr = SF_PDMA_REG_BASE(i) + PDMA_CUR_DST_ADDR; chan->regs.cur_src_addr = SF_PDMA_REG_BASE(i) + PDMA_CUR_SRC_ADDR; chan->pdma = pdma; chan->pm_state = RUNNING; chan->slave_id = i; chan->xfer_err = false; spin_lock_init(&chan->lock); chan->vchan.desc_free = sf_pdma_free_desc; vchan_init(&chan->vchan, &pdma->dma_dev); writel(PDMA_CLEAR_CTRL, chan->regs.ctrl); tasklet_setup(&chan->done_tasklet, sf_pdma_donebh_tasklet); tasklet_setup(&chan->err_tasklet, sf_pdma_errbh_tasklet); } } static int sf_pdma_probe(struct platform_device *pdev) { struct sf_pdma *pdma; int ret, n_chans; const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES; ret = of_property_read_u32(pdev->dev.of_node, "dma-channels", &n_chans); if (ret) { /* backwards-compatibility for no dma-channels property */ dev_dbg(&pdev->dev, "set number of channels to default value: 4\n"); n_chans = PDMA_MAX_NR_CH; } else if (n_chans > PDMA_MAX_NR_CH) { dev_err(&pdev->dev, "the number of channels exceeds the maximum\n"); return -EINVAL; } pdma = devm_kzalloc(&pdev->dev, struct_size(pdma, chans, n_chans), GFP_KERNEL); if (!pdma) return -ENOMEM; pdma->n_chans = n_chans; pdma->membase = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pdma->membase)) return PTR_ERR(pdma->membase); ret = sf_pdma_irq_init(pdev, pdma); if (ret) return ret; sf_pdma_setup_chans(pdma); pdma->dma_dev.dev = &pdev->dev; /* Setup capability */ dma_cap_set(DMA_MEMCPY, pdma->dma_dev.cap_mask); pdma->dma_dev.copy_align = 2; pdma->dma_dev.src_addr_widths = widths; pdma->dma_dev.dst_addr_widths = widths; pdma->dma_dev.directions = BIT(DMA_MEM_TO_MEM); pdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; pdma->dma_dev.descriptor_reuse = true; /* Setup DMA APIs */ pdma->dma_dev.device_alloc_chan_resources = sf_pdma_alloc_chan_resources; pdma->dma_dev.device_free_chan_resources = sf_pdma_free_chan_resources; pdma->dma_dev.device_tx_status = sf_pdma_tx_status; pdma->dma_dev.device_prep_dma_memcpy = sf_pdma_prep_dma_memcpy; pdma->dma_dev.device_config = sf_pdma_slave_config; pdma->dma_dev.device_terminate_all = sf_pdma_terminate_all; pdma->dma_dev.device_issue_pending = sf_pdma_issue_pending; platform_set_drvdata(pdev, pdma); ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (ret) dev_warn(&pdev->dev, "Failed to set DMA mask. Fall back to default.\n"); ret = dma_async_device_register(&pdma->dma_dev); if (ret) { dev_err(&pdev->dev, "Can't register SiFive Platform DMA. 
(%d)\n", ret); return ret; } return 0; } static int sf_pdma_remove(struct platform_device *pdev) { struct sf_pdma *pdma = platform_get_drvdata(pdev); struct sf_pdma_chan *ch; int i; for (i = 0; i < pdma->n_chans; i++) { ch = &pdma->chans[i]; devm_free_irq(&pdev->dev, ch->txirq, ch); devm_free_irq(&pdev->dev, ch->errirq, ch); list_del(&ch->vchan.chan.device_node); tasklet_kill(&ch->vchan.task); tasklet_kill(&ch->done_tasklet); tasklet_kill(&ch->err_tasklet); } dma_async_device_unregister(&pdma->dma_dev); return 0; } static const struct of_device_id sf_pdma_dt_ids[] = { { .compatible = "sifive,fu540-c000-pdma" }, { .compatible = "sifive,pdma0" }, {}, }; MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids); static struct platform_driver sf_pdma_driver = { .probe = sf_pdma_probe, .remove = sf_pdma_remove, .driver = { .name = "sf-pdma", .of_match_table = sf_pdma_dt_ids, }, }; static int __init sf_pdma_init(void) { return platform_driver_register(&sf_pdma_driver); } static void __exit sf_pdma_exit(void) { platform_driver_unregister(&sf_pdma_driver); } /* do early init */ subsys_initcall(sf_pdma_init); module_exit(sf_pdma_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("SiFive Platform DMA driver"); MODULE_AUTHOR("Green Wan <[email protected]>");
linux-master
drivers/dma/sf-pdma/sf-pdma.c
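The #ifndef readq/writeq fallbacks near the top of the PDMA driver above build a 64-bit register access out of two 32-bit ones, low word at offset 0 and high word at offset 4. The standalone sketch below demonstrates only that hi/lo composition, using plain memory in place of MMIO; it deliberately ignores __iomem annotations, ordering, and whether a given device tolerates split accesses.

/*
 * Compose a 64-bit value from two 32-bit halves, low word first,
 * the same split the readq()/writeq() fallbacks perform.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void fake_writeq(uint64_t v, uint32_t *reg)
{
	reg[0] = (uint32_t)(v & 0xffffffffu);	/* like lower_32_bits(v) */
	reg[1] = (uint32_t)(v >> 32);		/* like upper_32_bits(v) */
}

static uint64_t fake_readq(const uint32_t *reg)
{
	return (uint64_t)reg[0] | ((uint64_t)reg[1] << 32);
}

int main(void)
{
	uint32_t reg[2];
	uint64_t val = 0x1122334455667788ULL;

	fake_writeq(val, reg);
	printf("read back: 0x%" PRIx64 "\n", fake_readq(reg));
	return 0;
}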
// SPDX-License-Identifier: GPL-2.0-only /* * Bestcomm GenBD TX task microcode * * Copyright (C) 2006 AppSpec Computer Technologies Corp. * Jeff Gibbons <[email protected]> * Copyright (c) 2004 Freescale Semiconductor, Inc. * * Based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex * on Tue Mar 4 10:14:12 2006 GMT */ #include <asm/types.h> /* * The header consists of the following fields: * u32 magic; * u8 desc_size; * u8 var_size; * u8 inc_size; * u8 first_var; * u8 reserved[8]; * * The size fields contain the number of 32-bit words. */ u32 bcom_gen_bd_tx_task[] = { /* header */ 0x4243544b, 0x0f040609, 0x00000000, 0x00000000, /* Task descriptors */ 0x800220e3, /* LCD: idx0 = var0, idx1 = var4; idx1 <= var3; idx0 += inc4, idx1 += inc3 */ 0x13e01010, /* DRD1A: var4 = var2; FN=0 MORE init=31 WS=0 RS=0 */ 0xb8808264, /* LCD: idx2 = *idx1, idx3 = var1; idx2 < var9; idx2 += inc4, idx3 += inc4 */ 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */ 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */ 0xd9190300, /* LCDEXT: idx2 = idx2; idx2 > var12; idx2 += inc0 */ 0xb8c5e009, /* LCD: idx3 = *(idx1 + var00000015); ; idx3 += inc1 */ 0x03fec398, /* DRD1A: *idx0 = *idx3; FN=0 init=31 WS=3 RS=3 */ 0x9919826a, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var9; idx2 += inc5, idx3 += inc2 */ 0x0feac398, /* DRD1A: *idx0 = *idx3; FN=0 TFD INT init=31 WS=1 RS=1 */ 0x99190036, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc6 */ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */ 0x0c4cf889, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var9) */ 0x000001f8, /* NOP */ /* VAR[9]-VAR[12] */ 0x40000000, 0x7fff7fff, 0x00000000, 0x40000004, /* INC[0]-INC[5] */ 0x40000000, 0xe0000000, 0xe0000000, 0xa0000008, 0x20000000, 0x4000ffff, };
linux-master
drivers/dma/bestcomm/bcom_gen_bd_tx_task.c
// SPDX-License-Identifier: GPL-2.0-only /* * Simple memory allocator for on-board SRAM * * Maintainer : Sylvain Munaut <[email protected]> * * Copyright (C) 2005 Sylvain Munaut <[email protected]> */ #include <linux/err.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/of.h> #include <linux/of_address.h> #include <asm/io.h> #include <asm/mmu.h> #include <linux/fsl/bestcomm/sram.h> /* Struct keeping our 'state' */ struct bcom_sram *bcom_sram = NULL; EXPORT_SYMBOL_GPL(bcom_sram); /* needed for inline functions */ /* ======================================================================== */ /* Public API */ /* ======================================================================== */ /* DO NOT USE in interrupts, if needed in irq handler, we should use the _irqsave version of the spin_locks */ int bcom_sram_init(struct device_node *sram_node, char *owner) { int rv; const u32 *regaddr_p; struct resource res; unsigned int psize; /* Create our state struct */ if (bcom_sram) { printk(KERN_ERR "%s: bcom_sram_init: " "Already initialized !\n", owner); return -EBUSY; } bcom_sram = kmalloc(sizeof(struct bcom_sram), GFP_KERNEL); if (!bcom_sram) { printk(KERN_ERR "%s: bcom_sram_init: " "Couldn't allocate internal state !\n", owner); return -ENOMEM; } /* Get address and size of the sram */ rv = of_address_to_resource(sram_node, 0, &res); if (rv) { printk(KERN_ERR "%s: bcom_sram_init: " "Invalid device node !\n", owner); goto error_free; } bcom_sram->base_phys = res.start; bcom_sram->size = resource_size(&res); /* Request region */ if (!request_mem_region(res.start, resource_size(&res), owner)) { printk(KERN_ERR "%s: bcom_sram_init: " "Couldn't request region !\n", owner); rv = -EBUSY; goto error_free; } /* Map SRAM */ /* sram is not really __iomem */ bcom_sram->base_virt = (void *)ioremap(res.start, resource_size(&res)); if (!bcom_sram->base_virt) { printk(KERN_ERR "%s: bcom_sram_init: " "Map error SRAM zone 0x%08lx (0x%0x)!\n", owner, (long)bcom_sram->base_phys, bcom_sram->size ); rv = -ENOMEM; goto error_release; } /* Create an rheap (defaults to 32 bits word alignment) */ bcom_sram->rh = rh_create(4); /* Attach the free zones */ #if 0 /* Currently disabled ... 
for future use only */ reg_addr_p = of_get_property(sram_node, "available", &psize); #else regaddr_p = NULL; psize = 0; #endif if (!regaddr_p || !psize) { /* Attach the whole zone */ rh_attach_region(bcom_sram->rh, 0, bcom_sram->size); } else { /* Attach each zone independently */ while (psize >= 2 * sizeof(u32)) { phys_addr_t zbase = of_translate_address(sram_node, regaddr_p); rh_attach_region(bcom_sram->rh, zbase - bcom_sram->base_phys, regaddr_p[1]); regaddr_p += 2; psize -= 2 * sizeof(u32); } } /* Init our spinlock */ spin_lock_init(&bcom_sram->lock); return 0; error_release: release_mem_region(res.start, resource_size(&res)); error_free: kfree(bcom_sram); bcom_sram = NULL; return rv; } EXPORT_SYMBOL_GPL(bcom_sram_init); void bcom_sram_cleanup(void) { /* Free resources */ if (bcom_sram) { rh_destroy(bcom_sram->rh); iounmap((void __iomem *)bcom_sram->base_virt); release_mem_region(bcom_sram->base_phys, bcom_sram->size); kfree(bcom_sram); bcom_sram = NULL; } } EXPORT_SYMBOL_GPL(bcom_sram_cleanup); void* bcom_sram_alloc(int size, int align, phys_addr_t *phys) { unsigned long offset; spin_lock(&bcom_sram->lock); offset = rh_alloc_align(bcom_sram->rh, size, align, NULL); spin_unlock(&bcom_sram->lock); if (IS_ERR_VALUE(offset)) return NULL; *phys = bcom_sram->base_phys + offset; return bcom_sram->base_virt + offset; } EXPORT_SYMBOL_GPL(bcom_sram_alloc); void bcom_sram_free(void *ptr) { unsigned long offset; if (!ptr) return; offset = ptr - bcom_sram->base_virt; spin_lock(&bcom_sram->lock); rh_free(bcom_sram->rh, offset); spin_unlock(&bcom_sram->lock); } EXPORT_SYMBOL_GPL(bcom_sram_free);
linux-master
drivers/dma/bestcomm/sram.c
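bcom_sram_alloc() above returns a CPU-visible pointer while also reporting the matching physical address, both derived from the same rheap offset into one SRAM window. Here is a userspace sketch of that bookkeeping with a trivial bump allocator standing in for the rheap; the base address, sizes, and names are made up for illustration.

/*
 * One offset, two addresses: virt = base_virt + off, phys = base_phys + off.
 * align must be a power of two, as with the rheap's alignment parameter.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SRAM_SIZE 1024

static uint8_t sram_backing[SRAM_SIZE];		/* stands in for the mapped window */
static uintptr_t base_phys = 0xf0008000;	/* made-up SRAM bus address */
static size_t next_off;

static void *sram_alloc(size_t size, size_t align, uintptr_t *phys)
{
	size_t off = (next_off + align - 1) & ~(align - 1);

	if (off + size > SRAM_SIZE)
		return NULL;
	next_off = off + size;

	*phys = base_phys + off;	/* what a DMA engine would be given */
	return sram_backing + off;	/* what the CPU dereferences */
}

int main(void)
{
	uintptr_t pa;
	void *va = sram_alloc(64, 32, &pa);

	printf("virt=%p phys=0x%lx\n", va, (unsigned long)pa);
	return 0;
}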
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for MPC52xx processor BestComm peripheral controller * * Copyright (C) 2006-2007 Sylvain Munaut <[email protected]> * Copyright (C) 2005 Varma Electronics Oy, * ( by Andrey Volkov <[email protected]> ) * Copyright (C) 2003-2004 MontaVista, Software, Inc. * ( by Dale Farnsworth <[email protected]> ) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/platform_device.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/mpc52xx.h> #include <linux/fsl/bestcomm/sram.h> #include <linux/fsl/bestcomm/bestcomm_priv.h> #include "linux/fsl/bestcomm/bestcomm.h" #define DRIVER_NAME "bestcomm-core" /* MPC5200 device tree match tables */ static const struct of_device_id mpc52xx_sram_ids[] = { { .compatible = "fsl,mpc5200-sram", }, { .compatible = "mpc5200-sram", }, {} }; struct bcom_engine *bcom_eng = NULL; EXPORT_SYMBOL_GPL(bcom_eng); /* needed for inline functions */ /* ======================================================================== */ /* Public and private API */ /* ======================================================================== */ /* Private API */ struct bcom_task * bcom_task_alloc(int bd_count, int bd_size, int priv_size) { int i, tasknum = -1; struct bcom_task *tsk; /* Don't try to do anything if bestcomm init failed */ if (!bcom_eng) return NULL; /* Get and reserve a task num */ spin_lock(&bcom_eng->lock); for (i=0; i<BCOM_MAX_TASKS; i++) if (!bcom_eng->tdt[i].stop) { /* we use stop as a marker */ bcom_eng->tdt[i].stop = 0xfffffffful; /* dummy addr */ tasknum = i; break; } spin_unlock(&bcom_eng->lock); if (tasknum < 0) return NULL; /* Allocate our structure */ tsk = kzalloc(sizeof(struct bcom_task) + priv_size, GFP_KERNEL); if (!tsk) goto error; tsk->tasknum = tasknum; if (priv_size) tsk->priv = (void*)tsk + sizeof(struct bcom_task); /* Get IRQ of that task */ tsk->irq = irq_of_parse_and_map(bcom_eng->ofnode, tsk->tasknum); if (!tsk->irq) goto error; /* Init the BDs, if needed */ if (bd_count) { tsk->cookie = kmalloc_array(bd_count, sizeof(void *), GFP_KERNEL); if (!tsk->cookie) goto error; tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa); if (!tsk->bd) goto error; memset_io(tsk->bd, 0x00, bd_count * bd_size); tsk->num_bd = bd_count; tsk->bd_size = bd_size; } return tsk; error: if (tsk) { if (tsk->irq) irq_dispose_mapping(tsk->irq); bcom_sram_free(tsk->bd); kfree(tsk->cookie); kfree(tsk); } bcom_eng->tdt[tasknum].stop = 0; return NULL; } EXPORT_SYMBOL_GPL(bcom_task_alloc); void bcom_task_free(struct bcom_task *tsk) { /* Stop the task */ bcom_disable_task(tsk->tasknum); /* Clear TDT */ bcom_eng->tdt[tsk->tasknum].start = 0; bcom_eng->tdt[tsk->tasknum].stop = 0; /* Free everything */ irq_dispose_mapping(tsk->irq); bcom_sram_free(tsk->bd); kfree(tsk->cookie); kfree(tsk); } EXPORT_SYMBOL_GPL(bcom_task_free); int bcom_load_image(int task, u32 *task_image) { struct bcom_task_header *hdr = (struct bcom_task_header *)task_image; struct bcom_tdt *tdt; u32 *desc, *var, *inc; u32 *desc_src, *var_src, *inc_src; /* Safety checks */ if (hdr->magic != BCOM_TASK_MAGIC) { printk(KERN_ERR DRIVER_NAME ": Trying to load invalid microcode\n"); return -EINVAL; } if ((task < 0) || (task >= BCOM_MAX_TASKS)) { printk(KERN_ERR DRIVER_NAME ": Trying to load invalid task %d\n", task); return -EINVAL; } /* Initial load or reload */ tdt = &bcom_eng->tdt[task]; if (tdt->start) { desc = bcom_task_desc(task); if 
(hdr->desc_size != bcom_task_num_descs(task)) { printk(KERN_ERR DRIVER_NAME ": Trying to reload wrong task image " "(%d size %d/%d)!\n", task, hdr->desc_size, bcom_task_num_descs(task)); return -EINVAL; } } else { phys_addr_t start_pa; desc = bcom_sram_alloc(hdr->desc_size * sizeof(u32), 4, &start_pa); if (!desc) return -ENOMEM; tdt->start = start_pa; tdt->stop = start_pa + ((hdr->desc_size-1) * sizeof(u32)); } var = bcom_task_var(task); inc = bcom_task_inc(task); /* Clear & copy */ memset_io(var, 0x00, BCOM_VAR_SIZE); memset_io(inc, 0x00, BCOM_INC_SIZE); desc_src = (u32 *)(hdr + 1); var_src = desc_src + hdr->desc_size; inc_src = var_src + hdr->var_size; memcpy_toio(desc, desc_src, hdr->desc_size * sizeof(u32)); memcpy_toio(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32)); memcpy_toio(inc, inc_src, hdr->inc_size * sizeof(u32)); return 0; } EXPORT_SYMBOL_GPL(bcom_load_image); void bcom_set_initiator(int task, int initiator) { int i; int num_descs; u32 *desc; int next_drd_has_initiator; bcom_set_tcr_initiator(task, initiator); /* Just setting tcr is apparently not enough due to some problem */ /* with it. So we just go thru all the microcode and replace in */ /* the DRD directly */ desc = bcom_task_desc(task); next_drd_has_initiator = 1; num_descs = bcom_task_num_descs(task); for (i=0; i<num_descs; i++, desc++) { if (!bcom_desc_is_drd(*desc)) continue; if (next_drd_has_initiator) if (bcom_desc_initiator(*desc) != BCOM_INITIATOR_ALWAYS) bcom_set_desc_initiator(desc, initiator); next_drd_has_initiator = !bcom_drd_is_extended(*desc); } } EXPORT_SYMBOL_GPL(bcom_set_initiator); /* Public API */ void bcom_enable(struct bcom_task *tsk) { bcom_enable_task(tsk->tasknum); } EXPORT_SYMBOL_GPL(bcom_enable); void bcom_disable(struct bcom_task *tsk) { bcom_disable_task(tsk->tasknum); } EXPORT_SYMBOL_GPL(bcom_disable); /* ======================================================================== */ /* Engine init/cleanup */ /* ======================================================================== */ /* Function Descriptor table */ /* this will need to be updated if Freescale changes their task code FDT */ static u32 fdt_ops[] = { 0xa0045670, /* FDT[48] - load_acc() */ 0x80045670, /* FDT[49] - unload_acc() */ 0x21800000, /* FDT[50] - and() */ 0x21e00000, /* FDT[51] - or() */ 0x21500000, /* FDT[52] - xor() */ 0x21400000, /* FDT[53] - andn() */ 0x21500000, /* FDT[54] - not() */ 0x20400000, /* FDT[55] - add() */ 0x20500000, /* FDT[56] - sub() */ 0x20800000, /* FDT[57] - lsh() */ 0x20a00000, /* FDT[58] - rsh() */ 0xc0170000, /* FDT[59] - crc8() */ 0xc0145670, /* FDT[60] - crc16() */ 0xc0345670, /* FDT[61] - crc32() */ 0xa0076540, /* FDT[62] - endian32() */ 0xa0000760, /* FDT[63] - endian16() */ }; static int bcom_engine_init(void) { int task; phys_addr_t tdt_pa, ctx_pa, var_pa, fdt_pa; unsigned int tdt_size, ctx_size, var_size, fdt_size; /* Allocate & clear SRAM zones for FDT, TDTs, contexts and vars/incs */ tdt_size = BCOM_MAX_TASKS * sizeof(struct bcom_tdt); ctx_size = BCOM_MAX_TASKS * BCOM_CTX_SIZE; var_size = BCOM_MAX_TASKS * (BCOM_VAR_SIZE + BCOM_INC_SIZE); fdt_size = BCOM_FDT_SIZE; bcom_eng->tdt = bcom_sram_alloc(tdt_size, sizeof(u32), &tdt_pa); bcom_eng->ctx = bcom_sram_alloc(ctx_size, BCOM_CTX_ALIGN, &ctx_pa); bcom_eng->var = bcom_sram_alloc(var_size, BCOM_VAR_ALIGN, &var_pa); bcom_eng->fdt = bcom_sram_alloc(fdt_size, BCOM_FDT_ALIGN, &fdt_pa); if (!bcom_eng->tdt || !bcom_eng->ctx || !bcom_eng->var || !bcom_eng->fdt) { printk(KERN_ERR "DMA: SRAM alloc failed in engine init !\n"); 
bcom_sram_free(bcom_eng->tdt); bcom_sram_free(bcom_eng->ctx); bcom_sram_free(bcom_eng->var); bcom_sram_free(bcom_eng->fdt); return -ENOMEM; } memset_io(bcom_eng->tdt, 0x00, tdt_size); memset_io(bcom_eng->ctx, 0x00, ctx_size); memset_io(bcom_eng->var, 0x00, var_size); memset_io(bcom_eng->fdt, 0x00, fdt_size); /* Copy the FDT for the EU#3 */ memcpy_toio(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops)); /* Initialize Task base structure */ for (task=0; task<BCOM_MAX_TASKS; task++) { out_be16(&bcom_eng->regs->tcr[task], 0); out_8(&bcom_eng->regs->ipr[task], 0); bcom_eng->tdt[task].context = ctx_pa; bcom_eng->tdt[task].var = var_pa; bcom_eng->tdt[task].fdt = fdt_pa; var_pa += BCOM_VAR_SIZE + BCOM_INC_SIZE; ctx_pa += BCOM_CTX_SIZE; } out_be32(&bcom_eng->regs->taskBar, tdt_pa); /* Init 'always' initiator */ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ALWAYS], BCOM_IPR_ALWAYS); /* Disable COMM Bus Prefetch on the original 5200; it's broken */ if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR) bcom_disable_prefetch(); /* Init lock */ spin_lock_init(&bcom_eng->lock); return 0; } static void bcom_engine_cleanup(void) { int task; /* Stop all tasks */ for (task=0; task<BCOM_MAX_TASKS; task++) { out_be16(&bcom_eng->regs->tcr[task], 0); out_8(&bcom_eng->regs->ipr[task], 0); } out_be32(&bcom_eng->regs->taskBar, 0ul); /* Release the SRAM zones */ bcom_sram_free(bcom_eng->tdt); bcom_sram_free(bcom_eng->ctx); bcom_sram_free(bcom_eng->var); bcom_sram_free(bcom_eng->fdt); } /* ======================================================================== */ /* OF platform driver */ /* ======================================================================== */ static int mpc52xx_bcom_probe(struct platform_device *op) { struct device_node *ofn_sram; struct resource res_bcom; int rv; /* Inform user we're ok so far */ printk(KERN_INFO "DMA: MPC52xx BestComm driver\n"); /* Get the bestcomm node */ of_node_get(op->dev.of_node); /* Prepare SRAM */ ofn_sram = of_find_matching_node(NULL, mpc52xx_sram_ids); if (!ofn_sram) { printk(KERN_ERR DRIVER_NAME ": " "No SRAM found in device tree\n"); rv = -ENODEV; goto error_ofput; } rv = bcom_sram_init(ofn_sram, DRIVER_NAME); of_node_put(ofn_sram); if (rv) { printk(KERN_ERR DRIVER_NAME ": " "Error in SRAM init\n"); goto error_ofput; } /* Get a clean struct */ bcom_eng = kzalloc(sizeof(struct bcom_engine), GFP_KERNEL); if (!bcom_eng) { rv = -ENOMEM; goto error_sramclean; } /* Save the node */ bcom_eng->ofnode = op->dev.of_node; /* Get, reserve & map io */ if (of_address_to_resource(op->dev.of_node, 0, &res_bcom)) { printk(KERN_ERR DRIVER_NAME ": " "Can't get resource\n"); rv = -EINVAL; goto error_sramclean; } if (!request_mem_region(res_bcom.start, resource_size(&res_bcom), DRIVER_NAME)) { printk(KERN_ERR DRIVER_NAME ": " "Can't request registers region\n"); rv = -EBUSY; goto error_sramclean; } bcom_eng->regs_base = res_bcom.start; bcom_eng->regs = ioremap(res_bcom.start, sizeof(struct mpc52xx_sdma)); if (!bcom_eng->regs) { printk(KERN_ERR DRIVER_NAME ": " "Can't map registers\n"); rv = -ENOMEM; goto error_release; } /* Now, do the real init */ rv = bcom_engine_init(); if (rv) goto error_unmap; /* Done ! 
*/ printk(KERN_INFO "DMA: MPC52xx BestComm engine @%08lx ok !\n", (long)bcom_eng->regs_base); return 0; /* Error path */ error_unmap: iounmap(bcom_eng->regs); error_release: release_mem_region(res_bcom.start, sizeof(struct mpc52xx_sdma)); error_sramclean: kfree(bcom_eng); bcom_sram_cleanup(); error_ofput: of_node_put(op->dev.of_node); printk(KERN_ERR "DMA: MPC52xx BestComm init failed !\n"); return rv; } static int mpc52xx_bcom_remove(struct platform_device *op) { /* Clean up the engine */ bcom_engine_cleanup(); /* Cleanup SRAM */ bcom_sram_cleanup(); /* Release regs */ iounmap(bcom_eng->regs); release_mem_region(bcom_eng->regs_base, sizeof(struct mpc52xx_sdma)); /* Release the node */ of_node_put(bcom_eng->ofnode); /* Release memory */ kfree(bcom_eng); bcom_eng = NULL; return 0; } static const struct of_device_id mpc52xx_bcom_of_match[] = { { .compatible = "fsl,mpc5200-bestcomm", }, { .compatible = "mpc5200-bestcomm", }, {}, }; MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match); static struct platform_driver mpc52xx_bcom_of_platform_driver = { .probe = mpc52xx_bcom_probe, .remove = mpc52xx_bcom_remove, .driver = { .name = DRIVER_NAME, .of_match_table = mpc52xx_bcom_of_match, }, }; /* ======================================================================== */ /* Module */ /* ======================================================================== */ static int __init mpc52xx_bcom_init(void) { return platform_driver_register(&mpc52xx_bcom_of_platform_driver); } static void __exit mpc52xx_bcom_exit(void) { platform_driver_unregister(&mpc52xx_bcom_of_platform_driver); } /* If we're not a module, we must make sure everything is setup before */ /* anyone tries to use us ... that's why we use subsys_initcall instead */ /* of module_init. */ subsys_initcall(mpc52xx_bcom_init); module_exit(mpc52xx_bcom_exit); MODULE_DESCRIPTION("Freescale MPC52xx BestComm DMA"); MODULE_AUTHOR("Sylvain Munaut <[email protected]>"); MODULE_AUTHOR("Andrey Volkov <[email protected]>"); MODULE_AUTHOR("Dale Farnsworth <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/dma/bestcomm/bestcomm.c
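bcom_load_image() above consumes the task images defined elsewhere in this directory: a four-word header (magic "BCTK", then desc/var/inc sizes and first_var packed as bytes, e.g. 0x0f040609 meaning 15/4/6/9), immediately followed by the descriptor, variable, and increment words, with the variables copied in starting at index first_var. The sketch below parses a tiny made-up image with that layout; the shift-based byte extraction and the example array are illustrative assumptions, not kernel code.

/*
 * Walk a BestComm-style task image: header, then desc[], var[], inc[].
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BCOM_TASK_MAGIC 0x4243544b	/* "BCTK" */

int main(void)
{
	/* tiny made-up image: 2 descriptors, 1 variable (first_var = 3), 1 inc */
	static const uint32_t image[] = {
		BCOM_TASK_MAGIC, 0x02010103, 0x00000000, 0x00000000,
		0x60000005, 0x000001f8,		/* descriptors */
		0x40000000,			/* VAR[3] */
		0xe0000000,			/* INC[0] */
	};
	uint32_t desc_size = (image[1] >> 24) & 0xff;
	uint32_t var_size  = (image[1] >> 16) & 0xff;
	uint32_t inc_size  = (image[1] >>  8) & 0xff;
	uint32_t first_var = image[1] & 0xff;
	const uint32_t *desc = &image[4];		/* like (u32 *)(hdr + 1) */
	const uint32_t *var  = desc + desc_size;	/* copied to var[first_var..] */
	const uint32_t *inc  = var + var_size;

	if (image[0] != BCOM_TASK_MAGIC) {
		fprintf(stderr, "invalid microcode\n");
		return 1;
	}
	printf("descs=%" PRIu32 " vars=%" PRIu32 " (from VAR[%" PRIu32 "]) incs=%" PRIu32 "\n",
	       desc_size, var_size, first_var, inc_size);
	printf("first desc=0x%08" PRIx32 " first var=0x%08" PRIx32 " first inc=0x%08" PRIx32 "\n",
	       desc[0], var[0], inc[0]);
	return 0;
}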
// SPDX-License-Identifier: GPL-2.0-only /* * Bestcomm FEC TX task microcode * * Copyright (c) 2004 Freescale Semiconductor, Inc. * * Automatically created based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex * on Tue Mar 22 11:19:29 2005 GMT */ #include <asm/types.h> /* * The header consists of the following fields: * u32 magic; * u8 desc_size; * u8 var_size; * u8 inc_size; * u8 first_var; * u8 reserved[8]; * * The size fields contain the number of 32-bit words. */ u32 bcom_fec_tx_task[] = { /* header */ 0x4243544b, 0x2407070d, 0x00000000, 0x00000000, /* Task descriptors */ 0x8018001b, /* LCD: idx0 = var0; idx0 <= var0; idx0 += inc3 */ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */ 0x01ccfc0d, /* DRD2B1: var7 = EU3(); EU3(*idx0,var13) */ 0x8082a123, /* LCD: idx0 = var1, idx1 = var5; idx1 <= var4; idx0 += inc4, idx1 += inc3 */ 0x10801418, /* DRD1A: var5 = var3; FN=0 MORE init=4 WS=0 RS=0 */ 0xf88103a4, /* LCDEXT: idx2 = *idx1, idx3 = var2; idx2 < var14; idx2 += inc4, idx3 += inc4 */ 0x801a6024, /* LCD: idx4 = var0; ; idx4 += inc4 */ 0x10001708, /* DRD1A: var5 = idx1; FN=0 MORE init=0 WS=0 RS=0 */ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */ 0x0cccfccf, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var15) */ 0x991a002c, /* LCD: idx2 = idx2, idx3 = idx4; idx2 once var0; idx2 += inc5, idx3 += inc4 */ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */ 0x024cfc4d, /* DRD2B1: var9 = EU3(); EU3(*idx1,var13) */ 0x60000003, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=3 EXT init=0 WS=0 RS=0 */ 0x0cccf247, /* DRD2B1: *idx3 = EU3(); EU3(var9,var7) */ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */ 0xb8c80029, /* LCD: idx3 = *(idx1 + var0000001a); idx3 once var0; idx3 += inc5 */ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */ 0x088cf8d1, /* DRD2B1: idx2 = EU3(); EU3(idx3,var17) */ 0x00002f10, /* DRD1A: var11 = idx2; FN=0 init=0 WS=0 RS=0 */ 0x99198432, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var16; idx2 += inc6, idx3 += inc2 */ 0x008ac398, /* DRD1A: *idx0 = *idx3; FN=0 init=4 WS=1 RS=1 */ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */ 0x9999802d, /* LCD: idx3 = idx3; idx3 once var0; idx3 += inc5 */ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */ 0x048cfc53, /* DRD2B1: var18 = EU3(); EU3(*idx1,var19) */ 0x60000008, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=8 EXT init=0 WS=0 RS=0 */ 0x088cf48b, /* DRD2B1: idx2 = EU3(); EU3(var18,var11) */ 0x99198481, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var18; idx2 += inc0, idx3 += inc1 */ 0x009ec398, /* DRD1A: *idx0 = *idx3; FN=0 init=4 WS=3 RS=3 */ 0x991983b2, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var14; idx2 += inc6, idx3 += inc2 */ 0x088ac398, /* DRD1A: *idx0 = *idx3; FN=0 TFD init=4 WS=1 RS=1 */ 0x9919002d, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc5 */ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */ 0x0c4cf88e, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var14) */ 0x000001f8, /* NOP */ /* VAR[13]-VAR[19] */ 0x0c000000, 0x40000000, 0x7fff7fff, 0x00000000, 0x00000003, 0x40000004, 0x43ffffff, /* INC[0]-INC[6] */ 0x40000000, 0xe0000000, 0xe0000000, 0xa0000008, 0x20000000, 0x00000000, 0x4000ffff, };
linux-master
drivers/dma/bestcomm/bcom_fec_tx_task.c
// SPDX-License-Identifier: GPL-2.0-only /* * Bestcomm ATA task driver * * Patterned after bestcomm/fec.c by Dale Farnsworth <[email protected]> * 2003-2004 (c) MontaVista, Software, Inc. * * Copyright (C) 2006-2007 Sylvain Munaut <[email protected]> * Copyright (C) 2006 Freescale - John Rigby */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <asm/io.h> #include <linux/fsl/bestcomm/bestcomm.h> #include <linux/fsl/bestcomm/bestcomm_priv.h> #include <linux/fsl/bestcomm/ata.h> /* ======================================================================== */ /* Task image/var/inc */ /* ======================================================================== */ /* ata task image */ extern u32 bcom_ata_task[]; /* ata task vars that need to be set before enabling the task */ struct bcom_ata_var { u32 enable; /* (u16*) address of task's control register */ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */ u32 bd_start; /* (struct bcom_bd*) current bd */ u32 buffer_size; /* size of receive buffer */ }; /* ata task incs that need to be set before enabling the task */ struct bcom_ata_inc { u16 pad0; s16 incr_bytes; u16 pad1; s16 incr_dst; u16 pad2; s16 incr_src; }; /* ======================================================================== */ /* Task support code */ /* ======================================================================== */ struct bcom_task * bcom_ata_init(int queue_len, int maxbufsize) { struct bcom_task *tsk; struct bcom_ata_var *var; struct bcom_ata_inc *inc; /* Prefetch breaks ATA DMA. Turn it off for ATA DMA */ bcom_disable_prefetch(); tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_ata_bd), 0); if (!tsk) return NULL; tsk->flags = BCOM_FLAGS_NONE; bcom_ata_reset_bd(tsk); var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum); inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum); if (bcom_load_image(tsk->tasknum, bcom_ata_task)) { bcom_task_free(tsk); return NULL; } var->enable = bcom_eng->regs_base + offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]); var->bd_base = tsk->bd_pa; var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size); var->bd_start = tsk->bd_pa; var->buffer_size = maxbufsize; /* Configure some stuff */ bcom_set_task_pragma(tsk->tasknum, BCOM_ATA_PRAGMA); bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum); out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ATA_RX], BCOM_IPR_ATA_RX); out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ATA_TX], BCOM_IPR_ATA_TX); out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */ return tsk; } EXPORT_SYMBOL_GPL(bcom_ata_init); void bcom_ata_rx_prepare(struct bcom_task *tsk) { struct bcom_ata_inc *inc; inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum); inc->incr_bytes = -(s16)sizeof(u32); inc->incr_src = 0; inc->incr_dst = sizeof(u32); bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ATA_RX); } EXPORT_SYMBOL_GPL(bcom_ata_rx_prepare); void bcom_ata_tx_prepare(struct bcom_task *tsk) { struct bcom_ata_inc *inc; inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum); inc->incr_bytes = -(s16)sizeof(u32); inc->incr_src = sizeof(u32); inc->incr_dst = 0; bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ATA_TX); } EXPORT_SYMBOL_GPL(bcom_ata_tx_prepare); void bcom_ata_reset_bd(struct bcom_task *tsk) { struct bcom_ata_var *var; /* Reset all BD */ memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); tsk->index = 0; tsk->outdex = 0; var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum); var->bd_start = 
var->bd_base; } EXPORT_SYMBOL_GPL(bcom_ata_reset_bd); void bcom_ata_release(struct bcom_task *tsk) { /* Nothing special for the ATA tasks */ bcom_task_free(tsk); } EXPORT_SYMBOL_GPL(bcom_ata_release); MODULE_DESCRIPTION("BestComm ATA task driver"); MODULE_AUTHOR("John Rigby"); MODULE_LICENSE("GPL v2");
linux-master
drivers/dma/bestcomm/ata.c
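bcom_ata_init() above programs the task variables with bd_base, bd_last and bd_start, where bd_last = bd_base + (num_bd - 1) * bd_size and the microcode wraps from the last descriptor back to the first. The sketch below only walks that address arithmetic with made-up numbers; it is not the buffer-descriptor layout the hardware actually uses.

/*
 * Ring addressing: last element sits at base + (n - 1) * size,
 * and advancing past it wraps back to base.
 */
#include <stdio.h>

int main(void)
{
	unsigned int bd_pa = 0x1000;	/* hypothetical ring base address */
	unsigned int bd_size = 8;	/* illustrative descriptor size in bytes */
	unsigned int num_bd = 4;
	unsigned int bd_base = bd_pa;
	unsigned int bd_last = bd_pa + (num_bd - 1) * bd_size;
	unsigned int cur = bd_base;
	int i;

	for (i = 0; i < 6; i++) {
		printf("bd #%d at 0x%x\n", i, cur);
		cur = (cur == bd_last) ? bd_base : cur + bd_size;
	}
	return 0;
}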
// SPDX-License-Identifier: GPL-2.0-only /* * Bestcomm FEC tasks driver * * Copyright (C) 2006-2007 Sylvain Munaut <[email protected]> * Copyright (C) 2003-2004 MontaVista, Software, Inc. * ( by Dale Farnsworth <[email protected]> ) */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <asm/io.h> #include <linux/fsl/bestcomm/bestcomm.h> #include <linux/fsl/bestcomm/bestcomm_priv.h> #include <linux/fsl/bestcomm/fec.h> /* ======================================================================== */ /* Task image/var/inc */ /* ======================================================================== */ /* fec tasks images */ extern u32 bcom_fec_rx_task[]; extern u32 bcom_fec_tx_task[]; /* rx task vars that need to be set before enabling the task */ struct bcom_fec_rx_var { u32 enable; /* (u16*) address of task's control register */ u32 fifo; /* (u32*) address of fec's fifo */ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */ u32 bd_start; /* (struct bcom_bd*) current bd */ u32 buffer_size; /* size of receive buffer */ }; /* rx task incs that need to be set before enabling the task */ struct bcom_fec_rx_inc { u16 pad0; s16 incr_bytes; u16 pad1; s16 incr_dst; u16 pad2; s16 incr_dst_ma; }; /* tx task vars that need to be set before enabling the task */ struct bcom_fec_tx_var { u32 DRD; /* (u32*) address of self-modified DRD */ u32 fifo; /* (u32*) address of fec's fifo */ u32 enable; /* (u16*) address of task's control register */ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */ u32 bd_start; /* (struct bcom_bd*) current bd */ u32 buffer_size; /* set by uCode for each packet */ }; /* tx task incs that need to be set before enabling the task */ struct bcom_fec_tx_inc { u16 pad0; s16 incr_bytes; u16 pad1; s16 incr_src; u16 pad2; s16 incr_src_ma; }; /* private structure in the task */ struct bcom_fec_priv { phys_addr_t fifo; int maxbufsize; }; /* ======================================================================== */ /* Task support code */ /* ======================================================================== */ struct bcom_task * bcom_fec_rx_init(int queue_len, phys_addr_t fifo, int maxbufsize) { struct bcom_task *tsk; struct bcom_fec_priv *priv; tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd), sizeof(struct bcom_fec_priv)); if (!tsk) return NULL; tsk->flags = BCOM_FLAGS_NONE; priv = tsk->priv; priv->fifo = fifo; priv->maxbufsize = maxbufsize; if (bcom_fec_rx_reset(tsk)) { bcom_task_free(tsk); return NULL; } return tsk; } EXPORT_SYMBOL_GPL(bcom_fec_rx_init); int bcom_fec_rx_reset(struct bcom_task *tsk) { struct bcom_fec_priv *priv = tsk->priv; struct bcom_fec_rx_var *var; struct bcom_fec_rx_inc *inc; /* Shutdown the task */ bcom_disable_task(tsk->tasknum); /* Reset the microcode */ var = (struct bcom_fec_rx_var *) bcom_task_var(tsk->tasknum); inc = (struct bcom_fec_rx_inc *) bcom_task_inc(tsk->tasknum); if (bcom_load_image(tsk->tasknum, bcom_fec_rx_task)) return -1; var->enable = bcom_eng->regs_base + offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]); var->fifo = (u32) priv->fifo; var->bd_base = tsk->bd_pa; var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size); var->bd_start = tsk->bd_pa; var->buffer_size = priv->maxbufsize; inc->incr_bytes = -(s16)sizeof(u32); /* These should be in the */ inc->incr_dst = sizeof(u32); /* task image, but we stick */ inc->incr_dst_ma= sizeof(u8); /* to the official ones */ 
/* Reset the BDs */ tsk->index = 0; tsk->outdex = 0; memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); /* Configure some stuff */ bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_RX_BD_PRAGMA); bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum); out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_RX], BCOM_IPR_FEC_RX); out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */ return 0; } EXPORT_SYMBOL_GPL(bcom_fec_rx_reset); void bcom_fec_rx_release(struct bcom_task *tsk) { /* Nothing special for the FEC tasks */ bcom_task_free(tsk); } EXPORT_SYMBOL_GPL(bcom_fec_rx_release); /* Return 2nd to last DRD */ /* This is an ugly hack, but at least it's only done once at initialization */ static u32 *self_modified_drd(int tasknum) { u32 *desc; int num_descs; int drd_count; int i; num_descs = bcom_task_num_descs(tasknum); desc = bcom_task_desc(tasknum) + num_descs - 1; drd_count = 0; for (i=0; i<num_descs; i++, desc--) if (bcom_desc_is_drd(*desc) && ++drd_count == 3) break; return desc; } struct bcom_task * bcom_fec_tx_init(int queue_len, phys_addr_t fifo) { struct bcom_task *tsk; struct bcom_fec_priv *priv; tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd), sizeof(struct bcom_fec_priv)); if (!tsk) return NULL; tsk->flags = BCOM_FLAGS_ENABLE_TASK; priv = tsk->priv; priv->fifo = fifo; if (bcom_fec_tx_reset(tsk)) { bcom_task_free(tsk); return NULL; } return tsk; } EXPORT_SYMBOL_GPL(bcom_fec_tx_init); int bcom_fec_tx_reset(struct bcom_task *tsk) { struct bcom_fec_priv *priv = tsk->priv; struct bcom_fec_tx_var *var; struct bcom_fec_tx_inc *inc; /* Shutdown the task */ bcom_disable_task(tsk->tasknum); /* Reset the microcode */ var = (struct bcom_fec_tx_var *) bcom_task_var(tsk->tasknum); inc = (struct bcom_fec_tx_inc *) bcom_task_inc(tsk->tasknum); if (bcom_load_image(tsk->tasknum, bcom_fec_tx_task)) return -1; var->enable = bcom_eng->regs_base + offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]); var->fifo = (u32) priv->fifo; var->DRD = bcom_sram_va2pa(self_modified_drd(tsk->tasknum)); var->bd_base = tsk->bd_pa; var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size); var->bd_start = tsk->bd_pa; inc->incr_bytes = -(s16)sizeof(u32); /* These should be in the */ inc->incr_src = sizeof(u32); /* task image, but we stick */ inc->incr_src_ma= sizeof(u8); /* to the official ones */ /* Reset the BDs */ tsk->index = 0; tsk->outdex = 0; memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); /* Configure some stuff */ bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_TX_BD_PRAGMA); bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum); out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_TX], BCOM_IPR_FEC_TX); out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */ return 0; } EXPORT_SYMBOL_GPL(bcom_fec_tx_reset); void bcom_fec_tx_release(struct bcom_task *tsk) { /* Nothing special for the FEC tasks */ bcom_task_free(tsk); } EXPORT_SYMBOL_GPL(bcom_fec_tx_release); MODULE_DESCRIPTION("BestComm FEC tasks driver"); MODULE_AUTHOR("Dale Farnsworth <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/dma/bestcomm/fec.c
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for MPC52xx processor BestComm General Buffer Descriptor * * Copyright (C) 2007 Sylvain Munaut <[email protected]> * Copyright (C) 2006 AppSpec Computer Technologies Corp. * Jeff Gibbons <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/types.h> #include <asm/errno.h> #include <asm/io.h> #include <asm/mpc52xx.h> #include <asm/mpc52xx_psc.h> #include <linux/fsl/bestcomm/bestcomm.h> #include <linux/fsl/bestcomm/bestcomm_priv.h> #include <linux/fsl/bestcomm/gen_bd.h> /* ======================================================================== */ /* Task image/var/inc */ /* ======================================================================== */ /* gen_bd tasks images */ extern u32 bcom_gen_bd_rx_task[]; extern u32 bcom_gen_bd_tx_task[]; /* rx task vars that need to be set before enabling the task */ struct bcom_gen_bd_rx_var { u32 enable; /* (u16*) address of task's control register */ u32 fifo; /* (u32*) address of gen_bd's fifo */ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */ u32 bd_start; /* (struct bcom_bd*) current bd */ u32 buffer_size; /* size of receive buffer */ }; /* rx task incs that need to be set before enabling the task */ struct bcom_gen_bd_rx_inc { u16 pad0; s16 incr_bytes; u16 pad1; s16 incr_dst; }; /* tx task vars that need to be set before enabling the task */ struct bcom_gen_bd_tx_var { u32 fifo; /* (u32*) address of gen_bd's fifo */ u32 enable; /* (u16*) address of task's control register */ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */ u32 bd_start; /* (struct bcom_bd*) current bd */ u32 buffer_size; /* set by uCode for each packet */ }; /* tx task incs that need to be set before enabling the task */ struct bcom_gen_bd_tx_inc { u16 pad0; s16 incr_bytes; u16 pad1; s16 incr_src; u16 pad2; s16 incr_src_ma; }; /* private structure */ struct bcom_gen_bd_priv { phys_addr_t fifo; int initiator; int ipr; int maxbufsize; }; /* ======================================================================== */ /* Task support code */ /* ======================================================================== */ struct bcom_task * bcom_gen_bd_rx_init(int queue_len, phys_addr_t fifo, int initiator, int ipr, int maxbufsize) { struct bcom_task *tsk; struct bcom_gen_bd_priv *priv; tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd), sizeof(struct bcom_gen_bd_priv)); if (!tsk) return NULL; tsk->flags = BCOM_FLAGS_NONE; priv = tsk->priv; priv->fifo = fifo; priv->initiator = initiator; priv->ipr = ipr; priv->maxbufsize = maxbufsize; if (bcom_gen_bd_rx_reset(tsk)) { bcom_task_free(tsk); return NULL; } return tsk; } EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_init); int bcom_gen_bd_rx_reset(struct bcom_task *tsk) { struct bcom_gen_bd_priv *priv = tsk->priv; struct bcom_gen_bd_rx_var *var; struct bcom_gen_bd_rx_inc *inc; /* Shutdown the task */ bcom_disable_task(tsk->tasknum); /* Reset the microcode */ var = (struct bcom_gen_bd_rx_var *) bcom_task_var(tsk->tasknum); inc = (struct bcom_gen_bd_rx_inc *) bcom_task_inc(tsk->tasknum); if (bcom_load_image(tsk->tasknum, bcom_gen_bd_rx_task)) return -1; var->enable = bcom_eng->regs_base + offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]); var->fifo = (u32) priv->fifo; var->bd_base = tsk->bd_pa; var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size); var->bd_start = tsk->bd_pa; var->buffer_size 
= priv->maxbufsize; inc->incr_bytes = -(s16)sizeof(u32); inc->incr_dst = sizeof(u32); /* Reset the BDs */ tsk->index = 0; tsk->outdex = 0; memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); /* Configure some stuff */ bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_RX_BD_PRAGMA); bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum); out_8(&bcom_eng->regs->ipr[priv->initiator], priv->ipr); bcom_set_initiator(tsk->tasknum, priv->initiator); out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */ return 0; } EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_reset); void bcom_gen_bd_rx_release(struct bcom_task *tsk) { /* Nothing special for the GenBD tasks */ bcom_task_free(tsk); } EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_release); extern struct bcom_task * bcom_gen_bd_tx_init(int queue_len, phys_addr_t fifo, int initiator, int ipr) { struct bcom_task *tsk; struct bcom_gen_bd_priv *priv; tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd), sizeof(struct bcom_gen_bd_priv)); if (!tsk) return NULL; tsk->flags = BCOM_FLAGS_NONE; priv = tsk->priv; priv->fifo = fifo; priv->initiator = initiator; priv->ipr = ipr; if (bcom_gen_bd_tx_reset(tsk)) { bcom_task_free(tsk); return NULL; } return tsk; } EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_init); int bcom_gen_bd_tx_reset(struct bcom_task *tsk) { struct bcom_gen_bd_priv *priv = tsk->priv; struct bcom_gen_bd_tx_var *var; struct bcom_gen_bd_tx_inc *inc; /* Shutdown the task */ bcom_disable_task(tsk->tasknum); /* Reset the microcode */ var = (struct bcom_gen_bd_tx_var *) bcom_task_var(tsk->tasknum); inc = (struct bcom_gen_bd_tx_inc *) bcom_task_inc(tsk->tasknum); if (bcom_load_image(tsk->tasknum, bcom_gen_bd_tx_task)) return -1; var->enable = bcom_eng->regs_base + offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]); var->fifo = (u32) priv->fifo; var->bd_base = tsk->bd_pa; var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size); var->bd_start = tsk->bd_pa; inc->incr_bytes = -(s16)sizeof(u32); inc->incr_src = sizeof(u32); inc->incr_src_ma = sizeof(u8); /* Reset the BDs */ tsk->index = 0; tsk->outdex = 0; memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); /* Configure some stuff */ bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_TX_BD_PRAGMA); bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum); out_8(&bcom_eng->regs->ipr[priv->initiator], priv->ipr); bcom_set_initiator(tsk->tasknum, priv->initiator); out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */ return 0; } EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_reset); void bcom_gen_bd_tx_release(struct bcom_task *tsk) { /* Nothing special for the GenBD tasks */ bcom_task_free(tsk); } EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_release); /* --------------------------------------------------------------------- * PSC support code */ /** * bcom_psc_parameters - Bestcomm initialization value table for PSC devices * * This structure is only used internally. It is a lookup table for PSC * specific parameters to bestcomm tasks. 
*/ static struct bcom_psc_params { int rx_initiator; int rx_ipr; int tx_initiator; int tx_ipr; } bcom_psc_params[] = { [0] = { .rx_initiator = BCOM_INITIATOR_PSC1_RX, .rx_ipr = BCOM_IPR_PSC1_RX, .tx_initiator = BCOM_INITIATOR_PSC1_TX, .tx_ipr = BCOM_IPR_PSC1_TX, }, [1] = { .rx_initiator = BCOM_INITIATOR_PSC2_RX, .rx_ipr = BCOM_IPR_PSC2_RX, .tx_initiator = BCOM_INITIATOR_PSC2_TX, .tx_ipr = BCOM_IPR_PSC2_TX, }, [2] = { .rx_initiator = BCOM_INITIATOR_PSC3_RX, .rx_ipr = BCOM_IPR_PSC3_RX, .tx_initiator = BCOM_INITIATOR_PSC3_TX, .tx_ipr = BCOM_IPR_PSC3_TX, }, [3] = { .rx_initiator = BCOM_INITIATOR_PSC4_RX, .rx_ipr = BCOM_IPR_PSC4_RX, .tx_initiator = BCOM_INITIATOR_PSC4_TX, .tx_ipr = BCOM_IPR_PSC4_TX, }, [4] = { .rx_initiator = BCOM_INITIATOR_PSC5_RX, .rx_ipr = BCOM_IPR_PSC5_RX, .tx_initiator = BCOM_INITIATOR_PSC5_TX, .tx_ipr = BCOM_IPR_PSC5_TX, }, [5] = { .rx_initiator = BCOM_INITIATOR_PSC6_RX, .rx_ipr = BCOM_IPR_PSC6_RX, .tx_initiator = BCOM_INITIATOR_PSC6_TX, .tx_ipr = BCOM_IPR_PSC6_TX, }, }; /** * bcom_psc_gen_bd_rx_init - Allocate a receive bcom_task for a PSC port * @psc_num: Number of the PSC to allocate a task for * @queue_len: number of buffer descriptors to allocate for the task * @fifo: physical address of FIFO register * @maxbufsize: Maximum receive data size in bytes. * * Allocate a bestcomm task structure for receiving data from a PSC. */ struct bcom_task * bcom_psc_gen_bd_rx_init(unsigned psc_num, int queue_len, phys_addr_t fifo, int maxbufsize) { if (psc_num >= MPC52xx_PSC_MAXNUM) return NULL; return bcom_gen_bd_rx_init(queue_len, fifo, bcom_psc_params[psc_num].rx_initiator, bcom_psc_params[psc_num].rx_ipr, maxbufsize); } EXPORT_SYMBOL_GPL(bcom_psc_gen_bd_rx_init); /** * bcom_psc_gen_bd_tx_init - Allocate a transmit bcom_task for a PSC port * @psc_num: Number of the PSC to allocate a task for * @queue_len: number of buffer descriptors to allocate for the task * @fifo: physical address of FIFO register * * Allocate a bestcomm task structure for transmitting data to a PSC. */ struct bcom_task * bcom_psc_gen_bd_tx_init(unsigned psc_num, int queue_len, phys_addr_t fifo) { if (psc_num >= MPC52xx_PSC_MAXNUM) return NULL; return bcom_gen_bd_tx_init(queue_len, fifo, bcom_psc_params[psc_num].tx_initiator, bcom_psc_params[psc_num].tx_ipr); } EXPORT_SYMBOL_GPL(bcom_psc_gen_bd_tx_init); MODULE_DESCRIPTION("BestComm General Buffer Descriptor tasks driver"); MODULE_AUTHOR("Jeff Gibbons <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/dma/bestcomm/gen_bd.c
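The gen_bd code above only allocates and resets a BestComm task; filling and draining the buffer-descriptor ring is left to the client driver through the generic bestcomm queue helpers. Below is a minimal, hedged sketch of how a PSC client might bring up the RX side with bcom_psc_gen_bd_rx_init() and pre-fill the ring before enabling the task. The queue length, buffer size, FIFO address and the handling of bd->status are illustrative assumptions, not taken from this file; the in-tree bestcomm users remain the authoritative reference.

#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/fsl/bestcomm/bestcomm.h>
#include <linux/fsl/bestcomm/gen_bd.h>

#define EX_QUEUE_LEN	16	/* assumed ring size */
#define EX_BUF_SIZE	512	/* assumed per-buffer size */

static struct bcom_task *ex_rx_task;

/* Hedged sketch: allocate the RX task for a PSC and pre-fill its BD ring */
static int ex_psc_rx_setup(struct device *dev, unsigned int psc_num,
			   phys_addr_t fifo_pa)
{
	ex_rx_task = bcom_psc_gen_bd_rx_init(psc_num, EX_QUEUE_LEN,
					     fifo_pa, EX_BUF_SIZE);
	if (!ex_rx_task)
		return -ENOMEM;

	while (!bcom_queue_full(ex_rx_task)) {
		struct bcom_gen_bd *bd;
		void *buf = kmalloc(EX_BUF_SIZE, GFP_KERNEL);

		if (!buf)
			break;

		bd = (struct bcom_gen_bd *)bcom_prepare_next_buffer(ex_rx_task);
		bd->status = 0;	/* status/length use is task specific; left 0 in this sketch */
		bd->buf_pa = dma_map_single(dev, buf, EX_BUF_SIZE,
					    DMA_FROM_DEVICE);
		bcom_submit_next_buffer(ex_rx_task, buf);
	}

	bcom_enable(ex_rx_task);
	return 0;
}

static void ex_psc_rx_teardown(void)
{
	bcom_disable(ex_rx_task);
	bcom_gen_bd_rx_release(ex_rx_task);
	/* Unmapping and freeing the still-queued buffers is omitted here */
}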
// SPDX-License-Identifier: GPL-2.0-only /* * Bestcomm GenBD RX task microcode * * Copyright (C) 2006 AppSpec Computer Technologies Corp. * Jeff Gibbons <[email protected]> * Copyright (c) 2004 Freescale Semiconductor, Inc. * * Based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex * on Tue Mar 4 10:14:12 2006 GMT */ #include <asm/types.h> /* * The header consists of the following fields: * u32 magic; * u8 desc_size; * u8 var_size; * u8 inc_size; * u8 first_var; * u8 reserved[8]; * * The size fields contain the number of 32-bit words. */ u32 bcom_gen_bd_rx_task[] = { /* header */ 0x4243544b, 0x0d020409, 0x00000000, 0x00000000, /* Task descriptors */ 0x808220da, /* LCD: idx0 = var1, idx1 = var4; idx1 <= var3; idx0 += inc3, idx1 += inc2 */ 0x13e01010, /* DRD1A: var4 = var2; FN=0 MORE init=31 WS=0 RS=0 */ 0xb880025b, /* LCD: idx2 = *idx1, idx3 = var0; idx2 < var9; idx2 += inc3, idx3 += inc3 */ 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */ 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */ 0xd9190240, /* LCDEXT: idx2 = idx2; idx2 > var9; idx2 += inc0 */ 0xb8c5e009, /* LCD: idx3 = *(idx1 + var00000015); ; idx3 += inc1 */ 0x07fecf80, /* DRD1A: *idx3 = *idx0; FN=0 INT init=31 WS=3 RS=3 */ 0x99190024, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc4 */ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */ 0x0c4cf889, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var9) */ 0x000001f8, /* NOP */ /* VAR[9]-VAR[10] */ 0x40000000, 0x7fff7fff, /* INC[0]-INC[3] */ 0x40000000, 0xe0000000, 0xa0000008, 0x20000000, };
linux-master
drivers/dma/bestcomm/bcom_gen_bd_rx_task.c
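The header comment above describes a 16-byte image header: a magic word followed by four byte-sized fields giving the number of descriptor, variable and increment words and the index of the first variable. As a quick illustration (a hedged sketch, not part of the bestcomm API; the overlay assumes the big-endian MPC52xx target), the second word of bcom_gen_bd_rx_task[], 0x0d020409, decodes to 13 descriptors, 2 variables (VAR[9]..VAR[10]), 4 increments (INC[0]..INC[3]) and first variable index 9, matching the blocks listed above:

#include <linux/kernel.h>
#include <linux/types.h>

/* Mirrors the header layout documented above (big-endian byte order assumed) */
struct ex_bcom_img_hdr {
	u32 magic;	/* 0x4243544b, "BCTK" */
	u8  desc_size;	/* task descriptor words */
	u8  var_size;	/* variable words */
	u8  inc_size;	/* increment words */
	u8  first_var;	/* index of the first variable used */
	u8  reserved[8];
};

static void ex_decode_bcom_header(const u32 *img)
{
	const struct ex_bcom_img_hdr *hdr = (const void *)img;

	/* For bcom_gen_bd_rx_task[]: "magic 4243544b: 13 desc, 2 var, 4 inc, first var 9" */
	pr_info("magic %08x: %u desc, %u var, %u inc, first var %u\n",
		hdr->magic, hdr->desc_size, hdr->var_size,
		hdr->inc_size, hdr->first_var);
}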
// SPDX-License-Identifier: GPL-2.0-only /* * Bestcomm ATA task microcode * * Copyright (c) 2004 Freescale Semiconductor, Inc. * * Created based on bestcom/code_dma/image_rtos1/dma_image.hex */ #include <asm/types.h> /* * The header consists of the following fields: * u32 magic; * u8 desc_size; * u8 var_size; * u8 inc_size; * u8 first_var; * u8 reserved[8]; * * The size fields contain the number of 32-bit words. */ u32 bcom_ata_task[] = { /* header */ 0x4243544b, 0x0e060709, 0x00000000, 0x00000000, /* Task descriptors */ 0x8198009b, /* LCD: idx0 = var3; idx0 <= var2; idx0 += inc3 */ 0x13e00c08, /* DRD1A: var3 = var1; FN=0 MORE init=31 WS=0 RS=0 */ 0xb8000264, /* LCD: idx1 = *idx0, idx2 = var0; idx1 < var9; idx1 += inc4, idx2 += inc4 */ 0x10000f00, /* DRD1A: var3 = idx0; FN=0 MORE init=0 WS=0 RS=0 */ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */ 0x0c8cfc8a, /* DRD2B1: *idx2 = EU3(); EU3(*idx2,var10) */ 0xd8988240, /* LCDEXT: idx1 = idx1; idx1 > var9; idx1 += inc0 */ 0xf845e011, /* LCDEXT: idx2 = *(idx0 + var00000015); ; idx2 += inc2 */ 0xb845e00a, /* LCD: idx3 = *(idx0 + var00000019); ; idx3 += inc1 */ 0x0bfecf90, /* DRD1A: *idx3 = *idx2; FN=0 TFD init=31 WS=3 RS=3 */ 0x9898802d, /* LCD: idx1 = idx1; idx1 once var0; idx1 += inc5 */ 0x64000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 INT EXT init=0 WS=0 RS=0 */ 0x0c0cf849, /* DRD2B1: *idx0 = EU3(); EU3(idx1,var9) */ 0x000001f8, /* NOP */ /* VAR[9]-VAR[14] */ 0x40000000, 0x7fff7fff, 0x00000000, 0x00000000, 0x00000000, 0x00000000, /* INC[0]-INC[6] */ 0x40000000, 0xe0000000, 0xe0000000, 0xa000000c, 0x20000000, 0x00000000, 0x00000000, };
linux-master
drivers/dma/bestcomm/bcom_ata_task.c
// SPDX-License-Identifier: GPL-2.0-only /* * Bestcomm FEC RX task microcode * * Copyright (c) 2004 Freescale Semiconductor, Inc. * * Automatically created based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex * on Tue Mar 22 11:19:38 2005 GMT */ #include <asm/types.h> /* * The header consists of the following fields: * u32 magic; * u8 desc_size; * u8 var_size; * u8 inc_size; * u8 first_var; * u8 reserved[8]; * * The size fields contain the number of 32-bit words. */ u32 bcom_fec_rx_task[] = { /* header */ 0x4243544b, 0x18060709, 0x00000000, 0x00000000, /* Task descriptors */ 0x808220e3, /* LCD: idx0 = var1, idx1 = var4; idx1 <= var3; idx0 += inc4, idx1 += inc3 */ 0x10601010, /* DRD1A: var4 = var2; FN=0 MORE init=3 WS=0 RS=0 */ 0xb8800264, /* LCD: idx2 = *idx1, idx3 = var0; idx2 < var9; idx2 += inc4, idx3 += inc4 */ 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */ 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */ 0xb8c58029, /* LCD: idx3 = *(idx1 + var00000015); idx3 once var0; idx3 += inc5 */ 0x60000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=0 RS=0 */ 0x088cf8cc, /* DRD2B1: idx2 = EU3(); EU3(idx3,var12) */ 0x991982f2, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var11; idx2 += inc6, idx3 += inc2 */ 0x006acf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=1 RS=1 */ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */ 0x9999802d, /* LCD: idx3 = idx3; idx3 once var0; idx3 += inc5 */ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */ 0x034cfc4e, /* DRD2B1: var13 = EU3(); EU3(*idx1,var14) */ 0x00008868, /* DRD1A: idx2 = var13; FN=0 init=0 WS=0 RS=0 */ 0x99198341, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var13; idx2 += inc0, idx3 += inc1 */ 0x007ecf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=3 RS=3 */ 0x99198272, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var9; idx2 += inc6, idx3 += inc2 */ 0x046acf80, /* DRD1A: *idx3 = *idx0; FN=0 INT init=3 WS=1 RS=1 */ 0x9819002d, /* LCD: idx2 = idx0; idx2 once var0; idx2 += inc5 */ 0x0060c790, /* DRD1A: *idx1 = *idx2; FN=0 init=3 WS=0 RS=0 */ 0x000001f8, /* NOP */ /* VAR[9]-VAR[14] */ 0x40000000, 0x7fff7fff, 0x00000000, 0x00000003, 0x40000008, 0x43ffffff, /* INC[0]-INC[6] */ 0x40000000, 0xe0000000, 0xe0000000, 0xa0000008, 0x20000000, 0x00000000, 0x4000ffff, };
linux-master
drivers/dma/bestcomm/bcom_fec_rx_task.c
// SPDX-License-Identifier: GPL-2.0 /* * Dmaengine driver base library for DMA controllers, found on SH-based SoCs * * extracted from shdma.c * * Copyright (C) 2011-2012 Guennadi Liakhovetski <[email protected]> * Copyright (C) 2009 Nobuhiro Iwamatsu <[email protected]> * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. */ #include <linux/delay.h> #include <linux/shdma-base.h> #include <linux/dmaengine.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/spinlock.h> #include "../dmaengine.h" /* DMA descriptor control */ enum shdma_desc_status { DESC_IDLE, DESC_PREPARED, DESC_SUBMITTED, DESC_COMPLETED, /* completed, have to call callback */ DESC_WAITING, /* callback called, waiting for ack / re-submit */ }; #define NR_DESCS_PER_CHANNEL 32 #define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan) #define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev) /* * For slave DMA we assume, that there is a finite number of DMA slaves in the * system, and that each such slave can only use a finite number of channels. * We use slave channel IDs to make sure, that no such slave channel ID is * allocated more than once. */ static unsigned int slave_num = 256; module_param(slave_num, uint, 0444); /* A bitmask with slave_num bits */ static unsigned long *shdma_slave_used; /* Called under spin_lock_irq(&schan->chan_lock") */ static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan) { struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); const struct shdma_ops *ops = sdev->ops; struct shdma_desc *sdesc; /* DMA work check */ if (ops->channel_busy(schan)) return; /* Find the first not transferred descriptor */ list_for_each_entry(sdesc, &schan->ld_queue, node) if (sdesc->mark == DESC_SUBMITTED) { ops->start_xfer(schan, sdesc); break; } } static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx) { struct shdma_desc *chunk, *c, *desc = container_of(tx, struct shdma_desc, async_tx); struct shdma_chan *schan = to_shdma_chan(tx->chan); dma_async_tx_callback callback = tx->callback; dma_cookie_t cookie; bool power_up; spin_lock_irq(&schan->chan_lock); power_up = list_empty(&schan->ld_queue); cookie = dma_cookie_assign(tx); /* Mark all chunks of this descriptor as submitted, move to the queue */ list_for_each_entry_safe(chunk, c, desc->node.prev, node) { /* * All chunks are on the global ld_free, so, we have to find * the end of the chain ourselves */ if (chunk != desc && (chunk->mark == DESC_IDLE || chunk->async_tx.cookie > 0 || chunk->async_tx.cookie == -EBUSY || &chunk->node == &schan->ld_free)) break; chunk->mark = DESC_SUBMITTED; if (chunk->chunks == 1) { chunk->async_tx.callback = callback; chunk->async_tx.callback_param = tx->callback_param; } else { /* Callback goes to the last chunk */ chunk->async_tx.callback = NULL; } chunk->cookie = cookie; list_move_tail(&chunk->node, &schan->ld_queue); dev_dbg(schan->dev, "submit #%d@%p on %d\n", tx->cookie, &chunk->async_tx, schan->id); } if (power_up) { int ret; schan->pm_state = SHDMA_PM_BUSY; ret = pm_runtime_get(schan->dev); spin_unlock_irq(&schan->chan_lock); if (ret < 0) dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret); pm_runtime_barrier(schan->dev); spin_lock_irq(&schan->chan_lock); /* Have we been reset, while waiting? 
*/ if (schan->pm_state != SHDMA_PM_ESTABLISHED) { struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); const struct shdma_ops *ops = sdev->ops; dev_dbg(schan->dev, "Bring up channel %d\n", schan->id); /* * TODO: .xfer_setup() might fail on some platforms. * Make it int then, on error remove chunks from the * queue again */ ops->setup_xfer(schan, schan->slave_id); if (schan->pm_state == SHDMA_PM_PENDING) shdma_chan_xfer_ld_queue(schan); schan->pm_state = SHDMA_PM_ESTABLISHED; } } else { /* * Tell .device_issue_pending() not to run the queue, interrupts * will do it anyway */ schan->pm_state = SHDMA_PM_PENDING; } spin_unlock_irq(&schan->chan_lock); return cookie; } /* Called with desc_lock held */ static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan) { struct shdma_desc *sdesc; list_for_each_entry(sdesc, &schan->ld_free, node) if (sdesc->mark != DESC_PREPARED) { BUG_ON(sdesc->mark != DESC_IDLE); list_del(&sdesc->node); return sdesc; } return NULL; } static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr) { struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); const struct shdma_ops *ops = sdev->ops; int ret, match; if (schan->dev->of_node) { match = schan->hw_req; ret = ops->set_slave(schan, match, slave_addr, true); if (ret < 0) return ret; } else { match = schan->real_slave_id; } if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num) return -EINVAL; if (test_and_set_bit(schan->real_slave_id, shdma_slave_used)) return -EBUSY; ret = ops->set_slave(schan, match, slave_addr, false); if (ret < 0) { clear_bit(schan->real_slave_id, shdma_slave_used); return ret; } schan->slave_id = schan->real_slave_id; return 0; } static int shdma_alloc_chan_resources(struct dma_chan *chan) { struct shdma_chan *schan = to_shdma_chan(chan); struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); const struct shdma_ops *ops = sdev->ops; struct shdma_desc *desc; struct shdma_slave *slave = chan->private; int ret, i; /* * This relies on the guarantee from dmaengine that alloc_chan_resources * never runs concurrently with itself or free_chan_resources. */ if (slave) { /* Legacy mode: .private is set in filter */ schan->real_slave_id = slave->slave_id; ret = shdma_setup_slave(schan, 0); if (ret < 0) goto esetslave; } else { /* Normal mode: real_slave_id was set by filter */ schan->slave_id = -EINVAL; } schan->desc = kcalloc(NR_DESCS_PER_CHANNEL, sdev->desc_size, GFP_KERNEL); if (!schan->desc) { ret = -ENOMEM; goto edescalloc; } schan->desc_num = NR_DESCS_PER_CHANNEL; for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) { desc = ops->embedded_desc(schan->desc, i); dma_async_tx_descriptor_init(&desc->async_tx, &schan->dma_chan); desc->async_tx.tx_submit = shdma_tx_submit; desc->mark = DESC_IDLE; list_add(&desc->node, &schan->ld_free); } return NR_DESCS_PER_CHANNEL; edescalloc: if (slave) esetslave: clear_bit(slave->slave_id, shdma_slave_used); chan->private = NULL; return ret; } /* * This is the standard shdma filter function to be used as a replacement to the * "old" method, using the .private pointer. * You always have to pass a valid slave id as the argument, old drivers that * pass ERR_PTR(-EINVAL) as a filter parameter and set it up in dma_slave_config * need to be updated so we can remove the slave_id field from dma_slave_config. * parameter. If this filter is used, the slave driver, after calling * dma_request_channel(), will also have to call dmaengine_slave_config() with * .direction, and either .src_addr or .dst_addr set. 
* * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE * capability! If this becomes a requirement, hardware glue drivers, using this * services would have to provide their own filters, which first would check * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do * this, and only then, in case of a match, call this common filter. * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate(). * In that case the MID-RID value is used for slave channel filtering and is * passed to this function in the "arg" parameter. */ bool shdma_chan_filter(struct dma_chan *chan, void *arg) { struct shdma_chan *schan; struct shdma_dev *sdev; int slave_id = (long)arg; int ret; /* Only support channels handled by this driver. */ if (chan->device->device_alloc_chan_resources != shdma_alloc_chan_resources) return false; schan = to_shdma_chan(chan); sdev = to_shdma_dev(chan->device); /* * For DT, the schan->slave_id field is generated by the * set_slave function from the slave ID that is passed in * from xlate. For the non-DT case, the slave ID is * directly passed into the filter function by the driver */ if (schan->dev->of_node) { ret = sdev->ops->set_slave(schan, slave_id, 0, true); if (ret < 0) return false; schan->real_slave_id = schan->slave_id; return true; } if (slave_id < 0) { /* No slave requested - arbitrary channel */ dev_warn(sdev->dma_dev.dev, "invalid slave ID passed to dma_request_slave\n"); return true; } if (slave_id >= slave_num) return false; ret = sdev->ops->set_slave(schan, slave_id, 0, true); if (ret < 0) return false; schan->real_slave_id = slave_id; return true; } EXPORT_SYMBOL(shdma_chan_filter); static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) { struct shdma_desc *desc, *_desc; /* Is the "exposed" head of a chain acked? 
*/ bool head_acked = false; dma_cookie_t cookie = 0; dma_async_tx_callback callback = NULL; struct dmaengine_desc_callback cb; unsigned long flags; LIST_HEAD(cyclic_list); memset(&cb, 0, sizeof(cb)); spin_lock_irqsave(&schan->chan_lock, flags); list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) { struct dma_async_tx_descriptor *tx = &desc->async_tx; BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie); BUG_ON(desc->mark != DESC_SUBMITTED && desc->mark != DESC_COMPLETED && desc->mark != DESC_WAITING); /* * queue is ordered, and we use this loop to (1) clean up all * completed descriptors, and to (2) update descriptor flags of * any chunks in a (partially) completed chain */ if (!all && desc->mark == DESC_SUBMITTED && desc->cookie != cookie) break; if (tx->cookie > 0) cookie = tx->cookie; if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { if (schan->dma_chan.completed_cookie != desc->cookie - 1) dev_dbg(schan->dev, "Completing cookie %d, expected %d\n", desc->cookie, schan->dma_chan.completed_cookie + 1); schan->dma_chan.completed_cookie = desc->cookie; } /* Call callback on the last chunk */ if (desc->mark == DESC_COMPLETED && tx->callback) { desc->mark = DESC_WAITING; dmaengine_desc_get_callback(tx, &cb); callback = tx->callback; dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n", tx->cookie, tx, schan->id); BUG_ON(desc->chunks != 1); break; } if (tx->cookie > 0 || tx->cookie == -EBUSY) { if (desc->mark == DESC_COMPLETED) { BUG_ON(tx->cookie < 0); desc->mark = DESC_WAITING; } head_acked = async_tx_test_ack(tx); } else { switch (desc->mark) { case DESC_COMPLETED: desc->mark = DESC_WAITING; fallthrough; case DESC_WAITING: if (head_acked) async_tx_ack(&desc->async_tx); } } dev_dbg(schan->dev, "descriptor %p #%d completed.\n", tx, tx->cookie); if (((desc->mark == DESC_COMPLETED || desc->mark == DESC_WAITING) && async_tx_test_ack(&desc->async_tx)) || all) { if (all || !desc->cyclic) { /* Remove from ld_queue list */ desc->mark = DESC_IDLE; list_move(&desc->node, &schan->ld_free); } else { /* reuse as cyclic */ desc->mark = DESC_SUBMITTED; list_move_tail(&desc->node, &cyclic_list); } if (list_empty(&schan->ld_queue)) { dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); pm_runtime_put(schan->dev); schan->pm_state = SHDMA_PM_ESTABLISHED; } else if (schan->pm_state == SHDMA_PM_PENDING) { shdma_chan_xfer_ld_queue(schan); } } } if (all && !callback) /* * Terminating and the loop completed normally: forgive * uncompleted cookies */ schan->dma_chan.completed_cookie = schan->dma_chan.cookie; list_splice_tail(&cyclic_list, &schan->ld_queue); spin_unlock_irqrestore(&schan->chan_lock, flags); dmaengine_desc_callback_invoke(&cb, NULL); return callback; } /* * shdma_chan_ld_cleanup - Clean up link descriptors * * Clean up the ld_queue of DMA channel. */ static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all) { while (__ld_cleanup(schan, all)) ; } /* * shdma_free_chan_resources - Free all resources of the channel. 
*/ static void shdma_free_chan_resources(struct dma_chan *chan) { struct shdma_chan *schan = to_shdma_chan(chan); struct shdma_dev *sdev = to_shdma_dev(chan->device); const struct shdma_ops *ops = sdev->ops; LIST_HEAD(list); /* Protect against ISR */ spin_lock_irq(&schan->chan_lock); ops->halt_channel(schan); spin_unlock_irq(&schan->chan_lock); /* Now no new interrupts will occur */ /* Prepared and not submitted descriptors can still be on the queue */ if (!list_empty(&schan->ld_queue)) shdma_chan_ld_cleanup(schan, true); if (schan->slave_id >= 0) { /* The caller is holding dma_list_mutex */ clear_bit(schan->slave_id, shdma_slave_used); chan->private = NULL; } schan->real_slave_id = 0; spin_lock_irq(&schan->chan_lock); list_splice_init(&schan->ld_free, &list); schan->desc_num = 0; spin_unlock_irq(&schan->chan_lock); kfree(schan->desc); } /** * shdma_add_desc - get, set up and return one transfer descriptor * @schan: DMA channel * @flags: DMA transfer flags * @dst: destination DMA address, incremented when direction equals * DMA_DEV_TO_MEM or DMA_MEM_TO_MEM * @src: source DMA address, incremented when direction equals * DMA_MEM_TO_DEV or DMA_MEM_TO_MEM * @len: DMA transfer length * @first: if NULL, set to the current descriptor and cookie set to -EBUSY * @direction: needed for slave DMA to decide which address to keep constant, * equals DMA_MEM_TO_MEM for MEMCPY * Returns 0 or an error * Locks: called with desc_lock held */ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan, unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len, struct shdma_desc **first, enum dma_transfer_direction direction) { struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); const struct shdma_ops *ops = sdev->ops; struct shdma_desc *new; size_t copy_size = *len; if (!copy_size) return NULL; /* Allocate the link descriptor from the free list */ new = shdma_get_desc(schan); if (!new) { dev_err(schan->dev, "No free link descriptor available\n"); return NULL; } ops->desc_setup(schan, new, *src, *dst, &copy_size); if (!*first) { /* First desc */ new->async_tx.cookie = -EBUSY; *first = new; } else { /* Other desc - invisible to the user */ new->async_tx.cookie = -EINVAL; } dev_dbg(schan->dev, "chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n", copy_size, *len, src, dst, &new->async_tx, new->async_tx.cookie); new->mark = DESC_PREPARED; new->async_tx.flags = flags; new->direction = direction; new->partial = 0; *len -= copy_size; if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) *src += copy_size; if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM) *dst += copy_size; return new; } /* * shdma_prep_sg - prepare transfer descriptors from an SG list * * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also * converted to scatter-gather to guarantee consistent locking and a correct * list manipulation. For slave DMA direction carries the usual meaning, and, * logically, the SG list is RAM and the addr variable contains slave address, * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM * and the SG list contains only one element and points at the source buffer. */ static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan, struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, enum dma_transfer_direction direction, unsigned long flags, bool cyclic) { struct scatterlist *sg; struct shdma_desc *first = NULL, *new = NULL /* compiler... 
*/; LIST_HEAD(tx_list); int chunks = 0; unsigned long irq_flags; int i; for_each_sg(sgl, sg, sg_len, i) chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len); /* Have to lock the whole loop to protect against concurrent release */ spin_lock_irqsave(&schan->chan_lock, irq_flags); /* * Chaining: * first descriptor is what user is dealing with in all API calls, its * cookie is at first set to -EBUSY, at tx-submit to a positive * number * if more than one chunk is needed further chunks have cookie = -EINVAL * the last chunk, if not equal to the first, has cookie = -ENOSPC * all chunks are linked onto the tx_list head with their .node heads * only during this function, then they are immediately spliced * back onto the free list in form of a chain */ for_each_sg(sgl, sg, sg_len, i) { dma_addr_t sg_addr = sg_dma_address(sg); size_t len = sg_dma_len(sg); if (!len) goto err_get_desc; do { dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n", i, sg, len, &sg_addr); if (direction == DMA_DEV_TO_MEM) new = shdma_add_desc(schan, flags, &sg_addr, addr, &len, &first, direction); else new = shdma_add_desc(schan, flags, addr, &sg_addr, &len, &first, direction); if (!new) goto err_get_desc; new->cyclic = cyclic; if (cyclic) new->chunks = 1; else new->chunks = chunks--; list_add_tail(&new->node, &tx_list); } while (len); } if (new != first) new->async_tx.cookie = -ENOSPC; /* Put them back on the free list, so, they don't get lost */ list_splice_tail(&tx_list, &schan->ld_free); spin_unlock_irqrestore(&schan->chan_lock, irq_flags); return &first->async_tx; err_get_desc: list_for_each_entry(new, &tx_list, node) new->mark = DESC_IDLE; list_splice(&tx_list, &schan->ld_free); spin_unlock_irqrestore(&schan->chan_lock, irq_flags); return NULL; } static struct dma_async_tx_descriptor *shdma_prep_memcpy( struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, size_t len, unsigned long flags) { struct shdma_chan *schan = to_shdma_chan(chan); struct scatterlist sg; if (!chan || !len) return NULL; BUG_ON(!schan->desc_num); sg_init_table(&sg, 1); sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len, offset_in_page(dma_src)); sg_dma_address(&sg) = dma_src; sg_dma_len(&sg) = len; return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags, false); } static struct dma_async_tx_descriptor *shdma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, void *context) { struct shdma_chan *schan = to_shdma_chan(chan); struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); const struct shdma_ops *ops = sdev->ops; int slave_id = schan->slave_id; dma_addr_t slave_addr; if (!chan) return NULL; BUG_ON(!schan->desc_num); /* Someone calling slave DMA on a generic channel? 
*/ if (slave_id < 0 || !sg_len) { dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n", __func__, sg_len, slave_id); return NULL; } slave_addr = ops->slave_addr(schan); return shdma_prep_sg(schan, sgl, sg_len, &slave_addr, direction, flags, false); } #define SHDMA_MAX_SG_LEN 32 static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, unsigned long flags) { struct shdma_chan *schan = to_shdma_chan(chan); struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); struct dma_async_tx_descriptor *desc; const struct shdma_ops *ops = sdev->ops; unsigned int sg_len = buf_len / period_len; int slave_id = schan->slave_id; dma_addr_t slave_addr; struct scatterlist *sgl; int i; if (!chan) return NULL; BUG_ON(!schan->desc_num); if (sg_len > SHDMA_MAX_SG_LEN) { dev_err(schan->dev, "sg length %d exceeds limit %d", sg_len, SHDMA_MAX_SG_LEN); return NULL; } /* Someone calling slave DMA on a generic channel? */ if (slave_id < 0 || (buf_len < period_len)) { dev_warn(schan->dev, "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n", __func__, buf_len, period_len, slave_id); return NULL; } slave_addr = ops->slave_addr(schan); /* * Allocate the sg list dynamically as it would consumer too much stack * space. */ sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_KERNEL); if (!sgl) return NULL; sg_init_table(sgl, sg_len); for (i = 0; i < sg_len; i++) { dma_addr_t src = buf_addr + (period_len * i); sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len, offset_in_page(src)); sg_dma_address(&sgl[i]) = src; sg_dma_len(&sgl[i]) = period_len; } desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr, direction, flags, true); kfree(sgl); return desc; } static int shdma_terminate_all(struct dma_chan *chan) { struct shdma_chan *schan = to_shdma_chan(chan); struct shdma_dev *sdev = to_shdma_dev(chan->device); const struct shdma_ops *ops = sdev->ops; unsigned long flags; spin_lock_irqsave(&schan->chan_lock, flags); ops->halt_channel(schan); if (ops->get_partial && !list_empty(&schan->ld_queue)) { /* Record partial transfer */ struct shdma_desc *desc = list_first_entry(&schan->ld_queue, struct shdma_desc, node); desc->partial = ops->get_partial(schan, desc); } spin_unlock_irqrestore(&schan->chan_lock, flags); shdma_chan_ld_cleanup(schan, true); return 0; } static int shdma_config(struct dma_chan *chan, struct dma_slave_config *config) { struct shdma_chan *schan = to_shdma_chan(chan); /* * So far only .slave_id is used, but the slave drivers are * encouraged to also set a transfer direction and an address. */ if (!config) return -EINVAL; /* * We could lock this, but you shouldn't be configuring the * channel, while using it... */ return shdma_setup_slave(schan, config->direction == DMA_DEV_TO_MEM ? 
config->src_addr : config->dst_addr); } static void shdma_issue_pending(struct dma_chan *chan) { struct shdma_chan *schan = to_shdma_chan(chan); spin_lock_irq(&schan->chan_lock); if (schan->pm_state == SHDMA_PM_ESTABLISHED) shdma_chan_xfer_ld_queue(schan); else schan->pm_state = SHDMA_PM_PENDING; spin_unlock_irq(&schan->chan_lock); } static enum dma_status shdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct shdma_chan *schan = to_shdma_chan(chan); enum dma_status status; unsigned long flags; shdma_chan_ld_cleanup(schan, false); spin_lock_irqsave(&schan->chan_lock, flags); status = dma_cookie_status(chan, cookie, txstate); /* * If we don't find cookie on the queue, it has been aborted and we have * to report error */ if (status != DMA_COMPLETE) { struct shdma_desc *sdesc; status = DMA_ERROR; list_for_each_entry(sdesc, &schan->ld_queue, node) if (sdesc->cookie == cookie) { status = DMA_IN_PROGRESS; break; } } spin_unlock_irqrestore(&schan->chan_lock, flags); return status; } /* Called from error IRQ or NMI */ bool shdma_reset(struct shdma_dev *sdev) { const struct shdma_ops *ops = sdev->ops; struct shdma_chan *schan; unsigned int handled = 0; int i; /* Reset all channels */ shdma_for_each_chan(schan, sdev, i) { struct shdma_desc *sdesc; LIST_HEAD(dl); if (!schan) continue; spin_lock(&schan->chan_lock); /* Stop the channel */ ops->halt_channel(schan); list_splice_init(&schan->ld_queue, &dl); if (!list_empty(&dl)) { dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); pm_runtime_put(schan->dev); } schan->pm_state = SHDMA_PM_ESTABLISHED; spin_unlock(&schan->chan_lock); /* Complete all */ list_for_each_entry(sdesc, &dl, node) { struct dma_async_tx_descriptor *tx = &sdesc->async_tx; sdesc->mark = DESC_IDLE; dmaengine_desc_get_callback_invoke(tx, NULL); } spin_lock(&schan->chan_lock); list_splice(&dl, &schan->ld_free); spin_unlock(&schan->chan_lock); handled++; } return !!handled; } EXPORT_SYMBOL(shdma_reset); static irqreturn_t chan_irq(int irq, void *dev) { struct shdma_chan *schan = dev; const struct shdma_ops *ops = to_shdma_dev(schan->dma_chan.device)->ops; irqreturn_t ret; spin_lock(&schan->chan_lock); ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE; spin_unlock(&schan->chan_lock); return ret; } static irqreturn_t chan_irqt(int irq, void *dev) { struct shdma_chan *schan = dev; const struct shdma_ops *ops = to_shdma_dev(schan->dma_chan.device)->ops; struct shdma_desc *sdesc; spin_lock_irq(&schan->chan_lock); list_for_each_entry(sdesc, &schan->ld_queue, node) { if (sdesc->mark == DESC_SUBMITTED && ops->desc_completed(schan, sdesc)) { dev_dbg(schan->dev, "done #%d@%p\n", sdesc->async_tx.cookie, &sdesc->async_tx); sdesc->mark = DESC_COMPLETED; break; } } /* Next desc */ shdma_chan_xfer_ld_queue(schan); spin_unlock_irq(&schan->chan_lock); shdma_chan_ld_cleanup(schan, false); return IRQ_HANDLED; } int shdma_request_irq(struct shdma_chan *schan, int irq, unsigned long flags, const char *name) { int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq, chan_irqt, flags, name, schan); schan->irq = ret < 0 ? 
ret : irq; return ret; } EXPORT_SYMBOL(shdma_request_irq); void shdma_chan_probe(struct shdma_dev *sdev, struct shdma_chan *schan, int id) { schan->pm_state = SHDMA_PM_ESTABLISHED; /* reference struct dma_device */ schan->dma_chan.device = &sdev->dma_dev; dma_cookie_init(&schan->dma_chan); schan->dev = sdev->dma_dev.dev; schan->id = id; if (!schan->max_xfer_len) schan->max_xfer_len = PAGE_SIZE; spin_lock_init(&schan->chan_lock); /* Init descripter manage list */ INIT_LIST_HEAD(&schan->ld_queue); INIT_LIST_HEAD(&schan->ld_free); /* Add the channel to DMA device channel list */ list_add_tail(&schan->dma_chan.device_node, &sdev->dma_dev.channels); sdev->schan[id] = schan; } EXPORT_SYMBOL(shdma_chan_probe); void shdma_chan_remove(struct shdma_chan *schan) { list_del(&schan->dma_chan.device_node); } EXPORT_SYMBOL(shdma_chan_remove); int shdma_init(struct device *dev, struct shdma_dev *sdev, int chan_num) { struct dma_device *dma_dev = &sdev->dma_dev; /* * Require all call-backs for now, they can trivially be made optional * later as required */ if (!sdev->ops || !sdev->desc_size || !sdev->ops->embedded_desc || !sdev->ops->start_xfer || !sdev->ops->setup_xfer || !sdev->ops->set_slave || !sdev->ops->desc_setup || !sdev->ops->slave_addr || !sdev->ops->channel_busy || !sdev->ops->halt_channel || !sdev->ops->desc_completed) return -EINVAL; sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL); if (!sdev->schan) return -ENOMEM; INIT_LIST_HEAD(&dma_dev->channels); /* Common and MEMCPY operations */ dma_dev->device_alloc_chan_resources = shdma_alloc_chan_resources; dma_dev->device_free_chan_resources = shdma_free_chan_resources; dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy; dma_dev->device_tx_status = shdma_tx_status; dma_dev->device_issue_pending = shdma_issue_pending; /* Compulsory for DMA_SLAVE fields */ dma_dev->device_prep_slave_sg = shdma_prep_slave_sg; dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic; dma_dev->device_config = shdma_config; dma_dev->device_terminate_all = shdma_terminate_all; dma_dev->dev = dev; return 0; } EXPORT_SYMBOL(shdma_init); void shdma_cleanup(struct shdma_dev *sdev) { kfree(sdev->schan); } EXPORT_SYMBOL(shdma_cleanup); static int __init shdma_enter(void) { shdma_slave_used = bitmap_zalloc(slave_num, GFP_KERNEL); if (!shdma_slave_used) return -ENOMEM; return 0; } module_init(shdma_enter); static void __exit shdma_exit(void) { bitmap_free(shdma_slave_used); } module_exit(shdma_exit); MODULE_DESCRIPTION("SH-DMA driver base library"); MODULE_AUTHOR("Guennadi Liakhovetski <[email protected]>");
linux-master
drivers/dma/sh/shdma-base.c
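The comment above shdma_chan_filter() spells out the non-DT contract: the slave id is passed as the filter argument to dma_request_channel(), and the client must then call dmaengine_slave_config() with a direction and the matching slave address. A minimal hedged sketch of that sequence follows; the slave id, FIFO address and bus width are illustrative placeholders, not values defined by this library.

#include <linux/dmaengine.h>
#include <linux/shdma-base.h>

/* Hedged sketch of the documented non-DT channel request sequence */
static struct dma_chan *ex_shdma_request_tx(int slave_id, dma_addr_t fifo_pa)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_pa,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,	/* placeholder */
	};
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The slave id travels as the opaque filter argument */
	chan = dma_request_channel(mask, shdma_chan_filter,
				   (void *)(unsigned long)slave_id);
	if (!chan)
		return NULL;

	if (dmaengine_slave_config(chan, &cfg) < 0) {
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}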
// SPDX-License-Identifier: GPL-2.0 /* * Renesas RZ/G2L DMA Controller Driver * * Based on imx-dma.c * * Copyright (C) 2021 Renesas Electronics Corp. * Copyright 2010 Sascha Hauer, Pengutronix <[email protected]> * Copyright 2012 Javier Martin, Vista Silicon <[email protected]> */ #include <linux/bitfield.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/list.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_dma.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/slab.h> #include <linux/spinlock.h> #include "../dmaengine.h" #include "../virt-dma.h" enum rz_dmac_prep_type { RZ_DMAC_DESC_MEMCPY, RZ_DMAC_DESC_SLAVE_SG, }; struct rz_lmdesc { u32 header; u32 sa; u32 da; u32 tb; u32 chcfg; u32 chitvl; u32 chext; u32 nxla; }; struct rz_dmac_desc { struct virt_dma_desc vd; dma_addr_t src; dma_addr_t dest; size_t len; struct list_head node; enum dma_transfer_direction direction; enum rz_dmac_prep_type type; /* For slave sg */ struct scatterlist *sg; unsigned int sgcount; }; #define to_rz_dmac_desc(d) container_of(d, struct rz_dmac_desc, vd) struct rz_dmac_chan { struct virt_dma_chan vc; void __iomem *ch_base; void __iomem *ch_cmn_base; unsigned int index; int irq; struct rz_dmac_desc *desc; int descs_allocated; dma_addr_t src_per_address; dma_addr_t dst_per_address; u32 chcfg; u32 chctrl; int mid_rid; struct list_head ld_free; struct list_head ld_queue; struct list_head ld_active; struct { struct rz_lmdesc *base; struct rz_lmdesc *head; struct rz_lmdesc *tail; dma_addr_t base_dma; } lmdesc; }; #define to_rz_dmac_chan(c) container_of(c, struct rz_dmac_chan, vc.chan) struct rz_dmac { struct dma_device engine; struct device *dev; struct reset_control *rstc; void __iomem *base; void __iomem *ext_base; unsigned int n_channels; struct rz_dmac_chan *channels; DECLARE_BITMAP(modules, 1024); }; #define to_rz_dmac(d) container_of(d, struct rz_dmac, engine) /* * ----------------------------------------------------------------------------- * Registers */ #define CHSTAT 0x0024 #define CHCTRL 0x0028 #define CHCFG 0x002c #define NXLA 0x0038 #define DCTRL 0x0000 #define EACH_CHANNEL_OFFSET 0x0040 #define CHANNEL_0_7_OFFSET 0x0000 #define CHANNEL_0_7_COMMON_BASE 0x0300 #define CHANNEL_8_15_OFFSET 0x0400 #define CHANNEL_8_15_COMMON_BASE 0x0700 #define CHSTAT_ER BIT(4) #define CHSTAT_EN BIT(0) #define CHCTRL_CLRINTMSK BIT(17) #define CHCTRL_CLRSUS BIT(9) #define CHCTRL_CLRTC BIT(6) #define CHCTRL_CLREND BIT(5) #define CHCTRL_CLRRQ BIT(4) #define CHCTRL_SWRST BIT(3) #define CHCTRL_STG BIT(2) #define CHCTRL_CLREN BIT(1) #define CHCTRL_SETEN BIT(0) #define CHCTRL_DEFAULT (CHCTRL_CLRINTMSK | CHCTRL_CLRSUS | \ CHCTRL_CLRTC | CHCTRL_CLREND | \ CHCTRL_CLRRQ | CHCTRL_SWRST | \ CHCTRL_CLREN) #define CHCFG_DMS BIT(31) #define CHCFG_DEM BIT(24) #define CHCFG_DAD BIT(21) #define CHCFG_SAD BIT(20) #define CHCFG_REQD BIT(3) #define CHCFG_SEL(bits) ((bits) & 0x07) #define CHCFG_MEM_COPY (0x80400008) #define CHCFG_FILL_DDS_MASK GENMASK(19, 16) #define CHCFG_FILL_SDS_MASK GENMASK(15, 12) #define CHCFG_FILL_TM(a) (((a) & BIT(5)) << 22) #define CHCFG_FILL_AM(a) (((a) & GENMASK(4, 2)) << 6) #define CHCFG_FILL_LVL(a) (((a) & BIT(1)) << 5) #define CHCFG_FILL_HIEN(a) (((a) & BIT(0)) << 5) #define MID_RID_MASK GENMASK(9, 0) #define CHCFG_MASK GENMASK(15, 10) #define CHCFG_DS_INVALID 0xFF #define DCTRL_LVINT BIT(1) #define DCTRL_PR BIT(0) #define 
DCTRL_DEFAULT (DCTRL_LVINT | DCTRL_PR) /* LINK MODE DESCRIPTOR */ #define HEADER_LV BIT(0) #define RZ_DMAC_MAX_CHAN_DESCRIPTORS 16 #define RZ_DMAC_MAX_CHANNELS 16 #define DMAC_NR_LMDESC 64 /* * ----------------------------------------------------------------------------- * Device access */ static void rz_dmac_writel(struct rz_dmac *dmac, unsigned int val, unsigned int offset) { writel(val, dmac->base + offset); } static void rz_dmac_ext_writel(struct rz_dmac *dmac, unsigned int val, unsigned int offset) { writel(val, dmac->ext_base + offset); } static u32 rz_dmac_ext_readl(struct rz_dmac *dmac, unsigned int offset) { return readl(dmac->ext_base + offset); } static void rz_dmac_ch_writel(struct rz_dmac_chan *channel, unsigned int val, unsigned int offset, int which) { if (which) writel(val, channel->ch_base + offset); else writel(val, channel->ch_cmn_base + offset); } static u32 rz_dmac_ch_readl(struct rz_dmac_chan *channel, unsigned int offset, int which) { if (which) return readl(channel->ch_base + offset); else return readl(channel->ch_cmn_base + offset); } /* * ----------------------------------------------------------------------------- * Initialization */ static void rz_lmdesc_setup(struct rz_dmac_chan *channel, struct rz_lmdesc *lmdesc) { u32 nxla; channel->lmdesc.base = lmdesc; channel->lmdesc.head = lmdesc; channel->lmdesc.tail = lmdesc; nxla = channel->lmdesc.base_dma; while (lmdesc < (channel->lmdesc.base + (DMAC_NR_LMDESC - 1))) { lmdesc->header = 0; nxla += sizeof(*lmdesc); lmdesc->nxla = nxla; lmdesc++; } lmdesc->header = 0; lmdesc->nxla = channel->lmdesc.base_dma; } /* * ----------------------------------------------------------------------------- * Descriptors preparation */ static void rz_dmac_lmdesc_recycle(struct rz_dmac_chan *channel) { struct rz_lmdesc *lmdesc = channel->lmdesc.head; while (!(lmdesc->header & HEADER_LV)) { lmdesc->header = 0; lmdesc++; if (lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC)) lmdesc = channel->lmdesc.base; } channel->lmdesc.head = lmdesc; } static void rz_dmac_enable_hw(struct rz_dmac_chan *channel) { struct dma_chan *chan = &channel->vc.chan; struct rz_dmac *dmac = to_rz_dmac(chan->device); unsigned long flags; u32 nxla; u32 chctrl; u32 chstat; dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index); local_irq_save(flags); rz_dmac_lmdesc_recycle(channel); nxla = channel->lmdesc.base_dma + (sizeof(struct rz_lmdesc) * (channel->lmdesc.head - channel->lmdesc.base)); chstat = rz_dmac_ch_readl(channel, CHSTAT, 1); if (!(chstat & CHSTAT_EN)) { chctrl = (channel->chctrl | CHCTRL_SETEN); rz_dmac_ch_writel(channel, nxla, NXLA, 1); rz_dmac_ch_writel(channel, channel->chcfg, CHCFG, 1); rz_dmac_ch_writel(channel, CHCTRL_SWRST, CHCTRL, 1); rz_dmac_ch_writel(channel, chctrl, CHCTRL, 1); } local_irq_restore(flags); } static void rz_dmac_disable_hw(struct rz_dmac_chan *channel) { struct dma_chan *chan = &channel->vc.chan; struct rz_dmac *dmac = to_rz_dmac(chan->device); unsigned long flags; dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index); local_irq_save(flags); rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1); local_irq_restore(flags); } static void rz_dmac_set_dmars_register(struct rz_dmac *dmac, int nr, u32 dmars) { u32 dmars_offset = (nr / 2) * 4; u32 shift = (nr % 2) * 16; u32 dmars32; dmars32 = rz_dmac_ext_readl(dmac, dmars_offset); dmars32 &= ~(0xffff << shift); dmars32 |= dmars << shift; rz_dmac_ext_writel(dmac, dmars32, dmars_offset); } static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel) { struct 
dma_chan *chan = &channel->vc.chan; struct rz_dmac *dmac = to_rz_dmac(chan->device); struct rz_lmdesc *lmdesc = channel->lmdesc.tail; struct rz_dmac_desc *d = channel->desc; u32 chcfg = CHCFG_MEM_COPY; /* prepare descriptor */ lmdesc->sa = d->src; lmdesc->da = d->dest; lmdesc->tb = d->len; lmdesc->chcfg = chcfg; lmdesc->chitvl = 0; lmdesc->chext = 0; lmdesc->header = HEADER_LV; rz_dmac_set_dmars_register(dmac, channel->index, 0); channel->chcfg = chcfg; channel->chctrl = CHCTRL_STG | CHCTRL_SETEN; } static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel) { struct dma_chan *chan = &channel->vc.chan; struct rz_dmac *dmac = to_rz_dmac(chan->device); struct rz_dmac_desc *d = channel->desc; struct scatterlist *sg, *sgl = d->sg; struct rz_lmdesc *lmdesc; unsigned int i, sg_len = d->sgcount; channel->chcfg |= CHCFG_SEL(channel->index) | CHCFG_DEM | CHCFG_DMS; if (d->direction == DMA_DEV_TO_MEM) { channel->chcfg |= CHCFG_SAD; channel->chcfg &= ~CHCFG_REQD; } else { channel->chcfg |= CHCFG_DAD | CHCFG_REQD; } lmdesc = channel->lmdesc.tail; for (i = 0, sg = sgl; i < sg_len; i++, sg = sg_next(sg)) { if (d->direction == DMA_DEV_TO_MEM) { lmdesc->sa = channel->src_per_address; lmdesc->da = sg_dma_address(sg); } else { lmdesc->sa = sg_dma_address(sg); lmdesc->da = channel->dst_per_address; } lmdesc->tb = sg_dma_len(sg); lmdesc->chitvl = 0; lmdesc->chext = 0; if (i == (sg_len - 1)) { lmdesc->chcfg = (channel->chcfg & ~CHCFG_DEM); lmdesc->header = HEADER_LV; } else { lmdesc->chcfg = channel->chcfg; lmdesc->header = HEADER_LV; } if (++lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC)) lmdesc = channel->lmdesc.base; } channel->lmdesc.tail = lmdesc; rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid); channel->chctrl = CHCTRL_SETEN; } static int rz_dmac_xfer_desc(struct rz_dmac_chan *chan) { struct rz_dmac_desc *d = chan->desc; struct virt_dma_desc *vd; vd = vchan_next_desc(&chan->vc); if (!vd) return 0; list_del(&vd->node); switch (d->type) { case RZ_DMAC_DESC_MEMCPY: rz_dmac_prepare_desc_for_memcpy(chan); break; case RZ_DMAC_DESC_SLAVE_SG: rz_dmac_prepare_descs_for_slave_sg(chan); break; default: return -EINVAL; } rz_dmac_enable_hw(chan); return 0; } /* * ----------------------------------------------------------------------------- * DMA engine operations */ static int rz_dmac_alloc_chan_resources(struct dma_chan *chan) { struct rz_dmac_chan *channel = to_rz_dmac_chan(chan); while (channel->descs_allocated < RZ_DMAC_MAX_CHAN_DESCRIPTORS) { struct rz_dmac_desc *desc; desc = kzalloc(sizeof(*desc), GFP_KERNEL); if (!desc) break; list_add_tail(&desc->node, &channel->ld_free); channel->descs_allocated++; } if (!channel->descs_allocated) return -ENOMEM; return channel->descs_allocated; } static void rz_dmac_free_chan_resources(struct dma_chan *chan) { struct rz_dmac_chan *channel = to_rz_dmac_chan(chan); struct rz_dmac *dmac = to_rz_dmac(chan->device); struct rz_lmdesc *lmdesc = channel->lmdesc.base; struct rz_dmac_desc *desc, *_desc; unsigned long flags; unsigned int i; spin_lock_irqsave(&channel->vc.lock, flags); for (i = 0; i < DMAC_NR_LMDESC; i++) lmdesc[i].header = 0; rz_dmac_disable_hw(channel); list_splice_tail_init(&channel->ld_active, &channel->ld_free); list_splice_tail_init(&channel->ld_queue, &channel->ld_free); if (channel->mid_rid >= 0) { clear_bit(channel->mid_rid, dmac->modules); channel->mid_rid = -EINVAL; } spin_unlock_irqrestore(&channel->vc.lock, flags); list_for_each_entry_safe(desc, _desc, &channel->ld_free, node) { kfree(desc); 
channel->descs_allocated--; } INIT_LIST_HEAD(&channel->ld_free); vchan_free_chan_resources(&channel->vc); } static struct dma_async_tx_descriptor * rz_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { struct rz_dmac_chan *channel = to_rz_dmac_chan(chan); struct rz_dmac *dmac = to_rz_dmac(chan->device); struct rz_dmac_desc *desc; dev_dbg(dmac->dev, "%s channel: %d src=0x%pad dst=0x%pad len=%zu\n", __func__, channel->index, &src, &dest, len); if (list_empty(&channel->ld_free)) return NULL; desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node); desc->type = RZ_DMAC_DESC_MEMCPY; desc->src = src; desc->dest = dest; desc->len = len; desc->direction = DMA_MEM_TO_MEM; list_move_tail(channel->ld_free.next, &channel->ld_queue); return vchan_tx_prep(&channel->vc, &desc->vd, flags); } static struct dma_async_tx_descriptor * rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, void *context) { struct rz_dmac_chan *channel = to_rz_dmac_chan(chan); struct rz_dmac_desc *desc; struct scatterlist *sg; int dma_length = 0; int i = 0; if (list_empty(&channel->ld_free)) return NULL; desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node); for_each_sg(sgl, sg, sg_len, i) { dma_length += sg_dma_len(sg); } desc->type = RZ_DMAC_DESC_SLAVE_SG; desc->sg = sgl; desc->sgcount = sg_len; desc->len = dma_length; desc->direction = direction; if (direction == DMA_DEV_TO_MEM) desc->src = channel->src_per_address; else desc->dest = channel->dst_per_address; list_move_tail(channel->ld_free.next, &channel->ld_queue); return vchan_tx_prep(&channel->vc, &desc->vd, flags); } static int rz_dmac_terminate_all(struct dma_chan *chan) { struct rz_dmac_chan *channel = to_rz_dmac_chan(chan); unsigned long flags; LIST_HEAD(head); rz_dmac_disable_hw(channel); spin_lock_irqsave(&channel->vc.lock, flags); list_splice_tail_init(&channel->ld_active, &channel->ld_free); list_splice_tail_init(&channel->ld_queue, &channel->ld_free); spin_unlock_irqrestore(&channel->vc.lock, flags); vchan_get_all_descriptors(&channel->vc, &head); vchan_dma_desc_free_list(&channel->vc, &head); return 0; } static void rz_dmac_issue_pending(struct dma_chan *chan) { struct rz_dmac_chan *channel = to_rz_dmac_chan(chan); struct rz_dmac *dmac = to_rz_dmac(chan->device); struct rz_dmac_desc *desc; unsigned long flags; spin_lock_irqsave(&channel->vc.lock, flags); if (!list_empty(&channel->ld_queue)) { desc = list_first_entry(&channel->ld_queue, struct rz_dmac_desc, node); channel->desc = desc; if (vchan_issue_pending(&channel->vc)) { if (rz_dmac_xfer_desc(channel) < 0) dev_warn(dmac->dev, "ch: %d couldn't issue DMA xfer\n", channel->index); else list_move_tail(channel->ld_queue.next, &channel->ld_active); } } spin_unlock_irqrestore(&channel->vc.lock, flags); } static u8 rz_dmac_ds_to_val_mapping(enum dma_slave_buswidth ds) { u8 i; static const enum dma_slave_buswidth ds_lut[] = { DMA_SLAVE_BUSWIDTH_1_BYTE, DMA_SLAVE_BUSWIDTH_2_BYTES, DMA_SLAVE_BUSWIDTH_4_BYTES, DMA_SLAVE_BUSWIDTH_8_BYTES, DMA_SLAVE_BUSWIDTH_16_BYTES, DMA_SLAVE_BUSWIDTH_32_BYTES, DMA_SLAVE_BUSWIDTH_64_BYTES, DMA_SLAVE_BUSWIDTH_128_BYTES, }; for (i = 0; i < ARRAY_SIZE(ds_lut); i++) { if (ds_lut[i] == ds) return i; } return CHCFG_DS_INVALID; } static int rz_dmac_config(struct dma_chan *chan, struct dma_slave_config *config) { struct rz_dmac_chan *channel = to_rz_dmac_chan(chan); u32 val; channel->src_per_address = 
config->src_addr; channel->dst_per_address = config->dst_addr; val = rz_dmac_ds_to_val_mapping(config->dst_addr_width); if (val == CHCFG_DS_INVALID) return -EINVAL; channel->chcfg &= ~CHCFG_FILL_DDS_MASK; channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val); val = rz_dmac_ds_to_val_mapping(config->src_addr_width); if (val == CHCFG_DS_INVALID) return -EINVAL; channel->chcfg &= ~CHCFG_FILL_SDS_MASK; channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val); return 0; } static void rz_dmac_virt_desc_free(struct virt_dma_desc *vd) { /* * Place holder * Descriptor allocation is done during alloc_chan_resources and * get freed during free_chan_resources. * list is used to manage the descriptors and avoid any memory * allocation/free during DMA read/write. */ } static void rz_dmac_device_synchronize(struct dma_chan *chan) { struct rz_dmac_chan *channel = to_rz_dmac_chan(chan); struct rz_dmac *dmac = to_rz_dmac(chan->device); u32 chstat; int ret; ret = read_poll_timeout(rz_dmac_ch_readl, chstat, !(chstat & CHSTAT_EN), 100, 100000, false, channel, CHSTAT, 1); if (ret < 0) dev_warn(dmac->dev, "DMA Timeout"); rz_dmac_set_dmars_register(dmac, channel->index, 0); } /* * ----------------------------------------------------------------------------- * IRQ handling */ static void rz_dmac_irq_handle_channel(struct rz_dmac_chan *channel) { struct dma_chan *chan = &channel->vc.chan; struct rz_dmac *dmac = to_rz_dmac(chan->device); u32 chstat, chctrl; chstat = rz_dmac_ch_readl(channel, CHSTAT, 1); if (chstat & CHSTAT_ER) { dev_err(dmac->dev, "DMAC err CHSTAT_%d = %08X\n", channel->index, chstat); rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1); goto done; } chctrl = rz_dmac_ch_readl(channel, CHCTRL, 1); rz_dmac_ch_writel(channel, chctrl | CHCTRL_CLREND, CHCTRL, 1); done: return; } static irqreturn_t rz_dmac_irq_handler(int irq, void *dev_id) { struct rz_dmac_chan *channel = dev_id; if (channel) { rz_dmac_irq_handle_channel(channel); return IRQ_WAKE_THREAD; } /* handle DMAERR irq */ return IRQ_HANDLED; } static irqreturn_t rz_dmac_irq_handler_thread(int irq, void *dev_id) { struct rz_dmac_chan *channel = dev_id; struct rz_dmac_desc *desc = NULL; unsigned long flags; spin_lock_irqsave(&channel->vc.lock, flags); if (list_empty(&channel->ld_active)) { /* Someone might have called terminate all */ goto out; } desc = list_first_entry(&channel->ld_active, struct rz_dmac_desc, node); vchan_cookie_complete(&desc->vd); list_move_tail(channel->ld_active.next, &channel->ld_free); if (!list_empty(&channel->ld_queue)) { desc = list_first_entry(&channel->ld_queue, struct rz_dmac_desc, node); channel->desc = desc; if (rz_dmac_xfer_desc(channel) == 0) list_move_tail(channel->ld_queue.next, &channel->ld_active); } out: spin_unlock_irqrestore(&channel->vc.lock, flags); return IRQ_HANDLED; } /* * ----------------------------------------------------------------------------- * OF xlate and channel filter */ static bool rz_dmac_chan_filter(struct dma_chan *chan, void *arg) { struct rz_dmac_chan *channel = to_rz_dmac_chan(chan); struct rz_dmac *dmac = to_rz_dmac(chan->device); struct of_phandle_args *dma_spec = arg; u32 ch_cfg; channel->mid_rid = dma_spec->args[0] & MID_RID_MASK; ch_cfg = (dma_spec->args[0] & CHCFG_MASK) >> 10; channel->chcfg = CHCFG_FILL_TM(ch_cfg) | CHCFG_FILL_AM(ch_cfg) | CHCFG_FILL_LVL(ch_cfg) | CHCFG_FILL_HIEN(ch_cfg); return !test_and_set_bit(channel->mid_rid, dmac->modules); } static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { dma_cap_mask_t mask; if 
(dma_spec->args_count != 1) return NULL; /* Only slave DMA channels can be allocated via DT */ dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); return dma_request_channel(mask, rz_dmac_chan_filter, dma_spec); } /* * ----------------------------------------------------------------------------- * Probe and remove */ static int rz_dmac_chan_probe(struct rz_dmac *dmac, struct rz_dmac_chan *channel, unsigned int index) { struct platform_device *pdev = to_platform_device(dmac->dev); struct rz_lmdesc *lmdesc; char pdev_irqname[5]; char *irqname; int ret; channel->index = index; channel->mid_rid = -EINVAL; /* Request the channel interrupt. */ sprintf(pdev_irqname, "ch%u", index); channel->irq = platform_get_irq_byname(pdev, pdev_irqname); if (channel->irq < 0) return channel->irq; irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u", dev_name(dmac->dev), index); if (!irqname) return -ENOMEM; ret = devm_request_threaded_irq(dmac->dev, channel->irq, rz_dmac_irq_handler, rz_dmac_irq_handler_thread, 0, irqname, channel); if (ret) { dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", channel->irq, ret); return ret; } /* Set io base address for each channel */ if (index < 8) { channel->ch_base = dmac->base + CHANNEL_0_7_OFFSET + EACH_CHANNEL_OFFSET * index; channel->ch_cmn_base = dmac->base + CHANNEL_0_7_COMMON_BASE; } else { channel->ch_base = dmac->base + CHANNEL_8_15_OFFSET + EACH_CHANNEL_OFFSET * (index - 8); channel->ch_cmn_base = dmac->base + CHANNEL_8_15_COMMON_BASE; } /* Allocate descriptors */ lmdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC, &channel->lmdesc.base_dma, GFP_KERNEL); if (!lmdesc) { dev_err(&pdev->dev, "Can't allocate memory (lmdesc)\n"); return -ENOMEM; } rz_lmdesc_setup(channel, lmdesc); /* Initialize register for each channel */ rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1); channel->vc.desc_free = rz_dmac_virt_desc_free; vchan_init(&channel->vc, &dmac->engine); INIT_LIST_HEAD(&channel->ld_queue); INIT_LIST_HEAD(&channel->ld_free); INIT_LIST_HEAD(&channel->ld_active); return 0; } static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac) { struct device_node *np = dev->of_node; int ret; ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels); if (ret < 0) { dev_err(dev, "unable to read dma-channels property\n"); return ret; } if (!dmac->n_channels || dmac->n_channels > RZ_DMAC_MAX_CHANNELS) { dev_err(dev, "invalid number of channels %u\n", dmac->n_channels); return -EINVAL; } return 0; } static int rz_dmac_probe(struct platform_device *pdev) { const char *irqname = "error"; struct dma_device *engine; struct rz_dmac *dmac; int channel_num; unsigned int i; int ret; int irq; dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); if (!dmac) return -ENOMEM; dmac->dev = &pdev->dev; platform_set_drvdata(pdev, dmac); ret = rz_dmac_parse_of(&pdev->dev, dmac); if (ret < 0) return ret; dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels, sizeof(*dmac->channels), GFP_KERNEL); if (!dmac->channels) return -ENOMEM; /* Request resources */ dmac->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dmac->base)) return PTR_ERR(dmac->base); dmac->ext_base = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(dmac->ext_base)) return PTR_ERR(dmac->ext_base); /* Register interrupt handler for error */ irq = platform_get_irq_byname(pdev, irqname); if (irq < 0) return irq; ret = devm_request_irq(&pdev->dev, irq, rz_dmac_irq_handler, 0, irqname, NULL); if (ret) { dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n", 
irq, ret); return ret; } /* Initialize the channels. */ INIT_LIST_HEAD(&dmac->engine.channels); dmac->rstc = devm_reset_control_array_get_exclusive(&pdev->dev); if (IS_ERR(dmac->rstc)) return dev_err_probe(&pdev->dev, PTR_ERR(dmac->rstc), "failed to get resets\n"); pm_runtime_enable(&pdev->dev); ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "pm_runtime_resume_and_get failed\n"); goto err_pm_disable; } ret = reset_control_deassert(dmac->rstc); if (ret) goto err_pm_runtime_put; for (i = 0; i < dmac->n_channels; i++) { ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i); if (ret < 0) goto err; } /* Register the DMAC as a DMA provider for DT. */ ret = of_dma_controller_register(pdev->dev.of_node, rz_dmac_of_xlate, NULL); if (ret < 0) goto err; /* Register the DMA engine device. */ engine = &dmac->engine; dma_cap_set(DMA_SLAVE, engine->cap_mask); dma_cap_set(DMA_MEMCPY, engine->cap_mask); rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_0_7_COMMON_BASE + DCTRL); rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_8_15_COMMON_BASE + DCTRL); engine->dev = &pdev->dev; engine->device_alloc_chan_resources = rz_dmac_alloc_chan_resources; engine->device_free_chan_resources = rz_dmac_free_chan_resources; engine->device_tx_status = dma_cookie_status; engine->device_prep_slave_sg = rz_dmac_prep_slave_sg; engine->device_prep_dma_memcpy = rz_dmac_prep_dma_memcpy; engine->device_config = rz_dmac_config; engine->device_terminate_all = rz_dmac_terminate_all; engine->device_issue_pending = rz_dmac_issue_pending; engine->device_synchronize = rz_dmac_device_synchronize; engine->copy_align = DMAENGINE_ALIGN_1_BYTE; dma_set_max_seg_size(engine->dev, U32_MAX); ret = dma_async_device_register(engine); if (ret < 0) { dev_err(&pdev->dev, "unable to register\n"); goto dma_register_err; } return 0; dma_register_err: of_dma_controller_free(pdev->dev.of_node); err: channel_num = i ? i - 1 : 0; for (i = 0; i < channel_num; i++) { struct rz_dmac_chan *channel = &dmac->channels[i]; dma_free_coherent(&pdev->dev, sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC, channel->lmdesc.base, channel->lmdesc.base_dma); } reset_control_assert(dmac->rstc); err_pm_runtime_put: pm_runtime_put(&pdev->dev); err_pm_disable: pm_runtime_disable(&pdev->dev); return ret; } static int rz_dmac_remove(struct platform_device *pdev) { struct rz_dmac *dmac = platform_get_drvdata(pdev); unsigned int i; dma_async_device_unregister(&dmac->engine); of_dma_controller_free(pdev->dev.of_node); for (i = 0; i < dmac->n_channels; i++) { struct rz_dmac_chan *channel = &dmac->channels[i]; dma_free_coherent(&pdev->dev, sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC, channel->lmdesc.base, channel->lmdesc.base_dma); } reset_control_assert(dmac->rstc); pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } static const struct of_device_id of_rz_dmac_match[] = { { .compatible = "renesas,rz-dmac", }, { /* Sentinel */ } }; MODULE_DEVICE_TABLE(of, of_rz_dmac_match); static struct platform_driver rz_dmac_driver = { .driver = { .name = "rz-dmac", .of_match_table = of_rz_dmac_match, }, .probe = rz_dmac_probe, .remove = rz_dmac_remove, }; module_platform_driver(rz_dmac_driver); MODULE_DESCRIPTION("Renesas RZ/G2L DMA Controller Driver"); MODULE_AUTHOR("Biju Das <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/dma/sh/rz-dmac.c
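The rz-dmac source above implements only the provider side of the dmaengine framework (device_prep_slave_sg, device_config, device_issue_pending and friends). For context, here is a minimal client-side sketch of how a peripheral driver would drive such a controller through the generic slave API; it assumes a hypothetical client device with a "tx" dma-names entry, an already DMA-mapped buffer and a FIFO register address. The names example_start_tx, example_dma_complete and fifo_addr are made up for illustration and are not part of the driver.

/*
 * Hedged sketch (illustrative only): a dmaengine slave client issuing a
 * memory-to-device transfer that lands in the controller's
 * device_prep_slave_sg implementation.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static void example_dma_complete(void *param)
{
	/* Called from the DMAC driver's completion path. */
}

static int example_start_tx(struct device *dev, dma_addr_t buf, size_t len,
			    phys_addr_t fifo_addr)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config cfg = {
		.dst_addr = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_chan *chan;
	dma_cookie_t cookie;

	/* "tx" must match a dma-names entry in the client's DT node. */
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	if (dmaengine_slave_config(chan, &cfg))
		goto err_release;

	/* Maps onto the controller's device_prep_slave_sg callback. */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		goto err_release;

	desc->callback = example_dma_complete;
	desc->callback_param = NULL;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		goto err_release;

	/* Kicks the controller's device_issue_pending callback. */
	dma_async_issue_pending(chan);
	return 0;

err_release:
	dma_release_channel(chan);
	return -EIO;
}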
// SPDX-License-Identifier: GPL-2.0 /* * Renesas R-Car Gen2/Gen3 DMA Controller Driver * * Copyright (C) 2014-2019 Renesas Electronics Inc. * * Author: Laurent Pinchart <[email protected]> */ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/of_dma.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/spinlock.h> #include "../dmaengine.h" /* * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer * @node: entry in the parent's chunks list * @src_addr: device source address * @dst_addr: device destination address * @size: transfer size in bytes */ struct rcar_dmac_xfer_chunk { struct list_head node; dma_addr_t src_addr; dma_addr_t dst_addr; u32 size; }; /* * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk * @sar: value of the SAR register (source address) * @dar: value of the DAR register (destination address) * @tcr: value of the TCR register (transfer count) */ struct rcar_dmac_hw_desc { u32 sar; u32 dar; u32 tcr; u32 reserved; } __attribute__((__packed__)); /* * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor * @async_tx: base DMA asynchronous transaction descriptor * @direction: direction of the DMA transfer * @xfer_shift: log2 of the transfer size * @chcr: value of the channel configuration register for this transfer * @node: entry in the channel's descriptors lists * @chunks: list of transfer chunks for this transfer * @running: the transfer chunk being currently processed * @nchunks: number of transfer chunks for this transfer * @hwdescs.use: whether the transfer descriptor uses hardware descriptors * @hwdescs.mem: hardware descriptors memory for the transfer * @hwdescs.dma: device address of the hardware descriptors memory * @hwdescs.size: size of the hardware descriptors in bytes * @size: transfer size in bytes * @cyclic: when set indicates that the DMA transfer is cyclic */ struct rcar_dmac_desc { struct dma_async_tx_descriptor async_tx; enum dma_transfer_direction direction; unsigned int xfer_shift; u32 chcr; struct list_head node; struct list_head chunks; struct rcar_dmac_xfer_chunk *running; unsigned int nchunks; struct { bool use; struct rcar_dmac_hw_desc *mem; dma_addr_t dma; size_t size; } hwdescs; unsigned int size; bool cyclic; }; #define to_rcar_dmac_desc(d) container_of(d, struct rcar_dmac_desc, async_tx) /* * struct rcar_dmac_desc_page - One page worth of descriptors * @node: entry in the channel's pages list * @descs: array of DMA descriptors * @chunks: array of transfer chunk descriptors */ struct rcar_dmac_desc_page { struct list_head node; union { DECLARE_FLEX_ARRAY(struct rcar_dmac_desc, descs); DECLARE_FLEX_ARRAY(struct rcar_dmac_xfer_chunk, chunks); }; }; #define RCAR_DMAC_DESCS_PER_PAGE \ ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) / \ sizeof(struct rcar_dmac_desc)) #define RCAR_DMAC_XFER_CHUNKS_PER_PAGE \ ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \ sizeof(struct rcar_dmac_xfer_chunk)) /* * struct rcar_dmac_chan_slave - Slave configuration * @slave_addr: slave memory address * @xfer_size: size (in bytes) of hardware transfers */ struct rcar_dmac_chan_slave { phys_addr_t slave_addr; unsigned int xfer_size; }; /* * struct rcar_dmac_chan_map - Map of slave device phys to dma address * @addr: slave dma address * @dir: direction of 
mapping * @slave: slave configuration that is mapped */ struct rcar_dmac_chan_map { dma_addr_t addr; enum dma_data_direction dir; struct rcar_dmac_chan_slave slave; }; /* * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel * @chan: base DMA channel object * @iomem: channel I/O memory base * @index: index of this channel in the controller * @irq: channel IRQ * @src: slave memory address and size on the source side * @dst: slave memory address and size on the destination side * @mid_rid: hardware MID/RID for the DMA client using this channel * @lock: protects the channel CHCR register and the desc members * @desc.free: list of free descriptors * @desc.pending: list of pending descriptors (submitted with tx_submit) * @desc.active: list of active descriptors (activated with issue_pending) * @desc.done: list of completed descriptors * @desc.wait: list of descriptors waiting for an ack * @desc.running: the descriptor being processed (a member of the active list) * @desc.chunks_free: list of free transfer chunk descriptors * @desc.pages: list of pages used by allocated descriptors */ struct rcar_dmac_chan { struct dma_chan chan; void __iomem *iomem; unsigned int index; int irq; struct rcar_dmac_chan_slave src; struct rcar_dmac_chan_slave dst; struct rcar_dmac_chan_map map; int mid_rid; spinlock_t lock; struct { struct list_head free; struct list_head pending; struct list_head active; struct list_head done; struct list_head wait; struct rcar_dmac_desc *running; struct list_head chunks_free; struct list_head pages; } desc; }; #define to_rcar_dmac_chan(c) container_of(c, struct rcar_dmac_chan, chan) /* * struct rcar_dmac - R-Car Gen2 DMA Controller * @engine: base DMA engine object * @dev: the hardware device * @dmac_base: remapped base register block * @chan_base: remapped channel register block (optional) * @n_channels: number of available channels * @channels: array of DMAC channels * @channels_mask: bitfield of which DMA channels are managed by this driver * @modules: bitmask of client modules in use */ struct rcar_dmac { struct dma_device engine; struct device *dev; void __iomem *dmac_base; void __iomem *chan_base; unsigned int n_channels; struct rcar_dmac_chan *channels; u32 channels_mask; DECLARE_BITMAP(modules, 256); }; #define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine) #define for_each_rcar_dmac_chan(i, dmac, chan) \ for (i = 0, chan = &(dmac)->channels[0]; i < (dmac)->n_channels; i++, chan++) \ if (!((dmac)->channels_mask & BIT(i))) continue; else /* * struct rcar_dmac_of_data - This driver's OF data * @chan_offset_base: DMAC channels base offset * @chan_offset_stride: DMAC channels offset stride */ struct rcar_dmac_of_data { u32 chan_offset_base; u32 chan_offset_stride; }; /* ----------------------------------------------------------------------------- * Registers */ #define RCAR_DMAISTA 0x0020 #define RCAR_DMASEC 0x0030 #define RCAR_DMAOR 0x0060 #define RCAR_DMAOR_PRI_FIXED (0 << 8) #define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8) #define RCAR_DMAOR_AE (1 << 2) #define RCAR_DMAOR_DME (1 << 0) #define RCAR_DMACHCLR 0x0080 /* Not on R-Car Gen4 */ #define RCAR_DMADPSEC 0x00a0 #define RCAR_DMASAR 0x0000 #define RCAR_DMADAR 0x0004 #define RCAR_DMATCR 0x0008 #define RCAR_DMATCR_MASK 0x00ffffff #define RCAR_DMATSR 0x0028 #define RCAR_DMACHCR 0x000c #define RCAR_DMACHCR_CAE (1 << 31) #define RCAR_DMACHCR_CAIE (1 << 30) #define RCAR_DMACHCR_DPM_DISABLED (0 << 28) #define RCAR_DMACHCR_DPM_ENABLED (1 << 28) #define RCAR_DMACHCR_DPM_REPEAT (2 << 28) #define 
RCAR_DMACHCR_DPM_INFINITE (3 << 28) #define RCAR_DMACHCR_RPT_SAR (1 << 27) #define RCAR_DMACHCR_RPT_DAR (1 << 26) #define RCAR_DMACHCR_RPT_TCR (1 << 25) #define RCAR_DMACHCR_DPB (1 << 22) #define RCAR_DMACHCR_DSE (1 << 19) #define RCAR_DMACHCR_DSIE (1 << 18) #define RCAR_DMACHCR_TS_1B ((0 << 20) | (0 << 3)) #define RCAR_DMACHCR_TS_2B ((0 << 20) | (1 << 3)) #define RCAR_DMACHCR_TS_4B ((0 << 20) | (2 << 3)) #define RCAR_DMACHCR_TS_16B ((0 << 20) | (3 << 3)) #define RCAR_DMACHCR_TS_32B ((1 << 20) | (0 << 3)) #define RCAR_DMACHCR_TS_64B ((1 << 20) | (1 << 3)) #define RCAR_DMACHCR_TS_8B ((1 << 20) | (3 << 3)) #define RCAR_DMACHCR_DM_FIXED (0 << 14) #define RCAR_DMACHCR_DM_INC (1 << 14) #define RCAR_DMACHCR_DM_DEC (2 << 14) #define RCAR_DMACHCR_SM_FIXED (0 << 12) #define RCAR_DMACHCR_SM_INC (1 << 12) #define RCAR_DMACHCR_SM_DEC (2 << 12) #define RCAR_DMACHCR_RS_AUTO (4 << 8) #define RCAR_DMACHCR_RS_DMARS (8 << 8) #define RCAR_DMACHCR_IE (1 << 2) #define RCAR_DMACHCR_TE (1 << 1) #define RCAR_DMACHCR_DE (1 << 0) #define RCAR_DMATCRB 0x0018 #define RCAR_DMATSRB 0x0038 #define RCAR_DMACHCRB 0x001c #define RCAR_DMACHCRB_DCNT(n) ((n) << 24) #define RCAR_DMACHCRB_DPTR_MASK (0xff << 16) #define RCAR_DMACHCRB_DPTR_SHIFT 16 #define RCAR_DMACHCRB_DRST (1 << 15) #define RCAR_DMACHCRB_DTS (1 << 8) #define RCAR_DMACHCRB_SLM_NORMAL (0 << 4) #define RCAR_DMACHCRB_SLM_CLK(n) ((8 | (n)) << 4) #define RCAR_DMACHCRB_PRI(n) ((n) << 0) #define RCAR_DMARS 0x0040 #define RCAR_DMABUFCR 0x0048 #define RCAR_DMABUFCR_MBU(n) ((n) << 16) #define RCAR_DMABUFCR_ULB(n) ((n) << 0) #define RCAR_DMADPBASE 0x0050 #define RCAR_DMADPBASE_MASK 0xfffffff0 #define RCAR_DMADPBASE_SEL (1 << 0) #define RCAR_DMADPCR 0x0054 #define RCAR_DMADPCR_DIPT(n) ((n) << 24) #define RCAR_DMAFIXSAR 0x0010 #define RCAR_DMAFIXDAR 0x0014 #define RCAR_DMAFIXDPBASE 0x0060 /* For R-Car Gen4 */ #define RCAR_GEN4_DMACHCLR 0x0100 /* Hardcode the MEMCPY transfer size to 4 bytes. 
*/ #define RCAR_DMAC_MEMCPY_XFER_SIZE 4 /* ----------------------------------------------------------------------------- * Device access */ static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data) { if (reg == RCAR_DMAOR) writew(data, dmac->dmac_base + reg); else writel(data, dmac->dmac_base + reg); } static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg) { if (reg == RCAR_DMAOR) return readw(dmac->dmac_base + reg); else return readl(dmac->dmac_base + reg); } static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg) { if (reg == RCAR_DMARS) return readw(chan->iomem + reg); else return readl(chan->iomem + reg); } static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data) { if (reg == RCAR_DMARS) writew(data, chan->iomem + reg); else writel(data, chan->iomem + reg); } static void rcar_dmac_chan_clear(struct rcar_dmac *dmac, struct rcar_dmac_chan *chan) { if (dmac->chan_base) rcar_dmac_chan_write(chan, RCAR_GEN4_DMACHCLR, 1); else rcar_dmac_write(dmac, RCAR_DMACHCLR, BIT(chan->index)); } static void rcar_dmac_chan_clear_all(struct rcar_dmac *dmac) { struct rcar_dmac_chan *chan; unsigned int i; if (dmac->chan_base) { for_each_rcar_dmac_chan(i, dmac, chan) rcar_dmac_chan_write(chan, RCAR_GEN4_DMACHCLR, 1); } else { rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask); } } /* ----------------------------------------------------------------------------- * Initialization and configuration */ static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan) { u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)); } static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan) { struct rcar_dmac_desc *desc = chan->desc.running; u32 chcr = desc->chcr; WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan)); if (chan->mid_rid >= 0) rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid); if (desc->hwdescs.use) { struct rcar_dmac_xfer_chunk *chunk = list_first_entry(&desc->chunks, struct rcar_dmac_xfer_chunk, node); dev_dbg(chan->chan.device->dev, "chan%u: queue desc %p: %u@%pad\n", chan->index, desc, desc->nchunks, &desc->hwdescs.dma); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR, chunk->src_addr >> 32); rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR, chunk->dst_addr >> 32); rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE, desc->hwdescs.dma >> 32); #endif rcar_dmac_chan_write(chan, RCAR_DMADPBASE, (desc->hwdescs.dma & 0xfffffff0) | RCAR_DMADPBASE_SEL); rcar_dmac_chan_write(chan, RCAR_DMACHCRB, RCAR_DMACHCRB_DCNT(desc->nchunks - 1) | RCAR_DMACHCRB_DRST); /* * Errata: When descriptor memory is accessed through an IOMMU * the DMADAR register isn't initialized automatically from the * first descriptor at beginning of transfer by the DMAC like it * should. Initialize it manually with the destination address * of the first chunk. */ rcar_dmac_chan_write(chan, RCAR_DMADAR, chunk->dst_addr & 0xffffffff); /* * Program the descriptor stage interrupt to occur after the end * of the first stage. */ rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1)); chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR | RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB; /* * If the descriptor isn't cyclic enable normal descriptor mode * and the transfer completion interrupt. */ if (!desc->cyclic) chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE; /* * If the descriptor is cyclic and has a callback enable the * descriptor stage interrupt in infinite repeat mode. 
*/ else if (desc->async_tx.callback) chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE; /* * Otherwise just select infinite repeat mode without any * interrupt. */ else chcr |= RCAR_DMACHCR_DPM_INFINITE; } else { struct rcar_dmac_xfer_chunk *chunk = desc->running; dev_dbg(chan->chan.device->dev, "chan%u: queue chunk %p: %u@%pad -> %pad\n", chan->index, chunk, chunk->size, &chunk->src_addr, &chunk->dst_addr); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR, chunk->src_addr >> 32); rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR, chunk->dst_addr >> 32); #endif rcar_dmac_chan_write(chan, RCAR_DMASAR, chunk->src_addr & 0xffffffff); rcar_dmac_chan_write(chan, RCAR_DMADAR, chunk->dst_addr & 0xffffffff); rcar_dmac_chan_write(chan, RCAR_DMATCR, chunk->size >> desc->xfer_shift); chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE; } rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE | RCAR_DMACHCR_CAIE); } static int rcar_dmac_init(struct rcar_dmac *dmac) { u16 dmaor; /* Clear all channels and enable the DMAC globally. */ rcar_dmac_chan_clear_all(dmac); rcar_dmac_write(dmac, RCAR_DMAOR, RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME); dmaor = rcar_dmac_read(dmac, RCAR_DMAOR); if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) { dev_warn(dmac->dev, "DMAOR initialization failed.\n"); return -EIO; } return 0; } /* ----------------------------------------------------------------------------- * Descriptors submission */ static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx) { struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan); struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx); unsigned long flags; dma_cookie_t cookie; spin_lock_irqsave(&chan->lock, flags); cookie = dma_cookie_assign(tx); dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n", chan->index, tx->cookie, desc); list_add_tail(&desc->node, &chan->desc.pending); desc->running = list_first_entry(&desc->chunks, struct rcar_dmac_xfer_chunk, node); spin_unlock_irqrestore(&chan->lock, flags); return cookie; } /* ----------------------------------------------------------------------------- * Descriptors allocation and free */ /* * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors * @chan: the DMA channel * @gfp: allocation flags */ static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp) { struct rcar_dmac_desc_page *page; unsigned long flags; LIST_HEAD(list); unsigned int i; page = (void *)get_zeroed_page(gfp); if (!page) return -ENOMEM; for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) { struct rcar_dmac_desc *desc = &page->descs[i]; dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); desc->async_tx.tx_submit = rcar_dmac_tx_submit; INIT_LIST_HEAD(&desc->chunks); list_add_tail(&desc->node, &list); } spin_lock_irqsave(&chan->lock, flags); list_splice_tail(&list, &chan->desc.free); list_add_tail(&page->node, &chan->desc.pages); spin_unlock_irqrestore(&chan->lock, flags); return 0; } /* * rcar_dmac_desc_put - Release a DMA transfer descriptor * @chan: the DMA channel * @desc: the descriptor * * Put the descriptor and its transfer chunk descriptors back in the channel's * free descriptors lists. The descriptor's chunks list will be reinitialized to * an empty list as a result. * * The descriptor must have been removed from the channel's lists before calling * this function. 
*/ static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan, struct rcar_dmac_desc *desc) { unsigned long flags; spin_lock_irqsave(&chan->lock, flags); list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free); list_add(&desc->node, &chan->desc.free); spin_unlock_irqrestore(&chan->lock, flags); } static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan) { struct rcar_dmac_desc *desc, *_desc; unsigned long flags; LIST_HEAD(list); /* * We have to temporarily move all descriptors from the wait list to a * local list as iterating over the wait list, even with * list_for_each_entry_safe, isn't safe if we release the channel lock * around the rcar_dmac_desc_put() call. */ spin_lock_irqsave(&chan->lock, flags); list_splice_init(&chan->desc.wait, &list); spin_unlock_irqrestore(&chan->lock, flags); list_for_each_entry_safe(desc, _desc, &list, node) { if (async_tx_test_ack(&desc->async_tx)) { list_del(&desc->node); rcar_dmac_desc_put(chan, desc); } } if (list_empty(&list)) return; /* Put the remaining descriptors back in the wait list. */ spin_lock_irqsave(&chan->lock, flags); list_splice(&list, &chan->desc.wait); spin_unlock_irqrestore(&chan->lock, flags); } /* * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer * @chan: the DMA channel * * Locking: This function must be called in a non-atomic context. * * Return: A pointer to the allocated descriptor or NULL if no descriptor can * be allocated. */ static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan) { struct rcar_dmac_desc *desc; unsigned long flags; int ret; /* Recycle acked descriptors before attempting allocation. */ rcar_dmac_desc_recycle_acked(chan); spin_lock_irqsave(&chan->lock, flags); while (list_empty(&chan->desc.free)) { /* * No free descriptors, allocate a page worth of them and try * again, as someone else could race us to get the newly * allocated descriptors. If the allocation fails return an * error. */ spin_unlock_irqrestore(&chan->lock, flags); ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT); if (ret < 0) return NULL; spin_lock_irqsave(&chan->lock, flags); } desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node); list_del(&desc->node); spin_unlock_irqrestore(&chan->lock, flags); return desc; } /* * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks * @chan: the DMA channel * @gfp: allocation flags */ static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp) { struct rcar_dmac_desc_page *page; unsigned long flags; LIST_HEAD(list); unsigned int i; page = (void *)get_zeroed_page(gfp); if (!page) return -ENOMEM; for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) { struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i]; list_add_tail(&chunk->node, &list); } spin_lock_irqsave(&chan->lock, flags); list_splice_tail(&list, &chan->desc.chunks_free); list_add_tail(&page->node, &chan->desc.pages); spin_unlock_irqrestore(&chan->lock, flags); return 0; } /* * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer * @chan: the DMA channel * * Locking: This function must be called in a non-atomic context. * * Return: A pointer to the allocated transfer chunk descriptor or NULL if no * descriptor can be allocated. 
*/ static struct rcar_dmac_xfer_chunk * rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan) { struct rcar_dmac_xfer_chunk *chunk; unsigned long flags; int ret; spin_lock_irqsave(&chan->lock, flags); while (list_empty(&chan->desc.chunks_free)) { /* * No free descriptors, allocate a page worth of them and try * again, as someone else could race us to get the newly * allocated descriptors. If the allocation fails return an * error. */ spin_unlock_irqrestore(&chan->lock, flags); ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT); if (ret < 0) return NULL; spin_lock_irqsave(&chan->lock, flags); } chunk = list_first_entry(&chan->desc.chunks_free, struct rcar_dmac_xfer_chunk, node); list_del(&chunk->node); spin_unlock_irqrestore(&chan->lock, flags); return chunk; } static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan, struct rcar_dmac_desc *desc, size_t size) { /* * dma_alloc_coherent() allocates memory in page size increments. To * avoid reallocating the hardware descriptors when the allocated size * wouldn't change align the requested size to a multiple of the page * size. */ size = PAGE_ALIGN(size); if (desc->hwdescs.size == size) return; if (desc->hwdescs.mem) { dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size, desc->hwdescs.mem, desc->hwdescs.dma); desc->hwdescs.mem = NULL; desc->hwdescs.size = 0; } if (!size) return; desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size, &desc->hwdescs.dma, GFP_NOWAIT); if (!desc->hwdescs.mem) return; desc->hwdescs.size = size; } static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan, struct rcar_dmac_desc *desc) { struct rcar_dmac_xfer_chunk *chunk; struct rcar_dmac_hw_desc *hwdesc; rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc)); hwdesc = desc->hwdescs.mem; if (!hwdesc) return -ENOMEM; list_for_each_entry(chunk, &desc->chunks, node) { hwdesc->sar = chunk->src_addr; hwdesc->dar = chunk->dst_addr; hwdesc->tcr = chunk->size >> desc->xfer_shift; hwdesc++; } return 0; } /* ----------------------------------------------------------------------------- * Stop and reset */ static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan) { u32 chcr; unsigned int i; /* * Ensure that the setting of the DE bit is actually 0 after * clearing it. */ for (i = 0; i < 1024; i++) { chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); if (!(chcr & RCAR_DMACHCR_DE)) return; udelay(1); } dev_err(chan->chan.device->dev, "CHCR DE check error\n"); } static void rcar_dmac_clear_chcr_de(struct rcar_dmac_chan *chan) { u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); /* set DE=0 and flush remaining data */ rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE)); /* make sure all remaining data was flushed */ rcar_dmac_chcr_de_barrier(chan); } static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan) { u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE | RCAR_DMACHCR_TE | RCAR_DMACHCR_DE | RCAR_DMACHCR_CAE | RCAR_DMACHCR_CAIE); rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr); rcar_dmac_chcr_de_barrier(chan); } static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan) { struct rcar_dmac_desc *desc, *_desc; unsigned long flags; LIST_HEAD(descs); spin_lock_irqsave(&chan->lock, flags); /* Move all non-free descriptors to the local lists. 
*/ list_splice_init(&chan->desc.pending, &descs); list_splice_init(&chan->desc.active, &descs); list_splice_init(&chan->desc.done, &descs); list_splice_init(&chan->desc.wait, &descs); chan->desc.running = NULL; spin_unlock_irqrestore(&chan->lock, flags); list_for_each_entry_safe(desc, _desc, &descs, node) { list_del(&desc->node); rcar_dmac_desc_put(chan, desc); } } static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac) { struct rcar_dmac_chan *chan; unsigned int i; /* Stop all channels. */ for_each_rcar_dmac_chan(i, dmac, chan) { /* Stop and reinitialize the channel. */ spin_lock_irq(&chan->lock); rcar_dmac_chan_halt(chan); spin_unlock_irq(&chan->lock); } } static int rcar_dmac_chan_pause(struct dma_chan *chan) { unsigned long flags; struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); spin_lock_irqsave(&rchan->lock, flags); rcar_dmac_clear_chcr_de(rchan); spin_unlock_irqrestore(&rchan->lock, flags); return 0; } /* ----------------------------------------------------------------------------- * Descriptors preparation */ static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan, struct rcar_dmac_desc *desc) { static const u32 chcr_ts[] = { RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B, RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B, RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B, RCAR_DMACHCR_TS_64B, }; unsigned int xfer_size; u32 chcr; switch (desc->direction) { case DMA_DEV_TO_MEM: chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED | RCAR_DMACHCR_RS_DMARS; xfer_size = chan->src.xfer_size; break; case DMA_MEM_TO_DEV: chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC | RCAR_DMACHCR_RS_DMARS; xfer_size = chan->dst.xfer_size; break; case DMA_MEM_TO_MEM: default: chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC | RCAR_DMACHCR_RS_AUTO; xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE; break; } desc->xfer_shift = ilog2(xfer_size); desc->chcr = chcr | chcr_ts[desc->xfer_shift]; } /* * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list * * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also * converted to scatter-gather to guarantee consistent locking and a correct * list manipulation. For slave DMA direction carries the usual meaning, and, * logically, the SG list is RAM and the addr variable contains slave address, * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM * and the SG list contains only one element and points at the source buffer. */ static struct dma_async_tx_descriptor * rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl, unsigned int sg_len, dma_addr_t dev_addr, enum dma_transfer_direction dir, unsigned long dma_flags, bool cyclic) { struct rcar_dmac_xfer_chunk *chunk; struct rcar_dmac_desc *desc; struct scatterlist *sg; unsigned int nchunks = 0; unsigned int max_chunk_size; unsigned int full_size = 0; bool cross_boundary = false; unsigned int i; #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT u32 high_dev_addr; u32 high_mem_addr; #endif desc = rcar_dmac_desc_get(chan); if (!desc) return NULL; desc->async_tx.flags = dma_flags; desc->async_tx.cookie = -EBUSY; desc->cyclic = cyclic; desc->direction = dir; rcar_dmac_chan_configure_desc(chan, desc); max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift; /* * Allocate and fill the transfer chunk descriptors. We own the only * reference to the DMA descriptor, there's no need for locking. 
*/ for_each_sg(sgl, sg, sg_len, i) { dma_addr_t mem_addr = sg_dma_address(sg); unsigned int len = sg_dma_len(sg); full_size += len; #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT if (i == 0) { high_dev_addr = dev_addr >> 32; high_mem_addr = mem_addr >> 32; } if ((dev_addr >> 32 != high_dev_addr) || (mem_addr >> 32 != high_mem_addr)) cross_boundary = true; #endif while (len) { unsigned int size = min(len, max_chunk_size); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT /* * Prevent individual transfers from crossing 4GB * boundaries. */ if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) { size = ALIGN(dev_addr, 1ULL << 32) - dev_addr; cross_boundary = true; } if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) { size = ALIGN(mem_addr, 1ULL << 32) - mem_addr; cross_boundary = true; } #endif chunk = rcar_dmac_xfer_chunk_get(chan); if (!chunk) { rcar_dmac_desc_put(chan, desc); return NULL; } if (dir == DMA_DEV_TO_MEM) { chunk->src_addr = dev_addr; chunk->dst_addr = mem_addr; } else { chunk->src_addr = mem_addr; chunk->dst_addr = dev_addr; } chunk->size = size; dev_dbg(chan->chan.device->dev, "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n", chan->index, chunk, desc, i, sg, size, len, &chunk->src_addr, &chunk->dst_addr); mem_addr += size; if (dir == DMA_MEM_TO_MEM) dev_addr += size; len -= size; list_add_tail(&chunk->node, &desc->chunks); nchunks++; } } desc->nchunks = nchunks; desc->size = full_size; /* * Use hardware descriptor lists if possible when more than one chunk * needs to be transferred (otherwise they don't make much sense). * * Source/Destination address should be located in same 4GiB region * in the 40bit address space when it uses Hardware descriptor, * and cross_boundary is checking it. */ desc->hwdescs.use = !cross_boundary && nchunks > 1; if (desc->hwdescs.use) { if (rcar_dmac_fill_hwdesc(chan, desc) < 0) desc->hwdescs.use = false; } return &desc->async_tx; } /* ----------------------------------------------------------------------------- * DMA engine operations */ static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan) { struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); int ret; INIT_LIST_HEAD(&rchan->desc.chunks_free); INIT_LIST_HEAD(&rchan->desc.pages); /* Preallocate descriptors. */ ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL); if (ret < 0) return -ENOMEM; ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL); if (ret < 0) return -ENOMEM; return pm_runtime_get_sync(chan->device->dev); } static void rcar_dmac_free_chan_resources(struct dma_chan *chan) { struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); struct rcar_dmac *dmac = to_rcar_dmac(chan->device); struct rcar_dmac_chan_map *map = &rchan->map; struct rcar_dmac_desc_page *page, *_page; struct rcar_dmac_desc *desc; LIST_HEAD(list); /* Protect against ISR */ spin_lock_irq(&rchan->lock); rcar_dmac_chan_halt(rchan); spin_unlock_irq(&rchan->lock); /* * Now no new interrupts will occur, but one might already be * running. Wait for it to finish before freeing resources. 
*/ synchronize_irq(rchan->irq); if (rchan->mid_rid >= 0) { /* The caller is holding dma_list_mutex */ clear_bit(rchan->mid_rid, dmac->modules); rchan->mid_rid = -EINVAL; } list_splice_init(&rchan->desc.free, &list); list_splice_init(&rchan->desc.pending, &list); list_splice_init(&rchan->desc.active, &list); list_splice_init(&rchan->desc.done, &list); list_splice_init(&rchan->desc.wait, &list); rchan->desc.running = NULL; list_for_each_entry(desc, &list, node) rcar_dmac_realloc_hwdesc(rchan, desc, 0); list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) { list_del(&page->node); free_page((unsigned long)page); } /* Remove slave mapping if present. */ if (map->slave.xfer_size) { dma_unmap_resource(chan->device->dev, map->addr, map->slave.xfer_size, map->dir, 0); map->slave.xfer_size = 0; } pm_runtime_put(chan->device->dev); } static struct dma_async_tx_descriptor * rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, size_t len, unsigned long flags) { struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); struct scatterlist sgl; if (!len) return NULL; sg_init_table(&sgl, 1); sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len, offset_in_page(dma_src)); sg_dma_address(&sgl) = dma_src; sg_dma_len(&sgl) = len; return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest, DMA_MEM_TO_MEM, flags, false); } static int rcar_dmac_map_slave_addr(struct dma_chan *chan, enum dma_transfer_direction dir) { struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); struct rcar_dmac_chan_map *map = &rchan->map; phys_addr_t dev_addr; size_t dev_size; enum dma_data_direction dev_dir; if (dir == DMA_DEV_TO_MEM) { dev_addr = rchan->src.slave_addr; dev_size = rchan->src.xfer_size; dev_dir = DMA_TO_DEVICE; } else { dev_addr = rchan->dst.slave_addr; dev_size = rchan->dst.xfer_size; dev_dir = DMA_FROM_DEVICE; } /* Reuse current map if possible. */ if (dev_addr == map->slave.slave_addr && dev_size == map->slave.xfer_size && dev_dir == map->dir) return 0; /* Remove old mapping if present. */ if (map->slave.xfer_size) dma_unmap_resource(chan->device->dev, map->addr, map->slave.xfer_size, map->dir, 0); map->slave.xfer_size = 0; /* Create new slave address map. */ map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size, dev_dir, 0); if (dma_mapping_error(chan->device->dev, map->addr)) { dev_err(chan->device->dev, "chan%u: failed to map %zx@%pap", rchan->index, dev_size, &dev_addr); return -EIO; } dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n", rchan->index, dev_size, &dev_addr, &map->addr, dev_dir == DMA_TO_DEVICE ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE"); map->slave.slave_addr = dev_addr; map->slave.xfer_size = dev_size; map->dir = dev_dir; return 0; } static struct dma_async_tx_descriptor * rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction dir, unsigned long flags, void *context) { struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); /* Someone calling slave DMA on a generic channel? 
*/ if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) { dev_warn(chan->device->dev, "%s: bad parameter: len=%d, id=%d\n", __func__, sg_len, rchan->mid_rid); return NULL; } if (rcar_dmac_map_slave_addr(chan, dir)) return NULL; return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr, dir, flags, false); } #define RCAR_DMAC_MAX_SG_LEN 32 static struct dma_async_tx_descriptor * rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction dir, unsigned long flags) { struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); struct dma_async_tx_descriptor *desc; struct scatterlist *sgl; unsigned int sg_len; unsigned int i; /* Someone calling slave DMA on a generic channel? */ if (rchan->mid_rid < 0 || buf_len < period_len) { dev_warn(chan->device->dev, "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n", __func__, buf_len, period_len, rchan->mid_rid); return NULL; } if (rcar_dmac_map_slave_addr(chan, dir)) return NULL; sg_len = buf_len / period_len; if (sg_len > RCAR_DMAC_MAX_SG_LEN) { dev_err(chan->device->dev, "chan%u: sg length %d exceeds limit %d", rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN); return NULL; } /* * Allocate the sg list dynamically as it would consume too much stack * space. */ sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_NOWAIT); if (!sgl) return NULL; sg_init_table(sgl, sg_len); for (i = 0; i < sg_len; ++i) { dma_addr_t src = buf_addr + (period_len * i); sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len, offset_in_page(src)); sg_dma_address(&sgl[i]) = src; sg_dma_len(&sgl[i]) = period_len; } desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr, dir, flags, true); kfree(sgl); return desc; } static int rcar_dmac_device_config(struct dma_chan *chan, struct dma_slave_config *cfg) { struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); /* * We could lock this, but you shouldn't be configuring the * channel, while using it... */ rchan->src.slave_addr = cfg->src_addr; rchan->dst.slave_addr = cfg->dst_addr; rchan->src.xfer_size = cfg->src_addr_width; rchan->dst.xfer_size = cfg->dst_addr_width; return 0; } static int rcar_dmac_chan_terminate_all(struct dma_chan *chan) { struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); unsigned long flags; spin_lock_irqsave(&rchan->lock, flags); rcar_dmac_chan_halt(rchan); spin_unlock_irqrestore(&rchan->lock, flags); /* * FIXME: No new interrupt can occur now, but the IRQ thread might still * be running. */ rcar_dmac_chan_reinit(rchan); return 0; } static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, dma_cookie_t cookie) { struct rcar_dmac_desc *desc = chan->desc.running; struct rcar_dmac_xfer_chunk *running = NULL; struct rcar_dmac_xfer_chunk *chunk; enum dma_status status; unsigned int residue = 0; unsigned int dptr = 0; unsigned int chcrb; unsigned int tcrb; unsigned int i; if (!desc) return 0; /* * If the cookie corresponds to a descriptor that has been completed * there is no residue. The same check has already been performed by the * caller but without holding the channel lock, so the descriptor could * now be complete. */ status = dma_cookie_status(&chan->chan, cookie, NULL); if (status == DMA_COMPLETE) return 0; /* * If the cookie doesn't correspond to the currently running transfer * then the descriptor hasn't been processed yet, and the residue is * equal to the full descriptor size. * Also, a client driver is possible to call this function before * rcar_dmac_isr_channel_thread() runs. 
In this case, the "desc.running" * will be the next descriptor, and the done list will appear. So, if * the argument cookie matches the done list's cookie, we can assume * the residue is zero. */ if (cookie != desc->async_tx.cookie) { list_for_each_entry(desc, &chan->desc.done, node) { if (cookie == desc->async_tx.cookie) return 0; } list_for_each_entry(desc, &chan->desc.pending, node) { if (cookie == desc->async_tx.cookie) return desc->size; } list_for_each_entry(desc, &chan->desc.active, node) { if (cookie == desc->async_tx.cookie) return desc->size; } /* * No descriptor found for the cookie, there's thus no residue. * This shouldn't happen if the calling driver passes a correct * cookie value. */ WARN(1, "No descriptor for cookie!"); return 0; } /* * We need to read two registers. * Make sure the control register does not skip to next chunk * while reading the counter. * Trying it 3 times should be enough: Initial read, retry, retry * for the paranoid. */ for (i = 0; i < 3; i++) { chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & RCAR_DMACHCRB_DPTR_MASK; tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB); /* Still the same? */ if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & RCAR_DMACHCRB_DPTR_MASK)) break; } WARN_ONCE(i >= 3, "residue might be not continuous!"); /* * In descriptor mode the descriptor running pointer is not maintained * by the interrupt handler, find the running descriptor from the * descriptor pointer field in the CHCRB register. In non-descriptor * mode just use the running descriptor pointer. */ if (desc->hwdescs.use) { dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT; if (dptr == 0) dptr = desc->nchunks; dptr--; WARN_ON(dptr >= desc->nchunks); } else { running = desc->running; } /* Compute the size of all chunks still to be transferred. */ list_for_each_entry_reverse(chunk, &desc->chunks, node) { if (chunk == running || ++dptr == desc->nchunks) break; residue += chunk->size; } /* Add the residue for the current chunk. */ residue += tcrb << desc->xfer_shift; return residue; } static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); enum dma_status status; unsigned long flags; unsigned int residue; bool cyclic; status = dma_cookie_status(chan, cookie, txstate); if (status == DMA_COMPLETE || !txstate) return status; spin_lock_irqsave(&rchan->lock, flags); residue = rcar_dmac_chan_get_residue(rchan, cookie); cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false; spin_unlock_irqrestore(&rchan->lock, flags); /* if there's no residue, the cookie is complete */ if (!residue && !cyclic) return DMA_COMPLETE; dma_set_residue(txstate, residue); return status; } static void rcar_dmac_issue_pending(struct dma_chan *chan) { struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); unsigned long flags; spin_lock_irqsave(&rchan->lock, flags); if (list_empty(&rchan->desc.pending)) goto done; /* Append the pending list to the active list. */ list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active); /* * If no transfer is running pick the first descriptor from the active * list and start the transfer. 
*/ if (!rchan->desc.running) { struct rcar_dmac_desc *desc; desc = list_first_entry(&rchan->desc.active, struct rcar_dmac_desc, node); rchan->desc.running = desc; rcar_dmac_chan_start_xfer(rchan); } done: spin_unlock_irqrestore(&rchan->lock, flags); } static void rcar_dmac_device_synchronize(struct dma_chan *chan) { struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); synchronize_irq(rchan->irq); } /* ----------------------------------------------------------------------------- * IRQ handling */ static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan) { struct rcar_dmac_desc *desc = chan->desc.running; unsigned int stage; if (WARN_ON(!desc || !desc->cyclic)) { /* * This should never happen, there should always be a running * cyclic descriptor when a descriptor stage end interrupt is * triggered. Warn and return. */ return IRQ_NONE; } /* Program the interrupt pointer to the next stage. */ stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT; rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage)); return IRQ_WAKE_THREAD; } static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan) { struct rcar_dmac_desc *desc = chan->desc.running; irqreturn_t ret = IRQ_WAKE_THREAD; if (WARN_ON_ONCE(!desc)) { /* * This should never happen, there should always be a running * descriptor when a transfer end interrupt is triggered. Warn * and return. */ return IRQ_NONE; } /* * The transfer end interrupt isn't generated for each chunk when using * descriptor mode. Only update the running chunk pointer in * non-descriptor mode. */ if (!desc->hwdescs.use) { /* * If we haven't completed the last transfer chunk simply move * to the next one. Only wake the IRQ thread if the transfer is * cyclic. */ if (!list_is_last(&desc->running->node, &desc->chunks)) { desc->running = list_next_entry(desc->running, node); if (!desc->cyclic) ret = IRQ_HANDLED; goto done; } /* * We've completed the last transfer chunk. If the transfer is * cyclic, move back to the first one. */ if (desc->cyclic) { desc->running = list_first_entry(&desc->chunks, struct rcar_dmac_xfer_chunk, node); goto done; } } /* The descriptor is complete, move it to the done list. */ list_move_tail(&desc->node, &chan->desc.done); /* Queue the next descriptor, if any. */ if (!list_empty(&chan->desc.active)) chan->desc.running = list_first_entry(&chan->desc.active, struct rcar_dmac_desc, node); else chan->desc.running = NULL; done: if (chan->desc.running) rcar_dmac_chan_start_xfer(chan); return ret; } static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev) { u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE; struct rcar_dmac_chan *chan = dev; irqreturn_t ret = IRQ_NONE; bool reinit = false; u32 chcr; spin_lock(&chan->lock); chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); if (chcr & RCAR_DMACHCR_CAE) { struct rcar_dmac *dmac = to_rcar_dmac(chan->chan.device); /* * We don't need to call rcar_dmac_chan_halt() * because channel is already stopped in error case. * We need to clear register and check DE bit as recovery. 
*/ rcar_dmac_chan_clear(dmac, chan); rcar_dmac_chcr_de_barrier(chan); reinit = true; goto spin_lock_end; } if (chcr & RCAR_DMACHCR_TE) mask |= RCAR_DMACHCR_DE; rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask); if (mask & RCAR_DMACHCR_DE) rcar_dmac_chcr_de_barrier(chan); if (chcr & RCAR_DMACHCR_DSE) ret |= rcar_dmac_isr_desc_stage_end(chan); if (chcr & RCAR_DMACHCR_TE) ret |= rcar_dmac_isr_transfer_end(chan); spin_lock_end: spin_unlock(&chan->lock); if (reinit) { dev_err(chan->chan.device->dev, "Channel Address Error\n"); rcar_dmac_chan_reinit(chan); ret = IRQ_HANDLED; } return ret; } static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev) { struct rcar_dmac_chan *chan = dev; struct rcar_dmac_desc *desc; struct dmaengine_desc_callback cb; spin_lock_irq(&chan->lock); /* For cyclic transfers notify the user after every chunk. */ if (chan->desc.running && chan->desc.running->cyclic) { desc = chan->desc.running; dmaengine_desc_get_callback(&desc->async_tx, &cb); if (dmaengine_desc_callback_valid(&cb)) { spin_unlock_irq(&chan->lock); dmaengine_desc_callback_invoke(&cb, NULL); spin_lock_irq(&chan->lock); } } /* * Call the callback function for all descriptors on the done list and * move them to the ack wait list. */ while (!list_empty(&chan->desc.done)) { desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc, node); dma_cookie_complete(&desc->async_tx); list_del(&desc->node); dmaengine_desc_get_callback(&desc->async_tx, &cb); if (dmaengine_desc_callback_valid(&cb)) { spin_unlock_irq(&chan->lock); /* * We own the only reference to this descriptor, we can * safely dereference it without holding the channel * lock. */ dmaengine_desc_callback_invoke(&cb, NULL); spin_lock_irq(&chan->lock); } list_add_tail(&desc->node, &chan->desc.wait); } spin_unlock_irq(&chan->lock); /* Recycle all acked descriptors. */ rcar_dmac_desc_recycle_acked(chan); return IRQ_HANDLED; } /* ----------------------------------------------------------------------------- * OF xlate and channel filter */ static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg) { struct rcar_dmac *dmac = to_rcar_dmac(chan->device); struct of_phandle_args *dma_spec = arg; /* * FIXME: Using a filter on OF platforms is a nonsense. The OF xlate * function knows from which device it wants to allocate a channel from, * and would be perfectly capable of selecting the channel it wants. * Forcing it to call dma_request_channel() and iterate through all * channels from all controllers is just pointless. 
*/ if (chan->device->device_config != rcar_dmac_device_config) return false; return !test_and_set_bit(dma_spec->args[0], dmac->modules); } static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { struct rcar_dmac_chan *rchan; struct dma_chan *chan; dma_cap_mask_t mask; if (dma_spec->args_count != 1) return NULL; /* Only slave DMA channels can be allocated via DT */ dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); chan = __dma_request_channel(&mask, rcar_dmac_chan_filter, dma_spec, ofdma->of_node); if (!chan) return NULL; rchan = to_rcar_dmac_chan(chan); rchan->mid_rid = dma_spec->args[0]; return chan; } /* ----------------------------------------------------------------------------- * Power management */ #ifdef CONFIG_PM static int rcar_dmac_runtime_suspend(struct device *dev) { return 0; } static int rcar_dmac_runtime_resume(struct device *dev) { struct rcar_dmac *dmac = dev_get_drvdata(dev); return rcar_dmac_init(dmac); } #endif static const struct dev_pm_ops rcar_dmac_pm = { /* * TODO for system sleep/resume: * - Wait for the current transfer to complete and stop the device, * - Resume transfers, if any. */ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume, NULL) }; /* ----------------------------------------------------------------------------- * Probe and remove */ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, struct rcar_dmac_chan *rchan) { struct platform_device *pdev = to_platform_device(dmac->dev); struct dma_chan *chan = &rchan->chan; char pdev_irqname[5]; char *irqname; int ret; rchan->mid_rid = -EINVAL; spin_lock_init(&rchan->lock); INIT_LIST_HEAD(&rchan->desc.free); INIT_LIST_HEAD(&rchan->desc.pending); INIT_LIST_HEAD(&rchan->desc.active); INIT_LIST_HEAD(&rchan->desc.done); INIT_LIST_HEAD(&rchan->desc.wait); /* Request the channel interrupt. */ sprintf(pdev_irqname, "ch%u", rchan->index); rchan->irq = platform_get_irq_byname(pdev, pdev_irqname); if (rchan->irq < 0) return -ENODEV; irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u", dev_name(dmac->dev), rchan->index); if (!irqname) return -ENOMEM; /* * Initialize the DMA engine channel and add it to the DMA engine * channels list. */ chan->device = &dmac->engine; dma_cookie_init(chan); list_add_tail(&chan->device_node, &dmac->engine.channels); ret = devm_request_threaded_irq(dmac->dev, rchan->irq, rcar_dmac_isr_channel, rcar_dmac_isr_channel_thread, 0, irqname, rchan); if (ret) { dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", rchan->irq, ret); return ret; } return 0; } #define RCAR_DMAC_MAX_CHANNELS 32 static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac) { struct device_node *np = dev->of_node; int ret; ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels); if (ret < 0) { dev_err(dev, "unable to read dma-channels property\n"); return ret; } /* The hardware and driver don't support more than 32 bits in CHCLR */ if (dmac->n_channels <= 0 || dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) { dev_err(dev, "invalid number of channels %u\n", dmac->n_channels); return -EINVAL; } /* * If the driver is unable to read dma-channel-mask property, * the driver assumes that it can use all channels. 
*/ dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0); of_property_read_u32(np, "dma-channel-mask", &dmac->channels_mask); /* If the property has out-of-channel mask, this driver clears it */ dmac->channels_mask &= GENMASK(dmac->n_channels - 1, 0); return 0; } static int rcar_dmac_probe(struct platform_device *pdev) { const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES; const struct rcar_dmac_of_data *data; struct rcar_dmac_chan *chan; struct dma_device *engine; void __iomem *chan_base; struct rcar_dmac *dmac; unsigned int i; int ret; data = of_device_get_match_data(&pdev->dev); if (!data) return -EINVAL; dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); if (!dmac) return -ENOMEM; dmac->dev = &pdev->dev; platform_set_drvdata(pdev, dmac); ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); if (ret) return ret; ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); if (ret) return ret; ret = rcar_dmac_parse_of(&pdev->dev, dmac); if (ret < 0) return ret; /* * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 to be * flushed correctly, resulting in memory corruption. DMAC 0 channel 0 * is connected to microTLB 0 on currently supported platforms, so we * can't use it with the IPMMU. As the IOMMU API operates at the device * level we can't disable it selectively, so ignore channel 0 for now if * the device is part of an IOMMU group. */ if (device_iommu_mapped(&pdev->dev)) dmac->channels_mask &= ~BIT(0); dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels, sizeof(*dmac->channels), GFP_KERNEL); if (!dmac->channels) return -ENOMEM; /* Request resources. */ dmac->dmac_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dmac->dmac_base)) return PTR_ERR(dmac->dmac_base); if (!data->chan_offset_base) { dmac->chan_base = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(dmac->chan_base)) return PTR_ERR(dmac->chan_base); chan_base = dmac->chan_base; } else { chan_base = dmac->dmac_base + data->chan_offset_base; } for_each_rcar_dmac_chan(i, dmac, chan) { chan->index = i; chan->iomem = chan_base + i * data->chan_offset_stride; } /* Enable runtime PM and initialize the device. 
*/ pm_runtime_enable(&pdev->dev); ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); goto err_pm_disable; } ret = rcar_dmac_init(dmac); pm_runtime_put(&pdev->dev); if (ret) { dev_err(&pdev->dev, "failed to reset device\n"); goto err_pm_disable; } /* Initialize engine */ engine = &dmac->engine; dma_cap_set(DMA_MEMCPY, engine->cap_mask); dma_cap_set(DMA_SLAVE, engine->cap_mask); engine->dev = &pdev->dev; engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE); engine->src_addr_widths = widths; engine->dst_addr_widths = widths; engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources; engine->device_free_chan_resources = rcar_dmac_free_chan_resources; engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy; engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg; engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic; engine->device_config = rcar_dmac_device_config; engine->device_pause = rcar_dmac_chan_pause; engine->device_terminate_all = rcar_dmac_chan_terminate_all; engine->device_tx_status = rcar_dmac_tx_status; engine->device_issue_pending = rcar_dmac_issue_pending; engine->device_synchronize = rcar_dmac_device_synchronize; INIT_LIST_HEAD(&engine->channels); for_each_rcar_dmac_chan(i, dmac, chan) { ret = rcar_dmac_chan_probe(dmac, chan); if (ret < 0) goto err_pm_disable; } /* Register the DMAC as a DMA provider for DT. */ ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate, NULL); if (ret < 0) goto err_pm_disable; /* * Register the DMA engine device. * * Default transfer size of 32 bytes requires 32-byte alignment. */ ret = dma_async_device_register(engine); if (ret < 0) goto err_dma_free; return 0; err_dma_free: of_dma_controller_free(pdev->dev.of_node); err_pm_disable: pm_runtime_disable(&pdev->dev); return ret; } static int rcar_dmac_remove(struct platform_device *pdev) { struct rcar_dmac *dmac = platform_get_drvdata(pdev); of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&dmac->engine); pm_runtime_disable(&pdev->dev); return 0; } static void rcar_dmac_shutdown(struct platform_device *pdev) { struct rcar_dmac *dmac = platform_get_drvdata(pdev); rcar_dmac_stop_all_chan(dmac); } static const struct rcar_dmac_of_data rcar_dmac_data = { .chan_offset_base = 0x8000, .chan_offset_stride = 0x80, }; static const struct rcar_dmac_of_data rcar_gen4_dmac_data = { .chan_offset_base = 0x0, .chan_offset_stride = 0x1000, }; static const struct of_device_id rcar_dmac_of_ids[] = { { .compatible = "renesas,rcar-dmac", .data = &rcar_dmac_data, }, { .compatible = "renesas,rcar-gen4-dmac", .data = &rcar_gen4_dmac_data, }, { .compatible = "renesas,dmac-r8a779a0", .data = &rcar_gen4_dmac_data, }, { /* Sentinel */ } }; MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids); static struct platform_driver rcar_dmac_driver = { .driver = { .pm = &rcar_dmac_pm, .name = "rcar-dmac", .of_match_table = rcar_dmac_of_ids, }, .probe = rcar_dmac_probe, .remove = rcar_dmac_remove, .shutdown = rcar_dmac_shutdown, }; module_platform_driver(rcar_dmac_driver); MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver"); MODULE_AUTHOR("Laurent Pinchart <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/dma/sh/rcar-dmac.c
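rcar-dmac additionally implements device_prep_dma_cyclic, which clients such as audio drivers use for ring-buffer transfers; its descriptor-stage-end interrupt path exists precisely to report period completion in that mode. The sketch below shows how such a client would typically set this up, assuming a channel that has already been requested via dma_request_chan(); example_start_cyclic and example_period_elapsed are hypothetical names, and the only driver-specific constraint reflected here is that buf_len / period_len must not exceed RCAR_DMAC_MAX_SG_LEN (32) as enforced in rcar_dmac_prep_dma_cyclic() above.

/*
 * Hedged sketch (illustrative only): a cyclic dmaengine client, e.g. an
 * audio ring buffer feeding a device FIFO.
 */
#include <linux/dmaengine.h>

static void example_period_elapsed(void *param)
{
	/* Invoked once per completed period (descriptor stage end). */
}

static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len,
				phys_addr_t fifo_addr)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config cfg = {
		.dst_addr = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	};

	if (dmaengine_slave_config(chan, &cfg))
		return -EINVAL;

	/* buf_len must be a multiple of period_len; at most 32 periods. */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = example_period_elapsed;
	desc->callback_param = NULL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}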
// SPDX-License-Identifier: GPL-2.0+ /* * Renesas SuperH DMA Engine support * * base is drivers/dma/flsdma.c * * Copyright (C) 2011-2012 Guennadi Liakhovetski <[email protected]> * Copyright (C) 2009 Nobuhiro Iwamatsu <[email protected]> * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. * * - DMA of SuperH does not have Hardware DMA chain mode. * - MAX DMA size is 16MB. * */ #include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kdebug.h> #include <linux/module.h> #include <linux/notifier.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/rculist.h> #include <linux/sh_dma.h> #include <linux/slab.h> #include <linux/spinlock.h> #include "../dmaengine.h" #include "shdma.h" /* DMA registers */ #define SAR 0x00 /* Source Address Register */ #define DAR 0x04 /* Destination Address Register */ #define TCR 0x08 /* Transfer Count Register */ #define CHCR 0x0C /* Channel Control Register */ #define DMAOR 0x40 /* DMA Operation Register */ #define TEND 0x18 /* USB-DMAC */ #define SH_DMAE_DRV_NAME "sh-dma-engine" /* Default MEMCPY transfer size = 2^2 = 4 bytes */ #define LOG2_DEFAULT_XFER_SIZE 2 #define SH_DMA_SLAVE_NUMBER 256 #define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1) /* * Used for write-side mutual exclusion for the global device list, * read-side synchronization by way of RCU, and per-controller data. */ static DEFINE_SPINLOCK(sh_dmae_lock); static LIST_HEAD(sh_dmae_devices); /* * Different DMAC implementations provide different ways to clear DMA channels: * (1) none - no CHCLR registers are available * (2) one CHCLR register per channel - 0 has to be written to it to clear * channel buffers * (3) one CHCLR per several channels - 1 has to be written to the bit, * corresponding to the specific channel to reset it */ static void channel_clear(struct sh_dmae_chan *sh_dc) { struct sh_dmae_device *shdev = to_sh_dev(sh_dc); const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel + sh_dc->shdma_chan.id; u32 val = shdev->pdata->chclr_bitwise ? 
1 << chan_pdata->chclr_bit : 0; __raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset); } static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) { __raw_writel(data, sh_dc->base + reg); } static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) { return __raw_readl(sh_dc->base + reg); } static u16 dmaor_read(struct sh_dmae_device *shdev) { void __iomem *addr = shdev->chan_reg + DMAOR; if (shdev->pdata->dmaor_is_32bit) return __raw_readl(addr); else return __raw_readw(addr); } static void dmaor_write(struct sh_dmae_device *shdev, u16 data) { void __iomem *addr = shdev->chan_reg + DMAOR; if (shdev->pdata->dmaor_is_32bit) __raw_writel(data, addr); else __raw_writew(data, addr); } static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data) { struct sh_dmae_device *shdev = to_sh_dev(sh_dc); __raw_writel(data, sh_dc->base + shdev->chcr_offset); } static u32 chcr_read(struct sh_dmae_chan *sh_dc) { struct sh_dmae_device *shdev = to_sh_dev(sh_dc); return __raw_readl(sh_dc->base + shdev->chcr_offset); } /* * Reset DMA controller * * SH7780 has two DMAOR register */ static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev) { unsigned short dmaor; unsigned long flags; spin_lock_irqsave(&sh_dmae_lock, flags); dmaor = dmaor_read(shdev); dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME)); spin_unlock_irqrestore(&sh_dmae_lock, flags); } static int sh_dmae_rst(struct sh_dmae_device *shdev) { unsigned short dmaor; unsigned long flags; spin_lock_irqsave(&sh_dmae_lock, flags); dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME); if (shdev->pdata->chclr_present) { int i; for (i = 0; i < shdev->pdata->channel_num; i++) { struct sh_dmae_chan *sh_chan = shdev->chan[i]; if (sh_chan) channel_clear(sh_chan); } } dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init); dmaor = dmaor_read(shdev); spin_unlock_irqrestore(&sh_dmae_lock, flags); if (dmaor & (DMAOR_AE | DMAOR_NMIF)) { dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n"); return -EIO; } if (shdev->pdata->dmaor_init & ~dmaor) dev_warn(shdev->shdma_dev.dma_dev.dev, "DMAOR=0x%x hasn't latched the initial value 0x%x.\n", dmaor, shdev->pdata->dmaor_init); return 0; } static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) { u32 chcr = chcr_read(sh_chan); if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) return true; /* working */ return false; /* waiting */ } static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) { struct sh_dmae_device *shdev = to_sh_dev(sh_chan); const struct sh_dmae_pdata *pdata = shdev->pdata; int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); if (cnt >= pdata->ts_shift_num) cnt = 0; return pdata->ts_shift[cnt]; } static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) { struct sh_dmae_device *shdev = to_sh_dev(sh_chan); const struct sh_dmae_pdata *pdata = shdev->pdata; int i; for (i = 0; i < pdata->ts_shift_num; i++) if (pdata->ts_shift[i] == l2size) break; if (i == pdata->ts_shift_num) i = 0; return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) | ((i << pdata->ts_high_shift) & pdata->ts_high_mask); } static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) { sh_dmae_writel(sh_chan, hw->sar, SAR); sh_dmae_writel(sh_chan, hw->dar, DAR); sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR); } static void dmae_start(struct sh_dmae_chan *sh_chan) { struct sh_dmae_device *shdev = to_sh_dev(sh_chan); u32 chcr = chcr_read(sh_chan); if 
(shdev->pdata->needs_tend_set) sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND); chcr |= CHCR_DE | shdev->chcr_ie_bit; chcr_write(sh_chan, chcr & ~CHCR_TE); } static void dmae_init(struct sh_dmae_chan *sh_chan) { /* * Default configuration for dual address memory-memory transfer. */ u32 chcr = DM_INC | SM_INC | RS_AUTO | log2size_to_chcr(sh_chan, LOG2_DEFAULT_XFER_SIZE); sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); chcr_write(sh_chan, chcr); } static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) { /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */ if (dmae_is_busy(sh_chan)) return -EBUSY; sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); chcr_write(sh_chan, val); return 0; } static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) { struct sh_dmae_device *shdev = to_sh_dev(sh_chan); const struct sh_dmae_pdata *pdata = shdev->pdata; const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id]; void __iomem *addr = shdev->dmars; unsigned int shift = chan_pdata->dmars_bit; if (dmae_is_busy(sh_chan)) return -EBUSY; if (pdata->no_dmars) return 0; /* in the case of a missing DMARS resource use first memory window */ if (!addr) addr = shdev->chan_reg; addr += chan_pdata->dmars; __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), addr); return 0; } static void sh_dmae_start_xfer(struct shdma_chan *schan, struct shdma_desc *sdesc) { struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, shdma_chan); struct sh_dmae_desc *sh_desc = container_of(sdesc, struct sh_dmae_desc, shdma_desc); dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n", sdesc->async_tx.cookie, sh_chan->shdma_chan.id, sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar); /* Get the ld start address from ld_queue */ dmae_set_reg(sh_chan, &sh_desc->hw); dmae_start(sh_chan); } static bool sh_dmae_channel_busy(struct shdma_chan *schan) { struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, shdma_chan); return dmae_is_busy(sh_chan); } static void sh_dmae_setup_xfer(struct shdma_chan *schan, int slave_id) { struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, shdma_chan); if (slave_id >= 0) { const struct sh_dmae_slave_config *cfg = sh_chan->config; dmae_set_dmars(sh_chan, cfg->mid_rid); dmae_set_chcr(sh_chan, cfg->chcr); } else { dmae_init(sh_chan); } } /* * Find a slave channel configuration from the contoller list by either a slave * ID in the non-DT case, or by a MID/RID value in the DT case */ static const struct sh_dmae_slave_config *dmae_find_slave( struct sh_dmae_chan *sh_chan, int match) { struct sh_dmae_device *shdev = to_sh_dev(sh_chan); const struct sh_dmae_pdata *pdata = shdev->pdata; const struct sh_dmae_slave_config *cfg; int i; if (!sh_chan->shdma_chan.dev->of_node) { if (match >= SH_DMA_SLAVE_NUMBER) return NULL; for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) if (cfg->slave_id == match) return cfg; } else { for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) if (cfg->mid_rid == match) { sh_chan->shdma_chan.slave_id = i; return cfg; } } return NULL; } static int sh_dmae_set_slave(struct shdma_chan *schan, int slave_id, dma_addr_t slave_addr, bool try) { struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, shdma_chan); const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id); if (!cfg) return -ENXIO; if (!try) { sh_chan->config = cfg; sh_chan->slave_addr = slave_addr ? 
: cfg->addr; } return 0; } static void dmae_halt(struct sh_dmae_chan *sh_chan) { struct sh_dmae_device *shdev = to_sh_dev(sh_chan); u32 chcr = chcr_read(sh_chan); chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit); chcr_write(sh_chan, chcr); } static int sh_dmae_desc_setup(struct shdma_chan *schan, struct shdma_desc *sdesc, dma_addr_t src, dma_addr_t dst, size_t *len) { struct sh_dmae_desc *sh_desc = container_of(sdesc, struct sh_dmae_desc, shdma_desc); if (*len > schan->max_xfer_len) *len = schan->max_xfer_len; sh_desc->hw.sar = src; sh_desc->hw.dar = dst; sh_desc->hw.tcr = *len; return 0; } static void sh_dmae_halt(struct shdma_chan *schan) { struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, shdma_chan); dmae_halt(sh_chan); } static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq) { struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, shdma_chan); if (!(chcr_read(sh_chan) & CHCR_TE)) return false; /* DMA stop */ dmae_halt(sh_chan); return true; } static size_t sh_dmae_get_partial(struct shdma_chan *schan, struct shdma_desc *sdesc) { struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, shdma_chan); struct sh_dmae_desc *sh_desc = container_of(sdesc, struct sh_dmae_desc, shdma_desc); return sh_desc->hw.tcr - (sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift); } /* Called from error IRQ or NMI */ static bool sh_dmae_reset(struct sh_dmae_device *shdev) { bool ret; /* halt the dma controller */ sh_dmae_ctl_stop(shdev); /* We cannot detect, which channel caused the error, have to reset all */ ret = shdma_reset(&shdev->shdma_dev); sh_dmae_rst(shdev); return ret; } static irqreturn_t sh_dmae_err(int irq, void *data) { struct sh_dmae_device *shdev = data; if (!(dmaor_read(shdev) & DMAOR_AE)) return IRQ_NONE; sh_dmae_reset(shdev); return IRQ_HANDLED; } static bool sh_dmae_desc_completed(struct shdma_chan *schan, struct shdma_desc *sdesc) { struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, shdma_chan); struct sh_dmae_desc *sh_desc = container_of(sdesc, struct sh_dmae_desc, shdma_desc); u32 sar_buf = sh_dmae_readl(sh_chan, SAR); u32 dar_buf = sh_dmae_readl(sh_chan, DAR); return (sdesc->direction == DMA_DEV_TO_MEM && (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) || (sdesc->direction != DMA_DEV_TO_MEM && (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf); } static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev) { /* Fast path out if NMIF is not asserted for this controller */ if ((dmaor_read(shdev) & DMAOR_NMIF) == 0) return false; return sh_dmae_reset(shdev); } static int sh_dmae_nmi_handler(struct notifier_block *self, unsigned long cmd, void *data) { struct sh_dmae_device *shdev; int ret = NOTIFY_DONE; bool triggered; /* * Only concern ourselves with NMI events. * * Normally we would check the die chain value, but as this needs * to be architecture independent, check for NMI context instead. */ if (!in_nmi()) return NOTIFY_DONE; rcu_read_lock(); list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) { /* * Only stop if one of the controllers has NMIF asserted, * we do not want to interfere with regular address error * handling or NMI events that don't concern the DMACs. 
*/ triggered = sh_dmae_nmi_notify(shdev); if (triggered == true) ret = NOTIFY_OK; } rcu_read_unlock(); return ret; } static struct notifier_block sh_dmae_nmi_notifier __read_mostly = { .notifier_call = sh_dmae_nmi_handler, /* Run before NMI debug handler and KGDB */ .priority = 1, }; static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, int irq, unsigned long flags) { const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; struct shdma_dev *sdev = &shdev->shdma_dev; struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev); struct sh_dmae_chan *sh_chan; struct shdma_chan *schan; int err; sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan), GFP_KERNEL); if (!sh_chan) return -ENOMEM; schan = &sh_chan->shdma_chan; schan->max_xfer_len = SH_DMA_TCR_MAX + 1; shdma_chan_probe(sdev, schan, id); sh_chan->base = shdev->chan_reg + chan_pdata->offset; /* set up channel irq */ if (pdev->id >= 0) snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id), "sh-dmae%d.%d", pdev->id, id); else snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id), "sh-dma%d", id); err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id); if (err) { dev_err(sdev->dma_dev.dev, "DMA channel %d request_irq error %d\n", id, err); goto err_no_irq; } shdev->chan[id] = sh_chan; return 0; err_no_irq: /* remove from dmaengine device node */ shdma_chan_remove(schan); return err; } static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) { struct shdma_chan *schan; int i; shdma_for_each_chan(schan, &shdev->shdma_dev, i) { BUG_ON(!schan); shdma_chan_remove(schan); } } #ifdef CONFIG_PM static int sh_dmae_runtime_suspend(struct device *dev) { struct sh_dmae_device *shdev = dev_get_drvdata(dev); sh_dmae_ctl_stop(shdev); return 0; } static int sh_dmae_runtime_resume(struct device *dev) { struct sh_dmae_device *shdev = dev_get_drvdata(dev); return sh_dmae_rst(shdev); } #endif #ifdef CONFIG_PM_SLEEP static int sh_dmae_suspend(struct device *dev) { struct sh_dmae_device *shdev = dev_get_drvdata(dev); sh_dmae_ctl_stop(shdev); return 0; } static int sh_dmae_resume(struct device *dev) { struct sh_dmae_device *shdev = dev_get_drvdata(dev); int i, ret; ret = sh_dmae_rst(shdev); if (ret < 0) dev_err(dev, "Failed to reset!\n"); for (i = 0; i < shdev->pdata->channel_num; i++) { struct sh_dmae_chan *sh_chan = shdev->chan[i]; if (!sh_chan->shdma_chan.desc_num) continue; if (sh_chan->shdma_chan.slave_id >= 0) { const struct sh_dmae_slave_config *cfg = sh_chan->config; dmae_set_dmars(sh_chan, cfg->mid_rid); dmae_set_chcr(sh_chan, cfg->chcr); } else { dmae_init(sh_chan); } } return 0; } #endif static const struct dev_pm_ops sh_dmae_pm = { SET_SYSTEM_SLEEP_PM_OPS(sh_dmae_suspend, sh_dmae_resume) SET_RUNTIME_PM_OPS(sh_dmae_runtime_suspend, sh_dmae_runtime_resume, NULL) }; static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan) { struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, shdma_chan); /* * Implicit BUG_ON(!sh_chan->config) * This is an exclusive slave DMA operation, may only be called after a * successful slave configuration. 
*/ return sh_chan->slave_addr; } static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i) { return &((struct sh_dmae_desc *)buf)[i].shdma_desc; } static const struct shdma_ops sh_dmae_shdma_ops = { .desc_completed = sh_dmae_desc_completed, .halt_channel = sh_dmae_halt, .channel_busy = sh_dmae_channel_busy, .slave_addr = sh_dmae_slave_addr, .desc_setup = sh_dmae_desc_setup, .set_slave = sh_dmae_set_slave, .setup_xfer = sh_dmae_setup_xfer, .start_xfer = sh_dmae_start_xfer, .embedded_desc = sh_dmae_embedded_desc, .chan_irq = sh_dmae_chan_irq, .get_partial = sh_dmae_get_partial, }; static int sh_dmae_probe(struct platform_device *pdev) { const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES; const struct sh_dmae_pdata *pdata; unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {}; int chan_irq[SH_DMAE_MAX_CHANNELS]; unsigned long irqflags = 0; int err, errirq, i, irq_cnt = 0, irqres = 0, irq_cap = 0; struct sh_dmae_device *shdev; struct dma_device *dma_dev; struct resource *dmars, *errirq_res, *chanirq_res; if (pdev->dev.of_node) pdata = of_device_get_match_data(&pdev->dev); else pdata = dev_get_platdata(&pdev->dev); /* get platform data */ if (!pdata || !pdata->channel_num) return -ENODEV; /* DMARS area is optional */ dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); /* * IRQ resources: * 1. there always must be at least one IRQ IO-resource. On SH4 it is * the error IRQ, in which case it is the only IRQ in this resource: * start == end. If it is the only IRQ resource, all channels also * use the same IRQ. * 2. DMA channel IRQ resources can be specified one per resource or in * ranges (start != end) * 3. iff all events (channels and, optionally, error) on this * controller use the same IRQ, only one IRQ resource can be * specified, otherwise there must be one IRQ per channel, even if * some of them are equal * 4. 
if all IRQs on this controller are equal or if some specific IRQs * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be * requested with the IRQF_SHARED flag */ errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!errirq_res) return -ENODEV; shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device), GFP_KERNEL); if (!shdev) return -ENOMEM; dma_dev = &shdev->shdma_dev.dma_dev; shdev->chan_reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(shdev->chan_reg)) return PTR_ERR(shdev->chan_reg); if (dmars) { shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars); if (IS_ERR(shdev->dmars)) return PTR_ERR(shdev->dmars); } dma_dev->src_addr_widths = widths; dma_dev->dst_addr_widths = widths; dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; if (!pdata->slave_only) dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); if (pdata->slave && pdata->slave_num) dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); /* Default transfer size of 32 bytes requires 32-byte alignment */ dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE; shdev->shdma_dev.ops = &sh_dmae_shdma_ops; shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc); err = shdma_init(&pdev->dev, &shdev->shdma_dev, pdata->channel_num); if (err < 0) goto eshdma; /* platform data */ shdev->pdata = pdata; if (pdata->chcr_offset) shdev->chcr_offset = pdata->chcr_offset; else shdev->chcr_offset = CHCR; if (pdata->chcr_ie_bit) shdev->chcr_ie_bit = pdata->chcr_ie_bit; else shdev->chcr_ie_bit = CHCR_IE; platform_set_drvdata(pdev, shdev); pm_runtime_enable(&pdev->dev); err = pm_runtime_get_sync(&pdev->dev); if (err < 0) dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err); spin_lock_irq(&sh_dmae_lock); list_add_tail_rcu(&shdev->node, &sh_dmae_devices); spin_unlock_irq(&sh_dmae_lock); /* reset dma controller - only needed as a test */ err = sh_dmae_rst(shdev); if (err) goto rst_err; if (IS_ENABLED(CONFIG_CPU_SH4) || IS_ENABLED(CONFIG_ARCH_RENESAS)) { chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); if (!chanirq_res) chanirq_res = errirq_res; else irqres++; if (chanirq_res == errirq_res || (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) irqflags = IRQF_SHARED; errirq = errirq_res->start; err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags, "DMAC Address Error", shdev); if (err) { dev_err(&pdev->dev, "DMA failed requesting irq #%d, error %d\n", errirq, err); goto eirq_err; } } else { chanirq_res = errirq_res; } if (chanirq_res->start == chanirq_res->end && !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { /* Special case - all multiplexed */ for (; irq_cnt < pdata->channel_num; irq_cnt++) { if (irq_cnt < SH_DMAE_MAX_CHANNELS) { chan_irq[irq_cnt] = chanirq_res->start; chan_flag[irq_cnt] = IRQF_SHARED; } else { irq_cap = 1; break; } } } else { do { for (i = chanirq_res->start; i <= chanirq_res->end; i++) { if (irq_cnt >= SH_DMAE_MAX_CHANNELS) { irq_cap = 1; break; } if ((errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) chan_flag[irq_cnt] = IRQF_SHARED; else chan_flag[irq_cnt] = 0; dev_dbg(&pdev->dev, "Found IRQ %d for channel %d\n", i, irq_cnt); chan_irq[irq_cnt++] = i; } if (irq_cnt >= SH_DMAE_MAX_CHANNELS) break; chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, ++irqres); } while (irq_cnt < pdata->channel_num && chanirq_res); } /* Create DMA Channel */ for (i = 0; i < irq_cnt; i++) { err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); if (err) goto chan_probe_err; } if (irq_cap) 
dev_notice(&pdev->dev, "Attempting to register %d DMA " "channels when a maximum of %d are supported.\n", pdata->channel_num, SH_DMAE_MAX_CHANNELS); pm_runtime_put(&pdev->dev); err = dma_async_device_register(&shdev->shdma_dev.dma_dev); if (err < 0) goto edmadevreg; return err; edmadevreg: pm_runtime_get(&pdev->dev); chan_probe_err: sh_dmae_chan_remove(shdev); eirq_err: rst_err: spin_lock_irq(&sh_dmae_lock); list_del_rcu(&shdev->node); spin_unlock_irq(&sh_dmae_lock); pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); shdma_cleanup(&shdev->shdma_dev); eshdma: synchronize_rcu(); return err; } static int sh_dmae_remove(struct platform_device *pdev) { struct sh_dmae_device *shdev = platform_get_drvdata(pdev); struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev; dma_async_device_unregister(dma_dev); spin_lock_irq(&sh_dmae_lock); list_del_rcu(&shdev->node); spin_unlock_irq(&sh_dmae_lock); pm_runtime_disable(&pdev->dev); sh_dmae_chan_remove(shdev); shdma_cleanup(&shdev->shdma_dev); synchronize_rcu(); return 0; } static struct platform_driver sh_dmae_driver = { .driver = { .pm = &sh_dmae_pm, .name = SH_DMAE_DRV_NAME, }, .remove = sh_dmae_remove, }; static int __init sh_dmae_init(void) { /* Wire up NMI handling */ int err = register_die_notifier(&sh_dmae_nmi_notifier); if (err) return err; return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe); } module_init(sh_dmae_init); static void __exit sh_dmae_exit(void) { platform_driver_unregister(&sh_dmae_driver); unregister_die_notifier(&sh_dmae_nmi_notifier); } module_exit(sh_dmae_exit); MODULE_AUTHOR("Nobuhiro Iwamatsu <[email protected]>"); MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);
linux-master
drivers/dma/sh/shdmac.c
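/*
 * Editor's illustrative sketch, not part of shdmac.c above: that driver
 * encodes the DMA transfer size in the split CHCR TS bit fields through the
 * pdata ts_shift[] table (log2size_to_chcr() / calc_xmit_shift()) and then
 * programs TCR with the byte count shifted right by the resulting log2 size
 * (dmae_set_reg()).  The stand-alone program below reproduces that round
 * trip; the masks, shifts and ts_shift[] values are invented for the example
 * and would really come from the SoC's sh_dmae_pdata.
 */
#include <stdio.h>

/* Hypothetical CHCR TS layout: index bits 1:0 in CHCR bits 4:3,
 * index bits 3:2 in CHCR bits 21:20. */
#define TS_LOW_MASK	0x00000018u
#define TS_LOW_SHIFT	3
#define TS_HIGH_MASK	0x00300000u
#define TS_HIGH_SHIFT	18

/* Hypothetical ts_shift[]: log2 of the transfer size for each TS index. */
static const unsigned int ts_shift[] = { 0, 1, 2, 3, 4, 5 };
#define TS_SHIFT_NUM	(sizeof(ts_shift) / sizeof(ts_shift[0]))

/* Mirror of log2size_to_chcr(): find the TS index for a log2 size and
 * scatter it into the two CHCR bit fields. */
static unsigned int log2size_to_ts_bits(unsigned int l2size)
{
	unsigned int i;

	for (i = 0; i < TS_SHIFT_NUM; i++)
		if (ts_shift[i] == l2size)
			break;
	if (i == TS_SHIFT_NUM)
		i = 0;

	return ((i << TS_LOW_SHIFT) & TS_LOW_MASK) |
	       ((i << TS_HIGH_SHIFT) & TS_HIGH_MASK);
}

/* Mirror of calc_xmit_shift(): gather the index back, look up the log2 size. */
static unsigned int chcr_to_xmit_shift(unsigned int chcr)
{
	unsigned int i = ((chcr & TS_LOW_MASK) >> TS_LOW_SHIFT) |
			 ((chcr & TS_HIGH_MASK) >> TS_HIGH_SHIFT);

	if (i >= TS_SHIFT_NUM)
		i = 0;

	return ts_shift[i];
}

int main(void)
{
	unsigned int chcr = log2size_to_ts_bits(2);	/* 4-byte transfer units */
	unsigned int shift = chcr_to_xmit_shift(chcr);
	unsigned int len = 4096;			/* bytes to transfer */

	/* TCR counts transfer units, not bytes, just as dmae_set_reg() does
	 * with hw->tcr >> sh_chan->xmit_shift. */
	printf("TS bits 0x%08x, xmit_shift %u, TCR %u\n",
	       chcr, shift, len >> shift);
	return 0;
}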
// SPDX-License-Identifier: GPL-2.0 /* * Renesas USB DMA Controller Driver * * Copyright (C) 2015 Renesas Electronics Corporation * * based on rcar-dmac.c * Copyright (C) 2014 Renesas Electronics Inc. * Author: Laurent Pinchart <[email protected]> */ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_dma.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/spinlock.h> #include "../dmaengine.h" #include "../virt-dma.h" /* * struct usb_dmac_sg - Descriptor for a hardware transfer * @mem_addr: memory address * @size: transfer size in bytes */ struct usb_dmac_sg { dma_addr_t mem_addr; u32 size; }; /* * struct usb_dmac_desc - USB DMA Transfer Descriptor * @vd: base virtual channel DMA transaction descriptor * @direction: direction of the DMA transfer * @sg_allocated_len: length of allocated sg * @sg_len: length of sg * @sg_index: index of sg * @residue: residue after the DMAC completed a transfer * @node: node for desc_got and desc_freed * @done_cookie: cookie after the DMAC completed a transfer * @sg: information for the transfer */ struct usb_dmac_desc { struct virt_dma_desc vd; enum dma_transfer_direction direction; unsigned int sg_allocated_len; unsigned int sg_len; unsigned int sg_index; u32 residue; struct list_head node; dma_cookie_t done_cookie; struct usb_dmac_sg sg[]; }; #define to_usb_dmac_desc(vd) container_of(vd, struct usb_dmac_desc, vd) /* * struct usb_dmac_chan - USB DMA Controller Channel * @vc: base virtual DMA channel object * @iomem: channel I/O memory base * @index: index of this channel in the controller * @irq: irq number of this channel * @desc: the current descriptor * @descs_allocated: number of descriptors allocated * @desc_got: got descriptors * @desc_freed: freed descriptors after the DMAC completed a transfer */ struct usb_dmac_chan { struct virt_dma_chan vc; void __iomem *iomem; unsigned int index; int irq; struct usb_dmac_desc *desc; int descs_allocated; struct list_head desc_got; struct list_head desc_freed; }; #define to_usb_dmac_chan(c) container_of(c, struct usb_dmac_chan, vc.chan) /* * struct usb_dmac - USB DMA Controller * @engine: base DMA engine object * @dev: the hardware device * @iomem: remapped I/O memory base * @n_channels: number of available channels * @channels: array of DMAC channels */ struct usb_dmac { struct dma_device engine; struct device *dev; void __iomem *iomem; unsigned int n_channels; struct usb_dmac_chan *channels; }; #define to_usb_dmac(d) container_of(d, struct usb_dmac, engine) /* ----------------------------------------------------------------------------- * Registers */ #define USB_DMAC_CHAN_OFFSET(i) (0x20 + 0x20 * (i)) #define USB_DMASWR 0x0008 #define USB_DMASWR_SWR (1 << 0) #define USB_DMAOR 0x0060 #define USB_DMAOR_AE (1 << 1) #define USB_DMAOR_DME (1 << 0) #define USB_DMASAR 0x0000 #define USB_DMADAR 0x0004 #define USB_DMATCR 0x0008 #define USB_DMATCR_MASK 0x00ffffff #define USB_DMACHCR 0x0014 #define USB_DMACHCR_FTE (1 << 24) #define USB_DMACHCR_NULLE (1 << 16) #define USB_DMACHCR_NULL (1 << 12) #define USB_DMACHCR_TS_8B ((0 << 7) | (0 << 6)) #define USB_DMACHCR_TS_16B ((0 << 7) | (1 << 6)) #define USB_DMACHCR_TS_32B ((1 << 7) | (0 << 6)) #define USB_DMACHCR_IE (1 << 5) #define USB_DMACHCR_SP (1 << 2) #define USB_DMACHCR_TE (1 << 1) #define USB_DMACHCR_DE (1 << 0) #define USB_DMATEND 
0x0018 /* Hardcode the xfer_shift to 5 (32bytes) */ #define USB_DMAC_XFER_SHIFT 5 #define USB_DMAC_XFER_SIZE (1 << USB_DMAC_XFER_SHIFT) #define USB_DMAC_CHCR_TS USB_DMACHCR_TS_32B #define USB_DMAC_SLAVE_BUSWIDTH DMA_SLAVE_BUSWIDTH_32_BYTES /* for descriptors */ #define USB_DMAC_INITIAL_NR_DESC 16 #define USB_DMAC_INITIAL_NR_SG 8 /* ----------------------------------------------------------------------------- * Device access */ static void usb_dmac_write(struct usb_dmac *dmac, u32 reg, u32 data) { writel(data, dmac->iomem + reg); } static u32 usb_dmac_read(struct usb_dmac *dmac, u32 reg) { return readl(dmac->iomem + reg); } static u32 usb_dmac_chan_read(struct usb_dmac_chan *chan, u32 reg) { return readl(chan->iomem + reg); } static void usb_dmac_chan_write(struct usb_dmac_chan *chan, u32 reg, u32 data) { writel(data, chan->iomem + reg); } /* ----------------------------------------------------------------------------- * Initialization and configuration */ static bool usb_dmac_chan_is_busy(struct usb_dmac_chan *chan) { u32 chcr = usb_dmac_chan_read(chan, USB_DMACHCR); return (chcr & (USB_DMACHCR_DE | USB_DMACHCR_TE)) == USB_DMACHCR_DE; } static u32 usb_dmac_calc_tend(u32 size) { /* * Please refer to the Figure "Example of Final Transaction Valid * Data Transfer Enable (EDTEN) Setting" in the data sheet. */ return 0xffffffff << (32 - (size % USB_DMAC_XFER_SIZE ? : USB_DMAC_XFER_SIZE)); } /* This function is already held by vc.lock */ static void usb_dmac_chan_start_sg(struct usb_dmac_chan *chan, unsigned int index) { struct usb_dmac_desc *desc = chan->desc; struct usb_dmac_sg *sg = desc->sg + index; dma_addr_t src_addr = 0, dst_addr = 0; WARN_ON_ONCE(usb_dmac_chan_is_busy(chan)); if (desc->direction == DMA_DEV_TO_MEM) dst_addr = sg->mem_addr; else src_addr = sg->mem_addr; dev_dbg(chan->vc.chan.device->dev, "chan%u: queue sg %p: %u@%pad -> %pad\n", chan->index, sg, sg->size, &src_addr, &dst_addr); usb_dmac_chan_write(chan, USB_DMASAR, src_addr & 0xffffffff); usb_dmac_chan_write(chan, USB_DMADAR, dst_addr & 0xffffffff); usb_dmac_chan_write(chan, USB_DMATCR, DIV_ROUND_UP(sg->size, USB_DMAC_XFER_SIZE)); usb_dmac_chan_write(chan, USB_DMATEND, usb_dmac_calc_tend(sg->size)); usb_dmac_chan_write(chan, USB_DMACHCR, USB_DMAC_CHCR_TS | USB_DMACHCR_NULLE | USB_DMACHCR_IE | USB_DMACHCR_DE); } /* This function is already held by vc.lock */ static void usb_dmac_chan_start_desc(struct usb_dmac_chan *chan) { struct virt_dma_desc *vd; vd = vchan_next_desc(&chan->vc); if (!vd) { chan->desc = NULL; return; } /* * Remove this request from vc->desc_issued. Otherwise, this driver * will get the previous value from vchan_next_desc() after a transfer * was completed. */ list_del(&vd->node); chan->desc = to_usb_dmac_desc(vd); chan->desc->sg_index = 0; usb_dmac_chan_start_sg(chan, 0); } static int usb_dmac_init(struct usb_dmac *dmac) { u16 dmaor; /* Clear all channels and enable the DMAC globally. 
*/ usb_dmac_write(dmac, USB_DMAOR, USB_DMAOR_DME); dmaor = usb_dmac_read(dmac, USB_DMAOR); if ((dmaor & (USB_DMAOR_AE | USB_DMAOR_DME)) != USB_DMAOR_DME) { dev_warn(dmac->dev, "DMAOR initialization failed.\n"); return -EIO; } return 0; } /* ----------------------------------------------------------------------------- * Descriptors allocation and free */ static int usb_dmac_desc_alloc(struct usb_dmac_chan *chan, unsigned int sg_len, gfp_t gfp) { struct usb_dmac_desc *desc; unsigned long flags; desc = kzalloc(struct_size(desc, sg, sg_len), gfp); if (!desc) return -ENOMEM; desc->sg_allocated_len = sg_len; INIT_LIST_HEAD(&desc->node); spin_lock_irqsave(&chan->vc.lock, flags); list_add_tail(&desc->node, &chan->desc_freed); spin_unlock_irqrestore(&chan->vc.lock, flags); return 0; } static void usb_dmac_desc_free(struct usb_dmac_chan *chan) { struct usb_dmac_desc *desc, *_desc; LIST_HEAD(list); list_splice_init(&chan->desc_freed, &list); list_splice_init(&chan->desc_got, &list); list_for_each_entry_safe(desc, _desc, &list, node) { list_del(&desc->node); kfree(desc); } chan->descs_allocated = 0; } static struct usb_dmac_desc *usb_dmac_desc_get(struct usb_dmac_chan *chan, unsigned int sg_len, gfp_t gfp) { struct usb_dmac_desc *desc = NULL; unsigned long flags; /* Get a freed descritpor */ spin_lock_irqsave(&chan->vc.lock, flags); list_for_each_entry(desc, &chan->desc_freed, node) { if (sg_len <= desc->sg_allocated_len) { list_move_tail(&desc->node, &chan->desc_got); spin_unlock_irqrestore(&chan->vc.lock, flags); return desc; } } spin_unlock_irqrestore(&chan->vc.lock, flags); /* Allocate a new descriptor */ if (!usb_dmac_desc_alloc(chan, sg_len, gfp)) { /* If allocated the desc, it was added to tail of the list */ spin_lock_irqsave(&chan->vc.lock, flags); desc = list_last_entry(&chan->desc_freed, struct usb_dmac_desc, node); list_move_tail(&desc->node, &chan->desc_got); spin_unlock_irqrestore(&chan->vc.lock, flags); return desc; } return NULL; } static void usb_dmac_desc_put(struct usb_dmac_chan *chan, struct usb_dmac_desc *desc) { unsigned long flags; spin_lock_irqsave(&chan->vc.lock, flags); list_move_tail(&desc->node, &chan->desc_freed); spin_unlock_irqrestore(&chan->vc.lock, flags); } /* ----------------------------------------------------------------------------- * Stop and reset */ static void usb_dmac_soft_reset(struct usb_dmac_chan *uchan) { struct dma_chan *chan = &uchan->vc.chan; struct usb_dmac *dmac = to_usb_dmac(chan->device); int i; /* Don't issue soft reset if any one of channels is busy */ for (i = 0; i < dmac->n_channels; ++i) { if (usb_dmac_chan_is_busy(uchan)) return; } usb_dmac_write(dmac, USB_DMAOR, 0); usb_dmac_write(dmac, USB_DMASWR, USB_DMASWR_SWR); udelay(100); usb_dmac_write(dmac, USB_DMASWR, 0); usb_dmac_write(dmac, USB_DMAOR, 1); } static void usb_dmac_chan_halt(struct usb_dmac_chan *chan) { u32 chcr = usb_dmac_chan_read(chan, USB_DMACHCR); chcr &= ~(USB_DMACHCR_IE | USB_DMACHCR_TE | USB_DMACHCR_DE); usb_dmac_chan_write(chan, USB_DMACHCR, chcr); usb_dmac_soft_reset(chan); } static void usb_dmac_stop(struct usb_dmac *dmac) { usb_dmac_write(dmac, USB_DMAOR, 0); } /* ----------------------------------------------------------------------------- * DMA engine operations */ static int usb_dmac_alloc_chan_resources(struct dma_chan *chan) { struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); int ret; while (uchan->descs_allocated < USB_DMAC_INITIAL_NR_DESC) { ret = usb_dmac_desc_alloc(uchan, USB_DMAC_INITIAL_NR_SG, GFP_KERNEL); if (ret < 0) { usb_dmac_desc_free(uchan); return 
ret; } uchan->descs_allocated++; } return pm_runtime_get_sync(chan->device->dev); } static void usb_dmac_free_chan_resources(struct dma_chan *chan) { struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); unsigned long flags; /* Protect against ISR */ spin_lock_irqsave(&uchan->vc.lock, flags); usb_dmac_chan_halt(uchan); spin_unlock_irqrestore(&uchan->vc.lock, flags); usb_dmac_desc_free(uchan); vchan_free_chan_resources(&uchan->vc); pm_runtime_put(chan->device->dev); } static struct dma_async_tx_descriptor * usb_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction dir, unsigned long dma_flags, void *context) { struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); struct usb_dmac_desc *desc; struct scatterlist *sg; int i; if (!sg_len) { dev_warn(chan->device->dev, "%s: bad parameter: len=%d\n", __func__, sg_len); return NULL; } desc = usb_dmac_desc_get(uchan, sg_len, GFP_NOWAIT); if (!desc) return NULL; desc->direction = dir; desc->sg_len = sg_len; for_each_sg(sgl, sg, sg_len, i) { desc->sg[i].mem_addr = sg_dma_address(sg); desc->sg[i].size = sg_dma_len(sg); } return vchan_tx_prep(&uchan->vc, &desc->vd, dma_flags); } static int usb_dmac_chan_terminate_all(struct dma_chan *chan) { struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); struct usb_dmac_desc *desc, *_desc; unsigned long flags; LIST_HEAD(head); LIST_HEAD(list); spin_lock_irqsave(&uchan->vc.lock, flags); usb_dmac_chan_halt(uchan); vchan_get_all_descriptors(&uchan->vc, &head); if (uchan->desc) uchan->desc = NULL; list_splice_init(&uchan->desc_got, &list); list_for_each_entry_safe(desc, _desc, &list, node) list_move_tail(&desc->node, &uchan->desc_freed); spin_unlock_irqrestore(&uchan->vc.lock, flags); vchan_dma_desc_free_list(&uchan->vc, &head); return 0; } static unsigned int usb_dmac_get_current_residue(struct usb_dmac_chan *chan, struct usb_dmac_desc *desc, unsigned int sg_index) { struct usb_dmac_sg *sg = desc->sg + sg_index; u32 mem_addr = sg->mem_addr & 0xffffffff; unsigned int residue = sg->size; /* * We cannot use USB_DMATCR to calculate residue because USB_DMATCR * has unsuited value to calculate. 
*/ if (desc->direction == DMA_DEV_TO_MEM) residue -= usb_dmac_chan_read(chan, USB_DMADAR) - mem_addr; else residue -= usb_dmac_chan_read(chan, USB_DMASAR) - mem_addr; return residue; } static u32 usb_dmac_chan_get_residue_if_complete(struct usb_dmac_chan *chan, dma_cookie_t cookie) { struct usb_dmac_desc *desc; u32 residue = 0; list_for_each_entry_reverse(desc, &chan->desc_freed, node) { if (desc->done_cookie == cookie) { residue = desc->residue; break; } } return residue; } static u32 usb_dmac_chan_get_residue(struct usb_dmac_chan *chan, dma_cookie_t cookie) { u32 residue = 0; struct virt_dma_desc *vd; struct usb_dmac_desc *desc = chan->desc; int i; if (!desc) { vd = vchan_find_desc(&chan->vc, cookie); if (!vd) return 0; desc = to_usb_dmac_desc(vd); } /* Compute the size of all usb_dmac_sg still to be transferred */ for (i = desc->sg_index + 1; i < desc->sg_len; i++) residue += desc->sg[i].size; /* Add the residue for the current sg */ residue += usb_dmac_get_current_residue(chan, desc, desc->sg_index); return residue; } static enum dma_status usb_dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); enum dma_status status; unsigned int residue = 0; unsigned long flags; status = dma_cookie_status(chan, cookie, txstate); /* a client driver will get residue after DMA_COMPLETE */ if (!txstate) return status; spin_lock_irqsave(&uchan->vc.lock, flags); if (status == DMA_COMPLETE) residue = usb_dmac_chan_get_residue_if_complete(uchan, cookie); else residue = usb_dmac_chan_get_residue(uchan, cookie); spin_unlock_irqrestore(&uchan->vc.lock, flags); dma_set_residue(txstate, residue); return status; } static void usb_dmac_issue_pending(struct dma_chan *chan) { struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); unsigned long flags; spin_lock_irqsave(&uchan->vc.lock, flags); if (vchan_issue_pending(&uchan->vc) && !uchan->desc) usb_dmac_chan_start_desc(uchan); spin_unlock_irqrestore(&uchan->vc.lock, flags); } static void usb_dmac_virt_desc_free(struct virt_dma_desc *vd) { struct usb_dmac_desc *desc = to_usb_dmac_desc(vd); struct usb_dmac_chan *chan = to_usb_dmac_chan(vd->tx.chan); usb_dmac_desc_put(chan, desc); } /* ----------------------------------------------------------------------------- * IRQ handling */ static void usb_dmac_isr_transfer_end(struct usb_dmac_chan *chan) { struct usb_dmac_desc *desc = chan->desc; BUG_ON(!desc); if (++desc->sg_index < desc->sg_len) { usb_dmac_chan_start_sg(chan, desc->sg_index); } else { desc->residue = usb_dmac_get_current_residue(chan, desc, desc->sg_index - 1); desc->done_cookie = desc->vd.tx.cookie; desc->vd.tx_result.result = DMA_TRANS_NOERROR; desc->vd.tx_result.residue = desc->residue; vchan_cookie_complete(&desc->vd); /* Restart the next transfer if this driver has a next desc */ usb_dmac_chan_start_desc(chan); } } static irqreturn_t usb_dmac_isr_channel(int irq, void *dev) { struct usb_dmac_chan *chan = dev; irqreturn_t ret = IRQ_NONE; u32 mask = 0; u32 chcr; bool xfer_end = false; spin_lock(&chan->vc.lock); chcr = usb_dmac_chan_read(chan, USB_DMACHCR); if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) { mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP; if (chcr & USB_DMACHCR_DE) xfer_end = true; ret |= IRQ_HANDLED; } if (chcr & USB_DMACHCR_NULL) { /* An interruption of TE will happen after we set FTE */ mask |= USB_DMACHCR_NULL; chcr |= USB_DMACHCR_FTE; ret |= IRQ_HANDLED; } if (mask) usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask); if (xfer_end) 
usb_dmac_isr_transfer_end(chan); spin_unlock(&chan->vc.lock); return ret; } /* ----------------------------------------------------------------------------- * OF xlate and channel filter */ static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg) { struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); struct of_phandle_args *dma_spec = arg; /* USB-DMAC should be used with fixed usb controller's FIFO */ if (uchan->index != dma_spec->args[0]) return false; return true; } static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { struct dma_chan *chan; dma_cap_mask_t mask; if (dma_spec->args_count != 1) return NULL; /* Only slave DMA channels can be allocated via DT */ dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); chan = __dma_request_channel(&mask, usb_dmac_chan_filter, dma_spec, ofdma->of_node); if (!chan) return NULL; return chan; } /* ----------------------------------------------------------------------------- * Power management */ #ifdef CONFIG_PM static int usb_dmac_runtime_suspend(struct device *dev) { struct usb_dmac *dmac = dev_get_drvdata(dev); int i; for (i = 0; i < dmac->n_channels; ++i) { if (!dmac->channels[i].iomem) break; usb_dmac_chan_halt(&dmac->channels[i]); } return 0; } static int usb_dmac_runtime_resume(struct device *dev) { struct usb_dmac *dmac = dev_get_drvdata(dev); return usb_dmac_init(dmac); } #endif /* CONFIG_PM */ static const struct dev_pm_ops usb_dmac_pm = { SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume, NULL) }; /* ----------------------------------------------------------------------------- * Probe and remove */ static int usb_dmac_chan_probe(struct usb_dmac *dmac, struct usb_dmac_chan *uchan, unsigned int index) { struct platform_device *pdev = to_platform_device(dmac->dev); char pdev_irqname[5]; char *irqname; int ret; uchan->index = index; uchan->iomem = dmac->iomem + USB_DMAC_CHAN_OFFSET(index); /* Request the channel interrupt. 
*/ sprintf(pdev_irqname, "ch%u", index); uchan->irq = platform_get_irq_byname(pdev, pdev_irqname); if (uchan->irq < 0) return -ENODEV; irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u", dev_name(dmac->dev), index); if (!irqname) return -ENOMEM; ret = devm_request_irq(dmac->dev, uchan->irq, usb_dmac_isr_channel, IRQF_SHARED, irqname, uchan); if (ret) { dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", uchan->irq, ret); return ret; } uchan->vc.desc_free = usb_dmac_virt_desc_free; vchan_init(&uchan->vc, &dmac->engine); INIT_LIST_HEAD(&uchan->desc_freed); INIT_LIST_HEAD(&uchan->desc_got); return 0; } static int usb_dmac_parse_of(struct device *dev, struct usb_dmac *dmac) { struct device_node *np = dev->of_node; int ret; ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels); if (ret < 0) { dev_err(dev, "unable to read dma-channels property\n"); return ret; } if (dmac->n_channels <= 0 || dmac->n_channels >= 100) { dev_err(dev, "invalid number of channels %u\n", dmac->n_channels); return -EINVAL; } return 0; } static int usb_dmac_probe(struct platform_device *pdev) { const enum dma_slave_buswidth widths = USB_DMAC_SLAVE_BUSWIDTH; struct dma_device *engine; struct usb_dmac *dmac; unsigned int i; int ret; dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); if (!dmac) return -ENOMEM; dmac->dev = &pdev->dev; platform_set_drvdata(pdev, dmac); ret = usb_dmac_parse_of(&pdev->dev, dmac); if (ret < 0) return ret; dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels, sizeof(*dmac->channels), GFP_KERNEL); if (!dmac->channels) return -ENOMEM; /* Request resources. */ dmac->iomem = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dmac->iomem)) return PTR_ERR(dmac->iomem); /* Enable runtime PM and initialize the device. */ pm_runtime_enable(&pdev->dev); ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); goto error_pm; } ret = usb_dmac_init(dmac); if (ret) { dev_err(&pdev->dev, "failed to reset device\n"); goto error; } /* Initialize the channels. */ INIT_LIST_HEAD(&dmac->engine.channels); for (i = 0; i < dmac->n_channels; ++i) { ret = usb_dmac_chan_probe(dmac, &dmac->channels[i], i); if (ret < 0) goto error; } /* Register the DMAC as a DMA provider for DT. */ ret = of_dma_controller_register(pdev->dev.of_node, usb_dmac_of_xlate, NULL); if (ret < 0) goto error; /* * Register the DMA engine device. * * Default transfer size of 32 bytes requires 32-byte alignment. 
*/ engine = &dmac->engine; dma_cap_set(DMA_SLAVE, engine->cap_mask); engine->dev = &pdev->dev; engine->src_addr_widths = widths; engine->dst_addr_widths = widths; engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; engine->device_alloc_chan_resources = usb_dmac_alloc_chan_resources; engine->device_free_chan_resources = usb_dmac_free_chan_resources; engine->device_prep_slave_sg = usb_dmac_prep_slave_sg; engine->device_terminate_all = usb_dmac_chan_terminate_all; engine->device_tx_status = usb_dmac_tx_status; engine->device_issue_pending = usb_dmac_issue_pending; ret = dma_async_device_register(engine); if (ret < 0) goto error; pm_runtime_put(&pdev->dev); return 0; error: of_dma_controller_free(pdev->dev.of_node); error_pm: pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); return ret; } static void usb_dmac_chan_remove(struct usb_dmac *dmac, struct usb_dmac_chan *uchan) { usb_dmac_chan_halt(uchan); devm_free_irq(dmac->dev, uchan->irq, uchan); } static int usb_dmac_remove(struct platform_device *pdev) { struct usb_dmac *dmac = platform_get_drvdata(pdev); int i; for (i = 0; i < dmac->n_channels; ++i) usb_dmac_chan_remove(dmac, &dmac->channels[i]); of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&dmac->engine); pm_runtime_disable(&pdev->dev); return 0; } static void usb_dmac_shutdown(struct platform_device *pdev) { struct usb_dmac *dmac = platform_get_drvdata(pdev); usb_dmac_stop(dmac); } static const struct of_device_id usb_dmac_of_ids[] = { { .compatible = "renesas,usb-dmac", }, { /* Sentinel */ } }; MODULE_DEVICE_TABLE(of, usb_dmac_of_ids); static struct platform_driver usb_dmac_driver = { .driver = { .pm = &usb_dmac_pm, .name = "usb-dmac", .of_match_table = usb_dmac_of_ids, }, .probe = usb_dmac_probe, .remove = usb_dmac_remove, .shutdown = usb_dmac_shutdown, }; module_platform_driver(usb_dmac_driver); MODULE_DESCRIPTION("Renesas USB DMA Controller Driver"); MODULE_AUTHOR("Yoshihiro Shimoda <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/dma/sh/usb-dmac.c
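/*
 * Editor's illustrative sketch, not part of usb-dmac.c above: that driver
 * always transfers in 32-byte units (USB_DMAC_XFER_SIZE), so
 * usb_dmac_chan_start_sg() programs USB_DMATCR with the rounded-up number of
 * 32-byte transactions and USB_DMATEND with the mask computed by
 * usb_dmac_calc_tend() for the partially filled final transaction.  The
 * stand-alone program below reproduces those two calculations for a few
 * buffer sizes; the meaning of the individual TEND bits is defined by the
 * hardware-manual figure the driver comment refers to.
 */
#include <stdio.h>
#include <stdint.h>

#define XFER_SIZE 32u	/* 1 << USB_DMAC_XFER_SHIFT */

static uint32_t calc_tcr(uint32_t size)
{
	/* DIV_ROUND_UP(size, USB_DMAC_XFER_SIZE) */
	return (size + XFER_SIZE - 1) / XFER_SIZE;
}

static uint32_t calc_tend(uint32_t size)
{
	uint32_t rem = size % XFER_SIZE;

	if (!rem)			/* the driver's "size % 32 ? : 32" */
		rem = XFER_SIZE;

	return 0xffffffffu << (32 - rem);
}

int main(void)
{
	static const uint32_t sizes[] = { 32, 64, 100, 1500 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %4u: DMATCR %3u, DMATEND 0x%08x\n",
		       (unsigned)sizes[i],
		       (unsigned)calc_tcr(sizes[i]),
		       (unsigned)calc_tend(sizes[i]));
	return 0;
}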
// SPDX-License-Identifier: GPL-2.0 /* * PCI driver for the Synopsys DesignWare DMA Controller * * Copyright (C) 2013 Intel Corporation * Author: Andy Shevchenko <[email protected]> */ #include <linux/module.h> #include <linux/pci.h> #include <linux/device.h> #include "internal.h" static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) { const struct dw_dma_chip_pdata *drv_data = (void *)pid->driver_data; struct dw_dma_chip_pdata *data; struct dw_dma_chip *chip; int ret; ret = pcim_enable_device(pdev); if (ret) return ret; ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev)); if (ret) { dev_err(&pdev->dev, "I/O memory remapping failed\n"); return ret; } pci_set_master(pdev); pci_try_set_mwi(pdev); ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) return ret; data = devm_kmemdup(&pdev->dev, drv_data, sizeof(*drv_data), GFP_KERNEL); if (!data) return -ENOMEM; chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; chip->dev = &pdev->dev; chip->id = pdev->devfn; chip->regs = pcim_iomap_table(pdev)[0]; chip->irq = pdev->irq; chip->pdata = data->pdata; data->chip = chip; ret = data->probe(chip); if (ret) return ret; dw_dma_acpi_controller_register(chip->dw); pci_set_drvdata(pdev, data); return 0; } static void dw_pci_remove(struct pci_dev *pdev) { struct dw_dma_chip_pdata *data = pci_get_drvdata(pdev); struct dw_dma_chip *chip = data->chip; int ret; dw_dma_acpi_controller_free(chip->dw); ret = data->remove(chip); if (ret) dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret); } #ifdef CONFIG_PM_SLEEP static int dw_pci_suspend_late(struct device *dev) { struct dw_dma_chip_pdata *data = dev_get_drvdata(dev); struct dw_dma_chip *chip = data->chip; return do_dw_dma_disable(chip); }; static int dw_pci_resume_early(struct device *dev) { struct dw_dma_chip_pdata *data = dev_get_drvdata(dev); struct dw_dma_chip *chip = data->chip; return do_dw_dma_enable(chip); }; #endif /* CONFIG_PM_SLEEP */ static const struct dev_pm_ops dw_pci_dev_pm_ops = { SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_pci_suspend_late, dw_pci_resume_early) }; static const struct pci_device_id dw_pci_id_table[] = { /* Medfield (GPDMA) */ { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_dma_chip_pdata }, /* BayTrail */ { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_dma_chip_pdata }, { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_dma_chip_pdata }, /* Merrifield */ { PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&idma32_chip_pdata }, /* Braswell */ { PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_dma_chip_pdata }, { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_dma_chip_pdata }, /* Elkhart Lake iDMA 32-bit (PSE DMA) */ { PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&xbar_chip_pdata }, { PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&xbar_chip_pdata }, { PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&xbar_chip_pdata }, /* Haswell */ { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_dma_chip_pdata }, /* Broadwell */ { PCI_VDEVICE(INTEL, 0x9ce0), (kernel_ulong_t)&dw_dma_chip_pdata }, { } }; MODULE_DEVICE_TABLE(pci, dw_pci_id_table); static struct pci_driver dw_pci_driver = { .name = "dw_dmac_pci", .id_table = dw_pci_id_table, .probe = dw_pci_probe, .remove = dw_pci_remove, .driver = { .pm = &dw_pci_dev_pm_ops, }, }; module_pci_driver(dw_pci_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller PCI driver"); MODULE_AUTHOR("Andy Shevchenko <[email protected]>");
linux-master
drivers/dma/dw/pci.c
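/*
 * Editor's illustrative sketch, not part of pci.c above: the PCI ID table in
 * that driver carries a pointer to per-device configuration by casting it to
 * kernel_ulong_t in driver_data, and dw_pci_probe() casts pid->driver_data
 * back to recover it.  The stand-alone program below shows the same pattern
 * with a made-up "chip_pdata" type and match table.
 */
#include <stdio.h>

struct chip_pdata {			/* hypothetical per-device data */
	const char *name;
	int nr_channels;
};

struct id_entry {
	unsigned int device;
	unsigned long driver_data;	/* really a struct chip_pdata pointer */
};

static const struct chip_pdata dw_pdata = { "dw_dmac", 8 };
static const struct chip_pdata idma32_pdata = { "idma32", 8 };

static const struct id_entry id_table[] = {
	{ 0x0827, (unsigned long)&dw_pdata },
	{ 0x11a2, (unsigned long)&idma32_pdata },
};

/* What a probe() callback does with the matched entry's driver_data. */
static void probe(const struct id_entry *id)
{
	const struct chip_pdata *pdata = (const void *)id->driver_data;

	printf("device 0x%04x uses %s with %d channels\n",
	       id->device, pdata->name, pdata->nr_channels);
}

int main(void)
{
	probe(&id_table[0]);
	probe(&id_table[1]);
	return 0;
}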
// SPDX-License-Identifier: GPL-2.0 /* * Platform driver for the Synopsys DesignWare DMA Controller * * Copyright (C) 2007-2008 Atmel Corporation * Copyright (C) 2010-2011 ST Microelectronics * Copyright (C) 2013 Intel Corporation * * Some parts of this driver are derived from the original dw_dmac. */ #include <linux/module.h> #include <linux/device.h> #include <linux/clk.h> #include <linux/pm_runtime.h> #include <linux/platform_device.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/acpi.h> #include "internal.h" #define DRV_NAME "dw_dmac" static int dw_probe(struct platform_device *pdev) { const struct dw_dma_chip_pdata *match; struct dw_dma_chip_pdata *data; struct dw_dma_chip *chip; struct device *dev = &pdev->dev; int err; match = device_get_match_data(dev); if (!match) return -ENODEV; data = devm_kmemdup(&pdev->dev, match, sizeof(*match), GFP_KERNEL); if (!data) return -ENOMEM; chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; chip->irq = platform_get_irq(pdev, 0); if (chip->irq < 0) return chip->irq; chip->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(chip->regs)) return PTR_ERR(chip->regs); err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) return err; if (!data->pdata) data->pdata = dev_get_platdata(dev); if (!data->pdata) data->pdata = dw_dma_parse_dt(pdev); chip->dev = dev; chip->id = pdev->id; chip->pdata = data->pdata; data->chip = chip; chip->clk = devm_clk_get_optional(chip->dev, "hclk"); if (IS_ERR(chip->clk)) return PTR_ERR(chip->clk); err = clk_prepare_enable(chip->clk); if (err) return err; pm_runtime_enable(&pdev->dev); err = data->probe(chip); if (err) goto err_dw_dma_probe; platform_set_drvdata(pdev, data); dw_dma_of_controller_register(chip->dw); dw_dma_acpi_controller_register(chip->dw); return 0; err_dw_dma_probe: pm_runtime_disable(&pdev->dev); clk_disable_unprepare(chip->clk); return err; } static int dw_remove(struct platform_device *pdev) { struct dw_dma_chip_pdata *data = platform_get_drvdata(pdev); struct dw_dma_chip *chip = data->chip; int ret; dw_dma_acpi_controller_free(chip->dw); dw_dma_of_controller_free(chip->dw); ret = data->remove(chip); if (ret) dev_warn(chip->dev, "can't remove device properly: %d\n", ret); pm_runtime_disable(&pdev->dev); clk_disable_unprepare(chip->clk); return 0; } static void dw_shutdown(struct platform_device *pdev) { struct dw_dma_chip_pdata *data = platform_get_drvdata(pdev); struct dw_dma_chip *chip = data->chip; /* * We have to call do_dw_dma_disable() to stop any ongoing transfer. On * some platforms we can't do that since DMA device is powered off. * Moreover we have no possibility to check if the platform is affected * or not. That's why we call pm_runtime_get_sync() / pm_runtime_put() * unconditionally. On the other hand we can't use * pm_runtime_suspended() because runtime PM framework is not fully * used by the driver. 
*/ pm_runtime_get_sync(chip->dev); do_dw_dma_disable(chip); pm_runtime_put_sync_suspend(chip->dev); clk_disable_unprepare(chip->clk); } #ifdef CONFIG_OF static const struct of_device_id dw_dma_of_id_table[] = { { .compatible = "snps,dma-spear1340", .data = &dw_dma_chip_pdata }, { .compatible = "renesas,rzn1-dma", .data = &dw_dma_chip_pdata }, {} }; MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); #endif #ifdef CONFIG_ACPI static const struct acpi_device_id dw_dma_acpi_id_table[] = { { "INTL9C60", (kernel_ulong_t)&dw_dma_chip_pdata }, { "80862286", (kernel_ulong_t)&dw_dma_chip_pdata }, { "808622C0", (kernel_ulong_t)&dw_dma_chip_pdata }, /* Elkhart Lake iDMA 32-bit (PSE DMA) */ { "80864BB4", (kernel_ulong_t)&xbar_chip_pdata }, { "80864BB5", (kernel_ulong_t)&xbar_chip_pdata }, { "80864BB6", (kernel_ulong_t)&xbar_chip_pdata }, { } }; MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table); #endif #ifdef CONFIG_PM_SLEEP static int dw_suspend_late(struct device *dev) { struct dw_dma_chip_pdata *data = dev_get_drvdata(dev); struct dw_dma_chip *chip = data->chip; do_dw_dma_disable(chip); clk_disable_unprepare(chip->clk); return 0; } static int dw_resume_early(struct device *dev) { struct dw_dma_chip_pdata *data = dev_get_drvdata(dev); struct dw_dma_chip *chip = data->chip; int ret; ret = clk_prepare_enable(chip->clk); if (ret) return ret; return do_dw_dma_enable(chip); } #endif /* CONFIG_PM_SLEEP */ static const struct dev_pm_ops dw_dev_pm_ops = { SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early) }; static struct platform_driver dw_driver = { .probe = dw_probe, .remove = dw_remove, .shutdown = dw_shutdown, .driver = { .name = DRV_NAME, .pm = &dw_dev_pm_ops, .of_match_table = of_match_ptr(dw_dma_of_id_table), .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), }, }; static int __init dw_init(void) { return platform_driver_register(&dw_driver); } subsys_initcall(dw_init); static void __exit dw_exit(void) { platform_driver_unregister(&dw_driver); } module_exit(dw_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver"); MODULE_ALIAS("platform:" DRV_NAME);
linux-master
drivers/dma/dw/platform.c
// SPDX-License-Identifier: GPL-2.0 /* * Core driver for the Synopsys DesignWare DMA Controller * * Copyright (C) 2007-2008 Atmel Corporation * Copyright (C) 2010-2011 ST Microelectronics * Copyright (C) 2013 Intel Corporation */ #include <linux/bitops.h> #include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include "../dmaengine.h" #include "internal.h" /* * This supports the Synopsys "DesignWare AHB Central DMA Controller", * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all * of which use ARM any more). See the "Databook" from Synopsys for * information beyond what licensees probably provide. */ /* The set of bus widths supported by the DMA controller */ #define DW_DMA_BUSWIDTHS \ BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) /*----------------------------------------------------------------------*/ static struct device *chan2dev(struct dma_chan *chan) { return &chan->dev->device; } static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) { return to_dw_desc(dwc->active_list.next); } static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) { struct dw_desc *desc = txd_to_dw_desc(tx); struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); dma_cookie_t cookie; unsigned long flags; spin_lock_irqsave(&dwc->lock, flags); cookie = dma_cookie_assign(tx); /* * REVISIT: We should attempt to chain as many descriptors as * possible, perhaps even appending to those already submitted * for DMA. But this is hard to do in a race-free manner. 
*/ list_add_tail(&desc->desc_node, &dwc->queue); spin_unlock_irqrestore(&dwc->lock, flags); dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie); return cookie; } static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) { struct dw_dma *dw = to_dw_dma(dwc->chan.device); struct dw_desc *desc; dma_addr_t phys; desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys); if (!desc) return NULL; dwc->descs_allocated++; INIT_LIST_HEAD(&desc->tx_list); dma_async_tx_descriptor_init(&desc->txd, &dwc->chan); desc->txd.tx_submit = dwc_tx_submit; desc->txd.flags = DMA_CTRL_ACK; desc->txd.phys = phys; return desc; } static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) { struct dw_dma *dw = to_dw_dma(dwc->chan.device); struct dw_desc *child, *_next; if (unlikely(!desc)) return; list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) { list_del(&child->desc_node); dma_pool_free(dw->desc_pool, child, child->txd.phys); dwc->descs_allocated--; } dma_pool_free(dw->desc_pool, desc, desc->txd.phys); dwc->descs_allocated--; } static void dwc_initialize(struct dw_dma_chan *dwc) { struct dw_dma *dw = to_dw_dma(dwc->chan.device); dw->initialize_chan(dwc); /* Enable interrupts */ channel_set_bit(dw, MASK.XFER, dwc->mask); channel_set_bit(dw, MASK.ERROR, dwc->mask); } /*----------------------------------------------------------------------*/ static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) { dev_err(chan2dev(&dwc->chan), " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", channel_readl(dwc, SAR), channel_readl(dwc, DAR), channel_readl(dwc, LLP), channel_readl(dwc, CTL_HI), channel_readl(dwc, CTL_LO)); } static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) { channel_clear_bit(dw, CH_EN, dwc->mask); while (dma_readl(dw, CH_EN) & dwc->mask) cpu_relax(); } /*----------------------------------------------------------------------*/ /* Perform single block transfer */ static inline void dwc_do_single_block(struct dw_dma_chan *dwc, struct dw_desc *desc) { struct dw_dma *dw = to_dw_dma(dwc->chan.device); u32 ctllo; /* * Software emulation of LLP mode relies on interrupts to continue * multi block transfer. */ ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN; channel_writel(dwc, SAR, lli_read(desc, sar)); channel_writel(dwc, DAR, lli_read(desc, dar)); channel_writel(dwc, CTL_LO, ctllo); channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi)); channel_set_bit(dw, CH_EN, dwc->mask); /* Move pointer to next descriptor */ dwc->tx_node_active = dwc->tx_node_active->next; } /* Called with dwc->lock held and bh disabled */ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) { struct dw_dma *dw = to_dw_dma(dwc->chan.device); u8 lms = DWC_LLP_LMS(dwc->dws.m_master); unsigned long was_soft_llp; /* ASSERT: channel is idle */ if (dma_readl(dw, CH_EN) & dwc->mask) { dev_err(chan2dev(&dwc->chan), "%s: BUG: Attempted to start non-idle channel\n", __func__); dwc_dump_chan_regs(dwc); /* The tasklet will hopefully advance the queue... 
*/ return; } if (dwc->nollp) { was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); if (was_soft_llp) { dev_err(chan2dev(&dwc->chan), "BUG: Attempted to start new LLP transfer inside ongoing one\n"); return; } dwc_initialize(dwc); first->residue = first->total_len; dwc->tx_node_active = &first->tx_list; /* Submit first block */ dwc_do_single_block(dwc, first); return; } dwc_initialize(dwc); channel_writel(dwc, LLP, first->txd.phys | lms); channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); channel_writel(dwc, CTL_HI, 0); channel_set_bit(dw, CH_EN, dwc->mask); } static void dwc_dostart_first_queued(struct dw_dma_chan *dwc) { struct dw_desc *desc; if (list_empty(&dwc->queue)) return; list_move(dwc->queue.next, &dwc->active_list); desc = dwc_first_active(dwc); dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie); dwc_dostart(dwc, desc); } /*----------------------------------------------------------------------*/ static void dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, bool callback_required) { struct dma_async_tx_descriptor *txd = &desc->txd; struct dw_desc *child; unsigned long flags; struct dmaengine_desc_callback cb; dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); spin_lock_irqsave(&dwc->lock, flags); dma_cookie_complete(txd); if (callback_required) dmaengine_desc_get_callback(txd, &cb); else memset(&cb, 0, sizeof(cb)); /* async_tx_ack */ list_for_each_entry(child, &desc->tx_list, desc_node) async_tx_ack(&child->txd); async_tx_ack(&desc->txd); dwc_desc_put(dwc, desc); spin_unlock_irqrestore(&dwc->lock, flags); dmaengine_desc_callback_invoke(&cb, NULL); } static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) { struct dw_desc *desc, *_desc; LIST_HEAD(list); unsigned long flags; spin_lock_irqsave(&dwc->lock, flags); if (dma_readl(dw, CH_EN) & dwc->mask) { dev_err(chan2dev(&dwc->chan), "BUG: XFER bit set, but channel not idle!\n"); /* Try to continue after resetting the channel... */ dwc_chan_disable(dw, dwc); } /* * Submit queued descriptors ASAP, i.e. before we go through * the completed ones. */ list_splice_init(&dwc->active_list, &list); dwc_dostart_first_queued(dwc); spin_unlock_irqrestore(&dwc->lock, flags); list_for_each_entry_safe(desc, _desc, &list, desc_node) dwc_descriptor_complete(dwc, desc, true); } /* Returns how many bytes were already received from source */ static inline u32 dwc_get_sent(struct dw_dma_chan *dwc) { struct dw_dma *dw = to_dw_dma(dwc->chan.device); u32 ctlhi = channel_readl(dwc, CTL_HI); u32 ctllo = channel_readl(dwc, CTL_LO); return dw->block2bytes(dwc, ctlhi, ctllo >> 4 & 7); } static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) { dma_addr_t llp; struct dw_desc *desc, *_desc; struct dw_desc *child; u32 status_xfer; unsigned long flags; spin_lock_irqsave(&dwc->lock, flags); llp = channel_readl(dwc, LLP); status_xfer = dma_readl(dw, RAW.XFER); if (status_xfer & dwc->mask) { /* Everything we've submitted is done */ dma_writel(dw, CLEAR.XFER, dwc->mask); if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { struct list_head *head, *active = dwc->tx_node_active; /* * We are inside first active descriptor. * Otherwise something is really wrong. 
*/ desc = dwc_first_active(dwc); head = &desc->tx_list; if (active != head) { /* Update residue to reflect last sent descriptor */ if (active == head->next) desc->residue -= desc->len; else desc->residue -= to_dw_desc(active->prev)->len; child = to_dw_desc(active); /* Submit next block */ dwc_do_single_block(dwc, child); spin_unlock_irqrestore(&dwc->lock, flags); return; } /* We are done here */ clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); } spin_unlock_irqrestore(&dwc->lock, flags); dwc_complete_all(dw, dwc); return; } if (list_empty(&dwc->active_list)) { spin_unlock_irqrestore(&dwc->lock, flags); return; } if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__); spin_unlock_irqrestore(&dwc->lock, flags); return; } dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp); list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { /* Initial residue value */ desc->residue = desc->total_len; /* Check first descriptors addr */ if (desc->txd.phys == DWC_LLP_LOC(llp)) { spin_unlock_irqrestore(&dwc->lock, flags); return; } /* Check first descriptors llp */ if (lli_read(desc, llp) == llp) { /* This one is currently in progress */ desc->residue -= dwc_get_sent(dwc); spin_unlock_irqrestore(&dwc->lock, flags); return; } desc->residue -= desc->len; list_for_each_entry(child, &desc->tx_list, desc_node) { if (lli_read(child, llp) == llp) { /* Currently in progress */ desc->residue -= dwc_get_sent(dwc); spin_unlock_irqrestore(&dwc->lock, flags); return; } desc->residue -= child->len; } /* * No descriptors so far seem to be in progress, i.e. * this one must be done. */ spin_unlock_irqrestore(&dwc->lock, flags); dwc_descriptor_complete(dwc, desc, true); spin_lock_irqsave(&dwc->lock, flags); } dev_err(chan2dev(&dwc->chan), "BUG: All descriptors done, but channel not idle!\n"); /* Try to continue after resetting the channel... */ dwc_chan_disable(dw, dwc); dwc_dostart_first_queued(dwc); spin_unlock_irqrestore(&dwc->lock, flags); } static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc) { dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", lli_read(desc, sar), lli_read(desc, dar), lli_read(desc, llp), lli_read(desc, ctlhi), lli_read(desc, ctllo)); } static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) { struct dw_desc *bad_desc; struct dw_desc *child; unsigned long flags; dwc_scan_descriptors(dw, dwc); spin_lock_irqsave(&dwc->lock, flags); /* * The descriptor currently at the head of the active list is * borked. Since we don't have any way to report errors, we'll * just have to scream loudly and try to carry on. */ bad_desc = dwc_first_active(dwc); list_del_init(&bad_desc->desc_node); list_move(dwc->queue.next, dwc->active_list.prev); /* Clear the error flag and try to restart the controller */ dma_writel(dw, CLEAR.ERROR, dwc->mask); if (!list_empty(&dwc->active_list)) dwc_dostart(dwc, dwc_first_active(dwc)); /* * WARN may seem harsh, but since this only happens * when someone submits a bad physical address in a * descriptor, we should consider ourselves lucky that the * controller flagged an error instead of scribbling over * random memory locations. 
*/ dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n" " cookie: %d\n", bad_desc->txd.cookie); dwc_dump_lli(dwc, bad_desc); list_for_each_entry(child, &bad_desc->tx_list, desc_node) dwc_dump_lli(dwc, child); spin_unlock_irqrestore(&dwc->lock, flags); /* Pretend the descriptor completed successfully */ dwc_descriptor_complete(dwc, bad_desc, true); } static void dw_dma_tasklet(struct tasklet_struct *t) { struct dw_dma *dw = from_tasklet(dw, t, tasklet); struct dw_dma_chan *dwc; u32 status_xfer; u32 status_err; unsigned int i; status_xfer = dma_readl(dw, RAW.XFER); status_err = dma_readl(dw, RAW.ERROR); dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err); for (i = 0; i < dw->dma.chancnt; i++) { dwc = &dw->chan[i]; if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) dev_vdbg(dw->dma.dev, "Cyclic xfer is not implemented\n"); else if (status_err & (1 << i)) dwc_handle_error(dw, dwc); else if (status_xfer & (1 << i)) dwc_scan_descriptors(dw, dwc); } /* Re-enable interrupts */ channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); } static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) { struct dw_dma *dw = dev_id; u32 status; /* Check if we have any interrupt from the DMAC which is not in use */ if (!dw->in_use) return IRQ_NONE; status = dma_readl(dw, STATUS_INT); dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status); /* Check if we have any interrupt from the DMAC */ if (!status) return IRQ_NONE; /* * Just disable the interrupts. We'll turn them back on in the * softirq handler. */ channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); status = dma_readl(dw, STATUS_INT); if (status) { dev_err(dw->dma.dev, "BUG: Unexpected interrupts pending: 0x%x\n", status); /* Try to recover */ channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1); channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); } tasklet_schedule(&dw->tasklet); return IRQ_HANDLED; } /*----------------------------------------------------------------------*/ static struct dma_async_tx_descriptor * dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma *dw = to_dw_dma(chan->device); struct dw_desc *desc; struct dw_desc *first; struct dw_desc *prev; size_t xfer_count; size_t offset; u8 m_master = dwc->dws.m_master; unsigned int src_width; unsigned int dst_width; unsigned int data_width = dw->pdata->data_width[m_master]; u32 ctllo, ctlhi; u8 lms = DWC_LLP_LMS(m_master); dev_vdbg(chan2dev(chan), "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__, &dest, &src, len, flags); if (unlikely(!len)) { dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); return NULL; } dwc->direction = DMA_MEM_TO_MEM; src_width = dst_width = __ffs(data_width | src | dest | len); ctllo = dw->prepare_ctllo(dwc) | DWC_CTLL_DST_WIDTH(dst_width) | DWC_CTLL_SRC_WIDTH(src_width) | DWC_CTLL_DST_INC | DWC_CTLL_SRC_INC | DWC_CTLL_FC_M2M; prev = first = NULL; for (offset = 0; offset < len; offset += xfer_count) { desc = dwc_desc_get(dwc); if (!desc) goto err_desc_get; ctlhi = dw->bytes2block(dwc, len - offset, src_width, &xfer_count); lli_write(desc, sar, src + offset); lli_write(desc, dar, dest + offset); lli_write(desc, ctllo, ctllo); 
lli_write(desc, ctlhi, ctlhi); desc->len = xfer_count; if (!first) { first = desc; } else { lli_write(prev, llp, desc->txd.phys | lms); list_add_tail(&desc->desc_node, &first->tx_list); } prev = desc; } if (flags & DMA_PREP_INTERRUPT) /* Trigger interrupt after last block */ lli_set(prev, ctllo, DWC_CTLL_INT_EN); prev->lli.llp = 0; lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); first->txd.flags = flags; first->total_len = len; return &first->txd; err_desc_get: dwc_desc_put(dwc, first); return NULL; } static struct dma_async_tx_descriptor * dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, void *context) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma *dw = to_dw_dma(chan->device); struct dma_slave_config *sconfig = &dwc->dma_sconfig; struct dw_desc *prev; struct dw_desc *first; u32 ctllo, ctlhi; u8 m_master = dwc->dws.m_master; u8 lms = DWC_LLP_LMS(m_master); dma_addr_t reg; unsigned int reg_width; unsigned int mem_width; unsigned int data_width = dw->pdata->data_width[m_master]; unsigned int i; struct scatterlist *sg; size_t total_len = 0; dev_vdbg(chan2dev(chan), "%s\n", __func__); if (unlikely(!is_slave_direction(direction) || !sg_len)) return NULL; dwc->direction = direction; prev = first = NULL; switch (direction) { case DMA_MEM_TO_DEV: reg_width = __ffs(sconfig->dst_addr_width); reg = sconfig->dst_addr; ctllo = dw->prepare_ctllo(dwc) | DWC_CTLL_DST_WIDTH(reg_width) | DWC_CTLL_DST_FIX | DWC_CTLL_SRC_INC; ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : DWC_CTLL_FC(DW_DMA_FC_D_M2P); for_each_sg(sgl, sg, sg_len, i) { struct dw_desc *desc; u32 len, mem; size_t dlen; mem = sg_dma_address(sg); len = sg_dma_len(sg); mem_width = __ffs(data_width | mem | len); slave_sg_todev_fill_desc: desc = dwc_desc_get(dwc); if (!desc) goto err_desc_get; ctlhi = dw->bytes2block(dwc, len, mem_width, &dlen); lli_write(desc, sar, mem); lli_write(desc, dar, reg); lli_write(desc, ctlhi, ctlhi); lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width)); desc->len = dlen; if (!first) { first = desc; } else { lli_write(prev, llp, desc->txd.phys | lms); list_add_tail(&desc->desc_node, &first->tx_list); } prev = desc; mem += dlen; len -= dlen; total_len += dlen; if (len) goto slave_sg_todev_fill_desc; } break; case DMA_DEV_TO_MEM: reg_width = __ffs(sconfig->src_addr_width); reg = sconfig->src_addr; ctllo = dw->prepare_ctllo(dwc) | DWC_CTLL_SRC_WIDTH(reg_width) | DWC_CTLL_DST_INC | DWC_CTLL_SRC_FIX; ctllo |= sconfig->device_fc ? 
DWC_CTLL_FC(DW_DMA_FC_P_P2M) : DWC_CTLL_FC(DW_DMA_FC_D_P2M); for_each_sg(sgl, sg, sg_len, i) { struct dw_desc *desc; u32 len, mem; size_t dlen; mem = sg_dma_address(sg); len = sg_dma_len(sg); slave_sg_fromdev_fill_desc: desc = dwc_desc_get(dwc); if (!desc) goto err_desc_get; ctlhi = dw->bytes2block(dwc, len, reg_width, &dlen); lli_write(desc, sar, reg); lli_write(desc, dar, mem); lli_write(desc, ctlhi, ctlhi); mem_width = __ffs(data_width | mem); lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width)); desc->len = dlen; if (!first) { first = desc; } else { lli_write(prev, llp, desc->txd.phys | lms); list_add_tail(&desc->desc_node, &first->tx_list); } prev = desc; mem += dlen; len -= dlen; total_len += dlen; if (len) goto slave_sg_fromdev_fill_desc; } break; default: return NULL; } if (flags & DMA_PREP_INTERRUPT) /* Trigger interrupt after last block */ lli_set(prev, ctllo, DWC_CTLL_INT_EN); prev->lli.llp = 0; lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); first->total_len = total_len; return &first->txd; err_desc_get: dev_err(chan2dev(chan), "not enough descriptors available. Direction %d\n", direction); dwc_desc_put(dwc, first); return NULL; } bool dw_dma_filter(struct dma_chan *chan, void *param) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma_slave *dws = param; if (dws->dma_dev != chan->device->dev) return false; /* permit channels in accordance with the channels mask */ if (dws->channels && !(dws->channels & dwc->mask)) return false; /* We have to copy data since dws can be temporary storage */ memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave)); return true; } EXPORT_SYMBOL_GPL(dw_dma_filter); static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma *dw = to_dw_dma(chan->device); memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); dwc->dma_sconfig.src_maxburst = clamp(dwc->dma_sconfig.src_maxburst, 0U, dwc->max_burst); dwc->dma_sconfig.dst_maxburst = clamp(dwc->dma_sconfig.dst_maxburst, 0U, dwc->max_burst); dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst); dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst); return 0; } static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain) { struct dw_dma *dw = to_dw_dma(dwc->chan.device); unsigned int count = 20; /* timeout iterations */ dw->suspend_chan(dwc, drain); while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) udelay(2); set_bit(DW_DMA_IS_PAUSED, &dwc->flags); } static int dwc_pause(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); unsigned long flags; spin_lock_irqsave(&dwc->lock, flags); dwc_chan_pause(dwc, false); spin_unlock_irqrestore(&dwc->lock, flags); return 0; } static inline void dwc_chan_resume(struct dw_dma_chan *dwc, bool drain) { struct dw_dma *dw = to_dw_dma(dwc->chan.device); dw->resume_chan(dwc, drain); clear_bit(DW_DMA_IS_PAUSED, &dwc->flags); } static int dwc_resume(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); unsigned long flags; spin_lock_irqsave(&dwc->lock, flags); if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags)) dwc_chan_resume(dwc, false); spin_unlock_irqrestore(&dwc->lock, flags); return 0; } static int dwc_terminate_all(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma *dw = to_dw_dma(chan->device); struct dw_desc *desc, *_desc; unsigned long flags; LIST_HEAD(list); spin_lock_irqsave(&dwc->lock, flags); clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); dwc_chan_pause(dwc, 
true); dwc_chan_disable(dw, dwc); dwc_chan_resume(dwc, true); /* active_list entries will end up before queued entries */ list_splice_init(&dwc->queue, &list); list_splice_init(&dwc->active_list, &list); spin_unlock_irqrestore(&dwc->lock, flags); /* Flush all pending and queued descriptors */ list_for_each_entry_safe(desc, _desc, &list, desc_node) dwc_descriptor_complete(dwc, desc, false); return 0; } static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c) { struct dw_desc *desc; list_for_each_entry(desc, &dwc->active_list, desc_node) if (desc->txd.cookie == c) return desc; return NULL; } static u32 dwc_get_residue_and_status(struct dw_dma_chan *dwc, dma_cookie_t cookie, enum dma_status *status) { struct dw_desc *desc; unsigned long flags; u32 residue; spin_lock_irqsave(&dwc->lock, flags); desc = dwc_find_desc(dwc, cookie); if (desc) { if (desc == dwc_first_active(dwc)) { residue = desc->residue; if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue) residue -= dwc_get_sent(dwc); if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags)) *status = DMA_PAUSED; } else { residue = desc->total_len; } } else { residue = 0; } spin_unlock_irqrestore(&dwc->lock, flags); return residue; } static enum dma_status dwc_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); if (ret == DMA_COMPLETE) return ret; dwc_scan_descriptors(to_dw_dma(chan->device), dwc); ret = dma_cookie_status(chan, cookie, txstate); if (ret == DMA_COMPLETE) return ret; dma_set_residue(txstate, dwc_get_residue_and_status(dwc, cookie, &ret)); return ret; } static void dwc_issue_pending(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); unsigned long flags; spin_lock_irqsave(&dwc->lock, flags); if (list_empty(&dwc->active_list)) dwc_dostart_first_queued(dwc); spin_unlock_irqrestore(&dwc->lock, flags); } /*----------------------------------------------------------------------*/ void do_dw_dma_off(struct dw_dma *dw) { dma_writel(dw, CFG, 0); channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) cpu_relax(); } void do_dw_dma_on(struct dw_dma *dw) { dma_writel(dw, CFG, DW_CFG_DMA_EN); } static int dwc_alloc_chan_resources(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma *dw = to_dw_dma(chan->device); dev_vdbg(chan2dev(chan), "%s\n", __func__); /* ASSERT: channel is idle */ if (dma_readl(dw, CH_EN) & dwc->mask) { dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); return -EIO; } dma_cookie_init(chan); /* * NOTE: some controllers may have additional features that we * need to initialize here, like "scatter-gather" (which * doesn't mean what you think it means), and status writeback. */ /* * We need controller-specific data to set up slave transfers. 
*/ if (chan->private && !dw_dma_filter(chan, chan->private)) { dev_warn(chan2dev(chan), "Wrong controller-specific data\n"); return -EINVAL; } /* Enable controller here if needed */ if (!dw->in_use) do_dw_dma_on(dw); dw->in_use |= dwc->mask; return 0; } static void dwc_free_chan_resources(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma *dw = to_dw_dma(chan->device); unsigned long flags; dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, dwc->descs_allocated); /* ASSERT: channel is idle */ BUG_ON(!list_empty(&dwc->active_list)); BUG_ON(!list_empty(&dwc->queue)); BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); spin_lock_irqsave(&dwc->lock, flags); /* Clear custom channel configuration */ memset(&dwc->dws, 0, sizeof(struct dw_dma_slave)); /* Disable interrupts */ channel_clear_bit(dw, MASK.XFER, dwc->mask); channel_clear_bit(dw, MASK.BLOCK, dwc->mask); channel_clear_bit(dw, MASK.ERROR, dwc->mask); spin_unlock_irqrestore(&dwc->lock, flags); /* Disable controller in case it was a last user */ dw->in_use &= ~dwc->mask; if (!dw->in_use) do_dw_dma_off(dw); dev_vdbg(chan2dev(chan), "%s: done\n", __func__); } static void dwc_caps(struct dma_chan *chan, struct dma_slave_caps *caps) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); caps->max_burst = dwc->max_burst; /* * It might be crucial for some devices to have the hardware * accelerated multi-block transfers supported, aka LLPs in DW DMAC * notation. So if LLPs are supported then max_sg_burst is set to * zero which means unlimited number of SG entries can be handled in a * single DMA transaction, otherwise it's just one SG entry. */ if (dwc->nollp) caps->max_sg_burst = 1; else caps->max_sg_burst = 0; } int do_dma_probe(struct dw_dma_chip *chip) { struct dw_dma *dw = chip->dw; struct dw_dma_platform_data *pdata; bool autocfg = false; unsigned int dw_params; unsigned int i; int err; dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL); if (!dw->pdata) return -ENOMEM; dw->regs = chip->regs; pm_runtime_get_sync(chip->dev); if (!chip->pdata) { dw_params = dma_readl(dw, DW_PARAMS); dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params); autocfg = dw_params >> DW_PARAMS_EN & 1; if (!autocfg) { err = -EINVAL; goto err_pdata; } /* Reassign the platform data pointer */ pdata = dw->pdata; /* Get hardware configuration parameters */ pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1; pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; for (i = 0; i < pdata->nr_masters; i++) { pdata->data_width[i] = 4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3); } pdata->block_size = dma_readl(dw, MAX_BLK_SIZE); /* Fill platform data with the default values */ pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; pdata->chan_priority = CHAN_PRIORITY_ASCENDING; } else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { err = -EINVAL; goto err_pdata; } else { memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata)); /* Reassign the platform data pointer */ pdata = dw->pdata; } dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan), GFP_KERNEL); if (!dw->chan) { err = -ENOMEM; goto err_pdata; } /* Calculate all channel mask before DMA setup */ dw->all_chan_mask = (1 << pdata->nr_channels) - 1; /* Force dma off, just in case */ dw->disable(dw); /* Device and instance ID for IRQ and DMA pool */ dw->set_device_name(dw, chip->id); /* Create a pool of consistent memory blocks for hardware descriptors */ dw->desc_pool = dmam_pool_create(dw->name, chip->dev, 
sizeof(struct dw_desc), 4, 0); if (!dw->desc_pool) { dev_err(chip->dev, "No memory for descriptors dma pool\n"); err = -ENOMEM; goto err_pdata; } tasklet_setup(&dw->tasklet, dw_dma_tasklet); err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED, dw->name, dw); if (err) goto err_pdata; INIT_LIST_HEAD(&dw->dma.channels); for (i = 0; i < pdata->nr_channels; i++) { struct dw_dma_chan *dwc = &dw->chan[i]; dwc->chan.device = &dw->dma; dma_cookie_init(&dwc->chan); if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) list_add_tail(&dwc->chan.device_node, &dw->dma.channels); else list_add(&dwc->chan.device_node, &dw->dma.channels); /* 7 is highest priority & 0 is lowest. */ if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) dwc->priority = pdata->nr_channels - i - 1; else dwc->priority = i; dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; spin_lock_init(&dwc->lock); dwc->mask = 1 << i; INIT_LIST_HEAD(&dwc->active_list); INIT_LIST_HEAD(&dwc->queue); channel_clear_bit(dw, CH_EN, dwc->mask); dwc->direction = DMA_TRANS_NONE; /* Hardware configuration */ if (autocfg) { unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r]; unsigned int dwc_params = readl(addr); dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, dwc_params); /* * Decode maximum block size for given channel. The * stored 4 bit value represents blocks from 0x00 for 3 * up to 0x0a for 4095. */ dwc->block_size = (4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1; /* * According to the DW DMA databook the true scatter- * gether LLPs aren't available if either multi-block * config is disabled (CHx_MULTI_BLK_EN == 0) or the * LLP register is hard-coded to zeros * (CHx_HC_LLP == 1). */ dwc->nollp = (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0 || (dwc_params >> DWC_PARAMS_HC_LLP & 0x1) == 1; dwc->max_burst = (0x4 << (dwc_params >> DWC_PARAMS_MSIZE & 0x7)); } else { dwc->block_size = pdata->block_size; dwc->nollp = !pdata->multi_block[i]; dwc->max_burst = pdata->max_burst[i] ?: DW_DMA_MAX_BURST; } } /* Clear all interrupts on all channels. */ dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); /* Set capabilities */ dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); dw->dma.dev = chip->dev; dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; dw->dma.device_free_chan_resources = dwc_free_chan_resources; dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; dw->dma.device_caps = dwc_caps; dw->dma.device_config = dwc_config; dw->dma.device_pause = dwc_pause; dw->dma.device_resume = dwc_resume; dw->dma.device_terminate_all = dwc_terminate_all; dw->dma.device_tx_status = dwc_tx_status; dw->dma.device_issue_pending = dwc_issue_pending; /* DMA capabilities */ dw->dma.min_burst = DW_DMA_MIN_BURST; dw->dma.max_burst = DW_DMA_MAX_BURST; dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS; dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS; dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | BIT(DMA_MEM_TO_MEM); dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; /* * For now there is no hardware with non uniform maximum block size * across all of the device channels, so we set the maximum segment * size as the block size found for the very first channel. 
*/ dma_set_max_seg_size(dw->dma.dev, dw->chan[0].block_size); err = dma_async_device_register(&dw->dma); if (err) goto err_dma_register; dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n", pdata->nr_channels); pm_runtime_put_sync_suspend(chip->dev); return 0; err_dma_register: free_irq(chip->irq, dw); err_pdata: pm_runtime_put_sync_suspend(chip->dev); return err; } int do_dma_remove(struct dw_dma_chip *chip) { struct dw_dma *dw = chip->dw; struct dw_dma_chan *dwc, *_dwc; pm_runtime_get_sync(chip->dev); do_dw_dma_off(dw); dma_async_device_unregister(&dw->dma); free_irq(chip->irq, dw); tasklet_kill(&dw->tasklet); list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, chan.device_node) { list_del(&dwc->chan.device_node); channel_clear_bit(dw, CH_EN, dwc->mask); } pm_runtime_put_sync_suspend(chip->dev); return 0; } int do_dw_dma_disable(struct dw_dma_chip *chip) { struct dw_dma *dw = chip->dw; dw->disable(dw); return 0; } EXPORT_SYMBOL_GPL(do_dw_dma_disable); int do_dw_dma_enable(struct dw_dma_chip *chip) { struct dw_dma *dw = chip->dw; dw->enable(dw); return 0; } EXPORT_SYMBOL_GPL(do_dw_dma_enable); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); MODULE_AUTHOR("Viresh Kumar <[email protected]>");
linux-master
drivers/dma/dw/core.c
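A minimal userspace sketch (not kernel code) of the alignment math used by dwc_prep_dma_memcpy() in the file above: the transfer width is the index of the lowest set bit of the OR of the master data width, both addresses and the length. __builtin_ctzl() stands in for the kernel's __ffs(); the sample addresses are made up for illustration.

/* Sketch only: how dwc_prep_dma_memcpy() picks the memcpy transfer width. */
#include <stdio.h>

static unsigned int pick_width(unsigned long data_width,
                               unsigned long src, unsigned long dest,
                               unsigned long len)
{
        /* Widest width that keeps src, dest and len aligned, capped by
         * the master interface data width (given in bytes, e.g. 4). */
        return __builtin_ctzl(data_width | src | dest | len);
}

int main(void)
{
        /* 4-byte capable master, 4-byte aligned buffers -> width 2 (1 << 2 bytes) */
        printf("%u\n", pick_width(4, 0x1004, 0x2008, 0x100));
        /* An odd source address forces byte-wide transfers -> width 0 */
        printf("%u\n", pick_width(4, 0x1001, 0x2008, 0x100));
        return 0;
}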
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2007-2008 Atmel Corporation // Copyright (C) 2010-2011 ST Microelectronics // Copyright (C) 2013,2018 Intel Corporation #include <linux/bitops.h> #include <linux/dmaengine.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/types.h> #include "internal.h" static void dw_dma_initialize_chan(struct dw_dma_chan *dwc) { struct dw_dma *dw = to_dw_dma(dwc->chan.device); u32 cfghi = is_slave_direction(dwc->direction) ? 0 : DWC_CFGH_FIFO_MODE; u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); bool hs_polarity = dwc->dws.hs_polarity; cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id); cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id); cfghi |= DWC_CFGH_PROTCTL(dw->pdata->protctl); /* Set polarity of handshake interface */ cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0; channel_writel(dwc, CFG_LO, cfglo); channel_writel(dwc, CFG_HI, cfghi); } static void dw_dma_suspend_chan(struct dw_dma_chan *dwc, bool drain) { u32 cfglo = channel_readl(dwc, CFG_LO); channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); } static void dw_dma_resume_chan(struct dw_dma_chan *dwc, bool drain) { u32 cfglo = channel_readl(dwc, CFG_LO); channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); } static u32 dw_dma_bytes2block(struct dw_dma_chan *dwc, size_t bytes, unsigned int width, size_t *len) { u32 block; if ((bytes >> width) > dwc->block_size) { block = dwc->block_size; *len = dwc->block_size << width; } else { block = bytes >> width; *len = bytes; } return block; } static size_t dw_dma_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width) { return DWC_CTLH_BLOCK_TS(block) << width; } static u32 dw_dma_prepare_ctllo(struct dw_dma_chan *dwc) { struct dma_slave_config *sconfig = &dwc->dma_sconfig; u8 smsize = (dwc->direction == DMA_DEV_TO_MEM) ? sconfig->src_maxburst : 0; u8 dmsize = (dwc->direction == DMA_MEM_TO_DEV) ? sconfig->dst_maxburst : 0; u8 p_master = dwc->dws.p_master; u8 m_master = dwc->dws.m_master; u8 dms = (dwc->direction == DMA_MEM_TO_DEV) ? p_master : m_master; u8 sms = (dwc->direction == DMA_DEV_TO_MEM) ? p_master : m_master; return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN | DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize) | DWC_CTLL_DMS(dms) | DWC_CTLL_SMS(sms); } static void dw_dma_encode_maxburst(struct dw_dma_chan *dwc, u32 *maxburst) { /* * Fix burst size according to dw_dmac. We need to convert them as: * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. */ *maxburst = *maxburst > 1 ? fls(*maxburst) - 2 : 0; } static void dw_dma_set_device_name(struct dw_dma *dw, int id) { snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", id); } static void dw_dma_disable(struct dw_dma *dw) { do_dw_dma_off(dw); } static void dw_dma_enable(struct dw_dma *dw) { do_dw_dma_on(dw); } int dw_dma_probe(struct dw_dma_chip *chip) { struct dw_dma *dw; dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL); if (!dw) return -ENOMEM; /* Channel operations */ dw->initialize_chan = dw_dma_initialize_chan; dw->suspend_chan = dw_dma_suspend_chan; dw->resume_chan = dw_dma_resume_chan; dw->prepare_ctllo = dw_dma_prepare_ctllo; dw->encode_maxburst = dw_dma_encode_maxburst; dw->bytes2block = dw_dma_bytes2block; dw->block2bytes = dw_dma_block2bytes; /* Device operations */ dw->set_device_name = dw_dma_set_device_name; dw->disable = dw_dma_disable; dw->enable = dw_dma_enable; chip->dw = dw; return do_dma_probe(chip); } EXPORT_SYMBOL_GPL(dw_dma_probe); int dw_dma_remove(struct dw_dma_chip *chip) { return do_dma_remove(chip); } EXPORT_SYMBOL_GPL(dw_dma_remove);
linux-master
drivers/dma/dw/dw.c
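A standalone sketch (not kernel code) of the burst-size encoding performed by dw_dma_encode_maxburst() above; the driver's own comment states the mapping 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. The kernel's fls() is modelled here with __builtin_clz().

/* Sketch only: dw_dmac burst encoding, expected mapping 1->0, 4->1, 8->2, 16->3. */
#include <stdio.h>

static unsigned int fls32(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;   /* 1-based index of highest set bit */
}

static unsigned int encode_maxburst(unsigned int maxburst)
{
        return maxburst > 1 ? fls32(maxburst) - 2 : 0;
}

int main(void)
{
        unsigned int bursts[] = { 1, 4, 8, 16 };

        for (unsigned int i = 0; i < 4; i++)
                printf("%u -> %u\n", bursts[i], encode_maxburst(bursts[i]));
        return 0;
}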
// SPDX-License-Identifier: GPL-2.0 /* * Platform driver for the Synopsys DesignWare DMA Controller * * Copyright (C) 2007-2008 Atmel Corporation * Copyright (C) 2010-2011 ST Microelectronics * Copyright (C) 2013 Intel Corporation */ #include <linux/of.h> #include <linux/of_dma.h> #include <linux/platform_device.h> #include "internal.h" static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { struct dw_dma *dw = ofdma->of_dma_data; struct dw_dma_slave slave = { .dma_dev = dw->dma.dev, }; dma_cap_mask_t cap; if (dma_spec->args_count < 3 || dma_spec->args_count > 4) return NULL; slave.src_id = dma_spec->args[0]; slave.dst_id = dma_spec->args[0]; slave.m_master = dma_spec->args[1]; slave.p_master = dma_spec->args[2]; if (dma_spec->args_count >= 4) slave.channels = dma_spec->args[3]; if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS || slave.dst_id >= DW_DMA_MAX_NR_REQUESTS || slave.m_master >= dw->pdata->nr_masters || slave.p_master >= dw->pdata->nr_masters || slave.channels >= BIT(dw->pdata->nr_channels))) return NULL; dma_cap_zero(cap); dma_cap_set(DMA_SLAVE, cap); /* TODO: there should be a simpler way to do this */ return dma_request_channel(cap, dw_dma_filter, &slave); } struct dw_dma_platform_data *dw_dma_parse_dt(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct dw_dma_platform_data *pdata; u32 tmp, arr[DW_DMA_MAX_NR_MASTERS]; u32 nr_masters; u32 nr_channels; if (of_property_read_u32(np, "dma-masters", &nr_masters)) return NULL; if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS) return NULL; if (of_property_read_u32(np, "dma-channels", &nr_channels)) return NULL; if (nr_channels > DW_DMA_MAX_NR_CHANNELS) return NULL; pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return NULL; pdata->nr_masters = nr_masters; pdata->nr_channels = nr_channels; of_property_read_u32(np, "chan_allocation_order", &pdata->chan_allocation_order); of_property_read_u32(np, "chan_priority", &pdata->chan_priority); of_property_read_u32(np, "block_size", &pdata->block_size); /* Try deprecated property first */ if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) { for (tmp = 0; tmp < nr_masters; tmp++) pdata->data_width[tmp] = BIT(arr[tmp] & 0x07); } /* If "data_width" and "data-width" both provided use the latter one */ of_property_read_u32_array(np, "data-width", pdata->data_width, nr_masters); memset32(pdata->multi_block, 1, nr_channels); of_property_read_u32_array(np, "multi-block", pdata->multi_block, nr_channels); memset32(pdata->max_burst, DW_DMA_MAX_BURST, nr_channels); of_property_read_u32_array(np, "snps,max-burst-len", pdata->max_burst, nr_channels); of_property_read_u32(np, "snps,dma-protection-control", &pdata->protctl); if (pdata->protctl > CHAN_PROTCTL_MASK) return NULL; return pdata; } void dw_dma_of_controller_register(struct dw_dma *dw) { struct device *dev = dw->dma.dev; int ret; if (!dev->of_node) return; ret = of_dma_controller_register(dev->of_node, dw_dma_of_xlate, dw); if (ret) dev_err(dev, "could not register of_dma_controller\n"); } void dw_dma_of_controller_free(struct dw_dma *dw) { struct device *dev = dw->dma.dev; if (!dev->of_node) return; of_dma_controller_free(dev->of_node); }
linux-master
drivers/dma/dw/of.c
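A standalone sketch (not kernel code) of how dw_dma_of_xlate() above maps a 3- or 4-cell "dmas" specifier onto the slave parameters: cell 0 is the request line used for both src_id and dst_id, cells 1 and 2 are the memory- and peripheral-side masters, and the optional cell 3 is a channel mask. The struct below is a simplified stand-in for struct dw_dma_slave, and the sample values are made up.

/* Sketch only: decoding a DW DMA device-tree specifier. */
#include <stdio.h>

struct slave_params {
        unsigned int src_id, dst_id;    /* request line, cell 0 used for both */
        unsigned int m_master;          /* memory-side master, cell 1 */
        unsigned int p_master;          /* peripheral-side master, cell 2 */
        unsigned int channels;          /* optional channel mask, cell 3 */
};

static void decode_spec(const unsigned int *args, int count, struct slave_params *p)
{
        p->src_id = p->dst_id = args[0];
        p->m_master = args[1];
        p->p_master = args[2];
        p->channels = (count >= 4) ? args[3] : 0;
}

int main(void)
{
        unsigned int spec[] = { 5, 0, 1, 0x3 }; /* request 5, masters 0/1, channels 0-1 */
        struct slave_params p;

        decode_spec(spec, 4, &p);
        printf("req %u, m_master %u, p_master %u, channels 0x%x\n",
               p.src_id, p.m_master, p.p_master, p.channels);
        return 0;
}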
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2022 Schneider-Electric * Author: Miquel Raynal <[email protected] * Based on TI crossbar driver written by Peter Ujfalusi <[email protected]> */ #include <linux/bitops.h> #include <linux/of.h> #include <linux/of_dma.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/soc/renesas/r9a06g032-sysctrl.h> #include <linux/types.h> #define RNZ1_DMAMUX_NCELLS 6 #define RZN1_DMAMUX_MAX_LINES 64 #define RZN1_DMAMUX_LINES_PER_CTLR 16 struct rzn1_dmamux_data { struct dma_router dmarouter; DECLARE_BITMAP(used_chans, 2 * RZN1_DMAMUX_LINES_PER_CTLR); }; struct rzn1_dmamux_map { unsigned int req_idx; }; static void rzn1_dmamux_free(struct device *dev, void *route_data) { struct rzn1_dmamux_data *dmamux = dev_get_drvdata(dev); struct rzn1_dmamux_map *map = route_data; dev_dbg(dev, "Unmapping DMAMUX request %u\n", map->req_idx); clear_bit(map->req_idx, dmamux->used_chans); kfree(map); } static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); struct rzn1_dmamux_data *dmamux = platform_get_drvdata(pdev); struct rzn1_dmamux_map *map; unsigned int dmac_idx, chan, val; u32 mask; int ret; if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS) return ERR_PTR(-EINVAL); map = kzalloc(sizeof(*map), GFP_KERNEL); if (!map) return ERR_PTR(-ENOMEM); chan = dma_spec->args[0]; map->req_idx = dma_spec->args[4]; val = dma_spec->args[5]; dma_spec->args_count -= 2; if (chan >= RZN1_DMAMUX_LINES_PER_CTLR) { dev_err(&pdev->dev, "Invalid DMA request line: %u\n", chan); ret = -EINVAL; goto free_map; } if (map->req_idx >= RZN1_DMAMUX_MAX_LINES || (map->req_idx % RZN1_DMAMUX_LINES_PER_CTLR) != chan) { dev_err(&pdev->dev, "Invalid MUX request line: %u\n", map->req_idx); ret = -EINVAL; goto free_map; } dmac_idx = map->req_idx >= RZN1_DMAMUX_LINES_PER_CTLR ? 1 : 0; dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", dmac_idx); if (!dma_spec->np) { dev_err(&pdev->dev, "Can't get DMA master\n"); ret = -EINVAL; goto free_map; } dev_dbg(&pdev->dev, "Mapping DMAMUX request %u to DMAC%u request %u\n", map->req_idx, dmac_idx, chan); if (test_and_set_bit(map->req_idx, dmamux->used_chans)) { ret = -EBUSY; goto free_map; } mask = BIT(map->req_idx); ret = r9a06g032_sysctrl_set_dmamux(mask, val ? 
mask : 0); if (ret) goto clear_bitmap; return map; clear_bitmap: clear_bit(map->req_idx, dmamux->used_chans); free_map: kfree(map); return ERR_PTR(ret); } #ifdef CONFIG_OF static const struct of_device_id rzn1_dmac_match[] = { { .compatible = "renesas,rzn1-dma" }, {} }; #endif static int rzn1_dmamux_probe(struct platform_device *pdev) { struct device_node *mux_node = pdev->dev.of_node; const struct of_device_id *match; struct device_node *dmac_node; struct rzn1_dmamux_data *dmamux; dmamux = devm_kzalloc(&pdev->dev, sizeof(*dmamux), GFP_KERNEL); if (!dmamux) return -ENOMEM; dmac_node = of_parse_phandle(mux_node, "dma-masters", 0); if (!dmac_node) return dev_err_probe(&pdev->dev, -ENODEV, "Can't get DMA master node\n"); match = of_match_node(rzn1_dmac_match, dmac_node); of_node_put(dmac_node); if (!match) return dev_err_probe(&pdev->dev, -EINVAL, "DMA master is not supported\n"); dmamux->dmarouter.dev = &pdev->dev; dmamux->dmarouter.route_free = rzn1_dmamux_free; platform_set_drvdata(pdev, dmamux); return of_dma_router_register(mux_node, rzn1_dmamux_route_allocate, &dmamux->dmarouter); } static const struct of_device_id rzn1_dmamux_match[] = { { .compatible = "renesas,rzn1-dmamux" }, {} }; MODULE_DEVICE_TABLE(of, rzn1_dmamux_match); static struct platform_driver rzn1_dmamux_driver = { .driver = { .name = "renesas,rzn1-dmamux", .of_match_table = rzn1_dmamux_match, }, .probe = rzn1_dmamux_probe, }; module_platform_driver(rzn1_dmamux_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Miquel Raynal <[email protected]"); MODULE_DESCRIPTION("Renesas RZ/N1 DMAMUX driver");
linux-master
drivers/dma/dw/rzn1-dmamux.c
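A standalone sketch (not kernel code) of the request-line routing check done by rzn1_dmamux_route_allocate() above: the mux serves 64 lines split across two DMACs of 16 channels each, the channel cell must equal the mux line modulo 16, and the DMAC index is 0 or 1 depending on which half the line falls in. Values used in main() are made up.

/* Sketch only: RZ/N1 DMAMUX line-to-DMAC routing check. */
#include <stdio.h>
#include <stdbool.h>

#define MUX_MAX_LINES   64
#define LINES_PER_CTLR  16

static bool route(unsigned int chan, unsigned int req_idx, unsigned int *dmac_idx)
{
        if (chan >= LINES_PER_CTLR || req_idx >= MUX_MAX_LINES ||
            (req_idx % LINES_PER_CTLR) != chan)
                return false;   /* specifier rejected */
        *dmac_idx = req_idx >= LINES_PER_CTLR ? 1 : 0;  /* which DMAC serves this line */
        return true;
}

int main(void)
{
        unsigned int dmac;

        if (route(5, 21, &dmac))        /* mux line 21, channel 5: 21 mod 16 == 5 */
                printf("req 21 -> DMAC%u\n", dmac);
        if (!route(4, 21, &dmac))       /* channel 4 does not match 21 mod 16 */
                printf("req 21 on chan 4 rejected\n");
        return 0;
}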
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2013,2018,2020-2021 Intel Corporation #include <linux/bitops.h> #include <linux/dmaengine.h> #include <linux/errno.h> #include <linux/io.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/types.h> #include "internal.h" #define DMA_CTL_CH(x) (0x1000 + (x) * 4) #define DMA_SRC_ADDR_FILLIN(x) (0x1100 + (x) * 4) #define DMA_DST_ADDR_FILLIN(x) (0x1200 + (x) * 4) #define DMA_XBAR_SEL(x) (0x1300 + (x) * 4) #define DMA_REGACCESS_CHID_CFG (0x1400) #define CTL_CH_TRANSFER_MODE_MASK GENMASK(1, 0) #define CTL_CH_TRANSFER_MODE_S2S 0 #define CTL_CH_TRANSFER_MODE_S2D 1 #define CTL_CH_TRANSFER_MODE_D2S 2 #define CTL_CH_TRANSFER_MODE_D2D 3 #define CTL_CH_RD_RS_MASK GENMASK(4, 3) #define CTL_CH_WR_RS_MASK GENMASK(6, 5) #define CTL_CH_RD_NON_SNOOP_BIT BIT(8) #define CTL_CH_WR_NON_SNOOP_BIT BIT(9) #define XBAR_SEL_DEVID_MASK GENMASK(15, 0) #define XBAR_SEL_RX_TX_BIT BIT(16) #define XBAR_SEL_RX_TX_SHIFT 16 #define REGACCESS_CHID_MASK GENMASK(2, 0) static unsigned int idma32_get_slave_devfn(struct dw_dma_chan *dwc) { struct device *slave = dwc->chan.slave; if (!slave || !dev_is_pci(slave)) return 0; return to_pci_dev(slave)->devfn; } static void idma32_initialize_chan_xbar(struct dw_dma_chan *dwc) { struct dw_dma *dw = to_dw_dma(dwc->chan.device); void __iomem *misc = __dw_regs(dw); u32 cfghi = 0, cfglo = 0; u8 dst_id, src_id; u32 value; /* DMA Channel ID Configuration register must be programmed first */ value = readl(misc + DMA_REGACCESS_CHID_CFG); value &= ~REGACCESS_CHID_MASK; value |= dwc->chan.chan_id; writel(value, misc + DMA_REGACCESS_CHID_CFG); /* Configure channel attributes */ value = readl(misc + DMA_CTL_CH(dwc->chan.chan_id)); value &= ~(CTL_CH_RD_NON_SNOOP_BIT | CTL_CH_WR_NON_SNOOP_BIT); value &= ~(CTL_CH_RD_RS_MASK | CTL_CH_WR_RS_MASK); value &= ~CTL_CH_TRANSFER_MODE_MASK; switch (dwc->direction) { case DMA_MEM_TO_DEV: value |= CTL_CH_TRANSFER_MODE_D2S; value |= CTL_CH_WR_NON_SNOOP_BIT; break; case DMA_DEV_TO_MEM: value |= CTL_CH_TRANSFER_MODE_S2D; value |= CTL_CH_RD_NON_SNOOP_BIT; break; default: /* * Memory-to-Memory and Device-to-Device are ignored for now. * * For Memory-to-Memory transfers we would need to set mode * and disable snooping on both sides. 
*/ return; } writel(value, misc + DMA_CTL_CH(dwc->chan.chan_id)); /* Configure crossbar selection */ value = readl(misc + DMA_XBAR_SEL(dwc->chan.chan_id)); /* DEVFN selection */ value &= ~XBAR_SEL_DEVID_MASK; value |= idma32_get_slave_devfn(dwc); switch (dwc->direction) { case DMA_MEM_TO_DEV: value |= XBAR_SEL_RX_TX_BIT; break; case DMA_DEV_TO_MEM: value &= ~XBAR_SEL_RX_TX_BIT; break; default: /* Memory-to-Memory and Device-to-Device are ignored for now */ return; } writel(value, misc + DMA_XBAR_SEL(dwc->chan.chan_id)); /* Configure DMA channel low and high registers */ switch (dwc->direction) { case DMA_MEM_TO_DEV: dst_id = dwc->chan.chan_id; src_id = dwc->dws.src_id; break; case DMA_DEV_TO_MEM: dst_id = dwc->dws.dst_id; src_id = dwc->chan.chan_id; break; default: /* Memory-to-Memory and Device-to-Device are ignored for now */ return; } /* Set default burst alignment */ cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN; /* Low 4 bits of the request lines */ cfghi |= IDMA32C_CFGH_DST_PER(dst_id & 0xf); cfghi |= IDMA32C_CFGH_SRC_PER(src_id & 0xf); /* Request line extension (2 bits) */ cfghi |= IDMA32C_CFGH_DST_PER_EXT(dst_id >> 4 & 0x3); cfghi |= IDMA32C_CFGH_SRC_PER_EXT(src_id >> 4 & 0x3); channel_writel(dwc, CFG_LO, cfglo); channel_writel(dwc, CFG_HI, cfghi); } static void idma32_initialize_chan_generic(struct dw_dma_chan *dwc) { u32 cfghi = 0; u32 cfglo = 0; /* Set default burst alignment */ cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN; /* Low 4 bits of the request lines */ cfghi |= IDMA32C_CFGH_DST_PER(dwc->dws.dst_id & 0xf); cfghi |= IDMA32C_CFGH_SRC_PER(dwc->dws.src_id & 0xf); /* Request line extension (2 bits) */ cfghi |= IDMA32C_CFGH_DST_PER_EXT(dwc->dws.dst_id >> 4 & 0x3); cfghi |= IDMA32C_CFGH_SRC_PER_EXT(dwc->dws.src_id >> 4 & 0x3); channel_writel(dwc, CFG_LO, cfglo); channel_writel(dwc, CFG_HI, cfghi); } static void idma32_suspend_chan(struct dw_dma_chan *dwc, bool drain) { u32 cfglo = channel_readl(dwc, CFG_LO); if (drain) cfglo |= IDMA32C_CFGL_CH_DRAIN; channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); } static void idma32_resume_chan(struct dw_dma_chan *dwc, bool drain) { u32 cfglo = channel_readl(dwc, CFG_LO); if (drain) cfglo &= ~IDMA32C_CFGL_CH_DRAIN; channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); } static u32 idma32_bytes2block(struct dw_dma_chan *dwc, size_t bytes, unsigned int width, size_t *len) { u32 block; if (bytes > dwc->block_size) { block = dwc->block_size; *len = dwc->block_size; } else { block = bytes; *len = bytes; } return block; } static size_t idma32_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width) { return IDMA32C_CTLH_BLOCK_TS(block); } static u32 idma32_prepare_ctllo(struct dw_dma_chan *dwc) { struct dma_slave_config *sconfig = &dwc->dma_sconfig; u8 smsize = (dwc->direction == DMA_DEV_TO_MEM) ? sconfig->src_maxburst : 0; u8 dmsize = (dwc->direction == DMA_MEM_TO_DEV) ? sconfig->dst_maxburst : 0; return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN | DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize); } static void idma32_encode_maxburst(struct dw_dma_chan *dwc, u32 *maxburst) { *maxburst = *maxburst > 1 ? fls(*maxburst) - 1 : 0; } static void idma32_set_device_name(struct dw_dma *dw, int id) { snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", id); } /* * Program FIFO size of channels. * * By default full FIFO (512 bytes) is assigned to channel 0. Here we * slice FIFO on equal parts between channels. 
*/ static void idma32_fifo_partition(struct dw_dma *dw) { u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) | IDMA32C_FP_UPDATE; u64 fifo_partition = 0; /* Fill FIFO_PARTITION low bits (Channels 0..1, 4..5) */ fifo_partition |= value << 0; /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */ fifo_partition |= value << 32; /* Program FIFO Partition registers - 64 bytes per channel */ idma32_writeq(dw, FIFO_PARTITION1, fifo_partition); idma32_writeq(dw, FIFO_PARTITION0, fifo_partition); } static void idma32_disable(struct dw_dma *dw) { do_dw_dma_off(dw); idma32_fifo_partition(dw); } static void idma32_enable(struct dw_dma *dw) { idma32_fifo_partition(dw); do_dw_dma_on(dw); } int idma32_dma_probe(struct dw_dma_chip *chip) { struct dw_dma *dw; dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL); if (!dw) return -ENOMEM; /* Channel operations */ if (chip->pdata->quirks & DW_DMA_QUIRK_XBAR_PRESENT) dw->initialize_chan = idma32_initialize_chan_xbar; else dw->initialize_chan = idma32_initialize_chan_generic; dw->suspend_chan = idma32_suspend_chan; dw->resume_chan = idma32_resume_chan; dw->prepare_ctllo = idma32_prepare_ctllo; dw->encode_maxburst = idma32_encode_maxburst; dw->bytes2block = idma32_bytes2block; dw->block2bytes = idma32_block2bytes; /* Device operations */ dw->set_device_name = idma32_set_device_name; dw->disable = idma32_disable; dw->enable = idma32_enable; chip->dw = dw; return do_dma_probe(chip); } EXPORT_SYMBOL_GPL(idma32_dma_probe); int idma32_dma_remove(struct dw_dma_chip *chip) { return do_dma_remove(chip); } EXPORT_SYMBOL_GPL(idma32_dma_remove);
linux-master
drivers/dma/dw/idma32.c
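A standalone sketch (not kernel code) of how the iDMA 32-bit channel setup above splits a request line id across the 4-bit *_PER field and the 2-bit *_PER_EXT extension, as done in idma32_initialize_chan_generic(). The example request line value is made up.

/* Sketch only: idma32 request-line field split. */
#include <stdio.h>

static void split_req_line(unsigned int id, unsigned int *per, unsigned int *ext)
{
        *per = id & 0xf;        /* low 4 bits of the request line */
        *ext = (id >> 4) & 0x3; /* 2-bit request line extension */
}

int main(void)
{
        unsigned int per, ext;

        split_req_line(0x2b, &per, &ext);       /* request line 43 */
        printf("per=%u ext=%u\n", per, ext);    /* prints per=11 ext=2 */
        return 0;
}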
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2013,2019 Intel Corporation #include <linux/acpi.h> #include <linux/acpi_dma.h> #include "internal.h" static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param) { struct acpi_dma_spec *dma_spec = param; struct dw_dma_slave slave = { .dma_dev = dma_spec->dev, .src_id = dma_spec->slave_id, .dst_id = dma_spec->slave_id, .m_master = 0, .p_master = 1, }; return dw_dma_filter(chan, &slave); } void dw_dma_acpi_controller_register(struct dw_dma *dw) { struct device *dev = dw->dma.dev; struct acpi_dma_filter_info *info; int ret; if (!has_acpi_companion(dev)) return; info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); if (!info) return; dma_cap_zero(info->dma_cap); dma_cap_set(DMA_SLAVE, info->dma_cap); info->filter_fn = dw_dma_acpi_filter; ret = acpi_dma_controller_register(dev, acpi_dma_simple_xlate, info); if (ret) dev_err(dev, "could not register acpi_dma_controller\n"); } EXPORT_SYMBOL_GPL(dw_dma_acpi_controller_register); void dw_dma_acpi_controller_free(struct dw_dma *dw) { struct device *dev = dw->dma.dev; if (!has_acpi_companion(dev)) return; acpi_dma_controller_free(dev); } EXPORT_SYMBOL_GPL(dw_dma_acpi_controller_free);
linux-master
drivers/dma/dw/acpi.c
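A standalone sketch (not kernel code) of the fixed mapping dw_dma_acpi_filter() above applies when translating an ACPI DMA spec: the same slave_id is used for both directions and the masters are hard-wired to 0 (memory side) and 1 (peripheral side). The struct is a simplified stand-in for struct dw_dma_slave.

/* Sketch only: ACPI slave_id to DW slave parameter mapping. */
#include <stdio.h>

struct slave_params {
        unsigned int src_id, dst_id;
        unsigned int m_master, p_master;
};

static struct slave_params from_acpi_slave_id(unsigned int slave_id)
{
        struct slave_params p = {
                .src_id = slave_id,
                .dst_id = slave_id,
                .m_master = 0,  /* memory-side master */
                .p_master = 1,  /* peripheral-side master */
        };
        return p;
}

int main(void)
{
        struct slave_params p = from_acpi_slave_id(3);

        printf("src %u dst %u m %u p %u\n",
               p.src_id, p.dst_id, p.m_master, p.p_master);
        return 0;
}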
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2020 Intel Corporation. All rights rsvd. */ #include <linux/sched/task.h> #include <linux/io-64-nonatomic-lo-hi.h> #include "idxd.h" #include "perfmon.h" static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, char *buf); static cpumask_t perfmon_dsa_cpu_mask; static bool cpuhp_set_up; static enum cpuhp_state cpuhp_slot; /* * perf userspace reads this attribute to determine which cpus to open * counters on. It's connected to perfmon_dsa_cpu_mask, which is * maintained by the cpu hotplug handlers. */ static DEVICE_ATTR_RO(cpumask); static struct attribute *perfmon_cpumask_attrs[] = { &dev_attr_cpumask.attr, NULL, }; static struct attribute_group cpumask_attr_group = { .attrs = perfmon_cpumask_attrs, }; /* * These attributes specify the bits in the config word that the perf * syscall uses to pass the event ids and categories to perfmon. */ DEFINE_PERFMON_FORMAT_ATTR(event_category, "config:0-3"); DEFINE_PERFMON_FORMAT_ATTR(event, "config:4-31"); /* * These attributes specify the bits in the config1 word that the perf * syscall uses to pass filter data to perfmon. */ DEFINE_PERFMON_FORMAT_ATTR(filter_wq, "config1:0-31"); DEFINE_PERFMON_FORMAT_ATTR(filter_tc, "config1:32-39"); DEFINE_PERFMON_FORMAT_ATTR(filter_pgsz, "config1:40-43"); DEFINE_PERFMON_FORMAT_ATTR(filter_sz, "config1:44-51"); DEFINE_PERFMON_FORMAT_ATTR(filter_eng, "config1:52-59"); #define PERFMON_FILTERS_START 2 #define PERFMON_FILTERS_MAX 5 static struct attribute *perfmon_format_attrs[] = { &format_attr_idxd_event_category.attr, &format_attr_idxd_event.attr, &format_attr_idxd_filter_wq.attr, &format_attr_idxd_filter_tc.attr, &format_attr_idxd_filter_pgsz.attr, &format_attr_idxd_filter_sz.attr, &format_attr_idxd_filter_eng.attr, NULL, }; static struct attribute_group perfmon_format_attr_group = { .name = "format", .attrs = perfmon_format_attrs, }; static const struct attribute_group *perfmon_attr_groups[] = { &perfmon_format_attr_group, &cpumask_attr_group, NULL, }; static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, char *buf) { return cpumap_print_to_pagebuf(true, buf, &perfmon_dsa_cpu_mask); } static bool is_idxd_event(struct idxd_pmu *idxd_pmu, struct perf_event *event) { return &idxd_pmu->pmu == event->pmu; } static int perfmon_collect_events(struct idxd_pmu *idxd_pmu, struct perf_event *leader, bool do_grp) { struct perf_event *event; int n, max_count; max_count = idxd_pmu->n_counters; n = idxd_pmu->n_events; if (n >= max_count) return -EINVAL; if (is_idxd_event(idxd_pmu, leader)) { idxd_pmu->event_list[n] = leader; idxd_pmu->event_list[n]->hw.idx = n; n++; } if (!do_grp) return n; for_each_sibling_event(event, leader) { if (!is_idxd_event(idxd_pmu, event) || event->state <= PERF_EVENT_STATE_OFF) continue; if (n >= max_count) return -EINVAL; idxd_pmu->event_list[n] = event; idxd_pmu->event_list[n]->hw.idx = n; n++; } return n; } static void perfmon_assign_hw_event(struct idxd_pmu *idxd_pmu, struct perf_event *event, int idx) { struct idxd_device *idxd = idxd_pmu->idxd; struct hw_perf_event *hwc = &event->hw; hwc->idx = idx; hwc->config_base = ioread64(CNTRCFG_REG(idxd, idx)); hwc->event_base = ioread64(CNTRCFG_REG(idxd, idx)); } static int perfmon_assign_event(struct idxd_pmu *idxd_pmu, struct perf_event *event) { int i; for (i = 0; i < IDXD_PMU_EVENT_MAX; i++) if (!test_and_set_bit(i, idxd_pmu->used_mask)) return i; return -EINVAL; } /* * Check whether there are enough counters to satisfy that all the * events in the 
group can actually be scheduled at the same time. * * To do this, create a fake idxd_pmu object so the event collection * and assignment functions can be used without affecting the internal * state of the real idxd_pmu object. */ static int perfmon_validate_group(struct idxd_pmu *pmu, struct perf_event *event) { struct perf_event *leader = event->group_leader; struct idxd_pmu *fake_pmu; int i, ret = 0, n, idx; fake_pmu = kzalloc(sizeof(*fake_pmu), GFP_KERNEL); if (!fake_pmu) return -ENOMEM; fake_pmu->pmu.name = pmu->pmu.name; fake_pmu->n_counters = pmu->n_counters; n = perfmon_collect_events(fake_pmu, leader, true); if (n < 0) { ret = n; goto out; } fake_pmu->n_events = n; n = perfmon_collect_events(fake_pmu, event, false); if (n < 0) { ret = n; goto out; } fake_pmu->n_events = n; for (i = 0; i < n; i++) { event = fake_pmu->event_list[i]; idx = perfmon_assign_event(fake_pmu, event); if (idx < 0) { ret = idx; goto out; } } out: kfree(fake_pmu); return ret; } static int perfmon_pmu_event_init(struct perf_event *event) { struct idxd_device *idxd; int ret = 0; idxd = event_to_idxd(event); event->hw.idx = -1; if (event->attr.type != event->pmu->type) return -ENOENT; /* sampling not supported */ if (event->attr.sample_period) return -EINVAL; if (event->cpu < 0) return -EINVAL; if (event->pmu != &idxd->idxd_pmu->pmu) return -EINVAL; event->hw.event_base = ioread64(PERFMON_TABLE_OFFSET(idxd)); event->cpu = idxd->idxd_pmu->cpu; event->hw.config = event->attr.config; if (event->group_leader != event) /* non-group events have themselves as leader */ ret = perfmon_validate_group(idxd->idxd_pmu, event); return ret; } static inline u64 perfmon_pmu_read_counter(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; struct idxd_device *idxd; int cntr = hwc->idx; idxd = event_to_idxd(event); return ioread64(CNTRDATA_REG(idxd, cntr)); } static void perfmon_pmu_event_update(struct perf_event *event) { struct idxd_device *idxd = event_to_idxd(event); u64 prev_raw_count, new_raw_count, delta, p, n; int shift = 64 - idxd->idxd_pmu->counter_width; struct hw_perf_event *hwc = &event->hw; prev_raw_count = local64_read(&hwc->prev_count); do { new_raw_count = perfmon_pmu_read_counter(event); } while (!local64_try_cmpxchg(&hwc->prev_count, &prev_raw_count, new_raw_count)); n = (new_raw_count << shift); p = (prev_raw_count << shift); delta = ((n - p) >> shift); local64_add(delta, &event->count); } void perfmon_counter_overflow(struct idxd_device *idxd) { int i, n_counters, max_loop = OVERFLOW_SIZE; struct perf_event *event; unsigned long ovfstatus; n_counters = min(idxd->idxd_pmu->n_counters, OVERFLOW_SIZE); ovfstatus = ioread32(OVFSTATUS_REG(idxd)); /* * While updating overflowed counters, other counters behind * them could overflow and be missed in a given pass. * Normally this could happen at most n_counters times, but in * theory a tiny counter width could result in continual * overflows and endless looping. max_loop provides a * failsafe in that highly unlikely case. */ while (ovfstatus && max_loop--) { /* Figure out which counter(s) overflowed */ for_each_set_bit(i, &ovfstatus, n_counters) { unsigned long ovfstatus_clear = 0; /* Update event->count for overflowed counter */ event = idxd->idxd_pmu->event_list[i]; perfmon_pmu_event_update(event); /* Writing 1 to OVFSTATUS bit clears it */ set_bit(i, &ovfstatus_clear); iowrite32(ovfstatus_clear, OVFSTATUS_REG(idxd)); } ovfstatus = ioread32(OVFSTATUS_REG(idxd)); } /* * Should never happen. 
If so, it means a counter(s) looped * around twice while this handler was running. */ WARN_ON_ONCE(ovfstatus); } static inline void perfmon_reset_config(struct idxd_device *idxd) { iowrite32(CONFIG_RESET, PERFRST_REG(idxd)); iowrite32(0, OVFSTATUS_REG(idxd)); iowrite32(0, PERFFRZ_REG(idxd)); } static inline void perfmon_reset_counters(struct idxd_device *idxd) { iowrite32(CNTR_RESET, PERFRST_REG(idxd)); } static inline void perfmon_reset(struct idxd_device *idxd) { perfmon_reset_config(idxd); perfmon_reset_counters(idxd); } static void perfmon_pmu_event_start(struct perf_event *event, int mode) { u32 flt_wq, flt_tc, flt_pg_sz, flt_xfer_sz, flt_eng = 0; u64 cntr_cfg, cntrdata, event_enc, event_cat = 0; struct hw_perf_event *hwc = &event->hw; union filter_cfg flt_cfg; union event_cfg event_cfg; struct idxd_device *idxd; int cntr; idxd = event_to_idxd(event); event->hw.idx = hwc->idx; cntr = hwc->idx; /* Obtain event category and event value from user space */ event_cfg.val = event->attr.config; flt_cfg.val = event->attr.config1; event_cat = event_cfg.event_cat; event_enc = event_cfg.event_enc; /* Obtain filter configuration from user space */ flt_wq = flt_cfg.wq; flt_tc = flt_cfg.tc; flt_pg_sz = flt_cfg.pg_sz; flt_xfer_sz = flt_cfg.xfer_sz; flt_eng = flt_cfg.eng; if (flt_wq && test_bit(FLT_WQ, &idxd->idxd_pmu->supported_filters)) iowrite32(flt_wq, FLTCFG_REG(idxd, cntr, FLT_WQ)); if (flt_tc && test_bit(FLT_TC, &idxd->idxd_pmu->supported_filters)) iowrite32(flt_tc, FLTCFG_REG(idxd, cntr, FLT_TC)); if (flt_pg_sz && test_bit(FLT_PG_SZ, &idxd->idxd_pmu->supported_filters)) iowrite32(flt_pg_sz, FLTCFG_REG(idxd, cntr, FLT_PG_SZ)); if (flt_xfer_sz && test_bit(FLT_XFER_SZ, &idxd->idxd_pmu->supported_filters)) iowrite32(flt_xfer_sz, FLTCFG_REG(idxd, cntr, FLT_XFER_SZ)); if (flt_eng && test_bit(FLT_ENG, &idxd->idxd_pmu->supported_filters)) iowrite32(flt_eng, FLTCFG_REG(idxd, cntr, FLT_ENG)); /* Read the start value */ cntrdata = ioread64(CNTRDATA_REG(idxd, cntr)); local64_set(&event->hw.prev_count, cntrdata); /* Set counter to event/category */ cntr_cfg = event_cat << CNTRCFG_CATEGORY_SHIFT; cntr_cfg |= event_enc << CNTRCFG_EVENT_SHIFT; /* Set interrupt on overflow and counter enable bits */ cntr_cfg |= (CNTRCFG_IRQ_OVERFLOW | CNTRCFG_ENABLE); iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr)); } static void perfmon_pmu_event_stop(struct perf_event *event, int mode) { struct hw_perf_event *hwc = &event->hw; struct idxd_device *idxd; int i, cntr = hwc->idx; u64 cntr_cfg; idxd = event_to_idxd(event); /* remove this event from event list */ for (i = 0; i < idxd->idxd_pmu->n_events; i++) { if (event != idxd->idxd_pmu->event_list[i]) continue; for (++i; i < idxd->idxd_pmu->n_events; i++) idxd->idxd_pmu->event_list[i - 1] = idxd->idxd_pmu->event_list[i]; --idxd->idxd_pmu->n_events; break; } cntr_cfg = ioread64(CNTRCFG_REG(idxd, cntr)); cntr_cfg &= ~CNTRCFG_ENABLE; iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr)); if (mode == PERF_EF_UPDATE) perfmon_pmu_event_update(event); event->hw.idx = -1; clear_bit(cntr, idxd->idxd_pmu->used_mask); } static void perfmon_pmu_event_del(struct perf_event *event, int mode) { perfmon_pmu_event_stop(event, PERF_EF_UPDATE); } static int perfmon_pmu_event_add(struct perf_event *event, int flags) { struct idxd_device *idxd = event_to_idxd(event); struct idxd_pmu *idxd_pmu = idxd->idxd_pmu; struct hw_perf_event *hwc = &event->hw; int idx, n; n = perfmon_collect_events(idxd_pmu, event, false); if (n < 0) return n; hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; if (!(flags & 
PERF_EF_START)) hwc->state |= PERF_HES_ARCH; idx = perfmon_assign_event(idxd_pmu, event); if (idx < 0) return idx; perfmon_assign_hw_event(idxd_pmu, event, idx); if (flags & PERF_EF_START) perfmon_pmu_event_start(event, 0); idxd_pmu->n_events = n; return 0; } static void enable_perfmon_pmu(struct idxd_device *idxd) { iowrite32(COUNTER_UNFREEZE, PERFFRZ_REG(idxd)); } static void disable_perfmon_pmu(struct idxd_device *idxd) { iowrite32(COUNTER_FREEZE, PERFFRZ_REG(idxd)); } static void perfmon_pmu_enable(struct pmu *pmu) { struct idxd_device *idxd = pmu_to_idxd(pmu); enable_perfmon_pmu(idxd); } static void perfmon_pmu_disable(struct pmu *pmu) { struct idxd_device *idxd = pmu_to_idxd(pmu); disable_perfmon_pmu(idxd); } static void skip_filter(int i) { int j; for (j = i; j < PERFMON_FILTERS_MAX; j++) perfmon_format_attrs[PERFMON_FILTERS_START + j] = perfmon_format_attrs[PERFMON_FILTERS_START + j + 1]; } static void idxd_pmu_init(struct idxd_pmu *idxd_pmu) { int i; for (i = 0 ; i < PERFMON_FILTERS_MAX; i++) { if (!test_bit(i, &idxd_pmu->supported_filters)) skip_filter(i); } idxd_pmu->pmu.name = idxd_pmu->name; idxd_pmu->pmu.attr_groups = perfmon_attr_groups; idxd_pmu->pmu.task_ctx_nr = perf_invalid_context; idxd_pmu->pmu.event_init = perfmon_pmu_event_init; idxd_pmu->pmu.pmu_enable = perfmon_pmu_enable, idxd_pmu->pmu.pmu_disable = perfmon_pmu_disable, idxd_pmu->pmu.add = perfmon_pmu_event_add; idxd_pmu->pmu.del = perfmon_pmu_event_del; idxd_pmu->pmu.start = perfmon_pmu_event_start; idxd_pmu->pmu.stop = perfmon_pmu_event_stop; idxd_pmu->pmu.read = perfmon_pmu_event_update; idxd_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE; idxd_pmu->pmu.module = THIS_MODULE; } void perfmon_pmu_remove(struct idxd_device *idxd) { if (!idxd->idxd_pmu) return; cpuhp_state_remove_instance(cpuhp_slot, &idxd->idxd_pmu->cpuhp_node); perf_pmu_unregister(&idxd->idxd_pmu->pmu); kfree(idxd->idxd_pmu); idxd->idxd_pmu = NULL; } static int perf_event_cpu_online(unsigned int cpu, struct hlist_node *node) { struct idxd_pmu *idxd_pmu; idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node); /* select the first online CPU as the designated reader */ if (cpumask_empty(&perfmon_dsa_cpu_mask)) { cpumask_set_cpu(cpu, &perfmon_dsa_cpu_mask); idxd_pmu->cpu = cpu; } return 0; } static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node) { struct idxd_pmu *idxd_pmu; unsigned int target; idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node); if (!cpumask_test_and_clear_cpu(cpu, &perfmon_dsa_cpu_mask)) return 0; target = cpumask_any_but(cpu_online_mask, cpu); /* migrate events if there is a valid target */ if (target < nr_cpu_ids) cpumask_set_cpu(target, &perfmon_dsa_cpu_mask); else target = -1; perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target); return 0; } int perfmon_pmu_init(struct idxd_device *idxd) { union idxd_perfcap perfcap; struct idxd_pmu *idxd_pmu; int rc = -ENODEV; /* * perfmon module initialization failed, nothing to do */ if (!cpuhp_set_up) return -ENODEV; /* * If perfmon_offset or num_counters is 0, it means perfmon is * not supported on this hardware. 
*/ if (idxd->perfmon_offset == 0) return -ENODEV; idxd_pmu = kzalloc(sizeof(*idxd_pmu), GFP_KERNEL); if (!idxd_pmu) return -ENOMEM; idxd_pmu->idxd = idxd; idxd->idxd_pmu = idxd_pmu; if (idxd->data->type == IDXD_TYPE_DSA) { rc = sprintf(idxd_pmu->name, "dsa%d", idxd->id); if (rc < 0) goto free; } else if (idxd->data->type == IDXD_TYPE_IAX) { rc = sprintf(idxd_pmu->name, "iax%d", idxd->id); if (rc < 0) goto free; } else { goto free; } perfmon_reset(idxd); perfcap.bits = ioread64(PERFCAP_REG(idxd)); /* * If total perf counter is 0, stop further registration. * This is necessary in order to support driver running on * guest which does not have pmon support. */ if (perfcap.num_perf_counter == 0) goto free; /* A counter width of 0 means it can't count */ if (perfcap.counter_width == 0) goto free; /* Overflow interrupt and counter freeze support must be available */ if (!perfcap.overflow_interrupt || !perfcap.counter_freeze) goto free; /* Number of event categories cannot be 0 */ if (perfcap.num_event_category == 0) goto free; /* * We don't support per-counter capabilities for now. */ if (perfcap.cap_per_counter) goto free; idxd_pmu->n_event_categories = perfcap.num_event_category; idxd_pmu->supported_event_categories = perfcap.global_event_category; idxd_pmu->per_counter_caps_supported = perfcap.cap_per_counter; /* check filter capability. If 0, then filters are not supported */ idxd_pmu->supported_filters = perfcap.filter; if (perfcap.filter) idxd_pmu->n_filters = hweight8(perfcap.filter); /* Store the total number of counters categories, and counter width */ idxd_pmu->n_counters = perfcap.num_perf_counter; idxd_pmu->counter_width = perfcap.counter_width; idxd_pmu_init(idxd_pmu); rc = perf_pmu_register(&idxd_pmu->pmu, idxd_pmu->name, -1); if (rc) goto free; rc = cpuhp_state_add_instance(cpuhp_slot, &idxd_pmu->cpuhp_node); if (rc) { perf_pmu_unregister(&idxd->idxd_pmu->pmu); goto free; } out: return rc; free: kfree(idxd_pmu); idxd->idxd_pmu = NULL; goto out; } void __init perfmon_init(void) { int rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "driver/dma/idxd/perf:online", perf_event_cpu_online, perf_event_cpu_offline); if (WARN_ON(rc < 0)) return; cpuhp_slot = rc; cpuhp_set_up = true; } void __exit perfmon_exit(void) { if (cpuhp_set_up) cpuhp_remove_multi_state(cpuhp_slot); }
linux-master
drivers/dma/idxd/perfmon.c
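A standalone sketch (not kernel code) of the wrap-safe delta computation used by perfmon_pmu_event_update() above: both samples are shifted up by (64 - counter_width) and the difference shifted back down, so a narrow counter that wrapped still yields the correct small delta. The 32-bit width and sample values are made up.

/* Sketch only: wrap-safe delta for a counter narrower than 64 bits. */
#include <stdio.h>
#include <stdint.h>

static uint64_t counter_delta(uint64_t prev, uint64_t curr, unsigned int width)
{
        unsigned int shift = 64 - width;

        return ((curr << shift) - (prev << shift)) >> shift;
}

int main(void)
{
        /* 32-bit counter that wrapped from near the top back to a small value */
        printf("%llu\n",
               (unsigned long long)counter_delta(0xfffffff0ull, 0x10ull, 32));
        /* Prints 32: 16 counts up to the wrap point plus 16 after it */
        return 0;
}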
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <uapi/linux/idxd.h> #include "registers.h" #include "idxd.h" static char *idxd_wq_type_names[] = { [IDXD_WQT_NONE] = "none", [IDXD_WQT_KERNEL] = "kernel", [IDXD_WQT_USER] = "user", }; /* IDXD engine attributes */ static ssize_t engine_group_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_engine *engine = confdev_to_engine(dev); if (engine->group) return sysfs_emit(buf, "%d\n", engine->group->id); else return sysfs_emit(buf, "%d\n", -1); } static ssize_t engine_group_id_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_engine *engine = confdev_to_engine(dev); struct idxd_device *idxd = engine->idxd; long id; int rc; struct idxd_group *prevg; rc = kstrtol(buf, 10, &id); if (rc < 0) return -EINVAL; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; if (id > idxd->max_groups - 1 || id < -1) return -EINVAL; if (id == -1) { if (engine->group) { engine->group->num_engines--; engine->group = NULL; } return count; } prevg = engine->group; if (prevg) prevg->num_engines--; engine->group = idxd->groups[id]; engine->group->num_engines++; return count; } static struct device_attribute dev_attr_engine_group = __ATTR(group_id, 0644, engine_group_id_show, engine_group_id_store); static struct attribute *idxd_engine_attributes[] = { &dev_attr_engine_group.attr, NULL, }; static const struct attribute_group idxd_engine_attribute_group = { .attrs = idxd_engine_attributes, }; static const struct attribute_group *idxd_engine_attribute_groups[] = { &idxd_engine_attribute_group, NULL, }; static void idxd_conf_engine_release(struct device *dev) { struct idxd_engine *engine = confdev_to_engine(dev); kfree(engine); } struct device_type idxd_engine_device_type = { .name = "engine", .release = idxd_conf_engine_release, .groups = idxd_engine_attribute_groups, }; /* Group attributes */ static void idxd_set_free_rdbufs(struct idxd_device *idxd) { int i, rdbufs; for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) { struct idxd_group *g = idxd->groups[i]; rdbufs += g->rdbufs_reserved; } idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs; } static ssize_t group_read_buffers_reserved_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_group *group = confdev_to_group(dev); return sysfs_emit(buf, "%u\n", group->rdbufs_reserved); } static ssize_t group_tokens_reserved_show(struct device *dev, struct device_attribute *attr, char *buf) { dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n"); return group_read_buffers_reserved_show(dev, attr, buf); } static ssize_t group_read_buffers_reserved_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_group *group = confdev_to_group(dev); struct idxd_device *idxd = group->idxd; unsigned long val; int rc; rc = kstrtoul(buf, 10, &val); if (rc < 0) return -EINVAL; if (idxd->data->type == IDXD_TYPE_IAX) return -EOPNOTSUPP; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; if (idxd->state == IDXD_DEV_ENABLED) return -EPERM; if (val > idxd->max_rdbufs) return -EINVAL; if (val > idxd->nr_rdbufs + group->rdbufs_reserved) return -EINVAL; group->rdbufs_reserved = val; idxd_set_free_rdbufs(idxd); return count; } static 
ssize_t group_tokens_reserved_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n"); return group_read_buffers_reserved_store(dev, attr, buf, count); } static struct device_attribute dev_attr_group_tokens_reserved = __ATTR(tokens_reserved, 0644, group_tokens_reserved_show, group_tokens_reserved_store); static struct device_attribute dev_attr_group_read_buffers_reserved = __ATTR(read_buffers_reserved, 0644, group_read_buffers_reserved_show, group_read_buffers_reserved_store); static ssize_t group_read_buffers_allowed_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_group *group = confdev_to_group(dev); return sysfs_emit(buf, "%u\n", group->rdbufs_allowed); } static ssize_t group_tokens_allowed_show(struct device *dev, struct device_attribute *attr, char *buf) { dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n"); return group_read_buffers_allowed_show(dev, attr, buf); } static ssize_t group_read_buffers_allowed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_group *group = confdev_to_group(dev); struct idxd_device *idxd = group->idxd; unsigned long val; int rc; rc = kstrtoul(buf, 10, &val); if (rc < 0) return -EINVAL; if (idxd->data->type == IDXD_TYPE_IAX) return -EOPNOTSUPP; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; if (idxd->state == IDXD_DEV_ENABLED) return -EPERM; if (val < 4 * group->num_engines || val > group->rdbufs_reserved + idxd->nr_rdbufs) return -EINVAL; group->rdbufs_allowed = val; return count; } static ssize_t group_tokens_allowed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n"); return group_read_buffers_allowed_store(dev, attr, buf, count); } static struct device_attribute dev_attr_group_tokens_allowed = __ATTR(tokens_allowed, 0644, group_tokens_allowed_show, group_tokens_allowed_store); static struct device_attribute dev_attr_group_read_buffers_allowed = __ATTR(read_buffers_allowed, 0644, group_read_buffers_allowed_show, group_read_buffers_allowed_store); static ssize_t group_use_read_buffer_limit_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_group *group = confdev_to_group(dev); return sysfs_emit(buf, "%u\n", group->use_rdbuf_limit); } static ssize_t group_use_token_limit_show(struct device *dev, struct device_attribute *attr, char *buf) { dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n"); return group_use_read_buffer_limit_show(dev, attr, buf); } static ssize_t group_use_read_buffer_limit_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_group *group = confdev_to_group(dev); struct idxd_device *idxd = group->idxd; unsigned long val; int rc; rc = kstrtoul(buf, 10, &val); if (rc < 0) return -EINVAL; if (idxd->data->type == IDXD_TYPE_IAX) return -EOPNOTSUPP; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; if (idxd->state == IDXD_DEV_ENABLED) return -EPERM; if (idxd->rdbuf_limit == 0) return -EPERM; group->use_rdbuf_limit = !!val; return count; } static ssize_t group_use_token_limit_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n"); return group_use_read_buffer_limit_store(dev, attr, buf, 
count); } static struct device_attribute dev_attr_group_use_token_limit = __ATTR(use_token_limit, 0644, group_use_token_limit_show, group_use_token_limit_store); static struct device_attribute dev_attr_group_use_read_buffer_limit = __ATTR(use_read_buffer_limit, 0644, group_use_read_buffer_limit_show, group_use_read_buffer_limit_store); static ssize_t group_engines_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_group *group = confdev_to_group(dev); int i, rc = 0; struct idxd_device *idxd = group->idxd; for (i = 0; i < idxd->max_engines; i++) { struct idxd_engine *engine = idxd->engines[i]; if (!engine->group) continue; if (engine->group->id == group->id) rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id); } if (!rc) return 0; rc--; rc += sysfs_emit_at(buf, rc, "\n"); return rc; } static struct device_attribute dev_attr_group_engines = __ATTR(engines, 0444, group_engines_show, NULL); static ssize_t group_work_queues_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_group *group = confdev_to_group(dev); int i, rc = 0; struct idxd_device *idxd = group->idxd; for (i = 0; i < idxd->max_wqs; i++) { struct idxd_wq *wq = idxd->wqs[i]; if (!wq->group) continue; if (wq->group->id == group->id) rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id); } if (!rc) return 0; rc--; rc += sysfs_emit_at(buf, rc, "\n"); return rc; } static struct device_attribute dev_attr_group_work_queues = __ATTR(work_queues, 0444, group_work_queues_show, NULL); static ssize_t group_traffic_class_a_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_group *group = confdev_to_group(dev); return sysfs_emit(buf, "%d\n", group->tc_a); } static ssize_t group_traffic_class_a_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_group *group = confdev_to_group(dev); struct idxd_device *idxd = group->idxd; long val; int rc; rc = kstrtol(buf, 10, &val); if (rc < 0) return -EINVAL; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; if (idxd->state == IDXD_DEV_ENABLED) return -EPERM; if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) return -EPERM; if (val < 0 || val > 7) return -EINVAL; group->tc_a = val; return count; } static struct device_attribute dev_attr_group_traffic_class_a = __ATTR(traffic_class_a, 0644, group_traffic_class_a_show, group_traffic_class_a_store); static ssize_t group_traffic_class_b_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_group *group = confdev_to_group(dev); return sysfs_emit(buf, "%d\n", group->tc_b); } static ssize_t group_traffic_class_b_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_group *group = confdev_to_group(dev); struct idxd_device *idxd = group->idxd; long val; int rc; rc = kstrtol(buf, 10, &val); if (rc < 0) return -EINVAL; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; if (idxd->state == IDXD_DEV_ENABLED) return -EPERM; if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) return -EPERM; if (val < 0 || val > 7) return -EINVAL; group->tc_b = val; return count; } static struct device_attribute dev_attr_group_traffic_class_b = __ATTR(traffic_class_b, 0644, group_traffic_class_b_show, group_traffic_class_b_store); static ssize_t group_desc_progress_limit_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_group *group = confdev_to_group(dev); return sysfs_emit(buf, 
"%d\n", group->desc_progress_limit); } static ssize_t group_desc_progress_limit_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_group *group = confdev_to_group(dev); int val, rc; rc = kstrtoint(buf, 10, &val); if (rc < 0) return -EINVAL; if (val & ~GENMASK(1, 0)) return -EINVAL; group->desc_progress_limit = val; return count; } static struct device_attribute dev_attr_group_desc_progress_limit = __ATTR(desc_progress_limit, 0644, group_desc_progress_limit_show, group_desc_progress_limit_store); static ssize_t group_batch_progress_limit_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_group *group = confdev_to_group(dev); return sysfs_emit(buf, "%d\n", group->batch_progress_limit); } static ssize_t group_batch_progress_limit_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_group *group = confdev_to_group(dev); int val, rc; rc = kstrtoint(buf, 10, &val); if (rc < 0) return -EINVAL; if (val & ~GENMASK(1, 0)) return -EINVAL; group->batch_progress_limit = val; return count; } static struct device_attribute dev_attr_group_batch_progress_limit = __ATTR(batch_progress_limit, 0644, group_batch_progress_limit_show, group_batch_progress_limit_store); static struct attribute *idxd_group_attributes[] = { &dev_attr_group_work_queues.attr, &dev_attr_group_engines.attr, &dev_attr_group_use_token_limit.attr, &dev_attr_group_use_read_buffer_limit.attr, &dev_attr_group_tokens_allowed.attr, &dev_attr_group_read_buffers_allowed.attr, &dev_attr_group_tokens_reserved.attr, &dev_attr_group_read_buffers_reserved.attr, &dev_attr_group_traffic_class_a.attr, &dev_attr_group_traffic_class_b.attr, &dev_attr_group_desc_progress_limit.attr, &dev_attr_group_batch_progress_limit.attr, NULL, }; static bool idxd_group_attr_progress_limit_invisible(struct attribute *attr, struct idxd_device *idxd) { return (attr == &dev_attr_group_desc_progress_limit.attr || attr == &dev_attr_group_batch_progress_limit.attr) && !idxd->hw.group_cap.progress_limit; } static bool idxd_group_attr_read_buffers_invisible(struct attribute *attr, struct idxd_device *idxd) { /* * Intel IAA does not support Read Buffer allocation control, * make these attributes invisible. 
*/ return (attr == &dev_attr_group_use_token_limit.attr || attr == &dev_attr_group_use_read_buffer_limit.attr || attr == &dev_attr_group_tokens_allowed.attr || attr == &dev_attr_group_read_buffers_allowed.attr || attr == &dev_attr_group_tokens_reserved.attr || attr == &dev_attr_group_read_buffers_reserved.attr) && idxd->data->type == IDXD_TYPE_IAX; } static umode_t idxd_group_attr_visible(struct kobject *kobj, struct attribute *attr, int n) { struct device *dev = container_of(kobj, struct device, kobj); struct idxd_group *group = confdev_to_group(dev); struct idxd_device *idxd = group->idxd; if (idxd_group_attr_progress_limit_invisible(attr, idxd)) return 0; if (idxd_group_attr_read_buffers_invisible(attr, idxd)) return 0; return attr->mode; } static const struct attribute_group idxd_group_attribute_group = { .attrs = idxd_group_attributes, .is_visible = idxd_group_attr_visible, }; static const struct attribute_group *idxd_group_attribute_groups[] = { &idxd_group_attribute_group, NULL, }; static void idxd_conf_group_release(struct device *dev) { struct idxd_group *group = confdev_to_group(dev); kfree(group); } struct device_type idxd_group_device_type = { .name = "group", .release = idxd_conf_group_release, .groups = idxd_group_attribute_groups, }; /* IDXD work queue attribs */ static ssize_t wq_clients_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq = confdev_to_wq(dev); return sysfs_emit(buf, "%d\n", wq->client_count); } static struct device_attribute dev_attr_wq_clients = __ATTR(clients, 0444, wq_clients_show, NULL); static ssize_t wq_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq = confdev_to_wq(dev); switch (wq->state) { case IDXD_WQ_DISABLED: return sysfs_emit(buf, "disabled\n"); case IDXD_WQ_ENABLED: return sysfs_emit(buf, "enabled\n"); } return sysfs_emit(buf, "unknown\n"); } static struct device_attribute dev_attr_wq_state = __ATTR(state, 0444, wq_state_show, NULL); static ssize_t wq_group_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq = confdev_to_wq(dev); if (wq->group) return sysfs_emit(buf, "%u\n", wq->group->id); else return sysfs_emit(buf, "-1\n"); } static ssize_t wq_group_id_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_wq *wq = confdev_to_wq(dev); struct idxd_device *idxd = wq->idxd; long id; int rc; struct idxd_group *prevg, *group; rc = kstrtol(buf, 10, &id); if (rc < 0) return -EINVAL; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; if (wq->state != IDXD_WQ_DISABLED) return -EPERM; if (id > idxd->max_groups - 1 || id < -1) return -EINVAL; if (id == -1) { if (wq->group) { wq->group->num_wqs--; wq->group = NULL; } return count; } group = idxd->groups[id]; prevg = wq->group; if (prevg) prevg->num_wqs--; wq->group = group; group->num_wqs++; return count; } static struct device_attribute dev_attr_wq_group_id = __ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store); static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq = confdev_to_wq(dev); return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? 
"dedicated" : "shared"); } static ssize_t wq_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_wq *wq = confdev_to_wq(dev); struct idxd_device *idxd = wq->idxd; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; if (wq->state != IDXD_WQ_DISABLED) return -EPERM; if (sysfs_streq(buf, "dedicated")) { set_bit(WQ_FLAG_DEDICATED, &wq->flags); wq->threshold = 0; } else if (sysfs_streq(buf, "shared")) { clear_bit(WQ_FLAG_DEDICATED, &wq->flags); } else { return -EINVAL; } return count; } static struct device_attribute dev_attr_wq_mode = __ATTR(mode, 0644, wq_mode_show, wq_mode_store); static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq = confdev_to_wq(dev); return sysfs_emit(buf, "%u\n", wq->size); } static int total_claimed_wq_size(struct idxd_device *idxd) { int i; int wq_size = 0; for (i = 0; i < idxd->max_wqs; i++) { struct idxd_wq *wq = idxd->wqs[i]; wq_size += wq->size; } return wq_size; } static ssize_t wq_size_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_wq *wq = confdev_to_wq(dev); unsigned long size; struct idxd_device *idxd = wq->idxd; int rc; rc = kstrtoul(buf, 10, &size); if (rc < 0) return -EINVAL; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; if (idxd->state == IDXD_DEV_ENABLED) return -EPERM; if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size) return -EINVAL; wq->size = size; return count; } static struct device_attribute dev_attr_wq_size = __ATTR(size, 0644, wq_size_show, wq_size_store); static ssize_t wq_priority_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq = confdev_to_wq(dev); return sysfs_emit(buf, "%u\n", wq->priority); } static ssize_t wq_priority_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_wq *wq = confdev_to_wq(dev); unsigned long prio; struct idxd_device *idxd = wq->idxd; int rc; rc = kstrtoul(buf, 10, &prio); if (rc < 0) return -EINVAL; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; if (wq->state != IDXD_WQ_DISABLED) return -EPERM; if (prio > IDXD_MAX_PRIORITY) return -EINVAL; wq->priority = prio; return count; } static struct device_attribute dev_attr_wq_priority = __ATTR(priority, 0644, wq_priority_show, wq_priority_store); static ssize_t wq_block_on_fault_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq = confdev_to_wq(dev); return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags)); } static ssize_t wq_block_on_fault_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_wq *wq = confdev_to_wq(dev); struct idxd_device *idxd = wq->idxd; bool bof; int rc; if (!idxd->hw.gen_cap.block_on_fault) return -EOPNOTSUPP; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; if (wq->state != IDXD_WQ_DISABLED) return -ENXIO; rc = kstrtobool(buf, &bof); if (rc < 0) return rc; if (bof) { if (test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags)) return -EOPNOTSUPP; set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags); } else { clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags); } return count; } static struct device_attribute dev_attr_wq_block_on_fault = __ATTR(block_on_fault, 0644, wq_block_on_fault_show, wq_block_on_fault_store); static ssize_t wq_threshold_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq 
= confdev_to_wq(dev); return sysfs_emit(buf, "%u\n", wq->threshold); } static ssize_t wq_threshold_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_wq *wq = confdev_to_wq(dev); struct idxd_device *idxd = wq->idxd; unsigned int val; int rc; rc = kstrtouint(buf, 0, &val); if (rc < 0) return -EINVAL; if (val > wq->size || val <= 0) return -EINVAL; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; if (wq->state != IDXD_WQ_DISABLED) return -ENXIO; if (test_bit(WQ_FLAG_DEDICATED, &wq->flags)) return -EINVAL; wq->threshold = val; return count; } static struct device_attribute dev_attr_wq_threshold = __ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store); static ssize_t wq_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq = confdev_to_wq(dev); switch (wq->type) { case IDXD_WQT_KERNEL: return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]); case IDXD_WQT_USER: return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]); case IDXD_WQT_NONE: default: return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]); } return -EINVAL; } static ssize_t wq_type_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_wq *wq = confdev_to_wq(dev); enum idxd_wq_type old_type; if (wq->state != IDXD_WQ_DISABLED) return -EPERM; old_type = wq->type; if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE])) wq->type = IDXD_WQT_NONE; else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL])) wq->type = IDXD_WQT_KERNEL; else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER])) wq->type = IDXD_WQT_USER; else return -EINVAL; /* If we are changing queue type, clear the name */ if (wq->type != old_type) memset(wq->name, 0, WQ_NAME_SIZE + 1); return count; } static struct device_attribute dev_attr_wq_type = __ATTR(type, 0644, wq_type_show, wq_type_store); static ssize_t wq_name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq = confdev_to_wq(dev); return sysfs_emit(buf, "%s\n", wq->name); } static ssize_t wq_name_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_wq *wq = confdev_to_wq(dev); char *input, *pos; if (wq->state != IDXD_WQ_DISABLED) return -EPERM; if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0) return -EINVAL; input = kstrndup(buf, count, GFP_KERNEL); if (!input) return -ENOMEM; pos = strim(input); memset(wq->name, 0, WQ_NAME_SIZE + 1); sprintf(wq->name, "%s", pos); kfree(input); return count; } static struct device_attribute dev_attr_wq_name = __ATTR(name, 0644, wq_name_show, wq_name_store); static ssize_t wq_cdev_minor_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq = confdev_to_wq(dev); int minor = -1; mutex_lock(&wq->wq_lock); if (wq->idxd_cdev) minor = wq->idxd_cdev->minor; mutex_unlock(&wq->wq_lock); if (minor == -1) return -ENXIO; return sysfs_emit(buf, "%d\n", minor); } static struct device_attribute dev_attr_wq_cdev_minor = __ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL); static int __get_sysfs_u64(const char *buf, u64 *val) { int rc; rc = kstrtou64(buf, 0, val); if (rc < 0) return -EINVAL; if (*val == 0) return -EINVAL; *val = roundup_pow_of_two(*val); return 0; } static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq = confdev_to_wq(dev); return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes); } static 
ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_wq *wq = confdev_to_wq(dev); struct idxd_device *idxd = wq->idxd; u64 xfer_size; int rc; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; if (wq->state != IDXD_WQ_DISABLED) return -EPERM; rc = __get_sysfs_u64(buf, &xfer_size); if (rc < 0) return rc; if (xfer_size > idxd->max_xfer_bytes) return -EINVAL; wq->max_xfer_bytes = xfer_size; return count; } static struct device_attribute dev_attr_wq_max_transfer_size = __ATTR(max_transfer_size, 0644, wq_max_transfer_size_show, wq_max_transfer_size_store); static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq = confdev_to_wq(dev); return sysfs_emit(buf, "%u\n", wq->max_batch_size); } static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_wq *wq = confdev_to_wq(dev); struct idxd_device *idxd = wq->idxd; u64 batch_size; int rc; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; if (wq->state != IDXD_WQ_DISABLED) return -EPERM; rc = __get_sysfs_u64(buf, &batch_size); if (rc < 0) return rc; if (batch_size > idxd->max_batch_size) return -EINVAL; idxd_wq_set_max_batch_size(idxd->data->type, wq, (u32)batch_size); return count; } static struct device_attribute dev_attr_wq_max_batch_size = __ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store); static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq = confdev_to_wq(dev); return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags)); } static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_wq *wq = confdev_to_wq(dev); struct idxd_device *idxd = wq->idxd; bool ats_dis; int rc; if (wq->state != IDXD_WQ_DISABLED) return -EPERM; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; rc = kstrtobool(buf, &ats_dis); if (rc < 0) return rc; if (ats_dis) set_bit(WQ_FLAG_ATS_DISABLE, &wq->flags); else clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags); return count; } static struct device_attribute dev_attr_wq_ats_disable = __ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store); static ssize_t wq_prs_disable_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq = confdev_to_wq(dev); return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags)); } static ssize_t wq_prs_disable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_wq *wq = confdev_to_wq(dev); struct idxd_device *idxd = wq->idxd; bool prs_dis; int rc; if (wq->state != IDXD_WQ_DISABLED) return -EPERM; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; rc = kstrtobool(buf, &prs_dis); if (rc < 0) return rc; if (prs_dis) { set_bit(WQ_FLAG_PRS_DISABLE, &wq->flags); /* when PRS is disabled, BOF needs to be off as well */ clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags); } else { clear_bit(WQ_FLAG_PRS_DISABLE, &wq->flags); } return count; } static struct device_attribute dev_attr_wq_prs_disable = __ATTR(prs_disable, 0644, wq_prs_disable_show, wq_prs_disable_store); static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq = confdev_to_wq(dev); struct idxd_device *idxd = wq->idxd; u32 occup, offset; if 
(!idxd->hw.wq_cap.occupancy) return -EOPNOTSUPP; offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX); occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK; return sysfs_emit(buf, "%u\n", occup); } static struct device_attribute dev_attr_wq_occupancy = __ATTR(occupancy, 0444, wq_occupancy_show, NULL); static ssize_t wq_enqcmds_retries_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq = confdev_to_wq(dev); if (wq_dedicated(wq)) return -EOPNOTSUPP; return sysfs_emit(buf, "%u\n", wq->enqcmds_retries); } static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_wq *wq = confdev_to_wq(dev); int rc; unsigned int retries; if (wq_dedicated(wq)) return -EOPNOTSUPP; rc = kstrtouint(buf, 10, &retries); if (rc < 0) return rc; if (retries > IDXD_ENQCMDS_MAX_RETRIES) retries = IDXD_ENQCMDS_MAX_RETRIES; wq->enqcmds_retries = retries; return count; } static struct device_attribute dev_attr_wq_enqcmds_retries = __ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store); static ssize_t wq_op_config_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq = confdev_to_wq(dev); return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, wq->opcap_bmap); } static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask) { int bit; /* * The OPCAP is defined as 256 bits that represents each operation the device * supports per bit. Iterate through all the bits and check if the input mask * is set for bits that are not set in the OPCAP for the device. If no OPCAP * bit is set and input mask has the bit set, then return error. */ for_each_set_bit(bit, opmask, IDXD_MAX_OPCAP_BITS) { if (!test_bit(bit, idxd->opcap_bmap)) return -EINVAL; } return 0; } static ssize_t wq_op_config_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_wq *wq = confdev_to_wq(dev); struct idxd_device *idxd = wq->idxd; unsigned long *opmask; int rc; if (wq->state != IDXD_WQ_DISABLED) return -EPERM; opmask = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL); if (!opmask) return -ENOMEM; rc = bitmap_parse(buf, count, opmask, IDXD_MAX_OPCAP_BITS); if (rc < 0) goto err; rc = idxd_verify_supported_opcap(idxd, opmask); if (rc < 0) goto err; bitmap_copy(wq->opcap_bmap, opmask, IDXD_MAX_OPCAP_BITS); bitmap_free(opmask); return count; err: bitmap_free(opmask); return rc; } static struct device_attribute dev_attr_wq_op_config = __ATTR(op_config, 0644, wq_op_config_show, wq_op_config_store); static struct attribute *idxd_wq_attributes[] = { &dev_attr_wq_clients.attr, &dev_attr_wq_state.attr, &dev_attr_wq_group_id.attr, &dev_attr_wq_mode.attr, &dev_attr_wq_size.attr, &dev_attr_wq_priority.attr, &dev_attr_wq_block_on_fault.attr, &dev_attr_wq_threshold.attr, &dev_attr_wq_type.attr, &dev_attr_wq_name.attr, &dev_attr_wq_cdev_minor.attr, &dev_attr_wq_max_transfer_size.attr, &dev_attr_wq_max_batch_size.attr, &dev_attr_wq_ats_disable.attr, &dev_attr_wq_prs_disable.attr, &dev_attr_wq_occupancy.attr, &dev_attr_wq_enqcmds_retries.attr, &dev_attr_wq_op_config.attr, NULL, }; /* A WQ attr is invisible if the feature is not supported in WQCAP. 
*/ #define idxd_wq_attr_invisible(name, cap_field, a, idxd) \ ((a) == &dev_attr_wq_##name.attr && !(idxd)->hw.wq_cap.cap_field) static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr, struct idxd_device *idxd) { /* Intel IAA does not support batch processing, make it invisible */ return attr == &dev_attr_wq_max_batch_size.attr && idxd->data->type == IDXD_TYPE_IAX; } static umode_t idxd_wq_attr_visible(struct kobject *kobj, struct attribute *attr, int n) { struct device *dev = container_of(kobj, struct device, kobj); struct idxd_wq *wq = confdev_to_wq(dev); struct idxd_device *idxd = wq->idxd; if (idxd_wq_attr_invisible(op_config, op_config, attr, idxd)) return 0; if (idxd_wq_attr_max_batch_size_invisible(attr, idxd)) return 0; if (idxd_wq_attr_invisible(prs_disable, wq_prs_support, attr, idxd)) return 0; if (idxd_wq_attr_invisible(ats_disable, wq_ats_support, attr, idxd)) return 0; return attr->mode; } static const struct attribute_group idxd_wq_attribute_group = { .attrs = idxd_wq_attributes, .is_visible = idxd_wq_attr_visible, }; static const struct attribute_group *idxd_wq_attribute_groups[] = { &idxd_wq_attribute_group, NULL, }; static void idxd_conf_wq_release(struct device *dev) { struct idxd_wq *wq = confdev_to_wq(dev); bitmap_free(wq->opcap_bmap); kfree(wq->wqcfg); xa_destroy(&wq->upasid_xa); kfree(wq); } struct device_type idxd_wq_device_type = { .name = "wq", .release = idxd_conf_wq_release, .groups = idxd_wq_attribute_groups, }; /* IDXD device attribs */ static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = confdev_to_idxd(dev); return sysfs_emit(buf, "%#x\n", idxd->hw.version); } static DEVICE_ATTR_RO(version); static ssize_t max_work_queues_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = confdev_to_idxd(dev); return sysfs_emit(buf, "%u\n", idxd->max_wq_size); } static DEVICE_ATTR_RO(max_work_queues_size); static ssize_t max_groups_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = confdev_to_idxd(dev); return sysfs_emit(buf, "%u\n", idxd->max_groups); } static DEVICE_ATTR_RO(max_groups); static ssize_t max_work_queues_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = confdev_to_idxd(dev); return sysfs_emit(buf, "%u\n", idxd->max_wqs); } static DEVICE_ATTR_RO(max_work_queues); static ssize_t max_engines_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = confdev_to_idxd(dev); return sysfs_emit(buf, "%u\n", idxd->max_engines); } static DEVICE_ATTR_RO(max_engines); static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = confdev_to_idxd(dev); return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev)); } static DEVICE_ATTR_RO(numa_node); static ssize_t max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = confdev_to_idxd(dev); return sysfs_emit(buf, "%u\n", idxd->max_batch_size); } static DEVICE_ATTR_RO(max_batch_size); static ssize_t max_transfer_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = confdev_to_idxd(dev); return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes); } static DEVICE_ATTR_RO(max_transfer_size); static ssize_t op_cap_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = 
confdev_to_idxd(dev); return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, idxd->opcap_bmap); } static DEVICE_ATTR_RO(op_cap); static ssize_t gen_cap_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = confdev_to_idxd(dev); return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits); } static DEVICE_ATTR_RO(gen_cap); static ssize_t configurable_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = confdev_to_idxd(dev); return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)); } static DEVICE_ATTR_RO(configurable); static ssize_t clients_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = confdev_to_idxd(dev); int count = 0, i; spin_lock(&idxd->dev_lock); for (i = 0; i < idxd->max_wqs; i++) { struct idxd_wq *wq = idxd->wqs[i]; count += wq->client_count; } spin_unlock(&idxd->dev_lock); return sysfs_emit(buf, "%d\n", count); } static DEVICE_ATTR_RO(clients); static ssize_t pasid_enabled_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = confdev_to_idxd(dev); return sysfs_emit(buf, "%u\n", device_user_pasid_enabled(idxd)); } static DEVICE_ATTR_RO(pasid_enabled); static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = confdev_to_idxd(dev); switch (idxd->state) { case IDXD_DEV_DISABLED: return sysfs_emit(buf, "disabled\n"); case IDXD_DEV_ENABLED: return sysfs_emit(buf, "enabled\n"); case IDXD_DEV_HALTED: return sysfs_emit(buf, "halted\n"); } return sysfs_emit(buf, "unknown\n"); } static DEVICE_ATTR_RO(state); static ssize_t errors_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = confdev_to_idxd(dev); DECLARE_BITMAP(swerr_bmap, 256); bitmap_zero(swerr_bmap, 256); spin_lock(&idxd->dev_lock); multi_u64_to_bmap(swerr_bmap, &idxd->sw_err.bits[0], 4); spin_unlock(&idxd->dev_lock); return sysfs_emit(buf, "%*pb\n", 256, swerr_bmap); } static DEVICE_ATTR_RO(errors); static ssize_t max_read_buffers_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = confdev_to_idxd(dev); return sysfs_emit(buf, "%u\n", idxd->max_rdbufs); } static ssize_t max_tokens_show(struct device *dev, struct device_attribute *attr, char *buf) { dev_warn_once(dev, "attribute deprecated, see max_read_buffers.\n"); return max_read_buffers_show(dev, attr, buf); } static DEVICE_ATTR_RO(max_tokens); /* deprecated */ static DEVICE_ATTR_RO(max_read_buffers); static ssize_t read_buffer_limit_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = confdev_to_idxd(dev); return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit); } static ssize_t token_limit_show(struct device *dev, struct device_attribute *attr, char *buf) { dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n"); return read_buffer_limit_show(dev, attr, buf); } static ssize_t read_buffer_limit_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_device *idxd = confdev_to_idxd(dev); unsigned long val; int rc; rc = kstrtoul(buf, 10, &val); if (rc < 0) return -EINVAL; if (idxd->state == IDXD_DEV_ENABLED) return -EPERM; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; if (!idxd->hw.group_cap.rdbuf_limit) return -EPERM; if (val > idxd->hw.group_cap.total_rdbufs) return -EINVAL; idxd->rdbuf_limit = val; return count; } static ssize_t 
token_limit_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { dev_warn_once(dev, "attribute deprecated, see read_buffer_limit\n"); return read_buffer_limit_store(dev, attr, buf, count); } static DEVICE_ATTR_RW(token_limit); /* deprecated */ static DEVICE_ATTR_RW(read_buffer_limit); static ssize_t cdev_major_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = confdev_to_idxd(dev); return sysfs_emit(buf, "%u\n", idxd->major); } static DEVICE_ATTR_RO(cdev_major); static ssize_t cmd_status_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = confdev_to_idxd(dev); return sysfs_emit(buf, "%#x\n", idxd->cmd_status); } static ssize_t cmd_status_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_device *idxd = confdev_to_idxd(dev); idxd->cmd_status = 0; return count; } static DEVICE_ATTR_RW(cmd_status); static ssize_t iaa_cap_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = confdev_to_idxd(dev); if (idxd->hw.version < DEVICE_VERSION_2) return -EOPNOTSUPP; return sysfs_emit(buf, "%#llx\n", idxd->hw.iaa_cap.bits); } static DEVICE_ATTR_RO(iaa_cap); static ssize_t event_log_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_device *idxd = confdev_to_idxd(dev); if (!idxd->evl) return -EOPNOTSUPP; return sysfs_emit(buf, "%u\n", idxd->evl->size); } static ssize_t event_log_size_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct idxd_device *idxd = confdev_to_idxd(dev); unsigned long val; int rc; if (!idxd->evl) return -EOPNOTSUPP; rc = kstrtoul(buf, 10, &val); if (rc < 0) return -EINVAL; if (idxd->state == IDXD_DEV_ENABLED) return -EPERM; if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return -EPERM; if (val < IDXD_EVL_SIZE_MIN || val > IDXD_EVL_SIZE_MAX || (val * evl_ent_size(idxd) > ULONG_MAX - idxd->evl->dma)) return -EINVAL; idxd->evl->size = val; return count; } static DEVICE_ATTR_RW(event_log_size); static bool idxd_device_attr_max_batch_size_invisible(struct attribute *attr, struct idxd_device *idxd) { /* Intel IAA does not support batch processing, make it invisible */ return attr == &dev_attr_max_batch_size.attr && idxd->data->type == IDXD_TYPE_IAX; } static bool idxd_device_attr_read_buffers_invisible(struct attribute *attr, struct idxd_device *idxd) { /* * Intel IAA does not support Read Buffer allocation control, * make these attributes invisible. 
*/ return (attr == &dev_attr_max_tokens.attr || attr == &dev_attr_max_read_buffers.attr || attr == &dev_attr_token_limit.attr || attr == &dev_attr_read_buffer_limit.attr) && idxd->data->type == IDXD_TYPE_IAX; } static bool idxd_device_attr_iaa_cap_invisible(struct attribute *attr, struct idxd_device *idxd) { return attr == &dev_attr_iaa_cap.attr && (idxd->data->type != IDXD_TYPE_IAX || idxd->hw.version < DEVICE_VERSION_2); } static bool idxd_device_attr_event_log_size_invisible(struct attribute *attr, struct idxd_device *idxd) { return (attr == &dev_attr_event_log_size.attr && !idxd->hw.gen_cap.evl_support); } static umode_t idxd_device_attr_visible(struct kobject *kobj, struct attribute *attr, int n) { struct device *dev = container_of(kobj, struct device, kobj); struct idxd_device *idxd = confdev_to_idxd(dev); if (idxd_device_attr_max_batch_size_invisible(attr, idxd)) return 0; if (idxd_device_attr_read_buffers_invisible(attr, idxd)) return 0; if (idxd_device_attr_iaa_cap_invisible(attr, idxd)) return 0; if (idxd_device_attr_event_log_size_invisible(attr, idxd)) return 0; return attr->mode; } static struct attribute *idxd_device_attributes[] = { &dev_attr_version.attr, &dev_attr_max_groups.attr, &dev_attr_max_work_queues.attr, &dev_attr_max_work_queues_size.attr, &dev_attr_max_engines.attr, &dev_attr_numa_node.attr, &dev_attr_max_batch_size.attr, &dev_attr_max_transfer_size.attr, &dev_attr_op_cap.attr, &dev_attr_gen_cap.attr, &dev_attr_configurable.attr, &dev_attr_clients.attr, &dev_attr_pasid_enabled.attr, &dev_attr_state.attr, &dev_attr_errors.attr, &dev_attr_max_tokens.attr, &dev_attr_max_read_buffers.attr, &dev_attr_token_limit.attr, &dev_attr_read_buffer_limit.attr, &dev_attr_cdev_major.attr, &dev_attr_cmd_status.attr, &dev_attr_iaa_cap.attr, &dev_attr_event_log_size.attr, NULL, }; static const struct attribute_group idxd_device_attribute_group = { .attrs = idxd_device_attributes, .is_visible = idxd_device_attr_visible, }; static const struct attribute_group *idxd_attribute_groups[] = { &idxd_device_attribute_group, NULL, }; static void idxd_conf_device_release(struct device *dev) { struct idxd_device *idxd = confdev_to_idxd(dev); kfree(idxd->groups); bitmap_free(idxd->wq_enable_map); kfree(idxd->wqs); kfree(idxd->engines); kfree(idxd->evl); kmem_cache_destroy(idxd->evl_cache); ida_free(&idxd_ida, idxd->id); bitmap_free(idxd->opcap_bmap); kfree(idxd); } struct device_type dsa_device_type = { .name = "dsa", .release = idxd_conf_device_release, .groups = idxd_attribute_groups, }; struct device_type iax_device_type = { .name = "iax", .release = idxd_conf_device_release, .groups = idxd_attribute_groups, }; static int idxd_register_engine_devices(struct idxd_device *idxd) { struct idxd_engine *engine; int i, j, rc; for (i = 0; i < idxd->max_engines; i++) { engine = idxd->engines[i]; rc = device_add(engine_confdev(engine)); if (rc < 0) goto cleanup; } return 0; cleanup: j = i - 1; for (; i < idxd->max_engines; i++) { engine = idxd->engines[i]; put_device(engine_confdev(engine)); } while (j--) { engine = idxd->engines[j]; device_unregister(engine_confdev(engine)); } return rc; } static int idxd_register_group_devices(struct idxd_device *idxd) { struct idxd_group *group; int i, j, rc; for (i = 0; i < idxd->max_groups; i++) { group = idxd->groups[i]; rc = device_add(group_confdev(group)); if (rc < 0) goto cleanup; } return 0; cleanup: j = i - 1; for (; i < idxd->max_groups; i++) { group = idxd->groups[i]; put_device(group_confdev(group)); } while (j--) { group = idxd->groups[j]; 
device_unregister(group_confdev(group)); } return rc; } static int idxd_register_wq_devices(struct idxd_device *idxd) { struct idxd_wq *wq; int i, rc, j; for (i = 0; i < idxd->max_wqs; i++) { wq = idxd->wqs[i]; rc = device_add(wq_confdev(wq)); if (rc < 0) goto cleanup; } return 0; cleanup: j = i - 1; for (; i < idxd->max_wqs; i++) { wq = idxd->wqs[i]; put_device(wq_confdev(wq)); } while (j--) { wq = idxd->wqs[j]; device_unregister(wq_confdev(wq)); } return rc; } int idxd_register_devices(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; int rc, i; rc = device_add(idxd_confdev(idxd)); if (rc < 0) return rc; rc = idxd_register_wq_devices(idxd); if (rc < 0) { dev_dbg(dev, "WQ devices registering failed: %d\n", rc); goto err_wq; } rc = idxd_register_engine_devices(idxd); if (rc < 0) { dev_dbg(dev, "Engine devices registering failed: %d\n", rc); goto err_engine; } rc = idxd_register_group_devices(idxd); if (rc < 0) { dev_dbg(dev, "Group device registering failed: %d\n", rc); goto err_group; } return 0; err_group: for (i = 0; i < idxd->max_engines; i++) device_unregister(engine_confdev(idxd->engines[i])); err_engine: for (i = 0; i < idxd->max_wqs; i++) device_unregister(wq_confdev(idxd->wqs[i])); err_wq: device_del(idxd_confdev(idxd)); return rc; } void idxd_unregister_devices(struct idxd_device *idxd) { int i; for (i = 0; i < idxd->max_wqs; i++) { struct idxd_wq *wq = idxd->wqs[i]; device_unregister(wq_confdev(wq)); } for (i = 0; i < idxd->max_engines; i++) { struct idxd_engine *engine = idxd->engines[i]; device_unregister(engine_confdev(engine)); } for (i = 0; i < idxd->max_groups; i++) { struct idxd_group *group = idxd->groups[i]; device_unregister(group_confdev(group)); } } int idxd_register_bus_type(void) { return bus_register(&dsa_bus_type); } void idxd_unregister_bus_type(void) { bus_unregister(&dsa_bus_type); }
linux-master
drivers/dma/idxd/sysfs.c
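The *_show()/*_store() handlers in the sysfs.c record above are the configuration surface that userspace tooling (typically accel-config) drives before a work queue is enabled. Below is a minimal userspace sketch, not part of the driver, showing the kind of writes that land in wq_group_id_store(), wq_mode_store(), wq_size_store(), wq_type_store() and wq_name_store(); the /sys/bus/dsa/devices/dsa0/wq0.0 path and the chosen values are assumptions for illustration only.

/*
 * Hypothetical userspace sketch: configure wq0.0 on device dsa0 through the
 * sysfs attributes implemented above. Paths and values are assumptions for
 * illustration; real deployments normally use accel-config instead.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	const char *wq = "/sys/bus/dsa/devices/dsa0/wq0.0";
	char path[256];

	/* Each write is handled by the corresponding *_store() above. */
	snprintf(path, sizeof(path), "%s/group_id", wq);
	sysfs_write(path, "0");		/* wq_group_id_store() */
	snprintf(path, sizeof(path), "%s/mode", wq);
	sysfs_write(path, "dedicated");	/* wq_mode_store() */
	snprintf(path, sizeof(path), "%s/size", wq);
	sysfs_write(path, "16");	/* wq_size_store() */
	snprintf(path, sizeof(path), "%s/type", wq);
	sysfs_write(path, "user");	/* wq_type_store() */
	snprintf(path, sizeof(path), "%s/name", wq);
	sysfs_write(path, "app1");	/* wq_name_store() */
	return 0;
}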
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2021 Intel Corporation. All rights rsvd. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/device/bus.h> #include "idxd.h" extern int device_driver_attach(struct device_driver *drv, struct device *dev); extern void device_driver_detach(struct device *dev); #define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \ struct driver_attribute driver_attr_##_name = \ __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) static ssize_t unbind_store(struct device_driver *drv, const char *buf, size_t count) { const struct bus_type *bus = drv->bus; struct device *dev; int rc = -ENODEV; dev = bus_find_device_by_name(bus, NULL, buf); if (dev && dev->driver) { device_driver_detach(dev); rc = count; } return rc; } static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, 0200, NULL, unbind_store); static ssize_t bind_store(struct device_driver *drv, const char *buf, size_t count) { const struct bus_type *bus = drv->bus; struct device *dev; struct device_driver *alt_drv = NULL; int rc = -ENODEV; struct idxd_dev *idxd_dev; dev = bus_find_device_by_name(bus, NULL, buf); if (!dev || dev->driver || drv != &dsa_drv.drv) return -ENODEV; idxd_dev = confdev_to_idxd_dev(dev); if (is_idxd_dev(idxd_dev)) { alt_drv = driver_find("idxd", bus); } else if (is_idxd_wq_dev(idxd_dev)) { struct idxd_wq *wq = confdev_to_wq(dev); if (is_idxd_wq_kernel(wq)) alt_drv = driver_find("dmaengine", bus); else if (is_idxd_wq_user(wq)) alt_drv = driver_find("user", bus); } if (!alt_drv) return -ENODEV; rc = device_driver_attach(alt_drv, dev); if (rc < 0) return rc; return count; } static DRIVER_ATTR_IGNORE_LOCKDEP(bind, 0200, NULL, bind_store); static struct attribute *dsa_drv_compat_attrs[] = { &driver_attr_bind.attr, &driver_attr_unbind.attr, NULL, }; static const struct attribute_group dsa_drv_compat_attr_group = { .attrs = dsa_drv_compat_attrs, }; static const struct attribute_group *dsa_drv_compat_groups[] = { &dsa_drv_compat_attr_group, NULL, }; static int idxd_dsa_drv_probe(struct idxd_dev *idxd_dev) { return -ENODEV; } static void idxd_dsa_drv_remove(struct idxd_dev *idxd_dev) { } static enum idxd_dev_type dev_types[] = { IDXD_DEV_NONE, }; struct idxd_device_driver dsa_drv = { .name = "dsa", .probe = idxd_dsa_drv_probe, .remove = idxd_dsa_drv_remove, .type = dev_types, .drv = { .suppress_bind_attrs = true, .groups = dsa_drv_compat_groups, }, }; module_idxd_driver(dsa_drv); MODULE_IMPORT_NS(IDXD);
linux-master
drivers/dma/idxd/compat.c
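The compat.c record above implements bind/unbind attributes on the legacy "dsa" driver: the store handlers look up the named device and re-attach the appropriate real driver ("idxd", "dmaengine" or "user"). The sketch below shows how bind_store() might be exercised from userspace; the /sys/bus/dsa/drivers/dsa/bind path and the "wq0.0" device name are assumptions for illustration.

/*
 * Sketch of driving the compat bind_store() above: write a configured device
 * name to the "dsa" driver's bind attribute, which then attaches the matching
 * idxd driver. The device name "wq0.0" is an assumption for illustration.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *name = "wq0.0";
	int fd = open("/sys/bus/dsa/drivers/dsa/bind", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, name, strlen(name)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}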
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/dmaengine.h> #include <linux/delay.h> #include <linux/iommu.h> #include <linux/sched/mm.h> #include <uapi/linux/idxd.h> #include "../dmaengine.h" #include "idxd.h" #include "registers.h" enum irq_work_type { IRQ_WORK_NORMAL = 0, IRQ_WORK_PROCESS_FAULT, }; struct idxd_resubmit { struct work_struct work; struct idxd_desc *desc; }; struct idxd_int_handle_revoke { struct work_struct work; struct idxd_device *idxd; }; static void idxd_device_reinit(struct work_struct *work) { struct idxd_device *idxd = container_of(work, struct idxd_device, work); struct device *dev = &idxd->pdev->dev; int rc, i; idxd_device_reset(idxd); rc = idxd_device_config(idxd); if (rc < 0) goto out; rc = idxd_device_enable(idxd); if (rc < 0) goto out; for (i = 0; i < idxd->max_wqs; i++) { if (test_bit(i, idxd->wq_enable_map)) { struct idxd_wq *wq = idxd->wqs[i]; rc = idxd_wq_enable(wq); if (rc < 0) { clear_bit(i, idxd->wq_enable_map); dev_warn(dev, "Unable to re-enable wq %s\n", dev_name(wq_confdev(wq))); } } } return; out: idxd_device_clear_state(idxd); } /* * The function sends a drain descriptor for the interrupt handle. The drain ensures * all descriptors with this interrupt handle is flushed and the interrupt * will allow the cleanup of the outstanding descriptors. */ static void idxd_int_handle_revoke_drain(struct idxd_irq_entry *ie) { struct idxd_wq *wq = ie_to_wq(ie); struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; struct dsa_hw_desc desc = {}; void __iomem *portal; int rc; /* Issue a simple drain operation with interrupt but no completion record */ desc.flags = IDXD_OP_FLAG_RCI; desc.opcode = DSA_OPCODE_DRAIN; desc.priv = 1; if (ie->pasid != IOMMU_PASID_INVALID) desc.pasid = ie->pasid; desc.int_handle = ie->int_handle; portal = idxd_wq_portal_addr(wq); /* * The wmb() makes sure that the descriptor is all there before we * issue. */ wmb(); if (wq_dedicated(wq)) { iosubmit_cmds512(portal, &desc, 1); } else { rc = idxd_enqcmds(wq, portal, &desc); /* This should not fail unless hardware failed. */ if (rc < 0) dev_warn(dev, "Failed to submit drain desc on wq %d\n", wq->id); } } static void idxd_abort_invalid_int_handle_descs(struct idxd_irq_entry *ie) { LIST_HEAD(flist); struct idxd_desc *d, *t; struct llist_node *head; spin_lock(&ie->list_lock); head = llist_del_all(&ie->pending_llist); if (head) { llist_for_each_entry_safe(d, t, head, llnode) list_add_tail(&d->list, &ie->work_list); } list_for_each_entry_safe(d, t, &ie->work_list, list) { if (d->completion->status == DSA_COMP_INT_HANDLE_INVAL) list_move_tail(&d->list, &flist); } spin_unlock(&ie->list_lock); list_for_each_entry_safe(d, t, &flist, list) { list_del(&d->list); idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true); } } static void idxd_int_handle_revoke(struct work_struct *work) { struct idxd_int_handle_revoke *revoke = container_of(work, struct idxd_int_handle_revoke, work); struct idxd_device *idxd = revoke->idxd; struct pci_dev *pdev = idxd->pdev; struct device *dev = &pdev->dev; int i, new_handle, rc; if (!idxd->request_int_handles) { kfree(revoke); dev_warn(dev, "Unexpected int handle refresh interrupt.\n"); return; } /* * The loop attempts to acquire new interrupt handle for all interrupt * vectors that supports a handle. 
If a new interrupt handle is acquired and the * wq is kernel type, the driver will kill the percpu_ref to pause all * ongoing descriptor submissions. The interrupt handle is then changed. * After change, the percpu_ref is revived and all the pending submissions * are woken to try again. A drain is sent to for the interrupt handle * at the end to make sure all invalid int handle descriptors are processed. */ for (i = 1; i < idxd->irq_cnt; i++) { struct idxd_irq_entry *ie = idxd_get_ie(idxd, i); struct idxd_wq *wq = ie_to_wq(ie); if (ie->int_handle == INVALID_INT_HANDLE) continue; rc = idxd_device_request_int_handle(idxd, i, &new_handle, IDXD_IRQ_MSIX); if (rc < 0) { dev_warn(dev, "get int handle %d failed: %d\n", i, rc); /* * Failed to acquire new interrupt handle. Kill the WQ * and release all the pending submitters. The submitters will * get error return code and handle appropriately. */ ie->int_handle = INVALID_INT_HANDLE; idxd_wq_quiesce(wq); idxd_abort_invalid_int_handle_descs(ie); continue; } /* No change in interrupt handle, nothing needs to be done */ if (ie->int_handle == new_handle) continue; if (wq->state != IDXD_WQ_ENABLED || wq->type != IDXD_WQT_KERNEL) { /* * All the MSIX interrupts are allocated at once during probe. * Therefore we need to update all interrupts even if the WQ * isn't supporting interrupt operations. */ ie->int_handle = new_handle; continue; } mutex_lock(&wq->wq_lock); reinit_completion(&wq->wq_resurrect); /* Kill percpu_ref to pause additional descriptor submissions */ percpu_ref_kill(&wq->wq_active); /* Wait for all submitters quiesce before we change interrupt handle */ wait_for_completion(&wq->wq_dead); ie->int_handle = new_handle; /* Revive percpu ref and wake up all the waiting submitters */ percpu_ref_reinit(&wq->wq_active); complete_all(&wq->wq_resurrect); mutex_unlock(&wq->wq_lock); /* * The delay here is to wait for all possible MOVDIR64B that * are issued before percpu_ref_kill() has happened to have * reached the PCIe domain before the drain is issued. The driver * needs to ensure that the drain descriptor issued does not pass * all the other issued descriptors that contain the invalid * interrupt handle in order to ensure that the drain descriptor * interrupt will allow the cleanup of all the descriptors with * invalid interrupt handle. */ if (wq_dedicated(wq)) udelay(100); idxd_int_handle_revoke_drain(ie); } kfree(revoke); } static void idxd_evl_fault_work(struct work_struct *work) { struct idxd_evl_fault *fault = container_of(work, struct idxd_evl_fault, work); struct idxd_wq *wq = fault->wq; struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; struct idxd_evl *evl = idxd->evl; struct __evl_entry *entry_head = fault->entry; void *cr = (void *)entry_head + idxd->data->evl_cr_off; int cr_size = idxd->data->compl_size; u8 *status = (u8 *)cr + idxd->data->cr_status_off; u8 *result = (u8 *)cr + idxd->data->cr_result_off; int copied, copy_size; bool *bf; switch (fault->status) { case DSA_COMP_CRA_XLAT: if (entry_head->batch && entry_head->first_err_in_batch) evl->batch_fail[entry_head->batch_id] = false; copy_size = cr_size; idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULTS); break; case DSA_COMP_BATCH_EVL_ERR: bf = &evl->batch_fail[entry_head->batch_id]; copy_size = entry_head->rcr || *bf ? 
cr_size : 0; if (*bf) { if (*status == DSA_COMP_SUCCESS) *status = DSA_COMP_BATCH_FAIL; *result = 1; *bf = false; } idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULTS); break; case DSA_COMP_DRAIN_EVL: copy_size = cr_size; break; default: copy_size = 0; dev_dbg_ratelimited(dev, "Unrecognized error code: %#x\n", fault->status); break; } if (copy_size == 0) return; /* * Copy completion record to fault_addr in user address space * that is found by wq and PASID. */ copied = idxd_copy_cr(wq, entry_head->pasid, entry_head->fault_addr, cr, copy_size); /* * The task that triggered the page fault is unknown currently * because multiple threads may share the user address * space or the task exits already before this fault. * So if the copy fails, SIGSEGV can not be sent to the task. * Just print an error for the failure. The user application * waiting for the completion record will time out on this * failure. */ switch (fault->status) { case DSA_COMP_CRA_XLAT: if (copied != copy_size) { idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULT_FAILS); dev_dbg_ratelimited(dev, "Failed to write to completion record: (%d:%d)\n", copy_size, copied); if (entry_head->batch) evl->batch_fail[entry_head->batch_id] = true; } break; case DSA_COMP_BATCH_EVL_ERR: if (copied != copy_size) { idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULT_FAILS); dev_dbg_ratelimited(dev, "Failed to write to batch completion record: (%d:%d)\n", copy_size, copied); } break; case DSA_COMP_DRAIN_EVL: if (copied != copy_size) dev_dbg_ratelimited(dev, "Failed to write to drain completion record: (%d:%d)\n", copy_size, copied); break; } kmem_cache_free(idxd->evl_cache, fault); } static void process_evl_entry(struct idxd_device *idxd, struct __evl_entry *entry_head, unsigned int index) { struct device *dev = &idxd->pdev->dev; struct idxd_evl *evl = idxd->evl; u8 status; if (test_bit(index, evl->bmap)) { clear_bit(index, evl->bmap); } else { status = DSA_COMP_STATUS(entry_head->error); if (status == DSA_COMP_CRA_XLAT || status == DSA_COMP_DRAIN_EVL || status == DSA_COMP_BATCH_EVL_ERR) { struct idxd_evl_fault *fault; int ent_size = evl_ent_size(idxd); if (entry_head->rci) dev_dbg(dev, "Completion Int Req set, ignoring!\n"); if (!entry_head->rcr && status == DSA_COMP_DRAIN_EVL) return; fault = kmem_cache_alloc(idxd->evl_cache, GFP_ATOMIC); if (fault) { struct idxd_wq *wq = idxd->wqs[entry_head->wq_idx]; fault->wq = wq; fault->status = status; memcpy(&fault->entry, entry_head, ent_size); INIT_WORK(&fault->work, idxd_evl_fault_work); queue_work(wq->wq, &fault->work); } else { dev_warn(dev, "Failed to service fault work.\n"); } } else { dev_warn_ratelimited(dev, "Device error %#x operation: %#x fault addr: %#llx\n", status, entry_head->operation, entry_head->fault_addr); } } } static void process_evl_entries(struct idxd_device *idxd) { union evl_status_reg evl_status; unsigned int h, t; struct idxd_evl *evl = idxd->evl; struct __evl_entry *entry_head; unsigned int ent_size = evl_ent_size(idxd); u32 size; evl_status.bits = 0; evl_status.int_pending = 1; spin_lock(&evl->lock); /* Clear interrupt pending bit */ iowrite32(evl_status.bits_upper32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32)); h = evl->head; evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET); t = evl_status.tail; size = idxd->evl->size; while (h != t) { entry_head = (struct __evl_entry *)(evl->log + (h * ent_size)); process_evl_entry(idxd, entry_head, h); h = (h + 1) % size; } evl->head = h; evl_status.head = h; 
iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET); spin_unlock(&evl->lock); } irqreturn_t idxd_misc_thread(int vec, void *data) { struct idxd_irq_entry *irq_entry = data; struct idxd_device *idxd = ie_to_idxd(irq_entry); struct device *dev = &idxd->pdev->dev; union gensts_reg gensts; u32 val = 0; int i; bool err = false; u32 cause; cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET); if (!cause) return IRQ_NONE; iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET); if (cause & IDXD_INTC_HALT_STATE) goto halt; if (cause & IDXD_INTC_ERR) { spin_lock(&idxd->dev_lock); for (i = 0; i < 4; i++) idxd->sw_err.bits[i] = ioread64(idxd->reg_base + IDXD_SWERR_OFFSET + i * sizeof(u64)); iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET); if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) { int id = idxd->sw_err.wq_idx; struct idxd_wq *wq = idxd->wqs[id]; if (wq->type == IDXD_WQT_USER) wake_up_interruptible(&wq->err_queue); } else { int i; for (i = 0; i < idxd->max_wqs; i++) { struct idxd_wq *wq = idxd->wqs[i]; if (wq->type == IDXD_WQT_USER) wake_up_interruptible(&wq->err_queue); } } spin_unlock(&idxd->dev_lock); val |= IDXD_INTC_ERR; for (i = 0; i < 4; i++) dev_warn(dev, "err[%d]: %#16.16llx\n", i, idxd->sw_err.bits[i]); err = true; } if (cause & IDXD_INTC_INT_HANDLE_REVOKED) { struct idxd_int_handle_revoke *revoke; val |= IDXD_INTC_INT_HANDLE_REVOKED; revoke = kzalloc(sizeof(*revoke), GFP_ATOMIC); if (revoke) { revoke->idxd = idxd; INIT_WORK(&revoke->work, idxd_int_handle_revoke); queue_work(idxd->wq, &revoke->work); } else { dev_err(dev, "Failed to allocate work for int handle revoke\n"); idxd_wqs_quiesce(idxd); } } if (cause & IDXD_INTC_CMD) { val |= IDXD_INTC_CMD; complete(idxd->cmd_done); } if (cause & IDXD_INTC_OCCUPY) { /* Driver does not utilize occupancy interrupt */ val |= IDXD_INTC_OCCUPY; } if (cause & IDXD_INTC_PERFMON_OVFL) { val |= IDXD_INTC_PERFMON_OVFL; perfmon_counter_overflow(idxd); } if (cause & IDXD_INTC_EVL) { val |= IDXD_INTC_EVL; process_evl_entries(idxd); } val ^= cause; if (val) dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n", val); if (!err) goto out; halt: gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET); if (gensts.state == IDXD_DEVICE_STATE_HALT) { idxd->state = IDXD_DEV_HALTED; if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) { /* * If we need a software reset, we will throw the work * on a system workqueue in order to allow interrupts * for the device command completions. */ INIT_WORK(&idxd->work, idxd_device_reinit); queue_work(idxd->wq, &idxd->work); } else { idxd->state = IDXD_DEV_HALTED; idxd_wqs_quiesce(idxd); idxd_wqs_unmap_portal(idxd); idxd_device_clear_state(idxd); dev_err(&idxd->pdev->dev, "idxd halted, need %s.\n", gensts.reset_type == IDXD_DEVICE_RESET_FLR ? "FLR" : "system reset"); } } out: return IRQ_HANDLED; } static void idxd_int_handle_resubmit_work(struct work_struct *work) { struct idxd_resubmit *irw = container_of(work, struct idxd_resubmit, work); struct idxd_desc *desc = irw->desc; struct idxd_wq *wq = desc->wq; int rc; desc->completion->status = 0; rc = idxd_submit_desc(wq, desc); if (rc < 0) { dev_dbg(&wq->idxd->pdev->dev, "Failed to resubmit desc %d to wq %d.\n", desc->id, wq->id); /* * If the error is not -EAGAIN, it means the submission failed due to wq * has been killed instead of ENQCMDS failure. Here the driver needs to * notify the submitter of the failure by reporting abort status. * * -EAGAIN comes from ENQCMDS failure. 
idxd_submit_desc() will handle the * abort. */ if (rc != -EAGAIN) { desc->completion->status = IDXD_COMP_DESC_ABORT; idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, false); } idxd_free_desc(wq, desc); } kfree(irw); } bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc) { struct idxd_wq *wq = desc->wq; struct idxd_device *idxd = wq->idxd; struct idxd_resubmit *irw; irw = kzalloc(sizeof(*irw), GFP_KERNEL); if (!irw) return false; irw->desc = desc; INIT_WORK(&irw->work, idxd_int_handle_resubmit_work); queue_work(idxd->wq, &irw->work); return true; } static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry) { struct idxd_desc *desc, *t; struct llist_node *head; head = llist_del_all(&irq_entry->pending_llist); if (!head) return; llist_for_each_entry_safe(desc, t, head, llnode) { u8 status = desc->completion->status & DSA_COMP_STATUS_MASK; if (status) { /* * Check against the original status as ABORT is software defined * and 0xff, which DSA_COMP_STATUS_MASK can mask out. */ if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) { idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true); continue; } idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true); } else { spin_lock(&irq_entry->list_lock); list_add_tail(&desc->list, &irq_entry->work_list); spin_unlock(&irq_entry->list_lock); } } } static void irq_process_work_list(struct idxd_irq_entry *irq_entry) { LIST_HEAD(flist); struct idxd_desc *desc, *n; /* * This lock protects list corruption from access of list outside of the irq handler * thread. */ spin_lock(&irq_entry->list_lock); if (list_empty(&irq_entry->work_list)) { spin_unlock(&irq_entry->list_lock); return; } list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) { if (desc->completion->status) { list_move_tail(&desc->list, &flist); } } spin_unlock(&irq_entry->list_lock); list_for_each_entry(desc, &flist, list) { /* * Check against the original status as ABORT is software defined * and 0xff, which DSA_COMP_STATUS_MASK can mask out. */ if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) { idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true); continue; } idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true); } } irqreturn_t idxd_wq_thread(int irq, void *data) { struct idxd_irq_entry *irq_entry = data; /* * There are two lists we are processing. The pending_llist is where * submmiter adds all the submitted descriptor after sending it to * the workqueue. It's a lockless singly linked list. The work_list * is the common linux double linked list. We are in a scenario of * multiple producers and a single consumer. The producers are all * the kernel submitters of descriptors, and the consumer is the * kernel irq handler thread for the msix vector when using threaded * irq. To work with the restrictions of llist to remain lockless, * we are doing the following steps: * 1. Iterate through the work_list and process any completed * descriptor. Delete the completed entries during iteration. * 2. llist_del_all() from the pending list. * 3. Iterate through the llist that was deleted from the pending list * and process the completed entries. * 4. If the entry is still waiting on hardware, list_add_tail() to * the work_list. */ irq_process_work_list(irq_entry); irq_process_pending_llist(irq_entry); return IRQ_HANDLED; }
linux-master
drivers/dma/idxd/irq.c
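The interrupt path in irq.c above drains the hardware event log by reading the head and tail indices from EVLSTATUS and consuming every ring slot between them, wrapping with `h = (h + 1) % size`. The following is a minimal standalone sketch of that walk (plain userspace C; the struct, sizes and values are illustrative stand-ins, not the driver's, and the real code reads the indices from the device register and a DMA-coherent log buffer).

/* Standalone model of the head/tail event-log walk in process_evl_entries().
 * All names, sizes and values here are illustrative only.
 */
#include <stdio.h>

#define EVL_SIZE 8			/* assumed ring size for the sketch */

struct evl_entry { unsigned int error; };

static void process_entry(const struct evl_entry *e, unsigned int idx)
{
	printf("entry %u: error %#x\n", idx, e->error);
}

int main(void)
{
	struct evl_entry log[EVL_SIZE] = {
		[2] = { .error = 0x11 },
		[3] = { .error = 0x22 },
	};
	unsigned int head = 2, tail = 4;	/* pretend hardware advanced tail */

	/* Consume everything between head and tail, wrapping at the end. */
	while (head != tail) {
		process_entry(&log[head], head);
		head = (head + 1) % EVL_SIZE;
	}
	/* The driver then writes the new head back to EVLSTATUS. */
	printf("new head %u\n", head);
	return 0;
}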
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2021 Intel Corporation. All rights rsvd. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/debugfs.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <uapi/linux/idxd.h> #include "idxd.h" #include "registers.h" static struct dentry *idxd_debugfs_dir; static void dump_event_entry(struct idxd_device *idxd, struct seq_file *s, u16 index, int *count, bool processed) { struct idxd_evl *evl = idxd->evl; struct dsa_evl_entry *entry; struct dsa_completion_record *cr; u64 *raw; int i; int evl_strides = evl_ent_size(idxd) / sizeof(u64); entry = (struct dsa_evl_entry *)evl->log + index; if (!entry->e.desc_valid) return; seq_printf(s, "Event Log entry %d (real index %u) processed: %u\n", *count, index, processed); seq_printf(s, "desc valid %u wq idx valid %u\n" "batch %u fault rw %u priv %u error 0x%x\n" "wq idx %u op %#x pasid %u batch idx %u\n" "fault addr %#llx\n", entry->e.desc_valid, entry->e.wq_idx_valid, entry->e.batch, entry->e.fault_rw, entry->e.priv, entry->e.error, entry->e.wq_idx, entry->e.operation, entry->e.pasid, entry->e.batch_idx, entry->e.fault_addr); cr = &entry->cr; seq_printf(s, "status %#x result %#x fault_info %#x bytes_completed %u\n" "fault addr %#llx inv flags %#x\n\n", cr->status, cr->result, cr->fault_info, cr->bytes_completed, cr->fault_addr, cr->invalid_flags); raw = (u64 *)entry; for (i = 0; i < evl_strides; i++) seq_printf(s, "entry[%d] = %#llx\n", i, raw[i]); seq_puts(s, "\n"); *count += 1; } static int debugfs_evl_show(struct seq_file *s, void *d) { struct idxd_device *idxd = s->private; struct idxd_evl *evl = idxd->evl; union evl_status_reg evl_status; u16 h, t, evl_size, i; int count = 0; bool processed = true; if (!evl || !evl->log) return 0; spin_lock(&evl->lock); h = evl->head; evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET); t = evl_status.tail; evl_size = evl->size; seq_printf(s, "Event Log head %u tail %u interrupt pending %u\n\n", evl_status.head, evl_status.tail, evl_status.int_pending); i = t; while (1) { i = (i + 1) % evl_size; if (i == t) break; if (processed && i == h) processed = false; dump_event_entry(idxd, s, i, &count, processed); } spin_unlock(&evl->lock); return 0; } DEFINE_SHOW_ATTRIBUTE(debugfs_evl); int idxd_device_init_debugfs(struct idxd_device *idxd) { if (IS_ERR_OR_NULL(idxd_debugfs_dir)) return 0; idxd->dbgfs_dir = debugfs_create_dir(dev_name(idxd_confdev(idxd)), idxd_debugfs_dir); if (IS_ERR(idxd->dbgfs_dir)) return PTR_ERR(idxd->dbgfs_dir); if (idxd->evl) { idxd->dbgfs_evl_file = debugfs_create_file("event_log", 0400, idxd->dbgfs_dir, idxd, &debugfs_evl_fops); if (IS_ERR(idxd->dbgfs_evl_file)) { debugfs_remove_recursive(idxd->dbgfs_dir); idxd->dbgfs_dir = NULL; return PTR_ERR(idxd->dbgfs_evl_file); } } return 0; } void idxd_device_remove_debugfs(struct idxd_device *idxd) { debugfs_remove_recursive(idxd->dbgfs_dir); } int idxd_init_debugfs(void) { if (!debugfs_initialized()) return 0; idxd_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); if (IS_ERR(idxd_debugfs_dir)) return PTR_ERR(idxd_debugfs_dir); return 0; } void idxd_remove_debugfs(void) { debugfs_remove_recursive(idxd_debugfs_dir); }
linux-master
drivers/dma/idxd/debugfs.c
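Unlike the interrupt handler, debugfs_evl_show() in debugfs.c dumps the whole ring: it starts one slot past the hardware tail, wraps around exactly once, and flips a `processed` flag to false when it crosses the software head, so already-consumed and still-pending entries are labeled differently. A self-contained model of that full-ring pass, with made-up indices:

/* Standalone model of the full-ring dump in debugfs_evl_show(). */
#include <stdbool.h>
#include <stdio.h>

#define EVL_SIZE 8			/* illustrative ring size */

int main(void)
{
	unsigned int head = 5, tail = 2;	/* example indices */
	bool processed = true;
	unsigned int i = tail;
	int count = 0;

	for (;;) {
		i = (i + 1) % EVL_SIZE;
		if (i == tail)
			break;			/* walked the whole ring once */
		if (processed && i == head)
			processed = false;	/* from head onward: still pending */
		printf("slot %u (#%d) processed: %u\n", i, count++, processed);
	}
	return 0;
}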
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/workqueue.h> #include <linux/fs.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/device.h> #include <linux/idr.h> #include <linux/iommu.h> #include <uapi/linux/idxd.h> #include <linux/dmaengine.h> #include "../dmaengine.h" #include "registers.h" #include "idxd.h" #include "perfmon.h" MODULE_VERSION(IDXD_DRIVER_VERSION); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Intel Corporation"); MODULE_IMPORT_NS(IDXD); static bool sva = true; module_param(sva, bool, 0644); MODULE_PARM_DESC(sva, "Toggle SVA support on/off"); bool tc_override; module_param(tc_override, bool, 0644); MODULE_PARM_DESC(tc_override, "Override traffic class defaults"); #define DRV_NAME "idxd" bool support_enqcmd; DEFINE_IDA(idxd_ida); static struct idxd_driver_data idxd_driver_data[] = { [IDXD_TYPE_DSA] = { .name_prefix = "dsa", .type = IDXD_TYPE_DSA, .compl_size = sizeof(struct dsa_completion_record), .align = 32, .dev_type = &dsa_device_type, .evl_cr_off = offsetof(struct dsa_evl_entry, cr), .cr_status_off = offsetof(struct dsa_completion_record, status), .cr_result_off = offsetof(struct dsa_completion_record, result), }, [IDXD_TYPE_IAX] = { .name_prefix = "iax", .type = IDXD_TYPE_IAX, .compl_size = sizeof(struct iax_completion_record), .align = 64, .dev_type = &iax_device_type, .evl_cr_off = offsetof(struct iax_evl_entry, cr), .cr_status_off = offsetof(struct iax_completion_record, status), .cr_result_off = offsetof(struct iax_completion_record, error_code), }, }; static struct pci_device_id idxd_pci_tbl[] = { /* DSA ver 1.0 platforms */ { PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) }, /* IAX ver 1.0 platforms */ { PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) }, { 0, } }; MODULE_DEVICE_TABLE(pci, idxd_pci_tbl); static int idxd_setup_interrupts(struct idxd_device *idxd) { struct pci_dev *pdev = idxd->pdev; struct device *dev = &pdev->dev; struct idxd_irq_entry *ie; int i, msixcnt; int rc = 0; msixcnt = pci_msix_vec_count(pdev); if (msixcnt < 0) { dev_err(dev, "Not MSI-X interrupt capable.\n"); return -ENOSPC; } idxd->irq_cnt = msixcnt; rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX); if (rc != msixcnt) { dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc); return -ENOSPC; } dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt); ie = idxd_get_ie(idxd, 0); ie->vector = pci_irq_vector(pdev, 0); rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie); if (rc < 0) { dev_err(dev, "Failed to allocate misc interrupt.\n"); goto err_misc_irq; } dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector); for (i = 0; i < idxd->max_wqs; i++) { int msix_idx = i + 1; ie = idxd_get_ie(idxd, msix_idx); ie->id = msix_idx; ie->int_handle = INVALID_INT_HANDLE; ie->pasid = IOMMU_PASID_INVALID; spin_lock_init(&ie->list_lock); init_llist_head(&ie->pending_llist); INIT_LIST_HEAD(&ie->work_list); } idxd_unmask_error_interrupts(idxd); return 0; err_misc_irq: idxd_mask_error_interrupts(idxd); pci_free_irq_vectors(pdev); dev_err(dev, "No usable interrupts\n"); return rc; } static void idxd_cleanup_interrupts(struct idxd_device *idxd) { struct pci_dev *pdev = idxd->pdev; struct idxd_irq_entry *ie; int msixcnt; 
msixcnt = pci_msix_vec_count(pdev); if (msixcnt <= 0) return; ie = idxd_get_ie(idxd, 0); idxd_mask_error_interrupts(idxd); free_irq(ie->vector, ie); pci_free_irq_vectors(pdev); } static int idxd_setup_wqs(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; struct idxd_wq *wq; struct device *conf_dev; int i, rc; idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *), GFP_KERNEL, dev_to_node(dev)); if (!idxd->wqs) return -ENOMEM; idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev)); if (!idxd->wq_enable_map) { kfree(idxd->wqs); return -ENOMEM; } for (i = 0; i < idxd->max_wqs; i++) { wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev)); if (!wq) { rc = -ENOMEM; goto err; } idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ); conf_dev = wq_confdev(wq); wq->id = i; wq->idxd = idxd; device_initialize(wq_confdev(wq)); conf_dev->parent = idxd_confdev(idxd); conf_dev->bus = &dsa_bus_type; conf_dev->type = &idxd_wq_device_type; rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id); if (rc < 0) { put_device(conf_dev); goto err; } mutex_init(&wq->wq_lock); init_waitqueue_head(&wq->err_queue); init_completion(&wq->wq_dead); init_completion(&wq->wq_resurrect); wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER; idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH); wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES; wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev)); if (!wq->wqcfg) { put_device(conf_dev); rc = -ENOMEM; goto err; } if (idxd->hw.wq_cap.op_config) { wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL); if (!wq->opcap_bmap) { put_device(conf_dev); rc = -ENOMEM; goto err; } bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS); } mutex_init(&wq->uc_lock); xa_init(&wq->upasid_xa); idxd->wqs[i] = wq; } return 0; err: while (--i >= 0) { wq = idxd->wqs[i]; conf_dev = wq_confdev(wq); put_device(conf_dev); } return rc; } static int idxd_setup_engines(struct idxd_device *idxd) { struct idxd_engine *engine; struct device *dev = &idxd->pdev->dev; struct device *conf_dev; int i, rc; idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *), GFP_KERNEL, dev_to_node(dev)); if (!idxd->engines) return -ENOMEM; for (i = 0; i < idxd->max_engines; i++) { engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev)); if (!engine) { rc = -ENOMEM; goto err; } idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE); conf_dev = engine_confdev(engine); engine->id = i; engine->idxd = idxd; device_initialize(conf_dev); conf_dev->parent = idxd_confdev(idxd); conf_dev->bus = &dsa_bus_type; conf_dev->type = &idxd_engine_device_type; rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id); if (rc < 0) { put_device(conf_dev); goto err; } idxd->engines[i] = engine; } return 0; err: while (--i >= 0) { engine = idxd->engines[i]; conf_dev = engine_confdev(engine); put_device(conf_dev); } return rc; } static int idxd_setup_groups(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; struct device *conf_dev; struct idxd_group *group; int i, rc; idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *), GFP_KERNEL, dev_to_node(dev)); if (!idxd->groups) return -ENOMEM; for (i = 0; i < idxd->max_groups; i++) { group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev)); if (!group) { rc = -ENOMEM; goto err; } idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP); conf_dev = group_confdev(group); group->id = i; group->idxd = idxd; 
device_initialize(conf_dev); conf_dev->parent = idxd_confdev(idxd); conf_dev->bus = &dsa_bus_type; conf_dev->type = &idxd_group_device_type; rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id); if (rc < 0) { put_device(conf_dev); goto err; } idxd->groups[i] = group; if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) { group->tc_a = 1; group->tc_b = 1; } else { group->tc_a = -1; group->tc_b = -1; } /* * The default value is the same as the value of * total read buffers in GRPCAP. */ group->rdbufs_allowed = idxd->max_rdbufs; } return 0; err: while (--i >= 0) { group = idxd->groups[i]; put_device(group_confdev(group)); } return rc; } static void idxd_cleanup_internals(struct idxd_device *idxd) { int i; for (i = 0; i < idxd->max_groups; i++) put_device(group_confdev(idxd->groups[i])); for (i = 0; i < idxd->max_engines; i++) put_device(engine_confdev(idxd->engines[i])); for (i = 0; i < idxd->max_wqs; i++) put_device(wq_confdev(idxd->wqs[i])); destroy_workqueue(idxd->wq); } static int idxd_init_evl(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; struct idxd_evl *evl; if (idxd->hw.gen_cap.evl_support == 0) return 0; evl = kzalloc_node(sizeof(*evl), GFP_KERNEL, dev_to_node(dev)); if (!evl) return -ENOMEM; spin_lock_init(&evl->lock); evl->size = IDXD_EVL_SIZE_MIN; idxd->evl_cache = kmem_cache_create(dev_name(idxd_confdev(idxd)), sizeof(struct idxd_evl_fault) + evl_ent_size(idxd), 0, 0, NULL); if (!idxd->evl_cache) { kfree(evl); return -ENOMEM; } idxd->evl = evl; return 0; } static int idxd_setup_internals(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; int rc, i; init_waitqueue_head(&idxd->cmd_waitq); rc = idxd_setup_wqs(idxd); if (rc < 0) goto err_wqs; rc = idxd_setup_engines(idxd); if (rc < 0) goto err_engine; rc = idxd_setup_groups(idxd); if (rc < 0) goto err_group; idxd->wq = create_workqueue(dev_name(dev)); if (!idxd->wq) { rc = -ENOMEM; goto err_wkq_create; } rc = idxd_init_evl(idxd); if (rc < 0) goto err_evl; return 0; err_evl: destroy_workqueue(idxd->wq); err_wkq_create: for (i = 0; i < idxd->max_groups; i++) put_device(group_confdev(idxd->groups[i])); err_group: for (i = 0; i < idxd->max_engines; i++) put_device(engine_confdev(idxd->engines[i])); err_engine: for (i = 0; i < idxd->max_wqs; i++) put_device(wq_confdev(idxd->wqs[i])); err_wqs: return rc; } static void idxd_read_table_offsets(struct idxd_device *idxd) { union offsets_reg offsets; struct device *dev = &idxd->pdev->dev; offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET); offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64)); idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT; dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset); idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT; dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset); idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT; dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset); idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT; dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset); } void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count) { int i, j, nr; for (i = 0, nr = 0; i < count; i++) { for (j = 0; j < BITS_PER_LONG_LONG; j++) { if (val[i] & BIT(j)) set_bit(nr, bmap); nr++; } } } static void idxd_read_caps(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; int i; /* reading generic capabilities */ idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + 
IDXD_GENCAP_OFFSET); dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits); if (idxd->hw.gen_cap.cmd_cap) { idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET); dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap); } /* reading command capabilities */ if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) idxd->request_int_handles = true; idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift; dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes); idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift); dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size); if (idxd->hw.gen_cap.config_en) set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags); /* reading group capabilities */ idxd->hw.group_cap.bits = ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET); dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits); idxd->max_groups = idxd->hw.group_cap.num_groups; dev_dbg(dev, "max groups: %u\n", idxd->max_groups); idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs; dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs); idxd->nr_rdbufs = idxd->max_rdbufs; /* read engine capabilities */ idxd->hw.engine_cap.bits = ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET); dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits); idxd->max_engines = idxd->hw.engine_cap.num_engines; dev_dbg(dev, "max engines: %u\n", idxd->max_engines); /* read workqueue capabilities */ idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET); dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits); idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size; dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size); idxd->max_wqs = idxd->hw.wq_cap.num_wqs; dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs); idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN); dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size); /* reading operation capabilities */ for (i = 0; i < 4; i++) { idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base + IDXD_OPCAP_OFFSET + i * sizeof(u64)); dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]); } multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4); /* read iaa cap */ if (idxd->data->type == IDXD_TYPE_IAX && idxd->hw.version >= DEVICE_VERSION_2) idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET); } static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data) { struct device *dev = &pdev->dev; struct device *conf_dev; struct idxd_device *idxd; int rc; idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev)); if (!idxd) return NULL; conf_dev = idxd_confdev(idxd); idxd->pdev = pdev; idxd->data = data; idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type); idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL); if (idxd->id < 0) return NULL; idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev)); if (!idxd->opcap_bmap) { ida_free(&idxd_ida, idxd->id); return NULL; } device_initialize(conf_dev); conf_dev->parent = dev; conf_dev->bus = &dsa_bus_type; conf_dev->type = idxd->data->dev_type; rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id); if (rc < 0) { put_device(conf_dev); return NULL; } spin_lock_init(&idxd->dev_lock); spin_lock_init(&idxd->cmd_lock); return idxd; } static int idxd_enable_system_pasid(struct idxd_device *idxd) { struct pci_dev *pdev = idxd->pdev; struct device *dev = &pdev->dev; struct iommu_domain *domain; ioasid_t pasid; int ret; /* * Attach a global PASID to the DMA domain so that we can 
use ENQCMDS * to submit work on buffers mapped by DMA API. */ domain = iommu_get_domain_for_dev(dev); if (!domain) return -EPERM; pasid = iommu_alloc_global_pasid(dev); if (pasid == IOMMU_PASID_INVALID) return -ENOSPC; /* * DMA domain is owned by the driver, it should support all valid * types such as DMA-FQ, identity, etc. */ ret = iommu_attach_device_pasid(domain, dev, pasid); if (ret) { dev_err(dev, "failed to attach device pasid %d, domain type %d", pasid, domain->type); iommu_free_global_pasid(pasid); return ret; } /* Since we set user privilege for kernel DMA, enable completion IRQ */ idxd_set_user_intr(idxd, 1); idxd->pasid = pasid; return ret; } static void idxd_disable_system_pasid(struct idxd_device *idxd) { struct pci_dev *pdev = idxd->pdev; struct device *dev = &pdev->dev; struct iommu_domain *domain; domain = iommu_get_domain_for_dev(dev); if (!domain) return; iommu_detach_device_pasid(domain, dev, idxd->pasid); iommu_free_global_pasid(idxd->pasid); idxd_set_user_intr(idxd, 0); idxd->sva = NULL; idxd->pasid = IOMMU_PASID_INVALID; } static int idxd_enable_sva(struct pci_dev *pdev) { int ret; ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF); if (ret) return ret; ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); if (ret) iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF); return ret; } static void idxd_disable_sva(struct pci_dev *pdev) { iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF); } static int idxd_probe(struct idxd_device *idxd) { struct pci_dev *pdev = idxd->pdev; struct device *dev = &pdev->dev; int rc; dev_dbg(dev, "%s entered and resetting device\n", __func__); rc = idxd_device_init_reset(idxd); if (rc < 0) return rc; dev_dbg(dev, "IDXD reset complete\n"); if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) { if (idxd_enable_sva(pdev)) { dev_warn(dev, "Unable to turn on user SVA feature.\n"); } else { set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags); rc = idxd_enable_system_pasid(idxd); if (rc) dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc); else set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); } } else if (!sva) { dev_warn(dev, "User forced SVA off via module param.\n"); } idxd_read_caps(idxd); idxd_read_table_offsets(idxd); rc = idxd_setup_internals(idxd); if (rc) goto err; /* If the configs are readonly, then load them from device */ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { dev_dbg(dev, "Loading RO device config\n"); rc = idxd_device_load_config(idxd); if (rc < 0) goto err_config; } rc = idxd_setup_interrupts(idxd); if (rc) goto err_config; idxd->major = idxd_cdev_get_major(idxd); rc = perfmon_pmu_init(idxd); if (rc < 0) dev_warn(dev, "Failed to initialize perfmon. 
No PMU support: %d\n", rc); dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id); return 0; err_config: idxd_cleanup_internals(idxd); err: if (device_pasid_enabled(idxd)) idxd_disable_system_pasid(idxd); if (device_user_pasid_enabled(idxd)) idxd_disable_sva(pdev); return rc; } static void idxd_cleanup(struct idxd_device *idxd) { perfmon_pmu_remove(idxd); idxd_cleanup_interrupts(idxd); idxd_cleanup_internals(idxd); if (device_pasid_enabled(idxd)) idxd_disable_system_pasid(idxd); if (device_user_pasid_enabled(idxd)) idxd_disable_sva(idxd->pdev); } static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct device *dev = &pdev->dev; struct idxd_device *idxd; struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data; int rc; rc = pci_enable_device(pdev); if (rc) return rc; dev_dbg(dev, "Alloc IDXD context\n"); idxd = idxd_alloc(pdev, data); if (!idxd) { rc = -ENOMEM; goto err_idxd_alloc; } dev_dbg(dev, "Mapping BARs\n"); idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0); if (!idxd->reg_base) { rc = -ENOMEM; goto err_iomap; } dev_dbg(dev, "Set DMA masks\n"); rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (rc) goto err; dev_dbg(dev, "Set PCI master\n"); pci_set_master(pdev); pci_set_drvdata(pdev, idxd); idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET); rc = idxd_probe(idxd); if (rc) { dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n"); goto err; } rc = idxd_register_devices(idxd); if (rc) { dev_err(dev, "IDXD sysfs setup failed\n"); goto err_dev_register; } rc = idxd_device_init_debugfs(idxd); if (rc) dev_warn(dev, "IDXD debugfs failed to setup\n"); dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n", idxd->hw.version); return 0; err_dev_register: idxd_cleanup(idxd); err: pci_iounmap(pdev, idxd->reg_base); err_iomap: put_device(idxd_confdev(idxd)); err_idxd_alloc: pci_disable_device(pdev); return rc; } void idxd_wqs_quiesce(struct idxd_device *idxd) { struct idxd_wq *wq; int i; for (i = 0; i < idxd->max_wqs; i++) { wq = idxd->wqs[i]; if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL) idxd_wq_quiesce(wq); } } static void idxd_shutdown(struct pci_dev *pdev) { struct idxd_device *idxd = pci_get_drvdata(pdev); struct idxd_irq_entry *irq_entry; int rc; rc = idxd_device_disable(idxd); if (rc) dev_err(&pdev->dev, "Disabling device failed\n"); irq_entry = &idxd->ie; synchronize_irq(irq_entry->vector); idxd_mask_error_interrupts(idxd); flush_workqueue(idxd->wq); } static void idxd_remove(struct pci_dev *pdev) { struct idxd_device *idxd = pci_get_drvdata(pdev); struct idxd_irq_entry *irq_entry; idxd_unregister_devices(idxd); /* * When ->release() is called for the idxd->conf_dev, it frees all the memory related * to the idxd context. The driver still needs those bits in order to do the rest of * the cleanup. However, we do need to unbound the idxd sub-driver. So take a ref * on the device here to hold off the freeing while allowing the idxd sub-driver * to unbind. 
*/ get_device(idxd_confdev(idxd)); device_unregister(idxd_confdev(idxd)); idxd_shutdown(pdev); if (device_pasid_enabled(idxd)) idxd_disable_system_pasid(idxd); idxd_device_remove_debugfs(idxd); irq_entry = idxd_get_ie(idxd, 0); free_irq(irq_entry->vector, irq_entry); pci_free_irq_vectors(pdev); pci_iounmap(pdev, idxd->reg_base); if (device_user_pasid_enabled(idxd)) idxd_disable_sva(pdev); pci_disable_device(pdev); destroy_workqueue(idxd->wq); perfmon_pmu_remove(idxd); put_device(idxd_confdev(idxd)); } static struct pci_driver idxd_pci_driver = { .name = DRV_NAME, .id_table = idxd_pci_tbl, .probe = idxd_pci_probe, .remove = idxd_remove, .shutdown = idxd_shutdown, }; static int __init idxd_init_module(void) { int err; /* * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in * enumerating the device. We can not utilize it. */ if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) { pr_warn("idxd driver failed to load without MOVDIR64B.\n"); return -ENODEV; } if (!cpu_feature_enabled(X86_FEATURE_ENQCMD)) pr_warn("Platform does not have ENQCMD(S) support.\n"); else support_enqcmd = true; perfmon_init(); err = idxd_driver_register(&idxd_drv); if (err < 0) goto err_idxd_driver_register; err = idxd_driver_register(&idxd_dmaengine_drv); if (err < 0) goto err_idxd_dmaengine_driver_register; err = idxd_driver_register(&idxd_user_drv); if (err < 0) goto err_idxd_user_driver_register; err = idxd_cdev_register(); if (err) goto err_cdev_register; err = idxd_init_debugfs(); if (err) goto err_debugfs; err = pci_register_driver(&idxd_pci_driver); if (err) goto err_pci_register; return 0; err_pci_register: idxd_remove_debugfs(); err_debugfs: idxd_cdev_remove(); err_cdev_register: idxd_driver_unregister(&idxd_user_drv); err_idxd_user_driver_register: idxd_driver_unregister(&idxd_dmaengine_drv); err_idxd_dmaengine_driver_register: idxd_driver_unregister(&idxd_drv); err_idxd_driver_register: return err; } module_init(idxd_init_module); static void __exit idxd_exit_module(void) { idxd_driver_unregister(&idxd_user_drv); idxd_driver_unregister(&idxd_dmaengine_drv); idxd_driver_unregister(&idxd_drv); pci_unregister_driver(&idxd_pci_driver); idxd_cdev_remove(); perfmon_exit(); idxd_remove_debugfs(); } module_exit(idxd_exit_module);
linux-master
drivers/dma/idxd/init.c
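During capability parsing, init.c folds the four 64-bit OPCAP registers into one flat bitmap with multi_u64_to_bmap(): bit j of word i becomes global bit i*64 + j. The sketch below models that expansion in standalone C; it prints set positions instead of calling set_bit(), and the register values are invented for illustration.

/* Standalone model of multi_u64_to_bmap(): expand an array of 64-bit
 * capability words into a flat list of set bit positions.
 */
#include <stdint.h>
#include <stdio.h>

static void multi_u64_to_list(const uint64_t *val, int count)
{
	int nr = 0;

	for (int i = 0; i < count; i++)
		for (int j = 0; j < 64; j++, nr++)
			if (val[i] & (1ULL << j))
				printf("capability bit %d set\n", nr);
}

int main(void)
{
	uint64_t opcap[4] = { 0x9ULL, 0x0, 1ULL << 3, 0x0 };

	multi_u64_to_list(opcap, 4);	/* bits 0, 3 and 131 in this example */
	return 0;
}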
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/dmaengine.h> #include <uapi/linux/idxd.h> #include "../dmaengine.h" #include "registers.h" #include "idxd.h" static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c) { struct idxd_dma_chan *idxd_chan; idxd_chan = container_of(c, struct idxd_dma_chan, chan); return idxd_chan->wq; } void idxd_dma_complete_txd(struct idxd_desc *desc, enum idxd_complete_type comp_type, bool free_desc) { struct idxd_device *idxd = desc->wq->idxd; struct dma_async_tx_descriptor *tx; struct dmaengine_result res; int complete = 1; if (desc->completion->status == DSA_COMP_SUCCESS) { res.result = DMA_TRANS_NOERROR; } else if (desc->completion->status) { if (idxd->request_int_handles && comp_type != IDXD_COMPLETE_ABORT && desc->completion->status == DSA_COMP_INT_HANDLE_INVAL && idxd_queue_int_handle_resubmit(desc)) return; res.result = DMA_TRANS_WRITE_FAILED; } else if (comp_type == IDXD_COMPLETE_ABORT) { res.result = DMA_TRANS_ABORTED; } else { complete = 0; } tx = &desc->txd; if (complete && tx->cookie) { dma_cookie_complete(tx); dma_descriptor_unmap(tx); dmaengine_desc_get_callback_invoke(tx, &res); tx->callback = NULL; tx->callback_result = NULL; } if (free_desc) idxd_free_desc(desc->wq, desc); } static void op_flag_setup(unsigned long flags, u32 *desc_flags) { *desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR; if (flags & DMA_PREP_INTERRUPT) *desc_flags |= IDXD_OP_FLAG_RCI; } static inline void idxd_prep_desc_common(struct idxd_wq *wq, struct dsa_hw_desc *hw, char opcode, u64 addr_f1, u64 addr_f2, u64 len, u64 compl, u32 flags) { hw->flags = flags; hw->opcode = opcode; hw->src_addr = addr_f1; hw->dst_addr = addr_f2; hw->xfer_size = len; /* * For dedicated WQ, this field is ignored and HW will use the WQCFG.priv * field instead. This field should be set to 0 for kernel descriptors * since kernel DMA on VT-d supports "user" privilege only. 
*/ hw->priv = 0; hw->completion_addr = compl; } static struct dma_async_tx_descriptor * idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags) { struct idxd_wq *wq = to_idxd_wq(c); u32 desc_flags; struct idxd_desc *desc; if (wq->state != IDXD_WQ_ENABLED) return NULL; op_flag_setup(flags, &desc_flags); desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); if (IS_ERR(desc)) return NULL; idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP, 0, 0, 0, desc->compl_dma, desc_flags); desc->txd.flags = flags; return &desc->txd; } static struct dma_async_tx_descriptor * idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest, dma_addr_t dma_src, size_t len, unsigned long flags) { struct idxd_wq *wq = to_idxd_wq(c); u32 desc_flags; struct idxd_device *idxd = wq->idxd; struct idxd_desc *desc; if (wq->state != IDXD_WQ_ENABLED) return NULL; if (len > idxd->max_xfer_bytes) return NULL; op_flag_setup(flags, &desc_flags); desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); if (IS_ERR(desc)) return NULL; idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE, dma_src, dma_dest, len, desc->compl_dma, desc_flags); desc->txd.flags = flags; return &desc->txd; } static int idxd_dma_alloc_chan_resources(struct dma_chan *chan) { struct idxd_wq *wq = to_idxd_wq(chan); struct device *dev = &wq->idxd->pdev->dev; idxd_wq_get(wq); dev_dbg(dev, "%s: client_count: %d\n", __func__, idxd_wq_refcount(wq)); return 0; } static void idxd_dma_free_chan_resources(struct dma_chan *chan) { struct idxd_wq *wq = to_idxd_wq(chan); struct device *dev = &wq->idxd->pdev->dev; idxd_wq_put(wq); dev_dbg(dev, "%s: client_count: %d\n", __func__, idxd_wq_refcount(wq)); } static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { return DMA_OUT_OF_ORDER; } /* * issue_pending() does not need to do anything since tx_submit() does the job * already. 
*/ static void idxd_dma_issue_pending(struct dma_chan *dma_chan) { } static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx) { struct dma_chan *c = tx->chan; struct idxd_wq *wq = to_idxd_wq(c); dma_cookie_t cookie; int rc; struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd); cookie = dma_cookie_assign(tx); rc = idxd_submit_desc(wq, desc); if (rc < 0) { idxd_free_desc(wq, desc); return rc; } return cookie; } static void idxd_dma_release(struct dma_device *device) { struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma); kfree(idxd_dma); } int idxd_register_dma_device(struct idxd_device *idxd) { struct idxd_dma_dev *idxd_dma; struct dma_device *dma; struct device *dev = &idxd->pdev->dev; int rc; idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev)); if (!idxd_dma) return -ENOMEM; dma = &idxd_dma->dma; INIT_LIST_HEAD(&dma->channels); dma->dev = dev; dma_cap_set(DMA_INTERRUPT, dma->cap_mask); dma_cap_set(DMA_PRIVATE, dma->cap_mask); dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask); dma->device_release = idxd_dma_release; dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt; if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) { dma_cap_set(DMA_MEMCPY, dma->cap_mask); dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy; } dma->device_tx_status = idxd_dma_tx_status; dma->device_issue_pending = idxd_dma_issue_pending; dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources; dma->device_free_chan_resources = idxd_dma_free_chan_resources; rc = dma_async_device_register(dma); if (rc < 0) { kfree(idxd_dma); return rc; } idxd_dma->idxd = idxd; /* * This pointer is protected by the refs taken by the dma_chan. It will remain valid * as long as there are outstanding channels. */ idxd->idxd_dma = idxd_dma; return 0; } void idxd_unregister_dma_device(struct idxd_device *idxd) { dma_async_device_unregister(&idxd->idxd_dma->dma); } static int idxd_register_dma_channel(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct dma_device *dma = &idxd->idxd_dma->dma; struct device *dev = &idxd->pdev->dev; struct idxd_dma_chan *idxd_chan; struct dma_chan *chan; int rc, i; idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev)); if (!idxd_chan) return -ENOMEM; chan = &idxd_chan->chan; chan->device = dma; list_add_tail(&chan->device_node, &dma->channels); for (i = 0; i < wq->num_descs; i++) { struct idxd_desc *desc = wq->descs[i]; dma_async_tx_descriptor_init(&desc->txd, chan); desc->txd.tx_submit = idxd_dma_tx_submit; } rc = dma_async_device_channel_register(dma, chan); if (rc < 0) { kfree(idxd_chan); return rc; } wq->idxd_chan = idxd_chan; idxd_chan->wq = wq; get_device(wq_confdev(wq)); return 0; } static void idxd_unregister_dma_channel(struct idxd_wq *wq) { struct idxd_dma_chan *idxd_chan = wq->idxd_chan; struct dma_chan *chan = &idxd_chan->chan; struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma; dma_async_device_channel_unregister(&idxd_dma->dma, chan); list_del(&chan->device_node); kfree(wq->idxd_chan); wq->idxd_chan = NULL; put_device(wq_confdev(wq)); } static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev) { struct device *dev = &idxd_dev->conf_dev; struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); struct idxd_device *idxd = wq->idxd; int rc; if (idxd->state != IDXD_DEV_ENABLED) return -ENXIO; mutex_lock(&wq->wq_lock); wq->type = IDXD_WQT_KERNEL; rc = drv_enable_wq(wq); if (rc < 0) { dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc); rc = -ENXIO; goto err; } rc = 
idxd_register_dma_channel(wq); if (rc < 0) { idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR; dev_dbg(dev, "Failed to register dma channel\n"); goto err_dma; } idxd->cmd_status = 0; mutex_unlock(&wq->wq_lock); return 0; err_dma: drv_disable_wq(wq); err: wq->type = IDXD_WQT_NONE; mutex_unlock(&wq->wq_lock); return rc; } static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev) { struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); mutex_lock(&wq->wq_lock); __idxd_wq_quiesce(wq); idxd_unregister_dma_channel(wq); drv_disable_wq(wq); mutex_unlock(&wq->wq_lock); } static enum idxd_dev_type dev_types[] = { IDXD_DEV_WQ, IDXD_DEV_NONE, }; struct idxd_device_driver idxd_dmaengine_drv = { .probe = idxd_dmaengine_drv_probe, .remove = idxd_dmaengine_drv_remove, .name = "dmaengine", .type = dev_types, }; EXPORT_SYMBOL_GPL(idxd_dmaengine_drv);
linux-master
drivers/dma/idxd/dma.c
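The dmaengine glue in dma.c hands the core a pointer to the `dma_chan` embedded inside the driver's own `idxd_dma_chan`, and to_idxd_wq() recovers the wrapper with container_of(). A self-contained sketch of that pattern, using stand-in types rather than the kernel's:

/* Standalone model of the container_of() recovery used by to_idxd_wq(). */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_dma_chan { int chan_id; };

struct fake_idxd_dma_chan {
	struct fake_dma_chan chan;	/* embedded, handed out to the core */
	int wq_id;			/* private driver state */
};

static int chan_to_wq_id(struct fake_dma_chan *c)
{
	struct fake_idxd_dma_chan *ic =
		container_of(c, struct fake_idxd_dma_chan, chan);

	return ic->wq_id;
}

int main(void)
{
	struct fake_idxd_dma_chan ic = { .chan = { .chan_id = 0 }, .wq_id = 7 };

	printf("wq id: %d\n", chan_to_wq_id(&ic.chan));	/* prints 7 */
	return 0;
}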
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2021 Intel Corporation. All rights rsvd. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include "idxd.h" int __idxd_driver_register(struct idxd_device_driver *idxd_drv, struct module *owner, const char *mod_name) { struct device_driver *drv = &idxd_drv->drv; if (!idxd_drv->type) { pr_debug("driver type not set (%ps)\n", __builtin_return_address(0)); return -EINVAL; } drv->name = idxd_drv->name; drv->bus = &dsa_bus_type; drv->owner = owner; drv->mod_name = mod_name; return driver_register(drv); } EXPORT_SYMBOL_GPL(__idxd_driver_register); void idxd_driver_unregister(struct idxd_device_driver *idxd_drv) { driver_unregister(&idxd_drv->drv); } EXPORT_SYMBOL_GPL(idxd_driver_unregister); static int idxd_config_bus_match(struct device *dev, struct device_driver *drv) { struct idxd_device_driver *idxd_drv = container_of(drv, struct idxd_device_driver, drv); struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev); int i = 0; while (idxd_drv->type[i] != IDXD_DEV_NONE) { if (idxd_dev->type == idxd_drv->type[i]) return 1; i++; } return 0; } static int idxd_config_bus_probe(struct device *dev) { struct idxd_device_driver *idxd_drv = container_of(dev->driver, struct idxd_device_driver, drv); struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev); return idxd_drv->probe(idxd_dev); } static void idxd_config_bus_remove(struct device *dev) { struct idxd_device_driver *idxd_drv = container_of(dev->driver, struct idxd_device_driver, drv); struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev); idxd_drv->remove(idxd_dev); } struct bus_type dsa_bus_type = { .name = "dsa", .match = idxd_config_bus_match, .probe = idxd_config_bus_probe, .remove = idxd_config_bus_remove, }; EXPORT_SYMBOL_GPL(dsa_bus_type); static int __init dsa_bus_init(void) { return bus_register(&dsa_bus_type); } module_init(dsa_bus_init); static void __exit dsa_bus_exit(void) { bus_unregister(&dsa_bus_type); } module_exit(dsa_bus_exit); MODULE_DESCRIPTION("IDXD driver dsa_bus_type driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/dma/idxd/bus.c
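The dsa bus match callback in bus.c walks the driver's supported-type array until it either finds the device's type or hits the IDXD_DEV_NONE sentinel. A minimal standalone model of that sentinel-terminated match loop, with illustrative enum values:

/* Standalone model of idxd_config_bus_match(). */
#include <stdio.h>

enum fake_dev_type { DEV_NONE = 0, DEV_IDXD, DEV_WQ, DEV_ENGINE, DEV_GROUP };

static int match(const enum fake_dev_type *supported, enum fake_dev_type dev)
{
	for (int i = 0; supported[i] != DEV_NONE; i++)
		if (supported[i] == dev)
			return 1;
	return 0;
}

int main(void)
{
	/* e.g. the dmaengine sub-driver binds only to work queues */
	enum fake_dev_type dmaengine_types[] = { DEV_WQ, DEV_NONE };

	printf("wq matches: %d\n", match(dmaengine_types, DEV_WQ));		/* 1 */
	printf("group matches: %d\n", match(dmaengine_types, DEV_GROUP));	/* 0 */
	return 0;
}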
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <uapi/linux/idxd.h> #include "idxd.h" #include "registers.h" static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu) { struct idxd_desc *desc; struct idxd_device *idxd = wq->idxd; desc = wq->descs[idx]; memset(desc->hw, 0, sizeof(struct dsa_hw_desc)); memset(desc->completion, 0, idxd->data->compl_size); desc->cpu = cpu; if (device_pasid_enabled(idxd)) desc->hw->pasid = idxd->pasid; return desc; } struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype) { int cpu, idx; struct idxd_device *idxd = wq->idxd; DEFINE_SBQ_WAIT(wait); struct sbq_wait_state *ws; struct sbitmap_queue *sbq; if (idxd->state != IDXD_DEV_ENABLED) return ERR_PTR(-EIO); sbq = &wq->sbq; idx = sbitmap_queue_get(sbq, &cpu); if (idx < 0) { if (optype == IDXD_OP_NONBLOCK) return ERR_PTR(-EAGAIN); } else { return __get_desc(wq, idx, cpu); } ws = &sbq->ws[0]; for (;;) { sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_INTERRUPTIBLE); if (signal_pending_state(TASK_INTERRUPTIBLE, current)) break; idx = sbitmap_queue_get(sbq, &cpu); if (idx >= 0) break; schedule(); } sbitmap_finish_wait(sbq, ws, &wait); if (idx < 0) return ERR_PTR(-EAGAIN); return __get_desc(wq, idx, cpu); } void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc) { int cpu = desc->cpu; desc->cpu = -1; sbitmap_queue_clear(&wq->sbq, desc->id, cpu); } static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, struct idxd_desc *desc) { struct idxd_desc *d, *n; lockdep_assert_held(&ie->list_lock); list_for_each_entry_safe(d, n, &ie->work_list, list) { if (d == desc) { list_del(&d->list); return d; } } /* * At this point, the desc needs to be aborted is held by the completion * handler where it has taken it off the pending list but has not added to the * work list. It will be cleaned up by the interrupt handler when it sees the * IDXD_COMP_DESC_ABORT for completion status. */ return NULL; } static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, struct idxd_desc *desc) { struct idxd_desc *d, *t, *found = NULL; struct llist_node *head; LIST_HEAD(flist); desc->completion->status = IDXD_COMP_DESC_ABORT; /* * Grab the list lock so it will block the irq thread handler. This allows the * abort code to locate the descriptor need to be aborted. */ spin_lock(&ie->list_lock); head = llist_del_all(&ie->pending_llist); if (head) { llist_for_each_entry_safe(d, t, head, llnode) { if (d == desc) { found = desc; continue; } if (d->completion->status) list_add_tail(&d->list, &flist); else list_add_tail(&d->list, &ie->work_list); } } if (!found) found = list_abort_desc(wq, ie, desc); spin_unlock(&ie->list_lock); if (found) idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, false); /* * completing the descriptor will return desc to allocator and * the desc can be acquired by a different process and the * desc->list can be modified. Delete desc from list so the * list trasversing does not get corrupted by the other process. */ list_for_each_entry_safe(d, t, &flist, list) { list_del_init(&d->list); idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, true); } } /* * ENQCMDS typically fail when the WQ is inactive or busy. On host submission, the driver * has better control of number of descriptors being submitted to a shared wq by limiting * the number of driver allocated descriptors to the wq size. 
However, when the swq is * exported to a guest kernel, it may be shared with multiple guest kernels. This means * the likelihood of getting busy returned on the swq when submitting goes significantly up. * Having a tunable retry mechanism allows the driver to keep trying for a bit before giving * up. The sysfs knob can be tuned by the system administrator. */ int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc) { unsigned int retries = wq->enqcmds_retries; int rc; do { rc = enqcmds(portal, desc); if (rc == 0) break; cpu_relax(); } while (retries--); return rc; } int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) { struct idxd_device *idxd = wq->idxd; struct idxd_irq_entry *ie = NULL; u32 desc_flags = desc->hw->flags; void __iomem *portal; int rc; if (idxd->state != IDXD_DEV_ENABLED) return -EIO; if (!percpu_ref_tryget_live(&wq->wq_active)) { wait_for_completion(&wq->wq_resurrect); if (!percpu_ref_tryget_live(&wq->wq_active)) return -ENXIO; } portal = idxd_wq_portal_addr(wq); /* * The wmb() flushes writes to coherent DMA data before * possibly triggering a DMA read. The wmb() is necessary * even on UP because the recipient is a device. */ wmb(); /* * Pending the descriptor to the lockless list for the irq_entry * that we designated the descriptor to. */ if (desc_flags & IDXD_OP_FLAG_RCI) { ie = &wq->ie; desc->hw->int_handle = ie->int_handle; llist_add(&desc->llnode, &ie->pending_llist); } if (wq_dedicated(wq)) { iosubmit_cmds512(portal, desc->hw, 1); } else { rc = idxd_enqcmds(wq, portal, desc->hw); if (rc < 0) { percpu_ref_put(&wq->wq_active); /* abort operation frees the descriptor */ if (ie) llist_abort_desc(wq, ie, desc); return rc; } } percpu_ref_put(&wq->wq_active); return 0; }
linux-master
drivers/dma/idxd/submit.c
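The comment ahead of idxd_enqcmds() in submit.c explains why submissions to a shared work queue are retried a tunable number of times before giving up. Below is a self-contained model of that bounded retry loop; fake_enqcmds() is a stand-in for the real ENQCMDS-based helper and simply reports busy a few times.

/* Standalone model of the bounded retry loop in idxd_enqcmds(). */
#include <errno.h>
#include <stdio.h>

static int busy_left = 3;

static int fake_enqcmds(void)
{
	return busy_left-- > 0 ? -EAGAIN : 0;	/* -EAGAIN: queue reported busy */
}

static int submit_with_retries(unsigned int retries)
{
	int rc;

	do {
		rc = fake_enqcmds();
		if (rc == 0)
			break;
		/* the driver calls cpu_relax() here before retrying */
	} while (retries--);

	return rc;
}

int main(void)
{
	int rc = submit_with_retries(5);

	printf("submit %s (rc=%d)\n", rc ? "failed" : "accepted", rc);
	return 0;
}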
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/dmaengine.h> #include <linux/irq.h> #include <uapi/linux/idxd.h> #include "../dmaengine.h" #include "idxd.h" #include "registers.h" static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand, u32 *status); static void idxd_device_wqs_clear_state(struct idxd_device *idxd); static void idxd_wq_disable_cleanup(struct idxd_wq *wq); /* Interrupt control bits */ void idxd_unmask_error_interrupts(struct idxd_device *idxd) { union genctrl_reg genctrl; genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET); genctrl.softerr_int_en = 1; genctrl.halt_int_en = 1; iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET); } void idxd_mask_error_interrupts(struct idxd_device *idxd) { union genctrl_reg genctrl; genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET); genctrl.softerr_int_en = 0; genctrl.halt_int_en = 0; iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET); } static void free_hw_descs(struct idxd_wq *wq) { int i; for (i = 0; i < wq->num_descs; i++) kfree(wq->hw_descs[i]); kfree(wq->hw_descs); } static int alloc_hw_descs(struct idxd_wq *wq, int num) { struct device *dev = &wq->idxd->pdev->dev; int i; int node = dev_to_node(dev); wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *), GFP_KERNEL, node); if (!wq->hw_descs) return -ENOMEM; for (i = 0; i < num; i++) { wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]), GFP_KERNEL, node); if (!wq->hw_descs[i]) { free_hw_descs(wq); return -ENOMEM; } } return 0; } static void free_descs(struct idxd_wq *wq) { int i; for (i = 0; i < wq->num_descs; i++) kfree(wq->descs[i]); kfree(wq->descs); } static int alloc_descs(struct idxd_wq *wq, int num) { struct device *dev = &wq->idxd->pdev->dev; int i; int node = dev_to_node(dev); wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *), GFP_KERNEL, node); if (!wq->descs) return -ENOMEM; for (i = 0; i < num; i++) { wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]), GFP_KERNEL, node); if (!wq->descs[i]) { free_descs(wq); return -ENOMEM; } } return 0; } /* WQ control bits */ int idxd_wq_alloc_resources(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; int rc, num_descs, i; if (wq->type != IDXD_WQT_KERNEL) return 0; num_descs = wq_dedicated(wq) ? 
wq->size : wq->threshold; wq->num_descs = num_descs; rc = alloc_hw_descs(wq, num_descs); if (rc < 0) return rc; wq->compls_size = num_descs * idxd->data->compl_size; wq->compls = dma_alloc_coherent(dev, wq->compls_size, &wq->compls_addr, GFP_KERNEL); if (!wq->compls) { rc = -ENOMEM; goto fail_alloc_compls; } rc = alloc_descs(wq, num_descs); if (rc < 0) goto fail_alloc_descs; rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL, dev_to_node(dev)); if (rc < 0) goto fail_sbitmap_init; for (i = 0; i < num_descs; i++) { struct idxd_desc *desc = wq->descs[i]; desc->hw = wq->hw_descs[i]; if (idxd->data->type == IDXD_TYPE_DSA) desc->completion = &wq->compls[i]; else if (idxd->data->type == IDXD_TYPE_IAX) desc->iax_completion = &wq->iax_compls[i]; desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i; desc->id = i; desc->wq = wq; desc->cpu = -1; } return 0; fail_sbitmap_init: free_descs(wq); fail_alloc_descs: dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr); fail_alloc_compls: free_hw_descs(wq); return rc; } void idxd_wq_free_resources(struct idxd_wq *wq) { struct device *dev = &wq->idxd->pdev->dev; if (wq->type != IDXD_WQT_KERNEL) return; free_hw_descs(wq); free_descs(wq); dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr); sbitmap_queue_free(&wq->sbq); } int idxd_wq_enable(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; u32 status; if (wq->state == IDXD_WQ_ENABLED) { dev_dbg(dev, "WQ %d already enabled\n", wq->id); return 0; } idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status); if (status != IDXD_CMDSTS_SUCCESS && status != IDXD_CMDSTS_ERR_WQ_ENABLED) { dev_dbg(dev, "WQ enable failed: %#x\n", status); return -ENXIO; } wq->state = IDXD_WQ_ENABLED; set_bit(wq->id, idxd->wq_enable_map); dev_dbg(dev, "WQ %d enabled\n", wq->id); return 0; } int idxd_wq_disable(struct idxd_wq *wq, bool reset_config) { struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; u32 status, operand; dev_dbg(dev, "Disabling WQ %d\n", wq->id); if (wq->state != IDXD_WQ_ENABLED) { dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state); return 0; } operand = BIT(wq->id % 16) | ((wq->id / 16) << 16); idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status); if (status != IDXD_CMDSTS_SUCCESS) { dev_dbg(dev, "WQ disable failed: %#x\n", status); return -ENXIO; } if (reset_config) idxd_wq_disable_cleanup(wq); clear_bit(wq->id, idxd->wq_enable_map); wq->state = IDXD_WQ_DISABLED; dev_dbg(dev, "WQ %d disabled\n", wq->id); return 0; } void idxd_wq_drain(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; u32 operand; if (wq->state != IDXD_WQ_ENABLED) { dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state); return; } dev_dbg(dev, "Draining WQ %d\n", wq->id); operand = BIT(wq->id % 16) | ((wq->id / 16) << 16); idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL); } void idxd_wq_reset(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; u32 operand; if (wq->state != IDXD_WQ_ENABLED) { dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state); return; } operand = BIT(wq->id % 16) | ((wq->id / 16) << 16); idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL); idxd_wq_disable_cleanup(wq); } int idxd_wq_map_portal(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct pci_dev *pdev = idxd->pdev; struct device *dev = &pdev->dev; resource_size_t start; start = pci_resource_start(pdev, 
IDXD_WQ_BAR); start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED); wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE); if (!wq->portal) return -ENOMEM; return 0; } void idxd_wq_unmap_portal(struct idxd_wq *wq) { struct device *dev = &wq->idxd->pdev->dev; devm_iounmap(dev, wq->portal); wq->portal = NULL; wq->portal_offset = 0; } void idxd_wqs_unmap_portal(struct idxd_device *idxd) { int i; for (i = 0; i < idxd->max_wqs; i++) { struct idxd_wq *wq = idxd->wqs[i]; if (wq->portal) idxd_wq_unmap_portal(wq); } } static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid) { struct idxd_device *idxd = wq->idxd; union wqcfg wqcfg; unsigned int offset; offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX); spin_lock(&idxd->dev_lock); wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset); wqcfg.pasid_en = 1; wqcfg.pasid = pasid; wq->wqcfg->bits[WQCFG_PASID_IDX] = wqcfg.bits[WQCFG_PASID_IDX]; iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset); spin_unlock(&idxd->dev_lock); } int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid) { int rc; rc = idxd_wq_disable(wq, false); if (rc < 0) return rc; __idxd_wq_set_pasid_locked(wq, pasid); rc = idxd_wq_enable(wq); if (rc < 0) return rc; return 0; } int idxd_wq_disable_pasid(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; int rc; union wqcfg wqcfg; unsigned int offset; rc = idxd_wq_disable(wq, false); if (rc < 0) return rc; offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX); spin_lock(&idxd->dev_lock); wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset); wqcfg.pasid_en = 0; wqcfg.pasid = 0; iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset); spin_unlock(&idxd->dev_lock); rc = idxd_wq_enable(wq); if (rc < 0) return rc; return 0; } static void idxd_wq_disable_cleanup(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; lockdep_assert_held(&wq->wq_lock); wq->state = IDXD_WQ_DISABLED; memset(wq->wqcfg, 0, idxd->wqcfg_size); wq->type = IDXD_WQT_NONE; wq->threshold = 0; wq->priority = 0; wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES; wq->flags = 0; memset(wq->name, 0, WQ_NAME_SIZE); wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER; idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH); if (wq->opcap_bmap) bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS); } static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq) { lockdep_assert_held(&wq->wq_lock); wq->size = 0; wq->group = NULL; } static void idxd_wq_ref_release(struct percpu_ref *ref) { struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active); complete(&wq->wq_dead); } int idxd_wq_init_percpu_ref(struct idxd_wq *wq) { int rc; memset(&wq->wq_active, 0, sizeof(wq->wq_active)); rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release, PERCPU_REF_ALLOW_REINIT, GFP_KERNEL); if (rc < 0) return rc; reinit_completion(&wq->wq_dead); reinit_completion(&wq->wq_resurrect); return 0; } void __idxd_wq_quiesce(struct idxd_wq *wq) { lockdep_assert_held(&wq->wq_lock); reinit_completion(&wq->wq_resurrect); percpu_ref_kill(&wq->wq_active); complete_all(&wq->wq_resurrect); wait_for_completion(&wq->wq_dead); } void idxd_wq_quiesce(struct idxd_wq *wq) { mutex_lock(&wq->wq_lock); __idxd_wq_quiesce(wq); mutex_unlock(&wq->wq_lock); } /* Device control bits */ static inline bool idxd_is_enabled(struct idxd_device *idxd) { union gensts_reg gensts; gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET); if (gensts.state == IDXD_DEVICE_STATE_ENABLED) return true; return false; } static inline bool 
idxd_device_is_halted(struct idxd_device *idxd) { union gensts_reg gensts; gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET); return (gensts.state == IDXD_DEVICE_STATE_HALT); } /* * This is function is only used for reset during probe and will * poll for completion. Once the device is setup with interrupts, * all commands will be done via interrupt completion. */ int idxd_device_init_reset(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; union idxd_command_reg cmd; if (idxd_device_is_halted(idxd)) { dev_warn(&idxd->pdev->dev, "Device is HALTED!\n"); return -ENXIO; } memset(&cmd, 0, sizeof(cmd)); cmd.cmd = IDXD_CMD_RESET_DEVICE; dev_dbg(dev, "%s: sending reset for init.\n", __func__); spin_lock(&idxd->cmd_lock); iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET); while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE) cpu_relax(); spin_unlock(&idxd->cmd_lock); return 0; } static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand, u32 *status) { union idxd_command_reg cmd; DECLARE_COMPLETION_ONSTACK(done); u32 stat; if (idxd_device_is_halted(idxd)) { dev_warn(&idxd->pdev->dev, "Device is HALTED!\n"); if (status) *status = IDXD_CMDSTS_HW_ERR; return; } memset(&cmd, 0, sizeof(cmd)); cmd.cmd = cmd_code; cmd.operand = operand; cmd.int_req = 1; spin_lock(&idxd->cmd_lock); wait_event_lock_irq(idxd->cmd_waitq, !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags), idxd->cmd_lock); dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n", __func__, cmd_code, operand); idxd->cmd_status = 0; __set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags); idxd->cmd_done = &done; iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET); /* * After command submitted, release lock and go to sleep until * the command completes via interrupt. 
*/ spin_unlock(&idxd->cmd_lock); wait_for_completion(&done); stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET); spin_lock(&idxd->cmd_lock); if (status) *status = stat; idxd->cmd_status = stat & GENMASK(7, 0); __clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags); /* Wake up other pending commands */ wake_up(&idxd->cmd_waitq); spin_unlock(&idxd->cmd_lock); } int idxd_device_enable(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; u32 status; if (idxd_is_enabled(idxd)) { dev_dbg(dev, "Device already enabled\n"); return -ENXIO; } idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status); /* If the command is successful or if the device was enabled */ if (status != IDXD_CMDSTS_SUCCESS && status != IDXD_CMDSTS_ERR_DEV_ENABLED) { dev_dbg(dev, "%s: err_code: %#x\n", __func__, status); return -ENXIO; } idxd->state = IDXD_DEV_ENABLED; return 0; } int idxd_device_disable(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; u32 status; if (!idxd_is_enabled(idxd)) { dev_dbg(dev, "Device is not enabled\n"); return 0; } idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status); /* If the command is successful or if the device was disabled */ if (status != IDXD_CMDSTS_SUCCESS && !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) { dev_dbg(dev, "%s: err_code: %#x\n", __func__, status); return -ENXIO; } idxd_device_clear_state(idxd); return 0; } void idxd_device_reset(struct idxd_device *idxd) { idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL); idxd_device_clear_state(idxd); spin_lock(&idxd->dev_lock); idxd_unmask_error_interrupts(idxd); spin_unlock(&idxd->dev_lock); } void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid) { struct device *dev = &idxd->pdev->dev; u32 operand; operand = pasid; dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand); idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL); dev_dbg(dev, "pasid %d drained\n", pasid); } int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle, enum idxd_interrupt_type irq_type) { struct device *dev = &idxd->pdev->dev; u32 operand, status; if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))) return -EOPNOTSUPP; dev_dbg(dev, "get int handle, idx %d\n", idx); operand = idx & GENMASK(15, 0); if (irq_type == IDXD_IRQ_IMS) operand |= CMD_INT_HANDLE_IMS; dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_REQUEST_INT_HANDLE, operand); idxd_cmd_exec(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand, &status); if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) { dev_dbg(dev, "request int handle failed: %#x\n", status); return -ENXIO; } *handle = (status >> IDXD_CMDSTS_RES_SHIFT) & GENMASK(15, 0); dev_dbg(dev, "int handle acquired: %u\n", *handle); return 0; } int idxd_device_release_int_handle(struct idxd_device *idxd, int handle, enum idxd_interrupt_type irq_type) { struct device *dev = &idxd->pdev->dev; u32 operand, status; union idxd_command_reg cmd; if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE))) return -EOPNOTSUPP; dev_dbg(dev, "release int handle, handle %d\n", handle); memset(&cmd, 0, sizeof(cmd)); operand = handle & GENMASK(15, 0); if (irq_type == IDXD_IRQ_IMS) operand |= CMD_INT_HANDLE_IMS; cmd.cmd = IDXD_CMD_RELEASE_INT_HANDLE; cmd.operand = operand; dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand); spin_lock(&idxd->cmd_lock); iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET); while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE) cpu_relax(); status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET); 
spin_unlock(&idxd->cmd_lock); if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) { dev_dbg(dev, "release int handle failed: %#x\n", status); return -ENXIO; } dev_dbg(dev, "int handle released.\n"); return 0; } /* Device configuration bits */ static void idxd_engines_clear_state(struct idxd_device *idxd) { struct idxd_engine *engine; int i; lockdep_assert_held(&idxd->dev_lock); for (i = 0; i < idxd->max_engines; i++) { engine = idxd->engines[i]; engine->group = NULL; } } static void idxd_groups_clear_state(struct idxd_device *idxd) { struct idxd_group *group; int i; lockdep_assert_held(&idxd->dev_lock); for (i = 0; i < idxd->max_groups; i++) { group = idxd->groups[i]; memset(&group->grpcfg, 0, sizeof(group->grpcfg)); group->num_engines = 0; group->num_wqs = 0; group->use_rdbuf_limit = false; /* * The default value is the same as the value of * total read buffers in GRPCAP. */ group->rdbufs_allowed = idxd->max_rdbufs; group->rdbufs_reserved = 0; if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) { group->tc_a = 1; group->tc_b = 1; } else { group->tc_a = -1; group->tc_b = -1; } group->desc_progress_limit = 0; group->batch_progress_limit = 0; } } static void idxd_device_wqs_clear_state(struct idxd_device *idxd) { int i; for (i = 0; i < idxd->max_wqs; i++) { struct idxd_wq *wq = idxd->wqs[i]; mutex_lock(&wq->wq_lock); idxd_wq_disable_cleanup(wq); idxd_wq_device_reset_cleanup(wq); mutex_unlock(&wq->wq_lock); } } void idxd_device_clear_state(struct idxd_device *idxd) { /* IDXD is always disabled. Other states are cleared only when IDXD is configurable. */ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { /* * Clearing wq state is protected by wq lock. * So no need to be protected by device lock. */ idxd_device_wqs_clear_state(idxd); spin_lock(&idxd->dev_lock); idxd_groups_clear_state(idxd); idxd_engines_clear_state(idxd); } else { spin_lock(&idxd->dev_lock); } idxd->state = IDXD_DEV_DISABLED; spin_unlock(&idxd->dev_lock); } static int idxd_device_evl_setup(struct idxd_device *idxd) { union gencfg_reg gencfg; union evlcfg_reg evlcfg; union genctrl_reg genctrl; struct device *dev = &idxd->pdev->dev; void *addr; dma_addr_t dma_addr; int size; struct idxd_evl *evl = idxd->evl; unsigned long *bmap; int rc; if (!evl) return 0; size = evl_size(idxd); bmap = bitmap_zalloc(size, GFP_KERNEL); if (!bmap) { rc = -ENOMEM; goto err_bmap; } /* * Address needs to be page aligned. However, dma_alloc_coherent() provides * at minimal page size aligned address. No manual alignment required. 
*/ addr = dma_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL); if (!addr) { rc = -ENOMEM; goto err_alloc; } spin_lock(&evl->lock); evl->log = addr; evl->dma = dma_addr; evl->log_size = size; evl->bmap = bmap; memset(&evlcfg, 0, sizeof(evlcfg)); evlcfg.bits[0] = dma_addr & GENMASK(63, 12); evlcfg.size = evl->size; iowrite64(evlcfg.bits[0], idxd->reg_base + IDXD_EVLCFG_OFFSET); iowrite64(evlcfg.bits[1], idxd->reg_base + IDXD_EVLCFG_OFFSET + 8); genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET); genctrl.evl_int_en = 1; iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET); gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); gencfg.evl_en = 1; iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET); spin_unlock(&evl->lock); return 0; err_alloc: bitmap_free(bmap); err_bmap: return rc; } static void idxd_device_evl_free(struct idxd_device *idxd) { union gencfg_reg gencfg; union genctrl_reg genctrl; struct device *dev = &idxd->pdev->dev; struct idxd_evl *evl = idxd->evl; gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); if (!gencfg.evl_en) return; spin_lock(&evl->lock); gencfg.evl_en = 0; iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET); genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET); genctrl.evl_int_en = 0; iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET); iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET); iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET + 8); dma_free_coherent(dev, evl->log_size, evl->log, evl->dma); bitmap_free(evl->bmap); evl->log = NULL; evl->size = IDXD_EVL_SIZE_MIN; spin_unlock(&evl->lock); } static void idxd_group_config_write(struct idxd_group *group) { struct idxd_device *idxd = group->idxd; struct device *dev = &idxd->pdev->dev; int i; u32 grpcfg_offset; dev_dbg(dev, "Writing group %d cfg registers\n", group->id); /* setup GRPWQCFG */ for (i = 0; i < GRPWQCFG_STRIDES; i++) { grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i); iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset); dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n", group->id, i, grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset)); } /* setup GRPENGCFG */ grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id); iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset); dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id, grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset)); /* setup GRPFLAGS */ grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id); iowrite64(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset); dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n", group->id, grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset)); } static int idxd_groups_config_write(struct idxd_device *idxd) { union gencfg_reg reg; int i; struct device *dev = &idxd->pdev->dev; /* Setup bandwidth rdbuf limit */ if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) { reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); reg.rdbuf_limit = idxd->rdbuf_limit; iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET); } dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET, ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET)); for (i = 0; i < idxd->max_groups; i++) { struct idxd_group *group = idxd->groups[i]; idxd_group_config_write(group); } return 0; } static bool idxd_device_pasid_priv_enabled(struct idxd_device *idxd) { struct pci_dev *pdev = idxd->pdev; if (pdev->pasid_enabled && (pdev->pasid_features & PCI_PASID_CAP_PRIV)) return true; return false; } static int idxd_wq_config_write(struct idxd_wq *wq) { struct idxd_device 
*idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; u32 wq_offset; int i, n; if (!wq->group) return 0; /* * Instead of memset the entire shadow copy of WQCFG, copy from the hardware after * wq reset. This will copy back the sticky values that are present on some devices. */ for (i = 0; i < WQCFG_STRIDES(idxd); i++) { wq_offset = WQCFG_OFFSET(idxd, wq->id, i); wq->wqcfg->bits[i] |= ioread32(idxd->reg_base + wq_offset); } if (wq->size == 0 && wq->type != IDXD_WQT_NONE) wq->size = WQ_DEFAULT_QUEUE_DEPTH; /* byte 0-3 */ wq->wqcfg->wq_size = wq->size; /* bytes 4-7 */ wq->wqcfg->wq_thresh = wq->threshold; /* byte 8-11 */ if (wq_dedicated(wq)) wq->wqcfg->mode = 1; /* * The WQ priv bit is set depending on the WQ type. priv = 1 if the * WQ type is kernel to indicate privileged access. This setting only * matters for dedicated WQ. According to the DSA spec: * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the * Privileged Mode Enable field of the PCI Express PASID capability * is 0, this field must be 0. * * In the case of a dedicated kernel WQ that is not able to support * the PASID cap, then the configuration will be rejected. */ if (wq_dedicated(wq) && wq->wqcfg->pasid_en && !idxd_device_pasid_priv_enabled(idxd) && wq->type == IDXD_WQT_KERNEL) { idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV; return -EOPNOTSUPP; } wq->wqcfg->priority = wq->priority; if (idxd->hw.gen_cap.block_on_fault && test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags) && !test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags)) wq->wqcfg->bof = 1; if (idxd->hw.wq_cap.wq_ats_support) wq->wqcfg->wq_ats_disable = test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags); if (idxd->hw.wq_cap.wq_prs_support) wq->wqcfg->wq_prs_disable = test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags); /* bytes 12-15 */ wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes); idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size)); /* bytes 32-63 */ if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) { memset(wq->wqcfg->op_config, 0, IDXD_MAX_OPCAP_BITS / 8); for_each_set_bit(n, wq->opcap_bmap, IDXD_MAX_OPCAP_BITS) { int pos = n % BITS_PER_LONG_LONG; int idx = n / BITS_PER_LONG_LONG; wq->wqcfg->op_config[idx] |= BIT(pos); } } dev_dbg(dev, "WQ %d CFGs\n", wq->id); for (i = 0; i < WQCFG_STRIDES(idxd); i++) { wq_offset = WQCFG_OFFSET(idxd, wq->id, i); iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset); dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wq_offset, ioread32(idxd->reg_base + wq_offset)); } return 0; } static int idxd_wqs_config_write(struct idxd_device *idxd) { int i, rc; for (i = 0; i < idxd->max_wqs; i++) { struct idxd_wq *wq = idxd->wqs[i]; rc = idxd_wq_config_write(wq); if (rc < 0) return rc; } return 0; } static void idxd_group_flags_setup(struct idxd_device *idxd) { int i; /* TC-A 0 and TC-B 1 should be defaults */ for (i = 0; i < idxd->max_groups; i++) { struct idxd_group *group = idxd->groups[i]; if (group->tc_a == -1) group->tc_a = group->grpcfg.flags.tc_a = 0; else group->grpcfg.flags.tc_a = group->tc_a; if (group->tc_b == -1) group->tc_b = group->grpcfg.flags.tc_b = 1; else group->grpcfg.flags.tc_b = group->tc_b; group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit; group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved; group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed; group->grpcfg.flags.desc_progress_limit = group->desc_progress_limit; group->grpcfg.flags.batch_progress_limit = group->batch_progress_limit; } } static int idxd_engines_setup(struct idxd_device *idxd) { int i, engines = 0; struct 
idxd_engine *eng; struct idxd_group *group; for (i = 0; i < idxd->max_groups; i++) { group = idxd->groups[i]; group->grpcfg.engines = 0; } for (i = 0; i < idxd->max_engines; i++) { eng = idxd->engines[i]; group = eng->group; if (!group) continue; group->grpcfg.engines |= BIT(eng->id); engines++; } if (!engines) return -EINVAL; return 0; } static int idxd_wqs_setup(struct idxd_device *idxd) { struct idxd_wq *wq; struct idxd_group *group; int i, j, configured = 0; struct device *dev = &idxd->pdev->dev; for (i = 0; i < idxd->max_groups; i++) { group = idxd->groups[i]; for (j = 0; j < 4; j++) group->grpcfg.wqs[j] = 0; } for (i = 0; i < idxd->max_wqs; i++) { wq = idxd->wqs[i]; group = wq->group; if (!wq->group) continue; if (wq_shared(wq) && !wq_shared_supported(wq)) { idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT; dev_warn(dev, "No shared wq support but configured.\n"); return -EINVAL; } group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64); configured++; } if (configured == 0) { idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED; return -EINVAL; } return 0; } int idxd_device_config(struct idxd_device *idxd) { int rc; lockdep_assert_held(&idxd->dev_lock); rc = idxd_wqs_setup(idxd); if (rc < 0) return rc; rc = idxd_engines_setup(idxd); if (rc < 0) return rc; idxd_group_flags_setup(idxd); rc = idxd_wqs_config_write(idxd); if (rc < 0) return rc; rc = idxd_groups_config_write(idxd); if (rc < 0) return rc; return 0; } static int idxd_wq_load_config(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; int wqcfg_offset; int i; wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0); memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size); wq->size = wq->wqcfg->wq_size; wq->threshold = wq->wqcfg->wq_thresh; /* The driver does not support shared WQ mode in read-only config yet */ if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en) return -EOPNOTSUPP; set_bit(WQ_FLAG_DEDICATED, &wq->flags); wq->priority = wq->wqcfg->priority; wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift; idxd_wq_set_max_batch_size(idxd->data->type, wq, 1U << wq->wqcfg->max_batch_shift); for (i = 0; i < WQCFG_STRIDES(idxd); i++) { wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i); dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]); } return 0; } static void idxd_group_load_config(struct idxd_group *group) { struct idxd_device *idxd = group->idxd; struct device *dev = &idxd->pdev->dev; int i, j, grpcfg_offset; /* * Load WQS bit fields * Iterate through all 256 bits 64 bits at a time */ for (i = 0; i < GRPWQCFG_STRIDES; i++) { struct idxd_wq *wq; grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i); group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset); dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n", group->id, i, grpcfg_offset, group->grpcfg.wqs[i]); if (i * 64 >= idxd->max_wqs) break; /* Iterate through all 64 bits and check for wq set */ for (j = 0; j < 64; j++) { int id = i * 64 + j; /* No need to check beyond max wqs */ if (id >= idxd->max_wqs) break; /* Set group assignment for wq if wq bit is set */ if (group->grpcfg.wqs[i] & BIT(j)) { wq = idxd->wqs[id]; wq->group = group; } } } grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id); group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset); dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id, grpcfg_offset, group->grpcfg.engines); /* Iterate through all 64 bits to check engines set */ for (i = 0; i < 64; i++) { if (i >= idxd->max_engines) break; if (group->grpcfg.engines & BIT(i)) { 
struct idxd_engine *engine = idxd->engines[i]; engine->group = group; } } grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id); group->grpcfg.flags.bits = ioread64(idxd->reg_base + grpcfg_offset); dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n", group->id, grpcfg_offset, group->grpcfg.flags.bits); } int idxd_device_load_config(struct idxd_device *idxd) { union gencfg_reg reg; int i, rc; reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); idxd->rdbuf_limit = reg.rdbuf_limit; for (i = 0; i < idxd->max_groups; i++) { struct idxd_group *group = idxd->groups[i]; idxd_group_load_config(group); } for (i = 0; i < idxd->max_wqs; i++) { struct idxd_wq *wq = idxd->wqs[i]; rc = idxd_wq_load_config(wq); if (rc < 0) return rc; } return 0; } static void idxd_flush_pending_descs(struct idxd_irq_entry *ie) { struct idxd_desc *desc, *itr; struct llist_node *head; LIST_HEAD(flist); enum idxd_complete_type ctype; spin_lock(&ie->list_lock); head = llist_del_all(&ie->pending_llist); if (head) { llist_for_each_entry_safe(desc, itr, head, llnode) list_add_tail(&desc->list, &ie->work_list); } list_for_each_entry_safe(desc, itr, &ie->work_list, list) list_move_tail(&desc->list, &flist); spin_unlock(&ie->list_lock); list_for_each_entry_safe(desc, itr, &flist, list) { struct dma_async_tx_descriptor *tx; list_del(&desc->list); ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT; /* * wq is being disabled. Any remaining descriptors are * likely to be stuck and can be dropped. callback could * point to code that is no longer accessible, for example * if dmatest module has been unloaded. */ tx = &desc->txd; tx->callback = NULL; tx->callback_result = NULL; idxd_dma_complete_txd(desc, ctype, true); } } static void idxd_device_set_perm_entry(struct idxd_device *idxd, struct idxd_irq_entry *ie) { union msix_perm mperm; if (ie->pasid == IOMMU_PASID_INVALID) return; mperm.bits = 0; mperm.pasid = ie->pasid; mperm.pasid_en = 1; iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8); } static void idxd_device_clear_perm_entry(struct idxd_device *idxd, struct idxd_irq_entry *ie) { iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8); } void idxd_wq_free_irq(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct idxd_irq_entry *ie = &wq->ie; if (wq->type != IDXD_WQT_KERNEL) return; free_irq(ie->vector, ie); idxd_flush_pending_descs(ie); if (idxd->request_int_handles) idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX); idxd_device_clear_perm_entry(idxd, ie); ie->vector = -1; ie->int_handle = INVALID_INT_HANDLE; ie->pasid = IOMMU_PASID_INVALID; } int idxd_wq_request_irq(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct pci_dev *pdev = idxd->pdev; struct device *dev = &pdev->dev; struct idxd_irq_entry *ie; int rc; if (wq->type != IDXD_WQT_KERNEL) return 0; ie = &wq->ie; ie->vector = pci_irq_vector(pdev, ie->id); ie->pasid = device_pasid_enabled(idxd) ? 
idxd->pasid : IOMMU_PASID_INVALID; idxd_device_set_perm_entry(idxd, ie); rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie); if (rc < 0) { dev_err(dev, "Failed to request irq %d.\n", ie->vector); goto err_irq; } if (idxd->request_int_handles) { rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle, IDXD_IRQ_MSIX); if (rc < 0) goto err_int_handle; } else { ie->int_handle = ie->id; } return 0; err_int_handle: ie->int_handle = INVALID_INT_HANDLE; free_irq(ie->vector, ie); err_irq: idxd_device_clear_perm_entry(idxd, ie); ie->pasid = IOMMU_PASID_INVALID; return rc; } int drv_enable_wq(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; int rc = -ENXIO; lockdep_assert_held(&wq->wq_lock); if (idxd->state != IDXD_DEV_ENABLED) { idxd->cmd_status = IDXD_SCMD_DEV_NOT_ENABLED; goto err; } if (wq->state != IDXD_WQ_DISABLED) { dev_dbg(dev, "wq %d already enabled.\n", wq->id); idxd->cmd_status = IDXD_SCMD_WQ_ENABLED; rc = -EBUSY; goto err; } if (!wq->group) { dev_dbg(dev, "wq %d not attached to group.\n", wq->id); idxd->cmd_status = IDXD_SCMD_WQ_NO_GRP; goto err; } if (strlen(wq->name) == 0) { idxd->cmd_status = IDXD_SCMD_WQ_NO_NAME; dev_dbg(dev, "wq %d name not set.\n", wq->id); goto err; } /* Shared WQ checks */ if (wq_shared(wq)) { if (!wq_shared_supported(wq)) { idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM; dev_dbg(dev, "PASID not enabled and shared wq.\n"); goto err; } /* * Shared wq with the threshold set to 0 means the user * did not set the threshold or transitioned from a * dedicated wq but did not set threshold. A value * of 0 would effectively disable the shared wq. The * driver does not allow a value of 0 to be set for * threshold via sysfs. */ if (wq->threshold == 0) { idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH; dev_dbg(dev, "Shared wq and threshold 0.\n"); goto err; } } /* * In the event that the WQ is configurable for pasid, the driver * should setup the pasid, pasid_en bit. This is true for both kernel * and user shared workqueues. There is no need to setup priv bit in * that in-kernel DMA will also do user privileged requests. * A dedicated wq that is not 'kernel' type will configure pasid and * pasid_en later on so there is no need to setup. */ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { if (wq_pasid_enabled(wq)) { if (is_idxd_wq_kernel(wq) || wq_shared(wq)) { u32 pasid = wq_dedicated(wq) ? 
idxd->pasid : 0; __idxd_wq_set_pasid_locked(wq, pasid); } } } rc = 0; spin_lock(&idxd->dev_lock); if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) rc = idxd_device_config(idxd); spin_unlock(&idxd->dev_lock); if (rc < 0) { dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc); goto err; } rc = idxd_wq_enable(wq); if (rc < 0) { dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc); goto err; } rc = idxd_wq_map_portal(wq); if (rc < 0) { idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR; dev_dbg(dev, "wq %d portal mapping failed: %d\n", wq->id, rc); goto err_map_portal; } wq->client_count = 0; rc = idxd_wq_request_irq(wq); if (rc < 0) { idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR; dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc); goto err_irq; } rc = idxd_wq_alloc_resources(wq); if (rc < 0) { idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR; dev_dbg(dev, "WQ resource alloc failed\n"); goto err_res_alloc; } rc = idxd_wq_init_percpu_ref(wq); if (rc < 0) { idxd->cmd_status = IDXD_SCMD_PERCPU_ERR; dev_dbg(dev, "percpu_ref setup failed\n"); goto err_ref; } return 0; err_ref: idxd_wq_free_resources(wq); err_res_alloc: idxd_wq_free_irq(wq); err_irq: idxd_wq_unmap_portal(wq); err_map_portal: if (idxd_wq_disable(wq, false)) dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq))); err: return rc; } void drv_disable_wq(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; lockdep_assert_held(&wq->wq_lock); if (idxd_wq_refcount(wq)) dev_warn(dev, "Clients have a claim on wq %d: %d\n", wq->id, idxd_wq_refcount(wq)); idxd_wq_unmap_portal(wq); idxd_wq_drain(wq); idxd_wq_free_irq(wq); idxd_wq_reset(wq); idxd_wq_free_resources(wq); percpu_ref_exit(&wq->wq_active); wq->type = IDXD_WQT_NONE; wq->client_count = 0; } int idxd_device_drv_probe(struct idxd_dev *idxd_dev) { struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev); int rc = 0; /* * The device should be in the disabled state for idxd_drv to load. If it is in * the enabled state, the device was altered outside of the driver's control. * If it is in the halted state, we don't want to proceed. */ if (idxd->state != IDXD_DEV_DISABLED) { idxd->cmd_status = IDXD_SCMD_DEV_ENABLED; return -ENXIO; } /* Device configuration */ spin_lock(&idxd->dev_lock); if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) rc = idxd_device_config(idxd); spin_unlock(&idxd->dev_lock); if (rc < 0) return -ENXIO; /* * The system PASID is preserved across the device disable/enable cycle, but * the genconfig register content is cleared during device reset. We * need to re-enable user interrupts for the kernel work queue completion * IRQ to function.
*/ if (idxd->pasid != IOMMU_PASID_INVALID) idxd_set_user_intr(idxd, 1); rc = idxd_device_evl_setup(idxd); if (rc < 0) { idxd->cmd_status = IDXD_SCMD_DEV_EVL_ERR; return rc; } /* Start device */ rc = idxd_device_enable(idxd); if (rc < 0) { idxd_device_evl_free(idxd); return rc; } /* Setup DMA device without channels */ rc = idxd_register_dma_device(idxd); if (rc < 0) { idxd_device_disable(idxd); idxd_device_evl_free(idxd); idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR; return rc; } idxd->cmd_status = 0; return 0; } void idxd_device_drv_remove(struct idxd_dev *idxd_dev) { struct device *dev = &idxd_dev->conf_dev; struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev); int i; for (i = 0; i < idxd->max_wqs; i++) { struct idxd_wq *wq = idxd->wqs[i]; struct device *wq_dev = wq_confdev(wq); if (wq->state == IDXD_WQ_DISABLED) continue; dev_warn(dev, "Active wq %d on disable %s.\n", i, dev_name(wq_dev)); device_release_driver(wq_dev); } idxd_unregister_dma_device(idxd); idxd_device_disable(idxd); if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) idxd_device_reset(idxd); idxd_device_evl_free(idxd); } static enum idxd_dev_type dev_types[] = { IDXD_DEV_DSA, IDXD_DEV_IAX, IDXD_DEV_NONE, }; struct idxd_device_driver idxd_drv = { .type = dev_types, .probe = idxd_device_drv_probe, .remove = idxd_device_drv_remove, .name = "idxd", }; EXPORT_SYMBOL_GPL(idxd_drv);
linux-master
drivers/dma/idxd/device.c
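/*
 * Illustrative note (not part of the kernel source above): the op_config loop
 * in idxd_wq_config_write() packs an operation-capability bitmap into 64-bit
 * words with idx = n / BITS_PER_LONG_LONG and pos = n % BITS_PER_LONG_LONG.
 * The standalone userspace sketch below reproduces only that packing so the
 * layout is easy to verify; the opcode numbers and the 256-bit width used
 * here are arbitrary examples for illustration, not values taken from the
 * DSA specification.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_OPCAP_BITS	256
#define BITS_PER_U64		64

int main(void)
{
	uint64_t op_config[EXAMPLE_OPCAP_BITS / BITS_PER_U64] = { 0 };
	/* Arbitrary example opcodes to mark as supported. */
	const unsigned int ops[] = { 0, 3, 64, 130 };
	size_t i;

	for (i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
		unsigned int idx = ops[i] / BITS_PER_U64;	/* which 64-bit word */
		unsigned int pos = ops[i] % BITS_PER_U64;	/* bit within that word */

		op_config[idx] |= (uint64_t)1 << pos;
	}

	/* Expected output: word 0 = 0x9, word 1 = 0x1, word 2 = 0x4, word 3 = 0. */
	for (i = 0; i < EXAMPLE_OPCAP_BITS / BITS_PER_U64; i++)
		printf("op_config[%zu] = %#018llx\n", i,
		       (unsigned long long)op_config[i]);

	return 0;
}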
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/sched/task.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/cdev.h> #include <linux/fs.h> #include <linux/poll.h> #include <linux/iommu.h> #include <linux/highmem.h> #include <uapi/linux/idxd.h> #include <linux/xarray.h> #include "registers.h" #include "idxd.h" struct idxd_cdev_context { const char *name; dev_t devt; struct ida minor_ida; }; /* * Since user file names are global in DSA devices, define their ida's as * global to avoid conflict file names. */ static DEFINE_IDA(file_ida); static DEFINE_MUTEX(ida_lock); /* * ictx is an array based off of accelerator types. enum idxd_type * is used as index */ static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = { { .name = "dsa" }, { .name = "iax" } }; struct idxd_user_context { struct idxd_wq *wq; struct task_struct *task; unsigned int pasid; struct mm_struct *mm; unsigned int flags; struct iommu_sva *sva; struct idxd_dev idxd_dev; u64 counters[COUNTER_MAX]; int id; pid_t pid; }; static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid); static void idxd_xa_pasid_remove(struct idxd_user_context *ctx); static inline struct idxd_user_context *dev_to_uctx(struct device *dev) { struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev); return container_of(idxd_dev, struct idxd_user_context, idxd_dev); } static ssize_t cr_faults_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_user_context *ctx = dev_to_uctx(dev); return sysfs_emit(buf, "%llu\n", ctx->counters[COUNTER_FAULTS]); } static DEVICE_ATTR_RO(cr_faults); static ssize_t cr_fault_failures_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_user_context *ctx = dev_to_uctx(dev); return sysfs_emit(buf, "%llu\n", ctx->counters[COUNTER_FAULT_FAILS]); } static DEVICE_ATTR_RO(cr_fault_failures); static ssize_t pid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_user_context *ctx = dev_to_uctx(dev); return sysfs_emit(buf, "%u\n", ctx->pid); } static DEVICE_ATTR_RO(pid); static struct attribute *cdev_file_attributes[] = { &dev_attr_cr_faults.attr, &dev_attr_cr_fault_failures.attr, &dev_attr_pid.attr, NULL }; static umode_t cdev_file_attr_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = container_of(kobj, typeof(*dev), kobj); struct idxd_user_context *ctx = dev_to_uctx(dev); struct idxd_wq *wq = ctx->wq; if (!wq_pasid_enabled(wq)) return 0; return a->mode; } static const struct attribute_group cdev_file_attribute_group = { .attrs = cdev_file_attributes, .is_visible = cdev_file_attr_visible, }; static const struct attribute_group *cdev_file_attribute_groups[] = { &cdev_file_attribute_group, NULL }; static void idxd_file_dev_release(struct device *dev) { struct idxd_user_context *ctx = dev_to_uctx(dev); struct idxd_wq *wq = ctx->wq; struct idxd_device *idxd = wq->idxd; int rc; mutex_lock(&ida_lock); ida_free(&file_ida, ctx->id); mutex_unlock(&ida_lock); /* Wait for in-flight operations to complete. 
*/ if (wq_shared(wq)) { idxd_device_drain_pasid(idxd, ctx->pasid); } else { if (device_user_pasid_enabled(idxd)) { /* The wq disable in the disable pasid function will drain the wq */ rc = idxd_wq_disable_pasid(wq); if (rc < 0) dev_err(dev, "wq disable pasid failed.\n"); } else { idxd_wq_drain(wq); } } if (ctx->sva) { idxd_cdev_evl_drain_pasid(wq, ctx->pasid); iommu_sva_unbind_device(ctx->sva); idxd_xa_pasid_remove(ctx); } kfree(ctx); mutex_lock(&wq->wq_lock); idxd_wq_put(wq); mutex_unlock(&wq->wq_lock); } static struct device_type idxd_cdev_file_type = { .name = "idxd_file", .release = idxd_file_dev_release, .groups = cdev_file_attribute_groups, }; static void idxd_cdev_dev_release(struct device *dev) { struct idxd_cdev *idxd_cdev = dev_to_cdev(dev); struct idxd_cdev_context *cdev_ctx; struct idxd_wq *wq = idxd_cdev->wq; cdev_ctx = &ictx[wq->idxd->data->type]; ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor); kfree(idxd_cdev); } static struct device_type idxd_cdev_device_type = { .name = "idxd_cdev", .release = idxd_cdev_dev_release, }; static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode) { struct cdev *cdev = inode->i_cdev; return container_of(cdev, struct idxd_cdev, cdev); } static inline struct idxd_wq *inode_wq(struct inode *inode) { struct idxd_cdev *idxd_cdev = inode_idxd_cdev(inode); return idxd_cdev->wq; } static void idxd_xa_pasid_remove(struct idxd_user_context *ctx) { struct idxd_wq *wq = ctx->wq; void *ptr; mutex_lock(&wq->uc_lock); ptr = xa_cmpxchg(&wq->upasid_xa, ctx->pasid, ctx, NULL, GFP_KERNEL); if (ptr != (void *)ctx) dev_warn(&wq->idxd->pdev->dev, "xarray cmpxchg failed for pasid %u\n", ctx->pasid); mutex_unlock(&wq->uc_lock); } void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index) { struct idxd_user_context *ctx; if (index >= COUNTER_MAX) return; mutex_lock(&wq->uc_lock); ctx = xa_load(&wq->upasid_xa, pasid); if (!ctx) { mutex_unlock(&wq->uc_lock); return; } ctx->counters[index]++; mutex_unlock(&wq->uc_lock); } static int idxd_cdev_open(struct inode *inode, struct file *filp) { struct idxd_user_context *ctx; struct idxd_device *idxd; struct idxd_wq *wq; struct device *dev, *fdev; int rc = 0; struct iommu_sva *sva; unsigned int pasid; struct idxd_cdev *idxd_cdev; wq = inode_wq(inode); idxd = wq->idxd; dev = &idxd->pdev->dev; dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq)); ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; mutex_lock(&wq->wq_lock); if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) { rc = -EBUSY; goto failed; } ctx->wq = wq; filp->private_data = ctx; ctx->pid = current->pid; if (device_user_pasid_enabled(idxd)) { sva = iommu_sva_bind_device(dev, current->mm); if (IS_ERR(sva)) { rc = PTR_ERR(sva); dev_err(dev, "pasid allocation failed: %d\n", rc); goto failed; } pasid = iommu_sva_get_pasid(sva); if (pasid == IOMMU_PASID_INVALID) { rc = -EINVAL; goto failed_get_pasid; } ctx->sva = sva; ctx->pasid = pasid; ctx->mm = current->mm; mutex_lock(&wq->uc_lock); rc = xa_insert(&wq->upasid_xa, pasid, ctx, GFP_KERNEL); mutex_unlock(&wq->uc_lock); if (rc < 0) dev_warn(dev, "PASID entry already exist in xarray.\n"); if (wq_dedicated(wq)) { rc = idxd_wq_set_pasid(wq, pasid); if (rc < 0) { dev_err(dev, "wq set pasid failed: %d\n", rc); goto failed_set_pasid; } } } idxd_cdev = wq->idxd_cdev; mutex_lock(&ida_lock); ctx->id = ida_alloc(&file_ida, GFP_KERNEL); mutex_unlock(&ida_lock); if (ctx->id < 0) { dev_warn(dev, "ida alloc failure\n"); goto failed_ida; } ctx->idxd_dev.type = 
IDXD_DEV_CDEV_FILE; fdev = user_ctx_dev(ctx); device_initialize(fdev); fdev->parent = cdev_dev(idxd_cdev); fdev->bus = &dsa_bus_type; fdev->type = &idxd_cdev_file_type; rc = dev_set_name(fdev, "file%d", ctx->id); if (rc < 0) { dev_warn(dev, "set name failure\n"); goto failed_dev_name; } rc = device_add(fdev); if (rc < 0) { dev_warn(dev, "file device add failure\n"); goto failed_dev_add; } idxd_wq_get(wq); mutex_unlock(&wq->wq_lock); return 0; failed_dev_add: failed_dev_name: put_device(fdev); failed_ida: failed_set_pasid: if (device_user_pasid_enabled(idxd)) idxd_xa_pasid_remove(ctx); failed_get_pasid: if (device_user_pasid_enabled(idxd)) iommu_sva_unbind_device(sva); failed: mutex_unlock(&wq->wq_lock); kfree(ctx); return rc; } static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid) { struct idxd_device *idxd = wq->idxd; struct idxd_evl *evl = idxd->evl; union evl_status_reg status; u16 h, t, size; int ent_size = evl_ent_size(idxd); struct __evl_entry *entry_head; if (!evl) return; spin_lock(&evl->lock); status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET); t = status.tail; h = evl->head; size = evl->size; while (h != t) { entry_head = (struct __evl_entry *)(evl->log + (h * ent_size)); if (entry_head->pasid == pasid && entry_head->wq_idx == wq->id) set_bit(h, evl->bmap); h = (h + 1) % size; } spin_unlock(&evl->lock); drain_workqueue(wq->wq); } static int idxd_cdev_release(struct inode *node, struct file *filep) { struct idxd_user_context *ctx = filep->private_data; struct idxd_wq *wq = ctx->wq; struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; dev_dbg(dev, "%s called\n", __func__); filep->private_data = NULL; device_unregister(user_ctx_dev(ctx)); return 0; } static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma, const char *func) { struct device *dev = &wq->idxd->pdev->dev; if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) { dev_info_ratelimited(dev, "%s: %s: mapping too large: %lu\n", current->comm, func, vma->vm_end - vma->vm_start); return -EINVAL; } return 0; } static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma) { struct idxd_user_context *ctx = filp->private_data; struct idxd_wq *wq = ctx->wq; struct idxd_device *idxd = wq->idxd; struct pci_dev *pdev = idxd->pdev; phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR); unsigned long pfn; int rc; dev_dbg(&pdev->dev, "%s called\n", __func__); rc = check_vma(wq, vma, __func__); if (rc < 0) return rc; vm_flags_set(vma, VM_DONTCOPY); pfn = (base + idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_private_data = ctx; return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, vma->vm_page_prot); } static __poll_t idxd_cdev_poll(struct file *filp, struct poll_table_struct *wait) { struct idxd_user_context *ctx = filp->private_data; struct idxd_wq *wq = ctx->wq; struct idxd_device *idxd = wq->idxd; __poll_t out = 0; poll_wait(filp, &wq->err_queue, wait); spin_lock(&idxd->dev_lock); if (idxd->sw_err.valid) out = EPOLLIN | EPOLLRDNORM; spin_unlock(&idxd->dev_lock); return out; } static const struct file_operations idxd_cdev_fops = { .owner = THIS_MODULE, .open = idxd_cdev_open, .release = idxd_cdev_release, .mmap = idxd_cdev_mmap, .poll = idxd_cdev_poll, }; int idxd_cdev_get_major(struct idxd_device *idxd) { return MAJOR(ictx[idxd->data->type].devt); } int idxd_wq_add_cdev(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct idxd_cdev *idxd_cdev; struct cdev 
*cdev; struct device *dev; struct idxd_cdev_context *cdev_ctx; int rc, minor; idxd_cdev = kzalloc(sizeof(*idxd_cdev), GFP_KERNEL); if (!idxd_cdev) return -ENOMEM; idxd_cdev->idxd_dev.type = IDXD_DEV_CDEV; idxd_cdev->wq = wq; cdev = &idxd_cdev->cdev; dev = cdev_dev(idxd_cdev); cdev_ctx = &ictx[wq->idxd->data->type]; minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL); if (minor < 0) { kfree(idxd_cdev); return minor; } idxd_cdev->minor = minor; device_initialize(dev); dev->parent = wq_confdev(wq); dev->bus = &dsa_bus_type; dev->type = &idxd_cdev_device_type; dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor); rc = dev_set_name(dev, "%s/wq%u.%u", idxd->data->name_prefix, idxd->id, wq->id); if (rc < 0) goto err; wq->idxd_cdev = idxd_cdev; cdev_init(cdev, &idxd_cdev_fops); rc = cdev_device_add(cdev, dev); if (rc) { dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc); goto err; } return 0; err: put_device(dev); wq->idxd_cdev = NULL; return rc; } void idxd_wq_del_cdev(struct idxd_wq *wq) { struct idxd_cdev *idxd_cdev; idxd_cdev = wq->idxd_cdev; ida_destroy(&file_ida); wq->idxd_cdev = NULL; cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev)); put_device(cdev_dev(idxd_cdev)); } static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) { struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); struct idxd_device *idxd = wq->idxd; int rc; if (idxd->state != IDXD_DEV_ENABLED) return -ENXIO; /* * User type WQ is enabled only when SVA is enabled for two reasons: * - If no IOMMU or IOMMU Passthrough without SVA, userspace * can directly access physical address through the WQ. * - The IDXD cdev driver does not provide any ways to pin * user pages and translate the address from user VA to IOVA or * PA without IOMMU SVA. Therefore the application has no way * to instruct the device to perform DMA function. This makes * the cdev not usable for normal application usage. 
*/ if (!device_user_pasid_enabled(idxd)) { idxd->cmd_status = IDXD_SCMD_WQ_USER_NO_IOMMU; dev_dbg(&idxd->pdev->dev, "User type WQ cannot be enabled without SVA.\n"); return -EOPNOTSUPP; } mutex_lock(&wq->wq_lock); wq->wq = create_workqueue(dev_name(wq_confdev(wq))); if (!wq->wq) { rc = -ENOMEM; goto wq_err; } wq->type = IDXD_WQT_USER; rc = drv_enable_wq(wq); if (rc < 0) goto err; rc = idxd_wq_add_cdev(wq); if (rc < 0) { idxd->cmd_status = IDXD_SCMD_CDEV_ERR; goto err_cdev; } idxd->cmd_status = 0; mutex_unlock(&wq->wq_lock); return 0; err_cdev: drv_disable_wq(wq); err: destroy_workqueue(wq->wq); wq->type = IDXD_WQT_NONE; wq_err: mutex_unlock(&wq->wq_lock); return rc; } static void idxd_user_drv_remove(struct idxd_dev *idxd_dev) { struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); mutex_lock(&wq->wq_lock); idxd_wq_del_cdev(wq); drv_disable_wq(wq); wq->type = IDXD_WQT_NONE; destroy_workqueue(wq->wq); wq->wq = NULL; mutex_unlock(&wq->wq_lock); } static enum idxd_dev_type dev_types[] = { IDXD_DEV_WQ, IDXD_DEV_NONE, }; struct idxd_device_driver idxd_user_drv = { .probe = idxd_user_drv_probe, .remove = idxd_user_drv_remove, .name = "user", .type = dev_types, }; EXPORT_SYMBOL_GPL(idxd_user_drv); int idxd_cdev_register(void) { int rc, i; for (i = 0; i < IDXD_TYPE_MAX; i++) { ida_init(&ictx[i].minor_ida); rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK, ictx[i].name); if (rc) goto err_free_chrdev_region; } return 0; err_free_chrdev_region: for (i--; i >= 0; i--) unregister_chrdev_region(ictx[i].devt, MINORMASK); return rc; } void idxd_cdev_remove(void) { int i; for (i = 0; i < IDXD_TYPE_MAX; i++) { unregister_chrdev_region(ictx[i].devt, MINORMASK); ida_destroy(&ictx[i].minor_ida); } } /** * idxd_copy_cr - copy completion record to user address space found by wq and * PASID * @wq: work queue * @pasid: PASID * @addr: user fault address to write * @cr: completion record * @len: number of bytes to copy * * This is called by a work that handles completion record fault. * * Return: number of bytes copied. */ int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr, void *cr, int len) { struct device *dev = &wq->idxd->pdev->dev; int left = len, status_size = 1; struct idxd_user_context *ctx; struct mm_struct *mm; mutex_lock(&wq->uc_lock); ctx = xa_load(&wq->upasid_xa, pasid); if (!ctx) { dev_warn(dev, "No user context\n"); goto out; } mm = ctx->mm; /* * The completion record fault handling work is running in kernel * thread context. It temporarily switches to the mm to copy cr * to addr in the mm. */ kthread_use_mm(mm); left = copy_to_user((void __user *)addr + status_size, cr + status_size, len - status_size); /* * Copy status only after the rest of completion record is copied * successfully so that the user gets the complete completion record * when a non-zero status is polled. */ if (!left) { u8 status; /* * Ensure that the completion record's status field is written * after the rest of the completion record has been written. * This ensures that the user receives the correct completion * record information once polling for a non-zero status. */ wmb(); status = *(u8 *)cr; if (put_user(status, (u8 __user *)addr)) left += status_size; } else { left += status_size; } kthread_unuse_mm(mm); out: mutex_unlock(&wq->uc_lock); return len - left; }
linux-master
drivers/dma/idxd/cdev.c
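/*
 * Illustrative note (not part of the kernel source above): idxd_copy_cr()
 * copies the completion-record payload (everything after the one-byte status
 * field) first and writes the status byte last, so a poller that observes a
 * non-zero status is guaranteed to see a complete record. The standalone
 * userspace sketch below mimics only that publish-last ordering with a plain
 * buffer; the record length is a made-up example, and the kernel's wmb()
 * barrier is represented by a comment since no concurrent reader exists here.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EXAMPLE_CR_LEN	32	/* arbitrary record size for the sketch */

static void publish_record(uint8_t *dst, const uint8_t *src, size_t len)
{
	/* Step 1: copy everything except the leading status byte. */
	memcpy(dst + 1, src + 1, len - 1);

	/* Step 2: in the kernel, a write barrier (wmb()) would go here. */

	/* Step 3: write the status byte last to "publish" the record. */
	dst[0] = src[0];
}

int main(void)
{
	uint8_t src[EXAMPLE_CR_LEN], dst[EXAMPLE_CR_LEN] = { 0 };
	size_t i;

	for (i = 0; i < sizeof(src); i++)
		src[i] = (uint8_t)(i + 1);	/* src[0] plays the status role */

	publish_record(dst, src, sizeof(src));
	printf("status=%u, last payload byte=%u\n",
	       (unsigned int)dst[0], (unsigned int)dst[EXAMPLE_CR_LEN - 1]);
	return 0;
}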
// SPDX-License-Identifier: GPL-2.0 /* * Lightning Mountain centralized DMA controller driver * * Copyright (c) 2016 - 2020 Intel Corporation. */ #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/err.h> #include <linux/export.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/of_dma.h> #include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/reset.h> #include "../dmaengine.h" #include "../virt-dma.h" #define DRIVER_NAME "lgm-dma" #define DMA_ID 0x0008 #define DMA_ID_REV GENMASK(7, 0) #define DMA_ID_PNR GENMASK(19, 16) #define DMA_ID_CHNR GENMASK(26, 20) #define DMA_ID_DW_128B BIT(27) #define DMA_ID_AW_36B BIT(28) #define DMA_VER32 0x32 #define DMA_VER31 0x31 #define DMA_VER22 0x0A #define DMA_CTRL 0x0010 #define DMA_CTRL_RST BIT(0) #define DMA_CTRL_DSRAM_PATH BIT(1) #define DMA_CTRL_DBURST_WR BIT(3) #define DMA_CTRL_VLD_DF_ACK BIT(4) #define DMA_CTRL_CH_FL BIT(6) #define DMA_CTRL_DS_FOD BIT(7) #define DMA_CTRL_DRB BIT(8) #define DMA_CTRL_ENBE BIT(9) #define DMA_CTRL_DESC_TMOUT_CNT_V31 GENMASK(27, 16) #define DMA_CTRL_DESC_TMOUT_EN_V31 BIT(30) #define DMA_CTRL_PKTARB BIT(31) #define DMA_CPOLL 0x0014 #define DMA_CPOLL_CNT GENMASK(15, 4) #define DMA_CPOLL_EN BIT(31) #define DMA_CS 0x0018 #define DMA_CS_MASK GENMASK(5, 0) #define DMA_CCTRL 0x001C #define DMA_CCTRL_ON BIT(0) #define DMA_CCTRL_RST BIT(1) #define DMA_CCTRL_CH_POLL_EN BIT(2) #define DMA_CCTRL_CH_ABC BIT(3) /* Adaptive Burst Chop */ #define DMA_CDBA_MSB GENMASK(7, 4) #define DMA_CCTRL_DIR_TX BIT(8) #define DMA_CCTRL_CLASS GENMASK(11, 9) #define DMA_CCTRL_CLASSH GENMASK(19, 18) #define DMA_CCTRL_WR_NP_EN BIT(21) #define DMA_CCTRL_PDEN BIT(23) #define DMA_MAX_CLASS (SZ_32 - 1) #define DMA_CDBA 0x0020 #define DMA_CDLEN 0x0024 #define DMA_CIS 0x0028 #define DMA_CIE 0x002C #define DMA_CI_EOP BIT(1) #define DMA_CI_DUR BIT(2) #define DMA_CI_DESCPT BIT(3) #define DMA_CI_CHOFF BIT(4) #define DMA_CI_RDERR BIT(5) #define DMA_CI_ALL \ (DMA_CI_EOP | DMA_CI_DUR | DMA_CI_DESCPT | DMA_CI_CHOFF | DMA_CI_RDERR) #define DMA_PS 0x0040 #define DMA_PCTRL 0x0044 #define DMA_PCTRL_RXBL16 BIT(0) #define DMA_PCTRL_TXBL16 BIT(1) #define DMA_PCTRL_RXBL GENMASK(3, 2) #define DMA_PCTRL_RXBL_8 3 #define DMA_PCTRL_TXBL GENMASK(5, 4) #define DMA_PCTRL_TXBL_8 3 #define DMA_PCTRL_PDEN BIT(6) #define DMA_PCTRL_RXBL32 BIT(7) #define DMA_PCTRL_RXENDI GENMASK(9, 8) #define DMA_PCTRL_TXENDI GENMASK(11, 10) #define DMA_PCTRL_TXBL32 BIT(15) #define DMA_PCTRL_MEM_FLUSH BIT(16) #define DMA_IRNEN1 0x00E8 #define DMA_IRNCR1 0x00EC #define DMA_IRNEN 0x00F4 #define DMA_IRNCR 0x00F8 #define DMA_C_DP_TICK 0x100 #define DMA_C_DP_TICK_TIKNARB GENMASK(15, 0) #define DMA_C_DP_TICK_TIKARB GENMASK(31, 16) #define DMA_C_HDRM 0x110 /* * If header mode is set in DMA descriptor, * If bit 30 is disabled, HDR_LEN must be configured according to channel * requirement. * If bit 30 is enabled(checksum with heade mode), HDR_LEN has no need to * be configured. 
It will enable check sum for switch * If header mode is not set in DMA descriptor, * This register setting doesn't matter */ #define DMA_C_HDRM_HDR_SUM BIT(30) #define DMA_C_BOFF 0x120 #define DMA_C_BOFF_BOF_LEN GENMASK(7, 0) #define DMA_C_BOFF_EN BIT(31) #define DMA_ORRC 0x190 #define DMA_ORRC_ORRCNT GENMASK(8, 4) #define DMA_ORRC_EN BIT(31) #define DMA_C_ENDIAN 0x200 #define DMA_C_END_DATAENDI GENMASK(1, 0) #define DMA_C_END_DE_EN BIT(7) #define DMA_C_END_DESENDI GENMASK(9, 8) #define DMA_C_END_DES_EN BIT(16) /* DMA controller capability */ #define DMA_ADDR_36BIT BIT(0) #define DMA_DATA_128BIT BIT(1) #define DMA_CHAN_FLOW_CTL BIT(2) #define DMA_DESC_FOD BIT(3) #define DMA_DESC_IN_SRAM BIT(4) #define DMA_EN_BYTE_EN BIT(5) #define DMA_DBURST_WR BIT(6) #define DMA_VALID_DESC_FETCH_ACK BIT(7) #define DMA_DFT_DRB BIT(8) #define DMA_ORRC_MAX_CNT (SZ_32 - 1) #define DMA_DFT_POLL_CNT SZ_4 #define DMA_DFT_BURST_V22 SZ_2 #define DMA_BURSTL_8DW SZ_8 #define DMA_BURSTL_16DW SZ_16 #define DMA_BURSTL_32DW SZ_32 #define DMA_DFT_BURST DMA_BURSTL_16DW #define DMA_MAX_DESC_NUM (SZ_8K - 1) #define DMA_CHAN_BOFF_MAX (SZ_256 - 1) #define DMA_DFT_ENDIAN 0 #define DMA_DFT_DESC_TCNT 50 #define DMA_HDR_LEN_MAX (SZ_16K - 1) /* DMA flags */ #define DMA_TX_CH BIT(0) #define DMA_RX_CH BIT(1) #define DEVICE_ALLOC_DESC BIT(2) #define CHAN_IN_USE BIT(3) #define DMA_HW_DESC BIT(4) /* Descriptor fields */ #define DESC_DATA_LEN GENMASK(15, 0) #define DESC_BYTE_OFF GENMASK(25, 23) #define DESC_EOP BIT(28) #define DESC_SOP BIT(29) #define DESC_C BIT(30) #define DESC_OWN BIT(31) #define DMA_CHAN_RST 1 #define DMA_MAX_SIZE (BIT(16) - 1) #define MAX_LOWER_CHANS 32 #define MASK_LOWER_CHANS GENMASK(4, 0) #define DMA_OWN 1 #define HIGH_4_BITS GENMASK(3, 0) #define DMA_DFT_DESC_NUM 1 #define DMA_PKT_DROP_DIS 0 enum ldma_chan_on_off { DMA_CH_OFF = 0, DMA_CH_ON = 1, }; enum { DMA_TYPE_TX = 0, DMA_TYPE_RX, DMA_TYPE_MCPY, }; struct ldma_dev; struct ldma_port; struct ldma_chan { struct virt_dma_chan vchan; struct ldma_port *port; /* back pointer */ char name[8]; /* Channel name */ int nr; /* Channel id in hardware */ u32 flags; /* central way or channel based way */ enum ldma_chan_on_off onoff; dma_addr_t desc_phys; void *desc_base; /* Virtual address */ u32 desc_cnt; /* Number of descriptors */ int rst; u32 hdrm_len; bool hdrm_csum; u32 boff_len; u32 data_endian; u32 desc_endian; bool pden; bool desc_rx_np; bool data_endian_en; bool desc_endian_en; bool abc_en; bool desc_init; struct dma_pool *desc_pool; /* Descriptors pool */ u32 desc_num; struct dw2_desc_sw *ds; struct work_struct work; struct dma_slave_config config; }; struct ldma_port { struct ldma_dev *ldev; /* back pointer */ u32 portid; u32 rxbl; u32 txbl; u32 rxendi; u32 txendi; u32 pkt_drop; }; /* Instance specific data */ struct ldma_inst_data { bool desc_in_sram; bool chan_fc; bool desc_fod; /* Fetch On Demand */ bool valid_desc_fetch_ack; u32 orrc; /* Outstanding read count */ const char *name; u32 type; }; struct ldma_dev { struct device *dev; void __iomem *base; struct reset_control *rst; struct clk *core_clk; struct dma_device dma_dev; u32 ver; int irq; struct ldma_port *ports; struct ldma_chan *chans; /* channel list on this DMA or port */ spinlock_t dev_lock; /* Controller register exclusive */ u32 chan_nrs; u32 port_nrs; u32 channels_mask; u32 flags; u32 pollcnt; const struct ldma_inst_data *inst; struct workqueue_struct *wq; }; struct dw2_desc { u32 field; u32 addr; } __packed __aligned(8); struct dw2_desc_sw { struct virt_dma_desc vdesc; struct ldma_chan *chan; 
dma_addr_t desc_phys; size_t desc_cnt; size_t size; struct dw2_desc *desc_hw; }; static inline void ldma_update_bits(struct ldma_dev *d, u32 mask, u32 val, u32 ofs) { u32 old_val, new_val; old_val = readl(d->base + ofs); new_val = (old_val & ~mask) | (val & mask); if (new_val != old_val) writel(new_val, d->base + ofs); } static inline struct ldma_chan *to_ldma_chan(struct dma_chan *chan) { return container_of(chan, struct ldma_chan, vchan.chan); } static inline struct ldma_dev *to_ldma_dev(struct dma_device *dma_dev) { return container_of(dma_dev, struct ldma_dev, dma_dev); } static inline struct dw2_desc_sw *to_lgm_dma_desc(struct virt_dma_desc *vdesc) { return container_of(vdesc, struct dw2_desc_sw, vdesc); } static inline bool ldma_chan_tx(struct ldma_chan *c) { return !!(c->flags & DMA_TX_CH); } static inline bool ldma_chan_is_hw_desc(struct ldma_chan *c) { return !!(c->flags & DMA_HW_DESC); } static void ldma_dev_reset(struct ldma_dev *d) { unsigned long flags; spin_lock_irqsave(&d->dev_lock, flags); ldma_update_bits(d, DMA_CTRL_RST, DMA_CTRL_RST, DMA_CTRL); spin_unlock_irqrestore(&d->dev_lock, flags); } static void ldma_dev_pkt_arb_cfg(struct ldma_dev *d, bool enable) { unsigned long flags; u32 mask = DMA_CTRL_PKTARB; u32 val = enable ? DMA_CTRL_PKTARB : 0; spin_lock_irqsave(&d->dev_lock, flags); ldma_update_bits(d, mask, val, DMA_CTRL); spin_unlock_irqrestore(&d->dev_lock, flags); } static void ldma_dev_sram_desc_cfg(struct ldma_dev *d, bool enable) { unsigned long flags; u32 mask = DMA_CTRL_DSRAM_PATH; u32 val = enable ? DMA_CTRL_DSRAM_PATH : 0; spin_lock_irqsave(&d->dev_lock, flags); ldma_update_bits(d, mask, val, DMA_CTRL); spin_unlock_irqrestore(&d->dev_lock, flags); } static void ldma_dev_chan_flow_ctl_cfg(struct ldma_dev *d, bool enable) { unsigned long flags; u32 mask, val; if (d->inst->type != DMA_TYPE_TX) return; mask = DMA_CTRL_CH_FL; val = enable ? DMA_CTRL_CH_FL : 0; spin_lock_irqsave(&d->dev_lock, flags); ldma_update_bits(d, mask, val, DMA_CTRL); spin_unlock_irqrestore(&d->dev_lock, flags); } static void ldma_dev_global_polling_enable(struct ldma_dev *d) { unsigned long flags; u32 mask = DMA_CPOLL_EN | DMA_CPOLL_CNT; u32 val = DMA_CPOLL_EN; val |= FIELD_PREP(DMA_CPOLL_CNT, d->pollcnt); spin_lock_irqsave(&d->dev_lock, flags); ldma_update_bits(d, mask, val, DMA_CPOLL); spin_unlock_irqrestore(&d->dev_lock, flags); } static void ldma_dev_desc_fetch_on_demand_cfg(struct ldma_dev *d, bool enable) { unsigned long flags; u32 mask, val; if (d->inst->type == DMA_TYPE_MCPY) return; mask = DMA_CTRL_DS_FOD; val = enable ? DMA_CTRL_DS_FOD : 0; spin_lock_irqsave(&d->dev_lock, flags); ldma_update_bits(d, mask, val, DMA_CTRL); spin_unlock_irqrestore(&d->dev_lock, flags); } static void ldma_dev_byte_enable_cfg(struct ldma_dev *d, bool enable) { unsigned long flags; u32 mask = DMA_CTRL_ENBE; u32 val = enable ? 
DMA_CTRL_ENBE : 0; spin_lock_irqsave(&d->dev_lock, flags); ldma_update_bits(d, mask, val, DMA_CTRL); spin_unlock_irqrestore(&d->dev_lock, flags); } static void ldma_dev_orrc_cfg(struct ldma_dev *d) { unsigned long flags; u32 val = 0; u32 mask; if (d->inst->type == DMA_TYPE_RX) return; mask = DMA_ORRC_EN | DMA_ORRC_ORRCNT; if (d->inst->orrc > 0 && d->inst->orrc <= DMA_ORRC_MAX_CNT) val = DMA_ORRC_EN | FIELD_PREP(DMA_ORRC_ORRCNT, d->inst->orrc); spin_lock_irqsave(&d->dev_lock, flags); ldma_update_bits(d, mask, val, DMA_ORRC); spin_unlock_irqrestore(&d->dev_lock, flags); } static void ldma_dev_df_tout_cfg(struct ldma_dev *d, bool enable, int tcnt) { u32 mask = DMA_CTRL_DESC_TMOUT_CNT_V31; unsigned long flags; u32 val; if (enable) val = DMA_CTRL_DESC_TMOUT_EN_V31 | FIELD_PREP(DMA_CTRL_DESC_TMOUT_CNT_V31, tcnt); else val = 0; spin_lock_irqsave(&d->dev_lock, flags); ldma_update_bits(d, mask, val, DMA_CTRL); spin_unlock_irqrestore(&d->dev_lock, flags); } static void ldma_dev_dburst_wr_cfg(struct ldma_dev *d, bool enable) { unsigned long flags; u32 mask, val; if (d->inst->type != DMA_TYPE_RX && d->inst->type != DMA_TYPE_MCPY) return; mask = DMA_CTRL_DBURST_WR; val = enable ? DMA_CTRL_DBURST_WR : 0; spin_lock_irqsave(&d->dev_lock, flags); ldma_update_bits(d, mask, val, DMA_CTRL); spin_unlock_irqrestore(&d->dev_lock, flags); } static void ldma_dev_vld_fetch_ack_cfg(struct ldma_dev *d, bool enable) { unsigned long flags; u32 mask, val; if (d->inst->type != DMA_TYPE_TX) return; mask = DMA_CTRL_VLD_DF_ACK; val = enable ? DMA_CTRL_VLD_DF_ACK : 0; spin_lock_irqsave(&d->dev_lock, flags); ldma_update_bits(d, mask, val, DMA_CTRL); spin_unlock_irqrestore(&d->dev_lock, flags); } static void ldma_dev_drb_cfg(struct ldma_dev *d, int enable) { unsigned long flags; u32 mask = DMA_CTRL_DRB; u32 val = enable ? 
DMA_CTRL_DRB : 0; spin_lock_irqsave(&d->dev_lock, flags); ldma_update_bits(d, mask, val, DMA_CTRL); spin_unlock_irqrestore(&d->dev_lock, flags); } static int ldma_dev_cfg(struct ldma_dev *d) { bool enable; ldma_dev_pkt_arb_cfg(d, true); ldma_dev_global_polling_enable(d); enable = !!(d->flags & DMA_DFT_DRB); ldma_dev_drb_cfg(d, enable); enable = !!(d->flags & DMA_EN_BYTE_EN); ldma_dev_byte_enable_cfg(d, enable); enable = !!(d->flags & DMA_CHAN_FLOW_CTL); ldma_dev_chan_flow_ctl_cfg(d, enable); enable = !!(d->flags & DMA_DESC_FOD); ldma_dev_desc_fetch_on_demand_cfg(d, enable); enable = !!(d->flags & DMA_DESC_IN_SRAM); ldma_dev_sram_desc_cfg(d, enable); enable = !!(d->flags & DMA_DBURST_WR); ldma_dev_dburst_wr_cfg(d, enable); enable = !!(d->flags & DMA_VALID_DESC_FETCH_ACK); ldma_dev_vld_fetch_ack_cfg(d, enable); if (d->ver > DMA_VER22) { ldma_dev_orrc_cfg(d); ldma_dev_df_tout_cfg(d, true, DMA_DFT_DESC_TCNT); } dev_dbg(d->dev, "%s Controller 0x%08x configuration done\n", d->inst->name, readl(d->base + DMA_CTRL)); return 0; } static int ldma_chan_cctrl_cfg(struct ldma_chan *c, u32 val) { struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); u32 class_low, class_high; unsigned long flags; u32 reg; spin_lock_irqsave(&d->dev_lock, flags); ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS); reg = readl(d->base + DMA_CCTRL); /* Read from hardware */ if (reg & DMA_CCTRL_DIR_TX) c->flags |= DMA_TX_CH; else c->flags |= DMA_RX_CH; /* Keep the class value unchanged */ class_low = FIELD_GET(DMA_CCTRL_CLASS, reg); class_high = FIELD_GET(DMA_CCTRL_CLASSH, reg); val &= ~DMA_CCTRL_CLASS; val |= FIELD_PREP(DMA_CCTRL_CLASS, class_low); val &= ~DMA_CCTRL_CLASSH; val |= FIELD_PREP(DMA_CCTRL_CLASSH, class_high); writel(val, d->base + DMA_CCTRL); spin_unlock_irqrestore(&d->dev_lock, flags); return 0; } static void ldma_chan_irq_init(struct ldma_chan *c) { struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); unsigned long flags; u32 enofs, crofs; u32 cn_bit; if (c->nr < MAX_LOWER_CHANS) { enofs = DMA_IRNEN; crofs = DMA_IRNCR; } else { enofs = DMA_IRNEN1; crofs = DMA_IRNCR1; } cn_bit = BIT(c->nr & MASK_LOWER_CHANS); spin_lock_irqsave(&d->dev_lock, flags); ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS); /* Clear all interrupts and disabled it */ writel(0, d->base + DMA_CIE); writel(DMA_CI_ALL, d->base + DMA_CIS); ldma_update_bits(d, cn_bit, 0, enofs); writel(cn_bit, d->base + crofs); spin_unlock_irqrestore(&d->dev_lock, flags); } static void ldma_chan_set_class(struct ldma_chan *c, u32 val) { struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); u32 class_val; if (d->inst->type == DMA_TYPE_MCPY || val > DMA_MAX_CLASS) return; /* 3 bits low */ class_val = FIELD_PREP(DMA_CCTRL_CLASS, val & 0x7); /* 2 bits high */ class_val |= FIELD_PREP(DMA_CCTRL_CLASSH, (val >> 3) & 0x3); ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS); ldma_update_bits(d, DMA_CCTRL_CLASS | DMA_CCTRL_CLASSH, class_val, DMA_CCTRL); } static int ldma_chan_on(struct ldma_chan *c) { struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); unsigned long flags; /* If descriptors not configured, not allow to turn on channel */ if (WARN_ON(!c->desc_init)) return -EINVAL; spin_lock_irqsave(&d->dev_lock, flags); ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS); ldma_update_bits(d, DMA_CCTRL_ON, DMA_CCTRL_ON, DMA_CCTRL); spin_unlock_irqrestore(&d->dev_lock, flags); c->onoff = DMA_CH_ON; return 0; } static int ldma_chan_off(struct ldma_chan *c) { struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); unsigned long flags; u32 val; int ret; 
spin_lock_irqsave(&d->dev_lock, flags); ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS); ldma_update_bits(d, DMA_CCTRL_ON, 0, DMA_CCTRL); spin_unlock_irqrestore(&d->dev_lock, flags); ret = readl_poll_timeout_atomic(d->base + DMA_CCTRL, val, !(val & DMA_CCTRL_ON), 0, 10000); if (ret) return ret; c->onoff = DMA_CH_OFF; return 0; } static void ldma_chan_desc_hw_cfg(struct ldma_chan *c, dma_addr_t desc_base, int desc_num) { struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); unsigned long flags; spin_lock_irqsave(&d->dev_lock, flags); ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS); writel(lower_32_bits(desc_base), d->base + DMA_CDBA); /* Higher 4 bits of 36 bit addressing */ if (IS_ENABLED(CONFIG_64BIT)) { u32 hi = upper_32_bits(desc_base) & HIGH_4_BITS; ldma_update_bits(d, DMA_CDBA_MSB, FIELD_PREP(DMA_CDBA_MSB, hi), DMA_CCTRL); } writel(desc_num, d->base + DMA_CDLEN); spin_unlock_irqrestore(&d->dev_lock, flags); c->desc_init = true; } static struct dma_async_tx_descriptor * ldma_chan_desc_cfg(struct dma_chan *chan, dma_addr_t desc_base, int desc_num) { struct ldma_chan *c = to_ldma_chan(chan); struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); struct dma_async_tx_descriptor *tx; struct dw2_desc_sw *ds; if (!desc_num) { dev_err(d->dev, "Channel %d must allocate descriptor first\n", c->nr); return NULL; } if (desc_num > DMA_MAX_DESC_NUM) { dev_err(d->dev, "Channel %d descriptor number out of range %d\n", c->nr, desc_num); return NULL; } ldma_chan_desc_hw_cfg(c, desc_base, desc_num); c->flags |= DMA_HW_DESC; c->desc_cnt = desc_num; c->desc_phys = desc_base; ds = kzalloc(sizeof(*ds), GFP_NOWAIT); if (!ds) return NULL; tx = &ds->vdesc.tx; dma_async_tx_descriptor_init(tx, chan); return tx; } static int ldma_chan_reset(struct ldma_chan *c) { struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); unsigned long flags; u32 val; int ret; ret = ldma_chan_off(c); if (ret) return ret; spin_lock_irqsave(&d->dev_lock, flags); ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS); ldma_update_bits(d, DMA_CCTRL_RST, DMA_CCTRL_RST, DMA_CCTRL); spin_unlock_irqrestore(&d->dev_lock, flags); ret = readl_poll_timeout_atomic(d->base + DMA_CCTRL, val, !(val & DMA_CCTRL_RST), 0, 10000); if (ret) return ret; c->rst = 1; c->desc_init = false; return 0; } static void ldma_chan_byte_offset_cfg(struct ldma_chan *c, u32 boff_len) { struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); u32 mask = DMA_C_BOFF_EN | DMA_C_BOFF_BOF_LEN; u32 val; if (boff_len > 0 && boff_len <= DMA_CHAN_BOFF_MAX) val = FIELD_PREP(DMA_C_BOFF_BOF_LEN, boff_len) | DMA_C_BOFF_EN; else val = 0; ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS); ldma_update_bits(d, mask, val, DMA_C_BOFF); } static void ldma_chan_data_endian_cfg(struct ldma_chan *c, bool enable, u32 endian_type) { struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); u32 mask = DMA_C_END_DE_EN | DMA_C_END_DATAENDI; u32 val; if (enable) val = DMA_C_END_DE_EN | FIELD_PREP(DMA_C_END_DATAENDI, endian_type); else val = 0; ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS); ldma_update_bits(d, mask, val, DMA_C_ENDIAN); } static void ldma_chan_desc_endian_cfg(struct ldma_chan *c, bool enable, u32 endian_type) { struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); u32 mask = DMA_C_END_DES_EN | DMA_C_END_DESENDI; u32 val; if (enable) val = DMA_C_END_DES_EN | FIELD_PREP(DMA_C_END_DESENDI, endian_type); else val = 0; ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS); ldma_update_bits(d, mask, val, DMA_C_ENDIAN); } static void ldma_chan_hdr_mode_cfg(struct ldma_chan *c, u32 hdr_len, bool csum) { 
struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); u32 mask, val; /* NB, csum disabled, hdr length must be provided */ if (!csum && (!hdr_len || hdr_len > DMA_HDR_LEN_MAX)) return; mask = DMA_C_HDRM_HDR_SUM; val = DMA_C_HDRM_HDR_SUM; if (!csum && hdr_len) val = hdr_len; ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS); ldma_update_bits(d, mask, val, DMA_C_HDRM); } static void ldma_chan_rxwr_np_cfg(struct ldma_chan *c, bool enable) { struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); u32 mask, val; /* Only valid for RX channel */ if (ldma_chan_tx(c)) return; mask = DMA_CCTRL_WR_NP_EN; val = enable ? DMA_CCTRL_WR_NP_EN : 0; ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS); ldma_update_bits(d, mask, val, DMA_CCTRL); } static void ldma_chan_abc_cfg(struct ldma_chan *c, bool enable) { struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); u32 mask, val; if (d->ver < DMA_VER32 || ldma_chan_tx(c)) return; mask = DMA_CCTRL_CH_ABC; val = enable ? DMA_CCTRL_CH_ABC : 0; ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS); ldma_update_bits(d, mask, val, DMA_CCTRL); } static int ldma_port_cfg(struct ldma_port *p) { unsigned long flags; struct ldma_dev *d; u32 reg; d = p->ldev; reg = FIELD_PREP(DMA_PCTRL_TXENDI, p->txendi); reg |= FIELD_PREP(DMA_PCTRL_RXENDI, p->rxendi); if (d->ver == DMA_VER22) { reg |= FIELD_PREP(DMA_PCTRL_TXBL, p->txbl); reg |= FIELD_PREP(DMA_PCTRL_RXBL, p->rxbl); } else { reg |= FIELD_PREP(DMA_PCTRL_PDEN, p->pkt_drop); if (p->txbl == DMA_BURSTL_32DW) reg |= DMA_PCTRL_TXBL32; else if (p->txbl == DMA_BURSTL_16DW) reg |= DMA_PCTRL_TXBL16; else reg |= FIELD_PREP(DMA_PCTRL_TXBL, DMA_PCTRL_TXBL_8); if (p->rxbl == DMA_BURSTL_32DW) reg |= DMA_PCTRL_RXBL32; else if (p->rxbl == DMA_BURSTL_16DW) reg |= DMA_PCTRL_RXBL16; else reg |= FIELD_PREP(DMA_PCTRL_RXBL, DMA_PCTRL_RXBL_8); } spin_lock_irqsave(&d->dev_lock, flags); writel(p->portid, d->base + DMA_PS); writel(reg, d->base + DMA_PCTRL); spin_unlock_irqrestore(&d->dev_lock, flags); reg = readl(d->base + DMA_PCTRL); /* read back */ dev_dbg(d->dev, "Port Control 0x%08x configuration done\n", reg); return 0; } static int ldma_chan_cfg(struct ldma_chan *c) { struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); unsigned long flags; u32 reg; reg = c->pden ? DMA_CCTRL_PDEN : 0; reg |= c->onoff ? DMA_CCTRL_ON : 0; reg |= c->rst ? 
DMA_CCTRL_RST : 0; ldma_chan_cctrl_cfg(c, reg); ldma_chan_irq_init(c); if (d->ver <= DMA_VER22) return 0; spin_lock_irqsave(&d->dev_lock, flags); ldma_chan_set_class(c, c->nr); ldma_chan_byte_offset_cfg(c, c->boff_len); ldma_chan_data_endian_cfg(c, c->data_endian_en, c->data_endian); ldma_chan_desc_endian_cfg(c, c->desc_endian_en, c->desc_endian); ldma_chan_hdr_mode_cfg(c, c->hdrm_len, c->hdrm_csum); ldma_chan_rxwr_np_cfg(c, c->desc_rx_np); ldma_chan_abc_cfg(c, c->abc_en); spin_unlock_irqrestore(&d->dev_lock, flags); if (ldma_chan_is_hw_desc(c)) ldma_chan_desc_hw_cfg(c, c->desc_phys, c->desc_cnt); return 0; } static void ldma_dev_init(struct ldma_dev *d) { unsigned long ch_mask = (unsigned long)d->channels_mask; struct ldma_port *p; struct ldma_chan *c; int i; u32 j; spin_lock_init(&d->dev_lock); ldma_dev_reset(d); ldma_dev_cfg(d); /* DMA port initialization */ for (i = 0; i < d->port_nrs; i++) { p = &d->ports[i]; ldma_port_cfg(p); } /* DMA channel initialization */ for_each_set_bit(j, &ch_mask, d->chan_nrs) { c = &d->chans[j]; ldma_chan_cfg(c); } } static int ldma_parse_dt(struct ldma_dev *d) { struct fwnode_handle *fwnode = dev_fwnode(d->dev); struct ldma_port *p; int i; if (fwnode_property_read_bool(fwnode, "intel,dma-byte-en")) d->flags |= DMA_EN_BYTE_EN; if (fwnode_property_read_bool(fwnode, "intel,dma-dburst-wr")) d->flags |= DMA_DBURST_WR; if (fwnode_property_read_bool(fwnode, "intel,dma-drb")) d->flags |= DMA_DFT_DRB; if (fwnode_property_read_u32(fwnode, "intel,dma-poll-cnt", &d->pollcnt)) d->pollcnt = DMA_DFT_POLL_CNT; if (d->inst->chan_fc) d->flags |= DMA_CHAN_FLOW_CTL; if (d->inst->desc_fod) d->flags |= DMA_DESC_FOD; if (d->inst->desc_in_sram) d->flags |= DMA_DESC_IN_SRAM; if (d->inst->valid_desc_fetch_ack) d->flags |= DMA_VALID_DESC_FETCH_ACK; if (d->ver > DMA_VER22) { if (!d->port_nrs) return -EINVAL; for (i = 0; i < d->port_nrs; i++) { p = &d->ports[i]; p->rxendi = DMA_DFT_ENDIAN; p->txendi = DMA_DFT_ENDIAN; p->rxbl = DMA_DFT_BURST; p->txbl = DMA_DFT_BURST; p->pkt_drop = DMA_PKT_DROP_DIS; } } return 0; } static void dma_free_desc_resource(struct virt_dma_desc *vdesc) { struct dw2_desc_sw *ds = to_lgm_dma_desc(vdesc); struct ldma_chan *c = ds->chan; dma_pool_free(c->desc_pool, ds->desc_hw, ds->desc_phys); kfree(ds); } static struct dw2_desc_sw * dma_alloc_desc_resource(int num, struct ldma_chan *c) { struct device *dev = c->vchan.chan.device->dev; struct dw2_desc_sw *ds; if (num > c->desc_num) { dev_err(dev, "sg num %d exceed max %d\n", num, c->desc_num); return NULL; } ds = kzalloc(sizeof(*ds), GFP_NOWAIT); if (!ds) return NULL; ds->chan = c; ds->desc_hw = dma_pool_zalloc(c->desc_pool, GFP_ATOMIC, &ds->desc_phys); if (!ds->desc_hw) { dev_dbg(dev, "out of memory for link descriptor\n"); kfree(ds); return NULL; } ds->desc_cnt = num; return ds; } static void ldma_chan_irq_en(struct ldma_chan *c) { struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); unsigned long flags; spin_lock_irqsave(&d->dev_lock, flags); writel(c->nr, d->base + DMA_CS); writel(DMA_CI_EOP, d->base + DMA_CIE); writel(BIT(c->nr), d->base + DMA_IRNEN); spin_unlock_irqrestore(&d->dev_lock, flags); } static void ldma_issue_pending(struct dma_chan *chan) { struct ldma_chan *c = to_ldma_chan(chan); struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); unsigned long flags; if (d->ver == DMA_VER22) { spin_lock_irqsave(&c->vchan.lock, flags); if (vchan_issue_pending(&c->vchan)) { struct virt_dma_desc *vdesc; /* Get the next descriptor */ vdesc = vchan_next_desc(&c->vchan); if (!vdesc) { c->ds = NULL; 
spin_unlock_irqrestore(&c->vchan.lock, flags); return; } list_del(&vdesc->node); c->ds = to_lgm_dma_desc(vdesc); ldma_chan_desc_hw_cfg(c, c->ds->desc_phys, c->ds->desc_cnt); ldma_chan_irq_en(c); } spin_unlock_irqrestore(&c->vchan.lock, flags); } ldma_chan_on(c); } static void ldma_synchronize(struct dma_chan *chan) { struct ldma_chan *c = to_ldma_chan(chan); /* * clear any pending work if any. In that * case the resource needs to be free here. */ cancel_work_sync(&c->work); vchan_synchronize(&c->vchan); if (c->ds) dma_free_desc_resource(&c->ds->vdesc); } static int ldma_terminate_all(struct dma_chan *chan) { struct ldma_chan *c = to_ldma_chan(chan); unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&c->vchan.lock, flags); vchan_get_all_descriptors(&c->vchan, &head); spin_unlock_irqrestore(&c->vchan.lock, flags); vchan_dma_desc_free_list(&c->vchan, &head); return ldma_chan_reset(c); } static int ldma_resume_chan(struct dma_chan *chan) { struct ldma_chan *c = to_ldma_chan(chan); ldma_chan_on(c); return 0; } static int ldma_pause_chan(struct dma_chan *chan) { struct ldma_chan *c = to_ldma_chan(chan); return ldma_chan_off(c); } static enum dma_status ldma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct ldma_chan *c = to_ldma_chan(chan); struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); enum dma_status status = DMA_COMPLETE; if (d->ver == DMA_VER22) status = dma_cookie_status(chan, cookie, txstate); return status; } static void dma_chan_irq(int irq, void *data) { struct ldma_chan *c = data; struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); u32 stat; /* Disable channel interrupts */ writel(c->nr, d->base + DMA_CS); stat = readl(d->base + DMA_CIS); if (!stat) return; writel(readl(d->base + DMA_CIE) & ~DMA_CI_ALL, d->base + DMA_CIE); writel(stat, d->base + DMA_CIS); queue_work(d->wq, &c->work); } static irqreturn_t dma_interrupt(int irq, void *dev_id) { struct ldma_dev *d = dev_id; struct ldma_chan *c; unsigned long irncr; u32 cid; irncr = readl(d->base + DMA_IRNCR); if (!irncr) { dev_err(d->dev, "dummy interrupt\n"); return IRQ_NONE; } for_each_set_bit(cid, &irncr, d->chan_nrs) { /* Mask */ writel(readl(d->base + DMA_IRNEN) & ~BIT(cid), d->base + DMA_IRNEN); /* Ack */ writel(readl(d->base + DMA_IRNCR) | BIT(cid), d->base + DMA_IRNCR); c = &d->chans[cid]; dma_chan_irq(irq, c); } return IRQ_HANDLED; } static void prep_slave_burst_len(struct ldma_chan *c) { struct ldma_port *p = c->port; struct dma_slave_config *cfg = &c->config; if (cfg->dst_maxburst) cfg->src_maxburst = cfg->dst_maxburst; /* TX and RX has the same burst length */ p->txbl = ilog2(cfg->src_maxburst); p->rxbl = p->txbl; } static struct dma_async_tx_descriptor * ldma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen, enum dma_transfer_direction dir, unsigned long flags, void *context) { struct ldma_chan *c = to_ldma_chan(chan); struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); size_t len, avail, total = 0; struct dw2_desc *hw_ds; struct dw2_desc_sw *ds; struct scatterlist *sg; int num = sglen, i; dma_addr_t addr; if (!sgl) return NULL; if (d->ver > DMA_VER22) return ldma_chan_desc_cfg(chan, sgl->dma_address, sglen); for_each_sg(sgl, sg, sglen, i) { avail = sg_dma_len(sg); if (avail > DMA_MAX_SIZE) num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1; } ds = dma_alloc_desc_resource(num, c); if (!ds) return NULL; c->ds = ds; num = 0; /* sop and eop has to be handled nicely */ for_each_sg(sgl, sg, sglen, i) { addr = sg_dma_address(sg); avail = 
sg_dma_len(sg); total += avail; do { len = min_t(size_t, avail, DMA_MAX_SIZE); hw_ds = &ds->desc_hw[num]; switch (sglen) { case 1: hw_ds->field &= ~DESC_SOP; hw_ds->field |= FIELD_PREP(DESC_SOP, 1); hw_ds->field &= ~DESC_EOP; hw_ds->field |= FIELD_PREP(DESC_EOP, 1); break; default: if (num == 0) { hw_ds->field &= ~DESC_SOP; hw_ds->field |= FIELD_PREP(DESC_SOP, 1); hw_ds->field &= ~DESC_EOP; hw_ds->field |= FIELD_PREP(DESC_EOP, 0); } else if (num == (sglen - 1)) { hw_ds->field &= ~DESC_SOP; hw_ds->field |= FIELD_PREP(DESC_SOP, 0); hw_ds->field &= ~DESC_EOP; hw_ds->field |= FIELD_PREP(DESC_EOP, 1); } else { hw_ds->field &= ~DESC_SOP; hw_ds->field |= FIELD_PREP(DESC_SOP, 0); hw_ds->field &= ~DESC_EOP; hw_ds->field |= FIELD_PREP(DESC_EOP, 0); } break; } /* Only 32 bit address supported */ hw_ds->addr = (u32)addr; hw_ds->field &= ~DESC_DATA_LEN; hw_ds->field |= FIELD_PREP(DESC_DATA_LEN, len); hw_ds->field &= ~DESC_C; hw_ds->field |= FIELD_PREP(DESC_C, 0); hw_ds->field &= ~DESC_BYTE_OFF; hw_ds->field |= FIELD_PREP(DESC_BYTE_OFF, addr & 0x3); /* Ensure data ready before ownership change */ wmb(); hw_ds->field &= ~DESC_OWN; hw_ds->field |= FIELD_PREP(DESC_OWN, DMA_OWN); /* Ensure ownership changed before moving forward */ wmb(); num++; addr += len; avail -= len; } while (avail); } ds->size = total; prep_slave_burst_len(c); return vchan_tx_prep(&c->vchan, &ds->vdesc, DMA_CTRL_ACK); } static int ldma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg) { struct ldma_chan *c = to_ldma_chan(chan); memcpy(&c->config, cfg, sizeof(c->config)); return 0; } static int ldma_alloc_chan_resources(struct dma_chan *chan) { struct ldma_chan *c = to_ldma_chan(chan); struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); struct device *dev = c->vchan.chan.device->dev; size_t desc_sz; if (d->ver > DMA_VER22) { c->flags |= CHAN_IN_USE; return 0; } if (c->desc_pool) return c->desc_num; desc_sz = c->desc_num * sizeof(struct dw2_desc); c->desc_pool = dma_pool_create(c->name, dev, desc_sz, __alignof__(struct dw2_desc), 0); if (!c->desc_pool) { dev_err(dev, "unable to allocate descriptor pool\n"); return -ENOMEM; } return c->desc_num; } static void ldma_free_chan_resources(struct dma_chan *chan) { struct ldma_chan *c = to_ldma_chan(chan); struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device); if (d->ver == DMA_VER22) { dma_pool_destroy(c->desc_pool); c->desc_pool = NULL; vchan_free_chan_resources(to_virt_chan(chan)); ldma_chan_reset(c); } else { c->flags &= ~CHAN_IN_USE; } } static void dma_work(struct work_struct *work) { struct ldma_chan *c = container_of(work, struct ldma_chan, work); struct dma_async_tx_descriptor *tx = &c->ds->vdesc.tx; struct virt_dma_chan *vc = &c->vchan; struct dmaengine_desc_callback cb; struct virt_dma_desc *vd, *_vd; unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&c->vchan.lock, flags); list_splice_tail_init(&vc->desc_completed, &head); spin_unlock_irqrestore(&c->vchan.lock, flags); dmaengine_desc_get_callback(tx, &cb); dma_cookie_complete(tx); dmaengine_desc_callback_invoke(&cb, NULL); list_for_each_entry_safe(vd, _vd, &head, node) { dmaengine_desc_get_callback(tx, &cb); dma_cookie_complete(tx); list_del(&vd->node); dmaengine_desc_callback_invoke(&cb, NULL); vchan_vdesc_fini(vd); } c->ds = NULL; } static void update_burst_len_v22(struct ldma_chan *c, struct ldma_port *p, u32 burst) { if (ldma_chan_tx(c)) p->txbl = ilog2(burst); else p->rxbl = ilog2(burst); } static void update_burst_len_v3X(struct ldma_chan *c, struct ldma_port *p, u32 burst) { if (ldma_chan_tx(c)) 
p->txbl = burst; else p->rxbl = burst; } static int update_client_configs(struct of_dma *ofdma, struct of_phandle_args *spec) { struct ldma_dev *d = ofdma->of_dma_data; u32 chan_id = spec->args[0]; u32 port_id = spec->args[1]; u32 burst = spec->args[2]; struct ldma_port *p; struct ldma_chan *c; if (chan_id >= d->chan_nrs || port_id >= d->port_nrs) return 0; p = &d->ports[port_id]; c = &d->chans[chan_id]; c->port = p; if (d->ver == DMA_VER22) update_burst_len_v22(c, p, burst); else update_burst_len_v3X(c, p, burst); ldma_port_cfg(p); return 1; } static struct dma_chan *ldma_xlate(struct of_phandle_args *spec, struct of_dma *ofdma) { struct ldma_dev *d = ofdma->of_dma_data; u32 chan_id = spec->args[0]; int ret; if (!spec->args_count) return NULL; /* if args_count is 1 driver use default settings */ if (spec->args_count > 1) { ret = update_client_configs(ofdma, spec); if (!ret) return NULL; } return dma_get_slave_channel(&d->chans[chan_id].vchan.chan); } static void ldma_dma_init_v22(int i, struct ldma_dev *d) { struct ldma_chan *c; c = &d->chans[i]; c->nr = i; /* Real channel number */ c->rst = DMA_CHAN_RST; c->desc_num = DMA_DFT_DESC_NUM; snprintf(c->name, sizeof(c->name), "chan%d", c->nr); INIT_WORK(&c->work, dma_work); c->vchan.desc_free = dma_free_desc_resource; vchan_init(&c->vchan, &d->dma_dev); } static void ldma_dma_init_v3X(int i, struct ldma_dev *d) { struct ldma_chan *c; c = &d->chans[i]; c->data_endian = DMA_DFT_ENDIAN; c->desc_endian = DMA_DFT_ENDIAN; c->data_endian_en = false; c->desc_endian_en = false; c->desc_rx_np = false; c->flags |= DEVICE_ALLOC_DESC; c->onoff = DMA_CH_OFF; c->rst = DMA_CHAN_RST; c->abc_en = true; c->hdrm_csum = false; c->boff_len = 0; c->nr = i; c->vchan.desc_free = dma_free_desc_resource; vchan_init(&c->vchan, &d->dma_dev); } static int ldma_init_v22(struct ldma_dev *d, struct platform_device *pdev) { int ret; ret = device_property_read_u32(d->dev, "dma-channels", &d->chan_nrs); if (ret < 0) { dev_err(d->dev, "unable to read dma-channels property\n"); return ret; } d->irq = platform_get_irq(pdev, 0); if (d->irq < 0) return d->irq; ret = devm_request_irq(&pdev->dev, d->irq, dma_interrupt, 0, DRIVER_NAME, d); if (ret) return ret; d->wq = alloc_ordered_workqueue("dma_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI); if (!d->wq) return -ENOMEM; return 0; } static void ldma_clk_disable(void *data) { struct ldma_dev *d = data; clk_disable_unprepare(d->core_clk); reset_control_assert(d->rst); } static const struct ldma_inst_data dma0 = { .name = "dma0", .chan_fc = false, .desc_fod = false, .desc_in_sram = false, .valid_desc_fetch_ack = false, }; static const struct ldma_inst_data dma2tx = { .name = "dma2tx", .type = DMA_TYPE_TX, .orrc = 16, .chan_fc = true, .desc_fod = true, .desc_in_sram = true, .valid_desc_fetch_ack = true, }; static const struct ldma_inst_data dma1rx = { .name = "dma1rx", .type = DMA_TYPE_RX, .orrc = 16, .chan_fc = false, .desc_fod = true, .desc_in_sram = true, .valid_desc_fetch_ack = false, }; static const struct ldma_inst_data dma1tx = { .name = "dma1tx", .type = DMA_TYPE_TX, .orrc = 16, .chan_fc = true, .desc_fod = true, .desc_in_sram = true, .valid_desc_fetch_ack = true, }; static const struct ldma_inst_data dma0tx = { .name = "dma0tx", .type = DMA_TYPE_TX, .orrc = 16, .chan_fc = true, .desc_fod = true, .desc_in_sram = true, .valid_desc_fetch_ack = true, }; static const struct ldma_inst_data dma3 = { .name = "dma3", .type = DMA_TYPE_MCPY, .orrc = 16, .chan_fc = false, .desc_fod = false, .desc_in_sram = true, .valid_desc_fetch_ack = false, }; static 
const struct ldma_inst_data toe_dma30 = { .name = "toe_dma30", .type = DMA_TYPE_MCPY, .orrc = 16, .chan_fc = false, .desc_fod = false, .desc_in_sram = true, .valid_desc_fetch_ack = true, }; static const struct ldma_inst_data toe_dma31 = { .name = "toe_dma31", .type = DMA_TYPE_MCPY, .orrc = 16, .chan_fc = false, .desc_fod = false, .desc_in_sram = true, .valid_desc_fetch_ack = true, }; static const struct of_device_id intel_ldma_match[] = { { .compatible = "intel,lgm-cdma", .data = &dma0}, { .compatible = "intel,lgm-dma2tx", .data = &dma2tx}, { .compatible = "intel,lgm-dma1rx", .data = &dma1rx}, { .compatible = "intel,lgm-dma1tx", .data = &dma1tx}, { .compatible = "intel,lgm-dma0tx", .data = &dma0tx}, { .compatible = "intel,lgm-dma3", .data = &dma3}, { .compatible = "intel,lgm-toe-dma30", .data = &toe_dma30}, { .compatible = "intel,lgm-toe-dma31", .data = &toe_dma31}, {} }; static int intel_ldma_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct dma_device *dma_dev; unsigned long ch_mask; struct ldma_chan *c; struct ldma_port *p; struct ldma_dev *d; u32 id, bitn = 32, j; int i, ret; d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL); if (!d) return -ENOMEM; /* Link controller to platform device */ d->dev = &pdev->dev; d->inst = device_get_match_data(dev); if (!d->inst) { dev_err(dev, "No device match found\n"); return -ENODEV; } d->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(d->base)) return PTR_ERR(d->base); /* Power up and reset the dma engine, some DMAs always on?? */ d->core_clk = devm_clk_get_optional(dev, NULL); if (IS_ERR(d->core_clk)) return PTR_ERR(d->core_clk); d->rst = devm_reset_control_get_optional(dev, NULL); if (IS_ERR(d->rst)) return PTR_ERR(d->rst); clk_prepare_enable(d->core_clk); reset_control_deassert(d->rst); ret = devm_add_action_or_reset(dev, ldma_clk_disable, d); if (ret) { dev_err(dev, "Failed to devm_add_action_or_reset, %d\n", ret); return ret; } id = readl(d->base + DMA_ID); d->chan_nrs = FIELD_GET(DMA_ID_CHNR, id); d->port_nrs = FIELD_GET(DMA_ID_PNR, id); d->ver = FIELD_GET(DMA_ID_REV, id); if (id & DMA_ID_AW_36B) d->flags |= DMA_ADDR_36BIT; if (IS_ENABLED(CONFIG_64BIT) && (id & DMA_ID_AW_36B)) bitn = 36; if (id & DMA_ID_DW_128B) d->flags |= DMA_DATA_128BIT; ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(bitn)); if (ret) { dev_err(dev, "No usable DMA configuration\n"); return ret; } if (d->ver == DMA_VER22) { ret = ldma_init_v22(d, pdev); if (ret) return ret; } ret = device_property_read_u32(dev, "dma-channel-mask", &d->channels_mask); if (ret < 0) d->channels_mask = GENMASK(d->chan_nrs - 1, 0); dma_dev = &d->dma_dev; dma_cap_zero(dma_dev->cap_mask); dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); /* Channel initializations */ INIT_LIST_HEAD(&dma_dev->channels); /* Port Initializations */ d->ports = devm_kcalloc(dev, d->port_nrs, sizeof(*p), GFP_KERNEL); if (!d->ports) return -ENOMEM; /* Channels Initializations */ d->chans = devm_kcalloc(d->dev, d->chan_nrs, sizeof(*c), GFP_KERNEL); if (!d->chans) return -ENOMEM; for (i = 0; i < d->port_nrs; i++) { p = &d->ports[i]; p->portid = i; p->ldev = d; } dma_dev->dev = &pdev->dev; ch_mask = (unsigned long)d->channels_mask; for_each_set_bit(j, &ch_mask, d->chan_nrs) { if (d->ver == DMA_VER22) ldma_dma_init_v22(j, d); else ldma_dma_init_v3X(j, d); } ret = ldma_parse_dt(d); if (ret) return ret; dma_dev->device_alloc_chan_resources = ldma_alloc_chan_resources; dma_dev->device_free_chan_resources = ldma_free_chan_resources; dma_dev->device_terminate_all = ldma_terminate_all; 
dma_dev->device_issue_pending = ldma_issue_pending; dma_dev->device_tx_status = ldma_tx_status; dma_dev->device_resume = ldma_resume_chan; dma_dev->device_pause = ldma_pause_chan; dma_dev->device_prep_slave_sg = ldma_prep_slave_sg; if (d->ver == DMA_VER22) { dma_dev->device_config = ldma_slave_config; dma_dev->device_synchronize = ldma_synchronize; dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; } platform_set_drvdata(pdev, d); ldma_dev_init(d); ret = dma_async_device_register(dma_dev); if (ret) { dev_err(dev, "Failed to register slave DMA engine device\n"); return ret; } ret = of_dma_controller_register(pdev->dev.of_node, ldma_xlate, d); if (ret) { dev_err(dev, "Failed to register of DMA controller\n"); dma_async_device_unregister(dma_dev); return ret; } dev_info(dev, "Init done - rev: %x, ports: %d channels: %d\n", d->ver, d->port_nrs, d->chan_nrs); return 0; } static struct platform_driver intel_ldma_driver = { .probe = intel_ldma_probe, .driver = { .name = DRIVER_NAME, .of_match_table = intel_ldma_match, }, }; /* * Perform this driver as device_initcall to make sure initialization happens * before its DMA clients of some are platform specific and also to provide * registered DMA channels and DMA capabilities to clients before their * initialization. */ builtin_platform_driver(intel_ldma_driver);
linux-master
drivers/dma/lgm/lgm-dma.c
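The xlate and slave-config paths in lgm-dma.c hand out channels through the standard dmaengine client API. Below is a minimal, hypothetical consumer sketch, assuming a client device-tree node with an "rx" dma-names entry and an illustrative FIFO address; the helper name and values are not part of the driver.

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hypothetical consumer helper; names and values are illustrative only. */
static int example_lgm_request_rx_chan(struct device *dev, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.src_addr	= fifo_addr,			/* assumed peripheral FIFO address */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 8,	/* copied by ldma_slave_config(); the DMA_VER22 path stores ilog2() of it */
	};
	struct dma_chan *chan;
	int ret;

	/* "rx" must match a dma-names entry in the client's DT node */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret) {
		dma_release_channel(chan);
		return ret;
	}

	/* transfers would then be built with dmaengine_prep_slave_sg(..., DMA_DEV_TO_MEM, ...) */
	return 0;
}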
// SPDX-License-Identifier: GPL-2.0-only /* * Core driver for the High Speed UART DMA * * Copyright (C) 2015 Intel Corporation * Author: Andy Shevchenko <[email protected]> * * Partially based on the bits found in drivers/tty/serial/mfd.c. */ /* * DMA channel allocation: * 1. Even number chans are used for DMA Read (UART TX), odd chans for DMA * Write (UART RX). * 2. 0/1 channel are assigned to port 0, 2/3 chan to port 1, 4/5 chan to * port 3, and so on. */ #include <linux/bits.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/module.h> #include <linux/percpu-defs.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/spinlock.h> #include "hsu.h" #define HSU_DMA_BUSWIDTHS \ BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_16_BYTES) static inline void hsu_chan_disable(struct hsu_dma_chan *hsuc) { hsu_chan_writel(hsuc, HSU_CH_CR, 0); } static inline void hsu_chan_enable(struct hsu_dma_chan *hsuc) { u32 cr = HSU_CH_CR_CHA; if (hsuc->direction == DMA_MEM_TO_DEV) cr &= ~HSU_CH_CR_CHD; else if (hsuc->direction == DMA_DEV_TO_MEM) cr |= HSU_CH_CR_CHD; hsu_chan_writel(hsuc, HSU_CH_CR, cr); } static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc) { struct dma_slave_config *config = &hsuc->config; struct hsu_dma_desc *desc = hsuc->desc; u32 bsr = 0, mtsr = 0; /* to shut the compiler up */ u32 dcr = HSU_CH_DCR_CHSOE | HSU_CH_DCR_CHEI; unsigned int i, count; if (hsuc->direction == DMA_MEM_TO_DEV) { bsr = config->dst_maxburst; mtsr = config->dst_addr_width; } else if (hsuc->direction == DMA_DEV_TO_MEM) { bsr = config->src_maxburst; mtsr = config->src_addr_width; } hsu_chan_disable(hsuc); hsu_chan_writel(hsuc, HSU_CH_DCR, 0); hsu_chan_writel(hsuc, HSU_CH_BSR, bsr); hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr); /* Set descriptors */ count = desc->nents - desc->active; for (i = 0; i < count && i < HSU_DMA_CHAN_NR_DESC; i++) { hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr); hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len); /* Prepare value for DCR */ dcr |= HSU_CH_DCR_DESCA(i); dcr |= HSU_CH_DCR_CHTOI(i); /* timeout bit, see HSU Errata 1 */ desc->active++; } /* Only for the last descriptor in the chain */ dcr |= HSU_CH_DCR_CHSOD(count - 1); dcr |= HSU_CH_DCR_CHDI(count - 1); hsu_chan_writel(hsuc, HSU_CH_DCR, dcr); hsu_chan_enable(hsuc); } static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc) { hsu_chan_disable(hsuc); hsu_chan_writel(hsuc, HSU_CH_DCR, 0); } static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc) { hsu_dma_chan_start(hsuc); } static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc) { struct virt_dma_desc *vdesc; /* Get the next descriptor */ vdesc = vchan_next_desc(&hsuc->vchan); if (!vdesc) { hsuc->desc = NULL; return; } list_del(&vdesc->node); hsuc->desc = to_hsu_dma_desc(vdesc); /* Start the channel with a new descriptor */ hsu_dma_start_channel(hsuc); } /* * hsu_dma_get_status() - get DMA channel status * @chip: HSUART DMA chip * @nr: DMA channel number * @status: pointer for DMA Channel Status Register value * * Description: * The function reads and clears the DMA Channel Status Register, checks * if it was a timeout interrupt and returns a corresponding value. 
* * Caller should provide a valid pointer for the DMA Channel Status * Register value that will be returned in @status. * * Return: * 1 for DMA timeout status, 0 for other DMA status, or error code for * invalid parameters or no interrupt pending. */ int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr, u32 *status) { struct hsu_dma_chan *hsuc; unsigned long flags; u32 sr; /* Sanity check */ if (nr >= chip->hsu->nr_channels) return -EINVAL; hsuc = &chip->hsu->chan[nr]; /* * No matter what situation, need read clear the IRQ status * There is a bug, see Errata 5, HSD 2900918 */ spin_lock_irqsave(&hsuc->vchan.lock, flags); sr = hsu_chan_readl(hsuc, HSU_CH_SR); spin_unlock_irqrestore(&hsuc->vchan.lock, flags); /* Check if any interrupt is pending */ sr &= ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY); if (!sr) return -EIO; /* Timeout IRQ, need wait some time, see Errata 2 */ if (sr & HSU_CH_SR_DESCTO_ANY) udelay(2); /* * At this point, at least one of Descriptor Time Out, Channel Error * or Descriptor Done bits must be set. Clear the Descriptor Time Out * bits and if sr is still non-zero, it must be channel error or * descriptor done which are higher priority than timeout and handled * in hsu_dma_do_irq(). Else, it must be a timeout. */ sr &= ~HSU_CH_SR_DESCTO_ANY; *status = sr; return sr ? 0 : 1; } EXPORT_SYMBOL_GPL(hsu_dma_get_status); /* * hsu_dma_do_irq() - DMA interrupt handler * @chip: HSUART DMA chip * @nr: DMA channel number * @status: Channel Status Register value * * Description: * This function handles Channel Error and Descriptor Done interrupts. * This function should be called after determining that the DMA interrupt * is not a normal timeout interrupt, ie. hsu_dma_get_status() returned 0. * * Return: * 0 for invalid channel number, 1 otherwise. 
*/ int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status) { struct dma_chan_percpu *stat; struct hsu_dma_chan *hsuc; struct hsu_dma_desc *desc; unsigned long flags; /* Sanity check */ if (nr >= chip->hsu->nr_channels) return 0; hsuc = &chip->hsu->chan[nr]; stat = this_cpu_ptr(hsuc->vchan.chan.local); spin_lock_irqsave(&hsuc->vchan.lock, flags); desc = hsuc->desc; if (desc) { if (status & HSU_CH_SR_CHE) { desc->status = DMA_ERROR; } else if (desc->active < desc->nents) { hsu_dma_start_channel(hsuc); } else { vchan_cookie_complete(&desc->vdesc); desc->status = DMA_COMPLETE; stat->bytes_transferred += desc->length; hsu_dma_start_transfer(hsuc); } } spin_unlock_irqrestore(&hsuc->vchan.lock, flags); return 1; } EXPORT_SYMBOL_GPL(hsu_dma_do_irq); static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents) { struct hsu_dma_desc *desc; desc = kzalloc(sizeof(*desc), GFP_NOWAIT); if (!desc) return NULL; desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT); if (!desc->sg) { kfree(desc); return NULL; } return desc; } static void hsu_dma_desc_free(struct virt_dma_desc *vdesc) { struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc); kfree(desc->sg); kfree(desc); } static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, void *context) { struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); struct hsu_dma_desc *desc; struct scatterlist *sg; unsigned int i; desc = hsu_dma_alloc_desc(sg_len); if (!desc) return NULL; for_each_sg(sgl, sg, sg_len, i) { desc->sg[i].addr = sg_dma_address(sg); desc->sg[i].len = sg_dma_len(sg); desc->length += sg_dma_len(sg); } desc->nents = sg_len; desc->direction = direction; /* desc->active = 0 by kzalloc */ desc->status = DMA_IN_PROGRESS; return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags); } static void hsu_dma_issue_pending(struct dma_chan *chan) { struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); unsigned long flags; spin_lock_irqsave(&hsuc->vchan.lock, flags); if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc) hsu_dma_start_transfer(hsuc); spin_unlock_irqrestore(&hsuc->vchan.lock, flags); } static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc) { struct hsu_dma_desc *desc = hsuc->desc; size_t bytes = 0; int i; for (i = desc->active; i < desc->nents; i++) bytes += desc->sg[i].len; i = HSU_DMA_CHAN_NR_DESC - 1; do { bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i)); } while (--i >= 0); return bytes; } static enum dma_status hsu_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *state) { struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); struct virt_dma_desc *vdesc; enum dma_status status; size_t bytes; unsigned long flags; status = dma_cookie_status(chan, cookie, state); if (status == DMA_COMPLETE) return status; spin_lock_irqsave(&hsuc->vchan.lock, flags); vdesc = vchan_find_desc(&hsuc->vchan, cookie); if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) { bytes = hsu_dma_active_desc_size(hsuc); dma_set_residue(state, bytes); status = hsuc->desc->status; } else if (vdesc) { bytes = to_hsu_dma_desc(vdesc)->length; dma_set_residue(state, bytes); } spin_unlock_irqrestore(&hsuc->vchan.lock, flags); return status; } static int hsu_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *config) { struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); memcpy(&hsuc->config, config, sizeof(hsuc->config)); return 0; } static int hsu_dma_pause(struct dma_chan *chan) { 
struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); unsigned long flags; spin_lock_irqsave(&hsuc->vchan.lock, flags); if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) { hsu_chan_disable(hsuc); hsuc->desc->status = DMA_PAUSED; } spin_unlock_irqrestore(&hsuc->vchan.lock, flags); return 0; } static int hsu_dma_resume(struct dma_chan *chan) { struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); unsigned long flags; spin_lock_irqsave(&hsuc->vchan.lock, flags); if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) { hsuc->desc->status = DMA_IN_PROGRESS; hsu_chan_enable(hsuc); } spin_unlock_irqrestore(&hsuc->vchan.lock, flags); return 0; } static int hsu_dma_terminate_all(struct dma_chan *chan) { struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&hsuc->vchan.lock, flags); hsu_dma_stop_channel(hsuc); if (hsuc->desc) { hsu_dma_desc_free(&hsuc->desc->vdesc); hsuc->desc = NULL; } vchan_get_all_descriptors(&hsuc->vchan, &head); spin_unlock_irqrestore(&hsuc->vchan.lock, flags); vchan_dma_desc_free_list(&hsuc->vchan, &head); return 0; } static void hsu_dma_free_chan_resources(struct dma_chan *chan) { vchan_free_chan_resources(to_virt_chan(chan)); } static void hsu_dma_synchronize(struct dma_chan *chan) { struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); vchan_synchronize(&hsuc->vchan); } int hsu_dma_probe(struct hsu_dma_chip *chip) { struct hsu_dma *hsu; void __iomem *addr = chip->regs + chip->offset; unsigned short i; int ret; hsu = devm_kzalloc(chip->dev, sizeof(*hsu), GFP_KERNEL); if (!hsu) return -ENOMEM; chip->hsu = hsu; /* Calculate nr_channels from the IO space length */ hsu->nr_channels = (chip->length - chip->offset) / HSU_DMA_CHAN_LENGTH; hsu->chan = devm_kcalloc(chip->dev, hsu->nr_channels, sizeof(*hsu->chan), GFP_KERNEL); if (!hsu->chan) return -ENOMEM; INIT_LIST_HEAD(&hsu->dma.channels); for (i = 0; i < hsu->nr_channels; i++) { struct hsu_dma_chan *hsuc = &hsu->chan[i]; hsuc->vchan.desc_free = hsu_dma_desc_free; vchan_init(&hsuc->vchan, &hsu->dma); hsuc->direction = (i & 0x1) ? 
DMA_DEV_TO_MEM : DMA_MEM_TO_DEV; hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH; } dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask); dma_cap_set(DMA_PRIVATE, hsu->dma.cap_mask); hsu->dma.device_free_chan_resources = hsu_dma_free_chan_resources; hsu->dma.device_prep_slave_sg = hsu_dma_prep_slave_sg; hsu->dma.device_issue_pending = hsu_dma_issue_pending; hsu->dma.device_tx_status = hsu_dma_tx_status; hsu->dma.device_config = hsu_dma_slave_config; hsu->dma.device_pause = hsu_dma_pause; hsu->dma.device_resume = hsu_dma_resume; hsu->dma.device_terminate_all = hsu_dma_terminate_all; hsu->dma.device_synchronize = hsu_dma_synchronize; hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS; hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS; hsu->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); hsu->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; hsu->dma.dev = chip->dev; dma_set_max_seg_size(hsu->dma.dev, HSU_CH_DxTSR_MASK); ret = dma_async_device_register(&hsu->dma); if (ret) return ret; dev_info(chip->dev, "Found HSU DMA, %d channels\n", hsu->nr_channels); return 0; } EXPORT_SYMBOL_GPL(hsu_dma_probe); int hsu_dma_remove(struct hsu_dma_chip *chip) { struct hsu_dma *hsu = chip->hsu; unsigned short i; dma_async_device_unregister(&hsu->dma); for (i = 0; i < hsu->nr_channels; i++) { struct hsu_dma_chan *hsuc = &hsu->chan[i]; tasklet_kill(&hsuc->vchan.task); } return 0; } EXPORT_SYMBOL_GPL(hsu_dma_remove); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("High Speed UART DMA core driver"); MODULE_AUTHOR("Andy Shevchenko <[email protected]>");
linux-master
drivers/dma/hsu/hsu.c
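Per the kernel-doc above, hsu_dma_get_status() and hsu_dma_do_irq() are meant to be chained by the client's interrupt handler. The following is a condensed sketch of that contract, assuming the public <linux/dma/hsu.h> header exposes both helpers; the function name is made up.

#include <linux/interrupt.h>
#include <linux/dma/hsu.h>

/* Sketch only: handle one HSU DMA channel from a client ISR. */
static irqreturn_t example_hsu_handle_chan(struct hsu_dma_chip *chip,
					   unsigned short nr)
{
	u32 status;
	int ret;

	ret = hsu_dma_get_status(chip, nr, &status);
	if (ret < 0)		/* invalid channel or no interrupt pending */
		return IRQ_NONE;
	if (ret > 0)		/* timeout IRQ: reported as handled, no further work */
		return IRQ_HANDLED;

	/* channel error or descriptor-done interrupt */
	return hsu_dma_do_irq(chip, nr, status) ? IRQ_HANDLED : IRQ_NONE;
}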
// SPDX-License-Identifier: GPL-2.0-only /* * PCI driver for the High Speed UART DMA * * Copyright (C) 2015 Intel Corporation * Author: Andy Shevchenko <[email protected]> * * Partially based on the bits found in drivers/tty/serial/mfd.c. */ #include <linux/bitops.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/pci.h> #include "hsu.h" #define HSU_PCI_DMASR 0x00 #define HSU_PCI_DMAISR 0x04 #define HSU_PCI_CHAN_OFFSET 0x100 #define PCI_DEVICE_ID_INTEL_MFLD_HSU_DMA 0x081e #define PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA 0x1192 static irqreturn_t hsu_pci_irq(int irq, void *dev) { struct hsu_dma_chip *chip = dev; unsigned long dmaisr; unsigned short i; u32 status; int ret = 0; int err; dmaisr = readl(chip->regs + HSU_PCI_DMAISR); for_each_set_bit(i, &dmaisr, chip->hsu->nr_channels) { err = hsu_dma_get_status(chip, i, &status); if (err > 0) ret |= 1; else if (err == 0) ret |= hsu_dma_do_irq(chip, i, status); } return IRQ_RETVAL(ret); } static void hsu_pci_dma_remove(void *chip) { hsu_dma_remove(chip); } static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct device *dev = &pdev->dev; struct hsu_dma_chip *chip; int ret; ret = pcim_enable_device(pdev); if (ret) return ret; ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); if (ret) { dev_err(&pdev->dev, "I/O memory remapping failed\n"); return ret; } pci_set_master(pdev); pci_try_set_mwi(pdev); ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) return ret; chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); if (ret < 0) return ret; chip->dev = &pdev->dev; chip->regs = pcim_iomap_table(pdev)[0]; chip->length = pci_resource_len(pdev, 0); chip->offset = HSU_PCI_CHAN_OFFSET; chip->irq = pci_irq_vector(pdev, 0); ret = hsu_dma_probe(chip); if (ret) return ret; ret = devm_add_action_or_reset(dev, hsu_pci_dma_remove, chip); if (ret) return ret; ret = devm_request_irq(dev, chip->irq, hsu_pci_irq, 0, "hsu_dma_pci", chip); if (ret) return ret; /* * On Intel Tangier B0 and Anniedale the interrupt line, disregarding * to have different numbers, is shared between HSU DMA and UART IPs. * Thus on such SoCs we are expecting that IRQ handler is called in * UART driver only. Instead of handling the spurious interrupt * from HSU DMA here and waste CPU time and delay HSU UART interrupt * handling, disable the interrupt entirely. */ if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA) disable_irq_nosync(chip->irq); pci_set_drvdata(pdev, chip); return 0; } static const struct pci_device_id hsu_pci_id_table[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MFLD_HSU_DMA), 0 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA), 0 }, { } }; MODULE_DEVICE_TABLE(pci, hsu_pci_id_table); static struct pci_driver hsu_pci_driver = { .name = "hsu_dma_pci", .id_table = hsu_pci_id_table, .probe = hsu_pci_probe, }; module_pci_driver(hsu_pci_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("High Speed UART DMA PCI driver"); MODULE_AUTHOR("Andy Shevchenko <[email protected]>");
linux-master
drivers/dma/hsu/pci.c
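The PCI glue above only populates a struct hsu_dma_chip before calling hsu_dma_probe(). A hypothetical memory-mapped (platform device) integration would follow the same shape; every resource value below is an assumption for illustration and none of it is part of the HSU driver.

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/dma/hsu.h>

/* Hypothetical platform glue; not part of the HSU driver. */
static int example_hsu_mmio_probe(struct platform_device *pdev)
{
	struct hsu_dma_chip *chip;
	struct resource *res;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	chip->dev = &pdev->dev;
	chip->length = resource_size(res);
	chip->offset = 0;			/* assumed: channels start at the region base */
	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	platform_set_drvdata(pdev, chip);
	return hsu_dma_probe(chip);
}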
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Passthrough DMA device driver * -- Based on the CCP driver * * Copyright (C) 2016,2021 Advanced Micro Devices, Inc. * * Author: Sanjay R Mehta <[email protected]> * Author: Gary R Hook <[email protected]> */ #include "ptdma.h" #include "../dmaengine.h" #include "../virt-dma.h" static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan) { return container_of(dma_chan, struct pt_dma_chan, vc.chan); } static inline struct pt_dma_desc *to_pt_desc(struct virt_dma_desc *vd) { return container_of(vd, struct pt_dma_desc, vd); } static void pt_free_chan_resources(struct dma_chan *dma_chan) { struct pt_dma_chan *chan = to_pt_chan(dma_chan); vchan_free_chan_resources(&chan->vc); } static void pt_synchronize(struct dma_chan *dma_chan) { struct pt_dma_chan *chan = to_pt_chan(dma_chan); vchan_synchronize(&chan->vc); } static void pt_do_cleanup(struct virt_dma_desc *vd) { struct pt_dma_desc *desc = to_pt_desc(vd); struct pt_device *pt = desc->pt; kmem_cache_free(pt->dma_desc_cache, desc); } static int pt_dma_start_desc(struct pt_dma_desc *desc) { struct pt_passthru_engine *pt_engine; struct pt_device *pt; struct pt_cmd *pt_cmd; struct pt_cmd_queue *cmd_q; desc->issued_to_hw = 1; pt_cmd = &desc->pt_cmd; pt = pt_cmd->pt; cmd_q = &pt->cmd_q; pt_engine = &pt_cmd->passthru; pt->tdata.cmd = pt_cmd; /* Execute the command */ pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine); return 0; } static struct pt_dma_desc *pt_next_dma_desc(struct pt_dma_chan *chan) { /* Get the next DMA descriptor on the active list */ struct virt_dma_desc *vd = vchan_next_desc(&chan->vc); return vd ? to_pt_desc(vd) : NULL; } static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan, struct pt_dma_desc *desc) { struct dma_async_tx_descriptor *tx_desc; struct virt_dma_desc *vd; unsigned long flags; /* Loop over descriptors until one is found with commands */ do { if (desc) { if (!desc->issued_to_hw) { /* No errors, keep going */ if (desc->status != DMA_ERROR) return desc; } tx_desc = &desc->vd.tx; vd = &desc->vd; } else { tx_desc = NULL; } spin_lock_irqsave(&chan->vc.lock, flags); if (desc) { if (desc->status != DMA_COMPLETE) { if (desc->status != DMA_ERROR) desc->status = DMA_COMPLETE; dma_cookie_complete(tx_desc); dma_descriptor_unmap(tx_desc); list_del(&desc->vd.node); } else { /* Don't handle it twice */ tx_desc = NULL; } } desc = pt_next_dma_desc(chan); spin_unlock_irqrestore(&chan->vc.lock, flags); if (tx_desc) { dmaengine_desc_get_callback_invoke(tx_desc, NULL); dma_run_dependencies(tx_desc); vchan_vdesc_fini(vd); } } while (desc); return NULL; } static void pt_cmd_callback(void *data, int err) { struct pt_dma_desc *desc = data; struct dma_chan *dma_chan; struct pt_dma_chan *chan; int ret; if (err == -EINPROGRESS) return; dma_chan = desc->vd.tx.chan; chan = to_pt_chan(dma_chan); if (err) desc->status = DMA_ERROR; while (true) { /* Check for DMA descriptor completion */ desc = pt_handle_active_desc(chan, desc); /* Don't submit cmd if no descriptor or DMA is paused */ if (!desc) break; ret = pt_dma_start_desc(desc); if (!ret) break; desc->status = DMA_ERROR; } } static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan, unsigned long flags) { struct pt_dma_desc *desc; desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT); if (!desc) return NULL; vchan_tx_prep(&chan->vc, &desc->vd, flags); desc->pt = chan->pt; desc->pt->cmd_q.int_en = !!(flags & DMA_PREP_INTERRUPT); desc->issued_to_hw = 0; desc->status = DMA_IN_PROGRESS; return 
desc; } static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, unsigned int len, unsigned long flags) { struct pt_dma_chan *chan = to_pt_chan(dma_chan); struct pt_passthru_engine *pt_engine; struct pt_dma_desc *desc; struct pt_cmd *pt_cmd; desc = pt_alloc_dma_desc(chan, flags); if (!desc) return NULL; pt_cmd = &desc->pt_cmd; pt_cmd->pt = chan->pt; pt_engine = &pt_cmd->passthru; pt_cmd->engine = PT_ENGINE_PASSTHRU; pt_engine->src_dma = src; pt_engine->dst_dma = dst; pt_engine->src_len = len; pt_cmd->pt_cmd_callback = pt_cmd_callback; pt_cmd->data = desc; desc->len = len; return desc; } static struct dma_async_tx_descriptor * pt_prep_dma_memcpy(struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len, unsigned long flags) { struct pt_dma_desc *desc; desc = pt_create_desc(dma_chan, dst, src, len, flags); if (!desc) return NULL; return &desc->vd.tx; } static struct dma_async_tx_descriptor * pt_prep_dma_interrupt(struct dma_chan *dma_chan, unsigned long flags) { struct pt_dma_chan *chan = to_pt_chan(dma_chan); struct pt_dma_desc *desc; desc = pt_alloc_dma_desc(chan, flags); if (!desc) return NULL; return &desc->vd.tx; } static void pt_issue_pending(struct dma_chan *dma_chan) { struct pt_dma_chan *chan = to_pt_chan(dma_chan); struct pt_dma_desc *desc; unsigned long flags; bool engine_is_idle = true; spin_lock_irqsave(&chan->vc.lock, flags); desc = pt_next_dma_desc(chan); if (desc) engine_is_idle = false; vchan_issue_pending(&chan->vc); desc = pt_next_dma_desc(chan); spin_unlock_irqrestore(&chan->vc.lock, flags); /* If there was nothing active, start processing */ if (engine_is_idle && desc) pt_cmd_callback(desc, 0); } static enum dma_status pt_tx_status(struct dma_chan *c, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct pt_device *pt = to_pt_chan(c)->pt; struct pt_cmd_queue *cmd_q = &pt->cmd_q; pt_check_status_trans(pt, cmd_q); return dma_cookie_status(c, cookie, txstate); } static int pt_pause(struct dma_chan *dma_chan) { struct pt_dma_chan *chan = to_pt_chan(dma_chan); unsigned long flags; spin_lock_irqsave(&chan->vc.lock, flags); pt_stop_queue(&chan->pt->cmd_q); spin_unlock_irqrestore(&chan->vc.lock, flags); return 0; } static int pt_resume(struct dma_chan *dma_chan) { struct pt_dma_chan *chan = to_pt_chan(dma_chan); struct pt_dma_desc *desc = NULL; unsigned long flags; spin_lock_irqsave(&chan->vc.lock, flags); pt_start_queue(&chan->pt->cmd_q); desc = pt_next_dma_desc(chan); spin_unlock_irqrestore(&chan->vc.lock, flags); /* If there was something active, re-start */ if (desc) pt_cmd_callback(desc, 0); return 0; } static int pt_terminate_all(struct dma_chan *dma_chan) { struct pt_dma_chan *chan = to_pt_chan(dma_chan); unsigned long flags; struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q; LIST_HEAD(head); iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010); spin_lock_irqsave(&chan->vc.lock, flags); vchan_get_all_descriptors(&chan->vc, &head); spin_unlock_irqrestore(&chan->vc.lock, flags); vchan_dma_desc_free_list(&chan->vc, &head); vchan_free_chan_resources(&chan->vc); return 0; } int pt_dmaengine_register(struct pt_device *pt) { struct pt_dma_chan *chan; struct dma_device *dma_dev = &pt->dma_dev; char *cmd_cache_name; char *desc_cache_name; int ret; pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan), GFP_KERNEL); if (!pt->pt_dma_chan) return -ENOMEM; cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL, "%s-dmaengine-cmd-cache", dev_name(pt->dev)); if (!cmd_cache_name) return -ENOMEM; desc_cache_name = 
devm_kasprintf(pt->dev, GFP_KERNEL, "%s-dmaengine-desc-cache", dev_name(pt->dev)); if (!desc_cache_name) { ret = -ENOMEM; goto err_cache; } pt->dma_desc_cache = kmem_cache_create(desc_cache_name, sizeof(struct pt_dma_desc), 0, SLAB_HWCACHE_ALIGN, NULL); if (!pt->dma_desc_cache) { ret = -ENOMEM; goto err_cache; } dma_dev->dev = pt->dev; dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES; dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES; dma_dev->directions = DMA_MEM_TO_MEM; dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask); /* * PTDMA is intended to be used with the AMD NTB devices, hence * marking it as DMA_PRIVATE. */ dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); INIT_LIST_HEAD(&dma_dev->channels); chan = pt->pt_dma_chan; chan->pt = pt; /* Set base and prep routines */ dma_dev->device_free_chan_resources = pt_free_chan_resources; dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy; dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt; dma_dev->device_issue_pending = pt_issue_pending; dma_dev->device_tx_status = pt_tx_status; dma_dev->device_pause = pt_pause; dma_dev->device_resume = pt_resume; dma_dev->device_terminate_all = pt_terminate_all; dma_dev->device_synchronize = pt_synchronize; chan->vc.desc_free = pt_do_cleanup; vchan_init(&chan->vc, dma_dev); dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64)); ret = dma_async_device_register(dma_dev); if (ret) goto err_reg; return 0; err_reg: kmem_cache_destroy(pt->dma_desc_cache); err_cache: kmem_cache_destroy(pt->dma_cmd_cache); return ret; } void pt_dmaengine_unregister(struct pt_device *pt) { struct dma_device *dma_dev = &pt->dma_dev; dma_async_device_unregister(dma_dev); kmem_cache_destroy(pt->dma_desc_cache); kmem_cache_destroy(pt->dma_cmd_cache); }
linux-master
drivers/dma/ptdma/ptdma-dmaengine.c
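pt_dmaengine_register() above exposes a single DMA_MEMCPY-capable, DMA_PRIVATE channel. A minimal, hypothetical client sketch using the generic dmaengine memcpy API follows; the addresses, length and the omitted completion handling are illustrative simplifications, not PTDMA-specific code.

#include <linux/dmaengine.h>

/* Sketch only: drive one copy through a DMA_MEMCPY-capable channel. */
static int example_dma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_channel(mask, NULL, NULL);	/* any memcpy-capable channel */
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/*
	 * A real client waits for completion (callback or dma_sync_wait())
	 * before releasing the channel with dma_release_channel().
	 */
	(void)cookie;
	return 0;
}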
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Passthrough DMA device driver * -- Based on the CCP driver * * Copyright (C) 2016,2021 Advanced Micro Devices, Inc. * * Author: Sanjay R Mehta <[email protected]> * Author: Gary R Hook <[email protected]> */ #include <linux/debugfs.h> #include <linux/seq_file.h> #include "ptdma.h" /* DebugFS helpers */ #define RI_VERSION_NUM 0x0000003F #define RI_NUM_VQM 0x00078000 #define RI_NVQM_SHIFT 15 static int pt_debugfs_info_show(struct seq_file *s, void *p) { struct pt_device *pt = s->private; unsigned int regval; seq_printf(s, "Device name: %s\n", dev_name(pt->dev)); seq_printf(s, " # Queues: %d\n", 1); seq_printf(s, " # Cmds: %d\n", pt->cmd_count); regval = ioread32(pt->io_regs + CMD_PT_VERSION); seq_printf(s, " Version: %d\n", regval & RI_VERSION_NUM); seq_puts(s, " Engines:"); seq_puts(s, "\n"); seq_printf(s, " Queues: %d\n", (regval & RI_NUM_VQM) >> RI_NVQM_SHIFT); return 0; } /* * Return a formatted buffer containing the current * statistics of queue for PTDMA */ static int pt_debugfs_stats_show(struct seq_file *s, void *p) { struct pt_device *pt = s->private; seq_printf(s, "Total Interrupts Handled: %ld\n", pt->total_interrupts); return 0; } static int pt_debugfs_queue_show(struct seq_file *s, void *p) { struct pt_cmd_queue *cmd_q = s->private; unsigned int regval; if (!cmd_q) return 0; seq_printf(s, " Pass-Thru: %ld\n", cmd_q->total_pt_ops); regval = ioread32(cmd_q->reg_control + 0x000C); seq_puts(s, " Enabled Interrupts:"); if (regval & INT_EMPTY_QUEUE) seq_puts(s, " EMPTY"); if (regval & INT_QUEUE_STOPPED) seq_puts(s, " STOPPED"); if (regval & INT_ERROR) seq_puts(s, " ERROR"); if (regval & INT_COMPLETION) seq_puts(s, " COMPLETION"); seq_puts(s, "\n"); return 0; } DEFINE_SHOW_ATTRIBUTE(pt_debugfs_info); DEFINE_SHOW_ATTRIBUTE(pt_debugfs_queue); DEFINE_SHOW_ATTRIBUTE(pt_debugfs_stats); void ptdma_debugfs_setup(struct pt_device *pt) { struct pt_cmd_queue *cmd_q; struct dentry *debugfs_q_instance; if (!debugfs_initialized()) return; debugfs_create_file("info", 0400, pt->dma_dev.dbg_dev_root, pt, &pt_debugfs_info_fops); debugfs_create_file("stats", 0400, pt->dma_dev.dbg_dev_root, pt, &pt_debugfs_stats_fops); cmd_q = &pt->cmd_q; debugfs_q_instance = debugfs_create_dir("q", pt->dma_dev.dbg_dev_root); debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q, &pt_debugfs_queue_fops); }
linux-master
drivers/dma/ptdma/ptdma-debugfs.c
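The "info" file above decodes CMD_PT_VERSION with two masks: bits [5:0] hold the version number and bits [18:15] the virtual-queue count. A plain, standalone C illustration of that decode, using a made-up register value:

#include <stdio.h>
#include <stdint.h>

#define RI_VERSION_NUM	0x0000003Fu	/* bits [5:0] */
#define RI_NUM_VQM	0x00078000u	/* bits [18:15] */
#define RI_NVQM_SHIFT	15

int main(void)
{
	uint32_t regval = 0x00018005;	/* hypothetical CMD_PT_VERSION value */

	printf("Version: %u\n", regval & RI_VERSION_NUM);		/* -> 5 */
	printf("Queues:  %u\n", (regval & RI_NUM_VQM) >> RI_NVQM_SHIFT);/* -> 3 */
	return 0;
}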
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Passthru DMA device driver * -- Based on the CCP driver * * Copyright (C) 2016,2021 Advanced Micro Devices, Inc. * * Author: Sanjay R Mehta <[email protected]> * Author: Gary R Hook <[email protected]> */ #include <linux/bitfield.h> #include <linux/dma-mapping.h> #include <linux/debugfs.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include "ptdma.h" /* Human-readable error strings */ static char *pt_error_codes[] = { "", "ERR 01: ILLEGAL_ENGINE", "ERR 03: ILLEGAL_FUNCTION_TYPE", "ERR 04: ILLEGAL_FUNCTION_MODE", "ERR 06: ILLEGAL_FUNCTION_SIZE", "ERR 08: ILLEGAL_FUNCTION_RSVD", "ERR 09: ILLEGAL_BUFFER_LENGTH", "ERR 10: VLSB_FAULT", "ERR 11: ILLEGAL_MEM_ADDR", "ERR 12: ILLEGAL_MEM_SEL", "ERR 13: ILLEGAL_CONTEXT_ID", "ERR 15: 0xF Reserved", "ERR 18: CMD_TIMEOUT", "ERR 19: IDMA0_AXI_SLVERR", "ERR 20: IDMA0_AXI_DECERR", "ERR 21: 0x15 Reserved", "ERR 22: IDMA1_AXI_SLAVE_FAULT", "ERR 23: IDMA1_AIXI_DECERR", "ERR 24: 0x18 Reserved", "ERR 27: 0x1B Reserved", "ERR 38: ODMA0_AXI_SLVERR", "ERR 39: ODMA0_AXI_DECERR", "ERR 40: 0x28 Reserved", "ERR 41: ODMA1_AXI_SLVERR", "ERR 42: ODMA1_AXI_DECERR", "ERR 43: LSB_PARITY_ERR", }; static void pt_log_error(struct pt_device *d, int e) { dev_err(d->dev, "PTDMA error: %s (0x%x)\n", pt_error_codes[e], e); } void pt_start_queue(struct pt_cmd_queue *cmd_q) { /* Turn on the run bit */ iowrite32(cmd_q->qcontrol | CMD_Q_RUN, cmd_q->reg_control); } void pt_stop_queue(struct pt_cmd_queue *cmd_q) { /* Turn off the run bit */ iowrite32(cmd_q->qcontrol & ~CMD_Q_RUN, cmd_q->reg_control); } static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd_q) { bool soc = FIELD_GET(DWORD0_SOC, desc->dw0); u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx]; u32 tail; unsigned long flags; if (soc) { desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0); desc->dw0 &= ~DWORD0_SOC; } spin_lock_irqsave(&cmd_q->q_lock, flags); /* Copy 32-byte command descriptor to hw queue. 
*/ memcpy(q_desc, desc, 32); cmd_q->qidx = (cmd_q->qidx + 1) % CMD_Q_LEN; /* The data used by this command must be flushed to memory */ wmb(); /* Write the new tail address back to the queue register */ tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); iowrite32(tail, cmd_q->reg_control + 0x0004); /* Turn the queue back on using our cached control register */ pt_start_queue(cmd_q); spin_unlock_irqrestore(&cmd_q->q_lock, flags); return 0; } int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q, struct pt_passthru_engine *pt_engine) { struct ptdma_desc desc; struct pt_device *pt = container_of(cmd_q, struct pt_device, cmd_q); cmd_q->cmd_error = 0; cmd_q->total_pt_ops++; memset(&desc, 0, sizeof(desc)); desc.dw0 = CMD_DESC_DW0_VAL; desc.length = pt_engine->src_len; desc.src_lo = lower_32_bits(pt_engine->src_dma); desc.dw3.src_hi = upper_32_bits(pt_engine->src_dma); desc.dst_lo = lower_32_bits(pt_engine->dst_dma); desc.dw5.dst_hi = upper_32_bits(pt_engine->dst_dma); if (cmd_q->int_en) pt_core_enable_queue_interrupts(pt); else pt_core_disable_queue_interrupts(pt); return pt_core_execute_cmd(&desc, cmd_q); } static void pt_do_cmd_complete(unsigned long data) { struct pt_tasklet_data *tdata = (struct pt_tasklet_data *)data; struct pt_cmd *cmd = tdata->cmd; struct pt_cmd_queue *cmd_q = &cmd->pt->cmd_q; u32 tail; if (cmd_q->cmd_error) { /* * Log the error and flush the queue by * moving the head pointer */ tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); pt_log_error(cmd_q->pt, cmd_q->cmd_error); iowrite32(tail, cmd_q->reg_control + 0x0008); } cmd->pt_cmd_callback(cmd->data, cmd->ret); } void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q) { u32 status; status = ioread32(cmd_q->reg_control + 0x0010); if (status) { cmd_q->int_status = status; cmd_q->q_status = ioread32(cmd_q->reg_control + 0x0100); cmd_q->q_int_status = ioread32(cmd_q->reg_control + 0x0104); /* On error, only save the first error value */ if ((status & INT_ERROR) && !cmd_q->cmd_error) cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); /* Acknowledge the completion */ iowrite32(status, cmd_q->reg_control + 0x0010); pt_do_cmd_complete((ulong)&pt->tdata); } } static irqreturn_t pt_core_irq_handler(int irq, void *data) { struct pt_device *pt = data; struct pt_cmd_queue *cmd_q = &pt->cmd_q; pt_core_disable_queue_interrupts(pt); pt->total_interrupts++; pt_check_status_trans(pt, cmd_q); pt_core_enable_queue_interrupts(pt); return IRQ_HANDLED; } int pt_core_init(struct pt_device *pt) { char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; struct pt_cmd_queue *cmd_q = &pt->cmd_q; u32 dma_addr_lo, dma_addr_hi; struct device *dev = pt->dev; struct dma_pool *dma_pool; int ret; /* Allocate a dma pool for the queue */ snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q", dev_name(pt->dev)); dma_pool = dma_pool_create(dma_pool_name, dev, PT_DMAPOOL_MAX_SIZE, PT_DMAPOOL_ALIGN, 0); if (!dma_pool) return -ENOMEM; /* ptdma core initialisation */ iowrite32(CMD_CONFIG_VHB_EN, pt->io_regs + CMD_CONFIG_OFFSET); iowrite32(CMD_QUEUE_PRIO, pt->io_regs + CMD_QUEUE_PRIO_OFFSET); iowrite32(CMD_TIMEOUT_DISABLE, pt->io_regs + CMD_TIMEOUT_OFFSET); iowrite32(CMD_CLK_GATE_CONFIG, pt->io_regs + CMD_CLK_GATE_CTL_OFFSET); iowrite32(CMD_CONFIG_REQID, pt->io_regs + CMD_REQID_CONFIG_OFFSET); cmd_q->pt = pt; cmd_q->dma_pool = dma_pool; spin_lock_init(&cmd_q->q_lock); /* Page alignment satisfies our needs for N <= 128 */ cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize, 
&cmd_q->qbase_dma, GFP_KERNEL); if (!cmd_q->qbase) { dev_err(dev, "unable to allocate command queue\n"); ret = -ENOMEM; goto e_destroy_pool; } cmd_q->qidx = 0; /* Preset some register values */ cmd_q->reg_control = pt->io_regs + CMD_Q_STATUS_INCR; /* Turn off the queues and disable interrupts until ready */ pt_core_disable_queue_interrupts(pt); cmd_q->qcontrol = 0; /* Start with nothing */ iowrite32(cmd_q->qcontrol, cmd_q->reg_control); ioread32(cmd_q->reg_control + 0x0104); ioread32(cmd_q->reg_control + 0x0100); /* Clear the interrupt status */ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010); /* Request an irq */ ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt); if (ret) { dev_err(dev, "unable to allocate an IRQ\n"); goto e_free_dma; } /* Update the device registers with queue information. */ cmd_q->qcontrol &= ~CMD_Q_SIZE; cmd_q->qcontrol |= FIELD_PREP(CMD_Q_SIZE, QUEUE_SIZE_VAL); cmd_q->qdma_tail = cmd_q->qbase_dma; dma_addr_lo = lower_32_bits(cmd_q->qdma_tail); iowrite32((u32)dma_addr_lo, cmd_q->reg_control + 0x0004); iowrite32((u32)dma_addr_lo, cmd_q->reg_control + 0x0008); dma_addr_hi = upper_32_bits(cmd_q->qdma_tail); cmd_q->qcontrol |= (dma_addr_hi << 16); iowrite32(cmd_q->qcontrol, cmd_q->reg_control); pt_core_enable_queue_interrupts(pt); /* Register the DMA engine support */ ret = pt_dmaengine_register(pt); if (ret) goto e_free_irq; /* Set up debugfs entries */ ptdma_debugfs_setup(pt); return 0; e_free_irq: free_irq(pt->pt_irq, pt); e_free_dma: dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma); e_destroy_pool: dma_pool_destroy(pt->cmd_q.dma_pool); return ret; } void pt_core_destroy(struct pt_device *pt) { struct device *dev = pt->dev; struct pt_cmd_queue *cmd_q = &pt->cmd_q; struct pt_cmd *cmd; /* Unregister the DMA engine */ pt_dmaengine_unregister(pt); /* Disable and clear interrupts */ pt_core_disable_queue_interrupts(pt); /* Turn off the run bit */ pt_stop_queue(cmd_q); /* Clear the interrupt status */ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010); ioread32(cmd_q->reg_control + 0x0104); ioread32(cmd_q->reg_control + 0x0100); free_irq(pt->pt_irq, pt); dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma); /* Flush the cmd queue */ while (!list_empty(&pt->cmd)) { /* Invoke the callback directly with an error code */ cmd = list_first_entry(&pt->cmd, struct pt_cmd, entry); list_del(&cmd->entry); cmd->pt_cmd_callback(cmd->data, -ENODEV); } }
linux-master
drivers/dma/ptdma/ptdma-dev.c
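pt_core_execute_cmd() above advances qidx modulo the queue length and writes base + qidx * descriptor_size to the tail register. The standalone C sketch below mirrors that arithmetic; the queue length and the 32-byte descriptor size are assumptions taken from the surrounding code, and the base address is made up.

#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_CMD_Q_LEN	32u	/* assumed number of descriptors in the ring */
#define EXAMPLE_Q_DESC_SIZE	32u	/* 32-byte command descriptor, as memcpy'd above */

int main(void)
{
	uint64_t qdma_tail = 0x100000000ULL;	/* hypothetical queue DMA base */
	uint32_t qidx = 0;

	for (int i = 0; i < 3; i++) {
		/* advance the index with wrap-around, then derive the tail value */
		qidx = (qidx + 1) % EXAMPLE_CMD_Q_LEN;
		printf("desc %d -> qidx %u, tail reg 0x%08x\n", i, qidx,
		       (uint32_t)(qdma_tail + qidx * EXAMPLE_Q_DESC_SIZE));
	}
	return 0;
}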
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Passthru DMA device driver
 * -- Based on the CCP driver
 *
 * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
 *
 * Author: Sanjay R Mehta <[email protected]>
 * Author: Tom Lendacky <[email protected]>
 * Author: Gary R Hook <[email protected]>
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/pci_ids.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

#include "ptdma.h"

struct pt_msix {
	int msix_count;
	struct msix_entry msix_entry;
};

/*
 * pt_alloc_struct - allocate and initialize the pt_device struct
 *
 * @dev: device struct of the PTDMA
 */
static struct pt_device *pt_alloc_struct(struct device *dev)
{
	struct pt_device *pt;

	pt = devm_kzalloc(dev, sizeof(*pt), GFP_KERNEL);
	if (!pt)
		return NULL;

	pt->dev = dev;

	INIT_LIST_HEAD(&pt->cmd);

	return pt;
}

static int pt_get_msix_irqs(struct pt_device *pt)
{
	struct pt_msix *pt_msix = pt->pt_msix;
	struct device *dev = pt->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	pt_msix->msix_entry.entry = 0;

	ret = pci_enable_msix_range(pdev, &pt_msix->msix_entry, 1, 1);
	if (ret < 0)
		return ret;

	pt_msix->msix_count = ret;

	pt->pt_irq = pt_msix->msix_entry.vector;

	return 0;
}

static int pt_get_msi_irq(struct pt_device *pt)
{
	struct device *dev = pt->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = pci_enable_msi(pdev);
	if (ret)
		return ret;

	pt->pt_irq = pdev->irq;

	return 0;
}

static int pt_get_irqs(struct pt_device *pt)
{
	struct device *dev = pt->dev;
	int ret;

	ret = pt_get_msix_irqs(pt);
	if (!ret)
		return 0;

	/* Couldn't get MSI-X vectors, try MSI */
	dev_err(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
	ret = pt_get_msi_irq(pt);
	if (!ret)
		return 0;

	/* Couldn't get MSI interrupt */
	dev_err(dev, "could not enable MSI (%d)\n", ret);

	return ret;
}

static void pt_free_irqs(struct pt_device *pt)
{
	struct pt_msix *pt_msix = pt->pt_msix;
	struct device *dev = pt->dev;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pt_msix->msix_count)
		pci_disable_msix(pdev);
	else if (pt->pt_irq)
		pci_disable_msi(pdev);

	pt->pt_irq = 0;
}

static int pt_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct pt_device *pt;
	struct pt_msix *pt_msix;
	struct device *dev = &pdev->dev;
	void __iomem * const *iomap_table;
	int bar_mask;
	int ret = -ENOMEM;

	pt = pt_alloc_struct(dev);
	if (!pt)
		goto e_err;

	pt_msix = devm_kzalloc(dev, sizeof(*pt_msix), GFP_KERNEL);
	if (!pt_msix)
		goto e_err;

	pt->pt_msix = pt_msix;
	pt->dev_vdata = (struct pt_dev_vdata *)id->driver_data;
	if (!pt->dev_vdata) {
		ret = -ENODEV;
		dev_err(dev, "missing driver data\n");
		goto e_err;
	}

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "pcim_enable_device failed (%d)\n", ret);
		goto e_err;
	}

	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	ret = pcim_iomap_regions(pdev, bar_mask, "ptdma");
	if (ret) {
		dev_err(dev, "pcim_iomap_regions failed (%d)\n", ret);
		goto e_err;
	}

	iomap_table = pcim_iomap_table(pdev);
	if (!iomap_table) {
		dev_err(dev, "pcim_iomap_table failed\n");
		ret = -ENOMEM;
		goto e_err;
	}

	pt->io_regs = iomap_table[pt->dev_vdata->bar];
	if (!pt->io_regs) {
		dev_err(dev, "ioremap failed\n");
		ret = -ENOMEM;
		goto e_err;
	}

	ret = pt_get_irqs(pt);
	if (ret)
		goto e_err;

	pci_set_master(pdev);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
				ret);
			goto e_err;
		}
	}

	dev_set_drvdata(dev, pt);

	if (pt->dev_vdata)
		ret = pt_core_init(pt);

	if (ret)
		goto e_err;

	return 0;

e_err:
	dev_err(dev, "initialization failed ret = %d\n", ret);

	return ret;
}

static void pt_pci_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct pt_device *pt = dev_get_drvdata(dev);

	if (!pt)
		return;

	if (pt->dev_vdata)
		pt_core_destroy(pt);

	pt_free_irqs(pt);
}

static const struct pt_dev_vdata dev_vdata[] = {
	{
		.bar = 2,
	},
};

static const struct pci_device_id pt_pci_table[] = {
	{ PCI_VDEVICE(AMD, 0x1498), (kernel_ulong_t)&dev_vdata[0] },
	/* Last entry must be zero */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, pt_pci_table);

static struct pci_driver pt_pci_driver = {
	.name = "ptdma",
	.id_table = pt_pci_table,
	.probe = pt_pci_probe,
	.remove = pt_pci_remove,
};

module_pci_driver(pt_pci_driver);

MODULE_AUTHOR("Sanjay R Mehta <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AMD PassThru DMA driver");
linux-master
drivers/dma/ptdma/ptdma-pci.c
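/*
 * Illustrative sketch (not part of the ptdma sources above): the probe path
 * above falls back from MSI-X to MSI by hand via pt_get_msix_irqs() and
 * pt_get_msi_irq(). The same "one vector, MSI-X preferred, MSI accepted"
 * request can also be expressed with the generic pci_alloc_irq_vectors()
 * helper. The my_dev type and my_probe_irqs() name below are hypothetical.
 */
#include <linux/pci.h>

struct my_dev {
	struct pci_dev *pdev;
	int irq;
};

static int my_probe_irqs(struct my_dev *d)
{
	struct pci_dev *pdev = d->pdev;
	int ret;

	/* Ask for exactly one vector, preferring MSI-X but accepting MSI. */
	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (ret < 0)
		return ret;

	/* Vector 0 is the only vector requested above. */
	d->irq = pci_irq_vector(pdev, 0);
	return 0;
}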
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com * Author: Peter Ujfalusi <[email protected]> */ #include <linux/kernel.h> #include "k3-psil-priv.h" #define PSIL_PDMA_XY_TR(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ }, \ } #define PSIL_PDMA_XY_PKT(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ .pkt_mode = 1, \ }, \ } #define PSIL_PDMA_MCASP(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ .pdma_acc32 = 1, \ .pdma_burst = 1, \ }, \ } #define PSIL_ETHERNET(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ .pkt_mode = 1, \ .needs_epib = 1, \ .psd_size = 16, \ }, \ } #define PSIL_SA2UL(x, tx) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ .pkt_mode = 1, \ .needs_epib = 1, \ .psd_size = 64, \ .notdpkt = tx, \ }, \ } /* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ static struct psil_ep j7200_src_ep_map[] = { /* PDMA_MCASP - McASP0-2 */ PSIL_PDMA_MCASP(0x4400), PSIL_PDMA_MCASP(0x4401), PSIL_PDMA_MCASP(0x4402), /* PDMA_SPI_G0 - SPI0-3 */ PSIL_PDMA_XY_PKT(0x4600), PSIL_PDMA_XY_PKT(0x4601), PSIL_PDMA_XY_PKT(0x4602), PSIL_PDMA_XY_PKT(0x4603), PSIL_PDMA_XY_PKT(0x4604), PSIL_PDMA_XY_PKT(0x4605), PSIL_PDMA_XY_PKT(0x4606), PSIL_PDMA_XY_PKT(0x4607), PSIL_PDMA_XY_PKT(0x4608), PSIL_PDMA_XY_PKT(0x4609), PSIL_PDMA_XY_PKT(0x460a), PSIL_PDMA_XY_PKT(0x460b), PSIL_PDMA_XY_PKT(0x460c), PSIL_PDMA_XY_PKT(0x460d), PSIL_PDMA_XY_PKT(0x460e), PSIL_PDMA_XY_PKT(0x460f), /* PDMA_SPI_G1 - SPI4-7 */ PSIL_PDMA_XY_PKT(0x4610), PSIL_PDMA_XY_PKT(0x4611), PSIL_PDMA_XY_PKT(0x4612), PSIL_PDMA_XY_PKT(0x4613), PSIL_PDMA_XY_PKT(0x4614), PSIL_PDMA_XY_PKT(0x4615), PSIL_PDMA_XY_PKT(0x4616), PSIL_PDMA_XY_PKT(0x4617), PSIL_PDMA_XY_PKT(0x4618), PSIL_PDMA_XY_PKT(0x4619), PSIL_PDMA_XY_PKT(0x461a), PSIL_PDMA_XY_PKT(0x461b), PSIL_PDMA_XY_PKT(0x461c), PSIL_PDMA_XY_PKT(0x461d), PSIL_PDMA_XY_PKT(0x461e), PSIL_PDMA_XY_PKT(0x461f), /* PDMA_USART_G0 - UART0-1 */ PSIL_PDMA_XY_PKT(0x4700), PSIL_PDMA_XY_PKT(0x4701), /* PDMA_USART_G1 - UART2-3 */ PSIL_PDMA_XY_PKT(0x4702), PSIL_PDMA_XY_PKT(0x4703), /* PDMA_USART_G2 - UART4-9 */ PSIL_PDMA_XY_PKT(0x4704), PSIL_PDMA_XY_PKT(0x4705), PSIL_PDMA_XY_PKT(0x4706), PSIL_PDMA_XY_PKT(0x4707), PSIL_PDMA_XY_PKT(0x4708), PSIL_PDMA_XY_PKT(0x4709), /* CPSW5 */ PSIL_ETHERNET(0x4a00), /* CPSW0 */ PSIL_ETHERNET(0x7000), /* MCU_PDMA_MISC_G0 - SPI0 */ PSIL_PDMA_XY_PKT(0x7100), PSIL_PDMA_XY_PKT(0x7101), PSIL_PDMA_XY_PKT(0x7102), PSIL_PDMA_XY_PKT(0x7103), /* MCU_PDMA_MISC_G1 - SPI1-2 */ PSIL_PDMA_XY_PKT(0x7200), PSIL_PDMA_XY_PKT(0x7201), PSIL_PDMA_XY_PKT(0x7202), PSIL_PDMA_XY_PKT(0x7203), PSIL_PDMA_XY_PKT(0x7204), PSIL_PDMA_XY_PKT(0x7205), PSIL_PDMA_XY_PKT(0x7206), PSIL_PDMA_XY_PKT(0x7207), /* MCU_PDMA_MISC_G2 - UART0 */ PSIL_PDMA_XY_PKT(0x7300), /* MCU_PDMA_ADC - ADC0-1 */ PSIL_PDMA_XY_TR(0x7400), PSIL_PDMA_XY_TR(0x7401), /* SA2UL */ PSIL_SA2UL(0x7500, 0), PSIL_SA2UL(0x7501, 0), PSIL_SA2UL(0x7502, 0), PSIL_SA2UL(0x7503, 0), }; /* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ static struct psil_ep j7200_dst_ep_map[] = { /* PDMA_MCASP - McASP0-2 */ PSIL_PDMA_MCASP(0xc400), PSIL_PDMA_MCASP(0xc401), PSIL_PDMA_MCASP(0xc402), /* PDMA_SPI_G0 - SPI0-3 */ PSIL_PDMA_XY_PKT(0xc600), PSIL_PDMA_XY_PKT(0xc601), PSIL_PDMA_XY_PKT(0xc602), PSIL_PDMA_XY_PKT(0xc603), PSIL_PDMA_XY_PKT(0xc604), PSIL_PDMA_XY_PKT(0xc605), PSIL_PDMA_XY_PKT(0xc606), PSIL_PDMA_XY_PKT(0xc607), PSIL_PDMA_XY_PKT(0xc608), PSIL_PDMA_XY_PKT(0xc609), 
PSIL_PDMA_XY_PKT(0xc60a), PSIL_PDMA_XY_PKT(0xc60b), PSIL_PDMA_XY_PKT(0xc60c), PSIL_PDMA_XY_PKT(0xc60d), PSIL_PDMA_XY_PKT(0xc60e), PSIL_PDMA_XY_PKT(0xc60f), /* PDMA_SPI_G1 - SPI4-7 */ PSIL_PDMA_XY_PKT(0xc610), PSIL_PDMA_XY_PKT(0xc611), PSIL_PDMA_XY_PKT(0xc612), PSIL_PDMA_XY_PKT(0xc613), PSIL_PDMA_XY_PKT(0xc614), PSIL_PDMA_XY_PKT(0xc615), PSIL_PDMA_XY_PKT(0xc616), PSIL_PDMA_XY_PKT(0xc617), PSIL_PDMA_XY_PKT(0xc618), PSIL_PDMA_XY_PKT(0xc619), PSIL_PDMA_XY_PKT(0xc61a), PSIL_PDMA_XY_PKT(0xc61b), PSIL_PDMA_XY_PKT(0xc61c), PSIL_PDMA_XY_PKT(0xc61d), PSIL_PDMA_XY_PKT(0xc61e), PSIL_PDMA_XY_PKT(0xc61f), /* PDMA_USART_G0 - UART0-1 */ PSIL_PDMA_XY_PKT(0xc700), PSIL_PDMA_XY_PKT(0xc701), /* PDMA_USART_G1 - UART2-3 */ PSIL_PDMA_XY_PKT(0xc702), PSIL_PDMA_XY_PKT(0xc703), /* PDMA_USART_G2 - UART4-9 */ PSIL_PDMA_XY_PKT(0xc704), PSIL_PDMA_XY_PKT(0xc705), PSIL_PDMA_XY_PKT(0xc706), PSIL_PDMA_XY_PKT(0xc707), PSIL_PDMA_XY_PKT(0xc708), PSIL_PDMA_XY_PKT(0xc709), /* CPSW5 */ PSIL_ETHERNET(0xca00), PSIL_ETHERNET(0xca01), PSIL_ETHERNET(0xca02), PSIL_ETHERNET(0xca03), PSIL_ETHERNET(0xca04), PSIL_ETHERNET(0xca05), PSIL_ETHERNET(0xca06), PSIL_ETHERNET(0xca07), /* CPSW0 */ PSIL_ETHERNET(0xf000), PSIL_ETHERNET(0xf001), PSIL_ETHERNET(0xf002), PSIL_ETHERNET(0xf003), PSIL_ETHERNET(0xf004), PSIL_ETHERNET(0xf005), PSIL_ETHERNET(0xf006), PSIL_ETHERNET(0xf007), /* MCU_PDMA_MISC_G0 - SPI0 */ PSIL_PDMA_XY_PKT(0xf100), PSIL_PDMA_XY_PKT(0xf101), PSIL_PDMA_XY_PKT(0xf102), PSIL_PDMA_XY_PKT(0xf103), /* MCU_PDMA_MISC_G1 - SPI1-2 */ PSIL_PDMA_XY_PKT(0xf200), PSIL_PDMA_XY_PKT(0xf201), PSIL_PDMA_XY_PKT(0xf202), PSIL_PDMA_XY_PKT(0xf203), PSIL_PDMA_XY_PKT(0xf204), PSIL_PDMA_XY_PKT(0xf205), PSIL_PDMA_XY_PKT(0xf206), PSIL_PDMA_XY_PKT(0xf207), /* MCU_PDMA_MISC_G2 - UART0 */ PSIL_PDMA_XY_PKT(0xf300), /* SA2UL */ PSIL_SA2UL(0xf500, 1), PSIL_SA2UL(0xf501, 1), }; struct psil_ep_map j7200_ep_map = { .name = "j7200", .src = j7200_src_ep_map, .src_count = ARRAY_SIZE(j7200_src_ep_map), .dst = j7200_dst_ep_map, .dst_count = ARRAY_SIZE(j7200_dst_ep_map), };
linux-master
drivers/dma/ti/k3-psil-j7200.c
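/*
 * Illustrative sketch (not part of the k3-psil-j7200.c sources above): the
 * map pairs PSI-L thread IDs with endpoint configuration, with source
 * threads (RX) in j7200_src_ep_map and destination threads (TX) in
 * j7200_dst_ep_map. A consumer can resolve a thread ID to its ep_config by
 * scanning the relevant array, roughly as below. find_psil_ep() is a
 * hypothetical helper; struct psil_ep/psil_ep_map come from k3-psil-priv.h.
 */
static struct psil_ep *find_psil_ep(struct psil_ep *eps, int count,
				    u32 thread_id)
{
	int i;

	for (i = 0; i < count; i++)
		if (eps[i].thread_id == thread_id)
			return &eps[i];

	return NULL;
}

/*
 * Example: RX thread 0x4400 (McASP0) vs TX thread 0xc400:
 *	find_psil_ep(j7200_ep_map.src, j7200_ep_map.src_count, 0x4400);
 *	find_psil_ep(j7200_ep_map.dst, j7200_ep_map.dst_count, 0xc400);
 */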
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <[email protected]>
 */

#include <linux/of.h>
#include <linux/of_platform.h>

int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
{
	return navss_psil_pair(ud, src_thread, dst_thread);
}
EXPORT_SYMBOL(xudma_navss_psil_pair);

int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
			    u32 dst_thread)
{
	return navss_psil_unpair(ud, src_thread, dst_thread);
}
EXPORT_SYMBOL(xudma_navss_psil_unpair);

struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
{
	struct device_node *udma_node = np;
	struct platform_device *pdev;
	struct udma_dev *ud;

	if (property) {
		udma_node = of_parse_phandle(np, property, 0);
		if (!udma_node) {
			pr_err("UDMA node is not found\n");
			return ERR_PTR(-ENODEV);
		}
	}

	pdev = of_find_device_by_node(udma_node);
	if (np != udma_node)
		of_node_put(udma_node);

	if (!pdev) {
		pr_debug("UDMA device not found\n");
		return ERR_PTR(-EPROBE_DEFER);
	}

	ud = platform_get_drvdata(pdev);
	if (!ud) {
		pr_debug("UDMA has not been probed\n");
		put_device(&pdev->dev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	return ud;
}
EXPORT_SYMBOL(of_xudma_dev_get);

struct device *xudma_get_device(struct udma_dev *ud)
{
	return ud->dev;
}
EXPORT_SYMBOL(xudma_get_device);

struct k3_ringacc *xudma_get_ringacc(struct udma_dev *ud)
{
	return ud->ringacc;
}
EXPORT_SYMBOL(xudma_get_ringacc);

u32 xudma_dev_get_psil_base(struct udma_dev *ud)
{
	return ud->psil_base;
}
EXPORT_SYMBOL(xudma_dev_get_psil_base);

struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud)
{
	return &ud->tisci_rm;
}
EXPORT_SYMBOL(xudma_dev_get_tisci_rm);

int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
	return __udma_alloc_gp_rflow_range(ud, from, cnt);
}
EXPORT_SYMBOL(xudma_alloc_gp_rflow_range);

int xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
	return __udma_free_gp_rflow_range(ud, from, cnt);
}
EXPORT_SYMBOL(xudma_free_gp_rflow_range);

bool xudma_rflow_is_gp(struct udma_dev *ud, int id)
{
	if (!ud->rflow_gp_map)
		return false;

	return !test_bit(id, ud->rflow_gp_map);
}
EXPORT_SYMBOL(xudma_rflow_is_gp);

#define XUDMA_GET_PUT_RESOURCE(res)					\
struct udma_##res *xudma_##res##_get(struct udma_dev *ud, int id)	\
{									\
	return __udma_reserve_##res(ud, UDMA_TP_NORMAL, id);		\
}									\
EXPORT_SYMBOL(xudma_##res##_get);					\
									\
void xudma_##res##_put(struct udma_dev *ud, struct udma_##res *p)	\
{									\
	clear_bit(p->id, ud->res##_map);				\
}									\
EXPORT_SYMBOL(xudma_##res##_put)

XUDMA_GET_PUT_RESOURCE(tchan);
XUDMA_GET_PUT_RESOURCE(rchan);

struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id)
{
	return __udma_get_rflow(ud, id);
}
EXPORT_SYMBOL(xudma_rflow_get);

void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p)
{
	__udma_put_rflow(ud, p);
}
EXPORT_SYMBOL(xudma_rflow_put);

int xudma_get_rflow_ring_offset(struct udma_dev *ud)
{
	return ud->tflow_cnt;
}
EXPORT_SYMBOL(xudma_get_rflow_ring_offset);

#define XUDMA_GET_RESOURCE_ID(res)					\
int xudma_##res##_get_id(struct udma_##res *p)				\
{									\
	return p->id;							\
}									\
EXPORT_SYMBOL(xudma_##res##_get_id)

XUDMA_GET_RESOURCE_ID(tchan);
XUDMA_GET_RESOURCE_ID(rchan);
XUDMA_GET_RESOURCE_ID(rflow);

/* Exported register access functions */
#define XUDMA_RT_IO_FUNCTIONS(res)					\
u32 xudma_##res##rt_read(struct udma_##res *p, int reg)			\
{									\
	if (!p)								\
		return 0;						\
	return udma_read(p->reg_rt, reg);				\
}									\
EXPORT_SYMBOL(xudma_##res##rt_read);					\
									\
void xudma_##res##rt_write(struct udma_##res *p, int reg, u32 val)	\
{									\
	if (!p)								\
		return;							\
	udma_write(p->reg_rt, reg, val);				\
}									\
EXPORT_SYMBOL(xudma_##res##rt_write)

XUDMA_RT_IO_FUNCTIONS(tchan);
XUDMA_RT_IO_FUNCTIONS(rchan);

int xudma_is_pktdma(struct udma_dev *ud)
{
	return ud->match_data->type == DMA_TYPE_PKTDMA;
}
EXPORT_SYMBOL(xudma_is_pktdma);

int xudma_pktdma_tflow_get_irq(struct udma_dev *ud, int udma_tflow_id)
{
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;

	return msi_get_virq(ud->dev, udma_tflow_id + oes->pktdma_tchan_flow);
}
EXPORT_SYMBOL(xudma_pktdma_tflow_get_irq);

int xudma_pktdma_rflow_get_irq(struct udma_dev *ud, int udma_rflow_id)
{
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;

	return msi_get_virq(ud->dev, udma_rflow_id + oes->pktdma_rchan_flow);
}
EXPORT_SYMBOL(xudma_pktdma_rflow_get_irq);
linux-master
drivers/dma/ti/k3-udma-private.c
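/*
 * Illustrative sketch (not part of the k3-udma-private.c sources above): the
 * exported xudma_* helpers form the glue API that peripheral drivers use to
 * borrow UDMA resources. A hypothetical consumer might locate the UDMA
 * instance from a DT phandle, reserve a tchan, and pair the channel's PSI-L
 * thread with its peripheral thread. my_glue_setup(), my_thread_id and the
 * "ti,my-dma" property are made-up names; the xudma_* prototypes are assumed
 * to come from k3-udma.h.
 */
#include <linux/err.h>
#include <linux/of.h>

static int my_glue_setup(struct device_node *np, u32 my_thread_id)
{
	struct udma_dev *ud;
	struct udma_tchan *tchan;
	u32 psil_base;
	int ret;

	ud = of_xudma_dev_get(np, "ti,my-dma");	/* hypothetical property */
	if (IS_ERR(ud))
		return PTR_ERR(ud);

	tchan = xudma_tchan_get(ud, -1);	/* -1: any free tchan */
	if (IS_ERR(tchan))
		return PTR_ERR(tchan);

	psil_base = xudma_dev_get_psil_base(ud);
	ret = xudma_navss_psil_pair(ud, psil_base + xudma_tchan_get_id(tchan),
				    my_thread_id);
	if (ret) {
		xudma_tchan_put(ud, tchan);
		return ret;
	}

	return 0;
}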
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com */ #include <linux/kernel.h> #include "k3-psil-priv.h" #define PSIL_PDMA_XY_TR(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ }, \ } #define PSIL_PDMA_XY_PKT(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ .pkt_mode = 1, \ }, \ } #define PSIL_PDMA_MCASP(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ .pdma_acc32 = 1, \ .pdma_burst = 1, \ }, \ } #define PSIL_ETHERNET(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ .pkt_mode = 1, \ .needs_epib = 1, \ .psd_size = 16, \ }, \ } #define PSIL_SA2UL(x, tx) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ .pkt_mode = 1, \ .needs_epib = 1, \ .psd_size = 64, \ .notdpkt = tx, \ }, \ } /* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ static struct psil_ep j721s2_src_ep_map[] = { /* PDMA_MCASP - McASP0-4 */ PSIL_PDMA_MCASP(0x4400), PSIL_PDMA_MCASP(0x4401), PSIL_PDMA_MCASP(0x4402), PSIL_PDMA_MCASP(0x4403), PSIL_PDMA_MCASP(0x4404), /* PDMA_SPI_G0 - SPI0-3 */ PSIL_PDMA_XY_PKT(0x4600), PSIL_PDMA_XY_PKT(0x4601), PSIL_PDMA_XY_PKT(0x4602), PSIL_PDMA_XY_PKT(0x4603), PSIL_PDMA_XY_PKT(0x4604), PSIL_PDMA_XY_PKT(0x4605), PSIL_PDMA_XY_PKT(0x4606), PSIL_PDMA_XY_PKT(0x4607), PSIL_PDMA_XY_PKT(0x4608), PSIL_PDMA_XY_PKT(0x4609), PSIL_PDMA_XY_PKT(0x460a), PSIL_PDMA_XY_PKT(0x460b), PSIL_PDMA_XY_PKT(0x460c), PSIL_PDMA_XY_PKT(0x460d), PSIL_PDMA_XY_PKT(0x460e), PSIL_PDMA_XY_PKT(0x460f), /* PDMA_SPI_G1 - SPI4-7 */ PSIL_PDMA_XY_PKT(0x4610), PSIL_PDMA_XY_PKT(0x4611), PSIL_PDMA_XY_PKT(0x4612), PSIL_PDMA_XY_PKT(0x4613), PSIL_PDMA_XY_PKT(0x4614), PSIL_PDMA_XY_PKT(0x4615), PSIL_PDMA_XY_PKT(0x4616), PSIL_PDMA_XY_PKT(0x4617), PSIL_PDMA_XY_PKT(0x4618), PSIL_PDMA_XY_PKT(0x4619), PSIL_PDMA_XY_PKT(0x461a), PSIL_PDMA_XY_PKT(0x461b), PSIL_PDMA_XY_PKT(0x461c), PSIL_PDMA_XY_PKT(0x461d), PSIL_PDMA_XY_PKT(0x461e), PSIL_PDMA_XY_PKT(0x461f), /* MAIN_CPSW2G */ PSIL_ETHERNET(0x4640), /* PDMA_USART_G0 - UART0-1 */ PSIL_PDMA_XY_PKT(0x4700), PSIL_PDMA_XY_PKT(0x4701), /* PDMA_USART_G1 - UART2-3 */ PSIL_PDMA_XY_PKT(0x4702), PSIL_PDMA_XY_PKT(0x4703), /* PDMA_USART_G2 - UART4-9 */ PSIL_PDMA_XY_PKT(0x4704), PSIL_PDMA_XY_PKT(0x4705), PSIL_PDMA_XY_PKT(0x4706), PSIL_PDMA_XY_PKT(0x4707), PSIL_PDMA_XY_PKT(0x4708), PSIL_PDMA_XY_PKT(0x4709), /* MAIN SA2UL */ PSIL_SA2UL(0x4a40, 0), PSIL_SA2UL(0x4a41, 0), PSIL_SA2UL(0x4a42, 0), PSIL_SA2UL(0x4a43, 0), /* CPSW0 */ PSIL_ETHERNET(0x7000), /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */ PSIL_PDMA_XY_PKT(0x7100), PSIL_PDMA_XY_PKT(0x7101), PSIL_PDMA_XY_PKT(0x7102), PSIL_PDMA_XY_PKT(0x7103), /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */ PSIL_PDMA_XY_PKT(0x7200), PSIL_PDMA_XY_PKT(0x7201), PSIL_PDMA_XY_PKT(0x7202), PSIL_PDMA_XY_PKT(0x7203), PSIL_PDMA_XY_PKT(0x7204), PSIL_PDMA_XY_PKT(0x7205), PSIL_PDMA_XY_PKT(0x7206), PSIL_PDMA_XY_PKT(0x7207), /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */ PSIL_PDMA_XY_PKT(0x7300), /* MCU_PDMA_ADC - ADC0-1 */ PSIL_PDMA_XY_TR(0x7400), PSIL_PDMA_XY_TR(0x7401), PSIL_PDMA_XY_TR(0x7402), PSIL_PDMA_XY_TR(0x7403), /* SA2UL */ PSIL_SA2UL(0x7500, 0), PSIL_SA2UL(0x7501, 0), PSIL_SA2UL(0x7502, 0), PSIL_SA2UL(0x7503, 0), }; /* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ static struct psil_ep j721s2_dst_ep_map[] = { /* MAIN SA2UL */ PSIL_SA2UL(0xca40, 1), PSIL_SA2UL(0xca41, 1), /* CPSW0 */ PSIL_ETHERNET(0xf000), PSIL_ETHERNET(0xf001), PSIL_ETHERNET(0xf002), PSIL_ETHERNET(0xf003), PSIL_ETHERNET(0xf004), 
PSIL_ETHERNET(0xf005), PSIL_ETHERNET(0xf006), PSIL_ETHERNET(0xf007), /* MAIN_CPSW2G */ PSIL_ETHERNET(0xc640), PSIL_ETHERNET(0xc641), PSIL_ETHERNET(0xc642), PSIL_ETHERNET(0xc643), PSIL_ETHERNET(0xc644), PSIL_ETHERNET(0xc645), PSIL_ETHERNET(0xc646), PSIL_ETHERNET(0xc647), /* SA2UL */ PSIL_SA2UL(0xf500, 1), PSIL_SA2UL(0xf501, 1), }; struct psil_ep_map j721s2_ep_map = { .name = "j721s2", .src = j721s2_src_ep_map, .src_count = ARRAY_SIZE(j721s2_src_ep_map), .dst = j721s2_dst_ep_map, .dst_count = ARRAY_SIZE(j721s2_dst_ep_map), };
linux-master
drivers/dma/ti/k3-psil-j721s2.c
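/*
 * Illustrative sketch (not part of the k3-psil-j721s2.c sources above): each
 * SoC exports its own psil_ep_map (j7200_ep_map, j721s2_ep_map, ...), so a
 * runtime selection keyed on the SoC family is one way to pick the right
 * table. The my_* names and the table below are hypothetical; this is not
 * the kernel's actual selection table.
 */
#include <linux/sys_soc.h>

static const struct soc_device_attribute my_k3_soc_psil_map[] = {
	{ .family = "J7200", .data = &j7200_ep_map },
	{ .family = "J721S2", .data = &j721s2_ep_map },
	{ /* sentinel */ },
};

static const struct psil_ep_map *my_psil_map_for_soc(void)
{
	const struct soc_device_attribute *match;

	match = soc_device_match(my_k3_soc_psil_map);

	return match ? match->data : NULL;
}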
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com */ #include <linux/kernel.h> #include "k3-psil-priv.h" #define PSIL_PDMA_XY_PKT(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ .mapped_channel_id = -1, \ .default_flow_id = -1, \ .pkt_mode = 1, \ }, \ } #define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ .pkt_mode = 1, \ .needs_epib = 1, \ .psd_size = 16, \ .mapped_channel_id = ch, \ .flow_start = flow_base, \ .flow_num = flow_cnt, \ .default_flow_id = flow_base, \ }, \ } #define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ .pkt_mode = 1, \ .needs_epib = 1, \ .psd_size = 64, \ .mapped_channel_id = ch, \ .flow_start = flow_base, \ .flow_num = flow_cnt, \ .default_flow_id = default_flow, \ .notdpkt = tx, \ }, \ } #define PSIL_PDMA_MCASP(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ .pdma_acc32 = 1, \ .pdma_burst = 1, \ }, \ } #define PSIL_CSI2RX(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ }, \ } /* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ static struct psil_ep am62_src_ep_map[] = { /* SAUL */ PSIL_SAUL(0x7504, 20, 35, 8, 35, 0), PSIL_SAUL(0x7505, 21, 35, 8, 36, 0), PSIL_SAUL(0x7506, 22, 43, 8, 43, 0), PSIL_SAUL(0x7507, 23, 43, 8, 44, 0), /* PDMA_MAIN0 - SPI0-3 */ PSIL_PDMA_XY_PKT(0x4302), PSIL_PDMA_XY_PKT(0x4303), PSIL_PDMA_XY_PKT(0x4304), PSIL_PDMA_XY_PKT(0x4305), PSIL_PDMA_XY_PKT(0x4306), PSIL_PDMA_XY_PKT(0x4307), PSIL_PDMA_XY_PKT(0x4308), PSIL_PDMA_XY_PKT(0x4309), PSIL_PDMA_XY_PKT(0x430a), PSIL_PDMA_XY_PKT(0x430b), PSIL_PDMA_XY_PKT(0x430c), PSIL_PDMA_XY_PKT(0x430d), /* PDMA_MAIN1 - UART0-6 */ PSIL_PDMA_XY_PKT(0x4400), PSIL_PDMA_XY_PKT(0x4401), PSIL_PDMA_XY_PKT(0x4402), PSIL_PDMA_XY_PKT(0x4403), PSIL_PDMA_XY_PKT(0x4404), PSIL_PDMA_XY_PKT(0x4405), PSIL_PDMA_XY_PKT(0x4406), /* PDMA_MAIN2 - MCASP0-2 */ PSIL_PDMA_MCASP(0x4500), PSIL_PDMA_MCASP(0x4501), PSIL_PDMA_MCASP(0x4502), /* CPSW3G */ PSIL_ETHERNET(0x4600, 19, 19, 16), /* CSI2RX */ PSIL_CSI2RX(0x4700), PSIL_CSI2RX(0x4701), PSIL_CSI2RX(0x4702), PSIL_CSI2RX(0x4703), PSIL_CSI2RX(0x4704), PSIL_CSI2RX(0x4705), PSIL_CSI2RX(0x4706), PSIL_CSI2RX(0x4707), PSIL_CSI2RX(0x4708), PSIL_CSI2RX(0x4709), PSIL_CSI2RX(0x470a), PSIL_CSI2RX(0x470b), PSIL_CSI2RX(0x470c), PSIL_CSI2RX(0x470d), PSIL_CSI2RX(0x470e), PSIL_CSI2RX(0x470f), PSIL_CSI2RX(0x4710), PSIL_CSI2RX(0x4711), PSIL_CSI2RX(0x4712), PSIL_CSI2RX(0x4713), PSIL_CSI2RX(0x4714), PSIL_CSI2RX(0x4715), PSIL_CSI2RX(0x4716), PSIL_CSI2RX(0x4717), PSIL_CSI2RX(0x4718), PSIL_CSI2RX(0x4719), PSIL_CSI2RX(0x471a), PSIL_CSI2RX(0x471b), PSIL_CSI2RX(0x471c), PSIL_CSI2RX(0x471d), PSIL_CSI2RX(0x471e), PSIL_CSI2RX(0x471f), }; /* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ static struct psil_ep am62_dst_ep_map[] = { /* SAUL */ PSIL_SAUL(0xf500, 27, 83, 8, 83, 1), PSIL_SAUL(0xf501, 28, 91, 8, 91, 1), /* PDMA_MAIN0 - SPI0-3 */ PSIL_PDMA_XY_PKT(0xc302), PSIL_PDMA_XY_PKT(0xc303), PSIL_PDMA_XY_PKT(0xc304), PSIL_PDMA_XY_PKT(0xc305), PSIL_PDMA_XY_PKT(0xc306), PSIL_PDMA_XY_PKT(0xc307), PSIL_PDMA_XY_PKT(0xc308), PSIL_PDMA_XY_PKT(0xc309), PSIL_PDMA_XY_PKT(0xc30a), PSIL_PDMA_XY_PKT(0xc30b), PSIL_PDMA_XY_PKT(0xc30c), PSIL_PDMA_XY_PKT(0xc30d), /* PDMA_MAIN1 - UART0-6 */ PSIL_PDMA_XY_PKT(0xc400), PSIL_PDMA_XY_PKT(0xc401), PSIL_PDMA_XY_PKT(0xc402), PSIL_PDMA_XY_PKT(0xc403), PSIL_PDMA_XY_PKT(0xc404), 
PSIL_PDMA_XY_PKT(0xc405), PSIL_PDMA_XY_PKT(0xc406), /* PDMA_MAIN2 - MCASP0-2 */ PSIL_PDMA_MCASP(0xc500), PSIL_PDMA_MCASP(0xc501), PSIL_PDMA_MCASP(0xc502), /* CPSW3G */ PSIL_ETHERNET(0xc600, 19, 19, 8), PSIL_ETHERNET(0xc601, 20, 27, 8), PSIL_ETHERNET(0xc602, 21, 35, 8), PSIL_ETHERNET(0xc603, 22, 43, 8), PSIL_ETHERNET(0xc604, 23, 51, 8), PSIL_ETHERNET(0xc605, 24, 59, 8), PSIL_ETHERNET(0xc606, 25, 67, 8), PSIL_ETHERNET(0xc607, 26, 75, 8), }; struct psil_ep_map am62_ep_map = { .name = "am62", .src = am62_src_ep_map, .src_count = ARRAY_SIZE(am62_src_ep_map), .dst = am62_dst_ep_map, .dst_count = ARRAY_SIZE(am62_dst_ep_map), };
linux-master
drivers/dma/ti/k3-psil-am62.c
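/*
 * Illustrative sketch (not part of the k3-psil-am62.c sources above): unlike
 * the older maps, the AM62 PSIL_ETHERNET() entries carry mapped-channel and
 * flow-range parameters. Expanding one entry by hand, per the macro defined
 * in this file, makes the fields concrete. The am62_cpsw_tx0_example name is
 * hypothetical and only for reference.
 */
static const struct psil_ep am62_cpsw_tx0_example = {
	/* PSIL_ETHERNET(0xc600, 19, 19, 8) expands to roughly: */
	.thread_id = 0xc600,
	.ep_config = {
		.ep_type = PSIL_EP_NATIVE,
		.pkt_mode = 1,
		.needs_epib = 1,
		.psd_size = 16,
		.mapped_channel_id = 19,
		.flow_start = 19,
		.flow_num = 8,
		.default_flow_id = 19,
	},
};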
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com * Author: Peter Ujfalusi <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/sys_soc.h> #include <linux/of.h> #include <linux/of_dma.h> #include <linux/of_irq.h> #include <linux/workqueue.h> #include <linux/completion.h> #include <linux/soc/ti/k3-ringacc.h> #include <linux/soc/ti/ti_sci_protocol.h> #include <linux/soc/ti/ti_sci_inta_msi.h> #include <linux/dma/k3-event-router.h> #include <linux/dma/ti-cppi5.h> #include "../virt-dma.h" #include "k3-udma.h" #include "k3-psil-priv.h" struct udma_static_tr { u8 elsize; /* RPSTR0 */ u16 elcnt; /* RPSTR0 */ u16 bstcnt; /* RPSTR1 */ }; #define K3_UDMA_MAX_RFLOWS 1024 #define K3_UDMA_DEFAULT_RING_SIZE 16 /* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */ #define UDMA_RFLOW_SRCTAG_NONE 0 #define UDMA_RFLOW_SRCTAG_CFG_TAG 1 #define UDMA_RFLOW_SRCTAG_FLOW_ID 2 #define UDMA_RFLOW_SRCTAG_SRC_TAG 4 #define UDMA_RFLOW_DSTTAG_NONE 0 #define UDMA_RFLOW_DSTTAG_CFG_TAG 1 #define UDMA_RFLOW_DSTTAG_FLOW_ID 2 #define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4 #define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5 struct udma_chan; enum k3_dma_type { DMA_TYPE_UDMA = 0, DMA_TYPE_BCDMA, DMA_TYPE_PKTDMA, }; enum udma_mmr { MMR_GCFG = 0, MMR_BCHANRT, MMR_RCHANRT, MMR_TCHANRT, MMR_LAST, }; static const char * const mmr_names[] = { [MMR_GCFG] = "gcfg", [MMR_BCHANRT] = "bchanrt", [MMR_RCHANRT] = "rchanrt", [MMR_TCHANRT] = "tchanrt", }; struct udma_tchan { void __iomem *reg_rt; int id; struct k3_ring *t_ring; /* Transmit ring */ struct k3_ring *tc_ring; /* Transmit Completion ring */ int tflow_id; /* applicable only for PKTDMA */ }; #define udma_bchan udma_tchan struct udma_rflow { int id; struct k3_ring *fd_ring; /* Free Descriptor ring */ struct k3_ring *r_ring; /* Receive ring */ }; struct udma_rchan { void __iomem *reg_rt; int id; }; struct udma_oes_offsets { /* K3 UDMA Output Event Offset */ u32 udma_rchan; /* BCDMA Output Event Offsets */ u32 bcdma_bchan_data; u32 bcdma_bchan_ring; u32 bcdma_tchan_data; u32 bcdma_tchan_ring; u32 bcdma_rchan_data; u32 bcdma_rchan_ring; /* PKTDMA Output Event Offsets */ u32 pktdma_tchan_flow; u32 pktdma_rchan_flow; }; #define UDMA_FLAG_PDMA_ACC32 BIT(0) #define UDMA_FLAG_PDMA_BURST BIT(1) #define UDMA_FLAG_TDTYPE BIT(2) #define UDMA_FLAG_BURST_SIZE BIT(3) #define UDMA_FLAGS_J7_CLASS (UDMA_FLAG_PDMA_ACC32 | \ UDMA_FLAG_PDMA_BURST | \ UDMA_FLAG_TDTYPE | \ UDMA_FLAG_BURST_SIZE) struct udma_match_data { enum k3_dma_type type; u32 psil_base; bool enable_memcpy_support; u32 flags; u32 statictr_z_mask; u8 burst_size[3]; struct udma_soc_data *soc_data; }; struct udma_soc_data { struct udma_oes_offsets oes; u32 bcdma_trigger_event_offset; }; struct udma_hwdesc { size_t cppi5_desc_size; void *cppi5_desc_vaddr; dma_addr_t cppi5_desc_paddr; /* TR descriptor internal pointers */ void *tr_req_base; struct cppi5_tr_resp_t *tr_resp_base; }; struct udma_rx_flush { struct udma_hwdesc hwdescs[2]; size_t buffer_size; void *buffer_vaddr; dma_addr_t buffer_paddr; }; struct udma_tpl { u8 levels; u32 start_idx[3]; }; struct udma_dev { struct dma_device ddev; struct device *dev; void __iomem *mmrs[MMR_LAST]; const struct 
udma_match_data *match_data; const struct udma_soc_data *soc_data; struct udma_tpl bchan_tpl; struct udma_tpl tchan_tpl; struct udma_tpl rchan_tpl; size_t desc_align; /* alignment to use for descriptors */ struct udma_tisci_rm tisci_rm; struct k3_ringacc *ringacc; struct work_struct purge_work; struct list_head desc_to_purge; spinlock_t lock; struct udma_rx_flush rx_flush; int bchan_cnt; int tchan_cnt; int echan_cnt; int rchan_cnt; int rflow_cnt; int tflow_cnt; unsigned long *bchan_map; unsigned long *tchan_map; unsigned long *rchan_map; unsigned long *rflow_gp_map; unsigned long *rflow_gp_map_allocated; unsigned long *rflow_in_use; unsigned long *tflow_map; struct udma_bchan *bchans; struct udma_tchan *tchans; struct udma_rchan *rchans; struct udma_rflow *rflows; struct udma_chan *channels; u32 psil_base; u32 atype; u32 asel; }; struct udma_desc { struct virt_dma_desc vd; bool terminated; enum dma_transfer_direction dir; struct udma_static_tr static_tr; u32 residue; unsigned int sglen; unsigned int desc_idx; /* Only used for cyclic in packet mode */ unsigned int tr_idx; u32 metadata_size; void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */ unsigned int hwdesc_count; struct udma_hwdesc hwdesc[]; }; enum udma_chan_state { UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */ UDMA_CHAN_IS_ACTIVE, /* Normal operation */ UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */ }; struct udma_tx_drain { struct delayed_work work; ktime_t tstamp; u32 residue; }; struct udma_chan_config { bool pkt_mode; /* TR or packet */ bool needs_epib; /* EPIB is needed for the communication or not */ u32 psd_size; /* size of Protocol Specific Data */ u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */ u32 hdesc_size; /* Size of a packet descriptor in packet mode */ bool notdpkt; /* Suppress sending TDC packet */ int remote_thread_id; u32 atype; u32 asel; u32 src_thread; u32 dst_thread; enum psil_endpoint_type ep_type; bool enable_acc32; bool enable_burst; enum udma_tp_level channel_tpl; /* Channel Throughput Level */ u32 tr_trigger_type; unsigned long tx_flags; /* PKDMA mapped channel */ int mapped_channel_id; /* PKTDMA default tflow or rflow for mapped channel */ int default_flow_id; enum dma_transfer_direction dir; }; struct udma_chan { struct virt_dma_chan vc; struct dma_slave_config cfg; struct udma_dev *ud; struct device *dma_dev; struct udma_desc *desc; struct udma_desc *terminated_desc; struct udma_static_tr static_tr; char *name; struct udma_bchan *bchan; struct udma_tchan *tchan; struct udma_rchan *rchan; struct udma_rflow *rflow; bool psil_paired; int irq_num_ring; int irq_num_udma; bool cyclic; bool paused; enum udma_chan_state state; struct completion teardown_completed; struct udma_tx_drain tx_drain; /* Channel configuration parameters */ struct udma_chan_config config; /* Channel configuration parameters (backup) */ struct udma_chan_config backup_config; /* dmapool for packet mode descriptors */ bool use_dma_pool; struct dma_pool *hdesc_pool; u32 id; }; static inline struct udma_dev *to_udma_dev(struct dma_device *d) { return container_of(d, struct udma_dev, ddev); } static inline struct udma_chan *to_udma_chan(struct dma_chan *c) { return container_of(c, struct udma_chan, vc.chan); } static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t) { return container_of(t, struct udma_desc, vd.tx); } /* Generic register access functions */ static inline u32 udma_read(void __iomem *base, int reg) { return readl(base + reg); } static inline 
void udma_write(void __iomem *base, int reg, u32 val) { writel(val, base + reg); } static inline void udma_update_bits(void __iomem *base, int reg, u32 mask, u32 val) { u32 tmp, orig; orig = readl(base + reg); tmp = orig & ~mask; tmp |= (val & mask); if (tmp != orig) writel(tmp, base + reg); } /* TCHANRT */ static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg) { if (!uc->tchan) return 0; return udma_read(uc->tchan->reg_rt, reg); } static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val) { if (!uc->tchan) return; udma_write(uc->tchan->reg_rt, reg, val); } static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg, u32 mask, u32 val) { if (!uc->tchan) return; udma_update_bits(uc->tchan->reg_rt, reg, mask, val); } /* RCHANRT */ static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg) { if (!uc->rchan) return 0; return udma_read(uc->rchan->reg_rt, reg); } static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val) { if (!uc->rchan) return; udma_write(uc->rchan->reg_rt, reg, val); } static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg, u32 mask, u32 val) { if (!uc->rchan) return; udma_update_bits(uc->rchan->reg_rt, reg, mask, val); } static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread) { struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci, tisci_rm->tisci_navss_dev_id, src_thread, dst_thread); } static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread, u32 dst_thread) { struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci, tisci_rm->tisci_navss_dev_id, src_thread, dst_thread); } static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel) { struct device *chan_dev = &chan->dev->device; if (asel == 0) { /* No special handling for the channel */ chan->dev->chan_dma_dev = false; chan_dev->dma_coherent = false; chan_dev->dma_parms = NULL; } else if (asel == 14 || asel == 15) { chan->dev->chan_dma_dev = true; chan_dev->dma_coherent = true; dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48)); chan_dev->dma_parms = chan_dev->parent->dma_parms; } else { dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel); chan_dev->dma_coherent = false; chan_dev->dma_parms = NULL; } } static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id) { int i; for (i = 0; i < tpl_map->levels; i++) { if (chan_id >= tpl_map->start_idx[i]) return i; } return 0; } static void udma_reset_uchan(struct udma_chan *uc) { memset(&uc->config, 0, sizeof(uc->config)); uc->config.remote_thread_id = -1; uc->config.mapped_channel_id = -1; uc->config.default_flow_id = -1; uc->state = UDMA_CHAN_IS_IDLE; } static void udma_dump_chan_stdata(struct udma_chan *uc) { struct device *dev = uc->ud->dev; u32 offset; int i; if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) { dev_dbg(dev, "TCHAN State data:\n"); for (i = 0; i < 32; i++) { offset = UDMA_CHAN_RT_STDATA_REG + i * 4; dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i, udma_tchanrt_read(uc, offset)); } } if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) { dev_dbg(dev, "RCHAN State data:\n"); for (i = 0; i < 32; i++) { offset = UDMA_CHAN_RT_STDATA_REG + i * 4; dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i, udma_rchanrt_read(uc, offset)); } } } static inline dma_addr_t 
udma_curr_cppi5_desc_paddr(struct udma_desc *d, int idx) { return d->hwdesc[idx].cppi5_desc_paddr; } static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx) { return d->hwdesc[idx].cppi5_desc_vaddr; } static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc, dma_addr_t paddr) { struct udma_desc *d = uc->terminated_desc; if (d) { dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, d->desc_idx); if (desc_paddr != paddr) d = NULL; } if (!d) { d = uc->desc; if (d) { dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, d->desc_idx); if (desc_paddr != paddr) d = NULL; } } return d; } static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d) { if (uc->use_dma_pool) { int i; for (i = 0; i < d->hwdesc_count; i++) { if (!d->hwdesc[i].cppi5_desc_vaddr) continue; dma_pool_free(uc->hdesc_pool, d->hwdesc[i].cppi5_desc_vaddr, d->hwdesc[i].cppi5_desc_paddr); d->hwdesc[i].cppi5_desc_vaddr = NULL; } } else if (d->hwdesc[0].cppi5_desc_vaddr) { dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size, d->hwdesc[0].cppi5_desc_vaddr, d->hwdesc[0].cppi5_desc_paddr); d->hwdesc[0].cppi5_desc_vaddr = NULL; } } static void udma_purge_desc_work(struct work_struct *work) { struct udma_dev *ud = container_of(work, typeof(*ud), purge_work); struct virt_dma_desc *vd, *_vd; unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&ud->lock, flags); list_splice_tail_init(&ud->desc_to_purge, &head); spin_unlock_irqrestore(&ud->lock, flags); list_for_each_entry_safe(vd, _vd, &head, node) { struct udma_chan *uc = to_udma_chan(vd->tx.chan); struct udma_desc *d = to_udma_desc(&vd->tx); udma_free_hwdesc(uc, d); list_del(&vd->node); kfree(d); } /* If more to purge, schedule the work again */ if (!list_empty(&ud->desc_to_purge)) schedule_work(&ud->purge_work); } static void udma_desc_free(struct virt_dma_desc *vd) { struct udma_dev *ud = to_udma_dev(vd->tx.chan->device); struct udma_chan *uc = to_udma_chan(vd->tx.chan); struct udma_desc *d = to_udma_desc(&vd->tx); unsigned long flags; if (uc->terminated_desc == d) uc->terminated_desc = NULL; if (uc->use_dma_pool) { udma_free_hwdesc(uc, d); kfree(d); return; } spin_lock_irqsave(&ud->lock, flags); list_add_tail(&vd->node, &ud->desc_to_purge); spin_unlock_irqrestore(&ud->lock, flags); schedule_work(&ud->purge_work); } static bool udma_is_chan_running(struct udma_chan *uc) { u32 trt_ctl = 0; u32 rrt_ctl = 0; if (uc->tchan) trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); if (uc->rchan) rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN) return true; return false; } static bool udma_is_chan_paused(struct udma_chan *uc) { u32 val, pause_mask; switch (uc->config.dir) { case DMA_DEV_TO_MEM: val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG); pause_mask = UDMA_PEER_RT_EN_PAUSE; break; case DMA_MEM_TO_DEV: val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG); pause_mask = UDMA_PEER_RT_EN_PAUSE; break; case DMA_MEM_TO_MEM: val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); pause_mask = UDMA_CHAN_RT_CTL_PAUSE; break; default: return false; } if (val & pause_mask) return true; return false; } static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc) { return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr; } static int udma_push_to_ring(struct udma_chan *uc, int idx) { struct udma_desc *d = uc->desc; struct k3_ring *ring = NULL; dma_addr_t paddr; switch (uc->config.dir) { case DMA_DEV_TO_MEM: ring = uc->rflow->fd_ring; 
break; case DMA_MEM_TO_DEV: case DMA_MEM_TO_MEM: ring = uc->tchan->t_ring; break; default: return -EINVAL; } /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */ if (idx == -1) { paddr = udma_get_rx_flush_hwdesc_paddr(uc); } else { paddr = udma_curr_cppi5_desc_paddr(d, idx); wmb(); /* Ensure that writes are not moved over this point */ } return k3_ringacc_ring_push(ring, &paddr); } static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr) { if (uc->config.dir != DMA_DEV_TO_MEM) return false; if (addr == udma_get_rx_flush_hwdesc_paddr(uc)) return true; return false; } static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr) { struct k3_ring *ring = NULL; int ret; switch (uc->config.dir) { case DMA_DEV_TO_MEM: ring = uc->rflow->r_ring; break; case DMA_MEM_TO_DEV: case DMA_MEM_TO_MEM: ring = uc->tchan->tc_ring; break; default: return -ENOENT; } ret = k3_ringacc_ring_pop(ring, addr); if (ret) return ret; rmb(); /* Ensure that reads are not moved before this point */ /* Teardown completion */ if (cppi5_desc_is_tdcm(*addr)) return 0; /* Check for flush descriptor */ if (udma_desc_is_rx_flush(uc, *addr)) return -ENOENT; return 0; } static void udma_reset_rings(struct udma_chan *uc) { struct k3_ring *ring1 = NULL; struct k3_ring *ring2 = NULL; switch (uc->config.dir) { case DMA_DEV_TO_MEM: if (uc->rchan) { ring1 = uc->rflow->fd_ring; ring2 = uc->rflow->r_ring; } break; case DMA_MEM_TO_DEV: case DMA_MEM_TO_MEM: if (uc->tchan) { ring1 = uc->tchan->t_ring; ring2 = uc->tchan->tc_ring; } break; default: break; } if (ring1) k3_ringacc_ring_reset_dma(ring1, k3_ringacc_ring_get_occ(ring1)); if (ring2) k3_ringacc_ring_reset(ring2); /* make sure we are not leaking memory by stalled descriptor */ if (uc->terminated_desc) { udma_desc_free(&uc->terminated_desc->vd); uc->terminated_desc = NULL; } } static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val) { if (uc->desc->dir == DMA_DEV_TO_MEM) { udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); if (uc->config.ep_type != PSIL_EP_NATIVE) udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); } else { udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE) udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); } } static void udma_reset_counters(struct udma_chan *uc) { u32 val; if (uc->tchan) { val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG); udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val); if (!uc->bchan) { val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); } } if (uc->rchan) { val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG); udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val); val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); } } static int udma_reset_chan(struct udma_chan *uc, bool hard) { switch (uc->config.dir) { case DMA_DEV_TO_MEM: udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0); 
udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); break; case DMA_MEM_TO_DEV: udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0); break; case DMA_MEM_TO_MEM: udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); break; default: return -EINVAL; } /* Reset all counters */ udma_reset_counters(uc); /* Hard reset: re-initialize the channel to reset */ if (hard) { struct udma_chan_config ucc_backup; int ret; memcpy(&ucc_backup, &uc->config, sizeof(uc->config)); uc->ud->ddev.device_free_chan_resources(&uc->vc.chan); /* restore the channel configuration */ memcpy(&uc->config, &ucc_backup, sizeof(uc->config)); ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan); if (ret) return ret; /* * Setting forced teardown after forced reset helps recovering * the rchan. */ if (uc->config.dir == DMA_DEV_TO_MEM) udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN | UDMA_CHAN_RT_CTL_FTDOWN); } uc->state = UDMA_CHAN_IS_IDLE; return 0; } static void udma_start_desc(struct udma_chan *uc) { struct udma_chan_config *ucc = &uc->config; if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) { int i; /* * UDMA only: Push all descriptors to ring for packet mode * cyclic or RX * PKTDMA supports pre-linked descriptor and cyclic is not * supported */ for (i = 0; i < uc->desc->sglen; i++) udma_push_to_ring(uc, i); } else { udma_push_to_ring(uc, 0); } } static bool udma_chan_needs_reconfiguration(struct udma_chan *uc) { /* Only PDMAs have staticTR */ if (uc->config.ep_type == PSIL_EP_NATIVE) return false; /* Check if the staticTR configuration has changed for TX */ if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr))) return true; return false; } static int udma_start(struct udma_chan *uc) { struct virt_dma_desc *vd = vchan_next_desc(&uc->vc); if (!vd) { uc->desc = NULL; return -ENOENT; } list_del(&vd->node); uc->desc = to_udma_desc(&vd->tx); /* Channel is already running and does not need reconfiguration */ if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) { udma_start_desc(uc); goto out; } /* Make sure that we clear the teardown bit, if it is set */ udma_reset_chan(uc, false); /* Push descriptors before we start the channel */ udma_start_desc(uc); switch (uc->desc->dir) { case DMA_DEV_TO_MEM: /* Config remote TR */ if (uc->config.ep_type == PSIL_EP_PDMA_XY) { u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); const struct udma_match_data *match_data = uc->ud->match_data; if (uc->config.enable_acc32) val |= PDMA_STATIC_TR_XY_ACC32; if (uc->config.enable_burst) val |= PDMA_STATIC_TR_XY_BURST; udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG, val); udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG, PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt, match_data->statictr_z_mask)); /* save the current staticTR configuration */ memcpy(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)); } udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, UDMA_CHAN_RT_CTL_EN); /* Enable remote */ udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, UDMA_PEER_RT_EN_ENABLE); break; case DMA_MEM_TO_DEV: /* Config remote TR */ if (uc->config.ep_type == PSIL_EP_PDMA_XY) { u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); if (uc->config.enable_acc32) val |= PDMA_STATIC_TR_XY_ACC32; if 
(uc->config.enable_burst) val |= PDMA_STATIC_TR_XY_BURST; udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG, val); /* save the current staticTR configuration */ memcpy(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)); } /* Enable remote */ udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, UDMA_PEER_RT_EN_ENABLE); udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, UDMA_CHAN_RT_CTL_EN); break; case DMA_MEM_TO_MEM: udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, UDMA_CHAN_RT_CTL_EN); udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, UDMA_CHAN_RT_CTL_EN); break; default: return -EINVAL; } uc->state = UDMA_CHAN_IS_ACTIVE; out: return 0; } static int udma_stop(struct udma_chan *uc) { enum udma_chan_state old_state = uc->state; uc->state = UDMA_CHAN_IS_TERMINATING; reinit_completion(&uc->teardown_completed); switch (uc->config.dir) { case DMA_DEV_TO_MEM: if (!uc->cyclic && !uc->desc) udma_push_to_ring(uc, -1); udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN); break; case DMA_MEM_TO_DEV: udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_FLUSH); udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN); break; case DMA_MEM_TO_MEM: udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN); break; default: uc->state = old_state; complete_all(&uc->teardown_completed); return -EINVAL; } return 0; } static void udma_cyclic_packet_elapsed(struct udma_chan *uc) { struct udma_desc *d = uc->desc; struct cppi5_host_desc_t *h_desc; h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr; cppi5_hdesc_reset_to_original(h_desc); udma_push_to_ring(uc, d->desc_idx); d->desc_idx = (d->desc_idx + 1) % d->sglen; } static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d) { struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr; memcpy(d->metadata, h_desc->epib, d->metadata_size); } static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d) { u32 peer_bcnt, bcnt; /* * Only TX towards PDMA is affected. * If DMA_PREP_INTERRUPT is not set by consumer then skip the transfer * completion calculation, consumer must ensure that there is no stale * data in DMA fabric in this case. 
*/ if (uc->config.ep_type == PSIL_EP_NATIVE || uc->config.dir != DMA_MEM_TO_DEV || !(uc->config.tx_flags & DMA_PREP_INTERRUPT)) return true; peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); /* Transfer is incomplete, store current residue and time stamp */ if (peer_bcnt < bcnt) { uc->tx_drain.residue = bcnt - peer_bcnt; uc->tx_drain.tstamp = ktime_get(); return false; } return true; } static void udma_check_tx_completion(struct work_struct *work) { struct udma_chan *uc = container_of(work, typeof(*uc), tx_drain.work.work); bool desc_done = true; u32 residue_diff; ktime_t time_diff; unsigned long delay; while (1) { if (uc->desc) { /* Get previous residue and time stamp */ residue_diff = uc->tx_drain.residue; time_diff = uc->tx_drain.tstamp; /* * Get current residue and time stamp or see if * transfer is complete */ desc_done = udma_is_desc_really_done(uc, uc->desc); } if (!desc_done) { /* * Find the time delta and residue delta w.r.t * previous poll */ time_diff = ktime_sub(uc->tx_drain.tstamp, time_diff) + 1; residue_diff -= uc->tx_drain.residue; if (residue_diff) { /* * Try to guess when we should check * next time by calculating rate at * which data is being drained at the * peer device */ delay = (time_diff / residue_diff) * uc->tx_drain.residue; } else { /* No progress, check again in 1 second */ schedule_delayed_work(&uc->tx_drain.work, HZ); break; } usleep_range(ktime_to_us(delay), ktime_to_us(delay) + 10); continue; } if (uc->desc) { struct udma_desc *d = uc->desc; udma_decrement_byte_counters(uc, d->residue); udma_start(uc); vchan_cookie_complete(&d->vd); break; } break; } } static irqreturn_t udma_ring_irq_handler(int irq, void *data) { struct udma_chan *uc = data; struct udma_desc *d; dma_addr_t paddr = 0; if (udma_pop_from_ring(uc, &paddr) || !paddr) return IRQ_HANDLED; spin_lock(&uc->vc.lock); /* Teardown completion message */ if (cppi5_desc_is_tdcm(paddr)) { complete_all(&uc->teardown_completed); if (uc->terminated_desc) { udma_desc_free(&uc->terminated_desc->vd); uc->terminated_desc = NULL; } if (!uc->desc) udma_start(uc); goto out; } d = udma_udma_desc_from_paddr(uc, paddr); if (d) { dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, d->desc_idx); if (desc_paddr != paddr) { dev_err(uc->ud->dev, "not matching descriptors!\n"); goto out; } if (d == uc->desc) { /* active descriptor */ if (uc->cyclic) { udma_cyclic_packet_elapsed(uc); vchan_cyclic_callback(&d->vd); } else { if (udma_is_desc_really_done(uc, d)) { udma_decrement_byte_counters(uc, d->residue); udma_start(uc); vchan_cookie_complete(&d->vd); } else { schedule_delayed_work(&uc->tx_drain.work, 0); } } } else { /* * terminated descriptor, mark the descriptor as * completed to update the channel's cookie marker */ dma_cookie_complete(&d->vd.tx); } } out: spin_unlock(&uc->vc.lock); return IRQ_HANDLED; } static irqreturn_t udma_udma_irq_handler(int irq, void *data) { struct udma_chan *uc = data; struct udma_desc *d; spin_lock(&uc->vc.lock); d = uc->desc; if (d) { d->tr_idx = (d->tr_idx + 1) % d->sglen; if (uc->cyclic) { vchan_cyclic_callback(&d->vd); } else { /* TODO: figure out the real amount of data */ udma_decrement_byte_counters(uc, d->residue); udma_start(uc); vchan_cookie_complete(&d->vd); } } spin_unlock(&uc->vc.lock); return IRQ_HANDLED; } /** * __udma_alloc_gp_rflow_range - alloc range of GP RX flows * @ud: UDMA device * @from: Start the search from this flow id number * @cnt: Number of consecutive flow ids to allocate * * Allocate range of RX 
flow ids for future use, those flows can be requested * only using explicit flow id number. if @from is set to -1 it will try to find * first free range. if @from is positive value it will force allocation only * of the specified range of flows. * * Returns -ENOMEM if can't find free range. * -EEXIST if requested range is busy. * -EINVAL if wrong input values passed. * Returns flow id on success. */ static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt) { int start, tmp_from; DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS); tmp_from = from; if (tmp_from < 0) tmp_from = ud->rchan_cnt; /* default flows can't be allocated and accessible only by id */ if (tmp_from < ud->rchan_cnt) return -EINVAL; if (tmp_from + cnt > ud->rflow_cnt) return -EINVAL; bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated, ud->rflow_cnt); start = bitmap_find_next_zero_area(tmp, ud->rflow_cnt, tmp_from, cnt, 0); if (start >= ud->rflow_cnt) return -ENOMEM; if (from >= 0 && start != from) return -EEXIST; bitmap_set(ud->rflow_gp_map_allocated, start, cnt); return start; } static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt) { if (from < ud->rchan_cnt) return -EINVAL; if (from + cnt > ud->rflow_cnt) return -EINVAL; bitmap_clear(ud->rflow_gp_map_allocated, from, cnt); return 0; } static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id) { /* * Attempt to request rflow by ID can be made for any rflow * if not in use with assumption that caller knows what's doing. * TI-SCI FW will perform additional permission check ant way, it's * safe */ if (id < 0 || id >= ud->rflow_cnt) return ERR_PTR(-ENOENT); if (test_bit(id, ud->rflow_in_use)) return ERR_PTR(-ENOENT); if (ud->rflow_gp_map) { /* GP rflow has to be allocated first */ if (!test_bit(id, ud->rflow_gp_map) && !test_bit(id, ud->rflow_gp_map_allocated)) return ERR_PTR(-EINVAL); } dev_dbg(ud->dev, "get rflow%d\n", id); set_bit(id, ud->rflow_in_use); return &ud->rflows[id]; } static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow) { if (!test_bit(rflow->id, ud->rflow_in_use)) { dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id); return; } dev_dbg(ud->dev, "put rflow%d\n", rflow->id); clear_bit(rflow->id, ud->rflow_in_use); } #define UDMA_RESERVE_RESOURCE(res) \ static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \ enum udma_tp_level tpl, \ int id) \ { \ if (id >= 0) { \ if (test_bit(id, ud->res##_map)) { \ dev_err(ud->dev, "res##%d is in use\n", id); \ return ERR_PTR(-ENOENT); \ } \ } else { \ int start; \ \ if (tpl >= ud->res##_tpl.levels) \ tpl = ud->res##_tpl.levels - 1; \ \ start = ud->res##_tpl.start_idx[tpl]; \ \ id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \ start); \ if (id == ud->res##_cnt) { \ return ERR_PTR(-ENOENT); \ } \ } \ \ set_bit(id, ud->res##_map); \ return &ud->res##s[id]; \ } UDMA_RESERVE_RESOURCE(bchan); UDMA_RESERVE_RESOURCE(tchan); UDMA_RESERVE_RESOURCE(rchan); static int bcdma_get_bchan(struct udma_chan *uc) { struct udma_dev *ud = uc->ud; enum udma_tp_level tpl; int ret; if (uc->bchan) { dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n", uc->id, uc->bchan->id); return 0; } /* * Use normal channels for peripherals, and highest TPL channel for * mem2mem */ if (uc->config.tr_trigger_type) tpl = 0; else tpl = ud->bchan_tpl.levels - 1; uc->bchan = __udma_reserve_bchan(ud, tpl, -1); if (IS_ERR(uc->bchan)) { ret = PTR_ERR(uc->bchan); uc->bchan = NULL; return ret; } uc->tchan = uc->bchan; return 0; } static int udma_get_tchan(struct 
udma_chan *uc) { struct udma_dev *ud = uc->ud; int ret; if (uc->tchan) { dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n", uc->id, uc->tchan->id); return 0; } /* * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels. * For PKTDMA mapped channels it is configured to a channel which must * be used to service the peripheral. */ uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, uc->config.mapped_channel_id); if (IS_ERR(uc->tchan)) { ret = PTR_ERR(uc->tchan); uc->tchan = NULL; return ret; } if (ud->tflow_cnt) { int tflow_id; /* Only PKTDMA have support for tx flows */ if (uc->config.default_flow_id >= 0) tflow_id = uc->config.default_flow_id; else tflow_id = uc->tchan->id; if (test_bit(tflow_id, ud->tflow_map)) { dev_err(ud->dev, "tflow%d is in use\n", tflow_id); clear_bit(uc->tchan->id, ud->tchan_map); uc->tchan = NULL; return -ENOENT; } uc->tchan->tflow_id = tflow_id; set_bit(tflow_id, ud->tflow_map); } else { uc->tchan->tflow_id = -1; } return 0; } static int udma_get_rchan(struct udma_chan *uc) { struct udma_dev *ud = uc->ud; int ret; if (uc->rchan) { dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n", uc->id, uc->rchan->id); return 0; } /* * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels. * For PKTDMA mapped channels it is configured to a channel which must * be used to service the peripheral. */ uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, uc->config.mapped_channel_id); if (IS_ERR(uc->rchan)) { ret = PTR_ERR(uc->rchan); uc->rchan = NULL; return ret; } return 0; } static int udma_get_chan_pair(struct udma_chan *uc) { struct udma_dev *ud = uc->ud; int chan_id, end; if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) { dev_info(ud->dev, "chan%d: already have %d pair allocated\n", uc->id, uc->tchan->id); return 0; } if (uc->tchan) { dev_err(ud->dev, "chan%d: already have tchan%d allocated\n", uc->id, uc->tchan->id); return -EBUSY; } else if (uc->rchan) { dev_err(ud->dev, "chan%d: already have rchan%d allocated\n", uc->id, uc->rchan->id); return -EBUSY; } /* Can be optimized, but let's have it like this for now */ end = min(ud->tchan_cnt, ud->rchan_cnt); /* * Try to use the highest TPL channel pair for MEM_TO_MEM channels * Note: in UDMAP the channel TPL is symmetric between tchan and rchan */ chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1]; for (; chan_id < end; chan_id++) { if (!test_bit(chan_id, ud->tchan_map) && !test_bit(chan_id, ud->rchan_map)) break; } if (chan_id == end) return -ENOENT; set_bit(chan_id, ud->tchan_map); set_bit(chan_id, ud->rchan_map); uc->tchan = &ud->tchans[chan_id]; uc->rchan = &ud->rchans[chan_id]; /* UDMA does not use tx flows */ uc->tchan->tflow_id = -1; return 0; } static int udma_get_rflow(struct udma_chan *uc, int flow_id) { struct udma_dev *ud = uc->ud; int ret; if (!uc->rchan) { dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id); return -EINVAL; } if (uc->rflow) { dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n", uc->id, uc->rflow->id); return 0; } uc->rflow = __udma_get_rflow(ud, flow_id); if (IS_ERR(uc->rflow)) { ret = PTR_ERR(uc->rflow); uc->rflow = NULL; return ret; } return 0; } static void bcdma_put_bchan(struct udma_chan *uc) { struct udma_dev *ud = uc->ud; if (uc->bchan) { dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id, uc->bchan->id); clear_bit(uc->bchan->id, ud->bchan_map); uc->bchan = NULL; uc->tchan = NULL; } } static void udma_put_rchan(struct udma_chan *uc) { struct udma_dev *ud = uc->ud; if (uc->rchan) { 
dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id, uc->rchan->id); clear_bit(uc->rchan->id, ud->rchan_map); uc->rchan = NULL; } } static void udma_put_tchan(struct udma_chan *uc) { struct udma_dev *ud = uc->ud; if (uc->tchan) { dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id, uc->tchan->id); clear_bit(uc->tchan->id, ud->tchan_map); if (uc->tchan->tflow_id >= 0) clear_bit(uc->tchan->tflow_id, ud->tflow_map); uc->tchan = NULL; } } static void udma_put_rflow(struct udma_chan *uc) { struct udma_dev *ud = uc->ud; if (uc->rflow) { dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id, uc->rflow->id); __udma_put_rflow(ud, uc->rflow); uc->rflow = NULL; } } static void bcdma_free_bchan_resources(struct udma_chan *uc) { if (!uc->bchan) return; k3_ringacc_ring_free(uc->bchan->tc_ring); k3_ringacc_ring_free(uc->bchan->t_ring); uc->bchan->tc_ring = NULL; uc->bchan->t_ring = NULL; k3_configure_chan_coherency(&uc->vc.chan, 0); bcdma_put_bchan(uc); } static int bcdma_alloc_bchan_resources(struct udma_chan *uc) { struct k3_ring_cfg ring_cfg; struct udma_dev *ud = uc->ud; int ret; ret = bcdma_get_bchan(uc); if (ret) return ret; ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1, &uc->bchan->t_ring, &uc->bchan->tc_ring); if (ret) { ret = -EBUSY; goto err_ring; } memset(&ring_cfg, 0, sizeof(ring_cfg)); ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; ring_cfg.mode = K3_RINGACC_RING_MODE_RING; k3_configure_chan_coherency(&uc->vc.chan, ud->asel); ring_cfg.asel = ud->asel; ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg); if (ret) goto err_ringcfg; return 0; err_ringcfg: k3_ringacc_ring_free(uc->bchan->tc_ring); uc->bchan->tc_ring = NULL; k3_ringacc_ring_free(uc->bchan->t_ring); uc->bchan->t_ring = NULL; k3_configure_chan_coherency(&uc->vc.chan, 0); err_ring: bcdma_put_bchan(uc); return ret; } static void udma_free_tx_resources(struct udma_chan *uc) { if (!uc->tchan) return; k3_ringacc_ring_free(uc->tchan->t_ring); k3_ringacc_ring_free(uc->tchan->tc_ring); uc->tchan->t_ring = NULL; uc->tchan->tc_ring = NULL; udma_put_tchan(uc); } static int udma_alloc_tx_resources(struct udma_chan *uc) { struct k3_ring_cfg ring_cfg; struct udma_dev *ud = uc->ud; struct udma_tchan *tchan; int ring_idx, ret; ret = udma_get_tchan(uc); if (ret) return ret; tchan = uc->tchan; if (tchan->tflow_id >= 0) ring_idx = tchan->tflow_id; else ring_idx = ud->bchan_cnt + tchan->id; ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1, &tchan->t_ring, &tchan->tc_ring); if (ret) { ret = -EBUSY; goto err_ring; } memset(&ring_cfg, 0, sizeof(ring_cfg)); ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; if (ud->match_data->type == DMA_TYPE_UDMA) { ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; } else { ring_cfg.mode = K3_RINGACC_RING_MODE_RING; k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel); ring_cfg.asel = uc->config.asel; ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); } ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg); ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg); if (ret) goto err_ringcfg; return 0; err_ringcfg: k3_ringacc_ring_free(uc->tchan->tc_ring); uc->tchan->tc_ring = NULL; k3_ringacc_ring_free(uc->tchan->t_ring); uc->tchan->t_ring = NULL; err_ring: udma_put_tchan(uc); return ret; } static void udma_free_rx_resources(struct udma_chan *uc) { if (!uc->rchan) return; if (uc->rflow) { struct udma_rflow *rflow = uc->rflow; 
k3_ringacc_ring_free(rflow->fd_ring); k3_ringacc_ring_free(rflow->r_ring); rflow->fd_ring = NULL; rflow->r_ring = NULL; udma_put_rflow(uc); } udma_put_rchan(uc); } static int udma_alloc_rx_resources(struct udma_chan *uc) { struct udma_dev *ud = uc->ud; struct k3_ring_cfg ring_cfg; struct udma_rflow *rflow; int fd_ring_id; int ret; ret = udma_get_rchan(uc); if (ret) return ret; /* For MEM_TO_MEM we don't need rflow or rings */ if (uc->config.dir == DMA_MEM_TO_MEM) return 0; if (uc->config.default_flow_id >= 0) ret = udma_get_rflow(uc, uc->config.default_flow_id); else ret = udma_get_rflow(uc, uc->rchan->id); if (ret) { ret = -EBUSY; goto err_rflow; } rflow = uc->rflow; if (ud->tflow_cnt) fd_ring_id = ud->tflow_cnt + rflow->id; else fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt + uc->rchan->id; ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1, &rflow->fd_ring, &rflow->r_ring); if (ret) { ret = -EBUSY; goto err_ring; } memset(&ring_cfg, 0, sizeof(ring_cfg)); ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; if (ud->match_data->type == DMA_TYPE_UDMA) { if (uc->config.pkt_mode) ring_cfg.size = SG_MAX_SEGMENTS; else ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; } else { ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; ring_cfg.mode = K3_RINGACC_RING_MODE_RING; k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel); ring_cfg.asel = uc->config.asel; ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); } ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg); ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg); if (ret) goto err_ringcfg; return 0; err_ringcfg: k3_ringacc_ring_free(rflow->r_ring); rflow->r_ring = NULL; k3_ringacc_ring_free(rflow->fd_ring); rflow->fd_ring = NULL; err_ring: udma_put_rflow(uc); err_rflow: udma_put_rchan(uc); return ret; } #define TISCI_BCDMA_BCHAN_VALID_PARAMS ( \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID) #define TISCI_BCDMA_TCHAN_VALID_PARAMS ( \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID) #define TISCI_BCDMA_RCHAN_VALID_PARAMS ( \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID) #define TISCI_UDMA_TCHAN_VALID_PARAMS ( \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID) #define TISCI_UDMA_RCHAN_VALID_PARAMS ( \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \ TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID) static int udma_tisci_m2m_channel_config(struct udma_chan *uc) { struct udma_dev *ud = uc->ud; struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; struct udma_tchan *tchan = uc->tchan; struct udma_rchan *rchan = uc->rchan; u8 burst_size = 0; int ret; u8 tpl; /* Non 
synchronized - mem to mem type of transfer */ int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) { tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id); burst_size = ud->match_data->burst_size[tpl]; } req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS; req_tx.nav_id = tisci_rm->tisci_dev_id; req_tx.index = tchan->id; req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; req_tx.txcq_qnum = tc_ring; req_tx.tx_atype = ud->atype; if (burst_size) { req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID; req_tx.tx_burst_size = burst_size; } ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); if (ret) { dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); return ret; } req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS; req_rx.nav_id = tisci_rm->tisci_dev_id; req_rx.index = rchan->id; req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; req_rx.rxcq_qnum = tc_ring; req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; req_rx.rx_atype = ud->atype; if (burst_size) { req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID; req_rx.rx_burst_size = burst_size; } ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); if (ret) dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret); return ret; } static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc) { struct udma_dev *ud = uc->ud; struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; struct udma_bchan *bchan = uc->bchan; u8 burst_size = 0; int ret; u8 tpl; if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) { tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id); burst_size = ud->match_data->burst_size[tpl]; } req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS; req_tx.nav_id = tisci_rm->tisci_dev_id; req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN; req_tx.index = bchan->id; if (burst_size) { req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID; req_tx.tx_burst_size = burst_size; } ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); if (ret) dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret); return ret; } static int udma_tisci_tx_channel_config(struct udma_chan *uc) { struct udma_dev *ud = uc->ud; struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; struct udma_tchan *tchan = uc->tchan; int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; u32 mode, fetch_size; int ret; if (uc->config.pkt_mode) { mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, uc->config.psd_size, 0); } else { mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR; fetch_size = sizeof(struct cppi5_desc_hdr_t); } req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS; req_tx.nav_id = tisci_rm->tisci_dev_id; req_tx.index = tchan->id; req_tx.tx_chan_type = mode; req_tx.tx_supr_tdpkt = uc->config.notdpkt; req_tx.tx_fetch_size = fetch_size >> 2; req_tx.txcq_qnum = tc_ring; req_tx.tx_atype = uc->config.atype; if (uc->config.ep_type == PSIL_EP_PDMA_XY && ud->match_data->flags & UDMA_FLAG_TDTYPE) { /* wait for peer to complete the teardown for PDMAs */ req_tx.valid_params |= 
TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID; req_tx.tx_tdtype = 1; } ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); if (ret) dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); return ret; } static int bcdma_tisci_tx_channel_config(struct udma_chan *uc) { struct udma_dev *ud = uc->ud; struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; struct udma_tchan *tchan = uc->tchan; struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; int ret; req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS; req_tx.nav_id = tisci_rm->tisci_dev_id; req_tx.index = tchan->id; req_tx.tx_supr_tdpkt = uc->config.notdpkt; if (ud->match_data->flags & UDMA_FLAG_TDTYPE) { /* wait for peer to complete the teardown for PDMAs */ req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID; req_tx.tx_tdtype = 1; } ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); if (ret) dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); return ret; } #define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config static int udma_tisci_rx_channel_config(struct udma_chan *uc) { struct udma_dev *ud = uc->ud; struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; struct udma_rchan *rchan = uc->rchan; int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring); int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring); struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 }; u32 mode, fetch_size; int ret; if (uc->config.pkt_mode) { mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, uc->config.psd_size, 0); } else { mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR; fetch_size = sizeof(struct cppi5_desc_hdr_t); } req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS; req_rx.nav_id = tisci_rm->tisci_dev_id; req_rx.index = rchan->id; req_rx.rx_fetch_size = fetch_size >> 2; req_rx.rxcq_qnum = rx_ring; req_rx.rx_chan_type = mode; req_rx.rx_atype = uc->config.atype; ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); if (ret) { dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret); return ret; } flow_req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; flow_req.nav_id = tisci_rm->tisci_dev_id; flow_req.flow_index = rchan->id; if (uc->config.needs_epib) flow_req.rx_einfo_present = 1; else flow_req.rx_einfo_present = 0; if (uc->config.psd_size) flow_req.rx_psinfo_present = 1; else flow_req.rx_psinfo_present = 0; flow_req.rx_error_handling = 1; flow_req.rx_dest_qnum = rx_ring; flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE; flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG; flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI; flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO; flow_req.rx_fdq0_sz0_qnum = fd_ring; 
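	/*
	 * A single free descriptor ring backs this flow, so the remaining
	 * FDQ selectors below are pointed at the same fd_ring.
	 */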
flow_req.rx_fdq1_qnum = fd_ring; flow_req.rx_fdq2_qnum = fd_ring; flow_req.rx_fdq3_qnum = fd_ring; ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req); if (ret) dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret); return 0; } static int bcdma_tisci_rx_channel_config(struct udma_chan *uc) { struct udma_dev *ud = uc->ud; struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; struct udma_rchan *rchan = uc->rchan; struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; int ret; req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS; req_rx.nav_id = tisci_rm->tisci_dev_id; req_rx.index = rchan->id; ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); if (ret) dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret); return ret; } static int pktdma_tisci_rx_channel_config(struct udma_chan *uc) { struct udma_dev *ud = uc->ud; struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 }; int ret; req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS; req_rx.nav_id = tisci_rm->tisci_dev_id; req_rx.index = uc->rchan->id; ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); if (ret) { dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret); return ret; } flow_req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID; flow_req.nav_id = tisci_rm->tisci_dev_id; flow_req.flow_index = uc->rflow->id; if (uc->config.needs_epib) flow_req.rx_einfo_present = 1; else flow_req.rx_einfo_present = 0; if (uc->config.psd_size) flow_req.rx_psinfo_present = 1; else flow_req.rx_psinfo_present = 0; flow_req.rx_error_handling = 1; ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req); if (ret) dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id, ret); return ret; } static int udma_alloc_chan_resources(struct dma_chan *chan) { struct udma_chan *uc = to_udma_chan(chan); struct udma_dev *ud = to_udma_dev(chan->device); const struct udma_soc_data *soc_data = ud->soc_data; struct k3_ring *irq_ring; u32 irq_udma_idx; int ret; uc->dma_dev = ud->dev; if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) { uc->use_dma_pool = true; /* in case of MEM_TO_MEM we have maximum of two TRs */ if (uc->config.dir == DMA_MEM_TO_MEM) { uc->config.hdesc_size = cppi5_trdesc_calc_size( sizeof(struct cppi5_tr_type15_t), 2); uc->config.pkt_mode = false; } } if (uc->use_dma_pool) { uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev, uc->config.hdesc_size, ud->desc_align, 0); if (!uc->hdesc_pool) { dev_err(ud->ddev.dev, "Descriptor pool allocation failed\n"); uc->use_dma_pool = false; ret = -ENOMEM; goto err_cleanup; } } /* * Make sure that the completion is in a known state: * No teardown, the channel is idle */ reinit_completion(&uc->teardown_completed); complete_all(&uc->teardown_completed); uc->state = UDMA_CHAN_IS_IDLE; switch (uc->config.dir) { case DMA_MEM_TO_MEM: /* Non synchronized - mem to mem type of transfer */ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__, uc->id); ret = udma_get_chan_pair(uc); if (ret) goto err_cleanup; ret = udma_alloc_tx_resources(uc); if (ret) { udma_put_rchan(uc); goto err_cleanup; } ret = udma_alloc_rx_resources(uc); if (ret) { udma_free_tx_resources(uc); goto err_cleanup; } uc->config.src_thread = 
ud->psil_base + uc->tchan->id; uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | K3_PSIL_DST_THREAD_ID_OFFSET; irq_ring = uc->tchan->tc_ring; irq_udma_idx = uc->tchan->id; ret = udma_tisci_m2m_channel_config(uc); break; case DMA_MEM_TO_DEV: /* Slave transfer synchronized - mem to dev (TX) trasnfer */ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, uc->id); ret = udma_alloc_tx_resources(uc); if (ret) goto err_cleanup; uc->config.src_thread = ud->psil_base + uc->tchan->id; uc->config.dst_thread = uc->config.remote_thread_id; uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; irq_ring = uc->tchan->tc_ring; irq_udma_idx = uc->tchan->id; ret = udma_tisci_tx_channel_config(uc); break; case DMA_DEV_TO_MEM: /* Slave transfer synchronized - dev to mem (RX) trasnfer */ dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, uc->id); ret = udma_alloc_rx_resources(uc); if (ret) goto err_cleanup; uc->config.src_thread = uc->config.remote_thread_id; uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | K3_PSIL_DST_THREAD_ID_OFFSET; irq_ring = uc->rflow->r_ring; irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id; ret = udma_tisci_rx_channel_config(uc); break; default: /* Can not happen */ dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", __func__, uc->id, uc->config.dir); ret = -EINVAL; goto err_cleanup; } /* check if the channel configuration was successful */ if (ret) goto err_res_free; if (udma_is_chan_running(uc)) { dev_warn(ud->dev, "chan%d: is running!\n", uc->id); udma_reset_chan(uc, false); if (udma_is_chan_running(uc)) { dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); ret = -EBUSY; goto err_res_free; } } /* PSI-L pairing */ ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread); if (ret) { dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n", uc->config.src_thread, uc->config.dst_thread); goto err_res_free; } uc->psil_paired = true; uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring); if (uc->irq_num_ring <= 0) { dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", k3_ringacc_get_ring_id(irq_ring)); ret = -EINVAL; goto err_psi_free; } ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, IRQF_TRIGGER_HIGH, uc->name, uc); if (ret) { dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); goto err_irq_free; } /* Event from UDMA (TR events) only needed for slave TR mode channels */ if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) { uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx); if (uc->irq_num_udma <= 0) { dev_err(ud->dev, "Failed to get udma irq (index: %u)\n", irq_udma_idx); free_irq(uc->irq_num_ring, uc); ret = -EINVAL; goto err_irq_free; } ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, uc->name, uc); if (ret) { dev_err(ud->dev, "chan%d: UDMA irq request failed\n", uc->id); free_irq(uc->irq_num_ring, uc); goto err_irq_free; } } else { uc->irq_num_udma = 0; } udma_reset_rings(uc); return 0; err_irq_free: uc->irq_num_ring = 0; uc->irq_num_udma = 0; err_psi_free: navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); uc->psil_paired = false; err_res_free: udma_free_tx_resources(uc); udma_free_rx_resources(uc); err_cleanup: udma_reset_uchan(uc); if (uc->use_dma_pool) { dma_pool_destroy(uc->hdesc_pool); uc->use_dma_pool = false; } return ret; } static int bcdma_alloc_chan_resources(struct dma_chan *chan) { struct udma_chan *uc = to_udma_chan(chan); struct udma_dev *ud = to_udma_dev(chan->device); const struct udma_oes_offsets *oes = &ud->soc_data->oes; 
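	/*
	 * Per-SoC output event offsets, used below to compute the MSI event
	 * indices for the ring completion and TR event interrupts.
	 */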
u32 irq_udma_idx, irq_ring_idx; int ret; /* Only TR mode is supported */ uc->config.pkt_mode = false; /* * Make sure that the completion is in a known state: * No teardown, the channel is idle */ reinit_completion(&uc->teardown_completed); complete_all(&uc->teardown_completed); uc->state = UDMA_CHAN_IS_IDLE; switch (uc->config.dir) { case DMA_MEM_TO_MEM: /* Non synchronized - mem to mem type of transfer */ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__, uc->id); ret = bcdma_alloc_bchan_resources(uc); if (ret) return ret; irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring; irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data; ret = bcdma_tisci_m2m_channel_config(uc); break; case DMA_MEM_TO_DEV: /* Slave transfer synchronized - mem to dev (TX) trasnfer */ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, uc->id); ret = udma_alloc_tx_resources(uc); if (ret) { uc->config.remote_thread_id = -1; return ret; } uc->config.src_thread = ud->psil_base + uc->tchan->id; uc->config.dst_thread = uc->config.remote_thread_id; uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring; irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data; ret = bcdma_tisci_tx_channel_config(uc); break; case DMA_DEV_TO_MEM: /* Slave transfer synchronized - dev to mem (RX) trasnfer */ dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, uc->id); ret = udma_alloc_rx_resources(uc); if (ret) { uc->config.remote_thread_id = -1; return ret; } uc->config.src_thread = uc->config.remote_thread_id; uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | K3_PSIL_DST_THREAD_ID_OFFSET; irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring; irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data; ret = bcdma_tisci_rx_channel_config(uc); break; default: /* Can not happen */ dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", __func__, uc->id, uc->config.dir); return -EINVAL; } /* check if the channel configuration was successful */ if (ret) goto err_res_free; if (udma_is_chan_running(uc)) { dev_warn(ud->dev, "chan%d: is running!\n", uc->id); udma_reset_chan(uc, false); if (udma_is_chan_running(uc)) { dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); ret = -EBUSY; goto err_res_free; } } uc->dma_dev = dmaengine_get_dma_device(chan); if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) { uc->config.hdesc_size = cppi5_trdesc_calc_size( sizeof(struct cppi5_tr_type15_t), 2); uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev, uc->config.hdesc_size, ud->desc_align, 0); if (!uc->hdesc_pool) { dev_err(ud->ddev.dev, "Descriptor pool allocation failed\n"); uc->use_dma_pool = false; ret = -ENOMEM; goto err_res_free; } uc->use_dma_pool = true; } else if (uc->config.dir != DMA_MEM_TO_MEM) { /* PSI-L pairing */ ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread); if (ret) { dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n", uc->config.src_thread, uc->config.dst_thread); goto err_res_free; } uc->psil_paired = true; } uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx); if (uc->irq_num_ring <= 0) { dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", irq_ring_idx); ret = -EINVAL; goto err_psi_free; } ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, IRQF_TRIGGER_HIGH, uc->name, uc); if (ret) { dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); goto err_irq_free; } /* Event from BCDMA (TR events) only needed for slave channels */ if (is_slave_direction(uc->config.dir)) { uc->irq_num_udma = 
msi_get_virq(ud->dev, irq_udma_idx); if (uc->irq_num_udma <= 0) { dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n", irq_udma_idx); free_irq(uc->irq_num_ring, uc); ret = -EINVAL; goto err_irq_free; } ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, uc->name, uc); if (ret) { dev_err(ud->dev, "chan%d: BCDMA irq request failed\n", uc->id); free_irq(uc->irq_num_ring, uc); goto err_irq_free; } } else { uc->irq_num_udma = 0; } udma_reset_rings(uc); INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, udma_check_tx_completion); return 0; err_irq_free: uc->irq_num_ring = 0; uc->irq_num_udma = 0; err_psi_free: if (uc->psil_paired) navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); uc->psil_paired = false; err_res_free: bcdma_free_bchan_resources(uc); udma_free_tx_resources(uc); udma_free_rx_resources(uc); udma_reset_uchan(uc); if (uc->use_dma_pool) { dma_pool_destroy(uc->hdesc_pool); uc->use_dma_pool = false; } return ret; } static int bcdma_router_config(struct dma_chan *chan) { struct k3_event_route_data *router_data = chan->route_data; struct udma_chan *uc = to_udma_chan(chan); u32 trigger_event; if (!uc->bchan) return -EINVAL; if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2) return -EINVAL; trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset; trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1; return router_data->set_event(router_data->priv, trigger_event); } static int pktdma_alloc_chan_resources(struct dma_chan *chan) { struct udma_chan *uc = to_udma_chan(chan); struct udma_dev *ud = to_udma_dev(chan->device); const struct udma_oes_offsets *oes = &ud->soc_data->oes; u32 irq_ring_idx; int ret; /* * Make sure that the completion is in a known state: * No teardown, the channel is idle */ reinit_completion(&uc->teardown_completed); complete_all(&uc->teardown_completed); uc->state = UDMA_CHAN_IS_IDLE; switch (uc->config.dir) { case DMA_MEM_TO_DEV: /* Slave transfer synchronized - mem to dev (TX) trasnfer */ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, uc->id); ret = udma_alloc_tx_resources(uc); if (ret) { uc->config.remote_thread_id = -1; return ret; } uc->config.src_thread = ud->psil_base + uc->tchan->id; uc->config.dst_thread = uc->config.remote_thread_id; uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow; ret = pktdma_tisci_tx_channel_config(uc); break; case DMA_DEV_TO_MEM: /* Slave transfer synchronized - dev to mem (RX) trasnfer */ dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, uc->id); ret = udma_alloc_rx_resources(uc); if (ret) { uc->config.remote_thread_id = -1; return ret; } uc->config.src_thread = uc->config.remote_thread_id; uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | K3_PSIL_DST_THREAD_ID_OFFSET; irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow; ret = pktdma_tisci_rx_channel_config(uc); break; default: /* Can not happen */ dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", __func__, uc->id, uc->config.dir); return -EINVAL; } /* check if the channel configuration was successful */ if (ret) goto err_res_free; if (udma_is_chan_running(uc)) { dev_warn(ud->dev, "chan%d: is running!\n", uc->id); udma_reset_chan(uc, false); if (udma_is_chan_running(uc)) { dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); ret = -EBUSY; goto err_res_free; } } uc->dma_dev = dmaengine_get_dma_device(chan); uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev, uc->config.hdesc_size, ud->desc_align, 0); if 
(!uc->hdesc_pool) { dev_err(ud->ddev.dev, "Descriptor pool allocation failed\n"); uc->use_dma_pool = false; ret = -ENOMEM; goto err_res_free; } uc->use_dma_pool = true; /* PSI-L pairing */ ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread); if (ret) { dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n", uc->config.src_thread, uc->config.dst_thread); goto err_res_free; } uc->psil_paired = true; uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx); if (uc->irq_num_ring <= 0) { dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", irq_ring_idx); ret = -EINVAL; goto err_psi_free; } ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, IRQF_TRIGGER_HIGH, uc->name, uc); if (ret) { dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); goto err_irq_free; } uc->irq_num_udma = 0; udma_reset_rings(uc); INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, udma_check_tx_completion); if (uc->tchan) dev_dbg(ud->dev, "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n", uc->id, uc->tchan->id, uc->tchan->tflow_id, uc->config.remote_thread_id); else if (uc->rchan) dev_dbg(ud->dev, "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n", uc->id, uc->rchan->id, uc->rflow->id, uc->config.remote_thread_id); return 0; err_irq_free: uc->irq_num_ring = 0; err_psi_free: navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); uc->psil_paired = false; err_res_free: udma_free_tx_resources(uc); udma_free_rx_resources(uc); udma_reset_uchan(uc); dma_pool_destroy(uc->hdesc_pool); uc->use_dma_pool = false; return ret; } static int udma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg) { struct udma_chan *uc = to_udma_chan(chan); memcpy(&uc->cfg, cfg, sizeof(uc->cfg)); return 0; } static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc, size_t tr_size, int tr_count, enum dma_transfer_direction dir) { struct udma_hwdesc *hwdesc; struct cppi5_desc_hdr_t *tr_desc; struct udma_desc *d; u32 reload_count = 0; u32 ring_id; switch (tr_size) { case 16: case 32: case 64: case 128: break; default: dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size); return NULL; } /* We have only one descriptor containing multiple TRs */ d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT); if (!d) return NULL; d->sglen = tr_count; d->hwdesc_count = 1; hwdesc = &d->hwdesc[0]; /* Allocate memory for DMA ring descriptor */ if (uc->use_dma_pool) { hwdesc->cppi5_desc_size = uc->config.hdesc_size; hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, GFP_NOWAIT, &hwdesc->cppi5_desc_paddr); } else { hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, tr_count); hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, uc->ud->desc_align); hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev, hwdesc->cppi5_desc_size, &hwdesc->cppi5_desc_paddr, GFP_NOWAIT); } if (!hwdesc->cppi5_desc_vaddr) { kfree(d); return NULL; } /* Start of the TR req records */ hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; /* Start address of the TR response array */ hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count; tr_desc = hwdesc->cppi5_desc_vaddr; if (uc->cyclic) reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE; if (dir == DMA_DEV_TO_MEM) ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); else ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count); cppi5_desc_set_pktids(tr_desc, uc->id, CPPI5_INFO1_DESC_FLOWID_DEFAULT); cppi5_desc_set_retpolicy(tr_desc, 0, ring_id); return d; } /** * 
udma_get_tr_counters - calculate TR counters for a given length * @len: Length of the trasnfer * @align_to: Preferred alignment * @tr0_cnt0: First TR icnt0 * @tr0_cnt1: First TR icnt1 * @tr1_cnt0: Second (if used) TR icnt0 * * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated * For len >= SZ_64K two TRs are used in a simple way: * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1) * Second TR: the remaining length (tr1_cnt0) * * Returns the number of TRs the length needs (1 or 2) * -EINVAL if the length can not be supported */ static int udma_get_tr_counters(size_t len, unsigned long align_to, u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0) { if (len < SZ_64K) { *tr0_cnt0 = len; *tr0_cnt1 = 1; return 1; } if (align_to > 3) align_to = 3; realign: *tr0_cnt0 = SZ_64K - BIT(align_to); if (len / *tr0_cnt0 >= SZ_64K) { if (align_to) { align_to--; goto realign; } return -EINVAL; } *tr0_cnt1 = len / *tr0_cnt0; *tr1_cnt0 = len % *tr0_cnt0; return 2; } static struct udma_desc * udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl, unsigned int sglen, enum dma_transfer_direction dir, unsigned long tx_flags, void *context) { struct scatterlist *sgent; struct udma_desc *d; struct cppi5_tr_type1_t *tr_req = NULL; u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; unsigned int i; size_t tr_size; int num_tr = 0; int tr_idx = 0; u64 asel; /* estimate the number of TRs we will need */ for_each_sg(sgl, sgent, sglen, i) { if (sg_dma_len(sgent) < SZ_64K) num_tr++; else num_tr += 2; } /* Now allocate and setup the descriptor. */ tr_size = sizeof(struct cppi5_tr_type1_t); d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir); if (!d) return NULL; d->sglen = sglen; if (uc->ud->match_data->type == DMA_TYPE_UDMA) asel = 0; else asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; tr_req = d->hwdesc[0].tr_req_base; for_each_sg(sgl, sgent, sglen, i) { dma_addr_t sg_addr = sg_dma_address(sgent); num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr), &tr0_cnt0, &tr0_cnt1, &tr1_cnt0); if (num_tr < 0) { dev_err(uc->ud->dev, "size %u is not supported\n", sg_dma_len(sgent)); udma_free_hwdesc(uc, d); kfree(d); return NULL; } cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT); sg_addr |= asel; tr_req[tr_idx].addr = sg_addr; tr_req[tr_idx].icnt0 = tr0_cnt0; tr_req[tr_idx].icnt1 = tr0_cnt1; tr_req[tr_idx].dim1 = tr0_cnt0; tr_idx++; if (num_tr == 2) { cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT); tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0; tr_req[tr_idx].icnt0 = tr1_cnt0; tr_req[tr_idx].icnt1 = 1; tr_req[tr_idx].dim1 = tr1_cnt0; tr_idx++; } d->residue += sg_dma_len(sgent); } cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP); return d; } static struct udma_desc * udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl, unsigned int sglen, enum dma_transfer_direction dir, unsigned long tx_flags, void *context) { struct scatterlist *sgent; struct cppi5_tr_type15_t *tr_req = NULL; enum dma_slave_buswidth dev_width; u32 csf = CPPI5_TR_CSF_SUPR_EVT; u16 tr_cnt0, tr_cnt1; dma_addr_t dev_addr; struct udma_desc *d; unsigned int i; size_t tr_size, sg_len; int num_tr = 0; int tr_idx = 0; u32 burst, trigger_size, port_window; u64 asel; if (dir == DMA_DEV_TO_MEM) { dev_addr = uc->cfg.src_addr; dev_width = 
uc->cfg.src_addr_width; burst = uc->cfg.src_maxburst; port_window = uc->cfg.src_port_window_size; } else if (dir == DMA_MEM_TO_DEV) { dev_addr = uc->cfg.dst_addr; dev_width = uc->cfg.dst_addr_width; burst = uc->cfg.dst_maxburst; port_window = uc->cfg.dst_port_window_size; } else { dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); return NULL; } if (!burst) burst = 1; if (port_window) { if (port_window != burst) { dev_err(uc->ud->dev, "The burst must be equal to port_window\n"); return NULL; } tr_cnt0 = dev_width * port_window; tr_cnt1 = 1; } else { tr_cnt0 = dev_width; tr_cnt1 = burst; } trigger_size = tr_cnt0 * tr_cnt1; /* estimate the number of TRs we will need */ for_each_sg(sgl, sgent, sglen, i) { sg_len = sg_dma_len(sgent); if (sg_len % trigger_size) { dev_err(uc->ud->dev, "Not aligned SG entry (%zu for %u)\n", sg_len, trigger_size); return NULL; } if (sg_len / trigger_size < SZ_64K) num_tr++; else num_tr += 2; } /* Now allocate and setup the descriptor. */ tr_size = sizeof(struct cppi5_tr_type15_t); d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir); if (!d) return NULL; d->sglen = sglen; if (uc->ud->match_data->type == DMA_TYPE_UDMA) { asel = 0; csf |= CPPI5_TR_CSF_EOL_ICNT0; } else { asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; dev_addr |= asel; } tr_req = d->hwdesc[0].tr_req_base; for_each_sg(sgl, sgent, sglen, i) { u16 tr0_cnt2, tr0_cnt3, tr1_cnt2; dma_addr_t sg_addr = sg_dma_address(sgent); sg_len = sg_dma_len(sgent); num_tr = udma_get_tr_counters(sg_len / trigger_size, 0, &tr0_cnt2, &tr0_cnt3, &tr1_cnt2); if (num_tr < 0) { dev_err(uc->ud->dev, "size %zu is not supported\n", sg_len); udma_free_hwdesc(uc, d); kfree(d); return NULL; } cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false, true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); cppi5_tr_csf_set(&tr_req[tr_idx].flags, csf); cppi5_tr_set_trigger(&tr_req[tr_idx].flags, uc->config.tr_trigger_type, CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0); sg_addr |= asel; if (dir == DMA_DEV_TO_MEM) { tr_req[tr_idx].addr = dev_addr; tr_req[tr_idx].icnt0 = tr_cnt0; tr_req[tr_idx].icnt1 = tr_cnt1; tr_req[tr_idx].icnt2 = tr0_cnt2; tr_req[tr_idx].icnt3 = tr0_cnt3; tr_req[tr_idx].dim1 = (-1) * tr_cnt0; tr_req[tr_idx].daddr = sg_addr; tr_req[tr_idx].dicnt0 = tr_cnt0; tr_req[tr_idx].dicnt1 = tr_cnt1; tr_req[tr_idx].dicnt2 = tr0_cnt2; tr_req[tr_idx].dicnt3 = tr0_cnt3; tr_req[tr_idx].ddim1 = tr_cnt0; tr_req[tr_idx].ddim2 = trigger_size; tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2; } else { tr_req[tr_idx].addr = sg_addr; tr_req[tr_idx].icnt0 = tr_cnt0; tr_req[tr_idx].icnt1 = tr_cnt1; tr_req[tr_idx].icnt2 = tr0_cnt2; tr_req[tr_idx].icnt3 = tr0_cnt3; tr_req[tr_idx].dim1 = tr_cnt0; tr_req[tr_idx].dim2 = trigger_size; tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2; tr_req[tr_idx].daddr = dev_addr; tr_req[tr_idx].dicnt0 = tr_cnt0; tr_req[tr_idx].dicnt1 = tr_cnt1; tr_req[tr_idx].dicnt2 = tr0_cnt2; tr_req[tr_idx].dicnt3 = tr0_cnt3; tr_req[tr_idx].ddim1 = (-1) * tr_cnt0; } tr_idx++; if (num_tr == 2) { cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false, true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); cppi5_tr_csf_set(&tr_req[tr_idx].flags, csf); cppi5_tr_set_trigger(&tr_req[tr_idx].flags, uc->config.tr_trigger_type, CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0); sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3; if (dir == DMA_DEV_TO_MEM) { tr_req[tr_idx].addr = dev_addr; tr_req[tr_idx].icnt0 = tr_cnt0; tr_req[tr_idx].icnt1 = tr_cnt1; tr_req[tr_idx].icnt2 = tr1_cnt2; tr_req[tr_idx].icnt3 = 1; tr_req[tr_idx].dim1 = (-1) * tr_cnt0; tr_req[tr_idx].daddr = 
sg_addr; tr_req[tr_idx].dicnt0 = tr_cnt0; tr_req[tr_idx].dicnt1 = tr_cnt1; tr_req[tr_idx].dicnt2 = tr1_cnt2; tr_req[tr_idx].dicnt3 = 1; tr_req[tr_idx].ddim1 = tr_cnt0; tr_req[tr_idx].ddim2 = trigger_size; } else { tr_req[tr_idx].addr = sg_addr; tr_req[tr_idx].icnt0 = tr_cnt0; tr_req[tr_idx].icnt1 = tr_cnt1; tr_req[tr_idx].icnt2 = tr1_cnt2; tr_req[tr_idx].icnt3 = 1; tr_req[tr_idx].dim1 = tr_cnt0; tr_req[tr_idx].dim2 = trigger_size; tr_req[tr_idx].daddr = dev_addr; tr_req[tr_idx].dicnt0 = tr_cnt0; tr_req[tr_idx].dicnt1 = tr_cnt1; tr_req[tr_idx].dicnt2 = tr1_cnt2; tr_req[tr_idx].dicnt3 = 1; tr_req[tr_idx].ddim1 = (-1) * tr_cnt0; } tr_idx++; } d->residue += sg_len; } cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, csf | CPPI5_TR_CSF_EOP); return d; } static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d, enum dma_slave_buswidth dev_width, u16 elcnt) { if (uc->config.ep_type != PSIL_EP_PDMA_XY) return 0; /* Bus width translates to the element size (ES) */ switch (dev_width) { case DMA_SLAVE_BUSWIDTH_1_BYTE: d->static_tr.elsize = 0; break; case DMA_SLAVE_BUSWIDTH_2_BYTES: d->static_tr.elsize = 1; break; case DMA_SLAVE_BUSWIDTH_3_BYTES: d->static_tr.elsize = 2; break; case DMA_SLAVE_BUSWIDTH_4_BYTES: d->static_tr.elsize = 3; break; case DMA_SLAVE_BUSWIDTH_8_BYTES: d->static_tr.elsize = 4; break; default: /* not reached */ return -EINVAL; } d->static_tr.elcnt = elcnt; /* * PDMA must to close the packet when the channel is in packet mode. * For TR mode when the channel is not cyclic we also need PDMA to close * the packet otherwise the transfer will stall because PDMA holds on * the data it has received from the peripheral. */ if (uc->config.pkt_mode || !uc->cyclic) { unsigned int div = dev_width * elcnt; if (uc->cyclic) d->static_tr.bstcnt = d->residue / d->sglen / div; else d->static_tr.bstcnt = d->residue / div; if (uc->config.dir == DMA_DEV_TO_MEM && d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask) return -EINVAL; } else { d->static_tr.bstcnt = 0; } return 0; } static struct udma_desc * udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl, unsigned int sglen, enum dma_transfer_direction dir, unsigned long tx_flags, void *context) { struct scatterlist *sgent; struct cppi5_host_desc_t *h_desc = NULL; struct udma_desc *d; u32 ring_id; unsigned int i; u64 asel; d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT); if (!d) return NULL; d->sglen = sglen; d->hwdesc_count = sglen; if (dir == DMA_DEV_TO_MEM) ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); else ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); if (uc->ud->match_data->type == DMA_TYPE_UDMA) asel = 0; else asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; for_each_sg(sgl, sgent, sglen, i) { struct udma_hwdesc *hwdesc = &d->hwdesc[i]; dma_addr_t sg_addr = sg_dma_address(sgent); struct cppi5_host_desc_t *desc; size_t sg_len = sg_dma_len(sgent); hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, GFP_NOWAIT, &hwdesc->cppi5_desc_paddr); if (!hwdesc->cppi5_desc_vaddr) { dev_err(uc->ud->dev, "descriptor%d allocation failed\n", i); udma_free_hwdesc(uc, d); kfree(d); return NULL; } d->residue += sg_len; hwdesc->cppi5_desc_size = uc->config.hdesc_size; desc = hwdesc->cppi5_desc_vaddr; if (i == 0) { cppi5_hdesc_init(desc, 0, 0); /* Flow and Packed ID */ cppi5_desc_set_pktids(&desc->hdr, uc->id, CPPI5_INFO1_DESC_FLOWID_DEFAULT); cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id); } else { cppi5_hdesc_reset_hbdesc(desc); cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff); } /* attach the sg 
buffer to the descriptor */ sg_addr |= asel; cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len); /* Attach link as host buffer descriptor */ if (h_desc) cppi5_hdesc_link_hbdesc(h_desc, hwdesc->cppi5_desc_paddr | asel); if (uc->ud->match_data->type == DMA_TYPE_PKTDMA || dir == DMA_MEM_TO_DEV) h_desc = desc; } if (d->residue >= SZ_4M) { dev_err(uc->ud->dev, "%s: Transfer size %u is over the supported 4M range\n", __func__, d->residue); udma_free_hwdesc(uc, d); kfree(d); return NULL; } h_desc = d->hwdesc[0].cppi5_desc_vaddr; cppi5_hdesc_set_pktlen(h_desc, d->residue); return d; } static int udma_attach_metadata(struct dma_async_tx_descriptor *desc, void *data, size_t len) { struct udma_desc *d = to_udma_desc(desc); struct udma_chan *uc = to_udma_chan(desc->chan); struct cppi5_host_desc_t *h_desc; u32 psd_size = len; u32 flags = 0; if (!uc->config.pkt_mode || !uc->config.metadata_size) return -ENOTSUPP; if (!data || len > uc->config.metadata_size) return -EINVAL; if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE) return -EINVAL; h_desc = d->hwdesc[0].cppi5_desc_vaddr; if (d->dir == DMA_MEM_TO_DEV) memcpy(h_desc->epib, data, len); if (uc->config.needs_epib) psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; d->metadata = data; d->metadata_size = len; if (uc->config.needs_epib) flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; cppi5_hdesc_update_flags(h_desc, flags); cppi5_hdesc_update_psdata_size(h_desc, psd_size); return 0; } static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc, size_t *payload_len, size_t *max_len) { struct udma_desc *d = to_udma_desc(desc); struct udma_chan *uc = to_udma_chan(desc->chan); struct cppi5_host_desc_t *h_desc; if (!uc->config.pkt_mode || !uc->config.metadata_size) return ERR_PTR(-ENOTSUPP); h_desc = d->hwdesc[0].cppi5_desc_vaddr; *max_len = uc->config.metadata_size; *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ? 
CPPI5_INFO0_HDESC_EPIB_SIZE : 0; *payload_len += cppi5_hdesc_get_psdata_size(h_desc); return h_desc->epib; } static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc, size_t payload_len) { struct udma_desc *d = to_udma_desc(desc); struct udma_chan *uc = to_udma_chan(desc->chan); struct cppi5_host_desc_t *h_desc; u32 psd_size = payload_len; u32 flags = 0; if (!uc->config.pkt_mode || !uc->config.metadata_size) return -ENOTSUPP; if (payload_len > uc->config.metadata_size) return -EINVAL; if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE) return -EINVAL; h_desc = d->hwdesc[0].cppi5_desc_vaddr; if (uc->config.needs_epib) { psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; } cppi5_hdesc_update_flags(h_desc, flags); cppi5_hdesc_update_psdata_size(h_desc, psd_size); return 0; } static struct dma_descriptor_metadata_ops metadata_ops = { .attach = udma_attach_metadata, .get_ptr = udma_get_metadata_ptr, .set_len = udma_set_metadata_len, }; static struct dma_async_tx_descriptor * udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen, enum dma_transfer_direction dir, unsigned long tx_flags, void *context) { struct udma_chan *uc = to_udma_chan(chan); enum dma_slave_buswidth dev_width; struct udma_desc *d; u32 burst; if (dir != uc->config.dir && (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) { dev_err(chan->device->dev, "%s: chan%d is for %s, not supporting %s\n", __func__, uc->id, dmaengine_get_direction_text(uc->config.dir), dmaengine_get_direction_text(dir)); return NULL; } if (dir == DMA_DEV_TO_MEM) { dev_width = uc->cfg.src_addr_width; burst = uc->cfg.src_maxburst; } else if (dir == DMA_MEM_TO_DEV) { dev_width = uc->cfg.dst_addr_width; burst = uc->cfg.dst_maxburst; } else { dev_err(chan->device->dev, "%s: bad direction?\n", __func__); return NULL; } if (!burst) burst = 1; uc->config.tx_flags = tx_flags; if (uc->config.pkt_mode) d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags, context); else if (is_slave_direction(uc->config.dir)) d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags, context); else d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir, tx_flags, context); if (!d) return NULL; d->dir = dir; d->desc_idx = 0; d->tr_idx = 0; /* static TR for remote PDMA */ if (udma_configure_statictr(uc, d, dev_width, burst)) { dev_err(uc->ud->dev, "%s: StaticTR Z is limited to maximum 4095 (%u)\n", __func__, d->static_tr.bstcnt); udma_free_hwdesc(uc, d); kfree(d); return NULL; } if (uc->config.metadata_size) d->vd.tx.metadata_ops = &metadata_ops; return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); } static struct udma_desc * udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction dir, unsigned long flags) { struct udma_desc *d; size_t tr_size, period_addr; struct cppi5_tr_type1_t *tr_req; unsigned int periods = buf_len / period_len; u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; unsigned int i; int num_tr; num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0, &tr0_cnt1, &tr1_cnt0); if (num_tr < 0) { dev_err(uc->ud->dev, "size %zu is not supported\n", period_len); return NULL; } /* Now allocate and setup the descriptor. 
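	 * A single descriptor carries every period: periods * num_tr TR
	 * records are reserved, where num_tr is 1 when the period fits in
	 * one TR and 2 otherwise.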
*/ tr_size = sizeof(struct cppi5_tr_type1_t); d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir); if (!d) return NULL; tr_req = d->hwdesc[0].tr_req_base; if (uc->ud->match_data->type == DMA_TYPE_UDMA) period_addr = buf_addr; else period_addr = buf_addr | ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT); for (i = 0; i < periods; i++) { int tr_idx = i * num_tr; cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); tr_req[tr_idx].addr = period_addr; tr_req[tr_idx].icnt0 = tr0_cnt0; tr_req[tr_idx].icnt1 = tr0_cnt1; tr_req[tr_idx].dim1 = tr0_cnt0; if (num_tr == 2) { cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT); tr_idx++; cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0; tr_req[tr_idx].icnt0 = tr1_cnt0; tr_req[tr_idx].icnt1 = 1; tr_req[tr_idx].dim1 = tr1_cnt0; } if (!(flags & DMA_PREP_INTERRUPT)) cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT); period_addr += period_len; } return d; } static struct udma_desc * udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction dir, unsigned long flags) { struct udma_desc *d; u32 ring_id; int i; int periods = buf_len / period_len; if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1)) return NULL; if (period_len >= SZ_4M) return NULL; d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT); if (!d) return NULL; d->hwdesc_count = periods; /* TODO: re-check this... */ if (dir == DMA_DEV_TO_MEM) ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); else ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); if (uc->ud->match_data->type != DMA_TYPE_UDMA) buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; for (i = 0; i < periods; i++) { struct udma_hwdesc *hwdesc = &d->hwdesc[i]; dma_addr_t period_addr = buf_addr + (period_len * i); struct cppi5_host_desc_t *h_desc; hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, GFP_NOWAIT, &hwdesc->cppi5_desc_paddr); if (!hwdesc->cppi5_desc_vaddr) { dev_err(uc->ud->dev, "descriptor%d allocation failed\n", i); udma_free_hwdesc(uc, d); kfree(d); return NULL; } hwdesc->cppi5_desc_size = uc->config.hdesc_size; h_desc = hwdesc->cppi5_desc_vaddr; cppi5_hdesc_init(h_desc, 0, 0); cppi5_hdesc_set_pktlen(h_desc, period_len); /* Flow and Packed ID */ cppi5_desc_set_pktids(&h_desc->hdr, uc->id, CPPI5_INFO1_DESC_FLOWID_DEFAULT); cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id); /* attach each period to a new descriptor */ cppi5_hdesc_attach_buf(h_desc, period_addr, period_len, period_addr, period_len); } return d; } static struct dma_async_tx_descriptor * udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction dir, unsigned long flags) { struct udma_chan *uc = to_udma_chan(chan); enum dma_slave_buswidth dev_width; struct udma_desc *d; u32 burst; if (dir != uc->config.dir) { dev_err(chan->device->dev, "%s: chan%d is for %s, not supporting %s\n", __func__, uc->id, dmaengine_get_direction_text(uc->config.dir), dmaengine_get_direction_text(dir)); return NULL; } uc->cyclic = true; if (dir == DMA_DEV_TO_MEM) { dev_width = uc->cfg.src_addr_width; burst = uc->cfg.src_maxburst; } else if (dir == DMA_MEM_TO_DEV) { dev_width = uc->cfg.dst_addr_width; burst = uc->cfg.dst_maxburst; } else { dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); return NULL; } if (!burst) burst = 1; if 
(uc->config.pkt_mode) d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len, dir, flags); else d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len, dir, flags); if (!d) return NULL; d->sglen = buf_len / period_len; d->dir = dir; d->residue = buf_len; /* static TR for remote PDMA */ if (udma_configure_statictr(uc, d, dev_width, burst)) { dev_err(uc->ud->dev, "%s: StaticTR Z is limited to maximum 4095 (%u)\n", __func__, d->static_tr.bstcnt); udma_free_hwdesc(uc, d); kfree(d); return NULL; } if (uc->config.metadata_size) d->vd.tx.metadata_ops = &metadata_ops; return vchan_tx_prep(&uc->vc, &d->vd, flags); } static struct dma_async_tx_descriptor * udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long tx_flags) { struct udma_chan *uc = to_udma_chan(chan); struct udma_desc *d; struct cppi5_tr_type15_t *tr_req; int num_tr; size_t tr_size = sizeof(struct cppi5_tr_type15_t); u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; u32 csf = CPPI5_TR_CSF_SUPR_EVT; if (uc->config.dir != DMA_MEM_TO_MEM) { dev_err(chan->device->dev, "%s: chan%d is for %s, not supporting %s\n", __func__, uc->id, dmaengine_get_direction_text(uc->config.dir), dmaengine_get_direction_text(DMA_MEM_TO_MEM)); return NULL; } num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0, &tr0_cnt1, &tr1_cnt0); if (num_tr < 0) { dev_err(uc->ud->dev, "size %zu is not supported\n", len); return NULL; } d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM); if (!d) return NULL; d->dir = DMA_MEM_TO_MEM; d->desc_idx = 0; d->tr_idx = 0; d->residue = len; if (uc->ud->match_data->type != DMA_TYPE_UDMA) { src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT; dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT; } else { csf |= CPPI5_TR_CSF_EOL_ICNT0; } tr_req = d->hwdesc[0].tr_req_base; cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); cppi5_tr_csf_set(&tr_req[0].flags, csf); tr_req[0].addr = src; tr_req[0].icnt0 = tr0_cnt0; tr_req[0].icnt1 = tr0_cnt1; tr_req[0].icnt2 = 1; tr_req[0].icnt3 = 1; tr_req[0].dim1 = tr0_cnt0; tr_req[0].daddr = dest; tr_req[0].dicnt0 = tr0_cnt0; tr_req[0].dicnt1 = tr0_cnt1; tr_req[0].dicnt2 = 1; tr_req[0].dicnt3 = 1; tr_req[0].ddim1 = tr0_cnt0; if (num_tr == 2) { cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); cppi5_tr_csf_set(&tr_req[1].flags, csf); tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0; tr_req[1].icnt0 = tr1_cnt0; tr_req[1].icnt1 = 1; tr_req[1].icnt2 = 1; tr_req[1].icnt3 = 1; tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0; tr_req[1].dicnt0 = tr1_cnt0; tr_req[1].dicnt1 = 1; tr_req[1].dicnt2 = 1; tr_req[1].dicnt3 = 1; } cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, csf | CPPI5_TR_CSF_EOP); if (uc->config.metadata_size) d->vd.tx.metadata_ops = &metadata_ops; return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); } static void udma_issue_pending(struct dma_chan *chan) { struct udma_chan *uc = to_udma_chan(chan); unsigned long flags; spin_lock_irqsave(&uc->vc.lock, flags); /* If we have something pending and no active descriptor, then */ if (vchan_issue_pending(&uc->vc) && !uc->desc) { /* * start a descriptor if the channel is NOT [marked as * terminating _and_ it is still running (teardown has not * completed yet)]. 
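		 * In other words, only hold off while a previous terminate is
		 * still tearing the channel down; once the channel has
		 * stopped, the pending descriptor is started right away.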
*/ if (!(uc->state == UDMA_CHAN_IS_TERMINATING && udma_is_chan_running(uc))) udma_start(uc); } spin_unlock_irqrestore(&uc->vc.lock, flags); } static enum dma_status udma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct udma_chan *uc = to_udma_chan(chan); enum dma_status ret; unsigned long flags; spin_lock_irqsave(&uc->vc.lock, flags); ret = dma_cookie_status(chan, cookie, txstate); if (!udma_is_chan_running(uc)) ret = DMA_COMPLETE; if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc)) ret = DMA_PAUSED; if (ret == DMA_COMPLETE || !txstate) goto out; if (uc->desc && uc->desc->vd.tx.cookie == cookie) { u32 peer_bcnt = 0; u32 bcnt = 0; u32 residue = uc->desc->residue; u32 delay = 0; if (uc->desc->dir == DMA_MEM_TO_DEV) { bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); if (uc->config.ep_type != PSIL_EP_NATIVE) { peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); if (bcnt > peer_bcnt) delay = bcnt - peer_bcnt; } } else if (uc->desc->dir == DMA_DEV_TO_MEM) { bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); if (uc->config.ep_type != PSIL_EP_NATIVE) { peer_bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); if (peer_bcnt > bcnt) delay = peer_bcnt - bcnt; } } else { bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); } if (bcnt && !(bcnt % uc->desc->residue)) residue = 0; else residue -= bcnt % uc->desc->residue; if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) { ret = DMA_COMPLETE; delay = 0; } dma_set_residue(txstate, residue); dma_set_in_flight_bytes(txstate, delay); } else { ret = DMA_COMPLETE; } out: spin_unlock_irqrestore(&uc->vc.lock, flags); return ret; } static int udma_pause(struct dma_chan *chan) { struct udma_chan *uc = to_udma_chan(chan); /* pause the channel */ switch (uc->config.dir) { case DMA_DEV_TO_MEM: udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, UDMA_PEER_RT_EN_PAUSE, UDMA_PEER_RT_EN_PAUSE); break; case DMA_MEM_TO_DEV: udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, UDMA_PEER_RT_EN_PAUSE, UDMA_PEER_RT_EN_PAUSE); break; case DMA_MEM_TO_MEM: udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG, UDMA_CHAN_RT_CTL_PAUSE, UDMA_CHAN_RT_CTL_PAUSE); break; default: return -EINVAL; } return 0; } static int udma_resume(struct dma_chan *chan) { struct udma_chan *uc = to_udma_chan(chan); /* resume the channel */ switch (uc->config.dir) { case DMA_DEV_TO_MEM: udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, UDMA_PEER_RT_EN_PAUSE, 0); break; case DMA_MEM_TO_DEV: udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, UDMA_PEER_RT_EN_PAUSE, 0); break; case DMA_MEM_TO_MEM: udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG, UDMA_CHAN_RT_CTL_PAUSE, 0); break; default: return -EINVAL; } return 0; } static int udma_terminate_all(struct dma_chan *chan) { struct udma_chan *uc = to_udma_chan(chan); unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&uc->vc.lock, flags); if (udma_is_chan_running(uc)) udma_stop(uc); if (uc->desc) { uc->terminated_desc = uc->desc; uc->desc = NULL; uc->terminated_desc->terminated = true; cancel_delayed_work(&uc->tx_drain.work); } uc->paused = false; vchan_get_all_descriptors(&uc->vc, &head); spin_unlock_irqrestore(&uc->vc.lock, flags); vchan_dma_desc_free_list(&uc->vc, &head); return 0; } static void udma_synchronize(struct dma_chan *chan) { struct udma_chan *uc = to_udma_chan(chan); unsigned long timeout = msecs_to_jiffies(1000); vchan_synchronize(&uc->vc); if (uc->state == UDMA_CHAN_IS_TERMINATING) { timeout = 
wait_for_completion_timeout(&uc->teardown_completed, timeout); if (!timeout) { dev_warn(uc->ud->dev, "chan%d teardown timeout!\n", uc->id); udma_dump_chan_stdata(uc); udma_reset_chan(uc, true); } } udma_reset_chan(uc, false); if (udma_is_chan_running(uc)) dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id); cancel_delayed_work_sync(&uc->tx_drain.work); udma_reset_rings(uc); } static void udma_desc_pre_callback(struct virt_dma_chan *vc, struct virt_dma_desc *vd, struct dmaengine_result *result) { struct udma_chan *uc = to_udma_chan(&vc->chan); struct udma_desc *d; if (!vd) return; d = to_udma_desc(&vd->tx); if (d->metadata_size) udma_fetch_epib(uc, d); /* Provide residue information for the client */ if (result) { void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx); if (cppi5_desc_get_type(desc_vaddr) == CPPI5_INFO0_DESC_TYPE_VAL_HOST) { result->residue = d->residue - cppi5_hdesc_get_pktlen(desc_vaddr); if (result->residue) result->result = DMA_TRANS_ABORTED; else result->result = DMA_TRANS_NOERROR; } else { result->residue = 0; result->result = DMA_TRANS_NOERROR; } } } /* * This tasklet handles the completion of a DMA descriptor by * calling its callback and freeing it. */ static void udma_vchan_complete(struct tasklet_struct *t) { struct virt_dma_chan *vc = from_tasklet(vc, t, task); struct virt_dma_desc *vd, *_vd; struct dmaengine_desc_callback cb; LIST_HEAD(head); spin_lock_irq(&vc->lock); list_splice_tail_init(&vc->desc_completed, &head); vd = vc->cyclic; if (vd) { vc->cyclic = NULL; dmaengine_desc_get_callback(&vd->tx, &cb); } else { memset(&cb, 0, sizeof(cb)); } spin_unlock_irq(&vc->lock); udma_desc_pre_callback(vc, vd, NULL); dmaengine_desc_callback_invoke(&cb, NULL); list_for_each_entry_safe(vd, _vd, &head, node) { struct dmaengine_result result; dmaengine_desc_get_callback(&vd->tx, &cb); list_del(&vd->node); udma_desc_pre_callback(vc, vd, &result); dmaengine_desc_callback_invoke(&cb, &result); vchan_vdesc_fini(vd); } } static void udma_free_chan_resources(struct dma_chan *chan) { struct udma_chan *uc = to_udma_chan(chan); struct udma_dev *ud = to_udma_dev(chan->device); udma_terminate_all(chan); if (uc->terminated_desc) { udma_reset_chan(uc, false); udma_reset_rings(uc); } cancel_delayed_work_sync(&uc->tx_drain.work); if (uc->irq_num_ring > 0) { free_irq(uc->irq_num_ring, uc); uc->irq_num_ring = 0; } if (uc->irq_num_udma > 0) { free_irq(uc->irq_num_udma, uc); uc->irq_num_udma = 0; } /* Release PSI-L pairing */ if (uc->psil_paired) { navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); uc->psil_paired = false; } vchan_free_chan_resources(&uc->vc); tasklet_kill(&uc->vc.task); bcdma_free_bchan_resources(uc); udma_free_tx_resources(uc); udma_free_rx_resources(uc); udma_reset_uchan(uc); if (uc->use_dma_pool) { dma_pool_destroy(uc->hdesc_pool); uc->use_dma_pool = false; } } static struct platform_driver udma_driver; static struct platform_driver bcdma_driver; static struct platform_driver pktdma_driver; struct udma_filter_param { int remote_thread_id; u32 atype; u32 asel; u32 tr_trigger_type; }; static bool udma_dma_filter_fn(struct dma_chan *chan, void *param) { struct udma_chan_config *ucc; struct psil_endpoint_config *ep_config; struct udma_filter_param *filter_param; struct udma_chan *uc; struct udma_dev *ud; if (chan->device->dev->driver != &udma_driver.driver && chan->device->dev->driver != &bcdma_driver.driver && chan->device->dev->driver != &pktdma_driver.driver) return false; uc = to_udma_chan(chan); ucc = &uc->config; ud = uc->ud; filter_param 
= param; if (filter_param->atype > 2) { dev_err(ud->dev, "Invalid channel atype: %u\n", filter_param->atype); return false; } if (filter_param->asel > 15) { dev_err(ud->dev, "Invalid channel asel: %u\n", filter_param->asel); return false; } ucc->remote_thread_id = filter_param->remote_thread_id; ucc->atype = filter_param->atype; ucc->asel = filter_param->asel; ucc->tr_trigger_type = filter_param->tr_trigger_type; if (ucc->tr_trigger_type) { ucc->dir = DMA_MEM_TO_MEM; goto triggered_bchan; } else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) { ucc->dir = DMA_MEM_TO_DEV; } else { ucc->dir = DMA_DEV_TO_MEM; } ep_config = psil_get_ep_config(ucc->remote_thread_id); if (IS_ERR(ep_config)) { dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n", ucc->remote_thread_id); ucc->dir = DMA_MEM_TO_MEM; ucc->remote_thread_id = -1; ucc->atype = 0; ucc->asel = 0; return false; } if (ud->match_data->type == DMA_TYPE_BCDMA && ep_config->pkt_mode) { dev_err(ud->dev, "Only TR mode is supported (psi-l thread 0x%04x)\n", ucc->remote_thread_id); ucc->dir = DMA_MEM_TO_MEM; ucc->remote_thread_id = -1; ucc->atype = 0; ucc->asel = 0; return false; } ucc->pkt_mode = ep_config->pkt_mode; ucc->channel_tpl = ep_config->channel_tpl; ucc->notdpkt = ep_config->notdpkt; ucc->ep_type = ep_config->ep_type; if (ud->match_data->type == DMA_TYPE_PKTDMA && ep_config->mapped_channel_id >= 0) { ucc->mapped_channel_id = ep_config->mapped_channel_id; ucc->default_flow_id = ep_config->default_flow_id; } else { ucc->mapped_channel_id = -1; ucc->default_flow_id = -1; } if (ucc->ep_type != PSIL_EP_NATIVE) { const struct udma_match_data *match_data = ud->match_data; if (match_data->flags & UDMA_FLAG_PDMA_ACC32) ucc->enable_acc32 = ep_config->pdma_acc32; if (match_data->flags & UDMA_FLAG_PDMA_BURST) ucc->enable_burst = ep_config->pdma_burst; } ucc->needs_epib = ep_config->needs_epib; ucc->psd_size = ep_config->psd_size; ucc->metadata_size = (ucc->needs_epib ? 
CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + ucc->psd_size; if (ucc->pkt_mode) ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + ucc->metadata_size, ud->desc_align); dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id, ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir)); return true; triggered_bchan: dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id, ucc->tr_trigger_type); return true; } static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { struct udma_dev *ud = ofdma->of_dma_data; dma_cap_mask_t mask = ud->ddev.cap_mask; struct udma_filter_param filter_param; struct dma_chan *chan; if (ud->match_data->type == DMA_TYPE_BCDMA) { if (dma_spec->args_count != 3) return NULL; filter_param.tr_trigger_type = dma_spec->args[0]; filter_param.remote_thread_id = dma_spec->args[1]; filter_param.asel = dma_spec->args[2]; filter_param.atype = 0; } else { if (dma_spec->args_count != 1 && dma_spec->args_count != 2) return NULL; filter_param.remote_thread_id = dma_spec->args[0]; filter_param.tr_trigger_type = 0; if (dma_spec->args_count == 2) { if (ud->match_data->type == DMA_TYPE_UDMA) { filter_param.atype = dma_spec->args[1]; filter_param.asel = 0; } else { filter_param.atype = 0; filter_param.asel = dma_spec->args[1]; } } else { filter_param.atype = 0; filter_param.asel = 0; } } chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param, ofdma->of_node); if (!chan) { dev_err(ud->dev, "get channel fail in %s.\n", __func__); return ERR_PTR(-EINVAL); } return chan; } static struct udma_match_data am654_main_data = { .type = DMA_TYPE_UDMA, .psil_base = 0x1000, .enable_memcpy_support = true, .statictr_z_mask = GENMASK(11, 0), .burst_size = { TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */ 0, /* No UH Channels */ }, }; static struct udma_match_data am654_mcu_data = { .type = DMA_TYPE_UDMA, .psil_base = 0x6000, .enable_memcpy_support = false, .statictr_z_mask = GENMASK(11, 0), .burst_size = { TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */ 0, /* No UH Channels */ }, }; static struct udma_match_data j721e_main_data = { .type = DMA_TYPE_UDMA, .psil_base = 0x1000, .enable_memcpy_support = true, .flags = UDMA_FLAGS_J7_CLASS, .statictr_z_mask = GENMASK(23, 0), .burst_size = { TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */ }, }; static struct udma_match_data j721e_mcu_data = { .type = DMA_TYPE_UDMA, .psil_base = 0x6000, .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */ .flags = UDMA_FLAGS_J7_CLASS, .statictr_z_mask = GENMASK(23, 0), .burst_size = { TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */ 0, /* No UH Channels */ }, }; static struct udma_soc_data am62a_dmss_csi_soc_data = { .oes = { .bcdma_rchan_data = 0xe00, .bcdma_rchan_ring = 0x1000, }, }; static struct udma_soc_data j721s2_bcdma_csi_soc_data = { .oes = { .bcdma_tchan_data = 0x800, .bcdma_tchan_ring = 0xa00, .bcdma_rchan_data = 0xe00, .bcdma_rchan_ring = 0x1000, }, }; static struct udma_match_data am62a_bcdma_csirx_data = { .type = DMA_TYPE_BCDMA, .psil_base = 0x3100, .enable_memcpy_support = false, .burst_size = { TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 0, /* No H 
Channels */ 0, /* No UH Channels */ }, .soc_data = &am62a_dmss_csi_soc_data, }; static struct udma_match_data am64_bcdma_data = { .type = DMA_TYPE_BCDMA, .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */ .enable_memcpy_support = true, /* Supported via bchan */ .flags = UDMA_FLAGS_J7_CLASS, .statictr_z_mask = GENMASK(23, 0), .burst_size = { TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 0, /* No H Channels */ 0, /* No UH Channels */ }, }; static struct udma_match_data am64_pktdma_data = { .type = DMA_TYPE_PKTDMA, .psil_base = 0x1000, .enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */ .flags = UDMA_FLAGS_J7_CLASS, .statictr_z_mask = GENMASK(23, 0), .burst_size = { TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 0, /* No H Channels */ 0, /* No UH Channels */ }, }; static struct udma_match_data j721s2_bcdma_csi_data = { .type = DMA_TYPE_BCDMA, .psil_base = 0x2000, .enable_memcpy_support = false, .burst_size = { TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 0, /* No H Channels */ 0, /* No UH Channels */ }, .soc_data = &j721s2_bcdma_csi_soc_data, }; static const struct of_device_id udma_of_match[] = { { .compatible = "ti,am654-navss-main-udmap", .data = &am654_main_data, }, { .compatible = "ti,am654-navss-mcu-udmap", .data = &am654_mcu_data, }, { .compatible = "ti,j721e-navss-main-udmap", .data = &j721e_main_data, }, { .compatible = "ti,j721e-navss-mcu-udmap", .data = &j721e_mcu_data, }, { .compatible = "ti,am64-dmss-bcdma", .data = &am64_bcdma_data, }, { .compatible = "ti,am64-dmss-pktdma", .data = &am64_pktdma_data, }, { .compatible = "ti,am62a-dmss-bcdma-csirx", .data = &am62a_bcdma_csirx_data, }, { .compatible = "ti,j721s2-dmss-bcdma-csi", .data = &j721s2_bcdma_csi_data, }, { /* Sentinel */ }, }; static struct udma_soc_data am654_soc_data = { .oes = { .udma_rchan = 0x200, }, }; static struct udma_soc_data j721e_soc_data = { .oes = { .udma_rchan = 0x400, }, }; static struct udma_soc_data j7200_soc_data = { .oes = { .udma_rchan = 0x80, }, }; static struct udma_soc_data am64_soc_data = { .oes = { .bcdma_bchan_data = 0x2200, .bcdma_bchan_ring = 0x2400, .bcdma_tchan_data = 0x2800, .bcdma_tchan_ring = 0x2a00, .bcdma_rchan_data = 0x2e00, .bcdma_rchan_ring = 0x3000, .pktdma_tchan_flow = 0x1200, .pktdma_rchan_flow = 0x1600, }, .bcdma_trigger_event_offset = 0xc400, }; static const struct soc_device_attribute k3_soc_devices[] = { { .family = "AM65X", .data = &am654_soc_data }, { .family = "J721E", .data = &j721e_soc_data }, { .family = "J7200", .data = &j7200_soc_data }, { .family = "AM64X", .data = &am64_soc_data }, { .family = "J721S2", .data = &j721e_soc_data}, { .family = "AM62X", .data = &am64_soc_data }, { .family = "AM62AX", .data = &am64_soc_data }, { .family = "J784S4", .data = &j721e_soc_data }, { /* sentinel */ } }; static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud) { u32 cap2, cap3, cap4; int i; ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]); if (IS_ERR(ud->mmrs[MMR_GCFG])) return PTR_ERR(ud->mmrs[MMR_GCFG]); cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28); cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); switch (ud->match_data->type) { case DMA_TYPE_UDMA: ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3); ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2); ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2); ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2); break; case DMA_TYPE_BCDMA: ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2); ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2); 
ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2); ud->rflow_cnt = ud->rchan_cnt; break; case DMA_TYPE_PKTDMA: cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30); ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2); ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2); ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3); ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4); break; default: return -EINVAL; } for (i = 1; i < MMR_LAST; i++) { if (i == MMR_BCHANRT && ud->bchan_cnt == 0) continue; if (i == MMR_TCHANRT && ud->tchan_cnt == 0) continue; if (i == MMR_RCHANRT && ud->rchan_cnt == 0) continue; ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]); if (IS_ERR(ud->mmrs[i])) return PTR_ERR(ud->mmrs[i]); } return 0; } static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map, struct ti_sci_resource_desc *rm_desc, char *name) { bitmap_clear(map, rm_desc->start, rm_desc->num); bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec); dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name, rm_desc->start, rm_desc->num, rm_desc->start_sec, rm_desc->num_sec); } static const char * const range_names[] = { [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan", [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan", [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan", [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow", [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow", }; static int udma_setup_resources(struct udma_dev *ud) { int ret, i, j; struct device *dev = ud->dev; struct ti_sci_resource *rm_res, irq_res; struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; u32 cap3; /* Set up the throughput level start indexes */ cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); if (of_device_is_compatible(dev->of_node, "ti,am654-navss-main-udmap")) { ud->tchan_tpl.levels = 2; ud->tchan_tpl.start_idx[0] = 8; } else if (of_device_is_compatible(dev->of_node, "ti,am654-navss-mcu-udmap")) { ud->tchan_tpl.levels = 2; ud->tchan_tpl.start_idx[0] = 2; } else if (UDMA_CAP3_UCHAN_CNT(cap3)) { ud->tchan_tpl.levels = 3; ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3); ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); } else if (UDMA_CAP3_HCHAN_CNT(cap3)) { ud->tchan_tpl.levels = 2; ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); } else { ud->tchan_tpl.levels = 1; } ud->rchan_tpl.levels = ud->tchan_tpl.levels; ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0]; ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1]; ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), sizeof(unsigned long), GFP_KERNEL); ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), GFP_KERNEL); ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), sizeof(unsigned long), GFP_KERNEL); ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), GFP_KERNEL); ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt), sizeof(unsigned long), GFP_KERNEL); ud->rflow_gp_map_allocated = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), sizeof(unsigned long), GFP_KERNEL); ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), sizeof(unsigned long), GFP_KERNEL); ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), GFP_KERNEL); if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map || !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans || !ud->rflows || !ud->rflow_in_use) return -ENOMEM; /* * RX flows with the same Ids as RX channels are reserved to be used * as default flows if remote HW can't generate flow_ids. Those * RX flows can be requested only explicitly by id. 
*/ bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt); /* by default no GP rflows are assigned to Linux */ bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt); /* Get resource ranges from tisci */ for (i = 0; i < RM_RANGE_LAST; i++) { if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW) continue; tisci_rm->rm_ranges[i] = devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, tisci_rm->tisci_dev_id, (char *)range_names[i]); } /* tchan ranges */ rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; if (IS_ERR(rm_res)) { bitmap_zero(ud->tchan_map, ud->tchan_cnt); irq_res.sets = 1; } else { bitmap_fill(ud->tchan_map, ud->tchan_cnt); for (i = 0; i < rm_res->sets; i++) udma_mark_resource_ranges(ud, ud->tchan_map, &rm_res->desc[i], "tchan"); irq_res.sets = rm_res->sets; } /* rchan and matching default flow ranges */ rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; if (IS_ERR(rm_res)) { bitmap_zero(ud->rchan_map, ud->rchan_cnt); irq_res.sets++; } else { bitmap_fill(ud->rchan_map, ud->rchan_cnt); for (i = 0; i < rm_res->sets; i++) udma_mark_resource_ranges(ud, ud->rchan_map, &rm_res->desc[i], "rchan"); irq_res.sets += rm_res->sets; } irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); if (!irq_res.desc) return -ENOMEM; rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; if (IS_ERR(rm_res)) { irq_res.desc[0].start = 0; irq_res.desc[0].num = ud->tchan_cnt; i = 1; } else { for (i = 0; i < rm_res->sets; i++) { irq_res.desc[i].start = rm_res->desc[i].start; irq_res.desc[i].num = rm_res->desc[i].num; irq_res.desc[i].start_sec = rm_res->desc[i].start_sec; irq_res.desc[i].num_sec = rm_res->desc[i].num_sec; } } rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; if (IS_ERR(rm_res)) { irq_res.desc[i].start = 0; irq_res.desc[i].num = ud->rchan_cnt; } else { for (j = 0; j < rm_res->sets; j++, i++) { if (rm_res->desc[j].num) { irq_res.desc[i].start = rm_res->desc[j].start + ud->soc_data->oes.udma_rchan; irq_res.desc[i].num = rm_res->desc[j].num; } if (rm_res->desc[j].num_sec) { irq_res.desc[i].start_sec = rm_res->desc[j].start_sec + ud->soc_data->oes.udma_rchan; irq_res.desc[i].num_sec = rm_res->desc[j].num_sec; } } } ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); kfree(irq_res.desc); if (ret) { dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); return ret; } /* GP rflow ranges */ rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; if (IS_ERR(rm_res)) { /* all gp flows are assigned exclusively to Linux */ bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt, ud->rflow_cnt - ud->rchan_cnt); } else { for (i = 0; i < rm_res->sets; i++) udma_mark_resource_ranges(ud, ud->rflow_gp_map, &rm_res->desc[i], "gp-rflow"); } return 0; } static int bcdma_setup_resources(struct udma_dev *ud) { int ret, i, j; struct device *dev = ud->dev; struct ti_sci_resource *rm_res, irq_res; struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; const struct udma_oes_offsets *oes = &ud->soc_data->oes; u32 cap; /* Set up the throughput level start indexes */ cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c); if (BCDMA_CAP3_UBCHAN_CNT(cap)) { ud->bchan_tpl.levels = 3; ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap); ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap); } else if (BCDMA_CAP3_HBCHAN_CNT(cap)) { ud->bchan_tpl.levels = 2; ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap); } else { ud->bchan_tpl.levels = 1; } cap = udma_read(ud->mmrs[MMR_GCFG], 0x30); if (BCDMA_CAP4_URCHAN_CNT(cap)) { ud->rchan_tpl.levels = 3; ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap); ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap); } else if 
(BCDMA_CAP4_HRCHAN_CNT(cap)) { ud->rchan_tpl.levels = 2; ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap); } else { ud->rchan_tpl.levels = 1; } if (BCDMA_CAP4_UTCHAN_CNT(cap)) { ud->tchan_tpl.levels = 3; ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap); ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap); } else if (BCDMA_CAP4_HTCHAN_CNT(cap)) { ud->tchan_tpl.levels = 2; ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap); } else { ud->tchan_tpl.levels = 1; } ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt), sizeof(unsigned long), GFP_KERNEL); ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans), GFP_KERNEL); ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), sizeof(unsigned long), GFP_KERNEL); ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), GFP_KERNEL); ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), sizeof(unsigned long), GFP_KERNEL); ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), GFP_KERNEL); /* BCDMA do not really have flows, but the driver expect it */ ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt), sizeof(unsigned long), GFP_KERNEL); ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows), GFP_KERNEL); if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map || !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans || !ud->rflows) return -ENOMEM; /* Get resource ranges from tisci */ for (i = 0; i < RM_RANGE_LAST; i++) { if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW) continue; if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0) continue; if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0) continue; if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0) continue; tisci_rm->rm_ranges[i] = devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, tisci_rm->tisci_dev_id, (char *)range_names[i]); } irq_res.sets = 0; /* bchan ranges */ if (ud->bchan_cnt) { rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; if (IS_ERR(rm_res)) { bitmap_zero(ud->bchan_map, ud->bchan_cnt); irq_res.sets++; } else { bitmap_fill(ud->bchan_map, ud->bchan_cnt); for (i = 0; i < rm_res->sets; i++) udma_mark_resource_ranges(ud, ud->bchan_map, &rm_res->desc[i], "bchan"); irq_res.sets += rm_res->sets; } } /* tchan ranges */ if (ud->tchan_cnt) { rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; if (IS_ERR(rm_res)) { bitmap_zero(ud->tchan_map, ud->tchan_cnt); irq_res.sets += 2; } else { bitmap_fill(ud->tchan_map, ud->tchan_cnt); for (i = 0; i < rm_res->sets; i++) udma_mark_resource_ranges(ud, ud->tchan_map, &rm_res->desc[i], "tchan"); irq_res.sets += rm_res->sets * 2; } } /* rchan ranges */ if (ud->rchan_cnt) { rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; if (IS_ERR(rm_res)) { bitmap_zero(ud->rchan_map, ud->rchan_cnt); irq_res.sets += 2; } else { bitmap_fill(ud->rchan_map, ud->rchan_cnt); for (i = 0; i < rm_res->sets; i++) udma_mark_resource_ranges(ud, ud->rchan_map, &rm_res->desc[i], "rchan"); irq_res.sets += rm_res->sets * 2; } } irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); if (!irq_res.desc) return -ENOMEM; if (ud->bchan_cnt) { rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; if (IS_ERR(rm_res)) { irq_res.desc[0].start = oes->bcdma_bchan_ring; irq_res.desc[0].num = ud->bchan_cnt; i = 1; } else { for (i = 0; i < rm_res->sets; i++) { irq_res.desc[i].start = rm_res->desc[i].start + oes->bcdma_bchan_ring; irq_res.desc[i].num = rm_res->desc[i].num; } } } else { i = 0; } if (ud->tchan_cnt) { rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; if (IS_ERR(rm_res)) { 
irq_res.desc[i].start = oes->bcdma_tchan_data; irq_res.desc[i].num = ud->tchan_cnt; irq_res.desc[i + 1].start = oes->bcdma_tchan_ring; irq_res.desc[i + 1].num = ud->tchan_cnt; i += 2; } else { for (j = 0; j < rm_res->sets; j++, i += 2) { irq_res.desc[i].start = rm_res->desc[j].start + oes->bcdma_tchan_data; irq_res.desc[i].num = rm_res->desc[j].num; irq_res.desc[i + 1].start = rm_res->desc[j].start + oes->bcdma_tchan_ring; irq_res.desc[i + 1].num = rm_res->desc[j].num; } } } if (ud->rchan_cnt) { rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; if (IS_ERR(rm_res)) { irq_res.desc[i].start = oes->bcdma_rchan_data; irq_res.desc[i].num = ud->rchan_cnt; irq_res.desc[i + 1].start = oes->bcdma_rchan_ring; irq_res.desc[i + 1].num = ud->rchan_cnt; i += 2; } else { for (j = 0; j < rm_res->sets; j++, i += 2) { irq_res.desc[i].start = rm_res->desc[j].start + oes->bcdma_rchan_data; irq_res.desc[i].num = rm_res->desc[j].num; irq_res.desc[i + 1].start = rm_res->desc[j].start + oes->bcdma_rchan_ring; irq_res.desc[i + 1].num = rm_res->desc[j].num; } } } ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); kfree(irq_res.desc); if (ret) { dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); return ret; } return 0; } static int pktdma_setup_resources(struct udma_dev *ud) { int ret, i, j; struct device *dev = ud->dev; struct ti_sci_resource *rm_res, irq_res; struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; const struct udma_oes_offsets *oes = &ud->soc_data->oes; u32 cap3; /* Set up the throughput level start indexes */ cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); if (UDMA_CAP3_UCHAN_CNT(cap3)) { ud->tchan_tpl.levels = 3; ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3); ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); } else if (UDMA_CAP3_HCHAN_CNT(cap3)) { ud->tchan_tpl.levels = 2; ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); } else { ud->tchan_tpl.levels = 1; } ud->rchan_tpl.levels = ud->tchan_tpl.levels; ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0]; ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1]; ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), sizeof(unsigned long), GFP_KERNEL); ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), GFP_KERNEL); ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), sizeof(unsigned long), GFP_KERNEL); ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), GFP_KERNEL); ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), sizeof(unsigned long), GFP_KERNEL); ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), GFP_KERNEL); ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt), sizeof(unsigned long), GFP_KERNEL); if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans || !ud->rchans || !ud->rflows || !ud->rflow_in_use) return -ENOMEM; /* Get resource ranges from tisci */ for (i = 0; i < RM_RANGE_LAST; i++) { if (i == RM_RANGE_BCHAN) continue; tisci_rm->rm_ranges[i] = devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, tisci_rm->tisci_dev_id, (char *)range_names[i]); } /* tchan ranges */ rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; if (IS_ERR(rm_res)) { bitmap_zero(ud->tchan_map, ud->tchan_cnt); } else { bitmap_fill(ud->tchan_map, ud->tchan_cnt); for (i = 0; i < rm_res->sets; i++) udma_mark_resource_ranges(ud, ud->tchan_map, &rm_res->desc[i], "tchan"); } /* rchan ranges */ rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; if (IS_ERR(rm_res)) { bitmap_zero(ud->rchan_map, ud->rchan_cnt); } else { 
bitmap_fill(ud->rchan_map, ud->rchan_cnt); for (i = 0; i < rm_res->sets; i++) udma_mark_resource_ranges(ud, ud->rchan_map, &rm_res->desc[i], "rchan"); } /* rflow ranges */ rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; if (IS_ERR(rm_res)) { /* all rflows are assigned exclusively to Linux */ bitmap_zero(ud->rflow_in_use, ud->rflow_cnt); irq_res.sets = 1; } else { bitmap_fill(ud->rflow_in_use, ud->rflow_cnt); for (i = 0; i < rm_res->sets; i++) udma_mark_resource_ranges(ud, ud->rflow_in_use, &rm_res->desc[i], "rflow"); irq_res.sets = rm_res->sets; } /* tflow ranges */ rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; if (IS_ERR(rm_res)) { /* all tflows are assigned exclusively to Linux */ bitmap_zero(ud->tflow_map, ud->tflow_cnt); irq_res.sets++; } else { bitmap_fill(ud->tflow_map, ud->tflow_cnt); for (i = 0; i < rm_res->sets; i++) udma_mark_resource_ranges(ud, ud->tflow_map, &rm_res->desc[i], "tflow"); irq_res.sets += rm_res->sets; } irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); if (!irq_res.desc) return -ENOMEM; rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; if (IS_ERR(rm_res)) { irq_res.desc[0].start = oes->pktdma_tchan_flow; irq_res.desc[0].num = ud->tflow_cnt; i = 1; } else { for (i = 0; i < rm_res->sets; i++) { irq_res.desc[i].start = rm_res->desc[i].start + oes->pktdma_tchan_flow; irq_res.desc[i].num = rm_res->desc[i].num; } } rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; if (IS_ERR(rm_res)) { irq_res.desc[i].start = oes->pktdma_rchan_flow; irq_res.desc[i].num = ud->rflow_cnt; } else { for (j = 0; j < rm_res->sets; j++, i++) { irq_res.desc[i].start = rm_res->desc[j].start + oes->pktdma_rchan_flow; irq_res.desc[i].num = rm_res->desc[j].num; } } ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); kfree(irq_res.desc); if (ret) { dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); return ret; } return 0; } static int setup_resources(struct udma_dev *ud) { struct device *dev = ud->dev; int ch_count, ret; switch (ud->match_data->type) { case DMA_TYPE_UDMA: ret = udma_setup_resources(ud); break; case DMA_TYPE_BCDMA: ret = bcdma_setup_resources(ud); break; case DMA_TYPE_PKTDMA: ret = pktdma_setup_resources(ud); break; default: return -EINVAL; } if (ret) return ret; ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt; if (ud->bchan_cnt) ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt); ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt); ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt); if (!ch_count) return -ENODEV; ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels), GFP_KERNEL); if (!ud->channels) return -ENOMEM; switch (ud->match_data->type) { case DMA_TYPE_UDMA: dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n", ch_count, ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt), ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt), ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map, ud->rflow_cnt)); break; case DMA_TYPE_BCDMA: dev_info(dev, "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n", ch_count, ud->bchan_cnt - bitmap_weight(ud->bchan_map, ud->bchan_cnt), ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt), ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt)); break; case DMA_TYPE_PKTDMA: dev_info(dev, "Channels: %d (tchan: %u, rchan: %u)\n", ch_count, ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt), ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt)); break; default: break; } return ch_count; } static int udma_setup_rx_flush(struct udma_dev *ud) { struct 
udma_rx_flush *rx_flush = &ud->rx_flush; struct cppi5_desc_hdr_t *tr_desc; struct cppi5_tr_type1_t *tr_req; struct cppi5_host_desc_t *desc; struct device *dev = ud->dev; struct udma_hwdesc *hwdesc; size_t tr_size; /* Allocate 1K buffer for discarded data on RX channel teardown */ rx_flush->buffer_size = SZ_1K; rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size, GFP_KERNEL); if (!rx_flush->buffer_vaddr) return -ENOMEM; rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr, rx_flush->buffer_size, DMA_TO_DEVICE); if (dma_mapping_error(dev, rx_flush->buffer_paddr)) return -ENOMEM; /* Set up descriptor to be used for TR mode */ hwdesc = &rx_flush->hwdescs[0]; tr_size = sizeof(struct cppi5_tr_type1_t); hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1); hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, ud->desc_align); hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size, GFP_KERNEL); if (!hwdesc->cppi5_desc_vaddr) return -ENOMEM; hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr, hwdesc->cppi5_desc_size, DMA_TO_DEVICE); if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr)) return -ENOMEM; /* Start of the TR req records */ hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; /* Start address of the TR response array */ hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size; tr_desc = hwdesc->cppi5_desc_vaddr; cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0); cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT); cppi5_desc_set_retpolicy(tr_desc, 0, 0); tr_req = hwdesc->tr_req_base; cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT); tr_req->addr = rx_flush->buffer_paddr; tr_req->icnt0 = rx_flush->buffer_size; tr_req->icnt1 = 1; dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr, hwdesc->cppi5_desc_size, DMA_TO_DEVICE); /* Set up descriptor to be used for packet mode */ hwdesc = &rx_flush->hwdescs[1]; hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + CPPI5_INFO0_HDESC_EPIB_SIZE + CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE, ud->desc_align); hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size, GFP_KERNEL); if (!hwdesc->cppi5_desc_vaddr) return -ENOMEM; hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr, hwdesc->cppi5_desc_size, DMA_TO_DEVICE); if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr)) return -ENOMEM; desc = hwdesc->cppi5_desc_vaddr; cppi5_hdesc_init(desc, 0, 0); cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT); cppi5_desc_set_retpolicy(&desc->hdr, 0, 0); cppi5_hdesc_attach_buf(desc, rx_flush->buffer_paddr, rx_flush->buffer_size, rx_flush->buffer_paddr, rx_flush->buffer_size); dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr, hwdesc->cppi5_desc_size, DMA_TO_DEVICE); return 0; } #ifdef CONFIG_DEBUG_FS static void udma_dbg_summary_show_chan(struct seq_file *s, struct dma_chan *chan) { struct udma_chan *uc = to_udma_chan(chan); struct udma_chan_config *ucc = &uc->config; seq_printf(s, " %-13s| %s", dma_chan_name(chan), chan->dbg_client_name ?: "in-use"); if (ucc->tr_trigger_type) seq_puts(s, " (triggered, "); else seq_printf(s, " (%s, ", dmaengine_get_direction_text(uc->config.dir)); switch (uc->config.dir) { case DMA_MEM_TO_MEM: if (uc->ud->match_data->type == DMA_TYPE_BCDMA) { seq_printf(s, "bchan%d)\n", uc->bchan->id); return; } seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id, ucc->src_thread, 
ucc->dst_thread); break; case DMA_DEV_TO_MEM: seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id, ucc->src_thread, ucc->dst_thread); if (uc->ud->match_data->type == DMA_TYPE_PKTDMA) seq_printf(s, "rflow%d, ", uc->rflow->id); break; case DMA_MEM_TO_DEV: seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id, ucc->src_thread, ucc->dst_thread); if (uc->ud->match_data->type == DMA_TYPE_PKTDMA) seq_printf(s, "tflow%d, ", uc->tchan->tflow_id); break; default: seq_printf(s, ")\n"); return; } if (ucc->ep_type == PSIL_EP_NATIVE) { seq_printf(s, "PSI-L Native"); if (ucc->metadata_size) { seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : ""); if (ucc->psd_size) seq_printf(s, " PSDsize:%u", ucc->psd_size); seq_printf(s, " ]"); } } else { seq_printf(s, "PDMA"); if (ucc->enable_acc32 || ucc->enable_burst) seq_printf(s, "[%s%s ]", ucc->enable_acc32 ? " ACC32" : "", ucc->enable_burst ? " BURST" : ""); } seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode"); } static void udma_dbg_summary_show(struct seq_file *s, struct dma_device *dma_dev) { struct dma_chan *chan; list_for_each_entry(chan, &dma_dev->channels, device_node) { if (chan->client_count) udma_dbg_summary_show_chan(s, chan); } } #endif /* CONFIG_DEBUG_FS */ static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud) { const struct udma_match_data *match_data = ud->match_data; u8 tpl; if (!match_data->enable_memcpy_support) return DMAENGINE_ALIGN_8_BYTES; /* Get the highest TPL level the device supports for memcpy */ if (ud->bchan_cnt) tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0); else if (ud->tchan_cnt) tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0); else return DMAENGINE_ALIGN_8_BYTES; switch (match_data->burst_size[tpl]) { case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES: return DMAENGINE_ALIGN_256_BYTES; case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES: return DMAENGINE_ALIGN_128_BYTES; case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES: fallthrough; default: return DMAENGINE_ALIGN_64_BYTES; } } #define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) static int udma_probe(struct platform_device *pdev) { struct device_node *navss_node = pdev->dev.parent->of_node; const struct soc_device_attribute *soc; struct device *dev = &pdev->dev; struct udma_dev *ud; const struct of_device_id *match; int i, ret; int ch_count; ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (ret) dev_err(dev, "failed to set dma mask stuff\n"); ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL); if (!ud) return -ENOMEM; match = of_match_node(udma_of_match, dev->of_node); if (!match) { dev_err(dev, "No compatible match found\n"); return -ENODEV; } ud->match_data = match->data; ud->soc_data = ud->match_data->soc_data; if (!ud->soc_data) { soc = soc_device_match(k3_soc_devices); if (!soc) { dev_err(dev, "No compatible SoC found\n"); return -ENODEV; } ud->soc_data = soc->data; } ret = udma_get_mmrs(pdev, ud); if (ret) return ret; ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci"); if (IS_ERR(ud->tisci_rm.tisci)) return PTR_ERR(ud->tisci_rm.tisci); ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &ud->tisci_rm.tisci_dev_id); if (ret) { dev_err(dev, "ti,sci-dev-id read failure %d\n", ret); return ret; } pdev->id = ud->tisci_rm.tisci_dev_id; ret = of_property_read_u32(navss_node, "ti,sci-dev-id", &ud->tisci_rm.tisci_navss_dev_id); if (ret) { dev_err(dev, "NAVSS 
ti,sci-dev-id read failure %d\n", ret); return ret; } if (ud->match_data->type == DMA_TYPE_UDMA) { ret = of_property_read_u32(dev->of_node, "ti,udma-atype", &ud->atype); if (!ret && ud->atype > 2) { dev_err(dev, "Invalid atype: %u\n", ud->atype); return -EINVAL; } } else { ret = of_property_read_u32(dev->of_node, "ti,asel", &ud->asel); if (!ret && ud->asel > 15) { dev_err(dev, "Invalid asel: %u\n", ud->asel); return -EINVAL; } } ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops; ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops; if (ud->match_data->type == DMA_TYPE_UDMA) { ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc"); } else { struct k3_ringacc_init_data ring_init_data; ring_init_data.tisci = ud->tisci_rm.tisci; ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id; if (ud->match_data->type == DMA_TYPE_BCDMA) { ring_init_data.num_rings = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt; } else { ring_init_data.num_rings = ud->rflow_cnt + ud->tflow_cnt; } ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data); } if (IS_ERR(ud->ringacc)) return PTR_ERR(ud->ringacc); dev->msi.domain = of_msi_get_domain(dev, dev->of_node, DOMAIN_BUS_TI_SCI_INTA_MSI); if (!dev->msi.domain) { return -EPROBE_DEFER; } dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask); /* cyclic operation is not supported via PKTDMA */ if (ud->match_data->type != DMA_TYPE_PKTDMA) { dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask); ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic; } ud->ddev.device_config = udma_slave_config; ud->ddev.device_prep_slave_sg = udma_prep_slave_sg; ud->ddev.device_issue_pending = udma_issue_pending; ud->ddev.device_tx_status = udma_tx_status; ud->ddev.device_pause = udma_pause; ud->ddev.device_resume = udma_resume; ud->ddev.device_terminate_all = udma_terminate_all; ud->ddev.device_synchronize = udma_synchronize; #ifdef CONFIG_DEBUG_FS ud->ddev.dbg_summary_show = udma_dbg_summary_show; #endif switch (ud->match_data->type) { case DMA_TYPE_UDMA: ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources; break; case DMA_TYPE_BCDMA: ud->ddev.device_alloc_chan_resources = bcdma_alloc_chan_resources; ud->ddev.device_router_config = bcdma_router_config; break; case DMA_TYPE_PKTDMA: ud->ddev.device_alloc_chan_resources = pktdma_alloc_chan_resources; break; default: return -EINVAL; } ud->ddev.device_free_chan_resources = udma_free_chan_resources; ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS; ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS; ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT | DESC_METADATA_ENGINE; if (ud->match_data->enable_memcpy_support && !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) { dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask); ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy; ud->ddev.directions |= BIT(DMA_MEM_TO_MEM); } ud->ddev.dev = dev; ud->dev = dev; ud->psil_base = ud->match_data->psil_base; INIT_LIST_HEAD(&ud->ddev.channels); INIT_LIST_HEAD(&ud->desc_to_purge); ch_count = setup_resources(ud); if (ch_count <= 0) return ch_count; spin_lock_init(&ud->lock); INIT_WORK(&ud->purge_work, udma_purge_desc_work); ud->desc_align = 64; if (ud->desc_align < dma_get_cache_alignment()) ud->desc_align = dma_get_cache_alignment(); ret = udma_setup_rx_flush(ud); if (ret) return ret; for (i = 0; i < ud->bchan_cnt; i++) { struct udma_bchan *bchan = &ud->bchans[i]; bchan->id = i; 
bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000; } for (i = 0; i < ud->tchan_cnt; i++) { struct udma_tchan *tchan = &ud->tchans[i]; tchan->id = i; tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000; } for (i = 0; i < ud->rchan_cnt; i++) { struct udma_rchan *rchan = &ud->rchans[i]; rchan->id = i; rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000; } for (i = 0; i < ud->rflow_cnt; i++) { struct udma_rflow *rflow = &ud->rflows[i]; rflow->id = i; } for (i = 0; i < ch_count; i++) { struct udma_chan *uc = &ud->channels[i]; uc->ud = ud; uc->vc.desc_free = udma_desc_free; uc->id = i; uc->bchan = NULL; uc->tchan = NULL; uc->rchan = NULL; uc->config.remote_thread_id = -1; uc->config.mapped_channel_id = -1; uc->config.default_flow_id = -1; uc->config.dir = DMA_MEM_TO_MEM; uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d", dev_name(dev), i); vchan_init(&uc->vc, &ud->ddev); /* Use custom vchan completion handling */ tasklet_setup(&uc->vc.task, udma_vchan_complete); init_completion(&uc->teardown_completed); INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion); } /* Configure the copy_align to the maximum burst size the device supports */ ud->ddev.copy_align = udma_get_copy_align(ud); ret = dma_async_device_register(&ud->ddev); if (ret) { dev_err(dev, "failed to register slave DMA engine: %d\n", ret); return ret; } platform_set_drvdata(pdev, ud); ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud); if (ret) { dev_err(dev, "failed to register of_dma controller\n"); dma_async_device_unregister(&ud->ddev); } return ret; } static int __maybe_unused udma_pm_suspend(struct device *dev) { struct udma_dev *ud = dev_get_drvdata(dev); struct dma_device *dma_dev = &ud->ddev; struct dma_chan *chan; struct udma_chan *uc; list_for_each_entry(chan, &dma_dev->channels, device_node) { if (chan->client_count) { uc = to_udma_chan(chan); /* backup the channel configuration */ memcpy(&uc->backup_config, &uc->config, sizeof(struct udma_chan_config)); dev_dbg(dev, "Suspending channel %s\n", dma_chan_name(chan)); ud->ddev.device_free_chan_resources(chan); } } return 0; } static int __maybe_unused udma_pm_resume(struct device *dev) { struct udma_dev *ud = dev_get_drvdata(dev); struct dma_device *dma_dev = &ud->ddev; struct dma_chan *chan; struct udma_chan *uc; int ret; list_for_each_entry(chan, &dma_dev->channels, device_node) { if (chan->client_count) { uc = to_udma_chan(chan); /* restore the channel configuration */ memcpy(&uc->config, &uc->backup_config, sizeof(struct udma_chan_config)); dev_dbg(dev, "Resuming channel %s\n", dma_chan_name(chan)); ret = ud->ddev.device_alloc_chan_resources(chan); if (ret) return ret; } } return 0; } static const struct dev_pm_ops udma_pm_ops = { SET_LATE_SYSTEM_SLEEP_PM_OPS(udma_pm_suspend, udma_pm_resume) }; static struct platform_driver udma_driver = { .driver = { .name = "ti-udma", .of_match_table = udma_of_match, .suppress_bind_attrs = true, .pm = &udma_pm_ops, }, .probe = udma_probe, }; module_platform_driver(udma_driver); MODULE_LICENSE("GPL v2"); /* Private interfaces to UDMA */ #include "k3-udma-private.c"
linux-master
drivers/dma/ti/k3-udma.c
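The k3-udma provider above is consumed through the generic dmaengine slave API: a peripheral driver's "dmas"/"dma-names" specifier is resolved by udma_of_xlate() and udma_dma_filter_fn(), after which the channel is programmed with the standard slave-config/prep/submit calls. The following is a minimal client-side sketch of that flow, not part of the driver itself; the device pointer, the "rx" channel name, the FIFO address and the destination buffer are hypothetical placeholders.

/*
 * Illustrative dmaengine client sketch (assumed consumer code, not from the
 * driver above): request an RX channel, configure it for a 32-bit peripheral
 * FIFO and queue a single DEV_TO_MEM transfer.
 */
#include <linux/dmaengine.h>
#include <linux/err.h>

static int example_setup_rx_dma(struct device *dev, dma_addr_t fifo_addr,
				dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = {
		.src_addr = fifo_addr,			/* peripheral FIFO (hypothetical) */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 1,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret;

	/*
	 * Resolved from the "dmas"/"dma-names" properties; for this provider
	 * the specifier is translated by udma_of_xlate() and
	 * udma_dma_filter_fn() above.
	 */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto err_release;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -EINVAL;
		goto err_release;
	}

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie)) {
		ret = -EIO;
		goto err_release;
	}

	/* Kick the engine; completion is reported via the descriptor callback. */
	dma_async_issue_pending(chan);
	return 0;

err_release:
	dma_release_channel(chan);
	return ret;
}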
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP DMAengine support */ #include <linux/cpu_pm.h> #include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/module.h> #include <linux/omap-dma.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/of.h> #include <linux/of_dma.h> #include "../virt-dma.h" #define OMAP_SDMA_REQUESTS 127 #define OMAP_SDMA_CHANNELS 32 struct omap_dma_config { int lch_end; unsigned int rw_priority:1; unsigned int needs_busy_check:1; unsigned int may_lose_context:1; unsigned int needs_lch_clear:1; }; struct omap_dma_context { u32 irqenable_l0; u32 irqenable_l1; u32 ocp_sysconfig; u32 gcr; }; struct omap_dmadev { struct dma_device ddev; spinlock_t lock; void __iomem *base; const struct omap_dma_reg *reg_map; struct omap_system_dma_plat_info *plat; const struct omap_dma_config *cfg; struct notifier_block nb; struct omap_dma_context context; int lch_count; DECLARE_BITMAP(lch_bitmap, OMAP_SDMA_CHANNELS); struct mutex lch_lock; /* for assigning logical channels */ bool legacy; bool ll123_supported; struct dma_pool *desc_pool; unsigned dma_requests; spinlock_t irq_lock; uint32_t irq_enable_mask; struct omap_chan **lch_map; }; struct omap_chan { struct virt_dma_chan vc; void __iomem *channel_base; const struct omap_dma_reg *reg_map; uint32_t ccr; struct dma_slave_config cfg; unsigned dma_sig; bool cyclic; bool paused; bool running; int dma_ch; struct omap_desc *desc; unsigned sgidx; }; #define DESC_NXT_SV_REFRESH (0x1 << 24) #define DESC_NXT_SV_REUSE (0x2 << 24) #define DESC_NXT_DV_REFRESH (0x1 << 26) #define DESC_NXT_DV_REUSE (0x2 << 26) #define DESC_NTYPE_TYPE2 (0x2 << 29) /* Type 2 descriptor with Source or Destination address update */ struct omap_type2_desc { uint32_t next_desc; uint32_t en; uint32_t addr; /* src or dst */ uint16_t fn; uint16_t cicr; int16_t cdei; int16_t csei; int32_t cdfi; int32_t csfi; } __packed; struct omap_sg { dma_addr_t addr; uint32_t en; /* number of elements (24-bit) */ uint32_t fn; /* number of frames (16-bit) */ int32_t fi; /* for double indexing */ int16_t ei; /* for double indexing */ /* Linked list */ struct omap_type2_desc *t2_desc; dma_addr_t t2_desc_paddr; }; struct omap_desc { struct virt_dma_desc vd; bool using_ll; enum dma_transfer_direction dir; dma_addr_t dev_addr; bool polled; int32_t fi; /* for OMAP_DMA_SYNC_PACKET / double indexing */ int16_t ei; /* for double indexing */ uint8_t es; /* CSDP_DATA_TYPE_xxx */ uint32_t ccr; /* CCR value */ uint16_t clnk_ctrl; /* CLNK_CTRL value */ uint16_t cicr; /* CICR value */ uint32_t csdp; /* CSDP value */ unsigned sglen; struct omap_sg sg[]; }; enum { CAPS_0_SUPPORT_LL123 = BIT(20), /* Linked List type1/2/3 */ CAPS_0_SUPPORT_LL4 = BIT(21), /* Linked List type4 */ CCR_FS = BIT(5), CCR_READ_PRIORITY = BIT(6), CCR_ENABLE = BIT(7), CCR_AUTO_INIT = BIT(8), /* OMAP1 only */ CCR_REPEAT = BIT(9), /* OMAP1 only */ CCR_OMAP31_DISABLE = BIT(10), /* OMAP1 only */ CCR_SUSPEND_SENSITIVE = BIT(8), /* OMAP2+ only */ CCR_RD_ACTIVE = BIT(9), /* OMAP2+ only */ CCR_WR_ACTIVE = BIT(10), /* OMAP2+ only */ CCR_SRC_AMODE_CONSTANT = 0 << 12, CCR_SRC_AMODE_POSTINC = 1 << 12, CCR_SRC_AMODE_SGLIDX = 2 << 12, CCR_SRC_AMODE_DBLIDX = 3 << 12, CCR_DST_AMODE_CONSTANT = 0 << 14, CCR_DST_AMODE_POSTINC = 1 << 14, CCR_DST_AMODE_SGLIDX = 2 << 14, CCR_DST_AMODE_DBLIDX = 3 << 14, CCR_CONSTANT_FILL = BIT(16), 
CCR_TRANSPARENT_COPY = BIT(17), CCR_BS = BIT(18), CCR_SUPERVISOR = BIT(22), CCR_PREFETCH = BIT(23), CCR_TRIGGER_SRC = BIT(24), CCR_BUFFERING_DISABLE = BIT(25), CCR_WRITE_PRIORITY = BIT(26), CCR_SYNC_ELEMENT = 0, CCR_SYNC_FRAME = CCR_FS, CCR_SYNC_BLOCK = CCR_BS, CCR_SYNC_PACKET = CCR_BS | CCR_FS, CSDP_DATA_TYPE_8 = 0, CSDP_DATA_TYPE_16 = 1, CSDP_DATA_TYPE_32 = 2, CSDP_SRC_PORT_EMIFF = 0 << 2, /* OMAP1 only */ CSDP_SRC_PORT_EMIFS = 1 << 2, /* OMAP1 only */ CSDP_SRC_PORT_OCP_T1 = 2 << 2, /* OMAP1 only */ CSDP_SRC_PORT_TIPB = 3 << 2, /* OMAP1 only */ CSDP_SRC_PORT_OCP_T2 = 4 << 2, /* OMAP1 only */ CSDP_SRC_PORT_MPUI = 5 << 2, /* OMAP1 only */ CSDP_SRC_PACKED = BIT(6), CSDP_SRC_BURST_1 = 0 << 7, CSDP_SRC_BURST_16 = 1 << 7, CSDP_SRC_BURST_32 = 2 << 7, CSDP_SRC_BURST_64 = 3 << 7, CSDP_DST_PORT_EMIFF = 0 << 9, /* OMAP1 only */ CSDP_DST_PORT_EMIFS = 1 << 9, /* OMAP1 only */ CSDP_DST_PORT_OCP_T1 = 2 << 9, /* OMAP1 only */ CSDP_DST_PORT_TIPB = 3 << 9, /* OMAP1 only */ CSDP_DST_PORT_OCP_T2 = 4 << 9, /* OMAP1 only */ CSDP_DST_PORT_MPUI = 5 << 9, /* OMAP1 only */ CSDP_DST_PACKED = BIT(13), CSDP_DST_BURST_1 = 0 << 14, CSDP_DST_BURST_16 = 1 << 14, CSDP_DST_BURST_32 = 2 << 14, CSDP_DST_BURST_64 = 3 << 14, CSDP_WRITE_NON_POSTED = 0 << 16, CSDP_WRITE_POSTED = 1 << 16, CSDP_WRITE_LAST_NON_POSTED = 2 << 16, CICR_TOUT_IE = BIT(0), /* OMAP1 only */ CICR_DROP_IE = BIT(1), CICR_HALF_IE = BIT(2), CICR_FRAME_IE = BIT(3), CICR_LAST_IE = BIT(4), CICR_BLOCK_IE = BIT(5), CICR_PKT_IE = BIT(7), /* OMAP2+ only */ CICR_TRANS_ERR_IE = BIT(8), /* OMAP2+ only */ CICR_SUPERVISOR_ERR_IE = BIT(10), /* OMAP2+ only */ CICR_MISALIGNED_ERR_IE = BIT(11), /* OMAP2+ only */ CICR_DRAIN_IE = BIT(12), /* OMAP2+ only */ CICR_SUPER_BLOCK_IE = BIT(14), /* OMAP2+ only */ CLNK_CTRL_ENABLE_LNK = BIT(15), CDP_DST_VALID_INC = 0 << 0, CDP_DST_VALID_RELOAD = 1 << 0, CDP_DST_VALID_REUSE = 2 << 0, CDP_SRC_VALID_INC = 0 << 2, CDP_SRC_VALID_RELOAD = 1 << 2, CDP_SRC_VALID_REUSE = 2 << 2, CDP_NTYPE_TYPE1 = 1 << 4, CDP_NTYPE_TYPE2 = 2 << 4, CDP_NTYPE_TYPE3 = 3 << 4, CDP_TMODE_NORMAL = 0 << 8, CDP_TMODE_LLIST = 1 << 8, CDP_FAST = BIT(10), }; static const unsigned es_bytes[] = { [CSDP_DATA_TYPE_8] = 1, [CSDP_DATA_TYPE_16] = 2, [CSDP_DATA_TYPE_32] = 4, }; static bool omap_dma_filter_fn(struct dma_chan *chan, void *param); static struct of_dma_filter_info omap_dma_info = { .filter_fn = omap_dma_filter_fn, }; static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d) { return container_of(d, struct omap_dmadev, ddev); } static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c) { return container_of(c, struct omap_chan, vc.chan); } static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t) { return container_of(t, struct omap_desc, vd.tx); } static void omap_dma_desc_free(struct virt_dma_desc *vd) { struct omap_desc *d = to_omap_dma_desc(&vd->tx); if (d->using_ll) { struct omap_dmadev *od = to_omap_dma_dev(vd->tx.chan->device); int i; for (i = 0; i < d->sglen; i++) { if (d->sg[i].t2_desc) dma_pool_free(od->desc_pool, d->sg[i].t2_desc, d->sg[i].t2_desc_paddr); } } kfree(d); } static void omap_dma_fill_type2_desc(struct omap_desc *d, int idx, enum dma_transfer_direction dir, bool last) { struct omap_sg *sg = &d->sg[idx]; struct omap_type2_desc *t2_desc = sg->t2_desc; if (idx) d->sg[idx - 1].t2_desc->next_desc = sg->t2_desc_paddr; if (last) t2_desc->next_desc = 0xfffffffc; t2_desc->en = sg->en; t2_desc->addr = sg->addr; t2_desc->fn = sg->fn & 0xffff; t2_desc->cicr = d->cicr; if (!last) t2_desc->cicr &= 
~CICR_BLOCK_IE; switch (dir) { case DMA_DEV_TO_MEM: t2_desc->cdei = sg->ei; t2_desc->csei = d->ei; t2_desc->cdfi = sg->fi; t2_desc->csfi = d->fi; t2_desc->en |= DESC_NXT_DV_REFRESH; t2_desc->en |= DESC_NXT_SV_REUSE; break; case DMA_MEM_TO_DEV: t2_desc->cdei = d->ei; t2_desc->csei = sg->ei; t2_desc->cdfi = d->fi; t2_desc->csfi = sg->fi; t2_desc->en |= DESC_NXT_SV_REFRESH; t2_desc->en |= DESC_NXT_DV_REUSE; break; default: return; } t2_desc->en |= DESC_NTYPE_TYPE2; } static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr) { switch (type) { case OMAP_DMA_REG_16BIT: writew_relaxed(val, addr); break; case OMAP_DMA_REG_2X16BIT: writew_relaxed(val, addr); writew_relaxed(val >> 16, addr + 2); break; case OMAP_DMA_REG_32BIT: writel_relaxed(val, addr); break; default: WARN_ON(1); } } static unsigned omap_dma_read(unsigned type, void __iomem *addr) { unsigned val; switch (type) { case OMAP_DMA_REG_16BIT: val = readw_relaxed(addr); break; case OMAP_DMA_REG_2X16BIT: val = readw_relaxed(addr); val |= readw_relaxed(addr + 2) << 16; break; case OMAP_DMA_REG_32BIT: val = readl_relaxed(addr); break; default: WARN_ON(1); val = 0; } return val; } static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val) { const struct omap_dma_reg *r = od->reg_map + reg; WARN_ON(r->stride); omap_dma_write(val, r->type, od->base + r->offset); } static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg) { const struct omap_dma_reg *r = od->reg_map + reg; WARN_ON(r->stride); return omap_dma_read(r->type, od->base + r->offset); } static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val) { const struct omap_dma_reg *r = c->reg_map + reg; omap_dma_write(val, r->type, c->channel_base + r->offset); } static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg) { const struct omap_dma_reg *r = c->reg_map + reg; return omap_dma_read(r->type, c->channel_base + r->offset); } static void omap_dma_clear_csr(struct omap_chan *c) { if (dma_omap1()) omap_dma_chan_read(c, CSR); else omap_dma_chan_write(c, CSR, ~0); } static unsigned omap_dma_get_csr(struct omap_chan *c) { unsigned val = omap_dma_chan_read(c, CSR); if (!dma_omap1()) omap_dma_chan_write(c, CSR, val); return val; } static void omap_dma_clear_lch(struct omap_dmadev *od, int lch) { struct omap_chan *c; int i; c = od->lch_map[lch]; if (!c) return; for (i = CSDP; i <= od->cfg->lch_end; i++) omap_dma_chan_write(c, i, 0); } static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c, unsigned lch) { c->channel_base = od->base + od->plat->channel_stride * lch; od->lch_map[lch] = c; } static void omap_dma_start(struct omap_chan *c, struct omap_desc *d) { struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); uint16_t cicr = d->cicr; if (__dma_omap15xx(od->plat->dma_attr)) omap_dma_chan_write(c, CPC, 0); else omap_dma_chan_write(c, CDAC, 0); omap_dma_clear_csr(c); if (d->using_ll) { uint32_t cdp = CDP_TMODE_LLIST | CDP_NTYPE_TYPE2 | CDP_FAST; if (d->dir == DMA_DEV_TO_MEM) cdp |= (CDP_DST_VALID_RELOAD | CDP_SRC_VALID_REUSE); else cdp |= (CDP_DST_VALID_REUSE | CDP_SRC_VALID_RELOAD); omap_dma_chan_write(c, CDP, cdp); omap_dma_chan_write(c, CNDP, d->sg[0].t2_desc_paddr); omap_dma_chan_write(c, CCDN, 0); omap_dma_chan_write(c, CCFN, 0xffff); omap_dma_chan_write(c, CCEN, 0xffffff); cicr &= ~CICR_BLOCK_IE; } else if (od->ll123_supported) { omap_dma_chan_write(c, CDP, 0); } /* Enable interrupts */ omap_dma_chan_write(c, CICR, cicr); /* Enable channel */ omap_dma_chan_write(c, CCR, d->ccr | 
CCR_ENABLE); c->running = true; } static void omap_dma_drain_chan(struct omap_chan *c) { int i; u32 val; /* Wait for sDMA FIFO to drain */ for (i = 0; ; i++) { val = omap_dma_chan_read(c, CCR); if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))) break; if (i > 100) break; udelay(5); } if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)) dev_err(c->vc.chan.device->dev, "DMA drain did not complete on lch %d\n", c->dma_ch); } static int omap_dma_stop(struct omap_chan *c) { struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); uint32_t val; /* disable irq */ omap_dma_chan_write(c, CICR, 0); omap_dma_clear_csr(c); val = omap_dma_chan_read(c, CCR); if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) { uint32_t sysconfig; sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG); val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK; val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE); omap_dma_glbl_write(od, OCP_SYSCONFIG, val); val = omap_dma_chan_read(c, CCR); val &= ~CCR_ENABLE; omap_dma_chan_write(c, CCR, val); if (!(c->ccr & CCR_BUFFERING_DISABLE)) omap_dma_drain_chan(c); omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig); } else { if (!(val & CCR_ENABLE)) return -EINVAL; val &= ~CCR_ENABLE; omap_dma_chan_write(c, CCR, val); if (!(c->ccr & CCR_BUFFERING_DISABLE)) omap_dma_drain_chan(c); } mb(); if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) { val = omap_dma_chan_read(c, CLNK_CTRL); if (dma_omap1()) val |= 1 << 14; /* set the STOP_LNK bit */ else val &= ~CLNK_CTRL_ENABLE_LNK; omap_dma_chan_write(c, CLNK_CTRL, val); } c->running = false; return 0; } static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d) { struct omap_sg *sg = d->sg + c->sgidx; unsigned cxsa, cxei, cxfi; if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) { cxsa = CDSA; cxei = CDEI; cxfi = CDFI; } else { cxsa = CSSA; cxei = CSEI; cxfi = CSFI; } omap_dma_chan_write(c, cxsa, sg->addr); omap_dma_chan_write(c, cxei, sg->ei); omap_dma_chan_write(c, cxfi, sg->fi); omap_dma_chan_write(c, CEN, sg->en); omap_dma_chan_write(c, CFN, sg->fn); omap_dma_start(c, d); c->sgidx++; } static void omap_dma_start_desc(struct omap_chan *c) { struct virt_dma_desc *vd = vchan_next_desc(&c->vc); struct omap_desc *d; unsigned cxsa, cxei, cxfi; if (!vd) { c->desc = NULL; return; } list_del(&vd->node); c->desc = d = to_omap_dma_desc(&vd->tx); c->sgidx = 0; /* * This provides the necessary barrier to ensure data held in * DMA coherent memory is visible to the DMA engine prior to * the transfer starting. 
*/ mb(); omap_dma_chan_write(c, CCR, d->ccr); if (dma_omap1()) omap_dma_chan_write(c, CCR2, d->ccr >> 16); if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) { cxsa = CSSA; cxei = CSEI; cxfi = CSFI; } else { cxsa = CDSA; cxei = CDEI; cxfi = CDFI; } omap_dma_chan_write(c, cxsa, d->dev_addr); omap_dma_chan_write(c, cxei, d->ei); omap_dma_chan_write(c, cxfi, d->fi); omap_dma_chan_write(c, CSDP, d->csdp); omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl); omap_dma_start_sg(c, d); } static void omap_dma_callback(int ch, u16 status, void *data) { struct omap_chan *c = data; struct omap_desc *d; unsigned long flags; spin_lock_irqsave(&c->vc.lock, flags); d = c->desc; if (d) { if (c->cyclic) { vchan_cyclic_callback(&d->vd); } else if (d->using_ll || c->sgidx == d->sglen) { omap_dma_start_desc(c); vchan_cookie_complete(&d->vd); } else { omap_dma_start_sg(c, d); } } spin_unlock_irqrestore(&c->vc.lock, flags); } static irqreturn_t omap_dma_irq(int irq, void *devid) { struct omap_dmadev *od = devid; unsigned status, channel; spin_lock(&od->irq_lock); status = omap_dma_glbl_read(od, IRQSTATUS_L1); status &= od->irq_enable_mask; if (status == 0) { spin_unlock(&od->irq_lock); return IRQ_NONE; } while ((channel = ffs(status)) != 0) { unsigned mask, csr; struct omap_chan *c; channel -= 1; mask = BIT(channel); status &= ~mask; c = od->lch_map[channel]; if (c == NULL) { /* This should never happen */ dev_err(od->ddev.dev, "invalid channel %u\n", channel); continue; } csr = omap_dma_get_csr(c); omap_dma_glbl_write(od, IRQSTATUS_L1, mask); omap_dma_callback(channel, csr, c); } spin_unlock(&od->irq_lock); return IRQ_HANDLED; } static int omap_dma_get_lch(struct omap_dmadev *od, int *lch) { int channel; mutex_lock(&od->lch_lock); channel = find_first_zero_bit(od->lch_bitmap, od->lch_count); if (channel >= od->lch_count) goto out_busy; set_bit(channel, od->lch_bitmap); mutex_unlock(&od->lch_lock); omap_dma_clear_lch(od, channel); *lch = channel; return 0; out_busy: mutex_unlock(&od->lch_lock); *lch = -EINVAL; return -EBUSY; } static void omap_dma_put_lch(struct omap_dmadev *od, int lch) { omap_dma_clear_lch(od, lch); mutex_lock(&od->lch_lock); clear_bit(lch, od->lch_bitmap); mutex_unlock(&od->lch_lock); } static inline bool omap_dma_legacy(struct omap_dmadev *od) { return IS_ENABLED(CONFIG_ARCH_OMAP1) && od->legacy; } static int omap_dma_alloc_chan_resources(struct dma_chan *chan) { struct omap_dmadev *od = to_omap_dma_dev(chan->device); struct omap_chan *c = to_omap_dma_chan(chan); struct device *dev = od->ddev.dev; int ret; if (omap_dma_legacy(od)) { ret = omap_request_dma(c->dma_sig, "DMA engine", omap_dma_callback, c, &c->dma_ch); } else { ret = omap_dma_get_lch(od, &c->dma_ch); } dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig); if (ret >= 0) { omap_dma_assign(od, c, c->dma_ch); if (!omap_dma_legacy(od)) { unsigned val; spin_lock_irq(&od->irq_lock); val = BIT(c->dma_ch); omap_dma_glbl_write(od, IRQSTATUS_L1, val); od->irq_enable_mask |= val; omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask); val = omap_dma_glbl_read(od, IRQENABLE_L0); val &= ~BIT(c->dma_ch); omap_dma_glbl_write(od, IRQENABLE_L0, val); spin_unlock_irq(&od->irq_lock); } } if (dma_omap1()) { if (__dma_omap16xx(od->plat->dma_attr)) { c->ccr = CCR_OMAP31_DISABLE; /* Duplicate what plat-omap/dma.c does */ c->ccr |= c->dma_ch + 1; } else { c->ccr = c->dma_sig & 0x1f; } } else { c->ccr = c->dma_sig & 0x1f; c->ccr |= (c->dma_sig & ~0x1f) << 14; } if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING) c->ccr |= 
CCR_BUFFERING_DISABLE; return ret; } static void omap_dma_free_chan_resources(struct dma_chan *chan) { struct omap_dmadev *od = to_omap_dma_dev(chan->device); struct omap_chan *c = to_omap_dma_chan(chan); if (!omap_dma_legacy(od)) { spin_lock_irq(&od->irq_lock); od->irq_enable_mask &= ~BIT(c->dma_ch); omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask); spin_unlock_irq(&od->irq_lock); } c->channel_base = NULL; od->lch_map[c->dma_ch] = NULL; vchan_free_chan_resources(&c->vc); if (omap_dma_legacy(od)) omap_free_dma(c->dma_ch); else omap_dma_put_lch(od, c->dma_ch); dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch, c->dma_sig); c->dma_sig = 0; } static size_t omap_dma_sg_size(struct omap_sg *sg) { return sg->en * sg->fn; } static size_t omap_dma_desc_size(struct omap_desc *d) { unsigned i; size_t size; for (size = i = 0; i < d->sglen; i++) size += omap_dma_sg_size(&d->sg[i]); return size * es_bytes[d->es]; } static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr) { unsigned i; size_t size, es_size = es_bytes[d->es]; for (size = i = 0; i < d->sglen; i++) { size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size; if (size) size += this_size; else if (addr >= d->sg[i].addr && addr < d->sg[i].addr + this_size) size += d->sg[i].addr + this_size - addr; } return size; } /* * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is * read before the DMA controller finished disabling the channel. */ static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg) { struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); uint32_t val; val = omap_dma_chan_read(c, reg); if (val == 0 && od->plat->errata & DMA_ERRATA_3_3) val = omap_dma_chan_read(c, reg); return val; } static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c) { struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); dma_addr_t addr, cdac; if (__dma_omap15xx(od->plat->dma_attr)) { addr = omap_dma_chan_read(c, CPC); } else { addr = omap_dma_chan_read_3_3(c, CSAC); cdac = omap_dma_chan_read_3_3(c, CDAC); /* * CDAC == 0 indicates that the DMA transfer on the channel has * not been started (no data has been transferred so far). * Return the programmed source start address in this case. */ if (cdac == 0) addr = omap_dma_chan_read(c, CSSA); } if (dma_omap1()) addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000; return addr; } static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c) { struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); dma_addr_t addr; if (__dma_omap15xx(od->plat->dma_attr)) { addr = omap_dma_chan_read(c, CPC); } else { addr = omap_dma_chan_read_3_3(c, CDAC); /* * CDAC == 0 indicates that the DMA transfer on the channel * has not been started (no data has been transferred so * far). Return the programmed destination start address in * this case. 
*/ if (addr == 0) addr = omap_dma_chan_read(c, CDSA); } if (dma_omap1()) addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000; return addr; } static enum dma_status omap_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct omap_chan *c = to_omap_dma_chan(chan); enum dma_status ret; unsigned long flags; struct omap_desc *d = NULL; ret = dma_cookie_status(chan, cookie, txstate); if (ret == DMA_COMPLETE) return ret; spin_lock_irqsave(&c->vc.lock, flags); if (c->desc && c->desc->vd.tx.cookie == cookie) d = c->desc; if (!txstate) goto out; if (d) { dma_addr_t pos; if (d->dir == DMA_MEM_TO_DEV) pos = omap_dma_get_src_pos(c); else if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) pos = omap_dma_get_dst_pos(c); else pos = 0; txstate->residue = omap_dma_desc_size_pos(d, pos); } else { struct virt_dma_desc *vd = vchan_find_desc(&c->vc, cookie); if (vd) txstate->residue = omap_dma_desc_size( to_omap_dma_desc(&vd->tx)); else txstate->residue = 0; } out: if (ret == DMA_IN_PROGRESS && c->paused) { ret = DMA_PAUSED; } else if (d && d->polled && c->running) { uint32_t ccr = omap_dma_chan_read(c, CCR); /* * The channel is no longer active, set the return value * accordingly and mark it as completed */ if (!(ccr & CCR_ENABLE)) { ret = DMA_COMPLETE; omap_dma_start_desc(c); vchan_cookie_complete(&d->vd); } } spin_unlock_irqrestore(&c->vc.lock, flags); return ret; } static void omap_dma_issue_pending(struct dma_chan *chan) { struct omap_chan *c = to_omap_dma_chan(chan); unsigned long flags; spin_lock_irqsave(&c->vc.lock, flags); if (vchan_issue_pending(&c->vc) && !c->desc) omap_dma_start_desc(c); spin_unlock_irqrestore(&c->vc.lock, flags); } static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen, enum dma_transfer_direction dir, unsigned long tx_flags, void *context) { struct omap_dmadev *od = to_omap_dma_dev(chan->device); struct omap_chan *c = to_omap_dma_chan(chan); enum dma_slave_buswidth dev_width; struct scatterlist *sgent; struct omap_desc *d; dma_addr_t dev_addr; unsigned i, es, en, frame_bytes; bool ll_failed = false; u32 burst; u32 port_window, port_window_bytes; if (dir == DMA_DEV_TO_MEM) { dev_addr = c->cfg.src_addr; dev_width = c->cfg.src_addr_width; burst = c->cfg.src_maxburst; port_window = c->cfg.src_port_window_size; } else if (dir == DMA_MEM_TO_DEV) { dev_addr = c->cfg.dst_addr; dev_width = c->cfg.dst_addr_width; burst = c->cfg.dst_maxburst; port_window = c->cfg.dst_port_window_size; } else { dev_err(chan->device->dev, "%s: bad direction?\n", __func__); return NULL; } /* Bus width translates to the element size (ES) */ switch (dev_width) { case DMA_SLAVE_BUSWIDTH_1_BYTE: es = CSDP_DATA_TYPE_8; break; case DMA_SLAVE_BUSWIDTH_2_BYTES: es = CSDP_DATA_TYPE_16; break; case DMA_SLAVE_BUSWIDTH_4_BYTES: es = CSDP_DATA_TYPE_32; break; default: /* not reached */ return NULL; } /* Now allocate and setup the descriptor. */ d = kzalloc(struct_size(d, sg, sglen), GFP_ATOMIC); if (!d) return NULL; d->dir = dir; d->dev_addr = dev_addr; d->es = es; /* When the port_window is used, one frame must cover the window */ if (port_window) { burst = port_window; port_window_bytes = port_window * es_bytes[es]; d->ei = 1; /* * One frame covers the port_window and by configure * the source frame index to be -1 * (port_window - 1) * we instruct the sDMA that after a frame is processed * it should move back to the start of the window. 
*/ d->fi = -(port_window_bytes - 1); } d->ccr = c->ccr | CCR_SYNC_FRAME; if (dir == DMA_DEV_TO_MEM) { d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED; d->ccr |= CCR_DST_AMODE_POSTINC; if (port_window) { d->ccr |= CCR_SRC_AMODE_DBLIDX; if (port_window_bytes >= 64) d->csdp |= CSDP_SRC_BURST_64; else if (port_window_bytes >= 32) d->csdp |= CSDP_SRC_BURST_32; else if (port_window_bytes >= 16) d->csdp |= CSDP_SRC_BURST_16; } else { d->ccr |= CCR_SRC_AMODE_CONSTANT; } } else { d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED; d->ccr |= CCR_SRC_AMODE_POSTINC; if (port_window) { d->ccr |= CCR_DST_AMODE_DBLIDX; if (port_window_bytes >= 64) d->csdp |= CSDP_DST_BURST_64; else if (port_window_bytes >= 32) d->csdp |= CSDP_DST_BURST_32; else if (port_window_bytes >= 16) d->csdp |= CSDP_DST_BURST_16; } else { d->ccr |= CCR_DST_AMODE_CONSTANT; } } d->cicr = CICR_DROP_IE | CICR_BLOCK_IE; d->csdp |= es; if (dma_omap1()) { d->cicr |= CICR_TOUT_IE; if (dir == DMA_DEV_TO_MEM) d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB; else d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF; } else { if (dir == DMA_DEV_TO_MEM) d->ccr |= CCR_TRIGGER_SRC; d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; if (port_window) d->csdp |= CSDP_WRITE_LAST_NON_POSTED; } if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS) d->clnk_ctrl = c->dma_ch; /* * Build our scatterlist entries: each contains the address, * the number of elements (EN) in each frame, and the number of * frames (FN). Number of bytes for this entry = ES * EN * FN. * * Burst size translates to number of elements with frame sync. * Note: DMA engine defines burst to be the number of dev-width * transfers. */ en = burst; frame_bytes = es_bytes[es] * en; if (sglen >= 2) d->using_ll = od->ll123_supported; for_each_sg(sgl, sgent, sglen, i) { struct omap_sg *osg = &d->sg[i]; osg->addr = sg_dma_address(sgent); osg->en = en; osg->fn = sg_dma_len(sgent) / frame_bytes; if (d->using_ll) { osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC, &osg->t2_desc_paddr); if (!osg->t2_desc) { dev_err(chan->device->dev, "t2_desc[%d] allocation failed\n", i); ll_failed = true; d->using_ll = false; continue; } omap_dma_fill_type2_desc(d, i, dir, (i == sglen - 1)); } } d->sglen = sglen; /* Release the dma_pool entries if one allocation failed */ if (ll_failed) { for (i = 0; i < d->sglen; i++) { struct omap_sg *osg = &d->sg[i]; if (osg->t2_desc) { dma_pool_free(od->desc_pool, osg->t2_desc, osg->t2_desc_paddr); osg->t2_desc = NULL; } } } return vchan_tx_prep(&c->vc, &d->vd, tx_flags); } static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction dir, unsigned long flags) { struct omap_dmadev *od = to_omap_dma_dev(chan->device); struct omap_chan *c = to_omap_dma_chan(chan); enum dma_slave_buswidth dev_width; struct omap_desc *d; dma_addr_t dev_addr; unsigned es; u32 burst; if (dir == DMA_DEV_TO_MEM) { dev_addr = c->cfg.src_addr; dev_width = c->cfg.src_addr_width; burst = c->cfg.src_maxburst; } else if (dir == DMA_MEM_TO_DEV) { dev_addr = c->cfg.dst_addr; dev_width = c->cfg.dst_addr_width; burst = c->cfg.dst_maxburst; } else { dev_err(chan->device->dev, "%s: bad direction?\n", __func__); return NULL; } /* Bus width translates to the element size (ES) */ switch (dev_width) { case DMA_SLAVE_BUSWIDTH_1_BYTE: es = CSDP_DATA_TYPE_8; break; case DMA_SLAVE_BUSWIDTH_2_BYTES: es = CSDP_DATA_TYPE_16; break; case DMA_SLAVE_BUSWIDTH_4_BYTES: es = CSDP_DATA_TYPE_32; break; default: /* not 
reached */ return NULL; } /* Now allocate and setup the descriptor. */ d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); if (!d) return NULL; d->dir = dir; d->dev_addr = dev_addr; d->fi = burst; d->es = es; d->sg[0].addr = buf_addr; d->sg[0].en = period_len / es_bytes[es]; d->sg[0].fn = buf_len / period_len; d->sglen = 1; d->ccr = c->ccr; if (dir == DMA_DEV_TO_MEM) d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT; else d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC; d->cicr = CICR_DROP_IE; if (flags & DMA_PREP_INTERRUPT) d->cicr |= CICR_FRAME_IE; d->csdp = es; if (dma_omap1()) { d->cicr |= CICR_TOUT_IE; if (dir == DMA_DEV_TO_MEM) d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI; else d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF; } else { if (burst) d->ccr |= CCR_SYNC_PACKET; else d->ccr |= CCR_SYNC_ELEMENT; if (dir == DMA_DEV_TO_MEM) { d->ccr |= CCR_TRIGGER_SRC; d->csdp |= CSDP_DST_PACKED; } else { d->csdp |= CSDP_SRC_PACKED; } d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64; } if (__dma_omap15xx(od->plat->dma_attr)) d->ccr |= CCR_AUTO_INIT | CCR_REPEAT; else d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK; c->cyclic = true; return vchan_tx_prep(&c->vc, &d->vd, flags); } static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy( struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long tx_flags) { struct omap_chan *c = to_omap_dma_chan(chan); struct omap_desc *d; uint8_t data_type; d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); if (!d) return NULL; data_type = __ffs((src | dest | len)); if (data_type > CSDP_DATA_TYPE_32) data_type = CSDP_DATA_TYPE_32; d->dir = DMA_MEM_TO_MEM; d->dev_addr = src; d->fi = 0; d->es = data_type; d->sg[0].en = len / BIT(data_type); d->sg[0].fn = 1; d->sg[0].addr = dest; d->sglen = 1; d->ccr = c->ccr; d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC; if (tx_flags & DMA_PREP_INTERRUPT) d->cicr |= CICR_FRAME_IE; else d->polled = true; d->csdp = data_type; if (dma_omap1()) { d->cicr |= CICR_TOUT_IE; d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF; } else { d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED; d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64; } return vchan_tx_prep(&c->vc, &d->vd, tx_flags); } static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved( struct dma_chan *chan, struct dma_interleaved_template *xt, unsigned long flags) { struct omap_chan *c = to_omap_dma_chan(chan); struct omap_desc *d; struct omap_sg *sg; uint8_t data_type; size_t src_icg, dst_icg; /* Slave mode is not supported */ if (is_slave_direction(xt->dir)) return NULL; if (xt->frame_size != 1 || xt->numf == 0) return NULL; d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); if (!d) return NULL; data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size)); if (data_type > CSDP_DATA_TYPE_32) data_type = CSDP_DATA_TYPE_32; sg = &d->sg[0]; d->dir = DMA_MEM_TO_MEM; d->dev_addr = xt->src_start; d->es = data_type; sg->en = xt->sgl[0].size / BIT(data_type); sg->fn = xt->numf; sg->addr = xt->dst_start; d->sglen = 1; d->ccr = c->ccr; src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]); dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]); if (src_icg) { d->ccr |= CCR_SRC_AMODE_DBLIDX; d->ei = 1; d->fi = src_icg + 1; } else if (xt->src_inc) { d->ccr |= CCR_SRC_AMODE_POSTINC; d->fi = 0; } else { dev_err(chan->device->dev, "%s: SRC constant addressing is not supported\n", __func__); kfree(d); 
return NULL; } if (dst_icg) { d->ccr |= CCR_DST_AMODE_DBLIDX; sg->ei = 1; sg->fi = dst_icg + 1; } else if (xt->dst_inc) { d->ccr |= CCR_DST_AMODE_POSTINC; sg->fi = 0; } else { dev_err(chan->device->dev, "%s: DST constant addressing is not supported\n", __func__); kfree(d); return NULL; } d->cicr = CICR_DROP_IE | CICR_FRAME_IE; d->csdp = data_type; if (dma_omap1()) { d->cicr |= CICR_TOUT_IE; d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF; } else { d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED; d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64; } return vchan_tx_prep(&c->vc, &d->vd, flags); } static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg) { struct omap_chan *c = to_omap_dma_chan(chan); if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) return -EINVAL; if (cfg->src_maxburst > chan->device->max_burst || cfg->dst_maxburst > chan->device->max_burst) return -EINVAL; memcpy(&c->cfg, cfg, sizeof(c->cfg)); return 0; } static int omap_dma_terminate_all(struct dma_chan *chan) { struct omap_chan *c = to_omap_dma_chan(chan); unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&c->vc.lock, flags); /* * Stop DMA activity: we assume the callback will not be called * after omap_dma_stop() returns (even if it does, it will see * c->desc is NULL and exit.) */ if (c->desc) { vchan_terminate_vdesc(&c->desc->vd); c->desc = NULL; /* Avoid stopping the dma twice */ if (!c->paused) omap_dma_stop(c); } c->cyclic = false; c->paused = false; vchan_get_all_descriptors(&c->vc, &head); spin_unlock_irqrestore(&c->vc.lock, flags); vchan_dma_desc_free_list(&c->vc, &head); return 0; } static void omap_dma_synchronize(struct dma_chan *chan) { struct omap_chan *c = to_omap_dma_chan(chan); vchan_synchronize(&c->vc); } static int omap_dma_pause(struct dma_chan *chan) { struct omap_chan *c = to_omap_dma_chan(chan); struct omap_dmadev *od = to_omap_dma_dev(chan->device); unsigned long flags; int ret = -EINVAL; bool can_pause = false; spin_lock_irqsave(&od->irq_lock, flags); if (!c->desc) goto out; if (c->cyclic) can_pause = true; /* * We do not allow DMA_MEM_TO_DEV transfers to be paused. * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer: * "When a channel is disabled during a transfer, the channel undergoes * an abort, unless it is hardware-source-synchronized …". * A source-synchronised channel is one where the fetching of data is * under control of the device. In other words, a device-to-memory * transfer. So, a destination-synchronised channel (which would be a * memory-to-device transfer) undergoes an abort if the CCR_ENABLE * bit is cleared. * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel * aborts immediately after completion of current read/write * transactions and then the FIFO is cleaned up." The term "cleaned up" * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE * are both clear _before_ disabling the channel, otherwise data loss * will occur. * The problem is that if the channel is active, then device activity * can result in DMA activity starting between reading those as both * clear and the write to DMA_CCR to clear the enable bit hitting the * hardware. If the DMA hardware can't drain the data in its FIFO to the * destination, then data loss "might" occur (say if we write to an UART * and the UART is not accepting any further data). 
*/ else if (c->desc->dir == DMA_DEV_TO_MEM) can_pause = true; if (can_pause && !c->paused) { ret = omap_dma_stop(c); if (!ret) c->paused = true; } out: spin_unlock_irqrestore(&od->irq_lock, flags); return ret; } static int omap_dma_resume(struct dma_chan *chan) { struct omap_chan *c = to_omap_dma_chan(chan); struct omap_dmadev *od = to_omap_dma_dev(chan->device); unsigned long flags; int ret = -EINVAL; spin_lock_irqsave(&od->irq_lock, flags); if (c->paused && c->desc) { mb(); /* Restore channel link register */ omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl); omap_dma_start(c, c->desc); c->paused = false; ret = 0; } spin_unlock_irqrestore(&od->irq_lock, flags); return ret; } static int omap_dma_chan_init(struct omap_dmadev *od) { struct omap_chan *c; c = kzalloc(sizeof(*c), GFP_KERNEL); if (!c) return -ENOMEM; c->reg_map = od->reg_map; c->vc.desc_free = omap_dma_desc_free; vchan_init(&c->vc, &od->ddev); return 0; } static void omap_dma_free(struct omap_dmadev *od) { while (!list_empty(&od->ddev.channels)) { struct omap_chan *c = list_first_entry(&od->ddev.channels, struct omap_chan, vc.chan.device_node); list_del(&c->vc.chan.device_node); tasklet_kill(&c->vc.task); kfree(c); } } /* Currently used by omap2 & 3 to block deeper SoC idle states */ static bool omap_dma_busy(struct omap_dmadev *od) { struct omap_chan *c; int lch = -1; while (1) { lch = find_next_bit(od->lch_bitmap, od->lch_count, lch + 1); if (lch >= od->lch_count) break; c = od->lch_map[lch]; if (!c) continue; if (omap_dma_chan_read(c, CCR) & CCR_ENABLE) return true; } return false; } /* Currently only used for omap2. For omap1, also a check for lcd_dma is needed */ static int omap_dma_busy_notifier(struct notifier_block *nb, unsigned long cmd, void *v) { struct omap_dmadev *od; od = container_of(nb, struct omap_dmadev, nb); switch (cmd) { case CPU_CLUSTER_PM_ENTER: if (omap_dma_busy(od)) return NOTIFY_BAD; break; case CPU_CLUSTER_PM_ENTER_FAILED: case CPU_CLUSTER_PM_EXIT: break; } return NOTIFY_OK; } /* * We are using IRQENABLE_L1, and legacy DMA code was using IRQENABLE_L0. * As the DSP may be using IRQENABLE_L2 and L3, let's not touch those for * now. Context save seems to be only currently needed on omap3. 
*/ static void omap_dma_context_save(struct omap_dmadev *od) { od->context.irqenable_l0 = omap_dma_glbl_read(od, IRQENABLE_L0); od->context.irqenable_l1 = omap_dma_glbl_read(od, IRQENABLE_L1); od->context.ocp_sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG); od->context.gcr = omap_dma_glbl_read(od, GCR); } static void omap_dma_context_restore(struct omap_dmadev *od) { int i; omap_dma_glbl_write(od, GCR, od->context.gcr); omap_dma_glbl_write(od, OCP_SYSCONFIG, od->context.ocp_sysconfig); omap_dma_glbl_write(od, IRQENABLE_L0, od->context.irqenable_l0); omap_dma_glbl_write(od, IRQENABLE_L1, od->context.irqenable_l1); /* Clear IRQSTATUS_L0 as legacy DMA code is no longer doing it */ if (od->plat->errata & DMA_ROMCODE_BUG) omap_dma_glbl_write(od, IRQSTATUS_L0, 0); /* Clear dma channels */ for (i = 0; i < od->lch_count; i++) omap_dma_clear_lch(od, i); } /* Currently only used for omap3 */ static int omap_dma_context_notifier(struct notifier_block *nb, unsigned long cmd, void *v) { struct omap_dmadev *od; od = container_of(nb, struct omap_dmadev, nb); switch (cmd) { case CPU_CLUSTER_PM_ENTER: if (omap_dma_busy(od)) return NOTIFY_BAD; omap_dma_context_save(od); break; case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */ break; case CPU_CLUSTER_PM_EXIT: omap_dma_context_restore(od); break; } return NOTIFY_OK; } static void omap_dma_init_gcr(struct omap_dmadev *od, int arb_rate, int max_fifo_depth, int tparams) { u32 val; /* Set only for omap2430 and later */ if (!od->cfg->rw_priority) return; if (max_fifo_depth == 0) max_fifo_depth = 1; if (arb_rate == 0) arb_rate = 1; val = 0xff & max_fifo_depth; val |= (0x3 & tparams) << 12; val |= (arb_rate & 0xff) << 16; omap_dma_glbl_write(od, GCR, val); } #define OMAP_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) /* * No flags currently set for default configuration as omap1 is still * using platform data. 
*/ static const struct omap_dma_config default_cfg; static int omap_dma_probe(struct platform_device *pdev) { const struct omap_dma_config *conf; struct omap_dmadev *od; int rc, i, irq; u32 val; od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL); if (!od) return -ENOMEM; od->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(od->base)) return PTR_ERR(od->base); conf = of_device_get_match_data(&pdev->dev); if (conf) { od->cfg = conf; od->plat = dev_get_platdata(&pdev->dev); if (!od->plat) { dev_err(&pdev->dev, "omap_system_dma_plat_info is missing"); return -ENODEV; } } else if (IS_ENABLED(CONFIG_ARCH_OMAP1)) { od->cfg = &default_cfg; od->plat = omap_get_plat_info(); if (!od->plat) return -EPROBE_DEFER; } else { return -ENODEV; } od->reg_map = od->plat->reg_map; dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask); dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask); od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources; od->ddev.device_free_chan_resources = omap_dma_free_chan_resources; od->ddev.device_tx_status = omap_dma_tx_status; od->ddev.device_issue_pending = omap_dma_issue_pending; od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy; od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved; od->ddev.device_config = omap_dma_slave_config; od->ddev.device_pause = omap_dma_pause; od->ddev.device_resume = omap_dma_resume; od->ddev.device_terminate_all = omap_dma_terminate_all; od->ddev.device_synchronize = omap_dma_synchronize; od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS; od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS; od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); if (__dma_omap15xx(od->plat->dma_attr)) od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; else od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */ od->ddev.dev = &pdev->dev; INIT_LIST_HEAD(&od->ddev.channels); mutex_init(&od->lch_lock); spin_lock_init(&od->lock); spin_lock_init(&od->irq_lock); /* Number of DMA requests */ od->dma_requests = OMAP_SDMA_REQUESTS; if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node, "dma-requests", &od->dma_requests)) { dev_info(&pdev->dev, "Missing dma-requests property, using %u.\n", OMAP_SDMA_REQUESTS); } /* Number of available logical channels */ if (!pdev->dev.of_node) { od->lch_count = od->plat->dma_attr->lch_count; if (unlikely(!od->lch_count)) od->lch_count = OMAP_SDMA_CHANNELS; } else if (of_property_read_u32(pdev->dev.of_node, "dma-channels", &od->lch_count)) { dev_info(&pdev->dev, "Missing dma-channels property, using %u.\n", OMAP_SDMA_CHANNELS); od->lch_count = OMAP_SDMA_CHANNELS; } /* Mask of allowed logical channels */ if (pdev->dev.of_node && !of_property_read_u32(pdev->dev.of_node, "dma-channel-mask", &val)) { /* Tag channels not in mask as reserved */ val = ~val; bitmap_from_arr32(od->lch_bitmap, &val, od->lch_count); } if (od->plat->dma_attr->dev_caps & HS_CHANNELS_RESERVED) bitmap_set(od->lch_bitmap, 0, 2); od->lch_map = devm_kcalloc(&pdev->dev, od->lch_count, sizeof(*od->lch_map), GFP_KERNEL); if (!od->lch_map) return -ENOMEM; for (i = 0; i < od->dma_requests; i++) { rc = omap_dma_chan_init(od); if (rc) { omap_dma_free(od); return rc; } } irq = platform_get_irq(pdev, 1); if (irq <= 0) { dev_info(&pdev->dev, "failed to get L1 IRQ: 
%d\n", irq); od->legacy = true; } else { /* Disable all interrupts */ od->irq_enable_mask = 0; omap_dma_glbl_write(od, IRQENABLE_L1, 0); rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq, IRQF_SHARED, "omap-dma-engine", od); if (rc) { omap_dma_free(od); return rc; } } if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123) od->ll123_supported = true; od->ddev.filter.map = od->plat->slave_map; od->ddev.filter.mapcnt = od->plat->slavecnt; od->ddev.filter.fn = omap_dma_filter_fn; if (od->ll123_supported) { od->desc_pool = dma_pool_create(dev_name(&pdev->dev), &pdev->dev, sizeof(struct omap_type2_desc), 4, 0); if (!od->desc_pool) { dev_err(&pdev->dev, "unable to allocate descriptor pool\n"); od->ll123_supported = false; } } rc = dma_async_device_register(&od->ddev); if (rc) { pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", rc); omap_dma_free(od); return rc; } platform_set_drvdata(pdev, od); if (pdev->dev.of_node) { omap_dma_info.dma_cap = od->ddev.cap_mask; /* Device-tree DMA controller registration */ rc = of_dma_controller_register(pdev->dev.of_node, of_dma_simple_xlate, &omap_dma_info); if (rc) { pr_warn("OMAP-DMA: failed to register DMA controller\n"); dma_async_device_unregister(&od->ddev); omap_dma_free(od); } } omap_dma_init_gcr(od, DMA_DEFAULT_ARB_RATE, DMA_DEFAULT_FIFO_DEPTH, 0); if (od->cfg->needs_busy_check) { od->nb.notifier_call = omap_dma_busy_notifier; cpu_pm_register_notifier(&od->nb); } else if (od->cfg->may_lose_context) { od->nb.notifier_call = omap_dma_context_notifier; cpu_pm_register_notifier(&od->nb); } dev_info(&pdev->dev, "OMAP DMA engine driver%s\n", od->ll123_supported ? " (LinkedList1/2/3 supported)" : ""); return rc; } static int omap_dma_remove(struct platform_device *pdev) { struct omap_dmadev *od = platform_get_drvdata(pdev); int irq; if (od->cfg->may_lose_context) cpu_pm_unregister_notifier(&od->nb); if (pdev->dev.of_node) of_dma_controller_free(pdev->dev.of_node); irq = platform_get_irq(pdev, 1); devm_free_irq(&pdev->dev, irq, od); dma_async_device_unregister(&od->ddev); if (!omap_dma_legacy(od)) { /* Disable all interrupts */ omap_dma_glbl_write(od, IRQENABLE_L0, 0); } if (od->ll123_supported) dma_pool_destroy(od->desc_pool); omap_dma_free(od); return 0; } static const struct omap_dma_config omap2420_data = { .lch_end = CCFN, .rw_priority = true, .needs_lch_clear = true, .needs_busy_check = true, }; static const struct omap_dma_config omap2430_data = { .lch_end = CCFN, .rw_priority = true, .needs_lch_clear = true, }; static const struct omap_dma_config omap3430_data = { .lch_end = CCFN, .rw_priority = true, .needs_lch_clear = true, .may_lose_context = true, }; static const struct omap_dma_config omap3630_data = { .lch_end = CCDN, .rw_priority = true, .needs_lch_clear = true, .may_lose_context = true, }; static const struct omap_dma_config omap4_data = { .lch_end = CCDN, .rw_priority = true, .needs_lch_clear = true, }; static const struct of_device_id omap_dma_match[] = { { .compatible = "ti,omap2420-sdma", .data = &omap2420_data, }, { .compatible = "ti,omap2430-sdma", .data = &omap2430_data, }, { .compatible = "ti,omap3430-sdma", .data = &omap3430_data, }, { .compatible = "ti,omap3630-sdma", .data = &omap3630_data, }, { .compatible = "ti,omap4430-sdma", .data = &omap4_data, }, {}, }; MODULE_DEVICE_TABLE(of, omap_dma_match); static struct platform_driver omap_dma_driver = { .probe = omap_dma_probe, .remove = omap_dma_remove, .driver = { .name = "omap-dma-engine", .of_match_table = omap_dma_match, }, }; static bool 
omap_dma_filter_fn(struct dma_chan *chan, void *param) { if (chan->device->dev->driver == &omap_dma_driver.driver) { struct omap_dmadev *od = to_omap_dma_dev(chan->device); struct omap_chan *c = to_omap_dma_chan(chan); unsigned req = *(unsigned *)param; if (req <= od->dma_requests) { c->dma_sig = req; return true; } } return false; } static int omap_dma_init(void) { return platform_driver_register(&omap_dma_driver); } subsys_initcall(omap_dma_init); static void __exit omap_dma_exit(void) { platform_driver_unregister(&omap_dma_driver); } module_exit(omap_dma_exit); MODULE_AUTHOR("Russell King"); MODULE_LICENSE("GPL");
linux-master
drivers/dma/ti/omap-dma.c
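The omap-dma.c record above exposes the standard dmaengine slave API (device_config, device_prep_slave_sg, device_prep_dma_cyclic, device_issue_pending, ...). Below is a minimal, hedged sketch of how a peripheral driver might push one DEV_TO_MEM transfer through it; the "rx" dma-name, the fifo_phys/buf_dma parameters and the burst of 16 are illustrative assumptions, not part of the driver, and only generic dmaengine client calls are used.

/*
 * Hypothetical client sketch (not from omap-dma.c): request a slave
 * channel, describe the peripheral side, then queue one DEV_TO_MEM
 * transfer. Error handling is trimmed to the essentials.
 */
#include <linux/dmaengine.h>
#include <linux/err.h>

static int example_rx_once(struct device *dev, dma_addr_t fifo_phys,
			   dma_addr_t buf_dma, size_t len)
{
	struct dma_slave_config cfg = {
		.src_addr	= fifo_phys,			/* peripheral FIFO (assumed) */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,	/* maps to CSDP_DATA_TYPE_32 */
		.src_maxburst	= 16,				/* becomes elements per frame (EN) */
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret;

	chan = dma_request_chan(dev, "rx");		/* assumes "dmas"/"dma-names" in DT */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);	/* ends up in omap_dma_slave_config() */
	if (ret)
		goto out;

	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -EINVAL;
		goto out;
	}

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);			/* ends up in omap_dma_issue_pending() */

	/* ... completion arrives via desc->callback or can be polled ... */
	ret = dma_submit_error(cookie) ? -EIO : 0;
out:
	dma_release_channel(chan);
	return ret;
}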
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com */ #include <linux/kernel.h> #include "k3-psil-priv.h" #define PSIL_PDMA_XY_TR(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ .mapped_channel_id = -1, \ .default_flow_id = -1, \ }, \ } #define PSIL_PDMA_XY_PKT(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ .mapped_channel_id = -1, \ .default_flow_id = -1, \ .pkt_mode = 1, \ }, \ } #define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ .pkt_mode = 1, \ .needs_epib = 1, \ .psd_size = 16, \ .mapped_channel_id = ch, \ .flow_start = flow_base, \ .flow_num = flow_cnt, \ .default_flow_id = flow_base, \ }, \ } #define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ .pkt_mode = 1, \ .needs_epib = 1, \ .psd_size = 64, \ .mapped_channel_id = ch, \ .flow_start = flow_base, \ .flow_num = flow_cnt, \ .default_flow_id = default_flow, \ .notdpkt = tx, \ }, \ } #define PSIL_PDMA_MCASP(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ .pdma_acc32 = 1, \ .pdma_burst = 1, \ }, \ } #define PSIL_CSI2RX(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ }, \ } /* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ static struct psil_ep am62a_src_ep_map[] = { /* SAUL */ PSIL_SAUL(0x7504, 20, 35, 8, 35, 0), PSIL_SAUL(0x7505, 21, 35, 8, 36, 0), PSIL_SAUL(0x7506, 22, 43, 8, 43, 0), PSIL_SAUL(0x7507, 23, 43, 8, 44, 0), /* PDMA_MAIN0 - SPI0-3 */ PSIL_PDMA_XY_PKT(0x4302), PSIL_PDMA_XY_PKT(0x4303), PSIL_PDMA_XY_PKT(0x4304), PSIL_PDMA_XY_PKT(0x4305), PSIL_PDMA_XY_PKT(0x4306), PSIL_PDMA_XY_PKT(0x4307), PSIL_PDMA_XY_PKT(0x4308), PSIL_PDMA_XY_PKT(0x4309), PSIL_PDMA_XY_PKT(0x430a), PSIL_PDMA_XY_PKT(0x430b), PSIL_PDMA_XY_PKT(0x430c), PSIL_PDMA_XY_PKT(0x430d), /* PDMA_MAIN1 - UART0-6 */ PSIL_PDMA_XY_PKT(0x4400), PSIL_PDMA_XY_PKT(0x4401), PSIL_PDMA_XY_PKT(0x4402), PSIL_PDMA_XY_PKT(0x4403), PSIL_PDMA_XY_PKT(0x4404), PSIL_PDMA_XY_PKT(0x4405), PSIL_PDMA_XY_PKT(0x4406), /* PDMA_MAIN2 - MCASP0-2 */ PSIL_PDMA_MCASP(0x4500), PSIL_PDMA_MCASP(0x4501), PSIL_PDMA_MCASP(0x4502), /* CPSW3G */ PSIL_ETHERNET(0x4600, 19, 19, 16), /* CSI2RX */ PSIL_CSI2RX(0x5000), PSIL_CSI2RX(0x5001), PSIL_CSI2RX(0x5002), PSIL_CSI2RX(0x5003), PSIL_CSI2RX(0x5004), PSIL_CSI2RX(0x5005), PSIL_CSI2RX(0x5006), PSIL_CSI2RX(0x5007), PSIL_CSI2RX(0x5008), PSIL_CSI2RX(0x5009), PSIL_CSI2RX(0x500a), PSIL_CSI2RX(0x500b), PSIL_CSI2RX(0x500c), PSIL_CSI2RX(0x500d), PSIL_CSI2RX(0x500e), PSIL_CSI2RX(0x500f), PSIL_CSI2RX(0x5010), PSIL_CSI2RX(0x5011), PSIL_CSI2RX(0x5012), PSIL_CSI2RX(0x5013), PSIL_CSI2RX(0x5014), PSIL_CSI2RX(0x5015), PSIL_CSI2RX(0x5016), PSIL_CSI2RX(0x5017), PSIL_CSI2RX(0x5018), PSIL_CSI2RX(0x5019), PSIL_CSI2RX(0x501a), PSIL_CSI2RX(0x501b), PSIL_CSI2RX(0x501c), PSIL_CSI2RX(0x501d), PSIL_CSI2RX(0x501e), PSIL_CSI2RX(0x501f), }; /* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ static struct psil_ep am62a_dst_ep_map[] = { /* SAUL */ PSIL_SAUL(0xf500, 27, 83, 8, 83, 1), PSIL_SAUL(0xf501, 28, 91, 8, 91, 1), /* PDMA_MAIN0 - SPI0-3 */ PSIL_PDMA_XY_PKT(0xc302), PSIL_PDMA_XY_PKT(0xc303), PSIL_PDMA_XY_PKT(0xc304), PSIL_PDMA_XY_PKT(0xc305), PSIL_PDMA_XY_PKT(0xc306), PSIL_PDMA_XY_PKT(0xc307), PSIL_PDMA_XY_PKT(0xc308), PSIL_PDMA_XY_PKT(0xc309), PSIL_PDMA_XY_PKT(0xc30a), PSIL_PDMA_XY_PKT(0xc30b), PSIL_PDMA_XY_PKT(0xc30c), PSIL_PDMA_XY_PKT(0xc30d), /* PDMA_MAIN1 
- UART0-6 */ PSIL_PDMA_XY_PKT(0xc400), PSIL_PDMA_XY_PKT(0xc401), PSIL_PDMA_XY_PKT(0xc402), PSIL_PDMA_XY_PKT(0xc403), PSIL_PDMA_XY_PKT(0xc404), PSIL_PDMA_XY_PKT(0xc405), PSIL_PDMA_XY_PKT(0xc406), /* PDMA_MAIN2 - MCASP0-2 */ PSIL_PDMA_MCASP(0xc500), PSIL_PDMA_MCASP(0xc501), PSIL_PDMA_MCASP(0xc502), /* CPSW3G */ PSIL_ETHERNET(0xc600, 19, 19, 8), PSIL_ETHERNET(0xc601, 20, 27, 8), PSIL_ETHERNET(0xc602, 21, 35, 8), PSIL_ETHERNET(0xc603, 22, 43, 8), PSIL_ETHERNET(0xc604, 23, 51, 8), PSIL_ETHERNET(0xc605, 24, 59, 8), PSIL_ETHERNET(0xc606, 25, 67, 8), PSIL_ETHERNET(0xc607, 26, 75, 8), }; struct psil_ep_map am62a_ep_map = { .name = "am62a", .src = am62a_src_ep_map, .src_count = ARRAY_SIZE(am62a_src_ep_map), .dst = am62a_dst_ep_map, .dst_count = ARRAY_SIZE(am62a_dst_ep_map), };
linux-master
drivers/dma/ti/k3-psil-am62a.c
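The AM62A tables above are built entirely from the local macros. As a reference, this is what the single CPSW3G RX entry expands to; it is a mechanical expansion of PSIL_ETHERNET(0x4600, 19, 19, 16) for illustration only and assumes the same struct psil_ep from k3-psil-priv.h that the file itself uses.

/*
 * Expansion of PSIL_ETHERNET(0x4600, 19, 19, 16) above: CPSW3G RX traffic
 * on PSI-L thread 0x4600 uses mapped (PKTDMA) channel 19 and owns 16 RX
 * flows starting at flow 19, the first of which is the default flow.
 */
static struct psil_ep am62a_cpsw3g_rx_example = {
	.thread_id = 0x4600,
	.ep_config = {
		.ep_type	   = PSIL_EP_NATIVE,
		.pkt_mode	   = 1,  /* packet (host descriptor) mode */
		.needs_epib	   = 1,  /* extended packet info block present */
		.psd_size	   = 16, /* protocol specific data, in bytes */
		.mapped_channel_id = 19,
		.flow_start	   = 19,
		.flow_num	   = 16,
		.default_flow_id   = 19,
	},
};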
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com */ #include <linux/kernel.h> #include "k3-psil-priv.h" #define PSIL_PDMA_XY_TR(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ }, \ } #define PSIL_PDMA_XY_PKT(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ .pkt_mode = 1, \ }, \ } #define PSIL_PDMA_MCASP(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ .pdma_acc32 = 1, \ .pdma_burst = 1, \ }, \ } #define PSIL_ETHERNET(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ .pkt_mode = 1, \ .needs_epib = 1, \ .psd_size = 16, \ }, \ } #define PSIL_SA2UL(x, tx) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ .pkt_mode = 1, \ .needs_epib = 1, \ .psd_size = 64, \ .notdpkt = tx, \ }, \ } #define PSIL_CSI2RX(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ }, \ } /* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ static struct psil_ep j784s4_src_ep_map[] = { /* PDMA_MCASP - McASP0-4 */ PSIL_PDMA_MCASP(0x4400), PSIL_PDMA_MCASP(0x4401), PSIL_PDMA_MCASP(0x4402), PSIL_PDMA_MCASP(0x4403), PSIL_PDMA_MCASP(0x4404), /* PDMA_SPI_G0 - SPI0-3 */ PSIL_PDMA_XY_PKT(0x4600), PSIL_PDMA_XY_PKT(0x4601), PSIL_PDMA_XY_PKT(0x4602), PSIL_PDMA_XY_PKT(0x4603), PSIL_PDMA_XY_PKT(0x4604), PSIL_PDMA_XY_PKT(0x4605), PSIL_PDMA_XY_PKT(0x4606), PSIL_PDMA_XY_PKT(0x4607), PSIL_PDMA_XY_PKT(0x4608), PSIL_PDMA_XY_PKT(0x4609), PSIL_PDMA_XY_PKT(0x460a), PSIL_PDMA_XY_PKT(0x460b), PSIL_PDMA_XY_PKT(0x460c), PSIL_PDMA_XY_PKT(0x460d), PSIL_PDMA_XY_PKT(0x460e), PSIL_PDMA_XY_PKT(0x460f), /* PDMA_SPI_G1 - SPI4-7 */ PSIL_PDMA_XY_PKT(0x4620), PSIL_PDMA_XY_PKT(0x4621), PSIL_PDMA_XY_PKT(0x4622), PSIL_PDMA_XY_PKT(0x4623), PSIL_PDMA_XY_PKT(0x4624), PSIL_PDMA_XY_PKT(0x4625), PSIL_PDMA_XY_PKT(0x4626), PSIL_PDMA_XY_PKT(0x4627), PSIL_PDMA_XY_PKT(0x4628), PSIL_PDMA_XY_PKT(0x4629), PSIL_PDMA_XY_PKT(0x462a), PSIL_PDMA_XY_PKT(0x462b), PSIL_PDMA_XY_PKT(0x462c), PSIL_PDMA_XY_PKT(0x462d), PSIL_PDMA_XY_PKT(0x462e), PSIL_PDMA_XY_PKT(0x462f), /* MAIN_CPSW2G */ PSIL_ETHERNET(0x4640), /* PDMA_USART_G0 - UART0-1 */ PSIL_PDMA_XY_PKT(0x4700), PSIL_PDMA_XY_PKT(0x4701), /* PDMA_USART_G1 - UART2-3 */ PSIL_PDMA_XY_PKT(0x4702), PSIL_PDMA_XY_PKT(0x4703), /* PDMA_USART_G2 - UART4-9 */ PSIL_PDMA_XY_PKT(0x4704), PSIL_PDMA_XY_PKT(0x4705), PSIL_PDMA_XY_PKT(0x4706), PSIL_PDMA_XY_PKT(0x4707), PSIL_PDMA_XY_PKT(0x4708), PSIL_PDMA_XY_PKT(0x4709), /* CSI2RX */ PSIL_CSI2RX(0x4900), PSIL_CSI2RX(0x4901), PSIL_CSI2RX(0x4902), PSIL_CSI2RX(0x4903), PSIL_CSI2RX(0x4940), PSIL_CSI2RX(0x4941), PSIL_CSI2RX(0x4942), PSIL_CSI2RX(0x4943), PSIL_CSI2RX(0x4944), PSIL_CSI2RX(0x4945), PSIL_CSI2RX(0x4946), PSIL_CSI2RX(0x4947), PSIL_CSI2RX(0x4948), PSIL_CSI2RX(0x4949), PSIL_CSI2RX(0x494a), PSIL_CSI2RX(0x494b), PSIL_CSI2RX(0x494c), PSIL_CSI2RX(0x494d), PSIL_CSI2RX(0x494e), PSIL_CSI2RX(0x494f), PSIL_CSI2RX(0x4950), PSIL_CSI2RX(0x4951), PSIL_CSI2RX(0x4952), PSIL_CSI2RX(0x4953), PSIL_CSI2RX(0x4954), PSIL_CSI2RX(0x4955), PSIL_CSI2RX(0x4956), PSIL_CSI2RX(0x4957), PSIL_CSI2RX(0x4958), PSIL_CSI2RX(0x4959), PSIL_CSI2RX(0x495a), PSIL_CSI2RX(0x495b), PSIL_CSI2RX(0x495c), PSIL_CSI2RX(0x495d), PSIL_CSI2RX(0x495e), PSIL_CSI2RX(0x495f), PSIL_CSI2RX(0x4960), PSIL_CSI2RX(0x4961), PSIL_CSI2RX(0x4962), PSIL_CSI2RX(0x4963), PSIL_CSI2RX(0x4964), PSIL_CSI2RX(0x4965), PSIL_CSI2RX(0x4966), PSIL_CSI2RX(0x4967), PSIL_CSI2RX(0x4968), PSIL_CSI2RX(0x4969), PSIL_CSI2RX(0x496a), PSIL_CSI2RX(0x496b), PSIL_CSI2RX(0x496c), 
PSIL_CSI2RX(0x496d), PSIL_CSI2RX(0x496e), PSIL_CSI2RX(0x496f), PSIL_CSI2RX(0x4970), PSIL_CSI2RX(0x4971), PSIL_CSI2RX(0x4972), PSIL_CSI2RX(0x4973), PSIL_CSI2RX(0x4974), PSIL_CSI2RX(0x4975), PSIL_CSI2RX(0x4976), PSIL_CSI2RX(0x4977), PSIL_CSI2RX(0x4978), PSIL_CSI2RX(0x4979), PSIL_CSI2RX(0x497a), PSIL_CSI2RX(0x497b), PSIL_CSI2RX(0x497c), PSIL_CSI2RX(0x497d), PSIL_CSI2RX(0x497e), PSIL_CSI2RX(0x497f), PSIL_CSI2RX(0x4980), PSIL_CSI2RX(0x4981), PSIL_CSI2RX(0x4982), PSIL_CSI2RX(0x4983), PSIL_CSI2RX(0x4984), PSIL_CSI2RX(0x4985), PSIL_CSI2RX(0x4986), PSIL_CSI2RX(0x4987), PSIL_CSI2RX(0x4988), PSIL_CSI2RX(0x4989), PSIL_CSI2RX(0x498a), PSIL_CSI2RX(0x498b), PSIL_CSI2RX(0x498c), PSIL_CSI2RX(0x498d), PSIL_CSI2RX(0x498e), PSIL_CSI2RX(0x498f), PSIL_CSI2RX(0x4990), PSIL_CSI2RX(0x4991), PSIL_CSI2RX(0x4992), PSIL_CSI2RX(0x4993), PSIL_CSI2RX(0x4994), PSIL_CSI2RX(0x4995), PSIL_CSI2RX(0x4996), PSIL_CSI2RX(0x4997), PSIL_CSI2RX(0x4998), PSIL_CSI2RX(0x4999), PSIL_CSI2RX(0x499a), PSIL_CSI2RX(0x499b), PSIL_CSI2RX(0x499c), PSIL_CSI2RX(0x499d), PSIL_CSI2RX(0x499e), PSIL_CSI2RX(0x499f), /* MAIN_CPSW9G */ PSIL_ETHERNET(0x4a00), /* MAIN-SA2UL */ PSIL_SA2UL(0x4a40, 0), PSIL_SA2UL(0x4a41, 0), PSIL_SA2UL(0x4a42, 0), PSIL_SA2UL(0x4a43, 0), /* MCU_CPSW0 */ PSIL_ETHERNET(0x7000), /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */ PSIL_PDMA_XY_PKT(0x7100), PSIL_PDMA_XY_PKT(0x7101), PSIL_PDMA_XY_PKT(0x7102), PSIL_PDMA_XY_PKT(0x7103), /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */ PSIL_PDMA_XY_PKT(0x7200), PSIL_PDMA_XY_PKT(0x7201), PSIL_PDMA_XY_PKT(0x7202), PSIL_PDMA_XY_PKT(0x7203), PSIL_PDMA_XY_PKT(0x7204), PSIL_PDMA_XY_PKT(0x7205), PSIL_PDMA_XY_PKT(0x7206), PSIL_PDMA_XY_PKT(0x7207), /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */ PSIL_PDMA_XY_PKT(0x7300), /* MCU_PDMA_ADC - ADC0-1 */ PSIL_PDMA_XY_TR(0x7400), PSIL_PDMA_XY_TR(0x7401), PSIL_PDMA_XY_TR(0x7402), PSIL_PDMA_XY_TR(0x7403), /* MCU_SA2UL */ PSIL_SA2UL(0x7500, 0), PSIL_SA2UL(0x7501, 0), PSIL_SA2UL(0x7502, 0), PSIL_SA2UL(0x7503, 0), }; /* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ static struct psil_ep j784s4_dst_ep_map[] = { /* MAIN_CPSW2G */ PSIL_ETHERNET(0xc640), PSIL_ETHERNET(0xc641), PSIL_ETHERNET(0xc642), PSIL_ETHERNET(0xc643), PSIL_ETHERNET(0xc644), PSIL_ETHERNET(0xc645), PSIL_ETHERNET(0xc646), PSIL_ETHERNET(0xc647), /* MAIN_CPSW9G */ PSIL_ETHERNET(0xca00), PSIL_ETHERNET(0xca01), PSIL_ETHERNET(0xca02), PSIL_ETHERNET(0xca03), PSIL_ETHERNET(0xca04), PSIL_ETHERNET(0xca05), PSIL_ETHERNET(0xca06), PSIL_ETHERNET(0xca07), /* MAIN-SA2UL */ PSIL_SA2UL(0xca40, 1), PSIL_SA2UL(0xca41, 1), /* PDMA_SPI_G0 - SPI0-3 */ PSIL_PDMA_XY_PKT(0xc600), PSIL_PDMA_XY_PKT(0xc601), PSIL_PDMA_XY_PKT(0xc602), PSIL_PDMA_XY_PKT(0xc603), PSIL_PDMA_XY_PKT(0xc604), PSIL_PDMA_XY_PKT(0xc605), PSIL_PDMA_XY_PKT(0xc606), PSIL_PDMA_XY_PKT(0xc607), PSIL_PDMA_XY_PKT(0xc608), PSIL_PDMA_XY_PKT(0xc609), PSIL_PDMA_XY_PKT(0xc60a), PSIL_PDMA_XY_PKT(0xc60b), PSIL_PDMA_XY_PKT(0xc60c), PSIL_PDMA_XY_PKT(0xc60d), PSIL_PDMA_XY_PKT(0xc60e), PSIL_PDMA_XY_PKT(0xc60f), /* PDMA_SPI_G1 - SPI4-7 */ PSIL_PDMA_XY_PKT(0xc620), PSIL_PDMA_XY_PKT(0xc621), PSIL_PDMA_XY_PKT(0xc622), PSIL_PDMA_XY_PKT(0xc623), PSIL_PDMA_XY_PKT(0xc624), PSIL_PDMA_XY_PKT(0xc625), PSIL_PDMA_XY_PKT(0xc626), PSIL_PDMA_XY_PKT(0xc627), PSIL_PDMA_XY_PKT(0xc628), PSIL_PDMA_XY_PKT(0xc629), PSIL_PDMA_XY_PKT(0xc62a), PSIL_PDMA_XY_PKT(0xc62b), PSIL_PDMA_XY_PKT(0xc62c), PSIL_PDMA_XY_PKT(0xc62d), PSIL_PDMA_XY_PKT(0xc62e), PSIL_PDMA_XY_PKT(0xc62f), /* MCU_CPSW0 */ PSIL_ETHERNET(0xf000), PSIL_ETHERNET(0xf001), PSIL_ETHERNET(0xf002), PSIL_ETHERNET(0xf003), 
PSIL_ETHERNET(0xf004), PSIL_ETHERNET(0xf005), PSIL_ETHERNET(0xf006), PSIL_ETHERNET(0xf007), /* MCU_PDMA_MISC_G0 - SPI0 */ PSIL_PDMA_XY_PKT(0xf100), PSIL_PDMA_XY_PKT(0xf101), PSIL_PDMA_XY_PKT(0xf102), PSIL_PDMA_XY_PKT(0xf103), /* MCU_PDMA_MISC_G1 - SPI1-2 */ PSIL_PDMA_XY_PKT(0xf200), PSIL_PDMA_XY_PKT(0xf201), PSIL_PDMA_XY_PKT(0xf202), PSIL_PDMA_XY_PKT(0xf203), PSIL_PDMA_XY_PKT(0xf204), PSIL_PDMA_XY_PKT(0xf205), PSIL_PDMA_XY_PKT(0xf206), PSIL_PDMA_XY_PKT(0xf207), /* MCU_SA2UL */ PSIL_SA2UL(0xf500, 1), PSIL_SA2UL(0xf501, 1), }; struct psil_ep_map j784s4_ep_map = { .name = "j784s4", .src = j784s4_src_ep_map, .src_count = ARRAY_SIZE(j784s4_src_ep_map), .dst = j784s4_dst_ep_map, .dst_count = ARRAY_SIZE(j784s4_dst_ep_map), };
linux-master
drivers/dma/ti/k3-psil-j784s4.c
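The j784s4 tables above follow the same convention as every map in this dump: source (RX) threads sit in the 0x4xxx/0x7xxx range while their TX counterparts (0xcxxx/0xfxxx) carry an extra high bit. A minimal sketch of that check follows; the 0x8000 value assigned to K3_PSIL_DST_THREAD_ID_OFFSET is an assumption inferred from how k3-psil.c masks the ID, the authoritative definition lives in k3-psil.h.

/*
 * Sketch only: distinguishing a TX (destination) thread from an RX
 * (source) thread before looking it up in the maps above. The 0x8000
 * fallback below is an assumed value for illustration.
 */
#include <linux/types.h>

#ifndef K3_PSIL_DST_THREAD_ID_OFFSET
#define K3_PSIL_DST_THREAD_ID_OFFSET	0x8000	/* assumed value */
#endif

static inline bool example_thread_is_tx(u32 thread_id)
{
	/* e.g. 0xc640 (MAIN_CPSW2G TX) -> true, 0x4640 (its RX pair) -> false */
	return !!(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET);
}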
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com * Author: Peter Ujfalusi <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/sys_soc.h> #include "k3-psil-priv.h" static DEFINE_MUTEX(ep_map_mutex); static const struct psil_ep_map *soc_ep_map; static const struct soc_device_attribute k3_soc_devices[] = { { .family = "AM65X", .data = &am654_ep_map }, { .family = "J721E", .data = &j721e_ep_map }, { .family = "J7200", .data = &j7200_ep_map }, { .family = "AM64X", .data = &am64_ep_map }, { .family = "J721S2", .data = &j721s2_ep_map }, { .family = "AM62X", .data = &am62_ep_map }, { .family = "AM62AX", .data = &am62a_ep_map }, { .family = "J784S4", .data = &j784s4_ep_map }, { /* sentinel */ } }; struct psil_endpoint_config *psil_get_ep_config(u32 thread_id) { int i; mutex_lock(&ep_map_mutex); if (!soc_ep_map) { const struct soc_device_attribute *soc; soc = soc_device_match(k3_soc_devices); if (soc) { soc_ep_map = soc->data; } else { pr_err("PSIL: No compatible machine found for map\n"); mutex_unlock(&ep_map_mutex); return ERR_PTR(-ENOTSUPP); } pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name); } mutex_unlock(&ep_map_mutex); if (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET && soc_ep_map->dst) { /* check in destination thread map */ for (i = 0; i < soc_ep_map->dst_count; i++) { if (soc_ep_map->dst[i].thread_id == thread_id) return &soc_ep_map->dst[i].ep_config; } } thread_id &= ~K3_PSIL_DST_THREAD_ID_OFFSET; if (soc_ep_map->src) { for (i = 0; i < soc_ep_map->src_count; i++) { if (soc_ep_map->src[i].thread_id == thread_id) return &soc_ep_map->src[i].ep_config; } } return ERR_PTR(-ENOENT); } EXPORT_SYMBOL_GPL(psil_get_ep_config); int psil_set_new_ep_config(struct device *dev, const char *name, struct psil_endpoint_config *ep_config) { struct psil_endpoint_config *dst_ep_config; struct of_phandle_args dma_spec; u32 thread_id; int index; if (!dev || !dev->of_node) return -EINVAL; index = of_property_match_string(dev->of_node, "dma-names", name); if (index < 0) return index; if (of_parse_phandle_with_args(dev->of_node, "dmas", "#dma-cells", index, &dma_spec)) return -ENOENT; thread_id = dma_spec.args[0]; dst_ep_config = psil_get_ep_config(thread_id); if (IS_ERR(dst_ep_config)) { pr_err("PSIL: thread ID 0x%04x not defined in map\n", thread_id); of_node_put(dma_spec.np); return PTR_ERR(dst_ep_config); } memcpy(dst_ep_config, ep_config, sizeof(*dst_ep_config)); of_node_put(dma_spec.np); return 0; } EXPORT_SYMBOL_GPL(psil_set_new_ep_config); MODULE_LICENSE("GPL v2");
linux-master
drivers/dma/ti/k3-psil.c
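psil_set_new_ep_config() in the record above lets a peripheral driver patch the static per-SoC map at runtime before its DMA channel is requested. The sketch below shows one hedged way a client could call it; the function name, the "tx0" dma-name and the chosen field values are illustrative assumptions, only the psil_set_new_ep_config() signature and the ERR_PTR-style behaviour come from the code above.

/*
 * Hypothetical caller of psil_set_new_ep_config(): resolve the endpoint
 * behind this device's "tx0" dma-names entry and override a few fields.
 * Values are made up for illustration.
 */
#include <linux/dma/k3-psil.h>

static int example_override_ep(struct device *dev)
{
	struct psil_endpoint_config ep = {
		.ep_type    = PSIL_EP_NATIVE,
		.pkt_mode   = 1,
		.needs_epib = 1,
		.psd_size   = 16,	/* illustrative */
	};

	/* Resolves "dmas"/"dma-names", then memcpy()s over the map entry */
	return psil_set_new_ep_config(dev, "tx0", &ep);
}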
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com * Author: Peter Ujfalusi <[email protected]> */ #include <linux/kernel.h> #include "k3-psil-priv.h" #define PSIL_PDMA_XY_TR(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ }, \ } #define PSIL_PDMA_XY_PKT(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ .pkt_mode = 1, \ }, \ } #define PSIL_ETHERNET(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ .pkt_mode = 1, \ .needs_epib = 1, \ .psd_size = 16, \ }, \ } #define PSIL_SA2UL(x, tx) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ .pkt_mode = 1, \ .needs_epib = 1, \ .psd_size = 64, \ .notdpkt = tx, \ }, \ } /* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ static struct psil_ep am654_src_ep_map[] = { /* SA2UL */ PSIL_SA2UL(0x4000, 0), PSIL_SA2UL(0x4001, 0), PSIL_SA2UL(0x4002, 0), PSIL_SA2UL(0x4003, 0), /* PRU_ICSSG0 */ PSIL_ETHERNET(0x4100), PSIL_ETHERNET(0x4101), PSIL_ETHERNET(0x4102), PSIL_ETHERNET(0x4103), /* PRU_ICSSG1 */ PSIL_ETHERNET(0x4200), PSIL_ETHERNET(0x4201), PSIL_ETHERNET(0x4202), PSIL_ETHERNET(0x4203), /* PRU_ICSSG2 */ PSIL_ETHERNET(0x4300), PSIL_ETHERNET(0x4301), PSIL_ETHERNET(0x4302), PSIL_ETHERNET(0x4303), /* PDMA0 - McASPs */ PSIL_PDMA_XY_TR(0x4400), PSIL_PDMA_XY_TR(0x4401), PSIL_PDMA_XY_TR(0x4402), /* PDMA1 - SPI0-4 */ PSIL_PDMA_XY_PKT(0x4500), PSIL_PDMA_XY_PKT(0x4501), PSIL_PDMA_XY_PKT(0x4502), PSIL_PDMA_XY_PKT(0x4503), PSIL_PDMA_XY_PKT(0x4504), PSIL_PDMA_XY_PKT(0x4505), PSIL_PDMA_XY_PKT(0x4506), PSIL_PDMA_XY_PKT(0x4507), PSIL_PDMA_XY_PKT(0x4508), PSIL_PDMA_XY_PKT(0x4509), PSIL_PDMA_XY_PKT(0x450a), PSIL_PDMA_XY_PKT(0x450b), PSIL_PDMA_XY_PKT(0x450c), PSIL_PDMA_XY_PKT(0x450d), PSIL_PDMA_XY_PKT(0x450e), PSIL_PDMA_XY_PKT(0x450f), PSIL_PDMA_XY_PKT(0x4510), PSIL_PDMA_XY_PKT(0x4511), PSIL_PDMA_XY_PKT(0x4512), PSIL_PDMA_XY_PKT(0x4513), /* PDMA1 - USART0-2 */ PSIL_PDMA_XY_PKT(0x4514), PSIL_PDMA_XY_PKT(0x4515), PSIL_PDMA_XY_PKT(0x4516), /* CPSW0 */ PSIL_ETHERNET(0x7000), /* MCU_PDMA0 - ADCs */ PSIL_PDMA_XY_TR(0x7100), PSIL_PDMA_XY_TR(0x7101), PSIL_PDMA_XY_TR(0x7102), PSIL_PDMA_XY_TR(0x7103), /* MCU_PDMA1 - MCU_SPI0-2 */ PSIL_PDMA_XY_PKT(0x7200), PSIL_PDMA_XY_PKT(0x7201), PSIL_PDMA_XY_PKT(0x7202), PSIL_PDMA_XY_PKT(0x7203), PSIL_PDMA_XY_PKT(0x7204), PSIL_PDMA_XY_PKT(0x7205), PSIL_PDMA_XY_PKT(0x7206), PSIL_PDMA_XY_PKT(0x7207), PSIL_PDMA_XY_PKT(0x7208), PSIL_PDMA_XY_PKT(0x7209), PSIL_PDMA_XY_PKT(0x720a), PSIL_PDMA_XY_PKT(0x720b), /* MCU_PDMA1 - MCU_USART0 */ PSIL_PDMA_XY_PKT(0x7212), }; /* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ static struct psil_ep am654_dst_ep_map[] = { /* SA2UL */ PSIL_SA2UL(0xc000, 1), PSIL_SA2UL(0xc001, 1), /* PRU_ICSSG0 */ PSIL_ETHERNET(0xc100), PSIL_ETHERNET(0xc101), PSIL_ETHERNET(0xc102), PSIL_ETHERNET(0xc103), PSIL_ETHERNET(0xc104), PSIL_ETHERNET(0xc105), PSIL_ETHERNET(0xc106), PSIL_ETHERNET(0xc107), /* PRU_ICSSG1 */ PSIL_ETHERNET(0xc200), PSIL_ETHERNET(0xc201), PSIL_ETHERNET(0xc202), PSIL_ETHERNET(0xc203), PSIL_ETHERNET(0xc204), PSIL_ETHERNET(0xc205), PSIL_ETHERNET(0xc206), PSIL_ETHERNET(0xc207), /* PRU_ICSSG2 */ PSIL_ETHERNET(0xc300), PSIL_ETHERNET(0xc301), PSIL_ETHERNET(0xc302), PSIL_ETHERNET(0xc303), PSIL_ETHERNET(0xc304), PSIL_ETHERNET(0xc305), PSIL_ETHERNET(0xc306), PSIL_ETHERNET(0xc307), /* CPSW0 */ PSIL_ETHERNET(0xf000), PSIL_ETHERNET(0xf001), PSIL_ETHERNET(0xf002), PSIL_ETHERNET(0xf003), PSIL_ETHERNET(0xf004), PSIL_ETHERNET(0xf005), PSIL_ETHERNET(0xf006), 
PSIL_ETHERNET(0xf007), }; struct psil_ep_map am654_ep_map = { .name = "am654", .src = am654_src_ep_map, .src_count = ARRAY_SIZE(am654_src_ep_map), .dst = am654_dst_ep_map, .dst_count = ARRAY_SIZE(am654_dst_ep_map), };
linux-master
drivers/dma/ti/k3-psil-am654.c
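For the SA2UL crypto entries in the AM654 map above, the only difference between the RX and TX variants is the trailing argument of PSIL_SA2UL(), which lands in notdpkt. The two expansions below are derived mechanically from that macro for illustration and assume the struct psil_ep definition from k3-psil-priv.h used by the file itself.

/*
 * Expansions of PSIL_SA2UL(0x4000, 0) (RX) and PSIL_SA2UL(0xc000, 1) (TX)
 * from the tables above. notdpkt = 1 on the TX side suppresses the
 * teardown completion packet for that thread.
 */
static struct psil_ep am654_sa2ul_rx_example = {
	.thread_id = 0x4000,
	.ep_config = {
		.ep_type    = PSIL_EP_NATIVE,
		.pkt_mode   = 1,
		.needs_epib = 1,
		.psd_size   = 64,
		.notdpkt    = 0,
	},
};

static struct psil_ep am654_sa2ul_tx_example = {
	.thread_id = 0xc000,
	.ep_config = {
		.ep_type    = PSIL_EP_NATIVE,
		.pkt_mode   = 1,
		.needs_epib = 1,
		.psd_size   = 64,
		.notdpkt    = 1,	/* no teardown completion on TX */
	},
};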
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com * Author: Peter Ujfalusi <[email protected]> */ #include <linux/kernel.h> #include "k3-psil-priv.h" #define PSIL_PDMA_XY_TR(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ .mapped_channel_id = -1, \ .default_flow_id = -1, \ }, \ } #define PSIL_PDMA_XY_PKT(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ .mapped_channel_id = -1, \ .default_flow_id = -1, \ .pkt_mode = 1, \ }, \ } #define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ .pkt_mode = 1, \ .needs_epib = 1, \ .psd_size = 16, \ .mapped_channel_id = ch, \ .flow_start = flow_base, \ .flow_num = flow_cnt, \ .default_flow_id = flow_base, \ }, \ } #define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ .pkt_mode = 1, \ .needs_epib = 1, \ .psd_size = 64, \ .mapped_channel_id = ch, \ .flow_start = flow_base, \ .flow_num = flow_cnt, \ .default_flow_id = default_flow, \ .notdpkt = tx, \ }, \ } /* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ static struct psil_ep am64_src_ep_map[] = { /* SAUL */ PSIL_SAUL(0x4000, 17, 32, 8, 32, 0), PSIL_SAUL(0x4001, 18, 32, 8, 33, 0), PSIL_SAUL(0x4002, 19, 40, 8, 40, 0), PSIL_SAUL(0x4003, 20, 40, 8, 41, 0), /* ICSS_G0 */ PSIL_ETHERNET(0x4100, 21, 48, 16), PSIL_ETHERNET(0x4101, 22, 64, 16), PSIL_ETHERNET(0x4102, 23, 80, 16), PSIL_ETHERNET(0x4103, 24, 96, 16), /* ICSS_G1 */ PSIL_ETHERNET(0x4200, 25, 112, 16), PSIL_ETHERNET(0x4201, 26, 128, 16), PSIL_ETHERNET(0x4202, 27, 144, 16), PSIL_ETHERNET(0x4203, 28, 160, 16), /* PDMA_MAIN0 - SPI0-3 */ PSIL_PDMA_XY_PKT(0x4300), PSIL_PDMA_XY_PKT(0x4301), PSIL_PDMA_XY_PKT(0x4302), PSIL_PDMA_XY_PKT(0x4303), PSIL_PDMA_XY_PKT(0x4304), PSIL_PDMA_XY_PKT(0x4305), PSIL_PDMA_XY_PKT(0x4306), PSIL_PDMA_XY_PKT(0x4307), PSIL_PDMA_XY_PKT(0x4308), PSIL_PDMA_XY_PKT(0x4309), PSIL_PDMA_XY_PKT(0x430a), PSIL_PDMA_XY_PKT(0x430b), PSIL_PDMA_XY_PKT(0x430c), PSIL_PDMA_XY_PKT(0x430d), PSIL_PDMA_XY_PKT(0x430e), PSIL_PDMA_XY_PKT(0x430f), /* PDMA_MAIN0 - USART0-1 */ PSIL_PDMA_XY_PKT(0x4310), PSIL_PDMA_XY_PKT(0x4311), /* PDMA_MAIN1 - SPI4 */ PSIL_PDMA_XY_PKT(0x4400), PSIL_PDMA_XY_PKT(0x4401), PSIL_PDMA_XY_PKT(0x4402), PSIL_PDMA_XY_PKT(0x4403), /* PDMA_MAIN1 - USART2-6 */ PSIL_PDMA_XY_PKT(0x4404), PSIL_PDMA_XY_PKT(0x4405), PSIL_PDMA_XY_PKT(0x4406), PSIL_PDMA_XY_PKT(0x4407), PSIL_PDMA_XY_PKT(0x4408), /* PDMA_MAIN1 - ADCs */ PSIL_PDMA_XY_TR(0x440f), PSIL_PDMA_XY_TR(0x4410), /* CPSW2 */ PSIL_ETHERNET(0x4500, 16, 16, 16), }; /* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ static struct psil_ep am64_dst_ep_map[] = { /* SAUL */ PSIL_SAUL(0xc000, 24, 80, 8, 80, 1), PSIL_SAUL(0xc001, 25, 88, 8, 88, 1), /* ICSS_G0 */ PSIL_ETHERNET(0xc100, 26, 96, 1), PSIL_ETHERNET(0xc101, 27, 97, 1), PSIL_ETHERNET(0xc102, 28, 98, 1), PSIL_ETHERNET(0xc103, 29, 99, 1), PSIL_ETHERNET(0xc104, 30, 100, 1), PSIL_ETHERNET(0xc105, 31, 101, 1), PSIL_ETHERNET(0xc106, 32, 102, 1), PSIL_ETHERNET(0xc107, 33, 103, 1), /* ICSS_G1 */ PSIL_ETHERNET(0xc200, 34, 104, 1), PSIL_ETHERNET(0xc201, 35, 105, 1), PSIL_ETHERNET(0xc202, 36, 106, 1), PSIL_ETHERNET(0xc203, 37, 107, 1), PSIL_ETHERNET(0xc204, 38, 108, 1), PSIL_ETHERNET(0xc205, 39, 109, 1), PSIL_ETHERNET(0xc206, 40, 110, 1), PSIL_ETHERNET(0xc207, 41, 111, 1), /* CPSW2 */ PSIL_ETHERNET(0xc500, 16, 16, 8), PSIL_ETHERNET(0xc501, 17, 24, 8), PSIL_ETHERNET(0xc502, 18, 32, 
8), PSIL_ETHERNET(0xc503, 19, 40, 8), PSIL_ETHERNET(0xc504, 20, 48, 8), PSIL_ETHERNET(0xc505, 21, 56, 8), PSIL_ETHERNET(0xc506, 22, 64, 8), PSIL_ETHERNET(0xc507, 23, 72, 8), }; struct psil_ep_map am64_ep_map = { .name = "am64", .src = am64_src_ep_map, .src_count = ARRAY_SIZE(am64_src_ep_map), .dst = am64_dst_ep_map, .dst_count = ARRAY_SIZE(am64_dst_ep_map), };
linux-master
drivers/dma/ti/k3-psil-am64.c
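The AM64 PDMA entries above come in two flavours; the expansions below (PSIL_PDMA_XY_TR(0x440f) for an ADC and PSIL_PDMA_XY_PKT(0x4310) for USART0) show the only differences: pkt_mode, plus the -1 markers meaning no fixed PKTDMA channel or default flow is attached. These are mechanical expansions of the macros in the file above, included for illustration only.

/*
 * Expansions of PSIL_PDMA_XY_TR(0x440f) (ADC, TR mode) and
 * PSIL_PDMA_XY_PKT(0x4310) (USART0, packet mode) from the table above.
 * mapped_channel_id/default_flow_id of -1 mean "not statically mapped".
 */
static struct psil_ep am64_adc_example = {
	.thread_id = 0x440f,
	.ep_config = {
		.ep_type	   = PSIL_EP_PDMA_XY,
		.mapped_channel_id = -1,
		.default_flow_id   = -1,
	},
};

static struct psil_ep am64_uart0_example = {
	.thread_id = 0x4310,
	.ep_config = {
		.ep_type	   = PSIL_EP_PDMA_XY,
		.mapped_channel_id = -1,
		.default_flow_id   = -1,
		.pkt_mode	   = 1,	/* packet mode instead of TR mode */
	},
};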
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com * Author: Peter Ujfalusi <[email protected]> */ #include <linux/kernel.h> #include "k3-psil-priv.h" #define PSIL_PDMA_XY_TR(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ }, \ } #define PSIL_PDMA_XY_PKT(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ .pkt_mode = 1, \ }, \ } #define PSIL_PDMA_MCASP(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_PDMA_XY, \ .pdma_acc32 = 1, \ .pdma_burst = 1, \ }, \ } #define PSIL_ETHERNET(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ .pkt_mode = 1, \ .needs_epib = 1, \ .psd_size = 16, \ }, \ } #define PSIL_SA2UL(x, tx) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ .pkt_mode = 1, \ .needs_epib = 1, \ .psd_size = 64, \ .notdpkt = tx, \ }, \ } #define PSIL_CSI2RX(x) \ { \ .thread_id = x, \ .ep_config = { \ .ep_type = PSIL_EP_NATIVE, \ }, \ } /* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ static struct psil_ep j721e_src_ep_map[] = { /* SA2UL */ PSIL_SA2UL(0x4000, 0), PSIL_SA2UL(0x4001, 0), PSIL_SA2UL(0x4002, 0), PSIL_SA2UL(0x4003, 0), /* PRU_ICSSG0 */ PSIL_ETHERNET(0x4100), PSIL_ETHERNET(0x4101), PSIL_ETHERNET(0x4102), PSIL_ETHERNET(0x4103), /* PRU_ICSSG1 */ PSIL_ETHERNET(0x4200), PSIL_ETHERNET(0x4201), PSIL_ETHERNET(0x4202), PSIL_ETHERNET(0x4203), /* PDMA6 (PSIL_PDMA_MCASP_G0) - McASP0-2 */ PSIL_PDMA_MCASP(0x4400), PSIL_PDMA_MCASP(0x4401), PSIL_PDMA_MCASP(0x4402), /* PDMA7 (PSIL_PDMA_MCASP_G1) - McASP3-11 */ PSIL_PDMA_MCASP(0x4500), PSIL_PDMA_MCASP(0x4501), PSIL_PDMA_MCASP(0x4502), PSIL_PDMA_MCASP(0x4503), PSIL_PDMA_MCASP(0x4504), PSIL_PDMA_MCASP(0x4505), PSIL_PDMA_MCASP(0x4506), PSIL_PDMA_MCASP(0x4507), PSIL_PDMA_MCASP(0x4508), /* PDMA8 (PDMA_MISC_G0) - SPI0-1 */ PSIL_PDMA_XY_PKT(0x4600), PSIL_PDMA_XY_PKT(0x4601), PSIL_PDMA_XY_PKT(0x4602), PSIL_PDMA_XY_PKT(0x4603), PSIL_PDMA_XY_PKT(0x4604), PSIL_PDMA_XY_PKT(0x4605), PSIL_PDMA_XY_PKT(0x4606), PSIL_PDMA_XY_PKT(0x4607), /* PDMA9 (PDMA_MISC_G1) - SPI2-3 */ PSIL_PDMA_XY_PKT(0x460c), PSIL_PDMA_XY_PKT(0x460d), PSIL_PDMA_XY_PKT(0x460e), PSIL_PDMA_XY_PKT(0x460f), PSIL_PDMA_XY_PKT(0x4610), PSIL_PDMA_XY_PKT(0x4611), PSIL_PDMA_XY_PKT(0x4612), PSIL_PDMA_XY_PKT(0x4613), /* PDMA10 (PDMA_MISC_G2) - SPI4-5 */ PSIL_PDMA_XY_PKT(0x4618), PSIL_PDMA_XY_PKT(0x4619), PSIL_PDMA_XY_PKT(0x461a), PSIL_PDMA_XY_PKT(0x461b), PSIL_PDMA_XY_PKT(0x461c), PSIL_PDMA_XY_PKT(0x461d), PSIL_PDMA_XY_PKT(0x461e), PSIL_PDMA_XY_PKT(0x461f), /* PDMA11 (PDMA_MISC_G3) */ PSIL_PDMA_XY_PKT(0x4624), PSIL_PDMA_XY_PKT(0x4625), PSIL_PDMA_XY_PKT(0x4626), PSIL_PDMA_XY_PKT(0x4627), PSIL_PDMA_XY_PKT(0x4628), PSIL_PDMA_XY_PKT(0x4629), PSIL_PDMA_XY_PKT(0x4630), PSIL_PDMA_XY_PKT(0x463a), /* PDMA13 (PDMA_USART_G0) - UART0-1 */ PSIL_PDMA_XY_PKT(0x4700), PSIL_PDMA_XY_PKT(0x4701), /* PDMA14 (PDMA_USART_G1) - UART2-3 */ PSIL_PDMA_XY_PKT(0x4702), PSIL_PDMA_XY_PKT(0x4703), /* PDMA15 (PDMA_USART_G2) - UART4-9 */ PSIL_PDMA_XY_PKT(0x4704), PSIL_PDMA_XY_PKT(0x4705), PSIL_PDMA_XY_PKT(0x4706), PSIL_PDMA_XY_PKT(0x4707), PSIL_PDMA_XY_PKT(0x4708), PSIL_PDMA_XY_PKT(0x4709), /* CSI2RX */ PSIL_CSI2RX(0x4940), PSIL_CSI2RX(0x4941), PSIL_CSI2RX(0x4942), PSIL_CSI2RX(0x4943), PSIL_CSI2RX(0x4944), PSIL_CSI2RX(0x4945), PSIL_CSI2RX(0x4946), PSIL_CSI2RX(0x4947), PSIL_CSI2RX(0x4948), PSIL_CSI2RX(0x4949), PSIL_CSI2RX(0x494a), PSIL_CSI2RX(0x494b), PSIL_CSI2RX(0x494c), PSIL_CSI2RX(0x494d), PSIL_CSI2RX(0x494e), PSIL_CSI2RX(0x494f), PSIL_CSI2RX(0x4950), 
PSIL_CSI2RX(0x4951), PSIL_CSI2RX(0x4952), PSIL_CSI2RX(0x4953), PSIL_CSI2RX(0x4954), PSIL_CSI2RX(0x4955), PSIL_CSI2RX(0x4956), PSIL_CSI2RX(0x4957), PSIL_CSI2RX(0x4958), PSIL_CSI2RX(0x4959), PSIL_CSI2RX(0x495a), PSIL_CSI2RX(0x495b), PSIL_CSI2RX(0x495c), PSIL_CSI2RX(0x495d), PSIL_CSI2RX(0x495e), PSIL_CSI2RX(0x495f), PSIL_CSI2RX(0x4960), PSIL_CSI2RX(0x4961), PSIL_CSI2RX(0x4962), PSIL_CSI2RX(0x4963), PSIL_CSI2RX(0x4964), PSIL_CSI2RX(0x4965), PSIL_CSI2RX(0x4966), PSIL_CSI2RX(0x4967), PSIL_CSI2RX(0x4968), PSIL_CSI2RX(0x4969), PSIL_CSI2RX(0x496a), PSIL_CSI2RX(0x496b), PSIL_CSI2RX(0x496c), PSIL_CSI2RX(0x496d), PSIL_CSI2RX(0x496e), PSIL_CSI2RX(0x496f), PSIL_CSI2RX(0x4970), PSIL_CSI2RX(0x4971), PSIL_CSI2RX(0x4972), PSIL_CSI2RX(0x4973), PSIL_CSI2RX(0x4974), PSIL_CSI2RX(0x4975), PSIL_CSI2RX(0x4976), PSIL_CSI2RX(0x4977), PSIL_CSI2RX(0x4978), PSIL_CSI2RX(0x4979), PSIL_CSI2RX(0x497a), PSIL_CSI2RX(0x497b), PSIL_CSI2RX(0x497c), PSIL_CSI2RX(0x497d), PSIL_CSI2RX(0x497e), PSIL_CSI2RX(0x497f), /* CPSW9 */ PSIL_ETHERNET(0x4a00), /* CPSW0 */ PSIL_ETHERNET(0x7000), /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */ PSIL_PDMA_XY_PKT(0x7100), PSIL_PDMA_XY_PKT(0x7101), PSIL_PDMA_XY_PKT(0x7102), PSIL_PDMA_XY_PKT(0x7103), /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */ PSIL_PDMA_XY_PKT(0x7200), PSIL_PDMA_XY_PKT(0x7201), PSIL_PDMA_XY_PKT(0x7202), PSIL_PDMA_XY_PKT(0x7203), PSIL_PDMA_XY_PKT(0x7204), PSIL_PDMA_XY_PKT(0x7205), PSIL_PDMA_XY_PKT(0x7206), PSIL_PDMA_XY_PKT(0x7207), /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */ PSIL_PDMA_XY_PKT(0x7300), /* MCU_PDMA_ADC - ADC0-1 */ PSIL_PDMA_XY_TR(0x7400), PSIL_PDMA_XY_TR(0x7401), PSIL_PDMA_XY_TR(0x7402), PSIL_PDMA_XY_TR(0x7403), /* SA2UL */ PSIL_SA2UL(0x7500, 0), PSIL_SA2UL(0x7501, 0), PSIL_SA2UL(0x7502, 0), PSIL_SA2UL(0x7503, 0), }; /* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ static struct psil_ep j721e_dst_ep_map[] = { /* SA2UL */ PSIL_SA2UL(0xc000, 1), PSIL_SA2UL(0xc001, 1), /* PRU_ICSSG0 */ PSIL_ETHERNET(0xc100), PSIL_ETHERNET(0xc101), PSIL_ETHERNET(0xc102), PSIL_ETHERNET(0xc103), PSIL_ETHERNET(0xc104), PSIL_ETHERNET(0xc105), PSIL_ETHERNET(0xc106), PSIL_ETHERNET(0xc107), /* PRU_ICSSG1 */ PSIL_ETHERNET(0xc200), PSIL_ETHERNET(0xc201), PSIL_ETHERNET(0xc202), PSIL_ETHERNET(0xc203), PSIL_ETHERNET(0xc204), PSIL_ETHERNET(0xc205), PSIL_ETHERNET(0xc206), PSIL_ETHERNET(0xc207), /* PDMA6 (PSIL_PDMA_MCASP_G0) - McASP0-2 */ PSIL_PDMA_MCASP(0xc400), PSIL_PDMA_MCASP(0xc401), PSIL_PDMA_MCASP(0xc402), /* PDMA7 (PSIL_PDMA_MCASP_G1) - McASP3-11 */ PSIL_PDMA_MCASP(0xc500), PSIL_PDMA_MCASP(0xc501), PSIL_PDMA_MCASP(0xc502), PSIL_PDMA_MCASP(0xc503), PSIL_PDMA_MCASP(0xc504), PSIL_PDMA_MCASP(0xc505), PSIL_PDMA_MCASP(0xc506), PSIL_PDMA_MCASP(0xc507), PSIL_PDMA_MCASP(0xc508), /* PDMA8 (PDMA_MISC_G0) - SPI0-1 */ PSIL_PDMA_XY_PKT(0xc600), PSIL_PDMA_XY_PKT(0xc601), PSIL_PDMA_XY_PKT(0xc602), PSIL_PDMA_XY_PKT(0xc603), PSIL_PDMA_XY_PKT(0xc604), PSIL_PDMA_XY_PKT(0xc605), PSIL_PDMA_XY_PKT(0xc606), PSIL_PDMA_XY_PKT(0xc607), /* PDMA9 (PDMA_MISC_G1) - SPI2-3 */ PSIL_PDMA_XY_PKT(0xc60c), PSIL_PDMA_XY_PKT(0xc60d), PSIL_PDMA_XY_PKT(0xc60e), PSIL_PDMA_XY_PKT(0xc60f), PSIL_PDMA_XY_PKT(0xc610), PSIL_PDMA_XY_PKT(0xc611), PSIL_PDMA_XY_PKT(0xc612), PSIL_PDMA_XY_PKT(0xc613), /* PDMA10 (PDMA_MISC_G2) - SPI4-5 */ PSIL_PDMA_XY_PKT(0xc618), PSIL_PDMA_XY_PKT(0xc619), PSIL_PDMA_XY_PKT(0xc61a), PSIL_PDMA_XY_PKT(0xc61b), PSIL_PDMA_XY_PKT(0xc61c), PSIL_PDMA_XY_PKT(0xc61d), PSIL_PDMA_XY_PKT(0xc61e), PSIL_PDMA_XY_PKT(0xc61f), /* PDMA11 (PDMA_MISC_G3) */ PSIL_PDMA_XY_PKT(0xc624), PSIL_PDMA_XY_PKT(0xc625), 
PSIL_PDMA_XY_PKT(0xc626), PSIL_PDMA_XY_PKT(0xc627), PSIL_PDMA_XY_PKT(0xc628), PSIL_PDMA_XY_PKT(0xc629), PSIL_PDMA_XY_PKT(0xc630), PSIL_PDMA_XY_PKT(0xc63a), /* PDMA13 (PDMA_USART_G0) - UART0-1 */ PSIL_PDMA_XY_PKT(0xc700), PSIL_PDMA_XY_PKT(0xc701), /* PDMA14 (PDMA_USART_G1) - UART2-3 */ PSIL_PDMA_XY_PKT(0xc702), PSIL_PDMA_XY_PKT(0xc703), /* PDMA15 (PDMA_USART_G2) - UART4-9 */ PSIL_PDMA_XY_PKT(0xc704), PSIL_PDMA_XY_PKT(0xc705), PSIL_PDMA_XY_PKT(0xc706), PSIL_PDMA_XY_PKT(0xc707), PSIL_PDMA_XY_PKT(0xc708), PSIL_PDMA_XY_PKT(0xc709), /* CPSW9 */ PSIL_ETHERNET(0xca00), PSIL_ETHERNET(0xca01), PSIL_ETHERNET(0xca02), PSIL_ETHERNET(0xca03), PSIL_ETHERNET(0xca04), PSIL_ETHERNET(0xca05), PSIL_ETHERNET(0xca06), PSIL_ETHERNET(0xca07), /* CPSW0 */ PSIL_ETHERNET(0xf000), PSIL_ETHERNET(0xf001), PSIL_ETHERNET(0xf002), PSIL_ETHERNET(0xf003), PSIL_ETHERNET(0xf004), PSIL_ETHERNET(0xf005), PSIL_ETHERNET(0xf006), PSIL_ETHERNET(0xf007), /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */ PSIL_PDMA_XY_PKT(0xf100), PSIL_PDMA_XY_PKT(0xf101), PSIL_PDMA_XY_PKT(0xf102), PSIL_PDMA_XY_PKT(0xf103), /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */ PSIL_PDMA_XY_PKT(0xf200), PSIL_PDMA_XY_PKT(0xf201), PSIL_PDMA_XY_PKT(0xf202), PSIL_PDMA_XY_PKT(0xf203), PSIL_PDMA_XY_PKT(0xf204), PSIL_PDMA_XY_PKT(0xf205), PSIL_PDMA_XY_PKT(0xf206), PSIL_PDMA_XY_PKT(0xf207), /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */ PSIL_PDMA_XY_PKT(0xf300), /* SA2UL */ PSIL_SA2UL(0xf500, 1), PSIL_SA2UL(0xf501, 1), }; struct psil_ep_map j721e_ep_map = { .name = "j721e", .src = j721e_src_ep_map, .src_count = ARRAY_SIZE(j721e_src_ep_map), .dst = j721e_dst_ep_map, .dst_count = ARRAY_SIZE(j721e_dst_ep_map), };
linux-master
drivers/dma/ti/k3-psil-j721e.c
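The table above is consumed at channel-setup time by looking up a PSI-L thread ID in either the source or the destination map (the in-tree lookup lives in psil_get_ep_config() in drivers/dma/ti/k3-psil.c). The snippet below is only a minimal, self-contained sketch of that search, written against the structures visible in this file; the helper name example_find_ep_config() and passing the map directly (the real code keeps a per-SoC map pointer) are assumptions for illustration.

#include <linux/err.h>
#include <linux/dma/k3-psil.h>		/* K3_PSIL_DST_THREAD_ID_OFFSET */

#include "k3-psil-priv.h"		/* struct psil_ep, struct psil_ep_map */

static struct psil_endpoint_config *
example_find_ep_config(struct psil_ep_map *map, u32 thread_id)
{
	int i;

	/* Destination (TX) thread IDs carry the 0x8000 offset bit. */
	if (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) {
		for (i = 0; i < map->dst_count; i++)
			if (map->dst[i].thread_id == thread_id)
				return &map->dst[i].ep_config;
		return ERR_PTR(-ENOENT);
	}

	/* Otherwise search the source (RX) side of the map. */
	for (i = 0; i < map->src_count; i++)
		if (map->src[i].thread_id == thread_id)
			return &map->src[i].ep_config;

	return ERR_PTR(-ENOENT);
}

Called with &j721e_ep_map and, for example, 0xc400, this returns the McASP0 TX endpoint configuration declared above.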
// SPDX-License-Identifier: GPL-2.0 /* * K3 NAVSS DMA glue interface * * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com * */ #include <linux/module.h> #include <linux/atomic.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/init.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/soc/ti/k3-ringacc.h> #include <linux/dma/ti-cppi5.h> #include <linux/dma/k3-udma-glue.h> #include "k3-udma.h" #include "k3-psil-priv.h" struct k3_udma_glue_common { struct device *dev; struct device chan_dev; struct udma_dev *udmax; const struct udma_tisci_rm *tisci_rm; struct k3_ringacc *ringacc; u32 src_thread; u32 dst_thread; u32 hdesc_size; bool epib; u32 psdata_size; u32 swdata_size; u32 atype_asel; struct psil_endpoint_config *ep_config; }; struct k3_udma_glue_tx_channel { struct k3_udma_glue_common common; struct udma_tchan *udma_tchanx; int udma_tchan_id; struct k3_ring *ringtx; struct k3_ring *ringtxcq; bool psil_paired; int virq; atomic_t free_pkts; bool tx_pause_on_err; bool tx_filt_einfo; bool tx_filt_pswords; bool tx_supr_tdpkt; int udma_tflow_id; }; struct k3_udma_glue_rx_flow { struct udma_rflow *udma_rflow; int udma_rflow_id; struct k3_ring *ringrx; struct k3_ring *ringrxfdq; int virq; }; struct k3_udma_glue_rx_channel { struct k3_udma_glue_common common; struct udma_rchan *udma_rchanx; int udma_rchan_id; bool remote; bool psil_paired; u32 swdata_size; int flow_id_base; struct k3_udma_glue_rx_flow *flows; u32 flow_num; u32 flows_ready; }; static void k3_udma_chan_dev_release(struct device *dev) { /* The struct containing the device is devm managed */ } static struct class k3_udma_glue_devclass = { .name = "k3_udma_glue_chan", .dev_release = k3_udma_chan_dev_release, }; #define K3_UDMAX_TDOWN_TIMEOUT_US 1000 static int of_k3_udma_glue_parse(struct device_node *udmax_np, struct k3_udma_glue_common *common) { common->udmax = of_xudma_dev_get(udmax_np, NULL); if (IS_ERR(common->udmax)) return PTR_ERR(common->udmax); common->ringacc = xudma_get_ringacc(common->udmax); common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax); return 0; } static int of_k3_udma_glue_parse_chn(struct device_node *chn_np, const char *name, struct k3_udma_glue_common *common, bool tx_chn) { struct of_phandle_args dma_spec; u32 thread_id; int ret = 0; int index; if (unlikely(!name)) return -EINVAL; index = of_property_match_string(chn_np, "dma-names", name); if (index < 0) return index; if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index, &dma_spec)) return -ENOENT; ret = of_k3_udma_glue_parse(dma_spec.np, common); if (ret) goto out_put_spec; thread_id = dma_spec.args[0]; if (dma_spec.args_count == 2) { if (dma_spec.args[1] > 2 && !xudma_is_pktdma(common->udmax)) { dev_err(common->dev, "Invalid channel atype: %u\n", dma_spec.args[1]); ret = -EINVAL; goto out_put_spec; } if (dma_spec.args[1] > 15 && xudma_is_pktdma(common->udmax)) { dev_err(common->dev, "Invalid channel asel: %u\n", dma_spec.args[1]); ret = -EINVAL; goto out_put_spec; } common->atype_asel = dma_spec.args[1]; } if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) { ret = -EINVAL; goto out_put_spec; } if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) { ret = -EINVAL; goto out_put_spec; } /* get psil endpoint config */ common->ep_config = psil_get_ep_config(thread_id); if (IS_ERR(common->ep_config)) { dev_err(common->dev, "No configuration for psi-l thread 0x%04x\n", thread_id); ret = PTR_ERR(common->ep_config); goto out_put_spec; } 
common->epib = common->ep_config->needs_epib; common->psdata_size = common->ep_config->psd_size; if (tx_chn) common->dst_thread = thread_id; else common->src_thread = thread_id; out_put_spec: of_node_put(dma_spec.np); return ret; }; static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) { struct device *dev = tx_chn->common.dev; dev_dbg(dev, "dump_tx_chn:\n" "udma_tchan_id: %d\n" "src_thread: %08x\n" "dst_thread: %08x\n", tx_chn->udma_tchan_id, tx_chn->common.src_thread, tx_chn->common.dst_thread); } static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn, char *mark) { struct device *dev = chn->common.dev; dev_dbg(dev, "=== dump ===> %s\n", mark); dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG, xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG)); dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG, xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG)); dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG, xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG)); dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG, xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG)); dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG, xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG)); } static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) { const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm; struct ti_sci_msg_rm_udmap_tx_ch_cfg req; memset(&req, 0, sizeof(req)); req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID; req.nav_id = tisci_rm->tisci_dev_id; req.index = tx_chn->udma_tchan_id; if (tx_chn->tx_pause_on_err) req.tx_pause_on_err = 1; if (tx_chn->tx_filt_einfo) req.tx_filt_einfo = 1; if (tx_chn->tx_filt_pswords) req.tx_filt_pswords = 1; req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; if (tx_chn->tx_supr_tdpkt) req.tx_supr_tdpkt = 1; req.tx_fetch_size = tx_chn->common.hdesc_size >> 2; req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq); req.tx_atype = tx_chn->common.atype_asel; return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req); } struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev, const char *name, struct k3_udma_glue_tx_channel_cfg *cfg) { struct k3_udma_glue_tx_channel *tx_chn; int ret; tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL); if (!tx_chn) return ERR_PTR(-ENOMEM); tx_chn->common.dev = dev; tx_chn->common.swdata_size = cfg->swdata_size; tx_chn->tx_pause_on_err = cfg->tx_pause_on_err; tx_chn->tx_filt_einfo = cfg->tx_filt_einfo; tx_chn->tx_filt_pswords = cfg->tx_filt_pswords; tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt; /* parse of udmap channel */ ret = of_k3_udma_glue_parse_chn(dev->of_node, name, &tx_chn->common, true); if (ret) goto err; tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib, tx_chn->common.psdata_size, tx_chn->common.swdata_size); if (xudma_is_pktdma(tx_chn->common.udmax)) tx_chn->udma_tchan_id = tx_chn->common.ep_config->mapped_channel_id; else tx_chn->udma_tchan_id = -1; /* request and cfg UDMAP TX channel */ tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, tx_chn->udma_tchan_id); if 
(IS_ERR(tx_chn->udma_tchanx)) { ret = PTR_ERR(tx_chn->udma_tchanx); dev_err(dev, "UDMAX tchanx get err %d\n", ret); goto err; } tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx); tx_chn->common.chan_dev.class = &k3_udma_glue_devclass; tx_chn->common.chan_dev.parent = xudma_get_device(tx_chn->common.udmax); dev_set_name(&tx_chn->common.chan_dev, "tchan%d-0x%04x", tx_chn->udma_tchan_id, tx_chn->common.dst_thread); ret = device_register(&tx_chn->common.chan_dev); if (ret) { dev_err(dev, "Channel Device registration failed %d\n", ret); put_device(&tx_chn->common.chan_dev); tx_chn->common.chan_dev.parent = NULL; goto err; } if (xudma_is_pktdma(tx_chn->common.udmax)) { /* prepare the channel device as coherent */ tx_chn->common.chan_dev.dma_coherent = true; dma_coerce_mask_and_coherent(&tx_chn->common.chan_dev, DMA_BIT_MASK(48)); } atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size); if (xudma_is_pktdma(tx_chn->common.udmax)) tx_chn->udma_tflow_id = tx_chn->common.ep_config->default_flow_id; else tx_chn->udma_tflow_id = tx_chn->udma_tchan_id; /* request and cfg rings */ ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc, tx_chn->udma_tflow_id, -1, &tx_chn->ringtx, &tx_chn->ringtxcq); if (ret) { dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret); goto err; } /* Set the dma_dev for the rings to be configured */ cfg->tx_cfg.dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn); cfg->txcq_cfg.dma_dev = cfg->tx_cfg.dma_dev; /* Set the ASEL value for DMA rings of PKTDMA */ if (xudma_is_pktdma(tx_chn->common.udmax)) { cfg->tx_cfg.asel = tx_chn->common.atype_asel; cfg->txcq_cfg.asel = tx_chn->common.atype_asel; } ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg); if (ret) { dev_err(dev, "Failed to cfg ringtx %d\n", ret); goto err; } ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg); if (ret) { dev_err(dev, "Failed to cfg ringtx %d\n", ret); goto err; } /* request and cfg psi-l */ tx_chn->common.src_thread = xudma_dev_get_psil_base(tx_chn->common.udmax) + tx_chn->udma_tchan_id; ret = k3_udma_glue_cfg_tx_chn(tx_chn); if (ret) { dev_err(dev, "Failed to cfg tchan %d\n", ret); goto err; } k3_udma_glue_dump_tx_chn(tx_chn); return tx_chn; err: k3_udma_glue_release_tx_chn(tx_chn); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn); void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) { if (tx_chn->psil_paired) { xudma_navss_psil_unpair(tx_chn->common.udmax, tx_chn->common.src_thread, tx_chn->common.dst_thread); tx_chn->psil_paired = false; } if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx)) xudma_tchan_put(tx_chn->common.udmax, tx_chn->udma_tchanx); if (tx_chn->ringtxcq) k3_ringacc_ring_free(tx_chn->ringtxcq); if (tx_chn->ringtx) k3_ringacc_ring_free(tx_chn->ringtx); if (tx_chn->common.chan_dev.parent) { device_unregister(&tx_chn->common.chan_dev); tx_chn->common.chan_dev.parent = NULL; } } EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn); int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, struct cppi5_host_desc_t *desc_tx, dma_addr_t desc_dma) { u32 ringtxcq_id; if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0)) return -ENOMEM; ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq); cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id); return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma); } EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn); int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, dma_addr_t *desc_dma) { int ret; ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma); if (!ret) 
atomic_inc(&tx_chn->free_pkts); return ret; } EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn); int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) { int ret; ret = xudma_navss_psil_pair(tx_chn->common.udmax, tx_chn->common.src_thread, tx_chn->common.dst_thread); if (ret) { dev_err(tx_chn->common.dev, "PSI-L request err %d\n", ret); return ret; } tx_chn->psil_paired = true; xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG, UDMA_PEER_RT_EN_ENABLE); xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, UDMA_CHAN_RT_CTL_EN); k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en"); return 0; } EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn); void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) { k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1"); xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0); xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG, 0); k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2"); if (tx_chn->psil_paired) { xudma_navss_psil_unpair(tx_chn->common.udmax, tx_chn->common.src_thread, tx_chn->common.dst_thread); tx_chn->psil_paired = false; } } EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn); void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, bool sync) { int i = 0; u32 val; k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1"); xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN); val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG); while (sync && (val & UDMA_CHAN_RT_CTL_EN)) { val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG); udelay(1); if (i > K3_UDMAX_TDOWN_TIMEOUT_US) { dev_err(tx_chn->common.dev, "TX tdown timeout\n"); break; } i++; } val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG); if (sync && (val & UDMA_PEER_RT_EN_ENABLE)) dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n"); k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2"); } EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn); void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, void *data, void (*cleanup)(void *data, dma_addr_t desc_dma)) { struct device *dev = tx_chn->common.dev; dma_addr_t desc_dma; int occ_tx, i, ret; /* * TXQ reset need to be special way as it is input for udma and its * state cached by udma, so: * 1) save TXQ occ * 2) clean up TXQ and call callback .cleanup() for each desc * 3) reset TXQ in a special way */ occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx); dev_dbg(dev, "TX reset occ_tx %u\n", occ_tx); for (i = 0; i < occ_tx; i++) { ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma); if (ret) { if (ret != -ENODATA) dev_err(dev, "TX reset pop %d\n", ret); break; } cleanup(data, desc_dma); } /* reset TXCQ as it is not input for udma - expected to be empty */ k3_ringacc_ring_reset(tx_chn->ringtxcq); k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx); } EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn); u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn) { return tx_chn->common.hdesc_size; } EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size); u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn) { return k3_ringacc_get_ring_id(tx_chn->ringtxcq); } EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id); int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn) { if (xudma_is_pktdma(tx_chn->common.udmax)) { tx_chn->virq = xudma_pktdma_tflow_get_irq(tx_chn->common.udmax, tx_chn->udma_tflow_id); } else { tx_chn->virq = 
k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq); } return tx_chn->virq; } EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq); struct device * k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn) { if (xudma_is_pktdma(tx_chn->common.udmax) && (tx_chn->common.atype_asel == 14 || tx_chn->common.atype_asel == 15)) return &tx_chn->common.chan_dev; return xudma_get_device(tx_chn->common.udmax); } EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_dma_device); void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn, dma_addr_t *addr) { if (!xudma_is_pktdma(tx_chn->common.udmax) || !tx_chn->common.atype_asel) return; *addr |= (u64)tx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT; } EXPORT_SYMBOL_GPL(k3_udma_glue_tx_dma_to_cppi5_addr); void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn, dma_addr_t *addr) { if (!xudma_is_pktdma(tx_chn->common.udmax) || !tx_chn->common.atype_asel) return; *addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0); } EXPORT_SYMBOL_GPL(k3_udma_glue_tx_cppi5_to_dma_addr); static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) { const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; struct ti_sci_msg_rm_udmap_rx_ch_cfg req; int ret; memset(&req, 0, sizeof(req)); req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID; req.nav_id = tisci_rm->tisci_dev_id; req.index = rx_chn->udma_rchan_id; req.rx_fetch_size = rx_chn->common.hdesc_size >> 2; /* * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw * and udmax impl, so just configure it to invalid value. * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx); */ req.rxcq_qnum = 0xFFFF; if (!xudma_is_pktdma(rx_chn->common.udmax) && rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) { /* Default flow + extra ones */ req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID; req.flowid_start = rx_chn->flow_id_base; req.flowid_cnt = rx_chn->flow_num; } req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; req.rx_atype = rx_chn->common.atype_asel; ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req); if (ret) dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n", rx_chn->udma_rchan_id, ret); return ret; } static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, u32 flow_num) { struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; if (IS_ERR_OR_NULL(flow->udma_rflow)) return; if (flow->ringrxfdq) k3_ringacc_ring_free(flow->ringrxfdq); if (flow->ringrx) k3_ringacc_ring_free(flow->ringrx); xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); flow->udma_rflow = NULL; rx_chn->flows_ready--; } static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, u32 flow_idx, struct k3_udma_glue_rx_flow_cfg *flow_cfg) { struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; struct device *dev = rx_chn->common.dev; struct ti_sci_msg_rm_udmap_flow_cfg req; int rx_ring_id; int rx_ringfdq_id; int ret = 0; flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax, flow->udma_rflow_id); if (IS_ERR(flow->udma_rflow)) { ret = PTR_ERR(flow->udma_rflow); dev_err(dev, "UDMAX rflow get err %d\n", ret); return ret; } if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) { ret = -ENODEV; goto 
err_rflow_put; } if (xudma_is_pktdma(rx_chn->common.udmax)) { rx_ringfdq_id = flow->udma_rflow_id + xudma_get_rflow_ring_offset(rx_chn->common.udmax); rx_ring_id = 0; } else { rx_ring_id = flow_cfg->ring_rxq_id; rx_ringfdq_id = flow_cfg->ring_rxfdq0_id; } /* request and cfg rings */ ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc, rx_ringfdq_id, rx_ring_id, &flow->ringrxfdq, &flow->ringrx); if (ret) { dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret); goto err_rflow_put; } /* Set the dma_dev for the rings to be configured */ flow_cfg->rx_cfg.dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn); flow_cfg->rxfdq_cfg.dma_dev = flow_cfg->rx_cfg.dma_dev; /* Set the ASEL value for DMA rings of PKTDMA */ if (xudma_is_pktdma(rx_chn->common.udmax)) { flow_cfg->rx_cfg.asel = rx_chn->common.atype_asel; flow_cfg->rxfdq_cfg.asel = rx_chn->common.atype_asel; } ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg); if (ret) { dev_err(dev, "Failed to cfg ringrx %d\n", ret); goto err_ringrxfdq_free; } ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg); if (ret) { dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret); goto err_ringrxfdq_free; } if (rx_chn->remote) { rx_ring_id = TI_SCI_RESOURCE_NULL; rx_ringfdq_id = TI_SCI_RESOURCE_NULL; } else { rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx); rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq); } memset(&req, 0, sizeof(req)); req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; req.nav_id = tisci_rm->tisci_dev_id; req.flow_index = flow->udma_rflow_id; if (rx_chn->common.epib) req.rx_einfo_present = 1; if (rx_chn->common.psdata_size) req.rx_psinfo_present = 1; if (flow_cfg->rx_error_handling) req.rx_error_handling = 1; req.rx_desc_type = 0; req.rx_dest_qnum = rx_ring_id; req.rx_src_tag_hi_sel = 0; req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel; req.rx_dest_tag_hi_sel = 0; req.rx_dest_tag_lo_sel = 0; req.rx_fdq0_sz0_qnum = rx_ringfdq_id; req.rx_fdq1_qnum = rx_ringfdq_id; req.rx_fdq2_qnum = rx_ringfdq_id; req.rx_fdq3_qnum = rx_ringfdq_id; ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); if (ret) { dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id, ret); goto err_ringrxfdq_free; } rx_chn->flows_ready++; dev_dbg(dev, "flow%d config done. 
ready:%d\n", flow->udma_rflow_id, rx_chn->flows_ready); return 0; err_ringrxfdq_free: k3_ringacc_ring_free(flow->ringrxfdq); k3_ringacc_ring_free(flow->ringrx); err_rflow_put: xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); flow->udma_rflow = NULL; return ret; } static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn) { struct device *dev = chn->common.dev; dev_dbg(dev, "dump_rx_chn:\n" "udma_rchan_id: %d\n" "src_thread: %08x\n" "dst_thread: %08x\n" "epib: %d\n" "hdesc_size: %u\n" "psdata_size: %u\n" "swdata_size: %u\n" "flow_id_base: %d\n" "flow_num: %d\n", chn->udma_rchan_id, chn->common.src_thread, chn->common.dst_thread, chn->common.epib, chn->common.hdesc_size, chn->common.psdata_size, chn->common.swdata_size, chn->flow_id_base, chn->flow_num); } static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn, char *mark) { struct device *dev = chn->common.dev; dev_dbg(dev, "=== dump ===> %s\n", mark); dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG, xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG)); dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG, xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG)); dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG, xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG)); dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG, xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG)); dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG, xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG)); } static int k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn, struct k3_udma_glue_rx_channel_cfg *cfg) { int ret; /* default rflow */ if (cfg->flow_id_use_rxchan_id) return 0; /* not a GP rflows */ if (rx_chn->flow_id_base != -1 && !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base)) return 0; /* Allocate range of GP rflows */ ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax, rx_chn->flow_id_base, rx_chn->flow_num); if (ret < 0) { dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n", rx_chn->flow_id_base, rx_chn->flow_num, ret); return ret; } rx_chn->flow_id_base = ret; return 0; } static struct k3_udma_glue_rx_channel * k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name, struct k3_udma_glue_rx_channel_cfg *cfg) { struct k3_udma_glue_rx_channel *rx_chn; struct psil_endpoint_config *ep_cfg; int ret, i; if (cfg->flow_id_num <= 0) return ERR_PTR(-EINVAL); if (cfg->flow_id_num != 1 && (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id)) return ERR_PTR(-EINVAL); rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL); if (!rx_chn) return ERR_PTR(-ENOMEM); rx_chn->common.dev = dev; rx_chn->common.swdata_size = cfg->swdata_size; rx_chn->remote = false; /* parse of udmap channel */ ret = of_k3_udma_glue_parse_chn(dev->of_node, name, &rx_chn->common, false); if (ret) goto err; rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, rx_chn->common.psdata_size, rx_chn->common.swdata_size); ep_cfg = rx_chn->common.ep_config; if (xudma_is_pktdma(rx_chn->common.udmax)) rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id; else rx_chn->udma_rchan_id = -1; /* request and cfg UDMAP RX channel */ rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, rx_chn->udma_rchan_id); if (IS_ERR(rx_chn->udma_rchanx)) { ret = PTR_ERR(rx_chn->udma_rchanx); dev_err(dev, "UDMAX rchanx get err %d\n", ret); goto err; } rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx); rx_chn->common.chan_dev.class = 
&k3_udma_glue_devclass; rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax); dev_set_name(&rx_chn->common.chan_dev, "rchan%d-0x%04x", rx_chn->udma_rchan_id, rx_chn->common.src_thread); ret = device_register(&rx_chn->common.chan_dev); if (ret) { dev_err(dev, "Channel Device registration failed %d\n", ret); put_device(&rx_chn->common.chan_dev); rx_chn->common.chan_dev.parent = NULL; goto err; } if (xudma_is_pktdma(rx_chn->common.udmax)) { /* prepare the channel device as coherent */ rx_chn->common.chan_dev.dma_coherent = true; dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev, DMA_BIT_MASK(48)); } if (xudma_is_pktdma(rx_chn->common.udmax)) { int flow_start = cfg->flow_id_base; int flow_end; if (flow_start == -1) flow_start = ep_cfg->flow_start; flow_end = flow_start + cfg->flow_id_num - 1; if (flow_start < ep_cfg->flow_start || flow_end > (ep_cfg->flow_start + ep_cfg->flow_num - 1)) { dev_err(dev, "Invalid flow range requested\n"); ret = -EINVAL; goto err; } rx_chn->flow_id_base = flow_start; } else { rx_chn->flow_id_base = cfg->flow_id_base; /* Use RX channel id as flow id: target dev can't generate flow_id */ if (cfg->flow_id_use_rxchan_id) rx_chn->flow_id_base = rx_chn->udma_rchan_id; } rx_chn->flow_num = cfg->flow_id_num; rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, sizeof(*rx_chn->flows), GFP_KERNEL); if (!rx_chn->flows) { ret = -ENOMEM; goto err; } ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg); if (ret) goto err; for (i = 0; i < rx_chn->flow_num; i++) rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; /* request and cfg psi-l */ rx_chn->common.dst_thread = xudma_dev_get_psil_base(rx_chn->common.udmax) + rx_chn->udma_rchan_id; ret = k3_udma_glue_cfg_rx_chn(rx_chn); if (ret) { dev_err(dev, "Failed to cfg rchan %d\n", ret); goto err; } /* init default RX flow only if flow_num = 1 */ if (cfg->def_flow_cfg) { ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg); if (ret) goto err; } k3_udma_glue_dump_rx_chn(rx_chn); return rx_chn; err: k3_udma_glue_release_rx_chn(rx_chn); return ERR_PTR(ret); } static struct k3_udma_glue_rx_channel * k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name, struct k3_udma_glue_rx_channel_cfg *cfg) { struct k3_udma_glue_rx_channel *rx_chn; int ret, i; if (cfg->flow_id_num <= 0 || cfg->flow_id_use_rxchan_id || cfg->def_flow_cfg || cfg->flow_id_base < 0) return ERR_PTR(-EINVAL); /* * Remote RX channel is under control of Remote CPU core, so * Linux can only request and manipulate by dedicated RX flows */ rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL); if (!rx_chn) return ERR_PTR(-ENOMEM); rx_chn->common.dev = dev; rx_chn->common.swdata_size = cfg->swdata_size; rx_chn->remote = true; rx_chn->udma_rchan_id = -1; rx_chn->flow_num = cfg->flow_id_num; rx_chn->flow_id_base = cfg->flow_id_base; rx_chn->psil_paired = false; /* parse of udmap channel */ ret = of_k3_udma_glue_parse_chn(dev->of_node, name, &rx_chn->common, false); if (ret) goto err; rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, rx_chn->common.psdata_size, rx_chn->common.swdata_size); rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, sizeof(*rx_chn->flows), GFP_KERNEL); if (!rx_chn->flows) { ret = -ENOMEM; goto err; } rx_chn->common.chan_dev.class = &k3_udma_glue_devclass; rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax); dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x", rx_chn->common.src_thread); ret = device_register(&rx_chn->common.chan_dev); if (ret) { dev_err(dev, 
"Channel Device registration failed %d\n", ret); put_device(&rx_chn->common.chan_dev); rx_chn->common.chan_dev.parent = NULL; goto err; } if (xudma_is_pktdma(rx_chn->common.udmax)) { /* prepare the channel device as coherent */ rx_chn->common.chan_dev.dma_coherent = true; dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev, DMA_BIT_MASK(48)); } ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg); if (ret) goto err; for (i = 0; i < rx_chn->flow_num; i++) rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; k3_udma_glue_dump_rx_chn(rx_chn); return rx_chn; err: k3_udma_glue_release_rx_chn(rx_chn); return ERR_PTR(ret); } struct k3_udma_glue_rx_channel * k3_udma_glue_request_rx_chn(struct device *dev, const char *name, struct k3_udma_glue_rx_channel_cfg *cfg) { if (cfg->remote) return k3_udma_glue_request_remote_rx_chn(dev, name, cfg); else return k3_udma_glue_request_rx_chn_priv(dev, name, cfg); } EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn); void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) { int i; if (IS_ERR_OR_NULL(rx_chn->common.udmax)) return; if (rx_chn->psil_paired) { xudma_navss_psil_unpair(rx_chn->common.udmax, rx_chn->common.src_thread, rx_chn->common.dst_thread); rx_chn->psil_paired = false; } for (i = 0; i < rx_chn->flow_num; i++) k3_udma_glue_release_rx_flow(rx_chn, i); if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base)) xudma_free_gp_rflow_range(rx_chn->common.udmax, rx_chn->flow_id_base, rx_chn->flow_num); if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx)) xudma_rchan_put(rx_chn->common.udmax, rx_chn->udma_rchanx); if (rx_chn->common.chan_dev.parent) { device_unregister(&rx_chn->common.chan_dev); rx_chn->common.chan_dev.parent = NULL; } } EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn); int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn, u32 flow_idx, struct k3_udma_glue_rx_flow_cfg *flow_cfg) { if (flow_idx >= rx_chn->flow_num) return -EINVAL; return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg); } EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init); u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn, u32 flow_idx) { struct k3_udma_glue_rx_flow *flow; if (flow_idx >= rx_chn->flow_num) return -EINVAL; flow = &rx_chn->flows[flow_idx]; return k3_ringacc_get_ring_id(flow->ringrxfdq); } EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id); u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn) { return rx_chn->flow_id_base; } EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base); int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn, u32 flow_idx) { struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; struct device *dev = rx_chn->common.dev; struct ti_sci_msg_rm_udmap_flow_cfg req; int rx_ring_id; int rx_ringfdq_id; int ret = 0; if (!rx_chn->remote) return -EINVAL; rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx); rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq); memset(&req, 0, sizeof(req)); req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; req.nav_id = tisci_rm->tisci_dev_id; req.flow_index = flow->udma_rflow_id; req.rx_dest_qnum = rx_ring_id; req.rx_fdq0_sz0_qnum = rx_ringfdq_id; req.rx_fdq1_qnum = rx_ringfdq_id; req.rx_fdq2_qnum = rx_ringfdq_id; req.rx_fdq3_qnum = 
rx_ringfdq_id; ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); if (ret) { dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id, ret); } return ret; } EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable); int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn, u32 flow_idx) { struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; struct device *dev = rx_chn->common.dev; struct ti_sci_msg_rm_udmap_flow_cfg req; int ret = 0; if (!rx_chn->remote) return -EINVAL; memset(&req, 0, sizeof(req)); req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; req.nav_id = tisci_rm->tisci_dev_id; req.flow_index = flow->udma_rflow_id; req.rx_dest_qnum = TI_SCI_RESOURCE_NULL; req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL; req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL; req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL; req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL; ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); if (ret) { dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id, ret); } return ret; } EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable); int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) { int ret; if (rx_chn->remote) return -EINVAL; if (rx_chn->flows_ready < rx_chn->flow_num) return -EINVAL; ret = xudma_navss_psil_pair(rx_chn->common.udmax, rx_chn->common.src_thread, rx_chn->common.dst_thread); if (ret) { dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret); return ret; } rx_chn->psil_paired = true; xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, UDMA_CHAN_RT_CTL_EN); xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG, UDMA_PEER_RT_EN_ENABLE); k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en"); return 0; } EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn); void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) { k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1"); xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG, 0); xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0); k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2"); if (rx_chn->psil_paired) { xudma_navss_psil_unpair(rx_chn->common.udmax, rx_chn->common.src_thread, rx_chn->common.dst_thread); rx_chn->psil_paired = false; } } EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn); void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, bool sync) { int i = 0; u32 val; if (rx_chn->remote) return; k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1"); xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG, UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN); val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG); while (sync && (val & UDMA_CHAN_RT_CTL_EN)) { val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG); udelay(1); if (i > K3_UDMAX_TDOWN_TIMEOUT_US) { dev_err(rx_chn->common.dev, "RX tdown timeout\n"); break; } i++; } val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG); if (sync && (val & UDMA_PEER_RT_EN_ENABLE)) dev_err(rx_chn->common.dev, "TX tdown peer not stopped\n"); k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2"); } EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn); void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, u32 flow_num, 
void *data, void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq) { struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; struct device *dev = rx_chn->common.dev; dma_addr_t desc_dma; int occ_rx, i, ret; /* reset RXCQ as it is not input for udma - expected to be empty */ occ_rx = k3_ringacc_ring_get_occ(flow->ringrx); dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx); /* Skip RX FDQ in case one FDQ is used for the set of flows */ if (skip_fdq) goto do_reset; /* * RX FDQ reset need to be special way as it is input for udma and its * state cached by udma, so: * 1) save RX FDQ occ * 2) clean up RX FDQ and call callback .cleanup() for each desc * 3) reset RX FDQ in a special way */ occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq); dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx); for (i = 0; i < occ_rx; i++) { ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma); if (ret) { if (ret != -ENODATA) dev_err(dev, "RX reset pop %d\n", ret); break; } cleanup(data, desc_dma); } k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx); do_reset: k3_ringacc_ring_reset(flow->ringrx); } EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn); int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, u32 flow_num, struct cppi5_host_desc_t *desc_rx, dma_addr_t desc_dma) { struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma); } EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn); int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, u32 flow_num, dma_addr_t *desc_dma) { struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; return k3_ringacc_ring_pop(flow->ringrx, desc_dma); } EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn); int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn, u32 flow_num) { struct k3_udma_glue_rx_flow *flow; flow = &rx_chn->flows[flow_num]; if (xudma_is_pktdma(rx_chn->common.udmax)) { flow->virq = xudma_pktdma_rflow_get_irq(rx_chn->common.udmax, flow->udma_rflow_id); } else { flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx); } return flow->virq; } EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq); struct device * k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn) { if (xudma_is_pktdma(rx_chn->common.udmax) && (rx_chn->common.atype_asel == 14 || rx_chn->common.atype_asel == 15)) return &rx_chn->common.chan_dev; return xudma_get_device(rx_chn->common.udmax); } EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_dma_device); void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn, dma_addr_t *addr) { if (!xudma_is_pktdma(rx_chn->common.udmax) || !rx_chn->common.atype_asel) return; *addr |= (u64)rx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT; } EXPORT_SYMBOL_GPL(k3_udma_glue_rx_dma_to_cppi5_addr); void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn, dma_addr_t *addr) { if (!xudma_is_pktdma(rx_chn->common.udmax) || !rx_chn->common.atype_asel) return; *addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0); } EXPORT_SYMBOL_GPL(k3_udma_glue_rx_cppi5_to_dma_addr); static int __init k3_udma_glue_class_init(void) { return class_register(&k3_udma_glue_devclass); } module_init(k3_udma_glue_class_init); MODULE_LICENSE("GPL v2");
linux-master
drivers/dma/ti/k3-udma-glue.c
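For a sense of how the glue API exported above is used, the sketch below walks a TX channel through the request, get-IRQ, enable and push/pop life cycle that the in-tree K3 networking drivers roughly follow. The dma-name "tx0", the ring sizes, the swdata size and the omitted descriptor-pool/IRQ plumbing are illustrative assumptions rather than values from a real driver, and error unwinding is abbreviated.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/k3-udma-glue.h>

static int example_setup_tx(struct device *dev)
{
	struct k3_udma_glue_tx_channel_cfg cfg = { };
	struct k3_udma_glue_tx_channel *tx_chn;
	int irq, ret;

	/* Ring sizes and swdata size are example values. */
	cfg.swdata_size = 16;
	cfg.tx_cfg.size = 128;
	cfg.txcq_cfg.size = 128;
	cfg.tx_cfg.mode = K3_RINGACC_RING_MODE_RING;
	cfg.txcq_cfg.mode = K3_RINGACC_RING_MODE_RING;
	cfg.tx_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	cfg.txcq_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;

	/* "tx0" must match an entry in the node's dma-names property. */
	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
	if (IS_ERR(tx_chn))
		return PTR_ERR(tx_chn);

	/* Descriptor pools must use the size the glue layer computed. */
	dev_dbg(dev, "hdesc size: %u\n",
		k3_udma_glue_tx_get_hdesc_size(tx_chn));

	irq = k3_udma_glue_tx_get_irq(tx_chn);
	if (irq <= 0) {
		ret = irq ? irq : -ENXIO;
		goto err_release;
	}
	/* request_irq() and CPPI5 descriptor pool setup would go here. */

	ret = k3_udma_glue_enable_tx_chn(tx_chn);
	if (ret)
		goto err_release;

	/*
	 * Steady state: map a buffer, fill a cppi5_host_desc_t and submit it
	 * with k3_udma_glue_push_tx_chn(); in the completion handler, drain
	 * finished descriptors with k3_udma_glue_pop_tx_chn().
	 */
	return 0;

err_release:
	k3_udma_glue_release_tx_chn(tx_chn);
	return ret;
}

Note that k3_udma_glue_push_tx_chn() returns -ENOMEM once the budget taken from cfg.txcq_cfg.size (tracked in free_pkts) is exhausted, so a client must pop completions before pushing more work; a channel that carried traffic is normally quiesced with k3_udma_glue_tdown_tx_chn() and k3_udma_glue_reset_tx_chn() before the final release.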
// SPDX-License-Identifier: GPL-2.0-only #include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/module.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/of_dma.h> #include <linux/of_irq.h> #include <linux/dmapool.h> #include <linux/interrupt.h> #include <linux/of_address.h> #include <linux/pm_runtime.h> #include "../dmaengine.h" #define DESC_TYPE 27 #define DESC_TYPE_HOST 0x10 #define DESC_TYPE_TEARD 0x13 #define TD_DESC_IS_RX (1 << 16) #define TD_DESC_DMA_NUM 10 #define DESC_LENGTH_BITS_NUM 21 #define DESC_TYPE_USB (5 << 26) #define DESC_PD_COMPLETE (1 << 31) /* DMA engine */ #define DMA_TDFDQ 4 #define DMA_TXGCR(x) (0x800 + (x) * 0x20) #define DMA_RXGCR(x) (0x808 + (x) * 0x20) #define RXHPCRA0 4 #define GCR_CHAN_ENABLE (1 << 31) #define GCR_TEARDOWN (1 << 30) #define GCR_STARV_RETRY (1 << 24) #define GCR_DESC_TYPE_HOST (1 << 14) /* DMA scheduler */ #define DMA_SCHED_CTRL 0 #define DMA_SCHED_CTRL_EN (1 << 31) #define DMA_SCHED_WORD(x) ((x) * 4 + 0x800) #define SCHED_ENTRY0_CHAN(x) ((x) << 0) #define SCHED_ENTRY0_IS_RX (1 << 7) #define SCHED_ENTRY1_CHAN(x) ((x) << 8) #define SCHED_ENTRY1_IS_RX (1 << 15) #define SCHED_ENTRY2_CHAN(x) ((x) << 16) #define SCHED_ENTRY2_IS_RX (1 << 23) #define SCHED_ENTRY3_CHAN(x) ((x) << 24) #define SCHED_ENTRY3_IS_RX (1 << 31) /* Queue manager */ /* 4 KiB of memory for descriptors, 2 for each endpoint */ #define ALLOC_DECS_NUM 128 #define DESCS_AREAS 1 #define TOTAL_DESCS_NUM (ALLOC_DECS_NUM * DESCS_AREAS) #define QMGR_SCRATCH_SIZE (TOTAL_DESCS_NUM * 4) #define QMGR_LRAM0_BASE 0x80 #define QMGR_LRAM_SIZE 0x84 #define QMGR_LRAM1_BASE 0x88 #define QMGR_MEMBASE(x) (0x1000 + (x) * 0x10) #define QMGR_MEMCTRL(x) (0x1004 + (x) * 0x10) #define QMGR_MEMCTRL_IDX_SH 16 #define QMGR_MEMCTRL_DESC_SH 8 #define QMGR_PEND(x) (0x90 + (x) * 4) #define QMGR_PENDING_SLOT_Q(x) (x / 32) #define QMGR_PENDING_BIT_Q(x) (x % 32) #define QMGR_QUEUE_A(n) (0x2000 + (n) * 0x10) #define QMGR_QUEUE_B(n) (0x2004 + (n) * 0x10) #define QMGR_QUEUE_C(n) (0x2008 + (n) * 0x10) #define QMGR_QUEUE_D(n) (0x200c + (n) * 0x10) /* Packet Descriptor */ #define PD2_ZERO_LENGTH (1 << 19) struct cppi41_channel { struct dma_chan chan; struct dma_async_tx_descriptor txd; struct cppi41_dd *cdd; struct cppi41_desc *desc; dma_addr_t desc_phys; void __iomem *gcr_reg; int is_tx; u32 residue; unsigned int q_num; unsigned int q_comp_num; unsigned int port_num; unsigned td_retry; unsigned td_queued:1; unsigned td_seen:1; unsigned td_desc_seen:1; struct list_head node; /* Node for pending list */ }; struct cppi41_desc { u32 pd0; u32 pd1; u32 pd2; u32 pd3; u32 pd4; u32 pd5; u32 pd6; u32 pd7; } __aligned(32); struct chan_queues { u16 submit; u16 complete; }; struct cppi41_dd { struct dma_device ddev; void *qmgr_scratch; dma_addr_t scratch_phys; struct cppi41_desc *cd; dma_addr_t descs_phys; u32 first_td_desc; struct cppi41_channel *chan_busy[ALLOC_DECS_NUM]; void __iomem *ctrl_mem; void __iomem *sched_mem; void __iomem *qmgr_mem; unsigned int irq; const struct chan_queues *queues_rx; const struct chan_queues *queues_tx; struct chan_queues td_queue; u16 first_completion_queue; u16 qmgr_num_pend; u32 n_chans; u8 platform; struct list_head pending; /* Pending queued transfers */ spinlock_t lock; /* Lock for pending list */ /* context for suspend/resume */ unsigned int dma_tdfdq; bool is_suspended; }; static struct chan_queues am335x_usb_queues_tx[] = { /* USB0 ENDP 1 */ [ 0] = { .submit = 32, .complete = 93}, [ 1] = { .submit = 
34, .complete = 94}, [ 2] = { .submit = 36, .complete = 95}, [ 3] = { .submit = 38, .complete = 96}, [ 4] = { .submit = 40, .complete = 97}, [ 5] = { .submit = 42, .complete = 98}, [ 6] = { .submit = 44, .complete = 99}, [ 7] = { .submit = 46, .complete = 100}, [ 8] = { .submit = 48, .complete = 101}, [ 9] = { .submit = 50, .complete = 102}, [10] = { .submit = 52, .complete = 103}, [11] = { .submit = 54, .complete = 104}, [12] = { .submit = 56, .complete = 105}, [13] = { .submit = 58, .complete = 106}, [14] = { .submit = 60, .complete = 107}, /* USB1 ENDP1 */ [15] = { .submit = 62, .complete = 125}, [16] = { .submit = 64, .complete = 126}, [17] = { .submit = 66, .complete = 127}, [18] = { .submit = 68, .complete = 128}, [19] = { .submit = 70, .complete = 129}, [20] = { .submit = 72, .complete = 130}, [21] = { .submit = 74, .complete = 131}, [22] = { .submit = 76, .complete = 132}, [23] = { .submit = 78, .complete = 133}, [24] = { .submit = 80, .complete = 134}, [25] = { .submit = 82, .complete = 135}, [26] = { .submit = 84, .complete = 136}, [27] = { .submit = 86, .complete = 137}, [28] = { .submit = 88, .complete = 138}, [29] = { .submit = 90, .complete = 139}, }; static const struct chan_queues am335x_usb_queues_rx[] = { /* USB0 ENDP 1 */ [ 0] = { .submit = 1, .complete = 109}, [ 1] = { .submit = 2, .complete = 110}, [ 2] = { .submit = 3, .complete = 111}, [ 3] = { .submit = 4, .complete = 112}, [ 4] = { .submit = 5, .complete = 113}, [ 5] = { .submit = 6, .complete = 114}, [ 6] = { .submit = 7, .complete = 115}, [ 7] = { .submit = 8, .complete = 116}, [ 8] = { .submit = 9, .complete = 117}, [ 9] = { .submit = 10, .complete = 118}, [10] = { .submit = 11, .complete = 119}, [11] = { .submit = 12, .complete = 120}, [12] = { .submit = 13, .complete = 121}, [13] = { .submit = 14, .complete = 122}, [14] = { .submit = 15, .complete = 123}, /* USB1 ENDP 1 */ [15] = { .submit = 16, .complete = 141}, [16] = { .submit = 17, .complete = 142}, [17] = { .submit = 18, .complete = 143}, [18] = { .submit = 19, .complete = 144}, [19] = { .submit = 20, .complete = 145}, [20] = { .submit = 21, .complete = 146}, [21] = { .submit = 22, .complete = 147}, [22] = { .submit = 23, .complete = 148}, [23] = { .submit = 24, .complete = 149}, [24] = { .submit = 25, .complete = 150}, [25] = { .submit = 26, .complete = 151}, [26] = { .submit = 27, .complete = 152}, [27] = { .submit = 28, .complete = 153}, [28] = { .submit = 29, .complete = 154}, [29] = { .submit = 30, .complete = 155}, }; static const struct chan_queues da8xx_usb_queues_tx[] = { [0] = { .submit = 16, .complete = 24}, [1] = { .submit = 18, .complete = 24}, [2] = { .submit = 20, .complete = 24}, [3] = { .submit = 22, .complete = 24}, }; static const struct chan_queues da8xx_usb_queues_rx[] = { [0] = { .submit = 1, .complete = 26}, [1] = { .submit = 3, .complete = 26}, [2] = { .submit = 5, .complete = 26}, [3] = { .submit = 7, .complete = 26}, }; struct cppi_glue_infos { const struct chan_queues *queues_rx; const struct chan_queues *queues_tx; struct chan_queues td_queue; u16 first_completion_queue; u16 qmgr_num_pend; }; static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c) { return container_of(c, struct cppi41_channel, chan); } static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc) { struct cppi41_channel *c; u32 descs_size; u32 desc_num; descs_size = sizeof(struct cppi41_desc) * ALLOC_DECS_NUM; if (!((desc >= cdd->descs_phys) && (desc < (cdd->descs_phys + descs_size)))) { return NULL; } desc_num = (desc - cdd->descs_phys) 
/ sizeof(struct cppi41_desc); BUG_ON(desc_num >= ALLOC_DECS_NUM); c = cdd->chan_busy[desc_num]; cdd->chan_busy[desc_num] = NULL; /* Usecount for chan_busy[], paired with push_desc_queue() */ pm_runtime_put(cdd->ddev.dev); return c; } static void cppi_writel(u32 val, void *__iomem *mem) { __raw_writel(val, mem); } static u32 cppi_readl(void *__iomem *mem) { return __raw_readl(mem); } static u32 pd_trans_len(u32 val) { return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1); } static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num) { u32 desc; desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num)); desc &= ~0x1f; return desc; } static irqreturn_t cppi41_irq(int irq, void *data) { struct cppi41_dd *cdd = data; u16 first_completion_queue = cdd->first_completion_queue; u16 qmgr_num_pend = cdd->qmgr_num_pend; struct cppi41_channel *c; int i; for (i = QMGR_PENDING_SLOT_Q(first_completion_queue); i < qmgr_num_pend; i++) { u32 val; u32 q_num; val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i)); if (i == QMGR_PENDING_SLOT_Q(first_completion_queue) && val) { u32 mask; /* set corresponding bit for completion Q 93 */ mask = 1 << QMGR_PENDING_BIT_Q(first_completion_queue); /* not set all bits for queues less than Q 93 */ mask--; /* now invert and keep only Q 93+ set */ val &= ~mask; } if (val) __iormb(); while (val) { u32 desc, len; /* * This should never trigger, see the comments in * push_desc_queue() */ WARN_ON(cdd->is_suspended); q_num = __fls(val); val &= ~(1 << q_num); q_num += 32 * i; desc = cppi41_pop_desc(cdd, q_num); c = desc_to_chan(cdd, desc); if (WARN_ON(!c)) { pr_err("%s() q %d desc %08x\n", __func__, q_num, desc); continue; } if (c->desc->pd2 & PD2_ZERO_LENGTH) len = 0; else len = pd_trans_len(c->desc->pd0); c->residue = pd_trans_len(c->desc->pd6) - len; dma_cookie_complete(&c->txd); dmaengine_desc_get_callback_invoke(&c->txd, NULL); } } return IRQ_HANDLED; } static dma_cookie_t cppi41_tx_submit(struct dma_async_tx_descriptor *tx) { dma_cookie_t cookie; cookie = dma_cookie_assign(tx); return cookie; } static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan) { struct cppi41_channel *c = to_cpp41_chan(chan); struct cppi41_dd *cdd = c->cdd; int error; error = pm_runtime_get_sync(cdd->ddev.dev); if (error < 0) { dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n", __func__, error); pm_runtime_put_noidle(cdd->ddev.dev); return error; } dma_cookie_init(chan); dma_async_tx_descriptor_init(&c->txd, chan); c->txd.tx_submit = cppi41_tx_submit; if (!c->is_tx) cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0); pm_runtime_mark_last_busy(cdd->ddev.dev); pm_runtime_put_autosuspend(cdd->ddev.dev); return 0; } static void cppi41_dma_free_chan_resources(struct dma_chan *chan) { struct cppi41_channel *c = to_cpp41_chan(chan); struct cppi41_dd *cdd = c->cdd; int error; error = pm_runtime_get_sync(cdd->ddev.dev); if (error < 0) { pm_runtime_put_noidle(cdd->ddev.dev); return; } WARN_ON(!list_empty(&cdd->pending)); pm_runtime_mark_last_busy(cdd->ddev.dev); pm_runtime_put_autosuspend(cdd->ddev.dev); } static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct cppi41_channel *c = to_cpp41_chan(chan); enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); dma_set_residue(txstate, c->residue); return ret; } static void push_desc_queue(struct cppi41_channel *c) { struct cppi41_dd *cdd = c->cdd; u32 desc_num; u32 desc_phys; u32 reg; c->residue = 0; reg = GCR_CHAN_ENABLE; if (!c->is_tx) { reg |= GCR_STARV_RETRY; reg |= 
GCR_DESC_TYPE_HOST; reg |= c->q_comp_num; } cppi_writel(reg, c->gcr_reg); /* * We don't use writel() but __raw_writel() so we have to make sure * that the DMA descriptor in coherent memory made to the main memory * before starting the dma engine. */ __iowmb(); /* * DMA transfers can take at least 200ms to complete with USB mass * storage connected. To prevent autosuspend timeouts, we must use * pm_runtime_get/put() when chan_busy[] is modified. This will get * cleared in desc_to_chan() or cppi41_stop_chan() depending on the * outcome of the transfer. */ pm_runtime_get(cdd->ddev.dev); desc_phys = lower_32_bits(c->desc_phys); desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc); WARN_ON(cdd->chan_busy[desc_num]); cdd->chan_busy[desc_num] = c; reg = (sizeof(struct cppi41_desc) - 24) / 4; reg |= desc_phys; cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); } /* * Caller must hold cdd->lock to prevent push_desc_queue() * getting called out of order. We have both cppi41_dma_issue_pending() * and cppi41_runtime_resume() call this function. */ static void cppi41_run_queue(struct cppi41_dd *cdd) { struct cppi41_channel *c, *_c; list_for_each_entry_safe(c, _c, &cdd->pending, node) { push_desc_queue(c); list_del(&c->node); } } static void cppi41_dma_issue_pending(struct dma_chan *chan) { struct cppi41_channel *c = to_cpp41_chan(chan); struct cppi41_dd *cdd = c->cdd; unsigned long flags; int error; error = pm_runtime_get(cdd->ddev.dev); if ((error != -EINPROGRESS) && error < 0) { pm_runtime_put_noidle(cdd->ddev.dev); dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n", error); return; } spin_lock_irqsave(&cdd->lock, flags); list_add_tail(&c->node, &cdd->pending); if (!cdd->is_suspended) cppi41_run_queue(cdd); spin_unlock_irqrestore(&cdd->lock, flags); pm_runtime_mark_last_busy(cdd->ddev.dev); pm_runtime_put_autosuspend(cdd->ddev.dev); } static u32 get_host_pd0(u32 length) { u32 reg; reg = DESC_TYPE_HOST << DESC_TYPE; reg |= length; return reg; } static u32 get_host_pd1(struct cppi41_channel *c) { u32 reg; reg = 0; return reg; } static u32 get_host_pd2(struct cppi41_channel *c) { u32 reg; reg = DESC_TYPE_USB; reg |= c->q_comp_num; return reg; } static u32 get_host_pd3(u32 length) { u32 reg; /* PD3 = packet size */ reg = length; return reg; } static u32 get_host_pd6(u32 length) { u32 reg; /* PD6 buffer size */ reg = DESC_PD_COMPLETE; reg |= length; return reg; } static u32 get_host_pd4_or_7(u32 addr) { u32 reg; reg = addr; return reg; } static u32 get_host_pd5(void) { u32 reg; reg = 0; return reg; } static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned sg_len, enum dma_transfer_direction dir, unsigned long tx_flags, void *context) { struct cppi41_channel *c = to_cpp41_chan(chan); struct dma_async_tx_descriptor *txd = NULL; struct cppi41_dd *cdd = c->cdd; struct cppi41_desc *d; struct scatterlist *sg; unsigned int i; int error; error = pm_runtime_get(cdd->ddev.dev); if (error < 0) { pm_runtime_put_noidle(cdd->ddev.dev); return NULL; } if (cdd->is_suspended) goto err_out_not_ready; d = c->desc; for_each_sg(sgl, sg, sg_len, i) { u32 addr; u32 len; /* We need to use more than one desc once musb supports sg */ addr = lower_32_bits(sg_dma_address(sg)); len = sg_dma_len(sg); d->pd0 = get_host_pd0(len); d->pd1 = get_host_pd1(c); d->pd2 = get_host_pd2(c); d->pd3 = get_host_pd3(len); d->pd4 = get_host_pd4_or_7(addr); d->pd5 = get_host_pd5(); d->pd6 = get_host_pd6(len); d->pd7 = get_host_pd4_or_7(addr); d++; } txd = 
&c->txd; err_out_not_ready: pm_runtime_mark_last_busy(cdd->ddev.dev); pm_runtime_put_autosuspend(cdd->ddev.dev); return txd; } static void cppi41_compute_td_desc(struct cppi41_desc *d) { d->pd0 = DESC_TYPE_TEARD << DESC_TYPE; } static int cppi41_tear_down_chan(struct cppi41_channel *c) { struct dmaengine_result abort_result; struct cppi41_dd *cdd = c->cdd; struct cppi41_desc *td; u32 reg; u32 desc_phys; u32 td_desc_phys; td = cdd->cd; td += cdd->first_td_desc; td_desc_phys = cdd->descs_phys; td_desc_phys += cdd->first_td_desc * sizeof(struct cppi41_desc); if (!c->td_queued) { cppi41_compute_td_desc(td); __iowmb(); reg = (sizeof(struct cppi41_desc) - 24) / 4; reg |= td_desc_phys; cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(cdd->td_queue.submit)); reg = GCR_CHAN_ENABLE; if (!c->is_tx) { reg |= GCR_STARV_RETRY; reg |= GCR_DESC_TYPE_HOST; reg |= cdd->td_queue.complete; } reg |= GCR_TEARDOWN; cppi_writel(reg, c->gcr_reg); c->td_queued = 1; c->td_retry = 500; } if (!c->td_seen || !c->td_desc_seen) { desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete); if (!desc_phys && c->is_tx) desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); if (desc_phys == c->desc_phys) { c->td_desc_seen = 1; } else if (desc_phys == td_desc_phys) { u32 pd0; __iormb(); pd0 = td->pd0; WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD); WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX)); WARN_ON((pd0 & 0x1f) != c->port_num); c->td_seen = 1; } else if (desc_phys) { WARN_ON_ONCE(1); } } c->td_retry--; /* * If the TX descriptor / channel is in use, the caller needs to poke * his TD bit multiple times. After that he hardware releases the * transfer descriptor followed by TD descriptor. Waiting seems not to * cause any difference. * RX seems to be thrown out right away. However once the TearDown * descriptor gets through we are done. If we have seen the transfer * descriptor before the TD we fetch it from enqueue, it has to be * there waiting for us. */ if (!c->td_seen && c->td_retry) { udelay(1); return -EAGAIN; } WARN_ON(!c->td_retry); if (!c->td_desc_seen) { desc_phys = cppi41_pop_desc(cdd, c->q_num); if (!desc_phys) desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); WARN_ON(!desc_phys); } c->td_queued = 0; c->td_seen = 0; c->td_desc_seen = 0; cppi_writel(0, c->gcr_reg); /* Invoke the callback to do the necessary clean-up */ abort_result.result = DMA_TRANS_ABORTED; dma_cookie_complete(&c->txd); dmaengine_desc_get_callback_invoke(&c->txd, &abort_result); return 0; } static int cppi41_stop_chan(struct dma_chan *chan) { struct cppi41_channel *c = to_cpp41_chan(chan); struct cppi41_dd *cdd = c->cdd; u32 desc_num; u32 desc_phys; int ret; desc_phys = lower_32_bits(c->desc_phys); desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc); if (!cdd->chan_busy[desc_num]) { struct cppi41_channel *cc, *_ct; /* * channels might still be in the pending list if * cppi41_dma_issue_pending() is called after * cppi41_runtime_suspend() is called */ list_for_each_entry_safe(cc, _ct, &cdd->pending, node) { if (cc != c) continue; list_del(&cc->node); break; } return 0; } ret = cppi41_tear_down_chan(c); if (ret) return ret; WARN_ON(!cdd->chan_busy[desc_num]); cdd->chan_busy[desc_num] = NULL; /* Usecount for chan_busy[], paired with push_desc_queue() */ pm_runtime_put(cdd->ddev.dev); return 0; } static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd) { struct cppi41_channel *cchan, *chans; int i; u32 n_chans = cdd->n_chans; /* * The channels can only be used as TX or as RX. 
So we add twice * that much dma channels because USB can only do RX or TX. */ n_chans *= 2; chans = devm_kcalloc(dev, n_chans, sizeof(*chans), GFP_KERNEL); if (!chans) return -ENOMEM; for (i = 0; i < n_chans; i++) { cchan = &chans[i]; cchan->cdd = cdd; if (i & 1) { cchan->gcr_reg = cdd->ctrl_mem + DMA_TXGCR(i >> 1); cchan->is_tx = 1; } else { cchan->gcr_reg = cdd->ctrl_mem + DMA_RXGCR(i >> 1); cchan->is_tx = 0; } cchan->port_num = i >> 1; cchan->desc = &cdd->cd[i]; cchan->desc_phys = cdd->descs_phys; cchan->desc_phys += i * sizeof(struct cppi41_desc); cchan->chan.device = &cdd->ddev; list_add_tail(&cchan->chan.device_node, &cdd->ddev.channels); } cdd->first_td_desc = n_chans; return 0; } static void purge_descs(struct device *dev, struct cppi41_dd *cdd) { unsigned int mem_decs; int i; mem_decs = ALLOC_DECS_NUM * sizeof(struct cppi41_desc); for (i = 0; i < DESCS_AREAS; i++) { cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i)); cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i)); dma_free_coherent(dev, mem_decs, cdd->cd, cdd->descs_phys); } } static void disable_sched(struct cppi41_dd *cdd) { cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL); } static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd) { disable_sched(cdd); purge_descs(dev, cdd); cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch, cdd->scratch_phys); } static int init_descs(struct device *dev, struct cppi41_dd *cdd) { unsigned int desc_size; unsigned int mem_decs; int i; u32 reg; u32 idx; BUILD_BUG_ON(sizeof(struct cppi41_desc) & (sizeof(struct cppi41_desc) - 1)); BUILD_BUG_ON(sizeof(struct cppi41_desc) < 32); BUILD_BUG_ON(ALLOC_DECS_NUM < 32); desc_size = sizeof(struct cppi41_desc); mem_decs = ALLOC_DECS_NUM * desc_size; idx = 0; for (i = 0; i < DESCS_AREAS; i++) { reg = idx << QMGR_MEMCTRL_IDX_SH; reg |= (ilog2(desc_size) - 5) << QMGR_MEMCTRL_DESC_SH; reg |= ilog2(ALLOC_DECS_NUM) - 5; BUILD_BUG_ON(DESCS_AREAS != 1); cdd->cd = dma_alloc_coherent(dev, mem_decs, &cdd->descs_phys, GFP_KERNEL); if (!cdd->cd) return -ENOMEM; cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i)); cppi_writel(reg, cdd->qmgr_mem + QMGR_MEMCTRL(i)); idx += ALLOC_DECS_NUM; } return 0; } static void init_sched(struct cppi41_dd *cdd) { unsigned ch; unsigned word; u32 reg; word = 0; cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL); for (ch = 0; ch < cdd->n_chans; ch += 2) { reg = SCHED_ENTRY0_CHAN(ch); reg |= SCHED_ENTRY1_CHAN(ch) | SCHED_ENTRY1_IS_RX; reg |= SCHED_ENTRY2_CHAN(ch + 1); reg |= SCHED_ENTRY3_CHAN(ch + 1) | SCHED_ENTRY3_IS_RX; cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word)); word++; } reg = cdd->n_chans * 2 - 1; reg |= DMA_SCHED_CTRL_EN; cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL); } static int init_cppi41(struct device *dev, struct cppi41_dd *cdd) { int ret; BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1)); cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE, &cdd->scratch_phys, GFP_KERNEL); if (!cdd->qmgr_scratch) return -ENOMEM; cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE); cppi_writel(TOTAL_DESCS_NUM, cdd->qmgr_mem + QMGR_LRAM_SIZE); cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); ret = init_descs(dev, cdd); if (ret) goto err_td; cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ); init_sched(cdd); return 0; err_td: deinit_cppi41(dev, cdd); return ret; } static struct platform_driver cpp41_dma_driver; /* * The param format is: * X Y * X: Port * Y: 0 = RX else TX */ 
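/*
 * Purely illustrative sketch of the binding this parameter format implies
 * (the node label "cppi41dma" and the channel names are hypothetical, not
 * taken from a real .dts): a client node would reference channels with the
 * two-cell specifier that cppi41_dma_xlate() below expects (args_count == 2),
 * e.g.
 *
 *	dmas = <&cppi41dma 0 0>, <&cppi41dma 0 1>;
 *	dma-names = "rx0", "tx0";
 *
 * where the first cell is the port (INFO_PORT) and the second cell is
 * 0 for RX, non-zero for TX (INFO_IS_TX).
 */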
#define INFO_PORT 0 #define INFO_IS_TX 1 static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param) { struct cppi41_channel *cchan; struct cppi41_dd *cdd; const struct chan_queues *queues; u32 *num = param; if (chan->device->dev->driver != &cpp41_dma_driver.driver) return false; cchan = to_cpp41_chan(chan); if (cchan->port_num != num[INFO_PORT]) return false; if (cchan->is_tx && !num[INFO_IS_TX]) return false; cdd = cchan->cdd; if (cchan->is_tx) queues = cdd->queues_tx; else queues = cdd->queues_rx; BUILD_BUG_ON(ARRAY_SIZE(am335x_usb_queues_rx) != ARRAY_SIZE(am335x_usb_queues_tx)); if (WARN_ON(cchan->port_num >= ARRAY_SIZE(am335x_usb_queues_rx))) return false; cchan->q_num = queues[cchan->port_num].submit; cchan->q_comp_num = queues[cchan->port_num].complete; return true; } static struct of_dma_filter_info cpp41_dma_info = { .filter_fn = cpp41_dma_filter_fn, }; static struct dma_chan *cppi41_dma_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { int count = dma_spec->args_count; struct of_dma_filter_info *info = ofdma->of_dma_data; if (!info || !info->filter_fn) return NULL; if (count != 2) return NULL; return dma_request_channel(info->dma_cap, info->filter_fn, &dma_spec->args[0]); } static const struct cppi_glue_infos am335x_usb_infos = { .queues_rx = am335x_usb_queues_rx, .queues_tx = am335x_usb_queues_tx, .td_queue = { .submit = 31, .complete = 0 }, .first_completion_queue = 93, .qmgr_num_pend = 5, }; static const struct cppi_glue_infos da8xx_usb_infos = { .queues_rx = da8xx_usb_queues_rx, .queues_tx = da8xx_usb_queues_tx, .td_queue = { .submit = 31, .complete = 0 }, .first_completion_queue = 24, .qmgr_num_pend = 2, }; static const struct of_device_id cppi41_dma_ids[] = { { .compatible = "ti,am3359-cppi41", .data = &am335x_usb_infos}, { .compatible = "ti,da830-cppi41", .data = &da8xx_usb_infos}, {}, }; MODULE_DEVICE_TABLE(of, cppi41_dma_ids); static const struct cppi_glue_infos *get_glue_info(struct device *dev) { const struct of_device_id *of_id; of_id = of_match_node(cppi41_dma_ids, dev->of_node); if (!of_id) return NULL; return of_id->data; } #define CPPI41_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) static int cppi41_dma_probe(struct platform_device *pdev) { struct cppi41_dd *cdd; struct device *dev = &pdev->dev; const struct cppi_glue_infos *glue_info; int index; int irq; int ret; glue_info = get_glue_info(dev); if (!glue_info) return -EINVAL; cdd = devm_kzalloc(&pdev->dev, sizeof(*cdd), GFP_KERNEL); if (!cdd) return -ENOMEM; dma_cap_set(DMA_SLAVE, cdd->ddev.cap_mask); cdd->ddev.device_alloc_chan_resources = cppi41_dma_alloc_chan_resources; cdd->ddev.device_free_chan_resources = cppi41_dma_free_chan_resources; cdd->ddev.device_tx_status = cppi41_dma_tx_status; cdd->ddev.device_issue_pending = cppi41_dma_issue_pending; cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg; cdd->ddev.device_terminate_all = cppi41_stop_chan; cdd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); cdd->ddev.src_addr_widths = CPPI41_DMA_BUSWIDTHS; cdd->ddev.dst_addr_widths = CPPI41_DMA_BUSWIDTHS; cdd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; cdd->ddev.dev = dev; INIT_LIST_HEAD(&cdd->ddev.channels); cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; index = of_property_match_string(dev->of_node, "reg-names", "controller"); if (index < 0) return index; cdd->ctrl_mem = devm_platform_ioremap_resource(pdev, index); if (IS_ERR(cdd->ctrl_mem)) return 
PTR_ERR(cdd->ctrl_mem); cdd->sched_mem = devm_platform_ioremap_resource(pdev, index + 1); if (IS_ERR(cdd->sched_mem)) return PTR_ERR(cdd->sched_mem); cdd->qmgr_mem = devm_platform_ioremap_resource(pdev, index + 2); if (IS_ERR(cdd->qmgr_mem)) return PTR_ERR(cdd->qmgr_mem); spin_lock_init(&cdd->lock); INIT_LIST_HEAD(&cdd->pending); platform_set_drvdata(pdev, cdd); pm_runtime_enable(dev); pm_runtime_set_autosuspend_delay(dev, 100); pm_runtime_use_autosuspend(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) goto err_get_sync; cdd->queues_rx = glue_info->queues_rx; cdd->queues_tx = glue_info->queues_tx; cdd->td_queue = glue_info->td_queue; cdd->qmgr_num_pend = glue_info->qmgr_num_pend; cdd->first_completion_queue = glue_info->first_completion_queue; /* Parse new and deprecated dma-channels properties */ ret = of_property_read_u32(dev->of_node, "dma-channels", &cdd->n_chans); if (ret) ret = of_property_read_u32(dev->of_node, "#dma-channels", &cdd->n_chans); if (ret) goto err_get_n_chans; ret = init_cppi41(dev, cdd); if (ret) goto err_init_cppi; ret = cppi41_add_chans(dev, cdd); if (ret) goto err_chans; irq = irq_of_parse_and_map(dev->of_node, 0); if (!irq) { ret = -EINVAL; goto err_chans; } ret = devm_request_irq(&pdev->dev, irq, cppi41_irq, IRQF_SHARED, dev_name(dev), cdd); if (ret) goto err_chans; cdd->irq = irq; ret = dma_async_device_register(&cdd->ddev); if (ret) goto err_chans; ret = of_dma_controller_register(dev->of_node, cppi41_dma_xlate, &cpp41_dma_info); if (ret) goto err_of; pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; err_of: dma_async_device_unregister(&cdd->ddev); err_chans: deinit_cppi41(dev, cdd); err_init_cppi: pm_runtime_dont_use_autosuspend(dev); err_get_n_chans: err_get_sync: pm_runtime_put_sync(dev); pm_runtime_disable(dev); return ret; } static int cppi41_dma_remove(struct platform_device *pdev) { struct cppi41_dd *cdd = platform_get_drvdata(pdev); int error; error = pm_runtime_get_sync(&pdev->dev); if (error < 0) dev_err(&pdev->dev, "%s could not pm_runtime_get: %i\n", __func__, error); of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&cdd->ddev); devm_free_irq(&pdev->dev, cdd->irq, cdd); deinit_cppi41(&pdev->dev, cdd); pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } static int __maybe_unused cppi41_suspend(struct device *dev) { struct cppi41_dd *cdd = dev_get_drvdata(dev); cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ); disable_sched(cdd); return 0; } static int __maybe_unused cppi41_resume(struct device *dev) { struct cppi41_dd *cdd = dev_get_drvdata(dev); struct cppi41_channel *c; int i; for (i = 0; i < DESCS_AREAS; i++) cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i)); list_for_each_entry(c, &cdd->ddev.channels, chan.device_node) if (!c->is_tx) cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0); init_sched(cdd); cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ); cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE); cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); return 0; } static int __maybe_unused cppi41_runtime_suspend(struct device *dev) { struct cppi41_dd *cdd = dev_get_drvdata(dev); unsigned long flags; spin_lock_irqsave(&cdd->lock, flags); cdd->is_suspended = true; WARN_ON(!list_empty(&cdd->pending)); spin_unlock_irqrestore(&cdd->lock, flags); return 0; } static int __maybe_unused cppi41_runtime_resume(struct device *dev) { struct cppi41_dd 
*cdd = dev_get_drvdata(dev); unsigned long flags; spin_lock_irqsave(&cdd->lock, flags); cdd->is_suspended = false; cppi41_run_queue(cdd); spin_unlock_irqrestore(&cdd->lock, flags); return 0; } static const struct dev_pm_ops cppi41_pm_ops = { SET_LATE_SYSTEM_SLEEP_PM_OPS(cppi41_suspend, cppi41_resume) SET_RUNTIME_PM_OPS(cppi41_runtime_suspend, cppi41_runtime_resume, NULL) }; static struct platform_driver cpp41_dma_driver = { .probe = cppi41_dma_probe, .remove = cppi41_dma_remove, .driver = { .name = "cppi41-dma-engine", .pm = &cppi41_pm_ops, .of_match_table = of_match_ptr(cppi41_dma_ids), }, }; module_platform_driver(cpp41_dma_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Sebastian Andrzej Siewior <[email protected]>");
linux-master
drivers/dma/ti/cppi41.c
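The filter/xlate path above is only half of the picture: a client (such as a USB glue driver) consumes the channel through the generic dmaengine API. The sketch below is a minimal, hypothetical consumer-side example and is not part of cppi41.c; the device pointer, the "tx" dma-names entry and the mapped buffer are assumptions, and only generic dmaengine calls (dma_request_chan(), dmaengine_prep_slave_single(), dmaengine_submit(), dma_async_issue_pending()) are used.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

/* Hypothetical consumer-side helper, not part of the cppi41 driver. */
static int example_start_tx(struct device *dev, dma_addr_t buf, size_t len)
{
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	/* Resolved through cppi41_dma_xlate() via the "dmas"/"dma-names" properties */
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Builds a one-entry SG list and ends up in cppi41_dma_prep_slave_sg() */
	txd = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd) {
		dma_release_channel(chan);
		return -EINVAL;
	}

	cookie = dmaengine_submit(txd);
	/* Kicks cppi41_dma_issue_pending(), which pushes the descriptor queue */
	dma_async_issue_pending(chan);

	return dma_submit_error(cookie);
}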
// SPDX-License-Identifier: GPL-2.0-only /* * TI EDMA DMA engine driver * * Copyright 2012 Texas Instruments */ #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/bitmap.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/of.h> #include <linux/of_dma.h> #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/pm_runtime.h> #include <linux/platform_data/edma.h> #include "../dmaengine.h" #include "../virt-dma.h" /* Offsets matching "struct edmacc_param" */ #define PARM_OPT 0x00 #define PARM_SRC 0x04 #define PARM_A_B_CNT 0x08 #define PARM_DST 0x0c #define PARM_SRC_DST_BIDX 0x10 #define PARM_LINK_BCNTRLD 0x14 #define PARM_SRC_DST_CIDX 0x18 #define PARM_CCNT 0x1c #define PARM_SIZE 0x20 /* Offsets for EDMA CC global channel registers and their shadows */ #define SH_ER 0x00 /* 64 bits */ #define SH_ECR 0x08 /* 64 bits */ #define SH_ESR 0x10 /* 64 bits */ #define SH_CER 0x18 /* 64 bits */ #define SH_EER 0x20 /* 64 bits */ #define SH_EECR 0x28 /* 64 bits */ #define SH_EESR 0x30 /* 64 bits */ #define SH_SER 0x38 /* 64 bits */ #define SH_SECR 0x40 /* 64 bits */ #define SH_IER 0x50 /* 64 bits */ #define SH_IECR 0x58 /* 64 bits */ #define SH_IESR 0x60 /* 64 bits */ #define SH_IPR 0x68 /* 64 bits */ #define SH_ICR 0x70 /* 64 bits */ #define SH_IEVAL 0x78 #define SH_QER 0x80 #define SH_QEER 0x84 #define SH_QEECR 0x88 #define SH_QEESR 0x8c #define SH_QSER 0x90 #define SH_QSECR 0x94 #define SH_SIZE 0x200 /* Offsets for EDMA CC global registers */ #define EDMA_REV 0x0000 #define EDMA_CCCFG 0x0004 #define EDMA_QCHMAP 0x0200 /* 8 registers */ #define EDMA_DMAQNUM 0x0240 /* 8 registers (4 on OMAP-L1xx) */ #define EDMA_QDMAQNUM 0x0260 #define EDMA_QUETCMAP 0x0280 #define EDMA_QUEPRI 0x0284 #define EDMA_EMR 0x0300 /* 64 bits */ #define EDMA_EMCR 0x0308 /* 64 bits */ #define EDMA_QEMR 0x0310 #define EDMA_QEMCR 0x0314 #define EDMA_CCERR 0x0318 #define EDMA_CCERRCLR 0x031c #define EDMA_EEVAL 0x0320 #define EDMA_DRAE 0x0340 /* 4 x 64 bits*/ #define EDMA_QRAE 0x0380 /* 4 registers */ #define EDMA_QUEEVTENTRY 0x0400 /* 2 x 16 registers */ #define EDMA_QSTAT 0x0600 /* 2 registers */ #define EDMA_QWMTHRA 0x0620 #define EDMA_QWMTHRB 0x0624 #define EDMA_CCSTAT 0x0640 #define EDMA_M 0x1000 /* global channel registers */ #define EDMA_ECR 0x1008 #define EDMA_ECRH 0x100C #define EDMA_SHADOW0 0x2000 /* 4 shadow regions */ #define EDMA_PARM 0x4000 /* PaRAM entries */ #define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5)) #define EDMA_DCHMAP 0x0100 /* 64 registers */ /* CCCFG register */ #define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */ #define GET_NUM_QDMACH(x) ((x & 0x70) >> 4) /* bits 4-6 */ #define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */ #define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */ #define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ #define CHMAP_EXIST BIT(24) /* CCSTAT register */ #define EDMA_CCSTAT_ACTV BIT(4) /* * Max of 20 segments per channel to conserve PaRAM slots * Also note that MAX_NR_SG should be at least the no.of periods * that are required for ASoC, otherwise DMA prep calls will * fail. Today davinci-pcm is the only user of this driver and * requires at least 17 slots, so we setup the default to 20. 
*/ #define MAX_NR_SG 20 #define EDMA_MAX_SLOTS MAX_NR_SG #define EDMA_DESCRIPTORS 16 #define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */ #define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */ #define EDMA_CONT_PARAMS_ANY 1001 #define EDMA_CONT_PARAMS_FIXED_EXACT 1002 #define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003 /* * 64bit array registers are split into two 32bit registers: * reg0: channel/event 0-31 * reg1: channel/event 32-63 * * bit 5 in the channel number tells the array index (0/1) * bit 0-4 (0x1f) is the bit offset within the register */ #define EDMA_REG_ARRAY_INDEX(channel) ((channel) >> 5) #define EDMA_CHANNEL_BIT(channel) (BIT((channel) & 0x1f)) /* PaRAM slots are laid out like this */ struct edmacc_param { u32 opt; u32 src; u32 a_b_cnt; u32 dst; u32 src_dst_bidx; u32 link_bcntrld; u32 src_dst_cidx; u32 ccnt; } __packed; /* fields in edmacc_param.opt */ #define SAM BIT(0) #define DAM BIT(1) #define SYNCDIM BIT(2) #define STATIC BIT(3) #define EDMA_FWID (0x07 << 8) #define TCCMODE BIT(11) #define EDMA_TCC(t) ((t) << 12) #define TCINTEN BIT(20) #define ITCINTEN BIT(21) #define TCCHEN BIT(22) #define ITCCHEN BIT(23) struct edma_pset { u32 len; dma_addr_t addr; struct edmacc_param param; }; struct edma_desc { struct virt_dma_desc vdesc; struct list_head node; enum dma_transfer_direction direction; int cyclic; bool polled; int absync; int pset_nr; struct edma_chan *echan; int processed; /* * The following 4 elements are used for residue accounting. * * - processed_stat: the number of SG elements we have traversed * so far to cover accounting. This is updated directly to processed * during edma_callback and is always <= processed, because processed * refers to the number of pending transfer (programmed to EDMA * controller), where as processed_stat tracks number of transfers * accounted for so far. * * - residue: The amount of bytes we have left to transfer for this desc * * - residue_stat: The residue in bytes of data we have covered * so far for accounting. This is updated directly to residue * during callbacks to keep it current. * * - sg_len: Tracks the length of the current intermediate transfer, * this is required to update the residue during intermediate transfer * completion callback. */ int processed_stat; u32 sg_len; u32 residue; u32 residue_stat; struct edma_pset pset[]; }; struct edma_cc; struct edma_tc { struct device_node *node; u16 id; }; struct edma_chan { struct virt_dma_chan vchan; struct list_head node; struct edma_desc *edesc; struct edma_cc *ecc; struct edma_tc *tc; int ch_num; bool alloced; bool hw_triggered; int slot[EDMA_MAX_SLOTS]; int missed; struct dma_slave_config cfg; }; struct edma_cc { struct device *dev; struct edma_soc_info *info; void __iomem *base; int id; bool legacy_mode; /* eDMA3 resource information */ unsigned num_channels; unsigned num_qchannels; unsigned num_region; unsigned num_slots; unsigned num_tc; bool chmap_exist; enum dma_event_q default_queue; unsigned int ccint; unsigned int ccerrint; /* * The slot_inuse bit for each PaRAM slot is clear unless the slot is * in use by Linux or if it is allocated to be used by DSP. */ unsigned long *slot_inuse; /* * For tracking reserved channels used by DSP. * If the bit is cleared, the channel is allocated to be used by DSP * and Linux must not touch it. 
*/ unsigned long *channels_mask; struct dma_device dma_slave; struct dma_device *dma_memcpy; struct edma_chan *slave_chans; struct edma_tc *tc_list; int dummy_slot; }; /* dummy param set used to (re)initialize parameter RAM slots */ static const struct edmacc_param dummy_paramset = { .link_bcntrld = 0xffff, .ccnt = 1, }; #define EDMA_BINDING_LEGACY 0 #define EDMA_BINDING_TPCC 1 static const u32 edma_binding_type[] = { [EDMA_BINDING_LEGACY] = EDMA_BINDING_LEGACY, [EDMA_BINDING_TPCC] = EDMA_BINDING_TPCC, }; static const struct of_device_id edma_of_ids[] = { { .compatible = "ti,edma3", .data = &edma_binding_type[EDMA_BINDING_LEGACY], }, { .compatible = "ti,edma3-tpcc", .data = &edma_binding_type[EDMA_BINDING_TPCC], }, {} }; MODULE_DEVICE_TABLE(of, edma_of_ids); static const struct of_device_id edma_tptc_of_ids[] = { { .compatible = "ti,edma3-tptc", }, {} }; MODULE_DEVICE_TABLE(of, edma_tptc_of_ids); static inline unsigned int edma_read(struct edma_cc *ecc, int offset) { return (unsigned int)__raw_readl(ecc->base + offset); } static inline void edma_write(struct edma_cc *ecc, int offset, int val) { __raw_writel(val, ecc->base + offset); } static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and, unsigned or) { unsigned val = edma_read(ecc, offset); val &= and; val |= or; edma_write(ecc, offset, val); } static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or) { unsigned val = edma_read(ecc, offset); val |= or; edma_write(ecc, offset, val); } static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset, int i) { return edma_read(ecc, offset + (i << 2)); } static inline void edma_write_array(struct edma_cc *ecc, int offset, int i, unsigned val) { edma_write(ecc, offset + (i << 2), val); } static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i, unsigned and, unsigned or) { edma_modify(ecc, offset + (i << 2), and, or); } static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j, unsigned or) { edma_or(ecc, offset + ((i * 2 + j) << 2), or); } static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i, int j, unsigned val) { edma_write(ecc, offset + ((i * 2 + j) << 2), val); } static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc, int offset, int i) { return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2)); } static inline void edma_shadow0_write(struct edma_cc *ecc, int offset, unsigned val) { edma_write(ecc, EDMA_SHADOW0 + offset, val); } static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset, int i, unsigned val) { edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val); } static inline void edma_param_modify(struct edma_cc *ecc, int offset, int param_no, unsigned and, unsigned or) { edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or); } static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no, int priority) { int bit = queue_no * 4; edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit)); } static void edma_set_chmap(struct edma_chan *echan, int slot) { struct edma_cc *ecc = echan->ecc; int channel = EDMA_CHAN_SLOT(echan->ch_num); if (ecc->chmap_exist) { slot = EDMA_CHAN_SLOT(slot); edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5)); } } static void edma_setup_interrupt(struct edma_chan *echan, bool enable) { struct edma_cc *ecc = echan->ecc; int channel = EDMA_CHAN_SLOT(echan->ch_num); int idx = EDMA_REG_ARRAY_INDEX(channel); int ch_bit = EDMA_CHANNEL_BIT(channel); if (enable) { 
edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit); edma_shadow0_write_array(ecc, SH_IESR, idx, ch_bit); } else { edma_shadow0_write_array(ecc, SH_IECR, idx, ch_bit); } } /* * paRAM slot management functions */ static void edma_write_slot(struct edma_cc *ecc, unsigned slot, const struct edmacc_param *param) { slot = EDMA_CHAN_SLOT(slot); if (slot >= ecc->num_slots) return; memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE); } static int edma_read_slot(struct edma_cc *ecc, unsigned slot, struct edmacc_param *param) { slot = EDMA_CHAN_SLOT(slot); if (slot >= ecc->num_slots) return -EINVAL; memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE); return 0; } /** * edma_alloc_slot - allocate DMA parameter RAM * @ecc: pointer to edma_cc struct * @slot: specific slot to allocate; negative for "any unused slot" * * This allocates a parameter RAM slot, initializing it to hold a * dummy transfer. Slots allocated using this routine have not been * mapped to a hardware DMA channel, and will normally be used by * linking to them from a slot associated with a DMA channel. * * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific * slots may be allocated on behalf of DSP firmware. * * Returns the number of the slot, else negative errno. */ static int edma_alloc_slot(struct edma_cc *ecc, int slot) { if (slot >= 0) { slot = EDMA_CHAN_SLOT(slot); /* Requesting entry paRAM slot for a HW triggered channel. */ if (ecc->chmap_exist && slot < ecc->num_channels) slot = EDMA_SLOT_ANY; } if (slot < 0) { if (ecc->chmap_exist) slot = 0; else slot = ecc->num_channels; for (;;) { slot = find_next_zero_bit(ecc->slot_inuse, ecc->num_slots, slot); if (slot == ecc->num_slots) return -ENOMEM; if (!test_and_set_bit(slot, ecc->slot_inuse)) break; } } else if (slot >= ecc->num_slots) { return -EINVAL; } else if (test_and_set_bit(slot, ecc->slot_inuse)) { return -EBUSY; } edma_write_slot(ecc, slot, &dummy_paramset); return EDMA_CTLR_CHAN(ecc->id, slot); } static void edma_free_slot(struct edma_cc *ecc, unsigned slot) { slot = EDMA_CHAN_SLOT(slot); if (slot >= ecc->num_slots) return; edma_write_slot(ecc, slot, &dummy_paramset); clear_bit(slot, ecc->slot_inuse); } /** * edma_link - link one parameter RAM slot to another * @ecc: pointer to edma_cc struct * @from: parameter RAM slot originating the link * @to: parameter RAM slot which is the link target * * The originating slot should not be part of any active DMA transfer. */ static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to) { if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to))) dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n"); from = EDMA_CHAN_SLOT(from); to = EDMA_CHAN_SLOT(to); if (from >= ecc->num_slots || to >= ecc->num_slots) return; edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000, PARM_OFFSET(to)); } /** * edma_get_position - returns the current transfer point * @ecc: pointer to edma_cc struct * @slot: parameter RAM slot being examined * @dst: true selects the dest position, false the source * * Returns the position of the current active slot */ static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot, bool dst) { u32 offs; slot = EDMA_CHAN_SLOT(slot); offs = PARM_OFFSET(slot); offs += dst ? PARM_DST : PARM_SRC; return edma_read(ecc, offs); } /* * Channels with event associations will be triggered by their hardware * events, and channels without such associations will be triggered by * software. 
(At this writing there is no interface for using software * triggers except with channels that don't support hardware triggers.) */ static void edma_start(struct edma_chan *echan) { struct edma_cc *ecc = echan->ecc; int channel = EDMA_CHAN_SLOT(echan->ch_num); int idx = EDMA_REG_ARRAY_INDEX(channel); int ch_bit = EDMA_CHANNEL_BIT(channel); if (!echan->hw_triggered) { /* EDMA channels without event association */ dev_dbg(ecc->dev, "ESR%d %08x\n", idx, edma_shadow0_read_array(ecc, SH_ESR, idx)); edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit); } else { /* EDMA channel with event association */ dev_dbg(ecc->dev, "ER%d %08x\n", idx, edma_shadow0_read_array(ecc, SH_ER, idx)); /* Clear any pending event or error */ edma_write_array(ecc, EDMA_ECR, idx, ch_bit); edma_write_array(ecc, EDMA_EMCR, idx, ch_bit); /* Clear any SER */ edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit); edma_shadow0_write_array(ecc, SH_EESR, idx, ch_bit); dev_dbg(ecc->dev, "EER%d %08x\n", idx, edma_shadow0_read_array(ecc, SH_EER, idx)); } } static void edma_stop(struct edma_chan *echan) { struct edma_cc *ecc = echan->ecc; int channel = EDMA_CHAN_SLOT(echan->ch_num); int idx = EDMA_REG_ARRAY_INDEX(channel); int ch_bit = EDMA_CHANNEL_BIT(channel); edma_shadow0_write_array(ecc, SH_EECR, idx, ch_bit); edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit); edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit); edma_write_array(ecc, EDMA_EMCR, idx, ch_bit); /* clear possibly pending completion interrupt */ edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit); dev_dbg(ecc->dev, "EER%d %08x\n", idx, edma_shadow0_read_array(ecc, SH_EER, idx)); /* REVISIT: consider guarding against inappropriate event * chaining by overwriting with dummy_paramset. */ } /* * Temporarily disable EDMA hardware events on the specified channel, * preventing them from triggering new transfers */ static void edma_pause(struct edma_chan *echan) { int channel = EDMA_CHAN_SLOT(echan->ch_num); edma_shadow0_write_array(echan->ecc, SH_EECR, EDMA_REG_ARRAY_INDEX(channel), EDMA_CHANNEL_BIT(channel)); } /* Re-enable EDMA hardware events on the specified channel. 
*/ static void edma_resume(struct edma_chan *echan) { int channel = EDMA_CHAN_SLOT(echan->ch_num); edma_shadow0_write_array(echan->ecc, SH_EESR, EDMA_REG_ARRAY_INDEX(channel), EDMA_CHANNEL_BIT(channel)); } static void edma_trigger_channel(struct edma_chan *echan) { struct edma_cc *ecc = echan->ecc; int channel = EDMA_CHAN_SLOT(echan->ch_num); int idx = EDMA_REG_ARRAY_INDEX(channel); int ch_bit = EDMA_CHANNEL_BIT(channel); edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit); dev_dbg(ecc->dev, "ESR%d %08x\n", idx, edma_shadow0_read_array(ecc, SH_ESR, idx)); } static void edma_clean_channel(struct edma_chan *echan) { struct edma_cc *ecc = echan->ecc; int channel = EDMA_CHAN_SLOT(echan->ch_num); int idx = EDMA_REG_ARRAY_INDEX(channel); int ch_bit = EDMA_CHANNEL_BIT(channel); dev_dbg(ecc->dev, "EMR%d %08x\n", idx, edma_read_array(ecc, EDMA_EMR, idx)); edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit); /* Clear the corresponding EMR bits */ edma_write_array(ecc, EDMA_EMCR, idx, ch_bit); /* Clear any SER */ edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit); edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0)); } /* Move channel to a specific event queue */ static void edma_assign_channel_eventq(struct edma_chan *echan, enum dma_event_q eventq_no) { struct edma_cc *ecc = echan->ecc; int channel = EDMA_CHAN_SLOT(echan->ch_num); int bit = (channel & 0x7) * 4; /* default to low priority queue */ if (eventq_no == EVENTQ_DEFAULT) eventq_no = ecc->default_queue; if (eventq_no >= ecc->num_tc) return; eventq_no &= 7; edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit), eventq_no << bit); } static int edma_alloc_channel(struct edma_chan *echan, enum dma_event_q eventq_no) { struct edma_cc *ecc = echan->ecc; int channel = EDMA_CHAN_SLOT(echan->ch_num); if (!test_bit(echan->ch_num, ecc->channels_mask)) { dev_err(ecc->dev, "Channel%d is reserved, can not be used!\n", echan->ch_num); return -EINVAL; } /* ensure access through shadow region 0 */ edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel), EDMA_CHANNEL_BIT(channel)); /* ensure no events are pending */ edma_stop(echan); edma_setup_interrupt(echan, true); edma_assign_channel_eventq(echan, eventq_no); return 0; } static void edma_free_channel(struct edma_chan *echan) { /* ensure no events are pending */ edma_stop(echan); /* REVISIT should probably take out of shadow region 0 */ edma_setup_interrupt(echan, false); } static inline struct edma_chan *to_edma_chan(struct dma_chan *c) { return container_of(c, struct edma_chan, vchan.chan); } static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx) { return container_of(tx, struct edma_desc, vdesc.tx); } static void edma_desc_free(struct virt_dma_desc *vdesc) { kfree(container_of(vdesc, struct edma_desc, vdesc)); } /* Dispatch a queued descriptor to the controller (caller holds lock) */ static void edma_execute(struct edma_chan *echan) { struct edma_cc *ecc = echan->ecc; struct virt_dma_desc *vdesc; struct edma_desc *edesc; struct device *dev = echan->vchan.chan.device->dev; int i, j, left, nslots; if (!echan->edesc) { /* Setup is needed for the first transfer */ vdesc = vchan_next_desc(&echan->vchan); if (!vdesc) return; list_del(&vdesc->node); echan->edesc = to_edma_desc(&vdesc->tx); } edesc = echan->edesc; /* Find out how many left */ left = edesc->pset_nr - edesc->processed; nslots = min(MAX_NR_SG, left); edesc->sg_len = 0; /* Write descriptor PaRAM set(s) */ for (i = 0; i < nslots; i++) { j = i + edesc->processed; edma_write_slot(ecc, echan->slot[i], 
&edesc->pset[j].param); edesc->sg_len += edesc->pset[j].len; dev_vdbg(dev, "\n pset[%d]:\n" " chnum\t%d\n" " slot\t%d\n" " opt\t%08x\n" " src\t%08x\n" " dst\t%08x\n" " abcnt\t%08x\n" " ccnt\t%08x\n" " bidx\t%08x\n" " cidx\t%08x\n" " lkrld\t%08x\n", j, echan->ch_num, echan->slot[i], edesc->pset[j].param.opt, edesc->pset[j].param.src, edesc->pset[j].param.dst, edesc->pset[j].param.a_b_cnt, edesc->pset[j].param.ccnt, edesc->pset[j].param.src_dst_bidx, edesc->pset[j].param.src_dst_cidx, edesc->pset[j].param.link_bcntrld); /* Link to the previous slot if not the last set */ if (i != (nslots - 1)) edma_link(ecc, echan->slot[i], echan->slot[i + 1]); } edesc->processed += nslots; /* * If this is either the last set in a set of SG-list transactions * then setup a link to the dummy slot, this results in all future * events being absorbed and that's OK because we're done */ if (edesc->processed == edesc->pset_nr) { if (edesc->cyclic) edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]); else edma_link(ecc, echan->slot[nslots - 1], echan->ecc->dummy_slot); } if (echan->missed) { /* * This happens due to setup times between intermediate * transfers in long SG lists which have to be broken up into * transfers of MAX_NR_SG */ dev_dbg(dev, "missed event on channel %d\n", echan->ch_num); edma_clean_channel(echan); edma_stop(echan); edma_start(echan); edma_trigger_channel(echan); echan->missed = 0; } else if (edesc->processed <= MAX_NR_SG) { dev_dbg(dev, "first transfer starting on channel %d\n", echan->ch_num); edma_start(echan); } else { dev_dbg(dev, "chan: %d: completed %d elements, resuming\n", echan->ch_num, edesc->processed); edma_resume(echan); } } static int edma_terminate_all(struct dma_chan *chan) { struct edma_chan *echan = to_edma_chan(chan); unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&echan->vchan.lock, flags); /* * Stop DMA activity: we assume the callback will not be called * after edma_dma() returns (even if it does, it will see * echan->edesc is NULL and exit.) */ if (echan->edesc) { edma_stop(echan); /* Move the cyclic channel back to default queue */ if (!echan->tc && echan->edesc->cyclic) edma_assign_channel_eventq(echan, EVENTQ_DEFAULT); vchan_terminate_vdesc(&echan->edesc->vdesc); echan->edesc = NULL; } vchan_get_all_descriptors(&echan->vchan, &head); spin_unlock_irqrestore(&echan->vchan.lock, flags); vchan_dma_desc_free_list(&echan->vchan, &head); return 0; } static void edma_synchronize(struct dma_chan *chan) { struct edma_chan *echan = to_edma_chan(chan); vchan_synchronize(&echan->vchan); } static int edma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg) { struct edma_chan *echan = to_edma_chan(chan); if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) return -EINVAL; if (cfg->src_maxburst > chan->device->max_burst || cfg->dst_maxburst > chan->device->max_burst) return -EINVAL; memcpy(&echan->cfg, cfg, sizeof(echan->cfg)); return 0; } static int edma_dma_pause(struct dma_chan *chan) { struct edma_chan *echan = to_edma_chan(chan); if (!echan->edesc) return -EINVAL; edma_pause(echan); return 0; } static int edma_dma_resume(struct dma_chan *chan) { struct edma_chan *echan = to_edma_chan(chan); edma_resume(echan); return 0; } /* * A PaRAM set configuration abstraction used by other modes * @chan: Channel who's PaRAM set we're configuring * @pset: PaRAM set to initialize and setup. 
* @src_addr: Source address of the DMA * @dst_addr: Destination address of the DMA * @burst: In units of dev_width, how much to send * @dev_width: Width of the device access (used as the ACNT value) * @dma_length: Total length of the DMA transfer * @direction: Direction of the transfer */ static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset, dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, unsigned int acnt, unsigned int dma_length, enum dma_transfer_direction direction) { struct edma_chan *echan = to_edma_chan(chan); struct device *dev = chan->device->dev; struct edmacc_param *param = &epset->param; int bcnt, ccnt, cidx; int src_bidx, dst_bidx, src_cidx, dst_cidx; int absync; /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */ if (!burst) burst = 1; /* * If the maxburst is equal to the fifo width, use * A-synced transfers. This allows for large contiguous * buffer transfers using only one PaRAM set. */ if (burst == 1) { /* * For the A-sync case, bcnt and ccnt are the remainder * and quotient respectively of the division of * (dma_length / acnt) by (SZ_64K - 1). This is done so * that in case bcnt overflows, we have ccnt to use. * Note: In A-sync transfers only, bcntrld is used, but it * only applies for sg_dma_len(sg) >= SZ_64K. * In this case, the approach taken is that bcnt for the * first frame will be the remainder computed below. Then for * every successive frame, bcnt will be SZ_64K - 1. This * is ensured by setting bcntrld = 0xffff at the end of this * function. */ absync = false; ccnt = dma_length / acnt / (SZ_64K - 1); bcnt = dma_length / acnt - ccnt * (SZ_64K - 1); /* * If bcnt is non-zero, we have a remainder and hence an * extra frame to transfer, so increment ccnt. */ if (bcnt) ccnt++; else bcnt = SZ_64K - 1; cidx = acnt; } else { /* * If maxburst is greater than the fifo address_width, * use AB-synced transfers where A count is the fifo * address_width and B count is the maxburst. In this * case, we are limited to transfers of C count frames * of (address_width * maxburst) where C count is limited * to SZ_64K - 1. This places an upper bound on the length * of an SG segment that can be handled. */ absync = true; bcnt = burst; ccnt = dma_length / (acnt * bcnt); if (ccnt > (SZ_64K - 1)) { dev_err(dev, "Exceeded max SG segment size\n"); return -EINVAL; } cidx = acnt * bcnt; } epset->len = dma_length; if (direction == DMA_MEM_TO_DEV) { src_bidx = acnt; src_cidx = cidx; dst_bidx = 0; dst_cidx = 0; epset->addr = src_addr; } else if (direction == DMA_DEV_TO_MEM) { src_bidx = 0; src_cidx = 0; dst_bidx = acnt; dst_cidx = cidx; epset->addr = dst_addr; } else if (direction == DMA_MEM_TO_MEM) { src_bidx = acnt; src_cidx = cidx; dst_bidx = acnt; dst_cidx = cidx; epset->addr = src_addr; } else { dev_err(dev, "%s: direction not implemented yet\n", __func__); return -EINVAL; } param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); /* Configure A or AB synchronized transfers */ if (absync) param->opt |= SYNCDIM; param->src = src_addr; param->dst = dst_addr; param->src_dst_bidx = (dst_bidx << 16) | src_bidx; param->src_dst_cidx = (dst_cidx << 16) | src_cidx; param->a_b_cnt = bcnt << 16 | acnt; param->ccnt = ccnt; /* * The only time (bcntrld) auto reload is required is for the * A-sync case, and there the reload value is always SZ_64K - 1. * The 'link' field is initially set to the NULL link (0xffff) * and is later populated by edma_execute().
*/ param->link_bcntrld = 0xffffffff; return absync; } static struct dma_async_tx_descriptor *edma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long tx_flags, void *context) { struct edma_chan *echan = to_edma_chan(chan); struct device *dev = chan->device->dev; struct edma_desc *edesc; dma_addr_t src_addr = 0, dst_addr = 0; enum dma_slave_buswidth dev_width; u32 burst; struct scatterlist *sg; int i, nslots, ret; if (unlikely(!echan || !sgl || !sg_len)) return NULL; if (direction == DMA_DEV_TO_MEM) { src_addr = echan->cfg.src_addr; dev_width = echan->cfg.src_addr_width; burst = echan->cfg.src_maxburst; } else if (direction == DMA_MEM_TO_DEV) { dst_addr = echan->cfg.dst_addr; dev_width = echan->cfg.dst_addr_width; burst = echan->cfg.dst_maxburst; } else { dev_err(dev, "%s: bad direction: %d\n", __func__, direction); return NULL; } if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { dev_err(dev, "%s: Undefined slave buswidth\n", __func__); return NULL; } edesc = kzalloc(struct_size(edesc, pset, sg_len), GFP_ATOMIC); if (!edesc) return NULL; edesc->pset_nr = sg_len; edesc->residue = 0; edesc->direction = direction; edesc->echan = echan; /* Allocate a PaRAM slot, if needed */ nslots = min_t(unsigned, MAX_NR_SG, sg_len); for (i = 0; i < nslots; i++) { if (echan->slot[i] < 0) { echan->slot[i] = edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY); if (echan->slot[i] < 0) { kfree(edesc); dev_err(dev, "%s: Failed to allocate slot\n", __func__); return NULL; } } } /* Configure PaRAM sets for each SG */ for_each_sg(sgl, sg, sg_len, i) { /* Get address for each SG */ if (direction == DMA_DEV_TO_MEM) dst_addr = sg_dma_address(sg); else src_addr = sg_dma_address(sg); ret = edma_config_pset(chan, &edesc->pset[i], src_addr, dst_addr, burst, dev_width, sg_dma_len(sg), direction); if (ret < 0) { kfree(edesc); return NULL; } edesc->absync = ret; edesc->residue += sg_dma_len(sg); if (i == sg_len - 1) /* Enable completion interrupt */ edesc->pset[i].param.opt |= TCINTEN; else if (!((i+1) % MAX_NR_SG)) /* * Enable early completion interrupt for the * intermediateset. In this case the driver will be * notified when the paRAM set is submitted to TC. This * will allow more time to set up the next set of slots. */ edesc->pset[i].param.opt |= (TCINTEN | TCCMODE); } edesc->residue_stat = edesc->residue; return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); } static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long tx_flags) { int ret, nslots; struct edma_desc *edesc; struct device *dev = chan->device->dev; struct edma_chan *echan = to_edma_chan(chan); unsigned int width, pset_len, array_size; if (unlikely(!echan || !len)) return NULL; /* Align the array size (acnt block) with the transfer properties */ switch (__ffs((src | dest | len))) { case 0: array_size = SZ_32K - 1; break; case 1: array_size = SZ_32K - 2; break; default: array_size = SZ_32K - 4; break; } if (len < SZ_64K) { /* * Transfer size less than 64K can be handled with one paRAM * slot and with one burst. * ACNT = length */ width = len; pset_len = len; nslots = 1; } else { /* * Transfer size bigger than 64K will be handled with maximum of * two paRAM slots. * slot1: (full_length / 32767) times 32767 bytes bursts. * ACNT = 32767, length1: (full_length / 32767) * 32767 * slot2: the remaining amount of data after slot1. 
* ACNT = full_length - length1, length2 = ACNT * * When the full_length is a multiple of 32767 one slot can be * used to complete the transfer. */ width = array_size; pset_len = rounddown(len, width); /* One slot is enough for lengths multiple of (SZ_32K -1) */ if (unlikely(pset_len == len)) nslots = 1; else nslots = 2; } edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC); if (!edesc) return NULL; edesc->pset_nr = nslots; edesc->residue = edesc->residue_stat = len; edesc->direction = DMA_MEM_TO_MEM; edesc->echan = echan; ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1, width, pset_len, DMA_MEM_TO_MEM); if (ret < 0) { kfree(edesc); return NULL; } edesc->absync = ret; edesc->pset[0].param.opt |= ITCCHEN; if (nslots == 1) { /* Enable transfer complete interrupt if requested */ if (tx_flags & DMA_PREP_INTERRUPT) edesc->pset[0].param.opt |= TCINTEN; } else { /* Enable transfer complete chaining for the first slot */ edesc->pset[0].param.opt |= TCCHEN; if (echan->slot[1] < 0) { echan->slot[1] = edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY); if (echan->slot[1] < 0) { kfree(edesc); dev_err(dev, "%s: Failed to allocate slot\n", __func__); return NULL; } } dest += pset_len; src += pset_len; pset_len = width = len % array_size; ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1, width, pset_len, DMA_MEM_TO_MEM); if (ret < 0) { kfree(edesc); return NULL; } edesc->pset[1].param.opt |= ITCCHEN; /* Enable transfer complete interrupt if requested */ if (tx_flags & DMA_PREP_INTERRUPT) edesc->pset[1].param.opt |= TCINTEN; } if (!(tx_flags & DMA_PREP_INTERRUPT)) edesc->polled = true; return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); } static struct dma_async_tx_descriptor * edma_prep_dma_interleaved(struct dma_chan *chan, struct dma_interleaved_template *xt, unsigned long tx_flags) { struct device *dev = chan->device->dev; struct edma_chan *echan = to_edma_chan(chan); struct edmacc_param *param; struct edma_desc *edesc; size_t src_icg, dst_icg; int src_bidx, dst_bidx; /* Slave mode is not supported */ if (is_slave_direction(xt->dir)) return NULL; if (xt->frame_size != 1 || xt->numf == 0) return NULL; if (xt->sgl[0].size > SZ_64K || xt->numf > SZ_64K) return NULL; src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]); if (src_icg) { src_bidx = src_icg + xt->sgl[0].size; } else if (xt->src_inc) { src_bidx = xt->sgl[0].size; } else { dev_err(dev, "%s: SRC constant addressing is not supported\n", __func__); return NULL; } dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]); if (dst_icg) { dst_bidx = dst_icg + xt->sgl[0].size; } else if (xt->dst_inc) { dst_bidx = xt->sgl[0].size; } else { dev_err(dev, "%s: DST constant addressing is not supported\n", __func__); return NULL; } if (src_bidx > SZ_64K || dst_bidx > SZ_64K) return NULL; edesc = kzalloc(struct_size(edesc, pset, 1), GFP_ATOMIC); if (!edesc) return NULL; edesc->direction = DMA_MEM_TO_MEM; edesc->echan = echan; edesc->pset_nr = 1; param = &edesc->pset[0].param; param->src = xt->src_start; param->dst = xt->dst_start; param->a_b_cnt = xt->numf << 16 | xt->sgl[0].size; param->ccnt = 1; param->src_dst_bidx = (dst_bidx << 16) | src_bidx; param->src_dst_cidx = 0; param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); param->opt |= ITCCHEN; /* Enable transfer complete interrupt if requested */ if (tx_flags & DMA_PREP_INTERRUPT) param->opt |= TCINTEN; else edesc->polled = true; return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); } static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t 
buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, unsigned long tx_flags) { struct edma_chan *echan = to_edma_chan(chan); struct device *dev = chan->device->dev; struct edma_desc *edesc; dma_addr_t src_addr, dst_addr; enum dma_slave_buswidth dev_width; bool use_intermediate = false; u32 burst; int i, ret, nslots; if (unlikely(!echan || !buf_len || !period_len)) return NULL; if (direction == DMA_DEV_TO_MEM) { src_addr = echan->cfg.src_addr; dst_addr = buf_addr; dev_width = echan->cfg.src_addr_width; burst = echan->cfg.src_maxburst; } else if (direction == DMA_MEM_TO_DEV) { src_addr = buf_addr; dst_addr = echan->cfg.dst_addr; dev_width = echan->cfg.dst_addr_width; burst = echan->cfg.dst_maxburst; } else { dev_err(dev, "%s: bad direction: %d\n", __func__, direction); return NULL; } if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { dev_err(dev, "%s: Undefined slave buswidth\n", __func__); return NULL; } if (unlikely(buf_len % period_len)) { dev_err(dev, "Period should be multiple of Buffer length\n"); return NULL; } nslots = (buf_len / period_len) + 1; /* * Cyclic DMA users such as audio cannot tolerate delays introduced * by cases where the number of periods is more than the maximum * number of SGs the EDMA driver can handle at a time. For DMA types * such as Slave SGs, such delays are tolerable and synchronized, * but the synchronization is difficult to achieve with Cyclic and * cannot be guaranteed, so we error out early. */ if (nslots > MAX_NR_SG) { /* * If the burst and period sizes are the same, we can put * the full buffer into a single period and activate * intermediate interrupts. This will produce interrupts * after each burst, which is also after each desired period. */ if (burst == period_len) { period_len = buf_len; nslots = 2; use_intermediate = true; } else { return NULL; } } edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC); if (!edesc) return NULL; edesc->cyclic = 1; edesc->pset_nr = nslots; edesc->residue = edesc->residue_stat = buf_len; edesc->direction = direction; edesc->echan = echan; dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n", __func__, echan->ch_num, nslots, period_len, buf_len); for (i = 0; i < nslots; i++) { /* Allocate a PaRAM slot, if needed */ if (echan->slot[i] < 0) { echan->slot[i] = edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY); if (echan->slot[i] < 0) { kfree(edesc); dev_err(dev, "%s: Failed to allocate slot\n", __func__); return NULL; } } if (i == nslots - 1) { memcpy(&edesc->pset[i], &edesc->pset[0], sizeof(edesc->pset[0])); break; } ret = edma_config_pset(chan, &edesc->pset[i], src_addr, dst_addr, burst, dev_width, period_len, direction); if (ret < 0) { kfree(edesc); return NULL; } if (direction == DMA_DEV_TO_MEM) dst_addr += period_len; else src_addr += period_len; dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i); dev_vdbg(dev, "\n pset[%d]:\n" " chnum\t%d\n" " slot\t%d\n" " opt\t%08x\n" " src\t%08x\n" " dst\t%08x\n" " abcnt\t%08x\n" " ccnt\t%08x\n" " bidx\t%08x\n" " cidx\t%08x\n" " lkrld\t%08x\n", i, echan->ch_num, echan->slot[i], edesc->pset[i].param.opt, edesc->pset[i].param.src, edesc->pset[i].param.dst, edesc->pset[i].param.a_b_cnt, edesc->pset[i].param.ccnt, edesc->pset[i].param.src_dst_bidx, edesc->pset[i].param.src_dst_cidx, edesc->pset[i].param.link_bcntrld); edesc->absync = ret; /* * Enable period interrupt only if it is requested */ if (tx_flags & DMA_PREP_INTERRUPT) { edesc->pset[i].param.opt |= TCINTEN; /* Also enable intermediate interrupts if necessary */ if 
(use_intermediate) edesc->pset[i].param.opt |= ITCINTEN; } } /* Place the cyclic channel to highest priority queue */ if (!echan->tc) edma_assign_channel_eventq(echan, EVENTQ_0); return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); } static void edma_completion_handler(struct edma_chan *echan) { struct device *dev = echan->vchan.chan.device->dev; struct edma_desc *edesc; spin_lock(&echan->vchan.lock); edesc = echan->edesc; if (edesc) { if (edesc->cyclic) { vchan_cyclic_callback(&edesc->vdesc); spin_unlock(&echan->vchan.lock); return; } else if (edesc->processed == edesc->pset_nr) { edesc->residue = 0; edma_stop(echan); vchan_cookie_complete(&edesc->vdesc); echan->edesc = NULL; dev_dbg(dev, "Transfer completed on channel %d\n", echan->ch_num); } else { dev_dbg(dev, "Sub transfer completed on channel %d\n", echan->ch_num); edma_pause(echan); /* Update statistics for tx_status */ edesc->residue -= edesc->sg_len; edesc->residue_stat = edesc->residue; edesc->processed_stat = edesc->processed; } edma_execute(echan); } spin_unlock(&echan->vchan.lock); } /* eDMA interrupt handler */ static irqreturn_t dma_irq_handler(int irq, void *data) { struct edma_cc *ecc = data; int ctlr; u32 sh_ier; u32 sh_ipr; u32 bank; ctlr = ecc->id; if (ctlr < 0) return IRQ_NONE; dev_vdbg(ecc->dev, "dma_irq_handler\n"); sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0); if (!sh_ipr) { sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1); if (!sh_ipr) return IRQ_NONE; sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1); bank = 1; } else { sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0); bank = 0; } do { u32 slot; u32 channel; slot = __ffs(sh_ipr); sh_ipr &= ~(BIT(slot)); if (sh_ier & BIT(slot)) { channel = (bank << 5) | slot; /* Clear the corresponding IPR bits */ edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot)); edma_completion_handler(&ecc->slave_chans[channel]); } } while (sh_ipr); edma_shadow0_write(ecc, SH_IEVAL, 1); return IRQ_HANDLED; } static void edma_error_handler(struct edma_chan *echan) { struct edma_cc *ecc = echan->ecc; struct device *dev = echan->vchan.chan.device->dev; struct edmacc_param p; int err; if (!echan->edesc) return; spin_lock(&echan->vchan.lock); err = edma_read_slot(ecc, echan->slot[0], &p); /* * Issue later based on missed flag which will be sure * to happen as: * (1) we finished transmitting an intermediate slot and * edma_execute is coming up. * (2) or we finished current transfer and issue will * call edma_execute. * * Important note: issuing can be dangerous here and * lead to some nasty recursion when we are in a NULL * slot. So we avoid doing so and set the missed flag. */ if (err || (p.a_b_cnt == 0 && p.ccnt == 0)) { dev_dbg(dev, "Error on null slot, setting miss\n"); echan->missed = 1; } else { /* * The slot is already programmed but the event got * missed, so its safe to issue it here. 
*/ dev_dbg(dev, "Missed event, TRIGGERING\n"); edma_clean_channel(echan); edma_stop(echan); edma_start(echan); edma_trigger_channel(echan); } spin_unlock(&echan->vchan.lock); } static inline bool edma_error_pending(struct edma_cc *ecc) { if (edma_read_array(ecc, EDMA_EMR, 0) || edma_read_array(ecc, EDMA_EMR, 1) || edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR)) return true; return false; } /* eDMA error interrupt handler */ static irqreturn_t dma_ccerr_handler(int irq, void *data) { struct edma_cc *ecc = data; int i, j; int ctlr; unsigned int cnt = 0; unsigned int val; ctlr = ecc->id; if (ctlr < 0) return IRQ_NONE; dev_vdbg(ecc->dev, "dma_ccerr_handler\n"); if (!edma_error_pending(ecc)) { /* * The registers indicate no pending error event but the irq * handler has been called. * Ask eDMA to re-evaluate the error registers. */ dev_err(ecc->dev, "%s: Error interrupt without error event!\n", __func__); edma_write(ecc, EDMA_EEVAL, 1); return IRQ_NONE; } while (1) { /* Event missed register(s) */ for (j = 0; j < 2; j++) { unsigned long emr; val = edma_read_array(ecc, EDMA_EMR, j); if (!val) continue; dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val); emr = val; for_each_set_bit(i, &emr, 32) { int k = (j << 5) + i; /* Clear the corresponding EMR bits */ edma_write_array(ecc, EDMA_EMCR, j, BIT(i)); /* Clear any SER */ edma_shadow0_write_array(ecc, SH_SECR, j, BIT(i)); edma_error_handler(&ecc->slave_chans[k]); } } val = edma_read(ecc, EDMA_QEMR); if (val) { dev_dbg(ecc->dev, "QEMR 0x%02x\n", val); /* Not reported, just clear the interrupt reason. */ edma_write(ecc, EDMA_QEMCR, val); edma_shadow0_write(ecc, SH_QSECR, val); } val = edma_read(ecc, EDMA_CCERR); if (val) { dev_warn(ecc->dev, "CCERR 0x%08x\n", val); /* Not reported, just clear the interrupt reason. */ edma_write(ecc, EDMA_CCERRCLR, val); } if (!edma_error_pending(ecc)) break; cnt++; if (cnt > 10) break; } edma_write(ecc, EDMA_EEVAL, 1); return IRQ_HANDLED; } /* Alloc channel resources */ static int edma_alloc_chan_resources(struct dma_chan *chan) { struct edma_chan *echan = to_edma_chan(chan); struct edma_cc *ecc = echan->ecc; struct device *dev = ecc->dev; enum dma_event_q eventq_no = EVENTQ_DEFAULT; int ret; if (echan->tc) { eventq_no = echan->tc->id; } else if (ecc->tc_list) { /* memcpy channel */ echan->tc = &ecc->tc_list[ecc->info->default_queue]; eventq_no = echan->tc->id; } ret = edma_alloc_channel(echan, eventq_no); if (ret) return ret; echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num); if (echan->slot[0] < 0) { dev_err(dev, "Entry slot allocation failed for channel %u\n", EDMA_CHAN_SLOT(echan->ch_num)); ret = echan->slot[0]; goto err_slot; } /* Set up channel -> slot mapping for the entry slot */ edma_set_chmap(echan, echan->slot[0]); echan->alloced = true; dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n", EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id, echan->hw_triggered ? 
"HW" : "SW"); return 0; err_slot: edma_free_channel(echan); return ret; } /* Free channel resources */ static void edma_free_chan_resources(struct dma_chan *chan) { struct edma_chan *echan = to_edma_chan(chan); struct device *dev = echan->ecc->dev; int i; /* Terminate transfers */ edma_stop(echan); vchan_free_chan_resources(&echan->vchan); /* Free EDMA PaRAM slots */ for (i = 0; i < EDMA_MAX_SLOTS; i++) { if (echan->slot[i] >= 0) { edma_free_slot(echan->ecc, echan->slot[i]); echan->slot[i] = -1; } } /* Set entry slot to the dummy slot */ edma_set_chmap(echan, echan->ecc->dummy_slot); /* Free EDMA channel */ if (echan->alloced) { edma_free_channel(echan); echan->alloced = false; } echan->tc = NULL; echan->hw_triggered = false; dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n", EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id); } /* Send pending descriptor to hardware */ static void edma_issue_pending(struct dma_chan *chan) { struct edma_chan *echan = to_edma_chan(chan); unsigned long flags; spin_lock_irqsave(&echan->vchan.lock, flags); if (vchan_issue_pending(&echan->vchan) && !echan->edesc) edma_execute(echan); spin_unlock_irqrestore(&echan->vchan.lock, flags); } /* * This limit exists to avoid a possible infinite loop when waiting for proof * that a particular transfer is completed. This limit can be hit if there * are large bursts to/from slow devices or the CPU is never able to catch * the DMA hardware idle. On an AM335x transferring 48 bytes from the UART * RX-FIFO, as many as 55 loops have been seen. */ #define EDMA_MAX_TR_WAIT_LOOPS 1000 static u32 edma_residue(struct edma_desc *edesc) { bool dst = edesc->direction == DMA_DEV_TO_MEM; int loop_count = EDMA_MAX_TR_WAIT_LOOPS; struct edma_chan *echan = edesc->echan; struct edma_pset *pset = edesc->pset; dma_addr_t done, pos, pos_old; int channel = EDMA_CHAN_SLOT(echan->ch_num); int idx = EDMA_REG_ARRAY_INDEX(channel); int ch_bit = EDMA_CHANNEL_BIT(channel); int event_reg; int i; /* * We always read the dst/src position from the first RamPar * pset. That's the one which is active now. */ pos = edma_get_position(echan->ecc, echan->slot[0], dst); /* * "pos" may represent a transfer request that is still being * processed by the EDMACC or EDMATC. We will busy wait until * any one of the situations occurs: * 1. while and event is pending for the channel * 2. a position updated * 3. we hit the loop limit */ if (is_slave_direction(edesc->direction)) event_reg = SH_ER; else event_reg = SH_ESR; pos_old = pos; while (edma_shadow0_read_array(echan->ecc, event_reg, idx) & ch_bit) { pos = edma_get_position(echan->ecc, echan->slot[0], dst); if (pos != pos_old) break; if (!--loop_count) { dev_dbg_ratelimited(echan->vchan.chan.device->dev, "%s: timeout waiting for PaRAM update\n", __func__); break; } cpu_relax(); } /* * Cyclic is simple. Just subtract pset[0].addr from pos. * * We never update edesc->residue in the cyclic case, so we * can tell the remaining room to the end of the circular * buffer. */ if (edesc->cyclic) { done = pos - pset->addr; edesc->residue_stat = edesc->residue - done; return edesc->residue_stat; } /* * If the position is 0, then EDMA loaded the closing dummy slot, the * transfer is completed */ if (!pos) return 0; /* * For SG operation we catch up with the last processed * status. */ pset += edesc->processed_stat; for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) { /* * If we are inside this pset address range, we know * this is the active one. Get the current delta and * stop walking the psets. 
*/ if (pos >= pset->addr && pos < pset->addr + pset->len) return edesc->residue_stat - (pos - pset->addr); /* Otherwise mark it done and update residue_stat. */ edesc->processed_stat++; edesc->residue_stat -= pset->len; } return edesc->residue_stat; } /* Check request completion status */ static enum dma_status edma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct edma_chan *echan = to_edma_chan(chan); struct dma_tx_state txstate_tmp; enum dma_status ret; unsigned long flags; ret = dma_cookie_status(chan, cookie, txstate); if (ret == DMA_COMPLETE) return ret; /* Provide a dummy dma_tx_state for completion checking */ if (!txstate) txstate = &txstate_tmp; spin_lock_irqsave(&echan->vchan.lock, flags); if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) { txstate->residue = edma_residue(echan->edesc); } else { struct virt_dma_desc *vdesc = vchan_find_desc(&echan->vchan, cookie); if (vdesc) txstate->residue = to_edma_desc(&vdesc->tx)->residue; else txstate->residue = 0; } /* * Mark the cookie completed if the residue is 0 for non cyclic * transfers */ if (ret != DMA_COMPLETE && !txstate->residue && echan->edesc && echan->edesc->polled && echan->edesc->vdesc.tx.cookie == cookie) { edma_stop(echan); vchan_cookie_complete(&echan->edesc->vdesc); echan->edesc = NULL; edma_execute(echan); ret = DMA_COMPLETE; } spin_unlock_irqrestore(&echan->vchan.lock, flags); return ret; } static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels) { if (!memcpy_channels) return false; while (*memcpy_channels != -1) { if (*memcpy_channels == ch_num) return true; memcpy_channels++; } return false; } #define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode) { struct dma_device *s_ddev = &ecc->dma_slave; struct dma_device *m_ddev = NULL; s32 *memcpy_channels = ecc->info->memcpy_channels; int i, j; dma_cap_zero(s_ddev->cap_mask); dma_cap_set(DMA_SLAVE, s_ddev->cap_mask); dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask); if (ecc->legacy_mode && !memcpy_channels) { dev_warn(ecc->dev, "Legacy memcpy is enabled, things might not work\n"); dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask); dma_cap_set(DMA_INTERLEAVE, s_ddev->cap_mask); s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy; s_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved; s_ddev->directions = BIT(DMA_MEM_TO_MEM); } s_ddev->device_prep_slave_sg = edma_prep_slave_sg; s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic; s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources; s_ddev->device_free_chan_resources = edma_free_chan_resources; s_ddev->device_issue_pending = edma_issue_pending; s_ddev->device_tx_status = edma_tx_status; s_ddev->device_config = edma_slave_config; s_ddev->device_pause = edma_dma_pause; s_ddev->device_resume = edma_dma_resume; s_ddev->device_terminate_all = edma_terminate_all; s_ddev->device_synchronize = edma_synchronize; s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS; s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS; s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV)); s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; s_ddev->max_burst = SZ_32K - 1; /* CIDX: 16bit signed */ s_ddev->dev = ecc->dev; INIT_LIST_HEAD(&s_ddev->channels); if (memcpy_channels) { m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL); if (!m_ddev) { dev_warn(ecc->dev, "memcpy is disabled due 
to OoM\n"); memcpy_channels = NULL; goto ch_setup; } ecc->dma_memcpy = m_ddev; dma_cap_zero(m_ddev->cap_mask); dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask); dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask); m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy; m_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved; m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources; m_ddev->device_free_chan_resources = edma_free_chan_resources; m_ddev->device_issue_pending = edma_issue_pending; m_ddev->device_tx_status = edma_tx_status; m_ddev->device_config = edma_slave_config; m_ddev->device_pause = edma_dma_pause; m_ddev->device_resume = edma_dma_resume; m_ddev->device_terminate_all = edma_terminate_all; m_ddev->device_synchronize = edma_synchronize; m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS; m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS; m_ddev->directions = BIT(DMA_MEM_TO_MEM); m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; m_ddev->dev = ecc->dev; INIT_LIST_HEAD(&m_ddev->channels); } else if (!ecc->legacy_mode) { dev_info(ecc->dev, "memcpy is disabled\n"); } ch_setup: for (i = 0; i < ecc->num_channels; i++) { struct edma_chan *echan = &ecc->slave_chans[i]; echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i); echan->ecc = ecc; echan->vchan.desc_free = edma_desc_free; if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels)) vchan_init(&echan->vchan, m_ddev); else vchan_init(&echan->vchan, s_ddev); INIT_LIST_HEAD(&echan->node); for (j = 0; j < EDMA_MAX_SLOTS; j++) echan->slot[j] = -1; } } static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata, struct edma_cc *ecc) { int i; u32 value, cccfg; s8 (*queue_priority_map)[2]; /* Decode the eDMA3 configuration from CCCFG register */ cccfg = edma_read(ecc, EDMA_CCCFG); value = GET_NUM_REGN(cccfg); ecc->num_region = BIT(value); value = GET_NUM_DMACH(cccfg); ecc->num_channels = BIT(value + 1); value = GET_NUM_QDMACH(cccfg); ecc->num_qchannels = value * 2; value = GET_NUM_PAENTRY(cccfg); ecc->num_slots = BIT(value + 4); value = GET_NUM_EVQUE(cccfg); ecc->num_tc = value + 1; ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false; dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg); dev_dbg(dev, "num_region: %u\n", ecc->num_region); dev_dbg(dev, "num_channels: %u\n", ecc->num_channels); dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels); dev_dbg(dev, "num_slots: %u\n", ecc->num_slots); dev_dbg(dev, "num_tc: %u\n", ecc->num_tc); dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no"); /* Nothing need to be done if queue priority is provided */ if (pdata->queue_priority_mapping) return 0; /* * Configure TC/queue priority as follows: * Q0 - priority 0 * Q1 - priority 1 * Q2 - priority 2 * ... * The meaning of priority numbers: 0 highest priority, 7 lowest * priority. So Q0 is the highest priority queue and the last queue has * the lowest priority. 
*/ queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8), GFP_KERNEL); if (!queue_priority_map) return -ENOMEM; for (i = 0; i < ecc->num_tc; i++) { queue_priority_map[i][0] = i; queue_priority_map[i][1] = i; } queue_priority_map[i][0] = -1; queue_priority_map[i][1] = -1; pdata->queue_priority_mapping = queue_priority_map; /* Default queue has the lowest priority */ pdata->default_queue = i - 1; return 0; } #if IS_ENABLED(CONFIG_OF) static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata, size_t sz) { const char pname[] = "ti,edma-xbar-event-map"; struct resource res; void __iomem *xbar; s16 (*xbar_chans)[2]; size_t nelm = sz / sizeof(s16); u32 shift, offset, mux; int ret, i; xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL); if (!xbar_chans) return -ENOMEM; ret = of_address_to_resource(dev->of_node, 1, &res); if (ret) return -ENOMEM; xbar = devm_ioremap(dev, res.start, resource_size(&res)); if (!xbar) return -ENOMEM; ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans, nelm); if (ret) return -EIO; /* Invalidate last entry for the other user of this mess */ nelm >>= 1; xbar_chans[nelm][0] = -1; xbar_chans[nelm][1] = -1; for (i = 0; i < nelm; i++) { shift = (xbar_chans[i][1] & 0x03) << 3; offset = xbar_chans[i][1] & 0xfffffffc; mux = readl(xbar + offset); mux &= ~(0xff << shift); mux |= xbar_chans[i][0] << shift; writel(mux, (xbar + offset)); } pdata->xbar_chans = (const s16 (*)[2]) xbar_chans; return 0; } static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, bool legacy_mode) { struct edma_soc_info *info; struct property *prop; int sz, ret; info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL); if (!info) return ERR_PTR(-ENOMEM); if (legacy_mode) { prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map", &sz); if (prop) { ret = edma_xbar_event_map(dev, info, sz); if (ret) return ERR_PTR(ret); } return info; } /* Get the list of channels allocated to be used for memcpy */ prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz); if (prop) { const char pname[] = "ti,edma-memcpy-channels"; size_t nelm = sz / sizeof(s32); s32 *memcpy_ch; memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32), GFP_KERNEL); if (!memcpy_ch) return ERR_PTR(-ENOMEM); ret = of_property_read_u32_array(dev->of_node, pname, (u32 *)memcpy_ch, nelm); if (ret) return ERR_PTR(ret); memcpy_ch[nelm] = -1; info->memcpy_channels = memcpy_ch; } prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges", &sz); if (prop) { const char pname[] = "ti,edma-reserved-slot-ranges"; u32 (*tmp)[2]; s16 (*rsv_slots)[2]; size_t nelm = sz / sizeof(*tmp); struct edma_rsv_info *rsv_info; int i; if (!nelm) return info; tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL); if (!tmp) return ERR_PTR(-ENOMEM); rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL); if (!rsv_info) { kfree(tmp); return ERR_PTR(-ENOMEM); } rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots), GFP_KERNEL); if (!rsv_slots) { kfree(tmp); return ERR_PTR(-ENOMEM); } ret = of_property_read_u32_array(dev->of_node, pname, (u32 *)tmp, nelm * 2); if (ret) { kfree(tmp); return ERR_PTR(ret); } for (i = 0; i < nelm; i++) { rsv_slots[i][0] = tmp[i][0]; rsv_slots[i][1] = tmp[i][1]; } rsv_slots[nelm][0] = -1; rsv_slots[nelm][1] = -1; info->rsv = rsv_info; info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots; kfree(tmp); } return info; } static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { struct edma_cc *ecc = 
ofdma->of_dma_data; struct dma_chan *chan = NULL; struct edma_chan *echan; int i; if (!ecc || dma_spec->args_count < 1) return NULL; for (i = 0; i < ecc->num_channels; i++) { echan = &ecc->slave_chans[i]; if (echan->ch_num == dma_spec->args[0]) { chan = &echan->vchan.chan; break; } } if (!chan) return NULL; if (echan->ecc->legacy_mode && dma_spec->args_count == 1) goto out; if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 && dma_spec->args[1] < echan->ecc->num_tc) { echan->tc = &echan->ecc->tc_list[dma_spec->args[1]]; goto out; } return NULL; out: /* The channel is going to be used as HW synchronized */ echan->hw_triggered = true; return dma_get_slave_channel(chan); } #else static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, bool legacy_mode) { return ERR_PTR(-EINVAL); } static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { return NULL; } #endif static bool edma_filter_fn(struct dma_chan *chan, void *param); static int edma_probe(struct platform_device *pdev) { struct edma_soc_info *info = pdev->dev.platform_data; s8 (*queue_priority_mapping)[2]; const s16 (*reserved)[2]; int i, irq; char *irq_name; struct resource *mem; struct device_node *node = pdev->dev.of_node; struct device *dev = &pdev->dev; struct edma_cc *ecc; bool legacy_mode = true; int ret; if (node) { const struct of_device_id *match; match = of_match_node(edma_of_ids, node); if (match && (*(u32 *)match->data) == EDMA_BINDING_TPCC) legacy_mode = false; info = edma_setup_info_from_dt(dev, legacy_mode); if (IS_ERR(info)) { dev_err(dev, "failed to get DT data\n"); return PTR_ERR(info); } } if (!info) return -ENODEV; ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); if (ret) return ret; ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); if (!ecc) return -ENOMEM; ecc->dev = dev; ecc->id = pdev->id; ecc->legacy_mode = legacy_mode; /* When booting with DT the pdev->id is -1 */ if (ecc->id < 0) ecc->id = 0; mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc"); if (!mem) { dev_dbg(dev, "mem resource not found, using index 0\n"); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(dev, "no mem resource?\n"); return -ENODEV; } } ecc->base = devm_ioremap_resource(dev, mem); if (IS_ERR(ecc->base)) return PTR_ERR(ecc->base); platform_set_drvdata(pdev, ecc); pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "pm_runtime_get_sync() failed\n"); pm_runtime_disable(dev); return ret; } /* Get eDMA3 configuration from IP */ ret = edma_setup_from_hw(dev, info, ecc); if (ret) goto err_disable_pm; /* Allocate memory based on the information we got from the IP */ ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels, sizeof(*ecc->slave_chans), GFP_KERNEL); ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots), sizeof(unsigned long), GFP_KERNEL); ecc->channels_mask = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_channels), sizeof(unsigned long), GFP_KERNEL); if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) { ret = -ENOMEM; goto err_disable_pm; } /* Mark all channels available initially */ bitmap_fill(ecc->channels_mask, ecc->num_channels); ecc->default_queue = info->default_queue; if (info->rsv) { /* Set the reserved slots in inuse list */ reserved = info->rsv->rsv_slots; if (reserved) { for (i = 0; reserved[i][0] != -1; i++) bitmap_set(ecc->slot_inuse, reserved[i][0], reserved[i][1]); } /* Clear channels not usable for Linux */ reserved = info->rsv->rsv_chans; if (reserved) { for 
(i = 0; reserved[i][0] != -1; i++) bitmap_clear(ecc->channels_mask, reserved[i][0], reserved[i][1]); } } for (i = 0; i < ecc->num_slots; i++) { /* Reset only unused - not reserved - paRAM slots */ if (!test_bit(i, ecc->slot_inuse)) edma_write_slot(ecc, i, &dummy_paramset); } irq = platform_get_irq_byname(pdev, "edma3_ccint"); if (irq < 0 && node) irq = irq_of_parse_and_map(node, 0); if (irq >= 0) { irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint", dev_name(dev)); ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name, ecc); if (ret) { dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret); goto err_disable_pm; } ecc->ccint = irq; } irq = platform_get_irq_byname(pdev, "edma3_ccerrint"); if (irq < 0 && node) irq = irq_of_parse_and_map(node, 2); if (irq >= 0) { irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint", dev_name(dev)); ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name, ecc); if (ret) { dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret); goto err_disable_pm; } ecc->ccerrint = irq; } ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY); if (ecc->dummy_slot < 0) { dev_err(dev, "Can't allocate PaRAM dummy slot\n"); ret = ecc->dummy_slot; goto err_disable_pm; } queue_priority_mapping = info->queue_priority_mapping; if (!ecc->legacy_mode) { int lowest_priority = 0; unsigned int array_max; struct of_phandle_args tc_args; ecc->tc_list = devm_kcalloc(dev, ecc->num_tc, sizeof(*ecc->tc_list), GFP_KERNEL); if (!ecc->tc_list) { ret = -ENOMEM; goto err_reg1; } for (i = 0;; i++) { ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs", 1, i, &tc_args); if (ret || i == ecc->num_tc) break; ecc->tc_list[i].node = tc_args.np; ecc->tc_list[i].id = i; queue_priority_mapping[i][1] = tc_args.args[0]; if (queue_priority_mapping[i][1] > lowest_priority) { lowest_priority = queue_priority_mapping[i][1]; info->default_queue = i; } } /* See if we have optional dma-channel-mask array */ array_max = DIV_ROUND_UP(ecc->num_channels, BITS_PER_TYPE(u32)); ret = of_property_read_variable_u32_array(node, "dma-channel-mask", (u32 *)ecc->channels_mask, 1, array_max); if (ret > 0 && ret != array_max) dev_warn(dev, "dma-channel-mask is not complete.\n"); else if (ret == -EOVERFLOW || ret == -ENODATA) dev_warn(dev, "dma-channel-mask is out of range or empty\n"); } /* Event queue priority mapping */ for (i = 0; queue_priority_mapping[i][0] != -1; i++) edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0], queue_priority_mapping[i][1]); edma_write_array2(ecc, EDMA_DRAE, 0, 0, 0x0); edma_write_array2(ecc, EDMA_DRAE, 0, 1, 0x0); edma_write_array(ecc, EDMA_QRAE, 0, 0x0); ecc->info = info; /* Init the dma device and channels */ edma_dma_init(ecc, legacy_mode); for (i = 0; i < ecc->num_channels; i++) { /* Do not touch reserved channels */ if (!test_bit(i, ecc->channels_mask)) continue; /* Assign all channels to the default queue */ edma_assign_channel_eventq(&ecc->slave_chans[i], info->default_queue); /* Set entry slot to the dummy slot */ edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot); } ecc->dma_slave.filter.map = info->slave_map; ecc->dma_slave.filter.mapcnt = info->slavecnt; ecc->dma_slave.filter.fn = edma_filter_fn; ret = dma_async_device_register(&ecc->dma_slave); if (ret) { dev_err(dev, "slave ddev registration failed (%d)\n", ret); goto err_reg1; } if (ecc->dma_memcpy) { ret = dma_async_device_register(ecc->dma_memcpy); if (ret) { dev_err(dev, "memcpy ddev registration failed (%d)\n", ret); dma_async_device_unregister(&ecc->dma_slave); goto err_reg1; } } if (node) 
of_dma_controller_register(node, of_edma_xlate, ecc); dev_info(dev, "TI EDMA DMA engine driver\n"); return 0; err_reg1: edma_free_slot(ecc, ecc->dummy_slot); err_disable_pm: pm_runtime_put_sync(dev); pm_runtime_disable(dev); return ret; } static void edma_cleanupp_vchan(struct dma_device *dmadev) { struct edma_chan *echan, *_echan; list_for_each_entry_safe(echan, _echan, &dmadev->channels, vchan.chan.device_node) { list_del(&echan->vchan.chan.device_node); tasklet_kill(&echan->vchan.task); } } static int edma_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct edma_cc *ecc = dev_get_drvdata(dev); devm_free_irq(dev, ecc->ccint, ecc); devm_free_irq(dev, ecc->ccerrint, ecc); edma_cleanupp_vchan(&ecc->dma_slave); if (dev->of_node) of_dma_controller_free(dev->of_node); dma_async_device_unregister(&ecc->dma_slave); if (ecc->dma_memcpy) dma_async_device_unregister(ecc->dma_memcpy); edma_free_slot(ecc, ecc->dummy_slot); pm_runtime_put_sync(dev); pm_runtime_disable(dev); return 0; } #ifdef CONFIG_PM_SLEEP static int edma_pm_suspend(struct device *dev) { struct edma_cc *ecc = dev_get_drvdata(dev); struct edma_chan *echan = ecc->slave_chans; int i; for (i = 0; i < ecc->num_channels; i++) { if (echan[i].alloced) edma_setup_interrupt(&echan[i], false); } return 0; } static int edma_pm_resume(struct device *dev) { struct edma_cc *ecc = dev_get_drvdata(dev); struct edma_chan *echan = ecc->slave_chans; int i; s8 (*queue_priority_mapping)[2]; /* re initialize dummy slot to dummy param set */ edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset); queue_priority_mapping = ecc->info->queue_priority_mapping; /* Event queue priority mapping */ for (i = 0; queue_priority_mapping[i][0] != -1; i++) edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0], queue_priority_mapping[i][1]); for (i = 0; i < ecc->num_channels; i++) { if (echan[i].alloced) { /* ensure access through shadow region 0 */ edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(i), EDMA_CHANNEL_BIT(i)); edma_setup_interrupt(&echan[i], true); /* Set up channel -> slot mapping for the entry slot */ edma_set_chmap(&echan[i], echan[i].slot[0]); } } return 0; } #endif static const struct dev_pm_ops edma_pm_ops = { SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume) }; static struct platform_driver edma_driver = { .probe = edma_probe, .remove = edma_remove, .driver = { .name = "edma", .pm = &edma_pm_ops, .of_match_table = edma_of_ids, }, }; static int edma_tptc_probe(struct platform_device *pdev) { pm_runtime_enable(&pdev->dev); return pm_runtime_get_sync(&pdev->dev); } static struct platform_driver edma_tptc_driver = { .probe = edma_tptc_probe, .driver = { .name = "edma3-tptc", .of_match_table = edma_tptc_of_ids, }, }; static bool edma_filter_fn(struct dma_chan *chan, void *param) { bool match = false; if (chan->device->dev->driver == &edma_driver.driver) { struct edma_chan *echan = to_edma_chan(chan); unsigned ch_req = *(unsigned *)param; if (ch_req == echan->ch_num) { /* The channel is going to be used as HW synchronized */ echan->hw_triggered = true; match = true; } } return match; } static int edma_init(void) { int ret; ret = platform_driver_register(&edma_tptc_driver); if (ret) return ret; return platform_driver_register(&edma_driver); } subsys_initcall(edma_init); static void __exit edma_exit(void) { platform_driver_unregister(&edma_driver); platform_driver_unregister(&edma_tptc_driver); } module_exit(edma_exit); MODULE_AUTHOR("Matt Porter <[email protected]>"); MODULE_DESCRIPTION("TI EDMA DMA 
engine driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/dma/ti/edma.c
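The edma.c driver above exposes a standard dmaengine slave/cyclic provider (edma_prep_slave_sg, edma_prep_dma_cyclic, edma_tx_status with burst-granularity residue from edma_residue, edma_issue_pending, and so on). As a point of reference, here is a minimal, hypothetical client sketch showing how such a channel is normally consumed through the generic dmaengine API; the device pointer, the "rx" channel name, the buffer and the FIFO address are illustrative assumptions and are not taken from edma.c itself.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/err.h>

/*
 * Minimal sketch of a dmaengine slave client feeding this eDMA provider.
 * "rx", fifo_addr and buf are assumptions made for illustration only.
 */
static int example_edma_rx(struct device *dev, dma_addr_t buf, size_t len,
			   dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 1,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret;

	chan = dma_request_chan(dev, "rx");	/* routed to eDMA via DT or slave map */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);	/* handled by edma_slave_config() */
	if (ret)
		goto out;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EINVAL;
		goto out;
	}

	cookie = dmaengine_submit(desc);	/* queued on the virt-dma list */
	dma_async_issue_pending(chan);		/* edma_issue_pending() starts the transfer */

	/*
	 * A real driver would normally wait on a completion callback; polling
	 * here keeps the sketch short. Residue reporting comes from
	 * edma_residue() through edma_tx_status().
	 */
	ret = dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
out:
	dma_release_channel(chan);
	return ret;
}

On non-DT platforms the same channel lookup is served by the dma_slave_map table and edma_filter_fn that edma_probe() installs in ecc->dma_slave.filter before registering the device.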
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com * Author: Peter Ujfalusi <[email protected]> */ #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/init.h> #include <linux/list.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_dma.h> #include <linux/of_platform.h> #define TI_XBAR_DRA7 0 #define TI_XBAR_AM335X 1 static const u32 ti_xbar_type[] = { [TI_XBAR_DRA7] = TI_XBAR_DRA7, [TI_XBAR_AM335X] = TI_XBAR_AM335X, }; static const struct of_device_id ti_dma_xbar_match[] = { { .compatible = "ti,dra7-dma-crossbar", .data = &ti_xbar_type[TI_XBAR_DRA7], }, { .compatible = "ti,am335x-edma-crossbar", .data = &ti_xbar_type[TI_XBAR_AM335X], }, {}, }; /* Crossbar on AM335x/AM437x family */ #define TI_AM335X_XBAR_LINES 64 struct ti_am335x_xbar_data { void __iomem *iomem; struct dma_router dmarouter; u32 xbar_events; /* maximum number of events to select in xbar */ u32 dma_requests; /* number of DMA requests on eDMA */ }; struct ti_am335x_xbar_map { u16 dma_line; u8 mux_val; }; static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val) { /* * TPCC_EVT_MUX_60_63 register layout is different than the * rest, in the sense, that event 63 is mapped to lowest byte * and event 60 is mapped to highest, handle it separately. */ if (event >= 60 && event <= 63) writeb_relaxed(val, iomem + (63 - event % 4)); else writeb_relaxed(val, iomem + event); } static void ti_am335x_xbar_free(struct device *dev, void *route_data) { struct ti_am335x_xbar_data *xbar = dev_get_drvdata(dev); struct ti_am335x_xbar_map *map = route_data; dev_dbg(dev, "Unmapping XBAR event %u on channel %u\n", map->mux_val, map->dma_line); ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0); kfree(map); } static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev); struct ti_am335x_xbar_map *map; if (dma_spec->args_count != 3) return ERR_PTR(-EINVAL); if (dma_spec->args[2] >= xbar->xbar_events) { dev_err(&pdev->dev, "Invalid XBAR event number: %d\n", dma_spec->args[2]); return ERR_PTR(-EINVAL); } if (dma_spec->args[0] >= xbar->dma_requests) { dev_err(&pdev->dev, "Invalid DMA request line number: %d\n", dma_spec->args[0]); return ERR_PTR(-EINVAL); } /* The of_node_put() will be done in the core for the node */ dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0); if (!dma_spec->np) { dev_err(&pdev->dev, "Can't get DMA master\n"); return ERR_PTR(-EINVAL); } map = kzalloc(sizeof(*map), GFP_KERNEL); if (!map) { of_node_put(dma_spec->np); return ERR_PTR(-ENOMEM); } map->dma_line = (u16)dma_spec->args[0]; map->mux_val = (u8)dma_spec->args[2]; dma_spec->args[2] = 0; dma_spec->args_count = 2; dev_dbg(&pdev->dev, "Mapping XBAR event%u to DMA%u\n", map->mux_val, map->dma_line); ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val); return map; } static const struct of_device_id ti_am335x_master_match[] __maybe_unused = { { .compatible = "ti,edma3-tpcc", }, {}, }; static int ti_am335x_xbar_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; const struct of_device_id *match; struct device_node *dma_node; struct ti_am335x_xbar_data *xbar; void __iomem *iomem; int i, ret; if (!node) return -ENODEV; xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL); if (!xbar) return -ENOMEM; dma_node 
= of_parse_phandle(node, "dma-masters", 0); if (!dma_node) { dev_err(&pdev->dev, "Can't get DMA master node\n"); return -ENODEV; } match = of_match_node(ti_am335x_master_match, dma_node); if (!match) { dev_err(&pdev->dev, "DMA master is not supported\n"); of_node_put(dma_node); return -EINVAL; } if (of_property_read_u32(dma_node, "dma-requests", &xbar->dma_requests)) { dev_info(&pdev->dev, "Missing XBAR output information, using %u.\n", TI_AM335X_XBAR_LINES); xbar->dma_requests = TI_AM335X_XBAR_LINES; } of_node_put(dma_node); if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) { dev_info(&pdev->dev, "Missing XBAR input information, using %u.\n", TI_AM335X_XBAR_LINES); xbar->xbar_events = TI_AM335X_XBAR_LINES; } iomem = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(iomem)) return PTR_ERR(iomem); xbar->iomem = iomem; xbar->dmarouter.dev = &pdev->dev; xbar->dmarouter.route_free = ti_am335x_xbar_free; platform_set_drvdata(pdev, xbar); /* Reset the crossbar */ for (i = 0; i < xbar->dma_requests; i++) ti_am335x_xbar_write(xbar->iomem, i, 0); ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate, &xbar->dmarouter); return ret; } /* Crossbar on DRA7xx family */ #define TI_DRA7_XBAR_OUTPUTS 127 #define TI_DRA7_XBAR_INPUTS 256 struct ti_dra7_xbar_data { void __iomem *iomem; struct dma_router dmarouter; struct mutex mutex; unsigned long *dma_inuse; u16 safe_val; /* Value to rest the crossbar lines */ u32 xbar_requests; /* number of DMA requests connected to XBAR */ u32 dma_requests; /* number of DMA requests forwarded to DMA */ u32 dma_offset; }; struct ti_dra7_xbar_map { u16 xbar_in; int xbar_out; }; static inline void ti_dra7_xbar_write(void __iomem *iomem, int xbar, u16 val) { writew_relaxed(val, iomem + (xbar * 2)); } static void ti_dra7_xbar_free(struct device *dev, void *route_data) { struct ti_dra7_xbar_data *xbar = dev_get_drvdata(dev); struct ti_dra7_xbar_map *map = route_data; dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n", map->xbar_in, map->xbar_out); ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val); mutex_lock(&xbar->mutex); clear_bit(map->xbar_out, xbar->dma_inuse); mutex_unlock(&xbar->mutex); kfree(map); } static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev); struct ti_dra7_xbar_map *map; if (dma_spec->args[0] >= xbar->xbar_requests) { dev_err(&pdev->dev, "Invalid XBAR request number: %d\n", dma_spec->args[0]); put_device(&pdev->dev); return ERR_PTR(-EINVAL); } /* The of_node_put() will be done in the core for the node */ dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0); if (!dma_spec->np) { dev_err(&pdev->dev, "Can't get DMA master\n"); put_device(&pdev->dev); return ERR_PTR(-EINVAL); } map = kzalloc(sizeof(*map), GFP_KERNEL); if (!map) { of_node_put(dma_spec->np); put_device(&pdev->dev); return ERR_PTR(-ENOMEM); } mutex_lock(&xbar->mutex); map->xbar_out = find_first_zero_bit(xbar->dma_inuse, xbar->dma_requests); if (map->xbar_out == xbar->dma_requests) { mutex_unlock(&xbar->mutex); dev_err(&pdev->dev, "Run out of free DMA requests\n"); kfree(map); of_node_put(dma_spec->np); put_device(&pdev->dev); return ERR_PTR(-ENOMEM); } set_bit(map->xbar_out, xbar->dma_inuse); mutex_unlock(&xbar->mutex); map->xbar_in = (u16)dma_spec->args[0]; dma_spec->args[0] = map->xbar_out + xbar->dma_offset; dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n", 
map->xbar_in, map->xbar_out); ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in); return map; } #define TI_XBAR_EDMA_OFFSET 0 #define TI_XBAR_SDMA_OFFSET 1 static const u32 ti_dma_offset[] = { [TI_XBAR_EDMA_OFFSET] = 0, [TI_XBAR_SDMA_OFFSET] = 1, }; static const struct of_device_id ti_dra7_master_match[] __maybe_unused = { { .compatible = "ti,omap4430-sdma", .data = &ti_dma_offset[TI_XBAR_SDMA_OFFSET], }, { .compatible = "ti,edma3", .data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET], }, { .compatible = "ti,edma3-tpcc", .data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET], }, {}, }; static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p) { for (; len > 0; len--) set_bit(offset + (len - 1), p); } static int ti_dra7_xbar_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; const struct of_device_id *match; struct device_node *dma_node; struct ti_dra7_xbar_data *xbar; struct property *prop; u32 safe_val; int sz; void __iomem *iomem; int i, ret; if (!node) return -ENODEV; xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL); if (!xbar) return -ENOMEM; dma_node = of_parse_phandle(node, "dma-masters", 0); if (!dma_node) { dev_err(&pdev->dev, "Can't get DMA master node\n"); return -ENODEV; } match = of_match_node(ti_dra7_master_match, dma_node); if (!match) { dev_err(&pdev->dev, "DMA master is not supported\n"); of_node_put(dma_node); return -EINVAL; } if (of_property_read_u32(dma_node, "dma-requests", &xbar->dma_requests)) { dev_info(&pdev->dev, "Missing XBAR output information, using %u.\n", TI_DRA7_XBAR_OUTPUTS); xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS; } of_node_put(dma_node); xbar->dma_inuse = devm_kcalloc(&pdev->dev, BITS_TO_LONGS(xbar->dma_requests), sizeof(unsigned long), GFP_KERNEL); if (!xbar->dma_inuse) return -ENOMEM; if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) { dev_info(&pdev->dev, "Missing XBAR input information, using %u.\n", TI_DRA7_XBAR_INPUTS); xbar->xbar_requests = TI_DRA7_XBAR_INPUTS; } if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val)) xbar->safe_val = (u16)safe_val; prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz); if (prop) { const char pname[] = "ti,reserved-dma-request-ranges"; u32 (*rsv_events)[2]; size_t nelm = sz / sizeof(*rsv_events); int i; if (!nelm) return -EINVAL; rsv_events = kcalloc(nelm, sizeof(*rsv_events), GFP_KERNEL); if (!rsv_events) return -ENOMEM; ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events, nelm * 2); if (ret) { kfree(rsv_events); return ret; } for (i = 0; i < nelm; i++) { ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1], xbar->dma_inuse); } kfree(rsv_events); } iomem = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(iomem)) return PTR_ERR(iomem); xbar->iomem = iomem; xbar->dmarouter.dev = &pdev->dev; xbar->dmarouter.route_free = ti_dra7_xbar_free; xbar->dma_offset = *(u32 *)match->data; mutex_init(&xbar->mutex); platform_set_drvdata(pdev, xbar); /* Reset the crossbar */ for (i = 0; i < xbar->dma_requests; i++) { if (!test_bit(i, xbar->dma_inuse)) ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val); } ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate, &xbar->dmarouter); if (ret) { /* Restore the defaults for the crossbar */ for (i = 0; i < xbar->dma_requests; i++) { if (!test_bit(i, xbar->dma_inuse)) ti_dra7_xbar_write(xbar->iomem, i, i); } } return ret; } static int ti_dma_xbar_probe(struct platform_device *pdev) { const struct of_device_id *match; int ret; match = of_match_node(ti_dma_xbar_match, 
					pdev->dev.of_node);
	if (unlikely(!match))
		return -EINVAL;

	switch (*(u32 *)match->data) {
	case TI_XBAR_DRA7:
		ret = ti_dra7_xbar_probe(pdev);
		break;
	case TI_XBAR_AM335X:
		ret = ti_am335x_xbar_probe(pdev);
		break;
	default:
		dev_err(&pdev->dev, "Unsupported crossbar\n");
		ret = -ENODEV;
		break;
	}

	return ret;
}

static struct platform_driver ti_dma_xbar_driver = {
	.driver = {
		.name = "ti-dma-crossbar",
		.of_match_table = ti_dma_xbar_match,
	},
	.probe	= ti_dma_xbar_probe,
};

static int omap_dmaxbar_init(void)
{
	return platform_driver_register(&ti_dma_xbar_driver);
}
arch_initcall(omap_dmaxbar_init);
linux-master
drivers/dma/ti/dma-crossbar.c
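ti_am335x_xbar_write() in the crossbar driver above hides one subtlety: the TPCC_EVT_MUX_60_63 register keeps event 63 in its lowest byte and event 60 in its highest, so events 60-63 are written at byte offset 63 - (event % 4) rather than at offset event. The standalone sketch below is an illustration only (not kernel code); it just reproduces that offset calculation so the swizzle can be checked from the command line.

#include <stdio.h>

/*
 * Mirror of the offset logic used by ti_am335x_xbar_write(): every crossbar
 * event maps to the byte at its own index, except events 60-63, whose
 * register stores event 63 in the lowest byte and event 60 in the highest.
 */
static unsigned int am335x_xbar_byte_offset(unsigned int event)
{
	if (event >= 60 && event <= 63)
		return 63 - event % 4;
	return event;
}

int main(void)
{
	unsigned int event;

	for (event = 56; event < 64; event++)
		printf("event %2u -> byte offset %2u\n",
		       event, am335x_xbar_byte_offset(event));
	return 0;
}

The DRA7 path needs no such swizzle: ti_dra7_xbar_write() simply stores a 16-bit mux value at iomem + (xbar * 2) for each crossbar output.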
// SPDX-License-Identifier: GPL-2.0+ /* * Front panel driver for Linux * Copyright (C) 2000-2008, Willy Tarreau <[email protected]> * Copyright (C) 2016-2017 Glider bvba * * This code drives an LCD module (/dev/lcd), and a keypad (/dev/keypad) * connected to a parallel printer port. * * The LCD module may either be an HD44780-like 8-bit parallel LCD, or a 1-bit * serial module compatible with Samsung's KS0074. The pins may be connected in * any combination, everything is programmable. * * The keypad consists in a matrix of push buttons connecting input pins to * data output pins or to the ground. The combinations have to be hard-coded * in the driver, though several profiles exist and adding new ones is easy. * * Several profiles are provided for commonly found LCD+keypad modules on the * market, such as those found in Nexcom's appliances. * * FIXME: * - the initialization/deinitialization process is very dirty and should * be rewritten. It may even be buggy. * * TODO: * - document 24 keys keyboard (3 rows of 8 cols, 32 diodes + 2 inputs) * - make the LCD a part of a virtual screen of Vx*Vy * - make the inputs list smp-safe * - change the keyboard to a double mapping : signals -> key_id -> values * so that applications can change values without knowing signals * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/ctype.h> #include <linux/parport.h> #include <linux/list.h> #include <linux/io.h> #include <linux/uaccess.h> #include "charlcd.h" #include "hd44780_common.h" #define LCD_MAXBYTES 256 /* max burst write */ #define KEYPAD_BUFFER 64 /* poll the keyboard this every second */ #define INPUT_POLL_TIME (HZ / 50) /* a key starts to repeat after this times INPUT_POLL_TIME */ #define KEYPAD_REP_START (10) /* a key repeats this times INPUT_POLL_TIME */ #define KEYPAD_REP_DELAY (2) /* converts an r_str() input to an active high, bits string : 000BAOSE */ #define PNL_PINPUT(a) ((((unsigned char)(a)) ^ 0x7F) >> 3) #define PNL_PBUSY 0x80 /* inverted input, active low */ #define PNL_PACK 0x40 /* direct input, active low */ #define PNL_POUTPA 0x20 /* direct input, active high */ #define PNL_PSELECD 0x10 /* direct input, active high */ #define PNL_PERRORP 0x08 /* direct input, active low */ #define PNL_PBIDIR 0x20 /* bi-directional ports */ /* high to read data in or-ed with data out */ #define PNL_PINTEN 0x10 #define PNL_PSELECP 0x08 /* inverted output, active low */ #define PNL_PINITP 0x04 /* direct output, active low */ #define PNL_PAUTOLF 0x02 /* inverted output, active low */ #define PNL_PSTROBE 0x01 /* inverted output */ #define PNL_PD0 0x01 #define PNL_PD1 0x02 #define PNL_PD2 0x04 #define PNL_PD3 0x08 #define PNL_PD4 0x10 #define PNL_PD5 0x20 #define PNL_PD6 0x40 #define PNL_PD7 0x80 #define PIN_NONE 0 #define PIN_STROBE 1 #define PIN_D0 2 #define PIN_D1 3 #define PIN_D2 4 #define PIN_D3 5 #define PIN_D4 6 #define PIN_D5 7 #define PIN_D6 8 #define PIN_D7 9 #define PIN_AUTOLF 14 #define PIN_INITP 16 #define PIN_SELECP 17 #define PIN_NOT_SET 127 #define NOT_SET -1 /* macros to simplify use of the parallel port */ #define r_ctr(x) (parport_read_control((x)->port)) #define r_dtr(x) (parport_read_data((x)->port)) #define 
r_str(x) (parport_read_status((x)->port)) #define w_ctr(x, y) (parport_write_control((x)->port, (y))) #define w_dtr(x, y) (parport_write_data((x)->port, (y))) /* this defines which bits are to be used and which ones to be ignored */ /* logical or of the output bits involved in the scan matrix */ static __u8 scan_mask_o; /* logical or of the input bits involved in the scan matrix */ static __u8 scan_mask_i; enum input_type { INPUT_TYPE_STD, INPUT_TYPE_KBD, }; enum input_state { INPUT_ST_LOW, INPUT_ST_RISING, INPUT_ST_HIGH, INPUT_ST_FALLING, }; struct logical_input { struct list_head list; __u64 mask; __u64 value; enum input_type type; enum input_state state; __u8 rise_time, fall_time; __u8 rise_timer, fall_timer, high_timer; union { struct { /* valid when type == INPUT_TYPE_STD */ void (*press_fct)(int); void (*release_fct)(int); int press_data; int release_data; } std; struct { /* valid when type == INPUT_TYPE_KBD */ char press_str[sizeof(void *) + sizeof(int)] __nonstring; char repeat_str[sizeof(void *) + sizeof(int)] __nonstring; char release_str[sizeof(void *) + sizeof(int)] __nonstring; } kbd; } u; }; static LIST_HEAD(logical_inputs); /* list of all defined logical inputs */ /* physical contacts history * Physical contacts are a 45 bits string of 9 groups of 5 bits each. * The 8 lower groups correspond to output bits 0 to 7, and the 9th group * corresponds to the ground. * Within each group, bits are stored in the same order as read on the port : * BAPSE (busy=4, ack=3, paper empty=2, select=1, error=0). * So, each __u64 is represented like this : * 0000000000000000000BAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSE * <-----unused------><gnd><d07><d06><d05><d04><d03><d02><d01><d00> */ /* what has just been read from the I/O ports */ static __u64 phys_read; /* previous phys_read */ static __u64 phys_read_prev; /* stabilized phys_read (phys_read|phys_read_prev) */ static __u64 phys_curr; /* previous phys_curr */ static __u64 phys_prev; /* 0 means that at least one logical signal needs be computed */ static char inputs_stable; /* these variables are specific to the keypad */ static struct { bool enabled; } keypad; static char keypad_buffer[KEYPAD_BUFFER]; static int keypad_buflen; static int keypad_start; static char keypressed; static wait_queue_head_t keypad_read_wait; /* lcd-specific variables */ static struct { bool enabled; bool initialized; int charset; int proto; /* TODO: use union here? */ struct { int e; int rs; int rw; int cl; int da; int bl; } pins; struct charlcd *charlcd; } lcd; /* Needed only for init */ static int selected_lcd_type = NOT_SET; /* * Bit masks to convert LCD signals to parallel port outputs. * _d_ are values for data port, _c_ are for control port. 
* [0] = signal OFF, [1] = signal ON, [2] = mask */ #define BIT_CLR 0 #define BIT_SET 1 #define BIT_MSK 2 #define BIT_STATES 3 /* * one entry for each bit on the LCD */ #define LCD_BIT_E 0 #define LCD_BIT_RS 1 #define LCD_BIT_RW 2 #define LCD_BIT_BL 3 #define LCD_BIT_CL 4 #define LCD_BIT_DA 5 #define LCD_BITS 6 /* * each bit can be either connected to a DATA or CTRL port */ #define LCD_PORT_C 0 #define LCD_PORT_D 1 #define LCD_PORTS 2 static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES]; /* * LCD protocols */ #define LCD_PROTO_PARALLEL 0 #define LCD_PROTO_SERIAL 1 #define LCD_PROTO_TI_DA8XX_LCD 2 /* * LCD character sets */ #define LCD_CHARSET_NORMAL 0 #define LCD_CHARSET_KS0074 1 /* * LCD types */ #define LCD_TYPE_NONE 0 #define LCD_TYPE_CUSTOM 1 #define LCD_TYPE_OLD 2 #define LCD_TYPE_KS0074 3 #define LCD_TYPE_HANTRONIX 4 #define LCD_TYPE_NEXCOM 5 /* * keypad types */ #define KEYPAD_TYPE_NONE 0 #define KEYPAD_TYPE_OLD 1 #define KEYPAD_TYPE_NEW 2 #define KEYPAD_TYPE_NEXCOM 3 /* * panel profiles */ #define PANEL_PROFILE_CUSTOM 0 #define PANEL_PROFILE_OLD 1 #define PANEL_PROFILE_NEW 2 #define PANEL_PROFILE_HANTRONIX 3 #define PANEL_PROFILE_NEXCOM 4 #define PANEL_PROFILE_LARGE 5 /* * Construct custom config from the kernel's configuration */ #define DEFAULT_PARPORT 0 #define DEFAULT_PROFILE PANEL_PROFILE_LARGE #define DEFAULT_KEYPAD_TYPE KEYPAD_TYPE_OLD #define DEFAULT_LCD_TYPE LCD_TYPE_OLD #define DEFAULT_LCD_HEIGHT 2 #define DEFAULT_LCD_WIDTH 40 #define DEFAULT_LCD_CHARSET LCD_CHARSET_NORMAL #define DEFAULT_LCD_PROTO LCD_PROTO_PARALLEL #define DEFAULT_LCD_PIN_E PIN_AUTOLF #define DEFAULT_LCD_PIN_RS PIN_SELECP #define DEFAULT_LCD_PIN_RW PIN_INITP #define DEFAULT_LCD_PIN_SCL PIN_STROBE #define DEFAULT_LCD_PIN_SDA PIN_D0 #define DEFAULT_LCD_PIN_BL PIN_NOT_SET #ifdef CONFIG_PANEL_PARPORT #undef DEFAULT_PARPORT #define DEFAULT_PARPORT CONFIG_PANEL_PARPORT #endif #ifdef CONFIG_PANEL_PROFILE #undef DEFAULT_PROFILE #define DEFAULT_PROFILE CONFIG_PANEL_PROFILE #endif #if DEFAULT_PROFILE == 0 /* custom */ #ifdef CONFIG_PANEL_KEYPAD #undef DEFAULT_KEYPAD_TYPE #define DEFAULT_KEYPAD_TYPE CONFIG_PANEL_KEYPAD #endif #ifdef CONFIG_PANEL_LCD #undef DEFAULT_LCD_TYPE #define DEFAULT_LCD_TYPE CONFIG_PANEL_LCD #endif #ifdef CONFIG_PANEL_LCD_HEIGHT #undef DEFAULT_LCD_HEIGHT #define DEFAULT_LCD_HEIGHT CONFIG_PANEL_LCD_HEIGHT #endif #ifdef CONFIG_PANEL_LCD_WIDTH #undef DEFAULT_LCD_WIDTH #define DEFAULT_LCD_WIDTH CONFIG_PANEL_LCD_WIDTH #endif #ifdef CONFIG_PANEL_LCD_BWIDTH #undef DEFAULT_LCD_BWIDTH #define DEFAULT_LCD_BWIDTH CONFIG_PANEL_LCD_BWIDTH #endif #ifdef CONFIG_PANEL_LCD_HWIDTH #undef DEFAULT_LCD_HWIDTH #define DEFAULT_LCD_HWIDTH CONFIG_PANEL_LCD_HWIDTH #endif #ifdef CONFIG_PANEL_LCD_CHARSET #undef DEFAULT_LCD_CHARSET #define DEFAULT_LCD_CHARSET CONFIG_PANEL_LCD_CHARSET #endif #ifdef CONFIG_PANEL_LCD_PROTO #undef DEFAULT_LCD_PROTO #define DEFAULT_LCD_PROTO CONFIG_PANEL_LCD_PROTO #endif #ifdef CONFIG_PANEL_LCD_PIN_E #undef DEFAULT_LCD_PIN_E #define DEFAULT_LCD_PIN_E CONFIG_PANEL_LCD_PIN_E #endif #ifdef CONFIG_PANEL_LCD_PIN_RS #undef DEFAULT_LCD_PIN_RS #define DEFAULT_LCD_PIN_RS CONFIG_PANEL_LCD_PIN_RS #endif #ifdef CONFIG_PANEL_LCD_PIN_RW #undef DEFAULT_LCD_PIN_RW #define DEFAULT_LCD_PIN_RW CONFIG_PANEL_LCD_PIN_RW #endif #ifdef CONFIG_PANEL_LCD_PIN_SCL #undef DEFAULT_LCD_PIN_SCL #define DEFAULT_LCD_PIN_SCL CONFIG_PANEL_LCD_PIN_SCL #endif #ifdef CONFIG_PANEL_LCD_PIN_SDA #undef DEFAULT_LCD_PIN_SDA #define DEFAULT_LCD_PIN_SDA CONFIG_PANEL_LCD_PIN_SDA #endif #ifdef CONFIG_PANEL_LCD_PIN_BL #undef 
DEFAULT_LCD_PIN_BL #define DEFAULT_LCD_PIN_BL CONFIG_PANEL_LCD_PIN_BL #endif #endif /* DEFAULT_PROFILE == 0 */ /* global variables */ /* Device single-open policy control */ static atomic_t keypad_available = ATOMIC_INIT(1); static struct pardevice *pprt; static int keypad_initialized; static DEFINE_SPINLOCK(pprt_lock); static struct timer_list scan_timer; MODULE_DESCRIPTION("Generic parallel port LCD/Keypad driver"); static int parport = DEFAULT_PARPORT; module_param(parport, int, 0000); MODULE_PARM_DESC(parport, "Parallel port index (0=lpt1, 1=lpt2, ...)"); static int profile = DEFAULT_PROFILE; module_param(profile, int, 0000); MODULE_PARM_DESC(profile, "1=16x2 old kp; 2=serial 16x2, new kp; 3=16x2 hantronix; " "4=16x2 nexcom; default=40x2, old kp"); static int keypad_type = NOT_SET; module_param(keypad_type, int, 0000); MODULE_PARM_DESC(keypad_type, "Keypad type: 0=none, 1=old 6 keys, 2=new 6+1 keys, 3=nexcom 4 keys"); static int lcd_type = NOT_SET; module_param(lcd_type, int, 0000); MODULE_PARM_DESC(lcd_type, "LCD type: 0=none, 1=compiled-in, 2=old, 3=serial ks0074, 4=hantronix, 5=nexcom"); static int lcd_height = NOT_SET; module_param(lcd_height, int, 0000); MODULE_PARM_DESC(lcd_height, "Number of lines on the LCD"); static int lcd_width = NOT_SET; module_param(lcd_width, int, 0000); MODULE_PARM_DESC(lcd_width, "Number of columns on the LCD"); static int lcd_bwidth = NOT_SET; /* internal buffer width (usually 40) */ module_param(lcd_bwidth, int, 0000); MODULE_PARM_DESC(lcd_bwidth, "Internal LCD line width (40)"); static int lcd_hwidth = NOT_SET; /* hardware buffer width (usually 64) */ module_param(lcd_hwidth, int, 0000); MODULE_PARM_DESC(lcd_hwidth, "LCD line hardware address (64)"); static int lcd_charset = NOT_SET; module_param(lcd_charset, int, 0000); MODULE_PARM_DESC(lcd_charset, "LCD character set: 0=standard, 1=KS0074"); static int lcd_proto = NOT_SET; module_param(lcd_proto, int, 0000); MODULE_PARM_DESC(lcd_proto, "LCD communication: 0=parallel (//), 1=serial, 2=TI LCD Interface"); /* * These are the parallel port pins the LCD control signals are connected to. * Set this to 0 if the signal is not used. Set it to its opposite value * (negative) if the signal is negated. -MAXINT is used to indicate that the * pin has not been explicitly specified. * * WARNING! no check will be performed about collisions with keypad ! 
*/ static int lcd_e_pin = PIN_NOT_SET; module_param(lcd_e_pin, int, 0000); MODULE_PARM_DESC(lcd_e_pin, "# of the // port pin connected to LCD 'E' signal, with polarity (-17..17)"); static int lcd_rs_pin = PIN_NOT_SET; module_param(lcd_rs_pin, int, 0000); MODULE_PARM_DESC(lcd_rs_pin, "# of the // port pin connected to LCD 'RS' signal, with polarity (-17..17)"); static int lcd_rw_pin = PIN_NOT_SET; module_param(lcd_rw_pin, int, 0000); MODULE_PARM_DESC(lcd_rw_pin, "# of the // port pin connected to LCD 'RW' signal, with polarity (-17..17)"); static int lcd_cl_pin = PIN_NOT_SET; module_param(lcd_cl_pin, int, 0000); MODULE_PARM_DESC(lcd_cl_pin, "# of the // port pin connected to serial LCD 'SCL' signal, with polarity (-17..17)"); static int lcd_da_pin = PIN_NOT_SET; module_param(lcd_da_pin, int, 0000); MODULE_PARM_DESC(lcd_da_pin, "# of the // port pin connected to serial LCD 'SDA' signal, with polarity (-17..17)"); static int lcd_bl_pin = PIN_NOT_SET; module_param(lcd_bl_pin, int, 0000); MODULE_PARM_DESC(lcd_bl_pin, "# of the // port pin connected to LCD backlight, with polarity (-17..17)"); /* Deprecated module parameters - consider not using them anymore */ static int lcd_enabled = NOT_SET; module_param(lcd_enabled, int, 0000); MODULE_PARM_DESC(lcd_enabled, "Deprecated option, use lcd_type instead"); static int keypad_enabled = NOT_SET; module_param(keypad_enabled, int, 0000); MODULE_PARM_DESC(keypad_enabled, "Deprecated option, use keypad_type instead"); /* for some LCD drivers (ks0074) we need a charset conversion table. */ static const unsigned char lcd_char_conv_ks0074[256] = { /* 0|8 1|9 2|A 3|B 4|C 5|D 6|E 7|F */ /* 0x00 */ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x08 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x10 */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x18 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x20 */ 0x20, 0x21, 0x22, 0x23, 0xa2, 0x25, 0x26, 0x27, /* 0x28 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x30 */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x38 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x40 */ 0xa0, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x48 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x50 */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x58 */ 0x58, 0x59, 0x5a, 0xfa, 0xfb, 0xfc, 0x1d, 0xc4, /* 0x60 */ 0x96, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x68 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x70 */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x78 */ 0x78, 0x79, 0x7a, 0xfd, 0xfe, 0xff, 0xce, 0x20, /* 0x80 */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x88 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x90 */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x98 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0xA0 */ 0x20, 0x40, 0xb1, 0xa1, 0x24, 0xa3, 0xfe, 0x5f, /* 0xA8 */ 0x22, 0xc8, 0x61, 0x14, 0x97, 0x2d, 0xad, 0x96, /* 0xB0 */ 0x80, 0x8c, 0x82, 0x83, 0x27, 0x8f, 0x86, 0xdd, /* 0xB8 */ 0x2c, 0x81, 0x6f, 0x15, 0x8b, 0x8a, 0x84, 0x60, /* 0xC0 */ 0xe2, 0xe2, 0xe2, 0x5b, 0x5b, 0xae, 0xbc, 0xa9, /* 0xC8 */ 0xc5, 0xbf, 0xc6, 0xf1, 0xe3, 0xe3, 0xe3, 0xe3, /* 0xD0 */ 0x44, 0x5d, 0xa8, 0xe4, 0xec, 0xec, 0x5c, 0x78, /* 0xD8 */ 0xab, 0xa6, 0xe5, 0x5e, 0x5e, 0xe6, 0xaa, 0xbe, /* 0xE0 */ 0x7f, 0xe7, 0xaf, 0x7b, 0x7b, 0xaf, 0xbd, 0xc8, /* 0xE8 */ 0xa4, 0xa5, 0xc7, 0xf6, 0xa7, 0xe8, 0x69, 0x69, /* 0xF0 */ 0xed, 0x7d, 0xa8, 0xe4, 0xec, 0x5c, 0x5c, 0x25, /* 0xF8 */ 0xac, 0xa6, 0xea, 0xef, 0x7e, 0xeb, 0xb2, 0x79, }; static const char 
old_keypad_profile[][4][9] = { {"S0", "Left\n", "Left\n", ""}, {"S1", "Down\n", "Down\n", ""}, {"S2", "Up\n", "Up\n", ""}, {"S3", "Right\n", "Right\n", ""}, {"S4", "Esc\n", "Esc\n", ""}, {"S5", "Ret\n", "Ret\n", ""}, {"", "", "", ""} }; /* signals, press, repeat, release */ static const char new_keypad_profile[][4][9] = { {"S0", "Left\n", "Left\n", ""}, {"S1", "Down\n", "Down\n", ""}, {"S2", "Up\n", "Up\n", ""}, {"S3", "Right\n", "Right\n", ""}, {"S4s5", "", "Esc\n", "Esc\n"}, {"s4S5", "", "Ret\n", "Ret\n"}, {"S4S5", "Help\n", "", ""}, /* add new signals above this line */ {"", "", "", ""} }; /* signals, press, repeat, release */ static const char nexcom_keypad_profile[][4][9] = { {"a-p-e-", "Down\n", "Down\n", ""}, {"a-p-E-", "Ret\n", "Ret\n", ""}, {"a-P-E-", "Esc\n", "Esc\n", ""}, {"a-P-e-", "Up\n", "Up\n", ""}, /* add new signals above this line */ {"", "", "", ""} }; static const char (*keypad_profile)[4][9] = old_keypad_profile; static DECLARE_BITMAP(bits, LCD_BITS); static void lcd_get_bits(unsigned int port, int *val) { unsigned int bit, state; for (bit = 0; bit < LCD_BITS; bit++) { state = test_bit(bit, bits) ? BIT_SET : BIT_CLR; *val &= lcd_bits[port][bit][BIT_MSK]; *val |= lcd_bits[port][bit][state]; } } /* sets data port bits according to current signals values */ static int set_data_bits(void) { int val; val = r_dtr(pprt); lcd_get_bits(LCD_PORT_D, &val); w_dtr(pprt, val); return val; } /* sets ctrl port bits according to current signals values */ static int set_ctrl_bits(void) { int val; val = r_ctr(pprt); lcd_get_bits(LCD_PORT_C, &val); w_ctr(pprt, val); return val; } /* sets ctrl & data port bits according to current signals values */ static void panel_set_bits(void) { set_data_bits(); set_ctrl_bits(); } /* * Converts a parallel port pin (from -25 to 25) to data and control ports * masks, and data and control port bits. The signal will be considered * unconnected if it's on pin 0 or an invalid pin (<-25 or >25). * * Result will be used this way : * out(dport, in(dport) & d_val[2] | d_val[signal_state]) * out(cport, in(cport) & c_val[2] | c_val[signal_state]) */ static void pin_to_bits(int pin, unsigned char *d_val, unsigned char *c_val) { int d_bit, c_bit, inv; d_val[0] = 0; c_val[0] = 0; d_val[1] = 0; c_val[1] = 0; d_val[2] = 0xFF; c_val[2] = 0xFF; if (pin == 0) return; inv = (pin < 0); if (inv) pin = -pin; d_bit = 0; c_bit = 0; switch (pin) { case PIN_STROBE: /* strobe, inverted */ c_bit = PNL_PSTROBE; inv = !inv; break; case PIN_D0...PIN_D7: /* D0 - D7 = 2 - 9 */ d_bit = 1 << (pin - 2); break; case PIN_AUTOLF: /* autofeed, inverted */ c_bit = PNL_PAUTOLF; inv = !inv; break; case PIN_INITP: /* init, direct */ c_bit = PNL_PINITP; break; case PIN_SELECP: /* select_in, inverted */ c_bit = PNL_PSELECP; inv = !inv; break; default: /* unknown pin, ignore */ break; } if (c_bit) { c_val[2] &= ~c_bit; c_val[!inv] = c_bit; } else if (d_bit) { d_val[2] &= ~d_bit; d_val[!inv] = d_bit; } } /* * send a serial byte to the LCD panel. The caller is responsible for locking * if needed. */ static void lcd_send_serial(int byte) { int bit; /* * the data bit is set on D0, and the clock on STROBE. * LCD reads D0 on STROBE's rising edge. 
*/ for (bit = 0; bit < 8; bit++) { clear_bit(LCD_BIT_CL, bits); /* CLK low */ panel_set_bits(); if (byte & 1) { set_bit(LCD_BIT_DA, bits); } else { clear_bit(LCD_BIT_DA, bits); } panel_set_bits(); udelay(2); /* maintain the data during 2 us before CLK up */ set_bit(LCD_BIT_CL, bits); /* CLK high */ panel_set_bits(); udelay(1); /* maintain the strobe during 1 us */ byte >>= 1; } } /* turn the backlight on or off */ static void lcd_backlight(struct charlcd *charlcd, enum charlcd_onoff on) { if (lcd.pins.bl == PIN_NONE) return; /* The backlight is activated by setting the AUTOFEED line to +5V */ spin_lock_irq(&pprt_lock); if (on) set_bit(LCD_BIT_BL, bits); else clear_bit(LCD_BIT_BL, bits); panel_set_bits(); spin_unlock_irq(&pprt_lock); } /* send a command to the LCD panel in serial mode */ static void lcd_write_cmd_s(struct hd44780_common *hdc, int cmd) { spin_lock_irq(&pprt_lock); lcd_send_serial(0x1F); /* R/W=W, RS=0 */ lcd_send_serial(cmd & 0x0F); lcd_send_serial((cmd >> 4) & 0x0F); udelay(40); /* the shortest command takes at least 40 us */ spin_unlock_irq(&pprt_lock); } /* send data to the LCD panel in serial mode */ static void lcd_write_data_s(struct hd44780_common *hdc, int data) { spin_lock_irq(&pprt_lock); lcd_send_serial(0x5F); /* R/W=W, RS=1 */ lcd_send_serial(data & 0x0F); lcd_send_serial((data >> 4) & 0x0F); udelay(40); /* the shortest data takes at least 40 us */ spin_unlock_irq(&pprt_lock); } /* send a command to the LCD panel in 8 bits parallel mode */ static void lcd_write_cmd_p8(struct hd44780_common *hdc, int cmd) { spin_lock_irq(&pprt_lock); /* present the data to the data port */ w_dtr(pprt, cmd); udelay(20); /* maintain the data during 20 us before the strobe */ set_bit(LCD_BIT_E, bits); clear_bit(LCD_BIT_RS, bits); clear_bit(LCD_BIT_RW, bits); set_ctrl_bits(); udelay(40); /* maintain the strobe during 40 us */ clear_bit(LCD_BIT_E, bits); set_ctrl_bits(); udelay(120); /* the shortest command takes at least 120 us */ spin_unlock_irq(&pprt_lock); } /* send data to the LCD panel in 8 bits parallel mode */ static void lcd_write_data_p8(struct hd44780_common *hdc, int data) { spin_lock_irq(&pprt_lock); /* present the data to the data port */ w_dtr(pprt, data); udelay(20); /* maintain the data during 20 us before the strobe */ set_bit(LCD_BIT_E, bits); set_bit(LCD_BIT_RS, bits); clear_bit(LCD_BIT_RW, bits); set_ctrl_bits(); udelay(40); /* maintain the strobe during 40 us */ clear_bit(LCD_BIT_E, bits); set_ctrl_bits(); udelay(45); /* the shortest data takes at least 45 us */ spin_unlock_irq(&pprt_lock); } /* send a command to the TI LCD panel */ static void lcd_write_cmd_tilcd(struct hd44780_common *hdc, int cmd) { spin_lock_irq(&pprt_lock); /* present the data to the control port */ w_ctr(pprt, cmd); udelay(60); spin_unlock_irq(&pprt_lock); } /* send data to the TI LCD panel */ static void lcd_write_data_tilcd(struct hd44780_common *hdc, int data) { spin_lock_irq(&pprt_lock); /* present the data to the data port */ w_dtr(pprt, data); udelay(60); spin_unlock_irq(&pprt_lock); } static const struct charlcd_ops charlcd_ops = { .backlight = lcd_backlight, .print = hd44780_common_print, .gotoxy = hd44780_common_gotoxy, .home = hd44780_common_home, .clear_display = hd44780_common_clear_display, .init_display = hd44780_common_init_display, .shift_cursor = hd44780_common_shift_cursor, .shift_display = hd44780_common_shift_display, .display = hd44780_common_display, .cursor = hd44780_common_cursor, .blink = hd44780_common_blink, .fontsize = hd44780_common_fontsize, .lines = 
hd44780_common_lines, .redefine_char = hd44780_common_redefine_char, }; /* initialize the LCD driver */ static void lcd_init(void) { struct charlcd *charlcd; struct hd44780_common *hdc; hdc = hd44780_common_alloc(); if (!hdc) return; charlcd = charlcd_alloc(); if (!charlcd) { kfree(hdc); return; } hdc->hd44780 = &lcd; charlcd->drvdata = hdc; /* * Init lcd struct with load-time values to preserve exact * current functionality (at least for now). */ charlcd->height = lcd_height; charlcd->width = lcd_width; hdc->bwidth = lcd_bwidth; hdc->hwidth = lcd_hwidth; switch (selected_lcd_type) { case LCD_TYPE_OLD: /* parallel mode, 8 bits */ lcd.proto = LCD_PROTO_PARALLEL; lcd.charset = LCD_CHARSET_NORMAL; lcd.pins.e = PIN_STROBE; lcd.pins.rs = PIN_AUTOLF; charlcd->width = 40; hdc->bwidth = 40; hdc->hwidth = 64; charlcd->height = 2; break; case LCD_TYPE_KS0074: /* serial mode, ks0074 */ lcd.proto = LCD_PROTO_SERIAL; lcd.charset = LCD_CHARSET_KS0074; lcd.pins.bl = PIN_AUTOLF; lcd.pins.cl = PIN_STROBE; lcd.pins.da = PIN_D0; charlcd->width = 16; hdc->bwidth = 40; hdc->hwidth = 16; charlcd->height = 2; break; case LCD_TYPE_NEXCOM: /* parallel mode, 8 bits, generic */ lcd.proto = LCD_PROTO_PARALLEL; lcd.charset = LCD_CHARSET_NORMAL; lcd.pins.e = PIN_AUTOLF; lcd.pins.rs = PIN_SELECP; lcd.pins.rw = PIN_INITP; charlcd->width = 16; hdc->bwidth = 40; hdc->hwidth = 64; charlcd->height = 2; break; case LCD_TYPE_CUSTOM: /* customer-defined */ lcd.proto = DEFAULT_LCD_PROTO; lcd.charset = DEFAULT_LCD_CHARSET; /* default geometry will be set later */ break; case LCD_TYPE_HANTRONIX: /* parallel mode, 8 bits, hantronix-like */ default: lcd.proto = LCD_PROTO_PARALLEL; lcd.charset = LCD_CHARSET_NORMAL; lcd.pins.e = PIN_STROBE; lcd.pins.rs = PIN_SELECP; charlcd->width = 16; hdc->bwidth = 40; hdc->hwidth = 64; charlcd->height = 2; break; } /* Overwrite with module params set on loading */ if (lcd_height != NOT_SET) charlcd->height = lcd_height; if (lcd_width != NOT_SET) charlcd->width = lcd_width; if (lcd_bwidth != NOT_SET) hdc->bwidth = lcd_bwidth; if (lcd_hwidth != NOT_SET) hdc->hwidth = lcd_hwidth; if (lcd_charset != NOT_SET) lcd.charset = lcd_charset; if (lcd_proto != NOT_SET) lcd.proto = lcd_proto; if (lcd_e_pin != PIN_NOT_SET) lcd.pins.e = lcd_e_pin; if (lcd_rs_pin != PIN_NOT_SET) lcd.pins.rs = lcd_rs_pin; if (lcd_rw_pin != PIN_NOT_SET) lcd.pins.rw = lcd_rw_pin; if (lcd_cl_pin != PIN_NOT_SET) lcd.pins.cl = lcd_cl_pin; if (lcd_da_pin != PIN_NOT_SET) lcd.pins.da = lcd_da_pin; if (lcd_bl_pin != PIN_NOT_SET) lcd.pins.bl = lcd_bl_pin; /* this is used to catch wrong and default values */ if (charlcd->width <= 0) charlcd->width = DEFAULT_LCD_WIDTH; if (hdc->bwidth <= 0) hdc->bwidth = DEFAULT_LCD_BWIDTH; if (hdc->hwidth <= 0) hdc->hwidth = DEFAULT_LCD_HWIDTH; if (charlcd->height <= 0) charlcd->height = DEFAULT_LCD_HEIGHT; if (lcd.proto == LCD_PROTO_SERIAL) { /* SERIAL */ charlcd->ops = &charlcd_ops; hdc->write_data = lcd_write_data_s; hdc->write_cmd = lcd_write_cmd_s; if (lcd.pins.cl == PIN_NOT_SET) lcd.pins.cl = DEFAULT_LCD_PIN_SCL; if (lcd.pins.da == PIN_NOT_SET) lcd.pins.da = DEFAULT_LCD_PIN_SDA; } else if (lcd.proto == LCD_PROTO_PARALLEL) { /* PARALLEL */ charlcd->ops = &charlcd_ops; hdc->write_data = lcd_write_data_p8; hdc->write_cmd = lcd_write_cmd_p8; if (lcd.pins.e == PIN_NOT_SET) lcd.pins.e = DEFAULT_LCD_PIN_E; if (lcd.pins.rs == PIN_NOT_SET) lcd.pins.rs = DEFAULT_LCD_PIN_RS; if (lcd.pins.rw == PIN_NOT_SET) lcd.pins.rw = DEFAULT_LCD_PIN_RW; } else { charlcd->ops = &charlcd_ops; hdc->write_data = 
lcd_write_data_tilcd; hdc->write_cmd = lcd_write_cmd_tilcd; } if (lcd.pins.bl == PIN_NOT_SET) lcd.pins.bl = DEFAULT_LCD_PIN_BL; if (lcd.pins.e == PIN_NOT_SET) lcd.pins.e = PIN_NONE; if (lcd.pins.rs == PIN_NOT_SET) lcd.pins.rs = PIN_NONE; if (lcd.pins.rw == PIN_NOT_SET) lcd.pins.rw = PIN_NONE; if (lcd.pins.bl == PIN_NOT_SET) lcd.pins.bl = PIN_NONE; if (lcd.pins.cl == PIN_NOT_SET) lcd.pins.cl = PIN_NONE; if (lcd.pins.da == PIN_NOT_SET) lcd.pins.da = PIN_NONE; if (lcd.charset == NOT_SET) lcd.charset = DEFAULT_LCD_CHARSET; if (lcd.charset == LCD_CHARSET_KS0074) charlcd->char_conv = lcd_char_conv_ks0074; else charlcd->char_conv = NULL; pin_to_bits(lcd.pins.e, lcd_bits[LCD_PORT_D][LCD_BIT_E], lcd_bits[LCD_PORT_C][LCD_BIT_E]); pin_to_bits(lcd.pins.rs, lcd_bits[LCD_PORT_D][LCD_BIT_RS], lcd_bits[LCD_PORT_C][LCD_BIT_RS]); pin_to_bits(lcd.pins.rw, lcd_bits[LCD_PORT_D][LCD_BIT_RW], lcd_bits[LCD_PORT_C][LCD_BIT_RW]); pin_to_bits(lcd.pins.bl, lcd_bits[LCD_PORT_D][LCD_BIT_BL], lcd_bits[LCD_PORT_C][LCD_BIT_BL]); pin_to_bits(lcd.pins.cl, lcd_bits[LCD_PORT_D][LCD_BIT_CL], lcd_bits[LCD_PORT_C][LCD_BIT_CL]); pin_to_bits(lcd.pins.da, lcd_bits[LCD_PORT_D][LCD_BIT_DA], lcd_bits[LCD_PORT_C][LCD_BIT_DA]); lcd.charlcd = charlcd; lcd.initialized = true; } /* * These are the file operation function for user access to /dev/keypad */ static ssize_t keypad_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned i = *ppos; char __user *tmp = buf; if (keypad_buflen == 0) { if (file->f_flags & O_NONBLOCK) return -EAGAIN; if (wait_event_interruptible(keypad_read_wait, keypad_buflen != 0)) return -EINTR; } for (; count-- > 0 && (keypad_buflen > 0); ++i, ++tmp, --keypad_buflen) { put_user(keypad_buffer[keypad_start], tmp); keypad_start = (keypad_start + 1) % KEYPAD_BUFFER; } *ppos = i; return tmp - buf; } static int keypad_open(struct inode *inode, struct file *file) { int ret; ret = -EBUSY; if (!atomic_dec_and_test(&keypad_available)) goto fail; /* open only once at a time */ ret = -EPERM; if (file->f_mode & FMODE_WRITE) /* device is read-only */ goto fail; keypad_buflen = 0; /* flush the buffer on opening */ return 0; fail: atomic_inc(&keypad_available); return ret; } static int keypad_release(struct inode *inode, struct file *file) { atomic_inc(&keypad_available); return 0; } static const struct file_operations keypad_fops = { .read = keypad_read, /* read */ .open = keypad_open, /* open */ .release = keypad_release, /* close */ .llseek = default_llseek, }; static struct miscdevice keypad_dev = { .minor = KEYPAD_MINOR, .name = "keypad", .fops = &keypad_fops, }; static void keypad_send_key(const char *string, int max_len) { /* send the key to the device only if a process is attached to it. */ if (!atomic_read(&keypad_available)) { while (max_len-- && keypad_buflen < KEYPAD_BUFFER && *string) { keypad_buffer[(keypad_start + keypad_buflen++) % KEYPAD_BUFFER] = *string++; } wake_up_interruptible(&keypad_read_wait); } } /* this function scans all the bits involving at least one logical signal, * and puts the results in the bitfield "phys_read" (one bit per established * contact), and sets "phys_read_prev" to "phys_read". * * Note: to debounce input signals, we will only consider as switched a signal * which is stable across 2 measures. Signals which are different between two * reads will be kept as they previously were in their logical form (phys_prev). * A signal which has just switched will have a 1 in * (phys_read ^ phys_read_prev). 
*/ static void phys_scan_contacts(void) { int bit, bitval; char oldval; char bitmask; char gndmask; phys_prev = phys_curr; phys_read_prev = phys_read; phys_read = 0; /* flush all signals */ /* keep track of old value, with all outputs disabled */ oldval = r_dtr(pprt) | scan_mask_o; /* activate all keyboard outputs (active low) */ w_dtr(pprt, oldval & ~scan_mask_o); /* will have a 1 for each bit set to gnd */ bitmask = PNL_PINPUT(r_str(pprt)) & scan_mask_i; /* disable all matrix signals */ w_dtr(pprt, oldval); /* now that all outputs are cleared, the only active input bits are * directly connected to the ground */ /* 1 for each grounded input */ gndmask = PNL_PINPUT(r_str(pprt)) & scan_mask_i; /* grounded inputs are signals 40-44 */ phys_read |= (__u64)gndmask << 40; if (bitmask != gndmask) { /* * since clearing the outputs changed some inputs, we know * that some input signals are currently tied to some outputs. * So we'll scan them. */ for (bit = 0; bit < 8; bit++) { bitval = BIT(bit); if (!(scan_mask_o & bitval)) /* skip unused bits */ continue; w_dtr(pprt, oldval & ~bitval); /* enable this output */ bitmask = PNL_PINPUT(r_str(pprt)) & ~gndmask; phys_read |= (__u64)bitmask << (5 * bit); } w_dtr(pprt, oldval); /* disable all outputs */ } /* * this is easy: use old bits when they are flapping, * use new ones when stable */ phys_curr = (phys_prev & (phys_read ^ phys_read_prev)) | (phys_read & ~(phys_read ^ phys_read_prev)); } static inline int input_state_high(struct logical_input *input) { #if 0 /* FIXME: * this is an invalid test. It tries to catch * transitions from single-key to multiple-key, but * doesn't take into account the contacts polarity. * The only solution to the problem is to parse keys * from the most complex to the simplest combinations, * and mark them as 'caught' once a combination * matches, then unmatch it for all other ones. */ /* try to catch dangerous transitions cases : * someone adds a bit, so this signal was a false * positive resulting from a transition. We should * invalidate the signal immediately and not call the * release function. * eg: 0 -(press A)-> A -(press B)-> AB : don't match A's release. */ if (((phys_prev & input->mask) == input->value) && ((phys_curr & input->mask) > input->value)) { input->state = INPUT_ST_LOW; /* invalidate */ return 1; } #endif if ((phys_curr & input->mask) == input->value) { if ((input->type == INPUT_TYPE_STD) && (input->high_timer == 0)) { input->high_timer++; if (input->u.std.press_fct) input->u.std.press_fct(input->u.std.press_data); } else if (input->type == INPUT_TYPE_KBD) { /* will turn on the light */ keypressed = 1; if (input->high_timer == 0) { char *press_str = input->u.kbd.press_str; if (press_str[0]) { int s = sizeof(input->u.kbd.press_str); keypad_send_key(press_str, s); } } if (input->u.kbd.repeat_str[0]) { char *repeat_str = input->u.kbd.repeat_str; if (input->high_timer >= KEYPAD_REP_START) { int s = sizeof(input->u.kbd.repeat_str); input->high_timer -= KEYPAD_REP_DELAY; keypad_send_key(repeat_str, s); } /* we will need to come back here soon */ inputs_stable = 0; } if (input->high_timer < 255) input->high_timer++; } return 1; } /* else signal falling down. Let's fall through. */ input->state = INPUT_ST_FALLING; input->fall_timer = 0; return 0; } static inline void input_state_falling(struct logical_input *input) { #if 0 /* FIXME !!! 
same comment as in input_state_high */ if (((phys_prev & input->mask) == input->value) && ((phys_curr & input->mask) > input->value)) { input->state = INPUT_ST_LOW; /* invalidate */ return; } #endif if ((phys_curr & input->mask) == input->value) { if (input->type == INPUT_TYPE_KBD) { /* will turn on the light */ keypressed = 1; if (input->u.kbd.repeat_str[0]) { char *repeat_str = input->u.kbd.repeat_str; if (input->high_timer >= KEYPAD_REP_START) { int s = sizeof(input->u.kbd.repeat_str); input->high_timer -= KEYPAD_REP_DELAY; keypad_send_key(repeat_str, s); } /* we will need to come back here soon */ inputs_stable = 0; } if (input->high_timer < 255) input->high_timer++; } input->state = INPUT_ST_HIGH; } else if (input->fall_timer >= input->fall_time) { /* call release event */ if (input->type == INPUT_TYPE_STD) { void (*release_fct)(int) = input->u.std.release_fct; if (release_fct) release_fct(input->u.std.release_data); } else if (input->type == INPUT_TYPE_KBD) { char *release_str = input->u.kbd.release_str; if (release_str[0]) { int s = sizeof(input->u.kbd.release_str); keypad_send_key(release_str, s); } } input->state = INPUT_ST_LOW; } else { input->fall_timer++; inputs_stable = 0; } } static void panel_process_inputs(void) { struct logical_input *input; keypressed = 0; inputs_stable = 1; list_for_each_entry(input, &logical_inputs, list) { switch (input->state) { case INPUT_ST_LOW: if ((phys_curr & input->mask) != input->value) break; /* if all needed ones were already set previously, * this means that this logical signal has been * activated by the releasing of another combined * signal, so we don't want to match. * eg: AB -(release B)-> A -(release A)-> 0 : * don't match A. */ if ((phys_prev & input->mask) == input->value) break; input->rise_timer = 0; input->state = INPUT_ST_RISING; fallthrough; case INPUT_ST_RISING: if ((phys_curr & input->mask) != input->value) { input->state = INPUT_ST_LOW; break; } if (input->rise_timer < input->rise_time) { inputs_stable = 0; input->rise_timer++; break; } input->high_timer = 0; input->state = INPUT_ST_HIGH; fallthrough; case INPUT_ST_HIGH: if (input_state_high(input)) break; fallthrough; case INPUT_ST_FALLING: input_state_falling(input); } } } static void panel_scan_timer(struct timer_list *unused) { if (keypad.enabled && keypad_initialized) { if (spin_trylock_irq(&pprt_lock)) { phys_scan_contacts(); /* no need for the parport anymore */ spin_unlock_irq(&pprt_lock); } if (!inputs_stable || phys_curr != phys_prev) panel_process_inputs(); } if (keypressed && lcd.enabled && lcd.initialized) charlcd_poke(lcd.charlcd); mod_timer(&scan_timer, jiffies + INPUT_POLL_TIME); } static void init_scan_timer(void) { if (scan_timer.function) return; /* already started */ timer_setup(&scan_timer, panel_scan_timer, 0); scan_timer.expires = jiffies + INPUT_POLL_TIME; add_timer(&scan_timer); } /* converts a name of the form "({BbAaPpSsEe}{01234567-})*" to a series of bits. * if <omask> or <imask> are non-null, they will be or'ed with the bits * corresponding to out and in bits respectively. * returns 1 if ok, 0 if error (in which case, nothing is written). 
*/ static u8 input_name2mask(const char *name, __u64 *mask, __u64 *value, u8 *imask, u8 *omask) { const char sigtab[] = "EeSsPpAaBb"; u8 im, om; __u64 m, v; om = 0; im = 0; m = 0ULL; v = 0ULL; while (*name) { int in, out, bit, neg; const char *idx; idx = strchr(sigtab, *name); if (!idx) return 0; /* input name not found */ in = idx - sigtab; neg = (in & 1); /* odd (lower) names are negated */ in >>= 1; im |= BIT(in); name++; if (*name >= '0' && *name <= '7') { out = *name - '0'; om |= BIT(out); } else if (*name == '-') { out = 8; } else { return 0; /* unknown bit name */ } bit = (out * 5) + in; m |= 1ULL << bit; if (!neg) v |= 1ULL << bit; name++; } *mask = m; *value = v; if (imask) *imask |= im; if (omask) *omask |= om; return 1; } /* tries to bind a key to the signal name <name>. The key will send the * strings <press>, <repeat>, <release> for these respective events. * Returns the pointer to the new key if ok, NULL if the key could not be bound. */ static struct logical_input *panel_bind_key(const char *name, const char *press, const char *repeat, const char *release) { struct logical_input *key; key = kzalloc(sizeof(*key), GFP_KERNEL); if (!key) return NULL; if (!input_name2mask(name, &key->mask, &key->value, &scan_mask_i, &scan_mask_o)) { kfree(key); return NULL; } key->type = INPUT_TYPE_KBD; key->state = INPUT_ST_LOW; key->rise_time = 1; key->fall_time = 1; strncpy(key->u.kbd.press_str, press, sizeof(key->u.kbd.press_str)); strncpy(key->u.kbd.repeat_str, repeat, sizeof(key->u.kbd.repeat_str)); strncpy(key->u.kbd.release_str, release, sizeof(key->u.kbd.release_str)); list_add(&key->list, &logical_inputs); return key; } #if 0 /* tries to bind a callback function to the signal name <name>. The function * <press_fct> will be called with the <press_data> arg when the signal is * activated, and so on for <release_fct>/<release_data> * Returns the pointer to the new signal if ok, NULL if the signal could not * be bound. 
*/ static struct logical_input *panel_bind_callback(char *name, void (*press_fct)(int), int press_data, void (*release_fct)(int), int release_data) { struct logical_input *callback; callback = kmalloc(sizeof(*callback), GFP_KERNEL); if (!callback) return NULL; memset(callback, 0, sizeof(struct logical_input)); if (!input_name2mask(name, &callback->mask, &callback->value, &scan_mask_i, &scan_mask_o)) return NULL; callback->type = INPUT_TYPE_STD; callback->state = INPUT_ST_LOW; callback->rise_time = 1; callback->fall_time = 1; callback->u.std.press_fct = press_fct; callback->u.std.press_data = press_data; callback->u.std.release_fct = release_fct; callback->u.std.release_data = release_data; list_add(&callback->list, &logical_inputs); return callback; } #endif static void keypad_init(void) { int keynum; init_waitqueue_head(&keypad_read_wait); keypad_buflen = 0; /* flushes any eventual noisy keystroke */ /* Let's create all known keys */ for (keynum = 0; keypad_profile[keynum][0][0]; keynum++) { panel_bind_key(keypad_profile[keynum][0], keypad_profile[keynum][1], keypad_profile[keynum][2], keypad_profile[keynum][3]); } init_scan_timer(); keypad_initialized = 1; } /**************************************************/ /* device initialization */ /**************************************************/ static void panel_attach(struct parport *port) { struct pardev_cb panel_cb; if (port->number != parport) return; if (pprt) { pr_err("%s: port->number=%d parport=%d, already registered!\n", __func__, port->number, parport); return; } memset(&panel_cb, 0, sizeof(panel_cb)); panel_cb.private = &pprt; /* panel_cb.flags = 0 should be PARPORT_DEV_EXCL? */ pprt = parport_register_dev_model(port, "panel", &panel_cb, 0); if (!pprt) { pr_err("%s: port->number=%d parport=%d, parport_register_device() failed\n", __func__, port->number, parport); return; } if (parport_claim(pprt)) { pr_err("could not claim access to parport%d. 
Aborting.\n", parport); goto err_unreg_device; } /* must init LCD first, just in case an IRQ from the keypad is * generated at keypad init */ if (lcd.enabled) { lcd_init(); if (!lcd.charlcd || charlcd_register(lcd.charlcd)) goto err_unreg_device; } if (keypad.enabled) { keypad_init(); if (misc_register(&keypad_dev)) goto err_lcd_unreg; } return; err_lcd_unreg: if (scan_timer.function) del_timer_sync(&scan_timer); if (lcd.enabled) charlcd_unregister(lcd.charlcd); err_unreg_device: kfree(lcd.charlcd); lcd.charlcd = NULL; parport_unregister_device(pprt); pprt = NULL; } static void panel_detach(struct parport *port) { if (port->number != parport) return; if (!pprt) { pr_err("%s: port->number=%d parport=%d, nothing to unregister.\n", __func__, port->number, parport); return; } if (scan_timer.function) del_timer_sync(&scan_timer); if (keypad.enabled) { misc_deregister(&keypad_dev); keypad_initialized = 0; } if (lcd.enabled) { charlcd_unregister(lcd.charlcd); lcd.initialized = false; kfree(lcd.charlcd->drvdata); kfree(lcd.charlcd); lcd.charlcd = NULL; } /* TODO: free all input signals */ parport_release(pprt); parport_unregister_device(pprt); pprt = NULL; } static struct parport_driver panel_driver = { .name = "panel", .match_port = panel_attach, .detach = panel_detach, .devmodel = true, }; /* init function */ static int __init panel_init_module(void) { int selected_keypad_type = NOT_SET, err; /* take care of an eventual profile */ switch (profile) { case PANEL_PROFILE_CUSTOM: /* custom profile */ selected_keypad_type = DEFAULT_KEYPAD_TYPE; selected_lcd_type = DEFAULT_LCD_TYPE; break; case PANEL_PROFILE_OLD: /* 8 bits, 2*16, old keypad */ selected_keypad_type = KEYPAD_TYPE_OLD; selected_lcd_type = LCD_TYPE_OLD; /* TODO: This two are a little hacky, sort it out later */ if (lcd_width == NOT_SET) lcd_width = 16; if (lcd_hwidth == NOT_SET) lcd_hwidth = 16; break; case PANEL_PROFILE_NEW: /* serial, 2*16, new keypad */ selected_keypad_type = KEYPAD_TYPE_NEW; selected_lcd_type = LCD_TYPE_KS0074; break; case PANEL_PROFILE_HANTRONIX: /* 8 bits, 2*16 hantronix-like, no keypad */ selected_keypad_type = KEYPAD_TYPE_NONE; selected_lcd_type = LCD_TYPE_HANTRONIX; break; case PANEL_PROFILE_NEXCOM: /* generic 8 bits, 2*16, nexcom keypad, eg. Nexcom. */ selected_keypad_type = KEYPAD_TYPE_NEXCOM; selected_lcd_type = LCD_TYPE_NEXCOM; break; case PANEL_PROFILE_LARGE: /* 8 bits, 2*40, old keypad */ selected_keypad_type = KEYPAD_TYPE_OLD; selected_lcd_type = LCD_TYPE_OLD; break; } /* * Overwrite selection with module param values (both keypad and lcd), * where the deprecated params have lower prio. */ if (keypad_enabled != NOT_SET) selected_keypad_type = keypad_enabled; if (keypad_type != NOT_SET) selected_keypad_type = keypad_type; keypad.enabled = (selected_keypad_type > 0); if (lcd_enabled != NOT_SET) selected_lcd_type = lcd_enabled; if (lcd_type != NOT_SET) selected_lcd_type = lcd_type; lcd.enabled = (selected_lcd_type > 0); if (lcd.enabled) { /* * Init lcd struct with load-time values to preserve exact * current functionality (at least for now). 
*/ lcd.charset = lcd_charset; lcd.proto = lcd_proto; lcd.pins.e = lcd_e_pin; lcd.pins.rs = lcd_rs_pin; lcd.pins.rw = lcd_rw_pin; lcd.pins.cl = lcd_cl_pin; lcd.pins.da = lcd_da_pin; lcd.pins.bl = lcd_bl_pin; } switch (selected_keypad_type) { case KEYPAD_TYPE_OLD: keypad_profile = old_keypad_profile; break; case KEYPAD_TYPE_NEW: keypad_profile = new_keypad_profile; break; case KEYPAD_TYPE_NEXCOM: keypad_profile = nexcom_keypad_profile; break; default: keypad_profile = NULL; break; } if (!lcd.enabled && !keypad.enabled) { /* no device enabled, let's exit */ pr_err("panel driver disabled.\n"); return -ENODEV; } err = parport_register_driver(&panel_driver); if (err) { pr_err("could not register with parport. Aborting.\n"); return err; } if (pprt) pr_info("panel driver registered on parport%d (io=0x%lx).\n", parport, pprt->port->base); else pr_info("panel driver not yet registered\n"); return 0; } static void __exit panel_cleanup_module(void) { parport_unregister_driver(&panel_driver); } module_init(panel_init_module); module_exit(panel_cleanup_module); MODULE_AUTHOR("Willy Tarreau"); MODULE_LICENSE("GPL");
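The keypad scanner above debounces contacts by comparing the two most recent reads in phys_scan_contacts(): bits that differ between the reads are treated as flapping and keep their previous logical value, while bits that read identically twice take the new value. A minimal userspace sketch of just that bit expression follows (debounce() is a hypothetical helper name, not part of the driver):

#include <stdio.h>
#include <stdint.h>

/*
 * Two-sample debounce as done by phys_scan_contacts(): flapping bits
 * (those that changed between the two scans) keep their previous value,
 * stable bits take the newly read value.
 */
static uint64_t debounce(uint64_t prev, uint64_t read, uint64_t read_prev)
{
	uint64_t flapping = read ^ read_prev;

	return (prev & flapping) | (read & ~flapping);
}

int main(void)
{
	/* bit 0 stable at 1, bit 1 still bouncing, bit 2 stable at 0 */
	uint64_t curr = debounce(/* prev */ 0x0, /* read */ 0x3, /* read_prev */ 0x1);

	printf("phys_curr = 0x%llx\n", (unsigned long long)curr);	/* prints 0x1 */
	return 0;
}

Only the stable bit 0 switches to its new state; the bouncing bit 1 stays at its old value until it reads the same twice in a row.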
linux-master
drivers/auxdisplay/panel.c
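The least obvious part of panel.c is the signal-name encoding parsed by input_name2mask(): each <signal><output> pair maps to bit (out * 5 + in) of a 64-bit contact mask, where "in" indexes the five status-port inputs E/S/P/A/B, "out" is a data-port line 0-7, and '-' means the input is tied to ground (out = 8). Lowercase letters mark active-low signals, which are required in the mask but must read 0 in the value. Below is a minimal userspace sketch of the same mapping, assuming a hypothetical name2mask() helper and omitting the driver's imask/omask bookkeeping:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static int name2mask(const char *name, uint64_t *mask, uint64_t *value)
{
	static const char sigtab[] = "EeSsPpAaBb";
	uint64_t m = 0, v = 0;

	while (*name) {
		const char *idx = strchr(sigtab, *name);
		int in, out, neg, bit;

		if (!idx)
			return 0;		/* unknown input name */
		in = idx - sigtab;
		neg = in & 1;			/* odd (lowercase) entries are negated */
		in >>= 1;			/* input index 0..4 (E,S,P,A,B) */
		name++;
		if (*name >= '0' && *name <= '7')
			out = *name - '0';	/* routed to a data-port output */
		else if (*name == '-')
			out = 8;		/* tied directly to ground */
		else
			return 0;
		bit = out * 5 + in;
		m |= 1ULL << bit;
		if (!neg)
			v |= 1ULL << bit;	/* active-high: bit must read 1 */
		name++;
	}
	*mask = m;
	*value = v;
	return 1;
}

int main(void)
{
	uint64_t mask, value;

	/* "s0": input S, active low, connected to output line 0 */
	if (name2mask("s0", &mask, &value))
		printf("s0 -> mask 0x%llx value 0x%llx\n",
		       (unsigned long long)mask, (unsigned long long)value);
	return 0;
}

For "s0" this yields mask 0x2 and value 0x0: the scanner must see that contact, and it must read as grounded for the key to match.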
// SPDX-License-Identifier: GPL-2.0-or-later #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include "charlcd.h" #include "hd44780_common.h" /* LCD commands */ #define LCD_CMD_DISPLAY_CLEAR 0x01 /* Clear entire display */ #define LCD_CMD_ENTRY_MODE 0x04 /* Set entry mode */ #define LCD_CMD_CURSOR_INC 0x02 /* Increment cursor */ #define LCD_CMD_DISPLAY_CTRL 0x08 /* Display control */ #define LCD_CMD_DISPLAY_ON 0x04 /* Set display on */ #define LCD_CMD_CURSOR_ON 0x02 /* Set cursor on */ #define LCD_CMD_BLINK_ON 0x01 /* Set blink on */ #define LCD_CMD_SHIFT 0x10 /* Shift cursor/display */ #define LCD_CMD_DISPLAY_SHIFT 0x08 /* Shift display instead of cursor */ #define LCD_CMD_SHIFT_RIGHT 0x04 /* Shift display/cursor to the right */ #define LCD_CMD_FUNCTION_SET 0x20 /* Set function */ #define LCD_CMD_DATA_LEN_8BITS 0x10 /* Set data length to 8 bits */ #define LCD_CMD_TWO_LINES 0x08 /* Set to two display lines */ #define LCD_CMD_FONT_5X10_DOTS 0x04 /* Set char font to 5x10 dots */ #define LCD_CMD_SET_CGRAM_ADDR 0x40 /* Set char generator RAM address */ #define LCD_CMD_SET_DDRAM_ADDR 0x80 /* Set display data RAM address */ /* sleeps that many milliseconds with a reschedule */ static void long_sleep(int ms) { schedule_timeout_interruptible(msecs_to_jiffies(ms)); } int hd44780_common_print(struct charlcd *lcd, int c) { struct hd44780_common *hdc = lcd->drvdata; if (lcd->addr.x < hdc->bwidth) { hdc->write_data(hdc, c); return 0; } return 1; } EXPORT_SYMBOL_GPL(hd44780_common_print); int hd44780_common_gotoxy(struct charlcd *lcd, unsigned int x, unsigned int y) { struct hd44780_common *hdc = lcd->drvdata; unsigned int addr; /* * we force the cursor to stay at the end of the * line if it wants to go farther */ addr = x < hdc->bwidth ? x & (hdc->hwidth - 1) : hdc->bwidth - 1; if (y & 1) addr += hdc->hwidth; if (y & 2) addr += hdc->bwidth; hdc->write_cmd(hdc, LCD_CMD_SET_DDRAM_ADDR | addr); return 0; } EXPORT_SYMBOL_GPL(hd44780_common_gotoxy); int hd44780_common_home(struct charlcd *lcd) { return hd44780_common_gotoxy(lcd, 0, 0); } EXPORT_SYMBOL_GPL(hd44780_common_home); /* clears the display and resets X/Y */ int hd44780_common_clear_display(struct charlcd *lcd) { struct hd44780_common *hdc = lcd->drvdata; hdc->write_cmd(hdc, LCD_CMD_DISPLAY_CLEAR); /* datasheet says to wait 1,64 milliseconds */ long_sleep(2); /* * The Hitachi HD44780 controller (and compatible ones) reset the DDRAM * address when executing the DISPLAY_CLEAR command, thus the * following call is not required. However, other controllers do not * (e.g. NewHaven NHD-0220DZW-AG5), thus move the cursor to home * unconditionally to support both. */ return hd44780_common_home(lcd); } EXPORT_SYMBOL_GPL(hd44780_common_clear_display); int hd44780_common_init_display(struct charlcd *lcd) { struct hd44780_common *hdc = lcd->drvdata; void (*write_cmd_raw)(struct hd44780_common *hdc, int cmd); u8 init; if (hdc->ifwidth != 4 && hdc->ifwidth != 8) return -EINVAL; hdc->hd44780_common_flags = ((lcd->height > 1) ? 
LCD_FLAG_N : 0) | LCD_FLAG_D | LCD_FLAG_C | LCD_FLAG_B; long_sleep(20); /* wait 20 ms after power-up for the paranoid */ /* * 8-bit mode, 1 line, small fonts; let's do it 3 times, to make sure * the LCD is in 8-bit mode afterwards */ init = LCD_CMD_FUNCTION_SET | LCD_CMD_DATA_LEN_8BITS; if (hdc->ifwidth == 4) { init >>= 4; write_cmd_raw = hdc->write_cmd_raw4; } else { write_cmd_raw = hdc->write_cmd; } write_cmd_raw(hdc, init); long_sleep(10); write_cmd_raw(hdc, init); long_sleep(10); write_cmd_raw(hdc, init); long_sleep(10); if (hdc->ifwidth == 4) { /* Switch to 4-bit mode, 1 line, small fonts */ hdc->write_cmd_raw4(hdc, LCD_CMD_FUNCTION_SET >> 4); long_sleep(10); } /* set font height and lines number */ hdc->write_cmd(hdc, LCD_CMD_FUNCTION_SET | ((hdc->ifwidth == 8) ? LCD_CMD_DATA_LEN_8BITS : 0) | ((hdc->hd44780_common_flags & LCD_FLAG_F) ? LCD_CMD_FONT_5X10_DOTS : 0) | ((hdc->hd44780_common_flags & LCD_FLAG_N) ? LCD_CMD_TWO_LINES : 0)); long_sleep(10); /* display off, cursor off, blink off */ hdc->write_cmd(hdc, LCD_CMD_DISPLAY_CTRL); long_sleep(10); hdc->write_cmd(hdc, LCD_CMD_DISPLAY_CTRL | /* set display mode */ ((hdc->hd44780_common_flags & LCD_FLAG_D) ? LCD_CMD_DISPLAY_ON : 0) | ((hdc->hd44780_common_flags & LCD_FLAG_C) ? LCD_CMD_CURSOR_ON : 0) | ((hdc->hd44780_common_flags & LCD_FLAG_B) ? LCD_CMD_BLINK_ON : 0)); charlcd_backlight(lcd, (hdc->hd44780_common_flags & LCD_FLAG_L) ? 1 : 0); long_sleep(10); /* entry mode set : increment, cursor shifting */ hdc->write_cmd(hdc, LCD_CMD_ENTRY_MODE | LCD_CMD_CURSOR_INC); hd44780_common_clear_display(lcd); return 0; } EXPORT_SYMBOL_GPL(hd44780_common_init_display); int hd44780_common_shift_cursor(struct charlcd *lcd, enum charlcd_shift_dir dir) { struct hd44780_common *hdc = lcd->drvdata; if (dir == CHARLCD_SHIFT_LEFT) { /* back one char if not at end of line */ if (lcd->addr.x < hdc->bwidth) hdc->write_cmd(hdc, LCD_CMD_SHIFT); } else if (dir == CHARLCD_SHIFT_RIGHT) { /* allow the cursor to pass the end of the line */ if (lcd->addr.x < (hdc->bwidth - 1)) hdc->write_cmd(hdc, LCD_CMD_SHIFT | LCD_CMD_SHIFT_RIGHT); } return 0; } EXPORT_SYMBOL_GPL(hd44780_common_shift_cursor); int hd44780_common_shift_display(struct charlcd *lcd, enum charlcd_shift_dir dir) { struct hd44780_common *hdc = lcd->drvdata; if (dir == CHARLCD_SHIFT_LEFT) hdc->write_cmd(hdc, LCD_CMD_SHIFT | LCD_CMD_DISPLAY_SHIFT); else if (dir == CHARLCD_SHIFT_RIGHT) hdc->write_cmd(hdc, LCD_CMD_SHIFT | LCD_CMD_DISPLAY_SHIFT | LCD_CMD_SHIFT_RIGHT); return 0; } EXPORT_SYMBOL_GPL(hd44780_common_shift_display); static void hd44780_common_set_mode(struct hd44780_common *hdc) { hdc->write_cmd(hdc, LCD_CMD_DISPLAY_CTRL | ((hdc->hd44780_common_flags & LCD_FLAG_D) ? LCD_CMD_DISPLAY_ON : 0) | ((hdc->hd44780_common_flags & LCD_FLAG_C) ? LCD_CMD_CURSOR_ON : 0) | ((hdc->hd44780_common_flags & LCD_FLAG_B) ? 
LCD_CMD_BLINK_ON : 0)); } int hd44780_common_display(struct charlcd *lcd, enum charlcd_onoff on) { struct hd44780_common *hdc = lcd->drvdata; if (on == CHARLCD_ON) hdc->hd44780_common_flags |= LCD_FLAG_D; else hdc->hd44780_common_flags &= ~LCD_FLAG_D; hd44780_common_set_mode(hdc); return 0; } EXPORT_SYMBOL_GPL(hd44780_common_display); int hd44780_common_cursor(struct charlcd *lcd, enum charlcd_onoff on) { struct hd44780_common *hdc = lcd->drvdata; if (on == CHARLCD_ON) hdc->hd44780_common_flags |= LCD_FLAG_C; else hdc->hd44780_common_flags &= ~LCD_FLAG_C; hd44780_common_set_mode(hdc); return 0; } EXPORT_SYMBOL_GPL(hd44780_common_cursor); int hd44780_common_blink(struct charlcd *lcd, enum charlcd_onoff on) { struct hd44780_common *hdc = lcd->drvdata; if (on == CHARLCD_ON) hdc->hd44780_common_flags |= LCD_FLAG_B; else hdc->hd44780_common_flags &= ~LCD_FLAG_B; hd44780_common_set_mode(hdc); return 0; } EXPORT_SYMBOL_GPL(hd44780_common_blink); static void hd44780_common_set_function(struct hd44780_common *hdc) { hdc->write_cmd(hdc, LCD_CMD_FUNCTION_SET | ((hdc->ifwidth == 8) ? LCD_CMD_DATA_LEN_8BITS : 0) | ((hdc->hd44780_common_flags & LCD_FLAG_F) ? LCD_CMD_FONT_5X10_DOTS : 0) | ((hdc->hd44780_common_flags & LCD_FLAG_N) ? LCD_CMD_TWO_LINES : 0)); } int hd44780_common_fontsize(struct charlcd *lcd, enum charlcd_fontsize size) { struct hd44780_common *hdc = lcd->drvdata; if (size == CHARLCD_FONTSIZE_LARGE) hdc->hd44780_common_flags |= LCD_FLAG_F; else hdc->hd44780_common_flags &= ~LCD_FLAG_F; hd44780_common_set_function(hdc); return 0; } EXPORT_SYMBOL_GPL(hd44780_common_fontsize); int hd44780_common_lines(struct charlcd *lcd, enum charlcd_lines lines) { struct hd44780_common *hdc = lcd->drvdata; if (lines == CHARLCD_LINES_2) hdc->hd44780_common_flags |= LCD_FLAG_N; else hdc->hd44780_common_flags &= ~LCD_FLAG_N; hd44780_common_set_function(hdc); return 0; } EXPORT_SYMBOL_GPL(hd44780_common_lines); int hd44780_common_redefine_char(struct charlcd *lcd, char *esc) { /* Generator : LGcxxxxx...xx; must have <c> between '0' * and '7', representing the numerical ASCII code of the * redefined character, and <xx...xx> a sequence of 16 * hex digits representing 8 bytes for each character. * Most LCDs will only use 5 lower bits of the 7 first * bytes. */ struct hd44780_common *hdc = lcd->drvdata; unsigned char cgbytes[8]; unsigned char cgaddr; int cgoffset; int shift; char value; int addr; if (!strchr(esc, ';')) return 0; esc++; cgaddr = *(esc++) - '0'; if (cgaddr > 7) return 1; cgoffset = 0; shift = 0; value = 0; while (*esc && cgoffset < 8) { int half; shift ^= 4; half = hex_to_bin(*esc++); if (half < 0) continue; value |= half << shift; if (shift == 0) { cgbytes[cgoffset++] = value; value = 0; } } hdc->write_cmd(hdc, LCD_CMD_SET_CGRAM_ADDR | (cgaddr * 8)); for (addr = 0; addr < cgoffset; addr++) hdc->write_data(hdc, cgbytes[addr]); /* ensures that we stop writing to CGRAM */ lcd->ops->gotoxy(lcd, lcd->addr.x, lcd->addr.y); return 1; } EXPORT_SYMBOL_GPL(hd44780_common_redefine_char); struct hd44780_common *hd44780_common_alloc(void) { struct hd44780_common *hd; hd = kzalloc(sizeof(*hd), GFP_KERNEL); if (!hd) return NULL; hd->ifwidth = 8; hd->bwidth = DEFAULT_LCD_BWIDTH; hd->hwidth = DEFAULT_LCD_HWIDTH; return hd; } EXPORT_SYMBOL_GPL(hd44780_common_alloc); MODULE_LICENSE("GPL");
linux-master
drivers/auxdisplay/hd44780_common.c
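The DDRAM addressing in hd44780_common_gotoxy() is worth spelling out: odd rows start hwidth bytes into DDRAM, rows 2 and 3 a further bwidth bytes, and the cursor is clamped to the end of the line. A small userspace sketch follows; the 20x4 geometry (bwidth = 20, hwidth = 0x40) is an assumed example, chosen because it reproduces the familiar 0x00/0x40/0x14/0x54 line origins:

#include <stdio.h>

/* DDRAM address computation as performed by hd44780_common_gotoxy() */
static unsigned int ddram_addr(unsigned int x, unsigned int y,
			       unsigned int bwidth, unsigned int hwidth)
{
	/* force the cursor to stay at the end of the line, as the driver does */
	unsigned int addr = x < bwidth ? x & (hwidth - 1) : bwidth - 1;

	if (y & 1)
		addr += hwidth;
	if (y & 2)
		addr += bwidth;
	return addr;
}

int main(void)
{
	unsigned int y;

	for (y = 0; y < 4; y++)
		printf("line %u starts at DDRAM 0x%02x\n",
		       y, ddram_addr(0, y, 20, 0x40));
	return 0;
}

The driver then ors the result into LCD_CMD_SET_DDRAM_ADDR (0x80) before writing the command.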
// SPDX-License-Identifier: GPL-2.0+ /* * HD44780 Character LCD driver for Linux * * Copyright (C) 2000-2008, Willy Tarreau <[email protected]> * Copyright (C) 2016-2017 Glider bvba */ #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/slab.h> #include "charlcd.h" #include "hd44780_common.h" enum hd44780_pin { /* Order does matter due to writing to GPIO array subsets! */ PIN_DATA0, /* Optional */ PIN_DATA1, /* Optional */ PIN_DATA2, /* Optional */ PIN_DATA3, /* Optional */ PIN_DATA4, PIN_DATA5, PIN_DATA6, PIN_DATA7, PIN_CTRL_RS, PIN_CTRL_RW, /* Optional */ PIN_CTRL_E, PIN_CTRL_BL, /* Optional */ PIN_NUM }; struct hd44780 { struct gpio_desc *pins[PIN_NUM]; }; static void hd44780_backlight(struct charlcd *lcd, enum charlcd_onoff on) { struct hd44780_common *hdc = lcd->drvdata; struct hd44780 *hd = hdc->hd44780; if (hd->pins[PIN_CTRL_BL]) gpiod_set_value_cansleep(hd->pins[PIN_CTRL_BL], on); } static void hd44780_strobe_gpio(struct hd44780 *hd) { /* Maintain the data during 20 us before the strobe */ udelay(20); gpiod_set_value_cansleep(hd->pins[PIN_CTRL_E], 1); /* Maintain the strobe during 40 us */ udelay(40); gpiod_set_value_cansleep(hd->pins[PIN_CTRL_E], 0); } /* write to an LCD panel register in 8 bit GPIO mode */ static void hd44780_write_gpio8(struct hd44780 *hd, u8 val, unsigned int rs) { DECLARE_BITMAP(values, 10); /* for DATA[0-7], RS, RW */ unsigned int n; values[0] = val; __assign_bit(8, values, rs); n = hd->pins[PIN_CTRL_RW] ? 10 : 9; /* Present the data to the port */ gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA0], NULL, values); hd44780_strobe_gpio(hd); } /* write to an LCD panel register in 4 bit GPIO mode */ static void hd44780_write_gpio4(struct hd44780 *hd, u8 val, unsigned int rs) { DECLARE_BITMAP(values, 6); /* for DATA[4-7], RS, RW */ unsigned int n; /* High nibble + RS, RW */ values[0] = val >> 4; __assign_bit(4, values, rs); n = hd->pins[PIN_CTRL_RW] ? 
6 : 5; /* Present the data to the port */ gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4], NULL, values); hd44780_strobe_gpio(hd); /* Low nibble */ values[0] &= ~0x0fUL; values[0] |= val & 0x0f; /* Present the data to the port */ gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4], NULL, values); hd44780_strobe_gpio(hd); } /* Send a command to the LCD panel in 8 bit GPIO mode */ static void hd44780_write_cmd_gpio8(struct hd44780_common *hdc, int cmd) { struct hd44780 *hd = hdc->hd44780; hd44780_write_gpio8(hd, cmd, 0); /* The shortest command takes at least 120 us */ udelay(120); } /* Send data to the LCD panel in 8 bit GPIO mode */ static void hd44780_write_data_gpio8(struct hd44780_common *hdc, int data) { struct hd44780 *hd = hdc->hd44780; hd44780_write_gpio8(hd, data, 1); /* The shortest data takes at least 45 us */ udelay(45); } static const struct charlcd_ops hd44780_ops_gpio8 = { .backlight = hd44780_backlight, .print = hd44780_common_print, .gotoxy = hd44780_common_gotoxy, .home = hd44780_common_home, .clear_display = hd44780_common_clear_display, .init_display = hd44780_common_init_display, .shift_cursor = hd44780_common_shift_cursor, .shift_display = hd44780_common_shift_display, .display = hd44780_common_display, .cursor = hd44780_common_cursor, .blink = hd44780_common_blink, .fontsize = hd44780_common_fontsize, .lines = hd44780_common_lines, .redefine_char = hd44780_common_redefine_char, }; /* Send a command to the LCD panel in 4 bit GPIO mode */ static void hd44780_write_cmd_gpio4(struct hd44780_common *hdc, int cmd) { struct hd44780 *hd = hdc->hd44780; hd44780_write_gpio4(hd, cmd, 0); /* The shortest command takes at least 120 us */ udelay(120); } /* Send 4-bits of a command to the LCD panel in raw 4 bit GPIO mode */ static void hd44780_write_cmd_raw_gpio4(struct hd44780_common *hdc, int cmd) { DECLARE_BITMAP(values, 6); /* for DATA[4-7], RS, RW */ struct hd44780 *hd = hdc->hd44780; unsigned int n; /* Command nibble + RS, RW */ values[0] = cmd & 0x0f; n = hd->pins[PIN_CTRL_RW] ? 
6 : 5; /* Present the data to the port */ gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4], NULL, values); hd44780_strobe_gpio(hd); } /* Send data to the LCD panel in 4 bit GPIO mode */ static void hd44780_write_data_gpio4(struct hd44780_common *hdc, int data) { struct hd44780 *hd = hdc->hd44780; hd44780_write_gpio4(hd, data, 1); /* The shortest data takes at least 45 us */ udelay(45); } static const struct charlcd_ops hd44780_ops_gpio4 = { .backlight = hd44780_backlight, .print = hd44780_common_print, .gotoxy = hd44780_common_gotoxy, .home = hd44780_common_home, .clear_display = hd44780_common_clear_display, .init_display = hd44780_common_init_display, .shift_cursor = hd44780_common_shift_cursor, .shift_display = hd44780_common_shift_display, .display = hd44780_common_display, .cursor = hd44780_common_cursor, .blink = hd44780_common_blink, .fontsize = hd44780_common_fontsize, .lines = hd44780_common_lines, .redefine_char = hd44780_common_redefine_char, }; static int hd44780_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; unsigned int i, base; struct charlcd *lcd; struct hd44780_common *hdc; struct hd44780 *hd; int ifwidth, ret = -ENOMEM; /* Required pins */ ifwidth = gpiod_count(dev, "data"); if (ifwidth < 0) return ifwidth; switch (ifwidth) { case 4: base = PIN_DATA4; break; case 8: base = PIN_DATA0; break; default: return -EINVAL; } hdc = hd44780_common_alloc(); if (!hdc) return -ENOMEM; lcd = charlcd_alloc(); if (!lcd) goto fail1; hd = kzalloc(sizeof(struct hd44780), GFP_KERNEL); if (!hd) goto fail2; hdc->hd44780 = hd; lcd->drvdata = hdc; for (i = 0; i < ifwidth; i++) { hd->pins[base + i] = devm_gpiod_get_index(dev, "data", i, GPIOD_OUT_LOW); if (IS_ERR(hd->pins[base + i])) { ret = PTR_ERR(hd->pins[base + i]); goto fail3; } } hd->pins[PIN_CTRL_E] = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(hd->pins[PIN_CTRL_E])) { ret = PTR_ERR(hd->pins[PIN_CTRL_E]); goto fail3; } hd->pins[PIN_CTRL_RS] = devm_gpiod_get(dev, "rs", GPIOD_OUT_HIGH); if (IS_ERR(hd->pins[PIN_CTRL_RS])) { ret = PTR_ERR(hd->pins[PIN_CTRL_RS]); goto fail3; } /* Optional pins */ hd->pins[PIN_CTRL_RW] = devm_gpiod_get_optional(dev, "rw", GPIOD_OUT_LOW); if (IS_ERR(hd->pins[PIN_CTRL_RW])) { ret = PTR_ERR(hd->pins[PIN_CTRL_RW]); goto fail3; } hd->pins[PIN_CTRL_BL] = devm_gpiod_get_optional(dev, "backlight", GPIOD_OUT_LOW); if (IS_ERR(hd->pins[PIN_CTRL_BL])) { ret = PTR_ERR(hd->pins[PIN_CTRL_BL]); goto fail3; } /* Required properties */ ret = device_property_read_u32(dev, "display-height-chars", &lcd->height); if (ret) goto fail3; ret = device_property_read_u32(dev, "display-width-chars", &lcd->width); if (ret) goto fail3; /* * On displays with more than two rows, the internal buffer width is * usually equal to the display width */ if (lcd->height > 2) hdc->bwidth = lcd->width; /* Optional properties */ device_property_read_u32(dev, "internal-buffer-width", &hdc->bwidth); hdc->ifwidth = ifwidth; if (ifwidth == 8) { lcd->ops = &hd44780_ops_gpio8; hdc->write_data = hd44780_write_data_gpio8; hdc->write_cmd = hd44780_write_cmd_gpio8; } else { lcd->ops = &hd44780_ops_gpio4; hdc->write_data = hd44780_write_data_gpio4; hdc->write_cmd = hd44780_write_cmd_gpio4; hdc->write_cmd_raw4 = hd44780_write_cmd_raw_gpio4; } ret = charlcd_register(lcd); if (ret) goto fail3; platform_set_drvdata(pdev, lcd); return 0; fail3: kfree(hd); fail2: kfree(lcd); fail1: kfree(hdc); return ret; } static int hd44780_remove(struct platform_device *pdev) { struct charlcd *lcd = platform_get_drvdata(pdev); struct 
hd44780_common *hdc = lcd->drvdata; charlcd_unregister(lcd); kfree(hdc->hd44780); kfree(lcd->drvdata); kfree(lcd); return 0; } static const struct of_device_id hd44780_of_match[] = { { .compatible = "hit,hd44780" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, hd44780_of_match); static struct platform_driver hd44780_driver = { .probe = hd44780_probe, .remove = hd44780_remove, .driver = { .name = "hd44780", .of_match_table = hd44780_of_match, }, }; module_platform_driver(hd44780_driver); MODULE_DESCRIPTION("HD44780 Character LCD driver"); MODULE_AUTHOR("Geert Uytterhoeven <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/auxdisplay/hd44780.c
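In the 4-bit GPIO path, hd44780_write_gpio4() sends each byte as two strobed nibbles, high nibble first, with RS carried in bit 4 of the value bitmap handed to gpiod_set_array_value_cansleep(). The userspace sketch below mimics that sequencing; gpio_set() is a stand-in for the GPIO array call and simply prints what would be driven onto DATA4..DATA7 and RS:

#include <stdio.h>
#include <stdint.h>

static void gpio_set(uint8_t lines)
{
	/* stand-in for gpiod_set_array_value_cansleep():
	 * bits 0-3 = DATA4..DATA7, bit 4 = RS */
	printf("D7..D4 = %x%x%x%x  RS = %u  (strobe E)\n",
	       (lines >> 3) & 1, (lines >> 2) & 1,
	       (lines >> 1) & 1, lines & 1, (lines >> 4) & 1);
}

static void write_gpio4(uint8_t val, unsigned int rs)
{
	uint8_t lines;

	/* high nibble + RS, then strobe E */
	lines = (val >> 4) | (rs ? 0x10 : 0);
	gpio_set(lines);

	/* low nibble, RS unchanged, then strobe E again */
	lines = (lines & ~0x0f) | (val & 0x0f);
	gpio_set(lines);
}

int main(void)
{
	write_gpio4(0x28, 0);	/* FUNCTION_SET | TWO_LINES as a command (RS = 0) */
	write_gpio4('A', 1);	/* character data (RS = 1) */
	return 0;
}

The real driver additionally keeps RW in the bitmap when an rw GPIO is present and inserts the 20/40 us setup and strobe delays via hd44780_strobe_gpio().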
// SPDX-License-Identifier: GPL-2.0 /* * Console driver for LCD2S 4x20 character displays connected through i2c. * The display also has a SPI interface, but the driver does not support * this yet. * * This is a driver allowing you to use a LCD2S 4x20 from Modtronix * engineering as auxdisplay character device. * * (C) 2019 by Lemonage Software GmbH * Author: Lars Pöschel <[email protected]> * All rights reserved. */ #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/property.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/delay.h> #include "charlcd.h" #define LCD2S_CMD_CUR_MOVES_FWD 0x09 #define LCD2S_CMD_CUR_BLINK_OFF 0x10 #define LCD2S_CMD_CUR_UL_OFF 0x11 #define LCD2S_CMD_DISPLAY_OFF 0x12 #define LCD2S_CMD_CUR_BLINK_ON 0x18 #define LCD2S_CMD_CUR_UL_ON 0x19 #define LCD2S_CMD_DISPLAY_ON 0x1a #define LCD2S_CMD_BACKLIGHT_OFF 0x20 #define LCD2S_CMD_BACKLIGHT_ON 0x28 #define LCD2S_CMD_WRITE 0x80 #define LCD2S_CMD_MOV_CUR_RIGHT 0x83 #define LCD2S_CMD_MOV_CUR_LEFT 0x84 #define LCD2S_CMD_SHIFT_RIGHT 0x85 #define LCD2S_CMD_SHIFT_LEFT 0x86 #define LCD2S_CMD_SHIFT_UP 0x87 #define LCD2S_CMD_SHIFT_DOWN 0x88 #define LCD2S_CMD_CUR_ADDR 0x89 #define LCD2S_CMD_CUR_POS 0x8a #define LCD2S_CMD_CUR_RESET 0x8b #define LCD2S_CMD_CLEAR 0x8c #define LCD2S_CMD_DEF_CUSTOM_CHAR 0x92 #define LCD2S_CMD_READ_STATUS 0xd0 #define LCD2S_CHARACTER_SIZE 8 #define LCD2S_STATUS_BUF_MASK 0x7f struct lcd2s_data { struct i2c_client *i2c; struct charlcd *charlcd; }; static s32 lcd2s_wait_buf_free(const struct i2c_client *client, int count) { s32 status; status = i2c_smbus_read_byte_data(client, LCD2S_CMD_READ_STATUS); if (status < 0) return status; while ((status & LCD2S_STATUS_BUF_MASK) < count) { mdelay(1); status = i2c_smbus_read_byte_data(client, LCD2S_CMD_READ_STATUS); if (status < 0) return status; } return 0; } static int lcd2s_i2c_master_send(const struct i2c_client *client, const char *buf, int count) { s32 status; status = lcd2s_wait_buf_free(client, count); if (status < 0) return status; return i2c_master_send(client, buf, count); } static int lcd2s_i2c_smbus_write_byte(const struct i2c_client *client, u8 value) { s32 status; status = lcd2s_wait_buf_free(client, 1); if (status < 0) return status; return i2c_smbus_write_byte(client, value); } static int lcd2s_print(struct charlcd *lcd, int c) { struct lcd2s_data *lcd2s = lcd->drvdata; u8 buf[2] = { LCD2S_CMD_WRITE, c }; lcd2s_i2c_master_send(lcd2s->i2c, buf, sizeof(buf)); return 0; } static int lcd2s_gotoxy(struct charlcd *lcd, unsigned int x, unsigned int y) { struct lcd2s_data *lcd2s = lcd->drvdata; u8 buf[3] = { LCD2S_CMD_CUR_POS, y + 1, x + 1 }; lcd2s_i2c_master_send(lcd2s->i2c, buf, sizeof(buf)); return 0; } static int lcd2s_home(struct charlcd *lcd) { struct lcd2s_data *lcd2s = lcd->drvdata; lcd2s_i2c_smbus_write_byte(lcd2s->i2c, LCD2S_CMD_CUR_RESET); return 0; } static int lcd2s_init_display(struct charlcd *lcd) { struct lcd2s_data *lcd2s = lcd->drvdata; /* turn everything off, but display on */ lcd2s_i2c_smbus_write_byte(lcd2s->i2c, LCD2S_CMD_DISPLAY_ON); lcd2s_i2c_smbus_write_byte(lcd2s->i2c, LCD2S_CMD_BACKLIGHT_OFF); lcd2s_i2c_smbus_write_byte(lcd2s->i2c, LCD2S_CMD_CUR_MOVES_FWD); lcd2s_i2c_smbus_write_byte(lcd2s->i2c, LCD2S_CMD_CUR_BLINK_OFF); lcd2s_i2c_smbus_write_byte(lcd2s->i2c, LCD2S_CMD_CUR_UL_OFF); lcd2s_i2c_smbus_write_byte(lcd2s->i2c, LCD2S_CMD_CLEAR); return 0; } static int lcd2s_shift_cursor(struct charlcd *lcd, enum charlcd_shift_dir dir) { struct lcd2s_data *lcd2s = 
lcd->drvdata; if (dir == CHARLCD_SHIFT_LEFT) lcd2s_i2c_smbus_write_byte(lcd2s->i2c, LCD2S_CMD_MOV_CUR_LEFT); else lcd2s_i2c_smbus_write_byte(lcd2s->i2c, LCD2S_CMD_MOV_CUR_RIGHT); return 0; } static int lcd2s_shift_display(struct charlcd *lcd, enum charlcd_shift_dir dir) { struct lcd2s_data *lcd2s = lcd->drvdata; if (dir == CHARLCD_SHIFT_LEFT) lcd2s_i2c_smbus_write_byte(lcd2s->i2c, LCD2S_CMD_SHIFT_LEFT); else lcd2s_i2c_smbus_write_byte(lcd2s->i2c, LCD2S_CMD_SHIFT_RIGHT); return 0; } static void lcd2s_backlight(struct charlcd *lcd, enum charlcd_onoff on) { struct lcd2s_data *lcd2s = lcd->drvdata; if (on) lcd2s_i2c_smbus_write_byte(lcd2s->i2c, LCD2S_CMD_BACKLIGHT_ON); else lcd2s_i2c_smbus_write_byte(lcd2s->i2c, LCD2S_CMD_BACKLIGHT_OFF); } static int lcd2s_display(struct charlcd *lcd, enum charlcd_onoff on) { struct lcd2s_data *lcd2s = lcd->drvdata; if (on) lcd2s_i2c_smbus_write_byte(lcd2s->i2c, LCD2S_CMD_DISPLAY_ON); else lcd2s_i2c_smbus_write_byte(lcd2s->i2c, LCD2S_CMD_DISPLAY_OFF); return 0; } static int lcd2s_cursor(struct charlcd *lcd, enum charlcd_onoff on) { struct lcd2s_data *lcd2s = lcd->drvdata; if (on) lcd2s_i2c_smbus_write_byte(lcd2s->i2c, LCD2S_CMD_CUR_UL_ON); else lcd2s_i2c_smbus_write_byte(lcd2s->i2c, LCD2S_CMD_CUR_UL_OFF); return 0; } static int lcd2s_blink(struct charlcd *lcd, enum charlcd_onoff on) { struct lcd2s_data *lcd2s = lcd->drvdata; if (on) lcd2s_i2c_smbus_write_byte(lcd2s->i2c, LCD2S_CMD_CUR_BLINK_ON); else lcd2s_i2c_smbus_write_byte(lcd2s->i2c, LCD2S_CMD_CUR_BLINK_OFF); return 0; } static int lcd2s_fontsize(struct charlcd *lcd, enum charlcd_fontsize size) { return 0; } static int lcd2s_lines(struct charlcd *lcd, enum charlcd_lines lines) { return 0; } /* * Generator: LGcxxxxx...xx; must have <c> between '0' and '7', * representing the numerical ASCII code of the redefined character, * and <xx...xx> a sequence of 16 hex digits representing 8 bytes * for each character. Most LCDs will only use 5 lower bits of * the 7 first bytes. 
*/ static int lcd2s_redefine_char(struct charlcd *lcd, char *esc) { struct lcd2s_data *lcd2s = lcd->drvdata; u8 buf[LCD2S_CHARACTER_SIZE + 2] = { LCD2S_CMD_DEF_CUSTOM_CHAR }; u8 value; int shift, i; if (!strchr(esc, ';')) return 0; esc++; buf[1] = *(esc++) - '0'; if (buf[1] > 7) return 1; i = 2; shift = 0; value = 0; while (*esc && i < LCD2S_CHARACTER_SIZE + 2) { int half; shift ^= 4; half = hex_to_bin(*esc++); if (half < 0) continue; value |= half << shift; if (shift == 0) { buf[i++] = value; value = 0; } } lcd2s_i2c_master_send(lcd2s->i2c, buf, sizeof(buf)); return 1; } static int lcd2s_clear_display(struct charlcd *lcd) { struct lcd2s_data *lcd2s = lcd->drvdata; /* This implicitly sets cursor to first row and column */ lcd2s_i2c_smbus_write_byte(lcd2s->i2c, LCD2S_CMD_CLEAR); return 0; } static const struct charlcd_ops lcd2s_ops = { .print = lcd2s_print, .backlight = lcd2s_backlight, .gotoxy = lcd2s_gotoxy, .home = lcd2s_home, .clear_display = lcd2s_clear_display, .init_display = lcd2s_init_display, .shift_cursor = lcd2s_shift_cursor, .shift_display = lcd2s_shift_display, .display = lcd2s_display, .cursor = lcd2s_cursor, .blink = lcd2s_blink, .fontsize = lcd2s_fontsize, .lines = lcd2s_lines, .redefine_char = lcd2s_redefine_char, }; static int lcd2s_i2c_probe(struct i2c_client *i2c) { struct charlcd *lcd; struct lcd2s_data *lcd2s; int err; if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA | I2C_FUNC_SMBUS_WRITE_BLOCK_DATA)) return -EIO; lcd2s = devm_kzalloc(&i2c->dev, sizeof(*lcd2s), GFP_KERNEL); if (!lcd2s) return -ENOMEM; /* Test, if the display is responding */ err = lcd2s_i2c_smbus_write_byte(i2c, LCD2S_CMD_DISPLAY_OFF); if (err < 0) return err; lcd = charlcd_alloc(); if (!lcd) return -ENOMEM; lcd->drvdata = lcd2s; lcd2s->i2c = i2c; lcd2s->charlcd = lcd; /* Required properties */ err = device_property_read_u32(&i2c->dev, "display-height-chars", &lcd->height); if (err) goto fail1; err = device_property_read_u32(&i2c->dev, "display-width-chars", &lcd->width); if (err) goto fail1; lcd->ops = &lcd2s_ops; err = charlcd_register(lcd2s->charlcd); if (err) goto fail1; i2c_set_clientdata(i2c, lcd2s); return 0; fail1: charlcd_free(lcd2s->charlcd); return err; } static void lcd2s_i2c_remove(struct i2c_client *i2c) { struct lcd2s_data *lcd2s = i2c_get_clientdata(i2c); charlcd_unregister(lcd2s->charlcd); charlcd_free(lcd2s->charlcd); } static const struct i2c_device_id lcd2s_i2c_id[] = { { "lcd2s", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, lcd2s_i2c_id); static const struct of_device_id lcd2s_of_table[] = { { .compatible = "modtronix,lcd2s" }, { } }; MODULE_DEVICE_TABLE(of, lcd2s_of_table); static struct i2c_driver lcd2s_i2c_driver = { .driver = { .name = "lcd2s", .of_match_table = lcd2s_of_table, }, .probe = lcd2s_i2c_probe, .remove = lcd2s_i2c_remove, .id_table = lcd2s_i2c_id, }; module_i2c_driver(lcd2s_i2c_driver); MODULE_DESCRIPTION("LCD2S character display driver"); MODULE_AUTHOR("Lars Poeschel"); MODULE_LICENSE("GPL");
linux-master
drivers/auxdisplay/lcd2s.c
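Both lcd2s_redefine_char() and hd44780_common_redefine_char() share the same hex-digit accumulation loop: pairs of hex digits from the LG escape are folded into bytes, high nibble first, producing the eight CGRAM rows of a user-defined character, with non-hex characters skipped. A userspace sketch of that loop follows (parse_cgram() is a hypothetical helper; the drivers use hex_to_bin() instead of ctype checks):

#include <stdio.h>
#include <ctype.h>

static int parse_cgram(const char *hex, unsigned char out[8])
{
	int shift = 0, off = 0;
	unsigned char value = 0;

	while (*hex && off < 8) {
		int half;

		shift ^= 4;		/* alternate high/low nibble */
		if (isdigit((unsigned char)*hex))
			half = *hex - '0';
		else if (isxdigit((unsigned char)*hex))
			half = tolower((unsigned char)*hex) - 'a' + 10;
		else {
			hex++;
			continue;	/* skip non-hex, as the drivers do */
		}
		hex++;
		value |= half << shift;
		if (shift == 0) {	/* low nibble done: emit the byte */
			out[off++] = value;
			value = 0;
		}
	}
	return off;			/* number of CGRAM rows produced */
}

int main(void)
{
	unsigned char rows[8];
	int i, n = parse_cgram("0e1f1f1f0e000000", rows);

	for (i = 0; i < n; i++)
		printf("row %d: 0x%02x\n", i, rows[i]);
	return 0;
}

The lcd2s driver then prepends LCD2S_CMD_DEF_CUSTOM_CHAR and the character index and sends the buffer over I2C, whereas the HD44780 path writes the rows through LCD_CMD_SET_CGRAM_ADDR.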
// SPDX-License-Identifier: GPL-2.0 /* * Filename: cfag12864bfb.c * Version: 0.1.0 * Description: cfag12864b LCD framebuffer driver * Depends: cfag12864b * * Author: Copyright (C) Miguel Ojeda <[email protected]> * Date: 2006-10-31 */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/fb.h> #include <linux/mm.h> #include <linux/platform_device.h> #include <linux/cfag12864b.h> #define CFAG12864BFB_NAME "cfag12864bfb" static const struct fb_fix_screeninfo cfag12864bfb_fix = { .id = "cfag12864b", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_MONO10, .xpanstep = 0, .ypanstep = 0, .ywrapstep = 0, .line_length = CFAG12864B_WIDTH / 8, .accel = FB_ACCEL_NONE, }; static const struct fb_var_screeninfo cfag12864bfb_var = { .xres = CFAG12864B_WIDTH, .yres = CFAG12864B_HEIGHT, .xres_virtual = CFAG12864B_WIDTH, .yres_virtual = CFAG12864B_HEIGHT, .bits_per_pixel = 1, .red = { 0, 1, 0 }, .green = { 0, 1, 0 }, .blue = { 0, 1, 0 }, .left_margin = 0, .right_margin = 0, .upper_margin = 0, .lower_margin = 0, .vmode = FB_VMODE_NONINTERLACED, }; static int cfag12864bfb_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct page *pages = virt_to_page(cfag12864b_buffer); return vm_map_pages_zero(vma, &pages, 1); } static const struct fb_ops cfag12864bfb_ops = { .owner = THIS_MODULE, .fb_read = fb_sys_read, .fb_write = fb_sys_write, .fb_fillrect = sys_fillrect, .fb_copyarea = sys_copyarea, .fb_imageblit = sys_imageblit, .fb_mmap = cfag12864bfb_mmap, }; static int cfag12864bfb_probe(struct platform_device *device) { int ret = -EINVAL; struct fb_info *info = framebuffer_alloc(0, &device->dev); if (!info) goto none; info->screen_buffer = cfag12864b_buffer; info->screen_size = CFAG12864B_SIZE; info->fbops = &cfag12864bfb_ops; info->fix = cfag12864bfb_fix; info->var = cfag12864bfb_var; info->pseudo_palette = NULL; info->par = NULL; if (register_framebuffer(info) < 0) goto fballoced; platform_set_drvdata(device, info); fb_info(info, "%s frame buffer device\n", info->fix.id); return 0; fballoced: framebuffer_release(info); none: return ret; } static int cfag12864bfb_remove(struct platform_device *device) { struct fb_info *info = platform_get_drvdata(device); if (info) { unregister_framebuffer(info); framebuffer_release(info); } return 0; } static struct platform_driver cfag12864bfb_driver = { .probe = cfag12864bfb_probe, .remove = cfag12864bfb_remove, .driver = { .name = CFAG12864BFB_NAME, }, }; static struct platform_device *cfag12864bfb_device; static int __init cfag12864bfb_init(void) { int ret = -EINVAL; /* cfag12864b_init() must be called first */ if (!cfag12864b_isinited()) { printk(KERN_ERR CFAG12864BFB_NAME ": ERROR: " "cfag12864b is not initialized\n"); goto none; } if (cfag12864b_enable()) { printk(KERN_ERR CFAG12864BFB_NAME ": ERROR: " "can't enable cfag12864b refreshing (being used)\n"); return -ENODEV; } ret = platform_driver_register(&cfag12864bfb_driver); if (!ret) { cfag12864bfb_device = platform_device_alloc(CFAG12864BFB_NAME, 0); if (cfag12864bfb_device) ret = platform_device_add(cfag12864bfb_device); else ret = -ENOMEM; if (ret) { platform_device_put(cfag12864bfb_device); platform_driver_unregister(&cfag12864bfb_driver); } } none: return ret; } static void __exit cfag12864bfb_exit(void) { platform_device_unregister(cfag12864bfb_device); platform_driver_unregister(&cfag12864bfb_driver); cfag12864b_disable(); } module_init(cfag12864bfb_init); module_exit(cfag12864bfb_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Miguel Ojeda 
<[email protected]>"); MODULE_DESCRIPTION("cfag12864b LCD framebuffer driver");
linux-master
drivers/auxdisplay/cfag12864bfb.c
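cfag12864bfb exposes the cfag12864b_buffer as a packed 1bpp mono framebuffer: line_length is CFAG12864B_WIDTH / 8 = 16 bytes and the whole 128x64 screen fits in 1024 bytes. The sketch below shows the pixel addressing implied by that layout; the MSB-first bit order within each byte is an assumption made for illustration, not something this file states:

#include <stdio.h>
#include <string.h>

#define WIDTH	128
#define HEIGHT	64
#define LINE	(WIDTH / 8)		/* 16 bytes per scanline */

static unsigned char fb[LINE * HEIGHT];	/* 1024-byte shadow buffer */

static void set_pixel(unsigned int x, unsigned int y)
{
	/* packed 1bpp: eight horizontal pixels per byte, MSB first assumed */
	fb[y * LINE + x / 8] |= 0x80 >> (x % 8);
}

int main(void)
{
	unsigned int x;

	memset(fb, 0, sizeof(fb));
	for (x = 0; x < WIDTH; x++)
		set_pixel(x, HEIGHT / 2);	/* draw a horizontal line */
	printf("buffer size: %zu bytes, line length: %d bytes\n",
	       sizeof(fb), LINE);
	return 0;
}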
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2016 Imagination Technologies * Author: Paul Burton <[email protected]> */ #include <linux/kernel.h> #include <linux/io.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/slab.h> #include "line-display.h" struct img_ascii_lcd_ctx; /** * struct img_ascii_lcd_config - Configuration information about an LCD model * @num_chars: the number of characters the LCD can display * @external_regmap: true if registers are in a system controller, else false * @update: function called to update the LCD */ struct img_ascii_lcd_config { unsigned int num_chars; bool external_regmap; void (*update)(struct linedisp *linedisp); }; /** * struct img_ascii_lcd_ctx - Private data structure * @base: the base address of the LCD registers * @regmap: the regmap through which LCD registers are accessed * @offset: the offset within regmap to the start of the LCD registers * @cfg: pointer to the LCD model configuration * @linedisp: line display structure * @curr: the string currently displayed on the LCD */ struct img_ascii_lcd_ctx { union { void __iomem *base; struct regmap *regmap; }; u32 offset; const struct img_ascii_lcd_config *cfg; struct linedisp linedisp; char curr[] __aligned(8); }; /* * MIPS Boston development board */ static void boston_update(struct linedisp *linedisp) { struct img_ascii_lcd_ctx *ctx = container_of(linedisp, struct img_ascii_lcd_ctx, linedisp); ulong val; #if BITS_PER_LONG == 64 val = *((u64 *)&ctx->curr[0]); __raw_writeq(val, ctx->base); #elif BITS_PER_LONG == 32 val = *((u32 *)&ctx->curr[0]); __raw_writel(val, ctx->base); val = *((u32 *)&ctx->curr[4]); __raw_writel(val, ctx->base + 4); #else # error Not 32 or 64 bit #endif } static struct img_ascii_lcd_config boston_config = { .num_chars = 8, .update = boston_update, }; /* * MIPS Malta development board */ static void malta_update(struct linedisp *linedisp) { struct img_ascii_lcd_ctx *ctx = container_of(linedisp, struct img_ascii_lcd_ctx, linedisp); unsigned int i; int err = 0; for (i = 0; i < linedisp->num_chars; i++) { err = regmap_write(ctx->regmap, ctx->offset + (i * 8), ctx->curr[i]); if (err) break; } if (unlikely(err)) pr_err_ratelimited("Failed to update LCD display: %d\n", err); } static struct img_ascii_lcd_config malta_config = { .num_chars = 8, .external_regmap = true, .update = malta_update, }; /* * MIPS SEAD3 development board */ enum { SEAD3_REG_LCD_CTRL = 0x00, #define SEAD3_REG_LCD_CTRL_SETDRAM BIT(7) SEAD3_REG_LCD_DATA = 0x08, SEAD3_REG_CPLD_STATUS = 0x10, #define SEAD3_REG_CPLD_STATUS_BUSY BIT(0) SEAD3_REG_CPLD_DATA = 0x18, #define SEAD3_REG_CPLD_DATA_BUSY BIT(7) }; static int sead3_wait_sm_idle(struct img_ascii_lcd_ctx *ctx) { unsigned int status; int err; do { err = regmap_read(ctx->regmap, ctx->offset + SEAD3_REG_CPLD_STATUS, &status); if (err) return err; } while (status & SEAD3_REG_CPLD_STATUS_BUSY); return 0; } static int sead3_wait_lcd_idle(struct img_ascii_lcd_ctx *ctx) { unsigned int cpld_data; int err; err = sead3_wait_sm_idle(ctx); if (err) return err; do { err = regmap_read(ctx->regmap, ctx->offset + SEAD3_REG_LCD_CTRL, &cpld_data); if (err) return err; err = sead3_wait_sm_idle(ctx); if (err) return err; err = regmap_read(ctx->regmap, ctx->offset + SEAD3_REG_CPLD_DATA, &cpld_data); if (err) return err; } while (cpld_data & SEAD3_REG_CPLD_DATA_BUSY); return 0; } static void sead3_update(struct linedisp 
*linedisp) { struct img_ascii_lcd_ctx *ctx = container_of(linedisp, struct img_ascii_lcd_ctx, linedisp); unsigned int i; int err = 0; for (i = 0; i < linedisp->num_chars; i++) { err = sead3_wait_lcd_idle(ctx); if (err) break; err = regmap_write(ctx->regmap, ctx->offset + SEAD3_REG_LCD_CTRL, SEAD3_REG_LCD_CTRL_SETDRAM | i); if (err) break; err = sead3_wait_lcd_idle(ctx); if (err) break; err = regmap_write(ctx->regmap, ctx->offset + SEAD3_REG_LCD_DATA, ctx->curr[i]); if (err) break; } if (unlikely(err)) pr_err_ratelimited("Failed to update LCD display: %d\n", err); } static struct img_ascii_lcd_config sead3_config = { .num_chars = 16, .external_regmap = true, .update = sead3_update, }; static const struct of_device_id img_ascii_lcd_matches[] = { { .compatible = "img,boston-lcd", .data = &boston_config }, { .compatible = "mti,malta-lcd", .data = &malta_config }, { .compatible = "mti,sead3-lcd", .data = &sead3_config }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, img_ascii_lcd_matches); /** * img_ascii_lcd_probe() - probe an LCD display device * @pdev: the LCD platform device * * Probe an LCD display device, ensuring that we have the required resources in * order to access the LCD & setting up private data as well as sysfs files. * * Return: 0 on success, else -ERRNO */ static int img_ascii_lcd_probe(struct platform_device *pdev) { const struct of_device_id *match; const struct img_ascii_lcd_config *cfg; struct device *dev = &pdev->dev; struct img_ascii_lcd_ctx *ctx; int err; match = of_match_device(img_ascii_lcd_matches, dev); if (!match) return -ENODEV; cfg = match->data; ctx = devm_kzalloc(dev, sizeof(*ctx) + cfg->num_chars, GFP_KERNEL); if (!ctx) return -ENOMEM; if (cfg->external_regmap) { ctx->regmap = syscon_node_to_regmap(dev->parent->of_node); if (IS_ERR(ctx->regmap)) return PTR_ERR(ctx->regmap); if (of_property_read_u32(dev->of_node, "offset", &ctx->offset)) return -EINVAL; } else { ctx->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctx->base)) return PTR_ERR(ctx->base); } err = linedisp_register(&ctx->linedisp, dev, cfg->num_chars, ctx->curr, cfg->update); if (err) return err; /* for backwards compatibility */ err = compat_only_sysfs_link_entry_to_kobj(&dev->kobj, &ctx->linedisp.dev.kobj, "message", NULL); if (err) goto err_unregister; platform_set_drvdata(pdev, ctx); return 0; err_unregister: linedisp_unregister(&ctx->linedisp); return err; } /** * img_ascii_lcd_remove() - remove an LCD display device * @pdev: the LCD platform device * * Remove an LCD display device, freeing private resources & ensuring that the * driver stops using the LCD display registers. * * Return: 0 */ static int img_ascii_lcd_remove(struct platform_device *pdev) { struct img_ascii_lcd_ctx *ctx = platform_get_drvdata(pdev); sysfs_remove_link(&pdev->dev.kobj, "message"); linedisp_unregister(&ctx->linedisp); return 0; } static struct platform_driver img_ascii_lcd_driver = { .driver = { .name = "img-ascii-lcd", .of_match_table = img_ascii_lcd_matches, }, .probe = img_ascii_lcd_probe, .remove = img_ascii_lcd_remove, }; module_platform_driver(img_ascii_lcd_driver); MODULE_DESCRIPTION("Imagination Technologies ASCII LCD Display"); MODULE_AUTHOR("Paul Burton <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/auxdisplay/img-ascii-lcd.c
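On the Boston board, boston_update() pushes all eight visible characters to the LCD register in a single raw 64-bit store (or two 32-bit stores on 32-bit kernels), reading them straight out of ctx->curr. A tiny userspace sketch of that packing follows; it uses memcpy() rather than the driver's aligned pointer cast, and the resulting word order follows host endianness just as it does for the direct cast in the driver:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	char curr[8] = { 'L', 'i', 'n', 'u', 'x', ' ', ' ', ' ' };
	uint64_t val;

	/* gather the 8 display characters into one register-sized word */
	memcpy(&val, curr, sizeof(val));

	/* the driver would now do __raw_writeq(val, ctx->base) */
	printf("register word: 0x%016llx\n", (unsigned long long)val);
	return 0;
}

The Malta and SEAD3 variants instead go through a syscon regmap and write one character per register slot, polling a CPLD busy flag in the SEAD3 case.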
// SPDX-License-Identifier: GPL-2.0 /* * Driver for the on-board character LCD found on some ARM reference boards * This is basically an Hitachi HD44780 LCD with a custom IP block to drive it * https://en.wikipedia.org/wiki/HD44780_Character_LCD * Currently it will just display the text "ARM Linux" and the linux version * * Author: Linus Walleij <[email protected]> */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <generated/utsrelease.h> #define DRIVERNAME "arm-charlcd" #define CHARLCD_TIMEOUT (msecs_to_jiffies(1000)) /* Offsets to registers */ #define CHAR_COM 0x00U #define CHAR_DAT 0x04U #define CHAR_RD 0x08U #define CHAR_RAW 0x0CU #define CHAR_MASK 0x10U #define CHAR_STAT 0x14U #define CHAR_RAW_CLEAR 0x00000000U #define CHAR_RAW_VALID 0x00000100U /* Hitachi HD44780 display commands */ #define HD_CLEAR 0x01U #define HD_HOME 0x02U #define HD_ENTRYMODE 0x04U #define HD_ENTRYMODE_INCREMENT 0x02U #define HD_ENTRYMODE_SHIFT 0x01U #define HD_DISPCTRL 0x08U #define HD_DISPCTRL_ON 0x04U #define HD_DISPCTRL_CURSOR_ON 0x02U #define HD_DISPCTRL_CURSOR_BLINK 0x01U #define HD_CRSR_SHIFT 0x10U #define HD_CRSR_SHIFT_DISPLAY 0x08U #define HD_CRSR_SHIFT_DISPLAY_RIGHT 0x04U #define HD_FUNCSET 0x20U #define HD_FUNCSET_8BIT 0x10U #define HD_FUNCSET_2_LINES 0x08U #define HD_FUNCSET_FONT_5X10 0x04U #define HD_SET_CGRAM 0x40U #define HD_SET_DDRAM 0x80U #define HD_BUSY_FLAG 0x80U /** * struct charlcd - Private data structure * @dev: a pointer back to containing device * @phybase: the offset to the controller in physical memory * @physize: the size of the physical page * @virtbase: the offset to the controller in virtual memory * @irq: reserved interrupt number * @complete: completion structure for the last LCD command * @init_work: delayed work structure to initialize the display on boot */ struct charlcd { struct device *dev; u32 phybase; u32 physize; void __iomem *virtbase; int irq; struct completion complete; struct delayed_work init_work; }; static irqreturn_t charlcd_interrupt(int irq, void *data) { struct charlcd *lcd = data; u8 status; status = readl(lcd->virtbase + CHAR_STAT) & 0x01; /* Clear IRQ */ writel(CHAR_RAW_CLEAR, lcd->virtbase + CHAR_RAW); if (status) complete(&lcd->complete); else dev_info(lcd->dev, "Spurious IRQ (%02x)\n", status); return IRQ_HANDLED; } static void charlcd_wait_complete_irq(struct charlcd *lcd) { int ret; ret = wait_for_completion_interruptible_timeout(&lcd->complete, CHARLCD_TIMEOUT); /* Disable IRQ after completion */ writel(0x00, lcd->virtbase + CHAR_MASK); if (ret < 0) { dev_err(lcd->dev, "wait_for_completion_interruptible_timeout() " "returned %d waiting for ready\n", ret); return; } if (ret == 0) { dev_err(lcd->dev, "charlcd controller timed out " "waiting for ready\n"); return; } } static u8 charlcd_4bit_read_char(struct charlcd *lcd) { u8 data; u32 val; int i; /* If we can, use an IRQ to wait for the data, else poll */ if (lcd->irq >= 0) charlcd_wait_complete_irq(lcd); else { i = 0; val = 0; while (!(val & CHAR_RAW_VALID) && i < 10) { udelay(100); val = readl(lcd->virtbase + CHAR_RAW); i++; } writel(CHAR_RAW_CLEAR, lcd->virtbase + CHAR_RAW); } msleep(1); /* Read the 4 high bits of the data */ data = readl(lcd->virtbase + CHAR_RD) & 0xf0; /* * The second read for the low bits does not trigger an IRQ * so in this case we have to poll for the 4 lower bits */ i = 0; val = 
0; while (!(val & CHAR_RAW_VALID) && i < 10) { udelay(100); val = readl(lcd->virtbase + CHAR_RAW); i++; } writel(CHAR_RAW_CLEAR, lcd->virtbase + CHAR_RAW); msleep(1); /* Read the 4 low bits of the data */ data |= (readl(lcd->virtbase + CHAR_RD) >> 4) & 0x0f; return data; } static bool charlcd_4bit_read_bf(struct charlcd *lcd) { if (lcd->irq >= 0) { /* * If we'll use IRQs to wait for the busyflag, clear any * pending flag and enable IRQ */ writel(CHAR_RAW_CLEAR, lcd->virtbase + CHAR_RAW); init_completion(&lcd->complete); writel(0x01, lcd->virtbase + CHAR_MASK); } readl(lcd->virtbase + CHAR_COM); return charlcd_4bit_read_char(lcd) & HD_BUSY_FLAG ? true : false; } static void charlcd_4bit_wait_busy(struct charlcd *lcd) { int retries = 50; udelay(100); while (charlcd_4bit_read_bf(lcd) && retries) retries--; if (!retries) dev_err(lcd->dev, "timeout waiting for busyflag\n"); } static void charlcd_4bit_command(struct charlcd *lcd, u8 cmd) { u32 cmdlo = (cmd << 4) & 0xf0; u32 cmdhi = (cmd & 0xf0); writel(cmdhi, lcd->virtbase + CHAR_COM); udelay(10); writel(cmdlo, lcd->virtbase + CHAR_COM); charlcd_4bit_wait_busy(lcd); } static void charlcd_4bit_char(struct charlcd *lcd, u8 ch) { u32 chlo = (ch << 4) & 0xf0; u32 chhi = (ch & 0xf0); writel(chhi, lcd->virtbase + CHAR_DAT); udelay(10); writel(chlo, lcd->virtbase + CHAR_DAT); charlcd_4bit_wait_busy(lcd); } static void charlcd_4bit_print(struct charlcd *lcd, int line, const char *str) { u8 offset; int i; /* * We support line 0, 1 * Line 1 runs from 0x00..0x27 * Line 2 runs from 0x28..0x4f */ if (line == 0) offset = 0; else if (line == 1) offset = 0x28; else return; /* Set offset */ charlcd_4bit_command(lcd, HD_SET_DDRAM | offset); /* Send string */ for (i = 0; i < strlen(str) && i < 0x28; i++) charlcd_4bit_char(lcd, str[i]); } static void charlcd_4bit_init(struct charlcd *lcd) { /* These commands cannot be checked with the busy flag */ writel(HD_FUNCSET | HD_FUNCSET_8BIT, lcd->virtbase + CHAR_COM); msleep(5); writel(HD_FUNCSET | HD_FUNCSET_8BIT, lcd->virtbase + CHAR_COM); udelay(100); writel(HD_FUNCSET | HD_FUNCSET_8BIT, lcd->virtbase + CHAR_COM); udelay(100); /* Go to 4bit mode */ writel(HD_FUNCSET, lcd->virtbase + CHAR_COM); udelay(100); /* * 4bit mode, 2 lines, 5x8 font, after this the number of lines * and the font cannot be changed until the next initialization sequence */ charlcd_4bit_command(lcd, HD_FUNCSET | HD_FUNCSET_2_LINES); charlcd_4bit_command(lcd, HD_DISPCTRL | HD_DISPCTRL_ON); charlcd_4bit_command(lcd, HD_ENTRYMODE | HD_ENTRYMODE_INCREMENT); charlcd_4bit_command(lcd, HD_CLEAR); charlcd_4bit_command(lcd, HD_HOME); /* Put something useful in the display */ charlcd_4bit_print(lcd, 0, "ARM Linux"); charlcd_4bit_print(lcd, 1, UTS_RELEASE); } static void charlcd_init_work(struct work_struct *work) { struct charlcd *lcd = container_of(work, struct charlcd, init_work.work); charlcd_4bit_init(lcd); } static int __init charlcd_probe(struct platform_device *pdev) { int ret; struct charlcd *lcd; struct resource *res; lcd = kzalloc(sizeof(struct charlcd), GFP_KERNEL); if (!lcd) return -ENOMEM; lcd->dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { ret = -ENOENT; goto out_no_resource; } lcd->phybase = res->start; lcd->physize = resource_size(res); if (request_mem_region(lcd->phybase, lcd->physize, DRIVERNAME) == NULL) { ret = -EBUSY; goto out_no_memregion; } lcd->virtbase = ioremap(lcd->phybase, lcd->physize); if (!lcd->virtbase) { ret = -ENOMEM; goto out_no_memregion; } lcd->irq = platform_get_irq(pdev, 0); /* If no 
IRQ is supplied, we'll survive without it */ if (lcd->irq >= 0) { if (request_irq(lcd->irq, charlcd_interrupt, 0, DRIVERNAME, lcd)) { ret = -EIO; goto out_no_irq; } } platform_set_drvdata(pdev, lcd); /* * Initialize the display in a delayed work, because * it is VERY slow and would slow down the boot of the system. */ INIT_DELAYED_WORK(&lcd->init_work, charlcd_init_work); schedule_delayed_work(&lcd->init_work, 0); dev_info(&pdev->dev, "initialized ARM character LCD at %08x\n", lcd->phybase); return 0; out_no_irq: iounmap(lcd->virtbase); out_no_memregion: release_mem_region(lcd->phybase, SZ_4K); out_no_resource: kfree(lcd); return ret; } static int charlcd_suspend(struct device *dev) { struct charlcd *lcd = dev_get_drvdata(dev); /* Power the display off */ charlcd_4bit_command(lcd, HD_DISPCTRL); return 0; } static int charlcd_resume(struct device *dev) { struct charlcd *lcd = dev_get_drvdata(dev); /* Turn the display back on */ charlcd_4bit_command(lcd, HD_DISPCTRL | HD_DISPCTRL_ON); return 0; } static const struct dev_pm_ops charlcd_pm_ops = { .suspend = charlcd_suspend, .resume = charlcd_resume, }; static const struct of_device_id charlcd_match[] = { { .compatible = "arm,versatile-lcd", }, {} }; static struct platform_driver charlcd_driver = { .driver = { .name = DRIVERNAME, .pm = &charlcd_pm_ops, .suppress_bind_attrs = true, .of_match_table = of_match_ptr(charlcd_match), }, }; builtin_platform_driver_probe(charlcd_driver, charlcd_probe);
linux-master
drivers/auxdisplay/arm-charlcd.c
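The driver above talks to the HD44780 over a 4-bit interface: charlcd_4bit_command() and charlcd_4bit_char() send every byte as two register writes, high nibble first, with a short delay in between. The standalone helper below is only an illustration of that split; write_reg() is a hypothetical stand-in for the writel() calls to CHAR_COM or CHAR_DAT, not part of the driver.

#include <stdint.h>

/*
 * Illustration of the 4-bit HD44780 transfer used by
 * charlcd_4bit_command()/charlcd_4bit_char(): one byte becomes two
 * writes, high nibble first, each presented in bits [7:4].
 */
static void hd44780_write_4bit(uint8_t byte, void (*write_reg)(uint8_t val))
{
	uint8_t hi = byte & 0xf0;		/* high nibble, already in place */
	uint8_t lo = (byte << 4) & 0xf0;	/* low nibble shifted up */

	write_reg(hi);
	/* the real driver waits ~10 us between the two writes */
	write_reg(lo);
}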
// SPDX-License-Identifier: GPL-2.0 /* * HT16K33 driver * * Author: Robin van der Gracht <[email protected]> * * Copyright: (C) 2016 Protonic Holland. * Copyright (C) 2021 Glider bv */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/property.h> #include <linux/fb.h> #include <linux/backlight.h> #include <linux/input.h> #include <linux/input/matrix_keypad.h> #include <linux/leds.h> #include <linux/workqueue.h> #include <linux/mm.h> #include <linux/map_to_7segment.h> #include <linux/map_to_14segment.h> #include <asm/unaligned.h> #include "line-display.h" /* Registers */ #define REG_SYSTEM_SETUP 0x20 #define REG_SYSTEM_SETUP_OSC_ON BIT(0) #define REG_DISPLAY_SETUP 0x80 #define REG_DISPLAY_SETUP_ON BIT(0) #define REG_DISPLAY_SETUP_BLINK_OFF (0 << 1) #define REG_DISPLAY_SETUP_BLINK_2HZ (1 << 1) #define REG_DISPLAY_SETUP_BLINK_1HZ (2 << 1) #define REG_DISPLAY_SETUP_BLINK_0HZ5 (3 << 1) #define REG_ROWINT_SET 0xA0 #define REG_ROWINT_SET_INT_EN BIT(0) #define REG_ROWINT_SET_INT_ACT_HIGH BIT(1) #define REG_BRIGHTNESS 0xE0 /* Defines */ #define DRIVER_NAME "ht16k33" #define MIN_BRIGHTNESS 0x1 #define MAX_BRIGHTNESS 0x10 #define HT16K33_MATRIX_LED_MAX_COLS 8 #define HT16K33_MATRIX_LED_MAX_ROWS 16 #define HT16K33_MATRIX_KEYPAD_MAX_COLS 3 #define HT16K33_MATRIX_KEYPAD_MAX_ROWS 12 #define BYTES_PER_ROW (HT16K33_MATRIX_LED_MAX_ROWS / 8) #define HT16K33_FB_SIZE (HT16K33_MATRIX_LED_MAX_COLS * BYTES_PER_ROW) enum display_type { DISP_MATRIX = 0, DISP_QUAD_7SEG, DISP_QUAD_14SEG, }; struct ht16k33_keypad { struct i2c_client *client; struct input_dev *dev; uint32_t cols; uint32_t rows; uint32_t row_shift; uint32_t debounce_ms; uint16_t last_key_state[HT16K33_MATRIX_KEYPAD_MAX_COLS]; wait_queue_head_t wait; bool stopped; }; struct ht16k33_fbdev { struct fb_info *info; uint32_t refresh_rate; uint8_t *buffer; uint8_t *cache; }; struct ht16k33_seg { struct linedisp linedisp; union { struct seg7_conversion_map seg7; struct seg14_conversion_map seg14; } map; unsigned int map_size; char curr[4]; }; struct ht16k33_priv { struct i2c_client *client; struct delayed_work work; struct led_classdev led; struct ht16k33_keypad keypad; union { struct ht16k33_fbdev fbdev; struct ht16k33_seg seg; }; enum display_type type; uint8_t blink; }; static const struct fb_fix_screeninfo ht16k33_fb_fix = { .id = DRIVER_NAME, .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_MONO10, .xpanstep = 0, .ypanstep = 0, .ywrapstep = 0, .line_length = HT16K33_MATRIX_LED_MAX_ROWS, .accel = FB_ACCEL_NONE, }; static const struct fb_var_screeninfo ht16k33_fb_var = { .xres = HT16K33_MATRIX_LED_MAX_ROWS, .yres = HT16K33_MATRIX_LED_MAX_COLS, .xres_virtual = HT16K33_MATRIX_LED_MAX_ROWS, .yres_virtual = HT16K33_MATRIX_LED_MAX_COLS, .bits_per_pixel = 1, .red = { 0, 1, 0 }, .green = { 0, 1, 0 }, .blue = { 0, 1, 0 }, .left_margin = 0, .right_margin = 0, .upper_margin = 0, .lower_margin = 0, .vmode = FB_VMODE_NONINTERLACED, }; static const SEG7_DEFAULT_MAP(initial_map_seg7); static const SEG14_DEFAULT_MAP(initial_map_seg14); static ssize_t map_seg_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ht16k33_priv *priv = dev_get_drvdata(dev); memcpy(buf, &priv->seg.map, priv->seg.map_size); return priv->seg.map_size; } static ssize_t map_seg_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t cnt) { struct ht16k33_priv *priv = dev_get_drvdata(dev); if (cnt != priv->seg.map_size) return -EINVAL; memcpy(&priv->seg.map, buf, cnt); return cnt; } 
static DEVICE_ATTR(map_seg7, 0644, map_seg_show, map_seg_store); static DEVICE_ATTR(map_seg14, 0644, map_seg_show, map_seg_store); static int ht16k33_display_on(struct ht16k33_priv *priv) { uint8_t data = REG_DISPLAY_SETUP | REG_DISPLAY_SETUP_ON | priv->blink; return i2c_smbus_write_byte(priv->client, data); } static int ht16k33_display_off(struct ht16k33_priv *priv) { return i2c_smbus_write_byte(priv->client, REG_DISPLAY_SETUP); } static int ht16k33_brightness_set(struct ht16k33_priv *priv, unsigned int brightness) { int err; if (brightness == 0) { priv->blink = REG_DISPLAY_SETUP_BLINK_OFF; return ht16k33_display_off(priv); } err = ht16k33_display_on(priv); if (err) return err; return i2c_smbus_write_byte(priv->client, REG_BRIGHTNESS | (brightness - 1)); } static int ht16k33_brightness_set_blocking(struct led_classdev *led_cdev, enum led_brightness brightness) { struct ht16k33_priv *priv = container_of(led_cdev, struct ht16k33_priv, led); return ht16k33_brightness_set(priv, brightness); } static int ht16k33_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { struct ht16k33_priv *priv = container_of(led_cdev, struct ht16k33_priv, led); unsigned int delay; uint8_t blink; int err; if (!*delay_on && !*delay_off) { blink = REG_DISPLAY_SETUP_BLINK_1HZ; delay = 1000; } else if (*delay_on <= 750) { blink = REG_DISPLAY_SETUP_BLINK_2HZ; delay = 500; } else if (*delay_on <= 1500) { blink = REG_DISPLAY_SETUP_BLINK_1HZ; delay = 1000; } else { blink = REG_DISPLAY_SETUP_BLINK_0HZ5; delay = 2000; } err = i2c_smbus_write_byte(priv->client, REG_DISPLAY_SETUP | REG_DISPLAY_SETUP_ON | blink); if (err) return err; priv->blink = blink; *delay_on = *delay_off = delay; return 0; } static void ht16k33_fb_queue(struct ht16k33_priv *priv) { struct ht16k33_fbdev *fbdev = &priv->fbdev; schedule_delayed_work(&priv->work, HZ / fbdev->refresh_rate); } /* * This gets the fb data from cache and copies it to ht16k33 display RAM */ static void ht16k33_fb_update(struct work_struct *work) { struct ht16k33_priv *priv = container_of(work, struct ht16k33_priv, work.work); struct ht16k33_fbdev *fbdev = &priv->fbdev; uint8_t *p1, *p2; int len, pos = 0, first = -1; p1 = fbdev->cache; p2 = fbdev->buffer; /* Search for the first byte with changes */ while (pos < HT16K33_FB_SIZE && first < 0) { if (*(p1++) - *(p2++)) first = pos; pos++; } /* No changes found */ if (first < 0) goto requeue; len = HT16K33_FB_SIZE - first; p1 = fbdev->cache + HT16K33_FB_SIZE - 1; p2 = fbdev->buffer + HT16K33_FB_SIZE - 1; /* Determine i2c transfer length */ while (len > 1) { if (*(p1--) - *(p2--)) break; len--; } p1 = fbdev->cache + first; p2 = fbdev->buffer + first; if (!i2c_smbus_write_i2c_block_data(priv->client, first, len, p2)) memcpy(p1, p2, len); requeue: ht16k33_fb_queue(priv); } static int ht16k33_initialize(struct ht16k33_priv *priv) { uint8_t data[HT16K33_FB_SIZE]; uint8_t byte; int err; /* Clear RAM (8 * 16 bits) */ memset(data, 0, sizeof(data)); err = i2c_smbus_write_block_data(priv->client, 0, sizeof(data), data); if (err) return err; /* Turn on internal oscillator */ byte = REG_SYSTEM_SETUP_OSC_ON | REG_SYSTEM_SETUP; err = i2c_smbus_write_byte(priv->client, byte); if (err) return err; /* Configure INT pin */ byte = REG_ROWINT_SET | REG_ROWINT_SET_INT_ACT_HIGH; if (priv->client->irq > 0) byte |= REG_ROWINT_SET_INT_EN; return i2c_smbus_write_byte(priv->client, byte); } static int ht16k33_bl_update_status(struct backlight_device *bl) { int brightness = bl->props.brightness; struct ht16k33_priv *priv = 
bl_get_data(bl); if (bl->props.power != FB_BLANK_UNBLANK || bl->props.fb_blank != FB_BLANK_UNBLANK || bl->props.state & BL_CORE_FBBLANK) brightness = 0; return ht16k33_brightness_set(priv, brightness); } static int ht16k33_bl_check_fb(struct backlight_device *bl, struct fb_info *fi) { struct ht16k33_priv *priv = bl_get_data(bl); return (fi == NULL) || (fi->par == priv); } static const struct backlight_ops ht16k33_bl_ops = { .update_status = ht16k33_bl_update_status, .check_fb = ht16k33_bl_check_fb, }; /* * Blank events will be passed to the actual device handling the backlight when * we return zero here. */ static int ht16k33_blank(int blank, struct fb_info *info) { return 0; } static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct ht16k33_priv *priv = info->par; struct page *pages = virt_to_page(priv->fbdev.buffer); return vm_map_pages_zero(vma, &pages, 1); } static const struct fb_ops ht16k33_fb_ops = { .owner = THIS_MODULE, .fb_read = fb_sys_read, .fb_write = fb_sys_write, .fb_blank = ht16k33_blank, .fb_fillrect = sys_fillrect, .fb_copyarea = sys_copyarea, .fb_imageblit = sys_imageblit, .fb_mmap = ht16k33_mmap, }; /* * This gets the keys from keypad and reports it to input subsystem. * Returns true if a key is pressed. */ static bool ht16k33_keypad_scan(struct ht16k33_keypad *keypad) { const unsigned short *keycodes = keypad->dev->keycode; u16 new_state[HT16K33_MATRIX_KEYPAD_MAX_COLS]; __le16 data[HT16K33_MATRIX_KEYPAD_MAX_COLS]; unsigned long bits_changed; int row, col, code; int rc; bool pressed = false; rc = i2c_smbus_read_i2c_block_data(keypad->client, 0x40, sizeof(data), (u8 *)data); if (rc != sizeof(data)) { dev_err(&keypad->client->dev, "Failed to read key data, rc=%d\n", rc); return false; } for (col = 0; col < keypad->cols; col++) { new_state[col] = le16_to_cpu(data[col]); if (new_state[col]) pressed = true; bits_changed = keypad->last_key_state[col] ^ new_state[col]; for_each_set_bit(row, &bits_changed, BITS_PER_LONG) { code = MATRIX_SCAN_CODE(row, col, keypad->row_shift); input_event(keypad->dev, EV_MSC, MSC_SCAN, code); input_report_key(keypad->dev, keycodes[code], new_state[col] & BIT(row)); } } input_sync(keypad->dev); memcpy(keypad->last_key_state, new_state, sizeof(u16) * keypad->cols); return pressed; } static irqreturn_t ht16k33_keypad_irq_thread(int irq, void *dev) { struct ht16k33_keypad *keypad = dev; do { wait_event_timeout(keypad->wait, keypad->stopped, msecs_to_jiffies(keypad->debounce_ms)); if (keypad->stopped) break; } while (ht16k33_keypad_scan(keypad)); return IRQ_HANDLED; } static int ht16k33_keypad_start(struct input_dev *dev) { struct ht16k33_keypad *keypad = input_get_drvdata(dev); keypad->stopped = false; mb(); enable_irq(keypad->client->irq); return 0; } static void ht16k33_keypad_stop(struct input_dev *dev) { struct ht16k33_keypad *keypad = input_get_drvdata(dev); keypad->stopped = true; mb(); wake_up(&keypad->wait); disable_irq(keypad->client->irq); } static void ht16k33_linedisp_update(struct linedisp *linedisp) { struct ht16k33_priv *priv = container_of(linedisp, struct ht16k33_priv, seg.linedisp); schedule_delayed_work(&priv->work, 0); } static void ht16k33_seg7_update(struct work_struct *work) { struct ht16k33_priv *priv = container_of(work, struct ht16k33_priv, work.work); struct ht16k33_seg *seg = &priv->seg; char *s = seg->curr; uint8_t buf[9]; buf[0] = map_to_seg7(&seg->map.seg7, *s++); buf[1] = 0; buf[2] = map_to_seg7(&seg->map.seg7, *s++); buf[3] = 0; buf[4] = 0; buf[5] = 0; buf[6] = map_to_seg7(&seg->map.seg7, 
*s++); buf[7] = 0; buf[8] = map_to_seg7(&seg->map.seg7, *s++); i2c_smbus_write_i2c_block_data(priv->client, 0, ARRAY_SIZE(buf), buf); } static void ht16k33_seg14_update(struct work_struct *work) { struct ht16k33_priv *priv = container_of(work, struct ht16k33_priv, work.work); struct ht16k33_seg *seg = &priv->seg; char *s = seg->curr; uint8_t buf[8]; put_unaligned_le16(map_to_seg14(&seg->map.seg14, *s++), buf); put_unaligned_le16(map_to_seg14(&seg->map.seg14, *s++), buf + 2); put_unaligned_le16(map_to_seg14(&seg->map.seg14, *s++), buf + 4); put_unaligned_le16(map_to_seg14(&seg->map.seg14, *s++), buf + 6); i2c_smbus_write_i2c_block_data(priv->client, 0, ARRAY_SIZE(buf), buf); } static int ht16k33_led_probe(struct device *dev, struct led_classdev *led, unsigned int brightness) { struct led_init_data init_data = {}; int err; /* The LED is optional */ init_data.fwnode = device_get_named_child_node(dev, "led"); if (!init_data.fwnode) return 0; init_data.devicename = "auxdisplay"; init_data.devname_mandatory = true; led->brightness_set_blocking = ht16k33_brightness_set_blocking; led->blink_set = ht16k33_blink_set; led->flags = LED_CORE_SUSPENDRESUME; led->brightness = brightness; led->max_brightness = MAX_BRIGHTNESS; err = devm_led_classdev_register_ext(dev, led, &init_data); if (err) dev_err(dev, "Failed to register LED\n"); return err; } static int ht16k33_keypad_probe(struct i2c_client *client, struct ht16k33_keypad *keypad) { struct device *dev = &client->dev; u32 rows = HT16K33_MATRIX_KEYPAD_MAX_ROWS; u32 cols = HT16K33_MATRIX_KEYPAD_MAX_COLS; int err; keypad->client = client; init_waitqueue_head(&keypad->wait); keypad->dev = devm_input_allocate_device(dev); if (!keypad->dev) return -ENOMEM; input_set_drvdata(keypad->dev, keypad); keypad->dev->name = DRIVER_NAME"-keypad"; keypad->dev->id.bustype = BUS_I2C; keypad->dev->open = ht16k33_keypad_start; keypad->dev->close = ht16k33_keypad_stop; if (!device_property_read_bool(dev, "linux,no-autorepeat")) __set_bit(EV_REP, keypad->dev->evbit); err = device_property_read_u32(dev, "debounce-delay-ms", &keypad->debounce_ms); if (err) { dev_err(dev, "key debounce delay not specified\n"); return err; } err = matrix_keypad_parse_properties(dev, &rows, &cols); if (err) return err; if (rows > HT16K33_MATRIX_KEYPAD_MAX_ROWS || cols > HT16K33_MATRIX_KEYPAD_MAX_COLS) { dev_err(dev, "%u rows or %u cols out of range in DT\n", rows, cols); return -ERANGE; } keypad->rows = rows; keypad->cols = cols; keypad->row_shift = get_count_order(cols); err = matrix_keypad_build_keymap(NULL, NULL, rows, cols, NULL, keypad->dev); if (err) { dev_err(dev, "failed to build keymap\n"); return err; } err = devm_request_threaded_irq(dev, client->irq, NULL, ht16k33_keypad_irq_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, DRIVER_NAME, keypad); if (err) { dev_err(dev, "irq request failed %d, error %d\n", client->irq, err); return err; } ht16k33_keypad_stop(keypad->dev); return input_register_device(keypad->dev); } static int ht16k33_fbdev_probe(struct device *dev, struct ht16k33_priv *priv, uint32_t brightness) { struct ht16k33_fbdev *fbdev = &priv->fbdev; struct backlight_device *bl = NULL; int err; if (priv->led.dev) { err = ht16k33_brightness_set(priv, brightness); if (err) return err; } else { /* backwards compatibility with DT lacking an led subnode */ struct backlight_properties bl_props; memset(&bl_props, 0, sizeof(struct backlight_properties)); bl_props.type = BACKLIGHT_RAW; bl_props.max_brightness = MAX_BRIGHTNESS; bl = devm_backlight_device_register(dev, DRIVER_NAME"-bl", dev, 
priv, &ht16k33_bl_ops, &bl_props); if (IS_ERR(bl)) { dev_err(dev, "failed to register backlight\n"); return PTR_ERR(bl); } bl->props.brightness = brightness; ht16k33_bl_update_status(bl); } /* Framebuffer (2 bytes per column) */ BUILD_BUG_ON(PAGE_SIZE < HT16K33_FB_SIZE); fbdev->buffer = (unsigned char *) get_zeroed_page(GFP_KERNEL); if (!fbdev->buffer) return -ENOMEM; fbdev->cache = devm_kmalloc(dev, HT16K33_FB_SIZE, GFP_KERNEL); if (!fbdev->cache) { err = -ENOMEM; goto err_fbdev_buffer; } fbdev->info = framebuffer_alloc(0, dev); if (!fbdev->info) { err = -ENOMEM; goto err_fbdev_buffer; } err = device_property_read_u32(dev, "refresh-rate-hz", &fbdev->refresh_rate); if (err) { dev_err(dev, "refresh rate not specified\n"); goto err_fbdev_info; } fb_bl_default_curve(fbdev->info, 0, MIN_BRIGHTNESS, MAX_BRIGHTNESS); INIT_DELAYED_WORK(&priv->work, ht16k33_fb_update); fbdev->info->fbops = &ht16k33_fb_ops; fbdev->info->screen_buffer = fbdev->buffer; fbdev->info->screen_size = HT16K33_FB_SIZE; fbdev->info->fix = ht16k33_fb_fix; fbdev->info->var = ht16k33_fb_var; fbdev->info->bl_dev = bl; fbdev->info->pseudo_palette = NULL; fbdev->info->par = priv; err = register_framebuffer(fbdev->info); if (err) goto err_fbdev_info; ht16k33_fb_queue(priv); return 0; err_fbdev_info: framebuffer_release(fbdev->info); err_fbdev_buffer: free_page((unsigned long) fbdev->buffer); return err; } static int ht16k33_seg_probe(struct device *dev, struct ht16k33_priv *priv, uint32_t brightness) { struct ht16k33_seg *seg = &priv->seg; int err; err = ht16k33_brightness_set(priv, brightness); if (err) return err; switch (priv->type) { case DISP_MATRIX: /* not handled here */ err = -EINVAL; break; case DISP_QUAD_7SEG: INIT_DELAYED_WORK(&priv->work, ht16k33_seg7_update); seg->map.seg7 = initial_map_seg7; seg->map_size = sizeof(seg->map.seg7); err = device_create_file(dev, &dev_attr_map_seg7); break; case DISP_QUAD_14SEG: INIT_DELAYED_WORK(&priv->work, ht16k33_seg14_update); seg->map.seg14 = initial_map_seg14; seg->map_size = sizeof(seg->map.seg14); err = device_create_file(dev, &dev_attr_map_seg14); break; } if (err) return err; err = linedisp_register(&seg->linedisp, dev, 4, seg->curr, ht16k33_linedisp_update); if (err) goto err_remove_map_file; return 0; err_remove_map_file: device_remove_file(dev, &dev_attr_map_seg7); device_remove_file(dev, &dev_attr_map_seg14); return err; } static int ht16k33_probe(struct i2c_client *client) { struct device *dev = &client->dev; const struct of_device_id *id; struct ht16k33_priv *priv; uint32_t dft_brightness; int err; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_err(dev, "i2c_check_functionality error\n"); return -EIO; } priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->client = client; id = i2c_of_match_device(dev->driver->of_match_table, client); if (id) priv->type = (uintptr_t)id->data; i2c_set_clientdata(client, priv); err = ht16k33_initialize(priv); if (err) return err; err = device_property_read_u32(dev, "default-brightness-level", &dft_brightness); if (err) { dft_brightness = MAX_BRIGHTNESS; } else if (dft_brightness > MAX_BRIGHTNESS) { dev_warn(dev, "invalid default brightness level: %u, using %u\n", dft_brightness, MAX_BRIGHTNESS); dft_brightness = MAX_BRIGHTNESS; } /* LED */ err = ht16k33_led_probe(dev, &priv->led, dft_brightness); if (err) return err; /* Keypad */ if (client->irq > 0) { err = ht16k33_keypad_probe(client, &priv->keypad); if (err) return err; } switch (priv->type) { case DISP_MATRIX: /* Frame Buffer Display 
*/ err = ht16k33_fbdev_probe(dev, priv, dft_brightness); break; case DISP_QUAD_7SEG: case DISP_QUAD_14SEG: /* Segment Display */ err = ht16k33_seg_probe(dev, priv, dft_brightness); break; } return err; } static void ht16k33_remove(struct i2c_client *client) { struct ht16k33_priv *priv = i2c_get_clientdata(client); struct ht16k33_fbdev *fbdev = &priv->fbdev; cancel_delayed_work_sync(&priv->work); switch (priv->type) { case DISP_MATRIX: unregister_framebuffer(fbdev->info); framebuffer_release(fbdev->info); free_page((unsigned long)fbdev->buffer); break; case DISP_QUAD_7SEG: case DISP_QUAD_14SEG: linedisp_unregister(&priv->seg.linedisp); device_remove_file(&client->dev, &dev_attr_map_seg7); device_remove_file(&client->dev, &dev_attr_map_seg14); break; } } static const struct i2c_device_id ht16k33_i2c_match[] = { { "ht16k33", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ht16k33_i2c_match); static const struct of_device_id ht16k33_of_match[] = { { /* 0.56" 4-Digit 7-Segment FeatherWing Display (Red) */ .compatible = "adafruit,3108", .data = (void *)DISP_QUAD_7SEG, }, { /* 0.54" Quad Alphanumeric FeatherWing Display (Red) */ .compatible = "adafruit,3130", .data = (void *)DISP_QUAD_14SEG, }, { /* Generic, assumed Dot-Matrix Display */ .compatible = "holtek,ht16k33", .data = (void *)DISP_MATRIX, }, { } }; MODULE_DEVICE_TABLE(of, ht16k33_of_match); static struct i2c_driver ht16k33_driver = { .probe = ht16k33_probe, .remove = ht16k33_remove, .driver = { .name = DRIVER_NAME, .of_match_table = ht16k33_of_match, }, .id_table = ht16k33_i2c_match, }; module_i2c_driver(ht16k33_driver); MODULE_DESCRIPTION("Holtek HT16K33 driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Robin van der Gracht <[email protected]>");
linux-master
drivers/auxdisplay/ht16k33.c
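ht16k33_fb_update() above keeps I2C traffic down by diffing the framebuffer against a cache and transferring only the span between the first and last changed byte. The helper below restates that dirty-span computation in isolation as a sketch; the names are illustrative, not taken from the driver.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Sketch of the dirty-span logic in ht16k33_fb_update(): find the first
 * and last bytes that differ between cache and buffer, so only
 * [*first, *first + *len) needs to go out over I2C.
 * Returns false when nothing changed.
 */
static bool fb_dirty_span(const uint8_t *cache, const uint8_t *buf,
			  size_t size, size_t *first, size_t *len)
{
	size_t lo = 0, hi = size;

	while (lo < size && cache[lo] == buf[lo])
		lo++;
	if (lo == size)
		return false;		/* no changes */

	while (hi > lo + 1 && cache[hi - 1] == buf[hi - 1])
		hi--;

	*first = lo;
	*len = hi - lo;
	return true;
}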
// SPDX-License-Identifier: GPL-2.0 /* * Filename: cfag12864b.c * Version: 0.1.0 * Description: cfag12864b LCD driver * Depends: ks0108 * * Author: Copyright (C) Miguel Ojeda <[email protected]> * Date: 2006-10-31 */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/cdev.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/jiffies.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/workqueue.h> #include <linux/ks0108.h> #include <linux/cfag12864b.h> #define CFAG12864B_NAME "cfag12864b" /* * Module Parameters */ static unsigned int cfag12864b_rate = CONFIG_CFAG12864B_RATE; module_param(cfag12864b_rate, uint, 0444); MODULE_PARM_DESC(cfag12864b_rate, "Refresh rate (hertz)"); unsigned int cfag12864b_getrate(void) { return cfag12864b_rate; } /* * cfag12864b Commands * * E = Enable signal * Every time E switch from low to high, * cfag12864b/ks0108 reads the command/data. * * CS1 = First ks0108controller. * If high, the first ks0108 controller receives commands/data. * * CS2 = Second ks0108 controller * If high, the second ks0108 controller receives commands/data. * * DI = Data/Instruction * If low, cfag12864b will expect commands. * If high, cfag12864b will expect data. * */ #define bit(n) (((unsigned char)1)<<(n)) #define CFAG12864B_BIT_E (0) #define CFAG12864B_BIT_CS1 (2) #define CFAG12864B_BIT_CS2 (1) #define CFAG12864B_BIT_DI (3) static unsigned char cfag12864b_state; static void cfag12864b_set(void) { ks0108_writecontrol(cfag12864b_state); } static void cfag12864b_setbit(unsigned char state, unsigned char n) { if (state) cfag12864b_state |= bit(n); else cfag12864b_state &= ~bit(n); } static void cfag12864b_e(unsigned char state) { cfag12864b_setbit(state, CFAG12864B_BIT_E); cfag12864b_set(); } static void cfag12864b_cs1(unsigned char state) { cfag12864b_setbit(state, CFAG12864B_BIT_CS1); } static void cfag12864b_cs2(unsigned char state) { cfag12864b_setbit(state, CFAG12864B_BIT_CS2); } static void cfag12864b_di(unsigned char state) { cfag12864b_setbit(state, CFAG12864B_BIT_DI); } static void cfag12864b_setcontrollers(unsigned char first, unsigned char second) { if (first) cfag12864b_cs1(0); else cfag12864b_cs1(1); if (second) cfag12864b_cs2(0); else cfag12864b_cs2(1); } static void cfag12864b_controller(unsigned char which) { if (which == 0) cfag12864b_setcontrollers(1, 0); else if (which == 1) cfag12864b_setcontrollers(0, 1); } static void cfag12864b_displaystate(unsigned char state) { cfag12864b_di(0); cfag12864b_e(1); ks0108_displaystate(state); cfag12864b_e(0); } static void cfag12864b_address(unsigned char address) { cfag12864b_di(0); cfag12864b_e(1); ks0108_address(address); cfag12864b_e(0); } static void cfag12864b_page(unsigned char page) { cfag12864b_di(0); cfag12864b_e(1); ks0108_page(page); cfag12864b_e(0); } static void cfag12864b_startline(unsigned char startline) { cfag12864b_di(0); cfag12864b_e(1); ks0108_startline(startline); cfag12864b_e(0); } static void cfag12864b_writebyte(unsigned char byte) { cfag12864b_di(1); cfag12864b_e(1); ks0108_writedata(byte); cfag12864b_e(0); } static void cfag12864b_nop(void) { cfag12864b_startline(0); } /* * cfag12864b Internal Commands */ static void cfag12864b_on(void) { cfag12864b_setcontrollers(1, 1); cfag12864b_displaystate(1); } static void cfag12864b_off(void) { cfag12864b_setcontrollers(1, 1); cfag12864b_displaystate(0); } static void cfag12864b_clear(void) { unsigned char i, j; 
cfag12864b_setcontrollers(1, 1); for (i = 0; i < CFAG12864B_PAGES; i++) { cfag12864b_page(i); cfag12864b_address(0); for (j = 0; j < CFAG12864B_ADDRESSES; j++) cfag12864b_writebyte(0); } } /* * Update work */ unsigned char *cfag12864b_buffer; static unsigned char *cfag12864b_cache; static DEFINE_MUTEX(cfag12864b_mutex); static unsigned char cfag12864b_updating; static void cfag12864b_update(struct work_struct *delayed_work); static struct workqueue_struct *cfag12864b_workqueue; static DECLARE_DELAYED_WORK(cfag12864b_work, cfag12864b_update); static void cfag12864b_queue(void) { queue_delayed_work(cfag12864b_workqueue, &cfag12864b_work, HZ / cfag12864b_rate); } unsigned char cfag12864b_enable(void) { unsigned char ret; mutex_lock(&cfag12864b_mutex); if (!cfag12864b_updating) { cfag12864b_updating = 1; cfag12864b_queue(); ret = 0; } else ret = 1; mutex_unlock(&cfag12864b_mutex); return ret; } void cfag12864b_disable(void) { mutex_lock(&cfag12864b_mutex); if (cfag12864b_updating) { cfag12864b_updating = 0; cancel_delayed_work(&cfag12864b_work); flush_workqueue(cfag12864b_workqueue); } mutex_unlock(&cfag12864b_mutex); } unsigned char cfag12864b_isenabled(void) { return cfag12864b_updating; } static void cfag12864b_update(struct work_struct *work) { unsigned char c; unsigned short i, j, k, b; if (memcmp(cfag12864b_cache, cfag12864b_buffer, CFAG12864B_SIZE)) { for (i = 0; i < CFAG12864B_CONTROLLERS; i++) { cfag12864b_controller(i); cfag12864b_nop(); for (j = 0; j < CFAG12864B_PAGES; j++) { cfag12864b_page(j); cfag12864b_nop(); cfag12864b_address(0); cfag12864b_nop(); for (k = 0; k < CFAG12864B_ADDRESSES; k++) { for (c = 0, b = 0; b < 8; b++) if (cfag12864b_buffer [i * CFAG12864B_ADDRESSES / 8 + k / 8 + (j * 8 + b) * CFAG12864B_WIDTH / 8] & bit(k % 8)) c |= bit(b); cfag12864b_writebyte(c); } } } memcpy(cfag12864b_cache, cfag12864b_buffer, CFAG12864B_SIZE); } if (cfag12864b_updating) cfag12864b_queue(); } /* * cfag12864b Exported Symbols */ EXPORT_SYMBOL_GPL(cfag12864b_buffer); EXPORT_SYMBOL_GPL(cfag12864b_getrate); EXPORT_SYMBOL_GPL(cfag12864b_enable); EXPORT_SYMBOL_GPL(cfag12864b_disable); EXPORT_SYMBOL_GPL(cfag12864b_isenabled); /* * Is the module inited? 
*/ static unsigned char cfag12864b_inited; unsigned char cfag12864b_isinited(void) { return cfag12864b_inited; } EXPORT_SYMBOL_GPL(cfag12864b_isinited); /* * Module Init & Exit */ static int __init cfag12864b_init(void) { int ret = -EINVAL; /* ks0108_init() must be called first */ if (!ks0108_isinited()) { printk(KERN_ERR CFAG12864B_NAME ": ERROR: " "ks0108 is not initialized\n"); goto none; } BUILD_BUG_ON(PAGE_SIZE < CFAG12864B_SIZE); cfag12864b_buffer = (unsigned char *) get_zeroed_page(GFP_KERNEL); if (cfag12864b_buffer == NULL) { printk(KERN_ERR CFAG12864B_NAME ": ERROR: " "can't get a free page\n"); ret = -ENOMEM; goto none; } cfag12864b_cache = kmalloc(CFAG12864B_SIZE, GFP_KERNEL); if (cfag12864b_cache == NULL) { printk(KERN_ERR CFAG12864B_NAME ": ERROR: " "can't alloc cache buffer (%i bytes)\n", CFAG12864B_SIZE); ret = -ENOMEM; goto bufferalloced; } cfag12864b_workqueue = create_singlethread_workqueue(CFAG12864B_NAME); if (cfag12864b_workqueue == NULL) goto cachealloced; cfag12864b_clear(); cfag12864b_on(); cfag12864b_inited = 1; return 0; cachealloced: kfree(cfag12864b_cache); bufferalloced: free_page((unsigned long) cfag12864b_buffer); none: return ret; } static void __exit cfag12864b_exit(void) { cfag12864b_disable(); cfag12864b_off(); destroy_workqueue(cfag12864b_workqueue); kfree(cfag12864b_cache); free_page((unsigned long) cfag12864b_buffer); } module_init(cfag12864b_init); module_exit(cfag12864b_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Miguel Ojeda <[email protected]>"); MODULE_DESCRIPTION("cfag12864b LCD driver");
linux-master
drivers/auxdisplay/cfag12864b.c
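The module exports a small API for higher layers (in mainline the cfag12864bfb framebuffer driver is the usual consumer): a client draws into cfag12864b_buffer and lets the periodic update work push the diff to the panel. Below is a minimal, hedged sketch of such a consumer built only from the symbols exported above; it assumes <linux/cfag12864b.h> supplies CFAG12864B_SIZE and the prototypes, as this file's own include list suggests.

// SPDX-License-Identifier: GPL-2.0
/* Minimal sketch of a cfag12864b consumer, using only the exported API. */
#include <linux/module.h>
#include <linux/string.h>
#include <linux/cfag12864b.h>

static int __init cfag12864b_demo_init(void)
{
	if (!cfag12864b_isinited())
		return -ENODEV;

	/* Paint every pixel, then start the refresh work. */
	memset(cfag12864b_buffer, 0xff, CFAG12864B_SIZE);
	if (cfag12864b_enable())
		return -EBUSY;	/* somebody else already enabled updates */

	pr_info("cfag12864b refreshing at %u Hz\n", cfag12864b_getrate());
	return 0;
}

static void __exit cfag12864b_demo_exit(void)
{
	cfag12864b_disable();
}

module_init(cfag12864b_demo_init);
module_exit(cfag12864b_demo_exit);
MODULE_LICENSE("GPL");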
// SPDX-License-Identifier: GPL-2.0+ /* * Character LCD driver for Linux * * Copyright (C) 2000-2008, Willy Tarreau <[email protected]> * Copyright (C) 2016-2017 Glider bvba */ #include <linux/atomic.h> #include <linux/ctype.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/workqueue.h> #include <generated/utsrelease.h> #include "charlcd.h" /* Keep the backlight on this many seconds for each flash */ #define LCD_BL_TEMPO_PERIOD 4 #define LCD_ESCAPE_LEN 24 /* Max chars for LCD escape command */ #define LCD_ESCAPE_CHAR 27 /* Use char 27 for escape command */ struct charlcd_priv { struct charlcd lcd; struct delayed_work bl_work; struct mutex bl_tempo_lock; /* Protects access to bl_tempo */ bool bl_tempo; bool must_clear; /* contains the LCD config state */ unsigned long flags; /* Current escape sequence and it's length or -1 if outside */ struct { char buf[LCD_ESCAPE_LEN + 1]; int len; } esc_seq; unsigned long long drvdata[]; }; #define charlcd_to_priv(p) container_of(p, struct charlcd_priv, lcd) /* Device single-open policy control */ static atomic_t charlcd_available = ATOMIC_INIT(1); /* turn the backlight on or off */ void charlcd_backlight(struct charlcd *lcd, enum charlcd_onoff on) { struct charlcd_priv *priv = charlcd_to_priv(lcd); if (!lcd->ops->backlight) return; mutex_lock(&priv->bl_tempo_lock); if (!priv->bl_tempo) lcd->ops->backlight(lcd, on); mutex_unlock(&priv->bl_tempo_lock); } EXPORT_SYMBOL_GPL(charlcd_backlight); static void charlcd_bl_off(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct charlcd_priv *priv = container_of(dwork, struct charlcd_priv, bl_work); mutex_lock(&priv->bl_tempo_lock); if (priv->bl_tempo) { priv->bl_tempo = false; if (!(priv->flags & LCD_FLAG_L)) priv->lcd.ops->backlight(&priv->lcd, CHARLCD_OFF); } mutex_unlock(&priv->bl_tempo_lock); } /* turn the backlight on for a little while */ void charlcd_poke(struct charlcd *lcd) { struct charlcd_priv *priv = charlcd_to_priv(lcd); if (!lcd->ops->backlight) return; cancel_delayed_work_sync(&priv->bl_work); mutex_lock(&priv->bl_tempo_lock); if (!priv->bl_tempo && !(priv->flags & LCD_FLAG_L)) lcd->ops->backlight(lcd, CHARLCD_ON); priv->bl_tempo = true; schedule_delayed_work(&priv->bl_work, LCD_BL_TEMPO_PERIOD * HZ); mutex_unlock(&priv->bl_tempo_lock); } EXPORT_SYMBOL_GPL(charlcd_poke); static void charlcd_home(struct charlcd *lcd) { lcd->addr.x = 0; lcd->addr.y = 0; lcd->ops->home(lcd); } static void charlcd_print(struct charlcd *lcd, char c) { if (lcd->addr.x >= lcd->width) return; if (lcd->char_conv) c = lcd->char_conv[(unsigned char)c]; if (!lcd->ops->print(lcd, c)) lcd->addr.x++; /* prevents the cursor from wrapping onto the next line */ if (lcd->addr.x == lcd->width) lcd->ops->gotoxy(lcd, lcd->addr.x - 1, lcd->addr.y); } static void charlcd_clear_display(struct charlcd *lcd) { lcd->ops->clear_display(lcd); lcd->addr.x = 0; lcd->addr.y = 0; } /* * Parses a movement command of the form "(.*);", where the group can be * any number of subcommands of the form "(x|y)[0-9]+". * * Returns whether the command is valid. The position arguments are * only written if the parsing was successful. * * For instance: * - ";" returns (<original x>, <original y>). * - "x1;" returns (1, <original y>). * - "y2x1;" returns (1, 2). * - "x12y34x56;" returns (56, 34). * - "" fails. * - "x" fails. * - "x;" fails. * - "x1" fails. * - "xy12;" fails. 
* - "x12yy12;" fails. * - "xx" fails. */ static bool parse_xy(const char *s, unsigned long *x, unsigned long *y) { unsigned long new_x = *x; unsigned long new_y = *y; char *p; for (;;) { if (!*s) return false; if (*s == ';') break; if (*s == 'x') { new_x = simple_strtoul(s + 1, &p, 10); if (p == s + 1) return false; s = p; } else if (*s == 'y') { new_y = simple_strtoul(s + 1, &p, 10); if (p == s + 1) return false; s = p; } else { return false; } } *x = new_x; *y = new_y; return true; } /* * These are the file operation function for user access to /dev/lcd * This function can also be called from inside the kernel, by * setting file and ppos to NULL. * */ static inline int handle_lcd_special_code(struct charlcd *lcd) { struct charlcd_priv *priv = charlcd_to_priv(lcd); /* LCD special codes */ int processed = 0; char *esc = priv->esc_seq.buf + 2; int oldflags = priv->flags; /* check for display mode flags */ switch (*esc) { case 'D': /* Display ON */ priv->flags |= LCD_FLAG_D; if (priv->flags != oldflags) lcd->ops->display(lcd, CHARLCD_ON); processed = 1; break; case 'd': /* Display OFF */ priv->flags &= ~LCD_FLAG_D; if (priv->flags != oldflags) lcd->ops->display(lcd, CHARLCD_OFF); processed = 1; break; case 'C': /* Cursor ON */ priv->flags |= LCD_FLAG_C; if (priv->flags != oldflags) lcd->ops->cursor(lcd, CHARLCD_ON); processed = 1; break; case 'c': /* Cursor OFF */ priv->flags &= ~LCD_FLAG_C; if (priv->flags != oldflags) lcd->ops->cursor(lcd, CHARLCD_OFF); processed = 1; break; case 'B': /* Blink ON */ priv->flags |= LCD_FLAG_B; if (priv->flags != oldflags) lcd->ops->blink(lcd, CHARLCD_ON); processed = 1; break; case 'b': /* Blink OFF */ priv->flags &= ~LCD_FLAG_B; if (priv->flags != oldflags) lcd->ops->blink(lcd, CHARLCD_OFF); processed = 1; break; case '+': /* Back light ON */ priv->flags |= LCD_FLAG_L; if (priv->flags != oldflags) charlcd_backlight(lcd, CHARLCD_ON); processed = 1; break; case '-': /* Back light OFF */ priv->flags &= ~LCD_FLAG_L; if (priv->flags != oldflags) charlcd_backlight(lcd, CHARLCD_OFF); processed = 1; break; case '*': /* Flash back light */ charlcd_poke(lcd); processed = 1; break; case 'f': /* Small Font */ priv->flags &= ~LCD_FLAG_F; if (priv->flags != oldflags) lcd->ops->fontsize(lcd, CHARLCD_FONTSIZE_SMALL); processed = 1; break; case 'F': /* Large Font */ priv->flags |= LCD_FLAG_F; if (priv->flags != oldflags) lcd->ops->fontsize(lcd, CHARLCD_FONTSIZE_LARGE); processed = 1; break; case 'n': /* One Line */ priv->flags &= ~LCD_FLAG_N; if (priv->flags != oldflags) lcd->ops->lines(lcd, CHARLCD_LINES_1); processed = 1; break; case 'N': /* Two Lines */ priv->flags |= LCD_FLAG_N; if (priv->flags != oldflags) lcd->ops->lines(lcd, CHARLCD_LINES_2); processed = 1; break; case 'l': /* Shift Cursor Left */ if (lcd->addr.x > 0) { if (!lcd->ops->shift_cursor(lcd, CHARLCD_SHIFT_LEFT)) lcd->addr.x--; } processed = 1; break; case 'r': /* shift cursor right */ if (lcd->addr.x < lcd->width) { if (!lcd->ops->shift_cursor(lcd, CHARLCD_SHIFT_RIGHT)) lcd->addr.x++; } processed = 1; break; case 'L': /* shift display left */ lcd->ops->shift_display(lcd, CHARLCD_SHIFT_LEFT); processed = 1; break; case 'R': /* shift display right */ lcd->ops->shift_display(lcd, CHARLCD_SHIFT_RIGHT); processed = 1; break; case 'k': { /* kill end of line */ int x, xs, ys; xs = lcd->addr.x; ys = lcd->addr.y; for (x = lcd->addr.x; x < lcd->width; x++) lcd->ops->print(lcd, ' '); /* restore cursor position */ lcd->addr.x = xs; lcd->addr.y = ys; lcd->ops->gotoxy(lcd, lcd->addr.x, lcd->addr.y); processed = 1; 
break; } case 'I': /* reinitialize display */ lcd->ops->init_display(lcd); priv->flags = ((lcd->height > 1) ? LCD_FLAG_N : 0) | LCD_FLAG_D | LCD_FLAG_C | LCD_FLAG_B; processed = 1; break; case 'G': if (lcd->ops->redefine_char) processed = lcd->ops->redefine_char(lcd, esc); else processed = 1; break; case 'x': /* gotoxy : LxXXX[yYYY]; */ case 'y': /* gotoxy : LyYYY[xXXX]; */ if (priv->esc_seq.buf[priv->esc_seq.len - 1] != ';') break; /* If the command is valid, move to the new address */ if (parse_xy(esc, &lcd->addr.x, &lcd->addr.y)) lcd->ops->gotoxy(lcd, lcd->addr.x, lcd->addr.y); /* Regardless of its validity, mark as processed */ processed = 1; break; } return processed; } static void charlcd_write_char(struct charlcd *lcd, char c) { struct charlcd_priv *priv = charlcd_to_priv(lcd); /* first, we'll test if we're in escape mode */ if ((c != '\n') && priv->esc_seq.len >= 0) { /* yes, let's add this char to the buffer */ priv->esc_seq.buf[priv->esc_seq.len++] = c; priv->esc_seq.buf[priv->esc_seq.len] = '\0'; } else { /* aborts any previous escape sequence */ priv->esc_seq.len = -1; switch (c) { case LCD_ESCAPE_CHAR: /* start of an escape sequence */ priv->esc_seq.len = 0; priv->esc_seq.buf[priv->esc_seq.len] = '\0'; break; case '\b': /* go back one char and clear it */ if (lcd->addr.x > 0) { /* back one char */ if (!lcd->ops->shift_cursor(lcd, CHARLCD_SHIFT_LEFT)) lcd->addr.x--; } /* replace with a space */ charlcd_print(lcd, ' '); /* back one char again */ if (!lcd->ops->shift_cursor(lcd, CHARLCD_SHIFT_LEFT)) lcd->addr.x--; break; case '\f': /* quickly clear the display */ charlcd_clear_display(lcd); break; case '\n': /* * flush the remainder of the current line and * go to the beginning of the next line */ for (; lcd->addr.x < lcd->width; lcd->addr.x++) lcd->ops->print(lcd, ' '); lcd->addr.x = 0; lcd->addr.y = (lcd->addr.y + 1) % lcd->height; lcd->ops->gotoxy(lcd, lcd->addr.x, lcd->addr.y); break; case '\r': /* go to the beginning of the same line */ lcd->addr.x = 0; lcd->ops->gotoxy(lcd, lcd->addr.x, lcd->addr.y); break; case '\t': /* print a space instead of the tab */ charlcd_print(lcd, ' '); break; default: /* simply print this char */ charlcd_print(lcd, c); break; } } /* * now we'll see if we're in an escape mode and if the current * escape sequence can be understood. */ if (priv->esc_seq.len >= 2) { int processed = 0; if (!strcmp(priv->esc_seq.buf, "[2J")) { /* clear the display */ charlcd_clear_display(lcd); processed = 1; } else if (!strcmp(priv->esc_seq.buf, "[H")) { /* cursor to home */ charlcd_home(lcd); processed = 1; } /* codes starting with ^[[L */ else if ((priv->esc_seq.len >= 3) && (priv->esc_seq.buf[0] == '[') && (priv->esc_seq.buf[1] == 'L')) { processed = handle_lcd_special_code(lcd); } /* LCD special escape codes */ /* * flush the escape sequence if it's been processed * or if it is getting too long. */ if (processed || (priv->esc_seq.len >= LCD_ESCAPE_LEN)) priv->esc_seq.len = -1; } /* escape codes */ } static struct charlcd *the_charlcd; static ssize_t charlcd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { const char __user *tmp = buf; char c; for (; count-- > 0; (*ppos)++, tmp++) { if (((count + 1) & 0x1f) == 0) { /* * charlcd_write() is invoked as a VFS->write() callback * and as such it is always invoked from preemptible * context and may sleep. 
*/ cond_resched(); } if (get_user(c, tmp)) return -EFAULT; charlcd_write_char(the_charlcd, c); } return tmp - buf; } static int charlcd_open(struct inode *inode, struct file *file) { struct charlcd_priv *priv = charlcd_to_priv(the_charlcd); int ret; ret = -EBUSY; if (!atomic_dec_and_test(&charlcd_available)) goto fail; /* open only once at a time */ ret = -EPERM; if (file->f_mode & FMODE_READ) /* device is write-only */ goto fail; if (priv->must_clear) { priv->lcd.ops->clear_display(&priv->lcd); priv->must_clear = false; priv->lcd.addr.x = 0; priv->lcd.addr.y = 0; } return nonseekable_open(inode, file); fail: atomic_inc(&charlcd_available); return ret; } static int charlcd_release(struct inode *inode, struct file *file) { atomic_inc(&charlcd_available); return 0; } static const struct file_operations charlcd_fops = { .write = charlcd_write, .open = charlcd_open, .release = charlcd_release, .llseek = no_llseek, }; static struct miscdevice charlcd_dev = { .minor = LCD_MINOR, .name = "lcd", .fops = &charlcd_fops, }; static void charlcd_puts(struct charlcd *lcd, const char *s) { const char *tmp = s; int count = strlen(s); for (; count-- > 0; tmp++) { if (((count + 1) & 0x1f) == 0) cond_resched(); charlcd_write_char(lcd, *tmp); } } #ifdef CONFIG_PANEL_BOOT_MESSAGE #define LCD_INIT_TEXT CONFIG_PANEL_BOOT_MESSAGE #else #define LCD_INIT_TEXT "Linux-" UTS_RELEASE "\n" #endif #ifdef CONFIG_CHARLCD_BL_ON #define LCD_INIT_BL "\x1b[L+" #elif defined(CONFIG_CHARLCD_BL_FLASH) #define LCD_INIT_BL "\x1b[L*" #else #define LCD_INIT_BL "\x1b[L-" #endif /* initialize the LCD driver */ static int charlcd_init(struct charlcd *lcd) { struct charlcd_priv *priv = charlcd_to_priv(lcd); int ret; priv->flags = ((lcd->height > 1) ? LCD_FLAG_N : 0) | LCD_FLAG_D | LCD_FLAG_C | LCD_FLAG_B; if (lcd->ops->backlight) { mutex_init(&priv->bl_tempo_lock); INIT_DELAYED_WORK(&priv->bl_work, charlcd_bl_off); } /* * before this line, we must NOT send anything to the display. * Since charlcd_init_display() needs to write data, we have to * enable mark the LCD initialized just before. 
*/ if (WARN_ON(!lcd->ops->init_display)) return -EINVAL; ret = lcd->ops->init_display(lcd); if (ret) return ret; /* display a short message */ charlcd_puts(lcd, "\x1b[Lc\x1b[Lb" LCD_INIT_BL LCD_INIT_TEXT); /* clear the display on the next device opening */ priv->must_clear = true; charlcd_home(lcd); return 0; } struct charlcd *charlcd_alloc(void) { struct charlcd_priv *priv; struct charlcd *lcd; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return NULL; priv->esc_seq.len = -1; lcd = &priv->lcd; return lcd; } EXPORT_SYMBOL_GPL(charlcd_alloc); void charlcd_free(struct charlcd *lcd) { kfree(charlcd_to_priv(lcd)); } EXPORT_SYMBOL_GPL(charlcd_free); static int panel_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { struct charlcd *lcd = the_charlcd; switch (code) { case SYS_DOWN: charlcd_puts(lcd, "\x0cReloading\nSystem...\x1b[Lc\x1b[Lb\x1b[L+"); break; case SYS_HALT: charlcd_puts(lcd, "\x0cSystem Halted.\x1b[Lc\x1b[Lb\x1b[L+"); break; case SYS_POWER_OFF: charlcd_puts(lcd, "\x0cPower off.\x1b[Lc\x1b[Lb\x1b[L+"); break; default: break; } return NOTIFY_DONE; } static struct notifier_block panel_notifier = { .notifier_call = panel_notify_sys, }; int charlcd_register(struct charlcd *lcd) { int ret; ret = charlcd_init(lcd); if (ret) return ret; ret = misc_register(&charlcd_dev); if (ret) return ret; the_charlcd = lcd; register_reboot_notifier(&panel_notifier); return 0; } EXPORT_SYMBOL_GPL(charlcd_register); int charlcd_unregister(struct charlcd *lcd) { struct charlcd_priv *priv = charlcd_to_priv(lcd); unregister_reboot_notifier(&panel_notifier); charlcd_puts(lcd, "\x0cLCD driver unloaded.\x1b[Lc\x1b[Lb\x1b[L-"); misc_deregister(&charlcd_dev); the_charlcd = NULL; if (lcd->ops->backlight) { cancel_delayed_work_sync(&priv->bl_work); priv->lcd.ops->backlight(&priv->lcd, CHARLCD_OFF); } return 0; } EXPORT_SYMBOL_GPL(charlcd_unregister); MODULE_LICENSE("GPL");
linux-master
drivers/auxdisplay/charlcd.c
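From user space this core is a write-only character device, /dev/lcd, and the sequences parsed in charlcd_write_char() and handle_lcd_special_code() above are its whole protocol: ESC "[2J" clears, ESC "[H" homes, and ESC "[L" introduces the driver-specific codes ('D' display on, '+' backlight on, "xNNyNN;" cursor positioning, and so on). The fragment below is a hedged user-space illustration of that protocol; the message text is arbitrary.

/* User-space sketch: drive /dev/lcd with the escape codes parsed above. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char msg[] =
		"\x1b[2J"	/* clear display */
		"\x1b[H"	/* cursor home */
		"\x1b[LD"	/* display on */
		"\x1b[L+"	/* backlight on */
		"Hello\n"
		"\x1b[Lx0y1;"	/* go to column 0, line 1 */
		"charlcd";
	int fd = open("/dev/lcd", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, msg, strlen(msg)) < 0)
		return 1;
	close(fd);
	return 0;
}

Note that the device enforces a single opener at a time and rejects readable opens, so O_WRONLY is the only mode that works.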
// SPDX-License-Identifier: GPL-2.0 /* * Filename: ks0108.c * Version: 0.1.0 * Description: ks0108 LCD Controller driver * Depends: parport * * Author: Copyright (C) Miguel Ojeda <[email protected]> * Date: 2006-10-31 */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/parport.h> #include <linux/ks0108.h> #define KS0108_NAME "ks0108" /* * Module Parameters */ static unsigned int ks0108_port = CONFIG_KS0108_PORT; module_param(ks0108_port, uint, 0444); MODULE_PARM_DESC(ks0108_port, "Parallel port where the LCD is connected"); static unsigned int ks0108_delay = CONFIG_KS0108_DELAY; module_param(ks0108_delay, uint, 0444); MODULE_PARM_DESC(ks0108_delay, "Delay between each control writing (microseconds)"); /* * Device */ static struct parport *ks0108_parport; static struct pardevice *ks0108_pardevice; /* * ks0108 Exported Commands (don't lock) * * You _should_ lock in the top driver: This functions _should not_ * get race conditions in any way. Locking for each byte here would be * so slow and useless. * * There are not bit definitions because they are not flags, * just arbitrary combinations defined by the documentation for each * function in the ks0108 LCD controller. If you want to know what means * a specific combination, look at the function's name. * * The ks0108_writecontrol bits need to be reverted ^(0,1,3) because * the parallel port also revert them using a "not" logic gate. */ #define bit(n) (((unsigned char)1)<<(n)) void ks0108_writedata(unsigned char byte) { parport_write_data(ks0108_parport, byte); } void ks0108_writecontrol(unsigned char byte) { udelay(ks0108_delay); parport_write_control(ks0108_parport, byte ^ (bit(0) | bit(1) | bit(3))); } void ks0108_displaystate(unsigned char state) { ks0108_writedata((state ? bit(0) : 0) | bit(1) | bit(2) | bit(3) | bit(4) | bit(5)); } void ks0108_startline(unsigned char startline) { ks0108_writedata(min_t(unsigned char, startline, 63) | bit(6) | bit(7)); } void ks0108_address(unsigned char address) { ks0108_writedata(min_t(unsigned char, address, 63) | bit(6)); } void ks0108_page(unsigned char page) { ks0108_writedata(min_t(unsigned char, page, 7) | bit(3) | bit(4) | bit(5) | bit(7)); } EXPORT_SYMBOL_GPL(ks0108_writedata); EXPORT_SYMBOL_GPL(ks0108_writecontrol); EXPORT_SYMBOL_GPL(ks0108_displaystate); EXPORT_SYMBOL_GPL(ks0108_startline); EXPORT_SYMBOL_GPL(ks0108_address); EXPORT_SYMBOL_GPL(ks0108_page); /* * Is the module inited? */ static unsigned char ks0108_inited; unsigned char ks0108_isinited(void) { return ks0108_inited; } EXPORT_SYMBOL_GPL(ks0108_isinited); static void ks0108_parport_attach(struct parport *port) { struct pardev_cb ks0108_cb; if (port->base != ks0108_port) return; memset(&ks0108_cb, 0, sizeof(ks0108_cb)); ks0108_cb.flags = PARPORT_DEV_EXCL; ks0108_pardevice = parport_register_dev_model(port, KS0108_NAME, &ks0108_cb, 0); if (!ks0108_pardevice) { pr_err("ERROR: parport didn't register new device\n"); return; } if (parport_claim(ks0108_pardevice)) { pr_err("could not claim access to parport %i. 
Aborting.\n", ks0108_port); goto err_unreg_device; } ks0108_parport = port; ks0108_inited = 1; return; err_unreg_device: parport_unregister_device(ks0108_pardevice); ks0108_pardevice = NULL; } static void ks0108_parport_detach(struct parport *port) { if (port->base != ks0108_port) return; if (!ks0108_pardevice) { pr_err("%s: already unregistered.\n", KS0108_NAME); return; } parport_release(ks0108_pardevice); parport_unregister_device(ks0108_pardevice); ks0108_pardevice = NULL; ks0108_parport = NULL; } /* * Module Init & Exit */ static struct parport_driver ks0108_parport_driver = { .name = "ks0108", .match_port = ks0108_parport_attach, .detach = ks0108_parport_detach, .devmodel = true, }; module_parport_driver(ks0108_parport_driver); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Miguel Ojeda <[email protected]>"); MODULE_DESCRIPTION("ks0108 LCD Controller driver");
linux-master
drivers/auxdisplay/ks0108.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Character line display core support * * Copyright (C) 2016 Imagination Technologies * Author: Paul Burton <[email protected]> * * Copyright (C) 2021 Glider bv */ #include <generated/utsrelease.h> #include <linux/device.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/sysfs.h> #include <linux/timer.h> #include "line-display.h" #define DEFAULT_SCROLL_RATE (HZ / 2) /** * linedisp_scroll() - scroll the display by a character * @t: really a pointer to the private data structure * * Scroll the current message along the display by one character, rearming the * timer if required. */ static void linedisp_scroll(struct timer_list *t) { struct linedisp *linedisp = from_timer(linedisp, t, timer); unsigned int i, ch = linedisp->scroll_pos; unsigned int num_chars = linedisp->num_chars; /* update the current message string */ for (i = 0; i < num_chars;) { /* copy as many characters from the string as possible */ for (; i < num_chars && ch < linedisp->message_len; i++, ch++) linedisp->buf[i] = linedisp->message[ch]; /* wrap around to the start of the string */ ch = 0; } /* update the display */ linedisp->update(linedisp); /* move on to the next character */ linedisp->scroll_pos++; linedisp->scroll_pos %= linedisp->message_len; /* rearm the timer */ if (linedisp->message_len > num_chars && linedisp->scroll_rate) mod_timer(&linedisp->timer, jiffies + linedisp->scroll_rate); } /** * linedisp_display() - set the message to be displayed * @linedisp: pointer to the private data structure * @msg: the message to display * @count: length of msg, or -1 * * Display a new message @msg on the display. @msg can be longer than the * number of characters the display can display, in which case it will begin * scrolling across the display. * * Return: 0 on success, -ENOMEM on memory allocation failure */ static int linedisp_display(struct linedisp *linedisp, const char *msg, ssize_t count) { char *new_msg; /* stop the scroll timer */ del_timer_sync(&linedisp->timer); if (count == -1) count = strlen(msg); /* if the string ends with a newline, trim it */ if (msg[count - 1] == '\n') count--; if (!count) { /* Clear the display */ kfree(linedisp->message); linedisp->message = NULL; linedisp->message_len = 0; memset(linedisp->buf, ' ', linedisp->num_chars); linedisp->update(linedisp); return 0; } new_msg = kmemdup_nul(msg, count, GFP_KERNEL); if (!new_msg) return -ENOMEM; kfree(linedisp->message); linedisp->message = new_msg; linedisp->message_len = count; linedisp->scroll_pos = 0; /* update the display */ linedisp_scroll(&linedisp->timer); return 0; } /** * message_show() - read message via sysfs * @dev: the display device * @attr: the display message attribute * @buf: the buffer to read the message into * * Read the current message being displayed or scrolled across the display into * @buf, for reads from sysfs. * * Return: the number of characters written to @buf */ static ssize_t message_show(struct device *dev, struct device_attribute *attr, char *buf) { struct linedisp *linedisp = container_of(dev, struct linedisp, dev); return sysfs_emit(buf, "%s\n", linedisp->message); } /** * message_store() - write a new message via sysfs * @dev: the display device * @attr: the display message attribute * @buf: the buffer containing the new message * @count: the size of the message in @buf * * Write a new message to display or scroll across the display from sysfs. 
* * Return: the size of the message on success, else -ERRNO */ static ssize_t message_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct linedisp *linedisp = container_of(dev, struct linedisp, dev); int err; err = linedisp_display(linedisp, buf, count); return err ?: count; } static DEVICE_ATTR_RW(message); static ssize_t scroll_step_ms_show(struct device *dev, struct device_attribute *attr, char *buf) { struct linedisp *linedisp = container_of(dev, struct linedisp, dev); return sysfs_emit(buf, "%u\n", jiffies_to_msecs(linedisp->scroll_rate)); } static ssize_t scroll_step_ms_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct linedisp *linedisp = container_of(dev, struct linedisp, dev); unsigned int ms; if (kstrtouint(buf, 10, &ms) != 0) return -EINVAL; linedisp->scroll_rate = msecs_to_jiffies(ms); if (linedisp->message && linedisp->message_len > linedisp->num_chars) { del_timer_sync(&linedisp->timer); if (linedisp->scroll_rate) linedisp_scroll(&linedisp->timer); } return count; } static DEVICE_ATTR_RW(scroll_step_ms); static struct attribute *linedisp_attrs[] = { &dev_attr_message.attr, &dev_attr_scroll_step_ms.attr, NULL, }; ATTRIBUTE_GROUPS(linedisp); static const struct device_type linedisp_type = { .groups = linedisp_groups, }; /** * linedisp_register - register a character line display * @linedisp: pointer to character line display structure * @parent: parent device * @num_chars: the number of characters that can be displayed * @buf: pointer to a buffer that can hold @num_chars characters * @update: Function called to update the display. This must not sleep! * * Return: zero on success, else a negative error code. */ int linedisp_register(struct linedisp *linedisp, struct device *parent, unsigned int num_chars, char *buf, void (*update)(struct linedisp *linedisp)) { static atomic_t linedisp_id = ATOMIC_INIT(-1); int err; memset(linedisp, 0, sizeof(*linedisp)); linedisp->dev.parent = parent; linedisp->dev.type = &linedisp_type; linedisp->update = update; linedisp->buf = buf; linedisp->num_chars = num_chars; linedisp->scroll_rate = DEFAULT_SCROLL_RATE; device_initialize(&linedisp->dev); dev_set_name(&linedisp->dev, "linedisp.%lu", (unsigned long)atomic_inc_return(&linedisp_id)); /* initialise a timer for scrolling the message */ timer_setup(&linedisp->timer, linedisp_scroll, 0); err = device_add(&linedisp->dev); if (err) goto out_del_timer; /* display a default message */ err = linedisp_display(linedisp, "Linux " UTS_RELEASE " ", -1); if (err) goto out_del_dev; return 0; out_del_dev: device_del(&linedisp->dev); out_del_timer: del_timer_sync(&linedisp->timer); put_device(&linedisp->dev); return err; } EXPORT_SYMBOL_GPL(linedisp_register); /** * linedisp_unregister - unregister a character line display * @linedisp: pointer to character line display structure registered previously * with linedisp_register() */ void linedisp_unregister(struct linedisp *linedisp) { device_del(&linedisp->dev); del_timer_sync(&linedisp->timer); kfree(linedisp->message); put_device(&linedisp->dev); } EXPORT_SYMBOL_GPL(linedisp_unregister); MODULE_LICENSE("GPL");
linux-master
drivers/auxdisplay/line-display.c
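The core above supplies registration, sysfs attributes and message scrolling; a display driver only has to provide the character buffer and a non-sleeping update() callback. Below is a minimal sketch of such a client, written in the style of the in-tree auxdisplay users; the platform driver, the "my-linedisp" name and the my_hw_write_chars() helper are hypothetical, illustrative assumptions rather than real kernel code.

// Hypothetical linedisp client (sketch only, not an in-tree driver).
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "line-display.h"

#define MY_DISP_CHARS	8	/* assumed number of character cells */

struct my_disp {
	struct linedisp linedisp;
	char buf[MY_DISP_CHARS];
};

/* Hypothetical hardware hook: push the character cells to the controller. */
static void my_hw_write_chars(const char *buf, unsigned int n)
{
	/* register writes elided in this sketch */
}

/* Called by the linedisp core whenever linedisp->buf changes; must not sleep. */
static void my_disp_update(struct linedisp *linedisp)
{
	struct my_disp *disp = container_of(linedisp, struct my_disp, linedisp);

	my_hw_write_chars(disp->buf, MY_DISP_CHARS);
}

static int my_disp_probe(struct platform_device *pdev)
{
	struct my_disp *disp;

	disp = devm_kzalloc(&pdev->dev, sizeof(*disp), GFP_KERNEL);
	if (!disp)
		return -ENOMEM;

	platform_set_drvdata(pdev, disp);

	/* Creates /sys/devices/.../linedisp.N with "message" and "scroll_step_ms". */
	return linedisp_register(&disp->linedisp, &pdev->dev, MY_DISP_CHARS,
				 disp->buf, my_disp_update);
}

static int my_disp_remove(struct platform_device *pdev)
{
	struct my_disp *disp = platform_get_drvdata(pdev);

	linedisp_unregister(&disp->linedisp);
	return 0;
}

static struct platform_driver my_disp_driver = {
	.probe = my_disp_probe,
	.remove = my_disp_remove,
	.driver = {
		.name = "my-linedisp",
	},
};
module_platform_driver(my_disp_driver);
MODULE_LICENSE("GPL");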
// SPDX-License-Identifier: GPL-2.0-or-later /* * PPS sysfs support * * Copyright (C) 2007-2009 Rodolfo Giometti <[email protected]> */ #include <linux/device.h> #include <linux/module.h> #include <linux/string.h> #include <linux/pps_kernel.h> /* * Attribute functions */ static ssize_t assert_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pps_device *pps = dev_get_drvdata(dev); if (!(pps->info.mode & PPS_CAPTUREASSERT)) return 0; return sprintf(buf, "%lld.%09d#%d\n", (long long) pps->assert_tu.sec, pps->assert_tu.nsec, pps->assert_sequence); } static DEVICE_ATTR_RO(assert); static ssize_t clear_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pps_device *pps = dev_get_drvdata(dev); if (!(pps->info.mode & PPS_CAPTURECLEAR)) return 0; return sprintf(buf, "%lld.%09d#%d\n", (long long) pps->clear_tu.sec, pps->clear_tu.nsec, pps->clear_sequence); } static DEVICE_ATTR_RO(clear); static ssize_t mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pps_device *pps = dev_get_drvdata(dev); return sprintf(buf, "%4x\n", pps->info.mode); } static DEVICE_ATTR_RO(mode); static ssize_t echo_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pps_device *pps = dev_get_drvdata(dev); return sprintf(buf, "%d\n", !!pps->info.echo); } static DEVICE_ATTR_RO(echo); static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pps_device *pps = dev_get_drvdata(dev); return sprintf(buf, "%s\n", pps->info.name); } static DEVICE_ATTR_RO(name); static ssize_t path_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pps_device *pps = dev_get_drvdata(dev); return sprintf(buf, "%s\n", pps->info.path); } static DEVICE_ATTR_RO(path); static struct attribute *pps_attrs[] = { &dev_attr_assert.attr, &dev_attr_clear.attr, &dev_attr_mode.attr, &dev_attr_echo.attr, &dev_attr_name.attr, &dev_attr_path.attr, NULL, }; static const struct attribute_group pps_group = { .attrs = pps_attrs, }; const struct attribute_group *pps_groups[] = { &pps_group, NULL, };
linux-master
drivers/pps/sysfs.c
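The attributes above appear as plain text files under /sys/class/pps/ppsN/. As a small illustration of the format emitted by assert_show() ("<sec>.<nsec>#<sequence>"), a userspace reader could look like the following sketch; the pps0 path is an assumption and error handling is minimal.

/* Userspace sketch: read and parse the "assert" sysfs attribute. */
#include <stdio.h>

int main(void)
{
	long long sec;
	int nsec, seq;
	FILE *f = fopen("/sys/class/pps/pps0/assert", "r");

	if (!f) {
		perror("open assert attribute");
		return 1;
	}
	if (fscanf(f, "%lld.%d#%d", &sec, &nsec, &seq) == 3)
		printf("assert #%d at %lld.%09d\n", seq, sec, nsec);
	fclose(f);
	return 0;
}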
// SPDX-License-Identifier: GPL-2.0-or-later /* * PPS kernel consumer API * * Copyright (C) 2009-2010 Alexander Gordeev <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/pps_kernel.h> #include "kc.h" /* * Global variables */ /* state variables to bind kernel consumer */ static DEFINE_SPINLOCK(pps_kc_hardpps_lock); /* PPS API (RFC 2783): current source and mode for kernel consumer */ static struct pps_device *pps_kc_hardpps_dev; /* unique pointer to device */ static int pps_kc_hardpps_mode; /* mode bits for kernel consumer */ /* pps_kc_bind - control PPS kernel consumer binding * @pps: the PPS source * @bind_args: kernel consumer bind parameters * * This function is used to bind or unbind PPS kernel consumer according to * supplied parameters. Should not be called in interrupt context. */ int pps_kc_bind(struct pps_device *pps, struct pps_bind_args *bind_args) { /* Check if another consumer is already bound */ spin_lock_irq(&pps_kc_hardpps_lock); if (bind_args->edge == 0) if (pps_kc_hardpps_dev == pps) { pps_kc_hardpps_mode = 0; pps_kc_hardpps_dev = NULL; spin_unlock_irq(&pps_kc_hardpps_lock); dev_info(pps->dev, "unbound kernel" " consumer\n"); } else { spin_unlock_irq(&pps_kc_hardpps_lock); dev_err(pps->dev, "selected kernel consumer" " is not bound\n"); return -EINVAL; } else if (pps_kc_hardpps_dev == NULL || pps_kc_hardpps_dev == pps) { pps_kc_hardpps_mode = bind_args->edge; pps_kc_hardpps_dev = pps; spin_unlock_irq(&pps_kc_hardpps_lock); dev_info(pps->dev, "bound kernel consumer: " "edge=0x%x\n", bind_args->edge); } else { spin_unlock_irq(&pps_kc_hardpps_lock); dev_err(pps->dev, "another kernel consumer" " is already bound\n"); return -EINVAL; } return 0; } /* pps_kc_remove - unbind kernel consumer on PPS source removal * @pps: the PPS source * * This function is used to disable kernel consumer on PPS source removal * if this source was bound to PPS kernel consumer. Can be called on any * source safely. Should not be called in interrupt context. */ void pps_kc_remove(struct pps_device *pps) { spin_lock_irq(&pps_kc_hardpps_lock); if (pps == pps_kc_hardpps_dev) { pps_kc_hardpps_mode = 0; pps_kc_hardpps_dev = NULL; spin_unlock_irq(&pps_kc_hardpps_lock); dev_info(pps->dev, "unbound kernel consumer" " on device removal\n"); } else spin_unlock_irq(&pps_kc_hardpps_lock); } /* pps_kc_event - call hardpps() on PPS event * @pps: the PPS source * @ts: PPS event timestamp * @event: PPS event edge * * This function calls hardpps() when an event from bound PPS source occurs. */ void pps_kc_event(struct pps_device *pps, struct pps_event_time *ts, int event) { unsigned long flags; /* Pass some events to kernel consumer if activated */ spin_lock_irqsave(&pps_kc_hardpps_lock, flags); if (pps == pps_kc_hardpps_dev && event & pps_kc_hardpps_mode) hardpps(&ts->ts_real, &ts->ts_raw); spin_unlock_irqrestore(&pps_kc_hardpps_lock, flags); }
linux-master
drivers/pps/kc.c
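pps_kc_bind() is normally reached from userspace through the PPS_KC_BIND ioctl handled in pps.c below. A hedged userspace sketch follows, assuming a /dev/pps0 source that supports assert capture and a caller with CAP_SYS_TIME; unbinding would use the same call with edge set to 0.

/* Userspace sketch: bind the kernel consumer (hardpps) to a PPS source. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/pps.h>

int main(void)
{
	struct pps_bind_args bind;
	int fd = open("/dev/pps0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/pps0");
		return 1;
	}

	memset(&bind, 0, sizeof(bind));
	bind.tsformat = PPS_TSFMT_TSPEC;	/* only format the ioctl accepts */
	bind.edge = PPS_CAPTUREASSERT;		/* feed assert edges to hardpps() */
	bind.consumer = PPS_KC_HARDPPS;

	if (ioctl(fd, PPS_KC_BIND, &bind) < 0)
		perror("PPS_KC_BIND");

	close(fd);
	return 0;
}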
// SPDX-License-Identifier: GPL-2.0-or-later /* * PPS core file * * Copyright (C) 2005-2009 Rodolfo Giometti <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/uaccess.h> #include <linux/idr.h> #include <linux/mutex.h> #include <linux/cdev.h> #include <linux/poll.h> #include <linux/pps_kernel.h> #include <linux/slab.h> #include "kc.h" /* * Local variables */ static dev_t pps_devt; static struct class *pps_class; static DEFINE_MUTEX(pps_idr_lock); static DEFINE_IDR(pps_idr); /* * Char device methods */ static __poll_t pps_cdev_poll(struct file *file, poll_table *wait) { struct pps_device *pps = file->private_data; poll_wait(file, &pps->queue, wait); return EPOLLIN | EPOLLRDNORM; } static int pps_cdev_fasync(int fd, struct file *file, int on) { struct pps_device *pps = file->private_data; return fasync_helper(fd, file, on, &pps->async_queue); } static int pps_cdev_pps_fetch(struct pps_device *pps, struct pps_fdata *fdata) { unsigned int ev = pps->last_ev; int err = 0; /* Manage the timeout */ if (fdata->timeout.flags & PPS_TIME_INVALID) err = wait_event_interruptible(pps->queue, ev != pps->last_ev); else { unsigned long ticks; dev_dbg(pps->dev, "timeout %lld.%09d\n", (long long) fdata->timeout.sec, fdata->timeout.nsec); ticks = fdata->timeout.sec * HZ; ticks += fdata->timeout.nsec / (NSEC_PER_SEC / HZ); if (ticks != 0) { err = wait_event_interruptible_timeout( pps->queue, ev != pps->last_ev, ticks); if (err == 0) return -ETIMEDOUT; } } /* Check for pending signals */ if (err == -ERESTARTSYS) { dev_dbg(pps->dev, "pending signal caught\n"); return -EINTR; } return 0; } static long pps_cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct pps_device *pps = file->private_data; struct pps_kparams params; void __user *uarg = (void __user *) arg; int __user *iuarg = (int __user *) arg; int err; switch (cmd) { case PPS_GETPARAMS: dev_dbg(pps->dev, "PPS_GETPARAMS\n"); spin_lock_irq(&pps->lock); /* Get the current parameters */ params = pps->params; spin_unlock_irq(&pps->lock); err = copy_to_user(uarg, &params, sizeof(struct pps_kparams)); if (err) return -EFAULT; break; case PPS_SETPARAMS: dev_dbg(pps->dev, "PPS_SETPARAMS\n"); /* Check the capabilities */ if (!capable(CAP_SYS_TIME)) return -EPERM; err = copy_from_user(&params, uarg, sizeof(struct pps_kparams)); if (err) return -EFAULT; if (!(params.mode & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR))) { dev_dbg(pps->dev, "capture mode unspecified (%x)\n", params.mode); return -EINVAL; } /* Check for supported capabilities */ if ((params.mode & ~pps->info.mode) != 0) { dev_dbg(pps->dev, "unsupported capabilities (%x)\n", params.mode); return -EINVAL; } spin_lock_irq(&pps->lock); /* Save the new parameters */ pps->params = params; /* Restore the read only parameters */ if ((params.mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) { /* section 3.3 of RFC 2783 interpreted */ dev_dbg(pps->dev, "time format unspecified (%x)\n", params.mode); pps->params.mode |= PPS_TSFMT_TSPEC; } if (pps->info.mode & PPS_CANWAIT) pps->params.mode |= PPS_CANWAIT; pps->params.api_version = PPS_API_VERS; /* * Clear unused fields of pps_kparams to avoid leaking * uninitialized data of the PPS_SETPARAMS caller via * PPS_GETPARAMS */ pps->params.assert_off_tu.flags = 0; pps->params.clear_off_tu.flags = 0; spin_unlock_irq(&pps->lock); break; case PPS_GETCAP: dev_dbg(pps->dev, "PPS_GETCAP\n"); err = put_user(pps->info.mode, iuarg); if 
(err) return -EFAULT; break; case PPS_FETCH: { struct pps_fdata fdata; dev_dbg(pps->dev, "PPS_FETCH\n"); err = copy_from_user(&fdata, uarg, sizeof(struct pps_fdata)); if (err) return -EFAULT; err = pps_cdev_pps_fetch(pps, &fdata); if (err) return err; /* Return the fetched timestamp */ spin_lock_irq(&pps->lock); fdata.info.assert_sequence = pps->assert_sequence; fdata.info.clear_sequence = pps->clear_sequence; fdata.info.assert_tu = pps->assert_tu; fdata.info.clear_tu = pps->clear_tu; fdata.info.current_mode = pps->current_mode; spin_unlock_irq(&pps->lock); err = copy_to_user(uarg, &fdata, sizeof(struct pps_fdata)); if (err) return -EFAULT; break; } case PPS_KC_BIND: { struct pps_bind_args bind_args; dev_dbg(pps->dev, "PPS_KC_BIND\n"); /* Check the capabilities */ if (!capable(CAP_SYS_TIME)) return -EPERM; if (copy_from_user(&bind_args, uarg, sizeof(struct pps_bind_args))) return -EFAULT; /* Check for supported capabilities */ if ((bind_args.edge & ~pps->info.mode) != 0) { dev_err(pps->dev, "unsupported capabilities (%x)\n", bind_args.edge); return -EINVAL; } /* Validate parameters roughly */ if (bind_args.tsformat != PPS_TSFMT_TSPEC || (bind_args.edge & ~PPS_CAPTUREBOTH) != 0 || bind_args.consumer != PPS_KC_HARDPPS) { dev_err(pps->dev, "invalid kernel consumer bind" " parameters (%x)\n", bind_args.edge); return -EINVAL; } err = pps_kc_bind(pps, &bind_args); if (err < 0) return err; break; } default: return -ENOTTY; } return 0; } #ifdef CONFIG_COMPAT static long pps_cdev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct pps_device *pps = file->private_data; void __user *uarg = (void __user *) arg; cmd = _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(void *)); if (cmd == PPS_FETCH) { struct pps_fdata_compat compat; struct pps_fdata fdata; int err; dev_dbg(pps->dev, "PPS_FETCH\n"); err = copy_from_user(&compat, uarg, sizeof(struct pps_fdata_compat)); if (err) return -EFAULT; memcpy(&fdata.timeout, &compat.timeout, sizeof(struct pps_ktime_compat)); err = pps_cdev_pps_fetch(pps, &fdata); if (err) return err; /* Return the fetched timestamp */ spin_lock_irq(&pps->lock); compat.info.assert_sequence = pps->assert_sequence; compat.info.clear_sequence = pps->clear_sequence; compat.info.current_mode = pps->current_mode; memcpy(&compat.info.assert_tu, &pps->assert_tu, sizeof(struct pps_ktime_compat)); memcpy(&compat.info.clear_tu, &pps->clear_tu, sizeof(struct pps_ktime_compat)); spin_unlock_irq(&pps->lock); return copy_to_user(uarg, &compat, sizeof(struct pps_fdata_compat)) ? 
-EFAULT : 0; } return pps_cdev_ioctl(file, cmd, arg); } #else #define pps_cdev_compat_ioctl NULL #endif static int pps_cdev_open(struct inode *inode, struct file *file) { struct pps_device *pps = container_of(inode->i_cdev, struct pps_device, cdev); file->private_data = pps; kobject_get(&pps->dev->kobj); return 0; } static int pps_cdev_release(struct inode *inode, struct file *file) { struct pps_device *pps = container_of(inode->i_cdev, struct pps_device, cdev); kobject_put(&pps->dev->kobj); return 0; } /* * Char device stuff */ static const struct file_operations pps_cdev_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .poll = pps_cdev_poll, .fasync = pps_cdev_fasync, .compat_ioctl = pps_cdev_compat_ioctl, .unlocked_ioctl = pps_cdev_ioctl, .open = pps_cdev_open, .release = pps_cdev_release, }; static void pps_device_destruct(struct device *dev) { struct pps_device *pps = dev_get_drvdata(dev); cdev_del(&pps->cdev); /* Now we can release the ID for re-use */ pr_debug("deallocating pps%d\n", pps->id); mutex_lock(&pps_idr_lock); idr_remove(&pps_idr, pps->id); mutex_unlock(&pps_idr_lock); kfree(dev); kfree(pps); } int pps_register_cdev(struct pps_device *pps) { int err; dev_t devt; mutex_lock(&pps_idr_lock); /* * Get new ID for the new PPS source. After idr_alloc() calling * the new source will be freely available into the kernel. */ err = idr_alloc(&pps_idr, pps, 0, PPS_MAX_SOURCES, GFP_KERNEL); if (err < 0) { if (err == -ENOSPC) { pr_err("%s: too many PPS sources in the system\n", pps->info.name); err = -EBUSY; } goto out_unlock; } pps->id = err; mutex_unlock(&pps_idr_lock); devt = MKDEV(MAJOR(pps_devt), pps->id); cdev_init(&pps->cdev, &pps_cdev_fops); pps->cdev.owner = pps->info.owner; err = cdev_add(&pps->cdev, devt, 1); if (err) { pr_err("%s: failed to add char device %d:%d\n", pps->info.name, MAJOR(pps_devt), pps->id); goto free_idr; } pps->dev = device_create(pps_class, pps->info.dev, devt, pps, "pps%d", pps->id); if (IS_ERR(pps->dev)) { err = PTR_ERR(pps->dev); goto del_cdev; } /* Override the release function with our own */ pps->dev->release = pps_device_destruct; pr_debug("source %s got cdev (%d:%d)\n", pps->info.name, MAJOR(pps_devt), pps->id); return 0; del_cdev: cdev_del(&pps->cdev); free_idr: mutex_lock(&pps_idr_lock); idr_remove(&pps_idr, pps->id); out_unlock: mutex_unlock(&pps_idr_lock); return err; } void pps_unregister_cdev(struct pps_device *pps) { pr_debug("unregistering pps%d\n", pps->id); pps->lookup_cookie = NULL; device_destroy(pps_class, pps->dev->devt); } /* * Look up a pps device by magic cookie. * The cookie is usually a pointer to some enclosing device, but this * code doesn't care; you should never be dereferencing it. * * This is a bit of a kludge that is currently used only by the PPS * serial line discipline. It may need to be tweaked when a second user * is found. * * There is no function interface for setting the lookup_cookie field. * It's initialized to NULL when the pps device is created, and if a * client wants to use it, just fill it in afterward. * * The cookie is automatically set to NULL in pps_unregister_source() * so that it will not be used again, even if the pps device cannot * be removed from the idr due to pending references holding the minor * number in use. 
*/ struct pps_device *pps_lookup_dev(void const *cookie) { struct pps_device *pps; unsigned id; rcu_read_lock(); idr_for_each_entry(&pps_idr, pps, id) if (cookie == pps->lookup_cookie) break; rcu_read_unlock(); return pps; } EXPORT_SYMBOL(pps_lookup_dev); /* * Module stuff */ static void __exit pps_exit(void) { class_destroy(pps_class); unregister_chrdev_region(pps_devt, PPS_MAX_SOURCES); } static int __init pps_init(void) { int err; pps_class = class_create("pps"); if (IS_ERR(pps_class)) { pr_err("failed to allocate class\n"); return PTR_ERR(pps_class); } pps_class->dev_groups = pps_groups; err = alloc_chrdev_region(&pps_devt, 0, PPS_MAX_SOURCES, "pps"); if (err < 0) { pr_err("failed to allocate char device region\n"); goto remove_class; } pr_info("LinuxPPS API ver. %d registered\n", PPS_API_VERS); pr_info("Software ver. %s - Copyright 2005-2007 Rodolfo Giometti " "<[email protected]>\n", PPS_VERSION); return 0; remove_class: class_destroy(pps_class); return err; } subsys_initcall(pps_init); module_exit(pps_exit); MODULE_AUTHOR("Rodolfo Giometti <[email protected]>"); MODULE_DESCRIPTION("LinuxPPS support (RFC 2783) - ver. " PPS_VERSION); MODULE_LICENSE("GPL");
linux-master
drivers/pps/pps.c
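For the fetch path above, a minimal userspace consumer might look like this sketch: it blocks in PPS_FETCH (a PPS_TIME_INVALID timeout means wait indefinitely) and prints the last assert timestamp. The /dev/pps0 path is an assumption.

/* Userspace sketch: wait for the next PPS event via PPS_FETCH. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/pps.h>

int main(void)
{
	struct pps_fdata fdata;
	int fd = open("/dev/pps0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/pps0");
		return 1;
	}

	memset(&fdata, 0, sizeof(fdata));
	fdata.timeout.flags = PPS_TIME_INVALID;	/* block until an event arrives */

	if (ioctl(fd, PPS_FETCH, &fdata) < 0) {
		perror("PPS_FETCH");
		close(fd);
		return 1;
	}

	printf("assert #%u at %lld.%09d\n",
	       fdata.info.assert_sequence,
	       (long long)fdata.info.assert_tu.sec,
	       fdata.info.assert_tu.nsec);
	close(fd);
	return 0;
}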
// SPDX-License-Identifier: GPL-2.0-or-later /* * kernel API * * Copyright (C) 2005-2009 Rodolfo Giometti <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/timex.h> #include <linux/spinlock.h> #include <linux/fs.h> #include <linux/pps_kernel.h> #include <linux/slab.h> #include "kc.h" /* * Local functions */ static void pps_add_offset(struct pps_ktime *ts, struct pps_ktime *offset) { ts->nsec += offset->nsec; while (ts->nsec >= NSEC_PER_SEC) { ts->nsec -= NSEC_PER_SEC; ts->sec++; } while (ts->nsec < 0) { ts->nsec += NSEC_PER_SEC; ts->sec--; } ts->sec += offset->sec; } static void pps_echo_client_default(struct pps_device *pps, int event, void *data) { dev_info(pps->dev, "echo %s %s\n", event & PPS_CAPTUREASSERT ? "assert" : "", event & PPS_CAPTURECLEAR ? "clear" : ""); } /* * Exported functions */ /* pps_register_source - add a PPS source in the system * @info: the PPS info struct * @default_params: the default PPS parameters of the new source * * This function is used to add a new PPS source in the system. The new * source is described by info's fields and it will have, as default PPS * parameters, the ones specified into default_params. * * The function returns, in case of success, the PPS device. Otherwise * ERR_PTR(errno). */ struct pps_device *pps_register_source(struct pps_source_info *info, int default_params) { struct pps_device *pps; int err; /* Sanity checks */ if ((info->mode & default_params) != default_params) { pr_err("%s: unsupported default parameters\n", info->name); err = -EINVAL; goto pps_register_source_exit; } if ((info->mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) { pr_err("%s: unspecified time format\n", info->name); err = -EINVAL; goto pps_register_source_exit; } /* Allocate memory for the new PPS source struct */ pps = kzalloc(sizeof(struct pps_device), GFP_KERNEL); if (pps == NULL) { err = -ENOMEM; goto pps_register_source_exit; } /* These initializations must be done before calling idr_alloc() * in order to avoid reces into pps_event(). */ pps->params.api_version = PPS_API_VERS; pps->params.mode = default_params; pps->info = *info; /* check for default echo function */ if ((pps->info.mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)) && pps->info.echo == NULL) pps->info.echo = pps_echo_client_default; init_waitqueue_head(&pps->queue); spin_lock_init(&pps->lock); /* Create the char device */ err = pps_register_cdev(pps); if (err < 0) { pr_err("%s: unable to create char device\n", info->name); goto kfree_pps; } dev_info(pps->dev, "new PPS source %s\n", info->name); return pps; kfree_pps: kfree(pps); pps_register_source_exit: pr_err("%s: unable to register source\n", info->name); return ERR_PTR(err); } EXPORT_SYMBOL(pps_register_source); /* pps_unregister_source - remove a PPS source from the system * @pps: the PPS source * * This function is used to remove a previously registered PPS source from * the system. 
*/ void pps_unregister_source(struct pps_device *pps) { pps_kc_remove(pps); pps_unregister_cdev(pps); /* don't have to kfree(pps) here because it will be done on * device destruction */ } EXPORT_SYMBOL(pps_unregister_source); /* pps_event - register a PPS event into the system * @pps: the PPS device * @ts: the event timestamp * @event: the event type * @data: userdef pointer * * This function is used by each PPS client in order to register a new * PPS event into the system (it's usually called inside an IRQ handler). * * If an echo function is associated with the PPS device it will be called * as: * pps->info.echo(pps, event, data); */ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event, void *data) { unsigned long flags; int captured = 0; struct pps_ktime ts_real = { .sec = 0, .nsec = 0, .flags = 0 }; /* check event type */ BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0); dev_dbg(pps->dev, "PPS event at %lld.%09ld\n", (s64)ts->ts_real.tv_sec, ts->ts_real.tv_nsec); timespec_to_pps_ktime(&ts_real, ts->ts_real); spin_lock_irqsave(&pps->lock, flags); /* Must call the echo function? */ if ((pps->params.mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR))) pps->info.echo(pps, event, data); /* Check the event */ pps->current_mode = pps->params.mode; if (event & pps->params.mode & PPS_CAPTUREASSERT) { /* We have to add an offset? */ if (pps->params.mode & PPS_OFFSETASSERT) pps_add_offset(&ts_real, &pps->params.assert_off_tu); /* Save the time stamp */ pps->assert_tu = ts_real; pps->assert_sequence++; dev_dbg(pps->dev, "capture assert seq #%u\n", pps->assert_sequence); captured = ~0; } if (event & pps->params.mode & PPS_CAPTURECLEAR) { /* We have to add an offset? */ if (pps->params.mode & PPS_OFFSETCLEAR) pps_add_offset(&ts_real, &pps->params.clear_off_tu); /* Save the time stamp */ pps->clear_tu = ts_real; pps->clear_sequence++; dev_dbg(pps->dev, "capture clear seq #%u\n", pps->clear_sequence); captured = ~0; } pps_kc_event(pps, ts, event); /* Wake up if captured something */ if (captured) { pps->last_ev++; wake_up_interruptible_all(&pps->queue); kill_fasync(&pps->async_queue, SIGIO, POLL_IN); } spin_unlock_irqrestore(&pps->lock, flags); } EXPORT_SYMBOL(pps_event);
linux-master
drivers/pps/kapi.c
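Distilled from the API above (and from the in-tree clients that follow), a kernel-side source boils down to pps_register_source() at load time and pps_get_ts()/pps_event() from the interrupt path. The sketch below is illustrative only: the IRQ wiring is left as a comment and the "my-pps" name is an assumption.

/* Kernel-side sketch of a PPS source fed from an interrupt handler. */
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pps_kernel.h>

static struct pps_device *my_pps;

static struct pps_source_info my_pps_info = {
	.name	= "my-pps",
	.path	= "",
	.mode	= PPS_CAPTUREASSERT | PPS_OFFSETASSERT |
		  PPS_CANWAIT | PPS_TSFMT_TSPEC,
	.owner	= THIS_MODULE,
};

static irqreturn_t my_pps_irq(int irq, void *data)
{
	struct pps_event_time ts;

	pps_get_ts(&ts);	/* grab the timestamp as early as possible */
	pps_event(my_pps, &ts, PPS_CAPTUREASSERT, NULL);
	return IRQ_HANDLED;
}

static int __init my_pps_init(void)
{
	my_pps = pps_register_source(&my_pps_info,
				     PPS_CAPTUREASSERT | PPS_OFFSETASSERT);
	if (IS_ERR(my_pps))
		return PTR_ERR(my_pps);

	/* request_irq(..., my_pps_irq, ...) would be wired up here (hypothetical) */
	return 0;
}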
// SPDX-License-Identifier: GPL-2.0-or-later /* * pps-gpio.c -- PPS client driver using GPIO * * Copyright (C) 2010 Ricardo Martins <[email protected]> * Copyright (C) 2011 James Nuss <[email protected]> */ #define PPS_GPIO_NAME "pps-gpio" #define pr_fmt(fmt) PPS_GPIO_NAME ": " fmt #include <linux/init.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/pps_kernel.h> #include <linux/gpio/consumer.h> #include <linux/list.h> #include <linux/property.h> #include <linux/timer.h> #include <linux/jiffies.h> /* Info for each registered platform device */ struct pps_gpio_device_data { int irq; /* IRQ used as PPS source */ struct pps_device *pps; /* PPS source device */ struct pps_source_info info; /* PPS source information */ struct gpio_desc *gpio_pin; /* GPIO port descriptors */ struct gpio_desc *echo_pin; struct timer_list echo_timer; /* timer to reset echo active state */ bool assert_falling_edge; bool capture_clear; unsigned int echo_active_ms; /* PPS echo active duration */ unsigned long echo_timeout; /* timer timeout value in jiffies */ }; /* * Report the PPS event */ static irqreturn_t pps_gpio_irq_handler(int irq, void *data) { const struct pps_gpio_device_data *info; struct pps_event_time ts; int rising_edge; /* Get the time stamp first */ pps_get_ts(&ts); info = data; rising_edge = gpiod_get_value(info->gpio_pin); if ((rising_edge && !info->assert_falling_edge) || (!rising_edge && info->assert_falling_edge)) pps_event(info->pps, &ts, PPS_CAPTUREASSERT, data); else if (info->capture_clear && ((rising_edge && info->assert_falling_edge) || (!rising_edge && !info->assert_falling_edge))) pps_event(info->pps, &ts, PPS_CAPTURECLEAR, data); return IRQ_HANDLED; } /* This function will only be called when an ECHO GPIO is defined */ static void pps_gpio_echo(struct pps_device *pps, int event, void *data) { /* add_timer() needs to write into info->echo_timer */ struct pps_gpio_device_data *info = data; switch (event) { case PPS_CAPTUREASSERT: if (pps->params.mode & PPS_ECHOASSERT) gpiod_set_value(info->echo_pin, 1); break; case PPS_CAPTURECLEAR: if (pps->params.mode & PPS_ECHOCLEAR) gpiod_set_value(info->echo_pin, 1); break; } /* fire the timer */ if (info->pps->params.mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)) { info->echo_timer.expires = jiffies + info->echo_timeout; add_timer(&info->echo_timer); } } /* Timer callback to reset the echo pin to the inactive state */ static void pps_gpio_echo_timer_callback(struct timer_list *t) { const struct pps_gpio_device_data *info; info = from_timer(info, t, echo_timer); gpiod_set_value(info->echo_pin, 0); } static int pps_gpio_setup(struct device *dev) { struct pps_gpio_device_data *data = dev_get_drvdata(dev); int ret; u32 value; data->gpio_pin = devm_gpiod_get(dev, NULL, GPIOD_IN); if (IS_ERR(data->gpio_pin)) return dev_err_probe(dev, PTR_ERR(data->gpio_pin), "failed to request PPS GPIO\n"); data->assert_falling_edge = device_property_read_bool(dev, "assert-falling-edge"); data->echo_pin = devm_gpiod_get_optional(dev, "echo", GPIOD_OUT_LOW); if (IS_ERR(data->echo_pin)) return dev_err_probe(dev, PTR_ERR(data->echo_pin), "failed to request ECHO GPIO\n"); if (!data->echo_pin) return 0; ret = device_property_read_u32(dev, "echo-active-ms", &value); if (ret) { dev_err(dev, "failed to get echo-active-ms from FW\n"); return ret; } /* sanity check on echo_active_ms */ if (!value || value > 999) { dev_err(dev, "echo-active-ms: 
%u - bad value from FW\n", value); return -EINVAL; } data->echo_active_ms = value; return 0; } static unsigned long get_irqf_trigger_flags(const struct pps_gpio_device_data *data) { unsigned long flags = data->assert_falling_edge ? IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; if (data->capture_clear) { flags |= ((flags & IRQF_TRIGGER_RISING) ? IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING); } return flags; } static int pps_gpio_probe(struct platform_device *pdev) { struct pps_gpio_device_data *data; struct device *dev = &pdev->dev; int ret; int pps_default_params; /* allocate space for device info */ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; dev_set_drvdata(dev, data); /* GPIO setup */ ret = pps_gpio_setup(dev); if (ret) return ret; /* IRQ setup */ ret = gpiod_to_irq(data->gpio_pin); if (ret < 0) { dev_err(dev, "failed to map GPIO to IRQ: %d\n", ret); return -EINVAL; } data->irq = ret; /* initialize PPS specific parts of the bookkeeping data structure. */ data->info.mode = PPS_CAPTUREASSERT | PPS_OFFSETASSERT | PPS_ECHOASSERT | PPS_CANWAIT | PPS_TSFMT_TSPEC; if (data->capture_clear) data->info.mode |= PPS_CAPTURECLEAR | PPS_OFFSETCLEAR | PPS_ECHOCLEAR; data->info.owner = THIS_MODULE; snprintf(data->info.name, PPS_MAX_NAME_LEN - 1, "%s.%d", pdev->name, pdev->id); if (data->echo_pin) { data->info.echo = pps_gpio_echo; data->echo_timeout = msecs_to_jiffies(data->echo_active_ms); timer_setup(&data->echo_timer, pps_gpio_echo_timer_callback, 0); } /* register PPS source */ pps_default_params = PPS_CAPTUREASSERT | PPS_OFFSETASSERT; if (data->capture_clear) pps_default_params |= PPS_CAPTURECLEAR | PPS_OFFSETCLEAR; data->pps = pps_register_source(&data->info, pps_default_params); if (IS_ERR(data->pps)) { dev_err(dev, "failed to register IRQ %d as PPS source\n", data->irq); return PTR_ERR(data->pps); } /* register IRQ interrupt handler */ ret = devm_request_irq(dev, data->irq, pps_gpio_irq_handler, get_irqf_trigger_flags(data), data->info.name, data); if (ret) { pps_unregister_source(data->pps); dev_err(dev, "failed to acquire IRQ %d\n", data->irq); return -EINVAL; } dev_info(data->pps->dev, "Registered IRQ %d as PPS source\n", data->irq); return 0; } static int pps_gpio_remove(struct platform_device *pdev) { struct pps_gpio_device_data *data = platform_get_drvdata(pdev); pps_unregister_source(data->pps); del_timer_sync(&data->echo_timer); /* reset echo pin in any case */ gpiod_set_value(data->echo_pin, 0); dev_info(&pdev->dev, "removed IRQ %d as PPS source\n", data->irq); return 0; } static const struct of_device_id pps_gpio_dt_ids[] = { { .compatible = "pps-gpio", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, pps_gpio_dt_ids); static struct platform_driver pps_gpio_driver = { .probe = pps_gpio_probe, .remove = pps_gpio_remove, .driver = { .name = PPS_GPIO_NAME, .of_match_table = pps_gpio_dt_ids, }, }; module_platform_driver(pps_gpio_driver); MODULE_AUTHOR("Ricardo Martins <[email protected]>"); MODULE_AUTHOR("James Nuss <[email protected]>"); MODULE_DESCRIPTION("Use GPIO pin as PPS source"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.2.0");
linux-master
drivers/pps/clients/pps-gpio.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * pps-ldisc.c -- PPS line discipline * * Copyright (C) 2008 Rodolfo Giometti <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/serial_core.h> #include <linux/tty.h> #include <linux/pps_kernel.h> #include <linux/bug.h> static void pps_tty_dcd_change(struct tty_struct *tty, bool active) { struct pps_device *pps; struct pps_event_time ts; pps_get_ts(&ts); pps = pps_lookup_dev(tty); /* * This should never fail, but the ldisc locking is very * convoluted, so don't crash just in case. */ if (WARN_ON_ONCE(pps == NULL)) return; /* Now do the PPS event report */ pps_event(pps, &ts, active ? PPS_CAPTUREASSERT : PPS_CAPTURECLEAR, NULL); dev_dbg(pps->dev, "PPS %s at %lu\n", active ? "assert" : "clear", jiffies); } static int (*alias_n_tty_open)(struct tty_struct *tty); static int pps_tty_open(struct tty_struct *tty) { struct pps_source_info info; struct tty_driver *drv = tty->driver; int index = tty->index + drv->name_base; struct pps_device *pps; int ret; info.owner = THIS_MODULE; info.dev = NULL; snprintf(info.name, PPS_MAX_NAME_LEN, "%s%d", drv->driver_name, index); snprintf(info.path, PPS_MAX_NAME_LEN, "/dev/%s%d", drv->name, index); info.mode = PPS_CAPTUREBOTH | \ PPS_OFFSETASSERT | PPS_OFFSETCLEAR | \ PPS_CANWAIT | PPS_TSFMT_TSPEC; pps = pps_register_source(&info, PPS_CAPTUREBOTH | \ PPS_OFFSETASSERT | PPS_OFFSETCLEAR); if (IS_ERR(pps)) { pr_err("cannot register PPS source \"%s\"\n", info.path); return PTR_ERR(pps); } pps->lookup_cookie = tty; /* Now open the base class N_TTY ldisc */ ret = alias_n_tty_open(tty); if (ret < 0) { pr_err("cannot open tty ldisc \"%s\"\n", info.path); goto err_unregister; } dev_info(pps->dev, "source \"%s\" added\n", info.path); return 0; err_unregister: pps_unregister_source(pps); return ret; } static void (*alias_n_tty_close)(struct tty_struct *tty); static void pps_tty_close(struct tty_struct *tty) { struct pps_device *pps = pps_lookup_dev(tty); alias_n_tty_close(tty); if (WARN_ON(!pps)) return; dev_info(pps->dev, "removed\n"); pps_unregister_source(pps); } static struct tty_ldisc_ops pps_ldisc_ops; /* * Module stuff */ static int __init pps_tty_init(void) { int err; /* Inherit the N_TTY's ops */ n_tty_inherit_ops(&pps_ldisc_ops); /* Save N_TTY's open()/close() methods */ alias_n_tty_open = pps_ldisc_ops.open; alias_n_tty_close = pps_ldisc_ops.close; /* Init PPS_TTY data */ pps_ldisc_ops.owner = THIS_MODULE; pps_ldisc_ops.num = N_PPS; pps_ldisc_ops.name = "pps_tty"; pps_ldisc_ops.dcd_change = pps_tty_dcd_change; pps_ldisc_ops.open = pps_tty_open; pps_ldisc_ops.close = pps_tty_close; err = tty_register_ldisc(&pps_ldisc_ops); if (err) pr_err("can't register PPS line discipline\n"); else pr_info("PPS line discipline registered\n"); return err; } static void __exit pps_tty_cleanup(void) { tty_unregister_ldisc(&pps_ldisc_ops); } module_init(pps_tty_init); module_exit(pps_tty_cleanup); MODULE_ALIAS_LDISC(N_PPS); MODULE_AUTHOR("Rodolfo Giometti <[email protected]>"); MODULE_DESCRIPTION("PPS TTY device driver"); MODULE_LICENSE("GPL");
linux-master
drivers/pps/clients/pps-ldisc.c
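The line discipline is attached from userspace (this is what ldattach(8) does); once N_PPS is set on a serial port, pps_tty_open() above registers a PPS source that is then fed by DCD transitions. A minimal sketch, assuming /dev/ttyS0 and keeping the fd open so the ldisc and PPS source stay alive:

/* Userspace sketch: attach the N_PPS line discipline to a serial port. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef N_PPS
#define N_PPS 18	/* from <linux/tty.h> */
#endif

int main(void)
{
	int ldisc = N_PPS;
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open /dev/ttyS0");
		return 1;
	}
	if (ioctl(fd, TIOCSETD, &ldisc) < 0) {
		perror("TIOCSETD");
		close(fd);
		return 1;
	}
	/* keep the descriptor open: closing it tears the ldisc (and PPS source) down */
	pause();
	return 0;
}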
// SPDX-License-Identifier: GPL-2.0-or-later /* * pps_parport.c -- kernel parallel port PPS client * * Copyright (C) 2009 Alexander Gordeev <[email protected]> */ /* * TODO: * implement echo over SEL pin */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/irqnr.h> #include <linux/time.h> #include <linux/slab.h> #include <linux/parport.h> #include <linux/pps_kernel.h> /* module parameters */ #define CLEAR_WAIT_MAX 100 #define CLEAR_WAIT_MAX_ERRORS 5 static unsigned int clear_wait = 100; MODULE_PARM_DESC(clear_wait, "Maximum number of port reads when polling for signal clear," " zero turns clear edge capture off entirely"); module_param(clear_wait, uint, 0); static DEFINE_IDA(pps_client_index); /* internal per port structure */ struct pps_client_pp { struct pardevice *pardev; /* parport device */ struct pps_device *pps; /* PPS device */ unsigned int cw; /* port clear timeout */ unsigned int cw_err; /* number of timeouts */ int index; /* device number */ }; static inline int signal_is_set(struct parport *port) { return (port->ops->read_status(port) & PARPORT_STATUS_ACK) != 0; } /* parport interrupt handler */ static void parport_irq(void *handle) { struct pps_event_time ts_assert, ts_clear; struct pps_client_pp *dev = handle; struct parport *port = dev->pardev->port; unsigned int i; unsigned long flags; /* first of all we get the time stamp... */ pps_get_ts(&ts_assert); if (dev->cw == 0) /* clear edge capture disabled */ goto out_assert; /* try capture the clear edge */ /* We have to disable interrupts here. The idea is to prevent * other interrupts on the same processor to introduce random * lags while polling the port. Reading from IO port is known * to take approximately 1us while other interrupt handlers can * take much more potentially. * * Interrupts won't be disabled for a long time because the * number of polls is limited by clear_wait parameter which is * kept rather low. So it should never be an issue. 
*/ local_irq_save(flags); /* check the signal (no signal means the pulse is lost this time) */ if (!signal_is_set(port)) { local_irq_restore(flags); dev_err(dev->pps->dev, "lost the signal\n"); goto out_assert; } /* poll the port until the signal is unset */ for (i = dev->cw; i; i--) if (!signal_is_set(port)) { pps_get_ts(&ts_clear); local_irq_restore(flags); dev->cw_err = 0; goto out_both; } local_irq_restore(flags); /* timeout */ dev->cw_err++; if (dev->cw_err >= CLEAR_WAIT_MAX_ERRORS) { dev_err(dev->pps->dev, "disabled clear edge capture after %d" " timeouts\n", dev->cw_err); dev->cw = 0; dev->cw_err = 0; } out_assert: /* fire assert event */ pps_event(dev->pps, &ts_assert, PPS_CAPTUREASSERT, NULL); return; out_both: /* fire assert event */ pps_event(dev->pps, &ts_assert, PPS_CAPTUREASSERT, NULL); /* fire clear event */ pps_event(dev->pps, &ts_clear, PPS_CAPTURECLEAR, NULL); return; } static void parport_attach(struct parport *port) { struct pardev_cb pps_client_cb; int index; struct pps_client_pp *device; struct pps_source_info info = { .name = KBUILD_MODNAME, .path = "", .mode = PPS_CAPTUREBOTH | \ PPS_OFFSETASSERT | PPS_OFFSETCLEAR | \ PPS_ECHOASSERT | PPS_ECHOCLEAR | \ PPS_CANWAIT | PPS_TSFMT_TSPEC, .owner = THIS_MODULE, .dev = NULL }; if (clear_wait > CLEAR_WAIT_MAX) { pr_err("clear_wait value should be not greater then %d\n", CLEAR_WAIT_MAX); return; } device = kzalloc(sizeof(struct pps_client_pp), GFP_KERNEL); if (!device) { pr_err("memory allocation failed, not attaching\n"); return; } index = ida_simple_get(&pps_client_index, 0, 0, GFP_KERNEL); memset(&pps_client_cb, 0, sizeof(pps_client_cb)); pps_client_cb.private = device; pps_client_cb.irq_func = parport_irq; pps_client_cb.flags = PARPORT_FLAG_EXCL; device->pardev = parport_register_dev_model(port, KBUILD_MODNAME, &pps_client_cb, index); if (!device->pardev) { pr_err("couldn't register with %s\n", port->name); goto err_free; } if (parport_claim_or_block(device->pardev) < 0) { pr_err("couldn't claim %s\n", port->name); goto err_unregister_dev; } device->pps = pps_register_source(&info, PPS_CAPTUREBOTH | PPS_OFFSETASSERT | PPS_OFFSETCLEAR); if (IS_ERR(device->pps)) { pr_err("couldn't register PPS source\n"); goto err_release_dev; } device->cw = clear_wait; port->ops->enable_irq(port); device->index = index; pr_info("attached to %s\n", port->name); return; err_release_dev: parport_release(device->pardev); err_unregister_dev: parport_unregister_device(device->pardev); err_free: ida_simple_remove(&pps_client_index, index); kfree(device); } static void parport_detach(struct parport *port) { struct pardevice *pardev = port->cad; struct pps_client_pp *device; /* FIXME: oooh, this is ugly! */ if (!pardev || strcmp(pardev->name, KBUILD_MODNAME)) /* not our port */ return; device = pardev->private; port->ops->disable_irq(port); pps_unregister_source(device->pps); parport_release(pardev); parport_unregister_device(pardev); ida_simple_remove(&pps_client_index, device->index); kfree(device); } static struct parport_driver pps_parport_driver = { .name = KBUILD_MODNAME, .match_port = parport_attach, .detach = parport_detach, .devmodel = true, }; module_parport_driver(pps_parport_driver); MODULE_AUTHOR("Alexander Gordeev <[email protected]>"); MODULE_DESCRIPTION("parallel port PPS client"); MODULE_LICENSE("GPL");
linux-master
drivers/pps/clients/pps_parport.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pps-ktimer.c -- kernel timer test client
 *
 * Copyright (C) 2005-2006 Rodolfo Giometti <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/pps_kernel.h>

/*
 * Global variables
 */

static struct pps_device *pps;
static struct timer_list ktimer;

/*
 * The kernel timer
 */

static void pps_ktimer_event(struct timer_list *unused)
{
	struct pps_event_time ts;

	/* First of all we get the time stamp... */
	pps_get_ts(&ts);

	pps_event(pps, &ts, PPS_CAPTUREASSERT, NULL);

	mod_timer(&ktimer, jiffies + HZ);
}

/*
 * The PPS info struct
 */

static struct pps_source_info pps_ktimer_info = {
	.name	= "ktimer",
	.path	= "",
	.mode	= PPS_CAPTUREASSERT | PPS_OFFSETASSERT |
		  PPS_ECHOASSERT | PPS_CANWAIT | PPS_TSFMT_TSPEC,
	.owner	= THIS_MODULE,
};

/*
 * Module stuff
 */

static void __exit pps_ktimer_exit(void)
{
	dev_info(pps->dev, "ktimer PPS source unregistered\n");

	del_timer_sync(&ktimer);
	pps_unregister_source(pps);
}

static int __init pps_ktimer_init(void)
{
	pps = pps_register_source(&pps_ktimer_info,
				  PPS_CAPTUREASSERT | PPS_OFFSETASSERT);
	if (IS_ERR(pps)) {
		pr_err("cannot register PPS source\n");
		return PTR_ERR(pps);
	}

	timer_setup(&ktimer, pps_ktimer_event, 0);
	mod_timer(&ktimer, jiffies + HZ);

	dev_info(pps->dev, "ktimer PPS source registered\n");

	return 0;
}

module_init(pps_ktimer_init);
module_exit(pps_ktimer_exit);

MODULE_AUTHOR("Rodolfo Giometti <[email protected]>");
MODULE_DESCRIPTION("dummy PPS source by using a kernel timer (just for debug)");
MODULE_LICENSE("GPL");

linux-master
drivers/pps/clients/pps-ktimer.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * pps_gen_parport.c -- kernel parallel port PPS signal generator * * Copyright (C) 2009 Alexander Gordeev <[email protected]> */ /* * TODO: * fix issues when realtime clock is adjusted in a leap */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/time.h> #include <linux/hrtimer.h> #include <linux/parport.h> #define SIGNAL 0 #define NO_SIGNAL PARPORT_CONTROL_STROBE /* module parameters */ #define SEND_DELAY_MAX 100000 static unsigned int send_delay = 30000; MODULE_PARM_DESC(delay, "Delay between setting and dropping the signal (ns)"); module_param_named(delay, send_delay, uint, 0); #define SAFETY_INTERVAL 3000 /* set the hrtimer earlier for safety (ns) */ /* internal per port structure */ struct pps_generator_pp { struct pardevice *pardev; /* parport device */ struct hrtimer timer; long port_write_time; /* calibrated port write time (ns) */ }; static struct pps_generator_pp device = { .pardev = NULL, }; static int attached; /* calibrated time between a hrtimer event and the reaction */ static long hrtimer_error = SAFETY_INTERVAL; /* the kernel hrtimer event */ static enum hrtimer_restart hrtimer_event(struct hrtimer *timer) { struct timespec64 expire_time, ts1, ts2, ts3, dts; struct pps_generator_pp *dev; struct parport *port; long lim, delta; unsigned long flags; /* We have to disable interrupts here. The idea is to prevent * other interrupts on the same processor to introduce random * lags while polling the clock. ktime_get_real_ts64() takes <1us on * most machines while other interrupt handlers can take much * more potentially. * * NB: approx time with blocked interrupts = * send_delay + 3 * SAFETY_INTERVAL */ local_irq_save(flags); /* first of all we get the time stamp... */ ktime_get_real_ts64(&ts1); expire_time = ktime_to_timespec64(hrtimer_get_softexpires(timer)); dev = container_of(timer, struct pps_generator_pp, timer); lim = NSEC_PER_SEC - send_delay - dev->port_write_time; /* check if we are late */ if (expire_time.tv_sec != ts1.tv_sec || ts1.tv_nsec > lim) { local_irq_restore(flags); pr_err("we are late this time %lld.%09ld\n", (s64)ts1.tv_sec, ts1.tv_nsec); goto done; } /* busy loop until the time is right for an assert edge */ do { ktime_get_real_ts64(&ts2); } while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim); /* set the signal */ port = dev->pardev->port; port->ops->write_control(port, SIGNAL); /* busy loop until the time is right for a clear edge */ lim = NSEC_PER_SEC - dev->port_write_time; do { ktime_get_real_ts64(&ts2); } while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim); /* unset the signal */ port->ops->write_control(port, NO_SIGNAL); ktime_get_real_ts64(&ts3); local_irq_restore(flags); /* update calibrated port write time */ dts = timespec64_sub(ts3, ts2); dev->port_write_time = (dev->port_write_time + timespec64_to_ns(&dts)) >> 1; done: /* update calibrated hrtimer error */ dts = timespec64_sub(ts1, expire_time); delta = timespec64_to_ns(&dts); /* If the new error value is bigger then the old, use the new * value, if not then slowly move towards the new value. This * way it should be safe in bad conditions and efficient in * good conditions. 
*/ if (delta >= hrtimer_error) hrtimer_error = delta; else hrtimer_error = (3 * hrtimer_error + delta) >> 2; /* update the hrtimer expire time */ hrtimer_set_expires(timer, ktime_set(expire_time.tv_sec + 1, NSEC_PER_SEC - (send_delay + dev->port_write_time + SAFETY_INTERVAL + 2 * hrtimer_error))); return HRTIMER_RESTART; } /* calibrate port write time */ #define PORT_NTESTS_SHIFT 5 static void calibrate_port(struct pps_generator_pp *dev) { struct parport *port = dev->pardev->port; int i; long acc = 0; for (i = 0; i < (1 << PORT_NTESTS_SHIFT); i++) { struct timespec64 a, b; unsigned long irq_flags; local_irq_save(irq_flags); ktime_get_real_ts64(&a); port->ops->write_control(port, NO_SIGNAL); ktime_get_real_ts64(&b); local_irq_restore(irq_flags); b = timespec64_sub(b, a); acc += timespec64_to_ns(&b); } dev->port_write_time = acc >> PORT_NTESTS_SHIFT; pr_info("port write takes %ldns\n", dev->port_write_time); } static inline ktime_t next_intr_time(struct pps_generator_pp *dev) { struct timespec64 ts; ktime_get_real_ts64(&ts); return ktime_set(ts.tv_sec + ((ts.tv_nsec > 990 * NSEC_PER_MSEC) ? 1 : 0), NSEC_PER_SEC - (send_delay + dev->port_write_time + 3 * SAFETY_INTERVAL)); } static void parport_attach(struct parport *port) { struct pardev_cb pps_cb; if (send_delay > SEND_DELAY_MAX) { pr_err("delay value should be not greater then %d\n", SEND_DELAY_MAX); return; } if (attached) { /* we already have a port */ return; } memset(&pps_cb, 0, sizeof(pps_cb)); pps_cb.private = &device; pps_cb.flags = PARPORT_FLAG_EXCL; device.pardev = parport_register_dev_model(port, KBUILD_MODNAME, &pps_cb, 0); if (!device.pardev) { pr_err("couldn't register with %s\n", port->name); return; } if (parport_claim_or_block(device.pardev) < 0) { pr_err("couldn't claim %s\n", port->name); goto err_unregister_dev; } pr_info("attached to %s\n", port->name); attached = 1; calibrate_port(&device); hrtimer_init(&device.timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); device.timer.function = hrtimer_event; hrtimer_start(&device.timer, next_intr_time(&device), HRTIMER_MODE_ABS); return; err_unregister_dev: parport_unregister_device(device.pardev); } static void parport_detach(struct parport *port) { if (port->cad != device.pardev) return; /* not our port */ hrtimer_cancel(&device.timer); parport_release(device.pardev); parport_unregister_device(device.pardev); } static struct parport_driver pps_gen_parport_driver = { .name = KBUILD_MODNAME, .match_port = parport_attach, .detach = parport_detach, .devmodel = true, }; module_parport_driver(pps_gen_parport_driver); MODULE_AUTHOR("Alexander Gordeev <[email protected]>"); MODULE_DESCRIPTION("parallel port PPS signal generator"); MODULE_LICENSE("GPL");
linux-master
drivers/pps/generators/pps_gen_parport.c
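The generator's timing budget follows directly from next_intr_time() and hrtimer_event() above; the sketch below simply evaluates it with the module defaults (send_delay = 30000 ns, SAFETY_INTERVAL = 3000 ns) and an assumed ~1 µs calibrated port write time, which is hardware dependent.

/* Worked example of the generator's wake-up offset before each second (ns). */
#include <stdio.h>

int main(void)
{
	long send_delay = 30000;	/* module default */
	long safety = 3000;		/* SAFETY_INTERVAL */
	long port_write_time = 1000;	/* assumed calibration result */

	/* initial arming, from next_intr_time(): */
	printf("first wakeup: %ld ns before the second\n",
	       send_delay + port_write_time + 3 * safety);

	/* steady state, from hrtimer_event(), with hrtimer_error ~= SAFETY_INTERVAL: */
	printf("steady-state wakeup: %ld ns before the second\n",
	       send_delay + port_write_time + safety + 2 * safety);
	return 0;
}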
// SPDX-License-Identifier: GPL-2.0 /* * Greybus interface code * * Copyright 2014 Google Inc. * Copyright 2014 Linaro Ltd. */ #include <linux/delay.h> #include <linux/greybus.h> #include "greybus_trace.h" #define GB_INTERFACE_MODE_SWITCH_TIMEOUT 2000 #define GB_INTERFACE_DEVICE_ID_BAD 0xff #define GB_INTERFACE_AUTOSUSPEND_MS 3000 /* Time required for interface to enter standby before disabling REFCLK */ #define GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS 20 /* Don't-care selector index */ #define DME_SELECTOR_INDEX_NULL 0 /* DME attributes */ /* FIXME: remove ES2 support and DME_T_TST_SRC_INCREMENT */ #define DME_T_TST_SRC_INCREMENT 0x4083 #define DME_DDBL1_MANUFACTURERID 0x5003 #define DME_DDBL1_PRODUCTID 0x5004 #define DME_TOSHIBA_GMP_VID 0x6000 #define DME_TOSHIBA_GMP_PID 0x6001 #define DME_TOSHIBA_GMP_SN0 0x6002 #define DME_TOSHIBA_GMP_SN1 0x6003 #define DME_TOSHIBA_GMP_INIT_STATUS 0x6101 /* DDBL1 Manufacturer and Product ids */ #define TOSHIBA_DMID 0x0126 #define TOSHIBA_ES2_BRIDGE_DPID 0x1000 #define TOSHIBA_ES3_APBRIDGE_DPID 0x1001 #define TOSHIBA_ES3_GBPHY_DPID 0x1002 static int gb_interface_hibernate_link(struct gb_interface *intf); static int gb_interface_refclk_set(struct gb_interface *intf, bool enable); static int gb_interface_dme_attr_get(struct gb_interface *intf, u16 attr, u32 *val) { return gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id, attr, DME_SELECTOR_INDEX_NULL, val); } static int gb_interface_read_ara_dme(struct gb_interface *intf) { u32 sn0, sn1; int ret; /* * Unless this is a Toshiba bridge, bail out until we have defined * standard GMP attributes. */ if (intf->ddbl1_manufacturer_id != TOSHIBA_DMID) { dev_err(&intf->dev, "unknown manufacturer %08x\n", intf->ddbl1_manufacturer_id); return -ENODEV; } ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_VID, &intf->vendor_id); if (ret) return ret; ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_PID, &intf->product_id); if (ret) return ret; ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN0, &sn0); if (ret) return ret; ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN1, &sn1); if (ret) return ret; intf->serial_number = (u64)sn1 << 32 | sn0; return 0; } static int gb_interface_read_dme(struct gb_interface *intf) { int ret; /* DME attributes have already been read */ if (intf->dme_read) return 0; ret = gb_interface_dme_attr_get(intf, DME_DDBL1_MANUFACTURERID, &intf->ddbl1_manufacturer_id); if (ret) return ret; ret = gb_interface_dme_attr_get(intf, DME_DDBL1_PRODUCTID, &intf->ddbl1_product_id); if (ret) return ret; if (intf->ddbl1_manufacturer_id == TOSHIBA_DMID && intf->ddbl1_product_id == TOSHIBA_ES2_BRIDGE_DPID) { intf->quirks |= GB_INTERFACE_QUIRK_NO_GMP_IDS; intf->quirks |= GB_INTERFACE_QUIRK_NO_INIT_STATUS; } ret = gb_interface_read_ara_dme(intf); if (ret) return ret; intf->dme_read = true; return 0; } static int gb_interface_route_create(struct gb_interface *intf) { struct gb_svc *svc = intf->hd->svc; u8 intf_id = intf->interface_id; u8 device_id; int ret; /* Allocate an interface device id. */ ret = ida_simple_get(&svc->device_id_map, GB_SVC_DEVICE_ID_MIN, GB_SVC_DEVICE_ID_MAX + 1, GFP_KERNEL); if (ret < 0) { dev_err(&intf->dev, "failed to allocate device id: %d\n", ret); return ret; } device_id = ret; ret = gb_svc_intf_device_id(svc, intf_id, device_id); if (ret) { dev_err(&intf->dev, "failed to set device id %u: %d\n", device_id, ret); goto err_ida_remove; } /* FIXME: Hard-coded AP device id. 
*/ ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_SVC_DEVICE_ID_AP, intf_id, device_id); if (ret) { dev_err(&intf->dev, "failed to create route: %d\n", ret); goto err_svc_id_free; } intf->device_id = device_id; return 0; err_svc_id_free: /* * XXX Should we tell SVC that this id doesn't belong to interface * XXX anymore. */ err_ida_remove: ida_simple_remove(&svc->device_id_map, device_id); return ret; } static void gb_interface_route_destroy(struct gb_interface *intf) { struct gb_svc *svc = intf->hd->svc; if (intf->device_id == GB_INTERFACE_DEVICE_ID_BAD) return; gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id); ida_simple_remove(&svc->device_id_map, intf->device_id); intf->device_id = GB_INTERFACE_DEVICE_ID_BAD; } /* Locking: Caller holds the interface mutex. */ static int gb_interface_legacy_mode_switch(struct gb_interface *intf) { int ret; dev_info(&intf->dev, "legacy mode switch detected\n"); /* Mark as disconnected to prevent I/O during disable. */ intf->disconnected = true; gb_interface_disable(intf); intf->disconnected = false; ret = gb_interface_enable(intf); if (ret) { dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret); gb_interface_deactivate(intf); } return ret; } void gb_interface_mailbox_event(struct gb_interface *intf, u16 result, u32 mailbox) { mutex_lock(&intf->mutex); if (result) { dev_warn(&intf->dev, "mailbox event with UniPro error: 0x%04x\n", result); goto err_disable; } if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) { dev_warn(&intf->dev, "mailbox event with unexpected value: 0x%08x\n", mailbox); goto err_disable; } if (intf->quirks & GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH) { gb_interface_legacy_mode_switch(intf); goto out_unlock; } if (!intf->mode_switch) { dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n", mailbox); goto err_disable; } dev_info(&intf->dev, "mode switch detected\n"); complete(&intf->mode_switch_completion); out_unlock: mutex_unlock(&intf->mutex); return; err_disable: gb_interface_disable(intf); gb_interface_deactivate(intf); mutex_unlock(&intf->mutex); } static void gb_interface_mode_switch_work(struct work_struct *work) { struct gb_interface *intf; struct gb_control *control; unsigned long timeout; int ret; intf = container_of(work, struct gb_interface, mode_switch_work); mutex_lock(&intf->mutex); /* Make sure interface is still enabled. */ if (!intf->enabled) { dev_dbg(&intf->dev, "mode switch aborted\n"); intf->mode_switch = false; mutex_unlock(&intf->mutex); goto out_interface_put; } /* * Prepare the control device for mode switch and make sure to get an * extra reference before it goes away during interface disable. */ control = gb_control_get(intf->control); gb_control_mode_switch_prepare(control); gb_interface_disable(intf); mutex_unlock(&intf->mutex); timeout = msecs_to_jiffies(GB_INTERFACE_MODE_SWITCH_TIMEOUT); ret = wait_for_completion_interruptible_timeout( &intf->mode_switch_completion, timeout); /* Finalise control-connection mode switch. */ gb_control_mode_switch_complete(control); gb_control_put(control); if (ret < 0) { dev_err(&intf->dev, "mode switch interrupted\n"); goto err_deactivate; } else if (ret == 0) { dev_err(&intf->dev, "mode switch timed out\n"); goto err_deactivate; } /* Re-enable (re-enumerate) interface if still active. 
*/ mutex_lock(&intf->mutex); intf->mode_switch = false; if (intf->active) { ret = gb_interface_enable(intf); if (ret) { dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret); gb_interface_deactivate(intf); } } mutex_unlock(&intf->mutex); out_interface_put: gb_interface_put(intf); return; err_deactivate: mutex_lock(&intf->mutex); intf->mode_switch = false; gb_interface_deactivate(intf); mutex_unlock(&intf->mutex); gb_interface_put(intf); } int gb_interface_request_mode_switch(struct gb_interface *intf) { int ret = 0; mutex_lock(&intf->mutex); if (intf->mode_switch) { ret = -EBUSY; goto out_unlock; } intf->mode_switch = true; reinit_completion(&intf->mode_switch_completion); /* * Get a reference to the interface device, which will be put once the * mode switch is complete. */ get_device(&intf->dev); if (!queue_work(system_long_wq, &intf->mode_switch_work)) { put_device(&intf->dev); ret = -EBUSY; goto out_unlock; } out_unlock: mutex_unlock(&intf->mutex); return ret; } EXPORT_SYMBOL_GPL(gb_interface_request_mode_switch); /* * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the * init-status attribute DME_TOSHIBA_INIT_STATUS. The AP needs to read and * clear it after reading a non-zero value from it. * * FIXME: This is module-hardware dependent and needs to be extended for every * type of module we want to support. */ static int gb_interface_read_and_clear_init_status(struct gb_interface *intf) { struct gb_host_device *hd = intf->hd; unsigned long bootrom_quirks; unsigned long s2l_quirks; int ret; u32 value; u16 attr; u8 init_status; /* * ES2 bridges use T_TstSrcIncrement for the init status. * * FIXME: Remove ES2 support */ if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS) attr = DME_T_TST_SRC_INCREMENT; else attr = DME_TOSHIBA_GMP_INIT_STATUS; ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr, DME_SELECTOR_INDEX_NULL, &value); if (ret) return ret; /* * A nonzero init status indicates the module has finished * initializing. */ if (!value) { dev_err(&intf->dev, "invalid init status\n"); return -ENODEV; } /* * Extract the init status. * * For ES2: We need to check lowest 8 bits of 'value'. * For ES3: We need to check highest 8 bits out of 32 of 'value'. * * FIXME: Remove ES2 support */ if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS) init_status = value & 0xff; else init_status = value >> 24; /* * Check if the interface is executing the quirky ES3 bootrom that, * for example, requires E2EFC, CSD and CSV to be disabled. */ bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES | GB_INTERFACE_QUIRK_FORCED_DISABLE | GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH | GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE; s2l_quirks = GB_INTERFACE_QUIRK_NO_PM; switch (init_status) { case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED: case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED: intf->quirks |= bootrom_quirks; break; case GB_INIT_S2_LOADER_BOOT_STARTED: /* S2 Loader doesn't support runtime PM */ intf->quirks &= ~bootrom_quirks; intf->quirks |= s2l_quirks; break; default: intf->quirks &= ~bootrom_quirks; intf->quirks &= ~s2l_quirks; } /* Clear the init status. 
*/ return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr, DME_SELECTOR_INDEX_NULL, 0); } /* interface sysfs attributes */ #define gb_interface_attr(field, type) \ static ssize_t field##_show(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct gb_interface *intf = to_gb_interface(dev); \ return scnprintf(buf, PAGE_SIZE, type"\n", intf->field); \ } \ static DEVICE_ATTR_RO(field) gb_interface_attr(ddbl1_manufacturer_id, "0x%08x"); gb_interface_attr(ddbl1_product_id, "0x%08x"); gb_interface_attr(interface_id, "%u"); gb_interface_attr(vendor_id, "0x%08x"); gb_interface_attr(product_id, "0x%08x"); gb_interface_attr(serial_number, "0x%016llx"); static ssize_t voltage_now_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_interface *intf = to_gb_interface(dev); int ret; u32 measurement; ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id, GB_SVC_PWRMON_TYPE_VOL, &measurement); if (ret) { dev_err(&intf->dev, "failed to get voltage sample (%d)\n", ret); return ret; } return sprintf(buf, "%u\n", measurement); } static DEVICE_ATTR_RO(voltage_now); static ssize_t current_now_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_interface *intf = to_gb_interface(dev); int ret; u32 measurement; ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id, GB_SVC_PWRMON_TYPE_CURR, &measurement); if (ret) { dev_err(&intf->dev, "failed to get current sample (%d)\n", ret); return ret; } return sprintf(buf, "%u\n", measurement); } static DEVICE_ATTR_RO(current_now); static ssize_t power_now_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_interface *intf = to_gb_interface(dev); int ret; u32 measurement; ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id, GB_SVC_PWRMON_TYPE_PWR, &measurement); if (ret) { dev_err(&intf->dev, "failed to get power sample (%d)\n", ret); return ret; } return sprintf(buf, "%u\n", measurement); } static DEVICE_ATTR_RO(power_now); static ssize_t power_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_interface *intf = to_gb_interface(dev); if (intf->active) return scnprintf(buf, PAGE_SIZE, "on\n"); else return scnprintf(buf, PAGE_SIZE, "off\n"); } static ssize_t power_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct gb_interface *intf = to_gb_interface(dev); bool activate; int ret = 0; if (kstrtobool(buf, &activate)) return -EINVAL; mutex_lock(&intf->mutex); if (activate == intf->active) goto unlock; if (activate) { ret = gb_interface_activate(intf); if (ret) { dev_err(&intf->dev, "failed to activate interface: %d\n", ret); goto unlock; } ret = gb_interface_enable(intf); if (ret) { dev_err(&intf->dev, "failed to enable interface: %d\n", ret); gb_interface_deactivate(intf); goto unlock; } } else { gb_interface_disable(intf); gb_interface_deactivate(intf); } unlock: mutex_unlock(&intf->mutex); if (ret) return ret; return len; } static DEVICE_ATTR_RW(power_state); static const char *gb_interface_type_string(struct gb_interface *intf) { static const char * const types[] = { [GB_INTERFACE_TYPE_INVALID] = "invalid", [GB_INTERFACE_TYPE_UNKNOWN] = "unknown", [GB_INTERFACE_TYPE_DUMMY] = "dummy", [GB_INTERFACE_TYPE_UNIPRO] = "unipro", [GB_INTERFACE_TYPE_GREYBUS] = "greybus", }; return types[intf->type]; } static ssize_t interface_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_interface *intf = 
to_gb_interface(dev); return sprintf(buf, "%s\n", gb_interface_type_string(intf)); } static DEVICE_ATTR_RO(interface_type); static struct attribute *interface_unipro_attrs[] = { &dev_attr_ddbl1_manufacturer_id.attr, &dev_attr_ddbl1_product_id.attr, NULL }; static struct attribute *interface_greybus_attrs[] = { &dev_attr_vendor_id.attr, &dev_attr_product_id.attr, &dev_attr_serial_number.attr, NULL }; static struct attribute *interface_power_attrs[] = { &dev_attr_voltage_now.attr, &dev_attr_current_now.attr, &dev_attr_power_now.attr, &dev_attr_power_state.attr, NULL }; static struct attribute *interface_common_attrs[] = { &dev_attr_interface_id.attr, &dev_attr_interface_type.attr, NULL }; static umode_t interface_unipro_is_visible(struct kobject *kobj, struct attribute *attr, int n) { struct device *dev = kobj_to_dev(kobj); struct gb_interface *intf = to_gb_interface(dev); switch (intf->type) { case GB_INTERFACE_TYPE_UNIPRO: case GB_INTERFACE_TYPE_GREYBUS: return attr->mode; default: return 0; } } static umode_t interface_greybus_is_visible(struct kobject *kobj, struct attribute *attr, int n) { struct device *dev = kobj_to_dev(kobj); struct gb_interface *intf = to_gb_interface(dev); switch (intf->type) { case GB_INTERFACE_TYPE_GREYBUS: return attr->mode; default: return 0; } } static umode_t interface_power_is_visible(struct kobject *kobj, struct attribute *attr, int n) { struct device *dev = kobj_to_dev(kobj); struct gb_interface *intf = to_gb_interface(dev); switch (intf->type) { case GB_INTERFACE_TYPE_UNIPRO: case GB_INTERFACE_TYPE_GREYBUS: return attr->mode; default: return 0; } } static const struct attribute_group interface_unipro_group = { .is_visible = interface_unipro_is_visible, .attrs = interface_unipro_attrs, }; static const struct attribute_group interface_greybus_group = { .is_visible = interface_greybus_is_visible, .attrs = interface_greybus_attrs, }; static const struct attribute_group interface_power_group = { .is_visible = interface_power_is_visible, .attrs = interface_power_attrs, }; static const struct attribute_group interface_common_group = { .attrs = interface_common_attrs, }; static const struct attribute_group *interface_groups[] = { &interface_unipro_group, &interface_greybus_group, &interface_power_group, &interface_common_group, NULL }; static void gb_interface_release(struct device *dev) { struct gb_interface *intf = to_gb_interface(dev); trace_gb_interface_release(intf); kfree(intf); } #ifdef CONFIG_PM static int gb_interface_suspend(struct device *dev) { struct gb_interface *intf = to_gb_interface(dev); int ret; ret = gb_control_interface_suspend_prepare(intf->control); if (ret) return ret; ret = gb_control_suspend(intf->control); if (ret) goto err_hibernate_abort; ret = gb_interface_hibernate_link(intf); if (ret) return ret; /* Delay to allow interface to enter standby before disabling refclk */ msleep(GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS); ret = gb_interface_refclk_set(intf, false); if (ret) return ret; return 0; err_hibernate_abort: gb_control_interface_hibernate_abort(intf->control); return ret; } static int gb_interface_resume(struct device *dev) { struct gb_interface *intf = to_gb_interface(dev); struct gb_svc *svc = intf->hd->svc; int ret; ret = gb_interface_refclk_set(intf, true); if (ret) return ret; ret = gb_svc_intf_resume(svc, intf->interface_id); if (ret) return ret; ret = gb_control_resume(intf->control); if (ret) return ret; return 0; } static int gb_interface_runtime_idle(struct device *dev) { pm_runtime_mark_last_busy(dev); 
pm_request_autosuspend(dev); return 0; } #endif static const struct dev_pm_ops gb_interface_pm_ops = { SET_RUNTIME_PM_OPS(gb_interface_suspend, gb_interface_resume, gb_interface_runtime_idle) }; struct device_type greybus_interface_type = { .name = "greybus_interface", .release = gb_interface_release, .pm = &gb_interface_pm_ops, }; /* * A Greybus module represents a user-replaceable component on a GMP * phone. An interface is the physical connection on that module. A * module may have more than one interface. * * Create a gb_interface structure to represent a discovered interface. * The position of the interface within the Endo is encoded in the "interface_id" * argument. * * Returns a pointer to the new interface or a null pointer if a * failure occurs due to memory exhaustion. */ struct gb_interface *gb_interface_create(struct gb_module *module, u8 interface_id) { struct gb_host_device *hd = module->hd; struct gb_interface *intf; intf = kzalloc(sizeof(*intf), GFP_KERNEL); if (!intf) return NULL; intf->hd = hd; /* XXX refcount? */ intf->module = module; intf->interface_id = interface_id; INIT_LIST_HEAD(&intf->bundles); INIT_LIST_HEAD(&intf->manifest_descs); mutex_init(&intf->mutex); INIT_WORK(&intf->mode_switch_work, gb_interface_mode_switch_work); init_completion(&intf->mode_switch_completion); /* Invalid device id to start with */ intf->device_id = GB_INTERFACE_DEVICE_ID_BAD; intf->dev.parent = &module->dev; intf->dev.bus = &greybus_bus_type; intf->dev.type = &greybus_interface_type; intf->dev.groups = interface_groups; intf->dev.dma_mask = module->dev.dma_mask; device_initialize(&intf->dev); dev_set_name(&intf->dev, "%s.%u", dev_name(&module->dev), interface_id); pm_runtime_set_autosuspend_delay(&intf->dev, GB_INTERFACE_AUTOSUSPEND_MS); trace_gb_interface_create(intf); return intf; } static int gb_interface_vsys_set(struct gb_interface *intf, bool enable) { struct gb_svc *svc = intf->hd->svc; int ret; dev_dbg(&intf->dev, "%s - %d\n", __func__, enable); ret = gb_svc_intf_vsys_set(svc, intf->interface_id, enable); if (ret) { dev_err(&intf->dev, "failed to set v_sys: %d\n", ret); return ret; } return 0; } static int gb_interface_refclk_set(struct gb_interface *intf, bool enable) { struct gb_svc *svc = intf->hd->svc; int ret; dev_dbg(&intf->dev, "%s - %d\n", __func__, enable); ret = gb_svc_intf_refclk_set(svc, intf->interface_id, enable); if (ret) { dev_err(&intf->dev, "failed to set refclk: %d\n", ret); return ret; } return 0; } static int gb_interface_unipro_set(struct gb_interface *intf, bool enable) { struct gb_svc *svc = intf->hd->svc; int ret; dev_dbg(&intf->dev, "%s - %d\n", __func__, enable); ret = gb_svc_intf_unipro_set(svc, intf->interface_id, enable); if (ret) { dev_err(&intf->dev, "failed to set UniPro: %d\n", ret); return ret; } return 0; } static int gb_interface_activate_operation(struct gb_interface *intf, enum gb_interface_type *intf_type) { struct gb_svc *svc = intf->hd->svc; u8 type; int ret; dev_dbg(&intf->dev, "%s\n", __func__); ret = gb_svc_intf_activate(svc, intf->interface_id, &type); if (ret) { dev_err(&intf->dev, "failed to activate: %d\n", ret); return ret; } switch (type) { case GB_SVC_INTF_TYPE_DUMMY: *intf_type = GB_INTERFACE_TYPE_DUMMY; /* FIXME: handle as an error for now */ return -ENODEV; case GB_SVC_INTF_TYPE_UNIPRO: *intf_type = GB_INTERFACE_TYPE_UNIPRO; dev_err(&intf->dev, "interface type UniPro not supported\n"); /* FIXME: handle as an error for now */ return -ENODEV; case GB_SVC_INTF_TYPE_GREYBUS: *intf_type = GB_INTERFACE_TYPE_GREYBUS; break; default:
dev_err(&intf->dev, "unknown interface type: %u\n", type); *intf_type = GB_INTERFACE_TYPE_UNKNOWN; return -ENODEV; } return 0; } static int gb_interface_hibernate_link(struct gb_interface *intf) { struct gb_svc *svc = intf->hd->svc; return gb_svc_intf_set_power_mode_hibernate(svc, intf->interface_id); } static int _gb_interface_activate(struct gb_interface *intf, enum gb_interface_type *type) { int ret; *type = GB_INTERFACE_TYPE_UNKNOWN; if (intf->ejected || intf->removed) return -ENODEV; ret = gb_interface_vsys_set(intf, true); if (ret) return ret; ret = gb_interface_refclk_set(intf, true); if (ret) goto err_vsys_disable; ret = gb_interface_unipro_set(intf, true); if (ret) goto err_refclk_disable; ret = gb_interface_activate_operation(intf, type); if (ret) { switch (*type) { case GB_INTERFACE_TYPE_UNIPRO: case GB_INTERFACE_TYPE_GREYBUS: goto err_hibernate_link; default: goto err_unipro_disable; } } ret = gb_interface_read_dme(intf); if (ret) goto err_hibernate_link; ret = gb_interface_route_create(intf); if (ret) goto err_hibernate_link; intf->active = true; trace_gb_interface_activate(intf); return 0; err_hibernate_link: gb_interface_hibernate_link(intf); err_unipro_disable: gb_interface_unipro_set(intf, false); err_refclk_disable: gb_interface_refclk_set(intf, false); err_vsys_disable: gb_interface_vsys_set(intf, false); return ret; } /* * At present, we assume a UniPro-only module to be a Greybus module that * failed to send its mailbox poke. There is some reason to believe that this * is because of a bug in the ES3 bootrom. * * FIXME: Check if this is a Toshiba bridge before retrying? */ static int _gb_interface_activate_es3_hack(struct gb_interface *intf, enum gb_interface_type *type) { int retries = 3; int ret; while (retries--) { ret = _gb_interface_activate(intf, type); if (ret == -ENODEV && *type == GB_INTERFACE_TYPE_UNIPRO) continue; break; } return ret; } /* * Activate an interface. * * Locking: Caller holds the interface mutex. */ int gb_interface_activate(struct gb_interface *intf) { enum gb_interface_type type; int ret; switch (intf->type) { case GB_INTERFACE_TYPE_INVALID: case GB_INTERFACE_TYPE_GREYBUS: ret = _gb_interface_activate_es3_hack(intf, &type); break; default: ret = _gb_interface_activate(intf, &type); } /* Make sure type is detected correctly during reactivation. */ if (intf->type != GB_INTERFACE_TYPE_INVALID) { if (type != intf->type) { dev_err(&intf->dev, "failed to detect interface type\n"); if (!ret) gb_interface_deactivate(intf); return -EIO; } } else { intf->type = type; } return ret; } /* * Deactivate an interface. * * Locking: Caller holds the interface mutex. */ void gb_interface_deactivate(struct gb_interface *intf) { if (!intf->active) return; trace_gb_interface_deactivate(intf); /* Abort any ongoing mode switch. */ if (intf->mode_switch) complete(&intf->mode_switch_completion); gb_interface_route_destroy(intf); gb_interface_hibernate_link(intf); gb_interface_unipro_set(intf, false); gb_interface_refclk_set(intf, false); gb_interface_vsys_set(intf, false); intf->active = false; } /* * Enable an interface by enabling its control connection, fetching the * manifest and other information over it, and finally registering its child * devices. * * Locking: Caller holds the interface mutex. 
*/ int gb_interface_enable(struct gb_interface *intf) { struct gb_control *control; struct gb_bundle *bundle, *tmp; int ret, size; void *manifest; ret = gb_interface_read_and_clear_init_status(intf); if (ret) { dev_err(&intf->dev, "failed to clear init status: %d\n", ret); return ret; } /* Establish control connection */ control = gb_control_create(intf); if (IS_ERR(control)) { dev_err(&intf->dev, "failed to create control device: %ld\n", PTR_ERR(control)); return PTR_ERR(control); } intf->control = control; ret = gb_control_enable(intf->control); if (ret) goto err_put_control; /* Get manifest size using control protocol on CPort */ size = gb_control_get_manifest_size_operation(intf); if (size <= 0) { dev_err(&intf->dev, "failed to get manifest size: %d\n", size); if (size) ret = size; else ret = -EINVAL; goto err_disable_control; } manifest = kmalloc(size, GFP_KERNEL); if (!manifest) { ret = -ENOMEM; goto err_disable_control; } /* Get manifest using control protocol on CPort */ ret = gb_control_get_manifest_operation(intf, manifest, size); if (ret) { dev_err(&intf->dev, "failed to get manifest: %d\n", ret); goto err_free_manifest; } /* * Parse the manifest and build up our data structures representing * what's in it. */ if (!gb_manifest_parse(intf, manifest, size)) { dev_err(&intf->dev, "failed to parse manifest\n"); ret = -EINVAL; goto err_destroy_bundles; } ret = gb_control_get_bundle_versions(intf->control); if (ret) goto err_destroy_bundles; /* Register the control device and any bundles */ ret = gb_control_add(intf->control); if (ret) goto err_destroy_bundles; pm_runtime_use_autosuspend(&intf->dev); pm_runtime_get_noresume(&intf->dev); pm_runtime_set_active(&intf->dev); pm_runtime_enable(&intf->dev); list_for_each_entry_safe_reverse(bundle, tmp, &intf->bundles, links) { ret = gb_bundle_add(bundle); if (ret) { gb_bundle_destroy(bundle); continue; } } kfree(manifest); intf->enabled = true; pm_runtime_put(&intf->dev); trace_gb_interface_enable(intf); return 0; err_destroy_bundles: list_for_each_entry_safe(bundle, tmp, &intf->bundles, links) gb_bundle_destroy(bundle); err_free_manifest: kfree(manifest); err_disable_control: gb_control_disable(intf->control); err_put_control: gb_control_put(intf->control); intf->control = NULL; return ret; } /* * Disable an interface and destroy its bundles. * * Locking: Caller holds the interface mutex. */ void gb_interface_disable(struct gb_interface *intf) { struct gb_bundle *bundle; struct gb_bundle *next; if (!intf->enabled) return; trace_gb_interface_disable(intf); pm_runtime_get_sync(&intf->dev); /* Set disconnected flag to avoid I/O during connection tear down. */ if (intf->quirks & GB_INTERFACE_QUIRK_FORCED_DISABLE) intf->disconnected = true; list_for_each_entry_safe(bundle, next, &intf->bundles, links) gb_bundle_destroy(bundle); if (!intf->mode_switch && !intf->disconnected) gb_control_interface_deactivate_prepare(intf->control); gb_control_del(intf->control); gb_control_disable(intf->control); gb_control_put(intf->control); intf->control = NULL; intf->enabled = false; pm_runtime_disable(&intf->dev); pm_runtime_set_suspended(&intf->dev); pm_runtime_dont_use_autosuspend(&intf->dev); pm_runtime_put_noidle(&intf->dev); } /* Register an interface. 
*/ int gb_interface_add(struct gb_interface *intf) { int ret; ret = device_add(&intf->dev); if (ret) { dev_err(&intf->dev, "failed to register interface: %d\n", ret); return ret; } trace_gb_interface_add(intf); dev_info(&intf->dev, "Interface added (%s)\n", gb_interface_type_string(intf)); switch (intf->type) { case GB_INTERFACE_TYPE_GREYBUS: dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n", intf->vendor_id, intf->product_id); fallthrough; case GB_INTERFACE_TYPE_UNIPRO: dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n", intf->ddbl1_manufacturer_id, intf->ddbl1_product_id); break; default: break; } return 0; } /* Deregister an interface. */ void gb_interface_del(struct gb_interface *intf) { if (device_is_registered(&intf->dev)) { trace_gb_interface_del(intf); device_del(&intf->dev); dev_info(&intf->dev, "Interface removed\n"); } } void gb_interface_put(struct gb_interface *intf) { put_device(&intf->dev); }
linux-master
drivers/greybus/interface.c
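The init-status read in gb_interface_read_and_clear_init_status() above hides a small format difference: ES2 bridges (the GB_INTERFACE_QUIRK_NO_INIT_STATUS case) report the status in the low 8 bits of the 32-bit DME value, while ES3 reports it in the high 8 bits. A minimal userspace sketch of just that extraction, with a hypothetical has_es2_quirk flag standing in for the quirk test, is shown below; it illustrates the bit layout only and is not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the ES2/ES3 init-status extraction described above. */
static uint8_t extract_init_status(uint32_t dme_value, int has_es2_quirk)
{
	if (has_es2_quirk)
		return dme_value & 0xff;	/* ES2: status lives in the low byte */

	return dme_value >> 24;			/* ES3: status lives in the high byte */
}

int main(void)
{
	uint32_t es3_value = 0x02000000;	/* example: status 0x02 as an ES3 bridge would report it */

	printf("init status: 0x%02x\n", extract_init_status(es3_value, 0));
	return 0;
}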
// SPDX-License-Identifier: GPL-2.0 /* * Greybus "AP" USB driver for "ES2" controller chips * * Copyright 2014-2015 Google Inc. * Copyright 2014-2015 Linaro Ltd. */ #include <linux/kthread.h> #include <linux/sizes.h> #include <linux/usb.h> #include <linux/kfifo.h> #include <linux/debugfs.h> #include <linux/list.h> #include <linux/greybus.h> #include <asm/unaligned.h> #include "arpc.h" #include "greybus_trace.h" /* Default timeout for USB vendor requests. */ #define ES2_USB_CTRL_TIMEOUT 500 /* Default timeout for ARPC CPort requests */ #define ES2_ARPC_CPORT_TIMEOUT 500 /* Fixed CPort numbers */ #define ES2_CPORT_CDSI0 16 #define ES2_CPORT_CDSI1 17 /* Memory sizes for the buffers sent to/from the ES2 controller */ #define ES2_GBUF_MSG_SIZE_MAX 2048 /* Memory sizes for the ARPC buffers */ #define ARPC_OUT_SIZE_MAX U16_MAX #define ARPC_IN_SIZE_MAX 128 static const struct usb_device_id id_table[] = { { USB_DEVICE(0x18d1, 0x1eaf) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); #define APB1_LOG_SIZE SZ_16K /* * Number of CPort IN urbs in flight at any point in time. * Adjust if we are having stalls in the USB buffer due to not enough urbs in * flight. */ #define NUM_CPORT_IN_URB 4 /* Number of CPort OUT urbs in flight at any point in time. * Adjust if we get messages saying we are out of urbs in the system log. */ #define NUM_CPORT_OUT_URB 8 /* * Number of ARPC in urbs in flight at any point in time. */ #define NUM_ARPC_IN_URB 2 /* * @endpoint: bulk in endpoint for CPort data * @urb: array of urbs for the CPort in messages * @buffer: array of buffers for the @cport_in_urb urbs */ struct es2_cport_in { __u8 endpoint; struct urb *urb[NUM_CPORT_IN_URB]; u8 *buffer[NUM_CPORT_IN_URB]; }; /** * struct es2_ap_dev - ES2 USB Bridge to AP structure * @usb_dev: pointer to the USB device we are. * @usb_intf: pointer to the USB interface we are bound to. * @hd: pointer to our gb_host_device structure * * @cport_in: endpoint, urbs and buffer for cport in messages * @cport_out_endpoint: endpoint for cport out messages * @cport_out_urb: array of urbs for the CPort out messages * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or * not. 
* @cport_out_urb_cancelled: array of flags indicating whether the * corresponding @cport_out_urb is being cancelled * @cport_out_urb_lock: locks the @cport_out_urb_busy "list" * @cdsi1_in_use: true if cport CDSI1 is in use * @apb_log_task: task pointer for logging thread * @apb_log_dentry: file system entry for the log file interface * @apb_log_enable_dentry: file system entry for enabling logging * @apb_log_fifo: kernel FIFO to carry logged data * @arpc_urb: array of urbs for the ARPC in messages * @arpc_buffer: array of buffers for the @arpc_urb urbs * @arpc_endpoint_in: bulk in endpoint for APBridgeA RPC * @arpc_id_cycle: gives an unique id to ARPC * @arpc_lock: locks ARPC list * @arpcs: list of in progress ARPCs */ struct es2_ap_dev { struct usb_device *usb_dev; struct usb_interface *usb_intf; struct gb_host_device *hd; struct es2_cport_in cport_in; __u8 cport_out_endpoint; struct urb *cport_out_urb[NUM_CPORT_OUT_URB]; bool cport_out_urb_busy[NUM_CPORT_OUT_URB]; bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB]; spinlock_t cport_out_urb_lock; bool cdsi1_in_use; struct task_struct *apb_log_task; struct dentry *apb_log_dentry; struct dentry *apb_log_enable_dentry; DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE); __u8 arpc_endpoint_in; struct urb *arpc_urb[NUM_ARPC_IN_URB]; u8 *arpc_buffer[NUM_ARPC_IN_URB]; int arpc_id_cycle; spinlock_t arpc_lock; struct list_head arpcs; }; struct arpc { struct list_head list; struct arpc_request_message *req; struct arpc_response_message *resp; struct completion response_received; bool active; }; static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd) { return (struct es2_ap_dev *)&hd->hd_priv; } static void cport_out_callback(struct urb *urb); static void usb_log_enable(struct es2_ap_dev *es2); static void usb_log_disable(struct es2_ap_dev *es2); static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload, size_t size, int *result, unsigned int timeout); static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd) { struct usb_device *udev = es2->usb_dev; u8 *data; int retval; data = kmemdup(req, size, GFP_KERNEL); if (!data) return -ENOMEM; retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 0, 0, data, size, ES2_USB_CTRL_TIMEOUT); if (retval < 0) dev_err(&udev->dev, "%s: return error %d\n", __func__, retval); else retval = 0; kfree(data); return retval; } static void ap_urb_complete(struct urb *urb) { struct usb_ctrlrequest *dr = urb->context; kfree(dr); usb_free_urb(urb); } static int output_async(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd) { struct usb_device *udev = es2->usb_dev; struct urb *urb; struct usb_ctrlrequest *dr; u8 *buf; int retval; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) return -ENOMEM; dr = kmalloc(sizeof(*dr) + size, GFP_ATOMIC); if (!dr) { usb_free_urb(urb); return -ENOMEM; } buf = (u8 *)dr + sizeof(*dr); memcpy(buf, req, size); dr->bRequest = cmd; dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE; dr->wValue = 0; dr->wIndex = 0; dr->wLength = cpu_to_le16(size); usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0), (unsigned char *)dr, buf, size, ap_urb_complete, dr); retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) { usb_free_urb(urb); kfree(dr); } return retval; } static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd, bool async) { struct es2_ap_dev *es2 = hd_to_es2(hd); if (async) return output_async(es2, req, size, cmd); return output_sync(es2, req, size, cmd); } 
static int es2_cport_in_enable(struct es2_ap_dev *es2, struct es2_cport_in *cport_in) { struct urb *urb; int ret; int i; for (i = 0; i < NUM_CPORT_IN_URB; ++i) { urb = cport_in->urb[i]; ret = usb_submit_urb(urb, GFP_KERNEL); if (ret) { dev_err(&es2->usb_dev->dev, "failed to submit in-urb: %d\n", ret); goto err_kill_urbs; } } return 0; err_kill_urbs: for (--i; i >= 0; --i) { urb = cport_in->urb[i]; usb_kill_urb(urb); } return ret; } static void es2_cport_in_disable(struct es2_ap_dev *es2, struct es2_cport_in *cport_in) { struct urb *urb; int i; for (i = 0; i < NUM_CPORT_IN_URB; ++i) { urb = cport_in->urb[i]; usb_kill_urb(urb); } } static int es2_arpc_in_enable(struct es2_ap_dev *es2) { struct urb *urb; int ret; int i; for (i = 0; i < NUM_ARPC_IN_URB; ++i) { urb = es2->arpc_urb[i]; ret = usb_submit_urb(urb, GFP_KERNEL); if (ret) { dev_err(&es2->usb_dev->dev, "failed to submit arpc in-urb: %d\n", ret); goto err_kill_urbs; } } return 0; err_kill_urbs: for (--i; i >= 0; --i) { urb = es2->arpc_urb[i]; usb_kill_urb(urb); } return ret; } static void es2_arpc_in_disable(struct es2_ap_dev *es2) { struct urb *urb; int i; for (i = 0; i < NUM_ARPC_IN_URB; ++i) { urb = es2->arpc_urb[i]; usb_kill_urb(urb); } } static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask) { struct urb *urb = NULL; unsigned long flags; int i; spin_lock_irqsave(&es2->cport_out_urb_lock, flags); /* Look in our pool of allocated urbs first, as that's the "fastest" */ for (i = 0; i < NUM_CPORT_OUT_URB; ++i) { if (!es2->cport_out_urb_busy[i] && !es2->cport_out_urb_cancelled[i]) { es2->cport_out_urb_busy[i] = true; urb = es2->cport_out_urb[i]; break; } } spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags); if (urb) return urb; /* * Crap, pool is empty, complain to the syslog and go allocate one * dynamically as we have to succeed. */ dev_dbg(&es2->usb_dev->dev, "No free CPort OUT urbs, having to dynamically allocate one!\n"); return usb_alloc_urb(0, gfp_mask); } static void free_urb(struct es2_ap_dev *es2, struct urb *urb) { unsigned long flags; int i; /* * See if this was an urb in our pool, if so mark it "free", otherwise * we need to free it ourselves. */ spin_lock_irqsave(&es2->cport_out_urb_lock, flags); for (i = 0; i < NUM_CPORT_OUT_URB; ++i) { if (urb == es2->cport_out_urb[i]) { es2->cport_out_urb_busy[i] = false; urb = NULL; break; } } spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags); /* If urb is not NULL, then we need to free this urb */ usb_free_urb(urb); } /* * We (ab)use the operation-message header pad bytes to transfer the * cport id in order to minimise overhead. */ static void gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id) { header->pad[0] = cport_id; } /* Clear the pad bytes used for the CPort id */ static void gb_message_cport_clear(struct gb_operation_msg_hdr *header) { header->pad[0] = 0; } /* Extract the CPort id packed into the header, and clear it */ static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header) { u16 cport_id = header->pad[0]; gb_message_cport_clear(header); return cport_id; } /* * Returns zero if the message was successfully queued, or a negative errno * otherwise. */ static int message_send(struct gb_host_device *hd, u16 cport_id, struct gb_message *message, gfp_t gfp_mask) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct usb_device *udev = es2->usb_dev; size_t buffer_size; int retval; struct urb *urb; unsigned long flags; /* * The data actually transferred will include an indication * of where the data should be sent. 
Do one last check of * the target CPort id before filling it in. */ if (!cport_id_valid(hd, cport_id)) { dev_err(&udev->dev, "invalid cport %u\n", cport_id); return -EINVAL; } /* Find a free urb */ urb = next_free_urb(es2, gfp_mask); if (!urb) return -ENOMEM; spin_lock_irqsave(&es2->cport_out_urb_lock, flags); message->hcpriv = urb; spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags); /* Pack the cport id into the message header */ gb_message_cport_pack(message->header, cport_id); buffer_size = sizeof(*message->header) + message->payload_size; usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, es2->cport_out_endpoint), message->buffer, buffer_size, cport_out_callback, message); urb->transfer_flags |= URB_ZERO_PACKET; trace_gb_message_submit(message); retval = usb_submit_urb(urb, gfp_mask); if (retval) { dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval); spin_lock_irqsave(&es2->cport_out_urb_lock, flags); message->hcpriv = NULL; spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags); free_urb(es2, urb); gb_message_cport_clear(message->header); return retval; } return 0; } /* * Can not be called in atomic context. */ static void message_cancel(struct gb_message *message) { struct gb_host_device *hd = message->operation->connection->hd; struct es2_ap_dev *es2 = hd_to_es2(hd); struct urb *urb; int i; might_sleep(); spin_lock_irq(&es2->cport_out_urb_lock); urb = message->hcpriv; /* Prevent dynamically allocated urb from being deallocated. */ usb_get_urb(urb); /* Prevent pre-allocated urb from being reused. */ for (i = 0; i < NUM_CPORT_OUT_URB; ++i) { if (urb == es2->cport_out_urb[i]) { es2->cport_out_urb_cancelled[i] = true; break; } } spin_unlock_irq(&es2->cport_out_urb_lock); usb_kill_urb(urb); if (i < NUM_CPORT_OUT_URB) { spin_lock_irq(&es2->cport_out_urb_lock); es2->cport_out_urb_cancelled[i] = false; spin_unlock_irq(&es2->cport_out_urb_lock); } usb_free_urb(urb); } static int es2_cport_allocate(struct gb_host_device *hd, int cport_id, unsigned long flags) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct ida *id_map = &hd->cport_id_map; int ida_start, ida_end; switch (cport_id) { case ES2_CPORT_CDSI0: case ES2_CPORT_CDSI1: dev_err(&hd->dev, "cport %d not available\n", cport_id); return -EBUSY; } if (flags & GB_CONNECTION_FLAG_OFFLOADED && flags & GB_CONNECTION_FLAG_CDSI1) { if (es2->cdsi1_in_use) { dev_err(&hd->dev, "CDSI1 already in use\n"); return -EBUSY; } es2->cdsi1_in_use = true; return ES2_CPORT_CDSI1; } if (cport_id < 0) { ida_start = 0; ida_end = hd->num_cports; } else if (cport_id < hd->num_cports) { ida_start = cport_id; ida_end = cport_id + 1; } else { dev_err(&hd->dev, "cport %d not available\n", cport_id); return -EINVAL; } return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL); } static void es2_cport_release(struct gb_host_device *hd, u16 cport_id) { struct es2_ap_dev *es2 = hd_to_es2(hd); switch (cport_id) { case ES2_CPORT_CDSI1: es2->cdsi1_in_use = false; return; } ida_simple_remove(&hd->cport_id_map, cport_id); } static int cport_enable(struct gb_host_device *hd, u16 cport_id, unsigned long flags) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct usb_device *udev = es2->usb_dev; struct gb_apb_request_cport_flags *req; u32 connection_flags; int ret; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; connection_flags = 0; if (flags & GB_CONNECTION_FLAG_CONTROL) connection_flags |= GB_APB_CPORT_FLAG_CONTROL; if (flags & GB_CONNECTION_FLAG_HIGH_PRIO) connection_flags |= GB_APB_CPORT_FLAG_HIGH_PRIO; req->flags = 
cpu_to_le32(connection_flags); dev_dbg(&hd->dev, "%s - cport = %u, flags = %02x\n", __func__, cport_id, connection_flags); ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), GB_APB_REQUEST_CPORT_FLAGS, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, cport_id, 0, req, sizeof(*req), ES2_USB_CTRL_TIMEOUT); if (ret < 0) { dev_err(&udev->dev, "failed to set cport flags for port %d\n", cport_id); goto out; } ret = 0; out: kfree(req); return ret; } static int es2_cport_connected(struct gb_host_device *hd, u16 cport_id) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct device *dev = &es2->usb_dev->dev; struct arpc_cport_connected_req req; int ret; req.cport_id = cpu_to_le16(cport_id); ret = arpc_sync(es2, ARPC_TYPE_CPORT_CONNECTED, &req, sizeof(req), NULL, ES2_ARPC_CPORT_TIMEOUT); if (ret) { dev_err(dev, "failed to set connected state for cport %u: %d\n", cport_id, ret); return ret; } return 0; } static int es2_cport_flush(struct gb_host_device *hd, u16 cport_id) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct device *dev = &es2->usb_dev->dev; struct arpc_cport_flush_req req; int ret; req.cport_id = cpu_to_le16(cport_id); ret = arpc_sync(es2, ARPC_TYPE_CPORT_FLUSH, &req, sizeof(req), NULL, ES2_ARPC_CPORT_TIMEOUT); if (ret) { dev_err(dev, "failed to flush cport %u: %d\n", cport_id, ret); return ret; } return 0; } static int es2_cport_shutdown(struct gb_host_device *hd, u16 cport_id, u8 phase, unsigned int timeout) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct device *dev = &es2->usb_dev->dev; struct arpc_cport_shutdown_req req; int result; int ret; if (timeout > U16_MAX) return -EINVAL; req.cport_id = cpu_to_le16(cport_id); req.timeout = cpu_to_le16(timeout); req.phase = phase; ret = arpc_sync(es2, ARPC_TYPE_CPORT_SHUTDOWN, &req, sizeof(req), &result, ES2_ARPC_CPORT_TIMEOUT + timeout); if (ret) { dev_err(dev, "failed to send shutdown over cport %u: %d (%d)\n", cport_id, ret, result); return ret; } return 0; } static int es2_cport_quiesce(struct gb_host_device *hd, u16 cport_id, size_t peer_space, unsigned int timeout) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct device *dev = &es2->usb_dev->dev; struct arpc_cport_quiesce_req req; int result; int ret; if (peer_space > U16_MAX) return -EINVAL; if (timeout > U16_MAX) return -EINVAL; req.cport_id = cpu_to_le16(cport_id); req.peer_space = cpu_to_le16(peer_space); req.timeout = cpu_to_le16(timeout); ret = arpc_sync(es2, ARPC_TYPE_CPORT_QUIESCE, &req, sizeof(req), &result, ES2_ARPC_CPORT_TIMEOUT + timeout); if (ret) { dev_err(dev, "failed to quiesce cport %u: %d (%d)\n", cport_id, ret, result); return ret; } return 0; } static int es2_cport_clear(struct gb_host_device *hd, u16 cport_id) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct device *dev = &es2->usb_dev->dev; struct arpc_cport_clear_req req; int ret; req.cport_id = cpu_to_le16(cport_id); ret = arpc_sync(es2, ARPC_TYPE_CPORT_CLEAR, &req, sizeof(req), NULL, ES2_ARPC_CPORT_TIMEOUT); if (ret) { dev_err(dev, "failed to clear cport %u: %d\n", cport_id, ret); return ret; } return 0; } static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id) { int retval; struct es2_ap_dev *es2 = hd_to_es2(hd); struct usb_device *udev = es2->usb_dev; retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), GB_APB_REQUEST_LATENCY_TAG_EN, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, cport_id, 0, NULL, 0, ES2_USB_CTRL_TIMEOUT); if (retval < 0) dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n", cport_id); return retval; } static int latency_tag_disable(struct gb_host_device 
*hd, u16 cport_id) { int retval; struct es2_ap_dev *es2 = hd_to_es2(hd); struct usb_device *udev = es2->usb_dev; retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), GB_APB_REQUEST_LATENCY_TAG_DIS, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, cport_id, 0, NULL, 0, ES2_USB_CTRL_TIMEOUT); if (retval < 0) dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n", cport_id); return retval; } static struct gb_hd_driver es2_driver = { .hd_priv_size = sizeof(struct es2_ap_dev), .message_send = message_send, .message_cancel = message_cancel, .cport_allocate = es2_cport_allocate, .cport_release = es2_cport_release, .cport_enable = cport_enable, .cport_connected = es2_cport_connected, .cport_flush = es2_cport_flush, .cport_shutdown = es2_cport_shutdown, .cport_quiesce = es2_cport_quiesce, .cport_clear = es2_cport_clear, .latency_tag_enable = latency_tag_enable, .latency_tag_disable = latency_tag_disable, .output = output, }; /* Common function to report consistent warnings based on URB status */ static int check_urb_status(struct urb *urb) { struct device *dev = &urb->dev->dev; int status = urb->status; switch (status) { case 0: return 0; case -EOVERFLOW: dev_err(dev, "%s: overflow actual length is %d\n", __func__, urb->actual_length); fallthrough; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: case -EILSEQ: case -EPROTO: /* device is gone, stop sending */ return status; } dev_err(dev, "%s: unknown status %d\n", __func__, status); return -EAGAIN; } static void es2_destroy(struct es2_ap_dev *es2) { struct usb_device *udev; struct urb *urb; int i; debugfs_remove(es2->apb_log_enable_dentry); usb_log_disable(es2); /* Tear down everything! */ for (i = 0; i < NUM_CPORT_OUT_URB; ++i) { urb = es2->cport_out_urb[i]; usb_kill_urb(urb); usb_free_urb(urb); es2->cport_out_urb[i] = NULL; es2->cport_out_urb_busy[i] = false; /* just to be anal */ } for (i = 0; i < NUM_ARPC_IN_URB; ++i) { usb_free_urb(es2->arpc_urb[i]); kfree(es2->arpc_buffer[i]); es2->arpc_buffer[i] = NULL; } for (i = 0; i < NUM_CPORT_IN_URB; ++i) { usb_free_urb(es2->cport_in.urb[i]); kfree(es2->cport_in.buffer[i]); es2->cport_in.buffer[i] = NULL; } /* release reserved CDSI0 and CDSI1 cports */ gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI1); gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI0); udev = es2->usb_dev; gb_hd_put(es2->hd); usb_put_dev(udev); } static void cport_in_callback(struct urb *urb) { struct gb_host_device *hd = urb->context; struct device *dev = &urb->dev->dev; struct gb_operation_msg_hdr *header; int status = check_urb_status(urb); int retval; u16 cport_id; if (status) { if ((status == -EAGAIN) || (status == -EPROTO)) goto exit; /* The urb is being unlinked */ if (status == -ENOENT || status == -ESHUTDOWN) return; dev_err(dev, "urb cport in error %d (dropped)\n", status); return; } if (urb->actual_length < sizeof(*header)) { dev_err(dev, "short message received\n"); goto exit; } /* Extract the CPort id, which is packed in the message header */ header = urb->transfer_buffer; cport_id = gb_message_cport_unpack(header); if (cport_id_valid(hd, cport_id)) { greybus_data_rcvd(hd, cport_id, urb->transfer_buffer, urb->actual_length); } else { dev_err(dev, "invalid cport id %u received\n", cport_id); } exit: /* put our urb back in the request pool */ retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(dev, "failed to resubmit in-urb: %d\n", retval); } static void cport_out_callback(struct urb *urb) { struct gb_message *message = urb->context; struct gb_host_device *hd = 
message->operation->connection->hd; struct es2_ap_dev *es2 = hd_to_es2(hd); int status = check_urb_status(urb); unsigned long flags; gb_message_cport_clear(message->header); spin_lock_irqsave(&es2->cport_out_urb_lock, flags); message->hcpriv = NULL; spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags); /* * Tell the submitter that the message send (attempt) is * complete, and report the status. */ greybus_message_sent(hd, message, status); free_urb(es2, urb); } static struct arpc *arpc_alloc(void *payload, u16 size, u8 type) { struct arpc *rpc; if (size + sizeof(*rpc->req) > ARPC_OUT_SIZE_MAX) return NULL; rpc = kzalloc(sizeof(*rpc), GFP_KERNEL); if (!rpc) return NULL; INIT_LIST_HEAD(&rpc->list); rpc->req = kzalloc(sizeof(*rpc->req) + size, GFP_KERNEL); if (!rpc->req) goto err_free_rpc; rpc->resp = kzalloc(sizeof(*rpc->resp), GFP_KERNEL); if (!rpc->resp) goto err_free_req; rpc->req->type = type; rpc->req->size = cpu_to_le16(sizeof(*rpc->req) + size); memcpy(rpc->req->data, payload, size); init_completion(&rpc->response_received); return rpc; err_free_req: kfree(rpc->req); err_free_rpc: kfree(rpc); return NULL; } static void arpc_free(struct arpc *rpc) { kfree(rpc->req); kfree(rpc->resp); kfree(rpc); } static struct arpc *arpc_find(struct es2_ap_dev *es2, __le16 id) { struct arpc *rpc; list_for_each_entry(rpc, &es2->arpcs, list) { if (rpc->req->id == id) return rpc; } return NULL; } static void arpc_add(struct es2_ap_dev *es2, struct arpc *rpc) { rpc->active = true; rpc->req->id = cpu_to_le16(es2->arpc_id_cycle++); list_add_tail(&rpc->list, &es2->arpcs); } static void arpc_del(struct es2_ap_dev *es2, struct arpc *rpc) { if (rpc->active) { rpc->active = false; list_del(&rpc->list); } } static int arpc_send(struct es2_ap_dev *es2, struct arpc *rpc, int timeout) { struct usb_device *udev = es2->usb_dev; int retval; retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), GB_APB_REQUEST_ARPC_RUN, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 0, 0, rpc->req, le16_to_cpu(rpc->req->size), ES2_USB_CTRL_TIMEOUT); if (retval < 0) { dev_err(&udev->dev, "failed to send ARPC request %d: %d\n", rpc->req->type, retval); return retval; } return 0; } static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload, size_t size, int *result, unsigned int timeout) { struct arpc *rpc; unsigned long flags; int retval; if (result) *result = 0; rpc = arpc_alloc(payload, size, type); if (!rpc) return -ENOMEM; spin_lock_irqsave(&es2->arpc_lock, flags); arpc_add(es2, rpc); spin_unlock_irqrestore(&es2->arpc_lock, flags); retval = arpc_send(es2, rpc, timeout); if (retval) goto out_arpc_del; retval = wait_for_completion_interruptible_timeout( &rpc->response_received, msecs_to_jiffies(timeout)); if (retval <= 0) { if (!retval) retval = -ETIMEDOUT; goto out_arpc_del; } if (rpc->resp->result) { retval = -EREMOTEIO; if (result) *result = rpc->resp->result; } else { retval = 0; } out_arpc_del: spin_lock_irqsave(&es2->arpc_lock, flags); arpc_del(es2, rpc); spin_unlock_irqrestore(&es2->arpc_lock, flags); arpc_free(rpc); if (retval < 0 && retval != -EREMOTEIO) { dev_err(&es2->usb_dev->dev, "failed to execute ARPC: %d\n", retval); } return retval; } static void arpc_in_callback(struct urb *urb) { struct es2_ap_dev *es2 = urb->context; struct device *dev = &urb->dev->dev; int status = check_urb_status(urb); struct arpc *rpc; struct arpc_response_message *resp; unsigned long flags; int retval; if (status) { if ((status == -EAGAIN) || (status == -EPROTO)) goto exit; /* The urb is being unlinked */ if (status == 
-ENOENT || status == -ESHUTDOWN) return; dev_err(dev, "arpc in-urb error %d (dropped)\n", status); return; } if (urb->actual_length < sizeof(*resp)) { dev_err(dev, "short arpc response received\n"); goto exit; } resp = urb->transfer_buffer; spin_lock_irqsave(&es2->arpc_lock, flags); rpc = arpc_find(es2, resp->id); if (!rpc) { dev_err(dev, "invalid arpc response id received: %u\n", le16_to_cpu(resp->id)); spin_unlock_irqrestore(&es2->arpc_lock, flags); goto exit; } arpc_del(es2, rpc); memcpy(rpc->resp, resp, sizeof(*resp)); complete(&rpc->response_received); spin_unlock_irqrestore(&es2->arpc_lock, flags); exit: /* put our urb back in the request pool */ retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(dev, "failed to resubmit arpc in-urb: %d\n", retval); } #define APB1_LOG_MSG_SIZE 64 static void apb_log_get(struct es2_ap_dev *es2, char *buf) { int retval; do { retval = usb_control_msg(es2->usb_dev, usb_rcvctrlpipe(es2->usb_dev, 0), GB_APB_REQUEST_LOG, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 0x00, 0x00, buf, APB1_LOG_MSG_SIZE, ES2_USB_CTRL_TIMEOUT); if (retval > 0) kfifo_in(&es2->apb_log_fifo, buf, retval); } while (retval > 0); } static int apb_log_poll(void *data) { struct es2_ap_dev *es2 = data; char *buf; buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; while (!kthread_should_stop()) { msleep(1000); apb_log_get(es2, buf); } kfree(buf); return 0; } static ssize_t apb_log_read(struct file *f, char __user *buf, size_t count, loff_t *ppos) { struct es2_ap_dev *es2 = file_inode(f)->i_private; ssize_t ret; size_t copied; char *tmp_buf; if (count > APB1_LOG_SIZE) count = APB1_LOG_SIZE; tmp_buf = kmalloc(count, GFP_KERNEL); if (!tmp_buf) return -ENOMEM; copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count); ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied); kfree(tmp_buf); return ret; } static const struct file_operations apb_log_fops = { .read = apb_log_read, }; static void usb_log_enable(struct es2_ap_dev *es2) { if (!IS_ERR_OR_NULL(es2->apb_log_task)) return; /* get log from APB1 */ es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log"); if (IS_ERR(es2->apb_log_task)) return; /* XXX We will need to rename this per APB */ es2->apb_log_dentry = debugfs_create_file("apb_log", 0444, gb_debugfs_get(), es2, &apb_log_fops); } static void usb_log_disable(struct es2_ap_dev *es2) { if (IS_ERR_OR_NULL(es2->apb_log_task)) return; debugfs_remove(es2->apb_log_dentry); es2->apb_log_dentry = NULL; kthread_stop(es2->apb_log_task); es2->apb_log_task = NULL; } static ssize_t apb_log_enable_read(struct file *f, char __user *buf, size_t count, loff_t *ppos) { struct es2_ap_dev *es2 = file_inode(f)->i_private; int enable = !IS_ERR_OR_NULL(es2->apb_log_task); char tmp_buf[3]; sprintf(tmp_buf, "%d\n", enable); return simple_read_from_buffer(buf, count, ppos, tmp_buf, 2); } static ssize_t apb_log_enable_write(struct file *f, const char __user *buf, size_t count, loff_t *ppos) { int enable; ssize_t retval; struct es2_ap_dev *es2 = file_inode(f)->i_private; retval = kstrtoint_from_user(buf, count, 10, &enable); if (retval) return retval; if (enable) usb_log_enable(es2); else usb_log_disable(es2); return count; } static const struct file_operations apb_log_enable_fops = { .read = apb_log_enable_read, .write = apb_log_enable_write, }; static int apb_get_cport_count(struct usb_device *udev) { int retval; __le16 *cport_count; cport_count = kzalloc(sizeof(*cport_count), GFP_KERNEL); if (!cport_count) return -ENOMEM; retval = usb_control_msg(udev,
usb_rcvctrlpipe(udev, 0), GB_APB_REQUEST_CPORT_COUNT, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 0, 0, cport_count, sizeof(*cport_count), ES2_USB_CTRL_TIMEOUT); if (retval != sizeof(*cport_count)) { dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n", retval); if (retval >= 0) retval = -EIO; goto out; } retval = le16_to_cpu(*cport_count); /* We need to fit a CPort ID in one byte of a message header */ if (retval > U8_MAX) { retval = U8_MAX; dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n"); } out: kfree(cport_count); return retval; } /* * The ES2 USB Bridge device has 15 endpoints * 1 Control - usual USB stuff + AP -> APBridgeA messages * 7 Bulk IN - CPort data in * 7 Bulk OUT - CPort data out */ static int ap_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct es2_ap_dev *es2; struct gb_host_device *hd; struct usb_device *udev; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; __u8 ep_addr; int retval; int i; int num_cports; bool bulk_out_found = false; bool bulk_in_found = false; bool arpc_in_found = false; udev = usb_get_dev(interface_to_usbdev(interface)); num_cports = apb_get_cport_count(udev); if (num_cports < 0) { usb_put_dev(udev); dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n", num_cports); return num_cports; } hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX, num_cports); if (IS_ERR(hd)) { usb_put_dev(udev); return PTR_ERR(hd); } es2 = hd_to_es2(hd); es2->hd = hd; es2->usb_intf = interface; es2->usb_dev = udev; spin_lock_init(&es2->cport_out_urb_lock); INIT_KFIFO(es2->apb_log_fifo); usb_set_intfdata(interface, es2); /* * Reserve the CDSI0 and CDSI1 CPorts so they won't be allocated * dynamically. */ retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0); if (retval) goto error; retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1); if (retval) goto error; /* find all bulk endpoints */ iface_desc = interface->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; ep_addr = endpoint->bEndpointAddress; if (usb_endpoint_is_bulk_in(endpoint)) { if (!bulk_in_found) { es2->cport_in.endpoint = ep_addr; bulk_in_found = true; } else if (!arpc_in_found) { es2->arpc_endpoint_in = ep_addr; arpc_in_found = true; } else { dev_warn(&udev->dev, "Unused bulk IN endpoint found: 0x%02x\n", ep_addr); } continue; } if (usb_endpoint_is_bulk_out(endpoint)) { if (!bulk_out_found) { es2->cport_out_endpoint = ep_addr; bulk_out_found = true; } else { dev_warn(&udev->dev, "Unused bulk OUT endpoint found: 0x%02x\n", ep_addr); } continue; } dev_warn(&udev->dev, "Unknown endpoint type found, address 0x%02x\n", ep_addr); } if (!bulk_in_found || !arpc_in_found || !bulk_out_found) { dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n"); retval = -ENODEV; goto error; } /* Allocate buffers for our cport in messages */ for (i = 0; i < NUM_CPORT_IN_URB; ++i) { struct urb *urb; u8 *buffer; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { retval = -ENOMEM; goto error; } es2->cport_in.urb[i] = urb; buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL); if (!buffer) { retval = -ENOMEM; goto error; } usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, es2->cport_in.endpoint), buffer, ES2_GBUF_MSG_SIZE_MAX, cport_in_callback, hd); es2->cport_in.buffer[i] = buffer; } /* Allocate buffers for ARPC in messages */ for (i = 0; i < NUM_ARPC_IN_URB; ++i) { struct urb *urb; u8 *buffer; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { retval = -ENOMEM; goto error; 
} es2->arpc_urb[i] = urb; buffer = kmalloc(ARPC_IN_SIZE_MAX, GFP_KERNEL); if (!buffer) { retval = -ENOMEM; goto error; } usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, es2->arpc_endpoint_in), buffer, ARPC_IN_SIZE_MAX, arpc_in_callback, es2); es2->arpc_buffer[i] = buffer; } /* Allocate urbs for our CPort OUT messages */ for (i = 0; i < NUM_CPORT_OUT_URB; ++i) { struct urb *urb; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { retval = -ENOMEM; goto error; } es2->cport_out_urb[i] = urb; es2->cport_out_urb_busy[i] = false; /* just to be anal */ } /* XXX We will need to rename this per APB */ es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable", 0644, gb_debugfs_get(), es2, &apb_log_enable_fops); INIT_LIST_HEAD(&es2->arpcs); spin_lock_init(&es2->arpc_lock); retval = es2_arpc_in_enable(es2); if (retval) goto error; retval = gb_hd_add(hd); if (retval) goto err_disable_arpc_in; retval = es2_cport_in_enable(es2, &es2->cport_in); if (retval) goto err_hd_del; return 0; err_hd_del: gb_hd_del(hd); err_disable_arpc_in: es2_arpc_in_disable(es2); error: es2_destroy(es2); return retval; } static void ap_disconnect(struct usb_interface *interface) { struct es2_ap_dev *es2 = usb_get_intfdata(interface); gb_hd_del(es2->hd); es2_cport_in_disable(es2, &es2->cport_in); es2_arpc_in_disable(es2); es2_destroy(es2); } static struct usb_driver es2_ap_driver = { .name = "es2_ap_driver", .probe = ap_probe, .disconnect = ap_disconnect, .id_table = id_table, .soft_unbind = 1, }; module_usb_driver(es2_ap_driver); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Greg Kroah-Hartman <[email protected]>");
linux-master
drivers/greybus/es2.c
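message_send() above packs the destination CPort id into the pad byte of the operation message header instead of adding extra framing, and cport_in_callback() unpacks and clears it on the receive path; apb_get_cport_count() caps the CPort count at U8_MAX precisely so the id fits in that single byte. A standalone sketch of the round trip, using a simplified stand-in for struct gb_operation_msg_hdr rather than the real definition:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct gb_operation_msg_hdr; only the pad byte matters here. */
struct msg_hdr {
	uint16_t size;
	uint16_t operation_id;
	uint8_t type;
	uint8_t result;
	uint8_t pad[1];
};

/* Pack the CPort id into the otherwise-unused pad byte (ids are limited to U8_MAX). */
static void cport_pack(struct msg_hdr *hdr, uint16_t cport_id)
{
	hdr->pad[0] = cport_id;
}

/* Extract the CPort id and clear the pad byte again, as the receive path does. */
static uint16_t cport_unpack(struct msg_hdr *hdr)
{
	uint16_t cport_id = hdr->pad[0];

	hdr->pad[0] = 0;
	return cport_id;
}

int main(void)
{
	struct msg_hdr hdr = { 0 };

	cport_pack(&hdr, 17);
	printf("cport id on receive: %u\n", cport_unpack(&hdr));
	return 0;
}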
// SPDX-License-Identifier: GPL-2.0 /* * Greybus debugfs code * * Copyright 2014 Google Inc. * Copyright 2014 Linaro Ltd. */ #include <linux/debugfs.h> #include <linux/greybus.h> static struct dentry *gb_debug_root; void __init gb_debugfs_init(void) { gb_debug_root = debugfs_create_dir("greybus", NULL); } void gb_debugfs_cleanup(void) { debugfs_remove_recursive(gb_debug_root); gb_debug_root = NULL; } struct dentry *gb_debugfs_get(void) { return gb_debug_root; } EXPORT_SYMBOL_GPL(gb_debugfs_get);
linux-master
drivers/greybus/debugfs.c
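debugfs.c only maintains the shared "greybus" debugfs directory; drivers hang their own entries off it via gb_debugfs_get(), exactly as es2.c does for its "apb_log" file. A hedged sketch of a client doing the same follows; the "my_stats" file name and its file_operations are hypothetical, introduced only for illustration, and this is a driver fragment rather than a complete module.

#include <linux/debugfs.h>
#include <linux/greybus.h>

static struct dentry *my_stats_dentry;

/* Create a hypothetical "my_stats" entry under the shared greybus debugfs root. */
static void my_driver_debugfs_init(void *priv, const struct file_operations *my_stats_fops)
{
	my_stats_dentry = debugfs_create_file("my_stats", 0444, gb_debugfs_get(),
					      priv, my_stats_fops);
}

/* Remove the entry again on driver teardown. */
static void my_driver_debugfs_exit(void)
{
	debugfs_remove(my_stats_dentry);
	my_stats_dentry = NULL;
}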
// SPDX-License-Identifier: GPL-2.0 /* * Greybus operations * * Copyright 2014-2015 Google Inc. * Copyright 2014-2015 Linaro Ltd. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <linux/greybus.h> #include "greybus_trace.h" static struct kmem_cache *gb_operation_cache; static struct kmem_cache *gb_message_cache; /* Workqueue to handle Greybus operation completions. */ static struct workqueue_struct *gb_operation_completion_wq; /* Wait queue for synchronous cancellations. */ static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue); /* * Protects updates to operation->errno. */ static DEFINE_SPINLOCK(gb_operations_lock); static int gb_operation_response_send(struct gb_operation *operation, int errno); /* * Increment operation active count and add to connection list unless the * connection is going away. * * Caller holds operation reference. */ static int gb_operation_get_active(struct gb_operation *operation) { struct gb_connection *connection = operation->connection; unsigned long flags; spin_lock_irqsave(&connection->lock, flags); switch (connection->state) { case GB_CONNECTION_STATE_ENABLED: break; case GB_CONNECTION_STATE_ENABLED_TX: if (gb_operation_is_incoming(operation)) goto err_unlock; break; case GB_CONNECTION_STATE_DISCONNECTING: if (!gb_operation_is_core(operation)) goto err_unlock; break; default: goto err_unlock; } if (operation->active++ == 0) list_add_tail(&operation->links, &connection->operations); trace_gb_operation_get_active(operation); spin_unlock_irqrestore(&connection->lock, flags); return 0; err_unlock: spin_unlock_irqrestore(&connection->lock, flags); return -ENOTCONN; } /* Caller holds operation reference. */ static void gb_operation_put_active(struct gb_operation *operation) { struct gb_connection *connection = operation->connection; unsigned long flags; spin_lock_irqsave(&connection->lock, flags); trace_gb_operation_put_active(operation); if (--operation->active == 0) { list_del(&operation->links); if (atomic_read(&operation->waiters)) wake_up(&gb_operation_cancellation_queue); } spin_unlock_irqrestore(&connection->lock, flags); } static bool gb_operation_is_active(struct gb_operation *operation) { struct gb_connection *connection = operation->connection; unsigned long flags; bool ret; spin_lock_irqsave(&connection->lock, flags); ret = operation->active; spin_unlock_irqrestore(&connection->lock, flags); return ret; } /* * Set an operation's result. * * Initially an outgoing operation's errno value is -EBADR. * If no error occurs before sending the request message the only * valid value operation->errno can be set to is -EINPROGRESS, * indicating the request has been (or rather is about to be) sent. * At that point nobody should be looking at the result until the * response arrives. * * The first time the result gets set after the request has been * sent, that result "sticks." That is, if two concurrent threads * race to set the result, the first one wins. The return value * tells the caller whether its result was recorded; if not the * caller has nothing more to do. * * The result value -EILSEQ is reserved to signal an implementation * error; if it's ever observed, the code performing the request has * done something fundamentally wrong. It is an error to try to set * the result to -EBADR, and attempts to do so result in a warning, * and -EILSEQ is used instead. 
Similarly, the only valid result * value to set for an operation in initial state is -EINPROGRESS. * Attempts to do otherwise will also record a (successful) -EILSEQ * operation result. */ static bool gb_operation_result_set(struct gb_operation *operation, int result) { unsigned long flags; int prev; if (result == -EINPROGRESS) { /* * -EINPROGRESS is used to indicate the request is * in flight. It should be the first result value * set after the initial -EBADR. Issue a warning * and record an implementation error if it's * set at any other time. */ spin_lock_irqsave(&gb_operations_lock, flags); prev = operation->errno; if (prev == -EBADR) operation->errno = result; else operation->errno = -EILSEQ; spin_unlock_irqrestore(&gb_operations_lock, flags); WARN_ON(prev != -EBADR); return true; } /* * The first result value set after a request has been sent * will be the final result of the operation. Subsequent * attempts to set the result are ignored. * * Note that -EBADR is a reserved "initial state" result * value. Attempts to set this value result in a warning, * and the result code is set to -EILSEQ instead. */ if (WARN_ON(result == -EBADR)) result = -EILSEQ; /* Nobody should be setting -EBADR */ spin_lock_irqsave(&gb_operations_lock, flags); prev = operation->errno; if (prev == -EINPROGRESS) operation->errno = result; /* First and final result */ spin_unlock_irqrestore(&gb_operations_lock, flags); return prev == -EINPROGRESS; } int gb_operation_result(struct gb_operation *operation) { int result = operation->errno; WARN_ON(result == -EBADR); WARN_ON(result == -EINPROGRESS); return result; } EXPORT_SYMBOL_GPL(gb_operation_result); /* * Looks up an outgoing operation on a connection and returns a refcounted * pointer if found, or NULL otherwise. */ static struct gb_operation * gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id) { struct gb_operation *operation; unsigned long flags; bool found = false; spin_lock_irqsave(&connection->lock, flags); list_for_each_entry(operation, &connection->operations, links) if (operation->id == operation_id && !gb_operation_is_incoming(operation)) { gb_operation_get(operation); found = true; break; } spin_unlock_irqrestore(&connection->lock, flags); return found ? operation : NULL; } static int gb_message_send(struct gb_message *message, gfp_t gfp) { struct gb_connection *connection = message->operation->connection; trace_gb_message_send(message); return connection->hd->driver->message_send(connection->hd, connection->hd_cport_id, message, gfp); } /* * Cancel a message we have passed to the host device layer to be sent. */ static void gb_message_cancel(struct gb_message *message) { struct gb_host_device *hd = message->operation->connection->hd; hd->driver->message_cancel(message); } static void gb_operation_request_handle(struct gb_operation *operation) { struct gb_connection *connection = operation->connection; int status; int ret; if (connection->handler) { status = connection->handler(operation); } else { dev_err(&connection->hd->dev, "%s: unexpected incoming request of type 0x%02x\n", connection->name, operation->type); status = -EPROTONOSUPPORT; } ret = gb_operation_response_send(operation, status); if (ret) { dev_err(&connection->hd->dev, "%s: failed to send response %d for type 0x%02x: %d\n", connection->name, status, operation->type, ret); return; } } /* * Process operation work. * * For incoming requests, call the protocol request handler. The operation * result should be -EINPROGRESS at this point. 
* * For outgoing requests, the operation result value should have * been set before queueing this. The operation callback function * allows the original requester to know the request has completed * and its result is available. */ static void gb_operation_work(struct work_struct *work) { struct gb_operation *operation; int ret; operation = container_of(work, struct gb_operation, work); if (gb_operation_is_incoming(operation)) { gb_operation_request_handle(operation); } else { ret = del_timer_sync(&operation->timer); if (!ret) { /* Cancel request message if scheduled by timeout. */ if (gb_operation_result(operation) == -ETIMEDOUT) gb_message_cancel(operation->request); } operation->callback(operation); } gb_operation_put_active(operation); gb_operation_put(operation); } static void gb_operation_timeout(struct timer_list *t) { struct gb_operation *operation = from_timer(operation, t, timer); if (gb_operation_result_set(operation, -ETIMEDOUT)) { /* * A stuck request message will be cancelled from the * workqueue. */ queue_work(gb_operation_completion_wq, &operation->work); } } static void gb_operation_message_init(struct gb_host_device *hd, struct gb_message *message, u16 operation_id, size_t payload_size, u8 type) { struct gb_operation_msg_hdr *header; header = message->buffer; message->header = header; message->payload = payload_size ? header + 1 : NULL; message->payload_size = payload_size; /* * The type supplied for incoming message buffers will be * GB_REQUEST_TYPE_INVALID. Such buffers will be overwritten by * arriving data so there's no need to initialize the message header. */ if (type != GB_REQUEST_TYPE_INVALID) { u16 message_size = (u16)(sizeof(*header) + payload_size); /* * For a request, the operation id gets filled in * when the message is sent. For a response, it * will be copied from the request by the caller. * * The result field in a request message must be * zero. It will be set just prior to sending for * a response. */ header->size = cpu_to_le16(message_size); header->operation_id = 0; header->type = type; header->result = 0; } } /* * Allocate a message to be used for an operation request or response. * Both types of message contain a common header. The request message * for an outgoing operation is outbound, as is the response message * for an incoming operation. The message header for an outbound * message is partially initialized here. * * The headers for inbound messages don't need to be initialized; * they'll be filled in by arriving data. * * Our message buffers have the following layout: * message header \_ these combined are * message payload / the message size */ static struct gb_message * gb_operation_message_alloc(struct gb_host_device *hd, u8 type, size_t payload_size, gfp_t gfp_flags) { struct gb_message *message; struct gb_operation_msg_hdr *header; size_t message_size = payload_size + sizeof(*header); if (message_size > hd->buffer_size_max) { dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n", message_size, hd->buffer_size_max); return NULL; } /* Allocate the message structure and buffer. */ message = kmem_cache_zalloc(gb_message_cache, gfp_flags); if (!message) return NULL; message->buffer = kzalloc(message_size, gfp_flags); if (!message->buffer) goto err_free_message; /* Initialize the message. Operation id is filled in later. 
*/ gb_operation_message_init(hd, message, 0, payload_size, type); return message; err_free_message: kmem_cache_free(gb_message_cache, message); return NULL; } static void gb_operation_message_free(struct gb_message *message) { kfree(message->buffer); kmem_cache_free(gb_message_cache, message); } /* * Map an enum gb_operation_status value (which is represented in a * message as a single byte) to an appropriate Linux negative errno. */ static int gb_operation_status_map(u8 status) { switch (status) { case GB_OP_SUCCESS: return 0; case GB_OP_INTERRUPTED: return -EINTR; case GB_OP_TIMEOUT: return -ETIMEDOUT; case GB_OP_NO_MEMORY: return -ENOMEM; case GB_OP_PROTOCOL_BAD: return -EPROTONOSUPPORT; case GB_OP_OVERFLOW: return -EMSGSIZE; case GB_OP_INVALID: return -EINVAL; case GB_OP_RETRY: return -EAGAIN; case GB_OP_NONEXISTENT: return -ENODEV; case GB_OP_MALFUNCTION: return -EILSEQ; case GB_OP_UNKNOWN_ERROR: default: return -EIO; } } /* * Map a Linux errno value (from operation->errno) into the value * that should represent it in a response message status sent * over the wire. Returns an enum gb_operation_status value (which * is represented in a message as a single byte). */ static u8 gb_operation_errno_map(int errno) { switch (errno) { case 0: return GB_OP_SUCCESS; case -EINTR: return GB_OP_INTERRUPTED; case -ETIMEDOUT: return GB_OP_TIMEOUT; case -ENOMEM: return GB_OP_NO_MEMORY; case -EPROTONOSUPPORT: return GB_OP_PROTOCOL_BAD; case -EMSGSIZE: return GB_OP_OVERFLOW; /* Could be underflow too */ case -EINVAL: return GB_OP_INVALID; case -EAGAIN: return GB_OP_RETRY; case -EILSEQ: return GB_OP_MALFUNCTION; case -ENODEV: return GB_OP_NONEXISTENT; case -EIO: default: return GB_OP_UNKNOWN_ERROR; } } bool gb_operation_response_alloc(struct gb_operation *operation, size_t response_size, gfp_t gfp) { struct gb_host_device *hd = operation->connection->hd; struct gb_operation_msg_hdr *request_header; struct gb_message *response; u8 type; type = operation->type | GB_MESSAGE_TYPE_RESPONSE; response = gb_operation_message_alloc(hd, type, response_size, gfp); if (!response) return false; response->operation = operation; /* * Size and type get initialized when the message is * allocated. The errno will be set before sending. All * that's left is the operation id, which we copy from the * request message header (as-is, in little-endian order). */ request_header = operation->request->header; response->header->operation_id = request_header->operation_id; operation->response = response; return true; } EXPORT_SYMBOL_GPL(gb_operation_response_alloc); /* * Create a Greybus operation to be sent over the given connection. * The request buffer will be big enough for a payload of the given * size. * * For outgoing requests, the request message's header will be * initialized with the type of the request and the message size. * Outgoing operations must also specify the response buffer size, * which must be sufficient to hold all expected response data. The * response message header will eventually be overwritten, so there's * no need to initialize it here. * * Request messages for incoming operations can arrive in interrupt * context, so they must be allocated with GFP_ATOMIC. In this case * the request buffer will be immediately overwritten, so there is * no need to initialize the message header. Responsibility for * allocating a response buffer lies with the incoming request * handler for a protocol. So we don't allocate that here. * * Returns a pointer to the new operation or a null pointer if an * error occurs. 
*/ static struct gb_operation * gb_operation_create_common(struct gb_connection *connection, u8 type, size_t request_size, size_t response_size, unsigned long op_flags, gfp_t gfp_flags) { struct gb_host_device *hd = connection->hd; struct gb_operation *operation; operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags); if (!operation) return NULL; operation->connection = connection; operation->request = gb_operation_message_alloc(hd, type, request_size, gfp_flags); if (!operation->request) goto err_cache; operation->request->operation = operation; /* Allocate the response buffer for outgoing operations */ if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) { if (!gb_operation_response_alloc(operation, response_size, gfp_flags)) { goto err_request; } timer_setup(&operation->timer, gb_operation_timeout, 0); } operation->flags = op_flags; operation->type = type; operation->errno = -EBADR; /* Initial value--means "never set" */ INIT_WORK(&operation->work, gb_operation_work); init_completion(&operation->completion); kref_init(&operation->kref); atomic_set(&operation->waiters, 0); return operation; err_request: gb_operation_message_free(operation->request); err_cache: kmem_cache_free(gb_operation_cache, operation); return NULL; } /* * Create a new operation associated with the given connection. The * request and response sizes provided are the number of bytes * required to hold the request/response payload only. Both of * these are allowed to be 0. Note that 0x00 is reserved as an * invalid operation type for all protocols, and this is enforced * here. */ struct gb_operation * gb_operation_create_flags(struct gb_connection *connection, u8 type, size_t request_size, size_t response_size, unsigned long flags, gfp_t gfp) { struct gb_operation *operation; if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID)) return NULL; if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE)) type &= ~GB_MESSAGE_TYPE_RESPONSE; if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK)) flags &= GB_OPERATION_FLAG_USER_MASK; operation = gb_operation_create_common(connection, type, request_size, response_size, flags, gfp); if (operation) trace_gb_operation_create(operation); return operation; } EXPORT_SYMBOL_GPL(gb_operation_create_flags); struct gb_operation * gb_operation_create_core(struct gb_connection *connection, u8 type, size_t request_size, size_t response_size, unsigned long flags, gfp_t gfp) { struct gb_operation *operation; flags |= GB_OPERATION_FLAG_CORE; operation = gb_operation_create_common(connection, type, request_size, response_size, flags, gfp); if (operation) trace_gb_operation_create_core(operation); return operation; } /* Do not export this function. */ size_t gb_operation_get_payload_size_max(struct gb_connection *connection) { struct gb_host_device *hd = connection->hd; return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr); } EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max); static struct gb_operation * gb_operation_create_incoming(struct gb_connection *connection, u16 id, u8 type, void *data, size_t size) { struct gb_operation *operation; size_t request_size; unsigned long flags = GB_OPERATION_FLAG_INCOMING; /* Caller has made sure we at least have a message header. 
*/ request_size = size - sizeof(struct gb_operation_msg_hdr); if (!id) flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL; operation = gb_operation_create_common(connection, type, request_size, GB_REQUEST_TYPE_INVALID, flags, GFP_ATOMIC); if (!operation) return NULL; operation->id = id; memcpy(operation->request->header, data, size); trace_gb_operation_create_incoming(operation); return operation; } /* * Get an additional reference on an operation. */ void gb_operation_get(struct gb_operation *operation) { kref_get(&operation->kref); } EXPORT_SYMBOL_GPL(gb_operation_get); /* * Destroy a previously created operation. */ static void _gb_operation_destroy(struct kref *kref) { struct gb_operation *operation; operation = container_of(kref, struct gb_operation, kref); trace_gb_operation_destroy(operation); if (operation->response) gb_operation_message_free(operation->response); gb_operation_message_free(operation->request); kmem_cache_free(gb_operation_cache, operation); } /* * Drop a reference on an operation, and destroy it when the last * one is gone. */ void gb_operation_put(struct gb_operation *operation) { if (WARN_ON(!operation)) return; kref_put(&operation->kref, _gb_operation_destroy); } EXPORT_SYMBOL_GPL(gb_operation_put); /* Tell the requester we're done */ static void gb_operation_sync_callback(struct gb_operation *operation) { complete(&operation->completion); } /** * gb_operation_request_send() - send an operation request message * @operation: the operation to initiate * @callback: the operation completion callback * @timeout: operation timeout in milliseconds, or zero for no timeout * @gfp: the memory flags to use for any allocations * * The caller has filled in any payload so the request message is ready to go. * The callback function supplied will be called when the response message has * arrived, a unidirectional request has been sent, or the operation is * cancelled, indicating that the operation is complete. The callback function * can fetch the result of the operation using gb_operation_result() if * desired. * * Return: 0 if the request was successfully queued in the host-driver queues, * or a negative errno. */ int gb_operation_request_send(struct gb_operation *operation, gb_operation_callback callback, unsigned int timeout, gfp_t gfp) { struct gb_connection *connection = operation->connection; struct gb_operation_msg_hdr *header; unsigned int cycle; int ret; if (gb_connection_is_offloaded(connection)) return -EBUSY; if (!callback) return -EINVAL; /* * Record the callback function, which is executed in * non-atomic (workqueue) context when the final result * of an operation has been set. */ operation->callback = callback; /* * Assign the operation's id, and store it in the request header. * Zero is a reserved operation id for unidirectional operations. */ if (gb_operation_is_unidirectional(operation)) { operation->id = 0; } else { cycle = (unsigned int)atomic_inc_return(&connection->op_cycle); operation->id = (u16)(cycle % U16_MAX + 1); } header = operation->request->header; header->operation_id = cpu_to_le16(operation->id); gb_operation_result_set(operation, -EINPROGRESS); /* * Get an extra reference on the operation. It'll be dropped when the * operation completes. 
*/ gb_operation_get(operation); ret = gb_operation_get_active(operation); if (ret) goto err_put; ret = gb_message_send(operation->request, gfp); if (ret) goto err_put_active; if (timeout) { operation->timer.expires = jiffies + msecs_to_jiffies(timeout); add_timer(&operation->timer); } return 0; err_put_active: gb_operation_put_active(operation); err_put: gb_operation_put(operation); return ret; } EXPORT_SYMBOL_GPL(gb_operation_request_send); /* * Send a synchronous operation. This function is expected to * block, returning only when the response has arrived, (or when an * error is detected. The return value is the result of the * operation. */ int gb_operation_request_send_sync_timeout(struct gb_operation *operation, unsigned int timeout) { int ret; ret = gb_operation_request_send(operation, gb_operation_sync_callback, timeout, GFP_KERNEL); if (ret) return ret; ret = wait_for_completion_interruptible(&operation->completion); if (ret < 0) { /* Cancel the operation if interrupted */ gb_operation_cancel(operation, -ECANCELED); } return gb_operation_result(operation); } EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout); /* * Send a response for an incoming operation request. A non-zero * errno indicates a failed operation. * * If there is any response payload, the incoming request handler is * responsible for allocating the response message. Otherwise the * it can simply supply the result errno; this function will * allocate the response message if necessary. */ static int gb_operation_response_send(struct gb_operation *operation, int errno) { struct gb_connection *connection = operation->connection; int ret; if (!operation->response && !gb_operation_is_unidirectional(operation)) { if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL)) return -ENOMEM; } /* Record the result */ if (!gb_operation_result_set(operation, errno)) { dev_err(&connection->hd->dev, "request result already set\n"); return -EIO; /* Shouldn't happen */ } /* Sender of request does not care about response. */ if (gb_operation_is_unidirectional(operation)) return 0; /* Reference will be dropped when message has been sent. */ gb_operation_get(operation); ret = gb_operation_get_active(operation); if (ret) goto err_put; /* Fill in the response header and send it */ operation->response->header->result = gb_operation_errno_map(errno); ret = gb_message_send(operation->response, GFP_KERNEL); if (ret) goto err_put_active; return 0; err_put_active: gb_operation_put_active(operation); err_put: gb_operation_put(operation); return ret; } /* * This function is called when a message send request has completed. */ void greybus_message_sent(struct gb_host_device *hd, struct gb_message *message, int status) { struct gb_operation *operation = message->operation; struct gb_connection *connection = operation->connection; /* * If the message was a response, we just need to drop our * reference to the operation. If an error occurred, report * it. * * For requests, if there's no error and the operation in not * unidirectional, there's nothing more to do until the response * arrives. If an error occurred attempting to send it, or if the * operation is unidrectional, record the result of the operation and * schedule its completion. 
*/ if (message == operation->response) { if (status) { dev_err(&connection->hd->dev, "%s: error sending response 0x%02x: %d\n", connection->name, operation->type, status); } gb_operation_put_active(operation); gb_operation_put(operation); } else if (status || gb_operation_is_unidirectional(operation)) { if (gb_operation_result_set(operation, status)) { queue_work(gb_operation_completion_wq, &operation->work); } } } EXPORT_SYMBOL_GPL(greybus_message_sent); /* * We've received data on a connection, and it doesn't look like a * response, so we assume it's a request. * * This is called in interrupt context, so just copy the incoming * data into the request buffer and handle the rest via workqueue. */ static void gb_connection_recv_request(struct gb_connection *connection, const struct gb_operation_msg_hdr *header, void *data, size_t size) { struct gb_operation *operation; u16 operation_id; u8 type; int ret; operation_id = le16_to_cpu(header->operation_id); type = header->type; operation = gb_operation_create_incoming(connection, operation_id, type, data, size); if (!operation) { dev_err(&connection->hd->dev, "%s: can't create incoming operation\n", connection->name); return; } ret = gb_operation_get_active(operation); if (ret) { gb_operation_put(operation); return; } trace_gb_message_recv_request(operation->request); /* * The initial reference to the operation will be dropped when the * request handler returns. */ if (gb_operation_result_set(operation, -EINPROGRESS)) queue_work(connection->wq, &operation->work); } /* * We've received data that appears to be an operation response * message. Look up the operation, and record that we've received * its response. * * This is called in interrupt context, so just copy the incoming * data into the response buffer and handle the rest via workqueue. */ static void gb_connection_recv_response(struct gb_connection *connection, const struct gb_operation_msg_hdr *header, void *data, size_t size) { struct gb_operation *operation; struct gb_message *message; size_t message_size; u16 operation_id; int errno; operation_id = le16_to_cpu(header->operation_id); if (!operation_id) { dev_err_ratelimited(&connection->hd->dev, "%s: invalid response id 0 received\n", connection->name); return; } operation = gb_operation_find_outgoing(connection, operation_id); if (!operation) { dev_err_ratelimited(&connection->hd->dev, "%s: unexpected response id 0x%04x received\n", connection->name, operation_id); return; } errno = gb_operation_status_map(header->result); message = operation->response; message_size = sizeof(*header) + message->payload_size; if (!errno && size > message_size) { dev_err_ratelimited(&connection->hd->dev, "%s: malformed response 0x%02x received (%zu > %zu)\n", connection->name, header->type, size, message_size); errno = -EMSGSIZE; } else if (!errno && size < message_size) { if (gb_operation_short_response_allowed(operation)) { message->payload_size = size - sizeof(*header); } else { dev_err_ratelimited(&connection->hd->dev, "%s: short response 0x%02x received (%zu < %zu)\n", connection->name, header->type, size, message_size); errno = -EMSGSIZE; } } /* We must ignore the payload if a bad status is returned */ if (errno) size = sizeof(*header); /* The rest will be handled in work queue context */ if (gb_operation_result_set(operation, errno)) { memcpy(message->buffer, data, size); trace_gb_message_recv_response(message); queue_work(gb_operation_completion_wq, &operation->work); } gb_operation_put(operation); } /* * Handle data arriving on a connection. 
As soon as we return the * supplied data buffer will be reused (so unless we do something * with, it's effectively dropped). */ void gb_connection_recv(struct gb_connection *connection, void *data, size_t size) { struct gb_operation_msg_hdr header; struct device *dev = &connection->hd->dev; size_t msg_size; if (connection->state == GB_CONNECTION_STATE_DISABLED || gb_connection_is_offloaded(connection)) { dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n", connection->name, size); return; } if (size < sizeof(header)) { dev_err_ratelimited(dev, "%s: short message received\n", connection->name); return; } /* Use memcpy as data may be unaligned */ memcpy(&header, data, sizeof(header)); msg_size = le16_to_cpu(header.size); if (size < msg_size) { dev_err_ratelimited(dev, "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n", connection->name, le16_to_cpu(header.operation_id), header.type, size, msg_size); return; /* XXX Should still complete operation */ } if (header.type & GB_MESSAGE_TYPE_RESPONSE) { gb_connection_recv_response(connection, &header, data, msg_size); } else { gb_connection_recv_request(connection, &header, data, msg_size); } } /* * Cancel an outgoing operation synchronously, and record the given error to * indicate why. */ void gb_operation_cancel(struct gb_operation *operation, int errno) { if (WARN_ON(gb_operation_is_incoming(operation))) return; if (gb_operation_result_set(operation, errno)) { gb_message_cancel(operation->request); queue_work(gb_operation_completion_wq, &operation->work); } trace_gb_message_cancel_outgoing(operation->request); atomic_inc(&operation->waiters); wait_event(gb_operation_cancellation_queue, !gb_operation_is_active(operation)); atomic_dec(&operation->waiters); } EXPORT_SYMBOL_GPL(gb_operation_cancel); /* * Cancel an incoming operation synchronously. Called during connection tear * down. */ void gb_operation_cancel_incoming(struct gb_operation *operation, int errno) { if (WARN_ON(!gb_operation_is_incoming(operation))) return; if (!gb_operation_is_unidirectional(operation)) { /* * Make sure the request handler has submitted the response * before cancelling it. */ flush_work(&operation->work); if (!gb_operation_result_set(operation, errno)) gb_message_cancel(operation->response); } trace_gb_message_cancel_incoming(operation->response); atomic_inc(&operation->waiters); wait_event(gb_operation_cancellation_queue, !gb_operation_is_active(operation)); atomic_dec(&operation->waiters); } /** * gb_operation_sync_timeout() - implement a "simple" synchronous operation * @connection: the Greybus connection to send this to * @type: the type of operation to send * @request: pointer to a memory buffer to copy the request from * @request_size: size of @request * @response: pointer to a memory buffer to copy the response to * @response_size: the size of @response. * @timeout: operation timeout in milliseconds * * This function implements a simple synchronous Greybus operation. It sends * the provided operation request and waits (sleeps) until the corresponding * operation response message has been successfully received, or an error * occurs. @request and @response are buffers to hold the request and response * data respectively, and if they are not NULL, their size must be specified in * @request_size and @response_size. * * If a response payload is to come back, and @response is not NULL, * @response_size number of bytes will be copied into @response if the operation * is successful. 
* * If there is an error, the response buffer is left alone. */ int gb_operation_sync_timeout(struct gb_connection *connection, int type, void *request, int request_size, void *response, int response_size, unsigned int timeout) { struct gb_operation *operation; int ret; if ((response_size && !response) || (request_size && !request)) return -EINVAL; operation = gb_operation_create(connection, type, request_size, response_size, GFP_KERNEL); if (!operation) return -ENOMEM; if (request_size) memcpy(operation->request->payload, request, request_size); ret = gb_operation_request_send_sync_timeout(operation, timeout); if (ret) { dev_err(&connection->hd->dev, "%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n", connection->name, operation->id, type, ret); } else { if (response_size) { memcpy(response, operation->response->payload, response_size); } } gb_operation_put(operation); return ret; } EXPORT_SYMBOL_GPL(gb_operation_sync_timeout); /** * gb_operation_unidirectional_timeout() - initiate a unidirectional operation * @connection: connection to use * @type: type of operation to send * @request: memory buffer to copy the request from * @request_size: size of @request * @timeout: send timeout in milliseconds * * Initiate a unidirectional operation by sending a request message and * waiting for it to be acknowledged as sent by the host device. * * Note that successful send of a unidirectional operation does not imply that * the request as actually reached the remote end of the connection. */ int gb_operation_unidirectional_timeout(struct gb_connection *connection, int type, void *request, int request_size, unsigned int timeout) { struct gb_operation *operation; int ret; if (request_size && !request) return -EINVAL; operation = gb_operation_create_flags(connection, type, request_size, 0, GB_OPERATION_FLAG_UNIDIRECTIONAL, GFP_KERNEL); if (!operation) return -ENOMEM; if (request_size) memcpy(operation->request->payload, request, request_size); ret = gb_operation_request_send_sync_timeout(operation, timeout); if (ret) { dev_err(&connection->hd->dev, "%s: unidirectional operation of type 0x%02x failed: %d\n", connection->name, type, ret); } gb_operation_put(operation); return ret; } EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout); int __init gb_operation_init(void) { gb_message_cache = kmem_cache_create("gb_message_cache", sizeof(struct gb_message), 0, 0, NULL); if (!gb_message_cache) return -ENOMEM; gb_operation_cache = kmem_cache_create("gb_operation_cache", sizeof(struct gb_operation), 0, 0, NULL); if (!gb_operation_cache) goto err_destroy_message_cache; gb_operation_completion_wq = alloc_workqueue("greybus_completion", 0, 0); if (!gb_operation_completion_wq) goto err_destroy_operation_cache; return 0; err_destroy_operation_cache: kmem_cache_destroy(gb_operation_cache); gb_operation_cache = NULL; err_destroy_message_cache: kmem_cache_destroy(gb_message_cache); gb_message_cache = NULL; return -ENOMEM; } void gb_operation_exit(void) { destroy_workqueue(gb_operation_completion_wq); gb_operation_completion_wq = NULL; kmem_cache_destroy(gb_operation_cache); gb_operation_cache = NULL; kmem_cache_destroy(gb_message_cache); gb_message_cache = NULL; }
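/*
 * Illustrative sketch only, not part of the driver: the two static
 * translation tables above, gb_operation_errno_map() and
 * gb_operation_status_map(), are inverses for every errno the response
 * path can legitimately report, so a result mapped onto the wire and
 * back again is unchanged.  The example_ name is hypothetical and,
 * because both helpers are static, such a check could only live in
 * this file.
 */
static void example_check_status_roundtrip(void)
{
	static const int errnos[] = {
		0, -EINTR, -ETIMEDOUT, -ENOMEM, -EPROTONOSUPPORT,
		-EMSGSIZE, -EINVAL, -EAGAIN, -EILSEQ, -ENODEV, -EIO,
	};
	size_t i;

	for (i = 0; i < ARRAY_SIZE(errnos); i++)
		WARN_ON(gb_operation_status_map(gb_operation_errno_map(errnos[i])) !=
			errnos[i]);
}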
linux-master
drivers/greybus/operation.c
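A minimal usage sketch for the synchronous helpers exported at the end of operation.c above. Everything prefixed example_ (the wire structures, the 0x42 operation type, the 1000 ms timeout, and where the connection pointer comes from) is a hypothetical placeholder; only gb_operation_sync_timeout() and gb_operation_unidirectional_timeout() come from the file itself, and a real protocol defines its own payload layouts elsewhere.

#include <linux/greybus.h>	/* assumed to declare the gb_operation_* helpers used below */

/* Hypothetical wire format for illustration only. */
struct example_query_request {
	__le32 value;
} __packed;

struct example_query_response {
	__le32 result;
} __packed;

#define EXAMPLE_TYPE_QUERY	0x42	/* hypothetical operation type */
#define EXAMPLE_TIMEOUT_MS	1000

/* Send one request and sleep until the matching response arrives (or -ETIMEDOUT). */
static int example_query(struct gb_connection *connection, u32 value, u32 *result)
{
	struct example_query_request request;
	struct example_query_response response;
	int ret;

	request.value = cpu_to_le32(value);

	ret = gb_operation_sync_timeout(connection, EXAMPLE_TYPE_QUERY,
					&request, sizeof(request),
					&response, sizeof(response),
					EXAMPLE_TIMEOUT_MS);
	if (ret)
		return ret;	/* already a negative errno, see gb_operation_status_map() */

	*result = le32_to_cpu(response.result);
	return 0;
}

/* Fire-and-forget variant: only waits for the host device to accept the send. */
static int example_notify(struct gb_connection *connection, u32 value)
{
	struct example_query_request request = {
		.value = cpu_to_le32(value),
	};

	return gb_operation_unidirectional_timeout(connection, EXAMPLE_TYPE_QUERY,
						   &request, sizeof(request),
						   EXAMPLE_TIMEOUT_MS);
}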
// SPDX-License-Identifier: GPL-2.0 /* * Greybus Module code * * Copyright 2016 Google Inc. * Copyright 2016 Linaro Ltd. */ #include <linux/greybus.h> #include "greybus_trace.h" static ssize_t eject_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct gb_module *module = to_gb_module(dev); struct gb_interface *intf; size_t i; long val; int ret; ret = kstrtol(buf, 0, &val); if (ret) return ret; if (!val) return len; for (i = 0; i < module->num_interfaces; ++i) { intf = module->interfaces[i]; mutex_lock(&intf->mutex); /* Set flag to prevent concurrent activation. */ intf->ejected = true; gb_interface_disable(intf); gb_interface_deactivate(intf); mutex_unlock(&intf->mutex); } /* Tell the SVC to eject the primary interface. */ ret = gb_svc_intf_eject(module->hd->svc, module->module_id); if (ret) return ret; return len; } static DEVICE_ATTR_WO(eject); static ssize_t module_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_module *module = to_gb_module(dev); return sprintf(buf, "%u\n", module->module_id); } static DEVICE_ATTR_RO(module_id); static ssize_t num_interfaces_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_module *module = to_gb_module(dev); return sprintf(buf, "%zu\n", module->num_interfaces); } static DEVICE_ATTR_RO(num_interfaces); static struct attribute *module_attrs[] = { &dev_attr_eject.attr, &dev_attr_module_id.attr, &dev_attr_num_interfaces.attr, NULL, }; ATTRIBUTE_GROUPS(module); static void gb_module_release(struct device *dev) { struct gb_module *module = to_gb_module(dev); trace_gb_module_release(module); kfree(module); } struct device_type greybus_module_type = { .name = "greybus_module", .release = gb_module_release, }; struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id, size_t num_interfaces) { struct gb_interface *intf; struct gb_module *module; int i; module = kzalloc(struct_size(module, interfaces, num_interfaces), GFP_KERNEL); if (!module) return NULL; module->hd = hd; module->module_id = module_id; module->num_interfaces = num_interfaces; module->dev.parent = &hd->dev; module->dev.bus = &greybus_bus_type; module->dev.type = &greybus_module_type; module->dev.groups = module_groups; module->dev.dma_mask = hd->dev.dma_mask; device_initialize(&module->dev); dev_set_name(&module->dev, "%d-%u", hd->bus_id, module_id); trace_gb_module_create(module); for (i = 0; i < num_interfaces; ++i) { intf = gb_interface_create(module, module_id + i); if (!intf) { dev_err(&module->dev, "failed to create interface %u\n", module_id + i); goto err_put_interfaces; } module->interfaces[i] = intf; } return module; err_put_interfaces: for (--i; i >= 0; --i) gb_interface_put(module->interfaces[i]); put_device(&module->dev); return NULL; } /* * Register and enable an interface after first attempting to activate it. 
*/ static void gb_module_register_interface(struct gb_interface *intf) { struct gb_module *module = intf->module; u8 intf_id = intf->interface_id; int ret; mutex_lock(&intf->mutex); ret = gb_interface_activate(intf); if (ret) { if (intf->type != GB_INTERFACE_TYPE_DUMMY) { dev_err(&module->dev, "failed to activate interface %u: %d\n", intf_id, ret); } gb_interface_add(intf); goto err_unlock; } ret = gb_interface_add(intf); if (ret) goto err_interface_deactivate; ret = gb_interface_enable(intf); if (ret) { dev_err(&module->dev, "failed to enable interface %u: %d\n", intf_id, ret); goto err_interface_deactivate; } mutex_unlock(&intf->mutex); return; err_interface_deactivate: gb_interface_deactivate(intf); err_unlock: mutex_unlock(&intf->mutex); } static void gb_module_deregister_interface(struct gb_interface *intf) { /* Mark as disconnected to prevent I/O during disable. */ if (intf->module->disconnected) intf->disconnected = true; mutex_lock(&intf->mutex); intf->removed = true; gb_interface_disable(intf); gb_interface_deactivate(intf); mutex_unlock(&intf->mutex); gb_interface_del(intf); } /* Register a module and its interfaces. */ int gb_module_add(struct gb_module *module) { size_t i; int ret; ret = device_add(&module->dev); if (ret) { dev_err(&module->dev, "failed to register module: %d\n", ret); return ret; } trace_gb_module_add(module); for (i = 0; i < module->num_interfaces; ++i) gb_module_register_interface(module->interfaces[i]); return 0; } /* Deregister a module and its interfaces. */ void gb_module_del(struct gb_module *module) { size_t i; for (i = 0; i < module->num_interfaces; ++i) gb_module_deregister_interface(module->interfaces[i]); trace_gb_module_del(module); device_del(&module->dev); } void gb_module_put(struct gb_module *module) { size_t i; for (i = 0; i < module->num_interfaces; ++i) gb_interface_put(module->interfaces[i]); put_device(&module->dev); }
linux-master
drivers/greybus/module.c
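gb_module_create() above sizes its single allocation with struct_size() so the trailing interfaces[] flexible array is covered by one kzalloc(). A small standalone sketch of that pattern, using a hypothetical demo_module structure rather than the real struct gb_module:

#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>

/* Hypothetical container with a flexible array member, for illustration. */
struct demo_module {
	size_t num_items;
	void *items[];	/* sized at allocation time, like gb_module's interfaces[] */
};

static struct demo_module *demo_module_alloc(size_t num_items, gfp_t gfp)
{
	struct demo_module *m;

	/*
	 * struct_size(m, items, num_items) evaluates to
	 * sizeof(*m) + num_items * sizeof(m->items[0]) with overflow
	 * checking, which is why gb_module_create() uses it instead of
	 * open-coded arithmetic.
	 */
	m = kzalloc(struct_size(m, items, num_items), gfp);
	if (!m)
		return NULL;

	m->num_items = num_items;
	return m;
}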
// SPDX-License-Identifier: GPL-2.0 /* * Greybus CPort control protocol. * * Copyright 2015 Google Inc. * Copyright 2015 Linaro Ltd. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/greybus.h> /* Highest control-protocol version supported */ #define GB_CONTROL_VERSION_MAJOR 0 #define GB_CONTROL_VERSION_MINOR 1 static int gb_control_get_version(struct gb_control *control) { struct gb_interface *intf = control->connection->intf; struct gb_control_version_request request; struct gb_control_version_response response; int ret; request.major = GB_CONTROL_VERSION_MAJOR; request.minor = GB_CONTROL_VERSION_MINOR; ret = gb_operation_sync(control->connection, GB_CONTROL_TYPE_VERSION, &request, sizeof(request), &response, sizeof(response)); if (ret) { dev_err(&intf->dev, "failed to get control-protocol version: %d\n", ret); return ret; } if (response.major > request.major) { dev_err(&intf->dev, "unsupported major control-protocol version (%u > %u)\n", response.major, request.major); return -ENOTSUPP; } control->protocol_major = response.major; control->protocol_minor = response.minor; dev_dbg(&intf->dev, "%s - %u.%u\n", __func__, response.major, response.minor); return 0; } static int gb_control_get_bundle_version(struct gb_control *control, struct gb_bundle *bundle) { struct gb_interface *intf = control->connection->intf; struct gb_control_bundle_version_request request; struct gb_control_bundle_version_response response; int ret; request.bundle_id = bundle->id; ret = gb_operation_sync(control->connection, GB_CONTROL_TYPE_BUNDLE_VERSION, &request, sizeof(request), &response, sizeof(response)); if (ret) { dev_err(&intf->dev, "failed to get bundle %u class version: %d\n", bundle->id, ret); return ret; } bundle->class_major = response.major; bundle->class_minor = response.minor; dev_dbg(&intf->dev, "%s - %u: %u.%u\n", __func__, bundle->id, response.major, response.minor); return 0; } int gb_control_get_bundle_versions(struct gb_control *control) { struct gb_interface *intf = control->connection->intf; struct gb_bundle *bundle; int ret; if (!control->has_bundle_version) return 0; list_for_each_entry(bundle, &intf->bundles, links) { ret = gb_control_get_bundle_version(control, bundle); if (ret) return ret; } return 0; } /* Get Manifest's size from the interface */ int gb_control_get_manifest_size_operation(struct gb_interface *intf) { struct gb_control_get_manifest_size_response response; struct gb_connection *connection = intf->control->connection; int ret; ret = gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST_SIZE, NULL, 0, &response, sizeof(response)); if (ret) { dev_err(&connection->intf->dev, "failed to get manifest size: %d\n", ret); return ret; } return le16_to_cpu(response.size); } /* Reads Manifest from the interface */ int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest, size_t size) { struct gb_connection *connection = intf->control->connection; return gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST, NULL, 0, manifest, size); } int gb_control_connected_operation(struct gb_control *control, u16 cport_id) { struct gb_control_connected_request request; request.cport_id = cpu_to_le16(cport_id); return gb_operation_sync(control->connection, GB_CONTROL_TYPE_CONNECTED, &request, sizeof(request), NULL, 0); } int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id) { struct gb_control_disconnected_request request; request.cport_id = cpu_to_le16(cport_id); return 
gb_operation_sync(control->connection, GB_CONTROL_TYPE_DISCONNECTED, &request, sizeof(request), NULL, 0); } int gb_control_disconnecting_operation(struct gb_control *control, u16 cport_id) { struct gb_control_disconnecting_request *request; struct gb_operation *operation; int ret; operation = gb_operation_create_core(control->connection, GB_CONTROL_TYPE_DISCONNECTING, sizeof(*request), 0, 0, GFP_KERNEL); if (!operation) return -ENOMEM; request = operation->request->payload; request->cport_id = cpu_to_le16(cport_id); ret = gb_operation_request_send_sync(operation); if (ret) { dev_err(&control->dev, "failed to send disconnecting: %d\n", ret); } gb_operation_put(operation); return ret; } int gb_control_mode_switch_operation(struct gb_control *control) { struct gb_operation *operation; int ret; operation = gb_operation_create_core(control->connection, GB_CONTROL_TYPE_MODE_SWITCH, 0, 0, GB_OPERATION_FLAG_UNIDIRECTIONAL, GFP_KERNEL); if (!operation) return -ENOMEM; ret = gb_operation_request_send_sync(operation); if (ret) dev_err(&control->dev, "failed to send mode switch: %d\n", ret); gb_operation_put(operation); return ret; } static int gb_control_bundle_pm_status_map(u8 status) { switch (status) { case GB_CONTROL_BUNDLE_PM_INVAL: return -EINVAL; case GB_CONTROL_BUNDLE_PM_BUSY: return -EBUSY; case GB_CONTROL_BUNDLE_PM_NA: return -ENOMSG; case GB_CONTROL_BUNDLE_PM_FAIL: default: return -EREMOTEIO; } } int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id) { struct gb_control_bundle_pm_request request; struct gb_control_bundle_pm_response response; int ret; request.bundle_id = bundle_id; ret = gb_operation_sync(control->connection, GB_CONTROL_TYPE_BUNDLE_SUSPEND, &request, sizeof(request), &response, sizeof(response)); if (ret) { dev_err(&control->dev, "failed to send bundle %u suspend: %d\n", bundle_id, ret); return ret; } if (response.status != GB_CONTROL_BUNDLE_PM_OK) { dev_err(&control->dev, "failed to suspend bundle %u: %d\n", bundle_id, response.status); return gb_control_bundle_pm_status_map(response.status); } return 0; } int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id) { struct gb_control_bundle_pm_request request; struct gb_control_bundle_pm_response response; int ret; request.bundle_id = bundle_id; ret = gb_operation_sync(control->connection, GB_CONTROL_TYPE_BUNDLE_RESUME, &request, sizeof(request), &response, sizeof(response)); if (ret) { dev_err(&control->dev, "failed to send bundle %u resume: %d\n", bundle_id, ret); return ret; } if (response.status != GB_CONTROL_BUNDLE_PM_OK) { dev_err(&control->dev, "failed to resume bundle %u: %d\n", bundle_id, response.status); return gb_control_bundle_pm_status_map(response.status); } return 0; } int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id) { struct gb_control_bundle_pm_request request; struct gb_control_bundle_pm_response response; int ret; request.bundle_id = bundle_id; ret = gb_operation_sync(control->connection, GB_CONTROL_TYPE_BUNDLE_DEACTIVATE, &request, sizeof(request), &response, sizeof(response)); if (ret) { dev_err(&control->dev, "failed to send bundle %u deactivate: %d\n", bundle_id, ret); return ret; } if (response.status != GB_CONTROL_BUNDLE_PM_OK) { dev_err(&control->dev, "failed to deactivate bundle %u: %d\n", bundle_id, response.status); return gb_control_bundle_pm_status_map(response.status); } return 0; } int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id) { struct gb_control_bundle_pm_request request; struct 
gb_control_bundle_pm_response response; int ret; if (!control->has_bundle_activate) return 0; request.bundle_id = bundle_id; ret = gb_operation_sync(control->connection, GB_CONTROL_TYPE_BUNDLE_ACTIVATE, &request, sizeof(request), &response, sizeof(response)); if (ret) { dev_err(&control->dev, "failed to send bundle %u activate: %d\n", bundle_id, ret); return ret; } if (response.status != GB_CONTROL_BUNDLE_PM_OK) { dev_err(&control->dev, "failed to activate bundle %u: %d\n", bundle_id, response.status); return gb_control_bundle_pm_status_map(response.status); } return 0; } static int gb_control_interface_pm_status_map(u8 status) { switch (status) { case GB_CONTROL_INTF_PM_BUSY: return -EBUSY; case GB_CONTROL_INTF_PM_NA: return -ENOMSG; default: return -EREMOTEIO; } } int gb_control_interface_suspend_prepare(struct gb_control *control) { struct gb_control_intf_pm_response response; int ret; ret = gb_operation_sync(control->connection, GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE, NULL, 0, &response, sizeof(response)); if (ret) { dev_err(&control->dev, "failed to send interface suspend prepare: %d\n", ret); return ret; } if (response.status != GB_CONTROL_INTF_PM_OK) { dev_err(&control->dev, "interface error while preparing suspend: %d\n", response.status); return gb_control_interface_pm_status_map(response.status); } return 0; } int gb_control_interface_deactivate_prepare(struct gb_control *control) { struct gb_control_intf_pm_response response; int ret; ret = gb_operation_sync(control->connection, GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE, NULL, 0, &response, sizeof(response)); if (ret) { dev_err(&control->dev, "failed to send interface deactivate prepare: %d\n", ret); return ret; } if (response.status != GB_CONTROL_INTF_PM_OK) { dev_err(&control->dev, "interface error while preparing deactivate: %d\n", response.status); return gb_control_interface_pm_status_map(response.status); } return 0; } int gb_control_interface_hibernate_abort(struct gb_control *control) { struct gb_control_intf_pm_response response; int ret; ret = gb_operation_sync(control->connection, GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT, NULL, 0, &response, sizeof(response)); if (ret) { dev_err(&control->dev, "failed to send interface aborting hibernate: %d\n", ret); return ret; } if (response.status != GB_CONTROL_INTF_PM_OK) { dev_err(&control->dev, "interface error while aborting hibernate: %d\n", response.status); return gb_control_interface_pm_status_map(response.status); } return 0; } static ssize_t vendor_string_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_control *control = to_gb_control(dev); return scnprintf(buf, PAGE_SIZE, "%s\n", control->vendor_string); } static DEVICE_ATTR_RO(vendor_string); static ssize_t product_string_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_control *control = to_gb_control(dev); return scnprintf(buf, PAGE_SIZE, "%s\n", control->product_string); } static DEVICE_ATTR_RO(product_string); static struct attribute *control_attrs[] = { &dev_attr_vendor_string.attr, &dev_attr_product_string.attr, NULL, }; ATTRIBUTE_GROUPS(control); static void gb_control_release(struct device *dev) { struct gb_control *control = to_gb_control(dev); gb_connection_destroy(control->connection); kfree(control->vendor_string); kfree(control->product_string); kfree(control); } struct device_type greybus_control_type = { .name = "greybus_control", .release = gb_control_release, }; struct gb_control *gb_control_create(struct gb_interface *intf) { struct gb_connection 
*connection; struct gb_control *control; control = kzalloc(sizeof(*control), GFP_KERNEL); if (!control) return ERR_PTR(-ENOMEM); control->intf = intf; connection = gb_connection_create_control(intf); if (IS_ERR(connection)) { dev_err(&intf->dev, "failed to create control connection: %ld\n", PTR_ERR(connection)); kfree(control); return ERR_CAST(connection); } control->connection = connection; control->dev.parent = &intf->dev; control->dev.bus = &greybus_bus_type; control->dev.type = &greybus_control_type; control->dev.groups = control_groups; control->dev.dma_mask = intf->dev.dma_mask; device_initialize(&control->dev); dev_set_name(&control->dev, "%s.ctrl", dev_name(&intf->dev)); gb_connection_set_data(control->connection, control); return control; } int gb_control_enable(struct gb_control *control) { int ret; dev_dbg(&control->connection->intf->dev, "%s\n", __func__); ret = gb_connection_enable_tx(control->connection); if (ret) { dev_err(&control->connection->intf->dev, "failed to enable control connection: %d\n", ret); return ret; } ret = gb_control_get_version(control); if (ret) goto err_disable_connection; if (control->protocol_major > 0 || control->protocol_minor > 1) control->has_bundle_version = true; /* FIXME: use protocol version instead */ if (!(control->intf->quirks & GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE)) control->has_bundle_activate = true; return 0; err_disable_connection: gb_connection_disable(control->connection); return ret; } void gb_control_disable(struct gb_control *control) { dev_dbg(&control->connection->intf->dev, "%s\n", __func__); if (control->intf->disconnected) gb_connection_disable_forced(control->connection); else gb_connection_disable(control->connection); } int gb_control_suspend(struct gb_control *control) { gb_connection_disable(control->connection); return 0; } int gb_control_resume(struct gb_control *control) { int ret; ret = gb_connection_enable_tx(control->connection); if (ret) { dev_err(&control->connection->intf->dev, "failed to enable control connection: %d\n", ret); return ret; } return 0; } int gb_control_add(struct gb_control *control) { int ret; ret = device_add(&control->dev); if (ret) { dev_err(&control->dev, "failed to register control device: %d\n", ret); return ret; } return 0; } void gb_control_del(struct gb_control *control) { if (device_is_registered(&control->dev)) device_del(&control->dev); } struct gb_control *gb_control_get(struct gb_control *control) { get_device(&control->dev); return control; } void gb_control_put(struct gb_control *control) { put_device(&control->dev); } void gb_control_mode_switch_prepare(struct gb_control *control) { gb_connection_mode_switch_prepare(control->connection); } void gb_control_mode_switch_complete(struct gb_control *control) { gb_connection_mode_switch_complete(control->connection); }
linux-master
drivers/greybus/control.c
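The four bundle power-management calls in control.c above (suspend, resume, deactivate, activate) share one shape: fill a one-byte request, run gb_operation_sync(), then translate any non-OK status byte through gb_control_bundle_pm_status_map(). Below is a hedged sketch of that common shape as a single helper; the helper name is hypothetical, the real file simply open-codes the body in each function, and because it leans on statics it would have to live in control.c itself.

/* Hypothetical refactoring sketch of the shared bundle-PM request shape. */
static int gb_control_bundle_pm_op(struct gb_control *control, u8 bundle_id,
				   int type, const char *name)
{
	struct gb_control_bundle_pm_request request;
	struct gb_control_bundle_pm_response response;
	int ret;

	request.bundle_id = bundle_id;
	ret = gb_operation_sync(control->connection, type,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&control->dev, "failed to send bundle %u %s: %d\n",
			bundle_id, name, ret);
		return ret;
	}

	if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
		dev_err(&control->dev, "failed to %s bundle %u: %d\n",
			name, bundle_id, response.status);
		return gb_control_bundle_pm_status_map(response.status);
	}

	return 0;
}

With such a helper, gb_control_bundle_suspend() would reduce to a one-line call such as gb_control_bundle_pm_op(control, bundle_id, GB_CONTROL_TYPE_BUNDLE_SUSPEND, "suspend"), and likewise for resume, deactivate and activate.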
// SPDX-License-Identifier: GPL-2.0 /* * Greybus "Core" * * Copyright 2014-2015 Google Inc. * Copyright 2014-2015 Linaro Ltd. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define CREATE_TRACE_POINTS #include <linux/greybus.h> #include "greybus_trace.h" #define GB_BUNDLE_AUTOSUSPEND_MS 3000 /* Allow greybus to be disabled at boot if needed */ static bool nogreybus; #ifdef MODULE module_param(nogreybus, bool, 0444); #else core_param(nogreybus, nogreybus, bool, 0444); #endif int greybus_disabled(void) { return nogreybus; } EXPORT_SYMBOL_GPL(greybus_disabled); static bool greybus_match_one_id(struct gb_bundle *bundle, const struct greybus_bundle_id *id) { if ((id->match_flags & GREYBUS_ID_MATCH_VENDOR) && (id->vendor != bundle->intf->vendor_id)) return false; if ((id->match_flags & GREYBUS_ID_MATCH_PRODUCT) && (id->product != bundle->intf->product_id)) return false; if ((id->match_flags & GREYBUS_ID_MATCH_CLASS) && (id->class != bundle->class)) return false; return true; } static const struct greybus_bundle_id * greybus_match_id(struct gb_bundle *bundle, const struct greybus_bundle_id *id) { if (!id) return NULL; for (; id->vendor || id->product || id->class || id->driver_info; id++) { if (greybus_match_one_id(bundle, id)) return id; } return NULL; } static int greybus_match_device(struct device *dev, struct device_driver *drv) { struct greybus_driver *driver = to_greybus_driver(drv); struct gb_bundle *bundle; const struct greybus_bundle_id *id; if (!is_gb_bundle(dev)) return 0; bundle = to_gb_bundle(dev); id = greybus_match_id(bundle, driver->id_table); if (id) return 1; /* FIXME - Dynamic ids? */ return 0; } static int greybus_uevent(const struct device *dev, struct kobj_uevent_env *env) { const struct gb_host_device *hd; const struct gb_module *module = NULL; const struct gb_interface *intf = NULL; const struct gb_control *control = NULL; const struct gb_bundle *bundle = NULL; const struct gb_svc *svc = NULL; if (is_gb_host_device(dev)) { hd = to_gb_host_device(dev); } else if (is_gb_module(dev)) { module = to_gb_module(dev); hd = module->hd; } else if (is_gb_interface(dev)) { intf = to_gb_interface(dev); module = intf->module; hd = intf->hd; } else if (is_gb_control(dev)) { control = to_gb_control(dev); intf = control->intf; module = intf->module; hd = intf->hd; } else if (is_gb_bundle(dev)) { bundle = to_gb_bundle(dev); intf = bundle->intf; module = intf->module; hd = intf->hd; } else if (is_gb_svc(dev)) { svc = to_gb_svc(dev); hd = svc->hd; } else { dev_WARN(dev, "uevent for unknown greybus device \"type\"!\n"); return -EINVAL; } if (add_uevent_var(env, "BUS=%u", hd->bus_id)) return -ENOMEM; if (module) { if (add_uevent_var(env, "MODULE=%u", module->module_id)) return -ENOMEM; } if (intf) { if (add_uevent_var(env, "INTERFACE=%u", intf->interface_id)) return -ENOMEM; if (add_uevent_var(env, "GREYBUS_ID=%08x/%08x", intf->vendor_id, intf->product_id)) return -ENOMEM; } if (bundle) { // FIXME // add a uevent that can "load" a bundle type // This is what we need to bind a driver to so use the info // in gmod here as well if (add_uevent_var(env, "BUNDLE=%u", bundle->id)) return -ENOMEM; if (add_uevent_var(env, "BUNDLE_CLASS=%02x", bundle->class)) return -ENOMEM; } return 0; } static void greybus_shutdown(struct device *dev) { if (is_gb_host_device(dev)) { struct gb_host_device *hd; hd = to_gb_host_device(dev); gb_hd_shutdown(hd); } } struct bus_type greybus_bus_type = { .name = "greybus", .match = greybus_match_device, .uevent = greybus_uevent, .shutdown = greybus_shutdown, }; static int 
greybus_probe(struct device *dev) { struct greybus_driver *driver = to_greybus_driver(dev->driver); struct gb_bundle *bundle = to_gb_bundle(dev); const struct greybus_bundle_id *id; int retval; /* match id */ id = greybus_match_id(bundle, driver->id_table); if (!id) return -ENODEV; retval = pm_runtime_get_sync(&bundle->intf->dev); if (retval < 0) { pm_runtime_put_noidle(&bundle->intf->dev); return retval; } retval = gb_control_bundle_activate(bundle->intf->control, bundle->id); if (retval) { pm_runtime_put(&bundle->intf->dev); return retval; } /* * Unbound bundle devices are always deactivated. During probe, the * Runtime PM is set to enabled and active and the usage count is * incremented. If the driver supports runtime PM, it should call * pm_runtime_put() in its probe routine and pm_runtime_get_sync() * in remove routine. */ pm_runtime_set_autosuspend_delay(dev, GB_BUNDLE_AUTOSUSPEND_MS); pm_runtime_use_autosuspend(dev); pm_runtime_get_noresume(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); retval = driver->probe(bundle, id); if (retval) { /* * Catch buggy drivers that fail to destroy their connections. */ WARN_ON(!list_empty(&bundle->connections)); gb_control_bundle_deactivate(bundle->intf->control, bundle->id); pm_runtime_disable(dev); pm_runtime_set_suspended(dev); pm_runtime_put_noidle(dev); pm_runtime_dont_use_autosuspend(dev); pm_runtime_put(&bundle->intf->dev); return retval; } pm_runtime_put(&bundle->intf->dev); return 0; } static int greybus_remove(struct device *dev) { struct greybus_driver *driver = to_greybus_driver(dev->driver); struct gb_bundle *bundle = to_gb_bundle(dev); struct gb_connection *connection; int retval; retval = pm_runtime_get_sync(dev); if (retval < 0) dev_err(dev, "failed to resume bundle: %d\n", retval); /* * Disable (non-offloaded) connections early in case the interface is * already gone to avoid unceccessary operation timeouts during * driver disconnect. Otherwise, only disable incoming requests. */ list_for_each_entry(connection, &bundle->connections, bundle_links) { if (gb_connection_is_offloaded(connection)) continue; if (bundle->intf->disconnected) gb_connection_disable_forced(connection); else gb_connection_disable_rx(connection); } driver->disconnect(bundle); /* Catch buggy drivers that fail to destroy their connections. 
*/ WARN_ON(!list_empty(&bundle->connections)); if (!bundle->intf->disconnected) gb_control_bundle_deactivate(bundle->intf->control, bundle->id); pm_runtime_put_noidle(dev); pm_runtime_disable(dev); pm_runtime_set_suspended(dev); pm_runtime_dont_use_autosuspend(dev); pm_runtime_put_noidle(dev); return 0; } int greybus_register_driver(struct greybus_driver *driver, struct module *owner, const char *mod_name) { int retval; if (greybus_disabled()) return -ENODEV; driver->driver.bus = &greybus_bus_type; driver->driver.name = driver->name; driver->driver.probe = greybus_probe; driver->driver.remove = greybus_remove; driver->driver.owner = owner; driver->driver.mod_name = mod_name; retval = driver_register(&driver->driver); if (retval) return retval; pr_info("registered new driver %s\n", driver->name); return 0; } EXPORT_SYMBOL_GPL(greybus_register_driver); void greybus_deregister_driver(struct greybus_driver *driver) { driver_unregister(&driver->driver); } EXPORT_SYMBOL_GPL(greybus_deregister_driver); static int __init gb_init(void) { int retval; if (greybus_disabled()) return -ENODEV; BUILD_BUG_ON(CPORT_ID_MAX >= (long)CPORT_ID_BAD); gb_debugfs_init(); retval = bus_register(&greybus_bus_type); if (retval) { pr_err("bus_register failed (%d)\n", retval); goto error_bus; } retval = gb_hd_init(); if (retval) { pr_err("gb_hd_init failed (%d)\n", retval); goto error_hd; } retval = gb_operation_init(); if (retval) { pr_err("gb_operation_init failed (%d)\n", retval); goto error_operation; } return 0; /* Success */ error_operation: gb_hd_exit(); error_hd: bus_unregister(&greybus_bus_type); error_bus: gb_debugfs_cleanup(); return retval; } module_init(gb_init); static void __exit gb_exit(void) { gb_operation_exit(); gb_hd_exit(); bus_unregister(&greybus_bus_type); gb_debugfs_cleanup(); tracepoint_synchronize_unregister(); } module_exit(gb_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Greg Kroah-Hartman <[email protected]>");
linux-master
drivers/greybus/core.c
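To show how the bus glue in core.c above is consumed, here is a hedged sketch of a minimal bundle driver registration. The example_ names and the 0x7f class value are hypothetical; the greybus_driver fields, the id-table matching rules, and greybus_register_driver()/greybus_deregister_driver() are the pieces exercised by greybus_match_id(), greybus_probe() and greybus_remove() above.

#include <linux/greybus.h>
#include <linux/module.h>

#define EXAMPLE_CLASS	0x7f	/* hypothetical bundle class */

static const struct greybus_bundle_id example_id_table[] = {
	{
		.match_flags	= GREYBUS_ID_MATCH_CLASS,
		.class		= EXAMPLE_CLASS,
	},
	{ }	/* all-zero sentinel terminates greybus_match_id()'s loop */
};

static int example_probe(struct gb_bundle *bundle,
			 const struct greybus_bundle_id *id)
{
	/*
	 * Create the bundle's connections here.  A runtime-PM aware driver
	 * would also call pm_runtime_put() before returning, as the comment
	 * in greybus_probe() suggests.
	 */
	return 0;
}

static void example_disconnect(struct gb_bundle *bundle)
{
	/*
	 * Destroy every connection created in probe; greybus_remove()
	 * WARNs if any are left on bundle->connections.
	 */
}

static struct greybus_driver example_driver = {
	.name		= "example",
	.probe		= example_probe,
	.disconnect	= example_disconnect,
	.id_table	= example_id_table,
};

static int __init example_init(void)
{
	return greybus_register_driver(&example_driver, THIS_MODULE,
				       KBUILD_MODNAME);
}
module_init(example_init);

static void __exit example_exit(void)
{
	greybus_deregister_driver(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");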
// SPDX-License-Identifier: GPL-2.0 /* * Greybus connections * * Copyright 2014 Google Inc. * Copyright 2014 Linaro Ltd. */ #include <linux/workqueue.h> #include <linux/greybus.h> #include "greybus_trace.h" #define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT 1000 static void gb_connection_kref_release(struct kref *kref); static DEFINE_SPINLOCK(gb_connections_lock); static DEFINE_MUTEX(gb_connection_mutex); /* Caller holds gb_connection_mutex. */ static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id) { struct gb_host_device *hd = intf->hd; struct gb_connection *connection; list_for_each_entry(connection, &hd->connections, hd_links) { if (connection->intf == intf && connection->intf_cport_id == cport_id) return true; } return false; } static void gb_connection_get(struct gb_connection *connection) { kref_get(&connection->kref); trace_gb_connection_get(connection); } static void gb_connection_put(struct gb_connection *connection) { trace_gb_connection_put(connection); kref_put(&connection->kref, gb_connection_kref_release); } /* * Returns a reference-counted pointer to the connection if found. */ static struct gb_connection * gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id) { struct gb_connection *connection; unsigned long flags; spin_lock_irqsave(&gb_connections_lock, flags); list_for_each_entry(connection, &hd->connections, hd_links) if (connection->hd_cport_id == cport_id) { gb_connection_get(connection); goto found; } connection = NULL; found: spin_unlock_irqrestore(&gb_connections_lock, flags); return connection; } /* * Callback from the host driver to let us know that data has been * received on the bundle. */ void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id, u8 *data, size_t length) { struct gb_connection *connection; trace_gb_hd_in(hd); connection = gb_connection_hd_find(hd, cport_id); if (!connection) { dev_err(&hd->dev, "nonexistent connection (%zu bytes dropped)\n", length); return; } gb_connection_recv(connection, data, length); gb_connection_put(connection); } EXPORT_SYMBOL_GPL(greybus_data_rcvd); static void gb_connection_kref_release(struct kref *kref) { struct gb_connection *connection; connection = container_of(kref, struct gb_connection, kref); trace_gb_connection_release(connection); kfree(connection); } static void gb_connection_init_name(struct gb_connection *connection) { u16 hd_cport_id = connection->hd_cport_id; u16 cport_id = 0; u8 intf_id = 0; if (connection->intf) { intf_id = connection->intf->interface_id; cport_id = connection->intf_cport_id; } snprintf(connection->name, sizeof(connection->name), "%u/%u:%u", hd_cport_id, intf_id, cport_id); } /* * _gb_connection_create() - create a Greybus connection * @hd: host device of the connection * @hd_cport_id: host-device cport id, or -1 for dynamic allocation * @intf: remote interface, or NULL for static connections * @bundle: remote-interface bundle (may be NULL) * @cport_id: remote-interface cport id, or 0 for static connections * @handler: request handler (may be NULL) * @flags: connection flags * * Create a Greybus connection, representing the bidirectional link * between a CPort on a (local) Greybus host device and a CPort on * another Greybus interface. * * A connection also maintains the state of operations sent over the * connection. * * Serialised against concurrent create and destroy using the * gb_connection_mutex. * * Return: A pointer to the new connection if successful, or an ERR_PTR * otherwise. 
*/ static struct gb_connection * _gb_connection_create(struct gb_host_device *hd, int hd_cport_id, struct gb_interface *intf, struct gb_bundle *bundle, int cport_id, gb_request_handler_t handler, unsigned long flags) { struct gb_connection *connection; int ret; mutex_lock(&gb_connection_mutex); if (intf && gb_connection_cport_in_use(intf, cport_id)) { dev_err(&intf->dev, "cport %u already in use\n", cport_id); ret = -EBUSY; goto err_unlock; } ret = gb_hd_cport_allocate(hd, hd_cport_id, flags); if (ret < 0) { dev_err(&hd->dev, "failed to allocate cport: %d\n", ret); goto err_unlock; } hd_cport_id = ret; connection = kzalloc(sizeof(*connection), GFP_KERNEL); if (!connection) { ret = -ENOMEM; goto err_hd_cport_release; } connection->hd_cport_id = hd_cport_id; connection->intf_cport_id = cport_id; connection->hd = hd; connection->intf = intf; connection->bundle = bundle; connection->handler = handler; connection->flags = flags; if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES)) connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL; connection->state = GB_CONNECTION_STATE_DISABLED; atomic_set(&connection->op_cycle, 0); mutex_init(&connection->mutex); spin_lock_init(&connection->lock); INIT_LIST_HEAD(&connection->operations); connection->wq = alloc_ordered_workqueue("%s:%d", 0, dev_name(&hd->dev), hd_cport_id); if (!connection->wq) { ret = -ENOMEM; goto err_free_connection; } kref_init(&connection->kref); gb_connection_init_name(connection); spin_lock_irq(&gb_connections_lock); list_add(&connection->hd_links, &hd->connections); if (bundle) list_add(&connection->bundle_links, &bundle->connections); else INIT_LIST_HEAD(&connection->bundle_links); spin_unlock_irq(&gb_connections_lock); mutex_unlock(&gb_connection_mutex); trace_gb_connection_create(connection); return connection; err_free_connection: kfree(connection); err_hd_cport_release: gb_hd_cport_release(hd, hd_cport_id); err_unlock: mutex_unlock(&gb_connection_mutex); return ERR_PTR(ret); } struct gb_connection * gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id, gb_request_handler_t handler) { return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler, GB_CONNECTION_FLAG_HIGH_PRIO); } struct gb_connection * gb_connection_create_control(struct gb_interface *intf) { return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL, GB_CONNECTION_FLAG_CONTROL | GB_CONNECTION_FLAG_HIGH_PRIO); } struct gb_connection * gb_connection_create(struct gb_bundle *bundle, u16 cport_id, gb_request_handler_t handler) { struct gb_interface *intf = bundle->intf; return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id, handler, 0); } EXPORT_SYMBOL_GPL(gb_connection_create); struct gb_connection * gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id, gb_request_handler_t handler, unsigned long flags) { struct gb_interface *intf = bundle->intf; if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK)) flags &= ~GB_CONNECTION_FLAG_CORE_MASK; return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id, handler, flags); } EXPORT_SYMBOL_GPL(gb_connection_create_flags); struct gb_connection * gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id, unsigned long flags) { flags |= GB_CONNECTION_FLAG_OFFLOADED; return gb_connection_create_flags(bundle, cport_id, NULL, flags); } EXPORT_SYMBOL_GPL(gb_connection_create_offloaded); static int gb_connection_hd_cport_enable(struct gb_connection *connection) { struct gb_host_device *hd = connection->hd; int ret; if (!hd->driver->cport_enable) 
return 0; ret = hd->driver->cport_enable(hd, connection->hd_cport_id, connection->flags); if (ret) { dev_err(&hd->dev, "%s: failed to enable host cport: %d\n", connection->name, ret); return ret; } return 0; } static void gb_connection_hd_cport_disable(struct gb_connection *connection) { struct gb_host_device *hd = connection->hd; int ret; if (!hd->driver->cport_disable) return; ret = hd->driver->cport_disable(hd, connection->hd_cport_id); if (ret) { dev_err(&hd->dev, "%s: failed to disable host cport: %d\n", connection->name, ret); } } static int gb_connection_hd_cport_connected(struct gb_connection *connection) { struct gb_host_device *hd = connection->hd; int ret; if (!hd->driver->cport_connected) return 0; ret = hd->driver->cport_connected(hd, connection->hd_cport_id); if (ret) { dev_err(&hd->dev, "%s: failed to set connected state: %d\n", connection->name, ret); return ret; } return 0; } static int gb_connection_hd_cport_flush(struct gb_connection *connection) { struct gb_host_device *hd = connection->hd; int ret; if (!hd->driver->cport_flush) return 0; ret = hd->driver->cport_flush(hd, connection->hd_cport_id); if (ret) { dev_err(&hd->dev, "%s: failed to flush host cport: %d\n", connection->name, ret); return ret; } return 0; } static int gb_connection_hd_cport_quiesce(struct gb_connection *connection) { struct gb_host_device *hd = connection->hd; size_t peer_space; int ret; if (!hd->driver->cport_quiesce) return 0; peer_space = sizeof(struct gb_operation_msg_hdr) + sizeof(struct gb_cport_shutdown_request); if (connection->mode_switch) peer_space += sizeof(struct gb_operation_msg_hdr); ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id, peer_space, GB_CONNECTION_CPORT_QUIESCE_TIMEOUT); if (ret) { dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n", connection->name, ret); return ret; } return 0; } static int gb_connection_hd_cport_clear(struct gb_connection *connection) { struct gb_host_device *hd = connection->hd; int ret; if (!hd->driver->cport_clear) return 0; ret = hd->driver->cport_clear(hd, connection->hd_cport_id); if (ret) { dev_err(&hd->dev, "%s: failed to clear host cport: %d\n", connection->name, ret); return ret; } return 0; } /* * Request the SVC to create a connection from AP's cport to interface's * cport. */ static int gb_connection_svc_connection_create(struct gb_connection *connection) { struct gb_host_device *hd = connection->hd; struct gb_interface *intf; u8 cport_flags; int ret; if (gb_connection_is_static(connection)) return 0; intf = connection->intf; /* * Enable either E2EFC or CSD, unless no flow control is requested. 
*/ cport_flags = GB_SVC_CPORT_FLAG_CSV_N; if (gb_connection_flow_control_disabled(connection)) { cport_flags |= GB_SVC_CPORT_FLAG_CSD_N; } else if (gb_connection_e2efc_enabled(connection)) { cport_flags |= GB_SVC_CPORT_FLAG_CSD_N | GB_SVC_CPORT_FLAG_E2EFC; } ret = gb_svc_connection_create(hd->svc, hd->svc->ap_intf_id, connection->hd_cport_id, intf->interface_id, connection->intf_cport_id, cport_flags); if (ret) { dev_err(&connection->hd->dev, "%s: failed to create svc connection: %d\n", connection->name, ret); return ret; } return 0; } static void gb_connection_svc_connection_destroy(struct gb_connection *connection) { if (gb_connection_is_static(connection)) return; gb_svc_connection_destroy(connection->hd->svc, connection->hd->svc->ap_intf_id, connection->hd_cport_id, connection->intf->interface_id, connection->intf_cport_id); } /* Inform Interface about active CPorts */ static int gb_connection_control_connected(struct gb_connection *connection) { struct gb_control *control; u16 cport_id = connection->intf_cport_id; int ret; if (gb_connection_is_static(connection)) return 0; if (gb_connection_is_control(connection)) return 0; control = connection->intf->control; ret = gb_control_connected_operation(control, cport_id); if (ret) { dev_err(&connection->bundle->dev, "failed to connect cport: %d\n", ret); return ret; } return 0; } static void gb_connection_control_disconnecting(struct gb_connection *connection) { struct gb_control *control; u16 cport_id = connection->intf_cport_id; int ret; if (gb_connection_is_static(connection)) return; control = connection->intf->control; ret = gb_control_disconnecting_operation(control, cport_id); if (ret) { dev_err(&connection->hd->dev, "%s: failed to send disconnecting: %d\n", connection->name, ret); } } static void gb_connection_control_disconnected(struct gb_connection *connection) { struct gb_control *control; u16 cport_id = connection->intf_cport_id; int ret; if (gb_connection_is_static(connection)) return; control = connection->intf->control; if (gb_connection_is_control(connection)) { if (connection->mode_switch) { ret = gb_control_mode_switch_operation(control); if (ret) { /* * Allow mode switch to time out waiting for * mailbox event. 
*/ return; } } return; } ret = gb_control_disconnected_operation(control, cport_id); if (ret) { dev_warn(&connection->bundle->dev, "failed to disconnect cport: %d\n", ret); } } static int gb_connection_shutdown_operation(struct gb_connection *connection, u8 phase) { struct gb_cport_shutdown_request *req; struct gb_operation *operation; int ret; operation = gb_operation_create_core(connection, GB_REQUEST_TYPE_CPORT_SHUTDOWN, sizeof(*req), 0, 0, GFP_KERNEL); if (!operation) return -ENOMEM; req = operation->request->payload; req->phase = phase; ret = gb_operation_request_send_sync(operation); gb_operation_put(operation); return ret; } static int gb_connection_cport_shutdown(struct gb_connection *connection, u8 phase) { struct gb_host_device *hd = connection->hd; const struct gb_hd_driver *drv = hd->driver; int ret; if (gb_connection_is_static(connection)) return 0; if (gb_connection_is_offloaded(connection)) { if (!drv->cport_shutdown) return 0; ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase, GB_OPERATION_TIMEOUT_DEFAULT); } else { ret = gb_connection_shutdown_operation(connection, phase); } if (ret) { dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n", connection->name, phase, ret); return ret; } return 0; } static int gb_connection_cport_shutdown_phase_1(struct gb_connection *connection) { return gb_connection_cport_shutdown(connection, 1); } static int gb_connection_cport_shutdown_phase_2(struct gb_connection *connection) { return gb_connection_cport_shutdown(connection, 2); } /* * Cancel all active operations on a connection. * * Locking: Called with connection lock held and state set to DISABLED or * DISCONNECTING. */ static void gb_connection_cancel_operations(struct gb_connection *connection, int errno) __must_hold(&connection->lock) { struct gb_operation *operation; while (!list_empty(&connection->operations)) { operation = list_last_entry(&connection->operations, struct gb_operation, links); gb_operation_get(operation); spin_unlock_irq(&connection->lock); if (gb_operation_is_incoming(operation)) gb_operation_cancel_incoming(operation, errno); else gb_operation_cancel(operation, errno); gb_operation_put(operation); spin_lock_irq(&connection->lock); } } /* * Cancel all active incoming operations on a connection. * * Locking: Called with connection lock held and state set to ENABLED_TX. */ static void gb_connection_flush_incoming_operations(struct gb_connection *connection, int errno) __must_hold(&connection->lock) { struct gb_operation *operation; bool incoming; while (!list_empty(&connection->operations)) { incoming = false; list_for_each_entry(operation, &connection->operations, links) { if (gb_operation_is_incoming(operation)) { gb_operation_get(operation); incoming = true; break; } } if (!incoming) break; spin_unlock_irq(&connection->lock); /* FIXME: flush, not cancel? */ gb_operation_cancel_incoming(operation, errno); gb_operation_put(operation); spin_lock_irq(&connection->lock); } } /* * _gb_connection_enable() - enable a connection * @connection: connection to enable * @rx: whether to enable incoming requests * * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and * ENABLED_TX->ENABLED state transitions. * * Locking: Caller holds connection->mutex. */ static int _gb_connection_enable(struct gb_connection *connection, bool rx) { int ret; /* Handle ENABLED_TX -> ENABLED transitions. 
*/ if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) { if (!(connection->handler && rx)) return 0; spin_lock_irq(&connection->lock); connection->state = GB_CONNECTION_STATE_ENABLED; spin_unlock_irq(&connection->lock); return 0; } ret = gb_connection_hd_cport_enable(connection); if (ret) return ret; ret = gb_connection_svc_connection_create(connection); if (ret) goto err_hd_cport_clear; ret = gb_connection_hd_cport_connected(connection); if (ret) goto err_svc_connection_destroy; spin_lock_irq(&connection->lock); if (connection->handler && rx) connection->state = GB_CONNECTION_STATE_ENABLED; else connection->state = GB_CONNECTION_STATE_ENABLED_TX; spin_unlock_irq(&connection->lock); ret = gb_connection_control_connected(connection); if (ret) goto err_control_disconnecting; return 0; err_control_disconnecting: spin_lock_irq(&connection->lock); connection->state = GB_CONNECTION_STATE_DISCONNECTING; gb_connection_cancel_operations(connection, -ESHUTDOWN); spin_unlock_irq(&connection->lock); /* Transmit queue should already be empty. */ gb_connection_hd_cport_flush(connection); gb_connection_control_disconnecting(connection); gb_connection_cport_shutdown_phase_1(connection); gb_connection_hd_cport_quiesce(connection); gb_connection_cport_shutdown_phase_2(connection); gb_connection_control_disconnected(connection); connection->state = GB_CONNECTION_STATE_DISABLED; err_svc_connection_destroy: gb_connection_svc_connection_destroy(connection); err_hd_cport_clear: gb_connection_hd_cport_clear(connection); gb_connection_hd_cport_disable(connection); return ret; } int gb_connection_enable(struct gb_connection *connection) { int ret = 0; mutex_lock(&connection->mutex); if (connection->state == GB_CONNECTION_STATE_ENABLED) goto out_unlock; ret = _gb_connection_enable(connection, true); if (!ret) trace_gb_connection_enable(connection); out_unlock: mutex_unlock(&connection->mutex); return ret; } EXPORT_SYMBOL_GPL(gb_connection_enable); int gb_connection_enable_tx(struct gb_connection *connection) { int ret = 0; mutex_lock(&connection->mutex); if (connection->state == GB_CONNECTION_STATE_ENABLED) { ret = -EINVAL; goto out_unlock; } if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) goto out_unlock; ret = _gb_connection_enable(connection, false); if (!ret) trace_gb_connection_enable(connection); out_unlock: mutex_unlock(&connection->mutex); return ret; } EXPORT_SYMBOL_GPL(gb_connection_enable_tx); void gb_connection_disable_rx(struct gb_connection *connection) { mutex_lock(&connection->mutex); spin_lock_irq(&connection->lock); if (connection->state != GB_CONNECTION_STATE_ENABLED) { spin_unlock_irq(&connection->lock); goto out_unlock; } connection->state = GB_CONNECTION_STATE_ENABLED_TX; gb_connection_flush_incoming_operations(connection, -ESHUTDOWN); spin_unlock_irq(&connection->lock); trace_gb_connection_disable(connection); out_unlock: mutex_unlock(&connection->mutex); } EXPORT_SYMBOL_GPL(gb_connection_disable_rx); void gb_connection_mode_switch_prepare(struct gb_connection *connection) { connection->mode_switch = true; } void gb_connection_mode_switch_complete(struct gb_connection *connection) { gb_connection_svc_connection_destroy(connection); gb_connection_hd_cport_clear(connection); gb_connection_hd_cport_disable(connection); connection->mode_switch = false; } void gb_connection_disable(struct gb_connection *connection) { mutex_lock(&connection->mutex); if (connection->state == GB_CONNECTION_STATE_DISABLED) goto out_unlock; trace_gb_connection_disable(connection); 
spin_lock_irq(&connection->lock); connection->state = GB_CONNECTION_STATE_DISCONNECTING; gb_connection_cancel_operations(connection, -ESHUTDOWN); spin_unlock_irq(&connection->lock); gb_connection_hd_cport_flush(connection); gb_connection_control_disconnecting(connection); gb_connection_cport_shutdown_phase_1(connection); gb_connection_hd_cport_quiesce(connection); gb_connection_cport_shutdown_phase_2(connection); gb_connection_control_disconnected(connection); connection->state = GB_CONNECTION_STATE_DISABLED; /* control-connection tear down is deferred when mode switching */ if (!connection->mode_switch) { gb_connection_svc_connection_destroy(connection); gb_connection_hd_cport_clear(connection); gb_connection_hd_cport_disable(connection); } out_unlock: mutex_unlock(&connection->mutex); } EXPORT_SYMBOL_GPL(gb_connection_disable); /* Disable a connection without communicating with the remote end. */ void gb_connection_disable_forced(struct gb_connection *connection) { mutex_lock(&connection->mutex); if (connection->state == GB_CONNECTION_STATE_DISABLED) goto out_unlock; trace_gb_connection_disable(connection); spin_lock_irq(&connection->lock); connection->state = GB_CONNECTION_STATE_DISABLED; gb_connection_cancel_operations(connection, -ESHUTDOWN); spin_unlock_irq(&connection->lock); gb_connection_hd_cport_flush(connection); gb_connection_svc_connection_destroy(connection); gb_connection_hd_cport_clear(connection); gb_connection_hd_cport_disable(connection); out_unlock: mutex_unlock(&connection->mutex); } EXPORT_SYMBOL_GPL(gb_connection_disable_forced); /* Caller must have disabled the connection before destroying it. */ void gb_connection_destroy(struct gb_connection *connection) { if (!connection) return; if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED)) gb_connection_disable(connection); mutex_lock(&gb_connection_mutex); spin_lock_irq(&gb_connections_lock); list_del(&connection->bundle_links); list_del(&connection->hd_links); spin_unlock_irq(&gb_connections_lock); destroy_workqueue(connection->wq); gb_hd_cport_release(connection->hd, connection->hd_cport_id); connection->hd_cport_id = CPORT_ID_BAD; mutex_unlock(&gb_connection_mutex); gb_connection_put(connection); } EXPORT_SYMBOL_GPL(gb_connection_destroy); void gb_connection_latency_tag_enable(struct gb_connection *connection) { struct gb_host_device *hd = connection->hd; int ret; if (!hd->driver->latency_tag_enable) return; ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id); if (ret) { dev_err(&connection->hd->dev, "%s: failed to enable latency tag: %d\n", connection->name, ret); } } EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable); void gb_connection_latency_tag_disable(struct gb_connection *connection) { struct gb_host_device *hd = connection->hd; int ret; if (!hd->driver->latency_tag_disable) return; ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id); if (ret) { dev_err(&connection->hd->dev, "%s: failed to disable latency tag: %d\n", connection->name, ret); } } EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);
linux-master
drivers/greybus/connection.c
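/*
 * Illustrative sketch, not part of connection.c: how a bundle driver is
 * expected to consume the connection API above.  The probe/disconnect and
 * handler names, and the use of the bundle's first CPort descriptor, are
 * examples only; the gb_connection_* calls and types come from
 * <linux/greybus.h>.
 */
static int example_request_handler(struct gb_operation *op)
{
	/* This sketch accepts no incoming requests. */
	return -EOPNOTSUPP;
}

static int example_probe(struct gb_bundle *bundle,
			 const struct greybus_bundle_id *id)
{
	struct gb_connection *connection;
	int ret;

	if (bundle->num_cports < 1)
		return -ENODEV;

	/* Allocate a host CPort and pair it with the bundle's first CPort. */
	connection = gb_connection_create(bundle,
					  le16_to_cpu(bundle->cport_desc[0].id),
					  example_request_handler);
	if (IS_ERR(connection))
		return PTR_ERR(connection);

	/* DISABLED -> ENABLED: host CPort, SVC connection, control connected. */
	ret = gb_connection_enable(connection);
	if (ret) {
		gb_connection_destroy(connection);
		return ret;
	}

	greybus_set_drvdata(bundle, connection);

	return 0;
}

static void example_disconnect(struct gb_bundle *bundle)
{
	struct gb_connection *connection = greybus_get_drvdata(bundle);

	gb_connection_disable(connection);
	gb_connection_destroy(connection);
}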
// SPDX-License-Identifier: GPL-2.0 /* * Greybus manifest parsing * * Copyright 2014-2015 Google Inc. * Copyright 2014-2015 Linaro Ltd. */ #include <linux/greybus.h> static const char *get_descriptor_type_string(u8 type) { switch (type) { case GREYBUS_TYPE_INVALID: return "invalid"; case GREYBUS_TYPE_STRING: return "string"; case GREYBUS_TYPE_INTERFACE: return "interface"; case GREYBUS_TYPE_CPORT: return "cport"; case GREYBUS_TYPE_BUNDLE: return "bundle"; default: WARN_ON(1); return "unknown"; } } /* * We scan the manifest once to identify where all the descriptors * are. The result is a list of these manifest_desc structures. We * then pick through them for what we're looking for (starting with * the interface descriptor). As each is processed we remove it from * the list. When we're done the list should (probably) be empty. */ struct manifest_desc { struct list_head links; size_t size; void *data; enum greybus_descriptor_type type; }; static void release_manifest_descriptor(struct manifest_desc *descriptor) { list_del(&descriptor->links); kfree(descriptor); } static void release_manifest_descriptors(struct gb_interface *intf) { struct manifest_desc *descriptor; struct manifest_desc *next; list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links) release_manifest_descriptor(descriptor); } static void release_cport_descriptors(struct list_head *head, u8 bundle_id) { struct manifest_desc *desc, *tmp; struct greybus_descriptor_cport *desc_cport; list_for_each_entry_safe(desc, tmp, head, links) { desc_cport = desc->data; if (desc->type != GREYBUS_TYPE_CPORT) continue; if (desc_cport->bundle == bundle_id) release_manifest_descriptor(desc); } } static struct manifest_desc *get_next_bundle_desc(struct gb_interface *intf) { struct manifest_desc *descriptor; struct manifest_desc *next; list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links) if (descriptor->type == GREYBUS_TYPE_BUNDLE) return descriptor; return NULL; } /* * Validate the given descriptor. Its reported size must fit within * the number of bytes remaining, and it must have a recognized * type. Check that the reported size is at least as big as what * we expect to see. (It could be bigger, perhaps for a new version * of the format.) * * Returns the (non-zero) number of bytes consumed by the descriptor, * or a negative errno. 
*/ static int identify_descriptor(struct gb_interface *intf, struct greybus_descriptor *desc, size_t size) { struct greybus_descriptor_header *desc_header = &desc->header; struct manifest_desc *descriptor; size_t desc_size; size_t expected_size; if (size < sizeof(*desc_header)) { dev_err(&intf->dev, "manifest too small (%zu < %zu)\n", size, sizeof(*desc_header)); return -EINVAL; /* Must at least have header */ } desc_size = le16_to_cpu(desc_header->size); if (desc_size > size) { dev_err(&intf->dev, "descriptor too big (%zu > %zu)\n", desc_size, size); return -EINVAL; } /* Descriptor needs to at least have a header */ expected_size = sizeof(*desc_header); switch (desc_header->type) { case GREYBUS_TYPE_STRING: expected_size += sizeof(struct greybus_descriptor_string); expected_size += desc->string.length; /* String descriptors are padded to 4 byte boundaries */ expected_size = ALIGN(expected_size, 4); break; case GREYBUS_TYPE_INTERFACE: expected_size += sizeof(struct greybus_descriptor_interface); break; case GREYBUS_TYPE_BUNDLE: expected_size += sizeof(struct greybus_descriptor_bundle); break; case GREYBUS_TYPE_CPORT: expected_size += sizeof(struct greybus_descriptor_cport); break; case GREYBUS_TYPE_INVALID: default: dev_err(&intf->dev, "invalid descriptor type (%u)\n", desc_header->type); return -EINVAL; } if (desc_size < expected_size) { dev_err(&intf->dev, "%s descriptor too small (%zu < %zu)\n", get_descriptor_type_string(desc_header->type), desc_size, expected_size); return -EINVAL; } /* Descriptor bigger than what we expect */ if (desc_size > expected_size) { dev_warn(&intf->dev, "%s descriptor size mismatch (want %zu got %zu)\n", get_descriptor_type_string(desc_header->type), expected_size, desc_size); } descriptor = kzalloc(sizeof(*descriptor), GFP_KERNEL); if (!descriptor) return -ENOMEM; descriptor->size = desc_size; descriptor->data = (char *)desc + sizeof(*desc_header); descriptor->type = desc_header->type; list_add_tail(&descriptor->links, &intf->manifest_descs); /* desc_size is positive and is known to fit in a signed int */ return desc_size; } /* * Find the string descriptor having the given id, validate it, and * allocate a duplicate copy of it. The duplicate has an extra byte * which guarantees the returned string is NUL-terminated. * * String index 0 is valid (it represents "no string"), and for * that a null pointer is returned. * * Otherwise returns a pointer to a newly-allocated copy of the * descriptor string, or an error-coded pointer on failure. */ static char *gb_string_get(struct gb_interface *intf, u8 string_id) { struct greybus_descriptor_string *desc_string; struct manifest_desc *descriptor; bool found = false; char *string; /* A zero string id means no string (but no error) */ if (!string_id) return NULL; list_for_each_entry(descriptor, &intf->manifest_descs, links) { if (descriptor->type != GREYBUS_TYPE_STRING) continue; desc_string = descriptor->data; if (desc_string->id == string_id) { found = true; break; } } if (!found) return ERR_PTR(-ENOENT); /* Allocate an extra byte so we can guarantee it's NUL-terminated */ string = kmemdup(&desc_string->string, desc_string->length + 1, GFP_KERNEL); if (!string) return ERR_PTR(-ENOMEM); string[desc_string->length] = '\0'; /* Ok we've used this string, so we're done with it */ release_manifest_descriptor(descriptor); return string; } /* * Find cport descriptors in the manifest associated with the given * bundle, and set up data structures for the functions that use * them. 
Returns the number of cports set up for the bundle, or 0 * if there is an error. */ static u32 gb_manifest_parse_cports(struct gb_bundle *bundle) { struct gb_interface *intf = bundle->intf; struct greybus_descriptor_cport *desc_cport; struct manifest_desc *desc, *next, *tmp; LIST_HEAD(list); u8 bundle_id = bundle->id; u16 cport_id; u32 count = 0; int i; /* Set up all cport descriptors associated with this bundle */ list_for_each_entry_safe(desc, next, &intf->manifest_descs, links) { if (desc->type != GREYBUS_TYPE_CPORT) continue; desc_cport = desc->data; if (desc_cport->bundle != bundle_id) continue; cport_id = le16_to_cpu(desc_cport->id); if (cport_id > CPORT_ID_MAX) goto exit; /* Nothing else should have its cport_id as control cport id */ if (cport_id == GB_CONTROL_CPORT_ID) { dev_err(&bundle->dev, "invalid cport id found (%02u)\n", cport_id); goto exit; } /* * Found one, move it to our temporary list after checking for * duplicates. */ list_for_each_entry(tmp, &list, links) { desc_cport = tmp->data; if (cport_id == le16_to_cpu(desc_cport->id)) { dev_err(&bundle->dev, "duplicate CPort %u found\n", cport_id); goto exit; } } list_move_tail(&desc->links, &list); count++; } if (!count) return 0; bundle->cport_desc = kcalloc(count, sizeof(*bundle->cport_desc), GFP_KERNEL); if (!bundle->cport_desc) goto exit; bundle->num_cports = count; i = 0; list_for_each_entry_safe(desc, next, &list, links) { desc_cport = desc->data; memcpy(&bundle->cport_desc[i++], desc_cport, sizeof(*desc_cport)); /* Release the cport descriptor */ release_manifest_descriptor(desc); } return count; exit: release_cport_descriptors(&list, bundle_id); /* * Free all cports for this bundle to avoid 'excess descriptors' * warnings. */ release_cport_descriptors(&intf->manifest_descs, bundle_id); return 0; /* Error; count should also be 0 */ } /* * Find bundle descriptors in the manifest and set up their data * structures. Returns the number of bundles set up for the * given interface. */ static u32 gb_manifest_parse_bundles(struct gb_interface *intf) { struct manifest_desc *desc; struct gb_bundle *bundle; struct gb_bundle *bundle_next; u32 count = 0; u8 bundle_id; u8 class; while ((desc = get_next_bundle_desc(intf))) { struct greybus_descriptor_bundle *desc_bundle; /* Found one. Set up its bundle structure*/ desc_bundle = desc->data; bundle_id = desc_bundle->id; class = desc_bundle->class; /* Done with this bundle descriptor */ release_manifest_descriptor(desc); /* Ignore any legacy control bundles */ if (bundle_id == GB_CONTROL_BUNDLE_ID) { dev_dbg(&intf->dev, "%s - ignoring control bundle\n", __func__); release_cport_descriptors(&intf->manifest_descs, bundle_id); continue; } /* Nothing else should have its class set to control class */ if (class == GREYBUS_CLASS_CONTROL) { dev_err(&intf->dev, "bundle %u cannot use control class\n", bundle_id); goto cleanup; } bundle = gb_bundle_create(intf, bundle_id, class); if (!bundle) goto cleanup; /* * Now go set up this bundle's functions and cports. * * A 'bundle' represents a device in greybus. It may require * multiple cports for its functioning. If we fail to setup any * cport of a bundle, we better reject the complete bundle as * the device may not be able to function properly then. * * But, failing to setup a cport of bundle X doesn't mean that * the device corresponding to bundle Y will not work properly. * Bundles should be treated as separate independent devices. 
* * While parsing manifest for an interface, treat bundles as * separate entities and don't reject entire interface and its * bundles on failing to initialize a cport. But make sure the * bundle which needs the cport, gets destroyed properly. */ if (!gb_manifest_parse_cports(bundle)) { gb_bundle_destroy(bundle); continue; } count++; } return count; cleanup: /* An error occurred; undo any changes we've made */ list_for_each_entry_safe(bundle, bundle_next, &intf->bundles, links) { gb_bundle_destroy(bundle); count--; } return 0; /* Error; count should also be 0 */ } static bool gb_manifest_parse_interface(struct gb_interface *intf, struct manifest_desc *interface_desc) { struct greybus_descriptor_interface *desc_intf = interface_desc->data; struct gb_control *control = intf->control; char *str; /* Handle the strings first--they can fail */ str = gb_string_get(intf, desc_intf->vendor_stringid); if (IS_ERR(str)) return false; control->vendor_string = str; str = gb_string_get(intf, desc_intf->product_stringid); if (IS_ERR(str)) goto out_free_vendor_string; control->product_string = str; /* Assign feature flags communicated via manifest */ intf->features = desc_intf->features; /* Release the interface descriptor, now that we're done with it */ release_manifest_descriptor(interface_desc); /* An interface must have at least one bundle descriptor */ if (!gb_manifest_parse_bundles(intf)) { dev_err(&intf->dev, "manifest bundle descriptors not valid\n"); goto out_err; } return true; out_err: kfree(control->product_string); control->product_string = NULL; out_free_vendor_string: kfree(control->vendor_string); control->vendor_string = NULL; return false; } /* * Parse a buffer containing an interface manifest. * * If we find anything wrong with the content/format of the buffer * we reject it. * * The first requirement is that the manifest's version is * one we can parse. * * We make an initial pass through the buffer and identify all of * the descriptors it contains, keeping track for each its type * and the location size of its data in the buffer. * * Next we scan the descriptors, looking for an interface descriptor; * there must be exactly one of those. When found, we record the * information it contains, and then remove that descriptor (and any * string descriptors it refers to) from further consideration. * * After that we look for the interface's bundles--there must be at * least one of those. * * Returns true if parsing was successful, false otherwise. 
*/ bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size) { struct greybus_manifest *manifest; struct greybus_manifest_header *header; struct greybus_descriptor *desc; struct manifest_desc *descriptor; struct manifest_desc *interface_desc = NULL; u16 manifest_size; u32 found = 0; bool result; /* Manifest descriptor list should be empty here */ if (WARN_ON(!list_empty(&intf->manifest_descs))) return false; /* we have to have at _least_ the manifest header */ if (size < sizeof(*header)) { dev_err(&intf->dev, "short manifest (%zu < %zu)\n", size, sizeof(*header)); return false; } /* Make sure the size is right */ manifest = data; header = &manifest->header; manifest_size = le16_to_cpu(header->size); if (manifest_size != size) { dev_err(&intf->dev, "manifest size mismatch (%zu != %u)\n", size, manifest_size); return false; } /* Validate major/minor number */ if (header->version_major > GREYBUS_VERSION_MAJOR) { dev_err(&intf->dev, "manifest version too new (%u.%u > %u.%u)\n", header->version_major, header->version_minor, GREYBUS_VERSION_MAJOR, GREYBUS_VERSION_MINOR); return false; } /* OK, find all the descriptors */ desc = manifest->descriptors; size -= sizeof(*header); while (size) { int desc_size; desc_size = identify_descriptor(intf, desc, size); if (desc_size < 0) { result = false; goto out; } desc = (struct greybus_descriptor *)((char *)desc + desc_size); size -= desc_size; } /* There must be a single interface descriptor */ list_for_each_entry(descriptor, &intf->manifest_descs, links) { if (descriptor->type == GREYBUS_TYPE_INTERFACE) if (!found++) interface_desc = descriptor; } if (found != 1) { dev_err(&intf->dev, "manifest must have 1 interface descriptor (%u found)\n", found); result = false; goto out; } /* Parse the manifest, starting with the interface descriptor */ result = gb_manifest_parse_interface(intf, interface_desc); /* * We really should have no remaining descriptors, but we * don't know what newer format manifests might leave. */ if (result && !list_empty(&intf->manifest_descs)) dev_info(&intf->dev, "excess descriptors in interface manifest\n"); out: release_manifest_descriptors(intf); return result; }
linux-master
drivers/greybus/manifest.c
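/*
 * Illustrative sketch, not part of manifest.c: a minimal, hand-built manifest
 * of the shape gb_manifest_parse() accepts -- one interface descriptor, one
 * bundle descriptor and one CPort descriptor, no string descriptors.  The
 * bundle class, bundle id and CPort id are arbitrary example values; the
 * structure and field names come from <linux/greybus/greybus_manifest.h>,
 * and each descriptor's size field must match what identify_descriptor()
 * computes for its type.
 */
struct example_manifest {
	struct greybus_manifest_header		header;
	struct greybus_descriptor_header	intf_hdr;
	struct greybus_descriptor_interface	intf;
	struct greybus_descriptor_header	bundle_hdr;
	struct greybus_descriptor_bundle	bundle;
	struct greybus_descriptor_header	cport_hdr;
	struct greybus_descriptor_cport		cport;
} __packed;

static const struct example_manifest example_manifest __maybe_unused = {
	.header = {
		.size		= cpu_to_le16(sizeof(struct example_manifest)),
		.version_major	= GREYBUS_VERSION_MAJOR,
		.version_minor	= GREYBUS_VERSION_MINOR,
	},
	.intf_hdr = {
		.size	= cpu_to_le16(sizeof(struct greybus_descriptor_header) +
				      sizeof(struct greybus_descriptor_interface)),
		.type	= GREYBUS_TYPE_INTERFACE,
	},
	/* .intf is left zeroed: vendor/product string ids of 0 mean "no string". */
	.bundle_hdr = {
		.size	= cpu_to_le16(sizeof(struct greybus_descriptor_header) +
				      sizeof(struct greybus_descriptor_bundle)),
		.type	= GREYBUS_TYPE_BUNDLE,
	},
	.bundle = {
		.id	= 1,		/* must not be GB_CONTROL_BUNDLE_ID */
		.class	= 0x7f,		/* hypothetical, non-control class */
	},
	.cport_hdr = {
		.size	= cpu_to_le16(sizeof(struct greybus_descriptor_header) +
				      sizeof(struct greybus_descriptor_cport)),
		.type	= GREYBUS_TYPE_CPORT,
	},
	.cport = {
		.id	= cpu_to_le16(1),	/* must not be GB_CONTROL_CPORT_ID */
		.bundle	= 1,			/* refers to the bundle above */
	},
};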
// SPDX-License-Identifier: GPL-2.0 /* * Greybus Host Device * * Copyright 2014-2015 Google Inc. * Copyright 2014-2015 Linaro Ltd. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/greybus.h> #include "greybus_trace.h" EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_create); EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_release); EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_add); EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_del); EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_in); EXPORT_TRACEPOINT_SYMBOL_GPL(gb_message_submit); static struct ida gb_hd_bus_id_map; int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd, bool async) { if (!hd || !hd->driver || !hd->driver->output) return -EINVAL; return hd->driver->output(hd, req, size, cmd, async); } EXPORT_SYMBOL_GPL(gb_hd_output); static ssize_t bus_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_host_device *hd = to_gb_host_device(dev); return sprintf(buf, "%d\n", hd->bus_id); } static DEVICE_ATTR_RO(bus_id); static struct attribute *bus_attrs[] = { &dev_attr_bus_id.attr, NULL }; ATTRIBUTE_GROUPS(bus); int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id) { struct ida *id_map = &hd->cport_id_map; int ret; ret = ida_simple_get(id_map, cport_id, cport_id + 1, GFP_KERNEL); if (ret < 0) { dev_err(&hd->dev, "failed to reserve cport %u\n", cport_id); return ret; } return 0; } EXPORT_SYMBOL_GPL(gb_hd_cport_reserve); void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id) { struct ida *id_map = &hd->cport_id_map; ida_simple_remove(id_map, cport_id); } EXPORT_SYMBOL_GPL(gb_hd_cport_release_reserved); /* Locking: Caller guarantees serialisation */ int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id, unsigned long flags) { struct ida *id_map = &hd->cport_id_map; int ida_start, ida_end; if (hd->driver->cport_allocate) return hd->driver->cport_allocate(hd, cport_id, flags); if (cport_id < 0) { ida_start = 0; ida_end = hd->num_cports; } else if (cport_id < hd->num_cports) { ida_start = cport_id; ida_end = cport_id + 1; } else { dev_err(&hd->dev, "cport %d not available\n", cport_id); return -EINVAL; } return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL); } /* Locking: Caller guarantees serialisation */ void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id) { if (hd->driver->cport_release) { hd->driver->cport_release(hd, cport_id); return; } ida_simple_remove(&hd->cport_id_map, cport_id); } static void gb_hd_release(struct device *dev) { struct gb_host_device *hd = to_gb_host_device(dev); trace_gb_hd_release(hd); if (hd->svc) gb_svc_put(hd->svc); ida_simple_remove(&gb_hd_bus_id_map, hd->bus_id); ida_destroy(&hd->cport_id_map); kfree(hd); } struct device_type greybus_hd_type = { .name = "greybus_host_device", .release = gb_hd_release, }; struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver, struct device *parent, size_t buffer_size_max, size_t num_cports) { struct gb_host_device *hd; int ret; /* * Validate that the driver implements all of the callbacks * so that we don't have to every time we make them. 
*/ if ((!driver->message_send) || (!driver->message_cancel)) { dev_err(parent, "mandatory hd-callbacks missing\n"); return ERR_PTR(-EINVAL); } if (buffer_size_max < GB_OPERATION_MESSAGE_SIZE_MIN) { dev_err(parent, "greybus host-device buffers too small\n"); return ERR_PTR(-EINVAL); } if (num_cports == 0 || num_cports > CPORT_ID_MAX + 1) { dev_err(parent, "Invalid number of CPorts: %zu\n", num_cports); return ERR_PTR(-EINVAL); } /* * Make sure to never allocate messages larger than what the Greybus * protocol supports. */ if (buffer_size_max > GB_OPERATION_MESSAGE_SIZE_MAX) { dev_warn(parent, "limiting buffer size to %u\n", GB_OPERATION_MESSAGE_SIZE_MAX); buffer_size_max = GB_OPERATION_MESSAGE_SIZE_MAX; } hd = kzalloc(sizeof(*hd) + driver->hd_priv_size, GFP_KERNEL); if (!hd) return ERR_PTR(-ENOMEM); ret = ida_simple_get(&gb_hd_bus_id_map, 1, 0, GFP_KERNEL); if (ret < 0) { kfree(hd); return ERR_PTR(ret); } hd->bus_id = ret; hd->driver = driver; INIT_LIST_HEAD(&hd->modules); INIT_LIST_HEAD(&hd->connections); ida_init(&hd->cport_id_map); hd->buffer_size_max = buffer_size_max; hd->num_cports = num_cports; hd->dev.parent = parent; hd->dev.bus = &greybus_bus_type; hd->dev.type = &greybus_hd_type; hd->dev.groups = bus_groups; hd->dev.dma_mask = hd->dev.parent->dma_mask; device_initialize(&hd->dev); dev_set_name(&hd->dev, "greybus%d", hd->bus_id); trace_gb_hd_create(hd); hd->svc = gb_svc_create(hd); if (!hd->svc) { dev_err(&hd->dev, "failed to create svc\n"); put_device(&hd->dev); return ERR_PTR(-ENOMEM); } return hd; } EXPORT_SYMBOL_GPL(gb_hd_create); int gb_hd_add(struct gb_host_device *hd) { int ret; ret = device_add(&hd->dev); if (ret) return ret; ret = gb_svc_add(hd->svc); if (ret) { device_del(&hd->dev); return ret; } trace_gb_hd_add(hd); return 0; } EXPORT_SYMBOL_GPL(gb_hd_add); void gb_hd_del(struct gb_host_device *hd) { trace_gb_hd_del(hd); /* * Tear down the svc and flush any on-going hotplug processing before * removing the remaining interfaces. */ gb_svc_del(hd->svc); device_del(&hd->dev); } EXPORT_SYMBOL_GPL(gb_hd_del); void gb_hd_shutdown(struct gb_host_device *hd) { gb_svc_del(hd->svc); } EXPORT_SYMBOL_GPL(gb_hd_shutdown); void gb_hd_put(struct gb_host_device *hd) { put_device(&hd->dev); } EXPORT_SYMBOL_GPL(gb_hd_put); int __init gb_hd_init(void) { ida_init(&gb_hd_bus_id_map); return 0; } void gb_hd_exit(void) { ida_destroy(&gb_hd_bus_id_map); }
linux-master
drivers/greybus/hd.c
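/*
 * Illustrative sketch, not part of hd.c: the registration sequence a
 * transport (host-controller) driver performs against the interface above.
 * The callbacks are stubs and the buffer/CPort counts are arbitrary example
 * values; only message_send and message_cancel are mandatory, as
 * gb_hd_create() checks.
 */
static int example_message_send(struct gb_host_device *hd, u16 dest_cport_id,
				struct gb_message *message, gfp_t gfp_mask)
{
	/* A real transport queues the message towards dest_cport_id here. */
	return -ENODEV;
}

static void example_message_cancel(struct gb_message *message)
{
	/* A real transport aborts the in-flight transfer here. */
}

static struct gb_hd_driver example_hd_driver = {
	.message_send	= example_message_send,
	.message_cancel	= example_message_cancel,
};

static struct gb_host_device *example_register_hd(struct device *parent)
{
	struct gb_host_device *hd;
	int ret;

	/* 2 KiB message buffers, 16 CPorts: example values only. */
	hd = gb_hd_create(&example_hd_driver, parent, 2048, 16);
	if (IS_ERR(hd))
		return hd;

	ret = gb_hd_add(hd);
	if (ret) {
		gb_hd_put(hd);
		return ERR_PTR(ret);
	}

	return hd;
}

static void example_unregister_hd(struct gb_host_device *hd)
{
	gb_hd_del(hd);
	gb_hd_put(hd);
}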
// SPDX-License-Identifier: GPL-2.0 /* * SVC Greybus "watchdog" driver. * * Copyright 2016 Google Inc. */ #include <linux/delay.h> #include <linux/suspend.h> #include <linux/workqueue.h> #include <linux/greybus.h> #define SVC_WATCHDOG_PERIOD (2 * HZ) struct gb_svc_watchdog { struct delayed_work work; struct gb_svc *svc; bool enabled; struct notifier_block pm_notifier; }; static struct delayed_work reset_work; static int svc_watchdog_pm_notifier(struct notifier_block *notifier, unsigned long pm_event, void *unused) { struct gb_svc_watchdog *watchdog = container_of(notifier, struct gb_svc_watchdog, pm_notifier); switch (pm_event) { case PM_SUSPEND_PREPARE: gb_svc_watchdog_disable(watchdog->svc); break; case PM_POST_SUSPEND: gb_svc_watchdog_enable(watchdog->svc); break; default: break; } return NOTIFY_DONE; } static void greybus_reset(struct work_struct *work) { static char const start_path[] = "/system/bin/start"; static char *envp[] = { "HOME=/", "PATH=/sbin:/vendor/bin:/system/sbin:/system/bin:/system/xbin", NULL, }; static char *argv[] = { (char *)start_path, "unipro_reset", NULL, }; pr_err("svc_watchdog: calling \"%s %s\" to reset greybus network!\n", argv[0], argv[1]); call_usermodehelper(start_path, argv, envp, UMH_WAIT_EXEC); } static void do_work(struct work_struct *work) { struct gb_svc_watchdog *watchdog; struct gb_svc *svc; int retval; watchdog = container_of(work, struct gb_svc_watchdog, work.work); svc = watchdog->svc; dev_dbg(&svc->dev, "%s: ping.\n", __func__); retval = gb_svc_ping(svc); if (retval) { /* * Something went really wrong, let's warn userspace and then * pull the plug and reset the whole greybus network. * We need to do this outside of this workqueue as we will be * tearing down the svc device itself. So queue up * yet-another-callback to do that. */ dev_err(&svc->dev, "SVC ping has returned %d, something is wrong!!!\n", retval); if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL) { panic("SVC is not responding\n"); } else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO) { dev_err(&svc->dev, "Resetting the greybus network, watch out!!!\n"); INIT_DELAYED_WORK(&reset_work, greybus_reset); schedule_delayed_work(&reset_work, HZ / 2); /* * Disable ourselves, we don't want to trip again unless * userspace wants us to. 
*/ watchdog->enabled = false; } } /* resubmit our work to happen again, if we are still "alive" */ if (watchdog->enabled) schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD); } int gb_svc_watchdog_create(struct gb_svc *svc) { struct gb_svc_watchdog *watchdog; int retval; if (svc->watchdog) return 0; watchdog = kmalloc(sizeof(*watchdog), GFP_KERNEL); if (!watchdog) return -ENOMEM; watchdog->enabled = false; watchdog->svc = svc; INIT_DELAYED_WORK(&watchdog->work, do_work); svc->watchdog = watchdog; watchdog->pm_notifier.notifier_call = svc_watchdog_pm_notifier; retval = register_pm_notifier(&watchdog->pm_notifier); if (retval) { dev_err(&svc->dev, "error registering pm notifier(%d)\n", retval); goto svc_watchdog_create_err; } retval = gb_svc_watchdog_enable(svc); if (retval) { dev_err(&svc->dev, "error enabling watchdog (%d)\n", retval); unregister_pm_notifier(&watchdog->pm_notifier); goto svc_watchdog_create_err; } return retval; svc_watchdog_create_err: svc->watchdog = NULL; kfree(watchdog); return retval; } void gb_svc_watchdog_destroy(struct gb_svc *svc) { struct gb_svc_watchdog *watchdog = svc->watchdog; if (!watchdog) return; unregister_pm_notifier(&watchdog->pm_notifier); gb_svc_watchdog_disable(svc); svc->watchdog = NULL; kfree(watchdog); } bool gb_svc_watchdog_enabled(struct gb_svc *svc) { if (!svc || !svc->watchdog) return false; return svc->watchdog->enabled; } int gb_svc_watchdog_enable(struct gb_svc *svc) { struct gb_svc_watchdog *watchdog; if (!svc->watchdog) return -ENODEV; watchdog = svc->watchdog; if (watchdog->enabled) return 0; watchdog->enabled = true; schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD); return 0; } int gb_svc_watchdog_disable(struct gb_svc *svc) { struct gb_svc_watchdog *watchdog; if (!svc->watchdog) return -ENODEV; watchdog = svc->watchdog; if (!watchdog->enabled) return 0; watchdog->enabled = false; cancel_delayed_work_sync(&watchdog->work); return 0; }
linux-master
drivers/greybus/svc_watchdog.c
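/*
 * Illustrative sketch, not part of svc_watchdog.c: pausing the watchdog
 * around an operation that is expected to keep the SVC from answering pings,
 * mirroring what the PM notifier above does across suspend/resume.  The
 * wrapper and its step callback are hypothetical; gb_svc_watchdog_enabled(),
 * gb_svc_watchdog_disable() and gb_svc_watchdog_enable() are the functions
 * defined in this file.
 */
static int example_with_watchdog_paused(struct gb_svc *svc,
					int (*step)(struct gb_svc *svc))
{
	bool was_enabled = gb_svc_watchdog_enabled(svc);
	int ret;

	if (was_enabled)
		gb_svc_watchdog_disable(svc);

	ret = step(svc);

	if (was_enabled)
		gb_svc_watchdog_enable(svc);

	return ret;
}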
// SPDX-License-Identifier: GPL-2.0 /* * Greybus bundles * * Copyright 2014-2015 Google Inc. * Copyright 2014-2015 Linaro Ltd. */ #include <linux/greybus.h> #include "greybus_trace.h" static ssize_t bundle_class_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_bundle *bundle = to_gb_bundle(dev); return sprintf(buf, "0x%02x\n", bundle->class); } static DEVICE_ATTR_RO(bundle_class); static ssize_t bundle_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_bundle *bundle = to_gb_bundle(dev); return sprintf(buf, "%u\n", bundle->id); } static DEVICE_ATTR_RO(bundle_id); static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_bundle *bundle = to_gb_bundle(dev); if (!bundle->state) return sprintf(buf, "\n"); return sprintf(buf, "%s\n", bundle->state); } static ssize_t state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct gb_bundle *bundle = to_gb_bundle(dev); kfree(bundle->state); bundle->state = kstrdup(buf, GFP_KERNEL); if (!bundle->state) return -ENOMEM; /* Tell userspace that the file contents changed */ sysfs_notify(&bundle->dev.kobj, NULL, "state"); return size; } static DEVICE_ATTR_RW(state); static struct attribute *bundle_attrs[] = { &dev_attr_bundle_class.attr, &dev_attr_bundle_id.attr, &dev_attr_state.attr, NULL, }; ATTRIBUTE_GROUPS(bundle); static struct gb_bundle *gb_bundle_find(struct gb_interface *intf, u8 bundle_id) { struct gb_bundle *bundle; list_for_each_entry(bundle, &intf->bundles, links) { if (bundle->id == bundle_id) return bundle; } return NULL; } static void gb_bundle_release(struct device *dev) { struct gb_bundle *bundle = to_gb_bundle(dev); trace_gb_bundle_release(bundle); kfree(bundle->state); kfree(bundle->cport_desc); kfree(bundle); } #ifdef CONFIG_PM static void gb_bundle_disable_all_connections(struct gb_bundle *bundle) { struct gb_connection *connection; list_for_each_entry(connection, &bundle->connections, bundle_links) gb_connection_disable(connection); } static void gb_bundle_enable_all_connections(struct gb_bundle *bundle) { struct gb_connection *connection; list_for_each_entry(connection, &bundle->connections, bundle_links) gb_connection_enable(connection); } static int gb_bundle_suspend(struct device *dev) { struct gb_bundle *bundle = to_gb_bundle(dev); const struct dev_pm_ops *pm = dev->driver->pm; int ret; if (pm && pm->runtime_suspend) { ret = pm->runtime_suspend(&bundle->dev); if (ret) return ret; } else { gb_bundle_disable_all_connections(bundle); } ret = gb_control_bundle_suspend(bundle->intf->control, bundle->id); if (ret) { if (pm && pm->runtime_resume) ret = pm->runtime_resume(dev); else gb_bundle_enable_all_connections(bundle); return ret; } return 0; } static int gb_bundle_resume(struct device *dev) { struct gb_bundle *bundle = to_gb_bundle(dev); const struct dev_pm_ops *pm = dev->driver->pm; int ret; ret = gb_control_bundle_resume(bundle->intf->control, bundle->id); if (ret) return ret; if (pm && pm->runtime_resume) { ret = pm->runtime_resume(dev); if (ret) return ret; } else { gb_bundle_enable_all_connections(bundle); } return 0; } static int gb_bundle_idle(struct device *dev) { pm_runtime_mark_last_busy(dev); pm_request_autosuspend(dev); return 0; } #endif static const struct dev_pm_ops gb_bundle_pm_ops = { SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, gb_bundle_idle) }; struct device_type greybus_bundle_type = { .name = "greybus_bundle", .release = gb_bundle_release, .pm = 
&gb_bundle_pm_ops, }; /* * Create a gb_bundle structure to represent a discovered * bundle. Returns a pointer to the new bundle or a null * pointer if a failure occurs due to memory exhaustion. */ struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id, u8 class) { struct gb_bundle *bundle; if (bundle_id == BUNDLE_ID_NONE) { dev_err(&intf->dev, "can't use bundle id %u\n", bundle_id); return NULL; } /* * Reject any attempt to reuse a bundle id. We initialize * these serially, so there's no need to worry about keeping * the interface bundle list locked here. */ if (gb_bundle_find(intf, bundle_id)) { dev_err(&intf->dev, "duplicate bundle id %u\n", bundle_id); return NULL; } bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); if (!bundle) return NULL; bundle->intf = intf; bundle->id = bundle_id; bundle->class = class; INIT_LIST_HEAD(&bundle->connections); bundle->dev.parent = &intf->dev; bundle->dev.bus = &greybus_bus_type; bundle->dev.type = &greybus_bundle_type; bundle->dev.groups = bundle_groups; bundle->dev.dma_mask = intf->dev.dma_mask; device_initialize(&bundle->dev); dev_set_name(&bundle->dev, "%s.%d", dev_name(&intf->dev), bundle_id); list_add(&bundle->links, &intf->bundles); trace_gb_bundle_create(bundle); return bundle; } int gb_bundle_add(struct gb_bundle *bundle) { int ret; ret = device_add(&bundle->dev); if (ret) { dev_err(&bundle->dev, "failed to register bundle: %d\n", ret); return ret; } trace_gb_bundle_add(bundle); return 0; } /* * Tear down a previously set up bundle. */ void gb_bundle_destroy(struct gb_bundle *bundle) { trace_gb_bundle_destroy(bundle); if (device_is_registered(&bundle->dev)) device_del(&bundle->dev); list_del(&bundle->links); put_device(&bundle->dev); }
linux-master
drivers/greybus/bundle.c
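/*
 * Illustrative sketch, not part of bundle.c: how a class driver declares
 * which bundles -- the devices created by gb_bundle_create() above -- it
 * binds to.  The driver name, callbacks and the class value 0x7f are
 * placeholders; greybus_driver, greybus_bundle_id, GREYBUS_DEVICE_CLASS()
 * and module_greybus_driver() come from <linux/greybus.h>.
 */
static const struct greybus_bundle_id example_bundle_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(0x7f) },	/* hypothetical class value */
	{ }
};

static int example_bundle_probe(struct gb_bundle *bundle,
				const struct greybus_bundle_id *id)
{
	dev_info(&bundle->dev, "bound to bundle %u, class 0x%02x\n",
		 bundle->id, bundle->class);

	return 0;
}

static void example_bundle_disconnect(struct gb_bundle *bundle)
{
	/* Undo whatever example_bundle_probe() set up. */
}

static struct greybus_driver example_bundle_driver = {
	.name		= "example_bundle",
	.probe		= example_bundle_probe,
	.disconnect	= example_bundle_disconnect,
	.id_table	= example_bundle_id_table,
};
module_greybus_driver(example_bundle_driver);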
// SPDX-License-Identifier: GPL-2.0 /* * SVC Greybus driver. * * Copyright 2015 Google Inc. * Copyright 2015 Linaro Ltd. */ #include <linux/debugfs.h> #include <linux/kstrtox.h> #include <linux/workqueue.h> #include <linux/greybus.h> #define SVC_INTF_EJECT_TIMEOUT 9000 #define SVC_INTF_ACTIVATE_TIMEOUT 6000 #define SVC_INTF_RESUME_TIMEOUT 3000 struct gb_svc_deferred_request { struct work_struct work; struct gb_operation *operation; }; static int gb_svc_queue_deferred_request(struct gb_operation *operation); static ssize_t endo_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_svc *svc = to_gb_svc(dev); return sprintf(buf, "0x%04x\n", svc->endo_id); } static DEVICE_ATTR_RO(endo_id); static ssize_t ap_intf_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_svc *svc = to_gb_svc(dev); return sprintf(buf, "%u\n", svc->ap_intf_id); } static DEVICE_ATTR_RO(ap_intf_id); // FIXME // This is a hack, we need to do this "right" and clean the interface up // properly, not just forcibly yank the thing out of the system and hope for the // best. But for now, people want their modules to come out without having to // throw the thing to the ground or get out a screwdriver. static ssize_t intf_eject_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct gb_svc *svc = to_gb_svc(dev); unsigned short intf_id; int ret; ret = kstrtou16(buf, 10, &intf_id); if (ret < 0) return ret; dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id); ret = gb_svc_intf_eject(svc, intf_id); if (ret < 0) return ret; return len; } static DEVICE_ATTR_WO(intf_eject); static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_svc *svc = to_gb_svc(dev); return sprintf(buf, "%s\n", gb_svc_watchdog_enabled(svc) ? 
"enabled" : "disabled"); } static ssize_t watchdog_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct gb_svc *svc = to_gb_svc(dev); int retval; bool user_request; retval = kstrtobool(buf, &user_request); if (retval) return retval; if (user_request) retval = gb_svc_watchdog_enable(svc); else retval = gb_svc_watchdog_disable(svc); if (retval) return retval; return len; } static DEVICE_ATTR_RW(watchdog); static ssize_t watchdog_action_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_svc *svc = to_gb_svc(dev); if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL) return sprintf(buf, "panic\n"); else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO) return sprintf(buf, "reset\n"); return -EINVAL; } static ssize_t watchdog_action_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct gb_svc *svc = to_gb_svc(dev); if (sysfs_streq(buf, "panic")) svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL; else if (sysfs_streq(buf, "reset")) svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO; else return -EINVAL; return len; } static DEVICE_ATTR_RW(watchdog_action); static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value) { struct gb_svc_pwrmon_rail_count_get_response response; int ret; ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0, &response, sizeof(response)); if (ret) { dev_err(&svc->dev, "failed to get rail count: %d\n", ret); return ret; } *value = response.rail_count; return 0; } static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc, struct gb_svc_pwrmon_rail_names_get_response *response, size_t bufsize) { int ret; ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0, response, bufsize); if (ret) { dev_err(&svc->dev, "failed to get rail names: %d\n", ret); return ret; } if (response->status != GB_SVC_OP_SUCCESS) { dev_err(&svc->dev, "SVC error while getting rail names: %u\n", response->status); return -EREMOTEIO; } return 0; } static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id, u8 measurement_type, u32 *value) { struct gb_svc_pwrmon_sample_get_request request; struct gb_svc_pwrmon_sample_get_response response; int ret; request.rail_id = rail_id; request.measurement_type = measurement_type; ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET, &request, sizeof(request), &response, sizeof(response)); if (ret) { dev_err(&svc->dev, "failed to get rail sample: %d\n", ret); return ret; } if (response.result) { dev_err(&svc->dev, "UniPro error while getting rail power sample (%d %d): %d\n", rail_id, measurement_type, response.result); switch (response.result) { case GB_SVC_PWRMON_GET_SAMPLE_INVAL: return -EINVAL; case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP: return -ENOMSG; default: return -EREMOTEIO; } } *value = le32_to_cpu(response.measurement); return 0; } int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id, u8 measurement_type, u32 *value) { struct gb_svc_pwrmon_intf_sample_get_request request; struct gb_svc_pwrmon_intf_sample_get_response response; int ret; request.intf_id = intf_id; request.measurement_type = measurement_type; ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET, &request, sizeof(request), &response, sizeof(response)); if (ret) { dev_err(&svc->dev, "failed to get intf sample: %d\n", ret); return ret; } if (response.result) { dev_err(&svc->dev, "UniPro error while getting intf power sample (%d %d): %d\n", intf_id, 
measurement_type, response.result); switch (response.result) { case GB_SVC_PWRMON_GET_SAMPLE_INVAL: return -EINVAL; case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP: return -ENOMSG; default: return -EREMOTEIO; } } *value = le32_to_cpu(response.measurement); return 0; } static struct attribute *svc_attrs[] = { &dev_attr_endo_id.attr, &dev_attr_ap_intf_id.attr, &dev_attr_intf_eject.attr, &dev_attr_watchdog.attr, &dev_attr_watchdog_action.attr, NULL, }; ATTRIBUTE_GROUPS(svc); int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id) { struct gb_svc_intf_device_id_request request; request.intf_id = intf_id; request.device_id = device_id; return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID, &request, sizeof(request), NULL, 0); } int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id) { struct gb_svc_intf_eject_request request; int ret; request.intf_id = intf_id; /* * The pulse width for module release in svc is long so we need to * increase the timeout so the operation will not return to soon. */ ret = gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_INTF_EJECT, &request, sizeof(request), NULL, 0, SVC_INTF_EJECT_TIMEOUT); if (ret) { dev_err(&svc->dev, "failed to eject interface %u\n", intf_id); return ret; } return 0; } int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable) { struct gb_svc_intf_vsys_request request; struct gb_svc_intf_vsys_response response; int type, ret; request.intf_id = intf_id; if (enable) type = GB_SVC_TYPE_INTF_VSYS_ENABLE; else type = GB_SVC_TYPE_INTF_VSYS_DISABLE; ret = gb_operation_sync(svc->connection, type, &request, sizeof(request), &response, sizeof(response)); if (ret < 0) return ret; if (response.result_code != GB_SVC_INTF_VSYS_OK) return -EREMOTEIO; return 0; } int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable) { struct gb_svc_intf_refclk_request request; struct gb_svc_intf_refclk_response response; int type, ret; request.intf_id = intf_id; if (enable) type = GB_SVC_TYPE_INTF_REFCLK_ENABLE; else type = GB_SVC_TYPE_INTF_REFCLK_DISABLE; ret = gb_operation_sync(svc->connection, type, &request, sizeof(request), &response, sizeof(response)); if (ret < 0) return ret; if (response.result_code != GB_SVC_INTF_REFCLK_OK) return -EREMOTEIO; return 0; } int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable) { struct gb_svc_intf_unipro_request request; struct gb_svc_intf_unipro_response response; int type, ret; request.intf_id = intf_id; if (enable) type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE; else type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE; ret = gb_operation_sync(svc->connection, type, &request, sizeof(request), &response, sizeof(response)); if (ret < 0) return ret; if (response.result_code != GB_SVC_INTF_UNIPRO_OK) return -EREMOTEIO; return 0; } int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type) { struct gb_svc_intf_activate_request request; struct gb_svc_intf_activate_response response; int ret; request.intf_id = intf_id; ret = gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_INTF_ACTIVATE, &request, sizeof(request), &response, sizeof(response), SVC_INTF_ACTIVATE_TIMEOUT); if (ret < 0) return ret; if (response.status != GB_SVC_OP_SUCCESS) { dev_err(&svc->dev, "failed to activate interface %u: %u\n", intf_id, response.status); return -EREMOTEIO; } *intf_type = response.intf_type; return 0; } int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id) { struct gb_svc_intf_resume_request request; struct gb_svc_intf_resume_response response; int ret; request.intf_id = intf_id; ret = 
gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_INTF_RESUME, &request, sizeof(request), &response, sizeof(response), SVC_INTF_RESUME_TIMEOUT); if (ret < 0) { dev_err(&svc->dev, "failed to send interface resume %u: %d\n", intf_id, ret); return ret; } if (response.status != GB_SVC_OP_SUCCESS) { dev_err(&svc->dev, "failed to resume interface %u: %u\n", intf_id, response.status); return -EREMOTEIO; } return 0; } int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, u32 *value) { struct gb_svc_dme_peer_get_request request; struct gb_svc_dme_peer_get_response response; u16 result; int ret; request.intf_id = intf_id; request.attr = cpu_to_le16(attr); request.selector = cpu_to_le16(selector); ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET, &request, sizeof(request), &response, sizeof(response)); if (ret) { dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n", intf_id, attr, selector, ret); return ret; } result = le16_to_cpu(response.result_code); if (result) { dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n", intf_id, attr, selector, result); return -EREMOTEIO; } if (value) *value = le32_to_cpu(response.attr_value); return 0; } int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, u32 value) { struct gb_svc_dme_peer_set_request request; struct gb_svc_dme_peer_set_response response; u16 result; int ret; request.intf_id = intf_id; request.attr = cpu_to_le16(attr); request.selector = cpu_to_le16(selector); request.value = cpu_to_le32(value); ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET, &request, sizeof(request), &response, sizeof(response)); if (ret) { dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n", intf_id, attr, selector, value, ret); return ret; } result = le16_to_cpu(response.result_code); if (result) { dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n", intf_id, attr, selector, value, result); return -EREMOTEIO; } return 0; } int gb_svc_connection_create(struct gb_svc *svc, u8 intf1_id, u16 cport1_id, u8 intf2_id, u16 cport2_id, u8 cport_flags) { struct gb_svc_conn_create_request request; request.intf1_id = intf1_id; request.cport1_id = cpu_to_le16(cport1_id); request.intf2_id = intf2_id; request.cport2_id = cpu_to_le16(cport2_id); request.tc = 0; /* TC0 */ request.flags = cport_flags; return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE, &request, sizeof(request), NULL, 0); } void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id, u8 intf2_id, u16 cport2_id) { struct gb_svc_conn_destroy_request request; struct gb_connection *connection = svc->connection; int ret; request.intf1_id = intf1_id; request.cport1_id = cpu_to_le16(cport1_id); request.intf2_id = intf2_id; request.cport2_id = cpu_to_le16(cport2_id); ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY, &request, sizeof(request), NULL, 0); if (ret) { dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n", intf1_id, cport1_id, intf2_id, cport2_id, ret); } } /* Creates bi-directional routes between the devices */ int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id, u8 intf2_id, u8 dev2_id) { struct gb_svc_route_create_request request; request.intf1_id = intf1_id; request.dev1_id = dev1_id; request.intf2_id = intf2_id; request.dev2_id = dev2_id; return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE, &request, sizeof(request), NULL, 0); } /* 
Destroys bi-directional routes between the devices */ void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id) { struct gb_svc_route_destroy_request request; int ret; request.intf1_id = intf1_id; request.intf2_id = intf2_id; ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY, &request, sizeof(request), NULL, 0); if (ret) { dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n", intf1_id, intf2_id, ret); } } int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series, u8 tx_mode, u8 tx_gear, u8 tx_nlanes, u8 tx_amplitude, u8 tx_hs_equalizer, u8 rx_mode, u8 rx_gear, u8 rx_nlanes, u8 flags, u32 quirks, struct gb_svc_l2_timer_cfg *local, struct gb_svc_l2_timer_cfg *remote) { struct gb_svc_intf_set_pwrm_request request; struct gb_svc_intf_set_pwrm_response response; int ret; u16 result_code; memset(&request, 0, sizeof(request)); request.intf_id = intf_id; request.hs_series = hs_series; request.tx_mode = tx_mode; request.tx_gear = tx_gear; request.tx_nlanes = tx_nlanes; request.tx_amplitude = tx_amplitude; request.tx_hs_equalizer = tx_hs_equalizer; request.rx_mode = rx_mode; request.rx_gear = rx_gear; request.rx_nlanes = rx_nlanes; request.flags = flags; request.quirks = cpu_to_le32(quirks); if (local) request.local_l2timerdata = *local; if (remote) request.remote_l2timerdata = *remote; ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM, &request, sizeof(request), &response, sizeof(response)); if (ret < 0) return ret; result_code = response.result_code; if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) { dev_err(&svc->dev, "set power mode = %d\n", result_code); return -EIO; } return 0; } EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode); int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id) { struct gb_svc_intf_set_pwrm_request request; struct gb_svc_intf_set_pwrm_response response; int ret; u16 result_code; memset(&request, 0, sizeof(request)); request.intf_id = intf_id; request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A; request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE; request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE; ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM, &request, sizeof(request), &response, sizeof(response)); if (ret < 0) { dev_err(&svc->dev, "failed to send set power mode operation to interface %u: %d\n", intf_id, ret); return ret; } result_code = response.result_code; if (result_code != GB_SVC_SETPWRM_PWR_OK) { dev_err(&svc->dev, "failed to hibernate the link for interface %u: %u\n", intf_id, result_code); return -EIO; } return 0; } int gb_svc_ping(struct gb_svc *svc) { return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING, NULL, 0, NULL, 0, GB_OPERATION_TIMEOUT_DEFAULT * 2); } static int gb_svc_version_request(struct gb_operation *op) { struct gb_connection *connection = op->connection; struct gb_svc *svc = gb_connection_get_data(connection); struct gb_svc_version_request *request; struct gb_svc_version_response *response; if (op->request->payload_size < sizeof(*request)) { dev_err(&svc->dev, "short version request (%zu < %zu)\n", op->request->payload_size, sizeof(*request)); return -EINVAL; } request = op->request->payload; if (request->major > GB_SVC_VERSION_MAJOR) { dev_warn(&svc->dev, "unsupported major version (%u > %u)\n", request->major, GB_SVC_VERSION_MAJOR); return -ENOTSUPP; } svc->protocol_major = request->major; svc->protocol_minor = request->minor; if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL)) return -ENOMEM; response = op->response->payload; 
	response->major = svc->protocol_major;
	response->minor = svc->protocol_minor;

	return 0;
}

static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
					size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_VOL, &value);
	if (ret) {
		dev_err(&svc->dev, "failed to get voltage sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
					size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_CURR, &value);
	if (ret) {
		dev_err(&svc->dev, "failed to get current sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
				      size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_PWR, &value);
	if (ret) {
		dev_err(&svc->dev, "failed to get power sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static const struct file_operations pwrmon_debugfs_voltage_fops = {
	.read = pwr_debugfs_voltage_read,
};

static const struct file_operations pwrmon_debugfs_current_fops = {
	.read = pwr_debugfs_current_read,
};

static const struct file_operations pwrmon_debugfs_power_fops = {
	.read = pwr_debugfs_power_read,
};

static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
{
	int i;
	size_t bufsize;
	struct dentry *dent;
	struct gb_svc_pwrmon_rail_names_get_response *rail_names;
	u8 rail_count;

	dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
	if (IS_ERR_OR_NULL(dent))
		return;

	if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
		goto err_pwrmon_debugfs;

	if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
		goto err_pwrmon_debugfs;

	bufsize = sizeof(*rail_names) +
		GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;

	rail_names = kzalloc(bufsize, GFP_KERNEL);
	if (!rail_names)
		goto err_pwrmon_debugfs;

	svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
				    GFP_KERNEL);
	if (!svc->pwrmon_rails)
		goto err_pwrmon_debugfs_free;

	if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
		goto err_pwrmon_debugfs_free;

	for (i = 0; i < rail_count; i++) {
		struct dentry *dir;
		struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
		char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];

		snprintf(fname, sizeof(fname), "%s",
			 (char *)&rail_names->name[i]);

		rail->id = i;
		rail->svc = svc;

		dir = debugfs_create_dir(fname, dent);
		debugfs_create_file("voltage_now", 0444, dir, rail,
				    &pwrmon_debugfs_voltage_fops);
		debugfs_create_file("current_now", 0444, dir, rail,
				    &pwrmon_debugfs_current_fops);
		debugfs_create_file("power_now", 0444, dir, rail,
				    &pwrmon_debugfs_power_fops);
	}

	kfree(rail_names);
	return;

err_pwrmon_debugfs_free:
	kfree(rail_names);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;

err_pwrmon_debugfs:
	debugfs_remove(dent);
}

static void gb_svc_debugfs_init(struct gb_svc *svc)
{
	svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
						 gb_debugfs_get());
	gb_svc_pwrmon_debugfs_init(svc);
}

static void gb_svc_debugfs_exit(struct gb_svc *svc)
{
	debugfs_remove_recursive(svc->debugfs_dentry);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;
}

static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_hello_request *hello_request;
	int ret;

	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	svc->endo_id = le16_to_cpu(hello_request->endo_id);
	svc->ap_intf_id = hello_request->interface_id;

	ret = device_add(&svc->dev);
	if (ret) {
		dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
		return ret;
	}

	ret = gb_svc_watchdog_create(svc);
	if (ret) {
		dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
		goto err_deregister_svc;
	}

	/*
	 * FIXME: This is a temporary hack to reconfigure the link at HELLO
	 * (which abuses the deferred request processing mechanism).
	 */
	ret = gb_svc_queue_deferred_request(op);
	if (ret)
		goto err_destroy_watchdog;

	gb_svc_debugfs_init(svc);

	return 0;

err_destroy_watchdog:
	gb_svc_watchdog_destroy(svc);
err_deregister_svc:
	device_del(&svc->dev);

	return ret;
}

static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
						    u8 intf_id)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;
	size_t num_interfaces;
	u8 module_id;

	list_for_each_entry(module, &hd->modules, hd_node) {
		module_id = module->module_id;
		num_interfaces = module->num_interfaces;

		if (intf_id >= module_id &&
		    intf_id < module_id + num_interfaces) {
			return module->interfaces[intf_id - module_id];
		}
	}

	return NULL;
}

static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;

	list_for_each_entry(module, &hd->modules, hd_node) {
		if (module->module_id == module_id)
			return module;
	}

	return NULL;
}

static void gb_svc_process_hello_deferred(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	int ret;

	/*
	 * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
	 * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
	 * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
	 * module.
	 *
	 * The code should be removed once SW-2217, Heuristic for UniPro
	 * Power Mode Changes is resolved.
	 */
	ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
					 GB_SVC_UNIPRO_HS_SERIES_A,
					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					 2, 1,
					 GB_SVC_SMALL_AMPLITUDE,
					 GB_SVC_NO_DE_EMPHASIS,
					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					 2, 1,
					 0, 0,
					 NULL, NULL);

	if (ret)
		dev_warn(&svc->dev,
			 "power mode change failed on AP to switch link: %d\n",
			 ret);
}

static void gb_svc_process_module_inserted(struct gb_operation *operation)
{
	struct gb_svc_module_inserted_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;
	size_t num_interfaces;
	u8 module_id;
	u16 flags;
	int ret;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	module_id = request->primary_intf_id;
	num_interfaces = request->intf_count;
	flags = le16_to_cpu(request->flags);

	dev_dbg(&svc->dev,
		"%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
		__func__, module_id, num_interfaces, flags);

	if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
		dev_warn(&svc->dev, "no primary interface detected on module %u\n",
			 module_id);
	}

	module = gb_svc_module_lookup(svc, module_id);
	if (module) {
		dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
			 module_id);
		return;
	}

	module = gb_module_create(hd, module_id, num_interfaces);
	if (!module) {
		dev_err(&svc->dev, "failed to create module\n");
		return;
	}

	ret = gb_module_add(module);
	if (ret) {
		gb_module_put(module);
		return;
	}

	list_add(&module->hd_node, &hd->modules);
}

static void gb_svc_process_module_removed(struct gb_operation *operation)
{
	struct gb_svc_module_removed_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_module *module;
	u8 module_id;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	module_id = request->primary_intf_id;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);

	module = gb_svc_module_lookup(svc, module_id);
	if (!module) {
		dev_warn(&svc->dev, "unexpected module-removed event %u\n",
			 module_id);
		return;
	}

	module->disconnected = true;

	gb_module_del(module);
	list_del(&module->hd_node);
	gb_module_put(module);
}

static void gb_svc_process_intf_oops(struct gb_operation *operation)
{
	struct gb_svc_intf_oops_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_interface *intf;
	u8 intf_id;
	u8 reason;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;
	reason = request->reason;

	intf = gb_svc_interface_lookup(svc, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "unexpected interface-oops event %u\n",
			 intf_id);
		return;
	}

	dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n",
		 intf_id, reason);

	mutex_lock(&intf->mutex);
	intf->disconnected = true;
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);
}

static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
{
	struct gb_svc_intf_mailbox_event_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_interface *intf;
	u8 intf_id;
	u16 result_code;
	u32 mailbox;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;
	result_code = le16_to_cpu(request->result_code);
	mailbox = le32_to_cpu(request->mailbox);

	dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
		__func__, intf_id, result_code, mailbox);

	intf = gb_svc_interface_lookup(svc, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
		return;
	}

	gb_interface_mailbox_event(intf, result_code, mailbox);
}

static void gb_svc_process_deferred_request(struct work_struct *work)
{
	struct gb_svc_deferred_request *dr;
	struct gb_operation *operation;
	struct gb_svc *svc;
	u8 type;

	dr = container_of(work, struct gb_svc_deferred_request, work);
	operation = dr->operation;
	svc = gb_connection_get_data(operation->connection);
	type = operation->request->header->type;

	switch (type) {
	case GB_SVC_TYPE_SVC_HELLO:
		gb_svc_process_hello_deferred(operation);
		break;
	case GB_SVC_TYPE_MODULE_INSERTED:
		gb_svc_process_module_inserted(operation);
		break;
	case GB_SVC_TYPE_MODULE_REMOVED:
		gb_svc_process_module_removed(operation);
		break;
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		gb_svc_process_intf_mailbox_event(operation);
		break;
	case GB_SVC_TYPE_INTF_OOPS:
		gb_svc_process_intf_oops(operation);
		break;
	default:
		dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
	}

	gb_operation_put(operation);
	kfree(dr);
}

static int gb_svc_queue_deferred_request(struct gb_operation *operation)
{
	struct gb_svc *svc = gb_connection_get_data(operation->connection);
	struct gb_svc_deferred_request *dr;

	dr = kmalloc(sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	gb_operation_get(operation);

	dr->operation = operation;
	INIT_WORK(&dr->work, gb_svc_process_deferred_request);

	queue_work(svc->wq, &dr->work);

	return 0;
}

static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_message *request = op->request;
	struct gb_svc_intf_reset_request *reset;

	if (request->payload_size < sizeof(*reset)) {
		dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
			 request->payload_size, sizeof(*reset));
		return -EINVAL;
	}
	reset = request->payload;

	/* FIXME Reset the interface here */

	return 0;
}

static int gb_svc_module_inserted_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_module_inserted_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
		request->primary_intf_id);

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_module_removed_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_module_removed_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
		request->primary_intf_id);

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_intf_oops_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_intf_oops_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n",
			 op->request->payload_size,
			 sizeof(*request));
		return -EINVAL;
	}

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_intf_mailbox_event_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	u8 type = op->type;
	int ret = 0;

	/*
	 * SVC requests need to follow a specific order (at least initially) and
	 * below code takes care of enforcing that. The expected order is:
	 * - PROTOCOL_VERSION
	 * - SVC_HELLO
	 * - Any other request, but the earlier two.
	 *
	 * Incoming requests are guaranteed to be serialized and so we don't
	 * need to protect 'state' for any races.
	 */
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
			 type, svc->state);
		return ret;
	}

	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	case GB_SVC_TYPE_MODULE_INSERTED:
		return gb_svc_module_inserted_recv(op);
	case GB_SVC_TYPE_MODULE_REMOVED:
		return gb_svc_module_removed_recv(op);
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		return gb_svc_intf_mailbox_event_recv(op);
	case GB_SVC_TYPE_INTF_OOPS:
		return gb_svc_intf_oops_recv(op);
	default:
		dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
		return -EINVAL;
	}
}

static void gb_svc_release(struct device *dev)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (svc->connection)
		gb_connection_destroy(svc->connection);
	ida_destroy(&svc->device_id_map);
	destroy_workqueue(svc->wq);
	kfree(svc);
}

struct device_type greybus_svc_type = {
	.name = "greybus_svc",
	.release = gb_svc_release,
};

struct gb_svc *gb_svc_create(struct gb_host_device *hd)
{
	struct gb_svc *svc;

	svc = kzalloc(sizeof(*svc), GFP_KERNEL);
	if (!svc)
		return NULL;

	svc->wq = alloc_ordered_workqueue("%s:svc", 0, dev_name(&hd->dev));
	if (!svc->wq) {
		kfree(svc);
		return NULL;
	}

	svc->dev.parent = &hd->dev;
	svc->dev.bus = &greybus_bus_type;
	svc->dev.type = &greybus_svc_type;
	svc->dev.groups = svc_groups;
	svc->dev.dma_mask = svc->dev.parent->dma_mask;
	device_initialize(&svc->dev);

	dev_set_name(&svc->dev, "%d-svc", hd->bus_id);

	ida_init(&svc->device_id_map);
	svc->state = GB_SVC_STATE_RESET;
	svc->hd = hd;

	svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
						      gb_svc_request_handler);
	if (IS_ERR(svc->connection)) {
		dev_err(&svc->dev, "failed to create connection: %ld\n",
			PTR_ERR(svc->connection));
		goto err_put_device;
	}

	gb_connection_set_data(svc->connection, svc);

	return svc;

err_put_device:
	put_device(&svc->dev);
	return NULL;
}

int gb_svc_add(struct gb_svc *svc)
{
	int ret;

	/*
	 * The SVC protocol is currently driven
	 * by the SVC, so the SVC device is added from the connection
	 * request handler when enough information has been received.
	 */
	ret = gb_connection_enable(svc->connection);
	if (ret)
		return ret;

	return 0;
}

static void gb_svc_remove_modules(struct gb_svc *svc)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module, *tmp;

	list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
		gb_module_del(module);
		list_del(&module->hd_node);
		gb_module_put(module);
	}
}

void gb_svc_del(struct gb_svc *svc)
{
	gb_connection_disable_rx(svc->connection);

	/*
	 * The SVC device may have been registered from the request handler.
	 */
	if (device_is_registered(&svc->dev)) {
		gb_svc_debugfs_exit(svc);
		gb_svc_watchdog_destroy(svc);
		device_del(&svc->dev);
	}

	flush_workqueue(svc->wq);

	gb_svc_remove_modules(svc);

	gb_connection_disable(svc->connection);
}

void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}
linux-master
drivers/greybus/svc.c