/*
* ata_generic.c - Generic PATA/SATA controller driver.
* Copyright 2005 Red Hat Inc, all rights reserved.
*
* Elements from ide/pci/generic.c
* Copyright (C) 2001-2002 Andre Hedrick <[email protected]>
* Portions (C) Copyright 2002 Red Hat Inc <[email protected]>
*
* May be copied or modified under the terms of the GNU General Public License
*
* Driver for PCI IDE interfaces implementing the standard bus mastering
interface functionality. This assumes the BIOS did the drive setup and
* tuning for us. By default we do not grab all IDE class devices as they
* may have other drivers or need fixups to avoid problems. Instead we keep
* a default list of stuff without documentation/driver that appears to
* work.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "ata_generic"
#define DRV_VERSION "0.2.15"
/*
* A generic parallel ATA driver using libata
*/
enum {
ATA_GEN_CLASS_MATCH = (1 << 0),
ATA_GEN_FORCE_DMA = (1 << 1),
ATA_GEN_INTEL_IDER = (1 << 2),
};
/**
* generic_set_mode - mode setting
* @link: link to set up
* @unused: returned device on error
*
Use a non-standard set_mode function. We don't want to be tuned.
The BIOS configured everything. Our job is not to fiddle. We
read the DMA enabled bits from the PCI configuration of the device
and respect them.
*/
static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
{
struct ata_port *ap = link->ap;
const struct pci_device_id *id = ap->host->private_data;
int dma_enabled = 0;
struct ata_device *dev;
if (id->driver_data & ATA_GEN_FORCE_DMA) {
dma_enabled = 0xff;
} else if (ap->ioaddr.bmdma_addr) {
/* Bits 5 and 6 indicate if DMA is active on master/slave */
dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}
ata_for_each_dev(dev, link, ENABLED) {
/* We don't really care */
dev->pio_mode = XFER_PIO_0;
dev->dma_mode = XFER_MW_DMA_0;
/* We do need the right mode information for DMA or PIO
and this comes from the current configuration flags */
if (dma_enabled & (1 << (5 + dev->devno))) {
unsigned int xfer_mask = ata_id_xfermask(dev->id);
const char *name;
if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
name = ata_mode_string(xfer_mask);
else {
/* SWDMA perhaps? */
name = "DMA";
xfer_mask |= ata_xfer_mode2mask(XFER_MW_DMA_0);
}
ata_dev_info(dev, "configured for %s\n", name);
dev->xfer_mode = ata_xfer_mask2mode(xfer_mask);
dev->xfer_shift = ata_xfer_mode2shift(dev->xfer_mode);
dev->flags &= ~ATA_DFLAG_PIO;
} else {
ata_dev_info(dev, "configured for PIO\n");
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
}
}
return 0;
}
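/*
 * Illustrative sketch (not part of the original driver): in the BMDMA
 * status register, bit 5 flags the master device as DMA capable and
 * bit 6 the slave, which is why generic_set_mode() tests
 * (1 << (5 + dev->devno)).  A hypothetical helper spelling that out:
 */
static inline bool bmdma_dev_dma_enabled(u8 dma_status, unsigned int devno)
{
	/* devno 0 = master (bit 5), devno 1 = slave (bit 6) */
	return dma_status & (1 << (5 + devno));
}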
static const struct scsi_host_template generic_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations generic_port_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = ata_cable_unknown,
.set_mode = generic_set_mode,
};
static int all_generic_ide; /* Set to claim all devices */
/**
* is_intel_ider - identify intel IDE-R devices
* @dev: PCI device
*
* Distinguish Intel IDE-R controller devices from other Intel IDE
* devices. IDE-R devices have no timing registers and are in
* most respects virtual. They should be driven by the ata_generic
* driver.
*
IDE-R devices have PCI offset 0xF8.L as zero; later Intel ATA has
it non-zero. All Intel ATA has 0x40 writable (timing), but it is
not writable on IDE-R devices (this is guaranteed).
*/
static int is_intel_ider(struct pci_dev *dev)
{
/* For Intel IDE the value at 0xF8 is only zero on IDE-R
interfaces */
u32 r;
u16 t;
/* Check the manufacturing ID, it will be zero for IDE-R */
pci_read_config_dword(dev, 0xF8, &r);
/* Not IDE-R: punt so that ata_(old)piix gets it */
if (r != 0)
return 0;
/* 0xF8 will also be zero on some early Intel IDE devices
but they will have a sane timing register */
pci_read_config_word(dev, 0x40, &t);
if (t != 0)
return 0;
/* Finally check if the timing register is writable so that
we eliminate any early devices hot-docked in a docking
station */
pci_write_config_word(dev, 0x40, 1);
pci_read_config_word(dev, 0x40, &t);
if (t) {
pci_write_config_word(dev, 0x40, 0);
return 0;
}
return 1;
}
/**
* ata_generic_init_one - attach generic IDE
* @dev: PCI device found
* @id: match entry
*
* Called each time a matching IDE interface is found. We check if the
* interface is one we wish to claim and if so we perform any chip
* specific hacks then let the ATA layer do the heavy lifting.
*/
static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
u16 command;
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &generic_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
/* Don't use the generic entry unless instructed to do so */
if ((id->driver_data & ATA_GEN_CLASS_MATCH) && all_generic_ide == 0)
return -ENODEV;
if ((id->driver_data & ATA_GEN_INTEL_IDER) && !all_generic_ide)
if (!is_intel_ider(dev))
return -ENODEV;
/* Devices that need care */
if (dev->vendor == PCI_VENDOR_ID_UMC &&
dev->device == PCI_DEVICE_ID_UMC_UM8886A &&
(!(PCI_FUNC(dev->devfn) & 1)))
return -ENODEV;
if (dev->vendor == PCI_VENDOR_ID_OPTI &&
dev->device == PCI_DEVICE_ID_OPTI_82C558 &&
(!(PCI_FUNC(dev->devfn) & 1)))
return -ENODEV;
/* Don't re-enable devices in generic mode or we will break some
motherboards with disabled and unused IDE controllers */
pci_read_config_word(dev, PCI_COMMAND, &command);
if (!(command & PCI_COMMAND_IO))
return -ENODEV;
if (dev->vendor == PCI_VENDOR_ID_AL)
ata_pci_bmdma_clear_simplex(dev);
if (dev->vendor == PCI_VENDOR_ID_ATI) {
int rc = pcim_enable_device(dev);
if (rc < 0)
return rc;
pcim_pin_device(dev);
}
return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, (void *)id, 0);
}
static struct pci_device_id ata_generic[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), },
{ PCI_DEVICE(PCI_VENDOR_ID_HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), },
{ PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8673F), },
{ PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886A), },
{ PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF), },
{ PCI_DEVICE(PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), },
{ PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), },
{ PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE),
.driver_data = ATA_GEN_FORCE_DMA },
#if !defined(CONFIG_PATA_TOSHIBA) && !defined(CONFIG_PATA_TOSHIBA_MODULE)
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), },
#endif
/* Intel, IDE class device */
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL,
.driver_data = ATA_GEN_INTEL_IDER },
/* Must come last. If you add entries adjust this table appropriately */
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL),
.driver_data = ATA_GEN_CLASS_MATCH },
{ 0, },
};
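/*
 * Illustrative note (an assumption, not original code): dev->class
 * holds the 24-bit class code (base class, sub-class, prog-if), so
 * PCI_CLASS_STORAGE_IDE << 8 yields 0x010100 and the 0xFFFFFF00 mask
 * in the entries above ignores the prog-if byte, matching any IDE
 * class function regardless of its programming interface.  A
 * hypothetical helper performing the same test:
 */
static inline bool is_ide_class(const struct pci_dev *pdev)
{
	return (pdev->class & 0xFFFFFF00UL) == (PCI_CLASS_STORAGE_IDE << 8);
}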
static struct pci_driver ata_generic_pci_driver = {
.name = DRV_NAME,
.id_table = ata_generic,
.probe = ata_generic_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(ata_generic_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for generic ATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ata_generic);
MODULE_VERSION(DRV_VERSION);
module_param(all_generic_ide, int, 0);
/* === Source: linux-master, drivers/ata/ata_generic.c === */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sata_sx4.c - Promise SATA
*
* Maintained by: Tejun Heo <[email protected]>
* Please ALWAYS copy [email protected]
* on emails.
*
* Copyright 2003-2004 Red Hat, Inc.
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* Hardware documentation available under NDA.
*/
/*
Theory of operation
-------------------
The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
engine, DIMM memory, and four ATA engines (one per SATA port).
Data is copied to/from DIMM memory by the HDMA engine, before
handing off to one (or more) of the ATA engines. The ATA
engines operate solely on DIMM memory.
The SX4 behaves like a PATA chip, with no SATA controls or
knowledge whatsoever, leading to the presumption that
PATA<->SATA bridges exist on SX4 boards, external to the
PDC20621 chip itself.
The chip is quite capable, supporting an XOR engine and linked
hardware commands (permits a string of transactions to be
submitted and waited-on as a single unit), and an optional
microprocessor.
The limiting factor is largely software. This Linux driver was
written to multiplex the single HDMA engine to copy disk
transactions into a fixed DIMM memory space, from where an ATA
engine takes over. As a result, each WRITE looks like this:
submit HDMA packet to hardware
hardware copies data from system memory to DIMM
hardware raises interrupt
submit ATA packet to hardware
hardware executes ATA WRITE command, w/ data in DIMM
hardware raises interrupt
and each READ looks like this:
submit ATA packet to hardware
hardware executes ATA READ command, w/ data in DIMM
hardware raises interrupt
submit HDMA packet to hardware
hardware copies data from DIMM to system memory
hardware raises interrupt
This is a very slow, lock-step way of doing things that can
certainly be improved by motivated kernel hackers.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include "sata_promise.h"
#define DRV_NAME "sata_sx4"
#define DRV_VERSION "0.12"
static int dimm_test;
module_param(dimm_test, int, 0644);
MODULE_PARM_DESC(dimm_test, "Enable DIMM test during startup (1 = enabled)");
enum {
PDC_MMIO_BAR = 3,
PDC_DIMM_BAR = 4,
PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */
PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */
PDC_CTLSTAT = 0x60, /* IDEn control / status */
PDC_20621_SEQCTL = 0x400,
PDC_20621_SEQMASK = 0x480,
PDC_20621_GENERAL_CTL = 0x484,
PDC_20621_PAGE_SIZE = (32 * 1024),
/* chosen, not constant, values; we design our own DIMM mem map */
PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */
PDC_20621_DIMM_BASE = 0x00200000,
PDC_20621_DIMM_DATA = (64 * 1024),
PDC_DIMM_DATA_STEP = (256 * 1024),
PDC_DIMM_WINDOW_STEP = (8 * 1024),
PDC_DIMM_HOST_PRD = (6 * 1024),
PDC_DIMM_HOST_PKT = (128 * 0),
PDC_DIMM_HPKT_PRD = (128 * 1),
PDC_DIMM_ATA_PKT = (128 * 2),
PDC_DIMM_APKT_PRD = (128 * 3),
PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128,
PDC_PAGE_WINDOW = 0x40,
PDC_PAGE_DATA = PDC_PAGE_WINDOW +
(PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */
PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
(1<<23),
board_20621 = 0, /* FastTrak S150 SX4 */
PDC_MASK_INT = (1 << 10), /* HDMA/ATA mask int */
PDC_RESET = (1 << 11), /* HDMA/ATA reset */
PDC_DMA_ENABLE = (1 << 7), /* DMA start/stop */
PDC_MAX_HDMA = 32,
PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1),
PDC_DIMM0_SPD_DEV_ADDRESS = 0x50,
PDC_DIMM1_SPD_DEV_ADDRESS = 0x51,
PDC_I2C_CONTROL = 0x48,
PDC_I2C_ADDR_DATA = 0x4C,
PDC_DIMM0_CONTROL = 0x80,
PDC_DIMM1_CONTROL = 0x84,
PDC_SDRAM_CONTROL = 0x88,
PDC_I2C_WRITE = 0, /* master -> slave */
PDC_I2C_READ = (1 << 6), /* master <- slave */
PDC_I2C_START = (1 << 7), /* start I2C proto */
PDC_I2C_MASK_INT = (1 << 5), /* mask I2C interrupt */
PDC_I2C_COMPLETE = (1 << 16), /* I2C normal compl. */
PDC_I2C_NO_ACK = (1 << 20), /* slave no-ack addr */
PDC_DIMM_SPD_SUBADDRESS_START = 0x00,
PDC_DIMM_SPD_SUBADDRESS_END = 0x7F,
PDC_DIMM_SPD_ROW_NUM = 3,
PDC_DIMM_SPD_COLUMN_NUM = 4,
PDC_DIMM_SPD_MODULE_ROW = 5,
PDC_DIMM_SPD_TYPE = 11,
PDC_DIMM_SPD_REFRESH_RATE = 12,
PDC_DIMM_SPD_BANK_NUM = 17,
PDC_DIMM_SPD_CAS_LATENCY = 18,
PDC_DIMM_SPD_ATTRIBUTE = 21,
PDC_DIMM_SPD_ROW_PRE_CHARGE = 27,
PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
PDC_DIMM_SPD_RAS_CAS_DELAY = 29,
PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
PDC_DIMM_SPD_SYSTEM_FREQ = 126,
PDC_CTL_STATUS = 0x08,
PDC_DIMM_WINDOW_CTLR = 0x0C,
PDC_TIME_CONTROL = 0x3C,
PDC_TIME_PERIOD = 0x40,
PDC_TIME_COUNTER = 0x44,
PDC_GENERAL_CTLR = 0x484,
PCI_PLL_INIT = 0x8A531824,
PCI_X_TCOUNT = 0xEE1E5CFF,
/* PDC_TIME_CONTROL bits */
PDC_TIMER_BUZZER = (1 << 10),
PDC_TIMER_MODE_PERIODIC = 0, /* bits 9:8 == 00 */
PDC_TIMER_MODE_ONCE = (1 << 8), /* bits 9:8 == 01 */
PDC_TIMER_ENABLE = (1 << 7),
PDC_TIMER_MASK_INT = (1 << 5),
PDC_TIMER_SEQ_MASK = 0x1f, /* SEQ ID for timer */
PDC_TIMER_DEFAULT = PDC_TIMER_MODE_ONCE |
PDC_TIMER_ENABLE |
PDC_TIMER_MASK_INT,
};
#define ECC_ERASE_BUF_SZ (128 * 1024)
struct pdc_port_priv {
u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
u8 *pkt;
dma_addr_t pkt_dma;
};
struct pdc_host_priv {
unsigned int doing_hdma;
unsigned int hdma_prod;
unsigned int hdma_cons;
struct {
struct ata_queued_cmd *qc;
unsigned int seq;
unsigned long pkt_ofs;
} hdma[32];
};
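/*
 * Illustrative sketch (an assumption, not original code): hdma_prod
 * and hdma_cons are free-running counters into the 32-entry hdma[]
 * ring; slots are addressed with "counter & PDC_HDMA_Q_MASK", the
 * classic power-of-two ring-buffer idiom.  The queue is empty when
 * the two counters are equal.  A hypothetical helper:
 */
static inline unsigned int pdc_hdma_pending(const struct pdc_host_priv *pp)
{
	return pp->hdma_prod - pp->hdma_cons;	/* queued HDMA packets */
}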
static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void pdc_error_handler(struct ata_port *ap);
static void pdc_freeze(struct ata_port *ap);
static void pdc_thaw(struct ata_port *ap);
static int pdc_port_start(struct ata_port *ap);
static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static unsigned int pdc20621_dimm_init(struct ata_host *host);
static int pdc20621_detect_dimm(struct ata_host *host);
static unsigned int pdc20621_i2c_read(struct ata_host *host,
u32 device, u32 subaddr, u32 *pdata);
static int pdc20621_prog_dimm0(struct ata_host *host);
static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
static void pdc20621_get_from_dimm(struct ata_host *host,
void *psource, u32 offset, u32 size);
static void pdc20621_put_to_dimm(struct ata_host *host,
void *psource, u32 offset, u32 size);
static void pdc20621_irq_clear(struct ata_port *ap);
static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
static int pdc_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
static const struct scsi_host_template pdc_sata_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
};
static struct ata_port_operations pdc_20621_ops = {
.inherits = &ata_sff_port_ops,
.check_atapi_dma = pdc_check_atapi_dma,
.qc_prep = pdc20621_qc_prep,
.qc_issue = pdc20621_qc_issue,
.freeze = pdc_freeze,
.thaw = pdc_thaw,
.softreset = pdc_softreset,
.error_handler = pdc_error_handler,
.lost_interrupt = ATA_OP_NULL,
.post_internal_cmd = pdc_post_internal_cmd,
.port_start = pdc_port_start,
.sff_tf_load = pdc_tf_load_mmio,
.sff_exec_command = pdc_exec_command_mmio,
.sff_irq_clear = pdc20621_irq_clear,
};
static const struct ata_port_info pdc_port_info[] = {
/* board_20621 */
{
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
ATA_FLAG_PIO_POLLING,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &pdc_20621_ops,
},
};
static const struct pci_device_id pdc_sata_pci_tbl[] = {
{ PCI_VDEVICE(PROMISE, 0x6622), board_20621 },
{ } /* terminate list */
};
static struct pci_driver pdc_sata_pci_driver = {
.name = DRV_NAME,
.id_table = pdc_sata_pci_tbl,
.probe = pdc_sata_init_one,
.remove = ata_pci_remove_one,
};
static int pdc_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct pdc_port_priv *pp;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
if (!pp->pkt)
return -ENOMEM;
ap->private_data = pp;
return 0;
}
static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
unsigned int total_len)
{
u32 addr;
unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
__le32 *buf32 = (__le32 *) buf;
/* output ATA packet S/G table */
addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
(PDC_DIMM_DATA_STEP * portno);
buf32[dw] = cpu_to_le32(addr);
buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
}
static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
unsigned int total_len)
{
u32 addr;
unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
__le32 *buf32 = (__le32 *) buf;
/* output Host DMA packet S/G table */
addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
(PDC_DIMM_DATA_STEP * portno);
buf32[dw] = cpu_to_le32(addr);
buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
}
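/*
 * Illustrative sketch (an assumption): both S/G builders above point
 * at the same fixed per-port data region in DIMM memory.  With the
 * enum values above, port n's buffer starts at 0x00200000 + 0x10000 +
 * n * 0x40000, i.e. port 0 at 0x00210000 and each later port 256 KiB
 * higher.  A hypothetical helper computing that address:
 */
static inline u32 pdc_dimm_data_addr(unsigned int portno)
{
	return PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
	       PDC_DIMM_DATA_STEP * portno;
}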
static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
unsigned int devno, u8 *buf,
unsigned int portno)
{
unsigned int i, dw;
__le32 *buf32 = (__le32 *) buf;
u8 dev_reg;
unsigned int dimm_sg = PDC_20621_DIMM_BASE +
(PDC_DIMM_WINDOW_STEP * portno) +
PDC_DIMM_APKT_PRD;
i = PDC_DIMM_ATA_PKT;
/*
* Set up ATA packet
*/
if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
buf[i++] = PDC_PKT_READ;
else if (tf->protocol == ATA_PROT_NODATA)
buf[i++] = PDC_PKT_NODATA;
else
buf[i++] = 0;
buf[i++] = 0; /* reserved */
buf[i++] = portno + 1; /* seq. id */
buf[i++] = 0xff; /* delay seq. id */
/* dimm dma S/G, and next-pkt */
dw = i >> 2;
if (tf->protocol == ATA_PROT_NODATA)
buf32[dw] = 0;
else
buf32[dw] = cpu_to_le32(dimm_sg);
buf32[dw + 1] = 0;
i += 8;
if (devno == 0)
dev_reg = ATA_DEVICE_OBS;
else
dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
/* select device */
buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
buf[i++] = dev_reg;
/* device control register */
buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
buf[i++] = tf->ctl;
return i;
}
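/*
 * Illustrative note (an assumption): the sequence-ID scheme used
 * above gives every port two IDs: portno + 1 (1-4) for its ATA
 * engine and portno + 1 + 4 (5-8) for Host DMA, which is why the
 * interrupt handler later treats SEQ mask bits above 4 as HDMA
 * completions.  A hypothetical helper:
 */
static inline u8 pdc_seq_id(unsigned int portno, bool hdma)
{
	return portno + 1 + (hdma ? 4 : 0);
}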
static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
unsigned int portno)
{
unsigned int dw;
u32 tmp;
__le32 *buf32 = (__le32 *) buf;
unsigned int host_sg = PDC_20621_DIMM_BASE +
(PDC_DIMM_WINDOW_STEP * portno) +
PDC_DIMM_HOST_PRD;
unsigned int dimm_sg = PDC_20621_DIMM_BASE +
(PDC_DIMM_WINDOW_STEP * portno) +
PDC_DIMM_HPKT_PRD;
dw = PDC_DIMM_HOST_PKT >> 2;
/*
* Set up Host DMA packet
*/
if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
tmp = PDC_PKT_READ;
else
tmp = 0;
tmp |= ((portno + 1 + 4) << 16); /* seq. id */
tmp |= (0xff << 24); /* delay seq. id */
buf32[dw + 0] = cpu_to_le32(tmp);
buf32[dw + 1] = cpu_to_le32(host_sg);
buf32[dw + 2] = cpu_to_le32(dimm_sg);
buf32[dw + 3] = 0;
}
static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
{
struct scatterlist *sg;
struct ata_port *ap = qc->ap;
struct pdc_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
unsigned int portno = ap->port_no;
unsigned int i, si, idx, total_len = 0, sgt_len;
__le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
/*
* Build S/G table
*/
idx = 0;
for_each_sg(qc->sg, sg, qc->n_elem, si) {
buf[idx++] = cpu_to_le32(sg_dma_address(sg));
buf[idx++] = cpu_to_le32(sg_dma_len(sg));
total_len += sg_dma_len(sg);
}
buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
sgt_len = idx * 4;
/*
* Build ATA, host DMA packets
*/
pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len);
pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len);
i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
if (qc->tf.flags & ATA_TFLAG_LBA48)
i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
else
i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
/* copy three S/G tables and two packets to DIMM MMIO window */
memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
&pp->dimm_buf, PDC_DIMM_HEADER_SZ);
memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
PDC_DIMM_HOST_PRD,
&pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
/* force host FIFO dump */
writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
readl(dimm_mmio); /* MMIO PCI posting flush */
ata_port_dbg(ap, "ata pkt buf ofs %u, prd size %u, mmio copied\n",
i, sgt_len);
}
static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pdc_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
unsigned int portno = ap->port_no;
unsigned int i;
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
if (qc->tf.flags & ATA_TFLAG_LBA48)
i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
else
i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
/* copy three S/G tables and two packets to DIMM MMIO window */
memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
&pp->dimm_buf, PDC_DIMM_HEADER_SZ);
/* force host FIFO dump */
writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
readl(dimm_mmio); /* MMIO PCI posting flush */
ata_port_dbg(ap, "ata pkt buf ofs %u, mmio copied\n", i);
}
static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
{
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
pdc20621_dma_prep(qc);
break;
case ATA_PROT_NODATA:
pdc20621_nodata_prep(qc);
break;
default:
break;
}
return AC_ERR_OK;
}
static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
unsigned int seq,
u32 pkt_ofs)
{
struct ata_port *ap = qc->ap;
struct ata_host *host = ap->host;
void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
readl(mmio + PDC_HDMA_PKT_SUBMIT); /* flush */
}
static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
unsigned int seq,
u32 pkt_ofs)
{
struct ata_port *ap = qc->ap;
struct pdc_host_priv *pp = ap->host->private_data;
unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
if (!pp->doing_hdma) {
__pdc20621_push_hdma(qc, seq, pkt_ofs);
pp->doing_hdma = 1;
return;
}
pp->hdma[idx].qc = qc;
pp->hdma[idx].seq = seq;
pp->hdma[idx].pkt_ofs = pkt_ofs;
pp->hdma_prod++;
}
static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pdc_host_priv *pp = ap->host->private_data;
unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
/* if nothing on queue, we're done */
if (pp->hdma_prod == pp->hdma_cons) {
pp->doing_hdma = 0;
return;
}
__pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
pp->hdma[idx].pkt_ofs);
pp->hdma_cons++;
}
static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
unsigned int port_no = ap->port_no;
void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
dimm_mmio += PDC_DIMM_HOST_PKT;
ata_port_dbg(ap, "HDMA 0x%08X 0x%08X 0x%08X 0x%08X\n",
readl(dimm_mmio), readl(dimm_mmio + 4),
readl(dimm_mmio + 8), readl(dimm_mmio + 12));
}
static void pdc20621_packet_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_host *host = ap->host;
unsigned int port_no = ap->port_no;
void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
u8 seq = (u8) (port_no + 1);
unsigned int port_ofs;
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
wmb(); /* flush PRD, pkt writes */
port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
/* if writing, we (1) DMA to DIMM, then (2) do ATA command */
if (rw && qc->tf.protocol == ATA_PROT_DMA) {
seq += 4;
pdc20621_dump_hdma(qc);
pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
ata_port_dbg(ap, "queued ofs 0x%x (%u), seq %u\n",
port_ofs + PDC_DIMM_HOST_PKT,
port_ofs + PDC_DIMM_HOST_PKT,
seq);
} else {
writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
writel(port_ofs + PDC_DIMM_ATA_PKT,
ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
ata_port_dbg(ap, "submitted ofs 0x%x (%u), seq %u\n",
port_ofs + PDC_DIMM_ATA_PKT,
port_ofs + PDC_DIMM_ATA_PKT,
seq);
}
}
static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
{
switch (qc->tf.protocol) {
case ATA_PROT_NODATA:
if (qc->tf.flags & ATA_TFLAG_POLLING)
break;
fallthrough;
case ATA_PROT_DMA:
pdc20621_packet_start(qc);
return 0;
case ATAPI_PROT_DMA:
BUG();
break;
default:
break;
}
return ata_sff_qc_issue(qc);
}
static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
struct ata_queued_cmd *qc,
unsigned int doing_hdma,
void __iomem *mmio)
{
unsigned int port_no = ap->port_no;
unsigned int port_ofs =
PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
u8 status;
unsigned int handled = 0;
if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */
(!(qc->tf.flags & ATA_TFLAG_WRITE))) {
/* step two - DMA from DIMM to host */
if (doing_hdma) {
ata_port_dbg(ap, "read hdma, 0x%x 0x%x\n",
readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
/* get drive status; clear intr; complete txn */
qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
ata_qc_complete(qc);
pdc20621_pop_hdma(qc);
}
/* step one - exec ATA command */
else {
u8 seq = (u8) (port_no + 1 + 4);
ata_port_dbg(ap, "read ata, 0x%x 0x%x\n",
readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
/* submit hdma pkt */
pdc20621_dump_hdma(qc);
pdc20621_push_hdma(qc, seq,
port_ofs + PDC_DIMM_HOST_PKT);
}
handled = 1;
} else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */
/* step one - DMA from host to DIMM */
if (doing_hdma) {
u8 seq = (u8) (port_no + 1);
ata_port_dbg(ap, "write hdma, 0x%x 0x%x\n",
readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
/* submit ata pkt */
writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
readl(mmio + PDC_20621_SEQCTL + (seq * 4));
writel(port_ofs + PDC_DIMM_ATA_PKT,
ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
}
/* step two - execute ATA command */
else {
ata_port_dbg(ap, "write ata, 0x%x 0x%x\n",
readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
/* get drive status; clear intr; complete txn */
qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
ata_qc_complete(qc);
pdc20621_pop_hdma(qc);
}
handled = 1;
/* command completion, but no data xfer */
} else if (qc->tf.protocol == ATA_PROT_NODATA) {
status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
ata_port_dbg(ap, "BUS_NODATA (drv_stat 0x%X)\n", status);
qc->err_mask |= ac_err_mask(status);
ata_qc_complete(qc);
handled = 1;
} else {
ap->stats.idle_irq++;
}
return handled;
}
static void pdc20621_irq_clear(struct ata_port *ap)
{
ioread8(ap->ioaddr.status_addr);
}
static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct ata_port *ap;
u32 mask = 0;
unsigned int i, tmp, port_no;
unsigned int handled = 0;
void __iomem *mmio_base;
if (!host || !host->iomap[PDC_MMIO_BAR])
return IRQ_NONE;
mmio_base = host->iomap[PDC_MMIO_BAR];
/* reading should also clear interrupts */
mmio_base += PDC_CHIP0_OFS;
mask = readl(mmio_base + PDC_20621_SEQMASK);
if (mask == 0xffffffff)
return IRQ_NONE;
mask &= 0xffff; /* only 16 tags possible */
if (!mask)
return IRQ_NONE;
spin_lock(&host->lock);
for (i = 1; i < 9; i++) {
port_no = i - 1;
if (port_no > 3)
port_no -= 4;
if (port_no >= host->n_ports)
ap = NULL;
else
ap = host->ports[port_no];
tmp = mask & (1 << i);
if (ap)
ata_port_dbg(ap, "seq %u, tmp %x\n", i, tmp);
if (tmp && ap) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
handled += pdc20621_host_intr(ap, qc, (i > 4),
mmio_base);
}
}
spin_unlock(&host->lock);
return IRQ_RETVAL(handled);
}
static void pdc_freeze(struct ata_port *ap)
{
void __iomem *mmio = ap->ioaddr.cmd_addr;
u32 tmp;
/* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */
tmp = readl(mmio + PDC_CTLSTAT);
tmp |= PDC_MASK_INT;
tmp &= ~PDC_DMA_ENABLE;
writel(tmp, mmio + PDC_CTLSTAT);
readl(mmio + PDC_CTLSTAT); /* flush */
}
static void pdc_thaw(struct ata_port *ap)
{
void __iomem *mmio = ap->ioaddr.cmd_addr;
u32 tmp;
/* FIXME: start HDMA engine, if zero ATA engines running */
/* clear IRQ */
ioread8(ap->ioaddr.status_addr);
/* turn IRQ back on */
tmp = readl(mmio + PDC_CTLSTAT);
tmp &= ~PDC_MASK_INT;
writel(tmp, mmio + PDC_CTLSTAT);
readl(mmio + PDC_CTLSTAT); /* flush */
}
static void pdc_reset_port(struct ata_port *ap)
{
void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
unsigned int i;
u32 tmp;
/* FIXME: handle HDMA copy engine */
for (i = 11; i > 0; i--) {
tmp = readl(mmio);
if (tmp & PDC_RESET)
break;
udelay(100);
tmp |= PDC_RESET;
writel(tmp, mmio);
}
tmp &= ~PDC_RESET;
writel(tmp, mmio);
readl(mmio); /* flush */
}
static int pdc_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
pdc_reset_port(link->ap);
return ata_sff_softreset(link, class, deadline);
}
static void pdc_error_handler(struct ata_port *ap)
{
if (!ata_port_is_frozen(ap))
pdc_reset_port(ap);
ata_sff_error_handler(ap);
}
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
/* make DMA engine forget about the failed command */
if (qc->flags & ATA_QCFLAG_EH)
pdc_reset_port(ap);
}
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
{
u8 *scsicmd = qc->scsicmd->cmnd;
int pio = 1; /* atapi dma off by default */
/* Whitelist commands that may use DMA. */
switch (scsicmd[0]) {
case WRITE_12:
case WRITE_10:
case WRITE_6:
case READ_12:
case READ_10:
case READ_6:
case 0xad: /* READ_DVD_STRUCTURE */
case 0xbe: /* READ_CD */
pio = 0;
}
/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
if (scsicmd[0] == WRITE_10) {
unsigned int lba =
(scsicmd[2] << 24) |
(scsicmd[3] << 16) |
(scsicmd[4] << 8) |
scsicmd[5];
if (lba >= 0xFFFF4FA2)
pio = 1;
}
return pio;
}
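/*
 * Illustrative note (an assumption, not original code): the shifts in
 * pdc_check_atapi_dma() simply read the WRITE_10 LBA field as a
 * 32-bit big-endian value (get_unaligned_be32(&scsicmd[2]) would be
 * the equivalent kernel helper), and 0xFFFF4FA2 is -45150 viewed as a
 * signed 32-bit number, matching the "shall use PIO" comment.  A
 * self-contained sketch of the extraction:
 */
static inline u32 pdc_write10_lba(const u8 *cdb)
{
	/* bytes 2-5 of a WRITE_10 CDB hold the LBA, big-endian */
	return ((u32)cdb[2] << 24) | ((u32)cdb[3] << 16) |
	       ((u32)cdb[4] << 8) | cdb[5];
}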
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
WARN_ON(tf->protocol == ATA_PROT_DMA ||
tf->protocol == ATAPI_PROT_DMA);
ata_sff_tf_load(ap, tf);
}
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
WARN_ON(tf->protocol == ATA_PROT_DMA ||
tf->protocol == ATAPI_PROT_DMA);
ata_sff_exec_command(ap, tf);
}
static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
{
port->cmd_addr = base;
port->data_addr = base;
port->feature_addr =
port->error_addr = base + 0x4;
port->nsect_addr = base + 0x8;
port->lbal_addr = base + 0xc;
port->lbam_addr = base + 0x10;
port->lbah_addr = base + 0x14;
port->device_addr = base + 0x18;
port->command_addr =
port->status_addr = base + 0x1c;
port->altstatus_addr =
port->ctl_addr = base + 0x38;
}
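/*
 * Illustrative note (an assumption): unlike classic SFF controllers
 * with byte-spaced taskfile registers, the PDC20621 spaces them four
 * bytes apart, so shadow register n sits at base + 4 * n, with the
 * control/altstatus register further out at base + 0x38.  A
 * hypothetical helper for the data..command/status block:
 */
static inline void __iomem *pdc_tf_reg(void __iomem *base, unsigned int n)
{
	return base + 4 * n;	/* n = 0 (data) .. 7 (command/status) */
}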
static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
u32 offset, u32 size)
{
u32 window_size;
u16 idx;
u8 page_mask;
long dist;
void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
page_mask = 0x00;
window_size = 0x2000 * 4; /* 0x2000 dwords = 32 KiB window */
idx = (u16) (offset / window_size);
writel(0x01, mmio + PDC_GENERAL_CTLR);
readl(mmio + PDC_GENERAL_CTLR);
writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
readl(mmio + PDC_DIMM_WINDOW_CTLR);
offset -= (idx * window_size);
idx++;
dist = ((long) (window_size - (offset + size))) >= 0 ? size :
(long) (window_size - offset);
memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
psource += dist;
size -= dist;
for (; (long) size >= (long) window_size ;) {
writel(0x01, mmio + PDC_GENERAL_CTLR);
readl(mmio + PDC_GENERAL_CTLR);
writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
readl(mmio + PDC_DIMM_WINDOW_CTLR);
memcpy_fromio(psource, dimm_mmio, window_size / 4);
psource += window_size;
size -= window_size;
idx++;
}
if (size) {
writel(0x01, mmio + PDC_GENERAL_CTLR);
readl(mmio + PDC_GENERAL_CTLR);
writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
readl(mmio + PDC_DIMM_WINDOW_CTLR);
memcpy_fromio(psource, dimm_mmio, size / 4);
}
}
static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
u32 offset, u32 size)
{
u32 window_size;
u16 idx;
u8 page_mask;
long dist;
void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
page_mask = 0x00;
window_size = 0x2000 * 4; /* 0x2000 dwords = 32 KiB window */
idx = (u16) (offset / window_size);
writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
readl(mmio + PDC_DIMM_WINDOW_CTLR);
offset -= (idx * window_size);
idx++;
dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
(long) (window_size - offset);
memcpy_toio(dimm_mmio + offset / 4, psource, dist);
writel(0x01, mmio + PDC_GENERAL_CTLR);
readl(mmio + PDC_GENERAL_CTLR);
psource += dist;
size -= dist;
for (; (long) size >= (long) window_size ;) {
writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
readl(mmio + PDC_DIMM_WINDOW_CTLR);
memcpy_toio(dimm_mmio, psource, window_size / 4);
writel(0x01, mmio + PDC_GENERAL_CTLR);
readl(mmio + PDC_GENERAL_CTLR);
psource += window_size;
size -= window_size;
idx++;
}
if (size) {
writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
readl(mmio + PDC_DIMM_WINDOW_CTLR);
memcpy_toio(dimm_mmio, psource, size / 4);
writel(0x01, mmio + PDC_GENERAL_CTLR);
readl(mmio + PDC_GENERAL_CTLR);
}
}
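/*
 * Illustrative sketch (an assumption, not original code): both DIMM
 * accessors above implement the same windowed copy: select the
 * 32 KiB window containing "offset", copy up to the end of that
 * window, then step through full windows until the tail.  The
 * first-chunk length computed by the "dist" expressions reduces to:
 */
static inline u32 pdc_first_chunk_len(u32 in_window_ofs, u32 size,
				      u32 window_size)
{
	return min(size, window_size - in_window_ofs);
}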
static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
u32 subaddr, u32 *pdata)
{
void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
u32 i2creg = 0;
u32 status;
u32 count = 0;
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
i2creg |= device << 24;
i2creg |= subaddr << 16;
/* Set the device and subaddress */
writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
readl(mmio + PDC_I2C_ADDR_DATA);
/* Write Control to perform read operation, mask int */
writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
mmio + PDC_I2C_CONTROL);
for (count = 0; count <= 1000; count ++) {
status = readl(mmio + PDC_I2C_CONTROL);
if (status & PDC_I2C_COMPLETE) {
status = readl(mmio + PDC_I2C_ADDR_DATA);
break;
} else if (count == 1000)
return 0;
}
*pdata = (status >> 8) & 0x000000ff;
return 1;
}
static int pdc20621_detect_dimm(struct ata_host *host)
{
u32 data = 0;
if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
if (data == 100)
return 100;
} else
return 0;
if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
if (data <= 0x75)
return 133;
} else
return 0;
return 0;
}
static int pdc20621_prog_dimm0(struct ata_host *host)
{
u32 spd0[50];
u32 data = 0;
int size, i;
u8 bdimmsize;
void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
static const struct {
unsigned int reg;
unsigned int ofs;
} pdc_i2c_read_data [] = {
{ PDC_DIMM_SPD_TYPE, 11 },
{ PDC_DIMM_SPD_REFRESH_RATE, 12 },
{ PDC_DIMM_SPD_COLUMN_NUM, 4 },
{ PDC_DIMM_SPD_ATTRIBUTE, 21 },
{ PDC_DIMM_SPD_ROW_NUM, 3 },
{ PDC_DIMM_SPD_BANK_NUM, 17 },
{ PDC_DIMM_SPD_MODULE_ROW, 5 },
{ PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
{ PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
{ PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
{ PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
{ PDC_DIMM_SPD_CAS_LATENCY, 18 },
};
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
pdc_i2c_read_data[i].reg,
&spd0[pdc_i2c_read_data[i].ofs]);
data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
((((spd0[27] + 9) / 10) - 1) << 8) ;
data |= (((((spd0[29] > spd0[28])
? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
if (spd0[18] & 0x08)
data |= ((0x03) << 14);
else if (spd0[18] & 0x04)
data |= ((0x02) << 14);
else if (spd0[18] & 0x01)
data |= ((0x01) << 14);
else
data |= (0 << 14);
/*
Calculate bDIMMSize (a power of two) and merge the DIMM size
into the module start/end address field.
*/
bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */
data |= (((size / 16) - 1) << 16);
data |= (0 << 23);
data |= 8;
writel(data, mmio + PDC_DIMM0_CONTROL);
readl(mmio + PDC_DIMM0_CONTROL);
return size;
}
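/*
 * Worked example (an assumption using plausible SDR SDRAM SPD bytes,
 * not measured data): with 10 column bits, 2 module rows, 12 row bits
 * and 4 banks, bdimmsize = 10 + 1 + 12 + 2 + 3 = 28, so the module is
 * (1 << 28) >> 20 = 256 MB and ((256 / 16) - 1) = 15 is merged into
 * the size field of the DIMM0 control register.
 */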
static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
{
u32 data, spd0;
int error, i;
void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
/*
Set To Default : DIMM Module Global Control Register (0x022259F1)
DIMM Arbitration Disable (bit 20)
DIMM Data/Control Output Driving Selection (bit12 - bit15)
Refresh Enable (bit 17)
*/
data = 0x022259F1;
writel(data, mmio + PDC_SDRAM_CONTROL);
readl(mmio + PDC_SDRAM_CONTROL);
/* Turn on for ECC */
if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_TYPE, &spd0)) {
dev_err(host->dev,
"Failed in i2c read: device=%#x, subaddr=%#x\n",
PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
return 1;
}
if (spd0 == 0x02) {
data |= (0x01 << 16);
writel(data, mmio + PDC_SDRAM_CONTROL);
readl(mmio + PDC_SDRAM_CONTROL);
dev_err(host->dev, "Local DIMM ECC Enabled\n");
}
/* DIMM Initialization Select/Enable (bit 18/19) */
data &= (~(1<<18));
data |= (1<<19);
writel(data, mmio + PDC_SDRAM_CONTROL);
error = 1;
for (i = 1; i <= 10; i++) { /* polling ~5 secs */
data = readl(mmio + PDC_SDRAM_CONTROL);
if (!(data & (1<<19))) {
error = 0;
break;
}
msleep(i*100);
}
return error;
}
static unsigned int pdc20621_dimm_init(struct ata_host *host)
{
int speed, size, length;
u32 addr, spd0, pci_status;
u32 time_period = 0;
u32 tcount = 0;
u32 ticks = 0;
u32 clock = 0;
u32 fparam = 0;
void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
/* Initialize PLL based upon PCI Bus Frequency */
/* Initialize Time Period Register */
writel(0xffffffff, mmio + PDC_TIME_PERIOD);
time_period = readl(mmio + PDC_TIME_PERIOD);
dev_dbg(host->dev, "Time Period Register (0x40): 0x%x\n", time_period);
/* Enable timer */
writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
readl(mmio + PDC_TIME_CONTROL);
/* Wait 3 seconds */
msleep(3000);
/*
When the timer is enabled, the counter is decremented every
internal clock cycle.
*/
tcount = readl(mmio + PDC_TIME_COUNTER);
dev_dbg(host->dev, "Time Counter Register (0x44): 0x%x\n", tcount);
/*
If SX4 is on PCI-X bus, after 3 seconds, the timer counter
register should be >= (0xffffffff - 3x10^8).
*/
if (tcount >= PCI_X_TCOUNT) {
ticks = (time_period - tcount);
dev_dbg(host->dev, "Num counters 0x%x (%d)\n", ticks, ticks);
clock = (ticks / 300000);
dev_dbg(host->dev, "10 * Internal clk = 0x%x (%d)\n",
clock, clock);
clock = (clock * 33);
dev_dbg(host->dev, "10 * Internal clk * 33 = 0x%x (%d)\n",
clock, clock);
/* PLL F Param (bit 22:16) */
fparam = (1400000 / clock) - 2;
dev_dbg(host->dev, "PLL F Param: 0x%x (%d)\n", fparam, fparam);
/* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
pci_status = (0x8a001824 | (fparam << 16));
} else
pci_status = PCI_PLL_INIT;
/* Initialize PLL. */
dev_dbg(host->dev, "pci_status: 0x%x\n", pci_status);
writel(pci_status, mmio + PDC_CTL_STATUS);
readl(mmio + PDC_CTL_STATUS);
/*
Read the DIMM SPD over the I2C interface and program the
DIMM module controller accordingly.
*/
if (!(speed = pdc20621_detect_dimm(host))) {
dev_err(host->dev, "Detect Local DIMM Fail\n");
return 1; /* DIMM error */
}
dev_dbg(host->dev, "Local DIMM Speed = %d\n", speed);
/* Programming DIMM0 Module Control Register (index_CID0:80h) */
size = pdc20621_prog_dimm0(host);
dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size);
/* Programming DIMM Module Global Control Register (index_CID0:88h) */
if (pdc20621_prog_dimm_global(host)) {
dev_err(host->dev,
"Programming DIMM Module Global Control Register Fail\n");
return 1;
}
if (dimm_test) {
u8 test_pattern1[40] =
{0x55,0xAA,'P','r','o','m','i','s','e',' ',
'N','o','t',' ','Y','e','t',' ',
'D','e','f','i','n','e','d',' ',
'1','.','1','0',
'9','8','0','3','1','6','1','2',0,0};
u8 test_pattern2[40] = {0};
pdc20621_put_to_dimm(host, test_pattern2, 0x10040, 40);
pdc20621_put_to_dimm(host, test_pattern2, 0x40, 40);
pdc20621_put_to_dimm(host, test_pattern1, 0x10040, 40);
pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_pattern2[0],
test_pattern2[1], &(test_pattern2[2]));
pdc20621_get_from_dimm(host, test_pattern2, 0x10040,
40);
dev_info(host->dev, "DIMM test pattern 2: %x, %x, %s\n",
test_pattern2[0],
test_pattern2[1], &(test_pattern2[2]));
pdc20621_put_to_dimm(host, test_pattern1, 0x40, 40);
pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
dev_info(host->dev, "DIMM test pattern 3: %x, %x, %s\n",
test_pattern2[0],
test_pattern2[1], &(test_pattern2[2]));
}
/* ECC initialization. */
if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_TYPE, &spd0)) {
dev_err(host->dev,
"Failed in i2c read: device=%#x, subaddr=%#x\n",
PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
return 1;
}
if (spd0 == 0x02) {
void *buf;
dev_dbg(host->dev, "Start ECC initialization\n");
addr = 0;
length = size * 1024 * 1024;
buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
if (!buf)
return 1;
while (addr < length) {
pdc20621_put_to_dimm(host, buf, addr,
ECC_ERASE_BUF_SZ);
addr += ECC_ERASE_BUF_SZ;
}
kfree(buf);
dev_dbg(host->dev, "Finish ECC initialization\n");
}
return 0;
}
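/*
 * Worked example for the PLL math above (an assumption, not measured
 * data): on a 33 MHz bus the counter decrements about 99,000,000
 * times during the 3 second sleep, so ticks / 300000 = 330 ("10 *
 * internal clock" in MHz), clock = 330 * 33 = 10890, and the PLL F
 * parameter becomes 1400000 / 10890 - 2 = 126, which is then merged
 * into bits 22:16 of the control/status word.
 */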
static void pdc_20621_init(struct ata_host *host)
{
u32 tmp;
void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
/*
* Select page 0x40 for our 32k DIMM window
*/
tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */
writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
/*
* Reset Host DMA
*/
tmp = readl(mmio + PDC_HDMA_CTLSTAT);
tmp |= PDC_RESET;
writel(tmp, mmio + PDC_HDMA_CTLSTAT);
readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
udelay(10);
tmp = readl(mmio + PDC_HDMA_CTLSTAT);
tmp &= ~PDC_RESET;
writel(tmp, mmio + PDC_HDMA_CTLSTAT);
readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
}
static int pdc_sata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
const struct ata_port_info *ppi[] =
{ &pdc_port_info[ent->driver_data], NULL };
struct ata_host *host;
struct pdc_host_priv *hpriv;
int i, rc;
ata_print_version_once(&pdev->dev, DRV_VERSION);
/* allocate host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
if (!host || !hpriv)
return -ENOMEM;
host->private_data = hpriv;
/* acquire resources and fill host */
rc = pcim_enable_device(pdev);
if (rc)
return rc;
rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;
host->iomap = pcim_iomap_table(pdev);
for (i = 0; i < 4; i++) {
struct ata_port *ap = host->ports[i];
void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
unsigned int offset = 0x200 + i * 0x80;
pdc_sata_setup_port(&ap->ioaddr, base + offset);
ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
}
/* configure and activate */
rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
if (rc)
return rc;
if (pdc20621_dimm_init(host))
return -ENOMEM;
pdc_20621_init(host);
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
IRQF_SHARED, &pdc_sata_sht);
}
module_pci_driver(pdc_sata_pci_driver);
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Promise SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
MODULE_VERSION(DRV_VERSION);
/* === Source: linux-master, drivers/ata/sata_sx4.c === */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Broadcom SATA3 AHCI Controller Driver
*
* Copyright © 2009-2015 Broadcom Corporation
*/
#include <linux/ahci_platform.h>
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/libata.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/string.h>
#include "ahci.h"
#define DRV_NAME "brcm-ahci"
#define SATA_TOP_CTRL_VERSION 0x0
#define SATA_TOP_CTRL_BUS_CTRL 0x4
#define MMIO_ENDIAN_SHIFT 0 /* CPU->AHCI */
#define DMADESC_ENDIAN_SHIFT 2 /* AHCI->DDR */
#define DMADATA_ENDIAN_SHIFT 4 /* AHCI->DDR */
#define PIODATA_ENDIAN_SHIFT 6
#define ENDIAN_SWAP_NONE 0
#define ENDIAN_SWAP_FULL 2
#define SATA_TOP_CTRL_TP_CTRL 0x8
#define SATA_TOP_CTRL_PHY_CTRL 0xc
#define SATA_TOP_CTRL_PHY_CTRL_1 0x0
#define SATA_TOP_CTRL_1_PHY_DEFAULT_POWER_STATE BIT(14)
#define SATA_TOP_CTRL_PHY_CTRL_2 0x4
#define SATA_TOP_CTRL_2_SW_RST_MDIOREG BIT(0)
#define SATA_TOP_CTRL_2_SW_RST_OOB BIT(1)
#define SATA_TOP_CTRL_2_SW_RST_RX BIT(2)
#define SATA_TOP_CTRL_2_SW_RST_TX BIT(3)
#define SATA_TOP_CTRL_2_PHY_GLOBAL_RESET BIT(14)
#define SATA_TOP_CTRL_PHY_OFFS 0x8
#define SATA_TOP_MAX_PHYS 2
#define SATA_FIRST_PORT_CTRL 0x700
#define SATA_NEXT_PORT_CTRL_OFFSET 0x80
#define SATA_PORT_PCTRL6(reg_base) (reg_base + 0x18)
/* On big-endian MIPS, buses are reversed to big endian, so switch them back */
#if defined(CONFIG_MIPS) && defined(__BIG_ENDIAN)
#define DATA_ENDIAN 2 /* AHCI->DDR inbound accesses */
#define MMIO_ENDIAN 2 /* CPU->AHCI outbound accesses */
#else
#define DATA_ENDIAN 0
#define MMIO_ENDIAN 0
#endif
#define BUS_CTRL_ENDIAN_CONF \
((DATA_ENDIAN << DMADATA_ENDIAN_SHIFT) | \
(DATA_ENDIAN << DMADESC_ENDIAN_SHIFT) | \
(MMIO_ENDIAN << MMIO_ENDIAN_SHIFT))
#define BUS_CTRL_ENDIAN_NSP_CONF \
(0x02 << DMADATA_ENDIAN_SHIFT | 0x02 << DMADESC_ENDIAN_SHIFT)
#define BUS_CTRL_ENDIAN_CONF_MASK \
(0x3 << MMIO_ENDIAN_SHIFT | 0x3 << DMADESC_ENDIAN_SHIFT | \
0x3 << DMADATA_ENDIAN_SHIFT | 0x3 << PIODATA_ENDIAN_SHIFT)
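/*
 * Illustrative sketch (an assumption, not original code): the bus
 * control register packs one 2-bit endian-swap field per traffic
 * type at the *_ENDIAN_SHIFT offsets above, with ENDIAN_SWAP_NONE
 * (0) and ENDIAN_SWAP_FULL (2) as the interesting settings.  A
 * hypothetical helper encoding one field:
 */
static inline u32 brcm_endian_field(u32 swap, unsigned int shift)
{
	return (swap & 0x3) << shift;	/* 2-bit field at "shift" */
}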
enum brcm_ahci_version {
BRCM_SATA_BCM7425 = 1,
BRCM_SATA_BCM7445,
BRCM_SATA_NSP,
BRCM_SATA_BCM7216,
};
enum brcm_ahci_quirks {
BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(0),
};
struct brcm_ahci_priv {
struct device *dev;
void __iomem *top_ctrl;
u32 port_mask;
u32 quirks;
enum brcm_ahci_version version;
struct reset_control *rcdev_rescal;
struct reset_control *rcdev_ahci;
};
static inline u32 brcm_sata_readreg(void __iomem *addr)
{
/*
* MIPS endianness is configured by boot strap, which also reverses all
* bus endianness (i.e., big-endian CPU + big endian bus ==> native
* endian I/O).
*
* Other architectures (e.g., ARM) either do not support big endian, or
* else leave I/O in little endian mode.
*/
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
return __raw_readl(addr);
else
return readl_relaxed(addr);
}
static inline void brcm_sata_writereg(u32 val, void __iomem *addr)
{
/* See brcm_sata_readreg() comments */
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
__raw_writel(val, addr);
else
writel_relaxed(val, addr);
}
static void brcm_sata_alpm_init(struct ahci_host_priv *hpriv)
{
struct brcm_ahci_priv *priv = hpriv->plat_data;
u32 port_ctrl, host_caps;
int i;
/* Enable support for ALPM */
host_caps = readl(hpriv->mmio + HOST_CAP);
if (!(host_caps & HOST_CAP_ALPM))
hpriv->flags |= AHCI_HFLAG_YES_ALPM;
/*
* Adjust timeout to allow PLL sufficient time to lock while waking
* up from slumber mode.
*/
for (i = 0, port_ctrl = SATA_FIRST_PORT_CTRL;
i < SATA_TOP_MAX_PHYS;
i++, port_ctrl += SATA_NEXT_PORT_CTRL_OFFSET) {
if (priv->port_mask & BIT(i))
writel(0xff1003fc,
hpriv->mmio + SATA_PORT_PCTRL6(port_ctrl));
}
}
static void brcm_sata_phy_enable(struct brcm_ahci_priv *priv, int port)
{
void __iomem *phyctrl = priv->top_ctrl + SATA_TOP_CTRL_PHY_CTRL +
(port * SATA_TOP_CTRL_PHY_OFFS);
void __iomem *p;
u32 reg;
if (priv->quirks & BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE)
return;
/* clear PHY_DEFAULT_POWER_STATE */
p = phyctrl + SATA_TOP_CTRL_PHY_CTRL_1;
reg = brcm_sata_readreg(p);
reg &= ~SATA_TOP_CTRL_1_PHY_DEFAULT_POWER_STATE;
brcm_sata_writereg(reg, p);
/* reset the PHY digital logic */
p = phyctrl + SATA_TOP_CTRL_PHY_CTRL_2;
reg = brcm_sata_readreg(p);
reg &= ~(SATA_TOP_CTRL_2_SW_RST_MDIOREG | SATA_TOP_CTRL_2_SW_RST_OOB |
SATA_TOP_CTRL_2_SW_RST_RX);
reg |= SATA_TOP_CTRL_2_SW_RST_TX;
brcm_sata_writereg(reg, p);
reg = brcm_sata_readreg(p);
reg |= SATA_TOP_CTRL_2_PHY_GLOBAL_RESET;
brcm_sata_writereg(reg, p);
reg = brcm_sata_readreg(p);
reg &= ~SATA_TOP_CTRL_2_PHY_GLOBAL_RESET;
brcm_sata_writereg(reg, p);
(void)brcm_sata_readreg(p);
}
static void brcm_sata_phy_disable(struct brcm_ahci_priv *priv, int port)
{
void __iomem *phyctrl = priv->top_ctrl + SATA_TOP_CTRL_PHY_CTRL +
(port * SATA_TOP_CTRL_PHY_OFFS);
void __iomem *p;
u32 reg;
if (priv->quirks & BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE)
return;
/* power-off the PHY digital logic */
p = phyctrl + SATA_TOP_CTRL_PHY_CTRL_2;
reg = brcm_sata_readreg(p);
reg |= (SATA_TOP_CTRL_2_SW_RST_MDIOREG | SATA_TOP_CTRL_2_SW_RST_OOB |
SATA_TOP_CTRL_2_SW_RST_RX | SATA_TOP_CTRL_2_SW_RST_TX |
SATA_TOP_CTRL_2_PHY_GLOBAL_RESET);
brcm_sata_writereg(reg, p);
/* set PHY_DEFAULT_POWER_STATE */
p = phyctrl + SATA_TOP_CTRL_PHY_CTRL_1;
reg = brcm_sata_readreg(p);
reg |= SATA_TOP_CTRL_1_PHY_DEFAULT_POWER_STATE;
brcm_sata_writereg(reg, p);
}
static void brcm_sata_phys_enable(struct brcm_ahci_priv *priv)
{
int i;
for (i = 0; i < SATA_TOP_MAX_PHYS; i++)
if (priv->port_mask & BIT(i))
brcm_sata_phy_enable(priv, i);
}
static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv)
{
int i;
for (i = 0; i < SATA_TOP_MAX_PHYS; i++)
if (priv->port_mask & BIT(i))
brcm_sata_phy_disable(priv, i);
}
static u32 brcm_ahci_get_portmask(struct ahci_host_priv *hpriv,
struct brcm_ahci_priv *priv)
{
u32 impl;
impl = readl(hpriv->mmio + HOST_PORTS_IMPL);
if (fls(impl) > SATA_TOP_MAX_PHYS)
dev_warn(priv->dev, "warning: more ports than PHYs (%#x)\n",
impl);
else if (!impl)
dev_info(priv->dev, "no ports found\n");
return impl;
}
static void brcm_sata_init(struct brcm_ahci_priv *priv)
{
void __iomem *ctrl = priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL;
u32 data;
/* Configure endianness */
data = brcm_sata_readreg(ctrl);
data &= ~BUS_CTRL_ENDIAN_CONF_MASK;
if (priv->version == BRCM_SATA_NSP)
data |= BUS_CTRL_ENDIAN_NSP_CONF;
else
data |= BUS_CTRL_ENDIAN_CONF;
brcm_sata_writereg(data, ctrl);
}
static unsigned int brcm_ahci_read_id(struct ata_device *dev,
struct ata_taskfile *tf, __le16 *id)
{
struct ata_port *ap = dev->link->ap;
struct ata_host *host = ap->host;
struct ahci_host_priv *hpriv = host->private_data;
struct brcm_ahci_priv *priv = hpriv->plat_data;
void __iomem *mmio = hpriv->mmio;
unsigned int err_mask;
unsigned long flags;
int i, rc;
u32 ctl;
/* Try to read the device ID and, if this fails, proceed with the
* recovery sequence below
*/
err_mask = ata_do_dev_read_id(dev, tf, id);
if (likely(!err_mask))
return err_mask;
/* Disable host interrupts */
spin_lock_irqsave(&host->lock, flags);
ctl = readl(mmio + HOST_CTL);
ctl &= ~HOST_IRQ_EN;
writel(ctl, mmio + HOST_CTL);
readl(mmio + HOST_CTL); /* flush */
spin_unlock_irqrestore(&host->lock, flags);
/* Perform the SATA PHY reset sequence */
brcm_sata_phy_disable(priv, ap->port_no);
/* Reset the SATA clock */
ahci_platform_disable_clks(hpriv);
msleep(10);
ahci_platform_enable_clks(hpriv);
msleep(10);
/* Bring the PHY back on */
brcm_sata_phy_enable(priv, ap->port_no);
/* Re-initialize and calibrate the PHY */
for (i = 0; i < hpriv->nports; i++) {
rc = phy_init(hpriv->phys[i]);
if (rc)
goto disable_phys;
rc = phy_calibrate(hpriv->phys[i]);
if (rc) {
phy_exit(hpriv->phys[i]);
goto disable_phys;
}
}
/* Re-enable host interrupts */
spin_lock_irqsave(&host->lock, flags);
ctl = readl(mmio + HOST_CTL);
ctl |= HOST_IRQ_EN;
writel(ctl, mmio + HOST_CTL);
readl(mmio + HOST_CTL); /* flush */
spin_unlock_irqrestore(&host->lock, flags);
return ata_do_dev_read_id(dev, tf, id);
disable_phys:
while (--i >= 0) {
phy_power_off(hpriv->phys[i]);
phy_exit(hpriv->phys[i]);
}
return AC_ERR_OTHER;
}
static void brcm_ahci_host_stop(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
ahci_platform_disable_resources(hpriv);
}
static struct ata_port_operations ahci_brcm_platform_ops = {
.inherits = &ahci_ops,
.host_stop = brcm_ahci_host_stop,
.read_id = brcm_ahci_read_id,
};
static const struct ata_port_info ahci_brcm_port_info = {
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
.link_flags = ATA_LFLAG_NO_DEBOUNCE_DELAY,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_brcm_platform_ops,
};
static int brcm_ahci_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct brcm_ahci_priv *priv = hpriv->plat_data;
int ret;
brcm_sata_phys_disable(priv);
if (IS_ENABLED(CONFIG_PM_SLEEP))
ret = ahci_platform_suspend(dev);
else
ret = 0;
reset_control_assert(priv->rcdev_ahci);
reset_control_rearm(priv->rcdev_rescal);
return ret;
}
static int __maybe_unused brcm_ahci_resume(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct brcm_ahci_priv *priv = hpriv->plat_data;
int ret = 0;
ret = reset_control_deassert(priv->rcdev_ahci);
if (ret)
return ret;
ret = reset_control_reset(priv->rcdev_rescal);
if (ret)
return ret;
/* Make sure clocks are turned on before re-configuration */
ret = ahci_platform_enable_clks(hpriv);
if (ret)
return ret;
ret = ahci_platform_enable_regulators(hpriv);
if (ret)
goto out_disable_clks;
brcm_sata_init(priv);
brcm_sata_phys_enable(priv);
brcm_sata_alpm_init(hpriv);
/* Since we had to enable clocks earlier on, we cannot use
* ahci_platform_resume() as-is since a second call to
* ahci_platform_enable_resources() would bump up the resources
* (regulators, clocks, PHYs) count artificially so we copy the part
* after ahci_platform_enable_resources().
*/
ret = ahci_platform_enable_phys(hpriv);
if (ret)
goto out_disable_phys;
ret = ahci_platform_resume_host(dev);
if (ret)
goto out_disable_platform_phys;
/* We resumed so update PM runtime state */
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
out_disable_platform_phys:
ahci_platform_disable_phys(hpriv);
out_disable_phys:
brcm_sata_phys_disable(priv);
ahci_platform_disable_regulators(hpriv);
out_disable_clks:
ahci_platform_disable_clks(hpriv);
return ret;
}
static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
static const struct of_device_id ahci_of_match[] = {
{.compatible = "brcm,bcm7425-ahci", .data = (void *)BRCM_SATA_BCM7425},
{.compatible = "brcm,bcm7445-ahci", .data = (void *)BRCM_SATA_BCM7445},
{.compatible = "brcm,bcm63138-ahci", .data = (void *)BRCM_SATA_BCM7445},
{.compatible = "brcm,bcm-nsp-ahci", .data = (void *)BRCM_SATA_NSP},
{.compatible = "brcm,bcm7216-ahci", .data = (void *)BRCM_SATA_BCM7216},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
static int brcm_ahci_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
struct brcm_ahci_priv *priv;
struct ahci_host_priv *hpriv;
struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
of_id = of_match_node(ahci_of_match, pdev->dev.of_node);
if (!of_id)
return -ENODEV;
priv->version = (unsigned long)of_id->data;
priv->dev = dev;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "top-ctrl");
priv->top_ctrl = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->top_ctrl))
return PTR_ERR(priv->top_ctrl);
if (priv->version == BRCM_SATA_BCM7216) {
priv->rcdev_rescal = devm_reset_control_get_optional_shared(
&pdev->dev, "rescal");
if (IS_ERR(priv->rcdev_rescal))
return PTR_ERR(priv->rcdev_rescal);
}
priv->rcdev_ahci = devm_reset_control_get_optional(&pdev->dev, "ahci");
if (IS_ERR(priv->rcdev_ahci))
return PTR_ERR(priv->rcdev_ahci);
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
hpriv->plat_data = priv;
hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO;
switch (priv->version) {
case BRCM_SATA_BCM7425:
hpriv->flags |= AHCI_HFLAG_DELAY_ENGINE;
fallthrough;
case BRCM_SATA_NSP:
hpriv->flags |= AHCI_HFLAG_NO_NCQ;
priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
break;
default:
break;
}
ret = reset_control_reset(priv->rcdev_rescal);
if (ret)
return ret;
ret = reset_control_deassert(priv->rcdev_ahci);
if (ret)
return ret;
ret = ahci_platform_enable_clks(hpriv);
if (ret)
goto out_reset;
ret = ahci_platform_enable_regulators(hpriv);
if (ret)
goto out_disable_clks;
/* Must be first so as to configure endianness including that
* of the standard AHCI register space.
*/
brcm_sata_init(priv);
/* Initializes priv->port_mask which is used below */
priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
if (!priv->port_mask) {
ret = -ENODEV;
goto out_disable_regulators;
}
/* Must be done before ahci_platform_enable_phys() */
brcm_sata_phys_enable(priv);
brcm_sata_alpm_init(hpriv);
ret = ahci_platform_enable_phys(hpriv);
if (ret)
goto out_disable_phys;
ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info,
&ahci_platform_sht);
if (ret)
goto out_disable_platform_phys;
dev_info(dev, "Broadcom AHCI SATA3 registered\n");
return 0;
out_disable_platform_phys:
ahci_platform_disable_phys(hpriv);
out_disable_phys:
brcm_sata_phys_disable(priv);
out_disable_regulators:
ahci_platform_disable_regulators(hpriv);
out_disable_clks:
ahci_platform_disable_clks(hpriv);
out_reset:
reset_control_assert(priv->rcdev_ahci);
reset_control_rearm(priv->rcdev_rescal);
return ret;
}
static void brcm_ahci_remove(struct platform_device *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct ahci_host_priv *hpriv = host->private_data;
struct brcm_ahci_priv *priv = hpriv->plat_data;
brcm_sata_phys_disable(priv);
ata_platform_remove_one(pdev);
}
static void brcm_ahci_shutdown(struct platform_device *pdev)
{
int ret;
/* All resource releasing happens via devres, but our device, unlike in
 * a proper remove, is not disappearing, therefore using
 * brcm_ahci_suspend() here, which does explicit power management, is
 * appropriate.
 */
ret = brcm_ahci_suspend(&pdev->dev);
if (ret)
dev_err(&pdev->dev, "failed to shutdown\n");
}
static SIMPLE_DEV_PM_OPS(ahci_brcm_pm_ops, brcm_ahci_suspend, brcm_ahci_resume);
static struct platform_driver brcm_ahci_driver = {
.probe = brcm_ahci_probe,
.remove_new = brcm_ahci_remove,
.shutdown = brcm_ahci_shutdown,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_of_match,
.pm = &ahci_brcm_pm_ops,
},
};
module_platform_driver(brcm_ahci_driver);
MODULE_DESCRIPTION("Broadcom SATA3 AHCI Controller Driver");
MODULE_AUTHOR("Brian Norris");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sata-brcmstb");
| linux-master | drivers/ata/ahci_brcm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_amd.c - AMD PATA for new ATA layer
* (C) 2005-2006 Red Hat Inc
*
* Based on pata-sil680. Errata information is taken from data sheets
* and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are
* claimed by sata-nv.c.
*
* TODO:
* Variable system clock when/if it makes sense
* Power management on ports
*
*
* Documentation publicly available.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_amd"
#define DRV_VERSION "0.4.1"
/**
* timing_setup - shared timing computation and load
* @ap: ATA port being set up
* @adev: drive being configured
* @offset: port offset
* @speed: target speed
* @clock: clock multiplier (number of times 33MHz for this part)
*
* Perform the actual timing set up for Nvidia or AMD PATA devices.
* The actual devices vary so they all call into this helper function
* providing the clock multiplier and offset (because AMD and Nvidia put
* the ports at different locations).
*/
static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offset, int speed, int clock)
{
static const unsigned char amd_cyc2udma[] = {
6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct ata_device *peer = ata_dev_pair(adev);
int dn = ap->port_no * 2 + adev->devno;
struct ata_timing at, apeer;
int T, UT;
const int amd_clock = 33333; /* KHz. */
u8 t;
T = 1000000000 / amd_clock;
UT = T;
if (clock >= 2)
UT = T / 2;
if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
dev_err(&pdev->dev, "unknown mode %d\n", speed);
return;
}
if (peer) {
/* This may be overly conservative */
if (ata_dma_enabled(peer)) {
ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
}
ata_timing_compute(peer, peer->pio_mode, &apeer, T, UT);
ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
}
if (speed == XFER_UDMA_5 && amd_clock <= 33333) at.udma = 1;
if (speed == XFER_UDMA_6 && amd_clock <= 33333) at.udma = 15;
/*
* Now do the setup work
*/
/* Configure the address setup timing */
pci_read_config_byte(pdev, offset + 0x0C, &t);
t = (t & ~(3 << ((3 - dn) << 1))) | ((clamp_val(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
pci_write_config_byte(pdev, offset + 0x0C , t);
/* Configure the 8bit I/O timing */
pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)),
((clamp_val(at.act8b, 1, 16) - 1) << 4) | (clamp_val(at.rec8b, 1, 16) - 1));
/* Drive timing */
pci_write_config_byte(pdev, offset + 0x08 + (3 - dn),
((clamp_val(at.active, 1, 16) - 1) << 4) | (clamp_val(at.recover, 1, 16) - 1));
switch (clock) {
case 1:
t = at.udma ? (0xc0 | (clamp_val(at.udma, 2, 5) - 2)) : 0x03;
break;
case 2:
t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 2, 10)]) : 0x03;
break;
case 3:
t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 1, 10)]) : 0x03;
break;
case 4:
t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 1, 15)]) : 0x03;
break;
default:
return;
}
/* UDMA timing */
if (at.udma)
pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
}
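/*
 * Worked example of the arithmetic above (derived from the constants
 * in this file, hedged against libata's internal quantization):
 * T = 1000000000 / 33333 == 30000, one 33 MHz clock period in
 * picoseconds, and with a clock multiplier of 2 the UDMA clock gives
 * UT = T / 2 == 15000. A 30 ns UDMA4 cycle then needs two 15000 ps
 * clocks (at.udma == 2), so the clock == 2 case yields
 * t = 0xc0 | amd_cyc2udma[2] = 0xc5.
 */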
/**
* amd_pre_reset - perform reset handling
* @link: ATA link
* @deadline: deadline jiffies for the operation
*
* Reset sequence checking enable bits to see which ports are
* active.
*/
static int amd_pre_reset(struct ata_link *link, unsigned long deadline)
{
static const struct pci_bits amd_enable_bits[] = {
{ 0x40, 1, 0x02, 0x02 },
{ 0x40, 1, 0x01, 0x01 }
};
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
/**
* amd_cable_detect - report cable type
* @ap: port
*
* AMD controller/BIOS setups record the cable type in word 0x42
*/
static int amd_cable_detect(struct ata_port *ap)
{
static const u32 bitmask[2] = {0x03, 0x0C};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 ata66;
pci_read_config_byte(pdev, 0x42, &ata66);
if (ata66 & bitmask[ap->port_no])
return ATA_CBL_PATA80;
return ATA_CBL_PATA40;
}
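/*
 * Layout implied by the bitmask[] table above: config word 0x42 holds
 * one 80-wire detection bit per drive, bits 0-1 for the primary
 * channel (mask 0x03) and bits 2-3 for the secondary channel (mask
 * 0x0C); any set bit on a channel is reported as an 80-wire cable.
 */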
/**
* amd_fifo_setup - set the PIO FIFO for ATA/ATAPI
* @ap: ATA interface
*
* Set the PCI FIFO for this device according to the devices present
* on the bus at this point in time. We need to turn the post-write buffer
* off for ATAPI devices as we may need to issue a word-sized write to the
* device as the final I/O.
*/
static void amd_fifo_setup(struct ata_port *ap)
{
struct ata_device *adev;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static const u8 fifobit[2] = { 0xC0, 0x30};
u8 fifo = fifobit[ap->port_no];
u8 r;
ata_for_each_dev(adev, &ap->link, ENABLED) {
if (adev->class == ATA_DEV_ATAPI)
fifo = 0;
}
if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411) /* FIFO is broken */
fifo = 0;
/* On the later chips the read prefetch bits become no-op bits */
pci_read_config_byte(pdev, 0x41, &r);
r &= ~fifobit[ap->port_no];
r |= fifo;
pci_write_config_byte(pdev, 0x41, r);
}
/**
* amd33_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the AMD registers for PIO mode.
*/
static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
amd_fifo_setup(ap);
timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
}
static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
amd_fifo_setup(ap);
timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
}
static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
amd_fifo_setup(ap);
timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
}
static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
amd_fifo_setup(ap);
timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
}
/**
* amd33_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the MWDMA/UDMA modes for the AMD and Nvidia
* chipset.
*/
static void amd33_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
timing_setup(ap, adev, 0x40, adev->dma_mode, 1);
}
static void amd66_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
timing_setup(ap, adev, 0x40, adev->dma_mode, 2);
}
static void amd100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
timing_setup(ap, adev, 0x40, adev->dma_mode, 3);
}
static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
}
/* Both host-side and drive-side detection results are worthless on NV
* PATAs. Ignore them and just follow what the BIOS configured. Both the
* current configuration in PCI config reg and ACPI GTM result are
* cached during driver attach and are consulted to select transfer
* mode.
*/
static unsigned int nv_mode_filter(struct ata_device *dev,
unsigned int xfer_mask)
{
static const unsigned int udma_mask_map[] =
{ ATA_UDMA2, ATA_UDMA1, ATA_UDMA0, 0,
ATA_UDMA3, ATA_UDMA4, ATA_UDMA5, ATA_UDMA6 };
struct ata_port *ap = dev->link->ap;
char acpi_str[32] = "";
u32 saved_udma, udma;
const struct ata_acpi_gtm *gtm;
unsigned int bios_limit = 0, acpi_limit = 0, limit;
/* find out what BIOS configured */
udma = saved_udma = (unsigned long)ap->host->private_data;
if (ap->port_no == 0)
udma >>= 16;
if (dev->devno == 0)
udma >>= 8;
if ((udma & 0xc0) == 0xc0)
bios_limit = ata_pack_xfermask(0, 0, udma_mask_map[udma & 0x7]);
/* consult ACPI GTM too */
gtm = ata_acpi_init_gtm(ap);
if (gtm) {
acpi_limit = ata_acpi_gtm_xfermask(dev, gtm);
snprintf(acpi_str, sizeof(acpi_str), " (%u:%u:0x%x)",
gtm->drive[0].dma, gtm->drive[1].dma, gtm->flags);
}
/* be optimistic, EH can take care of things if something goes wrong */
limit = bios_limit | acpi_limit;
/* If PIO or DMA isn't configured at all, don't limit. Let EH
* handle it.
*/
if (!(limit & ATA_MASK_PIO))
limit |= ATA_MASK_PIO;
if (!(limit & (ATA_MASK_MWDMA | ATA_MASK_UDMA)))
limit |= ATA_MASK_MWDMA | ATA_MASK_UDMA;
/* PIO4, MWDMA2, UDMA2 should always be supported regardless of
cable detection result */
limit |= ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA2);
ata_port_dbg(ap,
"nv_mode_filter: 0x%x&0x%x->0x%x, BIOS=0x%x (0x%x) ACPI=0x%x%s\n",
xfer_mask, limit, xfer_mask & limit, bios_limit,
saved_udma, acpi_limit, acpi_str);
return xfer_mask & limit;
}
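/*
 * Worked example of the decode above (the register value is
 * hypothetical): with reg 0x60 cached as 0xc5c3c2c0, port 0 shifts
 * right by 16 and device 0 by another 8, leaving 0xc5. Since
 * (0xc5 & 0xc0) == 0xc0 the BIOS marked the mode valid, and
 * udma_mask_map[0xc5 & 0x7] == udma_mask_map[5] == ATA_UDMA4 becomes
 * the BIOS limit for that device.
 */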
/**
* nv_pre_reset - cable detection
* @link: ATA link
* @deadline: deadline jiffies for the operation
*
* Perform cable detection. The BIOS stores this in PCI config
* space for us.
*/
static int nv_pre_reset(struct ata_link *link, unsigned long deadline)
{
static const struct pci_bits nv_enable_bits[] = {
{ 0x50, 1, 0x02, 0x02 },
{ 0x50, 1, 0x01, 0x01 }
};
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
/**
* nv100_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the AMD registers for PIO mode.
*/
static void nv100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
timing_setup(ap, adev, 0x50, adev->pio_mode, 3);
}
static void nv133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
timing_setup(ap, adev, 0x50, adev->pio_mode, 4);
}
/**
* nv100_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the MWDMA/UDMA modes for the AMD and Nvidia
* chipset.
*/
static void nv100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
timing_setup(ap, adev, 0x50, adev->dma_mode, 3);
}
static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
}
static void nv_host_stop(struct ata_host *host)
{
u32 udma = (unsigned long)host->private_data;
/* restore PCI config register 0x60 */
pci_write_config_dword(to_pci_dev(host->dev), 0x60, udma);
}
static const struct scsi_host_template amd_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static const struct ata_port_operations amd_base_port_ops = {
.inherits = &ata_bmdma32_port_ops,
.prereset = amd_pre_reset,
};
static struct ata_port_operations amd33_port_ops = {
.inherits = &amd_base_port_ops,
.cable_detect = ata_cable_40wire,
.set_piomode = amd33_set_piomode,
.set_dmamode = amd33_set_dmamode,
};
static struct ata_port_operations amd66_port_ops = {
.inherits = &amd_base_port_ops,
.cable_detect = ata_cable_unknown,
.set_piomode = amd66_set_piomode,
.set_dmamode = amd66_set_dmamode,
};
static struct ata_port_operations amd100_port_ops = {
.inherits = &amd_base_port_ops,
.cable_detect = ata_cable_unknown,
.set_piomode = amd100_set_piomode,
.set_dmamode = amd100_set_dmamode,
};
static struct ata_port_operations amd133_port_ops = {
.inherits = &amd_base_port_ops,
.cable_detect = amd_cable_detect,
.set_piomode = amd133_set_piomode,
.set_dmamode = amd133_set_dmamode,
};
static const struct ata_port_operations nv_base_port_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = ata_cable_ignore,
.mode_filter = nv_mode_filter,
.prereset = nv_pre_reset,
.host_stop = nv_host_stop,
};
static struct ata_port_operations nv100_port_ops = {
.inherits = &nv_base_port_ops,
.set_piomode = nv100_set_piomode,
.set_dmamode = nv100_set_dmamode,
};
static struct ata_port_operations nv133_port_ops = {
.inherits = &nv_base_port_ops,
.set_piomode = nv133_set_piomode,
.set_dmamode = nv133_set_dmamode,
};
static void amd_clear_fifo(struct pci_dev *pdev)
{
u8 fifo;
/* Disable the FIFO; the FIFO logic will re-enable it as
appropriate */
pci_read_config_byte(pdev, 0x41, &fifo);
fifo &= 0x0F;
pci_write_config_byte(pdev, 0x41, fifo);
}
static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info[10] = {
{ /* 0: AMD 7401 - no swdma */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
.port_ops = &amd33_port_ops
},
{ /* 1: Early AMD7409 - no swdma */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
.port_ops = &amd66_port_ops
},
{ /* 2: AMD 7409 */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
.port_ops = &amd66_port_ops
},
{ /* 3: AMD 7411 */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &amd100_port_ops
},
{ /* 4: AMD 7441 */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &amd100_port_ops
},
{ /* 5: AMD 8111 - no swdma */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &amd133_port_ops
},
{ /* 6: AMD 8111 UDMA 100 (Serenade) - no swdma */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &amd133_port_ops
},
{ /* 7: Nvidia Nforce */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &nv100_port_ops
},
{ /* 8: Nvidia Nforce2 and later - no swdma */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &nv133_port_ops
},
{ /* 9: AMD CS5536 (Geode companion) */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &amd100_port_ops
}
};
const struct ata_port_info *ppi[] = { NULL, NULL };
int type = id->driver_data;
void *hpriv = NULL;
u8 fifo;
int rc;
ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
return rc;
pci_read_config_byte(pdev, 0x41, &fifo);
/* Check for AMD7409 without swdma errata and if found adjust type */
if (type == 1 && pdev->revision > 0x7)
type = 2;
/* Serenade ? */
if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
type = 6; /* UDMA 100 only */
/*
* Okay, type is determined now. Apply type-specific workarounds.
*/
ppi[0] = &info[type];
if (type < 3)
ata_pci_bmdma_clear_simplex(pdev);
if (pdev->vendor == PCI_VENDOR_ID_AMD)
amd_clear_fifo(pdev);
/* Cable detection on Nvidia chips doesn't work too well, so
 * cache the BIOS-programmed UDMA mode.
 */
if (type == 7 || type == 8) {
u32 udma;
pci_read_config_dword(pdev, 0x60, &udma);
hpriv = (void *)(unsigned long)udma;
}
/* And fire it up */
return ata_pci_bmdma_init_one(pdev, ppi, &amd_sht, hpriv, 0);
}
#ifdef CONFIG_PM_SLEEP
static int amd_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
if (pdev->vendor == PCI_VENDOR_ID_AMD) {
amd_clear_fifo(pdev);
if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
ata_pci_bmdma_clear_simplex(pdev);
}
ata_host_resume(host);
return 0;
}
#endif
static const struct pci_device_id amd[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7409), 1 },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7411), 3 },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_OPUS_7441), 4 },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_8111_IDE), 5 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE), 7 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE), 8 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE), 8 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE), 8 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE), 8 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE), 8 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE), 8 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE), 8 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE), 8 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE), 8 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE), 8 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE), 8 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), 9 },
{ },
};
static struct pci_driver amd_pci_driver = {
.name = DRV_NAME,
.id_table = amd,
.probe = amd_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = amd_reinit_one,
#endif
};
module_pci_driver(amd_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for AMD and Nvidia PATA IDE");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, amd);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_amd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_cs5536.c - CS5536 PATA for new ATA layer
* (C) 2007 Martin K. Petersen <[email protected]>
* (C) 2011 Bartlomiej Zolnierkiewicz
*
* Documentation:
* Available from AMD web site.
*
* The IDE timing registers for the CS5536 live in the Geode Machine
* Specific Register file and not PCI config space. Most BIOSes
* virtualize the PCI registers so the chip looks like a standard IDE
* controller. Unfortunately not all implementations get this right.
* In particular some have problems with unaligned accesses to the
* virtualized PCI registers. This driver always does full dword
* writes to work around the issue. Also, in case of a bad BIOS this
* driver can be loaded with the "msr=1" parameter which forces using
* the Machine Specific Registers to configure the device.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/libata.h>
#include <scsi/scsi_host.h>
#include <linux/dmi.h>
#ifdef CONFIG_X86_32
#include <asm/msr.h>
static int use_msr;
module_param_named(msr, use_msr, int, 0644);
MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");
#else
#undef rdmsr /* avoid accidental MSR usage on, e.g. x86-64 */
#undef wrmsr
#define rdmsr(x, y, z) do { } while (0)
#define wrmsr(x, y, z) do { } while (0)
#define use_msr 0
#endif
#define DRV_NAME "pata_cs5536"
#define DRV_VERSION "0.0.8"
enum {
MSR_IDE_CFG = 0x51300010,
PCI_IDE_CFG = 0x40,
CFG = 0,
DTC = 2,
CAST = 3,
ETC = 4,
IDE_CFG_CHANEN = (1 << 1),
IDE_CFG_CABLE = (1 << 17) | (1 << 16),
IDE_D0_SHIFT = 24,
IDE_D1_SHIFT = 16,
IDE_DRV_MASK = 0xff,
IDE_CAST_D0_SHIFT = 6,
IDE_CAST_D1_SHIFT = 4,
IDE_CAST_DRV_MASK = 0x3,
IDE_CAST_CMD_MASK = 0xff,
IDE_CAST_CMD_SHIFT = 24,
IDE_ETC_UDMA_MASK = 0xc0,
};
/* Some Bachmann OT200 devices have non-working UDMA support due to a
 * missing resistor.
 */
static const struct dmi_system_id udma_quirk_dmi_table[] = {
{
.ident = "Bachmann electronic OT200",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Bachmann electronic"),
DMI_MATCH(DMI_PRODUCT_NAME, "OT200"),
DMI_MATCH(DMI_PRODUCT_VERSION, "1")
},
},
{ }
};
static int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
{
if (unlikely(use_msr)) {
u32 dummy __maybe_unused;
rdmsr(MSR_IDE_CFG + reg, *val, dummy);
return 0;
}
return pci_read_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
}
static int cs5536_write(struct pci_dev *pdev, int reg, int val)
{
if (unlikely(use_msr)) {
wrmsr(MSR_IDE_CFG + reg, val, 0);
return 0;
}
return pci_write_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
}
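/*
 * Illustrative sketch, not part of the upstream driver: reading the
 * drive timing register through the accessors above. The PCI path
 * touches config dword 0x40 + DTC * 4 = 0x48 while the MSR path
 * touches MSR 0x51300010 + DTC = 0x51300012; both name the same
 * hardware register, only the BIOS virtualization differs.
 */
static inline u32 cs5536_example_read_dtc(struct pci_dev *pdev)
{
u32 dtc = 0;
cs5536_read(pdev, DTC, &dtc); /* error ignored for brevity */
return dtc;
}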
static void cs5536_program_dtc(struct ata_device *adev, u8 tim)
{
struct pci_dev *pdev = to_pci_dev(adev->link->ap->host->dev);
int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
u32 dtc;
cs5536_read(pdev, DTC, &dtc);
dtc &= ~(IDE_DRV_MASK << dshift);
dtc |= tim << dshift;
cs5536_write(pdev, DTC, dtc);
}
/**
* cs5536_cable_detect - detect cable type
* @ap: Port to detect on
*
* Perform cable detection for ATA66 capable cable.
*
* Returns a cable type.
*/
static int cs5536_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 cfg;
cs5536_read(pdev, CFG, &cfg);
if (cfg & IDE_CFG_CABLE)
return ATA_CBL_PATA80;
else
return ATA_CBL_PATA40;
}
/**
* cs5536_set_piomode - PIO setup
* @ap: ATA interface
* @adev: device on the interface
*/
static void cs5536_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
static const u8 drv_timings[5] = {
0x98, 0x55, 0x32, 0x21, 0x20,
};
static const u8 addr_timings[5] = {
0x2, 0x1, 0x0, 0x0, 0x0,
};
static const u8 cmd_timings[5] = {
0x99, 0x92, 0x90, 0x22, 0x20,
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct ata_device *pair = ata_dev_pair(adev);
int mode = adev->pio_mode - XFER_PIO_0;
int cmdmode = mode;
int cshift = adev->devno ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT;
u32 cast;
if (pair)
cmdmode = min(mode, pair->pio_mode - XFER_PIO_0);
cs5536_program_dtc(adev, drv_timings[mode]);
cs5536_read(pdev, CAST, &cast);
cast &= ~(IDE_CAST_DRV_MASK << cshift);
cast |= addr_timings[mode] << cshift;
cast &= ~(IDE_CAST_CMD_MASK << IDE_CAST_CMD_SHIFT);
cast |= cmd_timings[cmdmode] << IDE_CAST_CMD_SHIFT;
cs5536_write(pdev, CAST, cast);
}
/**
* cs5536_set_dmamode - DMA timing setup
* @ap: ATA interface
* @adev: Device being configured
*
*/
static void cs5536_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static const u8 udma_timings[6] = {
0xc2, 0xc1, 0xc0, 0xc4, 0xc5, 0xc6,
};
static const u8 mwdma_timings[3] = {
0x67, 0x21, 0x20,
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 etc;
int mode = adev->dma_mode;
int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
cs5536_read(pdev, ETC, &etc);
if (mode >= XFER_UDMA_0) {
etc &= ~(IDE_DRV_MASK << dshift);
etc |= udma_timings[mode - XFER_UDMA_0] << dshift;
} else { /* MWDMA */
etc &= ~(IDE_ETC_UDMA_MASK << dshift);
cs5536_program_dtc(adev, mwdma_timings[mode - XFER_MW_DMA_0]);
}
cs5536_write(pdev, ETC, etc);
}
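/*
 * Worked example for the ETC/DTC split above: UDMA5 on drive 0 clears
 * the byte at IDE_D0_SHIFT (bits 31:24 of ETC) and writes
 * udma_timings[5] = 0xc6 there, leaving DTC alone. MWDMA2 instead
 * only clears the UDMA-enable bits (IDE_ETC_UDMA_MASK) in ETC and
 * routes the 0x20 timing byte to DTC via cs5536_program_dtc().
 */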
static const struct scsi_host_template cs5536_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations cs5536_port_ops = {
.inherits = &ata_bmdma32_port_ops,
.cable_detect = cs5536_cable_detect,
.set_piomode = cs5536_set_piomode,
.set_dmamode = cs5536_set_dmamode,
};
/**
* cs5536_init_one
* @dev: PCI device
* @id: Entry in match table
*
*/
static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &cs5536_port_ops,
};
static const struct ata_port_info no_udma_info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.port_ops = &cs5536_port_ops,
};
const struct ata_port_info *ppi[2];
u32 cfg;
if (dmi_check_system(udma_quirk_dmi_table))
ppi[0] = &no_udma_info;
else
ppi[0] = &info;
ppi[1] = &ata_dummy_port_info;
if (use_msr)
dev_err(&dev->dev, DRV_NAME ": Using MSR regs instead of PCI\n");
cs5536_read(dev, CFG, &cfg);
if ((cfg & IDE_CFG_CHANEN) == 0) {
dev_err(&dev->dev, DRV_NAME ": disabled by BIOS\n");
return -ENODEV;
}
return ata_pci_bmdma_init_one(dev, ppi, &cs5536_sht, NULL, 0);
}
static const struct pci_device_id cs5536[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), },
{ },
};
static struct pci_driver cs5536_pci_driver = {
.name = DRV_NAME,
.id_table = cs5536,
.probe = cs5536_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(cs5536_pci_driver);
MODULE_AUTHOR("Martin K. Petersen");
MODULE_DESCRIPTION("low-level driver for the CS5536 IDE controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cs5536);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_cs5536.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* libata-acpi.c
* Provides ACPI support for PATA/SATA.
*
* Copyright (C) 2006 Intel Corp.
* Copyright (C) 2006 Randy Dunlap
*/
#include <linux/module.h>
#include <linux/ata.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/libata.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <scsi/scsi_device.h>
#include "libata.h"
unsigned int ata_acpi_gtf_filter = ATA_ACPI_FILTER_DEFAULT;
module_param_named(acpi_gtf_filter, ata_acpi_gtf_filter, int, 0644);
MODULE_PARM_DESC(acpi_gtf_filter, "filter mask for ACPI _GTF commands, set to filter out (0x1=set xfermode, 0x2=lock/freeze lock, 0x4=DIPM, 0x8=FPDMA non-zero offset, 0x10=FPDMA DMA Setup FIS auto-activate)");
#define NO_PORT_MULT 0xffff
#define SATA_ADR(root, pmp) (((root) << 16) | (pmp))
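/*
 * Example encodings (root port number in the high word, per the macro
 * above): a bare SATA port 1 gets _ADR SATA_ADR(1, NO_PORT_MULT) ==
 * 0x0001ffff, while device 2 behind a port multiplier on port 0 gets
 * SATA_ADR(0, 2) == 0x00000002.
 */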
#define REGS_PER_GTF 7
struct ata_acpi_gtf {
u8 tf[REGS_PER_GTF]; /* regs. 0x1f1 - 0x1f7 */
} __packed;
static void ata_acpi_clear_gtf(struct ata_device *dev)
{
kfree(dev->gtf_cache);
dev->gtf_cache = NULL;
}
struct ata_acpi_hotplug_context {
struct acpi_hotplug_context hp;
union {
struct ata_port *ap;
struct ata_device *dev;
} data;
};
#define ata_hotplug_data(context) (container_of((context), struct ata_acpi_hotplug_context, hp)->data)
/**
* ata_dev_acpi_handle - provide the acpi_handle for an ata_device
* @dev: the acpi_handle returned will correspond to this device
*
* Returns the acpi_handle for the ACPI namespace object corresponding to
* the ata_device passed into the function, or NULL if no such object exists
* or ACPI is disabled for this device due to consecutive errors.
*/
acpi_handle ata_dev_acpi_handle(struct ata_device *dev)
{
return dev->flags & ATA_DFLAG_ACPI_DISABLED ?
NULL : ACPI_HANDLE(&dev->tdev);
}
/* @ap and @dev are the same as ata_acpi_handle_hotplug() */
static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev)
{
if (dev)
dev->flags |= ATA_DFLAG_DETACH;
else {
struct ata_link *tlink;
struct ata_device *tdev;
ata_for_each_link(tlink, ap, EDGE)
ata_for_each_dev(tdev, tlink, ALL)
tdev->flags |= ATA_DFLAG_DETACH;
}
ata_port_schedule_eh(ap);
}
/**
* ata_acpi_handle_hotplug - ACPI event handler backend
* @ap: ATA port ACPI event occurred
* @dev: ATA device ACPI event occurred (can be NULL)
* @event: ACPI event which occurred
*
* All ACPI bay / device related events end up in this function. If
* the event is port-wide, @dev is NULL. If the event is specific to a
* device, @dev points to it.
*
* Hotplug (as opposed to unplug) notification is always handled as
* port-wide, while unplug only kills the target device on a
* device-wide event.
*
* LOCKING:
* ACPI notify handler context. May sleep.
*/
static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
u32 event)
{
struct ata_eh_info *ehi = &ap->link.eh_info;
int wait = 0;
unsigned long flags;
spin_lock_irqsave(ap->lock, flags);
/*
* When the dock driver calls into this routine, it will always use
* ACPI_NOTIFY_BUS_CHECK/ACPI_NOTIFY_DEVICE_CHECK for add and
* ACPI_NOTIFY_EJECT_REQUEST for remove.
*/
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
ata_ehi_push_desc(ehi, "ACPI event");
ata_ehi_hotplugged(ehi);
ata_port_freeze(ap);
break;
case ACPI_NOTIFY_EJECT_REQUEST:
ata_ehi_push_desc(ehi, "ACPI event");
ata_acpi_detach_device(ap, dev);
wait = 1;
break;
}
spin_unlock_irqrestore(ap->lock, flags);
if (wait)
ata_port_wait_eh(ap);
}
static int ata_acpi_dev_notify_dock(struct acpi_device *adev, u32 event)
{
struct ata_device *dev = ata_hotplug_data(adev->hp).dev;
ata_acpi_handle_hotplug(dev->link->ap, dev, event);
return 0;
}
static int ata_acpi_ap_notify_dock(struct acpi_device *adev, u32 event)
{
ata_acpi_handle_hotplug(ata_hotplug_data(adev->hp).ap, NULL, event);
return 0;
}
static void ata_acpi_uevent(struct ata_port *ap, struct ata_device *dev,
u32 event)
{
struct kobject *kobj = NULL;
char event_string[20];
char *envp[] = { event_string, NULL };
if (dev) {
if (dev->sdev)
kobj = &dev->sdev->sdev_gendev.kobj;
} else
kobj = &ap->dev->kobj;
if (kobj) {
snprintf(event_string, 20, "BAY_EVENT=%d", event);
kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
}
}
static void ata_acpi_ap_uevent(struct acpi_device *adev, u32 event)
{
ata_acpi_uevent(ata_hotplug_data(adev->hp).ap, NULL, event);
}
static void ata_acpi_dev_uevent(struct acpi_device *adev, u32 event)
{
struct ata_device *dev = ata_hotplug_data(adev->hp).dev;
ata_acpi_uevent(dev->link->ap, dev, event);
}
/* bind acpi handle to pata port */
void ata_acpi_bind_port(struct ata_port *ap)
{
struct acpi_device *host_companion = ACPI_COMPANION(ap->host->dev);
struct acpi_device *adev;
struct ata_acpi_hotplug_context *context;
if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA || !host_companion)
return;
acpi_preset_companion(&ap->tdev, host_companion, ap->port_no);
if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
adev = ACPI_COMPANION(&ap->tdev);
if (!adev || adev->hp)
return;
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return;
context->data.ap = ap;
acpi_initialize_hp_context(adev, &context->hp, ata_acpi_ap_notify_dock,
ata_acpi_ap_uevent);
}
void ata_acpi_bind_dev(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
struct acpi_device *port_companion = ACPI_COMPANION(&ap->tdev);
struct acpi_device *host_companion = ACPI_COMPANION(ap->host->dev);
struct acpi_device *parent, *adev;
struct ata_acpi_hotplug_context *context;
u64 adr;
/*
* For both SATA and PATA devices, a host companion device is required.
* For PATA devices, a port companion device is also required.
*/
if (libata_noacpi || !host_companion ||
(!(ap->flags & ATA_FLAG_ACPI_SATA) && !port_companion))
return;
if (ap->flags & ATA_FLAG_ACPI_SATA) {
if (!sata_pmp_attached(ap))
adr = SATA_ADR(ap->port_no, NO_PORT_MULT);
else
adr = SATA_ADR(ap->port_no, dev->link->pmp);
parent = host_companion;
} else {
adr = dev->devno;
parent = port_companion;
}
acpi_preset_companion(&dev->tdev, parent, adr);
adev = ACPI_COMPANION(&dev->tdev);
if (!adev || adev->hp)
return;
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return;
context->data.dev = dev;
acpi_initialize_hp_context(adev, &context->hp, ata_acpi_dev_notify_dock,
ata_acpi_dev_uevent);
}
/**
* ata_acpi_dissociate - dissociate ATA host from ACPI objects
* @host: target ATA host
*
* This function is called during driver detach after the whole host
* is shut down.
*
* LOCKING:
* EH context.
*/
void ata_acpi_dissociate(struct ata_host *host)
{
int i;
/* Restore initial _GTM values so that driver which attaches
* afterward can use them too.
*/
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
if (ACPI_HANDLE(&ap->tdev) && gtm)
ata_acpi_stm(ap, gtm);
}
}
/**
* ata_acpi_gtm - execute _GTM
* @ap: target ATA port
* @gtm: out parameter for _GTM result
*
* Evaluate _GTM and store the result in @gtm.
*
* LOCKING:
* EH context.
*
* RETURNS:
* 0 on success, -ENOENT if _GTM doesn't exist, -errno on failure.
*/
int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *gtm)
{
struct acpi_buffer output = { .length = ACPI_ALLOCATE_BUFFER };
union acpi_object *out_obj;
acpi_status status;
int rc = 0;
acpi_handle handle = ACPI_HANDLE(&ap->tdev);
if (!handle)
return -EINVAL;
status = acpi_evaluate_object(handle, "_GTM", NULL, &output);
rc = -ENOENT;
if (status == AE_NOT_FOUND)
goto out_free;
rc = -EINVAL;
if (ACPI_FAILURE(status)) {
ata_port_err(ap, "ACPI get timing mode failed (AE 0x%x)\n",
status);
goto out_free;
}
out_obj = output.pointer;
if (out_obj->type != ACPI_TYPE_BUFFER) {
ata_port_warn(ap, "_GTM returned unexpected object type 0x%x\n",
out_obj->type);
goto out_free;
}
if (out_obj->buffer.length != sizeof(struct ata_acpi_gtm)) {
ata_port_err(ap, "_GTM returned invalid length %d\n",
out_obj->buffer.length);
goto out_free;
}
memcpy(gtm, out_obj->buffer.pointer, sizeof(struct ata_acpi_gtm));
rc = 0;
out_free:
kfree(output.pointer);
return rc;
}
EXPORT_SYMBOL_GPL(ata_acpi_gtm);
/**
* ata_acpi_stm - execute _STM
* @ap: target ATA port
* @stm: timing parameter to _STM
*
* Evaluate _STM with timing parameter @stm.
*
* LOCKING:
* EH context.
*
* RETURNS:
* 0 on success, -ENOENT if _STM doesn't exist, -errno on failure.
*/
int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm)
{
acpi_status status;
struct ata_acpi_gtm stm_buf = *stm;
struct acpi_object_list input;
union acpi_object in_params[3];
in_params[0].type = ACPI_TYPE_BUFFER;
in_params[0].buffer.length = sizeof(struct ata_acpi_gtm);
in_params[0].buffer.pointer = (u8 *)&stm_buf;
/* Buffers for id may need byteswapping ? */
in_params[1].type = ACPI_TYPE_BUFFER;
in_params[1].buffer.length = 512;
in_params[1].buffer.pointer = (u8 *)ap->link.device[0].id;
in_params[2].type = ACPI_TYPE_BUFFER;
in_params[2].buffer.length = 512;
in_params[2].buffer.pointer = (u8 *)ap->link.device[1].id;
input.count = 3;
input.pointer = in_params;
status = acpi_evaluate_object(ACPI_HANDLE(&ap->tdev), "_STM",
&input, NULL);
if (status == AE_NOT_FOUND)
return -ENOENT;
if (ACPI_FAILURE(status)) {
ata_port_err(ap, "ACPI set timing mode failed (status=0x%x)\n",
status);
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL_GPL(ata_acpi_stm);
/**
* ata_dev_get_GTF - get the drive bootup default taskfile settings
* @dev: target ATA device
* @gtf: output parameter for buffer containing _GTF taskfile arrays
*
* This applies to both PATA and SATA drives.
*
* The _GTF method has no input parameters.
* It returns a variable number of register set values (registers
* hex 1F1..1F7, taskfiles).
* The <variable number> is not known in advance, so have ACPI-CA
* allocate the buffer as needed and return it, then free it later.
*
* LOCKING:
* EH context.
*
* RETURNS:
* Number of taskfiles on success, 0 if _GTF doesn't exist. -EINVAL
* if _GTF is invalid.
*/
static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
{
acpi_status status;
struct acpi_buffer output;
union acpi_object *out_obj;
int rc = 0;
/* if _GTF is cached, use the cached value */
if (dev->gtf_cache) {
out_obj = dev->gtf_cache;
goto done;
}
/* set up output buffer */
output.length = ACPI_ALLOCATE_BUFFER;
output.pointer = NULL; /* ACPI-CA sets this; save/free it later */
/* _GTF has no input parameters */
status = acpi_evaluate_object(ata_dev_acpi_handle(dev), "_GTF", NULL,
&output);
out_obj = dev->gtf_cache = output.pointer;
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND) {
ata_dev_warn(dev, "_GTF evaluation failed (AE 0x%x)\n",
status);
rc = -EINVAL;
}
goto out_free;
}
if (!output.length || !output.pointer) {
ata_dev_dbg(dev, "Run _GTF: length or ptr is NULL (0x%llx, 0x%p)\n",
(unsigned long long)output.length,
output.pointer);
rc = -EINVAL;
goto out_free;
}
if (out_obj->type != ACPI_TYPE_BUFFER) {
ata_dev_warn(dev, "_GTF unexpected object type 0x%x\n",
out_obj->type);
rc = -EINVAL;
goto out_free;
}
if (out_obj->buffer.length % REGS_PER_GTF) {
ata_dev_warn(dev, "unexpected _GTF length (%d)\n",
out_obj->buffer.length);
rc = -EINVAL;
goto out_free;
}
done:
rc = out_obj->buffer.length / REGS_PER_GTF;
if (gtf) {
*gtf = (void *)out_obj->buffer.pointer;
ata_dev_dbg(dev, "returning gtf=%p, gtf_count=%d\n",
*gtf, rc);
}
return rc;
out_free:
ata_acpi_clear_gtf(dev);
return rc;
}
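/*
 * Example of the validation above: a _GTF buffer of 14 bytes decodes
 * as 14 / REGS_PER_GTF == 2 taskfiles, each mapping its seven bytes
 * onto registers 0x1f1-0x1f7 in order (see ata_acpi_gtf_to_tf()
 * below); a 15-byte buffer fails the "% REGS_PER_GTF" check and is
 * rejected as invalid.
 */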
/**
* ata_acpi_gtm_xfermask - determine xfermode from GTM parameter
* @dev: target device
* @gtm: GTM parameter to use
*
* Determine xfermask for @dev from @gtm.
*
* LOCKING:
* None.
*
* RETURNS:
* Determined xfermask.
*/
unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
const struct ata_acpi_gtm *gtm)
{
unsigned int xfer_mask = 0;
unsigned int type;
int unit;
u8 mode;
/* we always use the 0 slot for crap hardware */
unit = dev->devno;
if (!(gtm->flags & 0x10))
unit = 0;
/* PIO */
mode = ata_timing_cycle2mode(ATA_SHIFT_PIO, gtm->drive[unit].pio);
xfer_mask |= ata_xfer_mode2mask(mode);
/* See if we have MWDMA or UDMA data. We don't bother with
 * MWDMA if UDMA is available, as that means the BIOS set UDMA
 * and our error change-down, if it triggers, goes from UDMA to
 * PIO anyway.
 */
if (!(gtm->flags & (1 << (2 * unit))))
type = ATA_SHIFT_MWDMA;
else
type = ATA_SHIFT_UDMA;
mode = ata_timing_cycle2mode(type, gtm->drive[unit].dma);
xfer_mask |= ata_xfer_mode2mask(mode);
return xfer_mask;
}
EXPORT_SYMBOL_GPL(ata_acpi_gtm_xfermask);
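/*
 * _GTM flags layout used by the decode above: bit 4 (0x10) means the
 * drives have independent timings (otherwise both share slot 0), and
 * bit 0 / bit 2 select UDMA rather than MWDMA for drive 0 / drive 1.
 * For example, flags == 0x11 with drive[1].dma == 120 decodes device 1
 * as MWDMA (bit 2 clear) with a 120 ns cycle, i.e. MWDMA2.
 */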
/**
* ata_acpi_cbl_80wire - Check for 80 wire cable
* @ap: Port to check
* @gtm: GTM data to use
*
* Return 1 if the @gtm indicates the BIOS selected an 80wire mode.
*/
int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
{
struct ata_device *dev;
ata_for_each_dev(dev, &ap->link, ENABLED) {
unsigned int xfer_mask, udma_mask;
xfer_mask = ata_acpi_gtm_xfermask(dev, gtm);
ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask);
if (udma_mask & ~ATA_UDMA_MASK_40C)
return 1;
}
return 0;
}
EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire);
static void ata_acpi_gtf_to_tf(struct ata_device *dev,
const struct ata_acpi_gtf *gtf,
struct ata_taskfile *tf)
{
ata_tf_init(dev, tf);
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf->protocol = ATA_PROT_NODATA;
tf->error = gtf->tf[0]; /* 0x1f1 */
tf->nsect = gtf->tf[1]; /* 0x1f2 */
tf->lbal = gtf->tf[2]; /* 0x1f3 */
tf->lbam = gtf->tf[3]; /* 0x1f4 */
tf->lbah = gtf->tf[4]; /* 0x1f5 */
tf->device = gtf->tf[5]; /* 0x1f6 */
tf->status = gtf->tf[6]; /* 0x1f7 */
}
static int ata_acpi_filter_tf(struct ata_device *dev,
const struct ata_taskfile *tf,
const struct ata_taskfile *ptf)
{
if (dev->gtf_filter & ATA_ACPI_FILTER_SETXFER) {
/* libata doesn't use ACPI to configure transfer mode.
* It will only confuse device configuration. Skip.
*/
if (tf->command == ATA_CMD_SET_FEATURES &&
tf->feature == SETFEATURES_XFER)
return 1;
}
if (dev->gtf_filter & ATA_ACPI_FILTER_LOCK) {
/* BIOS writers, sorry but we don't wanna lock
* features unless the user explicitly said so.
*/
/* DEVICE CONFIGURATION FREEZE LOCK */
if (tf->command == ATA_CMD_CONF_OVERLAY &&
tf->feature == ATA_DCO_FREEZE_LOCK)
return 1;
/* SECURITY FREEZE LOCK */
if (tf->command == ATA_CMD_SEC_FREEZE_LOCK)
return 1;
/* SET MAX LOCK and SET MAX FREEZE LOCK */
if ((!ptf || ptf->command != ATA_CMD_READ_NATIVE_MAX) &&
tf->command == ATA_CMD_SET_MAX &&
(tf->feature == ATA_SET_MAX_LOCK ||
tf->feature == ATA_SET_MAX_FREEZE_LOCK))
return 1;
}
if (tf->command == ATA_CMD_SET_FEATURES &&
tf->feature == SETFEATURES_SATA_ENABLE) {
/* inhibit enabling DIPM */
if (dev->gtf_filter & ATA_ACPI_FILTER_DIPM &&
tf->nsect == SATA_DIPM)
return 1;
/* inhibit FPDMA non-zero offset */
if (dev->gtf_filter & ATA_ACPI_FILTER_FPDMA_OFFSET &&
(tf->nsect == SATA_FPDMA_OFFSET ||
tf->nsect == SATA_FPDMA_IN_ORDER))
return 1;
/* inhibit FPDMA auto activation */
if (dev->gtf_filter & ATA_ACPI_FILTER_FPDMA_AA &&
tf->nsect == SATA_FPDMA_AA)
return 1;
}
return 0;
}
/**
* ata_acpi_run_tf - send taskfile registers to host controller
* @dev: target ATA device
* @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7)
* @prev_gtf: previous command
*
* Outputs ATA taskfile to standard ATA host controller.
* Writes the control, feature, nsect, lbal, lbam, and lbah registers.
* Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
* hob_lbal, hob_lbam, and hob_lbah.
*
* This function waits for idle (!BUSY and !DRQ) after writing
* registers. If the control register has a new value, this
* function also waits for idle after writing control and before
* writing the remaining registers.
*
* LOCKING:
* EH context.
*
* RETURNS:
* 1 if command is executed successfully. 0 if ignored, rejected or
* filtered out, -errno on other errors.
*/
static int ata_acpi_run_tf(struct ata_device *dev,
const struct ata_acpi_gtf *gtf,
const struct ata_acpi_gtf *prev_gtf)
{
struct ata_taskfile *pptf = NULL;
struct ata_taskfile tf, ptf, rtf;
unsigned int err_mask;
const char *descr;
int rc;
if ((gtf->tf[0] == 0) && (gtf->tf[1] == 0) && (gtf->tf[2] == 0)
&& (gtf->tf[3] == 0) && (gtf->tf[4] == 0) && (gtf->tf[5] == 0)
&& (gtf->tf[6] == 0))
return 0;
ata_acpi_gtf_to_tf(dev, gtf, &tf);
if (prev_gtf) {
ata_acpi_gtf_to_tf(dev, prev_gtf, &ptf);
pptf = &ptf;
}
descr = ata_get_cmd_name(tf.command);
if (!ata_acpi_filter_tf(dev, &tf, pptf)) {
rtf = tf;
err_mask = ata_exec_internal(dev, &rtf, NULL,
DMA_NONE, NULL, 0, 0);
switch (err_mask) {
case 0:
ata_dev_dbg(dev,
"ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x"
"(%s) succeeded\n",
tf.command, tf.feature, tf.nsect, tf.lbal,
tf.lbam, tf.lbah, tf.device, descr);
rc = 1;
break;
case AC_ERR_DEV:
ata_dev_info(dev,
"ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x"
"(%s) rejected by device (Stat=0x%02x Err=0x%02x)",
tf.command, tf.feature, tf.nsect, tf.lbal,
tf.lbam, tf.lbah, tf.device, descr,
rtf.status, rtf.error);
rc = 0;
break;
default:
ata_dev_err(dev,
"ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x"
"(%s) failed (Emask=0x%x Stat=0x%02x Err=0x%02x)",
tf.command, tf.feature, tf.nsect, tf.lbal,
tf.lbam, tf.lbah, tf.device, descr,
err_mask, rtf.status, rtf.error);
rc = -EIO;
break;
}
} else {
ata_dev_info(dev,
"ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x"
"(%s) filtered out\n",
tf.command, tf.feature, tf.nsect, tf.lbal,
tf.lbam, tf.lbah, tf.device, descr);
rc = 0;
}
return rc;
}
/**
* ata_acpi_exec_tfs - get then write drive taskfile settings
* @dev: target ATA device
* @nr_executed: out parameter for the number of executed commands
*
* Evaluate _GTF and execute returned taskfiles.
*
* LOCKING:
* EH context.
*
* RETURNS:
* Number of executed taskfiles on success, 0 if _GTF doesn't exist.
* -errno on other errors.
*/
static int ata_acpi_exec_tfs(struct ata_device *dev, int *nr_executed)
{
struct ata_acpi_gtf *gtf = NULL, *pgtf = NULL;
int gtf_count, i, rc;
/* get taskfiles */
rc = ata_dev_get_GTF(dev, >f);
if (rc < 0)
return rc;
gtf_count = rc;
/* execute them */
for (i = 0; i < gtf_count; i++, gtf++) {
rc = ata_acpi_run_tf(dev, gtf, pgtf);
if (rc < 0)
break;
if (rc) {
(*nr_executed)++;
pgtf = gtf;
}
}
ata_acpi_clear_gtf(dev);
if (rc < 0)
return rc;
return 0;
}
/**
* ata_acpi_push_id - send Identify data to drive
* @dev: target ATA device
*
* _SDD ACPI object: for SATA mode only
* Must be after Identify (Packet) Device -- uses its data
* At the moment this function never returns a failure. It is an optional
* method and if it fails for whatever reason, we should still
* just keep going.
*
* LOCKING:
* EH context.
*
* RETURNS:
* 0 on success, -ENOENT if _SDD doesn't exist, -errno on failure.
*/
static int ata_acpi_push_id(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
acpi_status status;
struct acpi_object_list input;
union acpi_object in_params[1];
ata_dev_dbg(dev, "%s: ix = %d, port#: %d\n",
__func__, dev->devno, ap->port_no);
/* Give the drive Identify data to the drive via the _SDD method */
/* _SDD: set up input parameters */
input.count = 1;
input.pointer = in_params;
in_params[0].type = ACPI_TYPE_BUFFER;
in_params[0].buffer.length = sizeof(dev->id[0]) * ATA_ID_WORDS;
in_params[0].buffer.pointer = (u8 *)dev->id;
/* Output buffer: _SDD has no output */
/* It's OK for _SDD to be missing too. */
swap_buf_le16(dev->id, ATA_ID_WORDS);
status = acpi_evaluate_object(ata_dev_acpi_handle(dev), "_SDD", &input,
NULL);
swap_buf_le16(dev->id, ATA_ID_WORDS);
if (status == AE_NOT_FOUND)
return -ENOENT;
if (ACPI_FAILURE(status)) {
ata_dev_warn(dev, "ACPI _SDD failed (AE 0x%x)\n", status);
return -EIO;
}
return 0;
}
/**
* ata_acpi_on_resume - ATA ACPI hook called on resume
* @ap: target ATA port
*
* This function is called when @ap is resumed - right after port
* itself is resumed but before any EH action is taken.
*
* LOCKING:
* EH context.
*/
void ata_acpi_on_resume(struct ata_port *ap)
{
const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
struct ata_device *dev;
if (ACPI_HANDLE(&ap->tdev) && gtm) {
/* _GTM valid */
/* restore timing parameters */
ata_acpi_stm(ap, gtm);
/* _GTF should immediately follow _STM so that it can
* use values set by _STM. Cache _GTF result and
* schedule _GTF.
*/
ata_for_each_dev(dev, &ap->link, ALL) {
ata_acpi_clear_gtf(dev);
if (ata_dev_enabled(dev) &&
ata_dev_acpi_handle(dev) &&
ata_dev_get_GTF(dev, NULL) >= 0)
dev->flags |= ATA_DFLAG_ACPI_PENDING;
}
} else {
/* SATA _GTF needs to be evaluated after _SDD and
* there's no reason to evaluate IDE _GTF early
* without _STM. Clear cache and schedule _GTF.
*/
ata_for_each_dev(dev, &ap->link, ALL) {
ata_acpi_clear_gtf(dev);
if (ata_dev_enabled(dev))
dev->flags |= ATA_DFLAG_ACPI_PENDING;
}
}
}
static int ata_acpi_choose_suspend_state(struct ata_device *dev, bool runtime)
{
int d_max_in = ACPI_STATE_D3_COLD;
if (!runtime)
goto out;
/*
* For ATAPI, runtime D3 cold is only allowed
* for ZPODD in zero power ready state
*/
if (dev->class == ATA_DEV_ATAPI &&
!(zpodd_dev_enabled(dev) && zpodd_zpready(dev)))
d_max_in = ACPI_STATE_D3_HOT;
out:
return acpi_pm_device_sleep_state(&dev->tdev, NULL, d_max_in);
}
static void sata_acpi_set_state(struct ata_port *ap, pm_message_t state)
{
bool runtime = PMSG_IS_AUTO(state);
struct ata_device *dev;
acpi_handle handle;
int acpi_state;
ata_for_each_dev(dev, &ap->link, ENABLED) {
handle = ata_dev_acpi_handle(dev);
if (!handle)
continue;
if (!(state.event & PM_EVENT_RESUME)) {
acpi_state = ata_acpi_choose_suspend_state(dev, runtime);
if (acpi_state == ACPI_STATE_D0)
continue;
if (runtime && zpodd_dev_enabled(dev) &&
acpi_state == ACPI_STATE_D3_COLD)
zpodd_enable_run_wake(dev);
acpi_bus_set_power(handle, acpi_state);
} else {
if (runtime && zpodd_dev_enabled(dev))
zpodd_disable_run_wake(dev);
acpi_bus_set_power(handle, ACPI_STATE_D0);
}
}
}
/* ACPI spec requires _PS0 when IDE power on and _PS3 when power off */
static void pata_acpi_set_state(struct ata_port *ap, pm_message_t state)
{
struct ata_device *dev;
acpi_handle port_handle;
port_handle = ACPI_HANDLE(&ap->tdev);
if (!port_handle)
return;
/* channel first and then drives for power on, and vice versa
for power off */
if (state.event & PM_EVENT_RESUME)
acpi_bus_set_power(port_handle, ACPI_STATE_D0);
ata_for_each_dev(dev, &ap->link, ENABLED) {
acpi_handle dev_handle = ata_dev_acpi_handle(dev);
if (!dev_handle)
continue;
acpi_bus_set_power(dev_handle, state.event & PM_EVENT_RESUME ?
ACPI_STATE_D0 : ACPI_STATE_D3_COLD);
}
if (!(state.event & PM_EVENT_RESUME))
acpi_bus_set_power(port_handle, ACPI_STATE_D3_COLD);
}
/**
* ata_acpi_set_state - set the port power state
* @ap: target ATA port
* @state: state, on/off
*
* This function sets a proper ACPI D state for the device on
* system and runtime PM operations.
*/
void ata_acpi_set_state(struct ata_port *ap, pm_message_t state)
{
if (ap->flags & ATA_FLAG_ACPI_SATA)
sata_acpi_set_state(ap, state);
else
pata_acpi_set_state(ap, state);
}
/**
* ata_acpi_on_devcfg - ATA ACPI hook called on device configuration
* @dev: target ATA device
*
* This function is called when @dev is about to be configured.
* IDENTIFY data might have been modified after this hook is run.
*
* LOCKING:
* EH context.
*
* RETURNS:
* Positive number if IDENTIFY data needs to be refreshed, 0 if not,
* -errno on failure.
*/
int ata_acpi_on_devcfg(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
struct ata_eh_context *ehc = &ap->link.eh_context;
int acpi_sata = ap->flags & ATA_FLAG_ACPI_SATA;
int nr_executed = 0;
int rc;
if (!ata_dev_acpi_handle(dev))
return 0;
/* do we need to do _GTF? */
if (!(dev->flags & ATA_DFLAG_ACPI_PENDING) &&
!(acpi_sata && (ehc->i.flags & ATA_EHI_DID_HARDRESET)))
return 0;
/* do _SDD if SATA */
if (acpi_sata) {
rc = ata_acpi_push_id(dev);
if (rc && rc != -ENOENT)
goto acpi_err;
}
/* do _GTF */
rc = ata_acpi_exec_tfs(dev, &nr_executed);
if (rc)
goto acpi_err;
dev->flags &= ~ATA_DFLAG_ACPI_PENDING;
/* refresh IDENTIFY page if any _GTF command has been executed */
if (nr_executed) {
rc = ata_dev_reread_id(dev, 0);
if (rc < 0) {
ata_dev_err(dev,
"failed to IDENTIFY after ACPI commands\n");
return rc;
}
}
return 0;
acpi_err:
/* ignore evaluation failure if we can continue safely */
if (rc == -EINVAL && !nr_executed && !ata_port_is_frozen(ap))
return 0;
/* fail and let EH retry once more for unknown IO errors */
if (!(dev->flags & ATA_DFLAG_ACPI_FAILED)) {
dev->flags |= ATA_DFLAG_ACPI_FAILED;
return rc;
}
dev->flags |= ATA_DFLAG_ACPI_DISABLED;
ata_dev_warn(dev, "ACPI: failed the second time, disabled\n");
/* We can safely continue if no _GTF command has been executed
* and port is not frozen.
*/
if (!nr_executed && !ata_port_is_frozen(ap))
return 0;
return rc;
}
/**
* ata_acpi_on_disable - ATA ACPI hook called when a device is disabled
* @dev: target ATA device
*
* This function is called when @dev is about to be disabled.
*
* LOCKING:
* EH context.
*/
void ata_acpi_on_disable(struct ata_device *dev)
{
ata_acpi_clear_gtf(dev);
}
| linux-master | drivers/ata/libata-acpi.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <asm/dma.h>
#include <asm/ecard.h>
#define DRV_NAME "pata_icside"
#define ICS_IDENT_OFFSET 0x2280
#define ICS_ARCIN_V5_INTRSTAT 0x0000
#define ICS_ARCIN_V5_INTROFFSET 0x0004
#define ICS_ARCIN_V6_INTROFFSET_1 0x2200
#define ICS_ARCIN_V6_INTRSTAT_1 0x2290
#define ICS_ARCIN_V6_INTROFFSET_2 0x3200
#define ICS_ARCIN_V6_INTRSTAT_2 0x3290
struct portinfo {
unsigned int dataoffset;
unsigned int ctrloffset;
unsigned int stepping;
};
static const struct portinfo pata_icside_portinfo_v5 = {
.dataoffset = 0x2800,
.ctrloffset = 0x2b80,
.stepping = 6,
};
static const struct portinfo pata_icside_portinfo_v6_1 = {
.dataoffset = 0x2000,
.ctrloffset = 0x2380,
.stepping = 6,
};
static const struct portinfo pata_icside_portinfo_v6_2 = {
.dataoffset = 0x3000,
.ctrloffset = 0x3380,
.stepping = 6,
};
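/*
 * Register spacing example for the tables above: a stepping of 6
 * spaces the taskfile registers 1 << 6 == 64 bytes apart, so in the
 * v5 layout the data register (ATA_REG_DATA == 0) sits at
 * base + 0x2800 and the status register (ATA_REG_STATUS == 7) at
 * base + 0x2800 + (7 << 6) == base + 0x29c0; see
 * pata_icside_setup_ioaddr().
 */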
struct pata_icside_state {
void __iomem *irq_port;
void __iomem *ioc_base;
unsigned int type;
unsigned int dma;
struct {
u8 port_sel;
u8 disabled;
unsigned int speed[ATA_MAX_DEVICES];
} port[2];
};
struct pata_icside_info {
struct pata_icside_state *state;
struct expansion_card *ec;
void __iomem *base;
void __iomem *irqaddr;
unsigned int irqmask;
const expansioncard_ops_t *irqops;
unsigned int mwdma_mask;
unsigned int nr_ports;
const struct portinfo *port[2];
unsigned long raw_base;
unsigned long raw_ioc_base;
};
#define ICS_TYPE_A3IN 0
#define ICS_TYPE_A3USER 1
#define ICS_TYPE_V6 3
#define ICS_TYPE_V5 15
#define ICS_TYPE_NOTYPE ((unsigned int)-1)
/* ---------------- Version 5 PCB Support Functions --------------------- */
/* Prototype: pata_icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
* Purpose : enable interrupts from card
*/
static void pata_icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
{
struct pata_icside_state *state = ec->irq_data;
writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}
/* Prototype: pata_icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
* Purpose : disable interrupts from card
*/
static void pata_icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
{
struct pata_icside_state *state = ec->irq_data;
readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}
static const expansioncard_ops_t pata_icside_ops_arcin_v5 = {
.irqenable = pata_icside_irqenable_arcin_v5,
.irqdisable = pata_icside_irqdisable_arcin_v5,
};
/* ---------------- Version 6 PCB Support Functions --------------------- */
/* Prototype: pata_icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
* Purpose : enable interrupts from card
*/
static void pata_icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
struct pata_icside_state *state = ec->irq_data;
void __iomem *base = state->irq_port;
if (!state->port[0].disabled)
writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
if (!state->port[1].disabled)
writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
}
/* Prototype: pata_icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
* Purpose : disable interrupts from card
*/
static void pata_icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
struct pata_icside_state *state = ec->irq_data;
readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
}
/* Prototype: pata_icside_irqpending_arcin_v6(struct expansion_card *ec)
* Purpose : detect an active interrupt from card
*/
static int pata_icside_irqpending_arcin_v6(struct expansion_card *ec)
{
struct pata_icside_state *state = ec->irq_data;
return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 ||
readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1;
}
static const expansioncard_ops_t pata_icside_ops_arcin_v6 = {
.irqenable = pata_icside_irqenable_arcin_v6,
.irqdisable = pata_icside_irqdisable_arcin_v6,
.irqpending = pata_icside_irqpending_arcin_v6,
};
/*
* SG-DMA support.
*
* Similar to the BM-DMA, but we use the RiscPC's IOMD DMA controllers.
* There is only one DMA controller per card, which means that only
* one drive can be accessed at one time. NOTE! We do not enforce that
* here, but we rely on the main IDE driver spotting that both
* interfaces use the same IRQ, which should guarantee this.
*/
/*
* Configure the IOMD to give the appropriate timings for the transfer
* mode being requested. We take the advice of the ATA standards, and
* calculate the cycle time based on the transfer mode, and the EIDE
* MW DMA specs that the drive provides in the IDENTIFY command.
*
* We have the following IOMD DMA modes to choose from:
*
* Type Active Recovery Cycle
* A 250 (250) 312 (550) 562 (800)
* B 187 (200) 250 (550) 437 (750)
* C 125 (125) 125 (375) 250 (500)
* D 62 (50) 125 (375) 187 (425)
*
* (figures in brackets are actual measured timings on DIOR/DIOW)
*
* However, we also need to take care of the read/write active and
* recovery timings:
*
* Read Write
* Mode Active -- Recovery -- Cycle IOMD type
* MW0 215 50 215 480 A
* MW1 80 50 50 150 C
* MW2 70 25 25 120 C
*/
static void pata_icside_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct pata_icside_state *state = ap->host->private_data;
struct ata_timing t;
unsigned int cycle;
char iomd_type;
/*
* DMA is based on a 16MHz clock
*/
if (ata_timing_compute(adev, adev->dma_mode, &t, 1000, 1))
return;
/*
* Choose the IOMD cycle timing which ensures that the interface
* satisfies the measured active, recovery and cycle times.
*/
if (t.active <= 50 && t.recover <= 375 && t.cycle <= 425) {
iomd_type = 'D';
cycle = 187;
} else if (t.active <= 125 && t.recover <= 375 && t.cycle <= 500) {
iomd_type = 'C';
cycle = 250;
} else if (t.active <= 200 && t.recover <= 550 && t.cycle <= 750) {
iomd_type = 'B';
cycle = 437;
} else {
iomd_type = 'A';
cycle = 562;
}
ata_dev_info(adev, "timings: act %dns rec %dns cyc %dns (%c)\n",
t.active, t.recover, t.cycle, iomd_type);
state->port[ap->port_no].speed[adev->devno] = cycle;
}
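/*
 * Worked example of the selection above using the MW2 row of the
 * table: ata_timing_compute() gives roughly act 70 / rec 25 / cyc 120
 * (ns), which fails the type 'D' test (70 > 50) but passes the type
 * 'C' test (70 <= 125, 25 <= 375, 120 <= 500), so the port runs with
 * the 250 ns IOMD type C cycle.
 */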
static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pata_icside_state *state = ap->host->private_data;
unsigned int write = qc->tf.flags & ATA_TFLAG_WRITE;
/*
* We are simplex; BUG if we try to fiddle with DMA
* while it's active.
*/
BUG_ON(dma_channel_active(state->dma));
/*
* Route the DMA signals to the correct interface
*/
writeb(state->port[ap->port_no].port_sel, state->ioc_base);
set_dma_speed(state->dma, state->port[ap->port_no].speed[qc->dev->devno]);
set_dma_sg(state->dma, qc->sg, qc->n_elem);
set_dma_mode(state->dma, write ? DMA_MODE_WRITE : DMA_MODE_READ);
/* issue r/w command */
ap->ops->sff_exec_command(ap, &qc->tf);
}
static void pata_icside_bmdma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pata_icside_state *state = ap->host->private_data;
BUG_ON(dma_channel_active(state->dma));
enable_dma(state->dma);
}
static void pata_icside_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pata_icside_state *state = ap->host->private_data;
disable_dma(state->dma);
/* see ata_bmdma_stop */
ata_sff_dma_pause(ap);
}
static u8 pata_icside_bmdma_status(struct ata_port *ap)
{
struct pata_icside_state *state = ap->host->private_data;
void __iomem *irq_port;
irq_port = state->irq_port + (ap->port_no ? ICS_ARCIN_V6_INTRSTAT_2 :
ICS_ARCIN_V6_INTRSTAT_1);
return readb(irq_port) & 1 ? ATA_DMA_INTR : 0;
}
static int icside_dma_init(struct pata_icside_info *info)
{
struct pata_icside_state *state = info->state;
struct expansion_card *ec = info->ec;
int i;
for (i = 0; i < ATA_MAX_DEVICES; i++) {
state->port[0].speed[i] = 480;
state->port[1].speed[i] = 480;
}
if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
state->dma = ec->dma;
info->mwdma_mask = ATA_MWDMA2;
}
return 0;
}
static const struct scsi_host_template pata_icside_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = SG_MAX_SEGMENTS,
.dma_boundary = IOMD_DMA_BOUNDARY,
};
static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
{
struct ata_port *ap = link->ap;
struct pata_icside_state *state = ap->host->private_data;
if (classes[0] != ATA_DEV_NONE || classes[1] != ATA_DEV_NONE)
return ata_sff_postreset(link, classes);
state->port[ap->port_no].disabled = 1;
if (state->type == ICS_TYPE_V6) {
/*
* Disable interrupts from this port, otherwise we
* receive spurious interrupts from the floating
* interrupt line.
*/
void __iomem *irq_port = state->irq_port +
(ap->port_no ? ICS_ARCIN_V6_INTROFFSET_2 : ICS_ARCIN_V6_INTROFFSET_1);
readb(irq_port);
}
}
static struct ata_port_operations pata_icside_port_ops = {
.inherits = &ata_bmdma_port_ops,
/* no need to build any PRD tables for DMA */
.qc_prep = ata_noop_qc_prep,
.sff_data_xfer = ata_sff_data_xfer32,
.bmdma_setup = pata_icside_bmdma_setup,
.bmdma_start = pata_icside_bmdma_start,
.bmdma_stop = pata_icside_bmdma_stop,
.bmdma_status = pata_icside_bmdma_status,
.cable_detect = ata_cable_40wire,
.set_dmamode = pata_icside_set_dmamode,
.postreset = pata_icside_postreset,
.port_start = ATA_OP_NULL, /* don't need PRD table */
};
static void pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base,
struct pata_icside_info *info,
const struct portinfo *port)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
void __iomem *cmd = base + port->dataoffset;
ioaddr->cmd_addr = cmd;
ioaddr->data_addr = cmd + (ATA_REG_DATA << port->stepping);
ioaddr->error_addr = cmd + (ATA_REG_ERR << port->stepping);
ioaddr->feature_addr = cmd + (ATA_REG_FEATURE << port->stepping);
ioaddr->nsect_addr = cmd + (ATA_REG_NSECT << port->stepping);
ioaddr->lbal_addr = cmd + (ATA_REG_LBAL << port->stepping);
ioaddr->lbam_addr = cmd + (ATA_REG_LBAM << port->stepping);
ioaddr->lbah_addr = cmd + (ATA_REG_LBAH << port->stepping);
ioaddr->device_addr = cmd + (ATA_REG_DEVICE << port->stepping);
ioaddr->status_addr = cmd + (ATA_REG_STATUS << port->stepping);
ioaddr->command_addr = cmd + (ATA_REG_CMD << port->stepping);
ioaddr->ctl_addr = base + port->ctrloffset;
ioaddr->altstatus_addr = ioaddr->ctl_addr;
ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
info->raw_base + port->dataoffset,
info->raw_base + port->ctrloffset);
if (info->raw_ioc_base)
ata_port_desc(ap, "iocbase 0x%lx", info->raw_ioc_base);
}
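/*
 * Illustrative note (not driver code): the taskfile registers on
 * these cards are spaced 1 << stepping bytes apart, so each address
 * above is computed as cmd + (reg << stepping). With a hypothetical
 * stepping of 2, for example, ATA_REG_NSECT (0x02) would land at
 * cmd + 0x08.
 */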
static int pata_icside_register_v5(struct pata_icside_info *info)
{
struct pata_icside_state *state = info->state;
void __iomem *base;
base = ecardm_iomap(info->ec, ECARD_RES_MEMC, 0, 0);
if (!base)
return -ENOMEM;
state->irq_port = base;
info->base = base;
info->irqaddr = base + ICS_ARCIN_V5_INTRSTAT;
info->irqmask = 1;
info->irqops = &pata_icside_ops_arcin_v5;
info->nr_ports = 1;
info->port[0] = &pata_icside_portinfo_v5;
info->raw_base = ecard_resource_start(info->ec, ECARD_RES_MEMC);
return 0;
}
static int pata_icside_register_v6(struct pata_icside_info *info)
{
struct pata_icside_state *state = info->state;
struct expansion_card *ec = info->ec;
void __iomem *ioc_base, *easi_base;
unsigned int sel = 0;
ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
if (!ioc_base)
return -ENOMEM;
easi_base = ioc_base;
if (ecard_resource_flags(ec, ECARD_RES_EASI)) {
easi_base = ecardm_iomap(ec, ECARD_RES_EASI, 0, 0);
if (!easi_base)
return -ENOMEM;
/*
* Enable access to the EASI region.
*/
sel = 1 << 5;
}
writeb(sel, ioc_base);
state->irq_port = easi_base;
state->ioc_base = ioc_base;
state->port[0].port_sel = sel;
state->port[1].port_sel = sel | 1;
info->base = easi_base;
info->irqops = &pata_icside_ops_arcin_v6;
info->nr_ports = 2;
info->port[0] = &pata_icside_portinfo_v6_1;
info->port[1] = &pata_icside_portinfo_v6_2;
info->raw_base = ecard_resource_start(ec, ECARD_RES_EASI);
info->raw_ioc_base = ecard_resource_start(ec, ECARD_RES_IOCFAST);
return icside_dma_init(info);
}
static int pata_icside_add_ports(struct pata_icside_info *info)
{
struct expansion_card *ec = info->ec;
struct ata_host *host;
int i;
if (info->irqaddr) {
ec->irqaddr = info->irqaddr;
ec->irqmask = info->irqmask;
}
if (info->irqops)
ecard_setirq(ec, info->irqops, info->state);
/*
* Be on the safe side - disable interrupts
*/
ec->ops->irqdisable(ec, ec->irq);
host = ata_host_alloc(&ec->dev, info->nr_ports);
if (!host)
return -ENOMEM;
host->private_data = info->state;
host->flags = ATA_HOST_SIMPLEX;
for (i = 0; i < info->nr_ports; i++) {
struct ata_port *ap = host->ports[i];
ap->pio_mask = ATA_PIO4;
ap->mwdma_mask = info->mwdma_mask;
ap->flags |= ATA_FLAG_SLAVE_POSS;
ap->ops = &pata_icside_port_ops;
pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]);
}
return ata_host_activate(host, ec->irq, ata_bmdma_interrupt, 0,
&pata_icside_sht);
}
static int pata_icside_probe(struct expansion_card *ec,
const struct ecard_id *id)
{
struct pata_icside_state *state;
struct pata_icside_info info;
void __iomem *idmem;
int ret;
ret = ecard_request_resources(ec);
if (ret)
goto out;
state = devm_kzalloc(&ec->dev, sizeof(*state), GFP_KERNEL);
if (!state) {
ret = -ENOMEM;
goto release;
}
state->type = ICS_TYPE_NOTYPE;
state->dma = NO_DMA;
idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
if (idmem) {
unsigned int type;
type = readb(idmem + ICS_IDENT_OFFSET) & 1;
type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
ecardm_iounmap(ec, idmem);
state->type = type;
}
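/*
 * Illustrative note: the card identity is encoded one bit per word,
 * LSB first, at byte offsets 0, 4, 8 and 12 from ICS_IDENT_OFFSET;
 * e.g. LSBs of 1, 0, 1, 0 at those offsets decode to type 0x5.
 * An equivalent loop (hypothetical, shown only for clarity):
 *
 *	for (i = 0; i < 4; i++)
 *		type |= (readb(idmem + ICS_IDENT_OFFSET + 4 * i) & 1) << i;
 */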
memset(&info, 0, sizeof(info));
info.state = state;
info.ec = ec;
switch (state->type) {
case ICS_TYPE_A3IN:
dev_warn(&ec->dev, "A3IN unsupported\n");
ret = -ENODEV;
break;
case ICS_TYPE_A3USER:
dev_warn(&ec->dev, "A3USER unsupported\n");
ret = -ENODEV;
break;
case ICS_TYPE_V5:
ret = pata_icside_register_v5(&info);
break;
case ICS_TYPE_V6:
ret = pata_icside_register_v6(&info);
break;
default:
dev_warn(&ec->dev, "unknown interface type\n");
ret = -ENODEV;
break;
}
if (ret == 0)
ret = pata_icside_add_ports(&info);
if (ret == 0)
goto out;
release:
ecard_release_resources(ec);
out:
return ret;
}
static void pata_icside_shutdown(struct expansion_card *ec)
{
struct ata_host *host = ecard_get_drvdata(ec);
unsigned long flags;
/*
* Disable interrupts from this card. We need to do
* this before disabling EASI since we may be accessing
* this register via that region.
*/
local_irq_save(flags);
ec->ops->irqdisable(ec, ec->irq);
local_irq_restore(flags);
/*
* Reset the ROM pointer so that we can read the ROM
* after a soft reboot. This also disables access to
* the IDE taskfile via the EASI region.
*/
if (host) {
struct pata_icside_state *state = host->private_data;
if (state->ioc_base)
writeb(0, state->ioc_base);
}
}
static void pata_icside_remove(struct expansion_card *ec)
{
struct ata_host *host = ecard_get_drvdata(ec);
struct pata_icside_state *state = host->private_data;
ata_host_detach(host);
pata_icside_shutdown(ec);
/*
* don't NULL out the drvdata - devres/libata wants it
* to free the ata_host structure.
*/
if (state->dma != NO_DMA)
free_dma(state->dma);
ecard_release_resources(ec);
}
static const struct ecard_id pata_icside_ids[] = {
{ MANU_ICS, PROD_ICS_IDE },
{ MANU_ICS2, PROD_ICS2_IDE },
{ 0xffff, 0xffff }
};
static struct ecard_driver pata_icside_driver = {
.probe = pata_icside_probe,
.remove = pata_icside_remove,
.shutdown = pata_icside_shutdown,
.id_table = pata_icside_ids,
.drv = {
.name = DRV_NAME,
},
};
static int __init pata_icside_init(void)
{
return ecard_register_driver(&pata_icside_driver);
}
static void __exit pata_icside_exit(void)
{
ecard_remove_driver(&pata_icside_driver);
}
MODULE_AUTHOR("Russell King <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ICS PATA driver");
module_init(pata_icside_init);
module_exit(pata_icside_exit);
| linux-master | drivers/ata/pata_icside.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_triflex.c - Compaq PATA for new ATA layer
* (C) 2005 Red Hat Inc
* Alan Cox <[email protected]>
*
* based upon
*
* triflex.c
*
* IDE Chipset driver for the Compaq TriFlex IDE controller.
*
* Known to work with the Compaq Workstation 5x00 series.
*
* Copyright (C) 2002 Hewlett-Packard Development Group, L.P.
* Author: Torben Mathiasen <[email protected]>
*
* Loosely based on the piix & svwks drivers.
*
* Documentation:
* Not publicly available.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_triflex"
#define DRV_VERSION "0.2.8"
/**
* triflex_prereset - probe begin
* @link: ATA link
* @deadline: deadline jiffies for the operation
*
* Set up cable type and use generic probe init
*/
static int triflex_prereset(struct ata_link *link, unsigned long deadline)
{
static const struct pci_bits triflex_enable_bits[] = {
{ 0x80, 1, 0x01, 0x01 },
{ 0x80, 1, 0x02, 0x02 }
};
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (!pci_test_config_bits(pdev, &triflex_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
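/*
 * Illustrative note (not driver code): triflex_enable_bits[] above
 * says the channel enable bits live at PCI config offset 0x80, bit
 * 0x01 for the primary and bit 0x02 for the secondary channel. A
 * channel whose bit reads 0 is reported absent (-ENOENT) before any
 * reset is attempted, e.g.:
 *
 *	pci_test_config_bits(pdev, &triflex_enable_bits[0])
 *
 * returns non-zero only if the byte at offset 0x80 has bit 0x01 set.
 */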
/**
* triflex_load_timing - timing configuration
* @ap: ATA interface
* @adev: Device on the bus
* @speed: speed to configure
*
* The Triflex has one set of timings per device per channel. This
* means we must do some switching. As the PIO and DMA timings don't
* match, we have to do some reloading, unlike PIIX devices where
* tuning tricks can avoid it.
*/
static void triflex_load_timing(struct ata_port *ap, struct ata_device *adev, int speed)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 timing = 0;
u32 triflex_timing, old_triflex_timing;
int channel_offset = ap->port_no ? 0x74 : 0x70;
unsigned int is_slave = (adev->devno != 0);
pci_read_config_dword(pdev, channel_offset, &old_triflex_timing);
triflex_timing = old_triflex_timing;
switch (speed) {
case XFER_MW_DMA_2:
	timing = 0x0103;
	break;
case XFER_MW_DMA_1:
	timing = 0x0203;
	break;
case XFER_MW_DMA_0:
	timing = 0x0808;
	break;
case XFER_SW_DMA_2:
case XFER_SW_DMA_1:
case XFER_SW_DMA_0:
	timing = 0x0F0F;
	break;
case XFER_PIO_4:
	timing = 0x0202;
	break;
case XFER_PIO_3:
	timing = 0x0204;
	break;
case XFER_PIO_2:
	timing = 0x0404;
	break;
case XFER_PIO_1:
	timing = 0x0508;
	break;
case XFER_PIO_0:
	timing = 0x0808;
	break;
default:
	BUG();
}
triflex_timing &= ~(0xFFFF << (16 * is_slave));
triflex_timing |= (timing << (16 * is_slave));
if (triflex_timing != old_triflex_timing)
pci_write_config_dword(pdev, channel_offset, triflex_timing);
}
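/*
 * Worked example (illustrative only): each channel's 32-bit timing
 * register holds one 16-bit field per device, master in bits 0-15
 * and slave in bits 16-31. Loading MWDMA2 (0x0103) for the slave of
 * a channel whose register reads 0x02020202:
 *
 *	0x02020202 & ~(0xFFFF << 16)  ->  0x00000202
 *	0x00000202 |  (0x0103 << 16)  ->  0x01030202
 */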
/**
* triflex_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Use the timing loader to set up the PIO mode. We have to do this
* because DMA start/stop will only be called once DMA occurs. If there
* has been no DMA then the PIO timings are still needed.
*/
static void triflex_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
triflex_load_timing(ap, adev, adev->pio_mode);
}
/**
* triflex_bmdma_start - DMA start callback
* @qc: Command in progress
*
* Usually drivers set the DMA timing at the point the set_dmamode call
* is made. Triflex however requires we load new timings on the
* transition or keep matching PIO/DMA pairs (i.e. MWDMA2/PIO4 etc.).
* We load the DMA timings just before starting DMA and then restore
* the PIO timing when the DMA is finished.
*/
static void triflex_bmdma_start(struct ata_queued_cmd *qc)
{
triflex_load_timing(qc->ap, qc->dev, qc->dev->dma_mode);
ata_bmdma_start(qc);
}
/**
* triflex_bmdma_stop - DMA stop callback
* @qc: ATA command
*
* We loaded new timings in dma_start, as a result we need to restore
* the PIO timings in dma_stop so that the next command issue gets the
* right clock values.
*/
static void triflex_bmdma_stop(struct ata_queued_cmd *qc)
{
ata_bmdma_stop(qc);
triflex_load_timing(qc->ap, qc->dev, qc->dev->pio_mode);
}
static const struct scsi_host_template triflex_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations triflex_port_ops = {
.inherits = &ata_bmdma_port_ops,
.bmdma_start = triflex_bmdma_start,
.bmdma_stop = triflex_bmdma_stop,
.cable_detect = ata_cable_40wire,
.set_piomode = triflex_set_piomode,
.prereset = triflex_prereset,
};
static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.port_ops = &triflex_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
ata_print_version_once(&dev->dev, DRV_VERSION);
return ata_pci_bmdma_init_one(dev, ppi, &triflex_sht, NULL, 0);
}
static const struct pci_device_id triflex[] = {
{ PCI_VDEVICE(COMPAQ, PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE), },
{ },
};
#ifdef CONFIG_PM_SLEEP
static int triflex_ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = pci_get_drvdata(pdev);
ata_host_suspend(host, mesg);
/*
* We must not disable or power down the device.
* The APM BIOS refuses to suspend if IDE is not accessible.
*/
pci_save_state(pdev);
return 0;
}
#endif
static struct pci_driver triflex_pci_driver = {
.name = DRV_NAME,
.id_table = triflex,
.probe = triflex_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = triflex_ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(triflex_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Compaq Triflex");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, triflex);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_triflex.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* libata-eh.c - libata error handling
*
* Copyright 2006 Tejun Heo <[email protected]>
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* Hardware documentation available from http://www.t13.org/ and
* http://www.sata-io.org/
*/
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"
#include <linux/libata.h>
#include <trace/events/libata.h>
#include "libata.h"
enum {
/* speed down verdicts */
ATA_EH_SPDN_NCQ_OFF = (1 << 0),
ATA_EH_SPDN_SPEED_DOWN = (1 << 1),
ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2),
ATA_EH_SPDN_KEEP_ERRORS = (1 << 3),
/* error flags */
ATA_EFLAG_IS_IO = (1 << 0),
ATA_EFLAG_DUBIOUS_XFER = (1 << 1),
ATA_EFLAG_OLD_ER = (1 << 31),
/* error categories */
ATA_ECAT_NONE = 0,
ATA_ECAT_ATA_BUS = 1,
ATA_ECAT_TOUT_HSM = 2,
ATA_ECAT_UNK_DEV = 3,
ATA_ECAT_DUBIOUS_NONE = 4,
ATA_ECAT_DUBIOUS_ATA_BUS = 5,
ATA_ECAT_DUBIOUS_TOUT_HSM = 6,
ATA_ECAT_DUBIOUS_UNK_DEV = 7,
ATA_ECAT_NR = 8,
ATA_EH_CMD_DFL_TIMEOUT = 5000,
/* always put at least this amount of time between resets */
ATA_EH_RESET_COOL_DOWN = 5000,
/* Waiting in ->prereset can never be reliable. It's
* sometimes nice to wait there but it can't be depended upon;
* otherwise, we wouldn't be resetting. Just give it enough
* time for most drives to spin up.
*/
ATA_EH_PRERESET_TIMEOUT = 10000,
ATA_EH_FASTDRAIN_INTERVAL = 3000,
ATA_EH_UA_TRIES = 5,
/* probe speed down parameters, see ata_eh_schedule_probe() */
ATA_EH_PROBE_TRIAL_INTERVAL = 60000, /* 1 min */
ATA_EH_PROBE_TRIALS = 2,
};
/* The following table determines how we sequence resets. Each entry
* represents timeout for that try. The first try can be soft or
* hardreset. All others are hardreset if available. In most cases
* the first reset w/ 10sec timeout should succeed. Following entries
* are mostly for error handling, hotplug and those outlier devices that
* take an exceptionally long time to recover from reset.
*/
static const unsigned int ata_eh_reset_timeouts[] = {
10000, /* most drives spin up by 10sec */
10000, /* > 99% working drives spin up before 20sec */
35000, /* give > 30 secs of idleness for outlier devices */
5000, /* and sweet one last chance */
UINT_MAX, /* > 1 min has elapsed, give up */
};
static const unsigned int ata_eh_identify_timeouts[] = {
5000, /* covers > 99% of successes and not too boring on failures */
10000, /* combined time till here is enough even for media access */
30000, /* for true idiots */
UINT_MAX,
};
static const unsigned int ata_eh_revalidate_timeouts[] = {
15000, /* Some drives are slow to read log pages when waking-up */
15000, /* combined time till here is enough even for media access */
UINT_MAX,
};
static const unsigned int ata_eh_flush_timeouts[] = {
15000, /* be generous with flush */
15000, /* ditto */
30000, /* and even more generous */
UINT_MAX,
};
static const unsigned int ata_eh_other_timeouts[] = {
5000, /* same rationale as identify timeout */
10000, /* ditto */
/* but no merciful 30sec for other commands, it just isn't worth it */
UINT_MAX,
};
struct ata_eh_cmd_timeout_ent {
const u8 *commands;
const unsigned int *timeouts;
};
/* The following table determines timeouts to use for EH internal
* commands. Each table entry is a command class and matches the
* commands the entry applies to and the timeout table to use.
*
* On the retry after a command timed out, the next timeout value from
* the table is used. If the table doesn't contain further entries,
* the last value is used.
*
* ehc->cmd_timeout_idx keeps track of which timeout to use per
* command class, so if SET_FEATURES times out on the first try, the
* next try will use the second timeout value only for that class.
*/
#define CMDS(cmds...) (const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
.timeouts = ata_eh_identify_timeouts, },
{ .commands = CMDS(ATA_CMD_READ_LOG_EXT, ATA_CMD_READ_LOG_DMA_EXT),
.timeouts = ata_eh_revalidate_timeouts, },
{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
.timeouts = ata_eh_other_timeouts, },
{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
.timeouts = ata_eh_other_timeouts, },
{ .commands = CMDS(ATA_CMD_SET_FEATURES),
.timeouts = ata_eh_other_timeouts, },
{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
.timeouts = ata_eh_other_timeouts, },
{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
.timeouts = ata_eh_flush_timeouts },
};
#undef CMDS
static void __ata_port_freeze(struct ata_port *ap);
static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
struct ata_device **r_failed_dev);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }
static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */
static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi,
const char *fmt, va_list args)
{
ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
ATA_EH_DESC_LEN - ehi->desc_len,
fmt, args);
}
/**
* __ata_ehi_push_desc - push error description without adding separator
* @ehi: target EHI
* @fmt: printf format string
*
* Format string according to @fmt and append it to @ehi->desc.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
__ata_ehi_pushv_desc(ehi, fmt, args);
va_end(args);
}
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
/**
* ata_ehi_push_desc - push error description with separator
* @ehi: target EHI
* @fmt: printf format string
*
* Format string according to @fmt and append it to @ehi->desc.
* If @ehi->desc is not empty, ", " is added in-between.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
va_list args;
if (ehi->desc_len)
__ata_ehi_push_desc(ehi, ", ");
va_start(args, fmt);
__ata_ehi_pushv_desc(ehi, fmt, args);
va_end(args);
}
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
/**
* ata_ehi_clear_desc - clean error description
* @ehi: target EHI
*
* Clear @ehi->desc.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
ehi->desc[0] = '\0';
ehi->desc_len = 0;
}
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
/**
* ata_port_desc - append port description
* @ap: target ATA port
* @fmt: printf format string
*
* Format string according to @fmt and append it to port
* description. If port description is not empty, " " is added
* in-between. This function is to be used while initializing
* ata_host. The description is printed on host registration.
*
* LOCKING:
* None.
*/
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
va_list args;
WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
if (ap->link.eh_info.desc_len)
__ata_ehi_push_desc(&ap->link.eh_info, " ");
va_start(args, fmt);
__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
va_end(args);
}
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
/**
* ata_port_pbar_desc - append PCI BAR description
* @ap: target ATA port
* @bar: target PCI BAR
* @offset: offset into PCI BAR
* @name: name of the area
*
* If @offset is negative, this function formats a string which
* contains the name, address, size and type of the BAR and
* appends it to the port description. If @offset is zero or
* positive, only the name and the offset address are appended.
*
* LOCKING:
* None.
*/
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
const char *name)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
char *type = "";
unsigned long long start, len;
if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
type = "m";
else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
type = "i";
start = (unsigned long long)pci_resource_start(pdev, bar);
len = (unsigned long long)pci_resource_len(pdev, bar);
if (offset < 0)
ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
else
ata_port_desc(ap, "%s 0x%llx", name,
start + (unsigned long long)offset);
}
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
static int ata_lookup_timeout_table(u8 cmd)
{
int i;
for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
const u8 *cur;
for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
if (*cur == cmd)
return i;
}
return -1;
}
/**
* ata_internal_cmd_timeout - determine timeout for an internal command
* @dev: target device
* @cmd: internal command to be issued
*
* Determine timeout for internal command @cmd for @dev.
*
* LOCKING:
* EH context.
*
* RETURNS:
* Determined timeout.
*/
unsigned int ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
struct ata_eh_context *ehc = &dev->link->eh_context;
int ent = ata_lookup_timeout_table(cmd);
int idx;
if (ent < 0)
return ATA_EH_CMD_DFL_TIMEOUT;
idx = ehc->cmd_timeout_idx[dev->devno][ent];
return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}
/**
* ata_internal_cmd_timed_out - notification for internal command timeout
* @dev: target device
* @cmd: internal command which timed out
*
* Notify EH that internal command @cmd for @dev timed out. This
* function should be called only for commands whose timeouts are
* determined using ata_internal_cmd_timeout().
*
* LOCKING:
* EH context.
*/
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
struct ata_eh_context *ehc = &dev->link->eh_context;
int ent = ata_lookup_timeout_table(cmd);
int idx;
if (ent < 0)
return;
idx = ehc->cmd_timeout_idx[dev->devno][ent];
if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != UINT_MAX)
ehc->cmd_timeout_idx[dev->devno][ent]++;
}
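/*
 * Worked example (illustrative, not driver code): ATA_CMD_ID_ATA maps
 * to ata_eh_identify_timeouts above, so the first attempt gets a
 * 5000ms timeout. Each ata_internal_cmd_timed_out() call advances the
 * per-device, per-class index, giving 10000ms and then 30000ms on the
 * retries; the index stops advancing once the next entry is UINT_MAX.
 */
#if 0
static void example_timeout_escalation(struct ata_device *dev)
{
	unsigned int t = ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA);
	/* t == 5000 on the first try */
	ata_internal_cmd_timed_out(dev, ATA_CMD_ID_ATA);
	t = ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA);
	/* t == 10000 on the retry */
}
#endif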
static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
unsigned int err_mask)
{
struct ata_ering_entry *ent;
WARN_ON(!err_mask);
ering->cursor++;
ering->cursor %= ATA_ERING_SIZE;
ent = &ering->ring[ering->cursor];
ent->eflags = eflags;
ent->err_mask = err_mask;
ent->timestamp = get_jiffies_64();
}
static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
struct ata_ering_entry *ent = &ering->ring[ering->cursor];
if (ent->err_mask)
return ent;
return NULL;
}
int ata_ering_map(struct ata_ering *ering,
int (*map_fn)(struct ata_ering_entry *, void *),
void *arg)
{
int idx, rc = 0;
struct ata_ering_entry *ent;
idx = ering->cursor;
do {
ent = &ering->ring[idx];
if (!ent->err_mask)
break;
rc = map_fn(ent, arg);
if (rc)
break;
idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
} while (idx != ering->cursor);
return rc;
}
static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
{
ent->eflags |= ATA_EFLAG_OLD_ER;
return 0;
}
static void ata_ering_clear(struct ata_ering *ering)
{
ata_ering_map(ering, ata_ering_clear_cb, NULL);
}
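/*
 * Illustrative sketch (not driver code): ata_ering_map() walks the
 * ring from the most recent entry backwards, stopping at the first
 * empty slot or when the callback returns non-zero, exactly as
 * ata_ering_clear_cb() above is applied. A hypothetical callback
 * counting recent timeouts:
 */
#if 0
static int count_timeouts_cb(struct ata_ering_entry *ent, void *arg)
{
	int *nr = arg;

	if (!(ent->eflags & ATA_EFLAG_OLD_ER) &&
	    (ent->err_mask & AC_ERR_TIMEOUT))
		(*nr)++;
	return 0;	/* 0 == keep walking */
}
#endif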
static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
struct ata_eh_context *ehc = &dev->link->eh_context;
return ehc->i.action | ehc->i.dev_action[dev->devno];
}
static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
struct ata_eh_info *ehi, unsigned int action)
{
struct ata_device *tdev;
if (!dev) {
ehi->action &= ~action;
ata_for_each_dev(tdev, link, ALL)
ehi->dev_action[tdev->devno] &= ~action;
} else {
/* doesn't make sense for port-wide EH actions */
WARN_ON(!(action & ATA_EH_PERDEV_MASK));
/* break ehi->action into ehi->dev_action */
if (ehi->action & action) {
ata_for_each_dev(tdev, link, ALL)
ehi->dev_action[tdev->devno] |=
ehi->action & action;
ehi->action &= ~action;
}
/* turn off the specified per-dev action */
ehi->dev_action[dev->devno] &= ~action;
}
}
/**
* ata_eh_acquire - acquire EH ownership
* @ap: ATA port to acquire EH ownership for
*
* Acquire EH ownership for @ap. This is the basic exclusion
* mechanism for ports sharing a host. Only one port hanging off
* the same host can claim the ownership of EH.
*
* LOCKING:
* EH context.
*/
void ata_eh_acquire(struct ata_port *ap)
{
mutex_lock(&ap->host->eh_mutex);
WARN_ON_ONCE(ap->host->eh_owner);
ap->host->eh_owner = current;
}
/**
* ata_eh_release - release EH ownership
* @ap: ATA port to release EH ownership for
*
* Release EH ownership for @ap. The caller must
* have acquired EH ownership using ata_eh_acquire() previously.
*
* LOCKING:
* EH context.
*/
void ata_eh_release(struct ata_port *ap)
{
WARN_ON_ONCE(ap->host->eh_owner != current);
ap->host->eh_owner = NULL;
mutex_unlock(&ap->host->eh_mutex);
}
static void ata_eh_unload(struct ata_port *ap)
{
struct ata_link *link;
struct ata_device *dev;
unsigned long flags;
/* Restore SControl IPM and SPD for the next driver and
* disable attached devices.
*/
ata_for_each_link(link, ap, PMP_FIRST) {
sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
ata_for_each_dev(dev, link, ALL)
ata_dev_disable(dev);
}
/* freeze and set UNLOADED */
spin_lock_irqsave(ap->lock, flags);
ata_port_freeze(ap); /* won't be thawed */
ap->pflags &= ~ATA_PFLAG_EH_PENDING; /* clear pending from freeze */
ap->pflags |= ATA_PFLAG_UNLOADED;
spin_unlock_irqrestore(ap->lock, flags);
}
/**
* ata_scsi_error - SCSI layer error handler callback
* @host: SCSI host on which error occurred
*
* Handles SCSI-layer-thrown error events.
*
* LOCKING:
* Inherited from SCSI layer (none, can sleep)
*
* RETURNS:
* Zero.
*/
void ata_scsi_error(struct Scsi_Host *host)
{
struct ata_port *ap = ata_shost_to_port(host);
unsigned long flags;
LIST_HEAD(eh_work_q);
spin_lock_irqsave(host->host_lock, flags);
list_splice_init(&host->eh_cmd_q, &eh_work_q);
spin_unlock_irqrestore(host->host_lock, flags);
ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
/* If we raced normal completion and there is nothing to
recover (nr_timedout == 0), why exactly are we doing error
recovery? */
ata_scsi_port_error_handler(host, ap);
/* finish or retry handled scmd's and clean up */
WARN_ON(!list_empty(&eh_work_q));
}
/**
* ata_scsi_cmd_error_handler - error callback for a list of commands
* @host: scsi host containing the port
* @ap: ATA port within the host
* @eh_work_q: list of commands to process
*
* process the given list of commands and return those finished to the
* ap->eh_done_q. This function is the first part of the libata error
* handler which processes a given list of failed commands.
*/
void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
struct list_head *eh_work_q)
{
int i;
unsigned long flags;
struct scsi_cmnd *scmd, *tmp;
int nr_timedout = 0;
/* make sure sff pio task is not running */
ata_sff_flush_pio_task(ap);
/* synchronize with host lock and sort out timeouts */
/*
* For EH, all qcs are finished in one of three ways -
* normal completion, error completion, and SCSI timeout.
* Both completions can race against SCSI timeout. When normal
* completion wins, the qc never reaches EH. When error
* completion wins, the qc has ATA_QCFLAG_EH set.
*
* When SCSI timeout wins, things are a bit more complex.
* Normal or error completion can occur after the timeout but
* before this point. In such cases, both types of
* completions are honored. A scmd is determined to have
* timed out iff its associated qc is active and not failed.
*/
spin_lock_irqsave(ap->lock, flags);
/*
* This must occur under the ap->lock as we don't want
* a polled recovery to race the real interrupt handler
*
* The lost_interrupt handler checks for any completed but
* non-notified command and completes much like an IRQ handler.
*
* We then fall into the error recovery code which will treat
* this as if normal completion won the race
*/
if (ap->ops->lost_interrupt)
ap->ops->lost_interrupt(ap);
list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
struct ata_queued_cmd *qc;
ata_qc_for_each_raw(ap, qc, i) {
if (qc->flags & ATA_QCFLAG_ACTIVE &&
qc->scsicmd == scmd)
break;
}
if (i < ATA_MAX_QUEUE) {
/* the scmd has an associated qc */
if (!(qc->flags & ATA_QCFLAG_EH)) {
/* which hasn't failed yet, timeout */
qc->err_mask |= AC_ERR_TIMEOUT;
qc->flags |= ATA_QCFLAG_EH;
nr_timedout++;
}
} else {
/* Normal completion occurred after
* SCSI timeout but before this point.
* Successfully complete it.
*/
scmd->retries = scmd->allowed;
scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}
}
/*
* If we have timed out qcs, they belong to EH from
* this point but the state of the controller is
* unknown. Freeze the port to make sure the IRQ
* handler doesn't diddle with those qcs. This must
* be done atomically w.r.t. setting ATA_QCFLAG_EH.
*/
if (nr_timedout)
__ata_port_freeze(ap);
/* initialize eh_tries */
ap->eh_tries = ATA_EH_MAX_TRIES;
spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
/**
* ata_scsi_port_error_handler - recover the port after the commands
* @host: SCSI host containing the port
* @ap: the ATA port
*
* Handle the recovery of the port @ap after all the commands
* have been recovered.
*/
void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
{
unsigned long flags;
struct ata_link *link;
/* acquire EH ownership */
ata_eh_acquire(ap);
repeat:
/* kill fast drain timer */
del_timer_sync(&ap->fastdrain_timer);
/* process port resume request */
ata_eh_handle_port_resume(ap);
/* fetch & clear EH info */
spin_lock_irqsave(ap->lock, flags);
ata_for_each_link(link, ap, HOST_FIRST) {
struct ata_eh_context *ehc = &link->eh_context;
struct ata_device *dev;
memset(&link->eh_context, 0, sizeof(link->eh_context));
link->eh_context.i = link->eh_info;
memset(&link->eh_info, 0, sizeof(link->eh_info));
ata_for_each_dev(dev, link, ENABLED) {
int devno = dev->devno;
ehc->saved_xfer_mode[devno] = dev->xfer_mode;
if (ata_ncq_enabled(dev))
ehc->saved_ncq_enabled |= 1 << devno;
}
}
ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
ap->pflags &= ~ATA_PFLAG_EH_PENDING;
ap->excl_link = NULL; /* don't maintain exclusion over EH */
spin_unlock_irqrestore(ap->lock, flags);
/* invoke EH, skip if unloading or suspended */
if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
ap->ops->error_handler(ap);
else {
/* if unloading, commence suicide */
if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
!(ap->pflags & ATA_PFLAG_UNLOADED))
ata_eh_unload(ap);
ata_eh_finish(ap);
}
/* process port suspend request */
ata_eh_handle_port_suspend(ap);
/*
* Exception might have happened after ->error_handler recovered the
* port but before this point. Repeat EH in such case.
*/
spin_lock_irqsave(ap->lock, flags);
if (ap->pflags & ATA_PFLAG_EH_PENDING) {
if (--ap->eh_tries) {
spin_unlock_irqrestore(ap->lock, flags);
goto repeat;
}
ata_port_err(ap,
"EH pending after %d tries, giving up\n",
ATA_EH_MAX_TRIES);
ap->pflags &= ~ATA_PFLAG_EH_PENDING;
}
/* this run is complete, make sure EH info is clear */
ata_for_each_link(link, ap, HOST_FIRST)
memset(&link->eh_info, 0, sizeof(link->eh_info));
/*
* end eh (clear host_eh_scheduled) while holding ap->lock such that if
* exception occurs after this point but before EH completion, SCSI
* midlayer will re-initiate EH.
*/
ap->ops->end_eh(ap);
spin_unlock_irqrestore(ap->lock, flags);
ata_eh_release(ap);
scsi_eh_flush_done_q(&ap->eh_done_q);
/* clean up */
spin_lock_irqsave(ap->lock, flags);
if (ap->pflags & ATA_PFLAG_LOADING)
ap->pflags &= ~ATA_PFLAG_LOADING;
else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
!(ap->flags & ATA_FLAG_SAS_HOST))
schedule_delayed_work(&ap->hotplug_task, 0);
if (ap->pflags & ATA_PFLAG_RECOVERED)
ata_port_info(ap, "EH complete\n");
ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
/* tell wait_eh that we're done */
ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
wake_up_all(&ap->eh_wait_q);
spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
/**
* ata_port_wait_eh - Wait for the currently pending EH to complete
* @ap: Port to wait EH for
*
* Wait until the currently pending EH is complete.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
void ata_port_wait_eh(struct ata_port *ap)
{
unsigned long flags;
DEFINE_WAIT(wait);
retry:
spin_lock_irqsave(ap->lock, flags);
while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_irqrestore(ap->lock, flags);
schedule();
spin_lock_irqsave(ap->lock, flags);
}
finish_wait(&ap->eh_wait_q, &wait);
spin_unlock_irqrestore(ap->lock, flags);
/* make sure SCSI EH is complete */
if (scsi_host_in_recovery(ap->scsi_host)) {
ata_msleep(ap, 10);
goto retry;
}
}
EXPORT_SYMBOL_GPL(ata_port_wait_eh);
static unsigned int ata_eh_nr_in_flight(struct ata_port *ap)
{
struct ata_queued_cmd *qc;
unsigned int tag;
unsigned int nr = 0;
/* count only non-internal commands */
ata_qc_for_each(ap, qc, tag) {
if (qc)
nr++;
}
return nr;
}
void ata_eh_fastdrain_timerfn(struct timer_list *t)
{
struct ata_port *ap = from_timer(ap, t, fastdrain_timer);
unsigned long flags;
unsigned int cnt;
spin_lock_irqsave(ap->lock, flags);
cnt = ata_eh_nr_in_flight(ap);
/* are we done? */
if (!cnt)
goto out_unlock;
if (cnt == ap->fastdrain_cnt) {
struct ata_queued_cmd *qc;
unsigned int tag;
/* No progress during the last interval, tag all
* in-flight qcs as timed out and freeze the port.
*/
ata_qc_for_each(ap, qc, tag) {
if (qc)
qc->err_mask |= AC_ERR_TIMEOUT;
}
ata_port_freeze(ap);
} else {
/* some qcs have finished, give it another chance */
ap->fastdrain_cnt = cnt;
ap->fastdrain_timer.expires =
ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
add_timer(&ap->fastdrain_timer);
}
out_unlock:
spin_unlock_irqrestore(ap->lock, flags);
}
/**
* ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
* @ap: target ATA port
* @fastdrain: activate fast drain
*
* Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
* is non-zero and EH wasn't pending before. Fast drain ensures
* that EH kicks in in a timely manner.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
unsigned int cnt;
/* already scheduled? */
if (ap->pflags & ATA_PFLAG_EH_PENDING)
return;
ap->pflags |= ATA_PFLAG_EH_PENDING;
if (!fastdrain)
return;
/* do we have in-flight qcs? */
cnt = ata_eh_nr_in_flight(ap);
if (!cnt)
return;
/* activate fast drain */
ap->fastdrain_cnt = cnt;
ap->fastdrain_timer.expires =
ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
add_timer(&ap->fastdrain_timer);
}
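/*
 * Illustrative timeline (not driver code): suppose EH is scheduled
 * with fastdrain while five qcs are in flight. If none complete
 * within ATA_EH_FASTDRAIN_INTERVAL (3s), ata_eh_fastdrain_timerfn()
 * marks them all AC_ERR_TIMEOUT and freezes the port; if, say, two
 * completed, it re-arms itself with fastdrain_cnt = 3 and waits
 * another interval before deciding again.
 */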
/**
* ata_qc_schedule_eh - schedule qc for error handling
* @qc: command to schedule error handling for
*
* Schedule error handling for @qc. EH will kick in as soon as
* other commands are drained.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
qc->flags |= ATA_QCFLAG_EH;
ata_eh_set_pending(ap, 1);
/* The following will fail if timeout has already expired.
* ata_scsi_error() takes care of such scmds on EH entry.
* Note that ATA_QCFLAG_EH is unconditionally set after
* this function completes.
*/
blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
}
/**
* ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
* @ap: ATA port to schedule EH for
*
* LOCKING: inherited from ata_port_schedule_eh
* spin_lock_irqsave(host lock)
*/
void ata_std_sched_eh(struct ata_port *ap)
{
if (ap->pflags & ATA_PFLAG_INITIALIZING)
return;
ata_eh_set_pending(ap, 1);
scsi_schedule_eh(ap->scsi_host);
trace_ata_std_sched_eh(ap);
}
EXPORT_SYMBOL_GPL(ata_std_sched_eh);
/**
* ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
* @ap: ATA port to end EH for
*
* In the libata object model there is a 1:1 mapping of ata_port to
* shost, so host fields can be directly manipulated under ap->lock, in
* the libsas case we need to hold a lock at the ha->level to coordinate
* these events.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
void ata_std_end_eh(struct ata_port *ap)
{
struct Scsi_Host *host = ap->scsi_host;
host->host_eh_scheduled = 0;
}
EXPORT_SYMBOL(ata_std_end_eh);
/**
* ata_port_schedule_eh - schedule error handling without a qc
* @ap: ATA port to schedule EH for
*
* Schedule error handling for @ap. EH will kick in as soon as
* all commands are drained.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
void ata_port_schedule_eh(struct ata_port *ap)
{
/* see: ata_std_sched_eh, unless you know better */
ap->ops->sched_eh(ap);
}
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
struct ata_queued_cmd *qc;
int tag, nr_aborted = 0;
/* we're gonna abort all commands, no need for fast drain */
ata_eh_set_pending(ap, 0);
/* include internal tag in iteration */
ata_qc_for_each_with_internal(ap, qc, tag) {
if (qc && (!link || qc->dev->link == link)) {
qc->flags |= ATA_QCFLAG_EH;
ata_qc_complete(qc);
nr_aborted++;
}
}
if (!nr_aborted)
ata_port_schedule_eh(ap);
return nr_aborted;
}
/**
* ata_link_abort - abort all qc's on the link
* @link: ATA link to abort qc's for
*
* Abort all active qc's active on @link and schedule EH.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Number of aborted qc's.
*/
int ata_link_abort(struct ata_link *link)
{
return ata_do_link_abort(link->ap, link);
}
EXPORT_SYMBOL_GPL(ata_link_abort);
/**
* ata_port_abort - abort all qc's on the port
* @ap: ATA port to abort qc's for
*
* Abort all active qc's of @ap and schedule EH.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*
* RETURNS:
* Number of aborted qc's.
*/
int ata_port_abort(struct ata_port *ap)
{
return ata_do_link_abort(ap, NULL);
}
EXPORT_SYMBOL_GPL(ata_port_abort);
/**
* __ata_port_freeze - freeze port
* @ap: ATA port to freeze
*
* This function is called when HSM violation or some other
* condition disrupts normal operation of the port. Frozen port
* is not allowed to perform any operation until the port is
* thawed, which usually follows a successful reset.
*
* ap->ops->freeze() callback can be used for freezing the port
* hardware-wise (e.g. mask interrupt and stop DMA engine). If a
* port cannot be frozen hardware-wise, the interrupt handler
* must ack and clear interrupts unconditionally while the port
* is frozen.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static void __ata_port_freeze(struct ata_port *ap)
{
if (ap->ops->freeze)
ap->ops->freeze(ap);
ap->pflags |= ATA_PFLAG_FROZEN;
trace_ata_port_freeze(ap);
}
/**
* ata_port_freeze - abort & freeze port
* @ap: ATA port to freeze
*
* Abort and freeze @ap. The freeze operation must be called
* first, because some hardware requires special operations
* before the taskfile registers are accessible.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Number of aborted commands.
*/
int ata_port_freeze(struct ata_port *ap)
{
__ata_port_freeze(ap);
return ata_port_abort(ap);
}
EXPORT_SYMBOL_GPL(ata_port_freeze);
/**
* ata_eh_freeze_port - EH helper to freeze port
* @ap: ATA port to freeze
*
* Freeze @ap.
*
* LOCKING:
* None.
*/
void ata_eh_freeze_port(struct ata_port *ap)
{
unsigned long flags;
spin_lock_irqsave(ap->lock, flags);
__ata_port_freeze(ap);
spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
/**
* ata_eh_thaw_port - EH helper to thaw port
* @ap: ATA port to thaw
*
* Thaw frozen port @ap.
*
* LOCKING:
* None.
*/
void ata_eh_thaw_port(struct ata_port *ap)
{
unsigned long flags;
spin_lock_irqsave(ap->lock, flags);
ap->pflags &= ~ATA_PFLAG_FROZEN;
if (ap->ops->thaw)
ap->ops->thaw(ap);
spin_unlock_irqrestore(ap->lock, flags);
trace_ata_port_thaw(ap);
}
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
/* nada */
}
static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct scsi_cmnd *scmd = qc->scsicmd;
unsigned long flags;
spin_lock_irqsave(ap->lock, flags);
qc->scsidone = ata_eh_scsidone;
__ata_qc_complete(qc);
WARN_ON(ata_tag_valid(qc->tag));
spin_unlock_irqrestore(ap->lock, flags);
scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}
/**
* ata_eh_qc_complete - Complete an active ATA command from EH
* @qc: Command to complete
*
* Indicate to the mid and upper layers that an ATA command has
* completed. To be used from EH.
*/
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
scmd->retries = scmd->allowed;
__ata_eh_qc_complete(qc);
}
/**
* ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
* @qc: Command to retry
*
* Indicate to the mid and upper layers that an ATA command
* should be retried. To be used from EH.
*
* SCSI midlayer limits the number of retries to scmd->allowed.
* scmd->allowed is incremented for commands which get retried
* due to unrelated failures (qc->err_mask is zero).
*/
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
if (!qc->err_mask)
scmd->allowed++;
__ata_eh_qc_complete(qc);
}
/**
* ata_dev_disable - disable ATA device
* @dev: ATA device to disable
*
* Disable @dev.
*
* Locking:
* EH context.
*/
void ata_dev_disable(struct ata_device *dev)
{
if (!ata_dev_enabled(dev))
return;
ata_dev_warn(dev, "disable device\n");
ata_acpi_on_disable(dev);
ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
dev->class++;
/* From now till the next successful probe, ering is used to
* track probe failures. Clear accumulated device error info.
*/
ata_ering_clear(&dev->ering);
}
EXPORT_SYMBOL_GPL(ata_dev_disable);
/**
* ata_eh_detach_dev - detach ATA device
* @dev: ATA device to detach
*
* Detach @dev.
*
* LOCKING:
* None.
*/
void ata_eh_detach_dev(struct ata_device *dev)
{
struct ata_link *link = dev->link;
struct ata_port *ap = link->ap;
struct ata_eh_context *ehc = &link->eh_context;
unsigned long flags;
ata_dev_disable(dev);
spin_lock_irqsave(ap->lock, flags);
dev->flags &= ~ATA_DFLAG_DETACH;
if (ata_scsi_offline_dev(dev)) {
dev->flags |= ATA_DFLAG_DETACHED;
ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
}
/* clear per-dev EH info */
ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
ehc->saved_xfer_mode[dev->devno] = 0;
ehc->saved_ncq_enabled &= ~(1 << dev->devno);
spin_unlock_irqrestore(ap->lock, flags);
}
/**
* ata_eh_about_to_do - about to perform eh_action
* @link: target ATA link
* @dev: target ATA dev for per-dev action (can be NULL)
* @action: action about to be performed
*
* Called just before performing EH actions to clear related bits
* in @link->eh_info such that eh actions are not unnecessarily
* repeated.
*
* LOCKING:
* None.
*/
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
unsigned int action)
{
struct ata_port *ap = link->ap;
struct ata_eh_info *ehi = &link->eh_info;
struct ata_eh_context *ehc = &link->eh_context;
unsigned long flags;
trace_ata_eh_about_to_do(link, dev ? dev->devno : 0, action);
spin_lock_irqsave(ap->lock, flags);
ata_eh_clear_action(link, dev, ehi, action);
/* About to take EH action, set RECOVERED. Ignore actions on
* slave links as master will do them again.
*/
if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
ap->pflags |= ATA_PFLAG_RECOVERED;
spin_unlock_irqrestore(ap->lock, flags);
}
/**
* ata_eh_done - EH action complete
* @link: ATA link for which EH actions are complete
* @dev: target ATA dev for per-dev action (can be NULL)
* @action: action just completed
*
* Called right after performing EH actions to clear related bits
* in @link->eh_context.
*
* LOCKING:
* None.
*/
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
unsigned int action)
{
struct ata_eh_context *ehc = &link->eh_context;
trace_ata_eh_done(link, dev ? dev->devno : 0, action);
ata_eh_clear_action(link, dev, &ehc->i, action);
}
/**
* ata_err_string - convert err_mask to descriptive string
* @err_mask: error mask to convert to string
*
* Convert @err_mask to descriptive string. Errors are
* prioritized according to severity and only the most severe
* error is reported.
*
* LOCKING:
* None.
*
* RETURNS:
* Descriptive string for @err_mask
*/
static const char *ata_err_string(unsigned int err_mask)
{
if (err_mask & AC_ERR_HOST_BUS)
return "host bus error";
if (err_mask & AC_ERR_ATA_BUS)
return "ATA bus error";
if (err_mask & AC_ERR_TIMEOUT)
return "timeout";
if (err_mask & AC_ERR_HSM)
return "HSM violation";
if (err_mask & AC_ERR_SYSTEM)
return "internal error";
if (err_mask & AC_ERR_MEDIA)
return "media error";
if (err_mask & AC_ERR_INVALID)
return "invalid argument";
if (err_mask & AC_ERR_DEV)
return "device error";
if (err_mask & AC_ERR_NCQ)
return "NCQ error";
if (err_mask & AC_ERR_NODEV_HINT)
return "Polling detection error";
return "unknown error";
}
/**
* atapi_eh_tur - perform ATAPI TEST_UNIT_READY
* @dev: target ATAPI device
* @r_sense_key: out parameter for sense_key
*
* Perform ATAPI TEST_UNIT_READY.
*
* LOCKING:
* EH context (may sleep).
*
* RETURNS:
* 0 on success, AC_ERR_* mask on failure.
*/
unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
struct ata_taskfile tf;
unsigned int err_mask;
ata_tf_init(dev, &tf);
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.command = ATA_CMD_PACKET;
tf.protocol = ATAPI_PROT_NODATA;
err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
if (err_mask == AC_ERR_DEV)
*r_sense_key = tf.error >> 4;
return err_mask;
}
/**
* ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
* @qc: qc to perform REQUEST_SENSE_DATA_EXT to
*
* Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
* SENSE. This function is an EH helper.
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* true if sense data could be fetched, false otherwise.
*/
static bool ata_eh_request_sense(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_device *dev = qc->dev;
struct ata_taskfile tf;
unsigned int err_mask;
if (ata_port_is_frozen(qc->ap)) {
ata_dev_warn(dev, "sense data available but port frozen\n");
return false;
}
if (!ata_id_sense_reporting_enabled(dev->id)) {
ata_dev_warn(qc->dev, "sense data reporting disabled\n");
return false;
}
ata_tf_init(dev, &tf);
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
tf.command = ATA_CMD_REQ_SENSE_DATA;
tf.protocol = ATA_PROT_NODATA;
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
/* Ignore err_mask; ATA_ERR might be set */
if (tf.status & ATA_SENSE) {
if (ata_scsi_sense_is_valid(tf.lbah, tf.lbam, tf.lbal)) {
/* Set sense without also setting scsicmd->result */
scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE,
cmd->sense_buffer, tf.lbah,
tf.lbam, tf.lbal);
qc->flags |= ATA_QCFLAG_SENSE_VALID;
return true;
}
} else {
ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
tf.status, err_mask);
}
return false;
}
/**
* atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
* @dev: device to perform REQUEST_SENSE to
* @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
* @dfl_sense_key: default sense key to use
*
* Perform ATAPI REQUEST_SENSE after the device reported CHECK
* SENSE. This function is an EH helper.
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* 0 on success, AC_ERR_* mask on failure
*/
unsigned int atapi_eh_request_sense(struct ata_device *dev,
u8 *sense_buf, u8 dfl_sense_key)
{
u8 cdb[ATAPI_CDB_LEN] =
{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
struct ata_port *ap = dev->link->ap;
struct ata_taskfile tf;
memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
/* initialize sense_buf with the error register,
* for the case where they are -not- overwritten
*/
sense_buf[0] = 0x70;
sense_buf[2] = dfl_sense_key;
/* some devices time out if garbage left in tf */
ata_tf_init(dev, &tf);
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.command = ATA_CMD_PACKET;
/* is it pointless to prefer PIO for "safety reasons"? */
if (ap->flags & ATA_FLAG_PIO_DMA) {
tf.protocol = ATAPI_PROT_DMA;
tf.feature |= ATAPI_PKT_DMA;
} else {
tf.protocol = ATAPI_PROT_PIO;
tf.lbam = SCSI_SENSE_BUFFERSIZE;
tf.lbah = 0;
}
return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}
/**
* ata_eh_analyze_serror - analyze SError for a failed port
* @link: ATA link to analyze SError for
*
* Analyze SError if available and further determine cause of
* failure.
*
* LOCKING:
* None.
*/
static void ata_eh_analyze_serror(struct ata_link *link)
{
struct ata_eh_context *ehc = &link->eh_context;
u32 serror = ehc->i.serror;
unsigned int err_mask = 0, action = 0;
u32 hotplug_mask;
if (serror & (SERR_PERSISTENT | SERR_DATA)) {
err_mask |= AC_ERR_ATA_BUS;
action |= ATA_EH_RESET;
}
if (serror & SERR_PROTOCOL) {
err_mask |= AC_ERR_HSM;
action |= ATA_EH_RESET;
}
if (serror & SERR_INTERNAL) {
err_mask |= AC_ERR_SYSTEM;
action |= ATA_EH_RESET;
}
/* Determine whether a hotplug event has occurred. Both
* SError.N/X are considered hotplug events for enabled or
* host links. For disabled PMP links, only the N bit is
* considered, as the X bit is left at 1 for link plugging.
*/
if (link->lpm_policy > ATA_LPM_MAX_POWER)
hotplug_mask = 0; /* hotplug doesn't work w/ LPM */
else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
else
hotplug_mask = SERR_PHYRDY_CHG;
if (serror & hotplug_mask)
ata_ehi_hotplugged(&ehc->i);
ehc->i.err_mask |= err_mask;
ehc->i.action |= action;
}
/**
* ata_eh_analyze_tf - analyze taskfile of a failed qc
* @qc: qc to analyze
*
* Analyze taskfile of @qc and further determine cause of
* failure. This function also requests ATAPI sense data if
* available.
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* Determined recovery action
*/
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc)
{
const struct ata_taskfile *tf = &qc->result_tf;
unsigned int tmp, action = 0;
u8 stat = tf->status, err = tf->error;
if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
qc->err_mask |= AC_ERR_HSM;
return ATA_EH_RESET;
}
if (stat & (ATA_ERR | ATA_DF)) {
qc->err_mask |= AC_ERR_DEV;
/*
* Sense data reporting does not work if the
* device fault bit is set.
*/
if (stat & ATA_DF)
stat &= ~ATA_SENSE;
} else {
return 0;
}
switch (qc->dev->class) {
case ATA_DEV_ATA:
case ATA_DEV_ZAC:
/*
* Fetch the sense data explicitly if:
* - It was a non-NCQ command that failed, or
* - It was an NCQ command that failed, but the sense data
*   was not included in the NCQ command error log
*   (i.e. NCQ autosense is not supported by the device).
*/
if (!(qc->flags & ATA_QCFLAG_SENSE_VALID) &&
(stat & ATA_SENSE) && ata_eh_request_sense(qc))
set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION);
if (err & ATA_ICRC)
qc->err_mask |= AC_ERR_ATA_BUS;
if (err & (ATA_UNC | ATA_AMNF))
qc->err_mask |= AC_ERR_MEDIA;
if (err & ATA_IDNF)
qc->err_mask |= AC_ERR_INVALID;
break;
case ATA_DEV_ATAPI:
if (!ata_port_is_frozen(qc->ap)) {
tmp = atapi_eh_request_sense(qc->dev,
qc->scsicmd->sense_buffer,
qc->result_tf.error >> 4);
if (!tmp)
qc->flags |= ATA_QCFLAG_SENSE_VALID;
else
qc->err_mask |= tmp;
}
}
if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
enum scsi_disposition ret = scsi_check_sense(qc->scsicmd);
/*
* SUCCESS here means that the sense code could be
* evaluated and should be passed to the upper layers
* for correct evaluation.
* FAILED means the sense code could not be interpreted
* and the device would need to be reset.
* NEEDS_RETRY and ADD_TO_MLQUEUE means that the
* command would need to be retried.
*/
if (ret == NEEDS_RETRY || ret == ADD_TO_MLQUEUE) {
qc->flags |= ATA_QCFLAG_RETRY;
qc->err_mask |= AC_ERR_OTHER;
} else if (ret != SUCCESS) {
qc->err_mask |= AC_ERR_HSM;
}
}
if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
action |= ATA_EH_RESET;
return action;
}
static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
int *xfer_ok)
{
int base = 0;
if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
*xfer_ok = 1;
if (!*xfer_ok)
base = ATA_ECAT_DUBIOUS_NONE;
if (err_mask & AC_ERR_ATA_BUS)
return base + ATA_ECAT_ATA_BUS;
if (err_mask & AC_ERR_TIMEOUT)
return base + ATA_ECAT_TOUT_HSM;
if (eflags & ATA_EFLAG_IS_IO) {
if (err_mask & AC_ERR_HSM)
return base + ATA_ECAT_TOUT_HSM;
if ((err_mask &
(AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
return base + ATA_ECAT_UNK_DEV;
}
return 0;
}
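/*
 * Worked example (illustrative): an I/O command failing with only
 * AC_ERR_DEV set, seen after at least one verified transfer, lands
 * in ATA_ECAT_UNK_DEV. The same error while every transfer so far
 * was still dubious gets the ATA_ECAT_DUBIOUS_NONE base added,
 * landing in ATA_ECAT_DUBIOUS_UNK_DEV instead.
 */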
struct speed_down_verdict_arg {
u64 since;
int xfer_ok;
int nr_errors[ATA_ECAT_NR];
};
static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
struct speed_down_verdict_arg *arg = void_arg;
int cat;
if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
return -1;
cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
&arg->xfer_ok);
arg->nr_errors[cat]++;
return 0;
}
/**
* ata_eh_speed_down_verdict - Determine speed down verdict
* @dev: Device of interest
*
* This function examines error ring of @dev and determines
* whether NCQ needs to be turned off, transfer speed should be
* stepped down, or falling back to PIO is necessary.
*
* ECAT_ATA_BUS : ATA_BUS error for any command
*
* ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for
* IO commands
*
* ECAT_UNK_DEV : Unknown DEV error for IO commands
*
* ECAT_DUBIOUS_* : Identical to above three but occurred while
* data transfer hasn't been verified.
*
* Verdicts are
*
* NCQ_OFF : Turn off NCQ.
*
* SPEED_DOWN : Speed down transfer speed but don't fall back
* to PIO.
*
* FALLBACK_TO_PIO : Fall back to PIO.
*
* Even if multiple verdicts are returned, only one action is
* taken per error. An action triggered by non-DUBIOUS errors
* clears ering, while one triggered by DUBIOUS_* errors doesn't.
* This is to expedite speed down decisions right after a device is
* initially configured.
*
* The following are speed down rules. #1 and #2 deal with
* DUBIOUS errors.
*
* 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM error
* occurred during the last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
*
* 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV error
* occurred during the last 5 mins, NCQ_OFF.
*
* 3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors
* occurred during the last 5 mins, FALLBACK_TO_PIO.
*
* 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
* during the last 10 mins, NCQ_OFF.
*
* 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
* UNK_DEV errors occurred during the last 10 mins, SPEED_DOWN.
*
* LOCKING:
* Inherited from caller.
*
* RETURNS:
* OR of ATA_EH_SPDN_* flags.
*/
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
u64 j64 = get_jiffies_64();
struct speed_down_verdict_arg arg;
unsigned int verdict = 0;
/* scan past 5 mins of error history */
memset(&arg, 0, sizeof(arg));
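/* min() clamps the window at time zero so the subtraction cannot wrap shortly after boot */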
arg.since = j64 - min(j64, j5mins);
ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
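/* rules #1-#3 in the function comment above use this 5 minute window */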
if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
verdict |= ATA_EH_SPDN_SPEED_DOWN |
ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
arg.nr_errors[ATA_ECAT_TOUT_HSM] +
arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
/* scan past 10 mins of error history */
memset(&arg, 0, sizeof(arg));
arg.since = j64 - min(j64, j10mins);
ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
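/* rules #4 and #5 in the function comment above use this 10 minute window */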
if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
verdict |= ATA_EH_SPDN_NCQ_OFF;
if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
verdict |= ATA_EH_SPDN_SPEED_DOWN;
return verdict;
}
/**
* ata_eh_speed_down - record error and speed down if necessary
* @dev: Failed device
* @eflags: mask of ATA_EFLAG_* flags
* @err_mask: err_mask of the error
*
* Record error and examine error history to determine whether
* adjusting transmission speed is necessary. It also sets
* transmission limits appropriately if such adjustment is
* necessary.
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* Determined recovery action.
*/
static unsigned int ata_eh_speed_down(struct ata_device *dev,
unsigned int eflags, unsigned int err_mask)
{
struct ata_link *link = ata_dev_phys_link(dev);
int xfer_ok = 0;
unsigned int verdict;
unsigned int action = 0;
/* don't bother if Cat-0 error */
if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
return 0;
/* record error and determine whether speed down is necessary */
ata_ering_record(&dev->ering, eflags, err_mask);
verdict = ata_eh_speed_down_verdict(dev);
/* turn off NCQ? */
if ((verdict & ATA_EH_SPDN_NCQ_OFF) && ata_ncq_enabled(dev)) {
dev->flags |= ATA_DFLAG_NCQ_OFF;
ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
goto done;
}
/* speed down? */
if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
/* speed down SATA link speed if possible */
if (sata_down_spd_limit(link, 0) == 0) {
action |= ATA_EH_RESET;
goto done;
}
/* lower transfer mode */
if (dev->spdn_cnt < 2) {
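/*
 * spdn_cnt selects the step: the first pass lowers the DMA or
 * PIO mode, the second applies the 40-wire cable limit or
 * forces PIO0.
 */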
static const int dma_dnxfer_sel[] =
{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
static const int pio_dnxfer_sel[] =
{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
int sel;
if (dev->xfer_shift != ATA_SHIFT_PIO)
sel = dma_dnxfer_sel[dev->spdn_cnt];
else
sel = pio_dnxfer_sel[dev->spdn_cnt];
dev->spdn_cnt++;
if (ata_down_xfermask_limit(dev, sel) == 0) {
action |= ATA_EH_RESET;
goto done;
}
}
}
/* Fall back to PIO? Slowing down to PIO is meaningless for
* SATA ATA devices. Consider it only for PATA and SATAPI.
*/
if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
(link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
(dev->xfer_shift != ATA_SHIFT_PIO)) {
if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
dev->spdn_cnt = 0;
action |= ATA_EH_RESET;
goto done;
}
}
return 0;
done:
/* device has been slowed down, blow error history */
if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
ata_ering_clear(&dev->ering);
return action;
}
/**
* ata_eh_worth_retry - analyze error and decide whether to retry
* @qc: qc to possibly retry
*
* Look at the cause of the error and decide if a retry
* might be useful or not. We don't want to retry media errors
* because the drive itself has probably already taken 10-30 seconds
* doing its own internal retries before reporting the failure.
*/
static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
{
if (qc->err_mask & AC_ERR_MEDIA)
return 0; /* don't retry media errors */
if (qc->flags & ATA_QCFLAG_IO)
return 1; /* otherwise retry anything from fs stack */
if (qc->err_mask & AC_ERR_INVALID)
return 0; /* don't retry these */
return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */
}
/**
* ata_eh_quiet - check if we need to be quiet about a command error
* @qc: qc to check
*
* Look at the qc flags and its SCSI command request flags to determine
* if we need to be quiet about the command failure.
*/
static inline bool ata_eh_quiet(struct ata_queued_cmd *qc)
{
if (qc->scsicmd && scsi_cmd_to_rq(qc->scsicmd)->rq_flags & RQF_QUIET)
qc->flags |= ATA_QCFLAG_QUIET;
return qc->flags & ATA_QCFLAG_QUIET;
}
static int ata_eh_read_sense_success_non_ncq(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ata_queued_cmd *qc;
qc = __ata_qc_from_tag(ap, link->active_tag);
if (!qc)
return -EIO;
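/*
 * Bail unless the active command is EH-owned, marked as a
 * successful command needing sense data, and completed without
 * error.
 */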
if (!(qc->flags & ATA_QCFLAG_EH) ||
!(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) ||
qc->err_mask)
return -EIO;
if (!ata_eh_request_sense(qc))
return -EIO;
/*
* If we have sense data, call scsi_check_sense() in order to set the
* correct SCSI ML byte (if any). No point in checking the return value,
* since the command has already completed successfully.
*/
scsi_check_sense(qc->scsicmd);
return 0;
}
static void ata_eh_get_success_sense(struct ata_link *link)
{
struct ata_eh_context *ehc = &link->eh_context;
struct ata_device *dev = link->device;
struct ata_port *ap = link->ap;
struct ata_queued_cmd *qc;
int tag, ret = 0;
if (!(ehc->i.dev_action[dev->devno] & ATA_EH_GET_SUCCESS_SENSE))
return;
/* if frozen, we can't do much */
if (ata_port_is_frozen(ap)) {
ata_dev_warn(dev,
"successful sense data available but port frozen\n");
goto out;
}
/*
* If the link has sactive set, then we have outstanding NCQ commands
* and have to read the Successful NCQ Commands log to get the sense
* data. Otherwise, we are dealing with a non-NCQ command and use the
* REQUEST SENSE DATA EXT command to retrieve the sense data.
*/
if (link->sactive)
ret = ata_eh_read_sense_success_ncq_log(link);
else
ret = ata_eh_read_sense_success_non_ncq(link);
if (ret)
goto out;
ata_eh_done(link, dev, ATA_EH_GET_SUCCESS_SENSE);
return;
out:
/*
* If we failed to get sense data for a successful command that ought to
* have sense data, we cannot simply return BLK_STS_OK to user space.
* This is because we can't know if the sense data that we couldn't get
* was actually "DATA CURRENTLY UNAVAILABLE". Reporting such a command
* as success to user space would result in silent data corruption.
* Thus, add bogus ABORTED_COMMAND sense data to such commands, such
* that SCSI will report these commands as BLK_STS_IOERR to user space.
*/
ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_EH) ||
!(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) ||
qc->err_mask ||
ata_dev_phys_link(qc->dev) != link)
continue;
/* We managed to get sense for this success command, skip. */
if (qc->flags & ATA_QCFLAG_SENSE_VALID)
continue;
/* This success command did not have any sense data, skip. */
if (!(qc->result_tf.status & ATA_SENSE))
continue;
/* This success command had sense data, but we failed to get it. */
ata_scsi_set_sense(dev, qc->scsicmd, ABORTED_COMMAND, 0, 0);
qc->flags |= ATA_QCFLAG_SENSE_VALID;
}
ata_eh_done(link, dev, ATA_EH_GET_SUCCESS_SENSE);
}
/**
* ata_eh_link_autopsy - analyze error and determine recovery action
* @link: host link to perform autopsy on
*
* Analyze why @link failed and determine which recovery actions
* are needed. This function also sets more detailed AC_ERR_*
* values and fills sense data for ATAPI CHECK CONDITION.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
static void ata_eh_link_autopsy(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ata_eh_context *ehc = &link->eh_context;
struct ata_queued_cmd *qc;
struct ata_device *dev;
unsigned int all_err_mask = 0, eflags = 0;
int tag, nr_failed = 0, nr_quiet = 0;
u32 serror;
int rc;
if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
return;
/* obtain and analyze SError */
rc = sata_scr_read(link, SCR_ERROR, &serror);
if (rc == 0) {
ehc->i.serror |= serror;
ata_eh_analyze_serror(link);
} else if (rc != -EOPNOTSUPP) {
/* SError read failed, force reset and probing */
ehc->i.probe_mask |= ATA_ALL_DEVICES;
ehc->i.action |= ATA_EH_RESET;
ehc->i.err_mask |= AC_ERR_OTHER;
}
/* analyze NCQ failure */
ata_eh_analyze_ncq_error(link);
/*
* Check if this was a successful command that simply needs sense data.
* Since the sense data is not part of the completion, we need to fetch
* it using an additional command. Since this can't be done from irq
* context, the sense data for successful commands is fetched by EH.
*/
ata_eh_get_success_sense(link);
/* any real error trumps AC_ERR_OTHER */
if (ehc->i.err_mask & ~AC_ERR_OTHER)
ehc->i.err_mask &= ~AC_ERR_OTHER;
all_err_mask |= ehc->i.err_mask;
ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_EH) ||
qc->flags & ATA_QCFLAG_RETRY ||
qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD ||
ata_dev_phys_link(qc->dev) != link)
continue;
/* inherit upper level err_mask */
qc->err_mask |= ehc->i.err_mask;
/* analyze TF */
ehc->i.action |= ata_eh_analyze_tf(qc);
/* DEV errors are probably spurious in case of ATA_BUS error */
if (qc->err_mask & AC_ERR_ATA_BUS)
qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
AC_ERR_INVALID);
/* any real error trumps unknown error */
if (qc->err_mask & ~AC_ERR_OTHER)
qc->err_mask &= ~AC_ERR_OTHER;
/*
* SENSE_VALID trumps dev/unknown error and revalidation. Upper
* layers will determine whether the command is worth retrying
* based on the sense data and device class/type. Otherwise,
* determine directly if the command is worth retrying using its
* error mask and flags.
*/
if (qc->flags & ATA_QCFLAG_SENSE_VALID)
qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
else if (ata_eh_worth_retry(qc))
qc->flags |= ATA_QCFLAG_RETRY;
/* accumulate error info */
ehc->i.dev = qc->dev;
all_err_mask |= qc->err_mask;
if (qc->flags & ATA_QCFLAG_IO)
eflags |= ATA_EFLAG_IS_IO;
trace_ata_eh_link_autopsy_qc(qc);
/* Count quiet errors */
if (ata_eh_quiet(qc))
nr_quiet++;
nr_failed++;
}
/* If all failed commands requested silence, then be quiet */
if (nr_quiet == nr_failed)
ehc->i.flags |= ATA_EHI_QUIET;
/* enforce default EH actions */
if (ata_port_is_frozen(ap) ||
all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
ehc->i.action |= ATA_EH_RESET;
else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
(!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
ehc->i.action |= ATA_EH_REVALIDATE;
/* If we have offending qcs and the associated failed device,
* perform per-dev EH action only on the offending device.
*/
if (ehc->i.dev) {
ehc->i.dev_action[ehc->i.dev->devno] |=
ehc->i.action & ATA_EH_PERDEV_MASK;
ehc->i.action &= ~ATA_EH_PERDEV_MASK;
}
/* propagate timeout to host link */
if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
/* record error and consider speeding down */
dev = ehc->i.dev;
if (!dev && ((ata_link_max_devices(link) == 1 &&
ata_dev_enabled(link->device))))
dev = link->device;
if (dev) {
if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
eflags |= ATA_EFLAG_DUBIOUS_XFER;
ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
}
}
/**
* ata_eh_autopsy - analyze error and determine recovery action
* @ap: host port to perform autopsy on
*
* Analyze all links of @ap and determine why they failed and
* which recovery actions are needed.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
void ata_eh_autopsy(struct ata_port *ap)
{
struct ata_link *link;
ata_for_each_link(link, ap, EDGE)
ata_eh_link_autopsy(link);
/* Handle the frigging slave link. Autopsy is done similarly
* but actions and flags are transferred over to the master
* link and handled from there.
*/
if (ap->slave_link) {
struct ata_eh_context *mehc = &ap->link.eh_context;
struct ata_eh_context *sehc = &ap->slave_link->eh_context;
/* transfer control flags from master to slave */
sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
/* perform autopsy on the slave link */
ata_eh_link_autopsy(ap->slave_link);
/* transfer actions from slave to master and clear slave */
ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
mehc->i.action |= sehc->i.action;
mehc->i.dev_action[1] |= sehc->i.dev_action[1];
mehc->i.flags |= sehc->i.flags;
ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
}
/* Autopsy of fanout ports can affect host link autopsy.
* Perform host link autopsy last.
*/
if (sata_pmp_attached(ap))
ata_eh_link_autopsy(&ap->link);
}
/**
* ata_get_cmd_name - get name for ATA command
* @command: ATA command code to get name for
*
* Return a textual name of the given command or "unknown"
*
* LOCKING:
* None
*/
const char *ata_get_cmd_name(u8 command)
{
#ifdef CONFIG_ATA_VERBOSE_ERROR
static const struct
{
u8 command;
const char *text;
} cmd_descr[] = {
{ ATA_CMD_DEV_RESET, "DEVICE RESET" },
{ ATA_CMD_CHK_POWER, "CHECK POWER MODE" },
{ ATA_CMD_STANDBY, "STANDBY" },
{ ATA_CMD_IDLE, "IDLE" },
{ ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" },
{ ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" },
{ ATA_CMD_DOWNLOAD_MICRO_DMA, "DOWNLOAD MICROCODE DMA" },
{ ATA_CMD_NOP, "NOP" },
{ ATA_CMD_FLUSH, "FLUSH CACHE" },
{ ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" },
{ ATA_CMD_ID_ATA, "IDENTIFY DEVICE" },
{ ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" },
{ ATA_CMD_SERVICE, "SERVICE" },
{ ATA_CMD_READ, "READ DMA" },
{ ATA_CMD_READ_EXT, "READ DMA EXT" },
{ ATA_CMD_READ_QUEUED, "READ DMA QUEUED" },
{ ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" },
{ ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" },
{ ATA_CMD_WRITE, "WRITE DMA" },
{ ATA_CMD_WRITE_EXT, "WRITE DMA EXT" },
{ ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" },
{ ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" },
{ ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
{ ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" },
{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
{ ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
{ ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
{ ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" },
{ ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" },
{ ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" },
{ ATA_CMD_PIO_READ, "READ SECTOR(S)" },
{ ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" },
{ ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" },
{ ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" },
{ ATA_CMD_READ_MULTI, "READ MULTIPLE" },
{ ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" },
{ ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" },
{ ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" },
{ ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" },
{ ATA_CMD_SET_FEATURES, "SET FEATURES" },
{ ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" },
{ ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" },
{ ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" },
{ ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" },
{ ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" },
{ ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" },
{ ATA_CMD_SLEEP, "SLEEP" },
{ ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" },
{ ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" },
{ ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" },
{ ATA_CMD_SET_MAX, "SET MAX ADDRESS" },
{ ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" },
{ ATA_CMD_READ_LOG_EXT, "READ LOG EXT" },
{ ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" },
{ ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" },
{ ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" },
{ ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" },
{ ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" },
{ ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" },
{ ATA_CMD_TRUSTED_SND, "TRUSTED SEND" },
{ ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" },
{ ATA_CMD_PMP_READ, "READ BUFFER" },
{ ATA_CMD_PMP_READ_DMA, "READ BUFFER DMA" },
{ ATA_CMD_PMP_WRITE, "WRITE BUFFER" },
{ ATA_CMD_PMP_WRITE_DMA, "WRITE BUFFER DMA" },
{ ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" },
{ ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" },
{ ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" },
{ ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" },
{ ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" },
{ ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" },
{ ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" },
{ ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" },
{ ATA_CMD_SMART, "SMART" },
{ ATA_CMD_MEDIA_LOCK, "DOOR LOCK" },
{ ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" },
{ ATA_CMD_DSM, "DATA SET MANAGEMENT" },
{ ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" },
{ ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" },
{ ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" },
{ ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" },
{ ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" },
{ ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" },
{ ATA_CMD_REQ_SENSE_DATA, "REQUEST SENSE DATA EXT" },
{ ATA_CMD_SANITIZE_DEVICE, "SANITIZE DEVICE" },
{ ATA_CMD_ZAC_MGMT_IN, "ZAC MANAGEMENT IN" },
{ ATA_CMD_ZAC_MGMT_OUT, "ZAC MANAGEMENT OUT" },
{ ATA_CMD_READ_LONG, "READ LONG (with retries)" },
{ ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" },
{ ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" },
{ ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" },
{ ATA_CMD_RESTORE, "RECALIBRATE" },
{ 0, NULL } /* terminate list */
};
unsigned int i;
for (i = 0; cmd_descr[i].text; i++)
if (cmd_descr[i].command == command)
return cmd_descr[i].text;
#endif
return "unknown";
}
EXPORT_SYMBOL_GPL(ata_get_cmd_name);
/**
* ata_eh_link_report - report error handling to user
* @link: ATA link EH is going on
*
* Report EH to user.
*
* LOCKING:
* None.
*/
static void ata_eh_link_report(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ata_eh_context *ehc = &link->eh_context;
struct ata_queued_cmd *qc;
const char *frozen, *desc;
char tries_buf[6] = "";
int tag, nr_failed = 0;
if (ehc->i.flags & ATA_EHI_QUIET)
return;
desc = NULL;
if (ehc->i.desc[0] != '\0')
desc = ehc->i.desc;
ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_EH) ||
ata_dev_phys_link(qc->dev) != link ||
((qc->flags & ATA_QCFLAG_QUIET) &&
qc->err_mask == AC_ERR_DEV))
continue;
if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
continue;
nr_failed++;
}
if (!nr_failed && !ehc->i.err_mask)
return;
frozen = "";
if (ata_port_is_frozen(ap))
frozen = " frozen";
if (ap->eh_tries < ATA_EH_MAX_TRIES)
snprintf(tries_buf, sizeof(tries_buf), " t%d",
ap->eh_tries);
if (ehc->i.dev) {
ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
"SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
ehc->i.err_mask, link->sactive, ehc->i.serror,
ehc->i.action, frozen, tries_buf);
if (desc)
ata_dev_err(ehc->i.dev, "%s\n", desc);
} else {
ata_link_err(link, "exception Emask 0x%x "
"SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
ehc->i.err_mask, link->sactive, ehc->i.serror,
ehc->i.action, frozen, tries_buf);
if (desc)
ata_link_err(link, "%s\n", desc);
}
#ifdef CONFIG_ATA_VERBOSE_ERROR
if (ehc->i.serror)
ata_link_err(link,
"SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
ehc->i.serror & SERR_CRC ? "BadCRC " : "",
ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
#endif
ata_qc_for_each_raw(ap, qc, tag) {
struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
char data_buf[20] = "";
char cdb_buf[70] = "";
if (!(qc->flags & ATA_QCFLAG_EH) ||
ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
continue;
if (qc->dma_dir != DMA_NONE) {
static const char *dma_str[] = {
[DMA_BIDIRECTIONAL] = "bidi",
[DMA_TO_DEVICE] = "out",
[DMA_FROM_DEVICE] = "in",
};
const char *prot_str = NULL;
switch (qc->tf.protocol) {
case ATA_PROT_UNKNOWN:
prot_str = "unknown";
break;
case ATA_PROT_NODATA:
prot_str = "nodata";
break;
case ATA_PROT_PIO:
prot_str = "pio";
break;
case ATA_PROT_DMA:
prot_str = "dma";
break;
case ATA_PROT_NCQ:
prot_str = "ncq dma";
break;
case ATA_PROT_NCQ_NODATA:
prot_str = "ncq nodata";
break;
case ATAPI_PROT_NODATA:
prot_str = "nodata";
break;
case ATAPI_PROT_PIO:
prot_str = "pio";
break;
case ATAPI_PROT_DMA:
prot_str = "dma";
break;
}
snprintf(data_buf, sizeof(data_buf), " %s %u %s",
prot_str, qc->nbytes, dma_str[qc->dma_dir]);
}
if (ata_is_atapi(qc->tf.protocol)) {
const u8 *cdb = qc->cdb;
size_t cdb_len = qc->dev->cdb_len;
if (qc->scsicmd) {
cdb = qc->scsicmd->cmnd;
cdb_len = qc->scsicmd->cmd_len;
}
__scsi_format_command(cdb_buf, sizeof(cdb_buf),
cdb, cdb_len);
} else
ata_dev_err(qc->dev, "failed command: %s\n",
ata_get_cmd_name(cmd->command));
ata_dev_err(qc->dev,
"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
"tag %d%s\n %s"
"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
"Emask 0x%x (%s)%s\n",
cmd->command, cmd->feature, cmd->nsect,
cmd->lbal, cmd->lbam, cmd->lbah,
cmd->hob_feature, cmd->hob_nsect,
cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
cmd->device, qc->tag, data_buf, cdb_buf,
res->status, res->error, res->nsect,
res->lbal, res->lbam, res->lbah,
res->hob_feature, res->hob_nsect,
res->hob_lbal, res->hob_lbam, res->hob_lbah,
res->device, qc->err_mask, ata_err_string(qc->err_mask),
qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
#ifdef CONFIG_ATA_VERBOSE_ERROR
if (res->status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
ATA_SENSE | ATA_ERR)) {
if (res->status & ATA_BUSY)
ata_dev_err(qc->dev, "status: { Busy }\n");
else
ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
res->status & ATA_DRDY ? "DRDY " : "",
res->status & ATA_DF ? "DF " : "",
res->status & ATA_DRQ ? "DRQ " : "",
res->status & ATA_SENSE ? "SENSE " : "",
res->status & ATA_ERR ? "ERR " : "");
}
if (cmd->command != ATA_CMD_PACKET &&
(res->error & (ATA_ICRC | ATA_UNC | ATA_AMNF | ATA_IDNF |
ATA_ABORTED)))
ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
res->error & ATA_ICRC ? "ICRC " : "",
res->error & ATA_UNC ? "UNC " : "",
res->error & ATA_AMNF ? "AMNF " : "",
res->error & ATA_IDNF ? "IDNF " : "",
res->error & ATA_ABORTED ? "ABRT " : "");
#endif
}
}
/**
* ata_eh_report - report error handling to user
* @ap: ATA port to report EH about
*
* Report EH to user.
*
* LOCKING:
* None.
*/
void ata_eh_report(struct ata_port *ap)
{
struct ata_link *link;
ata_for_each_link(link, ap, HOST_FIRST)
ata_eh_link_report(link);
}
static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
unsigned int *classes, unsigned long deadline,
bool clear_classes)
{
struct ata_device *dev;
if (clear_classes)
ata_for_each_dev(dev, link, ALL)
classes[dev->devno] = ATA_DEV_UNKNOWN;
return reset(link, classes, deadline);
}
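/*
 * A hardreset must be followed by a softreset when the hardreset
 * reports -EAGAIN (it could not classify the device) or when a port
 * multiplier may be attached to the host link, as only SRST probes it.
 */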
static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
{
if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
return 0;
if (rc == -EAGAIN)
return 1;
if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
return 1;
return 0;
}
int ata_eh_reset(struct ata_link *link, int classify,
ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
struct ata_port *ap = link->ap;
struct ata_link *slave = ap->slave_link;
struct ata_eh_context *ehc = &link->eh_context;
struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
unsigned int *classes = ehc->classes;
unsigned int lflags = link->flags;
int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
int max_tries = 0, try = 0;
struct ata_link *failed_link;
struct ata_device *dev;
unsigned long deadline, now;
ata_reset_fn_t reset;
unsigned long flags;
u32 sstatus;
int nr_unknown, rc;
/*
* Prepare to reset
*/
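/* ata_eh_reset_timeouts[] is terminated by UINT_MAX; its length bounds the number of attempts */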
while (ata_eh_reset_timeouts[max_tries] != UINT_MAX)
max_tries++;
if (link->flags & ATA_LFLAG_RST_ONCE)
max_tries = 1;
if (link->flags & ATA_LFLAG_NO_HRST)
hardreset = NULL;
if (link->flags & ATA_LFLAG_NO_SRST)
softreset = NULL;
/* make sure each reset attempt is at least COOL_DOWN apart */
if (ehc->i.flags & ATA_EHI_DID_RESET) {
now = jiffies;
WARN_ON(time_after(ehc->last_reset, now));
deadline = ata_deadline(ehc->last_reset,
ATA_EH_RESET_COOL_DOWN);
if (time_before(now, deadline))
schedule_timeout_uninterruptible(deadline - now);
}
spin_lock_irqsave(ap->lock, flags);
ap->pflags |= ATA_PFLAG_RESETTING;
spin_unlock_irqrestore(ap->lock, flags);
ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
ata_for_each_dev(dev, link, ALL) {
/* If we issue an SRST then an ATA drive (not ATAPI)
* may change configuration and be in PIO0 timing. If
* we do a hard reset (or are coming from power on)
* this is true for ATA or ATAPI. Until we've set a
* suitable controller mode we should not touch the
* bus as we may be talking too fast.
*/
dev->pio_mode = XFER_PIO_0;
dev->dma_mode = 0xff;
/* If the controller has a pio mode setup function
* then use it to set the chipset to rights. Don't
* touch the DMA setup as that will be dealt with when
* configuring devices.
*/
if (ap->ops->set_piomode)
ap->ops->set_piomode(ap, dev);
}
/* prefer hardreset */
reset = NULL;
ehc->i.action &= ~ATA_EH_RESET;
if (hardreset) {
reset = hardreset;
ehc->i.action |= ATA_EH_HARDRESET;
} else if (softreset) {
reset = softreset;
ehc->i.action |= ATA_EH_SOFTRESET;
}
if (prereset) {
unsigned long deadline = ata_deadline(jiffies,
ATA_EH_PRERESET_TIMEOUT);
if (slave) {
sehc->i.action &= ~ATA_EH_RESET;
sehc->i.action |= ehc->i.action;
}
rc = prereset(link, deadline);
/* If present, do prereset on slave link too. Reset
* is skipped iff both master and slave links report
* -ENOENT or clear ATA_EH_RESET.
*/
if (slave && (rc == 0 || rc == -ENOENT)) {
int tmp;
tmp = prereset(slave, deadline);
if (tmp != -ENOENT)
rc = tmp;
ehc->i.action |= sehc->i.action;
}
if (rc) {
if (rc == -ENOENT) {
ata_link_dbg(link, "port disabled--ignoring\n");
ehc->i.action &= ~ATA_EH_RESET;
ata_for_each_dev(dev, link, ALL)
classes[dev->devno] = ATA_DEV_NONE;
rc = 0;
} else
ata_link_err(link,
"prereset failed (errno=%d)\n",
rc);
goto out;
}
/* prereset() might have cleared ATA_EH_RESET. If so,
* bang classes, thaw and return.
*/
if (reset && !(ehc->i.action & ATA_EH_RESET)) {
ata_for_each_dev(dev, link, ALL)
classes[dev->devno] = ATA_DEV_NONE;
if (ata_port_is_frozen(ap) && ata_is_host_link(link))
ata_eh_thaw_port(ap);
rc = 0;
goto out;
}
}
retry:
/*
* Perform reset
*/
if (ata_is_host_link(link))
ata_eh_freeze_port(ap);
deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
if (reset) {
if (verbose)
ata_link_info(link, "%s resetting link\n",
reset == softreset ? "soft" : "hard");
/* mark that this EH session started with reset */
ehc->last_reset = jiffies;
if (reset == hardreset) {
ehc->i.flags |= ATA_EHI_DID_HARDRESET;
trace_ata_link_hardreset_begin(link, classes, deadline);
} else {
ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
trace_ata_link_softreset_begin(link, classes, deadline);
}
rc = ata_do_reset(link, reset, classes, deadline, true);
if (reset == hardreset)
trace_ata_link_hardreset_end(link, classes, rc);
else
trace_ata_link_softreset_end(link, classes, rc);
if (rc && rc != -EAGAIN) {
failed_link = link;
goto fail;
}
/* hardreset slave link if existent */
if (slave && reset == hardreset) {
int tmp;
if (verbose)
ata_link_info(slave, "hard resetting link\n");
ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
trace_ata_slave_hardreset_begin(slave, classes,
deadline);
tmp = ata_do_reset(slave, reset, classes, deadline,
false);
trace_ata_slave_hardreset_end(slave, classes, tmp);
switch (tmp) {
case -EAGAIN:
rc = -EAGAIN;
break;
case 0:
break;
default:
failed_link = slave;
rc = tmp;
goto fail;
}
}
/* perform follow-up SRST if necessary */
if (reset == hardreset &&
ata_eh_followup_srst_needed(link, rc)) {
reset = softreset;
if (!reset) {
ata_link_err(link,
"follow-up softreset required but no softreset available\n");
failed_link = link;
rc = -EINVAL;
goto fail;
}
ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
trace_ata_link_softreset_begin(link, classes, deadline);
rc = ata_do_reset(link, reset, classes, deadline, true);
trace_ata_link_softreset_end(link, classes, rc);
if (rc) {
failed_link = link;
goto fail;
}
}
} else {
if (verbose)
ata_link_info(link,
"no reset method available, skipping reset\n");
if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
lflags |= ATA_LFLAG_ASSUME_ATA;
}
/*
* Post-reset processing
*/
ata_for_each_dev(dev, link, ALL) {
/* After the reset, the device state is PIO 0 and the
* controller state is undefined. Reset also wakes up
* drives from sleeping mode.
*/
dev->pio_mode = XFER_PIO_0;
dev->flags &= ~ATA_DFLAG_SLEEPING;
if (ata_phys_link_offline(ata_dev_phys_link(dev)))
continue;
/* apply class override */
if (lflags & ATA_LFLAG_ASSUME_ATA)
classes[dev->devno] = ATA_DEV_ATA;
else if (lflags & ATA_LFLAG_ASSUME_SEMB)
classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
}
/* record current link speed */
if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
link->sata_spd = (sstatus >> 4) & 0xf;
if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
slave->sata_spd = (sstatus >> 4) & 0xf;
/* thaw the port */
if (ata_is_host_link(link))
ata_eh_thaw_port(ap);
/* postreset() should clear hardware SError. Although SError
* is cleared during link resume, clearing SError here is
* necessary as some PHYs raise hotplug events after SRST.
* This introduces a race condition where hotplug occurs between
* the reset and here. The race is mitigated by cross-checking
* link onlineness and the classification result later.
*/
if (postreset) {
postreset(link, classes);
trace_ata_link_postreset(link, classes, rc);
if (slave) {
postreset(slave, classes);
trace_ata_slave_postreset(slave, classes, rc);
}
}
/* clear cached SError */
spin_lock_irqsave(link->ap->lock, flags);
link->eh_info.serror = 0;
if (slave)
slave->eh_info.serror = 0;
spin_unlock_irqrestore(link->ap->lock, flags);
/*
* Make sure onlineness and classification result correspond.
* Hotplug could have happened during reset and some
* controllers fail to wait while a drive is spinning up after
* being hotplugged, causing misdetection. By cross-checking
* link on/offlineness and the classification result, those
* conditions can be reliably detected and retried.
*/
nr_unknown = 0;
ata_for_each_dev(dev, link, ALL) {
if (ata_phys_link_online(ata_dev_phys_link(dev))) {
if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
ata_dev_dbg(dev, "link online but device misclassified\n");
classes[dev->devno] = ATA_DEV_NONE;
nr_unknown++;
}
} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
if (ata_class_enabled(classes[dev->devno]))
ata_dev_dbg(dev,
"link offline, clearing class %d to NONE\n",
classes[dev->devno]);
classes[dev->devno] = ATA_DEV_NONE;
} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
ata_dev_dbg(dev,
"link status unknown, clearing UNKNOWN to NONE\n");
classes[dev->devno] = ATA_DEV_NONE;
}
}
if (classify && nr_unknown) {
if (try < max_tries) {
ata_link_warn(link,
"link online but %d devices misclassified, retrying\n",
nr_unknown);
failed_link = link;
rc = -EAGAIN;
goto fail;
}
ata_link_warn(link,
"link online but %d devices misclassified, "
"device detection might fail\n", nr_unknown);
}
/* reset successful, schedule revalidation */
ata_eh_done(link, NULL, ATA_EH_RESET);
if (slave)
ata_eh_done(slave, NULL, ATA_EH_RESET);
ehc->last_reset = jiffies; /* update to completion time */
ehc->i.action |= ATA_EH_REVALIDATE;
link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */
rc = 0;
out:
/* clear hotplug flag */
ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
if (slave)
sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
spin_lock_irqsave(ap->lock, flags);
ap->pflags &= ~ATA_PFLAG_RESETTING;
spin_unlock_irqrestore(ap->lock, flags);
return rc;
fail:
/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
if (!ata_is_host_link(link) &&
sata_scr_read(link, SCR_STATUS, &sstatus))
rc = -ERESTART;
if (try >= max_tries) {
/*
* Thaw host port even if reset failed, so that the port
* can be retried on the next phy event. This risks
* repeated EH runs but seems to be a better tradeoff than
* shutting down a port after a botched hotplug attempt.
*/
if (ata_is_host_link(link))
ata_eh_thaw_port(ap);
goto out;
}
now = jiffies;
if (time_before(now, deadline)) {
unsigned long delta = deadline - now;
ata_link_warn(failed_link,
"reset failed (errno=%d), retrying in %u secs\n",
rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
ata_eh_release(ap);
while (delta)
delta = schedule_timeout_uninterruptible(delta);
ata_eh_acquire(ap);
}
/*
* While disks spin up behind a PMP, some controllers fail to send SRST.
* They need to be reset - as well as the PMP - before retrying.
*/
if (rc == -ERESTART) {
if (ata_is_host_link(link))
ata_eh_thaw_port(ap);
goto out;
}
if (try == max_tries - 1) {
sata_down_spd_limit(link, 0);
if (slave)
sata_down_spd_limit(slave, 0);
} else if (rc == -EPIPE)
sata_down_spd_limit(failed_link, 0);
if (hardreset)
reset = hardreset;
goto retry;
}
static inline void ata_eh_pull_park_action(struct ata_port *ap)
{
struct ata_link *link;
struct ata_device *dev;
unsigned long flags;
/*
* This function can be thought of as an extended version of
* ata_eh_about_to_do() specially crafted to accommodate the
* requirements of ATA_EH_PARK handling. Since the EH thread
* does not leave the do {} while () loop in ata_eh_recover as
* long as the timeout for a park request to *one* device on
* the port has not expired, and since we still want to pick
* up park requests to other devices on the same port or
* timeout updates for the same device, we have to pull
* ATA_EH_PARK actions from eh_info into eh_context.i
* ourselves at the beginning of each pass over the loop.
*
* Additionally, all write accesses to &ap->park_req_pending
* through reinit_completion() (see below) or complete_all()
* (see ata_scsi_park_store()) are protected by the host lock.
* As a result, park_req_pending.done is zero on
* exit from this function, i.e. when ATA_EH_PARK actions for
* *all* devices on port ap have been pulled into the
* respective eh_context structs. If, and only if,
* park_req_pending.done is non-zero by the time we reach
* wait_for_completion_timeout(), another ATA_EH_PARK action
* has been scheduled for at least one of the devices on port
* ap and we have to cycle over the do {} while () loop in
* ata_eh_recover() again.
*/
spin_lock_irqsave(ap->lock, flags);
reinit_completion(&ap->park_req_pending);
ata_for_each_link(link, ap, EDGE) {
ata_for_each_dev(dev, link, ALL) {
struct ata_eh_info *ehi = &link->eh_info;
link->eh_context.i.dev_action[dev->devno] |=
ehi->dev_action[dev->devno] & ATA_EH_PARK;
ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
}
}
spin_unlock_irqrestore(ap->lock, flags);
}
static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
{
struct ata_eh_context *ehc = &dev->link->eh_context;
struct ata_taskfile tf;
unsigned int err_mask;
ata_tf_init(dev, &tf);
if (park) {
ehc->unloaded_mask |= 1 << dev->devno;
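/*
 * IDLE IMMEDIATE with UNLOAD FEATURE (ATA8-ACS): FEATURE 0x44
 * with LBA 0x554E4C requests a head unload; on success the
 * device returns 0xC4 in LBA(7:0), which is checked below.
 */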
tf.command = ATA_CMD_IDLEIMMEDIATE;
tf.feature = 0x44;
tf.lbal = 0x4c;
tf.lbam = 0x4e;
tf.lbah = 0x55;
} else {
ehc->unloaded_mask &= ~(1 << dev->devno);
tf.command = ATA_CMD_CHK_POWER;
}
tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
tf.protocol = ATA_PROT_NODATA;
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
if (park && (err_mask || tf.lbal != 0xc4)) {
ata_dev_err(dev, "head unload failed!\n");
ehc->unloaded_mask &= ~(1 << dev->devno);
}
}
static int ata_eh_revalidate_and_attach(struct ata_link *link,
struct ata_device **r_failed_dev)
{
struct ata_port *ap = link->ap;
struct ata_eh_context *ehc = &link->eh_context;
struct ata_device *dev;
unsigned int new_mask = 0;
unsigned long flags;
int rc = 0;
/* For PATA drive side cable detection to work, IDENTIFY must
* be done backwards such that PDIAG- is released by the slave
* device before the master device is identified.
*/
ata_for_each_dev(dev, link, ALL_REVERSE) {
unsigned int action = ata_eh_dev_action(dev);
unsigned int readid_flags = 0;
if (ehc->i.flags & ATA_EHI_DID_RESET)
readid_flags |= ATA_READID_POSTRESET;
if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
WARN_ON(dev->class == ATA_DEV_PMP);
/*
* The link may be in a deep sleep, wake it up.
*
* If the link is in deep sleep, ata_phys_link_offline()
* will return true, causing the revalidation to fail,
* which leads to a (potentially) needless hard reset.
*
* ata_eh_recover() will later restore the link policy
* to ap->target_lpm_policy after revalidation is done.
*/
if (link->lpm_policy > ATA_LPM_MAX_POWER) {
rc = ata_eh_set_lpm(link, ATA_LPM_MAX_POWER,
r_failed_dev);
if (rc)
goto err;
}
if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
rc = -EIO;
goto err;
}
ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
readid_flags);
if (rc)
goto err;
ata_eh_done(link, dev, ATA_EH_REVALIDATE);
/* Configuration may have changed, reconfigure
* transfer mode.
*/
ehc->i.flags |= ATA_EHI_SETMODE;
/* schedule the scsi_rescan_device() here */
schedule_delayed_work(&ap->scsi_rescan_task, 0);
} else if (dev->class == ATA_DEV_UNKNOWN &&
ehc->tries[dev->devno] &&
ata_class_enabled(ehc->classes[dev->devno])) {
/* Temporarily set dev->class, it will be
* permanently set once all configurations are
* complete. This is necessary because new
* device configuration is done in two
* separate loops.
*/
dev->class = ehc->classes[dev->devno];
if (dev->class == ATA_DEV_PMP)
rc = sata_pmp_attach(dev);
else
rc = ata_dev_read_id(dev, &dev->class,
readid_flags, dev->id);
/* read_id might have changed class, store and reset */
ehc->classes[dev->devno] = dev->class;
dev->class = ATA_DEV_UNKNOWN;
switch (rc) {
case 0:
/* clear error info accumulated during probe */
ata_ering_clear(&dev->ering);
new_mask |= 1 << dev->devno;
break;
case -ENOENT:
/* IDENTIFY was issued to non-existent
* device. No need to reset. Just
* thaw and ignore the device.
*/
ata_eh_thaw_port(ap);
break;
default:
goto err;
}
}
}
/* PDIAG- should have been released, ask cable type if post-reset */
if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
if (ap->ops->cable_detect)
ap->cbl = ap->ops->cable_detect(ap);
ata_force_cbl(ap);
}
/* Configure new devices forward such that user doesn't see
* device detection messages backwards.
*/
ata_for_each_dev(dev, link, ALL) {
if (!(new_mask & (1 << dev->devno)))
continue;
dev->class = ehc->classes[dev->devno];
if (dev->class == ATA_DEV_PMP)
continue;
ehc->i.flags |= ATA_EHI_PRINTINFO;
rc = ata_dev_configure(dev);
ehc->i.flags &= ~ATA_EHI_PRINTINFO;
if (rc) {
dev->class = ATA_DEV_UNKNOWN;
goto err;
}
spin_lock_irqsave(ap->lock, flags);
ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
spin_unlock_irqrestore(ap->lock, flags);
/* new device discovered, configure xfermode */
ehc->i.flags |= ATA_EHI_SETMODE;
}
return 0;
err:
*r_failed_dev = dev;
return rc;
}
/**
* ata_set_mode - Program timings and issue SET FEATURES - XFER
* @link: link on which timings will be programmed
* @r_failed_dev: out parameter for failed device
*
* Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
* ata_set_mode() fails, pointer to the failing device is
* returned in @r_failed_dev.
*
* LOCKING:
* PCI/etc. bus probe sem.
*
* RETURNS:
* 0 on success, negative errno otherwise
*/
int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
struct ata_port *ap = link->ap;
struct ata_device *dev;
int rc;
/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
ata_for_each_dev(dev, link, ENABLED) {
if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
struct ata_ering_entry *ent;
ent = ata_ering_top(&dev->ering);
if (ent)
ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
}
}
/* has private set_mode? */
if (ap->ops->set_mode)
rc = ap->ops->set_mode(link, r_failed_dev);
else
rc = ata_do_set_mode(link, r_failed_dev);
/* if transfer mode has changed, set DUBIOUS_XFER on device */
ata_for_each_dev(dev, link, ENABLED) {
struct ata_eh_context *ehc = &link->eh_context;
u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
if (dev->xfer_mode != saved_xfer_mode ||
ata_ncq_enabled(dev) != saved_ncq)
dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
}
return rc;
}
/**
* atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
* @dev: ATAPI device to clear UA for
*
* Resets and other operations can make an ATAPI device raise
* UNIT ATTENTION which causes the next operation to fail. This
* function clears UA.
*
* LOCKING:
* EH context (may sleep).
*
* RETURNS:
* 0 on success, -errno on failure.
*/
static int atapi_eh_clear_ua(struct ata_device *dev)
{
int i;
for (i = 0; i < ATA_EH_UA_TRIES; i++) {
u8 *sense_buffer = dev->link->ap->sector_buf;
u8 sense_key = 0;
unsigned int err_mask;
err_mask = atapi_eh_tur(dev, &sense_key);
if (err_mask != 0 && err_mask != AC_ERR_DEV) {
ata_dev_warn(dev,
"TEST_UNIT_READY failed (err_mask=0x%x)\n",
err_mask);
return -EIO;
}
if (!err_mask || sense_key != UNIT_ATTENTION)
return 0;
err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
if (err_mask) {
ata_dev_warn(dev, "failed to clear "
"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
return -EIO;
}
}
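/* a lingering UA is not fatal; warn and report success so EH can continue */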
ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
ATA_EH_UA_TRIES);
return 0;
}
/**
* ata_eh_maybe_retry_flush - Retry FLUSH if necessary
* @dev: ATA device which may need FLUSH retry
*
* If @dev failed FLUSH, it needs to be reported to the upper layer
* immediately, as it means that @dev failed to remap and has already
* lost at least a sector, and further FLUSH retries won't make
* any difference to the lost sector. However, if FLUSH failed
* for other reasons, for example a transmission error, FLUSH needs
* to be retried.
*
* This function determines whether FLUSH failure retry is
* necessary and performs it if so.
*
* RETURNS:
* 0 if EH can continue, -errno if EH needs to be repeated.
*/
static int ata_eh_maybe_retry_flush(struct ata_device *dev)
{
struct ata_link *link = dev->link;
struct ata_port *ap = link->ap;
struct ata_queued_cmd *qc;
struct ata_taskfile tf;
unsigned int err_mask;
int rc = 0;
/* did flush fail for this device? */
if (!ata_tag_valid(link->active_tag))
return 0;
qc = __ata_qc_from_tag(ap, link->active_tag);
if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
qc->tf.command != ATA_CMD_FLUSH))
return 0;
/* if the device failed it, it should be reported to upper layers */
if (qc->err_mask & AC_ERR_DEV)
return 0;
/* flush failed for some other reason, give it another shot */
ata_tf_init(dev, &tf);
tf.command = qc->tf.command;
tf.flags |= ATA_TFLAG_DEVICE;
tf.protocol = ATA_PROT_NODATA;
ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
tf.command, qc->err_mask);
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
if (!err_mask) {
/*
* FLUSH is complete but there's no way to
* successfully complete a failed command from EH.
* Making sure retry is allowed at least once and
* retrying it should do the trick - whatever was in
* the cache is already on the platter and this won't
* cause an infinite loop.
*/
qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
} else {
ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
err_mask);
rc = -EIO;
/* if device failed it, report it to upper layers */
if (err_mask & AC_ERR_DEV) {
qc->err_mask |= AC_ERR_DEV;
qc->result_tf = tf;
if (!ata_port_is_frozen(ap))
rc = 0;
}
}
return rc;
}
/**
* ata_eh_set_lpm - configure SATA interface power management
* @link: link to configure power management
* @policy: the link power management policy
* @r_failed_dev: out parameter for failed device
*
* Enable SATA Interface power management. This will enable
* Device Interface Power Management (DIPM) for min_power and
* medium_power_with_dipm policies, and then call driver specific
* callbacks for enabling Host Initiated Power management.
*
* LOCKING:
* EH context.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
struct ata_device **r_failed_dev)
{
struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
struct ata_eh_context *ehc = &link->eh_context;
struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
enum ata_lpm_policy old_policy = link->lpm_policy;
bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
unsigned int err_mask;
int rc;
/* if the link or host doesn't do LPM, noop */
if (!IS_ENABLED(CONFIG_SATA_HOST) ||
(link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
return 0;
/*
* DIPM is enabled only for MIN_POWER as some devices
* misbehave when the host NACKs transition to SLUMBER. Order
* device and link configurations such that the host always
* allows DIPM requests.
*/
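/*
 * hints start as EMPTY | HIPM and are pruned below: any enabled
 * device clears EMPTY, and any device without HIPM support clears
 * HIPM.
 */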
ata_for_each_dev(dev, link, ENABLED) {
bool hipm = ata_id_has_hipm(dev->id);
bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
/* find the first enabled device and the first LPM-capable device */
if (!link_dev)
link_dev = dev;
if (!lpm_dev && (hipm || dipm))
lpm_dev = dev;
hints &= ~ATA_LPM_EMPTY;
if (!hipm)
hints &= ~ATA_LPM_HIPM;
/* disable DIPM before changing link config */
if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) {
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_DISABLE, SATA_DIPM);
if (err_mask && err_mask != AC_ERR_DEV) {
ata_dev_warn(dev,
"failed to disable DIPM, Emask 0x%x\n",
err_mask);
rc = -EIO;
goto fail;
}
}
}
if (ap) {
rc = ap->ops->set_lpm(link, policy, hints);
if (!rc && ap->slave_link)
rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
} else
rc = sata_pmp_set_lpm(link, policy, hints);
/*
* Attribute link config failure to the first (LPM) enabled
* device on the link.
*/
if (rc) {
if (rc == -EOPNOTSUPP) {
link->flags |= ATA_LFLAG_NO_LPM;
return 0;
}
dev = lpm_dev ? lpm_dev : link_dev;
goto fail;
}
/*
* Low level driver acked the transition. Issue DIPM command
* with the new policy set.
*/
link->lpm_policy = policy;
if (ap && ap->slave_link)
ap->slave_link->lpm_policy = policy;
/* host config updated, enable DIPM if transitioning to MIN_POWER */
ata_for_each_dev(dev, link, ENABLED) {
if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm &&
ata_id_has_dipm(dev->id)) {
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_ENABLE, SATA_DIPM);
if (err_mask && err_mask != AC_ERR_DEV) {
ata_dev_warn(dev,
"failed to enable DIPM, Emask 0x%x\n",
err_mask);
rc = -EIO;
goto fail;
}
}
}
link->last_lpm_change = jiffies;
link->flags |= ATA_LFLAG_CHANGED;
return 0;
fail:
/* restore the old policy */
link->lpm_policy = old_policy;
if (ap && ap->slave_link)
ap->slave_link->lpm_policy = old_policy;
/* if no device or only one more chance is left, disable LPM */
if (!dev || ehc->tries[dev->devno] <= 2) {
ata_link_warn(link, "disabling LPM on the link\n");
link->flags |= ATA_LFLAG_NO_LPM;
}
if (r_failed_dev)
*r_failed_dev = dev;
return rc;
}
int ata_link_nr_enabled(struct ata_link *link)
{
struct ata_device *dev;
int cnt = 0;
ata_for_each_dev(dev, link, ENABLED)
cnt++;
return cnt;
}
static int ata_link_nr_vacant(struct ata_link *link)
{
struct ata_device *dev;
int cnt = 0;
ata_for_each_dev(dev, link, ALL)
if (dev->class == ATA_DEV_UNKNOWN)
cnt++;
return cnt;
}
static int ata_eh_skip_recovery(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ata_eh_context *ehc = &link->eh_context;
struct ata_device *dev;
/* skip disabled links */
if (link->flags & ATA_LFLAG_DISABLED)
return 1;
/* skip if explicitly requested */
if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
return 1;
/* thaw frozen port and recover failed devices */
if (ata_port_is_frozen(ap) || ata_link_nr_enabled(link))
return 0;
/* reset at least once if reset is requested */
if ((ehc->i.action & ATA_EH_RESET) &&
!(ehc->i.flags & ATA_EHI_DID_RESET))
return 0;
/* skip if class codes for all vacant slots are ATA_DEV_NONE */
ata_for_each_dev(dev, link, ALL) {
if (dev->class == ATA_DEV_UNKNOWN &&
ehc->classes[dev->devno] != ATA_DEV_NONE)
return 0;
}
return 1;
}
static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
{
u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
u64 now = get_jiffies_64();
int *trials = void_arg;
if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
(ent->timestamp < now - min(now, interval)))
return -1;
(*trials)++;
return 0;
}
static int ata_eh_schedule_probe(struct ata_device *dev)
{
struct ata_eh_context *ehc = &dev->link->eh_context;
struct ata_link *link = ata_dev_phys_link(dev);
int trials = 0;
if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
(ehc->did_probe_mask & (1 << dev->devno)))
return 0;
ata_eh_detach_dev(dev);
ata_dev_init(dev);
ehc->did_probe_mask |= (1 << dev->devno);
ehc->i.action |= ATA_EH_RESET;
ehc->saved_xfer_mode[dev->devno] = 0;
ehc->saved_ncq_enabled &= ~(1 << dev->devno);
/* the link may be in a deep sleep, wake it up */
if (link->lpm_policy > ATA_LPM_MAX_POWER) {
if (ata_is_host_link(link))
link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
ATA_LPM_EMPTY);
else
sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
ATA_LPM_EMPTY);
}
/* Record and count probe trials on the ering. The specific
* error mask used is irrelevant. Because a successful device
* detection clears the ering, this count accumulates only if
* there are consecutive failed probes.
*
* If the count is equal to or higher than ATA_EH_PROBE_TRIALS
* in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
* forced to 1.5Gbps.
*
* This is to work around cases where failed link speed
* negotiation results in device misdetection leading to
* infinite DEVXCHG or PHRDY CHG events.
*/
ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
if (trials > ATA_EH_PROBE_TRIALS)
sata_down_spd_limit(link, 1);
return 1;
}
static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
{
struct ata_eh_context *ehc = &dev->link->eh_context;
/* -EAGAIN from EH routine indicates retry without prejudice.
* The requester is responsible for ensuring forward progress.
*/
if (err != -EAGAIN)
ehc->tries[dev->devno]--;
switch (err) {
case -ENODEV:
/* device missing or wrong IDENTIFY data, schedule probing */
ehc->i.probe_mask |= (1 << dev->devno);
fallthrough;
case -EINVAL:
/* give it just one more chance */
ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
fallthrough;
case -EIO:
if (ehc->tries[dev->devno] == 1) {
/* This is the last chance, better to slow
* down than lose it.
*/
sata_down_spd_limit(ata_dev_phys_link(dev), 0);
if (dev->pio_mode > XFER_PIO_0)
ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
}
}
if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
/* disable device if it has used up all its chances */
ata_dev_disable(dev);
/* detach if offline */
if (ata_phys_link_offline(ata_dev_phys_link(dev)))
ata_eh_detach_dev(dev);
/* schedule probe if necessary */
if (ata_eh_schedule_probe(dev)) {
ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
memset(ehc->cmd_timeout_idx[dev->devno], 0,
sizeof(ehc->cmd_timeout_idx[dev->devno]));
}
return 1;
} else {
ehc->i.action |= ATA_EH_RESET;
return 0;
}
}
/**
* ata_eh_recover - recover host port after error
* @ap: host port to recover
* @prereset: prereset method (can be NULL)
* @softreset: softreset method (can be NULL)
* @hardreset: hardreset method (can be NULL)
* @postreset: postreset method (can be NULL)
* @r_failed_link: out parameter for failed link
*
* This is the alpha and omega, eum and yang, heart and soul of
* libata exception handling. On entry, actions required to
* recover each link and hotplug requests are recorded in the
* link's eh_context. This function executes all the operations
* with appropriate retries and fallbacks to resurrect failed
* devices, detach goners and greet newcomers.
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
ata_postreset_fn_t postreset,
struct ata_link **r_failed_link)
{
struct ata_link *link;
struct ata_device *dev;
int rc, nr_fails;
unsigned long flags, deadline;
/* prep for recovery */
ata_for_each_link(link, ap, EDGE) {
struct ata_eh_context *ehc = &link->eh_context;
/* re-enable link? */
if (ehc->i.action & ATA_EH_ENABLE_LINK) {
ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
spin_lock_irqsave(ap->lock, flags);
link->flags &= ~ATA_LFLAG_DISABLED;
spin_unlock_irqrestore(ap->lock, flags);
ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
}
ata_for_each_dev(dev, link, ALL) {
if (link->flags & ATA_LFLAG_NO_RETRY)
ehc->tries[dev->devno] = 1;
else
ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
/* collect port action mask recorded in dev actions */
ehc->i.action |= ehc->i.dev_action[dev->devno] &
~ATA_EH_PERDEV_MASK;
ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
/* process hotplug request */
if (dev->flags & ATA_DFLAG_DETACH)
ata_eh_detach_dev(dev);
/* schedule probe if necessary */
if (!ata_dev_enabled(dev))
ata_eh_schedule_probe(dev);
}
}
retry:
rc = 0;
/* if UNLOADING, finish immediately */
if (ap->pflags & ATA_PFLAG_UNLOADING)
goto out;
/* prep for EH */
ata_for_each_link(link, ap, EDGE) {
struct ata_eh_context *ehc = &link->eh_context;
/* skip EH if possible. */
if (ata_eh_skip_recovery(link))
ehc->i.action = 0;
ata_for_each_dev(dev, link, ALL)
ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
}
/* reset */
ata_for_each_link(link, ap, EDGE) {
struct ata_eh_context *ehc = &link->eh_context;
if (!(ehc->i.action & ATA_EH_RESET))
continue;
rc = ata_eh_reset(link, ata_link_nr_vacant(link),
prereset, softreset, hardreset, postreset);
if (rc) {
ata_link_err(link, "reset failed, giving up\n");
goto out;
}
}
do {
unsigned long now;
/*
* clears ATA_EH_PARK in eh_info and resets
* ap->park_req_pending
*/
ata_eh_pull_park_action(ap);
deadline = jiffies;
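/*
 * Issue a park command to every device whose unpark deadline has
 * not yet passed and track the latest deadline so the wait below
 * covers all outstanding park requests.
 */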
ata_for_each_link(link, ap, EDGE) {
ata_for_each_dev(dev, link, ALL) {
struct ata_eh_context *ehc = &link->eh_context;
unsigned long tmp;
if (dev->class != ATA_DEV_ATA &&
dev->class != ATA_DEV_ZAC)
continue;
if (!(ehc->i.dev_action[dev->devno] &
ATA_EH_PARK))
continue;
tmp = dev->unpark_deadline;
if (time_before(deadline, tmp))
deadline = tmp;
else if (time_before_eq(tmp, jiffies))
continue;
if (ehc->unloaded_mask & (1 << dev->devno))
continue;
ata_eh_park_issue_cmd(dev, 1);
}
}
now = jiffies;
if (time_before_eq(deadline, now))
break;
ata_eh_release(ap);
deadline = wait_for_completion_timeout(&ap->park_req_pending,
deadline - now);
ata_eh_acquire(ap);
} while (deadline);
ata_for_each_link(link, ap, EDGE) {
ata_for_each_dev(dev, link, ALL) {
if (!(link->eh_context.unloaded_mask &
(1 << dev->devno)))
continue;
ata_eh_park_issue_cmd(dev, 0);
ata_eh_done(link, dev, ATA_EH_PARK);
}
}
/* the rest */
nr_fails = 0;
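/*
 * Recover fan-out links before the host link: with a PMP attached
 * the host link carries no drive of its own and only needs its LPM
 * policy configured.
 */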
ata_for_each_link(link, ap, PMP_FIRST) {
struct ata_eh_context *ehc = &link->eh_context;
if (sata_pmp_attached(ap) && ata_is_host_link(link))
goto config_lpm;
/* revalidate existing devices and attach new ones */
rc = ata_eh_revalidate_and_attach(link, &dev);
if (rc)
goto rest_fail;
/* if PMP got attached, return, pmp EH will take care of it */
if (link->device->class == ATA_DEV_PMP) {
ehc->i.action = 0;
return 0;
}
/* configure transfer mode if necessary */
if (ehc->i.flags & ATA_EHI_SETMODE) {
rc = ata_set_mode(link, &dev);
if (rc)
goto rest_fail;
ehc->i.flags &= ~ATA_EHI_SETMODE;
}
/* If reset has been issued, clear UA to avoid
* disrupting the current users of the device.
*/
if (ehc->i.flags & ATA_EHI_DID_RESET) {
ata_for_each_dev(dev, link, ALL) {
if (dev->class != ATA_DEV_ATAPI)
continue;
rc = atapi_eh_clear_ua(dev);
if (rc)
goto rest_fail;
if (zpodd_dev_enabled(dev))
zpodd_post_poweron(dev);
}
}
/* retry flush if necessary */
ata_for_each_dev(dev, link, ALL) {
if (dev->class != ATA_DEV_ATA &&
dev->class != ATA_DEV_ZAC)
continue;
rc = ata_eh_maybe_retry_flush(dev);
if (rc)
goto rest_fail;
}
config_lpm:
/* configure link power saving */
if (link->lpm_policy != ap->target_lpm_policy) {
rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
if (rc)
goto rest_fail;
}
/* this link is okay now */
ehc->i.flags = 0;
continue;
rest_fail:
nr_fails++;
if (dev)
ata_eh_handle_dev_fail(dev, rc);
if (ata_port_is_frozen(ap)) {
/* PMP reset requires working host port.
* Can't retry if it's frozen.
*/
if (sata_pmp_attached(ap))
goto out;
break;
}
}
if (nr_fails)
goto retry;
out:
if (rc && r_failed_link)
*r_failed_link = link;
return rc;
}
/**
* ata_eh_finish - finish up EH
* @ap: host port to finish EH for
*
* Recovery is complete. Clean up EH states and retry or finish
* failed qcs.
*
* LOCKING:
* None.
*/
void ata_eh_finish(struct ata_port *ap)
{
struct ata_queued_cmd *qc;
int tag;
/* retry or finish qcs */
ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_EH))
continue;
if (qc->err_mask) {
/* FIXME: Once EH migration is complete,
* generate sense data in this function,
* considering both err_mask and tf.
*/
if (qc->flags & ATA_QCFLAG_RETRY) {
/*
* Since qc->err_mask is set, ata_eh_qc_retry()
* will not increment scmd->allowed, so upper
* layer will only retry the command if it has
* not already been retried too many times.
*/
ata_eh_qc_retry(qc);
} else {
ata_eh_qc_complete(qc);
}
} else {
if (qc->flags & ATA_QCFLAG_SENSE_VALID ||
qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) {
ata_eh_qc_complete(qc);
} else {
/* feed zero TF to sense generation */
memset(&qc->result_tf, 0, sizeof(qc->result_tf));
/*
* Since qc->err_mask is not set,
* ata_eh_qc_retry() will increment
* scmd->allowed, so upper layer is guaranteed
* to retry the command.
*/
ata_eh_qc_retry(qc);
}
}
}
/* make sure nr_active_links is zero after EH */
WARN_ON(ap->nr_active_links);
ap->nr_active_links = 0;
}
/**
* ata_do_eh - do standard error handling
* @ap: host port to handle error for
* @prereset: prereset method (can be NULL)
* @softreset: softreset method (can be NULL)
* @hardreset: hardreset method (can be NULL)
* @postreset: postreset method (can be NULL)
*
* Perform standard error handling sequence.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
ata_postreset_fn_t postreset)
{
struct ata_device *dev;
int rc;
ata_eh_autopsy(ap);
ata_eh_report(ap);
rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
NULL);
if (rc) {
ata_for_each_dev(dev, &ap->link, ALL)
ata_dev_disable(dev);
}
ata_eh_finish(ap);
}
/**
* ata_std_error_handler - standard error handler
* @ap: host port to handle error for
*
* Standard error handler
*
* LOCKING:
* Kernel thread context (may sleep).
*/
void ata_std_error_handler(struct ata_port *ap)
{
struct ata_port_operations *ops = ap->ops;
ata_reset_fn_t hardreset = ops->hardreset;
/* ignore built-in hardreset if SCR access is not available */
if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
hardreset = NULL;
ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
}
EXPORT_SYMBOL_GPL(ata_std_error_handler);
#ifdef CONFIG_PM
/**
* ata_eh_handle_port_suspend - perform port suspend operation
* @ap: port to suspend
*
* Suspend @ap.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
unsigned long flags;
int rc = 0;
struct ata_device *dev;
/* are we suspending? */
spin_lock_irqsave(ap->lock, flags);
if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
ap->pm_mesg.event & PM_EVENT_RESUME) {
spin_unlock_irqrestore(ap->lock, flags);
return;
}
spin_unlock_irqrestore(ap->lock, flags);
WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
/*
* If we have a ZPODD attached, check its zero
* power ready status before the port is frozen.
* Only needed for runtime suspend.
*/
if (PMSG_IS_AUTO(ap->pm_mesg)) {
ata_for_each_dev(dev, &ap->link, ENABLED) {
if (zpodd_dev_enabled(dev))
zpodd_on_suspend(dev);
}
}
/* suspend */
ata_eh_freeze_port(ap);
if (ap->ops->port_suspend)
rc = ap->ops->port_suspend(ap, ap->pm_mesg);
ata_acpi_set_state(ap, ap->pm_mesg);
/* update the flags */
spin_lock_irqsave(ap->lock, flags);
ap->pflags &= ~ATA_PFLAG_PM_PENDING;
if (rc == 0)
ap->pflags |= ATA_PFLAG_SUSPENDED;
else if (ata_port_is_frozen(ap))
ata_port_schedule_eh(ap);
spin_unlock_irqrestore(ap->lock, flags);
return;
}
/**
* ata_eh_handle_port_resume - perform port resume operation
* @ap: port to resume
*
* Resume @ap.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
struct ata_link *link;
struct ata_device *dev;
unsigned long flags;
/* are we resuming? */
spin_lock_irqsave(ap->lock, flags);
if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
!(ap->pm_mesg.event & PM_EVENT_RESUME)) {
spin_unlock_irqrestore(ap->lock, flags);
return;
}
spin_unlock_irqrestore(ap->lock, flags);
WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
/*
* Error timestamps are kept in jiffies, which don't advance while
* the system is suspended, and PHY events during resume aren't
* uncommon. Combined, these can lead to unnecessary speed-downs
* when the machine is suspended and resumed repeatedly. Clear the
* error history.
*/
ata_for_each_link(link, ap, HOST_FIRST)
ata_for_each_dev(dev, link, ALL)
ata_ering_clear(&dev->ering);
ata_acpi_set_state(ap, ap->pm_mesg);
if (ap->ops->port_resume)
ap->ops->port_resume(ap);
/* tell ACPI that we're resuming */
ata_acpi_on_resume(ap);
/* update the flags */
spin_lock_irqsave(ap->lock, flags);
ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PM */
| linux-master | drivers/ata/libata-eh.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* AHCI SATA platform driver
*
* Copyright 2004-2005 Red Hat, Inc.
* Jeff Garzik <[email protected]>
* Copyright 2010 MontaVista Software, LLC.
* Anton Vorontsov <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/libata.h>
#include <linux/ahci_platform.h>
#include <linux/pci_ids.h>
#include "ahci.h"
#define DRV_NAME "ahci"
static const struct ata_port_info ahci_port_info = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_platform_ops,
};
static const struct ata_port_info ahci_port_info_nolpm = {
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_LPM,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_platform_ops,
};
static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
static int ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
const struct ata_port_info *port;
int rc;
hpriv = ahci_platform_get_resources(pdev,
AHCI_PLATFORM_GET_RESETS);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
rc = ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
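/* this HiSilicon AHCI implementation must not use FBS or NCQ */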
if (device_is_compatible(dev, "hisilicon,hisi-ahci"))
hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
port = device_get_match_data(dev);
if (!port)
port = &ahci_port_info;
rc = ahci_platform_init_host(pdev, hpriv, port,
&ahci_platform_sht);
if (rc)
goto disable_resources;
return 0;
disable_resources:
ahci_platform_disable_resources(hpriv);
return rc;
}
static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
ahci_platform_resume);
static const struct of_device_id ahci_of_match[] = {
{ .compatible = "generic-ahci", },
/* Keep the following compatibles for device tree compatibility */
{ .compatible = "ibm,476gtr-ahci", },
{ .compatible = "hisilicon,hisi-ahci", },
{ .compatible = "cavium,octeon-7130-ahci", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
static const struct acpi_device_id ahci_acpi_match[] = {
{ "APMC0D33", (unsigned long)&ahci_port_info_nolpm },
{ ACPI_DEVICE_CLASS(PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff) },
{},
};
MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
static struct platform_driver ahci_driver = {
.probe = ahci_probe,
.remove_new = ata_platform_remove_one,
.shutdown = ahci_platform_shutdown,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_of_match,
.acpi_match_table = ahci_acpi_match,
.pm = &ahci_pm_ops,
},
};
module_platform_driver(ahci_driver);
MODULE_DESCRIPTION("AHCI SATA platform driver");
MODULE_AUTHOR("Anton Vorontsov <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ahci");
| linux-master | drivers/ata/ahci_platform.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_sch.c - Intel SCH PATA controllers
*
* Copyright (c) 2008 Alek Du <[email protected]>
*/
/*
* Supports:
* Intel SCH (AF82US15W, AF82US15L, AF82UL11L) chipsets -- see spec at:
* http://download.intel.com/design/chipsets/embedded/datashts/319537.pdf
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>
#define DRV_NAME "pata_sch"
#define DRV_VERSION "0.2"
/* see SCH datasheet page 351 */
enum {
D0TIM = 0x80, /* Device 0 Timing Register */
D1TIM = 0x84, /* Device 1 Timing Register */
PM = 0x07, /* PIO Mode Bit Mask */
MDM = (0x03 << 8), /* Multi-word DMA Mode Bit Mask */
UDM = (0x07 << 16), /* Ultra DMA Mode Bit Mask */
PPE = (1 << 30), /* Prefetch/Post Enable */
USD = (1 << 31), /* Use Synchronous DMA */
};
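/*
* Worked example (see sch_set_dmamode() below): programming UDMA5 on
* device 0 sets USD and writes 5 into bits 18:16 of D0TIM (the UDM
* field), while MWDMA2 clears USD and writes 2 into bits 9:8 (MDM).
*/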
static int sch_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev);
static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev);
static const struct pci_device_id sch_pci_tbl[] = {
/* Intel SCH PATA Controller */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SCH_IDE), 0 },
{ } /* terminate list */
};
static struct pci_driver sch_pci_driver = {
.name = DRV_NAME,
.id_table = sch_pci_tbl,
.probe = sch_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
static const struct scsi_host_template sch_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations sch_pata_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = ata_cable_unknown,
.set_piomode = sch_set_piomode,
.set_dmamode = sch_set_dmamode,
};
static const struct ata_port_info sch_port_info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &sch_pata_ops,
};
MODULE_AUTHOR("Alek Du <[email protected]>");
MODULE_DESCRIPTION("SCSI low-level driver for Intel SCH PATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sch_pci_tbl);
MODULE_VERSION(DRV_VERSION);
/**
* sch_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: ATA device
*
* Set PIO mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *dev = to_pci_dev(ap->host->dev);
unsigned int port = adev->devno ? D1TIM : D0TIM;
unsigned int data;
pci_read_config_dword(dev, port, &data);
/* see SCH datasheet page 351 */
/* set PIO mode */
data &= ~(PM | PPE);
data |= pio;
/* enable PPE for block device */
if (adev->class == ATA_DEV_ATA)
data |= PPE;
pci_write_config_dword(dev, port, data);
}
/**
* sch_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: ATA device
*
* Set MW/UDMA mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
unsigned int dma_mode = adev->dma_mode;
struct pci_dev *dev = to_pci_dev(ap->host->dev);
unsigned int port = adev->devno ? D1TIM : D0TIM;
unsigned int data;
pci_read_config_dword(dev, port, &data);
/* see SCH datasheet page 351 */
if (dma_mode >= XFER_UDMA_0) {
/* enable Synchronous DMA mode */
data |= USD;
data &= ~UDM;
data |= (dma_mode - XFER_UDMA_0) << 16;
} else { /* must be MWDMA mode, since we masked SWDMA already */
data &= ~(USD | MDM);
data |= (dma_mode - XFER_MW_DMA_0) << 8;
}
pci_write_config_dword(dev, port, data);
}
/**
* sch_init_one - Register SCH ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in sch_pci_tbl matching with @pdev
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int sch_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct ata_port_info *ppi[] = { &sch_port_info, NULL };
ata_print_version_once(&pdev->dev, DRV_VERSION);
return ata_pci_bmdma_init_one(pdev, ppi, &sch_sht, NULL, 0);
}
module_pci_driver(sch_pci_driver);
| linux-master | drivers/ata/pata_sch.c |
/*
* Freescale iMX PATA driver
*
* Copyright (C) 2011 Arnaud Patard <[email protected]>
*
* Based on pata_platform - Copyright (C) 2006 - 2007 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* TODO:
* - dmaengine support
*/
#include <linux/ata.h>
#include <linux/clk.h>
#include <linux/libata.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#define DRV_NAME "pata_imx"
#define PATA_IMX_ATA_TIME_OFF 0x00
#define PATA_IMX_ATA_TIME_ON 0x01
#define PATA_IMX_ATA_TIME_1 0x02
#define PATA_IMX_ATA_TIME_2W 0x03
#define PATA_IMX_ATA_TIME_2R 0x04
#define PATA_IMX_ATA_TIME_AX 0x05
#define PATA_IMX_ATA_TIME_PIO_RDX 0x06
#define PATA_IMX_ATA_TIME_4 0x07
#define PATA_IMX_ATA_TIME_9 0x08
#define PATA_IMX_ATA_CONTROL 0x24
#define PATA_IMX_ATA_CTRL_FIFO_RST_B (1<<7)
#define PATA_IMX_ATA_CTRL_ATA_RST_B (1<<6)
#define PATA_IMX_ATA_CTRL_IORDY_EN (1<<0)
#define PATA_IMX_ATA_INT_EN 0x2C
#define PATA_IMX_ATA_INTR_ATA_INTRQ2 (1<<3)
#define PATA_IMX_DRIVE_DATA 0xA0
#define PATA_IMX_DRIVE_CONTROL 0xD8
static u32 pio_t4[] = { 30, 20, 15, 10, 10 };
static u32 pio_t9[] = { 20, 15, 10, 10, 10 };
static u32 pio_tA[] = { 35, 35, 35, 35, 35 };
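/*
* Minimum t4, t9 and tA timings in nanoseconds, indexed by PIO mode
* 0-4; pata_imx_set_timing() below converts them to clock counts.
*/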
struct pata_imx_priv {
struct clk *clk;
/* timings/interrupt/control regs */
void __iomem *host_regs;
u32 ata_ctl;
};
static void pata_imx_set_timing(struct ata_device *adev,
struct pata_imx_priv *priv)
{
struct ata_timing timing;
unsigned long clkrate;
u32 T, mode;
clkrate = clk_get_rate(priv->clk);
if (adev->pio_mode < XFER_PIO_0 || adev->pio_mode > XFER_PIO_4 ||
!clkrate)
return;
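/* T is the bus clock period in nanoseconds (e.g. 66 MHz -> T = 15) */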
T = 1000000000 / clkrate;
ata_timing_compute(adev, adev->pio_mode, &timing, T * 1000, 0);
mode = adev->pio_mode - XFER_PIO_0;
writeb(3, priv->host_regs + PATA_IMX_ATA_TIME_OFF);
writeb(3, priv->host_regs + PATA_IMX_ATA_TIME_ON);
writeb(timing.setup, priv->host_regs + PATA_IMX_ATA_TIME_1);
writeb(timing.act8b, priv->host_regs + PATA_IMX_ATA_TIME_2W);
writeb(timing.act8b, priv->host_regs + PATA_IMX_ATA_TIME_2R);
writeb(1, priv->host_regs + PATA_IMX_ATA_TIME_PIO_RDX);
writeb(pio_t4[mode] / T + 1, priv->host_regs + PATA_IMX_ATA_TIME_4);
writeb(pio_t9[mode] / T + 1, priv->host_regs + PATA_IMX_ATA_TIME_9);
writeb(pio_tA[mode] / T + 1, priv->host_regs + PATA_IMX_ATA_TIME_AX);
}
static void pata_imx_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct pata_imx_priv *priv = ap->host->private_data;
u32 val;
pata_imx_set_timing(adev, priv);
val = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
if (ata_pio_need_iordy(adev))
val |= PATA_IMX_ATA_CTRL_IORDY_EN;
else
val &= ~PATA_IMX_ATA_CTRL_IORDY_EN;
__raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
}
static const struct scsi_host_template pata_imx_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations pata_imx_port_ops = {
.inherits = &ata_sff_port_ops,
.sff_data_xfer = ata_sff_data_xfer32,
.cable_detect = ata_cable_unknown,
.set_piomode = pata_imx_set_piomode,
};
static void pata_imx_setup_port(struct ata_ioports *ioaddr)
{
/* Taskfile registers on the i.MX are spaced 4 bytes apart (shift of 2) */
ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << 2);
ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << 2);
ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << 2);
ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << 2);
ioaddr->lbal_addr = ioaddr->cmd_addr + (ATA_REG_LBAL << 2);
ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << 2);
ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << 2);
ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << 2);
ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << 2);
ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2);
}
static int pata_imx_probe(struct platform_device *pdev)
{
struct ata_host *host;
struct ata_port *ap;
struct pata_imx_priv *priv;
int irq = 0;
struct resource *io_res;
int ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
priv = devm_kzalloc(&pdev->dev,
sizeof(struct pata_imx_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "Failed to get and enable clock\n");
return PTR_ERR(priv->clk);
}
host = ata_host_alloc(&pdev->dev, 1);
if (!host)
return -ENOMEM;
host->private_data = priv;
ap = host->ports[0];
ap->ops = &pata_imx_port_ops;
ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_SLAVE_POSS;
priv->host_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &io_res);
if (IS_ERR(priv->host_regs))
return PTR_ERR(priv->host_regs);
ap->ioaddr.cmd_addr = priv->host_regs + PATA_IMX_DRIVE_DATA;
ap->ioaddr.ctl_addr = priv->host_regs + PATA_IMX_DRIVE_CONTROL;
ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
pata_imx_setup_port(&ap->ioaddr);
ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
(unsigned long long)io_res->start + PATA_IMX_DRIVE_DATA,
(unsigned long long)io_res->start + PATA_IMX_DRIVE_CONTROL);
/* deassert resets */
__raw_writel(PATA_IMX_ATA_CTRL_FIFO_RST_B |
PATA_IMX_ATA_CTRL_ATA_RST_B,
priv->host_regs + PATA_IMX_ATA_CONTROL);
/* enable interrupts */
__raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2,
priv->host_regs + PATA_IMX_ATA_INT_EN);
/* activate */
ret = ata_host_activate(host, irq, ata_sff_interrupt, 0,
&pata_imx_sht);
if (ret)
return ret;
return 0;
}
static void pata_imx_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct pata_imx_priv *priv = host->private_data;
ata_host_detach(host);
__raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
}
#ifdef CONFIG_PM_SLEEP
static int pata_imx_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct pata_imx_priv *priv = host->private_data;
ata_host_suspend(host, PMSG_SUSPEND);
__raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
priv->ata_ctl = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
clk_disable_unprepare(priv->clk);
return 0;
}
static int pata_imx_resume(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct pata_imx_priv *priv = host->private_data;
int ret = clk_prepare_enable(priv->clk);
if (ret)
return ret;
__raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL);
__raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2,
priv->host_regs + PATA_IMX_ATA_INT_EN);
ata_host_resume(host);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(pata_imx_pm_ops, pata_imx_suspend, pata_imx_resume);
static const struct of_device_id imx_pata_dt_ids[] = {
{
.compatible = "fsl,imx27-pata",
}, {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(of, imx_pata_dt_ids);
static struct platform_driver pata_imx_driver = {
.probe = pata_imx_probe,
.remove_new = pata_imx_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = imx_pata_dt_ids,
.pm = &pata_imx_pm_ops,
},
};
module_platform_driver(pata_imx_driver);
MODULE_AUTHOR("Arnaud Patard <[email protected]>");
MODULE_DESCRIPTION("low-level driver for iMX PATA");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/ata/pata_imx.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ata_piix.c - Intel PATA/SATA controllers
*
* Maintained by: Tejun Heo <[email protected]>
* Please ALWAYS copy [email protected]
* on emails.
*
* Copyright 2003-2005 Red Hat Inc
* Copyright 2003-2005 Jeff Garzik
*
* Copyright header from piix.c:
*
* Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
* Copyright (C) 1998-2000 Andre Hedrick <[email protected]>
* Copyright (C) 2003 Red Hat Inc
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* Hardware documentation available at http://developer.intel.com/
*
* Documentation
* Publicly available from Intel web site. Errata documentation
* is also publicly available. As an aid to anyone hacking on this
* driver the list of errata that are relevant is below, going back to
* PIIX4. Older device documentation is now a bit tricky to find.
*
* The chipsets all follow very much the same design. The original Triton
* series chipsets do _not_ support independent device timings, but this
* is fixed in Triton II. With the odd mobile exception the chips then
* change little except in gaining more modes until SATA arrives. This
* driver supports only the chips with independent timing (that is those
* with SITRE and the 0x44 timing register). See pata_oldpiix and pata_mpiix
* for the early chip drivers.
*
* Errata of note:
*
* Unfixable
* PIIX4 errata #9 - Only on ultra obscure hw
* ICH3 errata #13 - Not observed to affect real hw
* by Intel
*
* Things we must deal with
* PIIX4 errata #10 - BM IDE hang with non UDMA
* (must stop/start dma to recover)
* 440MX errata #15 - As PIIX4 errata #10
* PIIX4 errata #15 - Must not read control registers
* during a PIO transfer
* 440MX errata #13 - As PIIX4 errata #15
* ICH2 errata #21 - DMA mode 0 doesn't work right
* ICH0/1 errata #55 - As ICH2 errata #21
* ICH2 spec c #9 - Extra operations needed to handle
* drive hotswap [NOT YET SUPPORTED]
* ICH2 spec c #20 - IDE PRD must not cross a 64K boundary
* and must be dword aligned
* ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3
* ICH7 errata #16 - MWDMA1 timings are incorrect
*
* Should have been BIOS fixed:
* 450NX: errata #19 - DMA hangs on old 450NX
* 450NX: errata #20 - DMA hangs on old 450NX
* 450NX: errata #25 - Corruption with DMA on old 450NX
* ICH3 errata #15 - IDE deadlock under high load
* (BIOS must set dev 31 fn 0 bit 23)
* ICH3 errata #18 - Don't use native mode
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>
#include <trace/events/libata.h>
#define DRV_NAME "ata_piix"
#define DRV_VERSION "2.13"
enum {
PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
ICH5_PMR = 0x90, /* address map register */
ICH5_PCS = 0x92, /* port control and status */
PIIX_SIDPR_BAR = 5,
PIIX_SIDPR_LEN = 16,
PIIX_SIDPR_IDX = 0,
PIIX_SIDPR_DATA = 4,
PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */
PIIX_FLAG_SIDPR = (1 << 29), /* SATA idx/data pair regs */
PIIX_PATA_FLAGS = ATA_FLAG_SLAVE_POSS,
PIIX_SATA_FLAGS = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR,
PIIX_FLAG_PIO16 = (1 << 30), /* support 16-bit PIO only */
PIIX_80C_PRI = (1 << 5) | (1 << 4),
PIIX_80C_SEC = (1 << 7) | (1 << 6),
/* constants for mapping table */
P0 = 0, /* port 0 */
P1 = 1, /* port 1 */
P2 = 2, /* port 2 */
P3 = 3, /* port 3 */
IDE = -1, /* IDE */
NA = -2, /* not available */
RV = -3, /* reserved */
PIIX_AHCI_DEVICE = 6,
/* host->flags bits */
PIIX_HOST_BROKEN_SUSPEND = (1 << 24),
};
enum piix_controller_ids {
/* controller IDs */
piix_pata_mwdma, /* PIIX3 MWDMA only */
piix_pata_33, /* PIIX4 at 33 MHz */
ich_pata_33, /* ICH up to UDMA 33 only */
ich_pata_66, /* ICH up to 66 MHz */
ich_pata_100, /* ICH up to UDMA 100 */
ich_pata_100_nomwdma1, /* ICH up to UDMA 100 but with no MWDMA1*/
ich5_sata,
ich6_sata,
ich6m_sata,
ich8_sata,
ich8_2port_sata,
ich8m_apple_sata, /* locks up on second port enable */
tolapai_sata,
piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */
ich8_sata_snb,
ich8_2port_sata_snb,
ich8_2port_sata_byt,
};
struct piix_map_db {
const u32 mask;
const u16 port_enable;
const int map[][4];
};
struct piix_host_priv {
const int *map;
u32 saved_iocfg;
void __iomem *sidpr;
};
static unsigned int in_module_init = 1;
static const struct pci_device_id piix_pci_tbl[] = {
/* Intel PIIX3 for the 430HX etc */
{ 0x8086, 0x7010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_mwdma },
/* VMware ICH4 */
{ 0x8086, 0x7111, 0x15ad, 0x1976, 0, 0, piix_pata_vmw },
/* Intel PIIX4 for the 430TX/440BX/MX chipset: UDMA 33 */
/* Also PIIX4E (fn3 rev 2) and PIIX4M (fn3 rev 3) */
{ 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
/* Intel PIIX4 */
{ 0x8086, 0x7199, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
/* Intel PIIX4 */
{ 0x8086, 0x7601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
/* Intel PIIX */
{ 0x8086, 0x84CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
/* Intel ICH (i810, i815, i840) UDMA 66*/
{ 0x8086, 0x2411, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_66 },
/* Intel ICH0 : UDMA 33*/
{ 0x8086, 0x2421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_33 },
/* Intel ICH2M */
{ 0x8086, 0x244A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* Intel ICH2 (i810E2, i845, 850, 860) UDMA 100 */
{ 0x8086, 0x244B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* Intel ICH3M */
{ 0x8086, 0x248A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* Intel ICH3 (E7500/1) UDMA 100 */
{ 0x8086, 0x248B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* Intel ICH4-L */
{ 0x8086, 0x24C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* Intel ICH4 (i845GV, i845E, i852, i855) UDMA 100 */
{ 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
{ 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* Intel ICH5 */
{ 0x8086, 0x24DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* C-ICH (i810E2) */
{ 0x8086, 0x245B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* ESB (855GME/875P + 6300ESB) UDMA 100 */
{ 0x8086, 0x25A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* ICH6 (and 6) (i915) UDMA 100 */
{ 0x8086, 0x266F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* ICH7/7-R (i945, i975) UDMA 100*/
{ 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100_nomwdma1 },
{ 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100_nomwdma1 },
/* ICH8 Mobile PATA Controller */
{ 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* SATA ports */
/* 82801EB (ICH5) */
{ 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
/* 82801EB (ICH5) */
{ 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
/* 6300ESB (ICH5 variant with broken PCS present bits) */
{ 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
/* 6300ESB pretending RAID */
{ 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
/* 82801FB/FW (ICH6/ICH6W) */
{ 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
/* 82801FR/FRW (ICH6R/ICH6RW) */
{ 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
/* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented).
* Attach iff the controller is in IDE mode. */
{ 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE << 8, 0xffff00, ich6m_sata },
/* 82801GB/GR/GH (ICH7, identical to ICH6) */
{ 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
/* 82801GBM/GHM (ICH7M, identical to ICH6M) */
{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata },
/* Enterprise Southbridge 2 (631xESB/632xESB) */
{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
/* SATA Controller 1 IDE (ICH8) */
{ 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
/* SATA Controller 2 IDE (ICH8) */
{ 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* Mobile SATA Controller IDE (ICH8M), Apple */
{ 0x8086, 0x2828, 0x106b, 0x00a0, 0, 0, ich8m_apple_sata },
{ 0x8086, 0x2828, 0x106b, 0x00a1, 0, 0, ich8m_apple_sata },
{ 0x8086, 0x2828, 0x106b, 0x00a3, 0, 0, ich8m_apple_sata },
/* Mobile SATA Controller IDE (ICH8M) */
{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
/* SATA Controller IDE (ICH9) */
{ 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
/* SATA Controller IDE (ICH9) */
{ 0x8086, 0x2921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (ICH9) */
{ 0x8086, 0x2926, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (ICH9M) */
{ 0x8086, 0x2928, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (ICH9M) */
{ 0x8086, 0x292d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (ICH9M) */
{ 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
/* SATA Controller IDE (Tolapai) */
{ 0x8086, 0x5028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, tolapai_sata },
/* SATA Controller IDE (ICH10) */
{ 0x8086, 0x3a00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
/* SATA Controller IDE (ICH10) */
{ 0x8086, 0x3a06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (ICH10) */
{ 0x8086, 0x3a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
/* SATA Controller IDE (ICH10) */
{ 0x8086, 0x3a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (PCH) */
{ 0x8086, 0x3b20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
/* SATA Controller IDE (PCH) */
{ 0x8086, 0x3b21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (PCH) */
{ 0x8086, 0x3b26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (PCH) */
{ 0x8086, 0x3b28, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
/* SATA Controller IDE (PCH) */
{ 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (PCH) */
{ 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
/* SATA Controller IDE (CPT) */
{ 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (CPT) */
{ 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (CPT) */
{ 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (CPT) */
{ 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (PBG) */
{ 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (PBG) */
{ 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (Panther Point) */
{ 0x8086, 0x1e00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Panther Point) */
{ 0x8086, 0x1e01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Panther Point) */
{ 0x8086, 0x1e08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (Panther Point) */
{ 0x8086, 0x1e09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (Lynx Point) */
{ 0x8086, 0x8c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Lynx Point) */
{ 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Lynx Point) */
{ 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
/* SATA Controller IDE (Lynx Point) */
{ 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (Lynx Point-LP) */
{ 0x8086, 0x9c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Lynx Point-LP) */
{ 0x8086, 0x9c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Lynx Point-LP) */
{ 0x8086, 0x9c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (Lynx Point-LP) */
{ 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (DH89xxCC) */
{ 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (Avoton) */
{ 0x8086, 0x1f20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Avoton) */
{ 0x8086, 0x1f21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Avoton) */
{ 0x8086, 0x1f30, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (Avoton) */
{ 0x8086, 0x1f31, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (Wellsburg) */
{ 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Wellsburg) */
{ 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
/* SATA Controller IDE (Wellsburg) */
{ 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Wellsburg) */
{ 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (BayTrail) */
{ 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
{ 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
/* SATA Controller IDE (Coleto Creek) */
{ 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (9 Series) */
{ 0x8086, 0x8c88, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
/* SATA Controller IDE (9 Series) */
{ 0x8086, 0x8c89, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
/* SATA Controller IDE (9 Series) */
{ 0x8086, 0x8c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (9 Series) */
{ 0x8086, 0x8c81, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
{ } /* terminate list */
};
static const struct piix_map_db ich5_map_db = {
.mask = 0x7,
.port_enable = 0x3,
.map = {
/* PM PS SM SS MAP */
{ P0, NA, P1, NA }, /* 000b */
{ P1, NA, P0, NA }, /* 001b */
{ RV, RV, RV, RV },
{ RV, RV, RV, RV },
{ P0, P1, IDE, IDE }, /* 100b */
{ P1, P0, IDE, IDE }, /* 101b */
{ IDE, IDE, P0, P1 }, /* 110b */
{ IDE, IDE, P1, P0 }, /* 111b */
},
};
static const struct piix_map_db ich6_map_db = {
.mask = 0x3,
.port_enable = 0xf,
.map = {
/* PM PS SM SS MAP */
{ P0, P2, P1, P3 }, /* 00b */
{ IDE, IDE, P1, P3 }, /* 01b */
{ P0, P2, IDE, IDE }, /* 10b */
{ RV, RV, RV, RV },
},
};
static const struct piix_map_db ich6m_map_db = {
.mask = 0x3,
.port_enable = 0x5,
/* Map 01b isn't specified in the doc but some notebooks use
* it anyway. MAP 01b has been spotted on both ICH6M and
* ICH7M.
*/
.map = {
/* PM PS SM SS MAP */
{ P0, P2, NA, NA }, /* 00b */
{ IDE, IDE, P1, P3 }, /* 01b */
{ P0, P2, IDE, IDE }, /* 10b */
{ RV, RV, RV, RV },
},
};
static const struct piix_map_db ich8_map_db = {
.mask = 0x3,
.port_enable = 0xf,
.map = {
/* PM PS SM SS MAP */
{ P0, P2, P1, P3 }, /* 00b (hardwired when in AHCI) */
{ RV, RV, RV, RV },
{ P0, P2, IDE, IDE }, /* 10b (IDE mode) */
{ RV, RV, RV, RV },
},
};
static const struct piix_map_db ich8_2port_map_db = {
.mask = 0x3,
.port_enable = 0x3,
.map = {
/* PM PS SM SS MAP */
{ P0, NA, P1, NA }, /* 00b */
{ RV, RV, RV, RV }, /* 01b */
{ RV, RV, RV, RV }, /* 10b */
{ RV, RV, RV, RV },
},
};
static const struct piix_map_db ich8m_apple_map_db = {
.mask = 0x3,
.port_enable = 0x1,
.map = {
/* PM PS SM SS MAP */
{ P0, NA, NA, NA }, /* 00b */
{ RV, RV, RV, RV },
{ P0, P2, IDE, IDE }, /* 10b */
{ RV, RV, RV, RV },
},
};
static const struct piix_map_db tolapai_map_db = {
.mask = 0x3,
.port_enable = 0x3,
.map = {
/* PM PS SM SS MAP */
{ P0, NA, P1, NA }, /* 00b */
{ RV, RV, RV, RV }, /* 01b */
{ RV, RV, RV, RV }, /* 10b */
{ RV, RV, RV, RV },
},
};
static const struct piix_map_db *piix_map_db_table[] = {
[ich5_sata] = &ich5_map_db,
[ich6_sata] = &ich6_map_db,
[ich6m_sata] = &ich6m_map_db,
[ich8_sata] = &ich8_map_db,
[ich8_2port_sata] = &ich8_2port_map_db,
[ich8m_apple_sata] = &ich8m_apple_map_db,
[tolapai_sata] = &tolapai_map_db,
[ich8_sata_snb] = &ich8_map_db,
[ich8_2port_sata_snb] = &ich8_2port_map_db,
[ich8_2port_sata_byt] = &ich8_2port_map_db,
};
static const struct pci_bits piix_enable_bits[] = {
{ 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
{ 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
};
MODULE_AUTHOR("Andre Hedrick, Alan Cox, Andrzej Krzysztofowicz, Jeff Garzik");
MODULE_DESCRIPTION("SCSI low-level driver for Intel PIIX/ICH ATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
MODULE_VERSION(DRV_VERSION);
struct ich_laptop {
u16 device;
u16 subvendor;
u16 subdevice;
};
/*
* List of laptops that use short cables rather than 80 wire
*/
static const struct ich_laptop ich_laptop[] = {
/* devid, subvendor, subdev */
{ 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */
{ 0x27DF, 0x1025, 0x0102 }, /* ICH7 on Acer 5602aWLMi */
{ 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */
{ 0x27DF, 0x1028, 0x02b0 }, /* ICH7 on unknown Dell */
{ 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */
{ 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */
{ 0x27DF, 0x103C, 0x361a }, /* ICH7 on unknown HP */
{ 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */
{ 0x27DF, 0x152D, 0x0778 }, /* ICH7 on unknown Intel */
{ 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */
{ 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */
{ 0x24CA, 0x10CF, 0x11AB }, /* ICH4M on Fujitsu-Siemens Lifebook S6120 */
{ 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */
{ 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */
{ 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */
/* end marker */
{ 0, }
};
static int piix_port_start(struct ata_port *ap)
{
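/* allow 32-bit PIO unless the controller is flagged as 16-bit only */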
if (!(ap->flags & PIIX_FLAG_PIO16))
ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
return ata_bmdma_port_start(ap);
}
/**
* ich_pata_cable_detect - Probe host controller cable detect info
* @ap: Port for which cable detect info is desired
*
* Read 80c cable indicator from ATA PCI device's PCI config
* register. This register is normally set by firmware (BIOS).
*
* LOCKING:
* None (inherited from caller).
*/
static int ich_pata_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct piix_host_priv *hpriv = ap->host->private_data;
const struct ich_laptop *lap = &ich_laptop[0];
u8 mask;
/* Check for specials */
while (lap->device) {
if (lap->device == pdev->device &&
lap->subvendor == pdev->subsystem_vendor &&
lap->subdevice == pdev->subsystem_device)
return ATA_CBL_PATA40_SHORT;
lap++;
}
/* check BIOS cable detect results */
mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
if ((hpriv->saved_iocfg & mask) == 0)
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;
}
/**
* piix_pata_prereset - prereset for PATA host controller
* @link: Target link
* @deadline: deadline jiffies for the operation
*
* LOCKING:
* None (inherited from caller).
*/
static int piix_pata_prereset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
static DEFINE_SPINLOCK(piix_lock);
static void piix_set_timings(struct ata_port *ap, struct ata_device *adev,
u8 pio)
{
struct pci_dev *dev = to_pci_dev(ap->host->dev);
unsigned long flags;
unsigned int is_slave = (adev->devno != 0);
unsigned int master_port = ap->port_no ? 0x42 : 0x40;
unsigned int slave_port = 0x44;
u16 master_data;
u8 slave_data;
u8 udma_enable;
int control = 0;
/*
* See Intel Document 298600-004 for the timing programming rules
* for ICH controllers.
*/
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
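/* rows are PIO modes 0-4; each entry is { ISP, RCT } in clock counts */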
if (pio >= 2)
control |= 1; /* TIME1 enable */
if (ata_pio_need_iordy(adev))
control |= 2; /* IE enable */
/* Intel specifies that the PPE functionality is for disk only */
if (adev->class == ATA_DEV_ATA)
control |= 4; /* PPE enable */
/*
* If the drive MWDMA is faster than it can do PIO then
* we must force PIO into PIO0
*/
if (adev->pio_mode < XFER_PIO_0 + pio)
/* Enable DMA timing only */
control |= 8; /* PIO cycles in PIO0 */
spin_lock_irqsave(&piix_lock, flags);
/* PIO configuration clears DTE unconditionally. It will be
* programmed in set_dmamode which is guaranteed to be called
* after set_piomode if any DMA mode is available.
*/
pci_read_config_word(dev, master_port, &master_data);
if (is_slave) {
/* clear TIME1|IE1|PPE1|DTE1 */
master_data &= 0xff0f;
/* enable PPE1, IE1 and TIME1 as needed */
master_data |= (control << 4);
pci_read_config_byte(dev, slave_port, &slave_data);
slave_data &= (ap->port_no ? 0x0f : 0xf0);
/* Load the timing nibble for this slave */
slave_data |= ((timings[pio][0] << 2) | timings[pio][1])
<< (ap->port_no ? 4 : 0);
} else {
/* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */
master_data &= 0xccf0;
/* Enable PPE, IE and TIME as appropriate */
master_data |= control;
/* load ISP and RCT */
master_data |=
(timings[pio][0] << 12) |
(timings[pio][1] << 8);
}
/* Enable SITRE (separate slave timing register) */
master_data |= 0x4000;
pci_write_config_word(dev, master_port, master_data);
if (is_slave)
pci_write_config_byte(dev, slave_port, slave_data);
/* Ensure the UDMA bit is off - it will be turned back on if
UDMA is selected */
if (ap->udma_mask) {
pci_read_config_byte(dev, 0x48, &udma_enable);
udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
pci_write_config_byte(dev, 0x48, udma_enable);
}
spin_unlock_irqrestore(&piix_lock, flags);
}
/**
* piix_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Drive in question
*
* Set PIO mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
piix_set_timings(ap, adev, adev->pio_mode - XFER_PIO_0);
}
/**
* do_pata_set_dmamode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Drive in question
* @isich: set if the chip is an ICH device
*
* Set UDMA mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, int isich)
{
struct pci_dev *dev = to_pci_dev(ap->host->dev);
unsigned long flags;
u8 speed = adev->dma_mode;
int devid = adev->devno + 2 * ap->port_no;
u8 udma_enable = 0;
if (speed >= XFER_UDMA_0) {
unsigned int udma = speed - XFER_UDMA_0;
u16 udma_timing;
u16 ideconf;
int u_clock, u_speed;
spin_lock_irqsave(&piix_lock, flags);
pci_read_config_byte(dev, 0x48, &udma_enable);
/*
* UDMA is handled by a combination of clock switching and
* selection of dividers
*
* Handy rule: Odd modes are UDMATIMx 01, even are 02
* except UDMA0 which is 00
*/
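/* e.g. UDMA5 -> 01 with the 100 MHz clock, UDMA4 -> 02 with 66 MHz */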
u_speed = min(2 - (udma & 1), udma);
if (udma == 5)
u_clock = 0x1000; /* 100 MHz */
else if (udma > 2)
u_clock = 1; /* 66 MHz */
else
u_clock = 0; /* 33 MHz */
udma_enable |= (1 << devid);
/* Load the CT/RP selection */
pci_read_config_word(dev, 0x4A, &udma_timing);
udma_timing &= ~(3 << (4 * devid));
udma_timing |= u_speed << (4 * devid);
pci_write_config_word(dev, 0x4A, udma_timing);
if (isich) {
/* Select a 33/66/100 MHz clock */
pci_read_config_word(dev, 0x54, &ideconf);
ideconf &= ~(0x1001 << devid);
ideconf |= u_clock << devid;
/* For ICH or later we should set bit 10 for better
performance (WR_PingPong_En) */
pci_write_config_word(dev, 0x54, ideconf);
}
pci_write_config_byte(dev, 0x48, udma_enable);
spin_unlock_irqrestore(&piix_lock, flags);
} else {
/* MWDMA is driven by the PIO timings. */
unsigned int mwdma = speed - XFER_MW_DMA_0;
const unsigned int needed_pio[3] = {
XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
};
int pio = needed_pio[mwdma] - XFER_PIO_0;
/* XFER_PIO_0 is never used currently */
piix_set_timings(ap, adev, pio);
}
}
/**
* piix_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Drive in question
*
* Set MW/UDMA mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
do_pata_set_dmamode(ap, adev, 0);
}
/**
* ich_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Drive in question
*
* Set MW/UDMA mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
do_pata_set_dmamode(ap, adev, 1);
}
/*
* Serial ATA Index/Data Pair Superset Registers access
*
* Beginning from ICH8, there's a sane way to access SCRs using index
* and data register pair located at BAR5 which means that we have
* separate SCRs for master and slave. This is handled using libata
* slave_link facility.
*/
static const int piix_sidx_map[] = {
[SCR_STATUS] = 0,
[SCR_ERROR] = 2,
[SCR_CONTROL] = 1,
};
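/*
* The index register selects the target in bits 15:8 (port number * 2
* plus the PMP link number) and the SCR to access in bits 7:0 via
* piix_sidx_map[]; the SCR value is then transferred through the data
* register. See piix_sidpr_sel() below.
*/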
static void piix_sidpr_sel(struct ata_link *link, unsigned int reg)
{
struct ata_port *ap = link->ap;
struct piix_host_priv *hpriv = ap->host->private_data;
iowrite32(((ap->port_no * 2 + link->pmp) << 8) | piix_sidx_map[reg],
hpriv->sidpr + PIIX_SIDPR_IDX);
}
static int piix_sidpr_scr_read(struct ata_link *link,
unsigned int reg, u32 *val)
{
struct piix_host_priv *hpriv = link->ap->host->private_data;
if (reg >= ARRAY_SIZE(piix_sidx_map))
return -EINVAL;
piix_sidpr_sel(link, reg);
*val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
return 0;
}
static int piix_sidpr_scr_write(struct ata_link *link,
unsigned int reg, u32 val)
{
struct piix_host_priv *hpriv = link->ap->host->private_data;
if (reg >= ARRAY_SIZE(piix_sidx_map))
return -EINVAL;
piix_sidpr_sel(link, reg);
iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
return 0;
}
static int piix_sidpr_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
unsigned hints)
{
return sata_link_scr_lpm(link, policy, false);
}
static bool piix_irq_check(struct ata_port *ap)
{
unsigned char host_stat;
if (unlikely(!ap->ioaddr.bmdma_addr))
return false;
host_stat = ap->ops->bmdma_status(ap);
trace_ata_bmdma_status(ap, host_stat);
return host_stat & ATA_DMA_INTR;
}
#ifdef CONFIG_PM_SLEEP
static int piix_broken_suspend(void)
{
static const struct dmi_system_id sysids[] = {
{
.ident = "TECRA M3",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M3"),
},
},
{
.ident = "TECRA M3",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M3"),
},
},
{
.ident = "TECRA M3",
.matches = {
DMI_MATCH(DMI_OEM_STRING, "Tecra M3,"),
},
},
{
.ident = "TECRA M4",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M4"),
},
},
{
.ident = "TECRA M4",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M4"),
},
},
{
.ident = "TECRA M5",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M5"),
},
},
{
.ident = "TECRA M6",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M6"),
},
},
{
.ident = "TECRA M7",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M7"),
},
},
{
.ident = "TECRA A8",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A8"),
},
},
{
.ident = "Satellite R20",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R20"),
},
},
{
.ident = "Satellite R25",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R25"),
},
},
{
.ident = "Satellite U200",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U200"),
},
},
{
.ident = "Satellite U200",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U200"),
},
},
{
.ident = "Satellite Pro U200",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE PRO U200"),
},
},
{
.ident = "Satellite U205",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U205"),
},
},
{
.ident = "SATELLITE U205",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U205"),
},
},
{
.ident = "Satellite Pro A120",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "Satellite Pro A120"),
},
},
{
.ident = "Portege M500",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M500"),
},
},
{
.ident = "VGN-BX297XP",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-BX297XP"),
},
},
{ } /* terminate list */
};
if (dmi_check_system(sysids))
return 1;
/* TECRA M4 sometimes forgets its identity and reports bogus
* DMI information. As the bogus information is a bit
* generic, match as many entries as possible. This manual
* matching is necessary because dmi_system_id.matches is
* limited to four entries.
*/
if (dmi_match(DMI_SYS_VENDOR, "TOSHIBA") &&
dmi_match(DMI_PRODUCT_NAME, "000000") &&
dmi_match(DMI_PRODUCT_VERSION, "000000") &&
dmi_match(DMI_PRODUCT_SERIAL, "000000") &&
dmi_match(DMI_BOARD_VENDOR, "TOSHIBA") &&
dmi_match(DMI_BOARD_NAME, "Portable PC") &&
dmi_match(DMI_BOARD_VERSION, "Version A0"))
return 1;
return 0;
}
static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = pci_get_drvdata(pdev);
unsigned long flags;
ata_host_suspend(host, mesg);
/* Some braindamaged ACPI suspend implementations expect the
* controller to be awake on entry; otherwise, it burns cpu
* cycles and power trying to do something to the sleeping
* beauty.
*/
if (piix_broken_suspend() && (mesg.event & PM_EVENT_SLEEP)) {
pci_save_state(pdev);
/* mark its power state as "unknown", since we don't
* know if e.g. the BIOS will change its device state
* when we suspend.
*/
if (pdev->current_state == PCI_D0)
pdev->current_state = PCI_UNKNOWN;
/* tell resume that it's waking up from broken suspend */
spin_lock_irqsave(&host->lock, flags);
host->flags |= PIIX_HOST_BROKEN_SUSPEND;
spin_unlock_irqrestore(&host->lock, flags);
} else
ata_pci_device_do_suspend(pdev, mesg);
return 0;
}
static int piix_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
unsigned long flags;
int rc;
if (host->flags & PIIX_HOST_BROKEN_SUSPEND) {
spin_lock_irqsave(&host->lock, flags);
host->flags &= ~PIIX_HOST_BROKEN_SUSPEND;
spin_unlock_irqrestore(&host->lock, flags);
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
/* PCI device wasn't disabled during suspend. Use
* pci_reenable_device() to avoid affecting the enable
* count.
*/
rc = pci_reenable_device(pdev);
if (rc)
dev_err(&pdev->dev,
"failed to enable device after resume (%d)\n",
rc);
} else
rc = ata_pci_device_do_resume(pdev);
if (rc == 0)
ata_host_resume(host);
return rc;
}
#endif
static u8 piix_vmw_bmdma_status(struct ata_port *ap)
{
return ata_bmdma_status(ap) & ~ATA_DMA_ERR;
}
static const struct scsi_host_template piix_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations piix_sata_ops = {
.inherits = &ata_bmdma32_port_ops,
.sff_irq_check = piix_irq_check,
.port_start = piix_port_start,
};
static struct ata_port_operations piix_pata_ops = {
.inherits = &piix_sata_ops,
.cable_detect = ata_cable_40wire,
.set_piomode = piix_set_piomode,
.set_dmamode = piix_set_dmamode,
.prereset = piix_pata_prereset,
};
static struct ata_port_operations piix_vmw_ops = {
.inherits = &piix_pata_ops,
.bmdma_status = piix_vmw_bmdma_status,
};
static struct ata_port_operations ich_pata_ops = {
.inherits = &piix_pata_ops,
.cable_detect = ich_pata_cable_detect,
.set_dmamode = ich_set_dmamode,
};
static struct attribute *piix_sidpr_shost_attrs[] = {
&dev_attr_link_power_management_policy.attr,
NULL
};
ATTRIBUTE_GROUPS(piix_sidpr_shost);
static const struct scsi_host_template piix_sidpr_sht = {
ATA_BMDMA_SHT(DRV_NAME),
.shost_groups = piix_sidpr_shost_groups,
};
static struct ata_port_operations piix_sidpr_sata_ops = {
.inherits = &piix_sata_ops,
.hardreset = sata_std_hardreset,
.scr_read = piix_sidpr_scr_read,
.scr_write = piix_sidpr_scr_write,
.set_lpm = piix_sidpr_set_lpm,
};
static struct ata_port_info piix_port_info[] = {
[piix_pata_mwdma] = /* PIIX3 MWDMA only */
{
.flags = PIIX_PATA_FLAGS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA12_ONLY, /* MWDMA 1-2; MWDMA0 should be OK but slow -- unverified */
.port_ops = &piix_pata_ops,
},
[piix_pata_33] = /* PIIX4 at 33MHz */
{
.flags = PIIX_PATA_FLAGS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA12_ONLY, /* MWDMA 1-2; MWDMA0 should be OK but slow -- unverified */
.udma_mask = ATA_UDMA2,
.port_ops = &piix_pata_ops,
},
[ich_pata_33] = /* ICH0 - ICH at 33 MHz */
{
.flags = PIIX_PATA_FLAGS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA12_ONLY, /* Check: maybe MWDMA0 is ok */
.udma_mask = ATA_UDMA2,
.port_ops = &ich_pata_ops,
},
[ich_pata_66] = /* ICH controllers up to 66MHz */
{
.flags = PIIX_PATA_FLAGS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA12_ONLY, /* MWDMA0 is broken on chip */
.udma_mask = ATA_UDMA4,
.port_ops = &ich_pata_ops,
},
[ich_pata_100] =
{
.flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA12_ONLY,
.udma_mask = ATA_UDMA5,
.port_ops = &ich_pata_ops,
},
[ich_pata_100_nomwdma1] =
{
.flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2_ONLY,
.udma_mask = ATA_UDMA5,
.port_ops = &ich_pata_ops,
},
[ich5_sata] =
{
.flags = PIIX_SATA_FLAGS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &piix_sata_ops,
},
[ich6_sata] =
{
.flags = PIIX_SATA_FLAGS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &piix_sata_ops,
},
[ich6m_sata] =
{
.flags = PIIX_SATA_FLAGS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &piix_sata_ops,
},
[ich8_sata] =
{
.flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &piix_sata_ops,
},
[ich8_2port_sata] =
{
.flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &piix_sata_ops,
},
[tolapai_sata] =
{
.flags = PIIX_SATA_FLAGS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &piix_sata_ops,
},
[ich8m_apple_sata] =
{
.flags = PIIX_SATA_FLAGS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &piix_sata_ops,
},
[piix_pata_vmw] =
{
.flags = PIIX_PATA_FLAGS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA12_ONLY, /* MWDMA 1-2; MWDMA0 should be OK but slow -- unverified */
.udma_mask = ATA_UDMA2,
.port_ops = &piix_vmw_ops,
},
/*
* some Sandy Bridge chipsets have broken 32-bit PIO mode to date,
* see https://bugzilla.kernel.org/show_bug.cgi?id=40592
*/
[ich8_sata_snb] =
{
.flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &piix_sata_ops,
},
[ich8_2port_sata_snb] =
{
.flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR
| PIIX_FLAG_PIO16,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &piix_sata_ops,
},
[ich8_2port_sata_byt] =
{
.flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &piix_sata_ops,
},
};
#define AHCI_PCI_BAR 5
#define AHCI_GLOBAL_CTL 0x04
#define AHCI_ENABLE (1 << 31)
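/* AHCI_GLOBAL_CTL is the HBA's GHC register; AHCI_ENABLE is its AE bit */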
static int piix_disable_ahci(struct pci_dev *pdev)
{
void __iomem *mmio;
u32 tmp;
int rc = 0;
/* BUG: pci_enable_device has not yet been called. This
* works because this device is usually set up by BIOS.
*/
if (!pci_resource_start(pdev, AHCI_PCI_BAR) ||
!pci_resource_len(pdev, AHCI_PCI_BAR))
return 0;
mmio = pci_iomap(pdev, AHCI_PCI_BAR, 64);
if (!mmio)
return -ENOMEM;
tmp = ioread32(mmio + AHCI_GLOBAL_CTL);
if (tmp & AHCI_ENABLE) {
tmp &= ~AHCI_ENABLE;
iowrite32(tmp, mmio + AHCI_GLOBAL_CTL);
tmp = ioread32(mmio + AHCI_GLOBAL_CTL);
if (tmp & AHCI_ENABLE)
rc = -EIO;
}
pci_iounmap(pdev, mmio);
return rc;
}
/**
* piix_check_450nx_errata - Check for problem 450NX setup
* @ata_dev: the PCI device to check
*
* Check for the presence of 450NX errata #19 and errata #25. If
* they are found, return an error code so we can turn off DMA.
*/
static int piix_check_450nx_errata(struct pci_dev *ata_dev)
{
struct pci_dev *pdev = NULL;
u16 cfg;
int no_piix_dma = 0;
while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL) {
/* Look for the 450NX PXB and check for problem configurations.
A PCI quirk checks bit 6 already. */
pci_read_config_word(pdev, 0x41, &cfg);
/* Only on the original revision: IDE DMA can hang */
if (pdev->revision == 0x00)
no_piix_dma = 1;
/* On all revisions below 5 PXB bus lock must be disabled for IDE */
else if (cfg & (1<<14) && pdev->revision < 5)
no_piix_dma = 2;
}
if (no_piix_dma)
dev_warn(&ata_dev->dev,
"450NX errata present, disabling IDE DMA%s\n",
no_piix_dma == 2 ? " - a BIOS update may resolve this"
: "");
return no_piix_dma;
}
static void piix_init_pcs(struct ata_host *host,
const struct piix_map_db *map_db)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
u16 pcs, new_pcs;
pci_read_config_word(pdev, ICH5_PCS, &pcs);
new_pcs = pcs | map_db->port_enable;
if (new_pcs != pcs) {
pci_write_config_word(pdev, ICH5_PCS, new_pcs);
msleep(150);
}
}
static const int *piix_init_sata_map(struct pci_dev *pdev,
struct ata_port_info *pinfo,
const struct piix_map_db *map_db)
{
const int *map;
int i, invalid_map = 0;
u8 map_value;
char buf[32];
char *p = buf, *end = buf + sizeof(buf);
pci_read_config_byte(pdev, ICH5_PMR, &map_value);
map = map_db->map[map_value & map_db->mask];
for (i = 0; i < 4; i++) {
switch (map[i]) {
case RV:
invalid_map = 1;
p += scnprintf(p, end - p, " XX");
break;
case NA:
p += scnprintf(p, end - p, " --");
break;
case IDE:
WARN_ON((i & 1) || map[i + 1] != IDE);
pinfo[i / 2] = piix_port_info[ich_pata_100];
i++;
p += scnprintf(p, end - p, " IDE IDE");
break;
default:
p += scnprintf(p, end - p, " P%d", map[i]);
if (i & 1)
pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS;
break;
}
}
dev_info(&pdev->dev, "MAP [%s ]\n", buf);
if (invalid_map)
dev_err(&pdev->dev, "invalid MAP value %u\n", map_value);
return map;
}
static bool piix_no_sidpr(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
/*
* Samsung DB-P70 only has three ATA ports exposed and
* curiously the unconnected first port reports link online
* while not responding to SRST protocol causing excessive
* detection delay.
*
* Unfortunately, the system doesn't carry enough DMI
* information to identify the machine but does have subsystem
* vendor and device set. As it's unclear whether the
* subsystem vendor/device is used only for this specific
* board, the port can't be disabled solely with the
* information; however, turning off SIDPR access works around
* the problem. Turn it off.
*
* This problem is reported in bnc#441420.
*
* https://bugzilla.novell.com/show_bug.cgi?id=441420
*/
if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x2920 &&
pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
pdev->subsystem_device == 0xb049) {
dev_warn(host->dev,
"Samsung DB-P70 detected, disabling SIDPR\n");
return true;
}
return false;
}
static int piix_init_sidpr(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
struct piix_host_priv *hpriv = host->private_data;
struct ata_link *link0 = &host->ports[0]->link;
u32 scontrol;
int i, rc;
/* check for availability */
for (i = 0; i < 4; i++)
if (hpriv->map[i] == IDE)
return 0;
/* is it blacklisted? */
if (piix_no_sidpr(host))
return 0;
if (!(host->ports[0]->flags & PIIX_FLAG_SIDPR))
return 0;
if (pci_resource_start(pdev, PIIX_SIDPR_BAR) == 0 ||
pci_resource_len(pdev, PIIX_SIDPR_BAR) != PIIX_SIDPR_LEN)
return 0;
if (pcim_iomap_regions(pdev, 1 << PIIX_SIDPR_BAR, DRV_NAME))
return 0;
hpriv->sidpr = pcim_iomap_table(pdev)[PIIX_SIDPR_BAR];
/* SCR access via SIDPR doesn't work on some configurations.
* Give it a test drive by inhibiting power save modes which
* we'll do anyway.
*/
piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol);
/* if IPM is already 3, SCR access is probably working. Don't
* un-inhibit power save modes as BIOS might have inhibited
* them for a reason.
*/
if ((scontrol & 0xf00) != 0x300) {
scontrol |= 0x300;
piix_sidpr_scr_write(link0, SCR_CONTROL, scontrol);
piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol);
if ((scontrol & 0xf00) != 0x300) {
dev_info(host->dev,
"SCR access via SIDPR is available but doesn't work\n");
return 0;
}
}
/* okay, SCRs available, set ops and ask libata for slave_link */
for (i = 0; i < 2; i++) {
struct ata_port *ap = host->ports[i];
ap->ops = &piix_sidpr_sata_ops;
if (ap->flags & ATA_FLAG_SLAVE_POSS) {
rc = ata_slave_link_init(ap);
if (rc)
return rc;
}
}
return 0;
}
static void piix_iocfg_bit18_quirk(struct ata_host *host)
{
static const struct dmi_system_id sysids[] = {
{
/* Clevo M570U sets IOCFG bit 18 if the cdrom
* isn't used to boot the system which
* disables the channel.
*/
.ident = "M570U",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Clevo Co."),
DMI_MATCH(DMI_PRODUCT_NAME, "M570U"),
},
},
{ } /* terminate list */
};
struct pci_dev *pdev = to_pci_dev(host->dev);
struct piix_host_priv *hpriv = host->private_data;
if (!dmi_check_system(sysids))
return;
/* The datasheet says that bit 18 is NOOP but certain systems
* seem to use it to disable a channel. Clear the bit on the
* affected systems.
*/
if (hpriv->saved_iocfg & (1 << 18)) {
dev_info(&pdev->dev, "applying IOCFG bit18 quirk\n");
pci_write_config_dword(pdev, PIIX_IOCFG,
hpriv->saved_iocfg & ~(1 << 18));
}
}
static bool piix_broken_system_poweroff(struct pci_dev *pdev)
{
static const struct dmi_system_id broken_systems[] = {
{
.ident = "HP Compaq 2510p",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 2510p"),
},
/* PCI slot number of the controller */
.driver_data = (void *)0x1FUL,
},
{
.ident = "HP Compaq nc6000",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nc6000"),
},
/* PCI slot number of the controller */
.driver_data = (void *)0x1FUL,
},
{ } /* terminate list */
};
const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
if (dmi) {
unsigned long slot = (unsigned long)dmi->driver_data;
/* apply the quirk only to on-board controllers */
return slot == PCI_SLOT(pdev->devfn);
}
return false;
}
static int prefer_ms_hyperv = 1;
module_param(prefer_ms_hyperv, int, 0);
MODULE_PARM_DESC(prefer_ms_hyperv,
"Prefer Hyper-V paravirtualization drivers instead of ATA, "
"0 - Use ATA drivers, "
"1 (Default) - Use the paravirtualization drivers.");
static void piix_ignore_devices_quirk(struct ata_host *host)
{
#if IS_ENABLED(CONFIG_HYPERV_STORAGE)
static const struct dmi_system_id ignore_hyperv[] = {
{
/* On Hyper-V hypervisors the disks are exposed on
* both the emulated SATA controller and on the
* paravirtualised drivers. The CD/DVD devices
* are only exposed on the emulated controller.
* Request we ignore ATA devices on this host.
*/
.ident = "Hyper-V Virtual Machine",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
"Microsoft Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
},
},
{ } /* terminate list */
};
static const struct dmi_system_id allow_virtual_pc[] = {
{
/* In MS Virtual PC guests the DMI ident is nearly
 * identical to that of a Hyper-V guest. One difference is the
* product version which is used here to identify
* a Virtual PC guest. This entry allows ata_piix to
* drive the emulated hardware.
*/
.ident = "MS Virtual PC 2007",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
"Microsoft Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"),
},
},
{ } /* terminate list */
};
const struct dmi_system_id *ignore = dmi_first_match(ignore_hyperv);
const struct dmi_system_id *allow = dmi_first_match(allow_virtual_pc);
if (ignore && !allow && prefer_ms_hyperv) {
host->flags |= ATA_HOST_IGNORE_ATA;
dev_info(host->dev, "%s detected, ATA device ignore set\n",
ignore->ident);
}
#endif
}
/**
* piix_init_one - Register PIIX ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in piix_pci_tbl matching with @pdev
*
* Called from kernel PCI layer. We probe for combined mode (sigh),
* and then hand over control to libata, for it to do the rest.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct ata_port_info port_info[2];
const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
const struct scsi_host_template *sht = &piix_sht;
unsigned long port_flags;
struct ata_host *host;
struct piix_host_priv *hpriv;
int rc;
ata_print_version_once(&pdev->dev, DRV_VERSION);
/* no hotplugging support for later devices (FIXME) */
if (!in_module_init && ent->driver_data >= ich5_sata)
return -ENODEV;
if (piix_broken_system_poweroff(pdev)) {
piix_port_info[ent->driver_data].flags |=
ATA_FLAG_NO_POWEROFF_SPINDOWN |
ATA_FLAG_NO_HIBERNATE_SPINDOWN;
dev_info(&pdev->dev, "quirky BIOS, skipping spindown "
"on poweroff and hibernation\n");
}
port_info[0] = piix_port_info[ent->driver_data];
port_info[1] = piix_port_info[ent->driver_data];
port_flags = port_info[0].flags;
/* enable device and prepare host */
rc = pcim_enable_device(pdev);
if (rc)
return rc;
hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
/* Save IOCFG; this will be used for cable detection, quirk
* detection and restoration on detach. This is necessary
* because some ACPI implementations mess up cable related
* bits on _STM. Reported on kernel bz#11879.
*/
pci_read_config_dword(pdev, PIIX_IOCFG, &hpriv->saved_iocfg);
/* ICH6R may be driven by either ata_piix or ahci driver
* regardless of BIOS configuration. Make sure AHCI mode is
* off.
*/
if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x2652) {
rc = piix_disable_ahci(pdev);
if (rc)
return rc;
}
/* SATA map init can change port_info, do it before prepping host */
if (port_flags & ATA_FLAG_SATA)
hpriv->map = piix_init_sata_map(pdev, port_info,
piix_map_db_table[ent->driver_data]);
rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
host->private_data = hpriv;
/* initialize controller */
if (port_flags & ATA_FLAG_SATA) {
piix_init_pcs(host, piix_map_db_table[ent->driver_data]);
rc = piix_init_sidpr(host);
if (rc)
return rc;
if (host->ports[0]->ops == &piix_sidpr_sata_ops)
sht = &piix_sidpr_sht;
}
/* apply IOCFG bit18 quirk */
piix_iocfg_bit18_quirk(host);
/* On ICH5, some BIOSen disable the interrupt using the
* PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
* On ICH6, this bit has the same effect, but only when
* MSI is disabled (and it is disabled, as we don't use
* message-signalled interrupts currently).
*/
if (port_flags & PIIX_FLAG_CHECKINTR)
pci_intx(pdev, 1);
if (piix_check_450nx_errata(pdev)) {
/* This writes into the master table but it does not
really matter for this errata as we will apply it to
all the PIIX devices on the board */
host->ports[0]->mwdma_mask = 0;
host->ports[0]->udma_mask = 0;
host->ports[1]->mwdma_mask = 0;
host->ports[1]->udma_mask = 0;
}
host->flags |= ATA_HOST_PARALLEL_SCAN;
/* Allow hosts to specify device types to ignore when scanning. */
piix_ignore_devices_quirk(host);
pci_set_master(pdev);
return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
}
static void piix_remove_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
struct piix_host_priv *hpriv = host->private_data;
pci_write_config_dword(pdev, PIIX_IOCFG, hpriv->saved_iocfg);
ata_pci_remove_one(pdev);
}
static struct pci_driver piix_pci_driver = {
.name = DRV_NAME,
.id_table = piix_pci_tbl,
.probe = piix_init_one,
.remove = piix_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = piix_pci_device_suspend,
.resume = piix_pci_device_resume,
#endif
};
static int __init piix_init(void)
{
int rc;
rc = pci_register_driver(&piix_pci_driver);
if (rc)
return rc;
in_module_init = 0;
return 0;
}
static void __exit piix_exit(void)
{
pci_unregister_driver(&piix_pci_driver);
}
module_init(piix_init);
module_exit(piix_exit);
| linux-master | drivers/ata/ata_piix.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* libata-pmp.c - libata port multiplier support
*
* Copyright (c) 2007 SUSE Linux Products GmbH
* Copyright (c) 2007 Tejun Heo <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/libata.h>
#include <linux/slab.h>
#include "libata.h"
#include "libata-transport.h"
const struct ata_port_operations sata_pmp_port_ops = {
.inherits = &sata_port_ops,
.pmp_prereset = ata_std_prereset,
.pmp_hardreset = sata_std_hardreset,
.pmp_postreset = ata_std_postreset,
.error_handler = sata_pmp_error_handler,
};
/**
* sata_pmp_read - read PMP register
* @link: link to read PMP register for
* @reg: register to read
* @r_val: resulting value
*
* Read PMP register.
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* 0 on success, AC_ERR_* mask on failure.
*/
static unsigned int sata_pmp_read(struct ata_link *link, int reg, u32 *r_val)
{
struct ata_port *ap = link->ap;
struct ata_device *pmp_dev = ap->link.device;
struct ata_taskfile tf;
unsigned int err_mask;
ata_tf_init(pmp_dev, &tf);
tf.command = ATA_CMD_PMP_READ;
tf.protocol = ATA_PROT_NODATA;
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
tf.feature = reg;
tf.device = link->pmp;
err_mask = ata_exec_internal(pmp_dev, &tf, NULL, DMA_NONE, NULL, 0,
SATA_PMP_RW_TIMEOUT);
if (err_mask)
return err_mask;
*r_val = tf.nsect | tf.lbal << 8 | tf.lbam << 16 | tf.lbah << 24;
return 0;
}
/**
* sata_pmp_write - write PMP register
* @link: link to write PMP register for
* @reg: register to write
* @val: value to write
*
* Write PMP register.
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* 0 on success, AC_ERR_* mask on failure.
*/
static unsigned int sata_pmp_write(struct ata_link *link, int reg, u32 val)
{
struct ata_port *ap = link->ap;
struct ata_device *pmp_dev = ap->link.device;
struct ata_taskfile tf;
ata_tf_init(pmp_dev, &tf);
tf.command = ATA_CMD_PMP_WRITE;
tf.protocol = ATA_PROT_NODATA;
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
tf.feature = reg;
tf.device = link->pmp;
tf.nsect = val & 0xff;
tf.lbal = (val >> 8) & 0xff;
tf.lbam = (val >> 16) & 0xff;
tf.lbah = (val >> 24) & 0xff;
return ata_exec_internal(pmp_dev, &tf, NULL, DMA_NONE, NULL, 0,
SATA_PMP_RW_TIMEOUT);
}
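/*
 * Hedged sketch (not part of the original file): the 32-bit register
 * value moved by the two helpers above travels in the NSECT, LBAL,
 * LBAM and LBAH taskfile fields, least significant byte first. These
 * illustrative helpers make the mapping explicit.
 */
static inline u32 example_pmp_tf_to_val(const struct ata_taskfile *tf)
{
	/* mirrors the unpacking at the end of sata_pmp_read() */
	return tf->nsect | tf->lbal << 8 | tf->lbam << 16 | tf->lbah << 24;
}

static inline void example_pmp_val_to_tf(u32 val, struct ata_taskfile *tf)
{
	/* mirrors the packing in sata_pmp_write() */
	tf->nsect = val & 0xff;
	tf->lbal = (val >> 8) & 0xff;
	tf->lbam = (val >> 16) & 0xff;
	tf->lbah = (val >> 24) & 0xff;
}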
/**
* sata_pmp_qc_defer_cmd_switch - qc_defer for command switching PMP
* @qc: ATA command in question
*
* A host which has command switching PMP support cannot issue
* commands to multiple links simultaneously.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* ATA_DEFER_* if deferring is needed, 0 otherwise.
*/
int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc)
{
struct ata_link *link = qc->dev->link;
struct ata_port *ap = link->ap;
if (ap->excl_link == NULL || ap->excl_link == link) {
if (ap->nr_active_links == 0 || ata_link_active(link)) {
qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
return ata_std_qc_defer(qc);
}
ap->excl_link = link;
}
return ATA_DEFER_PORT;
}
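/*
 * Hedged usage sketch (not from this file): a low-level driver whose
 * hardware only supports command-based switching would plug the defer
 * helper above into its port operations. The ops name is illustrative.
 */
static struct ata_port_operations example_cmd_switch_port_ops = {
	.inherits	= &sata_pmp_port_ops,
	.qc_defer	= sata_pmp_qc_defer_cmd_switch,
};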
/**
* sata_pmp_scr_read - read PSCR
* @link: ATA link to read PSCR for
* @reg: PSCR to read
* @r_val: resulting value
*
* Read PSCR @reg into @r_val for @link, to be called from
* ata_scr_read().
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *r_val)
{
unsigned int err_mask;
if (reg > SATA_PMP_PSCR_CONTROL)
return -EINVAL;
err_mask = sata_pmp_read(link, reg, r_val);
if (err_mask) {
ata_link_warn(link, "failed to read SCR %d (Emask=0x%x)\n",
reg, err_mask);
return -EIO;
}
return 0;
}
/**
* sata_pmp_scr_write - write PSCR
* @link: ATA link to write PSCR for
* @reg: PSCR to write
* @val: value to be written
*
* Write @val to PSCR @reg for @link, to be called from
* ata_scr_write() and ata_scr_write_flush().
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val)
{
unsigned int err_mask;
if (reg > SATA_PMP_PSCR_CONTROL)
return -EINVAL;
err_mask = sata_pmp_write(link, reg, val);
if (err_mask) {
ata_link_warn(link, "failed to write SCR %d (Emask=0x%x)\n",
reg, err_mask);
return -EIO;
}
return 0;
}
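/*
 * Hedged usage sketch (not from this file): callers normally go through
 * the generic sata_scr_read()/sata_scr_write() wrappers, which route to
 * the two PSCR helpers above when @link hangs off a port multiplier.
 * The function name is illustrative.
 */
static inline bool example_fanout_link_online(struct ata_link *link)
{
	u32 sstatus;

	/* DET field of SStatus == 3: device present, PHY established */
	return sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
	       (sstatus & 0xf) == 0x3;
}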
/**
* sata_pmp_set_lpm - configure LPM for a PMP link
* @link: PMP link to configure LPM for
* @policy: target LPM policy
* @hints: LPM hints
*
* Configure LPM for @link. This function will contain any PMP
* specific workarounds if necessary.
*
* LOCKING:
* EH context.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int sata_pmp_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
unsigned hints)
{
return sata_link_scr_lpm(link, policy, true);
}
/**
* sata_pmp_read_gscr - read GSCR block of SATA PMP
* @dev: PMP device
* @gscr: buffer to read GSCR block into
*
* Read selected PMP GSCRs from the PMP at @dev. This will serve
* as configuration and identification info for the PMP.
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* 0 on success, -errno on failure.
*/
static int sata_pmp_read_gscr(struct ata_device *dev, u32 *gscr)
{
static const int gscr_to_read[] = { 0, 1, 2, 32, 33, 64, 96 };
int i;
for (i = 0; i < ARRAY_SIZE(gscr_to_read); i++) {
int reg = gscr_to_read[i];
unsigned int err_mask;
err_mask = sata_pmp_read(dev->link, reg, &gscr[reg]);
if (err_mask) {
ata_dev_err(dev, "failed to read PMP GSCR[%d] (Emask=0x%x)\n",
reg, err_mask);
return -EIO;
}
}
return 0;
}
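/*
 * Hedged sketch (not from this file): decoding identification fields
 * from the GSCR block fetched above, using the sata_pmp_gscr_*()
 * accessors this file already relies on. The name is illustrative.
 */
static inline void example_pmp_print_ident(struct ata_device *dev)
{
	const u32 *gscr = dev->gscr;

	ata_dev_info(dev, "PMP %04x:%04x, %d ports\n",
		     sata_pmp_gscr_vendor(gscr), sata_pmp_gscr_devid(gscr),
		     sata_pmp_gscr_ports(gscr));
}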
static const char *sata_pmp_spec_rev_str(const u32 *gscr)
{
u32 rev = gscr[SATA_PMP_GSCR_REV];
if (rev & (1 << 3))
return "1.2";
if (rev & (1 << 2))
return "1.1";
if (rev & (1 << 1))
return "1.0";
return "<unknown>";
}
#define PMP_GSCR_SII_POL 129
static int sata_pmp_configure(struct ata_device *dev, int print_info)
{
struct ata_port *ap = dev->link->ap;
u32 *gscr = dev->gscr;
u16 vendor = sata_pmp_gscr_vendor(gscr);
u16 devid = sata_pmp_gscr_devid(gscr);
unsigned int err_mask = 0;
const char *reason;
int nr_ports, rc;
nr_ports = sata_pmp_gscr_ports(gscr);
if (nr_ports <= 0 || nr_ports > SATA_PMP_MAX_PORTS) {
rc = -EINVAL;
reason = "invalid nr_ports";
goto fail;
}
if ((ap->flags & ATA_FLAG_AN) &&
(gscr[SATA_PMP_GSCR_FEAT] & SATA_PMP_FEAT_NOTIFY))
dev->flags |= ATA_DFLAG_AN;
/* monitor SERR_PHYRDY_CHG on fan-out ports */
err_mask = sata_pmp_write(dev->link, SATA_PMP_GSCR_ERROR_EN,
SERR_PHYRDY_CHG);
if (err_mask) {
rc = -EIO;
reason = "failed to write GSCR_ERROR_EN";
goto fail;
}
/* Disable sending Early R_OK.
* With "cached read" HDD testing and multiple ports busy on a SATA
* host controller, 3x26 PMP will very rarely drop a deferred
* R_OK that was intended for the host. The symptom is that all
* 5 drives under test time out, get reset, and recover.
*/
if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
u32 reg;
err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
if (err_mask) {
rc = -EIO;
reason = "failed to read Sil3x26 Private Register";
goto fail;
}
reg &= ~0x1;
err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
if (err_mask) {
rc = -EIO;
reason = "failed to write Sil3x26 Private Register";
goto fail;
}
}
if (print_info) {
ata_dev_info(dev, "Port Multiplier %s, "
"0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n",
sata_pmp_spec_rev_str(gscr), vendor, devid,
sata_pmp_gscr_rev(gscr),
nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN],
gscr[SATA_PMP_GSCR_FEAT]);
if (!(dev->flags & ATA_DFLAG_AN))
ata_dev_info(dev,
"Asynchronous notification not supported, "
"hotplug won't work on fan-out ports. Use warm-plug instead.\n");
}
return 0;
fail:
ata_dev_err(dev,
"failed to configure Port Multiplier (%s, Emask=0x%x)\n",
reason, err_mask);
return rc;
}
static int sata_pmp_init_links(struct ata_port *ap, int nr_ports)
{
struct ata_link *pmp_link = ap->pmp_link;
int i, err;
if (!pmp_link) {
pmp_link = kcalloc(SATA_PMP_MAX_PORTS, sizeof(pmp_link[0]),
GFP_NOIO);
if (!pmp_link)
return -ENOMEM;
for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
ata_link_init(ap, &pmp_link[i], i);
ap->pmp_link = pmp_link;
for (i = 0; i < SATA_PMP_MAX_PORTS; i++) {
err = ata_tlink_add(&pmp_link[i]);
if (err) {
goto err_tlink;
}
}
}
for (i = 0; i < nr_ports; i++) {
struct ata_link *link = &pmp_link[i];
struct ata_eh_context *ehc = &link->eh_context;
link->flags = 0;
ehc->i.probe_mask |= ATA_ALL_DEVICES;
ehc->i.action |= ATA_EH_RESET;
}
return 0;
err_tlink:
while (--i >= 0)
ata_tlink_delete(&pmp_link[i]);
kfree(pmp_link);
ap->pmp_link = NULL;
return err;
}
static void sata_pmp_quirks(struct ata_port *ap)
{
u32 *gscr = ap->link.device->gscr;
u16 vendor = sata_pmp_gscr_vendor(gscr);
u16 devid = sata_pmp_gscr_devid(gscr);
struct ata_link *link;
if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
/* sil3x26 quirks */
ata_for_each_link(link, ap, EDGE) {
/* link reports offline after LPM */
link->flags |= ATA_LFLAG_NO_LPM;
/*
* Class code report is unreliable and SRST times
* out under certain configurations.
*/
if (link->pmp < 5)
link->flags |= ATA_LFLAG_NO_SRST |
ATA_LFLAG_ASSUME_ATA;
/* port 5 is for SEMB device and it doesn't like SRST */
if (link->pmp == 5)
link->flags |= ATA_LFLAG_NO_SRST |
ATA_LFLAG_ASSUME_SEMB;
}
} else if (vendor == 0x1095 && devid == 0x4723) {
/*
* sil4723 quirks
*
* Link reports offline after LPM. Class code report is
* unreliable. SIMG PMPs never got SRST reliable and the
* config device at port 2 locks up on SRST.
*/
ata_for_each_link(link, ap, EDGE)
link->flags |= ATA_LFLAG_NO_LPM |
ATA_LFLAG_NO_SRST |
ATA_LFLAG_ASSUME_ATA;
} else if (vendor == 0x1095 && devid == 0x4726) {
/* sil4726 quirks */
ata_for_each_link(link, ap, EDGE) {
/* link reports offline after LPM */
link->flags |= ATA_LFLAG_NO_LPM;
/* Class code report is unreliable and SRST
* times out under certain configurations.
* Config device can be at port 0 or 5 and
* locks up on SRST.
*/
if (link->pmp <= 5)
link->flags |= ATA_LFLAG_NO_SRST |
ATA_LFLAG_ASSUME_ATA;
/* Port 6 is for SEMB device which doesn't
* like SRST either.
*/
if (link->pmp == 6)
link->flags |= ATA_LFLAG_NO_SRST |
ATA_LFLAG_ASSUME_SEMB;
}
} else if (vendor == 0x1095 && (devid == 0x5723 || devid == 0x5733 ||
devid == 0x5734 || devid == 0x5744)) {
/* sil5723/5744 quirks */
/* The sil5723/5744 has either two or three downstream
 * ports depending on operation mode. The last port is
 * empty if any actual IO device is attached, and is
 * occupied by a pseudo configuration device otherwise.
 * Don't try hard to recover it.
 */
ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
} else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325)) {
/*
* 0x2352: found in Thermaltake BlackX Duet, jmicron JMB350?
* 0x0325: jmicron JMB394.
*/
ata_for_each_link(link, ap, EDGE) {
/* SRST breaks detection and disks get misclassified.
 * LPM is disabled to avoid potential problems.
*/
link->flags |= ATA_LFLAG_NO_LPM |
ATA_LFLAG_NO_SRST |
ATA_LFLAG_ASSUME_ATA;
}
} else if (vendor == 0x11ab && devid == 0x4140) {
/* Marvell 4140 quirks */
ata_for_each_link(link, ap, EDGE) {
/* port 4 is for SEMB device and it doesn't like SRST */
if (link->pmp == 4)
link->flags |= ATA_LFLAG_DISABLED;
}
}
}
/**
* sata_pmp_attach - attach a SATA PMP device
* @dev: SATA PMP device to attach
*
* Configure and attach SATA PMP device @dev. This function is
* also responsible for allocating and initializing PMP links.
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int sata_pmp_attach(struct ata_device *dev)
{
struct ata_link *link = dev->link;
struct ata_port *ap = link->ap;
unsigned long flags;
struct ata_link *tlink;
int rc;
/* is it hanging off the right place? */
if (!sata_pmp_supported(ap)) {
ata_dev_err(dev, "host does not support Port Multiplier\n");
return -EINVAL;
}
if (!ata_is_host_link(link)) {
ata_dev_err(dev, "Port Multipliers cannot be nested\n");
return -EINVAL;
}
if (dev->devno) {
ata_dev_err(dev, "Port Multiplier must be the first device\n");
return -EINVAL;
}
WARN_ON(link->pmp != 0);
link->pmp = SATA_PMP_CTRL_PORT;
/* read GSCR block */
rc = sata_pmp_read_gscr(dev, dev->gscr);
if (rc)
goto fail;
/* config PMP */
rc = sata_pmp_configure(dev, 1);
if (rc)
goto fail;
rc = sata_pmp_init_links(ap, sata_pmp_gscr_ports(dev->gscr));
if (rc) {
ata_dev_info(dev, "failed to initialize PMP links\n");
goto fail;
}
/* attach it */
spin_lock_irqsave(ap->lock, flags);
WARN_ON(ap->nr_pmp_links);
ap->nr_pmp_links = sata_pmp_gscr_ports(dev->gscr);
spin_unlock_irqrestore(ap->lock, flags);
sata_pmp_quirks(ap);
if (ap->ops->pmp_attach)
ap->ops->pmp_attach(ap);
ata_for_each_link(tlink, ap, EDGE)
sata_link_init_spd(tlink);
return 0;
fail:
link->pmp = 0;
return rc;
}
/**
* sata_pmp_detach - detach a SATA PMP device
* @dev: SATA PMP device to detach
*
* Detach SATA PMP device @dev. This function is also
* responsible for deconfiguring PMP links.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
static void sata_pmp_detach(struct ata_device *dev)
{
struct ata_link *link = dev->link;
struct ata_port *ap = link->ap;
struct ata_link *tlink;
unsigned long flags;
ata_dev_info(dev, "Port Multiplier detaching\n");
WARN_ON(!ata_is_host_link(link) || dev->devno ||
link->pmp != SATA_PMP_CTRL_PORT);
if (ap->ops->pmp_detach)
ap->ops->pmp_detach(ap);
ata_for_each_link(tlink, ap, EDGE)
ata_eh_detach_dev(tlink->device);
spin_lock_irqsave(ap->lock, flags);
ap->nr_pmp_links = 0;
link->pmp = 0;
spin_unlock_irqrestore(ap->lock, flags);
}
/**
* sata_pmp_same_pmp - does the new GSCR match the configured PMP?
* @dev: PMP device to compare against
* @new_gscr: GSCR block of the new device
*
* Compare @new_gscr against @dev and determine whether @dev is
* the PMP described by @new_gscr.
*
* LOCKING:
* None.
*
* RETURNS:
* 1 if @dev matches @new_gscr, 0 otherwise.
*/
static int sata_pmp_same_pmp(struct ata_device *dev, const u32 *new_gscr)
{
const u32 *old_gscr = dev->gscr;
u16 old_vendor, new_vendor, old_devid, new_devid;
int old_nr_ports, new_nr_ports;
old_vendor = sata_pmp_gscr_vendor(old_gscr);
new_vendor = sata_pmp_gscr_vendor(new_gscr);
old_devid = sata_pmp_gscr_devid(old_gscr);
new_devid = sata_pmp_gscr_devid(new_gscr);
old_nr_ports = sata_pmp_gscr_ports(old_gscr);
new_nr_ports = sata_pmp_gscr_ports(new_gscr);
if (old_vendor != new_vendor) {
ata_dev_info(dev,
"Port Multiplier vendor mismatch '0x%x' != '0x%x'\n",
old_vendor, new_vendor);
return 0;
}
if (old_devid != new_devid) {
ata_dev_info(dev,
"Port Multiplier device ID mismatch '0x%x' != '0x%x'\n",
old_devid, new_devid);
return 0;
}
if (old_nr_ports != new_nr_ports) {
ata_dev_info(dev,
"Port Multiplier nr_ports mismatch '0x%x' != '0x%x'\n",
old_nr_ports, new_nr_ports);
return 0;
}
return 1;
}
/**
* sata_pmp_revalidate - revalidate SATA PMP
* @dev: PMP device to revalidate
* @new_class: new class code
*
* Re-read GSCR block and make sure @dev is still attached to the
* port and properly configured.
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* 0 on success, -errno otherwise.
*/
static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class)
{
struct ata_link *link = dev->link;
struct ata_port *ap = link->ap;
u32 *gscr = (void *)ap->sector_buf;
int rc;
ata_eh_about_to_do(link, NULL, ATA_EH_REVALIDATE);
if (!ata_dev_enabled(dev)) {
rc = -ENODEV;
goto fail;
}
/* wrong class? */
if (ata_class_enabled(new_class) && new_class != ATA_DEV_PMP) {
rc = -ENODEV;
goto fail;
}
/* read GSCR */
rc = sata_pmp_read_gscr(dev, gscr);
if (rc)
goto fail;
/* is the pmp still there? */
if (!sata_pmp_same_pmp(dev, gscr)) {
rc = -ENODEV;
goto fail;
}
memcpy(dev->gscr, gscr, sizeof(gscr[0]) * SATA_PMP_GSCR_DWORDS);
rc = sata_pmp_configure(dev, 0);
if (rc)
goto fail;
ata_eh_done(link, NULL, ATA_EH_REVALIDATE);
return 0;
fail:
ata_dev_err(dev, "PMP revalidation failed (errno=%d)\n", rc);
return rc;
}
/**
* sata_pmp_revalidate_quick - revalidate SATA PMP quickly
* @dev: PMP device to revalidate
*
* Make sure the attached PMP is accessible.
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* 0 on success, -errno otherwise.
*/
static int sata_pmp_revalidate_quick(struct ata_device *dev)
{
unsigned int err_mask;
u32 prod_id;
err_mask = sata_pmp_read(dev->link, SATA_PMP_GSCR_PROD_ID, &prod_id);
if (err_mask) {
ata_dev_err(dev,
"failed to read PMP product ID (Emask=0x%x)\n",
err_mask);
return -EIO;
}
if (prod_id != dev->gscr[SATA_PMP_GSCR_PROD_ID]) {
ata_dev_err(dev, "PMP product ID mismatch\n");
/* something weird is going on, request full PMP recovery */
return -EIO;
}
return 0;
}
/**
* sata_pmp_eh_recover_pmp - recover PMP
* @ap: ATA port PMP is attached to
* @prereset: prereset method (can be NULL)
* @softreset: softreset method
* @hardreset: hardreset method
* @postreset: postreset method (can be NULL)
*
* Recover PMP attached to @ap. Recovery procedure is somewhat
* similar to that of ata_eh_recover() except that reset should
* always be performed in hard->soft sequence and recovery
* failure results in PMP detachment.
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* 0 on success, -errno on failure.
*/
static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
struct ata_link *link = &ap->link;
struct ata_eh_context *ehc = &link->eh_context;
struct ata_device *dev = link->device;
int tries = ATA_EH_PMP_TRIES;
int detach = 0, rc = 0;
int reval_failed = 0;
if (dev->flags & ATA_DFLAG_DETACH) {
detach = 1;
rc = -ENODEV;
goto fail;
}
retry:
ehc->classes[0] = ATA_DEV_UNKNOWN;
if (ehc->i.action & ATA_EH_RESET) {
struct ata_link *tlink;
/* reset */
rc = ata_eh_reset(link, 0, prereset, softreset, hardreset,
postreset);
if (rc) {
ata_link_err(link, "failed to reset PMP, giving up\n");
goto fail;
}
/* PMP is reset, SErrors cannot be trusted, scan all */
ata_for_each_link(tlink, ap, EDGE) {
struct ata_eh_context *ehc = &tlink->eh_context;
ehc->i.probe_mask |= ATA_ALL_DEVICES;
ehc->i.action |= ATA_EH_RESET;
}
}
/* If revalidation is requested, revalidate and reconfigure;
* otherwise, do quick revalidation.
*/
if (ehc->i.action & ATA_EH_REVALIDATE)
rc = sata_pmp_revalidate(dev, ehc->classes[0]);
else
rc = sata_pmp_revalidate_quick(dev);
if (rc) {
tries--;
if (rc == -ENODEV) {
ehc->i.probe_mask |= ATA_ALL_DEVICES;
detach = 1;
/* give it just two more chances */
tries = min(tries, 2);
}
if (tries) {
/* consecutive revalidation failures? speed down */
if (reval_failed)
sata_down_spd_limit(link, 0);
else
reval_failed = 1;
ehc->i.action |= ATA_EH_RESET;
goto retry;
} else {
ata_dev_err(dev,
"failed to recover PMP after %d tries, giving up\n",
ATA_EH_PMP_TRIES);
goto fail;
}
}
/* okay, PMP resurrected */
ehc->i.flags = 0;
return 0;
fail:
sata_pmp_detach(dev);
if (detach)
ata_eh_detach_dev(dev);
else
ata_dev_disable(dev);
return rc;
}
static int sata_pmp_eh_handle_disabled_links(struct ata_port *ap)
{
struct ata_link *link;
unsigned long flags;
int rc;
spin_lock_irqsave(ap->lock, flags);
ata_for_each_link(link, ap, EDGE) {
if (!(link->flags & ATA_LFLAG_DISABLED))
continue;
spin_unlock_irqrestore(ap->lock, flags);
/* Some PMPs require hardreset sequence to get
* SError.N working.
*/
sata_link_hardreset(link, sata_deb_timing_normal,
ata_deadline(jiffies, ATA_TMOUT_INTERNAL_QUICK),
NULL, NULL);
/* unconditionally clear SError.N */
rc = sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
if (rc) {
ata_link_err(link,
"failed to clear SError.N (errno=%d)\n",
rc);
return rc;
}
spin_lock_irqsave(ap->lock, flags);
}
spin_unlock_irqrestore(ap->lock, flags);
return 0;
}
static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
{
struct ata_port *ap = link->ap;
unsigned long flags;
if (link_tries[link->pmp] && --link_tries[link->pmp])
return 1;
/* disable this link */
if (!(link->flags & ATA_LFLAG_DISABLED)) {
ata_link_warn(link,
"failed to recover link after %d tries, disabling\n",
ATA_EH_PMP_LINK_TRIES);
spin_lock_irqsave(ap->lock, flags);
link->flags |= ATA_LFLAG_DISABLED;
spin_unlock_irqrestore(ap->lock, flags);
}
ata_dev_disable(link->device);
link->eh_context.i.action = 0;
return 0;
}
/**
* sata_pmp_eh_recover - recover PMP-enabled port
* @ap: ATA port to recover
*
* Drive EH recovery operation for PMP enabled port @ap. This
* function recovers host and PMP ports with proper retrials and
* fallbacks. Actual recovery operations are performed using
* ata_eh_recover() and sata_pmp_eh_recover_pmp().
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* 0 on success, -errno on failure.
*/
static int sata_pmp_eh_recover(struct ata_port *ap)
{
struct ata_port_operations *ops = ap->ops;
int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
struct ata_link *pmp_link = &ap->link;
struct ata_device *pmp_dev = pmp_link->device;
struct ata_eh_context *pmp_ehc = &pmp_link->eh_context;
u32 *gscr = pmp_dev->gscr;
struct ata_link *link;
struct ata_device *dev;
unsigned int err_mask;
u32 gscr_error, sntf;
int cnt, rc;
pmp_tries = ATA_EH_PMP_TRIES;
ata_for_each_link(link, ap, EDGE)
link_tries[link->pmp] = ATA_EH_PMP_LINK_TRIES;
retry:
/* PMP attached? */
if (!sata_pmp_attached(ap)) {
rc = ata_eh_recover(ap, ops->prereset, ops->softreset,
ops->hardreset, ops->postreset, NULL);
if (rc) {
ata_for_each_dev(dev, &ap->link, ALL)
ata_dev_disable(dev);
return rc;
}
if (pmp_dev->class != ATA_DEV_PMP)
return 0;
/* new PMP online */
ata_for_each_link(link, ap, EDGE)
link_tries[link->pmp] = ATA_EH_PMP_LINK_TRIES;
/* fall through */
}
/* recover pmp */
rc = sata_pmp_eh_recover_pmp(ap, ops->prereset, ops->softreset,
ops->hardreset, ops->postreset);
if (rc)
goto pmp_fail;
/* PHY event notification can disturb reset and other recovery
* operations. Turn it off.
*/
if (gscr[SATA_PMP_GSCR_FEAT_EN] & SATA_PMP_FEAT_NOTIFY) {
gscr[SATA_PMP_GSCR_FEAT_EN] &= ~SATA_PMP_FEAT_NOTIFY;
err_mask = sata_pmp_write(pmp_link, SATA_PMP_GSCR_FEAT_EN,
gscr[SATA_PMP_GSCR_FEAT_EN]);
if (err_mask) {
ata_link_warn(pmp_link,
"failed to disable NOTIFY (err_mask=0x%x)\n",
err_mask);
goto pmp_fail;
}
}
/* handle disabled links */
rc = sata_pmp_eh_handle_disabled_links(ap);
if (rc)
goto pmp_fail;
/* recover links */
rc = ata_eh_recover(ap, ops->pmp_prereset, ops->pmp_softreset,
ops->pmp_hardreset, ops->pmp_postreset, &link);
if (rc)
goto link_fail;
/* clear SNotification */
rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
if (rc == 0)
sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
/*
* If LPM is active on any fan-out port, hotplug wouldn't
* work. Return w/ PHY event notification disabled.
*/
ata_for_each_link(link, ap, EDGE)
if (link->lpm_policy > ATA_LPM_MAX_POWER)
return 0;
/*
* Connection status might have changed while resetting other
* links, enable notification and check SATA_PMP_GSCR_ERROR
* before returning.
*/
/* enable notification */
if (pmp_dev->flags & ATA_DFLAG_AN) {
gscr[SATA_PMP_GSCR_FEAT_EN] |= SATA_PMP_FEAT_NOTIFY;
err_mask = sata_pmp_write(pmp_link, SATA_PMP_GSCR_FEAT_EN,
gscr[SATA_PMP_GSCR_FEAT_EN]);
if (err_mask) {
ata_dev_err(pmp_dev,
"failed to write PMP_FEAT_EN (Emask=0x%x)\n",
err_mask);
rc = -EIO;
goto pmp_fail;
}
}
/* check GSCR_ERROR */
err_mask = sata_pmp_read(pmp_link, SATA_PMP_GSCR_ERROR, &gscr_error);
if (err_mask) {
ata_dev_err(pmp_dev,
"failed to read PMP_GSCR_ERROR (Emask=0x%x)\n",
err_mask);
rc = -EIO;
goto pmp_fail;
}
cnt = 0;
ata_for_each_link(link, ap, EDGE) {
if (!(gscr_error & (1 << link->pmp)))
continue;
if (sata_pmp_handle_link_fail(link, link_tries)) {
ata_ehi_hotplugged(&link->eh_context.i);
cnt++;
} else {
ata_link_warn(link,
"PHY status changed but maxed out on retries, giving up\n");
ata_link_warn(link,
"Manually issue scan to resume this link\n");
}
}
if (cnt) {
ata_port_info(ap,
"PMP SError.N set for some ports, repeating recovery\n");
goto retry;
}
return 0;
link_fail:
if (sata_pmp_handle_link_fail(link, link_tries)) {
pmp_ehc->i.action |= ATA_EH_RESET;
goto retry;
}
/* fall through */
pmp_fail:
/* Control always ends up here after detaching PMP. Shut up
* and return if we're unloading.
*/
if (ap->pflags & ATA_PFLAG_UNLOADING)
return rc;
if (!sata_pmp_attached(ap))
goto retry;
if (--pmp_tries) {
pmp_ehc->i.action |= ATA_EH_RESET;
goto retry;
}
ata_port_err(ap, "failed to recover PMP after %d tries, giving up\n",
ATA_EH_PMP_TRIES);
sata_pmp_detach(pmp_dev);
ata_dev_disable(pmp_dev);
return rc;
}
/**
* sata_pmp_error_handler - do standard error handling for PMP-enabled host
* @ap: host port to handle error for
*
* Perform standard error handling sequence for PMP-enabled host
* @ap.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
void sata_pmp_error_handler(struct ata_port *ap)
{
ata_eh_autopsy(ap);
ata_eh_report(ap);
sata_pmp_eh_recover(ap);
ata_eh_finish(ap);
}
EXPORT_SYMBOL_GPL(sata_pmp_port_ops);
EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
EXPORT_SYMBOL_GPL(sata_pmp_error_handler);
| linux-master | drivers/ata/libata-pmp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* AHCI SATA platform library
*
* Copyright 2004-2005 Red Hat, Inc.
* Jeff Garzik <[email protected]>
* Copyright 2010 MontaVista Software, LLC.
* Anton Vorontsov <[email protected]>
*/
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/ahci_platform.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/reset.h>
#include "ahci.h"
static void ahci_host_stop(struct ata_host *host);
struct ata_port_operations ahci_platform_ops = {
.inherits = &ahci_ops,
.host_stop = ahci_host_stop,
};
EXPORT_SYMBOL_GPL(ahci_platform_ops);
/**
* ahci_platform_enable_phys - Enable PHYs
* @hpriv: host private area to store config values
*
* This function enables all the PHYs found in hpriv->phys, if any.
* If a PHY fails to be enabled, it disables all the PHYs already
* enabled in reverse order and returns an error.
*
* RETURNS:
* 0 on success otherwise a negative error code
*/
int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
{
int rc, i;
for (i = 0; i < hpriv->nports; i++) {
rc = phy_init(hpriv->phys[i]);
if (rc)
goto disable_phys;
rc = phy_set_mode(hpriv->phys[i], PHY_MODE_SATA);
if (rc) {
phy_exit(hpriv->phys[i]);
goto disable_phys;
}
rc = phy_power_on(hpriv->phys[i]);
if (rc) {
phy_exit(hpriv->phys[i]);
goto disable_phys;
}
}
return 0;
disable_phys:
while (--i >= 0) {
phy_power_off(hpriv->phys[i]);
phy_exit(hpriv->phys[i]);
}
return rc;
}
EXPORT_SYMBOL_GPL(ahci_platform_enable_phys);
/**
* ahci_platform_disable_phys - Disable PHYs
* @hpriv: host private area to store config values
*
* This function disables all PHYs found in hpriv->phys.
*/
void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
{
int i;
for (i = 0; i < hpriv->nports; i++) {
phy_power_off(hpriv->phys[i]);
phy_exit(hpriv->phys[i]);
}
}
EXPORT_SYMBOL_GPL(ahci_platform_disable_phys);
/**
* ahci_platform_find_clk - Find platform clock
* @hpriv: host private area to store config values
* @con_id: clock connection ID
*
* This function returns a pointer to the clock descriptor of the clock with
* the passed ID.
*
* RETURNS:
* Pointer to the clock descriptor on success otherwise NULL
*/
struct clk *ahci_platform_find_clk(struct ahci_host_priv *hpriv, const char *con_id)
{
int i;
for (i = 0; i < hpriv->n_clks; i++) {
if (hpriv->clks[i].id && !strcmp(hpriv->clks[i].id, con_id))
return hpriv->clks[i].clk;
}
return NULL;
}
EXPORT_SYMBOL_GPL(ahci_platform_find_clk);
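/*
 * Hedged usage sketch (not part of this library): a platform glue driver
 * looking up one of its named clocks to read a reference rate. The "ref"
 * connection ID is an assumption; real drivers use the clock names from
 * their devicetree bindings.
 */
static int example_get_ref_rate(struct ahci_host_priv *hpriv,
				unsigned long *rate)
{
	struct clk *ref = ahci_platform_find_clk(hpriv, "ref");

	if (!ref)
		return -ENOENT;
	*rate = clk_get_rate(ref);
	return 0;
}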
/**
* ahci_platform_enable_clks - Enable platform clocks
* @hpriv: host private area to store config values
*
* This function enables all the clks found for the AHCI device.
*
* RETURNS:
* 0 on success otherwise a negative error code
*/
int ahci_platform_enable_clks(struct ahci_host_priv *hpriv)
{
return clk_bulk_prepare_enable(hpriv->n_clks, hpriv->clks);
}
EXPORT_SYMBOL_GPL(ahci_platform_enable_clks);
/**
* ahci_platform_disable_clks - Disable platform clocks
* @hpriv: host private area to store config values
*
* This function disables all the clocks enabled before (the bulk
* clock disable is expected to do that in reverse of the enabling
* order).
*/
void ahci_platform_disable_clks(struct ahci_host_priv *hpriv)
{
clk_bulk_disable_unprepare(hpriv->n_clks, hpriv->clks);
}
EXPORT_SYMBOL_GPL(ahci_platform_disable_clks);
/**
* ahci_platform_deassert_rsts - Deassert/trigger platform resets
* @hpriv: host private area to store config values
*
* This function deasserts or triggers all the reset lines found for
* the AHCI device.
*
* RETURNS:
* 0 on success otherwise a negative error code
*/
int ahci_platform_deassert_rsts(struct ahci_host_priv *hpriv)
{
if (hpriv->f_rsts & AHCI_PLATFORM_RST_TRIGGER)
return reset_control_reset(hpriv->rsts);
return reset_control_deassert(hpriv->rsts);
}
EXPORT_SYMBOL_GPL(ahci_platform_deassert_rsts);
/**
* ahci_platform_assert_rsts - Assert/rearm platform resets
* @hpriv: host private area to store config values
*
* This function asserts or rearms (for self-deasserting resets) all
* the reset controls found for the AHCI device.
*
* RETURNS:
* 0 on success otherwise a negative error code
*/
int ahci_platform_assert_rsts(struct ahci_host_priv *hpriv)
{
if (hpriv->f_rsts & AHCI_PLATFORM_RST_TRIGGER)
return reset_control_rearm(hpriv->rsts);
return reset_control_assert(hpriv->rsts);
}
EXPORT_SYMBOL_GPL(ahci_platform_assert_rsts);
/**
* ahci_platform_enable_regulators - Enable regulators
* @hpriv: host private area to store config values
*
* This function enables the controller regulators and all the regulators
* found in hpriv->target_pwrs, if any. If a regulator fails to be enabled, it
* disables all the regulators already enabled in reverse order and
* returns an error.
*
* RETURNS:
* 0 on success otherwise a negative error code
*/
int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv)
{
int rc, i;
rc = regulator_enable(hpriv->ahci_regulator);
if (rc)
return rc;
rc = regulator_enable(hpriv->phy_regulator);
if (rc)
goto disable_ahci_pwrs;
for (i = 0; i < hpriv->nports; i++) {
if (!hpriv->target_pwrs[i])
continue;
rc = regulator_enable(hpriv->target_pwrs[i]);
if (rc)
goto disable_target_pwrs;
}
return 0;
disable_target_pwrs:
while (--i >= 0)
if (hpriv->target_pwrs[i])
regulator_disable(hpriv->target_pwrs[i]);
regulator_disable(hpriv->phy_regulator);
disable_ahci_pwrs:
regulator_disable(hpriv->ahci_regulator);
return rc;
}
EXPORT_SYMBOL_GPL(ahci_platform_enable_regulators);
/**
* ahci_platform_disable_regulators - Disable regulators
* @hpriv: host private area to store config values
*
* This function disables all regulators found in hpriv->target_pwrs and
* the AHCI controller regulators.
*/
void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv)
{
int i;
for (i = 0; i < hpriv->nports; i++) {
if (!hpriv->target_pwrs[i])
continue;
regulator_disable(hpriv->target_pwrs[i]);
}
regulator_disable(hpriv->ahci_regulator);
regulator_disable(hpriv->phy_regulator);
}
EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators);
/**
* ahci_platform_enable_resources - Enable platform resources
* @hpriv: host private area to store config values
*
* This function enables all ahci_platform managed resources in the
* following order:
* 1) Regulator
* 2) Clocks (through ahci_platform_enable_clks)
* 3) Resets
* 4) Phys
*
* If resource enabling fails at any point the previous enabled resources
* are disabled in reverse order.
*
* RETURNS:
* 0 on success otherwise a negative error code
*/
int ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
{
int rc;
rc = ahci_platform_enable_regulators(hpriv);
if (rc)
return rc;
rc = ahci_platform_enable_clks(hpriv);
if (rc)
goto disable_regulator;
rc = ahci_platform_deassert_rsts(hpriv);
if (rc)
goto disable_clks;
rc = ahci_platform_enable_phys(hpriv);
if (rc)
goto disable_rsts;
return 0;
disable_rsts:
ahci_platform_assert_rsts(hpriv);
disable_clks:
ahci_platform_disable_clks(hpriv);
disable_regulator:
ahci_platform_disable_regulators(hpriv);
return rc;
}
EXPORT_SYMBOL_GPL(ahci_platform_enable_resources);
/**
* ahci_platform_disable_resources - Disable platform resources
* @hpriv: host private area to store config values
*
* This function disables all ahci_platform managed resources in the
* following order:
* 1) Phys
* 2) Resets
* 3) Clocks (through ahci_platform_disable_clks)
* 4) Regulator
*/
void ahci_platform_disable_resources(struct ahci_host_priv *hpriv)
{
ahci_platform_disable_phys(hpriv);
ahci_platform_assert_rsts(hpriv);
ahci_platform_disable_clks(hpriv);
ahci_platform_disable_regulators(hpriv);
}
EXPORT_SYMBOL_GPL(ahci_platform_disable_resources);
static void ahci_platform_put_resources(struct device *dev, void *res)
{
struct ahci_host_priv *hpriv = res;
int c;
if (hpriv->got_runtime_pm) {
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
}
/*
* The regulators are tied to the child node devices and not to the
* SATA device itself. So we can't use devm for automatically
* releasing them. We have to do it manually here.
*/
for (c = 0; c < hpriv->nports; c++)
if (hpriv->target_pwrs && hpriv->target_pwrs[c])
regulator_put(hpriv->target_pwrs[c]);
kfree(hpriv->target_pwrs);
}
static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
struct device *dev, struct device_node *node)
{
int rc;
hpriv->phys[port] = devm_of_phy_get(dev, node, NULL);
if (!IS_ERR(hpriv->phys[port]))
return 0;
rc = PTR_ERR(hpriv->phys[port]);
switch (rc) {
case -ENOSYS:
/* No PHY support. Check if PHY is required. */
if (of_property_present(node, "phys")) {
dev_err(dev,
"couldn't get PHY in node %pOFn: ENOSYS\n",
node);
break;
}
fallthrough;
case -ENODEV:
/* continue normally */
hpriv->phys[port] = NULL;
rc = 0;
break;
case -EPROBE_DEFER:
/* Do not complain yet */
break;
default:
dev_err(dev,
"couldn't get PHY in node %pOFn: %d\n",
node, rc);
break;
}
return rc;
}
static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
struct device *dev)
{
struct regulator *target_pwr;
int rc = 0;
target_pwr = regulator_get(dev, "target");
if (!IS_ERR(target_pwr))
hpriv->target_pwrs[port] = target_pwr;
else
rc = PTR_ERR(target_pwr);
return rc;
}
static int ahci_platform_get_firmware(struct ahci_host_priv *hpriv,
struct device *dev)
{
struct device_node *child;
u32 port;
if (!of_property_read_u32(dev->of_node, "hba-cap", &hpriv->saved_cap))
hpriv->saved_cap &= (HOST_CAP_SSS | HOST_CAP_MPS);
of_property_read_u32(dev->of_node,
"ports-implemented", &hpriv->saved_port_map);
for_each_child_of_node(dev->of_node, child) {
if (!of_device_is_available(child))
continue;
if (of_property_read_u32(child, "reg", &port)) {
of_node_put(child);
return -EINVAL;
}
if (!of_property_read_u32(child, "hba-port-cap", &hpriv->saved_port_cap[port]))
hpriv->saved_port_cap[port] &= PORT_CMD_CAP;
}
return 0;
}
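/*
 * Hedged devicetree sketch (illustrative only, not a real binding) of
 * the firmware properties parsed above: "hba-cap" and
 * "ports-implemented" on the controller node, "reg" plus
 * "hba-port-cap" on each port sub-node.
 *
 *	sata@1a400000 {
 *		compatible = "vendor,example-ahci";
 *		reg = <0x1a400000 0x1000>;
 *		hba-cap = <0x08000000>;		// e.g. HOST_CAP_SSS
 *		ports-implemented = <0x1>;
 *
 *		sata-port@0 {
 *			reg = <0>;
 *			hba-port-cap = <0x40000>;	// e.g. PORT_CMD_HPCP
 *		};
 *	};
 */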
/**
* ahci_platform_get_resources - Get platform resources
* @pdev: platform device to get resources for
* @flags: bitmap representing the resource to get
*
* This function allocates an ahci_host_priv struct, and gets the following
* resources, storing a reference to them inside the returned struct:
*
* 1) mmio registers (IORESOURCE_MEM 0, mandatory)
* 2) regulator for controlling the targets' power (optional)
*    regulator for controlling the AHCI controller (optional)
* 3) all clocks specified in the devicetree node, or a single
* clock for non-OF platforms (optional)
* 4) resets, if flags has AHCI_PLATFORM_GET_RESETS (optional)
* 5) phys (optional)
*
* RETURNS:
* The allocated ahci_host_priv on success, otherwise an ERR_PTR value
*/
struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
unsigned int flags)
{
int child_nodes, rc = -ENOMEM, enabled_ports = 0;
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
struct device_node *child;
u32 mask_port_map = 0;
if (!devres_open_group(dev, NULL, GFP_KERNEL))
return ERR_PTR(-ENOMEM);
hpriv = devres_alloc(ahci_platform_put_resources, sizeof(*hpriv),
GFP_KERNEL);
if (!hpriv)
goto err_out;
devres_add(dev, hpriv);
/*
* If the DT provided an "ahci" named resource, use it. Otherwise,
* fallback to using the default first resource for the device node.
*/
if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci"))
hpriv->mmio = devm_platform_ioremap_resource_byname(pdev, "ahci");
else
hpriv->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hpriv->mmio)) {
rc = PTR_ERR(hpriv->mmio);
goto err_out;
}
/*
* The bulk clock lookup can fail to find any clock when running on a
* non-OF platform, or when the clocks are defined outside of the DT
* firmware (as on da850 and spear13xx). In that case we fall back to
* getting a single clock source directly from the device clock list.
*/
rc = devm_clk_bulk_get_all(dev, &hpriv->clks);
if (rc < 0)
goto err_out;
if (rc > 0) {
/* Got clocks in bulk */
hpriv->n_clks = rc;
} else {
/*
* No clock bulk found: fallback to manually getting
* the optional clock.
*/
hpriv->clks = devm_kzalloc(dev, sizeof(*hpriv->clks), GFP_KERNEL);
if (!hpriv->clks) {
rc = -ENOMEM;
goto err_out;
}
hpriv->clks->clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(hpriv->clks->clk)) {
rc = PTR_ERR(hpriv->clks->clk);
goto err_out;
} else if (hpriv->clks->clk) {
hpriv->clks->id = "ahci";
hpriv->n_clks = 1;
}
}
hpriv->ahci_regulator = devm_regulator_get(dev, "ahci");
if (IS_ERR(hpriv->ahci_regulator)) {
rc = PTR_ERR(hpriv->ahci_regulator);
if (rc != 0)
goto err_out;
}
hpriv->phy_regulator = devm_regulator_get(dev, "phy");
if (IS_ERR(hpriv->phy_regulator)) {
rc = PTR_ERR(hpriv->phy_regulator);
goto err_out;
}
if (flags & AHCI_PLATFORM_GET_RESETS) {
hpriv->rsts = devm_reset_control_array_get_optional_shared(dev);
if (IS_ERR(hpriv->rsts)) {
rc = PTR_ERR(hpriv->rsts);
goto err_out;
}
hpriv->f_rsts = flags & AHCI_PLATFORM_RST_TRIGGER;
}
/*
* Too many sub-nodes most likely means having something wrong with
* the firmware.
*/
child_nodes = of_get_child_count(dev->of_node);
if (child_nodes > AHCI_MAX_PORTS) {
rc = -EINVAL;
goto err_out;
}
/*
* If no sub-node was found, we still need to set nports to
* one in order to be able to use the
* ahci_platform_[en|dis]able_[phys|regulators] functions.
*/
if (child_nodes)
hpriv->nports = child_nodes;
else
hpriv->nports = 1;
hpriv->phys = devm_kcalloc(dev, hpriv->nports, sizeof(*hpriv->phys), GFP_KERNEL);
if (!hpriv->phys) {
rc = -ENOMEM;
goto err_out;
}
/*
* We cannot use devm_ here, since ahci_platform_put_resources() uses
* target_pwrs after devm_ has freed the memory
*/
hpriv->target_pwrs = kcalloc(hpriv->nports, sizeof(*hpriv->target_pwrs), GFP_KERNEL);
if (!hpriv->target_pwrs) {
rc = -ENOMEM;
goto err_out;
}
if (child_nodes) {
for_each_child_of_node(dev->of_node, child) {
u32 port;
struct platform_device *port_dev __maybe_unused;
if (!of_device_is_available(child))
continue;
if (of_property_read_u32(child, "reg", &port)) {
rc = -EINVAL;
of_node_put(child);
goto err_out;
}
if (port >= hpriv->nports) {
dev_warn(dev, "invalid port number %d\n", port);
continue;
}
mask_port_map |= BIT(port);
#ifdef CONFIG_OF_ADDRESS
of_platform_device_create(child, NULL, NULL);
port_dev = of_find_device_by_node(child);
if (port_dev) {
rc = ahci_platform_get_regulator(hpriv, port,
&port_dev->dev);
if (rc == -EPROBE_DEFER) {
of_node_put(child);
goto err_out;
}
}
#endif
rc = ahci_platform_get_phy(hpriv, port, dev, child);
if (rc) {
of_node_put(child);
goto err_out;
}
enabled_ports++;
}
if (!enabled_ports) {
dev_warn(dev, "No port enabled\n");
rc = -ENODEV;
goto err_out;
}
if (!hpriv->mask_port_map)
hpriv->mask_port_map = mask_port_map;
} else {
/*
* If no sub-node was found, keep this for device tree
* compatibility
*/
rc = ahci_platform_get_phy(hpriv, 0, dev, dev->of_node);
if (rc)
goto err_out;
rc = ahci_platform_get_regulator(hpriv, 0, dev);
if (rc == -EPROBE_DEFER)
goto err_out;
}
/*
* Retrieve firmware-specific flags which will then be used to set
* the HW-init fields of the HBA and its ports.
*/
rc = ahci_platform_get_firmware(hpriv, dev);
if (rc)
goto err_out;
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
hpriv->got_runtime_pm = true;
devres_remove_group(dev, NULL);
return hpriv;
err_out:
devres_release_group(dev, NULL);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(ahci_platform_get_resources);
/**
* ahci_platform_init_host - Bring up an ahci-platform host
* @pdev: platform device pointer for the host
* @hpriv: ahci-host private data for the host
* @pi_template: template for the ata_port_info to use
* @sht: scsi_host_template to use when registering
*
* This function does all the usual steps needed to bring up an
* ahci-platform host. Note that any necessary resources (i.e. clks,
* phys, etc.) must be initialized / enabled before calling this.
*
* RETURNS:
* 0 on success otherwise a negative error code
*/
int ahci_platform_init_host(struct platform_device *pdev,
struct ahci_host_priv *hpriv,
const struct ata_port_info *pi_template,
const struct scsi_host_template *sht)
{
struct device *dev = &pdev->dev;
struct ata_port_info pi = *pi_template;
const struct ata_port_info *ppi[] = { &pi, NULL };
struct ata_host *host;
int i, irq, n_ports, rc;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
if (!irq)
return -EINVAL;
hpriv->irq = irq;
/* prepare host */
pi.private_data = (void *)(unsigned long)hpriv->flags;
ahci_save_initial_config(dev, hpriv);
if (hpriv->cap & HOST_CAP_NCQ)
pi.flags |= ATA_FLAG_NCQ;
if (hpriv->cap & HOST_CAP_PMP)
pi.flags |= ATA_FLAG_PMP;
ahci_set_em_messages(hpriv, &pi);
/* CAP.NP sometimes indicates the index of the last enabled
 * port and at other times that of the last possible port, so
* determining the maximum port number requires looking at
* both CAP.NP and port_map.
*/
n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
host = ata_host_alloc_pinfo(dev, ppi, n_ports);
if (!host)
return -ENOMEM;
host->private_data = hpriv;
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
host->flags |= ATA_HOST_PARALLEL_SCAN;
else
dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
if (pi.flags & ATA_FLAG_EM)
ahci_reset_em(host);
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
ata_port_desc(ap, "mmio %pR",
platform_get_resource(pdev, IORESOURCE_MEM, 0));
ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
/* set enclosure management message type */
if (ap->flags & ATA_FLAG_EM)
ap->em_message_type = hpriv->em_msg_type;
/* disabled/not-implemented port */
if (!(hpriv->port_map & (1 << i)))
ap->ops = &ata_dummy_port_ops;
}
if (hpriv->cap & HOST_CAP_64) {
rc = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (rc) {
dev_err(dev, "Failed to enable 64-bit DMA.\n");
return rc;
}
}
rc = ahci_reset_controller(host);
if (rc)
return rc;
ahci_init_controller(host);
ahci_print_info(host, "platform");
return ahci_host_activate(host, sht);
}
EXPORT_SYMBOL_GPL(ahci_platform_init_host);
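/*
 * Hedged usage sketch (not part of this library): the canonical probe
 * sequence of an ahci-platform glue driver built on the helpers above.
 * The port info, host template and function names are illustrative
 * assumptions, not taken from this file.
 */
static const struct ata_port_info example_port_info = {
	.flags		= AHCI_FLAG_COMMON,
	.pio_mask	= ATA_PIO4,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &ahci_platform_ops,
};

static const struct scsi_host_template example_ahci_sht = {
	AHCI_SHT("example_ahci"),
};

static int example_ahci_probe(struct platform_device *pdev)
{
	struct ahci_host_priv *hpriv;
	int rc;

	hpriv = ahci_platform_get_resources(pdev, AHCI_PLATFORM_GET_RESETS);
	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	rc = ahci_platform_enable_resources(hpriv);
	if (rc)
		return rc;

	rc = ahci_platform_init_host(pdev, hpriv, &example_port_info,
				     &example_ahci_sht);
	if (rc)
		ahci_platform_disable_resources(hpriv);
	return rc;
}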
static void ahci_host_stop(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
ahci_platform_disable_resources(hpriv);
}
/**
* ahci_platform_shutdown - Disable interrupts and stop DMA for host ports
* @pdev: platform device pointer for the host
*
* This function is called during system shutdown and performs the minimal
* deconfiguration required to ensure that an ahci_platform host cannot
* corrupt or otherwise interfere with a new kernel being started with kexec.
*/
void ahci_platform_shutdown(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->mmio;
int i;
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
/* Disable port interrupts */
if (ap->ops->freeze)
ap->ops->freeze(ap);
/* Stop the port DMA engines */
if (ap->ops->port_stop)
ap->ops->port_stop(ap);
}
/* Disable and clear host interrupts */
writel(readl(mmio + HOST_CTL) & ~HOST_IRQ_EN, mmio + HOST_CTL);
readl(mmio + HOST_CTL); /* flush */
writel(GENMASK(host->n_ports, 0), mmio + HOST_IRQ_STAT);
}
EXPORT_SYMBOL_GPL(ahci_platform_shutdown);
#ifdef CONFIG_PM_SLEEP
/**
* ahci_platform_suspend_host - Suspend an ahci-platform host
* @dev: device pointer for the host
*
* This function does all the usual steps needed to suspend an
* ahci-platform host. Note that any necessary resources (i.e. clks,
* phys, etc.) must be disabled after calling this.
*
* RETURNS:
* 0 on success otherwise a negative error code
*/
int ahci_platform_suspend_host(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->mmio;
u32 ctl;
if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
dev_err(dev, "firmware update required for suspend/resume\n");
return -EIO;
}
/*
* AHCI spec rev1.1 section 8.3.3:
* Software must disable interrupts prior to requesting a
* transition of the HBA to D3 state.
*/
ctl = readl(mmio + HOST_CTL);
ctl &= ~HOST_IRQ_EN;
writel(ctl, mmio + HOST_CTL);
readl(mmio + HOST_CTL); /* flush */
if (hpriv->flags & AHCI_HFLAG_SUSPEND_PHYS)
ahci_platform_disable_phys(hpriv);
ata_host_suspend(host, PMSG_SUSPEND);
return 0;
}
EXPORT_SYMBOL_GPL(ahci_platform_suspend_host);
/**
* ahci_platform_resume_host - Resume an ahci-platform host
* @dev: device pointer for the host
*
* This function does all the usual steps needed to resume an ahci-platform
* host. Note that any necessary resources (i.e. clks, phys, etc.) must
* be initialized / enabled before calling this.
*
* RETURNS:
* 0 on success otherwise a negative error code
*/
int ahci_platform_resume_host(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
int rc;
if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
rc = ahci_reset_controller(host);
if (rc)
return rc;
ahci_init_controller(host);
}
if (hpriv->flags & AHCI_HFLAG_SUSPEND_PHYS)
ahci_platform_enable_phys(hpriv);
ata_host_resume(host);
return 0;
}
EXPORT_SYMBOL_GPL(ahci_platform_resume_host);
/**
* ahci_platform_suspend - Suspend an ahci-platform device
* @dev: the platform device to suspend
*
* This function suspends the host associated with the device, followed by
* disabling all the resources of the device.
*
* RETURNS:
* 0 on success otherwise a negative error code
*/
int ahci_platform_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
int rc;
rc = ahci_platform_suspend_host(dev);
if (rc)
return rc;
ahci_platform_disable_resources(hpriv);
return 0;
}
EXPORT_SYMBOL_GPL(ahci_platform_suspend);
/**
* ahci_platform_resume - Resume an ahci-platform device
* @dev: the platform device to resume
*
* This function enables all the resources of the device followed by
* resuming the host associated with the device.
*
* RETURNS:
* 0 on success otherwise a negative error code
*/
int ahci_platform_resume(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
int rc;
rc = ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
rc = ahci_platform_resume_host(dev);
if (rc)
goto disable_resources;
/* We resumed so update PM runtime state */
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
disable_resources:
ahci_platform_disable_resources(hpriv);
return rc;
}
EXPORT_SYMBOL_GPL(ahci_platform_resume);
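/*
 * Illustrative sketch only: wiring the suspend/resume helpers above into a
 * driver's dev_pm_ops. The st_ahci driver later in this collection uses the
 * same SIMPLE_DEV_PM_OPS pattern with its own wrappers; here the library
 * helpers are used directly and "example_ahci_pm_ops" is a hypothetical name.
 */
#if 0
static SIMPLE_DEV_PM_OPS(example_ahci_pm_ops, ahci_platform_suspend,
			 ahci_platform_resume);
#endif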
#endif
MODULE_DESCRIPTION("AHCI SATA platform library");
MODULE_AUTHOR("Anton Vorontsov <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/ata/libahci_platform.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_atiixp.c - ATI PATA for new ATA layer
* (C) 2005 Red Hat Inc
* (C) 2009-2010 Bartlomiej Zolnierkiewicz
*
* Based on
*
* linux/drivers/ide/pci/atiixp.c Version 0.01-bart2 Feb. 26, 2004
*
* Copyright (C) 2003 ATI Inc. <[email protected]>
* Copyright (C) 2004 Bartlomiej Zolnierkiewicz
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>
#define DRV_NAME "pata_atiixp"
#define DRV_VERSION "0.4.6"
enum {
ATIIXP_IDE_PIO_TIMING = 0x40,
ATIIXP_IDE_MWDMA_TIMING = 0x44,
ATIIXP_IDE_PIO_CONTROL = 0x48,
ATIIXP_IDE_PIO_MODE = 0x4a,
ATIIXP_IDE_UDMA_CONTROL = 0x54,
ATIIXP_IDE_UDMA_MODE = 0x56
};
static const struct dmi_system_id atiixp_cable_override_dmi_table[] = {
{
/* Board has onboard PATA<->SATA converters */
.ident = "MSI E350DM-E33",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
DMI_MATCH(DMI_BOARD_NAME, "E350DM-E33(MS-7720)"),
},
},
{ }
};
static int atiixp_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 udma;
if (dmi_check_system(atiixp_cable_override_dmi_table))
return ATA_CBL_PATA40_SHORT;
/* Hack carried over from drivers/ide/pci. Ideally we would do raw
cable detection rather than follow the BIOS mode guess */
pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma);
if ((udma & 0x07) >= 0x04 || (udma & 0x70) >= 0x40)
return ATA_CBL_PATA80;
return ATA_CBL_PATA40;
}
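/*
 * Worked example of the decode above (illustrative): the low three bits of
 * the register hold the master's UDMA mode and bits 4-6 the slave's. A read
 * of 0x42 (master UDMA2, slave UDMA4) returns ATA_CBL_PATA80, since the
 * BIOS leaving either device at UDMA4 or above implies an 80-wire cable.
 */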
static DEFINE_SPINLOCK(atiixp_lock);
/**
* atiixp_prereset - perform reset handling
* @link: ATA link
* @deadline: deadline jiffies for the operation
*
* Check the enable bits during the reset sequence to see which
* ports are active.
*/
static int atiixp_prereset(struct ata_link *link, unsigned long deadline)
{
static const struct pci_bits atiixp_enable_bits[] = {
{ 0x48, 1, 0x01, 0x00 },
{ 0x48, 1, 0x08, 0x00 }
};
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
/**
* atiixp_set_pio_timing - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
* @pio: Requested PIO
*
* Called by both the PIO and DMA setup functions to set the controller
* timings for PIO transfers. We must load both the mode number and
* timing values into the controller.
*/
static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, int pio)
{
static const u8 pio_timings[5] = { 0x5D, 0x47, 0x34, 0x22, 0x20 };
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = 2 * ap->port_no + adev->devno;
int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
u32 pio_timing_data;
u16 pio_mode_data;
pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
pio_mode_data &= ~(0x7 << (4 * dn));
pio_mode_data |= pio << (4 * dn);
pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data);
pci_read_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
pio_timing_data &= ~(0xFF << timing_shift);
pio_timing_data |= (pio_timings[pio] << timing_shift);
pci_write_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
}
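/*
 * Worked example of the shift arithmetic above (illustrative): for port 1,
 * device 0 we get dn = 2, so the mode field sits at bits 8-10 of
 * ATIIXP_IDE_PIO_MODE, and timing_shift = 16 + 8 * (0 ^ 1) = 24, so the
 * timing byte sits at bits 24-31 of ATIIXP_IDE_PIO_TIMING. Each device
 * therefore owns one nibble of the mode word and one byte of the timing
 * dword.
 */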
/**
* atiixp_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the PIO mode setup. We use a shared helper for this
* as the DMA setup must also adjust the PIO timing information.
*/
static void atiixp_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
unsigned long flags;
spin_lock_irqsave(&atiixp_lock, flags);
atiixp_set_pio_timing(ap, adev, adev->pio_mode - XFER_PIO_0);
spin_unlock_irqrestore(&atiixp_lock, flags);
}
/**
* atiixp_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the DMA mode setup. We use timing tables for most
* modes but must tune an appropriate PIO mode to match.
*/
static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static const u8 mwdma_timings[3] = { 0x77, 0x21, 0x20 };
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dma = adev->dma_mode;
int dn = 2 * ap->port_no + adev->devno;
int wanted_pio;
unsigned long flags;
spin_lock_irqsave(&atiixp_lock, flags);
if (adev->dma_mode >= XFER_UDMA_0) {
u16 udma_mode_data;
dma -= XFER_UDMA_0;
pci_read_config_word(pdev, ATIIXP_IDE_UDMA_MODE, &udma_mode_data);
udma_mode_data &= ~(0x7 << (4 * dn));
udma_mode_data |= dma << (4 * dn);
pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data);
} else {
int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
u32 mwdma_timing_data;
dma -= XFER_MW_DMA_0;
pci_read_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING,
&mwdma_timing_data);
mwdma_timing_data &= ~(0xFF << timing_shift);
mwdma_timing_data |= (mwdma_timings[dma] << timing_shift);
pci_write_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING,
mwdma_timing_data);
}
/*
* We must now look at the PIO mode situation. We may need to
* adjust the PIO mode to keep the timings acceptable
*/
if (adev->dma_mode >= XFER_MW_DMA_2)
wanted_pio = 4;
else if (adev->dma_mode == XFER_MW_DMA_1)
wanted_pio = 3;
else if (adev->dma_mode == XFER_MW_DMA_0)
wanted_pio = 0;
else
BUG();
if (adev->pio_mode != wanted_pio)
atiixp_set_pio_timing(ap, adev, wanted_pio);
spin_unlock_irqrestore(&atiixp_lock, flags);
}
/**
* atiixp_bmdma_start - DMA start callback
* @qc: Command in progress
*
* When DMA begins we need to ensure that the UDMA control
* register for the channel is correctly set.
*
* Note: The host lock held by the libata layer protects
* us from two channels both trying to set DMA bits at once
*/
static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = (2 * ap->port_no) + adev->devno;
u16 tmp16;
pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
if (ata_using_udma(adev))
tmp16 |= (1 << dn);
else
tmp16 &= ~(1 << dn);
pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
ata_bmdma_start(qc);
}
/**
* atiixp_bmdma_stop - DMA stop callback
* @qc: Command in progress
*
* DMA has completed. Clear the UDMA flag as the next operations will
* be PIO ones not UDMA data transfer.
*
* Note: The host lock held by the libata layer protects
* us from two channels both trying to set DMA bits at once
*/
static void atiixp_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = (2 * ap->port_no) + qc->dev->devno;
u16 tmp16;
pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
tmp16 &= ~(1 << dn);
pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
ata_bmdma_stop(qc);
}
static const struct scsi_host_template atiixp_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
};
static struct ata_port_operations atiixp_port_ops = {
.inherits = &ata_bmdma_port_ops,
.qc_prep = ata_bmdma_dumb_qc_prep,
.bmdma_start = atiixp_bmdma_start,
.bmdma_stop = atiixp_bmdma_stop,
.prereset = atiixp_prereset,
.cable_detect = atiixp_cable_detect,
.set_piomode = atiixp_set_piomode,
.set_dmamode = atiixp_set_dmamode,
};
static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA12_ONLY,
.udma_mask = ATA_UDMA5,
.port_ops = &atiixp_port_ops
};
const struct ata_port_info *ppi[] = { &info, &info };
/* SB600 doesn't have secondary port wired */
if (pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE)
ppi[1] = &ata_dummy_port_info;
return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
ATA_HOST_PARALLEL_SCAN);
}
static const struct pci_device_id atiixp[] = {
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), },
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), },
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_HUDSON2_IDE), },
{ },
};
static struct pci_driver atiixp_pci_driver = {
.name = DRV_NAME,
.id_table = atiixp,
.probe = atiixp_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.resume = ata_pci_device_resume,
.suspend = ata_pci_device_suspend,
#endif
};
module_pci_driver(atiixp_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for ATI IXP200/300/400");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, atiixp);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_atiixp.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Renesas R-Car SATA driver
*
* Author: Vladimir Barinov <[email protected]>
* Copyright (C) 2013-2015 Cogent Embedded, Inc.
* Copyright (C) 2013-2015 Renesas Solutions Corp.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#define DRV_NAME "sata_rcar"
/* SH-Navi2G/ATAPI module compatible control registers */
#define ATAPI_CONTROL1_REG 0x180
#define ATAPI_STATUS_REG 0x184
#define ATAPI_INT_ENABLE_REG 0x188
#define ATAPI_DTB_ADR_REG 0x198
#define ATAPI_DMA_START_ADR_REG 0x19C
#define ATAPI_DMA_TRANS_CNT_REG 0x1A0
#define ATAPI_CONTROL2_REG 0x1A4
#define ATAPI_SIG_ST_REG 0x1B0
#define ATAPI_BYTE_SWAP_REG 0x1BC
/* ATAPI control 1 register (ATAPI_CONTROL1) bits */
#define ATAPI_CONTROL1_ISM BIT(16)
#define ATAPI_CONTROL1_DTA32M BIT(11)
#define ATAPI_CONTROL1_RESET BIT(7)
#define ATAPI_CONTROL1_DESE BIT(3)
#define ATAPI_CONTROL1_RW BIT(2)
#define ATAPI_CONTROL1_STOP BIT(1)
#define ATAPI_CONTROL1_START BIT(0)
/* ATAPI status register (ATAPI_STATUS) bits */
#define ATAPI_STATUS_SATAINT BIT(11)
#define ATAPI_STATUS_DNEND BIT(6)
#define ATAPI_STATUS_DEVTRM BIT(5)
#define ATAPI_STATUS_DEVINT BIT(4)
#define ATAPI_STATUS_ERR BIT(2)
#define ATAPI_STATUS_NEND BIT(1)
#define ATAPI_STATUS_ACT BIT(0)
/* Interrupt enable register (ATAPI_INT_ENABLE) bits */
#define ATAPI_INT_ENABLE_SATAINT BIT(11)
#define ATAPI_INT_ENABLE_DNEND BIT(6)
#define ATAPI_INT_ENABLE_DEVTRM BIT(5)
#define ATAPI_INT_ENABLE_DEVINT BIT(4)
#define ATAPI_INT_ENABLE_ERR BIT(2)
#define ATAPI_INT_ENABLE_NEND BIT(1)
#define ATAPI_INT_ENABLE_ACT BIT(0)
/* Access control registers for physical layer control register */
#define SATAPHYADDR_REG 0x200
#define SATAPHYWDATA_REG 0x204
#define SATAPHYACCEN_REG 0x208
#define SATAPHYRESET_REG 0x20C
#define SATAPHYRDATA_REG 0x210
#define SATAPHYACK_REG 0x214
/* Physical layer control address command register (SATAPHYADDR) bits */
#define SATAPHYADDR_PHYRATEMODE BIT(10)
#define SATAPHYADDR_PHYCMD_READ BIT(9)
#define SATAPHYADDR_PHYCMD_WRITE BIT(8)
/* Physical layer control enable register (SATAPHYACCEN) bits */
#define SATAPHYACCEN_PHYLANE BIT(0)
/* Physical layer control reset register (SATAPHYRESET) bits */
#define SATAPHYRESET_PHYRST BIT(1)
#define SATAPHYRESET_PHYSRES BIT(0)
/* Physical layer control acknowledge register (SATAPHYACK) bits */
#define SATAPHYACK_PHYACK BIT(0)
/* Serial-ATA HOST control registers */
#define BISTCONF_REG 0x102C
#define SDATA_REG 0x1100
#define SSDEVCON_REG 0x1204
#define SCRSSTS_REG 0x1400
#define SCRSERR_REG 0x1404
#define SCRSCON_REG 0x1408
#define SCRSACT_REG 0x140C
#define SATAINTSTAT_REG 0x1508
#define SATAINTMASK_REG 0x150C
/* SATA INT status register (SATAINTSTAT) bits */
#define SATAINTSTAT_SERR BIT(3)
#define SATAINTSTAT_ATA BIT(0)
/* SATA INT mask register (SATAINTSTAT) bits */
#define SATAINTMASK_SERRMSK BIT(3)
#define SATAINTMASK_ERRMSK BIT(2)
#define SATAINTMASK_ERRCRTMSK BIT(1)
#define SATAINTMASK_ATAMSK BIT(0)
#define SATAINTMASK_ALL_GEN1 0x7ff
#define SATAINTMASK_ALL_GEN2 0xfff
#define SATA_RCAR_INT_MASK (SATAINTMASK_SERRMSK | \
SATAINTMASK_ATAMSK)
/* Physical Layer Control Registers */
#define SATAPCTLR1_REG 0x43
#define SATAPCTLR2_REG 0x52
#define SATAPCTLR3_REG 0x5A
#define SATAPCTLR4_REG 0x60
/* Descriptor table word 0 bit (when DTA32M = 1) */
#define SATA_RCAR_DTEND BIT(0)
#define SATA_RCAR_DMA_BOUNDARY 0x1FFFFFFFUL
/* Gen2 Physical Layer Control Registers */
#define RCAR_GEN2_PHY_CTL1_REG 0x1704
#define RCAR_GEN2_PHY_CTL1 0x34180002
#define RCAR_GEN2_PHY_CTL1_SS 0xC180 /* Spread Spectrum */
#define RCAR_GEN2_PHY_CTL2_REG 0x170C
#define RCAR_GEN2_PHY_CTL2 0x00002303
#define RCAR_GEN2_PHY_CTL3_REG 0x171C
#define RCAR_GEN2_PHY_CTL3 0x000B0194
#define RCAR_GEN2_PHY_CTL4_REG 0x1724
#define RCAR_GEN2_PHY_CTL4 0x00030994
#define RCAR_GEN2_PHY_CTL5_REG 0x1740
#define RCAR_GEN2_PHY_CTL5 0x03004001
#define RCAR_GEN2_PHY_CTL5_DC BIT(1) /* DC connection */
#define RCAR_GEN2_PHY_CTL5_TR BIT(2) /* Termination Resistor */
enum sata_rcar_type {
RCAR_GEN1_SATA,
RCAR_GEN2_SATA,
RCAR_GEN3_SATA,
RCAR_R8A7790_ES1_SATA,
};
struct sata_rcar_priv {
void __iomem *base;
u32 sataint_mask;
enum sata_rcar_type type;
};
static void sata_rcar_gen1_phy_preinit(struct sata_rcar_priv *priv)
{
void __iomem *base = priv->base;
/* idle state */
iowrite32(0, base + SATAPHYADDR_REG);
/* reset */
iowrite32(SATAPHYRESET_PHYRST, base + SATAPHYRESET_REG);
udelay(10);
/* deassert reset */
iowrite32(0, base + SATAPHYRESET_REG);
}
static void sata_rcar_gen1_phy_write(struct sata_rcar_priv *priv, u16 reg,
u32 val, int group)
{
void __iomem *base = priv->base;
int timeout;
/* deassert reset */
iowrite32(0, base + SATAPHYRESET_REG);
/* lane 1 */
iowrite32(SATAPHYACCEN_PHYLANE, base + SATAPHYACCEN_REG);
/* write phy register value */
iowrite32(val, base + SATAPHYWDATA_REG);
/* set register group */
if (group)
reg |= SATAPHYADDR_PHYRATEMODE;
/* write command */
iowrite32(SATAPHYADDR_PHYCMD_WRITE | reg, base + SATAPHYADDR_REG);
/* wait for ack */
for (timeout = 0; timeout < 100; timeout++) {
val = ioread32(base + SATAPHYACK_REG);
if (val & SATAPHYACK_PHYACK)
break;
}
if (timeout >= 100)
pr_err("%s timeout\n", __func__);
/* idle state */
iowrite32(0, base + SATAPHYADDR_REG);
}
static void sata_rcar_gen1_phy_init(struct sata_rcar_priv *priv)
{
sata_rcar_gen1_phy_preinit(priv);
sata_rcar_gen1_phy_write(priv, SATAPCTLR1_REG, 0x00200188, 0);
sata_rcar_gen1_phy_write(priv, SATAPCTLR1_REG, 0x00200188, 1);
sata_rcar_gen1_phy_write(priv, SATAPCTLR3_REG, 0x0000A061, 0);
sata_rcar_gen1_phy_write(priv, SATAPCTLR2_REG, 0x20000000, 0);
sata_rcar_gen1_phy_write(priv, SATAPCTLR2_REG, 0x20000000, 1);
sata_rcar_gen1_phy_write(priv, SATAPCTLR4_REG, 0x28E80000, 0);
}
static void sata_rcar_gen2_phy_init(struct sata_rcar_priv *priv)
{
void __iomem *base = priv->base;
iowrite32(RCAR_GEN2_PHY_CTL1, base + RCAR_GEN2_PHY_CTL1_REG);
iowrite32(RCAR_GEN2_PHY_CTL2, base + RCAR_GEN2_PHY_CTL2_REG);
iowrite32(RCAR_GEN2_PHY_CTL3, base + RCAR_GEN2_PHY_CTL3_REG);
iowrite32(RCAR_GEN2_PHY_CTL4, base + RCAR_GEN2_PHY_CTL4_REG);
iowrite32(RCAR_GEN2_PHY_CTL5 | RCAR_GEN2_PHY_CTL5_DC |
RCAR_GEN2_PHY_CTL5_TR, base + RCAR_GEN2_PHY_CTL5_REG);
}
static void sata_rcar_freeze(struct ata_port *ap)
{
struct sata_rcar_priv *priv = ap->host->private_data;
/* mask */
iowrite32(priv->sataint_mask, priv->base + SATAINTMASK_REG);
ata_sff_freeze(ap);
}
static void sata_rcar_thaw(struct ata_port *ap)
{
struct sata_rcar_priv *priv = ap->host->private_data;
void __iomem *base = priv->base;
/* ack */
iowrite32(~(u32)SATA_RCAR_INT_MASK, base + SATAINTSTAT_REG);
ata_sff_thaw(ap);
/* unmask */
iowrite32(priv->sataint_mask & ~SATA_RCAR_INT_MASK, base + SATAINTMASK_REG);
}
/* The data port is mapped on a 32-bit boundary but carries 16 bits per access */
static void sata_rcar_ioread16_rep(void __iomem *reg, void *buffer, int count)
{
u16 *ptr = buffer;
while (count--) {
u16 data = ioread32(reg);
*ptr++ = data;
}
}
static void sata_rcar_iowrite16_rep(void __iomem *reg, void *buffer, int count)
{
const u16 *ptr = buffer;
while (count--)
iowrite32(*ptr++, reg);
}
static u8 sata_rcar_check_status(struct ata_port *ap)
{
return ioread32(ap->ioaddr.status_addr);
}
static u8 sata_rcar_check_altstatus(struct ata_port *ap)
{
return ioread32(ap->ioaddr.altstatus_addr);
}
static void sata_rcar_set_devctl(struct ata_port *ap, u8 ctl)
{
iowrite32(ctl, ap->ioaddr.ctl_addr);
}
static void sata_rcar_dev_select(struct ata_port *ap, unsigned int device)
{
iowrite32(ATA_DEVICE_OBS, ap->ioaddr.device_addr);
ata_sff_pause(ap); /* needed; also flushes, for mmio */
}
static bool sata_rcar_ata_devchk(struct ata_port *ap, unsigned int device)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
u8 nsect, lbal;
sata_rcar_dev_select(ap, device);
iowrite32(0x55, ioaddr->nsect_addr);
iowrite32(0xaa, ioaddr->lbal_addr);
iowrite32(0xaa, ioaddr->nsect_addr);
iowrite32(0x55, ioaddr->lbal_addr);
iowrite32(0x55, ioaddr->nsect_addr);
iowrite32(0xaa, ioaddr->lbal_addr);
nsect = ioread32(ioaddr->nsect_addr);
lbal = ioread32(ioaddr->lbal_addr);
if (nsect == 0x55 && lbal == 0xaa)
return true; /* found a device */
return false; /* nothing found */
}
static int sata_rcar_wait_after_reset(struct ata_link *link,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
ata_msleep(ap, ATA_WAIT_AFTER_RESET);
return ata_sff_wait_ready(link, deadline);
}
static int sata_rcar_bus_softreset(struct ata_port *ap, unsigned long deadline)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
/* Software reset; this causes device 0 to be selected */
iowrite32(ap->ctl, ioaddr->ctl_addr);
udelay(20);
iowrite32(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
udelay(20);
iowrite32(ap->ctl, ioaddr->ctl_addr);
ap->last_ctl = ap->ctl;
/* wait for the port to become ready */
return sata_rcar_wait_after_reset(&ap->link, deadline);
}
static int sata_rcar_softreset(struct ata_link *link, unsigned int *classes,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
unsigned int devmask = 0;
int rc;
u8 err;
/* determine if device 0 is present */
if (sata_rcar_ata_devchk(ap, 0))
devmask |= 1 << 0;
/* issue bus reset */
rc = sata_rcar_bus_softreset(ap, deadline);
/* if link is occupied, -ENODEV too is an error */
if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
ata_link_err(link, "SRST failed (errno=%d)\n", rc);
return rc;
}
/* determine by signature whether we have ATA or ATAPI devices */
classes[0] = ata_sff_dev_classify(&link->device[0], devmask, &err);
return 0;
}
static void sata_rcar_tf_load(struct ata_port *ap,
const struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
if (tf->ctl != ap->last_ctl) {
iowrite32(tf->ctl, ioaddr->ctl_addr);
ap->last_ctl = tf->ctl;
ata_wait_idle(ap);
}
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
iowrite32(tf->hob_feature, ioaddr->feature_addr);
iowrite32(tf->hob_nsect, ioaddr->nsect_addr);
iowrite32(tf->hob_lbal, ioaddr->lbal_addr);
iowrite32(tf->hob_lbam, ioaddr->lbam_addr);
iowrite32(tf->hob_lbah, ioaddr->lbah_addr);
}
if (is_addr) {
iowrite32(tf->feature, ioaddr->feature_addr);
iowrite32(tf->nsect, ioaddr->nsect_addr);
iowrite32(tf->lbal, ioaddr->lbal_addr);
iowrite32(tf->lbam, ioaddr->lbam_addr);
iowrite32(tf->lbah, ioaddr->lbah_addr);
}
if (tf->flags & ATA_TFLAG_DEVICE)
iowrite32(tf->device, ioaddr->device_addr);
ata_wait_idle(ap);
}
static void sata_rcar_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
tf->status = sata_rcar_check_status(ap);
tf->error = ioread32(ioaddr->error_addr);
tf->nsect = ioread32(ioaddr->nsect_addr);
tf->lbal = ioread32(ioaddr->lbal_addr);
tf->lbam = ioread32(ioaddr->lbam_addr);
tf->lbah = ioread32(ioaddr->lbah_addr);
tf->device = ioread32(ioaddr->device_addr);
if (tf->flags & ATA_TFLAG_LBA48) {
iowrite32(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
tf->hob_feature = ioread32(ioaddr->error_addr);
tf->hob_nsect = ioread32(ioaddr->nsect_addr);
tf->hob_lbal = ioread32(ioaddr->lbal_addr);
tf->hob_lbam = ioread32(ioaddr->lbam_addr);
tf->hob_lbah = ioread32(ioaddr->lbah_addr);
iowrite32(tf->ctl, ioaddr->ctl_addr);
ap->last_ctl = tf->ctl;
}
}
static void sata_rcar_exec_command(struct ata_port *ap,
const struct ata_taskfile *tf)
{
iowrite32(tf->command, ap->ioaddr.command_addr);
ata_sff_pause(ap);
}
static unsigned int sata_rcar_data_xfer(struct ata_queued_cmd *qc,
unsigned char *buf,
unsigned int buflen, int rw)
{
struct ata_port *ap = qc->dev->link->ap;
void __iomem *data_addr = ap->ioaddr.data_addr;
unsigned int words = buflen >> 1;
/* Transfer multiple of 2 bytes */
if (rw == READ)
sata_rcar_ioread16_rep(data_addr, buf, words);
else
sata_rcar_iowrite16_rep(data_addr, buf, words);
/* Transfer trailing byte, if any. */
if (unlikely(buflen & 0x01)) {
unsigned char pad[2] = { };
/* Point buf to the tail of buffer */
buf += buflen - 1;
/*
* Use io*16_rep() accessors here as well to avoid pointlessly
* swapping bytes to and from on the big endian machines...
*/
if (rw == READ) {
sata_rcar_ioread16_rep(data_addr, pad, 1);
*buf = pad[0];
} else {
pad[0] = *buf;
sata_rcar_iowrite16_rep(data_addr, pad, 1);
}
words++;
}
return words << 1;
}
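/*
 * Worked example (illustrative): a 7-byte PIO transfer is handled as three
 * 16-bit accesses for bytes 0-5, then the trailing byte goes through the
 * pad[] buffer as one more 16-bit access, so the function reports
 * 4 << 1 = 8 bytes moved on the wire.
 */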
static void sata_rcar_drain_fifo(struct ata_queued_cmd *qc)
{
int count;
struct ata_port *ap;
/* We only need to flush incoming data when a command was running */
if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
return;
ap = qc->ap;
/* Drain up to 64K of data before we give up this recovery method */
for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) &&
count < 65536; count += 2)
ioread32(ap->ioaddr.data_addr);
if (count)
ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
}
static int sata_rcar_scr_read(struct ata_link *link, unsigned int sc_reg,
u32 *val)
{
if (sc_reg > SCR_ACTIVE)
return -EINVAL;
*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg << 2));
return 0;
}
static int sata_rcar_scr_write(struct ata_link *link, unsigned int sc_reg,
u32 val)
{
if (sc_reg > SCR_ACTIVE)
return -EINVAL;
iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg << 2));
return 0;
}
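/*
 * Worked example of the SCR offset math above (illustrative): scr_addr is
 * set to base + SCRSSTS_REG (0x1400) in sata_rcar_setup_port(), and the
 * SCR registers are laid out 4 bytes apart, so SCR_ERROR (1) maps to
 * 0x1400 + (1 << 2) = 0x1404 = SCRSERR_REG and SCR_CONTROL (2) to
 * 0x1408 = SCRSCON_REG.
 */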
static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_bmdma_prd *prd = ap->bmdma_prd;
struct scatterlist *sg;
unsigned int si;
for_each_sg(qc->sg, sg, qc->n_elem, si) {
u32 addr, sg_len;
/*
* Note: h/w doesn't support 64-bit, so we unconditionally
* truncate dma_addr_t to u32.
*/
addr = (u32)sg_dma_address(sg);
sg_len = sg_dma_len(sg);
prd[si].addr = cpu_to_le32(addr);
prd[si].flags_len = cpu_to_le32(sg_len);
}
/* end-of-table flag */
prd[si - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND);
}
static enum ata_completion_errors sata_rcar_qc_prep(struct ata_queued_cmd *qc)
{
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
return AC_ERR_OK;
sata_rcar_bmdma_fill_sg(qc);
return AC_ERR_OK;
}
static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
unsigned int rw = qc->tf.flags & ATA_TFLAG_WRITE;
struct sata_rcar_priv *priv = ap->host->private_data;
void __iomem *base = priv->base;
u32 dmactl;
/* load PRD table addr. */
mb(); /* make sure PRD table writes are visible to controller */
iowrite32(ap->bmdma_prd_dma, base + ATAPI_DTB_ADR_REG);
/* specify data direction, triple-check start bit is clear */
dmactl = ioread32(base + ATAPI_CONTROL1_REG);
dmactl &= ~(ATAPI_CONTROL1_RW | ATAPI_CONTROL1_STOP);
if (dmactl & ATAPI_CONTROL1_START) {
dmactl &= ~ATAPI_CONTROL1_START;
dmactl |= ATAPI_CONTROL1_STOP;
}
if (!rw)
dmactl |= ATAPI_CONTROL1_RW;
iowrite32(dmactl, base + ATAPI_CONTROL1_REG);
/* issue r/w command */
ap->ops->sff_exec_command(ap, &qc->tf);
}
static void sata_rcar_bmdma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct sata_rcar_priv *priv = ap->host->private_data;
void __iomem *base = priv->base;
u32 dmactl;
/* start host DMA transaction */
dmactl = ioread32(base + ATAPI_CONTROL1_REG);
dmactl &= ~ATAPI_CONTROL1_STOP;
dmactl |= ATAPI_CONTROL1_START;
iowrite32(dmactl, base + ATAPI_CONTROL1_REG);
}
static void sata_rcar_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct sata_rcar_priv *priv = ap->host->private_data;
void __iomem *base = priv->base;
u32 dmactl;
/* force termination of DMA transfer if active */
dmactl = ioread32(base + ATAPI_CONTROL1_REG);
if (dmactl & ATAPI_CONTROL1_START) {
dmactl &= ~ATAPI_CONTROL1_START;
dmactl |= ATAPI_CONTROL1_STOP;
iowrite32(dmactl, base + ATAPI_CONTROL1_REG);
}
/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
ata_sff_dma_pause(ap);
}
static u8 sata_rcar_bmdma_status(struct ata_port *ap)
{
struct sata_rcar_priv *priv = ap->host->private_data;
u8 host_stat = 0;
u32 status;
status = ioread32(priv->base + ATAPI_STATUS_REG);
if (status & ATAPI_STATUS_DEVINT)
host_stat |= ATA_DMA_INTR;
if (status & ATAPI_STATUS_ACT)
host_stat |= ATA_DMA_ACTIVE;
return host_stat;
}
static const struct scsi_host_template sata_rcar_sht = {
ATA_BASE_SHT(DRV_NAME),
/*
* This controller allows transfer chunks of up to 512MB that may cross
* 64KB boundaries, so the DMA limits are more relaxed than for standard
* ATA SFF.
*/
.sg_tablesize = ATA_MAX_PRD,
.dma_boundary = SATA_RCAR_DMA_BOUNDARY,
};
static struct ata_port_operations sata_rcar_port_ops = {
.inherits = &ata_bmdma_port_ops,
.freeze = sata_rcar_freeze,
.thaw = sata_rcar_thaw,
.softreset = sata_rcar_softreset,
.scr_read = sata_rcar_scr_read,
.scr_write = sata_rcar_scr_write,
.sff_dev_select = sata_rcar_dev_select,
.sff_set_devctl = sata_rcar_set_devctl,
.sff_check_status = sata_rcar_check_status,
.sff_check_altstatus = sata_rcar_check_altstatus,
.sff_tf_load = sata_rcar_tf_load,
.sff_tf_read = sata_rcar_tf_read,
.sff_exec_command = sata_rcar_exec_command,
.sff_data_xfer = sata_rcar_data_xfer,
.sff_drain_fifo = sata_rcar_drain_fifo,
.qc_prep = sata_rcar_qc_prep,
.bmdma_setup = sata_rcar_bmdma_setup,
.bmdma_start = sata_rcar_bmdma_start,
.bmdma_stop = sata_rcar_bmdma_stop,
.bmdma_status = sata_rcar_bmdma_status,
};
static void sata_rcar_serr_interrupt(struct ata_port *ap)
{
struct sata_rcar_priv *priv = ap->host->private_data;
struct ata_eh_info *ehi = &ap->link.eh_info;
int freeze = 0;
u32 serror;
serror = ioread32(priv->base + SCRSERR_REG);
if (!serror)
return;
ata_port_dbg(ap, "SError @host_intr: 0x%x\n", serror);
/* first, analyze and record host port events */
ata_ehi_clear_desc(ehi);
if (serror & (SERR_DEV_XCHG | SERR_PHYRDY_CHG)) {
/* Setup a soft-reset EH action */
ata_ehi_hotplugged(ehi);
ata_ehi_push_desc(ehi, "%s", "hotplug");
freeze = serror & SERR_COMM_WAKE ? 0 : 1;
}
/* freeze or abort */
if (freeze)
ata_port_freeze(ap);
else
ata_port_abort(ap);
}
static void sata_rcar_ata_interrupt(struct ata_port *ap)
{
struct ata_queued_cmd *qc;
int handled = 0;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc)
handled |= ata_bmdma_port_intr(ap, qc);
/* be sure to clear ATA interrupt */
if (!handled)
sata_rcar_check_status(ap);
}
static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct sata_rcar_priv *priv = host->private_data;
void __iomem *base = priv->base;
unsigned int handled = 0;
struct ata_port *ap;
u32 sataintstat;
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
sataintstat = ioread32(base + SATAINTSTAT_REG);
sataintstat &= SATA_RCAR_INT_MASK;
if (!sataintstat)
goto done;
/* ack */
iowrite32(~sataintstat & priv->sataint_mask, base + SATAINTSTAT_REG);
ap = host->ports[0];
if (sataintstat & SATAINTSTAT_ATA)
sata_rcar_ata_interrupt(ap);
if (sataintstat & SATAINTSTAT_SERR)
sata_rcar_serr_interrupt(ap);
handled = 1;
done:
spin_unlock_irqrestore(&host->lock, flags);
return IRQ_RETVAL(handled);
}
static void sata_rcar_setup_port(struct ata_host *host)
{
struct ata_port *ap = host->ports[0];
struct ata_ioports *ioaddr = &ap->ioaddr;
struct sata_rcar_priv *priv = host->private_data;
void __iomem *base = priv->base;
ap->ops = &sata_rcar_port_ops;
ap->pio_mask = ATA_PIO4;
ap->udma_mask = ATA_UDMA6;
ap->flags |= ATA_FLAG_SATA;
if (priv->type == RCAR_R8A7790_ES1_SATA)
ap->flags |= ATA_FLAG_NO_DIPM;
ioaddr->cmd_addr = base + SDATA_REG;
ioaddr->ctl_addr = base + SSDEVCON_REG;
ioaddr->scr_addr = base + SCRSSTS_REG;
ioaddr->altstatus_addr = ioaddr->ctl_addr;
ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << 2);
ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << 2);
ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << 2);
ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << 2);
ioaddr->lbal_addr = ioaddr->cmd_addr + (ATA_REG_LBAL << 2);
ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << 2);
ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << 2);
ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << 2);
ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << 2);
ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2);
}
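/*
 * Worked example of the register spacing above (illustrative): the taskfile
 * registers sit 4 bytes apart off SDATA_REG (0x1100), so with
 * ATA_REG_DATA = 0 and ATA_REG_ERR = 1 the data register lands at 0x1100
 * and the error register at 0x1104, while the device control register
 * lives separately at SSDEVCON_REG (0x1204).
 */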
static void sata_rcar_init_module(struct sata_rcar_priv *priv)
{
void __iomem *base = priv->base;
u32 val;
/* SATA-IP reset state */
val = ioread32(base + ATAPI_CONTROL1_REG);
val |= ATAPI_CONTROL1_RESET;
iowrite32(val, base + ATAPI_CONTROL1_REG);
/* ISM mode, PRD mode, DTEND flag at bit 0 */
val = ioread32(base + ATAPI_CONTROL1_REG);
val |= ATAPI_CONTROL1_ISM;
val |= ATAPI_CONTROL1_DESE;
val |= ATAPI_CONTROL1_DTA32M;
iowrite32(val, base + ATAPI_CONTROL1_REG);
/* Release the SATA-IP from the reset state */
val = ioread32(base + ATAPI_CONTROL1_REG);
val &= ~ATAPI_CONTROL1_RESET;
iowrite32(val, base + ATAPI_CONTROL1_REG);
/* ack and mask */
iowrite32(0, base + SATAINTSTAT_REG);
iowrite32(priv->sataint_mask, base + SATAINTMASK_REG);
/* enable interrupts */
iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG);
}
static void sata_rcar_init_controller(struct ata_host *host)
{
struct sata_rcar_priv *priv = host->private_data;
priv->sataint_mask = SATAINTMASK_ALL_GEN2;
/* reset and setup phy */
switch (priv->type) {
case RCAR_GEN1_SATA:
priv->sataint_mask = SATAINTMASK_ALL_GEN1;
sata_rcar_gen1_phy_init(priv);
break;
case RCAR_GEN2_SATA:
case RCAR_R8A7790_ES1_SATA:
sata_rcar_gen2_phy_init(priv);
break;
case RCAR_GEN3_SATA:
break;
default:
dev_warn(host->dev, "SATA phy is not initialized\n");
break;
}
sata_rcar_init_module(priv);
}
static const struct of_device_id sata_rcar_match[] = {
{
/* Deprecated by "renesas,sata-r8a7779" */
.compatible = "renesas,rcar-sata",
.data = (void *)RCAR_GEN1_SATA,
},
{
.compatible = "renesas,sata-r8a7779",
.data = (void *)RCAR_GEN1_SATA,
},
{
.compatible = "renesas,sata-r8a7790",
.data = (void *)RCAR_GEN2_SATA
},
{
.compatible = "renesas,sata-r8a7790-es1",
.data = (void *)RCAR_R8A7790_ES1_SATA
},
{
.compatible = "renesas,sata-r8a7791",
.data = (void *)RCAR_GEN2_SATA
},
{
.compatible = "renesas,sata-r8a7793",
.data = (void *)RCAR_GEN2_SATA
},
{
.compatible = "renesas,sata-r8a7795",
.data = (void *)RCAR_GEN3_SATA
},
{
.compatible = "renesas,rcar-gen2-sata",
.data = (void *)RCAR_GEN2_SATA
},
{
.compatible = "renesas,rcar-gen3-sata",
.data = (void *)RCAR_GEN3_SATA
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sata_rcar_match);
static int sata_rcar_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ata_host *host;
struct sata_rcar_priv *priv;
int irq, ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
priv = devm_kzalloc(dev, sizeof(struct sata_rcar_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->type = (unsigned long)of_device_get_match_data(dev);
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto err_pm_put;
host = ata_host_alloc(dev, 1);
if (!host) {
ret = -ENOMEM;
goto err_pm_put;
}
host->private_data = priv;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto err_pm_put;
}
/* setup port */
sata_rcar_setup_port(host);
/* initialize host controller */
sata_rcar_init_controller(host);
ret = ata_host_activate(host, irq, sata_rcar_interrupt, 0,
&sata_rcar_sht);
if (!ret)
return 0;
err_pm_put:
pm_runtime_put(dev);
pm_runtime_disable(dev);
return ret;
}
static void sata_rcar_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct sata_rcar_priv *priv = host->private_data;
void __iomem *base = priv->base;
ata_host_detach(host);
/* disable interrupts */
iowrite32(0, base + ATAPI_INT_ENABLE_REG);
/* ack and mask */
iowrite32(0, base + SATAINTSTAT_REG);
iowrite32(priv->sataint_mask, base + SATAINTMASK_REG);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
}
#ifdef CONFIG_PM_SLEEP
static int sata_rcar_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct sata_rcar_priv *priv = host->private_data;
void __iomem *base = priv->base;
ata_host_suspend(host, PMSG_SUSPEND);
/* disable interrupts */
iowrite32(0, base + ATAPI_INT_ENABLE_REG);
/* mask */
iowrite32(priv->sataint_mask, base + SATAINTMASK_REG);
pm_runtime_put(dev);
return 0;
}
static int sata_rcar_resume(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct sata_rcar_priv *priv = host->private_data;
void __iomem *base = priv->base;
int ret;
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put(dev);
return ret;
}
if (priv->type == RCAR_GEN3_SATA) {
sata_rcar_init_module(priv);
} else {
/* ack and mask */
iowrite32(0, base + SATAINTSTAT_REG);
iowrite32(priv->sataint_mask, base + SATAINTMASK_REG);
/* enable interrupts */
iowrite32(ATAPI_INT_ENABLE_SATAINT,
base + ATAPI_INT_ENABLE_REG);
}
ata_host_resume(host);
return 0;
}
static int sata_rcar_restore(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put(dev);
return ret;
}
sata_rcar_setup_port(host);
/* initialize host controller */
sata_rcar_init_controller(host);
ata_host_resume(host);
return 0;
}
static const struct dev_pm_ops sata_rcar_pm_ops = {
.suspend = sata_rcar_suspend,
.resume = sata_rcar_resume,
.freeze = sata_rcar_suspend,
.thaw = sata_rcar_resume,
.poweroff = sata_rcar_suspend,
.restore = sata_rcar_restore,
};
#endif
static struct platform_driver sata_rcar_driver = {
.probe = sata_rcar_probe,
.remove_new = sata_rcar_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = sata_rcar_match,
#ifdef CONFIG_PM_SLEEP
.pm = &sata_rcar_pm_ops,
#endif
},
};
module_platform_driver(sata_rcar_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vladimir Barinov");
MODULE_DESCRIPTION("Renesas R-Car SATA controller low level driver");
| linux-master | drivers/ata/sata_rcar.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 STMicroelectronics Limited
*
* Authors: Francesco Virlinzi <[email protected]>
* Alexandre Torgue <[email protected]>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/ahci_platform.h>
#include <linux/libata.h>
#include <linux/reset.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include "ahci.h"
#define DRV_NAME "st_ahci"
#define ST_AHCI_OOBR 0xbc
#define ST_AHCI_OOBR_WE BIT(31)
#define ST_AHCI_OOBR_CWMIN_SHIFT 24
#define ST_AHCI_OOBR_CWMAX_SHIFT 16
#define ST_AHCI_OOBR_CIMIN_SHIFT 8
#define ST_AHCI_OOBR_CIMAX_SHIFT 0
struct st_ahci_drv_data {
struct platform_device *ahci;
struct reset_control *pwr;
struct reset_control *sw_rst;
struct reset_control *pwr_rst;
};
static void st_ahci_configure_oob(void __iomem *mmio)
{
unsigned long old_val, new_val;
new_val = (0x02 << ST_AHCI_OOBR_CWMIN_SHIFT) |
(0x04 << ST_AHCI_OOBR_CWMAX_SHIFT) |
(0x08 << ST_AHCI_OOBR_CIMIN_SHIFT) |
(0x0C << ST_AHCI_OOBR_CIMAX_SHIFT);
old_val = readl(mmio + ST_AHCI_OOBR);
writel(old_val | ST_AHCI_OOBR_WE, mmio + ST_AHCI_OOBR);
writel(new_val | ST_AHCI_OOBR_WE, mmio + ST_AHCI_OOBR);
writel(new_val, mmio + ST_AHCI_OOBR);
}
static int st_ahci_deassert_resets(struct ahci_host_priv *hpriv,
struct device *dev)
{
struct st_ahci_drv_data *drv_data = hpriv->plat_data;
int err;
if (drv_data->pwr) {
err = reset_control_deassert(drv_data->pwr);
if (err) {
dev_err(dev, "unable to bring out of pwrdwn\n");
return err;
}
}
if (drv_data->sw_rst) {
err = reset_control_deassert(drv_data->sw_rst);
if (err) {
dev_err(dev, "unable to bring out of sw-rst\n");
return err;
}
}
if (drv_data->pwr_rst) {
err = reset_control_deassert(drv_data->pwr_rst);
if (err) {
dev_err(dev, "unable to bring out of pwr-rst\n");
return err;
}
}
return 0;
}
static void st_ahci_host_stop(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
struct st_ahci_drv_data *drv_data = hpriv->plat_data;
struct device *dev = host->dev;
int err;
if (drv_data->pwr) {
err = reset_control_assert(drv_data->pwr);
if (err)
dev_err(dev, "unable to pwrdwn\n");
}
ahci_platform_disable_resources(hpriv);
}
static int st_ahci_probe_resets(struct ahci_host_priv *hpriv,
struct device *dev)
{
struct st_ahci_drv_data *drv_data = hpriv->plat_data;
drv_data->pwr = devm_reset_control_get(dev, "pwr-dwn");
if (IS_ERR(drv_data->pwr)) {
dev_info(dev, "power reset control not defined\n");
drv_data->pwr = NULL;
}
drv_data->sw_rst = devm_reset_control_get(dev, "sw-rst");
if (IS_ERR(drv_data->sw_rst)) {
dev_info(dev, "soft reset control not defined\n");
drv_data->sw_rst = NULL;
}
drv_data->pwr_rst = devm_reset_control_get(dev, "pwr-rst");
if (IS_ERR(drv_data->pwr_rst)) {
dev_dbg(dev, "power soft reset control not defined\n");
drv_data->pwr_rst = NULL;
}
return st_ahci_deassert_resets(hpriv, dev);
}
static struct ata_port_operations st_ahci_port_ops = {
.inherits = &ahci_platform_ops,
.host_stop = st_ahci_host_stop,
};
static const struct ata_port_info st_ahci_port_info = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &st_ahci_port_ops,
};
static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
static int st_ahci_probe(struct platform_device *pdev)
{
struct st_ahci_drv_data *drv_data;
struct ahci_host_priv *hpriv;
int err;
drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
if (!drv_data)
return -ENOMEM;
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
hpriv->plat_data = drv_data;
err = st_ahci_probe_resets(hpriv, &pdev->dev);
if (err)
return err;
err = ahci_platform_enable_resources(hpriv);
if (err)
return err;
st_ahci_configure_oob(hpriv->mmio);
err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info,
&ahci_platform_sht);
if (err) {
ahci_platform_disable_resources(hpriv);
return err;
}
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int st_ahci_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct st_ahci_drv_data *drv_data = hpriv->plat_data;
int err;
err = ahci_platform_suspend_host(dev);
if (err)
return err;
if (drv_data->pwr) {
err = reset_control_assert(drv_data->pwr);
if (err) {
dev_err(dev, "unable to pwrdwn");
return err;
}
}
ahci_platform_disable_resources(hpriv);
return 0;
}
static int st_ahci_resume(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
int err;
err = ahci_platform_enable_resources(hpriv);
if (err)
return err;
err = st_ahci_deassert_resets(hpriv, dev);
if (err) {
ahci_platform_disable_resources(hpriv);
return err;
}
st_ahci_configure_oob(hpriv->mmio);
return ahci_platform_resume_host(dev);
}
#endif
static SIMPLE_DEV_PM_OPS(st_ahci_pm_ops, st_ahci_suspend, st_ahci_resume);
static const struct of_device_id st_ahci_match[] = {
{ .compatible = "st,ahci", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, st_ahci_match);
static struct platform_driver st_ahci_driver = {
.driver = {
.name = DRV_NAME,
.pm = &st_ahci_pm_ops,
.of_match_table = st_ahci_match,
},
.probe = st_ahci_probe,
.remove_new = ata_platform_remove_one,
};
module_platform_driver(st_ahci_driver);
MODULE_AUTHOR("Alexandre Torgue <[email protected]>");
MODULE_AUTHOR("Francesco Virlinzi <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics SATA AHCI Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/ata/ahci_st.c |
/*
* Generic platform device PATA driver
*
* Copyright (C) 2006 - 2007 Paul Mundt
*
* Based on pata_pcmcia:
*
* Copyright 2005-2006 Red Hat Inc, all rights reserved.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#define DRV_NAME "pata_platform"
#define DRV_VERSION "1.2"
static int pio_mask = 1;
module_param(pio_mask, int, 0);
MODULE_PARM_DESC(pio_mask, "PIO modes supported, mode 0 only by default");
/*
* Provide our own set_mode() as we don't want to change anything that has
* already been configured.
*/
static int pata_platform_set_mode(struct ata_link *link, struct ata_device **unused)
{
struct ata_device *dev;
ata_for_each_dev(dev, link, ENABLED) {
/* We don't really care */
dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
ata_dev_info(dev, "configured for PIO\n");
}
return 0;
}
static const struct scsi_host_template pata_platform_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static void pata_platform_setup_port(struct ata_ioports *ioaddr,
unsigned int shift)
{
/* Fixup the port shift for platforms that need it */
ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << shift);
ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << shift);
ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << shift);
ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << shift);
ioaddr->lbal_addr = ioaddr->cmd_addr + (ATA_REG_LBAL << shift);
ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << shift);
ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << shift);
ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << shift);
ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << shift);
ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << shift);
}
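/*
 * Worked example of the port shift above (illustrative): with
 * ioport_shift = 1 the taskfile registers are spaced 2 bytes apart, so
 * ATA_REG_NSECT (2) lands at cmd_addr + 4 instead of cmd_addr + 2. Boards
 * that wire the IDE bus onto a wider data bus typically use the shift to
 * match their address decoding.
 */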
/**
* __pata_platform_probe - attach a platform interface
* @dev: device
* @io_res: Resource representing I/O base
* @ctl_res: Resource representing CTL base
* @irq_res: Resource representing IRQ and its flags
* @ioport_shift: I/O port shift
* @__pio_mask: PIO mask
* @sht: scsi_host_template to use when registering
* @use16bit: Flag to indicate 16-bit IO instead of 32-bit
*
* Register a platform bus IDE interface. Such interfaces are PIO and we
* assume they do not support IRQ sharing.
*
* Platform devices are expected to contain at least 2 resources per port:
*
* - I/O Base (IORESOURCE_IO or IORESOURCE_MEM)
* - CTL Base (IORESOURCE_IO or IORESOURCE_MEM)
*
* and optionally:
*
* - IRQ (IORESOURCE_IRQ)
*
* If the base resources are both mem types, the ioremap() is handled
* here. For IORESOURCE_IO, it's assumed that there's no remapping
* necessary.
*
* If no IRQ resource is present, PIO polling mode is used instead.
*/
int __pata_platform_probe(struct device *dev, struct resource *io_res,
struct resource *ctl_res, struct resource *irq_res,
unsigned int ioport_shift, int __pio_mask,
const struct scsi_host_template *sht, bool use16bit)
{
struct ata_host *host;
struct ata_port *ap;
unsigned int mmio;
int irq = 0;
int irq_flags = 0;
/*
* Check for MMIO
*/
mmio = ((io_res->flags == IORESOURCE_MEM) &&
(ctl_res->flags == IORESOURCE_MEM));
/*
* And the IRQ
*/
if (irq_res && irq_res->start > 0) {
irq = irq_res->start;
irq_flags = (irq_res->flags & IRQF_TRIGGER_MASK) | IRQF_SHARED;
}
/*
* Now that that's out of the way, wire up the port...
*/
host = ata_host_alloc(dev, 1);
if (!host)
return -ENOMEM;
ap = host->ports[0];
ap->ops = devm_kzalloc(dev, sizeof(*ap->ops), GFP_KERNEL);
if (!ap->ops)
return -ENOMEM;
ap->ops->inherits = &ata_sff_port_ops;
ap->ops->cable_detect = ata_cable_unknown;
ap->ops->set_mode = pata_platform_set_mode;
if (use16bit)
ap->ops->sff_data_xfer = ata_sff_data_xfer;
else
ap->ops->sff_data_xfer = ata_sff_data_xfer32;
ap->pio_mask = __pio_mask;
ap->flags |= ATA_FLAG_SLAVE_POSS;
/*
* Use polling mode if there's no IRQ
*/
if (!irq) {
ap->flags |= ATA_FLAG_PIO_POLLING;
ata_port_desc(ap, "no IRQ, using PIO polling");
}
/*
* Handle the MMIO case
*/
if (mmio) {
ap->ioaddr.cmd_addr = devm_ioremap(dev, io_res->start,
resource_size(io_res));
ap->ioaddr.ctl_addr = devm_ioremap(dev, ctl_res->start,
resource_size(ctl_res));
} else {
ap->ioaddr.cmd_addr = devm_ioport_map(dev, io_res->start,
resource_size(io_res));
ap->ioaddr.ctl_addr = devm_ioport_map(dev, ctl_res->start,
resource_size(ctl_res));
}
if (!ap->ioaddr.cmd_addr || !ap->ioaddr.ctl_addr) {
dev_err(dev, "failed to map IO/CTL base\n");
return -ENOMEM;
}
ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
pata_platform_setup_port(&ap->ioaddr, ioport_shift);
ata_port_desc(ap, "%s cmd 0x%llx ctl 0x%llx", mmio ? "mmio" : "ioport",
(unsigned long long)io_res->start,
(unsigned long long)ctl_res->start);
/* activate */
return ata_host_activate(host, irq, irq ? ata_sff_interrupt : NULL,
irq_flags, sht);
}
EXPORT_SYMBOL_GPL(__pata_platform_probe);
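/*
 * Illustrative sketch only: the resource layout a board file could hand to
 * this driver, matching the list in the kernel-doc above. The addresses
 * and IRQ number are made up for the example.
 */
#if 0
static struct resource example_pata_resources[] = {
	DEFINE_RES_MEM(0x18000000, 0x10),	/* I/O base */
	DEFINE_RES_MEM(0x18000100, 0x02),	/* CTL base */
	DEFINE_RES_IRQ(42),			/* optional IRQ */
};
#endif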
static int pata_platform_probe(struct platform_device *pdev)
{
struct resource *io_res;
struct resource *ctl_res;
struct resource *irq_res;
struct pata_platform_info *pp_info = dev_get_platdata(&pdev->dev);
/*
* Simple resource validation ..
*/
if ((pdev->num_resources != 3) && (pdev->num_resources != 2)) {
dev_err(&pdev->dev, "invalid number of resources\n");
return -EINVAL;
}
/*
* Get the I/O base first
*/
io_res = platform_get_mem_or_io(pdev, 0);
if (!io_res)
return -EINVAL;
/*
* Then the CTL base
*/
ctl_res = platform_get_mem_or_io(pdev, 1);
if (!ctl_res)
return -EINVAL;
/*
* And the IRQ
*/
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
return __pata_platform_probe(&pdev->dev, io_res, ctl_res, irq_res,
pp_info ? pp_info->ioport_shift : 0,
pio_mask, &pata_platform_sht, false);
}
static struct platform_driver pata_platform_driver = {
.probe = pata_platform_probe,
.remove_new = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
},
};
module_platform_driver(pata_platform_driver);
MODULE_AUTHOR("Paul Mundt");
MODULE_DESCRIPTION("low-level driver for platform device ATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/ata/pata_platform.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_ninja32.c - Ninja32 PATA for new ATA layer
* (C) 2007 Red Hat Inc
*
* Note: The controller like many controllers has shared timings for
* PIO and DMA. We thus flip to the DMA timings in dma_start and flip back
* in the dma_stop function. Thus we actually don't need a set_dmamode
* method as the PIO method is always called and will set the right PIO
* timing parameters.
*
* The Ninja32 Cardbus is not a generic SFF controller. Instead it is
* laid out as follows off BAR 0. This is based upon Mark Lord's delkin
* driver and the extensive analysis done by the BSD developers, notably
* ITOH Yasufumi.
*
* Base + 0x00 IRQ Status
* Base + 0x01 IRQ control
* Base + 0x02 Chipset control
* Base + 0x03 Unknown
* Base + 0x04 VDMA and reset control + wait bits
* Base + 0x08 BMIMBA
* Base + 0x0C DMA Length
* Base + 0x10 Taskfile
* Base + 0x18 BMDMA Status ?
* Base + 0x1C
* Base + 0x1D Bus master control
* bit 0 = enable
* bit 1 = 0 write/1 read
* bit 2 = 1 sgtable
* bit 3 = go
* bit 4-6 wait bits
* bit 7 = done
* Base + 0x1E AltStatus
* Base + 0x1F timing register
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_ninja32"
#define DRV_VERSION "0.1.5"
/**
* ninja32_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the PIO mode setup. Our timing registers are shared
* but we want to set the PIO timing by default.
*/
static void ninja32_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
static u16 pio_timing[5] = {
0xd6, 0x85, 0x44, 0x33, 0x13
};
iowrite8(pio_timing[adev->pio_mode - XFER_PIO_0],
ap->ioaddr.bmdma_addr + 0x1f);
ap->private_data = adev;
}
static void ninja32_dev_select(struct ata_port *ap, unsigned int device)
{
struct ata_device *adev = &ap->link.device[device];
if (ap->private_data != adev) {
iowrite8(0xd6, ap->ioaddr.bmdma_addr + 0x1f);
ata_sff_dev_select(ap, device);
ninja32_set_piomode(ap, adev);
}
}
static const struct scsi_host_template ninja32_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations ninja32_port_ops = {
.inherits = &ata_bmdma_port_ops,
.sff_dev_select = ninja32_dev_select,
.cable_detect = ata_cable_40wire,
.set_piomode = ninja32_set_piomode,
.sff_data_xfer = ata_sff_data_xfer32
};
static void ninja32_program(void __iomem *base)
{
iowrite8(0x05, base + 0x01); /* Enable interrupt lines */
iowrite8(0xBE, base + 0x02); /* Burst, ?? setup */
iowrite8(0x01, base + 0x03); /* Unknown */
iowrite8(0x20, base + 0x04); /* WAIT0 */
iowrite8(0x8f, base + 0x05); /* Unknown */
iowrite8(0xa4, base + 0x1c); /* Unknown */
iowrite8(0x83, base + 0x1d); /* BMDMA control: WAIT0 */
}
static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct ata_host *host;
struct ata_port *ap;
void __iomem *base;
int rc;
host = ata_host_alloc(&dev->dev, 1);
if (!host)
return -ENOMEM;
ap = host->ports[0];
/* Set up the PCI device */
rc = pcim_enable_device(dev);
if (rc)
return rc;
rc = pcim_iomap_regions(dev, 1 << 0, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(dev);
if (rc)
return rc;
host->iomap = pcim_iomap_table(dev);
rc = dma_set_mask_and_coherent(&dev->dev, ATA_DMA_MASK);
if (rc)
return rc;
pci_set_master(dev);
/* Set up the register mappings. We use the I/O mapping since only the
older chips also expose MMIO on BAR 1 */
base = host->iomap[0];
if (!base)
return -ENOMEM;
ap->ops = &ninja32_port_ops;
ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_SLAVE_POSS;
ap->ioaddr.cmd_addr = base + 0x10;
ap->ioaddr.ctl_addr = base + 0x1E;
ap->ioaddr.altstatus_addr = base + 0x1E;
ap->ioaddr.bmdma_addr = base;
ata_sff_std_ports(&ap->ioaddr);
ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
ninja32_program(base);
/* FIXME: Should we disable them at remove ? */
return ata_host_activate(host, dev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &ninja32_sht);
}
#ifdef CONFIG_PM_SLEEP
static int ninja32_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
ninja32_program(host->iomap[0]);
ata_host_resume(host);
return 0;
}
#endif
static const struct pci_device_id ninja32[] = {
{ 0x10FC, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0x8008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0xf008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0xf02C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ },
};
static struct pci_driver ninja32_pci_driver = {
.name = DRV_NAME,
.id_table = ninja32,
.probe = ninja32_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ninja32_reinit_one,
#endif
};
module_pci_driver(ninja32_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Ninja32 ATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ninja32);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_ninja32.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_efar.c - EFAR PIIX clone controller driver
*
* (C) 2005 Red Hat
* (C) 2009-2010 Bartlomiej Zolnierkiewicz
*
* Some parts based on ata_piix.c by Jeff Garzik and others.
*
* The EFAR is a PIIX4 clone with UDMA66 support. Unlike the later
* Intel ICH controllers, the EFAR widened the UDMA mode register bits
* and doesn't require the funky clock selection.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define DRV_NAME "pata_efar"
#define DRV_VERSION "0.4.5"
/**
* efar_pre_reset - Enable bits
* @link: ATA link
* @deadline: deadline jiffies for the operation
*
* Check the port enable bits to see which EFAR ports are active
* before reset. This differs from the PIIX arrangement.
*/
static int efar_pre_reset(struct ata_link *link, unsigned long deadline)
{
static const struct pci_bits efar_enable_bits[] = {
{ 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
{ 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
};
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (!pci_test_config_bits(pdev, &efar_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
/**
* efar_cable_detect - check for 40/80 pin
* @ap: Port
*
* Perform cable detection for the EFAR ATA interface. This is
* different from the PIIX arrangement
*/
static int efar_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 tmp;
pci_read_config_byte(pdev, 0x47, &tmp);
if (tmp & (2 >> ap->port_no))
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;
}
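/*
 * Worked example of the decode above (illustrative): (2 >> port_no) tests
 * bit 1 of config register 0x47 for port 0 and bit 0 for port 1; a set
 * bit marks that channel as 40-wire, a clear bit as 80-wire.
 */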
static DEFINE_SPINLOCK(efar_lock);
/**
* efar_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device to program
*
* Set PIO mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *dev = to_pci_dev(ap->host->dev);
unsigned int master_port = ap->port_no ? 0x42 : 0x40;
unsigned long flags;
u16 master_data;
u8 udma_enable;
int control = 0;
/*
* See Intel Document 298600-004 for the timing programming rules
* for PIIX/ICH. The EFAR is a clone so very similar
*/
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
if (pio > 1)
control |= 1; /* TIME */
if (ata_pio_need_iordy(adev)) /* PIO 3/4 require IORDY */
control |= 2; /* IE */
/* Intel specifies that the prefetch/posting is for disk only */
if (adev->class == ATA_DEV_ATA)
control |= 4; /* PPE */
spin_lock_irqsave(&efar_lock, flags);
pci_read_config_word(dev, master_port, &master_data);
/* Set PPE, IE, and TIME as appropriate */
if (adev->devno == 0) {
master_data &= 0xCCF0;
master_data |= control;
master_data |= (timings[pio][0] << 12) |
(timings[pio][1] << 8);
} else {
int shift = 4 * ap->port_no;
u8 slave_data;
master_data &= 0xFF0F;
master_data |= (control << 4);
/* Slave timing in separate register */
pci_read_config_byte(dev, 0x44, &slave_data);
slave_data &= ap->port_no ? 0x0F : 0xF0;
slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << shift;
pci_write_config_byte(dev, 0x44, slave_data);
}
master_data |= 0x4000; /* Ensure SITRE is set */
pci_write_config_word(dev, master_port, master_data);
pci_read_config_byte(dev, 0x48, &udma_enable);
udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
pci_write_config_byte(dev, 0x48, udma_enable);
spin_unlock_irqrestore(&efar_lock, flags);
}
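/*
 * Worked example of the timing encode above (illustrative): for the master
 * device in PIO 4 on an ATA disk, control = TIME | IE | PPE = 7 and
 * timings[4] = { 2, 3 }, so master_data gets 0x7 in its low nibble, ISP 2
 * at bits 12-13 and RTC 3 at bits 8-9, with SITRE (bit 14) forced on
 * before the write-back.
 */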
/**
* efar_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Device to program
*
* Set UDMA/MWDMA mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void efar_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *dev = to_pci_dev(ap->host->dev);
u8 master_port = ap->port_no ? 0x42 : 0x40;
u16 master_data;
u8 speed = adev->dma_mode;
int devid = adev->devno + 2 * ap->port_no;
unsigned long flags;
u8 udma_enable;
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
spin_lock_irqsave(&efar_lock, flags);
pci_read_config_word(dev, master_port, &master_data);
pci_read_config_byte(dev, 0x48, &udma_enable);
if (speed >= XFER_UDMA_0) {
unsigned int udma = adev->dma_mode - XFER_UDMA_0;
u16 udma_timing;
udma_enable |= (1 << devid);
/* Load the UDMA mode number */
pci_read_config_word(dev, 0x4A, &udma_timing);
udma_timing &= ~(7 << (4 * devid));
udma_timing |= udma << (4 * devid);
pci_write_config_word(dev, 0x4A, udma_timing);
} else {
/*
* MWDMA is driven by the PIO timings. We must also enable
* IORDY unconditionally along with TIME1. PPE has already
* been set when the PIO timing was set.
*/
unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
unsigned int control;
u8 slave_data;
const unsigned int needed_pio[3] = {
XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
};
int pio = needed_pio[mwdma] - XFER_PIO_0;
control = 3; /* IORDY|TIME1 */
/* If the drive's MWDMA mode is faster than its best PIO mode
then we must force the PIO cycles into PIO0 */
if (adev->pio_mode < needed_pio[mwdma])
/* Enable DMA timing only */
control |= 8; /* PIO cycles in PIO0 */
if (adev->devno) { /* Slave */
master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */
master_data |= control << 4;
pci_read_config_byte(dev, 0x44, &slave_data);
slave_data &= ap->port_no ? 0x0F : 0xF0;
/* Load the matching timing */
slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
pci_write_config_byte(dev, 0x44, slave_data);
} else { /* Master */
master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY
and master timing bits */
master_data |= control;
master_data |=
(timings[pio][0] << 12) |
(timings[pio][1] << 8);
}
udma_enable &= ~(1 << devid);
pci_write_config_word(dev, master_port, master_data);
}
pci_write_config_byte(dev, 0x48, udma_enable);
spin_unlock_irqrestore(&efar_lock, flags);
}
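/*
* A worked example of the UDMA programming above: each device owns a
* 4-bit stride in config word 0x4A, with the mode in the low 3 bits
* of its stride. The secondary master is devid 2, so UDMA2 clears
* bits 10:8 and writes 0x2 << 8 = 0x200, while bit 2 of the enable
* byte at 0x48 switches its UDMA engine on.
*/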
static const struct scsi_host_template efar_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations efar_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = efar_cable_detect,
.set_piomode = efar_set_piomode,
.set_dmamode = efar_set_dmamode,
.prereset = efar_pre_reset,
};
/**
* efar_init_one - Register EFAR ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in efar_pci_tbl matching with @pdev
*
* Called from kernel PCI layer.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA12_ONLY,
.udma_mask = ATA_UDMA4,
.port_ops = &efar_ops,
};
const struct ata_port_info *ppi[] = { &info, &info };
ata_print_version_once(&pdev->dev, DRV_VERSION);
return ata_pci_bmdma_init_one(pdev, ppi, &efar_sht, NULL,
ATA_HOST_PARALLEL_SCAN);
}
static const struct pci_device_id efar_pci_tbl[] = {
{ PCI_VDEVICE(EFAR, 0x9130), },
{ } /* terminate list */
};
static struct pci_driver efar_pci_driver = {
.name = DRV_NAME,
.id_table = efar_pci_tbl,
.probe = efar_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(efar_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for EFAR PIIX clones");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efar_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_efar.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_sis.c - SiS ATA driver
*
* (C) 2005 Red Hat
* (C) 2007,2009 Bartlomiej Zolnierkiewicz
*
* Based upon linux/drivers/ide/pci/sis5513.c
* Copyright (C) 1999-2000 Andre Hedrick <[email protected]>
* Copyright (C) 2002 Lionel Bouton <[email protected]>, Maintainer
* Copyright (C) 2003 Vojtech Pavlik <[email protected]>
* SiS Taiwan : for direct support and hardware.
* Daniela Engert : for initial ATA100 advices and numerous others.
* John Fremlin, Manfred Spraul, Dave Morgan, Peter Kjellerstedt :
* for checking code correctness, providing patches.
* Original tests and design on the SiS620 chipset.
* ATA100 tests and design on the SiS735 chipset.
* ATA16/33 support from specs
* ATA133 support for SiS961/962 by L.C. Chang <[email protected]>
*
*
* TODO
* Check MWDMA on drives that don't support MWDMA-speed PIO cycles?
* More Testing
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#include "sis.h"
#define DRV_NAME "pata_sis"
#define DRV_VERSION "0.5.2"
struct sis_chipset {
u16 device; /* PCI host ID */
const struct ata_port_info *info; /* Info block */
/* Probably add family, cable detect type etc here to clean
up code later */
};
struct sis_laptop {
u16 device;
u16 subvendor;
u16 subdevice;
};
static const struct sis_laptop sis_laptop[] = {
/* devid, subvendor, subdev */
{ 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */
{ 0x5513, 0x1734, 0x105F }, /* FSC Amilo A1630 */
{ 0x5513, 0x1071, 0x8640 }, /* EasyNote K5305 */
/* end marker */
{ 0, }
};
static int sis_short_ata40(struct pci_dev *dev)
{
const struct sis_laptop *lap = &sis_laptop[0];
while (lap->device) {
if (lap->device == dev->device &&
lap->subvendor == dev->subsystem_vendor &&
lap->subdevice == dev->subsystem_device)
return 1;
lap++;
}
return 0;
}
/**
* sis_old_port_base - return PCI configuration base for dev
* @adev: device
*
* Returns the base of the PCI configuration registers for this port
* number.
*/
static int sis_old_port_base(struct ata_device *adev)
{
return 0x40 + (4 * adev->link->ap->port_no) + (2 * adev->devno);
}
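/*
* Worked example (for illustration): the secondary slave (port 1,
* devno 1) maps to 0x40 + 4 * 1 + 2 * 1 = 0x46 in PCI config space.
*/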
/**
* sis_port_base - return PCI configuration base for dev
* @adev: device
*
* Returns the base of the PCI configuration registers for this port
* number.
*/
static int sis_port_base(struct ata_device *adev)
{
struct ata_port *ap = adev->link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int port = 0x40;
u32 reg54;
/* If bit 30 is set then the registers are mapped at 0x70 not 0x40 */
pci_read_config_dword(pdev, 0x54, &reg54);
if (reg54 & 0x40000000)
port = 0x70;
return port + (8 * ap->port_no) + (4 * adev->devno);
}
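/*
* Worked example (for illustration): with bit 30 of register 0x54
* set, the same secondary slave moves to 0x70 + 8 * 1 + 4 * 1 =
* 0x7C, each device owning four bytes in the remapped layout.
*/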
/**
* sis_133_cable_detect - check for 40/80 pin
* @ap: Port
*
* Perform cable detection for the later UDMA133 capable
* SiS chipset.
*/
static int sis_133_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u16 tmp;
/* The top bit of this register is the cable detect bit */
pci_read_config_word(pdev, 0x50 + 2 * ap->port_no, &tmp);
if ((tmp & 0x8000) && !sis_short_ata40(pdev))
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;
}
/**
* sis_66_cable_detect - check for 40/80 pin
* @ap: Port
*
* Perform cable detection on the UDMA66, UDMA100 and early UDMA133
* SiS IDE controllers.
*/
static int sis_66_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 tmp;
/* Older chips keep cable detect in bits 4/5 of reg 0x48 */
pci_read_config_byte(pdev, 0x48, &tmp);
tmp >>= ap->port_no;
if ((tmp & 0x10) && !sis_short_ata40(pdev))
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;
}
/**
* sis_pre_reset - probe begin
* @link: ATA link
* @deadline: deadline jiffies for the operation
*
* Set up cable type and use generic probe init
*/
static int sis_pre_reset(struct ata_link *link, unsigned long deadline)
{
static const struct pci_bits sis_enable_bits[] = {
{ 0x4aU, 1U, 0x02UL, 0x02UL }, /* port 0 */
{ 0x4aU, 1U, 0x04UL, 0x04UL }, /* port 1 */
};
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no]))
return -ENOENT;
/* Clear the FIFO settings. We can't enable the FIFO until
we know we are poking at a disk */
pci_write_config_byte(pdev, 0x4B, 0);
return ata_sff_prereset(link, deadline);
}
/**
* sis_set_fifo - Set RWP fifo bits for this device
* @ap: Port
* @adev: Device
*
* SIS chipsets implement prefetch/postwrite bits for each device
* on both channels. This functionality is not ATAPI compatible and
* must be configured according to the class of device present
*/
static void sis_set_fifo(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 fifoctrl;
u8 mask = 0x11;
mask <<= (2 * ap->port_no);
mask <<= adev->devno;
/* This holds various bits including the FIFO control */
pci_read_config_byte(pdev, 0x4B, &fifoctrl);
fifoctrl &= ~mask;
/* Enable for ATA (disk) only */
if (adev->class == ATA_DEV_ATA)
fifoctrl |= mask;
pci_write_config_byte(pdev, 0x4B, fifoctrl);
}
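/*
* A sketch of the mask arithmetic above: the base mask 0x11 selects
* the prefetch and postwrite bits for the primary master; shifting
* by 2 * port_no + devno moves it along, so the secondary slave gets
* 0x11 << 2 << 1 = 0x88.
*/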
/**
* sis_old_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device we are configuring for.
*
* Set PIO mode for device, in host controller PCI config space. This
* function handles PIO set up for all chips that are pre ATA100 and
* also early ATA100 devices.
*
* LOCKING:
* None (inherited from caller).
*/
static void sis_old_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int port = sis_old_port_base(adev);
u8 t1, t2;
int speed = adev->pio_mode - XFER_PIO_0;
static const u8 active[] = { 0x00, 0x07, 0x04, 0x03, 0x01 };
static const u8 recovery[] = { 0x00, 0x06, 0x04, 0x03, 0x03 };
sis_set_fifo(ap, adev);
pci_read_config_byte(pdev, port, &t1);
pci_read_config_byte(pdev, port + 1, &t2);
t1 &= ~0x0F; /* Clear active/recovery timings */
t2 &= ~0x07;
t1 |= active[speed];
t2 |= recovery[speed];
pci_write_config_byte(pdev, port, t1);
pci_write_config_byte(pdev, port + 1, t2);
}
/**
* sis_100_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device we are configuring for.
*
* Set PIO mode for device, in host controller PCI config space. This
* function handles PIO set up for ATA100 devices and early ATA133.
*
* LOCKING:
* None (inherited from caller).
*/
static void sis_100_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int port = sis_old_port_base(adev);
int speed = adev->pio_mode - XFER_PIO_0;
static const u8 actrec[] = { 0x00, 0x67, 0x44, 0x33, 0x31 };
sis_set_fifo(ap, adev);
pci_write_config_byte(pdev, port, actrec[speed]);
}
/**
* sis_133_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device we are configuring for.
*
* Set PIO mode for device, in host controller PCI config space. This
* function handles PIO set up for the later ATA133 devices.
*
* LOCKING:
* None (inherited from caller).
*/
static void sis_133_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int port;
u32 t1;
int speed = adev->pio_mode - XFER_PIO_0;
static const u32 timing133[] = {
0x28269000, /* Recovery << 24 | Act << 16 | Ini << 12 */
0x0C266000,
0x04263000,
0x0C0A3000,
0x05093000
};
static const u32 timing100[] = {
0x1E1C6000, /* Recovery << 24 | Act << 16 | Ini << 12 */
0x091C4000,
0x031C2000,
0x09072000,
0x04062000
};
sis_set_fifo(ap, adev);
port = sis_port_base(adev);
pci_read_config_dword(pdev, port, &t1);
t1 &= 0xC0C00FFF; /* Mask out timing */
if (t1 & 0x08) /* 100 or 133 ? */
t1 |= timing133[speed];
else
t1 |= timing100[speed];
/* t1 carries a full dword of timing data; write it all back */
pci_write_config_dword(pdev, port, t1);
}
/**
* sis_old_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Device to program
*
* Set UDMA/MWDMA mode for device, in host controller PCI config space.
* Handles pre UDMA and UDMA33 devices. Supports MWDMA as well unlike
* the old ide/pci driver.
*
* LOCKING:
* None (inherited from caller).
*/
static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int speed = adev->dma_mode - XFER_MW_DMA_0;
int drive_pci = sis_old_port_base(adev);
u16 timing;
static const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
static const u16 udma_bits[] = { 0xE000, 0xC000, 0xA000 };
pci_read_config_word(pdev, drive_pci, &timing);
if (adev->dma_mode < XFER_UDMA_0) {
/* bits 3-0 hold the recovery timing, bits 8-10 the active
timing, and the higher bits are device dependent */
timing &= ~0x870F;
timing |= mwdma_bits[speed];
} else {
/* Bit 15 is UDMA on/off, bit 13-14 are cycle time */
speed = adev->dma_mode - XFER_UDMA_0;
timing &= ~0x6000;
timing |= udma_bits[speed];
}
pci_write_config_word(pdev, drive_pci, timing);
}
/**
* sis_66_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Device to program
*
* Set UDMA/MWDMA mode for device, in host controller PCI config space.
* Handles UDMA66 and early UDMA100 devices. Supports MWDMA as well unlike
* the old ide/pci driver.
*
* LOCKING:
* None (inherited from caller).
*/
static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int speed = adev->dma_mode - XFER_MW_DMA_0;
int drive_pci = sis_old_port_base(adev);
u16 timing;
/* MWDMA 0-2 and UDMA 0-5 */
static const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
static const u16 udma_bits[] = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000, 0x8000 };
pci_read_config_word(pdev, drive_pci, &timing);
if (adev->dma_mode < XFER_UDMA_0) {
/* bits 3-0 hold the recovery timing, bits 8-10 the active
timing, the higher bits are device dependent and bit 15 is UDMA */
timing &= ~0x870F;
timing |= mwdma_bits[speed];
} else {
/* Bit 15 is UDMA on/off, bit 12-14 are cycle time */
speed = adev->dma_mode - XFER_UDMA_0;
timing &= ~0xF000;
timing |= udma_bits[speed];
}
pci_write_config_word(pdev, drive_pci, timing);
}
/**
* sis_100_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Device to program
*
* Set UDMA/MWDMA mode for device, in host controller PCI config space.
* Handles UDMA66 and early UDMA100 devices.
*
* LOCKING:
* None (inherited from caller).
*/
static void sis_100_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int speed = adev->dma_mode - XFER_MW_DMA_0;
int drive_pci = sis_old_port_base(adev);
u8 timing;
static const u8 udma_bits[] = { 0x8B, 0x87, 0x85, 0x83, 0x82, 0x81};
pci_read_config_byte(pdev, drive_pci + 1, &timing);
if (adev->dma_mode < XFER_UDMA_0) {
/* NOT SUPPORTED YET: NEED DATA SHEET. DITTO IN OLD DRIVER */
} else {
/* Bit 7 is UDMA on/off, bit 0-3 are cycle time */
speed = adev->dma_mode - XFER_UDMA_0;
timing &= ~0x8F;
timing |= udma_bits[speed];
}
pci_write_config_byte(pdev, drive_pci + 1, timing);
}
/**
* sis_133_early_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Device to program
*
* Set UDMA/MWDMA mode for device, in host controller PCI config space.
* Handles early SiS 961 bridges.
*
* LOCKING:
* None (inherited from caller).
*/
static void sis_133_early_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int speed = adev->dma_mode - XFER_MW_DMA_0;
int drive_pci = sis_old_port_base(adev);
u8 timing;
/* Low 4 bits are timing */
static const u8 udma_bits[] = { 0x8F, 0x8A, 0x87, 0x85, 0x83, 0x82, 0x81};
pci_read_config_byte(pdev, drive_pci + 1, &timing);
if (adev->dma_mode < XFER_UDMA_0) {
/* NOT SUPPORTED YET: NEED DATA SHEET. DITTO IN OLD DRIVER */
} else {
/* Bit 7 is UDMA on/off, bit 0-3 are cycle time */
speed = adev->dma_mode - XFER_UDMA_0;
timing &= ~0x8F;
timing |= udma_bits[speed];
}
pci_write_config_byte(pdev, drive_pci + 1, timing);
}
/**
* sis_133_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Device to program
*
* Set UDMA/MWDMA mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void sis_133_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int port;
u32 t1;
port = sis_port_base(adev);
pci_read_config_dword(pdev, port, &t1);
if (adev->dma_mode < XFER_UDMA_0) {
/* Recovery << 24 | Act << 16 | Ini << 12, like PIO modes */
static const u32 timing_u100[] = { 0x19154000, 0x06072000, 0x04062000 };
static const u32 timing_u133[] = { 0x221C6000, 0x0C0A3000, 0x05093000 };
int speed = adev->dma_mode - XFER_MW_DMA_0;
t1 &= 0xC0C00FFF;
/* disable UDMA */
t1 &= ~0x00000004;
if (t1 & 0x08)
t1 |= timing_u133[speed];
else
t1 |= timing_u100[speed];
} else {
/* bits 4-7 hold the cycle time, bits 8-11 the CVS time */
static const u32 timing_u100[] = { 0x6B0, 0x470, 0x350, 0x140, 0x120, 0x110, 0x000 };
static const u32 timing_u133[] = { 0x9F0, 0x6A0, 0x470, 0x250, 0x230, 0x220, 0x210 };
int speed = adev->dma_mode - XFER_UDMA_0;
t1 &= ~0x00000FF0;
/* enable UDMA */
t1 |= 0x00000004;
if (t1 & 0x08)
t1 |= timing_u133[speed];
else
t1 |= timing_u100[speed];
}
pci_write_config_dword(pdev, port, t1);
}
/**
* sis_133_mode_filter - mode selection filter
* @adev: ATA device
* @mask: received mask to manipulate and pass back
*
* Block UDMA6 on devices that do not support it.
*/
static unsigned int sis_133_mode_filter(struct ata_device *adev, unsigned int mask)
{
struct ata_port *ap = adev->link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int port = sis_port_base(adev);
u32 t1;
pci_read_config_dword(pdev, port, &t1);
/* if ATA133 is disabled, mask it out */
if (!(t1 & 0x08))
mask &= ~(0xC0 << ATA_SHIFT_UDMA);
return mask;
}
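/*
* A worked example of the filter above: 0xC0 covers UDMA mode bits 6
* and 7, so with the ATA133 bit clear the mask loses UDMA6 (and the
* unused UDMA7), capping such devices at UDMA5.
*/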
static const struct scsi_host_template sis_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations sis_133_for_sata_ops = {
.inherits = &ata_bmdma_port_ops,
.set_piomode = sis_133_set_piomode,
.set_dmamode = sis_133_set_dmamode,
.cable_detect = sis_133_cable_detect,
};
static struct ata_port_operations sis_base_ops = {
.inherits = &ata_bmdma_port_ops,
.prereset = sis_pre_reset,
};
static struct ata_port_operations sis_133_ops = {
.inherits = &sis_base_ops,
.set_piomode = sis_133_set_piomode,
.set_dmamode = sis_133_set_dmamode,
.cable_detect = sis_133_cable_detect,
.mode_filter = sis_133_mode_filter,
};
static struct ata_port_operations sis_133_early_ops = {
.inherits = &sis_base_ops,
.set_piomode = sis_100_set_piomode,
.set_dmamode = sis_133_early_set_dmamode,
.cable_detect = sis_66_cable_detect,
};
static struct ata_port_operations sis_100_ops = {
.inherits = &sis_base_ops,
.set_piomode = sis_100_set_piomode,
.set_dmamode = sis_100_set_dmamode,
.cable_detect = sis_66_cable_detect,
};
static struct ata_port_operations sis_66_ops = {
.inherits = &sis_base_ops,
.set_piomode = sis_old_set_piomode,
.set_dmamode = sis_66_set_dmamode,
.cable_detect = sis_66_cable_detect,
};
static struct ata_port_operations sis_old_ops = {
.inherits = &sis_base_ops,
.set_piomode = sis_old_set_piomode,
.set_dmamode = sis_old_set_dmamode,
.cable_detect = ata_cable_40wire,
};
static const struct ata_port_info sis_info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
/* No UDMA */
.port_ops = &sis_old_ops,
};
static const struct ata_port_info sis_info33 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
.port_ops = &sis_old_ops,
};
static const struct ata_port_info sis_info66 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
/* No MWDMA */
.udma_mask = ATA_UDMA4,
.port_ops = &sis_66_ops,
};
static const struct ata_port_info sis_info100 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
/* No MWDMA */
.udma_mask = ATA_UDMA5,
.port_ops = &sis_100_ops,
};
static const struct ata_port_info sis_info100_early = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
/* No MWDMA */
.udma_mask = ATA_UDMA5,
.port_ops = &sis_66_ops,
};
static const struct ata_port_info sis_info133 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &sis_133_ops,
};
const struct ata_port_info sis_info133_for_sata = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
/* No MWDMA */
.udma_mask = ATA_UDMA6,
.port_ops = &sis_133_for_sata_ops,
};
static const struct ata_port_info sis_info133_early = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
/* No MWDMA */
.udma_mask = ATA_UDMA6,
.port_ops = &sis_133_early_ops,
};
/* Privately shared with the SiS180 SATA driver, not for use elsewhere */
EXPORT_SYMBOL_GPL(sis_info133_for_sata);
static void sis_fixup(struct pci_dev *pdev, struct sis_chipset *sis)
{
u16 regw;
u8 reg;
if (sis->info == &sis_info133) {
pci_read_config_word(pdev, 0x50, &regw);
if (regw & 0x08)
pci_write_config_word(pdev, 0x50, regw & ~0x08);
pci_read_config_word(pdev, 0x52, &regw);
if (regw & 0x08)
pci_write_config_word(pdev, 0x52, regw & ~0x08);
return;
}
if (sis->info == &sis_info133_early || sis->info == &sis_info100) {
/* Fix up latency */
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
/* Set compatibility bit */
pci_read_config_byte(pdev, 0x49, &reg);
if (!(reg & 0x01))
pci_write_config_byte(pdev, 0x49, reg | 0x01);
return;
}
if (sis->info == &sis_info66 || sis->info == &sis_info100_early) {
/* Fix up latency */
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
/* Set compatibility bit */
pci_read_config_byte(pdev, 0x52, &reg);
if (!(reg & 0x04))
pci_write_config_byte(pdev, 0x52, reg | 0x04);
return;
}
if (sis->info == &sis_info33) {
pci_read_config_byte(pdev, PCI_CLASS_PROG, &reg);
if ((reg & 0x0F) != 0x00)
pci_write_config_byte(pdev, PCI_CLASS_PROG, reg & 0xF0);
/* Fall through to ATA16 fixup below */
}
if (sis->info == &sis_info || sis->info == &sis_info33) {
/* force per drive recovery and active timings
needed on ATA_33 and below chips */
pci_read_config_byte(pdev, 0x52, &reg);
if (!(reg & 0x08))
pci_write_config_byte(pdev, 0x52, reg|0x08);
return;
}
BUG();
}
/**
* sis_init_one - Register SiS ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in sis_pci_tbl matching with @pdev
*
* Called from kernel PCI layer. We probe for combined mode (sigh),
* and then hand over control to libata, for it to do the rest.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct ata_port_info *ppi[] = { NULL, NULL };
struct pci_dev *host = NULL;
struct sis_chipset *chipset = NULL;
struct sis_chipset *sets;
int rc;
static struct sis_chipset sis_chipsets[] = {
{ 0x0968, &sis_info133 },
{ 0x0966, &sis_info133 },
{ 0x0965, &sis_info133 },
{ 0x0745, &sis_info100 },
{ 0x0735, &sis_info100 },
{ 0x0733, &sis_info100 },
{ 0x0635, &sis_info100 },
{ 0x0633, &sis_info100 },
{ 0x0730, &sis_info100_early }, /* 100 with ATA 66 layout */
{ 0x0550, &sis_info100_early }, /* 100 with ATA 66 layout */
{ 0x0640, &sis_info66 },
{ 0x0630, &sis_info66 },
{ 0x0620, &sis_info66 },
{ 0x0540, &sis_info66 },
{ 0x0530, &sis_info66 },
{ 0x5600, &sis_info33 },
{ 0x5598, &sis_info33 },
{ 0x5597, &sis_info33 },
{ 0x5591, &sis_info33 },
{ 0x5582, &sis_info33 },
{ 0x5581, &sis_info33 },
{ 0x5596, &sis_info },
{ 0x5571, &sis_info },
{ 0x5517, &sis_info },
{ 0x5511, &sis_info },
{0}
};
static struct sis_chipset sis133_early = {
0x0, &sis_info133_early
};
static struct sis_chipset sis133 = {
0x0, &sis_info133
};
static struct sis_chipset sis100_early = {
0x0, &sis_info100_early
};
static struct sis_chipset sis100 = {
0x0, &sis_info100
};
ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
return rc;
/* We have to find the bridge first */
for (sets = &sis_chipsets[0]; sets->device; sets++) {
host = pci_get_device(PCI_VENDOR_ID_SI, sets->device, NULL);
if (host != NULL) {
chipset = sets; /* Match found */
if (sets->device == 0x630) { /* SIS630 */
if (host->revision >= 0x30) /* 630 ET */
chipset = &sis100_early;
}
break;
}
}
/* Look for concealed bridges */
if (chipset == NULL) {
/* Second check */
u32 idemisc;
u16 trueid;
/* Disable ID masking and register remapping then
see what the real ID is */
pci_read_config_dword(pdev, 0x54, &idemisc);
pci_write_config_dword(pdev, 0x54, idemisc & 0x7fffffff);
pci_read_config_word(pdev, PCI_DEVICE_ID, &trueid);
pci_write_config_dword(pdev, 0x54, idemisc);
switch(trueid) {
case 0x5518: /* SIS 962/963 */
dev_info(&pdev->dev,
"SiS 962/963 MuTIOL IDE UDMA133 controller\n");
chipset = &sis133;
if ((idemisc & 0x40000000) == 0) {
pci_write_config_dword(pdev, 0x54, idemisc | 0x40000000);
dev_info(&pdev->dev,
"Switching to 5513 register mapping\n");
}
break;
case 0x0180: /* SIS 965/965L */
chipset = &sis133;
break;
case 0x1180: /* SIS 966/966L */
chipset = &sis133;
break;
}
}
/* Further check */
if (chipset == NULL) {
struct pci_dev *lpc_bridge;
u16 trueid;
u8 prefctl;
u8 idecfg;
/* Try the second unmasking technique */
pci_read_config_byte(pdev, 0x4a, &idecfg);
pci_write_config_byte(pdev, 0x4a, idecfg | 0x10);
pci_read_config_word(pdev, PCI_DEVICE_ID, &trueid);
pci_write_config_byte(pdev, 0x4a, idecfg);
switch(trueid) {
case 0x5517:
lpc_bridge = pci_get_slot(pdev->bus, 0x10); /* Bus 0 Dev 2 Fn 0 */
if (lpc_bridge == NULL)
break;
pci_read_config_byte(pdev, 0x49, &prefctl);
/* Check the revision before dropping our bridge reference */
if (lpc_bridge->revision == 0x10 && (prefctl & 0x80))
chipset = &sis133_early;
else
chipset = &sis100;
pci_dev_put(lpc_bridge);
break;
}
}
pci_dev_put(host);
/* No chipset info, no support */
if (chipset == NULL)
return -ENODEV;
ppi[0] = chipset->info;
sis_fixup(pdev, chipset);
return ata_pci_bmdma_init_one(pdev, ppi, &sis_sht, chipset, 0);
}
#ifdef CONFIG_PM_SLEEP
static int sis_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
sis_fixup(pdev, host->private_data);
ata_host_resume(host);
return 0;
}
#endif
static const struct pci_device_id sis_pci_tbl[] = {
{ PCI_VDEVICE(SI, 0x5513), }, /* SiS 5513 */
{ PCI_VDEVICE(SI, 0x5518), }, /* SiS 5518 */
{ PCI_VDEVICE(SI, 0x1180), }, /* SiS 1180 */
{ }
};
static struct pci_driver sis_pci_driver = {
.name = DRV_NAME,
.id_table = sis_pci_tbl,
.probe = sis_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = sis_reinit_one,
#endif
};
module_pci_driver(sis_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for SiS ATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_sis.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* OF-platform PATA driver
*
* Copyright (c) 2007 MontaVista Software, Inc.
* Anton Vorontsov <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/libata.h>
#define DRV_NAME "pata_of_platform"
static const struct scsi_host_template pata_platform_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static int pata_of_platform_probe(struct platform_device *ofdev)
{
int ret;
struct device_node *dn = ofdev->dev.of_node;
struct resource io_res;
struct resource ctl_res;
struct resource irq_res;
unsigned int reg_shift = 0;
int pio_mode = 0;
int pio_mask;
bool use16bit;
int irq;
ret = of_address_to_resource(dn, 0, &io_res);
if (ret) {
dev_err(&ofdev->dev, "can't get IO address from "
"device tree\n");
return -EINVAL;
}
ret = of_address_to_resource(dn, 1, &ctl_res);
if (ret) {
dev_err(&ofdev->dev, "can't get CTL address from "
"device tree\n");
return -EINVAL;
}
memset(&irq_res, 0, sizeof(irq_res));
irq = platform_get_irq_optional(ofdev, 0);
if (irq < 0 && irq != -ENXIO)
return irq;
if (irq > 0) {
irq_res.start = irq;
irq_res.end = irq;
}
of_property_read_u32(dn, "reg-shift", ®_shift);
if (!of_property_read_u32(dn, "pio-mode", &pio_mode)) {
if (pio_mode > 6) {
dev_err(&ofdev->dev, "invalid pio-mode\n");
return -EINVAL;
}
} else {
dev_info(&ofdev->dev, "pio-mode unspecified, assuming PIO0\n");
}
use16bit = of_property_read_bool(dn, "ata-generic,use16bit");
pio_mask = 1 << pio_mode;
pio_mask |= (1 << pio_mode) - 1;
return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, irq > 0 ? &irq_res : NULL,
reg_shift, pio_mask, &pata_platform_sht,
use16bit);
}
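/*
* A worked example of the pio_mask computation in the probe above:
* 1 << pio_mode sets the bit for the requested mode and the
* (1 << pio_mode) - 1 term fills in all slower modes, so a
* pio-mode of 4 yields 0x1f, i.e. PIO0 through PIO4.
*/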
static const struct of_device_id pata_of_platform_match[] = {
{ .compatible = "ata-generic", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pata_of_platform_match);
static struct platform_driver pata_of_platform_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = pata_of_platform_match,
},
.probe = pata_of_platform_probe,
.remove_new = ata_platform_remove_one,
};
module_platform_driver(pata_of_platform_driver);
MODULE_DESCRIPTION("OF-platform PATA driver");
MODULE_AUTHOR("Anton Vorontsov <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/ata/pata_of_platform.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* drivers/ata/sata_fsl.c
*
* Freescale 3.0Gbps SATA device driver
*
* Author: Ashish Kalra <[email protected]>
* Li Yang <[email protected]>
*
* Copyright (c) 2006-2007, 2011-2012 Freescale Semiconductor, Inc.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include <asm/io.h>
static unsigned int intr_coalescing_count;
module_param(intr_coalescing_count, int, S_IRUGO);
MODULE_PARM_DESC(intr_coalescing_count,
"INT coalescing count threshold (1..31)");
static unsigned int intr_coalescing_ticks;
module_param(intr_coalescing_ticks, int, S_IRUGO);
MODULE_PARM_DESC(intr_coalescing_ticks,
"INT coalescing timer threshold in AHB ticks");
/* Controller information */
enum {
SATA_FSL_QUEUE_DEPTH = 16,
SATA_FSL_MAX_PRD = 63,
SATA_FSL_MAX_PRD_USABLE = SATA_FSL_MAX_PRD - 1,
SATA_FSL_MAX_PRD_DIRECT = 16, /* Direct PRDT entries */
SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
ATA_FLAG_PMP | ATA_FLAG_NCQ |
ATA_FLAG_AN | ATA_FLAG_NO_LOG_PAGE),
SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH,
SATA_FSL_CMD_HDR_SIZE = 16, /* 4 DWORDS */
SATA_FSL_CMD_SLOT_SIZE = (SATA_FSL_MAX_CMDS * SATA_FSL_CMD_HDR_SIZE),
/*
* SATA-FSL host controller supports a max. of (15+1) direct PRDEs, and
* chained indirect PRDEs up to a max count of 63.
* We are allocating an array of 63 PRDEs contiguously, but PRDE#15 will
* be set up as an indirect descriptor, pointing to its next
* (contiguous) PRDE. Though chained indirect PRDE arrays are
* supported, it will be more efficient to use a direct PRDT and
* a single chain/link to indirect PRDE array/PRDT.
*/
SATA_FSL_CMD_DESC_CFIS_SZ = 32,
SATA_FSL_CMD_DESC_SFIS_SZ = 32,
SATA_FSL_CMD_DESC_ACMD_SZ = 16,
SATA_FSL_CMD_DESC_RSRVD = 16,
SATA_FSL_CMD_DESC_SIZE = (SATA_FSL_CMD_DESC_CFIS_SZ +
SATA_FSL_CMD_DESC_SFIS_SZ +
SATA_FSL_CMD_DESC_ACMD_SZ +
SATA_FSL_CMD_DESC_RSRVD +
SATA_FSL_MAX_PRD * 16),
SATA_FSL_CMD_DESC_OFFSET_TO_PRDT =
(SATA_FSL_CMD_DESC_CFIS_SZ +
SATA_FSL_CMD_DESC_SFIS_SZ +
SATA_FSL_CMD_DESC_ACMD_SZ +
SATA_FSL_CMD_DESC_RSRVD),
SATA_FSL_CMD_DESC_AR_SZ = (SATA_FSL_CMD_DESC_SIZE * SATA_FSL_MAX_CMDS),
SATA_FSL_PORT_PRIV_DMA_SZ = (SATA_FSL_CMD_SLOT_SIZE +
SATA_FSL_CMD_DESC_AR_SZ),
/*
* MPC8315 has two SATA controllers, SATA1 & SATA2
* (one port per controller)
* MPC837x has 2/4 controllers, one port per controller
*/
SATA_FSL_MAX_PORTS = 1,
SATA_FSL_IRQ_FLAG = IRQF_SHARED,
};
/*
* Interrupt Coalescing Control Register bitdefs
*/
enum {
ICC_MIN_INT_COUNT_THRESHOLD = 1,
ICC_MAX_INT_COUNT_THRESHOLD = ((1 << 5) - 1),
ICC_MIN_INT_TICKS_THRESHOLD = 0,
ICC_MAX_INT_TICKS_THRESHOLD = ((1 << 19) - 1),
ICC_SAFE_INT_TICKS = 1,
};
/*
* Host Controller command register set - per port
*/
enum {
CQ = 0,
CA = 8,
CC = 0x10,
CE = 0x18,
DE = 0x20,
CHBA = 0x24,
HSTATUS = 0x28,
HCONTROL = 0x2C,
CQPMP = 0x30,
SIGNATURE = 0x34,
ICC = 0x38,
/*
* Host Status Register (HStatus) bitdefs
*/
ONLINE = (1 << 31),
GOING_OFFLINE = (1 << 30),
BIST_ERR = (1 << 29),
CLEAR_ERROR = (1 << 27),
FATAL_ERR_HC_MASTER_ERR = (1 << 18),
FATAL_ERR_PARITY_ERR_TX = (1 << 17),
FATAL_ERR_PARITY_ERR_RX = (1 << 16),
FATAL_ERR_DATA_UNDERRUN = (1 << 13),
FATAL_ERR_DATA_OVERRUN = (1 << 12),
FATAL_ERR_CRC_ERR_TX = (1 << 11),
FATAL_ERR_CRC_ERR_RX = (1 << 10),
FATAL_ERR_FIFO_OVRFL_TX = (1 << 9),
FATAL_ERR_FIFO_OVRFL_RX = (1 << 8),
FATAL_ERROR_DECODE = FATAL_ERR_HC_MASTER_ERR |
FATAL_ERR_PARITY_ERR_TX |
FATAL_ERR_PARITY_ERR_RX |
FATAL_ERR_DATA_UNDERRUN |
FATAL_ERR_DATA_OVERRUN |
FATAL_ERR_CRC_ERR_TX |
FATAL_ERR_CRC_ERR_RX |
FATAL_ERR_FIFO_OVRFL_TX | FATAL_ERR_FIFO_OVRFL_RX,
INT_ON_DATA_LENGTH_MISMATCH = (1 << 12),
INT_ON_FATAL_ERR = (1 << 5),
INT_ON_PHYRDY_CHG = (1 << 4),
INT_ON_SIGNATURE_UPDATE = (1 << 3),
INT_ON_SNOTIFY_UPDATE = (1 << 2),
INT_ON_SINGL_DEVICE_ERR = (1 << 1),
INT_ON_CMD_COMPLETE = 1,
INT_ON_ERROR = INT_ON_FATAL_ERR | INT_ON_SNOTIFY_UPDATE |
INT_ON_PHYRDY_CHG | INT_ON_SINGL_DEVICE_ERR,
/*
* Host Control Register (HControl) bitdefs
*/
HCONTROL_ONLINE_PHY_RST = (1 << 31),
HCONTROL_FORCE_OFFLINE = (1 << 30),
HCONTROL_LEGACY = (1 << 28),
HCONTROL_PARITY_PROT_MOD = (1 << 14),
HCONTROL_DPATH_PARITY = (1 << 12),
HCONTROL_SNOOP_ENABLE = (1 << 10),
HCONTROL_PMP_ATTACHED = (1 << 9),
HCONTROL_COPYOUT_STATFIS = (1 << 8),
IE_ON_FATAL_ERR = (1 << 5),
IE_ON_PHYRDY_CHG = (1 << 4),
IE_ON_SIGNATURE_UPDATE = (1 << 3),
IE_ON_SNOTIFY_UPDATE = (1 << 2),
IE_ON_SINGL_DEVICE_ERR = (1 << 1),
IE_ON_CMD_COMPLETE = 1,
DEFAULT_PORT_IRQ_ENABLE_MASK = IE_ON_FATAL_ERR | IE_ON_PHYRDY_CHG |
IE_ON_SIGNATURE_UPDATE | IE_ON_SNOTIFY_UPDATE |
IE_ON_SINGL_DEVICE_ERR | IE_ON_CMD_COMPLETE,
EXT_INDIRECT_SEG_PRD_FLAG = (1 << 31),
DATA_SNOOP_ENABLE_V1 = (1 << 22),
DATA_SNOOP_ENABLE_V2 = (1 << 28),
};
/*
* SATA Superset Registers
*/
enum {
SSTATUS = 0,
SERROR = 4,
SCONTROL = 8,
SNOTIFY = 0xC,
};
/*
* Control Status Register Set
*/
enum {
TRANSCFG = 0,
TRANSSTATUS = 4,
LINKCFG = 8,
LINKCFG1 = 0xC,
LINKCFG2 = 0x10,
LINKSTATUS = 0x14,
LINKSTATUS1 = 0x18,
PHYCTRLCFG = 0x1C,
COMMANDSTAT = 0x20,
};
/* TRANSCFG (transport-layer) configuration control */
enum {
TRANSCFG_RX_WATER_MARK = (1 << 4),
};
/* PHY (link-layer) configuration control */
enum {
PHY_BIST_ENABLE = 0x01,
};
/*
* Command Header Table entry, i.e., command slot.
* 4 Dwords per command slot; the 16-slot header table is 64 Dwords.
*/
struct cmdhdr_tbl_entry {
__le32 cda;
__le32 prde_fis_len;
__le32 ttl;
__le32 desc_info;
};
/*
* Description information bitdefs
*/
enum {
CMD_DESC_RES = (1 << 11),
VENDOR_SPECIFIC_BIST = (1 << 10),
CMD_DESC_SNOOP_ENABLE = (1 << 9),
FPDMA_QUEUED_CMD = (1 << 8),
SRST_CMD = (1 << 7),
BIST = (1 << 6),
ATAPI_CMD = (1 << 5),
};
/*
* Command Descriptor
*/
struct command_desc {
u8 cfis[8 * 4];
u8 sfis[8 * 4];
struct_group(cdb,
u8 acmd[4 * 4];
u8 fill[4 * 4];
);
u32 prdt[SATA_FSL_MAX_PRD_DIRECT * 4];
u32 prdt_indirect[(SATA_FSL_MAX_PRD - SATA_FSL_MAX_PRD_DIRECT) * 4];
};
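/*
* A worked example of the descriptor layout (a sketch mirroring the
* SATA_FSL_CMD_DESC_* constants): cfis at byte 0, sfis at 32, the
* acmd/fill pair at 64, so the direct PRDT starts at
* 32 + 32 + 16 + 16 = 96 == SATA_FSL_CMD_DESC_OFFSET_TO_PRDT.
*/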
/*
* Physical region table descriptor(PRD)
*/
struct prde {
__le32 dba;
u8 fill[2 * 4];
__le32 ddc_and_ext;
};
/*
* ata_port private data
* This is our per-port instance data.
*/
struct sata_fsl_port_priv {
struct cmdhdr_tbl_entry *cmdslot;
dma_addr_t cmdslot_paddr;
struct command_desc *cmdentry;
dma_addr_t cmdentry_paddr;
};
/*
* ata_port->host_set private data
*/
struct sata_fsl_host_priv {
void __iomem *hcr_base;
void __iomem *ssr_base;
void __iomem *csr_base;
int irq;
int data_snoop;
struct device_attribute intr_coalescing;
struct device_attribute rx_watermark;
};
static void fsl_sata_set_irq_coalescing(struct ata_host *host,
unsigned int count, unsigned int ticks)
{
struct sata_fsl_host_priv *host_priv = host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
unsigned long flags;
if (count > ICC_MAX_INT_COUNT_THRESHOLD)
count = ICC_MAX_INT_COUNT_THRESHOLD;
else if (count < ICC_MIN_INT_COUNT_THRESHOLD)
count = ICC_MIN_INT_COUNT_THRESHOLD;
if (ticks > ICC_MAX_INT_TICKS_THRESHOLD)
ticks = ICC_MAX_INT_TICKS_THRESHOLD;
else if ((ICC_MIN_INT_TICKS_THRESHOLD == ticks) &&
(count > ICC_MIN_INT_COUNT_THRESHOLD))
ticks = ICC_SAFE_INT_TICKS;
spin_lock_irqsave(&host->lock, flags);
iowrite32((count << 24 | ticks), hcr_base + ICC);
intr_coalescing_count = count;
intr_coalescing_ticks = ticks;
spin_unlock_irqrestore(&host->lock, flags);
dev_dbg(host->dev, "interrupt coalescing, count = 0x%x, ticks = %x\n",
intr_coalescing_count, intr_coalescing_ticks);
dev_dbg(host->dev, "ICC register status: (hcr base: 0x%p) = 0x%x\n",
hcr_base, ioread32(hcr_base + ICC));
}
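/*
* A sketch of the ICC encoding used above (illustrative helper, not
* called by the driver): the count threshold lives in bits 28:24 and
* the tick threshold in bits 18:0, so count = 8, ticks = 0x100 packs
* to (8 << 24) | 0x100 = 0x08000100.
*/
static inline u32 fsl_sata_icc_pack_sketch(unsigned int count,
unsigned int ticks)
{
return (count << 24) | ticks;
}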
static ssize_t fsl_sata_intr_coalescing_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%u %u\n",
intr_coalescing_count, intr_coalescing_ticks);
}
static ssize_t fsl_sata_intr_coalescing_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned int coalescing_count, coalescing_ticks;
if (sscanf(buf, "%u%u", &coalescing_count, &coalescing_ticks) != 2) {
dev_err(dev, "fsl-sata: wrong parameter format.\n");
return -EINVAL;
}
fsl_sata_set_irq_coalescing(dev_get_drvdata(dev),
coalescing_count, coalescing_ticks);
return strlen(buf);
}
static ssize_t fsl_sata_rx_watermark_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned int rx_watermark;
unsigned long flags;
struct ata_host *host = dev_get_drvdata(dev);
struct sata_fsl_host_priv *host_priv = host->private_data;
void __iomem *csr_base = host_priv->csr_base;
spin_lock_irqsave(&host->lock, flags);
rx_watermark = ioread32(csr_base + TRANSCFG);
rx_watermark &= 0x1f;
spin_unlock_irqrestore(&host->lock, flags);
return sysfs_emit(buf, "%u\n", rx_watermark);
}
static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned int rx_watermark;
unsigned long flags;
struct ata_host *host = dev_get_drvdata(dev);
struct sata_fsl_host_priv *host_priv = host->private_data;
void __iomem *csr_base = host_priv->csr_base;
u32 temp;
if (kstrtouint(buf, 10, &rx_watermark) < 0) {
dev_err(dev, "fsl-sata: wrong parameter format.\n");
return -EINVAL;
}
spin_lock_irqsave(&host->lock, flags);
temp = ioread32(csr_base + TRANSCFG);
temp &= 0xffffffe0;
iowrite32(temp | rx_watermark, csr_base + TRANSCFG);
spin_unlock_irqrestore(&host->lock, flags);
return strlen(buf);
}
static inline unsigned int sata_fsl_tag(struct ata_port *ap,
unsigned int tag,
void __iomem *hcr_base)
{
/* We let libATA core do actual (queue) tag allocation */
if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) {
ata_port_dbg(ap, "tag %d invalid : out of range\n", tag);
return 0;
}
if (unlikely((ioread32(hcr_base + CQ)) & (1 << tag))) {
ata_port_dbg(ap, "tag %d invalid : in use!!\n", tag);
return 0;
}
return tag;
}
static void sata_fsl_setup_cmd_hdr_entry(struct ata_port *ap,
struct sata_fsl_port_priv *pp,
unsigned int tag, u32 desc_info,
u32 data_xfer_len, u8 num_prde,
u8 fis_len)
{
dma_addr_t cmd_descriptor_address;
cmd_descriptor_address = pp->cmdentry_paddr +
tag * SATA_FSL_CMD_DESC_SIZE;
/* NOTE: both data_xfer_len & fis_len are Dword counts */
pp->cmdslot[tag].cda = cpu_to_le32(cmd_descriptor_address);
pp->cmdslot[tag].prde_fis_len =
cpu_to_le32((num_prde << 16) | (fis_len << 2));
pp->cmdslot[tag].ttl = cpu_to_le32(data_xfer_len & ~0x03);
pp->cmdslot[tag].desc_info = cpu_to_le32(desc_info | (tag & 0x1F));
ata_port_dbg(ap, "cda=0x%x, prde_fis_len=0x%x, ttl=0x%x, di=0x%x\n",
le32_to_cpu(pp->cmdslot[tag].cda),
le32_to_cpu(pp->cmdslot[tag].prde_fis_len),
le32_to_cpu(pp->cmdslot[tag].ttl),
le32_to_cpu(pp->cmdslot[tag].desc_info));
}
static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
u32 *ttl, dma_addr_t cmd_desc_paddr,
int data_snoop)
{
struct scatterlist *sg;
unsigned int num_prde = 0;
u32 ttl_dwords = 0;
/*
* NOTE : direct & indirect prdt's are contiguously allocated
*/
struct prde *prd = (struct prde *)&((struct command_desc *)
cmd_desc)->prdt;
struct prde *prd_ptr_to_indirect_ext = NULL;
unsigned indirect_ext_segment_sz = 0;
dma_addr_t indirect_ext_segment_paddr;
unsigned int si;
indirect_ext_segment_paddr = cmd_desc_paddr +
SATA_FSL_CMD_DESC_OFFSET_TO_PRDT + SATA_FSL_MAX_PRD_DIRECT * 16;
for_each_sg(qc->sg, sg, qc->n_elem, si) {
dma_addr_t sg_addr = sg_dma_address(sg);
u32 sg_len = sg_dma_len(sg);
/* warn if each s/g element is not dword aligned */
if (unlikely(sg_addr & 0x03))
ata_port_err(qc->ap, "s/g addr unaligned : 0x%llx\n",
(unsigned long long)sg_addr);
if (unlikely(sg_len & 0x03))
ata_port_err(qc->ap, "s/g len unaligned : 0x%x\n",
sg_len);
if (num_prde == (SATA_FSL_MAX_PRD_DIRECT - 1) &&
sg_next(sg) != NULL) {
prd_ptr_to_indirect_ext = prd;
prd->dba = cpu_to_le32(indirect_ext_segment_paddr);
indirect_ext_segment_sz = 0;
++prd;
++num_prde;
}
ttl_dwords += sg_len;
prd->dba = cpu_to_le32(sg_addr);
prd->ddc_and_ext = cpu_to_le32(data_snoop | (sg_len & ~0x03));
++num_prde;
++prd;
if (prd_ptr_to_indirect_ext)
indirect_ext_segment_sz += sg_len;
}
if (prd_ptr_to_indirect_ext) {
/* set indirect extension flag along with indirect ext. size */
prd_ptr_to_indirect_ext->ddc_and_ext =
cpu_to_le32((EXT_INDIRECT_SEG_PRD_FLAG |
data_snoop |
(indirect_ext_segment_sz & ~0x03)));
}
*ttl = ttl_dwords;
return num_prde;
}
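/*
* A worked example of the chaining above: with 20 s/g elements,
* entries 0..14 fill direct PRDEs; slot 15 becomes the indirect link
* whose dba points at cmd_desc_paddr + 96 + 16 * 16 (the area past
* the 16 direct entries), the remaining 5 elements are written
* there, and the link's byte count covers all of them.
*/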
static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct sata_fsl_port_priv *pp = ap->private_data;
struct sata_fsl_host_priv *host_priv = ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
unsigned int tag = sata_fsl_tag(ap, qc->hw_tag, hcr_base);
struct command_desc *cd;
u32 desc_info = CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE;
u32 num_prde = 0;
u32 ttl_dwords = 0;
dma_addr_t cd_paddr;
cd = (struct command_desc *)pp->cmdentry + tag;
cd_paddr = pp->cmdentry_paddr + tag * SATA_FSL_CMD_DESC_SIZE;
ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *) &cd->cfis);
/* setup "ACMD - atapi command" in cmd. desc. if this is ATAPI cmd */
if (ata_is_atapi(qc->tf.protocol)) {
desc_info |= ATAPI_CMD;
memset(&cd->cdb, 0, sizeof(cd->cdb));
memcpy(&cd->cdb, qc->cdb, qc->dev->cdb_len);
}
if (qc->flags & ATA_QCFLAG_DMAMAP)
num_prde = sata_fsl_fill_sg(qc, (void *)cd,
&ttl_dwords, cd_paddr,
host_priv->data_snoop);
if (qc->tf.protocol == ATA_PROT_NCQ)
desc_info |= FPDMA_QUEUED_CMD;
sata_fsl_setup_cmd_hdr_entry(ap, pp, tag, desc_info, ttl_dwords,
num_prde, 5);
ata_port_dbg(ap, "SATA FSL : di = 0x%x, ttl = %d, num_prde = %d\n",
desc_info, ttl_dwords, num_prde);
return AC_ERR_OK;
}
static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct sata_fsl_host_priv *host_priv = ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
unsigned int tag = sata_fsl_tag(ap, qc->hw_tag, hcr_base);
ata_port_dbg(ap, "CQ=0x%x,CA=0x%x,CE=0x%x,CC=0x%x\n",
ioread32(CQ + hcr_base),
ioread32(CA + hcr_base),
ioread32(CE + hcr_base), ioread32(CC + hcr_base));
iowrite32(qc->dev->link->pmp, CQPMP + hcr_base);
/* Simply queue command to the controller/device */
iowrite32(1 << tag, CQ + hcr_base);
ata_port_dbg(ap, "tag=%d, CQ=0x%x, CA=0x%x\n",
tag, ioread32(CQ + hcr_base), ioread32(CA + hcr_base));
ata_port_dbg(ap, "CE=0x%x, DE=0x%x, CC=0x%x, CmdStat = 0x%x\n",
ioread32(CE + hcr_base),
ioread32(DE + hcr_base),
ioread32(CC + hcr_base),
ioread32(COMMANDSTAT + host_priv->csr_base));
return 0;
}
static void sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct sata_fsl_port_priv *pp = qc->ap->private_data;
struct sata_fsl_host_priv *host_priv = qc->ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
unsigned int tag = sata_fsl_tag(qc->ap, qc->hw_tag, hcr_base);
struct command_desc *cd;
cd = pp->cmdentry + tag;
ata_tf_from_fis(cd->sfis, &qc->result_tf);
}
static int sata_fsl_scr_write(struct ata_link *link,
unsigned int sc_reg_in, u32 val)
{
struct sata_fsl_host_priv *host_priv = link->ap->host->private_data;
void __iomem *ssr_base = host_priv->ssr_base;
unsigned int sc_reg;
switch (sc_reg_in) {
case SCR_STATUS:
case SCR_ERROR:
case SCR_CONTROL:
case SCR_ACTIVE:
sc_reg = sc_reg_in;
break;
default:
return -EINVAL;
}
ata_link_dbg(link, "reg_in = %d\n", sc_reg);
iowrite32(val, ssr_base + (sc_reg * 4));
return 0;
}
static int sata_fsl_scr_read(struct ata_link *link,
unsigned int sc_reg_in, u32 *val)
{
struct sata_fsl_host_priv *host_priv = link->ap->host->private_data;
void __iomem *ssr_base = host_priv->ssr_base;
unsigned int sc_reg;
switch (sc_reg_in) {
case SCR_STATUS:
case SCR_ERROR:
case SCR_CONTROL:
case SCR_ACTIVE:
sc_reg = sc_reg_in;
break;
default:
return -EINVAL;
}
ata_link_dbg(link, "reg_in = %d\n", sc_reg);
*val = ioread32(ssr_base + (sc_reg * 4));
return 0;
}
static void sata_fsl_freeze(struct ata_port *ap)
{
struct sata_fsl_host_priv *host_priv = ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
u32 temp;
ata_port_dbg(ap, "CQ=0x%x, CA=0x%x, CE=0x%x, DE=0x%x\n",
ioread32(CQ + hcr_base),
ioread32(CA + hcr_base),
ioread32(CE + hcr_base), ioread32(DE + hcr_base));
ata_port_dbg(ap, "CmdStat = 0x%x\n",
ioread32(host_priv->csr_base + COMMANDSTAT));
/* disable interrupts on the controller/port */
temp = ioread32(hcr_base + HCONTROL);
iowrite32((temp & ~0x3F), hcr_base + HCONTROL);
ata_port_dbg(ap, "HControl = 0x%x, HStatus = 0x%x\n",
ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS));
}
static void sata_fsl_thaw(struct ata_port *ap)
{
struct sata_fsl_host_priv *host_priv = ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
u32 temp;
/* ack. any pending IRQs for this controller/port */
temp = ioread32(hcr_base + HSTATUS);
ata_port_dbg(ap, "pending IRQs = 0x%x\n", (temp & 0x3F));
if (temp & 0x3F)
iowrite32((temp & 0x3F), hcr_base + HSTATUS);
/* enable interrupts on the controller/port */
temp = ioread32(hcr_base + HCONTROL);
iowrite32((temp | DEFAULT_PORT_IRQ_ENABLE_MASK), hcr_base + HCONTROL);
ata_port_dbg(ap, "HControl = 0x%x, HStatus = 0x%x\n",
ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS));
}
static void sata_fsl_pmp_attach(struct ata_port *ap)
{
struct sata_fsl_host_priv *host_priv = ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
u32 temp;
temp = ioread32(hcr_base + HCONTROL);
iowrite32((temp | HCONTROL_PMP_ATTACHED), hcr_base + HCONTROL);
}
static void sata_fsl_pmp_detach(struct ata_port *ap)
{
struct sata_fsl_host_priv *host_priv = ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
u32 temp;
temp = ioread32(hcr_base + HCONTROL);
temp &= ~HCONTROL_PMP_ATTACHED;
iowrite32(temp, hcr_base + HCONTROL);
/* enable interrupts on the controller/port */
temp = ioread32(hcr_base + HCONTROL);
iowrite32((temp | DEFAULT_PORT_IRQ_ENABLE_MASK), hcr_base + HCONTROL);
}
static int sata_fsl_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct sata_fsl_port_priv *pp;
void *mem;
dma_addr_t mem_dma;
struct sata_fsl_host_priv *host_priv = ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
u32 temp;
pp = kzalloc(sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
GFP_KERNEL);
if (!mem) {
kfree(pp);
return -ENOMEM;
}
pp->cmdslot = mem;
pp->cmdslot_paddr = mem_dma;
mem += SATA_FSL_CMD_SLOT_SIZE;
mem_dma += SATA_FSL_CMD_SLOT_SIZE;
pp->cmdentry = mem;
pp->cmdentry_paddr = mem_dma;
ap->private_data = pp;
ata_port_dbg(ap, "CHBA = 0x%lx, cmdentry_phys = 0x%lx\n",
(unsigned long)pp->cmdslot_paddr,
(unsigned long)pp->cmdentry_paddr);
/* Now, update the CHBA register in host controller cmd register set */
iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
/*
* Now we can bring the controller on-line and also initiate
* the COMINIT sequence. We simply return here; the boot-probing
* and device discovery process is re-initiated by libATA using a
* Softreset EH (dummy) session, hence boot probing and device
* discovery will be part of the sata_fsl_softreset() callback.
*/
temp = ioread32(hcr_base + HCONTROL);
iowrite32((temp | HCONTROL_ONLINE_PHY_RST), hcr_base + HCONTROL);
ata_port_dbg(ap, "HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
ata_port_dbg(ap, "HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
ata_port_dbg(ap, "CHBA = 0x%x\n", ioread32(hcr_base + CHBA));
return 0;
}
static void sata_fsl_port_stop(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct sata_fsl_port_priv *pp = ap->private_data;
struct sata_fsl_host_priv *host_priv = ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
u32 temp;
/*
* Force host controller to go off-line, aborting current operations
*/
temp = ioread32(hcr_base + HCONTROL);
temp &= ~HCONTROL_ONLINE_PHY_RST;
temp |= HCONTROL_FORCE_OFFLINE;
iowrite32(temp, hcr_base + HCONTROL);
/* Poll for controller to go offline - should happen immediately */
ata_wait_register(ap, hcr_base + HSTATUS, ONLINE, ONLINE, 1, 1);
ap->private_data = NULL;
dma_free_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ,
pp->cmdslot, pp->cmdslot_paddr);
kfree(pp);
}
static unsigned int sata_fsl_dev_classify(struct ata_port *ap)
{
struct sata_fsl_host_priv *host_priv = ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
struct ata_taskfile tf;
u32 temp;
temp = ioread32(hcr_base + SIGNATURE);
ata_port_dbg(ap, "HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
ata_port_dbg(ap, "HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
tf.lbah = (temp >> 24) & 0xff;
tf.lbam = (temp >> 16) & 0xff;
tf.lbal = (temp >> 8) & 0xff;
tf.nsect = temp & 0xff;
return ata_port_classify(ap, &tf);
}
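/*
* A worked example of the signature decode above: the SIGNATURE
* register packs lbah:lbam:lbal:nsect, so a plain ATA disk reads
* back 0x00000101 while an ATAPI device reads back 0xEB140101.
*/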
static int sata_fsl_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct sata_fsl_host_priv *host_priv = ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
u32 temp;
int i = 0;
unsigned long start_jiffies;
try_offline_again:
/*
* Force host controller to go off-line, aborting current operations
*/
temp = ioread32(hcr_base + HCONTROL);
temp &= ~HCONTROL_ONLINE_PHY_RST;
iowrite32(temp, hcr_base + HCONTROL);
/* Poll for controller to go offline */
temp = ata_wait_register(ap, hcr_base + HSTATUS, ONLINE, ONLINE,
1, 500);
if (temp & ONLINE) {
ata_port_err(ap, "Hardreset failed, not off-lined %d\n", i);
/*
* Try to offline the controller at least twice
*/
i++;
if (i == 2)
goto err;
else
goto try_offline_again;
}
ata_port_dbg(ap, "hardreset, controller off-lined\n"
"HStatus = 0x%x HControl = 0x%x\n",
ioread32(hcr_base + HSTATUS),
ioread32(hcr_base + HCONTROL));
/*
* PHY reset should remain asserted for at least 1 ms
*/
ata_msleep(ap, 1);
sata_set_spd(link);
/*
* Now bring the host controller online again. This can take time
* while the PHY reset, communication establishment, first D2H FIS
* and device signature update complete; to be safe assume 500 ms.
* NOTE: host online status may be indicated immediately!
*/
temp = ioread32(hcr_base + HCONTROL);
temp |= (HCONTROL_ONLINE_PHY_RST | HCONTROL_SNOOP_ENABLE);
temp |= HCONTROL_PMP_ATTACHED;
iowrite32(temp, hcr_base + HCONTROL);
temp = ata_wait_register(ap, hcr_base + HSTATUS, ONLINE, 0, 1, 500);
if (!(temp & ONLINE)) {
ata_port_err(ap, "Hardreset failed, not on-lined\n");
goto err;
}
ata_port_dbg(ap, "controller off-lined & on-lined\n"
"HStatus = 0x%x HControl = 0x%x\n",
ioread32(hcr_base + HSTATUS),
ioread32(hcr_base + HCONTROL));
/*
* First, wait for the PHYRDY change to occur before waiting for
* the signature, and also verify if SStatus indicates device
* presence
*/
temp = ata_wait_register(ap, hcr_base + HSTATUS, 0xFF, 0, 1, 500);
if ((!(temp & 0x10)) || ata_link_offline(link)) {
ata_port_warn(ap, "No Device OR PHYRDY change,Hstatus = 0x%x\n",
ioread32(hcr_base + HSTATUS));
*class = ATA_DEV_NONE;
return 0;
}
/*
* Wait for the first D2H FIS from the device, i.e., the signature update notification
*/
start_jiffies = jiffies;
temp = ata_wait_register(ap, hcr_base + HSTATUS, 0xFF, 0x10,
500, jiffies_to_msecs(deadline - start_jiffies));
if ((temp & 0xFF) != 0x18) {
ata_port_warn(ap, "No Signature Update\n");
*class = ATA_DEV_NONE;
goto do_followup_srst;
} else {
ata_port_info(ap, "Signature Update detected @ %d msecs\n",
jiffies_to_msecs(jiffies - start_jiffies));
*class = sata_fsl_dev_classify(ap);
return 0;
}
do_followup_srst:
/*
* request libATA to perform follow-up softreset
*/
return -EAGAIN;
err:
return -EIO;
}
static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct sata_fsl_port_priv *pp = ap->private_data;
struct sata_fsl_host_priv *host_priv = ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
int pmp = sata_srst_pmp(link);
u32 temp;
struct ata_taskfile tf;
u8 *cfis;
u32 Serror;
if (ata_link_offline(link)) {
*class = ATA_DEV_NONE;
return 0;
}
/*
* Send a device reset (SRST) explicitly on command slot #0.
* Check: will the command queue (reg) be cleared during offlining?
* We are only online once PHY communication has been established
* and device presence has been detected, therefore if we have
* reached here we can send a command to the target device.
*/
ata_tf_init(link->device, &tf);
cfis = (u8 *) &pp->cmdentry->cfis;
/* device reset/SRST is a control register update FIS, uses tag0 */
sata_fsl_setup_cmd_hdr_entry(ap, pp, 0,
SRST_CMD | CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE, 0, 0, 5);
tf.ctl |= ATA_SRST; /* setup SRST bit in taskfile control reg */
ata_tf_to_fis(&tf, pmp, 0, cfis);
ata_port_dbg(ap, "Dumping cfis : 0x%x, 0x%x, 0x%x, 0x%x\n",
cfis[0], cfis[1], cfis[2], cfis[3]);
/*
* Queue SRST command to the controller/device, ensure that no
* other commands are active on the controller/device
*/
ata_port_dbg(ap, "CQ = 0x%x, CA = 0x%x, CC = 0x%x\n",
ioread32(CQ + hcr_base),
ioread32(CA + hcr_base), ioread32(CC + hcr_base));
iowrite32(0xFFFF, CC + hcr_base);
if (pmp != SATA_PMP_CTRL_PORT)
iowrite32(pmp, CQPMP + hcr_base);
iowrite32(1, CQ + hcr_base);
temp = ata_wait_register(ap, CQ + hcr_base, 0x1, 0x1, 1, 5000);
if (temp & 0x1) {
ata_port_warn(ap, "ATA_SRST issue failed\n");
ata_port_dbg(ap, "Softreset@5000,CQ=0x%x,CA=0x%x,CC=0x%x\n",
ioread32(CQ + hcr_base),
ioread32(CA + hcr_base), ioread32(CC + hcr_base));
sata_fsl_scr_read(&ap->link, SCR_ERROR, &Serror);
ata_port_dbg(ap, "HStatus = 0x%x HControl = 0x%x Serror = 0x%x\n",
ioread32(hcr_base + HSTATUS),
ioread32(hcr_base + HCONTROL),
Serror);
goto err;
}
ata_msleep(ap, 1);
/*
* SATA device enters reset state after receiving a Control register
* FIS with the SRST bit asserted and awaits another H2D Control
* register FIS with the SRST bit cleared; the device then runs its
* internal diagnostics and initialization, and reports its status
* via an ATA signature D2H register FIS to the host controller.
*/
sata_fsl_setup_cmd_hdr_entry(ap, pp, 0,
CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE,
0, 0, 5);
tf.ctl &= ~ATA_SRST; /* 2nd H2D Ctl. register FIS */
ata_tf_to_fis(&tf, pmp, 0, cfis);
if (pmp != SATA_PMP_CTRL_PORT)
iowrite32(pmp, CQPMP + hcr_base);
iowrite32(1, CQ + hcr_base);
ata_msleep(ap, 150); /* ?? */
/*
* The above command would have signalled an interrupt on command
* complete, which needs special handling, by clearing the Nth
* command bit of the CCreg
*/
iowrite32(0x01, CC + hcr_base); /* We know it will be cmd#0 always */
*class = ATA_DEV_NONE;
/* Verify if SStatus indicates device presence */
if (ata_link_online(link)) {
/*
* If we are here, device presence has been detected and the
* first D2H FIS received; the SFIS in the command descriptor
* is not updated, but the signature register will have been.
*/
*class = sata_fsl_dev_classify(ap);
ata_port_dbg(ap, "ccreg = 0x%x\n", ioread32(hcr_base + CC));
ata_port_dbg(ap, "cereg = 0x%x\n", ioread32(hcr_base + CE));
}
return 0;
err:
return -EIO;
}
static void sata_fsl_error_handler(struct ata_port *ap)
{
sata_pmp_error_handler(ap);
}
static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc)
{
if (qc->flags & ATA_QCFLAG_EH)
qc->err_mask |= AC_ERR_OTHER;
if (qc->err_mask) {
/* make DMA engine forget about the failed command */
}
}
static void sata_fsl_error_intr(struct ata_port *ap)
{
struct sata_fsl_host_priv *host_priv = ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
u32 hstatus, dereg = 0, cereg = 0, SError = 0;
unsigned int err_mask = 0, action = 0;
int freeze = 0, abort = 0;
struct ata_link *link = NULL;
struct ata_queued_cmd *qc = NULL;
struct ata_eh_info *ehi;
hstatus = ioread32(hcr_base + HSTATUS);
cereg = ioread32(hcr_base + CE);
/* first, analyze and record host port events */
link = &ap->link;
ehi = &link->eh_info;
ata_ehi_clear_desc(ehi);
/*
* Handle & Clear SError
*/
sata_fsl_scr_read(&ap->link, SCR_ERROR, &SError);
if (unlikely(SError & 0xFFFF0000))
sata_fsl_scr_write(&ap->link, SCR_ERROR, SError);
ata_port_dbg(ap, "hStat=0x%x,CE=0x%x,DE =0x%x,SErr=0x%x\n",
hstatus, cereg, ioread32(hcr_base + DE), SError);
/* handle fatal errors */
if (hstatus & FATAL_ERROR_DECODE) {
ehi->err_mask |= AC_ERR_ATA_BUS;
ehi->action |= ATA_EH_SOFTRESET;
freeze = 1;
}
/* Handle SDB FIS receive & notify update */
if (hstatus & INT_ON_SNOTIFY_UPDATE)
sata_async_notification(ap);
/* Handle PHYRDY change notification */
if (hstatus & INT_ON_PHYRDY_CHG) {
ata_port_dbg(ap, "PHYRDY change indication\n");
/* Setup a soft-reset EH action */
ata_ehi_hotplugged(ehi);
ata_ehi_push_desc(ehi, "%s", "PHY RDY changed");
freeze = 1;
}
/* handle single device errors */
if (cereg) {
/*
* Clear the command error; this also clears the queue to the
* device in error, so that we can (re)issue commands to it.
* When a device is in error all commands queued into the
* host controller and at the device are considered aborted
* and the queue for that device is stopped. Now, after
* clearing the device error, we can issue commands to the
* device to interrogate it to find the source of the error.
*/
abort = 1;
ata_port_dbg(ap, "single device error, CE=0x%x, DE=0x%x\n",
ioread32(hcr_base + CE), ioread32(hcr_base + DE));
/* find out the offending link and qc */
if (ap->nr_pmp_links) {
unsigned int dev_num;
dereg = ioread32(hcr_base + DE);
iowrite32(dereg, hcr_base + DE);
iowrite32(cereg, hcr_base + CE);
dev_num = ffs(dereg) - 1;
if (dev_num < ap->nr_pmp_links && dereg != 0) {
link = &ap->pmp_link[dev_num];
ehi = &link->eh_info;
qc = ata_qc_from_tag(ap, link->active_tag);
/*
* We should consider this as non fatal error,
* and TF must be updated as done below.
*/
err_mask |= AC_ERR_DEV;
} else {
err_mask |= AC_ERR_HSM;
action |= ATA_EH_HARDRESET;
freeze = 1;
}
} else {
dereg = ioread32(hcr_base + DE);
iowrite32(dereg, hcr_base + DE);
iowrite32(cereg, hcr_base + CE);
qc = ata_qc_from_tag(ap, link->active_tag);
/*
* We should consider this as non fatal error,
* and TF must be updated as done below.
*/
err_mask |= AC_ERR_DEV;
}
}
/* record error info */
if (qc)
qc->err_mask |= err_mask;
else
ehi->err_mask |= err_mask;
ehi->action |= action;
/* freeze or abort */
if (freeze)
ata_port_freeze(ap);
else if (abort) {
if (qc)
ata_link_abort(qc->dev->link);
else
ata_port_abort(ap);
}
}
static void sata_fsl_host_intr(struct ata_port *ap)
{
struct sata_fsl_host_priv *host_priv = ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
u32 hstatus, done_mask = 0;
struct ata_queued_cmd *qc;
u32 SError;
u32 tag;
u32 status_mask = INT_ON_ERROR;
hstatus = ioread32(hcr_base + HSTATUS);
sata_fsl_scr_read(&ap->link, SCR_ERROR, &SError);
/* Read command completed register */
done_mask = ioread32(hcr_base + CC);
/* Workaround for data length mismatch errata */
if (unlikely(hstatus & INT_ON_DATA_LENGTH_MISMATCH)) {
ata_qc_for_each_with_internal(ap, qc, tag) {
if (qc && ata_is_atapi(qc->tf.protocol)) {
u32 hcontrol;
/* Set HControl[27] to clear error registers */
hcontrol = ioread32(hcr_base + HCONTROL);
iowrite32(hcontrol | CLEAR_ERROR,
hcr_base + HCONTROL);
/* Clear HControl[27] */
iowrite32(hcontrol & ~CLEAR_ERROR,
hcr_base + HCONTROL);
/* Clear SError[E] bit */
sata_fsl_scr_write(&ap->link, SCR_ERROR,
SError);
/* Ignore fatal error and device error */
status_mask &= ~(INT_ON_SINGL_DEVICE_ERR
| INT_ON_FATAL_ERR);
break;
}
}
}
if (unlikely(SError & 0xFFFF0000)) {
ata_port_dbg(ap, "serror @host_intr : 0x%x\n", SError);
sata_fsl_error_intr(ap);
}
if (unlikely(hstatus & status_mask)) {
ata_port_dbg(ap, "error interrupt!!\n");
sata_fsl_error_intr(ap);
return;
}
ata_port_dbg(ap, "Status of all queues :\n");
ata_port_dbg(ap, "done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%llx\n",
done_mask,
ioread32(hcr_base + CA),
ioread32(hcr_base + CE),
ioread32(hcr_base + CQ),
ap->qc_active);
if (done_mask & ap->qc_active) {
int i;
/* clear CC bit, this will also complete the interrupt */
iowrite32(done_mask, hcr_base + CC);
ata_port_dbg(ap, "Status of all queues: done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x\n",
done_mask, ioread32(hcr_base + CA),
ioread32(hcr_base + CE));
for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) {
if (done_mask & (1 << i))
ata_port_dbg(ap, "completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n",
i, ioread32(hcr_base + CC),
ioread32(hcr_base + CA));
}
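/*
 * ata_qc_complete_multiple() expects the mask of commands that are
 * still active, so strip the completed tags from the active mask
 */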
ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
return;
} else if (ap->qc_active & (1ULL << ATA_TAG_INTERNAL)) {
iowrite32(1, hcr_base + CC);
qc = ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
ata_port_dbg(ap, "completing non-ncq cmd, CC=0x%x\n",
ioread32(hcr_base + CC));
if (qc)
ata_qc_complete(qc);
} else {
/* Spurious interrupt */
ata_port_dbg(ap, "spurious interrupt, CC = 0x%x\n",
ioread32(hcr_base + CC));
iowrite32(done_mask, hcr_base + CC);
return;
}
}
static irqreturn_t sata_fsl_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct sata_fsl_host_priv *host_priv = host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
u32 interrupt_enables;
unsigned handled = 0;
struct ata_port *ap;
/* ack. any pending IRQs for this controller/port */
interrupt_enables = ioread32(hcr_base + HSTATUS);
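/* the low six bits of HStatus hold the interrupt status flags */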
interrupt_enables &= 0x3F;
if (!interrupt_enables)
return IRQ_NONE;
spin_lock(&host->lock);
/* Assuming one port per host controller */
ap = host->ports[0];
if (ap) {
sata_fsl_host_intr(ap);
} else {
dev_warn(host->dev, "interrupt on disabled port 0\n");
}
iowrite32(interrupt_enables, hcr_base + HSTATUS);
handled = 1;
spin_unlock(&host->lock);
return IRQ_RETVAL(handled);
}
/*
* Multiple ports are represented by multiple SATA controllers with
* one port per controller
*/
static int sata_fsl_init_controller(struct ata_host *host)
{
struct sata_fsl_host_priv *host_priv = host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
u32 temp;
/*
* NOTE : We cannot bring the controller online before setting
* the CHBA, hence main controller initialization is done as
* part of the port_start() callback
*/
/* set the SATA controller to operate in enterprise mode */
temp = ioread32(hcr_base + HCONTROL);
iowrite32(temp & ~HCONTROL_LEGACY, hcr_base + HCONTROL);
/* ack. any pending IRQs for this controller/port */
temp = ioread32(hcr_base + HSTATUS);
if (temp & 0x3F)
iowrite32((temp & 0x3F), hcr_base + HSTATUS);
/* Keep interrupts disabled on the controller */
temp = ioread32(hcr_base + HCONTROL);
iowrite32((temp & ~0x3F), hcr_base + HCONTROL);
/* Disable interrupt coalescing control (ICC), for the moment */
dev_dbg(host->dev, "icc = 0x%x\n", ioread32(hcr_base + ICC));
iowrite32(0x01000000, hcr_base + ICC);
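/*
 * 0x01000000 presumably programs a completion count of one with no
 * timeout, i.e. signal an interrupt on every command completion
 */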
/* clear error registers, SError is cleared by libATA */
iowrite32(0x0000FFFF, hcr_base + CE);
iowrite32(0x0000FFFF, hcr_base + DE);
/*
* reset the number of command complete bits which will cause the
* interrupt to be signaled
*/
fsl_sata_set_irq_coalescing(host, intr_coalescing_count,
intr_coalescing_ticks);
/*
* host controller will be brought on-line, during xx_port_start()
* callback, that should also initiate the OOB, COMINIT sequence
*/
dev_dbg(host->dev, "HStatus = 0x%x HControl = 0x%x\n",
ioread32(hcr_base + HSTATUS), ioread32(hcr_base + HCONTROL));
return 0;
}
static void sata_fsl_host_stop(struct ata_host *host)
{
struct sata_fsl_host_priv *host_priv = host->private_data;
iounmap(host_priv->hcr_base);
kfree(host_priv);
}
/*
* scsi mid-layer and libata interface structures
*/
static const struct scsi_host_template sata_fsl_sht = {
ATA_NCQ_SHT_QD("sata_fsl", SATA_FSL_QUEUE_DEPTH),
.sg_tablesize = SATA_FSL_MAX_PRD_USABLE,
.dma_boundary = ATA_DMA_BOUNDARY,
};
static struct ata_port_operations sata_fsl_ops = {
.inherits = &sata_pmp_port_ops,
.qc_defer = ata_std_qc_defer,
.qc_prep = sata_fsl_qc_prep,
.qc_issue = sata_fsl_qc_issue,
.qc_fill_rtf = sata_fsl_qc_fill_rtf,
.scr_read = sata_fsl_scr_read,
.scr_write = sata_fsl_scr_write,
.freeze = sata_fsl_freeze,
.thaw = sata_fsl_thaw,
.softreset = sata_fsl_softreset,
.hardreset = sata_fsl_hardreset,
.pmp_softreset = sata_fsl_softreset,
.error_handler = sata_fsl_error_handler,
.post_internal_cmd = sata_fsl_post_internal_cmd,
.port_start = sata_fsl_port_start,
.port_stop = sata_fsl_port_stop,
.host_stop = sata_fsl_host_stop,
.pmp_attach = sata_fsl_pmp_attach,
.pmp_detach = sata_fsl_pmp_detach,
};
static const struct ata_port_info sata_fsl_port_info[] = {
{
.flags = SATA_FSL_HOST_FLAGS,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &sata_fsl_ops,
},
};
static int sata_fsl_probe(struct platform_device *ofdev)
{
int retval = -ENXIO;
void __iomem *hcr_base = NULL;
void __iomem *ssr_base = NULL;
void __iomem *csr_base = NULL;
struct sata_fsl_host_priv *host_priv = NULL;
int irq;
struct ata_host *host = NULL;
u32 temp;
struct ata_port_info pi = sata_fsl_port_info[0];
const struct ata_port_info *ppi[] = { &pi, NULL };
dev_info(&ofdev->dev, "Sata FSL Platform/CSB Driver init\n");
hcr_base = of_iomap(ofdev->dev.of_node, 0);
if (!hcr_base)
goto error_exit_with_cleanup;
ssr_base = hcr_base + 0x100;
csr_base = hcr_base + 0x140;
if (!of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc8315-sata")) {
temp = ioread32(csr_base + TRANSCFG);
temp = temp & 0xffffffe0;
iowrite32(temp | TRANSCFG_RX_WATER_MARK, csr_base + TRANSCFG);
}
dev_dbg(&ofdev->dev, "@reset i/o = 0x%x\n",
ioread32(csr_base + TRANSCFG));
host_priv = kzalloc(sizeof(struct sata_fsl_host_priv), GFP_KERNEL);
if (!host_priv)
goto error_exit_with_cleanup;
host_priv->hcr_base = hcr_base;
host_priv->ssr_base = ssr_base;
host_priv->csr_base = csr_base;
irq = platform_get_irq(ofdev, 0);
if (irq < 0) {
retval = irq;
goto error_exit_with_cleanup;
}
host_priv->irq = irq;
if (of_device_is_compatible(ofdev->dev.of_node, "fsl,pq-sata-v2"))
host_priv->data_snoop = DATA_SNOOP_ENABLE_V2;
else
host_priv->data_snoop = DATA_SNOOP_ENABLE_V1;
/* allocate host structure */
host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_FSL_MAX_PORTS);
if (!host) {
retval = -ENOMEM;
goto error_exit_with_cleanup;
}
/* host->iomap is not used currently */
host->private_data = host_priv;
/* initialize host controller */
sata_fsl_init_controller(host);
/*
* Now, register with libATA core, this will also initiate the
* device discovery process, invoking our port_start() handler &
* error_handler() to execute a dummy Softreset EH session
*/
ata_host_activate(host, irq, sata_fsl_interrupt, SATA_FSL_IRQ_FLAG,
&sata_fsl_sht);
host_priv->intr_coalescing.show = fsl_sata_intr_coalescing_show;
host_priv->intr_coalescing.store = fsl_sata_intr_coalescing_store;
sysfs_attr_init(&host_priv->intr_coalescing.attr);
host_priv->intr_coalescing.attr.name = "intr_coalescing";
host_priv->intr_coalescing.attr.mode = S_IRUGO | S_IWUSR;
retval = device_create_file(host->dev, &host_priv->intr_coalescing);
if (retval)
goto error_exit_with_cleanup;
host_priv->rx_watermark.show = fsl_sata_rx_watermark_show;
host_priv->rx_watermark.store = fsl_sata_rx_watermark_store;
sysfs_attr_init(&host_priv->rx_watermark.attr);
host_priv->rx_watermark.attr.name = "rx_watermark";
host_priv->rx_watermark.attr.mode = S_IRUGO | S_IWUSR;
retval = device_create_file(host->dev, &host_priv->rx_watermark);
if (retval) {
device_remove_file(&ofdev->dev, &host_priv->intr_coalescing);
goto error_exit_with_cleanup;
}
return 0;
error_exit_with_cleanup:
if (host)
ata_host_detach(host);
if (hcr_base)
iounmap(hcr_base);
kfree(host_priv);
return retval;
}
static void sata_fsl_remove(struct platform_device *ofdev)
{
struct ata_host *host = platform_get_drvdata(ofdev);
struct sata_fsl_host_priv *host_priv = host->private_data;
device_remove_file(&ofdev->dev, &host_priv->intr_coalescing);
device_remove_file(&ofdev->dev, &host_priv->rx_watermark);
ata_host_detach(host);
}
#ifdef CONFIG_PM_SLEEP
static int sata_fsl_suspend(struct platform_device *op, pm_message_t state)
{
struct ata_host *host = platform_get_drvdata(op);
ata_host_suspend(host, state);
return 0;
}
static int sata_fsl_resume(struct platform_device *op)
{
struct ata_host *host = platform_get_drvdata(op);
struct sata_fsl_host_priv *host_priv = host->private_data;
int ret;
void __iomem *hcr_base = host_priv->hcr_base;
struct ata_port *ap = host->ports[0];
struct sata_fsl_port_priv *pp = ap->private_data;
ret = sata_fsl_init_controller(host);
if (ret) {
dev_err(&op->dev, "Error initializing hardware\n");
return ret;
}
/* Recover the CHBA register in the host controller cmd register set */
iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
iowrite32((ioread32(hcr_base + HCONTROL)
| HCONTROL_ONLINE_PHY_RST
| HCONTROL_SNOOP_ENABLE
| HCONTROL_PMP_ATTACHED),
hcr_base + HCONTROL);
ata_host_resume(host);
return 0;
}
#endif
static const struct of_device_id fsl_sata_match[] = {
{ .compatible = "fsl,pq-sata", },
{ .compatible = "fsl,pq-sata-v2", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_sata_match);
static struct platform_driver fsl_sata_driver = {
.driver = {
.name = "fsl-sata",
.of_match_table = fsl_sata_match,
},
.probe = sata_fsl_probe,
.remove_new = sata_fsl_remove,
#ifdef CONFIG_PM_SLEEP
.suspend = sata_fsl_suspend,
.resume = sata_fsl_resume,
#endif
};
module_platform_driver(fsl_sata_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ashish Kalra, Freescale Semiconductor");
MODULE_DESCRIPTION("Freescale 3.0Gbps SATA controller low level driver");
MODULE_VERSION("1.10");
| linux-master | drivers/ata/sata_fsl.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Libata driver for the highpoint 37x and 30x UDMA66 ATA controllers.
*
* This driver is heavily based upon:
*
* linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003
*
* Copyright (C) 1999-2003 Andre Hedrick <[email protected]>
* Portions Copyright (C) 2001 Sun Microsystems, Inc.
* Portions Copyright (C) 2003 Red Hat Inc
* Portions Copyright (C) 2005-2010 MontaVista Software, Inc.
*
* TODO
* Look into engine reset on timeout errors. Should not be required.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_hpt37x"
#define DRV_VERSION "0.6.30"
struct hpt_clock {
u8 xfer_speed;
u32 timing;
};
struct hpt_chip {
const char *name;
unsigned int base;
struct hpt_clock const *clocks[4];
};
/* key for bus clock timings
* bit
* 0:3 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA.
* cycles = value + 1
* 4:8 data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA.
* cycles = value + 1
* 9:12 cmd_high_time. Inactive time of DIOW_/DIOR_ during task file
* register access.
* 13:17 cmd_low_time. Active time of DIOW_/DIOR_ during task file
* register access.
* 18:20 udma_cycle_time. Clock cycles for UDMA xfer.
* 21 CLK frequency for UDMA: 0=ATA clock, 1=dual ATA clock.
* 22:24 pre_high_time. Time to initialize 1st cycle for PIO and MW DMA xfer.
* 25:27 cmd_pre_high_time. Time to initialize 1st PIO cycle for task file
* register access.
* 28 UDMA enable.
* 29 DMA enable.
* 30 PIO_MST enable. If set, the chip is in bus master mode during
* PIO xfer.
* 31 FIFO enable. Only for PIO.
*/
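/*
 * Worked example, decoding 0x12446231 (the XFER_UDMA_5 entry below)
 * against the key above: data_high_time = 1, data_low_time = 3,
 * cmd_high_time = 1, cmd_low_time = 3, udma_cycle_time = 1, ATA clock
 * for UDMA, pre_high_time = 1, cmd_pre_high_time = 1, UDMA enabled,
 * DMA disabled, PIO_MST off, FIFO off.
 */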
static struct hpt_clock hpt37x_timings_33[] = {
{ XFER_UDMA_6, 0x12446231 }, /* 0x12646231 ?? */
{ XFER_UDMA_5, 0x12446231 },
{ XFER_UDMA_4, 0x12446231 },
{ XFER_UDMA_3, 0x126c6231 },
{ XFER_UDMA_2, 0x12486231 },
{ XFER_UDMA_1, 0x124c6233 },
{ XFER_UDMA_0, 0x12506297 },
{ XFER_MW_DMA_2, 0x22406c31 },
{ XFER_MW_DMA_1, 0x22406c33 },
{ XFER_MW_DMA_0, 0x22406c97 },
{ XFER_PIO_4, 0x06414e31 },
{ XFER_PIO_3, 0x06414e42 },
{ XFER_PIO_2, 0x06414e53 },
{ XFER_PIO_1, 0x06814e93 },
{ XFER_PIO_0, 0x06814ea7 }
};
static struct hpt_clock hpt37x_timings_50[] = {
{ XFER_UDMA_6, 0x12848242 },
{ XFER_UDMA_5, 0x12848242 },
{ XFER_UDMA_4, 0x12ac8242 },
{ XFER_UDMA_3, 0x128c8242 },
{ XFER_UDMA_2, 0x120c8242 },
{ XFER_UDMA_1, 0x12148254 },
{ XFER_UDMA_0, 0x121882ea },
{ XFER_MW_DMA_2, 0x22808242 },
{ XFER_MW_DMA_1, 0x22808254 },
{ XFER_MW_DMA_0, 0x228082ea },
{ XFER_PIO_4, 0x0a81f442 },
{ XFER_PIO_3, 0x0a81f443 },
{ XFER_PIO_2, 0x0a81f454 },
{ XFER_PIO_1, 0x0ac1f465 },
{ XFER_PIO_0, 0x0ac1f48a }
};
static struct hpt_clock hpt37x_timings_66[] = {
{ XFER_UDMA_6, 0x1c869c62 },
{ XFER_UDMA_5, 0x1cae9c62 }, /* 0x1c8a9c62 */
{ XFER_UDMA_4, 0x1c8a9c62 },
{ XFER_UDMA_3, 0x1c8e9c62 },
{ XFER_UDMA_2, 0x1c929c62 },
{ XFER_UDMA_1, 0x1c9a9c62 },
{ XFER_UDMA_0, 0x1c829c62 },
{ XFER_MW_DMA_2, 0x2c829c62 },
{ XFER_MW_DMA_1, 0x2c829c66 },
{ XFER_MW_DMA_0, 0x2c829d2e },
{ XFER_PIO_4, 0x0c829c62 },
{ XFER_PIO_3, 0x0c829c84 },
{ XFER_PIO_2, 0x0c829ca6 },
{ XFER_PIO_1, 0x0d029d26 },
{ XFER_PIO_0, 0x0d029d5e }
};
static const struct hpt_chip hpt370 = {
"HPT370",
48,
{
hpt37x_timings_33,
NULL,
NULL,
NULL
}
};
static const struct hpt_chip hpt370a = {
"HPT370A",
48,
{
hpt37x_timings_33,
NULL,
hpt37x_timings_50,
NULL
}
};
static const struct hpt_chip hpt372 = {
"HPT372",
55,
{
hpt37x_timings_33,
NULL,
hpt37x_timings_50,
hpt37x_timings_66
}
};
static const struct hpt_chip hpt302 = {
"HPT302",
66,
{
hpt37x_timings_33,
NULL,
hpt37x_timings_50,
hpt37x_timings_66
}
};
static const struct hpt_chip hpt371 = {
"HPT371",
66,
{
hpt37x_timings_33,
NULL,
hpt37x_timings_50,
hpt37x_timings_66
}
};
static const struct hpt_chip hpt372a = {
"HPT372A",
66,
{
hpt37x_timings_33,
NULL,
hpt37x_timings_50,
hpt37x_timings_66
}
};
static const struct hpt_chip hpt374 = {
"HPT374",
48,
{
hpt37x_timings_33,
NULL,
NULL,
NULL
}
};
/**
* hpt37x_find_mode - reset the hpt37x bus
* @ap: ATA port
* @speed: transfer mode
*
* Return the 32bit register programming information for this channel
* that matches the speed provided.
*/
static u32 hpt37x_find_mode(struct ata_port *ap, int speed)
{
struct hpt_clock *clocks = ap->host->private_data;
while (clocks->xfer_speed) {
if (clocks->xfer_speed == speed)
return clocks->timing;
clocks++;
}
BUG();
return 0xffffffffU; /* silence compiler warning */
}
static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
const char * const list[])
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
int i;
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
i = match_string(list, -1, model_num);
if (i >= 0) {
ata_dev_warn(dev, "%s is not supported for %s\n",
modestr, list[i]);
return 1;
}
return 0;
}
static const char * const bad_ata33[] = {
"Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3",
"Maxtor 90845U3", "Maxtor 90650U2",
"Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5",
"Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
"Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6",
"Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
"Maxtor 90510D4",
"Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
"Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7",
"Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
"Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5",
"Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
NULL
};
static const char * const bad_ata100_5[] = {
"IBM-DTLA-307075",
"IBM-DTLA-307060",
"IBM-DTLA-307045",
"IBM-DTLA-307030",
"IBM-DTLA-307020",
"IBM-DTLA-307015",
"IBM-DTLA-305040",
"IBM-DTLA-305030",
"IBM-DTLA-305020",
"IC35L010AVER07-0",
"IC35L020AVER07-0",
"IC35L030AVER07-0",
"IC35L040AVER07-0",
"IC35L060AVER07-0",
"WDC AC310200R",
NULL
};
/**
* hpt370_filter - mode selection filter
* @adev: ATA device
* @mask: mode mask
*
* Block UDMA on devices that cause trouble with this controller.
*/
static unsigned int hpt370_filter(struct ata_device *adev, unsigned int mask)
{
if (adev->class == ATA_DEV_ATA) {
if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
mask &= ~ATA_MASK_UDMA;
if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
return mask;
}
/**
* hpt370a_filter - mode selection filter
* @adev: ATA device
* @mask: mode mask
*
* Block UDMA on devices that cause trouble with this controller.
*/
static unsigned int hpt370a_filter(struct ata_device *adev, unsigned int mask)
{
if (adev->class == ATA_DEV_ATA) {
if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
return mask;
}
/**
* hpt372_filter - mode selection filter
* @adev: ATA device
* @mask: mode mask
*
* The Marvell bridge chips used on the HighPoint SATA cards do not seem
* to support the UltraDMA modes 1, 2 and 3, nor any MWDMA modes...
*/
static unsigned int hpt372_filter(struct ata_device *adev, unsigned int mask)
{
if (ata_id_is_sata(adev->id))
mask &= ~((0xE << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA);
return mask;
}
/**
* hpt37x_cable_detect - Detect the cable type
* @ap: ATA port to detect on
*
* Return the cable type attached to this port
*/
static int hpt37x_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 scr2, ata66;
pci_read_config_byte(pdev, 0x5B, &scr2);
pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01);
udelay(10); /* debounce */
/* Cable register now active */
pci_read_config_byte(pdev, 0x5A, &ata66);
/* Restore state */
pci_write_config_byte(pdev, 0x5B, scr2);
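/* Port 0 tests bit 1 of the cable register, port 1 tests bit 0 */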
if (ata66 & (2 >> ap->port_no))
return ATA_CBL_PATA40;
else
return ATA_CBL_PATA80;
}
/**
* hpt374_fn1_cable_detect - Detect the cable type
* @ap: ATA port to detect on
*
* Return the cable type attached to this port
*/
static int hpt374_fn1_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned int mcrbase = 0x50 + 4 * ap->port_no;
u16 mcr3;
u8 ata66;
/* Do the extra channel work */
pci_read_config_word(pdev, mcrbase + 2, &mcr3);
/* Set bit 15 of 0x52 to enable TCBLID as input */
pci_write_config_word(pdev, mcrbase + 2, mcr3 | 0x8000);
pci_read_config_byte(pdev, 0x5A, &ata66);
/* Reset TCBLID/FCBLID to output */
pci_write_config_word(pdev, mcrbase + 2, mcr3);
if (ata66 & (2 >> ap->port_no))
return ATA_CBL_PATA40;
else
return ATA_CBL_PATA80;
}
/**
* hpt37x_pre_reset - reset the hpt37x bus
* @link: ATA link to reset
* @deadline: deadline jiffies for the operation
*
* Perform the initial reset handling for the HPT37x.
*/
static int hpt37x_pre_reset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static const struct pci_bits hpt37x_enable_bits[] = {
{ 0x50, 1, 0x04, 0x04 },
{ 0x54, 1, 0x04, 0x04 }
};
u8 mcr2;
if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no]))
return -ENOENT;
/* Reset the state machine */
pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
udelay(100);
/*
* Disable the "fast interrupt" prediction. Don't hold off
* on interrupts. (== 0x01 despite what the docs say)
*/
pci_read_config_byte(pdev, 0x51 + 4 * ap->port_no, &mcr2);
/* Is it HPT370/A? */
if (pdev->device == PCI_DEVICE_ID_TTI_HPT366 && pdev->revision < 5) {
mcr2 &= ~0x02;
mcr2 |= 0x01;
} else {
mcr2 &= ~0x07;
}
pci_write_config_byte(pdev, 0x51 + 4 * ap->port_no, mcr2);
return ata_sff_prereset(link, deadline);
}
static void hpt37x_set_mode(struct ata_port *ap, struct ata_device *adev,
u8 mode)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int addr = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
u32 reg, timing, mask;
/* Determine timing mask and find matching mode entry */
if (mode < XFER_MW_DMA_0)
mask = 0xcfc3ffff;
else if (mode < XFER_UDMA_0)
mask = 0x31c001ff;
else
mask = 0x303c0000;
timing = hpt37x_find_mode(ap, mode);
pci_read_config_dword(pdev, addr, &reg);
reg = (reg & ~mask) | (timing & mask);
pci_write_config_dword(pdev, addr, reg);
}
/**
* hpt37x_set_piomode - PIO setup
* @ap: ATA interface
* @adev: device on the interface
*
* Perform PIO mode setup.
*/
static void hpt37x_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
hpt37x_set_mode(ap, adev, adev->pio_mode);
}
/**
* hpt37x_set_dmamode - DMA timing setup
* @ap: ATA interface
* @adev: Device being configured
*
* Set up the channel for MWDMA or UDMA modes.
*/
static void hpt37x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
hpt37x_set_mode(ap, adev, adev->dma_mode);
}
/**
* hpt370_bmdma_stop - DMA engine stop
* @qc: ATA command
*
* Work around the HPT370 DMA engine.
*/
static void hpt370_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
void __iomem *bmdma = ap->ioaddr.bmdma_addr;
u8 dma_stat = ioread8(bmdma + ATA_DMA_STATUS);
u8 dma_cmd;
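/* the engine may need a moment to go idle; re-read once before forcing a stop */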
if (dma_stat & ATA_DMA_ACTIVE) {
udelay(20);
dma_stat = ioread8(bmdma + ATA_DMA_STATUS);
}
if (dma_stat & ATA_DMA_ACTIVE) {
/* Clear the engine */
pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
udelay(10);
/* Stop DMA */
dma_cmd = ioread8(bmdma + ATA_DMA_CMD);
iowrite8(dma_cmd & ~ATA_DMA_START, bmdma + ATA_DMA_CMD);
/* Clear Error */
dma_stat = ioread8(bmdma + ATA_DMA_STATUS);
iowrite8(dma_stat | ATA_DMA_INTR | ATA_DMA_ERR,
bmdma + ATA_DMA_STATUS);
/* Clear the engine */
pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
udelay(10);
}
ata_bmdma_stop(qc);
}
/**
* hpt37x_bmdma_stop - DMA engine stop
* @qc: ATA command
*
* Clean up after the HPT372 and later DMA engine
*/
static void hpt37x_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int mscreg = 0x50 + 4 * ap->port_no;
u8 bwsr_stat, msc_stat;
pci_read_config_byte(pdev, 0x6A, &bwsr_stat);
pci_read_config_byte(pdev, mscreg, &msc_stat);
if (bwsr_stat & (1 << ap->port_no))
pci_write_config_byte(pdev, mscreg, msc_stat | 0x30);
ata_bmdma_stop(qc);
}
static const struct scsi_host_template hpt37x_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
/*
* Configuration for HPT370
*/
static struct ata_port_operations hpt370_port_ops = {
.inherits = &ata_bmdma_port_ops,
.bmdma_stop = hpt370_bmdma_stop,
.mode_filter = hpt370_filter,
.cable_detect = hpt37x_cable_detect,
.set_piomode = hpt37x_set_piomode,
.set_dmamode = hpt37x_set_dmamode,
.prereset = hpt37x_pre_reset,
};
/*
* Configuration for HPT370A. Close to 370 but less filters
*/
static struct ata_port_operations hpt370a_port_ops = {
.inherits = &hpt370_port_ops,
.mode_filter = hpt370a_filter,
};
/*
* Configuration for HPT371 and HPT302.
*/
static struct ata_port_operations hpt302_port_ops = {
.inherits = &ata_bmdma_port_ops,
.bmdma_stop = hpt37x_bmdma_stop,
.cable_detect = hpt37x_cable_detect,
.set_piomode = hpt37x_set_piomode,
.set_dmamode = hpt37x_set_dmamode,
.prereset = hpt37x_pre_reset,
};
/*
* Configuration for HPT372. Mode setting works like 371 and 302
* but we have a mode filter.
*/
static struct ata_port_operations hpt372_port_ops = {
.inherits = &hpt302_port_ops,
.mode_filter = hpt372_filter,
};
/*
* Configuration for HPT374. Mode setting and filtering works like 372
* but we have a different cable detection procedure for function 1.
*/
static struct ata_port_operations hpt374_fn1_port_ops = {
.inherits = &hpt372_port_ops,
.cable_detect = hpt374_fn1_cable_detect,
};
/**
* hpt37x_clock_slot - Turn timing to PC clock entry
* @freq: Reported frequency in MHz
*
* Turn the timing data into a clock slot (0 for 33, 1 for 40, 2 for 50
* and 3 for 66 MHz)
*/
static int hpt37x_clock_slot(unsigned int freq)
{
if (freq < 40)
return 0; /* 33 MHz slot */
if (freq < 45)
return 1; /* 40 MHz slot */
if (freq < 55)
return 2; /* 50 MHz slot */
return 3; /* 66 MHz slot */
}
/**
* hpt37x_calibrate_dpll - Calibrate the DPLL loop
* @dev: PCI device
*
* Perform a calibration cycle on the HPT37x DPLL. Returns 1 if this
* succeeds
*/
static int hpt37x_calibrate_dpll(struct pci_dev *dev)
{
u8 reg5b;
u32 reg5c;
int tries;
for (tries = 0; tries < 0x5000; tries++) {
udelay(50);
pci_read_config_byte(dev, 0x5b, &reg5b);
if (reg5b & 0x80) {
/* See if it stays set */
for (tries = 0; tries < 0x1000; tries++) {
pci_read_config_byte(dev, 0x5b, &reg5b);
/* Failed ? */
if ((reg5b & 0x80) == 0)
return 0;
}
/* Turn off tuning, we have the DPLL set */
pci_read_config_dword(dev, 0x5c, &reg5c);
pci_write_config_dword(dev, 0x5c, reg5c & ~0x100);
return 1;
}
}
/* Never went stable */
return 0;
}
static int hpt37x_pci_clock(struct pci_dev *pdev, unsigned int base)
{
unsigned int freq;
u32 fcnt;
/*
* Some devices do not let this value be accessed via PCI space
* according to the old driver. In addition we must use the value
* from FN 0 on the HPT374.
*/
if (pdev->device == PCI_DEVICE_ID_TTI_HPT374 &&
(PCI_FUNC(pdev->devfn) & 1)) {
struct pci_dev *pdev_fn0;
pdev_fn0 = pci_get_slot(pdev->bus, pdev->devfn - 1);
/* Someone hot plugged the controller on us? */
if (!pdev_fn0)
return 0;
fcnt = inl(pci_resource_start(pdev_fn0, 4) + 0x90);
pci_dev_put(pdev_fn0);
} else {
fcnt = inl(pci_resource_start(pdev, 4) + 0x90);
}
if ((fcnt >> 12) != 0xABCDE) {
u32 total = 0;
int i;
u16 sr;
dev_warn(&pdev->dev, "BIOS clock data not set\n");
/* This is the process the HPT371 BIOS is reported to use */
for (i = 0; i < 128; i++) {
pci_read_config_word(pdev, 0x78, &sr);
total += sr & 0x1FF;
udelay(15);
}
fcnt = total / 128;
}
fcnt &= 0x1FF;
freq = (fcnt * base) / 192; /* in MHz */
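/*
 * Worked example: with the HPT372 base of 55, a counter reading of
 * 116 gives (116 * 55) / 192 = 33, i.e. a 33 MHz PCI bus
 */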
/* Clamp to bands */
if (freq < 40)
return 33;
if (freq < 45)
return 40;
if (freq < 55)
return 50;
return 66;
}
/**
* hpt37x_init_one - Initialise an HPT37X/302
* @dev: PCI device
* @id: Entry in match table
*
* Initialise an HPT37x device. There are some interesting complications
* here. Firstly the chip may report 366 and be one of several variants.
* Secondly all the timings depend on the clock for the chip which we must
* detect and look up
*
* These are the known chip mappings. They may be missing a couple of
* later releases.
*
* Chip version PCI Rev Notes
* HPT366 4 (HPT366) 0 Other driver
* HPT366 4 (HPT366) 1 Other driver
* HPT368 4 (HPT366) 2 Other driver
* HPT370 4 (HPT366) 3 UDMA100
* HPT370A 4 (HPT366) 4 UDMA100
* HPT372 4 (HPT366) 5 UDMA133 (1)
* HPT372N 4 (HPT366) 6 Other driver
* HPT372A 5 (HPT372) 1 UDMA133 (1)
* HPT372N 5 (HPT372) 2 Other driver
* HPT302 6 (HPT302) 1 UDMA133
* HPT302N 6 (HPT302) 2 Other driver
* HPT371 7 (HPT371) * UDMA133
* HPT374 8 (HPT374) * UDMA133 4 channel
* HPT372N 9 (HPT372N) * Other driver
*
* (1) UDMA133 support depends on the bus clock
*/
static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
/* HPT370 - UDMA100 */
static const struct ata_port_info info_hpt370 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &hpt370_port_ops
};
/* HPT370A - UDMA100 */
static const struct ata_port_info info_hpt370a = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &hpt370a_port_ops
};
/* HPT370 - UDMA66 */
static const struct ata_port_info info_hpt370_33 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
.port_ops = &hpt370_port_ops
};
/* HPT370A - UDMA66 */
static const struct ata_port_info info_hpt370a_33 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
.port_ops = &hpt370a_port_ops
};
/* HPT372 - UDMA133 */
static const struct ata_port_info info_hpt372 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &hpt372_port_ops
};
/* HPT371, 302 - UDMA133 */
static const struct ata_port_info info_hpt302 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &hpt302_port_ops
};
/* HPT374 - UDMA100, function 1 uses different cable_detect method */
static const struct ata_port_info info_hpt374_fn0 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &hpt372_port_ops
};
static const struct ata_port_info info_hpt374_fn1 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &hpt374_fn1_port_ops
};
static const int MHz[4] = { 33, 40, 50, 66 };
void *private_data = NULL;
const struct ata_port_info *ppi[] = { NULL, NULL };
u8 rev = dev->revision;
u8 irqmask;
u8 mcr1;
unsigned int freq; /* MHz */
int prefer_dpll = 1;
unsigned long iobase = pci_resource_start(dev, 4);
const struct hpt_chip *chip_table;
int clock_slot;
int rc;
rc = pcim_enable_device(dev);
if (rc)
return rc;
switch (dev->device) {
case PCI_DEVICE_ID_TTI_HPT366:
/* May be a later chip in disguise. Check */
/* Older chips are in the HPT366 driver. Ignore them */
if (rev < 3)
return -ENODEV;
/* N series chips have their own driver. Ignore */
if (rev == 6)
return -ENODEV;
switch (rev) {
case 3:
ppi[0] = &info_hpt370;
chip_table = &hpt370;
prefer_dpll = 0;
break;
case 4:
ppi[0] = &info_hpt370a;
chip_table = &hpt370a;
prefer_dpll = 0;
break;
case 5:
ppi[0] = &info_hpt372;
chip_table = &hpt372;
break;
default:
dev_err(&dev->dev,
"Unknown HPT366 subtype, please report (%d)\n",
rev);
return -ENODEV;
}
break;
case PCI_DEVICE_ID_TTI_HPT372:
/* 372N if rev >= 2 */
if (rev >= 2)
return -ENODEV;
ppi[0] = &info_hpt372;
chip_table = &hpt372a;
break;
case PCI_DEVICE_ID_TTI_HPT302:
/* 302N if rev > 1 */
if (rev > 1)
return -ENODEV;
ppi[0] = &info_hpt302;
/* Check this */
chip_table = &hpt302;
break;
case PCI_DEVICE_ID_TTI_HPT371:
if (rev > 1)
return -ENODEV;
ppi[0] = &info_hpt302;
chip_table = &hpt371;
/*
* Single channel device, master is not present but the BIOS
* (or us, on non-x86) must mark it absent
*/
pci_read_config_byte(dev, 0x50, &mcr1);
mcr1 &= ~0x04;
pci_write_config_byte(dev, 0x50, mcr1);
break;
case PCI_DEVICE_ID_TTI_HPT374:
chip_table = &hpt374;
if (!(PCI_FUNC(dev->devfn) & 1))
*ppi = &info_hpt374_fn0;
else
*ppi = &info_hpt374_fn1;
break;
default:
dev_err(&dev->dev, "PCI table is bogus, please report (%d)\n",
dev->device);
return -ENODEV;
}
/* Ok so this is a chip we support */
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
pci_read_config_byte(dev, 0x5A, &irqmask);
irqmask &= ~0x10;
pci_write_config_byte(dev, 0x5a, irqmask);
/*
* HPT371 chips physically have only one channel, the secondary one,
* but the primary channel registers do exist! Go figure...
* So, we manually disable the non-existing channel here
* (if the BIOS hasn't done this already).
*/
if (dev->device == PCI_DEVICE_ID_TTI_HPT371) {
u8 mcr1;
pci_read_config_byte(dev, 0x50, &mcr1);
mcr1 &= ~0x04;
pci_write_config_byte(dev, 0x50, mcr1);
}
/*
* Default to PCI clock. Make sure MA15/16 are set to output
* to prevent drives having problems with 40-pin cables. Needed
* for some drives such as IBM-DTLA which will not enter ready
* state on reset when PDIAG is an input.
*/
pci_write_config_byte(dev, 0x5b, 0x23);
/*
* HighPoint does this for HPT372A.
* NOTE: This register is only writeable via I/O space.
*/
if (chip_table == &hpt372a)
outb(0x0e, iobase + 0x9c);
freq = hpt37x_pci_clock(dev, chip_table->base);
if (!freq)
return -ENODEV;
/*
* Turn the frequency check into a band and then find a timing
* table to match it.
*/
clock_slot = hpt37x_clock_slot(freq);
if (chip_table->clocks[clock_slot] == NULL || prefer_dpll) {
/*
* We need to try PLL mode instead
*
* For non-UDMA133-capable devices we should
* use a 50MHz DPLL by choice
*/
unsigned int f_low, f_high;
int dpll, adjust;
/* Compute DPLL */
dpll = (ppi[0]->udma_mask & 0xC0) ? 3 : 2;
f_low = (MHz[clock_slot] * 48) / MHz[dpll];
f_high = f_low + 2;
if (clock_slot > 1)
f_high += 2;
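/*
 * Example: a 33 MHz bus clock with a 66 MHz DPLL target gives
 * f_low = (33 * 48) / 66 = 24 and f_high = 26
 */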
/* Select the DPLL clock. */
pci_write_config_byte(dev, 0x5b, 0x21);
pci_write_config_dword(dev, 0x5C,
(f_high << 16) | f_low | 0x100);
for (adjust = 0; adjust < 8; adjust++) {
if (hpt37x_calibrate_dpll(dev))
break;
/*
* See if it'll settle at a fractionally
* different clock
*/
if (adjust & 1)
f_low -= adjust >> 1;
else
f_high += adjust >> 1;
pci_write_config_dword(dev, 0x5C,
(f_high << 16) | f_low | 0x100);
}
if (adjust == 8) {
dev_err(&dev->dev, "DPLL did not stabilize!\n");
return -ENODEV;
}
if (dpll == 3)
private_data = (void *)hpt37x_timings_66;
else
private_data = (void *)hpt37x_timings_50;
dev_info(&dev->dev, "bus clock %dMHz, using %dMHz DPLL\n",
MHz[clock_slot], MHz[dpll]);
} else {
private_data = (void *)chip_table->clocks[clock_slot];
/*
* Perform a final fixup. Note that we will have used the
* DPLL on the HPT372 which means we don't have to worry
* about lack of UDMA133 support on lower clocks
*/
if (clock_slot < 2 && ppi[0] == &info_hpt370)
ppi[0] = &info_hpt370_33;
if (clock_slot < 2 && ppi[0] == &info_hpt370a)
ppi[0] = &info_hpt370a_33;
dev_info(&dev->dev, "%s using %dMHz bus clock\n",
chip_table->name, MHz[clock_slot]);
}
/* Now kick off ATA set up */
return ata_pci_bmdma_init_one(dev, ppi, &hpt37x_sht, private_data, 0);
}
static const struct pci_device_id hpt37x[] = {
{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), },
{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT371), },
{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), },
{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT374), },
{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), },
{ },
};
static struct pci_driver hpt37x_pci_driver = {
.name = DRV_NAME,
.id_table = hpt37x,
.probe = hpt37x_init_one,
.remove = ata_pci_remove_one
};
module_pci_driver(hpt37x_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for the Highpoint HPT37x/30x");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, hpt37x);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_hpt37x.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Allwinner sunxi AHCI SATA platform driver
* Copyright 2013 Olliver Schinagl <[email protected]>
* Copyright 2014 Hans de Goede <[email protected]>
*
* based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
* Based on code from Allwinner Technology Co., Ltd. <www.allwinnertech.com>,
* Daniel Wang <[email protected]>
*/
#include <linux/ahci_platform.h>
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include "ahci.h"
#define DRV_NAME "ahci-sunxi"
/* Insmod parameters */
static bool enable_pmp;
module_param(enable_pmp, bool, 0);
MODULE_PARM_DESC(enable_pmp,
"Enable support for sata port multipliers, only use if you use a pmp!");
#define AHCI_BISTAFR 0x00a0
#define AHCI_BISTCR 0x00a4
#define AHCI_BISTFCTR 0x00a8
#define AHCI_BISTSR 0x00ac
#define AHCI_BISTDECR 0x00b0
#define AHCI_DIAGNR0 0x00b4
#define AHCI_DIAGNR1 0x00b8
#define AHCI_OOBR 0x00bc
#define AHCI_PHYCS0R 0x00c0
#define AHCI_PHYCS1R 0x00c4
#define AHCI_PHYCS2R 0x00c8
#define AHCI_TIMER1MS 0x00e0
#define AHCI_GPARAM1R 0x00e8
#define AHCI_GPARAM2R 0x00ec
#define AHCI_PPARAMR 0x00f0
#define AHCI_TESTR 0x00f4
#define AHCI_VERSIONR 0x00f8
#define AHCI_IDR 0x00fc
#define AHCI_RWCR 0x00fc
#define AHCI_P0DMACR 0x0170
#define AHCI_P0PHYCR 0x0178
#define AHCI_P0PHYSR 0x017c
static void sunxi_clrbits(void __iomem *reg, u32 clr_val)
{
u32 reg_val;
reg_val = readl(reg);
reg_val &= ~(clr_val);
writel(reg_val, reg);
}
static void sunxi_setbits(void __iomem *reg, u32 set_val)
{
u32 reg_val;
reg_val = readl(reg);
reg_val |= set_val;
writel(reg_val, reg);
}
static void sunxi_clrsetbits(void __iomem *reg, u32 clr_val, u32 set_val)
{
u32 reg_val;
reg_val = readl(reg);
reg_val &= ~(clr_val);
reg_val |= set_val;
writel(reg_val, reg);
}
static u32 sunxi_getbits(void __iomem *reg, u8 mask, u8 shift)
{
return (readl(reg) >> shift) & mask;
}
static int ahci_sunxi_phy_init(struct device *dev, void __iomem *reg_base)
{
u32 reg_val;
int timeout;
/* This magic is from the original code */
writel(0, reg_base + AHCI_RWCR);
msleep(5);
sunxi_setbits(reg_base + AHCI_PHYCS1R, BIT(19));
sunxi_clrsetbits(reg_base + AHCI_PHYCS0R,
(0x7 << 24),
(0x5 << 24) | BIT(23) | BIT(18));
sunxi_clrsetbits(reg_base + AHCI_PHYCS1R,
(0x3 << 16) | (0x1f << 8) | (0x3 << 6),
(0x2 << 16) | (0x6 << 8) | (0x2 << 6));
sunxi_setbits(reg_base + AHCI_PHYCS1R, BIT(28) | BIT(15));
sunxi_clrbits(reg_base + AHCI_PHYCS1R, BIT(19));
sunxi_clrsetbits(reg_base + AHCI_PHYCS0R,
(0x7 << 20), (0x3 << 20));
sunxi_clrsetbits(reg_base + AHCI_PHYCS2R,
(0x1f << 5), (0x19 << 5));
msleep(5);
sunxi_setbits(reg_base + AHCI_PHYCS0R, (0x1 << 19));
timeout = 250; /* Power up takes approx 50 us */
do {
reg_val = sunxi_getbits(reg_base + AHCI_PHYCS0R, 0x7, 28);
if (reg_val == 0x02)
break;
if (--timeout == 0) {
dev_err(dev, "PHY power up failed.\n");
return -EIO;
}
udelay(1);
} while (1);
sunxi_setbits(reg_base + AHCI_PHYCS2R, (0x1 << 24));
timeout = 100; /* Calibration takes approx 10 us */
do {
reg_val = sunxi_getbits(reg_base + AHCI_PHYCS2R, 0x1, 24);
if (reg_val == 0x00)
break;
if (--timeout == 0) {
dev_err(dev, "PHY calibration failed.\n");
return -EIO;
}
udelay(1);
} while (1);
msleep(15);
writel(0x7, reg_base + AHCI_RWCR);
return 0;
}
static void ahci_sunxi_start_engine(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ahci_host_priv *hpriv = ap->host->private_data;
/* Setup DMA before DMA start
*
* NOTE: A similar SoC with SATA/AHCI by Texas Instruments documents
* this Vendor Specific Port (P0DMACR, aka PxDMACR) in its
* User's Guide document (TMS320C674x/OMAP-L1x Processor
* Serial ATA (SATA) Controller, Literature Number: SPRUGJ8C,
* March 2011, Chapter 4.33 Port DMA Control Register (P0DMACR),
* p.68, https://www.ti.com/lit/ug/sprugj8c/sprugj8c.pdf)
* as equivalent to the following struct:
*
* struct AHCI_P0DMACR_t
* {
* unsigned TXTS : 4;
* unsigned RXTS : 4;
* unsigned TXABL : 4;
* unsigned RXABL : 4;
* unsigned Reserved : 16;
* };
*
* TXTS: Transmit Transaction Size (TX_TRANSACTION_SIZE).
* This field defines the DMA transaction size in DWORDs for
* transmit (system bus read, device write) operation. [...]
*
* RXTS: Receive Transaction Size (RX_TRANSACTION_SIZE).
* This field defines the Port DMA transaction size in DWORDs
* for receive (system bus write, device read) operation. [...]
*
* TXABL: Transmit Burst Limit.
* This field allows software to limit the VBUSP master read
* burst size. [...]
*
* RXABL: Receive Burst Limit.
* Allows software to limit the VBUSP master write burst
* size. [...]
*
* Reserved: Reserved.
*
*
* NOTE: According to the above document, the following alternative
* to the code below could perhaps be a better option
* (or preparation) for possible further improvements later:
* sunxi_clrsetbits(hpriv->mmio + AHCI_P0DMACR, 0x0000ffff,
* 0x00000033);
*/
sunxi_clrsetbits(hpriv->mmio + AHCI_P0DMACR, 0x0000ffff, 0x00004433);
/* Start DMA */
sunxi_setbits(port_mmio + PORT_CMD, PORT_CMD_START);
}
static const struct ata_port_info ahci_sunxi_port_info = {
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ | ATA_FLAG_NO_DIPM,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_platform_ops,
};
static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
static int ahci_sunxi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
int rc;
hpriv = ahci_platform_get_resources(pdev, AHCI_PLATFORM_GET_RESETS);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
hpriv->start_engine = ahci_sunxi_start_engine;
rc = ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
rc = ahci_sunxi_phy_init(dev, hpriv->mmio);
if (rc)
goto disable_resources;
hpriv->flags = AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
AHCI_HFLAG_YES_NCQ;
/*
* The sunxi sata controller seems to be unable to successfully do a
* soft reset if no pmp is attached, so disable pmp use unless
* requested, otherwise directly attached disks do not work.
*/
if (!enable_pmp)
hpriv->flags |= AHCI_HFLAG_NO_PMP;
rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info,
&ahci_platform_sht);
if (rc)
goto disable_resources;
return 0;
disable_resources:
ahci_platform_disable_resources(hpriv);
return rc;
}
#ifdef CONFIG_PM_SLEEP
static int ahci_sunxi_resume(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
int rc;
rc = ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
rc = ahci_sunxi_phy_init(dev, hpriv->mmio);
if (rc)
goto disable_resources;
rc = ahci_platform_resume_host(dev);
if (rc)
goto disable_resources;
return 0;
disable_resources:
ahci_platform_disable_resources(hpriv);
return rc;
}
#endif
static SIMPLE_DEV_PM_OPS(ahci_sunxi_pm_ops, ahci_platform_suspend,
ahci_sunxi_resume);
static const struct of_device_id ahci_sunxi_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-ahci", },
{ .compatible = "allwinner,sun8i-r40-ahci", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_sunxi_of_match);
static struct platform_driver ahci_sunxi_driver = {
.probe = ahci_sunxi_probe,
.remove_new = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_sunxi_of_match,
.pm = &ahci_sunxi_pm_ops,
},
};
module_platform_driver(ahci_sunxi_driver);
MODULE_DESCRIPTION("Allwinner sunxi AHCI SATA driver");
MODULE_AUTHOR("Olliver Schinagl <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/ata/ahci_sunxi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Helper library for PATA timings
*
* Copyright 2003-2004 Red Hat, Inc. All rights reserved.
* Copyright 2003-2004 Jeff Garzik
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/libata.h>
/*
* This mode timing computation functionality is ported over from
* drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
*/
/*
* PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
* These were taken from ATA/ATAPI-6 standard, rev 0a, except
* for UDMA6, which is currently supported only by Maxtor drives.
*
* For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
*/
static const struct ata_timing ata_timing[] = {
/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */
{ XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
{ XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
{ XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
{ XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
{ XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
{ XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
{ XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
{ XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
{ XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
{ XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
{ XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
{ XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
{ XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
{ XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },
/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */
{ XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
{ XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
{ XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
{ XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
{ XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
{ XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
{ XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
{ 0xFF }
};
#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
#define EZ(v, unit) ((v)?ENOUGH(((v) * 1000), unit):0)
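/*
 * The table timings are in nanoseconds while the clock periods T and UT
 * are supplied by callers in picoseconds, hence the (v) * 1000 in EZ().
 * Example: quantizing a 25 ns value to a 30000 ps (33 MHz) clock gives
 * EZ(25, 30000) = ENOUGH(25000, 30000) = 1 cycle.
 */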
static void ata_timing_quantize(const struct ata_timing *t,
struct ata_timing *q, int T, int UT)
{
q->setup = EZ(t->setup, T);
q->act8b = EZ(t->act8b, T);
q->rec8b = EZ(t->rec8b, T);
q->cyc8b = EZ(t->cyc8b, T);
q->active = EZ(t->active, T);
q->recover = EZ(t->recover, T);
q->dmack_hold = EZ(t->dmack_hold, T);
q->cycle = EZ(t->cycle, T);
q->udma = EZ(t->udma, UT);
}
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
struct ata_timing *m, unsigned int what)
{
if (what & ATA_TIMING_SETUP)
m->setup = max(a->setup, b->setup);
if (what & ATA_TIMING_ACT8B)
m->act8b = max(a->act8b, b->act8b);
if (what & ATA_TIMING_REC8B)
m->rec8b = max(a->rec8b, b->rec8b);
if (what & ATA_TIMING_CYC8B)
m->cyc8b = max(a->cyc8b, b->cyc8b);
if (what & ATA_TIMING_ACTIVE)
m->active = max(a->active, b->active);
if (what & ATA_TIMING_RECOVER)
m->recover = max(a->recover, b->recover);
if (what & ATA_TIMING_DMACK_HOLD)
m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
if (what & ATA_TIMING_CYCLE)
m->cycle = max(a->cycle, b->cycle);
if (what & ATA_TIMING_UDMA)
m->udma = max(a->udma, b->udma);
}
EXPORT_SYMBOL_GPL(ata_timing_merge);
const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
{
const struct ata_timing *t = ata_timing;
while (xfer_mode > t->mode)
t++;
if (xfer_mode == t->mode)
return t;
WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
__func__, xfer_mode);
return NULL;
}
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
struct ata_timing *t, int T, int UT)
{
const u16 *id = adev->id;
const struct ata_timing *s;
struct ata_timing p;
/*
* Find the mode.
*/
s = ata_timing_find_mode(speed);
if (!s)
return -EINVAL;
memcpy(t, s, sizeof(*s));
/*
* If the drive is an EIDE drive, it can tell us it needs extended
* PIO/MW_DMA cycle timing.
*/
if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
memset(&p, 0, sizeof(p));
if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
if (speed <= XFER_PIO_2)
p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
else if ((speed <= XFER_PIO_4) ||
(speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
p.cycle = id[ATA_ID_EIDE_DMA_MIN];
ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
}
/*
* Convert the timing to bus clock counts.
*/
ata_timing_quantize(t, t, T, UT);
/*
* Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
* S.M.A.R.T. and some other commands. We have to ensure that the
* DMA cycle timing is slower than or equal to the fastest PIO timing.
*/
if (speed > XFER_PIO_6) {
ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
}
/*
* Lengthen active & recovery time so that cycle time is correct.
*/
if (t->act8b + t->rec8b < t->cyc8b) {
t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
t->rec8b = t->cyc8b - t->act8b;
}
if (t->active + t->recover < t->cycle) {
t->active += (t->cycle - (t->active + t->recover)) / 2;
t->recover = t->cycle - t->active;
}
/*
* In a few cases quantisation may produce enough errors to
* leave t->cycle too low for the sum of active and recovery;
* if so, we must correct this.
*/
if (t->active + t->recover > t->cycle)
t->cycle = t->active + t->recover;
return 0;
}
EXPORT_SYMBOL_GPL(ata_timing_compute);
| linux-master | drivers/ata/libata-pata-timings.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sata_uli.c - ULi Electronics SATA
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* Hardware documentation available under NDA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "sata_uli"
#define DRV_VERSION "1.3"
enum {
uli_5289 = 0,
uli_5287 = 1,
uli_5281 = 2,
uli_max_ports = 4,
/* PCI configuration registers */
ULI5287_BASE = 0x90, /* sata0 phy SCR registers */
ULI5287_OFFS = 0x10, /* offset from sata0->sata1 phy regs */
ULI5281_BASE = 0x60, /* sata0 phy SCR registers */
ULI5281_OFFS = 0x60, /* offset from sata0->sata1 phy regs */
};
struct uli_priv {
unsigned int scr_cfg_addr[uli_max_ports];
};
static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int uli_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int uli_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static const struct pci_device_id uli_pci_tbl[] = {
{ PCI_VDEVICE(AL, 0x5289), uli_5289 },
{ PCI_VDEVICE(AL, 0x5287), uli_5287 },
{ PCI_VDEVICE(AL, 0x5281), uli_5281 },
{ } /* terminate list */
};
static struct pci_driver uli_pci_driver = {
.name = DRV_NAME,
.id_table = uli_pci_tbl,
.probe = uli_init_one,
.remove = ata_pci_remove_one,
};
static const struct scsi_host_template uli_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations uli_ops = {
.inherits = &ata_bmdma_port_ops,
.scr_read = uli_scr_read,
.scr_write = uli_scr_write,
.hardreset = ATA_OP_NULL,
};
static const struct ata_port_info uli_port_info = {
.flags = ATA_FLAG_SATA | ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &uli_ops,
};
MODULE_AUTHOR("Peer Chen");
MODULE_DESCRIPTION("low-level driver for ULi Electronics SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, uli_pci_tbl);
MODULE_VERSION(DRV_VERSION);
static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
{
struct uli_priv *hpriv = ap->host->private_data;
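/* the SCR registers are laid out as consecutive dwords in config space */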
return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg);
}
static u32 uli_scr_cfg_read(struct ata_link *link, unsigned int sc_reg)
{
struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg);
u32 val;
pci_read_config_dword(pdev, cfg_addr, &val);
return val;
}
static void uli_scr_cfg_write(struct ata_link *link, unsigned int scr, u32 val)
{
struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
unsigned int cfg_addr = get_scr_cfg_addr(link->ap, scr);
pci_write_config_dword(pdev, cfg_addr, val);
}
static int uli_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
*val = uli_scr_cfg_read(link, sc_reg);
return 0;
}
static int uli_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL) /* SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0 */
return -EINVAL;
uli_scr_cfg_write(link, sc_reg, val);
return 0;
}
static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct ata_port_info *ppi[] = { &uli_port_info, NULL };
unsigned int board_idx = (unsigned int) ent->driver_data;
struct ata_host *host;
struct uli_priv *hpriv;
void __iomem * const *iomap;
struct ata_ioports *ioaddr;
int n_ports, rc;
ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
return rc;
n_ports = 2;
if (board_idx == uli_5287)
n_ports = 4;
/* allocate the host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
if (!host)
return -ENOMEM;
hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
host->private_data = hpriv;
/* the first two ports are standard SFF */
rc = ata_pci_sff_init_host(host);
if (rc)
return rc;
ata_pci_bmdma_init(host);
iomap = host->iomap;
switch (board_idx) {
case uli_5287:
/* If there are four, the last two live right after
* the standard SFF ports.
*/
hpriv->scr_cfg_addr[0] = ULI5287_BASE;
hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
ioaddr = &host->ports[2]->ioaddr;
ioaddr->cmd_addr = iomap[0] + 8;
ioaddr->altstatus_addr =
ioaddr->ctl_addr = (void __iomem *)
((unsigned long)iomap[1] | ATA_PCI_CTL_OFS) + 4;
ioaddr->bmdma_addr = iomap[4] + 16;
hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4;
ata_sff_std_ports(ioaddr);
ata_port_desc(host->ports[2],
"cmd 0x%llx ctl 0x%llx bmdma 0x%llx",
(unsigned long long)pci_resource_start(pdev, 0) + 8,
((unsigned long long)pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4,
(unsigned long long)pci_resource_start(pdev, 4) + 16);
ioaddr = &host->ports[3]->ioaddr;
ioaddr->cmd_addr = iomap[2] + 8;
ioaddr->altstatus_addr =
ioaddr->ctl_addr = (void __iomem *)
((unsigned long)iomap[3] | ATA_PCI_CTL_OFS) + 4;
ioaddr->bmdma_addr = iomap[4] + 24;
hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5;
ata_sff_std_ports(ioaddr);
ata_port_desc(host->ports[3],
"cmd 0x%llx ctl 0x%llx bmdma 0x%llx",
(unsigned long long)pci_resource_start(pdev, 2) + 8,
((unsigned long long)pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4,
(unsigned long long)pci_resource_start(pdev, 4) + 24);
break;
case uli_5289:
hpriv->scr_cfg_addr[0] = ULI5287_BASE;
hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
break;
case uli_5281:
hpriv->scr_cfg_addr[0] = ULI5281_BASE;
hpriv->scr_cfg_addr[1] = ULI5281_BASE + ULI5281_OFFS;
break;
default:
BUG();
break;
}
pci_set_master(pdev);
pci_intx(pdev, 1);
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &uli_sht);
}
module_pci_driver(uli_pci_driver);
| linux-master | drivers/ata/sata_uli.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata-isapnp.c - ISA PnP PATA controller driver.
* Copyright 2005/2006 Red Hat Inc, all rights reserved.
*
* Based in part on ide-pnp.c by Andrey Panin <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/isapnp.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
#include <linux/libata.h>
#define DRV_NAME "pata_isapnp"
#define DRV_VERSION "0.2.5"
static const struct scsi_host_template isapnp_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations isapnp_port_ops = {
.inherits = &ata_sff_port_ops,
.cable_detect = ata_cable_40wire,
};
static struct ata_port_operations isapnp_noalt_port_ops = {
.inherits = &ata_sff_port_ops,
.cable_detect = ata_cable_40wire,
/* No altstatus so we don't want to use the lost interrupt poll */
.lost_interrupt = ATA_OP_NULL,
};
/**
* isapnp_init_one - attach an isapnp interface
* @idev: PnP device
* @dev_id: matching detect line
*
 *	Register an ISA bus IDE interface. Such interfaces run at PIO 0
 *	and use a non-shared IRQ.
*/
static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev_id)
{
struct ata_host *host;
struct ata_port *ap;
void __iomem *cmd_addr, *ctl_addr;
int irq = 0;
irq_handler_t handler = NULL;
if (pnp_port_valid(idev, 0) == 0)
return -ENODEV;
if (pnp_irq_valid(idev, 0)) {
irq = pnp_irq(idev, 0);
handler = ata_sff_interrupt;
}
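	/*
	 * If the PnP core reports no IRQ we leave irq == 0 and handler
	 * NULL; ata_host_activate() treats irq == 0 as a request to run
	 * the port in polling mode and will not request an interrupt.
	 */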
/* allocate host */
host = ata_host_alloc(&idev->dev, 1);
if (!host)
return -ENOMEM;
/* acquire resources and fill host */
cmd_addr = devm_ioport_map(&idev->dev, pnp_port_start(idev, 0), 8);
if (!cmd_addr)
return -ENOMEM;
ap = host->ports[0];
ap->ops = &isapnp_noalt_port_ops;
ap->pio_mask = ATA_PIO0;
ap->flags |= ATA_FLAG_SLAVE_POSS;
ap->ioaddr.cmd_addr = cmd_addr;
if (pnp_port_valid(idev, 1)) {
ctl_addr = devm_ioport_map(&idev->dev,
pnp_port_start(idev, 1), 1);
ap->ioaddr.altstatus_addr = ctl_addr;
ap->ioaddr.ctl_addr = ctl_addr;
ap->ops = &isapnp_port_ops;
}
ata_sff_std_ports(&ap->ioaddr);
ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
(unsigned long long)pnp_port_start(idev, 0),
(unsigned long long)pnp_port_start(idev, 1));
/* activate */
return ata_host_activate(host, irq, handler, 0,
&isapnp_sht);
}
/**
* isapnp_remove_one - unplug an isapnp interface
* @idev: PnP device
*
* Remove a previously configured PnP ATA port. Called only on module
* unload events as the core does not currently deal with ISAPnP docking.
*/
static void isapnp_remove_one(struct pnp_dev *idev)
{
struct device *dev = &idev->dev;
struct ata_host *host = dev_get_drvdata(dev);
ata_host_detach(host);
}
static struct pnp_device_id isapnp_devices[] = {
/* Generic ESDI/IDE/ATA compatible hard disk controller */
{.id = "PNP0600", .driver_data = 0},
{.id = ""}
};
MODULE_DEVICE_TABLE(pnp, isapnp_devices);
static struct pnp_driver isapnp_driver = {
.name = DRV_NAME,
.id_table = isapnp_devices,
.probe = isapnp_init_one,
.remove = isapnp_remove_one,
};
module_pnp_driver(isapnp_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for ISA PnP ATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_isapnp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pata_rdc - Driver for later RDC PATA controllers
*
* This is actually a driver for hardware meeting
* INCITS 370-2004 (1510D): ATA Host Adapter Standards
*
* Based on ata_piix.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>
#define DRV_NAME "pata_rdc"
#define DRV_VERSION "0.01"
struct rdc_host_priv {
u32 saved_iocfg;
};
/**
* rdc_pata_cable_detect - Probe host controller cable detect info
* @ap: Port for which cable detect info is desired
*
* Read 80c cable indicator from ATA PCI device's PCI config
* register. This register is normally set by firmware (BIOS).
*
* LOCKING:
* None (inherited from caller).
*/
static int rdc_pata_cable_detect(struct ata_port *ap)
{
struct rdc_host_priv *hpriv = ap->host->private_data;
u8 mask;
/* check BIOS cable detect results */
mask = 0x30 << (2 * ap->port_no);
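	/* Port 0 uses IOCFG bits 4-5, port 1 bits 6-7: two 80-wire
	 * detect bits per port, presumably one per device. If neither
	 * is set, report a 40-wire cable.
	 */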
if ((hpriv->saved_iocfg & mask) == 0)
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;
}
/**
* rdc_pata_prereset - prereset for PATA host controller
* @link: Target link
* @deadline: deadline jiffies for the operation
*
* LOCKING:
* None (inherited from caller).
*/
static int rdc_pata_prereset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static const struct pci_bits rdc_enable_bits[] = {
{ 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
{ 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
};
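	/* Port enable is bit 7 of config regs 0x41 (port 0) and 0x43
	 * (port 1); skip probing a port the firmware left disabled.
	 */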
if (!pci_test_config_bits(pdev, &rdc_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
static DEFINE_SPINLOCK(rdc_lock);
/**
* rdc_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
 * @adev: Device whose timings we are configuring
*
* Set PIO mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void rdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *dev = to_pci_dev(ap->host->dev);
unsigned long flags;
unsigned int is_slave = (adev->devno != 0);
unsigned int master_port= ap->port_no ? 0x42 : 0x40;
unsigned int slave_port = 0x44;
u16 master_data;
u8 slave_data;
u8 udma_enable;
int control = 0;
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
if (pio >= 2)
control |= 1; /* TIME1 enable */
if (ata_pio_need_iordy(adev))
control |= 2; /* IE enable */
if (adev->class == ATA_DEV_ATA)
control |= 4; /* PPE enable */
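	/*
	 * Rough map of the PIIX-style IDETIM word manipulated below
	 * (one word per channel, at config offset 0x40 or 0x42):
	 *
	 *   bits  0-3   master control: TIME0, IE0, PPE0, DTE0
	 *   bits  4-7   slave  control: TIME1, IE1, PPE1, DTE1
	 *   bits  8-9   RCT (recovery time)
	 *   bits 12-13  ISP (IORDY sample point)
	 *   bit  14     SITRE (separate slave timing in reg 0x44)
	 *
	 * The slave timing register at 0x44 packs one (ISP << 2) | RCT
	 * nibble per channel: low nibble for port 0, high for port 1.
	 */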
spin_lock_irqsave(&rdc_lock, flags);
/* PIO configuration clears DTE unconditionally. It will be
* programmed in set_dmamode which is guaranteed to be called
* after set_piomode if any DMA mode is available.
*/
pci_read_config_word(dev, master_port, &master_data);
if (is_slave) {
/* clear TIME1|IE1|PPE1|DTE1 */
master_data &= 0xff0f;
/* Enable SITRE (separate slave timing register) */
master_data |= 0x4000;
/* enable PPE1, IE1 and TIME1 as needed */
master_data |= (control << 4);
pci_read_config_byte(dev, slave_port, &slave_data);
slave_data &= (ap->port_no ? 0x0f : 0xf0);
/* Load the timing nibble for this slave */
slave_data |= ((timings[pio][0] << 2) | timings[pio][1])
<< (ap->port_no ? 4 : 0);
} else {
/* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */
master_data &= 0xccf0;
/* Enable PPE, IE and TIME as appropriate */
master_data |= control;
/* load ISP and RCT */
master_data |=
(timings[pio][0] << 12) |
(timings[pio][1] << 8);
}
pci_write_config_word(dev, master_port, master_data);
if (is_slave)
pci_write_config_byte(dev, slave_port, slave_data);
/* Ensure the UDMA bit is off - it will be turned back on if
UDMA is selected */
pci_read_config_byte(dev, 0x48, &udma_enable);
udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
pci_write_config_byte(dev, 0x48, udma_enable);
spin_unlock_irqrestore(&rdc_lock, flags);
}
/**
 * rdc_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Drive in question
*
* Set UDMA mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *dev = to_pci_dev(ap->host->dev);
unsigned long flags;
u8 master_port = ap->port_no ? 0x42 : 0x40;
u16 master_data;
u8 speed = adev->dma_mode;
int devid = adev->devno + 2 * ap->port_no;
u8 udma_enable = 0;
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
spin_lock_irqsave(&rdc_lock, flags);
pci_read_config_word(dev, master_port, &master_data);
pci_read_config_byte(dev, 0x48, &udma_enable);
if (speed >= XFER_UDMA_0) {
unsigned int udma = adev->dma_mode - XFER_UDMA_0;
u16 udma_timing;
u16 ideconf;
int u_clock, u_speed;
/*
* UDMA is handled by a combination of clock switching and
* selection of dividers
*
* Handy rule: Odd modes are UDMATIMx 01, even are 02
* except UDMA0 which is 00
*/
u_speed = min(2 - (udma & 1), udma);
		if (udma == 5)
			u_clock = 0x1000; /* 100MHz */
		else if (udma > 2)
			u_clock = 1; /* 66MHz */
		else
			u_clock = 0; /* 33MHz */
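		/*
		 * Net effect of the two selections above, as a sketch
		 * derived from this code rather than RDC documentation:
		 *
		 *   UDMA0 -> UDMATIM 0, 33MHz    UDMA3 -> UDMATIM 1, 66MHz
		 *   UDMA1 -> UDMATIM 1, 33MHz    UDMA4 -> UDMATIM 2, 66MHz
		 *   UDMA2 -> UDMATIM 2, 33MHz    UDMA5 -> UDMATIM 1, 100MHz
		 */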
udma_enable |= (1 << devid);
/* Load the CT/RP selection */
pci_read_config_word(dev, 0x4A, &udma_timing);
udma_timing &= ~(3 << (4 * devid));
udma_timing |= u_speed << (4 * devid);
pci_write_config_word(dev, 0x4A, udma_timing);
		/* Select a 33/66/100MHz clock */
pci_read_config_word(dev, 0x54, &ideconf);
ideconf &= ~(0x1001 << devid);
ideconf |= u_clock << devid;
pci_write_config_word(dev, 0x54, ideconf);
} else {
/*
* MWDMA is driven by the PIO timings. We must also enable
* IORDY unconditionally along with TIME1. PPE has already
* been set when the PIO timing was set.
*/
unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
unsigned int control;
u8 slave_data;
const unsigned int needed_pio[3] = {
XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
};
int pio = needed_pio[mwdma] - XFER_PIO_0;
control = 3; /* IORDY|TIME1 */
/* If the drive MWDMA is faster than it can do PIO then
we must force PIO into PIO0 */
if (adev->pio_mode < needed_pio[mwdma])
/* Enable DMA timing only */
control |= 8; /* PIO cycles in PIO0 */
if (adev->devno) { /* Slave */
master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */
master_data |= control << 4;
pci_read_config_byte(dev, 0x44, &slave_data);
slave_data &= (ap->port_no ? 0x0f : 0xf0);
/* Load the matching timing */
slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
pci_write_config_byte(dev, 0x44, slave_data);
} else { /* Master */
master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY
and master timing bits */
master_data |= control;
master_data |=
(timings[pio][0] << 12) |
(timings[pio][1] << 8);
}
udma_enable &= ~(1 << devid);
pci_write_config_word(dev, master_port, master_data);
}
pci_write_config_byte(dev, 0x48, udma_enable);
spin_unlock_irqrestore(&rdc_lock, flags);
}
static struct ata_port_operations rdc_pata_ops = {
.inherits = &ata_bmdma32_port_ops,
.cable_detect = rdc_pata_cable_detect,
.set_piomode = rdc_set_piomode,
.set_dmamode = rdc_set_dmamode,
.prereset = rdc_pata_prereset,
};
static const struct ata_port_info rdc_port_info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA12_ONLY,
.udma_mask = ATA_UDMA5,
.port_ops = &rdc_pata_ops,
};
static const struct scsi_host_template rdc_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
/**
 * rdc_init_one - Register RDC ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in rdc_pci_tbl matching with @pdev
*
 *	Called from kernel PCI layer. We set up the port information and
 *	then hand over control to libata, for it to do the rest.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int rdc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct ata_port_info port_info[2];
const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
struct ata_host *host;
struct rdc_host_priv *hpriv;
int rc;
ata_print_version_once(&pdev->dev, DRV_VERSION);
port_info[0] = rdc_port_info;
port_info[1] = rdc_port_info;
/* enable device and prepare host */
rc = pcim_enable_device(pdev);
if (rc)
return rc;
hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
/* Save IOCFG, this will be used for cable detection, quirk
* detection and restoration on detach.
*/
pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg);
rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
host->private_data = hpriv;
pci_intx(pdev, 1);
host->flags |= ATA_HOST_PARALLEL_SCAN;
pci_set_master(pdev);
return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &rdc_sht);
}
static void rdc_remove_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
struct rdc_host_priv *hpriv = host->private_data;
pci_write_config_dword(pdev, 0x54, hpriv->saved_iocfg);
ata_pci_remove_one(pdev);
}
static const struct pci_device_id rdc_pci_tbl[] = {
{ PCI_DEVICE(0x17F3, 0x1011), },
{ PCI_DEVICE(0x17F3, 0x1012), },
{ } /* terminate list */
};
static struct pci_driver rdc_pci_driver = {
.name = DRV_NAME,
.id_table = rdc_pci_tbl,
.probe = rdc_init_one,
.remove = rdc_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(rdc_pci_driver);
MODULE_AUTHOR("Alan Cox (based on ata_piix)");
MODULE_DESCRIPTION("SCSI low-level driver for RDC PATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, rdc_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_rdc.c |
/*
* SATA glue for Cavium Octeon III SOCs.
 *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2010-2015 Cavium Networks
*
*/
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <asm/octeon/octeon.h>
#define CVMX_SATA_UCTL_SHIM_CFG 0xE8
#define SATA_UCTL_ENDIAN_MODE_BIG 1
#define SATA_UCTL_ENDIAN_MODE_LITTLE 0
#define SATA_UCTL_ENDIAN_MODE_MASK 3
#define SATA_UCTL_DMA_ENDIAN_MODE_SHIFT 8
#define SATA_UCTL_CSR_ENDIAN_MODE_SHIFT 0
#define SATA_UCTL_DMA_READ_CMD_SHIFT 12
static int ahci_octeon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
void __iomem *base;
u64 cfg;
int ret;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
cfg = cvmx_readq_csr(base + CVMX_SATA_UCTL_SHIM_CFG);
cfg &= ~(SATA_UCTL_ENDIAN_MODE_MASK << SATA_UCTL_DMA_ENDIAN_MODE_SHIFT);
cfg &= ~(SATA_UCTL_ENDIAN_MODE_MASK << SATA_UCTL_CSR_ENDIAN_MODE_SHIFT);
#ifdef __BIG_ENDIAN
cfg |= SATA_UCTL_ENDIAN_MODE_BIG << SATA_UCTL_DMA_ENDIAN_MODE_SHIFT;
cfg |= SATA_UCTL_ENDIAN_MODE_BIG << SATA_UCTL_CSR_ENDIAN_MODE_SHIFT;
#else
cfg |= SATA_UCTL_ENDIAN_MODE_LITTLE << SATA_UCTL_DMA_ENDIAN_MODE_SHIFT;
cfg |= SATA_UCTL_ENDIAN_MODE_LITTLE << SATA_UCTL_CSR_ENDIAN_MODE_SHIFT;
#endif
cfg |= 1 << SATA_UCTL_DMA_READ_CMD_SHIFT;
cvmx_writeq_csr(base + CVMX_SATA_UCTL_SHIM_CFG, cfg);
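	/* The shim now matches the kernel's endianness; the AHCI core
	 * itself is a child device brought up by of_platform_populate()
	 * below.
	 */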
if (!node) {
dev_err(dev, "no device node, failed to add octeon sata\n");
return -ENODEV;
}
ret = of_platform_populate(node, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to add ahci-platform core\n");
return ret;
}
return 0;
}
static const struct of_device_id octeon_ahci_match[] = {
{ .compatible = "cavium,octeon-7130-sata-uctl", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, octeon_ahci_match);
static struct platform_driver ahci_octeon_driver = {
.probe = ahci_octeon_probe,
.driver = {
.name = "octeon-ahci",
.of_match_table = octeon_ahci_match,
},
};
module_platform_driver(ahci_octeon_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium, Inc. <[email protected]>");
MODULE_DESCRIPTION("Cavium Inc. sata config.");
| linux-master | drivers/ata/ahci_octeon.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * pata_radisys.c - Radisys R82600 PATA controller driver
*
* (C) 2006 Red Hat <[email protected]>
*
* Some parts based on ata_piix.c by Jeff Garzik and others.
*
* A PIIX relative, this device has a single ATA channel and no
* slave timings, SITRE or PPE. In that sense it is a close relative
* of the original PIIX. It does however support UDMA 33/66 per channel
* although no other modes/timings. Also lacking is 32bit I/O on the ATA
* port.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define DRV_NAME "pata_radisys"
#define DRV_VERSION "0.4.4"
/**
* radisys_set_piomode - Initialize host controller PATA PIO timings
* @ap: ATA port
* @adev: Device whose timings we are configuring
*
* Set PIO mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void radisys_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *dev = to_pci_dev(ap->host->dev);
u16 idetm_data;
int control = 0;
/*
	 * See Intel Document 298600-004 for the timing programming rules
* for PIIX/ICH. Note that the early PIIX does not have the slave
* timing port at 0x44. The Radisys is a relative of the PIIX
* but not the same so be careful.
*/
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 }, /* Check me */
{ 0, 0 },
{ 1, 1 },
{ 2, 2 },
{ 3, 3 }, };
if (pio > 0)
control |= 1; /* TIME1 enable */
if (ata_pio_need_iordy(adev))
control |= 2; /* IE IORDY */
pci_read_config_word(dev, 0x40, &idetm_data);
/* Enable IE and TIME as appropriate. Clear the other
drive timing bits */
idetm_data &= 0xCCCC;
idetm_data |= (control << (4 * adev->devno));
idetm_data |= (timings[pio][0] << 12) |
(timings[pio][1] << 8);
pci_write_config_word(dev, 0x40, idetm_data);
/* Track which port is configured */
ap->private_data = adev;
}
/**
* radisys_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Device to program
*
* Set MWDMA mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void radisys_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *dev = to_pci_dev(ap->host->dev);
u16 idetm_data;
u8 udma_enable;
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 1 },
{ 2, 2 },
{ 3, 3 }, };
/*
* MWDMA is driven by the PIO timings. We must also enable
* IORDY unconditionally.
*/
pci_read_config_word(dev, 0x40, &idetm_data);
pci_read_config_byte(dev, 0x48, &udma_enable);
if (adev->dma_mode < XFER_UDMA_0) {
unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
const unsigned int needed_pio[3] = {
XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
};
int pio = needed_pio[mwdma] - XFER_PIO_0;
int control = 3; /* IORDY|TIME0 */
/* If the drive MWDMA is faster than it can do PIO then
we must force PIO0 for PIO cycles. */
if (adev->pio_mode < needed_pio[mwdma])
control = 1;
/* Mask out the relevant control and timing bits we will load. Also
clear the other drive TIME register as a precaution */
idetm_data &= 0xCCCC;
idetm_data |= control << (4 * adev->devno);
idetm_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
udma_enable &= ~(1 << adev->devno);
} else {
u8 udma_mode;
/* UDMA66 on: UDMA 33 and 66 are switchable via register 0x4A */
pci_read_config_byte(dev, 0x4A, &udma_mode);
if (adev->xfer_mode == XFER_UDMA_2)
udma_mode &= ~(2 << (adev->devno * 4));
else /* UDMA 4 */
udma_mode |= (2 << (adev->devno * 4));
pci_write_config_byte(dev, 0x4A, udma_mode);
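		/* Bit 1 of each drive's nibble in register 0x4A appears
		 * to select the 66MHz UDMA clock (UDMA4) over the 33MHz
		 * one (UDMA2).
		 */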
udma_enable |= (1 << adev->devno);
}
pci_write_config_word(dev, 0x40, idetm_data);
pci_write_config_byte(dev, 0x48, udma_enable);
/* Track which port is configured */
ap->private_data = adev;
}
/**
* radisys_qc_issue - command issue
* @qc: command pending
*
* Called when the libata layer is about to issue a command. We wrap
* this interface so that we can load the correct ATA timings if
* necessary. Our logic also clears TIME0/TIME1 for the other device so
* that, even if we get this wrong, cycles to the other device will
* be made PIO0.
*/
static unsigned int radisys_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
if (adev != ap->private_data) {
/* UDMA timing is not shared */
if (adev->dma_mode < XFER_UDMA_0 || !ata_dma_enabled(adev)) {
if (ata_dma_enabled(adev))
radisys_set_dmamode(ap, adev);
else if (adev->pio_mode)
radisys_set_piomode(ap, adev);
}
}
return ata_bmdma_qc_issue(qc);
}
static const struct scsi_host_template radisys_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations radisys_pata_ops = {
.inherits = &ata_bmdma_port_ops,
.qc_issue = radisys_qc_issue,
.cable_detect = ata_cable_unknown,
.set_piomode = radisys_set_piomode,
.set_dmamode = radisys_set_dmamode,
};
/**
 * radisys_init_one - Register Radisys ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in radisys_pci_tbl matching with @pdev
*
 *	Called from kernel PCI layer. We set up the port information and
 *	then hand over control to libata, for it to do the rest.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA12_ONLY,
.udma_mask = ATA_UDMA24_ONLY,
.port_ops = &radisys_pata_ops,
};
const struct ata_port_info *ppi[] = { &info, NULL };
ata_print_version_once(&pdev->dev, DRV_VERSION);
return ata_pci_bmdma_init_one(pdev, ppi, &radisys_sht, NULL, 0);
}
static const struct pci_device_id radisys_pci_tbl[] = {
{ PCI_VDEVICE(RADISYS, 0x8201), },
{ } /* terminate list */
};
static struct pci_driver radisys_pci_driver = {
.name = DRV_NAME,
.id_table = radisys_pci_tbl,
.probe = radisys_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(radisys_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for Radisys R82600 controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, radisys_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_radisys.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sata_qstor.c - Pacific Digital Corporation QStor SATA
*
* Maintained by: Mark Lord <[email protected]>
*
* Copyright 2005 Pacific Digital Corporation.
* (OSL/GPL code release authorized by Jalil Fadavi).
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "sata_qstor"
#define DRV_VERSION "0.09"
enum {
QS_MMIO_BAR = 4,
QS_PORTS = 4,
QS_MAX_PRD = LIBATA_MAX_PRD,
QS_CPB_ORDER = 6,
QS_CPB_BYTES = (1 << QS_CPB_ORDER),
QS_PRD_BYTES = QS_MAX_PRD * 16,
QS_PKT_BYTES = QS_CPB_BYTES + QS_PRD_BYTES,
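	/* So each port's packet buffer is one 64-byte CPB followed by a
	 * PRD table of 16-byte entries (see qs_fill_sg()).
	 */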
/* global register offsets */
QS_HCF_CNFG3 = 0x0003, /* host configuration offset */
QS_HID_HPHY = 0x0004, /* host physical interface info */
QS_HCT_CTRL = 0x00e4, /* global interrupt mask offset */
QS_HST_SFF = 0x0100, /* host status fifo offset */
QS_HVS_SERD3 = 0x0393, /* PHY enable offset */
/* global control bits */
QS_HPHY_64BIT = (1 << 1), /* 64-bit bus detected */
QS_CNFG3_GSRST = 0x01, /* global chip reset */
	QS_SERD3_PHY_ENA = 0xf0, /* PHY detection ENAble */
/* per-channel register offsets */
QS_CCF_CPBA = 0x0710, /* chan CPB base address */
QS_CCF_CSEP = 0x0718, /* chan CPB separation factor */
QS_CFC_HUFT = 0x0800, /* host upstream fifo threshold */
QS_CFC_HDFT = 0x0804, /* host downstream fifo threshold */
QS_CFC_DUFT = 0x0808, /* dev upstream fifo threshold */
QS_CFC_DDFT = 0x080c, /* dev downstream fifo threshold */
QS_CCT_CTR0 = 0x0900, /* chan control-0 offset */
QS_CCT_CTR1 = 0x0901, /* chan control-1 offset */
QS_CCT_CFF = 0x0a00, /* chan command fifo offset */
/* channel control bits */
QS_CTR0_REG = (1 << 1), /* register mode (vs. pkt mode) */
QS_CTR0_CLER = (1 << 2), /* clear channel errors */
QS_CTR1_RDEV = (1 << 1), /* sata phy/comms reset */
QS_CTR1_RCHN = (1 << 4), /* reset channel logic */
QS_CCF_RUN_PKT = 0x107, /* RUN a new dma PKT */
/* pkt sub-field headers */
QS_HCB_HDR = 0x01, /* Host Control Block header */
QS_DCB_HDR = 0x02, /* Device Control Block header */
/* pkt HCB flag bits */
QS_HF_DIRO = (1 << 0), /* data DIRection Out */
QS_HF_DAT = (1 << 3), /* DATa pkt */
QS_HF_IEN = (1 << 4), /* Interrupt ENable */
QS_HF_VLD = (1 << 5), /* VaLiD pkt */
/* pkt DCB flag bits */
QS_DF_PORD = (1 << 2), /* Pio OR Dma */
QS_DF_ELBA = (1 << 3), /* Extended LBA (lba48) */
/* PCI device IDs */
board_2068_idx = 0, /* QStor 4-port SATA/RAID */
};
enum {
QS_DMA_BOUNDARY = ~0UL
};
typedef enum { qs_state_mmio, qs_state_pkt } qs_state_t;
struct qs_port_priv {
u8 *pkt;
dma_addr_t pkt_dma;
qs_state_t state;
};
static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int qs_port_start(struct ata_port *ap);
static void qs_host_stop(struct ata_host *host);
static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc);
static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
static void qs_freeze(struct ata_port *ap);
static void qs_thaw(struct ata_port *ap);
static int qs_prereset(struct ata_link *link, unsigned long deadline);
static void qs_error_handler(struct ata_port *ap);
static const struct scsi_host_template qs_ata_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = QS_MAX_PRD,
.dma_boundary = QS_DMA_BOUNDARY,
};
static struct ata_port_operations qs_ata_ops = {
.inherits = &ata_sff_port_ops,
.check_atapi_dma = qs_check_atapi_dma,
.qc_prep = qs_qc_prep,
.qc_issue = qs_qc_issue,
.freeze = qs_freeze,
.thaw = qs_thaw,
.prereset = qs_prereset,
.softreset = ATA_OP_NULL,
.error_handler = qs_error_handler,
.lost_interrupt = ATA_OP_NULL,
.scr_read = qs_scr_read,
.scr_write = qs_scr_write,
.port_start = qs_port_start,
.host_stop = qs_host_stop,
};
static const struct ata_port_info qs_port_info[] = {
/* board_2068_idx */
{
.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
.pio_mask = ATA_PIO4_ONLY,
.udma_mask = ATA_UDMA6,
.port_ops = &qs_ata_ops,
},
};
static const struct pci_device_id qs_ata_pci_tbl[] = {
{ PCI_VDEVICE(PDC, 0x2068), board_2068_idx },
{ } /* terminate list */
};
static struct pci_driver qs_ata_pci_driver = {
.name = DRV_NAME,
.id_table = qs_ata_pci_tbl,
.probe = qs_ata_init_one,
.remove = ata_pci_remove_one,
};
static void __iomem *qs_mmio_base(struct ata_host *host)
{
return host->iomap[QS_MMIO_BAR];
}
static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
{
return 1; /* ATAPI DMA not supported */
}
static inline void qs_enter_reg_mode(struct ata_port *ap)
{
u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
struct qs_port_priv *pp = ap->private_data;
pp->state = qs_state_mmio;
writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
readb(chan + QS_CCT_CTR0); /* flush */
}
static inline void qs_reset_channel_logic(struct ata_port *ap)
{
u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
readb(chan + QS_CCT_CTR0); /* flush */
qs_enter_reg_mode(ap);
}
static void qs_freeze(struct ata_port *ap)
{
u8 __iomem *mmio_base = qs_mmio_base(ap->host);
writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
qs_enter_reg_mode(ap);
}
static void qs_thaw(struct ata_port *ap)
{
u8 __iomem *mmio_base = qs_mmio_base(ap->host);
qs_enter_reg_mode(ap);
writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
}
static int qs_prereset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
qs_reset_channel_logic(ap);
return ata_sff_prereset(link, deadline);
}
static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
*val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 8));
return 0;
}
static void qs_error_handler(struct ata_port *ap)
{
qs_enter_reg_mode(ap);
ata_sff_error_handler(ap);
}
static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 8));
return 0;
}
static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
{
struct scatterlist *sg;
struct ata_port *ap = qc->ap;
struct qs_port_priv *pp = ap->private_data;
u8 *prd = pp->pkt + QS_CPB_BYTES;
unsigned int si;
for_each_sg(qc->sg, sg, qc->n_elem, si) {
u64 addr;
u32 len;
addr = sg_dma_address(sg);
*(__le64 *)prd = cpu_to_le64(addr);
prd += sizeof(u64);
len = sg_dma_len(sg);
*(__le32 *)prd = cpu_to_le32(len);
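		/* each PRD entry is 16 bytes: advancing by sizeof(u64)
		 * here also skips the 4 reserved bytes that follow the
		 * 4-byte length field
		 */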
prd += sizeof(u64);
}
return si;
}
static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc)
{
struct qs_port_priv *pp = qc->ap->private_data;
u8 dflags = QS_DF_PORD, *buf = pp->pkt;
u8 hflags = QS_HF_DAT | QS_HF_IEN | QS_HF_VLD;
u64 addr;
unsigned int nelem;
qs_enter_reg_mode(qc->ap);
if (qc->tf.protocol != ATA_PROT_DMA)
return AC_ERR_OK;
nelem = qs_fill_sg(qc);
if ((qc->tf.flags & ATA_TFLAG_WRITE))
hflags |= QS_HF_DIRO;
if ((qc->tf.flags & ATA_TFLAG_LBA48))
dflags |= QS_DF_ELBA;
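	/*
	 * Packet layout, as built below: bytes 0-23 hold the host
	 * control block (HCB), bytes 24-31 the device control block
	 * (DCB), and the FIS for the drive starts at byte 32.
	 */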
/* host control block (HCB) */
buf[ 0] = QS_HCB_HDR;
buf[ 1] = hflags;
*(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nbytes);
*(__le32 *)(&buf[ 8]) = cpu_to_le32(nelem);
addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES;
*(__le64 *)(&buf[16]) = cpu_to_le64(addr);
/* device control block (DCB) */
buf[24] = QS_DCB_HDR;
buf[28] = dflags;
/* frame information structure (FIS) */
ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);
return AC_ERR_OK;
}
static inline void qs_packet_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
writeb(QS_CTR0_CLER, chan + QS_CCT_CTR0);
wmb(); /* flush PRDs and pkt to memory */
writel(QS_CCF_RUN_PKT, chan + QS_CCT_CFF);
readl(chan + QS_CCT_CFF); /* flush */
}
static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
{
struct qs_port_priv *pp = qc->ap->private_data;
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
pp->state = qs_state_pkt;
qs_packet_start(qc);
return 0;
case ATAPI_PROT_DMA:
BUG();
break;
default:
break;
}
pp->state = qs_state_mmio;
return ata_sff_qc_issue(qc);
}
static void qs_do_or_die(struct ata_queued_cmd *qc, u8 status)
{
qc->err_mask |= ac_err_mask(status);
if (!qc->err_mask) {
ata_qc_complete(qc);
} else {
struct ata_port *ap = qc->ap;
struct ata_eh_info *ehi = &ap->link.eh_info;
ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi, "status 0x%02X", status);
if (qc->err_mask == AC_ERR_DEV)
ata_port_abort(ap);
else
ata_port_freeze(ap);
}
}
static inline unsigned int qs_intr_pkt(struct ata_host *host)
{
unsigned int handled = 0;
u8 sFFE;
u8 __iomem *mmio_base = qs_mmio_base(host);
do {
u32 sff0 = readl(mmio_base + QS_HST_SFF);
u32 sff1 = readl(mmio_base + QS_HST_SFF + 4);
u8 sEVLD = (sff1 >> 30) & 0x01; /* valid flag */
sFFE = sff1 >> 31; /* empty flag */
if (sEVLD) {
u8 sDST = sff0 >> 16; /* dev status */
u8 sHST = sff1 & 0x3f; /* host status */
unsigned int port_no = (sff1 >> 8) & 0x03;
struct ata_port *ap = host->ports[port_no];
struct qs_port_priv *pp = ap->private_data;
struct ata_queued_cmd *qc;
dev_dbg(host->dev, "SFF=%08x%08x: sHST=%d sDST=%02x\n",
sff1, sff0, sHST, sDST);
handled = 1;
if (!pp || pp->state != qs_state_pkt)
continue;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
switch (sHST) {
case 0: /* successful CPB */
case 3: /* device error */
qs_enter_reg_mode(qc->ap);
qs_do_or_die(qc, sDST);
break;
default:
break;
}
}
}
} while (!sFFE);
return handled;
}
static inline unsigned int qs_intr_mmio(struct ata_host *host)
{
unsigned int handled = 0, port_no;
for (port_no = 0; port_no < host->n_ports; ++port_no) {
struct ata_port *ap = host->ports[port_no];
struct qs_port_priv *pp = ap->private_data;
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (!qc) {
/*
* The qstor hardware generates spurious
* interrupts from time to time when switching
* in and out of packet mode. There's no
* obvious way to know if we're here now due
* to that, so just ack the irq and pretend we
* knew it was ours.. (ugh). This does not
* affect packet mode.
*/
ata_sff_check_status(ap);
handled = 1;
continue;
}
if (!pp || pp->state != qs_state_mmio)
continue;
if (!(qc->tf.flags & ATA_TFLAG_POLLING))
handled |= ata_sff_port_intr(ap, qc);
}
return handled;
}
static irqreturn_t qs_intr(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
unsigned int handled = 0;
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
handled = qs_intr_pkt(host) | qs_intr_mmio(host);
spin_unlock_irqrestore(&host->lock, flags);
return IRQ_RETVAL(handled);
}
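/*
 * Taskfile registers are spaced 8 bytes apart in MMIO, with the HOB
 * shadow of each register at +1 (see the comments below). The SCR block
 * at 0xc00 uses the same 8-byte stride, which is why qs_scr_read() and
 * qs_scr_write() scale sc_reg by 8.
 */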
static void qs_ata_setup_port(struct ata_ioports *port, void __iomem *base)
{
port->cmd_addr =
port->data_addr = base + 0x400;
port->error_addr =
port->feature_addr = base + 0x408; /* hob_feature = 0x409 */
port->nsect_addr = base + 0x410; /* hob_nsect = 0x411 */
port->lbal_addr = base + 0x418; /* hob_lbal = 0x419 */
port->lbam_addr = base + 0x420; /* hob_lbam = 0x421 */
port->lbah_addr = base + 0x428; /* hob_lbah = 0x429 */
port->device_addr = base + 0x430;
port->status_addr =
port->command_addr = base + 0x438;
port->altstatus_addr =
port->ctl_addr = base + 0x440;
port->scr_addr = base + 0xc00;
}
static int qs_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct qs_port_priv *pp;
void __iomem *mmio_base = qs_mmio_base(ap->host);
void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
u64 addr;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
pp->pkt = dmam_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma,
GFP_KERNEL);
if (!pp->pkt)
return -ENOMEM;
ap->private_data = pp;
qs_enter_reg_mode(ap);
addr = (u64)pp->pkt_dma;
writel((u32) addr, chan + QS_CCF_CPBA);
writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4);
return 0;
}
static void qs_host_stop(struct ata_host *host)
{
void __iomem *mmio_base = qs_mmio_base(host);
writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
}
static void qs_host_init(struct ata_host *host, unsigned int chip_id)
{
void __iomem *mmio_base = host->iomap[QS_MMIO_BAR];
unsigned int port_no;
writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
/* reset each channel in turn */
for (port_no = 0; port_no < host->n_ports; ++port_no) {
u8 __iomem *chan = mmio_base + (port_no * 0x4000);
writeb(QS_CTR1_RDEV|QS_CTR1_RCHN, chan + QS_CCT_CTR1);
writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
readb(chan + QS_CCT_CTR0); /* flush */
}
writeb(QS_SERD3_PHY_ENA, mmio_base + QS_HVS_SERD3); /* enable phy */
for (port_no = 0; port_no < host->n_ports; ++port_no) {
u8 __iomem *chan = mmio_base + (port_no * 0x4000);
/* set FIFO depths to same settings as Windows driver */
writew(32, chan + QS_CFC_HUFT);
writew(32, chan + QS_CFC_HDFT);
writew(10, chan + QS_CFC_DUFT);
writew( 8, chan + QS_CFC_DDFT);
/* set CPB size in bytes, as a power of two */
writeb(QS_CPB_ORDER, chan + QS_CCF_CSEP);
}
writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
}
/*
* The QStor understands 64-bit buses, and uses 64-bit fields
* for DMA pointers regardless of bus width. We just have to
* make sure our DMA masks are set appropriately for whatever
* bridge lies between us and the QStor, and then the DMA mapping
* code will ensure we only ever "see" appropriate buffer addresses.
* If we're 32-bit limited somewhere, then our 64-bit fields will
* just end up with zeros in the upper 32-bits, without any special
* logic required outside of this routine (below).
*/
static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
{
u32 bus_info = readl(mmio_base + QS_HID_HPHY);
int dma_bits = (bus_info & QS_HPHY_64BIT) ? 64 : 32;
int rc;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_bits));
if (rc)
dev_err(&pdev->dev, "%d-bit DMA enable failed\n", dma_bits);
return rc;
}
static int qs_ata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned int board_idx = (unsigned int) ent->driver_data;
const struct ata_port_info *ppi[] = { &qs_port_info[board_idx], NULL };
struct ata_host *host;
int rc, port_no;
ata_print_version_once(&pdev->dev, DRV_VERSION);
/* alloc host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, QS_PORTS);
if (!host)
return -ENOMEM;
/* acquire resources and fill host */
rc = pcim_enable_device(pdev);
if (rc)
return rc;
if ((pci_resource_flags(pdev, QS_MMIO_BAR) & IORESOURCE_MEM) == 0)
return -ENODEV;
rc = pcim_iomap_regions(pdev, 1 << QS_MMIO_BAR, DRV_NAME);
if (rc)
return rc;
host->iomap = pcim_iomap_table(pdev);
rc = qs_set_dma_masks(pdev, host->iomap[QS_MMIO_BAR]);
if (rc)
return rc;
for (port_no = 0; port_no < host->n_ports; ++port_no) {
struct ata_port *ap = host->ports[port_no];
unsigned int offset = port_no * 0x4000;
void __iomem *chan = host->iomap[QS_MMIO_BAR] + offset;
qs_ata_setup_port(&ap->ioaddr, chan);
ata_port_pbar_desc(ap, QS_MMIO_BAR, -1, "mmio");
ata_port_pbar_desc(ap, QS_MMIO_BAR, offset, "port");
}
/* initialize adapter */
qs_host_init(host, board_idx);
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, qs_intr, IRQF_SHARED,
&qs_ata_sht);
}
module_pci_driver(qs_ata_pci_driver);
MODULE_AUTHOR("Mark Lord");
MODULE_DESCRIPTION("Pacific Digital Corporation QStor SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, qs_ata_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/sata_qstor.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_sl82c105.c - SL82C105 PATA for new ATA layer
* (C) 2005 Red Hat Inc
* (C) 2011 Bartlomiej Zolnierkiewicz
*
* Based in part on linux/drivers/ide/pci/sl82c105.c
* SL82C105/Winbond 553 IDE driver
*
* and in part on the documentation and errata sheet
*
*
 * Note: The controller, like many controllers, has shared timings for
* PIO and DMA. We thus flip to the DMA timings in dma_start and flip back
* in the dma_stop function. Thus we actually don't need a set_dmamode
* method as the PIO method is always called and will set the right PIO
* timing parameters.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_sl82c105"
#define DRV_VERSION "0.3.3"
enum {
/*
* SL82C105 PCI config register 0x40 bits.
*/
CTRL_IDE_IRQB = (1 << 30),
CTRL_IDE_IRQA = (1 << 28),
CTRL_LEGIRQ = (1 << 11),
CTRL_P1F16 = (1 << 5),
CTRL_P1EN = (1 << 4),
CTRL_P0F16 = (1 << 1),
CTRL_P0EN = (1 << 0)
};
/**
* sl82c105_pre_reset - probe begin
* @link: ATA link
* @deadline: deadline jiffies for the operation
*
* Set up cable type and use generic probe init
*/
static int sl82c105_pre_reset(struct ata_link *link, unsigned long deadline)
{
static const struct pci_bits sl82c105_enable_bits[] = {
{ 0x40, 1, 0x01, 0x01 },
{ 0x40, 1, 0x10, 0x10 }
};
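	/* These test CTRL_P0EN (bit 0) and CTRL_P1EN (bit 4) of the
	 * control register at 0x40 described in the enum above.
	 */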
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
/**
* sl82c105_configure_piomode - set chip PIO timing
* @ap: ATA interface
* @adev: ATA device
* @pio: PIO mode
*
* Called to do the PIO mode setup. Our timing registers are shared
* so a configure_dmamode call will undo any work we do here and vice
* versa
*/
static void sl82c105_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static u16 pio_timing[5] = {
0x50D, 0x407, 0x304, 0x242, 0x240
};
u16 dummy;
int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno);
pci_write_config_word(pdev, timing, pio_timing[pio]);
/* Can we lose this oddity of the old driver */
pci_read_config_word(pdev, timing, &dummy);
}
/**
* sl82c105_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the PIO mode setup. Our timing registers are shared
* but we want to set the PIO timing by default.
*/
static void sl82c105_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
sl82c105_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
}
/**
* sl82c105_configure_dmamode - set DMA mode in chip
* @ap: ATA interface
* @adev: ATA device
*
* Load DMA cycle times into the chip ready for a DMA transfer
* to occur.
*/
static void sl82c105_configure_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static u16 dma_timing[3] = {
0x707, 0x201, 0x200
};
u16 dummy;
int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno);
int dma = adev->dma_mode - XFER_MW_DMA_0;
pci_write_config_word(pdev, timing, dma_timing[dma]);
/* Can we lose this oddity of the old driver */
pci_read_config_word(pdev, timing, &dummy);
}
/**
* sl82c105_reset_engine - Reset the DMA engine
* @ap: ATA interface
*
* The sl82c105 has some serious problems with the DMA engine
* when transfers don't run as expected or ATAPI is used. The
* recommended fix is to reset the engine each use using a chip
* test register.
*/
static void sl82c105_reset_engine(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u16 val;
pci_read_config_word(pdev, 0x7E, &val);
pci_write_config_word(pdev, 0x7E, val | 4);
pci_write_config_word(pdev, 0x7E, val & ~4);
}
/**
* sl82c105_bmdma_start - DMA engine begin
* @qc: ATA command
*
* Reset the DMA engine each use as recommended by the errata
* document.
*
* FIXME: if we switch clock at BMDMA start/end we might get better
* PIO performance on DMA capable devices.
*/
static void sl82c105_bmdma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
udelay(100);
sl82c105_reset_engine(ap);
udelay(100);
/* Set the clocks for DMA */
sl82c105_configure_dmamode(ap, qc->dev);
/* Activate DMA */
ata_bmdma_start(qc);
}
/**
* sl82c105_bmdma_stop - DMA engine stop
* @qc: ATA command
*
* Reset the DMA engine each use as recommended by the errata
* document.
*
* This function is also called to turn off DMA when a timeout occurs
* during DMA operation. In both cases we need to reset the engine.
*
 * We assume bmdma_stop is always called if bmdma_start was called. If
* not then we may need to wrap qc_issue.
*/
static void sl82c105_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
ata_bmdma_stop(qc);
sl82c105_reset_engine(ap);
udelay(100);
/* This will redo the initial setup of the DMA device to matching
PIO timings */
sl82c105_set_piomode(ap, qc->dev);
}
/**
* sl82c105_qc_defer - implement serialization
* @qc: command
*
* We must issue one command per host not per channel because
* of the reset bug.
*
* Q: is the scsi host lock sufficient ?
*/
static int sl82c105_qc_defer(struct ata_queued_cmd *qc)
{
struct ata_host *host = qc->ap->host;
struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
int rc;
/* First apply the usual rules */
rc = ata_std_qc_defer(qc);
if (rc != 0)
return rc;
/* Now apply serialization rules. Only allow a command if the
other channel state machine is idle */
if (alt && alt->qc_active)
return ATA_DEFER_PORT;
return 0;
}
static bool sl82c105_sff_irq_check(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 val, mask = ap->port_no ? CTRL_IDE_IRQB : CTRL_IDE_IRQA;
pci_read_config_dword(pdev, 0x40, &val);
return val & mask;
}
static const struct scsi_host_template sl82c105_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations sl82c105_port_ops = {
.inherits = &ata_bmdma_port_ops,
.qc_defer = sl82c105_qc_defer,
.bmdma_start = sl82c105_bmdma_start,
.bmdma_stop = sl82c105_bmdma_stop,
.cable_detect = ata_cable_40wire,
.set_piomode = sl82c105_set_piomode,
.prereset = sl82c105_pre_reset,
.sff_irq_check = sl82c105_sff_irq_check,
};
/**
* sl82c105_bridge_revision - find bridge version
* @pdev: PCI device for the ATA function
*
 *	Locates the PCI bridge associated with the ATA function and,
 *	provided it is a Winbond 553, reports the revision. If it cannot
 *	find a revision or the right device it returns -1.
*/
static int sl82c105_bridge_revision(struct pci_dev *pdev)
{
	struct pci_dev *bridge;
	int rev;
/*
* The bridge should be part of the same device, but function 0.
*/
bridge = pci_get_slot(pdev->bus,
PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
if (!bridge)
return -1;
/*
* Make sure it is a Winbond 553 and is an ISA bridge.
*/
if (bridge->vendor != PCI_VENDOR_ID_WINBOND ||
bridge->device != PCI_DEVICE_ID_WINBOND_83C553 ||
bridge->class >> 8 != PCI_CLASS_BRIDGE_ISA) {
pci_dev_put(bridge);
return -1;
}
	/*
	 * We need to find function 0's revision, not function 1.
	 * Read it before dropping our reference to the bridge.
	 */
	rev = bridge->revision;
	pci_dev_put(bridge);

	return rev;
}
static void sl82c105_fixup(struct pci_dev *pdev)
{
u32 val;
pci_read_config_dword(pdev, 0x40, &val);
val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
pci_write_config_dword(pdev, 0x40, val);
}
static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static const struct ata_port_info info_dma = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.port_ops = &sl82c105_port_ops
};
static const struct ata_port_info info_early = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.port_ops = &sl82c105_port_ops
};
/* for now use only the first port */
const struct ata_port_info *ppi[] = { &info_early,
NULL };
int rev;
int rc;
rc = pcim_enable_device(dev);
if (rc)
return rc;
rev = sl82c105_bridge_revision(dev);
if (rev == -1)
dev_warn(&dev->dev,
"pata_sl82c105: Unable to find bridge, disabling DMA\n");
else if (rev <= 5)
dev_warn(&dev->dev,
"pata_sl82c105: Early bridge revision, no DMA available\n");
else
ppi[0] = &info_dma;
sl82c105_fixup(dev);
return ata_pci_bmdma_init_one(dev, ppi, &sl82c105_sht, NULL, 0);
}
#ifdef CONFIG_PM_SLEEP
static int sl82c105_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
sl82c105_fixup(pdev);
ata_host_resume(host);
return 0;
}
#endif
static const struct pci_device_id sl82c105[] = {
{ PCI_VDEVICE(WINBOND, PCI_DEVICE_ID_WINBOND_82C105), },
{ },
};
static struct pci_driver sl82c105_pci_driver = {
.name = DRV_NAME,
.id_table = sl82c105,
.probe = sl82c105_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = sl82c105_reinit_one,
#endif
};
module_pci_driver(sl82c105_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Sl82c105");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sl82c105);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_sl82c105.c |
/*
* drivers/ata/pata_arasan_cf.c
*
* Arasan Compact Flash host controller source file
*
* Copyright (C) 2011 ST Microelectronics
* Viresh Kumar <[email protected]>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
/*
 * The Arasan CompactFlash Device Controller IP core has three basic modes of
 * operation: PC card ATA using I/O mode, PC card ATA using memory mode, and
 * PC card ATA using True IDE mode. This driver currently supports only True
 * IDE mode.
*
* Arasan CF Controller shares global irq register with Arasan XD Controller.
*
* Tested on arch/arm/mach-spear13xx
*/
#include <linux/ata.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/libata.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pata_arasan_cf_data.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <trace/events/libata.h>
#define DRIVER_NAME "arasan_cf"
#define TIMEOUT msecs_to_jiffies(3000)
/* Registers */
/* CompactFlash Interface Status */
#define CFI_STS 0x000
#define STS_CHG (1)
#define BIN_AUDIO_OUT (1 << 1)
#define CARD_DETECT1 (1 << 2)
#define CARD_DETECT2 (1 << 3)
#define INP_ACK (1 << 4)
#define CARD_READY (1 << 5)
#define IO_READY (1 << 6)
#define B16_IO_PORT_SEL (1 << 7)
/* IRQ */
#define IRQ_STS 0x004
/* Interrupt Enable */
#define IRQ_EN 0x008
#define CARD_DETECT_IRQ (1)
#define STATUS_CHNG_IRQ (1 << 1)
#define MEM_MODE_IRQ (1 << 2)
#define IO_MODE_IRQ (1 << 3)
#define TRUE_IDE_MODE_IRQ (1 << 8)
#define PIO_XFER_ERR_IRQ (1 << 9)
#define BUF_AVAIL_IRQ (1 << 10)
#define XFER_DONE_IRQ (1 << 11)
#define IGNORED_IRQS (STATUS_CHNG_IRQ | MEM_MODE_IRQ | IO_MODE_IRQ |\
TRUE_IDE_MODE_IRQ)
#define TRUE_IDE_IRQS (CARD_DETECT_IRQ | PIO_XFER_ERR_IRQ |\
BUF_AVAIL_IRQ | XFER_DONE_IRQ)
/* Operation Mode */
#define OP_MODE 0x00C
#define CARD_MODE_MASK (0x3)
#define MEM_MODE (0x0)
#define IO_MODE (0x1)
#define TRUE_IDE_MODE (0x2)
#define CARD_TYPE_MASK (1 << 2)
#define CF_CARD (0)
#define CF_PLUS_CARD (1 << 2)
#define CARD_RESET (1 << 3)
#define CFHOST_ENB (1 << 4)
#define OUTPUTS_TRISTATE (1 << 5)
#define ULTRA_DMA_ENB (1 << 8)
#define MULTI_WORD_DMA_ENB (1 << 9)
#define DRQ_BLOCK_SIZE_MASK (0x3 << 11)
#define DRQ_BLOCK_SIZE_512 (0)
#define DRQ_BLOCK_SIZE_1024 (1 << 11)
#define DRQ_BLOCK_SIZE_2048 (2 << 11)
#define DRQ_BLOCK_SIZE_4096 (3 << 11)
/* CF Interface Clock Configuration */
#define CLK_CFG 0x010
#define CF_IF_CLK_MASK (0XF)
/* CF Timing Mode Configuration */
#define TM_CFG 0x014
#define MEM_MODE_TIMING_MASK (0x3)
#define MEM_MODE_TIMING_250NS (0x0)
#define MEM_MODE_TIMING_120NS (0x1)
#define MEM_MODE_TIMING_100NS (0x2)
#define MEM_MODE_TIMING_80NS (0x3)
#define IO_MODE_TIMING_MASK (0x3 << 2)
#define IO_MODE_TIMING_250NS (0x0 << 2)
#define IO_MODE_TIMING_120NS (0x1 << 2)
#define IO_MODE_TIMING_100NS (0x2 << 2)
#define IO_MODE_TIMING_80NS (0x3 << 2)
#define TRUEIDE_PIO_TIMING_MASK (0x7 << 4)
#define TRUEIDE_PIO_TIMING_SHIFT 4
#define TRUEIDE_MWORD_DMA_TIMING_MASK (0x7 << 7)
#define TRUEIDE_MWORD_DMA_TIMING_SHIFT 7
#define ULTRA_DMA_TIMING_MASK (0x7 << 10)
#define ULTRA_DMA_TIMING_SHIFT 10
/* CF Transfer Address */
#define XFER_ADDR 0x014
#define XFER_ADDR_MASK (0x7FF)
#define MAX_XFER_COUNT 0x20000u
/* Transfer Control */
#define XFER_CTR 0x01C
#define XFER_COUNT_MASK (0x3FFFF)
#define ADDR_INC_DISABLE (1 << 24)
#define XFER_WIDTH_MASK (1 << 25)
#define XFER_WIDTH_8B (0)
#define XFER_WIDTH_16B (1 << 25)
#define MEM_TYPE_MASK (1 << 26)
#define MEM_TYPE_COMMON (0)
#define MEM_TYPE_ATTRIBUTE (1 << 26)
#define MEM_IO_XFER_MASK (1 << 27)
#define MEM_XFER (0)
#define IO_XFER (1 << 27)
#define DMA_XFER_MODE (1 << 28)
#define AHB_BUS_NORMAL_PIO_OPRTN (~(1 << 29))
#define XFER_DIR_MASK (1 << 30)
#define XFER_READ (0)
#define XFER_WRITE (1 << 30)
#define XFER_START (1 << 31)
/* Write Data Port */
#define WRITE_PORT 0x024
/* Read Data Port */
#define READ_PORT 0x028
/* ATA Data Port */
#define ATA_DATA_PORT 0x030
#define ATA_DATA_PORT_MASK (0xFFFF)
/* ATA Error/Features */
#define ATA_ERR_FTR 0x034
/* ATA Sector Count */
#define ATA_SC 0x038
/* ATA Sector Number */
#define ATA_SN 0x03C
/* ATA Cylinder Low */
#define ATA_CL 0x040
/* ATA Cylinder High */
#define ATA_CH 0x044
/* ATA Select Card/Head */
#define ATA_SH 0x048
/* ATA Status-Command */
#define ATA_STS_CMD 0x04C
/* ATA Alternate Status/Device Control */
#define ATA_ASTS_DCTR 0x050
/* Extended Write Data Port 0x200-0x3FC */
#define EXT_WRITE_PORT 0x200
/* Extended Read Data Port 0x400-0x5FC */
#define EXT_READ_PORT 0x400
#define FIFO_SIZE 0x200u
/* Global Interrupt Status */
#define GIRQ_STS 0x800
/* Global Interrupt Status enable */
#define GIRQ_STS_EN 0x804
/* Global Interrupt Signal enable */
#define GIRQ_SGN_EN 0x808
#define GIRQ_CF (1)
#define GIRQ_XD (1 << 1)
/* Compact Flash Controller Dev Structure */
struct arasan_cf_dev {
/* pointer to ata_host structure */
struct ata_host *host;
/* clk structure */
struct clk *clk;
/* physical base address of controller */
dma_addr_t pbase;
/* virtual base address of controller */
void __iomem *vbase;
/* irq number*/
int irq;
/* status to be updated to framework regarding DMA transfer */
u8 dma_status;
/* Card is present or Not */
u8 card_present;
/* dma specific */
/* Completion for transfer complete interrupt from controller */
struct completion cf_completion;
/* Completion for DMA transfer complete. */
struct completion dma_completion;
/* Dma channel allocated */
struct dma_chan *dma_chan;
/* Mask for DMA transfers */
dma_cap_mask_t mask;
/* DMA transfer work */
struct work_struct work;
/* DMA delayed finish work */
struct delayed_work dwork;
/* qc to be transferred using DMA */
struct ata_queued_cmd *qc;
};
static const struct scsi_host_template arasan_cf_sht = {
ATA_BASE_SHT(DRIVER_NAME),
.dma_boundary = 0xFFFFFFFFUL,
};
static void cf_dumpregs(struct arasan_cf_dev *acdev)
{
struct device *dev = acdev->host->dev;
dev_dbg(dev, ": =========== REGISTER DUMP ===========");
dev_dbg(dev, ": CFI_STS: %x", readl(acdev->vbase + CFI_STS));
dev_dbg(dev, ": IRQ_STS: %x", readl(acdev->vbase + IRQ_STS));
dev_dbg(dev, ": IRQ_EN: %x", readl(acdev->vbase + IRQ_EN));
dev_dbg(dev, ": OP_MODE: %x", readl(acdev->vbase + OP_MODE));
dev_dbg(dev, ": CLK_CFG: %x", readl(acdev->vbase + CLK_CFG));
dev_dbg(dev, ": TM_CFG: %x", readl(acdev->vbase + TM_CFG));
dev_dbg(dev, ": XFER_CTR: %x", readl(acdev->vbase + XFER_CTR));
dev_dbg(dev, ": GIRQ_STS: %x", readl(acdev->vbase + GIRQ_STS));
dev_dbg(dev, ": GIRQ_STS_EN: %x", readl(acdev->vbase + GIRQ_STS_EN));
dev_dbg(dev, ": GIRQ_SGN_EN: %x", readl(acdev->vbase + GIRQ_SGN_EN));
dev_dbg(dev, ": =====================================");
}
/* Enable/Disable global interrupts shared between CF and XD ctrlr. */
static void cf_ginterrupt_enable(struct arasan_cf_dev *acdev, bool enable)
{
/* enable should be 0 or 1 */
writel(enable, acdev->vbase + GIRQ_STS_EN);
writel(enable, acdev->vbase + GIRQ_SGN_EN);
}
/* Enable/Disable CF interrupts */
static inline void
cf_interrupt_enable(struct arasan_cf_dev *acdev, u32 mask, bool enable)
{
u32 val = readl(acdev->vbase + IRQ_EN);
/* clear & enable/disable irqs */
if (enable) {
writel(mask, acdev->vbase + IRQ_STS);
writel(val | mask, acdev->vbase + IRQ_EN);
} else
writel(val & ~mask, acdev->vbase + IRQ_EN);
}
static inline void cf_card_reset(struct arasan_cf_dev *acdev)
{
u32 val = readl(acdev->vbase + OP_MODE);
writel(val | CARD_RESET, acdev->vbase + OP_MODE);
udelay(200);
writel(val & ~CARD_RESET, acdev->vbase + OP_MODE);
}
static inline void cf_ctrl_reset(struct arasan_cf_dev *acdev)
{
writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
acdev->vbase + OP_MODE);
writel(readl(acdev->vbase + OP_MODE) | CFHOST_ENB,
acdev->vbase + OP_MODE);
}
static void cf_card_detect(struct arasan_cf_dev *acdev, bool hotplugged)
{
struct ata_port *ap = acdev->host->ports[0];
struct ata_eh_info *ehi = &ap->link.eh_info;
u32 val = readl(acdev->vbase + CFI_STS);
/* Both CD1 & CD2 should be low if card inserted completely */
if (!(val & (CARD_DETECT1 | CARD_DETECT2))) {
if (acdev->card_present)
return;
acdev->card_present = 1;
cf_card_reset(acdev);
} else {
if (!acdev->card_present)
return;
acdev->card_present = 0;
}
if (hotplugged) {
ata_ehi_hotplugged(ehi);
ata_port_freeze(ap);
}
}
static int cf_init(struct arasan_cf_dev *acdev)
{
struct arasan_cf_pdata *pdata = dev_get_platdata(acdev->host->dev);
unsigned int if_clk;
unsigned long flags;
int ret = 0;
ret = clk_prepare_enable(acdev->clk);
if (ret) {
dev_dbg(acdev->host->dev, "clock enable failed");
return ret;
}
ret = clk_set_rate(acdev->clk, 166000000);
if (ret) {
dev_warn(acdev->host->dev, "clock set rate failed");
clk_disable_unprepare(acdev->clk);
return ret;
}
spin_lock_irqsave(&acdev->host->lock, flags);
/* configure CF interface clock */
/* TODO: read from device tree */
if_clk = CF_IF_CLK_166M;
if (pdata && pdata->cf_if_clk <= CF_IF_CLK_200M)
if_clk = pdata->cf_if_clk;
writel(if_clk, acdev->vbase + CLK_CFG);
writel(TRUE_IDE_MODE | CFHOST_ENB, acdev->vbase + OP_MODE);
cf_interrupt_enable(acdev, CARD_DETECT_IRQ, 1);
cf_ginterrupt_enable(acdev, 1);
spin_unlock_irqrestore(&acdev->host->lock, flags);
return ret;
}
static void cf_exit(struct arasan_cf_dev *acdev)
{
unsigned long flags;
spin_lock_irqsave(&acdev->host->lock, flags);
cf_ginterrupt_enable(acdev, 0);
cf_interrupt_enable(acdev, TRUE_IDE_IRQS, 0);
cf_card_reset(acdev);
writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
acdev->vbase + OP_MODE);
spin_unlock_irqrestore(&acdev->host->lock, flags);
clk_disable_unprepare(acdev->clk);
}
static void dma_callback(void *dev)
{
struct arasan_cf_dev *acdev = dev;
complete(&acdev->dma_completion);
}
static inline void dma_complete(struct arasan_cf_dev *acdev)
{
struct ata_queued_cmd *qc = acdev->qc;
unsigned long flags;
acdev->qc = NULL;
ata_sff_interrupt(acdev->irq, acdev->host);
spin_lock_irqsave(&acdev->host->lock, flags);
if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
ata_ehi_push_desc(&qc->ap->link.eh_info, "DMA Failed: Timeout");
spin_unlock_irqrestore(&acdev->host->lock, flags);
}
static inline int wait4buf(struct arasan_cf_dev *acdev)
{
if (!wait_for_completion_timeout(&acdev->cf_completion, TIMEOUT)) {
u32 rw = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
dev_err(acdev->host->dev, "%s TimeOut", rw ? "write" : "read");
return -ETIMEDOUT;
}
/* Check if PIO Error interrupt has occurred */
if (acdev->dma_status & ATA_DMA_ERR)
return -EAGAIN;
return 0;
}
static int
dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
{
struct dma_async_tx_descriptor *tx;
struct dma_chan *chan = acdev->dma_chan;
dma_cookie_t cookie;
unsigned long flags = DMA_PREP_INTERRUPT;
int ret = 0;
tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
if (!tx) {
dev_err(acdev->host->dev, "device_prep_dma_memcpy failed\n");
return -EAGAIN;
}
tx->callback = dma_callback;
tx->callback_param = acdev;
cookie = tx->tx_submit(tx);
ret = dma_submit_error(cookie);
if (ret) {
dev_err(acdev->host->dev, "dma_submit_error\n");
return ret;
}
chan->device->device_issue_pending(chan);
/* Wait for DMA to complete */
if (!wait_for_completion_timeout(&acdev->dma_completion, TIMEOUT)) {
dmaengine_terminate_all(chan);
dev_err(acdev->host->dev, "wait_for_completion_timeout\n");
return -ETIMEDOUT;
}
return ret;
}
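/*
* Transfer one scatterlist entry between memory and the controller FIFO,
* in chunks of at most FIFO_SIZE bytes per DMA, reprogramming XFER_CTR
* for every MAX_XFER_COUNT window.
*/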
static int sg_xfer(struct arasan_cf_dev *acdev, struct scatterlist *sg)
{
dma_addr_t dest = 0, src = 0;
u32 xfer_cnt, sglen, dma_len, xfer_ctr;
u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
unsigned long flags;
int ret = 0;
sglen = sg_dma_len(sg);
if (write) {
src = sg_dma_address(sg);
dest = acdev->pbase + EXT_WRITE_PORT;
} else {
dest = sg_dma_address(sg);
src = acdev->pbase + EXT_READ_PORT;
}
/*
* For each sg:
* MAX_XFER_COUNT bytes are transferred before we get the transfer
* complete interrupt. In between, a buffer available interrupt is
* generated after every FIFO_SIZE bytes. At that point we refill the
* FIFO with at most FIFO_SIZE bytes.
*/
while (sglen) {
xfer_cnt = min(sglen, MAX_XFER_COUNT);
spin_lock_irqsave(&acdev->host->lock, flags);
xfer_ctr = readl(acdev->vbase + XFER_CTR) &
~XFER_COUNT_MASK;
writel(xfer_ctr | xfer_cnt | XFER_START,
acdev->vbase + XFER_CTR);
spin_unlock_irqrestore(&acdev->host->lock, flags);
/* continue dma xfers until current sg is completed */
while (xfer_cnt) {
/* wait for read to complete */
if (!write) {
ret = wait4buf(acdev);
if (ret)
goto fail;
}
/* read/write FIFO in chunk of FIFO_SIZE */
dma_len = min(xfer_cnt, FIFO_SIZE);
ret = dma_xfer(acdev, src, dest, dma_len);
if (ret) {
dev_err(acdev->host->dev, "dma failed\n");
goto fail;
}
if (write)
src += dma_len;
else
dest += dma_len;
sglen -= dma_len;
xfer_cnt -= dma_len;
/* wait for write to complete */
if (write) {
ret = wait4buf(acdev);
if (ret)
goto fail;
}
}
}
fail:
spin_lock_irqsave(&acdev->host->lock, flags);
writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
acdev->vbase + XFER_CTR);
spin_unlock_irqrestore(&acdev->host->lock, flags);
return ret;
}
/*
* This routine uses the external DMA controller to read/write data
* to/from the FIFO of the CF controller. There are two xfer-related
* interrupts supported by the CF controller:
* - buf_avail: generated as soon as a 512 byte buffer is available for
* reading, or an empty buffer is available for writing.
* - xfer_done: generated on transfer of "xfer_size" amount of data
* to/from the FIFO. xfer_size is programmed in the XFER_CTR register.
*
* Max buffer size = FIFO_SIZE = 512 bytes.
* Max xfer_size = MAX_XFER_COUNT = 256 KB.
*/
static void data_xfer(struct work_struct *work)
{
struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
work);
struct ata_queued_cmd *qc = acdev->qc;
struct scatterlist *sg;
unsigned long flags;
u32 temp;
int ret = 0;
/* request dma channels */
/* dma_request_channel may sleep, so calling from process context */
acdev->dma_chan = dma_request_chan(acdev->host->dev, "data");
if (IS_ERR(acdev->dma_chan)) {
dev_err_probe(acdev->host->dev, PTR_ERR(acdev->dma_chan),
"Unable to get dma_chan\n");
acdev->dma_chan = NULL;
goto chan_request_fail;
}
for_each_sg(qc->sg, sg, qc->n_elem, temp) {
ret = sg_xfer(acdev, sg);
if (ret)
break;
}
dma_release_channel(acdev->dma_chan);
acdev->dma_chan = NULL;
/* data xferred successfully */
if (!ret) {
u32 status;
spin_lock_irqsave(&acdev->host->lock, flags);
status = ioread8(qc->ap->ioaddr.altstatus_addr);
spin_unlock_irqrestore(&acdev->host->lock, flags);
if (status & (ATA_BUSY | ATA_DRQ)) {
ata_sff_queue_delayed_work(&acdev->dwork, 1);
return;
}
goto sff_intr;
}
cf_dumpregs(acdev);
chan_request_fail:
spin_lock_irqsave(&acdev->host->lock, flags);
/* error when transferring data to/from memory */
qc->err_mask |= AC_ERR_HOST_BUS;
qc->ap->hsm_task_state = HSM_ST_ERR;
cf_ctrl_reset(acdev);
spin_unlock_irqrestore(&acdev->host->lock, flags);
sff_intr:
dma_complete(acdev);
}
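/*
* Delayed-work handler: poll the alternate status register until the
* device drops BSY/DRQ, then complete the command.
*/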
static void delayed_finish(struct work_struct *work)
{
struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
dwork.work);
struct ata_queued_cmd *qc = acdev->qc;
unsigned long flags;
u8 status;
spin_lock_irqsave(&acdev->host->lock, flags);
status = ioread8(qc->ap->ioaddr.altstatus_addr);
spin_unlock_irqrestore(&acdev->host->lock, flags);
if (status & (ATA_BUSY | ATA_DRQ))
ata_sff_queue_delayed_work(&acdev->dwork, 1);
else
dma_complete(acdev);
}
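/*
* Top-level interrupt handler: acknowledge the global and per-event
* status bits, then dispatch card-detect, PIO error, buffer-available
* and transfer-done events.
*/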
static irqreturn_t arasan_cf_interrupt(int irq, void *dev)
{
struct arasan_cf_dev *acdev = ((struct ata_host *)dev)->private_data;
unsigned long flags;
u32 irqsts;
irqsts = readl(acdev->vbase + GIRQ_STS);
if (!(irqsts & GIRQ_CF))
return IRQ_NONE;
spin_lock_irqsave(&acdev->host->lock, flags);
irqsts = readl(acdev->vbase + IRQ_STS);
writel(irqsts, acdev->vbase + IRQ_STS); /* clear irqs */
writel(GIRQ_CF, acdev->vbase + GIRQ_STS); /* clear girqs */
/* handle only relevant interrupts */
irqsts &= ~IGNORED_IRQS;
if (irqsts & CARD_DETECT_IRQ) {
cf_card_detect(acdev, 1);
spin_unlock_irqrestore(&acdev->host->lock, flags);
return IRQ_HANDLED;
}
if (irqsts & PIO_XFER_ERR_IRQ) {
acdev->dma_status = ATA_DMA_ERR;
writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
acdev->vbase + XFER_CTR);
spin_unlock_irqrestore(&acdev->host->lock, flags);
complete(&acdev->cf_completion);
dev_err(acdev->host->dev, "pio xfer err irq\n");
return IRQ_HANDLED;
}
spin_unlock_irqrestore(&acdev->host->lock, flags);
if (irqsts & BUF_AVAIL_IRQ) {
complete(&acdev->cf_completion);
return IRQ_HANDLED;
}
if (irqsts & XFER_DONE_IRQ) {
struct ata_queued_cmd *qc = acdev->qc;
/* Send Complete only for write */
if (qc->tf.flags & ATA_TFLAG_WRITE)
complete(&acdev->cf_completion);
}
return IRQ_HANDLED;
}
static void arasan_cf_freeze(struct ata_port *ap)
{
struct arasan_cf_dev *acdev = ap->host->private_data;
/* stop transfer and reset controller */
writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
acdev->vbase + XFER_CTR);
cf_ctrl_reset(acdev);
acdev->dma_status = ATA_DMA_ERR;
ata_sff_dma_pause(ap);
ata_sff_freeze(ap);
}
static void arasan_cf_error_handler(struct ata_port *ap)
{
struct arasan_cf_dev *acdev = ap->host->private_data;
/*
* DMA transfers using an external DMA controller may be scheduled.
* Abort them before handling error. Refer data_xfer() for further
* details.
*/
cancel_work_sync(&acdev->work);
cancel_delayed_work_sync(&acdev->dwork);
return ata_sff_error_handler(ap);
}
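/*
* Program the transfer direction, issue the ATA command and schedule
* the data_xfer() worker, which performs the actual DMA from process
* context.
*/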
static void arasan_cf_dma_start(struct arasan_cf_dev *acdev)
{
struct ata_queued_cmd *qc = acdev->qc;
struct ata_port *ap = qc->ap;
struct ata_taskfile *tf = &qc->tf;
u32 xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK;
u32 write = tf->flags & ATA_TFLAG_WRITE;
xfer_ctr |= write ? XFER_WRITE : XFER_READ;
writel(xfer_ctr, acdev->vbase + XFER_CTR);
ap->ops->sff_exec_command(ap, tf);
ata_sff_queue_work(&acdev->work);
}
static unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct arasan_cf_dev *acdev = ap->host->private_data;
/* defer PIO handling to sff_qc_issue */
if (!ata_is_dma(qc->tf.protocol))
return ata_sff_qc_issue(qc);
/* select the device */
ata_wait_idle(ap);
ata_sff_dev_select(ap, qc->dev->devno);
ata_wait_idle(ap);
/* start the command */
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf);
acdev->dma_status = 0;
acdev->qc = qc;
trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
arasan_cf_dma_start(acdev);
ap->hsm_task_state = HSM_ST_LAST;
break;
default:
WARN_ON(1);
return AC_ERR_SYSTEM;
}
return 0;
}
static void arasan_cf_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct arasan_cf_dev *acdev = ap->host->private_data;
u8 pio = adev->pio_mode - XFER_PIO_0;
unsigned long flags;
u32 val;
/* Arasan ctrl supports Mode0 -> Mode6 */
if (pio > 6) {
dev_err(ap->dev, "Unknown PIO mode\n");
return;
}
spin_lock_irqsave(&acdev->host->lock, flags);
val = readl(acdev->vbase + OP_MODE) &
~(ULTRA_DMA_ENB | MULTI_WORD_DMA_ENB | DRQ_BLOCK_SIZE_MASK);
writel(val, acdev->vbase + OP_MODE);
val = readl(acdev->vbase + TM_CFG) & ~TRUEIDE_PIO_TIMING_MASK;
val |= pio << TRUEIDE_PIO_TIMING_SHIFT;
writel(val, acdev->vbase + TM_CFG);
cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 0);
cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 1);
spin_unlock_irqrestore(&acdev->host->lock, flags);
}
static void arasan_cf_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct arasan_cf_dev *acdev = ap->host->private_data;
u32 opmode, tmcfg, dma_mode = adev->dma_mode;
unsigned long flags;
spin_lock_irqsave(&acdev->host->lock, flags);
opmode = readl(acdev->vbase + OP_MODE) &
~(MULTI_WORD_DMA_ENB | ULTRA_DMA_ENB);
tmcfg = readl(acdev->vbase + TM_CFG);
if ((dma_mode >= XFER_UDMA_0) && (dma_mode <= XFER_UDMA_6)) {
opmode |= ULTRA_DMA_ENB;
tmcfg &= ~ULTRA_DMA_TIMING_MASK;
tmcfg |= (dma_mode - XFER_UDMA_0) << ULTRA_DMA_TIMING_SHIFT;
} else if ((dma_mode >= XFER_MW_DMA_0) && (dma_mode <= XFER_MW_DMA_4)) {
opmode |= MULTI_WORD_DMA_ENB;
tmcfg &= ~TRUEIDE_MWORD_DMA_TIMING_MASK;
tmcfg |= (dma_mode - XFER_MW_DMA_0) <<
TRUEIDE_MWORD_DMA_TIMING_SHIFT;
} else {
dev_err(ap->dev, "Unknown DMA mode\n");
spin_unlock_irqrestore(&acdev->host->lock, flags);
return;
}
writel(opmode, acdev->vbase + OP_MODE);
writel(tmcfg, acdev->vbase + TM_CFG);
writel(DMA_XFER_MODE, acdev->vbase + XFER_CTR);
cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 0);
cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 1);
spin_unlock_irqrestore(&acdev->host->lock, flags);
}
static struct ata_port_operations arasan_cf_ops = {
.inherits = &ata_sff_port_ops,
.freeze = arasan_cf_freeze,
.error_handler = arasan_cf_error_handler,
.qc_issue = arasan_cf_qc_issue,
.set_piomode = arasan_cf_set_piomode,
.set_dmamode = arasan_cf_set_dmamode,
};
static int arasan_cf_probe(struct platform_device *pdev)
{
struct arasan_cf_dev *acdev;
struct arasan_cf_pdata *pdata = dev_get_platdata(&pdev->dev);
struct ata_host *host;
struct ata_port *ap;
struct resource *res;
u32 quirk;
irq_handler_t irq_handler = NULL;
int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
DRIVER_NAME)) {
dev_warn(&pdev->dev, "Failed to get memory region resource\n");
return -ENOENT;
}
acdev = devm_kzalloc(&pdev->dev, sizeof(*acdev), GFP_KERNEL);
if (!acdev)
return -ENOMEM;
if (pdata)
quirk = pdata->quirk;
else
quirk = CF_BROKEN_UDMA; /* as it is on spear1340 */
/*
* If there's an error getting IRQ (or we do get IRQ0),
* support only PIO
*/
ret = platform_get_irq(pdev, 0);
if (ret > 0) {
acdev->irq = ret;
irq_handler = arasan_cf_interrupt;
} else if (ret == -EPROBE_DEFER) {
return ret;
} else {
quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
}
acdev->pbase = res->start;
acdev->vbase = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!acdev->vbase) {
dev_warn(&pdev->dev, "ioremap failed\n");
return -ENOMEM;
}
acdev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(acdev->clk)) {
dev_warn(&pdev->dev, "Clock not found\n");
return PTR_ERR(acdev->clk);
}
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
if (!host) {
dev_warn(&pdev->dev, "alloc host failed\n");
return -ENOMEM;
}
ap = host->ports[0];
host->private_data = acdev;
acdev->host = host;
ap->ops = &arasan_cf_ops;
ap->pio_mask = ATA_PIO6;
ap->mwdma_mask = ATA_MWDMA4;
ap->udma_mask = ATA_UDMA6;
init_completion(&acdev->cf_completion);
init_completion(&acdev->dma_completion);
INIT_WORK(&acdev->work, data_xfer);
INIT_DELAYED_WORK(&acdev->dwork, delayed_finish);
dma_cap_set(DMA_MEMCPY, acdev->mask);
/* Handle platform specific quirks */
if (quirk) {
if (quirk & CF_BROKEN_PIO) {
ap->ops->set_piomode = NULL;
ap->pio_mask = 0;
}
if (quirk & CF_BROKEN_MWDMA)
ap->mwdma_mask = 0;
if (quirk & CF_BROKEN_UDMA)
ap->udma_mask = 0;
}
ap->flags |= ATA_FLAG_PIO_POLLING | ATA_FLAG_NO_ATAPI;
ap->ioaddr.cmd_addr = acdev->vbase + ATA_DATA_PORT;
ap->ioaddr.data_addr = acdev->vbase + ATA_DATA_PORT;
ap->ioaddr.error_addr = acdev->vbase + ATA_ERR_FTR;
ap->ioaddr.feature_addr = acdev->vbase + ATA_ERR_FTR;
ap->ioaddr.nsect_addr = acdev->vbase + ATA_SC;
ap->ioaddr.lbal_addr = acdev->vbase + ATA_SN;
ap->ioaddr.lbam_addr = acdev->vbase + ATA_CL;
ap->ioaddr.lbah_addr = acdev->vbase + ATA_CH;
ap->ioaddr.device_addr = acdev->vbase + ATA_SH;
ap->ioaddr.status_addr = acdev->vbase + ATA_STS_CMD;
ap->ioaddr.command_addr = acdev->vbase + ATA_STS_CMD;
ap->ioaddr.altstatus_addr = acdev->vbase + ATA_ASTS_DCTR;
ap->ioaddr.ctl_addr = acdev->vbase + ATA_ASTS_DCTR;
ata_port_desc(ap, "phy_addr %llx virt_addr %p",
(unsigned long long) res->start, acdev->vbase);
ret = cf_init(acdev);
if (ret)
return ret;
cf_card_detect(acdev, 0);
ret = ata_host_activate(host, acdev->irq, irq_handler, 0,
&arasan_cf_sht);
if (!ret)
return 0;
cf_exit(acdev);
return ret;
}
static void arasan_cf_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct arasan_cf_dev *acdev = host->ports[0]->private_data;
ata_host_detach(host);
cf_exit(acdev);
}
#ifdef CONFIG_PM_SLEEP
static int arasan_cf_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct arasan_cf_dev *acdev = host->ports[0]->private_data;
if (acdev->dma_chan)
dmaengine_terminate_all(acdev->dma_chan);
cf_exit(acdev);
ata_host_suspend(host, PMSG_SUSPEND);
return 0;
}
static int arasan_cf_resume(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct arasan_cf_dev *acdev = host->ports[0]->private_data;
cf_init(acdev);
ata_host_resume(host);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(arasan_cf_pm_ops, arasan_cf_suspend, arasan_cf_resume);
#ifdef CONFIG_OF
static const struct of_device_id arasan_cf_id_table[] = {
{ .compatible = "arasan,cf-spear1340" },
{}
};
MODULE_DEVICE_TABLE(of, arasan_cf_id_table);
#endif
static struct platform_driver arasan_cf_driver = {
.probe = arasan_cf_probe,
.remove_new = arasan_cf_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &arasan_cf_pm_ops,
.of_match_table = of_match_ptr(arasan_cf_id_table),
},
};
module_platform_driver(arasan_cf_driver);
MODULE_AUTHOR("Viresh Kumar <[email protected]>");
MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
| linux-master | drivers/ata/pata_arasan_cf.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pata_atp867x.c - ARTOP 867X 64bit 4-channel UDMA133 ATA controller driver
*
* (C) 2009 Google Inc. John(Jung-Ik) Lee <[email protected]>
*
* Per Atp867 data sheet rev 1.2, Acard.
* Based in part on early ide code from
* 2003-2004 by Eric Uhrhane, Google, Inc.
*
* TODO:
* 1. RAID features [comparison, XOR, striping, mirroring, etc.]
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_atp867x"
#define DRV_VERSION "0.7.5"
/*
* IO Registers
* Note that all runtime hot priv ports are cached in ap private_data
*/
enum {
ATP867X_IO_CHANNEL_OFFSET = 0x10,
/*
* IO Register Bitfields
*/
ATP867X_IO_PIOSPD_ACTIVE_SHIFT = 4,
ATP867X_IO_PIOSPD_RECOVER_SHIFT = 0,
ATP867X_IO_DMAMODE_MSTR_SHIFT = 0,
ATP867X_IO_DMAMODE_MSTR_MASK = 0x07,
ATP867X_IO_DMAMODE_SLAVE_SHIFT = 4,
ATP867X_IO_DMAMODE_SLAVE_MASK = 0x70,
ATP867X_IO_DMAMODE_UDMA_6 = 0x07,
ATP867X_IO_DMAMODE_UDMA_5 = 0x06,
ATP867X_IO_DMAMODE_UDMA_4 = 0x05,
ATP867X_IO_DMAMODE_UDMA_3 = 0x04,
ATP867X_IO_DMAMODE_UDMA_2 = 0x03,
ATP867X_IO_DMAMODE_UDMA_1 = 0x02,
ATP867X_IO_DMAMODE_UDMA_0 = 0x01,
ATP867X_IO_DMAMODE_DISABLE = 0x00,
ATP867X_IO_SYS_INFO_66MHZ = 0x04,
ATP867X_IO_SYS_INFO_SLOW_UDMA5 = 0x02,
ATP867X_IO_SYS_MASK_RESERVED = (~0xf1),
ATP867X_IO_PORTSPD_VAL = 0x1143,
ATP867X_PREREAD_VAL = 0x0200,
ATP867X_NUM_PORTS = 4,
ATP867X_BAR_IOBASE = 0,
ATP867X_BAR_ROMBASE = 6,
};
#define ATP867X_IOBASE(ap) ((ap)->host->iomap[0])
#define ATP867X_SYS_INFO(ap) (0x3F + ATP867X_IOBASE(ap))
#define ATP867X_IO_PORTBASE(ap, port) (0x00 + ATP867X_IOBASE(ap) + \
(port) * ATP867X_IO_CHANNEL_OFFSET)
#define ATP867X_IO_DMABASE(ap, port) (0x40 + \
ATP867X_IO_PORTBASE((ap), (port)))
#define ATP867X_IO_STATUS(ap, port) (0x07 + \
ATP867X_IO_PORTBASE((ap), (port)))
#define ATP867X_IO_ALTSTATUS(ap, port) (0x0E + \
ATP867X_IO_PORTBASE((ap), (port)))
/*
* hot priv ports
*/
#define ATP867X_IO_MSTRPIOSPD(ap, port) (0x08 + \
ATP867X_IO_DMABASE((ap), (port)))
#define ATP867X_IO_SLAVPIOSPD(ap, port) (0x09 + \
ATP867X_IO_DMABASE((ap), (port)))
#define ATP867X_IO_8BPIOSPD(ap, port) (0x0A + \
ATP867X_IO_DMABASE((ap), (port)))
#define ATP867X_IO_DMAMODE(ap, port) (0x0B + \
ATP867X_IO_DMABASE((ap), (port)))
#define ATP867X_IO_PORTSPD(ap, port) (0x4A + \
ATP867X_IO_PORTBASE((ap), (port)))
#define ATP867X_IO_PREREAD(ap, port) (0x4C + \
ATP867X_IO_PORTBASE((ap), (port)))
struct atp867x_priv {
void __iomem *dma_mode;
void __iomem *mstr_piospd;
void __iomem *slave_piospd;
void __iomem *eightb_piospd;
int pci66mhz;
};
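/* Program this device's UDMA mode nibble in the shared DMA mode register */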
static void atp867x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct atp867x_priv *dp = ap->private_data;
u8 speed = adev->dma_mode;
u8 b;
u8 mode = speed - XFER_UDMA_0 + 1;
/*
* Doc 6.6.9: decrease the udma mode value by 1 for safer UDMA speed
* on 66MHz bus
* rev-A: UDMA_1~4 (5, 6 no change)
* rev-B: all UDMA modes
* UDMA_0 stays not to disable UDMA
*/
if (dp->pci66mhz && mode > ATP867X_IO_DMAMODE_UDMA_0 &&
(pdev->device == PCI_DEVICE_ID_ARTOP_ATP867B ||
mode < ATP867X_IO_DMAMODE_UDMA_5))
mode--;
b = ioread8(dp->dma_mode);
if (adev->devno & 1) {
b = (b & ~ATP867X_IO_DMAMODE_SLAVE_MASK) |
(mode << ATP867X_IO_DMAMODE_SLAVE_SHIFT);
} else {
b = (b & ~ATP867X_IO_DMAMODE_MSTR_MASK) |
(mode << ATP867X_IO_DMAMODE_MSTR_SHIFT);
}
iowrite8(b, dp->dma_mode);
}
static int atp867x_get_active_clocks_shifted(struct ata_port *ap,
unsigned int clk)
{
struct atp867x_priv *dp = ap->private_data;
unsigned char clocks = clk;
/*
* Doc 6.6.9: increase the clock value by 1 for safer PIO speed
* on 66MHz bus
*/
if (dp->pci66mhz)
clocks++;
switch (clocks) {
case 0:
clocks = 1;
break;
case 1 ... 6:
break;
default:
ata_port_warn(ap, "ATP867X: active %dclk is invalid. "
"Using 12clk.\n", clk);
fallthrough;
case 9 ... 12:
clocks = 7; /* 12 clk */
break;
case 7:
case 8: /* default 8 clk */
clocks = 0;
goto active_clock_shift_done;
}
active_clock_shift_done:
return clocks << ATP867X_IO_PIOSPD_ACTIVE_SHIFT;
}
static int atp867x_get_recover_clocks_shifted(struct ata_port *ap,
unsigned int clk)
{
unsigned char clocks = clk;
switch (clocks) {
case 0:
clocks = 1;
break;
case 1 ... 11:
break;
case 13:
case 14:
--clocks; /* by the spec */
break;
case 15:
break;
default:
ata_port_warn(ap, "ATP867X: recover %dclk is invalid. "
"Using default 12clk.\n", clk);
fallthrough;
case 12: /* default 12 clk */
clocks = 0;
break;
}
return clocks << ATP867X_IO_PIOSPD_RECOVER_SHIFT;
}
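/*
* Compute merged PIO timings with the libata timing helpers, disable
* the DMA mode bits for this device and program the active/recovery
* clocks for both the data and 8-bit (taskfile) cycles.
*/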
static void atp867x_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ata_device *peer = ata_dev_pair(adev);
struct atp867x_priv *dp = ap->private_data;
u8 speed = adev->pio_mode;
struct ata_timing t, p;
int T, UT;
u8 b;
T = 1000000000 / 33333;
UT = T / 4;
ata_timing_compute(adev, speed, &t, T, UT);
if (peer && peer->pio_mode) {
ata_timing_compute(peer, peer->pio_mode, &p, T, UT);
ata_timing_merge(&p, &t, &t, ATA_TIMING_8BIT);
}
b = ioread8(dp->dma_mode);
if (adev->devno & 1)
b = (b & ~ATP867X_IO_DMAMODE_SLAVE_MASK);
else
b = (b & ~ATP867X_IO_DMAMODE_MSTR_MASK);
iowrite8(b, dp->dma_mode);
b = atp867x_get_active_clocks_shifted(ap, t.active) |
atp867x_get_recover_clocks_shifted(ap, t.recover);
if (adev->devno & 1)
iowrite8(b, dp->slave_piospd);
else
iowrite8(b, dp->mstr_piospd);
b = atp867x_get_active_clocks_shifted(ap, t.act8b) |
atp867x_get_recover_clocks_shifted(ap, t.rec8b);
iowrite8(b, dp->eightb_piospd);
}
static int atp867x_cable_override(struct pci_dev *pdev)
{
if (pdev->subsystem_vendor == PCI_VENDOR_ID_ARTOP &&
(pdev->subsystem_device == PCI_DEVICE_ID_ARTOP_ATP867A ||
pdev->subsystem_device == PCI_DEVICE_ID_ARTOP_ATP867B)) {
return 1;
}
return 0;
}
static int atp867x_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (atp867x_cable_override(pdev))
return ATA_CBL_PATA40_SHORT;
return ATA_CBL_PATA_UNK;
}
static const struct scsi_host_template atp867x_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations atp867x_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = atp867x_cable_detect,
.set_piomode = atp867x_set_piomode,
.set_dmamode = atp867x_set_dmamode,
};
static void atp867x_check_res(struct pci_dev *pdev)
{
int i;
unsigned long start, len;
/* Check the PCI resources for this channel are enabled */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
start = pci_resource_start(pdev, i);
len = pci_resource_len(pdev, i);
dev_dbg(&pdev->dev, "ATP867X: resource start:len=%lx:%lx\n",
start, len);
}
}
static void atp867x_check_ports(struct ata_port *ap, int port)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
struct atp867x_priv *dp = ap->private_data;
ata_port_dbg(ap, "ATP867X: port[%d] addresses\n"
" cmd_addr =0x%lx, 0x%lx\n"
" ctl_addr =0x%lx, 0x%lx\n"
" bmdma_addr =0x%lx, 0x%lx\n"
" data_addr =0x%lx\n"
" error_addr =0x%lx\n"
" feature_addr =0x%lx\n"
" nsect_addr =0x%lx\n"
" lbal_addr =0x%lx\n"
" lbam_addr =0x%lx\n"
" lbah_addr =0x%lx\n"
" device_addr =0x%lx\n"
" status_addr =0x%lx\n"
" command_addr =0x%lx\n"
" dp->dma_mode =0x%lx\n"
" dp->mstr_piospd =0x%lx\n"
" dp->slave_piospd =0x%lx\n"
" dp->eightb_piospd =0x%lx\n"
" dp->pci66mhz =0x%lx\n",
port,
(unsigned long)ioaddr->cmd_addr,
(unsigned long)ATP867X_IO_PORTBASE(ap, port),
(unsigned long)ioaddr->ctl_addr,
(unsigned long)ATP867X_IO_ALTSTATUS(ap, port),
(unsigned long)ioaddr->bmdma_addr,
(unsigned long)ATP867X_IO_DMABASE(ap, port),
(unsigned long)ioaddr->data_addr,
(unsigned long)ioaddr->error_addr,
(unsigned long)ioaddr->feature_addr,
(unsigned long)ioaddr->nsect_addr,
(unsigned long)ioaddr->lbal_addr,
(unsigned long)ioaddr->lbam_addr,
(unsigned long)ioaddr->lbah_addr,
(unsigned long)ioaddr->device_addr,
(unsigned long)ioaddr->status_addr,
(unsigned long)ioaddr->command_addr,
(unsigned long)dp->dma_mode,
(unsigned long)dp->mstr_piospd,
(unsigned long)dp->slave_piospd,
(unsigned long)dp->eightb_piospd,
(unsigned long)dp->pci66mhz);
}
static int atp867x_set_priv(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct atp867x_priv *dp;
int port = ap->port_no;
dp = ap->private_data =
devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
if (dp == NULL)
return -ENOMEM;
dp->dma_mode = ATP867X_IO_DMAMODE(ap, port);
dp->mstr_piospd = ATP867X_IO_MSTRPIOSPD(ap, port);
dp->slave_piospd = ATP867X_IO_SLAVPIOSPD(ap, port);
dp->eightb_piospd = ATP867X_IO_8BPIOSPD(ap, port);
dp->pci66mhz =
ioread8(ATP867X_SYS_INFO(ap)) & ATP867X_IO_SYS_INFO_66MHZ;
return 0;
}
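/*
* One-time controller setup: PCI latency, per-port 8-bit port speed and
* PreREAD defaults, INTA#/burst enables and the Rev-B slow-UDMA5 quirk.
*/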
static void atp867x_fixup(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
struct ata_port *ap = host->ports[0];
int i;
u8 v;
/*
* Broken BIOS might not set latency high enough
*/
pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &v);
if (v < 0x80) {
v = 0x80;
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, v);
dev_dbg(&pdev->dev, "ATP867X: set latency timer to %d\n", v);
}
/*
* init 8bit io port speed (0aaarrrr) to 43h and
* init udma modes of master/slave to 0/0 (11h)
*/
for (i = 0; i < ATP867X_NUM_PORTS; i++)
iowrite16(ATP867X_IO_PORTSPD_VAL, ATP867X_IO_PORTSPD(ap, i));
/*
* init PreREAD counts
*/
for (i = 0; i < ATP867X_NUM_PORTS; i++)
iowrite16(ATP867X_PREREAD_VAL, ATP867X_IO_PREREAD(ap, i));
v = ioread8(ATP867X_IOBASE(ap) + 0x28);
v &= 0xcf; /* Enable INTA#: bit4=0 means enable */
v |= 0xc0; /* Enable PCI burst, MRM & not immediate interrupts */
iowrite8(v, ATP867X_IOBASE(ap) + 0x28);
/*
* Turn off the overclocked udma5 mode, only for Rev-B
*/
v = ioread8(ATP867X_SYS_INFO(ap));
v &= ATP867X_IO_SYS_MASK_RESERVED;
if (pdev->device == PCI_DEVICE_ID_ARTOP_ATP867B)
v |= ATP867X_IO_SYS_INFO_SLOW_UDMA5;
iowrite8(v, ATP867X_SYS_INFO(ap));
}
static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
{
struct device *gdev = host->dev;
struct pci_dev *pdev = to_pci_dev(gdev);
unsigned int mask = 0;
int i, rc;
/*
* do not map rombase
*/
rc = pcim_iomap_regions(pdev, 1 << ATP867X_BAR_IOBASE, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;
host->iomap = pcim_iomap_table(pdev);
atp867x_check_res(pdev);
for (i = 0; i < PCI_STD_NUM_BARS; i++)
dev_dbg(gdev, "ATP867X: iomap[%d]=0x%p\n", i,
host->iomap[i]);
/*
* request, iomap BARs and init port addresses accordingly
*/
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
struct ata_ioports *ioaddr = &ap->ioaddr;
ioaddr->cmd_addr = ATP867X_IO_PORTBASE(ap, i);
ioaddr->ctl_addr = ioaddr->altstatus_addr
= ATP867X_IO_ALTSTATUS(ap, i);
ioaddr->bmdma_addr = ATP867X_IO_DMABASE(ap, i);
ata_sff_std_ports(ioaddr);
rc = atp867x_set_priv(ap);
if (rc)
return rc;
atp867x_check_ports(ap, i);
ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
(unsigned long)ioaddr->cmd_addr,
(unsigned long)ioaddr->ctl_addr);
ata_port_desc(ap, "bmdma 0x%lx",
(unsigned long)ioaddr->bmdma_addr);
mask |= 1 << i;
}
if (!mask) {
dev_err(gdev, "no available native port\n");
return -ENODEV;
}
atp867x_fixup(host);
return dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
}
static int atp867x_init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
static const struct ata_port_info info_867x = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &atp867x_ops,
};
struct ata_host *host;
const struct ata_port_info *ppi[] = { &info_867x, NULL };
int rc;
ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
return rc;
dev_info(&pdev->dev, "ATP867X: ATP867 ATA UDMA133 controller (rev %02X)",
pdev->device);
host = ata_host_alloc_pinfo(&pdev->dev, ppi, ATP867X_NUM_PORTS);
if (!host) {
dev_err(&pdev->dev, "failed to allocate ATA host\n");
rc = -ENOMEM;
goto err_out;
}
rc = atp867x_ata_pci_sff_init_host(host);
if (rc) {
dev_err(&pdev->dev, "failed to init host\n");
goto err_out;
}
pci_set_master(pdev);
rc = ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &atp867x_sht);
if (rc)
dev_err(&pdev->dev, "failed to activate host\n");
err_out:
return rc;
}
#ifdef CONFIG_PM_SLEEP
static int atp867x_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
atp867x_fixup(host);
ata_host_resume(host);
return 0;
}
#endif
static struct pci_device_id atp867x_pci_tbl[] = {
{ PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867A), 0 },
{ PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867B), 0 },
{ },
};
static struct pci_driver atp867x_driver = {
.name = DRV_NAME,
.id_table = atp867x_pci_tbl,
.probe = atp867x_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = atp867x_reinit_one,
#endif
};
module_pci_driver(atp867x_driver);
MODULE_AUTHOR("John(Jung-Ik) Lee, Google Inc.");
MODULE_DESCRIPTION("low level driver for Artop/Acard 867x ATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, atp867x_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_atp867x.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_via.c - VIA PATA for new ATA layer
* (C) 2005-2006 Red Hat Inc
*
* Documentation
* Most chipset documentation available under NDA only
*
* VIA version guide
* VIA VT82C561 - early design, uses ata_generic currently
* VIA VT82C576 - MWDMA, 33 MHz
* VIA VT82C586 - MWDMA, 33 MHz
* VIA VT82C586a - Added UDMA to 33 MHz
* VIA VT82C586b - UDMA33
* VIA VT82C596a - Nonfunctional UDMA66
* VIA VT82C596b - Working UDMA66
* VIA VT82C686 - Nonfunctional UDMA66
* VIA VT82C686a - Working UDMA66
* VIA VT82C686b - Updated to UDMA100
* VIA VT8231 - UDMA100
* VIA VT8233 - UDMA100
* VIA VT8233a - UDMA133
* VIA VT8233c - UDMA100
* VIA VT8235 - UDMA133
* VIA VT8237 - UDMA133
* VIA VT8237A - UDMA133
* VIA VT8237S - UDMA133
* VIA VT8251 - UDMA133
*
* Most registers remain compatible across chips. Others start reserved
* and acquire sensible semantics if set to 1 (e.g. cable detect). A few
* exceptions exist, notably around the FIFO settings.
*
* One additional quirk of the VIA design is that, like ALi, they reuse
* a few PCI IDs for a lot of different chips.
*
* Based heavily on:
*
* Version 3.38
*
* VIA IDE driver for Linux. Supported southbridges:
*
* vt82c576, vt82c586, vt82c586a, vt82c586b, vt82c596a, vt82c596b,
* vt82c686, vt82c686a, vt82c686b, vt8231, vt8233, vt8233c, vt8233a,
* vt8235, vt8237
*
* Copyright (c) 2000-2002 Vojtech Pavlik
*
* Based on the work of:
* Michel Aubry
* Jeff Garzik
* Andre Hedrick
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>
#define DRV_NAME "pata_via"
#define DRV_VERSION "0.3.4"
enum {
VIA_BAD_PREQ = 0x01, /* Crashes if PREQ# till DDACK# set */
VIA_BAD_CLK66 = 0x02, /* 66 MHz clock doesn't work correctly */
VIA_SET_FIFO = 0x04, /* Needs to have FIFO split set */
VIA_NO_UNMASK = 0x08, /* Doesn't work with IRQ unmasking on */
VIA_BAD_ID = 0x10, /* Has wrong vendor ID (0x1107) */
VIA_BAD_AST = 0x20, /* Don't touch Address Setup Timing */
VIA_NO_ENABLES = 0x40, /* Has no enablebits */
VIA_SATA_PATA = 0x80, /* SATA/PATA combined configuration */
};
enum {
VIA_IDFLAG_SINGLE = (1 << 0), /* single channel controller */
};
/*
* VIA SouthBridge chips.
*/
static const struct via_isa_bridge {
const char *name;
u16 id;
u8 rev_min;
u8 rev_max;
u8 udma_mask;
u8 flags;
} via_isa_bridges[] = {
{ "vx855", PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
{ "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
{ "vt8261", PCI_DEVICE_ID_VIA_8261, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
{ "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_NO_ENABLES },
{ "vt6415", PCI_DEVICE_ID_VIA_6415, 0x00, 0xff, ATA_UDMA6, VIA_BAD_AST | VIA_NO_ENABLES },
{ "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "vt8233c", PCI_DEVICE_ID_VIA_8233C_0, 0x00, 0x2f, ATA_UDMA5, },
{ "vt8233", PCI_DEVICE_ID_VIA_8233_0, 0x00, 0x2f, ATA_UDMA5, },
{ "vt8231", PCI_DEVICE_ID_VIA_8231, 0x00, 0x2f, ATA_UDMA5, },
{ "vt82c686b", PCI_DEVICE_ID_VIA_82C686, 0x40, 0x4f, ATA_UDMA5, },
{ "vt82c686a", PCI_DEVICE_ID_VIA_82C686, 0x10, 0x2f, ATA_UDMA4, },
{ "vt82c686", PCI_DEVICE_ID_VIA_82C686, 0x00, 0x0f, ATA_UDMA2, VIA_BAD_CLK66 },
{ "vt82c596b", PCI_DEVICE_ID_VIA_82C596, 0x10, 0x2f, ATA_UDMA4, },
{ "vt82c596a", PCI_DEVICE_ID_VIA_82C596, 0x00, 0x0f, ATA_UDMA2, VIA_BAD_CLK66 },
{ "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x47, 0x4f, ATA_UDMA2, VIA_SET_FIFO },
{ "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x40, 0x46, ATA_UDMA2, VIA_SET_FIFO | VIA_BAD_PREQ },
{ "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x30, 0x3f, ATA_UDMA2, VIA_SET_FIFO },
{ "vt82c586a", PCI_DEVICE_ID_VIA_82C586_0, 0x20, 0x2f, ATA_UDMA2, VIA_SET_FIFO },
{ "vt82c586", PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, 0x00, VIA_SET_FIFO },
{ "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK },
{ "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID },
{ "vtxxxx", PCI_DEVICE_ID_VIA_ANON, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ NULL }
};
static const struct dmi_system_id no_atapi_dma_dmi_table[] = {
{
.ident = "AVERATEC 3200",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AVERATEC"),
DMI_MATCH(DMI_BOARD_NAME, "3200"),
},
},
{ }
};
struct via_port {
u8 cached_device;
};
/*
* Cable special cases
*/
static const struct dmi_system_id cable_dmi_table[] = {
{
.ident = "Acer Ferrari 3400",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Acer,Inc."),
DMI_MATCH(DMI_BOARD_NAME, "Ferrari 3400"),
},
},
{ }
};
static int via_cable_override(struct pci_dev *pdev)
{
/* Systems by DMI */
if (dmi_check_system(cable_dmi_table))
return 1;
/* Arima W730-K8/Targa Visionary 811/... */
if (pdev->subsystem_vendor == 0x161F && pdev->subsystem_device == 0x2032)
return 1;
return 0;
}
/**
* via_cable_detect - cable detection
* @ap: ATA port
*
* Perform cable detection. Actually for the VIA case the BIOS
* already did this for us. We read the values provided by the
* BIOS. If you are using an 8235 in a non-PC configuration you
* may need to update this code.
*
* Hotplug also impacts on this.
*/
static int via_cable_detect(struct ata_port *ap)
{
const struct via_isa_bridge *config = ap->host->private_data;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 ata66;
if (via_cable_override(pdev))
return ATA_CBL_PATA40_SHORT;
if ((config->flags & VIA_SATA_PATA) && ap->port_no == 0)
return ATA_CBL_SATA;
/* Early chips are 40 wire */
if (config->udma_mask < ATA_UDMA4)
return ATA_CBL_PATA40;
/* UDMA 66 chips have only drive side logic */
else if (config->udma_mask < ATA_UDMA5)
return ATA_CBL_PATA_UNK;
/* UDMA 100 or later */
pci_read_config_dword(pdev, 0x50, &ata66);
/* Check both the drive cable reporting bits; we might not have
two drives */
if (ata66 & (0x10100000 >> (16 * ap->port_no)))
return ATA_CBL_PATA80;
/* Check with ACPI so we can spot BIOS reported SATA bridges */
if (ata_acpi_init_gtm(ap) &&
ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap)))
return ATA_CBL_PATA80;
return ATA_CBL_PATA40;
}
static int via_pre_reset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
const struct via_isa_bridge *config = ap->host->private_data;
if (!(config->flags & VIA_NO_ENABLES)) {
static const struct pci_bits via_enable_bits[] = {
{ 0x40, 1, 0x02, 0x02 },
{ 0x40, 1, 0x01, 0x01 }
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (!pci_test_config_bits(pdev, &via_enable_bits[ap->port_no]))
return -ENOENT;
}
return ata_sff_prereset(link, deadline);
}
/**
* via_do_set_mode - set transfer mode data
* @ap: ATA interface
* @adev: ATA device
* @mode: ATA mode being programmed
* @set_ast: Set to program address setup
* @udma_type: UDMA mode/format of registers
*
* Program the VIA registers for DMA and PIO modes. Uses the ata timing
* support in order to compute modes.
*
* FIXME: Hotplug will require we serialize multiple mode changes
* on the two channels.
*/
static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev,
int mode, int set_ast, int udma_type)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct ata_device *peer = ata_dev_pair(adev);
struct ata_timing t, p;
const int via_clock = 33333; /* Bus clock in kHz */
const int T = 1000000000 / via_clock;
int UT = T;
int ut;
int offset = 3 - (2*ap->port_no) - adev->devno;
switch (udma_type) {
case ATA_UDMA4:
UT = T / 2; break;
case ATA_UDMA5:
UT = T / 3; break;
case ATA_UDMA6:
UT = T / 4; break;
}
/* Calculate the timing values we require */
ata_timing_compute(adev, mode, &t, T, UT);
/* We share 8bit timing so we must merge the constraints */
if (peer) {
if (peer->pio_mode) {
ata_timing_compute(peer, peer->pio_mode, &p, T, UT);
ata_timing_merge(&p, &t, &t, ATA_TIMING_8BIT);
}
}
/* Address setup is programmable but breaks on UDMA133 setups */
if (set_ast) {
u8 setup; /* 2 bits per drive */
int shift = 2 * offset;
pci_read_config_byte(pdev, 0x4C, &setup);
setup &= ~(3 << shift);
setup |= (clamp_val(t.setup, 1, 4) - 1) << shift;
pci_write_config_byte(pdev, 0x4C, setup);
}
/* Load the PIO mode bits */
pci_write_config_byte(pdev, 0x4F - ap->port_no,
((clamp_val(t.act8b, 1, 16) - 1) << 4) | (clamp_val(t.rec8b, 1, 16) - 1));
pci_write_config_byte(pdev, 0x48 + offset,
((clamp_val(t.active, 1, 16) - 1) << 4) | (clamp_val(t.recover, 1, 16) - 1));
/* Load the UDMA bits according to type */
switch (udma_type) {
case ATA_UDMA2:
default:
ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 5) - 2)) : 0x03;
break;
case ATA_UDMA4:
ut = t.udma ? (0xe8 | (clamp_val(t.udma, 2, 9) - 2)) : 0x0f;
break;
case ATA_UDMA5:
ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 9) - 2)) : 0x07;
break;
case ATA_UDMA6:
ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 9) - 2)) : 0x07;
break;
}
/* Set UDMA unless device is not UDMA capable */
if (udma_type) {
u8 udma_etc;
pci_read_config_byte(pdev, 0x50 + offset, &udma_etc);
/* clear transfer mode bit */
udma_etc &= ~0x20;
if (t.udma) {
/* preserve 80-wire cable detection bit */
udma_etc &= 0x10;
udma_etc |= ut;
}
pci_write_config_byte(pdev, 0x50 + offset, udma_etc);
}
}
static void via_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
const struct via_isa_bridge *config = ap->host->private_data;
int set_ast = (config->flags & VIA_BAD_AST) ? 0 : 1;
via_do_set_mode(ap, adev, adev->pio_mode, set_ast, config->udma_mask);
}
static void via_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
const struct via_isa_bridge *config = ap->host->private_data;
int set_ast = (config->flags & VIA_BAD_AST) ? 0 : 1;
via_do_set_mode(ap, adev, adev->dma_mode, set_ast, config->udma_mask);
}
/**
* via_mode_filter - filter buggy device/mode pairs
* @dev: ATA device
* @mask: Mode bitmask
*
* We need to apply some minimal filtering for old controllers and at least
* one breed of Transcend SSD. Return the updated mask.
*/
static unsigned int via_mode_filter(struct ata_device *dev, unsigned int mask)
{
struct ata_host *host = dev->link->ap->host;
const struct via_isa_bridge *config = host->private_data;
unsigned char model_num[ATA_ID_PROD_LEN + 1];
if (config->id == PCI_DEVICE_ID_VIA_82C586_0) {
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
if (strcmp(model_num, "TS64GSSD25-M") == 0) {
ata_dev_warn(dev,
"disabling UDMA mode due to reported lockups with this device\n");
mask &= ~ATA_MASK_UDMA;
}
}
if (dev->class == ATA_DEV_ATAPI &&
dmi_check_system(no_atapi_dma_dmi_table)) {
ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n");
mask &= ATA_MASK_PIO;
}
return mask;
}
/**
* via_tf_load - send taskfile registers to host controller
* @ap: Port to which output is sent
* @tf: ATA taskfile register set
*
* Outputs ATA taskfile to standard ATA host controller.
*
* Note: This works around an internal bug of VIA chipsets, which
* reset the device register after the IEN bit in the ctl register
* is changed.
*/
static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
struct via_port *vp = ap->private_data;
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
int newctl = 0;
if (tf->ctl != ap->last_ctl) {
iowrite8(tf->ctl, ioaddr->ctl_addr);
ap->last_ctl = tf->ctl;
ata_wait_idle(ap);
newctl = 1;
}
if (tf->flags & ATA_TFLAG_DEVICE) {
iowrite8(tf->device, ioaddr->device_addr);
vp->cached_device = tf->device;
} else if (newctl)
iowrite8(vp->cached_device, ioaddr->device_addr);
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
WARN_ON_ONCE(!ioaddr->ctl_addr);
iowrite8(tf->hob_feature, ioaddr->feature_addr);
iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
}
if (is_addr) {
iowrite8(tf->feature, ioaddr->feature_addr);
iowrite8(tf->nsect, ioaddr->nsect_addr);
iowrite8(tf->lbal, ioaddr->lbal_addr);
iowrite8(tf->lbam, ioaddr->lbam_addr);
iowrite8(tf->lbah, ioaddr->lbah_addr);
}
ata_wait_idle(ap);
}
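/*
* Allocate the per-port via_port structure used by via_tf_load() to
* cache the last value written to the device register.
*/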
static int via_port_start(struct ata_port *ap)
{
struct via_port *vp;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int ret = ata_bmdma_port_start(ap);
if (ret < 0)
return ret;
vp = devm_kzalloc(&pdev->dev, sizeof(struct via_port), GFP_KERNEL);
if (vp == NULL)
return -ENOMEM;
ap->private_data = vp;
return 0;
}
static const struct scsi_host_template via_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations via_port_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = via_cable_detect,
.set_piomode = via_set_piomode,
.set_dmamode = via_set_dmamode,
.prereset = via_pre_reset,
.sff_tf_load = via_tf_load,
.port_start = via_port_start,
.mode_filter = via_mode_filter,
};
static struct ata_port_operations via_port_ops_noirq = {
.inherits = &via_port_ops,
.sff_data_xfer = ata_sff_data_xfer32,
};
/**
* via_config_fifo - set up the FIFO
* @pdev: PCI device
* @flags: configuration flags
*
* Set the FIFO properties for this device if necessary. Used both on
* set up and on the resume path.
*/
static void via_config_fifo(struct pci_dev *pdev, unsigned int flags)
{
u8 enable;
/* 0x40 low bits indicate enabled channels */
pci_read_config_byte(pdev, 0x40, &enable);
enable &= 3;
if (flags & VIA_SET_FIFO) {
static const u8 fifo_setting[4] = {0x00, 0x60, 0x00, 0x20};
u8 fifo;
pci_read_config_byte(pdev, 0x43, &fifo);
/* Clear PREQ# until DDACK# for errata */
if (flags & VIA_BAD_PREQ)
fifo &= 0x7F;
else
fifo &= 0x9f;
/* Turn on FIFO for enabled channels */
fifo |= fifo_setting[enable];
pci_write_config_byte(pdev, 0x43, fifo);
}
}
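/*
* Chip setup shared by probe and resume: FIFO configuration and
* enabling/disabling the 66 MHz UDMA clock as required.
*/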
static void via_fixup(struct pci_dev *pdev, const struct via_isa_bridge *config)
{
u32 timing;
/* Initialise the FIFO for the enabled channels. */
via_config_fifo(pdev, config->flags);
if (config->udma_mask == ATA_UDMA4) {
/* The 66 MHz devices require we enable the clock */
pci_read_config_dword(pdev, 0x50, &timing);
timing |= 0x80008;
pci_write_config_dword(pdev, 0x50, timing);
}
if (config->flags & VIA_BAD_CLK66) {
/* Disable the 66MHz clock on problem devices */
pci_read_config_dword(pdev, 0x50, &timing);
timing &= ~0x80008;
pci_write_config_dword(pdev, 0x50, timing);
}
}
/**
* via_init_one - discovery callback
* @pdev: PCI device
* @id: PCI table info
*
* A VIA IDE interface has been discovered. Figure out what revision
* and perform configuration work before handing it to the ATA layer
*/
static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
/* Early VIA without UDMA support */
static const struct ata_port_info via_mwdma_info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.port_ops = &via_port_ops
};
/* Ditto with IRQ masking required */
static const struct ata_port_info via_mwdma_info_borked = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.port_ops = &via_port_ops_noirq,
};
/* VIA UDMA 33 devices (and borked 66) */
static const struct ata_port_info via_udma33_info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
.port_ops = &via_port_ops
};
/* VIA UDMA 66 devices */
static const struct ata_port_info via_udma66_info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
.port_ops = &via_port_ops
};
/* VIA UDMA 100 devices */
static const struct ata_port_info via_udma100_info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &via_port_ops
};
/* UDMA133 with bad AST (All current 133) */
static const struct ata_port_info via_udma133_info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6, /* FIXME: should check north bridge */
.port_ops = &via_port_ops
};
const struct ata_port_info *ppi[] = { NULL, NULL };
struct pci_dev *isa;
const struct via_isa_bridge *config;
u8 enable;
unsigned long flags = id->driver_data;
int rc;
ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
return rc;
if (flags & VIA_IDFLAG_SINGLE)
ppi[1] = &ata_dummy_port_info;
/* To find out how the IDE will behave and what features we actually
have, we must look at the bridge, not the IDE controller */
for (config = via_isa_bridges; config->id != PCI_DEVICE_ID_VIA_ANON;
config++)
if ((isa = pci_get_device(PCI_VENDOR_ID_VIA +
!!(config->flags & VIA_BAD_ID),
config->id, NULL))) {
u8 rev = isa->revision;
pci_dev_put(isa);
if ((id->device == 0x0415 || id->device == 0x3164) &&
(config->id != id->device))
continue;
if (rev >= config->rev_min && rev <= config->rev_max)
break;
}
if (!(config->flags & VIA_NO_ENABLES)) {
/* 0x40 low bits indicate enabled channels */
pci_read_config_byte(pdev, 0x40, &enable);
enable &= 3;
if (enable == 0)
return -ENODEV;
}
/* Clock set up */
switch (config->udma_mask) {
case 0x00:
if (config->flags & VIA_NO_UNMASK)
ppi[0] = &via_mwdma_info_borked;
else
ppi[0] = &via_mwdma_info;
break;
case ATA_UDMA2:
ppi[0] = &via_udma33_info;
break;
case ATA_UDMA4:
ppi[0] = &via_udma66_info;
break;
case ATA_UDMA5:
ppi[0] = &via_udma100_info;
break;
case ATA_UDMA6:
ppi[0] = &via_udma133_info;
break;
default:
WARN_ON(1);
return -ENODEV;
}
via_fixup(pdev, config);
/* We have established the device type, now fire it up */
return ata_pci_bmdma_init_one(pdev, ppi, &via_sht, (void *)config, 0);
}
#ifdef CONFIG_PM_SLEEP
/**
* via_reinit_one - reinit after resume
* @pdev: PCI device
*
* Called when the VIA PATA device is resumed. We must then
* reconfigure the fifo and other setup we may have altered. In
* addition the kernel needs to support resume methods in the PCI
* layer for this to work.
*/
static int via_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
via_fixup(pdev, host->private_data);
ata_host_resume(host);
return 0;
}
#endif
static const struct pci_device_id via[] = {
{ PCI_VDEVICE(VIA, 0x0415), },
{ PCI_VDEVICE(VIA, 0x0571), },
{ PCI_VDEVICE(VIA, 0x0581), },
{ PCI_VDEVICE(VIA, 0x1571), },
{ PCI_VDEVICE(VIA, 0x3164), },
{ PCI_VDEVICE(VIA, 0x5324), },
{ PCI_VDEVICE(VIA, 0xC409), VIA_IDFLAG_SINGLE },
{ PCI_VDEVICE(VIA, 0x9001), VIA_IDFLAG_SINGLE },
{ },
};
static struct pci_driver via_pci_driver = {
.name = DRV_NAME,
.id_table = via,
.probe = via_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = via_reinit_one,
#endif
};
module_pci_driver(via_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for VIA PATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, via);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_via.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_oldpiix.c - Intel PATA/SATA controllers
*
* (C) 2005 Red Hat
*
* Some parts based on ata_piix.c by Jeff Garzik and others.
*
* Early PIIX differs significantly from the later PIIX as it lacks
* SITRE and the slave timing registers. This means that you have to
* set timing per channel, or be clever. Libata tells us whenever it
* does drive selection and we use this to reload the timings.
*
* Because of these behaviour differences PIIX gets its own driver module.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define DRV_NAME "pata_oldpiix"
#define DRV_VERSION "0.5.5"
/**
* oldpiix_pre_reset - probe begin
* @link: ATA link
* @deadline: deadline jiffies for the operation
*
* Set up cable type and use generic probe init
*/
static int oldpiix_pre_reset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static const struct pci_bits oldpiix_enable_bits[] = {
{ 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
{ 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
};
if (!pci_test_config_bits(pdev, &oldpiix_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
/**
* oldpiix_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device whose timings we are configuring
*
* Set PIO mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void oldpiix_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *dev = to_pci_dev(ap->host->dev);
unsigned int idetm_port = ap->port_no ? 0x42 : 0x40;
u16 idetm_data;
int control = 0;
/*
* See Intel Document 298600-004 for the timing programming rules
* for PIIX/ICH. Note that the early PIIX does not have the slave
* timing port at 0x44.
*/
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
if (pio > 1)
control |= 1; /* TIME */
if (ata_pio_need_iordy(adev))
control |= 2; /* IE */
/* Intel specifies that the prefetch/posting is for disk only */
if (adev->class == ATA_DEV_ATA)
control |= 4; /* PPE */
pci_read_config_word(dev, idetm_port, &idetm_data);
/*
* Set PPE, IE and TIME as appropriate.
* Clear the other drive's timing bits.
*/
if (adev->devno == 0) {
idetm_data &= 0xCCE0;
idetm_data |= control;
} else {
idetm_data &= 0xCC0E;
idetm_data |= (control << 4);
}
idetm_data |= (timings[pio][0] << 12) |
(timings[pio][1] << 8);
pci_write_config_word(dev, idetm_port, idetm_data);
/* Track which port is configured */
ap->private_data = adev;
}
/**
* oldpiix_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Device to program
*
* Set MWDMA mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void oldpiix_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *dev = to_pci_dev(ap->host->dev);
u8 idetm_port = ap->port_no ? 0x42 : 0x40;
u16 idetm_data;
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
/*
* MWDMA is driven by the PIO timings. We must also enable
* IORDY unconditionally along with TIME1. PPE has already
* been set when the PIO timing was set.
*/
unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
unsigned int control;
const unsigned int needed_pio[3] = {
XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
};
int pio = needed_pio[mwdma] - XFER_PIO_0;
pci_read_config_word(dev, idetm_port, &idetm_data);
control = 3; /* IORDY|TIME0 */
/* Intel specifies that the PPE functionality is for disk only */
if (adev->class == ATA_DEV_ATA)
control |= 4; /* PPE enable */
/* If the drive MWDMA is faster than it can do PIO then
we must force PIO into PIO0 */
if (adev->pio_mode < needed_pio[mwdma])
/* Enable DMA timing only */
control |= 8; /* PIO cycles in PIO0 */
/* Mask out the relevant control and timing bits we will load. Also
clear the other drive TIME register as a precaution */
if (adev->devno == 0) {
idetm_data &= 0xCCE0;
idetm_data |= control;
} else {
idetm_data &= 0xCC0E;
idetm_data |= (control << 4);
}
idetm_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
pci_write_config_word(dev, idetm_port, idetm_data);
/* Track which port is configured */
ap->private_data = adev;
}
/**
* oldpiix_qc_issue - command issue
* @qc: command pending
*
* Called when the libata layer is about to issue a command. We wrap
* this interface so that we can load the correct ATA timings if
* necessary. Our logic also clears TIME0/TIME1 for the other device so
* that, even if we get this wrong, cycles to the other device will
* be made PIO0.
*/
static unsigned int oldpiix_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
if (adev != ap->private_data) {
oldpiix_set_piomode(ap, adev);
if (ata_dma_enabled(adev))
oldpiix_set_dmamode(ap, adev);
}
return ata_bmdma_qc_issue(qc);
}
static const struct scsi_host_template oldpiix_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations oldpiix_pata_ops = {
.inherits = &ata_bmdma_port_ops,
.qc_issue = oldpiix_qc_issue,
.cable_detect = ata_cable_40wire,
.set_piomode = oldpiix_set_piomode,
.set_dmamode = oldpiix_set_dmamode,
.prereset = oldpiix_pre_reset,
};
/**
* oldpiix_init_one - Register PIIX ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in oldpiix_pci_tbl matching with @pdev
*
* Called from kernel PCI layer. We probe for combined mode (sigh),
* and then hand over control to libata, for it to do the rest.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA12_ONLY,
.port_ops = &oldpiix_pata_ops,
};
const struct ata_port_info *ppi[] = { &info, NULL };
ata_print_version_once(&pdev->dev, DRV_VERSION);
return ata_pci_bmdma_init_one(pdev, ppi, &oldpiix_sht, NULL, 0);
}
static const struct pci_device_id oldpiix_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1230), },
{ } /* terminate list */
};
static struct pci_driver oldpiix_pci_driver = {
.name = DRV_NAME,
.id_table = oldpiix_pci_tbl,
.probe = oldpiix_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(oldpiix_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for early PIIX series controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, oldpiix_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_oldpiix.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_mpiix.c - Intel MPIIX PATA for new ATA layer
* (C) 2005-2006 Red Hat Inc
* Alan Cox <[email protected]>
*
* The MPIIX is different enough to the PIIX4 and friends that we give it
* a separate driver. The old ide/pci code handles this by just not tuning
* MPIIX at all.
*
* The MPIIX also differs in another important way from the majority of PIIX
* devices. The chip is a bridge (pardon the pun) between the old world of
* ISA IDE and PCI IDE. Although the ATA timings are PCI configured the actual
* IDE controller is not decoded in PCI space and the chip does not claim to
* be IDE class PCI. This requires slightly non-standard probe logic compared
* with PCI IDE and also that we do not disable the device when our driver is
* unloaded (as it has many other functions).
*
* The driver consciously keeps this logic internally to avoid pushing quirky
* PATA history into the clean libata layer.
*
* Thinkpad specific note: If you boot an MPIIX using a thinkpad with a PCMCIA
* hard disk present this driver will not detect it. This is not a bug. In this
* configuration the secondary port of the MPIIX is disabled and the addresses
* are decoded by the PCMCIA bridge and therefore are for a generic IDE driver
* to operate.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_mpiix"
#define DRV_VERSION "0.7.7"
enum {
IDETIM = 0x6C, /* IDE control register */
IORDY = (1 << 1),
PPE = (1 << 2),
FTIM = (1 << 0),
ENABLED = (1 << 15),
SECONDARY = (1 << 14)
};
static int mpiix_pre_reset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static const struct pci_bits mpiix_enable_bits = { 0x6D, 1, 0x80, 0x80 };
if (!pci_test_config_bits(pdev, &mpiix_enable_bits))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
/**
* mpiix_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the PIO mode setup. The MPIIX allows us to program the
* IORDY sample point (2-5 clocks), recovery (1-4 clocks) and whether
* prefetching or IORDY are used.
*
* This would get very ugly because we can only program timing for one
* device at a time, the other gets PIO0. Fortunately libata calls
* our qc_issue command before a command is issued so we can flip the
* timings back and forth to reduce the pain.
*/
static void mpiix_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
int control = 0;
int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u16 idetim;
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
pci_read_config_word(pdev, IDETIM, &idetim);
/* Mask the IORDY/TIME/PPE for this device */
if (adev->class == ATA_DEV_ATA)
control |= PPE; /* Enable prefetch/posting for disk */
if (ata_pio_need_iordy(adev))
control |= IORDY;
if (pio > 1)
control |= FTIM; /* This drive is on the fast timing bank */
/* Mask out timing and clear both TIME bank selects */
idetim &= 0xCCEE;
idetim &= ~(0x07 << (4 * adev->devno));
idetim |= control << (4 * adev->devno);
idetim |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
pci_write_config_word(pdev, IDETIM, idetim);
/* We use ap->private_data as a pointer to the device currently
loaded for timing */
ap->private_data = adev;
}
/**
* mpiix_qc_issue - command issue
* @qc: command pending
*
* Called when the libata layer is about to issue a command. We wrap
* this interface so that we can load the correct ATA timings if
* necessary. Our logic also clears TIME0/TIME1 for the other device so
* that, even if we get this wrong, cycles to the other device will
* be made PIO0.
*/
static unsigned int mpiix_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
/* If modes have been configured and the channel data is not loaded
then load it. We have to check if pio_mode is set as the core code
does not set adev->pio_mode to XFER_PIO_0 while probing as would be
logical */
if (adev->pio_mode && adev != ap->private_data)
mpiix_set_piomode(ap, adev);
return ata_sff_qc_issue(qc);
}
static const struct scsi_host_template mpiix_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations mpiix_port_ops = {
.inherits = &ata_sff_port_ops,
.qc_issue = mpiix_qc_issue,
.cable_detect = ata_cable_40wire,
.set_piomode = mpiix_set_piomode,
.prereset = mpiix_pre_reset,
.sff_data_xfer = ata_sff_data_xfer32,
};
static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
/* Single threaded by the PCI probe logic */
struct ata_host *host;
struct ata_port *ap;
void __iomem *cmd_addr, *ctl_addr;
u16 idetim;
int cmd, ctl, irq;
ata_print_version_once(&dev->dev, DRV_VERSION);
host = ata_host_alloc(&dev->dev, 1);
if (!host)
return -ENOMEM;
ap = host->ports[0];
/* MPIIX has many functions which can be turned on or off according
to other devices present. Make sure IDE is enabled before we try
and use it */
pci_read_config_word(dev, IDETIM, &idetim);
if (!(idetim & ENABLED))
return -ENODEV;
/* See if it's primary or secondary channel... */
if (!(idetim & SECONDARY)) {
cmd = 0x1F0;
ctl = 0x3F6;
irq = 14;
} else {
cmd = 0x170;
ctl = 0x376;
irq = 15;
}
cmd_addr = devm_ioport_map(&dev->dev, cmd, 8);
ctl_addr = devm_ioport_map(&dev->dev, ctl, 1);
if (!cmd_addr || !ctl_addr)
return -ENOMEM;
ata_port_desc(ap, "cmd 0x%x ctl 0x%x", cmd, ctl);
/* We do our own plumbing to avoid leaking special cases for whacko
ancient hardware into the core code. There are two issues to
worry about. #1 The chip is a bridge, so when it is in legacy mode
with no BARs set it fools the PCI setup. #2 If you pci_disable_device
the MPIIX, your box goes castors up */
ap->ops = &mpiix_port_ops;
ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_SLAVE_POSS;
ap->ioaddr.cmd_addr = cmd_addr;
ap->ioaddr.ctl_addr = ctl_addr;
ap->ioaddr.altstatus_addr = ctl_addr;
/* Let libata fill in the port details */
ata_sff_std_ports(&ap->ioaddr);
/* activate host */
return ata_host_activate(host, irq, ata_sff_interrupt, IRQF_SHARED,
&mpiix_sht);
}
static const struct pci_device_id mpiix[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371MX), },
{ },
};
static struct pci_driver mpiix_pci_driver = {
.name = DRV_NAME,
.id_table = mpiix,
.probe = mpiix_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(mpiix_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Intel MPIIX");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mpiix);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_mpiix.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_netcell.c - Netcell PATA driver
*
* (c) 2006 Red Hat
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define DRV_NAME "pata_netcell"
#define DRV_VERSION "0.1.7"
/* No PIO or DMA methods needed for this device */
static unsigned int netcell_read_id(struct ata_device *adev,
struct ata_taskfile *tf, __le16 *id)
{
unsigned int err_mask = ata_do_dev_read_id(adev, tf, id);
/* Firmware forgets to mark words 85-87 valid */
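/* Word 87 (ATA_ID_CSF_DEFAULT) must have bit 14 set and bit 15 clear
for words 85-87 to be treated as valid; 0x4000 supplies bit 14 */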
if (err_mask == 0)
id[ATA_ID_CSF_DEFAULT] |= cpu_to_le16(0x4000);
return err_mask;
}
static const struct scsi_host_template netcell_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations netcell_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = ata_cable_80wire,
.read_id = netcell_read_id,
};
/**
* netcell_init_one - Register Netcell ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in netcell_pci_tbl matching with @pdev
*
* Called from kernel PCI layer.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
/* Actually we don't really care about these as the
firmware deals with it */
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5, /* UDMA 133 */
.port_ops = &netcell_ops,
};
const struct ata_port_info *port_info[] = { &info, NULL };
int rc;
ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
return rc;
/* Any chip specific setup/optimisation/messages here */
ata_pci_bmdma_clear_simplex(pdev);
/* And let the library code do the work */
return ata_pci_bmdma_init_one(pdev, port_info, &netcell_sht, NULL, 0);
}
static const struct pci_device_id netcell_pci_tbl[] = {
{ PCI_VDEVICE(NETCELL, PCI_DEVICE_ID_REVOLUTION), },
{ } /* terminate list */
};
static struct pci_driver netcell_pci_driver = {
.name = DRV_NAME,
.id_table = netcell_pci_tbl,
.probe = netcell_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(netcell_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for Netcell PATA RAID");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, netcell_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_netcell.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ACPI PATA driver
*
* (c) 2007 Red Hat
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/libata.h>
#include <linux/ata.h>
#include <scsi/scsi_host.h>
#define DRV_NAME "pata_acpi"
#define DRV_VERSION "0.2.3"
struct pata_acpi {
struct ata_acpi_gtm gtm;
void *last;
unsigned long mask[2];
};
/**
* pacpi_pre_reset - check for 40/80 pin
* @link: ATA link
* @deadline: deadline jiffies for the operation
*
* Perform the PATA port setup we need.
*/
static int pacpi_pre_reset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pata_acpi *acpi = ap->private_data;
if (ACPI_HANDLE(&ap->tdev) == NULL || ata_acpi_gtm(ap, &acpi->gtm) < 0)
return -ENODEV;
return ata_sff_prereset(link, deadline);
}
/**
* pacpi_cable_detect - cable type detection
* @ap: port to detect
*
* Perform device specific cable detection
*/
static int pacpi_cable_detect(struct ata_port *ap)
{
struct pata_acpi *acpi = ap->private_data;
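/* Any mode above UDMA2 (mask bits 0xF8 in the UDMA field) is only
sane on an 80-wire cable, so treat it as evidence of one */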
if ((acpi->mask[0] | acpi->mask[1]) & (0xF8 << ATA_SHIFT_UDMA))
return ATA_CBL_PATA80;
else
return ATA_CBL_PATA40;
}
/**
* pacpi_discover_modes - filter non ACPI modes
* @ap: ATA port
* @adev: ATA device
*
* Try the modes available and see which ones the ACPI method will
* set up sensibly. From this we get a mask of ACPI modes we can use
*/
static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device *adev)
{
struct pata_acpi *acpi = ap->private_data;
struct ata_acpi_gtm probe;
unsigned int xfer_mask;
probe = acpi->gtm;
ata_acpi_gtm(ap, &probe);
xfer_mask = ata_acpi_gtm_xfermask(adev, &probe);
if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
ap->cbl = ATA_CBL_PATA80;
return xfer_mask;
}
/**
* pacpi_mode_filter - mode filter for ACPI
* @adev: device
* @mask: mask of valid modes
*
* Filter the valid mode list according to our own specific rules, in
* this case the list of discovered valid modes obtained by ACPI probing
*/
static unsigned int pacpi_mode_filter(struct ata_device *adev, unsigned int mask)
{
struct pata_acpi *acpi = adev->link->ap->private_data;
return mask & acpi->mask[adev->devno];
}
/**
* pacpi_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*/
static void pacpi_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
int unit = adev->devno;
struct pata_acpi *acpi = ap->private_data;
const struct ata_timing *t;
if (!(acpi->gtm.flags & 0x10))
unit = 0;
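/* _GTM flags bit 4 (0x10) appears to select independent per-drive
timings; when it is clear, both drives share the drive[0] entry */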
/* Now stuff the nS values into the structure */
t = ata_timing_find_mode(adev->pio_mode);
acpi->gtm.drive[unit].pio = t->cycle;
ata_acpi_stm(ap, &acpi->gtm);
/* See what mode we actually got */
ata_acpi_gtm(ap, &acpi->gtm);
}
/**
* pacpi_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*/
static void pacpi_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
int unit = adev->devno;
struct pata_acpi *acpi = ap->private_data;
const struct ata_timing *t;
if (!(acpi->gtm.flags & 0x10))
unit = 0;
/* Now stuff the nS values into the structure */
t = ata_timing_find_mode(adev->dma_mode);
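/* _GTM flags bit (1 << (2 * unit)) appears to mark the UDMA cycle
time as valid for that drive, hence it is set for UDMA modes and
cleared for MWDMA below */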
if (adev->dma_mode >= XFER_UDMA_0) {
acpi->gtm.drive[unit].dma = t->udma;
acpi->gtm.flags |= (1 << (2 * unit));
} else {
acpi->gtm.drive[unit].dma = t->cycle;
acpi->gtm.flags &= ~(1 << (2 * unit));
}
ata_acpi_stm(ap, &acpi->gtm);
/* See what mode we actually got */
ata_acpi_gtm(ap, &acpi->gtm);
}
/**
* pacpi_qc_issue - command issue
* @qc: command pending
*
* Called when the libata layer is about to issue a command. We wrap
* this interface so that we can load the correct ATA timings if
* necessary.
*/
static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct pata_acpi *acpi = ap->private_data;
if (acpi->gtm.flags & 0x10)
return ata_bmdma_qc_issue(qc);
if (adev != acpi->last) {
pacpi_set_piomode(ap, adev);
if (ata_dma_enabled(adev))
pacpi_set_dmamode(ap, adev);
acpi->last = adev;
}
return ata_bmdma_qc_issue(qc);
}
/**
* pacpi_port_start - port setup
* @ap: ATA port being set up
*
* Use the port_start hook to maintain private control structures
*/
static int pacpi_port_start(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct pata_acpi *acpi;
if (ACPI_HANDLE(&ap->tdev) == NULL)
return -ENODEV;
acpi = ap->private_data = devm_kzalloc(&pdev->dev, sizeof(struct pata_acpi), GFP_KERNEL);
if (ap->private_data == NULL)
return -ENOMEM;
acpi->mask[0] = pacpi_discover_modes(ap, &ap->link.device[0]);
acpi->mask[1] = pacpi_discover_modes(ap, &ap->link.device[1]);
return ata_bmdma_port_start(ap);
}
static const struct scsi_host_template pacpi_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations pacpi_ops = {
.inherits = &ata_bmdma_port_ops,
.qc_issue = pacpi_qc_issue,
.cable_detect = pacpi_cable_detect,
.mode_filter = pacpi_mode_filter,
.set_piomode = pacpi_set_piomode,
.set_dmamode = pacpi_set_dmamode,
.prereset = pacpi_pre_reset,
.port_start = pacpi_port_start,
};
/**
* pacpi_init_one - Register ACPI ATA PCI device with kernel services
* @pdev: PCI device to register
* @id: PCI device ID
*
* Called from kernel PCI layer.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &pacpi_ops,
};
const struct ata_port_info *ppi[] = { &info, NULL };
if (pdev->vendor == PCI_VENDOR_ID_ATI) {
int rc = pcim_enable_device(pdev);
if (rc < 0)
return rc;
pcim_pin_device(pdev);
}
return ata_pci_bmdma_init_one(pdev, ppi, &pacpi_sht, NULL, 0);
}
static const struct pci_device_id pacpi_pci_tbl[] = {
{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1},
{ } /* terminate list */
};
static struct pci_driver pacpi_pci_driver = {
.name = DRV_NAME,
.id_table = pacpi_pci_tbl,
.probe = pacpi_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(pacpi_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for ATA in ACPI mode");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pacpi_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_acpi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* sata_mv.c - Marvell SATA support
*
* Copyright 2008-2009: Marvell Corporation, all rights reserved.
* Copyright 2005: EMC Corporation, all rights reserved.
* Copyright 2005 Red Hat, Inc. All rights reserved.
*
* Originally written by Brett Russ.
* Extensive overhaul and enhancement by Mark Lord <[email protected]>.
*
* Please ALWAYS copy [email protected] on emails.
*/
/*
* sata_mv TODO list:
*
* --> Develop a low-power-consumption strategy, and implement it.
*
* --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
*
* --> [Experiment, Marvell value added] Is it possible to use target
* mode to cross-connect two Linux boxes with Marvell cards? If so,
* creating LibATA target mode support would be very interesting.
*
* Target mode, for those without docs, is the ability to directly
* connect two SATA ports.
*/
/*
* 80x1-B2 errata PCI#11:
*
* Users of the 6041/6081 Rev.B2 chips (current is C0)
* should be careful to insert those cards only onto PCI-X bus #0,
* and only in device slots 0..7, not higher. The chips may not
* work correctly otherwise (note: this is a pretty rare condition).
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>
#define DRV_NAME "sata_mv"
#define DRV_VERSION "1.28"
/*
* module options
*/
#ifdef CONFIG_PCI
static int msi;
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif
static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
"IRQ coalescing I/O count threshold (0..255)");
static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
"IRQ coalescing time threshold in usecs");
enum {
/* BAR's are enumerated in terms of pci_resource_start() terms */
MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
MV_IO_BAR = 2, /* offset 0x18: IO space */
MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
COAL_CLOCKS_PER_USEC = 150, /* for calculating COAL_TIMEs */
MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */
MAX_COAL_IO_COUNT = 255, /* completed I/O count */
MV_PCI_REG_BASE = 0,
/*
* Per-chip ("all ports") interrupt coalescing feature.
* This is only for GEN_II / GEN_IIE hardware.
*
* Coalescing defers the interrupt until either the IO_THRESHOLD
* (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
*/
COAL_REG_BASE = 0x18000,
IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08),
ALL_PORTS_COAL_IRQ = (1 << 4), /* all ports irq event */
IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc),
IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),
/*
* Registers for the (unused here) transaction coalescing feature:
*/
TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88),
TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c),
SATAHC0_REG_BASE = 0x20000,
FLASH_CTL = 0x1046c,
GPIO_PORT_CTL = 0x104f0,
RESET_CFG = 0x180d8,
MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
MV_MAX_Q_DEPTH = 32,
MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
/* CRQB needs alignment on a 1KB boundary. Size == 1KB
* CRPB needs alignment on a 256B boundary. Size == 256B
* ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
*/
MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
MV_MAX_SG_CT = 256,
MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
MV_PORT_HC_SHIFT = 2,
MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */
/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */
/* Host Flags */
MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ |
ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,
MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN,
CRQB_FLAG_READ = (1 << 0),
CRQB_TAG_SHIFT = 1,
CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
CRQB_CMD_ADDR_SHIFT = 8,
CRQB_CMD_CS = (0x2 << 11),
CRQB_CMD_LAST = (1 << 15),
CRPB_FLAG_STATUS_SHIFT = 8,
CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
EPRD_FLAG_END_OF_TBL = (1 << 31),
/* PCI interface registers */
MV_PCI_COMMAND = 0xc00,
MV_PCI_COMMAND_MWRCOM = (1 << 4), /* PCI Master Write Combining */
MV_PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */
PCI_MAIN_CMD_STS = 0xd30,
STOP_PCI_MASTER = (1 << 2),
PCI_MASTER_EMPTY = (1 << 3),
GLOB_SFT_RST = (1 << 4),
MV_PCI_MODE = 0xd00,
MV_PCI_MODE_MASK = 0x30,
MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
MV_PCI_DISC_TIMER = 0xd04,
MV_PCI_MSI_TRIGGER = 0xc38,
MV_PCI_SERR_MASK = 0xc28,
MV_PCI_XBAR_TMOUT = 0x1d04,
MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
MV_PCI_ERR_ATTRIBUTE = 0x1d48,
MV_PCI_ERR_COMMAND = 0x1d50,
PCI_IRQ_CAUSE = 0x1d58,
PCI_IRQ_MASK = 0x1d5c,
PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
PCIE_IRQ_CAUSE = 0x1900,
PCIE_IRQ_MASK = 0x1910,
PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
PCI_HC_MAIN_IRQ_CAUSE = 0x1d60,
PCI_HC_MAIN_IRQ_MASK = 0x1d64,
SOC_HC_MAIN_IRQ_CAUSE = 0x20020,
SOC_HC_MAIN_IRQ_MASK = 0x20024,
ERR_IRQ = (1 << 0), /* shift by (2 * port #) */
DONE_IRQ = (1 << 1), /* shift by (2 * port #) */
HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
DONE_IRQ_0_3 = 0x000000aa, /* DONE_IRQ ports 0,1,2,3 */
DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT), /* 4,5,6,7 */
PCI_ERR = (1 << 18),
TRAN_COAL_LO_DONE = (1 << 19), /* transaction coalescing */
TRAN_COAL_HI_DONE = (1 << 20), /* transaction coalescing */
PORTS_0_3_COAL_DONE = (1 << 8), /* HC0 IRQ coalescing */
PORTS_4_7_COAL_DONE = (1 << 17), /* HC1 IRQ coalescing */
ALL_PORTS_COAL_DONE = (1 << 21), /* GEN_II(E) IRQ coalescing */
GPIO_INT = (1 << 22),
SELF_INT = (1 << 23),
TWSI_INT = (1 << 24),
HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
/* SATAHC registers */
HC_CFG = 0x00,
HC_IRQ_CAUSE = 0x14,
DMA_IRQ = (1 << 0), /* shift by port # */
HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */
DEV_IRQ = (1 << 8), /* shift by port # */
/*
* Per-HC (Host-Controller) interrupt coalescing feature.
* This is present on all chip generations.
*
* Coalescing defers the interrupt until either the IO_THRESHOLD
* (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
*/
HC_IRQ_COAL_IO_THRESHOLD = 0x000c,
HC_IRQ_COAL_TIME_THRESHOLD = 0x0010,
SOC_LED_CTRL = 0x2c,
SOC_LED_CTRL_BLINK = (1 << 0), /* Active LED blink */
SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */
/* with dev activity LED */
/* Shadow block registers */
SHD_BLK = 0x100,
SHD_CTL_AST = 0x20, /* ofs from SHD_BLK */
/* SATA registers */
SATA_STATUS = 0x300, /* ctrl, err regs follow status */
SATA_ACTIVE = 0x350,
FIS_IRQ_CAUSE = 0x364,
FIS_IRQ_CAUSE_AN = (1 << 9), /* async notification */
LTMODE = 0x30c, /* requires read-after-write */
LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
PHY_MODE2 = 0x330,
PHY_MODE3 = 0x310,
PHY_MODE4 = 0x314, /* requires read-after-write */
PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */
PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */
PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */
PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */
SATA_IFCTL = 0x344,
SATA_TESTCTL = 0x348,
SATA_IFSTAT = 0x34c,
VENDOR_UNIQUE_FIS = 0x35c,
FISCFG = 0x360,
FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */
FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
PHY_MODE9_GEN2 = 0x398,
PHY_MODE9_GEN1 = 0x39c,
PHYCFG_OFS = 0x3a0, /* only in 65n devices */
MV5_PHY_MODE = 0x74,
MV5_LTMODE = 0x30,
MV5_PHY_CTL = 0x0C,
SATA_IFCFG = 0x050,
LP_PHY_CTL = 0x058,
LP_PHY_CTL_PIN_PU_PLL = (1 << 0),
LP_PHY_CTL_PIN_PU_RX = (1 << 1),
LP_PHY_CTL_PIN_PU_TX = (1 << 2),
LP_PHY_CTL_GEN_TX_3G = (1 << 5),
LP_PHY_CTL_GEN_RX_3G = (1 << 9),
MV_M2_PREAMP_MASK = 0x7e0,
/* Port registers */
EDMA_CFG = 0,
EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
EDMA_ERR_IRQ_CAUSE = 0x8,
EDMA_ERR_IRQ_MASK = 0xc,
EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
EDMA_ERR_DEV = (1 << 2), /* device error */
EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
EDMA_ERR_OVERRUN_5 = (1 << 5),
EDMA_ERR_UNDERRUN_5 = (1 << 6),
EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
EDMA_ERR_LNK_CTRL_RX_1 |
EDMA_ERR_LNK_CTRL_RX_3 |
EDMA_ERR_LNK_CTRL_TX,
EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
EDMA_ERR_PRD_PAR |
EDMA_ERR_DEV_DCON |
EDMA_ERR_DEV_CON |
EDMA_ERR_SERR |
EDMA_ERR_SELF_DIS |
EDMA_ERR_CRQB_PAR |
EDMA_ERR_CRPB_PAR |
EDMA_ERR_INTRL_PAR |
EDMA_ERR_IORDY |
EDMA_ERR_LNK_CTRL_RX_2 |
EDMA_ERR_LNK_DATA_RX |
EDMA_ERR_LNK_DATA_TX |
EDMA_ERR_TRANS_PROTO,
EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
EDMA_ERR_PRD_PAR |
EDMA_ERR_DEV_DCON |
EDMA_ERR_DEV_CON |
EDMA_ERR_OVERRUN_5 |
EDMA_ERR_UNDERRUN_5 |
EDMA_ERR_SELF_DIS_5 |
EDMA_ERR_CRQB_PAR |
EDMA_ERR_CRPB_PAR |
EDMA_ERR_INTRL_PAR |
EDMA_ERR_IORDY,
EDMA_REQ_Q_BASE_HI = 0x10,
EDMA_REQ_Q_IN_PTR = 0x14, /* also contains BASE_LO */
EDMA_REQ_Q_OUT_PTR = 0x18,
EDMA_REQ_Q_PTR_SHIFT = 5,
EDMA_RSP_Q_BASE_HI = 0x1c,
EDMA_RSP_Q_IN_PTR = 0x20,
EDMA_RSP_Q_OUT_PTR = 0x24, /* also contains BASE_LO */
EDMA_RSP_Q_PTR_SHIFT = 3,
EDMA_CMD = 0x28, /* EDMA command register */
EDMA_EN = (1 << 0), /* enable EDMA */
EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */
EDMA_STATUS = 0x30, /* EDMA engine status */
EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */
EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */
EDMA_IORDY_TMOUT = 0x34,
EDMA_ARB_CFG = 0x38,
EDMA_HALTCOND = 0x60, /* GenIIe halt conditions */
EDMA_UNKNOWN_RSVD = 0x6C, /* GenIIe unknown/reserved */
BMDMA_CMD = 0x224, /* bmdma command register */
BMDMA_STATUS = 0x228, /* bmdma status register */
BMDMA_PRD_LOW = 0x22c, /* bmdma PRD addr 31:0 */
BMDMA_PRD_HIGH = 0x230, /* bmdma PRD addr 63:32 */
/* Host private flags (hp_flags) */
MV_HP_FLAG_MSI = (1 << 0),
MV_HP_ERRATA_50XXB0 = (1 << 1),
MV_HP_ERRATA_50XXB2 = (1 << 2),
MV_HP_ERRATA_60X1B2 = (1 << 3),
MV_HP_ERRATA_60X1C0 = (1 << 4),
MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */
MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */
MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */
MV_HP_FIX_LP_PHY_CTL = (1 << 13), /* fix speed in LP_PHY_CTL ? */
/* Port private flags (pp_flags) */
MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */
MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */
MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4), /* ignore initial ATA_DRDY */
};
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
#define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
#define WINDOW_BASE(i) (0x20034 + ((i) << 4))
enum {
/* DMA boundary 0xffff is required by the s/g splitting
* we need on /length/ in mv_fill_sg().
*/
MV_DMA_BOUNDARY = 0xffffU,
/* mask of register bits containing lower 32 bits
* of EDMA request queue DMA address
*/
EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
/* ditto, for response queue */
EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};
enum chip_type {
chip_504x,
chip_508x,
chip_5080,
chip_604x,
chip_608x,
chip_6042,
chip_7042,
chip_soc,
};
/* Command ReQuest Block: 32B */
struct mv_crqb {
__le32 sg_addr;
__le32 sg_addr_hi;
__le16 ctrl_flags;
__le16 ata_cmd[11];
};
struct mv_crqb_iie {
__le32 addr;
__le32 addr_hi;
__le32 flags;
__le32 len;
__le32 ata_cmd[4];
};
/* Command ResPonse Block: 8B */
struct mv_crpb {
__le16 id;
__le16 flags;
__le32 tmstmp;
};
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
__le32 addr;
__le32 flags_size;
__le32 addr_hi;
__le32 reserved;
};
/*
* We keep a local cache of a few frequently accessed port
* registers here, to avoid having to read them (very slow)
* when switching between EDMA and non-EDMA modes.
*/
struct mv_cached_regs {
u32 fiscfg;
u32 ltmode;
u32 haltcond;
u32 unknown_rsvd;
};
struct mv_port_priv {
struct mv_crqb *crqb;
dma_addr_t crqb_dma;
struct mv_crpb *crpb;
dma_addr_t crpb_dma;
struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
unsigned int req_idx;
unsigned int resp_idx;
u32 pp_flags;
struct mv_cached_regs cached;
unsigned int delayed_eh_pmp_map;
};
struct mv_port_signal {
u32 amps;
u32 pre;
};
struct mv_host_priv {
u32 hp_flags;
unsigned int board_idx;
u32 main_irq_mask;
struct mv_port_signal signal[8];
const struct mv_hw_ops *ops;
int n_ports;
void __iomem *base;
void __iomem *main_irq_cause_addr;
void __iomem *main_irq_mask_addr;
u32 irq_cause_offset;
u32 irq_mask_offset;
u32 unmask_all_irqs;
/*
* Needed on some devices that require their clocks to be enabled.
* These are optional: if the platform device does not have any
* clocks, they won't be used. Also, if the underlying hardware
* does not support the common clock framework (CONFIG_HAVE_CLK=n),
* all the clock operations become no-ops (see clk.h).
*/
struct clk *clk;
struct clk **port_clks;
/*
* Some devices have a SATA PHY which can be enabled/disabled
* in order to save power. These are optional: if the platform
* device does not have any PHYs, they won't be used.
*/
struct phy **port_phys;
/*
* These consistent DMA memory pools give us guaranteed
* alignment for hardware-accessed data structures,
* and less memory waste in accomplishing the alignment.
*/
struct dma_pool *crqb_pool;
struct dma_pool *crpb_pool;
struct dma_pool *sg_tbl_pool;
};
struct mv_hw_ops {
void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port);
void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio);
int (*reset_hc)(struct ata_host *host, void __iomem *mmio,
unsigned int n_hc);
void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio);
static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio,
unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio);
static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio,
unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio);
static int mv_soc_reset_hc(struct ata_host *host,
void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
void __iomem *mmio, unsigned int port);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
struct mv_port_priv *pp);
static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8 mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
* because we have to allow room for worst case splitting of
* PRDs for 64K boundaries in mv_fill_sg().
*/
#ifdef CONFIG_PCI
static const struct scsi_host_template mv5_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = MV_MAX_SG_CT / 2,
.dma_boundary = MV_DMA_BOUNDARY,
};
#endif
static const struct scsi_host_template mv6_sht = {
__ATA_BASE_SHT(DRV_NAME),
.can_queue = MV_MAX_Q_DEPTH - 1,
.sg_tablesize = MV_MAX_SG_CT / 2,
.dma_boundary = MV_DMA_BOUNDARY,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
.slave_configure = ata_scsi_slave_config
};
static struct ata_port_operations mv5_ops = {
.inherits = &ata_sff_port_ops,
.lost_interrupt = ATA_OP_NULL,
.qc_defer = mv_qc_defer,
.qc_prep = mv_qc_prep,
.qc_issue = mv_qc_issue,
.freeze = mv_eh_freeze,
.thaw = mv_eh_thaw,
.hardreset = mv_hardreset,
.scr_read = mv5_scr_read,
.scr_write = mv5_scr_write,
.port_start = mv_port_start,
.port_stop = mv_port_stop,
};
static struct ata_port_operations mv6_ops = {
.inherits = &ata_bmdma_port_ops,
.lost_interrupt = ATA_OP_NULL,
.qc_defer = mv_qc_defer,
.qc_prep = mv_qc_prep,
.qc_issue = mv_qc_issue,
.dev_config = mv6_dev_config,
.freeze = mv_eh_freeze,
.thaw = mv_eh_thaw,
.hardreset = mv_hardreset,
.softreset = mv_softreset,
.pmp_hardreset = mv_pmp_hardreset,
.pmp_softreset = mv_softreset,
.error_handler = mv_pmp_error_handler,
.scr_read = mv_scr_read,
.scr_write = mv_scr_write,
.sff_check_status = mv_sff_check_status,
.sff_irq_clear = mv_sff_irq_clear,
.check_atapi_dma = mv_check_atapi_dma,
.bmdma_setup = mv_bmdma_setup,
.bmdma_start = mv_bmdma_start,
.bmdma_stop = mv_bmdma_stop,
.bmdma_status = mv_bmdma_status,
.port_start = mv_port_start,
.port_stop = mv_port_stop,
};
static struct ata_port_operations mv_iie_ops = {
.inherits = &mv6_ops,
.dev_config = ATA_OP_NULL,
.qc_prep = mv_qc_prep_iie,
};
static const struct ata_port_info mv_port_info[] = {
{ /* chip_504x */
.flags = MV_GEN_I_FLAGS,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv5_ops,
},
{ /* chip_508x */
.flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv5_ops,
},
{ /* chip_5080 */
.flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv5_ops,
},
{ /* chip_604x */
.flags = MV_GEN_II_FLAGS,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv6_ops,
},
{ /* chip_608x */
.flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv6_ops,
},
{ /* chip_6042 */
.flags = MV_GEN_IIE_FLAGS,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv_iie_ops,
},
{ /* chip_7042 */
.flags = MV_GEN_IIE_FLAGS,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv_iie_ops,
},
{ /* chip_soc */
.flags = MV_GEN_IIE_FLAGS,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &mv_iie_ops,
},
};
static const struct pci_device_id mv_pci_tbl[] = {
{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
/* RocketRAID 1720/174x have different identifiers */
{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },
{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
/* Adaptec 1430SA */
{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
/* Marvell 7042 support */
{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
/* Highpoint RocketRAID PCIe series */
{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
{ } /* terminate list */
};
static const struct mv_hw_ops mv5xxx_ops = {
.phy_errata = mv5_phy_errata,
.enable_leds = mv5_enable_leds,
.read_preamp = mv5_read_preamp,
.reset_hc = mv5_reset_hc,
.reset_flash = mv5_reset_flash,
.reset_bus = mv5_reset_bus,
};
static const struct mv_hw_ops mv6xxx_ops = {
.phy_errata = mv6_phy_errata,
.enable_leds = mv6_enable_leds,
.read_preamp = mv6_read_preamp,
.reset_hc = mv6_reset_hc,
.reset_flash = mv6_reset_flash,
.reset_bus = mv_reset_pci_bus,
};
static const struct mv_hw_ops mv_soc_ops = {
.phy_errata = mv6_phy_errata,
.enable_leds = mv_soc_enable_leds,
.read_preamp = mv_soc_read_preamp,
.reset_hc = mv_soc_reset_hc,
.reset_flash = mv_soc_reset_flash,
.reset_bus = mv_soc_reset_bus,
};
static const struct mv_hw_ops mv_soc_65n_ops = {
.phy_errata = mv_soc_65n_phy_errata,
.enable_leds = mv_soc_enable_leds,
.reset_hc = mv_soc_reset_hc,
.reset_flash = mv_soc_reset_flash,
.reset_bus = mv_soc_reset_bus,
};
/*
* Functions
*/
static inline void writelfl(unsigned long data, void __iomem *addr)
{
writel(data, addr);
(void) readl(addr); /* flush to avoid PCI posted write */
}
static inline unsigned int mv_hc_from_port(unsigned int port)
{
return port >> MV_PORT_HC_SHIFT;
}
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
return port & MV_PORT_MASK;
}
/*
* Consolidate some rather tricky bit shift calculations.
* This is hot-path stuff, so not a function.
* Simple code, with two return values, so macro rather than inline.
*
* port is the sole input, in range 0..7.
* shift is one output, for use with main_irq_cause / main_irq_mask registers.
* hardport is the other output, in range 0..3.
*
* Note that port and hardport may be the same variable in some cases.
*/
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \
{ \
shift = mv_hc_from_port(port) * HC_SHIFT; \
hardport = mv_hardport_from_port(port); \
shift += hardport * 2; \
}
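/*
* Worked example: port 5 is on HC1, so shift = 1 * HC_SHIFT + (5 & 3) * 2
* = 9 + 2 = 11, and hardport = 1; that port's ERR_IRQ/DONE_IRQ therefore
* land at bits 11/12 of the main IRQ cause/mask registers.
*/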
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
unsigned int port)
{
return mv_hc_base(base, mv_hc_from_port(port));
}
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
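/* Example: port 5 -> HC1 base (0x20000 + 0x10000), plus the 0x2000
arbiter block, plus hardport 1 * 0x2000 = base + 0x34000 */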
return mv_hc_base_from_port(base, port) +
MV_SATAHC_ARBTR_REG_SZ +
(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
return hc_mmio + ofs;
}
static inline void __iomem *mv_host_base(struct ata_host *host)
{
struct mv_host_priv *hpriv = host->private_data;
return hpriv->base;
}
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
return mv_port_base(mv_host_base(ap->host), ap->port_no);
}
static inline int mv_get_hc_count(unsigned long port_flags)
{
return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
/**
* mv_save_cached_regs - (re-)initialize cached port registers
* @ap: the port whose registers we are caching
*
* Initialize the local cache of port registers,
* so that reading them over and over again can
* be avoided on the hotter paths of this driver.
* This saves a few microseconds each time we switch
* to/from EDMA mode to perform (eg.) a drive cache flush.
*/
static void mv_save_cached_regs(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
struct mv_port_priv *pp = ap->private_data;
pp->cached.fiscfg = readl(port_mmio + FISCFG);
pp->cached.ltmode = readl(port_mmio + LTMODE);
pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
}
/**
* mv_write_cached_reg - write to a cached port register
* @addr: hardware address of the register
* @old: pointer to cached value of the register
* @new: new value for the register
*
* Write a new value to a cached register,
* but only if the value is different from before.
*/
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
if (new != *old) {
unsigned long laddr;
*old = new;
/*
* Workaround for 88SX60x1-B2 FEr SATA#13:
* Read-after-write is needed to prevent generating 64-bit
* write cycles on the PCI bus for SATA interface registers
* at offsets ending in 0x4 or 0xc.
*
* Looks like a lot of fuss, but it avoids an unnecessary
* +1 usec read-after-write delay for unaffected registers.
*/
laddr = (unsigned long)addr & 0xffff;
if (laddr >= 0x300 && laddr <= 0x33c) {
laddr &= 0x000f;
if (laddr == 0x4 || laddr == 0xc) {
writelfl(new, addr); /* read after write */
return;
}
}
writel(new, addr); /* unaffected by the errata */
}
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
struct mv_host_priv *hpriv,
struct mv_port_priv *pp)
{
u32 index;
/*
* initialize request queue
*/
pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
WARN_ON(pp->crqb_dma & 0x3ff);
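/* The double 16-bit shift stays well-defined when dma_addr_t is only
32 bits wide (a single >> 32 would be undefined behaviour there) */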
writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
port_mmio + EDMA_REQ_Q_IN_PTR);
writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
/*
* initialize response queue
*/
pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
WARN_ON(pp->crpb_dma & 0xff);
writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
port_mmio + EDMA_RSP_Q_OUT_PTR);
}
static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{
/*
* When writing to the main_irq_mask in hardware,
* we must ensure exclusivity between the interrupt coalescing bits
* and the corresponding individual port DONE_IRQ bits.
*
* Note that this register is really an "IRQ enable" register,
* not an "IRQ mask" register as Marvell's naming might suggest.
*/
if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
mask &= ~DONE_IRQ_0_3;
if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
mask &= ~DONE_IRQ_4_7;
writelfl(mask, hpriv->main_irq_mask_addr);
}
static void mv_set_main_irq_mask(struct ata_host *host,
u32 disable_bits, u32 enable_bits)
{
struct mv_host_priv *hpriv = host->private_data;
u32 old_mask, new_mask;
old_mask = hpriv->main_irq_mask;
new_mask = (old_mask & ~disable_bits) | enable_bits;
if (new_mask != old_mask) {
hpriv->main_irq_mask = new_mask;
mv_write_main_irq_mask(new_mask, hpriv);
}
}
static void mv_enable_port_irqs(struct ata_port *ap,
unsigned int port_bits)
{
unsigned int shift, hardport, port = ap->port_no;
u32 disable_bits, enable_bits;
MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
enable_bits = port_bits << shift;
mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}
static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
void __iomem *port_mmio,
unsigned int port_irqs)
{
struct mv_host_priv *hpriv = ap->host->private_data;
int hardport = mv_hardport_from_port(ap->port_no);
void __iomem *hc_mmio = mv_hc_base_from_port(
mv_host_base(ap->host), ap->port_no);
u32 hc_irq_cause;
/* clear EDMA event indicators, if any */
writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
/* clear pending irq events */
hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
/* clear FIS IRQ Cause */
if (IS_GEN_IIE(hpriv))
writelfl(0, port_mmio + FIS_IRQ_CAUSE);
mv_enable_port_irqs(ap, port_irqs);
}
static void mv_set_irq_coalescing(struct ata_host *host,
unsigned int count, unsigned int usecs)
{
struct mv_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->base, *hc_mmio;
u32 coal_enable = 0;
unsigned long flags;
unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
ALL_PORTS_COAL_DONE;
/* Disable IRQ coalescing if either threshold is zero */
if (!usecs || !count) {
clks = count = 0;
} else {
/* Respect maximum limits of the hardware */
clks = usecs * COAL_CLOCKS_PER_USEC;
if (clks > MAX_COAL_TIME_THRESHOLD)
clks = MAX_COAL_TIME_THRESHOLD;
if (count > MAX_COAL_IO_COUNT)
count = MAX_COAL_IO_COUNT;
}
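/* Illustrative: usecs = 100 gives clks = 100 * 150 = 15000, far below
the (1 << 24) - 1 ceiling, so the clamps rarely trigger in practice */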
spin_lock_irqsave(&host->lock, flags);
mv_set_main_irq_mask(host, coal_disable, 0);
if (is_dual_hc && !IS_GEN_I(hpriv)) {
/*
* GEN_II/GEN_IIE with dual host controllers:
* one set of global thresholds for the entire chip.
*/
writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD);
writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
/* clear leftover coal IRQ bit */
writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
if (count)
coal_enable = ALL_PORTS_COAL_DONE;
clks = count = 0; /* force clearing of regular regs below */
}
/*
* All chips: independent thresholds for each HC on the chip.
*/
hc_mmio = mv_hc_base_from_port(mmio, 0);
writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
if (count)
coal_enable |= PORTS_0_3_COAL_DONE;
if (is_dual_hc) {
hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
if (count)
coal_enable |= PORTS_4_7_COAL_DONE;
}
mv_set_main_irq_mask(host, 0, coal_enable);
spin_unlock_irqrestore(&host->lock, flags);
}
/*
* mv_start_edma - Enable eDMA engine
* @ap: ATA port to enable eDMA on
* @port_mmio: io base address of the port
* @pp: port private data
* @protocol: taskfile protocol of the command being started
*
* Verify the local cache of the eDMA state is accurate with a
* WARN_ON.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
struct mv_port_priv *pp, u8 protocol)
{
int want_ncq = (protocol == ATA_PROT_NCQ);
if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
if (want_ncq != using_ncq)
mv_stop_edma(ap);
}
if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
struct mv_host_priv *hpriv = ap->host->private_data;
mv_edma_cfg(ap, want_ncq, 1);
mv_set_edma_ptrs(port_mmio, hpriv, pp);
mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
writelfl(EDMA_EN, port_mmio + EDMA_CMD);
pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
}
}
static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
const int per_loop = 5, timeout = (15 * 1000 / per_loop);
int i;
/*
* Wait for the EDMA engine to finish transactions in progress.
* No idea what a good "timeout" value might be, but measurements
* indicate that it often requires hundreds of microseconds
* with two drives in-use. So we use the 15msec value above
* as a rough guess at what even more drives might require.
*/
for (i = 0; i < timeout; ++i) {
u32 edma_stat = readl(port_mmio + EDMA_STATUS);
if ((edma_stat & empty_idle) == empty_idle)
break;
udelay(per_loop);
}
/* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
}
/**
* mv_stop_edma_engine - Disable eDMA engine
* @port_mmio: io base address
*
* LOCKING:
* Inherited from caller.
*/
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
int i;
/* Disable eDMA. The disable bit auto clears. */
writelfl(EDMA_DS, port_mmio + EDMA_CMD);
/* Wait for the chip to confirm eDMA is off (up to 10000 * 10us = ~100ms). */
for (i = 10000; i > 0; i--) {
u32 reg = readl(port_mmio + EDMA_CMD);
if (!(reg & EDMA_EN))
return 0;
udelay(10);
}
return -EIO;
}
static int mv_stop_edma(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
struct mv_port_priv *pp = ap->private_data;
int err = 0;
if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
return 0;
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
mv_wait_for_edma_empty_idle(ap);
if (mv_stop_edma_engine(port_mmio)) {
ata_port_err(ap, "Unable to stop eDMA\n");
err = -EIO;
}
mv_edma_cfg(ap, 0, 0);
return err;
}
static void mv_dump_mem(struct device *dev, void __iomem *start, unsigned bytes)
{
int b, w, o;
unsigned char linebuf[38];
for (b = 0; b < bytes; ) {
for (w = 0, o = 0; b < bytes && w < 4; w++) {
o += scnprintf(linebuf + o, sizeof(linebuf) - o,
"%08x ", readl(start + b));
b += sizeof(u32);
}
dev_dbg(dev, "%s: %p: %s\n",
__func__, start + b, linebuf);
}
}
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
int b, w, o;
u32 dw = 0;
unsigned char linebuf[38];
for (b = 0; b < bytes; ) {
for (w = 0, o = 0; b < bytes && w < 4; w++) {
(void) pci_read_config_dword(pdev, b, &dw);
o += snprintf(linebuf + o, sizeof(linebuf) - o,
"%08x ", dw);
b += sizeof(u32);
}
dev_dbg(&pdev->dev, "%s: %02x: %s\n",
__func__, b, linebuf);
}
}
static void mv_dump_all_regs(void __iomem *mmio_base,
struct pci_dev *pdev)
{
void __iomem *hc_base;
void __iomem *port_base;
int start_port, num_ports, p, start_hc, num_hcs, hc;
start_hc = start_port = 0;
num_ports = 8; /* should be benign for 4 port devs */
num_hcs = 2;
dev_dbg(&pdev->dev,
"%s: All registers for port(s) %u-%u:\n", __func__,
start_port, num_ports > 1 ? num_ports - 1 : start_port);
dev_dbg(&pdev->dev, "%s: PCI config space regs:\n", __func__);
mv_dump_pci_cfg(pdev, 0x68);
dev_dbg(&pdev->dev, "%s: PCI regs:\n", __func__);
mv_dump_mem(&pdev->dev, mmio_base+0xc00, 0x3c);
mv_dump_mem(&pdev->dev, mmio_base+0xd00, 0x34);
mv_dump_mem(&pdev->dev, mmio_base+0xf00, 0x4);
mv_dump_mem(&pdev->dev, mmio_base+0x1d00, 0x6c);
for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
hc_base = mv_hc_base(mmio_base, hc);
dev_dbg(&pdev->dev, "%s: HC regs (HC %i):\n", __func__, hc);
mv_dump_mem(&pdev->dev, hc_base, 0x1c);
}
for (p = start_port; p < start_port + num_ports; p++) {
port_base = mv_port_base(mmio_base, p);
dev_dbg(&pdev->dev, "%s: EDMA regs (port %i):\n", __func__, p);
mv_dump_mem(&pdev->dev, port_base, 0x54);
dev_dbg(&pdev->dev, "%s: SATA regs (port %i):\n", __func__, p);
mv_dump_mem(&pdev->dev, port_base+0x300, 0x60);
}
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
unsigned int ofs;
switch (sc_reg_in) {
case SCR_STATUS:
case SCR_CONTROL:
case SCR_ERROR:
ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
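/* e.g. SCR_CONTROL (2) maps to 0x300 + 2 * 4 = 0x308 */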
break;
case SCR_ACTIVE:
ofs = SATA_ACTIVE; /* active is not with the others */
break;
default:
ofs = 0xffffffffU;
break;
}
return ofs;
}
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
unsigned int ofs = mv_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU) {
*val = readl(mv_ap_base(link->ap) + ofs);
return 0;
} else
return -EINVAL;
}
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
unsigned int ofs = mv_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU) {
void __iomem *addr = mv_ap_base(link->ap) + ofs;
struct mv_host_priv *hpriv = link->ap->host->private_data;
if (sc_reg_in == SCR_CONTROL) {
/*
* Workaround for 88SX60x1 FEr SATA#26:
*
* COMRESETs have to take care not to accidentally
* put the drive to sleep when writing SCR_CONTROL.
* Setting bits 12..15 prevents this problem.
*
* So if we see an outbound COMRESET, set those bits.
* Ditto for the followup write that clears the reset.
*
* The proprietary driver does this for
* all chip versions, and so do we.
*/
if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
val |= 0xf000;
if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) {
void __iomem *lp_phy_addr =
mv_ap_base(link->ap) + LP_PHY_CTL;
/*
* Set PHY speed according to SControl speed.
*/
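/* SControl SPD (bits 7:4) == 0x1 limits the link to 1.5 Gbps;
any other value lets us enable the 3 Gbps TX/RX bits below */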
u32 lp_phy_val =
LP_PHY_CTL_PIN_PU_PLL |
LP_PHY_CTL_PIN_PU_RX |
LP_PHY_CTL_PIN_PU_TX;
if ((val & 0xf0) != 0x10)
lp_phy_val |=
LP_PHY_CTL_GEN_TX_3G |
LP_PHY_CTL_GEN_RX_3G;
writelfl(lp_phy_val, lp_phy_addr);
}
}
writelfl(val, addr);
return 0;
} else
return -EINVAL;
}
static void mv6_dev_config(struct ata_device *adev)
{
/*
* Deal with Gen-II ("mv6") hardware quirks/restrictions:
*
* Gen-II does not support NCQ over a port multiplier
* (no FIS-based switching).
*/
if (adev->flags & ATA_DFLAG_NCQ) {
if (sata_pmp_attached(adev->link->ap)) {
adev->flags &= ~ATA_DFLAG_NCQ;
ata_dev_info(adev,
"NCQ disabled for command-based switching\n");
}
}
}
static int mv_qc_defer(struct ata_queued_cmd *qc)
{
struct ata_link *link = qc->dev->link;
struct ata_port *ap = link->ap;
struct mv_port_priv *pp = ap->private_data;
/*
* Don't allow new commands if we're in a delayed EH state
* for NCQ and/or FIS-based switching.
*/
if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
return ATA_DEFER_PORT;
/* PIO commands need exclusive link: no other commands [DMA or PIO]
* can run concurrently.
* set excl_link when we want to send a PIO command in DMA mode
* or a non-NCQ command in NCQ mode.
* When we receive a command from that link, and there are no
* outstanding commands, mark a flag to clear excl_link and let
* the command go through.
*/
if (unlikely(ap->excl_link)) {
if (link == ap->excl_link) {
if (ap->nr_active_links)
return ATA_DEFER_PORT;
qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
return 0;
} else
return ATA_DEFER_PORT;
}
/*
* If the port is completely idle, then allow the new qc.
*/
if (ap->nr_active_links == 0)
return 0;
/*
* The port is operating in host queuing mode (EDMA) with NCQ
* enabled, allow multiple NCQ commands. EDMA also allows
* queueing multiple DMA commands but libata core currently
* doesn't allow it.
*/
if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
if (ata_is_ncq(qc->tf.protocol))
return 0;
else {
ap->excl_link = link;
return ATA_DEFER_PORT;
}
}
return ATA_DEFER_PORT;
}
static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
{
struct mv_port_priv *pp = ap->private_data;
void __iomem *port_mmio;
u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg;
u32 ltmode, *old_ltmode = &pp->cached.ltmode;
u32 haltcond, *old_haltcond = &pp->cached.haltcond;
ltmode = *old_ltmode & ~LTMODE_BIT8;
haltcond = *old_haltcond | EDMA_ERR_DEV;
if (want_fbs) {
fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
ltmode = *old_ltmode | LTMODE_BIT8;
if (want_ncq)
haltcond &= ~EDMA_ERR_DEV;
else
fiscfg |= FISCFG_WAIT_DEV_ERR;
} else {
fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
}
port_mmio = mv_ap_base(ap);
mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
}
static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
struct mv_host_priv *hpriv = ap->host->private_data;
u32 old, new;
/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
old = readl(hpriv->base + GPIO_PORT_CTL);
if (want_ncq)
new = old | (1 << 22);
else
new = old & ~(1 << 22);
if (new != old)
writel(new, hpriv->base + GPIO_PORT_CTL);
}
/*
* mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
* @ap: Port being initialized
* @enable_bmdma: enable (non-zero) or disable (zero) basic DMA
*
* There are two DMA modes on these chips: basic DMA, and EDMA.
*
* Bit-0 of the "EDMA RESERVED" register enables/disables use
* of basic DMA on the GEN_IIE versions of the chips.
*
* This bit survives EDMA resets, and must be set for basic DMA
* to function, and should be cleared when EDMA is active.
*/
static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
{
struct mv_port_priv *pp = ap->private_data;
u32 new, *old = &pp->cached.unknown_rsvd;
if (enable_bmdma)
new = *old | 1;
else
new = *old & ~1;
mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
}
/*
* SOC chips have an issue whereby the HDD LEDs don't always blink
* during I/O when NCQ is enabled. Enabling a special "LED blink" mode
* of the SOC takes care of it, generating a steady blink rate when
* any drive on the chip is active.
*
* Unfortunately, the blink mode is a global hardware setting for the SOC,
* so we must use it whenever at least one port on the SOC has NCQ enabled.
*
* We turn "LED blink" off when NCQ is not in use anywhere, because the normal
* LED operation works then, and provides better (more accurate) feedback.
*
* Note that this code assumes that an SOC never has more than one HC onboard.
*/
static void mv_soc_led_blink_enable(struct ata_port *ap)
{
struct ata_host *host = ap->host;
struct mv_host_priv *hpriv = host->private_data;
void __iomem *hc_mmio;
u32 led_ctrl;
if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
return;
hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
}
static void mv_soc_led_blink_disable(struct ata_port *ap)
{
struct ata_host *host = ap->host;
struct mv_host_priv *hpriv = host->private_data;
void __iomem *hc_mmio;
u32 led_ctrl;
unsigned int port;
if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
return;
/* disable led-blink only if no ports are using NCQ */
for (port = 0; port < hpriv->n_ports; port++) {
struct ata_port *this_ap = host->ports[port];
struct mv_port_priv *pp = this_ap->private_data;
if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
return;
}
hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
}
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
{
u32 cfg;
struct mv_port_priv *pp = ap->private_data;
struct mv_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = mv_ap_base(ap);
/* set up non-NCQ EDMA configuration */
cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
pp->pp_flags &=
~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
if (IS_GEN_I(hpriv))
cfg |= (1 << 8); /* enab config burst size mask */
else if (IS_GEN_II(hpriv)) {
cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
mv_60x1_errata_sata25(ap, want_ncq);
} else if (IS_GEN_IIE(hpriv)) {
int want_fbs = sata_pmp_attached(ap);
/*
* Possible future enhancement:
*
* The chip can use FBS with non-NCQ, if we allow it,
* But first we need to have the error handling in place
* for this mode (datasheet section 7.3.15.4.2.3).
* So disallow non-NCQ FBS for now.
*/
want_fbs &= want_ncq;
mv_config_fbs(ap, want_ncq, want_fbs);
if (want_fbs) {
pp->pp_flags |= MV_PP_FLAG_FBS_EN;
cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
}
cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
if (want_edma) {
cfg |= (1 << 22); /* enab 4-entry host queue cache */
if (!IS_SOC(hpriv))
cfg |= (1 << 18); /* enab early completion */
}
if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
mv_bmdma_enable_iie(ap, !want_edma);
if (IS_SOC(hpriv)) {
if (want_ncq)
mv_soc_led_blink_enable(ap);
else
mv_soc_led_blink_disable(ap);
}
}
if (want_ncq) {
cfg |= EDMA_CFG_NCQ;
pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
}
writelfl(cfg, port_mmio + EDMA_CFG);
}
static void mv_port_free_dma_mem(struct ata_port *ap)
{
struct mv_host_priv *hpriv = ap->host->private_data;
struct mv_port_priv *pp = ap->private_data;
int tag;
if (pp->crqb) {
dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
pp->crqb = NULL;
}
if (pp->crpb) {
dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
pp->crpb = NULL;
}
/*
* For GEN_I, there's no NCQ, so we have only a single sg_tbl.
* For later hardware, we have one unique sg_tbl per NCQ tag.
*/
for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
if (pp->sg_tbl[tag]) {
if (tag == 0 || !IS_GEN_I(hpriv))
dma_pool_free(hpriv->sg_tbl_pool,
pp->sg_tbl[tag],
pp->sg_tbl_dma[tag]);
pp->sg_tbl[tag] = NULL;
}
}
}
/**
* mv_port_start - Port specific init/start routine.
* @ap: ATA channel to manipulate
*
* Allocate and point to DMA memory, init port private memory,
* zero indices.
*
* LOCKING:
* Inherited from caller.
*/
static int mv_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct mv_host_priv *hpriv = ap->host->private_data;
struct mv_port_priv *pp;
unsigned long flags;
int tag;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
ap->private_data = pp;
pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
if (!pp->crqb)
return -ENOMEM;
pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
if (!pp->crpb)
goto out_port_free_dma_mem;
/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
ap->flags |= ATA_FLAG_AN;
/*
* For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
* For later hardware, we need one unique sg_tbl per NCQ tag.
*/
for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
if (tag == 0 || !IS_GEN_I(hpriv)) {
pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
GFP_KERNEL, &pp->sg_tbl_dma[tag]);
if (!pp->sg_tbl[tag])
goto out_port_free_dma_mem;
} else {
pp->sg_tbl[tag] = pp->sg_tbl[0];
pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
}
}
spin_lock_irqsave(ap->lock, flags);
mv_save_cached_regs(ap);
mv_edma_cfg(ap, 0, 0);
spin_unlock_irqrestore(ap->lock, flags);
return 0;
out_port_free_dma_mem:
mv_port_free_dma_mem(ap);
return -ENOMEM;
}
/**
* mv_port_stop - Port specific cleanup/stop routine.
* @ap: ATA channel to manipulate
*
* Stop DMA, cleanup port memory.
*
* LOCKING:
* This routine uses the host lock to protect the DMA stop.
*/
static void mv_port_stop(struct ata_port *ap)
{
unsigned long flags;
spin_lock_irqsave(ap->lock, flags);
mv_stop_edma(ap);
mv_enable_port_irqs(ap, 0);
spin_unlock_irqrestore(ap->lock, flags);
mv_port_free_dma_mem(ap);
}
/**
* mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
* @qc: queued command whose SG list to source from
*
* Populate the SG list and mark the last entry.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
struct mv_port_priv *pp = qc->ap->private_data;
struct scatterlist *sg;
struct mv_sg *mv_sg, *last_sg = NULL;
unsigned int si;
mv_sg = pp->sg_tbl[qc->hw_tag];
for_each_sg(qc->sg, sg, qc->n_elem, si) {
dma_addr_t addr = sg_dma_address(sg);
u32 sg_len = sg_dma_len(sg);
while (sg_len) {
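/*
 * Each ePRD carries a 16-bit byte count, so clamp every chunk to the
 * 64 KiB window containing addr; a stored count of 0 then encodes a
 * full 64 KiB, as in standard BMDMA PRD tables.
 */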
u32 offset = addr & 0xffff;
u32 len = sg_len;
if (offset + len > 0x10000)
len = 0x10000 - offset;
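/*
 * Split the DMA address into low/high 32-bit halves.  The double
 * 16-bit shift avoids an undefined full 32-bit shift when
 * dma_addr_t is only 32 bits wide.
 */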
mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
mv_sg->flags_size = cpu_to_le32(len & 0xffff);
mv_sg->reserved = 0;
sg_len -= len;
addr += len;
last_sg = mv_sg;
mv_sg++;
}
}
if (likely(last_sg))
last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
mb(); /* ensure data structure is visible to the chipset */
}
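/*
 * Pack one 16-bit CRQB command word: the low byte carries the taskfile
 * register value, the address field (at CRQB_CMD_ADDR_SHIFT) selects the
 * target shadow register, the CRQB_CMD_CS control bits are always set,
 * and CRQB_CMD_LAST marks the final word of the sequence.
 */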
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
(last ? CRQB_CMD_LAST : 0);
*cmdw = cpu_to_le16(tmp);
}
/**
* mv_sff_irq_clear - Clear hardware interrupt after DMA.
* @ap: Port associated with this ATA transaction.
*
* We need this only for ATAPI bmdma transactions,
* as otherwise we experience spurious interrupts
* after libata-sff handles the bmdma interrupts.
*/
static void mv_sff_irq_clear(struct ata_port *ap)
{
mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
}
/**
* mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
* @qc: queued command to check for chipset/DMA compatibility.
*
* The bmdma engines cannot handle speculative data sizes
* (bytecount under/over flow). So only allow DMA for
* data transfer commands with known data sizes.
*
* LOCKING:
* Inherited from caller.
*/
static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
if (scmd) {
switch (scmd->cmnd[0]) {
case READ_6:
case READ_10:
case READ_12:
case WRITE_6:
case WRITE_10:
case WRITE_12:
case GPCMD_READ_CD:
case GPCMD_SEND_DVD_STRUCTURE:
case GPCMD_SEND_CUE_SHEET:
return 0; /* DMA is safe */
}
}
return -EOPNOTSUPP; /* use PIO instead */
}
/**
* mv_bmdma_setup - Set up BMDMA transaction
* @qc: queued command to prepare DMA for.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_bmdma_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *port_mmio = mv_ap_base(ap);
struct mv_port_priv *pp = ap->private_data;
mv_fill_sg(qc);
/* clear all DMA cmd bits */
writel(0, port_mmio + BMDMA_CMD);
/* load PRD table addr. */
writel((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16,
port_mmio + BMDMA_PRD_HIGH);
writelfl(pp->sg_tbl_dma[qc->hw_tag],
port_mmio + BMDMA_PRD_LOW);
/* issue r/w command */
ap->ops->sff_exec_command(ap, &qc->tf);
}
/**
* mv_bmdma_start - Start a BMDMA transaction
* @qc: queued command to start DMA on.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_bmdma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *port_mmio = mv_ap_base(ap);
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
/* start host DMA transaction */
writelfl(cmd, port_mmio + BMDMA_CMD);
}
/**
* mv_bmdma_stop_ap - Stop BMDMA transfer
* @ap: port to stop
*
* Clears the ATA_DMA_START flag in the bmdma control register
*
* LOCKING:
* Inherited from caller.
*/
static void mv_bmdma_stop_ap(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
u32 cmd;
/* clear start/stop bit */
cmd = readl(port_mmio + BMDMA_CMD);
if (cmd & ATA_DMA_START) {
cmd &= ~ATA_DMA_START;
writelfl(cmd, port_mmio + BMDMA_CMD);
/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
ata_sff_dma_pause(ap);
}
}
static void mv_bmdma_stop(struct ata_queued_cmd *qc)
{
mv_bmdma_stop_ap(qc->ap);
}
/**
* mv_bmdma_status - Read BMDMA status
* @ap: port for which to retrieve DMA status.
*
* Read and return equivalent of the sff BMDMA status register.
*
* LOCKING:
* Inherited from caller.
*/
static u8 mv_bmdma_status(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
u32 reg, status;
/*
* Other bits are valid only if ATA_DMA_ACTIVE==0,
* and the ATA_DMA_INTR bit doesn't exist.
*/
reg = readl(port_mmio + BMDMA_STATUS);
if (reg & ATA_DMA_ACTIVE)
status = ATA_DMA_ACTIVE;
else if (reg & ATA_DMA_ERR)
status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
else {
/*
* Just because DMA_ACTIVE is 0 (DMA completed),
* this does _not_ mean the device is "done".
* So we should not yet be signalling ATA_DMA_INTR
 * in some cases, e.g. DSM/TRIM, and perhaps others.
*/
mv_bmdma_stop_ap(ap);
if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
status = 0;
else
status = ATA_DMA_INTR;
}
return status;
}
static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
{
struct ata_taskfile *tf = &qc->tf;
/*
* Workaround for 88SX60x1 FEr SATA#24.
*
* Chip may corrupt WRITEs if multi_count >= 4kB.
* Note that READs are unaffected.
*
* It's not clear if this errata really means "4K bytes",
* or if it always happens for multi_count > 7
* regardless of device sector_size.
*
* So, for safety, any write with multi_count > 7
* gets converted here into a regular PIO write instead:
*/
if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
if (qc->dev->multi_count > 7) {
switch (tf->command) {
case ATA_CMD_WRITE_MULTI:
tf->command = ATA_CMD_PIO_WRITE;
break;
case ATA_CMD_WRITE_MULTI_FUA_EXT:
tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
fallthrough;
case ATA_CMD_WRITE_MULTI_EXT:
tf->command = ATA_CMD_PIO_WRITE_EXT;
break;
}
}
}
}
/**
* mv_qc_prep - Host specific command preparation.
* @qc: queued command to prepare
*
* This routine simply redirects to the general purpose routine
* if command is not DMA. Else, it handles prep of the CRQB
* (command request block), does some sanity checking, and calls
* the SG load routine.
*
* LOCKING:
* Inherited from caller.
*/
static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mv_port_priv *pp = ap->private_data;
__le16 *cw;
struct ata_taskfile *tf = &qc->tf;
u16 flags = 0;
unsigned in_index;
switch (tf->protocol) {
case ATA_PROT_DMA:
if (tf->command == ATA_CMD_DSM)
return AC_ERR_OK;
fallthrough;
case ATA_PROT_NCQ:
break; /* continue below */
case ATA_PROT_PIO:
mv_rw_multi_errata_sata24(qc);
return AC_ERR_OK;
default:
return AC_ERR_OK;
}
/* Fill in command request block */
if (!(tf->flags & ATA_TFLAG_WRITE))
flags |= CRQB_FLAG_READ;
WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
flags |= qc->hw_tag << CRQB_TAG_SHIFT;
flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
/* get current queue index from software */
in_index = pp->req_idx;
pp->crqb[in_index].sg_addr =
cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
pp->crqb[in_index].sg_addr_hi =
cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
cw = &pp->crqb[in_index].ata_cmd[0];
/* Sadly, the CRQB cannot accommodate all registers--there are
* only 11 bytes...so we must pick and choose required
* registers based on the command. So, we drop feature and
* hob_feature for [RW] DMA commands, but they are needed for
* NCQ. NCQ will drop hob_nsect, which is not needed there
* (nsect is used only for the tag; feat/hob_feat hold true nsect).
*/
switch (tf->command) {
case ATA_CMD_READ:
case ATA_CMD_READ_EXT:
case ATA_CMD_WRITE:
case ATA_CMD_WRITE_EXT:
case ATA_CMD_WRITE_FUA_EXT:
mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
break;
case ATA_CMD_FPDMA_READ:
case ATA_CMD_FPDMA_WRITE:
mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
break;
default:
/* The only other commands EDMA supports in non-queued and
* non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
* of which are defined/used by Linux. If we get here, this
* driver needs work.
*/
ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
tf->command);
return AC_ERR_INVALID;
}
mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
return AC_ERR_OK;
mv_fill_sg(qc);
return AC_ERR_OK;
}
/**
* mv_qc_prep_iie - Host specific command preparation.
* @qc: queued command to prepare
*
* This routine simply redirects to the general purpose routine
* if command is not DMA. Else, it handles prep of the CRQB
* (command request block), does some sanity checking, and calls
* the SG load routine.
*
* LOCKING:
* Inherited from caller.
*/
static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mv_port_priv *pp = ap->private_data;
struct mv_crqb_iie *crqb;
struct ata_taskfile *tf = &qc->tf;
unsigned in_index;
u32 flags = 0;
if ((tf->protocol != ATA_PROT_DMA) &&
(tf->protocol != ATA_PROT_NCQ))
return AC_ERR_OK;
if (tf->command == ATA_CMD_DSM)
return AC_ERR_OK; /* use bmdma for this */
/* Fill in Gen IIE command request block */
if (!(tf->flags & ATA_TFLAG_WRITE))
flags |= CRQB_FLAG_READ;
WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
flags |= qc->hw_tag << CRQB_TAG_SHIFT;
flags |= qc->hw_tag << CRQB_HOSTQ_SHIFT;
flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
/* get current queue index from software */
in_index = pp->req_idx;
crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
crqb->flags = cpu_to_le32(flags);
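/*
 * Pack the taskfile into four little-endian words: command + feature,
 * the low LBA bytes + device, the HOB (high-order) bytes, and finally
 * the two sector-count bytes.
 */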
crqb->ata_cmd[0] = cpu_to_le32(
(tf->command << 16) |
(tf->feature << 24)
);
crqb->ata_cmd[1] = cpu_to_le32(
(tf->lbal << 0) |
(tf->lbam << 8) |
(tf->lbah << 16) |
(tf->device << 24)
);
crqb->ata_cmd[2] = cpu_to_le32(
(tf->hob_lbal << 0) |
(tf->hob_lbam << 8) |
(tf->hob_lbah << 16) |
(tf->hob_feature << 24)
);
crqb->ata_cmd[3] = cpu_to_le32(
(tf->nsect << 0) |
(tf->hob_nsect << 8)
);
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
return AC_ERR_OK;
mv_fill_sg(qc);
return AC_ERR_OK;
}
/**
* mv_sff_check_status - fetch device status, if valid
* @ap: ATA port to fetch status from
*
* When using command issue via mv_qc_issue_fis(),
* the initial ATA_BUSY state does not show up in the
* ATA status (shadow) register. This can confuse libata!
*
* So we have a hook here to fake ATA_BUSY for that situation,
* until the first time a BUSY, DRQ, or ERR bit is seen.
*
* The rest of the time, it simply returns the ATA status register.
*/
static u8 mv_sff_check_status(struct ata_port *ap)
{
u8 stat = ioread8(ap->ioaddr.status_addr);
struct mv_port_priv *pp = ap->private_data;
if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
else
stat = ATA_BUSY;
}
return stat;
}
/**
* mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
* @ap: ATA port to send a FIS
* @fis: fis to be sent
* @nwords: number of 32-bit words in the fis
*/
static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
{
void __iomem *port_mmio = mv_ap_base(ap);
u32 ifctl, old_ifctl, ifstat;
int i, timeout = 200, final_word = nwords - 1;
/* Initiate FIS transmission mode */
old_ifctl = readl(port_mmio + SATA_IFCTL);
ifctl = 0x100 | (old_ifctl & 0xf);
writelfl(ifctl, port_mmio + SATA_IFCTL);
/* Send all words of the FIS except for the final word */
for (i = 0; i < final_word; ++i)
writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
/* Flag end-of-transmission, and then send the final word */
writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
/*
* Wait for FIS transmission to complete.
* This typically takes just a single iteration.
*/
do {
ifstat = readl(port_mmio + SATA_IFSTAT);
} while (!(ifstat & 0x1000) && --timeout);
/* Restore original port configuration */
writelfl(old_ifctl, port_mmio + SATA_IFCTL);
/* See if it worked */
if ((ifstat & 0x3000) != 0x1000) {
ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
__func__, ifstat);
return AC_ERR_OTHER;
}
return 0;
}
/**
* mv_qc_issue_fis - Issue a command directly as a FIS
* @qc: queued command to start
*
* Note that the ATA shadow registers are not updated
* after command issue, so the device will appear "READY"
* if polled, even while it is BUSY processing the command.
*
* So we use a status hook to fake ATA_BUSY until the drive changes state.
*
* Note: we don't get updated shadow regs on *completion*
* of non-data commands. So avoid sending them via this function,
* as they will appear to have completed immediately.
*
* GEN_IIE has special registers that we could get the result tf from,
* but earlier chipsets do not. For now, we ignore those registers.
*/
static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mv_port_priv *pp = ap->private_data;
struct ata_link *link = qc->dev->link;
u32 fis[5];
int err = 0;
ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
if (err)
return err;
switch (qc->tf.protocol) {
case ATAPI_PROT_PIO:
pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
fallthrough;
case ATAPI_PROT_NODATA:
ap->hsm_task_state = HSM_ST_FIRST;
break;
case ATA_PROT_PIO:
pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
if (qc->tf.flags & ATA_TFLAG_WRITE)
ap->hsm_task_state = HSM_ST_FIRST;
else
ap->hsm_task_state = HSM_ST;
break;
default:
ap->hsm_task_state = HSM_ST_LAST;
break;
}
if (qc->tf.flags & ATA_TFLAG_POLLING)
ata_sff_queue_pio_task(link, 0);
return 0;
}
/**
* mv_qc_issue - Initiate a command to the host
* @qc: queued command to start
*
* This routine simply redirects to the general purpose routine
* if command is not DMA. Else, it sanity checks our local
* caches of the request producer/consumer indices then enables
* DMA and bumps the request producer index.
*
* LOCKING:
* Inherited from caller.
*/
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
static int limit_warnings = 10;
struct ata_port *ap = qc->ap;
void __iomem *port_mmio = mv_ap_base(ap);
struct mv_port_priv *pp = ap->private_data;
u32 in_index;
unsigned int port_irqs;
pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
if (qc->tf.command == ATA_CMD_DSM) {
if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */
return AC_ERR_OTHER;
break; /* use bmdma for this */
}
fallthrough;
case ATA_PROT_NCQ:
mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
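/* Advance our producer index in the 32-entry circular request queue */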
pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
/* Write the request in pointer to kick the EDMA to life */
writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
port_mmio + EDMA_REQ_Q_IN_PTR);
return 0;
case ATA_PROT_PIO:
/*
* Errata SATA#16, SATA#24: warn if multiple DRQs expected.
*
* Someday, we might implement special polling workarounds
* for these, but it all seems rather unnecessary since we
* normally use only DMA for commands which transfer more
* than a single block of data.
*
* Much of the time, this could just work regardless.
* So for now, just log the incident, and allow the attempt.
*/
if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
--limit_warnings;
ata_link_warn(qc->dev->link, DRV_NAME
": attempting PIO w/multiple DRQ: "
"this may fail due to h/w errata\n");
}
fallthrough;
case ATA_PROT_NODATA:
case ATAPI_PROT_PIO:
case ATAPI_PROT_NODATA:
if (ap->flags & ATA_FLAG_PIO_POLLING)
qc->tf.flags |= ATA_TFLAG_POLLING;
break;
}
if (qc->tf.flags & ATA_TFLAG_POLLING)
port_irqs = ERR_IRQ; /* mask device interrupt when polling */
else
port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */
/*
* We're about to send a non-EDMA capable command to the
* port. Turn off EDMA so there won't be problems accessing
 * the shadow block and other registers.
*/
mv_stop_edma(ap);
mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
mv_pmp_select(ap, qc->dev->link->pmp);
if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
struct mv_host_priv *hpriv = ap->host->private_data;
/*
* Workaround for 88SX60x1 FEr SATA#25 (part 2).
*
* After any NCQ error, the READ_LOG_EXT command
* from libata-eh *must* use mv_qc_issue_fis().
* Otherwise it might fail, due to chip errata.
*
* Rather than special-case it, we'll just *always*
* use this method here for READ_LOG_EXT, making for
* easier testing.
*/
if (IS_GEN_II(hpriv))
return mv_qc_issue_fis(qc);
}
return ata_bmdma_qc_issue(qc);
}
static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
{
struct mv_port_priv *pp = ap->private_data;
struct ata_queued_cmd *qc;
if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
return NULL;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
return qc;
return NULL;
}
static void mv_pmp_error_handler(struct ata_port *ap)
{
unsigned int pmp, pmp_map;
struct mv_port_priv *pp = ap->private_data;
if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
/*
* Perform NCQ error analysis on failed PMPs
* before we freeze the port entirely.
*
* The failed PMPs are marked earlier by mv_pmp_eh_prep().
*/
pmp_map = pp->delayed_eh_pmp_map;
pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
for (pmp = 0; pmp_map != 0; pmp++) {
unsigned int this_pmp = (1 << pmp);
if (pmp_map & this_pmp) {
struct ata_link *link = &ap->pmp_link[pmp];
pmp_map &= ~this_pmp;
ata_eh_analyze_ncq_error(link);
}
}
ata_port_freeze(ap);
}
sata_pmp_error_handler(ap);
}
static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
return readl(port_mmio + SATA_TESTCTL) >> 16;
}
static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
{
unsigned int pmp;
/*
* Initialize EH info for PMPs which saw device errors
*/
for (pmp = 0; pmp_map != 0; pmp++) {
unsigned int this_pmp = (1 << pmp);
if (pmp_map & this_pmp) {
struct ata_link *link = &ap->pmp_link[pmp];
struct ata_eh_info *ehi = &link->eh_info;
pmp_map &= ~this_pmp;
ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi, "dev err");
ehi->err_mask |= AC_ERR_DEV;
ehi->action |= ATA_EH_RESET;
ata_link_abort(link);
}
}
}
static int mv_req_q_empty(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
u32 in_ptr, out_ptr;
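/* Both pointers are masked indices into the circular request queue;
 * the queue is empty when producer and consumer coincide */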
in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
return (in_ptr == out_ptr); /* 1 == queue_is_empty */
}
static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
{
struct mv_port_priv *pp = ap->private_data;
int failed_links;
unsigned int old_map, new_map;
/*
* Device error during FBS+NCQ operation:
*
* Set a port flag to prevent further I/O being enqueued.
* Leave the EDMA running to drain outstanding commands from this port.
* Perform the post-mortem/EH only when all responses are complete.
* Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
*/
if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
pp->delayed_eh_pmp_map = 0;
}
old_map = pp->delayed_eh_pmp_map;
new_map = old_map | mv_get_err_pmp_map(ap);
if (old_map != new_map) {
pp->delayed_eh_pmp_map = new_map;
mv_pmp_eh_prep(ap, new_map & ~old_map);
}
failed_links = hweight16(new_map);
ata_port_info(ap,
"%s: pmp_map=%04x qc_map=%04llx failed_links=%d nr_active_links=%d\n",
__func__, pp->delayed_eh_pmp_map,
ap->qc_active, failed_links,
ap->nr_active_links);
if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
mv_process_crpb_entries(ap, pp);
mv_stop_edma(ap);
mv_eh_freeze(ap);
ata_port_info(ap, "%s: done\n", __func__);
return 1; /* handled */
}
ata_port_info(ap, "%s: waiting\n", __func__);
return 1; /* handled */
}
static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
{
/*
* Possible future enhancement:
*
* FBS+non-NCQ operation is not yet implemented.
* See related notes in mv_edma_cfg().
*
* Device error during FBS+non-NCQ operation:
*
* We need to snapshot the shadow registers for each failed command.
* Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
*/
return 0; /* not handled */
}
static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
{
struct mv_port_priv *pp = ap->private_data;
if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
return 0; /* EDMA was not active: not handled */
if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
return 0; /* FBS was not active: not handled */
if (!(edma_err_cause & EDMA_ERR_DEV))
return 0; /* non DEV error: not handled */
edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
return 0; /* other problems: not handled */
if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
/*
* EDMA should NOT have self-disabled for this case.
* If it did, then something is wrong elsewhere,
* and we cannot handle it here.
*/
if (edma_err_cause & EDMA_ERR_SELF_DIS) {
ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
__func__, edma_err_cause, pp->pp_flags);
return 0; /* not handled */
}
return mv_handle_fbs_ncq_dev_err(ap);
} else {
/*
* EDMA should have self-disabled for this case.
* If it did not, then something is wrong elsewhere,
* and we cannot handle it here.
*/
if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
__func__, edma_err_cause, pp->pp_flags);
return 0; /* not handled */
}
return mv_handle_fbs_non_ncq_dev_err(ap);
}
return 0; /* not handled */
}
static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
{
struct ata_eh_info *ehi = &ap->link.eh_info;
char *when = "idle";
ata_ehi_clear_desc(ehi);
if (edma_was_enabled) {
when = "EDMA enabled";
} else {
struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
when = "polling";
}
ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
ehi->err_mask |= AC_ERR_OTHER;
ehi->action |= ATA_EH_RESET;
ata_port_freeze(ap);
}
/**
* mv_err_intr - Handle error interrupts on the port
* @ap: ATA channel to manipulate
*
* Most cases require a full reset of the chip's state machine,
* which also performs a COMRESET.
* Also, if the port disabled DMA, update our cached copy to match.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_err_intr(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
u32 edma_err_cause, eh_freeze_mask, serr = 0;
u32 fis_cause = 0;
struct mv_port_priv *pp = ap->private_data;
struct mv_host_priv *hpriv = ap->host->private_data;
unsigned int action = 0, err_mask = 0;
struct ata_eh_info *ehi = &ap->link.eh_info;
struct ata_queued_cmd *qc;
int abort = 0;
/*
* Read and clear the SError and err_cause bits.
* For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
* the FIS_IRQ_CAUSE register before clearing edma_err_cause.
*/
sata_scr_read(&ap->link, SCR_ERROR, &serr);
sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
}
writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
if (edma_err_cause & EDMA_ERR_DEV) {
/*
* Device errors during FIS-based switching operation
* require special handling.
*/
if (mv_handle_dev_err(ap, edma_err_cause))
return;
}
qc = mv_get_active_qc(ap);
ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
edma_err_cause, pp->pp_flags);
if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
if (fis_cause & FIS_IRQ_CAUSE_AN) {
u32 ec = edma_err_cause &
~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
sata_async_notification(ap);
if (!ec)
return; /* Just an AN; no need for the nukes */
ata_ehi_push_desc(ehi, "SDB notify");
}
}
/*
* All generations share these EDMA error cause bits:
*/
if (edma_err_cause & EDMA_ERR_DEV) {
err_mask |= AC_ERR_DEV;
action |= ATA_EH_RESET;
ata_ehi_push_desc(ehi, "dev error");
}
if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
EDMA_ERR_INTRL_PAR)) {
err_mask |= AC_ERR_ATA_BUS;
action |= ATA_EH_RESET;
ata_ehi_push_desc(ehi, "parity error");
}
if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
ata_ehi_hotplugged(ehi);
ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
"dev disconnect" : "dev connect");
action |= ATA_EH_RESET;
}
/*
* Gen-I has a different SELF_DIS bit,
* different FREEZE bits, and no SERR bit:
*/
if (IS_GEN_I(hpriv)) {
eh_freeze_mask = EDMA_EH_FREEZE_5;
if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
ata_ehi_push_desc(ehi, "EDMA self-disable");
}
} else {
eh_freeze_mask = EDMA_EH_FREEZE;
if (edma_err_cause & EDMA_ERR_SELF_DIS) {
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
ata_ehi_push_desc(ehi, "EDMA self-disable");
}
if (edma_err_cause & EDMA_ERR_SERR) {
ata_ehi_push_desc(ehi, "SError=%08x", serr);
err_mask |= AC_ERR_ATA_BUS;
action |= ATA_EH_RESET;
}
}
if (!err_mask) {
err_mask = AC_ERR_OTHER;
action |= ATA_EH_RESET;
}
ehi->serror |= serr;
ehi->action |= action;
if (qc)
qc->err_mask |= err_mask;
else
ehi->err_mask |= err_mask;
if (err_mask == AC_ERR_DEV) {
/*
* Cannot do ata_port_freeze() here,
* because it would kill PIO access,
* which is needed for further diagnosis.
*/
mv_eh_freeze(ap);
abort = 1;
} else if (edma_err_cause & eh_freeze_mask) {
/*
* Note to self: ata_port_freeze() calls ata_port_abort()
*/
ata_port_freeze(ap);
} else {
abort = 1;
}
if (abort) {
if (qc)
ata_link_abort(qc->dev->link);
else
ata_port_abort(ap);
}
}
static bool mv_process_crpb_response(struct ata_port *ap,
struct mv_crpb *response, unsigned int tag, int ncq_enabled)
{
u8 ata_status;
u16 edma_status = le16_to_cpu(response->flags);
/*
* edma_status from a response queue entry:
* LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
* MSB is saved ATA status from command completion.
*/
if (!ncq_enabled) {
u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
if (err_cause) {
/*
* Error will be seen/handled by
* mv_err_intr(). So do nothing at all here.
*/
return false;
}
}
ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
if (!ac_err_mask(ata_status))
return true;
/* else: leave it for mv_err_intr() */
return false;
}
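/*
 * Illustrative sketch (hypothetical helper, not in the driver): splitting
 * a CRPB status word into the two halves described above.
 */
#if 0
static void example_split_edma_status(u16 edma_status,
				      u8 *err_cause, u8 *ata_status)
{
	*err_cause  = edma_status & 0xff;	/* EDMA_ERR_IRQ_CAUSE bits (non-NCQ) */
	*ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT; /* saved ATA status */
}
#endif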
static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
{
void __iomem *port_mmio = mv_ap_base(ap);
struct mv_host_priv *hpriv = ap->host->private_data;
u32 in_index;
bool work_done = false;
u32 done_mask = 0;
int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
/* Get the hardware queue position index */
in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
/* Process new responses received since the last time we looked */
while (in_index != pp->resp_idx) {
unsigned int tag;
struct mv_crpb *response = &pp->crpb[pp->resp_idx];
pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
if (IS_GEN_I(hpriv)) {
/* 50xx: no NCQ, only one command active at a time */
tag = ap->link.active_tag;
} else {
/* Gen II/IIE: get command tag from CRPB entry */
tag = le16_to_cpu(response->id) & 0x1f;
}
if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
done_mask |= 1 << tag;
work_done = true;
}
if (work_done) {
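/* ata_qc_complete_multiple() takes the mask of tags still active,
 * so XOR out the tags we have just seen complete */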
ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
/* Update the software queue position index in hardware */
writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
(pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
port_mmio + EDMA_RSP_Q_OUT_PTR);
}
}
static void mv_port_intr(struct ata_port *ap, u32 port_cause)
{
struct mv_port_priv *pp;
int edma_was_enabled;
/*
* Grab a snapshot of the EDMA_EN flag setting,
* so that we have a consistent view for this port,
 * even if one of the routines we call changes it.
*/
pp = ap->private_data;
edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
/*
* Process completed CRPB response(s) before other events.
*/
if (edma_was_enabled && (port_cause & DONE_IRQ)) {
mv_process_crpb_entries(ap, pp);
if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
mv_handle_fbs_ncq_dev_err(ap);
}
/*
* Handle chip-reported errors, or continue on to handle PIO.
*/
if (unlikely(port_cause & ERR_IRQ)) {
mv_err_intr(ap);
} else if (!edma_was_enabled) {
struct ata_queued_cmd *qc = mv_get_active_qc(ap);
if (qc)
ata_bmdma_port_intr(ap, qc);
else
mv_unexpected_intr(ap, edma_was_enabled);
}
}
/**
* mv_host_intr - Handle all interrupts on the given host controller
* @host: host specific structure
* @main_irq_cause: Main interrupt cause register for the chip.
*
* LOCKING:
* Inherited from caller.
*/
static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
{
struct mv_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->base, *hc_mmio;
unsigned int handled = 0, port;
/* If asserted, clear the "all ports" IRQ coalescing bit */
if (main_irq_cause & ALL_PORTS_COAL_DONE)
writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
for (port = 0; port < hpriv->n_ports; port++) {
struct ata_port *ap = host->ports[port];
unsigned int p, shift, hardport, port_cause;
MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
/*
* Each hc within the host has its own hc_irq_cause register,
* where the interrupting ports bits get ack'd.
*/
if (hardport == 0) { /* first port on this hc ? */
u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
u32 port_mask, ack_irqs;
/*
* Skip this entire hc if nothing pending for any ports
*/
if (!hc_cause) {
port += MV_PORTS_PER_HC - 1;
continue;
}
/*
* We don't need/want to read the hc_irq_cause register,
* because doing so hurts performance, and
* main_irq_cause already gives us everything we need.
*
* But we do have to *write* to the hc_irq_cause to ack
* the ports that we are handling this time through.
*
* This requires that we create a bitmap for those
* ports which interrupted us, and use that bitmap
* to ack (only) those ports via hc_irq_cause.
*/
ack_irqs = 0;
if (hc_cause & PORTS_0_3_COAL_DONE)
ack_irqs = HC_COAL_IRQ;
for (p = 0; p < MV_PORTS_PER_HC; ++p) {
if ((port + p) >= hpriv->n_ports)
break;
port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
if (hc_cause & port_mask)
ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
}
hc_mmio = mv_hc_base_from_port(mmio, port);
writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
handled = 1;
}
/*
* Handle interrupts signalled for this port:
*/
port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
if (port_cause)
mv_port_intr(ap, port_cause);
}
return handled;
}
static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
struct mv_host_priv *hpriv = host->private_data;
struct ata_port *ap;
struct ata_queued_cmd *qc;
struct ata_eh_info *ehi;
unsigned int i, err_mask, printed = 0;
u32 err_cause;
err_cause = readl(mmio + hpriv->irq_cause_offset);
dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
dev_dbg(host->dev, "%s: All regs @ PCI error\n", __func__);
mv_dump_all_regs(mmio, to_pci_dev(host->dev));
writelfl(0, mmio + hpriv->irq_cause_offset);
for (i = 0; i < host->n_ports; i++) {
ap = host->ports[i];
if (!ata_link_offline(&ap->link)) {
ehi = &ap->link.eh_info;
ata_ehi_clear_desc(ehi);
if (!printed++)
ata_ehi_push_desc(ehi,
"PCI err cause 0x%08x", err_cause);
err_mask = AC_ERR_HOST_BUS;
ehi->action = ATA_EH_RESET;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc)
qc->err_mask |= err_mask;
else
ehi->err_mask |= err_mask;
ata_port_freeze(ap);
}
}
return 1; /* handled */
}
/**
* mv_interrupt - Main interrupt event handler
* @irq: unused
* @dev_instance: private data; in this case the host structure
*
 * Read the read-only register to determine if any host
* controllers have pending interrupts. If so, call lower level
* routine to handle. Also check for PCI errors which are only
* reported here.
*
* LOCKING:
* This routine holds the host lock while processing pending
* interrupts.
*/
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct mv_host_priv *hpriv = host->private_data;
unsigned int handled = 0;
int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
u32 main_irq_cause, pending_irqs;
spin_lock(&host->lock);
/* for MSI: block new interrupts while in here */
if (using_msi)
mv_write_main_irq_mask(0, hpriv);
main_irq_cause = readl(hpriv->main_irq_cause_addr);
pending_irqs = main_irq_cause & hpriv->main_irq_mask;
/*
* Deal with cases where we either have nothing pending, or have read
* a bogus register value which can indicate HW removal or PCI fault.
*/
if (pending_irqs && main_irq_cause != 0xffffffffU) {
if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
handled = mv_pci_error(host, hpriv->base);
else
handled = mv_host_intr(host, pending_irqs);
}
/* for MSI: unmask; interrupt cause bits will retrigger now */
if (using_msi)
mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
spin_unlock(&host->lock);
return IRQ_RETVAL(handled);
}
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
unsigned int ofs;
switch (sc_reg_in) {
case SCR_STATUS:
case SCR_ERROR:
case SCR_CONTROL:
ofs = sc_reg_in * sizeof(u32);
break;
default:
ofs = 0xffffffffU;
break;
}
return ofs;
}
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
struct mv_host_priv *hpriv = link->ap->host->private_data;
void __iomem *mmio = hpriv->base;
void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU) {
*val = readl(addr + ofs);
return 0;
} else
return -EINVAL;
}
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
struct mv_host_priv *hpriv = link->ap->host->private_data;
void __iomem *mmio = hpriv->base;
void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU) {
writelfl(val, addr + ofs);
return 0;
} else
return -EINVAL;
}
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
int early_5080;
early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
if (!early_5080) {
u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
tmp |= (1 << 0);
writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
mv_reset_pci_bus(host, mmio);
}
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
writel(0x0fcfffff, mmio + FLASH_CTL);
}
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio)
{
void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
u32 tmp;
tmp = readl(phy_mmio + MV5_PHY_MODE);
hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
}
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
u32 tmp;
writel(0, mmio + GPIO_PORT_CTL);
/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
tmp |= ~(1 << 0);
writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port)
{
void __iomem *phy_mmio = mv5_phy_base(mmio, port);
const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
u32 tmp;
int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
if (fix_apm_sq) {
tmp = readl(phy_mmio + MV5_LTMODE);
tmp |= (1 << 19);
writel(tmp, phy_mmio + MV5_LTMODE);
tmp = readl(phy_mmio + MV5_PHY_CTL);
tmp &= ~0x3;
tmp |= 0x1;
writel(tmp, phy_mmio + MV5_PHY_CTL);
}
tmp = readl(phy_mmio + MV5_PHY_MODE);
tmp &= ~mask;
tmp |= hpriv->signal[port].pre;
tmp |= hpriv->signal[port].amps;
writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port)
{
void __iomem *port_mmio = mv_port_base(mmio, port);
mv_reset_channel(hpriv, mmio, port);
ZERO(0x028); /* command */
writel(0x11f, port_mmio + EDMA_CFG);
ZERO(0x004); /* timer */
ZERO(0x008); /* irq err cause */
ZERO(0x00c); /* irq err mask */
ZERO(0x010); /* rq bah */
ZERO(0x014); /* rq inp */
ZERO(0x018); /* rq outp */
ZERO(0x01c); /* respq bah */
ZERO(0x024); /* respq outp */
ZERO(0x020); /* respq inp */
ZERO(0x02c); /* test control */
writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int hc)
{
void __iomem *hc_mmio = mv_hc_base(mmio, hc);
u32 tmp;
ZERO(0x00c);
ZERO(0x010);
ZERO(0x014);
ZERO(0x018);
tmp = readl(hc_mmio + 0x20);
tmp &= 0x1c1c1c1c;
tmp |= 0x03030303;
writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio,
unsigned int n_hc)
{
struct mv_host_priv *hpriv = host->private_data;
unsigned int hc, port;
for (hc = 0; hc < n_hc; hc++) {
for (port = 0; port < MV_PORTS_PER_HC; port++)
mv5_reset_hc_port(hpriv, mmio,
(hc * MV_PORTS_PER_HC) + port);
mv5_reset_one_hc(hpriv, mmio, hc);
}
return 0;
}
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
struct mv_host_priv *hpriv = host->private_data;
u32 tmp;
tmp = readl(mmio + MV_PCI_MODE);
tmp &= 0xff00ffff;
writel(tmp, mmio + MV_PCI_MODE);
ZERO(MV_PCI_DISC_TIMER);
ZERO(MV_PCI_MSI_TRIGGER);
writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
ZERO(MV_PCI_SERR_MASK);
ZERO(hpriv->irq_cause_offset);
ZERO(hpriv->irq_mask_offset);
ZERO(MV_PCI_ERR_LOW_ADDRESS);
ZERO(MV_PCI_ERR_HIGH_ADDRESS);
ZERO(MV_PCI_ERR_ATTRIBUTE);
ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
u32 tmp;
mv5_reset_flash(hpriv, mmio);
tmp = readl(mmio + GPIO_PORT_CTL);
tmp &= 0x3;
tmp |= (1 << 5) | (1 << 6);
writel(tmp, mmio + GPIO_PORT_CTL);
}
/*
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @host: ATA host containing the HBA
 * @mmio: base address of the HBA
 * @n_hc: number of host controllers (unused)
*
* This routine only applies to 6xxx parts.
*
* LOCKING:
* Inherited from caller.
*/
static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio,
unsigned int n_hc)
{
void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
int i, rc = 0;
u32 t;
/* Following procedure defined in PCI "main command and status
* register" table.
*/
t = readl(reg);
writel(t | STOP_PCI_MASTER, reg);
for (i = 0; i < 1000; i++) {
udelay(1);
t = readl(reg);
if (PCI_MASTER_EMPTY & t)
break;
}
if (!(PCI_MASTER_EMPTY & t)) {
dev_err(host->dev, "PCI master won't flush\n");
rc = 1;
goto done;
}
/* set reset */
i = 5;
do {
writel(t | GLOB_SFT_RST, reg);
t = readl(reg);
udelay(1);
} while (!(GLOB_SFT_RST & t) && (i-- > 0));
if (!(GLOB_SFT_RST & t)) {
dev_err(host->dev, "can't set global reset\n");
rc = 1;
goto done;
}
/* clear reset and *reenable the PCI master* (not mentioned in spec) */
i = 5;
do {
writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
t = readl(reg);
udelay(1);
} while ((GLOB_SFT_RST & t) && (i-- > 0));
if (GLOB_SFT_RST & t) {
dev_err(host->dev, "can't clear global reset\n");
rc = 1;
}
done:
return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio)
{
void __iomem *port_mmio;
u32 tmp;
tmp = readl(mmio + RESET_CFG);
if ((tmp & (1 << 0)) == 0) {
hpriv->signal[idx].amps = 0x7 << 8;
hpriv->signal[idx].pre = 0x1 << 5;
return;
}
port_mmio = mv_port_base(mmio, idx);
tmp = readl(port_mmio + PHY_MODE2);
hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
}
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
writel(0x00000060, mmio + GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port)
{
void __iomem *port_mmio = mv_port_base(mmio, port);
u32 hp_flags = hpriv->hp_flags;
int fix_phy_mode2 =
hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
int fix_phy_mode4 =
hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
u32 m2, m3;
if (fix_phy_mode2) {
m2 = readl(port_mmio + PHY_MODE2);
m2 &= ~(1 << 16);
m2 |= (1 << 31);
writel(m2, port_mmio + PHY_MODE2);
udelay(200);
m2 = readl(port_mmio + PHY_MODE2);
m2 &= ~((1 << 16) | (1 << 31));
writel(m2, port_mmio + PHY_MODE2);
udelay(200);
}
/*
* Gen-II/IIe PHY_MODE3 errata RM#2:
* Achieves better receiver noise performance than the h/w default:
*/
m3 = readl(port_mmio + PHY_MODE3);
m3 = (m3 & 0x1f) | (0x5555601 << 5);
/* Guideline 88F5182 (GL# SATA-S11) */
if (IS_SOC(hpriv))
m3 &= ~0x1c;
if (fix_phy_mode4) {
u32 m4 = readl(port_mmio + PHY_MODE4);
/*
* Enforce reserved-bit restrictions on GenIIe devices only.
* For earlier chipsets, force only the internal config field
* (workaround for errata FEr SATA#10 part 1).
*/
if (IS_GEN_IIE(hpriv))
m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
else
m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
writel(m4, port_mmio + PHY_MODE4);
}
/*
* Workaround for 60x1-B2 errata SATA#13:
* Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
* so we must always rewrite PHY_MODE3 after PHY_MODE4.
* Or ensure we use writelfl() when writing PHY_MODE4.
*/
writel(m3, port_mmio + PHY_MODE3);
/* Revert values of pre-emphasis and signal amps to the saved ones */
m2 = readl(port_mmio + PHY_MODE2);
m2 &= ~MV_M2_PREAMP_MASK;
m2 |= hpriv->signal[port].amps;
m2 |= hpriv->signal[port].pre;
m2 &= ~(1 << 16);
/* according to mvSata 3.6.1, some IIE values are fixed */
if (IS_GEN_IIE(hpriv)) {
m2 &= ~0xC30FF01F;
m2 |= 0x0000900F;
}
writel(m2, port_mmio + PHY_MODE2);
}
/*
 * TODO: use the generic LED interface to configure the SATA Presence
 * & Activity LEDs on the board.
 */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
void __iomem *mmio)
{
return;
}
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio)
{
void __iomem *port_mmio;
u32 tmp;
port_mmio = mv_port_base(mmio, idx);
tmp = readl(port_mmio + PHY_MODE2);
hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
}
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
void __iomem *mmio, unsigned int port)
{
void __iomem *port_mmio = mv_port_base(mmio, port);
mv_reset_channel(hpriv, mmio, port);
ZERO(0x028); /* command */
writel(0x101f, port_mmio + EDMA_CFG);
ZERO(0x004); /* timer */
ZERO(0x008); /* irq err cause */
ZERO(0x00c); /* irq err mask */
ZERO(0x010); /* rq bah */
ZERO(0x014); /* rq inp */
ZERO(0x018); /* rq outp */
ZERO(0x01c); /* respq bah */
ZERO(0x024); /* respq outp */
ZERO(0x020); /* respq inp */
ZERO(0x02c); /* test control */
writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
void __iomem *mmio)
{
void __iomem *hc_mmio = mv_hc_base(mmio, 0);
ZERO(0x00c);
ZERO(0x010);
ZERO(0x014);
}
#undef ZERO
static int mv_soc_reset_hc(struct ata_host *host,
void __iomem *mmio, unsigned int n_hc)
{
struct mv_host_priv *hpriv = host->private_data;
unsigned int port;
for (port = 0; port < hpriv->n_ports; port++)
mv_soc_reset_hc_port(hpriv, mmio, port);
mv_soc_reset_one_hc(hpriv, mmio);
return 0;
}
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
void __iomem *mmio)
{
return;
}
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
return;
}
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
void __iomem *mmio, unsigned int port)
{
void __iomem *port_mmio = mv_port_base(mmio, port);
u32 reg;
reg = readl(port_mmio + PHY_MODE3);
reg &= ~(0x3 << 27); /* SELMUPF (bits 28:27) to 1 */
reg |= (0x1 << 27);
reg &= ~(0x3 << 29); /* SELMUPI (bits 30:29) to 1 */
reg |= (0x1 << 29);
writel(reg, port_mmio + PHY_MODE3);
reg = readl(port_mmio + PHY_MODE4);
reg &= ~0x1; /* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
reg |= (0x1 << 16);
writel(reg, port_mmio + PHY_MODE4);
reg = readl(port_mmio + PHY_MODE9_GEN2);
reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */
reg |= 0x8;
reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */
writel(reg, port_mmio + PHY_MODE9_GEN2);
reg = readl(port_mmio + PHY_MODE9_GEN1);
reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */
reg |= 0x8;
reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */
writel(reg, port_mmio + PHY_MODE9_GEN1);
}
/*
 * soc_is_65n - check if the SoC is a 65 nm device
 *
 * Detect the type of the SoC by reading the PHYCFG_OFS register:
 * it exists only on the 65 nm devices, where it holds a non-zero value;
 * reading it on older devices returns 0.
*/
static bool soc_is_65n(struct mv_host_priv *hpriv)
{
void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
if (readl(port0_mmio + PHYCFG_OFS))
return true;
return false;
}
static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{
u32 ifcfg = readl(port_mmio + SATA_IFCFG);
ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */
if (want_gen2i)
ifcfg |= (1 << 7); /* enable gen2i speed */
writelfl(ifcfg, port_mmio + SATA_IFCFG);
}
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port_no)
{
void __iomem *port_mmio = mv_port_base(mmio, port_no);
/*
* The datasheet warns against setting EDMA_RESET when EDMA is active
* (but doesn't say what the problem might be). So we first try
* to disable the EDMA engine before doing the EDMA_RESET operation.
*/
mv_stop_edma_engine(port_mmio);
writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
if (!IS_GEN_I(hpriv)) {
/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
mv_setup_ifcfg(port_mmio, 1);
}
/*
* Strobing EDMA_RESET here causes a hard reset of the SATA transport,
* link, and physical layers. It resets all SATA interface registers
* (except for SATA_IFCFG), and issues a COMRESET to the dev.
*/
writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
udelay(25); /* allow reset propagation */
writelfl(0, port_mmio + EDMA_CMD);
hpriv->ops->phy_errata(hpriv, mmio, port_no);
if (IS_GEN_I(hpriv))
usleep_range(500, 1000);
}
static void mv_pmp_select(struct ata_port *ap, int pmp)
{
if (sata_pmp_supported(ap)) {
void __iomem *port_mmio = mv_ap_base(ap);
u32 reg = readl(port_mmio + SATA_IFCTL);
int old = reg & 0xf;
if (old != pmp) {
reg = (reg & ~0xf) | pmp;
writelfl(reg, port_mmio + SATA_IFCTL);
}
}
}
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
mv_pmp_select(link->ap, sata_srst_pmp(link));
return sata_std_hardreset(link, class, deadline);
}
static int mv_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
mv_pmp_select(link->ap, sata_srst_pmp(link));
return ata_sff_softreset(link, class, deadline);
}
static int mv_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct mv_host_priv *hpriv = ap->host->private_data;
struct mv_port_priv *pp = ap->private_data;
void __iomem *mmio = hpriv->base;
int rc, attempts = 0, extra = 0;
u32 sstatus;
bool online;
mv_reset_channel(hpriv, mmio, ap->port_no);
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
pp->pp_flags &=
~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
/* Workaround for errata FEr SATA#10 (part 2) */
do {
const unsigned int *timing =
sata_ehc_deb_timing(&link->eh_context);
rc = sata_link_hardreset(link, timing, deadline + extra,
&online, NULL);
rc = online ? -EAGAIN : rc;
if (rc)
return rc;
sata_scr_read(link, SCR_STATUS, &sstatus);
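/*
 * SStatus 0x113/0x123: link up at 1.5/3.0 Gb/s; 0x0: no device.
 * 0x121 (DET=1, SPD=2) means a device was detected but communication
 * never got established, likely a failed 3.0 Gb/s negotiation.
 */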
if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
/* Force 1.5gb/s link speed and try again */
mv_setup_ifcfg(mv_ap_base(ap), 0);
if (time_after(jiffies + HZ, deadline))
extra = HZ; /* only extend it once, max */
}
} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
mv_save_cached_regs(ap);
mv_edma_cfg(ap, 0, 0);
return rc;
}
static void mv_eh_freeze(struct ata_port *ap)
{
mv_stop_edma(ap);
mv_enable_port_irqs(ap, 0);
}
static void mv_eh_thaw(struct ata_port *ap)
{
struct mv_host_priv *hpriv = ap->host->private_data;
unsigned int port = ap->port_no;
unsigned int hardport = mv_hardport_from_port(port);
void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
void __iomem *port_mmio = mv_ap_base(ap);
u32 hc_irq_cause;
/* clear EDMA errors on this port */
writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
/* clear pending irq events */
hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
mv_enable_port_irqs(ap, ERR_IRQ);
}
/**
* mv_port_init - Perform some early initialization on a single port.
* @port: libata data structure storing shadow register addresses
* @port_mmio: base address of the port
*
* Initialize shadow register mmio addresses, clear outstanding
* interrupts on the port, and unmask interrupts for the future
* start of the port.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
/* PIO related setup */
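/* The taskfile shadow registers are mapped one u32 apart,
 * hence the sizeof(u32) stride below */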
port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
port->error_addr =
port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
port->status_addr =
port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
/* special case: control/altstatus doesn't have ATA_REG_ address */
port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
/* Clear any currently outstanding port interrupt conditions */
serr = port_mmio + mv_scr_offset(SCR_ERROR);
writelfl(readl(serr), serr);
writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
/* unmask all non-transient EDMA error interrupts */
writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
}
static unsigned int mv_in_pcix_mode(struct ata_host *host)
{
struct mv_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->base;
u32 reg;
if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
return 0; /* not PCI-X capable */
reg = readl(mmio + MV_PCI_MODE);
if ((reg & MV_PCI_MODE_MASK) == 0)
return 0; /* conventional PCI mode */
return 1; /* chip is in PCI-X mode */
}
static int mv_pci_cut_through_okay(struct ata_host *host)
{
struct mv_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->base;
u32 reg;
if (!mv_in_pcix_mode(host)) {
reg = readl(mmio + MV_PCI_COMMAND);
if (reg & MV_PCI_COMMAND_MRDTRIG)
return 0; /* not okay */
}
return 1; /* okay */
}
static void mv_60x1b2_errata_pci7(struct ata_host *host)
{
struct mv_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->base;
/* workaround for 60x1-B2 errata PCI#7 */
if (mv_in_pcix_mode(host)) {
u32 reg = readl(mmio + MV_PCI_COMMAND);
writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
}
}
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
struct mv_host_priv *hpriv = host->private_data;
u32 hp_flags = hpriv->hp_flags;
switch (board_idx) {
case chip_5080:
hpriv->ops = &mv5xxx_ops;
hp_flags |= MV_HP_GEN_I;
switch (pdev->revision) {
case 0x1:
hp_flags |= MV_HP_ERRATA_50XXB0;
break;
case 0x3:
hp_flags |= MV_HP_ERRATA_50XXB2;
break;
default:
dev_warn(&pdev->dev,
"Applying 50XXB2 workarounds to unknown rev\n");
hp_flags |= MV_HP_ERRATA_50XXB2;
break;
}
break;
case chip_504x:
case chip_508x:
hpriv->ops = &mv5xxx_ops;
hp_flags |= MV_HP_GEN_I;
switch (pdev->revision) {
case 0x0:
hp_flags |= MV_HP_ERRATA_50XXB0;
break;
case 0x3:
hp_flags |= MV_HP_ERRATA_50XXB2;
break;
default:
dev_warn(&pdev->dev,
"Applying B2 workarounds to unknown rev\n");
hp_flags |= MV_HP_ERRATA_50XXB2;
break;
}
break;
case chip_604x:
case chip_608x:
hpriv->ops = &mv6xxx_ops;
hp_flags |= MV_HP_GEN_II;
switch (pdev->revision) {
case 0x7:
mv_60x1b2_errata_pci7(host);
hp_flags |= MV_HP_ERRATA_60X1B2;
break;
case 0x9:
hp_flags |= MV_HP_ERRATA_60X1C0;
break;
default:
dev_warn(&pdev->dev,
"Applying B2 workarounds to unknown rev\n");
hp_flags |= MV_HP_ERRATA_60X1B2;
break;
}
break;
case chip_7042:
hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
if (pdev->vendor == PCI_VENDOR_ID_TTI &&
(pdev->device == 0x2300 || pdev->device == 0x2310))
{
/*
* Highpoint RocketRAID PCIe 23xx series cards:
*
* Unconfigured drives are treated as "Legacy"
* by the BIOS, and it overwrites sector 8 with
* a "Lgcy" metadata block prior to Linux boot.
*
* Configured drives (RAID or JBOD) leave sector 8
* alone, but instead overwrite a high numbered
* sector for the RAID metadata. This sector can
* be determined exactly, by truncating the physical
* drive capacity to a nice even GB value.
*
* RAID metadata is at: (dev->n_sectors & ~0xfffff)
*
* Warn the user, lest they think we're just buggy.
*/
dev_warn(&pdev->dev, "Highpoint RocketRAID"
" BIOS CORRUPTS DATA on all attached drives,"
" regardless of if/how they are configured."
" BEWARE!\n");
dev_warn(&pdev->dev, "For data safety, do not"
" use sectors 8-9 on \"Legacy\" drives,"
" and avoid the final two gigabytes on"
" all RocketRAID BIOS initialized drives.\n");
}
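		/*
		 * Worked example (illustrative only): a nominal 500 GB
		 * drive reports n_sectors == 976773168.  Truncating with
		 * the mask quoted above gives:
		 *
		 *	976773168 & ~0xfffff == 976224256 (0x3a300000)
		 *
		 * so the RAID metadata starts at sector 976224256 and
		 * roughly the last 268 MiB of such a drive are unsafe.
		 */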
fallthrough;
case chip_6042:
hpriv->ops = &mv6xxx_ops;
hp_flags |= MV_HP_GEN_IIE;
if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
hp_flags |= MV_HP_CUT_THROUGH;
switch (pdev->revision) {
case 0x2: /* Rev.B0: the first/only public release */
hp_flags |= MV_HP_ERRATA_60X1C0;
break;
default:
dev_warn(&pdev->dev,
"Applying 60X1C0 workarounds to unknown rev\n");
hp_flags |= MV_HP_ERRATA_60X1C0;
break;
}
break;
case chip_soc:
if (soc_is_65n(hpriv))
hpriv->ops = &mv_soc_65n_ops;
else
hpriv->ops = &mv_soc_ops;
hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
MV_HP_ERRATA_60X1C0;
break;
default:
dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx);
return -EINVAL;
}
hpriv->hp_flags = hp_flags;
if (hp_flags & MV_HP_PCIE) {
hpriv->irq_cause_offset = PCIE_IRQ_CAUSE;
hpriv->irq_mask_offset = PCIE_IRQ_MASK;
hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
} else {
hpriv->irq_cause_offset = PCI_IRQ_CAUSE;
hpriv->irq_mask_offset = PCI_IRQ_MASK;
hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
}
return 0;
}
/**
* mv_init_host - Perform some early initialization of the host.
* @host: ATA host to initialize
*
* If possible, do an early global reset of the host. Then do
* our port init and clear/unmask all/relevant host interrupts.
*
* LOCKING:
* Inherited from caller.
*/
static int mv_init_host(struct ata_host *host)
{
int rc = 0, n_hc, port, hc;
struct mv_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->base;
rc = mv_chip_id(host, hpriv->board_idx);
if (rc)
goto done;
if (IS_SOC(hpriv)) {
hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK;
} else {
hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK;
}
/* initialize shadow irq mask with register's value */
hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
/* global interrupt mask: 0 == mask everything */
mv_set_main_irq_mask(host, ~0, 0);
n_hc = mv_get_hc_count(host->ports[0]->flags);
for (port = 0; port < host->n_ports; port++)
if (hpriv->ops->read_preamp)
hpriv->ops->read_preamp(hpriv, port, mmio);
rc = hpriv->ops->reset_hc(host, mmio, n_hc);
if (rc)
goto done;
hpriv->ops->reset_flash(hpriv, mmio);
hpriv->ops->reset_bus(host, mmio);
hpriv->ops->enable_leds(hpriv, mmio);
for (port = 0; port < host->n_ports; port++) {
struct ata_port *ap = host->ports[port];
void __iomem *port_mmio = mv_port_base(mmio, port);
mv_port_init(&ap->ioaddr, port_mmio);
}
for (hc = 0; hc < n_hc; hc++) {
void __iomem *hc_mmio = mv_hc_base(mmio, hc);
dev_dbg(host->dev, "HC%i: HC config=0x%08x HC IRQ cause "
"(before clear)=0x%08x\n", hc,
readl(hc_mmio + HC_CFG),
readl(hc_mmio + HC_IRQ_CAUSE));
/* Clear any currently outstanding hc interrupt conditions */
writelfl(0, hc_mmio + HC_IRQ_CAUSE);
}
if (!IS_SOC(hpriv)) {
/* Clear any currently outstanding host interrupt conditions */
writelfl(0, mmio + hpriv->irq_cause_offset);
/* and unmask interrupt generation for host regs */
writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
}
/*
* enable only global host interrupts for now.
* The per-port interrupts get done later as ports are set up.
*/
mv_set_main_irq_mask(host, 0, PCI_ERR);
mv_set_irq_coalescing(host, irq_coalescing_io_count,
irq_coalescing_usecs);
done:
return rc;
}
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
MV_CRQB_Q_SZ, 0);
if (!hpriv->crqb_pool)
return -ENOMEM;
hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
MV_CRPB_Q_SZ, 0);
if (!hpriv->crpb_pool)
return -ENOMEM;
hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
MV_SG_TBL_SZ, 0);
if (!hpriv->sg_tbl_pool)
return -ENOMEM;
return 0;
}
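/*
 * Implementation note (an observation, not documented behaviour): each pool
 * above is created with align == size, so every queue allocation comes back
 * naturally aligned to its own length -- presumably because the EDMA
 * request/response queue base registers ignore the low address bits.
 */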
static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
const struct mbus_dram_target_info *dram)
{
int i;
for (i = 0; i < 4; i++) {
writel(0, hpriv->base + WINDOW_CTRL(i));
writel(0, hpriv->base + WINDOW_BASE(i));
}
for (i = 0; i < dram->num_cs; i++) {
const struct mbus_dram_window *cs = dram->cs + i;
writel(((cs->size - 1) & 0xffff0000) |
(cs->mbus_attr << 8) |
(dram->mbus_dram_target_id << 4) | 1,
hpriv->base + WINDOW_CTRL(i));
writel(cs->base, hpriv->base + WINDOW_BASE(i));
}
}
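/*
 * Sketch of the WINDOW_CTRL packing used above (the usual Marvell MBUS
 * window layout; treat the exact field positions as illustrative):
 *
 *	bits 31:16	window size minus one, 64 KiB granularity
 *	bits 15:8	target attributes (cs->mbus_attr)
 *	bits 7:4	MBUS target id (dram->mbus_dram_target_id)
 *	bit 0		window enable
 *
 * e.g. a 256 MiB chip select yields (0x10000000 - 1) & 0xffff0000
 * == 0x0fff0000 in the size field.
 */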
/**
 * mv_platform_probe - handle a positive probe of an SoC Marvell host
* @pdev: platform device found
*
* LOCKING:
* Inherited from caller.
*/
static int mv_platform_probe(struct platform_device *pdev)
{
const struct mv_sata_platform_data *mv_platform_data;
const struct mbus_dram_target_info *dram;
const struct ata_port_info *ppi[] =
{ &mv_port_info[chip_soc], NULL };
struct ata_host *host;
struct mv_host_priv *hpriv;
struct resource *res;
int n_ports = 0, irq = 0;
int rc;
int port;
ata_print_version_once(&pdev->dev, DRV_VERSION);
/*
* Simple resource validation ..
*/
if (unlikely(pdev->num_resources != 1)) {
dev_err(&pdev->dev, "invalid number of resources\n");
return -EINVAL;
}
/*
* Get the register base first
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL)
return -EINVAL;
/* allocate host */
if (pdev->dev.of_node) {
rc = of_property_read_u32(pdev->dev.of_node, "nr-ports",
&n_ports);
if (rc) {
dev_err(&pdev->dev,
"error parsing nr-ports property: %d\n", rc);
return rc;
}
if (n_ports <= 0) {
dev_err(&pdev->dev, "nr-ports must be positive: %d\n",
n_ports);
return -EINVAL;
}
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
} else {
mv_platform_data = dev_get_platdata(&pdev->dev);
n_ports = mv_platform_data->n_ports;
irq = platform_get_irq(pdev, 0);
}
if (irq < 0)
return irq;
if (!irq)
return -EINVAL;
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
if (!host || !hpriv)
return -ENOMEM;
hpriv->port_clks = devm_kcalloc(&pdev->dev,
n_ports, sizeof(struct clk *),
GFP_KERNEL);
if (!hpriv->port_clks)
return -ENOMEM;
hpriv->port_phys = devm_kcalloc(&pdev->dev,
n_ports, sizeof(struct phy *),
GFP_KERNEL);
if (!hpriv->port_phys)
return -ENOMEM;
host->private_data = hpriv;
hpriv->board_idx = chip_soc;
host->iomap = NULL;
hpriv->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!hpriv->base)
return -ENOMEM;
hpriv->base -= SATAHC0_REG_BASE;
hpriv->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(hpriv->clk))
dev_notice(&pdev->dev, "cannot get optional clkdev\n");
else
clk_prepare_enable(hpriv->clk);
for (port = 0; port < n_ports; port++) {
char port_number[16];
sprintf(port_number, "%d", port);
hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
if (!IS_ERR(hpriv->port_clks[port]))
clk_prepare_enable(hpriv->port_clks[port]);
sprintf(port_number, "port%d", port);
hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
port_number);
if (IS_ERR(hpriv->port_phys[port])) {
rc = PTR_ERR(hpriv->port_phys[port]);
hpriv->port_phys[port] = NULL;
if (rc != -EPROBE_DEFER)
				dev_warn(&pdev->dev, "error getting phy %d\n", rc);
/* Cleanup only the initialized ports */
hpriv->n_ports = port;
goto err;
} else
phy_power_on(hpriv->port_phys[port]);
}
/* All the ports have been initialized */
hpriv->n_ports = n_ports;
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
dram = mv_mbus_dram_info();
if (dram)
mv_conf_mbus_windows(hpriv, dram);
rc = mv_create_dma_pools(hpriv, &pdev->dev);
if (rc)
goto err;
/*
* To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be
* updated in the LP_PHY_CTL register.
*/
if (pdev->dev.of_node &&
of_device_is_compatible(pdev->dev.of_node,
"marvell,armada-370-sata"))
hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL;
/* initialize adapter */
rc = mv_init_host(host);
if (rc)
goto err;
dev_info(&pdev->dev, "slots %u ports %d\n",
(unsigned)MV_MAX_Q_DEPTH, host->n_ports);
rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
if (!rc)
return 0;
err:
if (!IS_ERR(hpriv->clk)) {
clk_disable_unprepare(hpriv->clk);
clk_put(hpriv->clk);
}
for (port = 0; port < hpriv->n_ports; port++) {
if (!IS_ERR(hpriv->port_clks[port])) {
clk_disable_unprepare(hpriv->port_clks[port]);
clk_put(hpriv->port_clks[port]);
}
phy_power_off(hpriv->port_phys[port]);
}
return rc;
}
/**
 * mv_platform_remove - unplug a platform interface
 * @pdev: platform device
 *
 * A platform bus SATA device has been unplugged. Perform the needed
 * cleanup. Also called on module unload for any active devices.
 */
static void mv_platform_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct mv_host_priv *hpriv = host->private_data;
int port;
ata_host_detach(host);
if (!IS_ERR(hpriv->clk)) {
clk_disable_unprepare(hpriv->clk);
clk_put(hpriv->clk);
}
for (port = 0; port < host->n_ports; port++) {
if (!IS_ERR(hpriv->port_clks[port])) {
clk_disable_unprepare(hpriv->port_clks[port]);
clk_put(hpriv->port_clks[port]);
}
phy_power_off(hpriv->port_phys[port]);
}
}
#ifdef CONFIG_PM_SLEEP
static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
{
struct ata_host *host = platform_get_drvdata(pdev);
if (host)
ata_host_suspend(host, state);
return 0;
}
static int mv_platform_resume(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
const struct mbus_dram_target_info *dram;
int ret;
if (host) {
struct mv_host_priv *hpriv = host->private_data;
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
dram = mv_mbus_dram_info();
if (dram)
mv_conf_mbus_windows(hpriv, dram);
/* initialize adapter */
ret = mv_init_host(host);
if (ret) {
dev_err(&pdev->dev, "Error during HW init\n");
return ret;
}
ata_host_resume(host);
}
return 0;
}
#else
#define mv_platform_suspend NULL
#define mv_platform_resume NULL
#endif
#ifdef CONFIG_OF
static const struct of_device_id mv_sata_dt_ids[] = {
{ .compatible = "marvell,armada-370-sata", },
{ .compatible = "marvell,orion-sata", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
#endif
static struct platform_driver mv_platform_driver = {
.probe = mv_platform_probe,
.remove_new = mv_platform_remove,
.suspend = mv_platform_suspend,
.resume = mv_platform_resume,
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(mv_sata_dt_ids),
},
};
#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int mv_pci_device_resume(struct pci_dev *pdev);
#endif
static struct pci_driver mv_pci_driver = {
.name = DRV_NAME,
.id_table = mv_pci_tbl,
.probe = mv_pci_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = mv_pci_device_resume,
#endif
};
/**
* mv_print_info - Dump key info to kernel log for perusal.
* @host: ATA host to print info about
*
* FIXME: complete this.
*
* LOCKING:
* Inherited from caller.
*/
static void mv_print_info(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
struct mv_host_priv *hpriv = host->private_data;
u8 scc;
const char *scc_s, *gen;
	/* Read the PCI class code so we can report whether the
	 * controller presents itself in SCSI or RAID mode
	 */
pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
if (scc == 0)
scc_s = "SCSI";
else if (scc == 0x01)
scc_s = "RAID";
else
scc_s = "?";
if (IS_GEN_I(hpriv))
gen = "I";
else if (IS_GEN_II(hpriv))
gen = "II";
else if (IS_GEN_IIE(hpriv))
gen = "IIE";
else
gen = "?";
dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
* mv_pci_init_one - handle a positive probe of a PCI Marvell host
* @pdev: PCI device found
* @ent: PCI device ID entry for the matched host
*
* LOCKING:
* Inherited from caller.
*/
static int mv_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned int board_idx = (unsigned int)ent->driver_data;
const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
struct ata_host *host;
struct mv_host_priv *hpriv;
int n_ports, port, rc;
ata_print_version_once(&pdev->dev, DRV_VERSION);
/* allocate host */
n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
if (!host || !hpriv)
return -ENOMEM;
host->private_data = hpriv;
hpriv->n_ports = n_ports;
hpriv->board_idx = board_idx;
/* acquire resources */
rc = pcim_enable_device(pdev);
if (rc)
return rc;
rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;
host->iomap = pcim_iomap_table(pdev);
hpriv->base = host->iomap[MV_PRIMARY_BAR];
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc) {
dev_err(&pdev->dev, "DMA enable failed\n");
return rc;
}
rc = mv_create_dma_pools(hpriv, &pdev->dev);
if (rc)
return rc;
for (port = 0; port < host->n_ports; port++) {
struct ata_port *ap = host->ports[port];
void __iomem *port_mmio = mv_port_base(hpriv->base, port);
unsigned int offset = port_mmio - hpriv->base;
ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
}
/* initialize adapter */
rc = mv_init_host(host);
if (rc)
return rc;
/* Enable message-switched interrupts, if requested */
if (msi && pci_enable_msi(pdev) == 0)
hpriv->hp_flags |= MV_HP_FLAG_MSI;
mv_dump_pci_cfg(pdev, 0x68);
mv_print_info(host);
pci_set_master(pdev);
pci_try_set_mwi(pdev);
return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#ifdef CONFIG_PM_SLEEP
static int mv_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
/* initialize adapter */
rc = mv_init_host(host);
if (rc)
return rc;
ata_host_resume(host);
return 0;
}
#endif
#endif
static int __init mv_init(void)
{
int rc = -ENODEV;
#ifdef CONFIG_PCI
rc = pci_register_driver(&mv_pci_driver);
if (rc < 0)
return rc;
#endif
rc = platform_driver_register(&mv_platform_driver);
#ifdef CONFIG_PCI
if (rc < 0)
pci_unregister_driver(&mv_pci_driver);
#endif
return rc;
}
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
pci_unregister_driver(&mv_pci_driver);
#endif
platform_driver_unregister(&mv_platform_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
module_init(mv_init);
module_exit(mv_exit);
| linux-master | drivers/ata/sata_mv.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* acard-ahci.c - ACard AHCI SATA support
*
* Maintained by: Tejun Heo <[email protected]>
* Please ALWAYS copy [email protected]
* on emails.
*
* Copyright 2010 Red Hat, Inc.
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* AHCI hardware documentation:
* http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
* http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include "ahci.h"
#define DRV_NAME "acard-ahci"
#define DRV_VERSION "1.0"
/*
Received FIS structure limited to 80h.
*/
#define ACARD_AHCI_RX_FIS_SZ 128
enum {
AHCI_PCI_BAR = 5,
};
enum board_ids {
board_acard_ahci,
};
struct acard_sg {
__le32 addr;
__le32 addr_hi;
__le32 reserved;
__le32 size; /* bit 31 (EOT) max==0x10000 (64k) */
};
static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc);
static void acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int acard_ahci_port_start(struct ata_port *ap);
static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int acard_ahci_pci_device_resume(struct pci_dev *pdev);
#endif
static const struct scsi_host_template acard_ahci_sht = {
AHCI_SHT("acard-ahci"),
};
static struct ata_port_operations acard_ops = {
.inherits = &ahci_ops,
.qc_prep = acard_ahci_qc_prep,
.qc_fill_rtf = acard_ahci_qc_fill_rtf,
.port_start = acard_ahci_port_start,
};
#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
static const struct ata_port_info acard_ahci_port_info[] = {
[board_acard_ahci] =
{
AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &acard_ops,
},
};
static const struct pci_device_id acard_ahci_pci_tbl[] = {
/* ACard */
{ PCI_VDEVICE(ARTOP, 0x000d), board_acard_ahci }, /* ATP8620 */
{ } /* terminate list */
};
static struct pci_driver acard_ahci_pci_driver = {
.name = DRV_NAME,
.id_table = acard_ahci_pci_tbl,
.probe = acard_ahci_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = acard_ahci_pci_device_suspend,
.resume = acard_ahci_pci_device_resume,
#endif
};
#ifdef CONFIG_PM_SLEEP
static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = pci_get_drvdata(pdev);
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->mmio;
u32 ctl;
if (mesg.event & PM_EVENT_SUSPEND &&
hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
dev_err(&pdev->dev,
"BIOS update required for suspend/resume\n");
return -EIO;
}
if (mesg.event & PM_EVENT_SLEEP) {
/* AHCI spec rev1.1 section 8.3.3:
* Software must disable interrupts prior to requesting a
* transition of the HBA to D3 state.
*/
ctl = readl(mmio + HOST_CTL);
ctl &= ~HOST_IRQ_EN;
writel(ctl, mmio + HOST_CTL);
readl(mmio + HOST_CTL); /* flush */
}
return ata_pci_device_suspend(pdev, mesg);
}
static int acard_ahci_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
rc = ahci_reset_controller(host);
if (rc)
return rc;
ahci_init_controller(host);
}
ata_host_resume(host);
return 0;
}
#endif
static void acard_ahci_pci_print_info(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
u16 cc;
const char *scc_s;
pci_read_config_word(pdev, 0x0a, &cc);
if (cc == PCI_CLASS_STORAGE_IDE)
scc_s = "IDE";
else if (cc == PCI_CLASS_STORAGE_SATA)
scc_s = "SATA";
else if (cc == PCI_CLASS_STORAGE_RAID)
scc_s = "RAID";
else
scc_s = "unknown";
ahci_print_info(host, scc_s);
}
static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
{
struct scatterlist *sg;
struct acard_sg *acard_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
unsigned int si, last_si = 0;
/*
* Next, the S/G list.
*/
for_each_sg(qc->sg, sg, qc->n_elem, si) {
dma_addr_t addr = sg_dma_address(sg);
u32 sg_len = sg_dma_len(sg);
/*
* ACard note:
* We must set an end-of-table (EOT) bit,
* and the segment cannot exceed 64k (0x10000)
*/
acard_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
acard_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
acard_sg[si].size = cpu_to_le32(sg_len);
last_si = si;
}
acard_sg[last_si].size |= cpu_to_le32(1 << 31); /* set EOT */
return si;
}
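/*
 * Note on the address split above (illustrative): writing the high dword as
 * (addr >> 16) >> 16 rather than (addr >> 32) avoids undefined behaviour
 * when dma_addr_t is only 32 bits wide.  For a 64-bit dma_addr_t:
 *
 *	addr = 0x123456789aULL;
 *	addr & 0xffffffff	-> 0x3456789a	(acard_sg[si].addr)
 *	(addr >> 16) >> 16	-> 0x12		(acard_sg[si].addr_hi)
 */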
static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ahci_port_priv *pp = ap->private_data;
int is_atapi = ata_is_atapi(qc->tf.protocol);
void *cmd_tbl;
u32 opts;
const u32 cmd_fis_len = 5; /* five dwords */
/*
* Fill in command table information. First, the header,
* a SATA Register - Host to Device command FIS.
*/
cmd_tbl = pp->cmd_tbl + qc->hw_tag * AHCI_CMD_TBL_SZ;
ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
if (is_atapi) {
memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
}
if (qc->flags & ATA_QCFLAG_DMAMAP)
acard_ahci_fill_sg(qc, cmd_tbl);
/*
* Fill in command slot information.
*
* ACard note: prd table length not filled in
*/
opts = cmd_fis_len | (qc->dev->link->pmp << 12);
if (qc->tf.flags & ATA_TFLAG_WRITE)
opts |= AHCI_CMD_WRITE;
if (is_atapi)
opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
return AC_ERR_OK;
}
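/*
 * For reference (AHCI command header DW0, field positions shown for
 * illustration): the 'opts' word built above decodes as:
 *
 *	bits 4:0	command FIS length in dwords (5 here)
 *	bit 5		ATAPI command (AHCI_CMD_ATAPI)
 *	bit 6		write, i.e. host-to-device data (AHCI_CMD_WRITE)
 *	bit 7		prefetchable (AHCI_CMD_PREFETCH)
 *	bits 15:12	port multiplier port (pmp << 12)
 */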
static void acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct ahci_port_priv *pp = qc->ap->private_data;
u8 *rx_fis = pp->rx_fis;
if (pp->fbs_enabled)
rx_fis += qc->dev->link->pmp * ACARD_AHCI_RX_FIS_SZ;
/*
* After a successful execution of an ATA PIO data-in command,
* the device doesn't send D2H Reg FIS to update the TF and
* the host should take TF and E_Status from the preceding PIO
* Setup FIS.
*/
if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
!(qc->flags & ATA_QCFLAG_EH)) {
ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15];
} else
ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
}
static int acard_ahci_port_start(struct ata_port *ap)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
struct device *dev = ap->host->dev;
struct ahci_port_priv *pp;
void *mem;
dma_addr_t mem_dma;
size_t dma_sz, rx_fis_sz;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
/* check FBS capability */
if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
void __iomem *port_mmio = ahci_port_base(ap);
u32 cmd = readl(port_mmio + PORT_CMD);
if (cmd & PORT_CMD_FBSCP)
pp->fbs_supported = true;
else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
dev_info(dev, "port %d can do FBS, forcing FBSCP\n",
ap->port_no);
pp->fbs_supported = true;
} else
dev_warn(dev, "port %d is not capable of FBS\n",
ap->port_no);
}
if (pp->fbs_supported) {
dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
rx_fis_sz = ACARD_AHCI_RX_FIS_SZ * 16;
} else {
dma_sz = AHCI_PORT_PRIV_DMA_SZ;
rx_fis_sz = ACARD_AHCI_RX_FIS_SZ;
}
mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
if (!mem)
return -ENOMEM;
/*
* First item in chunk of DMA memory: 32-slot command table,
* 32 bytes each in size
*/
pp->cmd_slot = mem;
pp->cmd_slot_dma = mem_dma;
mem += AHCI_CMD_SLOT_SZ;
mem_dma += AHCI_CMD_SLOT_SZ;
/*
* Second item: Received-FIS area
*/
pp->rx_fis = mem;
pp->rx_fis_dma = mem_dma;
mem += rx_fis_sz;
mem_dma += rx_fis_sz;
	/*
	 * Third item: per-tag command table area (indexed by hw_tag in
	 * acard_ahci_qc_prep()), each slot holding one command FIS and
	 * its scatter-gather table
	 */
pp->cmd_tbl = mem;
pp->cmd_tbl_dma = mem_dma;
/*
* Save off initial list of interrupts to be enabled.
* This could be changed later
*/
pp->intr_mask = DEF_PORT_IRQ;
ap->private_data = pp;
/* engage engines, captain */
return ahci_port_resume(ap);
}
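/*
 * Layout of the coherent chunk carved up above (sizes are the ahci.h
 * defaults; illustrative):
 *
 *	mem + 0			32 command slots, 32 bytes each
 *				(AHCI_CMD_SLOT_SZ == 1 KiB)
 *	mem + 1 KiB		received-FIS area, rx_fis_sz bytes
 *				(x16 when FBS is enabled)
 *	mem + 1 KiB + rx_fis_sz	per-tag command tables and S/G lists
 */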
static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned int board_id = ent->driver_data;
struct ata_port_info pi = acard_ahci_port_info[board_id];
const struct ata_port_info *ppi[] = { &pi, NULL };
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
struct ata_host *host;
int n_ports, i, rc;
WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
ata_print_version_once(&pdev->dev, DRV_VERSION);
/* acquire resources */
rc = pcim_enable_device(pdev);
if (rc)
return rc;
/* AHCI controllers often implement SFF compatible interface.
* Grab all PCI BARs just in case.
*/
rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;
hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
hpriv->irq = pdev->irq;
hpriv->flags |= (unsigned long)pi.private_data;
if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
pci_enable_msi(pdev);
hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
/* save initial config */
ahci_save_initial_config(&pdev->dev, hpriv);
/* prepare host */
if (hpriv->cap & HOST_CAP_NCQ)
pi.flags |= ATA_FLAG_NCQ;
if (hpriv->cap & HOST_CAP_PMP)
pi.flags |= ATA_FLAG_PMP;
ahci_set_em_messages(hpriv, &pi);
/* CAP.NP sometimes indicate the index of the last enabled
* port, at other times, that of the last possible port, so
* determining the maximum port number requires looking at
* both CAP.NP and port_map.
*/
n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
if (!host)
return -ENOMEM;
host->private_data = hpriv;
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
host->flags |= ATA_HOST_PARALLEL_SCAN;
else
printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
ata_port_pbar_desc(ap, AHCI_PCI_BAR,
0x100 + ap->port_no * 0x80, "port");
/* set initial link pm policy */
/*
ap->pm_policy = NOT_AVAILABLE;
*/
/* disabled/not-implemented port */
if (!(hpriv->port_map & (1 << i)))
ap->ops = &ata_dummy_port_ops;
}
/* initialize adapter */
rc = dma_set_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK((hpriv->cap & HOST_CAP_64) ? 64 : 32));
if (rc) {
dev_err(&pdev->dev, "DMA enable failed\n");
return rc;
}
rc = ahci_reset_controller(host);
if (rc)
return rc;
ahci_init_controller(host);
acard_ahci_pci_print_info(host);
pci_set_master(pdev);
return ahci_host_activate(host, &acard_ahci_sht);
}
module_pci_driver(acard_ahci_pci_driver);
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("ACard AHCI SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, acard_ahci_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/acard-ahci.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* RZ1000/1001 driver based upon
*
* linux/drivers/ide/pci/rz1000.c Version 0.06 January 12, 2003
* Copyright (C) 1995-1998 Linus Torvalds & author (see below)
* Principal Author: [email protected] (Mark Lord)
*
* See linux/MAINTAINERS for address of current maintainer.
*
* This file provides support for disabling the buggy read-ahead
* mode of the RZ1000 IDE chipset, commonly used on Intel motherboards.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_rz1000"
#define DRV_VERSION "0.2.4"
/**
* rz1000_set_mode - mode setting function
* @link: ATA link
* @unused: returned device on set_mode failure
*
* Use a non standard set_mode function. We don't want to be tuned. We
* would prefer to be BIOS generic but for the fact our hardware is
* whacked out.
*/
static int rz1000_set_mode(struct ata_link *link, struct ata_device **unused)
{
struct ata_device *dev;
ata_for_each_dev(dev, link, ENABLED) {
/* We don't really care */
dev->pio_mode = XFER_PIO_0;
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
ata_dev_info(dev, "configured for PIO\n");
}
return 0;
}
static const struct scsi_host_template rz1000_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations rz1000_port_ops = {
.inherits = &ata_sff_port_ops,
.cable_detect = ata_cable_40wire,
.set_mode = rz1000_set_mode,
};
static int rz1000_fifo_disable(struct pci_dev *pdev)
{
u16 reg;
/* Be exceptionally paranoid as we must be sure to apply the fix */
	if (pci_read_config_word(pdev, 0x40, &reg) != 0)
return -1;
reg &= 0xDFFF;
if (pci_write_config_word(pdev, 0x40, reg) != 0)
return -1;
dev_info(&pdev->dev, "disabled chipset readahead.\n");
return 0;
}
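/*
 * The mask above clears exactly one bit: 0xDFFF == ~(1 << 13), i.e. bit 13
 * of config word 0x40 is the (buggy) read-ahead enable.  Illustrative:
 *
 *	reg = 0xffff; reg &= 0xDFFF;	// -> 0xdfff, only bit 13 dropped
 */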
/**
* rz1000_init_one - Register RZ1000 ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in rz1000_pci_tbl matching with @pdev
*
* Configure an RZ1000 interface. This doesn't require much special
* handling except that we *MUST* kill the chipset readahead or the
* user may experience data corruption.
*/
static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.port_ops = &rz1000_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
ata_print_version_once(&pdev->dev, DRV_VERSION);
if (rz1000_fifo_disable(pdev) == 0)
return ata_pci_sff_init_one(pdev, ppi, &rz1000_sht, NULL, 0);
dev_err(&pdev->dev, "failed to disable read-ahead on chipset.\n");
/* Not safe to use so skip */
return -ENODEV;
}
#ifdef CONFIG_PM_SLEEP
static int rz1000_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
/* If this fails on resume (which is a "can't happen" case), we
must stop as any progress risks data loss */
if (rz1000_fifo_disable(pdev))
panic("rz1000 fifo");
ata_host_resume(host);
return 0;
}
#endif
static const struct pci_device_id pata_rz1000[] = {
{ PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), },
{ PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1001), },
{ },
};
static struct pci_driver rz1000_pci_driver = {
.name = DRV_NAME,
.id_table = pata_rz1000,
.probe = rz1000_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = rz1000_reinit_one,
#endif
};
module_pci_driver(rz1000_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for RZ1000 PCI ATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pata_rz1000);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_rz1000.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* EP93XX PATA controller driver.
*
* Copyright (c) 2012, Metasoft s.c.
* Rafal Prylowski <[email protected]>
*
* Based on pata_scc.c, pata_icside.c and on earlier version of EP93XX
* PATA driver by Lennert Buytenhek and Alessandro Zummo.
* Read/Write timings, resource management and other improvements
* from driver by Joao Ramos and Bartlomiej Zolnierkiewicz.
* DMA engine support based on spi-ep93xx.c by Mika Westerberg.
*
* Original copyrights:
*
* Support for Cirrus Logic's EP93xx (EP9312, EP9315) CPUs
* PATA host controller driver.
*
* Copyright (c) 2009, Bartlomiej Zolnierkiewicz
*
* Heavily based on the ep93xx-ide.c driver:
*
* Copyright (c) 2009, Joao Ramos <[email protected]>
* INESC Inovacao (INOV)
*
* EP93XX PATA controller driver.
* Copyright (C) 2007 Lennert Buytenhek <[email protected]>
*
* An ATA driver for the Cirrus Logic EP93xx PATA controller.
*
* Based on an earlier version by Alessandro Zummo, which is:
* Copyright (C) 2006 Tower Technologies
*/
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
#include <linux/sys_soc.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/ktime.h>
#include <linux/platform_data/dma-ep93xx.h>
#include <linux/soc/cirrus/ep93xx.h>
#define DRV_NAME "ep93xx-ide"
#define DRV_VERSION "1.0"
enum {
/* IDE Control Register */
IDECTRL = 0x00,
IDECTRL_CS0N = (1 << 0),
IDECTRL_CS1N = (1 << 1),
IDECTRL_DIORN = (1 << 5),
IDECTRL_DIOWN = (1 << 6),
IDECTRL_INTRQ = (1 << 9),
IDECTRL_IORDY = (1 << 10),
/*
* the device IDE register to be accessed is selected through
* IDECTRL register's specific bitfields 'DA', 'CS1N' and 'CS0N':
* b4 b3 b2 b1 b0
* A2 A1 A0 CS1N CS0N
* the values filled in this structure allows the value to be directly
* ORed to the IDECTRL register, hence giving directly the A[2:0] and
* CS1N/CS0N values for each IDE register.
* The values correspond to the transformation:
* ((real IDE address) << 2) | CS1N value << 1 | CS0N value
*/
IDECTRL_ADDR_CMD = 0 + 2, /* CS1 */
IDECTRL_ADDR_DATA = (ATA_REG_DATA << 2) + 2,
IDECTRL_ADDR_ERROR = (ATA_REG_ERR << 2) + 2,
IDECTRL_ADDR_FEATURE = (ATA_REG_FEATURE << 2) + 2,
IDECTRL_ADDR_NSECT = (ATA_REG_NSECT << 2) + 2,
IDECTRL_ADDR_LBAL = (ATA_REG_LBAL << 2) + 2,
IDECTRL_ADDR_LBAM = (ATA_REG_LBAM << 2) + 2,
IDECTRL_ADDR_LBAH = (ATA_REG_LBAH << 2) + 2,
IDECTRL_ADDR_DEVICE = (ATA_REG_DEVICE << 2) + 2,
IDECTRL_ADDR_STATUS = (ATA_REG_STATUS << 2) + 2,
IDECTRL_ADDR_COMMAND = (ATA_REG_CMD << 2) + 2,
IDECTRL_ADDR_ALTSTATUS = (0x06 << 2) + 1, /* CS0 */
IDECTRL_ADDR_CTL = (0x06 << 2) + 1, /* CS0 */
/* IDE Configuration Register */
IDECFG = 0x04,
IDECFG_IDEEN = (1 << 0),
IDECFG_PIO = (1 << 1),
IDECFG_MDMA = (1 << 2),
IDECFG_UDMA = (1 << 3),
IDECFG_MODE_SHIFT = 4,
IDECFG_MODE_MASK = (0xf << 4),
IDECFG_WST_SHIFT = 8,
IDECFG_WST_MASK = (0x3 << 8),
/* MDMA Operation Register */
IDEMDMAOP = 0x08,
/* UDMA Operation Register */
IDEUDMAOP = 0x0c,
IDEUDMAOP_UEN = (1 << 0),
IDEUDMAOP_RWOP = (1 << 1),
/* PIO/MDMA/UDMA Data Registers */
IDEDATAOUT = 0x10,
IDEDATAIN = 0x14,
IDEMDMADATAOUT = 0x18,
IDEMDMADATAIN = 0x1c,
IDEUDMADATAOUT = 0x20,
IDEUDMADATAIN = 0x24,
/* UDMA Status Register */
IDEUDMASTS = 0x28,
IDEUDMASTS_DMAIDE = (1 << 16),
IDEUDMASTS_INTIDE = (1 << 17),
IDEUDMASTS_SBUSY = (1 << 18),
IDEUDMASTS_NDO = (1 << 24),
IDEUDMASTS_NDI = (1 << 25),
IDEUDMASTS_N4X = (1 << 26),
/* UDMA Debug Status Register */
IDEUDMADEBUG = 0x2c,
};
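/*
 * Worked example of the IDECTRL address encoding documented above
 * (illustrative): with ATA_REG_NSECT == 0x02,
 *
 *	IDECTRL_ADDR_NSECT == (0x02 << 2) + 2 == 0x0a
 *
 * i.e. bits [4:2] carry A[2:0] == 2, bit 1 (CS1N) is 1, bit 0 (CS0N) is 0,
 * so the value can be ORed straight into the IDECTRL register.
 */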
struct ep93xx_pata_data {
const struct platform_device *pdev;
void __iomem *ide_base;
struct ata_timing t;
bool iordy;
unsigned long udma_in_phys;
unsigned long udma_out_phys;
struct dma_chan *dma_rx_channel;
struct ep93xx_dma_data dma_rx_data;
struct dma_chan *dma_tx_channel;
struct ep93xx_dma_data dma_tx_data;
};
static void ep93xx_pata_clear_regs(void __iomem *base)
{
writel(IDECTRL_CS0N | IDECTRL_CS1N | IDECTRL_DIORN |
IDECTRL_DIOWN, base + IDECTRL);
writel(0, base + IDECFG);
writel(0, base + IDEMDMAOP);
writel(0, base + IDEUDMAOP);
writel(0, base + IDEDATAOUT);
writel(0, base + IDEDATAIN);
writel(0, base + IDEMDMADATAOUT);
writel(0, base + IDEMDMADATAIN);
writel(0, base + IDEUDMADATAOUT);
writel(0, base + IDEUDMADATAIN);
writel(0, base + IDEUDMADEBUG);
}
static bool ep93xx_pata_check_iordy(void __iomem *base)
{
return !!(readl(base + IDECTRL) & IDECTRL_IORDY);
}
/*
* According to EP93xx User's Guide, WST field of IDECFG specifies number
* of HCLK cycles to hold the data bus after a PIO write operation.
* It should be programmed to guarantee following delays:
*
* PIO Mode [ns]
* 0 30
* 1 20
* 2 15
* 3 10
* 4 5
*
* Maximum possible value for HCLK is 100MHz.
*/
static int ep93xx_pata_get_wst(int pio_mode)
{
int val;
if (pio_mode == 0)
val = 3;
else if (pio_mode < 3)
val = 2;
else
val = 1;
return val << IDECFG_WST_SHIFT;
}
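/*
 * Sanity check of the mapping above (illustrative): at the maximum HCLK of
 * 100 MHz one cycle is 10 ns, so the returned counts hold the bus for:
 *
 *	PIO 0		3 cycles -> 30 ns	(required: 30 ns)
 *	PIO 1..2	2 cycles -> 20 ns	(required: 20/15 ns)
 *	PIO 3..4	1 cycle  -> 10 ns	(required: 10/5 ns)
 */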
static void ep93xx_pata_enable_pio(void __iomem *base, int pio_mode)
{
writel(IDECFG_IDEEN | IDECFG_PIO |
ep93xx_pata_get_wst(pio_mode) |
(pio_mode << IDECFG_MODE_SHIFT), base + IDECFG);
}
/*
* Based on delay loop found in mach-pxa/mp900.c.
*
* Single iteration should take 5 cpu cycles. This is 25ns assuming the
* fastest ep93xx cpu speed (200MHz) and is better optimized for PIO4 timings
* than eg. 20ns.
*/
static void ep93xx_pata_delay(unsigned long count)
{
__asm__ volatile (
"0:\n"
"mov r0, r0\n"
"subs %0, %1, #1\n"
"bge 0b\n"
: "=r" (count)
: "0" (count)
);
}
static unsigned long ep93xx_pata_wait_for_iordy(void __iomem *base,
unsigned long t2)
{
/*
* According to ATA specification, IORDY pin can be first sampled
* tA = 35ns after activation of DIOR-/DIOW-. Maximum IORDY pulse
* width is tB = 1250ns.
*
* We are already t2 delay loop iterations after activation of
* DIOR-/DIOW-, so we set timeout to (1250 + 35) / 25 - t2 additional
* delay loop iterations.
*/
unsigned long start = (1250 + 35) / 25 - t2;
unsigned long counter = start;
while (!ep93xx_pata_check_iordy(base) && counter--)
ep93xx_pata_delay(1);
return start - counter;
}
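/*
 * Illustrative arithmetic for the timeout above: (1250 + 35) / 25 == 51
 * (integer division), so with e.g. t2 == 10 the loop samples IORDY at most
 * 41 more times, roughly 25 ns apart.
 */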
/* common part at start of ep93xx_pata_read/write() */
static void ep93xx_pata_rw_begin(void __iomem *base, unsigned long addr,
unsigned long t1)
{
writel(IDECTRL_DIOWN | IDECTRL_DIORN | addr, base + IDECTRL);
ep93xx_pata_delay(t1);
}
/* common part at end of ep93xx_pata_read/write() */
static void ep93xx_pata_rw_end(void __iomem *base, unsigned long addr,
bool iordy, unsigned long t0, unsigned long t2,
unsigned long t2i)
{
ep93xx_pata_delay(t2);
/* lengthen t2 if needed */
if (iordy)
t2 += ep93xx_pata_wait_for_iordy(base, t2);
writel(IDECTRL_DIOWN | IDECTRL_DIORN | addr, base + IDECTRL);
if (t0 > t2 && t0 - t2 > t2i)
ep93xx_pata_delay(t0 - t2);
else
ep93xx_pata_delay(t2i);
}
static u16 ep93xx_pata_read(struct ep93xx_pata_data *drv_data,
unsigned long addr,
bool reg)
{
void __iomem *base = drv_data->ide_base;
const struct ata_timing *t = &drv_data->t;
unsigned long t0 = reg ? t->cyc8b : t->cycle;
unsigned long t2 = reg ? t->act8b : t->active;
unsigned long t2i = reg ? t->rec8b : t->recover;
ep93xx_pata_rw_begin(base, addr, t->setup);
writel(IDECTRL_DIOWN | addr, base + IDECTRL);
/*
* The IDEDATAIN register is loaded from the DD pins at the positive
* edge of the DIORN signal. (EP93xx UG p27-14)
*/
ep93xx_pata_rw_end(base, addr, drv_data->iordy, t0, t2, t2i);
return readl(base + IDEDATAIN);
}
/* IDE register read */
static u16 ep93xx_pata_read_reg(struct ep93xx_pata_data *drv_data,
unsigned long addr)
{
return ep93xx_pata_read(drv_data, addr, true);
}
/* PIO data read */
static u16 ep93xx_pata_read_data(struct ep93xx_pata_data *drv_data,
unsigned long addr)
{
return ep93xx_pata_read(drv_data, addr, false);
}
static void ep93xx_pata_write(struct ep93xx_pata_data *drv_data,
u16 value, unsigned long addr,
bool reg)
{
void __iomem *base = drv_data->ide_base;
const struct ata_timing *t = &drv_data->t;
unsigned long t0 = reg ? t->cyc8b : t->cycle;
unsigned long t2 = reg ? t->act8b : t->active;
unsigned long t2i = reg ? t->rec8b : t->recover;
ep93xx_pata_rw_begin(base, addr, t->setup);
/*
* Value from IDEDATAOUT register is driven onto the DD pins when
* DIOWN is low. (EP93xx UG p27-13)
*/
writel(value, base + IDEDATAOUT);
writel(IDECTRL_DIORN | addr, base + IDECTRL);
ep93xx_pata_rw_end(base, addr, drv_data->iordy, t0, t2, t2i);
}
/* IDE register write */
static void ep93xx_pata_write_reg(struct ep93xx_pata_data *drv_data,
u16 value, unsigned long addr)
{
ep93xx_pata_write(drv_data, value, addr, true);
}
/* PIO data write */
static void ep93xx_pata_write_data(struct ep93xx_pata_data *drv_data,
u16 value, unsigned long addr)
{
ep93xx_pata_write(drv_data, value, addr, false);
}
static void ep93xx_pata_set_piomode(struct ata_port *ap,
struct ata_device *adev)
{
struct ep93xx_pata_data *drv_data = ap->host->private_data;
struct ata_device *pair = ata_dev_pair(adev);
/*
* Calculate timings for the delay loop, assuming ep93xx cpu speed
* is 200MHz (maximum possible for ep93xx). If actual cpu speed is
* slower, we will wait a bit longer in each delay.
* Additional division of cpu speed by 5, because single iteration
* of our delay loop takes 5 cpu cycles (25ns).
*/
unsigned long T = 1000000 / (200 / 5);
ata_timing_compute(adev, adev->pio_mode, &drv_data->t, T, 0);
if (pair && pair->pio_mode) {
struct ata_timing t;
ata_timing_compute(pair, pair->pio_mode, &t, T, 0);
ata_timing_merge(&t, &drv_data->t, &drv_data->t,
ATA_TIMING_SETUP | ATA_TIMING_8BIT);
}
drv_data->iordy = ata_pio_need_iordy(adev);
ep93xx_pata_enable_pio(drv_data->ide_base,
adev->pio_mode - XFER_PIO_0);
}
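/*
 * Note on the computation of T above (a sketch of the units, assuming the
 * quantum passed to ata_timing_compute() is in picoseconds): 200 MHz / 5
 * cycles per loop iteration == 40 M iterations/s, so one iteration lasts
 * 25 ns and T == 1000000 / 40 == 25000 ps.  The values ata_timing_compute()
 * fills into drv_data->t are therefore counts of delay-loop iterations,
 * which is exactly what ep93xx_pata_delay() consumes.
 */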
/* Note: original code is ata_sff_check_status */
static u8 ep93xx_pata_check_status(struct ata_port *ap)
{
struct ep93xx_pata_data *drv_data = ap->host->private_data;
return ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_STATUS);
}
static u8 ep93xx_pata_check_altstatus(struct ata_port *ap)
{
struct ep93xx_pata_data *drv_data = ap->host->private_data;
return ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_ALTSTATUS);
}
/* Note: original code is ata_sff_tf_load */
static void ep93xx_pata_tf_load(struct ata_port *ap,
const struct ata_taskfile *tf)
{
struct ep93xx_pata_data *drv_data = ap->host->private_data;
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
if (tf->ctl != ap->last_ctl) {
ep93xx_pata_write_reg(drv_data, tf->ctl, IDECTRL_ADDR_CTL);
ap->last_ctl = tf->ctl;
ata_wait_idle(ap);
}
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
ep93xx_pata_write_reg(drv_data, tf->hob_feature,
IDECTRL_ADDR_FEATURE);
ep93xx_pata_write_reg(drv_data, tf->hob_nsect,
IDECTRL_ADDR_NSECT);
ep93xx_pata_write_reg(drv_data, tf->hob_lbal,
IDECTRL_ADDR_LBAL);
ep93xx_pata_write_reg(drv_data, tf->hob_lbam,
IDECTRL_ADDR_LBAM);
ep93xx_pata_write_reg(drv_data, tf->hob_lbah,
IDECTRL_ADDR_LBAH);
}
if (is_addr) {
ep93xx_pata_write_reg(drv_data, tf->feature,
IDECTRL_ADDR_FEATURE);
ep93xx_pata_write_reg(drv_data, tf->nsect, IDECTRL_ADDR_NSECT);
ep93xx_pata_write_reg(drv_data, tf->lbal, IDECTRL_ADDR_LBAL);
ep93xx_pata_write_reg(drv_data, tf->lbam, IDECTRL_ADDR_LBAM);
ep93xx_pata_write_reg(drv_data, tf->lbah, IDECTRL_ADDR_LBAH);
}
if (tf->flags & ATA_TFLAG_DEVICE)
ep93xx_pata_write_reg(drv_data, tf->device,
IDECTRL_ADDR_DEVICE);
ata_wait_idle(ap);
}
/* Note: original code is ata_sff_tf_read */
static void ep93xx_pata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ep93xx_pata_data *drv_data = ap->host->private_data;
tf->status = ep93xx_pata_check_status(ap);
tf->error = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_FEATURE);
tf->nsect = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_NSECT);
tf->lbal = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAL);
tf->lbam = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAM);
tf->lbah = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAH);
tf->device = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_DEVICE);
if (tf->flags & ATA_TFLAG_LBA48) {
ep93xx_pata_write_reg(drv_data, tf->ctl | ATA_HOB,
IDECTRL_ADDR_CTL);
tf->hob_feature = ep93xx_pata_read_reg(drv_data,
IDECTRL_ADDR_FEATURE);
tf->hob_nsect = ep93xx_pata_read_reg(drv_data,
IDECTRL_ADDR_NSECT);
tf->hob_lbal = ep93xx_pata_read_reg(drv_data,
IDECTRL_ADDR_LBAL);
tf->hob_lbam = ep93xx_pata_read_reg(drv_data,
IDECTRL_ADDR_LBAM);
tf->hob_lbah = ep93xx_pata_read_reg(drv_data,
IDECTRL_ADDR_LBAH);
ep93xx_pata_write_reg(drv_data, tf->ctl, IDECTRL_ADDR_CTL);
ap->last_ctl = tf->ctl;
}
}
/* Note: original code is ata_sff_exec_command */
static void ep93xx_pata_exec_command(struct ata_port *ap,
const struct ata_taskfile *tf)
{
struct ep93xx_pata_data *drv_data = ap->host->private_data;
ep93xx_pata_write_reg(drv_data, tf->command,
IDECTRL_ADDR_COMMAND);
ata_sff_pause(ap);
}
/* Note: original code is ata_sff_dev_select */
static void ep93xx_pata_dev_select(struct ata_port *ap, unsigned int device)
{
struct ep93xx_pata_data *drv_data = ap->host->private_data;
u8 tmp = ATA_DEVICE_OBS;
if (device != 0)
tmp |= ATA_DEV1;
ep93xx_pata_write_reg(drv_data, tmp, IDECTRL_ADDR_DEVICE);
ata_sff_pause(ap); /* needed; also flushes, for mmio */
}
/* Note: original code is ata_sff_set_devctl */
static void ep93xx_pata_set_devctl(struct ata_port *ap, u8 ctl)
{
struct ep93xx_pata_data *drv_data = ap->host->private_data;
ep93xx_pata_write_reg(drv_data, ctl, IDECTRL_ADDR_CTL);
}
/* Note: original code is ata_sff_data_xfer */
static unsigned int ep93xx_pata_data_xfer(struct ata_queued_cmd *qc,
unsigned char *buf,
unsigned int buflen, int rw)
{
struct ata_port *ap = qc->dev->link->ap;
struct ep93xx_pata_data *drv_data = ap->host->private_data;
u16 *data = (u16 *)buf;
unsigned int words = buflen >> 1;
/* Transfer multiple of 2 bytes */
while (words--)
if (rw == READ)
*data++ = cpu_to_le16(
ep93xx_pata_read_data(
drv_data, IDECTRL_ADDR_DATA));
else
ep93xx_pata_write_data(drv_data, le16_to_cpu(*data++),
IDECTRL_ADDR_DATA);
/* Transfer trailing 1 byte, if any. */
if (unlikely(buflen & 0x01)) {
unsigned char pad[2] = { };
buf += buflen - 1;
if (rw == READ) {
*pad = cpu_to_le16(
ep93xx_pata_read_data(
drv_data, IDECTRL_ADDR_DATA));
*buf = pad[0];
} else {
pad[0] = *buf;
ep93xx_pata_write_data(drv_data, le16_to_cpu(*pad),
IDECTRL_ADDR_DATA);
}
words++;
}
return words << 1;
}
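/*
 * Note (illustrative): the odd trailing byte is still moved over the 16-bit
 * data register, padded with a zero byte, and 'words' is bumped so that the
 * function reports buflen rounded up to an even byte count -- mirroring
 * what ata_sff_data_xfer() does for 16-bit PIO.
 */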
/* Note: original code is ata_devchk */
static bool ep93xx_pata_device_is_present(struct ata_port *ap,
unsigned int device)
{
struct ep93xx_pata_data *drv_data = ap->host->private_data;
u8 nsect, lbal;
ap->ops->sff_dev_select(ap, device);
ep93xx_pata_write_reg(drv_data, 0x55, IDECTRL_ADDR_NSECT);
ep93xx_pata_write_reg(drv_data, 0xaa, IDECTRL_ADDR_LBAL);
ep93xx_pata_write_reg(drv_data, 0xaa, IDECTRL_ADDR_NSECT);
ep93xx_pata_write_reg(drv_data, 0x55, IDECTRL_ADDR_LBAL);
ep93xx_pata_write_reg(drv_data, 0x55, IDECTRL_ADDR_NSECT);
ep93xx_pata_write_reg(drv_data, 0xaa, IDECTRL_ADDR_LBAL);
nsect = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_NSECT);
lbal = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAL);
if ((nsect == 0x55) && (lbal == 0xaa))
return true;
return false;
}
/* Note: original code is ata_sff_wait_after_reset */
static int ep93xx_pata_wait_after_reset(struct ata_link *link,
unsigned int devmask,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct ep93xx_pata_data *drv_data = ap->host->private_data;
unsigned int dev0 = devmask & (1 << 0);
unsigned int dev1 = devmask & (1 << 1);
int rc, ret = 0;
ata_msleep(ap, ATA_WAIT_AFTER_RESET);
/* always check readiness of the master device */
rc = ata_sff_wait_ready(link, deadline);
/*
* -ENODEV means the odd clown forgot the D7 pulldown resistor
* and TF status is 0xff, bail out on it too.
*/
if (rc)
return rc;
/*
* if device 1 was found in ata_devchk, wait for register
* access briefly, then wait for BSY to clear.
*/
if (dev1) {
int i;
ap->ops->sff_dev_select(ap, 1);
/*
* Wait for register access. Some ATAPI devices fail
* to set nsect/lbal after reset, so don't waste too
* much time on it. We're gonna wait for !BSY anyway.
*/
for (i = 0; i < 2; i++) {
u8 nsect, lbal;
nsect = ep93xx_pata_read_reg(drv_data,
IDECTRL_ADDR_NSECT);
lbal = ep93xx_pata_read_reg(drv_data,
IDECTRL_ADDR_LBAL);
if (nsect == 1 && lbal == 1)
break;
msleep(50); /* give drive a breather */
}
rc = ata_sff_wait_ready(link, deadline);
if (rc) {
if (rc != -ENODEV)
return rc;
ret = rc;
}
}
/* is all this really necessary? */
ap->ops->sff_dev_select(ap, 0);
if (dev1)
ap->ops->sff_dev_select(ap, 1);
if (dev0)
ap->ops->sff_dev_select(ap, 0);
return ret;
}
/* Note: original code is ata_bus_softreset */
static int ep93xx_pata_bus_softreset(struct ata_port *ap, unsigned int devmask,
unsigned long deadline)
{
struct ep93xx_pata_data *drv_data = ap->host->private_data;
ep93xx_pata_write_reg(drv_data, ap->ctl, IDECTRL_ADDR_CTL);
udelay(20); /* FIXME: flush */
ep93xx_pata_write_reg(drv_data, ap->ctl | ATA_SRST, IDECTRL_ADDR_CTL);
udelay(20); /* FIXME: flush */
ep93xx_pata_write_reg(drv_data, ap->ctl, IDECTRL_ADDR_CTL);
ap->last_ctl = ap->ctl;
return ep93xx_pata_wait_after_reset(&ap->link, devmask, deadline);
}
static void ep93xx_pata_release_dma(struct ep93xx_pata_data *drv_data)
{
if (drv_data->dma_rx_channel) {
dma_release_channel(drv_data->dma_rx_channel);
drv_data->dma_rx_channel = NULL;
}
if (drv_data->dma_tx_channel) {
dma_release_channel(drv_data->dma_tx_channel);
drv_data->dma_tx_channel = NULL;
}
}
static bool ep93xx_pata_dma_filter(struct dma_chan *chan, void *filter_param)
{
if (ep93xx_dma_chan_is_m2p(chan))
return false;
chan->private = filter_param;
return true;
}
static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
{
const struct platform_device *pdev = drv_data->pdev;
dma_cap_mask_t mask;
struct dma_slave_config conf;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
/*
* Request two channels for IDE. Another possibility would be
* to request only one channel, and reprogram it's direction at
* start of new transfer.
*/
drv_data->dma_rx_data.port = EP93XX_DMA_IDE;
drv_data->dma_rx_data.direction = DMA_DEV_TO_MEM;
drv_data->dma_rx_data.name = "ep93xx-pata-rx";
drv_data->dma_rx_channel = dma_request_channel(mask,
ep93xx_pata_dma_filter, &drv_data->dma_rx_data);
if (!drv_data->dma_rx_channel)
return;
drv_data->dma_tx_data.port = EP93XX_DMA_IDE;
drv_data->dma_tx_data.direction = DMA_MEM_TO_DEV;
drv_data->dma_tx_data.name = "ep93xx-pata-tx";
drv_data->dma_tx_channel = dma_request_channel(mask,
ep93xx_pata_dma_filter, &drv_data->dma_tx_data);
if (!drv_data->dma_tx_channel) {
dma_release_channel(drv_data->dma_rx_channel);
return;
}
/* Configure receive channel direction and source address */
memset(&conf, 0, sizeof(conf));
conf.direction = DMA_DEV_TO_MEM;
conf.src_addr = drv_data->udma_in_phys;
conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
if (dmaengine_slave_config(drv_data->dma_rx_channel, &conf)) {
dev_err(&pdev->dev, "failed to configure rx dma channel\n");
ep93xx_pata_release_dma(drv_data);
return;
}
/* Configure transmit channel direction and destination address */
memset(&conf, 0, sizeof(conf));
conf.direction = DMA_MEM_TO_DEV;
conf.dst_addr = drv_data->udma_out_phys;
conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
if (dmaengine_slave_config(drv_data->dma_tx_channel, &conf)) {
dev_err(&pdev->dev, "failed to configure tx dma channel\n");
ep93xx_pata_release_dma(drv_data);
}
}
static void ep93xx_pata_dma_start(struct ata_queued_cmd *qc)
{
struct dma_async_tx_descriptor *txd;
struct ep93xx_pata_data *drv_data = qc->ap->host->private_data;
void __iomem *base = drv_data->ide_base;
struct ata_device *adev = qc->dev;
u32 v = qc->dma_dir == DMA_TO_DEVICE ? IDEUDMAOP_RWOP : 0;
struct dma_chan *channel = qc->dma_dir == DMA_TO_DEVICE
? drv_data->dma_tx_channel : drv_data->dma_rx_channel;
txd = dmaengine_prep_slave_sg(channel, qc->sg, qc->n_elem, qc->dma_dir,
DMA_CTRL_ACK);
if (!txd) {
dev_err(qc->ap->dev, "failed to prepare slave for sg dma\n");
return;
}
txd->callback = NULL;
txd->callback_param = NULL;
if (dmaengine_submit(txd) < 0) {
dev_err(qc->ap->dev, "failed to submit dma transfer\n");
return;
}
dma_async_issue_pending(channel);
/*
* When enabling UDMA operation, IDEUDMAOP register needs to be
* programmed in three step sequence:
* 1) set or clear the RWOP bit,
* 2) perform dummy read of the register,
* 3) set the UEN bit.
*/
writel(v, base + IDEUDMAOP);
readl(base + IDEUDMAOP);
writel(v | IDEUDMAOP_UEN, base + IDEUDMAOP);
writel(IDECFG_IDEEN | IDECFG_UDMA |
((adev->xfer_mode - XFER_UDMA_0) << IDECFG_MODE_SHIFT),
base + IDECFG);
}
static void ep93xx_pata_dma_stop(struct ata_queued_cmd *qc)
{
struct ep93xx_pata_data *drv_data = qc->ap->host->private_data;
void __iomem *base = drv_data->ide_base;
/* terminate all dma transfers, if not yet finished */
dmaengine_terminate_all(drv_data->dma_rx_channel);
dmaengine_terminate_all(drv_data->dma_tx_channel);
/*
* To properly stop IDE-DMA, IDEUDMAOP register must to be cleared
* and IDECTRL register must be set to default value.
*/
writel(0, base + IDEUDMAOP);
writel(readl(base + IDECTRL) | IDECTRL_DIOWN | IDECTRL_DIORN |
IDECTRL_CS0N | IDECTRL_CS1N, base + IDECTRL);
ep93xx_pata_enable_pio(drv_data->ide_base,
qc->dev->pio_mode - XFER_PIO_0);
ata_sff_dma_pause(qc->ap);
}
static void ep93xx_pata_dma_setup(struct ata_queued_cmd *qc)
{
qc->ap->ops->sff_exec_command(qc->ap, &qc->tf);
}
static u8 ep93xx_pata_dma_status(struct ata_port *ap)
{
struct ep93xx_pata_data *drv_data = ap->host->private_data;
u32 val = readl(drv_data->ide_base + IDEUDMASTS);
/*
* UDMA Status Register bits:
*
* DMAIDE - DMA request signal from UDMA state machine,
* INTIDE - INT line generated by UDMA because of errors in the
* state machine,
* SBUSY - UDMA state machine busy, not in idle state,
* NDO - error for data-out not completed,
* NDI - error for data-in not completed,
* N4X - error for data transferred not multiplies of four
* 32-bit words.
* (EP93xx UG p27-17)
*/
if (val & IDEUDMASTS_NDO || val & IDEUDMASTS_NDI ||
val & IDEUDMASTS_N4X || val & IDEUDMASTS_INTIDE)
return ATA_DMA_ERR;
/* read INTRQ (INT[3]) pin input state */
if (readl(drv_data->ide_base + IDECTRL) & IDECTRL_INTRQ)
return ATA_DMA_INTR;
if (val & IDEUDMASTS_SBUSY || val & IDEUDMASTS_DMAIDE)
return ATA_DMA_ACTIVE;
return 0;
}
/* Note: original code is ata_sff_softreset */
static int ep93xx_pata_softreset(struct ata_link *al, unsigned int *classes,
unsigned long deadline)
{
struct ata_port *ap = al->ap;
unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
unsigned int devmask = 0;
int rc;
u8 err;
/* determine if device 0/1 are present */
if (ep93xx_pata_device_is_present(ap, 0))
devmask |= (1 << 0);
if (slave_possible && ep93xx_pata_device_is_present(ap, 1))
devmask |= (1 << 1);
/* select device 0 again */
ap->ops->sff_dev_select(al->ap, 0);
/* issue bus reset */
rc = ep93xx_pata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
if (rc && (rc != -ENODEV || sata_scr_valid(al))) {
ata_link_err(al, "SRST failed (errno=%d)\n", rc);
return rc;
}
/* determine by signature whether we have ATA or ATAPI devices */
classes[0] = ata_sff_dev_classify(&al->device[0], devmask & (1 << 0),
&err);
if (slave_possible && err != 0x81)
classes[1] = ata_sff_dev_classify(&al->device[1],
devmask & (1 << 1), &err);
return 0;
}
/* Note: original code is ata_sff_drain_fifo */
static void ep93xx_pata_drain_fifo(struct ata_queued_cmd *qc)
{
int count;
struct ata_port *ap;
struct ep93xx_pata_data *drv_data;
/* We only need to flush incoming data when a command was running */
if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
return;
ap = qc->ap;
drv_data = ap->host->private_data;
/* Drain up to 64K of data before we give up this recovery method */
for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
&& count < 65536; count += 2)
ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_DATA);
if (count)
ata_port_dbg(ap, "drained %d bytes to clear DRQ.\n", count);
}
static int ep93xx_pata_port_start(struct ata_port *ap)
{
struct ep93xx_pata_data *drv_data = ap->host->private_data;
/*
* Set timings to safe values at startup (= number of ns from ATA
* specification), we'll switch to properly calculated values later.
*/
drv_data->t = *ata_timing_find_mode(XFER_PIO_0);
return 0;
}
static const struct scsi_host_template ep93xx_pata_sht = {
ATA_BASE_SHT(DRV_NAME),
/* ep93xx dma implementation limit */
.sg_tablesize = 32,
/* ep93xx dma can't transfer 65536 bytes at once */
.dma_boundary = 0x7fff,
};
static struct ata_port_operations ep93xx_pata_port_ops = {
.inherits = &ata_bmdma_port_ops,
.qc_prep = ata_noop_qc_prep,
.softreset = ep93xx_pata_softreset,
.hardreset = ATA_OP_NULL,
.sff_dev_select = ep93xx_pata_dev_select,
.sff_set_devctl = ep93xx_pata_set_devctl,
.sff_check_status = ep93xx_pata_check_status,
.sff_check_altstatus = ep93xx_pata_check_altstatus,
.sff_tf_load = ep93xx_pata_tf_load,
.sff_tf_read = ep93xx_pata_tf_read,
.sff_exec_command = ep93xx_pata_exec_command,
.sff_data_xfer = ep93xx_pata_data_xfer,
.sff_drain_fifo = ep93xx_pata_drain_fifo,
.sff_irq_clear = ATA_OP_NULL,
.set_piomode = ep93xx_pata_set_piomode,
.bmdma_setup = ep93xx_pata_dma_setup,
.bmdma_start = ep93xx_pata_dma_start,
.bmdma_stop = ep93xx_pata_dma_stop,
.bmdma_status = ep93xx_pata_dma_status,
.cable_detect = ata_cable_unknown,
.port_start = ep93xx_pata_port_start,
};
static const struct soc_device_attribute ep93xx_soc_table[] = {
{ .revision = "E1", .data = (void *)ATA_UDMA3 },
{ .revision = "E2", .data = (void *)ATA_UDMA4 },
{ /* sentinel */ }
};
static int ep93xx_pata_probe(struct platform_device *pdev)
{
struct ep93xx_pata_data *drv_data;
struct ata_host *host;
struct ata_port *ap;
int irq;
struct resource *mem_res;
void __iomem *ide_base;
int err;
err = ep93xx_ide_acquire_gpio(pdev);
if (err)
return err;
/* INT[3] (IRQ_EP93XX_EXT3) line connected as pull down */
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
err = irq;
goto err_rel_gpio;
}
ide_base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
if (IS_ERR(ide_base)) {
err = PTR_ERR(ide_base);
goto err_rel_gpio;
}
drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
if (!drv_data) {
err = -ENOMEM;
goto err_rel_gpio;
}
drv_data->pdev = pdev;
drv_data->ide_base = ide_base;
drv_data->udma_in_phys = mem_res->start + IDEUDMADATAIN;
drv_data->udma_out_phys = mem_res->start + IDEUDMADATAOUT;
ep93xx_pata_dma_init(drv_data);
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
if (!host) {
err = -ENOMEM;
goto err_rel_dma;
}
ep93xx_pata_clear_regs(ide_base);
host->private_data = drv_data;
ap = host->ports[0];
ap->dev = &pdev->dev;
ap->ops = &ep93xx_pata_port_ops;
ap->flags |= ATA_FLAG_SLAVE_POSS;
ap->pio_mask = ATA_PIO4;
/*
* Maximum UDMA modes:
* EP931x rev.E0 - UDMA2
* EP931x rev.E1 - UDMA3
* EP931x rev.E2 - UDMA4
*
* MWDMA support was removed from EP931x rev.E2,
* so this driver supports only UDMA modes.
*/
if (drv_data->dma_rx_channel && drv_data->dma_tx_channel) {
const struct soc_device_attribute *match;
match = soc_device_match(ep93xx_soc_table);
if (match)
ap->udma_mask = (unsigned int) match->data;
else
ap->udma_mask = ATA_UDMA2;
}
/* defaults, pio 0 */
ep93xx_pata_enable_pio(ide_base, 0);
dev_info(&pdev->dev, "version " DRV_VERSION "\n");
/* activate host */
err = ata_host_activate(host, irq, ata_bmdma_interrupt, 0,
&ep93xx_pata_sht);
if (err == 0)
return 0;
err_rel_dma:
ep93xx_pata_release_dma(drv_data);
err_rel_gpio:
ep93xx_ide_release_gpio(pdev);
return err;
}
static void ep93xx_pata_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct ep93xx_pata_data *drv_data = host->private_data;
ata_host_detach(host);
ep93xx_pata_release_dma(drv_data);
ep93xx_pata_clear_regs(drv_data->ide_base);
ep93xx_ide_release_gpio(pdev);
}
static struct platform_driver ep93xx_pata_platform_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = ep93xx_pata_probe,
.remove_new = ep93xx_pata_remove,
};
module_platform_driver(ep93xx_pata_platform_driver);
MODULE_AUTHOR("Alessandro Zummo, Lennert Buytenhek, Joao Ramos, "
"Bartlomiej Zolnierkiewicz, Rafal Prylowski");
MODULE_DESCRIPTION("low-level driver for cirrus ep93xx IDE controller");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:pata_ep93xx");
| linux-master | drivers/ata/pata_ep93xx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_ns87415.c - NS87415 (and PARISC SUPERIO 87560) PATA
*
* (C) 2005 Red Hat <[email protected]>
*
* This is a fairly generic MWDMA controller. It has some limitations
* as it requires timing reloads on PIO/DMA transitions but it is otherwise
* fairly well designed.
*
* This driver assumes the firmware has left the chip in a valid ST506
* compliant state, either legacy IRQ 14/15 or native INTA shared. You
* may need to add platform code if your system fails to do this.
*
* The same cell appears in the 87560 controller used by some PARISC
* systems. This has its own special mountain of errata.
*
* TODO:
* Get someone to test on SPARC
* Implement lazy pio/dma switching for better performance
* 8bit shared timing.
* See if we need to kill the FIFO for ATAPI
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define DRV_NAME "pata_ns87415"
#define DRV_VERSION "0.0.1"
/**
* ns87415_set_mode - Initialize host controller mode timings
* @ap: Port whose timings we are configuring
* @adev: Device whose timings we are configuring
* @mode: Mode to set
*
* Program the mode registers for this controller, channel and
* device. Because the chip is quite an old design we have to do this
* for PIO/DMA switches.
*
* LOCKING:
* None (inherited from caller).
*/
static void ns87415_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode)
{
struct pci_dev *dev = to_pci_dev(ap->host->dev);
int unit = 2 * ap->port_no + adev->devno;
int timing = 0x44 + 2 * unit;
unsigned long T = 1000000000 / 33333; /* PCI clocks */
struct ata_timing t;
u16 clocking;
u8 iordy;
u8 status;
/* Timing register format: the low nybble holds 17 - active (read)
time and the high nybble holds 16 - recovery time, in PCI clocks */
ata_timing_compute(adev, adev->pio_mode, &t, T, 0);
clocking = 17 - clamp_val(t.active, 2, 17);
clocking |= (16 - clamp_val(t.recover, 1, 16)) << 4;
/* Use the same timing for read and write bytes */
clocking |= (clocking << 8);
pci_write_config_word(dev, timing, clocking);
/* Set the IORDY enable versus DMA enable on or off properly */
pci_read_config_byte(dev, 0x42, &iordy);
iordy &= ~(1 << (4 + unit));
if (mode >= XFER_MW_DMA_0 || !ata_pio_need_iordy(adev))
iordy |= (1 << (4 + unit));
/* Paranoia: We shouldn't ever get here with busy write buffers
but if so wait */
pci_read_config_byte(dev, 0x43, &status);
while (status & 0x03) {
udelay(1);
pci_read_config_byte(dev, 0x43, &status);
}
/* Flip the IORDY/DMA bits now we are sure the write buffers are
clear */
pci_write_config_byte(dev, 0x42, iordy);
/* TODO: Set byte 54 command timing to the best 8bit
mode shared by all four devices */
}
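/*
* Illustrative example, not part of the driver: suppose
* ata_timing_compute() yields t.active = 6 and t.recover = 9 PCI clocks
* (hypothetical values). The timing word computed above would be
*
* clocking = (17 - 6) | ((16 - 9) << 4) = 0x0b | 0x70 = 0x7b
* clocking |= clocking << 8 -> 0x7b7b written to config space
*/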
/**
* ns87415_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device to program
*
* Set PIO mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void ns87415_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
ns87415_set_mode(ap, adev, adev->pio_mode);
}
/**
* ns87415_bmdma_setup - Set up DMA
* @qc: Command block
*
* Set up for bus mastering DMA. We have to do this ourselves
* rather than use the helper due to a chip erratum
*/
static void ns87415_bmdma_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
u8 dmactl;
/* load PRD table addr. */
mb(); /* make sure PRD table writes are visible to controller */
iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
/* specify data direction, triple-check start bit is clear */
dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
/* Due to an erratum we need to write these bits to the wrong
place - which does save us an I/O bizarrely */
dmactl |= ATA_DMA_INTR | ATA_DMA_ERR;
if (!rw)
dmactl |= ATA_DMA_WR;
iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
/* issue r/w command */
ap->ops->sff_exec_command(ap, &qc->tf);
}
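/*
* Illustrative example, not part of the driver: assuming the usual
* BMDMA command register bits (ATA_DMA_INTR = 0x04, ATA_DMA_ERR = 0x02,
* ATA_DMA_WR = 0x08), the dmactl byte built above is
*
* read (rw == 0): 0x04 | 0x02 | 0x08 = 0x0e
* write (rw != 0): 0x04 | 0x02 = 0x06
*
* with ATA_DMA_START left clear until bmdma_start().
*/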
/**
* ns87415_bmdma_start - Begin DMA transfer
* @qc: Command block
*
* Switch the timings for the chip and set up for a DMA transfer
* before the DMA burst begins.
*
* FIXME: We should do lazy switching on bmdma_start versus
* ata_pio_data_xfer for better performance.
*/
static void ns87415_bmdma_start(struct ata_queued_cmd *qc)
{
ns87415_set_mode(qc->ap, qc->dev, qc->dev->dma_mode);
ata_bmdma_start(qc);
}
/**
* ns87415_bmdma_stop - End DMA transfer
* @qc: Command block
*
* End DMA mode and switch the controller back into PIO mode
*/
static void ns87415_bmdma_stop(struct ata_queued_cmd *qc)
{
ata_bmdma_stop(qc);
ns87415_set_mode(qc->ap, qc->dev, qc->dev->pio_mode);
}
/**
* ns87415_irq_clear - Clear interrupt
* @ap: Channel to clear
*
* Erratum: Due to a chip bug registers 02 and 0A bit 1 and 2 (the
* error bits) are reset by writing to register 00 or 08.
*/
static void ns87415_irq_clear(struct ata_port *ap)
{
void __iomem *mmio = ap->ioaddr.bmdma_addr;
if (!mmio)
return;
iowrite8((ioread8(mmio + ATA_DMA_CMD) | ATA_DMA_INTR | ATA_DMA_ERR),
mmio + ATA_DMA_CMD);
}
/**
* ns87415_check_atapi_dma - ATAPI DMA filter
* @qc: Command block
*
* Disable ATAPI DMA (for now). We may be able to do DMA if we
* kill the prefetching. This isn't clear.
*/
static int ns87415_check_atapi_dma(struct ata_queued_cmd *qc)
{
return -EOPNOTSUPP;
}
#if defined(CONFIG_SUPERIO)
/* SUPERIO 87560 is a PoS chip that NatSem denies exists.
* Unfortunately, it's built-in on all Astro-based PA-RISC workstations
* which use the integrated NS87514 cell for CD-ROM support.
* i.e. we have to support it for CD-ROM installs.
* See drivers/parisc/superio.c for more gory details.
*
* Workarounds taken from drivers/ide/pci/ns87415.c
*/
#include <asm/superio.h>
#define SUPERIO_IDE_MAX_RETRIES 25
/**
* ns87560_read_buggy - workaround buggy Super I/O chip
* @port: Port to read
*
* Work around chipset problems in the 87560 SuperIO chip
*/
static u8 ns87560_read_buggy(void __iomem *port)
{
u8 tmp;
int retries = SUPERIO_IDE_MAX_RETRIES;
do {
tmp = ioread8(port);
if (tmp != 0)
return tmp;
udelay(50);
} while (retries-- > 0);
return tmp;
}
/**
* ns87560_check_status
* @ap: channel to check
*
* Return the status of the channel working around the
* 87560 flaws.
*/
static u8 ns87560_check_status(struct ata_port *ap)
{
return ns87560_read_buggy(ap->ioaddr.status_addr);
}
/**
* ns87560_tf_read - input device's ATA taskfile shadow registers
* @ap: Port from which input is read
* @tf: ATA taskfile register set for storing input
*
* Reads ATA taskfile registers for currently-selected device
* into @tf. Work around the 87560 bugs.
*
* LOCKING:
* Inherited from caller.
*/
static void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
tf->status = ns87560_check_status(ap);
tf->error = ioread8(ioaddr->error_addr);
tf->nsect = ioread8(ioaddr->nsect_addr);
tf->lbal = ioread8(ioaddr->lbal_addr);
tf->lbam = ioread8(ioaddr->lbam_addr);
tf->lbah = ioread8(ioaddr->lbah_addr);
tf->device = ns87560_read_buggy(ioaddr->device_addr);
if (tf->flags & ATA_TFLAG_LBA48) {
iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
tf->hob_feature = ioread8(ioaddr->error_addr);
tf->hob_nsect = ioread8(ioaddr->nsect_addr);
tf->hob_lbal = ioread8(ioaddr->lbal_addr);
tf->hob_lbam = ioread8(ioaddr->lbam_addr);
tf->hob_lbah = ioread8(ioaddr->lbah_addr);
iowrite8(tf->ctl, ioaddr->ctl_addr);
ap->last_ctl = tf->ctl;
}
}
/**
* ns87560_bmdma_status
* @ap: channel to check
*
* Return the DMA status of the channel working around the
* 87560 flaws.
*/
static u8 ns87560_bmdma_status(struct ata_port *ap)
{
return ns87560_read_buggy(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}
#endif /* 87560 SuperIO Support */
static struct ata_port_operations ns87415_pata_ops = {
.inherits = &ata_bmdma_port_ops,
.check_atapi_dma = ns87415_check_atapi_dma,
.bmdma_setup = ns87415_bmdma_setup,
.bmdma_start = ns87415_bmdma_start,
.bmdma_stop = ns87415_bmdma_stop,
.sff_irq_clear = ns87415_irq_clear,
.cable_detect = ata_cable_40wire,
.set_piomode = ns87415_set_piomode,
};
#if defined(CONFIG_SUPERIO)
static struct ata_port_operations ns87560_pata_ops = {
.inherits = &ns87415_pata_ops,
.sff_tf_read = ns87560_tf_read,
.sff_check_status = ns87560_check_status,
.bmdma_status = ns87560_bmdma_status,
};
#endif
static const struct scsi_host_template ns87415_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static void ns87415_fixup(struct pci_dev *pdev)
{
/* Select 512 byte sectors */
pci_write_config_byte(pdev, 0x55, 0xEE);
/* Select PIO0 8bit clocking */
pci_write_config_byte(pdev, 0x54, 0xB7);
}
/**
* ns87415_init_one - Register 87415 ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in ns87415_pci_tbl matching with @pdev
*
* Called from kernel PCI layer. We probe for combined mode (sigh),
* and then hand over control to libata, for it to do the rest.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.port_ops = &ns87415_pata_ops,
};
const struct ata_port_info *ppi[] = { &info, NULL };
int rc;
#if defined(CONFIG_SUPERIO)
static const struct ata_port_info info87560 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.port_ops = &ns87560_pata_ops,
};
if (PCI_SLOT(pdev->devfn) == 0x0E)
ppi[0] = &info87560;
#endif
ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
return rc;
ns87415_fixup(pdev);
return ata_pci_bmdma_init_one(pdev, ppi, &ns87415_sht, NULL, 0);
}
static const struct pci_device_id ns87415_pci_tbl[] = {
{ PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87415), },
{ } /* terminate list */
};
#ifdef CONFIG_PM_SLEEP
static int ns87415_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
ns87415_fixup(pdev);
ata_host_resume(host);
return 0;
}
#endif
static struct pci_driver ns87415_pci_driver = {
.name = DRV_NAME,
.id_table = ns87415_pci_tbl,
.probe = ns87415_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ns87415_reinit_one,
#endif
};
module_pci_driver(ns87415_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("ATA low-level driver for NS87415 controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ns87415_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_ns87415.c |
/*
* pata_piccolo.c - Toshiba Piccolo PATA/SATA controller driver.
*
* This is basically an update to ata_generic.c to add Toshiba Piccolo support
* then split out to keep ata_generic "clean".
*
* Copyright 2005 Red Hat Inc, all rights reserved.
*
* Elements from ide/pci/generic.c
* Copyright (C) 2001-2002 Andre Hedrick <[email protected]>
* Portions (C) Copyright 2002 Red Hat Inc <[email protected]>
*
* May be copied or modified under the terms of the GNU General Public License
*
* The timing data tables/programming info are courtesy of the NetBSD driver
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_piccolo"
#define DRV_VERSION "0.0.1"
static void tosh_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
static const u16 pio[6] = { /* For reg 0x50 low word & E088 */
0x0566, 0x0433, 0x0311, 0x0201, 0x0200, 0x0100
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u16 conf;
pci_read_config_word(pdev, 0x50, &conf);
conf &= 0xE088;
conf |= pio[adev->pio_mode - XFER_PIO_0];
pci_write_config_word(pdev, 0x50, conf);
}
static void tosh_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 conf;
pci_read_config_dword(pdev, 0x5C, &conf);
conf &= 0x78FFE088; /* Keep the other bits */
if (adev->dma_mode >= XFER_UDMA_0) {
int udma = adev->dma_mode - XFER_UDMA_0;
conf |= 0x80000000;
conf |= (udma + 2) << 28;
conf |= (2 - udma) * 0x111; /* spread into three nibbles */
} else {
static const u32 mwdma[4] = {
0x0655, 0x0200, 0x0200, 0x0100
};
conf |= mwdma[adev->dma_mode - XFER_MW_DMA_0];
}
pci_write_config_dword(pdev, 0x5C, conf);
}
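/*
* Illustrative example, not part of the driver: the UDMA branch above
* produces, for UDMA0 (udma = 0),
*
* conf |= 0x80000000 | (2 << 28) | (2 * 0x111) == 0xa0000222
*
* and for UDMA2 (udma = 2),
*
* conf |= 0x80000000 | (4 << 28) | (0 * 0x111) == 0xc0000000
*/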
static const struct scsi_host_template tosh_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations tosh_port_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = ata_cable_unknown,
.set_piomode = tosh_set_piomode,
.set_dmamode = tosh_set_dmamode
};
/**
* ata_tosh_init_one - attach generic IDE
* @dev: PCI device found
* @id: match entry
*
* Called each time a matching IDE interface is found. We check if the
* interface is one we wish to claim and if so we perform any chip
* specific hacks then let the ATA layer do the heavy lifting.
*/
static int ata_tosh_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO5,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
.port_ops = &tosh_port_ops
};
const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
/* Just one port for the moment */
return ata_pci_bmdma_init_one(dev, ppi, &tosh_sht, NULL, 0);
}
static struct pci_device_id ata_tosh[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), },
{ 0, },
};
static struct pci_driver ata_tosh_pci_driver = {
.name = DRV_NAME,
.id_table = ata_tosh,
.probe = ata_tosh_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(ata_tosh_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("Low level driver for Toshiba Piccolo ATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ata_tosh);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_piccolo.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* sata_inic162x.c - Driver for Initio 162x SATA controllers
*
* Copyright 2006 SUSE Linux Products GmbH
* Copyright 2006 Tejun Heo <[email protected]>
*
* **** WARNING ****
*
* This driver never worked properly and unfortunately data corruption is
* relatively common. There isn't anyone working on the driver and there's
* no support from the vendor. Do not use this driver in any production
* environment.
*
* http://thread.gmane.org/gmane.linux.debian.devel.bugs.rc/378525/focus=54491
* https://bugzilla.kernel.org/show_bug.cgi?id=60565
*
* *****************
*
* This controller is eccentric and easily locks up if something isn't
* right. Documentation is available at initio's website but it only
* documents registers (not programming model).
*
* This driver has an interesting history. The first version was written
* from the documentation and a 2.4 IDE driver posted on a Taiwanese
* company's website, which didn't use any IDMA features and couldn't
* handle LBA48. The resulting driver couldn't handle LBA48 devices
* either, making it pretty useless.
*
* After a while, initio picked the driver up, renamed it to
* sata_initio162x, updated it to use IDMA for ATA DMA commands and
* posted it on their website. It only used ATA_PROT_DMA for IDMA and
* attaching both devices and issuing IDMA and !IDMA commands
* simultaneously broke it due to PIRQ masking interaction but it did
* show how to use the IDMA (ADMA + some initio specific twists)
* engine.
*
* Then, I picked up their changes again and here's the usable driver
* which uses IDMA for everything. Everything works now including
* LBA48, CD/DVD burning, suspend/resume and hotplug. There are some
* issues tho. Result TF is not reported properly, NCQ isn't
* supported yet and CD/DVD writing works with DMA assisted PIO
* protocol (which, for native SATA devices, shouldn't cause any
* noticeable difference).
*
* Anyways, so, here's finally a working driver for inic162x. Enjoy!
*
* initio: If you guys wanna improve the driver regarding result TF
* access and other stuff, please feel free to contact me. I'll be
* happy to assist.
*/
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/blkdev.h>
#include <scsi/scsi_device.h>
#define DRV_NAME "sata_inic162x"
#define DRV_VERSION "0.4"
enum {
MMIO_BAR_PCI = 5,
MMIO_BAR_CARDBUS = 1,
NR_PORTS = 2,
IDMA_CPB_TBL_SIZE = 4 * 32,
INIC_DMA_BOUNDARY = 0xffffff,
HOST_ACTRL = 0x08,
HOST_CTL = 0x7c,
HOST_STAT = 0x7e,
HOST_IRQ_STAT = 0xbc,
HOST_IRQ_MASK = 0xbe,
PORT_SIZE = 0x40,
/* registers for ATA TF operation */
PORT_TF_DATA = 0x00,
PORT_TF_FEATURE = 0x01,
PORT_TF_NSECT = 0x02,
PORT_TF_LBAL = 0x03,
PORT_TF_LBAM = 0x04,
PORT_TF_LBAH = 0x05,
PORT_TF_DEVICE = 0x06,
PORT_TF_COMMAND = 0x07,
PORT_TF_ALT_STAT = 0x08,
PORT_IRQ_STAT = 0x09,
PORT_IRQ_MASK = 0x0a,
PORT_PRD_CTL = 0x0b,
PORT_PRD_ADDR = 0x0c,
PORT_PRD_XFERLEN = 0x10,
PORT_CPB_CPBLAR = 0x18,
PORT_CPB_PTQFIFO = 0x1c,
/* IDMA register */
PORT_IDMA_CTL = 0x14,
PORT_IDMA_STAT = 0x16,
PORT_RPQ_FIFO = 0x1e,
PORT_RPQ_CNT = 0x1f,
PORT_SCR = 0x20,
/* HOST_CTL bits */
HCTL_LEDEN = (1 << 3), /* enable LED operation */
HCTL_IRQOFF = (1 << 8), /* global IRQ off */
HCTL_FTHD0 = (1 << 10), /* fifo threshold 0 */
HCTL_FTHD1 = (1 << 11), /* fifo threshold 1*/
HCTL_PWRDWN = (1 << 12), /* power down PHYs */
HCTL_SOFTRST = (1 << 13), /* global reset (no phy reset) */
HCTL_RPGSEL = (1 << 15), /* register page select */
HCTL_KNOWN_BITS = HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST |
HCTL_RPGSEL,
/* HOST_IRQ_(STAT|MASK) bits */
HIRQ_PORT0 = (1 << 0),
HIRQ_PORT1 = (1 << 1),
HIRQ_SOFT = (1 << 14),
HIRQ_GLOBAL = (1 << 15), /* STAT only */
/* PORT_IRQ_(STAT|MASK) bits */
PIRQ_OFFLINE = (1 << 0), /* device unplugged */
PIRQ_ONLINE = (1 << 1), /* device plugged */
PIRQ_COMPLETE = (1 << 2), /* completion interrupt */
PIRQ_FATAL = (1 << 3), /* fatal error */
PIRQ_ATA = (1 << 4), /* ATA interrupt */
PIRQ_REPLY = (1 << 5), /* reply FIFO not empty */
PIRQ_PENDING = (1 << 7), /* port IRQ pending (STAT only) */
PIRQ_ERR = PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,
PIRQ_MASK_DEFAULT = PIRQ_REPLY | PIRQ_ATA,
PIRQ_MASK_FREEZE = 0xff,
/* PORT_PRD_CTL bits */
PRD_CTL_START = (1 << 0),
PRD_CTL_WR = (1 << 3),
PRD_CTL_DMAEN = (1 << 7), /* DMA enable */
/* PORT_IDMA_CTL bits */
IDMA_CTL_RST_ATA = (1 << 2), /* hardreset ATA bus */
IDMA_CTL_RST_IDMA = (1 << 5), /* reset IDMA machinery */
IDMA_CTL_GO = (1 << 7), /* IDMA mode go */
IDMA_CTL_ATA_NIEN = (1 << 8), /* ATA IRQ disable */
/* PORT_IDMA_STAT bits */
IDMA_STAT_PERR = (1 << 0), /* PCI ERROR MODE */
IDMA_STAT_CPBERR = (1 << 1), /* ADMA CPB error */
IDMA_STAT_LGCY = (1 << 3), /* ADMA legacy */
IDMA_STAT_UIRQ = (1 << 4), /* ADMA unsolicited irq */
IDMA_STAT_STPD = (1 << 5), /* ADMA stopped */
IDMA_STAT_PSD = (1 << 6), /* ADMA pause */
IDMA_STAT_DONE = (1 << 7), /* ADMA done */
IDMA_STAT_ERR = IDMA_STAT_PERR | IDMA_STAT_CPBERR,
/* CPB Control Flags*/
CPB_CTL_VALID = (1 << 0), /* CPB valid */
CPB_CTL_QUEUED = (1 << 1), /* queued command */
CPB_CTL_DATA = (1 << 2), /* data, rsvd in datasheet */
CPB_CTL_IEN = (1 << 3), /* PCI interrupt enable */
CPB_CTL_DEVDIR = (1 << 4), /* device direction control */
/* CPB Response Flags */
CPB_RESP_DONE = (1 << 0), /* ATA command complete */
CPB_RESP_REL = (1 << 1), /* ATA release */
CPB_RESP_IGNORED = (1 << 2), /* CPB ignored */
CPB_RESP_ATA_ERR = (1 << 3), /* ATA command error */
CPB_RESP_SPURIOUS = (1 << 4), /* ATA spurious interrupt error */
CPB_RESP_UNDERFLOW = (1 << 5), /* APRD deficiency length error */
CPB_RESP_OVERFLOW = (1 << 6), /* APRD excess length error */
CPB_RESP_CPB_ERR = (1 << 7), /* CPB error flag */
/* PRD Control Flags */
PRD_DRAIN = (1 << 1), /* ignore data excess */
PRD_CDB = (1 << 2), /* atapi packet command pointer */
PRD_DIRECT_INTR = (1 << 3), /* direct interrupt */
PRD_DMA = (1 << 4), /* data transfer method */
PRD_WRITE = (1 << 5), /* data dir, rsvd in datasheet */
PRD_IOM = (1 << 6), /* io/memory transfer */
PRD_END = (1 << 7), /* APRD chain end */
};
/* Command Parameter Block */
struct inic_cpb {
u8 resp_flags; /* Response Flags */
u8 error; /* ATA Error */
u8 status; /* ATA Status */
u8 ctl_flags; /* Control Flags */
__le32 len; /* Total Transfer Length */
__le32 prd; /* First PRD pointer */
u8 rsvd[4];
/* 16 bytes */
u8 feature; /* ATA Feature */
u8 hob_feature; /* ATA Ex. Feature */
u8 device; /* ATA Device/Head */
u8 mirctl; /* Mirror Control */
u8 nsect; /* ATA Sector Count */
u8 hob_nsect; /* ATA Ex. Sector Count */
u8 lbal; /* ATA Sector Number */
u8 hob_lbal; /* ATA Ex. Sector Number */
u8 lbam; /* ATA Cylinder Low */
u8 hob_lbam; /* ATA Ex. Cylinder Low */
u8 lbah; /* ATA Cylinder High */
u8 hob_lbah; /* ATA Ex. Cylinder High */
u8 command; /* ATA Command */
u8 ctl; /* ATA Control */
u8 slave_error; /* Slave ATA Error */
u8 slave_status; /* Slave ATA Status */
/* 32 bytes */
} __packed;
/* Physical Region Descriptor */
struct inic_prd {
__le32 mad; /* Physical Memory Address */
__le16 len; /* Transfer Length */
u8 rsvd;
u8 flags; /* Control Flags */
} __packed;
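/*
* Illustrative example, not part of the driver: a two-segment DMA read
* would be described by a PRD chain such as
*
* prd[0] = { mad = seg0 bus addr, len = seg0 len, flags = PRD_DMA } (0x10)
* prd[1] = { mad = seg1 bus addr, len = seg1 len, flags = PRD_DMA | PRD_END } (0x90)
*
* as built by inic_fill_sg() below; PRD_WRITE is OR'd in for writes.
*/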
struct inic_pkt {
struct inic_cpb cpb;
struct inic_prd prd[LIBATA_MAX_PRD + 1]; /* + 1 for cdb */
u8 cdb[ATAPI_CDB_LEN];
} __packed;
struct inic_host_priv {
void __iomem *mmio_base;
u16 cached_hctl;
};
struct inic_port_priv {
struct inic_pkt *pkt;
dma_addr_t pkt_dma;
u32 *cpb_tbl;
dma_addr_t cpb_tbl_dma;
};
static const struct scsi_host_template inic_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_MAX_PRD, /* maybe it can be larger? */
/*
* This controller is braindamaged. dma_boundary is 0xffff like others
* but it will lock up the whole machine HARD if 65536 byte PRD entry
* is fed. Reduce maximum segment size.
*/
.dma_boundary = INIC_DMA_BOUNDARY,
.max_segment_size = 65536 - 512,
};
static const int scr_map[] = {
[SCR_STATUS] = 0,
[SCR_ERROR] = 1,
[SCR_CONTROL] = 2,
};
static void __iomem *inic_port_base(struct ata_port *ap)
{
struct inic_host_priv *hpriv = ap->host->private_data;
return hpriv->mmio_base + ap->port_no * PORT_SIZE;
}
static void inic_reset_port(void __iomem *port_base)
{
void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
/* stop IDMA engine */
readw(idma_ctl); /* flush */
msleep(1);
/* mask IRQ and assert reset */
writew(IDMA_CTL_RST_IDMA, idma_ctl);
readw(idma_ctl); /* flush */
msleep(1);
/* release reset */
writew(0, idma_ctl);
/* clear irq */
writeb(0xff, port_base + PORT_IRQ_STAT);
}
static int inic_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
{
void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;
if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
return -EINVAL;
*val = readl(scr_addr + scr_map[sc_reg] * 4);
/* this controller has stuck DIAG.N, ignore it */
if (sc_reg == SCR_ERROR)
*val &= ~SERR_PHYRDY_CHG;
return 0;
}
static int inic_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
{
void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;
if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
return -EINVAL;
writel(val, scr_addr + scr_map[sc_reg] * 4);
return 0;
}
static void inic_stop_idma(struct ata_port *ap)
{
void __iomem *port_base = inic_port_base(ap);
readb(port_base + PORT_RPQ_FIFO);
readb(port_base + PORT_RPQ_CNT);
writew(0, port_base + PORT_IDMA_CTL);
}
static void inic_host_err_intr(struct ata_port *ap, u8 irq_stat, u16 idma_stat)
{
struct ata_eh_info *ehi = &ap->link.eh_info;
struct inic_port_priv *pp = ap->private_data;
struct inic_cpb *cpb = &pp->pkt->cpb;
bool freeze = false;
ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi, "irq_stat=0x%x idma_stat=0x%x",
irq_stat, idma_stat);
inic_stop_idma(ap);
if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
ata_ehi_push_desc(ehi, "hotplug");
ata_ehi_hotplugged(ehi);
freeze = true;
}
if (idma_stat & IDMA_STAT_PERR) {
ata_ehi_push_desc(ehi, "PCI error");
freeze = true;
}
if (idma_stat & IDMA_STAT_CPBERR) {
ata_ehi_push_desc(ehi, "CPB error");
if (cpb->resp_flags & CPB_RESP_IGNORED) {
__ata_ehi_push_desc(ehi, " ignored");
ehi->err_mask |= AC_ERR_INVALID;
freeze = true;
}
if (cpb->resp_flags & CPB_RESP_ATA_ERR)
ehi->err_mask |= AC_ERR_DEV;
if (cpb->resp_flags & CPB_RESP_SPURIOUS) {
__ata_ehi_push_desc(ehi, " spurious-intr");
ehi->err_mask |= AC_ERR_HSM;
freeze = true;
}
if (cpb->resp_flags &
(CPB_RESP_UNDERFLOW | CPB_RESP_OVERFLOW)) {
__ata_ehi_push_desc(ehi, " data-over/underflow");
ehi->err_mask |= AC_ERR_HSM;
freeze = true;
}
}
if (freeze)
ata_port_freeze(ap);
else
ata_port_abort(ap);
}
static void inic_host_intr(struct ata_port *ap)
{
void __iomem *port_base = inic_port_base(ap);
struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
u8 irq_stat;
u16 idma_stat;
/* read and clear IRQ status */
irq_stat = readb(port_base + PORT_IRQ_STAT);
writeb(irq_stat, port_base + PORT_IRQ_STAT);
idma_stat = readw(port_base + PORT_IDMA_STAT);
if (unlikely((irq_stat & PIRQ_ERR) || (idma_stat & IDMA_STAT_ERR)))
inic_host_err_intr(ap, irq_stat, idma_stat);
if (unlikely(!qc))
goto spurious;
if (likely(idma_stat & IDMA_STAT_DONE)) {
inic_stop_idma(ap);
/* Depending on circumstances, device error
* isn't reported by IDMA, check it explicitly.
*/
if (unlikely(readb(port_base + PORT_TF_COMMAND) &
(ATA_DF | ATA_ERR)))
qc->err_mask |= AC_ERR_DEV;
ata_qc_complete(qc);
return;
}
spurious:
ata_port_warn(ap, "unhandled interrupt: cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n",
qc ? qc->tf.command : 0xff, irq_stat, idma_stat);
}
static irqreturn_t inic_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct inic_host_priv *hpriv = host->private_data;
u16 host_irq_stat;
int i, handled = 0;
host_irq_stat = readw(hpriv->mmio_base + HOST_IRQ_STAT);
if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
goto out;
spin_lock(&host->lock);
for (i = 0; i < NR_PORTS; i++)
if (host_irq_stat & (HIRQ_PORT0 << i)) {
inic_host_intr(host->ports[i]);
handled++;
}
spin_unlock(&host->lock);
out:
return IRQ_RETVAL(handled);
}
static int inic_check_atapi_dma(struct ata_queued_cmd *qc)
{
/* For some reason ATAPI_PROT_DMA doesn't work for some
* commands including writes and other misc ops. Use PIO
* protocol instead, which BTW is driven by the DMA engine
* anyway, so it shouldn't make much difference for native
* SATA devices.
*/
if (atapi_cmd_type(qc->cdb[0]) == READ)
return 0;
return 1;
}
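/*
* Illustrative example, not part of the driver: GPCMD_READ_10 (0x28) is
* classified as a read by atapi_cmd_type(), so the check above returns 0
* and ATAPI DMA is allowed; a WRITE(10) returns 1 and is driven via the
* DMA-assisted PIO protocol instead.
*/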
static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
{
struct scatterlist *sg;
unsigned int si;
u8 flags = 0;
if (qc->tf.flags & ATA_TFLAG_WRITE)
flags |= PRD_WRITE;
if (ata_is_dma(qc->tf.protocol))
flags |= PRD_DMA;
for_each_sg(qc->sg, sg, qc->n_elem, si) {
prd->mad = cpu_to_le32(sg_dma_address(sg));
prd->len = cpu_to_le16(sg_dma_len(sg));
prd->flags = flags;
prd++;
}
WARN_ON(!si);
prd[-1].flags |= PRD_END;
}
static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc)
{
struct inic_port_priv *pp = qc->ap->private_data;
struct inic_pkt *pkt = pp->pkt;
struct inic_cpb *cpb = &pkt->cpb;
struct inic_prd *prd = pkt->prd;
bool is_atapi = ata_is_atapi(qc->tf.protocol);
bool is_data = ata_is_data(qc->tf.protocol);
unsigned int cdb_len = 0;
if (is_atapi)
cdb_len = qc->dev->cdb_len;
/* prepare packet, based on initio driver */
memset(pkt, 0, sizeof(struct inic_pkt));
cpb->ctl_flags = CPB_CTL_VALID | CPB_CTL_IEN;
if (is_atapi || is_data)
cpb->ctl_flags |= CPB_CTL_DATA;
cpb->len = cpu_to_le32(qc->nbytes + cdb_len);
cpb->prd = cpu_to_le32(pp->pkt_dma + offsetof(struct inic_pkt, prd));
cpb->device = qc->tf.device;
cpb->feature = qc->tf.feature;
cpb->nsect = qc->tf.nsect;
cpb->lbal = qc->tf.lbal;
cpb->lbam = qc->tf.lbam;
cpb->lbah = qc->tf.lbah;
if (qc->tf.flags & ATA_TFLAG_LBA48) {
cpb->hob_feature = qc->tf.hob_feature;
cpb->hob_nsect = qc->tf.hob_nsect;
cpb->hob_lbal = qc->tf.hob_lbal;
cpb->hob_lbam = qc->tf.hob_lbam;
cpb->hob_lbah = qc->tf.hob_lbah;
}
cpb->command = qc->tf.command;
/* don't load ctl - dunno why. it's like that in the initio driver */
/* setup PRD for CDB */
if (is_atapi) {
memcpy(pkt->cdb, qc->cdb, ATAPI_CDB_LEN);
prd->mad = cpu_to_le32(pp->pkt_dma +
offsetof(struct inic_pkt, cdb));
prd->len = cpu_to_le16(cdb_len);
prd->flags = PRD_CDB | PRD_WRITE;
if (!is_data)
prd->flags |= PRD_END;
prd++;
}
/* setup sg table */
if (is_data)
inic_fill_sg(prd, qc);
pp->cpb_tbl[0] = pp->pkt_dma;
return AC_ERR_OK;
}
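/*
* Illustrative example, not part of the driver: for a 12-byte ATAPI CDB
* reading 2048 bytes in a single segment, the packet built above carries
*
* cpb->len = 2048 + 12 = 2060
* prd[0] = { pkt_dma + cdb offset, len = 12, flags = PRD_CDB | PRD_WRITE } (0x24)
* prd[1] = { data bus addr, len = 2048, flags = PRD_DMA | PRD_END } (0x90)
*/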
static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *port_base = inic_port_base(ap);
/* fire up the ADMA engine */
writew(HCTL_FTHD0 | HCTL_LEDEN, port_base + HOST_CTL);
writew(IDMA_CTL_GO, port_base + PORT_IDMA_CTL);
writeb(0, port_base + PORT_CPB_PTQFIFO);
return 0;
}
static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
void __iomem *port_base = inic_port_base(ap);
tf->error = readb(port_base + PORT_TF_FEATURE);
tf->nsect = readb(port_base + PORT_TF_NSECT);
tf->lbal = readb(port_base + PORT_TF_LBAL);
tf->lbam = readb(port_base + PORT_TF_LBAM);
tf->lbah = readb(port_base + PORT_TF_LBAH);
tf->device = readb(port_base + PORT_TF_DEVICE);
tf->status = readb(port_base + PORT_TF_COMMAND);
}
static void inic_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct ata_taskfile *rtf = &qc->result_tf;
struct ata_taskfile tf;
/* FIXME: Except for status and error, result TF access
* doesn't work. I tried reading from BAR0/2, CPB and BAR5.
* None works regardless of which command interface is used.
* For now, copy status and error back only when the status
* indicates a device error. This means that we're reporting a
* bogus sector for RW failures. Eeekk....
*/
inic_tf_read(qc->ap, &tf);
if (tf.status & ATA_ERR) {
rtf->status = tf.status;
rtf->error = tf.error;
}
}
static void inic_freeze(struct ata_port *ap)
{
void __iomem *port_base = inic_port_base(ap);
writeb(PIRQ_MASK_FREEZE, port_base + PORT_IRQ_MASK);
writeb(0xff, port_base + PORT_IRQ_STAT);
}
static void inic_thaw(struct ata_port *ap)
{
void __iomem *port_base = inic_port_base(ap);
writeb(0xff, port_base + PORT_IRQ_STAT);
writeb(PIRQ_MASK_DEFAULT, port_base + PORT_IRQ_MASK);
}
static int inic_check_ready(struct ata_link *link)
{
void __iomem *port_base = inic_port_base(link->ap);
return ata_check_ready(readb(port_base + PORT_TF_COMMAND));
}
/*
* SRST and SControl hardreset don't give valid signature on this
* controller. Only controller specific hardreset mechanism works.
*/
static int inic_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
void __iomem *port_base = inic_port_base(ap);
void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
int rc;
/* hammer it into sane state */
inic_reset_port(port_base);
writew(IDMA_CTL_RST_ATA, idma_ctl);
readw(idma_ctl); /* flush */
ata_msleep(ap, 1);
writew(0, idma_ctl);
rc = sata_link_resume(link, timing, deadline);
if (rc) {
ata_link_warn(link,
"failed to resume link after reset (errno=%d)\n",
rc);
return rc;
}
*class = ATA_DEV_NONE;
if (ata_link_online(link)) {
struct ata_taskfile tf;
/* wait for link to become ready */
rc = ata_wait_after_reset(link, deadline, inic_check_ready);
/* link occupied, -ENODEV too is an error */
if (rc) {
ata_link_warn(link,
"device not ready after hardreset (errno=%d)\n",
rc);
return rc;
}
inic_tf_read(ap, &tf);
*class = ata_port_classify(ap, &tf);
}
return 0;
}
static void inic_error_handler(struct ata_port *ap)
{
void __iomem *port_base = inic_port_base(ap);
inic_reset_port(port_base);
ata_std_error_handler(ap);
}
static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
{
/* make DMA engine forget about the failed command */
if (qc->flags & ATA_QCFLAG_EH)
inic_reset_port(inic_port_base(qc->ap));
}
static void init_port(struct ata_port *ap)
{
void __iomem *port_base = inic_port_base(ap);
struct inic_port_priv *pp = ap->private_data;
/* clear packet and CPB table */
memset(pp->pkt, 0, sizeof(struct inic_pkt));
memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE);
/* setup CPB lookup table addresses */
writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR);
}
static int inic_port_resume(struct ata_port *ap)
{
init_port(ap);
return 0;
}
static int inic_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct inic_port_priv *pp;
/* alloc and initialize private data */
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
ap->private_data = pp;
/* Alloc resources */
pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
&pp->pkt_dma, GFP_KERNEL);
if (!pp->pkt)
return -ENOMEM;
pp->cpb_tbl = dmam_alloc_coherent(dev, IDMA_CPB_TBL_SIZE,
&pp->cpb_tbl_dma, GFP_KERNEL);
if (!pp->cpb_tbl)
return -ENOMEM;
init_port(ap);
return 0;
}
static struct ata_port_operations inic_port_ops = {
.inherits = &sata_port_ops,
.check_atapi_dma = inic_check_atapi_dma,
.qc_prep = inic_qc_prep,
.qc_issue = inic_qc_issue,
.qc_fill_rtf = inic_qc_fill_rtf,
.freeze = inic_freeze,
.thaw = inic_thaw,
.hardreset = inic_hardreset,
.error_handler = inic_error_handler,
.post_internal_cmd = inic_post_internal_cmd,
.scr_read = inic_scr_read,
.scr_write = inic_scr_write,
.port_resume = inic_port_resume,
.port_start = inic_port_start,
};
static const struct ata_port_info inic_port_info = {
.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &inic_port_ops
};
static int init_controller(void __iomem *mmio_base, u16 hctl)
{
int i;
u16 val;
hctl &= ~HCTL_KNOWN_BITS;
/* Soft reset whole controller. Spec says reset duration is 3
* PCI clocks, be generous and give it 10ms.
*/
writew(hctl | HCTL_SOFTRST, mmio_base + HOST_CTL);
readw(mmio_base + HOST_CTL); /* flush */
for (i = 0; i < 10; i++) {
msleep(1);
val = readw(mmio_base + HOST_CTL);
if (!(val & HCTL_SOFTRST))
break;
}
if (val & HCTL_SOFTRST)
return -EIO;
/* mask all interrupts and reset ports */
for (i = 0; i < NR_PORTS; i++) {
void __iomem *port_base = mmio_base + i * PORT_SIZE;
writeb(0xff, port_base + PORT_IRQ_MASK);
inic_reset_port(port_base);
}
/* port IRQ is masked now, unmask global IRQ */
writew(hctl & ~HCTL_IRQOFF, mmio_base + HOST_CTL);
val = readw(mmio_base + HOST_IRQ_MASK);
val &= ~(HIRQ_PORT0 | HIRQ_PORT1);
writew(val, mmio_base + HOST_IRQ_MASK);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int inic_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
struct inic_host_priv *hpriv = host->private_data;
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
if (rc)
return rc;
}
ata_host_resume(host);
return 0;
}
#endif
static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct ata_port_info *ppi[] = { &inic_port_info, NULL };
struct ata_host *host;
struct inic_host_priv *hpriv;
void __iomem * const *iomap;
int mmio_bar;
int i, rc;
ata_print_version_once(&pdev->dev, DRV_VERSION);
dev_alert(&pdev->dev, "inic162x support is broken with common data corruption issues and will be disabled by default, contact [email protected] if in production use\n");
/* alloc host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
if (!host || !hpriv)
return -ENOMEM;
host->private_data = hpriv;
/* Acquire resources and fill host. Note that PCI and cardbus
* use different BARs.
*/
rc = pcim_enable_device(pdev);
if (rc)
return rc;
if (pci_resource_flags(pdev, MMIO_BAR_PCI) & IORESOURCE_MEM)
mmio_bar = MMIO_BAR_PCI;
else
mmio_bar = MMIO_BAR_CARDBUS;
rc = pcim_iomap_regions(pdev, 1 << mmio_bar, DRV_NAME);
if (rc)
return rc;
host->iomap = iomap = pcim_iomap_table(pdev);
hpriv->mmio_base = iomap[mmio_bar];
hpriv->cached_hctl = readw(hpriv->mmio_base + HOST_CTL);
for (i = 0; i < NR_PORTS; i++) {
struct ata_port *ap = host->ports[i];
ata_port_pbar_desc(ap, mmio_bar, -1, "mmio");
ata_port_pbar_desc(ap, mmio_bar, i * PORT_SIZE, "port");
}
/* Set dma_mask. This device doesn't support 64-bit addressing. */
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev, "32-bit DMA enable failed\n");
return rc;
}
rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
if (rc) {
dev_err(&pdev->dev, "failed to initialize controller\n");
return rc;
}
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, inic_interrupt, IRQF_SHARED,
&inic_sht);
}
static const struct pci_device_id inic_pci_tbl[] = {
{ PCI_VDEVICE(INIT, 0x1622), },
{ },
};
static struct pci_driver inic_pci_driver = {
.name = DRV_NAME,
.id_table = inic_pci_tbl,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = inic_pci_device_resume,
#endif
.probe = inic_init_one,
.remove = ata_pci_remove_one,
};
module_pci_driver(inic_pci_driver);
MODULE_AUTHOR("Tejun Heo");
MODULE_DESCRIPTION("low-level driver for Initio 162x SATA");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, inic_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/sata_inic162x.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Marvell PATA driver.
*
* For the moment we drive the PATA port in legacy mode. That
* isn't making full use of the device functionality but it is
* easy to get working.
*
* (c) 2006 Red Hat
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define DRV_NAME "pata_marvell"
#define DRV_VERSION "0.1.6"
/**
* marvell_pata_active - check if PATA is active
* @pdev: PCI device
*
* Returns 1 if the PATA port may be active. We know how to check this
* for the 6145 but not the other devices
*/
static int marvell_pata_active(struct pci_dev *pdev)
{
u32 devices;
void __iomem *barp;
/* We don't yet know how to do this for other devices */
if (pdev->device != 0x6145)
return 1;
barp = pci_iomap(pdev, 5, 0x10);
if (barp == NULL)
return -ENOMEM;
devices = ioread32(barp + 0x0C);
pci_iounmap(pdev, barp);
if (devices & 0x10)
return 1;
return 0;
}
/**
* marvell_pre_reset - probe begin
* @link: link
* @deadline: deadline jiffies for the operation
*
* Perform the PATA port setup we need.
*/
static int marvell_pre_reset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (pdev->device == 0x6145 && ap->port_no == 0 &&
!marvell_pata_active(pdev)) /* PATA enable ? */
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
static int marvell_cable_detect(struct ata_port *ap)
{
/* Cable type */
switch(ap->port_no)
{
case 0:
if (!ap->ioaddr.bmdma_addr)
return ATA_CBL_PATA_UNK;
if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;
case 1: /* Legacy SATA port */
return ATA_CBL_SATA;
}
BUG();
return 0; /* Our BUG macro needs the right markup */
}
/* No PIO or DMA methods needed for this device */
static const struct scsi_host_template marvell_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations marvell_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = marvell_cable_detect,
.prereset = marvell_pre_reset,
};
/**
* marvell_init_one - Register Marvell ATA PCI device with kernel services
* @pdev: PCI device to register
* @id: PCI device ID
*
* Called from kernel PCI layer.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &marvell_ops,
};
static const struct ata_port_info info_sata = {
/* Slave possible as it's magically mapped, not real */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &marvell_ops,
};
const struct ata_port_info *ppi[] = { &info, &info_sata };
if (pdev->device == 0x6101)
ppi[1] = &ata_dummy_port_info;
#if IS_ENABLED(CONFIG_SATA_AHCI)
if (!marvell_pata_active(pdev)) {
dev_info(&pdev->dev,
"PATA port not active, deferring to AHCI driver.\n");
return -ENODEV;
}
#endif
return ata_pci_bmdma_init_one(pdev, ppi, &marvell_sht, NULL, 0);
}
static const struct pci_device_id marvell_pci_tbl[] = {
{ PCI_DEVICE(0x11AB, 0x6101), },
{ PCI_DEVICE(0x11AB, 0x6121), },
{ PCI_DEVICE(0x11AB, 0x6123), },
{ PCI_DEVICE(0x11AB, 0x6145), },
{ PCI_DEVICE(0x1B4B, 0x91A0), },
{ PCI_DEVICE(0x1B4B, 0x91A4), },
{ } /* terminate list */
};
static struct pci_driver marvell_pci_driver = {
.name = DRV_NAME,
.id_table = marvell_pci_tbl,
.probe = marvell_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = ata_pci_device_resume,
#endif
};
module_pci_driver(marvell_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell ATA in legacy mode");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, marvell_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_marvell.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* libata-trace.c - trace functions for libata
*
* Copyright 2015 Hannes Reinecke
* Copyright 2015 SUSE Linux GmbH
*/
#include <linux/kernel.h>
#include <linux/trace_seq.h>
#include <trace/events/libata.h>
const char *
libata_trace_parse_status(struct trace_seq *p, unsigned char status)
{
const char *ret = trace_seq_buffer_ptr(p);
trace_seq_printf(p, "{ ");
if (status & ATA_BUSY)
trace_seq_printf(p, "BUSY ");
if (status & ATA_DRDY)
trace_seq_printf(p, "DRDY ");
if (status & ATA_DF)
trace_seq_printf(p, "DF ");
if (status & ATA_DSC)
trace_seq_printf(p, "DSC ");
if (status & ATA_DRQ)
trace_seq_printf(p, "DRQ ");
if (status & ATA_CORR)
trace_seq_printf(p, "CORR ");
if (status & ATA_SENSE)
trace_seq_printf(p, "SENSE ");
if (status & ATA_ERR)
trace_seq_printf(p, "ERR ");
trace_seq_putc(p, '}');
trace_seq_putc(p, 0);
return ret;
}
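/*
* Illustrative example, not part of the driver: a status byte of 0x58
* (ATA_DRDY | ATA_DSC | ATA_DRQ) is rendered as "{ DRDY DSC DRQ }".
*/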
const char *
libata_trace_parse_host_stat(struct trace_seq *p, unsigned char host_stat)
{
const char *ret = trace_seq_buffer_ptr(p);
trace_seq_printf(p, "{ ");
if (host_stat & ATA_DMA_INTR)
trace_seq_printf(p, "INTR ");
if (host_stat & ATA_DMA_ERR)
trace_seq_printf(p, "ERR ");
if (host_stat & ATA_DMA_ACTIVE)
trace_seq_printf(p, "ACTIVE ");
trace_seq_putc(p, '}');
trace_seq_putc(p, 0);
return ret;
}
const char *
libata_trace_parse_eh_action(struct trace_seq *p, unsigned int eh_action)
{
const char *ret = trace_seq_buffer_ptr(p);
trace_seq_printf(p, "%x", eh_action);
if (eh_action) {
trace_seq_printf(p, "{ ");
if (eh_action & ATA_EH_REVALIDATE)
trace_seq_printf(p, "REVALIDATE ");
if ((eh_action & (ATA_EH_SOFTRESET | ATA_EH_HARDRESET)) ==
(ATA_EH_SOFTRESET | ATA_EH_HARDRESET))
trace_seq_printf(p, "RESET ");
else if (eh_action & ATA_EH_SOFTRESET)
trace_seq_printf(p, "SOFTRESET ");
else if (eh_action & ATA_EH_HARDRESET)
trace_seq_printf(p, "HARDRESET ");
if (eh_action & ATA_EH_ENABLE_LINK)
trace_seq_printf(p, "ENABLE_LINK ");
if (eh_action & ATA_EH_PARK)
trace_seq_printf(p, "PARK ");
trace_seq_putc(p, '}');
}
trace_seq_putc(p, 0);
return ret;
}
const char *
libata_trace_parse_eh_err_mask(struct trace_seq *p, unsigned int eh_err_mask)
{
const char *ret = trace_seq_buffer_ptr(p);
trace_seq_printf(p, "%x", eh_err_mask);
if (eh_err_mask) {
trace_seq_printf(p, "{ ");
if (eh_err_mask & AC_ERR_DEV)
trace_seq_printf(p, "DEV ");
if (eh_err_mask & AC_ERR_HSM)
trace_seq_printf(p, "HSM ");
if (eh_err_mask & AC_ERR_TIMEOUT)
trace_seq_printf(p, "TIMEOUT ");
if (eh_err_mask & AC_ERR_MEDIA)
trace_seq_printf(p, "MEDIA ");
if (eh_err_mask & AC_ERR_ATA_BUS)
trace_seq_printf(p, "ATA_BUS ");
if (eh_err_mask & AC_ERR_HOST_BUS)
trace_seq_printf(p, "HOST_BUS ");
if (eh_err_mask & AC_ERR_SYSTEM)
trace_seq_printf(p, "SYSTEM ");
if (eh_err_mask & AC_ERR_INVALID)
trace_seq_printf(p, "INVALID ");
if (eh_err_mask & AC_ERR_OTHER)
trace_seq_printf(p, "OTHER ");
if (eh_err_mask & AC_ERR_NODEV_HINT)
trace_seq_printf(p, "NODEV_HINT ");
if (eh_err_mask & AC_ERR_NCQ)
trace_seq_printf(p, "NCQ ");
trace_seq_putc(p, '}');
}
trace_seq_putc(p, 0);
return ret;
}
const char *
libata_trace_parse_qc_flags(struct trace_seq *p, unsigned int qc_flags)
{
const char *ret = trace_seq_buffer_ptr(p);
trace_seq_printf(p, "%x", qc_flags);
if (qc_flags) {
trace_seq_printf(p, "{ ");
if (qc_flags & ATA_QCFLAG_ACTIVE)
trace_seq_printf(p, "ACTIVE ");
if (qc_flags & ATA_QCFLAG_DMAMAP)
trace_seq_printf(p, "DMAMAP ");
if (qc_flags & ATA_QCFLAG_IO)
trace_seq_printf(p, "IO ");
if (qc_flags & ATA_QCFLAG_RESULT_TF)
trace_seq_printf(p, "RESULT_TF ");
if (qc_flags & ATA_QCFLAG_CLEAR_EXCL)
trace_seq_printf(p, "CLEAR_EXCL ");
if (qc_flags & ATA_QCFLAG_QUIET)
trace_seq_printf(p, "QUIET ");
if (qc_flags & ATA_QCFLAG_RETRY)
trace_seq_printf(p, "RETRY ");
if (qc_flags & ATA_QCFLAG_EH)
trace_seq_printf(p, "FAILED ");
if (qc_flags & ATA_QCFLAG_SENSE_VALID)
trace_seq_printf(p, "SENSE_VALID ");
if (qc_flags & ATA_QCFLAG_EH_SCHEDULED)
trace_seq_printf(p, "EH_SCHEDULED ");
trace_seq_putc(p, '}');
}
trace_seq_putc(p, 0);
return ret;
}
const char *
libata_trace_parse_tf_flags(struct trace_seq *p, unsigned int tf_flags)
{
const char *ret = trace_seq_buffer_ptr(p);
trace_seq_printf(p, "%x", tf_flags);
if (tf_flags) {
trace_seq_printf(p, "{ ");
if (tf_flags & ATA_TFLAG_LBA48)
trace_seq_printf(p, "LBA48 ");
if (tf_flags & ATA_TFLAG_ISADDR)
trace_seq_printf(p, "ISADDR ");
if (tf_flags & ATA_TFLAG_DEVICE)
trace_seq_printf(p, "DEV ");
if (tf_flags & ATA_TFLAG_WRITE)
trace_seq_printf(p, "WRITE ");
if (tf_flags & ATA_TFLAG_LBA)
trace_seq_printf(p, "LBA ");
if (tf_flags & ATA_TFLAG_FUA)
trace_seq_printf(p, "FUA ");
if (tf_flags & ATA_TFLAG_POLLING)
trace_seq_printf(p, "POLL ");
trace_seq_putc(p, '}');
}
trace_seq_putc(p, 0);
return ret;
}
const char *
libata_trace_parse_subcmd(struct trace_seq *p, unsigned char cmd,
unsigned char feature, unsigned char hob_nsect)
{
const char *ret = trace_seq_buffer_ptr(p);
switch (cmd) {
case ATA_CMD_FPDMA_RECV:
switch (hob_nsect & 0x5f) {
case ATA_SUBCMD_FPDMA_RECV_RD_LOG_DMA_EXT:
trace_seq_printf(p, " READ_LOG_DMA_EXT");
break;
case ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN:
trace_seq_printf(p, " ZAC_MGMT_IN");
break;
}
break;
case ATA_CMD_FPDMA_SEND:
switch (hob_nsect & 0x5f) {
case ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT:
trace_seq_printf(p, " WRITE_LOG_DMA_EXT");
break;
case ATA_SUBCMD_FPDMA_SEND_DSM:
trace_seq_printf(p, " DATASET_MANAGEMENT");
break;
}
break;
case ATA_CMD_NCQ_NON_DATA:
switch (feature) {
case ATA_SUBCMD_NCQ_NON_DATA_ABORT_QUEUE:
trace_seq_printf(p, " ABORT_QUEUE");
break;
case ATA_SUBCMD_NCQ_NON_DATA_SET_FEATURES:
trace_seq_printf(p, " SET_FEATURES");
break;
case ATA_SUBCMD_NCQ_NON_DATA_ZERO_EXT:
trace_seq_printf(p, " ZERO_EXT");
break;
case ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT:
trace_seq_printf(p, " ZAC_MGMT_OUT");
break;
}
break;
case ATA_CMD_ZAC_MGMT_IN:
switch (feature) {
case ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES:
trace_seq_printf(p, " REPORT_ZONES");
break;
}
break;
case ATA_CMD_ZAC_MGMT_OUT:
switch (feature) {
case ATA_SUBCMD_ZAC_MGMT_OUT_CLOSE_ZONE:
trace_seq_printf(p, " CLOSE_ZONE");
break;
case ATA_SUBCMD_ZAC_MGMT_OUT_FINISH_ZONE:
trace_seq_printf(p, " FINISH_ZONE");
break;
case ATA_SUBCMD_ZAC_MGMT_OUT_OPEN_ZONE:
trace_seq_printf(p, " OPEN_ZONE");
break;
case ATA_SUBCMD_ZAC_MGMT_OUT_RESET_WRITE_POINTER:
trace_seq_printf(p, " RESET_WRITE_POINTER");
break;
}
break;
}
trace_seq_putc(p, 0);
return ret;
}
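/*
* Illustrative example, not part of the driver: cmd = ATA_CMD_ZAC_MGMT_OUT
* with feature = ATA_SUBCMD_ZAC_MGMT_OUT_CLOSE_ZONE appends " CLOSE_ZONE"
* to the buffer; unrecognized cmd/subcommand pairs leave it empty.
*/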
| linux-master | drivers/ata/libata-trace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Faraday Technology FTIDE010 driver
* Copyright (C) 2017 Linus Walleij <[email protected]>
*
* Includes portions of the SL2312/SL3516/Gemini PATA driver
* Copyright (C) 2003 StorLine, Inc <[email protected]>
* Copyright (C) 2009 Janos Laube <[email protected]>
* Copyright (C) 2010 Frederic Pecourt <[email protected]>
* Copyright (C) 2011 Tobias Waldvogel <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/clk.h>
#include "sata_gemini.h"
#define DRV_NAME "pata_ftide010"
/**
* struct ftide010 - state container for the Faraday FTIDE010
* @dev: pointer back to the device representing this controller
* @base: remapped I/O space address
* @pclk: peripheral clock for the IDE block
* @host: pointer to the ATA host for this device
* @master_cbl: master cable type
* @slave_cbl: slave cable type
* @sg: Gemini SATA bridge pointer, if running on the Gemini
* @master_to_sata0: Gemini SATA bridge: the ATA master is connected
* to the SATA0 bridge
* @slave_to_sata0: Gemini SATA bridge: the ATA slave is connected
* to the SATA0 bridge
* @master_to_sata1: Gemini SATA bridge: the ATA master is connected
* to the SATA1 bridge
* @slave_to_sata1: Gemini SATA bridge: the ATA slave is connected
* to the SATA1 bridge
*/
struct ftide010 {
struct device *dev;
void __iomem *base;
struct clk *pclk;
struct ata_host *host;
unsigned int master_cbl;
unsigned int slave_cbl;
/* Gemini-specific properties */
struct sata_gemini *sg;
bool master_to_sata0;
bool slave_to_sata0;
bool master_to_sata1;
bool slave_to_sata1;
};
#define FTIDE010_DMA_REG 0x00
#define FTIDE010_DMA_STATUS 0x02
#define FTIDE010_IDE_BMDTPR 0x04
#define FTIDE010_IDE_DEVICE_ID 0x08
#define FTIDE010_PIO_TIMING 0x10
#define FTIDE010_MWDMA_TIMING 0x11
#define FTIDE010_UDMA_TIMING0 0x12 /* Master */
#define FTIDE010_UDMA_TIMING1 0x13 /* Slave */
#define FTIDE010_CLK_MOD 0x14
/* These registers are mapped directly to the IDE registers */
#define FTIDE010_CMD_DATA 0x20
#define FTIDE010_ERROR_FEATURES 0x21
#define FTIDE010_NSECT 0x22
#define FTIDE010_LBAL 0x23
#define FTIDE010_LBAM 0x24
#define FTIDE010_LBAH 0x25
#define FTIDE010_DEVICE 0x26
#define FTIDE010_STATUS_COMMAND 0x27
#define FTIDE010_ALTSTAT_CTRL 0x36
/* Set this bit for UDMA mode 5 and 6 */
#define FTIDE010_UDMA_TIMING_MODE_56 BIT(7)
/* 0 = 50 MHz, 1 = 66 MHz */
#define FTIDE010_CLK_MOD_DEV0_CLK_SEL BIT(0)
#define FTIDE010_CLK_MOD_DEV1_CLK_SEL BIT(1)
/* Enable UDMA on a device */
#define FTIDE010_CLK_MOD_DEV0_UDMA_EN BIT(4)
#define FTIDE010_CLK_MOD_DEV1_UDMA_EN BIT(5)
static const struct scsi_host_template pata_ftide010_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
/*
* Bus timings
*
* The unit of the below required timings is two clock periods of the ATA
* reference clock which is 30 nanoseconds per unit at 66MHz and 20
* nanoseconds per unit at 50 MHz. The PIO timings assume 33MHz speed for
* PIO.
*
* pio_active_time: array of 5 elements for T2 timing for Mode 0,
* 1, 2, 3 and 4. Range 0..15.
* pio_recovery_time: array of 5 elements for T2l timing for Mode 0,
* 1, 2, 3 and 4. Range 0..15.
* mwdma_50_active_time: array of 3 elements for Td timing for multi
* word DMA, Mode 0, 1 and 2 at 50 MHz. Range 0..15.
* mwdma_50_recovery_time: array of 3 elements for Tk timing for
* multi word DMA, Mode 0, 1 and 2 at 50 MHz. Range 0..15.
* mwdma_66_active_time: array of 3 elements for Td timing for multi
* word DMA, Mode 0, 1 and 2 at 66 MHz. Range 0..15.
* mwdma_66_recovery_time: array of 3 elements for Tk timing for
* multi word DMA, Mode 0, 1 and 2 at 66 MHz. Range 0..15.
* udma_50_setup_time: array of 6 elements for Tvds timing for ultra
* DMA, Mode 0, 1, 2, 3, 4 and 5 at 50 MHz. Range 0..7.
* udma_50_hold_time: array of 6 elements for Tdvh timing for ultra
* DMA, Mode 0, 1, 2, 3, 4 and 5 at 50 MHz. Range 0..7.
* udma_66_setup_time: array of 7 elements for Tvds timing for ultra
* DMA, Mode 0, 1, 2, 3, 4, 5 and 6 at 66 MHz. Range 0..7.
* udma_66_hold_time: array of 7 elements for Tdvh timing for ultra
* DMA, Mode 0, 1, 2, 3, 4, 5 and 6 at 66 MHz. Range 0..7.
*/
static const u8 pio_active_time[5] = {10, 10, 10, 3, 3};
static const u8 pio_recovery_time[5] = {10, 3, 1, 3, 1};
static const u8 mwdma_50_active_time[3] = {6, 2, 2};
static const u8 mwdma_50_recovery_time[3] = {6, 2, 1};
static const u8 mwdma_66_active_time[3] = {8, 3, 3};
static const u8 mwdma_66_recovery_time[3] = {8, 2, 1};
static const u8 udma_50_setup_time[6] = {3, 3, 2, 2, 1, 1};
static const u8 udma_50_hold_time[6] = {3, 1, 1, 1, 1, 1};
/* NB: the mode 4..6 setup entries and the whole hold table below were
* truncated; the values filled in are assumptions chosen to fit the
* setup << 4 | hold register layout used in ftide010_set_dmamode(),
* not figures verified against the FTIDE010 datasheet.
*/
static const u8 udma_66_setup_time[7] = {4, 4, 3, 2, 2, 2, 1};
static const u8 udma_66_hold_time[7] = {2, 1, 1, 1, 1, 1, 1};
/*
* We set 66 MHz for all MWDMA modes
*/
static const bool set_mdma_66_mhz[] = { true, true, true, true };
/*
* We set 66 MHz for UDMA modes 3, 4 and 6 and no others
*/
static const bool set_udma_66_mhz[] = { false, false, false, true, true, false, true };
static void ftide010_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct ftide010 *ftide = ap->host->private_data;
u8 speed = adev->dma_mode;
u8 devno = adev->devno & 1;
u8 udma_en_mask;
u8 f66m_en_mask;
u8 clkreg;
u8 timreg;
u8 i;
/* Target device 0 (master) or 1 (slave) */
if (!devno) {
udma_en_mask = FTIDE010_CLK_MOD_DEV0_UDMA_EN;
f66m_en_mask = FTIDE010_CLK_MOD_DEV0_CLK_SEL;
} else {
udma_en_mask = FTIDE010_CLK_MOD_DEV1_UDMA_EN;
f66m_en_mask = FTIDE010_CLK_MOD_DEV1_CLK_SEL;
}
clkreg = readb(ftide->base + FTIDE010_CLK_MOD);
clkreg &= ~udma_en_mask;
clkreg &= ~f66m_en_mask;
if (speed & XFER_UDMA_0) {
i = speed & ~XFER_UDMA_0;
dev_dbg(ftide->dev, "set UDMA mode %02x, index %d\n",
speed, i);
clkreg |= udma_en_mask;
if (set_udma_66_mhz[i]) {
clkreg |= f66m_en_mask;
timreg = udma_66_setup_time[i] << 4 |
udma_66_hold_time[i];
} else {
timreg = udma_50_setup_time[i] << 4 |
udma_50_hold_time[i];
}
/* A special bit needs to be set for modes 5 and 6 */
if (i >= 5)
timreg |= FTIDE010_UDMA_TIMING_MODE_56;
dev_dbg(ftide->dev, "UDMA write clkreg = %02x, timreg = %02x\n",
clkreg, timreg);
writeb(clkreg, ftide->base + FTIDE010_CLK_MOD);
writeb(timreg, ftide->base + FTIDE010_UDMA_TIMING0 + devno);
} else {
i = speed & ~XFER_MW_DMA_0;
dev_dbg(ftide->dev, "set MWDMA mode %02x, index %d\n",
speed, i);
if (set_mdma_66_mhz[i]) {
clkreg |= f66m_en_mask;
timreg = mwdma_66_active_time[i] << 4 |
mwdma_66_recovery_time[i];
} else {
timreg = mwdma_50_active_time[i] << 4 |
mwdma_50_recovery_time[i];
}
dev_dbg(ftide->dev,
"MWDMA write clkreg = %02x, timreg = %02x\n",
clkreg, timreg);
/* This will affect all devices */
writeb(clkreg, ftide->base + FTIDE010_CLK_MOD);
writeb(timreg, ftide->base + FTIDE010_MWDMA_TIMING);
}
/*
* Store the current device (master or slave) in ap->private_data
* so that .qc_issue() can detect if this changes and reprogram
* the DMA settings.
*/
ap->private_data = adev;
return;
}
static void ftide010_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ftide010 *ftide = ap->host->private_data;
u8 pio = adev->pio_mode - XFER_PIO_0;
dev_dbg(ftide->dev, "set PIO mode %02x, index %d\n",
adev->pio_mode, pio);
writeb(pio_active_time[pio] << 4 | pio_recovery_time[pio],
ftide->base + FTIDE010_PIO_TIMING);
}
/*
* We implement our own qc_issue() callback since we may need to set up
* the timings differently for master and slave transfers: the CLK_MOD_REG
* and MWDMA_TIMING_REG is shared between master and slave, so reprogramming
* this may be necessary.
*/
static unsigned int ftide010_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
/*
* If the device changed, i.e. slave->master, master->slave,
* then set up the DMA mode again so we are sure the timings
* are correct.
*/
if (adev != ap->private_data && ata_dma_enabled(adev))
ftide010_set_dmamode(ap, adev);
return ata_bmdma_qc_issue(qc);
}
static struct ata_port_operations pata_ftide010_port_ops = {
.inherits = &ata_bmdma_port_ops,
.set_dmamode = ftide010_set_dmamode,
.set_piomode = ftide010_set_piomode,
.qc_issue = ftide010_qc_issue,
};
static struct ata_port_info ftide010_port_info = {
.flags = ATA_FLAG_SLAVE_POSS,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.pio_mask = ATA_PIO4,
.port_ops = &pata_ftide010_port_ops,
};
#if IS_ENABLED(CONFIG_SATA_GEMINI)
static int pata_ftide010_gemini_port_start(struct ata_port *ap)
{
struct ftide010 *ftide = ap->host->private_data;
struct device *dev = ftide->dev;
struct sata_gemini *sg = ftide->sg;
int bridges = 0;
int ret;
ret = ata_bmdma_port_start(ap);
if (ret)
return ret;
if (ftide->master_to_sata0) {
dev_info(dev, "SATA0 (master) start\n");
ret = gemini_sata_start_bridge(sg, 0);
if (!ret)
bridges++;
}
if (ftide->master_to_sata1) {
dev_info(dev, "SATA1 (master) start\n");
ret = gemini_sata_start_bridge(sg, 1);
if (!ret)
bridges++;
}
/* Avoid double-starting */
if (ftide->slave_to_sata0 && !ftide->master_to_sata0) {
dev_info(dev, "SATA0 (slave) start\n");
ret = gemini_sata_start_bridge(sg, 0);
if (!ret)
bridges++;
}
/* Avoid double-starting */
if (ftide->slave_to_sata1 && !ftide->master_to_sata1) {
dev_info(dev, "SATA1 (slave) start\n");
ret = gemini_sata_start_bridge(sg, 1);
if (!ret)
bridges++;
}
dev_info(dev, "brought %d bridges online\n", bridges);
return (bridges > 0) ? 0 : -EINVAL; // -ENODEV;
}
static void pata_ftide010_gemini_port_stop(struct ata_port *ap)
{
struct ftide010 *ftide = ap->host->private_data;
struct device *dev = ftide->dev;
struct sata_gemini *sg = ftide->sg;
if (ftide->master_to_sata0) {
dev_info(dev, "SATA0 (master) stop\n");
gemini_sata_stop_bridge(sg, 0);
}
if (ftide->master_to_sata1) {
dev_info(dev, "SATA1 (master) stop\n");
gemini_sata_stop_bridge(sg, 1);
}
/* Avoid double-stopping */
if (ftide->slave_to_sata0 && !ftide->master_to_sata0) {
dev_info(dev, "SATA0 (slave) stop\n");
gemini_sata_stop_bridge(sg, 0);
}
/* Avoid double-stopping */
if (ftide->slave_to_sata1 && !ftide->master_to_sata1) {
dev_info(dev, "SATA1 (slave) stop\n");
gemini_sata_stop_bridge(sg, 1);
}
}
static int pata_ftide010_gemini_cable_detect(struct ata_port *ap)
{
struct ftide010 *ftide = ap->host->private_data;
/*
* Return the master cable, I have no clue how to return a different
* cable for the slave than for the master.
*/
return ftide->master_cbl;
}
static int pata_ftide010_gemini_init(struct ftide010 *ftide,
struct ata_port_info *pi,
bool is_ata1)
{
struct device *dev = ftide->dev;
struct sata_gemini *sg;
enum gemini_muxmode muxmode;
/* Look up SATA bridge */
sg = gemini_sata_bridge_get();
if (IS_ERR(sg))
return PTR_ERR(sg);
ftide->sg = sg;
muxmode = gemini_sata_get_muxmode(sg);
/* Special ops */
pata_ftide010_port_ops.port_start =
pata_ftide010_gemini_port_start;
pata_ftide010_port_ops.port_stop =
pata_ftide010_gemini_port_stop;
pata_ftide010_port_ops.cable_detect =
pata_ftide010_gemini_cable_detect;
/* Flag port as SATA-capable */
if (gemini_sata_bridge_enabled(sg, is_ata1))
pi->flags |= ATA_FLAG_SATA;
/* This device has broken DMA, only PIO works */
if (of_machine_is_compatible("itian,sq201")) {
pi->mwdma_mask = 0;
pi->udma_mask = 0;
}
/*
 * We assume that a simple 40-wire cable is used in the PATA mode.
 * If you're adding a system using the PATA interface, make sure
 * the right cable is set up here; it might be necessary to use
 * special hardware detection or to encode the cable type in the
 * device tree with special properties.
*/
if (!is_ata1) {
switch (muxmode) {
case GEMINI_MUXMODE_0:
ftide->master_cbl = ATA_CBL_SATA;
ftide->slave_cbl = ATA_CBL_PATA40;
ftide->master_to_sata0 = true;
break;
case GEMINI_MUXMODE_1:
ftide->master_cbl = ATA_CBL_SATA;
ftide->slave_cbl = ATA_CBL_NONE;
ftide->master_to_sata0 = true;
break;
case GEMINI_MUXMODE_2:
ftide->master_cbl = ATA_CBL_PATA40;
ftide->slave_cbl = ATA_CBL_PATA40;
break;
case GEMINI_MUXMODE_3:
ftide->master_cbl = ATA_CBL_SATA;
ftide->slave_cbl = ATA_CBL_SATA;
ftide->master_to_sata0 = true;
ftide->slave_to_sata1 = true;
break;
}
} else {
switch (muxmode) {
case GEMINI_MUXMODE_0:
ftide->master_cbl = ATA_CBL_SATA;
ftide->slave_cbl = ATA_CBL_NONE;
ftide->master_to_sata1 = true;
break;
case GEMINI_MUXMODE_1:
ftide->master_cbl = ATA_CBL_SATA;
ftide->slave_cbl = ATA_CBL_PATA40;
ftide->master_to_sata1 = true;
break;
case GEMINI_MUXMODE_2:
ftide->master_cbl = ATA_CBL_SATA;
ftide->slave_cbl = ATA_CBL_SATA;
ftide->slave_to_sata0 = true;
ftide->master_to_sata1 = true;
break;
case GEMINI_MUXMODE_3:
ftide->master_cbl = ATA_CBL_PATA40;
ftide->slave_cbl = ATA_CBL_PATA40;
break;
}
}
dev_info(dev, "set up Gemini PATA%d\n", is_ata1);
return 0;
}
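/*
 * Summary of the mux modes as encoded by the two switches above
 * (bridge assignments follow the master_to/slave_to flags):
 *
 *             ATA0 master  ATA0 slave  ATA1 master  ATA1 slave
 * MUXMODE_0   SATA0        PATA40      SATA1        none
 * MUXMODE_1   SATA0        none        SATA1        PATA40
 * MUXMODE_2   PATA40       PATA40      SATA1        SATA0
 * MUXMODE_3   SATA0        SATA1       PATA40       PATA40
 */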
#else
static int pata_ftide010_gemini_init(struct ftide010 *ftide,
struct ata_port_info *pi,
bool is_ata1)
{
return -ENOTSUPP;
}
#endif
static int pata_ftide010_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct ata_port_info pi = ftide010_port_info;
const struct ata_port_info *ppi[] = { &pi, NULL };
struct ftide010 *ftide;
struct resource *res;
int irq;
int ret;
int i;
ftide = devm_kzalloc(dev, sizeof(*ftide), GFP_KERNEL);
if (!ftide)
return -ENOMEM;
ftide->dev = dev;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ftide->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(ftide->base))
return PTR_ERR(ftide->base);
ftide->pclk = devm_clk_get(dev, "PCLK");
if (!IS_ERR(ftide->pclk)) {
ret = clk_prepare_enable(ftide->pclk);
if (ret) {
dev_err(dev, "failed to enable PCLK\n");
return ret;
}
}
/* Some special Cortina Gemini init, if needed */
if (of_device_is_compatible(np, "cortina,gemini-pata")) {
/*
* We need to know which instance is probing (the
* Gemini has two instances of FTIDE010) and we do
* this simply by looking at the physical base
* address, which is 0x63400000 for ATA1, else we
* are ATA0. This will also set up the cable types.
*/
ret = pata_ftide010_gemini_init(ftide,
&pi,
(res->start == 0x63400000));
if (ret)
goto err_dis_clk;
} else {
/* Else assume we are connected using PATA40 */
ftide->master_cbl = ATA_CBL_PATA40;
ftide->slave_cbl = ATA_CBL_PATA40;
}
ftide->host = ata_host_alloc_pinfo(dev, ppi, 1);
if (!ftide->host) {
ret = -ENOMEM;
goto err_dis_clk;
}
ftide->host->private_data = ftide;
for (i = 0; i < ftide->host->n_ports; i++) {
struct ata_port *ap = ftide->host->ports[i];
struct ata_ioports *ioaddr = &ap->ioaddr;
ioaddr->bmdma_addr = ftide->base + FTIDE010_DMA_REG;
ioaddr->cmd_addr = ftide->base + FTIDE010_CMD_DATA;
ioaddr->ctl_addr = ftide->base + FTIDE010_ALTSTAT_CTRL;
ioaddr->altstatus_addr = ftide->base + FTIDE010_ALTSTAT_CTRL;
ata_sff_std_ports(ioaddr);
}
dev_info(dev, "device ID %08x, irq %d, reg %pR\n",
readl(ftide->base + FTIDE010_IDE_DEVICE_ID), irq, res);
ret = ata_host_activate(ftide->host, irq, ata_bmdma_interrupt,
0, &pata_ftide010_sht);
if (ret)
goto err_dis_clk;
return 0;
err_dis_clk:
clk_disable_unprepare(ftide->pclk);
return ret;
}
static void pata_ftide010_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct ftide010 *ftide = host->private_data;
ata_host_detach(ftide->host);
clk_disable_unprepare(ftide->pclk);
}
static const struct of_device_id pata_ftide010_of_match[] = {
{ .compatible = "faraday,ftide010", },
{ /* sentinel */ }
};
static struct platform_driver pata_ftide010_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = pata_ftide010_of_match,
},
.probe = pata_ftide010_probe,
.remove_new = pata_ftide010_remove,
};
module_platform_driver(pata_ftide010_driver);
MODULE_DESCRIPTION("low level driver for Faraday Technology FTIDE010");
MODULE_AUTHOR("Linus Walleij <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/ata/pata_ftide010.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IDE tuning and bus mastering support for the CS5510/CS5520
* chipsets
*
* The CS5510/CS5520 are slightly unusual devices. Unlike the
* typical IDE controllers they do bus mastering with the drive in
* PIO mode and smarter silicon.
*
* The practical upshot of this is that we must always tune the
* drive for the right PIO mode. We must also ignore all the blacklists
* and the drive bus mastering DMA information. Also to confuse matters
* further we can do DMA on PIO only drives.
*
* DMA on the 5510 also requires we disable_hlt() during DMA on early
* revisions.
*
* *** This driver is strictly experimental ***
*
* (c) Copyright Red Hat Inc 2002
*
* Documentation:
* Not publicly available.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_cs5520"
#define DRV_VERSION "0.6.6"
struct pio_clocks
{
int address;
int assert;
int recovery;
};
static const struct pio_clocks cs5520_pio_clocks[] = {
{3, 6, 11},
{2, 5, 6},
{1, 4, 3},
{1, 3, 2},
{1, 2, 1}
};
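/*
 * Worked example of the packing done by cs5520_set_timings() below: for
 * PIO mode 2 the table gives assert = 4 and recovery = 3 clocks, so the
 * byte written into each timing register is (3 << 4) | 4 = 0x34.
 */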
/**
* cs5520_set_timings - program PIO timings
* @ap: ATA port
* @adev: ATA device
* @pio: PIO ID
*
* Program the PIO mode timings for the controller according to the pio
* clocking table.
*/
static void cs5520_set_timings(struct ata_port *ap, struct ata_device *adev, int pio)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int slave = adev->devno;
pio -= XFER_PIO_0;
/* Channel command timing */
pci_write_config_byte(pdev, 0x62 + ap->port_no,
(cs5520_pio_clocks[pio].recovery << 4) |
(cs5520_pio_clocks[pio].assert));
/* FIXME: should these use address ? */
/* Read command timing */
pci_write_config_byte(pdev, 0x64 + 4*ap->port_no + slave,
(cs5520_pio_clocks[pio].recovery << 4) |
(cs5520_pio_clocks[pio].assert));
/* Write command timing */
pci_write_config_byte(pdev, 0x66 + 4*ap->port_no + slave,
(cs5520_pio_clocks[pio].recovery << 4) |
(cs5520_pio_clocks[pio].assert));
}
/**
* cs5520_set_piomode - program PIO timings
* @ap: ATA port
* @adev: ATA device
*
* Program the PIO mode timings for the controller according to the pio
* clocking table.
*/
static void cs5520_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
cs5520_set_timings(ap, adev, adev->pio_mode);
}
static const struct scsi_host_template cs5520_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
};
static struct ata_port_operations cs5520_port_ops = {
.inherits = &ata_bmdma_port_ops,
.qc_prep = ata_bmdma_dumb_qc_prep,
.cable_detect = ata_cable_40wire,
.set_piomode = cs5520_set_piomode,
};
static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const unsigned int cmd_port[] = { 0x1F0, 0x170 };
static const unsigned int ctl_port[] = { 0x3F6, 0x376 };
struct ata_port_info pi = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.port_ops = &cs5520_port_ops,
};
const struct ata_port_info *ppi[2];
u8 pcicfg;
void __iomem *iomap[5];
struct ata_host *host;
struct ata_ioports *ioaddr;
int i, rc;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
/* IDE port enable bits */
pci_read_config_byte(pdev, 0x60, &pcicfg);
/* Check if the ATA ports are enabled */
if ((pcicfg & 3) == 0)
return -ENODEV;
ppi[0] = ppi[1] = &ata_dummy_port_info;
if (pcicfg & 1)
ppi[0] = &pi;
if (pcicfg & 2)
ppi[1] = &pi;
if ((pcicfg & 0x40) == 0) {
dev_warn(&pdev->dev, "DMA mode disabled. Enabling.\n");
pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
}
pi.mwdma_mask = id->driver_data;
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
if (!host)
return -ENOMEM;
/* Perform set up for DMA */
if (pci_enable_device_io(pdev)) {
dev_err(&pdev->dev, "unable to configure BAR2.\n");
return -ENODEV;
}
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
dev_err(&pdev->dev, "unable to configure DMA mask.\n");
return -ENODEV;
}
/* Map IO ports and initialize host accordingly */
iomap[0] = devm_ioport_map(&pdev->dev, cmd_port[0], 8);
iomap[1] = devm_ioport_map(&pdev->dev, ctl_port[0], 1);
iomap[2] = devm_ioport_map(&pdev->dev, cmd_port[1], 8);
iomap[3] = devm_ioport_map(&pdev->dev, ctl_port[1], 1);
iomap[4] = pcim_iomap(pdev, 2, 0);
if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
return -ENOMEM;
ioaddr = &host->ports[0]->ioaddr;
ioaddr->cmd_addr = iomap[0];
ioaddr->ctl_addr = iomap[1];
ioaddr->altstatus_addr = iomap[1];
ioaddr->bmdma_addr = iomap[4];
ata_sff_std_ports(ioaddr);
ata_port_desc(host->ports[0],
"cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]);
ata_port_pbar_desc(host->ports[0], 4, 0, "bmdma");
ioaddr = &host->ports[1]->ioaddr;
ioaddr->cmd_addr = iomap[2];
ioaddr->ctl_addr = iomap[3];
ioaddr->altstatus_addr = iomap[3];
ioaddr->bmdma_addr = iomap[4] + 8;
ata_sff_std_ports(ioaddr);
ata_port_desc(host->ports[1],
"cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]);
ata_port_pbar_desc(host->ports[1], 4, 8, "bmdma");
/* activate the host */
pci_set_master(pdev);
rc = ata_host_start(host);
if (rc)
return rc;
for (i = 0; i < 2; i++) {
static const int irq[] = { 14, 15 };
struct ata_port *ap = host->ports[i];
if (ata_port_is_dummy(ap))
continue;
rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
ata_bmdma_interrupt, 0, DRV_NAME, host);
if (rc)
return rc;
ata_port_desc(ap, "irq %d", irq[i]);
}
return ata_host_register(host, &cs5520_sht);
}
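/*
 * Note that the probe above does not use PCI BARs 0-3 at all: the
 * command/control blocks sit at the fixed legacy addresses 0x1F0/0x3F6
 * and 0x170/0x376 with ISA IRQs 14 and 15, and only BAR 2 (the bus
 * master registers) is mapped as a real PCI resource.
 */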
#ifdef CONFIG_PM_SLEEP
/**
* cs5520_reinit_one - device resume
* @pdev: PCI device
*
* Do any reconfiguration work needed by a resume from RAM. We need
 * to restore DMA mode support on BIOSes which disabled it.
*/
static int cs5520_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
u8 pcicfg;
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
pci_read_config_byte(pdev, 0x60, &pcicfg);
if ((pcicfg & 0x40) == 0)
pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
ata_host_resume(host);
return 0;
}
/**
* cs5520_pci_device_suspend - device suspend
* @pdev: PCI device
* @mesg: PM event message
*
 * We have to cut and paste bits from the standard method because
* the 5520 is a bit odd and not just a pure ATA device. As a result
* we must not disable it. The needed code is short and this avoids
* chip specific mess in the core code.
*/
static int cs5520_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = pci_get_drvdata(pdev);
ata_host_suspend(host, mesg);
pci_save_state(pdev);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
/* For now keep DMA off. We can set it for all but A rev CS5510 once the
core ATA code can handle it */
static const struct pci_device_id pata_cs5520[] = {
{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), },
{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), },
{ },
};
static struct pci_driver cs5520_pci_driver = {
.name = DRV_NAME,
.id_table = pata_cs5520,
.probe = cs5520_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = cs5520_pci_device_suspend,
.resume = cs5520_reinit_one,
#endif
};
module_pci_driver(cs5520_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Cyrix CS5510/5520");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pata_cs5520);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_cs5520.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DaVinci DA850 AHCI SATA platform driver
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/ahci_platform.h>
#include "ahci.h"
#define DRV_NAME "ahci_da850"
#define HARDRESET_RETRIES 5
/* SATA PHY Control Register offset from AHCI base */
#define SATA_P0PHYCR_REG 0x178
#define SATA_PHY_MPY(x) ((x) << 0)
#define SATA_PHY_LOS(x) ((x) << 6)
#define SATA_PHY_RXCDR(x) ((x) << 10)
#define SATA_PHY_RXEQ(x) ((x) << 13)
#define SATA_PHY_TXSWING(x) ((x) << 19)
#define SATA_PHY_ENPLL(x) ((x) << 31)
static void da850_sata_init(struct device *dev, void __iomem *pwrdn_reg,
void __iomem *ahci_base, u32 mpy)
{
unsigned int val;
/* Enable SATA clock receiver */
val = readl(pwrdn_reg);
val &= ~BIT(0);
writel(val, pwrdn_reg);
val = SATA_PHY_MPY(mpy) | SATA_PHY_LOS(1) | SATA_PHY_RXCDR(4) |
SATA_PHY_RXEQ(1) | SATA_PHY_TXSWING(3) | SATA_PHY_ENPLL(1);
writel(val, ahci_base + SATA_P0PHYCR_REG);
}
static u32 ahci_da850_calculate_mpy(unsigned long refclk_rate)
{
u32 pll_output = 1500000000, needed;
/*
* We need to determine the value of the multiplier (MPY) bits.
* In order to include the 12.5 multiplier we need to first divide
* the refclk rate by ten.
*
 * __div64_32() turned out to be unreliable, sometimes returning
 * incorrect results.
*/
WARN((refclk_rate % 10) != 0, "refclk must be divisible by 10");
needed = pll_output / (refclk_rate / 10);
/*
* What we have now is (multiplier * 10).
*
* Let's determine the actual register value we need to write.
*/
switch (needed) {
case 50:
return 0x1;
case 60:
return 0x2;
case 80:
return 0x4;
case 100:
return 0x5;
case 120:
return 0x6;
case 125:
return 0x7;
case 150:
return 0x8;
case 200:
return 0x9;
case 250:
return 0xa;
default:
/*
* We should have divided evenly - if not, return an invalid
* value.
*/
return 0;
}
}
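/*
 * Worked example: with a 100 MHz reference clock the function computes
 * needed = 1500000000 / (100000000 / 10) = 150, i.e. a multiplier of
 * 15.0, and returns the register value 0x8.
 */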
static int ahci_da850_softreset(struct ata_link *link,
unsigned int *class, unsigned long deadline)
{
int pmp, ret;
pmp = sata_srst_pmp(link);
/*
* There's an issue with the SATA controller on da850 SoCs: if we
* enable Port Multiplier support, but the drive is connected directly
* to the board, it can't be detected. As a workaround: if PMP is
* enabled, we first call ahci_do_softreset() and pass it the result of
* sata_srst_pmp(). If this call fails, we retry with pmp = 0.
*/
ret = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
if (pmp && ret == -EBUSY)
return ahci_do_softreset(link, class, 0,
deadline, ahci_check_ready);
return ret;
}
static int ahci_da850_hardreset(struct ata_link *link,
unsigned int *class, unsigned long deadline)
{
int ret, retry = HARDRESET_RETRIES;
bool online;
/*
* In order to correctly service the LCD controller of the da850 SoC,
* we increased the PLL0 frequency to 456MHz from the default 300MHz.
*
	 * This made the SATA controller unstable and the hardreset operation
	 * does not always succeed on the first try. Before really giving up
	 * on bringing the link up, retry the reset a couple of times.
*/
do {
ret = ahci_do_hardreset(link, class, deadline, &online);
if (online)
return ret;
} while (retry--);
return ret;
}
static struct ata_port_operations ahci_da850_port_ops = {
.inherits = &ahci_platform_ops,
.softreset = ahci_da850_softreset,
/*
* No need to override .pmp_softreset - it's only used for actual
* PMP-enabled ports.
*/
.hardreset = ahci_da850_hardreset,
.pmp_hardreset = ahci_da850_hardreset,
};
static const struct ata_port_info ahci_da850_port_info = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_da850_port_ops,
};
static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
static int ahci_da850_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
void __iomem *pwrdn_reg;
struct resource *res;
u32 mpy;
int rc;
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
/*
* Internally ahci_platform_get_resources() calls the bulk clocks
* get method or falls back to using a single clk_get_optional().
* This AHCI SATA controller uses two clocks: functional clock
* with "fck" connection id and external reference clock with
* "refclk" id. If we haven't got all of them re-try the clocks
* getting procedure with the explicitly specified ids.
*/
if (hpriv->n_clks < 2) {
hpriv->clks = devm_kcalloc(dev, 2, sizeof(*hpriv->clks), GFP_KERNEL);
if (!hpriv->clks)
return -ENOMEM;
hpriv->clks[0].id = "fck";
hpriv->clks[1].id = "refclk";
hpriv->n_clks = 2;
rc = devm_clk_bulk_get(dev, hpriv->n_clks, hpriv->clks);
if (rc)
return rc;
}
mpy = ahci_da850_calculate_mpy(clk_get_rate(hpriv->clks[1].clk));
if (mpy == 0) {
dev_err(dev, "invalid REFCLK multiplier value: 0x%x", mpy);
return -EINVAL;
}
rc = ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res) {
rc = -ENODEV;
goto disable_resources;
}
pwrdn_reg = devm_ioremap(dev, res->start, resource_size(res));
if (!pwrdn_reg) {
rc = -ENOMEM;
goto disable_resources;
}
da850_sata_init(dev, pwrdn_reg, hpriv->mmio, mpy);
rc = ahci_platform_init_host(pdev, hpriv, &ahci_da850_port_info,
&ahci_platform_sht);
if (rc)
goto disable_resources;
return 0;
disable_resources:
ahci_platform_disable_resources(hpriv);
return rc;
}
static SIMPLE_DEV_PM_OPS(ahci_da850_pm_ops, ahci_platform_suspend,
ahci_platform_resume);
static const struct of_device_id ahci_da850_of_match[] = {
{ .compatible = "ti,da850-ahci", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_da850_of_match);
static struct platform_driver ahci_da850_driver = {
.probe = ahci_da850_probe,
.remove_new = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_da850_of_match,
.pm = &ahci_da850_pm_ops,
},
};
module_platform_driver(ahci_da850_driver);
MODULE_DESCRIPTION("DaVinci DA850 AHCI SATA platform driver");
MODULE_AUTHOR("Bartlomiej Zolnierkiewicz <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/ata/ahci_da850.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* libata-sff.c - helper library for PCI IDE BMDMA
*
* Copyright 2003-2006 Red Hat, Inc. All rights reserved.
* Copyright 2003-2006 Jeff Garzik
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* Hardware documentation available from http://www.t13.org/ and
* http://www.sata-io.org/
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/highmem.h>
#include <trace/events/libata.h>
#include "libata.h"
static struct workqueue_struct *ata_sff_wq;
const struct ata_port_operations ata_sff_port_ops = {
.inherits = &ata_base_port_ops,
.qc_prep = ata_noop_qc_prep,
.qc_issue = ata_sff_qc_issue,
.qc_fill_rtf = ata_sff_qc_fill_rtf,
.freeze = ata_sff_freeze,
.thaw = ata_sff_thaw,
.prereset = ata_sff_prereset,
.softreset = ata_sff_softreset,
.hardreset = sata_sff_hardreset,
.postreset = ata_sff_postreset,
.error_handler = ata_sff_error_handler,
.sff_dev_select = ata_sff_dev_select,
.sff_check_status = ata_sff_check_status,
.sff_tf_load = ata_sff_tf_load,
.sff_tf_read = ata_sff_tf_read,
.sff_exec_command = ata_sff_exec_command,
.sff_data_xfer = ata_sff_data_xfer,
.sff_drain_fifo = ata_sff_drain_fifo,
.lost_interrupt = ata_sff_lost_interrupt,
};
EXPORT_SYMBOL_GPL(ata_sff_port_ops);
/**
* ata_sff_check_status - Read device status reg & clear interrupt
* @ap: port where the device is
*
* Reads ATA taskfile status register for currently-selected device
* and return its value. This also clears pending interrupts
* from this device
*
* LOCKING:
* Inherited from caller.
*/
u8 ata_sff_check_status(struct ata_port *ap)
{
return ioread8(ap->ioaddr.status_addr);
}
EXPORT_SYMBOL_GPL(ata_sff_check_status);
/**
* ata_sff_altstatus - Read device alternate status reg
* @ap: port where the device is
* @status: pointer to a status value
*
* Reads ATA alternate status register for currently-selected device
* and return its value.
*
* RETURN:
* true if the register exists, false if not.
*
* LOCKING:
* Inherited from caller.
*/
static bool ata_sff_altstatus(struct ata_port *ap, u8 *status)
{
u8 tmp;
if (ap->ops->sff_check_altstatus) {
tmp = ap->ops->sff_check_altstatus(ap);
goto read;
}
if (ap->ioaddr.altstatus_addr) {
tmp = ioread8(ap->ioaddr.altstatus_addr);
goto read;
}
return false;
read:
if (status)
*status = tmp;
return true;
}
/**
* ata_sff_irq_status - Check if the device is busy
* @ap: port where the device is
*
* Determine if the port is currently busy. Uses altstatus
* if available in order to avoid clearing shared IRQ status
 * when finding an IRQ source. Fortunately for us, devices without
 * ctl capability don't share interrupt lines.
*
* LOCKING:
* Inherited from caller.
*/
static u8 ata_sff_irq_status(struct ata_port *ap)
{
u8 status;
/* Not us: We are busy */
if (ata_sff_altstatus(ap, &status) && (status & ATA_BUSY))
return status;
/* Clear INTRQ latch */
status = ap->ops->sff_check_status(ap);
return status;
}
/**
* ata_sff_sync - Flush writes
* @ap: Port to wait for.
*
* CAUTION:
* If we have an mmio device with no ctl and no altstatus
* method this will fail. No such devices are known to exist.
*
* LOCKING:
* Inherited from caller.
*/
static void ata_sff_sync(struct ata_port *ap)
{
ata_sff_altstatus(ap, NULL);
}
/**
* ata_sff_pause - Flush writes and wait 400nS
* @ap: Port to pause for.
*
* CAUTION:
* If we have an mmio device with no ctl and no altstatus
* method this will fail. No such devices are known to exist.
*
* LOCKING:
* Inherited from caller.
*/
void ata_sff_pause(struct ata_port *ap)
{
ata_sff_sync(ap);
ndelay(400);
}
EXPORT_SYMBOL_GPL(ata_sff_pause);
/**
* ata_sff_dma_pause - Pause before commencing DMA
* @ap: Port to pause for.
*
* Perform I/O fencing and ensure sufficient cycle delays occur
* for the HDMA1:0 transition
*/
void ata_sff_dma_pause(struct ata_port *ap)
{
/*
* An altstatus read will cause the needed delay without
* messing up the IRQ status
*/
if (ata_sff_altstatus(ap, NULL))
return;
/* There are no DMA controllers without ctl. BUG here to ensure
we never violate the HDMA1:0 transition timing and risk
corruption. */
BUG();
}
EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
static int ata_sff_check_ready(struct ata_link *link)
{
u8 status = link->ap->ops->sff_check_status(link->ap);
return ata_check_ready(status);
}
/**
* ata_sff_wait_ready - sleep until BSY clears, or timeout
* @link: SFF link to wait ready status for
* @deadline: deadline jiffies for the operation
*
* Sleep until ATA Status register bit BSY clears, or timeout
* occurs.
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* 0 on success, -errno otherwise.
*/
int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
{
return ata_wait_ready(link, deadline, ata_sff_check_ready);
}
EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
/**
* ata_sff_set_devctl - Write device control reg
* @ap: port where the device is
* @ctl: value to write
*
* Writes ATA device control register.
*
* RETURN:
* true if the register exists, false if not.
*
* LOCKING:
* Inherited from caller.
*/
static bool ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
{
if (ap->ops->sff_set_devctl) {
ap->ops->sff_set_devctl(ap, ctl);
return true;
}
if (ap->ioaddr.ctl_addr) {
iowrite8(ctl, ap->ioaddr.ctl_addr);
return true;
}
return false;
}
/**
* ata_sff_dev_select - Select device 0/1 on ATA bus
* @ap: ATA channel to manipulate
* @device: ATA device (numbered from zero) to select
*
* Use the method defined in the ATA specification to
* make either device 0, or device 1, active on the
* ATA channel. Works with both PIO and MMIO.
*
* May be used as the dev_select() entry in ata_port_operations.
*
* LOCKING:
* caller.
*/
void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
{
u8 tmp;
if (device == 0)
tmp = ATA_DEVICE_OBS;
else
tmp = ATA_DEVICE_OBS | ATA_DEV1;
iowrite8(tmp, ap->ioaddr.device_addr);
ata_sff_pause(ap); /* needed; also flushes, for mmio */
}
EXPORT_SYMBOL_GPL(ata_sff_dev_select);
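/*
 * With the standard libata definitions, ATA_DEVICE_OBS is 0xA0 and
 * ATA_DEV1 is 0x10, so the function above writes 0xA0 to select
 * device 0 and 0xB0 to select device 1.
 */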
/**
* ata_dev_select - Select device 0/1 on ATA bus
* @ap: ATA channel to manipulate
* @device: ATA device (numbered from zero) to select
* @wait: non-zero to wait for Status register BSY bit to clear
* @can_sleep: non-zero if context allows sleeping
*
* Use the method defined in the ATA specification to
* make either device 0, or device 1, active on the
* ATA channel.
*
* This is a high-level version of ata_sff_dev_select(), which
* additionally provides the services of inserting the proper
* pauses and status polling, where needed.
*
* LOCKING:
* caller.
*/
static void ata_dev_select(struct ata_port *ap, unsigned int device,
unsigned int wait, unsigned int can_sleep)
{
if (wait)
ata_wait_idle(ap);
ap->ops->sff_dev_select(ap, device);
if (wait) {
if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
ata_msleep(ap, 150);
ata_wait_idle(ap);
}
}
/**
* ata_sff_irq_on - Enable interrupts on a port.
* @ap: Port on which interrupts are enabled.
*
* Enable interrupts on a legacy IDE device using MMIO or PIO,
* wait for idle, clear any pending interrupts.
*
* Note: may NOT be used as the sff_irq_on() entry in
* ata_port_operations.
*
* LOCKING:
* Inherited from caller.
*/
void ata_sff_irq_on(struct ata_port *ap)
{
if (ap->ops->sff_irq_on) {
ap->ops->sff_irq_on(ap);
return;
}
ap->ctl &= ~ATA_NIEN;
ap->last_ctl = ap->ctl;
ata_sff_set_devctl(ap, ap->ctl);
ata_wait_idle(ap);
if (ap->ops->sff_irq_clear)
ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_irq_on);
/**
* ata_sff_tf_load - send taskfile registers to host controller
* @ap: Port to which output is sent
* @tf: ATA taskfile register set
*
* Outputs ATA taskfile to standard ATA host controller.
*
* LOCKING:
* Inherited from caller.
*/
void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
if (tf->ctl != ap->last_ctl) {
if (ioaddr->ctl_addr)
iowrite8(tf->ctl, ioaddr->ctl_addr);
ap->last_ctl = tf->ctl;
ata_wait_idle(ap);
}
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
WARN_ON_ONCE(!ioaddr->ctl_addr);
iowrite8(tf->hob_feature, ioaddr->feature_addr);
iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
}
if (is_addr) {
iowrite8(tf->feature, ioaddr->feature_addr);
iowrite8(tf->nsect, ioaddr->nsect_addr);
iowrite8(tf->lbal, ioaddr->lbal_addr);
iowrite8(tf->lbam, ioaddr->lbam_addr);
iowrite8(tf->lbah, ioaddr->lbah_addr);
}
if (tf->flags & ATA_TFLAG_DEVICE)
iowrite8(tf->device, ioaddr->device_addr);
ata_wait_idle(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_tf_load);
/**
* ata_sff_tf_read - input device's ATA taskfile shadow registers
* @ap: Port from which input is read
* @tf: ATA taskfile register set for storing input
*
* Reads ATA taskfile registers for currently-selected device
* into @tf. Assumes the device has a fully SFF compliant task file
 * layout and behaviour. If your device does not (e.g. has a different
 * status method) then you will need to provide a replacement tf_read.
*
* LOCKING:
* Inherited from caller.
*/
void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
tf->status = ata_sff_check_status(ap);
tf->error = ioread8(ioaddr->error_addr);
tf->nsect = ioread8(ioaddr->nsect_addr);
tf->lbal = ioread8(ioaddr->lbal_addr);
tf->lbam = ioread8(ioaddr->lbam_addr);
tf->lbah = ioread8(ioaddr->lbah_addr);
tf->device = ioread8(ioaddr->device_addr);
if (tf->flags & ATA_TFLAG_LBA48) {
if (likely(ioaddr->ctl_addr)) {
iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
tf->hob_feature = ioread8(ioaddr->error_addr);
tf->hob_nsect = ioread8(ioaddr->nsect_addr);
tf->hob_lbal = ioread8(ioaddr->lbal_addr);
tf->hob_lbam = ioread8(ioaddr->lbam_addr);
tf->hob_lbah = ioread8(ioaddr->lbah_addr);
iowrite8(tf->ctl, ioaddr->ctl_addr);
ap->last_ctl = tf->ctl;
} else
WARN_ON_ONCE(1);
}
}
EXPORT_SYMBOL_GPL(ata_sff_tf_read);
/**
* ata_sff_exec_command - issue ATA command to host controller
* @ap: port to which command is being issued
* @tf: ATA taskfile register set
*
* Issues ATA command, with proper synchronization with interrupt
* handler / other threads.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
iowrite8(tf->command, ap->ioaddr.command_addr);
ata_sff_pause(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_exec_command);
/**
* ata_tf_to_host - issue ATA taskfile to host controller
* @ap: port to which command is being issued
* @tf: ATA taskfile register set
* @tag: tag of the associated command
*
* Issues ATA taskfile register set to ATA host controller,
* with proper synchronization with interrupt handler and
* other threads.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static inline void ata_tf_to_host(struct ata_port *ap,
const struct ata_taskfile *tf,
unsigned int tag)
{
trace_ata_tf_load(ap, tf);
ap->ops->sff_tf_load(ap, tf);
trace_ata_exec_command(ap, tf, tag);
ap->ops->sff_exec_command(ap, tf);
}
/**
* ata_sff_data_xfer - Transfer data by PIO
* @qc: queued command
* @buf: data buffer
* @buflen: buffer length
* @rw: read/write
*
* Transfer data from/to the device data register by PIO.
*
* LOCKING:
* Inherited from caller.
*
* RETURNS:
* Bytes consumed.
*/
unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc, unsigned char *buf,
unsigned int buflen, int rw)
{
struct ata_port *ap = qc->dev->link->ap;
void __iomem *data_addr = ap->ioaddr.data_addr;
unsigned int words = buflen >> 1;
/* Transfer multiple of 2 bytes */
if (rw == READ)
ioread16_rep(data_addr, buf, words);
else
iowrite16_rep(data_addr, buf, words);
/* Transfer trailing byte, if any. */
if (unlikely(buflen & 0x01)) {
unsigned char pad[2] = { };
/* Point buf to the tail of buffer */
buf += buflen - 1;
/*
* Use io*16_rep() accessors here as well to avoid pointlessly
* swapping bytes to and from on the big endian machines...
*/
if (rw == READ) {
ioread16_rep(data_addr, pad, 1);
*buf = pad[0];
} else {
pad[0] = *buf;
iowrite16_rep(data_addr, pad, 1);
}
words++;
}
return words << 1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
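/*
 * Worked example: a 7-byte transfer moves three 16-bit words (6 bytes)
 * with the string accessor, bounces the trailing byte through a 2-byte
 * pad buffer for one more word transfer, and reports 8 bytes consumed.
 */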
/**
* ata_sff_data_xfer32 - Transfer data by PIO
* @qc: queued command
* @buf: data buffer
* @buflen: buffer length
* @rw: read/write
*
* Transfer data from/to the device data register by PIO using 32bit
* I/O operations.
*
* LOCKING:
* Inherited from caller.
*
* RETURNS:
* Bytes consumed.
*/
unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf,
unsigned int buflen, int rw)
{
struct ata_device *dev = qc->dev;
struct ata_port *ap = dev->link->ap;
void __iomem *data_addr = ap->ioaddr.data_addr;
unsigned int words = buflen >> 2;
int slop = buflen & 3;
if (!(ap->pflags & ATA_PFLAG_PIO32))
return ata_sff_data_xfer(qc, buf, buflen, rw);
/* Transfer multiple of 4 bytes */
if (rw == READ)
ioread32_rep(data_addr, buf, words);
else
iowrite32_rep(data_addr, buf, words);
/* Transfer trailing bytes, if any */
if (unlikely(slop)) {
unsigned char pad[4] = { };
/* Point buf to the tail of buffer */
buf += buflen - slop;
/*
* Use io*_rep() accessors here as well to avoid pointlessly
* swapping bytes to and from on the big endian machines...
*/
if (rw == READ) {
if (slop < 3)
ioread16_rep(data_addr, pad, 1);
else
ioread32_rep(data_addr, pad, 1);
memcpy(buf, pad, slop);
} else {
memcpy(pad, buf, slop);
if (slop < 3)
iowrite16_rep(data_addr, pad, 1);
else
iowrite32_rep(data_addr, pad, 1);
}
}
return (buflen + 1) & ~1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
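/*
 * Worked example: with PIO32 enabled, a 10-byte transfer moves two
 * 32-bit dwords (8 bytes) with the string accessor, handles the 2-byte
 * slop with a single 16-bit transfer, and returns (10 + 1) & ~1 = 10.
 */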
static void ata_pio_xfer(struct ata_queued_cmd *qc, struct page *page,
unsigned int offset, size_t xfer_size)
{
bool do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
unsigned char *buf;
buf = kmap_atomic(page);
qc->ap->ops->sff_data_xfer(qc, buf + offset, xfer_size, do_write);
kunmap_atomic(buf);
if (!do_write && !PageSlab(page))
flush_dcache_page(page);
}
/**
* ata_pio_sector - Transfer a sector of data.
* @qc: Command on going
*
* Transfer qc->sect_size bytes of data from/to the ATA device.
*
* LOCKING:
* Inherited from caller.
*/
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct page *page;
unsigned int offset;
if (!qc->cursg) {
qc->curbytes = qc->nbytes;
return;
}
if (qc->curbytes == qc->nbytes - qc->sect_size)
ap->hsm_task_state = HSM_ST_LAST;
page = sg_page(qc->cursg);
offset = qc->cursg->offset + qc->cursg_ofs;
/* get the current page and offset */
page = nth_page(page, (offset >> PAGE_SHIFT));
offset %= PAGE_SIZE;
trace_ata_sff_pio_transfer_data(qc, offset, qc->sect_size);
/*
* Split the transfer when it splits a page boundary. Note that the
* split still has to be dword aligned like all ATA data transfers.
*/
WARN_ON_ONCE(offset % 4);
if (offset + qc->sect_size > PAGE_SIZE) {
unsigned int split_len = PAGE_SIZE - offset;
ata_pio_xfer(qc, page, offset, split_len);
ata_pio_xfer(qc, nth_page(page, 1), 0,
qc->sect_size - split_len);
} else {
ata_pio_xfer(qc, page, offset, qc->sect_size);
}
qc->curbytes += qc->sect_size;
qc->cursg_ofs += qc->sect_size;
if (qc->cursg_ofs == qc->cursg->length) {
qc->cursg = sg_next(qc->cursg);
if (!qc->cursg)
ap->hsm_task_state = HSM_ST_LAST;
qc->cursg_ofs = 0;
}
}
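/*
 * Worked example of the split above: with a 4096-byte page, a 512-byte
 * sector starting at offset 3840 crosses the page boundary, so 256
 * bytes are transferred from the first page and the remaining 256
 * bytes from the following page.
 */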
/**
* ata_pio_sectors - Transfer one or many sectors.
* @qc: Command on going
*
* Transfer one or many sectors of data from/to the
* ATA device for the DRQ request.
*
* LOCKING:
* Inherited from caller.
*/
static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
if (is_multi_taskfile(&qc->tf)) {
/* READ/WRITE MULTIPLE */
unsigned int nsect;
WARN_ON_ONCE(qc->dev->multi_count == 0);
nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
qc->dev->multi_count);
while (nsect--)
ata_pio_sector(qc);
} else
ata_pio_sector(qc);
ata_sff_sync(qc->ap); /* flush */
}
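/*
 * For READ/WRITE MULTIPLE the device raises DRQ once per block of up
 * to multi_count sectors, so with e.g. multi_count = 8 and 10 sectors
 * left the loop above transfers min(10, 8) = 8 sectors before the
 * next DRQ phase.
 */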
/**
* atapi_send_cdb - Write CDB bytes to hardware
* @ap: Port to which ATAPI device is attached.
* @qc: Taskfile currently active
*
* When device has indicated its readiness to accept
* a CDB, this function is called. Send the CDB.
*
* LOCKING:
* caller.
*/
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
/* send SCSI cdb */
trace_atapi_send_cdb(qc, 0, qc->dev->cdb_len);
WARN_ON_ONCE(qc->dev->cdb_len < 12);
ap->ops->sff_data_xfer(qc, qc->cdb, qc->dev->cdb_len, 1);
ata_sff_sync(ap);
/* FIXME: If the CDB is for DMA do we need to do the transition delay
or is bmdma_start guaranteed to do it ? */
switch (qc->tf.protocol) {
case ATAPI_PROT_PIO:
ap->hsm_task_state = HSM_ST;
break;
case ATAPI_PROT_NODATA:
ap->hsm_task_state = HSM_ST_LAST;
break;
#ifdef CONFIG_ATA_BMDMA
case ATAPI_PROT_DMA:
ap->hsm_task_state = HSM_ST_LAST;
/* initiate bmdma */
trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
ap->ops->bmdma_start(qc);
break;
#endif /* CONFIG_ATA_BMDMA */
default:
BUG();
}
}
/**
* __atapi_pio_bytes - Transfer data from/to the ATAPI device.
* @qc: Command on going
* @bytes: number of bytes
*
* Transfer data from/to the ATAPI device.
*
* LOCKING:
* Inherited from caller.
*
*/
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
struct ata_port *ap = qc->ap;
struct ata_device *dev = qc->dev;
struct ata_eh_info *ehi = &dev->link->eh_info;
struct scatterlist *sg;
struct page *page;
unsigned char *buf;
unsigned int offset, count, consumed;
next_sg:
sg = qc->cursg;
if (unlikely(!sg)) {
ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
"buf=%u cur=%u bytes=%u",
qc->nbytes, qc->curbytes, bytes);
return -1;
}
page = sg_page(sg);
offset = sg->offset + qc->cursg_ofs;
/* get the current page and offset */
page = nth_page(page, (offset >> PAGE_SHIFT));
offset %= PAGE_SIZE;
/* don't overrun current sg */
count = min(sg->length - qc->cursg_ofs, bytes);
/* don't cross page boundaries */
count = min(count, (unsigned int)PAGE_SIZE - offset);
trace_atapi_pio_transfer_data(qc, offset, count);
/* do the actual data transfer */
buf = kmap_atomic(page);
consumed = ap->ops->sff_data_xfer(qc, buf + offset, count, rw);
kunmap_atomic(buf);
bytes -= min(bytes, consumed);
qc->curbytes += count;
qc->cursg_ofs += count;
if (qc->cursg_ofs == sg->length) {
qc->cursg = sg_next(qc->cursg);
qc->cursg_ofs = 0;
}
/*
* There used to be a WARN_ON_ONCE(qc->cursg && count != consumed);
* Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
* check correctly as it doesn't know if it is the last request being
* made. Somebody should implement a proper sanity check.
*/
if (bytes)
goto next_sg;
return 0;
}
/**
* atapi_pio_bytes - Transfer data from/to the ATAPI device.
* @qc: Command on going
*
 * Transfer data from/to the ATAPI device.
*
* LOCKING:
* Inherited from caller.
*/
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *dev = qc->dev;
struct ata_eh_info *ehi = &dev->link->eh_info;
unsigned int ireason, bc_lo, bc_hi, bytes;
int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
/* Abuse qc->result_tf for temp storage of intermediate TF
* here to save some kernel stack usage.
* For normal completion, qc->result_tf is not relevant. For
* error, qc->result_tf is later overwritten by ata_qc_complete().
* So, the correctness of qc->result_tf is not affected.
*/
ap->ops->sff_tf_read(ap, &qc->result_tf);
ireason = qc->result_tf.nsect;
bc_lo = qc->result_tf.lbam;
bc_hi = qc->result_tf.lbah;
bytes = (bc_hi << 8) | bc_lo;
/* shall be cleared to zero, indicating xfer of data */
if (unlikely(ireason & ATAPI_COD))
goto atapi_check;
/* make sure transfer direction matches expected */
i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;
if (unlikely(do_write != i_write))
goto atapi_check;
if (unlikely(!bytes))
goto atapi_check;
if (unlikely(__atapi_pio_bytes(qc, bytes)))
goto err_out;
ata_sff_sync(ap); /* flush */
return;
atapi_check:
ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
ireason, bytes);
err_out:
qc->err_mask |= AC_ERR_HSM;
ap->hsm_task_state = HSM_ST_ERR;
}
/**
* ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
* @ap: the target ata_port
* @qc: qc on going
*
* RETURNS:
* 1 if ok in workqueue, 0 otherwise.
*/
static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
struct ata_queued_cmd *qc)
{
if (qc->tf.flags & ATA_TFLAG_POLLING)
return 1;
if (ap->hsm_task_state == HSM_ST_FIRST) {
if (qc->tf.protocol == ATA_PROT_PIO &&
(qc->tf.flags & ATA_TFLAG_WRITE))
return 1;
if (ata_is_atapi(qc->tf.protocol) &&
!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
return 1;
}
return 0;
}
/**
* ata_hsm_qc_complete - finish a qc running on standard HSM
* @qc: Command to complete
* @in_wq: 1 if called from workqueue, 0 otherwise
*
* Finish @qc which is running on standard HSM.
*
* LOCKING:
* If @in_wq is zero, spin_lock_irqsave(host lock).
* Otherwise, none on entry and grabs host lock.
*/
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
struct ata_port *ap = qc->ap;
if (in_wq) {
/* EH might have kicked in while host lock is released. */
qc = ata_qc_from_tag(ap, qc->tag);
if (qc) {
if (likely(!(qc->err_mask & AC_ERR_HSM))) {
ata_sff_irq_on(ap);
ata_qc_complete(qc);
} else
ata_port_freeze(ap);
}
} else {
if (likely(!(qc->err_mask & AC_ERR_HSM)))
ata_qc_complete(qc);
else
ata_port_freeze(ap);
}
}
/**
* ata_sff_hsm_move - move the HSM to the next state.
* @ap: the target ata_port
* @qc: qc on going
* @status: current device status
* @in_wq: 1 if called from workqueue, 0 otherwise
*
* RETURNS:
* 1 when poll next status needed, 0 otherwise.
*/
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
u8 status, int in_wq)
{
struct ata_link *link = qc->dev->link;
struct ata_eh_info *ehi = &link->eh_info;
int poll_next;
lockdep_assert_held(ap->lock);
WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
/* Make sure ata_sff_qc_issue() does not throw things
* like DMA polling into the workqueue. Notice that
* in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
*/
WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
fsm_start:
trace_ata_sff_hsm_state(qc, status);
switch (ap->hsm_task_state) {
case HSM_ST_FIRST:
/* Send first data block or PACKET CDB */
/* If polling, we will stay in the work queue after
* sending the data. Otherwise, interrupt handler
* takes over after sending the data.
*/
poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
/* check device status */
if (unlikely((status & ATA_DRQ) == 0)) {
/* handle BSY=0, DRQ=0 as error */
if (likely(status & (ATA_ERR | ATA_DF)))
/* device stops HSM for abort/error */
qc->err_mask |= AC_ERR_DEV;
else {
/* HSM violation. Let EH handle this */
ata_ehi_push_desc(ehi,
"ST_FIRST: !(DRQ|ERR|DF)");
qc->err_mask |= AC_ERR_HSM;
}
ap->hsm_task_state = HSM_ST_ERR;
goto fsm_start;
}
/* Device should not ask for data transfer (DRQ=1)
* when it finds something wrong.
* We ignore DRQ here and stop the HSM by
* changing hsm_task_state to HSM_ST_ERR and
* let the EH abort the command or reset the device.
*/
if (unlikely(status & (ATA_ERR | ATA_DF))) {
/* Some ATAPI tape drives forget to clear the ERR bit
* when doing the next command (mostly request sense).
* We ignore ERR here to workaround and proceed sending
* the CDB.
*/
if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
ata_ehi_push_desc(ehi, "ST_FIRST: "
"DRQ=1 with device error, "
"dev_stat 0x%X", status);
qc->err_mask |= AC_ERR_HSM;
ap->hsm_task_state = HSM_ST_ERR;
goto fsm_start;
}
}
if (qc->tf.protocol == ATA_PROT_PIO) {
/* PIO data out protocol.
* send first data block.
*/
/* ata_pio_sectors() might change the state
* to HSM_ST_LAST. so, the state is changed here
* before ata_pio_sectors().
*/
ap->hsm_task_state = HSM_ST;
ata_pio_sectors(qc);
} else
/* send CDB */
atapi_send_cdb(ap, qc);
/* if polling, ata_sff_pio_task() handles the rest.
* otherwise, interrupt handler takes over from here.
*/
break;
case HSM_ST:
/* complete command or read/write the data register */
if (qc->tf.protocol == ATAPI_PROT_PIO) {
/* ATAPI PIO protocol */
if ((status & ATA_DRQ) == 0) {
/* No more data to transfer or device error.
* Device error will be tagged in HSM_ST_LAST.
*/
ap->hsm_task_state = HSM_ST_LAST;
goto fsm_start;
}
/* Device should not ask for data transfer (DRQ=1)
* when it finds something wrong.
* We ignore DRQ here and stop the HSM by
* changing hsm_task_state to HSM_ST_ERR and
* let the EH abort the command or reset the device.
*/
if (unlikely(status & (ATA_ERR | ATA_DF))) {
ata_ehi_push_desc(ehi, "ST-ATAPI: "
"DRQ=1 with device error, "
"dev_stat 0x%X", status);
qc->err_mask |= AC_ERR_HSM;
ap->hsm_task_state = HSM_ST_ERR;
goto fsm_start;
}
atapi_pio_bytes(qc);
if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
/* bad ireason reported by device */
goto fsm_start;
} else {
/* ATA PIO protocol */
if (unlikely((status & ATA_DRQ) == 0)) {
/* handle BSY=0, DRQ=0 as error */
if (likely(status & (ATA_ERR | ATA_DF))) {
/* device stops HSM for abort/error */
qc->err_mask |= AC_ERR_DEV;
/* If diagnostic failed and this is
* IDENTIFY, it's likely a phantom
* device. Mark hint.
*/
if (qc->dev->horkage &
ATA_HORKAGE_DIAGNOSTIC)
qc->err_mask |=
AC_ERR_NODEV_HINT;
} else {
/* HSM violation. Let EH handle this.
* Phantom devices also trigger this
* condition. Mark hint.
*/
ata_ehi_push_desc(ehi, "ST-ATA: "
"DRQ=0 without device error, "
"dev_stat 0x%X", status);
qc->err_mask |= AC_ERR_HSM |
AC_ERR_NODEV_HINT;
}
ap->hsm_task_state = HSM_ST_ERR;
goto fsm_start;
}
/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
* We respect DRQ here and transfer one
* block of junk data before changing the
* hsm_task_state to HSM_ST_ERR.
*
* For PIO writes, ERR=1 DRQ=1 doesn't make
* sense since the data block has been
* transferred to the device.
*/
if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
qc->err_mask |= AC_ERR_DEV;
if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
ata_pio_sectors(qc);
status = ata_wait_idle(ap);
}
if (status & (ATA_BUSY | ATA_DRQ)) {
ata_ehi_push_desc(ehi, "ST-ATA: "
"BUSY|DRQ persists on ERR|DF, "
"dev_stat 0x%X", status);
qc->err_mask |= AC_ERR_HSM;
}
/* There are oddball controllers with
* status register stuck at 0x7f and
* lbal/m/h at zero which makes it
* pass all other presence detection
* mechanisms we have. Set NODEV_HINT
* for it. Kernel bz#7241.
*/
if (status == 0x7f)
qc->err_mask |= AC_ERR_NODEV_HINT;
/* ata_pio_sectors() might change the
* state to HSM_ST_LAST. so, the state
* is changed after ata_pio_sectors().
*/
ap->hsm_task_state = HSM_ST_ERR;
goto fsm_start;
}
ata_pio_sectors(qc);
if (ap->hsm_task_state == HSM_ST_LAST &&
(!(qc->tf.flags & ATA_TFLAG_WRITE))) {
/* all data read */
status = ata_wait_idle(ap);
goto fsm_start;
}
}
poll_next = 1;
break;
case HSM_ST_LAST:
if (unlikely(!ata_ok(status))) {
qc->err_mask |= __ac_err_mask(status);
ap->hsm_task_state = HSM_ST_ERR;
goto fsm_start;
}
/* no more data to transfer */
trace_ata_sff_hsm_command_complete(qc, status);
WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
ap->hsm_task_state = HSM_ST_IDLE;
/* complete taskfile transaction */
ata_hsm_qc_complete(qc, in_wq);
poll_next = 0;
break;
case HSM_ST_ERR:
ap->hsm_task_state = HSM_ST_IDLE;
/* complete taskfile transaction */
ata_hsm_qc_complete(qc, in_wq);
poll_next = 0;
break;
default:
poll_next = 0;
WARN(true, "ata%d: SFF host state machine in invalid state %d",
ap->print_id, ap->hsm_task_state);
}
return poll_next;
}
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
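/*
 * The normal flow of the state machine above is HSM_ST_FIRST (send the
 * first data block or the CDB) -> HSM_ST (transfer data) -> HSM_ST_LAST
 * (check final status) -> HSM_ST_IDLE, with any failure diverting to
 * HSM_ST_ERR before the command is completed and the state returns to
 * idle.
 */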
void ata_sff_queue_work(struct work_struct *work)
{
queue_work(ata_sff_wq, work);
}
EXPORT_SYMBOL_GPL(ata_sff_queue_work);
void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
queue_delayed_work(ata_sff_wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work);
void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
{
struct ata_port *ap = link->ap;
WARN_ON((ap->sff_pio_task_link != NULL) &&
(ap->sff_pio_task_link != link));
ap->sff_pio_task_link = link;
/* may fail if ata_sff_flush_pio_task() in progress */
ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));
}
EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
void ata_sff_flush_pio_task(struct ata_port *ap)
{
trace_ata_sff_flush_pio_task(ap);
cancel_delayed_work_sync(&ap->sff_pio_task);
/*
	 * We want to reset the HSM state to IDLE. If we do so without
* grabbing the port lock, critical sections protected by it which
* expect the HSM state to stay stable may get surprised. For
* example, we may set IDLE in between the time
* __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
* ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
*/
spin_lock_irq(ap->lock);
ap->hsm_task_state = HSM_ST_IDLE;
spin_unlock_irq(ap->lock);
ap->sff_pio_task_link = NULL;
}
static void ata_sff_pio_task(struct work_struct *work)
{
struct ata_port *ap =
container_of(work, struct ata_port, sff_pio_task.work);
struct ata_link *link = ap->sff_pio_task_link;
struct ata_queued_cmd *qc;
u8 status;
int poll_next;
spin_lock_irq(ap->lock);
BUG_ON(ap->sff_pio_task_link == NULL);
/* qc can be NULL if timeout occurred */
qc = ata_qc_from_tag(ap, link->active_tag);
if (!qc) {
ap->sff_pio_task_link = NULL;
goto out_unlock;
}
fsm_start:
WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
/*
* This is purely heuristic. This is a fast path.
* Sometimes when we enter, BSY will be cleared in
* a chk-status or two. If not, the drive is probably seeking
* or something. Snooze for a couple msecs, then
* chk-status again. If still busy, queue delayed work.
*/
status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
if (status & ATA_BUSY) {
spin_unlock_irq(ap->lock);
ata_msleep(ap, 2);
spin_lock_irq(ap->lock);
status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
if (status & ATA_BUSY) {
ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
goto out_unlock;
}
}
/*
* hsm_move() may trigger another command to be processed.
* clean the link beforehand.
*/
ap->sff_pio_task_link = NULL;
/* move the HSM */
poll_next = ata_sff_hsm_move(ap, qc, status, 1);
/* another command or interrupt handler
* may be running at this point.
*/
if (poll_next)
goto fsm_start;
out_unlock:
spin_unlock_irq(ap->lock);
}
/**
* ata_sff_qc_issue - issue taskfile to a SFF controller
* @qc: command to issue to device
*
* This function issues a PIO or NODATA command to a SFF
* controller.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, AC_ERR_* mask on failure
*/
unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_link *link = qc->dev->link;
/* Use polling pio if the LLD doesn't handle
* interrupt driven pio and atapi CDB interrupt.
*/
if (ap->flags & ATA_FLAG_PIO_POLLING)
qc->tf.flags |= ATA_TFLAG_POLLING;
/* select the device */
ata_dev_select(ap, qc->dev->devno, 1, 0);
/* start the command */
switch (qc->tf.protocol) {
case ATA_PROT_NODATA:
if (qc->tf.flags & ATA_TFLAG_POLLING)
ata_qc_set_polling(qc);
ata_tf_to_host(ap, &qc->tf, qc->tag);
ap->hsm_task_state = HSM_ST_LAST;
if (qc->tf.flags & ATA_TFLAG_POLLING)
ata_sff_queue_pio_task(link, 0);
break;
case ATA_PROT_PIO:
if (qc->tf.flags & ATA_TFLAG_POLLING)
ata_qc_set_polling(qc);
ata_tf_to_host(ap, &qc->tf, qc->tag);
if (qc->tf.flags & ATA_TFLAG_WRITE) {
/* PIO data out protocol */
ap->hsm_task_state = HSM_ST_FIRST;
ata_sff_queue_pio_task(link, 0);
/* always send first data block using the
* ata_sff_pio_task() codepath.
*/
} else {
/* PIO data in protocol */
ap->hsm_task_state = HSM_ST;
if (qc->tf.flags & ATA_TFLAG_POLLING)
ata_sff_queue_pio_task(link, 0);
/* if polling, ata_sff_pio_task() handles the
* rest. otherwise, interrupt handler takes
* over from here.
*/
}
break;
case ATAPI_PROT_PIO:
case ATAPI_PROT_NODATA:
if (qc->tf.flags & ATA_TFLAG_POLLING)
ata_qc_set_polling(qc);
ata_tf_to_host(ap, &qc->tf, qc->tag);
ap->hsm_task_state = HSM_ST_FIRST;
/* send cdb by polling if no cdb interrupt */
if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
(qc->tf.flags & ATA_TFLAG_POLLING))
ata_sff_queue_pio_task(link, 0);
break;
default:
return AC_ERR_SYSTEM;
}
return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
/**
* ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
* @qc: qc to fill result TF for
*
* @qc is finished and result TF needs to be filled. Fill it
* using ->sff_tf_read.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
void ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
{
qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
}
EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
static unsigned int ata_sff_idle_irq(struct ata_port *ap)
{
ap->stats.idle_irq++;
#ifdef ATA_IRQ_TRAP
if ((ap->stats.idle_irq % 1000) == 0) {
ap->ops->sff_check_status(ap);
if (ap->ops->sff_irq_clear)
ap->ops->sff_irq_clear(ap);
ata_port_warn(ap, "irq trap\n");
return 1;
}
#endif
return 0; /* irq not handled */
}
static unsigned int __ata_sff_port_intr(struct ata_port *ap,
struct ata_queued_cmd *qc,
bool hsmv_on_idle)
{
u8 status;
trace_ata_sff_port_intr(qc, hsmv_on_idle);
/* Check whether we are expecting interrupt in this state */
switch (ap->hsm_task_state) {
case HSM_ST_FIRST:
/* Some pre-ATAPI-4 devices assert INTRQ
* at this state when ready to receive CDB.
*/
/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
* The flag was turned on only for atapi devices. No
* need to check ata_is_atapi(qc->tf.protocol) again.
*/
if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
return ata_sff_idle_irq(ap);
break;
case HSM_ST_IDLE:
return ata_sff_idle_irq(ap);
default:
break;
}
/* check main status, clearing INTRQ if needed */
status = ata_sff_irq_status(ap);
if (status & ATA_BUSY) {
if (hsmv_on_idle) {
/* BMDMA engine is already stopped, we're screwed */
qc->err_mask |= AC_ERR_HSM;
ap->hsm_task_state = HSM_ST_ERR;
} else
return ata_sff_idle_irq(ap);
}
/* clear irq events */
if (ap->ops->sff_irq_clear)
ap->ops->sff_irq_clear(ap);
ata_sff_hsm_move(ap, qc, status, 0);
return 1; /* irq handled */
}
/**
* ata_sff_port_intr - Handle SFF port interrupt
* @ap: Port on which interrupt arrived (possibly...)
* @qc: Taskfile currently active in engine
*
* Handle port interrupt for given queued command.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* One if interrupt was handled, zero if not (shared irq).
*/
unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
return __ata_sff_port_intr(ap, qc, false);
}
EXPORT_SYMBOL_GPL(ata_sff_port_intr);
static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
{
struct ata_host *host = dev_instance;
bool retried = false;
unsigned int i;
unsigned int handled, idle, polling;
unsigned long flags;
/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
spin_lock_irqsave(&host->lock, flags);
retry:
handled = idle = polling = 0;
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc) {
if (!(qc->tf.flags & ATA_TFLAG_POLLING))
handled |= port_intr(ap, qc);
else
polling |= 1 << i;
} else
idle |= 1 << i;
}
/*
 * If no port was expecting an IRQ but the controller is actually
 * asserting the IRQ line, an "irq: nobody cared" storm will ensue.
 * Check IRQ pending status if available and clear the spurious IRQ.
 */
if (!handled && !retried) {
bool retry = false;
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
if (polling & (1 << i))
continue;
if (!ap->ops->sff_irq_check ||
!ap->ops->sff_irq_check(ap))
continue;
if (idle & (1 << i)) {
ap->ops->sff_check_status(ap);
if (ap->ops->sff_irq_clear)
ap->ops->sff_irq_clear(ap);
} else {
/* clear INTRQ and check if BUSY cleared */
if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
retry |= true;
/*
* With command in flight, we can't do
* sff_irq_clear() w/o racing with completion.
*/
}
}
if (retry) {
retried = true;
goto retry;
}
}
spin_unlock_irqrestore(&host->lock, flags);
return IRQ_RETVAL(handled);
}
/**
* ata_sff_interrupt - Default SFF ATA host interrupt handler
* @irq: irq line (unused)
* @dev_instance: pointer to our ata_host information structure
*
* Default interrupt handler for PCI IDE devices. Calls
* ata_sff_port_intr() for each port that is not disabled.
*
* LOCKING:
* Obtains host lock during operation.
*
* RETURNS:
* IRQ_NONE or IRQ_HANDLED.
*/
irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
{
return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
}
EXPORT_SYMBOL_GPL(ata_sff_interrupt);
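/*
 * Illustrative sketch (an assumption, not taken from this file): a non-PCI
 * driver would typically hand ata_sff_interrupt straight to the core when
 * activating its host. 'host', 'irq' and 'example_sht' are hypothetical.
 */
#if 0
	rc = ata_host_activate(host, irq, ata_sff_interrupt,
			       IRQF_SHARED, &example_sht);
#endif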
/**
* ata_sff_lost_interrupt - Check for an apparent lost interrupt
* @ap: port that appears to have timed out
*
* Called from the libata error handlers when the core code suspects
 * an interrupt has been lost. If it has, complete anything we can and
 * then return. The interface must support altstatus for this faster
 * recovery to occur.
*
* Locking:
* Caller holds host lock
*/
void ata_sff_lost_interrupt(struct ata_port *ap)
{
u8 status = 0;
struct ata_queued_cmd *qc;
/* Only one outstanding command per SFF channel */
qc = ata_qc_from_tag(ap, ap->link.active_tag);
/* We cannot lose an interrupt on a non-existent or polled command */
if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
return;
/* See if the controller thinks it is still busy - if so the command
isn't a lost IRQ but is still in progress */
if (WARN_ON_ONCE(!ata_sff_altstatus(ap, &status)))
return;
if (status & ATA_BUSY)
return;
/* There was a command running, we are no longer busy and we have
no interrupt. */
ata_port_warn(ap, "lost interrupt (Status 0x%x)\n", status);
/* Run the host interrupt logic as if the interrupt had not been
lost */
ata_sff_port_intr(ap, qc);
}
EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
/**
* ata_sff_freeze - Freeze SFF controller port
* @ap: port to freeze
*
* Freeze SFF controller port.
*
* LOCKING:
* Inherited from caller.
*/
void ata_sff_freeze(struct ata_port *ap)
{
ap->ctl |= ATA_NIEN;
ap->last_ctl = ap->ctl;
ata_sff_set_devctl(ap, ap->ctl);
/* Under certain circumstances, some controllers raise IRQ on
* ATA_NIEN manipulation. Also, many controllers fail to mask
* previously pending IRQ on ATA_NIEN assertion. Clear it.
*/
ap->ops->sff_check_status(ap);
if (ap->ops->sff_irq_clear)
ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_freeze);
/**
* ata_sff_thaw - Thaw SFF controller port
* @ap: port to thaw
*
* Thaw SFF controller port.
*
* LOCKING:
* Inherited from caller.
*/
void ata_sff_thaw(struct ata_port *ap)
{
/* clear & re-enable interrupts */
ap->ops->sff_check_status(ap);
if (ap->ops->sff_irq_clear)
ap->ops->sff_irq_clear(ap);
ata_sff_irq_on(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_thaw);
/**
* ata_sff_prereset - prepare SFF link for reset
* @link: SFF link to be reset
* @deadline: deadline jiffies for the operation
*
* SFF link @link is about to be reset. Initialize it. It first
 * calls ata_std_prereset() and waits for !BSY if the port is
* being softreset.
*
* LOCKING:
* Kernel thread context (may sleep)
*
* RETURNS:
* Always 0.
*/
int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
{
struct ata_eh_context *ehc = &link->eh_context;
int rc;
/* The standard prereset is best-effort and always returns 0 */
ata_std_prereset(link, deadline);
/* if we're about to do hardreset, nothing more to do */
if (ehc->i.action & ATA_EH_HARDRESET)
return 0;
/* wait for !BSY if we don't know that no device is attached */
if (!ata_link_offline(link)) {
rc = ata_sff_wait_ready(link, deadline);
if (rc && rc != -ENODEV) {
ata_link_warn(link,
"device not ready (errno=%d), forcing hardreset\n",
rc);
ehc->i.action |= ATA_EH_HARDRESET;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_prereset);
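/*
 * Illustrative sketch (hypothetical "example_" name): the prereset,
 * softreset, hardreset and postreset helpers in this file are usually
 * wired together as the EH reset chain in a driver's port operations.
 */
#if 0
static struct ata_port_operations example_port_ops = {
	.inherits	= &ata_sff_port_ops,
	.prereset	= ata_sff_prereset,
	.softreset	= ata_sff_softreset,
	.hardreset	= sata_sff_hardreset,	/* SATA ports with SCR access */
	.postreset	= ata_sff_postreset,
};
#endif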
/**
* ata_devchk - PATA device presence detection
* @ap: ATA channel to examine
* @device: Device to examine (starting at zero)
*
* This technique was originally described in
* Hale Landis's ATADRVR (www.ata-atapi.com), and
* later found its way into the ATA/ATAPI spec.
*
* Write a pattern to the ATA shadow registers,
* and if a device is present, it will respond by
* correctly storing and echoing back the
* ATA shadow register contents.
*
* RETURN:
* true if device is present, false if not.
*
* LOCKING:
* caller.
*/
static bool ata_devchk(struct ata_port *ap, unsigned int device)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
u8 nsect, lbal;
ap->ops->sff_dev_select(ap, device);
iowrite8(0x55, ioaddr->nsect_addr);
iowrite8(0xaa, ioaddr->lbal_addr);
iowrite8(0xaa, ioaddr->nsect_addr);
iowrite8(0x55, ioaddr->lbal_addr);
iowrite8(0x55, ioaddr->nsect_addr);
iowrite8(0xaa, ioaddr->lbal_addr);
nsect = ioread8(ioaddr->nsect_addr);
lbal = ioread8(ioaddr->lbal_addr);
if ((nsect == 0x55) && (lbal == 0xaa))
return true; /* we found a device */
return false; /* nothing found */
}
/**
* ata_sff_dev_classify - Parse returned ATA device signature
* @dev: ATA device to classify (starting at zero)
* @present: device seems present
* @r_err: Value of error register on completion
*
* After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
* an ATA/ATAPI-defined set of values is placed in the ATA
* shadow registers, indicating the results of device detection
* and diagnostics.
*
* Select the ATA device, and read the values from the ATA shadow
* registers. Then parse according to the Error register value,
* and the spec-defined values examined by ata_dev_classify().
*
* LOCKING:
* caller.
*
* RETURNS:
* Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
*/
unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
u8 *r_err)
{
struct ata_port *ap = dev->link->ap;
struct ata_taskfile tf;
unsigned int class;
u8 err;
ap->ops->sff_dev_select(ap, dev->devno);
memset(&tf, 0, sizeof(tf));
ap->ops->sff_tf_read(ap, &tf);
err = tf.error;
if (r_err)
*r_err = err;
/* see if device passed diags: continue and warn later */
if (err == 0)
/* diagnostic failure: do nothing _YET_ */
dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
else if (err == 1)
/* do nothing */ ;
else if ((dev->devno == 0) && (err == 0x81))
/* do nothing */ ;
else
return ATA_DEV_NONE;
/* determine if device is ATA or ATAPI */
class = ata_port_classify(ap, &tf);
switch (class) {
case ATA_DEV_UNKNOWN:
/*
* If the device failed diagnostic, it's likely to
* have reported incorrect device signature too.
* Assume ATA device if the device seems present but
* device signature is invalid with diagnostic
* failure.
*/
if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
class = ATA_DEV_ATA;
else
class = ATA_DEV_NONE;
break;
case ATA_DEV_ATA:
if (ap->ops->sff_check_status(ap) == 0)
class = ATA_DEV_NONE;
break;
}
return class;
}
EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
/**
* ata_sff_wait_after_reset - wait for devices to become ready after reset
* @link: SFF link which is just reset
* @devmask: mask of present devices
* @deadline: deadline jiffies for the operation
*
 * Wait for devices attached to SFF @link to become ready after
 * reset. This includes a preceding 150ms wait to avoid accessing the
 * TF status register too early.
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* 0 on success, -ENODEV if some or all of devices in @devmask
* don't seem to exist. -errno on other errors.
*/
int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct ata_ioports *ioaddr = &ap->ioaddr;
unsigned int dev0 = devmask & (1 << 0);
unsigned int dev1 = devmask & (1 << 1);
int rc, ret = 0;
ata_msleep(ap, ATA_WAIT_AFTER_RESET);
/* always check readiness of the master device */
rc = ata_sff_wait_ready(link, deadline);
/* -ENODEV means the odd clown forgot the D7 pulldown resistor
* and TF status is 0xff, bail out on it too.
*/
if (rc)
return rc;
/* if device 1 was found in ata_devchk, wait for register
* access briefly, then wait for BSY to clear.
*/
if (dev1) {
int i;
ap->ops->sff_dev_select(ap, 1);
/* Wait for register access. Some ATAPI devices fail
* to set nsect/lbal after reset, so don't waste too
* much time on it. We're gonna wait for !BSY anyway.
*/
for (i = 0; i < 2; i++) {
u8 nsect, lbal;
nsect = ioread8(ioaddr->nsect_addr);
lbal = ioread8(ioaddr->lbal_addr);
if ((nsect == 1) && (lbal == 1))
break;
ata_msleep(ap, 50); /* give drive a breather */
}
rc = ata_sff_wait_ready(link, deadline);
if (rc) {
if (rc != -ENODEV)
return rc;
ret = rc;
}
}
/* is all this really necessary? */
ap->ops->sff_dev_select(ap, 0);
if (dev1)
ap->ops->sff_dev_select(ap, 1);
if (dev0)
ap->ops->sff_dev_select(ap, 0);
return ret;
}
EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
unsigned long deadline)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
if (ap->ioaddr.ctl_addr) {
/* software reset; causes dev0 to be selected */
iowrite8(ap->ctl, ioaddr->ctl_addr);
udelay(20); /* FIXME: flush */
iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
udelay(20); /* FIXME: flush */
iowrite8(ap->ctl, ioaddr->ctl_addr);
ap->last_ctl = ap->ctl;
}
/* wait for the port to become ready */
return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
}
/**
* ata_sff_softreset - reset host port via ATA SRST
* @link: ATA link to reset
* @classes: resulting classes of attached devices
* @deadline: deadline jiffies for the operation
*
* Reset host port using ATA SRST.
*
* LOCKING:
* Kernel thread context (may sleep)
*
* RETURNS:
* 0 on success, -errno otherwise.
*/
int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
unsigned int devmask = 0;
int rc;
u8 err;
/* determine if device 0/1 are present */
if (ata_devchk(ap, 0))
devmask |= (1 << 0);
if (slave_possible && ata_devchk(ap, 1))
devmask |= (1 << 1);
/* select device 0 again */
ap->ops->sff_dev_select(ap, 0);
/* issue bus reset */
rc = ata_bus_softreset(ap, devmask, deadline);
/* if the link is occupied, -ENODEV is an error too */
if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
ata_link_err(link, "SRST failed (errno=%d)\n", rc);
return rc;
}
/* determine by signature whether we have ATA or ATAPI devices */
classes[0] = ata_sff_dev_classify(&link->device[0],
devmask & (1 << 0), &err);
if (slave_possible && err != 0x81)
classes[1] = ata_sff_dev_classify(&link->device[1],
devmask & (1 << 1), &err);
return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_softreset);
/**
* sata_sff_hardreset - reset host port via SATA phy reset
* @link: link to reset
* @class: resulting class of attached device
* @deadline: deadline jiffies for the operation
*
* SATA phy-reset host port using DET bits of SControl register,
* wait for !BSY and classify the attached device.
*
* LOCKING:
* Kernel thread context (may sleep)
*
* RETURNS:
* 0 on success, -errno otherwise.
*/
int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_eh_context *ehc = &link->eh_context;
const unsigned int *timing = sata_ehc_deb_timing(ehc);
bool online;
int rc;
rc = sata_link_hardreset(link, timing, deadline, &online,
ata_sff_check_ready);
if (online)
*class = ata_sff_dev_classify(link->device, 1, NULL);
return rc;
}
EXPORT_SYMBOL_GPL(sata_sff_hardreset);
/**
* ata_sff_postreset - SFF postreset callback
* @link: the target SFF ata_link
* @classes: classes of attached devices
*
* This function is invoked after a successful reset. It first
* calls ata_std_postreset() and performs SFF specific postreset
* processing.
*
* LOCKING:
* Kernel thread context (may sleep)
*/
void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
{
struct ata_port *ap = link->ap;
ata_std_postreset(link, classes);
/* is double-select really necessary? */
if (classes[0] != ATA_DEV_NONE)
ap->ops->sff_dev_select(ap, 1);
if (classes[1] != ATA_DEV_NONE)
ap->ops->sff_dev_select(ap, 0);
/* bail out if no device is present */
if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE)
return;
/* set up device control */
if (ata_sff_set_devctl(ap, ap->ctl))
ap->last_ctl = ap->ctl;
}
EXPORT_SYMBOL_GPL(ata_sff_postreset);
/**
* ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers
* @qc: command
*
* Drain the FIFO and device of any stuck data following a command
* failing to complete. In some cases this is necessary before a
* reset will recover the device.
*
*/
void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
{
int count;
struct ata_port *ap;
/* We only need to flush incoming data when a command was running */
if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
return;
ap = qc->ap;
/* Drain up to 64K of data before we give up this recovery method */
for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
&& count < 65536; count += 2)
ioread16(ap->ioaddr.data_addr);
if (count)
ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
}
EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
/**
* ata_sff_error_handler - Stock error handler for SFF controller
* @ap: port to handle error for
*
* Stock error handler for SFF controller. It can handle both
* PATA and SATA controllers. Many controllers should be able to
* use this EH as-is or with some added handling before and
* after.
*
* LOCKING:
* Kernel thread context (may sleep)
*/
void ata_sff_error_handler(struct ata_port *ap)
{
ata_reset_fn_t softreset = ap->ops->softreset;
ata_reset_fn_t hardreset = ap->ops->hardreset;
struct ata_queued_cmd *qc;
unsigned long flags;
qc = __ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && !(qc->flags & ATA_QCFLAG_EH))
qc = NULL;
spin_lock_irqsave(ap->lock, flags);
/*
* We *MUST* do FIFO draining before we issue a reset as
* several devices helpfully clear their internal state and
* will lock solid if we touch the data port post reset. Pass
 * qc in case anyone wants to do different PIO/DMA recovery or
 * has per-command fixups.
*/
if (ap->ops->sff_drain_fifo)
ap->ops->sff_drain_fifo(qc);
spin_unlock_irqrestore(ap->lock, flags);
/* ignore built-in hardresets if SCR access is not available */
if ((hardreset == sata_std_hardreset ||
hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
hardreset = NULL;
ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
ap->ops->postreset);
}
EXPORT_SYMBOL_GPL(ata_sff_error_handler);
/**
* ata_sff_std_ports - initialize ioaddr with standard port offsets.
* @ioaddr: IO address structure to be initialized
*
* Utility function which initializes data_addr, error_addr,
* feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
* device_addr, status_addr, and command_addr to standard offsets
* relative to cmd_addr.
*
* Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
*/
void ata_sff_std_ports(struct ata_ioports *ioaddr)
{
ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
EXPORT_SYMBOL_GPL(ata_sff_std_ports);
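/*
 * Illustrative sketch (assumed MMIO layout, hypothetical base and offset):
 * a platform driver sets cmd_addr and ctl_addr itself, then lets
 * ata_sff_std_ports() derive the remaining taskfile register addresses.
 */
#if 0
	ap->ioaddr.cmd_addr = mmio_base;		/* hypothetical base */
	ap->ioaddr.altstatus_addr =
	ap->ioaddr.ctl_addr = mmio_base + 0x1c;		/* hypothetical offset */
	ata_sff_std_ports(&ap->ioaddr);
#endif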
#ifdef CONFIG_PCI
static bool ata_resources_present(struct pci_dev *pdev, int port)
{
int i;
/* Check the PCI resources for this channel are enabled */
port *= 2;
for (i = 0; i < 2; i++) {
if (pci_resource_start(pdev, port + i) == 0 ||
pci_resource_len(pdev, port + i) == 0)
return false;
}
return true;
}
/**
* ata_pci_sff_init_host - acquire native PCI ATA resources and init host
* @host: target ATA host
*
* Acquire native PCI ATA resources for @host and initialize the
* first two ports of @host accordingly. Ports marked dummy are
* skipped and allocation failure makes the port dummy.
*
* Note that native PCI resources are valid even for legacy hosts
* as we fix up pdev resources array early in boot, so this
* function can be used for both native and legacy SFF hosts.
*
* LOCKING:
* Inherited from calling layer (may sleep).
*
* RETURNS:
* 0 if at least one port is initialized, -ENODEV if no port is
* available.
*/
int ata_pci_sff_init_host(struct ata_host *host)
{
struct device *gdev = host->dev;
struct pci_dev *pdev = to_pci_dev(gdev);
unsigned int mask = 0;
int i, rc;
/* request, iomap BARs and init port addresses accordingly */
for (i = 0; i < 2; i++) {
struct ata_port *ap = host->ports[i];
int base = i * 2;
void __iomem * const *iomap;
if (ata_port_is_dummy(ap))
continue;
/* Discard disabled ports. Some controllers show
* their unused channels this way. Disabled ports are
* made dummy.
*/
if (!ata_resources_present(pdev, i)) {
ap->ops = &ata_dummy_port_ops;
continue;
}
rc = pcim_iomap_regions(pdev, 0x3 << base,
dev_driver_string(gdev));
if (rc) {
dev_warn(gdev,
"failed to request/iomap BARs for port %d (errno=%d)\n",
i, rc);
if (rc == -EBUSY)
pcim_pin_device(pdev);
ap->ops = &ata_dummy_port_ops;
continue;
}
host->iomap = iomap = pcim_iomap_table(pdev);
ap->ioaddr.cmd_addr = iomap[base];
ap->ioaddr.altstatus_addr =
ap->ioaddr.ctl_addr = (void __iomem *)
((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
ata_sff_std_ports(&ap->ioaddr);
ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
(unsigned long long)pci_resource_start(pdev, base),
(unsigned long long)pci_resource_start(pdev, base + 1));
mask |= 1 << i;
}
if (!mask) {
dev_err(gdev, "no available native port\n");
return -ENODEV;
}
return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
/**
* ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
* @pdev: target PCI device
* @ppi: array of port_info, must be enough for two ports
* @r_host: out argument for the initialized ATA host
*
* Helper to allocate PIO-only SFF ATA host for @pdev, acquire
* all PCI resources and initialize it accordingly in one go.
*
* LOCKING:
* Inherited from calling layer (may sleep).
*
* RETURNS:
* 0 on success, -errno otherwise.
*/
int ata_pci_sff_prepare_host(struct pci_dev *pdev,
const struct ata_port_info * const *ppi,
struct ata_host **r_host)
{
struct ata_host *host;
int rc;
if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
return -ENOMEM;
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
if (!host) {
dev_err(&pdev->dev, "failed to allocate ATA host\n");
rc = -ENOMEM;
goto err_out;
}
rc = ata_pci_sff_init_host(host);
if (rc)
goto err_out;
devres_remove_group(&pdev->dev, NULL);
*r_host = host;
return 0;
err_out:
devres_release_group(&pdev->dev, NULL);
return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
/**
* ata_pci_sff_activate_host - start SFF host, request IRQ and register it
* @host: target SFF ATA host
* @irq_handler: irq_handler used when requesting IRQ(s)
* @sht: scsi_host_template to use when registering the host
*
* This is the counterpart of ata_host_activate() for SFF ATA
* hosts. This separate helper is necessary because SFF hosts
* use two separate interrupts in legacy mode.
*
* LOCKING:
* Inherited from calling layer (may sleep).
*
* RETURNS:
* 0 on success, -errno otherwise.
*/
int ata_pci_sff_activate_host(struct ata_host *host,
irq_handler_t irq_handler,
const struct scsi_host_template *sht)
{
struct device *dev = host->dev;
struct pci_dev *pdev = to_pci_dev(dev);
const char *drv_name = dev_driver_string(host->dev);
int legacy_mode = 0, rc;
rc = ata_host_start(host);
if (rc)
return rc;
if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
u8 tmp8, mask = 0;
/*
* ATA spec says we should use legacy mode when one
* port is in legacy mode, but disabled ports on some
 * PCI hosts appear as fixed legacy ports, e.g. SB600/700
* on which the secondary port is not wired, so
* ignore ports that are marked as 'dummy' during
* this check
*/
pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
if (!ata_port_is_dummy(host->ports[0]))
mask |= (1 << 0);
if (!ata_port_is_dummy(host->ports[1]))
mask |= (1 << 2);
if ((tmp8 & mask) != mask)
legacy_mode = 1;
}
if (!devres_open_group(dev, NULL, GFP_KERNEL))
return -ENOMEM;
if (!legacy_mode && pdev->irq) {
int i;
rc = devm_request_irq(dev, pdev->irq, irq_handler,
IRQF_SHARED, drv_name, host);
if (rc)
goto out;
for (i = 0; i < 2; i++) {
if (ata_port_is_dummy(host->ports[i]))
continue;
ata_port_desc(host->ports[i], "irq %d", pdev->irq);
}
} else if (legacy_mode) {
if (!ata_port_is_dummy(host->ports[0])) {
rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
irq_handler, IRQF_SHARED,
drv_name, host);
if (rc)
goto out;
ata_port_desc(host->ports[0], "irq %d",
ATA_PRIMARY_IRQ(pdev));
}
if (!ata_port_is_dummy(host->ports[1])) {
rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
irq_handler, IRQF_SHARED,
drv_name, host);
if (rc)
goto out;
ata_port_desc(host->ports[1], "irq %d",
ATA_SECONDARY_IRQ(pdev));
}
}
rc = ata_host_register(host, sht);
out:
if (rc == 0)
devres_remove_group(dev, NULL);
else
devres_release_group(dev, NULL);
return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
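/*
 * Illustrative sketch (hypothetical "example_" names): the two-step
 * prepare/activate flow lets a driver tweak the host between resource
 * acquisition and IRQ registration.
 */
#if 0
static int example_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.port_ops	= &ata_sff_port_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };
	struct ata_host *host;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;
	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	/* driver-specific fixups on 'host' would go here */
	return ata_pci_sff_activate_host(host, ata_sff_interrupt,
					 &example_sht);
}
#endif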
static const struct ata_port_info *ata_sff_find_valid_pi(
const struct ata_port_info * const *ppi)
{
int i;
/* look up the first valid port_info */
for (i = 0; i < 2 && ppi[i]; i++)
if (ppi[i]->port_ops != &ata_dummy_port_ops)
return ppi[i];
return NULL;
}
static int ata_pci_init_one(struct pci_dev *pdev,
const struct ata_port_info * const *ppi,
const struct scsi_host_template *sht, void *host_priv,
int hflags, bool bmdma)
{
struct device *dev = &pdev->dev;
const struct ata_port_info *pi;
struct ata_host *host = NULL;
int rc;
pi = ata_sff_find_valid_pi(ppi);
if (!pi) {
dev_err(&pdev->dev, "no valid port_info specified\n");
return -EINVAL;
}
if (!devres_open_group(dev, NULL, GFP_KERNEL))
return -ENOMEM;
rc = pcim_enable_device(pdev);
if (rc)
goto out;
#ifdef CONFIG_ATA_BMDMA
if (bmdma)
/* prepare and activate BMDMA host */
rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
else
#endif
/* prepare and activate SFF host */
rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
if (rc)
goto out;
host->private_data = host_priv;
host->flags |= hflags;
#ifdef CONFIG_ATA_BMDMA
if (bmdma) {
pci_set_master(pdev);
rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
} else
#endif
rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
out:
if (rc == 0)
devres_remove_group(&pdev->dev, NULL);
else
devres_release_group(&pdev->dev, NULL);
return rc;
}
/**
* ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
* @pdev: Controller to be initialized
* @ppi: array of port_info, must be enough for two ports
* @sht: scsi_host_template to use when registering the host
* @host_priv: host private_data
* @hflag: host flags
*
* This is a helper function which can be called from a driver's
* xxx_init_one() probe function if the hardware uses traditional
* IDE taskfile registers and is PIO only.
*
* ASSUMPTION:
* Nobody makes a single channel controller that appears solely as
* the secondary legacy port on PCI.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
 * Zero on success, negative errno-based value on error.
*/
int ata_pci_sff_init_one(struct pci_dev *pdev,
const struct ata_port_info * const *ppi,
const struct scsi_host_template *sht, void *host_priv, int hflag)
{
return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0);
}
EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
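/*
 * Illustrative sketch: for a driver with no fixups between prepare and
 * activate, the whole example_init_one() sketched above collapses to a
 * single call ('ppi' and 'example_sht' remain hypothetical).
 */
#if 0
	return ata_pci_sff_init_one(pdev, ppi, &example_sht, NULL, 0);
#endif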
#endif /* CONFIG_PCI */
/*
* BMDMA support
*/
#ifdef CONFIG_ATA_BMDMA
const struct ata_port_operations ata_bmdma_port_ops = {
.inherits = &ata_sff_port_ops,
.error_handler = ata_bmdma_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.qc_prep = ata_bmdma_qc_prep,
.qc_issue = ata_bmdma_qc_issue,
.sff_irq_clear = ata_bmdma_irq_clear,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.port_start = ata_bmdma_port_start,
};
EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
const struct ata_port_operations ata_bmdma32_port_ops = {
.inherits = &ata_bmdma_port_ops,
.sff_data_xfer = ata_sff_data_xfer32,
.port_start = ata_bmdma_port_start32,
};
EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
/**
* ata_bmdma_fill_sg - Fill PCI IDE PRD table
* @qc: Metadata associated with taskfile to be transferred
*
* Fill PCI IDE PRD (scatter-gather) table with segments
* associated with the current disk command.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
*/
static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_bmdma_prd *prd = ap->bmdma_prd;
struct scatterlist *sg;
unsigned int si, pi;
pi = 0;
for_each_sg(qc->sg, sg, qc->n_elem, si) {
u32 addr, offset;
u32 sg_len, len;
/* determine if physical DMA addr spans 64K boundary.
* Note h/w doesn't support 64-bit, so we unconditionally
* truncate dma_addr_t to u32.
*/
addr = (u32) sg_dma_address(sg);
sg_len = sg_dma_len(sg);
while (sg_len) {
offset = addr & 0xffff;
len = sg_len;
if ((offset + sg_len) > 0x10000)
len = 0x10000 - offset;
prd[pi].addr = cpu_to_le32(addr);
prd[pi].flags_len = cpu_to_le32(len & 0xffff);
pi++;
sg_len -= len;
addr += len;
}
}
prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
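/*
 * Worked example (illustrative numbers): a 12KiB segment at bus address
 * 0xf000 has offset 0xf000 within its 64K page, so the first PRD entry
 * covers 0x10000 - 0xf000 = 0x1000 bytes at 0xf000 and the loop emits a
 * second entry for the remaining 0x2000 bytes starting at 0x10000.
 */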
/**
* ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
* @qc: Metadata associated with taskfile to be transferred
*
* Fill PCI IDE PRD (scatter-gather) table with segments
 * associated with the current disk command. Perform the fill so
 * that we never write a zero-length (i.e. 64K) record, for
 * controllers that don't follow the spec.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
*/
static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_bmdma_prd *prd = ap->bmdma_prd;
struct scatterlist *sg;
unsigned int si, pi;
pi = 0;
for_each_sg(qc->sg, sg, qc->n_elem, si) {
u32 addr, offset;
u32 sg_len, len, blen;
/* determine if physical DMA addr spans 64K boundary.
* Note h/w doesn't support 64-bit, so we unconditionally
* truncate dma_addr_t to u32.
*/
addr = (u32) sg_dma_address(sg);
sg_len = sg_dma_len(sg);
while (sg_len) {
offset = addr & 0xffff;
len = sg_len;
if ((offset + sg_len) > 0x10000)
len = 0x10000 - offset;
blen = len & 0xffff;
prd[pi].addr = cpu_to_le32(addr);
if (blen == 0) {
/* Some PATA chipsets like the CS5530 can't
cope with 0x0000 meaning 64K as the spec
says */
prd[pi].flags_len = cpu_to_le32(0x8000);
blen = 0x8000;
prd[++pi].addr = cpu_to_le32(addr + 0x8000);
}
prd[pi].flags_len = cpu_to_le32(blen);
pi++;
sg_len -= len;
addr += len;
}
}
prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
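/*
 * Worked example: a 64KiB segment at a 64K-aligned address would need
 * flags_len == 0 (the spec's shorthand for 64K). The dumb variant above
 * instead emits two 32KiB entries (blen 0x8000 each, the second at
 * addr + 0x8000), so chips like the CS5530 never see a zero length.
 */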
/**
* ata_bmdma_qc_prep - Prepare taskfile for submission
* @qc: Metadata associated with taskfile to be prepared
*
* Prepare ATA taskfile for submission.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
{
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
return AC_ERR_OK;
ata_bmdma_fill_sg(qc);
return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
/**
* ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
* @qc: Metadata associated with taskfile to be prepared
*
* Prepare ATA taskfile for submission.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
{
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
return AC_ERR_OK;
ata_bmdma_fill_sg_dumb(qc);
return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
/**
* ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
* @qc: command to issue to device
*
* This function issues a PIO, NODATA or DMA command to a
* SFF/BMDMA controller. PIO and NODATA are handled by
* ata_sff_qc_issue().
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, AC_ERR_* mask on failure
*/
unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_link *link = qc->dev->link;
/* defer PIO handling to sff_qc_issue */
if (!ata_is_dma(qc->tf.protocol))
return ata_sff_qc_issue(qc);
/* select the device */
ata_dev_select(ap, qc->dev->devno, 1, 0);
/* start the command */
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
ap->ops->bmdma_setup(qc); /* set up bmdma */
trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
ap->ops->bmdma_start(qc); /* initiate bmdma */
ap->hsm_task_state = HSM_ST_LAST;
break;
case ATAPI_PROT_DMA:
WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
ap->ops->bmdma_setup(qc); /* set up bmdma */
ap->hsm_task_state = HSM_ST_FIRST;
/* send cdb by polling if no cdb interrupt */
if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
ata_sff_queue_pio_task(link, 0);
break;
default:
WARN_ON(1);
return AC_ERR_SYSTEM;
}
return 0;
}
EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);
/**
* ata_bmdma_port_intr - Handle BMDMA port interrupt
* @ap: Port on which interrupt arrived (possibly...)
* @qc: Taskfile currently active in engine
*
* Handle port interrupt for given queued command.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* One if interrupt was handled, zero if not (shared irq).
*/
unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
struct ata_eh_info *ehi = &ap->link.eh_info;
u8 host_stat = 0;
bool bmdma_stopped = false;
unsigned int handled;
if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
/* check status of DMA engine */
host_stat = ap->ops->bmdma_status(ap);
trace_ata_bmdma_status(ap, host_stat);
/* if it's not our irq... */
if (!(host_stat & ATA_DMA_INTR))
return ata_sff_idle_irq(ap);
/* before we do anything else, clear DMA-Start bit */
trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
ap->ops->bmdma_stop(qc);
bmdma_stopped = true;
if (unlikely(host_stat & ATA_DMA_ERR)) {
/* error when transferring data to/from memory */
qc->err_mask |= AC_ERR_HOST_BUS;
ap->hsm_task_state = HSM_ST_ERR;
}
}
handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
return handled;
}
EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);
/**
* ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
* @irq: irq line (unused)
* @dev_instance: pointer to our ata_host information structure
*
* Default interrupt handler for PCI IDE devices. Calls
* ata_bmdma_port_intr() for each port that is not disabled.
*
* LOCKING:
* Obtains host lock during operation.
*
* RETURNS:
* IRQ_NONE or IRQ_HANDLED.
*/
irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
{
return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
}
EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);
/**
* ata_bmdma_error_handler - Stock error handler for BMDMA controller
* @ap: port to handle error for
*
* Stock error handler for BMDMA controller. It can handle both
* PATA and SATA controllers. Most BMDMA controllers should be
* able to use this EH as-is or with some added handling before
* and after.
*
* LOCKING:
* Kernel thread context (may sleep)
*/
void ata_bmdma_error_handler(struct ata_port *ap)
{
struct ata_queued_cmd *qc;
unsigned long flags;
bool thaw = false;
qc = __ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && !(qc->flags & ATA_QCFLAG_EH))
qc = NULL;
/* reset PIO HSM and stop DMA engine */
spin_lock_irqsave(ap->lock, flags);
if (qc && ata_is_dma(qc->tf.protocol)) {
u8 host_stat;
host_stat = ap->ops->bmdma_status(ap);
trace_ata_bmdma_status(ap, host_stat);
/* BMDMA controllers indicate host bus error by
* setting DMA_ERR bit and timing out. As it wasn't
* really a timeout event, adjust error mask and
* cancel frozen state.
*/
if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
qc->err_mask = AC_ERR_HOST_BUS;
thaw = true;
}
trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
ap->ops->bmdma_stop(qc);
/* if we're gonna thaw, make sure IRQ is clear */
if (thaw) {
ap->ops->sff_check_status(ap);
if (ap->ops->sff_irq_clear)
ap->ops->sff_irq_clear(ap);
}
}
spin_unlock_irqrestore(ap->lock, flags);
if (thaw)
ata_eh_thaw_port(ap);
ata_sff_error_handler(ap);
}
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
/**
* ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
* @qc: internal command to clean up
*
* LOCKING:
* Kernel thread context (may sleep)
*/
void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
unsigned long flags;
if (ata_is_dma(qc->tf.protocol)) {
spin_lock_irqsave(ap->lock, flags);
trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
ap->ops->bmdma_stop(qc);
spin_unlock_irqrestore(ap->lock, flags);
}
}
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
/**
* ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
* @ap: Port associated with this ATA transaction.
*
* Clear interrupt and error flags in DMA status register.
*
* May be used as the irq_clear() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
void ata_bmdma_irq_clear(struct ata_port *ap)
{
void __iomem *mmio = ap->ioaddr.bmdma_addr;
if (!mmio)
return;
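	/* the interrupt and error bits are write-one-to-clear, so writing
	 * the status value back clears exactly the bits currently set
	 */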
iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
}
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
/**
* ata_bmdma_setup - Set up PCI IDE BMDMA transaction
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
u8 dmactl;
/* load PRD table addr. */
mb(); /* make sure PRD table writes are visible to controller */
iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
/* specify data direction, triple-check start bit is clear */
dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
if (!rw)
dmactl |= ATA_DMA_WR;
iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
/* issue r/w command */
ap->ops->sff_exec_command(ap, &qc->tf);
}
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
/**
* ata_bmdma_start - Start a PCI IDE BMDMA transaction
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
u8 dmactl;
/* start host DMA transaction */
dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
/* Strictly, one may wish to issue an ioread8() here, to
* flush the mmio write. However, control also passes
* to the hardware at this point, and it will interrupt
* us when we are to resume control. So, in effect,
* we don't care when the mmio write flushes.
 * Further, a read of the DMA status register _immediately_
 * following the write may not be what certain flaky hardware
 * is expecting, so I think it is best not to add a readb()
 * without first testing all the MMIO ATA cards/mobos.
* Or maybe I'm just being paranoid.
*
* FIXME: The posting of this write means I/O starts are
* unnecessarily delayed for MMIO
*/
}
EXPORT_SYMBOL_GPL(ata_bmdma_start);
/**
* ata_bmdma_stop - Stop PCI IDE BMDMA transfer
* @qc: Command we are ending DMA for
*
* Clears the ATA_DMA_START flag in the dma control register
*
* May be used as the bmdma_stop() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *mmio = ap->ioaddr.bmdma_addr;
/* clear start/stop bit */
iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
mmio + ATA_DMA_CMD);
/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
ata_sff_dma_pause(ap);
}
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
/**
* ata_bmdma_status - Read PCI IDE BMDMA status
* @ap: Port associated with this ATA transaction.
*
* Read and return BMDMA status register.
*
* May be used as the bmdma_status() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
u8 ata_bmdma_status(struct ata_port *ap)
{
return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}
EXPORT_SYMBOL_GPL(ata_bmdma_status);
/**
* ata_bmdma_port_start - Set port up for bmdma.
* @ap: Port to initialize
*
* Called just after data structures for each port are
* initialized. Allocates space for PRD table.
*
* May be used as the port_start() entry in ata_port_operations.
*
* LOCKING:
* Inherited from caller.
*/
int ata_bmdma_port_start(struct ata_port *ap)
{
if (ap->mwdma_mask || ap->udma_mask) {
ap->bmdma_prd =
dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
&ap->bmdma_prd_dma, GFP_KERNEL);
if (!ap->bmdma_prd)
return -ENOMEM;
}
return 0;
}
EXPORT_SYMBOL_GPL(ata_bmdma_port_start);
/**
* ata_bmdma_port_start32 - Set port up for dma.
* @ap: Port to initialize
*
* Called just after data structures for each port are
* initialized. Enables 32bit PIO and allocates space for PRD
* table.
*
* May be used as the port_start() entry in ata_port_operations for
* devices that are capable of 32bit PIO.
*
* LOCKING:
* Inherited from caller.
*/
int ata_bmdma_port_start32(struct ata_port *ap)
{
ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
return ata_bmdma_port_start(ap);
}
EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
#ifdef CONFIG_PCI
/**
* ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
* @pdev: PCI device
*
* Some PCI ATA devices report simplex mode but in fact can be told to
 * enter non-simplex mode. This implements the necessary logic to
* perform the task on such devices. Calling it on other devices will
* have -undefined- behaviour.
*/
int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
{
unsigned long bmdma = pci_resource_start(pdev, 4);
u8 simplex;
if (bmdma == 0)
return -ENOENT;
simplex = inb(bmdma + 0x02);
outb(simplex & 0x60, bmdma + 0x02);
simplex = inb(bmdma + 0x02);
if (simplex & 0x80)
return -EOPNOTSUPP;
return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
{
int i;
dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason);
for (i = 0; i < 2; i++) {
host->ports[i]->mwdma_mask = 0;
host->ports[i]->udma_mask = 0;
}
}
/**
* ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
* @host: target ATA host
*
* Acquire PCI BMDMA resources and initialize @host accordingly.
*
* LOCKING:
* Inherited from calling layer (may sleep).
*/
void ata_pci_bmdma_init(struct ata_host *host)
{
struct device *gdev = host->dev;
struct pci_dev *pdev = to_pci_dev(gdev);
int i, rc;
/* No BAR4 allocation: No DMA */
if (pci_resource_start(pdev, 4) == 0) {
ata_bmdma_nodma(host, "BAR4 is zero");
return;
}
/*
 * Some controllers require the BMDMA region to be initialized
 * even when DMA is not in use, to clear IRQ status via the
 * ->sff_irq_clear method. Try to initialize bmdma_addr
 * regardless of the dma masks.
*/
rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
if (rc)
ata_bmdma_nodma(host, "failed to set dma mask");
/* request and iomap DMA region */
rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
if (rc) {
ata_bmdma_nodma(host, "failed to request/iomap BAR4");
return;
}
host->iomap = pcim_iomap_table(pdev);
for (i = 0; i < 2; i++) {
struct ata_port *ap = host->ports[i];
void __iomem *bmdma = host->iomap[4] + 8 * i;
if (ata_port_is_dummy(ap))
continue;
ap->ioaddr.bmdma_addr = bmdma;
if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
(ioread8(bmdma + 2) & 0x80))
host->flags |= ATA_HOST_SIMPLEX;
ata_port_desc(ap, "bmdma 0x%llx",
(unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
}
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
/**
* ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
* @pdev: target PCI device
* @ppi: array of port_info, must be enough for two ports
* @r_host: out argument for the initialized ATA host
*
* Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
* resources and initialize it accordingly in one go.
*
* LOCKING:
* Inherited from calling layer (may sleep).
*
* RETURNS:
* 0 on success, -errno otherwise.
*/
int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
const struct ata_port_info * const * ppi,
struct ata_host **r_host)
{
int rc;
rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
if (rc)
return rc;
ata_pci_bmdma_init(*r_host);
return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
/**
* ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
* @pdev: Controller to be initialized
* @ppi: array of port_info, must be enough for two ports
* @sht: scsi_host_template to use when registering the host
* @host_priv: host private_data
* @hflags: host flags
*
* This function is similar to ata_pci_sff_init_one() but also
* takes care of BMDMA initialization.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
 * Zero on success, negative errno-based value on error.
*/
int ata_pci_bmdma_init_one(struct pci_dev *pdev,
const struct ata_port_info * const * ppi,
const struct scsi_host_template *sht, void *host_priv,
int hflags)
{
return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1);
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
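/*
 * Illustrative sketch (hypothetical "example_" names): a typical PATA
 * driver probe built on the BMDMA one-shot helper above.
 */
#if 0
static int example_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &ata_bmdma_port_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };

	return ata_pci_bmdma_init_one(pdev, ppi, &example_sht, NULL, 0);
}
#endif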
#endif /* CONFIG_PCI */
#endif /* CONFIG_ATA_BMDMA */
/**
* ata_sff_port_init - Initialize SFF/BMDMA ATA port
* @ap: Port to initialize
*
* Called on port allocation to initialize SFF/BMDMA specific
* fields.
*
* LOCKING:
* None.
*/
void ata_sff_port_init(struct ata_port *ap)
{
INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
ap->ctl = ATA_DEVCTL_OBS;
ap->last_ctl = 0xFF;
}
int __init ata_sff_init(void)
{
ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
if (!ata_sff_wq)
return -ENOMEM;
return 0;
}
void ata_sff_exit(void)
{
destroy_workqueue(ata_sff_wq);
}
| linux-master | drivers/ata/libata-sff.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Calxeda Highbank AHCI SATA platform driver
* Copyright 2012 Calxeda, Inc.
*
* based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include "ahci.h"
#define CPHY_MAP(dev, addr) ((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f))
#define CPHY_ADDR(addr) (((addr) & 0x1ff) << 2)
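/*
 * Worked example: for dev = 0x1f and addr = 0x2005, CPHY_MAP yields
 * (0x1f << 7) | ((0x2005 >> 9) & 0x7f) = 0xf90, the page value written
 * to the map register, while CPHY_ADDR keeps the low 9 bits scaled to a
 * register offset: (0x2005 & 0x1ff) << 2 = 0x14.
 */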
#define SERDES_CR_CTL 0x80a0
#define SERDES_CR_ADDR 0x80a1
#define SERDES_CR_DATA 0x80a2
#define CR_BUSY 0x0001
#define CR_START 0x0001
#define CR_WR_RDN 0x0002
#define CPHY_TX_INPUT_STS 0x2001
#define CPHY_RX_INPUT_STS 0x2002
#define CPHY_SATA_TX_OVERRIDE 0x8000
#define CPHY_SATA_RX_OVERRIDE 0x4000
#define CPHY_TX_OVERRIDE 0x2004
#define CPHY_RX_OVERRIDE 0x2005
#define SPHY_LANE 0x100
#define SPHY_HALF_RATE 0x0001
#define CPHY_SATA_DPLL_MODE 0x0700
#define CPHY_SATA_DPLL_SHIFT 8
#define CPHY_SATA_DPLL_RESET (1 << 11)
#define CPHY_SATA_TX_ATTEN 0x1c00
#define CPHY_SATA_TX_ATTEN_SHIFT 10
#define CPHY_PHY_COUNT 6
#define CPHY_LANE_COUNT 4
#define CPHY_PORT_COUNT (CPHY_PHY_COUNT * CPHY_LANE_COUNT)
static DEFINE_SPINLOCK(cphy_lock);
/* Each of the 6 phys can have up to 4 sata ports attached to it. Map 0-based
 * sata ports to their phys and then to their lanes within the phys.
 */
struct phy_lane_info {
void __iomem *phy_base;
u8 lane_mapping;
u8 phy_devs;
u8 tx_atten;
};
static struct phy_lane_info port_data[CPHY_PORT_COUNT];
static DEFINE_SPINLOCK(sgpio_lock);
#define SCLOCK 0
#define SLOAD 1
#define SDATA 2
#define SGPIO_PINS 3
#define SGPIO_PORTS 8
struct ecx_plat_data {
u32 n_ports;
/* number of extra clocks that the SGPIO PIC controller expects */
u32 pre_clocks;
u32 post_clocks;
struct gpio_desc *sgpio_gpiod[SGPIO_PINS];
u32 sgpio_pattern;
u32 port_to_sgpio[SGPIO_PORTS];
};
#define SGPIO_SIGNALS 3
#define ECX_ACTIVITY_BITS 0x300000
#define ECX_ACTIVITY_SHIFT 0
#define ECX_LOCATE_BITS 0x80000
#define ECX_LOCATE_SHIFT 1
#define ECX_FAULT_BITS 0x400000
#define ECX_FAULT_SHIFT 2
static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
u32 shift)
{
return 1 << (3 * pdata->port_to_sgpio[port] + shift);
}
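/*
 * Worked example: each port owns three consecutive SGPIO pattern bits
 * (activity, locate, fault). With port_to_sgpio[2] == 1, port 2's
 * locate bit is 1 << (3 * 1 + 1), i.e. bit 4 of sgpio_pattern.
 */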
static void ecx_parse_sgpio(struct ecx_plat_data *pdata, u32 port, u32 state)
{
if (state & ECX_ACTIVITY_BITS)
pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
ECX_ACTIVITY_SHIFT);
else
pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
ECX_ACTIVITY_SHIFT);
if (state & ECX_LOCATE_BITS)
pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
ECX_LOCATE_SHIFT);
else
pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
ECX_LOCATE_SHIFT);
if (state & ECX_FAULT_BITS)
pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
ECX_FAULT_SHIFT);
else
pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
ECX_FAULT_SHIFT);
}
/*
* Tell the LED controller that the signal has changed by raising the clock
 * line for 50 us and then lowering it for 50 us.
*/
static void ecx_led_cycle_clock(struct ecx_plat_data *pdata)
{
gpiod_set_value(pdata->sgpio_gpiod[SCLOCK], 1);
udelay(50);
gpiod_set_value(pdata->sgpio_gpiod[SCLOCK], 0);
udelay(50);
}
static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
ssize_t size)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ecx_plat_data *pdata = hpriv->plat_data;
struct ahci_port_priv *pp = ap->private_data;
unsigned long flags;
int pmp, i;
struct ahci_em_priv *emp;
u32 sgpio_out;
/* get the slot number from the message */
pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
if (pmp < EM_MAX_SLOTS)
emp = &pp->em_priv[pmp];
else
return -EINVAL;
if (!(hpriv->em_msg_type & EM_MSG_TYPE_LED))
return size;
spin_lock_irqsave(&sgpio_lock, flags);
ecx_parse_sgpio(pdata, ap->port_no, state);
sgpio_out = pdata->sgpio_pattern;
for (i = 0; i < pdata->pre_clocks; i++)
ecx_led_cycle_clock(pdata);
gpiod_set_value(pdata->sgpio_gpiod[SLOAD], 1);
ecx_led_cycle_clock(pdata);
gpiod_set_value(pdata->sgpio_gpiod[SLOAD], 0);
/*
* bit-bang out the SGPIO pattern, by consuming a bit and then
* clocking it out.
*/
for (i = 0; i < (SGPIO_SIGNALS * pdata->n_ports); i++) {
gpiod_set_value(pdata->sgpio_gpiod[SDATA], sgpio_out & 1);
sgpio_out >>= 1;
ecx_led_cycle_clock(pdata);
}
for (i = 0; i < pdata->post_clocks; i++)
ecx_led_cycle_clock(pdata);
/* save off new led state for port/slot */
emp->led_state = state;
spin_unlock_irqrestore(&sgpio_lock, flags);
return size;
}
static void highbank_set_em_messages(struct device *dev,
struct ahci_host_priv *hpriv,
struct ata_port_info *pi)
{
struct device_node *np = dev->of_node;
struct ecx_plat_data *pdata = hpriv->plat_data;
int i;
for (i = 0; i < SGPIO_PINS; i++) {
struct gpio_desc *gpiod;
gpiod = devm_gpiod_get_index(dev, "calxeda,sgpio", i,
GPIOD_OUT_HIGH);
if (IS_ERR(gpiod)) {
dev_err(dev, "failed to get GPIO %d\n", i);
continue;
}
gpiod_set_consumer_name(gpiod, "CX SGPIO");
pdata->sgpio_gpiod[i] = gpiod;
}
of_property_read_u32_array(np, "calxeda,led-order",
pdata->port_to_sgpio,
pdata->n_ports);
if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks))
pdata->pre_clocks = 0;
if (of_property_read_u32(np, "calxeda,post-clocks",
&pdata->post_clocks))
pdata->post_clocks = 0;
/* store em_loc */
hpriv->em_loc = 0;
hpriv->em_buf_sz = 4;
hpriv->em_msg_type = EM_MSG_TYPE_LED;
pi->flags |= ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY;
}
static u32 __combo_phy_reg_read(u8 sata_port, u32 addr)
{
u32 data;
u8 dev = port_data[sata_port].phy_devs;
spin_lock(&cphy_lock);
writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
data = readl(port_data[sata_port].phy_base + CPHY_ADDR(addr));
spin_unlock(&cphy_lock);
return data;
}
static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data)
{
u8 dev = port_data[sata_port].phy_devs;
spin_lock(&cphy_lock);
writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
writel(data, port_data[sata_port].phy_base + CPHY_ADDR(addr));
spin_unlock(&cphy_lock);
}
static void combo_phy_wait_for_ready(u8 sata_port)
{
while (__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY)
udelay(5);
}
static u32 combo_phy_read(u8 sata_port, u32 addr)
{
combo_phy_wait_for_ready(sata_port);
__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START);
combo_phy_wait_for_ready(sata_port);
return __combo_phy_reg_read(sata_port, SERDES_CR_DATA);
}
static void combo_phy_write(u8 sata_port, u32 addr, u32 data)
{
combo_phy_wait_for_ready(sata_port);
__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
__combo_phy_reg_write(sata_port, SERDES_CR_DATA, data);
__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START);
}
static void highbank_cphy_disable_overrides(u8 sata_port)
{
u8 lane = port_data[sata_port].lane_mapping;
u32 tmp;
if (unlikely(port_data[sata_port].phy_base == NULL))
return;
tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
tmp &= ~CPHY_SATA_RX_OVERRIDE;
combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
}
static void cphy_override_tx_attenuation(u8 sata_port, u32 val)
{
u8 lane = port_data[sata_port].lane_mapping;
u32 tmp;
if (val & 0x8)
return;
tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE);
tmp &= ~CPHY_SATA_TX_OVERRIDE;
combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
tmp |= CPHY_SATA_TX_OVERRIDE;
combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN;
combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
}
static void cphy_override_rx_mode(u8 sata_port, u32 val)
{
u8 lane = port_data[sata_port].lane_mapping;
u32 tmp;
tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
tmp &= ~CPHY_SATA_RX_OVERRIDE;
combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
tmp |= CPHY_SATA_RX_OVERRIDE;
combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
tmp &= ~CPHY_SATA_DPLL_MODE;
tmp |= val << CPHY_SATA_DPLL_SHIFT;
combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
tmp |= CPHY_SATA_DPLL_RESET;
combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
tmp &= ~CPHY_SATA_DPLL_RESET;
combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
msleep(15);
}
static void highbank_cphy_override_lane(u8 sata_port)
{
u8 lane = port_data[sata_port].lane_mapping;
u32 tmp, k = 0;
if (unlikely(port_data[sata_port].phy_base == NULL))
return;
do {
tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS +
lane * SPHY_LANE);
} while ((tmp & SPHY_HALF_RATE) && (k++ < 1000));
cphy_override_rx_mode(sata_port, 3);
cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten);
}
static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
{
struct device_node *sata_node = dev->of_node;
int phy_count = 0, phy, port = 0, i;
void __iomem *cphy_base[CPHY_PHY_COUNT] = {};
struct device_node *phy_nodes[CPHY_PHY_COUNT] = {};
u32 tx_atten[CPHY_PORT_COUNT] = {};
memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);
do {
u32 tmp;
struct of_phandle_args phy_data;
if (of_parse_phandle_with_args(sata_node,
"calxeda,port-phys", "#phy-cells",
port, &phy_data))
break;
for (phy = 0; phy < phy_count; phy++) {
if (phy_nodes[phy] == phy_data.np)
break;
}
if (phy_nodes[phy] == NULL) {
phy_nodes[phy] = phy_data.np;
cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
if (cphy_base[phy] == NULL) {
return 0;
}
phy_count += 1;
}
port_data[port].lane_mapping = phy_data.args[0];
of_property_read_u32(phy_nodes[phy], "phydev", &tmp);
port_data[port].phy_devs = tmp;
port_data[port].phy_base = cphy_base[phy];
of_node_put(phy_data.np);
port += 1;
} while (port < CPHY_PORT_COUNT);
of_property_read_u32_array(sata_node, "calxeda,tx-atten",
tx_atten, port);
for (i = 0; i < port; i++)
port_data[i].tx_atten = (u8) tx_atten[i];
return 0;
}
/*
 * The Calxeda SATA phy intermittently fails to bring up a link with Gen3
 * drives. Retrying the phy hard reset can work around the issue, but the drive
* may fail again. In less than 150 out of 15000 test runs, it took more
* than 10 tries for the link to be established (but never more than 35).
* Triple the maximum observed retry count to provide plenty of margin for
* rare events and to guarantee that the link is established.
*
* Also, the default 2 second time-out on a failed drive is too long in
* this situation. The uboot implementation of the same driver function
* uses a much shorter time-out period and never experiences a time out
* issue. Reducing the time-out to 500ms improves the responsiveness.
* The other timing constants were kept the same as the stock AHCI driver.
* This change was also tested 15000 times on 24 drives and none of them
* experienced a time out.
*/
static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
static const unsigned int timing[] = { 5, 100, 500};
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
struct ata_taskfile tf;
bool online;
u32 sstatus;
int rc;
int retry = 100;
hpriv->stop_engine(ap);
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
do {
highbank_cphy_disable_overrides(link->ap->port_no);
rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
highbank_cphy_override_lane(link->ap->port_no);
/* If the status is 1, we are connected, but the link did not
 * come up, so retry resetting the link.
 */
if (sata_scr_read(link, SCR_STATUS, &sstatus))
break;
if (!(sstatus & 0x3))
break;
} while (!online && retry--);
hpriv->start_engine(ap);
if (online)
*class = ahci_dev_classify(ap);
return rc;
}
static struct ata_port_operations ahci_highbank_ops = {
.inherits = &ahci_ops,
.hardreset = ahci_highbank_hardreset,
.transmit_led_message = ecx_transmit_led_message,
};
static const struct ata_port_info ahci_highbank_port_info = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_highbank_ops,
};
static const struct scsi_host_template ahci_highbank_platform_sht = {
AHCI_SHT("sata_highbank"),
};
static const struct of_device_id ahci_of_match[] = {
{ .compatible = "calxeda,hb-ahci" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
static int ahci_highbank_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
struct ecx_plat_data *pdata;
struct ata_host *host;
struct resource *mem;
int irq;
int i;
int rc;
u32 n_ports;
struct ata_port_info pi = ahci_highbank_port_info;
const struct ata_port_info *ppi[] = { &pi, NULL };
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(dev, "no mmio space\n");
return -EINVAL;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
if (!irq)
return -EINVAL;
hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv) {
dev_err(dev, "can't alloc ahci_host_priv\n");
return -ENOMEM;
}
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
dev_err(dev, "can't alloc ecx_plat_data\n");
return -ENOMEM;
}
hpriv->irq = irq;
hpriv->flags |= (unsigned long)pi.private_data;
hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
if (!hpriv->mmio) {
dev_err(dev, "can't map %pR\n", mem);
return -ENOMEM;
}
rc = highbank_initialize_phys(dev, hpriv->mmio);
if (rc)
return rc;
ahci_save_initial_config(dev, hpriv);
/* prepare host */
if (hpriv->cap & HOST_CAP_NCQ)
pi.flags |= ATA_FLAG_NCQ;
if (hpriv->cap & HOST_CAP_PMP)
pi.flags |= ATA_FLAG_PMP;
if (hpriv->cap & HOST_CAP_64)
dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
/* CAP.NP sometimes indicates the index of the last enabled
* port, at other times, that of the last possible port, so
* determining the maximum port number requires looking at
* both CAP.NP and port_map.
*/
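/* e.g. if CAP.NP reports a single port but port_map is 0x9
* (ports 0 and 3), fls(0x9) == 4 keeps all four port slots.
*/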
n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
pdata->n_ports = n_ports;
hpriv->plat_data = pdata;
highbank_set_em_messages(dev, hpriv, &pi);
host = ata_host_alloc_pinfo(dev, ppi, n_ports);
if (!host) {
rc = -ENOMEM;
goto err0;
}
host->private_data = hpriv;
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
host->flags |= ATA_HOST_PARALLEL_SCAN;
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
ata_port_desc(ap, "mmio %pR", mem);
ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
/* set enclosure management message type */
if (ap->flags & ATA_FLAG_EM)
ap->em_message_type = hpriv->em_msg_type;
/* disabled/not-implemented port */
if (!(hpriv->port_map & (1 << i)))
ap->ops = &ata_dummy_port_ops;
}
rc = ahci_reset_controller(host);
if (rc)
goto err0;
ahci_init_controller(host);
ahci_print_info(host, "platform");
rc = ahci_host_activate(host, &ahci_highbank_platform_sht);
if (rc)
goto err0;
return 0;
err0:
return rc;
}
#ifdef CONFIG_PM_SLEEP
static int ahci_highbank_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->mmio;
u32 ctl;
if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
dev_err(dev, "firmware update required for suspend/resume\n");
return -EIO;
}
/*
* AHCI spec rev1.1 section 8.3.3:
* Software must disable interrupts prior to requesting a
* transition of the HBA to D3 state.
*/
ctl = readl(mmio + HOST_CTL);
ctl &= ~HOST_IRQ_EN;
writel(ctl, mmio + HOST_CTL);
readl(mmio + HOST_CTL); /* flush */
ata_host_suspend(host, PMSG_SUSPEND);
return 0;
}
static int ahci_highbank_resume(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
int rc;
if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
rc = ahci_reset_controller(host);
if (rc)
return rc;
ahci_init_controller(host);
}
ata_host_resume(host);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
ahci_highbank_suspend, ahci_highbank_resume);
static struct platform_driver ahci_highbank_driver = {
.remove_new = ata_platform_remove_one,
.driver = {
.name = "highbank-ahci",
.of_match_table = ahci_of_match,
.pm = &ahci_highbank_pm_ops,
},
.probe = ahci_highbank_probe,
};
module_platform_driver(ahci_highbank_driver);
MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver");
MODULE_AUTHOR("Mark Langsdorf <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("sata:highbank");
| linux-master | drivers/ata/sata_highbank.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers
*
* Copyright 2005 Tejun Heo
*
* Based on preview driver from Silicon Image.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#define DRV_NAME "sata_sil24"
#define DRV_VERSION "1.1"
/*
* Port request block (PRB) 32 bytes
*/
struct sil24_prb {
__le16 ctrl;
__le16 prot;
__le32 rx_cnt;
u8 fis[6 * 4];
};
/*
* Scatter gather entry (SGE) 16 bytes
*/
struct sil24_sge {
__le64 addr;
__le32 cnt;
__le32 flags;
};
enum {
SIL24_HOST_BAR = 0,
SIL24_PORT_BAR = 2,
/* sil24 fetches in chunks of 64 bytes. The first block
* contains the PRB and two SGEs. Each subsequent block
* consists of four SGEs and is called an SGT. Calculate the
* number of SGTs that fit into one page.
*/
SIL24_PRB_SZ = sizeof(struct sil24_prb)
+ 2 * sizeof(struct sil24_sge),
SIL24_MAX_SGT = (PAGE_SIZE - SIL24_PRB_SZ)
/ (4 * sizeof(struct sil24_sge)),
/* This gives us one extra SGE for ATA commands; for ATAPI
* devices the corresponding space is used to store the CDB.
*/
SIL24_MAX_SGE = 4 * SIL24_MAX_SGT + 1,
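/* e.g. with 4 KiB pages: SIL24_PRB_SZ = 32 + 2 * 16 = 64 bytes,
* SIL24_MAX_SGT = (4096 - 64) / 64 = 63 and
* SIL24_MAX_SGE = 4 * 63 + 1 = 253.
*/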
/*
* Global controller registers (128 bytes @ BAR0)
*/
/* 32 bit regs */
HOST_SLOT_STAT = 0x00, /* 32 bit slot stat * 4 */
HOST_CTRL = 0x40,
HOST_IRQ_STAT = 0x44,
HOST_PHY_CFG = 0x48,
HOST_BIST_CTRL = 0x50,
HOST_BIST_PTRN = 0x54,
HOST_BIST_STAT = 0x58,
HOST_MEM_BIST_STAT = 0x5c,
HOST_FLASH_CMD = 0x70,
/* 8 bit regs */
HOST_FLASH_DATA = 0x74,
HOST_TRANSITION_DETECT = 0x75,
HOST_GPIO_CTRL = 0x76,
HOST_I2C_ADDR = 0x78, /* 32 bit */
HOST_I2C_DATA = 0x7c,
HOST_I2C_XFER_CNT = 0x7e,
HOST_I2C_CTRL = 0x7f,
/* HOST_SLOT_STAT bits */
HOST_SSTAT_ATTN = (1 << 31),
/* HOST_CTRL bits */
HOST_CTRL_M66EN = (1 << 16), /* M66EN PCI bus signal */
HOST_CTRL_TRDY = (1 << 17), /* latched PCI TRDY */
HOST_CTRL_STOP = (1 << 18), /* latched PCI STOP */
HOST_CTRL_DEVSEL = (1 << 19), /* latched PCI DEVSEL */
HOST_CTRL_REQ64 = (1 << 20), /* latched PCI REQ64 */
HOST_CTRL_GLOBAL_RST = (1 << 31), /* global reset */
/*
* Port registers
* (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
*/
PORT_REGS_SIZE = 0x2000,
PORT_LRAM = 0x0000, /* 31 LRAM slots and PMP regs */
PORT_LRAM_SLOT_SZ = 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */
PORT_PMP = 0x0f80, /* 8 bytes PMP * 16 (128 bytes) */
PORT_PMP_STATUS = 0x0000, /* port device status offset */
PORT_PMP_QACTIVE = 0x0004, /* port device QActive offset */
PORT_PMP_SIZE = 0x0008, /* 8 bytes per PMP */
/* 32 bit regs */
PORT_CTRL_STAT = 0x1000, /* write: ctrl-set, read: stat */
PORT_CTRL_CLR = 0x1004, /* write: ctrl-clear */
PORT_IRQ_STAT = 0x1008, /* high: status, low: interrupt */
PORT_IRQ_ENABLE_SET = 0x1010, /* write: enable-set */
PORT_IRQ_ENABLE_CLR = 0x1014, /* write: enable-clear */
PORT_ACTIVATE_UPPER_ADDR= 0x101c,
PORT_EXEC_FIFO = 0x1020, /* command execution fifo */
PORT_CMD_ERR = 0x1024, /* command error number */
PORT_FIS_CFG = 0x1028,
PORT_FIFO_THRES = 0x102c,
/* 16 bit regs */
PORT_DECODE_ERR_CNT = 0x1040,
PORT_DECODE_ERR_THRESH = 0x1042,
PORT_CRC_ERR_CNT = 0x1044,
PORT_CRC_ERR_THRESH = 0x1046,
PORT_HSHK_ERR_CNT = 0x1048,
PORT_HSHK_ERR_THRESH = 0x104a,
/* 32 bit regs */
PORT_PHY_CFG = 0x1050,
PORT_SLOT_STAT = 0x1800,
PORT_CMD_ACTIVATE = 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
PORT_CONTEXT = 0x1e04,
PORT_EXEC_DIAG = 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
PORT_PSD_DIAG = 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
PORT_SCONTROL = 0x1f00,
PORT_SSTATUS = 0x1f04,
PORT_SERROR = 0x1f08,
PORT_SACTIVE = 0x1f0c,
/* PORT_CTRL_STAT bits */
PORT_CS_PORT_RST = (1 << 0), /* port reset */
PORT_CS_DEV_RST = (1 << 1), /* device reset */
PORT_CS_INIT = (1 << 2), /* port initialize */
PORT_CS_IRQ_WOC = (1 << 3), /* interrupt write one to clear */
PORT_CS_CDB16 = (1 << 5), /* 0=12b cdb, 1=16b cdb */
PORT_CS_PMP_RESUME = (1 << 6), /* PMP resume */
PORT_CS_32BIT_ACTV = (1 << 10), /* 32-bit activation */
PORT_CS_PMP_EN = (1 << 13), /* port multiplier enable */
PORT_CS_RDY = (1 << 31), /* port ready to accept commands */
/* PORT_IRQ_STAT/ENABLE_SET/CLR */
/* bits[11:0] are masked */
PORT_IRQ_COMPLETE = (1 << 0), /* command(s) completed */
PORT_IRQ_ERROR = (1 << 1), /* command execution error */
PORT_IRQ_PORTRDY_CHG = (1 << 2), /* port ready change */
PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */
PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */
PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */
PORT_IRQ_UNK_FIS = (1 << 6), /* unknown FIS received */
PORT_IRQ_DEV_XCHG = (1 << 7), /* device exchanged */
PORT_IRQ_8B10B = (1 << 8), /* 8b/10b decode error threshold */
PORT_IRQ_CRC = (1 << 9), /* CRC error threshold */
PORT_IRQ_HANDSHAKE = (1 << 10), /* handshake error threshold */
PORT_IRQ_SDB_NOTIFY = (1 << 11), /* SDB notify received */
DEF_PORT_IRQ = PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG |
PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_NOTIFY,
/* bits[27:16] are unmasked (raw) */
PORT_IRQ_RAW_SHIFT = 16,
PORT_IRQ_MASKED_MASK = 0x7ff,
PORT_IRQ_RAW_MASK = (0x7ff << PORT_IRQ_RAW_SHIFT),
/* ENABLE_SET/CLR specific, intr steering - 2 bit field */
PORT_IRQ_STEER_SHIFT = 30,
PORT_IRQ_STEER_MASK = (3 << PORT_IRQ_STEER_SHIFT),
/* PORT_CMD_ERR constants */
PORT_CERR_DEV = 1, /* Error bit in D2H Register FIS */
PORT_CERR_SDB = 2, /* Error bit in SDB FIS */
PORT_CERR_DATA = 3, /* Error in data FIS not detected by dev */
PORT_CERR_SEND = 4, /* Initial cmd FIS transmission failure */
PORT_CERR_INCONSISTENT = 5, /* Protocol mismatch */
PORT_CERR_DIRECTION = 6, /* Data direction mismatch */
PORT_CERR_UNDERRUN = 7, /* Ran out of SGEs while writing */
PORT_CERR_OVERRUN = 8, /* Ran out of SGEs while reading */
PORT_CERR_PKT_PROT = 11, /* DIR invalid in 1st PIO setup of ATAPI */
PORT_CERR_SGT_BOUNDARY = 16, /* PLD ecode 00 - SGT not on qword boundary */
PORT_CERR_SGT_TGTABRT = 17, /* PLD ecode 01 - target abort */
PORT_CERR_SGT_MSTABRT = 18, /* PLD ecode 10 - master abort */
PORT_CERR_SGT_PCIPERR = 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
PORT_CERR_CMD_BOUNDARY = 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
PORT_CERR_CMD_TGTABRT = 25, /* ctrl[15:13] 010 - target abort */
PORT_CERR_CMD_MSTABRT = 26, /* ctrl[15:13] 100 - master abort */
PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */
PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */
PORT_CERR_XFR_MSTABRT = 34, /* PSD ecode 10 - master abort */
PORT_CERR_XFR_PCIPERR = 35, /* PSD ecode 11 - PCI parity err during transfer */
PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */
/* bits of PRB control field */
PRB_CTRL_PROTOCOL = (1 << 0), /* override def. ATA protocol */
PRB_CTRL_PACKET_READ = (1 << 4), /* PACKET cmd read */
PRB_CTRL_PACKET_WRITE = (1 << 5), /* PACKET cmd write */
PRB_CTRL_NIEN = (1 << 6), /* Mask completion irq */
PRB_CTRL_SRST = (1 << 7), /* Soft reset request (ign BSY?) */
/* PRB protocol field */
PRB_PROT_PACKET = (1 << 0),
PRB_PROT_TCQ = (1 << 1),
PRB_PROT_NCQ = (1 << 2),
PRB_PROT_READ = (1 << 3),
PRB_PROT_WRITE = (1 << 4),
PRB_PROT_TRANSPARENT = (1 << 5),
/*
* Other constants
*/
SGE_TRM = (1 << 31), /* Last SGE in chain */
SGE_LNK = (1 << 30), /* linked list
Points to SGT, not SGE */
SGE_DRD = (1 << 29), /* discard data read (/dev/null)
data address ignored */
SIL24_MAX_CMDS = 31,
/* board id */
BID_SIL3124 = 0,
BID_SIL3132 = 1,
BID_SIL3131 = 2,
/* host flags */
SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
ATA_FLAG_AN | ATA_FLAG_PMP,
SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
IRQ_STAT_4PORTS = 0xf,
};
struct sil24_ata_block {
struct sil24_prb prb;
struct sil24_sge sge[SIL24_MAX_SGE];
};
struct sil24_atapi_block {
struct sil24_prb prb;
u8 cdb[16];
struct sil24_sge sge[SIL24_MAX_SGE];
};
union sil24_cmd_block {
struct sil24_ata_block ata;
struct sil24_atapi_block atapi;
};
static const struct sil24_cerr_info {
unsigned int err_mask, action;
const char *desc;
} sil24_cerr_db[] = {
[0] = { AC_ERR_DEV, 0,
"device error" },
[PORT_CERR_DEV] = { AC_ERR_DEV, 0,
"device error via D2H FIS" },
[PORT_CERR_SDB] = { AC_ERR_DEV, 0,
"device error via SDB FIS" },
[PORT_CERR_DATA] = { AC_ERR_ATA_BUS, ATA_EH_RESET,
"error in data FIS" },
[PORT_CERR_SEND] = { AC_ERR_ATA_BUS, ATA_EH_RESET,
"failed to transmit command FIS" },
[PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_RESET,
"protocol mismatch" },
[PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_RESET,
"data direction mismatch" },
[PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_RESET,
"ran out of SGEs while writing" },
[PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_RESET,
"ran out of SGEs while reading" },
[PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_RESET,
"invalid data direction for ATAPI CDB" },
[PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
"SGT not on qword boundary" },
[PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
"PCI target abort while fetching SGT" },
[PORT_CERR_SGT_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
"PCI master abort while fetching SGT" },
[PORT_CERR_SGT_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
"PCI parity error while fetching SGT" },
[PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
"PRB not on qword boundary" },
[PORT_CERR_CMD_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
"PCI target abort while fetching PRB" },
[PORT_CERR_CMD_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
"PCI master abort while fetching PRB" },
[PORT_CERR_CMD_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
"PCI parity error while fetching PRB" },
[PORT_CERR_XFR_UNDEF] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
"undefined error while transferring data" },
[PORT_CERR_XFR_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
"PCI target abort while transferring data" },
[PORT_CERR_XFR_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
"PCI master abort while transferring data" },
[PORT_CERR_XFR_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
"PCI parity error while transferring data" },
[PORT_CERR_SENDSERVICE] = { AC_ERR_HSM, ATA_EH_RESET,
"FIS received while sending service FIS" },
};
/*
* ap->private_data
*
* The preview driver always returned 0 for status. We emulate it
* here from the previous interrupt.
*/
struct sil24_port_priv {
union sil24_cmd_block *cmd_block; /* SIL24_MAX_CMDS (31) cmd blocks */
dma_addr_t cmd_block_dma; /* DMA base addr for them */
int do_port_rst;
};
static void sil24_dev_config(struct ata_device *dev);
static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
static int sil24_qc_defer(struct ata_queued_cmd *qc);
static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc);
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
static void sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
static void sil24_pmp_attach(struct ata_port *ap);
static void sil24_pmp_detach(struct ata_port *ap);
static void sil24_freeze(struct ata_port *ap);
static void sil24_thaw(struct ata_port *ap);
static int sil24_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int sil24_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static void sil24_error_handler(struct ata_port *ap);
static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
static int sil24_port_start(struct ata_port *ap);
static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int sil24_pci_device_resume(struct pci_dev *pdev);
#endif
#ifdef CONFIG_PM
static int sil24_port_resume(struct ata_port *ap);
#endif
static const struct pci_device_id sil24_pci_tbl[] = {
{ PCI_VDEVICE(CMD, 0x3124), BID_SIL3124 },
{ PCI_VDEVICE(INTEL, 0x3124), BID_SIL3124 },
{ PCI_VDEVICE(CMD, 0x3132), BID_SIL3132 },
{ PCI_VDEVICE(CMD, 0x0242), BID_SIL3132 },
{ PCI_VDEVICE(CMD, 0x0244), BID_SIL3132 },
{ PCI_VDEVICE(CMD, 0x3131), BID_SIL3131 },
{ PCI_VDEVICE(CMD, 0x3531), BID_SIL3131 },
{ } /* terminate list */
};
static struct pci_driver sil24_pci_driver = {
.name = DRV_NAME,
.id_table = sil24_pci_tbl,
.probe = sil24_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = sil24_pci_device_resume,
#endif
};
static const struct scsi_host_template sil24_sht = {
__ATA_BASE_SHT(DRV_NAME),
.can_queue = SIL24_MAX_CMDS,
.sg_tablesize = SIL24_MAX_SGE,
.dma_boundary = ATA_DMA_BOUNDARY,
.tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.slave_configure = ata_scsi_slave_config
};
static struct ata_port_operations sil24_ops = {
.inherits = &sata_pmp_port_ops,
.qc_defer = sil24_qc_defer,
.qc_prep = sil24_qc_prep,
.qc_issue = sil24_qc_issue,
.qc_fill_rtf = sil24_qc_fill_rtf,
.freeze = sil24_freeze,
.thaw = sil24_thaw,
.softreset = sil24_softreset,
.hardreset = sil24_hardreset,
.pmp_softreset = sil24_softreset,
.pmp_hardreset = sil24_pmp_hardreset,
.error_handler = sil24_error_handler,
.post_internal_cmd = sil24_post_internal_cmd,
.dev_config = sil24_dev_config,
.scr_read = sil24_scr_read,
.scr_write = sil24_scr_write,
.pmp_attach = sil24_pmp_attach,
.pmp_detach = sil24_pmp_detach,
.port_start = sil24_port_start,
#ifdef CONFIG_PM
.port_resume = sil24_port_resume,
#endif
};
static bool sata_sil24_msi; /* Disable MSI */
module_param_named(msi, sata_sil24_msi, bool, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable MSI (Default: false)");
/*
* Use bits 30-31 of port_flags to encode available port numbers.
* Current maximum is 4.
*/
#define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30)
#define SIL24_FLAG2NPORTS(flag) ((((flag) >> 30) & 0x3) + 1)
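/* e.g. SIL24_NPORTS2FLAG(4) stores 3 in bits 30-31 and
* SIL24_FLAG2NPORTS() turns it back into 4.
*/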
static const struct ata_port_info sil24_port_info[] = {
/* sil_3124 */
{
.flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
SIL24_FLAG_PCIX_IRQ_WOC,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &sil24_ops,
},
/* sil_3132 */
{
.flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &sil24_ops,
},
/* sil_3131/sil_3531 */
{
.flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &sil24_ops,
},
};
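/* The controller provides 31 command slots; libata's internal tag is
* out of that range, so map it onto hardware slot 0 (internal commands
* are not issued alongside normal commands, so the slot should be free).
*/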
static int sil24_tag(int tag)
{
if (unlikely(ata_tag_internal(tag)))
return 0;
return tag;
}
static unsigned long sil24_port_offset(struct ata_port *ap)
{
return ap->port_no * PORT_REGS_SIZE;
}
static void __iomem *sil24_port_base(struct ata_port *ap)
{
return ap->host->iomap[SIL24_PORT_BAR] + sil24_port_offset(ap);
}
static void sil24_dev_config(struct ata_device *dev)
{
void __iomem *port = sil24_port_base(dev->link->ap);
if (dev->cdb_len == 16)
writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
else
writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
}
static void sil24_read_tf(struct ata_port *ap, int tag, struct ata_taskfile *tf)
{
void __iomem *port = sil24_port_base(ap);
struct sil24_prb __iomem *prb;
u8 fis[6 * 4];
prb = port + PORT_LRAM + sil24_tag(tag) * PORT_LRAM_SLOT_SZ;
memcpy_fromio(fis, prb->fis, sizeof(fis));
ata_tf_from_fis(fis, tf);
}
static int sil24_scr_map[] = {
[SCR_CONTROL] = 0,
[SCR_STATUS] = 1,
[SCR_ERROR] = 2,
[SCR_ACTIVE] = 3,
};
static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
{
void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;
if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
*val = readl(scr_addr + sil24_scr_map[sc_reg] * 4);
return 0;
}
return -EINVAL;
}
static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
{
void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;
if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
writel(val, scr_addr + sil24_scr_map[sc_reg] * 4);
return 0;
}
return -EINVAL;
}
static void sil24_config_port(struct ata_port *ap)
{
void __iomem *port = sil24_port_base(ap);
/* configure IRQ WoC */
if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
else
writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
/* zero error counters. */
writew(0x8000, port + PORT_DECODE_ERR_THRESH);
writew(0x8000, port + PORT_CRC_ERR_THRESH);
writew(0x8000, port + PORT_HSHK_ERR_THRESH);
writew(0x0000, port + PORT_DECODE_ERR_CNT);
writew(0x0000, port + PORT_CRC_ERR_CNT);
writew(0x0000, port + PORT_HSHK_ERR_CNT);
/* always use 64bit activation */
writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
/* clear port multiplier enable and resume bits */
writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);
}
static void sil24_config_pmp(struct ata_port *ap, int attached)
{
void __iomem *port = sil24_port_base(ap);
if (attached)
writel(PORT_CS_PMP_EN, port + PORT_CTRL_STAT);
else
writel(PORT_CS_PMP_EN, port + PORT_CTRL_CLR);
}
static void sil24_clear_pmp(struct ata_port *ap)
{
void __iomem *port = sil24_port_base(ap);
int i;
writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);
for (i = 0; i < SATA_PMP_MAX_PORTS; i++) {
void __iomem *pmp_base = port + PORT_PMP + i * PORT_PMP_SIZE;
writel(0, pmp_base + PORT_PMP_STATUS);
writel(0, pmp_base + PORT_PMP_QACTIVE);
}
}
static int sil24_init_port(struct ata_port *ap)
{
void __iomem *port = sil24_port_base(ap);
struct sil24_port_priv *pp = ap->private_data;
u32 tmp;
/* clear PMP error status */
if (sata_pmp_attached(ap))
sil24_clear_pmp(ap);
writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
ata_wait_register(ap, port + PORT_CTRL_STAT,
PORT_CS_INIT, PORT_CS_INIT, 10, 100);
tmp = ata_wait_register(ap, port + PORT_CTRL_STAT,
PORT_CS_RDY, 0, 10, 100);
if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) {
pp->do_port_rst = 1;
ap->link.eh_context.i.action |= ATA_EH_RESET;
return -EIO;
}
return 0;
}
static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp,
const struct ata_taskfile *tf,
int is_cmd, u32 ctrl,
unsigned int timeout_msec)
{
void __iomem *port = sil24_port_base(ap);
struct sil24_port_priv *pp = ap->private_data;
struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
dma_addr_t paddr = pp->cmd_block_dma;
u32 irq_enabled, irq_mask, irq_stat;
int rc;
prb->ctrl = cpu_to_le16(ctrl);
ata_tf_to_fis(tf, pmp, is_cmd, prb->fis);
/* temporarily plug completion and error interrupts */
irq_enabled = readl(port + PORT_IRQ_ENABLE_SET);
writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR);
/*
* The barrier is required to ensure that writes to cmd_block reach
* the memory before the write to PORT_CMD_ACTIVATE.
*/
wmb();
writel((u32)paddr, port + PORT_CMD_ACTIVATE);
writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
irq_mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
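/* Poll the raw copies of the bits at [27:16]: the masked IRQs
* were plugged above, but the raw status still reflects events.
*/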
irq_stat = ata_wait_register(ap, port + PORT_IRQ_STAT, irq_mask, 0x0,
10, timeout_msec);
writel(irq_mask, port + PORT_IRQ_STAT); /* clear IRQs */
irq_stat >>= PORT_IRQ_RAW_SHIFT;
if (irq_stat & PORT_IRQ_COMPLETE)
rc = 0;
else {
/* force port into known state */
sil24_init_port(ap);
if (irq_stat & PORT_IRQ_ERROR)
rc = -EIO;
else
rc = -EBUSY;
}
/* restore IRQ enabled */
writel(irq_enabled, port + PORT_IRQ_ENABLE_SET);
return rc;
}
static int sil24_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
int pmp = sata_srst_pmp(link);
unsigned int timeout_msec = 0;
struct ata_taskfile tf;
const char *reason;
int rc;
/* put the port into known state */
if (sil24_init_port(ap)) {
reason = "port not ready";
goto err;
}
/* do SRST */
if (time_after(deadline, jiffies))
timeout_msec = jiffies_to_msecs(deadline - jiffies);
ata_tf_init(link->device, &tf); /* doesn't really matter */
rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST,
timeout_msec);
if (rc == -EBUSY) {
reason = "timeout";
goto err;
} else if (rc) {
reason = "SRST command error";
goto err;
}
sil24_read_tf(ap, 0, &tf);
*class = ata_port_classify(ap, &tf);
return 0;
err:
ata_link_err(link, "softreset failed (%s)\n", reason);
return -EIO;
}
static int sil24_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
void __iomem *port = sil24_port_base(ap);
struct sil24_port_priv *pp = ap->private_data;
int did_port_rst = 0;
const char *reason;
int tout_msec, rc;
u32 tmp;
retry:
/* Sometimes, DEV_RST is not enough to recover the controller.
* This often happens after hitting the PMP DMA CS errata.
*/
if (pp->do_port_rst) {
ata_port_warn(ap,
"controller in dubious state, performing PORT_RST\n");
writel(PORT_CS_PORT_RST, port + PORT_CTRL_STAT);
ata_msleep(ap, 10);
writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
ata_wait_register(ap, port + PORT_CTRL_STAT, PORT_CS_RDY, 0,
10, 5000);
/* restore port configuration */
sil24_config_port(ap);
sil24_config_pmp(ap, ap->nr_pmp_links);
pp->do_port_rst = 0;
did_port_rst = 1;
}
/* sil24 does the right thing(tm) without any protection */
sata_set_spd(link);
tout_msec = 100;
if (ata_link_online(link))
tout_msec = 5000;
writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
tmp = ata_wait_register(ap, port + PORT_CTRL_STAT,
PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10,
tout_msec);
/* SStatus oscillates between zero and valid status after
* DEV_RST, debounce it.
*/
rc = sata_link_debounce(link, sata_deb_timing_long, deadline);
if (rc) {
reason = "PHY debouncing failed";
goto err;
}
if (tmp & PORT_CS_DEV_RST) {
if (ata_link_offline(link))
return 0;
reason = "link not ready";
goto err;
}
/* Sil24 doesn't store signature FIS after hardreset, so we
* can't wait for BSY to clear. Some devices take a long time
* to get ready and those devices will choke if we don't wait
* for BSY clearance here. Tell libata to perform follow-up
* softreset.
*/
return -EAGAIN;
err:
if (!did_port_rst) {
pp->do_port_rst = 1;
goto retry;
}
ata_link_err(link, "hardreset failed (%s)\n", reason);
return -EIO;
}
static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
struct sil24_sge *sge)
{
struct scatterlist *sg;
struct sil24_sge *last_sge = NULL;
unsigned int si;
for_each_sg(qc->sg, sg, qc->n_elem, si) {
sge->addr = cpu_to_le64(sg_dma_address(sg));
sge->cnt = cpu_to_le32(sg_dma_len(sg));
sge->flags = 0;
last_sge = sge;
sge++;
}
last_sge->flags = cpu_to_le32(SGE_TRM);
}
static int sil24_qc_defer(struct ata_queued_cmd *qc)
{
struct ata_link *link = qc->dev->link;
struct ata_port *ap = link->ap;
u8 prot = qc->tf.protocol;
/*
* There is a bug in the chip:
* Port LRAM Causes the PRB/SGT Data to be Corrupted
* If the host issues a read request for LRAM and SActive registers
* while active commands are available in the port, PRB/SGT data in
* the LRAM can become corrupted. This issue applies only when
* reading from, but not writing to, the LRAM.
*
* Therefore, reading LRAM when there is no particular error [and
* other commands may be outstanding] is prohibited.
*
* To avoid this bug there are two situations where a command must run
* exclusive of any other commands on the port:
*
* - ATAPI commands which check the sense data
* - Passthrough ATA commands which always have ATA_QCFLAG_RESULT_TF
* set.
*
*/
int is_excl = (ata_is_atapi(prot) ||
(qc->flags & ATA_QCFLAG_RESULT_TF));
if (unlikely(ap->excl_link)) {
if (link == ap->excl_link) {
if (ap->nr_active_links)
return ATA_DEFER_PORT;
qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
} else
return ATA_DEFER_PORT;
} else if (unlikely(is_excl)) {
ap->excl_link = link;
if (ap->nr_active_links)
return ATA_DEFER_PORT;
qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
}
return ata_std_qc_defer(qc);
}
static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct sil24_port_priv *pp = ap->private_data;
union sil24_cmd_block *cb;
struct sil24_prb *prb;
struct sil24_sge *sge;
u16 ctrl = 0;
cb = &pp->cmd_block[sil24_tag(qc->hw_tag)];
if (!ata_is_atapi(qc->tf.protocol)) {
prb = &cb->ata.prb;
sge = cb->ata.sge;
if (ata_is_data(qc->tf.protocol)) {
u16 prot = 0;
ctrl = PRB_CTRL_PROTOCOL;
if (ata_is_ncq(qc->tf.protocol))
prot |= PRB_PROT_NCQ;
if (qc->tf.flags & ATA_TFLAG_WRITE)
prot |= PRB_PROT_WRITE;
else
prot |= PRB_PROT_READ;
prb->prot = cpu_to_le16(prot);
}
} else {
prb = &cb->atapi.prb;
sge = cb->atapi.sge;
memset(cb->atapi.cdb, 0, sizeof(cb->atapi.cdb));
memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
if (ata_is_data(qc->tf.protocol)) {
if (qc->tf.flags & ATA_TFLAG_WRITE)
ctrl = PRB_CTRL_PACKET_WRITE;
else
ctrl = PRB_CTRL_PACKET_READ;
}
}
prb->ctrl = cpu_to_le16(ctrl);
ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, prb->fis);
if (qc->flags & ATA_QCFLAG_DMAMAP)
sil24_fill_sg(qc, sge);
return AC_ERR_OK;
}
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct sil24_port_priv *pp = ap->private_data;
void __iomem *port = sil24_port_base(ap);
unsigned int tag = sil24_tag(qc->hw_tag);
dma_addr_t paddr;
void __iomem *activate;
paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
activate = port + PORT_CMD_ACTIVATE + tag * 8;
/*
* The barrier is required to ensure that writes to cmd_block reach
* the memory before the write to PORT_CMD_ACTIVATE.
*/
wmb();
writel((u32)paddr, activate);
writel((u64)paddr >> 32, activate + 4);
return 0;
}
static void sil24_qc_fill_rtf(struct ata_queued_cmd *qc)
{
sil24_read_tf(qc->ap, qc->hw_tag, &qc->result_tf);
}
static void sil24_pmp_attach(struct ata_port *ap)
{
u32 *gscr = ap->link.device->gscr;
sil24_config_pmp(ap, 1);
sil24_init_port(ap);
if (sata_pmp_gscr_vendor(gscr) == 0x11ab &&
sata_pmp_gscr_devid(gscr) == 0x4140) {
ata_port_info(ap,
"disabling NCQ support due to sil24-mv4140 quirk\n");
ap->flags &= ~ATA_FLAG_NCQ;
}
}
static void sil24_pmp_detach(struct ata_port *ap)
{
sil24_init_port(ap);
sil24_config_pmp(ap, 0);
ap->flags |= ATA_FLAG_NCQ;
}
static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
int rc;
rc = sil24_init_port(link->ap);
if (rc) {
ata_link_err(link, "hardreset failed (port not ready)\n");
return rc;
}
return sata_std_hardreset(link, class, deadline);
}
static void sil24_freeze(struct ata_port *ap)
{
void __iomem *port = sil24_port_base(ap);
/* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
* PORT_IRQ_ENABLE instead.
*/
writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
}
static void sil24_thaw(struct ata_port *ap)
{
void __iomem *port = sil24_port_base(ap);
u32 tmp;
/* clear IRQ */
tmp = readl(port + PORT_IRQ_STAT);
writel(tmp, port + PORT_IRQ_STAT);
/* turn IRQ back on */
writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
}
static void sil24_error_intr(struct ata_port *ap)
{
void __iomem *port = sil24_port_base(ap);
struct sil24_port_priv *pp = ap->private_data;
struct ata_queued_cmd *qc = NULL;
struct ata_link *link;
struct ata_eh_info *ehi;
int abort = 0, freeze = 0;
u32 irq_stat;
/* on error, we need to clear IRQ explicitly */
irq_stat = readl(port + PORT_IRQ_STAT);
writel(irq_stat, port + PORT_IRQ_STAT);
/* first, analyze and record host port events */
link = &ap->link;
ehi = &link->eh_info;
ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
if (irq_stat & PORT_IRQ_SDB_NOTIFY) {
ata_ehi_push_desc(ehi, "SDB notify");
sata_async_notification(ap);
}
if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) {
ata_ehi_hotplugged(ehi);
ata_ehi_push_desc(ehi, "%s",
irq_stat & PORT_IRQ_PHYRDY_CHG ?
"PHY RDY changed" : "device exchanged");
freeze = 1;
}
if (irq_stat & PORT_IRQ_UNK_FIS) {
ehi->err_mask |= AC_ERR_HSM;
ehi->action |= ATA_EH_RESET;
ata_ehi_push_desc(ehi, "unknown FIS");
freeze = 1;
}
/* deal with command error */
if (irq_stat & PORT_IRQ_ERROR) {
const struct sil24_cerr_info *ci = NULL;
unsigned int err_mask = 0, action = 0;
u32 context, cerr;
int pmp;
abort = 1;
/* DMA Context Switch Failure in Port Multiplier Mode
* errata. If we have active commands to 3 or more
* devices, any error condition on active devices can
* corrupt DMA context switching.
*/
if (ap->nr_active_links >= 3) {
ehi->err_mask |= AC_ERR_OTHER;
ehi->action |= ATA_EH_RESET;
ata_ehi_push_desc(ehi, "PMP DMA CS errata");
pp->do_port_rst = 1;
freeze = 1;
}
/* find out the offending link and qc */
if (sata_pmp_attached(ap)) {
context = readl(port + PORT_CONTEXT);
pmp = (context >> 5) & 0xf;
if (pmp < ap->nr_pmp_links) {
link = &ap->pmp_link[pmp];
ehi = &link->eh_info;
qc = ata_qc_from_tag(ap, link->active_tag);
ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi, "irq_stat 0x%08x",
irq_stat);
} else {
err_mask |= AC_ERR_HSM;
action |= ATA_EH_RESET;
freeze = 1;
}
} else
qc = ata_qc_from_tag(ap, link->active_tag);
/* analyze CMD_ERR */
cerr = readl(port + PORT_CMD_ERR);
if (cerr < ARRAY_SIZE(sil24_cerr_db))
ci = &sil24_cerr_db[cerr];
if (ci && ci->desc) {
err_mask |= ci->err_mask;
action |= ci->action;
if (action & ATA_EH_RESET)
freeze = 1;
ata_ehi_push_desc(ehi, "%s", ci->desc);
} else {
err_mask |= AC_ERR_OTHER;
action |= ATA_EH_RESET;
freeze = 1;
ata_ehi_push_desc(ehi, "unknown command error %d",
cerr);
}
/* record error info */
if (qc)
qc->err_mask |= err_mask;
else
ehi->err_mask |= err_mask;
ehi->action |= action;
/* if PMP, resume */
if (sata_pmp_attached(ap))
writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_STAT);
}
/* freeze or abort */
if (freeze)
ata_port_freeze(ap);
else if (abort) {
if (qc)
ata_link_abort(qc->dev->link);
else
ata_port_abort(ap);
}
}
static inline void sil24_host_intr(struct ata_port *ap)
{
void __iomem *port = sil24_port_base(ap);
u32 slot_stat, qc_active;
int rc;
/* If PCIX_IRQ_WOC, there's an inherent race window between
* clearing IRQ pending status and reading PORT_SLOT_STAT
* which may cause spurious interrupts afterwards. This is
* unavoidable and much better than losing interrupts which
* happens if IRQ pending is cleared after reading
* PORT_SLOT_STAT.
*/
if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
slot_stat = readl(port + PORT_SLOT_STAT);
if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
sil24_error_intr(ap);
return;
}
qc_active = slot_stat & ~HOST_SSTAT_ATTN;
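/* bits 30:0 of SLOT_STAT map one-to-one onto the 31 command slots */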
rc = ata_qc_complete_multiple(ap, qc_active);
if (rc > 0)
return;
if (rc < 0) {
struct ata_eh_info *ehi = &ap->link.eh_info;
ehi->err_mask |= AC_ERR_HSM;
ehi->action |= ATA_EH_RESET;
ata_port_freeze(ap);
return;
}
/* spurious interrupts are expected if PCIX_IRQ_WOC */
if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit())
ata_port_info(ap,
"spurious interrupt (slot_stat 0x%x active_tag %d sactive 0x%x)\n",
slot_stat, ap->link.active_tag, ap->link.sactive);
}
static irqreturn_t sil24_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
unsigned handled = 0;
u32 status;
int i;
status = readl(host_base + HOST_IRQ_STAT);
if (status == 0xffffffff) {
dev_err(host->dev, "IRQ status == 0xffffffff, "
"PCI fault or device removal?\n");
goto out;
}
if (!(status & IRQ_STAT_4PORTS))
goto out;
spin_lock(&host->lock);
for (i = 0; i < host->n_ports; i++)
if (status & (1 << i)) {
sil24_host_intr(host->ports[i]);
handled++;
}
spin_unlock(&host->lock);
out:
return IRQ_RETVAL(handled);
}
static void sil24_error_handler(struct ata_port *ap)
{
struct sil24_port_priv *pp = ap->private_data;
if (sil24_init_port(ap))
ata_eh_freeze_port(ap);
sata_pmp_error_handler(ap);
pp->do_port_rst = 0;
}
static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
/* make DMA engine forget about the failed command */
if ((qc->flags & ATA_QCFLAG_EH) && sil24_init_port(ap))
ata_eh_freeze_port(ap);
}
static int sil24_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct sil24_port_priv *pp;
union sil24_cmd_block *cb;
size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
dma_addr_t cb_dma;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
cb = dmam_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
if (!cb)
return -ENOMEM;
pp->cmd_block = cb;
pp->cmd_block_dma = cb_dma;
ap->private_data = pp;
ata_port_pbar_desc(ap, SIL24_HOST_BAR, -1, "host");
ata_port_pbar_desc(ap, SIL24_PORT_BAR, sil24_port_offset(ap), "port");
return 0;
}
static void sil24_init_controller(struct ata_host *host)
{
void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
u32 tmp;
int i;
/* GPIO off */
writel(0, host_base + HOST_FLASH_CMD);
/* clear global reset & mask interrupts during initialization */
writel(0, host_base + HOST_CTRL);
/* init ports */
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
void __iomem *port = sil24_port_base(ap);
/* Initial PHY setting */
writel(0x20c, port + PORT_PHY_CFG);
/* Clear port RST */
tmp = readl(port + PORT_CTRL_STAT);
if (tmp & PORT_CS_PORT_RST) {
writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
tmp = ata_wait_register(NULL, port + PORT_CTRL_STAT,
PORT_CS_PORT_RST,
PORT_CS_PORT_RST, 10, 100);
if (tmp & PORT_CS_PORT_RST)
dev_err(host->dev,
"failed to clear port RST\n");
}
/* configure port */
sil24_config_port(ap);
}
/* Turn on interrupts */
writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
}
static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
extern int __MARKER__sil24_cmd_block_is_sized_wrongly;
struct ata_port_info pi = sil24_port_info[ent->driver_data];
const struct ata_port_info *ppi[] = { &pi, NULL };
void __iomem * const *iomap;
struct ata_host *host;
int rc;
u32 tmp;
/* cause link error if sil24_cmd_block is sized wrongly */
if (sizeof(union sil24_cmd_block) != PAGE_SIZE)
__MARKER__sil24_cmd_block_is_sized_wrongly = 1;
ata_print_version_once(&pdev->dev, DRV_VERSION);
/* acquire resources */
rc = pcim_enable_device(pdev);
if (rc)
return rc;
rc = pcim_iomap_regions(pdev,
(1 << SIL24_HOST_BAR) | (1 << SIL24_PORT_BAR),
DRV_NAME);
if (rc)
return rc;
iomap = pcim_iomap_table(pdev);
/* apply workaround for completion IRQ loss on PCI-X errata */
if (pi.flags & SIL24_FLAG_PCIX_IRQ_WOC) {
tmp = readl(iomap[SIL24_HOST_BAR] + HOST_CTRL);
if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
dev_info(&pdev->dev,
"Applying completion IRQ loss on PCI-X errata fix\n");
else
pi.flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
}
/* allocate and fill host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi,
SIL24_FLAG2NPORTS(ppi[0]->flags));
if (!host)
return -ENOMEM;
host->iomap = iomap;
/* configure and activate the device */
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc) {
dev_err(&pdev->dev, "DMA enable failed\n");
return rc;
}
/* Set max read request size to 4096. This slightly increases
* write throughput for pci-e variants.
*/
pcie_set_readrq(pdev, 4096);
sil24_init_controller(host);
if (sata_sil24_msi && !pci_enable_msi(pdev)) {
dev_info(&pdev->dev, "Using MSI\n");
pci_intx(pdev, 0);
}
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, sil24_interrupt, IRQF_SHARED,
&sil24_sht);
}
#ifdef CONFIG_PM_SLEEP
static int sil24_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
writel(HOST_CTRL_GLOBAL_RST, host_base + HOST_CTRL);
sil24_init_controller(host);
ata_host_resume(host);
return 0;
}
#endif
#ifdef CONFIG_PM
static int sil24_port_resume(struct ata_port *ap)
{
sil24_config_pmp(ap, ap->nr_pmp_links);
return 0;
}
#endif
module_pci_driver(sil24_pci_driver);
MODULE_AUTHOR("Tejun Heo");
MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil24_pci_tbl);
| linux-master | drivers/ata/sata_sil24.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata-cs5530.c - CS5530 PATA for new ATA layer
* (C) 2005 Red Hat Inc
*
* based upon cs5530.c by Mark Lord.
*
* Loosely based on the piix & svwks drivers.
*
* Documentation:
* Available from AMD web site.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>
#define DRV_NAME "pata_cs5530"
#define DRV_VERSION "0.7.4"
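/*
* The per-channel timing registers sit in I/O space just above the
* 16-byte-aligned BMDMA base: channel 0 at +0x20, channel 1 at +0x30.
* Within each block, +0x00/+0x08 appear to hold the master/slave PIO
* timings and +0x04/+0x0C the matching DMA timings (see the
* set_piomode and set_dmamode helpers below).
*/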
static void __iomem *cs5530_port_base(struct ata_port *ap)
{
unsigned long bmdma = (unsigned long)ap->ioaddr.bmdma_addr;
return (void __iomem *)((bmdma & ~0x0F) + 0x20 + 0x10 * ap->port_no);
}
/**
* cs5530_set_piomode - PIO setup
* @ap: ATA interface
* @adev: device on the interface
*
* Set our PIO requirements. This is fairly simple on the CS5530
* chips.
*/
static void cs5530_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
static const unsigned int cs5530_pio_timings[2][5] = {
{0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010},
{0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}
};
void __iomem *base = cs5530_port_base(ap);
u32 tuning;
int format;
/* Find out which table to use */
tuning = ioread32(base + 0x04);
format = (tuning & 0x80000000UL) ? 1 : 0;
/* Now load the right timing register */
if (adev->devno)
base += 0x08;
iowrite32(cs5530_pio_timings[format][adev->pio_mode - XFER_PIO_0], base);
}
/**
* cs5530_set_dmamode - DMA timing setup
* @ap: ATA interface
* @adev: Device being configured
*
* We cannot mix MWDMA and UDMA without reloading the timings each
* time we switch between master and slave. We track the last DMA
* setup in order to minimise reloads.
*/
static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
void __iomem *base = cs5530_port_base(ap);
u32 tuning, timing = 0;
u8 reg;
/* Find out which table to use */
tuning = ioread32(base + 0x04);
switch (adev->dma_mode) {
case XFER_UDMA_0:
timing = 0x00921250; break;
case XFER_UDMA_1:
timing = 0x00911140; break;
case XFER_UDMA_2:
timing = 0x00911030; break;
case XFER_MW_DMA_0:
timing = 0x00077771; break;
case XFER_MW_DMA_1:
timing = 0x00012121; break;
case XFER_MW_DMA_2:
timing = 0x00002020; break;
default:
BUG();
}
/* Merge in the PIO format bit */
timing |= (tuning & 0x80000000UL);
if (adev->devno == 0) /* Master */
iowrite32(timing, base + 0x04);
else {
if (timing & 0x00100000)
tuning |= 0x00100000; /* UDMA for both */
else
tuning &= ~0x00100000; /* MWDMA for both */
iowrite32(tuning, base + 0x04);
iowrite32(timing, base + 0x0C);
}
/* Set the DMA capable bit in the BMDMA area */
reg = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
reg |= (1 << (5 + adev->devno));
iowrite8(reg, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
/* Remember the last DMA setup we did */
ap->private_data = adev;
}
/**
* cs5530_qc_issue - command issue
* @qc: command pending
*
* Called when the libata layer is about to issue a command. We wrap
* this interface so that we can load the correct ATA timings if
* necessary. Specifically, there is only one MWDMA/UDMA selection
* bit per channel, shared by both drives.
*/
static unsigned int cs5530_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct ata_device *prev = ap->private_data;
/* See if the DMA settings could be wrong */
if (ata_dma_enabled(adev) && adev != prev && prev != NULL) {
/* Maybe, but do the channels match MWDMA/UDMA? */
if ((ata_using_udma(adev) && !ata_using_udma(prev)) ||
(ata_using_udma(prev) && !ata_using_udma(adev)))
/* Switch the mode bits */
cs5530_set_dmamode(ap, adev);
}
return ata_bmdma_qc_issue(qc);
}
static const struct scsi_host_template cs5530_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
};
static struct ata_port_operations cs5530_port_ops = {
.inherits = &ata_bmdma_port_ops,
.qc_prep = ata_bmdma_dumb_qc_prep,
.qc_issue = cs5530_qc_issue,
.cable_detect = ata_cable_40wire,
.set_piomode = cs5530_set_piomode,
.set_dmamode = cs5530_set_dmamode,
};
static const struct dmi_system_id palmax_dmi_table[] = {
{
.ident = "Palmax PD1100",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Cyrix"),
DMI_MATCH(DMI_PRODUCT_NAME, "Caddis"),
},
},
{ }
};
static int cs5530_is_palmax(void)
{
if (dmi_check_system(palmax_dmi_table)) {
printk(KERN_INFO "Palmax PD1100: Disabling DMA on docking port.\n");
return 1;
}
return 0;
}
/**
* cs5530_init_chip - Chipset init
*
* Perform the chip initialisation work that is shared between both
* setup and resume paths
*/
static int cs5530_init_chip(void)
{
struct pci_dev *master_0 = NULL, *cs5530_0 = NULL, *dev = NULL;
while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) {
switch (dev->device) {
case PCI_DEVICE_ID_CYRIX_PCI_MASTER:
master_0 = pci_dev_get(dev);
break;
case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
cs5530_0 = pci_dev_get(dev);
break;
}
}
if (!master_0) {
printk(KERN_ERR DRV_NAME ": unable to locate PCI MASTER function\n");
goto fail_put;
}
if (!cs5530_0) {
printk(KERN_ERR DRV_NAME ": unable to locate CS5530 LEGACY function\n");
goto fail_put;
}
pci_set_master(cs5530_0);
pci_try_set_mwi(cs5530_0);
/*
* Set PCI CacheLineSize to 16-bytes:
* --> Write 0x04 into 8-bit PCI CACHELINESIZE reg of function 0 of the cs5530
*
* Note: This value is constant because the 5530 is only a Geode companion
*/
pci_write_config_byte(cs5530_0, PCI_CACHE_LINE_SIZE, 0x04);
/*
* Disable trapping of UDMA register accesses (Win98 hack):
* --> Write 0x5006 into 16-bit reg at offset 0xd0 of function 0 of the cs5530
*/
pci_write_config_word(cs5530_0, 0xd0, 0x5006);
/*
* Bit-1 at 0x40 enables MemoryWriteAndInvalidate on internal X-bus:
* The other settings are what is necessary to get the register
* into a sane state for IDE DMA operation.
*/
pci_write_config_byte(master_0, 0x40, 0x1e);
/*
* Set max PCI burst size (16-bytes seems to work best):
* 16bytes: set bit-1 at 0x41 (reg value of 0x16)
* all others: clear bit-1 at 0x41, and do:
* 128bytes: OR 0x00 at 0x41
* 256bytes: OR 0x04 at 0x41
* 512bytes: OR 0x08 at 0x41
* 1024bytes: OR 0x0c at 0x41
*/
pci_write_config_byte(master_0, 0x41, 0x14);
/*
* These settings are necessary to get the chip
* into a sane state for IDE DMA operation.
*/
pci_write_config_byte(master_0, 0x42, 0x00);
pci_write_config_byte(master_0, 0x43, 0xc1);
pci_dev_put(master_0);
pci_dev_put(cs5530_0);
return 0;
fail_put:
pci_dev_put(master_0);
pci_dev_put(cs5530_0);
return -ENODEV;
}
/**
* cs5530_init_one - Initialise a CS5530
* @pdev: PCI device
* @id: Entry in match table
*
* Install a driver for the newly found CS5530 companion chip. Most of
* this is just housekeeping. We have to set the chip up correctly and
* turn off various bits of emulation magic.
*/
static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
.port_ops = &cs5530_port_ops
};
/* The docking connector doesn't do UDMA, and it seems not MWDMA */
static const struct ata_port_info info_palmax_secondary = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.port_ops = &cs5530_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
int rc;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
/* Chip initialisation */
if (cs5530_init_chip())
return -ENODEV;
if (cs5530_is_palmax())
ppi[1] = &info_palmax_secondary;
/* Now kick off ATA set up */
return ata_pci_bmdma_init_one(pdev, ppi, &cs5530_sht, NULL, 0);
}
#ifdef CONFIG_PM_SLEEP
static int cs5530_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
/* If we fail on resume we are doomed */
if (cs5530_init_chip())
return -EIO;
ata_host_resume(host);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
static const struct pci_device_id cs5530[] = {
{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), },
{ },
};
static struct pci_driver cs5530_pci_driver = {
.name = DRV_NAME,
.id_table = cs5530,
.probe = cs5530_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = cs5530_reinit_one,
#endif
};
module_pci_driver(cs5530_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for the Cyrix/NS/AMD 5530");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cs5530);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_cs5530.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* MediaTek AHCI SATA driver
*
* Copyright (c) 2017 MediaTek Inc.
* Author: Ryder Lee <[email protected]>
*/
#include <linux/ahci_platform.h>
#include <linux/kernel.h>
#include <linux/libata.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include "ahci.h"
#define DRV_NAME "ahci-mtk"
#define SYS_CFG 0x14
#define SYS_CFG_SATA_MSK GENMASK(31, 30)
#define SYS_CFG_SATA_EN BIT(31)
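/* Bits 31:30 of SYS_CFG appear to select the PHY/controller function;
* SATA is enabled by writing SYS_CFG_SATA_EN into that field (see
* mtk_ahci_parse_property() below).
*/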
struct mtk_ahci_plat {
struct regmap *mode;
struct reset_control *axi_rst;
struct reset_control *sw_rst;
struct reset_control *reg_rst;
};
static const struct ata_port_info ahci_port_info = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_platform_ops,
};
static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
static int mtk_ahci_platform_resets(struct ahci_host_priv *hpriv,
struct device *dev)
{
struct mtk_ahci_plat *plat = hpriv->plat_data;
int err;
/* reset AXI bus and PHY part */
plat->axi_rst = devm_reset_control_get_optional_exclusive(dev, "axi");
if (PTR_ERR(plat->axi_rst) == -EPROBE_DEFER)
return PTR_ERR(plat->axi_rst);
plat->sw_rst = devm_reset_control_get_optional_exclusive(dev, "sw");
if (PTR_ERR(plat->sw_rst) == -EPROBE_DEFER)
return PTR_ERR(plat->sw_rst);
plat->reg_rst = devm_reset_control_get_optional_exclusive(dev, "reg");
if (PTR_ERR(plat->reg_rst) == -EPROBE_DEFER)
return PTR_ERR(plat->reg_rst);
err = reset_control_assert(plat->axi_rst);
if (err) {
dev_err(dev, "failed to assert AXI bus\n");
return err;
}
err = reset_control_assert(plat->sw_rst);
if (err) {
dev_err(dev, "failed to assert PHY digital part\n");
return err;
}
err = reset_control_assert(plat->reg_rst);
if (err) {
dev_err(dev, "failed to assert PHY register part\n");
return err;
}
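/* release the resets in the reverse order of assertion */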
err = reset_control_deassert(plat->reg_rst);
if (err) {
dev_err(dev, "failed to deassert PHY register part\n");
return err;
}
err = reset_control_deassert(plat->sw_rst);
if (err) {
dev_err(dev, "failed to deassert PHY digital part\n");
return err;
}
err = reset_control_deassert(plat->axi_rst);
if (err) {
dev_err(dev, "failed to deassert AXI bus\n");
return err;
}
return 0;
}
static int mtk_ahci_parse_property(struct ahci_host_priv *hpriv,
struct device *dev)
{
struct mtk_ahci_plat *plat = hpriv->plat_data;
struct device_node *np = dev->of_node;
/* enable SATA function if needed */
if (of_property_present(np, "mediatek,phy-mode")) {
plat->mode = syscon_regmap_lookup_by_phandle(
np, "mediatek,phy-mode");
if (IS_ERR(plat->mode)) {
dev_err(dev, "missing phy-mode phandle\n");
return PTR_ERR(plat->mode);
}
regmap_update_bits(plat->mode, SYS_CFG, SYS_CFG_SATA_MSK,
SYS_CFG_SATA_EN);
}
return 0;
}
static int mtk_ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_ahci_plat *plat;
struct ahci_host_priv *hpriv;
int err;
plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
return -ENOMEM;
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
hpriv->plat_data = plat;
err = mtk_ahci_parse_property(hpriv, dev);
if (err)
return err;
err = mtk_ahci_platform_resets(hpriv, dev);
if (err)
return err;
err = ahci_platform_enable_resources(hpriv);
if (err)
return err;
err = ahci_platform_init_host(pdev, hpriv, &ahci_port_info,
&ahci_platform_sht);
if (err)
goto disable_resources;
return 0;
disable_resources:
ahci_platform_disable_resources(hpriv);
return err;
}
static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
ahci_platform_resume);
static const struct of_device_id ahci_of_match[] = {
{ .compatible = "mediatek,mtk-ahci", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
static struct platform_driver mtk_ahci_driver = {
.probe = mtk_ahci_probe,
.remove_new = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_of_match,
.pm = &ahci_pm_ops,
},
};
module_platform_driver(mtk_ahci_driver);
MODULE_DESCRIPTION("MediaTek SATA AHCI Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/ata/ahci_mtk.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pata_serverworks.c - Serverworks PATA for new ATA layer
* (C) 2005 Red Hat Inc
* (C) 2010 Bartlomiej Zolnierkiewicz
*
* based upon
*
* serverworks.c
*
* Copyright (C) 1998-2000 Michel Aubry
* Copyright (C) 1998-2000 Andrzej Krzysztofowicz
* Copyright (C) 1998-2000 Andre Hedrick <[email protected]>
* Portions copyright (c) 2001 Sun Microsystems
*
*
* RCC/ServerWorks IDE driver for Linux
*
* OSB4: `Open South Bridge' IDE Interface (fn 1)
* supports UDMA mode 2 (33 MB/s)
*
* CSB5: `Champion South Bridge' IDE Interface (fn 1)
* all revisions support UDMA mode 4 (66 MB/s)
* revision A2.0 and up support UDMA mode 5 (100 MB/s)
*
* *** The CSB5 does not provide ANY register ***
* *** to detect 80-conductor cable presence. ***
*
* CSB6: `Champion South Bridge' IDE Interface (optional: third channel)
*
* Documentation:
* Available under NDA only. Errata info very hard to get.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_serverworks"
#define DRV_VERSION "0.4.3"
#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
/* Seagate Barracuda ATA IV Family drives in UDMA mode 5
* can overrun their FIFOs when used with the CSB5 */
static const char *csb_bad_ata100[] = {
"ST320011A",
"ST340016A",
"ST360021A",
"ST380021A",
NULL
};
/**
* oem_cable - Dell/Sun serverworks cable detection
* @ap: ATA port to do cable detect
*
* Dell PowerEdge and Sun Cobalt 'Alpine' hide the 40/80 pin select
* for their interfaces in the top two bits of the subsystem ID.
*/
static int oem_cable(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
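/* bit 14 covers the primary channel, bit 15 the secondary */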
if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
return ATA_CBL_PATA80;
return ATA_CBL_PATA40;
}
struct sv_cable_table {
int device;
int subvendor;
int (*cable_detect)(struct ata_port *ap);
};
static struct sv_cable_table cable_detect[] = {
{ PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_DELL, oem_cable },
{ PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_VENDOR_ID_DELL, oem_cable },
{ PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_SUN, oem_cable },
{ PCI_DEVICE_ID_SERVERWORKS_OSB4IDE, PCI_ANY_ID, ata_cable_40wire },
{ PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_ANY_ID, ata_cable_unknown },
{ PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_ANY_ID, ata_cable_unknown },
{ PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2, PCI_ANY_ID, ata_cable_unknown },
{ PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, ata_cable_unknown },
{ }
};
/**
* serverworks_cable_detect - cable detection
* @ap: ATA port
*
* Perform cable detection according to the device and subvendor
* identifications
*/
static int serverworks_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct sv_cable_table *cb = cable_detect;
while (cb->device) {
if (cb->device == pdev->device &&
(cb->subvendor == pdev->subsystem_vendor ||
cb->subvendor == PCI_ANY_ID)) {
return cb->cable_detect(ap);
}
cb++;
}
BUG();
return -1; /* kill compiler warning */
}
/**
* serverworks_is_csb - Check for CSB or OSB
* @pdev: PCI device to check
*
* Returns true if the device being checked is known to be a CSB
* series device.
*/
static u8 serverworks_is_csb(struct pci_dev *pdev)
{
switch (pdev->device) {
case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
return 1;
default:
break;
}
return 0;
}
/**
* serverworks_osb4_filter - mode selection filter
* @adev: ATA device
* @mask: Mask of proposed modes
*
* Filter the offered modes for the device to apply controller
* specific rules. OSB4 requires no UDMA for disks due to a FIFO
* bug we hit.
*/
static unsigned int serverworks_osb4_filter(struct ata_device *adev, unsigned int mask)
{
if (adev->class == ATA_DEV_ATA)
mask &= ~ATA_MASK_UDMA;
return mask;
}
/**
* serverworks_csb_filter - mode selection filter
* @adev: ATA device
* @mask: Mask of proposed modes
*
* Check the blacklist and disable UDMA5 if matched
*/
static unsigned int serverworks_csb_filter(struct ata_device *adev, unsigned int mask)
{
const char *p;
char model_num[ATA_ID_PROD_LEN + 1];
int i;
/* Disk, UDMA */
if (adev->class != ATA_DEV_ATA)
return mask;
/* Actually do need to check */
ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
for (i = 0; (p = csb_bad_ata100[i]) != NULL; i++) {
if (!strcmp(p, model_num))
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
return mask;
}
/**
* serverworks_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the OSB4/CSB5 timing registers for PIO. The PIO register
* load is done as a simple lookup.
*/
static void serverworks_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
static const u8 pio_mode[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
int offset = 1 + 2 * ap->port_no - adev->devno;
int devbits = (2 * ap->port_no + adev->devno) * 4;
u16 csb5_pio;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int pio = adev->pio_mode - XFER_PIO_0;
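/* Registers 0x40-0x43 hold the four drive timings in reverse order
* within each channel: port0/dev1 -> 0x40, port0/dev0 -> 0x41,
* port1/dev1 -> 0x42, port1/dev0 -> 0x43 (hence the offset math above).
*/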
pci_write_config_byte(pdev, 0x40 + offset, pio_mode[pio]);
/* The OSB4 just requires the timing but the CSB series wants the
mode number as well */
if (serverworks_is_csb(pdev)) {
pci_read_config_word(pdev, 0x4A, &csb5_pio);
csb5_pio &= ~(0x0F << devbits);
pci_write_config_word(pdev, 0x4A, csb5_pio | (pio << devbits));
}
}
/**
* serverworks_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the MWDMA/UDMA modes for the serverworks OSB4/CSB5
* chipset. The MWDMA mode values are pulled from a lookup table
* while the chipset uses mode number for UDMA.
*/
static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static const u8 dma_mode[] = { 0x77, 0x21, 0x20 };
int offset = 1 + 2 * ap->port_no - adev->devno;
int devbits = 2 * ap->port_no + adev->devno;
u8 ultra;
u8 ultra_cfg;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
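/* 0x44-0x47 hold the MWDMA timing bytes (same reversed per-drive
   layout as PIO), 0x56/0x57 hold one UDMA mode nibble per drive per
   channel, and 0x54 carries the per-drive UDMA enable bits */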
pci_read_config_byte(pdev, 0x54, &ultra_cfg);
pci_read_config_byte(pdev, 0x56 + ap->port_no, &ultra);
ultra &= ~(0x0F << (adev->devno * 4));
if (adev->dma_mode >= XFER_UDMA_0) {
pci_write_config_byte(pdev, 0x44 + offset, 0x20);
ultra |= (adev->dma_mode - XFER_UDMA_0)
<< (adev->devno * 4);
ultra_cfg |= (1 << devbits);
} else {
pci_write_config_byte(pdev, 0x44 + offset,
dma_mode[adev->dma_mode - XFER_MW_DMA_0]);
ultra_cfg &= ~(1 << devbits);
}
pci_write_config_byte(pdev, 0x56 + ap->port_no, ultra);
pci_write_config_byte(pdev, 0x54, ultra_cfg);
}
static const struct scsi_host_template serverworks_osb4_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
};
static const struct scsi_host_template serverworks_csb_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations serverworks_osb4_port_ops = {
.inherits = &ata_bmdma_port_ops,
.qc_prep = ata_bmdma_dumb_qc_prep,
.cable_detect = serverworks_cable_detect,
.mode_filter = serverworks_osb4_filter,
.set_piomode = serverworks_set_piomode,
.set_dmamode = serverworks_set_dmamode,
};
static struct ata_port_operations serverworks_csb_port_ops = {
.inherits = &serverworks_osb4_port_ops,
.qc_prep = ata_bmdma_qc_prep,
.mode_filter = serverworks_csb_filter,
};
static int serverworks_fixup_osb4(struct pci_dev *pdev)
{
u32 reg;
struct pci_dev *isa_dev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
if (isa_dev) {
pci_read_config_dword(isa_dev, 0x64, &reg);
reg &= ~0x00002000; /* disable 600ns interrupt mask */
if (!(reg & 0x00004000))
dev_info(&pdev->dev, "UDMA not BIOS enabled.\n");
reg |= 0x00004000; /* enable UDMA/33 support */
pci_write_config_dword(isa_dev, 0x64, reg);
pci_dev_put(isa_dev);
return 0;
}
dev_warn(&pdev->dev, "Unable to find bridge.\n");
return -ENODEV;
}
static int serverworks_fixup_csb(struct pci_dev *pdev)
{
u8 btr;
/* Third Channel Test */
if (!(PCI_FUNC(pdev->devfn) & 1)) {
struct pci_dev *findev = NULL;
u32 reg4c = 0;
findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_CSB5, NULL);
if (findev) {
pci_read_config_dword(findev, 0x4C, &reg4c);
reg4c &= ~0x000007FF;
reg4c |= 0x00000040;
reg4c |= 0x00000020;
pci_write_config_dword(findev, 0x4C, reg4c);
pci_dev_put(findev);
}
} else {
struct pci_dev *findev = NULL;
u8 reg41 = 0;
findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_CSB6, NULL);
if (findev) {
pci_read_config_byte(findev, 0x41, &reg41);
reg41 &= ~0x40;
pci_write_config_byte(findev, 0x41, reg41);
pci_dev_put(findev);
}
}
/* setup the UDMA Control register
*
* 1. clear bit 6 to enable DMA
* 2. enable DMA modes with bits 0-1
* 00 : legacy
* 01 : udma2
* 10 : udma2/udma4
* 11 : udma2/udma4/udma5
*/
pci_read_config_byte(pdev, 0x5A, &btr);
btr &= ~0x40;
if (!(PCI_FUNC(pdev->devfn) & 1))
btr |= 0x2;
else
btr |= (pdev->revision >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
pci_write_config_byte(pdev, 0x5A, btr);
return btr;
}
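/*
 * Note that the btr value returned above doubles as a capability code:
 * serverworks_init_one() switches to the UDMA100-capable port info
 * when the fixup reports 3.
 */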
static void serverworks_fixup_ht1000(struct pci_dev *pdev)
{
u8 btr;
/* Setup HT1000 SouthBridge Controller - Single Channel Only */
pci_read_config_byte(pdev, 0x5A, &btr);
btr &= ~0x40;
btr |= 0x3;
pci_write_config_byte(pdev, 0x5A, btr);
}
static int serverworks_fixup(struct pci_dev *pdev)
{
int rc = 0;
/* Force master latency timer to 64 PCI clocks */
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
switch (pdev->device) {
case PCI_DEVICE_ID_SERVERWORKS_OSB4IDE:
rc = serverworks_fixup_osb4(pdev);
break;
case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
ata_pci_bmdma_clear_simplex(pdev);
fallthrough;
case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
rc = serverworks_fixup_csb(pdev);
break;
case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
serverworks_fixup_ht1000(pdev);
break;
}
return rc;
}
static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info[4] = {
{ /* OSB4 */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
.port_ops = &serverworks_osb4_port_ops
}, { /* OSB4 no UDMA */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
/* No UDMA */
.port_ops = &serverworks_osb4_port_ops
}, { /* CSB5 */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
.port_ops = &serverworks_csb_port_ops
}, { /* CSB5 - later revisions */
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &serverworks_csb_port_ops
}
};
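/* id->driver_data indexes this array: the match table below uses 0
   (OSB4) and 2 (CSB5); entries 1 and 3 are only selected from the
   fixup results further down */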
const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
const struct scsi_host_template *sht = &serverworks_csb_sht;
int rc;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
rc = serverworks_fixup(pdev);
/* OSB4 : South Bridge and IDE */
if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
/* Select non UDMA capable OSB4 if we can't do fixups */
if (rc < 0)
ppi[0] = &info[1];
sht = &serverworks_osb4_sht;
}
/* setup CSB5/CSB6 : South Bridge and IDE option RAID */
else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
(pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
(pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) {
/* If the returned btr is the newer revision then
select the right info block */
if (rc == 3)
ppi[0] = &info[3];
/* Is this the 3rd channel CSB6 IDE ? */
if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)
ppi[1] = &ata_dummy_port_info;
}
return ata_pci_bmdma_init_one(pdev, ppi, sht, NULL, 0);
}
#ifdef CONFIG_PM_SLEEP
static int serverworks_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
(void)serverworks_fixup(pdev);
ata_host_resume(host);
return 0;
}
#endif
static const struct pci_device_id serverworks[] = {
{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0},
{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 2},
{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE), 2},
{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2), 2},
{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE), 2},
{ },
};
static struct pci_driver serverworks_pci_driver = {
.name = DRV_NAME,
.id_table = serverworks,
.probe = serverworks_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = serverworks_reinit_one,
#endif
};
module_pci_driver(serverworks_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Serverworks OSB4/CSB5/CSB6");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, serverworks);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_serverworks.c |
/*
* drivers/ata/pata_mpc52xx.c
*
* libata driver for the Freescale MPC52xx on-chip IDE interface
*
* Copyright (C) 2006 Sylvain Munaut <[email protected]>
* Copyright (C) 2003 Mipsys - Benjamin Herrenschmidt
*
* UDMA support based on patches by Freescale (Bernard Kuhn, John Rigby),
* Domen Puncer and Tim Yamin.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/libata.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/mpc52xx.h>
#include <linux/fsl/bestcomm/bestcomm.h>
#include <linux/fsl/bestcomm/bestcomm_priv.h>
#include <linux/fsl/bestcomm/ata.h>
#define DRV_NAME "mpc52xx_ata"
/* Private structures used by the driver */
struct mpc52xx_ata_timings {
u32 pio1;
u32 pio2;
u32 mdma1;
u32 mdma2;
u32 udma1;
u32 udma2;
u32 udma3;
u32 udma4;
u32 udma5;
int using_udma;
};
struct mpc52xx_ata_priv {
unsigned int ipb_period;
struct mpc52xx_ata __iomem *ata_regs;
phys_addr_t ata_regs_pa;
int ata_irq;
struct mpc52xx_ata_timings timings[2];
int csel;
/* DMA */
struct bcom_task *dmatsk;
const struct udmaspec *udmaspec;
const struct mdmaspec *mdmaspec;
int mpc52xx_ata_dma_last_write;
int waiting_for_dma;
};
/* ATAPI-4 PIO specs (in ns) */
static const u16 ataspec_t0[5] = {600, 383, 240, 180, 120};
static const u16 ataspec_t1[5] = { 70, 50, 30, 30, 25};
static const u16 ataspec_t2_8[5] = {290, 290, 290, 80, 70};
static const u16 ataspec_t2_16[5] = {165, 125, 100, 80, 70};
static const u16 ataspec_t2i[5] = { 0, 0, 0, 70, 25};
static const u16 ataspec_t4[5] = { 30, 20, 15, 10, 10};
static const u16 ataspec_ta[5] = { 35, 35, 35, 35, 35};
#define CALC_CLKCYC(c,v) ((((v)+(c)-1)/(c)))
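/* i.e. the number of whole clock periods 'c' needed to cover time 'v'
   (a ceiling division) */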
/* ======================================================================== */
/* ATAPI-4 MDMA specs (in clocks) */
struct mdmaspec {
u8 t0M;
u8 td;
u8 th;
u8 tj;
u8 tkw;
u8 tm;
u8 tn;
};
static const struct mdmaspec mdmaspec66[3] = {
{ .t0M = 32, .td = 15, .th = 2, .tj = 2, .tkw = 15, .tm = 4, .tn = 1 },
{ .t0M = 10, .td = 6, .th = 1, .tj = 1, .tkw = 4, .tm = 2, .tn = 1 },
{ .t0M = 8, .td = 5, .th = 1, .tj = 1, .tkw = 2, .tm = 2, .tn = 1 },
};
static const struct mdmaspec mdmaspec132[3] = {
{ .t0M = 64, .td = 29, .th = 3, .tj = 3, .tkw = 29, .tm = 7, .tn = 2 },
{ .t0M = 20, .td = 11, .th = 2, .tj = 1, .tkw = 7, .tm = 4, .tn = 1 },
{ .t0M = 16, .td = 10, .th = 2, .tj = 1, .tkw = 4, .tm = 4, .tn = 1 },
};
/* ATAPI-4 UDMA specs (in clocks) */
struct udmaspec {
u8 tcyc;
u8 t2cyc;
u8 tds;
u8 tdh;
u8 tdvs;
u8 tdvh;
u8 tfs;
u8 tli;
u8 tmli;
u8 taz;
u8 tzah;
u8 tenv;
u8 tsr;
u8 trfs;
u8 trp;
u8 tack;
u8 tss;
};
static const struct udmaspec udmaspec66[6] = {
{ .tcyc = 8, .t2cyc = 16, .tds = 1, .tdh = 1, .tdvs = 5, .tdvh = 1,
.tfs = 16, .tli = 10, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
.tsr = 3, .trfs = 5, .trp = 11, .tack = 2, .tss = 4,
},
{ .tcyc = 5, .t2cyc = 11, .tds = 1, .tdh = 1, .tdvs = 4, .tdvh = 1,
.tfs = 14, .tli = 10, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
.tsr = 2, .trfs = 5, .trp = 9, .tack = 2, .tss = 4,
},
{ .tcyc = 4, .t2cyc = 8, .tds = 1, .tdh = 1, .tdvs = 3, .tdvh = 1,
.tfs = 12, .tli = 10, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
.tsr = 2, .trfs = 4, .trp = 7, .tack = 2, .tss = 4,
},
{ .tcyc = 3, .t2cyc = 6, .tds = 1, .tdh = 1, .tdvs = 2, .tdvh = 1,
.tfs = 9, .tli = 7, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
.tsr = 2, .trfs = 4, .trp = 7, .tack = 2, .tss = 4,
},
{ .tcyc = 2, .t2cyc = 4, .tds = 1, .tdh = 1, .tdvs = 1, .tdvh = 1,
.tfs = 8, .tli = 8, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
.tsr = 2, .trfs = 4, .trp = 7, .tack = 2, .tss = 4,
},
{ .tcyc = 2, .t2cyc = 2, .tds = 1, .tdh = 1, .tdvs = 1, .tdvh = 1,
.tfs = 6, .tli = 5, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
.tsr = 2, .trfs = 4, .trp = 6, .tack = 2, .tss = 4,
},
};
static const struct udmaspec udmaspec132[6] = {
{ .tcyc = 15, .t2cyc = 31, .tds = 2, .tdh = 1, .tdvs = 10, .tdvh = 1,
.tfs = 30, .tli = 20, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3,
.tsr = 7, .trfs = 10, .trp = 22, .tack = 3, .tss = 7,
},
{ .tcyc = 10, .t2cyc = 21, .tds = 2, .tdh = 1, .tdvs = 7, .tdvh = 1,
.tfs = 27, .tli = 20, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3,
.tsr = 4, .trfs = 10, .trp = 17, .tack = 3, .tss = 7,
},
{ .tcyc = 6, .t2cyc = 12, .tds = 1, .tdh = 1, .tdvs = 5, .tdvh = 1,
.tfs = 23, .tli = 20, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3,
.tsr = 3, .trfs = 8, .trp = 14, .tack = 3, .tss = 7,
},
{ .tcyc = 7, .t2cyc = 12, .tds = 1, .tdh = 1, .tdvs = 3, .tdvh = 1,
.tfs = 15, .tli = 13, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3,
.tsr = 3, .trfs = 8, .trp = 14, .tack = 3, .tss = 7,
},
{ .tcyc = 2, .t2cyc = 5, .tds = 0, .tdh = 0, .tdvs = 1, .tdvh = 1,
.tfs = 16, .tli = 14, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
.tsr = 2, .trfs = 7, .trp = 13, .tack = 2, .tss = 6,
},
{ .tcyc = 3, .t2cyc = 6, .tds = 1, .tdh = 1, .tdvs = 1, .tdvh = 1,
.tfs = 12, .tli = 10, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3,
.tsr = 3, .trfs = 7, .trp = 12, .tack = 3, .tss = 7,
},
};
/* ======================================================================== */
/* Bit definitions inside the registers */
#define MPC52xx_ATA_HOSTCONF_SMR 0x80000000UL /* State machine reset */
#define MPC52xx_ATA_HOSTCONF_FR 0x40000000UL /* FIFO Reset */
#define MPC52xx_ATA_HOSTCONF_IE 0x02000000UL /* Enable interrupt in PIO */
#define MPC52xx_ATA_HOSTCONF_IORDY 0x01000000UL /* Drive supports IORDY protocol */
#define MPC52xx_ATA_HOSTSTAT_TIP 0x80000000UL /* Transaction in progress */
#define MPC52xx_ATA_HOSTSTAT_UREP 0x40000000UL /* UDMA Read Extended Pause */
#define MPC52xx_ATA_HOSTSTAT_RERR 0x02000000UL /* Read Error */
#define MPC52xx_ATA_HOSTSTAT_WERR 0x01000000UL /* Write Error */
#define MPC52xx_ATA_FIFOSTAT_EMPTY 0x01 /* FIFO Empty */
#define MPC52xx_ATA_FIFOSTAT_ERROR 0x40 /* FIFO Error */
#define MPC52xx_ATA_DMAMODE_WRITE 0x01 /* Write DMA */
#define MPC52xx_ATA_DMAMODE_READ 0x02 /* Read DMA */
#define MPC52xx_ATA_DMAMODE_UDMA 0x04 /* UDMA enabled */
#define MPC52xx_ATA_DMAMODE_IE 0x08 /* Enable drive interrupt to CPU in DMA mode */
#define MPC52xx_ATA_DMAMODE_FE 0x10 /* FIFO Flush enable in Rx mode */
#define MPC52xx_ATA_DMAMODE_FR 0x20 /* FIFO Reset */
#define MPC52xx_ATA_DMAMODE_HUT 0x40 /* Host UDMA burst terminate */
#define MAX_DMA_BUFFERS 128
#define MAX_DMA_BUFFER_SIZE 0x20000u
/* Structure of the hardware registers */
struct mpc52xx_ata {
/* Host interface registers */
u32 config; /* ATA + 0x00 Host configuration */
u32 host_status; /* ATA + 0x04 Host controller status */
u32 pio1; /* ATA + 0x08 PIO Timing 1 */
u32 pio2; /* ATA + 0x0c PIO Timing 2 */
u32 mdma1; /* ATA + 0x10 MDMA Timing 1 */
u32 mdma2; /* ATA + 0x14 MDMA Timing 2 */
u32 udma1; /* ATA + 0x18 UDMA Timing 1 */
u32 udma2; /* ATA + 0x1c UDMA Timing 2 */
u32 udma3; /* ATA + 0x20 UDMA Timing 3 */
u32 udma4; /* ATA + 0x24 UDMA Timing 4 */
u32 udma5; /* ATA + 0x28 UDMA Timing 5 */
u32 share_cnt; /* ATA + 0x2c ATA share counter */
u32 reserved0[3];
/* FIFO registers */
u32 fifo_data; /* ATA + 0x3c */
u8 fifo_status_frame; /* ATA + 0x40 */
u8 fifo_status; /* ATA + 0x41 */
u16 reserved7[1];
u8 fifo_control; /* ATA + 0x44 */
u8 reserved8[5];
u16 fifo_alarm; /* ATA + 0x4a */
u16 reserved9;
u16 fifo_rdp; /* ATA + 0x4e */
u16 reserved10;
u16 fifo_wrp; /* ATA + 0x52 */
u16 reserved11;
u16 fifo_lfrdp; /* ATA + 0x56 */
u16 reserved12;
u16 fifo_lfwrp; /* ATA + 0x5a */
/* Drive TaskFile registers */
u8 tf_control; /* ATA + 0x5c TASKFILE Control/Alt Status */
u8 reserved13[3];
u16 tf_data; /* ATA + 0x60 TASKFILE Data */
u16 reserved14;
u8 tf_features; /* ATA + 0x64 TASKFILE Features/Error */
u8 reserved15[3];
u8 tf_sec_count; /* ATA + 0x68 TASKFILE Sector Count */
u8 reserved16[3];
u8 tf_sec_num; /* ATA + 0x6c TASKFILE Sector Number */
u8 reserved17[3];
u8 tf_cyl_low; /* ATA + 0x70 TASKFILE Cylinder Low */
u8 reserved18[3];
u8 tf_cyl_high; /* ATA + 0x74 TASKFILE Cylinder High */
u8 reserved19[3];
u8 tf_dev_head; /* ATA + 0x78 TASKFILE Device/Head */
u8 reserved20[3];
u8 tf_command; /* ATA + 0x7c TASKFILE Command/Status */
u8 dma_mode; /* ATA + 0x7d ATA Host DMA Mode configuration */
u8 reserved21[2];
};
/* ======================================================================== */
/* Aux fns */
/* ======================================================================== */
/* MPC52xx low level hw control */
static int
mpc52xx_ata_compute_pio_timings(struct mpc52xx_ata_priv *priv, int dev, int pio)
{
struct mpc52xx_ata_timings *timing = &priv->timings[dev];
unsigned int ipb_period = priv->ipb_period;
u32 t0, t1, t2_8, t2_16, t2i, t4, ta;
if ((pio < 0) || (pio > 4))
return -EINVAL;
t0 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t0[pio]);
t1 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t1[pio]);
t2_8 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t2_8[pio]);
t2_16 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t2_16[pio]);
t2i = CALC_CLKCYC(ipb_period, 1000 * ataspec_t2i[pio]);
t4 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t4[pio]);
ta = CALC_CLKCYC(ipb_period, 1000 * ataspec_ta[pio]);
timing->pio1 = (t0 << 24) | (t2_8 << 16) | (t2_16 << 8) | (t2i);
timing->pio2 = (t4 << 24) | (t1 << 16) | (ta << 8);
return 0;
}
static int
mpc52xx_ata_compute_mdma_timings(struct mpc52xx_ata_priv *priv, int dev,
int speed)
{
struct mpc52xx_ata_timings *t = &priv->timings[dev];
const struct mdmaspec *s = &priv->mdmaspec[speed];
if (speed < 0 || speed > 2)
return -EINVAL;
t->mdma1 = ((u32)s->t0M << 24) | ((u32)s->td << 16) | ((u32)s->tkw << 8) | s->tm;
t->mdma2 = ((u32)s->th << 24) | ((u32)s->tj << 16) | ((u32)s->tn << 8);
t->using_udma = 0;
return 0;
}
static int
mpc52xx_ata_compute_udma_timings(struct mpc52xx_ata_priv *priv, int dev,
int speed)
{
struct mpc52xx_ata_timings *t = &priv->timings[dev];
const struct udmaspec *s = &priv->udmaspec[speed];
if (speed < 0 || speed > 2)
return -EINVAL;
t->udma1 = ((u32)s->t2cyc << 24) | ((u32)s->tcyc << 16) | ((u32)s->tds << 8) | s->tdh;
t->udma2 = ((u32)s->tdvs << 24) | ((u32)s->tdvh << 16) | ((u32)s->tfs << 8) | s->tli;
t->udma3 = ((u32)s->tmli << 24) | ((u32)s->taz << 16) | ((u32)s->tenv << 8) | s->tsr;
t->udma4 = ((u32)s->tss << 24) | ((u32)s->trfs << 16) | ((u32)s->trp << 8) | s->tack;
t->udma5 = (u32)s->tzah << 24;
t->using_udma = 1;
return 0;
}
static void
mpc52xx_ata_apply_timings(struct mpc52xx_ata_priv *priv, int device)
{
struct mpc52xx_ata __iomem *regs = priv->ata_regs;
struct mpc52xx_ata_timings *timing = &priv->timings[device];
out_be32(&regs->pio1, timing->pio1);
out_be32(&regs->pio2, timing->pio2);
out_be32(&regs->mdma1, timing->mdma1);
out_be32(&regs->mdma2, timing->mdma2);
out_be32(&regs->udma1, timing->udma1);
out_be32(&regs->udma2, timing->udma2);
out_be32(&regs->udma3, timing->udma3);
out_be32(&regs->udma4, timing->udma4);
out_be32(&regs->udma5, timing->udma5);
priv->csel = device;
}
static int
mpc52xx_ata_hw_init(struct mpc52xx_ata_priv *priv)
{
struct mpc52xx_ata __iomem *regs = priv->ata_regs;
int tslot;
/* Clear share_cnt (all the sample code does this ...) */
out_be32(&regs->share_cnt, 0);
/* Configure and reset host */
out_be32(&regs->config,
MPC52xx_ATA_HOSTCONF_IE |
MPC52xx_ATA_HOSTCONF_IORDY |
MPC52xx_ATA_HOSTCONF_SMR |
MPC52xx_ATA_HOSTCONF_FR);
udelay(10);
out_be32(&regs->config,
MPC52xx_ATA_HOSTCONF_IE |
MPC52xx_ATA_HOSTCONF_IORDY);
/* Set the time slot to 1us */
tslot = CALC_CLKCYC(priv->ipb_period, 1000000);
out_be32(&regs->share_cnt, tslot << 16);
/* Init timings to PIO0 */
memset(priv->timings, 0x00, 2*sizeof(struct mpc52xx_ata_timings));
mpc52xx_ata_compute_pio_timings(priv, 0, 0);
mpc52xx_ata_compute_pio_timings(priv, 1, 0);
mpc52xx_ata_apply_timings(priv, 0);
return 0;
}
/* ======================================================================== */
/* libata driver */
/* ======================================================================== */
static void
mpc52xx_ata_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct mpc52xx_ata_priv *priv = ap->host->private_data;
int pio, rv;
pio = adev->pio_mode - XFER_PIO_0;
rv = mpc52xx_ata_compute_pio_timings(priv, adev->devno, pio);
if (rv) {
dev_err(ap->dev, "error: invalid PIO mode: %d\n", pio);
return;
}
mpc52xx_ata_apply_timings(priv, adev->devno);
}
static void
mpc52xx_ata_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct mpc52xx_ata_priv *priv = ap->host->private_data;
int rv;
if (adev->dma_mode >= XFER_UDMA_0) {
int dma = adev->dma_mode - XFER_UDMA_0;
rv = mpc52xx_ata_compute_udma_timings(priv, adev->devno, dma);
} else {
int dma = adev->dma_mode - XFER_MW_DMA_0;
rv = mpc52xx_ata_compute_mdma_timings(priv, adev->devno, dma);
}
if (rv) {
dev_alert(ap->dev,
"Trying to select invalid DMA mode %d\n",
adev->dma_mode);
return;
}
mpc52xx_ata_apply_timings(priv, adev->devno);
}
static void
mpc52xx_ata_dev_select(struct ata_port *ap, unsigned int device)
{
struct mpc52xx_ata_priv *priv = ap->host->private_data;
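/* The timing registers are shared by both devices on the channel, so
   only reload them when the selected device actually changes */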
if (device != priv->csel)
mpc52xx_ata_apply_timings(priv, device);
ata_sff_dev_select(ap, device);
}
static int
mpc52xx_ata_build_dmatable(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mpc52xx_ata_priv *priv = ap->host->private_data;
struct bcom_ata_bd *bd;
unsigned int read = !(qc->tf.flags & ATA_TFLAG_WRITE), si;
struct scatterlist *sg;
int count = 0;
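/* Each scatterlist entry is split into BestComm buffer descriptors of
   at most MAX_DMA_BUFFER_SIZE (128 KiB) each, bounded by
   MAX_DMA_BUFFERS descriptors per command */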
if (read)
bcom_ata_rx_prepare(priv->dmatsk);
else
bcom_ata_tx_prepare(priv->dmatsk);
for_each_sg(qc->sg, sg, qc->n_elem, si) {
dma_addr_t cur_addr = sg_dma_address(sg);
u32 cur_len = sg_dma_len(sg);
while (cur_len) {
unsigned int tc = min(cur_len, MAX_DMA_BUFFER_SIZE);
bd = (struct bcom_ata_bd *)
bcom_prepare_next_buffer(priv->dmatsk);
if (read) {
bd->status = tc;
bd->src_pa = (__force u32) priv->ata_regs_pa +
offsetof(struct mpc52xx_ata, fifo_data);
bd->dst_pa = (__force u32) cur_addr;
} else {
bd->status = tc;
bd->src_pa = (__force u32) cur_addr;
bd->dst_pa = (__force u32) priv->ata_regs_pa +
offsetof(struct mpc52xx_ata, fifo_data);
}
bcom_submit_next_buffer(priv->dmatsk, NULL);
cur_addr += tc;
cur_len -= tc;
count++;
if (count > MAX_DMA_BUFFERS) {
dev_alert(ap->dev, "dma table"
"too small\n");
goto use_pio_instead;
}
}
}
return 1;
use_pio_instead:
bcom_ata_reset_bd(priv->dmatsk);
return 0;
}
static void
mpc52xx_bmdma_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mpc52xx_ata_priv *priv = ap->host->private_data;
struct mpc52xx_ata __iomem *regs = priv->ata_regs;
unsigned int read = !(qc->tf.flags & ATA_TFLAG_WRITE);
u8 dma_mode;
if (!mpc52xx_ata_build_dmatable(qc))
dev_alert(ap->dev, "%s: %i, return 1?\n",
__func__, __LINE__);
/* Check FIFO is OK... */
if (in_8(&priv->ata_regs->fifo_status) & MPC52xx_ATA_FIFOSTAT_ERROR)
dev_alert(ap->dev, "%s: FIFO error detected: 0x%02x!\n",
__func__, in_8(&priv->ata_regs->fifo_status));
if (read) {
dma_mode = MPC52xx_ATA_DMAMODE_IE | MPC52xx_ATA_DMAMODE_READ |
MPC52xx_ATA_DMAMODE_FE;
/* Setup FIFO if direction changed */
if (priv->mpc52xx_ata_dma_last_write != 0) {
priv->mpc52xx_ata_dma_last_write = 0;
/* Configure FIFO with granularity to 7 */
out_8(&regs->fifo_control, 7);
out_be16(&regs->fifo_alarm, 128);
/* Set FIFO Reset bit (FR) */
out_8(&regs->dma_mode, MPC52xx_ATA_DMAMODE_FR);
}
} else {
dma_mode = MPC52xx_ATA_DMAMODE_IE | MPC52xx_ATA_DMAMODE_WRITE;
/* Setup FIFO if direction changed */
if (priv->mpc52xx_ata_dma_last_write != 1) {
priv->mpc52xx_ata_dma_last_write = 1;
/* Configure FIFO with granularity to 4 */
out_8(&regs->fifo_control, 4);
out_be16(&regs->fifo_alarm, 128);
}
}
if (priv->timings[qc->dev->devno].using_udma)
dma_mode |= MPC52xx_ATA_DMAMODE_UDMA;
out_8(&regs->dma_mode, dma_mode);
priv->waiting_for_dma = ATA_DMA_ACTIVE;
ata_wait_idle(ap);
ap->ops->sff_exec_command(ap, &qc->tf);
}
static void
mpc52xx_bmdma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mpc52xx_ata_priv *priv = ap->host->private_data;
bcom_set_task_auto_start(priv->dmatsk->tasknum, priv->dmatsk->tasknum);
bcom_enable(priv->dmatsk);
}
static void
mpc52xx_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mpc52xx_ata_priv *priv = ap->host->private_data;
bcom_disable(priv->dmatsk);
bcom_ata_reset_bd(priv->dmatsk);
priv->waiting_for_dma = 0;
/* Check FIFO is OK... */
if (in_8(&priv->ata_regs->fifo_status) & MPC52xx_ATA_FIFOSTAT_ERROR)
dev_alert(ap->dev, "%s: FIFO error detected: 0x%02x!\n",
__func__, in_8(&priv->ata_regs->fifo_status));
}
static u8
mpc52xx_bmdma_status(struct ata_port *ap)
{
struct mpc52xx_ata_priv *priv = ap->host->private_data;
/* Check FIFO is OK... */
if (in_8(&priv->ata_regs->fifo_status) & MPC52xx_ATA_FIFOSTAT_ERROR) {
dev_alert(ap->dev, "%s: FIFO error detected: 0x%02x!\n",
__func__, in_8(&priv->ata_regs->fifo_status));
return priv->waiting_for_dma | ATA_DMA_ERR;
}
return priv->waiting_for_dma;
}
static irqreturn_t
mpc52xx_ata_task_irq(int irq, void *vpriv)
{
struct mpc52xx_ata_priv *priv = vpriv;
while (bcom_buffer_done(priv->dmatsk))
bcom_retrieve_buffer(priv->dmatsk, NULL, NULL);
priv->waiting_for_dma |= ATA_DMA_INTR;
return IRQ_HANDLED;
}
static const struct scsi_host_template mpc52xx_ata_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations mpc52xx_ata_port_ops = {
.inherits = &ata_bmdma_port_ops,
.sff_dev_select = mpc52xx_ata_dev_select,
.set_piomode = mpc52xx_ata_set_piomode,
.set_dmamode = mpc52xx_ata_set_dmamode,
.bmdma_setup = mpc52xx_bmdma_setup,
.bmdma_start = mpc52xx_bmdma_start,
.bmdma_stop = mpc52xx_bmdma_stop,
.bmdma_status = mpc52xx_bmdma_status,
.qc_prep = ata_noop_qc_prep,
};
static int mpc52xx_ata_init_one(struct device *dev,
struct mpc52xx_ata_priv *priv,
unsigned long raw_ata_regs,
int mwdma_mask, int udma_mask)
{
struct ata_host *host;
struct ata_port *ap;
struct ata_ioports *aio;
host = ata_host_alloc(dev, 1);
if (!host)
return -ENOMEM;
ap = host->ports[0];
ap->flags |= ATA_FLAG_SLAVE_POSS;
ap->pio_mask = ATA_PIO4;
ap->mwdma_mask = mwdma_mask;
ap->udma_mask = udma_mask;
ap->ops = &mpc52xx_ata_port_ops;
host->private_data = priv;
aio = &ap->ioaddr;
aio->cmd_addr = NULL; /* Don't have a classic reg block */
aio->altstatus_addr = &priv->ata_regs->tf_control;
aio->ctl_addr = &priv->ata_regs->tf_control;
aio->data_addr = &priv->ata_regs->tf_data;
aio->error_addr = &priv->ata_regs->tf_features;
aio->feature_addr = &priv->ata_regs->tf_features;
aio->nsect_addr = &priv->ata_regs->tf_sec_count;
aio->lbal_addr = &priv->ata_regs->tf_sec_num;
aio->lbam_addr = &priv->ata_regs->tf_cyl_low;
aio->lbah_addr = &priv->ata_regs->tf_cyl_high;
aio->device_addr = &priv->ata_regs->tf_dev_head;
aio->status_addr = &priv->ata_regs->tf_command;
aio->command_addr = &priv->ata_regs->tf_command;
ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs);
/* activate host */
return ata_host_activate(host, priv->ata_irq, ata_bmdma_interrupt, 0,
&mpc52xx_ata_sht);
}
/* ======================================================================== */
/* OF Platform driver */
/* ======================================================================== */
static int mpc52xx_ata_probe(struct platform_device *op)
{
unsigned int ipb_freq;
struct resource res_mem;
int ata_irq = 0;
struct mpc52xx_ata __iomem *ata_regs;
struct mpc52xx_ata_priv *priv = NULL;
int rv, task_irq;
int mwdma_mask = 0, udma_mask = 0;
const __be32 *prop;
int proplen;
struct bcom_task *dmatsk;
/* Get ipb frequency */
ipb_freq = mpc5xxx_get_bus_frequency(&op->dev);
if (!ipb_freq) {
dev_err(&op->dev, "could not determine IPB bus frequency\n");
return -ENODEV;
}
/* Get device base address from device tree, request the region
* and ioremap it. */
rv = of_address_to_resource(op->dev.of_node, 0, &res_mem);
if (rv) {
dev_err(&op->dev, "could not determine device base address\n");
return rv;
}
if (!devm_request_mem_region(&op->dev, res_mem.start,
sizeof(*ata_regs), DRV_NAME)) {
dev_err(&op->dev, "error requesting register region\n");
return -EBUSY;
}
ata_regs = devm_ioremap(&op->dev, res_mem.start, sizeof(*ata_regs));
if (!ata_regs) {
dev_err(&op->dev, "error mapping device registers\n");
return -ENOMEM;
}
/*
* By default, all DMA modes are disabled for the MPC5200. Some
* boards don't have the required signals routed to make DMA work.
* Also, the MPC5200B has a silicon bug that causes data corruption
* with UDMA if it is used at the same time as the LocalPlus bus.
*
* Instead of trying to guess what modes are usable, check the
* ATA device tree node to find out what DMA modes work on the board.
* UDMA/MWDMA modes can also be forced by adding "libata.force=<mode>"
* to the kernel boot parameters.
*
* The MPC5200 ATA controller supports MWDMA modes 0, 1 and 2 and
* UDMA modes 0, 1 and 2.
*/
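/*
 * For example, a board wired for MWDMA2 and UDMA2 might carry a node
 * like this (illustrative fragment only; the unit address is made up):
 *
 *	ata@3a00 {
 *		compatible = "fsl,mpc5200-ata";
 *		mwdma-mode = <2>;
 *		udma-mode = <2>;
 *	};
 */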
prop = of_get_property(op->dev.of_node, "mwdma-mode", &proplen);
if ((prop) && (proplen >= 4))
mwdma_mask = ATA_MWDMA2 & ((1 << (*prop + 1)) - 1);
prop = of_get_property(op->dev.of_node, "udma-mode", &proplen);
if ((prop) && (proplen >= 4))
udma_mask = ATA_UDMA2 & ((1 << (*prop + 1)) - 1);
ata_irq = irq_of_parse_and_map(op->dev.of_node, 0);
if (!ata_irq) {
dev_err(&op->dev, "error mapping irq\n");
return -EINVAL;
}
/* Prepare our private structure */
priv = devm_kzalloc(&op->dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
rv = -ENOMEM;
goto err1;
}
priv->ipb_period = 1000000000 / (ipb_freq / 1000);
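/* i.e. the IPB clock period in picoseconds, matching the 1000x ns->ps
   scaling done in mpc52xx_ata_compute_pio_timings() */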
priv->ata_regs = ata_regs;
priv->ata_regs_pa = res_mem.start;
priv->ata_irq = ata_irq;
priv->csel = -1;
priv->mpc52xx_ata_dma_last_write = -1;
if (ipb_freq / 1000000 == 66) {
priv->mdmaspec = mdmaspec66;
priv->udmaspec = udmaspec66;
} else {
priv->mdmaspec = mdmaspec132;
priv->udmaspec = udmaspec132;
}
/* Allocate a BestComm task for DMA */
dmatsk = bcom_ata_init(MAX_DMA_BUFFERS, MAX_DMA_BUFFER_SIZE);
if (!dmatsk) {
dev_err(&op->dev, "bestcomm initialization failed\n");
rv = -ENOMEM;
goto err1;
}
task_irq = bcom_get_task_irq(dmatsk);
rv = devm_request_irq(&op->dev, task_irq, &mpc52xx_ata_task_irq, 0,
"ATA task", priv);
if (rv) {
dev_err(&op->dev, "error requesting DMA IRQ\n");
goto err2;
}
priv->dmatsk = dmatsk;
/* Init the hw */
rv = mpc52xx_ata_hw_init(priv);
if (rv) {
dev_err(&op->dev, "error initializing hardware\n");
goto err2;
}
/* Register ourselves to libata */
rv = mpc52xx_ata_init_one(&op->dev, priv, res_mem.start,
mwdma_mask, udma_mask);
if (rv) {
dev_err(&op->dev, "error registering with ATA layer\n");
goto err2;
}
return 0;
err2:
irq_dispose_mapping(task_irq);
bcom_ata_release(dmatsk);
err1:
irq_dispose_mapping(ata_irq);
return rv;
}
static void mpc52xx_ata_remove(struct platform_device *op)
{
struct ata_host *host = platform_get_drvdata(op);
struct mpc52xx_ata_priv *priv = host->private_data;
int task_irq;
/* Deregister the ATA interface */
ata_platform_remove_one(op);
/* Clean up DMA */
task_irq = bcom_get_task_irq(priv->dmatsk);
irq_dispose_mapping(task_irq);
bcom_ata_release(priv->dmatsk);
irq_dispose_mapping(priv->ata_irq);
}
#ifdef CONFIG_PM_SLEEP
static int
mpc52xx_ata_suspend(struct platform_device *op, pm_message_t state)
{
struct ata_host *host = platform_get_drvdata(op);
ata_host_suspend(host, state);
return 0;
}
static int
mpc52xx_ata_resume(struct platform_device *op)
{
struct ata_host *host = platform_get_drvdata(op);
struct mpc52xx_ata_priv *priv = host->private_data;
int rv;
rv = mpc52xx_ata_hw_init(priv);
if (rv) {
dev_err(host->dev, "error initializing hardware\n");
return rv;
}
ata_host_resume(host);
return 0;
}
#endif
static const struct of_device_id mpc52xx_ata_of_match[] = {
{ .compatible = "fsl,mpc5200-ata", },
{ .compatible = "mpc5200-ata", },
{ /* sentinel */ }
};
static struct platform_driver mpc52xx_ata_of_platform_driver = {
.probe = mpc52xx_ata_probe,
.remove_new = mpc52xx_ata_remove,
#ifdef CONFIG_PM_SLEEP
.suspend = mpc52xx_ata_suspend,
.resume = mpc52xx_ata_resume,
#endif
.driver = {
.name = DRV_NAME,
.of_match_table = mpc52xx_ata_of_match,
},
};
module_platform_driver(mpc52xx_ata_of_platform_driver);
MODULE_AUTHOR("Sylvain Munaut <[email protected]>");
MODULE_DESCRIPTION("Freescale MPC52xx IDE/ATA libata driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(of, mpc52xx_ata_of_match);
| linux-master | drivers/ata/pata_mpc52xx.c |
/*
* pata_hpt3x3 - HPT3x3 driver
* (c) Copyright 2005-2006 Red Hat
*
* Was pata_hpt34x but the naming was confusing as it supported the
* 343 and 363 so it has been renamed.
*
* Based on:
* linux/drivers/ide/pci/hpt34x.c Version 0.40 Sept 10, 2002
* Copyright (C) 1998-2000 Andre Hedrick <[email protected]>
*
* May be copied or modified under the terms of the GNU General Public
* License
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_hpt3x3"
#define DRV_VERSION "0.6.1"
/**
* hpt3x3_set_piomode - PIO setup
* @ap: ATA interface
* @adev: device on the interface
*
* Set our PIO requirements. This is fairly simple on the HPT3x3 as
* all we have to do is clear the MWDMA and UDMA bits then load the
* mode number.
*/
static void hpt3x3_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 r1, r2;
int dn = 2 * ap->port_no + adev->devno;
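/* Each drive gets a 3-bit mode field in 0x44 and a DMA/UDMA flag pair
   in 0x48, indexed by drive number dn */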
pci_read_config_dword(pdev, 0x44, &r1);
pci_read_config_dword(pdev, 0x48, &r2);
/* Load the PIO timing number */
r1 &= ~(7 << (3 * dn));
r1 |= (adev->pio_mode - XFER_PIO_0) << (3 * dn);
r2 &= ~(0x11 << dn); /* Clear MWDMA and UDMA bits */
pci_write_config_dword(pdev, 0x44, r1);
pci_write_config_dword(pdev, 0x48, r2);
}
#if defined(CONFIG_PATA_HPT3X3_DMA)
/**
* hpt3x3_set_dmamode - DMA timing setup
* @ap: ATA interface
* @adev: Device being configured
*
* Set up the channel for MWDMA or UDMA modes. Much the same as with
* PIO, load the mode number and then set MWDMA or UDMA flag.
*
* 0x44 : bit 0-2 master mode, 3-5 slave mode, etc
* 0x48 : bit 4/0 DMA/UDMA bit 5/1 for slave etc
*/
static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 r1, r2;
int dn = 2 * ap->port_no + adev->devno;
int mode_num = adev->dma_mode & 0x0F;
pci_read_config_dword(pdev, 0x44, &r1);
pci_read_config_dword(pdev, 0x48, &r2);
/* Load the timing number */
r1 &= ~(7 << (3 * dn));
r1 |= (mode_num << (3 * dn));
r2 &= ~(0x11 << dn); /* Clear MWDMA and UDMA bits */
if (adev->dma_mode >= XFER_UDMA_0)
r2 |= (0x01 << dn); /* Ultra mode */
else
r2 |= (0x10 << dn); /* MWDMA */
pci_write_config_dword(pdev, 0x44, r1);
pci_write_config_dword(pdev, 0x48, r2);
}
/**
* hpt3x3_freeze - DMA workaround
* @ap: port to freeze
*
* When freezing an HPT3x3 we must stop any pending DMA before
* writing to the control register or the chip will hang
*/
static void hpt3x3_freeze(struct ata_port *ap)
{
void __iomem *mmio = ap->ioaddr.bmdma_addr;
iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
mmio + ATA_DMA_CMD);
ata_sff_dma_pause(ap);
ata_sff_freeze(ap);
}
/**
* hpt3x3_bmdma_setup - DMA workaround
* @qc: Queued command
*
* When issuing BMDMA we must clean up the error/active bits in
* software on this device
*/
static void hpt3x3_bmdma_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
u8 r = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
r |= ATA_DMA_INTR | ATA_DMA_ERR;
iowrite8(r, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
return ata_bmdma_setup(qc);
}
/**
* hpt3x3_atapi_dma - ATAPI DMA check
* @qc: Queued command
*
* Just say no - we don't do ATAPI DMA
*/
static int hpt3x3_atapi_dma(struct ata_queued_cmd *qc)
{
return 1;
}
#endif /* CONFIG_PATA_HPT3X3_DMA */
static const struct scsi_host_template hpt3x3_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations hpt3x3_port_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = ata_cable_40wire,
.set_piomode = hpt3x3_set_piomode,
#if defined(CONFIG_PATA_HPT3X3_DMA)
.set_dmamode = hpt3x3_set_dmamode,
.bmdma_setup = hpt3x3_bmdma_setup,
.check_atapi_dma = hpt3x3_atapi_dma,
.freeze = hpt3x3_freeze,
#endif
};
/**
* hpt3x3_init_chipset - chip setup
* @dev: PCI device
*
* Perform the setup required at boot and on resume.
*/
static void hpt3x3_init_chipset(struct pci_dev *dev)
{
u16 cmd;
/* Initialize the board */
pci_write_config_word(dev, 0x80, 0x00);
/* Check if it is a 343 or a 363. 363 has COMMAND_MEMORY set */
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (cmd & PCI_COMMAND_MEMORY)
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF0);
else
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
}
/**
* hpt3x3_init_one - Initialise an HPT343/363
* @pdev: PCI device
* @id: Entry in match table
*
* Perform basic initialisation. We set the device up so we access all
* ports via BAR4. This is necessary to work around errata.
*/
static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
#if defined(CONFIG_PATA_HPT3X3_DMA)
/* Further debug needed */
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
#endif
.port_ops = &hpt3x3_port_ops
};
/* Register offsets of taskfiles in BAR4 area */
static const u8 offset_cmd[2] = { 0x20, 0x28 };
static const u8 offset_ctl[2] = { 0x36, 0x3E };
const struct ata_port_info *ppi[] = { &info, NULL };
struct ata_host *host;
int i, rc;
void __iomem *base;
hpt3x3_init_chipset(pdev);
ata_print_version_once(&pdev->dev, DRV_VERSION);
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
if (!host)
return -ENOMEM;
/* acquire resources and fill host */
rc = pcim_enable_device(pdev);
if (rc)
return rc;
/* Everything is relative to BAR4 if we set up this way */
rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;
host->iomap = pcim_iomap_table(pdev);
rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
if (rc)
return rc;
base = host->iomap[4]; /* Bus mastering base */
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
struct ata_ioports *ioaddr = &ap->ioaddr;
ioaddr->cmd_addr = base + offset_cmd[i];
ioaddr->altstatus_addr =
ioaddr->ctl_addr = base + offset_ctl[i];
ioaddr->scr_addr = NULL;
ata_sff_std_ports(ioaddr);
ioaddr->bmdma_addr = base + 8 * i;
ata_port_pbar_desc(ap, 4, -1, "ioport");
ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd");
}
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &hpt3x3_sht);
}
#ifdef CONFIG_PM_SLEEP
static int hpt3x3_reinit_one(struct pci_dev *dev)
{
struct ata_host *host = pci_get_drvdata(dev);
int rc;
rc = ata_pci_device_do_resume(dev);
if (rc)
return rc;
hpt3x3_init_chipset(dev);
ata_host_resume(host);
return 0;
}
#endif
static const struct pci_device_id hpt3x3[] = {
{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT343), },
{ },
};
static struct pci_driver hpt3x3_pci_driver = {
.name = DRV_NAME,
.id_table = hpt3x3,
.probe = hpt3x3_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
.resume = hpt3x3_reinit_one,
#endif
};
module_pci_driver(hpt3x3_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for the Highpoint HPT343/363");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, hpt3x3);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/pata_hpt3x3.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sata_svw.c - ServerWorks / Apple K2 SATA
*
* Maintained by: Benjamin Herrenschmidt <[email protected]> and
* Jeff Garzik <[email protected]>
* Please ALWAYS copy [email protected]
* on emails.
*
* Copyright 2003 Benjamin Herrenschmidt <[email protected]>
*
* Bits from Jeff Garzik, Copyright RedHat, Inc.
*
* This driver probably works with non-Apple versions of the
* Broadcom chipset...
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* Hardware documentation available under NDA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi.h>
#include <linux/libata.h>
#include <linux/of.h>
#include <linux/of_address.h>
#define DRV_NAME "sata_svw"
#define DRV_VERSION "2.3"
enum {
/* ap->flags bits */
K2_FLAG_SATA_8_PORTS = (1 << 24),
K2_FLAG_NO_ATAPI_DMA = (1 << 25),
K2_FLAG_BAR_POS_3 = (1 << 26),
/* Taskfile registers offsets */
K2_SATA_TF_CMD_OFFSET = 0x00,
K2_SATA_TF_DATA_OFFSET = 0x00,
K2_SATA_TF_ERROR_OFFSET = 0x04,
K2_SATA_TF_NSECT_OFFSET = 0x08,
K2_SATA_TF_LBAL_OFFSET = 0x0c,
K2_SATA_TF_LBAM_OFFSET = 0x10,
K2_SATA_TF_LBAH_OFFSET = 0x14,
K2_SATA_TF_DEVICE_OFFSET = 0x18,
K2_SATA_TF_CMDSTAT_OFFSET = 0x1c,
K2_SATA_TF_CTL_OFFSET = 0x20,
/* DMA base */
K2_SATA_DMA_CMD_OFFSET = 0x30,
/* SCRs base */
K2_SATA_SCR_STATUS_OFFSET = 0x40,
K2_SATA_SCR_ERROR_OFFSET = 0x44,
K2_SATA_SCR_CONTROL_OFFSET = 0x48,
/* Others */
K2_SATA_SICR1_OFFSET = 0x80,
K2_SATA_SICR2_OFFSET = 0x84,
K2_SATA_SIM_OFFSET = 0x88,
/* Port stride */
K2_SATA_PORT_OFFSET = 0x100,
chip_svw4 = 0,
chip_svw8 = 1,
chip_svw42 = 2, /* bar 3 */
chip_svw43 = 3, /* bar 5 */
};
static u8 k2_stat_check_status(struct ata_port *ap);
static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc)
{
u8 cmnd = qc->scsicmd->cmnd[0];
if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA)
return -1; /* ATAPI DMA not supported */
else {
switch (cmnd) {
case READ_10:
case READ_12:
case READ_16:
case WRITE_10:
case WRITE_12:
case WRITE_16:
return 0;
default:
return -1;
}
}
}
static int k2_sata_scr_read(struct ata_link *link,
unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
*val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
return 0;
}
static int k2_sata_scr_write(struct ata_link *link,
unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
return 0;
}
static int k2_sata_softreset(struct ata_link *link,
unsigned int *class, unsigned long deadline)
{
u8 dmactl;
void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
dmactl = readb(mmio + ATA_DMA_CMD);
/* Clear the start bit */
if (dmactl & ATA_DMA_START) {
dmactl &= ~ATA_DMA_START;
writeb(dmactl, mmio + ATA_DMA_CMD);
}
return ata_sff_softreset(link, class, deadline);
}
static int k2_sata_hardreset(struct ata_link *link,
unsigned int *class, unsigned long deadline)
{
u8 dmactl;
void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
dmactl = readb(mmio + ATA_DMA_CMD);
/* Clear the start bit */
if (dmactl & ATA_DMA_START) {
dmactl &= ~ATA_DMA_START;
writeb(dmactl, mmio + ATA_DMA_CMD);
}
return sata_sff_hardreset(link, class, deadline);
}
static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
if (tf->ctl != ap->last_ctl) {
writeb(tf->ctl, ioaddr->ctl_addr);
ap->last_ctl = tf->ctl;
ata_wait_idle(ap);
}
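/* The K2 taskfile registers are 16 bits wide, so for LBA48 commands
   the hob (previous) byte is written together with the current byte
   in a single 16-bit store */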
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
writew(tf->feature | (((u16)tf->hob_feature) << 8),
ioaddr->feature_addr);
writew(tf->nsect | (((u16)tf->hob_nsect) << 8),
ioaddr->nsect_addr);
writew(tf->lbal | (((u16)tf->hob_lbal) << 8),
ioaddr->lbal_addr);
writew(tf->lbam | (((u16)tf->hob_lbam) << 8),
ioaddr->lbam_addr);
writew(tf->lbah | (((u16)tf->hob_lbah) << 8),
ioaddr->lbah_addr);
} else if (is_addr) {
writew(tf->feature, ioaddr->feature_addr);
writew(tf->nsect, ioaddr->nsect_addr);
writew(tf->lbal, ioaddr->lbal_addr);
writew(tf->lbam, ioaddr->lbam_addr);
writew(tf->lbah, ioaddr->lbah_addr);
}
if (tf->flags & ATA_TFLAG_DEVICE)
writeb(tf->device, ioaddr->device_addr);
ata_wait_idle(ap);
}
static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
u16 nsect, lbal, lbam, lbah, error;
tf->status = k2_stat_check_status(ap);
tf->device = readw(ioaddr->device_addr);
error = readw(ioaddr->error_addr);
nsect = readw(ioaddr->nsect_addr);
lbal = readw(ioaddr->lbal_addr);
lbam = readw(ioaddr->lbam_addr);
lbah = readw(ioaddr->lbah_addr);
tf->error = error;
tf->nsect = nsect;
tf->lbal = lbal;
tf->lbam = lbam;
tf->lbah = lbah;
if (tf->flags & ATA_TFLAG_LBA48) {
tf->hob_feature = error >> 8;
tf->hob_nsect = nsect >> 8;
tf->hob_lbal = lbal >> 8;
tf->hob_lbam = lbam >> 8;
tf->hob_lbah = lbah >> 8;
}
}
/**
* k2_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction (MMIO)
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static void k2_bmdma_setup_mmio(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
u8 dmactl;
void __iomem *mmio = ap->ioaddr.bmdma_addr;
/* load PRD table addr. */
mb(); /* make sure PRD table writes are visible to controller */
writel(ap->bmdma_prd_dma, mmio + ATA_DMA_TABLE_OFS);
/* specify data direction, triple-check start bit is clear */
dmactl = readb(mmio + ATA_DMA_CMD);
dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
if (!rw)
dmactl |= ATA_DMA_WR;
writeb(dmactl, mmio + ATA_DMA_CMD);
/* issue r/w command if this is not an ATA DMA command */
if (qc->tf.protocol != ATA_PROT_DMA)
ap->ops->sff_exec_command(ap, &qc->tf);
}
/**
* k2_bmdma_start_mmio - Start a PCI IDE BMDMA transaction (MMIO)
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static void k2_bmdma_start_mmio(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *mmio = ap->ioaddr.bmdma_addr;
u8 dmactl;
/* start host DMA transaction */
dmactl = readb(mmio + ATA_DMA_CMD);
writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
/* This works around possible data corruption seen on certain
   SATA controllers when the r/w command is given to the
   controller before the host DMA is started.
   On a Read command, the controller would initiate the
   command to the drive even before it sees the DMA
   start. When there are very fast drives connected to the
   controller, or when the data request hits in the drive
   cache, there is the possibility that the drive returns a
   part or all of the requested data to the controller before
   the DMA start is issued. In this case, the controller
   would become confused as to what to do with the data. In
   the worst case when all the data is returned back to the
   controller, the controller could hang. In other cases it
   could return partial data, resulting in data
   corruption. This problem has been seen on PPC systems and
   can also appear on a system with very fast disks, where
   the SATA controller is sitting behind a number of bridges,
   and hence there is significant latency between the r/w
   command and the start command. */
/* issue r/w command if the access is to ATA */
if (qc->tf.protocol == ATA_PROT_DMA)
ap->ops->sff_exec_command(ap, &qc->tf);
}
static u8 k2_stat_check_status(struct ata_port *ap)
{
return readl(ap->ioaddr.status_addr);
}
static int k2_sata_show_info(struct seq_file *m, struct Scsi_Host *shost)
{
struct ata_port *ap;
struct device_node *np;
int index;
/* Find the ata_port */
ap = ata_shost_to_port(shost);
if (ap == NULL)
return 0;
/* Find the OF node for the PCI device proper */
np = pci_device_to_OF_node(to_pci_dev(ap->host->dev));
if (np == NULL)
return 0;
/* Match it to a port node */
index = (ap == ap->host->ports[0]) ? 0 : 1;
for (np = np->child; np != NULL; np = np->sibling) {
u64 reg;
if (of_property_read_reg(np, 0, &reg, NULL))
continue;
if (index == reg) {
seq_printf(m, "devspec: %pOF\n", np);
break;
}
}
return 0;
}
static const struct scsi_host_template k2_sata_sht = {
ATA_BMDMA_SHT(DRV_NAME),
.show_info = k2_sata_show_info,
};
static struct ata_port_operations k2_sata_ops = {
.inherits = &ata_bmdma_port_ops,
.softreset = k2_sata_softreset,
.hardreset = k2_sata_hardreset,
.sff_tf_load = k2_sata_tf_load,
.sff_tf_read = k2_sata_tf_read,
.sff_check_status = k2_stat_check_status,
.check_atapi_dma = k2_sata_check_atapi_dma,
.bmdma_setup = k2_bmdma_setup_mmio,
.bmdma_start = k2_bmdma_start_mmio,
.scr_read = k2_sata_scr_read,
.scr_write = k2_sata_scr_write,
};
static const struct ata_port_info k2_port_info[] = {
/* chip_svw4 */
{
.flags = ATA_FLAG_SATA | K2_FLAG_NO_ATAPI_DMA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &k2_sata_ops,
},
/* chip_svw8 */
{
.flags = ATA_FLAG_SATA | K2_FLAG_NO_ATAPI_DMA |
K2_FLAG_SATA_8_PORTS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &k2_sata_ops,
},
/* chip_svw42 */
{
.flags = ATA_FLAG_SATA | K2_FLAG_BAR_POS_3,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &k2_sata_ops,
},
/* chip_svw43 */
{
.flags = ATA_FLAG_SATA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &k2_sata_ops,
},
};
static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base)
{
port->cmd_addr = base + K2_SATA_TF_CMD_OFFSET;
port->data_addr = base + K2_SATA_TF_DATA_OFFSET;
port->feature_addr =
port->error_addr = base + K2_SATA_TF_ERROR_OFFSET;
port->nsect_addr = base + K2_SATA_TF_NSECT_OFFSET;
port->lbal_addr = base + K2_SATA_TF_LBAL_OFFSET;
port->lbam_addr = base + K2_SATA_TF_LBAM_OFFSET;
port->lbah_addr = base + K2_SATA_TF_LBAH_OFFSET;
port->device_addr = base + K2_SATA_TF_DEVICE_OFFSET;
port->command_addr =
port->status_addr = base + K2_SATA_TF_CMDSTAT_OFFSET;
port->altstatus_addr =
port->ctl_addr = base + K2_SATA_TF_CTL_OFFSET;
port->bmdma_addr = base + K2_SATA_DMA_CMD_OFFSET;
port->scr_addr = base + K2_SATA_SCR_STATUS_OFFSET;
}
static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct ata_port_info *ppi[] =
{ &k2_port_info[ent->driver_data], NULL };
struct ata_host *host;
void __iomem *mmio_base;
int n_ports, i, rc, bar_pos;
ata_print_version_once(&pdev->dev, DRV_VERSION);
/* allocate host */
n_ports = 4;
if (ppi[0]->flags & K2_FLAG_SATA_8_PORTS)
n_ports = 8;
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
if (!host)
return -ENOMEM;
bar_pos = 5;
if (ppi[0]->flags & K2_FLAG_BAR_POS_3)
bar_pos = 3;
/*
* If this driver happens to only be useful on Apple's K2, then
* we should check that here as it has a normal Serverworks ID
*/
rc = pcim_enable_device(pdev);
if (rc)
return rc;
/*
* Check if we have resources mapped at all (second function may
* have been disabled by firmware)
*/
if (pci_resource_len(pdev, bar_pos) == 0) {
/* In IDE mode we need to pin the device to ensure that
pcim_release does not clear the busmaster bit in config
space, clearing causes busmaster DMA to fail on
ports 3 & 4 */
pcim_pin_device(pdev);
return -ENODEV;
}
/* Request and iomap PCI regions */
rc = pcim_iomap_regions(pdev, 1 << bar_pos, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;
host->iomap = pcim_iomap_table(pdev);
mmio_base = host->iomap[bar_pos];
/* different controllers have different number of ports - currently 4 or 8 */
/* All ports are on the same function. Multi-function device is no
* longer available. This should not be seen in any system. */
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
unsigned int offset = i * K2_SATA_PORT_OFFSET;
k2_sata_setup_port(&ap->ioaddr, mmio_base + offset);
ata_port_pbar_desc(ap, 5, -1, "mmio");
ata_port_pbar_desc(ap, 5, offset, "port");
}
rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
if (rc)
return rc;
/* Clear a magic bit in SCR1 according to Darwin, those help
* some funky seagate drives (though so far, those were already
* set by the firmware on the machines I had access to)
*/
writel(readl(mmio_base + K2_SATA_SICR1_OFFSET) & ~0x00040000,
mmio_base + K2_SATA_SICR1_OFFSET);
/* Clear SATA error & interrupts we don't use */
writel(0xffffffff, mmio_base + K2_SATA_SCR_ERROR_OFFSET);
writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &k2_sata_sht);
}
/* 0x240 is device ID for Apple K2 device
* 0x241 is device ID for Serverworks Frodo4
* 0x242 is device ID for Serverworks Frodo8
* 0x24a is device ID for BCM5785 (aka HT1000) HT southbridge integrated SATA
* controller
* */
static const struct pci_device_id k2_sata_pci_tbl[] = {
{ PCI_VDEVICE(SERVERWORKS, 0x0240), chip_svw4 },
{ PCI_VDEVICE(SERVERWORKS, 0x0241), chip_svw8 },
{ PCI_VDEVICE(SERVERWORKS, 0x0242), chip_svw4 },
{ PCI_VDEVICE(SERVERWORKS, 0x024a), chip_svw4 },
{ PCI_VDEVICE(SERVERWORKS, 0x024b), chip_svw4 },
{ PCI_VDEVICE(SERVERWORKS, 0x0410), chip_svw42 },
{ PCI_VDEVICE(SERVERWORKS, 0x0411), chip_svw43 },
{ }
};
static struct pci_driver k2_sata_pci_driver = {
.name = DRV_NAME,
.id_table = k2_sata_pci_tbl,
.probe = k2_sata_init_one,
.remove = ata_pci_remove_one,
};
module_pci_driver(k2_sata_pci_driver);
MODULE_AUTHOR("Benjamin Herrenschmidt");
MODULE_DESCRIPTION("low-level driver for K2 SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, k2_sata_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/ata/sata_svw.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ixp4xx PATA/Compact Flash driver
* Copyright (C) 2006-07 Tower Technologies
* Author: Alessandro Zummo <[email protected]>
*
* An ATA driver to handle a Compact Flash connected
* to the ixp4xx expansion bus in TrueIDE mode. The CF
* must have its chip selects connected to two CS lines
* on the ixp4xx. If the irq is not available, you might
* want to modify both this driver and libata to run in
* polling mode.
*/
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <scsi/scsi_host.h>
#define DRV_NAME "pata_ixp4xx_cf"
#define DRV_VERSION "1.0"
struct ixp4xx_pata {
struct ata_host *host;
struct regmap *rmap;
u32 cmd_csreg;
void __iomem *cmd;
void __iomem *ctl;
};
#define IXP4XX_EXP_TIMING_STRIDE 0x04
/* The timings for the chipselect is in bits 29..16 */
#define IXP4XX_EXP_T1_T5_MASK GENMASK(29, 16)
#define IXP4XX_EXP_PIO_0_8 0x0a470000
#define IXP4XX_EXP_PIO_1_8 0x06430000
#define IXP4XX_EXP_PIO_2_8 0x02410000
#define IXP4XX_EXP_PIO_3_8 0x00820000
#define IXP4XX_EXP_PIO_4_8 0x00400000
#define IXP4XX_EXP_PIO_0_16 0x29640000
#define IXP4XX_EXP_PIO_1_16 0x05030000
#define IXP4XX_EXP_PIO_2_16 0x00b20000
#define IXP4XX_EXP_PIO_3_16 0x00820000
#define IXP4XX_EXP_PIO_4_16 0x00400000
#define IXP4XX_EXP_BW_MASK (BIT(6)|BIT(0))
#define IXP4XX_EXP_BYTE_RD16 BIT(6) /* Byte reads on half-word devices */
#define IXP4XX_EXP_BYTE_EN BIT(0) /* Use 8bit data bus if set */
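/*
 * The expansion bus is normally kept in 8-bit mode so that taskfile
 * register accesses hit single bytes; ixp4xx_mmio_data_xfer() switches
 * to the 16-bit timings only around data register transfers and then
 * drops back to 8-bit mode.
 */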
static void ixp4xx_set_8bit_timing(struct ixp4xx_pata *ixpp, u8 pio_mode)
{
switch (pio_mode) {
case XFER_PIO_0:
regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_0_8);
break;
case XFER_PIO_1:
regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_1_8);
break;
case XFER_PIO_2:
regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_2_8);
break;
case XFER_PIO_3:
regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_3_8);
break;
case XFER_PIO_4:
regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_4_8);
break;
default:
break;
}
regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
IXP4XX_EXP_BW_MASK, IXP4XX_EXP_BYTE_RD16|IXP4XX_EXP_BYTE_EN);
}
static void ixp4xx_set_16bit_timing(struct ixp4xx_pata *ixpp, u8 pio_mode)
{
switch (pio_mode) {
case XFER_PIO_0:
regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_0_16);
break;
case XFER_PIO_1:
regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_1_16);
break;
case XFER_PIO_2:
regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_2_16);
break;
case XFER_PIO_3:
regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_3_16);
break;
case XFER_PIO_4:
regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_4_16);
break;
default:
break;
}
regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
IXP4XX_EXP_BW_MASK, IXP4XX_EXP_BYTE_RD16);
}
/* This sets up the timing on the chipselect CMD accordingly */
static void ixp4xx_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ixp4xx_pata *ixpp = ap->host->private_data;
ata_dev_info(adev, "configured for PIO%d 8bit\n",
adev->pio_mode - XFER_PIO_0);
ixp4xx_set_8bit_timing(ixpp, adev->pio_mode);
}
static unsigned int ixp4xx_mmio_data_xfer(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw)
{
unsigned int i;
unsigned int words = buflen >> 1;
u16 *buf16 = (u16 *) buf;
struct ata_device *adev = qc->dev;
struct ata_port *ap = qc->dev->link->ap;
void __iomem *mmio = ap->ioaddr.data_addr;
struct ixp4xx_pata *ixpp = ap->host->private_data;
unsigned long flags;
ata_dev_dbg(adev, "%s %d bytes\n", (rw == READ) ? "READ" : "WRITE",
buflen);
spin_lock_irqsave(ap->lock, flags);
/* set the expansion bus in 16bit mode and restore
* 8 bit mode after the transaction.
*/
ixp4xx_set_16bit_timing(ixpp, adev->pio_mode);
udelay(5);
/* Transfer multiple of 2 bytes */
if (rw == READ)
for (i = 0; i < words; i++)
buf16[i] = readw(mmio);
else
for (i = 0; i < words; i++)
writew(buf16[i], mmio);
/* Transfer trailing 1 byte, if any. */
if (unlikely(buflen & 0x01)) {
u16 align_buf[1] = { 0 };
unsigned char *trailing_buf = buf + buflen - 1;
if (rw == READ) {
align_buf[0] = readw(mmio);
memcpy(trailing_buf, align_buf, 1);
} else {
memcpy(align_buf, trailing_buf, 1);
writew(align_buf[0], mmio);
}
words++;
}
ixp4xx_set_8bit_timing(ixpp, adev->pio_mode);
udelay(5);
spin_unlock_irqrestore(ap->lock, flags);
return words << 1;
}
static const struct scsi_host_template ixp4xx_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations ixp4xx_port_ops = {
.inherits = &ata_sff_port_ops,
.sff_data_xfer = ixp4xx_mmio_data_xfer,
.cable_detect = ata_cable_40wire,
.set_piomode = ixp4xx_set_piomode,
};
static struct ata_port_info ixp4xx_port_info = {
.flags = ATA_FLAG_NO_ATAPI,
.pio_mask = ATA_PIO4,
.port_ops = &ixp4xx_port_ops,
};
static void ixp4xx_setup_port(struct ata_port *ap,
struct ixp4xx_pata *ixpp,
unsigned long raw_cmd, unsigned long raw_ctl)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
raw_ctl += 0x06;
ioaddr->cmd_addr = ixpp->cmd;
ioaddr->altstatus_addr = ixpp->ctl + 0x06;
ioaddr->ctl_addr = ixpp->ctl + 0x06;
ata_sff_std_ports(ioaddr);
if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
/* adjust the addresses to handle the address swizzling of the
* ixp4xx in little endian mode.
*/
*(unsigned long *)&ioaddr->data_addr ^= 0x02;
*(unsigned long *)&ioaddr->cmd_addr ^= 0x03;
*(unsigned long *)&ioaddr->altstatus_addr ^= 0x03;
*(unsigned long *)&ioaddr->ctl_addr ^= 0x03;
*(unsigned long *)&ioaddr->error_addr ^= 0x03;
*(unsigned long *)&ioaddr->feature_addr ^= 0x03;
*(unsigned long *)&ioaddr->nsect_addr ^= 0x03;
*(unsigned long *)&ioaddr->lbal_addr ^= 0x03;
*(unsigned long *)&ioaddr->lbam_addr ^= 0x03;
*(unsigned long *)&ioaddr->lbah_addr ^= 0x03;
*(unsigned long *)&ioaddr->device_addr ^= 0x03;
*(unsigned long *)&ioaddr->status_addr ^= 0x03;
*(unsigned long *)&ioaddr->command_addr ^= 0x03;
raw_cmd ^= 0x03;
raw_ctl ^= 0x03;
}
ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", raw_cmd, raw_ctl);
}
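/*
 * Illustrative example of the swizzling above, assuming the standard
 * cmd+0..cmd+7 taskfile layout set up by ata_sff_std_ports(): in
 * little endian mode the byte-wide status register at cmd+7 is really
 * accessed at cmd+(7^3) == cmd+4, while the 16-bit data register at
 * cmd+0 moves to cmd+(0^2) == cmd+2.
 */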
static int ixp4xx_pata_probe(struct platform_device *pdev)
{
struct resource *cmd, *ctl;
struct ata_port_info pi = ixp4xx_port_info;
const struct ata_port_info *ppi[] = { &pi, NULL };
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct ixp4xx_pata *ixpp;
u32 csindex;
int ret;
int irq;
ixpp = devm_kzalloc(dev, sizeof(*ixpp), GFP_KERNEL);
if (!ixpp)
return -ENOMEM;
ixpp->rmap = syscon_node_to_regmap(np->parent);
if (IS_ERR(ixpp->rmap))
return dev_err_probe(dev, PTR_ERR(ixpp->rmap), "no regmap\n");
/* Inspect our address to figure out what chipselect the CMD is on */
ret = of_property_read_u32_index(np, "reg", 0, &csindex);
if (ret)
return dev_err_probe(dev, ret, "can't inspect CMD address\n");
dev_info(dev, "using CS%d for PIO timing configuration\n", csindex);
ixpp->cmd_csreg = csindex * IXP4XX_EXP_TIMING_STRIDE;
ixpp->host = ata_host_alloc_pinfo(dev, ppi, 1);
if (!ixpp->host)
return -ENOMEM;
ixpp->host->private_data = ixpp;
ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
if (ret)
return ret;
ixpp->cmd = devm_platform_get_and_ioremap_resource(pdev, 0, &cmd);
if (IS_ERR(ixpp->cmd))
return PTR_ERR(ixpp->cmd);
ixpp->ctl = devm_platform_get_and_ioremap_resource(pdev, 1, &ctl);
if (IS_ERR(ixpp->ctl))
return PTR_ERR(ixpp->ctl);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
/* Just one port to set up */
ixp4xx_setup_port(ixpp->host->ports[0], ixpp, cmd->start, ctl->start);
ata_print_version_once(dev, DRV_VERSION);
return ata_host_activate(ixpp->host, irq, ata_sff_interrupt, 0, &ixp4xx_sht);
}
static const struct of_device_id ixp4xx_pata_of_match[] = {
{ .compatible = "intel,ixp4xx-compact-flash", },
{ /* sentinel */ }
};
static struct platform_driver ixp4xx_pata_platform_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = ixp4xx_pata_of_match,
},
.probe = ixp4xx_pata_probe,
.remove_new = ata_platform_remove_one,
};
module_platform_driver(ixp4xx_pata_platform_driver);
MODULE_AUTHOR("Alessandro Zummo <[email protected]>");
MODULE_DESCRIPTION("low-level driver for ixp4xx Compact Flash PATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/ata/pata_ixp4xx_cf.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* libata-core.c - helper library for ATA
*
* Copyright 2003-2004 Red Hat, Inc. All rights reserved.
* Copyright 2003-2004 Jeff Garzik
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* Hardware documentation available from http://www.t13.org/ and
* http://www.sata-io.org/
*
* Standards documents from:
* http://www.t13.org (ATA standards, PCI DMA IDE spec)
* http://www.t10.org (SCSI MMC - for ATAPI MMC)
* http://www.sata-io.org (SATA)
* http://www.compactflash.org (CF)
* http://www.qic.org (QIC157 - Tape and DSC)
* http://www.ce-ata.org (CE-ATA: not supported)
*
* libata is essentially a library of internal helper functions for
* low-level ATA host controller drivers. As such, the API/ABI is
* likely to change as new drivers are added and updated.
* Do not depend on ABI/API stability.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/glob.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/leds.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <asm/setup.h>
#define CREATE_TRACE_POINTS
#include <trace/events/libata.h>
#include "libata.h"
#include "libata-transport.h"
const struct ata_port_operations ata_base_port_ops = {
.prereset = ata_std_prereset,
.postreset = ata_std_postreset,
.error_handler = ata_std_error_handler,
.sched_eh = ata_std_sched_eh,
.end_eh = ata_std_end_eh,
};
const struct ata_port_operations sata_port_ops = {
.inherits = &ata_base_port_ops,
.qc_defer = ata_std_qc_defer,
.hardreset = sata_std_hardreset,
};
EXPORT_SYMBOL_GPL(sata_port_ops);
static unsigned int ata_dev_init_params(struct ata_device *dev,
u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
atomic_t ata_print_id = ATOMIC_INIT(0);
#ifdef CONFIG_ATA_FORCE
struct ata_force_param {
const char *name;
u8 cbl;
u8 spd_limit;
unsigned int xfer_mask;
unsigned int horkage_on;
unsigned int horkage_off;
u16 lflags_on;
u16 lflags_off;
};
struct ata_force_ent {
int port;
int device;
struct ata_force_param param;
};
static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;
static char ata_force_param_buf[COMMAND_LINE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
#endif
static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
static int atapi_an;
module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static inline bool ata_dev_print_info(struct ata_device *dev)
{
struct ata_eh_context *ehc = &dev->link->eh_context;
return ehc->i.flags & ATA_EHI_PRINTINFO;
}
static bool ata_sstatus_online(u32 sstatus)
{
return (sstatus & 0xf) == 0x3;
}
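/*
 * Illustrative note: the low nibble of SStatus is the DET field and,
 * per the SATA spec, 0x3 means "device presence detected and Phy
 * communication established". E.g. sstatus == 0x123 is online while
 * 0x4 (offline mode) is not.
 */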
/**
* ata_link_next - link iteration helper
* @link: the previous link, NULL to start
* @ap: ATA port containing links to iterate
* @mode: iteration mode, one of ATA_LITER_*
*
* LOCKING:
* Host lock or EH context.
*
* RETURNS:
* Pointer to the next link.
*/
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
enum ata_link_iter_mode mode)
{
BUG_ON(mode != ATA_LITER_EDGE &&
mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
/* NULL link indicates start of iteration */
if (!link)
switch (mode) {
case ATA_LITER_EDGE:
case ATA_LITER_PMP_FIRST:
if (sata_pmp_attached(ap))
return ap->pmp_link;
fallthrough;
case ATA_LITER_HOST_FIRST:
return &ap->link;
}
/* we just iterated over the host link, what's next? */
if (link == &ap->link)
switch (mode) {
case ATA_LITER_HOST_FIRST:
if (sata_pmp_attached(ap))
return ap->pmp_link;
fallthrough;
case ATA_LITER_PMP_FIRST:
if (unlikely(ap->slave_link))
return ap->slave_link;
fallthrough;
case ATA_LITER_EDGE:
return NULL;
}
/* slave_link excludes PMP */
if (unlikely(link == ap->slave_link))
return NULL;
/* we were over a PMP link */
if (++link < ap->pmp_link + ap->nr_pmp_links)
return link;
if (mode == ATA_LITER_PMP_FIRST)
return &ap->link;
return NULL;
}
EXPORT_SYMBOL_GPL(ata_link_next);
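/*
 * Usage sketch (illustrative): callers normally iterate via the
 * ata_for_each_link() helper from <linux/libata.h>, which expands to
 * roughly:
 *
 *	for (link = ata_link_next(NULL, ap, ATA_LITER_EDGE); link;
 *	     link = ata_link_next(link, ap, ATA_LITER_EDGE))
 *		...;
 */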
/**
* ata_dev_next - device iteration helper
* @dev: the previous device, NULL to start
* @link: ATA link containing devices to iterate
* @mode: iteration mode, one of ATA_DITER_*
*
* LOCKING:
* Host lock or EH context.
*
* RETURNS:
* Pointer to the next device.
*/
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
enum ata_dev_iter_mode mode)
{
BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
/* NULL dev indicates start of iteration */
if (!dev)
switch (mode) {
case ATA_DITER_ENABLED:
case ATA_DITER_ALL:
dev = link->device;
goto check;
case ATA_DITER_ENABLED_REVERSE:
case ATA_DITER_ALL_REVERSE:
dev = link->device + ata_link_max_devices(link) - 1;
goto check;
}
next:
/* move to the next one */
switch (mode) {
case ATA_DITER_ENABLED:
case ATA_DITER_ALL:
if (++dev < link->device + ata_link_max_devices(link))
goto check;
return NULL;
case ATA_DITER_ENABLED_REVERSE:
case ATA_DITER_ALL_REVERSE:
if (--dev >= link->device)
goto check;
return NULL;
}
check:
if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
!ata_dev_enabled(dev))
goto next;
return dev;
}
EXPORT_SYMBOL_GPL(ata_dev_next);
/**
* ata_dev_phys_link - find physical link for a device
* @dev: ATA device to look up physical link for
*
* Look up physical link which @dev is attached to. Note that
* this is different from @dev->link only when @dev is on slave
* link. For all other cases, it's the same as @dev->link.
*
* LOCKING:
* Don't care.
*
* RETURNS:
* Pointer to the found physical link.
*/
struct ata_link *ata_dev_phys_link(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
if (!ap->slave_link)
return dev->link;
if (!dev->devno)
return &ap->link;
return ap->slave_link;
}
#ifdef CONFIG_ATA_FORCE
/**
* ata_force_cbl - force cable type according to libata.force
* @ap: ATA port of interest
*
* Force cable type according to libata.force and whine about it.
* The last entry which has matching port number is used, so it
* can be specified as part of device force parameters. For
* example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
* same effect.
*
* LOCKING:
* EH context.
*/
void ata_force_cbl(struct ata_port *ap)
{
int i;
for (i = ata_force_tbl_size - 1; i >= 0; i--) {
const struct ata_force_ent *fe = &ata_force_tbl[i];
if (fe->port != -1 && fe->port != ap->print_id)
continue;
if (fe->param.cbl == ATA_CBL_NONE)
continue;
ap->cbl = fe->param.cbl;
ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
return;
}
}
/**
* ata_force_link_limits - force link limits according to libata.force
* @link: ATA link of interest
*
* Force link flags and SATA spd limit according to libata.force
* and whine about it. When only the port part is specified
* (e.g. 1:), the limit applies to all links connected to both
* the host link and all fan-out ports connected via PMP. If the
* device part is specified as 0 (e.g. 1.00:), it specifies the
* first fan-out link, not the host link. Device number 15 always
* points to the host link whether PMP is attached or not. If the
* controller has slave link, device number 16 points to it.
*
* LOCKING:
* EH context.
*/
static void ata_force_link_limits(struct ata_link *link)
{
bool did_spd = false;
int linkno = link->pmp;
int i;
if (ata_is_host_link(link))
linkno += 15;
for (i = ata_force_tbl_size - 1; i >= 0; i--) {
const struct ata_force_ent *fe = &ata_force_tbl[i];
if (fe->port != -1 && fe->port != link->ap->print_id)
continue;
if (fe->device != -1 && fe->device != linkno)
continue;
/* only honor the first spd limit */
if (!did_spd && fe->param.spd_limit) {
link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
fe->param.name);
did_spd = true;
}
/* let lflags stack */
if (fe->param.lflags_on) {
link->flags |= fe->param.lflags_on;
ata_link_notice(link,
"FORCE: link flag 0x%x forced -> 0x%x\n",
fe->param.lflags_on, link->flags);
}
if (fe->param.lflags_off) {
link->flags &= ~fe->param.lflags_off;
ata_link_notice(link,
"FORCE: link flag 0x%x cleared -> 0x%x\n",
fe->param.lflags_off, link->flags);
}
}
}
/**
* ata_force_xfermask - force xfermask according to libata.force
* @dev: ATA device of interest
*
* Force xfer_mask according to libata.force and whine about it.
* For consistency with link selection, device number 15 selects
* the first device connected to the host link.
*
* LOCKING:
* EH context.
*/
static void ata_force_xfermask(struct ata_device *dev)
{
int devno = dev->link->pmp + dev->devno;
int alt_devno = devno;
int i;
/* allow n.15/16 for devices attached to host port */
if (ata_is_host_link(dev->link))
alt_devno += 15;
for (i = ata_force_tbl_size - 1; i >= 0; i--) {
const struct ata_force_ent *fe = &ata_force_tbl[i];
unsigned int pio_mask, mwdma_mask, udma_mask;
if (fe->port != -1 && fe->port != dev->link->ap->print_id)
continue;
if (fe->device != -1 && fe->device != devno &&
fe->device != alt_devno)
continue;
if (!fe->param.xfer_mask)
continue;
ata_unpack_xfermask(fe->param.xfer_mask,
&pio_mask, &mwdma_mask, &udma_mask);
if (udma_mask)
dev->udma_mask = udma_mask;
else if (mwdma_mask) {
dev->udma_mask = 0;
dev->mwdma_mask = mwdma_mask;
} else {
dev->udma_mask = 0;
dev->mwdma_mask = 0;
dev->pio_mask = pio_mask;
}
ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
fe->param.name);
return;
}
}
/**
* ata_force_horkage - force horkage according to libata.force
* @dev: ATA device of interest
*
* Force horkage according to libata.force and whine about it.
* For consistency with link selection, device number 15 selects
* the first device connected to the host link.
*
* LOCKING:
* EH context.
*/
static void ata_force_horkage(struct ata_device *dev)
{
int devno = dev->link->pmp + dev->devno;
int alt_devno = devno;
int i;
/* allow n.15/16 for devices attached to host port */
if (ata_is_host_link(dev->link))
alt_devno += 15;
for (i = 0; i < ata_force_tbl_size; i++) {
const struct ata_force_ent *fe = &ata_force_tbl[i];
if (fe->port != -1 && fe->port != dev->link->ap->print_id)
continue;
if (fe->device != -1 && fe->device != devno &&
fe->device != alt_devno)
continue;
if (!(~dev->horkage & fe->param.horkage_on) &&
!(dev->horkage & fe->param.horkage_off))
continue;
dev->horkage |= fe->param.horkage_on;
dev->horkage &= ~fe->param.horkage_off;
ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
fe->param.name);
}
}
#else
static inline void ata_force_link_limits(struct ata_link *link) { }
static inline void ata_force_xfermask(struct ata_device *dev) { }
static inline void ata_force_horkage(struct ata_device *dev) { }
#endif
/**
* atapi_cmd_type - Determine ATAPI command type from SCSI opcode
* @opcode: SCSI opcode
*
* Determine ATAPI command type from @opcode.
*
* LOCKING:
* None.
*
* RETURNS:
* ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
*/
int atapi_cmd_type(u8 opcode)
{
switch (opcode) {
case GPCMD_READ_10:
case GPCMD_READ_12:
return ATAPI_READ;
case GPCMD_WRITE_10:
case GPCMD_WRITE_12:
case GPCMD_WRITE_AND_VERIFY_10:
return ATAPI_WRITE;
case GPCMD_READ_CD:
case GPCMD_READ_CD_MSF:
return ATAPI_READ_CD;
case ATA_16:
case ATA_12:
if (atapi_passthru16)
return ATAPI_PASS_THRU;
fallthrough;
default:
return ATAPI_MISC;
}
}
EXPORT_SYMBOL_GPL(atapi_cmd_type);
static const u8 ata_rw_cmds[] = {
/* pio multi */
ATA_CMD_READ_MULTI,
ATA_CMD_WRITE_MULTI,
ATA_CMD_READ_MULTI_EXT,
ATA_CMD_WRITE_MULTI_EXT,
0,
0,
0,
0,
/* pio */
ATA_CMD_PIO_READ,
ATA_CMD_PIO_WRITE,
ATA_CMD_PIO_READ_EXT,
ATA_CMD_PIO_WRITE_EXT,
0,
0,
0,
0,
/* dma */
ATA_CMD_READ,
ATA_CMD_WRITE,
ATA_CMD_READ_EXT,
ATA_CMD_WRITE_EXT,
0,
0,
0,
ATA_CMD_WRITE_FUA_EXT
};
/**
* ata_set_rwcmd_protocol - set taskfile r/w command and protocol
* @dev: target device for the taskfile
* @tf: taskfile to examine and configure
*
* Examine the device configuration and tf->flags to determine
* the proper read/write command and protocol to use for @tf.
*
* LOCKING:
* caller.
*/
static bool ata_set_rwcmd_protocol(struct ata_device *dev,
struct ata_taskfile *tf)
{
u8 cmd;
int index, fua, lba48, write;
fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
if (dev->flags & ATA_DFLAG_PIO) {
tf->protocol = ATA_PROT_PIO;
index = dev->multi_count ? 0 : 8;
} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
/* Unable to use DMA due to host limitation */
tf->protocol = ATA_PROT_PIO;
index = dev->multi_count ? 0 : 8;
} else {
tf->protocol = ATA_PROT_DMA;
index = 16;
}
cmd = ata_rw_cmds[index + fua + lba48 + write];
if (!cmd)
return false;
tf->command = cmd;
return true;
}
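/*
 * Worked example (illustrative): for a DMA LBA48 FUA write,
 * index == 16, fua == 4, lba48 == 2 and write == 1, so
 * ata_rw_cmds[23] == ATA_CMD_WRITE_FUA_EXT is picked. A FUA write
 * without LBA48 lands on a zero entry (index 21), which makes this
 * helper fail and the request is rejected with -EINVAL.
 */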
/**
* ata_tf_read_block - Read block address from ATA taskfile
* @tf: ATA taskfile of interest
* @dev: ATA device @tf belongs to
*
* LOCKING:
* None.
*
* Read block address from @tf. This function can handle all
* three address formats - LBA, LBA48 and CHS. tf->protocol and
* flags select the address format to use.
*
* RETURNS:
* Block address read from @tf.
*/
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
{
u64 block = 0;
if (tf->flags & ATA_TFLAG_LBA) {
if (tf->flags & ATA_TFLAG_LBA48) {
block |= (u64)tf->hob_lbah << 40;
block |= (u64)tf->hob_lbam << 32;
block |= (u64)tf->hob_lbal << 24;
} else
block |= (tf->device & 0xf) << 24;
block |= tf->lbah << 16;
block |= tf->lbam << 8;
block |= tf->lbal;
} else {
u32 cyl, head, sect;
cyl = tf->lbam | (tf->lbah << 8);
head = tf->device & 0xf;
sect = tf->lbal;
if (!sect) {
ata_dev_warn(dev,
"device reported invalid CHS sector 0\n");
return U64_MAX;
}
block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
}
return block;
}
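/*
 * Worked LBA28 example (illustrative values): with device == 0x4a,
 * lbah == 0x12, lbam == 0x34 and lbal == 0x56, the low nibble of the
 * device register supplies bits 27:24 and the block address decodes
 * to 0x0a123456.
 */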
/*
* Set a taskfile command duration limit index.
*/
static inline void ata_set_tf_cdl(struct ata_queued_cmd *qc, int cdl)
{
struct ata_taskfile *tf = &qc->tf;
if (tf->protocol == ATA_PROT_NCQ)
tf->auxiliary |= cdl;
else
tf->feature |= cdl;
/*
* Mark this command as having a CDL and request the result
* task file so that we can inspect the sense data available
* bit on completion.
*/
qc->flags |= ATA_QCFLAG_HAS_CDL | ATA_QCFLAG_RESULT_TF;
}
/**
* ata_build_rw_tf - Build ATA taskfile for given read/write request
* @qc: Metadata associated with the taskfile to build
* @block: Block address
* @n_block: Number of blocks
* @tf_flags: RW/FUA etc...
* @cdl: Command duration limit index
* @class: IO priority class
*
* LOCKING:
* None.
*
* Build ATA taskfile for the command @qc for read/write request described
* by @block, @n_block, @tf_flags and @class.
*
* RETURNS:
* 0 on success, -ERANGE if the request is too large for @dev,
* -EINVAL if the request is invalid.
*/
int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
unsigned int tf_flags, int cdl, int class)
{
struct ata_taskfile *tf = &qc->tf;
struct ata_device *dev = qc->dev;
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf->flags |= tf_flags;
if (ata_ncq_enabled(dev)) {
/* yay, NCQ */
if (!lba_48_ok(block, n_block))
return -ERANGE;
tf->protocol = ATA_PROT_NCQ;
tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
if (tf->flags & ATA_TFLAG_WRITE)
tf->command = ATA_CMD_FPDMA_WRITE;
else
tf->command = ATA_CMD_FPDMA_READ;
tf->nsect = qc->hw_tag << 3;
tf->hob_feature = (n_block >> 8) & 0xff;
tf->feature = n_block & 0xff;
tf->hob_lbah = (block >> 40) & 0xff;
tf->hob_lbam = (block >> 32) & 0xff;
tf->hob_lbal = (block >> 24) & 0xff;
tf->lbah = (block >> 16) & 0xff;
tf->lbam = (block >> 8) & 0xff;
tf->lbal = block & 0xff;
tf->device = ATA_LBA;
if (tf->flags & ATA_TFLAG_FUA)
tf->device |= 1 << 7;
if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED &&
class == IOPRIO_CLASS_RT)
tf->hob_nsect |= ATA_PRIO_HIGH << ATA_SHIFT_PRIO;
if ((dev->flags & ATA_DFLAG_CDL_ENABLED) && cdl)
ata_set_tf_cdl(qc, cdl);
} else if (dev->flags & ATA_DFLAG_LBA) {
tf->flags |= ATA_TFLAG_LBA;
if ((dev->flags & ATA_DFLAG_CDL_ENABLED) && cdl)
ata_set_tf_cdl(qc, cdl);
/* Both FUA writes and a CDL index require 48-bit commands */
if (!(tf->flags & ATA_TFLAG_FUA) &&
!(qc->flags & ATA_QCFLAG_HAS_CDL) &&
lba_28_ok(block, n_block)) {
/* use LBA28 */
tf->device |= (block >> 24) & 0xf;
} else if (lba_48_ok(block, n_block)) {
if (!(dev->flags & ATA_DFLAG_LBA48))
return -ERANGE;
/* use LBA48 */
tf->flags |= ATA_TFLAG_LBA48;
tf->hob_nsect = (n_block >> 8) & 0xff;
tf->hob_lbah = (block >> 40) & 0xff;
tf->hob_lbam = (block >> 32) & 0xff;
tf->hob_lbal = (block >> 24) & 0xff;
} else {
/* request too large even for LBA48 */
return -ERANGE;
}
if (unlikely(!ata_set_rwcmd_protocol(dev, tf)))
return -EINVAL;
tf->nsect = n_block & 0xff;
tf->lbah = (block >> 16) & 0xff;
tf->lbam = (block >> 8) & 0xff;
tf->lbal = block & 0xff;
tf->device |= ATA_LBA;
} else {
/* CHS */
u32 sect, head, cyl, track;
/* The request -may- be too large for CHS addressing. */
if (!lba_28_ok(block, n_block))
return -ERANGE;
if (unlikely(!ata_set_rwcmd_protocol(dev, tf)))
return -EINVAL;
/* Convert LBA to CHS */
track = (u32)block / dev->sectors;
cyl = track / dev->heads;
head = track % dev->heads;
sect = (u32)block % dev->sectors + 1;
/* Check whether the converted CHS can fit.
 * Cylinder: 0-65535
 * Head: 0-15
 * Sector: 1-255
 */
if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
return -ERANGE;
tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
tf->lbal = sect;
tf->lbam = cyl;
tf->lbah = cyl >> 8;
tf->device |= head;
}
return 0;
}
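/*
 * Worked CHS example (illustrative values): for block == 5000 on a
 * drive with 16 heads and 63 sectors per track, track == 5000/63 ==
 * 79, cyl == 79/16 == 4, head == 79 % 16 == 15 and sect ==
 * 5000 % 63 + 1 == 24, all within the 65536/16/255 limits checked
 * above.
 */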
/**
* ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
* @pio_mask: pio_mask
* @mwdma_mask: mwdma_mask
* @udma_mask: udma_mask
*
* Pack @pio_mask, @mwdma_mask and @udma_mask into a single
* unsigned int xfer_mask.
*
* LOCKING:
* None.
*
* RETURNS:
* Packed xfer_mask.
*/
unsigned int ata_pack_xfermask(unsigned int pio_mask,
unsigned int mwdma_mask,
unsigned int udma_mask)
{
return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
EXPORT_SYMBOL_GPL(ata_pack_xfermask);
/**
* ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
* @xfer_mask: xfer_mask to unpack
* @pio_mask: resulting pio_mask
* @mwdma_mask: resulting mwdma_mask
* @udma_mask: resulting udma_mask
*
* Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
* Any NULL destination masks will be ignored.
*/
void ata_unpack_xfermask(unsigned int xfer_mask, unsigned int *pio_mask,
unsigned int *mwdma_mask, unsigned int *udma_mask)
{
if (pio_mask)
*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
if (mwdma_mask)
*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
if (udma_mask)
*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
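/*
 * Worked example (illustrative, assuming the usual ATA_SHIFT_PIO,
 * ATA_SHIFT_MWDMA and ATA_SHIFT_UDMA values of 0, 7 and 12): packing
 * PIO0-4 (0x1f), MWDMA0-2 (0x07) and UDMA0-5 (0x3f) gives
 * 0x1f | (0x07 << 7) | (0x3f << 12) == 0x3f39f, and unpacking that
 * value recovers the three original masks.
 */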
static const struct ata_xfer_ent {
int shift, bits;
u8 base;
} ata_xfer_tbl[] = {
{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
{ -1, },
};
/**
* ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
* @xfer_mask: xfer_mask of interest
*
* Return matching XFER_* value for @xfer_mask. Only the highest
* bit of @xfer_mask is considered.
*
* LOCKING:
* None.
*
* RETURNS:
* Matching XFER_* value, 0xff if no match found.
*/
u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
int highbit = fls(xfer_mask) - 1;
const struct ata_xfer_ent *ent;
for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
return ent->base + highbit - ent->shift;
return 0xff;
}
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
/**
* ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
* @xfer_mode: XFER_* of interest
*
* Return matching xfer_mask for @xfer_mode.
*
* LOCKING:
* None.
*
* RETURNS:
* Matching xfer_mask, 0 if no match found.
*/
unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
const struct ata_xfer_ent *ent;
for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
& ~((1 << ent->shift) - 1);
return 0;
}
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
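/*
 * Round-trip example (illustrative, assuming XFER_UDMA_0 == 0x40 and
 * ATA_SHIFT_UDMA == 12): ata_xfer_mask2mode(0x7000) sees highbit 14
 * in the UDMA range and returns 0x42 (XFER_UDMA_2), while
 * ata_xfer_mode2mask(XFER_UDMA_2) rebuilds 0x7000, i.e. UDMA0-2.
 */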
/**
* ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
* @xfer_mode: XFER_* of interest
*
* Return matching xfer_shift for @xfer_mode.
*
* LOCKING:
* None.
*
* RETURNS:
* Matching xfer_shift, -1 if no match found.
*/
int ata_xfer_mode2shift(u8 xfer_mode)
{
const struct ata_xfer_ent *ent;
for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
return ent->shift;
return -1;
}
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
/**
* ata_mode_string - convert xfer_mask to string
* @xfer_mask: mask of bits supported; only highest bit counts.
*
* Determine the string which represents the highest speed
* (highest bit in @xfer_mask).
*
* LOCKING:
* None.
*
* RETURNS:
* Constant C string representing the highest speed listed in
* @xfer_mask, or the constant C string "<n/a>".
*/
const char *ata_mode_string(unsigned int xfer_mask)
{
static const char * const xfer_mode_str[] = {
"PIO0",
"PIO1",
"PIO2",
"PIO3",
"PIO4",
"PIO5",
"PIO6",
"MWDMA0",
"MWDMA1",
"MWDMA2",
"MWDMA3",
"MWDMA4",
"UDMA/16",
"UDMA/25",
"UDMA/33",
"UDMA/44",
"UDMA/66",
"UDMA/100",
"UDMA/133",
"UDMA7",
};
int highbit;
highbit = fls(xfer_mask) - 1;
if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
return xfer_mode_str[highbit];
return "<n/a>";
}
EXPORT_SYMBOL_GPL(ata_mode_string);
const char *sata_spd_string(unsigned int spd)
{
static const char * const spd_str[] = {
"1.5 Gbps",
"3.0 Gbps",
"6.0 Gbps",
};
if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
return "<unknown>";
return spd_str[spd - 1];
}
/**
* ata_dev_classify - determine device type based on ATA-spec signature
* @tf: ATA taskfile register set for device to be identified
*
* Determine from taskfile register contents whether a device is
* ATA or ATAPI, as per "Signature and persistence" section
* of ATA/PI spec (volume 1, sect 5.14).
*
* LOCKING:
* None.
*
* RETURNS:
* Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
* %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
*/
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
/* Apple's open source Darwin code hints that some devices only
* put a proper signature into the LBA mid/high registers, so
* we only check those. It's sufficient for uniqueness.
*
* ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
* signatures for ATA and ATAPI devices attached on SerialATA,
* 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
* spec has never mentioned using different signatures
* for ATA/ATAPI devices. Then, the Serial ATA II: Port
* Multiplier specification began to use 0x69/0x96 to identify
* port multipliers and 0x3c/0xc3 to identify SEMB devices.
* ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
* 0x69/0x96 shortly and described them as reserved for
* SerialATA.
*
* We follow the current spec and consider that 0x69/0x96
* identifies a port multiplier and 0x3c/0xc3 a SEMB device.
* Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
* SEMB signature. This is worked around in
* ata_dev_read_id().
*/
if (tf->lbam == 0 && tf->lbah == 0)
return ATA_DEV_ATA;
if (tf->lbam == 0x14 && tf->lbah == 0xeb)
return ATA_DEV_ATAPI;
if (tf->lbam == 0x69 && tf->lbah == 0x96)
return ATA_DEV_PMP;
if (tf->lbam == 0x3c && tf->lbah == 0xc3)
return ATA_DEV_SEMB;
if (tf->lbam == 0xcd && tf->lbah == 0xab)
return ATA_DEV_ZAC;
return ATA_DEV_UNKNOWN;
}
EXPORT_SYMBOL_GPL(ata_dev_classify);
/**
* ata_id_string - Convert IDENTIFY DEVICE page into string
* @id: IDENTIFY DEVICE results we will examine
* @s: string into which data is output
* @ofs: offset into identify device page
* @len: length of string to return. must be an even number.
*
* The strings in the IDENTIFY DEVICE page are broken up into
* 16-bit chunks. Run through the string, and output each
* 8-bit chunk linearly, regardless of platform.
*
* LOCKING:
* caller.
*/
void ata_id_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len)
{
unsigned int c;
BUG_ON(len & 1);
while (len > 0) {
c = id[ofs] >> 8;
*s = c;
s++;
c = id[ofs] & 0xff;
*s = c;
s++;
ofs++;
len -= 2;
}
}
EXPORT_SYMBOL_GPL(ata_id_string);
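/*
 * Illustrative example: the model string starts at word 27
 * (ATA_ID_PROD), and each word carries two characters with the high
 * byte first, so id[27] == 0x5744 emits 'W' then 'D'.
 */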
/**
* ata_id_c_string - Convert IDENTIFY DEVICE page into C string
* @id: IDENTIFY DEVICE results we will examine
* @s: string into which data is output
* @ofs: offset into identify device page
* @len: length of string to return. must be an odd number.
*
* This function is identical to ata_id_string except that it
* trims trailing spaces and terminates the resulting string with
* null. @len must be actual maximum length (even number) + 1.
*
* LOCKING:
* caller.
*/
void ata_id_c_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len)
{
unsigned char *p;
ata_id_string(id, s, ofs, len - 1);
p = s + strnlen(s, len - 1);
while (p > s && p[-1] == ' ')
p--;
*p = '\0';
}
EXPORT_SYMBOL_GPL(ata_id_c_string);
static u64 ata_id_n_sectors(const u16 *id)
{
if (ata_id_has_lba(id)) {
if (ata_id_has_lba48(id))
return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
}
if (ata_id_current_chs_valid(id))
return (u32)id[ATA_ID_CUR_CYLS] * (u32)id[ATA_ID_CUR_HEADS] *
(u32)id[ATA_ID_CUR_SECTORS];
return (u32)id[ATA_ID_CYLS] * (u32)id[ATA_ID_HEADS] *
(u32)id[ATA_ID_SECTORS];
}
u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
u64 sectors = 0;
sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
sectors |= (tf->lbah & 0xff) << 16;
sectors |= (tf->lbam & 0xff) << 8;
sectors |= (tf->lbal & 0xff);
return sectors;
}
u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
u64 sectors = 0;
sectors |= (tf->device & 0x0f) << 24;
sectors |= (tf->lbah & 0xff) << 16;
sectors |= (tf->lbam & 0xff) << 8;
sectors |= (tf->lbal & 0xff);
return sectors;
}
/**
* ata_read_native_max_address - Read native max address
* @dev: target device
* @max_sectors: out parameter for the result native max address
*
* Perform an LBA48 or LBA28 native size query upon the device in
* question.
*
* RETURNS:
* 0 on success, -EACCES if command is aborted by the drive.
* -EIO on other errors.
*/
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
unsigned int err_mask;
struct ata_taskfile tf;
int lba48 = ata_id_has_lba48(dev->id);
ata_tf_init(dev, &tf);
/* always clear all address registers */
tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
if (lba48) {
tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
tf.flags |= ATA_TFLAG_LBA48;
} else
tf.command = ATA_CMD_READ_NATIVE_MAX;
tf.protocol = ATA_PROT_NODATA;
tf.device |= ATA_LBA;
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
if (err_mask) {
ata_dev_warn(dev,
"failed to read native max address (err_mask=0x%x)\n",
err_mask);
if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
return -EACCES;
return -EIO;
}
if (lba48)
*max_sectors = ata_tf_to_lba48(&tf) + 1;
else
*max_sectors = ata_tf_to_lba(&tf) + 1;
if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
(*max_sectors)--;
return 0;
}
/**
* ata_set_max_sectors - Set max sectors
* @dev: target device
* @new_sectors: new max sectors value to set for the device
*
* Set max sectors of @dev to @new_sectors.
*
* RETURNS:
* 0 on success, -EACCES if command is aborted or denied (due to
* previous non-volatile SET_MAX) by the drive. -EIO on other
* errors.
*/
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
unsigned int err_mask;
struct ata_taskfile tf;
int lba48 = ata_id_has_lba48(dev->id);
new_sectors--;
ata_tf_init(dev, &tf);
tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
if (lba48) {
tf.command = ATA_CMD_SET_MAX_EXT;
tf.flags |= ATA_TFLAG_LBA48;
tf.hob_lbal = (new_sectors >> 24) & 0xff;
tf.hob_lbam = (new_sectors >> 32) & 0xff;
tf.hob_lbah = (new_sectors >> 40) & 0xff;
} else {
tf.command = ATA_CMD_SET_MAX;
tf.device |= (new_sectors >> 24) & 0xf;
}
tf.protocol = ATA_PROT_NODATA;
tf.device |= ATA_LBA;
tf.lbal = (new_sectors >> 0) & 0xff;
tf.lbam = (new_sectors >> 8) & 0xff;
tf.lbah = (new_sectors >> 16) & 0xff;
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
if (err_mask) {
ata_dev_warn(dev,
"failed to set max address (err_mask=0x%x)\n",
err_mask);
if (err_mask == AC_ERR_DEV &&
(tf.error & (ATA_ABORTED | ATA_IDNF)))
return -EACCES;
return -EIO;
}
return 0;
}
/**
* ata_hpa_resize - Resize a device with an HPA set
* @dev: Device to resize
*
* Read the size of an LBA28 or LBA48 disk with HPA features and resize
* it if required to the full size of the media. The caller must check
* the drive has the HPA feature set enabled.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
static int ata_hpa_resize(struct ata_device *dev)
{
bool print_info = ata_dev_print_info(dev);
bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
u64 sectors = ata_id_n_sectors(dev->id);
u64 native_sectors;
int rc;
/* do we need to do it? */
if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
!ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
(dev->horkage & ATA_HORKAGE_BROKEN_HPA))
return 0;
/* read native max address */
rc = ata_read_native_max_address(dev, &native_sectors);
if (rc) {
/* If device aborted the command or HPA isn't going to
* be unlocked, skip HPA resizing.
*/
if (rc == -EACCES || !unlock_hpa) {
ata_dev_warn(dev,
"HPA support seems broken, skipping HPA handling\n");
dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
/* we can continue if device aborted the command */
if (rc == -EACCES)
rc = 0;
}
return rc;
}
dev->n_native_sectors = native_sectors;
/* nothing to do? */
if (native_sectors <= sectors || !unlock_hpa) {
if (!print_info || native_sectors == sectors)
return 0;
if (native_sectors > sectors)
ata_dev_info(dev,
"HPA detected: current %llu, native %llu\n",
(unsigned long long)sectors,
(unsigned long long)native_sectors);
else if (native_sectors < sectors)
ata_dev_warn(dev,
"native sectors (%llu) is smaller than sectors (%llu)\n",
(unsigned long long)native_sectors,
(unsigned long long)sectors);
return 0;
}
/* let's unlock HPA */
rc = ata_set_max_sectors(dev, native_sectors);
if (rc == -EACCES) {
/* if device aborted the command, skip HPA resizing */
ata_dev_warn(dev,
"device aborted resize (%llu -> %llu), skipping HPA handling\n",
(unsigned long long)sectors,
(unsigned long long)native_sectors);
dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
return 0;
} else if (rc)
return rc;
/* re-read IDENTIFY data */
rc = ata_dev_reread_id(dev, 0);
if (rc) {
ata_dev_err(dev,
"failed to re-read IDENTIFY data after HPA resizing\n");
return rc;
}
if (print_info) {
u64 new_sectors = ata_id_n_sectors(dev->id);
ata_dev_info(dev,
"HPA unlocked: %llu -> %llu, native %llu\n",
(unsigned long long)sectors,
(unsigned long long)new_sectors,
(unsigned long long)native_sectors);
}
return 0;
}
/**
* ata_dump_id - IDENTIFY DEVICE info debugging output
* @dev: device from which the information is fetched
* @id: IDENTIFY DEVICE page to dump
*
* Dump selected 16-bit words from the given IDENTIFY DEVICE
* page.
*
* LOCKING:
* caller.
*/
static inline void ata_dump_id(struct ata_device *dev, const u16 *id)
{
ata_dev_dbg(dev,
"49==0x%04x 53==0x%04x 63==0x%04x 64==0x%04x 75==0x%04x\n"
"80==0x%04x 81==0x%04x 82==0x%04x 83==0x%04x 84==0x%04x\n"
"88==0x%04x 93==0x%04x\n",
id[49], id[53], id[63], id[64], id[75], id[80],
id[81], id[82], id[83], id[84], id[88], id[93]);
}
/**
* ata_id_xfermask - Compute xfermask from the given IDENTIFY data
* @id: IDENTIFY data to compute xfer mask from
*
* Compute the xfermask for this device. This is not as trivial
* as it seems if we must consider early devices correctly.
*
* FIXME: pre-IDE drive timing (do we care?).
*
* LOCKING:
* None.
*
* RETURNS:
* Computed xfermask
*/
unsigned int ata_id_xfermask(const u16 *id)
{
unsigned int pio_mask, mwdma_mask, udma_mask;
/* Usual case. Word 53 indicates word 64 is valid */
if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
pio_mask <<= 3;
pio_mask |= 0x7;
} else {
/* If word 64 isn't valid then Word 51 high byte holds
* the PIO timing number for the maximum. Turn it into
* a mask.
*/
u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
if (mode < 5) /* Valid PIO range */
pio_mask = (2 << mode) - 1;
else
pio_mask = 1;
/* But wait.. there's more. Design your standards by
* committee and you too can get a free iordy field to
* process. However it is the speeds not the modes that
* are supported... Note drivers using the timing API
* will get this right anyway
*/
}
mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
if (ata_id_is_cfa(id)) {
/*
* Process compact flash extended modes
*/
int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
if (pio)
pio_mask |= (1 << 5);
if (pio > 1)
pio_mask |= (1 << 6);
if (dma)
mwdma_mask |= (1 << 3);
if (dma > 1)
mwdma_mask |= (1 << 4);
}
udma_mask = 0;
if (id[ATA_ID_FIELD_VALID] & (1 << 2))
udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
EXPORT_SYMBOL_GPL(ata_id_xfermask);
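/*
 * Worked example (illustrative): when word 53 flags word 64 as valid
 * and id[ATA_ID_PIO_MODES] == 0x03 (PIO3 and PIO4 advertised), the
 * mask becomes (0x03 << 3) | 0x7 == 0x1f, i.e. PIO0-4; the legacy
 * modes PIO0-2 are always assumed supported.
 */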
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
struct completion *waiting = qc->private_data;
complete(waiting);
}
/**
* ata_exec_internal_sg - execute libata internal command
* @dev: Device to which the command is sent
* @tf: Taskfile registers for the command and the result
* @cdb: CDB for packet command
* @dma_dir: Data transfer direction of the command
* @sgl: sg list for the data buffer of the command
* @n_elem: Number of sg entries
* @timeout: Timeout in msecs (0 for default)
*
* Executes libata internal command with timeout. @tf contains
* command on entry and result on return. Timeout and error
* conditions are reported via return value. No recovery action
* is taken after a command times out. It's caller's duty to
* clean up after timeout.
*
* LOCKING:
* None. Should be called with kernel context, might sleep.
*
* RETURNS:
* Zero on success, AC_ERR_* mask on failure
*/
static unsigned ata_exec_internal_sg(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, struct scatterlist *sgl,
unsigned int n_elem, unsigned int timeout)
{
struct ata_link *link = dev->link;
struct ata_port *ap = link->ap;
u8 command = tf->command;
int auto_timeout = 0;
struct ata_queued_cmd *qc;
unsigned int preempted_tag;
u32 preempted_sactive;
u64 preempted_qc_active;
int preempted_nr_active_links;
DECLARE_COMPLETION_ONSTACK(wait);
unsigned long flags;
unsigned int err_mask;
int rc;
spin_lock_irqsave(ap->lock, flags);
/* no internal command while frozen */
if (ata_port_is_frozen(ap)) {
spin_unlock_irqrestore(ap->lock, flags);
return AC_ERR_SYSTEM;
}
/* initialize internal qc */
qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
qc->tag = ATA_TAG_INTERNAL;
qc->hw_tag = 0;
qc->scsicmd = NULL;
qc->ap = ap;
qc->dev = dev;
ata_qc_reinit(qc);
preempted_tag = link->active_tag;
preempted_sactive = link->sactive;
preempted_qc_active = ap->qc_active;
preempted_nr_active_links = ap->nr_active_links;
link->active_tag = ATA_TAG_POISON;
link->sactive = 0;
ap->qc_active = 0;
ap->nr_active_links = 0;
/* prepare & issue qc */
qc->tf = *tf;
if (cdb)
memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
/* some SATA bridges need us to indicate data xfer direction */
if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
dma_dir == DMA_FROM_DEVICE)
qc->tf.feature |= ATAPI_DMADIR;
qc->flags |= ATA_QCFLAG_RESULT_TF;
qc->dma_dir = dma_dir;
if (dma_dir != DMA_NONE) {
unsigned int i, buflen = 0;
struct scatterlist *sg;
for_each_sg(sgl, sg, n_elem, i)
buflen += sg->length;
ata_sg_init(qc, sgl, n_elem);
qc->nbytes = buflen;
}
qc->private_data = &wait;
qc->complete_fn = ata_qc_complete_internal;
ata_qc_issue(qc);
spin_unlock_irqrestore(ap->lock, flags);
if (!timeout) {
if (ata_probe_timeout)
timeout = ata_probe_timeout * 1000;
else {
timeout = ata_internal_cmd_timeout(dev, command);
auto_timeout = 1;
}
}
ata_eh_release(ap);
rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
ata_eh_acquire(ap);
ata_sff_flush_pio_task(ap);
if (!rc) {
spin_lock_irqsave(ap->lock, flags);
/* We're racing with irq here. If we lose, the
* following test prevents us from completing the qc
* twice. If we win, the port is frozen and will be
* cleaned up by ->post_internal_cmd().
*/
if (qc->flags & ATA_QCFLAG_ACTIVE) {
qc->err_mask |= AC_ERR_TIMEOUT;
ata_port_freeze(ap);
ata_dev_warn(dev, "qc timeout after %u msecs (cmd 0x%x)\n",
timeout, command);
}
spin_unlock_irqrestore(ap->lock, flags);
}
/* do post_internal_cmd */
if (ap->ops->post_internal_cmd)
ap->ops->post_internal_cmd(qc);
/* perform minimal error analysis */
if (qc->flags & ATA_QCFLAG_EH) {
if (qc->result_tf.status & (ATA_ERR | ATA_DF))
qc->err_mask |= AC_ERR_DEV;
if (!qc->err_mask)
qc->err_mask |= AC_ERR_OTHER;
if (qc->err_mask & ~AC_ERR_OTHER)
qc->err_mask &= ~AC_ERR_OTHER;
} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
qc->result_tf.status |= ATA_SENSE;
}
/* finish up */
spin_lock_irqsave(ap->lock, flags);
*tf = qc->result_tf;
err_mask = qc->err_mask;
ata_qc_free(qc);
link->active_tag = preempted_tag;
link->sactive = preempted_sactive;
ap->qc_active = preempted_qc_active;
ap->nr_active_links = preempted_nr_active_links;
spin_unlock_irqrestore(ap->lock, flags);
if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
ata_internal_cmd_timed_out(dev, command);
return err_mask;
}
/**
* ata_exec_internal - execute libata internal command
* @dev: Device to which the command is sent
* @tf: Taskfile registers for the command and the result
* @cdb: CDB for packet command
* @dma_dir: Data transfer direction of the command
* @buf: Data buffer of the command
* @buflen: Length of data buffer
* @timeout: Timeout in msecs (0 for default)
*
* Wrapper around ata_exec_internal_sg() which takes simple
* buffer instead of sg list.
*
* LOCKING:
* None. Should be called with kernel context, might sleep.
*
* RETURNS:
* Zero on success, AC_ERR_* mask on failure
*/
unsigned ata_exec_internal(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, void *buf, unsigned int buflen,
unsigned int timeout)
{
struct scatterlist *psg = NULL, sg;
unsigned int n_elem = 0;
if (dma_dir != DMA_NONE) {
WARN_ON(!buf);
sg_init_one(&sg, buf, buflen);
psg = &sg;
n_elem++;
}
return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
timeout);
}
/**
* ata_pio_need_iordy - check if iordy needed
* @adev: ATA device
*
* Check if the current speed of the device requires IORDY. Used
* by various controllers for chip configuration.
*/
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
/* Don't set IORDY if we're preparing for reset. IORDY may
* lead to controller lock up on certain controllers if the
* port is not occupied. See bko#11703 for details.
*/
if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
return 0;
/* Controller doesn't support IORDY. Probably a pointless
* check as the caller should know this.
*/
if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
return 0;
/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
if (ata_id_is_cfa(adev->id)
&& (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
return 0;
/* For PIO3 and higher, IORDY is mandatory */
if (adev->pio_mode > XFER_PIO_2)
return 1;
/* We turn it on when possible */
if (ata_id_has_iordy(adev->id))
return 1;
return 0;
}
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
/**
* ata_pio_mask_no_iordy - Return the non IORDY mask
* @adev: ATA device
*
* Compute the mask of PIO modes usable when iordy is not in use. A
* conservative PIO0-1 mask is returned when the drive provides no
* specific non-iordy cycle time.
*/
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
/* If we have no drive specific rule, then PIO 2 is non IORDY */
if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
u16 pio = adev->id[ATA_ID_EIDE_PIO];
/* Is the speed faster than the drive allows non IORDY ? */
if (pio) {
/* This is cycle times not frequency - watch the logic! */
if (pio > 240) /* PIO2 is 240nS per cycle */
return 3 << ATA_SHIFT_PIO;
return 7 << ATA_SHIFT_PIO;
}
}
return 3 << ATA_SHIFT_PIO;
}
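/*
 * Worked example (illustrative): an EIDE drive reporting a minimum
 * non-iordy cycle time of 300 ns in id[ATA_ID_EIDE_PIO] exceeds the
 * 240 ns PIO2 cycle, so only the PIO0-1 mask (3 << ATA_SHIFT_PIO) is
 * returned; a drive reporting 180 ns gets the PIO0-2 mask.
 */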
/**
* ata_do_dev_read_id - default ID read method
* @dev: device
* @tf: proposed taskfile
* @id: data buffer
*
* Issue the identify taskfile and hand back the buffer containing
* identify data. For some RAID controllers and for pre ATA devices
* this function is wrapped or replaced by the driver
*/
unsigned int ata_do_dev_read_id(struct ata_device *dev,
struct ata_taskfile *tf, __le16 *id)
{
return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}
EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
/**
* ata_dev_read_id - Read ID data from the specified device
* @dev: target device
* @p_class: pointer to class of the target device (may be changed)
* @flags: ATA_READID_* flags
* @id: buffer to read IDENTIFY data into
*
* Read ID data from the specified device. ATA_CMD_ID_ATA is
* performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
* devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
* for pre-ATA4 drives.
*
* FIXME: ATA_CMD_ID_ATA is optional for early drives and right
* now we abort if we hit that case.
*
* LOCKING:
* Kernel thread context (may sleep)
*
* RETURNS:
* 0 on success, -errno otherwise.
*/
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
unsigned int flags, u16 *id)
{
struct ata_port *ap = dev->link->ap;
unsigned int class = *p_class;
struct ata_taskfile tf;
unsigned int err_mask = 0;
const char *reason;
bool is_semb = class == ATA_DEV_SEMB;
int may_fallback = 1, tried_spinup = 0;
int rc;
retry:
ata_tf_init(dev, &tf);
switch (class) {
case ATA_DEV_SEMB:
class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
fallthrough;
case ATA_DEV_ATA:
case ATA_DEV_ZAC:
tf.command = ATA_CMD_ID_ATA;
break;
case ATA_DEV_ATAPI:
tf.command = ATA_CMD_ID_ATAPI;
break;
default:
rc = -ENODEV;
reason = "unsupported class";
goto err_out;
}
tf.protocol = ATA_PROT_PIO;
/* Some devices choke if TF registers contain garbage. Make
* sure those are properly initialized.
*/
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
/* Device presence detection is unreliable on some
* controllers. Always poll IDENTIFY if available.
*/
tf.flags |= ATA_TFLAG_POLLING;
if (ap->ops->read_id)
err_mask = ap->ops->read_id(dev, &tf, (__le16 *)id);
else
err_mask = ata_do_dev_read_id(dev, &tf, (__le16 *)id);
if (err_mask) {
if (err_mask & AC_ERR_NODEV_HINT) {
ata_dev_dbg(dev, "NODEV after polling detection\n");
return -ENOENT;
}
if (is_semb) {
ata_dev_info(dev,
"IDENTIFY failed on device w/ SEMB sig, disabled\n");
/* SEMB is not supported yet */
*p_class = ATA_DEV_SEMB_UNSUP;
return 0;
}
if ((err_mask == AC_ERR_DEV) && (tf.error & ATA_ABORTED)) {
/* Device or controller might have reported
* the wrong device class. Give a shot at the
* other IDENTIFY if the current one is
* aborted by the device.
*/
if (may_fallback) {
may_fallback = 0;
if (class == ATA_DEV_ATA)
class = ATA_DEV_ATAPI;
else
class = ATA_DEV_ATA;
goto retry;
}
/* Control reaches here iff the device aborted
* both flavors of IDENTIFYs which happens
* sometimes with phantom devices.
*/
ata_dev_dbg(dev,
"both IDENTIFYs aborted, assuming NODEV\n");
return -ENOENT;
}
rc = -EIO;
reason = "I/O error";
goto err_out;
}
if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
ata_dev_info(dev, "dumping IDENTIFY data, "
"class=%d may_fallback=%d tried_spinup=%d\n",
class, may_fallback, tried_spinup);
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
}
/* Falling back doesn't make sense if ID data was read
* successfully at least once.
*/
may_fallback = 0;
swap_buf_le16(id, ATA_ID_WORDS);
/* sanity check */
rc = -EINVAL;
reason = "device reports invalid type";
if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
goto err_out;
if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
ata_id_is_ata(id)) {
ata_dev_dbg(dev,
"host indicates ignore ATA devices, ignored\n");
return -ENOENT;
}
} else {
if (ata_id_is_ata(id))
goto err_out;
}
if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
tried_spinup = 1;
/*
* Drive powered-up in standby mode, and requires a specific
* SET_FEATURES spin-up subcommand before it will accept
* anything other than the original IDENTIFY command.
*/
err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
if (err_mask && id[2] != 0x738c) {
rc = -EIO;
reason = "SPINUP failed";
goto err_out;
}
/*
* If the drive initially returned incomplete IDENTIFY info,
* we now must reissue the IDENTIFY command.
*/
if (id[2] == 0x37c8)
goto retry;
}
if ((flags & ATA_READID_POSTRESET) &&
(class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
/*
* The exact sequence expected by certain pre-ATA4 drives is:
* SRST RESET
* IDENTIFY (optional in early ATA)
* INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
* anything else..
* Some drives were very specific about that exact sequence.
*
* Note that ATA4 says lba is mandatory so the second check
* should never trigger.
*/
if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
err_mask = ata_dev_init_params(dev, id[3], id[6]);
if (err_mask) {
rc = -EIO;
reason = "INIT_DEV_PARAMS failed";
goto err_out;
}
/* current CHS translation info (id[53-58]) might be
* changed. reread the identify device info.
*/
flags &= ~ATA_READID_POSTRESET;
goto retry;
}
}
*p_class = class;
return 0;
err_out:
ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
reason, err_mask);
return rc;
}
/**
* ata_read_log_page - read a specific log page
* @dev: target device
* @log: log to read
* @page: page to read
* @buf: buffer to store read page
* @sectors: number of sectors to read
*
* Read log page using READ_LOG_EXT command.
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* 0 on success, AC_ERR_* mask otherwise.
*/
unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
u8 page, void *buf, unsigned int sectors)
{
unsigned long ap_flags = dev->link->ap->flags;
struct ata_taskfile tf;
unsigned int err_mask;
bool dma = false;
ata_dev_dbg(dev, "read log page - log 0x%x, page 0x%x\n", log, page);
/*
* Return error without actually issuing the command on controllers
* which e.g. lockup on a read log page.
*/
if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
return AC_ERR_DEV;
retry:
ata_tf_init(dev, &tf);
if (ata_dma_enabled(dev) && ata_id_has_read_log_dma_ext(dev->id) &&
!(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
tf.command = ATA_CMD_READ_LOG_DMA_EXT;
tf.protocol = ATA_PROT_DMA;
dma = true;
} else {
tf.command = ATA_CMD_READ_LOG_EXT;
tf.protocol = ATA_PROT_PIO;
dma = false;
}
tf.lbal = log;
tf.lbam = page;
tf.nsect = sectors;
tf.hob_nsect = sectors >> 8;
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
buf, sectors * ATA_SECT_SIZE, 0);
if (err_mask) {
if (dma) {
dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
if (!ata_port_is_frozen(dev->link->ap))
goto retry;
}
ata_dev_err(dev,
"Read log 0x%02x page 0x%02x failed, Emask 0x%x\n",
(unsigned int)log, (unsigned int)page, err_mask);
}
return err_mask;
}
static int ata_log_supported(struct ata_device *dev, u8 log)
{
struct ata_port *ap = dev->link->ap;
if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
return 0;
if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
return 0;
return get_unaligned_le16(&ap->sector_buf[log * 2]);
}
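/*
 * Illustrative note: the log directory (log 0x00) stores one
 * little-endian 16-bit page count per log number, so the entry for
 * log 0x13 sits at byte offset 0x26; a nonzero count means the log
 * is supported.
 */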
static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
{
struct ata_port *ap = dev->link->ap;
unsigned int err, i;
if (dev->horkage & ATA_HORKAGE_NO_ID_DEV_LOG)
return false;
if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
/*
* IDENTIFY DEVICE data log is defined as mandatory starting
* with ACS-3 (ATA version 10). Warn about the missing log
* for drives which implement this ATA level or above.
*/
if (ata_id_major_version(dev->id) >= 10)
ata_dev_warn(dev,
"ATA Identify Device Log not supported\n");
dev->horkage |= ATA_HORKAGE_NO_ID_DEV_LOG;
return false;
}
/*
* Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
* supported.
*/
err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
1);
if (err)
return false;
for (i = 0; i < ap->sector_buf[8]; i++) {
if (ap->sector_buf[9 + i] == page)
return true;
}
return false;
}
static int ata_do_link_spd_horkage(struct ata_device *dev)
{
struct ata_link *plink = ata_dev_phys_link(dev);
u32 target, target_limit;
if (!sata_scr_valid(plink))
return 0;
if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
target = 1;
else
return 0;
target_limit = (1 << target) - 1;
/* if already on stricter limit, no need to push further */
if (plink->sata_spd_limit <= target_limit)
return 0;
plink->sata_spd_limit = target_limit;
/* Request another EH round by returning -EAGAIN if link is
* going faster than the target speed. Forward progress is
* guaranteed by setting sata_spd_limit to target_limit above.
*/
if (plink->sata_spd > target) {
ata_dev_info(dev, "applying link speed limit horkage to %s\n",
sata_spd_string(target));
return -EAGAIN;
}
return 0;
}
static inline u8 ata_dev_knobble(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
return 0;
return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
return;
}
err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
0, ap->sector_buf, 1);
if (!err_mask) {
u8 *cmds = dev->ncq_send_recv_cmds;
dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
ata_dev_dbg(dev, "disabling queued TRIM support\n");
cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
}
}
}
static void ata_dev_config_ncq_non_data(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
ata_dev_warn(dev,
"NCQ Send/Recv Log not supported\n");
return;
}
err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
0, ap->sector_buf, 1);
if (!err_mask) {
u8 *cmds = dev->ncq_non_data_cmds;
memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
}
}
static void ata_dev_config_ncq_prio(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
return;
err_mask = ata_read_log_page(dev,
ATA_LOG_IDENTIFY_DEVICE,
ATA_LOG_SATA_SETTINGS,
ap->sector_buf,
1);
if (err_mask)
goto not_supported;
if (!(ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
goto not_supported;
dev->flags |= ATA_DFLAG_NCQ_PRIO;
return;
not_supported:
dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLED;
dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
}
static bool ata_dev_check_adapter(struct ata_device *dev,
unsigned short vendor_id)
{
struct pci_dev *pcidev = NULL;
struct device *parent_dev = NULL;
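/* Walk up the device tree to the first PCI ancestor and match its vendor ID */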
for (parent_dev = dev->tdev.parent; parent_dev != NULL;
parent_dev = parent_dev->parent) {
if (dev_is_pci(parent_dev)) {
pcidev = to_pci_dev(parent_dev);
if (pcidev->vendor == vendor_id)
return true;
break;
}
}
return false;
}
static int ata_dev_config_ncq(struct ata_device *dev,
char *desc, size_t desc_sz)
{
struct ata_port *ap = dev->link->ap;
int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
unsigned int err_mask;
char *aa_desc = "";
if (!ata_id_has_ncq(dev->id)) {
desc[0] = '\0';
return 0;
}
if (!IS_ENABLED(CONFIG_SATA_HOST))
return 0;
if (dev->horkage & ATA_HORKAGE_NONCQ) {
snprintf(desc, desc_sz, "NCQ (not used)");
return 0;
}
if (dev->horkage & ATA_HORKAGE_NO_NCQ_ON_ATI &&
ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) {
snprintf(desc, desc_sz, "NCQ (not used)");
return 0;
}
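/* hdepth: what the host can queue; ddepth: what the device advertises (IDENTIFY word 75) */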
if (ap->flags & ATA_FLAG_NCQ) {
hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
dev->flags |= ATA_DFLAG_NCQ;
}
if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
(ap->flags & ATA_FLAG_FPDMA_AA) &&
ata_id_has_fpdma_aa(dev->id)) {
err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
SATA_FPDMA_AA);
if (err_mask) {
ata_dev_err(dev,
"failed to enable AA (error_mask=0x%x)\n",
err_mask);
if (err_mask != AC_ERR_DEV) {
dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
return -EIO;
}
} else
aa_desc = ", AA";
}
if (hdepth >= ddepth)
snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
else
snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
ddepth, aa_desc);
if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
if (ata_id_has_ncq_send_and_recv(dev->id))
ata_dev_config_ncq_send_recv(dev);
if (ata_id_has_ncq_non_data(dev->id))
ata_dev_config_ncq_non_data(dev);
if (ata_id_has_ncq_prio(dev->id))
ata_dev_config_ncq_prio(dev);
}
return 0;
}
static void ata_dev_config_sense_reporting(struct ata_device *dev)
{
unsigned int err_mask;
if (!ata_id_has_sense_reporting(dev->id))
return;
if (ata_id_sense_reporting_enabled(dev->id))
return;
err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
if (err_mask) {
ata_dev_dbg(dev,
"failed to enable Sense Data Reporting, Emask 0x%x\n",
err_mask);
}
}
static void ata_dev_config_zac(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
u8 *identify_buf = ap->sector_buf;
dev->zac_zones_optimal_open = U32_MAX;
dev->zac_zones_optimal_nonseq = U32_MAX;
dev->zac_zones_max_open = U32_MAX;
/*
* Always set the 'ZAC' flag for Host-managed devices.
*/
if (dev->class == ATA_DEV_ZAC)
dev->flags |= ATA_DFLAG_ZAC;
else if (ata_id_zoned_cap(dev->id) == 0x01)
/*
* Check for host-aware devices.
*/
dev->flags |= ATA_DFLAG_ZAC;
if (!(dev->flags & ATA_DFLAG_ZAC))
return;
if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
ata_dev_warn(dev,
"ATA Zoned Information Log not supported\n");
return;
}
/*
* Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
*/
err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
ATA_LOG_ZONED_INFORMATION,
identify_buf, 1);
if (!err_mask) {
u64 zoned_cap, opt_open, opt_nonseq, max_open;
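/* Each 64-bit field in this page uses bit 63 as a "contents valid" flag */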
zoned_cap = get_unaligned_le64(&identify_buf[8]);
if ((zoned_cap >> 63))
dev->zac_zoned_cap = (zoned_cap & 1);
opt_open = get_unaligned_le64(&identify_buf[24]);
if ((opt_open >> 63))
dev->zac_zones_optimal_open = (u32)opt_open;
opt_nonseq = get_unaligned_le64(&identify_buf[32]);
if ((opt_nonseq >> 63))
dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
max_open = get_unaligned_le64(&identify_buf[40]);
if ((max_open >> 63))
dev->zac_zones_max_open = (u32)max_open;
}
}
static void ata_dev_config_trusted(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
u64 trusted_cap;
unsigned int err;
if (!ata_id_has_trusted(dev->id))
return;
if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
ata_dev_warn(dev,
"Security Log not supported\n");
return;
}
err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
ap->sector_buf, 1);
if (err)
return;
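/* Qword at byte 40: bit 63 = field valid, bit 0 = Trusted Computing feature set supported */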
trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
if (!(trusted_cap & (1ULL << 63))) {
ata_dev_dbg(dev,
"Trusted Computing capability qword not valid!\n");
return;
}
if (trusted_cap & (1 << 0))
dev->flags |= ATA_DFLAG_TRUSTED;
}
static void ata_dev_config_cdl(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
bool cdl_enabled;
u64 val;
if (ata_id_major_version(dev->id) < 12)
goto not_supported;
if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE) ||
!ata_identify_page_supported(dev, ATA_LOG_SUPPORTED_CAPABILITIES) ||
!ata_identify_page_supported(dev, ATA_LOG_CURRENT_SETTINGS))
goto not_supported;
err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
ATA_LOG_SUPPORTED_CAPABILITIES,
ap->sector_buf, 1);
if (err_mask)
goto not_supported;
/* Check Command Duration Limit Supported bits */
val = get_unaligned_le64(&ap->sector_buf[168]);
if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(0)))
goto not_supported;
/* Warn the user if command duration guideline is not supported */
if (!(val & BIT_ULL(1)))
ata_dev_warn(dev,
"Command duration guideline is not supported\n");
/*
* We must have support for the sense data for successful NCQ commands
* log indicated by the successful NCQ command sense data supported bit.
*/
val = get_unaligned_le64(&ap->sector_buf[8]);
if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(47))) {
ata_dev_warn(dev,
"CDL supported but Successful NCQ Command Sense Data is not supported\n");
goto not_supported;
}
/* Without NCQ autosense, the successful NCQ commands log is useless. */
if (!ata_id_has_ncq_autosense(dev->id)) {
ata_dev_warn(dev,
"CDL supported but NCQ autosense is not supported\n");
goto not_supported;
}
/*
* If CDL is marked as enabled, make sure the feature is enabled too.
* Conversely, if CDL is disabled, make sure the feature is turned off.
*/
err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
ATA_LOG_CURRENT_SETTINGS,
ap->sector_buf, 1);
if (err_mask)
goto not_supported;
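/* Current Settings page, qword at byte 8: bit 63 = valid, bit 21 = CDL enabled */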
val = get_unaligned_le64(&ap->sector_buf[8]);
cdl_enabled = val & BIT_ULL(63) && val & BIT_ULL(21);
if (dev->flags & ATA_DFLAG_CDL_ENABLED) {
if (!cdl_enabled) {
/* Enable CDL on the device */
err_mask = ata_dev_set_feature(dev, SETFEATURES_CDL, 1);
if (err_mask) {
ata_dev_err(dev,
"Enable CDL feature failed\n");
goto not_supported;
}
}
} else {
if (cdl_enabled) {
/* Disable CDL on the device */
err_mask = ata_dev_set_feature(dev, SETFEATURES_CDL, 0);
if (err_mask) {
ata_dev_err(dev,
"Disable CDL feature failed\n");
goto not_supported;
}
}
}
/*
* While CDL itself has to be enabled using sysfs, CDL requires that
* sense data for successful NCQ commands is enabled to work properly.
* Just like ata_dev_config_sense_reporting(), enable it unconditionally
* if supported.
*/
if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(18))) {
err_mask = ata_dev_set_feature(dev,
SETFEATURE_SENSE_DATA_SUCC_NCQ, 0x1);
if (err_mask) {
ata_dev_warn(dev,
"failed to enable Sense Data for successful NCQ commands, Emask 0x%x\n",
err_mask);
goto not_supported;
}
}
/*
* Allocate a buffer to handle reading the sense data for successful
* NCQ Commands log page for commands using a CDL with one of the limit
* policy set to 0xD (successful completion with sense data available
* bit set).
*/
if (!ap->ncq_sense_buf) {
ap->ncq_sense_buf = kmalloc(ATA_LOG_SENSE_NCQ_SIZE, GFP_KERNEL);
if (!ap->ncq_sense_buf)
goto not_supported;
}
/*
* Command duration limits is supported: cache the CDL log page 18h
* (command duration descriptors).
*/
err_mask = ata_read_log_page(dev, ATA_LOG_CDL, 0, ap->sector_buf, 1);
if (err_mask) {
ata_dev_warn(dev, "Read Command Duration Limits log failed\n");
goto not_supported;
}
memcpy(dev->cdl, ap->sector_buf, ATA_LOG_CDL_SIZE);
dev->flags |= ATA_DFLAG_CDL;
return;
not_supported:
dev->flags &= ~(ATA_DFLAG_CDL | ATA_DFLAG_CDL_ENABLED);
kfree(ap->ncq_sense_buf);
ap->ncq_sense_buf = NULL;
}
static int ata_dev_config_lba(struct ata_device *dev)
{
const u16 *id = dev->id;
const char *lba_desc;
char ncq_desc[24];
int ret;
dev->flags |= ATA_DFLAG_LBA;
if (ata_id_has_lba48(id)) {
lba_desc = "LBA48";
dev->flags |= ATA_DFLAG_LBA48;
if (dev->n_sectors >= (1UL << 28) &&
ata_id_has_flush_ext(id))
dev->flags |= ATA_DFLAG_FLUSH_EXT;
} else {
lba_desc = "LBA";
}
/* config NCQ */
ret = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
/* print device info to dmesg */
if (ata_dev_print_info(dev))
ata_dev_info(dev,
"%llu sectors, multi %u: %s %s\n",
(unsigned long long)dev->n_sectors,
dev->multi_count, lba_desc, ncq_desc);
return ret;
}
static void ata_dev_config_chs(struct ata_device *dev)
{
const u16 *id = dev->id;
if (ata_id_current_chs_valid(id)) {
/* Current CHS translation is valid. */
dev->cylinders = id[54];
dev->heads = id[55];
dev->sectors = id[56];
} else {
/* Default translation */
dev->cylinders = id[1];
dev->heads = id[3];
dev->sectors = id[6];
}
/* print device info to dmesg */
if (ata_dev_print_info(dev))
ata_dev_info(dev,
"%llu sectors, multi %u, CHS %u/%u/%u\n",
(unsigned long long)dev->n_sectors,
dev->multi_count, dev->cylinders,
dev->heads, dev->sectors);
}
static void ata_dev_config_fua(struct ata_device *dev)
{
/* Ignore FUA support if its use is disabled globally */
if (!libata_fua)
goto nofua;
/* Ignore devices without support for WRITE DMA FUA EXT */
if (!(dev->flags & ATA_DFLAG_LBA48) || !ata_id_has_fua(dev->id))
goto nofua;
/* Ignore known bad devices and devices that lack NCQ support */
if (!ata_ncq_supported(dev) || (dev->horkage & ATA_HORKAGE_NO_FUA))
goto nofua;
dev->flags |= ATA_DFLAG_FUA;
return;
nofua:
dev->flags &= ~ATA_DFLAG_FUA;
}
static void ata_dev_config_devslp(struct ata_device *dev)
{
u8 *sata_setting = dev->link->ap->sector_buf;
unsigned int err_mask;
int i, j;
/*
* Check device sleep capability. Get DevSlp timing variables
* from SATA Settings page of Identify Device Data Log.
*/
if (!ata_id_has_devslp(dev->id) ||
!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
return;
err_mask = ata_read_log_page(dev,
ATA_LOG_IDENTIFY_DEVICE,
ATA_LOG_SATA_SETTINGS,
sata_setting, 1);
if (err_mask)
return;
dev->flags |= ATA_DFLAG_DEVSLP;
for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
j = ATA_LOG_DEVSLP_OFFSET + i;
dev->devslp_timing[i] = sata_setting[j];
}
}
static void ata_dev_config_cpr(struct ata_device *dev)
{
unsigned int err_mask;
size_t buf_len;
int i, nr_cpr = 0;
struct ata_cpr_log *cpr_log = NULL;
u8 *desc, *buf = NULL;
if (ata_id_major_version(dev->id) < 11)
goto out;
buf_len = ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES);
if (buf_len == 0)
goto out;
/*
* Read the concurrent positioning ranges log (0x47). We can have at
* most 255 32B range descriptors plus a 64B header. This log varies in
* size, so use the size reported in the GPL directory. Reading beyond
* the supported length will result in an error.
*/
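/* ata_log_supported() reports the log size in 512-byte pages */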
buf_len <<= 9;
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
goto out;
err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES,
0, buf, buf_len >> 9);
if (err_mask)
goto out;
nr_cpr = buf[0];
if (!nr_cpr)
goto out;
cpr_log = kzalloc(struct_size(cpr_log, cpr, nr_cpr), GFP_KERNEL);
if (!cpr_log)
goto out;
cpr_log->nr_cpr = nr_cpr;
desc = &buf[64];
for (i = 0; i < nr_cpr; i++, desc += 32) {
cpr_log->cpr[i].num = desc[0];
cpr_log->cpr[i].num_storage_elements = desc[1];
cpr_log->cpr[i].start_lba = get_unaligned_le64(&desc[8]);
cpr_log->cpr[i].num_lbas = get_unaligned_le64(&desc[16]);
}
out:
swap(dev->cpr_log, cpr_log);
kfree(cpr_log);
kfree(buf);
}
static void ata_dev_print_features(struct ata_device *dev)
{
if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
return;
ata_dev_info(dev,
"Features:%s%s%s%s%s%s%s%s\n",
dev->flags & ATA_DFLAG_FUA ? " FUA" : "",
dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
dev->flags & ATA_DFLAG_CDL ? " CDL" : "",
dev->cpr_log ? " CPR" : "");
}
/**
* ata_dev_configure - Configure the specified ATA/ATAPI device
* @dev: Target device to configure
*
* Configure @dev according to @dev->id. Generic and low-level
* driver specific fixups are also applied.
*
* LOCKING:
* Kernel thread context (may sleep)
*
* RETURNS:
* 0 on success, -errno otherwise
*/
int ata_dev_configure(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
bool print_info = ata_dev_print_info(dev);
const u16 *id = dev->id;
unsigned int xfer_mask;
unsigned int err_mask;
char revbuf[7]; /* XYZ-99\0 */
char fwrevbuf[ATA_ID_FW_REV_LEN+1];
char modelbuf[ATA_ID_PROD_LEN+1];
int rc;
if (!ata_dev_enabled(dev)) {
ata_dev_dbg(dev, "no device\n");
return 0;
}
/* set horkage */
dev->horkage |= ata_dev_blacklisted(dev);
ata_force_horkage(dev);
if (dev->horkage & ATA_HORKAGE_DISABLE) {
ata_dev_info(dev, "unsupported device, disabling\n");
ata_dev_disable(dev);
return 0;
}
if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
dev->class == ATA_DEV_ATAPI) {
ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
atapi_enabled ? "not supported with this driver"
: "disabled");
ata_dev_disable(dev);
return 0;
}
rc = ata_do_link_spd_horkage(dev);
if (rc)
return rc;
/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
(id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
dev->horkage |= ATA_HORKAGE_NOLPM;
if (ap->flags & ATA_FLAG_NO_LPM)
dev->horkage |= ATA_HORKAGE_NOLPM;
if (dev->horkage & ATA_HORKAGE_NOLPM) {
ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
}
/* let ACPI work its magic */
rc = ata_acpi_on_devcfg(dev);
if (rc)
return rc;
/* massage HPA, do it early as it might change IDENTIFY data */
rc = ata_hpa_resize(dev);
if (rc)
return rc;
/* print device capabilities */
ata_dev_dbg(dev,
"%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
"85:%04x 86:%04x 87:%04x 88:%04x\n",
__func__,
id[49], id[82], id[83], id[84],
id[85], id[86], id[87], id[88]);
/* initialize to-be-configured parameters */
dev->flags &= ~ATA_DFLAG_CFG_MASK;
dev->max_sectors = 0;
dev->cdb_len = 0;
dev->n_sectors = 0;
dev->cylinders = 0;
dev->heads = 0;
dev->sectors = 0;
dev->multi_count = 0;
/*
* common ATA, ATAPI feature tests
*/
/* find max transfer mode; for printk only */
xfer_mask = ata_id_xfermask(id);
ata_dump_id(dev, id);
/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
sizeof(fwrevbuf));
ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
sizeof(modelbuf));
/* ATA-specific feature tests */
if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
if (ata_id_is_cfa(id)) {
/* CPRM may make this media unusable */
if (id[ATA_ID_CFA_KEY_MGMT] & 1)
ata_dev_warn(dev,
"supports DRM functions and may not be fully accessible\n");
snprintf(revbuf, 7, "CFA");
} else {
snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
/* Warn the user if the device has TPM extensions */
if (ata_id_has_tpm(id))
ata_dev_warn(dev,
"supports DRM functions and may not be fully accessible\n");
}
dev->n_sectors = ata_id_n_sectors(id);
/* get current R/W Multiple count setting */
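/* word 47: 0x80 signature in the high byte, max sectors per interrupt
 * in the low byte; word 59 bit 8 flags the current setting as valid */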
if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
unsigned int max = dev->id[47] & 0xff;
unsigned int cnt = dev->id[59] & 0xff;
/* only recognize/allow powers of two here */
if (is_power_of_2(max) && is_power_of_2(cnt))
if (cnt <= max)
dev->multi_count = cnt;
}
/* print device info to dmesg */
if (print_info)
ata_dev_info(dev, "%s: %s, %s, max %s\n",
revbuf, modelbuf, fwrevbuf,
ata_mode_string(xfer_mask));
if (ata_id_has_lba(id)) {
rc = ata_dev_config_lba(dev);
if (rc)
return rc;
} else {
ata_dev_config_chs(dev);
}
ata_dev_config_fua(dev);
ata_dev_config_devslp(dev);
ata_dev_config_sense_reporting(dev);
ata_dev_config_zac(dev);
ata_dev_config_trusted(dev);
ata_dev_config_cpr(dev);
ata_dev_config_cdl(dev);
dev->cdb_len = 32;
if (print_info)
ata_dev_print_features(dev);
}
/* ATAPI-specific feature tests */
else if (dev->class == ATA_DEV_ATAPI) {
const char *cdb_intr_string = "";
const char *atapi_an_string = "";
const char *dma_dir_string = "";
u32 sntf;
rc = atapi_cdb_len(id);
if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
ata_dev_warn(dev, "unsupported CDB len %d\n", rc);
rc = -EINVAL;
goto err_out_nosup;
}
dev->cdb_len = (unsigned int) rc;
/* Enable ATAPI AN if both the host and device have
* the support. If PMP is attached, SNTF is required
* to enable ATAPI AN to discern between PHY status
* changed notifications and ATAPI ANs.
*/
if (atapi_an &&
(ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
(!sata_pmp_attached(ap) ||
sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
/* issue SET feature command to turn this on */
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_ENABLE, SATA_AN);
if (err_mask)
ata_dev_err(dev,
"failed to enable ATAPI AN (err_mask=0x%x)\n",
err_mask);
else {
dev->flags |= ATA_DFLAG_AN;
atapi_an_string = ", ATAPI AN";
}
}
if (ata_id_cdb_intr(dev->id)) {
dev->flags |= ATA_DFLAG_CDB_INTR;
cdb_intr_string = ", CDB intr";
}
if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) ||
    atapi_id_dmadir(dev->id)) {
dev->flags |= ATA_DFLAG_DMADIR;
dma_dir_string = ", DMADIR";
}
if (ata_id_has_da(dev->id)) {
dev->flags |= ATA_DFLAG_DA;
zpodd_init(dev);
}
/* print device info to dmesg */
if (print_info)
ata_dev_info(dev,
"ATAPI: %s, %s, max %s%s%s%s\n",
modelbuf, fwrevbuf,
ata_mode_string(xfer_mask),
cdb_intr_string, atapi_an_string,
dma_dir_string);
}
/* determine max_sectors */
dev->max_sectors = ATA_MAX_SECTORS;
if (dev->flags & ATA_DFLAG_LBA48)
dev->max_sectors = ATA_MAX_SECTORS_LBA48;
/* Limit PATA drive on SATA cable bridge transfers to udma5,
200 sectors */
if (ata_dev_knobble(dev)) {
if (print_info)
ata_dev_info(dev, "applying bridge limits\n");
dev->udma_mask &= ATA_UDMA5;
dev->max_sectors = ATA_MAX_SECTORS;
}
if ((dev->class == ATA_DEV_ATAPI) &&
(atapi_command_packet_set(id) == TYPE_TAPE)) {
dev->max_sectors = ATA_MAX_SECTORS_TAPE;
dev->horkage |= ATA_HORKAGE_STUCK_ERR;
}
if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
dev->max_sectors);
if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
dev->max_sectors);
if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
dev->max_sectors = ATA_MAX_SECTORS_LBA48;
if (ap->ops->dev_config)
ap->ops->dev_config(dev);
if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
/* Let the user know. We don't want to disallow opens for
rescue purposes, or in case the vendor is just a blithering
idiot. Do this after the dev_config call as some controllers
with buggy firmware may want to avoid reporting false device
bugs */
if (print_info) {
ata_dev_warn(dev,
"Drive reports diagnostics failure. This may indicate a drive\n");
ata_dev_warn(dev,
"fault or invalid emulation. Contact drive vendor for information.\n");
}
}
if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
}
return 0;
err_out_nosup:
return rc;
}
/**
* ata_cable_40wire - return 40 wire cable type
* @ap: port
*
* Helper method for drivers which want to hardwire 40 wire cable
* detection.
*/
int ata_cable_40wire(struct ata_port *ap)
{
return ATA_CBL_PATA40;
}
EXPORT_SYMBOL_GPL(ata_cable_40wire);
/**
* ata_cable_80wire - return 80 wire cable type
* @ap: port
*
* Helper method for drivers which want to hardwire 80 wire cable
* detection.
*/
int ata_cable_80wire(struct ata_port *ap)
{
return ATA_CBL_PATA80;
}
EXPORT_SYMBOL_GPL(ata_cable_80wire);
/**
* ata_cable_unknown - return unknown PATA cable.
* @ap: port
*
* Helper method for drivers which have no PATA cable detection.
*/
int ata_cable_unknown(struct ata_port *ap)
{
return ATA_CBL_PATA_UNK;
}
EXPORT_SYMBOL_GPL(ata_cable_unknown);
/**
* ata_cable_ignore - return ignored PATA cable.
* @ap: port
*
* Helper method for drivers which don't use cable type to limit
* transfer mode.
*/
int ata_cable_ignore(struct ata_port *ap)
{
return ATA_CBL_PATA_IGN;
}
EXPORT_SYMBOL_GPL(ata_cable_ignore);
/**
* ata_cable_sata - return SATA cable type
* @ap: port
*
* Helper method for drivers which have SATA cables
*/
int ata_cable_sata(struct ata_port *ap)
{
return ATA_CBL_SATA;
}
EXPORT_SYMBOL_GPL(ata_cable_sata);
/**
* sata_print_link_status - Print SATA link status
* @link: SATA link to printk link status about
*
* This function prints link speed and status of a SATA link.
*
* LOCKING:
* None.
*/
static void sata_print_link_status(struct ata_link *link)
{
u32 sstatus, scontrol, tmp;
if (sata_scr_read(link, SCR_STATUS, &sstatus))
return;
if (sata_scr_read(link, SCR_CONTROL, &scontrol))
return;
if (ata_phys_link_online(link)) {
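/* SStatus bits 7:4 (SPD) report the negotiated link speed */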
tmp = (sstatus >> 4) & 0xf;
ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
sata_spd_string(tmp), sstatus, scontrol);
} else {
ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
sstatus, scontrol);
}
}
/**
* ata_dev_pair - return other device on cable
* @adev: device
*
* Obtain the other device on the same cable, or NULL if none is
* present.
*/
struct ata_device *ata_dev_pair(struct ata_device *adev)
{
struct ata_link *link = adev->link;
struct ata_device *pair = &link->device[1 - adev->devno];
if (!ata_dev_enabled(pair))
return NULL;
return pair;
}
EXPORT_SYMBOL_GPL(ata_dev_pair);
/**
* sata_down_spd_limit - adjust SATA spd limit downward
* @link: Link to adjust SATA spd limit for
* @spd_limit: Additional limit
*
* Adjust SATA spd limit of @link downward. Note that this
* function only adjusts the limit. The change must be applied
* using sata_set_spd().
*
* If @spd_limit is non-zero, the speed is limited to equal to or
* lower than @spd_limit if such speed is supported. If
* @spd_limit is slower than any supported speed, only the lowest
* supported speed is allowed.
*
* LOCKING:
* Inherited from caller.
*
* RETURNS:
* 0 on success, negative errno on failure
*/
int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
{
u32 sstatus, spd, mask;
int rc, bit;
if (!sata_scr_valid(link))
return -EOPNOTSUPP;
/* If SCR can be read, use it to determine the current SPD.
* If not, use cached value in link->sata_spd.
*/
rc = sata_scr_read(link, SCR_STATUS, &sstatus);
if (rc == 0 && ata_sstatus_online(sstatus))
spd = (sstatus >> 4) & 0xf;
else
spd = link->sata_spd;
mask = link->sata_spd_limit;
if (mask <= 1)
return -EINVAL;
/* unconditionally mask off the highest bit */
bit = fls(mask) - 1;
mask &= ~(1 << bit);
/*
* Mask off all speeds higher than or equal to the current one. At
* this point, if current SPD is not available and we previously
* recorded the link speed from SStatus, the driver has already
* masked off the highest bit so mask should already be 1 or 0.
* Otherwise, we should not force 1.5Gbps on a link where we have
* not previously recorded speed from SStatus. Just return in this
* case.
*/
if (spd > 1)
mask &= (1 << (spd - 1)) - 1;
else if (link->sata_spd)
return -EINVAL;
/* were we already at the bottom? */
if (!mask)
return -EINVAL;
if (spd_limit) {
if (mask & ((1 << spd_limit) - 1))
mask &= (1 << spd_limit) - 1;
else {
bit = ffs(mask) - 1;
mask = 1 << bit;
}
}
link->sata_spd_limit = mask;
ata_link_warn(link, "limiting SATA link speed to %s\n",
sata_spd_string(fls(mask)));
return 0;
}
#ifdef CONFIG_ATA_ACPI
/**
* ata_timing_cycle2mode - find xfer mode for the specified cycle duration
* @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
* @cycle: cycle duration in ns
*
* Return matching xfer mode for @cycle. The returned mode is of
* the transfer type specified by @xfer_shift. If @cycle is too
* slow for @xfer_shift, 0xff is returned. If @cycle is faster
* than the fastest known mode, the fastest mode is returned.
*
* LOCKING:
* None.
*
* RETURNS:
* Matching xfer_mode, 0xff if no match found.
*/
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
u8 base_mode = 0xff, last_mode = 0xff;
const struct ata_xfer_ent *ent;
const struct ata_timing *t;
for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
if (ent->shift == xfer_shift)
base_mode = ent->base;
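/* The ata_timing table runs from slowest to fastest mode within a
 * transfer type, so keep the last (fastest) mode whose cycle time is
 * still no shorter than the requested cycle duration. */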
for (t = ata_timing_find_mode(base_mode);
t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
unsigned short this_cycle;
switch (xfer_shift) {
case ATA_SHIFT_PIO:
case ATA_SHIFT_MWDMA:
this_cycle = t->cycle;
break;
case ATA_SHIFT_UDMA:
this_cycle = t->udma;
break;
default:
return 0xff;
}
if (cycle > this_cycle)
break;
last_mode = t->mode;
}
return last_mode;
}
#endif
/**
* ata_down_xfermask_limit - adjust dev xfer masks downward
* @dev: Device to adjust xfer masks
* @sel: ATA_DNXFER_* selector
*
* Adjust xfer masks of @dev downward. Note that this function
* does not apply the change. Invoking ata_set_mode() afterwards
* will apply the limit.
*
* LOCKING:
* Inherited from caller.
*
* RETURNS:
* 0 on success, negative errno on failure
*/
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
char buf[32];
unsigned int orig_mask, xfer_mask;
unsigned int pio_mask, mwdma_mask, udma_mask;
int quiet, highbit;
quiet = !!(sel & ATA_DNXFER_QUIET);
sel &= ~ATA_DNXFER_QUIET;
xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
dev->mwdma_mask,
dev->udma_mask);
ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
switch (sel) {
case ATA_DNXFER_PIO:
highbit = fls(pio_mask) - 1;
pio_mask &= ~(1 << highbit);
break;
case ATA_DNXFER_DMA:
if (udma_mask) {
highbit = fls(udma_mask) - 1;
udma_mask &= ~(1 << highbit);
if (!udma_mask)
return -ENOENT;
} else if (mwdma_mask) {
highbit = fls(mwdma_mask) - 1;
mwdma_mask &= ~(1 << highbit);
if (!mwdma_mask)
return -ENOENT;
}
break;
case ATA_DNXFER_40C:
udma_mask &= ATA_UDMA_MASK_40C;
break;
case ATA_DNXFER_FORCE_PIO0:
pio_mask &= 1;
fallthrough;
case ATA_DNXFER_FORCE_PIO:
mwdma_mask = 0;
udma_mask = 0;
break;
default:
BUG();
}
xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
return -ENOENT;
if (!quiet) {
if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
snprintf(buf, sizeof(buf), "%s:%s",
ata_mode_string(xfer_mask),
ata_mode_string(xfer_mask & ATA_MASK_PIO));
else
snprintf(buf, sizeof(buf), "%s",
ata_mode_string(xfer_mask));
ata_dev_warn(dev, "limiting speed to %s\n", buf);
}
ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
&dev->udma_mask);
return 0;
}
static int ata_dev_set_mode(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
struct ata_eh_context *ehc = &dev->link->eh_context;
const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
const char *dev_err_whine = "";
int ign_dev_err = 0;
unsigned int err_mask = 0;
int rc;
dev->flags &= ~ATA_DFLAG_PIO;
if (dev->xfer_shift == ATA_SHIFT_PIO)
dev->flags |= ATA_DFLAG_PIO;
if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
dev_err_whine = " (SET_XFERMODE skipped)";
else {
if (nosetxfer)
ata_dev_warn(dev,
"NOSETXFER but PATA detected - can't "
"skip SETXFER, might malfunction\n");
err_mask = ata_dev_set_xfermode(dev);
}
if (err_mask & ~AC_ERR_DEV)
goto fail;
/* revalidate */
ehc->i.flags |= ATA_EHI_POST_SETMODE;
rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
if (rc)
return rc;
if (dev->xfer_shift == ATA_SHIFT_PIO) {
/* Old CFA may refuse this command, which is just fine */
if (ata_id_is_cfa(dev->id))
ign_dev_err = 1;
/* Catch several broken garbage emulations plus some pre
ATA devices */
if (ata_id_major_version(dev->id) == 0 &&
dev->pio_mode <= XFER_PIO_2)
ign_dev_err = 1;
/* Some very old devices and some bad newer ones fail
any kind of SET_XFERMODE request but support PIO0-2
timings and no IORDY */
if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
ign_dev_err = 1;
}
/* Early MWDMA devices do DMA but don't allow DMA mode setting.
Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
dev->dma_mode == XFER_MW_DMA_0 &&
(dev->id[63] >> 8) & 1)
ign_dev_err = 1;
/* if the device is actually configured correctly, ignore dev err */
if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
ign_dev_err = 1;
if (err_mask & AC_ERR_DEV) {
if (!ign_dev_err)
goto fail;
else
dev_err_whine = " (device error ignored)";
}
ata_dev_dbg(dev, "xfer_shift=%u, xfer_mode=0x%x\n",
dev->xfer_shift, (int)dev->xfer_mode);
if (!(ehc->i.flags & ATA_EHI_QUIET) ||
ehc->i.flags & ATA_EHI_DID_HARDRESET)
ata_dev_info(dev, "configured for %s%s\n",
ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
dev_err_whine);
return 0;
fail:
ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
return -EIO;
}
/**
* ata_do_set_mode - Program timings and issue SET FEATURES - XFER
* @link: link on which timings will be programmed
* @r_failed_dev: out parameter for failed device
*
* Standard implementation of the function used to tune and set
* ATA device disk transfer mode (PIO3, UDMA6, etc.). If
* ata_dev_set_mode() fails, pointer to the failing device is
* returned in @r_failed_dev.
*
* LOCKING:
* PCI/etc. bus probe sem.
*
* RETURNS:
* 0 on success, negative errno otherwise
*/
int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
struct ata_port *ap = link->ap;
struct ata_device *dev;
int rc = 0, used_dma = 0, found = 0;
/* step 1: calculate xfer_mask */
ata_for_each_dev(dev, link, ENABLED) {
unsigned int pio_mask, dma_mask;
unsigned int mode_mask;
mode_mask = ATA_DMA_MASK_ATA;
if (dev->class == ATA_DEV_ATAPI)
mode_mask = ATA_DMA_MASK_ATAPI;
else if (ata_id_is_cfa(dev->id))
mode_mask = ATA_DMA_MASK_CFA;
ata_dev_xfermask(dev);
ata_force_xfermask(dev);
pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
if (libata_dma_mask & mode_mask)
dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
dev->udma_mask);
else
dma_mask = 0;
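/* ata_xfer_mask2mode() returns 0xff when the mask contains no modes */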
dev->pio_mode = ata_xfer_mask2mode(pio_mask);
dev->dma_mode = ata_xfer_mask2mode(dma_mask);
found = 1;
if (ata_dma_enabled(dev))
used_dma = 1;
}
if (!found)
goto out;
/* step 2: always set host PIO timings */
ata_for_each_dev(dev, link, ENABLED) {
if (dev->pio_mode == 0xff) {
ata_dev_warn(dev, "no PIO support\n");
rc = -EINVAL;
goto out;
}
dev->xfer_mode = dev->pio_mode;
dev->xfer_shift = ATA_SHIFT_PIO;
if (ap->ops->set_piomode)
ap->ops->set_piomode(ap, dev);
}
/* step 3: set host DMA timings */
ata_for_each_dev(dev, link, ENABLED) {
if (!ata_dma_enabled(dev))
continue;
dev->xfer_mode = dev->dma_mode;
dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
if (ap->ops->set_dmamode)
ap->ops->set_dmamode(ap, dev);
}
/* step 4: update devices' xfer mode */
ata_for_each_dev(dev, link, ENABLED) {
rc = ata_dev_set_mode(dev);
if (rc)
goto out;
}
/* Record simplex status. If we selected DMA then the other
* host channels are not permitted to do so.
*/
if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
ap->host->simplex_claimed = ap;
out:
if (rc)
*r_failed_dev = dev;
return rc;
}
EXPORT_SYMBOL_GPL(ata_do_set_mode);
/**
* ata_wait_ready - wait for link to become ready
* @link: link to be waited on
* @deadline: deadline jiffies for the operation
* @check_ready: callback to check link readiness
*
* Wait for @link to become ready. @check_ready should return
* positive number if @link is ready, 0 if it isn't, -ENODEV if
* link doesn't seem to be occupied, other errno for other error
* conditions.
*
* Transient -ENODEV conditions are allowed for
* ATA_TMOUT_FF_WAIT.
*
* LOCKING:
* EH context.
*
* RETURNS:
* 0 if @link is ready before @deadline; otherwise, -errno.
*/
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link))
{
unsigned long start = jiffies;
unsigned long nodev_deadline;
int warned = 0;
/* choose which 0xff timeout to use, read comment in libata.h */
if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
else
nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
/* Slave readiness can't be tested separately from master. On
* M/S emulation configuration, this function should be called
* only on the master and it will handle both master and slave.
*/
WARN_ON(link == link->ap->slave_link);
if (time_after(nodev_deadline, deadline))
nodev_deadline = deadline;
while (1) {
unsigned long now = jiffies;
int ready, tmp;
ready = tmp = check_ready(link);
if (ready > 0)
return 0;
/*
* -ENODEV could be transient. Ignore -ENODEV if link
* is online. Also, some SATA devices take a long
* time to clear 0xff after reset. Wait for
* ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
* offline.
*
* Note that some PATA controllers (pata_ali) explode
* if status register is read more than once when
* there's no device attached.
*/
if (ready == -ENODEV) {
if (ata_link_online(link))
ready = 0;
else if ((link->ap->flags & ATA_FLAG_SATA) &&
!ata_link_offline(link) &&
time_before(now, nodev_deadline))
ready = 0;
}
if (ready)
return ready;
if (time_after(now, deadline))
return -EBUSY;
if (!warned && time_after(now, start + 5 * HZ) &&
(deadline - now > 3 * HZ)) {
ata_link_warn(link,
"link is slow to respond, please be patient "
"(ready=%d)\n", tmp);
warned = 1;
}
ata_msleep(link->ap, 50);
}
}
/**
* ata_wait_after_reset - wait for link to become ready after reset
* @link: link to be waited on
* @deadline: deadline jiffies for the operation
* @check_ready: callback to check link readiness
*
* Wait for @link to become ready after reset.
*
* LOCKING:
* EH context.
*
* RETURNS:
* 0 if @link is ready before @deadline; otherwise, -errno.
*/
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link))
{
ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
return ata_wait_ready(link, deadline, check_ready);
}
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
/**
* ata_std_prereset - prepare for reset
* @link: ATA link to be reset
* @deadline: deadline jiffies for the operation
*
* @link is about to be reset. Initialize it. Failure from
* prereset makes libata abort whole reset sequence and give up
* that port, so prereset should be best-effort. It does its
* best to prepare for reset sequence but if things go wrong, it
* should just whine, not fail.
*
* LOCKING:
* Kernel thread context (may sleep)
*
* RETURNS:
* Always 0.
*/
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct ata_eh_context *ehc = &link->eh_context;
const unsigned int *timing = sata_ehc_deb_timing(ehc);
int rc;
/* if we're about to do hardreset, nothing more to do */
if (ehc->i.action & ATA_EH_HARDRESET)
return 0;
/* if SATA, resume link */
if (ap->flags & ATA_FLAG_SATA) {
rc = sata_link_resume(link, timing, deadline);
/* whine about phy resume failure but proceed */
if (rc && rc != -EOPNOTSUPP)
ata_link_warn(link,
"failed to resume link for reset (errno=%d)\n",
rc);
}
/* no point in trying softreset on offline link */
if (ata_phys_link_offline(link))
ehc->i.action &= ~ATA_EH_SOFTRESET;
return 0;
}
EXPORT_SYMBOL_GPL(ata_std_prereset);
/**
* sata_std_hardreset - COMRESET w/o waiting or classification
* @link: link to reset
* @class: resulting class of attached device
* @deadline: deadline jiffies for the operation
*
* Standard SATA COMRESET w/o waiting or classification.
*
* LOCKING:
* Kernel thread context (may sleep)
*
* RETURNS:
* 0 if link offline, -EAGAIN if link online, -errno on errors.
*/
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
bool online;
int rc;
/* do hardreset */
rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
return online ? -EAGAIN : rc;
}
EXPORT_SYMBOL_GPL(sata_std_hardreset);
/**
* ata_std_postreset - standard postreset callback
* @link: the target ata_link
* @classes: classes of attached devices
*
* This function is invoked after a successful reset. Note that
* the device might have been reset more than once using
* different reset methods before postreset is invoked.
*
* LOCKING:
* Kernel thread context (may sleep)
*/
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
u32 serror;
/* reset complete, clear SError */
if (!sata_scr_read(link, SCR_ERROR, &serror))
sata_scr_write(link, SCR_ERROR, serror);
/* print link status */
sata_print_link_status(link);
}
EXPORT_SYMBOL_GPL(ata_std_postreset);
/**
* ata_dev_same_device - Determine whether new ID matches configured device
* @dev: device to compare against
* @new_class: class of the new device
* @new_id: IDENTIFY page of the new device
*
* Compare @new_class and @new_id against @dev and determine
* whether @dev is the device indicated by @new_class and
* @new_id.
*
* LOCKING:
* None.
*
* RETURNS:
* 1 if @dev matches @new_class and @new_id, 0 otherwise.
*/
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
const u16 *new_id)
{
const u16 *old_id = dev->id;
unsigned char model[2][ATA_ID_PROD_LEN + 1];
unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
if (dev->class != new_class) {
ata_dev_info(dev, "class mismatch %d != %d\n",
dev->class, new_class);
return 0;
}
ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
if (strcmp(model[0], model[1])) {
ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
model[0], model[1]);
return 0;
}
if (strcmp(serial[0], serial[1])) {
ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
serial[0], serial[1]);
return 0;
}
return 1;
}
/**
* ata_dev_reread_id - Re-read IDENTIFY data
* @dev: target ATA device
* @readid_flags: read ID flags
*
* Re-read IDENTIFY page and make sure @dev is still attached to
* the port.
*
* LOCKING:
* Kernel thread context (may sleep)
*
* RETURNS:
* 0 on success, negative errno otherwise
*/
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
unsigned int class = dev->class;
u16 *id = (void *)dev->link->ap->sector_buf;
int rc;
/* read ID data */
rc = ata_dev_read_id(dev, &class, readid_flags, id);
if (rc)
return rc;
/* is the device still there? */
if (!ata_dev_same_device(dev, class, id))
return -ENODEV;
memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
return 0;
}
/**
* ata_dev_revalidate - Revalidate ATA device
* @dev: device to revalidate
* @new_class: new class code
* @readid_flags: read ID flags
*
* Re-read IDENTIFY page, make sure @dev is still attached to the
* port and reconfigure it according to the new IDENTIFY page.
*
* LOCKING:
* Kernel thread context (may sleep)
*
* RETURNS:
* 0 on success, negative errno otherwise
*/
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
unsigned int readid_flags)
{
u64 n_sectors = dev->n_sectors;
u64 n_native_sectors = dev->n_native_sectors;
int rc;
if (!ata_dev_enabled(dev))
return -ENODEV;
/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
if (ata_class_enabled(new_class) && new_class == ATA_DEV_PMP) {
ata_dev_info(dev, "class mismatch %u != %u\n",
dev->class, new_class);
rc = -ENODEV;
goto fail;
}
/* re-read ID */
rc = ata_dev_reread_id(dev, readid_flags);
if (rc)
goto fail;
/* configure device according to the new ID */
rc = ata_dev_configure(dev);
if (rc)
goto fail;
/* verify n_sectors hasn't changed */
if (dev->class != ATA_DEV_ATA || !n_sectors ||
dev->n_sectors == n_sectors)
return 0;
/* n_sectors has changed */
ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
(unsigned long long)n_sectors,
(unsigned long long)dev->n_sectors);
/*
* Something could have caused HPA to be unlocked
* involuntarily. If n_native_sectors hasn't changed and the
* new size matches it, keep the device.
*/
if (dev->n_native_sectors == n_native_sectors &&
dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
ata_dev_warn(dev,
"new n_sectors matches native, probably "
"late HPA unlock, n_sectors updated\n");
/* use the larger n_sectors */
return 0;
}
/*
* Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
* unlocking HPA in those cases.
*
* https://bugzilla.kernel.org/show_bug.cgi?id=15396
*/
if (dev->n_native_sectors == n_native_sectors &&
dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
!(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
ata_dev_warn(dev,
"old n_sectors matches native, probably "
"late HPA lock, will try to unlock HPA\n");
/* try unlocking HPA */
dev->flags |= ATA_DFLAG_UNLOCK_HPA;
rc = -EIO;
} else
rc = -ENODEV;
/* restore original n_[native_]sectors and fail */
dev->n_native_sectors = n_native_sectors;
dev->n_sectors = n_sectors;
fail:
ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
return rc;
}
struct ata_blacklist_entry {
const char *model_num;
const char *model_rev;
unsigned long horkage;
};
static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* Devices with DMA related problems under Linux */
{ "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
{ "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
{ "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
{ "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
{ "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
{ "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
{ "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
{ "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
{ "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
{ "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
{ "CRD-84", NULL, ATA_HORKAGE_NODMA },
{ "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
{ "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
{ "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
{ "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
{ "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
{ "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
{ "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
{ "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
{ "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
{ "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
{ "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
{ "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
{ "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
{ "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
{ "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
{ "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
{ " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
{ "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
/* Odd clown on sil3726/4726 PMPs */
{ "Config Disk", NULL, ATA_HORKAGE_DISABLE },
/* Similar story with ASMedia 1092 */
{ "ASMT109x- Config", NULL, ATA_HORKAGE_DISABLE },
/* Weird ATAPI devices */
{ "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
{ "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
{ "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
{ "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
/*
* Causes silent data corruption with higher max sects.
* http://lkml.kernel.org/g/[email protected]
*/
{ "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
/*
* These devices time out with higher max sects.
* https://bugzilla.kernel.org/show_bug.cgi?id=121671
*/
{ "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
{ "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },
/* Devices we expect to fail diagnostics */
/* Devices where NCQ should be avoided */
/* NCQ is slow */
{ "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
{ "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ },
/* http://thread.gmane.org/gmane.linux.ide/14907 */
{ "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
/* NCQ is broken */
{ "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
{ "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
{ "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
{ "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
{ "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
/* Seagate NCQ + FLUSH CACHE firmware bug */
{ "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
{ "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
{ "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
/* drives which fail FPDMA_AA activation (some may freeze afterwards);
   the ST disks also have LPM issues */
{ "ST1000LM024 HN-M101MBB", NULL, ATA_HORKAGE_BROKEN_FPDMA_AA |
ATA_HORKAGE_NOLPM },
{ "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
{ "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ },
{ "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ },
{ "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ },
/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
{ "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ },
/* Sandisk SD7/8/9s lock up hard on large trims */
{ "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M },
/* devices which puke on READ_NATIVE_MAX */
{ "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA },
{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
{ "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
/* this one allows HPA unlocking but fails IOs on the area */
{ "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
/* Devices which report 1 sector over size HPA */
{ "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE },
{ "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE },
{ "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE },
/* Devices which get the IVB wrong */
{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB },
/* Maybe we should just blacklist TSSTcorp... */
{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB },
/* Devices that do not need bridging limits applied */
{ "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK },
{ "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK },
/* Devices which aren't very happy with higher link speeds */
{ "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS },
{ "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS },
/*
* Devices which choke on SETXFER. Applies only if both the
* device and controller are SATA.
*/
{ "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
/* These specific Pioneer models have LPM issues */
{ "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM },
{ "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM },
/* Crucial BX100 SSD 500GB has broken LPM support */
{ "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
{ "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NOLPM },
/* 512GB MX100 with newer firmware has only LPM issues */
{ "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NOLPM },
/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
{ "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NOLPM },
{ "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NOLPM },
/* These specific Samsung models/firmware-revs do not handle LPM well */
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM },
{ "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM },
{ "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM },
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Micron_1100_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Samsung SSD 840 EVO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_NO_DMA_LOG |
ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Samsung SSD 860*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NO_NCQ_ON_ATI },
{ "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NO_NCQ_ON_ATI },
{ "SAMSUNG*MZ7LH*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NO_NCQ_ON_ATI, },
{ "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM },
/* devices that don't properly handle TRIM commands */
{ "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM },
{ "M88V29*", NULL, ATA_HORKAGE_NOTRIM },
/*
* As defined, the DRAT (Deterministic Read After Trim) and RZAT
* (Return Zero After Trim) flags in the ATA Command Set are
* unreliable in the sense that they only define what happens if
* the device successfully executed the DSM TRIM command. TRIM
* is only advisory, however, and the device is free to silently
* ignore all or parts of the request.
*
* Whitelist drives that are known to reliably return zeroes
* after TRIM.
*/
/*
* The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
* that model before whitelisting all other intel SSDs.
*/
{ "INTEL*SSDSC2MH*", NULL, 0 },
{ "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
/*
* Some WD SATA-I drives spin up and down erratically when the link
* is put into the slumber mode. We don't have full list of the
* affected devices. Disable LPM if the device matches one of the
* known prefixes and is SATA-1. As a side effect LPM partial is
* lost too.
*
* https://bugzilla.kernel.org/show_bug.cgi?id=57211
*/
{ "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
{ "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
{ "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
{ "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
{ "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
{ "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
{ "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
/*
* This sata dom device goes on a walkabout when the ATA_LOG_DIRECTORY
* log page is accessed. Ensure we never ask for this log page with
* these devices.
*/
{ "SATADOM-ML 3ME", NULL, ATA_HORKAGE_NO_LOG_DIR },
/* Buggy FUA */
{ "Maxtor", "BANC1G10", ATA_HORKAGE_NO_FUA },
{ "WDC*WD2500J*", NULL, ATA_HORKAGE_NO_FUA },
{ "OCZ-VERTEX*", NULL, ATA_HORKAGE_NO_FUA },
{ "INTEL*SSDSC2CT*", NULL, ATA_HORKAGE_NO_FUA },
/* End Marker */
{ }
};
static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
const struct ata_blacklist_entry *ad = ata_device_blacklist;
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
while (ad->model_num) {
if (glob_match(ad->model_num, model_num)) {
if (ad->model_rev == NULL)
return ad->horkage;
if (glob_match(ad->model_rev, model_rev))
return ad->horkage;
}
ad++;
}
return 0;
}
static int ata_dma_blacklisted(const struct ata_device *dev)
{
/* We don't support polling DMA.
* Blacklist DMA for ATAPI devices with CDB-intr (and use PIO instead)
* if the LLDD handles interrupts only in the HSM_ST_LAST state.
*/
if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
(dev->flags & ATA_DFLAG_CDB_INTR))
return 1;
return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
}
/**
* ata_is_40wire - check drive side detection
* @dev: device
*
* Perform drive side detection decoding, allowing for device vendors
* who can't follow the documentation.
*/
static int ata_is_40wire(struct ata_device *dev)
{
if (dev->horkage & ATA_HORKAGE_IVB)
return ata_drive_40wire_relaxed(dev->id);
return ata_drive_40wire(dev->id);
}
/**
* cable_is_40wire - 40/80/SATA decider
* @ap: port to consider
*
* This function encapsulates the policy for speed management
* in one place. At the moment we don't cache the result but
* there is a good case for setting ap->cbl to the result when
* we are called with unknown cables (and figuring out if it
* impacts hotplug at all).
*
* Return 1 if the cable appears to be 40 wire.
*/
static int cable_is_40wire(struct ata_port *ap)
{
struct ata_link *link;
struct ata_device *dev;
/* If the controller thinks we are 40 wire, we are. */
if (ap->cbl == ATA_CBL_PATA40)
return 1;
/* If the controller thinks we are 80 wire, we are. */
if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
return 0;
/* If the system is known to be 40 wire short cable (eg
* laptop), then we allow 80 wire modes even if the drive
* isn't sure.
*/
if (ap->cbl == ATA_CBL_PATA40_SHORT)
return 0;
/* If the controller doesn't know, we scan.
*
* Note: We look for all 40 wire detects at this point. Any
* 80 wire detect is taken to be 80 wire cable because
* - in many setups only the one drive (slave if present) will
* give a valid detect
* - if you have a non detect capable drive you don't want it
* to colour the choice
*/
ata_for_each_link(link, ap, EDGE) {
ata_for_each_dev(dev, link, ENABLED) {
if (!ata_is_40wire(dev))
return 0;
}
}
return 1;
}
/**
* ata_dev_xfermask - Compute supported xfermask of the given device
* @dev: Device to compute xfermask for
*
* Compute supported xfermask of @dev and store it in
* dev->*_mask. This function is responsible for applying all
* known limits including host controller limits, device
* blacklist, etc...
*
* LOCKING:
* None.
*/
static void ata_dev_xfermask(struct ata_device *dev)
{
struct ata_link *link = dev->link;
struct ata_port *ap = link->ap;
struct ata_host *host = ap->host;
unsigned int xfer_mask;
/* controller modes available */
xfer_mask = ata_pack_xfermask(ap->pio_mask,
ap->mwdma_mask, ap->udma_mask);
/* drive modes available */
xfer_mask &= ata_pack_xfermask(dev->pio_mask,
dev->mwdma_mask, dev->udma_mask);
xfer_mask &= ata_id_xfermask(dev->id);
/*
* CFA Advanced TrueIDE timings are not allowed on a shared
* cable
*/
if (ata_dev_pair(dev)) {
/* No PIO5 or PIO6 */
xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
/* No MWDMA3 or MWDMA 4 */
xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
}
if (ata_dma_blacklisted(dev)) {
xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
ata_dev_warn(dev,
"device is on DMA blacklist, disabling DMA\n");
}
if ((host->flags & ATA_HOST_SIMPLEX) &&
host->simplex_claimed && host->simplex_claimed != ap) {
xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
ata_dev_warn(dev,
"simplex DMA is claimed by other device, disabling DMA\n");
}
if (ap->flags & ATA_FLAG_NO_IORDY)
xfer_mask &= ata_pio_mask_no_iordy(dev);
if (ap->ops->mode_filter)
xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
/* Apply cable rule here. Don't apply it early because when
* we handle hot plug the cable type can itself change.
* Check this last so that we know if the transfer rate was
* solely limited by the cable.
* Unknown or 80 wire cables reported host side are checked
* drive side as well. Cases where we know a 40wire cable
* is used safely for 80 are not checked here.
*/
if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
/* UDMA/44 or higher would be available */
if (cable_is_40wire(ap)) {
ata_dev_warn(dev,
"limited to UDMA/33 due to 40-wire cable\n");
xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
}
ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
&dev->mwdma_mask, &dev->udma_mask);
}
/**
* ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
* @dev: Device to which command will be sent
*
* Issue SET FEATURES - XFER MODE command to device @dev.
*
* LOCKING:
* PCI/etc. bus probe sem.
*
* RETURNS:
* 0 on success, AC_ERR_* mask otherwise.
*/
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
struct ata_taskfile tf;
/* set up set-features taskfile */
ata_dev_dbg(dev, "set features - xfer mode\n");
/* Some controllers and ATAPI devices show flaky interrupt
* behavior after setting xfer mode. Use polling instead.
*/
ata_tf_init(dev, &tf);
tf.command = ATA_CMD_SET_FEATURES;
tf.feature = SETFEATURES_XFER;
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
tf.protocol = ATA_PROT_NODATA;
/* If we are using IORDY we must send the mode setting command */
if (ata_pio_need_iordy(dev))
tf.nsect = dev->xfer_mode;
/* If the device has IORDY and the controller does not - turn it off */
else if (ata_id_has_iordy(dev->id))
tf.nsect = 0x01;
else /* In the ancient relic department - skip all of this */
return 0;
/*
* On some disks, this command causes spin-up, so we need longer
* timeout.
*/
return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
}
/**
* ata_dev_set_feature - Issue SET FEATURES
* @dev: Device to which command will be sent
* @subcmd: The SET FEATURES subcommand to be sent
 * @action: The sector count representing a subcommand-specific action
 *
 * Issue SET FEATURES command to device @dev with the sector count
 * set to @action.
*
* LOCKING:
* PCI/etc. bus probe sem.
*
* RETURNS:
* 0 on success, AC_ERR_* mask otherwise.
*/
unsigned int ata_dev_set_feature(struct ata_device *dev, u8 subcmd, u8 action)
{
struct ata_taskfile tf;
unsigned int timeout = 0;
/* set up set-features taskfile */
ata_dev_dbg(dev, "set features\n");
ata_tf_init(dev, &tf);
tf.command = ATA_CMD_SET_FEATURES;
tf.feature = subcmd;
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.protocol = ATA_PROT_NODATA;
tf.nsect = action;
if (subcmd == SETFEATURES_SPINUP)
timeout = ata_probe_timeout ?
ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
}
EXPORT_SYMBOL_GPL(ata_dev_set_feature);
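/*
 * Illustrative sketch (not part of the original file): a caller that
 * needs to spin up a device which powered up in standby can use the
 * exported helper with the SETFEATURES_SPINUP subcommand and a zero
 * action value.
 */
static unsigned int example_dev_spinup(struct ata_device *dev)
{
	/* returns 0 on success, AC_ERR_* mask otherwise */
	return ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
}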
/**
* ata_dev_init_params - Issue INIT DEV PARAMS command
* @dev: Device to which command will be sent
* @heads: Number of heads (taskfile parameter)
* @sectors: Number of sectors (taskfile parameter)
*
* LOCKING:
* Kernel thread context (may sleep)
*
* RETURNS:
* 0 on success, AC_ERR_* mask otherwise.
*/
static unsigned int ata_dev_init_params(struct ata_device *dev,
u16 heads, u16 sectors)
{
struct ata_taskfile tf;
unsigned int err_mask;
/* Number of sectors per track 1-255. Number of heads 1-16 */
if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
return AC_ERR_INVALID;
/* set up init dev params taskfile */
	ata_dev_dbg(dev, "init dev params\n");
ata_tf_init(dev, &tf);
tf.command = ATA_CMD_INIT_DEV_PARAMS;
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.protocol = ATA_PROT_NODATA;
tf.nsect = sectors;
tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just-out-of-spec drive,
	 * so continue and issue the setup based on the drive's reported
	 * working geometry. */
if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
err_mask = 0;
return err_mask;
}
/**
* atapi_check_dma - Check whether ATAPI DMA can be supported
* @qc: Metadata associated with taskfile to check
*
* Allow low-level driver to filter ATA PACKET commands, returning
* a status indicating whether or not it is OK to use DMA for the
* supplied PACKET command.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS: 0 when ATAPI DMA can be used
* nonzero otherwise
*/
int atapi_check_dma(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
	/* Don't allow DMA if the transfer isn't a multiple of 16 bytes.
	 * Quite a few ATAPI devices choke on such DMA requests.
	 */
if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
unlikely(qc->nbytes & 15))
return 1;
if (ap->ops->check_atapi_dma)
return ap->ops->check_atapi_dma(qc);
return 0;
}
/**
* ata_std_qc_defer - Check whether a qc needs to be deferred
* @qc: ATA command in question
*
* Non-NCQ commands cannot run with any other command, NCQ or
 * not. As the upper layer only knows the queue depth, we are
* responsible for maintaining exclusion. This function checks
* whether a new command @qc can be issued.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* ATA_DEFER_* if deferring is needed, 0 otherwise.
*/
int ata_std_qc_defer(struct ata_queued_cmd *qc)
{
struct ata_link *link = qc->dev->link;
if (ata_is_ncq(qc->tf.protocol)) {
if (!ata_tag_valid(link->active_tag))
return 0;
} else {
if (!ata_tag_valid(link->active_tag) && !link->sactive)
return 0;
}
return ATA_DEFER_LINK;
}
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
{
return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
/**
* ata_sg_init - Associate command with scatter-gather table.
* @qc: Command to be associated
* @sg: Scatter-gather table.
* @n_elem: Number of elements in s/g table.
*
* Initialize the data-related elements of queued_cmd @qc
* to point to a scatter-gather table @sg, containing @n_elem
* elements.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem)
{
qc->sg = sg;
qc->n_elem = n_elem;
qc->cursg = qc->sg;
}
#ifdef CONFIG_HAS_DMA
/**
* ata_sg_clean - Unmap DMA memory associated with command
* @qc: Command containing DMA memory to be released
*
* Unmap all mapped DMA memory associated with this command.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static void ata_sg_clean(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct scatterlist *sg = qc->sg;
int dir = qc->dma_dir;
WARN_ON_ONCE(sg == NULL);
if (qc->n_elem)
dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
qc->flags &= ~ATA_QCFLAG_DMAMAP;
qc->sg = NULL;
}
/**
* ata_sg_setup - DMA-map the scatter-gather table associated with a command.
* @qc: Command with scatter-gather table to be mapped.
*
* DMA-map the scatter-gather table associated with queued_cmd @qc.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, negative on error.
*
*/
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
unsigned int n_elem;
n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
if (n_elem < 1)
return -1;
qc->orig_n_elem = qc->n_elem;
qc->n_elem = n_elem;
qc->flags |= ATA_QCFLAG_DMAMAP;
return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
#endif /* !CONFIG_HAS_DMA */
/**
* swap_buf_le16 - swap halves of 16-bit words in place
* @buf: Buffer to swap
* @buf_words: Number of 16-bit words in buffer.
*
* Swap halves of 16-bit words if needed to convert from
* little-endian byte order to native cpu byte order, or
* vice-versa.
*
* LOCKING:
* Inherited from caller.
*/
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
unsigned int i;
for (i = 0; i < buf_words; i++)
buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
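/*
 * Illustrative sketch (hypothetical caller): IDENTIFY DEVICE data is a
 * 256-word little-endian buffer, so ID data read via PIO is converted
 * to CPU byte order in place before any of its words are parsed.
 */
static void example_fixup_id(u16 *id)
{
	swap_buf_le16(id, ATA_ID_WORDS);	/* ATA_ID_WORDS == 256 */
}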
/**
* ata_qc_free - free unused ata_queued_cmd
* @qc: Command to complete
*
* Designed to free unused ata_queued_cmd object
* in case something prevents using it.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
void ata_qc_free(struct ata_queued_cmd *qc)
{
qc->flags = 0;
if (ata_tag_valid(qc->tag))
qc->tag = ATA_TAG_POISON;
}
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
struct ata_port *ap;
struct ata_link *link;
WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
ap = qc->ap;
link = qc->dev->link;
if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
ata_sg_clean(qc);
/* command should be marked inactive atomically with qc completion */
if (ata_is_ncq(qc->tf.protocol)) {
link->sactive &= ~(1 << qc->hw_tag);
if (!link->sactive)
ap->nr_active_links--;
} else {
link->active_tag = ATA_TAG_POISON;
ap->nr_active_links--;
}
/* clear exclusive status */
if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
ap->excl_link == link))
ap->excl_link = NULL;
/* atapi: mark qc as inactive to prevent the interrupt handler
* from completing the command twice later, before the error handler
* is called. (when rc != 0 and atapi request sense is needed)
*/
qc->flags &= ~ATA_QCFLAG_ACTIVE;
ap->qc_active &= ~(1ULL << qc->tag);
/* call completion callback */
qc->complete_fn(qc);
}
static void fill_result_tf(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
qc->result_tf.flags = qc->tf.flags;
ap->ops->qc_fill_rtf(qc);
}
static void ata_verify_xfer(struct ata_queued_cmd *qc)
{
struct ata_device *dev = qc->dev;
if (!ata_is_data(qc->tf.protocol))
return;
if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
return;
dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
}
/**
* ata_qc_complete - Complete an active ATA command
* @qc: Command to complete
*
* Indicate to the mid and upper layers that an ATA command has
* completed, with either an ok or not-ok status.
*
* Refrain from calling this function multiple times when
* successfully completing multiple NCQ commands.
* ata_qc_complete_multiple() should be used instead, which will
* properly update IRQ expect state.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
void ata_qc_complete(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *dev = qc->dev;
struct ata_eh_info *ehi = &dev->link->eh_info;
/* Trigger the LED (if available) */
ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));
/*
* In order to synchronize EH with the regular execution path, a qc that
* is owned by EH is marked with ATA_QCFLAG_EH.
*
* The normal execution path is responsible for not accessing a qc owned
* by EH. libata core enforces the rule by returning NULL from
* ata_qc_from_tag() for qcs owned by EH.
*/
if (unlikely(qc->err_mask))
qc->flags |= ATA_QCFLAG_EH;
/*
* Finish internal commands without any further processing and always
* with the result TF filled.
*/
if (unlikely(ata_tag_internal(qc->tag))) {
fill_result_tf(qc);
trace_ata_qc_complete_internal(qc);
__ata_qc_complete(qc);
return;
}
/* Non-internal qc has failed. Fill the result TF and summon EH. */
if (unlikely(qc->flags & ATA_QCFLAG_EH)) {
fill_result_tf(qc);
trace_ata_qc_complete_failed(qc);
ata_qc_schedule_eh(qc);
return;
}
WARN_ON_ONCE(ata_port_is_frozen(ap));
/* read result TF if requested */
if (qc->flags & ATA_QCFLAG_RESULT_TF)
fill_result_tf(qc);
trace_ata_qc_complete_done(qc);
/*
* For CDL commands that completed without an error, check if we have
* sense data (ATA_SENSE is set). If we do, then the command may have
* been aborted by the device due to a limit timeout using the policy
* 0xD. For these commands, invoke EH to get the command sense data.
*/
if (qc->flags & ATA_QCFLAG_HAS_CDL &&
qc->result_tf.status & ATA_SENSE) {
/*
* Tell SCSI EH to not overwrite scmd->result even if this
* command is finished with result SAM_STAT_GOOD.
*/
qc->scsicmd->flags |= SCMD_FORCE_EH_SUCCESS;
qc->flags |= ATA_QCFLAG_EH_SUCCESS_CMD;
ehi->dev_action[dev->devno] |= ATA_EH_GET_SUCCESS_SENSE;
/*
* set pending so that ata_qc_schedule_eh() does not trigger
* fast drain, and freeze the port.
*/
ap->pflags |= ATA_PFLAG_EH_PENDING;
ata_qc_schedule_eh(qc);
return;
}
/* Some commands need post-processing after successful completion. */
switch (qc->tf.command) {
case ATA_CMD_SET_FEATURES:
if (qc->tf.feature != SETFEATURES_WC_ON &&
qc->tf.feature != SETFEATURES_WC_OFF &&
qc->tf.feature != SETFEATURES_RA_ON &&
qc->tf.feature != SETFEATURES_RA_OFF)
break;
fallthrough;
case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
case ATA_CMD_SET_MULTI: /* multi_count changed */
/* revalidate device */
ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
ata_port_schedule_eh(ap);
break;
case ATA_CMD_SLEEP:
dev->flags |= ATA_DFLAG_SLEEPING;
break;
}
if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
ata_verify_xfer(qc);
__ata_qc_complete(qc);
}
EXPORT_SYMBOL_GPL(ata_qc_complete);
/**
* ata_qc_get_active - get bitmask of active qcs
* @ap: port in question
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Bitmask of active qcs
*/
u64 ata_qc_get_active(struct ata_port *ap)
{
u64 qc_active = ap->qc_active;
/* ATA_TAG_INTERNAL is sent to hw as tag 0 */
if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
qc_active |= (1 << 0);
qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
}
return qc_active;
}
EXPORT_SYMBOL_GPL(ata_qc_get_active);
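/*
 * Worked example (illustrative): with only the internal command in
 * flight, ap->qc_active == 1ULL << ATA_TAG_INTERNAL on entry and
 * ata_qc_get_active() returns 0x1, matching the hardware's use of
 * tag 0 for the internal command.
 */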
/**
* ata_qc_issue - issue taskfile to device
* @qc: command to issue to device
*
 *	Prepare an ATA command for submission to the device.
* This includes mapping the data into a DMA-able
* area, filling in the S/G table, and finally
* writing the taskfile to hardware, starting the command.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
void ata_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_link *link = qc->dev->link;
u8 prot = qc->tf.protocol;
/* Make sure only one non-NCQ command is outstanding. */
WARN_ON_ONCE(ata_tag_valid(link->active_tag));
if (ata_is_ncq(prot)) {
WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));
if (!link->sactive)
ap->nr_active_links++;
link->sactive |= 1 << qc->hw_tag;
} else {
WARN_ON_ONCE(link->sactive);
ap->nr_active_links++;
link->active_tag = qc->tag;
}
qc->flags |= ATA_QCFLAG_ACTIVE;
ap->qc_active |= 1ULL << qc->tag;
/*
* We guarantee to LLDs that they will have at least one
* non-zero sg if the command is a data command.
*/
if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
goto sys_err;
if (ata_is_dma(prot) || (ata_is_pio(prot) &&
(ap->flags & ATA_FLAG_PIO_DMA)))
if (ata_sg_setup(qc))
goto sys_err;
/* if device is sleeping, schedule reset and abort the link */
if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
link->eh_info.action |= ATA_EH_RESET;
ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
ata_link_abort(link);
return;
}
trace_ata_qc_prep(qc);
qc->err_mask |= ap->ops->qc_prep(qc);
if (unlikely(qc->err_mask))
goto err;
trace_ata_qc_issue(qc);
qc->err_mask |= ap->ops->qc_issue(qc);
if (unlikely(qc->err_mask))
goto err;
return;
sys_err:
qc->err_mask |= AC_ERR_SYSTEM;
err:
ata_qc_complete(qc);
}
/**
* ata_phys_link_online - test whether the given link is online
* @link: ATA link to test
*
 *	Test whether @link is online. Note that this function returns
 *	false if the online status of @link cannot be obtained, so
 *	ata_phys_link_online(link) != !ata_phys_link_offline(link).
*
* LOCKING:
* None.
*
* RETURNS:
* True if the port online status is available and online.
*/
bool ata_phys_link_online(struct ata_link *link)
{
u32 sstatus;
if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
ata_sstatus_online(sstatus))
return true;
return false;
}
/**
* ata_phys_link_offline - test whether the given link is offline
* @link: ATA link to test
*
 *	Test whether @link is offline. Note that this function returns
 *	false if the offline status of @link cannot be obtained, so
 *	ata_phys_link_online(link) != !ata_phys_link_offline(link).
*
* LOCKING:
* None.
*
* RETURNS:
* True if the port offline status is available and offline.
*/
bool ata_phys_link_offline(struct ata_link *link)
{
u32 sstatus;
if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
!ata_sstatus_online(sstatus))
return true;
return false;
}
/**
* ata_link_online - test whether the given link is online
* @link: ATA link to test
*
* Test whether @link is online. This is identical to
* ata_phys_link_online() when there's no slave link. When
* there's a slave link, this function should only be called on
* the master link and will return true if any of M/S links is
* online.
*
* LOCKING:
* None.
*
* RETURNS:
* True if the port online status is available and online.
*/
bool ata_link_online(struct ata_link *link)
{
struct ata_link *slave = link->ap->slave_link;
WARN_ON(link == slave); /* shouldn't be called on slave link */
return ata_phys_link_online(link) ||
(slave && ata_phys_link_online(slave));
}
EXPORT_SYMBOL_GPL(ata_link_online);
/**
* ata_link_offline - test whether the given link is offline
* @link: ATA link to test
*
* Test whether @link is offline. This is identical to
* ata_phys_link_offline() when there's no slave link. When
* there's a slave link, this function should only be called on
* the master link and will return true if both M/S links are
* offline.
*
* LOCKING:
* None.
*
* RETURNS:
* True if the port offline status is available and offline.
*/
bool ata_link_offline(struct ata_link *link)
{
struct ata_link *slave = link->ap->slave_link;
WARN_ON(link == slave); /* shouldn't be called on slave link */
return ata_phys_link_offline(link) &&
(!slave || ata_phys_link_offline(slave));
}
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
unsigned int action, unsigned int ehi_flags,
bool async)
{
struct ata_link *link;
unsigned long flags;
/* Previous resume operation might still be in
* progress. Wait for PM_PENDING to clear.
*/
if (ap->pflags & ATA_PFLAG_PM_PENDING) {
ata_port_wait_eh(ap);
WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
}
/* request PM ops to EH */
spin_lock_irqsave(ap->lock, flags);
ap->pm_mesg = mesg;
ap->pflags |= ATA_PFLAG_PM_PENDING;
ata_for_each_link(link, ap, HOST_FIRST) {
link->eh_info.action |= action;
link->eh_info.flags |= ehi_flags;
}
ata_port_schedule_eh(ap);
spin_unlock_irqrestore(ap->lock, flags);
if (!async) {
ata_port_wait_eh(ap);
WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
}
}
/*
 * On some hardware, a device fails to respond after being spun down for
 * suspend. As the device won't be used before being resumed, we don't
 * need to touch it. Ask EH to skip the usual stuff and proceed directly
 * to suspend.
*
* http://thread.gmane.org/gmane.linux.ide/46764
*/
static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
| ATA_EHI_NO_AUTOPSY
| ATA_EHI_NO_RECOVERY;
static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
}
static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
{
ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
}
static int ata_port_pm_suspend(struct device *dev)
{
struct ata_port *ap = to_ata_port(dev);
if (pm_runtime_suspended(dev))
return 0;
ata_port_suspend(ap, PMSG_SUSPEND);
return 0;
}
static int ata_port_pm_freeze(struct device *dev)
{
struct ata_port *ap = to_ata_port(dev);
if (pm_runtime_suspended(dev))
return 0;
ata_port_suspend(ap, PMSG_FREEZE);
return 0;
}
static int ata_port_pm_poweroff(struct device *dev)
{
ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
return 0;
}
static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
| ATA_EHI_QUIET;
static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
{
ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
}
static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
{
ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
}
static int ata_port_pm_resume(struct device *dev)
{
ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
}
/*
 * For ODDs, the upper layer polls for media change every few seconds,
 * which makes the device enter and leave the suspend state just as
 * often. As each suspend causes a hard/soft reset, runtime suspend
 * gains very little and the ODD may malfunction after being reset
 * constantly. So the idle callback here does not proceed to suspend
 * if a non-ZPODD capable ODD is attached to the port.
*/
static int ata_port_runtime_idle(struct device *dev)
{
struct ata_port *ap = to_ata_port(dev);
struct ata_link *link;
struct ata_device *adev;
ata_for_each_link(link, ap, HOST_FIRST) {
ata_for_each_dev(adev, link, ENABLED)
if (adev->class == ATA_DEV_ATAPI &&
!zpodd_dev_enabled(adev))
return -EBUSY;
}
return 0;
}
static int ata_port_runtime_suspend(struct device *dev)
{
ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
return 0;
}
static int ata_port_runtime_resume(struct device *dev)
{
ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
return 0;
}
static const struct dev_pm_ops ata_port_pm_ops = {
.suspend = ata_port_pm_suspend,
.resume = ata_port_pm_resume,
.freeze = ata_port_pm_freeze,
.thaw = ata_port_pm_resume,
.poweroff = ata_port_pm_poweroff,
.restore = ata_port_pm_resume,
.runtime_suspend = ata_port_runtime_suspend,
.runtime_resume = ata_port_runtime_resume,
.runtime_idle = ata_port_runtime_idle,
};
/* sas ports don't participate in pm runtime management of ata_ports,
* and need to resume ata devices at the domain level, not the per-port
* level. sas suspend/resume is async to allow parallel port recovery
* since sas has multiple ata_port instances per Scsi_Host.
*/
void ata_sas_port_suspend(struct ata_port *ap)
{
ata_port_suspend_async(ap, PMSG_SUSPEND);
}
EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
void ata_sas_port_resume(struct ata_port *ap)
{
ata_port_resume_async(ap, PMSG_RESUME);
}
EXPORT_SYMBOL_GPL(ata_sas_port_resume);
/**
* ata_host_suspend - suspend host
* @host: host to suspend
* @mesg: PM message
*
* Suspend @host. Actual operation is performed by port suspend.
*/
void ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
host->dev->power.power_state = mesg;
}
EXPORT_SYMBOL_GPL(ata_host_suspend);
/**
* ata_host_resume - resume host
* @host: host to resume
*
* Resume @host. Actual operation is performed by port resume.
*/
void ata_host_resume(struct ata_host *host)
{
host->dev->power.power_state = PMSG_ON;
}
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif
const struct device_type ata_port_type = {
.name = "ata_port",
#ifdef CONFIG_PM
.pm = &ata_port_pm_ops,
#endif
};
/**
* ata_dev_init - Initialize an ata_device structure
* @dev: Device structure to initialize
*
* Initialize @dev in preparation for probing.
*
* LOCKING:
* Inherited from caller.
*/
void ata_dev_init(struct ata_device *dev)
{
struct ata_link *link = ata_dev_phys_link(dev);
struct ata_port *ap = link->ap;
unsigned long flags;
/* SATA spd limit is bound to the attached device, reset together */
link->sata_spd_limit = link->hw_sata_spd_limit;
link->sata_spd = 0;
/* High bits of dev->flags are used to record warm plug
* requests which occur asynchronously. Synchronize using
* host lock.
*/
spin_lock_irqsave(ap->lock, flags);
dev->flags &= ~ATA_DFLAG_INIT_MASK;
dev->horkage = 0;
spin_unlock_irqrestore(ap->lock, flags);
memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
dev->pio_mask = UINT_MAX;
dev->mwdma_mask = UINT_MAX;
dev->udma_mask = UINT_MAX;
}
/**
* ata_link_init - Initialize an ata_link structure
* @ap: ATA port link is attached to
* @link: Link structure to initialize
* @pmp: Port multiplier port number
*
* Initialize @link.
*
* LOCKING:
* Kernel thread context (may sleep)
*/
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
int i;
/* clear everything except for devices */
memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
link->ap = ap;
link->pmp = pmp;
link->active_tag = ATA_TAG_POISON;
link->hw_sata_spd_limit = UINT_MAX;
/* can't use iterator, ap isn't initialized yet */
for (i = 0; i < ATA_MAX_DEVICES; i++) {
struct ata_device *dev = &link->device[i];
dev->link = link;
dev->devno = dev - link->device;
#ifdef CONFIG_ATA_ACPI
dev->gtf_filter = ata_acpi_gtf_filter;
#endif
ata_dev_init(dev);
}
}
/**
* sata_link_init_spd - Initialize link->sata_spd_limit
* @link: Link to configure sata_spd_limit for
*
* Initialize ``link->[hw_]sata_spd_limit`` to the currently
* configured value.
*
* LOCKING:
* Kernel thread context (may sleep).
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int sata_link_init_spd(struct ata_link *link)
{
u8 spd;
int rc;
rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
if (rc)
return rc;
spd = (link->saved_scontrol >> 4) & 0xf;
if (spd)
link->hw_sata_spd_limit &= (1 << spd) - 1;
ata_force_link_limits(link);
link->sata_spd_limit = link->hw_sata_spd_limit;
return 0;
}
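/*
 * Worked example (illustrative): an SControl SPD field of 0x2
 * (3.0 Gbps limit) gives hw_sata_spd_limit &= (1 << 2) - 1 = 0x3,
 * leaving only the 1.5 Gbps and 3.0 Gbps speed bits set.
 */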
/**
* ata_port_alloc - allocate and initialize basic ATA port resources
* @host: ATA host this allocated port belongs to
*
* Allocate and initialize basic ATA port resources.
*
* RETURNS:
 *	Allocated ATA port on success, NULL on failure.
*
* LOCKING:
* Inherited from calling layer (may sleep).
*/
struct ata_port *ata_port_alloc(struct ata_host *host)
{
struct ata_port *ap;
ap = kzalloc(sizeof(*ap), GFP_KERNEL);
if (!ap)
return NULL;
ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
ap->lock = &host->lock;
ap->print_id = -1;
ap->local_port_no = -1;
ap->host = host;
ap->dev = host->dev;
mutex_init(&ap->scsi_scan_mutex);
INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
INIT_DELAYED_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
INIT_LIST_HEAD(&ap->eh_done_q);
init_waitqueue_head(&ap->eh_wait_q);
init_completion(&ap->park_req_pending);
timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
TIMER_DEFERRABLE);
ap->cbl = ATA_CBL_NONE;
ata_link_init(ap, &ap->link, 0);
#ifdef ATA_IRQ_TRAP
ap->stats.unhandled_irq = 1;
ap->stats.idle_irq = 1;
#endif
ata_sff_port_init(ap);
return ap;
}
static void ata_devres_release(struct device *gendev, void *res)
{
struct ata_host *host = dev_get_drvdata(gendev);
int i;
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
if (!ap)
continue;
if (ap->scsi_host)
scsi_host_put(ap->scsi_host);
}
dev_set_drvdata(gendev, NULL);
ata_host_put(host);
}
static void ata_host_release(struct kref *kref)
{
struct ata_host *host = container_of(kref, struct ata_host, kref);
int i;
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
kfree(ap->pmp_link);
kfree(ap->slave_link);
kfree(ap->ncq_sense_buf);
kfree(ap);
host->ports[i] = NULL;
}
kfree(host);
}
void ata_host_get(struct ata_host *host)
{
kref_get(&host->kref);
}
void ata_host_put(struct ata_host *host)
{
kref_put(&host->kref, ata_host_release);
}
EXPORT_SYMBOL_GPL(ata_host_put);
/**
* ata_host_alloc - allocate and init basic ATA host resources
* @dev: generic device this host is associated with
* @max_ports: maximum number of ATA ports associated with this host
*
 *	Allocate and initialize basic ATA host resources. An LLD calls
 *	this function to allocate a host, initializes it fully, and
 *	attaches it using ata_host_register().
*
* @max_ports ports are allocated and host->n_ports is
* initialized to @max_ports. The caller is allowed to decrease
* host->n_ports before calling ata_host_register(). The unused
* ports will be automatically freed on registration.
*
* RETURNS:
 *	Allocated ATA host on success, NULL on failure.
*
* LOCKING:
* Inherited from calling layer (may sleep).
*/
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
struct ata_host *host;
size_t sz;
int i;
void *dr;
/* alloc a container for our list of ATA ports (buses) */
sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
host = kzalloc(sz, GFP_KERNEL);
if (!host)
return NULL;
if (!devres_open_group(dev, NULL, GFP_KERNEL))
goto err_free;
dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
if (!dr)
goto err_out;
devres_add(dev, dr);
dev_set_drvdata(dev, host);
spin_lock_init(&host->lock);
mutex_init(&host->eh_mutex);
host->dev = dev;
host->n_ports = max_ports;
kref_init(&host->kref);
/* allocate ports bound to this host */
for (i = 0; i < max_ports; i++) {
struct ata_port *ap;
ap = ata_port_alloc(host);
if (!ap)
goto err_out;
ap->port_no = i;
host->ports[i] = ap;
}
devres_remove_group(dev, NULL);
return host;
err_out:
devres_release_group(dev, NULL);
err_free:
kfree(host);
return NULL;
}
EXPORT_SYMBOL_GPL(ata_host_alloc);
/**
* ata_host_alloc_pinfo - alloc host and init with port_info array
* @dev: generic device this host is associated with
* @ppi: array of ATA port_info to initialize host with
* @n_ports: number of ATA ports attached to this host
*
 *	Allocate an ATA host and initialize it with info from @ppi. If
 *	NULL terminated, @ppi may contain fewer entries than @n_ports;
 *	the last entry will be used for the remaining ports.
*
* RETURNS:
 *	Allocated ATA host on success, NULL on failure.
*
* LOCKING:
* Inherited from calling layer (may sleep).
*/
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
const struct ata_port_info * const * ppi,
int n_ports)
{
const struct ata_port_info *pi = &ata_dummy_port_info;
struct ata_host *host;
int i, j;
host = ata_host_alloc(dev, n_ports);
if (!host)
return NULL;
for (i = 0, j = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
if (ppi[j])
pi = ppi[j++];
ap->pio_mask = pi->pio_mask;
ap->mwdma_mask = pi->mwdma_mask;
ap->udma_mask = pi->udma_mask;
ap->flags |= pi->flags;
ap->link.flags |= pi->link_flags;
ap->ops = pi->port_ops;
if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
host->ops = pi->port_ops;
}
return host;
}
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
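/*
 * Illustrative sketch (hypothetical two-port LLD): because @ppi may be
 * NULL terminated, a driver whose ports are identical can pass a single
 * port_info and let it cover both ports.
 */
static struct ata_host *example_alloc_two_ports(struct device *dev,
					const struct ata_port_info *pinfo)
{
	const struct ata_port_info *ppi[] = { pinfo, NULL };

	return ata_host_alloc_pinfo(dev, ppi, 2);
}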
static void ata_host_stop(struct device *gendev, void *res)
{
struct ata_host *host = dev_get_drvdata(gendev);
int i;
WARN_ON(!(host->flags & ATA_HOST_STARTED));
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
if (ap->ops->port_stop)
ap->ops->port_stop(ap);
}
if (host->ops->host_stop)
host->ops->host_stop(host);
}
/**
* ata_finalize_port_ops - finalize ata_port_operations
* @ops: ata_port_operations to finalize
*
* An ata_port_operations can inherit from another ops and that
* ops can again inherit from another. This can go on as many
* times as necessary as long as there is no loop in the
* inheritance chain.
*
* Ops tables are finalized when the host is started. NULL or
 *	unspecified entries are inherited from the closest ancestor
 *	which has the method, and the entry is populated with it.
* After finalization, the ops table directly points to all the
* methods and ->inherits is no longer necessary and cleared.
*
* Using ATA_OP_NULL, inheriting ops can force a method to NULL.
*
* LOCKING:
* None.
*/
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
static DEFINE_SPINLOCK(lock);
const struct ata_port_operations *cur;
void **begin = (void **)ops;
void **end = (void **)&ops->inherits;
void **pp;
if (!ops || !ops->inherits)
return;
spin_lock(&lock);
for (cur = ops->inherits; cur; cur = cur->inherits) {
void **inherit = (void **)cur;
for (pp = begin; pp < end; pp++, inherit++)
if (!*pp)
*pp = *inherit;
}
for (pp = begin; pp < end; pp++)
if (IS_ERR(*pp))
*pp = NULL;
ops->inherits = NULL;
spin_unlock(&lock);
}
/**
* ata_host_start - start and freeze ports of an ATA host
* @host: ATA host to start ports for
*
* Start and then freeze ports of @host. Started status is
* recorded in host->flags, so this function can be called
* multiple times. Ports are guaranteed to get started only
* once. If host->ops is not initialized yet, it is set to the
* first non-dummy port ops.
*
* LOCKING:
* Inherited from calling layer (may sleep).
*
* RETURNS:
* 0 if all ports are started successfully, -errno otherwise.
*/
int ata_host_start(struct ata_host *host)
{
int have_stop = 0;
void *start_dr = NULL;
int i, rc;
if (host->flags & ATA_HOST_STARTED)
return 0;
ata_finalize_port_ops(host->ops);
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
ata_finalize_port_ops(ap->ops);
if (!host->ops && !ata_port_is_dummy(ap))
host->ops = ap->ops;
if (ap->ops->port_stop)
have_stop = 1;
}
if (host->ops && host->ops->host_stop)
have_stop = 1;
if (have_stop) {
start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
if (!start_dr)
return -ENOMEM;
}
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
if (ap->ops->port_start) {
rc = ap->ops->port_start(ap);
if (rc) {
if (rc != -ENODEV)
dev_err(host->dev,
"failed to start port %d (errno=%d)\n",
i, rc);
goto err_out;
}
}
ata_eh_freeze_port(ap);
}
if (start_dr)
devres_add(host->dev, start_dr);
host->flags |= ATA_HOST_STARTED;
return 0;
err_out:
while (--i >= 0) {
struct ata_port *ap = host->ports[i];
if (ap->ops->port_stop)
ap->ops->port_stop(ap);
}
devres_free(start_dr);
return rc;
}
EXPORT_SYMBOL_GPL(ata_host_start);
/**
* ata_host_init - Initialize a host struct for sas (ipr, libsas)
* @host: host to initialize
* @dev: device host is attached to
* @ops: port_ops
*
*/
void ata_host_init(struct ata_host *host, struct device *dev,
struct ata_port_operations *ops)
{
spin_lock_init(&host->lock);
mutex_init(&host->eh_mutex);
host->n_tags = ATA_MAX_QUEUE;
host->dev = dev;
host->ops = ops;
kref_init(&host->kref);
}
EXPORT_SYMBOL_GPL(ata_host_init);
void ata_port_probe(struct ata_port *ap)
{
struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned long flags;
/* kick EH for boot probing */
spin_lock_irqsave(ap->lock, flags);
ehi->probe_mask |= ATA_ALL_DEVICES;
ehi->action |= ATA_EH_RESET;
ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
ap->pflags &= ~ATA_PFLAG_INITIALIZING;
ap->pflags |= ATA_PFLAG_LOADING;
ata_port_schedule_eh(ap);
spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_port_probe);
static void async_port_probe(void *data, async_cookie_t cookie)
{
struct ata_port *ap = data;
/*
* If we're not allowed to scan this host in parallel,
* we need to wait until all previous scans have completed
* before going further.
* Jeff Garzik says this is only within a controller, so we
* don't need to wait for port 0, only for later ports.
*/
if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
async_synchronize_cookie(cookie);
ata_port_probe(ap);
ata_port_wait_eh(ap);
/* in order to keep device order, we need to synchronize at this point */
async_synchronize_cookie(cookie);
ata_scsi_scan_host(ap, 1);
}
/**
* ata_host_register - register initialized ATA host
* @host: ATA host to register
* @sht: template for SCSI host
*
* Register initialized ATA host. @host is allocated using
* ata_host_alloc() and fully initialized by LLD. This function
 *	starts ports, registers @host with the ATA and SCSI layers and
 *	probes registered devices.
*
* LOCKING:
* Inherited from calling layer (may sleep).
*
* RETURNS:
* 0 on success, -errno otherwise.
*/
int ata_host_register(struct ata_host *host, const struct scsi_host_template *sht)
{
int i, rc;
host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);
/* host must have been started */
if (!(host->flags & ATA_HOST_STARTED)) {
dev_err(host->dev, "BUG: trying to register unstarted host\n");
WARN_ON(1);
return -EINVAL;
}
/* Blow away unused ports. This happens when LLD can't
* determine the exact number of ports to allocate at
* allocation time.
*/
for (i = host->n_ports; host->ports[i]; i++)
kfree(host->ports[i]);
/* give ports names and add SCSI hosts */
for (i = 0; i < host->n_ports; i++) {
host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
host->ports[i]->local_port_no = i + 1;
}
/* Create associated sysfs transport objects */
for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev, host->ports[i]);
		if (rc)
			goto err_tadd;
}
rc = ata_scsi_add_hosts(host, sht);
if (rc)
goto err_tadd;
/* set cable, sata_spd_limit and report */
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
unsigned int xfer_mask;
/* set SATA cable type if still unset */
if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
ap->cbl = ATA_CBL_SATA;
/* init sata_spd_limit to the current value */
sata_link_init_spd(&ap->link);
if (ap->slave_link)
sata_link_init_spd(ap->slave_link);
/* print per-port info to dmesg */
xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
ap->udma_mask);
if (!ata_port_is_dummy(ap)) {
ata_port_info(ap, "%cATA max %s %s\n",
(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
ata_mode_string(xfer_mask),
ap->link.eh_info.desc);
ata_ehi_clear_desc(&ap->link.eh_info);
} else
ata_port_info(ap, "DUMMY\n");
}
/* perform each probe asynchronously */
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
ap->cookie = async_schedule(async_port_probe, ap);
}
return 0;
err_tadd:
while (--i >= 0) {
ata_tport_delete(host->ports[i]);
}
return rc;
}
EXPORT_SYMBOL_GPL(ata_host_register);
/**
* ata_host_activate - start host, request IRQ and register it
* @host: target ATA host
* @irq: IRQ to request
* @irq_handler: irq_handler used when requesting IRQ
* @irq_flags: irq_flags used when requesting IRQ
* @sht: scsi_host_template to use when registering the host
*
* After allocating an ATA host and initializing it, most libata
* LLDs perform three steps to activate the host - start host,
* request IRQ and register it. This helper takes necessary
* arguments and performs the three steps in one go.
*
* An invalid IRQ skips the IRQ registration and expects the host to
* have set polling mode on the port. In this case, @irq_handler
* should be NULL.
*
* LOCKING:
* Inherited from calling layer (may sleep).
*
* RETURNS:
* 0 on success, -errno otherwise.
*/
int ata_host_activate(struct ata_host *host, int irq,
irq_handler_t irq_handler, unsigned long irq_flags,
const struct scsi_host_template *sht)
{
int i, rc;
char *irq_desc;
rc = ata_host_start(host);
if (rc)
return rc;
/* Special case for polling mode */
if (!irq) {
WARN_ON(irq_handler);
return ata_host_register(host, sht);
}
irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
dev_driver_string(host->dev),
dev_name(host->dev));
if (!irq_desc)
return -ENOMEM;
rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
irq_desc, host);
if (rc)
return rc;
for (i = 0; i < host->n_ports; i++)
ata_port_desc(host->ports[i], "irq %d", irq);
rc = ata_host_register(host, sht);
/* if failed, just free the IRQ and leave ports alone */
if (rc)
devm_free_irq(host->dev, irq, host);
return rc;
}
EXPORT_SYMBOL_GPL(ata_host_activate);
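/*
 * Illustrative sketch (hypothetical names): the tail of a typical PCI
 * LLD probe routine hands everything to ata_host_activate() in one
 * call; "example_interrupt" and "example_sht" stand in for the LLD's
 * own interrupt handler and SCSI host template.
 */
static int example_probe_tail(struct ata_host *host, int irq,
			      irq_handler_t example_interrupt,
			      const struct scsi_host_template *example_sht)
{
	return ata_host_activate(host, irq, example_interrupt,
				 IRQF_SHARED, example_sht);
}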
/**
* ata_port_detach - Detach ATA port in preparation of device removal
* @ap: ATA port to be detached
*
* Detach all ATA devices and the associated SCSI devices of @ap;
* then, remove the associated SCSI host. @ap is guaranteed to
* be quiescent on return from this function.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
static void ata_port_detach(struct ata_port *ap)
{
unsigned long flags;
struct ata_link *link;
struct ata_device *dev;
/* tell EH we're leaving & flush EH */
spin_lock_irqsave(ap->lock, flags);
ap->pflags |= ATA_PFLAG_UNLOADING;
ata_port_schedule_eh(ap);
spin_unlock_irqrestore(ap->lock, flags);
/* wait till EH commits suicide */
ata_port_wait_eh(ap);
/* it better be dead now */
WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
cancel_delayed_work_sync(&ap->hotplug_task);
cancel_delayed_work_sync(&ap->scsi_rescan_task);
/* clean up zpodd on port removal */
ata_for_each_link(link, ap, HOST_FIRST) {
ata_for_each_dev(dev, link, ALL) {
if (zpodd_dev_enabled(dev))
zpodd_exit(dev);
}
}
if (ap->pmp_link) {
int i;
for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
ata_tlink_delete(&ap->pmp_link[i]);
}
/* remove the associated SCSI host */
scsi_remove_host(ap->scsi_host);
ata_tport_delete(ap);
}
/**
* ata_host_detach - Detach all ports of an ATA host
* @host: Host to detach
*
* Detach all ports of @host.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
void ata_host_detach(struct ata_host *host)
{
int i;
for (i = 0; i < host->n_ports; i++) {
/* Ensure ata_port probe has completed */
async_synchronize_cookie(host->ports[i]->cookie + 1);
ata_port_detach(host->ports[i]);
}
/* the host is dead now, dissociate ACPI */
ata_acpi_dissociate(host);
}
EXPORT_SYMBOL_GPL(ata_host_detach);
#ifdef CONFIG_PCI
/**
* ata_pci_remove_one - PCI layer callback for device removal
* @pdev: PCI device that was removed
*
* PCI layer indicates to libata via this hook that hot-unplug or
* module unload event has occurred. Detach all ports. Resource
* release is handled via devres.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*/
void ata_pci_remove_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
ata_host_detach(host);
}
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
void ata_pci_shutdown_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int i;
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
ap->pflags |= ATA_PFLAG_FROZEN;
/* Disable port interrupts */
if (ap->ops->freeze)
ap->ops->freeze(ap);
/* Stop the port DMA engines */
if (ap->ops->port_stop)
ap->ops->port_stop(ap);
}
}
EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
unsigned long tmp = 0;
switch (bits->width) {
case 1: {
u8 tmp8 = 0;
pci_read_config_byte(pdev, bits->reg, &tmp8);
tmp = tmp8;
break;
}
case 2: {
u16 tmp16 = 0;
pci_read_config_word(pdev, bits->reg, &tmp16);
tmp = tmp16;
break;
}
case 4: {
u32 tmp32 = 0;
pci_read_config_dword(pdev, bits->reg, &tmp32);
tmp = tmp32;
break;
}
default:
return -EINVAL;
}
tmp &= bits->mask;
return (tmp == bits->val) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(pci_test_config_bits);
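/*
 * Illustrative sketch (hypothetical register layout): several IDE LLDs
 * keep per-port enable bits in PCI config space and test them through
 * this helper; config offset 0x41 / bit 7 below is an assumption made
 * up for the example.
 */
static int example_port_enabled(struct pci_dev *pdev)
{
	static const struct pci_bits enable_bit = {
		.reg = 0x41, .width = 1, .mask = 0x80, .val = 0x80,
	};

	return pci_test_config_bits(pdev, &enable_bit);
}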
#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
pci_save_state(pdev);
pci_disable_device(pdev);
if (mesg.event & PM_EVENT_SLEEP)
pci_set_power_state(pdev, PCI_D3hot);
}
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
int rc;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
rc = pcim_enable_device(pdev);
if (rc) {
dev_err(&pdev->dev,
"failed to enable device after resume (%d)\n", rc);
return rc;
}
pci_set_master(pdev);
return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = pci_get_drvdata(pdev);
ata_host_suspend(host, mesg);
ata_pci_device_do_suspend(pdev, mesg);
return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
int ata_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc == 0)
ata_host_resume(host);
return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
#endif /* CONFIG_PCI */
/**
* ata_platform_remove_one - Platform layer callback for device removal
* @pdev: Platform device that was removed
*
* Platform layer indicates to libata via this hook that hot-unplug or
* module unload event has occurred. Detach all ports. Resource
* release is handled via devres.
*
* LOCKING:
* Inherited from platform layer (may sleep).
*/
void ata_platform_remove_one(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
ata_host_detach(host);
}
EXPORT_SYMBOL_GPL(ata_platform_remove_one);
#ifdef CONFIG_ATA_FORCE
#define force_cbl(name, flag) \
{ #name, .cbl = (flag) }
#define force_spd_limit(spd, val) \
{ #spd, .spd_limit = (val) }
#define force_xfer(mode, shift) \
{ #mode, .xfer_mask = (1UL << (shift)) }
#define force_lflag_on(name, flags) \
{ #name, .lflags_on = (flags) }
#define force_lflag_onoff(name, flags) \
{ "no" #name, .lflags_on = (flags) }, \
{ #name, .lflags_off = (flags) }
#define force_horkage_on(name, flag) \
{ #name, .horkage_on = (flag) }
#define force_horkage_onoff(name, flag) \
{ "no" #name, .horkage_on = (flag) }, \
{ #name, .horkage_off = (flag) }
static const struct ata_force_param force_tbl[] __initconst = {
force_cbl(40c, ATA_CBL_PATA40),
force_cbl(80c, ATA_CBL_PATA80),
force_cbl(short40c, ATA_CBL_PATA40_SHORT),
force_cbl(unk, ATA_CBL_PATA_UNK),
force_cbl(ign, ATA_CBL_PATA_IGN),
force_cbl(sata, ATA_CBL_SATA),
force_spd_limit(1.5Gbps, 1),
force_spd_limit(3.0Gbps, 2),
force_xfer(pio0, ATA_SHIFT_PIO + 0),
force_xfer(pio1, ATA_SHIFT_PIO + 1),
force_xfer(pio2, ATA_SHIFT_PIO + 2),
force_xfer(pio3, ATA_SHIFT_PIO + 3),
force_xfer(pio4, ATA_SHIFT_PIO + 4),
force_xfer(pio5, ATA_SHIFT_PIO + 5),
force_xfer(pio6, ATA_SHIFT_PIO + 6),
force_xfer(mwdma0, ATA_SHIFT_MWDMA + 0),
force_xfer(mwdma1, ATA_SHIFT_MWDMA + 1),
force_xfer(mwdma2, ATA_SHIFT_MWDMA + 2),
force_xfer(mwdma3, ATA_SHIFT_MWDMA + 3),
force_xfer(mwdma4, ATA_SHIFT_MWDMA + 4),
force_xfer(udma0, ATA_SHIFT_UDMA + 0),
force_xfer(udma16, ATA_SHIFT_UDMA + 0),
force_xfer(udma/16, ATA_SHIFT_UDMA + 0),
force_xfer(udma1, ATA_SHIFT_UDMA + 1),
force_xfer(udma25, ATA_SHIFT_UDMA + 1),
force_xfer(udma/25, ATA_SHIFT_UDMA + 1),
force_xfer(udma2, ATA_SHIFT_UDMA + 2),
force_xfer(udma33, ATA_SHIFT_UDMA + 2),
force_xfer(udma/33, ATA_SHIFT_UDMA + 2),
force_xfer(udma3, ATA_SHIFT_UDMA + 3),
force_xfer(udma44, ATA_SHIFT_UDMA + 3),
force_xfer(udma/44, ATA_SHIFT_UDMA + 3),
force_xfer(udma4, ATA_SHIFT_UDMA + 4),
force_xfer(udma66, ATA_SHIFT_UDMA + 4),
force_xfer(udma/66, ATA_SHIFT_UDMA + 4),
force_xfer(udma5, ATA_SHIFT_UDMA + 5),
force_xfer(udma100, ATA_SHIFT_UDMA + 5),
force_xfer(udma/100, ATA_SHIFT_UDMA + 5),
force_xfer(udma6, ATA_SHIFT_UDMA + 6),
force_xfer(udma133, ATA_SHIFT_UDMA + 6),
force_xfer(udma/133, ATA_SHIFT_UDMA + 6),
force_xfer(udma7, ATA_SHIFT_UDMA + 7),
force_lflag_on(nohrst, ATA_LFLAG_NO_HRST),
force_lflag_on(nosrst, ATA_LFLAG_NO_SRST),
force_lflag_on(norst, ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST),
force_lflag_on(rstonce, ATA_LFLAG_RST_ONCE),
force_lflag_onoff(dbdelay, ATA_LFLAG_NO_DEBOUNCE_DELAY),
force_horkage_onoff(ncq, ATA_HORKAGE_NONCQ),
force_horkage_onoff(ncqtrim, ATA_HORKAGE_NO_NCQ_TRIM),
force_horkage_onoff(ncqati, ATA_HORKAGE_NO_NCQ_ON_ATI),
force_horkage_onoff(trim, ATA_HORKAGE_NOTRIM),
force_horkage_on(trim_zero, ATA_HORKAGE_ZERO_AFTER_TRIM),
force_horkage_on(max_trim_128m, ATA_HORKAGE_MAX_TRIM_128M),
force_horkage_onoff(dma, ATA_HORKAGE_NODMA),
force_horkage_on(atapi_dmadir, ATA_HORKAGE_ATAPI_DMADIR),
force_horkage_on(atapi_mod16_dma, ATA_HORKAGE_ATAPI_MOD16_DMA),
force_horkage_onoff(dmalog, ATA_HORKAGE_NO_DMA_LOG),
force_horkage_onoff(iddevlog, ATA_HORKAGE_NO_ID_DEV_LOG),
force_horkage_onoff(logdir, ATA_HORKAGE_NO_LOG_DIR),
force_horkage_on(max_sec_128, ATA_HORKAGE_MAX_SEC_128),
force_horkage_on(max_sec_1024, ATA_HORKAGE_MAX_SEC_1024),
force_horkage_on(max_sec_lba48, ATA_HORKAGE_MAX_SEC_LBA48),
force_horkage_onoff(lpm, ATA_HORKAGE_NOLPM),
force_horkage_onoff(setxfer, ATA_HORKAGE_NOSETXFER),
force_horkage_on(dump_id, ATA_HORKAGE_DUMP_ID),
force_horkage_onoff(fua, ATA_HORKAGE_NO_FUA),
force_horkage_on(disable, ATA_HORKAGE_DISABLE),
};
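/*
 * Example of the resulting kernel parameter syntax (values chosen for
 * illustration only):
 *
 *	libata.force=noncq		applies to all devices
 *	libata.force=1:udma/33		limits port 1
 *	libata.force=1.00:disable	targets device 0 on port 1
 *
 * Entries without a "P[.D]" ID inherit the ID of the previous entry,
 * as implemented in ata_parse_force_param() below.
 */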
static int __init ata_parse_force_one(char **cur,
struct ata_force_ent *force_ent,
const char **reason)
{
char *start = *cur, *p = *cur;
char *id, *val, *endp;
const struct ata_force_param *match_fp = NULL;
int nr_matches = 0, i;
/* find where this param ends and update *cur */
while (*p != '\0' && *p != ',')
p++;
if (*p == '\0')
*cur = p;
else
*cur = p + 1;
*p = '\0';
/* parse */
p = strchr(start, ':');
if (!p) {
val = strstrip(start);
goto parse_val;
}
*p = '\0';
id = strstrip(start);
val = strstrip(p + 1);
/* parse id */
p = strchr(id, '.');
if (p) {
*p++ = '\0';
force_ent->device = simple_strtoul(p, &endp, 10);
if (p == endp || *endp != '\0') {
*reason = "invalid device";
return -EINVAL;
}
}
force_ent->port = simple_strtoul(id, &endp, 10);
if (id == endp || *endp != '\0') {
*reason = "invalid port/link";
return -EINVAL;
}
parse_val:
/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
const struct ata_force_param *fp = &force_tbl[i];
if (strncasecmp(val, fp->name, strlen(val)))
continue;
nr_matches++;
match_fp = fp;
if (strcasecmp(val, fp->name) == 0) {
nr_matches = 1;
break;
}
}
if (!nr_matches) {
*reason = "unknown value";
return -EINVAL;
}
if (nr_matches > 1) {
*reason = "ambiguous value";
return -EINVAL;
}
force_ent->param = *match_fp;
return 0;
}
static void __init ata_parse_force_param(void)
{
int idx = 0, size = 1;
int last_port = -1, last_device = -1;
char *p, *cur, *next;
/* Calculate maximum number of params and allocate ata_force_tbl */
for (p = ata_force_param_buf; *p; p++)
if (*p == ',')
size++;
ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
if (!ata_force_tbl) {
printk(KERN_WARNING "ata: failed to extend force table, "
"libata.force ignored\n");
return;
}
/* parse and populate the table */
for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
const char *reason = "";
struct ata_force_ent te = { .port = -1, .device = -1 };
next = cur;
if (ata_parse_force_one(&next, &te, &reason)) {
printk(KERN_WARNING "ata: failed to parse force "
"parameter \"%s\" (%s)\n",
cur, reason);
continue;
}
if (te.port == -1) {
te.port = last_port;
te.device = last_device;
}
ata_force_tbl[idx++] = te;
last_port = te.port;
last_device = te.device;
}
ata_force_tbl_size = idx;
}
static void ata_free_force_param(void)
{
kfree(ata_force_tbl);
}
#else
static inline void ata_parse_force_param(void) { }
static inline void ata_free_force_param(void) { }
#endif
static int __init ata_init(void)
{
int rc;
ata_parse_force_param();
rc = ata_sff_init();
if (rc) {
ata_free_force_param();
return rc;
}
libata_transport_init();
ata_scsi_transport_template = ata_attach_transport();
if (!ata_scsi_transport_template) {
ata_sff_exit();
rc = -ENOMEM;
goto err_out;
}
printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
return 0;
err_out:
return rc;
}
static void __exit ata_exit(void)
{
ata_release_transport(ata_scsi_transport_template);
libata_transport_exit();
ata_sff_exit();
ata_free_force_param();
}
subsys_initcall(ata_init);
module_exit(ata_exit);
static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
int ata_ratelimit(void)
{
return __ratelimit(&ratelimit);
}
EXPORT_SYMBOL_GPL(ata_ratelimit);
/**
* ata_msleep - ATA EH owner aware msleep
* @ap: ATA port to attribute the sleep to
* @msecs: duration to sleep in milliseconds
*
* Sleeps @msecs. If the current task is owner of @ap's EH, the
* ownership is released before going to sleep and reacquired
* after the sleep is complete. IOW, other ports sharing the
* @ap->host will be allowed to own the EH while this task is
* sleeping.
*
* LOCKING:
* Might sleep.
*/
void ata_msleep(struct ata_port *ap, unsigned int msecs)
{
bool owns_eh = ap && ap->host->eh_owner == current;
if (owns_eh)
ata_eh_release(ap);
if (msecs < 20) {
unsigned long usecs = msecs * USEC_PER_MSEC;
usleep_range(usecs, usecs + 50);
} else {
msleep(msecs);
}
if (owns_eh)
ata_eh_acquire(ap);
}
EXPORT_SYMBOL_GPL(ata_msleep);
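/*
 * Illustrative sketch: code that must pause briefly during reset or
 * probe uses ata_msleep() rather than msleep() so that other ports on
 * the same host can run EH meanwhile (the 20ms here is hypothetical).
 */
static void example_debounce_pause(struct ata_port *ap)
{
	ata_msleep(ap, 20);
}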
/**
* ata_wait_register - wait until register value changes
* @ap: ATA port to wait register for, can be NULL
* @reg: IO-mapped register
* @mask: Mask to apply to read register value
* @val: Wait condition
* @interval: polling interval in milliseconds
* @timeout: timeout in milliseconds
*
 *	Waiting for some bits of a register to change is a common
 *	operation for ATA controllers. This function reads the 32-bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & @mask) != @val
 *
 *	If the condition is met, it returns; otherwise, the read is
 *	repeated every @interval milliseconds until @timeout expires.
*
* LOCKING:
* Kernel thread context (may sleep)
*
* RETURNS:
* The final register value.
*/
u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
unsigned int interval, unsigned int timeout)
{
unsigned long deadline;
u32 tmp;
tmp = ioread32(reg);
/* Calculate timeout _after_ the first read to make sure
* preceding writes reach the controller before starting to
* eat away the timeout.
*/
deadline = ata_deadline(jiffies, timeout);
while ((tmp & mask) == val && time_before(jiffies, deadline)) {
ata_msleep(ap, interval);
tmp = ioread32(reg);
}
return tmp;
}
EXPORT_SYMBOL_GPL(ata_wait_register);
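/*
 * Illustrative sketch (hypothetical register): poll until bit 0 of a
 * 32-bit MMIO status register clears, rechecking every 10ms for up to
 * one second; the internal loop runs while (read & mask) == val.
 */
static u32 example_wait_idle(struct ata_port *ap, void __iomem *mmio_status)
{
	return ata_wait_register(ap, mmio_status, 0x1, 0x1, 10, 1000);
}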
/*
* Dummy port_ops
*/
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
return AC_ERR_SYSTEM;
}
static void ata_dummy_error_handler(struct ata_port *ap)
{
/* truly dummy */
}
struct ata_port_operations ata_dummy_port_ops = {
.qc_prep = ata_noop_qc_prep,
.qc_issue = ata_dummy_qc_issue,
.error_handler = ata_dummy_error_handler,
.sched_eh = ata_std_sched_eh,
.end_eh = ata_std_end_eh,
};
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
const struct ata_port_info ata_dummy_port_info = {
.port_ops = &ata_dummy_port_ops,
};
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
void ata_print_version(const struct device *dev, const char *version)
{
dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_status);
| linux-master | drivers/ata/libata-core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (c) 1997-1998 Grant R. Guenther <[email protected]>
*
* This is the low level protocol driver for the EPAT parallel
* to IDE adapter from Shuttle Technologies. This adapter is
* used in many popular parallel port disk products such as the
* SyQuest EZ drives, the Avatar Shark and the Imation SuperDisk.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
#include "pata_parport.h"
#define j44(a, b) (((a >> 4) & 0x0f) + (b & 0xf0))
#define j53(a, b) (((a >> 3) & 0x1f) + ((b << 4) & 0xe0))
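/*
 * Worked example (illustrative): in 4-bit mode a data byte arrives as
 * two status reads, and j44() rebuilds it from the useful nibbles:
 * j44(0x5a, 0xc3) = ((0x5a >> 4) & 0x0f) + (0xc3 & 0xf0) = 0xc5.
 */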
static int epatc8;
module_param(epatc8, int, 0);
MODULE_PARM_DESC(epatc8,
"support for the Shuttle EP1284 chip, "
"used in any recent Imation SuperDisk (LS-120) drive.");
/*
* cont = 0 IDE register file
* cont = 1 IDE control registers
* cont = 2 internal EPAT registers
*/
static int cont_map[3] = { 0x18, 0x10, 0 };
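/*
 * Illustrative note: a register access is offset by cont_map[cont],
 * e.g. regr 2 with cont == 1 goes to chip address 0x10 + 2 = 0x12,
 * while the same regr with cont == 0 goes to 0x18 + 2 = 0x1a.
 */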
static void epat_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{
int r = regr + cont_map[cont];
switch (pi->mode) {
case 0:
case 1:
case 2:
w0(0x60+r); w2(1); w0(val); w2(4);
break;
case 3:
case 4:
case 5:
w3(0x40+r); w4(val);
break;
}
}
static int epat_read_regr(struct pi_adapter *pi, int cont, int regr)
{
int a, b, r;
r = regr + cont_map[cont];
switch (pi->mode) {
case 0:
w0(r); w2(1); w2(3);
a = r1(); w2(4); b = r1();
return j44(a, b);
case 1:
w0(0x40+r); w2(1); w2(4);
a = r1(); b = r2(); w0(0xff);
return j53(a, b);
case 2:
w0(0x20+r); w2(1); w2(0x25);
a = r0(); w2(4);
return a;
case 3:
case 4:
case 5:
w3(r); w2(0x24); a = r4(); w2(4);
return a;
}
return -1; /* never gets here */
}
static void epat_read_block(struct pi_adapter *pi, char *buf, int count)
{
int k, ph, a, b;
switch (pi->mode) {
case 0:
w0(7); w2(1); w2(3); w0(0xff);
ph = 0;
for (k = 0; k < count; k++) {
if (k == count-1)
w0(0xfd);
w2(6 + ph); a = r1();
if (a & 8) {
b = a;
} else {
w2(4+ph); b = r1();
}
buf[k] = j44(a, b);
ph = 1 - ph;
}
w0(0); w2(4);
break;
case 1:
w0(0x47); w2(1); w2(5); w0(0xff);
ph = 0;
for (k = 0; k < count; k++) {
if (k == count - 1)
w0(0xfd);
w2(4 + ph);
a = r1(); b = r2();
buf[k] = j53(a, b);
ph = 1 - ph;
}
w0(0); w2(4);
break;
case 2:
w0(0x27); w2(1); w2(0x25); w0(0);
ph = 0;
for (k = 0; k < count - 1; k++) {
w2(0x24 + ph);
buf[k] = r0();
ph = 1 - ph;
}
w2(0x26); w2(0x27);
buf[count - 1] = r0();
w2(0x25); w2(4);
break;
case 3:
w3(0x80); w2(0x24);
for (k = 0; k < count - 1; k++)
buf[k] = r4();
w2(4); w3(0xa0); w2(0x24);
buf[count - 1] = r4();
w2(4);
break;
case 4:
w3(0x80); w2(0x24);
for (k = 0; k < count / 2 - 1; k++)
((u16 *)buf)[k] = r4w();
buf[count - 2] = r4();
w2(4); w3(0xa0); w2(0x24);
buf[count - 1] = r4();
w2(4);
break;
case 5:
w3(0x80); w2(0x24);
for (k = 0; k < count / 4 - 1; k++)
((u32 *)buf)[k] = r4l();
for (k = count - 4; k < count - 1; k++)
buf[k] = r4();
w2(4); w3(0xa0); w2(0x24);
buf[count - 1] = r4();
w2(4);
break;
}
}
static void epat_write_block(struct pi_adapter *pi, char *buf, int count)
{
int ph, k;
switch (pi->mode) {
case 0:
case 1:
case 2:
w0(0x67); w2(1); w2(5);
ph = 0;
for (k = 0; k < count; k++) {
w0(buf[k]);
w2(4 + ph);
ph = 1 - ph;
}
w2(7); w2(4);
break;
case 3:
w3(0xc0);
for (k = 0; k < count; k++)
w4(buf[k]);
w2(4);
break;
case 4:
w3(0xc0);
for (k = 0; k < count / 2; k++)
w4w(((u16 *)buf)[k]);
w2(4);
break;
case 5:
w3(0xc0);
for (k = 0; k < count / 4; k++)
w4l(((u32 *)buf)[k]);
w2(4);
break;
}
}
/* these macros access the EPAT registers in native addressing */
#define WR(r, v) epat_write_regr(pi, 2, r, v)
#define RR(r) epat_read_regr(pi, 2, r)
/* and these access the IDE task file */
#define WRi(r, v) epat_write_regr(pi, 0, r, v)
#define RRi(r) epat_read_regr(pi, 0, r)
/* FIXME: the CPP stuff should be fixed to handle multiple EPATs on a chain */
#define CPP(x) \
do { \
w2(4); w0(0x22); w0(0xaa); \
w0(0x55); w0(0); w0(0xff); \
w0(0x87); w0(0x78); w0(x); \
w2(4); w2(5); w2(4); w0(0xff); \
} while (0)
static void epat_connect(struct pi_adapter *pi)
{
pi->saved_r0 = r0();
pi->saved_r2 = r2();
/* Initialize the chip */
CPP(0);
if (epatc8) {
CPP(0x40); CPP(0xe0);
w0(0); w2(1); w2(4);
WR(0x8, 0x12);
WR(0xc, 0x14);
WR(0x12, 0x10);
WR(0xe, 0xf);
WR(0xf, 4);
/* WR(0xe,0xa);WR(0xf,4); */
WR(0xe, 0xd);
WR(0xf, 0);
/* CPP(0x30); */
}
/* Connect to the chip */
CPP(0xe0);
w0(0); w2(1); w2(4); /* Idle into SPP */
if (pi->mode >= 3) {
w0(0); w2(1); w2(4); w2(0xc);
/* Request EPP */
w0(0x40); w2(6); w2(7); w2(4); w2(0xc); w2(4);
}
if (!epatc8) {
WR(8, 0x10);
WR(0xc, 0x14);
WR(0xa, 0x38);
WR(0x12, 0x10);
}
}
static void epat_disconnect(struct pi_adapter *pi)
{
CPP(0x30);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
static int epat_test_proto(struct pi_adapter *pi)
{
int k, j, f, cc;
int e[2] = { 0, 0 };
char scratch[512];
epat_connect(pi);
cc = RR(0xd);
epat_disconnect(pi);
epat_connect(pi);
	for (j = 0; j < 2; j++) {
WRi(6, 0xa0 + j * 0x10);
for (k = 0; k < 256; k++) {
WRi(2, k ^ 0xaa);
WRi(3, k ^ 0x55);
if (RRi(2) != (k ^ 0xaa))
e[j]++;
}
}
epat_disconnect(pi);
f = 0;
epat_connect(pi);
WR(0x13, 1); WR(0x13, 0); WR(0xa, 0x11);
epat_read_block(pi, scratch, 512);
for (k = 0; k < 256; k++) {
if ((scratch[2 * k] & 0xff) != k)
f++;
if ((scratch[2 * k + 1] & 0xff) != 0xff - k)
f++;
}
epat_disconnect(pi);
dev_dbg(&pi->dev,
"epat: port 0x%x, mode %d, ccr %x, test=(%d,%d,%d)\n",
pi->port, pi->mode, cc, e[0], e[1], f);
return (e[0] && e[1]) || f;
}
static void epat_log_adapter(struct pi_adapter *pi)
{
int ver;
char *mode_string[6] =
{ "4-bit", "5/3", "8-bit", "EPP-8", "EPP-16", "EPP-32" };
epat_connect(pi);
WR(0xa, 0x38); /* read the version code */
ver = RR(0xb);
epat_disconnect(pi);
dev_info(&pi->dev,
"Shuttle EPAT chip %x at 0x%x, mode %d (%s), delay %d\n",
ver, pi->port, pi->mode, mode_string[pi->mode], pi->delay);
}
static struct pi_protocol epat = {
.owner = THIS_MODULE,
.name = "epat",
.max_mode = 6,
.epp_first = 3,
.default_delay = 1,
.max_units = 1,
.write_regr = epat_write_regr,
.read_regr = epat_read_regr,
.write_block = epat_write_block,
.read_block = epat_read_block,
.connect = epat_connect,
.disconnect = epat_disconnect,
.test_proto = epat_test_proto,
.log_adapter = epat_log_adapter,
};
static int __init epat_init(void)
{
#ifdef CONFIG_PATA_PARPORT_EPATC8
epatc8 = 1;
#endif
return pata_parport_register_driver(&epat);
}
static void __exit epat_exit(void)
{
pata_parport_unregister_driver(&epat);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Grant R. Guenther <[email protected]>");
MODULE_DESCRIPTION("Shuttle Technologies EPAT parallel port IDE adapter "
"protocol driver");
module_init(epat_init)
module_exit(epat_exit)
| linux-master | drivers/ata/pata_parport/epat.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (c) 1997-1998 Grant R. Guenther <[email protected]>
*
* dstr.c is a low-level protocol driver for the DataStor EP2000 parallel
* to IDE adapter chip.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
#include "pata_parport.h"
/*
* mode codes: 0 nybble reads, 8-bit writes
* 1 8-bit reads and writes
* 2 8-bit EPP mode
* 3 EPP-16
* 4 EPP-32
*/
#define j44(a, b) (((a >> 3) & 0x07) | ((~a >> 4) & 0x08) | \
((b << 1) & 0x70) | ((~b) & 0x80))
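/*
 * j44() rebuilds a data byte from two nibble reads on the printer
 * status lines; the ~ terms undo the hardware inversion of nBUSY.
 */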
#define P1 w2(5);w2(0xd);w2(5);w2(4);
#define P2 w2(5);w2(7);w2(5);w2(4);
#define P3 w2(6);w2(4);w2(6);w2(4);
/*
* cont = 0 - access the IDE register file
* cont = 1 - access the IDE command set
*/
static int cont_map[2] = { 0x20, 0x40 };
static int dstr_read_regr(struct pi_adapter *pi, int cont, int regr)
{
int a, b, r;
r = regr + cont_map[cont];
w0(0x81); P1;
if (pi->mode)
w0(0x11);
else
w0(1);
P2; w0(r); P1;
switch (pi->mode) {
case 0:
w2(6); a = r1(); w2(4); w2(6); b = r1(); w2(4);
return j44(a, b);
case 1:
w0(0); w2(0x26); a = r0(); w2(4);
return a;
case 2:
case 3:
case 4:
w2(0x24); a = r4(); w2(4);
return a;
}
return -1;
}
static void dstr_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{
int r = regr + cont_map[cont];
w0(0x81); P1;
if (pi->mode >= 2)
w0(0x11);
else
w0(1);
P2; w0(r); P1;
switch (pi->mode) {
case 0:
case 1:
w0(val); w2(5); w2(7); w2(5); w2(4);
break;
case 2:
case 3:
case 4:
w4(val);
break;
}
}
#define CCP(x) \
do { \
w0(0xff); w2(0xc); w2(4); \
w0(0xaa); w0(0x55); w0(0); w0(0xff); \
w0(0x87); w0(0x78); \
w0(x); w2(5); w2(4); \
} while (0)
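/*
 * Same 0xaa, 0x55, 0, 0xff, 0x87, 0x78 wake-up key as the other
 * paride-style chips; CCP(0xe0) connects, CCP(0x30) disconnects.
 */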
static void dstr_connect(struct pi_adapter *pi)
{
pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(4); CCP(0xe0); w0(0xff);
}
static void dstr_disconnect(struct pi_adapter *pi)
{
CCP(0x30);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
static void dstr_read_block(struct pi_adapter *pi, char *buf, int count)
{
int k, a, b;
w0(0x81); P1;
if (pi->mode)
w0(0x19);
else
w0(9);
P2; w0(0x82); P1; P3; w0(0x20); P1;
switch (pi->mode) {
case 0:
for (k = 0; k < count; k++) {
w2(6); a = r1(); w2(4);
w2(6); b = r1(); w2(4);
buf[k] = j44(a, b);
}
break;
case 1:
w0(0);
for (k = 0; k < count; k++) {
w2(0x26);
buf[k] = r0();
w2(0x24);
}
w2(4);
break;
case 2:
w2(0x24);
for (k = 0; k < count; k++)
buf[k] = r4();
w2(4);
break;
case 3:
w2(0x24);
for (k = 0; k < count / 2; k++)
((u16 *)buf)[k] = r4w();
w2(4);
break;
case 4:
w2(0x24);
for (k = 0; k < count / 4; k++)
((u32 *)buf)[k] = r4l();
w2(4);
break;
}
}
static void dstr_write_block(struct pi_adapter *pi, char *buf, int count)
{
int k;
w0(0x81); P1;
if (pi->mode)
w0(0x19);
else
w0(9);
P2; w0(0x82); P1; P3; w0(0x20); P1;
switch (pi->mode) {
case 0:
case 1:
for (k = 0; k < count; k++) {
w2(5);
w0(buf[k]);
w2(7);
}
w2(5); w2(4);
break;
case 2:
w2(0xc5);
for (k = 0; k < count; k++)
w4(buf[k]);
w2(0xc4);
break;
case 3:
w2(0xc5);
for (k = 0; k < count / 2; k++)
w4w(((u16 *)buf)[k]);
w2(0xc4);
break;
case 4:
w2(0xc5);
for (k = 0; k < count / 4; k++)
w4l(((u32 *)buf)[k]);
w2(0xc4);
break;
}
}
static void dstr_log_adapter(struct pi_adapter *pi)
{
char *mode_string[5] = { "4-bit", "8-bit", "EPP-8", "EPP-16", "EPP-32" };
dev_info(&pi->dev,
"DataStor EP2000 at 0x%x, mode %d (%s), delay %d\n",
pi->port, pi->mode, mode_string[pi->mode], pi->delay);
}
static struct pi_protocol dstr = {
.owner = THIS_MODULE,
.name = "dstr",
.max_mode = 5,
.epp_first = 2,
.default_delay = 1,
.max_units = 1,
.write_regr = dstr_write_regr,
.read_regr = dstr_read_regr,
.write_block = dstr_write_block,
.read_block = dstr_read_block,
.connect = dstr_connect,
.disconnect = dstr_disconnect,
.log_adapter = dstr_log_adapter,
};
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Grant R. Guenther <[email protected]>");
MODULE_DESCRIPTION("DataStor EP2000 parallel port IDE adapter protocol driver");
module_pata_parport_driver(dstr);
| linux-master | drivers/ata/pata_parport/dstr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (c) 1996-1998 Grant R. Guenther <[email protected]>
*
* frpw.c is a low-level protocol driver for the Freecom "Power" parallel port
* IDE adapter.
*
* Some applications of this adapter may require a "printer" reset prior to
* loading the driver. This can be done by loading and unloading the "lp"
* driver, or it can be done by this driver if you define FRPW_HARD_RESET.
* The latter is not recommended as it may upset devices on other ports.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
#include "pata_parport.h"
#define cec4 w2(0xc);w2(0xe);w2(0xe);w2(0xc);w2(4);w2(4);w2(4);
#define j44(l, h) (((l >> 4) & 0x0f) | (h & 0xf0))
/*
* cont = 0 - access the IDE register file
* cont = 1 - access the IDE command set
*/
static int cont_map[2] = { 0x08, 0x10 };
static int frpw_read_regr(struct pi_adapter *pi, int cont, int regr)
{
int h, l, r;
r = regr + cont_map[cont];
w2(4);
w0(r); cec4;
w2(6); l = r1();
w2(4); h = r1();
w2(4);
return j44(l, h);
}
static void frpw_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{
int r = regr + cont_map[cont];
w2(4); w0(r); cec4;
w0(val);
w2(5); w2(7); w2(5); w2(4);
}
static void frpw_read_block_int(struct pi_adapter *pi, char *buf, int count,
int regr)
{
int h, l, k, ph;
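	/*
	 * Note: in modes 3-5 the final byte(s) of a block are read
	 * outside the burst loop, after the w2(0xac)/w2(0xa4) sequence,
	 * presumably so that the chip terminates the transfer cleanly.
	 */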
switch (pi->mode) {
case 0:
w2(4); w0(regr); cec4;
for (k = 0; k < count; k++) {
w2(6); l = r1();
w2(4); h = r1();
buf[k] = j44(l, h);
}
w2(4);
break;
case 1:
ph = 2;
w2(4); w0(regr + 0xc0); cec4;
w0(0xff);
for (k = 0; k < count; k++) {
w2(0xa4 + ph);
buf[k] = r0();
ph = 2 - ph;
}
w2(0xac); w2(0xa4); w2(4);
break;
case 2:
w2(4); w0(regr + 0x80); cec4;
for (k = 0; k < count; k++)
buf[k] = r4();
w2(0xac); w2(0xa4);
w2(4);
break;
case 3:
w2(4); w0(regr + 0x80); cec4;
for (k = 0; k < count - 2; k++)
buf[k] = r4();
w2(0xac); w2(0xa4);
buf[count - 2] = r4();
buf[count - 1] = r4();
w2(4);
break;
case 4:
w2(4); w0(regr + 0x80); cec4;
for (k = 0; k < count / 2 - 1; k++)
((u16 *)buf)[k] = r4w();
w2(0xac); w2(0xa4);
buf[count - 2] = r4();
buf[count - 1] = r4();
w2(4);
break;
case 5:
w2(4); w0(regr + 0x80); cec4;
for (k = 0; k < count / 4 - 1; k++)
((u32 *)buf)[k] = r4l();
buf[count - 4] = r4();
buf[count - 3] = r4();
w2(0xac); w2(0xa4);
buf[count - 2] = r4();
buf[count - 1] = r4();
w2(4);
break;
}
}
static void frpw_read_block(struct pi_adapter *pi, char *buf, int count)
{
frpw_read_block_int(pi, buf, count, 0x08);
}
static void frpw_write_block(struct pi_adapter *pi, char *buf, int count)
{
int k;
switch (pi->mode) {
case 0:
case 1:
case 2:
w2(4); w0(8); cec4; w2(5);
for (k = 0; k < count; k++) {
w0(buf[k]);
w2(7); w2(5);
}
w2(4);
break;
case 3:
w2(4); w0(0xc8); cec4; w2(5);
for (k = 0; k < count; k++)
w4(buf[k]);
w2(4);
break;
case 4:
w2(4); w0(0xc8); cec4; w2(5);
for (k = 0; k < count / 2; k++)
w4w(((u16 *)buf)[k]);
w2(4);
break;
case 5:
w2(4); w0(0xc8); cec4; w2(5);
for (k = 0; k < count / 4; k++)
w4l(((u32 *)buf)[k]);
w2(4);
break;
}
}
static void frpw_connect(struct pi_adapter *pi)
{
pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(4);
}
static void frpw_disconnect(struct pi_adapter *pi)
{
w2(4); w0(0x20); cec4;
w0(pi->saved_r0);
w2(pi->saved_r2);
}
/*
* Stub logic to see if PNP string is available - used to distinguish
* between the Xilinx and ASIC implementations of the Freecom adapter.
 * Returns the chip type: 0 = Xilinx, 1 = ASIC.
*/
static int frpw_test_pnp(struct pi_adapter *pi)
{
int olddelay, a, b;
#ifdef FRPW_HARD_RESET
w0(0); w2(8); udelay(50); w2(0xc); /* parallel bus reset */
mdelay(1500);
#endif
olddelay = pi->delay;
pi->delay = 10;
pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(4); w0(4); w2(6); w2(7);
a = r1() & 0xff; w2(4); b = r1() & 0xff;
w2(0xc); w2(0xe); w2(4);
pi->delay = olddelay;
w0(pi->saved_r0);
w2(pi->saved_r2);
return ((~a & 0x40) && (b & 0x40));
}
/*
 * We use pi->private to remember the result of the PNP test.
 * To make this work, private = port * 2 + chip. Yes, I know it's a hack :-(
*/
static int frpw_test_proto(struct pi_adapter *pi)
{
int j, k, r;
int e[2] = { 0, 0 };
char scratch[512];
if ((pi->private >> 1) != pi->port)
		pi->private = frpw_test_pnp(pi) + 2 * pi->port;
if (((pi->private & 0x1) == 0) && (pi->mode > 2)) {
dev_dbg(&pi->dev,
"frpw: Xilinx does not support mode %d\n", pi->mode);
return 1;
}
if (((pi->private & 0x1) == 1) && (pi->mode == 2)) {
dev_dbg(&pi->dev, "frpw: ASIC does not support mode 2\n");
return 1;
}
frpw_connect(pi);
for (j = 0; j < 2; j++) {
frpw_write_regr(pi, 0, 6, 0xa0 + j * 0x10);
for (k = 0; k < 256; k++) {
frpw_write_regr(pi, 0, 2, k ^ 0xaa);
frpw_write_regr(pi, 0, 3, k ^ 0x55);
if (frpw_read_regr(pi, 0, 2) != (k ^ 0xaa))
e[j]++;
}
}
frpw_disconnect(pi);
frpw_connect(pi);
frpw_read_block_int(pi, scratch, 512, 0x10);
r = 0;
for (k = 0; k < 128; k++) {
if (scratch[k] != k)
r++;
}
frpw_disconnect(pi);
dev_dbg(&pi->dev,
"frpw: port 0x%x, chip %ld, mode %d, test=(%d,%d,%d)\n",
		pi->port, (pi->private % 2), pi->mode, e[0], e[1], r);
return r || (e[0] && e[1]);
}
static void frpw_log_adapter(struct pi_adapter *pi)
{
char *mode[6] = { "4-bit", "8-bit", "EPP", "EPP-8", "EPP-16", "EPP-32"};
dev_info(&pi->dev,
"Freecom (%s) adapter at 0x%x, mode %d (%s), delay %d\n",
((pi->private & 0x1) == 0) ? "Xilinx" : "ASIC",
pi->port, pi->mode, mode[pi->mode], pi->delay);
}
static struct pi_protocol frpw = {
.owner = THIS_MODULE,
.name = "frpw",
.max_mode = 6,
.epp_first = 2,
.default_delay = 2,
.max_units = 1,
.write_regr = frpw_write_regr,
.read_regr = frpw_read_regr,
.write_block = frpw_write_block,
.read_block = frpw_read_block,
.connect = frpw_connect,
.disconnect = frpw_disconnect,
.test_proto = frpw_test_proto,
.log_adapter = frpw_log_adapter,
};
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Grant R. Guenther <[email protected]>");
MODULE_DESCRIPTION("Freecom Power parallel port IDE adapter protocol driver");
module_pata_parport_driver(frpw);
| linux-master | drivers/ata/pata_parport/frpw.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (c) 1997-8 Grant R. Guenther <[email protected]>
*
* aten.c is a low-level protocol driver for the ATEN EH-100
* parallel port adapter. The EH-100 supports 4-bit and 8-bit
* modes only. There is also an EH-132 which supports EPP mode
* transfers. The EH-132 is not yet supported.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <asm/io.h>
#include "pata_parport.h"
#define j44(a, b) ((((a >> 4) & 0x0f) | (b & 0xf0)) ^ 0x88)
/*
* cont = 0 - access the IDE register file
* cont = 1 - access the IDE command set
*/
static int cont_map[2] = { 0x08, 0x20 };
static void aten_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{
int r = regr + cont_map[cont] + 0x80;
w0(r); w2(0xe); w2(6); w0(val); w2(7); w2(6); w2(0xc);
}
static int aten_read_regr(struct pi_adapter *pi, int cont, int regr)
{
int a, b, r;
r = regr + cont_map[cont] + 0x40;
switch (pi->mode) {
case 0:
w0(r); w2(0xe); w2(6);
w2(7); w2(6); w2(0);
a = r1(); w0(0x10); b = r1(); w2(0xc);
		return j44(a, b);
case 1:
r |= 0x10;
w0(r); w2(0xe); w2(6); w0(0xff);
w2(0x27); w2(0x26); w2(0x20);
a = r0();
w2(0x26); w2(0xc);
return a;
}
return -1;
}
static void aten_read_block(struct pi_adapter *pi, char *buf, int count)
{
int k, a, b, c, d;
switch (pi->mode) {
case 0:
w0(0x48); w2(0xe); w2(6);
for (k = 0; k < count / 2; k++) {
w2(7); w2(6); w2(2);
a = r1(); w0(0x58); b = r1();
w2(0); d = r1(); w0(0x48); c = r1();
buf[2 * k] = j44(c, d);
buf[2 * k + 1] = j44(a, b);
}
w2(0xc);
break;
case 1:
w0(0x58); w2(0xe); w2(6);
for (k = 0; k < count / 2; k++) {
w2(0x27); w2(0x26); w2(0x22);
a = r0(); w2(0x20); b = r0();
buf[2 * k] = b;
buf[2 * k + 1] = a;
}
w2(0x26); w2(0xc);
break;
}
}
static void aten_write_block(struct pi_adapter *pi, char *buf, int count)
{
int k;
w0(0x88); w2(0xe); w2(6);
for (k = 0; k < count / 2; k++) {
w0(buf[2 * k + 1]); w2(0xe); w2(6);
w0(buf[2 * k]); w2(7); w2(6);
}
w2(0xc);
}
static void aten_connect(struct pi_adapter *pi)
{
pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(0xc);
}
static void aten_disconnect(struct pi_adapter *pi)
{
w0(pi->saved_r0);
w2(pi->saved_r2);
}
static void aten_log_adapter(struct pi_adapter *pi)
{
char *mode_string[2] = { "4-bit", "8-bit" };
dev_info(&pi->dev,
"ATEN EH-100 at 0x%x, mode %d (%s), delay %d\n",
pi->port, pi->mode, mode_string[pi->mode], pi->delay);
}
static struct pi_protocol aten = {
.owner = THIS_MODULE,
.name = "aten",
.max_mode = 2,
.epp_first = 2,
.default_delay = 1,
.max_units = 1,
.write_regr = aten_write_regr,
.read_regr = aten_read_regr,
.write_block = aten_write_block,
.read_block = aten_read_block,
.connect = aten_connect,
.disconnect = aten_disconnect,
.log_adapter = aten_log_adapter,
};
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Grant R. Guenther <[email protected]>");
MODULE_DESCRIPTION("ATEN EH-100 parallel port IDE adapter protocol driver");
module_pata_parport_driver(aten);
| linux-master | drivers/ata/pata_parport/aten.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (c) 1996-1998 Grant R. Guenther <[email protected]>
*
* on20.c is a low-level protocol driver for the
* Onspec 90c20 parallel to IDE adapter.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
#include "pata_parport.h"
#define op(f) \
do { \
w2(4); w0(f); w2(5); w2(0xd); \
w2(5); w2(0xd); w2(5); w2(4); \
} while (0)
#define vl(v) \
do { \
w2(4); w0(v); w2(5); \
w2(7); w2(5); w2(4); \
} while (0)
#define j44(a, b) (((a >> 4) & 0x0f) | (b & 0xf0))
/*
* cont = 0 - access the IDE register file
* cont = 1 - access the IDE command set
*/
static int on20_read_regr(struct pi_adapter *pi, int cont, int regr)
{
int h, l, r;
r = (regr << 2) + 1 + cont;
op(1); vl(r); op(0);
switch (pi->mode) {
case 0:
w2(4); w2(6); l = r1();
w2(4); w2(6); h = r1();
w2(4); w2(6); w2(4); w2(6); w2(4);
return j44(l, h);
case 1:
w2(4); w2(0x26); r = r0();
w2(4); w2(0x26); w2(4);
return r;
}
return -1;
}
static void on20_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{
int r = (regr << 2) + 1 + cont;
op(1); vl(r);
op(0); vl(val);
op(0); vl(val);
}
static void on20_connect(struct pi_adapter *pi)
{
pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(4); w0(0); w2(0xc); w2(4); w2(6); w2(4); w2(6); w2(4);
if (pi->mode) {
op(2); vl(8); op(2); vl(9);
} else {
op(2); vl(0); op(2); vl(8);
}
}
static void on20_disconnect(struct pi_adapter *pi)
{
w2(4); w0(7); w2(4); w2(0xc); w2(4);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
static void on20_read_block(struct pi_adapter *pi, char *buf, int count)
{
int k, l, h;
op(1); vl(1); op(0);
for (k = 0; k < count; k++) {
if (pi->mode) {
w2(4); w2(0x26); buf[k] = r0();
} else {
w2(6); l = r1(); w2(4);
w2(6); h = r1(); w2(4);
buf[k] = j44(l, h);
}
}
w2(4);
}
static void on20_write_block(struct pi_adapter *pi, char *buf, int count)
{
int k;
op(1); vl(1); op(0);
for (k = 0; k < count; k++) {
w2(5); w0(buf[k]); w2(7);
}
w2(4);
}
static void on20_log_adapter(struct pi_adapter *pi)
{
char *mode_string[2] = { "4-bit", "8-bit" };
dev_info(&pi->dev,
"OnSpec 90c20 at 0x%x, mode %d (%s), delay %d\n",
pi->port, pi->mode, mode_string[pi->mode], pi->delay);
}
static struct pi_protocol on20 = {
.owner = THIS_MODULE,
.name = "on20",
.max_mode = 2,
.epp_first = 2,
.default_delay = 1,
.max_units = 1,
.write_regr = on20_write_regr,
.read_regr = on20_read_regr,
.write_block = on20_write_block,
.read_block = on20_read_block,
.connect = on20_connect,
.disconnect = on20_disconnect,
.log_adapter = on20_log_adapter,
};
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Grant R. Guenther <[email protected]>");
MODULE_DESCRIPTION("Onspec 90c20 parallel port IDE adapter protocol driver");
module_pata_parport_driver(on20);
| linux-master | drivers/ata/pata_parport/on20.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (c) 1998 Grant R. Guenther <[email protected]>
*
* fit3.c is a low-level protocol driver for newer models
* of the Fidelity International Technology parallel port adapter.
* This adapter is used in their TransDisk 3000 portable
* hard-drives, as well as CD-ROM, PD-CD and other devices.
*
* The TD-2000 and certain older devices use a different protocol.
* Try the fit2 protocol module with them.
*
* NB: The FIT adapters do not appear to support the control
* registers. So, we map ALT_STATUS to STATUS and NO-OP writes
* to the device control register - this means that IDE reset
* will not work on these devices.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
#include "pata_parport.h"
#define j44(a, b) (((a >> 3) & 0x0f) | ((b << 1) & 0xf0))
#define w7(byte) out_p(7, byte)
#define r7() (in_p(7) & 0xff)
/*
* cont = 0 - access the IDE register file
* cont = 1 - access the IDE command set
*/
static void fit3_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{
if (cont == 1)
return;
switch (pi->mode) {
case 0:
case 1:
w2(0xc); w0(regr); w2(0x8); w2(0xc);
w0(val); w2(0xd);
w0(0); w2(0xc);
break;
case 2:
w2(0xc); w0(regr); w2(0x8); w2(0xc);
w4(val); w4(0);
w2(0xc);
break;
}
}
static int fit3_read_regr(struct pi_adapter *pi, int cont, int regr)
{
int a, b;
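	/*
	 * No control-register file on this chip (see the note at the
	 * top of this file): map a read of ALT_STATUS (control reg 6)
	 * onto STATUS (reg 7) and answer any other control read with 0xff.
	 */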
if (cont) {
if (regr != 6)
return 0xff;
regr = 7;
}
switch (pi->mode) {
case 0:
w2(0xc); w0(regr + 0x10); w2(0x8); w2(0xc);
w2(0xd); a = r1();
w2(0xf); b = r1();
w2(0xc);
return j44(a, b);
case 1:
w2(0xc); w0(regr + 0x90); w2(0x8); w2(0xc);
w2(0xec); w2(0xee); w2(0xef); a = r0();
w2(0xc);
return a;
case 2:
w2(0xc); w0(regr + 0x90); w2(0x8); w2(0xc);
w2(0xec);
a = r4(); b = r4();
w2(0xc);
return a;
}
return -1;
}
static void fit3_read_block(struct pi_adapter *pi, char *buf, int count)
{
int k, a, b, c, d;
switch (pi->mode) {
case 0:
w2(0xc); w0(0x10); w2(0x8); w2(0xc);
for (k = 0; k < count / 2; k++) {
w2(0xd); a = r1();
w2(0xf); b = r1();
w2(0xc); c = r1();
w2(0xe); d = r1();
buf[2 * k] = j44(a, b);
buf[2 * k + 1] = j44(c, d);
}
w2(0xc);
break;
case 1:
w2(0xc); w0(0x90); w2(0x8); w2(0xc);
w2(0xec); w2(0xee);
for (k = 0; k < count / 2; k++) {
w2(0xef); a = r0();
w2(0xee); b = r0();
buf[2 * k] = a;
buf[2 * k + 1] = b;
}
w2(0xec);
w2(0xc);
break;
case 2:
w2(0xc); w0(0x90); w2(0x8); w2(0xc);
w2(0xec);
for (k = 0; k < count; k++)
buf[k] = r4();
w2(0xc);
break;
}
}
static void fit3_write_block(struct pi_adapter *pi, char *buf, int count)
{
int k;
switch (pi->mode) {
case 0:
case 1:
w2(0xc); w0(0); w2(0x8); w2(0xc);
for (k = 0; k < count / 2; k++) {
w0(buf[2 * k]); w2(0xd);
w0(buf[2 * k + 1]); w2(0xc);
}
break;
case 2:
w2(0xc); w0(0); w2(0x8); w2(0xc);
for (k = 0; k < count; k++)
w4(buf[k]);
w2(0xc);
break;
}
}
static void fit3_connect(struct pi_adapter *pi)
{
pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(0xc); w0(0); w2(0xa);
if (pi->mode == 2) {
w2(0xc); w0(0x9);
w2(0x8); w2(0xc);
}
}
static void fit3_disconnect(struct pi_adapter *pi)
{
w2(0xc); w0(0xa); w2(0x8); w2(0xc);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
static void fit3_log_adapter(struct pi_adapter *pi)
{
	char *mode_string[3] = { "4-bit", "8-bit", "EPP" };
dev_info(&pi->dev,
"FIT 3000 adapter at 0x%x, mode %d (%s), delay %d\n",
pi->port, pi->mode, mode_string[pi->mode], pi->delay);
}
static struct pi_protocol fit3 = {
.owner = THIS_MODULE,
.name = "fit3",
.max_mode = 3,
.epp_first = 2,
.default_delay = 1,
.max_units = 1,
.write_regr = fit3_write_regr,
.read_regr = fit3_read_regr,
.write_block = fit3_write_block,
.read_block = fit3_read_block,
.connect = fit3_connect,
.disconnect = fit3_disconnect,
.log_adapter = fit3_log_adapter,
};
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Grant R. Guenther <[email protected]>");
MODULE_DESCRIPTION("Fidelity International Technology parallel port IDE adapter"
"(newer models) protocol driver");
module_pata_parport_driver(fit3);
| linux-master | drivers/ata/pata_parport/fit3.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (c) 1997-1998 Grant R. Guenther <[email protected]>
*
* This is a low-level driver for the KBIC-951A and KBIC-971A
* parallel to IDE adapter chips from KingByte Information Systems.
*
 * The chips are almost identical; however, the wakeup code
* required for the 971A interferes with the correct operation of
* the 951A, so this driver registers itself twice, once for
* each chip.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
#include "pata_parport.h"
#define r12w() (delay_p, inw(pi->port + 1) & 0xffff)
#define j44(a, b) ((((a >> 4) & 0x0f) | (b & 0xf0)) ^ 0x88)
#define j53(w) (((w >> 3) & 0x1f) | ((w >> 4) & 0xe0))
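/*
 * In "5/3" mode r12w() reads the status and control ports in a single
 * 16-bit access and j53() reassembles a data byte from 5 bits of the
 * status register and 3 bits of the control register.
 */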
/*
* cont = 0 - access the IDE register file
* cont = 1 - access the IDE command set
*/
static int cont_map[2] = { 0x80, 0x40 };
static int kbic_read_regr(struct pi_adapter *pi, int cont, int regr)
{
int a, b, s;
s = cont_map[cont];
switch (pi->mode) {
case 0:
w0(regr | 0x18 | s); w2(4); w2(6); w2(4); w2(1); w0(8);
a = r1(); w0(0x28); b = r1(); w2(4);
return j44(a, b);
case 1:
		w0(regr | 0x38 | s); w2(4); w2(6); w2(4); w2(5); w0(8);
a = r12w(); w2(4);
return j53(a);
case 2:
w0(regr | 0x08 | s); w2(4); w2(6); w2(4); w2(0xa5); w2(0xa1);
a = r0(); w2(4);
return a;
case 3:
case 4:
case 5:
w0(0x20 | s); w2(4); w2(6); w2(4); w3(regr);
a = r4(); b = r4(); w2(4); w2(0); w2(4);
return a;
}
return -1;
}
static void kbic_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{
int s = cont_map[cont];
switch (pi->mode) {
case 0:
case 1:
case 2:
w0(regr | 0x10 | s); w2(4); w2(6); w2(4);
w0(val); w2(5); w2(4);
break;
case 3:
case 4:
case 5:
w0(0x20 | s); w2(4); w2(6); w2(4); w3(regr);
w4(val); w4(val);
w2(4); w2(0); w2(4);
break;
}
}
static void k951_connect(struct pi_adapter *pi)
{
pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(4);
}
static void k951_disconnect(struct pi_adapter *pi)
{
w0(pi->saved_r0);
w2(pi->saved_r2);
}
#define CCP(x) \
do { \
w2(0xc4); w0(0xaa); w0(0x55); \
w0(0); w0(0xff); w0(0x87); \
w0(0x78); w0(x); w2(0xc5); \
w2(0xc4); w0(0xff); \
} while (0)
static void k971_connect(struct pi_adapter *pi)
{
pi->saved_r0 = r0();
pi->saved_r2 = r2();
CCP(0x20);
w2(4);
}
static void k971_disconnect(struct pi_adapter *pi)
{
CCP(0x30);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
/*
* count must be congruent to 0 MOD 4, but all known applications
 * have this property.
*/
static void kbic_read_block(struct pi_adapter *pi, char *buf, int count)
{
int k, a, b;
switch (pi->mode) {
case 0:
w0(0x98); w2(4); w2(6); w2(4);
for (k = 0; k < count / 2; k++) {
w2(1); w0(8);
a = r1();
w0(0x28);
b = r1();
buf[2 * k] = j44(a, b);
w2(5);
b = r1();
w0(8);
a = r1();
buf[2 * k + 1] = j44(a, b);
w2(4);
}
break;
case 1:
w0(0xb8); w2(4); w2(6); w2(4);
for (k = 0; k < count / 4; k++) {
w0(0xb8);
w2(4); w2(5);
w0(8);
buf[4 * k] = j53(r12w());
w0(0xb8);
buf[4 * k + 1] = j53(r12w());
w2(4); w2(5);
buf[4 * k + 3] = j53(r12w());
w0(8);
buf[4 * k + 2] = j53(r12w());
}
w2(4);
break;
case 2:
w0(0x88); w2(4); w2(6); w2(4);
for (k = 0; k < count / 2; k++) {
w2(0xa0); w2(0xa1);
buf[2 * k] = r0();
w2(0xa5);
buf[2 * k + 1] = r0();
}
w2(4);
break;
case 3:
w0(0xa0); w2(4); w2(6); w2(4); w3(0);
for (k = 0; k < count; k++)
buf[k] = r4();
w2(4); w2(0); w2(4);
break;
case 4:
w0(0xa0); w2(4); w2(6); w2(4); w3(0);
for (k = 0; k < count / 2; k++)
((u16 *)buf)[k] = r4w();
w2(4); w2(0); w2(4);
break;
case 5:
w0(0xa0); w2(4); w2(6); w2(4); w3(0);
for (k = 0; k < count / 4; k++)
((u32 *)buf)[k] = r4l();
w2(4); w2(0); w2(4);
break;
}
}
static void kbic_write_block(struct pi_adapter *pi, char *buf, int count)
{
int k;
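	/*
	 * The adapter takes the second byte of each 16-bit pair first:
	 * the loops below write buf[2k + 1] before buf[2k], and the EPP
	 * word/dword modes get the same ordering via swab16().
	 */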
switch (pi->mode) {
case 0:
case 1:
case 2:
w0(0x90); w2(4); w2(6); w2(4);
for (k = 0; k < count / 2; k++) {
w0(buf[2 * k + 1]);
w2(0); w2(4);
w0(buf[2 * k]);
w2(5); w2(4);
}
break;
case 3:
w0(0xa0); w2(4); w2(6); w2(4); w3(0);
for (k = 0; k < count / 2; k++) {
w4(buf[2 * k + 1]);
w4(buf[2 * k]);
}
w2(4); w2(0); w2(4);
break;
case 4:
w0(0xa0); w2(4); w2(6); w2(4); w3(0);
for (k = 0; k < count / 2; k++)
w4w(swab16(((u16 *)buf)[k]));
w2(4); w2(0); w2(4);
break;
case 5:
w0(0xa0); w2(4); w2(6); w2(4); w3(0);
for (k = 0; k < count / 4; k++)
w4l(swab16(((u16 *)buf)[2 * k]) |
swab16(((u16 *)buf)[2 * k + 1]) << 16);
w2(4); w2(0); w2(4);
break;
}
}
static void kbic_log_adapter(struct pi_adapter *pi, char *chip)
{
char *mode[6] = { "4-bit", "5/3", "8-bit", "EPP-8", "EPP_16", "EPP-32"};
dev_info(&pi->dev, "KingByte %s at 0x%x, mode %d (%s), delay %d\n",
chip, pi->port, pi->mode, mode[pi->mode], pi->delay);
}
static void k951_log_adapter(struct pi_adapter *pi)
{
kbic_log_adapter(pi, "KBIC-951A");
}
static void k971_log_adapter(struct pi_adapter *pi)
{
kbic_log_adapter(pi, "KBIC-971A");
}
static struct pi_protocol k951 = {
.owner = THIS_MODULE,
.name = "k951",
.max_mode = 6,
.epp_first = 3,
.default_delay = 1,
.max_units = 1,
.write_regr = kbic_write_regr,
.read_regr = kbic_read_regr,
.write_block = kbic_write_block,
.read_block = kbic_read_block,
.connect = k951_connect,
.disconnect = k951_disconnect,
.log_adapter = k951_log_adapter,
};
static struct pi_protocol k971 = {
.owner = THIS_MODULE,
.name = "k971",
.max_mode = 6,
.epp_first = 3,
.default_delay = 1,
.max_units = 1,
.write_regr = kbic_write_regr,
.read_regr = kbic_read_regr,
.write_block = kbic_write_block,
.read_block = kbic_read_block,
.connect = k971_connect,
.disconnect = k971_disconnect,
.log_adapter = k971_log_adapter,
};
static int __init kbic_init(void)
{
int rv;
rv = pata_parport_register_driver(&k951);
if (rv < 0)
return rv;
rv = pata_parport_register_driver(&k971);
if (rv < 0)
pata_parport_unregister_driver(&k951);
return rv;
}
static void __exit kbic_exit(void)
{
pata_parport_unregister_driver(&k951);
pata_parport_unregister_driver(&k971);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Grant R. Guenther <[email protected]>");
MODULE_DESCRIPTION("KingByte Information Systems KBIC-951A and KBIC-971A "
"parallel port IDE adapter protocol driver");
module_init(kbic_init)
module_exit(kbic_exit)
| linux-master | drivers/ata/pata_parport/kbic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (c) 1997-1998 Grant R. Guenther <[email protected]>
*
* on26.c is a low-level protocol driver for the
* OnSpec 90c26 parallel to IDE adapter chip.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
#include "pata_parport.h"
/*
* mode codes: 0 nybble reads, 8-bit writes
* 1 8-bit reads and writes
* 2 8-bit EPP mode
* 3 EPP-16
* 4 EPP-32
*/
#define j44(a, b) (((a >> 4) & 0x0f) | (b & 0xf0))
#define P1 \
do { \
w2(5); w2(0xd); w2(5); w2(0xd); w2(5); w2(4); \
} while (0)
#define P2 \
do { \
w2(5); w2(7); w2(5); w2(4); \
} while (0)
/*
* cont = 0 - access the IDE register file
* cont = 1 - access the IDE command set
*/
static int on26_read_regr(struct pi_adapter *pi, int cont, int regr)
{
int a, b, r;
r = (regr << 2) + 1 + cont;
switch (pi->mode) {
case 0:
w0(1); P1; w0(r); P2; w0(0); P1;
w2(6); a = r1(); w2(4);
w2(6); b = r1(); w2(4);
w2(6); w2(4); w2(6); w2(4);
return j44(a, b);
case 1:
w0(1); P1; w0(r); P2; w0(0); P1;
w2(0x26); a = r0(); w2(4); w2(0x26); w2(4);
return a;
case 2:
case 3:
case 4:
w3(1); w3(1); w2(5); w4(r); w2(4);
w3(0); w3(0); w2(0x24); a = r4(); w2(4);
w2(0x24); (void)r4(); w2(4);
return a;
}
return -1;
}
static void on26_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{
int r = (regr << 2) + 1 + cont;
switch (pi->mode) {
case 0:
case 1:
w0(1); P1; w0(r); P2; w0(0); P1;
w0(val); P2; w0(val); P2;
break;
case 2:
case 3:
case 4:
w3(1); w3(1); w2(5); w4(r); w2(4);
w3(0); w3(0);
w2(5); w4(val); w2(4);
w2(5); w4(val); w2(4);
break;
}
}
#define CCP(x) \
do { \
w0(0xfe); w0(0xaa); w0(0x55); w0(0); \
w0(0xff); w0(0x87); w0(0x78); w0(x); \
w2(4); w2(5); w2(4); w0(0xff); \
} while (0)
static void on26_connect(struct pi_adapter *pi)
{
int x;
pi->saved_r0 = r0();
pi->saved_r2 = r2();
CCP(0x20);
if (pi->mode)
x = 9;
else
x = 8;
w0(2); P1; w0(8); P2;
w0(2); P1; w0(x); P2;
}
static void on26_disconnect(struct pi_adapter *pi)
{
if (pi->mode >= 2) {
w3(4); w3(4); w3(4); w3(4);
} else {
w0(4); P1; w0(4); P1;
}
CCP(0x30);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
#define RESET_WAIT 200
/* hard reset */
static int on26_test_port(struct pi_adapter *pi)
{
int i, m, d, x = 0, y = 0;
pi->saved_r0 = r0();
pi->saved_r2 = r2();
d = pi->delay;
m = pi->mode;
pi->delay = 5;
pi->mode = 0;
w2(0xc);
CCP(0x30); CCP(0);
w0(0xfe); w0(0xaa); w0(0x55); w0(0); w0(0xff);
i = ((r1() & 0xf0) << 4); w0(0x87);
i |= (r1() & 0xf0); w0(0x78);
w0(0x20); w2(4); w2(5);
i |= ((r1() & 0xf0) >> 4);
w2(4); w0(0xff);
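	/*
	 * 0xb5f appears to be the 90c26 signature; only a chip that
	 * answered the wake-up key is put through the reset sequence.
	 */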
if (i == 0xb5f) {
w0(2); P1; w0(0); P2;
w0(3); P1; w0(0); P2;
w0(2); P1; w0(8); P2; udelay(100);
w0(2); P1; w0(0xa); P2; udelay(100);
w0(2); P1; w0(8); P2; udelay(1000);
on26_write_regr(pi, 0, 6, 0xa0);
for (i = 0; i < RESET_WAIT; i++) {
on26_write_regr(pi, 0, 6, 0xa0);
x = on26_read_regr(pi, 0, 7);
on26_write_regr(pi, 0, 6, 0xb0);
y = on26_read_regr(pi, 0, 7);
if (!((x & 0x80) || (y & 0x80)))
break;
mdelay(100);
}
if (i == RESET_WAIT)
dev_err(&pi->dev,
"on26: Device reset failed (%x,%x)\n", x, y);
w0(4); P1; w0(4); P1;
}
CCP(0x30);
pi->delay = d;
pi->mode = m;
w0(pi->saved_r0);
w2(pi->saved_r2);
return 5;
}
static void on26_read_block(struct pi_adapter *pi, char *buf, int count)
{
int k, a, b;
switch (pi->mode) {
case 0:
w0(1); P1; w0(1); P2; w0(2); P1; w0(0x18); P2; w0(0); P1;
udelay(10);
for (k = 0; k < count; k++) {
w2(6); a = r1();
w2(4); b = r1();
buf[k] = j44(a, b);
}
w0(2); P1; w0(8); P2;
break;
case 1:
w0(1); P1; w0(1); P2; w0(2); P1; w0(0x19); P2; w0(0); P1;
udelay(10);
for (k = 0; k < count / 2; k++) {
w2(0x26); buf[2 * k] = r0();
w2(0x24); buf[2 * k + 1] = r0();
}
w0(2); P1; w0(9); P2;
break;
case 2:
w3(1); w3(1); w2(5); w4(1); w2(4);
w3(0); w3(0); w2(0x24);
udelay(10);
for (k = 0; k < count; k++)
buf[k] = r4();
w2(4);
break;
case 3:
w3(1); w3(1); w2(5); w4(1); w2(4);
w3(0); w3(0); w2(0x24);
udelay(10);
for (k = 0; k < count / 2; k++)
((u16 *)buf)[k] = r4w();
w2(4);
break;
case 4:
w3(1); w3(1); w2(5); w4(1); w2(4);
w3(0); w3(0); w2(0x24);
udelay(10);
for (k = 0; k < count / 4; k++)
((u32 *)buf)[k] = r4l();
w2(4);
break;
}
}
static void on26_write_block(struct pi_adapter *pi, char *buf, int count)
{
int k;
switch (pi->mode) {
case 0:
case 1:
w0(1); P1; w0(1); P2;
w0(2); P1; w0(0x18 + pi->mode); P2; w0(0); P1;
udelay(10);
for (k = 0; k < count / 2; k++) {
w2(5); w0(buf[2 * k]);
w2(7); w0(buf[2 * k + 1]);
}
w2(5); w2(4);
w0(2); P1; w0(8 + pi->mode); P2;
break;
case 2:
w3(1); w3(1); w2(5); w4(1); w2(4);
w3(0); w3(0); w2(0xc5);
udelay(10);
for (k = 0; k < count; k++)
w4(buf[k]);
w2(0xc4);
break;
case 3:
w3(1); w3(1); w2(5); w4(1); w2(4);
w3(0); w3(0); w2(0xc5);
udelay(10);
for (k = 0; k < count / 2; k++)
w4w(((u16 *)buf)[k]);
w2(0xc4);
break;
case 4:
w3(1); w3(1); w2(5); w4(1); w2(4);
w3(0); w3(0); w2(0xc5);
udelay(10);
for (k = 0; k < count / 4; k++)
w4l(((u32 *)buf)[k]);
w2(0xc4);
break;
}
}
static void on26_log_adapter(struct pi_adapter *pi)
{
char *mode_string[5] = { "4-bit", "8-bit", "EPP-8", "EPP-16", "EPP-32" };
dev_info(&pi->dev,
"OnSpec 90c26 at 0x%x, mode %d (%s), delay %d\n",
pi->port, pi->mode, mode_string[pi->mode], pi->delay);
}
static struct pi_protocol on26 = {
.owner = THIS_MODULE,
.name = "on26",
.max_mode = 5,
.epp_first = 2,
.default_delay = 1,
.max_units = 1,
.write_regr = on26_write_regr,
.read_regr = on26_read_regr,
.write_block = on26_write_block,
.read_block = on26_read_block,
.connect = on26_connect,
.disconnect = on26_disconnect,
.test_port = on26_test_port,
.log_adapter = on26_log_adapter,
};
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Grant R. Guenther <[email protected]>");
MODULE_DESCRIPTION("Onspec 90c26 parallel port IDE adapter protocol driver");
module_pata_parport_driver(on26);
| linux-master | drivers/ata/pata_parport/on26.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (c) 2001 Micro Solutions Inc.
*
* backpack.c is a low-level protocol driver for the Micro Solutions
* "BACKPACK" parallel port IDE adapter (works on Series 6 drives).
*
* Written by: Ken Hahn ([email protected])
* Clive Turvey ([email protected])
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/parport.h>
#include "pata_parport.h"
/* 60772 Commands */
#define ACCESS_REG 0x00
#define ACCESS_PORT 0x40
#define ACCESS_READ 0x00
#define ACCESS_WRITE 0x20
/* 60772 Command Prefix */
#define CMD_PREFIX_SET 0xe0 // Special command that modifies next command's operation
#define CMD_PREFIX_RESET 0xc0 // Resets current cmd modifier reg bits
#define PREFIX_IO16 0x01 // perform 16-bit wide I/O
#define PREFIX_FASTWR 0x04 // enable PPC mode fast-write
#define PREFIX_BLK 0x08 // enable block transfer mode
/* 60772 Registers */
#define REG_STATUS 0x00 // status register
#define STATUS_IRQA 0x01 // Peripheral IRQA line
#define STATUS_EEPROM_DO 0x40 // Serial EEPROM data bit
#define REG_VERSION 0x01 // PPC version register (read)
#define REG_HWCFG 0x02 // Hardware Config register
#define REG_RAMSIZE 0x03 // Size of RAM Buffer
#define RAMSIZE_128K 0x02
#define REG_EEPROM 0x06 // EEPROM control register
#define EEPROM_SK 0x01 // eeprom SK bit
#define EEPROM_DI 0x02 // eeprom DI bit
#define EEPROM_CS 0x04 // eeprom CS bit
#define EEPROM_EN 0x08 // eeprom output enable
#define REG_BLKSIZE 0x08 // Block transfer len (24 bit)
/* flags */
#define fifo_wait 0x10
/* DON'T CHANGE THESE LEST YOU BREAK EVERYTHING - BIT FIELD DEPENDENCIES */
#define PPCMODE_UNI_SW 0
#define PPCMODE_UNI_FW 1
#define PPCMODE_BI_SW 2
#define PPCMODE_BI_FW 3
#define PPCMODE_EPP_BYTE 4
#define PPCMODE_EPP_WORD 5
#define PPCMODE_EPP_DWORD 6
static int mode_map[] = { PPCMODE_UNI_FW, PPCMODE_BI_FW, PPCMODE_EPP_BYTE,
PPCMODE_EPP_WORD, PPCMODE_EPP_DWORD };
static void bpck6_send_cmd(struct pi_adapter *pi, u8 cmd)
{
switch (mode_map[pi->mode]) {
case PPCMODE_UNI_SW:
case PPCMODE_UNI_FW:
case PPCMODE_BI_SW:
case PPCMODE_BI_FW:
parport_write_data(pi->pardev->port, cmd);
parport_frob_control(pi->pardev->port, 0, PARPORT_CONTROL_AUTOFD);
break;
case PPCMODE_EPP_BYTE:
case PPCMODE_EPP_WORD:
case PPCMODE_EPP_DWORD:
pi->pardev->port->ops->epp_write_addr(pi->pardev->port, &cmd, 1, 0);
break;
}
}
static u8 bpck6_rd_data_byte(struct pi_adapter *pi)
{
u8 data = 0;
switch (mode_map[pi->mode]) {
case PPCMODE_UNI_SW:
case PPCMODE_UNI_FW:
parport_frob_control(pi->pardev->port, PARPORT_CONTROL_STROBE,
PARPORT_CONTROL_INIT);
data = parport_read_status(pi->pardev->port);
data = ((data & 0x80) >> 1) | ((data & 0x38) >> 3);
parport_frob_control(pi->pardev->port, PARPORT_CONTROL_STROBE,
PARPORT_CONTROL_STROBE);
data |= parport_read_status(pi->pardev->port) & 0xB8;
break;
case PPCMODE_BI_SW:
case PPCMODE_BI_FW:
parport_data_reverse(pi->pardev->port);
parport_frob_control(pi->pardev->port, PARPORT_CONTROL_STROBE,
PARPORT_CONTROL_STROBE | PARPORT_CONTROL_INIT);
data = parport_read_data(pi->pardev->port);
parport_frob_control(pi->pardev->port, PARPORT_CONTROL_STROBE, 0);
parport_data_forward(pi->pardev->port);
break;
case PPCMODE_EPP_BYTE:
case PPCMODE_EPP_WORD:
case PPCMODE_EPP_DWORD:
pi->pardev->port->ops->epp_read_data(pi->pardev->port, &data, 1, 0);
break;
}
return data;
}
static void bpck6_wr_data_byte(struct pi_adapter *pi, u8 data)
{
switch (mode_map[pi->mode]) {
case PPCMODE_UNI_SW:
case PPCMODE_UNI_FW:
case PPCMODE_BI_SW:
case PPCMODE_BI_FW:
parport_write_data(pi->pardev->port, data);
parport_frob_control(pi->pardev->port, 0, PARPORT_CONTROL_INIT);
break;
case PPCMODE_EPP_BYTE:
case PPCMODE_EPP_WORD:
case PPCMODE_EPP_DWORD:
pi->pardev->port->ops->epp_write_data(pi->pardev->port, &data, 1, 0);
break;
}
}
static int bpck6_read_regr(struct pi_adapter *pi, int cont, int reg)
{
u8 port = cont ? reg | 8 : reg;
bpck6_send_cmd(pi, port | ACCESS_PORT | ACCESS_READ);
return bpck6_rd_data_byte(pi);
}
static void bpck6_write_regr(struct pi_adapter *pi, int cont, int reg, int val)
{
u8 port = cont ? reg | 8 : reg;
bpck6_send_cmd(pi, port | ACCESS_PORT | ACCESS_WRITE);
bpck6_wr_data_byte(pi, val);
}
static void bpck6_wait_for_fifo(struct pi_adapter *pi)
{
int i;
if (pi->private & fifo_wait) {
for (i = 0; i < 20; i++)
parport_read_status(pi->pardev->port);
}
}
static void bpck6_write_block(struct pi_adapter *pi, char *buf, int len)
{
u8 this, last;
bpck6_send_cmd(pi, REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE);
bpck6_wr_data_byte(pi, (u8)len);
bpck6_wr_data_byte(pi, (u8)(len >> 8));
bpck6_wr_data_byte(pi, 0);
bpck6_send_cmd(pi, CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK);
bpck6_send_cmd(pi, ATA_REG_DATA | ACCESS_PORT | ACCESS_WRITE);
switch (mode_map[pi->mode]) {
case PPCMODE_UNI_SW:
case PPCMODE_BI_SW:
while (len--) {
parport_write_data(pi->pardev->port, *buf++);
parport_frob_control(pi->pardev->port, 0,
PARPORT_CONTROL_INIT);
}
break;
case PPCMODE_UNI_FW:
case PPCMODE_BI_FW:
bpck6_send_cmd(pi, CMD_PREFIX_SET | PREFIX_FASTWR);
parport_frob_control(pi->pardev->port, PARPORT_CONTROL_STROBE,
PARPORT_CONTROL_STROBE);
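		/*
		 * Fast-write mode: a byte equal to the previous one is
		 * clocked out by pulsing INIT alone; the data lines are
		 * redriven only when the value changes.
		 */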
last = *buf;
parport_write_data(pi->pardev->port, last);
while (len) {
this = *buf++;
len--;
if (this == last) {
parport_frob_control(pi->pardev->port, 0,
PARPORT_CONTROL_INIT);
} else {
parport_write_data(pi->pardev->port, this);
last = this;
}
}
parport_frob_control(pi->pardev->port, PARPORT_CONTROL_STROBE,
0);
bpck6_send_cmd(pi, CMD_PREFIX_RESET | PREFIX_FASTWR);
break;
case PPCMODE_EPP_BYTE:
pi->pardev->port->ops->epp_write_data(pi->pardev->port, buf,
len, PARPORT_EPP_FAST_8);
bpck6_wait_for_fifo(pi);
break;
case PPCMODE_EPP_WORD:
pi->pardev->port->ops->epp_write_data(pi->pardev->port, buf,
len, PARPORT_EPP_FAST_16);
bpck6_wait_for_fifo(pi);
break;
case PPCMODE_EPP_DWORD:
pi->pardev->port->ops->epp_write_data(pi->pardev->port, buf,
len, PARPORT_EPP_FAST_32);
bpck6_wait_for_fifo(pi);
break;
}
bpck6_send_cmd(pi, CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK);
}
static void bpck6_read_block(struct pi_adapter *pi, char *buf, int len)
{
bpck6_send_cmd(pi, REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE);
bpck6_wr_data_byte(pi, (u8)len);
bpck6_wr_data_byte(pi, (u8)(len >> 8));
bpck6_wr_data_byte(pi, 0);
bpck6_send_cmd(pi, CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK);
bpck6_send_cmd(pi, ATA_REG_DATA | ACCESS_PORT | ACCESS_READ);
switch (mode_map[pi->mode]) {
case PPCMODE_UNI_SW:
case PPCMODE_UNI_FW:
while (len) {
u8 d;
parport_frob_control(pi->pardev->port,
PARPORT_CONTROL_STROBE,
PARPORT_CONTROL_INIT); /* DATA STROBE */
d = parport_read_status(pi->pardev->port);
d = ((d & 0x80) >> 1) | ((d & 0x38) >> 3);
parport_frob_control(pi->pardev->port,
PARPORT_CONTROL_STROBE,
PARPORT_CONTROL_STROBE);
d |= parport_read_status(pi->pardev->port) & 0xB8;
*buf++ = d;
len--;
}
break;
case PPCMODE_BI_SW:
case PPCMODE_BI_FW:
parport_data_reverse(pi->pardev->port);
while (len) {
parport_frob_control(pi->pardev->port,
PARPORT_CONTROL_STROBE,
PARPORT_CONTROL_STROBE | PARPORT_CONTROL_INIT);
*buf++ = parport_read_data(pi->pardev->port);
len--;
}
parport_frob_control(pi->pardev->port, PARPORT_CONTROL_STROBE,
0);
parport_data_forward(pi->pardev->port);
break;
case PPCMODE_EPP_BYTE:
pi->pardev->port->ops->epp_read_data(pi->pardev->port, buf, len,
PARPORT_EPP_FAST_8);
break;
case PPCMODE_EPP_WORD:
pi->pardev->port->ops->epp_read_data(pi->pardev->port, buf, len,
PARPORT_EPP_FAST_16);
break;
case PPCMODE_EPP_DWORD:
pi->pardev->port->ops->epp_read_data(pi->pardev->port, buf, len,
PARPORT_EPP_FAST_32);
break;
}
bpck6_send_cmd(pi, CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK);
}
static int bpck6_open(struct pi_adapter *pi)
{
u8 i, j, k;
pi->saved_r0 = parport_read_data(pi->pardev->port);
pi->saved_r2 = parport_read_control(pi->pardev->port) & 0x5F;
parport_frob_control(pi->pardev->port, PARPORT_CONTROL_SELECT,
PARPORT_CONTROL_SELECT);
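	/*
	 * Wake-up handshake: write 'b', 'p', the unit number and its
	 * complement with SELECT asserted; the leading 'x' clears a
	 * partial sequence if the data lines already read back 'b'.
	 */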
if (pi->saved_r0 == 'b')
parport_write_data(pi->pardev->port, 'x');
parport_write_data(pi->pardev->port, 'b');
parport_write_data(pi->pardev->port, 'p');
parport_write_data(pi->pardev->port, pi->unit);
parport_write_data(pi->pardev->port, ~pi->unit);
parport_frob_control(pi->pardev->port, PARPORT_CONTROL_SELECT, 0);
parport_write_control(pi->pardev->port, PARPORT_CONTROL_INIT);
i = mode_map[pi->mode] & 0x0C;
if (i == 0)
i = (mode_map[pi->mode] & 2) | 1;
parport_write_data(pi->pardev->port, i);
parport_frob_control(pi->pardev->port, PARPORT_CONTROL_SELECT,
PARPORT_CONTROL_SELECT);
parport_frob_control(pi->pardev->port, PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_AUTOFD);
j = ((i & 0x08) << 4) | ((i & 0x07) << 3);
k = parport_read_status(pi->pardev->port) & 0xB8;
if (j != k)
goto fail;
parport_frob_control(pi->pardev->port, PARPORT_CONTROL_AUTOFD, 0);
k = (parport_read_status(pi->pardev->port) & 0xB8) ^ 0xB8;
if (j != k)
goto fail;
if (i & 4) {
/* EPP */
parport_frob_control(pi->pardev->port,
PARPORT_CONTROL_SELECT | PARPORT_CONTROL_INIT, 0);
} else {
/* PPC/ECP */
parport_frob_control(pi->pardev->port, PARPORT_CONTROL_SELECT, 0);
}
pi->private = 0;
bpck6_send_cmd(pi, ACCESS_REG | ACCESS_WRITE | REG_RAMSIZE);
bpck6_wr_data_byte(pi, RAMSIZE_128K);
bpck6_send_cmd(pi, ACCESS_REG | ACCESS_READ | REG_VERSION);
if ((bpck6_rd_data_byte(pi) & 0x3F) == 0x0C)
pi->private |= fifo_wait;
return 1;
fail:
parport_write_control(pi->pardev->port, pi->saved_r2);
parport_write_data(pi->pardev->port, pi->saved_r0);
return 0;
}
static void bpck6_deselect(struct pi_adapter *pi)
{
if (mode_map[pi->mode] & 4) {
/* EPP */
parport_frob_control(pi->pardev->port, PARPORT_CONTROL_INIT,
PARPORT_CONTROL_INIT);
} else {
/* PPC/ECP */
parport_frob_control(pi->pardev->port, PARPORT_CONTROL_SELECT,
PARPORT_CONTROL_SELECT);
}
parport_write_data(pi->pardev->port, pi->saved_r0);
parport_write_control(pi->pardev->port,
pi->saved_r2 | PARPORT_CONTROL_SELECT);
parport_write_control(pi->pardev->port, pi->saved_r2);
}
static void bpck6_wr_extout(struct pi_adapter *pi, u8 regdata)
{
bpck6_send_cmd(pi, REG_VERSION | ACCESS_REG | ACCESS_WRITE);
bpck6_wr_data_byte(pi, (u8)((regdata & 0x03) << 6));
}
static void bpck6_connect(struct pi_adapter *pi)
{
dev_dbg(&pi->dev, "connect\n");
bpck6_open(pi);
bpck6_wr_extout(pi, 0x3);
}
static void bpck6_disconnect(struct pi_adapter *pi)
{
dev_dbg(&pi->dev, "disconnect\n");
bpck6_wr_extout(pi, 0x0);
bpck6_deselect(pi);
}
/* check for 8-bit port */
static int bpck6_test_port(struct pi_adapter *pi)
{
dev_dbg(&pi->dev, "PARPORT indicates modes=%x for lp=0x%lx\n",
pi->pardev->port->modes, pi->pardev->port->base);
/* look at the parport device to see what modes we can use */
if (pi->pardev->port->modes & PARPORT_MODE_EPP)
return 5; /* Can do EPP */
if (pi->pardev->port->modes & PARPORT_MODE_TRISTATE)
return 2;
return 1; /* Just flat SPP */
}
static int bpck6_probe_unit(struct pi_adapter *pi)
{
int out, saved_mode;
dev_dbg(&pi->dev, "PROBE UNIT %x on port:%x\n", pi->unit, pi->port);
saved_mode = pi->mode;
/*LOWER DOWN TO UNIDIRECTIONAL*/
pi->mode = 0;
out = bpck6_open(pi);
dev_dbg(&pi->dev, "ppc_open returned %2x\n", out);
if (out) {
bpck6_deselect(pi);
dev_dbg(&pi->dev, "leaving probe\n");
pi->mode = saved_mode;
return 1;
}
dev_dbg(&pi->dev, "Failed open\n");
pi->mode = saved_mode;
return 0;
}
static void bpck6_log_adapter(struct pi_adapter *pi)
{
char *mode_string[5] = { "4-bit", "8-bit", "EPP-8", "EPP-16", "EPP-32" };
dev_info(&pi->dev,
"Micro Solutions BACKPACK Drive unit %d at 0x%x, mode:%d (%s), delay %d\n",
pi->unit, pi->port, pi->mode, mode_string[pi->mode], pi->delay);
}
static struct pi_protocol bpck6 = {
.owner = THIS_MODULE,
.name = "bpck6",
.max_mode = 5,
.epp_first = 2, /* 2-5 use epp (need 8 ports) */
.max_units = 255,
.write_regr = bpck6_write_regr,
.read_regr = bpck6_read_regr,
.write_block = bpck6_write_block,
.read_block = bpck6_read_block,
.connect = bpck6_connect,
.disconnect = bpck6_disconnect,
.test_port = bpck6_test_port,
.probe_unit = bpck6_probe_unit,
.log_adapter = bpck6_log_adapter,
};
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Micro Solutions Inc.");
MODULE_DESCRIPTION("Micro Solutions BACKPACK parallel port IDE adapter "
"(version 6 drives) protocol driver");
module_pata_parport_driver(bpck6);
| linux-master | drivers/ata/pata_parport/bpck6.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2023 Ondrej Zary
* based on paride.c by Grant R. Guenther <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/parport.h>
#include "pata_parport.h"
#define DRV_NAME "pata_parport"
static DEFINE_IDR(parport_list);
static DEFINE_IDR(protocols);
static DEFINE_IDA(pata_parport_bus_dev_ids);
static DEFINE_MUTEX(pi_mutex);
static bool probe = true;
module_param(probe, bool, 0644);
MODULE_PARM_DESC(probe, "Enable automatic device probing (0=off, 1=on [default])");
/*
* libata drivers cannot sleep so this driver claims parport before activating
* the ata host and keeps it claimed (and protocol connected) until the ata
* host is removed. Unfortunately, this means that you cannot use any chained
* devices (neither other pata_parport devices nor a printer).
*/
static void pi_connect(struct pi_adapter *pi)
{
parport_claim_or_block(pi->pardev);
pi->proto->connect(pi);
}
static void pi_disconnect(struct pi_adapter *pi)
{
pi->proto->disconnect(pi);
parport_release(pi->pardev);
}
static void pata_parport_dev_select(struct ata_port *ap, unsigned int device)
{
struct pi_adapter *pi = ap->host->private_data;
u8 tmp;
if (device == 0)
tmp = ATA_DEVICE_OBS;
else
tmp = ATA_DEVICE_OBS | ATA_DEV1;
pi->proto->write_regr(pi, 0, ATA_REG_DEVICE, tmp);
ata_sff_pause(ap);
}
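/*
 * Classic ATA presence check: write complementary patterns to the
 * nsect/lbal scratch registers and read them back - a device that is
 * really there latches the last pair written.
 */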
static bool pata_parport_devchk(struct ata_port *ap, unsigned int device)
{
struct pi_adapter *pi = ap->host->private_data;
u8 nsect, lbal;
pata_parport_dev_select(ap, device);
pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 0x55);
pi->proto->write_regr(pi, 0, ATA_REG_LBAL, 0xaa);
pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 0xaa);
pi->proto->write_regr(pi, 0, ATA_REG_LBAL, 0x55);
	pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 0x55);
pi->proto->write_regr(pi, 0, ATA_REG_LBAL, 0xaa);
nsect = pi->proto->read_regr(pi, 0, ATA_REG_NSECT);
lbal = pi->proto->read_regr(pi, 0, ATA_REG_LBAL);
return (nsect == 0x55) && (lbal == 0xaa);
}
static int pata_parport_bus_softreset(struct ata_port *ap, unsigned int devmask,
unsigned long deadline)
{
struct pi_adapter *pi = ap->host->private_data;
/* software reset. causes dev0 to be selected */
pi->proto->write_regr(pi, 1, 6, ap->ctl);
udelay(20);
pi->proto->write_regr(pi, 1, 6, ap->ctl | ATA_SRST);
udelay(20);
pi->proto->write_regr(pi, 1, 6, ap->ctl);
ap->last_ctl = ap->ctl;
	/* wait for the port to become ready */
return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
}
static int pata_parport_softreset(struct ata_link *link, unsigned int *classes,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
unsigned int devmask = 0;
int rc;
u8 err;
/* determine if device 0/1 are present */
if (pata_parport_devchk(ap, 0))
devmask |= (1 << 0);
if (pata_parport_devchk(ap, 1))
devmask |= (1 << 1);
/* select device 0 again */
pata_parport_dev_select(ap, 0);
/* issue bus reset */
rc = pata_parport_bus_softreset(ap, devmask, deadline);
if (rc && rc != -ENODEV) {
ata_link_err(link, "SRST failed (errno=%d)\n", rc);
return rc;
}
/* determine by signature whether we have ATA or ATAPI devices */
classes[0] = ata_sff_dev_classify(&link->device[0],
devmask & (1 << 0), &err);
if (err != 0x81)
classes[1] = ata_sff_dev_classify(&link->device[1],
devmask & (1 << 1), &err);
return 0;
}
static u8 pata_parport_check_status(struct ata_port *ap)
{
struct pi_adapter *pi = ap->host->private_data;
return pi->proto->read_regr(pi, 0, ATA_REG_STATUS);
}
static u8 pata_parport_check_altstatus(struct ata_port *ap)
{
struct pi_adapter *pi = ap->host->private_data;
return pi->proto->read_regr(pi, 1, 6);
}
static void pata_parport_tf_load(struct ata_port *ap,
const struct ata_taskfile *tf)
{
struct pi_adapter *pi = ap->host->private_data;
if (tf->ctl != ap->last_ctl) {
pi->proto->write_regr(pi, 1, 6, tf->ctl);
ap->last_ctl = tf->ctl;
ata_wait_idle(ap);
}
if (tf->flags & ATA_TFLAG_ISADDR) {
if (tf->flags & ATA_TFLAG_LBA48) {
pi->proto->write_regr(pi, 0, ATA_REG_FEATURE,
tf->hob_feature);
pi->proto->write_regr(pi, 0, ATA_REG_NSECT,
tf->hob_nsect);
pi->proto->write_regr(pi, 0, ATA_REG_LBAL,
tf->hob_lbal);
pi->proto->write_regr(pi, 0, ATA_REG_LBAM,
tf->hob_lbam);
pi->proto->write_regr(pi, 0, ATA_REG_LBAH,
tf->hob_lbah);
}
pi->proto->write_regr(pi, 0, ATA_REG_FEATURE, tf->feature);
pi->proto->write_regr(pi, 0, ATA_REG_NSECT, tf->nsect);
pi->proto->write_regr(pi, 0, ATA_REG_LBAL, tf->lbal);
pi->proto->write_regr(pi, 0, ATA_REG_LBAM, tf->lbam);
pi->proto->write_regr(pi, 0, ATA_REG_LBAH, tf->lbah);
}
if (tf->flags & ATA_TFLAG_DEVICE)
pi->proto->write_regr(pi, 0, ATA_REG_DEVICE, tf->device);
ata_wait_idle(ap);
}
static void pata_parport_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct pi_adapter *pi = ap->host->private_data;
tf->status = pi->proto->read_regr(pi, 0, ATA_REG_STATUS);
tf->error = pi->proto->read_regr(pi, 0, ATA_REG_ERR);
tf->nsect = pi->proto->read_regr(pi, 0, ATA_REG_NSECT);
tf->lbal = pi->proto->read_regr(pi, 0, ATA_REG_LBAL);
tf->lbam = pi->proto->read_regr(pi, 0, ATA_REG_LBAM);
tf->lbah = pi->proto->read_regr(pi, 0, ATA_REG_LBAH);
tf->device = pi->proto->read_regr(pi, 0, ATA_REG_DEVICE);
if (tf->flags & ATA_TFLAG_LBA48) {
pi->proto->write_regr(pi, 1, 6, tf->ctl | ATA_HOB);
tf->hob_feature = pi->proto->read_regr(pi, 0, ATA_REG_ERR);
tf->hob_nsect = pi->proto->read_regr(pi, 0, ATA_REG_NSECT);
tf->hob_lbal = pi->proto->read_regr(pi, 0, ATA_REG_LBAL);
tf->hob_lbam = pi->proto->read_regr(pi, 0, ATA_REG_LBAM);
tf->hob_lbah = pi->proto->read_regr(pi, 0, ATA_REG_LBAH);
pi->proto->write_regr(pi, 1, 6, tf->ctl);
ap->last_ctl = tf->ctl;
}
}
static void pata_parport_exec_command(struct ata_port *ap,
const struct ata_taskfile *tf)
{
struct pi_adapter *pi = ap->host->private_data;
pi->proto->write_regr(pi, 0, ATA_REG_CMD, tf->command);
ata_sff_pause(ap);
}
static unsigned int pata_parport_data_xfer(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw)
{
struct ata_port *ap = qc->dev->link->ap;
struct pi_adapter *pi = ap->host->private_data;
if (rw == READ)
pi->proto->read_block(pi, buf, buflen);
else
pi->proto->write_block(pi, buf, buflen);
return buflen;
}
static void pata_parport_drain_fifo(struct ata_queued_cmd *qc)
{
int count;
struct ata_port *ap;
struct pi_adapter *pi;
char junk[2];
/* We only need to flush incoming data when a command was running */
if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
return;
ap = qc->ap;
pi = ap->host->private_data;
/* Drain up to 64K of data before we give up this recovery method */
for (count = 0; (pata_parport_check_status(ap) & ATA_DRQ)
&& count < 65536; count += 2) {
pi->proto->read_block(pi, junk, 2);
}
if (count)
ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
}
static struct ata_port_operations pata_parport_port_ops = {
.inherits = &ata_sff_port_ops,
.softreset = pata_parport_softreset,
.hardreset = NULL,
.sff_dev_select = pata_parport_dev_select,
.sff_check_status = pata_parport_check_status,
.sff_check_altstatus = pata_parport_check_altstatus,
.sff_tf_load = pata_parport_tf_load,
.sff_tf_read = pata_parport_tf_read,
.sff_exec_command = pata_parport_exec_command,
.sff_data_xfer = pata_parport_data_xfer,
.sff_drain_fifo = pata_parport_drain_fifo,
};
static const struct ata_port_info pata_parport_port_info = {
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_POLLING,
.pio_mask = ATA_PIO0,
/* No DMA */
.port_ops = &pata_parport_port_ops,
};
static void pi_release(struct pi_adapter *pi)
{
parport_unregister_device(pi->pardev);
if (pi->proto->release_proto)
pi->proto->release_proto(pi);
module_put(pi->proto->owner);
}
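/*
 * Fallback protocol test for drivers without a test_proto hook:
 * echo complementary patterns through two taskfile scratch registers
 * for each drive select and count the mismatches.
 */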
static int default_test_proto(struct pi_adapter *pi)
{
int j, k;
int e[2] = { 0, 0 };
pi->proto->connect(pi);
for (j = 0; j < 2; j++) {
pi->proto->write_regr(pi, 0, 6, 0xa0 + j * 0x10);
for (k = 0; k < 256; k++) {
pi->proto->write_regr(pi, 0, 2, k ^ 0xaa);
pi->proto->write_regr(pi, 0, 3, k ^ 0x55);
if (pi->proto->read_regr(pi, 0, 2) != (k ^ 0xaa))
e[j]++;
}
}
pi->proto->disconnect(pi);
dev_dbg(&pi->dev, "%s: port 0x%x, mode %d, test=(%d,%d)\n",
pi->proto->name, pi->port, pi->mode, e[0], e[1]);
	return e[0] && e[1];	/* fail only if both drive selects showed errors */
}
static int pi_test_proto(struct pi_adapter *pi)
{
int res;
parport_claim_or_block(pi->pardev);
if (pi->proto->test_proto)
res = pi->proto->test_proto(pi);
else
res = default_test_proto(pi);
parport_release(pi->pardev);
return res;
}
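/*
 * Pick a transfer mode: honour an explicitly requested mode, or scan
 * all modes up to max and keep the highest one that passes the
 * protocol test. Modes at or above epp_first need an 8-port register
 * window, so they are skipped unless the base address is 8-aligned.
 */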
static bool pi_probe_mode(struct pi_adapter *pi, int max)
{
int best, range;
if (pi->mode != -1) {
if (pi->mode >= max)
return false;
range = 3;
if (pi->mode >= pi->proto->epp_first)
range = 8;
if (range == 8 && pi->port % 8)
return false;
return !pi_test_proto(pi);
}
best = -1;
for (pi->mode = 0; pi->mode < max; pi->mode++) {
range = 3;
if (pi->mode >= pi->proto->epp_first)
range = 8;
if (range == 8 && pi->port % 8)
break;
if (!pi_test_proto(pi))
best = pi->mode;
}
pi->mode = best;
return best > -1;
}
static bool pi_probe_unit(struct pi_adapter *pi, int unit)
{
int max, s, e;
s = unit;
e = s + 1;
if (s == -1) {
s = 0;
e = pi->proto->max_units;
}
if (pi->proto->test_port) {
parport_claim_or_block(pi->pardev);
max = pi->proto->test_port(pi);
parport_release(pi->pardev);
} else {
max = pi->proto->max_mode;
}
if (pi->proto->probe_unit) {
parport_claim_or_block(pi->pardev);
for (pi->unit = s; pi->unit < e; pi->unit++) {
if (pi->proto->probe_unit(pi)) {
parport_release(pi->pardev);
return pi_probe_mode(pi, max);
}
}
parport_release(pi->pardev);
return false;
}
return pi_probe_mode(pi, max);
}
static void pata_parport_dev_release(struct device *dev)
{
struct pi_adapter *pi = container_of(dev, struct pi_adapter, dev);
ida_free(&pata_parport_bus_dev_ids, dev->id);
kfree(pi);
}
static void pata_parport_bus_release(struct device *dev)
{
/* nothing to do here but required to avoid warning on device removal */
}
static struct bus_type pata_parport_bus_type = {
.name = DRV_NAME,
};
static struct device pata_parport_bus = {
.init_name = DRV_NAME,
.release = pata_parport_bus_release,
};
static const struct scsi_host_template pata_parport_sht = {
PATA_PARPORT_SHT("pata_parport")
};
struct pi_device_match {
struct parport *parport;
struct pi_protocol *proto;
};
static int pi_find_dev(struct device *dev, void *data)
{
struct pi_adapter *pi = container_of(dev, struct pi_adapter, dev);
struct pi_device_match *match = data;
return pi->pardev->port == match->parport && pi->proto == match->proto;
}
static struct pi_adapter *pi_init_one(struct parport *parport,
struct pi_protocol *pr, int mode, int unit, int delay)
{
struct pardev_cb par_cb = { };
const struct ata_port_info *ppi[] = { &pata_parport_port_info };
struct ata_host *host;
struct pi_adapter *pi;
struct pi_device_match match = { .parport = parport, .proto = pr };
int id;
/*
* Abort if there's a device already registered on the same parport
* using the same protocol.
*/
if (bus_for_each_dev(&pata_parport_bus_type, NULL, &match, pi_find_dev))
return NULL;
id = ida_alloc(&pata_parport_bus_dev_ids, GFP_KERNEL);
if (id < 0)
return NULL;
pi = kzalloc(sizeof(struct pi_adapter), GFP_KERNEL);
if (!pi) {
ida_free(&pata_parport_bus_dev_ids, id);
return NULL;
}
/* set up pi->dev before pi_probe_unit() so it can use dev_printk() */
pi->dev.parent = &pata_parport_bus;
pi->dev.bus = &pata_parport_bus_type;
pi->dev.driver = &pr->driver;
pi->dev.release = pata_parport_dev_release;
pi->dev.id = id;
dev_set_name(&pi->dev, "pata_parport.%u", pi->dev.id);
if (device_register(&pi->dev)) {
put_device(&pi->dev);
/* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */
return NULL;
}
pi->proto = pr;
if (!try_module_get(pi->proto->owner))
goto out_unreg_dev;
if (pi->proto->init_proto && pi->proto->init_proto(pi) < 0)
goto out_module_put;
pi->delay = (delay == -1) ? pi->proto->default_delay : delay;
pi->mode = mode;
pi->port = parport->base;
par_cb.private = pi;
pi->pardev = parport_register_dev_model(parport, DRV_NAME, &par_cb, id);
if (!pi->pardev)
goto out_module_put;
if (!pi_probe_unit(pi, unit)) {
dev_info(&pi->dev, "Adapter not found\n");
goto out_unreg_parport;
}
pi->proto->log_adapter(pi);
host = ata_host_alloc_pinfo(&pi->pardev->dev, ppi, 1);
if (!host)
goto out_unreg_parport;
dev_set_drvdata(&pi->dev, host);
host->private_data = pi;
ata_port_desc(host->ports[0], "port %s", pi->pardev->port->name);
ata_port_desc(host->ports[0], "protocol %s", pi->proto->name);
pi_connect(pi);
if (ata_host_activate(host, 0, NULL, 0, &pata_parport_sht))
goto out_disconnect;
return pi;
out_disconnect:
pi_disconnect(pi);
out_unreg_parport:
parport_unregister_device(pi->pardev);
if (pi->proto->release_proto)
pi->proto->release_proto(pi);
out_module_put:
module_put(pi->proto->owner);
out_unreg_dev:
device_unregister(&pi->dev);
/* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */
return NULL;
}
int pata_parport_register_driver(struct pi_protocol *pr)
{
int error;
struct parport *parport;
int port_num;
pr->driver.bus = &pata_parport_bus_type;
pr->driver.name = pr->name;
error = driver_register(&pr->driver);
if (error)
return error;
mutex_lock(&pi_mutex);
error = idr_alloc(&protocols, pr, 0, 0, GFP_KERNEL);
if (error < 0) {
driver_unregister(&pr->driver);
mutex_unlock(&pi_mutex);
return error;
}
pr_info("pata_parport: protocol %s registered\n", pr->name);
if (probe) {
/* probe all parports using this protocol */
idr_for_each_entry(&parport_list, parport, port_num)
pi_init_one(parport, pr, -1, -1, -1);
}
mutex_unlock(&pi_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(pata_parport_register_driver);
void pata_parport_unregister_driver(struct pi_protocol *pr)
{
struct pi_protocol *pr_iter;
int id = -1;
mutex_lock(&pi_mutex);
idr_for_each_entry(&protocols, pr_iter, id) {
if (pr_iter == pr)
break;
}
idr_remove(&protocols, id);
mutex_unlock(&pi_mutex);
driver_unregister(&pr->driver);
}
EXPORT_SYMBOL_GPL(pata_parport_unregister_driver);
static ssize_t new_device_store(const struct bus_type *bus, const char *buf, size_t count)
{
char port[12] = "auto";
char protocol[8] = "auto";
int mode = -1, unit = -1, delay = -1;
struct pi_protocol *pr, *pr_wanted;
struct device_driver *drv;
struct parport *parport;
int port_num, port_wanted, pr_num;
bool ok = false;
if (sscanf(buf, "%11s %7s %d %d %d",
port, protocol, &mode, &unit, &delay) < 1)
return -EINVAL;
if (sscanf(port, "parport%u", &port_wanted) < 1) {
if (strcmp(port, "auto")) {
pr_err("invalid port name %s\n", port);
return -EINVAL;
}
port_wanted = -1;
}
drv = driver_find(protocol, &pata_parport_bus_type);
if (!drv) {
if (strcmp(protocol, "auto")) {
pr_err("protocol %s not found\n", protocol);
return -EINVAL;
}
pr_wanted = NULL;
} else {
pr_wanted = container_of(drv, struct pi_protocol, driver);
}
mutex_lock(&pi_mutex);
/* walk all parports */
idr_for_each_entry(&parport_list, parport, port_num) {
if (port_num == port_wanted || port_wanted == -1) {
parport = parport_find_number(port_num);
if (!parport) {
pr_err("no such port %s\n", port);
mutex_unlock(&pi_mutex);
return -ENODEV;
}
/* walk all protocols */
idr_for_each_entry(&protocols, pr, pr_num) {
if (pr == pr_wanted || !pr_wanted)
if (pi_init_one(parport, pr, mode, unit,
delay))
ok = true;
}
parport_put_port(parport);
}
}
mutex_unlock(&pi_mutex);
if (!ok)
return -ENODEV;
return count;
}
static BUS_ATTR_WO(new_device);
static void pi_remove_one(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct pi_adapter *pi = host->private_data;
ata_host_detach(host);
pi_disconnect(pi);
pi_release(pi);
device_unregister(dev);
/* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */
}
static ssize_t delete_device_store(const struct bus_type *bus, const char *buf, size_t count)
{
struct device *dev;
mutex_lock(&pi_mutex);
dev = bus_find_device_by_name(bus, NULL, buf);
if (!dev) {
mutex_unlock(&pi_mutex);
return -ENODEV;
}
pi_remove_one(dev);
put_device(dev);
mutex_unlock(&pi_mutex);
return count;
}
static BUS_ATTR_WO(delete_device);
static void pata_parport_attach(struct parport *port)
{
struct pi_protocol *pr;
int pr_num, id;
mutex_lock(&pi_mutex);
id = idr_alloc(&parport_list, port, port->number, port->number,
GFP_KERNEL);
if (id < 0) {
mutex_unlock(&pi_mutex);
return;
}
if (probe) {
/* probe this port using all protocols */
idr_for_each_entry(&protocols, pr, pr_num)
pi_init_one(port, pr, -1, -1, -1);
}
mutex_unlock(&pi_mutex);
}
static int pi_remove_port(struct device *dev, void *p)
{
struct ata_host *host = dev_get_drvdata(dev);
struct pi_adapter *pi = host->private_data;
if (pi->pardev->port == p)
pi_remove_one(dev);
return 0;
}
static void pata_parport_detach(struct parport *port)
{
mutex_lock(&pi_mutex);
bus_for_each_dev(&pata_parport_bus_type, NULL, port, pi_remove_port);
idr_remove(&parport_list, port->number);
mutex_unlock(&pi_mutex);
}
static struct parport_driver pata_parport_driver = {
.name = DRV_NAME,
.match_port = pata_parport_attach,
.detach = pata_parport_detach,
.devmodel = true,
};
static __init int pata_parport_init(void)
{
int error;
error = bus_register(&pata_parport_bus_type);
if (error) {
pr_err("failed to register pata_parport bus, error: %d\n", error);
return error;
}
error = device_register(&pata_parport_bus);
if (error) {
pr_err("failed to register pata_parport bus, error: %d\n", error);
goto out_unregister_bus;
}
error = bus_create_file(&pata_parport_bus_type, &bus_attr_new_device);
if (error) {
pr_err("unable to create sysfs file, error: %d\n", error);
goto out_unregister_dev;
}
error = bus_create_file(&pata_parport_bus_type, &bus_attr_delete_device);
if (error) {
pr_err("unable to create sysfs file, error: %d\n", error);
goto out_remove_new;
}
error = parport_register_driver(&pata_parport_driver);
if (error) {
pr_err("unable to register parport driver, error: %d\n", error);
goto out_remove_del;
}
return 0;
out_remove_del:
bus_remove_file(&pata_parport_bus_type, &bus_attr_delete_device);
out_remove_new:
bus_remove_file(&pata_parport_bus_type, &bus_attr_new_device);
out_unregister_dev:
device_unregister(&pata_parport_bus);
out_unregister_bus:
bus_unregister(&pata_parport_bus_type);
return error;
}
static __exit void pata_parport_exit(void)
{
parport_unregister_driver(&pata_parport_driver);
bus_remove_file(&pata_parport_bus_type, &bus_attr_new_device);
bus_remove_file(&pata_parport_bus_type, &bus_attr_delete_device);
device_unregister(&pata_parport_bus);
bus_unregister(&pata_parport_bus_type);
}
MODULE_AUTHOR("Ondrej Zary");
MODULE_DESCRIPTION("driver for parallel port ATA adapters");
MODULE_LICENSE("GPL");
MODULE_ALIAS("paride");
module_init(pata_parport_init);
module_exit(pata_parport_exit);
| linux-master | drivers/ata/pata_parport/pata_parport.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (c) 1998 Grant R. Guenther <[email protected]>
*
* friq.c is a low-level protocol driver for the Freecom "IQ"
* parallel port IDE adapter. Early versions of this adapter
* use the 'frpw' protocol.
*
* Freecom uses this adapter in a battery powered external
* CD-ROM drive. It is also used in LS-120 drives by
* Maxell and Panasonic, and other devices.
*
* The battery powered drive requires software support to
* control the power to the drive. This module enables the
* drive power when the high level driver (pcd) is loaded
* and disables it when the module is unloaded. Note, if
* the friq module is built in to the kernel, the power
* will never be switched off, so other means should be
* used to conserve battery power.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
#include "pata_parport.h"
#define CMD(x) \
do { \
w2(4); w0(0xff); w0(0xff); w0(0x73); w0(0x73); \
w0(0xc9); w0(0xc9); w0(0x26); \
w0(0x26); w0(x); w0(x); \
} while (0)
#define j44(l, h) (((l >> 4) & 0x0f) | (h & 0xf0))
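/*
 * Worked example: with l = 0x5a and h = 0xc3, j44() yields
 * ((0x5a >> 4) & 0x0f) | (0xc3 & 0xf0) = 0x05 | 0xc0 = 0xc5, i.e. the
 * result's low nybble comes from the high nybble of l and its high
 * nybble straight from h.
 */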
/*
* cont = 0 - access the IDE register file
* cont = 1 - access the IDE command set
*/
static int cont_map[2] = { 0x08, 0x10 };
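/*
 * cont_map adds the bank offset to the register number: the IDE
 * register file appears to live at internal offsets 0x08-0x0f and the
 * command-set registers at 0x10 and up.
 */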
static int friq_read_regr(struct pi_adapter *pi, int cont, int regr)
{
int h, l, r;
r = regr + cont_map[cont];
CMD(r);
w2(6); l = r1();
w2(4); h = r1();
w2(4);
return j44(l, h);
}
static void friq_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{
int r = regr + cont_map[cont];
CMD(r);
w0(val);
w2(5); w2(7); w2(5); w2(4);
}
static void friq_read_block_int(struct pi_adapter *pi, char *buf, int count, int regr)
{
int h, l, k, ph;
switch (pi->mode) {
case 0:
CMD(regr);
for (k = 0; k < count; k++) {
w2(6); l = r1();
w2(4); h = r1();
buf[k] = j44(l, h);
}
w2(4);
break;
case 1:
ph = 2;
CMD(regr + 0xc0);
w0(0xff);
for (k = 0; k < count; k++) {
w2(0xa4 + ph);
buf[k] = r0();
ph = 2 - ph;
}
w2(0xac); w2(0xa4); w2(4);
break;
case 2:
CMD(regr + 0x80);
for (k = 0; k < count - 2; k++)
buf[k] = r4();
w2(0xac); w2(0xa4);
buf[count - 2] = r4();
buf[count - 1] = r4();
w2(4);
break;
case 3:
CMD(regr + 0x80);
for (k = 0; k < count / 2 - 1; k++)
((u16 *)buf)[k] = r4w();
w2(0xac); w2(0xa4);
buf[count - 2] = r4();
buf[count - 1] = r4();
w2(4);
break;
case 4:
CMD(regr + 0x80);
for (k = 0; k < count / 4 - 1; k++)
((u32 *)buf)[k] = r4l();
buf[count - 4] = r4();
buf[count - 3] = r4();
w2(0xac); w2(0xa4);
buf[count - 2] = r4();
buf[count - 1] = r4();
w2(4);
break;
}
}
static void friq_read_block(struct pi_adapter *pi, char *buf, int count)
{
friq_read_block_int(pi, buf, count, 0x08);
}
static void friq_write_block(struct pi_adapter *pi, char *buf, int count)
{
int k;
switch (pi->mode) {
case 0:
case 1:
CMD(8); w2(5);
for (k = 0; k < count; k++) {
w0(buf[k]);
w2(7); w2(5);
}
w2(4);
break;
case 2:
CMD(0xc8); w2(5);
for (k = 0; k < count; k++)
w4(buf[k]);
w2(4);
break;
case 3:
CMD(0xc8); w2(5);
for (k = 0; k < count / 2; k++)
w4w(((u16 *)buf)[k]);
w2(4);
break;
case 4:
CMD(0xc8); w2(5);
for (k = 0; k < count / 4; k++)
w4l(((u32 *)buf)[k]);
w2(4);
break;
}
}
static void friq_connect(struct pi_adapter *pi)
{
pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(4);
}
static void friq_disconnect(struct pi_adapter *pi)
{
CMD(0x20);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
static int friq_test_proto(struct pi_adapter *pi)
{
int j, k, r;
int e[2] = { 0, 0 };
char scratch[512];
pi->saved_r0 = r0();
w0(0xff); udelay(20); CMD(0x3d); /* turn the power on */
udelay(500);
w0(pi->saved_r0);
friq_connect(pi);
for (j = 0; j < 2; j++) {
friq_write_regr(pi, 0, 6, 0xa0 + j * 0x10);
for (k = 0; k < 256; k++) {
friq_write_regr(pi, 0, 2, k ^ 0xaa);
friq_write_regr(pi, 0, 3, k ^ 0x55);
if (friq_read_regr(pi, 0, 2) != (k ^ 0xaa))
e[j]++;
}
}
friq_disconnect(pi);
friq_connect(pi);
friq_read_block_int(pi, scratch, 512, 0x10);
r = 0;
for (k = 0; k < 128; k++) {
if (scratch[k] != k)
r++;
}
friq_disconnect(pi);
dev_dbg(&pi->dev,
"friq: port 0x%x, mode %d, test=(%d,%d,%d)\n",
pi->port, pi->mode, e[0], e[1], r);
return r || (e[0] && e[1]);
}
static void friq_log_adapter(struct pi_adapter *pi)
{
char *mode_string[5] = { "4-bit", "8-bit", "EPP-8", "EPP-16", "EPP-32" };
dev_info(&pi->dev,
"Freecom IQ ASIC-2 adapter at 0x%x, mode %d (%s), delay %d\n",
pi->port, pi->mode, mode_string[pi->mode], pi->delay);
pi->private = 1;
friq_connect(pi);
CMD(0x9e); /* disable sleep timer */
friq_disconnect(pi);
}
static void friq_release_proto(struct pi_adapter *pi)
{
if (pi->private) { /* turn off the power */
friq_connect(pi);
CMD(0x1d); CMD(0x1e);
friq_disconnect(pi);
pi->private = 0;
}
}
static struct pi_protocol friq = {
.owner = THIS_MODULE,
.name = "friq",
.max_mode = 5,
.epp_first = 2,
.default_delay = 1,
.max_units = 1,
.write_regr = friq_write_regr,
.read_regr = friq_read_regr,
.write_block = friq_write_block,
.read_block = friq_read_block,
.connect = friq_connect,
.disconnect = friq_disconnect,
.test_proto = friq_test_proto,
.log_adapter = friq_log_adapter,
.release_proto = friq_release_proto,
};
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Grant R. Guenther <[email protected]>");
MODULE_DESCRIPTION("Freecom IQ parallel port IDE adapter protocol driver");
module_pata_parport_driver(friq);
| linux-master | drivers/ata/pata_parport/friq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (c) 1998 Grant R. Guenther <[email protected]>
*
* fit2.c is a low-level protocol driver for the older version
* of the Fidelity International Technology parallel port adapter.
* This adapter is used in their TransDisk 2000 and older TransDisk
* 3000 portable hard-drives. As far as I can tell, this device
* supports 4-bit mode _only_.
*
* Newer models of the FIT products use an enhanced protocol.
* The "fit3" protocol module should support current drives.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
#include "pata_parport.h"
#define j44(a, b) (((a >> 4) & 0x0f) | (b & 0xf0))
/*
* cont = 0 - access the IDE register file
* cont = 1 - access the IDE command set
*
* NB: The FIT adapter does not appear to use the control registers.
* So, we map ALT_STATUS to STATUS and NO-OP writes to the device
* control register - this means that IDE reset will not work on these
* devices.
*/
static void fit2_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{
if (cont == 1)
return;
w2(0xc); w0(regr); w2(4); w0(val); w2(5); w0(0); w2(4);
}
static int fit2_read_regr(struct pi_adapter *pi, int cont, int regr)
{
int a, b, r;
if (cont) {
if (regr != 6)
return 0xff;
r = 7;
} else {
r = regr + 0x10;
}
w2(0xc); w0(r); w2(4); w2(5);
w0(0); a = r1();
w0(1); b = r1();
w2(4);
return j44(a, b);
}
static void fit2_read_block(struct pi_adapter *pi, char *buf, int count)
{
int k, a, b, c, d;
w2(0xc); w0(0x10);
for (k = 0; k < count / 4; k++) {
w2(4); w2(5);
w0(0); a = r1(); w0(1); b = r1();
w0(3); c = r1(); w0(2); d = r1();
buf[4 * k + 0] = j44(a, b);
buf[4 * k + 1] = j44(d, c);
w2(4); w2(5);
a = r1(); w0(3); b = r1();
w0(1); c = r1(); w0(0); d = r1();
buf[4 * k + 2] = j44(d, c);
buf[4 * k + 3] = j44(a, b);
}
w2(4);
}
static void fit2_write_block(struct pi_adapter *pi, char *buf, int count)
{
int k;
w2(0xc); w0(0);
for (k = 0; k < count / 2; k++) {
w2(4); w0(buf[2 * k]);
w2(5); w0(buf[2 * k + 1]);
}
w2(4);
}
static void fit2_connect(struct pi_adapter *pi)
{
pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(0xcc);
}
static void fit2_disconnect(struct pi_adapter *pi)
{
w0(pi->saved_r0);
w2(pi->saved_r2);
}
static void fit2_log_adapter(struct pi_adapter *pi)
{
dev_info(&pi->dev, "FIT 2000 adapter at 0x%x, delay %d\n",
pi->port, pi->delay);
}
static struct pi_protocol fit2 = {
.owner = THIS_MODULE,
.name = "fit2",
.max_mode = 1,
.epp_first = 2,
.default_delay = 1,
.max_units = 1,
.write_regr = fit2_write_regr,
.read_regr = fit2_read_regr,
.write_block = fit2_write_block,
.read_block = fit2_read_block,
.connect = fit2_connect,
.disconnect = fit2_disconnect,
.log_adapter = fit2_log_adapter,
};
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Grant R. Guenther <[email protected]>");
MODULE_DESCRIPTION("Fidelity International Technology parallel port IDE adapter"
"(older models) protocol driver");
module_pata_parport_driver(fit2);
| linux-master | drivers/ata/pata_parport/fit2.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (c) 1998 Grant R. Guenther <[email protected]>
*
* ktti.c is a low-level protocol driver for the KT Technology
* parallel port adapter. This adapter is used in the "PHd"
* portable hard-drives. As far as I can tell, this device
* supports 4-bit mode _only_.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
#include "pata_parport.h"
#define j44(a, b) (((a >> 4) & 0x0f) | (b & 0xf0))
/*
* cont = 0 - access the IDE register file
* cont = 1 - access the IDE command set
*/
static int cont_map[2] = { 0x10, 0x08 };
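/*
 * Note the mapping is reversed with respect to most of the other
 * protocol modules in this directory: here the register file sits at
 * offset 0x10 and the command set at 0x08.
 */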
static void ktti_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{
int r = regr + cont_map[cont];
w0(r); w2(0xb); w2(0xa); w2(3); w2(6);
w0(val); w2(3); w0(0); w2(6); w2(0xb);
}
static int ktti_read_regr(struct pi_adapter *pi, int cont, int regr)
{
int a, b, r;
r = regr + cont_map[cont];
w0(r); w2(0xb); w2(0xa); w2(9); w2(0xc); w2(9);
a = r1(); w2(0xc); b = r1(); w2(9); w2(0xc); w2(9);
return j44(a, b);
}
static void ktti_read_block(struct pi_adapter *pi, char *buf, int count)
{
int k, a, b;
for (k = 0; k < count / 2; k++) {
w0(0x10); w2(0xb); w2(0xa); w2(9); w2(0xc); w2(9);
a = r1(); w2(0xc); b = r1(); w2(9);
buf[2*k] = j44(a, b);
a = r1(); w2(0xc); b = r1(); w2(9);
buf[2*k+1] = j44(a, b);
}
}
static void ktti_write_block(struct pi_adapter *pi, char *buf, int count)
{
int k;
for (k = 0; k < count / 2; k++) {
w0(0x10); w2(0xb); w2(0xa); w2(3); w2(6);
w0(buf[2 * k]); w2(3);
w0(buf[2 * k + 1]); w2(6);
w2(0xb);
}
}
static void ktti_connect(struct pi_adapter *pi)
{
pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(0xb); w2(0xa); w0(0); w2(3); w2(6);
}
static void ktti_disconnect(struct pi_adapter *pi)
{
w2(0xb); w2(0xa); w0(0xa0); w2(3); w2(4);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
static void ktti_log_adapter(struct pi_adapter *pi)
{
dev_info(&pi->dev, "KT adapter at 0x%x, delay %d\n",
pi->port, pi->delay);
}
static struct pi_protocol ktti = {
.owner = THIS_MODULE,
.name = "ktti",
.max_mode = 1,
.epp_first = 2,
.default_delay = 1,
.max_units = 1,
.write_regr = ktti_write_regr,
.read_regr = ktti_read_regr,
.write_block = ktti_write_block,
.read_block = ktti_read_block,
.connect = ktti_connect,
.disconnect = ktti_disconnect,
.log_adapter = ktti_log_adapter,
};
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Grant R. Guenther <[email protected]>");
MODULE_DESCRIPTION("KT Technology parallel port IDE adapter protocol driver");
module_pata_parport_driver(ktti);
| linux-master | drivers/ata/pata_parport/ktti.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (c) 1996-1998 Grant R. Guenther <[email protected]>
*
* bpck.c is a low-level protocol driver for the MicroSolutions
* "backpack" parallel port IDE adapter.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
#include "pata_parport.h"
#undef r2
#undef w2
#undef PC
#define PC pi->private
#define r2() (PC=(in_p(2) & 0xff))
#define w2(byte) {out_p(2,byte); PC = byte;}
#define t2(pat) {PC ^= pat; out_p(2,PC);}
#define e2() {PC &= 0xfe; out_p(2,PC);}
#define o2() {PC |= 1; out_p(2,PC);}
#define j44(l,h) (((l>>3)&0x7)|((l>>4)&0x8)|((h<<1)&0x70)|(h&0x80))
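/*
 * j44() reassembles one data byte from two status-line reads: bits 3-5
 * of l become result bits 0-2, bit 7 of l becomes bit 3, bits 3-5 of h
 * become bits 4-6, and bit 7 of h stays in place as bit 7.
 */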
/*
* cont = 0 - access the IDE register file
* cont = 1 - access the IDE command set
* cont = 2 - use internal bpck register addressing
*/
static int cont_map[3] = { 0x40, 0x48, 0 };
static int bpck_read_regr(struct pi_adapter *pi, int cont, int regr)
{
int r, l, h;
r = regr + cont_map[cont];
switch (pi->mode) {
case 0:
w0(r & 0xf); w0(r); t2(2); t2(4);
l = r1();
t2(4);
h = r1();
return j44(l, h);
case 1:
w0(r & 0xf); w0(r); t2(2);
e2(); t2(0x20);
t2(4); h = r0();
t2(1); t2(0x20);
return h;
case 2:
case 3:
case 4:
w0(r); w2(9); w2(0); w2(0x20);
h = r4();
w2(0);
return h;
}
return -1;
}
static void bpck_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{
int r;
r = regr + cont_map[cont];
switch (pi->mode) {
case 0:
case 1: w0(r);
t2(2);
w0(val);
o2(); t2(4); t2(1);
break;
case 2:
case 3:
case 4: w0(r); w2(9); w2(0);
w0(val); w2(1); w2(3); w2(0);
break;
}
}
/* These macros access the bpck registers in native addressing */
#define WR(r,v) bpck_write_regr(pi,2,r,v)
#define RR(r) (bpck_read_regr(pi,2,r))
static void bpck_write_block(struct pi_adapter *pi, char *buf, int count)
{
int i;
switch (pi->mode) {
case 0:
WR(4, 0x40);
w0(0x40); t2(2); t2(1);
for (i = 0; i < count; i++) {
w0(buf[i]);
t2(4);
}
WR(4, 0);
break;
case 1:
WR(4, 0x50);
w0(0x40); t2(2); t2(1);
for (i = 0; i < count; i++) {
w0(buf[i]);
t2(4);
}
WR(4, 0x10);
break;
case 2:
WR(4, 0x48);
w0(0x40); w2(9); w2(0); w2(1);
for (i = 0; i < count; i++)
w4(buf[i]);
w2(0);
WR(4, 8);
break;
case 3:
WR(4, 0x48);
w0(0x40); w2(9); w2(0); w2(1);
for (i = 0; i < count / 2; i++)
w4w(((u16 *)buf)[i]);
w2(0);
WR(4, 8);
break;
case 4:
WR(4, 0x48);
w0(0x40); w2(9); w2(0); w2(1);
for (i = 0; i < count / 4; i++)
w4l(((u32 *)buf)[i]);
w2(0);
WR(4, 8);
break;
}
}
static void bpck_read_block(struct pi_adapter *pi, char *buf, int count)
{
int i, l, h;
switch (pi->mode) {
case 0:
WR(4, 0x40);
w0(0x40); t2(2);
for (i = 0; i < count; i++) {
t2(4); l = r1();
t2(4); h = r1();
buf[i] = j44(l, h);
}
WR(4, 0);
break;
case 1:
WR(4, 0x50);
w0(0x40); t2(2); t2(0x20);
for (i = 0; i < count; i++) {
t2(4);
buf[i] = r0();
}
t2(1); t2(0x20);
WR(4, 0x10);
break;
case 2:
WR(4, 0x48);
w0(0x40); w2(9); w2(0); w2(0x20);
for (i = 0; i < count; i++)
buf[i] = r4();
w2(0);
WR(4, 8);
break;
case 3:
WR(4, 0x48);
w0(0x40); w2(9); w2(0); w2(0x20);
for (i = 0; i < count / 2; i++)
((u16 *)buf)[i] = r4w();
w2(0);
WR(4, 8);
break;
case 4:
WR(4, 0x48);
w0(0x40); w2(9); w2(0); w2(0x20);
for (i = 0; i < count / 4; i++)
((u32 *)buf)[i] = r4l();
w2(0);
WR(4, 8);
break;
}
}
static int bpck_probe_unit(struct pi_adapter *pi)
{
int o1, o0, f7, id;
int t, s;
id = pi->unit;
s = 0;
w2(4); w2(0xe); r2(); t2(2);
o1 = r1()&0xf8;
o0 = r0();
w0(255-id); w2(4); w0(id);
t2(8); t2(8); t2(8);
t2(2); t = r1()&0xf8;
f7 = ((id % 8) == 7);
if ((f7) || (t != o1)) {
t2(2);
s = r1() & 0xf8;
}
if ((t == o1) && ((!f7) || (s == o1))) {
w2(0x4c); w0(o0);
return 0;
}
t2(8); w0(0); t2(2); w2(0x4c); w0(o0);
return 1;
}
static void bpck_connect(struct pi_adapter *pi)
{
pi->saved_r0 = r0();
w0(0xff-pi->unit); w2(4); w0(pi->unit);
t2(8); t2(8); t2(8);
t2(2); t2(2);
switch (pi->mode) {
case 0:
t2(8); WR(4, 0);
break;
case 1:
t2(8); WR(4, 0x10);
break;
case 2:
case 3:
case 4:
w2(0); WR(4, 8);
break;
}
WR(5,8);
/*
* Possibly wrong, purpose unknown (fiddle with ESS logic ???)
* if (pi->devtype == PI_PCD) {
*/
WR(0x46, 0x10);
WR(0x4c, 0x38);
WR(0x4d, 0x88);
WR(0x46, 0xa0);
WR(0x41, 0);
WR(0x4e, 8);
/* } */
}
static void bpck_disconnect(struct pi_adapter *pi)
{
w0(0);
if (pi->mode >= 2) {
w2(9); w2(0);
} else {
t2(2);
}
w2(0x4c); w0(pi->saved_r0);
}
static void bpck_force_spp(struct pi_adapter *pi)
{
/* This fakes the EPP protocol to turn off EPP ... */
pi->saved_r0 = r0();
w0(0xff-pi->unit); w2(4); w0(pi->unit);
t2(8); t2(8); t2(8);
t2(2); t2(2);
w2(0);
w0(4); w2(9); w2(0);
w0(0); w2(1); w2(3); w2(0);
w0(0); w2(9); w2(0);
w2(0x4c); w0(pi->saved_r0);
}
#define TEST_LEN 16
static int bpck_test_proto(struct pi_adapter *pi)
{
int i, e, l, h, om;
char buf[TEST_LEN];
bpck_force_spp(pi);
switch (pi->mode) {
case 0:
bpck_connect(pi);
WR(0x13, 0x7f);
w0(0x13); t2(2);
for (i = 0; i < TEST_LEN; i++) {
t2(4); l = r1();
t2(4); h = r1();
buf[i] = j44(l, h);
}
bpck_disconnect(pi);
break;
case 1:
bpck_connect(pi);
WR(0x13, 0x7f);
w0(0x13); t2(2); t2(0x20);
for (i = 0; i < TEST_LEN; i++) {
t2(4);
buf[i] = r0();
}
t2(1); t2(0x20);
bpck_disconnect(pi);
break;
case 2:
case 3:
case 4:
om = pi->mode;
pi->mode = 0;
bpck_connect(pi);
WR(7, 3);
WR(4, 8);
bpck_disconnect(pi);
pi->mode = om;
bpck_connect(pi);
w0(0x13); w2(9); w2(1); w0(0); w2(3); w2(0); w2(0xe0);
switch (pi->mode) {
case 2:
for (i = 0; i < TEST_LEN; i++)
buf[i] = r4();
break;
case 3:
for (i = 0; i < TEST_LEN / 2; i++)
((u16 *)buf)[i] = r4w();
break;
case 4:
for (i = 0; i < TEST_LEN / 4; i++)
((u32 *)buf)[i] = r4l();
break;
}
w2(0);
WR(7, 0);
bpck_disconnect(pi);
break;
}
dev_dbg(&pi->dev, "bpck: 0x%x unit %d mode %d: ",
pi->port, pi->unit, pi->mode);
print_hex_dump_debug("bpck: ", DUMP_PREFIX_NONE, TEST_LEN, 1, buf,
TEST_LEN, false);
e = 0;
for (i = 0; i < TEST_LEN; i++) {
if (buf[i] != i + 1)
e++;
}
return e;
}
static void bpck_read_eeprom(struct pi_adapter *pi, char *buf)
{
int i, j, k, p, v, f, om, od;
bpck_force_spp(pi);
om = pi->mode; od = pi->delay;
pi->mode = 0; pi->delay = 6;
bpck_connect(pi);
WR(4, 0);
for (i = 0; i < 64; i++) {
WR(6, 8);
WR(6, 0xc);
p = 0x100;
for (k = 0; k < 9; k++) {
f = (((i + 0x180) & p) != 0) * 2;
WR(6, f + 0xc);
WR(6, f + 0xd);
WR(6, f + 0xc);
p = (p >> 1);
}
for (j = 0; j < 2; j++) {
v = 0;
for (k = 0; k < 8; k++) {
WR(6, 0xc);
WR(6, 0xd);
WR(6, 0xc);
f = RR(0);
v = 2 * v + (f == 0x84);
}
buf[2 * i + 1 - j] = v;
}
}
WR(6, 8);
WR(6, 0);
WR(5, 8);
bpck_disconnect(pi);
if (om >= 2) {
bpck_connect(pi);
WR(7, 3);
WR(4, 8);
bpck_disconnect(pi);
}
pi->mode = om; pi->delay = od;
}
static int bpck_test_port(struct pi_adapter *pi)
{
int i, r, m;
/* Check for 8-bit port */
w2(0x2c); i = r0(); w0(255-i); r = r0(); w0(i);
m = -1;
if (r == i)
m = 2;
if (r == (255-i))
m = 0;
w2(0xc);
i = r0();
w0(255-i);
r = r0();
w0(i);
if (r != (255-i))
m = -1;
if (m == 0) {
w2(6);
w2(0xc);
r = r0();
w0(0xaa);
w0(r);
w0(0xaa);
}
if (m == 2) {
w2(0x26);
w2(0xc);
}
if (m == -1)
return 0;
return 5;
}
static void bpck_log_adapter(struct pi_adapter *pi)
{
char *mode_str[5] = { "4-bit", "8-bit", "EPP-8", "EPP-16", "EPP-32" };
char scratch[128];
bpck_read_eeprom(pi,scratch);
print_hex_dump_bytes("bpck EEPROM: ", DUMP_PREFIX_NONE, scratch, 128);
dev_info(&pi->dev,
"backpack %8.8s unit %d at 0x%x, mode %d (%s), delay %d\n",
&scratch[110], pi->unit, pi->port, pi->mode,
mode_str[pi->mode], pi->delay);
}
static struct pi_protocol bpck = {
.owner = THIS_MODULE,
.name = "bpck",
.max_mode = 5,
.epp_first = 2,
.default_delay = 4,
.max_units = 255,
.write_regr = bpck_write_regr,
.read_regr = bpck_read_regr,
.write_block = bpck_write_block,
.read_block = bpck_read_block,
.connect = bpck_connect,
.disconnect = bpck_disconnect,
.test_port = bpck_test_port,
.probe_unit = bpck_probe_unit,
.test_proto = bpck_test_proto,
.log_adapter = bpck_log_adapter,
};
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Grant R. Guenther <[email protected]>");
MODULE_DESCRIPTION("MicroSolutions BACKPACK parallel port IDE adapter protocol driver");
module_pata_parport_driver(bpck);
| linux-master | drivers/ata/pata_parport/bpck.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (c) 1997-1998 Grant R. Guenther <[email protected]>
*
* epia.c is a low-level protocol driver for Shuttle Technologies
* EPIA parallel to IDE adapter chip. This device is now obsolete
* and has been replaced with the EPAT chip, which is supported
* by epat.c, however, some devices based on EPIA are still
* available.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
#include "pata_parport.h"
/*
* mode codes: 0 nybble reads on port 1, 8-bit writes
* 1 5/3 reads on ports 1 & 2, 8-bit writes
* 2 8-bit reads and writes
* 3 8-bit EPP mode
* 4 16-bit EPP
* 5 32-bit EPP
*/
#define j44(a, b) (((a >> 4) & 0x0f) + (b & 0xf0))
#define j53(a, b) (((a >> 3) & 0x1f) + ((b << 4) & 0xe0))
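/*
 * j44() joins two nybble reads as usual; j53() implements the 5/3
 * split: bits 3-7 of the first status read supply result bits 0-4 and
 * bits 1-3 of the second supply bits 5-7.
 */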
/*
* cont = 0 IDE register file
* cont = 1 IDE control registers
*/
static int cont_map[2] = { 0, 0x80 };
static int epia_read_regr(struct pi_adapter *pi, int cont, int regr)
{
int a, b, r;
regr += cont_map[cont];
switch (pi->mode) {
case 0:
r = regr ^ 0x39;
w0(r); w2(1); w2(3); w0(r);
a = r1(); w2(1); b = r1(); w2(4);
return j44(a, b);
case 1:
r = regr ^ 0x31;
w0(r); w2(1); w0(r & 0x37);
w2(3); w2(5); w0(r | 0xf0);
a = r1(); b = r2(); w2(4);
return j53(a, b);
case 2:
r = regr ^ 0x29;
w0(r); w2(1); w2(0x21); w2(0x23);
a = r0(); w2(4);
return a;
case 3:
case 4:
case 5:
w3(regr); w2(0x24); a = r4(); w2(4);
return a;
}
return -1;
}
static void epia_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{
int r;
regr += cont_map[cont];
switch (pi->mode) {
case 0:
case 1:
case 2:
r = regr ^ 0x19;
w0(r); w2(1); w0(val); w2(3); w2(4);
break;
case 3:
case 4:
case 5:
r = regr ^ 0x40;
w3(r); w4(val); w2(4);
break;
}
}
#define WR(r, v) epia_write_regr(pi, 0, r, v)
#define RR(r) epia_read_regr(pi, 0, r)
/*
* The use of register 0x84 is entirely unclear - it seems to control
* some EPP counters ... currently we know about 3 different block
* sizes: the standard 512 byte reads and writes, 12 byte writes and
 * 2048 byte reads (the last two being used in the CD-ROM drivers).
*/
static void epia_connect(struct pi_adapter *pi)
{
pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(4); w0(0xa0); w0(0x50); w0(0xc0); w0(0x30); w0(0xa0); w0(0);
w2(1); w2(4);
if (pi->mode >= 3) {
w0(0xa); w2(1); w2(4); w0(0x82); w2(4); w2(0xc); w2(4);
w2(0x24); w2(0x26); w2(4);
}
WR(0x86, 8);
}
static void epia_disconnect(struct pi_adapter *pi)
{
/* WR(0x84,0x10); */
w0(pi->saved_r0);
w2(1); w2(4);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
static void epia_read_block(struct pi_adapter *pi, char *buf, int count)
{
int k, ph, a, b;
switch (pi->mode) {
case 0:
w0(0x81); w2(1); w2(3); w0(0xc1);
ph = 1;
for (k = 0; k < count; k++) {
w2(2+ph); a = r1();
w2(4+ph); b = r1();
buf[k] = j44(a, b);
ph = 1 - ph;
}
w0(0); w2(4);
break;
case 1:
w0(0x91); w2(1); w0(0x10); w2(3);
w0(0x51); w2(5); w0(0xd1);
ph = 1;
for (k = 0; k < count; k++) {
w2(4 + ph);
a = r1(); b = r2();
buf[k] = j53(a, b);
ph = 1 - ph;
}
w0(0); w2(4);
break;
case 2:
w0(0x89); w2(1); w2(0x23); w2(0x21);
ph = 1;
for (k = 0; k < count; k++) {
w2(0x24 + ph);
buf[k] = r0();
ph = 1 - ph;
}
w2(6); w2(4);
break;
case 3:
if (count > 512)
WR(0x84, 3);
w3(0); w2(0x24);
for (k = 0; k < count; k++)
buf[k] = r4();
w2(4); WR(0x84, 0);
break;
case 4:
if (count > 512)
WR(0x84, 3);
w3(0); w2(0x24);
for (k = 0; k < count / 2; k++)
((u16 *)buf)[k] = r4w();
w2(4); WR(0x84, 0);
break;
case 5:
if (count > 512)
WR(0x84, 3);
w3(0); w2(0x24);
for (k = 0; k < count / 4; k++)
((u32 *)buf)[k] = r4l();
w2(4); WR(0x84, 0);
break;
}
}
static void epia_write_block(struct pi_adapter *pi, char *buf, int count)
{
int ph, k, last, d;
switch (pi->mode) {
case 0:
case 1:
case 2:
w0(0xa1); w2(1); w2(3); w2(1); w2(5);
ph = 0; last = 0x8000;
for (k = 0; k < count; k++) {
d = buf[k];
if (d != last) {
last = d;
w0(d);
}
w2(4 + ph);
ph = 1 - ph;
}
w2(7); w2(4);
break;
case 3:
if (count < 512)
WR(0x84, 1);
w3(0x40);
for (k = 0; k < count; k++)
w4(buf[k]);
if (count < 512)
WR(0x84, 0);
break;
case 4:
if (count < 512)
WR(0x84, 1);
w3(0x40);
for (k = 0; k < count / 2; k++)
w4w(((u16 *)buf)[k]);
if (count < 512)
WR(0x84, 0);
break;
case 5:
if (count < 512)
WR(0x84, 1);
w3(0x40);
for (k = 0; k < count / 4; k++)
w4l(((u32 *)buf)[k]);
if (count < 512)
WR(0x84, 0);
break;
}
}
static int epia_test_proto(struct pi_adapter *pi)
{
int j, k, f;
int e[2] = { 0, 0 };
char scratch[512];
epia_connect(pi);
for (j = 0; j < 2; j++) {
WR(6, 0xa0 + j * 0x10);
for (k = 0; k < 256; k++) {
WR(2, k ^ 0xaa);
WR(3, k ^ 0x55);
if (RR(2) != (k ^ 0xaa))
e[j]++;
}
WR(2, 1); WR(3, 1);
}
epia_disconnect(pi);
f = 0;
epia_connect(pi);
WR(0x84, 8);
epia_read_block(pi, scratch, 512);
for (k = 0; k < 256; k++) {
if ((scratch[2 * k] & 0xff) != ((k + 1) & 0xff))
f++;
if ((scratch[2 * k + 1] & 0xff) != ((-2 - k) & 0xff))
f++;
}
WR(0x84, 0);
epia_disconnect(pi);
dev_dbg(&pi->dev, "epia: port 0x%x, mode %d, test=(%d,%d,%d)\n",
pi->port, pi->mode, e[0], e[1], f);
return (e[0] && e[1]) || f;
}
static void epia_log_adapter(struct pi_adapter *pi)
{
char *mode[6] = { "4-bit", "5/3", "8-bit", "EPP-8", "EPP-16", "EPP-32"};
dev_info(&pi->dev,
"Shuttle EPIA at 0x%x, mode %d (%s), delay %d\n",
pi->port, pi->mode, mode[pi->mode], pi->delay);
}
static struct pi_protocol epia = {
.owner = THIS_MODULE,
.name = "epia",
.max_mode = 6,
.epp_first = 3,
.default_delay = 1,
.max_units = 1,
.write_regr = epia_write_regr,
.read_regr = epia_read_regr,
.write_block = epia_write_block,
.read_block = epia_read_block,
.connect = epia_connect,
.disconnect = epia_disconnect,
.test_proto = epia_test_proto,
.log_adapter = epia_log_adapter,
};
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Grant R. Guenther <[email protected]>");
MODULE_DESCRIPTION("Shuttle Technologies EPIA parallel port IDE adapter "
"protocol driver");
module_pata_parport_driver(epia);
| linux-master | drivers/ata/pata_parport/epia.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (c) 1997-1998 Grant R. Guenther <[email protected]>
*
* comm.c is a low-level protocol driver for some older models of the DataStor
* "Commuter" parallel to IDE adapter. Some of the parallel port devices
* marketed by Arista currently use this adapter.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
#include "pata_parport.h"
/*
 * mode codes: 0 nybble reads, 8-bit writes
 * 1 8-bit reads and writes
 * 2 8-bit EPP mode
 * 3 16-bit EPP mode
 * 4 32-bit EPP mode
 */
#define j44(a, b) (((a >> 3) & 0x0f) | ((b << 1) & 0xf0))
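/*
 * j44() joins two status reads: bits 3-6 of a form the result's low
 * nybble and bits 3-6 of b its high nybble.
 */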
#define P1 w2(5);w2(0xd);w2(0xd);w2(5);w2(4);
#define P2 w2(5);w2(7);w2(7);w2(5);w2(4);
/*
* cont = 0 - access the IDE register file
* cont = 1 - access the IDE command set
*/
static int cont_map[2] = { 0x08, 0x10 };
static int comm_read_regr(struct pi_adapter *pi, int cont, int regr)
{
int l, h, r;
r = regr + cont_map[cont];
switch (pi->mode) {
case 0:
w0(r); P1; w0(0);
w2(6); l = r1(); w0(0x80); h = r1(); w2(4);
return j44(l, h);
case 1:
w0(r+0x20); P1;
w0(0); w2(0x26); h = r0(); w2(4);
return h;
case 2:
case 3:
case 4:
w3(r+0x20); (void)r1();
w2(0x24); h = r4(); w2(4);
return h;
}
return -1;
}
static void comm_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
{
int r = regr + cont_map[cont];
switch (pi->mode) {
case 0:
case 1:
w0(r); P1; w0(val); P2;
break;
case 2:
case 3:
case 4:
w3(r); (void)r1(); w4(val);
break;
}
}
static void comm_connect(struct pi_adapter *pi)
{
pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(4); w0(0xff); w2(6);
w2(4); w0(0xaa); w2(6);
w2(4); w0(0x00); w2(6);
w2(4); w0(0x87); w2(6);
w2(4); w0(0xe0); w2(0xc); w2(0xc); w2(4);
}
static void comm_disconnect(struct pi_adapter *pi)
{
w2(0); w2(0); w2(0); w2(4);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
static void comm_read_block(struct pi_adapter *pi, char *buf, int count)
{
int i, l, h;
switch (pi->mode) {
case 0:
w0(0x48); P1;
for (i = 0; i < count; i++) {
w0(0); w2(6); l = r1();
w0(0x80); h = r1(); w2(4);
buf[i] = j44(l, h);
}
break;
case 1:
w0(0x68); P1; w0(0);
for (i = 0; i < count; i++) {
w2(0x26);
buf[i] = r0();
w2(0x24);
}
w2(4);
break;
case 2:
w3(0x68); (void)r1(); w2(0x24);
for (i = 0; i < count; i++)
buf[i] = r4();
w2(4);
break;
case 3:
w3(0x68); (void)r1(); w2(0x24);
for (i = 0; i < count / 2; i++)
((u16 *)buf)[i] = r4w();
w2(4);
break;
case 4:
w3(0x68); (void)r1(); w2(0x24);
for (i = 0; i < count / 4; i++)
((u32 *)buf)[i] = r4l();
w2(4);
break;
}
}
/* NB: Watch out for the byte swapped writes ! */
static void comm_write_block(struct pi_adapter *pi, char *buf, int count)
{
int k;
switch (pi->mode) {
case 0:
case 1:
w0(0x68); P1;
for (k = 0; k < count; k++) {
w2(5);
w0(buf[k ^ 1]);
w2(7);
}
w2(5); w2(4);
break;
case 2:
w3(0x48); (void)r1();
for (k = 0; k < count; k++)
w4(buf[k ^ 1]);
break;
case 3:
w3(0x48); (void)r1();
for (k = 0; k < count / 2; k++)
w4w(swab16(((u16 *)buf)[k]));
break;
case 4:
w3(0x48); (void)r1();
for (k = 0; k < count / 4; k++)
w4l(swab16(((u16 *)buf)[2 * k]) |
swab16(((u16 *)buf)[2 * k + 1]) << 16);
break;
}
}
static void comm_log_adapter(struct pi_adapter *pi)
{
char *mode_string[5] = { "4-bit", "8-bit", "EPP-8", "EPP-16", "EPP-32" };
dev_info(&pi->dev,
"DataStor Commuter at 0x%x, mode %d (%s), delay %d\n",
pi->port, pi->mode, mode_string[pi->mode], pi->delay);
}
static struct pi_protocol comm = {
.owner = THIS_MODULE,
.name = "comm",
.max_mode = 5,
.epp_first = 2,
.default_delay = 1,
.max_units = 1,
.write_regr = comm_write_regr,
.read_regr = comm_read_regr,
.write_block = comm_write_block,
.read_block = comm_read_block,
.connect = comm_connect,
.disconnect = comm_disconnect,
.log_adapter = comm_log_adapter,
};
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Grant R. Guenther <[email protected]>");
MODULE_DESCRIPTION("DataStor Commuter parallel port IDE adapter protocol driver");
module_pata_parport_driver(comm);
| linux-master | drivers/ata/pata_parport/comm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Minimalistic braille device kernel support.
*
* By default, shows console messages on the braille device.
* Pressing Insert switches to VC browsing.
*
* Copyright (C) Samuel Thibault <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/console.h>
#include <linux/notifier.h>
#include <linux/selection.h>
#include <linux/vt_kern.h>
#include <linux/consolemap.h>
#include <linux/keyboard.h>
#include <linux/kbd_kern.h>
#include <linux/input.h>
MODULE_AUTHOR("[email protected]");
MODULE_DESCRIPTION("braille device");
/*
* Braille device support part.
*/
/* Emit various sounds */
static bool sound;
module_param(sound, bool, 0);
MODULE_PARM_DESC(sound, "emit sounds");
static void beep(unsigned int freq)
{
if (sound)
kd_mksound(freq, HZ/10);
}
/* mini console */
#define WIDTH 40
#define BRAILLE_KEY KEY_INSERT
static u16 console_buf[WIDTH];
static int console_cursor;
/* mini view of VC */
static int vc_x, vc_y, lastvc_x, lastvc_y;
/* show console ? (or show VC) */
static int console_show = 1;
/* pending newline ? */
static int console_newline = 1;
static int lastVC = -1;
static struct console *braille_co;
/* Very VisioBraille-specific */
static void braille_write(u16 *buf)
{
static u16 lastwrite[WIDTH];
unsigned char data[1 + 1 + 2*WIDTH + 2 + 1], csum = 0, *c;
u16 out;
int i;
if (!braille_co)
return;
if (!memcmp(lastwrite, buf, WIDTH * sizeof(*buf)))
return;
memcpy(lastwrite, buf, WIDTH * sizeof(*buf));
#define SOH 1
#define STX 2
#define ETX 3
#define EOT 4
#define ENQ 5
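/*
 * Frame format, as implemented below: STX '>' <payload> <csum> ETX,
 * where csum is the XOR of '>' and every payload byte. Any byte <= 0x05
 * (which would collide with the control codes above) is escaped by
 * prefixing SOH and setting bit 6.
 */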
data[0] = STX;
data[1] = '>';
csum ^= '>';
c = &data[2];
for (i = 0; i < WIDTH; i++) {
out = buf[i];
if (out >= 0x100)
out = '?';
else if (out == 0x00)
out = ' ';
csum ^= out;
if (out <= 0x05) {
*c++ = SOH;
out |= 0x40;
}
*c++ = out;
}
if (csum <= 0x05) {
*c++ = SOH;
csum |= 0x40;
}
*c++ = csum;
*c++ = ETX;
braille_co->write(braille_co, data, c - data);
}
/* Follow the VC cursor*/
static void vc_follow_cursor(struct vc_data *vc)
{
vc_x = vc->state.x - (vc->state.x % WIDTH);
vc_y = vc->state.y;
lastvc_x = vc->state.x;
lastvc_y = vc->state.y;
}
/* Maybe the VC cursor moved, if so follow it */
static void vc_maybe_cursor_moved(struct vc_data *vc)
{
if (vc->state.x != lastvc_x || vc->state.y != lastvc_y)
vc_follow_cursor(vc);
}
/* Show portion of VC at vc_x, vc_y */
static void vc_refresh(struct vc_data *vc)
{
u16 buf[WIDTH];
int i;
for (i = 0; i < WIDTH; i++) {
u16 glyph = screen_glyph(vc,
2 * (vc_x + i) + vc_y * vc->vc_size_row);
buf[i] = inverse_translate(vc, glyph, true);
}
braille_write(buf);
}
/*
* Link to keyboard
*/
static int keyboard_notifier_call(struct notifier_block *blk,
unsigned long code, void *_param)
{
struct keyboard_notifier_param *param = _param;
struct vc_data *vc = param->vc;
int ret = NOTIFY_OK;
if (!param->down)
return ret;
switch (code) {
case KBD_KEYCODE:
if (console_show) {
if (param->value == BRAILLE_KEY) {
console_show = 0;
beep(880);
vc_maybe_cursor_moved(vc);
vc_refresh(vc);
ret = NOTIFY_STOP;
}
} else {
ret = NOTIFY_STOP;
switch (param->value) {
case KEY_INSERT:
beep(440);
console_show = 1;
lastVC = -1;
braille_write(console_buf);
break;
case KEY_LEFT:
if (vc_x > 0) {
vc_x -= WIDTH;
if (vc_x < 0)
vc_x = 0;
} else if (vc_y >= 1) {
beep(880);
vc_y--;
vc_x = vc->vc_cols-WIDTH;
} else
beep(220);
break;
case KEY_RIGHT:
if (vc_x + WIDTH < vc->vc_cols) {
vc_x += WIDTH;
} else if (vc_y + 1 < vc->vc_rows) {
beep(880);
vc_y++;
vc_x = 0;
} else
beep(220);
break;
case KEY_DOWN:
if (vc_y + 1 < vc->vc_rows)
vc_y++;
else
beep(220);
break;
case KEY_UP:
if (vc_y >= 1)
vc_y--;
else
beep(220);
break;
case KEY_HOME:
vc_follow_cursor(vc);
break;
case KEY_PAGEUP:
vc_x = 0;
vc_y = 0;
break;
case KEY_PAGEDOWN:
vc_x = 0;
vc_y = vc->vc_rows-1;
break;
default:
ret = NOTIFY_OK;
break;
}
if (ret == NOTIFY_STOP)
vc_refresh(vc);
}
break;
case KBD_POST_KEYSYM:
{
unsigned char type = KTYP(param->value) - 0xf0;
if (type == KT_SPEC) {
unsigned char val = KVAL(param->value);
int on_off = -1;
switch (val) {
case KVAL(K_CAPS):
on_off = vt_get_leds(fg_console, VC_CAPSLOCK);
break;
case KVAL(K_NUM):
on_off = vt_get_leds(fg_console, VC_NUMLOCK);
break;
case KVAL(K_HOLD):
on_off = vt_get_leds(fg_console, VC_SCROLLOCK);
break;
}
if (on_off == 1)
beep(880);
else if (on_off == 0)
beep(440);
}
}
break;
case KBD_UNBOUND_KEYCODE:
case KBD_UNICODE:
case KBD_KEYSYM:
/* Unused */
break;
}
return ret;
}
static struct notifier_block keyboard_notifier_block = {
.notifier_call = keyboard_notifier_call,
};
static int vt_notifier_call(struct notifier_block *blk,
unsigned long code, void *_param)
{
struct vt_notifier_param *param = _param;
struct vc_data *vc = param->vc;
switch (code) {
case VT_ALLOCATE:
break;
case VT_DEALLOCATE:
break;
case VT_WRITE:
{
unsigned char c = param->c;
if (vc->vc_num != fg_console)
break;
switch (c) {
case '\b':
case 127:
if (console_cursor > 0) {
console_cursor--;
console_buf[console_cursor] = ' ';
}
break;
case '\n':
case '\v':
case '\f':
case '\r':
console_newline = 1;
break;
case '\t':
c = ' ';
fallthrough;
default:
if (c < 32)
/* Ignore other control sequences */
break;
if (console_newline) {
memset(console_buf, 0, sizeof(console_buf));
console_cursor = 0;
console_newline = 0;
}
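/* end of the one-line window: scroll it left one cell so the newest character fits at the right */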
if (console_cursor == WIDTH)
memmove(console_buf, &console_buf[1],
(WIDTH-1) * sizeof(*console_buf));
else
console_cursor++;
console_buf[console_cursor-1] = c;
break;
}
if (console_show)
braille_write(console_buf);
else {
vc_maybe_cursor_moved(vc);
vc_refresh(vc);
}
break;
}
case VT_UPDATE:
/* Maybe a VT switch, flush */
if (console_show) {
if (vc->vc_num != lastVC) {
lastVC = vc->vc_num;
memset(console_buf, 0, sizeof(console_buf));
console_cursor = 0;
braille_write(console_buf);
}
} else {
vc_maybe_cursor_moved(vc);
vc_refresh(vc);
}
break;
}
return NOTIFY_OK;
}
static struct notifier_block vt_notifier_block = {
.notifier_call = vt_notifier_call,
};
/*
* Called from printk.c when console=brl is given
*/
int braille_register_console(struct console *console, int index,
char *console_options, char *braille_options)
{
int ret;
if (!console_options)
/* Only support VisioBraille for now */
console_options = "57600o8";
if (braille_co)
return -ENODEV;
if (console->setup) {
ret = console->setup(console, console_options);
if (ret != 0)
return ret;
}
console->flags |= CON_ENABLED;
console->index = index;
braille_co = console;
register_keyboard_notifier(&keyboard_notifier_block);
register_vt_notifier(&vt_notifier_block);
return 1;
}
int braille_unregister_console(struct console *console)
{
if (braille_co != console)
return -EINVAL;
unregister_keyboard_notifier(&keyboard_notifier_block);
unregister_vt_notifier(&vt_notifier_block);
braille_co = NULL;
return 1;
}
| linux-master | drivers/accessibility/braille/braille_console.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kthread.h>
#include <linux/wait.h>
#include "spk_types.h"
#include "speakup.h"
#include "spk_priv.h"
DECLARE_WAIT_QUEUE_HEAD(speakup_event);
EXPORT_SYMBOL_GPL(speakup_event);
int speakup_thread(void *data)
{
unsigned long flags;
int should_break;
struct bleep our_sound;
our_sound.active = 0;
our_sound.freq = 0;
our_sound.jiffies = 0;
mutex_lock(&spk_mutex);
while (1) {
DEFINE_WAIT(wait);
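/* sleep until there is a sound to play, text to synthesize, or a flush request */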
while (1) {
spin_lock_irqsave(&speakup_info.spinlock, flags);
our_sound = spk_unprocessed_sound;
spk_unprocessed_sound.active = 0;
prepare_to_wait(&speakup_event, &wait,
TASK_INTERRUPTIBLE);
should_break = kthread_should_stop() ||
our_sound.active ||
(synth && synth->catch_up && synth->alive &&
(speakup_info.flushing ||
!synth_buffer_empty()));
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (should_break)
break;
mutex_unlock(&spk_mutex);
schedule();
mutex_lock(&spk_mutex);
}
finish_wait(&speakup_event, &wait);
if (kthread_should_stop())
break;
if (our_sound.active)
kd_mksound(our_sound.freq, our_sound.jiffies);
if (synth && synth->catch_up && synth->alive) {
/*
* It is up to the callee to take the lock, so that it
* can sleep whenever it likes
*/
synth->catch_up(synth);
}
speakup_start_ttys();
}
mutex_unlock(&spk_mutex);
return 0;
}
| linux-master | drivers/accessibility/speakup/thread.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <[email protected]>
* this version considerably modified by David Borowski, [email protected]
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
* this code is specifically written as a driver for the speakup screenreview
* package and is not a general device driver.
*/
#include "spk_priv.h"
#include "speakup.h"
#include "speakup_acnt.h" /* local header file for Accent values */
#define DRV_VERSION "2.11"
#define PROCSPEECH '\r'
static int synth_probe(struct spk_synth *synth);
enum default_vars_id {
CAPS_START_ID = 0, CAPS_STOP_ID,
RATE_ID, PITCH_ID,
VOL_ID, TONE_ID,
DIRECT_ID, V_LAST_VAR_ID,
NB_ID
};
static struct var_t vars[NB_ID] = {
[CAPS_START_ID] = { CAPS_START, .u.s = {"\033P8" } },
[CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"\033P5" } },
[RATE_ID] = { RATE, .u.n = {"\033R%c", 9, 0, 17, 0, 0, "0123456789abcdefgh" } },
[PITCH_ID] = { PITCH, .u.n = {"\033P%d", 5, 0, 9, 0, 0, NULL } },
[VOL_ID] = { VOL, .u.n = {"\033A%d", 9, 0, 9, 0, 0, NULL } },
[TONE_ID] = { TONE, .u.n = {"\033V%d", 5, 0, 9, 0, 0, NULL } },
[DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
/*
* These attributes will appear in /sys/accessibility/speakup/acntsa.
*/
static struct kobj_attribute caps_start_attribute =
__ATTR(caps_start, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
__ATTR(tone, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
__ATTR(vol, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
__ATTR(delay_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
__ATTR(direct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
__ATTR(trigger_time, 0644, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
&rate_attribute.attr,
&tone_attribute.attr,
&vol_attribute.attr,
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
static struct spk_synth synth_acntsa = {
.name = "acntsa",
.version = DRV_VERSION,
.long_name = "Accent-SA",
.init = "\033T2\033=M\033Oi\033N1\n",
.procspeech = PROCSPEECH,
.clear = SYNTH_CLEAR,
.delay = 400,
.trigger = 50,
.jiffies = 30,
.full = 40000,
.dev_name = SYNTH_DEFAULT_DEV,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
.io_ops = &spk_ttyio_ops,
.probe = synth_probe,
.release = spk_ttyio_release,
.synth_immediate = spk_ttyio_synth_immediate,
.catch_up = spk_do_catch_up,
.flush = spk_synth_flush,
.is_alive = spk_synth_is_alive_restart,
.synth_adjust = NULL,
.read_buff_add = NULL,
.get_index = NULL,
.indexing = {
.command = NULL,
.lowindex = 0,
.highindex = 0,
.currindex = 0,
},
.attributes = {
.attrs = synth_attrs,
.name = "acntsa",
},
};
static int synth_probe(struct spk_synth *synth)
{
int failed;
failed = spk_ttyio_synth_probe(synth);
if (failed == 0) {
synth->synth_immediate(synth, "\033=R\r");
mdelay(100);
}
synth->alive = !failed;
return failed;
}
module_param_named(ser, synth_acntsa.ser, int, 0444);
module_param_named(dev, synth_acntsa.dev_name, charp, 0444);
module_param_named(start, synth_acntsa.startup, short, 0444);
module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444);
module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444);
module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444);
module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444);
module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
MODULE_PARM_DESC(dev, "Set the device e.g. ttyUSB0, for the synthesizer.");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
MODULE_PARM_DESC(rate, "Set the rate variable on load.");
MODULE_PARM_DESC(pitch, "Set the pitch variable on load.");
MODULE_PARM_DESC(vol, "Set the vol variable on load.");
MODULE_PARM_DESC(tone, "Set the tone variable on load.");
MODULE_PARM_DESC(direct, "Set the direct variable on load.");
module_spk_synth(synth_acntsa);
MODULE_AUTHOR("Kirk Reiser <[email protected]>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for Accent SA synthesizer");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/accessibility/speakup/speakup_acntsa.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <[email protected]>
* this version considerably modified by David Borowski, [email protected]
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
 * this code is specifically written as a driver for the speakup screenreview
 * package and is not a general device driver.
*/
#include <linux/unistd.h>
#include <linux/proc_fs.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include "speakup.h"
#include "spk_priv.h"
#define DRV_VERSION "2.20"
#define SYNTH_CLEAR 0x03
#define PROCSPEECH 0x0b
static int xoff;
static inline int synth_full(void)
{
return xoff;
}
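/*
 * xoff mirrors the software flow control sent by the synthesizer:
 * read_buff_add() sets it on XOFF (0x13) and clears it on XON (0x11);
 * do_catch_up() treats it as "synthesizer buffer full".
 */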
static void do_catch_up(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
static void read_buff_add(u_char c);
static unsigned char get_index(struct spk_synth *synth);
static int in_escape;
static int is_flushing;
static DEFINE_SPINLOCK(flush_lock);
static DECLARE_WAIT_QUEUE_HEAD(flush);
enum default_vars_id {
CAPS_START_ID = 0, CAPS_STOP_ID,
RATE_ID, PITCH_ID, INFLECTION_ID,
VOL_ID, PUNCT_ID, VOICE_ID,
DIRECT_ID, V_LAST_VAR_ID,
NB_ID,
};
static struct var_t vars[NB_ID] = {
[CAPS_START_ID] = { CAPS_START, .u.s = {"[:dv ap 160] " } },
[CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"[:dv ap 100 ] " } },
[RATE_ID] = { RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } },
[PITCH_ID] = { PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } },
[INFLECTION_ID] = { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } },
[VOL_ID] = { VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } },
[PUNCT_ID] = { PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } },
[VOICE_ID] = { VOICE, .u.n = {"[:n%c] ", 0, 0, 9, 0, 0, "phfdburwkv" } },
[DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
/*
* These attributes will appear in /sys/accessibility/speakup/dectlk.
*/
static struct kobj_attribute caps_start_attribute =
__ATTR(caps_start, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute inflection_attribute =
__ATTR(inflection, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
__ATTR(voice, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
__ATTR(vol, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
__ATTR(delay_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
__ATTR(direct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute flush_time_attribute =
__ATTR(flush_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
__ATTR(trigger_time, 0644, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
&inflection_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&voice_attribute.attr,
&vol_attribute.attr,
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
&flush_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
static int ap_defaults[] = {122, 89, 155, 110, 208, 240, 200, 106, 306};
static int g5_defaults[] = {86, 81, 86, 84, 81, 80, 83, 83, 73};
static struct spk_synth synth_dectlk = {
.name = "dectlk",
.version = DRV_VERSION,
.long_name = "Dectalk Express",
.init = "[:error sp :name paul :rate 180 :tsr off] ",
.procspeech = PROCSPEECH,
.clear = SYNTH_CLEAR,
.delay = 500,
.trigger = 50,
.jiffies = 50,
.full = 40000,
.flush_time = 4000,
.dev_name = SYNTH_DEFAULT_DEV,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
.default_pitch = ap_defaults,
.default_vol = g5_defaults,
.io_ops = &spk_ttyio_ops,
.probe = spk_ttyio_synth_probe,
.release = spk_ttyio_release,
.synth_immediate = spk_ttyio_synth_immediate,
.catch_up = do_catch_up,
.flush = synth_flush,
.is_alive = spk_synth_is_alive_restart,
.synth_adjust = NULL,
.read_buff_add = read_buff_add,
.get_index = get_index,
.indexing = {
.command = "[:in re %d ] ",
.lowindex = 1,
.highindex = 8,
.currindex = 1,
},
.attributes = {
.attrs = synth_attrs,
.name = "dectlk",
},
};
static int is_indnum(u_char *ch)
{
if ((*ch >= '0') && (*ch <= '9')) {
*ch = *ch - '0';
return 1;
}
return 0;
}
static u_char lastind;
static unsigned char get_index(struct spk_synth *synth)
{
u_char rv;
rv = lastind;
lastind = 0;
return rv;
}
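/*
 * Bytes coming back from the synthesizer: 0x01 acknowledges a flush,
 * 0x13/0x11 are XOFF/XON flow control, and a run of ASCII digits
 * terminated by a printable character reports the most recently spoken
 * index marker.
 */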
static void read_buff_add(u_char c)
{
static int ind = -1;
if (c == 0x01) {
unsigned long flags;
spin_lock_irqsave(&flush_lock, flags);
is_flushing = 0;
wake_up_interruptible(&flush);
spin_unlock_irqrestore(&flush_lock, flags);
} else if (c == 0x13) {
xoff = 1;
} else if (c == 0x11) {
xoff = 0;
} else if (is_indnum(&c)) {
if (ind == -1)
ind = c;
else
ind = ind * 10 + c;
} else if ((c > 31) && (c < 127)) {
if (ind != -1)
lastind = (u_char)ind;
ind = -1;
}
}
static void do_catch_up(struct spk_synth *synth)
{
int synth_full_val = 0;
static u_char ch;
static u_char last = '\0';
unsigned long flags;
unsigned long jiff_max;
unsigned long timeout;
DEFINE_WAIT(wait);
struct var_t *jiffy_delta;
struct var_t *delay_time;
struct var_t *flush_time;
int jiffy_delta_val;
int delay_time_val;
int timeout_val;
jiffy_delta = spk_get_var(JIFFY);
delay_time = spk_get_var(DELAY);
flush_time = spk_get_var(FLUSH);
spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
timeout_val = flush_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
timeout = msecs_to_jiffies(timeout_val);
jiff_max = jiffies + jiffy_delta_val;
while (!kthread_should_stop()) {
/* if no ctl-a (flush acknowledgement) within flush_time, send data anyway */
spin_lock_irqsave(&flush_lock, flags);
while (is_flushing && timeout) {
prepare_to_wait(&flush, &wait, TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(&flush_lock, flags);
timeout = schedule_timeout(timeout);
spin_lock_irqsave(&flush_lock, flags);
}
finish_wait(&flush, &wait);
is_flushing = 0;
spin_unlock_irqrestore(&flush_lock, flags);
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (speakup_info.flushing) {
speakup_info.flushing = 0;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
synth->flush(synth);
continue;
}
synth_buffer_skip_nonlatin1();
if (synth_buffer_empty()) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
}
ch = synth_buffer_peek();
set_current_state(TASK_INTERRUPTIBLE);
delay_time_val = delay_time->u.n.value;
synth_full_val = synth_full();
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '\n')
ch = 0x0D;
if (synth_full_val || !synth->io_ops->synth_out(synth, ch)) {
schedule_timeout(msecs_to_jiffies(delay_time_val));
continue;
}
set_current_state(TASK_RUNNING);
spin_lock_irqsave(&speakup_info.spinlock, flags);
synth_buffer_getc();
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '[') {
in_escape = 1;
} else if (ch == ']') {
in_escape = 0;
} else if (ch <= SPACE) {
if (!in_escape && strchr(",.!?;:", last))
synth->io_ops->synth_out(synth, PROCSPEECH);
if (time_after_eq(jiffies, jiff_max)) {
if (!in_escape)
synth->io_ops->synth_out(synth,
PROCSPEECH);
spin_lock_irqsave(&speakup_info.spinlock,
flags);
jiffy_delta_val = jiffy_delta->u.n.value;
delay_time_val = delay_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock,
flags);
schedule_timeout(msecs_to_jiffies
(delay_time_val));
jiff_max = jiffies + jiffy_delta_val;
}
}
last = ch;
}
if (!in_escape)
synth->io_ops->synth_out(synth, PROCSPEECH);
}
static void synth_flush(struct spk_synth *synth)
{
if (in_escape)
		/* if in a command, output ']' so we don't get an error */
synth->io_ops->synth_out(synth, ']');
in_escape = 0;
is_flushing = 1;
synth->io_ops->flush_buffer(synth);
synth->io_ops->synth_out(synth, SYNTH_CLEAR);
}
module_param_named(ser, synth_dectlk.ser, int, 0444);
module_param_named(dev, synth_dectlk.dev_name, charp, 0444);
module_param_named(start, synth_dectlk.startup, short, 0444);
module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444);
module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444);
module_param_named(inflection, vars[INFLECTION_ID].u.n.default_val, int, 0444);
module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444);
module_param_named(punct, vars[PUNCT_ID].u.n.default_val, int, 0444);
module_param_named(voice, vars[VOICE_ID].u.n.default_val, int, 0444);
module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
MODULE_PARM_DESC(dev, "Set the device e.g. ttyUSB0, for the synthesizer.");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
MODULE_PARM_DESC(rate, "Set the rate variable on load.");
MODULE_PARM_DESC(pitch, "Set the pitch variable on load.");
MODULE_PARM_DESC(inflection, "Set the inflection variable on load.");
MODULE_PARM_DESC(vol, "Set the vol variable on load.");
MODULE_PARM_DESC(punct, "Set the punct variable on load.");
MODULE_PARM_DESC(voice, "Set the voice variable on load.");
MODULE_PARM_DESC(direct, "Set the direct variable on load.");
module_spk_synth(synth_dectlk);
MODULE_AUTHOR("Kirk Reiser <[email protected]>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for DECtalk Express synthesizers");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/accessibility/speakup/speakup_dectlk.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include "spk_types.h"
#include "speakup.h"
#include "spk_priv.h"
#include "serialio.h"
#include <linux/serial_core.h>
/* WARNING: Do not change this to <linux/serial.h> without testing that
* SERIAL_PORT_DFNS does get defined to the appropriate value.
*/
#include <asm/serial.h>
#ifndef SERIAL_PORT_DFNS
#define SERIAL_PORT_DFNS
#endif
static void start_serial_interrupt(int irq);
static const struct old_serial_port rs_table[] = {
SERIAL_PORT_DFNS
};
static const struct old_serial_port *serstate;
static int timeouts;
static int spk_serial_out(struct spk_synth *in_synth, const char ch);
static void spk_serial_send_xchar(struct spk_synth *in_synth, char ch);
static void spk_serial_tiocmset(struct spk_synth *in_synth, unsigned int set, unsigned int clear);
static unsigned char spk_serial_in(struct spk_synth *in_synth);
static unsigned char spk_serial_in_nowait(struct spk_synth *in_synth);
static void spk_serial_flush_buffer(struct spk_synth *in_synth);
static int spk_serial_wait_for_xmitr(struct spk_synth *in_synth);
struct spk_io_ops spk_serial_io_ops = {
.synth_out = spk_serial_out,
.send_xchar = spk_serial_send_xchar,
.tiocmset = spk_serial_tiocmset,
.synth_in = spk_serial_in,
.synth_in_nowait = spk_serial_in_nowait,
.flush_buffer = spk_serial_flush_buffer,
.wait_for_xmitr = spk_serial_wait_for_xmitr,
};
EXPORT_SYMBOL_GPL(spk_serial_io_ops);
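/*
 * A hardware synth driver wires these operations up through its
 * struct spk_synth.  A minimal sketch (synth_foo is hypothetical;
 * the field names are those used by the drivers in this directory):
 *
 *	static struct spk_synth synth_foo = {
 *		...
 *		.io_ops		 = &spk_serial_io_ops,
 *		.probe		 = spk_serial_synth_probe,
 *		.release	 = spk_serial_release,
 *		.synth_immediate = spk_serial_synth_immediate,
 *	};
 */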
const struct old_serial_port *spk_serial_init(int index)
{
int baud = 9600, quot = 0;
unsigned int cval = 0;
int cflag = CREAD | HUPCL | CLOCAL | B9600 | CS8;
const struct old_serial_port *ser;
int err;
if (index >= ARRAY_SIZE(rs_table)) {
pr_info("no port info for ttyS%d\n", index);
return NULL;
}
ser = rs_table + index;
/* Divisor, byte size and parity */
quot = ser->baud_base / baud;
cval = cflag & (CSIZE | CSTOPB);
#if defined(__powerpc__) || defined(__alpha__)
cval >>= 8;
#else /* !__powerpc__ && !__alpha__ */
cval >>= 4;
#endif /* !__powerpc__ && !__alpha__ */
if (cflag & PARENB)
cval |= UART_LCR_PARITY;
if (!(cflag & PARODD))
cval |= UART_LCR_EPAR;
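	/*
	 * Worked example: with the usual PC baud_base of 115200,
	 * quot = 115200 / 9600 = 12, so UART_DLL gets 12 and UART_DLM
	 * gets 0 below.  (115200 is an assumption about this entry of
	 * rs_table; the real value comes from SERIAL_PORT_DFNS.)
	 */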
if (synth_request_region(ser->port, 8)) {
/* try to take it back. */
pr_info("Ports not available, trying to steal them\n");
__release_region(&ioport_resource, ser->port, 8);
err = synth_request_region(ser->port, 8);
if (err) {
pr_warn("Unable to allocate port at %x, errno %i",
ser->port, err);
return NULL;
}
}
/* Disable UART interrupts, set DTR and RTS high
* and set speed.
*/
outb(cval | UART_LCR_DLAB, ser->port + UART_LCR); /* set DLAB */
outb(quot & 0xff, ser->port + UART_DLL); /* LS of divisor */
outb(quot >> 8, ser->port + UART_DLM); /* MS of divisor */
outb(cval, ser->port + UART_LCR); /* reset DLAB */
/* Turn off Interrupts */
outb(0, ser->port + UART_IER);
outb(UART_MCR_DTR | UART_MCR_RTS, ser->port + UART_MCR);
/* If we read 0xff from the LSR, there is no UART here. */
if (inb(ser->port + UART_LSR) == 0xff) {
synth_release_region(ser->port, 8);
serstate = NULL;
return NULL;
}
mdelay(1);
speakup_info.port_tts = ser->port;
serstate = ser;
start_serial_interrupt(ser->irq);
return ser;
}
static irqreturn_t synth_readbuf_handler(int irq, void *dev_id)
{
unsigned long flags;
int c;
spin_lock_irqsave(&speakup_info.spinlock, flags);
while (inb_p(speakup_info.port_tts + UART_LSR) & UART_LSR_DR) {
c = inb_p(speakup_info.port_tts + UART_RX);
synth->read_buff_add((u_char)c);
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return IRQ_HANDLED;
}
static void start_serial_interrupt(int irq)
{
int rv;
if (!synth->read_buff_add)
return;
rv = request_irq(irq, synth_readbuf_handler, IRQF_SHARED,
"serial", (void *)synth_readbuf_handler);
if (rv)
pr_err("Unable to request Speakup serial I R Q\n");
/* Set MCR */
outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2,
speakup_info.port_tts + UART_MCR);
/* Turn on Interrupts */
outb(UART_IER_MSI | UART_IER_RLSI | UART_IER_RDI,
speakup_info.port_tts + UART_IER);
inb(speakup_info.port_tts + UART_LSR);
inb(speakup_info.port_tts + UART_RX);
inb(speakup_info.port_tts + UART_IIR);
inb(speakup_info.port_tts + UART_MSR);
outb(1, speakup_info.port_tts + UART_FCR); /* Turn FIFO On */
}
static void spk_serial_send_xchar(struct spk_synth *synth, char ch)
{
int timeout = SPK_XMITR_TIMEOUT;
while (spk_serial_tx_busy()) {
if (!--timeout)
break;
udelay(1);
}
outb(ch, speakup_info.port_tts);
}
static void spk_serial_tiocmset(struct spk_synth *in_synth, unsigned int set, unsigned int clear)
{
int old = inb(speakup_info.port_tts + UART_MCR);
outb((old & ~clear) | set, speakup_info.port_tts + UART_MCR);
}
int spk_serial_synth_probe(struct spk_synth *synth)
{
const struct old_serial_port *ser;
int failed = 0;
if ((synth->ser >= SPK_LO_TTY) && (synth->ser <= SPK_HI_TTY)) {
ser = spk_serial_init(synth->ser);
if (!ser) {
failed = -1;
} else {
outb_p(0, ser->port);
mdelay(1);
outb_p('\r', ser->port);
}
} else {
failed = -1;
pr_warn("ttyS%i is an invalid port\n", synth->ser);
}
if (failed) {
pr_info("%s: not found\n", synth->long_name);
return -ENODEV;
}
pr_info("%s: ttyS%i, Driver Version %s\n",
synth->long_name, synth->ser, synth->version);
synth->alive = 1;
return 0;
}
EXPORT_SYMBOL_GPL(spk_serial_synth_probe);
void spk_stop_serial_interrupt(void)
{
if (speakup_info.port_tts == 0)
return;
if (!synth->read_buff_add)
return;
/* Turn off interrupts */
outb(0, speakup_info.port_tts + UART_IER);
/* Free IRQ */
free_irq(serstate->irq, (void *)synth_readbuf_handler);
}
EXPORT_SYMBOL_GPL(spk_stop_serial_interrupt);
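/*
 * Wait for the transmitter to become ready: first for the UART to drain
 * its holding register, then for the synth to raise CTS.  If a timeout
 * happens NUM_DISABLE_TIMEOUTS times in a row, the synth is assumed
 * dead and speakup deactivates it so the consoles are not throttled
 * forever.
 */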
static int spk_serial_wait_for_xmitr(struct spk_synth *in_synth)
{
int tmout = SPK_XMITR_TIMEOUT;
if ((in_synth->alive) && (timeouts >= NUM_DISABLE_TIMEOUTS)) {
pr_warn("%s: too many timeouts, deactivating speakup\n",
in_synth->long_name);
in_synth->alive = 0;
		/* No synth any more, so nobody will restart TTYs, and we
		 * thus need to do it ourselves.  Now that there is no synth
		 * we can let applications flood anyway.
		 */
speakup_start_ttys();
timeouts = 0;
return 0;
}
while (spk_serial_tx_busy()) {
if (--tmout == 0) {
pr_warn("%s: timed out (tx busy)\n",
in_synth->long_name);
timeouts++;
return 0;
}
udelay(1);
}
tmout = SPK_CTS_TIMEOUT;
while (!((inb_p(speakup_info.port_tts + UART_MSR)) & UART_MSR_CTS)) {
/* CTS */
if (--tmout == 0) {
timeouts++;
return 0;
}
udelay(1);
}
timeouts = 0;
return 1;
}
static unsigned char spk_serial_in(struct spk_synth *in_synth)
{
int tmout = SPK_SERIAL_TIMEOUT;
while (!(inb_p(speakup_info.port_tts + UART_LSR) & UART_LSR_DR)) {
if (--tmout == 0) {
pr_warn("time out while waiting for input.\n");
return 0xff;
}
udelay(1);
}
return inb_p(speakup_info.port_tts + UART_RX);
}
static unsigned char spk_serial_in_nowait(struct spk_synth *in_synth)
{
unsigned char lsr;
lsr = inb_p(speakup_info.port_tts + UART_LSR);
if (!(lsr & UART_LSR_DR))
return 0;
return inb_p(speakup_info.port_tts + UART_RX);
}
static void spk_serial_flush_buffer(struct spk_synth *in_synth)
{
/* TODO: flush the UART 16550 buffer */
}
static int spk_serial_out(struct spk_synth *in_synth, const char ch)
{
if (in_synth->alive && spk_serial_wait_for_xmitr(in_synth)) {
outb_p(ch, speakup_info.port_tts);
return 1;
}
return 0;
}
const char *spk_serial_synth_immediate(struct spk_synth *synth,
const char *buff)
{
u_char ch;
while ((ch = *buff)) {
if (ch == '\n')
ch = synth->procspeech;
if (spk_serial_wait_for_xmitr(synth))
outb(ch, speakup_info.port_tts);
else
return buff;
buff++;
}
return NULL;
}
EXPORT_SYMBOL_GPL(spk_serial_synth_immediate);
void spk_serial_release(struct spk_synth *synth)
{
spk_stop_serial_interrupt();
if (speakup_info.port_tts == 0)
return;
synth_release_region(speakup_info.port_tts, 8);
speakup_info.port_tts = 0;
}
EXPORT_SYMBOL_GPL(spk_serial_release);
| linux-master | drivers/accessibility/speakup/serialio.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <[email protected]>
* this version considerably modified by David Borowski, [email protected]
* eventually modified by Samuel Thibault <[email protected]>
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
* Copyright (C) 2007 Samuel Thibault.
*
 * specifically written as a driver for the speakup screenreview
 * package and is not a general device driver.
*/
#include "spk_priv.h"
#include "speakup.h"
#define PROCSPEECH '\n'
#define DRV_VERSION "2.11"
#define SYNTH_CLEAR '!'
enum default_vars_id {
CAPS_START_ID = 0, CAPS_STOP_ID,
PAUSE_ID,
RATE_ID, PITCH_ID, INFLECTION_ID,
VOL_ID, TONE_ID, PUNCT_ID,
DIRECT_ID, V_LAST_VAR_ID,
NB_ID
};
static struct var_t vars[NB_ID] = {
[CAPS_START_ID] = { CAPS_START, .u.s = {"CAPS_START\n" } },
[CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"CAPS_STOP\n" } },
[PAUSE_ID] = { PAUSE, .u.s = {"PAUSE\n"} },
[RATE_ID] = { RATE, .u.n = {"RATE %d\n", 8, 1, 16, 0, 0, NULL } },
[PITCH_ID] = { PITCH, .u.n = {"PITCH %d\n", 8, 0, 16, 0, 0, NULL } },
[INFLECTION_ID] = { INFLECTION, .u.n = {"INFLECTION %d\n", 8, 0, 16, 0, 0, NULL } },
[VOL_ID] = { VOL, .u.n = {"VOL %d\n", 8, 0, 16, 0, 0, NULL } },
[TONE_ID] = { TONE, .u.n = {"TONE %d\n", 8, 0, 16, 0, 0, NULL } },
[PUNCT_ID] = { PUNCT, .u.n = {"PUNCT %d\n", 0, 0, 3, 0, 0, NULL } },
[DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
/*
* These attributes will appear in /sys/accessibility/speakup/dummy.
*/
static struct kobj_attribute caps_start_attribute =
__ATTR(caps_start, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute inflection_attribute =
__ATTR(inflection, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
__ATTR(tone, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
__ATTR(vol, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
__ATTR(delay_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
__ATTR(direct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
__ATTR(trigger_time, 0644, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
&inflection_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&tone_attribute.attr,
&vol_attribute.attr,
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
static void read_buff_add(u_char c)
{
pr_info("speakup_dummy: got character %02x\n", c);
}
static struct spk_synth synth_dummy = {
.name = "dummy",
.version = DRV_VERSION,
.long_name = "Dummy",
.init = "Speakup\n",
.procspeech = PROCSPEECH,
.clear = SYNTH_CLEAR,
.delay = 500,
.trigger = 50,
.jiffies = 50,
.full = 40000,
.dev_name = SYNTH_DEFAULT_DEV,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
.io_ops = &spk_ttyio_ops,
.probe = spk_ttyio_synth_probe,
.release = spk_ttyio_release,
.synth_immediate = spk_ttyio_synth_immediate,
.catch_up = spk_do_catch_up_unicode,
.flush = spk_synth_flush,
.is_alive = spk_synth_is_alive_restart,
.synth_adjust = NULL,
.read_buff_add = read_buff_add,
.get_index = NULL,
.indexing = {
.command = NULL,
.lowindex = 0,
.highindex = 0,
.currindex = 0,
},
.attributes = {
.attrs = synth_attrs,
.name = "dummy",
},
};
module_param_named(ser, synth_dummy.ser, int, 0444);
module_param_named(dev, synth_dummy.dev_name, charp, 0444);
module_param_named(start, synth_dummy.startup, short, 0444);
module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444);
module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444);
module_param_named(inflection, vars[INFLECTION_ID].u.n.default_val, int, 0444);
module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444);
module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444);
module_param_named(punct, vars[PUNCT_ID].u.n.default_val, int, 0444);
module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
MODULE_PARM_DESC(dev, "Set the device e.g. ttyUSB0, for the synthesizer.");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
MODULE_PARM_DESC(rate, "Set the rate variable on load.");
MODULE_PARM_DESC(pitch, "Set the pitch variable on load.");
MODULE_PARM_DESC(inflection, "Set the inflection variable on load.");
MODULE_PARM_DESC(vol, "Set the vol variable on load.");
MODULE_PARM_DESC(tone, "Set the tone variable on load.");
MODULE_PARM_DESC(punct, "Set the punct variable on load.");
MODULE_PARM_DESC(direct, "Set the direct variable on load.");
module_spk_synth(synth_dummy);
MODULE_AUTHOR("Samuel Thibault <[email protected]>");
MODULE_DESCRIPTION("Speakup support for text console");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/accessibility/speakup/speakup_dummy.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <[email protected]>
* this version considerably modified by David Borowski, [email protected]
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
* this code is specifically written as a driver for the speakup screenreview
* package and is not a general device driver.
*/
#include "spk_priv.h"
#include "speakup.h"
#define DRV_VERSION "2.11"
#define SYNTH_CLEAR 0x18
#define PROCSPEECH '\r'
enum default_vars_id {
CAPS_START_ID = 0, CAPS_STOP_ID,
RATE_ID, PITCH_ID,
VOL_ID, TONE_ID,
DIRECT_ID, V_LAST_VAR_ID,
NB_ID
};
static struct var_t vars[NB_ID] = {
[CAPS_START_ID] = { CAPS_START, .u.s = {"\x05\x31\x32P" } },
[CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"\x05\x38P" } },
[RATE_ID] = { RATE, .u.n = {"\x05%dE", 8, 1, 16, 0, 0, NULL } },
[PITCH_ID] = { PITCH, .u.n = {"\x05%dP", 8, 0, 16, 0, 0, NULL } },
[VOL_ID] = { VOL, .u.n = {"\x05%dV", 8, 0, 16, 0, 0, NULL } },
[TONE_ID] = { TONE, .u.n = {"\x05%dT", 8, 0, 16, 0, 0, NULL } },
[DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
/*
* These attributes will appear in /sys/accessibility/speakup/bns.
*/
static struct kobj_attribute caps_start_attribute =
__ATTR(caps_start, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
__ATTR(tone, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
__ATTR(vol, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
__ATTR(delay_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
__ATTR(direct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
__ATTR(trigger_time, 0644, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
&rate_attribute.attr,
&tone_attribute.attr,
&vol_attribute.attr,
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
static struct spk_synth synth_bns = {
.name = "bns",
.version = DRV_VERSION,
.long_name = "Braille 'N Speak",
.init = "\x05Z\x05\x43",
.procspeech = PROCSPEECH,
.clear = SYNTH_CLEAR,
.delay = 500,
.trigger = 50,
.jiffies = 50,
.full = 40000,
.dev_name = SYNTH_DEFAULT_DEV,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
.io_ops = &spk_ttyio_ops,
.probe = spk_ttyio_synth_probe,
.release = spk_ttyio_release,
.synth_immediate = spk_ttyio_synth_immediate,
.catch_up = spk_do_catch_up,
.flush = spk_synth_flush,
.is_alive = spk_synth_is_alive_restart,
.synth_adjust = NULL,
.read_buff_add = NULL,
.get_index = NULL,
.indexing = {
.command = NULL,
.lowindex = 0,
.highindex = 0,
.currindex = 0,
},
.attributes = {
.attrs = synth_attrs,
.name = "bns",
},
};
module_param_named(ser, synth_bns.ser, int, 0444);
module_param_named(dev, synth_bns.dev_name, charp, 0444);
module_param_named(start, synth_bns.startup, short, 0444);
module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444);
module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444);
module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444);
module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444);
module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
MODULE_PARM_DESC(dev, "Set the device e.g. ttyUSB0, for the synthesizer.");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
MODULE_PARM_DESC(rate, "Set the rate variable on load.");
MODULE_PARM_DESC(pitch, "Set the pitch variable on load.");
MODULE_PARM_DESC(vol, "Set the vol variable on load.");
MODULE_PARM_DESC(tone, "Set the tone variable on load.");
MODULE_PARM_DESC(direct, "Set the direct variable on load.");
module_spk_synth(synth_bns);
MODULE_AUTHOR("Kirk Reiser <[email protected]>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for Braille 'n Speak synthesizers");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/accessibility/speakup/speakup_bns.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <[email protected]>
* this version considerably modified by David Borowski, [email protected]
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
* specifically written as a driver for the speakup screenreview
 * package and is not a general device driver.
* This driver is for the RC Systems DoubleTalk PC internal synthesizer.
*/
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include "spk_priv.h"
#include "serialio.h"
#include "speakup_dtlk.h" /* local header file for DoubleTalk values */
#include "speakup.h"
#define DRV_VERSION "2.10"
#define PROCSPEECH 0x00
static int synth_probe(struct spk_synth *synth);
static void dtlk_release(struct spk_synth *synth);
static const char *synth_immediate(struct spk_synth *synth, const char *buf);
static void do_catch_up(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
static int synth_lpc;
static int port_forced;
static unsigned int synth_portlist[] = {
0x25e, 0x29e, 0x2de, 0x31e, 0x35e, 0x39e, 0
};
static u_char synth_status;
enum default_vars_id {
CAPS_START_ID = 0, CAPS_STOP_ID,
RATE_ID, PITCH_ID,
VOL_ID, TONE_ID, PUNCT_ID,
VOICE_ID, FREQUENCY_ID,
DIRECT_ID, V_LAST_VAR_ID,
NB_ID,
};
static struct var_t vars[NB_ID] = {
[CAPS_START_ID] = { CAPS_START, .u.s = {"\x01+35p" } },
[CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"\x01-35p" } },
[RATE_ID] = { RATE, .u.n = {"\x01%ds", 8, 0, 9, 0, 0, NULL } },
[PITCH_ID] = { PITCH, .u.n = {"\x01%dp", 50, 0, 99, 0, 0, NULL } },
[VOL_ID] = { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } },
[TONE_ID] = { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } },
[PUNCT_ID] = { PUNCT, .u.n = {"\x01%db", 7, 0, 15, 0, 0, NULL } },
[VOICE_ID] = { VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } },
[FREQUENCY_ID] = { FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } },
[DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
/*
* These attributes will appear in /sys/accessibility/speakup/dtlk.
*/
static struct kobj_attribute caps_start_attribute =
__ATTR(caps_start, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute freq_attribute =
__ATTR(freq, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
__ATTR(tone, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
__ATTR(voice, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
__ATTR(vol, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
__ATTR(delay_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
__ATTR(direct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
__ATTR(trigger_time, 0644, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&freq_attribute.attr,
&pitch_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&tone_attribute.attr,
&voice_attribute.attr,
&vol_attribute.attr,
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
static struct spk_synth synth_dtlk = {
.name = "dtlk",
.version = DRV_VERSION,
.long_name = "DoubleTalk PC",
.init = "\x01@\x01\x31y",
.procspeech = PROCSPEECH,
.clear = SYNTH_CLEAR,
.delay = 500,
.trigger = 30,
.jiffies = 50,
.full = 1000,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
.io_ops = &spk_serial_io_ops,
.probe = synth_probe,
.release = dtlk_release,
.synth_immediate = synth_immediate,
.catch_up = do_catch_up,
.flush = synth_flush,
.is_alive = spk_synth_is_alive_nop,
.synth_adjust = NULL,
.read_buff_add = NULL,
.get_index = spk_synth_get_index,
.indexing = {
.command = "\x01%di",
.lowindex = 1,
.highindex = 5,
.currindex = 1,
},
.attributes = {
.attrs = synth_attrs,
.name = "dtlk",
},
};
static inline bool synth_readable(void)
{
synth_status = inb_p(speakup_info.port_tts + UART_RX);
return (synth_status & TTS_READABLE) != 0;
}
static inline bool synth_writable(void)
{
synth_status = inb_p(speakup_info.port_tts + UART_RX);
return (synth_status & TTS_WRITABLE) != 0;
}
static inline bool synth_full(void)
{
synth_status = inb_p(speakup_info.port_tts + UART_RX);
return (synth_status & TTS_ALMOST_FULL) != 0;
}
static void spk_out(const char ch)
{
int timeout = SPK_XMITR_TIMEOUT;
while (!synth_writable()) {
if (!--timeout)
break;
udelay(1);
}
outb_p(ch, speakup_info.port_tts);
timeout = SPK_XMITR_TIMEOUT;
while (synth_writable()) {
if (!--timeout)
break;
udelay(1);
}
}
static void do_catch_up(struct spk_synth *synth)
{
u_char ch;
unsigned long flags;
unsigned long jiff_max;
struct var_t *jiffy_delta;
struct var_t *delay_time;
int jiffy_delta_val;
int delay_time_val;
jiffy_delta = spk_get_var(JIFFY);
delay_time = spk_get_var(DELAY);
spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
jiff_max = jiffies + jiffy_delta_val;
while (!kthread_should_stop()) {
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (speakup_info.flushing) {
speakup_info.flushing = 0;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
synth->flush(synth);
continue;
}
synth_buffer_skip_nonlatin1();
if (synth_buffer_empty()) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
}
set_current_state(TASK_INTERRUPTIBLE);
delay_time_val = delay_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (synth_full()) {
schedule_timeout(msecs_to_jiffies(delay_time_val));
continue;
}
set_current_state(TASK_RUNNING);
spin_lock_irqsave(&speakup_info.spinlock, flags);
ch = synth_buffer_getc();
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '\n')
ch = PROCSPEECH;
spk_out(ch);
if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
spk_out(PROCSPEECH);
spin_lock_irqsave(&speakup_info.spinlock, flags);
delay_time_val = delay_time->u.n.value;
jiffy_delta_val = jiffy_delta->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
schedule_timeout(msecs_to_jiffies(delay_time_val));
jiff_max = jiffies + jiffy_delta_val;
}
}
spk_out(PROCSPEECH);
}
static const char *synth_immediate(struct spk_synth *synth, const char *buf)
{
u_char ch;
while ((ch = (u_char)*buf)) {
if (synth_full())
return buf;
if (ch == '\n')
ch = PROCSPEECH;
spk_out(ch);
buf++;
}
return NULL;
}
static void synth_flush(struct spk_synth *synth)
{
outb_p(SYNTH_CLEAR, speakup_info.port_tts);
while (synth_writable())
cpu_relax();
}
static char synth_read_tts(void)
{
u_char ch;
while (!synth_readable())
cpu_relax();
ch = synth_status & 0x7f;
outb_p(ch, speakup_info.port_tts);
while (synth_readable())
cpu_relax();
return (char)ch;
}
/* interrogate the DoubleTalk PC and return its settings */
static struct synth_settings *synth_interrogate(struct spk_synth *synth)
{
u_char *t;
static char buf[sizeof(struct synth_settings) + 1];
int total, i;
static struct synth_settings status;
synth_immediate(synth, "\x18\x01?");
for (total = 0, i = 0; i < 50; i++) {
buf[total] = synth_read_tts();
if (total > 2 && buf[total] == 0x7f)
break;
if (total < sizeof(struct synth_settings))
total++;
}
t = buf;
/* serial number is little endian */
status.serial_number = t[0] + t[1] * 256;
t += 2;
for (i = 0; *t != '\r'; t++) {
status.rom_version[i] = *t;
if (i < sizeof(status.rom_version) - 1)
i++;
}
status.rom_version[i] = 0;
t++;
status.mode = *t++;
status.punc_level = *t++;
status.formant_freq = *t++;
status.pitch = *t++;
status.speed = *t++;
status.volume = *t++;
status.tone = *t++;
status.expression = *t++;
status.ext_dict_loaded = *t++;
status.ext_dict_status = *t++;
status.free_ram = *t++;
status.articulation = *t++;
status.reverb = *t++;
status.eob = *t++;
return &status;
}
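/*
 * Probe the LPC ports for a DoubleTalk.  Reading the status word at the
 * base port is expected to yield the signature 0x107f (after masking
 * bit 10), and the busy-wait on 0x147f below apparently indicates the
 * card has finished its self-test.  The meaning of these magic values
 * is inferred from the code, not from documentation.
 */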
static int synth_probe(struct spk_synth *synth)
{
unsigned int port_val = 0;
int i;
struct synth_settings *sp;
pr_info("Probing for DoubleTalk.\n");
if (port_forced) {
speakup_info.port_tts = port_forced;
pr_info("probe forced to %x by kernel command line\n",
speakup_info.port_tts);
if ((port_forced & 0xf) != 0xf)
pr_info("warning: port base should probably end with f\n");
if (synth_request_region(speakup_info.port_tts - 1,
SYNTH_IO_EXTENT)) {
pr_warn("sorry, port already reserved\n");
return -EBUSY;
}
port_val = inw(speakup_info.port_tts - 1);
synth_lpc = speakup_info.port_tts - 1;
} else {
for (i = 0; synth_portlist[i]; i++) {
if (synth_request_region(synth_portlist[i],
SYNTH_IO_EXTENT))
continue;
port_val = inw(synth_portlist[i]) & 0xfbff;
if (port_val == 0x107f) {
synth_lpc = synth_portlist[i];
speakup_info.port_tts = synth_lpc + 1;
break;
}
synth_release_region(synth_portlist[i],
SYNTH_IO_EXTENT);
}
}
port_val &= 0xfbff;
if (port_val != 0x107f) {
pr_info("DoubleTalk PC: not found\n");
if (synth_lpc)
synth_release_region(synth_lpc, SYNTH_IO_EXTENT);
return -ENODEV;
}
while (inw_p(synth_lpc) != 0x147f)
cpu_relax(); /* wait until it's ready */
sp = synth_interrogate(synth);
pr_info("%s: %03x-%03x, ROM ver %s, s/n %u, driver: %s\n",
synth->long_name, synth_lpc, synth_lpc + SYNTH_IO_EXTENT - 1,
sp->rom_version, sp->serial_number, synth->version);
synth->alive = 1;
return 0;
}
static void dtlk_release(struct spk_synth *synth)
{
spk_stop_serial_interrupt();
if (speakup_info.port_tts)
synth_release_region(speakup_info.port_tts - 1,
SYNTH_IO_EXTENT);
speakup_info.port_tts = 0;
}
module_param_hw_named(port, port_forced, int, ioport, 0444);
module_param_named(start, synth_dtlk.startup, short, 0444);
module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444);
module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444);
module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444);
module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444);
module_param_named(punct, vars[PUNCT_ID].u.n.default_val, int, 0444);
module_param_named(voice, vars[VOICE_ID].u.n.default_val, int, 0444);
module_param_named(frequency, vars[FREQUENCY_ID].u.n.default_val, int, 0444);
module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444);
MODULE_PARM_DESC(port, "Set the port for the synthesizer (override probing).");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
MODULE_PARM_DESC(rate, "Set the rate variable on load.");
MODULE_PARM_DESC(pitch, "Set the pitch variable on load.");
MODULE_PARM_DESC(vol, "Set the vol variable on load.");
MODULE_PARM_DESC(tone, "Set the tone variable on load.");
MODULE_PARM_DESC(punct, "Set the punct variable on load.");
MODULE_PARM_DESC(voice, "Set the voice variable on load.");
MODULE_PARM_DESC(frequency, "Set the frequency variable on load.");
MODULE_PARM_DESC(direct, "Set the direct variable on load.");
module_spk_synth(synth_dtlk);
MODULE_AUTHOR("Kirk Reiser <[email protected]>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for DoubleTalk PC synthesizers");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/accessibility/speakup/speakup_dtlk.c |
// SPDX-License-Identifier: GPL-2.0+
/* speakup_soft.c - speakup driver to register and make available
* a user space device for software synthesizers. written by: Kirk
* Reiser <[email protected]>
*
* Copyright (C) 2003 Kirk Reiser.
*
* this code is specifically written as a driver for the speakup screenreview
* package and is not a general device driver.
*/
#include <linux/unistd.h>
#include <linux/miscdevice.h> /* for misc_register, and MISC_DYNAMIC_MINOR */
#include <linux/poll.h> /* for poll_wait() */
/* schedule(), signal_pending(), TASK_INTERRUPTIBLE */
#include <linux/sched/signal.h>
#include "spk_priv.h"
#include "speakup.h"
#define DRV_VERSION "2.6"
#define PROCSPEECH 0x0d
#define CLEAR_SYNTH 0x18
static int softsynth_probe(struct spk_synth *synth);
static void softsynth_release(struct spk_synth *synth);
static int softsynth_is_alive(struct spk_synth *synth);
static int softsynth_adjust(struct spk_synth *synth, struct st_var_header *var);
static unsigned char get_index(struct spk_synth *synth);
static struct miscdevice synth_device, synthu_device;
static int init_pos;
static int misc_registered;
enum default_vars_id {
DIRECT_ID = 0, CAPS_START_ID, CAPS_STOP_ID,
PAUSE_ID, RATE_ID, PITCH_ID, INFLECTION_ID,
VOL_ID, TONE_ID, PUNCT_ID, VOICE_ID,
FREQUENCY_ID, V_LAST_VAR_ID,
NB_ID
};
static struct var_t vars[NB_ID] = {
[DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
[CAPS_START_ID] = { CAPS_START, .u.s = {"\x01+3p" } },
[CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"\x01-3p" } },
[PAUSE_ID] = { PAUSE, .u.n = {"\x01P" } },
[RATE_ID] = { RATE, .u.n = {"\x01%ds", 2, 0, 9, 0, 0, NULL } },
[PITCH_ID] = { PITCH, .u.n = {"\x01%dp", 5, 0, 9, 0, 0, NULL } },
[INFLECTION_ID] = { INFLECTION, .u.n = {"\x01%dr", 5, 0, 9, 0, 0, NULL } },
[VOL_ID] = { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } },
[TONE_ID] = { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } },
[PUNCT_ID] = { PUNCT, .u.n = {"\x01%db", 0, 0, 3, 0, 0, NULL } },
[VOICE_ID] = { VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } },
[FREQUENCY_ID] = { FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } },
V_LAST_VAR
};
/* These attributes will appear in /sys/accessibility/speakup/soft. */
static struct kobj_attribute caps_start_attribute =
__ATTR(caps_start, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute freq_attribute =
__ATTR(freq, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute inflection_attribute =
__ATTR(inflection, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
__ATTR(tone, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
__ATTR(voice, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
__ATTR(vol, 0644, spk_var_show, spk_var_store);
/*
 * We should uncomment the following definition when we agree on a
* method of passing a language designation to the software synthesizer.
* static struct kobj_attribute lang_attribute =
* __ATTR(lang, 0644, spk_var_show, spk_var_store);
*/
static struct kobj_attribute delay_time_attribute =
__ATTR(delay_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
__ATTR(direct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
__ATTR(trigger_time, 0644, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&freq_attribute.attr,
/* &lang_attribute.attr, */
&pitch_attribute.attr,
&inflection_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&tone_attribute.attr,
&voice_attribute.attr,
&vol_attribute.attr,
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
static struct spk_synth synth_soft = {
.name = "soft",
.version = DRV_VERSION,
.long_name = "software synth",
.init = "\01@\x01\x31y\n",
.procspeech = PROCSPEECH,
.delay = 0,
.trigger = 0,
.jiffies = 0,
.full = 0,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
.io_ops = NULL,
.probe = softsynth_probe,
.release = softsynth_release,
.synth_immediate = NULL,
.catch_up = NULL,
.flush = NULL,
.is_alive = softsynth_is_alive,
.synth_adjust = softsynth_adjust,
.read_buff_add = NULL,
.get_index = get_index,
.indexing = {
.command = "\x01%di",
.lowindex = 1,
.highindex = 5,
.currindex = 1,
},
.attributes = {
.attrs = synth_attrs,
.name = "soft",
},
};
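/*
 * Rebuild the init string sent to the userspace synth from the current
 * numeric variable values, skipping the string/flag variables
 * (CAPS_START, CAPS_STOP, PAUSE, DIRECT).  With the defaults above
 * this comes out as one "\x01<value><letter>" group per variable,
 * e.g. "\x01" "2s" for the default rate, terminated by a newline --
 * an illustration from the table above, not a captured trace.
 */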
static char *get_initstring(void)
{
static char buf[40];
char *cp;
struct var_t *var;
size_t len;
size_t n;
memset(buf, 0, sizeof(buf));
cp = buf;
len = sizeof(buf);
var = synth_soft.vars;
while (var->var_id != MAXVARS) {
if (var->var_id != CAPS_START && var->var_id != CAPS_STOP &&
var->var_id != PAUSE && var->var_id != DIRECT) {
n = scnprintf(cp, len, var->u.n.synth_fmt,
var->u.n.value);
cp = cp + n;
len = len - n;
}
var++;
}
cp = cp + scnprintf(cp, len, "\n");
return buf;
}
static int softsynth_open(struct inode *inode, struct file *fp)
{
unsigned long flags;
/*if ((fp->f_flags & O_ACCMODE) != O_RDONLY) */
/* return -EPERM; */
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (synth_soft.alive) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return -EBUSY;
}
synth_soft.alive = 1;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return 0;
}
static int softsynth_close(struct inode *inode, struct file *fp)
{
unsigned long flags;
spin_lock_irqsave(&speakup_info.spinlock, flags);
synth_soft.alive = 0;
init_pos = 0;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
/* Make sure we let applications go before leaving */
speakup_start_ttys();
return 0;
}
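/*
 * Core read path shared by /dev/softsynth (Latin-1) and
 * /dev/softsynthu (UTF-8).  For each character handed to userspace,
 * the precedence is: a pending flush becomes a literal 0x18
 * (CLEAR_SYNTH), then any unsent bytes of the init string, then the
 * speech buffer.  In unicode mode a 16-bit character may expand to up
 * to three UTF-8 bytes, hence the bytes_per_ch reservation.
 */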
static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
loff_t *pos, int unicode)
{
int chars_sent = 0;
char __user *cp;
char *init;
size_t bytes_per_ch = unicode ? 3 : 1;
u16 ch;
int empty;
unsigned long flags;
DEFINE_WAIT(wait);
if (count < bytes_per_ch)
return -EINVAL;
spin_lock_irqsave(&speakup_info.spinlock, flags);
synth_soft.alive = 1;
while (1) {
prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
if (synth_current() == &synth_soft) {
if (!unicode)
synth_buffer_skip_nonlatin1();
if (!synth_buffer_empty() || speakup_info.flushing)
break;
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (fp->f_flags & O_NONBLOCK) {
finish_wait(&speakup_event, &wait);
return -EAGAIN;
}
if (signal_pending(current)) {
finish_wait(&speakup_event, &wait);
return -ERESTARTSYS;
}
schedule();
spin_lock_irqsave(&speakup_info.spinlock, flags);
}
finish_wait(&speakup_event, &wait);
cp = buf;
init = get_initstring();
	/* Keep 3 bytes available for a 16-bit UTF-8-encoded character */
while (chars_sent <= count - bytes_per_ch) {
if (synth_current() != &synth_soft)
break;
if (speakup_info.flushing) {
speakup_info.flushing = 0;
ch = '\x18';
} else if (init[init_pos]) {
ch = init[init_pos++];
} else {
if (!unicode)
synth_buffer_skip_nonlatin1();
if (synth_buffer_empty())
break;
ch = synth_buffer_getc();
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if ((!unicode && ch < 0x100) || (unicode && ch < 0x80)) {
u_char c = ch;
if (copy_to_user(cp, &c, 1))
return -EFAULT;
chars_sent++;
cp++;
} else if (unicode && ch < 0x800) {
u_char s[2] = {
0xc0 | (ch >> 6),
0x80 | (ch & 0x3f)
};
if (copy_to_user(cp, s, sizeof(s)))
return -EFAULT;
chars_sent += sizeof(s);
cp += sizeof(s);
} else if (unicode) {
u_char s[3] = {
0xe0 | (ch >> 12),
0x80 | ((ch >> 6) & 0x3f),
0x80 | (ch & 0x3f)
};
if (copy_to_user(cp, s, sizeof(s)))
return -EFAULT;
chars_sent += sizeof(s);
cp += sizeof(s);
}
spin_lock_irqsave(&speakup_info.spinlock, flags);
}
*pos += chars_sent;
empty = synth_buffer_empty();
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (empty) {
speakup_start_ttys();
*pos = 0;
}
return chars_sent;
}
static ssize_t softsynth_read(struct file *fp, char __user *buf, size_t count,
loff_t *pos)
{
return softsynthx_read(fp, buf, count, pos, 0);
}
static ssize_t softsynthu_read(struct file *fp, char __user *buf, size_t count,
loff_t *pos)
{
return softsynthx_read(fp, buf, count, pos, 1);
}
static int last_index;
static ssize_t softsynth_write(struct file *fp, const char __user *buf,
size_t count, loff_t *pos)
{
unsigned long supplied_index = 0;
int converted;
converted = kstrtoul_from_user(buf, count, 0, &supplied_index);
if (converted < 0)
return converted;
last_index = supplied_index;
return count;
}
static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait)
{
unsigned long flags;
__poll_t ret = 0;
poll_wait(fp, &speakup_event, wait);
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (synth_current() == &synth_soft &&
(!synth_buffer_empty() || speakup_info.flushing))
ret = EPOLLIN | EPOLLRDNORM;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return ret;
}
static unsigned char get_index(struct spk_synth *synth)
{
int rv;
rv = last_index;
last_index = 0;
return rv;
}
static const struct file_operations softsynth_fops = {
.owner = THIS_MODULE,
.poll = softsynth_poll,
.read = softsynth_read,
.write = softsynth_write,
.open = softsynth_open,
.release = softsynth_close,
};
static const struct file_operations softsynthu_fops = {
.owner = THIS_MODULE,
.poll = softsynth_poll,
.read = softsynthu_read,
.write = softsynth_write,
.open = softsynth_open,
.release = softsynth_close,
};
static int softsynth_probe(struct spk_synth *synth)
{
if (misc_registered != 0)
return 0;
memset(&synth_device, 0, sizeof(synth_device));
synth_device.minor = MISC_DYNAMIC_MINOR;
synth_device.name = "softsynth";
synth_device.fops = &softsynth_fops;
if (misc_register(&synth_device)) {
pr_warn("Couldn't initialize miscdevice /dev/softsynth.\n");
return -ENODEV;
}
memset(&synthu_device, 0, sizeof(synthu_device));
synthu_device.minor = MISC_DYNAMIC_MINOR;
synthu_device.name = "softsynthu";
synthu_device.fops = &softsynthu_fops;
if (misc_register(&synthu_device)) {
misc_deregister(&synth_device);
pr_warn("Couldn't initialize miscdevice /dev/softsynthu.\n");
return -ENODEV;
}
misc_registered = 1;
pr_info("initialized device: /dev/softsynth, node (MAJOR 10, MINOR %d)\n",
synth_device.minor);
pr_info("initialized device: /dev/softsynthu, node (MAJOR 10, MINOR %d)\n",
synthu_device.minor);
return 0;
}
static void softsynth_release(struct spk_synth *synth)
{
misc_deregister(&synth_device);
misc_deregister(&synthu_device);
misc_registered = 0;
pr_info("unregistered /dev/softsynth\n");
pr_info("unregistered /dev/softsynthu\n");
}
static int softsynth_is_alive(struct spk_synth *synth)
{
if (synth_soft.alive)
return 1;
return 0;
}
static int softsynth_adjust(struct spk_synth *synth, struct st_var_header *var)
{
struct st_var_header *punc_level_var;
struct var_t *var_data;
if (var->var_id != PUNC_LEVEL)
return 0;
	/* We want to set the speech synthesis punctuation level
	 * accordingly, so it properly tunes speaking A_PUNC characters.
	 */
var_data = var->data;
if (!var_data)
return 0;
punc_level_var = spk_get_var_header(PUNCT);
if (!punc_level_var)
return 0;
spk_set_num_var(var_data->u.n.value, punc_level_var, E_SET);
return 1;
}
module_param_named(start, synth_soft.startup, short, 0444);
module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444);
module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444);
module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444);
module_param_named(inflection, vars[INFLECTION_ID].u.n.default_val, int, 0444);
module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444);
module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444);
module_param_named(punct, vars[PUNCT_ID].u.n.default_val, int, 0444);
module_param_named(voice, vars[VOICE_ID].u.n.default_val, int, 0444);
module_param_named(frequency, vars[FREQUENCY_ID].u.n.default_val, int, 0444);
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
MODULE_PARM_DESC(direct, "Set the direct variable on load.");
MODULE_PARM_DESC(rate, "Sets the rate of the synthesizer.");
MODULE_PARM_DESC(pitch, "Sets the pitch of the synthesizer.");
MODULE_PARM_DESC(inflection, "Sets the inflection of the synthesizer.");
MODULE_PARM_DESC(vol, "Sets the volume of the speech synthesizer.");
MODULE_PARM_DESC(tone, "Sets the tone of the speech synthesizer.");
MODULE_PARM_DESC(punct, "Sets the amount of punctuation spoken by the synthesizer.");
MODULE_PARM_DESC(voice, "Sets the voice used by the synthesizer.");
MODULE_PARM_DESC(frequency, "Sets the frequency of speech synthesizer.");
module_spk_synth(synth_soft);
MODULE_AUTHOR("Kirk Reiser <[email protected]>");
MODULE_DESCRIPTION("Speakup userspace software synthesizer support");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/accessibility/speakup/speakup_soft.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include "speakup.h"
#include "spk_types.h"
#include "spk_priv.h"
struct spk_ldisc_data {
char buf;
struct completion completion;
bool buf_free;
struct spk_synth *synth;
};
/*
 * This allows spk_ttyio_ldisc_open to detect whether the line
 * discipline is being set on a speakup-driven device.
*/
static struct tty_struct *speakup_tty;
/* This mutex serializes the use of such global speakup_tty variable */
static DEFINE_MUTEX(speakup_tty_mutex);
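/*
 * Map a legacy "ser" index to a character device: major 4, minor
 * 64 + ser is the conventional numbering for ttyS<ser>, which is why
 * ser is capped at 191 (minor 255) below.
 */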
static int ser_to_dev(int ser, dev_t *dev_no)
{
if (ser < 0 || ser > (255 - 64)) {
pr_err("speakup: Invalid ser param. Must be between 0 and 191 inclusive.\n");
return -EINVAL;
}
*dev_no = MKDEV(4, (64 + ser));
return 0;
}
static int get_dev_to_use(struct spk_synth *synth, dev_t *dev_no)
{
/* use ser only when dev is not specified */
if (strcmp(synth->dev_name, SYNTH_DEFAULT_DEV) ||
synth->ser == SYNTH_DEFAULT_SER)
return tty_dev_name_to_number(synth->dev_name, dev_no);
return ser_to_dev(synth->ser, dev_no);
}
static int spk_ttyio_ldisc_open(struct tty_struct *tty)
{
struct spk_ldisc_data *ldisc_data;
if (tty != speakup_tty)
/* Somebody tried to use this line discipline outside speakup */
return -ENODEV;
if (!tty->ops->write)
return -EOPNOTSUPP;
ldisc_data = kmalloc(sizeof(*ldisc_data), GFP_KERNEL);
if (!ldisc_data)
return -ENOMEM;
init_completion(&ldisc_data->completion);
ldisc_data->buf_free = true;
tty->disc_data = ldisc_data;
return 0;
}
static void spk_ttyio_ldisc_close(struct tty_struct *tty)
{
kfree(tty->disc_data);
}
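/*
 * Input from the synth takes one of two paths: if the driver supplies
 * read_buff_add(), every byte is pushed to it directly; otherwise a
 * single-byte handshake is used, where buf/buf_free plus the memory
 * barriers here and in ttyio_in() pair up so this producer and the
 * reader never race on the one-character buffer.
 */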
static size_t spk_ttyio_receive_buf2(struct tty_struct *tty, const u8 *cp,
const u8 *fp, size_t count)
{
struct spk_ldisc_data *ldisc_data = tty->disc_data;
struct spk_synth *synth = ldisc_data->synth;
if (synth->read_buff_add) {
unsigned int i;
for (i = 0; i < count; i++)
synth->read_buff_add(cp[i]);
return count;
}
if (!ldisc_data->buf_free)
/* ttyio_in will tty_flip_buffer_push */
return 0;
/* Make sure the consumer has read buf before we have seen
* buf_free == true and overwrite buf
*/
mb();
ldisc_data->buf = cp[0];
ldisc_data->buf_free = false;
complete(&ldisc_data->completion);
return 1;
}
static struct tty_ldisc_ops spk_ttyio_ldisc_ops = {
.owner = THIS_MODULE,
.num = N_SPEAKUP,
.name = "speakup_ldisc",
.open = spk_ttyio_ldisc_open,
.close = spk_ttyio_ldisc_close,
.receive_buf2 = spk_ttyio_receive_buf2,
};
static int spk_ttyio_out(struct spk_synth *in_synth, const char ch);
static int spk_ttyio_out_unicode(struct spk_synth *in_synth, u16 ch);
static void spk_ttyio_send_xchar(struct spk_synth *in_synth, char ch);
static void spk_ttyio_tiocmset(struct spk_synth *in_synth, unsigned int set, unsigned int clear);
static unsigned char spk_ttyio_in(struct spk_synth *in_synth);
static unsigned char spk_ttyio_in_nowait(struct spk_synth *in_synth);
static void spk_ttyio_flush_buffer(struct spk_synth *in_synth);
static int spk_ttyio_wait_for_xmitr(struct spk_synth *in_synth);
struct spk_io_ops spk_ttyio_ops = {
.synth_out = spk_ttyio_out,
.synth_out_unicode = spk_ttyio_out_unicode,
.send_xchar = spk_ttyio_send_xchar,
.tiocmset = spk_ttyio_tiocmset,
.synth_in = spk_ttyio_in,
.synth_in_nowait = spk_ttyio_in_nowait,
.flush_buffer = spk_ttyio_flush_buffer,
.wait_for_xmitr = spk_ttyio_wait_for_xmitr,
};
EXPORT_SYMBOL_GPL(spk_ttyio_ops);
static inline void get_termios(struct tty_struct *tty,
struct ktermios *out_termios)
{
down_read(&tty->termios_rwsem);
*out_termios = tty->termios;
up_read(&tty->termios_rwsem);
}
static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
{
int ret = 0;
struct tty_struct *tty;
struct ktermios tmp_termios;
dev_t dev;
ret = get_dev_to_use(synth, &dev);
if (ret)
return ret;
tty = tty_kopen_exclusive(dev);
if (IS_ERR(tty))
return PTR_ERR(tty);
if (tty->ops->open)
ret = tty->ops->open(tty, NULL);
else
ret = -ENODEV;
if (ret) {
tty_unlock(tty);
return ret;
}
clear_bit(TTY_HUPPED, &tty->flags);
/* ensure hardware flow control is enabled */
get_termios(tty, &tmp_termios);
if (!(tmp_termios.c_cflag & CRTSCTS)) {
tmp_termios.c_cflag |= CRTSCTS;
tty_set_termios(tty, &tmp_termios);
/*
* check c_cflag to see if it's updated as tty_set_termios
* may not return error even when no tty bits are
* changed by the request.
*/
get_termios(tty, &tmp_termios);
if (!(tmp_termios.c_cflag & CRTSCTS))
pr_warn("speakup: Failed to set hardware flow control\n");
}
tty_unlock(tty);
mutex_lock(&speakup_tty_mutex);
speakup_tty = tty;
ret = tty_set_ldisc(tty, N_SPEAKUP);
speakup_tty = NULL;
mutex_unlock(&speakup_tty_mutex);
if (!ret) {
/* Success */
struct spk_ldisc_data *ldisc_data = tty->disc_data;
ldisc_data->synth = synth;
synth->dev = tty;
return 0;
}
pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
tty_lock(tty);
if (tty->ops->close)
tty->ops->close(tty, NULL);
tty_unlock(tty);
tty_kclose(tty);
return ret;
}
void spk_ttyio_register_ldisc(void)
{
if (tty_register_ldisc(&spk_ttyio_ldisc_ops))
pr_warn("speakup: Error registering line discipline. Most synths won't work.\n");
}
void spk_ttyio_unregister_ldisc(void)
{
tty_unregister_ldisc(&spk_ttyio_ldisc_ops);
}
static int spk_ttyio_out(struct spk_synth *in_synth, const char ch)
{
struct tty_struct *tty = in_synth->dev;
int ret;
if (!in_synth->alive || !tty->ops->write)
return 0;
ret = tty->ops->write(tty, &ch, 1);
if (ret == 0)
/* No room */
return 0;
if (ret > 0)
/* Success */
return 1;
pr_warn("%s: I/O error, deactivating speakup\n",
in_synth->long_name);
	/* No synth any more, so nobody will restart TTYs,
	 * and we thus need to do it ourselves.  Now that there
	 * is no synth we can let applications flood anyway.
	 */
in_synth->alive = 0;
speakup_start_ttys();
return 0;
}
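/*
 * Encode a BMP code point as UTF-8 by hand (one, two or three bytes)
 * and push the bytes out individually.  For example U+00E9 ("e" with
 * acute accent) goes out as 0xc3 0xa9.
 */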
static int spk_ttyio_out_unicode(struct spk_synth *in_synth, u16 ch)
{
int ret;
if (ch < 0x80) {
ret = spk_ttyio_out(in_synth, ch);
} else if (ch < 0x800) {
ret = spk_ttyio_out(in_synth, 0xc0 | (ch >> 6));
ret &= spk_ttyio_out(in_synth, 0x80 | (ch & 0x3f));
} else {
ret = spk_ttyio_out(in_synth, 0xe0 | (ch >> 12));
ret &= spk_ttyio_out(in_synth, 0x80 | ((ch >> 6) & 0x3f));
ret &= spk_ttyio_out(in_synth, 0x80 | (ch & 0x3f));
}
return ret;
}
static void spk_ttyio_send_xchar(struct spk_synth *in_synth, char ch)
{
struct tty_struct *tty = in_synth->dev;
if (tty->ops->send_xchar)
tty->ops->send_xchar(tty, ch);
}
static void spk_ttyio_tiocmset(struct spk_synth *in_synth, unsigned int set, unsigned int clear)
{
struct tty_struct *tty = in_synth->dev;
if (tty->ops->tiocmset)
tty->ops->tiocmset(tty, set, clear);
}
static int spk_ttyio_wait_for_xmitr(struct spk_synth *in_synth)
{
return 1;
}
static unsigned char ttyio_in(struct spk_synth *in_synth, int timeout)
{
struct tty_struct *tty = in_synth->dev;
struct spk_ldisc_data *ldisc_data = tty->disc_data;
char rv;
if (!timeout) {
if (!try_wait_for_completion(&ldisc_data->completion))
return 0xff;
} else if (wait_for_completion_timeout(&ldisc_data->completion,
usecs_to_jiffies(timeout)) == 0) {
pr_warn("spk_ttyio: timeout (%d) while waiting for input\n",
timeout);
return 0xff;
}
rv = ldisc_data->buf;
/* Make sure we have read buf before we set buf_free to let
* the producer overwrite it
*/
mb();
ldisc_data->buf_free = true;
/* Let TTY push more characters */
tty_flip_buffer_push(tty->port);
return rv;
}
static unsigned char spk_ttyio_in(struct spk_synth *in_synth)
{
return ttyio_in(in_synth, SPK_SYNTH_TIMEOUT);
}
static unsigned char spk_ttyio_in_nowait(struct spk_synth *in_synth)
{
u8 rv = ttyio_in(in_synth, 0);
return (rv == 0xff) ? 0 : rv;
}
static void spk_ttyio_flush_buffer(struct spk_synth *in_synth)
{
struct tty_struct *tty = in_synth->dev;
if (tty->ops->flush_buffer)
tty->ops->flush_buffer(tty);
}
int spk_ttyio_synth_probe(struct spk_synth *synth)
{
int rv = spk_ttyio_initialise_ldisc(synth);
if (rv)
return rv;
synth->alive = 1;
return 0;
}
EXPORT_SYMBOL_GPL(spk_ttyio_synth_probe);
void spk_ttyio_release(struct spk_synth *in_synth)
{
struct tty_struct *tty = in_synth->dev;
if (tty == NULL)
return;
tty_lock(tty);
if (tty->ops->close)
tty->ops->close(tty, NULL);
tty_ldisc_flush(tty);
tty_unlock(tty);
tty_kclose(tty);
in_synth->dev = NULL;
}
EXPORT_SYMBOL_GPL(spk_ttyio_release);
const char *spk_ttyio_synth_immediate(struct spk_synth *in_synth, const char *buff)
{
struct tty_struct *tty = in_synth->dev;
u_char ch;
while ((ch = *buff)) {
if (ch == '\n')
ch = in_synth->procspeech;
if (tty_write_room(tty) < 1 ||
!in_synth->io_ops->synth_out(in_synth, ch))
return buff;
buff++;
}
return NULL;
}
EXPORT_SYMBOL_GPL(spk_ttyio_synth_immediate);
| linux-master | drivers/accessibility/speakup/spk_ttyio.c |
// SPDX-License-Identifier: GPL-2.0+
/* fakekey.c
* Functions for simulating key presses.
*
* Copyright (C) 2010 the Speakup Team
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/input.h>
#include "speakup.h"
#define PRESSED 1
#define RELEASED 0
static DEFINE_PER_CPU(int, reporting_keystroke);
static struct input_dev *virt_keyboard;
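/*
 * Register a virtual input device that can emit exactly one key,
 * KEY_DOWN; speakup_fake_down_arrow() below uses it to send a
 * simulated down-arrow to the application.
 */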
int speakup_add_virtual_keyboard(void)
{
int err;
virt_keyboard = input_allocate_device();
if (!virt_keyboard)
return -ENOMEM;
virt_keyboard->name = "Speakup";
virt_keyboard->id.bustype = BUS_VIRTUAL;
virt_keyboard->phys = "speakup/input0";
virt_keyboard->dev.parent = NULL;
__set_bit(EV_KEY, virt_keyboard->evbit);
__set_bit(KEY_DOWN, virt_keyboard->keybit);
err = input_register_device(virt_keyboard);
if (err) {
input_free_device(virt_keyboard);
virt_keyboard = NULL;
}
return err;
}
void speakup_remove_virtual_keyboard(void)
{
if (virt_keyboard) {
input_unregister_device(virt_keyboard);
virt_keyboard = NULL;
}
}
/*
* Send a simulated down-arrow to the application.
*/
void speakup_fake_down_arrow(void)
{
unsigned long flags;
/* disable keyboard interrupts */
local_irq_save(flags);
/* don't change CPU */
preempt_disable();
__this_cpu_write(reporting_keystroke, true);
input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
input_sync(virt_keyboard);
__this_cpu_write(reporting_keystroke, false);
/* reenable preemption */
preempt_enable();
/* reenable keyboard interrupts */
local_irq_restore(flags);
}
/*
* Are we handling a simulated key press on the current CPU?
* Returns a boolean.
*/
bool speakup_fake_key_pressed(void)
{
return this_cpu_read(reporting_keystroke);
}
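/*
 * Consumer sketch: the keyboard notifier in main.c checks this flag so
 * that the KEY_DOWN injected above is passed through to the console
 * instead of being spoken (simplified from the real notifier):
 *
 *	if (speakup_fake_key_pressed())
 *		return NOTIFY_OK;
 *
 * Disabling IRQs and preemption around input_report_key() ensures the
 * notifier runs on the same CPU while reporting_keystroke is still set.
 */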
| linux-master | drivers/accessibility/speakup/fakekey.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* written by: Kirk Reiser <[email protected]>
* this version considerably modified by David Borowski, [email protected]
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
* this code is specifically written as a driver for the speakup screen review
* package and is not a general device driver.
* This driver is for the Aicom Accent PC internal synthesizer.
*/
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include "spk_priv.h"
#include "serialio.h"
#include "speakup.h"
#include "speakup_acnt.h" /* local header file for Accent values */
#define DRV_VERSION "2.10"
#define PROCSPEECH '\r'
static int synth_probe(struct spk_synth *synth);
static void accent_release(struct spk_synth *synth);
static const char *synth_immediate(struct spk_synth *synth, const char *buf);
static void do_catch_up(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
static int synth_port_control;
static int port_forced;
static unsigned int synth_portlist[] = { 0x2a8, 0 };
enum default_vars_id {
CAPS_START_ID = 0, CAPS_STOP_ID,
RATE_ID, PITCH_ID,
VOL_ID, TONE_ID,
DIRECT_ID, V_LAST_VAR_ID,
NB_ID
};
static struct var_t vars[NB_ID] = {
[CAPS_START_ID] = { CAPS_START, .u.s = {"\033P8" } },
[CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"\033P5" } },
[RATE_ID] = { RATE, .u.n = {"\033R%c", 9, 0, 17, 0, 0, "0123456789abcdefgh" } },
[PITCH_ID] = { PITCH, .u.n = {"\033P%d", 5, 0, 9, 0, 0, NULL } },
[VOL_ID] = { VOL, .u.n = {"\033A%d", 5, 0, 9, 0, 0, NULL } },
[TONE_ID] = { TONE, .u.n = {"\033V%d", 5, 0, 9, 0, 0, NULL } },
[DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
/*
* These attributes will appear in /sys/accessibility/speakup/acntpc.
*/
static struct kobj_attribute caps_start_attribute =
__ATTR(caps_start, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
__ATTR(tone, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
__ATTR(vol, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
__ATTR(delay_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
__ATTR(direct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
__ATTR(trigger_time, 0644, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
&rate_attribute.attr,
&tone_attribute.attr,
&vol_attribute.attr,
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
static struct spk_synth synth_acntpc = {
.name = "acntpc",
.version = DRV_VERSION,
.long_name = "Accent PC",
.init = "\033=X \033Oi\033T2\033=M\033N1\n",
.procspeech = PROCSPEECH,
.clear = SYNTH_CLEAR,
.delay = 500,
.trigger = 50,
.jiffies = 50,
.full = 1000,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
.io_ops = &spk_serial_io_ops,
.probe = synth_probe,
.release = accent_release,
.synth_immediate = synth_immediate,
.catch_up = do_catch_up,
.flush = synth_flush,
.is_alive = spk_synth_is_alive_nop,
.synth_adjust = NULL,
.read_buff_add = NULL,
.get_index = NULL,
.indexing = {
.command = NULL,
.lowindex = 0,
.highindex = 0,
.currindex = 0,
},
.attributes = {
.attrs = synth_attrs,
.name = "acntpc",
},
};
static inline bool synth_writable(void)
{
return inb_p(synth_port_control) & SYNTH_WRITABLE;
}
static inline bool synth_full(void)
{
return inb_p(speakup_info.port_tts + UART_RX) == 'F';
}
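/*
 * Probe arithmetic, inferred from synth_probe() below (no Accent PC
 * data sheet at hand, so treat this as a sketch): the card decodes a
 * control port at base and a TTS data port at base + 1, so inw(base)
 * returns control bits in the low byte and the data port in the high
 * byte.  An idle card reads back 'S' (0x53) on the data port with the
 * upper six control bits set, hence the test
 *
 *	(inw(base) & 0xfffc) == 0x53fc
 */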
static const char *synth_immediate(struct spk_synth *synth, const char *buf)
{
u_char ch;
while ((ch = *buf)) {
int timeout = SPK_XMITR_TIMEOUT;
if (ch == '\n')
ch = PROCSPEECH;
if (synth_full())
return buf;
while (synth_writable()) {
if (!--timeout)
return buf;
udelay(1);
}
outb_p(ch, speakup_info.port_tts);
buf++;
}
return NULL;
}
static void do_catch_up(struct spk_synth *synth)
{
u_char ch;
unsigned long flags;
unsigned long jiff_max;
int timeout;
int delay_time_val;
int jiffy_delta_val;
int full_time_val;
struct var_t *delay_time;
struct var_t *full_time;
struct var_t *jiffy_delta;
jiffy_delta = spk_get_var(JIFFY);
delay_time = spk_get_var(DELAY);
full_time = spk_get_var(FULL);
spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
jiff_max = jiffies + jiffy_delta_val;
while (!kthread_should_stop()) {
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (speakup_info.flushing) {
speakup_info.flushing = 0;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
synth->flush(synth);
continue;
}
synth_buffer_skip_nonlatin1();
if (synth_buffer_empty()) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
}
set_current_state(TASK_INTERRUPTIBLE);
full_time_val = full_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (synth_full()) {
schedule_timeout(msecs_to_jiffies(full_time_val));
continue;
}
set_current_state(TASK_RUNNING);
timeout = SPK_XMITR_TIMEOUT;
while (synth_writable()) {
if (!--timeout)
break;
udelay(1);
}
spin_lock_irqsave(&speakup_info.spinlock, flags);
ch = synth_buffer_getc();
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '\n')
ch = PROCSPEECH;
outb_p(ch, speakup_info.port_tts);
if (time_after_eq(jiffies, jiff_max) && ch == SPACE) {
timeout = SPK_XMITR_TIMEOUT;
while (synth_writable()) {
if (!--timeout)
break;
udelay(1);
}
outb_p(PROCSPEECH, speakup_info.port_tts);
spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
delay_time_val = delay_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
schedule_timeout(msecs_to_jiffies(delay_time_val));
jiff_max = jiffies + jiffy_delta_val;
}
}
timeout = SPK_XMITR_TIMEOUT;
while (synth_writable()) {
if (!--timeout)
break;
udelay(1);
}
outb_p(PROCSPEECH, speakup_info.port_tts);
}
static void synth_flush(struct spk_synth *synth)
{
outb_p(SYNTH_CLEAR, speakup_info.port_tts);
}
static int synth_probe(struct spk_synth *synth)
{
unsigned int port_val = 0;
int i;
pr_info("Probing for %s.\n", synth->long_name);
if (port_forced) {
speakup_info.port_tts = port_forced;
pr_info("probe forced to %x by kernel command line\n",
speakup_info.port_tts);
if (synth_request_region(speakup_info.port_tts - 1,
SYNTH_IO_EXTENT)) {
pr_warn("sorry, port already reserved\n");
return -EBUSY;
}
port_val = inw(speakup_info.port_tts - 1);
synth_port_control = speakup_info.port_tts - 1;
} else {
for (i = 0; synth_portlist[i]; i++) {
if (synth_request_region(synth_portlist[i],
SYNTH_IO_EXTENT)) {
pr_warn
("request_region: failed with 0x%x, %d\n",
synth_portlist[i], SYNTH_IO_EXTENT);
continue;
}
port_val = inw(synth_portlist[i]) & 0xfffc;
if (port_val == 0x53fc) {
/* 'S' and out&input bits */
synth_port_control = synth_portlist[i];
speakup_info.port_tts = synth_port_control + 1;
break;
}
}
}
port_val &= 0xfffc;
if (port_val != 0x53fc) {
/* 'S' and out&input bits */
pr_info("%s: not found\n", synth->long_name);
synth_release_region(synth_port_control, SYNTH_IO_EXTENT);
synth_port_control = 0;
return -ENODEV;
}
pr_info("%s: %03x-%03x, driver version %s,\n", synth->long_name,
synth_port_control, synth_port_control + SYNTH_IO_EXTENT - 1,
synth->version);
synth->alive = 1;
return 0;
}
static void accent_release(struct spk_synth *synth)
{
spk_stop_serial_interrupt();
if (speakup_info.port_tts)
synth_release_region(speakup_info.port_tts - 1,
SYNTH_IO_EXTENT);
speakup_info.port_tts = 0;
}
module_param_hw_named(port, port_forced, int, ioport, 0444);
module_param_named(start, synth_acntpc.startup, short, 0444);
module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444);
module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444);
module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444);
module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444);
module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444);
MODULE_PARM_DESC(port, "Set the port for the synthesizer (override probing).");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
MODULE_PARM_DESC(rate, "Set the rate variable on load.");
MODULE_PARM_DESC(pitch, "Set the pitch variable on load.");
MODULE_PARM_DESC(vol, "Set the vol variable on load.");
MODULE_PARM_DESC(tone, "Set the tone variable on load.");
MODULE_PARM_DESC(direct, "Set the direct variable on load.");
module_spk_synth(synth_acntpc);
MODULE_AUTHOR("Kirk Reiser <[email protected]>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for Accent PC synthesizer");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/accessibility/speakup/speakup_acntpc.c |
// SPDX-License-Identifier: GPL-2.0+
/* speakup_keyhelp.c
* help module for speakup
*
* written by David Borowski.
*
* Copyright (C) 2003 David Borowski.
*/
#include <linux/keyboard.h>
#include "spk_priv.h"
#include "speakup.h"
#define MAXFUNCS 130
#define MAXKEYS 256
static const int num_key_names = MSG_KEYNAMES_END - MSG_KEYNAMES_START + 1;
static u_short key_offsets[MAXFUNCS], key_data[MAXKEYS];
static u_short masks[] = { 32, 16, 8, 4, 2, 1 };
static short letter_offsets[26] = {
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1 };
static u_char funcvals[] = {
ATTRIB_BLEEP_DEC, ATTRIB_BLEEP_INC, BLEEPS_DEC, BLEEPS_INC,
SAY_FIRST_CHAR, SAY_LAST_CHAR, SAY_CHAR, SAY_CHAR_NUM,
SAY_NEXT_CHAR, SAY_PHONETIC_CHAR, SAY_PREV_CHAR, SPEAKUP_PARKED,
SPEAKUP_CUT, EDIT_DELIM, EDIT_EXNUM, EDIT_MOST,
EDIT_REPEAT, EDIT_SOME, SPEAKUP_GOTO, BOTTOM_EDGE,
LEFT_EDGE, RIGHT_EDGE, TOP_EDGE, SPEAKUP_HELP,
SAY_LINE, SAY_NEXT_LINE, SAY_PREV_LINE, SAY_LINE_INDENT,
SPEAKUP_PASTE, PITCH_DEC, PITCH_INC, PUNCT_DEC,
PUNCT_INC, PUNC_LEVEL_DEC, PUNC_LEVEL_INC, SPEAKUP_QUIET,
RATE_DEC, RATE_INC, READING_PUNC_DEC, READING_PUNC_INC,
SAY_ATTRIBUTES, SAY_FROM_LEFT, SAY_FROM_TOP, SAY_POSITION,
SAY_SCREEN, SAY_TO_BOTTOM, SAY_TO_RIGHT, SPK_KEY,
SPK_LOCK, SPEAKUP_OFF, SPEECH_KILL, SPELL_DELAY_DEC,
SPELL_DELAY_INC, SPELL_WORD, SPELL_PHONETIC, TONE_DEC,
TONE_INC, VOICE_DEC, VOICE_INC, VOL_DEC,
VOL_INC, CLEAR_WIN, SAY_WIN, SET_WIN,
ENABLE_WIN, SAY_WORD, SAY_NEXT_WORD, SAY_PREV_WORD, 0
};
static u_char *state_tbl;
static int cur_item, nstates;
static void build_key_data(void)
{
u_char *kp, counters[MAXFUNCS], ch, ch1;
u_short *p_key, key;
int i, offset = 1;
nstates = (int)(state_tbl[-1]);
memset(counters, 0, sizeof(counters));
memset(key_offsets, 0, sizeof(key_offsets));
kp = state_tbl + nstates + 1;
while (*kp++) {
/* count occurrences of each function */
for (i = 0; i < nstates; i++, kp++) {
if (!*kp)
continue;
if ((state_tbl[i] & 16) != 0 && *kp == SPK_KEY)
continue;
counters[*kp]++;
}
}
for (i = 0; i < MAXFUNCS; i++) {
if (counters[i] == 0)
continue;
key_offsets[i] = offset;
offset += (counters[i] + 1);
if (offset >= MAXKEYS)
break;
}
/* leave counters set so high keycodes come first.
* this is done so the num pad and other extended key maps are spoken
* before the alpha keys with speakup-type mapping.
*/
kp = state_tbl + nstates + 1;
while ((ch = *kp++)) {
for (i = 0; i < nstates; i++) {
ch1 = *kp++;
if (!ch1)
continue;
if ((state_tbl[i] & 16) != 0 && ch1 == SPK_KEY)
continue;
key = (state_tbl[i] << 8) + ch;
counters[ch1]--;
offset = key_offsets[ch1];
if (!offset)
continue;
p_key = key_data + offset + counters[ch1];
*p_key = key;
}
}
}
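/*
 * Layout produced above, stated in one place (an assumption drawn from
 * the code rather than separate documentation): key_offsets[func]
 * indexes a zero-terminated run of u16 entries in key_data[], each
 * encoded as
 *
 *	key = (shift_state << 8) | keycode;
 *
 * listing every key combination bound to that speakup function.
 * spk_handle_help() walks such a run to announce all bindings.
 */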
static void say_key(int key)
{
int i, state = key >> 8;
key &= 0xff;
for (i = 0; i < 6; i++) {
if (state & masks[i])
synth_printf(" %s", spk_msg_get(MSG_STATES_START + i));
}
if ((key > 0) && (key <= num_key_names))
synth_printf(" %s\n",
spk_msg_get(MSG_KEYNAMES_START + (key - 1)));
}
static int help_init(void)
{
char start = SPACE;
int i;
int num_funcs = MSG_FUNCNAMES_END - MSG_FUNCNAMES_START + 1;
state_tbl = spk_our_keys[0] + SHIFT_TBL_SIZE + 2;
for (i = 0; i < num_funcs; i++) {
char *cur_funcname = spk_msg_get(MSG_FUNCNAMES_START + i);
if (start == *cur_funcname)
continue;
start = *cur_funcname;
letter_offsets[(start & 31) - 1] = i;
}
return 0;
}
int spk_handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key)
{
int i, n;
char *name;
u_char func, *kp;
u_short *p_keys, val;
if (letter_offsets[0] == -1)
help_init();
if (type == KT_LATIN) {
if (ch == SPACE) {
spk_special_handler = NULL;
synth_printf("%s\n", spk_msg_get(MSG_LEAVING_HELP));
return 1;
}
ch |= 32; /* lower case */
if (ch < 'a' || ch > 'z')
return -1;
if (letter_offsets[ch - 'a'] == -1) {
synth_printf(spk_msg_get(MSG_NO_COMMAND), ch);
synth_printf("\n");
return 1;
}
cur_item = letter_offsets[ch - 'a'];
} else if (type == KT_CUR) {
if (ch == 0 &&
(MSG_FUNCNAMES_START + cur_item + 1) <= MSG_FUNCNAMES_END)
cur_item++;
else if (ch == 3 && cur_item > 0)
cur_item--;
else
return -1;
} else if (type == KT_SPKUP && ch == SPEAKUP_HELP &&
!spk_special_handler) {
spk_special_handler = spk_handle_help;
synth_printf("%s\n", spk_msg_get(MSG_HELP_INFO));
build_key_data(); /* rebuild each time in case new mapping */
return 1;
} else {
name = NULL;
if ((type != KT_SPKUP) && (key > 0) && (key <= num_key_names)) {
synth_printf("%s\n",
spk_msg_get(MSG_KEYNAMES_START + key - 1));
return 1;
}
for (i = 0; funcvals[i] != 0 && !name; i++) {
if (ch == funcvals[i])
name = spk_msg_get(MSG_FUNCNAMES_START + i);
}
if (!name)
return -1;
kp = spk_our_keys[key] + 1;
for (i = 0; i < nstates; i++) {
if (ch == kp[i])
break;
}
key += (state_tbl[i] << 8);
say_key(key);
synth_printf(spk_msg_get(MSG_KEYDESC), name);
synth_printf("\n");
return 1;
}
name = spk_msg_get(MSG_FUNCNAMES_START + cur_item);
func = funcvals[cur_item];
synth_printf("%s", name);
if (key_offsets[func] == 0) {
synth_printf(" %s\n", spk_msg_get(MSG_IS_UNASSIGNED));
return 1;
}
p_keys = key_data + key_offsets[func];
for (n = 0; p_keys[n]; n++) {
val = p_keys[n];
if (n > 0)
synth_printf("%s ", spk_msg_get(MSG_DISJUNCTION));
say_key(val);
}
return 1;
}
| linux-master | drivers/accessibility/speakup/keyhelp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Speakup kobject implementation
*
* Copyright (C) 2009 William Hubbs
*
* This code is based on kobject-example.c, which came with linux 2.6.x.
*
* Copyright (C) 2004-2007 Greg Kroah-Hartman <[email protected]>
* Copyright (C) 2007 Novell Inc.
*
* Released under the GPL version 2 only.
*
*/
#include <linux/slab.h> /* For kmalloc. */
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/string_helpers.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include "speakup.h"
#include "spk_priv.h"
/*
* This is called when a user reads the characters or chartab sys file.
*/
static ssize_t chars_chartab_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
int i;
int len = 0;
char *cp;
char *buf_pointer = buf;
size_t bufsize = PAGE_SIZE;
unsigned long flags;
spin_lock_irqsave(&speakup_info.spinlock, flags);
*buf_pointer = '\0';
for (i = 0; i < 256; i++) {
if (bufsize <= 1)
break;
if (strcmp("characters", attr->attr.name) == 0) {
len = scnprintf(buf_pointer, bufsize, "%d\t%s\n",
i, spk_characters[i]);
} else { /* show chartab entry */
if (IS_TYPE(i, B_CTL))
cp = "B_CTL";
else if (IS_TYPE(i, WDLM))
cp = "WDLM";
else if (IS_TYPE(i, A_PUNC))
cp = "A_PUNC";
else if (IS_TYPE(i, PUNC))
cp = "PUNC";
else if (IS_TYPE(i, NUM))
cp = "NUM";
else if (IS_TYPE(i, A_CAP))
cp = "A_CAP";
else if (IS_TYPE(i, ALPHA))
cp = "ALPHA";
else if (IS_TYPE(i, B_CAPSYM))
cp = "B_CAPSYM";
else if (IS_TYPE(i, B_SYM))
cp = "B_SYM";
else
cp = "0";
len =
scnprintf(buf_pointer, bufsize, "%d\t%s\n", i, cp);
}
bufsize -= len;
buf_pointer += len;
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return buf_pointer - buf;
}
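/*
 * Example of the output format built above when reading the
 * "characters" file (values illustrative):
 *
 *	$ head -3 /sys/accessibility/speakup/i18n/characters
 *	0	null
 *	1	^a
 *	2	^b
 *
 * The "chartab" file uses the same index/tab/value layout with class
 * names (ALPHA, PUNC, ...) in place of descriptions.
 */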
/*
* Print informational messages or warnings after updating
* character descriptions or chartab entries.
*/
static void report_char_chartab_status(int reset, int received, int used,
int rejected, int do_characters)
{
static char const *object_type[] = {
"character class entries",
"character descriptions",
};
int len;
char buf[80];
if (reset) {
pr_info("%s reset to defaults\n", object_type[do_characters]);
} else if (received) {
len = snprintf(buf, sizeof(buf),
" updated %d of %d %s\n",
used, received, object_type[do_characters]);
if (rejected)
snprintf(buf + (len - 1), sizeof(buf) - (len - 1),
" with %d reject%s\n",
rejected, rejected > 1 ? "s" : "");
pr_info("%s", buf);
}
}
/*
* This is called when a user changes the characters or chartab parameters.
*/
static ssize_t chars_chartab_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
char *cp = (char *)buf;
char *end = cp + count; /* the null at the end of the buffer */
char *linefeed = NULL;
char keyword[MAX_DESC_LEN + 1];
char *outptr = NULL; /* Will hold keyword or desc. */
char *temp = NULL;
char *desc = NULL;
ssize_t retval = count;
unsigned long flags;
unsigned long index = 0;
int charclass = 0;
int received = 0;
int used = 0;
int rejected = 0;
int reset = 0;
int do_characters = !strcmp(attr->attr.name, "characters");
size_t desc_length = 0;
int i;
spin_lock_irqsave(&speakup_info.spinlock, flags);
while (cp < end) {
while ((cp < end) && (*cp == ' ' || *cp == '\t'))
cp++;
if (cp == end)
break;
if ((*cp == '\n') || strchr("dDrR", *cp)) {
reset = 1;
break;
}
received++;
linefeed = strchr(cp, '\n');
if (!linefeed) {
rejected++;
break;
}
if (!isdigit(*cp)) {
rejected++;
cp = linefeed + 1;
continue;
}
/*
* Do not replace with kstrtoul:
* here we need temp to be updated
*/
index = simple_strtoul(cp, &temp, 10);
if (index > 255) {
rejected++;
cp = linefeed + 1;
continue;
}
while ((temp < linefeed) && (*temp == ' ' || *temp == '\t'))
temp++;
desc_length = linefeed - temp;
if (desc_length > MAX_DESC_LEN) {
rejected++;
cp = linefeed + 1;
continue;
}
if (do_characters) {
desc = kmalloc(desc_length + 1, GFP_ATOMIC);
if (!desc) {
retval = -ENOMEM;
reset = 1; /* just reset on error. */
break;
}
outptr = desc;
} else {
outptr = keyword;
}
for (i = 0; i < desc_length; i++)
outptr[i] = temp[i];
outptr[desc_length] = '\0';
if (do_characters) {
if (spk_characters[index] != spk_default_chars[index])
kfree(spk_characters[index]);
spk_characters[index] = desc;
used++;
} else {
charclass = spk_chartab_get_value(keyword);
if (charclass == 0) {
rejected++;
cp = linefeed + 1;
continue;
}
if (charclass != spk_chartab[index]) {
spk_chartab[index] = charclass;
used++;
}
}
cp = linefeed + 1;
}
if (reset) {
if (do_characters)
spk_reset_default_chars();
else
spk_reset_default_chartab();
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
report_char_chartab_status(reset, received, used, rejected,
do_characters);
return retval;
}
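/*
 * Input accepted above is one "index value" pair per line; a line
 * starting with d, D, r or R restores the defaults.  Illustrative use:
 *
 *	echo "38 ampersand" > /sys/accessibility/speakup/i18n/characters
 *	echo "38 PUNC" > /sys/accessibility/speakup/i18n/chartab
 */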
/*
* This is called when a user reads the keymap parameter.
*/
static ssize_t keymap_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
char *cp = buf;
int i;
int n;
int num_keys;
int nstates;
u_char *cp1;
u_char ch;
unsigned long flags;
spin_lock_irqsave(&speakup_info.spinlock, flags);
cp1 = spk_key_buf + SHIFT_TBL_SIZE;
num_keys = (int)(*cp1);
nstates = (int)cp1[1];
cp += sprintf(cp, "%d, %d, %d,\n", KEY_MAP_VER, num_keys, nstates);
cp1 += 2; /* now pointing at shift states */
/* dump num_keys+1 as first row is shift states + flags,
* each subsequent row is key + states
*/
for (n = 0; n <= num_keys; n++) {
for (i = 0; i <= nstates; i++) {
ch = *cp1++;
cp += sprintf(cp, "%d,", (int)ch);
*cp++ = (i < nstates) ? SPACE : '\n';
}
}
cp += sprintf(cp, "0, %d\n", KEY_MAP_VER);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return (int)(cp - buf);
}
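/*
 * Shape of the dump produced above (field names descriptive, numbers
 * vary with the loaded map):
 *
 *	<map_ver>, <num_keys>, <nstates>,
 *	<shift-state row: nstates + 1 entries>
 *	<keycode>, <function value for each state>
 *	...
 *	0, <map_ver>
 *
 * keymap_store() below parses exactly this layout back via spk_s2uchar().
 */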
/*
* This is called when a user changes the keymap parameter.
*/
static ssize_t keymap_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int i;
ssize_t ret = count;
char *in_buff = NULL;
char *cp;
u_char *cp1;
unsigned long flags;
spin_lock_irqsave(&speakup_info.spinlock, flags);
in_buff = kmemdup(buf, count + 1, GFP_ATOMIC);
if (!in_buff) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return -ENOMEM;
}
if (strchr("dDrR", *in_buff)) {
spk_set_key_info(spk_key_defaults, spk_key_buf);
pr_info("keymap set to default values\n");
kfree(in_buff);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return count;
}
if (in_buff[count - 1] == '\n')
in_buff[count - 1] = '\0';
cp = in_buff;
cp1 = (u_char *)in_buff;
for (i = 0; i < 3; i++) {
cp = spk_s2uchar(cp, cp1);
cp1++;
}
i = (int)cp1[-2] + 1;
i *= (int)cp1[-1] + 1;
i += 2; /* 0 and last map ver */
if (cp1[-3] != KEY_MAP_VER || cp1[-1] > 10 ||
i + SHIFT_TBL_SIZE + 4 >= sizeof(spk_key_buf)) {
pr_warn("i %d %d %d %d\n", i,
(int)cp1[-3], (int)cp1[-2], (int)cp1[-1]);
kfree(in_buff);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return -EINVAL;
}
while (--i >= 0) {
cp = spk_s2uchar(cp, cp1);
cp1++;
if (!(*cp))
break;
}
if (i != 0 || cp1[-1] != KEY_MAP_VER || cp1[-2] != 0) {
ret = -EINVAL;
pr_warn("end %d %d %d %d\n", i,
(int)cp1[-3], (int)cp1[-2], (int)cp1[-1]);
} else {
if (spk_set_key_info(in_buff, spk_key_buf)) {
spk_set_key_info(spk_key_defaults, spk_key_buf);
ret = -EINVAL;
pr_warn("set key failed\n");
}
}
kfree(in_buff);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return ret;
}
/*
* This is called when a user changes the value of the silent parameter.
*/
static ssize_t silent_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int len;
struct vc_data *vc = vc_cons[fg_console].d;
char ch = 0;
char shut;
unsigned long flags;
len = strlen(buf);
if (len > 0 && len < 3) {
ch = buf[0];
if (ch == '\n')
ch = '0';
}
if (ch < '0' || ch > '7') {
pr_warn("silent value '%c' not in range (0,7)\n", ch);
return -EINVAL;
}
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (ch & 2) {
shut = 1;
spk_do_flush();
} else {
shut = 0;
}
if (ch & 4)
shut |= 0x40;
if (ch & 1)
spk_shut_up |= shut;
else
spk_shut_up &= ~shut;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return count;
}
/*
* This is called when a user reads the synth setting.
*/
static ssize_t synth_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
int rv;
if (!synth)
rv = sprintf(buf, "%s\n", "none");
else
rv = sprintf(buf, "%s\n", synth->name);
return rv;
}
/*
* This is called when a user requests to change synthesizers.
*/
static ssize_t synth_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int len;
char new_synth_name[10];
len = strlen(buf);
if (len < 2 || len > 9)
return -EINVAL;
memcpy(new_synth_name, buf, len);
if (new_synth_name[len - 1] == '\n')
len--;
new_synth_name[len] = '\0';
spk_strlwr(new_synth_name);
if (synth && !strcmp(new_synth_name, synth->name)) {
pr_warn("%s already in use\n", new_synth_name);
} else if (synth_init(new_synth_name) != 0) {
pr_warn("failed to init synth %s\n", new_synth_name);
return -ENODEV;
}
return count;
}
/*
* This is called when text is sent to the synth via the synth_direct file.
*/
static ssize_t synth_direct_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
u_char tmp[256];
int len;
int bytes;
const char *ptr = buf;
unsigned long flags;
if (!synth)
return -EPERM;
len = strlen(buf);
spin_lock_irqsave(&speakup_info.spinlock, flags);
while (len > 0) {
bytes = min_t(size_t, len, 250);
strncpy(tmp, ptr, bytes);
tmp[bytes] = '\0';
string_unescape_any_inplace(tmp);
synth_printf("%s", tmp);
ptr += bytes;
len -= bytes;
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return count;
}
/*
* This function is called when a user reads the version.
*/
static ssize_t version_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
char *cp;
cp = buf;
cp += sprintf(cp, "Speakup version %s\n", SPEAKUP_VERSION);
if (synth)
cp += sprintf(cp, "%s synthesizer driver version %s\n",
synth->name, synth->version);
return cp - buf;
}
/*
* This is called when a user reads the punctuation settings.
*/
static ssize_t punc_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
int i;
char *cp = buf;
struct st_var_header *p_header;
struct punc_var_t *var;
struct st_bits_data *pb;
short mask;
unsigned long flags;
p_header = spk_var_header_by_name(attr->attr.name);
if (!p_header) {
pr_warn("p_header is null, attr->attr.name is %s\n",
attr->attr.name);
return -EINVAL;
}
var = spk_get_punc_var(p_header->var_id);
if (!var) {
pr_warn("var is null, p_header->var_id is %i\n",
p_header->var_id);
return -EINVAL;
}
spin_lock_irqsave(&speakup_info.spinlock, flags);
pb = (struct st_bits_data *)&spk_punc_info[var->value];
mask = pb->mask;
for (i = 33; i < 128; i++) {
if (!(spk_chartab[i] & mask))
continue;
*cp++ = (char)i;
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return cp - buf;
}
/*
* This is called when a user changes the punctuation settings.
*/
static ssize_t punc_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int x;
struct st_var_header *p_header;
struct punc_var_t *var;
char punc_buf[100];
unsigned long flags;
x = strlen(buf);
if (x < 1 || x > 99)
return -EINVAL;
p_header = spk_var_header_by_name(attr->attr.name);
if (!p_header) {
pr_warn("p_header is null, attr->attr.name is %s\n",
attr->attr.name);
return -EINVAL;
}
var = spk_get_punc_var(p_header->var_id);
if (!var) {
pr_warn("var is null, p_header->var_id is %i\n",
p_header->var_id);
return -EINVAL;
}
memcpy(punc_buf, buf, x);
while (x && punc_buf[x - 1] == '\n')
x--;
punc_buf[x] = '\0';
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (*punc_buf == 'd' || *punc_buf == 'r')
x = spk_set_mask_bits(NULL, var->value, 3);
else
x = spk_set_mask_bits(punc_buf, var->value, 3);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return count;
}
/*
* This function is called when a user reads one of the variable parameters.
*/
ssize_t spk_var_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
int rv = 0;
struct st_var_header *param;
struct var_t *var;
char *cp1;
char *cp;
char ch;
unsigned long flags;
param = spk_var_header_by_name(attr->attr.name);
if (!param)
return -EINVAL;
spin_lock_irqsave(&speakup_info.spinlock, flags);
var = (struct var_t *)param->data;
switch (param->var_type) {
case VAR_NUM:
case VAR_TIME:
if (var)
rv = sprintf(buf, "%i\n", var->u.n.value);
else
rv = sprintf(buf, "0\n");
break;
case VAR_STRING:
if (var) {
cp1 = buf;
*cp1++ = '"';
for (cp = (char *)param->p_val; (ch = *cp); cp++) {
if (ch >= ' ' && ch < '~')
*cp1++ = ch;
else
cp1 += sprintf(cp1, "\\x%02x", (unsigned char)ch); /* no sign extension */
}
*cp1++ = '"';
*cp1++ = '\n';
*cp1 = '\0';
rv = cp1 - buf;
} else {
rv = sprintf(buf, "\"\"\n");
}
break;
default:
rv = sprintf(buf, "Bad parameter %s, type %i\n",
param->name, param->var_type);
break;
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return rv;
}
EXPORT_SYMBOL_GPL(spk_var_show);
/*
* Used to reset either default_pitch or default_vol.
*/
static inline void spk_reset_default_value(char *header_name,
int *synth_default_value, int idx)
{
struct st_var_header *param;
if (synth && synth_default_value) {
param = spk_var_header_by_name(header_name);
if (param) {
spk_set_num_var(synth_default_value[idx],
param, E_NEW_DEFAULT);
spk_set_num_var(0, param, E_DEFAULT);
pr_info("%s reset to default value\n", param->name);
}
}
}
/*
* This function is called when a user echos a value to one of the
* variable parameters.
*/
ssize_t spk_var_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct st_var_header *param;
int ret;
int len;
char *cp;
struct var_t *var_data;
long value;
unsigned long flags;
param = spk_var_header_by_name(attr->attr.name);
if (!param)
return -EINVAL;
if (!param->data)
return 0;
ret = 0;
cp = (char *)buf;
string_unescape_any_inplace(cp);
spin_lock_irqsave(&speakup_info.spinlock, flags);
switch (param->var_type) {
case VAR_NUM:
case VAR_TIME:
if (*cp == 'd' || *cp == 'r' || *cp == '\0')
len = E_DEFAULT;
else if (*cp == '+' || *cp == '-')
len = E_INC;
else
len = E_SET;
if (kstrtol(cp, 10, &value) == 0)
ret = spk_set_num_var(value, param, len);
else
pr_warn("overflow or parsing error has occurred");
if (ret == -ERANGE) {
var_data = param->data;
pr_warn("value for %s out of range, expect %d to %d\n",
param->name,
var_data->u.n.low, var_data->u.n.high);
}
/*
* If voice was just changed, we might need to reset our default
* pitch and volume.
*/
if (param->var_id == VOICE && synth &&
(ret == 0 || ret == -ERESTART)) {
var_data = param->data;
value = var_data->u.n.value;
spk_reset_default_value("pitch", synth->default_pitch,
value);
spk_reset_default_value("vol", synth->default_vol,
value);
}
break;
case VAR_STRING:
len = strlen(cp);
if ((len >= 1) && (cp[len - 1] == '\n'))
--len;
if ((len >= 2) && (cp[0] == '"') && (cp[len - 1] == '"')) {
++cp;
len -= 2;
}
cp[len] = '\0';
ret = spk_set_string_var(cp, param, len);
if (ret == -E2BIG)
pr_warn("value too long for %s\n",
param->name);
break;
default:
pr_warn("%s unknown type %d\n",
param->name, (int)param->var_type);
break;
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ret == -ERESTART)
pr_info("%s reset to default value\n", param->name);
return count;
}
EXPORT_SYMBOL_GPL(spk_var_store);
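/*
 * Classification examples for the numeric branch above (paths assume
 * the default sysfs location):
 *
 *	echo 2 > /sys/accessibility/speakup/punc_level     -> E_SET
 *	echo +1 > /sys/accessibility/speakup/punc_level    -> E_INC
 *
 * A leading 'd', 'r' or an empty write is classified as E_DEFAULT
 * (reset to the default value).
 */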
/*
* Functions for reading and writing lists of i18n messages. Incomplete.
*/
static ssize_t message_show_helper(char *buf, enum msg_index_t first,
enum msg_index_t last)
{
size_t bufsize = PAGE_SIZE;
char *buf_pointer = buf;
int printed;
enum msg_index_t cursor;
int index = 0;
*buf_pointer = '\0'; /* buf_pointer always looking at a NUL byte. */
for (cursor = first; cursor <= last; cursor++, index++) {
if (bufsize <= 1)
break;
printed = scnprintf(buf_pointer, bufsize, "%d\t%s\n",
index, spk_msg_get(cursor));
buf_pointer += printed;
bufsize -= printed;
}
return buf_pointer - buf;
}
static void report_msg_status(int reset, int received, int used,
int rejected, char *groupname)
{
int len;
char buf[160];
if (reset) {
pr_info("i18n messages from group %s reset to defaults\n",
groupname);
} else if (received) {
len = snprintf(buf, sizeof(buf),
" updated %d of %d i18n messages from group %s\n",
used, received, groupname);
if (rejected)
snprintf(buf + (len - 1), sizeof(buf) - (len - 1),
" with %d reject%s\n",
rejected, rejected > 1 ? "s" : "");
pr_info("%s", buf);
}
}
static ssize_t message_store_helper(const char *buf, size_t count,
struct msg_group_t *group)
{
char *cp = (char *)buf;
char *end = cp + count;
char *linefeed = NULL;
char *temp = NULL;
ssize_t msg_stored = 0;
ssize_t retval = count;
size_t desc_length = 0;
unsigned long index = 0;
int received = 0;
int used = 0;
int rejected = 0;
int reset = 0;
enum msg_index_t firstmessage = group->start;
enum msg_index_t lastmessage = group->end;
enum msg_index_t curmessage;
while (cp < end) {
while ((cp < end) && (*cp == ' ' || *cp == '\t'))
cp++;
if (cp == end)
break;
if (strchr("dDrR", *cp)) {
reset = 1;
break;
}
received++;
linefeed = strchr(cp, '\n');
if (!linefeed) {
rejected++;
break;
}
if (!isdigit(*cp)) {
rejected++;
cp = linefeed + 1;
continue;
}
/*
* Do not replace with kstrtoul:
* here we need temp to be updated
*/
index = simple_strtoul(cp, &temp, 10);
while ((temp < linefeed) && (*temp == ' ' || *temp == '\t'))
temp++;
desc_length = linefeed - temp;
curmessage = firstmessage + index;
/*
* Note the check (curmessage < firstmessage). It is not
* redundant. Suppose that the user gave us an index
* equal to ULONG_MAX - 1. If firstmessage > 1, then
* firstmessage + index < firstmessage!
*/
if ((curmessage < firstmessage) || (curmessage > lastmessage)) {
rejected++;
cp = linefeed + 1;
continue;
}
msg_stored = spk_msg_set(curmessage, temp, desc_length);
if (msg_stored < 0) {
retval = msg_stored;
if (msg_stored == -ENOMEM)
reset = 1;
break;
}
used++;
cp = linefeed + 1;
}
if (reset)
spk_reset_msg_group(group);
report_msg_status(reset, received, used, rejected, group->name);
return retval;
}
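/*
 * Line format accepted above (illustrative; "colors" is one of the
 * i18n message groups exposed via the attributes declared below):
 *
 *	echo "1 blue" > /sys/accessibility/speakup/i18n/colors
 *
 * The number is the zero-based index within the group and the rest of
 * the line is the replacement text; 'd' or 'r' resets the whole group.
 */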
static ssize_t message_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
ssize_t retval = 0;
struct msg_group_t *group = spk_find_msg_group(attr->attr.name);
unsigned long flags;
if (WARN_ON(!group))
return -EINVAL;
spin_lock_irqsave(&speakup_info.spinlock, flags);
retval = message_show_helper(buf, group->start, group->end);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return retval;
}
static ssize_t message_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct msg_group_t *group = spk_find_msg_group(attr->attr.name);
if (WARN_ON(!group))
return -EINVAL;
return message_store_helper(buf, count, group);
}
/*
* Declare the attributes.
*/
static struct kobj_attribute keymap_attribute =
__ATTR_RW(keymap);
static struct kobj_attribute silent_attribute =
__ATTR_WO(silent);
static struct kobj_attribute synth_attribute =
__ATTR_RW(synth);
static struct kobj_attribute synth_direct_attribute =
__ATTR_WO(synth_direct);
static struct kobj_attribute version_attribute =
__ATTR_RO(version);
static struct kobj_attribute delimiters_attribute =
__ATTR(delimiters, 0644, punc_show, punc_store);
static struct kobj_attribute ex_num_attribute =
__ATTR(ex_num, 0644, punc_show, punc_store);
static struct kobj_attribute punc_all_attribute =
__ATTR(punc_all, 0644, punc_show, punc_store);
static struct kobj_attribute punc_most_attribute =
__ATTR(punc_most, 0644, punc_show, punc_store);
static struct kobj_attribute punc_some_attribute =
__ATTR(punc_some, 0644, punc_show, punc_store);
static struct kobj_attribute repeats_attribute =
__ATTR(repeats, 0644, punc_show, punc_store);
static struct kobj_attribute attrib_bleep_attribute =
__ATTR(attrib_bleep, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute bell_pos_attribute =
__ATTR(bell_pos, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute bleep_time_attribute =
__ATTR(bleep_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute bleeps_attribute =
__ATTR(bleeps, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute cursor_time_attribute =
__ATTR(cursor_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute key_echo_attribute =
__ATTR(key_echo, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute no_interrupt_attribute =
__ATTR(no_interrupt, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punc_level_attribute =
__ATTR(punc_level, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute reading_punc_attribute =
__ATTR(reading_punc, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute say_control_attribute =
__ATTR(say_control, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute say_word_ctl_attribute =
__ATTR(say_word_ctl, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute spell_delay_attribute =
__ATTR(spell_delay, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute cur_phonetic_attribute =
__ATTR(cur_phonetic, 0644, spk_var_show, spk_var_store);
/*
* These attributes are i18n related.
*/
static struct kobj_attribute announcements_attribute =
__ATTR(announcements, 0644, message_show, message_store);
static struct kobj_attribute characters_attribute =
__ATTR(characters, 0644, chars_chartab_show,
chars_chartab_store);
static struct kobj_attribute chartab_attribute =
__ATTR(chartab, 0644, chars_chartab_show,
chars_chartab_store);
static struct kobj_attribute ctl_keys_attribute =
__ATTR(ctl_keys, 0644, message_show, message_store);
static struct kobj_attribute colors_attribute =
__ATTR(colors, 0644, message_show, message_store);
static struct kobj_attribute formatted_attribute =
__ATTR(formatted, 0644, message_show, message_store);
static struct kobj_attribute function_names_attribute =
__ATTR(function_names, 0644, message_show, message_store);
static struct kobj_attribute key_names_attribute =
__ATTR(key_names, 0644, message_show, message_store);
static struct kobj_attribute states_attribute =
__ATTR(states, 0644, message_show, message_store);
/*
* Create groups of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *main_attrs[] = {
&keymap_attribute.attr,
&silent_attribute.attr,
&synth_attribute.attr,
&synth_direct_attribute.attr,
&version_attribute.attr,
&delimiters_attribute.attr,
&ex_num_attribute.attr,
&punc_all_attribute.attr,
&punc_most_attribute.attr,
&punc_some_attribute.attr,
&repeats_attribute.attr,
&attrib_bleep_attribute.attr,
&bell_pos_attribute.attr,
&bleep_time_attribute.attr,
&bleeps_attribute.attr,
&cursor_time_attribute.attr,
&key_echo_attribute.attr,
&no_interrupt_attribute.attr,
&punc_level_attribute.attr,
&reading_punc_attribute.attr,
&say_control_attribute.attr,
&say_word_ctl_attribute.attr,
&spell_delay_attribute.attr,
&cur_phonetic_attribute.attr,
NULL,
};
static struct attribute *i18n_attrs[] = {
&announcements_attribute.attr,
&characters_attribute.attr,
&chartab_attribute.attr,
&ctl_keys_attribute.attr,
&colors_attribute.attr,
&formatted_attribute.attr,
&function_names_attribute.attr,
&key_names_attribute.attr,
&states_attribute.attr,
NULL,
};
/*
* An unnamed attribute group will put all of the attributes directly in
* the kobject directory. If we specify a name, a subdirectory will be
* created for the attributes with the directory being the name of the
* attribute group.
*/
static const struct attribute_group main_attr_group = {
.attrs = main_attrs,
};
static const struct attribute_group i18n_attr_group = {
.attrs = i18n_attrs,
.name = "i18n",
};
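/*
 * Resulting sysfs layout once speakup_kobj_init() below has registered
 * both groups (entries abbreviated):
 *
 *	/sys/accessibility/speakup/keymap, synth, version, ...
 *	/sys/accessibility/speakup/i18n/characters, colors, ...
 *
 * The unnamed main group lands directly in the speakup directory; the
 * named group gets its own "i18n" subdirectory.
 */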
static struct kobject *accessibility_kobj;
struct kobject *speakup_kobj;
int speakup_kobj_init(void)
{
int retval;
/*
* Create a simple kobject with the name of "accessibility",
* located under /sys/
*
* As this is a simple directory, no uevent will be sent to
* userspace. That is why this function should not be used for
* any type of dynamic kobjects, where the name and number are
* not known ahead of time.
*/
accessibility_kobj = kobject_create_and_add("accessibility", NULL);
if (!accessibility_kobj) {
retval = -ENOMEM;
goto out;
}
speakup_kobj = kobject_create_and_add("speakup", accessibility_kobj);
if (!speakup_kobj) {
retval = -ENOMEM;
goto err_acc;
}
/* Create the files associated with this kobject */
retval = sysfs_create_group(speakup_kobj, &main_attr_group);
if (retval)
goto err_speakup;
retval = sysfs_create_group(speakup_kobj, &i18n_attr_group);
if (retval)
goto err_group;
goto out;
err_group:
sysfs_remove_group(speakup_kobj, &main_attr_group);
err_speakup:
kobject_put(speakup_kobj);
err_acc:
kobject_put(accessibility_kobj);
out:
return retval;
}
void speakup_kobj_exit(void)
{
sysfs_remove_group(speakup_kobj, &i18n_attr_group);
sysfs_remove_group(speakup_kobj, &main_attr_group);
kobject_put(speakup_kobj);
kobject_put(accessibility_kobj);
}
| linux-master | drivers/accessibility/speakup/kobjects.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* originally written by: Kirk Reiser <[email protected]>
* this version considerably modified by David Borowski, [email protected]
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
* specifically written as a driver for the speakup screen review
* package and is not a general device driver.
*/
#include "spk_priv.h"
#include "speakup.h"
#define DRV_VERSION "2.11"
#define SYNTH_CLEAR 0x18
#define PROCSPEECH '\r'
static void synth_flush(struct spk_synth *synth);
enum default_vars_id {
CAPS_START_ID = 0, CAPS_STOP_ID,
RATE_ID, PITCH_ID,
VOL_ID, TONE_ID, PUNCT_ID,
DIRECT_ID, V_LAST_VAR_ID,
NB_ID
};
static struct var_t vars[NB_ID] = {
[CAPS_START_ID] = { CAPS_START, .u.s = {"\x05P+" } },
[CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"\x05P-" } },
[RATE_ID] = { RATE, .u.n = {"\x05R%d", 7, 0, 9, 0, 0, NULL } },
[PITCH_ID] = { PITCH, .u.n = {"\x05P%d", 3, 0, 9, 0, 0, NULL } },
[VOL_ID] = { VOL, .u.n = {"\x05V%d", 9, 0, 9, 0, 0, NULL } },
[TONE_ID] = { TONE, .u.n = {"\x05T%c", 8, 0, 25, 65, 0, NULL } },
[PUNCT_ID] = { PUNCT, .u.n = {"\x05M%c", 0, 0, 3, 0, 0, "nsma" } },
[DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
/* These attributes will appear in /sys/accessibility/speakup/spkout. */
static struct kobj_attribute caps_start_attribute =
__ATTR(caps_start, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
__ATTR(tone, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
__ATTR(vol, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
__ATTR(delay_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
__ATTR(direct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
__ATTR(trigger_time, 0644, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&tone_attribute.attr,
&vol_attribute.attr,
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
static struct spk_synth synth_spkout = {
.name = "spkout",
.version = DRV_VERSION,
.long_name = "Speakout",
.init = "\005W1\005I2\005C3",
.procspeech = PROCSPEECH,
.clear = SYNTH_CLEAR,
.delay = 500,
.trigger = 50,
.jiffies = 50,
.full = 40000,
.dev_name = SYNTH_DEFAULT_DEV,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
.io_ops = &spk_ttyio_ops,
.probe = spk_ttyio_synth_probe,
.release = spk_ttyio_release,
.synth_immediate = spk_ttyio_synth_immediate,
.catch_up = spk_do_catch_up,
.flush = synth_flush,
.is_alive = spk_synth_is_alive_restart,
.synth_adjust = NULL,
.read_buff_add = NULL,
.get_index = spk_synth_get_index,
.indexing = {
.command = "\x05[%c",
.lowindex = 1,
.highindex = 5,
.currindex = 1,
},
.attributes = {
.attrs = synth_attrs,
.name = "spkout",
},
};
static void synth_flush(struct spk_synth *synth)
{
synth->io_ops->flush_buffer(synth);
synth->io_ops->send_xchar(synth, SYNTH_CLEAR);
}
module_param_named(ser, synth_spkout.ser, int, 0444);
module_param_named(dev, synth_spkout.dev_name, charp, 0444);
module_param_named(start, synth_spkout.startup, short, 0444);
module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444);
module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444);
module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444);
module_param_named(tone, vars[TONE_ID].u.n.default_val, int, 0444);
module_param_named(punct, vars[PUNCT_ID].u.n.default_val, int, 0444);
module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
MODULE_PARM_DESC(dev, "Set the device e.g. ttyUSB0, for the synthesizer.");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
MODULE_PARM_DESC(rate, "Set the rate variable on load.");
MODULE_PARM_DESC(pitch, "Set the pitch variable on load.");
MODULE_PARM_DESC(vol, "Set the vol variable on load.");
MODULE_PARM_DESC(tone, "Set the tone variable on load.");
MODULE_PARM_DESC(punct, "Set the punct variable on load.");
MODULE_PARM_DESC(direct, "Set the direct variable on load.");
module_spk_synth(synth_spkout);
MODULE_AUTHOR("Kirk Reiser <[email protected]>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for Speak Out synthesizers");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| linux-master | drivers/accessibility/speakup/speakup_spkout.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h> /* for kmalloc */
#include <linux/consolemap.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/device.h> /* for dev_warn */
#include <linux/selection.h>
#include <linux/workqueue.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/atomic.h>
#include <linux/console.h>
#include "speakup.h"
unsigned short spk_xs, spk_ys, spk_xe, spk_ye; /* our region points */
struct vc_data *spk_sel_cons;
struct speakup_selection_work {
struct work_struct work;
struct tiocl_selection sel;
struct tty_struct *tty;
};
static void __speakup_set_selection(struct work_struct *work)
{
struct speakup_selection_work *ssw =
container_of(work, struct speakup_selection_work, work);
struct tty_struct *tty;
struct tiocl_selection sel;
sel = ssw->sel;
/* this ensures we copy sel before releasing the lock below */
rmb();
/* release the lock by setting tty of the struct to NULL */
tty = xchg(&ssw->tty, NULL);
if (spk_sel_cons != vc_cons[fg_console].d) {
spk_sel_cons = vc_cons[fg_console].d;
pr_warn("Selection: mark console not the same as cut\n");
goto unref;
}
console_lock();
clear_selection();
console_unlock();
set_selection_kernel(&sel, tty);
unref:
tty_kref_put(tty);
}
static struct speakup_selection_work speakup_sel_work = {
.work = __WORK_INITIALIZER(speakup_sel_work.work,
__speakup_set_selection)
};
int speakup_set_selection(struct tty_struct *tty)
{
/* we get kref here first in order to avoid a subtle race when
* cancelling selection work. getting kref first establishes the
* invariant that if speakup_sel_work.tty is not NULL when
* speakup_cancel_selection() is called, it must be the case that a put
* kref is pending.
*/
tty_kref_get(tty);
if (cmpxchg(&speakup_sel_work.tty, NULL, tty)) {
tty_kref_put(tty);
return -EBUSY;
}
/* now we have the 'lock' by setting tty member of
* speakup_selection_work. wmb() ensures that writes to
* speakup_sel_work don't happen before cmpxchg() above.
*/
wmb();
speakup_sel_work.sel.xs = spk_xs + 1;
speakup_sel_work.sel.ys = spk_ys + 1;
speakup_sel_work.sel.xe = spk_xe + 1;
speakup_sel_work.sel.ye = spk_ye + 1;
speakup_sel_work.sel.sel_mode = TIOCL_SELCHAR;
schedule_work_on(WORK_CPU_UNBOUND, &speakup_sel_work.work);
return 0;
}
void speakup_cancel_selection(void)
{
struct tty_struct *tty;
cancel_work_sync(&speakup_sel_work.work);
/* setting to null so that if work fails to run and we cancel it,
* we can run it again without getting EBUSY forever from there on.
* we need to use xchg here to avoid race with speakup_set_selection()
*/
tty = xchg(&speakup_sel_work.tty, NULL);
if (tty)
tty_kref_put(tty);
}
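/*
 * Lock protocol for speakup_sel_work.tty, collected from the code above:
 *
 *	NULL -> tty   cmpxchg() in speakup_set_selection() (trylock)
 *	tty -> NULL   xchg() in __speakup_set_selection() (work ran), or
 *	              xchg() in speakup_cancel_selection() (cancelled)
 *
 * Whichever side performs the tty -> NULL transition inherits the
 * pending tty_kref_put().
 */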
static void __speakup_paste_selection(struct work_struct *work)
{
struct speakup_selection_work *ssw =
container_of(work, struct speakup_selection_work, work);
struct tty_struct *tty = xchg(&ssw->tty, NULL);
paste_selection(tty);
tty_kref_put(tty);
}
static struct speakup_selection_work speakup_paste_work = {
.work = __WORK_INITIALIZER(speakup_paste_work.work,
__speakup_paste_selection)
};
int speakup_paste_selection(struct tty_struct *tty)
{
tty_kref_get(tty);
if (cmpxchg(&speakup_paste_work.tty, NULL, tty)) {
tty_kref_put(tty);
return -EBUSY;
}
schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work);
return 0;
}
void speakup_cancel_paste(void)
{
struct tty_struct *tty;
cancel_work_sync(&speakup_paste_work.work);
tty = xchg(&speakup_paste_work.tty, NULL);
if (tty)
tty_kref_put(tty);
}
| linux-master | drivers/accessibility/speakup/selection.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/console.h>
#include <linux/types.h>
#include <linux/wait.h>
#include "speakup.h"
#include "spk_priv.h"
#define SYNTH_BUF_SIZE 8192 /* currently 8K u16 entries */
static u16 synth_buffer[SYNTH_BUF_SIZE]; /* guess what this is for! */
static u16 *buff_in = synth_buffer;
static u16 *buff_out = synth_buffer;
static u16 *buffer_end = synth_buffer + SYNTH_BUF_SIZE - 1;
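/*
 * Simple ring buffer: buff_in chases buff_out through synth_buffer[],
 * both wrapping back to synth_buffer once they pass buffer_end; e.g.
 * with buff_out at slot 0 and buff_in at slot 3, three characters are
 * queued.  synth_buffer_free() below handles either pointer ordering,
 * and synth_buffer_add() throttles the TTYs when free space runs low.
 */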
/* These try to throttle applications by stopping the TTYs
* Note: we need to make sure that we will restart them eventually, which is
* usually not possible to do from the notifiers. TODO: it should be possible
* starting from linux 2.6.26.
*
* So we only stop when we know alive == 1 (else we discard the data anyway),
* and the alive synth will eventually call start_ttys from the thread context.
*/
void speakup_start_ttys(void)
{
int i;
for (i = 0; i < MAX_NR_CONSOLES; i++) {
if (speakup_console[i] && speakup_console[i]->tty_stopped)
continue;
if (vc_cons[i].d && vc_cons[i].d->port.tty)
start_tty(vc_cons[i].d->port.tty);
}
}
EXPORT_SYMBOL_GPL(speakup_start_ttys);
static void speakup_stop_ttys(void)
{
int i;
for (i = 0; i < MAX_NR_CONSOLES; i++)
if (vc_cons[i].d && vc_cons[i].d->port.tty)
stop_tty(vc_cons[i].d->port.tty);
}
static int synth_buffer_free(void)
{
int chars_free;
if (buff_in >= buff_out)
chars_free = SYNTH_BUF_SIZE - (buff_in - buff_out);
else
chars_free = buff_out - buff_in;
return chars_free;
}
int synth_buffer_empty(void)
{
return (buff_in == buff_out);
}
EXPORT_SYMBOL_GPL(synth_buffer_empty);
void synth_buffer_add(u16 ch)
{
if (!synth->alive) {
/* This makes sure that we won't stop TTYs if there is no synth
* to restart them
*/
return;
}
if (synth_buffer_free() <= 100) {
synth_start();
speakup_stop_ttys();
}
if (synth_buffer_free() <= 1)
return;
*buff_in++ = ch;
if (buff_in > buffer_end)
buff_in = synth_buffer;
/* We have written something to the speech synthesis, so we are not
* paused any more.
*/
spk_paused = false;
}
u16 synth_buffer_getc(void)
{
u16 ch;
if (buff_out == buff_in)
return 0;
ch = *buff_out++;
if (buff_out > buffer_end)
buff_out = synth_buffer;
return ch;
}
EXPORT_SYMBOL_GPL(synth_buffer_getc);
u16 synth_buffer_peek(void)
{
if (buff_out == buff_in)
return 0;
return *buff_out;
}
EXPORT_SYMBOL_GPL(synth_buffer_peek);
void synth_buffer_skip_nonlatin1(void)
{
while (buff_out != buff_in) {
if (*buff_out < 0x100)
return;
buff_out++;
if (buff_out > buffer_end)
buff_out = synth_buffer;
}
}
EXPORT_SYMBOL_GPL(synth_buffer_skip_nonlatin1);
void synth_buffer_clear(void)
{
buff_in = synth_buffer;
buff_out = synth_buffer;
}
EXPORT_SYMBOL_GPL(synth_buffer_clear);
| linux-master | drivers/accessibility/speakup/buffers.c |
// SPDX-License-Identifier: GPL-2.0+
/* speakup.c
* review functions for the speakup screen review package.
* originally written by: Kirk Reiser and Andy Berdan.
*
* extensively modified by David Borowski.
*
* Copyright (C) 1998 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*/
#include <linux/kernel.h>
#include <linux/vt.h>
#include <linux/tty.h>
#include <linux/mm.h> /* __get_free_page() and friends */
#include <linux/vt_kern.h>
#include <linux/ctype.h>
#include <linux/selection.h>
#include <linux/unistd.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/keyboard.h> /* for KT_SHIFT */
#include <linux/kbd_kern.h> /* for vc_kbd_* and friends */
#include <linux/input.h>
#include <linux/kmod.h>
/* speakup_*_selection */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/consolemap.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/uaccess.h> /* copy_from|to|user() and others */
#include "spk_priv.h"
#include "speakup.h"
#define MAX_DELAY msecs_to_jiffies(500)
#define MINECHOCHAR SPACE
MODULE_AUTHOR("Kirk Reiser <[email protected]>");
MODULE_AUTHOR("Daniel Drake <[email protected]>");
MODULE_DESCRIPTION("Speakup console speech");
MODULE_LICENSE("GPL");
MODULE_VERSION(SPEAKUP_VERSION);
char *synth_name;
module_param_named(synth, synth_name, charp, 0444);
module_param_named(quiet, spk_quiet_boot, bool, 0444);
MODULE_PARM_DESC(synth, "Synth to start if speakup is built in.");
MODULE_PARM_DESC(quiet, "Do not announce when the synthesizer is found.");
special_func spk_special_handler;
short spk_pitch_shift, synth_flags;
static u16 buf[256];
int spk_attrib_bleep, spk_bleeps, spk_bleep_time = 10;
int spk_no_intr, spk_spell_delay;
int spk_key_echo, spk_say_word_ctl;
int spk_say_ctrl, spk_bell_pos;
short spk_punc_mask;
int spk_punc_level, spk_reading_punc;
int spk_cur_phonetic;
char spk_str_caps_start[MAXVARLEN + 1] = "\0";
char spk_str_caps_stop[MAXVARLEN + 1] = "\0";
char spk_str_pause[MAXVARLEN + 1] = "\0";
bool spk_paused;
const struct st_bits_data spk_punc_info[] = {
{"none", "", 0},
{"some", "/$%&@", SOME},
{"most", "$%&#()=+*/@^<>|\\", MOST},
{"all", "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~", PUNC},
{"delimiters", "", B_WDLM},
{"repeats", "()", CH_RPT},
{"extended numeric", "", B_EXNUM},
{"symbols", "", B_SYM},
{NULL, NULL}
};
static char mark_cut_flag;
#define MAX_KEY 160
static u_char *spk_shift_table;
u_char *spk_our_keys[MAX_KEY];
u_char spk_key_buf[600];
const u_char spk_key_defaults[] = {
#include "speakupmap.h"
};
/* cursor track modes, must be ordered same as cursor_msgs in enum msg_index_t */
enum cursor_track {
CT_Off = 0,
CT_On,
CT_Highlight,
CT_Window,
CT_Max,
read_all_mode = CT_Max,
};
/* Speakup Cursor Track Variables */
static enum cursor_track cursor_track = CT_On, prev_cursor_track = CT_On;
static struct tty_struct *tty;
static void spkup_write(const u16 *in_buf, int count);
static char *phonetic[] = {
"alfa", "bravo", "charlie", "delta", "echo", "foxtrot", "golf", "hotel",
"india", "juliett", "keelo", "leema", "mike", "november", "oscar",
"papa",
"keh beck", "romeo", "sierra", "tango", "uniform", "victer", "whiskey",
"x ray", "yankee", "zulu"
};
/* array of 256 char pointers (one for each character description)
* initialized to default_chars and user selectable via
* /proc/speakup/characters
*/
char *spk_characters[256];
char *spk_default_chars[256] = {
/*000*/ "null", "^a", "^b", "^c", "^d", "^e", "^f", "^g",
/*008*/ "^h", "^i", "^j", "^k", "^l", "^m", "^n", "^o",
/*016*/ "^p", "^q", "^r", "^s", "^t", "^u", "^v", "^w",
/*024*/ "^x", "^y", "^z", "control", "control", "control", "control",
"control",
/*032*/ "space", "bang!", "quote", "number", "dollar", "percent", "and",
"tick",
/*040*/ "left paren", "right paren", "star", "plus", "comma", "dash",
"dot",
"slash",
/*048*/ "zero", "one", "two", "three", "four", "five", "six", "seven",
"eight", "nine",
/*058*/ "colon", "semmy", "less", "equals", "greater", "question", "at",
/*065*/ "EIGH", "B", "C", "D", "E", "F", "G",
/*072*/ "H", "I", "J", "K", "L", "M", "N", "O",
/*080*/ "P", "Q", "R", "S", "T", "U", "V", "W", "X",
/*089*/ "Y", "ZED", "left bracket", "backslash", "right bracket",
"caret",
"line",
/*096*/ "accent", "a", "b", "c", "d", "e", "f", "g",
/*104*/ "h", "i", "j", "k", "l", "m", "n", "o",
/*112*/ "p", "q", "r", "s", "t", "u", "v", "w",
/*120*/ "x", "y", "zed", "left brace", "bar", "right brace", "tihlduh",
/*127*/ "del", "control", "control", "control", "control", "control",
"control", "control", "control", "control", "control",
/*138*/ "control", "control", "control", "control", "control",
"control", "control", "control", "control", "control",
"control", "control",
/*150*/ "control", "control", "control", "control", "control",
"control", "control", "control", "control", "control",
/*160*/ "nbsp", "inverted bang",
/*162*/ "cents", "pounds", "currency", "yen", "broken bar", "section",
/*168*/ "diaeresis", "copyright", "female ordinal", "double left angle",
/*172*/ "not", "soft hyphen", "registered", "macron",
/*176*/ "degrees", "plus or minus", "super two", "super three",
/*180*/ "acute accent", "micro", "pilcrow", "middle dot",
/*184*/ "cedilla", "super one", "male ordinal", "double right angle",
/*188*/ "one quarter", "one half", "three quarters",
"inverted question",
/*192*/ "A GRAVE", "A ACUTE", "A CIRCUMFLEX", "A TILDE", "A OOMLAUT",
"A RING",
/*198*/ "AE", "C CIDELLA", "E GRAVE", "E ACUTE", "E CIRCUMFLEX",
"E OOMLAUT",
/*204*/ "I GRAVE", "I ACUTE", "I CIRCUMFLEX", "I OOMLAUT", "ETH",
"N TILDE",
/*210*/ "O GRAVE", "O ACUTE", "O CIRCUMFLEX", "O TILDE", "O OOMLAUT",
/*215*/ "multiplied by", "O STROKE", "U GRAVE", "U ACUTE",
"U CIRCUMFLEX",
/*220*/ "U OOMLAUT", "Y ACUTE", "THORN", "sharp s", "a grave",
/*225*/ "a acute", "a circumflex", "a tilde", "a oomlaut", "a ring",
/*230*/ "ae", "c cidella", "e grave", "e acute",
/*234*/ "e circumflex", "e oomlaut", "i grave", "i acute",
"i circumflex",
/*239*/ "i oomlaut", "eth", "n tilde", "o grave", "o acute",
"o circumflex",
/*245*/ "o tilde", "o oomlaut", "divided by", "o stroke", "u grave",
"u acute",
/* 251 */ "u circumflex", "u oomlaut", "y acute", "thorn", "y oomlaut"
};
/* array of 256 u_short (one for each character)
* initialized to default_chartab and user selectable via
* /sys/module/speakup/parameters/chartab
*/
u_short spk_chartab[256];
static u_short default_chartab[256] = {
B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, /* 0-7 */
B_CTL, B_CTL, A_CTL, B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, /* 8-15 */
B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, /*16-23 */
B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, B_CTL, /* 24-31 */
WDLM, A_PUNC, PUNC, PUNC, PUNC, PUNC, PUNC, A_PUNC, /* !"#$%&' */
PUNC, PUNC, PUNC, PUNC, A_PUNC, A_PUNC, A_PUNC, PUNC, /* ()*+, -./ */
NUM, NUM, NUM, NUM, NUM, NUM, NUM, NUM, /* 01234567 */
NUM, NUM, A_PUNC, PUNC, PUNC, PUNC, PUNC, A_PUNC, /* 89:;<=>? */
PUNC, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, /* @ABCDEFG */
A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, /* HIJKLMNO */
A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, /* PQRSTUVW */
A_CAP, A_CAP, A_CAP, PUNC, PUNC, PUNC, PUNC, PUNC, /* XYZ[\]^_ */
PUNC, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, /* `abcdefg */
ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, /* hijklmno */
ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, /* pqrstuvw */
ALPHA, ALPHA, ALPHA, PUNC, PUNC, PUNC, PUNC, 0, /* xyz{|}~ */
B_CAPSYM, B_CAPSYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, /* 128-134 */
B_SYM, /* 135 */
B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, /* 136-142 */
B_CAPSYM, /* 143 */
B_CAPSYM, B_CAPSYM, B_SYM, B_CAPSYM, B_SYM, B_SYM, B_SYM, /* 144-150 */
B_SYM, /* 151 */
B_SYM, B_SYM, B_CAPSYM, B_CAPSYM, B_SYM, B_SYM, B_SYM, /*152-158 */
B_SYM, /* 159 */
WDLM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_CAPSYM, /* 160-166 */
B_SYM, /* 167 */
B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, /* 168-175 */
B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, /* 176-183 */
B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, B_SYM, /* 184-191 */
A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, /* 192-199 */
A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, /* 200-207 */
A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, B_SYM, /* 208-215 */
A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, A_CAP, ALPHA, /* 216-223 */
ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, /* 224-231 */
ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, /* 232-239 */
ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, B_SYM, /* 240-247 */
ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA, ALPHA /* 248-255 */
};
struct task_struct *speakup_task;
struct bleep spk_unprocessed_sound;
static int spk_keydown;
static u16 spk_lastkey;
static u_char spk_close_press, keymap_flags;
static u_char last_keycode, this_speakup_key;
static u_long last_spk_jiffy;
struct st_spk_t *speakup_console[MAX_NR_CONSOLES];
DEFINE_MUTEX(spk_mutex);
static int keyboard_notifier_call(struct notifier_block *,
unsigned long code, void *param);
static struct notifier_block keyboard_notifier_block = {
.notifier_call = keyboard_notifier_call,
};
static int vt_notifier_call(struct notifier_block *,
unsigned long code, void *param);
static struct notifier_block vt_notifier_block = {
.notifier_call = vt_notifier_call,
};
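/*
 * Return the attribute byte of the screen cell at pos, masking off the
 * high-font bit so only the colour/blink attributes remain.
 */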
static unsigned char get_attributes(struct vc_data *vc, u16 *pos)
{
pos = screen_pos(vc, pos - (u16 *)vc->vc_origin, true);
return (scr_readw(pos) & ~vc->vc_hi_font_mask) >> 8;
}
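/* Sync the reading cursor (and its saved copy) to the console cursor. */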
static void speakup_date(struct vc_data *vc)
{
spk_x = spk_cx = vc->state.x;
spk_y = spk_cy = vc->state.y;
spk_pos = spk_cp = vc->vc_pos;
spk_old_attr = spk_attr;
spk_attr = get_attributes(vc, (u_short *)spk_pos);
}
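/*
 * Queue a tone: val indexes a 12-note equal-tempered table, and every
 * additional multiple of 12 doubles the frequency (one octave up).
 */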
static void bleep(u_short val)
{
static const short vals[] = {
350, 370, 392, 414, 440, 466, 491, 523, 554, 587, 619, 659
};
short freq;
int time = spk_bleep_time;
freq = vals[val % 12];
if (val > 11)
freq *= (1 << (val / 12));
spk_unprocessed_sound.freq = freq;
spk_unprocessed_sound.jiffies = msecs_to_jiffies(time);
spk_unprocessed_sound.active = 1;
/* We can only have 1 active sound at a time. */
}
static void speakup_shut_up(struct vc_data *vc)
{
if (spk_killed)
return;
spk_shut_up |= 0x01;
spk_parked &= 0xfe;
speakup_date(vc);
if (synth)
spk_do_flush();
}
static void speech_kill(struct vc_data *vc)
{
char val = synth->is_alive(synth);
if (val == 0)
return;
/* re-enables synth, if disabled */
if (val == 2 || spk_killed) {
/* dead */
spk_shut_up &= ~0x40;
synth_printf("%s\n", spk_msg_get(MSG_IAM_ALIVE));
} else {
synth_printf("%s\n", spk_msg_get(MSG_YOU_KILLED_SPEAKUP));
spk_shut_up |= 0x40;
}
}
static void speakup_off(struct vc_data *vc)
{
if (spk_shut_up & 0x80) {
spk_shut_up &= 0x7f;
synth_printf("%s\n", spk_msg_get(MSG_HEY_THATS_BETTER));
} else {
spk_shut_up |= 0x80;
synth_printf("%s\n", spk_msg_get(MSG_YOU_TURNED_ME_OFF));
}
speakup_date(vc);
}
static void speakup_parked(struct vc_data *vc)
{
if (spk_parked & 0x80) {
spk_parked = 0;
synth_printf("%s\n", spk_msg_get(MSG_UNPARKED));
} else {
spk_parked |= 0x80;
synth_printf("%s\n", spk_msg_get(MSG_PARKED));
}
}
static void speakup_cut(struct vc_data *vc)
{
static const char err_buf[] = "set selection failed";
int ret;
if (!mark_cut_flag) {
mark_cut_flag = 1;
spk_xs = (u_short)spk_x;
spk_ys = (u_short)spk_y;
spk_sel_cons = vc;
synth_printf("%s\n", spk_msg_get(MSG_MARK));
return;
}
spk_xe = (u_short)spk_x;
spk_ye = (u_short)spk_y;
mark_cut_flag = 0;
synth_printf("%s\n", spk_msg_get(MSG_CUT));
ret = speakup_set_selection(tty);
switch (ret) {
case 0:
break; /* no error */
case -EFAULT:
pr_warn("%sEFAULT\n", err_buf);
break;
case -EINVAL:
pr_warn("%sEINVAL\n", err_buf);
break;
case -ENOMEM:
pr_warn("%sENOMEM\n", err_buf);
break;
}
}
static void speakup_paste(struct vc_data *vc)
{
if (mark_cut_flag) {
mark_cut_flag = 0;
synth_printf("%s\n", spk_msg_get(MSG_MARK_CLEARED));
} else {
synth_printf("%s\n", spk_msg_get(MSG_PASTE));
speakup_paste_selection(tty);
}
}
static void say_attributes(struct vc_data *vc)
{
int fg = spk_attr & 0x0f;
int bg = spk_attr >> 4;
synth_printf("%s", spk_msg_get(MSG_COLORS_START + fg));
if (bg > 7) {
synth_printf(" %s ", spk_msg_get(MSG_ON_BLINKING));
bg -= 8;
} else {
synth_printf(" %s ", spk_msg_get(MSG_ON));
}
synth_printf("%s\n", spk_msg_get(MSG_COLORS_START + bg));
}
/* must be ordered same as edge_msgs in enum msg_index_t */
enum edge {
edge_none = 0,
edge_top,
edge_bottom,
edge_left,
edge_right,
edge_quiet
};
static void announce_edge(struct vc_data *vc, enum edge msg_id)
{
if (spk_bleeps & 1)
bleep(spk_y);
if ((spk_bleeps & 2) && (msg_id < edge_quiet))
synth_printf("%s\n",
spk_msg_get(MSG_EDGE_MSGS_START + msg_id - 1));
}
static void speak_char(u16 ch)
{
char *cp;
struct var_t *direct = spk_get_var(DIRECT);
if (ch >= 0x100 || (direct && direct->u.n.value)) {
if (ch < 0x100 && IS_CHAR(ch, B_CAP)) {
spk_pitch_shift++;
synth_printf("%s", spk_str_caps_start);
}
synth_putwc_s(ch);
if (ch < 0x100 && IS_CHAR(ch, B_CAP))
synth_printf("%s", spk_str_caps_stop);
return;
}
cp = spk_characters[ch];
if (!cp) {
pr_info("%s: cp == NULL!\n", __func__);
return;
}
if (IS_CHAR(ch, B_CAP)) {
spk_pitch_shift++;
synth_printf("%s %s %s",
spk_str_caps_start, cp, spk_str_caps_stop);
} else {
if (*cp == '^') {
cp++;
synth_printf(" %s%s ", spk_msg_get(MSG_CTRL), cp);
} else {
synth_printf(" %s ", cp);
}
}
}
static u16 get_char(struct vc_data *vc, u16 *pos, u_char *attribs)
{
u16 ch = ' ';
if (vc && pos) {
u16 w;
u16 c;
pos = screen_pos(vc, pos - (u16 *)vc->vc_origin, true);
w = scr_readw(pos);
c = w & 0xff;
if (w & vc->vc_hi_font_mask) {
w &= ~vc->vc_hi_font_mask;
c |= 0x100;
}
ch = inverse_translate(vc, c, true);
*attribs = (w & 0xff00) >> 8;
}
return ch;
}
static void say_char(struct vc_data *vc)
{
u16 ch;
spk_old_attr = spk_attr;
ch = get_char(vc, (u_short *)spk_pos, &spk_attr);
if (spk_attr != spk_old_attr) {
if (spk_attrib_bleep & 1)
bleep(spk_y);
if (spk_attrib_bleep & 2)
say_attributes(vc);
}
speak_char(ch);
}
static void say_phonetic_char(struct vc_data *vc)
{
u16 ch;
spk_old_attr = spk_attr;
ch = get_char(vc, (u_short *)spk_pos, &spk_attr);
if (ch <= 0x7f && isalpha(ch)) {
ch &= 0x1f;
synth_printf("%s\n", phonetic[--ch]);
} else {
if (ch < 0x100 && IS_CHAR(ch, B_NUM))
synth_printf("%s ", spk_msg_get(MSG_NUMBER));
speak_char(ch);
}
}
static void say_prev_char(struct vc_data *vc)
{
spk_parked |= 0x01;
if (spk_x == 0) {
announce_edge(vc, edge_left);
return;
}
spk_x--;
spk_pos -= 2;
say_char(vc);
}
static void say_next_char(struct vc_data *vc)
{
spk_parked |= 0x01;
if (spk_x == vc->vc_cols - 1) {
announce_edge(vc, edge_right);
return;
}
spk_x++;
spk_pos += 2;
say_char(vc);
}
/* get_word - will first check to see if the character under the
* reading cursor is a space and if spk_say_word_ctl is true it will
* return the word space. If spk_say_word_ctl is not set it will check to
* see if there is a word starting on the next position to the right
* and return that word if it exists. If it does not exist it will
* move left to the beginning of any previous word on the line or the
 * beginning of the line, whichever comes first.
*/
static u_long get_word(struct vc_data *vc)
{
u_long cnt = 0, tmpx = spk_x, tmp_pos = spk_pos;
u16 ch;
u16 attr_ch;
u_char temp;
spk_old_attr = spk_attr;
ch = get_char(vc, (u_short *)tmp_pos, &temp);
	/* decided to take out the sayword if on a space (mis-information) */
if (spk_say_word_ctl && ch == SPACE) {
*buf = '\0';
synth_printf("%s\n", spk_msg_get(MSG_SPACE));
return 0;
} else if (tmpx < vc->vc_cols - 2 &&
(ch == SPACE || ch == 0 || (ch < 0x100 && IS_WDLM(ch))) &&
get_char(vc, (u_short *)tmp_pos + 1, &temp) > SPACE) {
tmp_pos += 2;
tmpx++;
} else {
while (tmpx > 0) {
ch = get_char(vc, (u_short *)tmp_pos - 1, &temp);
if ((ch == SPACE || ch == 0 ||
(ch < 0x100 && IS_WDLM(ch))) &&
get_char(vc, (u_short *)tmp_pos, &temp) > SPACE)
break;
tmp_pos -= 2;
tmpx--;
}
}
attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
buf[cnt++] = attr_ch;
while (tmpx < vc->vc_cols - 1) {
tmp_pos += 2;
tmpx++;
ch = get_char(vc, (u_short *)tmp_pos, &temp);
if (ch == SPACE || ch == 0 ||
(buf[cnt - 1] < 0x100 && IS_WDLM(buf[cnt - 1]) &&
ch > SPACE))
break;
buf[cnt++] = ch;
}
buf[cnt] = '\0';
return cnt;
}
static void say_word(struct vc_data *vc)
{
u_long cnt = get_word(vc);
u_short saved_punc_mask = spk_punc_mask;
if (cnt == 0)
return;
spk_punc_mask = PUNC;
buf[cnt++] = SPACE;
spkup_write(buf, cnt);
spk_punc_mask = saved_punc_mask;
}
static void say_prev_word(struct vc_data *vc)
{
u_char temp;
u16 ch;
enum edge edge_said = edge_none;
u_short last_state = 0, state = 0;
spk_parked |= 0x01;
if (spk_x == 0) {
if (spk_y == 0) {
announce_edge(vc, edge_top);
return;
}
spk_y--;
spk_x = vc->vc_cols;
edge_said = edge_quiet;
}
while (1) {
if (spk_x == 0) {
if (spk_y == 0) {
edge_said = edge_top;
break;
}
if (edge_said != edge_quiet)
edge_said = edge_left;
if (state > 0)
break;
spk_y--;
spk_x = vc->vc_cols - 1;
} else {
spk_x--;
}
spk_pos -= 2;
ch = get_char(vc, (u_short *)spk_pos, &temp);
if (ch == SPACE || ch == 0)
state = 0;
else if (ch < 0x100 && IS_WDLM(ch))
state = 1;
else
state = 2;
if (state < last_state) {
spk_pos += 2;
spk_x++;
break;
}
last_state = state;
}
if (spk_x == 0 && edge_said == edge_quiet)
edge_said = edge_left;
if (edge_said > edge_none && edge_said < edge_quiet)
announce_edge(vc, edge_said);
say_word(vc);
}
static void say_next_word(struct vc_data *vc)
{
u_char temp;
u16 ch;
enum edge edge_said = edge_none;
u_short last_state = 2, state = 0;
spk_parked |= 0x01;
if (spk_x == vc->vc_cols - 1 && spk_y == vc->vc_rows - 1) {
announce_edge(vc, edge_bottom);
return;
}
while (1) {
ch = get_char(vc, (u_short *)spk_pos, &temp);
if (ch == SPACE || ch == 0)
state = 0;
else if (ch < 0x100 && IS_WDLM(ch))
state = 1;
else
state = 2;
if (state > last_state)
break;
if (spk_x >= vc->vc_cols - 1) {
if (spk_y == vc->vc_rows - 1) {
edge_said = edge_bottom;
break;
}
state = 0;
spk_y++;
spk_x = 0;
edge_said = edge_right;
} else {
spk_x++;
}
spk_pos += 2;
last_state = state;
}
if (edge_said > edge_none)
announce_edge(vc, edge_said);
say_word(vc);
}
static void spell_word(struct vc_data *vc)
{
static char const *delay_str[] = { "", ",", ".", ". .", ". . ." };
u16 *cp = buf;
char *cp1;
char *str_cap = spk_str_caps_stop;
char *last_cap = spk_str_caps_stop;
struct var_t *direct = spk_get_var(DIRECT);
u16 ch;
if (!get_word(vc))
return;
while ((ch = *cp)) {
if (cp != buf)
synth_printf(" %s ", delay_str[spk_spell_delay]);
/* FIXME: Non-latin1 considered as lower case */
if (ch < 0x100 && IS_CHAR(ch, B_CAP)) {
str_cap = spk_str_caps_start;
if (*spk_str_caps_stop)
spk_pitch_shift++;
else /* synth has no pitch */
last_cap = spk_str_caps_stop;
} else {
str_cap = spk_str_caps_stop;
}
if (str_cap != last_cap) {
synth_printf("%s", str_cap);
last_cap = str_cap;
}
if (ch >= 0x100 || (direct && direct->u.n.value)) {
synth_putwc_s(ch);
} else if (this_speakup_key == SPELL_PHONETIC &&
ch <= 0x7f && isalpha(ch)) {
ch &= 0x1f;
cp1 = phonetic[--ch];
synth_printf("%s", cp1);
} else {
cp1 = spk_characters[ch];
if (*cp1 == '^') {
synth_printf("%s", spk_msg_get(MSG_CTRL));
cp1++;
}
synth_printf("%s", cp1);
}
cp++;
}
if (str_cap != spk_str_caps_stop)
synth_printf("%s", spk_str_caps_stop);
}
static int get_line(struct vc_data *vc)
{
u_long tmp = spk_pos - (spk_x * 2);
int i = 0;
u_char tmp2;
spk_old_attr = spk_attr;
spk_attr = get_attributes(vc, (u_short *)spk_pos);
for (i = 0; i < vc->vc_cols; i++) {
buf[i] = get_char(vc, (u_short *)tmp, &tmp2);
tmp += 2;
}
for (--i; i >= 0; i--)
if (buf[i] != SPACE)
break;
return ++i;
}
static void say_line(struct vc_data *vc)
{
int i = get_line(vc);
u16 *cp;
u_short saved_punc_mask = spk_punc_mask;
if (i == 0) {
synth_printf("%s\n", spk_msg_get(MSG_BLANK));
return;
}
buf[i++] = '\n';
if (this_speakup_key == SAY_LINE_INDENT) {
cp = buf;
while (*cp == SPACE)
cp++;
synth_printf("%zd, ", (cp - buf) + 1);
}
spk_punc_mask = spk_punc_masks[spk_reading_punc];
spkup_write(buf, i);
spk_punc_mask = saved_punc_mask;
}
static void say_prev_line(struct vc_data *vc)
{
spk_parked |= 0x01;
if (spk_y == 0) {
announce_edge(vc, edge_top);
return;
}
spk_y--;
spk_pos -= vc->vc_size_row;
say_line(vc);
}
static void say_next_line(struct vc_data *vc)
{
spk_parked |= 0x01;
if (spk_y == vc->vc_rows - 1) {
announce_edge(vc, edge_bottom);
return;
}
spk_y++;
spk_pos += vc->vc_size_row;
say_line(vc);
}
static int say_from_to(struct vc_data *vc, u_long from, u_long to,
int read_punc)
{
int i = 0;
u_char tmp;
u_short saved_punc_mask = spk_punc_mask;
spk_old_attr = spk_attr;
spk_attr = get_attributes(vc, (u_short *)from);
while (from < to) {
buf[i++] = get_char(vc, (u_short *)from, &tmp);
from += 2;
if (i >= vc->vc_size_row)
break;
}
for (--i; i >= 0; i--)
if (buf[i] != SPACE)
break;
buf[++i] = SPACE;
buf[++i] = '\0';
if (i < 1)
return i;
if (read_punc)
spk_punc_mask = spk_punc_info[spk_reading_punc].mask;
spkup_write(buf, i);
if (read_punc)
spk_punc_mask = saved_punc_mask;
return i - 1;
}
static void say_line_from_to(struct vc_data *vc, u_long from, u_long to,
int read_punc)
{
u_long start = vc->vc_origin + (spk_y * vc->vc_size_row);
u_long end = start + (to * 2);
start += from * 2;
if (say_from_to(vc, start, end, read_punc) <= 0)
if (cursor_track != read_all_mode)
synth_printf("%s\n", spk_msg_get(MSG_BLANK));
}
/* Sentence Reading Commands */
static int currsentence;
static int numsentences[2];
static u16 *sentbufend[2];
static u16 *sentmarks[2][10];
static int currbuf;
static int bn;
static u16 sentbuf[2][256];
static int say_sentence_num(int num, int prev)
{
bn = currbuf;
currsentence = num + 1;
if (prev && --bn == -1)
bn = 1;
if (num > numsentences[bn])
return 0;
spkup_write(sentmarks[bn][num], sentbufend[bn] - sentmarks[bn][num]);
return 1;
}
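/*
 * Copy the current screen line into the next of the two sentence
 * buffers, recording up to nine sentence starts (a space that follows a
 * period).  Returns the number of sentence marks, or -1 on a blank line.
 */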
static int get_sentence_buf(struct vc_data *vc, int read_punc)
{
u_long start, end;
int i, bn;
u_char tmp;
currbuf++;
if (currbuf == 2)
currbuf = 0;
bn = currbuf;
	start = vc->vc_origin + (spk_y * vc->vc_size_row);
	end = start + vc->vc_cols * 2;
numsentences[bn] = 0;
sentmarks[bn][0] = &sentbuf[bn][0];
i = 0;
spk_old_attr = spk_attr;
spk_attr = get_attributes(vc, (u_short *)start);
while (start < end) {
sentbuf[bn][i] = get_char(vc, (u_short *)start, &tmp);
if (i > 0) {
if (sentbuf[bn][i] == SPACE &&
sentbuf[bn][i - 1] == '.' &&
numsentences[bn] < 9) {
/* Sentence Marker */
numsentences[bn]++;
sentmarks[bn][numsentences[bn]] =
&sentbuf[bn][i];
}
}
i++;
start += 2;
if (i >= vc->vc_size_row)
break;
}
for (--i; i >= 0; i--)
if (sentbuf[bn][i] != SPACE)
break;
if (i < 1)
return -1;
sentbuf[bn][++i] = SPACE;
sentbuf[bn][++i] = '\0';
sentbufend[bn] = &sentbuf[bn][i];
return numsentences[bn];
}
static void say_screen_from_to(struct vc_data *vc, u_long from, u_long to)
{
u_long start = vc->vc_origin, end;
if (from > 0)
start += from * vc->vc_size_row;
if (to > vc->vc_rows)
to = vc->vc_rows;
end = vc->vc_origin + (to * vc->vc_size_row);
for (from = start; from < end; from = to) {
to = from + vc->vc_size_row;
say_from_to(vc, from, to, 1);
}
}
static void say_screen(struct vc_data *vc)
{
say_screen_from_to(vc, 0, vc->vc_rows);
}
static void speakup_win_say(struct vc_data *vc)
{
u_long start, end, from, to;
if (win_start < 2) {
synth_printf("%s\n", spk_msg_get(MSG_NO_WINDOW));
return;
}
start = vc->vc_origin + (win_top * vc->vc_size_row);
end = vc->vc_origin + (win_bottom * vc->vc_size_row);
while (start <= end) {
from = start + (win_left * 2);
to = start + (win_right * 2);
say_from_to(vc, from, to, 1);
start += vc->vc_size_row;
}
}
static void top_edge(struct vc_data *vc)
{
spk_parked |= 0x01;
spk_pos = vc->vc_origin + 2 * spk_x;
spk_y = 0;
say_line(vc);
}
static void bottom_edge(struct vc_data *vc)
{
spk_parked |= 0x01;
spk_pos += (vc->vc_rows - spk_y - 1) * vc->vc_size_row;
spk_y = vc->vc_rows - 1;
say_line(vc);
}
static void left_edge(struct vc_data *vc)
{
spk_parked |= 0x01;
spk_pos -= spk_x * 2;
spk_x = 0;
say_char(vc);
}
static void right_edge(struct vc_data *vc)
{
spk_parked |= 0x01;
spk_pos += (vc->vc_cols - spk_x - 1) * 2;
spk_x = vc->vc_cols - 1;
say_char(vc);
}
static void say_first_char(struct vc_data *vc)
{
int i, len = get_line(vc);
u16 ch;
spk_parked |= 0x01;
if (len == 0) {
synth_printf("%s\n", spk_msg_get(MSG_BLANK));
return;
}
for (i = 0; i < len; i++)
if (buf[i] != SPACE)
break;
ch = buf[i];
spk_pos -= (spk_x - i) * 2;
spk_x = i;
synth_printf("%d, ", ++i);
speak_char(ch);
}
static void say_last_char(struct vc_data *vc)
{
int len = get_line(vc);
u16 ch;
spk_parked |= 0x01;
if (len == 0) {
synth_printf("%s\n", spk_msg_get(MSG_BLANK));
return;
}
ch = buf[--len];
spk_pos -= (spk_x - len) * 2;
spk_x = len;
synth_printf("%d, ", ++len);
speak_char(ch);
}
static void say_position(struct vc_data *vc)
{
synth_printf(spk_msg_get(MSG_POS_INFO), spk_y + 1, spk_x + 1,
vc->vc_num + 1);
synth_printf("\n");
}
/* Added by brianb */
static void say_char_num(struct vc_data *vc)
{
u_char tmp;
u16 ch = get_char(vc, (u_short *)spk_pos, &tmp);
synth_printf(spk_msg_get(MSG_CHAR_INFO), ch, ch);
}
/* these are stub functions to keep keyboard.c happy. */
static void say_from_top(struct vc_data *vc)
{
say_screen_from_to(vc, 0, spk_y);
}
static void say_to_bottom(struct vc_data *vc)
{
say_screen_from_to(vc, spk_y, vc->vc_rows);
}
static void say_from_left(struct vc_data *vc)
{
say_line_from_to(vc, 0, spk_x, 1);
}
static void say_to_right(struct vc_data *vc)
{
say_line_from_to(vc, spk_x, vc->vc_cols, 1);
}
/* end of stub functions. */
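/*
 * Core output filter: feeds characters to the synthesizer, applying the
 * current punctuation mask and key echo, and truncating long runs of a
 * repeated non-numeric character (announcing the repeat count instead
 * when "repeats" processing is enabled for that character).
 */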
static void spkup_write(const u16 *in_buf, int count)
{
static int rep_count;
static u16 ch = '\0', old_ch = '\0';
static u_short char_type, last_type;
int in_count = count;
spk_keydown = 0;
while (count--) {
if (cursor_track == read_all_mode) {
/* Insert Sentence Index */
if ((in_buf == sentmarks[bn][currsentence]) &&
(currsentence <= numsentences[bn]))
synth_insert_next_index(currsentence++);
}
ch = *in_buf++;
if (ch < 0x100)
char_type = spk_chartab[ch];
else
char_type = ALPHA;
if (ch == old_ch && !(char_type & B_NUM)) {
if (++rep_count > 2)
continue;
} else {
if ((last_type & CH_RPT) && rep_count > 2) {
synth_printf(" ");
synth_printf(spk_msg_get(MSG_REPEAT_DESC),
++rep_count);
synth_printf(" ");
}
rep_count = 0;
}
if (ch == spk_lastkey) {
rep_count = 0;
if (spk_key_echo == 1 && ch >= MINECHOCHAR)
speak_char(ch);
} else if (char_type & B_ALPHA) {
if ((synth_flags & SF_DEC) && (last_type & PUNC))
synth_buffer_add(SPACE);
synth_putwc_s(ch);
} else if (char_type & B_NUM) {
rep_count = 0;
synth_putwc_s(ch);
} else if (char_type & spk_punc_mask) {
speak_char(ch);
char_type &= ~PUNC; /* for dec nospell processing */
} else if (char_type & SYNTH_OK) {
			/* these are usually puncts like . and , which the
			 * synth needs for expression.  Suppress repeats to
			 * avoid long pauses, and clear the repeat count so
			 * that, with repeats enabled, no spurious repeat
			 * count is spoken.
			 */
if (ch != old_ch)
synth_putwc_s(ch);
else
rep_count = 0;
} else {
/* send space and record position, if next is num overwrite space */
if (old_ch != ch)
synth_buffer_add(SPACE);
else
rep_count = 0;
}
old_ch = ch;
last_type = char_type;
}
spk_lastkey = 0;
if (in_count > 2 && rep_count > 2) {
if (last_type & CH_RPT) {
synth_printf(" ");
synth_printf(spk_msg_get(MSG_REPEAT_DESC2),
++rep_count);
synth_printf(" ");
}
rep_count = 0;
}
}
static const int NUM_CTL_LABELS = (MSG_CTL_END - MSG_CTL_START + 1);
static void read_all_doc(struct vc_data *vc);
static void cursor_done(struct timer_list *unused);
static DEFINE_TIMER(cursor_timer, cursor_done);
static void do_handle_shift(struct vc_data *vc, u_char value, char up_flag)
{
unsigned long flags;
if (!synth || up_flag || spk_killed)
return;
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (cursor_track == read_all_mode) {
switch (value) {
case KVAL(K_SHIFT):
del_timer(&cursor_timer);
spk_shut_up &= 0xfe;
spk_do_flush();
read_all_doc(vc);
break;
case KVAL(K_CTRL):
del_timer(&cursor_timer);
cursor_track = prev_cursor_track;
spk_shut_up &= 0xfe;
spk_do_flush();
break;
}
} else {
spk_shut_up &= 0xfe;
spk_do_flush();
}
if (spk_say_ctrl && value < NUM_CTL_LABELS)
synth_printf("%s", spk_msg_get(MSG_CTL_START + value));
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
static void do_handle_latin(struct vc_data *vc, u_char value, char up_flag)
{
unsigned long flags;
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (up_flag) {
spk_lastkey = 0;
spk_keydown = 0;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return;
}
if (!synth || spk_killed) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return;
}
spk_shut_up &= 0xfe;
spk_lastkey = value;
spk_keydown++;
spk_parked &= 0xfe;
if (spk_key_echo == 2 && value >= MINECHOCHAR)
speak_char(value);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
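/*
 * Load a key map into k_buffer.  The map layout is: version byte, key
 * and shift-state counts, the list of shift-state values, a flags byte,
 * then one row per key (keycode followed by one function per state).
 * Returns 0 on success or -EINVAL if the map is malformed.
 */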
int spk_set_key_info(const u_char *key_info, u_char *k_buffer)
{
int i = 0, states, key_data_len;
const u_char *cp = key_info;
u_char *cp1 = k_buffer;
u_char ch, version, num_keys;
version = *cp++;
if (version != KEY_MAP_VER) {
pr_debug("version found %d should be %d\n",
version, KEY_MAP_VER);
return -EINVAL;
}
num_keys = *cp;
states = (int)cp[1];
key_data_len = (states + 1) * (num_keys + 1);
if (key_data_len + SHIFT_TBL_SIZE + 4 >= sizeof(spk_key_buf)) {
pr_debug("too many key_infos (%d over %u)\n",
key_data_len + SHIFT_TBL_SIZE + 4,
(unsigned int)(sizeof(spk_key_buf)));
return -EINVAL;
}
memset(k_buffer, 0, SHIFT_TBL_SIZE);
memset(spk_our_keys, 0, sizeof(spk_our_keys));
spk_shift_table = k_buffer;
spk_our_keys[0] = spk_shift_table;
cp1 += SHIFT_TBL_SIZE;
memcpy(cp1, cp, key_data_len + 3);
/* get num_keys, states and data */
cp1 += 2; /* now pointing at shift states */
for (i = 1; i <= states; i++) {
ch = *cp1++;
if (ch >= SHIFT_TBL_SIZE) {
pr_debug("(%d) not valid shift state (max_allowed = %d)\n",
ch, SHIFT_TBL_SIZE);
return -EINVAL;
}
spk_shift_table[ch] = i;
}
keymap_flags = *cp1++;
while ((ch = *cp1)) {
if (ch >= MAX_KEY) {
pr_debug("(%d), not valid key, (max_allowed = %d)\n",
ch, MAX_KEY);
return -EINVAL;
}
spk_our_keys[ch] = cp1;
cp1 += states + 1;
}
return 0;
}
enum spk_vars_id {
BELL_POS_ID = 0, SPELL_DELAY_ID, ATTRIB_BLEEP_ID,
BLEEPS_ID, BLEEP_TIME_ID, PUNC_LEVEL_ID,
READING_PUNC_ID, CURSOR_TIME_ID, SAY_CONTROL_ID,
SAY_WORD_CTL_ID, NO_INTERRUPT_ID, KEY_ECHO_ID,
CUR_PHONETIC_ID, V_LAST_VAR_ID, NB_ID
};
static struct var_t spk_vars[NB_ID] = {
/* bell must be first to set high limit */
[BELL_POS_ID] = { BELL_POS, .u.n = {NULL, 0, 0, 0, 0, 0, NULL} },
[SPELL_DELAY_ID] = { SPELL_DELAY, .u.n = {NULL, 0, 0, 4, 0, 0, NULL} },
[ATTRIB_BLEEP_ID] = { ATTRIB_BLEEP, .u.n = {NULL, 1, 0, 3, 0, 0, NULL} },
[BLEEPS_ID] = { BLEEPS, .u.n = {NULL, 3, 0, 3, 0, 0, NULL} },
[BLEEP_TIME_ID] = { BLEEP_TIME, .u.n = {NULL, 30, 1, 200, 0, 0, NULL} },
[PUNC_LEVEL_ID] = { PUNC_LEVEL, .u.n = {NULL, 1, 0, 4, 0, 0, NULL} },
[READING_PUNC_ID] = { READING_PUNC, .u.n = {NULL, 1, 0, 4, 0, 0, NULL} },
[CURSOR_TIME_ID] = { CURSOR_TIME, .u.n = {NULL, 120, 50, 600, 0, 0, NULL} },
	[SAY_CONTROL_ID] = { SAY_CONTROL, TOGGLE_0 },
	[SAY_WORD_CTL_ID] = { SAY_WORD_CTL, TOGGLE_0 },
	[NO_INTERRUPT_ID] = { NO_INTERRUPT, TOGGLE_0 },
[KEY_ECHO_ID] = { KEY_ECHO, .u.n = {NULL, 1, 0, 2, 0, 0, NULL} },
[CUR_PHONETIC_ID] = { CUR_PHONETIC, .u.n = {NULL, 0, 0, 1, 0, 0, NULL} },
V_LAST_VAR
};
static void toggle_cursoring(struct vc_data *vc)
{
if (cursor_track == read_all_mode)
cursor_track = prev_cursor_track;
if (++cursor_track >= CT_Max)
cursor_track = 0;
synth_printf("%s\n", spk_msg_get(MSG_CURSOR_MSGS_START + cursor_track));
}
void spk_reset_default_chars(void)
{
int i;
/* First, free any non-default */
for (i = 0; i < 256; i++) {
if (spk_characters[i] &&
(spk_characters[i] != spk_default_chars[i]))
kfree(spk_characters[i]);
}
memcpy(spk_characters, spk_default_chars, sizeof(spk_default_chars));
}
void spk_reset_default_chartab(void)
{
memcpy(spk_chartab, default_chartab, sizeof(default_chartab));
}
static const struct st_bits_data *pb_edit;
static int edit_bits(struct vc_data *vc, u_char type, u_char ch, u_short key)
{
short mask = pb_edit->mask, ch_type = spk_chartab[ch];
if (type != KT_LATIN || (ch_type & B_NUM) || ch < SPACE)
return -1;
if (ch == SPACE) {
synth_printf("%s\n", spk_msg_get(MSG_EDIT_DONE));
spk_special_handler = NULL;
return 1;
}
if (mask < PUNC && !(ch_type & PUNC))
return -1;
spk_chartab[ch] ^= mask;
speak_char(ch);
synth_printf(" %s\n",
(spk_chartab[ch] & mask) ? spk_msg_get(MSG_ON) :
spk_msg_get(MSG_OFF));
return 1;
}
/* Allocation concurrency is protected by the console semaphore */
static int speakup_allocate(struct vc_data *vc, gfp_t gfp_flags)
{
int vc_num;
vc_num = vc->vc_num;
if (!speakup_console[vc_num]) {
speakup_console[vc_num] = kzalloc(sizeof(*speakup_console[0]),
gfp_flags);
if (!speakup_console[vc_num])
return -ENOMEM;
speakup_date(vc);
} else if (!spk_parked) {
speakup_date(vc);
}
return 0;
}
static void speakup_deallocate(struct vc_data *vc)
{
int vc_num;
vc_num = vc->vc_num;
kfree(speakup_console[vc_num]);
speakup_console[vc_num] = NULL;
}
enum read_all_command {
RA_NEXT_SENT = KVAL(K_DOWN)+1,
RA_PREV_LINE = KVAL(K_LEFT)+1,
RA_NEXT_LINE = KVAL(K_RIGHT)+1,
RA_PREV_SENT = KVAL(K_UP)+1,
RA_DOWN_ARROW,
RA_TIMER,
RA_FIND_NEXT_SENT,
RA_FIND_PREV_SENT,
};
static u_char is_cursor;
static u_long old_cursor_pos, old_cursor_x, old_cursor_y;
static int cursor_con;
static void reset_highlight_buffers(struct vc_data *);
static enum read_all_command read_all_key;
static int in_keyboard_notifier;
static void start_read_all_timer(struct vc_data *vc, enum read_all_command command);
static void kbd_fakekey2(struct vc_data *vc, enum read_all_command command)
{
del_timer(&cursor_timer);
speakup_fake_down_arrow();
start_read_all_timer(vc, command);
}
static void read_all_doc(struct vc_data *vc)
{
if ((vc->vc_num != fg_console) || !synth || spk_shut_up)
return;
if (!synth_supports_indexing())
return;
if (cursor_track != read_all_mode)
prev_cursor_track = cursor_track;
cursor_track = read_all_mode;
spk_reset_index_count(0);
if (get_sentence_buf(vc, 0) == -1) {
del_timer(&cursor_timer);
if (!in_keyboard_notifier)
speakup_fake_down_arrow();
start_read_all_timer(vc, RA_DOWN_ARROW);
} else {
say_sentence_num(0, 0);
synth_insert_next_index(0);
start_read_all_timer(vc, RA_TIMER);
}
}
static void stop_read_all(struct vc_data *vc)
{
del_timer(&cursor_timer);
cursor_track = prev_cursor_track;
spk_shut_up &= 0xfe;
spk_do_flush();
}
static void start_read_all_timer(struct vc_data *vc, enum read_all_command command)
{
struct var_t *cursor_timeout;
cursor_con = vc->vc_num;
read_all_key = command;
cursor_timeout = spk_get_var(CURSOR_TIME);
mod_timer(&cursor_timer,
jiffies + msecs_to_jiffies(cursor_timeout->u.n.value));
}
static void handle_cursor_read_all(struct vc_data *vc, enum read_all_command command)
{
int indcount, sentcount, rv, sn;
switch (command) {
case RA_NEXT_SENT:
/* Get Current Sentence */
spk_get_index_count(&indcount, &sentcount);
/*printk("%d %d ", indcount, sentcount); */
spk_reset_index_count(sentcount + 1);
if (indcount == 1) {
if (!say_sentence_num(sentcount + 1, 0)) {
kbd_fakekey2(vc, RA_FIND_NEXT_SENT);
return;
}
synth_insert_next_index(0);
} else {
sn = 0;
if (!say_sentence_num(sentcount + 1, 1)) {
sn = 1;
spk_reset_index_count(sn);
} else {
synth_insert_next_index(0);
}
if (!say_sentence_num(sn, 0)) {
kbd_fakekey2(vc, RA_FIND_NEXT_SENT);
return;
}
synth_insert_next_index(0);
}
start_read_all_timer(vc, RA_TIMER);
break;
case RA_PREV_SENT:
break;
case RA_NEXT_LINE:
read_all_doc(vc);
break;
case RA_PREV_LINE:
break;
case RA_DOWN_ARROW:
if (get_sentence_buf(vc, 0) == -1) {
kbd_fakekey2(vc, RA_DOWN_ARROW);
} else {
say_sentence_num(0, 0);
synth_insert_next_index(0);
start_read_all_timer(vc, RA_TIMER);
}
break;
case RA_FIND_NEXT_SENT:
rv = get_sentence_buf(vc, 0);
if (rv == -1)
read_all_doc(vc);
if (rv == 0) {
kbd_fakekey2(vc, RA_FIND_NEXT_SENT);
} else {
say_sentence_num(1, 0);
synth_insert_next_index(0);
start_read_all_timer(vc, RA_TIMER);
}
break;
case RA_FIND_PREV_SENT:
break;
case RA_TIMER:
spk_get_index_count(&indcount, &sentcount);
if (indcount < 2)
kbd_fakekey2(vc, RA_DOWN_ARROW);
else
start_read_all_timer(vc, RA_TIMER);
break;
}
}
static int pre_handle_cursor(struct vc_data *vc, u_char value, char up_flag)
{
unsigned long flags;
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (cursor_track == read_all_mode) {
spk_parked &= 0xfe;
if (!synth || up_flag || spk_shut_up) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return NOTIFY_STOP;
}
del_timer(&cursor_timer);
spk_shut_up &= 0xfe;
spk_do_flush();
start_read_all_timer(vc, value + 1);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return NOTIFY_STOP;
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return NOTIFY_OK;
}
static void do_handle_cursor(struct vc_data *vc, u_char value, char up_flag)
{
unsigned long flags;
struct var_t *cursor_timeout;
spin_lock_irqsave(&speakup_info.spinlock, flags);
spk_parked &= 0xfe;
if (!synth || up_flag || spk_shut_up || cursor_track == CT_Off) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return;
}
spk_shut_up &= 0xfe;
if (spk_no_intr)
spk_do_flush();
/* the key press flushes if !no_inter but we want to flush on cursor
* moves regardless of no_inter state
*/
is_cursor = value + 1;
old_cursor_pos = vc->vc_pos;
old_cursor_x = vc->state.x;
old_cursor_y = vc->state.y;
speakup_console[vc->vc_num]->ht.cy = vc->state.y;
cursor_con = vc->vc_num;
if (cursor_track == CT_Highlight)
reset_highlight_buffers(vc);
cursor_timeout = spk_get_var(CURSOR_TIME);
mod_timer(&cursor_timer,
jiffies + msecs_to_jiffies(cursor_timeout->u.n.value));
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
static void update_color_buffer(struct vc_data *vc, const u16 *ic, int len)
{
int i, bi, hi;
int vc_num = vc->vc_num;
bi = (vc->vc_attr & 0x70) >> 4;
hi = speakup_console[vc_num]->ht.highsize[bi];
i = 0;
if (speakup_console[vc_num]->ht.highsize[bi] == 0) {
speakup_console[vc_num]->ht.rpos[bi] = vc->vc_pos;
speakup_console[vc_num]->ht.rx[bi] = vc->state.x;
speakup_console[vc_num]->ht.ry[bi] = vc->state.y;
}
while ((hi < COLOR_BUFFER_SIZE) && (i < len)) {
if (ic[i] > 32) {
speakup_console[vc_num]->ht.highbuf[bi][hi] = ic[i];
hi++;
} else if ((ic[i] == 32) && (hi != 0)) {
if (speakup_console[vc_num]->ht.highbuf[bi][hi - 1] !=
32) {
speakup_console[vc_num]->ht.highbuf[bi][hi] =
ic[i];
hi++;
}
}
i++;
}
speakup_console[vc_num]->ht.highsize[bi] = hi;
}
static void reset_highlight_buffers(struct vc_data *vc)
{
int i;
int vc_num = vc->vc_num;
for (i = 0; i < 8; i++)
speakup_console[vc_num]->ht.highsize[i] = 0;
}
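/*
 * Tally how many cells use each of the eight background colours and
 * return the number of distinct colours present on the screen.
 */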
static int count_highlight_color(struct vc_data *vc)
{
int i, bg;
int cc;
int vc_num = vc->vc_num;
u16 ch;
u16 *start = (u16 *)vc->vc_origin;
for (i = 0; i < 8; i++)
speakup_console[vc_num]->ht.bgcount[i] = 0;
for (i = 0; i < vc->vc_rows; i++) {
u16 *end = start + vc->vc_cols * 2;
u16 *ptr;
for (ptr = start; ptr < end; ptr++) {
ch = get_attributes(vc, ptr);
bg = (ch & 0x70) >> 4;
speakup_console[vc_num]->ht.bgcount[bg]++;
}
start += vc->vc_size_row;
}
cc = 0;
for (i = 0; i < 8; i++)
if (speakup_console[vc_num]->ht.bgcount[i] > 0)
cc++;
return cc;
}
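/*
 * Sort the background colours by increasing cell count and return the
 * rarest one that captured some text, or -1 if none did.
 */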
static int get_highlight_color(struct vc_data *vc)
{
int i, j;
unsigned int cptr[8];
int vc_num = vc->vc_num;
for (i = 0; i < 8; i++)
cptr[i] = i;
for (i = 0; i < 7; i++)
for (j = i + 1; j < 8; j++)
if (speakup_console[vc_num]->ht.bgcount[cptr[i]] >
speakup_console[vc_num]->ht.bgcount[cptr[j]])
swap(cptr[i], cptr[j]);
for (i = 0; i < 8; i++)
if (speakup_console[vc_num]->ht.bgcount[cptr[i]] != 0)
if (speakup_console[vc_num]->ht.highsize[cptr[i]] > 0)
return cptr[i];
return -1;
}
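/*
 * Speak the text collected in the rarest background colour, treating it
 * as the current highlight.  Returns 1 if something was spoken.
 */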
static int speak_highlight(struct vc_data *vc)
{
int hc, d;
int vc_num = vc->vc_num;
if (count_highlight_color(vc) == 1)
return 0;
hc = get_highlight_color(vc);
if (hc != -1) {
d = vc->state.y - speakup_console[vc_num]->ht.cy;
if ((d == 1) || (d == -1))
if (speakup_console[vc_num]->ht.ry[hc] != vc->state.y)
return 0;
spk_parked |= 0x01;
spk_do_flush();
spkup_write(speakup_console[vc_num]->ht.highbuf[hc],
speakup_console[vc_num]->ht.highsize[hc]);
spk_pos = spk_cp = speakup_console[vc_num]->ht.rpos[hc];
spk_x = spk_cx = speakup_console[vc_num]->ht.rx[hc];
spk_y = spk_cy = speakup_console[vc_num]->ht.ry[hc];
return 1;
}
return 0;
}
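/*
 * Timer callback run once the cursor has settled: depending on the
 * tracking mode, speak the highlight, the review window, the whole
 * line, or just the character under the cursor.
 */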
static void cursor_done(struct timer_list *unused)
{
struct vc_data *vc = vc_cons[cursor_con].d;
unsigned long flags;
del_timer(&cursor_timer);
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (cursor_con != fg_console) {
is_cursor = 0;
goto out;
}
speakup_date(vc);
if (win_enabled) {
if (vc->state.x >= win_left && vc->state.x <= win_right &&
vc->state.y >= win_top && vc->state.y <= win_bottom) {
spk_keydown = 0;
is_cursor = 0;
goto out;
}
}
if (cursor_track == read_all_mode) {
handle_cursor_read_all(vc, read_all_key);
goto out;
}
if (cursor_track == CT_Highlight) {
if (speak_highlight(vc)) {
spk_keydown = 0;
is_cursor = 0;
goto out;
}
}
if (cursor_track == CT_Window)
speakup_win_say(vc);
else if (is_cursor == 1 || is_cursor == 4)
say_line_from_to(vc, 0, vc->vc_cols, 0);
else {
if (spk_cur_phonetic == 1)
say_phonetic_char(vc);
else
say_char(vc);
}
spk_keydown = 0;
is_cursor = 0;
out:
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
/* called by: vt_notifier_call() */
static void speakup_bs(struct vc_data *vc)
{
unsigned long flags;
if (!speakup_console[vc->vc_num])
return;
if (!spin_trylock_irqsave(&speakup_info.spinlock, flags))
/* Speakup output, discard */
return;
if (!spk_parked)
speakup_date(vc);
if (spk_shut_up || !synth) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return;
}
if (vc->vc_num == fg_console && spk_keydown) {
spk_keydown = 0;
if (!is_cursor)
say_char(vc);
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
/* called by: vt_notifier_call() */
static void speakup_con_write(struct vc_data *vc, u16 *str, int len)
{
unsigned long flags;
if ((vc->vc_num != fg_console) || spk_shut_up || !synth)
return;
if (!spin_trylock_irqsave(&speakup_info.spinlock, flags))
/* Speakup output, discard */
return;
if (spk_bell_pos && spk_keydown && (vc->state.x == spk_bell_pos - 1))
bleep(3);
if ((is_cursor) || (cursor_track == read_all_mode)) {
if (cursor_track == CT_Highlight)
update_color_buffer(vc, str, len);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return;
}
if (win_enabled) {
if (vc->state.x >= win_left && vc->state.x <= win_right &&
vc->state.y >= win_top && vc->state.y <= win_bottom) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return;
}
}
spkup_write(str, len);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
static void speakup_con_update(struct vc_data *vc)
{
unsigned long flags;
if (!speakup_console[vc->vc_num] || spk_parked || !synth)
return;
if (!spin_trylock_irqsave(&speakup_info.spinlock, flags))
/* Speakup output, discard */
return;
speakup_date(vc);
if (vc->vc_mode == KD_GRAPHICS && !spk_paused && spk_str_pause[0]) {
synth_printf("%s", spk_str_pause);
spk_paused = true;
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
static void do_handle_spec(struct vc_data *vc, u_char value, char up_flag)
{
unsigned long flags;
int on_off = 2;
char *label;
if (!synth || up_flag || spk_killed)
return;
spin_lock_irqsave(&speakup_info.spinlock, flags);
spk_shut_up &= 0xfe;
if (spk_no_intr)
spk_do_flush();
switch (value) {
case KVAL(K_CAPS):
label = spk_msg_get(MSG_KEYNAME_CAPSLOCK);
on_off = vt_get_leds(fg_console, VC_CAPSLOCK);
break;
case KVAL(K_NUM):
label = spk_msg_get(MSG_KEYNAME_NUMLOCK);
on_off = vt_get_leds(fg_console, VC_NUMLOCK);
break;
case KVAL(K_HOLD):
label = spk_msg_get(MSG_KEYNAME_SCROLLLOCK);
on_off = vt_get_leds(fg_console, VC_SCROLLOCK);
if (speakup_console[vc->vc_num])
speakup_console[vc->vc_num]->tty_stopped = on_off;
break;
default:
spk_parked &= 0xfe;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return;
}
if (on_off < 2)
synth_printf("%s %s\n",
label, spk_msg_get(MSG_STATUS_START + on_off));
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
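/*
 * Handle an increment/decrement hot key: odd values increment and even
 * values decrement the matching numeric variable, then speak its name
 * (unless this is a quick repeated press) followed by the new value.
 */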
static int inc_dec_var(u_char value)
{
struct st_var_header *p_header;
struct var_t *var_data;
char num_buf[32];
char *cp = num_buf;
char *pn;
int var_id = (int)value - VAR_START;
int how = (var_id & 1) ? E_INC : E_DEC;
var_id = var_id / 2 + FIRST_SET_VAR;
p_header = spk_get_var_header(var_id);
if (!p_header)
return -1;
if (p_header->var_type != VAR_NUM)
return -1;
var_data = p_header->data;
if (spk_set_num_var(1, p_header, how) != 0)
return -1;
if (!spk_close_press) {
for (pn = p_header->name; *pn; pn++) {
if (*pn == '_')
				*cp++ = SPACE;	/* speak '_' as a space */
else
*cp++ = *pn;
}
}
snprintf(cp, sizeof(num_buf) - (cp - num_buf), " %d ",
var_data->u.n.value);
synth_printf("%s", num_buf);
return 0;
}
static void speakup_win_set(struct vc_data *vc)
{
char info[40];
if (win_start > 1) {
synth_printf("%s\n", spk_msg_get(MSG_WINDOW_ALREADY_SET));
return;
}
if (spk_x < win_left || spk_y < win_top) {
synth_printf("%s\n", spk_msg_get(MSG_END_BEFORE_START));
return;
}
if (win_start && spk_x == win_left && spk_y == win_top) {
win_left = 0;
win_right = vc->vc_cols - 1;
win_bottom = spk_y;
snprintf(info, sizeof(info), spk_msg_get(MSG_WINDOW_LINE),
(int)win_top + 1);
} else {
if (!win_start) {
win_top = spk_y;
win_left = spk_x;
} else {
win_bottom = spk_y;
win_right = spk_x;
}
snprintf(info, sizeof(info), spk_msg_get(MSG_WINDOW_BOUNDARY),
(win_start) ?
spk_msg_get(MSG_END) : spk_msg_get(MSG_START),
(int)spk_y + 1, (int)spk_x + 1);
}
synth_printf("%s\n", info);
win_start++;
}
static void speakup_win_clear(struct vc_data *vc)
{
win_top = 0;
win_bottom = 0;
win_left = 0;
win_right = 0;
win_start = 0;
synth_printf("%s\n", spk_msg_get(MSG_WINDOW_CLEARED));
}
static void speakup_win_enable(struct vc_data *vc)
{
if (win_start < 2) {
synth_printf("%s\n", spk_msg_get(MSG_NO_WINDOW));
return;
}
win_enabled ^= 1;
if (win_enabled)
synth_printf("%s\n", spk_msg_get(MSG_WINDOW_SILENCED));
else
synth_printf("%s\n", spk_msg_get(MSG_WINDOW_SILENCE_DISABLED));
}
static void speakup_bits(struct vc_data *vc)
{
int val = this_speakup_key - (FIRST_EDIT_BITS - 1);
if (spk_special_handler || val < 1 || val > 6) {
synth_printf("%s\n", spk_msg_get(MSG_ERROR));
return;
}
pb_edit = &spk_punc_info[val];
synth_printf(spk_msg_get(MSG_EDIT_PROMPT), pb_edit->name);
spk_special_handler = edit_bits;
}
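/*
 * Special handler for the goto command: accepts an optional leading '+'
 * or '-', a decimal number, and a trailing 'x' (column) or 'y' (line).
 * The signed forms move relative to the current reading position.
 */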
static int handle_goto(struct vc_data *vc, u_char type, u_char ch, u_short key)
{
static u_char goto_buf[8];
static int num;
int maxlen;
char *cp;
u16 wch;
if (type == KT_SPKUP && ch == SPEAKUP_GOTO)
goto do_goto;
if (type == KT_LATIN && ch == '\n')
goto do_goto;
if (type != 0)
goto oops;
if (ch == 8) {
if (num == 0)
return -1;
wch = goto_buf[--num];
goto_buf[num] = '\0';
spkup_write(&wch, 1);
return 1;
}
if (ch < '+' || ch > 'y')
goto oops;
wch = ch;
goto_buf[num++] = ch;
goto_buf[num] = '\0';
spkup_write(&wch, 1);
maxlen = (*goto_buf >= '0') ? 3 : 4;
if ((ch == '+' || ch == '-') && num == 1)
return 1;
if (ch >= '0' && ch <= '9' && num < maxlen)
return 1;
if (num < maxlen - 1 || num > maxlen)
goto oops;
if (ch < 'x' || ch > 'y') {
oops:
if (!spk_killed)
synth_printf(" %s\n", spk_msg_get(MSG_GOTO_CANCELED));
goto_buf[num = 0] = '\0';
spk_special_handler = NULL;
return 1;
}
/* Do not replace with kstrtoul: here we need cp to be updated */
goto_pos = simple_strtoul(goto_buf, &cp, 10);
if (*cp == 'x') {
if (*goto_buf < '0')
goto_pos += spk_x;
else if (goto_pos > 0)
goto_pos--;
if (goto_pos >= vc->vc_cols)
goto_pos = vc->vc_cols - 1;
goto_x = 1;
} else {
if (*goto_buf < '0')
goto_pos += spk_y;
else if (goto_pos > 0)
goto_pos--;
if (goto_pos >= vc->vc_rows)
goto_pos = vc->vc_rows - 1;
goto_x = 0;
}
goto_buf[num = 0] = '\0';
do_goto:
spk_special_handler = NULL;
spk_parked |= 0x01;
if (goto_x) {
spk_pos -= spk_x * 2;
spk_x = goto_pos;
spk_pos += goto_pos * 2;
say_word(vc);
} else {
spk_y = goto_pos;
spk_pos = vc->vc_origin + (goto_pos * vc->vc_size_row);
say_line(vc);
}
return 1;
}
static void speakup_goto(struct vc_data *vc)
{
if (spk_special_handler) {
synth_printf("%s\n", spk_msg_get(MSG_ERROR));
return;
}
synth_printf("%s\n", spk_msg_get(MSG_GOTO));
spk_special_handler = handle_goto;
}
static void speakup_help(struct vc_data *vc)
{
spk_handle_help(vc, KT_SPKUP, SPEAKUP_HELP, 0);
}
static void do_nothing(struct vc_data *vc)
{
return; /* flush done in do_spkup */
}
static u_char key_speakup, spk_key_locked;
static void speakup_lock(struct vc_data *vc)
{
if (!spk_key_locked) {
spk_key_locked = 16;
key_speakup = 16;
} else {
spk_key_locked = 0;
key_speakup = 0;
}
}
typedef void (*spkup_hand) (struct vc_data *);
static spkup_hand spkup_handler[] = {
/* must be ordered same as defines in speakup.h */
do_nothing, speakup_goto, speech_kill, speakup_shut_up,
speakup_cut, speakup_paste, say_first_char, say_last_char,
say_char, say_prev_char, say_next_char,
say_word, say_prev_word, say_next_word,
say_line, say_prev_line, say_next_line,
top_edge, bottom_edge, left_edge, right_edge,
spell_word, spell_word, say_screen,
say_position, say_attributes,
speakup_off, speakup_parked, say_line, /* this is for indent */
say_from_top, say_to_bottom,
say_from_left, say_to_right,
say_char_num, speakup_bits, speakup_bits, say_phonetic_char,
speakup_bits, speakup_bits, speakup_bits,
speakup_win_set, speakup_win_clear, speakup_win_enable, speakup_win_say,
speakup_lock, speakup_help, toggle_cursoring, read_all_doc, NULL
};
static void do_spkup(struct vc_data *vc, u_char value)
{
if (spk_killed && value != SPEECH_KILL)
return;
spk_keydown = 0;
spk_lastkey = 0;
spk_shut_up &= 0xfe;
this_speakup_key = value;
if (value < SPKUP_MAX_FUNC && spkup_handler[value]) {
spk_do_flush();
(*spkup_handler[value]) (vc);
} else {
if (inc_dec_var(value) < 0)
bleep(9);
}
}
static const char *pad_chars = "0123456789+-*/\015,.?()";
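/*
 * Main keyboard hook, called from the keyboard notifier: translates the
 * keypress through the loaded key map and dispatches review commands,
 * double presses and any active special handler.  Returns nonzero when
 * speakup consumed the key.
 */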
static int
speakup_key(struct vc_data *vc, int shift_state, int keycode, u_short keysym,
int up_flag)
{
unsigned long flags;
int kh;
u_char *key_info;
u_char type = KTYP(keysym), value = KVAL(keysym), new_key = 0;
u_char shift_info, offset;
int ret = 0;
if (!synth)
return 0;
spin_lock_irqsave(&speakup_info.spinlock, flags);
tty = vc->port.tty;
if (type >= 0xf0)
type -= 0xf0;
if (type == KT_PAD &&
(vt_get_leds(fg_console, VC_NUMLOCK))) {
if (up_flag) {
spk_keydown = 0;
goto out;
}
value = pad_chars[value];
spk_lastkey = value;
spk_keydown++;
spk_parked &= 0xfe;
goto no_map;
}
if (keycode >= MAX_KEY)
goto no_map;
key_info = spk_our_keys[keycode];
if (!key_info)
goto no_map;
/* Check valid read all mode keys */
if ((cursor_track == read_all_mode) && (!up_flag)) {
switch (value) {
case KVAL(K_DOWN):
case KVAL(K_UP):
case KVAL(K_LEFT):
case KVAL(K_RIGHT):
case KVAL(K_PGUP):
case KVAL(K_PGDN):
break;
default:
stop_read_all(vc);
break;
}
}
shift_info = (shift_state & 0x0f) + key_speakup;
offset = spk_shift_table[shift_info];
if (offset) {
new_key = key_info[offset];
if (new_key) {
ret = 1;
if (new_key == SPK_KEY) {
if (!spk_key_locked)
key_speakup = (up_flag) ? 0 : 16;
if (up_flag || spk_killed)
goto out;
spk_shut_up &= 0xfe;
spk_do_flush();
goto out;
}
if (up_flag)
goto out;
if (last_keycode == keycode &&
time_after(last_spk_jiffy + MAX_DELAY, jiffies)) {
spk_close_press = 1;
offset = spk_shift_table[shift_info + 32];
/* double press? */
if (offset && key_info[offset])
new_key = key_info[offset];
}
last_keycode = keycode;
last_spk_jiffy = jiffies;
type = KT_SPKUP;
value = new_key;
}
}
no_map:
if (type == KT_SPKUP && !spk_special_handler) {
do_spkup(vc, new_key);
spk_close_press = 0;
ret = 1;
goto out;
}
if (up_flag || spk_killed || type == KT_SHIFT)
goto out;
spk_shut_up &= 0xfe;
kh = (value == KVAL(K_DOWN)) ||
(value == KVAL(K_UP)) ||
(value == KVAL(K_LEFT)) ||
(value == KVAL(K_RIGHT));
if ((cursor_track != read_all_mode) || !kh)
if (!spk_no_intr)
spk_do_flush();
if (spk_special_handler) {
if (type == KT_SPEC && value == 1) {
value = '\n';
type = KT_LATIN;
} else if (type == KT_LETTER) {
type = KT_LATIN;
} else if (value == 0x7f) {
value = 8; /* make del = backspace */
}
ret = (*spk_special_handler) (vc, type, value, keycode);
spk_close_press = 0;
if (ret < 0)
bleep(9);
goto out;
}
last_keycode = 0;
out:
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return ret;
}
static int keyboard_notifier_call(struct notifier_block *nb,
unsigned long code, void *_param)
{
struct keyboard_notifier_param *param = _param;
struct vc_data *vc = param->vc;
int up = !param->down;
int ret = NOTIFY_OK;
static int keycode; /* to hold the current keycode */
in_keyboard_notifier = 1;
if (vc->vc_mode == KD_GRAPHICS)
goto out;
/*
* First, determine whether we are handling a fake keypress on
* the current processor. If we are, then return NOTIFY_OK,
* to pass the keystroke up the chain. This prevents us from
* trying to take the Speakup lock while it is held by the
* processor on which the simulated keystroke was generated.
* Also, the simulated keystrokes should be ignored by Speakup.
*/
if (speakup_fake_key_pressed())
goto out;
switch (code) {
case KBD_KEYCODE:
/* speakup requires keycode and keysym currently */
keycode = param->value;
break;
case KBD_UNBOUND_KEYCODE:
/* not used yet */
break;
case KBD_UNICODE:
/* not used yet */
break;
case KBD_KEYSYM:
if (speakup_key(vc, param->shift, keycode, param->value, up))
ret = NOTIFY_STOP;
else if (KTYP(param->value) == KT_CUR)
ret = pre_handle_cursor(vc, KVAL(param->value), up);
break;
case KBD_POST_KEYSYM:{
unsigned char type = KTYP(param->value) - 0xf0;
unsigned char val = KVAL(param->value);
switch (type) {
case KT_SHIFT:
do_handle_shift(vc, val, up);
break;
case KT_LATIN:
case KT_LETTER:
do_handle_latin(vc, val, up);
break;
case KT_CUR:
do_handle_cursor(vc, val, up);
break;
case KT_SPEC:
do_handle_spec(vc, val, up);
break;
}
break;
}
}
out:
in_keyboard_notifier = 0;
return ret;
}
static int vt_notifier_call(struct notifier_block *nb,
unsigned long code, void *_param)
{
struct vt_notifier_param *param = _param;
struct vc_data *vc = param->vc;
switch (code) {
case VT_ALLOCATE:
if (vc->vc_mode == KD_TEXT)
speakup_allocate(vc, GFP_ATOMIC);
break;
case VT_DEALLOCATE:
speakup_deallocate(vc);
break;
case VT_WRITE:
if (param->c == '\b') {
speakup_bs(vc);
} else {
u16 d = param->c;
speakup_con_write(vc, &d, 1);
}
break;
case VT_UPDATE:
speakup_con_update(vc);
break;
}
return NOTIFY_OK;
}
/* called by: module_exit() */
static void __exit speakup_exit(void)
{
int i;
unregister_keyboard_notifier(&keyboard_notifier_block);
unregister_vt_notifier(&vt_notifier_block);
speakup_unregister_devsynth();
speakup_cancel_selection();
speakup_cancel_paste();
del_timer_sync(&cursor_timer);
kthread_stop(speakup_task);
speakup_task = NULL;
mutex_lock(&spk_mutex);
synth_release();
mutex_unlock(&spk_mutex);
spk_ttyio_unregister_ldisc();
speakup_kobj_exit();
for (i = 0; i < MAX_NR_CONSOLES; i++)
kfree(speakup_console[i]);
speakup_remove_virtual_keyboard();
for (i = 0; i < MAXVARS; i++)
speakup_unregister_var(i);
for (i = 0; i < 256; i++) {
if (spk_characters[i] != spk_default_chars[i])
kfree(spk_characters[i]);
}
spk_free_user_msgs();
}
/* called by: module_init() */
static int __init speakup_init(void)
{
int i;
long err = 0;
struct vc_data *vc = vc_cons[fg_console].d;
struct var_t *var;
/* These first few initializations cannot fail. */
spk_initialize_msgs(); /* Initialize arrays for i18n. */
spk_reset_default_chars();
spk_reset_default_chartab();
spk_strlwr(synth_name);
spk_vars[0].u.n.high = vc->vc_cols;
for (var = spk_vars; var->var_id != MAXVARS; var++)
speakup_register_var(var);
for (var = synth_time_vars;
(var->var_id >= 0) && (var->var_id < MAXVARS); var++)
speakup_register_var(var);
for (i = 1; spk_punc_info[i].mask != 0; i++)
spk_set_mask_bits(NULL, i, 2);
spk_set_key_info(spk_key_defaults, spk_key_buf);
/* From here on out, initializations can fail. */
err = speakup_add_virtual_keyboard();
if (err)
goto error_virtkeyboard;
for (i = 0; i < MAX_NR_CONSOLES; i++)
if (vc_cons[i].d) {
err = speakup_allocate(vc_cons[i].d, GFP_KERNEL);
if (err)
goto error_kobjects;
}
if (spk_quiet_boot)
spk_shut_up |= 0x01;
err = speakup_kobj_init();
if (err)
goto error_kobjects;
spk_ttyio_register_ldisc();
synth_init(synth_name);
speakup_register_devsynth();
/*
* register_devsynth might fail, but this error is not fatal.
* /dev/synth is an extra feature; the rest of Speakup
* will work fine without it.
*/
err = register_keyboard_notifier(&keyboard_notifier_block);
if (err)
goto error_kbdnotifier;
err = register_vt_notifier(&vt_notifier_block);
if (err)
goto error_vtnotifier;
speakup_task = kthread_create(speakup_thread, NULL, "speakup");
if (IS_ERR(speakup_task)) {
err = PTR_ERR(speakup_task);
goto error_task;
}
set_user_nice(speakup_task, 10);
wake_up_process(speakup_task);
pr_info("speakup %s: initialized\n", SPEAKUP_VERSION);
pr_info("synth name on entry is: %s\n", synth_name);
goto out;
error_task:
unregister_vt_notifier(&vt_notifier_block);
error_vtnotifier:
unregister_keyboard_notifier(&keyboard_notifier_block);
del_timer(&cursor_timer);
error_kbdnotifier:
speakup_unregister_devsynth();
mutex_lock(&spk_mutex);
synth_release();
mutex_unlock(&spk_mutex);
speakup_kobj_exit();
error_kobjects:
for (i = 0; i < MAX_NR_CONSOLES; i++)
kfree(speakup_console[i]);
speakup_remove_virtual_keyboard();
error_virtkeyboard:
for (i = 0; i < MAXVARS; i++)
speakup_unregister_var(i);
for (i = 0; i < 256; i++) {
if (spk_characters[i] != spk_default_chars[i])
kfree(spk_characters[i]);
}
spk_free_user_msgs();
out:
return err;
}
module_param_named(bell_pos, spk_vars[BELL_POS_ID].u.n.default_val, int, 0444);
module_param_named(spell_delay, spk_vars[SPELL_DELAY_ID].u.n.default_val, int, 0444);
module_param_named(attrib_bleep, spk_vars[ATTRIB_BLEEP_ID].u.n.default_val, int, 0444);
module_param_named(bleeps, spk_vars[BLEEPS_ID].u.n.default_val, int, 0444);
module_param_named(bleep_time, spk_vars[BLEEP_TIME_ID].u.n.default_val, int, 0444);
module_param_named(punc_level, spk_vars[PUNC_LEVEL_ID].u.n.default_val, int, 0444);
module_param_named(reading_punc, spk_vars[READING_PUNC_ID].u.n.default_val, int, 0444);
module_param_named(cursor_time, spk_vars[CURSOR_TIME_ID].u.n.default_val, int, 0444);
module_param_named(say_control, spk_vars[SAY_CONTROL_ID].u.n.default_val, int, 0444);
module_param_named(say_word_ctl, spk_vars[SAY_WORD_CTL_ID].u.n.default_val, int, 0444);
module_param_named(no_interrupt, spk_vars[NO_INTERRUPT_ID].u.n.default_val, int, 0444);
module_param_named(key_echo, spk_vars[KEY_ECHO_ID].u.n.default_val, int, 0444);
module_param_named(cur_phonetic, spk_vars[CUR_PHONETIC_ID].u.n.default_val, int, 0444);
MODULE_PARM_DESC(bell_pos, "This works much like a typewriter bell. If for example 72 is echoed to bell_pos, it will beep the PC speaker when typing on a line past character 72.");
MODULE_PARM_DESC(spell_delay, "This controls how fast a word is spelled when speakup's spell word review command is pressed.");
MODULE_PARM_DESC(attrib_bleep, "Beeps the PC speaker when there is an attribute change such as background color when using speakup review commands. One = on, zero = off.");
MODULE_PARM_DESC(bleeps, "This controls whether one hears beeps through the PC speaker when using speakup review commands.");
MODULE_PARM_DESC(bleep_time, "This controls the duration of the PC speaker beeps speakup produces.");
MODULE_PARM_DESC(punc_level, "Controls the level of punctuation spoken as the screen is displayed, not reviewed.");
MODULE_PARM_DESC(reading_punc, "This controls the level of punctuation spoken when reviewing the screen with speakup's screen review commands.");
MODULE_PARM_DESC(cursor_time, "This controls cursor delay when using arrow keys.");
MODULE_PARM_DESC(say_control, "This controls if speakup speaks shift, alt and control when those keys are pressed or not.");
MODULE_PARM_DESC(say_word_ctl, "Sets the say_word_ctl on load.");
MODULE_PARM_DESC(no_interrupt, "Controls if typing interrupts output from speakup.");
MODULE_PARM_DESC(key_echo, "Controls if speakup speaks keys when they are typed. One = on, zero = off (don't echo keys).");
MODULE_PARM_DESC(cur_phonetic, "Controls if speakup speaks letters phonetically during navigation. One = on, zero = off (don't speak phonetically).");
module_init(speakup_init);
module_exit(speakup_exit);
| linux-master | drivers/accessibility/speakup/main.c |
// SPDX-License-Identifier: GPL-2.0+
/* genmap.c
* originally written by: Kirk Reiser.
*
 * Copyright (C) 2002 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*/
#include <stdlib.h>
#include <stdio.h>
#include <libgen.h>
#include <string.h>
#include <linux/version.h>
#include <ctype.h>
#include "utils.h"
struct st_key_init {
char *name;
int value, shift;
};
static unsigned char key_data[MAXKEYVAL][16], *kp;
#include "mapdata.h"
static const char delims[] = "\t\n ";
static char *cp;
static int map_ver = 119; /* an arbitrary number so speakup can check */
static int shift_table[17];
static int max_states = 1, flags;
/* flags reserved for later, maybe for individual console maps */
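/*
 * Return the index of state in shift_table, allocating a new slot (and
 * bumping max_states) the first time a shift combination is seen.
 */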
static int get_shift_value(int state)
{
int i;
for (i = 0; shift_table[i] != state; i++) {
if (shift_table[i] == -1) {
if (i >= 16)
oops("too many shift states", NULL);
shift_table[i] = state;
max_states = i+1;
break;
}
}
return i;
}
int
main(int argc, char *argv[])
{
int value, shift_state, i, spk_val = 0, lock_val = 0;
int max_key_used = 0, num_keys_used = 0;
struct st_key *this;
struct st_key_init *p_init;
char buffer[256];
bzero(key_table, sizeof(key_table));
bzero(key_data, sizeof(key_data));
shift_table[0] = 0;
for (i = 1; i <= 16; i++)
shift_table[i] = -1;
if (argc < 2) {
fputs("usage: genmap filename\n", stderr);
exit(1);
}
for (p_init = init_key_data; p_init->name[0] != '.'; p_init++)
add_key(p_init->name, p_init->value, p_init->shift);
open_input(NULL, argv[1]);
while (fgets(buffer, sizeof(buffer), infile)) {
lc++;
value = shift_state = 0;
cp = strtok(buffer, delims);
		if (!cp || *cp == '#')	/* skip blank and comment lines */
continue;
while (cp) {
if (*cp == '=')
break;
this = find_key(cp);
if (this == NULL)
oops("unknown key/modifier", cp);
if (this->shift == is_shift) {
if (value)
oops("modifiers must come first", cp);
shift_state += this->value;
} else if (this->shift == is_input)
value = this->value;
else
oops("bad modifier or key", cp);
cp = strtok(0, delims);
}
if (!cp)
oops("no = found", NULL);
cp = strtok(0, delims);
if (!cp)
oops("no speakup function after =", NULL);
this = find_key(cp);
if (this == NULL || this->shift != is_spk)
oops("invalid speakup function", cp);
i = get_shift_value(shift_state);
if (key_data[value][i]) {
while (--cp > buffer)
if (!*cp)
*cp = ' ';
oops("two functions on same key combination", cp);
}
key_data[value][i] = (char)this->value;
if (value > max_key_used)
max_key_used = value;
}
fclose(infile);
this = find_key("spk_key");
if (this)
spk_val = this->value;
this = find_key("spk_lock");
if (this)
lock_val = this->value;
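/*
 * Duplicate spk_key/spk_lock bindings into the matching state with the
 * spk bit (16) added: the key-up event arrives with that bit set and
 * must still resolve to the same function.
 */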
for (lc = 1; lc <= max_key_used; lc++) {
kp = key_data[lc];
if (!memcmp(key_data[0], kp, 16))
continue;
num_keys_used++;
for (i = 0; i < max_states; i++) {
if (kp[i] != spk_val && kp[i] != lock_val)
continue;
shift_state = shift_table[i];
if (shift_state&16)
continue;
shift_state = get_shift_value(shift_state+16);
kp[shift_state] = kp[i];
/* fill in so we can process the key up, as spk bit will be set */
}
}
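/*
 * Emit the map as a C initializer: map_ver, number of keys and number
 * of states, then the shift table and flags, then one row per key
 * (keycode followed by a function value for each state), terminated by
 * a 0 keycode and map_ver again so the loader can sanity-check it.
 */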
printf("\t%d, %d, %d,\n\t", map_ver, num_keys_used, max_states);
for (i = 0; i < max_states; i++)
printf("%d, ", shift_table[i]);
printf("%d,", flags);
for (lc = 1; lc <= max_key_used; lc++) {
kp = key_data[lc];
if (!memcmp(key_data[0], kp, 16))
continue;
printf("\n\t%d,", lc);
for (i = 0; i < max_states; i++)
printf(" %d,", (unsigned int)kp[i]);
}
printf("\n\t0, %d\n", map_ver);
exit(0);
}
| linux-master | drivers/accessibility/speakup/genmap.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ctype.h>
#include "spk_types.h"
#include "spk_priv.h"
#include "speakup.h"
static struct st_var_header var_headers[] = {
{ "version", VERSION, VAR_PROC, NULL, NULL },
{ "synth_name", SYNTH, VAR_PROC, NULL, NULL },
{ "keymap", KEYMAP, VAR_PROC, NULL, NULL },
{ "silent", SILENT, VAR_PROC, NULL, NULL },
{ "punc_some", PUNC_SOME, VAR_PROC, NULL, NULL },
{ "punc_most", PUNC_MOST, VAR_PROC, NULL, NULL },
{ "punc_all", PUNC_ALL, VAR_PROC, NULL, NULL },
{ "delimiters", DELIM, VAR_PROC, NULL, NULL },
{ "repeats", REPEATS, VAR_PROC, NULL, NULL },
{ "ex_num", EXNUMBER, VAR_PROC, NULL, NULL },
{ "characters", CHARS, VAR_PROC, NULL, NULL },
{ "synth_direct", SYNTH_DIRECT, VAR_PROC, NULL, NULL },
{ "caps_start", CAPS_START, VAR_STRING, spk_str_caps_start, NULL },
{ "caps_stop", CAPS_STOP, VAR_STRING, spk_str_caps_stop, NULL },
{ "delay_time", DELAY, VAR_TIME, NULL, NULL },
{ "trigger_time", TRIGGER, VAR_TIME, NULL, NULL },
{ "jiffy_delta", JIFFY, VAR_TIME, NULL, NULL },
{ "full_time", FULL, VAR_TIME, NULL, NULL },
{ "flush_time", FLUSH, VAR_TIME, NULL, NULL },
{ "spell_delay", SPELL_DELAY, VAR_NUM, &spk_spell_delay, NULL },
{ "bleeps", BLEEPS, VAR_NUM, &spk_bleeps, NULL },
{ "attrib_bleep", ATTRIB_BLEEP, VAR_NUM, &spk_attrib_bleep, NULL },
{ "bleep_time", BLEEP_TIME, VAR_TIME, &spk_bleep_time, NULL },
{ "cursor_time", CURSOR_TIME, VAR_TIME, NULL, NULL },
{ "punc_level", PUNC_LEVEL, VAR_NUM, &spk_punc_level, NULL },
{ "reading_punc", READING_PUNC, VAR_NUM, &spk_reading_punc, NULL },
{ "say_control", SAY_CONTROL, VAR_NUM, &spk_say_ctrl, NULL },
{ "say_word_ctl", SAY_WORD_CTL, VAR_NUM, &spk_say_word_ctl, NULL },
{ "no_interrupt", NO_INTERRUPT, VAR_NUM, &spk_no_intr, NULL },
{ "key_echo", KEY_ECHO, VAR_NUM, &spk_key_echo, NULL },
{ "bell_pos", BELL_POS, VAR_NUM, &spk_bell_pos, NULL },
{ "rate", RATE, VAR_NUM, NULL, NULL },
{ "pitch", PITCH, VAR_NUM, NULL, NULL },
{ "inflection", INFLECTION, VAR_NUM, NULL, NULL },
{ "vol", VOL, VAR_NUM, NULL, NULL },
{ "tone", TONE, VAR_NUM, NULL, NULL },
{ "punct", PUNCT, VAR_NUM, NULL, NULL },
{ "voice", VOICE, VAR_NUM, NULL, NULL },
{ "freq", FREQUENCY, VAR_NUM, NULL, NULL },
{ "lang", LANG, VAR_NUM, NULL, NULL },
{ "chartab", CHARTAB, VAR_PROC, NULL, NULL },
{ "direct", DIRECT, VAR_NUM, NULL, NULL },
{ "pause", PAUSE, VAR_STRING, spk_str_pause, NULL },
{ "cur_phonetic", CUR_PHONETIC, VAR_NUM, &spk_cur_phonetic, NULL },
};
static struct st_var_header *var_ptrs[MAXVARS] = { NULL, NULL, NULL };
static struct punc_var_t punc_vars[] = {
{ PUNC_SOME, 1 },
{ PUNC_MOST, 2 },
{ PUNC_ALL, 3 },
{ DELIM, 4 },
{ REPEATS, 5 },
{ EXNUMBER, 6 },
{ -1, -1 },
};
int spk_chartab_get_value(char *keyword)
{
int value = 0;
if (!strcmp(keyword, "ALPHA"))
value = ALPHA;
else if (!strcmp(keyword, "B_CTL"))
value = B_CTL;
else if (!strcmp(keyword, "WDLM"))
value = WDLM;
else if (!strcmp(keyword, "A_PUNC"))
value = A_PUNC;
else if (!strcmp(keyword, "PUNC"))
value = PUNC;
else if (!strcmp(keyword, "NUM"))
value = NUM;
else if (!strcmp(keyword, "A_CAP"))
value = A_CAP;
else if (!strcmp(keyword, "B_CAPSYM"))
value = B_CAPSYM;
else if (!strcmp(keyword, "B_SYM"))
value = B_SYM;
return value;
}
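/*
 * The first registration lazily indexes var_headers by var_id into
 * var_ptrs; each call then attaches the caller's var_t and resets the
 * variable to its default value.
 */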
void speakup_register_var(struct var_t *var)
{
static char nothing[2] = "\0";
int i;
struct st_var_header *p_header;
BUG_ON(!var || var->var_id < 0 || var->var_id >= MAXVARS);
if (!var_ptrs[0]) {
for (i = 0; i < MAXVARS; i++) {
p_header = &var_headers[i];
var_ptrs[p_header->var_id] = p_header;
p_header->data = NULL;
}
}
p_header = var_ptrs[var->var_id];
if (p_header->data)
return;
p_header->data = var;
switch (p_header->var_type) {
case VAR_STRING:
spk_set_string_var(nothing, p_header, 0);
break;
case VAR_NUM:
case VAR_TIME:
spk_set_num_var(0, p_header, E_DEFAULT);
break;
default:
break;
}
}
void speakup_unregister_var(enum var_id_t var_id)
{
struct st_var_header *p_header;
BUG_ON(var_id < 0 || var_id >= MAXVARS);
p_header = var_ptrs[var_id];
p_header->data = NULL;
}
struct st_var_header *spk_get_var_header(enum var_id_t var_id)
{
struct st_var_header *p_header;
if (var_id < 0 || var_id >= MAXVARS)
return NULL;
p_header = var_ptrs[var_id];
if (!p_header->data)
return NULL;
return p_header;
}
EXPORT_SYMBOL_GPL(spk_get_var_header);
struct st_var_header *spk_var_header_by_name(const char *name)
{
int i;
if (!name)
return NULL;
for (i = 0; i < MAXVARS; i++) {
if (strcmp(name, var_ptrs[i]->name) == 0)
return var_ptrs[i];
}
return NULL;
}
struct var_t *spk_get_var(enum var_id_t var_id)
{
BUG_ON(var_id < 0 || var_id >= MAXVARS);
BUG_ON(!var_ptrs[var_id]);
return var_ptrs[var_id]->data;
}
EXPORT_SYMBOL_GPL(spk_get_var);
struct punc_var_t *spk_get_punc_var(enum var_id_t var_id)
{
struct punc_var_t *rv = NULL;
struct punc_var_t *where;
where = punc_vars;
while ((where->var_id != -1) && (!rv)) {
if (where->var_id == var_id)
rv = where;
else
where++;
}
return rv;
}
/* handlers for setting vars */
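/*
 * spk_set_num_var applies `how' (E_DEFAULT, E_SET, E_INC, E_DEC or
 * E_NEW_DEFAULT) to the variable, range-checks the result against
 * low/high, mirrors it into *p_val (as jiffies for VAR_TIME vars) and,
 * for synthesizer variables, formats synth_fmt and queues it for the
 * synth.
 */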
int spk_set_num_var(int input, struct st_var_header *var, int how)
{
int val;
int *p_val = var->p_val;
char buf[32];
char *cp;
struct var_t *var_data = var->data;
if (!var_data)
return -ENODATA;
val = var_data->u.n.value;
switch (how) {
case E_NEW_DEFAULT:
if (input < var_data->u.n.low || input > var_data->u.n.high)
return -ERANGE;
var_data->u.n.default_val = input;
return 0;
case E_DEFAULT:
val = var_data->u.n.default_val;
break;
case E_SET:
val = input;
break;
case E_INC:
val += input;
break;
case E_DEC:
val -= input;
break;
}
if (val < var_data->u.n.low || val > var_data->u.n.high)
return -ERANGE;
var_data->u.n.value = val;
if (var->var_type == VAR_TIME && p_val) {
*p_val = msecs_to_jiffies(val);
return 0;
}
if (p_val)
*p_val = val;
if (var->var_id == PUNC_LEVEL)
spk_punc_mask = spk_punc_masks[val];
if (var_data->u.n.multiplier != 0)
val *= var_data->u.n.multiplier;
val += var_data->u.n.offset;
if (!synth)
return 0;
if (synth->synth_adjust && synth->synth_adjust(synth, var))
return 0;
if (var->var_id < FIRST_SYNTH_VAR)
return 0;
if (!var_data->u.n.synth_fmt)
return 0;
if (var->var_id == PITCH)
cp = spk_pitch_buff;
else
cp = buf;
if (!var_data->u.n.out_str)
sprintf(cp, var_data->u.n.synth_fmt, (int)val);
else
sprintf(cp, var_data->u.n.synth_fmt,
var_data->u.n.out_str[val]);
synth_printf("%s", cp);
return 0;
}
EXPORT_SYMBOL_GPL(spk_set_num_var);
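/*
 * Copy `page' into the variable's buffer; a zero len restores the
 * default string instead (when one exists) and returns -ERESTART to
 * tell the caller the value was reset rather than written.
 */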
int spk_set_string_var(const char *page, struct st_var_header *var, int len)
{
struct var_t *var_data = var->data;
if (!var_data)
return -ENODATA;
if (len > MAXVARLEN)
return -E2BIG;
if (!len) {
if (!var_data->u.s.default_val)
return 0;
if (!var->p_val)
var->p_val = var_data->u.s.default_val;
if (var->p_val != var_data->u.s.default_val)
strcpy((char *)var->p_val, var_data->u.s.default_val);
return -ERESTART;
} else if (var->p_val) {
strcpy((char *)var->p_val, page);
} else {
return -E2BIG;
}
return 0;
}
/*
* spk_set_mask_bits sets or clears the punc/delim/repeat bits,
* if input is null uses the defaults.
* values for how: 0 clears bits of chars supplied,
* 1 clears all, 2 sets bits for chars
*/
int spk_set_mask_bits(const char *input, const int which, const int how)
{
u_char *cp;
short mask = spk_punc_info[which].mask;
if (how & 1) {
for (cp = (u_char *)spk_punc_info[3].value; *cp; cp++)
spk_chartab[*cp] &= ~mask;
}
cp = (u_char *)input;
if (!cp) {
cp = spk_punc_info[which].value;
} else {
for (; *cp; cp++) {
if (*cp < SPACE)
break;
if (mask < PUNC) {
if (!(spk_chartab[*cp] & PUNC))
break;
} else if (spk_chartab[*cp] & B_NUM) {
break;
}
}
if (*cp)
return -EINVAL;
cp = (u_char *)input;
}
if (how & 2) {
for (; *cp; cp++)
if (*cp > SPACE)
spk_chartab[*cp] |= mask;
} else {
for (; *cp; cp++)
if (*cp > SPACE)
spk_chartab[*cp] &= ~mask;
}
return 0;
}
char *spk_strlwr(char *s)
{
char *p;
if (!s)
return NULL;
for (p = s; *p; p++)
*p = tolower(*p);
return s;
}
char *spk_s2uchar(char *start, char *dest)
{
int val;
/* Do not replace with kstrtoul: here we need start to be updated */
val = simple_strtoul(skip_spaces(start), &start, 10);
if (*start == ',')
start++;
*dest = (u_char)val;
return start;
}
| linux-master | drivers/accessibility/speakup/varhandlers.c |