// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* Description: CoreSight Trace Port Interface Unit driver
*/
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include "coresight-priv.h"
#define TPIU_SUPP_PORTSZ 0x000
#define TPIU_CURR_PORTSZ 0x004
#define TPIU_SUPP_TRIGMODES 0x100
#define TPIU_TRIG_CNTRVAL 0x104
#define TPIU_TRIG_MULT 0x108
#define TPIU_SUPP_TESTPATM 0x200
#define TPIU_CURR_TESTPATM 0x204
#define TPIU_TEST_PATREPCNTR 0x208
#define TPIU_FFSR 0x300
#define TPIU_FFCR 0x304
#define TPIU_FSYNC_CNTR 0x308
#define TPIU_EXTCTL_INPORT 0x400
#define TPIU_EXTCTL_OUTPORT 0x404
#define TPIU_ITTRFLINACK 0xee4
#define TPIU_ITTRFLIN 0xee8
#define TPIU_ITATBDATA0 0xeec
#define TPIU_ITATBCTR2 0xef0
#define TPIU_ITATBCTR1 0xef4
#define TPIU_ITATBCTR0 0xef8
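/*
 * Offsets 0x000-0x404 above cover trace-port configuration (port size,
 * trigger counters, test-pattern generation, formatter/flush control and
 * the EXTCTL ports); the 0xEE4-0xEF8 block holds the IT* integration-test
 * registers defined by the CoreSight architecture.
 */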
/** register definition **/
/* FFSR - 0x300 */
#define FFSR_FT_STOPPED_BIT 1
/* FFCR - 0x304 */
#define FFCR_FON_MAN_BIT 6
#define FFCR_FON_MAN BIT(6)
#define FFCR_STOP_FI BIT(12)
DEFINE_CORESIGHT_DEVLIST(tpiu_devs, "tpiu");
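/* Instances registered through this devlist are named tpiu0, tpiu1, ... */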
/*
* @base: memory mapped base address for this component.
* @atclk: optional clock for the core parts of the TPIU.
* @csdev: component vitals needed by the framework.
*/
struct tpiu_drvdata {
void __iomem *base;
struct clk *atclk;
struct coresight_device *csdev;
};
static void tpiu_enable_hw(struct csdev_access *csa)
{
CS_UNLOCK(csa->base);
/* TODO: fill this up */
CS_LOCK(csa->base);
}
static int tpiu_enable(struct coresight_device *csdev, enum cs_mode mode,
void *__unused)
{
tpiu_enable_hw(&csdev->access);
atomic_inc(&csdev->refcnt);
dev_dbg(&csdev->dev, "TPIU enabled\n");
return 0;
}
static void tpiu_disable_hw(struct csdev_access *csa)
{
CS_UNLOCK(csa->base);
/* Clear formatter and stop on flush */
csdev_access_relaxed_write32(csa, FFCR_STOP_FI, TPIU_FFCR);
/* Generate manual flush */
csdev_access_relaxed_write32(csa, FFCR_STOP_FI | FFCR_FON_MAN, TPIU_FFCR);
/* Wait for flush to complete */
coresight_timeout(csa, TPIU_FFCR, FFCR_FON_MAN_BIT, 0);
/* Wait for formatter to stop */
coresight_timeout(csa, TPIU_FFSR, FFSR_FT_STOPPED_BIT, 1);
CS_LOCK(csa->base);
}
static int tpiu_disable(struct coresight_device *csdev)
{
if (atomic_dec_return(&csdev->refcnt))
return -EBUSY;
tpiu_disable_hw(&csdev->access);
dev_dbg(&csdev->dev, "TPIU disabled\n");
return 0;
}
static const struct coresight_ops_sink tpiu_sink_ops = {
.enable = tpiu_enable,
.disable = tpiu_disable,
};
static const struct coresight_ops tpiu_cs_ops = {
.sink_ops = &tpiu_sink_ops,
};
static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
void __iomem *base;
struct device *dev = &adev->dev;
struct coresight_platform_data *pdata = NULL;
struct tpiu_drvdata *drvdata;
struct resource *res = &adev->res;
struct coresight_desc desc = { 0 };
desc.name = coresight_alloc_device_name(&tpiu_devs, dev);
if (!desc.name)
return -ENOMEM;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
if (ret)
return ret;
}
dev_set_drvdata(dev, drvdata);
/* Validity for the resource is already checked by the AMBA core */
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
drvdata->base = base;
desc.access = CSDEV_ACCESS_IOMEM(base);
/* Disable tpiu to support older devices */
tpiu_disable_hw(&desc.access);
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
dev->platform_data = pdata;
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PORT;
desc.ops = &tpiu_cs_ops;
desc.pdata = pdata;
desc.dev = dev;
drvdata->csdev = coresight_register(&desc);
if (!IS_ERR(drvdata->csdev)) {
pm_runtime_put(&adev->dev);
return 0;
}
return PTR_ERR(drvdata->csdev);
}
static void tpiu_remove(struct amba_device *adev)
{
struct tpiu_drvdata *drvdata = dev_get_drvdata(&adev->dev);
coresight_unregister(drvdata->csdev);
}
#ifdef CONFIG_PM
static int tpiu_runtime_suspend(struct device *dev)
{
struct tpiu_drvdata *drvdata = dev_get_drvdata(dev);
if (drvdata && !IS_ERR(drvdata->atclk))
clk_disable_unprepare(drvdata->atclk);
return 0;
}
static int tpiu_runtime_resume(struct device *dev)
{
struct tpiu_drvdata *drvdata = dev_get_drvdata(dev);
if (drvdata && !IS_ERR(drvdata->atclk))
clk_prepare_enable(drvdata->atclk);
return 0;
}
#endif
static const struct dev_pm_ops tpiu_dev_pm_ops = {
SET_RUNTIME_PM_OPS(tpiu_runtime_suspend, tpiu_runtime_resume, NULL)
};
static const struct amba_id tpiu_ids[] = {
{
.id = 0x000bb912,
.mask = 0x000fffff,
},
{
.id = 0x0004b912,
.mask = 0x0007ffff,
},
{
/* Coresight SoC-600 */
.id = 0x000bb9e7,
.mask = 0x000fffff,
},
{ 0, 0},
};
MODULE_DEVICE_TABLE(amba, tpiu_ids);
static struct amba_driver tpiu_driver = {
.drv = {
.name = "coresight-tpiu",
.owner = THIS_MODULE,
.pm = &tpiu_dev_pm_ops,
.suppress_bind_attrs = true,
},
.probe = tpiu_probe,
.remove = tpiu_remove,
.id_table = tpiu_ids,
};
module_amba_driver(tpiu_driver);
MODULE_AUTHOR("Pratik Patel <[email protected]>");
MODULE_AUTHOR("Mathieu Poirier <[email protected]>");
MODULE_DESCRIPTION("Arm CoreSight TPIU (Trace Port Interface Unit) driver");
MODULE_LICENSE("GPL v2");
/* linux-master: drivers/hwtracing/coresight/coresight-tpiu.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <asm/hardware/cp14.h>
#include "coresight-etm.h"
int etm_readl_cp14(u32 reg, unsigned int *val)
{
switch (reg) {
case ETMCR:
*val = etm_read(ETMCR);
return 0;
case ETMCCR:
*val = etm_read(ETMCCR);
return 0;
case ETMTRIGGER:
*val = etm_read(ETMTRIGGER);
return 0;
case ETMSR:
*val = etm_read(ETMSR);
return 0;
case ETMSCR:
*val = etm_read(ETMSCR);
return 0;
case ETMTSSCR:
*val = etm_read(ETMTSSCR);
return 0;
case ETMTEEVR:
*val = etm_read(ETMTEEVR);
return 0;
case ETMTECR1:
*val = etm_read(ETMTECR1);
return 0;
case ETMFFLR:
*val = etm_read(ETMFFLR);
return 0;
case ETMACVRn(0):
*val = etm_read(ETMACVR0);
return 0;
case ETMACVRn(1):
*val = etm_read(ETMACVR1);
return 0;
case ETMACVRn(2):
*val = etm_read(ETMACVR2);
return 0;
case ETMACVRn(3):
*val = etm_read(ETMACVR3);
return 0;
case ETMACVRn(4):
*val = etm_read(ETMACVR4);
return 0;
case ETMACVRn(5):
*val = etm_read(ETMACVR5);
return 0;
case ETMACVRn(6):
*val = etm_read(ETMACVR6);
return 0;
case ETMACVRn(7):
*val = etm_read(ETMACVR7);
return 0;
case ETMACVRn(8):
*val = etm_read(ETMACVR8);
return 0;
case ETMACVRn(9):
*val = etm_read(ETMACVR9);
return 0;
case ETMACVRn(10):
*val = etm_read(ETMACVR10);
return 0;
case ETMACVRn(11):
*val = etm_read(ETMACVR11);
return 0;
case ETMACVRn(12):
*val = etm_read(ETMACVR12);
return 0;
case ETMACVRn(13):
*val = etm_read(ETMACVR13);
return 0;
case ETMACVRn(14):
*val = etm_read(ETMACVR14);
return 0;
case ETMACVRn(15):
*val = etm_read(ETMACVR15);
return 0;
case ETMACTRn(0):
*val = etm_read(ETMACTR0);
return 0;
case ETMACTRn(1):
*val = etm_read(ETMACTR1);
return 0;
case ETMACTRn(2):
*val = etm_read(ETMACTR2);
return 0;
case ETMACTRn(3):
*val = etm_read(ETMACTR3);
return 0;
case ETMACTRn(4):
*val = etm_read(ETMACTR4);
return 0;
case ETMACTRn(5):
*val = etm_read(ETMACTR5);
return 0;
case ETMACTRn(6):
*val = etm_read(ETMACTR6);
return 0;
case ETMACTRn(7):
*val = etm_read(ETMACTR7);
return 0;
case ETMACTRn(8):
*val = etm_read(ETMACTR8);
return 0;
case ETMACTRn(9):
*val = etm_read(ETMACTR9);
return 0;
case ETMACTRn(10):
*val = etm_read(ETMACTR10);
return 0;
case ETMACTRn(11):
*val = etm_read(ETMACTR11);
return 0;
case ETMACTRn(12):
*val = etm_read(ETMACTR12);
return 0;
case ETMACTRn(13):
*val = etm_read(ETMACTR13);
return 0;
case ETMACTRn(14):
*val = etm_read(ETMACTR14);
return 0;
case ETMACTRn(15):
*val = etm_read(ETMACTR15);
return 0;
case ETMCNTRLDVRn(0):
*val = etm_read(ETMCNTRLDVR0);
return 0;
case ETMCNTRLDVRn(1):
*val = etm_read(ETMCNTRLDVR1);
return 0;
case ETMCNTRLDVRn(2):
*val = etm_read(ETMCNTRLDVR2);
return 0;
case ETMCNTRLDVRn(3):
*val = etm_read(ETMCNTRLDVR3);
return 0;
case ETMCNTENRn(0):
*val = etm_read(ETMCNTENR0);
return 0;
case ETMCNTENRn(1):
*val = etm_read(ETMCNTENR1);
return 0;
case ETMCNTENRn(2):
*val = etm_read(ETMCNTENR2);
return 0;
case ETMCNTENRn(3):
*val = etm_read(ETMCNTENR3);
return 0;
case ETMCNTRLDEVRn(0):
*val = etm_read(ETMCNTRLDEVR0);
return 0;
case ETMCNTRLDEVRn(1):
*val = etm_read(ETMCNTRLDEVR1);
return 0;
case ETMCNTRLDEVRn(2):
*val = etm_read(ETMCNTRLDEVR2);
return 0;
case ETMCNTRLDEVRn(3):
*val = etm_read(ETMCNTRLDEVR3);
return 0;
case ETMCNTVRn(0):
*val = etm_read(ETMCNTVR0);
return 0;
case ETMCNTVRn(1):
*val = etm_read(ETMCNTVR1);
return 0;
case ETMCNTVRn(2):
*val = etm_read(ETMCNTVR2);
return 0;
case ETMCNTVRn(3):
*val = etm_read(ETMCNTVR3);
return 0;
case ETMSQ12EVR:
*val = etm_read(ETMSQ12EVR);
return 0;
case ETMSQ21EVR:
*val = etm_read(ETMSQ21EVR);
return 0;
case ETMSQ23EVR:
*val = etm_read(ETMSQ23EVR);
return 0;
case ETMSQ31EVR:
*val = etm_read(ETMSQ31EVR);
return 0;
case ETMSQ32EVR:
*val = etm_read(ETMSQ32EVR);
return 0;
case ETMSQ13EVR:
*val = etm_read(ETMSQ13EVR);
return 0;
case ETMSQR:
*val = etm_read(ETMSQR);
return 0;
case ETMEXTOUTEVRn(0):
*val = etm_read(ETMEXTOUTEVR0);
return 0;
case ETMEXTOUTEVRn(1):
*val = etm_read(ETMEXTOUTEVR1);
return 0;
case ETMEXTOUTEVRn(2):
*val = etm_read(ETMEXTOUTEVR2);
return 0;
case ETMEXTOUTEVRn(3):
*val = etm_read(ETMEXTOUTEVR3);
return 0;
case ETMCIDCVRn(0):
*val = etm_read(ETMCIDCVR0);
return 0;
case ETMCIDCVRn(1):
*val = etm_read(ETMCIDCVR1);
return 0;
case ETMCIDCVRn(2):
*val = etm_read(ETMCIDCVR2);
return 0;
case ETMCIDCMR:
*val = etm_read(ETMCIDCMR);
return 0;
case ETMIMPSPEC0:
*val = etm_read(ETMIMPSPEC0);
return 0;
case ETMIMPSPEC1:
*val = etm_read(ETMIMPSPEC1);
return 0;
case ETMIMPSPEC2:
*val = etm_read(ETMIMPSPEC2);
return 0;
case ETMIMPSPEC3:
*val = etm_read(ETMIMPSPEC3);
return 0;
case ETMIMPSPEC4:
*val = etm_read(ETMIMPSPEC4);
return 0;
case ETMIMPSPEC5:
*val = etm_read(ETMIMPSPEC5);
return 0;
case ETMIMPSPEC6:
*val = etm_read(ETMIMPSPEC6);
return 0;
case ETMIMPSPEC7:
*val = etm_read(ETMIMPSPEC7);
return 0;
case ETMSYNCFR:
*val = etm_read(ETMSYNCFR);
return 0;
case ETMIDR:
*val = etm_read(ETMIDR);
return 0;
case ETMCCER:
*val = etm_read(ETMCCER);
return 0;
case ETMEXTINSELR:
*val = etm_read(ETMEXTINSELR);
return 0;
case ETMTESSEICR:
*val = etm_read(ETMTESSEICR);
return 0;
case ETMEIBCR:
*val = etm_read(ETMEIBCR);
return 0;
case ETMTSEVR:
*val = etm_read(ETMTSEVR);
return 0;
case ETMAUXCR:
*val = etm_read(ETMAUXCR);
return 0;
case ETMTRACEIDR:
*val = etm_read(ETMTRACEIDR);
return 0;
case ETMVMIDCVR:
*val = etm_read(ETMVMIDCVR);
return 0;
case ETMOSLSR:
*val = etm_read(ETMOSLSR);
return 0;
case ETMOSSRR:
*val = etm_read(ETMOSSRR);
return 0;
case ETMPDCR:
*val = etm_read(ETMPDCR);
return 0;
case ETMPDSR:
*val = etm_read(ETMPDSR);
return 0;
default:
*val = 0;
return -EINVAL;
}
}
int etm_writel_cp14(u32 reg, u32 val)
{
switch (reg) {
case ETMCR:
etm_write(val, ETMCR);
break;
case ETMTRIGGER:
etm_write(val, ETMTRIGGER);
break;
case ETMSR:
etm_write(val, ETMSR);
break;
case ETMTSSCR:
etm_write(val, ETMTSSCR);
break;
case ETMTEEVR:
etm_write(val, ETMTEEVR);
break;
case ETMTECR1:
etm_write(val, ETMTECR1);
break;
case ETMFFLR:
etm_write(val, ETMFFLR);
break;
case ETMACVRn(0):
etm_write(val, ETMACVR0);
break;
case ETMACVRn(1):
etm_write(val, ETMACVR1);
break;
case ETMACVRn(2):
etm_write(val, ETMACVR2);
break;
case ETMACVRn(3):
etm_write(val, ETMACVR3);
break;
case ETMACVRn(4):
etm_write(val, ETMACVR4);
break;
case ETMACVRn(5):
etm_write(val, ETMACVR5);
break;
case ETMACVRn(6):
etm_write(val, ETMACVR6);
break;
case ETMACVRn(7):
etm_write(val, ETMACVR7);
break;
case ETMACVRn(8):
etm_write(val, ETMACVR8);
break;
case ETMACVRn(9):
etm_write(val, ETMACVR9);
break;
case ETMACVRn(10):
etm_write(val, ETMACVR10);
break;
case ETMACVRn(11):
etm_write(val, ETMACVR11);
break;
case ETMACVRn(12):
etm_write(val, ETMACVR12);
break;
case ETMACVRn(13):
etm_write(val, ETMACVR13);
break;
case ETMACVRn(14):
etm_write(val, ETMACVR14);
break;
case ETMACVRn(15):
etm_write(val, ETMACVR15);
break;
case ETMACTRn(0):
etm_write(val, ETMACTR0);
break;
case ETMACTRn(1):
etm_write(val, ETMACTR1);
break;
case ETMACTRn(2):
etm_write(val, ETMACTR2);
break;
case ETMACTRn(3):
etm_write(val, ETMACTR3);
break;
case ETMACTRn(4):
etm_write(val, ETMACTR4);
break;
case ETMACTRn(5):
etm_write(val, ETMACTR5);
break;
case ETMACTRn(6):
etm_write(val, ETMACTR6);
break;
case ETMACTRn(7):
etm_write(val, ETMACTR7);
break;
case ETMACTRn(8):
etm_write(val, ETMACTR8);
break;
case ETMACTRn(9):
etm_write(val, ETMACTR9);
break;
case ETMACTRn(10):
etm_write(val, ETMACTR10);
break;
case ETMACTRn(11):
etm_write(val, ETMACTR11);
break;
case ETMACTRn(12):
etm_write(val, ETMACTR12);
break;
case ETMACTRn(13):
etm_write(val, ETMACTR13);
break;
case ETMACTRn(14):
etm_write(val, ETMACTR14);
break;
case ETMACTRn(15):
etm_write(val, ETMACTR15);
break;
case ETMCNTRLDVRn(0):
etm_write(val, ETMCNTRLDVR0);
break;
case ETMCNTRLDVRn(1):
etm_write(val, ETMCNTRLDVR1);
break;
case ETMCNTRLDVRn(2):
etm_write(val, ETMCNTRLDVR2);
break;
case ETMCNTRLDVRn(3):
etm_write(val, ETMCNTRLDVR3);
break;
case ETMCNTENRn(0):
etm_write(val, ETMCNTENR0);
break;
case ETMCNTENRn(1):
etm_write(val, ETMCNTENR1);
break;
case ETMCNTENRn(2):
etm_write(val, ETMCNTENR2);
break;
case ETMCNTENRn(3):
etm_write(val, ETMCNTENR3);
break;
case ETMCNTRLDEVRn(0):
etm_write(val, ETMCNTRLDEVR0);
break;
case ETMCNTRLDEVRn(1):
etm_write(val, ETMCNTRLDEVR1);
break;
case ETMCNTRLDEVRn(2):
etm_write(val, ETMCNTRLDEVR2);
break;
case ETMCNTRLDEVRn(3):
etm_write(val, ETMCNTRLDEVR3);
break;
case ETMCNTVRn(0):
etm_write(val, ETMCNTVR0);
break;
case ETMCNTVRn(1):
etm_write(val, ETMCNTVR1);
break;
case ETMCNTVRn(2):
etm_write(val, ETMCNTVR2);
break;
case ETMCNTVRn(3):
etm_write(val, ETMCNTVR3);
break;
case ETMSQ12EVR:
etm_write(val, ETMSQ12EVR);
break;
case ETMSQ21EVR:
etm_write(val, ETMSQ21EVR);
break;
case ETMSQ23EVR:
etm_write(val, ETMSQ23EVR);
break;
case ETMSQ31EVR:
etm_write(val, ETMSQ31EVR);
break;
case ETMSQ32EVR:
etm_write(val, ETMSQ32EVR);
break;
case ETMSQ13EVR:
etm_write(val, ETMSQ13EVR);
break;
case ETMSQR:
etm_write(val, ETMSQR);
break;
case ETMEXTOUTEVRn(0):
etm_write(val, ETMEXTOUTEVR0);
break;
case ETMEXTOUTEVRn(1):
etm_write(val, ETMEXTOUTEVR1);
break;
case ETMEXTOUTEVRn(2):
etm_write(val, ETMEXTOUTEVR2);
break;
case ETMEXTOUTEVRn(3):
etm_write(val, ETMEXTOUTEVR3);
break;
case ETMCIDCVRn(0):
etm_write(val, ETMCIDCVR0);
break;
case ETMCIDCVRn(1):
etm_write(val, ETMCIDCVR1);
break;
case ETMCIDCVRn(2):
etm_write(val, ETMCIDCVR2);
break;
case ETMCIDCMR:
etm_write(val, ETMCIDCMR);
break;
case ETMIMPSPEC0:
etm_write(val, ETMIMPSPEC0);
break;
case ETMIMPSPEC1:
etm_write(val, ETMIMPSPEC1);
break;
case ETMIMPSPEC2:
etm_write(val, ETMIMPSPEC2);
break;
case ETMIMPSPEC3:
etm_write(val, ETMIMPSPEC3);
break;
case ETMIMPSPEC4:
etm_write(val, ETMIMPSPEC4);
break;
case ETMIMPSPEC5:
etm_write(val, ETMIMPSPEC5);
break;
case ETMIMPSPEC6:
etm_write(val, ETMIMPSPEC6);
break;
case ETMIMPSPEC7:
etm_write(val, ETMIMPSPEC7);
break;
case ETMSYNCFR:
etm_write(val, ETMSYNCFR);
break;
case ETMEXTINSELR:
etm_write(val, ETMEXTINSELR);
break;
case ETMTESSEICR:
etm_write(val, ETMTESSEICR);
break;
case ETMEIBCR:
etm_write(val, ETMEIBCR);
break;
case ETMTSEVR:
etm_write(val, ETMTSEVR);
break;
case ETMAUXCR:
etm_write(val, ETMAUXCR);
break;
case ETMTRACEIDR:
etm_write(val, ETMTRACEIDR);
break;
case ETMVMIDCVR:
etm_write(val, ETMVMIDCVR);
break;
case ETMOSLAR:
etm_write(val, ETMOSLAR);
break;
case ETMOSSRR:
etm_write(val, ETMOSSRR);
break;
case ETMPDCR:
etm_write(val, ETMPDCR);
break;
case ETMPDSR:
etm_write(val, ETMPDSR);
break;
default:
return -EINVAL;
}
return 0;
}
/* linux-master: drivers/hwtracing/coresight/coresight-etm-cp14.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/amba/bus.h>
#include <linux/bitfield.h>
#include <linux/coresight.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "coresight-priv.h"
#include "coresight-tpda.h"
#include "coresight-trace-id.h"
DEFINE_CORESIGHT_DEVLIST(tpda_devs, "tpda");
/* Settings applied before enabling the port control register */
static void tpda_enable_pre_port(struct tpda_drvdata *drvdata)
{
u32 val;
val = readl_relaxed(drvdata->base + TPDA_CR);
val &= ~TPDA_CR_ATID;
val |= FIELD_PREP(TPDA_CR_ATID, drvdata->atid);
writel_relaxed(val, drvdata->base + TPDA_CR);
}
static void tpda_enable_port(struct tpda_drvdata *drvdata, int port)
{
u32 val;
val = readl_relaxed(drvdata->base + TPDA_Pn_CR(port));
/* Enable the port */
val |= TPDA_Pn_CR_ENA;
writel_relaxed(val, drvdata->base + TPDA_Pn_CR(port));
}
static void __tpda_enable(struct tpda_drvdata *drvdata, int port)
{
CS_UNLOCK(drvdata->base);
if (!drvdata->csdev->enable)
tpda_enable_pre_port(drvdata);
tpda_enable_port(drvdata, port);
CS_LOCK(drvdata->base);
}
static int tpda_enable(struct coresight_device *csdev,
struct coresight_connection *in,
struct coresight_connection *out)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
spin_lock(&drvdata->spinlock);
if (atomic_read(&in->dest_refcnt) == 0)
__tpda_enable(drvdata, in->dest_port);
atomic_inc(&in->dest_refcnt);
spin_unlock(&drvdata->spinlock);
dev_dbg(drvdata->dev, "TPDA inport %d enabled.\n", in->dest_port);
return 0;
}
static void __tpda_disable(struct tpda_drvdata *drvdata, int port)
{
u32 val;
CS_UNLOCK(drvdata->base);
val = readl_relaxed(drvdata->base + TPDA_Pn_CR(port));
val &= ~TPDA_Pn_CR_ENA;
writel_relaxed(val, drvdata->base + TPDA_Pn_CR(port));
CS_LOCK(drvdata->base);
}
static void tpda_disable(struct coresight_device *csdev,
struct coresight_connection *in,
struct coresight_connection *out)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
spin_lock(&drvdata->spinlock);
if (atomic_dec_return(&in->dest_refcnt) == 0)
__tpda_disable(drvdata, in->dest_port);
spin_unlock(&drvdata->spinlock);
dev_dbg(drvdata->dev, "TPDA inport %d disabled\n", in->dest_port);
}
static const struct coresight_ops_link tpda_link_ops = {
.enable = tpda_enable,
.disable = tpda_disable,
};
static const struct coresight_ops tpda_cs_ops = {
.link_ops = &tpda_link_ops,
};
static int tpda_init_default_data(struct tpda_drvdata *drvdata)
{
int atid;
/*
 * Each TPDA must have a unique ATID, which identifies the TPDM trace
 * sources connected to it. TPDMs connected to the same TPDA share the
 * same trace ID; when the TPDA packetizes their data, each input port
 * gets a unique channel number so the streams can be decoded.
 */
atid = coresight_trace_id_get_system_id();
if (atid < 0)
return atid;
drvdata->atid = atid;
return 0;
}
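/*
 * Note: the system trace ID claimed in tpda_init_default_data() is handed
 * back in tpda_remove() via coresight_trace_id_put_system_id().
 */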
static int tpda_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
struct device *dev = &adev->dev;
struct coresight_platform_data *pdata;
struct tpda_drvdata *drvdata;
struct coresight_desc desc = { 0 };
void __iomem *base;
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
adev->dev.platform_data = pdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->dev = &adev->dev;
dev_set_drvdata(dev, drvdata);
base = devm_ioremap_resource(dev, &adev->res);
if (IS_ERR(base))
return PTR_ERR(base);
drvdata->base = base;
spin_lock_init(&drvdata->spinlock);
ret = tpda_init_default_data(drvdata);
if (ret)
return ret;
desc.name = coresight_alloc_device_name(&tpda_devs, dev);
if (!desc.name)
return -ENOMEM;
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
desc.ops = &tpda_cs_ops;
desc.pdata = adev->dev.platform_data;
desc.dev = &adev->dev;
desc.access = CSDEV_ACCESS_IOMEM(base);
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
pm_runtime_put(&adev->dev);
dev_dbg(drvdata->dev, "TPDA initialized\n");
return 0;
}
static void tpda_remove(struct amba_device *adev)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(&adev->dev);
coresight_trace_id_put_system_id(drvdata->atid);
coresight_unregister(drvdata->csdev);
}
/*
 * Different TPDA instances carry different peripheral IDs, but only
 * bits [7:0] differ, so those bits are masked out when matching.
 */
static struct amba_id tpda_ids[] = {
{
.id = 0x000f0f00,
.mask = 0x000fff00,
},
{ 0, 0},
};
static struct amba_driver tpda_driver = {
.drv = {
.name = "coresight-tpda",
.owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = tpda_probe,
.remove = tpda_remove,
.id_table = tpda_ids,
};
module_amba_driver(tpda_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Trace, Profiling & Diagnostic Aggregator driver");
/* linux-master: drivers/hwtracing/coresight/coresight-tpda.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
*
* Description: CoreSight Replicator driver
*/
#include <linux/acpi.h>
#include <linux/amba/bus.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include "coresight-priv.h"
#define REPLICATOR_IDFILTER0 0x000
#define REPLICATOR_IDFILTER1 0x004
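/*
 * Each IDFILTERn register gates output port n: a set bit discards the
 * corresponding group of trace IDs, so 0xff blocks all trace flow on a
 * port while 0x0 lets every ID through.
 */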
DEFINE_CORESIGHT_DEVLIST(replicator_devs, "replicator");
/**
* struct replicator_drvdata - specifics associated to a replicator component
* @base: memory mapped base address for this component. Also indicates
* whether this one is programmable or not.
* @atclk: optional clock for the core parts of the replicator.
* @csdev: component vitals needed by the framework
* @spinlock: serialize enable/disable operations.
* @check_idfilter_val: check if the context is lost upon clock removal.
*/
struct replicator_drvdata {
void __iomem *base;
struct clk *atclk;
struct coresight_device *csdev;
spinlock_t spinlock;
bool check_idfilter_val;
};
static void dynamic_replicator_reset(struct replicator_drvdata *drvdata)
{
struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
if (!coresight_claim_device_unlocked(csdev)) {
writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
coresight_disclaim_device_unlocked(csdev);
}
CS_LOCK(drvdata->base);
}
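/*
 * The claim/disclaim calls above use the CoreSight claim tags, which let
 * self-hosted software and an external debugger agree on who currently
 * owns the replicator before reprogramming it.
 */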
/*
* replicator_reset : Reset the replicator configuration to sane values.
*/
static inline void replicator_reset(struct replicator_drvdata *drvdata)
{
if (drvdata->base)
dynamic_replicator_reset(drvdata);
}
static int dynamic_replicator_enable(struct replicator_drvdata *drvdata,
int inport, int outport)
{
int rc = 0;
u32 id0val, id1val;
struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
id0val = readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0);
id1val = readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1);
/*
 * Some replicator designs lose register context when AMBA clocks are
 * removed, so check for that case and treat a cleared filter pair as
 * the fully-blocked reset state.
 */
if (drvdata->check_idfilter_val && id0val == 0x0 && id1val == 0x0)
id0val = id1val = 0xff;
if (id0val == 0xff && id1val == 0xff)
rc = coresight_claim_device_unlocked(csdev);
if (!rc) {
switch (outport) {
case 0:
id0val = 0x0;
break;
case 1:
id1val = 0x0;
break;
default:
WARN_ON(1);
rc = -EINVAL;
}
}
/* Ensure that the outport is enabled. */
if (!rc) {
writel_relaxed(id0val, drvdata->base + REPLICATOR_IDFILTER0);
writel_relaxed(id1val, drvdata->base + REPLICATOR_IDFILTER1);
}
CS_LOCK(drvdata->base);
return rc;
}
static int replicator_enable(struct coresight_device *csdev,
struct coresight_connection *in,
struct coresight_connection *out)
{
int rc = 0;
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
unsigned long flags;
bool first_enable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (atomic_read(&out->src_refcnt) == 0) {
if (drvdata->base)
rc = dynamic_replicator_enable(drvdata, in->dest_port,
out->src_port);
if (!rc)
first_enable = true;
}
if (!rc)
atomic_inc(&out->src_refcnt);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (first_enable)
dev_dbg(&csdev->dev, "REPLICATOR enabled\n");
return rc;
}
static void dynamic_replicator_disable(struct replicator_drvdata *drvdata,
int inport, int outport)
{
u32 reg;
struct coresight_device *csdev = drvdata->csdev;
switch (outport) {
case 0:
reg = REPLICATOR_IDFILTER0;
break;
case 1:
reg = REPLICATOR_IDFILTER1;
break;
default:
WARN_ON(1);
return;
}
CS_UNLOCK(drvdata->base);
/* disable the flow of ATB data through port */
writel_relaxed(0xff, drvdata->base + reg);
if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
(readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
}
static void replicator_disable(struct coresight_device *csdev,
struct coresight_connection *in,
struct coresight_connection *out)
{
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
unsigned long flags;
bool last_disable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (atomic_dec_return(&out->src_refcnt) == 0) {
if (drvdata->base)
dynamic_replicator_disable(drvdata, in->dest_port,
out->src_port);
last_disable = true;
}
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (last_disable)
dev_dbg(&csdev->dev, "REPLICATOR disabled\n");
}
static const struct coresight_ops_link replicator_link_ops = {
.enable = replicator_enable,
.disable = replicator_disable,
};
static const struct coresight_ops replicator_cs_ops = {
.link_ops = &replicator_link_ops,
};
static struct attribute *replicator_mgmt_attrs[] = {
coresight_simple_reg32(idfilter0, REPLICATOR_IDFILTER0),
coresight_simple_reg32(idfilter1, REPLICATOR_IDFILTER1),
NULL,
};
static const struct attribute_group replicator_mgmt_group = {
.attrs = replicator_mgmt_attrs,
.name = "mgmt",
};
static const struct attribute_group *replicator_groups[] = {
&replicator_mgmt_group,
NULL,
};
static int replicator_probe(struct device *dev, struct resource *res)
{
int ret = 0;
struct coresight_platform_data *pdata = NULL;
struct replicator_drvdata *drvdata;
struct coresight_desc desc = { 0 };
void __iomem *base;
if (is_of_node(dev_fwnode(dev)) &&
of_device_is_compatible(dev->of_node, "arm,coresight-replicator"))
dev_warn_once(dev,
"Uses OBSOLETE CoreSight replicator binding\n");
desc.name = coresight_alloc_device_name(&replicator_devs, dev);
if (!desc.name)
return -ENOMEM;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
if (ret)
return ret;
}
/*
* Map the device base for dynamic-replicator, which has been
* validated by AMBA core
*/
if (res) {
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto out_disable_clk;
}
drvdata->base = base;
desc.groups = replicator_groups;
desc.access = CSDEV_ACCESS_IOMEM(base);
}
if (fwnode_property_present(dev_fwnode(dev),
"qcom,replicator-loses-context"))
drvdata->check_idfilter_val = true;
dev_set_drvdata(dev, drvdata);
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata)) {
ret = PTR_ERR(pdata);
goto out_disable_clk;
}
dev->platform_data = pdata;
spin_lock_init(&drvdata->spinlock);
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
desc.ops = &replicator_cs_ops;
desc.pdata = dev->platform_data;
desc.dev = dev;
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
goto out_disable_clk;
}
replicator_reset(drvdata);
pm_runtime_put(dev);
out_disable_clk:
if (ret && !IS_ERR_OR_NULL(drvdata->atclk))
clk_disable_unprepare(drvdata->atclk);
return ret;
}
static int replicator_remove(struct device *dev)
{
struct replicator_drvdata *drvdata = dev_get_drvdata(dev);
coresight_unregister(drvdata->csdev);
return 0;
}
static int static_replicator_probe(struct platform_device *pdev)
{
int ret;
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
/* Static replicators do not have programming base */
ret = replicator_probe(&pdev->dev, NULL);
if (ret) {
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
}
return ret;
}
static int static_replicator_remove(struct platform_device *pdev)
{
replicator_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
#ifdef CONFIG_PM
static int replicator_runtime_suspend(struct device *dev)
{
struct replicator_drvdata *drvdata = dev_get_drvdata(dev);
if (drvdata && !IS_ERR(drvdata->atclk))
clk_disable_unprepare(drvdata->atclk);
return 0;
}
static int replicator_runtime_resume(struct device *dev)
{
struct replicator_drvdata *drvdata = dev_get_drvdata(dev);
if (drvdata && !IS_ERR(drvdata->atclk))
clk_prepare_enable(drvdata->atclk);
return 0;
}
#endif
static const struct dev_pm_ops replicator_dev_pm_ops = {
SET_RUNTIME_PM_OPS(replicator_runtime_suspend,
replicator_runtime_resume, NULL)
};
static const struct of_device_id static_replicator_match[] = {
{.compatible = "arm,coresight-replicator"},
{.compatible = "arm,coresight-static-replicator"},
{}
};
MODULE_DEVICE_TABLE(of, static_replicator_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id static_replicator_acpi_ids[] = {
{"ARMHC985", 0}, /* ARM CoreSight Static Replicator */
{}
};
MODULE_DEVICE_TABLE(acpi, static_replicator_acpi_ids);
#endif
static struct platform_driver static_replicator_driver = {
.probe = static_replicator_probe,
.remove = static_replicator_remove,
.driver = {
.name = "coresight-static-replicator",
/* THIS_MODULE is taken care of by platform_driver_register() */
.of_match_table = of_match_ptr(static_replicator_match),
.acpi_match_table = ACPI_PTR(static_replicator_acpi_ids),
.pm = &replicator_dev_pm_ops,
.suppress_bind_attrs = true,
},
};
static int dynamic_replicator_probe(struct amba_device *adev,
const struct amba_id *id)
{
return replicator_probe(&adev->dev, &adev->res);
}
static void dynamic_replicator_remove(struct amba_device *adev)
{
replicator_remove(&adev->dev);
}
static const struct amba_id dynamic_replicator_ids[] = {
CS_AMBA_ID(0x000bb909),
CS_AMBA_ID(0x000bb9ec), /* Coresight SoC-600 */
{},
};
MODULE_DEVICE_TABLE(amba, dynamic_replicator_ids);
static struct amba_driver dynamic_replicator_driver = {
.drv = {
.name = "coresight-dynamic-replicator",
.pm = &replicator_dev_pm_ops,
.owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = dynamic_replicator_probe,
.remove = dynamic_replicator_remove,
.id_table = dynamic_replicator_ids,
};
static int __init replicator_init(void)
{
int ret;
ret = platform_driver_register(&static_replicator_driver);
if (ret) {
pr_info("Error registering platform driver\n");
return ret;
}
ret = amba_driver_register(&dynamic_replicator_driver);
if (ret) {
pr_info("Error registering amba driver\n");
platform_driver_unregister(&static_replicator_driver);
}
return ret;
}
static void __exit replicator_exit(void)
{
platform_driver_unregister(&static_replicator_driver);
amba_driver_unregister(&dynamic_replicator_driver);
}
module_init(replicator_init);
module_exit(replicator_exit);
MODULE_AUTHOR("Pratik Patel <[email protected]>");
MODULE_AUTHOR("Mathieu Poirier <[email protected]>");
MODULE_DESCRIPTION("Arm CoreSight Replicator Driver");
MODULE_LICENSE("GPL v2");
/* linux-master: drivers/hwtracing/coresight/coresight-replicator.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(C) 2020 Linaro Limited. All rights reserved.
* Author: Mike Leach <[email protected]>
*/
#include "coresight-etm4x.h"
#include "coresight-etm4x-cfg.h"
#include "coresight-priv.h"
#include "coresight-syscfg.h"
/* defines to associate register IDs with driver data locations */
#define CHECKREG(cval, elem) \
{ \
if (offset == cval) { \
reg_csdev->driver_regval = &drvcfg->elem; \
err = 0; \
break; \
} \
}
#define CHECKREGIDX(cval, elem, off_idx, mask) \
{ \
if (mask == cval) { \
reg_csdev->driver_regval = &drvcfg->elem[off_idx]; \
err = 0; \
break; \
} \
}
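/*
 * Both helpers rely on being expanded inside a do { ... } while (0)
 * block: on a match they set the mapping, clear the error and 'break'
 * out so the remaining checks in the block are skipped.
 */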
/**
* etm4_cfg_map_reg_offset - validate and map the register offset into a
* location in the driver config struct.
*
* Limits the number of registers that can be accessed and programmed in
* features, to those which are used to control the trace capture parameters.
*
* Omits or limits access to those which the driver must use exclusively.
*
* Invalid offsets will result in fail code return and feature load failure.
*
* @drvdata: driver data to map into.
* @reg_csdev: register to map.
* @offset: device offset for the register
*/
static int etm4_cfg_map_reg_offset(struct etmv4_drvdata *drvdata,
struct cscfg_regval_csdev *reg_csdev, u32 offset)
{
int err = -EINVAL, idx;
struct etmv4_config *drvcfg = &drvdata->config;
u32 off_mask;
if (((offset >= TRCEVENTCTL0R) && (offset <= TRCVIPCSSCTLR)) ||
((offset >= TRCSEQRSTEVR) && (offset <= TRCEXTINSELR)) ||
((offset >= TRCCIDCCTLR0) && (offset <= TRCVMIDCCTLR1))) {
do {
CHECKREG(TRCEVENTCTL0R, eventctrl0);
CHECKREG(TRCEVENTCTL1R, eventctrl1);
CHECKREG(TRCSTALLCTLR, stall_ctrl);
CHECKREG(TRCTSCTLR, ts_ctrl);
CHECKREG(TRCSYNCPR, syncfreq);
CHECKREG(TRCCCCTLR, ccctlr);
CHECKREG(TRCBBCTLR, bb_ctrl);
CHECKREG(TRCVICTLR, vinst_ctrl);
CHECKREG(TRCVIIECTLR, viiectlr);
CHECKREG(TRCVISSCTLR, vissctlr);
CHECKREG(TRCVIPCSSCTLR, vipcssctlr);
CHECKREG(TRCSEQRSTEVR, seq_rst);
CHECKREG(TRCSEQSTR, seq_state);
CHECKREG(TRCEXTINSELR, ext_inp);
CHECKREG(TRCCIDCCTLR0, ctxid_mask0);
CHECKREG(TRCCIDCCTLR1, ctxid_mask1);
CHECKREG(TRCVMIDCCTLR0, vmid_mask0);
CHECKREG(TRCVMIDCCTLR1, vmid_mask1);
} while (0);
} else if ((offset & GENMASK(11, 4)) == TRCSEQEVRn(0)) {
/* sequencer state control registers */
idx = (offset & GENMASK(3, 0)) / 4;
if (idx < ETM_MAX_SEQ_STATES) {
reg_csdev->driver_regval = &drvcfg->seq_ctrl[idx];
err = 0;
}
} else if ((offset >= TRCSSCCRn(0)) && (offset <= TRCSSPCICRn(7))) {
/* 32 bit, 8 off indexed register sets */
idx = (offset & GENMASK(4, 0)) / 4;
off_mask = (offset & GENMASK(11, 5));
do {
CHECKREGIDX(TRCSSCCRn(0), ss_ctrl, idx, off_mask);
CHECKREGIDX(TRCSSCSRn(0), ss_status, idx, off_mask);
CHECKREGIDX(TRCSSPCICRn(0), ss_pe_cmp, idx, off_mask);
} while (0);
} else if ((offset >= TRCCIDCVRn(0)) && (offset <= TRCVMIDCVRn(7))) {
/* 64 bit, 8 off indexed register sets */
idx = (offset & GENMASK(5, 0)) / 8;
off_mask = (offset & GENMASK(11, 6));
do {
CHECKREGIDX(TRCCIDCVRn(0), ctxid_pid, idx, off_mask);
CHECKREGIDX(TRCVMIDCVRn(0), vmid_val, idx, off_mask);
} while (0);
} else if ((offset >= TRCRSCTLRn(2)) &&
(offset <= TRCRSCTLRn((ETM_MAX_RES_SEL - 1)))) {
/* 32 bit resource selection regs, 32 off, skip fixed 0,1 */
idx = (offset & GENMASK(6, 0)) / 4;
if (idx < ETM_MAX_RES_SEL) {
reg_csdev->driver_regval = &drvcfg->res_ctrl[idx];
err = 0;
}
} else if ((offset >= TRCACVRn(0)) &&
(offset <= TRCACATRn((ETM_MAX_SINGLE_ADDR_CMP - 1)))) {
/* 64 bit addr cmp regs, 16 off */
idx = (offset & GENMASK(6, 0)) / 8;
off_mask = offset & GENMASK(11, 7);
do {
CHECKREGIDX(TRCACVRn(0), addr_val, idx, off_mask);
CHECKREGIDX(TRCACATRn(0), addr_acc, idx, off_mask);
} while (0);
} else if ((offset >= TRCCNTRLDVRn(0)) &&
(offset <= TRCCNTVRn((ETMv4_MAX_CNTR - 1)))) {
/* 32 bit counter regs, 4 off (ETMv4_MAX_CNTR - 1) */
idx = (offset & GENMASK(3, 0)) / 4;
off_mask = offset & GENMASK(11, 4);
do {
CHECKREGIDX(TRCCNTRLDVRn(0), cntrldvr, idx, off_mask);
CHECKREGIDX(TRCCNTCTLRn(0), cntr_ctrl, idx, off_mask);
CHECKREGIDX(TRCCNTVRn(0), cntr_val, idx, off_mask);
} while (0);
}
return err;
}
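/*
 * Worked example of the index arithmetic above: TRCSEQEVRn(2) sits at
 * offset 0x108 (TRCSEQEVRn(0) being 0x100), so (0x108 & GENMASK(3, 0)) / 4
 * yields idx = 2 and the register is mapped onto drvcfg->seq_ctrl[2].
 */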
/**
* etm4_cfg_load_feature - load a feature into a device instance.
*
* @csdev: An ETMv4 CoreSight device.
* @feat_csdev: The feature to be loaded.
*
* The function will load a feature instance into the device, checking that
* the register definitions are valid for the device.
*
* Parameter and register definitions will be converted into internal
* structures that are used to set the values in the driver when the
* feature is enabled for the device.
*
* The feature spinlock pointer is initialised to the same spinlock
* that the driver uses to protect the internal register values.
*/
static int etm4_cfg_load_feature(struct coresight_device *csdev,
struct cscfg_feature_csdev *feat_csdev)
{
struct device *dev = csdev->dev.parent;
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
const struct cscfg_feature_desc *feat_desc = feat_csdev->feat_desc;
u32 offset;
int i = 0, err = 0;
/*
* essential we set the device spinlock - this is used in the generic
* programming routines when copying values into the drvdata structures
* via the pointers setup in etm4_cfg_map_reg_offset().
*/
feat_csdev->drv_spinlock = &drvdata->spinlock;
/* process the register descriptions */
for (i = 0; i < feat_csdev->nr_regs && !err; i++) {
offset = feat_desc->regs_desc[i].offset;
err = etm4_cfg_map_reg_offset(drvdata, &feat_csdev->regs_csdev[i], offset);
}
return err;
}
/* match information when loading configurations */
#define CS_CFG_ETM4_MATCH_FLAGS (CS_CFG_MATCH_CLASS_SRC_ALL | \
CS_CFG_MATCH_CLASS_SRC_ETM4)
int etm4_cscfg_register(struct coresight_device *csdev)
{
struct cscfg_csdev_feat_ops ops;
ops.load_feat = &etm4_cfg_load_feature;
return cscfg_register_csdev(csdev, CS_CFG_ETM4_MATCH_FLAGS, &ops);
}
/* linux-master: drivers/hwtracing/coresight/coresight-etm4x-cfg.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/coresight.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "coresight-priv.h"
struct dummy_drvdata {
struct device *dev;
struct coresight_device *csdev;
};
DEFINE_CORESIGHT_DEVLIST(source_devs, "dummy_source");
DEFINE_CORESIGHT_DEVLIST(sink_devs, "dummy_sink");
static int dummy_source_enable(struct coresight_device *csdev,
struct perf_event *event, enum cs_mode mode)
{
dev_dbg(csdev->dev.parent, "Dummy source enabled\n");
return 0;
}
static void dummy_source_disable(struct coresight_device *csdev,
struct perf_event *event)
{
dev_dbg(csdev->dev.parent, "Dummy source disabled\n");
}
static int dummy_sink_enable(struct coresight_device *csdev, enum cs_mode mode,
void *data)
{
dev_dbg(csdev->dev.parent, "Dummy sink enabled\n");
return 0;
}
static int dummy_sink_disable(struct coresight_device *csdev)
{
dev_dbg(csdev->dev.parent, "Dummy sink disabled\n");
return 0;
}
static const struct coresight_ops_source dummy_source_ops = {
.enable = dummy_source_enable,
.disable = dummy_source_disable,
};
static const struct coresight_ops dummy_source_cs_ops = {
.source_ops = &dummy_source_ops,
};
static const struct coresight_ops_sink dummy_sink_ops = {
.enable = dummy_sink_enable,
.disable = dummy_sink_disable,
};
static const struct coresight_ops dummy_sink_cs_ops = {
.sink_ops = &dummy_sink_ops,
};
static int dummy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct coresight_platform_data *pdata;
struct dummy_drvdata *drvdata;
struct coresight_desc desc = { 0 };
if (of_device_is_compatible(node, "arm,coresight-dummy-source")) {
desc.name = coresight_alloc_device_name(&source_devs, dev);
if (!desc.name)
return -ENOMEM;
desc.type = CORESIGHT_DEV_TYPE_SOURCE;
desc.subtype.source_subtype =
CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS;
desc.ops = &dummy_source_cs_ops;
} else if (of_device_is_compatible(node, "arm,coresight-dummy-sink")) {
desc.name = coresight_alloc_device_name(&sink_devs, dev);
if (!desc.name)
return -ENOMEM;
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_DUMMY;
desc.ops = &dummy_sink_cs_ops;
} else {
dev_err(dev, "Device type not set\n");
return -EINVAL;
}
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
pdev->dev.platform_data = pdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->dev = &pdev->dev;
platform_set_drvdata(pdev, drvdata);
desc.pdata = pdev->dev.platform_data;
desc.dev = &pdev->dev;
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
pm_runtime_enable(dev);
dev_dbg(dev, "Dummy device initialized\n");
return 0;
}
static int dummy_remove(struct platform_device *pdev)
{
struct dummy_drvdata *drvdata = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
pm_runtime_disable(dev);
coresight_unregister(drvdata->csdev);
return 0;
}
static const struct of_device_id dummy_match[] = {
{.compatible = "arm,coresight-dummy-source"},
{.compatible = "arm,coresight-dummy-sink"},
{},
};
static struct platform_driver dummy_driver = {
.probe = dummy_probe,
.remove = dummy_remove,
.driver = {
.name = "coresight-dummy",
.of_match_table = dummy_match,
},
};
module_platform_driver(dummy_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CoreSight dummy driver");
/* linux-master: drivers/hwtracing/coresight/coresight-dummy.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <[email protected]>
*/
#include <linux/pid_namespace.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm.h"
#include "coresight-priv.h"
static ssize_t nr_addr_cmp_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
val = drvdata->nr_addr_cmp;
return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_addr_cmp);
static ssize_t nr_cntr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
val = drvdata->nr_cntr;
return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_cntr);
static ssize_t nr_ctxid_cmp_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
val = drvdata->nr_ctxid_cmp;
return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ctxid_cmp);
static ssize_t etmsr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long flags, val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
pm_runtime_get_sync(dev->parent);
spin_lock_irqsave(&drvdata->spinlock, flags);
CS_UNLOCK(drvdata->base);
val = etm_readl(drvdata, ETMSR);
CS_LOCK(drvdata->base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
pm_runtime_put(dev->parent);
return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(etmsr);
static ssize_t reset_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int i, ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
if (val) {
spin_lock(&drvdata->spinlock);
memset(config, 0, sizeof(struct etm_config));
config->mode = ETM_MODE_EXCLUDE;
config->trigger_event = ETM_DEFAULT_EVENT_VAL;
for (i = 0; i < drvdata->nr_addr_cmp; i++) {
config->addr_type[i] = ETM_ADDR_TYPE_NONE;
}
etm_set_default(config);
etm_release_trace_id(drvdata);
spin_unlock(&drvdata->spinlock);
}
return size;
}
static DEVICE_ATTR_WO(reset);
static ssize_t mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
val = config->mode;
return sprintf(buf, "%#lx\n", val);
}
static ssize_t mode_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
spin_lock(&drvdata->spinlock);
config->mode = val & ETM_MODE_ALL;
if (config->mode & ETM_MODE_EXCLUDE)
config->enable_ctrl1 |= ETMTECR1_INC_EXC;
else
config->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
if (config->mode & ETM_MODE_CYCACC)
config->ctrl |= ETMCR_CYC_ACC;
else
config->ctrl &= ~ETMCR_CYC_ACC;
if (config->mode & ETM_MODE_STALL) {
if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
dev_warn(dev, "stall mode not supported\n");
ret = -EINVAL;
goto err_unlock;
}
config->ctrl |= ETMCR_STALL_MODE;
} else
config->ctrl &= ~ETMCR_STALL_MODE;
if (config->mode & ETM_MODE_TIMESTAMP) {
if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
dev_warn(dev, "timestamp not supported\n");
ret = -EINVAL;
goto err_unlock;
}
config->ctrl |= ETMCR_TIMESTAMP_EN;
} else
config->ctrl &= ~ETMCR_TIMESTAMP_EN;
if (config->mode & ETM_MODE_CTXID)
config->ctrl |= ETMCR_CTXID_SIZE;
else
config->ctrl &= ~ETMCR_CTXID_SIZE;
if (config->mode & ETM_MODE_BBROAD)
config->ctrl |= ETMCR_BRANCH_BROADCAST;
else
config->ctrl &= ~ETMCR_BRANCH_BROADCAST;
if (config->mode & ETM_MODE_RET_STACK)
config->ctrl |= ETMCR_RETURN_STACK;
else
config->ctrl &= ~ETMCR_RETURN_STACK;
if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
etm_config_trace_mode(config);
spin_unlock(&drvdata->spinlock);
return size;
err_unlock:
spin_unlock(&drvdata->spinlock);
return ret;
}
static DEVICE_ATTR_RW(mode);
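/*
 * Illustrative (not authoritative) sysfs usage for the attribute above;
 * the device name and bit value are assumptions, and the real ETM_MODE_*
 * bit definitions live in coresight-etm.h:
 *
 *   # e.g. request cycle-accurate tracing (ETM_MODE_CYCACC)
 *   echo 0x2 > /sys/bus/coresight/devices/etm0/mode
 */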
static ssize_t trigger_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
val = config->trigger_event;
return sprintf(buf, "%#lx\n", val);
}
static ssize_t trigger_event_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
config->trigger_event = val & ETM_EVENT_MASK;
return size;
}
static DEVICE_ATTR_RW(trigger_event);
static ssize_t enable_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
val = config->enable_event;
return sprintf(buf, "%#lx\n", val);
}
static ssize_t enable_event_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
config->enable_event = val & ETM_EVENT_MASK;
return size;
}
static DEVICE_ATTR_RW(enable_event);
static ssize_t fifofull_level_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
val = config->fifofull_level;
return sprintf(buf, "%#lx\n", val);
}
static ssize_t fifofull_level_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
config->fifofull_level = val;
return size;
}
static DEVICE_ATTR_RW(fifofull_level);
static ssize_t addr_idx_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
val = config->addr_idx;
return sprintf(buf, "%#lx\n", val);
}
static ssize_t addr_idx_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
if (val >= drvdata->nr_addr_cmp)
return -EINVAL;
/*
* Use spinlock to ensure index doesn't change while it gets
* dereferenced multiple times within a spinlock block elsewhere.
*/
spin_lock(&drvdata->spinlock);
config->addr_idx = val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_idx);
static ssize_t addr_single_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u8 idx;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
spin_unlock(&drvdata->spinlock);
return -EINVAL;
}
val = config->addr_val[idx];
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#lx\n", val);
}
static ssize_t addr_single_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
u8 idx;
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
spin_unlock(&drvdata->spinlock);
return -EINVAL;
}
config->addr_val[idx] = val;
config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_single);
static ssize_t addr_range_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u8 idx;
unsigned long val1, val2;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (idx % 2 != 0) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
(config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
val1 = config->addr_val[idx];
val2 = config->addr_val[idx + 1];
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#lx %#lx\n", val1, val2);
}
static ssize_t addr_range_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
u8 idx;
unsigned long val1, val2;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
return -EINVAL;
/* Lower address comparator cannot have a higher address value */
if (val1 > val2)
return -EINVAL;
spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (idx % 2 != 0) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
(config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
config->addr_val[idx] = val1;
config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
config->addr_val[idx + 1] = val2;
config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
config->enable_ctrl1 |= (1 << (idx/2));
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_range);
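/*
 * Address-range comparators are built from an even/odd pair of single
 * address comparators, which is why addr_idx must be even here and why
 * bit (idx / 2) of ETMTECR1 is what arms the pair.
 */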
static ssize_t addr_start_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u8 idx;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
val = config->addr_val[idx];
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#lx\n", val);
}
static ssize_t addr_start_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
u8 idx;
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
config->addr_val[idx] = val;
config->addr_type[idx] = ETM_ADDR_TYPE_START;
config->startstop_ctrl |= (1 << idx);
config->enable_ctrl1 |= ETMTECR1_START_STOP;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_start);
static ssize_t addr_stop_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u8 idx;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
val = config->addr_val[idx];
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#lx\n", val);
}
static ssize_t addr_stop_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
u8 idx;
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
config->addr_val[idx] = val;
config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
config->startstop_ctrl |= (1 << (idx + 16));
config->enable_ctrl1 |= ETMTECR1_START_STOP;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_stop);
static ssize_t addr_acctype_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
spin_lock(&drvdata->spinlock);
val = config->addr_acctype[config->addr_idx];
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#lx\n", val);
}
static ssize_t addr_acctype_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
spin_lock(&drvdata->spinlock);
config->addr_acctype[config->addr_idx] = val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_acctype);
static ssize_t cntr_idx_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
val = config->cntr_idx;
return sprintf(buf, "%#lx\n", val);
}
static ssize_t cntr_idx_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
if (val >= drvdata->nr_cntr)
return -EINVAL;
/*
* Use spinlock to ensure index doesn't change while it gets
* dereferenced multiple times within a spinlock block elsewhere.
*/
spin_lock(&drvdata->spinlock);
config->cntr_idx = val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(cntr_idx);
static ssize_t cntr_rld_val_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
spin_lock(&drvdata->spinlock);
val = config->cntr_rld_val[config->cntr_idx];
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#lx\n", val);
}
static ssize_t cntr_rld_val_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
spin_lock(&drvdata->spinlock);
config->cntr_rld_val[config->cntr_idx] = val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(cntr_rld_val);
static ssize_t cntr_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
spin_lock(&drvdata->spinlock);
val = config->cntr_event[config->cntr_idx];
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#lx\n", val);
}
static ssize_t cntr_event_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
spin_lock(&drvdata->spinlock);
config->cntr_event[config->cntr_idx] = val & ETM_EVENT_MASK;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(cntr_event);
static ssize_t cntr_rld_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
spin_lock(&drvdata->spinlock);
val = config->cntr_rld_event[config->cntr_idx];
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#lx\n", val);
}
static ssize_t cntr_rld_event_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
spin_lock(&drvdata->spinlock);
config->cntr_rld_event[config->cntr_idx] = val & ETM_EVENT_MASK;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(cntr_rld_event);
static ssize_t cntr_val_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int i, ret = 0;
u32 val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
if (!local_read(&drvdata->mode)) {
spin_lock(&drvdata->spinlock);
for (i = 0; i < drvdata->nr_cntr; i++)
ret += sprintf(buf, "counter %d: %x\n",
i, config->cntr_val[i]);
spin_unlock(&drvdata->spinlock);
return ret;
}
for (i = 0; i < drvdata->nr_cntr; i++) {
val = etm_readl(drvdata, ETMCNTVRn(i));
ret += sprintf(buf, "counter %d: %x\n", i, val);
}
return ret;
}
static ssize_t cntr_val_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
spin_lock(&drvdata->spinlock);
config->cntr_val[config->cntr_idx] = val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(cntr_val);
static ssize_t seq_12_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
val = config->seq_12_event;
return sprintf(buf, "%#lx\n", val);
}
static ssize_t seq_12_event_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
config->seq_12_event = val & ETM_EVENT_MASK;
return size;
}
static DEVICE_ATTR_RW(seq_12_event);
static ssize_t seq_21_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
val = config->seq_21_event;
return sprintf(buf, "%#lx\n", val);
}
static ssize_t seq_21_event_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
config->seq_21_event = val & ETM_EVENT_MASK;
return size;
}
static DEVICE_ATTR_RW(seq_21_event);
static ssize_t seq_23_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
val = config->seq_23_event;
return sprintf(buf, "%#lx\n", val);
}
static ssize_t seq_23_event_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
config->seq_23_event = val & ETM_EVENT_MASK;
return size;
}
static DEVICE_ATTR_RW(seq_23_event);
static ssize_t seq_31_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
val = config->seq_31_event;
return sprintf(buf, "%#lx\n", val);
}
static ssize_t seq_31_event_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
config->seq_31_event = val & ETM_EVENT_MASK;
return size;
}
static DEVICE_ATTR_RW(seq_31_event);
static ssize_t seq_32_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
val = config->seq_32_event;
return sprintf(buf, "%#lx\n", val);
}
static ssize_t seq_32_event_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
config->seq_32_event = val & ETM_EVENT_MASK;
return size;
}
static DEVICE_ATTR_RW(seq_32_event);
static ssize_t seq_13_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
val = config->seq_13_event;
return sprintf(buf, "%#lx\n", val);
}
static ssize_t seq_13_event_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
config->seq_13_event = val & ETM_EVENT_MASK;
return size;
}
static DEVICE_ATTR_RW(seq_13_event);
static ssize_t seq_curr_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val, flags;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
if (!local_read(&drvdata->mode)) {
val = config->seq_curr_state;
goto out;
}
pm_runtime_get_sync(dev->parent);
spin_lock_irqsave(&drvdata->spinlock, flags);
CS_UNLOCK(drvdata->base);
val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
CS_LOCK(drvdata->base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
pm_runtime_put(dev->parent);
out:
return sprintf(buf, "%#lx\n", val);
}
static ssize_t seq_curr_state_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
if (val > ETM_SEQ_STATE_MAX_VAL)
return -EINVAL;
config->seq_curr_state = val;
return size;
}
static DEVICE_ATTR_RW(seq_curr_state);
static ssize_t ctxid_idx_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
val = config->ctxid_idx;
return sprintf(buf, "%#lx\n", val);
}
static ssize_t ctxid_idx_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
if (val >= drvdata->nr_ctxid_cmp)
return -EINVAL;
/*
* Use spinlock to ensure index doesn't change while it gets
* dereferenced multiple times within a spinlock block elsewhere.
*/
spin_lock(&drvdata->spinlock);
config->ctxid_idx = val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(ctxid_idx);
static ssize_t ctxid_pid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
/*
* Don't use contextID tracing if coming from a PID namespace. See
* comment in ctxid_pid_store().
*/
if (task_active_pid_ns(current) != &init_pid_ns)
return -EINVAL;
spin_lock(&drvdata->spinlock);
val = config->ctxid_pid[config->ctxid_idx];
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#lx\n", val);
}
static ssize_t ctxid_pid_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long pid;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
/*
* When contextID tracing is enabled the tracers will insert the
* value found in the contextID register in the trace stream. But if
* a process is in a namespace the PID of that process as seen from the
* namespace won't be what the kernel sees, something that makes the
* feature confusing and can potentially leak kernel-only information.
* As such refuse to use the feature if @current is not in the initial
* PID namespace.
*/
if (task_active_pid_ns(current) != &init_pid_ns)
return -EINVAL;
ret = kstrtoul(buf, 16, &pid);
if (ret)
return ret;
spin_lock(&drvdata->spinlock);
config->ctxid_pid[config->ctxid_idx] = pid;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(ctxid_pid);
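/*
 * Illustrative sysfs usage (PID value is hypothetical): reads and
 * writes are rejected with -EINVAL unless the caller runs in the
 * initial PID namespace:
 *   # echo 0x2a > ctxid_pid     # trace context ID 0x2a
 */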
static ssize_t ctxid_mask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
/*
* Don't use contextID tracing if coming from a PID namespace. See
* comment in ctxid_pid_store().
*/
if (task_active_pid_ns(current) != &init_pid_ns)
return -EINVAL;
val = config->ctxid_mask;
return sprintf(buf, "%#lx\n", val);
}
static ssize_t ctxid_mask_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
/*
* Don't use contextID tracing if coming from a PID namespace. See
* comment in ctxid_pid_store().
*/
if (task_active_pid_ns(current) != &init_pid_ns)
return -EINVAL;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
config->ctxid_mask = val;
return size;
}
static DEVICE_ATTR_RW(ctxid_mask);
static ssize_t sync_freq_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
val = config->sync_freq;
return sprintf(buf, "%#lx\n", val);
}
static ssize_t sync_freq_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
config->sync_freq = val & ETM_SYNC_MASK;
return size;
}
static DEVICE_ATTR_RW(sync_freq);
static ssize_t timestamp_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
val = config->timestamp_event;
return sprintf(buf, "%#lx\n", val);
}
static ssize_t timestamp_event_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
config->timestamp_event = val & ETM_EVENT_MASK;
return size;
}
static DEVICE_ATTR_RW(timestamp_event);
static ssize_t cpu_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
val = drvdata->cpu;
return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static DEVICE_ATTR_RO(cpu);
static ssize_t traceid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int trace_id;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
trace_id = etm_read_alloc_trace_id(drvdata);
if (trace_id < 0)
return trace_id;
return sysfs_emit(buf, "%#x\n", trace_id);
}
static DEVICE_ATTR_RO(traceid);
static struct attribute *coresight_etm_attrs[] = {
&dev_attr_nr_addr_cmp.attr,
&dev_attr_nr_cntr.attr,
&dev_attr_nr_ctxid_cmp.attr,
&dev_attr_etmsr.attr,
&dev_attr_reset.attr,
&dev_attr_mode.attr,
&dev_attr_trigger_event.attr,
&dev_attr_enable_event.attr,
&dev_attr_fifofull_level.attr,
&dev_attr_addr_idx.attr,
&dev_attr_addr_single.attr,
&dev_attr_addr_range.attr,
&dev_attr_addr_start.attr,
&dev_attr_addr_stop.attr,
&dev_attr_addr_acctype.attr,
&dev_attr_cntr_idx.attr,
&dev_attr_cntr_rld_val.attr,
&dev_attr_cntr_event.attr,
&dev_attr_cntr_rld_event.attr,
&dev_attr_cntr_val.attr,
&dev_attr_seq_12_event.attr,
&dev_attr_seq_21_event.attr,
&dev_attr_seq_23_event.attr,
&dev_attr_seq_31_event.attr,
&dev_attr_seq_32_event.attr,
&dev_attr_seq_13_event.attr,
&dev_attr_seq_curr_state.attr,
&dev_attr_ctxid_idx.attr,
&dev_attr_ctxid_pid.attr,
&dev_attr_ctxid_mask.attr,
&dev_attr_sync_freq.attr,
&dev_attr_timestamp_event.attr,
&dev_attr_traceid.attr,
&dev_attr_cpu.attr,
NULL,
};
static struct attribute *coresight_etm_mgmt_attrs[] = {
coresight_simple_reg32(etmccr, ETMCCR),
coresight_simple_reg32(etmccer, ETMCCER),
coresight_simple_reg32(etmscr, ETMSCR),
coresight_simple_reg32(etmidr, ETMIDR),
coresight_simple_reg32(etmcr, ETMCR),
coresight_simple_reg32(etmtraceidr, ETMTRACEIDR),
coresight_simple_reg32(etmteevr, ETMTEEVR),
coresight_simple_reg32(etmtssvr, ETMTSSCR),
coresight_simple_reg32(etmtecr1, ETMTECR1),
coresight_simple_reg32(etmtecr2, ETMTECR2),
NULL,
};
static const struct attribute_group coresight_etm_group = {
.attrs = coresight_etm_attrs,
};
static const struct attribute_group coresight_etm_mgmt_group = {
.attrs = coresight_etm_mgmt_attrs,
.name = "mgmt",
};
const struct attribute_group *coresight_etm_groups[] = {
&coresight_etm_group,
&coresight_etm_mgmt_group,
NULL,
};
| linux-master | drivers/hwtracing/coresight/coresight-etm3x-sysfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* Description: CoreSight Program Flow Trace driver
*/
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
#include <linux/pm_runtime.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/amba/bus.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
#include <linux/perf_event.h>
#include <asm/sections.h>
#include "coresight-etm.h"
#include "coresight-etm-perf.h"
#include "coresight-trace-id.h"
/*
* Not really modular but using module_param is the easiest way to
* remain consistent with existing use cases for now.
*/
static int boot_enable;
module_param_named(boot_enable, boot_enable, int, S_IRUGO);
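/*
 * For example (assuming the default module name), boot-time tracing can
 * be requested from the kernel command line with
 * "coresight-etm3x.boot_enable=1".
 */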
static struct etm_drvdata *etmdrvdata[NR_CPUS];
static enum cpuhp_state hp_online;
/*
* Memory mapped writes to clear os lock are not supported on some processors
* and OS lock must be unlocked before any memory mapped access on such
* processors, otherwise memory mapped reads/writes will be invalid.
*/
static void etm_os_unlock(struct etm_drvdata *drvdata)
{
/* Writing any value to ETMOSLAR unlocks the trace registers */
etm_writel(drvdata, 0x0, ETMOSLAR);
drvdata->os_unlock = true;
isb();
}
static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
{
u32 etmcr;
/* Ensure pending cp14 accesses complete before setting pwrdwn */
mb();
isb();
etmcr = etm_readl(drvdata, ETMCR);
etmcr |= ETMCR_PWD_DWN;
etm_writel(drvdata, etmcr, ETMCR);
}
static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
{
u32 etmcr;
etmcr = etm_readl(drvdata, ETMCR);
etmcr &= ~ETMCR_PWD_DWN;
etm_writel(drvdata, etmcr, ETMCR);
/* Ensure pwrup completes before subsequent cp14 accesses */
mb();
isb();
}
static void etm_set_pwrup(struct etm_drvdata *drvdata)
{
u32 etmpdcr;
etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
etmpdcr |= ETMPDCR_PWD_UP;
writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
/* Ensure pwrup completes before subsequent cp14 accesses */
mb();
isb();
}
static void etm_clr_pwrup(struct etm_drvdata *drvdata)
{
u32 etmpdcr;
/* Ensure pending cp14 accesses complete before clearing pwrup */
mb();
isb();
etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
etmpdcr &= ~ETMPDCR_PWD_UP;
writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
}
/**
* coresight_timeout_etm - loop until a bit has changed to a specific state.
* @drvdata: etm's private data structure.
* @offset: offset of the register from the base address.
* @position: the position of the bit of interest.
* @value: the value the bit should have.
*
* Basically the same as @coresight_timeout except for the register access
* method where we have to account for CP14 configurations.
* Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
* TIMEOUT_US has elapsed, whichever happens first.
*/
static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset,
int position, int value)
{
int i;
u32 val;
for (i = TIMEOUT_US; i > 0; i--) {
val = etm_readl(drvdata, offset);
/* Waiting on the bit to go from 0 to 1 */
if (value) {
if (val & BIT(position))
return 0;
/* Waiting on the bit to go from 1 to 0 */
} else {
if (!(val & BIT(position)))
return 0;
}
/*
* Delay is arbitrary - the specification doesn't say how long
* we are expected to wait. Extra check required to make sure
* we don't wait needlessly on the last iteration.
*/
if (i - 1)
udelay(1);
}
return -EAGAIN;
}
static void etm_set_prog(struct etm_drvdata *drvdata)
{
u32 etmcr;
etmcr = etm_readl(drvdata, ETMCR);
etmcr |= ETMCR_ETM_PRG;
etm_writel(drvdata, etmcr, ETMCR);
/*
* Recommended by spec for cp14 accesses to ensure etmcr write is
* complete before polling etmsr
*/
isb();
if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
dev_err(&drvdata->csdev->dev,
"%s: timeout observed when probing at offset %#x\n",
__func__, ETMSR);
}
}
static void etm_clr_prog(struct etm_drvdata *drvdata)
{
u32 etmcr;
etmcr = etm_readl(drvdata, ETMCR);
etmcr &= ~ETMCR_ETM_PRG;
etm_writel(drvdata, etmcr, ETMCR);
/*
* Recommended by spec for cp14 accesses to ensure etmcr write is
* complete before polling etmsr
*/
isb();
if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
dev_err(&drvdata->csdev->dev,
"%s: timeout observed when probing at offset %#x\n",
__func__, ETMSR);
}
}
void etm_set_default(struct etm_config *config)
{
int i;
if (WARN_ON_ONCE(!config))
return;
/*
* Taken verbatim from the TRM:
*
* To trace all memory:
* set bit [24] in register 0x009, the ETMTECR1, to 1
* set all other bits in register 0x009, the ETMTECR1, to 0
* set all bits in register 0x007, the ETMTECR2, to 0
* set register 0x008, the ETMTEEVR, to 0x6F (TRUE).
*/
config->enable_ctrl1 = ETMTECR1_INC_EXC;
config->enable_ctrl2 = 0x0;
config->enable_event = ETM_HARD_WIRE_RES_A;
config->trigger_event = ETM_DEFAULT_EVENT_VAL;
config->seq_12_event = ETM_DEFAULT_EVENT_VAL;
config->seq_21_event = ETM_DEFAULT_EVENT_VAL;
config->seq_23_event = ETM_DEFAULT_EVENT_VAL;
config->seq_31_event = ETM_DEFAULT_EVENT_VAL;
config->seq_32_event = ETM_DEFAULT_EVENT_VAL;
config->seq_13_event = ETM_DEFAULT_EVENT_VAL;
config->timestamp_event = ETM_DEFAULT_EVENT_VAL;
for (i = 0; i < ETM_MAX_CNTR; i++) {
config->cntr_rld_val[i] = 0x0;
config->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
config->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
config->cntr_val[i] = 0x0;
}
config->seq_curr_state = 0x0;
config->ctxid_idx = 0x0;
for (i = 0; i < ETM_MAX_CTXID_CMP; i++)
config->ctxid_pid[i] = 0x0;
config->ctxid_mask = 0x0;
/* Setting default to 1024 as per TRM recommendation */
config->sync_freq = 0x400;
}
void etm_config_trace_mode(struct etm_config *config)
{
u32 flags, mode;
mode = config->mode;
mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
/* excluding kernel AND user space doesn't make sense */
if (mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
return;
/* nothing to do if neither flag is set */
if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
return;
flags = (1 << 0 | /* instruction execute */
3 << 3 | /* ARM instruction */
0 << 5 | /* No data value comparison */
0 << 7); /* No exact match */
0 << 8); /* Ignore context ID */
/* No need to worry about single address comparators. */
config->enable_ctrl2 = 0x0;
/* Bit 0 is address range comparator 1 */
config->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
/*
* On ETMv3.5:
* ETMACTRn[13,11] == Non-secure state comparison control
* ETMACTRn[12,10] == Secure state comparison control
*
* b00 == Match in all modes in this state
* b01 == Do not match in any mode in this state
* b10 == Match in all modes except user mode in this state
* b11 == Match only in user mode in this state
*/
/* Tracing in secure mode is not supported at this time */
flags |= (0 << 12 | 1 << 10);
if (mode & ETM_MODE_EXCL_USER) {
/* exclude user, match all modes except user mode */
flags |= (1 << 13 | 0 << 11);
} else {
/* exclude kernel, match only in user mode */
flags |= (1 << 13 | 1 << 11);
}
/*
* The ETMEEVR register is already set to "hard wire A". As such
* all there is to do is setup an address comparator that spans
* the entire address range and configure the state and mode bits.
*/
config->addr_val[0] = (u32) 0x0;
config->addr_val[1] = (u32) ~0x0;
config->addr_acctype[0] = flags;
config->addr_acctype[1] = flags;
config->addr_type[0] = ETM_ADDR_TYPE_RANGE;
config->addr_type[1] = ETM_ADDR_TYPE_RANGE;
}
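/*
 * Worked example (values follow from the bit layout above): the base
 * flags are 1 << 0 | 3 << 3 = 0x19 and the secure/non-secure state
 * bits add 1 << 10, giving 0x419. Excluding user space then yields an
 * ETMACTRn value of 0x419 | 1 << 13 = 0x2419, while excluding the
 * kernel yields 0x419 | 1 << 13 | 1 << 11 = 0x2c19.
 */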
#define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | \
ETMCR_TIMESTAMP_EN | \
ETMCR_RETURN_STACK)
static int etm_parse_event_config(struct etm_drvdata *drvdata,
struct perf_event *event)
{
struct etm_config *config = &drvdata->config;
struct perf_event_attr *attr = &event->attr;
if (!attr)
return -EINVAL;
/* Clear configuration from previous run */
memset(config, 0, sizeof(struct etm_config));
if (attr->exclude_kernel)
config->mode = ETM_MODE_EXCL_KERN;
if (attr->exclude_user)
config->mode = ETM_MODE_EXCL_USER;
/* Always start from the default config */
etm_set_default(config);
/*
* By default the tracers are configured to trace the whole address
* range. Narrow the field only if requested by user space.
*/
if (config->mode)
etm_config_trace_mode(config);
/*
* At this time only cycle accurate, return stack and timestamp
* options are available.
*/
if (attr->config & ~ETM3X_SUPPORTED_OPTIONS)
return -EINVAL;
config->ctrl = attr->config;
/* Don't trace contextID when running in a non-root PID namespace */
if (!task_is_in_init_pid_ns(current))
config->ctrl &= ~ETMCR_CTXID_SIZE;
/*
* Possible to have cores with PTM (supports ret stack) and ETM
* (never has ret stack) on the same SoC. So if we have a request
* for return stack that can't be honoured on this core then
* clear the bit - trace will still continue normally
*/
if ((config->ctrl & ETMCR_RETURN_STACK) &&
!(drvdata->etmccer & ETMCCER_RETSTACK))
config->ctrl &= ~ETMCR_RETURN_STACK;
return 0;
}
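/*
 * Illustrative perf usage (assuming the cs_etm PMU exposes the cycacc
 * and timestamp format options on this platform), the supported
 * attr->config bits above map to event qualifiers such as:
 *   # perf record -e cs_etm/cycacc,timestamp/ --per-thread ls
 */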
static int etm_enable_hw(struct etm_drvdata *drvdata)
{
int i, rc;
u32 etmcr;
struct etm_config *config = &drvdata->config;
struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
rc = coresight_claim_device_unlocked(csdev);
if (rc)
goto done;
/* Turn engine on */
etm_clr_pwrdwn(drvdata);
/* Apply power to trace registers */
etm_set_pwrup(drvdata);
/* Make sure all registers are accessible */
etm_os_unlock(drvdata);
etm_set_prog(drvdata);
etmcr = etm_readl(drvdata, ETMCR);
/* Clear setting from a previous run if need be */
etmcr &= ~ETM3X_SUPPORTED_OPTIONS;
etmcr |= drvdata->port_size;
etmcr |= ETMCR_ETM_EN;
etm_writel(drvdata, config->ctrl | etmcr, ETMCR);
etm_writel(drvdata, config->trigger_event, ETMTRIGGER);
etm_writel(drvdata, config->startstop_ctrl, ETMTSSCR);
etm_writel(drvdata, config->enable_event, ETMTEEVR);
etm_writel(drvdata, config->enable_ctrl1, ETMTECR1);
etm_writel(drvdata, config->fifofull_level, ETMFFLR);
for (i = 0; i < drvdata->nr_addr_cmp; i++) {
etm_writel(drvdata, config->addr_val[i], ETMACVRn(i));
etm_writel(drvdata, config->addr_acctype[i], ETMACTRn(i));
}
for (i = 0; i < drvdata->nr_cntr; i++) {
etm_writel(drvdata, config->cntr_rld_val[i], ETMCNTRLDVRn(i));
etm_writel(drvdata, config->cntr_event[i], ETMCNTENRn(i));
etm_writel(drvdata, config->cntr_rld_event[i],
ETMCNTRLDEVRn(i));
etm_writel(drvdata, config->cntr_val[i], ETMCNTVRn(i));
}
etm_writel(drvdata, config->seq_12_event, ETMSQ12EVR);
etm_writel(drvdata, config->seq_21_event, ETMSQ21EVR);
etm_writel(drvdata, config->seq_23_event, ETMSQ23EVR);
etm_writel(drvdata, config->seq_31_event, ETMSQ31EVR);
etm_writel(drvdata, config->seq_32_event, ETMSQ32EVR);
etm_writel(drvdata, config->seq_13_event, ETMSQ13EVR);
etm_writel(drvdata, config->seq_curr_state, ETMSQR);
for (i = 0; i < drvdata->nr_ext_out; i++)
etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
etm_writel(drvdata, config->ctxid_pid[i], ETMCIDCVRn(i));
etm_writel(drvdata, config->ctxid_mask, ETMCIDCMR);
etm_writel(drvdata, config->sync_freq, ETMSYNCFR);
/* No external input selected */
etm_writel(drvdata, 0x0, ETMEXTINSELR);
etm_writel(drvdata, config->timestamp_event, ETMTSEVR);
/* No auxiliary control selected */
etm_writel(drvdata, 0x0, ETMAUXCR);
etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
/* No VMID comparator value selected */
etm_writel(drvdata, 0x0, ETMVMIDCVR);
etm_clr_prog(drvdata);
done:
CS_LOCK(drvdata->base);
dev_dbg(&drvdata->csdev->dev, "cpu: %d enable smp call done: %d\n",
drvdata->cpu, rc);
return rc;
}
struct etm_enable_arg {
struct etm_drvdata *drvdata;
int rc;
};
static void etm_enable_hw_smp_call(void *info)
{
struct etm_enable_arg *arg = info;
if (WARN_ON(!arg))
return;
arg->rc = etm_enable_hw(arg->drvdata);
}
static int etm_cpu_id(struct coresight_device *csdev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
return drvdata->cpu;
}
int etm_read_alloc_trace_id(struct etm_drvdata *drvdata)
{
int trace_id;
/*
* This will allocate a trace ID to the cpu,
* or return the one currently allocated.
*
* The trace ID function has its own locking.
*/
trace_id = coresight_trace_id_get_cpu_id(drvdata->cpu);
if (IS_VALID_CS_TRACE_ID(trace_id))
drvdata->traceid = (u8)trace_id;
else
dev_err(&drvdata->csdev->dev,
"Failed to allocate trace ID for %s on CPU%d\n",
dev_name(&drvdata->csdev->dev), drvdata->cpu);
return trace_id;
}
void etm_release_trace_id(struct etm_drvdata *drvdata)
{
coresight_trace_id_put_cpu_id(drvdata->cpu);
}
static int etm_enable_perf(struct coresight_device *csdev,
struct perf_event *event)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int trace_id;
if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
return -EINVAL;
/* Configure the tracer based on the session's specifics */
etm_parse_event_config(drvdata, event);
/*
* perf allocates cpu ids as part of _setup_aux() - device needs to use
* the allocated ID. This reads the current version without allocation.
*
* This does not take the trace ID lock, to prevent lockdep issues
* with perf locks - we know the ID cannot change until perf shuts down
* the session
*/
trace_id = coresight_trace_id_read_cpu_id(drvdata->cpu);
if (!IS_VALID_CS_TRACE_ID(trace_id)) {
dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n",
dev_name(&drvdata->csdev->dev), drvdata->cpu);
return -EINVAL;
}
drvdata->traceid = (u8)trace_id;
/* And enable it */
return etm_enable_hw(drvdata);
}
static int etm_enable_sysfs(struct coresight_device *csdev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct etm_enable_arg arg = { };
int ret;
spin_lock(&drvdata->spinlock);
/* sysfs needs to allocate and set a trace ID */
ret = etm_read_alloc_trace_id(drvdata);
if (ret < 0)
goto unlock_enable_sysfs;
/*
* Configure the ETM only if the CPU is online. If it isn't online,
* hw configuration will take place on the local CPU during bring up.
*/
if (cpu_online(drvdata->cpu)) {
arg.drvdata = drvdata;
ret = smp_call_function_single(drvdata->cpu,
etm_enable_hw_smp_call, &arg, 1);
if (!ret)
ret = arg.rc;
if (!ret)
drvdata->sticky_enable = true;
} else {
ret = -ENODEV;
}
if (ret)
etm_release_trace_id(drvdata);
unlock_enable_sysfs:
spin_unlock(&drvdata->spinlock);
if (!ret)
dev_dbg(&csdev->dev, "ETM tracing enabled\n");
return ret;
}
static int etm_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode)
{
int ret;
u32 val;
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
/* Someone is already using the tracer */
if (val)
return -EBUSY;
switch (mode) {
case CS_MODE_SYSFS:
ret = etm_enable_sysfs(csdev);
break;
case CS_MODE_PERF:
ret = etm_enable_perf(csdev, event);
break;
default:
ret = -EINVAL;
}
/* The tracer didn't start */
if (ret)
local_set(&drvdata->mode, CS_MODE_DISABLED);
return ret;
}
static void etm_disable_hw(void *info)
{
int i;
struct etm_drvdata *drvdata = info;
struct etm_config *config = &drvdata->config;
struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
etm_set_prog(drvdata);
/* Read back sequencer and counters for post trace analysis */
config->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
for (i = 0; i < drvdata->nr_cntr; i++)
config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
etm_set_pwrdwn(drvdata);
coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
dev_dbg(&drvdata->csdev->dev,
"cpu: %d disable smp call done\n", drvdata->cpu);
}
static void etm_disable_perf(struct coresight_device *csdev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
return;
CS_UNLOCK(drvdata->base);
/* Setting the prog bit disables tracing immediately */
etm_set_prog(drvdata);
/*
* There is no way to know when the tracer will be used again so
* power down the tracer.
*/
etm_set_pwrdwn(drvdata);
coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
/*
* perf will release trace ids when _free_aux()
* is called at the end of the session
*/
}
static void etm_disable_sysfs(struct coresight_device *csdev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
/*
* Taking the hotplug lock here protects against the clocks getting
* disabled with tracing left on (crash scenario) if a user disable
* occurs after the cpu online mask indicates the cpu is offline but
* before the DYING hotplug callback is serviced by the ETM driver.
*/
cpus_read_lock();
spin_lock(&drvdata->spinlock);
/*
* Executing etm_disable_hw on the cpu whose ETM is being disabled
* ensures that register writes occur when cpu is powered.
*/
smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
spin_unlock(&drvdata->spinlock);
cpus_read_unlock();
/*
* We only release trace IDs when resetting sysfs.
* This permits sysfs users to read the trace ID after the trace
* session has completed. This maintains operational behaviour with
* the prior trace ID allocation method.
*/
dev_dbg(&csdev->dev, "ETM tracing disabled\n");
}
static void etm_disable(struct coresight_device *csdev,
struct perf_event *event)
{
enum cs_mode mode;
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
/*
* For as long as the tracer isn't disabled another entity can't
* change its status. As such we can read the status here without
* fearing it will change under us.
*/
mode = local_read(&drvdata->mode);
switch (mode) {
case CS_MODE_DISABLED:
break;
case CS_MODE_SYSFS:
etm_disable_sysfs(csdev);
break;
case CS_MODE_PERF:
etm_disable_perf(csdev);
break;
default:
WARN_ON_ONCE(mode);
return;
}
if (mode)
local_set(&drvdata->mode, CS_MODE_DISABLED);
}
static const struct coresight_ops_source etm_source_ops = {
.cpu_id = etm_cpu_id,
.enable = etm_enable,
.disable = etm_disable,
};
static const struct coresight_ops etm_cs_ops = {
.source_ops = &etm_source_ops,
};
static int etm_online_cpu(unsigned int cpu)
{
if (!etmdrvdata[cpu])
return 0;
if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
coresight_enable(etmdrvdata[cpu]->csdev);
return 0;
}
static int etm_starting_cpu(unsigned int cpu)
{
if (!etmdrvdata[cpu])
return 0;
spin_lock(&etmdrvdata[cpu]->spinlock);
if (!etmdrvdata[cpu]->os_unlock) {
etm_os_unlock(etmdrvdata[cpu]);
etmdrvdata[cpu]->os_unlock = true;
}
if (local_read(&etmdrvdata[cpu]->mode))
etm_enable_hw(etmdrvdata[cpu]);
spin_unlock(&etmdrvdata[cpu]->spinlock);
return 0;
}
static int etm_dying_cpu(unsigned int cpu)
{
if (!etmdrvdata[cpu])
return 0;
spin_lock(&etmdrvdata[cpu]->spinlock);
if (local_read(&etmdrvdata[cpu]->mode))
etm_disable_hw(etmdrvdata[cpu]);
spin_unlock(&etmdrvdata[cpu]->spinlock);
return 0;
}
static bool etm_arch_supported(u8 arch)
{
switch (arch) {
case ETM_ARCH_V3_3:
break;
case ETM_ARCH_V3_5:
break;
case PFT_ARCH_V1_0:
break;
case PFT_ARCH_V1_1:
break;
default:
return false;
}
return true;
}
static void etm_init_arch_data(void *info)
{
u32 etmidr;
u32 etmccr;
struct etm_drvdata *drvdata = info;
/* Make sure all registers are accessible */
etm_os_unlock(drvdata);
CS_UNLOCK(drvdata->base);
/* First dummy read */
(void)etm_readl(drvdata, ETMPDSR);
/* Provide power to ETM: ETMPDCR[3] == 1 */
etm_set_pwrup(drvdata);
/*
* Clear the power down bit, since writes to certain
* registers might be ignored while it is set.
*/
etm_clr_pwrdwn(drvdata);
/*
* Set prog bit. It will be set from reset but this is included to
* ensure it is set.
*/
etm_set_prog(drvdata);
/* Find all capabilities */
etmidr = etm_readl(drvdata, ETMIDR);
drvdata->arch = BMVAL(etmidr, 4, 11);
drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;
drvdata->etmccer = etm_readl(drvdata, ETMCCER);
etmccr = etm_readl(drvdata, ETMCCR);
drvdata->etmccr = etmccr;
drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
etm_set_pwrdwn(drvdata);
etm_clr_pwrup(drvdata);
CS_LOCK(drvdata->base);
}
static int __init etm_hp_setup(void)
{
int ret;
ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
"arm/coresight:starting",
etm_starting_cpu, etm_dying_cpu);
if (ret)
return ret;
ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
"arm/coresight:online",
etm_online_cpu, NULL);
/* HP dyn state ID returned in ret on success */
if (ret > 0) {
hp_online = ret;
return 0;
}
/* failed dyn state - remove others */
cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
return ret;
}
static void etm_hp_clear(void)
{
cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
if (hp_online) {
cpuhp_remove_state_nocalls(hp_online);
hp_online = 0;
}
}
static int etm_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
void __iomem *base;
struct device *dev = &adev->dev;
struct coresight_platform_data *pdata = NULL;
struct etm_drvdata *drvdata;
struct resource *res = &adev->res;
struct coresight_desc desc = { 0 };
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->use_cp14 = fwnode_property_read_bool(dev->fwnode, "arm,cp14");
dev_set_drvdata(dev, drvdata);
/* Validity for the resource is already checked by the AMBA core */
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
drvdata->base = base;
desc.access = CSDEV_ACCESS_IOMEM(base);
spin_lock_init(&drvdata->spinlock);
drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
if (ret)
return ret;
}
drvdata->cpu = coresight_get_cpu(dev);
if (drvdata->cpu < 0)
return drvdata->cpu;
desc.name = devm_kasprintf(dev, GFP_KERNEL, "etm%d", drvdata->cpu);
if (!desc.name)
return -ENOMEM;
if (smp_call_function_single(drvdata->cpu,
etm_init_arch_data, drvdata, 1))
dev_err(dev, "ETM arch init failed\n");
if (!etm_arch_supported(drvdata->arch))
return -EINVAL;
etm_set_default(&drvdata->config);
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
adev->dev.platform_data = pdata;
desc.type = CORESIGHT_DEV_TYPE_SOURCE;
desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
desc.ops = &etm_cs_ops;
desc.pdata = pdata;
desc.dev = dev;
desc.groups = coresight_etm_groups;
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
ret = etm_perf_symlink(drvdata->csdev, true);
if (ret) {
coresight_unregister(drvdata->csdev);
return ret;
}
etmdrvdata[drvdata->cpu] = drvdata;
pm_runtime_put(&adev->dev);
dev_info(&drvdata->csdev->dev,
"%s initialized\n", (char *)coresight_get_uci_data(id));
if (boot_enable) {
coresight_enable(drvdata->csdev);
drvdata->boot_enable = true;
}
return 0;
}
static void clear_etmdrvdata(void *info)
{
int cpu = *(int *)info;
etmdrvdata[cpu] = NULL;
}
static void etm_remove(struct amba_device *adev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(&adev->dev);
etm_perf_symlink(drvdata->csdev, false);
/*
* Taking hotplug lock here to avoid racing between etm_remove and
* CPU hotplug call backs.
*/
cpus_read_lock();
/*
* The readers of etmdrvdata[] are the CPU hotplug and PM
* notification callbacks. Changing etmdrvdata[i] on CPU i
* ensures these callbacks see a consistent view within a
* single callback invocation.
*/
if (smp_call_function_single(drvdata->cpu, clear_etmdrvdata, &drvdata->cpu, 1))
etmdrvdata[drvdata->cpu] = NULL;
cpus_read_unlock();
coresight_unregister(drvdata->csdev);
}
#ifdef CONFIG_PM
static int etm_runtime_suspend(struct device *dev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev);
if (drvdata && !IS_ERR(drvdata->atclk))
clk_disable_unprepare(drvdata->atclk);
return 0;
}
static int etm_runtime_resume(struct device *dev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev);
if (drvdata && !IS_ERR(drvdata->atclk))
clk_prepare_enable(drvdata->atclk);
return 0;
}
#endif
static const struct dev_pm_ops etm_dev_pm_ops = {
SET_RUNTIME_PM_OPS(etm_runtime_suspend, etm_runtime_resume, NULL)
};
static const struct amba_id etm_ids[] = {
/* ETM 3.3 */
CS_AMBA_ID_DATA(0x000bb921, "ETM 3.3"),
/* ETM 3.5 - Cortex-A5 */
CS_AMBA_ID_DATA(0x000bb955, "ETM 3.5"),
/* ETM 3.5 */
CS_AMBA_ID_DATA(0x000bb956, "ETM 3.5"),
/* PTM 1.0 */
CS_AMBA_ID_DATA(0x000bb950, "PTM 1.0"),
/* PTM 1.1 */
CS_AMBA_ID_DATA(0x000bb95f, "PTM 1.1"),
/* PTM 1.1 Qualcomm */
CS_AMBA_ID_DATA(0x000b006f, "PTM 1.1"),
{ 0, 0},
};
MODULE_DEVICE_TABLE(amba, etm_ids);
static struct amba_driver etm_driver = {
.drv = {
.name = "coresight-etm3x",
.owner = THIS_MODULE,
.pm = &etm_dev_pm_ops,
.suppress_bind_attrs = true,
},
.probe = etm_probe,
.remove = etm_remove,
.id_table = etm_ids,
};
static int __init etm_init(void)
{
int ret;
ret = etm_hp_setup();
/* etm_hp_setup() does its own cleanup - exit on error */
if (ret)
return ret;
ret = amba_driver_register(&etm_driver);
if (ret) {
pr_err("Error registering etm3x driver\n");
etm_hp_clear();
}
return ret;
}
static void __exit etm_exit(void)
{
amba_driver_unregister(&etm_driver);
etm_hp_clear();
}
module_init(etm_init);
module_exit(etm_exit);
MODULE_AUTHOR("Pratik Patel <[email protected]>");
MODULE_AUTHOR("Mathieu Poirier <[email protected]>");
MODULE_DESCRIPTION("Arm CoreSight Program Flow Trace driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/hwtracing/coresight/coresight-etm3x-core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017 Linaro Limited. All rights reserved.
*
* Author: Leo Yan <[email protected]>
*/
#include <linux/amba/bus.h>
#include <linux/coresight.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/panic_notifier.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "coresight-priv.h"
#define EDPCSR 0x0A0
#define EDCIDSR 0x0A4
#define EDVIDSR 0x0A8
#define EDPCSR_HI 0x0AC
#define EDOSLAR 0x300
#define EDPRCR 0x310
#define EDPRSR 0x314
#define EDDEVID1 0xFC4
#define EDDEVID 0xFC8
#define EDPCSR_PROHIBITED 0xFFFFFFFF
/* bits definition for EDPCSR */
#define EDPCSR_THUMB BIT(0)
#define EDPCSR_ARM_INST_MASK GENMASK(31, 2)
#define EDPCSR_THUMB_INST_MASK GENMASK(31, 1)
/* bits definition for EDPRCR */
#define EDPRCR_COREPURQ BIT(3)
#define EDPRCR_CORENPDRQ BIT(0)
/* bits definition for EDPRSR */
#define EDPRSR_DLK BIT(6)
#define EDPRSR_PU BIT(0)
/* bits definition for EDVIDSR */
#define EDVIDSR_NS BIT(31)
#define EDVIDSR_E2 BIT(30)
#define EDVIDSR_E3 BIT(29)
#define EDVIDSR_HV BIT(28)
#define EDVIDSR_VMID GENMASK(7, 0)
/*
* bits definition for EDDEVID1:PSCROffset
*
* NOTE: armv8 and armv7 have different definitions for this register,
* so the bit definitions are consolidated as below:
*
* 0b0000 - A sample offset applies based on the instruction state;
* we rely on EDDEVID to check whether EDPCSR is implemented
* 0b0001 - No offset applies.
* 0b0010 - No offset applies, but do not use in AArch32 mode
*
*/
#define EDDEVID1_PCSR_OFFSET_MASK GENMASK(3, 0)
#define EDDEVID1_PCSR_OFFSET_INS_SET (0x0)
#define EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32 (0x2)
/* bits definition for EDDEVID */
#define EDDEVID_PCSAMPLE_MODE GENMASK(3, 0)
#define EDDEVID_IMPL_EDPCSR (0x1)
#define EDDEVID_IMPL_EDPCSR_EDCIDSR (0x2)
#define EDDEVID_IMPL_FULL (0x3)
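/*
 * Example decode (hypothetical value): eddevid = 0x3 means
 * EDDEVID.PCSAMPLE_MODE == EDDEVID_IMPL_FULL, i.e. EDPCSR, EDCIDSR
 * and EDVIDSR are all implemented.
 */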
#define DEBUG_WAIT_SLEEP 1000
#define DEBUG_WAIT_TIMEOUT 32000
struct debug_drvdata {
void __iomem *base;
struct device *dev;
int cpu;
bool edpcsr_present;
bool edcidsr_present;
bool edvidsr_present;
bool pc_has_offset;
u32 edpcsr;
u32 edpcsr_hi;
u32 edprsr;
u32 edvidsr;
u32 edcidsr;
};
static DEFINE_MUTEX(debug_lock);
static DEFINE_PER_CPU(struct debug_drvdata *, debug_drvdata);
static int debug_count;
static struct dentry *debug_debugfs_dir;
static bool debug_enable = IS_ENABLED(CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON);
module_param_named(enable, debug_enable, bool, 0600);
MODULE_PARM_DESC(enable, "Control to enable coresight CPU debug functionality");
static void debug_os_unlock(struct debug_drvdata *drvdata)
{
/* Unlocks the debug registers */
writel_relaxed(0x0, drvdata->base + EDOSLAR);
/* Make sure the registers are unlocked before accessing */
wmb();
}
/*
* According to ARM DDI 0487A.k, the access permission must be
* checked before accessing the external debug registers; if either
* of the conditions below is met, the debug registers must not be
* accessed, otherwise the access may lock up:
*
* - the CPU power domain is powered off;
* - the OS Double Lock is locked.
*
* Both conditions can be determined by reading EDPRSR.
*/
static bool debug_access_permitted(struct debug_drvdata *drvdata)
{
/* CPU is powered off */
if (!(drvdata->edprsr & EDPRSR_PU))
return false;
/* The OS Double Lock is locked */
if (drvdata->edprsr & EDPRSR_DLK)
return false;
return true;
}
static void debug_force_cpu_powered_up(struct debug_drvdata *drvdata)
{
u32 edprcr;
try_again:
/*
* Send a request to the power management controller and assert
* the DBGPWRUPREQ signal; a sane power management controller
* implementation should enable the CPU power domain when the
* CPU is in a low power state.
*/
edprcr = readl_relaxed(drvdata->base + EDPRCR);
edprcr |= EDPRCR_COREPURQ;
writel_relaxed(edprcr, drvdata->base + EDPRCR);
/* Wait for CPU to be powered up (timeout~=32ms) */
if (readx_poll_timeout_atomic(readl_relaxed, drvdata->base + EDPRSR,
drvdata->edprsr, (drvdata->edprsr & EDPRSR_PU),
DEBUG_WAIT_SLEEP, DEBUG_WAIT_TIMEOUT)) {
/*
* Unfortunately the CPU cannot be powered up, so bail out;
* subsequent accesses to the other registers would not be
* permitted. In this case CPU low power states should be
* disabled to keep the CPU power domain enabled!
*/
dev_err(drvdata->dev, "%s: power up request for CPU%d failed\n",
__func__, drvdata->cpu);
return;
}
/*
* At this point the CPU is powered up, so set the no powerdown
* request bit so we don't lose power and emulate power down.
*/
edprcr = readl_relaxed(drvdata->base + EDPRCR);
edprcr |= EDPRCR_COREPURQ | EDPRCR_CORENPDRQ;
writel_relaxed(edprcr, drvdata->base + EDPRCR);
drvdata->edprsr = readl_relaxed(drvdata->base + EDPRSR);
/* The core power domain got switched off on use, try again */
if (unlikely(!(drvdata->edprsr & EDPRSR_PU)))
goto try_again;
}
static void debug_read_regs(struct debug_drvdata *drvdata)
{
u32 save_edprcr;
CS_UNLOCK(drvdata->base);
/* Unlock os lock */
debug_os_unlock(drvdata);
/* Save EDPRCR register */
save_edprcr = readl_relaxed(drvdata->base + EDPRCR);
/*
* Ensure the CPU power domain is enabled so that the
* registers are accessible.
*/
debug_force_cpu_powered_up(drvdata);
if (!debug_access_permitted(drvdata))
goto out;
drvdata->edpcsr = readl_relaxed(drvdata->base + EDPCSR);
/*
* As described in ARM DDI 0487A.k, if the processing
* element (PE) is in debug state, or sample-based
* profiling is prohibited, EDPCSR reads as 0xFFFFFFFF;
* the EDCIDSR, EDVIDSR and EDPCSR_HI registers then also hold
* UNKNOWN values, so bail out directly in this case.
*/
if (drvdata->edpcsr == EDPCSR_PROHIBITED)
goto out;
/*
* A read of the EDPCSR normally has the side-effect of
* indirectly writing to EDCIDSR, EDVIDSR and EDPCSR_HI;
* at this point it's safe to read value from them.
*/
if (IS_ENABLED(CONFIG_64BIT))
drvdata->edpcsr_hi = readl_relaxed(drvdata->base + EDPCSR_HI);
if (drvdata->edcidsr_present)
drvdata->edcidsr = readl_relaxed(drvdata->base + EDCIDSR);
if (drvdata->edvidsr_present)
drvdata->edvidsr = readl_relaxed(drvdata->base + EDVIDSR);
out:
/* Restore EDPRCR register */
writel_relaxed(save_edprcr, drvdata->base + EDPRCR);
CS_LOCK(drvdata->base);
}
#ifdef CONFIG_64BIT
static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
{
return (unsigned long)drvdata->edpcsr_hi << 32 |
(unsigned long)drvdata->edpcsr;
}
#else
static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
{
unsigned long arm_inst_offset = 0, thumb_inst_offset = 0;
unsigned long pc;
pc = (unsigned long)drvdata->edpcsr;
if (drvdata->pc_has_offset) {
arm_inst_offset = 8;
thumb_inst_offset = 4;
}
/* Handle thumb instruction */
if (pc & EDPCSR_THUMB) {
pc = (pc & EDPCSR_THUMB_INST_MASK) - thumb_inst_offset;
return pc;
}
/*
* Handle the ARM instruction offset: if the ARM instruction
* address is not 4-byte aligned, the behaviour may be
* implementation defined; keep the original value in that
* case and print a notice.
*/
if (pc & BIT(1))
dev_emerg(drvdata->dev,
"Instruction offset is implementation defined\n");
else
pc = (pc & EDPCSR_ARM_INST_MASK) - arm_inst_offset;
return pc;
}
#endif
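/*
 * Worked example for the 32-bit path (sample value is hypothetical):
 * with pc_has_offset set, an EDPCSR value of 0x80001005 has
 * EDPCSR_THUMB set, so the PC decodes as
 * (0x80001005 & EDPCSR_THUMB_INST_MASK) - 4 == 0x80001000.
 */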
static void debug_dump_regs(struct debug_drvdata *drvdata)
{
struct device *dev = drvdata->dev;
unsigned long pc;
dev_emerg(dev, " EDPRSR: %08x (Power:%s DLK:%s)\n",
drvdata->edprsr,
drvdata->edprsr & EDPRSR_PU ? "On" : "Off",
drvdata->edprsr & EDPRSR_DLK ? "Lock" : "Unlock");
if (!debug_access_permitted(drvdata)) {
dev_emerg(dev, "No permission to access debug registers!\n");
return;
}
if (drvdata->edpcsr == EDPCSR_PROHIBITED) {
dev_emerg(dev, "CPU is in Debug state or profiling is prohibited!\n");
return;
}
pc = debug_adjust_pc(drvdata);
dev_emerg(dev, " EDPCSR: %pS\n", (void *)pc);
if (drvdata->edcidsr_present)
dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr);
if (drvdata->edvidsr_present)
dev_emerg(dev, " EDVIDSR: %08x (State:%s Mode:%s Width:%dbits VMID:%x)\n",
drvdata->edvidsr,
drvdata->edvidsr & EDVIDSR_NS ?
"Non-secure" : "Secure",
drvdata->edvidsr & EDVIDSR_E3 ? "EL3" :
(drvdata->edvidsr & EDVIDSR_E2 ?
"EL2" : "EL1/0"),
drvdata->edvidsr & EDVIDSR_HV ? 64 : 32,
drvdata->edvidsr & (u32)EDVIDSR_VMID);
}
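/*
 * Illustrative panic-time output (register values are made up):
 *  EDPRSR:  00000001 (Power:On DLK:Unlock)
 *  EDPCSR:  handle_IPI+0x174/0x1d8
 *  EDCIDSR: 00000000
 *  EDVIDSR: 80000000 (State:Non-secure Mode:EL1/0 Width:32bits VMID:0)
 */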
static void debug_init_arch_data(void *info)
{
struct debug_drvdata *drvdata = info;
u32 mode, pcsr_offset;
u32 eddevid, eddevid1;
CS_UNLOCK(drvdata->base);
/* Read device info */
eddevid = readl_relaxed(drvdata->base + EDDEVID);
eddevid1 = readl_relaxed(drvdata->base + EDDEVID1);
CS_LOCK(drvdata->base);
/* Parse implementation feature */
mode = eddevid & EDDEVID_PCSAMPLE_MODE;
pcsr_offset = eddevid1 & EDDEVID1_PCSR_OFFSET_MASK;
drvdata->edpcsr_present = false;
drvdata->edcidsr_present = false;
drvdata->edvidsr_present = false;
drvdata->pc_has_offset = false;
switch (mode) {
case EDDEVID_IMPL_FULL:
drvdata->edvidsr_present = true;
fallthrough;
case EDDEVID_IMPL_EDPCSR_EDCIDSR:
drvdata->edcidsr_present = true;
fallthrough;
case EDDEVID_IMPL_EDPCSR:
/*
* In ARM DDI 0487A.k, EDDEVID1.PCSROffset defines whether
* an offset applies to the PC sampling value; if
* EDDEVID1.PCSROffset reads back as 0x2, the debug
* module does not sample the instruction set state when an
* armv8 CPU is in AArch32 state.
*/
drvdata->edpcsr_present =
((IS_ENABLED(CONFIG_64BIT) && pcsr_offset != 0) ||
(pcsr_offset != EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32));
drvdata->pc_has_offset =
(pcsr_offset == EDDEVID1_PCSR_OFFSET_INS_SET);
break;
default:
break;
}
}
/*
* Dump out information on panic.
*/
static int debug_notifier_call(struct notifier_block *self,
unsigned long v, void *p)
{
int cpu;
struct debug_drvdata *drvdata;
/* Bail out if we can't acquire the mutex or the functionality is off */
if (!mutex_trylock(&debug_lock))
return NOTIFY_DONE;
if (!debug_enable)
goto skip_dump;
pr_emerg("ARM external debug module:\n");
for_each_possible_cpu(cpu) {
drvdata = per_cpu(debug_drvdata, cpu);
if (!drvdata)
continue;
dev_emerg(drvdata->dev, "CPU[%d]:\n", drvdata->cpu);
debug_read_regs(drvdata);
debug_dump_regs(drvdata);
}
skip_dump:
mutex_unlock(&debug_lock);
return NOTIFY_DONE;
}
static struct notifier_block debug_notifier = {
.notifier_call = debug_notifier_call,
};
static int debug_enable_func(void)
{
struct debug_drvdata *drvdata;
int cpu, ret = 0;
cpumask_t mask;
/*
* Use a cpumask to track which debug power domains have
* been powered on, and use it to roll back on failure.
*/
cpumask_clear(&mask);
for_each_possible_cpu(cpu) {
drvdata = per_cpu(debug_drvdata, cpu);
if (!drvdata)
continue;
ret = pm_runtime_get_sync(drvdata->dev);
if (ret < 0)
goto err;
else
cpumask_set_cpu(cpu, &mask);
}
return 0;
err:
/*
* If pm_runtime_get_sync() has failed, roll back all the
* CPUs that were enabled before the failure.
*/
for_each_cpu(cpu, &mask) {
drvdata = per_cpu(debug_drvdata, cpu);
pm_runtime_put_noidle(drvdata->dev);
}
return ret;
}
static int debug_disable_func(void)
{
struct debug_drvdata *drvdata;
int cpu, ret, err = 0;
/*
* Disable the debug power domains; record any error but
* keep iterating over the remaining CPUs when one is
* encountered.
*/
for_each_possible_cpu(cpu) {
drvdata = per_cpu(debug_drvdata, cpu);
if (!drvdata)
continue;
ret = pm_runtime_put(drvdata->dev);
if (ret < 0)
err = ret;
}
return err;
}
static ssize_t debug_func_knob_write(struct file *f,
const char __user *buf, size_t count, loff_t *ppos)
{
u8 val;
int ret;
ret = kstrtou8_from_user(buf, count, 2, &val);
if (ret)
return ret;
mutex_lock(&debug_lock);
if (val == debug_enable)
goto out;
if (val)
ret = debug_enable_func();
else
ret = debug_disable_func();
if (ret) {
pr_err("%s: unable to %s debug function: %d\n",
__func__, val ? "enable" : "disable", ret);
goto err;
}
debug_enable = val;
out:
ret = count;
err:
mutex_unlock(&debug_lock);
return ret;
}
static ssize_t debug_func_knob_read(struct file *f,
char __user *ubuf, size_t count, loff_t *ppos)
{
ssize_t ret;
char buf[3];
mutex_lock(&debug_lock);
snprintf(buf, sizeof(buf), "%d\n", debug_enable);
mutex_unlock(&debug_lock);
ret = simple_read_from_buffer(ubuf, count, ppos, buf, sizeof(buf));
return ret;
}
static const struct file_operations debug_func_knob_fops = {
.open = simple_open,
.read = debug_func_knob_read,
.write = debug_func_knob_write,
};
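/*
 * Example (assuming debugfs is mounted at its default location), the
 * knob defined above can be flipped at runtime:
 *   # echo 0 > /sys/kernel/debug/coresight_cpu_debug/enable
 */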
static int debug_func_init(void)
{
int ret;
/* Create debugfs node */
debug_debugfs_dir = debugfs_create_dir("coresight_cpu_debug", NULL);
debugfs_create_file("enable", 0644, debug_debugfs_dir, NULL,
&debug_func_knob_fops);
/* Register function to be called for panic */
ret = atomic_notifier_chain_register(&panic_notifier_list,
&debug_notifier);
if (ret) {
pr_err("%s: unable to register notifier: %d\n",
__func__, ret);
goto err;
}
return 0;
err:
debugfs_remove_recursive(debug_debugfs_dir);
return ret;
}
static void debug_func_exit(void)
{
atomic_notifier_chain_unregister(&panic_notifier_list,
&debug_notifier);
debugfs_remove_recursive(debug_debugfs_dir);
}
static int debug_probe(struct amba_device *adev, const struct amba_id *id)
{
void __iomem *base;
struct device *dev = &adev->dev;
struct debug_drvdata *drvdata;
struct resource *res = &adev->res;
int ret;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->cpu = coresight_get_cpu(dev);
if (drvdata->cpu < 0)
return drvdata->cpu;
if (per_cpu(debug_drvdata, drvdata->cpu)) {
dev_err(dev, "CPU%d drvdata has already been initialized\n",
drvdata->cpu);
return -EBUSY;
}
drvdata->dev = &adev->dev;
amba_set_drvdata(adev, drvdata);
/* Validity for the resource is already checked by the AMBA core */
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
drvdata->base = base;
cpus_read_lock();
per_cpu(debug_drvdata, drvdata->cpu) = drvdata;
ret = smp_call_function_single(drvdata->cpu, debug_init_arch_data,
drvdata, 1);
cpus_read_unlock();
if (ret) {
dev_err(dev, "CPU%d debug arch init failed\n", drvdata->cpu);
goto err;
}
if (!drvdata->edpcsr_present) {
dev_err(dev, "CPU%d sample-based profiling isn't implemented\n",
drvdata->cpu);
ret = -ENXIO;
goto err;
}
if (!debug_count++) {
ret = debug_func_init();
if (ret)
goto err_func_init;
}
mutex_lock(&debug_lock);
/* Turn off debug power domain if debugging is disabled */
if (!debug_enable)
pm_runtime_put(dev);
mutex_unlock(&debug_lock);
dev_info(dev, "Coresight debug-CPU%d initialized\n", drvdata->cpu);
return 0;
err_func_init:
debug_count--;
err:
per_cpu(debug_drvdata, drvdata->cpu) = NULL;
return ret;
}
static void debug_remove(struct amba_device *adev)
{
struct device *dev = &adev->dev;
struct debug_drvdata *drvdata = amba_get_drvdata(adev);
per_cpu(debug_drvdata, drvdata->cpu) = NULL;
mutex_lock(&debug_lock);
/* Turn off debug power domain before rmmod the module */
if (debug_enable)
pm_runtime_put(dev);
mutex_unlock(&debug_lock);
if (!--debug_count)
debug_func_exit();
}
static const struct amba_cs_uci_id uci_id_debug[] = {
{
/* CPU Debug UCI data */
.devarch = 0x47706a15,
.devarch_mask = 0xfff0ffff,
.devtype = 0x00000015,
}
};
static const struct amba_id debug_ids[] = {
CS_AMBA_ID(0x000bbd03), /* Cortex-A53 */
CS_AMBA_ID(0x000bbd07), /* Cortex-A57 */
CS_AMBA_ID(0x000bbd08), /* Cortex-A72 */
CS_AMBA_ID(0x000bbd09), /* Cortex-A73 */
CS_AMBA_UCI_ID(0x000f0205, uci_id_debug), /* Qualcomm Kryo */
CS_AMBA_UCI_ID(0x000f0211, uci_id_debug), /* Qualcomm Kryo */
{},
};
MODULE_DEVICE_TABLE(amba, debug_ids);
static struct amba_driver debug_driver = {
.drv = {
.name = "coresight-cpu-debug",
.suppress_bind_attrs = true,
},
.probe = debug_probe,
.remove = debug_remove,
.id_table = debug_ids,
};
module_amba_driver(debug_driver);
MODULE_AUTHOR("Leo Yan <[email protected]>");
MODULE_DESCRIPTION("ARM Coresight CPU Debug Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/hwtracing/coresight/coresight-cpu-debug.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This driver enables the Trace Buffer Extension (TRBE) as a per-cpu
* coresight sink device, which can then pair with an appropriate per-cpu
* coresight source device (ETE), thus generating the required trace data.
* Trace can be enabled via the perf framework.
*
* The AUX buffer handling is inspired from Arm SPE PMU driver.
*
* Copyright (C) 2020 ARM Ltd.
*
* Author: Anshuman Khandual <[email protected]>
*/
#define DRVNAME "arm_trbe"
#define pr_fmt(fmt) DRVNAME ": " fmt
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include "coresight-self-hosted-trace.h"
#include "coresight-trbe.h"
#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
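/*
 * Worked example (illustrative, assuming 4K pages, i.e. PAGE_SHIFT == 12):
 * with nr_pages == 4 the AUX buffer spans 0x4000 bytes, so a monotonically
 * increasing handle index of 0x9050 maps to the buffer offset
 * 0x9050 % 0x4000 == 0x1050.
 */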
/*
* A padding packet that helps the user space tools
* skip sections of the captured trace data that
* could not be decoded. TRBE doesn't support
* formatting the trace data, unlike the legacy
* CoreSight sinks, and thus we use ETE trace packets
* to pad the affected sections of the buffer.
*/
#define ETE_IGNORE_PACKET 0x70
/*
* Minimum amount of meaningful trace will contain:
* A-Sync, Trace Info, Trace On, Address, Atom.
* This is about 44 bytes of ETE trace. To be on
* the safe side, we assume 64 bytes is the minimum
* space required for a meaningful session, before
* we hit a "WRAP" event.
*/
#define TRBE_TRACE_MIN_BUF_SIZE 64
enum trbe_fault_action {
TRBE_FAULT_ACT_WRAP,
TRBE_FAULT_ACT_SPURIOUS,
TRBE_FAULT_ACT_FATAL,
};
struct trbe_buf {
/*
* Even though trbe_base represents the vmap()
* mapped allocated buffer's start address, it
* is kept as an unsigned long for the various
* arithmetic and comparison operations, and
* also to be consistent with the trbe_write and
* trbe_limit sibling pointers.
*/
unsigned long trbe_base;
/* The base programmed into the TRBE */
unsigned long trbe_hw_base;
unsigned long trbe_limit;
unsigned long trbe_write;
int nr_pages;
void **pages;
bool snapshot;
struct trbe_cpudata *cpudata;
};
/*
* TRBE erratum list
*
* The errata are defined in arm64 generic cpu_errata framework.
* Since the errata workarounds could be applied individually
* to the affected CPUs inside the TRBE driver, we need to know if
* a given CPU is affected by the erratum. Unlike the other erratum
* workarounds, the TRBE driver needs to check multiple times during
* a trace session. Thus we need quick access to the per-CPU
* errata and cannot issue the costly this_cpu_has_cap() every time.
* We keep a set of the affected errata in trbe_cpudata, per TRBE.
*
* We rely on the corresponding cpucaps to be defined for a given
* TRBE erratum. We map the given cpucap into a TRBE internal number
* to make the tracking of the errata lean.
*
* This helps in :
* - Not duplicating the detection logic
* - Streamlined detection of erratum across the system
*/
#define TRBE_WORKAROUND_OVERWRITE_FILL_MODE 0
#define TRBE_WORKAROUND_WRITE_OUT_OF_RANGE 1
#define TRBE_NEEDS_DRAIN_AFTER_DISABLE 2
#define TRBE_NEEDS_CTXT_SYNC_AFTER_ENABLE 3
#define TRBE_IS_BROKEN 4
static int trbe_errata_cpucaps[] = {
[TRBE_WORKAROUND_OVERWRITE_FILL_MODE] = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
[TRBE_WORKAROUND_WRITE_OUT_OF_RANGE] = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
[TRBE_NEEDS_DRAIN_AFTER_DISABLE] = ARM64_WORKAROUND_2064142,
[TRBE_NEEDS_CTXT_SYNC_AFTER_ENABLE] = ARM64_WORKAROUND_2038923,
[TRBE_IS_BROKEN] = ARM64_WORKAROUND_1902691,
-1, /* Sentinel, must be the last entry */
};
/* The total number of listed errata in trbe_errata_cpucaps */
#define TRBE_ERRATA_MAX (ARRAY_SIZE(trbe_errata_cpucaps) - 1)
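/*
 * For instance (illustrative): on a CPU with the
 * ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE capability, trbe_check_errata()
 * sets bit TRBE_WORKAROUND_WRITE_OUT_OF_RANGE (1) in cpudata->errata, and
 * trbe_may_write_out_of_range() subsequently returns true on that CPU.
 */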
/*
* Safe limit for the number of bytes that may be overwritten
* when ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE is triggered.
*/
#define TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES 256
/*
* struct trbe_cpudata: TRBE instance specific data
* @trbe_flag - TRBE dirty/access flag support
* @trbe_hw_align - Actual TRBE alignment required for TRBPTR_EL1.
* @trbe_align - Software alignment used for the TRBPTR_EL1.
* @cpu - CPU this TRBE belongs to.
* @mode - Mode of current operation. (perf/disabled)
* @buf - The TRBE buffer currently in use, if any.
* @drvdata - TRBE specific drvdata
* @errata - Bit map for the errata on this TRBE.
*/
struct trbe_cpudata {
bool trbe_flag;
u64 trbe_hw_align;
u64 trbe_align;
int cpu;
enum cs_mode mode;
struct trbe_buf *buf;
struct trbe_drvdata *drvdata;
DECLARE_BITMAP(errata, TRBE_ERRATA_MAX);
};
struct trbe_drvdata {
struct trbe_cpudata __percpu *cpudata;
struct perf_output_handle * __percpu *handle;
struct hlist_node hotplug_node;
int irq;
cpumask_t supported_cpus;
enum cpuhp_state trbe_online;
struct platform_device *pdev;
};
static void trbe_check_errata(struct trbe_cpudata *cpudata)
{
int i;
for (i = 0; i < TRBE_ERRATA_MAX; i++) {
int cap = trbe_errata_cpucaps[i];
if (WARN_ON_ONCE(cap < 0))
return;
if (this_cpu_has_cap(cap))
set_bit(i, cpudata->errata);
}
}
static inline bool trbe_has_erratum(struct trbe_cpudata *cpudata, int i)
{
return (i < TRBE_ERRATA_MAX) && test_bit(i, cpudata->errata);
}
static inline bool trbe_may_overwrite_in_fill_mode(struct trbe_cpudata *cpudata)
{
return trbe_has_erratum(cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE);
}
static inline bool trbe_may_write_out_of_range(struct trbe_cpudata *cpudata)
{
return trbe_has_erratum(cpudata, TRBE_WORKAROUND_WRITE_OUT_OF_RANGE);
}
static inline bool trbe_needs_drain_after_disable(struct trbe_cpudata *cpudata)
{
/*
* An errata affected TRBE implementation needs a TSB CSYNC and
* a DSB in order to prevent subsequent writes into certain TRBE
* system registers from being ignored and not taking effect.
*/
return trbe_has_erratum(cpudata, TRBE_NEEDS_DRAIN_AFTER_DISABLE);
}
static inline bool trbe_needs_ctxt_sync_after_enable(struct trbe_cpudata *cpudata)
{
/*
* An errata affected TRBE implementation needs an additional
* context synchronization in order to prevent an inconsistent
* view of the TRBE prohibited region on the CPU, which could
* otherwise corrupt the TRBE buffer or the TRBE state.
*/
return trbe_has_erratum(cpudata, TRBE_NEEDS_CTXT_SYNC_AFTER_ENABLE);
}
static inline bool trbe_is_broken(struct trbe_cpudata *cpudata)
{
return trbe_has_erratum(cpudata, TRBE_IS_BROKEN);
}
static int trbe_alloc_node(struct perf_event *event)
{
if (event->cpu == -1)
return NUMA_NO_NODE;
return cpu_to_node(event->cpu);
}
static inline void trbe_drain_buffer(void)
{
tsb_csync();
dsb(nsh);
}
static inline void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr)
{
/*
* Enable the TRBE without clearing LIMITPTR which
* might be required for fetching the buffer limits.
*/
trblimitr |= TRBLIMITR_EL1_E;
write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
/* Synchronize the TRBE enable event */
isb();
if (trbe_needs_ctxt_sync_after_enable(cpudata))
isb();
}
static inline void set_trbe_disabled(struct trbe_cpudata *cpudata)
{
u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
/*
* Disable the TRBE without clearing LIMITPTR which
* might be required for fetching the buffer limits.
*/
trblimitr &= ~TRBLIMITR_EL1_E;
write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
if (trbe_needs_drain_after_disable(cpudata))
trbe_drain_buffer();
isb();
}
static void trbe_drain_and_disable_local(struct trbe_cpudata *cpudata)
{
trbe_drain_buffer();
set_trbe_disabled(cpudata);
}
static void trbe_reset_local(struct trbe_cpudata *cpudata)
{
trbe_drain_and_disable_local(cpudata);
write_sysreg_s(0, SYS_TRBLIMITR_EL1);
write_sysreg_s(0, SYS_TRBPTR_EL1);
write_sysreg_s(0, SYS_TRBBASER_EL1);
write_sysreg_s(0, SYS_TRBSR_EL1);
}
static void trbe_report_wrap_event(struct perf_output_handle *handle)
{
/*
* Mark the buffer to indicate that there was a WRAP event by
* setting the COLLISION flag. This indicates to the user that
* the TRBE trace collection was stopped without stopping the
* ETE and thus there might be some amount of trace that was
* lost between the time the WRAP was detected and the IRQ
* was consumed by the CPU.
*
* Setting the TRUNCATED flag would move the event to STOPPED
* state unnecessarily, even when there is space left in the
* ring buffer. Using the COLLISION flag doesn't have this side
* effect. We only set TRUNCATED flag when there is no space
* left in the ring buffer.
*/
perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION);
}
static void trbe_stop_and_truncate_event(struct perf_output_handle *handle)
{
struct trbe_buf *buf = etm_perf_sink_config(handle);
/*
* We cannot proceed with the buffer collection and we
* do not have any data for the current session. The
* etm_perf driver expects to close out the aux_buffer
* at event_stop(). So disable the TRBE here and leave
* the update_buffer() to return a 0 size.
*/
trbe_drain_and_disable_local(buf->cpudata);
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
perf_aux_output_end(handle, 0);
*this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
}
/*
* TRBE Buffer Management
*
* The TRBE buffer spans from the base pointer till the limit pointer. When enabled,
* it starts writing trace data from the write pointer onward till the limit pointer.
* When the write pointer reaches the address just before the limit pointer, it gets
* wrapped around again to the base pointer. This is called a TRBE wrap event, which
* generates a maintenance interrupt when operated in WRAP or FILL mode. This driver
* uses FILL mode, where the TRBE stops the trace collection at wrap event. The IRQ
* handler updates the AUX buffer and re-enables the TRBE with updated WRITE and
* LIMIT pointers.
*
* Wrap around with an IRQ
* ------ < ------ < ------- < ----- < -----
* | |
* ------ > ------ > ------- > ----- > -----
*
* +---------------+-----------------------+
* | | |
* +---------------+-----------------------+
* Base Pointer Write Pointer Limit Pointer
*
* The base and limit pointers always need to be PAGE_SIZE aligned. But the write
* pointer can be aligned to the implementation defined TRBE trace buffer alignment
* as captured in trbe_cpudata->trbe_align.
*
*
* head tail wakeup
* +---------------------------------------+----- ~ ~ ------
* |$$$$$$$|################|$$$$$$$$$$$$$$| |
* +---------------------------------------+----- ~ ~ ------
* Base Pointer Write Pointer Limit Pointer
*
* The perf_output_handle indices (head, tail, wakeup) are monotonically increasing
* values which track all the driver writes and user reads from the perf auxiliary
* buffer. Generally [head..tail] is the area where the driver can write into, unless
* the wakeup is behind the tail. The enabled TRBE buffer span needs to be adjusted and
* configured depending on the perf_output_handle indices, so that the driver does
* not overwrite areas of the perf auxiliary buffer which are being, or are yet to be,
* consumed by the user space. The enabled TRBE buffer area is a moving subset of
* the allocated perf auxiliary buffer.
*/
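/*
 * Worked example of the moving window (illustrative numbers only): with an
 * 8 page (32K) AUX buffer, handle->head == 0x11000 and handle->size ==
 * 0x4000, the window maps to buffer offsets [0x1000..0x5000). The TRBE is
 * programmed to write only within that window, leaving the rest of the
 * buffer untouched for user space to consume.
 */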
static void __trbe_pad_buf(struct trbe_buf *buf, u64 offset, int len)
{
memset((void *)buf->trbe_base + offset, ETE_IGNORE_PACKET, len);
}
static void trbe_pad_buf(struct perf_output_handle *handle, int len)
{
struct trbe_buf *buf = etm_perf_sink_config(handle);
u64 head = PERF_IDX2OFF(handle->head, buf);
__trbe_pad_buf(buf, head, len);
if (!buf->snapshot)
perf_aux_output_skip(handle, len);
}
static unsigned long trbe_snapshot_offset(struct perf_output_handle *handle)
{
struct trbe_buf *buf = etm_perf_sink_config(handle);
/*
* The ETE trace has alignment synchronization packets allowing
* the decoder to reset in case of an overflow or corruption.
* So we can use the entire buffer for the snapshot mode.
*/
return buf->nr_pages * PAGE_SIZE;
}
static u64 trbe_min_trace_buf_size(struct perf_output_handle *handle)
{
u64 size = TRBE_TRACE_MIN_BUF_SIZE;
struct trbe_buf *buf = etm_perf_sink_config(handle);
struct trbe_cpudata *cpudata = buf->cpudata;
/*
* When the TRBE is affected by an erratum that could make it
* write to the next "virtually addressed" page beyond the LIMIT,
* we need to make sure there is always a PAGE after the LIMIT
* within the buffer. Thus we ensure there is at least one extra
* page more than normal. With this we could then adjust the LIMIT
* pointer down by a PAGE later.
*/
if (trbe_may_write_out_of_range(cpudata))
size += PAGE_SIZE;
return size;
}
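/*
 * E.g. (illustrative, 4K pages): an unaffected CPU needs only
 * TRBE_TRACE_MIN_BUF_SIZE (64) bytes between head and limit, while a CPU
 * with the write-out-of-range erratum needs 64 + PAGE_SIZE == 4160 bytes.
 */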
/*
* TRBE Limit Calculation
*
* The following markers are used to illustrate various TRBE buffer situations.
*
* $$$$ - Data area, unconsumed captured trace data, not to be overwritten
* #### - Free area, enabled, trace will be written
* %%%% - Free area, disabled, trace will not be written
* ==== - Free area, padded with ETE_IGNORE_PACKET, trace will be skipped
*/
static unsigned long __trbe_normal_offset(struct perf_output_handle *handle)
{
struct trbe_buf *buf = etm_perf_sink_config(handle);
struct trbe_cpudata *cpudata = buf->cpudata;
const u64 bufsize = buf->nr_pages * PAGE_SIZE;
u64 limit = bufsize;
u64 head, tail, wakeup;
head = PERF_IDX2OFF(handle->head, buf);
/*
* head
* ------->|
* |
* head TRBE align tail
* +----|-------|---------------|-------+
* |$$$$|=======|###############|$$$$$$$|
* +----|-------|---------------|-------+
* trbe_base trbe_base + nr_pages
*
* The perf aux buffer output head position can be misaligned depending on
* various factors, including user space reads. If misaligned, the head
* needs to be aligned before the TRBE can be configured. Pad the alignment
* gap with ETE_IGNORE_PACKET bytes, which will be ignored by the user tools,
* skipping this section and thus advancing the head.
*/
if (!IS_ALIGNED(head, cpudata->trbe_align)) {
unsigned long delta = roundup(head, cpudata->trbe_align) - head;
delta = min(delta, handle->size);
trbe_pad_buf(handle, delta);
head = PERF_IDX2OFF(handle->head, buf);
}
/*
* head = tail (size = 0)
* +----|-------------------------------+
* |$$$$|$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ |
* +----|-------------------------------+
* trbe_base trbe_base + nr_pages
*
* Perf aux buffer does not have any space for the driver to write into.
*/
if (!handle->size)
return 0;
/* Compute the tail and wakeup indices now that we've aligned head */
tail = PERF_IDX2OFF(handle->head + handle->size, buf);
wakeup = PERF_IDX2OFF(handle->wakeup, buf);
/*
* Let's calculate the buffer area which the TRBE could write into. There
* are three possible scenarios here. Limit needs to be aligned with
* PAGE_SIZE per the TRBE requirement. Always avoid clobbering the
* unconsumed data.
*
* 1) head < tail
*
* head tail
* +----|-----------------------|-------+
* |$$$$|#######################|$$$$$$$|
* +----|-----------------------|-------+
* trbe_base limit trbe_base + nr_pages
*
* TRBE could write into the [head..tail] area. Unless the tail is right at
* the end of the buffer, neither a wrap around nor an IRQ is expected
* while being enabled.
*
* 2) head == tail
*
* head = tail (size > 0)
* +----|-------------------------------+
* |%%%%|###############################|
* +----|-------------------------------+
* trbe_base limit = trbe_base + nr_pages
*
* TRBE should just write into the [head..base + nr_pages] area even though
* the entire buffer is empty, because when the trace reaches the
* end of the buffer, it will just wrap around with an IRQ, giving an
* opportunity to reconfigure the buffer.
*
* 3) tail < head
*
* tail head
* +----|-----------------------|-------+
* |%%%%|$$$$$$$$$$$$$$$$$$$$$$$|#######|
* +----|-----------------------|-------+
* trbe_base limit = trbe_base + nr_pages
*
* TRBE should just write into the [head..base + nr_pages] area even though
* the [trbe_base..tail] area is also empty, because when the trace
* reaches the end of the buffer, it will just wrap around with an IRQ,
* giving an opportunity to reconfigure the buffer.
*/
if (head < tail)
limit = round_down(tail, PAGE_SIZE);
/*
* Wakeup may be arbitrarily far into the future. If it's not in the
* current generation, either we'll wrap before hitting it, or it's
* in the past and has been handled already.
*
* If there's a wakeup before we wrap, arrange to be woken up by the
* page boundary following it. Keep the tail boundary if that's lower.
*
* head wakeup tail
* +----|---------------|-------|-------+
* |$$$$|###############|%%%%%%%|$$$$$$$|
* +----|---------------|-------|-------+
* trbe_base limit trbe_base + nr_pages
*/
if (handle->wakeup < (handle->head + handle->size) && head <= wakeup)
limit = min(limit, round_up(wakeup, PAGE_SIZE));
/*
* There are two situations where this can happen, i.e. the limit is before
* the head and hence the TRBE cannot be configured.
*
* 1) head < tail (tail aligned down with PAGE_SIZE) and both of them are
* within the same PAGE_SIZE range.
*
* PAGE_SIZE
* |----------------------|
*
* limit head tail
* +------------|------|--------|-------+
* |$$$$$$$$$$$$$$$$$$$|========|$$$$$$$|
* +------------|------|--------|-------+
* trbe_base trbe_base + nr_pages
*
* 2) head < wakeup (aligned up with PAGE_SIZE) < tail, and both the
* head and the wakeup are within the same PAGE_SIZE range.
*
* PAGE_SIZE
* |----------------------|
*
* limit head wakeup tail
* +----|------|-------|--------|-------+
* |$$$$$$$$$$$|=======|========|$$$$$$$|
* +----|------|-------|--------|-------+
* trbe_base trbe_base + nr_pages
*/
if (limit > head)
return limit;
trbe_pad_buf(handle, handle->size);
return 0;
}
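/*
 * Sketch of scenario (1) above with illustrative numbers (4K pages, 16 page
 * buffer): head == 0x1100 and tail == 0xd2c0, assuming head is already
 * aligned to trbe_align and the wakeup is beyond the tail. Then
 * limit = round_down(0xd2c0, PAGE_SIZE) == 0xd000, which is greater than
 * head, so the proposed TRBE window is [0x1100..0xd000).
 */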
static unsigned long trbe_normal_offset(struct perf_output_handle *handle)
{
struct trbe_buf *buf = etm_perf_sink_config(handle);
u64 limit = __trbe_normal_offset(handle);
u64 head = PERF_IDX2OFF(handle->head, buf);
/*
* If the head is too close to the limit and we don't
* have space for a meaningful run, we would rather pad it
* and start fresh.
*
* We might have to do this more than once to make sure
* we have enough space.
*/
while (limit && ((limit - head) < trbe_min_trace_buf_size(handle))) {
trbe_pad_buf(handle, limit - head);
limit = __trbe_normal_offset(handle);
head = PERF_IDX2OFF(handle->head, buf);
}
return limit;
}
static unsigned long compute_trbe_buffer_limit(struct perf_output_handle *handle)
{
struct trbe_buf *buf = etm_perf_sink_config(handle);
unsigned long offset;
if (buf->snapshot)
offset = trbe_snapshot_offset(handle);
else
offset = trbe_normal_offset(handle);
return buf->trbe_base + offset;
}
static void clr_trbe_status(void)
{
u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1);
WARN_ON(is_trbe_enabled());
trbsr &= ~TRBSR_EL1_IRQ;
trbsr &= ~TRBSR_EL1_TRG;
trbsr &= ~TRBSR_EL1_WRAP;
trbsr &= ~TRBSR_EL1_EC_MASK;
trbsr &= ~TRBSR_EL1_BSC_MASK;
trbsr &= ~TRBSR_EL1_S;
write_sysreg_s(trbsr, SYS_TRBSR_EL1);
}
static void set_trbe_limit_pointer_enabled(struct trbe_buf *buf)
{
u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
unsigned long addr = buf->trbe_limit;
WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_EL1_LIMIT_SHIFT)));
WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
trblimitr &= ~TRBLIMITR_EL1_nVM;
trblimitr &= ~TRBLIMITR_EL1_FM_MASK;
trblimitr &= ~TRBLIMITR_EL1_TM_MASK;
trblimitr &= ~TRBLIMITR_EL1_LIMIT_MASK;
/*
* Fill trace buffer mode is used here while configuring the
* TRBE for trace capture. In this particular mode, the trace
* collection is stopped and a maintenance interrupt is raised
* when the current write pointer wraps. This pause in trace
* collection gives the software an opportunity to capture the
* trace data in the interrupt handler, before reconfiguring
* the TRBE.
*/
trblimitr |= (TRBLIMITR_EL1_FM_FILL << TRBLIMITR_EL1_FM_SHIFT) &
TRBLIMITR_EL1_FM_MASK;
/*
* Trigger mode is not used here while configuring the TRBE for
* the trace capture. Hence just keep this in the ignore mode.
*/
trblimitr |= (TRBLIMITR_EL1_TM_IGNR << TRBLIMITR_EL1_TM_SHIFT) &
TRBLIMITR_EL1_TM_MASK;
trblimitr |= (addr & PAGE_MASK);
set_trbe_enabled(buf->cpudata, trblimitr);
}
static void trbe_enable_hw(struct trbe_buf *buf)
{
WARN_ON(buf->trbe_hw_base < buf->trbe_base);
WARN_ON(buf->trbe_write < buf->trbe_hw_base);
WARN_ON(buf->trbe_write >= buf->trbe_limit);
set_trbe_disabled(buf->cpudata);
clr_trbe_status();
set_trbe_base_pointer(buf->trbe_hw_base);
set_trbe_write_pointer(buf->trbe_write);
/*
* Synchronize all the register updates
* till now before enabling the TRBE.
*/
isb();
set_trbe_limit_pointer_enabled(buf);
}
static enum trbe_fault_action trbe_get_fault_act(struct perf_output_handle *handle,
u64 trbsr)
{
int ec = get_trbe_ec(trbsr);
int bsc = get_trbe_bsc(trbsr);
struct trbe_buf *buf = etm_perf_sink_config(handle);
struct trbe_cpudata *cpudata = buf->cpudata;
WARN_ON(is_trbe_running(trbsr));
if (is_trbe_trg(trbsr) || is_trbe_abort(trbsr))
return TRBE_FAULT_ACT_FATAL;
if ((ec == TRBE_EC_STAGE1_ABORT) || (ec == TRBE_EC_STAGE2_ABORT))
return TRBE_FAULT_ACT_FATAL;
/*
* If the TRBE is affected by TRBE_WORKAROUND_OVERWRITE_FILL_MODE,
* it might write data after a WRAP event in the fill mode.
* Thus the check TRBPTR == TRBBASER will not be honored.
*/
if ((is_trbe_wrap(trbsr) && (ec == TRBE_EC_OTHERS) && (bsc == TRBE_BSC_FILLED)) &&
(trbe_may_overwrite_in_fill_mode(cpudata) ||
get_trbe_write_pointer() == get_trbe_base_pointer()))
return TRBE_FAULT_ACT_WRAP;
return TRBE_FAULT_ACT_SPURIOUS;
}
static unsigned long trbe_get_trace_size(struct perf_output_handle *handle,
struct trbe_buf *buf, bool wrap)
{
u64 write;
u64 start_off, end_off;
u64 size;
u64 overwrite_skip = TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES;
/*
* If the TRBE has wrapped around, the write pointer has
* wrapped too and should be treated as the limit.
*
* When the TRBE is affected by TRBE_WORKAROUND_WRITE_OUT_OF_RANGE,
* it may write up to 64 bytes beyond the "LIMIT". The driver already
* keeps a valid page next to the LIMIT and we could potentially
* consume the trace data that may have been collected there. But we
* cannot really be sure it is available, and the TRBPTR may not
* indicate the same. Also, the affected cores are also affected by
* another erratum which forces PAGE_SIZE alignment on the TRBPTR, and
* thus we could potentially pad an entire PAGE_SIZE - 64 bytes, just
* to get those 64 bytes. Thus we ignore the potential triggering of
* the erratum on WRAP and limit the data to the LIMIT.
*/
if (wrap)
write = get_trbe_limit_pointer();
else
write = get_trbe_write_pointer();
/*
* TRBE may use a different base address than the base
* of the ring buffer. Thus use the beginning of the ring
* buffer to compute the offsets.
*/
end_off = write - buf->trbe_base;
start_off = PERF_IDX2OFF(handle->head, buf);
if (WARN_ON_ONCE(end_off < start_off))
return 0;
size = end_off - start_off;
/*
* If the TRBE is affected by TRBE_WORKAROUND_OVERWRITE_FILL_MODE, we
* must fill the space we skipped with IGNORE packets. And we are
* always guaranteed to have at least a PAGE_SIZE of space in the buffer.
*/
if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE) &&
!WARN_ON(size < overwrite_skip))
__trbe_pad_buf(buf, start_off, overwrite_skip);
return size;
}
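/*
 * E.g. (illustrative): if the session started at buffer offset start_off ==
 * 0x1000 and the TRBE stopped with its write pointer at trbe_base + 0x3000,
 * the captured trace size is 0x2000 bytes. On a WRAP the LIMIT pointer is
 * used instead of the write pointer, as above.
 */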
static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
struct perf_event *event, void **pages,
int nr_pages, bool snapshot)
{
struct trbe_buf *buf;
struct page **pglist;
int i;
/*
* TRBE LIMIT and TRBE WRITE pointers must be page aligned. But with
* just a single page, there would not be any room left while writing
* into a partially filled TRBE buffer after the page size alignment.
* Hence restrict the minimum buffer size to two pages.
*/
if (nr_pages < 2)
return NULL;
buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, trbe_alloc_node(event));
if (!buf)
return ERR_PTR(-ENOMEM);
pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
if (!pglist) {
kfree(buf);
return ERR_PTR(-ENOMEM);
}
for (i = 0; i < nr_pages; i++)
pglist[i] = virt_to_page(pages[i]);
buf->trbe_base = (unsigned long)vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
if (!buf->trbe_base) {
kfree(pglist);
kfree(buf);
return ERR_PTR(-ENOMEM);
}
buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE;
buf->trbe_write = buf->trbe_base;
buf->snapshot = snapshot;
buf->nr_pages = nr_pages;
buf->pages = pages;
kfree(pglist);
return buf;
}
static void arm_trbe_free_buffer(void *config)
{
struct trbe_buf *buf = config;
vunmap((void *)buf->trbe_base);
kfree(buf);
}
static unsigned long arm_trbe_update_buffer(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *config)
{
struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
struct trbe_buf *buf = config;
enum trbe_fault_action act;
unsigned long size, status;
unsigned long flags;
bool wrap = false;
WARN_ON(buf->cpudata != cpudata);
WARN_ON(cpudata->cpu != smp_processor_id());
WARN_ON(cpudata->drvdata != drvdata);
if (cpudata->mode != CS_MODE_PERF)
return 0;
/*
* We are about to disable the TRBE. And this could in turn
* fill up the buffer, triggering an IRQ. This could be consumed
* by the PE asynchronously, causing a race here against
* the IRQ handler in closing out the handle. So, let us
* make sure the IRQ can't trigger while we are collecting
* the buffer. We also make sure that a WRAP event is handled
* accordingly.
*/
local_irq_save(flags);
/*
* If the TRBE was disabled due to lack of space in the AUX buffer or a
* spurious fault, the driver leaves it disabled, truncating the buffer.
* Since the etm_perf driver expects to close out the AUX buffer, the
* driver skips it. Thus, just pass in 0 size here to indicate that the
* buffer was truncated.
*/
if (!is_trbe_enabled()) {
size = 0;
goto done;
}
/*
* The perf handle structure needs to be shared with the TRBE IRQ handler for
* capturing trace data and restarting the handle. There is a chance of
* a dangling-reference crash when the etm event is being stopped while
* a TRBE IRQ is also being processed. This happens due to the release
* of the perf handle via perf_aux_output_end() in etm_event_stop(). Stopping
* the TRBE here will ensure that no IRQ can be generated when the perf
* handle gets freed in etm_event_stop().
*/
trbe_drain_and_disable_local(cpudata);
/* Check if there is a pending interrupt and handle it here */
status = read_sysreg_s(SYS_TRBSR_EL1);
if (is_trbe_irq(status)) {
/*
* Now that we are handling the IRQ here, clear the IRQ
* from the status, to let the irq handler know that it
* is taken care of.
*/
clr_trbe_irq();
isb();
act = trbe_get_fault_act(handle, status);
/*
* If this was not due to a WRAP event, we have some
* errors and as such the buffer is empty.
*/
if (act != TRBE_FAULT_ACT_WRAP) {
size = 0;
goto done;
}
trbe_report_wrap_event(handle);
wrap = true;
}
size = trbe_get_trace_size(handle, buf, wrap);
done:
local_irq_restore(flags);
if (buf->snapshot)
handle->head += size;
return size;
}
static int trbe_apply_work_around_before_enable(struct trbe_buf *buf)
{
/*
* TRBE_WORKAROUND_OVERWRITE_FILL_MODE causes the TRBE to overwrite a few cache
* lines worth of data from "TRBBASER_EL1" in the event of a "FILL".
* Thus, we could lose some amount of the trace at the base.
*
* Before Fix:
*
* normal-BASE head (normal-TRBPTR) tail (normal-LIMIT)
* | \/ /
* -------------------------------------------------------------
* | Pg0 | Pg1 | | | PgN |
* -------------------------------------------------------------
*
* In the normal course of action, we would set the TRBBASER to the
* beginning of the ring-buffer (normal-BASE). But with the erratum,
* the TRBE could overwrite the contents at the "normal-BASE", after
* hitting the "normal-LIMIT", since it doesn't stop as expected. And
* this is wrong. This could result in overwriting trace collected in
* one of the previous runs, being consumed by the user. So we must
* always make sure that the TRBBASER is within the region
* [head, head+size]. Note that TRBBASER must be PAGE aligned,
*
* After moving the BASE:
*
* normal-BASE head (normal-TRBPTR) tail (normal-LIMIT)
* | \/ /
* -------------------------------------------------------------
* | | |xyzdef. |.. tuvw| |
* -------------------------------------------------------------
* /
* New-BASER
*
* Also, we would set the TRBPTR to head (after adjusting for
* alignment) at normal-PTR. This would mean that the last few bytes
* of the trace (say, "xyz") might overwrite the first few bytes of
* trace written ("abc"). More importantly they will appear in what
* userspace sees as the beginning of the trace, which is wrong. We may
* not always have space to move the latest trace "xyz" to the correct
* order as it must appear beyond the LIMIT (i.e. [head..head+size]).
* Thus it is easier to ignore those bytes than to complicate the
* driver to move it, assuming that the erratum was triggered and
* doing additional checks to see if there is indeed allowed space at
* TRBLIMITR.LIMIT.
*
* Thus the full workaround will move the BASE and the PTR and would
* look like (after padding at the skipped bytes at the end of
* session) :
*
* normal-BASE head (normal-TRBPTR) tail (normal-LIMIT)
* | \/ /
* -------------------------------------------------------------
* | | |///abc.. |.. rst| |
* -------------------------------------------------------------
* / |
* New-BASER New-TRBPTR
*
* To summarize, with the work around:
*
* - We always align the offset for the next session to PAGE_SIZE
* (This is to ensure we can program the TRBBASER to this offset
* within the region [head...head+size]).
*
* - At TRBE enable:
* - Set the TRBBASER to the page aligned offset of the current
* proposed write offset. (which is guaranteed to be aligned
* as above)
* - Move the TRBPTR to skip first 256bytes (that might be
* overwritten with the erratum). This ensures that the trace
* generated in the session is not re-written.
*
* - At trace collection:
* - Pad the 256bytes skipped above again with IGNORE packets.
*/
if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE)) {
if (WARN_ON(!IS_ALIGNED(buf->trbe_write, PAGE_SIZE)))
return -EINVAL;
buf->trbe_hw_base = buf->trbe_write;
buf->trbe_write += TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES;
}
/*
* TRBE_WORKAROUND_WRITE_OUT_OF_RANGE could cause the TRBE to write to
* the next page after the TRBLIMITR.LIMIT. For perf, the "next page"
* may be:
* - The page beyond the ring buffer. This means the TRBE could
* corrupt another entity (kernel / user).
* - A portion of the "ring buffer" already consumed by the userspace,
* i.e. a page outside [head, head + size].
*
* We work around this by:
* - Making sure that we have at least an extra space of PAGE left
* in the ring buffer [head, head + size], than we normally do
* without the erratum. See trbe_min_trace_buf_size().
*
* - Adjust the TRBLIMITR.LIMIT to leave the extra PAGE outside
* the TRBE's range (i.e. [TRBBASER, TRBLIMITR.LIMIT]).
*/
if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_WRITE_OUT_OF_RANGE)) {
s64 space = buf->trbe_limit - buf->trbe_write;
/*
* We must have more than a PAGE_SIZE worth of space in the proposed
* range for the TRBE.
*/
if (WARN_ON(space <= PAGE_SIZE ||
!IS_ALIGNED(buf->trbe_limit, PAGE_SIZE)))
return -EINVAL;
buf->trbe_limit -= PAGE_SIZE;
}
return 0;
}
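/*
 * Putting both workarounds together, on a CPU affected by both errata
 * (illustrative numbers): a proposed window of [trbe_base + 0x4000,
 * trbe_base + 0x10000) becomes TRBBASER = trbe_base + 0x4000, TRBPTR =
 * trbe_base + 0x4100 (256 bytes skipped) and TRBLIMITR.LIMIT = trbe_base +
 * 0xf000 (one page kept in reserve).
 */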
static int __arm_trbe_enable(struct trbe_buf *buf,
struct perf_output_handle *handle)
{
int ret = 0;
perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
buf->trbe_limit = compute_trbe_buffer_limit(handle);
buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
if (buf->trbe_limit == buf->trbe_base) {
ret = -ENOSPC;
goto err;
}
/* Set the base of the TRBE to the buffer base */
buf->trbe_hw_base = buf->trbe_base;
ret = trbe_apply_work_around_before_enable(buf);
if (ret)
goto err;
*this_cpu_ptr(buf->cpudata->drvdata->handle) = handle;
trbe_enable_hw(buf);
return 0;
err:
trbe_stop_and_truncate_event(handle);
return ret;
}
static int arm_trbe_enable(struct coresight_device *csdev, enum cs_mode mode,
void *data)
{
struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
struct perf_output_handle *handle = data;
struct trbe_buf *buf = etm_perf_sink_config(handle);
WARN_ON(cpudata->cpu != smp_processor_id());
WARN_ON(cpudata->drvdata != drvdata);
if (mode != CS_MODE_PERF)
return -EINVAL;
cpudata->buf = buf;
cpudata->mode = mode;
buf->cpudata = cpudata;
return __arm_trbe_enable(buf, handle);
}
static int arm_trbe_disable(struct coresight_device *csdev)
{
struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
struct trbe_buf *buf = cpudata->buf;
WARN_ON(buf->cpudata != cpudata);
WARN_ON(cpudata->cpu != smp_processor_id());
WARN_ON(cpudata->drvdata != drvdata);
if (cpudata->mode != CS_MODE_PERF)
return -EINVAL;
trbe_drain_and_disable_local(cpudata);
buf->cpudata = NULL;
cpudata->buf = NULL;
cpudata->mode = CS_MODE_DISABLED;
return 0;
}
static void trbe_handle_spurious(struct perf_output_handle *handle)
{
struct trbe_buf *buf = etm_perf_sink_config(handle);
u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
/*
* If the IRQ was spurious, simply re-enable the TRBE
* without modifying the buffer parameters, to
* retain the trace collected so far.
*/
set_trbe_enabled(buf->cpudata, trblimitr);
}
static int trbe_handle_overflow(struct perf_output_handle *handle)
{
struct perf_event *event = handle->event;
struct trbe_buf *buf = etm_perf_sink_config(handle);
unsigned long size;
struct etm_event_data *event_data;
size = trbe_get_trace_size(handle, buf, true);
if (buf->snapshot)
handle->head += size;
trbe_report_wrap_event(handle);
perf_aux_output_end(handle, size);
event_data = perf_aux_output_begin(handle, event);
if (!event_data) {
/*
* We are unable to restart the trace collection,
* thus leave the TRBE disabled. The etm-perf driver
* is able to detect this with a disconnected handle
* (handle->event = NULL).
*/
trbe_drain_and_disable_local(buf->cpudata);
*this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
return -EINVAL;
}
return __arm_trbe_enable(buf, handle);
}
static bool is_perf_trbe(struct perf_output_handle *handle)
{
struct trbe_buf *buf = etm_perf_sink_config(handle);
struct trbe_cpudata *cpudata = buf->cpudata;
struct trbe_drvdata *drvdata = cpudata->drvdata;
int cpu = smp_processor_id();
WARN_ON(buf->trbe_hw_base != get_trbe_base_pointer());
WARN_ON(buf->trbe_limit != get_trbe_limit_pointer());
if (cpudata->mode != CS_MODE_PERF)
return false;
if (cpudata->cpu != cpu)
return false;
if (!cpumask_test_cpu(cpu, &drvdata->supported_cpus))
return false;
return true;
}
static irqreturn_t arm_trbe_irq_handler(int irq, void *dev)
{
struct perf_output_handle **handle_ptr = dev;
struct perf_output_handle *handle = *handle_ptr;
struct trbe_buf *buf = etm_perf_sink_config(handle);
enum trbe_fault_action act;
u64 status;
bool truncated = false;
u64 trfcr;
/* Reads of TRBSR_EL1 are fine when the TRBE is active */
status = read_sysreg_s(SYS_TRBSR_EL1);
/*
* If the pending IRQ was handled by update_buffer callback
* we have nothing to do here.
*/
if (!is_trbe_irq(status))
return IRQ_NONE;
/* Prohibit the CPU from tracing before we disable the TRBE */
trfcr = cpu_prohibit_trace();
/*
* Ensure the trace is visible to the CPUs and
* any external aborts have been resolved.
*/
trbe_drain_and_disable_local(buf->cpudata);
clr_trbe_irq();
isb();
if (WARN_ON_ONCE(!handle) || !perf_get_aux(handle))
return IRQ_NONE;
if (!is_perf_trbe(handle))
return IRQ_NONE;
act = trbe_get_fault_act(handle, status);
switch (act) {
case TRBE_FAULT_ACT_WRAP:
truncated = !!trbe_handle_overflow(handle);
break;
case TRBE_FAULT_ACT_SPURIOUS:
trbe_handle_spurious(handle);
break;
case TRBE_FAULT_ACT_FATAL:
trbe_stop_and_truncate_event(handle);
truncated = true;
break;
}
/*
* If the buffer was truncated, ensure perf callbacks
* have completed, which will disable the event.
*
* Otherwise, restore the trace filter controls to
* allow the tracing.
*/
if (truncated)
irq_work_run();
else
write_trfcr(trfcr);
return IRQ_HANDLED;
}
static const struct coresight_ops_sink arm_trbe_sink_ops = {
.enable = arm_trbe_enable,
.disable = arm_trbe_disable,
.alloc_buffer = arm_trbe_alloc_buffer,
.free_buffer = arm_trbe_free_buffer,
.update_buffer = arm_trbe_update_buffer,
};
static const struct coresight_ops arm_trbe_cs_ops = {
.sink_ops = &arm_trbe_sink_ops,
};
static ssize_t align_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
return sprintf(buf, "%llx\n", cpudata->trbe_hw_align);
}
static DEVICE_ATTR_RO(align);
static ssize_t flag_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", cpudata->trbe_flag);
}
static DEVICE_ATTR_RO(flag);
static struct attribute *arm_trbe_attrs[] = {
&dev_attr_align.attr,
&dev_attr_flag.attr,
NULL,
};
static const struct attribute_group arm_trbe_group = {
.attrs = arm_trbe_attrs,
};
static const struct attribute_group *arm_trbe_groups[] = {
&arm_trbe_group,
NULL,
};
static void arm_trbe_enable_cpu(void *info)
{
struct trbe_drvdata *drvdata = info;
struct trbe_cpudata *cpudata = this_cpu_ptr(drvdata->cpudata);
trbe_reset_local(cpudata);
enable_percpu_irq(drvdata->irq, IRQ_TYPE_NONE);
}
static void arm_trbe_disable_cpu(void *info)
{
struct trbe_drvdata *drvdata = info;
struct trbe_cpudata *cpudata = this_cpu_ptr(drvdata->cpudata);
disable_percpu_irq(drvdata->irq);
trbe_reset_local(cpudata);
}
static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
{
struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
struct coresight_desc desc = { 0 };
struct device *dev;
if (WARN_ON(trbe_csdev))
return;
/* If the TRBE was not probed on the CPU, we shouldn't be here */
if (WARN_ON(!cpudata->drvdata))
return;
dev = &cpudata->drvdata->pdev->dev;
desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu);
if (!desc.name)
goto cpu_clear;
desc.pdata = coresight_get_platform_data(dev);
if (IS_ERR(desc.pdata))
goto cpu_clear;
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM;
desc.ops = &arm_trbe_cs_ops;
desc.groups = arm_trbe_groups;
desc.dev = dev;
trbe_csdev = coresight_register(&desc);
if (IS_ERR(trbe_csdev))
goto cpu_clear;
dev_set_drvdata(&trbe_csdev->dev, cpudata);
coresight_set_percpu_sink(cpu, trbe_csdev);
return;
cpu_clear:
cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
}
/*
* Must be called with preemption disabled, for trbe_check_errata().
*/
static void arm_trbe_probe_cpu(void *info)
{
struct trbe_drvdata *drvdata = info;
int cpu = smp_processor_id();
struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
u64 trbidr;
if (WARN_ON(!cpudata))
goto cpu_clear;
if (!is_trbe_available()) {
pr_err("TRBE is not implemented on cpu %d\n", cpu);
goto cpu_clear;
}
trbidr = read_sysreg_s(SYS_TRBIDR_EL1);
if (!is_trbe_programmable(trbidr)) {
pr_err("TRBE is owned in higher exception level on cpu %d\n", cpu);
goto cpu_clear;
}
cpudata->trbe_hw_align = 1ULL << get_trbe_address_align(trbidr);
if (cpudata->trbe_hw_align > SZ_2K) {
pr_err("Unsupported alignment on cpu %d\n", cpu);
goto cpu_clear;
}
/*
* Run the TRBE erratum checks, now that we know
* this instance is about to be registered.
*/
trbe_check_errata(cpudata);
if (trbe_is_broken(cpudata)) {
pr_err("Disabling TRBE on cpu%d due to erratum\n", cpu);
goto cpu_clear;
}
/*
* If the TRBE is affected by erratum TRBE_WORKAROUND_OVERWRITE_FILL_MODE,
* we must always program the TRBPTR_EL1 256 bytes from a page
* boundary, with TRBBASER_EL1 set to the page, to prevent the
* TRBE overwriting 256 bytes at TRBBASER_EL1 on a FILL event.
*
* Thus make sure we always align our write pointer to a PAGE_SIZE,
* which also guarantees that we have at least a PAGE_SIZE space in
* the buffer (TRBLIMITR is PAGE aligned) and thus we can skip
* the required bytes at the base.
*/
if (trbe_may_overwrite_in_fill_mode(cpudata))
cpudata->trbe_align = PAGE_SIZE;
else
cpudata->trbe_align = cpudata->trbe_hw_align;
cpudata->trbe_flag = get_trbe_flag_update(trbidr);
cpudata->cpu = cpu;
cpudata->drvdata = drvdata;
return;
cpu_clear:
cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
}
static void arm_trbe_remove_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
{
struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
if (trbe_csdev) {
coresight_unregister(trbe_csdev);
coresight_set_percpu_sink(cpu, NULL);
}
}
static int arm_trbe_probe_coresight(struct trbe_drvdata *drvdata)
{
int cpu;
drvdata->cpudata = alloc_percpu(typeof(*drvdata->cpudata));
if (!drvdata->cpudata)
return -ENOMEM;
for_each_cpu(cpu, &drvdata->supported_cpus) {
/* If we fail to probe the CPU, let us defer it to hotplug callbacks */
if (smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1))
continue;
if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
arm_trbe_register_coresight_cpu(drvdata, cpu);
if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
smp_call_function_single(cpu, arm_trbe_enable_cpu, drvdata, 1);
}
return 0;
}
static int arm_trbe_remove_coresight(struct trbe_drvdata *drvdata)
{
int cpu;
for_each_cpu(cpu, &drvdata->supported_cpus) {
smp_call_function_single(cpu, arm_trbe_disable_cpu, drvdata, 1);
arm_trbe_remove_coresight_cpu(drvdata, cpu);
}
free_percpu(drvdata->cpudata);
return 0;
}
static void arm_trbe_probe_hotplugged_cpu(struct trbe_drvdata *drvdata)
{
preempt_disable();
arm_trbe_probe_cpu(drvdata);
preempt_enable();
}
static int arm_trbe_cpu_startup(unsigned int cpu, struct hlist_node *node)
{
struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
/*
* If this CPU was not probed for TRBE,
* initialize it now.
*/
if (!coresight_get_percpu_sink(cpu)) {
arm_trbe_probe_hotplugged_cpu(drvdata);
if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
arm_trbe_register_coresight_cpu(drvdata, cpu);
if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
arm_trbe_enable_cpu(drvdata);
} else {
arm_trbe_enable_cpu(drvdata);
}
}
return 0;
}
static int arm_trbe_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
arm_trbe_disable_cpu(drvdata);
return 0;
}
static int arm_trbe_probe_cpuhp(struct trbe_drvdata *drvdata)
{
enum cpuhp_state trbe_online;
int ret;
trbe_online = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
arm_trbe_cpu_startup, arm_trbe_cpu_teardown);
if (trbe_online < 0)
return trbe_online;
ret = cpuhp_state_add_instance(trbe_online, &drvdata->hotplug_node);
if (ret) {
cpuhp_remove_multi_state(trbe_online);
return ret;
}
drvdata->trbe_online = trbe_online;
return 0;
}
static void arm_trbe_remove_cpuhp(struct trbe_drvdata *drvdata)
{
cpuhp_state_remove_instance(drvdata->trbe_online, &drvdata->hotplug_node);
cpuhp_remove_multi_state(drvdata->trbe_online);
}
static int arm_trbe_probe_irq(struct platform_device *pdev,
struct trbe_drvdata *drvdata)
{
int ret;
drvdata->irq = platform_get_irq(pdev, 0);
if (drvdata->irq < 0) {
pr_err("IRQ not found for the platform device\n");
return drvdata->irq;
}
if (!irq_is_percpu(drvdata->irq)) {
pr_err("IRQ is not a PPI\n");
return -EINVAL;
}
if (irq_get_percpu_devid_partition(drvdata->irq, &drvdata->supported_cpus))
return -EINVAL;
drvdata->handle = alloc_percpu(struct perf_output_handle *);
if (!drvdata->handle)
return -ENOMEM;
ret = request_percpu_irq(drvdata->irq, arm_trbe_irq_handler, DRVNAME, drvdata->handle);
if (ret) {
free_percpu(drvdata->handle);
return ret;
}
return 0;
}
static void arm_trbe_remove_irq(struct trbe_drvdata *drvdata)
{
free_percpu_irq(drvdata->irq, drvdata->handle);
free_percpu(drvdata->handle);
}
static int arm_trbe_device_probe(struct platform_device *pdev)
{
struct trbe_drvdata *drvdata;
struct device *dev = &pdev->dev;
int ret;
/* Trace capture is not possible with kernel page table isolation */
if (arm64_kernel_unmapped_at_el0()) {
pr_err("TRBE wouldn't work if kernel gets unmapped at EL0\n");
return -EOPNOTSUPP;
}
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
dev_set_drvdata(dev, drvdata);
drvdata->pdev = pdev;
ret = arm_trbe_probe_irq(pdev, drvdata);
if (ret)
return ret;
ret = arm_trbe_probe_coresight(drvdata);
if (ret)
goto probe_failed;
ret = arm_trbe_probe_cpuhp(drvdata);
if (ret)
goto cpuhp_failed;
return 0;
cpuhp_failed:
arm_trbe_remove_coresight(drvdata);
probe_failed:
arm_trbe_remove_irq(drvdata);
return ret;
}
static int arm_trbe_device_remove(struct platform_device *pdev)
{
struct trbe_drvdata *drvdata = platform_get_drvdata(pdev);
arm_trbe_remove_cpuhp(drvdata);
arm_trbe_remove_coresight(drvdata);
arm_trbe_remove_irq(drvdata);
return 0;
}
static const struct of_device_id arm_trbe_of_match[] = {
{ .compatible = "arm,trace-buffer-extension"},
{},
};
MODULE_DEVICE_TABLE(of, arm_trbe_of_match);
static struct platform_driver arm_trbe_driver = {
.driver = {
.name = DRVNAME,
.of_match_table = of_match_ptr(arm_trbe_of_match),
.suppress_bind_attrs = true,
},
.probe = arm_trbe_device_probe,
.remove = arm_trbe_device_remove,
};
static int __init arm_trbe_init(void)
{
int ret;
ret = platform_driver_register(&arm_trbe_driver);
if (!ret)
return 0;
pr_err("Error registering %s platform driver\n", DRVNAME);
return ret;
}
static void __exit arm_trbe_exit(void)
{
platform_driver_unregister(&arm_trbe_driver);
}
module_init(arm_trbe_init);
module_exit(arm_trbe_exit);
MODULE_AUTHOR("Anshuman Khandual <[email protected]>");
MODULE_DESCRIPTION("Arm Trace Buffer Extension (TRBE) driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/hwtracing/coresight/coresight-trbe.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Arm Limited. All rights reserved.
*
* Coresight Address Translation Unit support
*
* Author: Suzuki K Poulose <[email protected]>
*/
#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "coresight-catu.h"
#include "coresight-priv.h"
#include "coresight-tmc.h"
#define csdev_to_catu_drvdata(csdev) \
dev_get_drvdata(csdev->dev.parent)
/* Verbose output for CATU table contents */
#ifdef CATU_DEBUG
#define catu_dbg(x, ...) dev_dbg(x, __VA_ARGS__)
#else
#define catu_dbg(x, ...) do {} while (0)
#endif
DEFINE_CORESIGHT_DEVLIST(catu_devs, "catu");
struct catu_etr_buf {
struct tmc_sg_table *catu_table;
dma_addr_t sladdr;
};
/*
* CATU uses a page size of 4KB for page tables as well as data pages.
* Each 64bit entry in the table has the following format.
*
* 63 12 1 0
* ------------------------------------
* | Address [63-12] | SBZ | V|
* ------------------------------------
*
* Where bit[0] V indicates if the address is valid or not.
* Each 4K table page has up to 256 data page pointers, taking up to 2K
* of space. There are two Link pointers at the end of the 4K page,
* pointing to the previous and next table pages respectively (i.e.
* entries 510 and 511).
* E.g., a table of two pages could look like:
*
* Table Page 0 Table Page 1
* SLADDR ===> x------------------x x--> x-----------------x
* INADDR ->| Page 0 | V | | | Page 256 | V | <- INADDR+1M
* |------------------| | |-----------------|
* INADDR+4K ->| Page 1 | V | | | |
* |------------------| | |-----------------|
* | Page 2 | V | | | |
* |------------------| | |-----------------|
* | ... | V | | | ... |
* |------------------| | |-----------------|
* INADDR+1020K| Page 255 | V | | | Page 511 | V |
* SLADDR+2K==>|------------------| | |-----------------|
* | UNUSED | | | | |
* |------------------| | | |
* | UNUSED | | | | |
* |------------------| | | |
* | ... | | | | |
* |------------------| | |-----------------|
* | IGNORED | 0 | | | Table Page 0| 1 |
* |------------------| | |-----------------|
* | Table Page 1| 1 |--x | IGNORED | 0 |
* x------------------x x-----------------x
* SLADDR+4K==>
*
* The base input address (used by the ETR, programmed in INADDR_{LO,HI})
* must be aligned to 1MB (the size addressable by a single page table).
* The CATU maps INADDR{LO:HI} to the first page in the table pointed
* to by SLADDR{LO:HI} and so on.
*
*/
typedef u64 cate_t;
#define CATU_PAGE_SHIFT 12
#define CATU_PAGE_SIZE (1UL << CATU_PAGE_SHIFT)
#define CATU_PAGES_PER_SYSPAGE (PAGE_SIZE / CATU_PAGE_SIZE)
/* Page pointers are only allocated in the first 2K half */
#define CATU_PTRS_PER_PAGE ((CATU_PAGE_SIZE >> 1) / sizeof(cate_t))
#define CATU_PTRS_PER_SYSPAGE (CATU_PAGES_PER_SYSPAGE * CATU_PTRS_PER_PAGE)
#define CATU_LINK_PREV ((CATU_PAGE_SIZE / sizeof(cate_t)) - 2)
#define CATU_LINK_NEXT ((CATU_PAGE_SIZE / sizeof(cate_t)) - 1)
#define CATU_ADDR_SHIFT 12
#define CATU_ADDR_MASK ~(((cate_t)1 << CATU_ADDR_SHIFT) - 1)
#define CATU_ENTRY_VALID ((cate_t)0x1)
#define CATU_VALID_ENTRY(addr) \
(((cate_t)(addr) & CATU_ADDR_MASK) | CATU_ENTRY_VALID)
#define CATU_ENTRY_ADDR(entry) ((cate_t)(entry) & ~((cate_t)CATU_ENTRY_VALID))
/* CATU expects the INADDR to be aligned to 1M. */
#define CATU_DEFAULT_INADDR (1ULL << 20)
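/*
 * Entry encoding example (illustrative): for a data page at the DMA address
 * 0x80001000, CATU_VALID_ENTRY(0x80001000) yields the table entry
 * 0x80001001 (address bits [63:12] preserved, V bit set), and
 * CATU_ENTRY_ADDR() of that entry recovers 0x80001000.
 */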
/*
* catu_get_table : Retrieve the table pointers for the given @offset
* within the buffer. The buffer is wrapped around to a valid offset.
*
* Returns : The CPU virtual address for the beginning of the table
* containing the data page pointer for @offset. If @daddrp is not NULL,
* @daddrp points to the DMA address of the beginning of the table.
*/
static inline cate_t *catu_get_table(struct tmc_sg_table *catu_table,
unsigned long offset,
dma_addr_t *daddrp)
{
unsigned long buf_size = tmc_sg_table_buf_size(catu_table);
unsigned int table_nr, pg_idx, pg_offset;
struct tmc_pages *table_pages = &catu_table->table_pages;
void *ptr;
/* Make sure offset is within the range */
offset %= buf_size;
/*
* Each table can address 1MB and a single kernel page can
* contain "CATU_PAGES_PER_SYSPAGE" CATU tables.
*/
table_nr = offset >> 20;
/* Find the table page where the table_nr lies in */
pg_idx = table_nr / CATU_PAGES_PER_SYSPAGE;
pg_offset = (table_nr % CATU_PAGES_PER_SYSPAGE) * CATU_PAGE_SIZE;
if (daddrp)
*daddrp = table_pages->daddrs[pg_idx] + pg_offset;
ptr = page_address(table_pages->pages[pg_idx]);
return (cate_t *)((unsigned long)ptr + pg_offset);
}
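/*
 * For illustration: an @offset of 0x530000 (5MB + 192K) lands in table_nr 5.
 * With 4K kernel pages (CATU_PAGES_PER_SYSPAGE == 1) that is table page 5
 * at offset 0; with 64K kernel pages (CATU_PAGES_PER_SYSPAGE == 16) it is
 * table page 0 at offset 5 * CATU_PAGE_SIZE == 0x5000.
 */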
#ifdef CATU_DEBUG
static void catu_dump_table(struct tmc_sg_table *catu_table)
{
int i;
cate_t *table;
unsigned long table_end, buf_size, offset = 0;
buf_size = tmc_sg_table_buf_size(catu_table);
dev_dbg(catu_table->dev,
"Dump table %p, tdaddr: %llx\n",
catu_table, catu_table->table_daddr);
while (offset < buf_size) {
table_end = offset + SZ_1M < buf_size ?
offset + SZ_1M : buf_size;
table = catu_get_table(catu_table, offset, NULL);
for (i = 0; offset < table_end; i++, offset += CATU_PAGE_SIZE)
dev_dbg(catu_table->dev, "%d: %llx\n", i, table[i]);
dev_dbg(catu_table->dev, "Prev : %llx, Next: %llx\n",
table[CATU_LINK_PREV], table[CATU_LINK_NEXT]);
dev_dbg(catu_table->dev, "== End of sub-table ===");
}
dev_dbg(catu_table->dev, "== End of Table ===");
}
#else
static inline void catu_dump_table(struct tmc_sg_table *catu_table)
{
}
#endif
static inline cate_t catu_make_entry(dma_addr_t addr)
{
return addr ? CATU_VALID_ENTRY(addr) : 0;
}
/*
* catu_populate_table : Populate the given CATU table.
* The table is always populated as a circular table,
* i.e. the "prev" link of the "first" table points to the "last"
* table and the "next" link of the "last" table points to the
* "first" table. The buffer should be made linear by calling
* catu_set_table().
*/
static void
catu_populate_table(struct tmc_sg_table *catu_table)
{
int i;
int sys_pidx; /* Index to current system data page */
int catu_pidx; /* Index of CATU page within the system data page */
unsigned long offset, buf_size, table_end;
dma_addr_t data_daddr;
dma_addr_t prev_taddr, next_taddr, cur_taddr;
cate_t *table_ptr, *next_table;
buf_size = tmc_sg_table_buf_size(catu_table);
sys_pidx = catu_pidx = 0;
offset = 0;
table_ptr = catu_get_table(catu_table, 0, &cur_taddr);
prev_taddr = 0; /* Prev link for the first table */
while (offset < buf_size) {
/*
* The @offset is always 1M aligned here and we have an
* empty table @table_ptr to fill. Each table can address
* up to 1MB of the data buffer. The last table may have fewer
* entries if the buffer size is not aligned.
*/
table_end = (offset + SZ_1M) < buf_size ?
(offset + SZ_1M) : buf_size;
for (i = 0; offset < table_end;
i++, offset += CATU_PAGE_SIZE) {
data_daddr = catu_table->data_pages.daddrs[sys_pidx] +
catu_pidx * CATU_PAGE_SIZE;
catu_dbg(catu_table->dev,
"[table %5ld:%03d] 0x%llx\n",
(offset >> 20), i, data_daddr);
table_ptr[i] = catu_make_entry(data_daddr);
/* Move the pointers for data pages */
catu_pidx = (catu_pidx + 1) % CATU_PAGES_PER_SYSPAGE;
if (catu_pidx == 0)
sys_pidx++;
}
/*
* If we have finished all the valid entries, fill the rest of
* the table (i.e. the last table page) with invalid entries,
* to fail the lookups.
*/
if (offset == buf_size) {
memset(&table_ptr[i], 0,
sizeof(cate_t) * (CATU_PTRS_PER_PAGE - i));
next_taddr = 0;
} else {
next_table = catu_get_table(catu_table,
offset, &next_taddr);
}
table_ptr[CATU_LINK_PREV] = catu_make_entry(prev_taddr);
table_ptr[CATU_LINK_NEXT] = catu_make_entry(next_taddr);
catu_dbg(catu_table->dev,
"[table%5ld]: Cur: 0x%llx Prev: 0x%llx, Next: 0x%llx\n",
(offset >> 20) - 1, cur_taddr, prev_taddr, next_taddr);
/* Update the prev/next addresses */
if (next_taddr) {
prev_taddr = cur_taddr;
cur_taddr = next_taddr;
table_ptr = next_table;
}
}
/* Sync the table for device */
tmc_sg_table_sync_table(catu_table);
}
static struct tmc_sg_table *
catu_init_sg_table(struct device *catu_dev, int node,
ssize_t size, void **pages)
{
int nr_tpages;
struct tmc_sg_table *catu_table;
/*
* Each table can address up to 1MB and we can have
* CATU_PAGES_PER_SYSPAGE tables in a system page.
*/
nr_tpages = DIV_ROUND_UP(size, SZ_1M) / CATU_PAGES_PER_SYSPAGE;
catu_table = tmc_alloc_sg_table(catu_dev, node, nr_tpages,
size >> PAGE_SHIFT, pages);
if (IS_ERR(catu_table))
return catu_table;
catu_populate_table(catu_table);
dev_dbg(catu_dev,
"Setup table %p, size %ldKB, %d table pages\n",
catu_table, (unsigned long)size >> 10, nr_tpages);
catu_dump_table(catu_table);
return catu_table;
}
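/*
 * E.g. (illustrative, 4K kernel pages): a 4MB ETR buffer needs
 * DIV_ROUND_UP(4M, 1M) / CATU_PAGES_PER_SYSPAGE == 4 table pages, each
 * addressing 1MB of the data buffer.
 */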
static void catu_free_etr_buf(struct etr_buf *etr_buf)
{
struct catu_etr_buf *catu_buf;
if (!etr_buf || etr_buf->mode != ETR_MODE_CATU || !etr_buf->private)
return;
catu_buf = etr_buf->private;
tmc_free_sg_table(catu_buf->catu_table);
kfree(catu_buf);
}
static ssize_t catu_get_data_etr_buf(struct etr_buf *etr_buf, u64 offset,
size_t len, char **bufpp)
{
struct catu_etr_buf *catu_buf = etr_buf->private;
return tmc_sg_table_get_data(catu_buf->catu_table, offset, len, bufpp);
}
static void catu_sync_etr_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
{
struct catu_etr_buf *catu_buf = etr_buf->private;
struct tmc_sg_table *catu_table = catu_buf->catu_table;
u64 r_offset, w_offset;
/*
* ETR started off at etr_buf->hwaddr. Convert the RRP/RWP to
* offsets within the trace buffer.
*/
r_offset = rrp - etr_buf->hwaddr;
w_offset = rwp - etr_buf->hwaddr;
if (!etr_buf->full) {
etr_buf->len = w_offset - r_offset;
if (w_offset < r_offset)
etr_buf->len += etr_buf->size;
} else {
etr_buf->len = etr_buf->size;
}
etr_buf->offset = r_offset;
tmc_sg_table_sync_data_range(catu_table, r_offset, etr_buf->len);
}
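/*
 * E.g. (illustrative): with etr_buf->size == 1MB, rrp at hwaddr + 0xf0000
 * and rwp at hwaddr + 0x10000 on a non-full buffer, the write pointer has
 * wrapped past the read pointer, so etr_buf->len = (0x10000 - 0xf0000) +
 * 0x100000 == 0x20000.
 */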
static int catu_alloc_etr_buf(struct tmc_drvdata *tmc_drvdata,
struct etr_buf *etr_buf, int node, void **pages)
{
struct coresight_device *csdev;
struct tmc_sg_table *catu_table;
struct catu_etr_buf *catu_buf;
csdev = tmc_etr_get_catu_device(tmc_drvdata);
if (!csdev)
return -ENODEV;
catu_buf = kzalloc(sizeof(*catu_buf), GFP_KERNEL);
if (!catu_buf)
return -ENOMEM;
catu_table = catu_init_sg_table(&csdev->dev, node,
etr_buf->size, pages);
if (IS_ERR(catu_table)) {
kfree(catu_buf);
return PTR_ERR(catu_table);
}
etr_buf->mode = ETR_MODE_CATU;
etr_buf->private = catu_buf;
etr_buf->hwaddr = CATU_DEFAULT_INADDR;
catu_buf->catu_table = catu_table;
/* Get the table base address */
catu_buf->sladdr = catu_table->table_daddr;
return 0;
}
static const struct etr_buf_operations etr_catu_buf_ops = {
.alloc = catu_alloc_etr_buf,
.free = catu_free_etr_buf,
.sync = catu_sync_etr_buf,
.get_data = catu_get_data_etr_buf,
};
static struct attribute *catu_mgmt_attrs[] = {
coresight_simple_reg32(devid, CORESIGHT_DEVID),
coresight_simple_reg32(control, CATU_CONTROL),
coresight_simple_reg32(status, CATU_STATUS),
coresight_simple_reg32(mode, CATU_MODE),
coresight_simple_reg32(axictrl, CATU_AXICTRL),
coresight_simple_reg32(irqen, CATU_IRQEN),
coresight_simple_reg64(sladdr, CATU_SLADDRLO, CATU_SLADDRHI),
coresight_simple_reg64(inaddr, CATU_INADDRLO, CATU_INADDRHI),
NULL,
};
static const struct attribute_group catu_mgmt_group = {
.attrs = catu_mgmt_attrs,
.name = "mgmt",
};
static const struct attribute_group *catu_groups[] = {
&catu_mgmt_group,
NULL,
};
static inline int catu_wait_for_ready(struct catu_drvdata *drvdata)
{
struct csdev_access *csa = &drvdata->csdev->access;
return coresight_timeout(csa, CATU_STATUS, CATU_STATUS_READY, 1);
}
static int catu_enable_hw(struct catu_drvdata *drvdata, enum cs_mode cs_mode,
void *data)
{
int rc;
u32 control, mode;
struct etr_buf *etr_buf = NULL;
struct device *dev = &drvdata->csdev->dev;
struct coresight_device *csdev = drvdata->csdev;
struct coresight_device *etrdev;
union coresight_dev_subtype etr_subtype = {
.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM
};
if (catu_wait_for_ready(drvdata))
dev_warn(dev, "Timeout while waiting for READY\n");
control = catu_read_control(drvdata);
if (control & BIT(CATU_CONTROL_ENABLE)) {
dev_warn(dev, "CATU is already enabled\n");
return -EBUSY;
}
rc = coresight_claim_device_unlocked(csdev);
if (rc)
return rc;
etrdev = coresight_find_input_type(
csdev->pdata, CORESIGHT_DEV_TYPE_SINK, etr_subtype);
if (etrdev) {
etr_buf = tmc_etr_get_buffer(etrdev, cs_mode, data);
if (IS_ERR(etr_buf))
return PTR_ERR(etr_buf);
}
control |= BIT(CATU_CONTROL_ENABLE);
if (etr_buf && etr_buf->mode == ETR_MODE_CATU) {
struct catu_etr_buf *catu_buf = etr_buf->private;
mode = CATU_MODE_TRANSLATE;
catu_write_axictrl(drvdata, CATU_OS_AXICTRL);
catu_write_sladdr(drvdata, catu_buf->sladdr);
catu_write_inaddr(drvdata, CATU_DEFAULT_INADDR);
} else {
mode = CATU_MODE_PASS_THROUGH;
catu_write_sladdr(drvdata, 0);
catu_write_inaddr(drvdata, 0);
}
catu_write_irqen(drvdata, 0);
catu_write_mode(drvdata, mode);
catu_write_control(drvdata, control);
dev_dbg(dev, "Enabled in %s mode\n",
(mode == CATU_MODE_PASS_THROUGH) ?
"Pass through" :
"Translate");
return 0;
}
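/*
 * Summary of the enable sequence above: wait for READY, claim the
 * device, resolve the downstream ETR buffer, program AXICTRL, SLADDR
 * and INADDR for translate mode (or clear SLADDR/INADDR for
 * pass-through), then set IRQEN and MODE, and finally flip
 * CONTROL.ENABLE.
 */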
static int catu_enable(struct coresight_device *csdev, enum cs_mode mode,
void *data)
{
int rc;
struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
CS_UNLOCK(catu_drvdata->base);
rc = catu_enable_hw(catu_drvdata, mode, data);
CS_LOCK(catu_drvdata->base);
return rc;
}
static int catu_disable_hw(struct catu_drvdata *drvdata)
{
int rc = 0;
struct device *dev = &drvdata->csdev->dev;
struct coresight_device *csdev = drvdata->csdev;
catu_write_control(drvdata, 0);
coresight_disclaim_device_unlocked(csdev);
if (catu_wait_for_ready(drvdata)) {
dev_info(dev, "Timeout while waiting for READY\n");
rc = -EAGAIN;
}
dev_dbg(dev, "Disabled\n");
return rc;
}
static int catu_disable(struct coresight_device *csdev, void *__unused)
{
int rc;
struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
CS_UNLOCK(catu_drvdata->base);
rc = catu_disable_hw(catu_drvdata);
CS_LOCK(catu_drvdata->base);
return rc;
}
static const struct coresight_ops_helper catu_helper_ops = {
.enable = catu_enable,
.disable = catu_disable,
};
static const struct coresight_ops catu_ops = {
.helper_ops = &catu_helper_ops,
};
static int catu_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
u32 dma_mask;
struct catu_drvdata *drvdata;
struct coresight_desc catu_desc;
struct coresight_platform_data *pdata = NULL;
struct device *dev = &adev->dev;
void __iomem *base;
catu_desc.name = coresight_alloc_device_name(&catu_devs, dev);
if (!catu_desc.name)
return -ENOMEM;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata) {
ret = -ENOMEM;
goto out;
}
dev_set_drvdata(dev, drvdata);
base = devm_ioremap_resource(dev, &adev->res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto out;
}
	/* Set up the DMA mask for the device */
dma_mask = readl_relaxed(base + CORESIGHT_DEVID) & 0x3f;
switch (dma_mask) {
case 32:
case 40:
case 44:
case 48:
case 52:
case 56:
case 64:
break;
default:
		/* Default to 40 bits, as supported by TMC-ETR */
dma_mask = 40;
}
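	/*
	 * Example: a DEVID value with 0x28 (40) in its low six bits
	 * advertises a 40-bit address capability, which is also the
	 * fallback used above for unexpected encodings.
	 */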
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_mask));
if (ret)
goto out;
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata)) {
ret = PTR_ERR(pdata);
goto out;
}
dev->platform_data = pdata;
drvdata->base = base;
catu_desc.access = CSDEV_ACCESS_IOMEM(base);
catu_desc.pdata = pdata;
catu_desc.dev = dev;
catu_desc.groups = catu_groups;
catu_desc.type = CORESIGHT_DEV_TYPE_HELPER;
catu_desc.subtype.helper_subtype = CORESIGHT_DEV_SUBTYPE_HELPER_CATU;
catu_desc.ops = &catu_ops;
drvdata->csdev = coresight_register(&catu_desc);
if (IS_ERR(drvdata->csdev))
ret = PTR_ERR(drvdata->csdev);
else
pm_runtime_put(&adev->dev);
out:
return ret;
}
static void catu_remove(struct amba_device *adev)
{
struct catu_drvdata *drvdata = dev_get_drvdata(&adev->dev);
coresight_unregister(drvdata->csdev);
}
static struct amba_id catu_ids[] = {
CS_AMBA_ID(0x000bb9ee),
{},
};
MODULE_DEVICE_TABLE(amba, catu_ids);
static struct amba_driver catu_driver = {
.drv = {
.name = "coresight-catu",
.owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = catu_probe,
.remove = catu_remove,
.id_table = catu_ids,
};
static int __init catu_init(void)
{
int ret;
ret = amba_driver_register(&catu_driver);
if (ret)
pr_info("Error registering catu driver\n");
tmc_etr_set_catu_ops(&etr_catu_buf_ops);
return ret;
}
static void __exit catu_exit(void)
{
tmc_etr_remove_catu_ops();
amba_driver_unregister(&catu_driver);
}
module_init(catu_init);
module_exit(catu_exit);
MODULE_AUTHOR("Suzuki K Poulose <[email protected]>");
MODULE_DESCRIPTION("Arm CoreSight Address Translation Unit (CATU) Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/hwtracing/coresight/coresight-catu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(C) 2016 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <[email protected]>
*/
#include <linux/atomic.h>
#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
#include "coresight-etm-perf.h"
static int tmc_set_etf_buffer(struct coresight_device *csdev,
struct perf_output_handle *handle);
static int __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
int rc = 0;
CS_UNLOCK(drvdata->base);
/* Wait for TMCSReady bit to be set */
rc = tmc_wait_for_tmcready(drvdata);
if (rc) {
dev_err(&drvdata->csdev->dev,
"Failed to enable: TMC not ready\n");
CS_LOCK(drvdata->base);
return rc;
}
writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
TMC_FFCR_TRIGON_TRIGIN,
drvdata->base + TMC_FFCR);
writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
tmc_enable_hw(drvdata);
CS_LOCK(drvdata->base);
return rc;
}
static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
int rc = coresight_claim_device(drvdata->csdev);
if (rc)
return rc;
rc = __tmc_etb_enable_hw(drvdata);
if (rc)
coresight_disclaim_device(drvdata->csdev);
return rc;
}
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
char *bufp;
u32 read_data, lost;
/* Check if the buffer wrapped around. */
lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
bufp = drvdata->buf;
drvdata->len = 0;
while (1) {
read_data = readl_relaxed(drvdata->base + TMC_RRD);
if (read_data == 0xFFFFFFFF)
break;
memcpy(bufp, &read_data, 4);
bufp += 4;
drvdata->len += 4;
}
if (lost)
coresight_insert_barrier_packet(drvdata->buf);
}
static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
tmc_flush_and_stop(drvdata);
/*
* When operating in sysFS mode the content of the buffer needs to be
* read before the TMC is disabled.
*/
if (drvdata->mode == CS_MODE_SYSFS)
tmc_etb_dump_hw(drvdata);
tmc_disable_hw(drvdata);
CS_LOCK(drvdata->base);
}
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
__tmc_etb_disable_hw(drvdata);
coresight_disclaim_device(drvdata->csdev);
}
static int __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
int rc = 0;
CS_UNLOCK(drvdata->base);
/* Wait for TMCSReady bit to be set */
rc = tmc_wait_for_tmcready(drvdata);
if (rc) {
dev_err(&drvdata->csdev->dev,
"Failed to enable : TMC is not ready\n");
CS_LOCK(drvdata->base);
return rc;
}
writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
drvdata->base + TMC_FFCR);
writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
tmc_enable_hw(drvdata);
CS_LOCK(drvdata->base);
return rc;
}
static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
int rc = coresight_claim_device(drvdata->csdev);
if (rc)
return rc;
rc = __tmc_etf_enable_hw(drvdata);
if (rc)
coresight_disclaim_device(drvdata->csdev);
return rc;
}
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
tmc_flush_and_stop(drvdata);
tmc_disable_hw(drvdata);
coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
}
/*
* Return the available trace data in the buffer from @pos, with
 * a maximum limit of @len, updating @bufpp to point at where to
* find it.
*/
ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
loff_t pos, size_t len, char **bufpp)
{
ssize_t actual = len;
/* Adjust the len to available size @pos */
if (pos + actual > drvdata->len)
actual = drvdata->len - pos;
if (actual > 0)
*bufpp = drvdata->buf + pos;
return actual;
}
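/*
 * Example of the clamping above: with drvdata->len = 4096, a read at
 * pos = 4000 for len = 512 is clamped to 96 bytes; at pos >= 4096 the
 * computed size is <= 0, *bufpp is left untouched and the caller sees
 * end of data.
 */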
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
int ret = 0;
bool used = false;
char *buf = NULL;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
/*
	 * If we don't have a buffer, release the lock and allocate memory.
* Otherwise keep the lock and move along.
*/
spin_lock_irqsave(&drvdata->spinlock, flags);
if (!drvdata->buf) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Allocating the memory here while outside of the spinlock */
buf = kzalloc(drvdata->size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* Let's try again */
spin_lock_irqsave(&drvdata->spinlock, flags);
}
if (drvdata->reading) {
ret = -EBUSY;
goto out;
}
/*
* In sysFS mode we can have multiple writers per sink. Since this
* sink is already enabled no memory is needed and the HW need not be
* touched.
*/
if (drvdata->mode == CS_MODE_SYSFS) {
atomic_inc(&csdev->refcnt);
goto out;
}
/*
* If drvdata::buf isn't NULL, memory was allocated for a previous
* trace run but wasn't read. If so simply zero-out the memory.
* Otherwise use the memory allocated above.
*
* The memory is freed when users read the buffer using the
* /dev/xyz.{etf|etb} interface. See tmc_read_unprepare_etf() for
* details.
*/
if (drvdata->buf) {
memset(drvdata->buf, 0, drvdata->size);
} else {
used = true;
drvdata->buf = buf;
}
ret = tmc_etb_enable_hw(drvdata);
if (!ret) {
drvdata->mode = CS_MODE_SYSFS;
atomic_inc(&csdev->refcnt);
} else {
/* Free up the buffer if we failed to enable */
used = false;
}
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Free memory outside the spinlock if need be */
if (!used)
kfree(buf);
return ret;
}
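/*
 * The function above uses the classic allocate-outside-the-lock pattern:
 * drop the spinlock before a GFP_KERNEL allocation (which may sleep),
 * retake it, then re-check the state that may have changed in between.
 * A minimal sketch of the shape, with a hypothetical 'res' protected by
 * 'lock':
 *
 *	spin_lock_irqsave(&lock, flags);
 *	if (!res) {
 *		spin_unlock_irqrestore(&lock, flags);
 *		p = kzalloc(size, GFP_KERNEL);
 *		spin_lock_irqsave(&lock, flags);
 *	}
 *	... re-check res; publish p, or free it after unlocking ...
 */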
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
int ret = 0;
pid_t pid;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct perf_output_handle *handle = data;
struct cs_buffers *buf = etm_perf_sink_config(handle);
spin_lock_irqsave(&drvdata->spinlock, flags);
do {
ret = -EINVAL;
if (drvdata->reading)
break;
/*
* No need to continue if the ETB/ETF is already operated
* from sysFS.
*/
if (drvdata->mode == CS_MODE_SYSFS) {
ret = -EBUSY;
break;
}
/* Get a handle on the pid of the process to monitor */
pid = buf->pid;
if (drvdata->pid != -1 && drvdata->pid != pid) {
ret = -EBUSY;
break;
}
ret = tmc_set_etf_buffer(csdev, handle);
if (ret)
break;
/*
* No HW configuration is needed if the sink is already in
* use for this session.
*/
if (drvdata->pid == pid) {
atomic_inc(&csdev->refcnt);
break;
}
ret = tmc_etb_enable_hw(drvdata);
if (!ret) {
/* Associate with monitored process. */
drvdata->pid = pid;
drvdata->mode = CS_MODE_PERF;
atomic_inc(&csdev->refcnt);
}
} while (0);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return ret;
}
static int tmc_enable_etf_sink(struct coresight_device *csdev,
enum cs_mode mode, void *data)
{
int ret;
switch (mode) {
case CS_MODE_SYSFS:
ret = tmc_enable_etf_sink_sysfs(csdev);
break;
case CS_MODE_PERF:
ret = tmc_enable_etf_sink_perf(csdev, data);
break;
/* We shouldn't be here */
default:
ret = -EINVAL;
break;
}
if (ret)
return ret;
dev_dbg(&csdev->dev, "TMC-ETB/ETF enabled\n");
return 0;
}
static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
if (atomic_dec_return(&csdev->refcnt)) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
/* Complain if we (somehow) got out of sync */
WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
tmc_etb_disable_hw(drvdata);
/* Dissociate from monitored process. */
drvdata->pid = -1;
drvdata->mode = CS_MODE_DISABLED;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
return 0;
}
static int tmc_enable_etf_link(struct coresight_device *csdev,
struct coresight_connection *in,
struct coresight_connection *out)
{
int ret = 0;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
bool first_enable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
if (atomic_read(&csdev->refcnt) == 0) {
ret = tmc_etf_enable_hw(drvdata);
if (!ret) {
drvdata->mode = CS_MODE_SYSFS;
first_enable = true;
}
}
if (!ret)
atomic_inc(&csdev->refcnt);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (first_enable)
dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
return ret;
}
static void tmc_disable_etf_link(struct coresight_device *csdev,
struct coresight_connection *in,
struct coresight_connection *out)
{
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
bool last_disable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return;
}
if (atomic_dec_return(&csdev->refcnt) == 0) {
tmc_etf_disable_hw(drvdata);
drvdata->mode = CS_MODE_DISABLED;
last_disable = true;
}
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (last_disable)
dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
struct perf_event *event, void **pages,
int nr_pages, bool overwrite)
{
int node;
struct cs_buffers *buf;
node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
/* Allocate memory structure for interaction with Perf */
buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
if (!buf)
return NULL;
buf->pid = task_pid_nr(event->owner);
buf->snapshot = overwrite;
buf->nr_pages = nr_pages;
buf->data_pages = pages;
return buf;
}
static void tmc_free_etf_buffer(void *config)
{
struct cs_buffers *buf = config;
kfree(buf);
}
static int tmc_set_etf_buffer(struct coresight_device *csdev,
struct perf_output_handle *handle)
{
int ret = 0;
unsigned long head;
struct cs_buffers *buf = etm_perf_sink_config(handle);
if (!buf)
return -EINVAL;
/* wrap head around to the amount of space we have */
head = handle->head & (((unsigned long)buf->nr_pages << PAGE_SHIFT) - 1);
/* find the page to write to */
buf->cur = head / PAGE_SIZE;
/* and offset within that page */
buf->offset = head % PAGE_SIZE;
local_set(&buf->data_size, 0);
return ret;
}
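/*
 * Example of the wrap arithmetic above, assuming PAGE_SIZE = 4096 and
 * nr_pages = 4 (16KiB of AUX space): handle->head = 20480 wraps to
 * 20480 & 0x3fff = 4096, giving buf->cur = 1 and buf->offset = 0,
 * i.e. the start of the second page.
 */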
static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config)
{
bool lost = false;
int i, cur;
const u32 *barrier;
u32 *buf_ptr;
u64 read_ptr, write_ptr;
u32 status;
unsigned long offset, to_read = 0, flags;
struct cs_buffers *buf = sink_config;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
if (!buf)
return 0;
/* This shouldn't happen */
if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
return 0;
spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't do anything if another tracer is using this sink */
if (atomic_read(&csdev->refcnt) != 1)
goto out;
CS_UNLOCK(drvdata->base);
tmc_flush_and_stop(drvdata);
read_ptr = tmc_read_rrp(drvdata);
write_ptr = tmc_read_rwp(drvdata);
/*
* Get a hold of the status register and see if a wrap around
* has occurred. If so adjust things accordingly.
*/
status = readl_relaxed(drvdata->base + TMC_STS);
if (status & TMC_STS_FULL) {
lost = true;
to_read = drvdata->size;
} else {
to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
}
/*
* The TMC RAM buffer may be bigger than the space available in the
* perf ring buffer (handle->size). If so advance the RRP so that we
* get the latest trace data. In snapshot mode none of that matters
* since we are expected to clobber stale data in favour of the latest
* traces.
*/
if (!buf->snapshot && to_read > handle->size) {
u32 mask = tmc_get_memwidth_mask(drvdata);
/*
* Make sure the new size is aligned in accordance with the
* requirement explained in function tmc_get_memwidth_mask().
*/
to_read = handle->size & mask;
/* Move the RAM read pointer up */
read_ptr = (write_ptr + drvdata->size) - to_read;
/* Make sure we are still within our limits */
if (read_ptr > (drvdata->size - 1))
read_ptr -= drvdata->size;
/* Tell the HW */
tmc_write_rrp(drvdata, read_ptr);
lost = true;
}
/*
* Don't set the TRUNCATED flag in snapshot mode because 1) the
* captured buffer is expected to be truncated and 2) a full buffer
* prevents the event from being re-enabled by the perf core,
	 * resulting in stale data being sent to user space.
*/
if (!buf->snapshot && lost)
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
cur = buf->cur;
offset = buf->offset;
barrier = coresight_barrier_pkt;
	/* read the trace data one 32-bit word at a time */
for (i = 0; i < to_read; i += 4) {
buf_ptr = buf->data_pages[cur] + offset;
*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);
if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
*buf_ptr = *barrier;
barrier++;
}
offset += 4;
if (offset >= PAGE_SIZE) {
offset = 0;
cur++;
/* wrap around at the end of the buffer */
cur &= buf->nr_pages - 1;
}
}
/*
	 * In snapshot mode we simply increment the head by the number of bytes
* that were written. User space will figure out how many bytes to get
* from the AUX buffer based on the position of the head.
*/
if (buf->snapshot)
handle->head += to_read;
/*
* CS_LOCK() contains mb() so it can ensure visibility of the AUX trace
* data before the aux_head is updated via perf_aux_output_end(), which
* is expected by the perf ring buffer.
*/
CS_LOCK(drvdata->base);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return to_read;
}
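/*
 * Example of the read pointer adjustment above: with a 32KiB TMC RAM
 * that has wrapped (to_read = 0x8000) but only 0x1000 bytes of aligned
 * space in the perf buffer, to_read is clamped to 0x1000 and the RRP is
 * moved to (write_ptr + 0x8000) - 0x1000, so only the most recent 4KiB
 * of trace is copied out and the loss is flagged as TRUNCATED.
 */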
static const struct coresight_ops_sink tmc_etf_sink_ops = {
.enable = tmc_enable_etf_sink,
.disable = tmc_disable_etf_sink,
.alloc_buffer = tmc_alloc_etf_buffer,
.free_buffer = tmc_free_etf_buffer,
.update_buffer = tmc_update_etf_buffer,
};
static const struct coresight_ops_link tmc_etf_link_ops = {
.enable = tmc_enable_etf_link,
.disable = tmc_disable_etf_link,
};
const struct coresight_ops tmc_etb_cs_ops = {
.sink_ops = &tmc_etf_sink_ops,
};
const struct coresight_ops tmc_etf_cs_ops = {
.sink_ops = &tmc_etf_sink_ops,
.link_ops = &tmc_etf_link_ops,
};
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
enum tmc_mode mode;
int ret = 0;
unsigned long flags;
	/* config types are set at boot time and never change */
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
drvdata->config_type != TMC_CONFIG_TYPE_ETF))
return -EINVAL;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
ret = -EBUSY;
goto out;
}
/* Don't interfere if operated from Perf */
if (drvdata->mode == CS_MODE_PERF) {
ret = -EINVAL;
goto out;
}
/* If drvdata::buf is NULL the trace data has been read already */
if (drvdata->buf == NULL) {
ret = -EINVAL;
goto out;
}
/* Disable the TMC if need be */
if (drvdata->mode == CS_MODE_SYSFS) {
/* There is no point in reading a TMC in HW FIFO mode */
mode = readl_relaxed(drvdata->base + TMC_MODE);
if (mode != TMC_MODE_CIRCULAR_BUFFER) {
ret = -EINVAL;
goto out;
}
__tmc_etb_disable_hw(drvdata);
}
drvdata->reading = true;
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return ret;
}
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
char *buf = NULL;
enum tmc_mode mode;
unsigned long flags;
int rc = 0;
	/* config types are set at boot time and never change */
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
drvdata->config_type != TMC_CONFIG_TYPE_ETF))
return -EINVAL;
spin_lock_irqsave(&drvdata->spinlock, flags);
/* Re-enable the TMC if need be */
if (drvdata->mode == CS_MODE_SYSFS) {
/* There is no point in reading a TMC in HW FIFO mode */
mode = readl_relaxed(drvdata->base + TMC_MODE);
if (mode != TMC_MODE_CIRCULAR_BUFFER) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EINVAL;
}
/*
* The trace run will continue with the same allocated trace
* buffer. As such zero-out the buffer so that we don't end
* up with stale data.
*
* Since the tracer is still enabled drvdata::buf
* can't be NULL.
*/
memset(drvdata->buf, 0, drvdata->size);
rc = __tmc_etb_enable_hw(drvdata);
if (rc) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return rc;
}
} else {
/*
* The ETB/ETF is not tracing and the buffer was just read.
* As such prepare to free the trace buffer.
*/
buf = drvdata->buf;
drvdata->buf = NULL;
}
drvdata->reading = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
/*
* Free allocated memory outside of the spinlock. There is no need
* to assert the validity of 'buf' since calling kfree(NULL) is safe.
*/
kfree(buf);
return 0;
}
| linux-master | drivers/hwtracing/coresight/coresight-tmc-etf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020 Linaro Limited, All rights reserved.
* Author: Mike Leach <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "coresight-config.h"
#include "coresight-etm-perf.h"
#include "coresight-syscfg.h"
#include "coresight-syscfg-configfs.h"
/*
* cscfg_ API manages configurations and features for the entire coresight
* infrastructure.
*
* It allows the loading of configurations and features, and loads these into
* coresight devices as appropriate.
*/
/* protect the cscfg data and device */
static DEFINE_MUTEX(cscfg_mutex);
/* only one of these */
static struct cscfg_manager *cscfg_mgr;
/* load features and configurations into the lists */
/* get name feature instance from a coresight device list of features */
static struct cscfg_feature_csdev *
cscfg_get_feat_csdev(struct coresight_device *csdev, const char *name)
{
struct cscfg_feature_csdev *feat_csdev = NULL;
list_for_each_entry(feat_csdev, &csdev->feature_csdev_list, node) {
if (strcmp(feat_csdev->feat_desc->name, name) == 0)
return feat_csdev;
}
return NULL;
}
/* allocate the device config instance - with max number of used features */
static struct cscfg_config_csdev *
cscfg_alloc_csdev_cfg(struct coresight_device *csdev, int nr_feats)
{
struct cscfg_config_csdev *config_csdev = NULL;
struct device *dev = csdev->dev.parent;
/* this is being allocated using the devm for the coresight device */
config_csdev = devm_kzalloc(dev,
offsetof(struct cscfg_config_csdev, feats_csdev[nr_feats]),
GFP_KERNEL);
if (!config_csdev)
return NULL;
config_csdev->csdev = csdev;
return config_csdev;
}
/* Load a config into a device if there are any feature matches between config and device */
static int cscfg_add_csdev_cfg(struct coresight_device *csdev,
struct cscfg_config_desc *config_desc)
{
struct cscfg_config_csdev *config_csdev = NULL;
struct cscfg_feature_csdev *feat_csdev;
unsigned long flags;
int i;
/* look at each required feature and see if it matches any feature on the device */
for (i = 0; i < config_desc->nr_feat_refs; i++) {
/* look for a matching name */
feat_csdev = cscfg_get_feat_csdev(csdev, config_desc->feat_ref_names[i]);
if (feat_csdev) {
/*
* At least one feature on this device matches the config
* add a config instance to the device and a reference to the feature.
*/
if (!config_csdev) {
config_csdev = cscfg_alloc_csdev_cfg(csdev,
config_desc->nr_feat_refs);
if (!config_csdev)
return -ENOMEM;
config_csdev->config_desc = config_desc;
}
config_csdev->feats_csdev[config_csdev->nr_feat++] = feat_csdev;
}
}
/* if matched features, add config to device.*/
if (config_csdev) {
spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
list_add(&config_csdev->node, &csdev->config_csdev_list);
spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
}
return 0;
}
/*
* Add the config to the set of registered devices - call with mutex locked.
* Iterates through devices - any device that matches one or more of the
* configuration features will load it, the others will ignore it.
*/
static int cscfg_add_cfg_to_csdevs(struct cscfg_config_desc *config_desc)
{
struct cscfg_registered_csdev *csdev_item;
int err;
list_for_each_entry(csdev_item, &cscfg_mgr->csdev_desc_list, item) {
err = cscfg_add_csdev_cfg(csdev_item->csdev, config_desc);
if (err)
return err;
}
return 0;
}
/*
* Allocate a feature object for load into a csdev.
* memory allocated using the csdev->dev object using devm managed allocator.
*/
static struct cscfg_feature_csdev *
cscfg_alloc_csdev_feat(struct coresight_device *csdev, struct cscfg_feature_desc *feat_desc)
{
struct cscfg_feature_csdev *feat_csdev = NULL;
struct device *dev = csdev->dev.parent;
int i;
feat_csdev = devm_kzalloc(dev, sizeof(struct cscfg_feature_csdev), GFP_KERNEL);
if (!feat_csdev)
return NULL;
/* parameters are optional - could be 0 */
feat_csdev->nr_params = feat_desc->nr_params;
/*
* if we need parameters, zero alloc the space here, the load routine in
* the csdev device driver will fill out some information according to
* feature descriptor.
*/
if (feat_csdev->nr_params) {
feat_csdev->params_csdev = devm_kcalloc(dev, feat_csdev->nr_params,
sizeof(struct cscfg_parameter_csdev),
GFP_KERNEL);
if (!feat_csdev->params_csdev)
return NULL;
/*
* fill in the feature reference in the param - other fields
* handled by loader in csdev.
*/
for (i = 0; i < feat_csdev->nr_params; i++)
feat_csdev->params_csdev[i].feat_csdev = feat_csdev;
}
/*
* Always have registers to program - again the load routine in csdev device
* will fill out according to feature descriptor and device requirements.
*/
feat_csdev->nr_regs = feat_desc->nr_regs;
feat_csdev->regs_csdev = devm_kcalloc(dev, feat_csdev->nr_regs,
sizeof(struct cscfg_regval_csdev),
GFP_KERNEL);
if (!feat_csdev->regs_csdev)
return NULL;
/* load the feature default values */
feat_csdev->feat_desc = feat_desc;
feat_csdev->csdev = csdev;
return feat_csdev;
}
/* load one feature into one coresight device */
static int cscfg_load_feat_csdev(struct coresight_device *csdev,
struct cscfg_feature_desc *feat_desc,
struct cscfg_csdev_feat_ops *ops)
{
struct cscfg_feature_csdev *feat_csdev;
unsigned long flags;
int err;
if (!ops->load_feat)
return -EINVAL;
feat_csdev = cscfg_alloc_csdev_feat(csdev, feat_desc);
if (!feat_csdev)
return -ENOMEM;
/* load the feature into the device */
err = ops->load_feat(csdev, feat_csdev);
if (err)
return err;
/* add to internal csdev feature list & initialise using reset call */
cscfg_reset_feat(feat_csdev);
spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
list_add(&feat_csdev->node, &csdev->feature_csdev_list);
spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
return 0;
}
/*
* Add feature to any matching devices - call with mutex locked.
* Iterates through devices - any device that matches the feature will be
* called to load it.
*/
static int cscfg_add_feat_to_csdevs(struct cscfg_feature_desc *feat_desc)
{
struct cscfg_registered_csdev *csdev_item;
int err;
list_for_each_entry(csdev_item, &cscfg_mgr->csdev_desc_list, item) {
if (csdev_item->match_flags & feat_desc->match_flags) {
err = cscfg_load_feat_csdev(csdev_item->csdev, feat_desc, &csdev_item->ops);
if (err)
return err;
}
}
return 0;
}
/* check feature list for a named feature - call with mutex locked. */
static bool cscfg_match_list_feat(const char *name)
{
struct cscfg_feature_desc *feat_desc;
list_for_each_entry(feat_desc, &cscfg_mgr->feat_desc_list, item) {
if (strcmp(feat_desc->name, name) == 0)
return true;
}
return false;
}
/* check all feat needed for cfg are in the list - call with mutex locked. */
static int cscfg_check_feat_for_cfg(struct cscfg_config_desc *config_desc)
{
int i;
for (i = 0; i < config_desc->nr_feat_refs; i++)
if (!cscfg_match_list_feat(config_desc->feat_ref_names[i]))
return -EINVAL;
return 0;
}
/*
* load feature - add to feature list.
*/
static int cscfg_load_feat(struct cscfg_feature_desc *feat_desc)
{
int err;
struct cscfg_feature_desc *feat_desc_exist;
/* new feature must have unique name */
list_for_each_entry(feat_desc_exist, &cscfg_mgr->feat_desc_list, item) {
if (!strcmp(feat_desc_exist->name, feat_desc->name))
return -EEXIST;
}
/* add feature to any matching registered devices */
err = cscfg_add_feat_to_csdevs(feat_desc);
if (err)
return err;
list_add(&feat_desc->item, &cscfg_mgr->feat_desc_list);
return 0;
}
/*
* load config into the system - validate used features exist then add to
* config list.
*/
static int cscfg_load_config(struct cscfg_config_desc *config_desc)
{
int err;
struct cscfg_config_desc *config_desc_exist;
/* new configuration must have a unique name */
list_for_each_entry(config_desc_exist, &cscfg_mgr->config_desc_list, item) {
if (!strcmp(config_desc_exist->name, config_desc->name))
return -EEXIST;
}
/* validate features are present */
err = cscfg_check_feat_for_cfg(config_desc);
if (err)
return err;
/* add config to any matching registered device */
err = cscfg_add_cfg_to_csdevs(config_desc);
if (err)
return err;
/* add config to perf fs to allow selection */
err = etm_perf_add_symlink_cscfg(cscfg_device(), config_desc);
if (err)
return err;
list_add(&config_desc->item, &cscfg_mgr->config_desc_list);
atomic_set(&config_desc->active_cnt, 0);
return 0;
}
/* get a feature descriptor by name */
const struct cscfg_feature_desc *cscfg_get_named_feat_desc(const char *name)
{
const struct cscfg_feature_desc *feat_desc = NULL, *feat_desc_item;
mutex_lock(&cscfg_mutex);
list_for_each_entry(feat_desc_item, &cscfg_mgr->feat_desc_list, item) {
if (strcmp(feat_desc_item->name, name) == 0) {
feat_desc = feat_desc_item;
break;
}
}
mutex_unlock(&cscfg_mutex);
return feat_desc;
}
/* called with cscfg_mutex held */
static struct cscfg_feature_csdev *
cscfg_csdev_get_feat_from_desc(struct coresight_device *csdev,
struct cscfg_feature_desc *feat_desc)
{
struct cscfg_feature_csdev *feat_csdev;
list_for_each_entry(feat_csdev, &csdev->feature_csdev_list, node) {
if (feat_csdev->feat_desc == feat_desc)
return feat_csdev;
}
return NULL;
}
int cscfg_update_feat_param_val(struct cscfg_feature_desc *feat_desc,
int param_idx, u64 value)
{
int err = 0;
struct cscfg_feature_csdev *feat_csdev;
struct cscfg_registered_csdev *csdev_item;
mutex_lock(&cscfg_mutex);
/* check if any config active & return busy */
if (atomic_read(&cscfg_mgr->sys_active_cnt)) {
err = -EBUSY;
goto unlock_exit;
}
/* set the value */
if ((param_idx < 0) || (param_idx >= feat_desc->nr_params)) {
err = -EINVAL;
goto unlock_exit;
}
feat_desc->params_desc[param_idx].value = value;
/* update loaded instances.*/
list_for_each_entry(csdev_item, &cscfg_mgr->csdev_desc_list, item) {
feat_csdev = cscfg_csdev_get_feat_from_desc(csdev_item->csdev, feat_desc);
if (feat_csdev)
feat_csdev->params_csdev[param_idx].current_value = value;
}
unlock_exit:
mutex_unlock(&cscfg_mutex);
return err;
}
/*
* Conditionally up reference count on owner to prevent unload.
*
* module loaded configs need to be locked in to prevent premature unload.
*/
static int cscfg_owner_get(struct cscfg_load_owner_info *owner_info)
{
if ((owner_info->type == CSCFG_OWNER_MODULE) &&
(!try_module_get(owner_info->owner_handle)))
return -EINVAL;
return 0;
}
/* conditionally lower ref count on an owner */
static void cscfg_owner_put(struct cscfg_load_owner_info *owner_info)
{
if (owner_info->type == CSCFG_OWNER_MODULE)
module_put(owner_info->owner_handle);
}
static void cscfg_remove_owned_csdev_configs(struct coresight_device *csdev, void *load_owner)
{
struct cscfg_config_csdev *config_csdev, *tmp;
if (list_empty(&csdev->config_csdev_list))
return;
list_for_each_entry_safe(config_csdev, tmp, &csdev->config_csdev_list, node) {
if (config_csdev->config_desc->load_owner == load_owner)
list_del(&config_csdev->node);
}
}
static void cscfg_remove_owned_csdev_features(struct coresight_device *csdev, void *load_owner)
{
struct cscfg_feature_csdev *feat_csdev, *tmp;
if (list_empty(&csdev->feature_csdev_list))
return;
list_for_each_entry_safe(feat_csdev, tmp, &csdev->feature_csdev_list, node) {
if (feat_csdev->feat_desc->load_owner == load_owner)
list_del(&feat_csdev->node);
}
}
/*
* Unregister all configuration and features from configfs owned by load_owner.
* Although this is called without the list mutex being held, it is in the
 * context of an unload operation; such operations are strictly serialised,
* so the lists cannot change during this call.
*/
static void cscfg_fs_unregister_cfgs_feats(void *load_owner)
{
struct cscfg_config_desc *config_desc;
struct cscfg_feature_desc *feat_desc;
list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) {
if (config_desc->load_owner == load_owner)
cscfg_configfs_del_config(config_desc);
}
list_for_each_entry(feat_desc, &cscfg_mgr->feat_desc_list, item) {
if (feat_desc->load_owner == load_owner)
cscfg_configfs_del_feature(feat_desc);
}
}
/*
* removal is relatively easy - just remove from all lists, anything that
* matches the owner. Memory for the descriptors will be managed by the owner,
* memory for the csdev items is devm_ allocated with the individual csdev
* devices.
*/
static void cscfg_unload_owned_cfgs_feats(void *load_owner)
{
struct cscfg_config_desc *config_desc, *cfg_tmp;
struct cscfg_feature_desc *feat_desc, *feat_tmp;
struct cscfg_registered_csdev *csdev_item;
lockdep_assert_held(&cscfg_mutex);
/* remove from each csdev instance feature and config lists */
list_for_each_entry(csdev_item, &cscfg_mgr->csdev_desc_list, item) {
/*
* for each csdev, check the loaded lists and remove if
* referenced descriptor is owned
*/
cscfg_remove_owned_csdev_configs(csdev_item->csdev, load_owner);
cscfg_remove_owned_csdev_features(csdev_item->csdev, load_owner);
}
/* remove from the config descriptor lists */
list_for_each_entry_safe(config_desc, cfg_tmp, &cscfg_mgr->config_desc_list, item) {
if (config_desc->load_owner == load_owner) {
etm_perf_del_symlink_cscfg(config_desc);
list_del(&config_desc->item);
}
}
/* remove from the feature descriptor lists */
list_for_each_entry_safe(feat_desc, feat_tmp, &cscfg_mgr->feat_desc_list, item) {
if (feat_desc->load_owner == load_owner) {
list_del(&feat_desc->item);
}
}
}
/*
* load the features and configs to the lists - called with list mutex held
*/
static int cscfg_load_owned_cfgs_feats(struct cscfg_config_desc **config_descs,
struct cscfg_feature_desc **feat_descs,
struct cscfg_load_owner_info *owner_info)
{
int i, err;
lockdep_assert_held(&cscfg_mutex);
/* load features first */
if (feat_descs) {
for (i = 0; feat_descs[i]; i++) {
err = cscfg_load_feat(feat_descs[i]);
if (err) {
pr_err("coresight-syscfg: Failed to load feature %s\n",
feat_descs[i]->name);
return err;
}
feat_descs[i]->load_owner = owner_info;
}
}
/* next any configurations to check feature dependencies */
if (config_descs) {
for (i = 0; config_descs[i]; i++) {
err = cscfg_load_config(config_descs[i]);
if (err) {
pr_err("coresight-syscfg: Failed to load configuration %s\n",
config_descs[i]->name);
return err;
}
config_descs[i]->load_owner = owner_info;
config_descs[i]->available = false;
}
}
return 0;
}
/* set configurations as available to activate at the end of the load process */
static void cscfg_set_configs_available(struct cscfg_config_desc **config_descs)
{
int i;
lockdep_assert_held(&cscfg_mutex);
if (config_descs) {
for (i = 0; config_descs[i]; i++)
config_descs[i]->available = true;
}
}
/*
* Create and register each of the configurations and features with configfs.
* Called without mutex being held.
*/
static int cscfg_fs_register_cfgs_feats(struct cscfg_config_desc **config_descs,
struct cscfg_feature_desc **feat_descs)
{
int i, err;
if (feat_descs) {
for (i = 0; feat_descs[i]; i++) {
err = cscfg_configfs_add_feature(feat_descs[i]);
if (err)
return err;
}
}
if (config_descs) {
for (i = 0; config_descs[i]; i++) {
err = cscfg_configfs_add_config(config_descs[i]);
if (err)
return err;
}
}
return 0;
}
/**
* cscfg_load_config_sets - API function to load feature and config sets.
*
* Take a 0 terminated array of feature descriptors and/or configuration
* descriptors and load into the system.
* Features are loaded first to ensure configuration dependencies can be met.
*
* To facilitate dynamic loading and unloading, features and configurations
* have a "load_owner", to allow later unload by the same owner. An owner may
* be a loadable module or configuration dynamically created via configfs.
* As later loaded configurations can use earlier loaded features, creating load
* dependencies, a load order list is maintained. Unload is strictly in the
* reverse order to load.
*
* @config_descs: 0 terminated array of configuration descriptors.
* @feat_descs: 0 terminated array of feature descriptors.
* @owner_info: Information on the owner of this set.
*/
int cscfg_load_config_sets(struct cscfg_config_desc **config_descs,
struct cscfg_feature_desc **feat_descs,
struct cscfg_load_owner_info *owner_info)
{
int err = 0;
mutex_lock(&cscfg_mutex);
if (cscfg_mgr->load_state != CSCFG_NONE) {
mutex_unlock(&cscfg_mutex);
return -EBUSY;
}
cscfg_mgr->load_state = CSCFG_LOAD;
/* first load and add to the lists */
err = cscfg_load_owned_cfgs_feats(config_descs, feat_descs, owner_info);
if (err)
goto err_clean_load;
/* add the load owner to the load order list */
list_add_tail(&owner_info->item, &cscfg_mgr->load_order_list);
if (!list_is_singular(&cscfg_mgr->load_order_list)) {
/* lock previous item in load order list */
err = cscfg_owner_get(list_prev_entry(owner_info, item));
if (err)
goto err_clean_owner_list;
}
/*
* make visible to configfs - configfs manipulation must occur outside
* the list mutex lock to avoid circular lockdep issues with configfs
* built in mutexes and semaphores. This is safe as it is not possible
* to start a new load/unload operation till the current one is done.
*/
mutex_unlock(&cscfg_mutex);
/* create the configfs elements */
err = cscfg_fs_register_cfgs_feats(config_descs, feat_descs);
mutex_lock(&cscfg_mutex);
if (err)
goto err_clean_cfs;
/* mark any new configs as available for activation */
cscfg_set_configs_available(config_descs);
goto exit_unlock;
err_clean_cfs:
/* cleanup after error registering with configfs */
cscfg_fs_unregister_cfgs_feats(owner_info);
if (!list_is_singular(&cscfg_mgr->load_order_list))
cscfg_owner_put(list_prev_entry(owner_info, item));
err_clean_owner_list:
list_del(&owner_info->item);
err_clean_load:
cscfg_unload_owned_cfgs_feats(owner_info);
exit_unlock:
cscfg_mgr->load_state = CSCFG_NONE;
mutex_unlock(&cscfg_mutex);
return err;
}
EXPORT_SYMBOL_GPL(cscfg_load_config_sets);
/**
* cscfg_unload_config_sets - unload a set of configurations by owner.
*
* Dynamic unload of configuration and feature sets is done on the basis of
* the load owner of that set. Later loaded configurations can depend on
* features loaded earlier.
*
* Therefore, unload is only possible if:-
* 1) no configurations are active.
* 2) the set being unloaded was the last to be loaded to maintain dependencies.
*
* Once the unload operation commences, we disallow any configuration being
* made active until it is complete.
*
* @owner_info: Information on owner for set being unloaded.
*/
int cscfg_unload_config_sets(struct cscfg_load_owner_info *owner_info)
{
int err = 0;
struct cscfg_load_owner_info *load_list_item = NULL;
mutex_lock(&cscfg_mutex);
if (cscfg_mgr->load_state != CSCFG_NONE) {
mutex_unlock(&cscfg_mutex);
return -EBUSY;
}
/* unload op in progress also prevents activation of any config */
cscfg_mgr->load_state = CSCFG_UNLOAD;
/* cannot unload if anything is active */
if (atomic_read(&cscfg_mgr->sys_active_cnt)) {
err = -EBUSY;
goto exit_unlock;
}
/* cannot unload if not last loaded in load order */
if (!list_empty(&cscfg_mgr->load_order_list)) {
load_list_item = list_last_entry(&cscfg_mgr->load_order_list,
struct cscfg_load_owner_info, item);
if (load_list_item != owner_info)
load_list_item = NULL;
}
if (!load_list_item) {
err = -EINVAL;
goto exit_unlock;
}
/* remove from configfs - again outside the scope of the list mutex */
mutex_unlock(&cscfg_mutex);
cscfg_fs_unregister_cfgs_feats(owner_info);
mutex_lock(&cscfg_mutex);
/* unload everything from lists belonging to load_owner */
cscfg_unload_owned_cfgs_feats(owner_info);
/* remove from load order list */
if (!list_is_singular(&cscfg_mgr->load_order_list)) {
/* unlock previous item in load order list */
cscfg_owner_put(list_prev_entry(owner_info, item));
}
list_del(&owner_info->item);
exit_unlock:
cscfg_mgr->load_state = CSCFG_NONE;
mutex_unlock(&cscfg_mutex);
return err;
}
EXPORT_SYMBOL_GPL(cscfg_unload_config_sets);
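/*
 * Typical use from a loadable configuration module - a sketch assuming
 * statically defined, 0 terminated descriptor arrays (the names below
 * are illustrative, not part of this API):
 *
 *	static struct cscfg_load_owner_info mod_owner = {
 *		.type = CSCFG_OWNER_MODULE,
 *		.owner_handle = THIS_MODULE,
 *	};
 *
 *	static int __init cfg_module_init(void)
 *	{
 *		return cscfg_load_config_sets(my_cfgs, my_feats, &mod_owner);
 *	}
 *
 *	static void __exit cfg_module_exit(void)
 *	{
 *		cscfg_unload_config_sets(&mod_owner);
 *	}
 */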
/* Handle coresight device registration and add configs and features to devices */
/* iterate through config lists and load matching configs to device */
static int cscfg_add_cfgs_csdev(struct coresight_device *csdev)
{
struct cscfg_config_desc *config_desc;
int err = 0;
list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) {
err = cscfg_add_csdev_cfg(csdev, config_desc);
if (err)
break;
}
return err;
}
/* iterate through feature lists and load matching features to device */
static int cscfg_add_feats_csdev(struct coresight_device *csdev,
u32 match_flags,
struct cscfg_csdev_feat_ops *ops)
{
struct cscfg_feature_desc *feat_desc;
int err = 0;
if (!ops->load_feat)
return -EINVAL;
list_for_each_entry(feat_desc, &cscfg_mgr->feat_desc_list, item) {
if (feat_desc->match_flags & match_flags) {
err = cscfg_load_feat_csdev(csdev, feat_desc, ops);
if (err)
break;
}
}
return err;
}
/* Add coresight device to list and copy its matching info */
static int cscfg_list_add_csdev(struct coresight_device *csdev,
u32 match_flags,
struct cscfg_csdev_feat_ops *ops)
{
struct cscfg_registered_csdev *csdev_item;
/* allocate the list entry structure */
csdev_item = kzalloc(sizeof(struct cscfg_registered_csdev), GFP_KERNEL);
if (!csdev_item)
return -ENOMEM;
csdev_item->csdev = csdev;
csdev_item->match_flags = match_flags;
csdev_item->ops.load_feat = ops->load_feat;
list_add(&csdev_item->item, &cscfg_mgr->csdev_desc_list);
INIT_LIST_HEAD(&csdev->feature_csdev_list);
INIT_LIST_HEAD(&csdev->config_csdev_list);
spin_lock_init(&csdev->cscfg_csdev_lock);
return 0;
}
/* remove a coresight device from the list and free data */
static void cscfg_list_remove_csdev(struct coresight_device *csdev)
{
struct cscfg_registered_csdev *csdev_item, *tmp;
list_for_each_entry_safe(csdev_item, tmp, &cscfg_mgr->csdev_desc_list, item) {
if (csdev_item->csdev == csdev) {
list_del(&csdev_item->item);
kfree(csdev_item);
break;
}
}
}
/**
* cscfg_register_csdev - register a coresight device with the syscfg manager.
*
* Registers the coresight device with the system. @match_flags used to check
* if the device is a match for registered features. Any currently registered
* configurations and features that match the device will be loaded onto it.
*
* @csdev: The coresight device to register.
* @match_flags: Matching information to load features.
* @ops: Standard operations supported by the device.
*/
int cscfg_register_csdev(struct coresight_device *csdev,
u32 match_flags,
struct cscfg_csdev_feat_ops *ops)
{
int ret = 0;
mutex_lock(&cscfg_mutex);
/* add device to list of registered devices */
ret = cscfg_list_add_csdev(csdev, match_flags, ops);
if (ret)
goto reg_csdev_unlock;
/* now load any registered features and configs matching the device. */
ret = cscfg_add_feats_csdev(csdev, match_flags, ops);
if (ret) {
cscfg_list_remove_csdev(csdev);
goto reg_csdev_unlock;
}
ret = cscfg_add_cfgs_csdev(csdev);
if (ret) {
cscfg_list_remove_csdev(csdev);
goto reg_csdev_unlock;
}
pr_info("CSCFG registered %s", dev_name(&csdev->dev));
reg_csdev_unlock:
mutex_unlock(&cscfg_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(cscfg_register_csdev);
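/*
 * A trace source driver typically registers from its probe path once
 * its coresight device exists - a sketch using the ETMv4 match class
 * (the callback and ops names here are illustrative):
 *
 *	static struct cscfg_csdev_feat_ops my_cscfg_ops = {
 *		.load_feat = my_load_feat,
 *	};
 *
 *	ret = cscfg_register_csdev(drvdata->csdev,
 *				   CS_CFG_MATCH_CLASS_SRC_ETM4,
 *				   &my_cscfg_ops);
 */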
/**
* cscfg_unregister_csdev - remove coresight device from syscfg manager.
*
* @csdev: Device to remove.
*/
void cscfg_unregister_csdev(struct coresight_device *csdev)
{
mutex_lock(&cscfg_mutex);
cscfg_list_remove_csdev(csdev);
mutex_unlock(&cscfg_mutex);
}
EXPORT_SYMBOL_GPL(cscfg_unregister_csdev);
/**
* cscfg_csdev_reset_feats - reset features for a CoreSight device.
*
* Resets all parameters and register values for any features loaded
* into @csdev to their default values.
*
* @csdev: The CoreSight device.
*/
void cscfg_csdev_reset_feats(struct coresight_device *csdev)
{
struct cscfg_feature_csdev *feat_csdev;
unsigned long flags;
spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
if (list_empty(&csdev->feature_csdev_list))
goto unlock_exit;
list_for_each_entry(feat_csdev, &csdev->feature_csdev_list, node)
cscfg_reset_feat(feat_csdev);
unlock_exit:
spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
}
EXPORT_SYMBOL_GPL(cscfg_csdev_reset_feats);
/*
 * This activates a configuration for either perf or sysfs. Perf can have
 * multiple active configs, selected per event; sysfs is limited to one.
*
* Increments the configuration descriptor active count and the global active
* count.
*
* @cfg_hash: Hash value of the selected configuration name.
*/
static int _cscfg_activate_config(unsigned long cfg_hash)
{
struct cscfg_config_desc *config_desc;
int err = -EINVAL;
if (cscfg_mgr->load_state == CSCFG_UNLOAD)
return -EBUSY;
list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) {
if ((unsigned long)config_desc->event_ea->var == cfg_hash) {
/* if we happen upon a partly loaded config, can't use it */
if (config_desc->available == false)
return -EBUSY;
/* must ensure that config cannot be unloaded in use */
err = cscfg_owner_get(config_desc->load_owner);
if (err)
break;
/*
* increment the global active count - control changes to
* active configurations
*/
atomic_inc(&cscfg_mgr->sys_active_cnt);
/*
* mark the descriptor as active so enable config on a
* device instance will use it
*/
atomic_inc(&config_desc->active_cnt);
err = 0;
dev_dbg(cscfg_device(), "Activate config %s.\n", config_desc->name);
break;
}
}
return err;
}
static void _cscfg_deactivate_config(unsigned long cfg_hash)
{
struct cscfg_config_desc *config_desc;
list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) {
if ((unsigned long)config_desc->event_ea->var == cfg_hash) {
atomic_dec(&config_desc->active_cnt);
atomic_dec(&cscfg_mgr->sys_active_cnt);
cscfg_owner_put(config_desc->load_owner);
dev_dbg(cscfg_device(), "Deactivate config %s.\n", config_desc->name);
break;
}
}
}
/*
* called from configfs to set/clear the active configuration for use when
* using sysfs to control trace.
*/
int cscfg_config_sysfs_activate(struct cscfg_config_desc *config_desc, bool activate)
{
unsigned long cfg_hash;
int err = 0;
mutex_lock(&cscfg_mutex);
cfg_hash = (unsigned long)config_desc->event_ea->var;
if (activate) {
		/* cannot activate while another config is already active */
if (cscfg_mgr->sysfs_active_config) {
err = -EBUSY;
goto exit_unlock;
}
err = _cscfg_activate_config(cfg_hash);
if (!err)
cscfg_mgr->sysfs_active_config = cfg_hash;
} else {
/* disable if matching current value */
if (cscfg_mgr->sysfs_active_config == cfg_hash) {
_cscfg_deactivate_config(cfg_hash);
cscfg_mgr->sysfs_active_config = 0;
} else
err = -EINVAL;
}
exit_unlock:
mutex_unlock(&cscfg_mutex);
return err;
}
/* set the sysfs preset value */
void cscfg_config_sysfs_set_preset(int preset)
{
mutex_lock(&cscfg_mutex);
cscfg_mgr->sysfs_active_preset = preset;
mutex_unlock(&cscfg_mutex);
}
/*
* Used by a device to get the config and preset selected as active in configfs,
* when using sysfs to control trace.
*/
void cscfg_config_sysfs_get_active_cfg(unsigned long *cfg_hash, int *preset)
{
mutex_lock(&cscfg_mutex);
*preset = cscfg_mgr->sysfs_active_preset;
*cfg_hash = cscfg_mgr->sysfs_active_config;
mutex_unlock(&cscfg_mutex);
}
EXPORT_SYMBOL_GPL(cscfg_config_sysfs_get_active_cfg);
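/*
 * A device driver enabling trace under sysfs control can honour the
 * configfs selection with a sequence along these lines (sketch):
 *
 *	unsigned long cfg_hash;
 *	int preset;
 *
 *	cscfg_config_sysfs_get_active_cfg(&cfg_hash, &preset);
 *	if (cfg_hash)
 *		err = cscfg_csdev_enable_active_config(csdev, cfg_hash,
 *						       preset);
 */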
/**
* cscfg_activate_config - Mark a configuration descriptor as active.
*
* This will be seen when csdev devices are enabled in the system.
* Only activated configurations can be enabled on individual devices.
* Activation protects the configuration from alteration or removal while
* active.
*
* Selection by hash value - generated from the configuration name when it
* was loaded and added to the cs_etm/configurations file system for selection
* by perf.
*
* @cfg_hash: Hash value of the selected configuration name.
*/
int cscfg_activate_config(unsigned long cfg_hash)
{
int err = 0;
mutex_lock(&cscfg_mutex);
err = _cscfg_activate_config(cfg_hash);
mutex_unlock(&cscfg_mutex);
return err;
}
EXPORT_SYMBOL_GPL(cscfg_activate_config);
/**
* cscfg_deactivate_config - Mark a config descriptor as inactive.
*
* Decrement the configuration and global active counts.
*
* @cfg_hash: Hash value of the selected configuration name.
*/
void cscfg_deactivate_config(unsigned long cfg_hash)
{
mutex_lock(&cscfg_mutex);
_cscfg_deactivate_config(cfg_hash);
mutex_unlock(&cscfg_mutex);
}
EXPORT_SYMBOL_GPL(cscfg_deactivate_config);
/**
* cscfg_csdev_enable_active_config - Enable matching active configuration for device.
*
* Enables the configuration selected by @cfg_hash if the configuration is supported
* on the device and has been activated.
*
* If active and supported the CoreSight device @csdev will be programmed with the
* configuration, using @preset parameters.
*
* Should be called before driver hardware enable for the requested device, prior to
* programming and enabling the physical hardware.
*
* @csdev: CoreSight device to program.
* @cfg_hash: Selector for the configuration.
* @preset: Preset parameter values to use, 0 for current / default values.
*/
int cscfg_csdev_enable_active_config(struct coresight_device *csdev,
unsigned long cfg_hash, int preset)
{
struct cscfg_config_csdev *config_csdev_active = NULL, *config_csdev_item;
const struct cscfg_config_desc *config_desc;
unsigned long flags;
int err = 0;
/* quickly check global count */
if (!atomic_read(&cscfg_mgr->sys_active_cnt))
return 0;
/*
* Look for matching configuration - set the active configuration
* context if found.
*/
spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
list_for_each_entry(config_csdev_item, &csdev->config_csdev_list, node) {
config_desc = config_csdev_item->config_desc;
if ((atomic_read(&config_desc->active_cnt)) &&
((unsigned long)config_desc->event_ea->var == cfg_hash)) {
config_csdev_active = config_csdev_item;
csdev->active_cscfg_ctxt = (void *)config_csdev_active;
break;
}
}
spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
/*
* If found, attempt to enable
*/
if (config_csdev_active) {
/*
* Call the generic routine that will program up the internal
* driver structures prior to programming up the hardware.
* This routine takes the driver spinlock saved in the configs.
*/
err = cscfg_csdev_enable_config(config_csdev_active, preset);
if (!err) {
/*
* Successful programming. Check the active_cscfg_ctxt
* pointer to ensure no pre-emption disabled it via
* cscfg_csdev_disable_active_config() before
* we could start.
*
* Set enabled if OK, err if not.
*/
spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
if (csdev->active_cscfg_ctxt)
config_csdev_active->enabled = true;
else
err = -EBUSY;
spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
}
}
return err;
}
EXPORT_SYMBOL_GPL(cscfg_csdev_enable_active_config);
/**
* cscfg_csdev_disable_active_config - disable an active config on the device.
*
* Disables the active configuration on the CoreSight device @csdev.
* Disable will save the values of any registers marked in the configurations
* as save on disable.
*
* Should be called after driver hardware disable for the requested device,
* after disabling the physical hardware and reading back registers.
*
* @csdev: The CoreSight device.
*/
void cscfg_csdev_disable_active_config(struct coresight_device *csdev)
{
struct cscfg_config_csdev *config_csdev;
unsigned long flags;
/*
* Check if we have an active config, and that it was successfully enabled.
* If it was not enabled, we have no work to do, otherwise mark as disabled.
* Clear the active config pointer.
*/
spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
config_csdev = (struct cscfg_config_csdev *)csdev->active_cscfg_ctxt;
if (config_csdev) {
if (!config_csdev->enabled)
config_csdev = NULL;
else
config_csdev->enabled = false;
}
csdev->active_cscfg_ctxt = NULL;
spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
/* true if there was an enabled active config */
if (config_csdev)
cscfg_csdev_disable_config(config_csdev);
}
EXPORT_SYMBOL_GPL(cscfg_csdev_disable_active_config);
/* Return the system configuration management device. */
struct device *cscfg_device(void)
{
return cscfg_mgr ? &cscfg_mgr->dev : NULL;
}
/* Must have a release function or the kernel will complain on module unload */
static void cscfg_dev_release(struct device *dev)
{
mutex_lock(&cscfg_mutex);
kfree(cscfg_mgr);
cscfg_mgr = NULL;
mutex_unlock(&cscfg_mutex);
}
/* a device is needed to "own" some kernel elements such as sysfs entries. */
static int cscfg_create_device(void)
{
struct device *dev;
int err = -ENOMEM;
mutex_lock(&cscfg_mutex);
if (cscfg_mgr) {
err = -EINVAL;
goto create_dev_exit_unlock;
}
cscfg_mgr = kzalloc(sizeof(struct cscfg_manager), GFP_KERNEL);
if (!cscfg_mgr)
goto create_dev_exit_unlock;
/* initialise the cscfg_mgr structure */
INIT_LIST_HEAD(&cscfg_mgr->csdev_desc_list);
INIT_LIST_HEAD(&cscfg_mgr->feat_desc_list);
INIT_LIST_HEAD(&cscfg_mgr->config_desc_list);
INIT_LIST_HEAD(&cscfg_mgr->load_order_list);
atomic_set(&cscfg_mgr->sys_active_cnt, 0);
cscfg_mgr->load_state = CSCFG_NONE;
/* setup the device */
dev = cscfg_device();
dev->release = cscfg_dev_release;
dev->init_name = "cs_system_cfg";
err = device_register(dev);
if (err)
put_device(dev);
create_dev_exit_unlock:
mutex_unlock(&cscfg_mutex);
return err;
}
/*
* Loading and unloading is generally on user discretion.
* If exiting due to coresight module unload, we need to unload any configurations that remain,
 * before we unregister the configfs infrastructure.
*
* Do this by walking the load_owner list and taking appropriate action, depending on the load
* owner type.
*/
static void cscfg_unload_cfgs_on_exit(void)
{
struct cscfg_load_owner_info *owner_info = NULL;
/*
* grab the mutex - even though we are exiting, some configfs files
	 * may still be live until we remove them, so ensure list data is
* protected from a race condition.
*/
mutex_lock(&cscfg_mutex);
while (!list_empty(&cscfg_mgr->load_order_list)) {
/* remove in reverse order of loading */
owner_info = list_last_entry(&cscfg_mgr->load_order_list,
struct cscfg_load_owner_info, item);
/* action according to type */
switch (owner_info->type) {
case CSCFG_OWNER_PRELOAD:
/*
* preloaded descriptors are statically allocated in
* this module - just need to unload dynamic items from
* csdev lists, and remove from configfs directories.
*/
pr_info("cscfg: unloading preloaded configurations\n");
break;
case CSCFG_OWNER_MODULE:
/*
* this is an error - the loadable module must have been unloaded prior
* to the coresight module unload. Therefore that module has not
* correctly unloaded configs in its own exit code.
* Nothing to do other than emit an error string as the static descriptor
* references we need to unload will have disappeared with the module.
*/
pr_err("cscfg: ERROR: prior module failed to unload configuration\n");
goto list_remove;
}
/* remove from configfs - outside the scope of the list mutex */
mutex_unlock(&cscfg_mutex);
cscfg_fs_unregister_cfgs_feats(owner_info);
mutex_lock(&cscfg_mutex);
/* Next unload from csdev lists. */
cscfg_unload_owned_cfgs_feats(owner_info);
list_remove:
/* remove from load order list */
list_del(&owner_info->item);
}
mutex_unlock(&cscfg_mutex);
}
static void cscfg_clear_device(void)
{
cscfg_unload_cfgs_on_exit();
cscfg_configfs_release(cscfg_mgr);
device_unregister(cscfg_device());
}
/* Initialise system config management API device */
int __init cscfg_init(void)
{
int err = 0;
/* create the device and init cscfg_mgr */
err = cscfg_create_device();
if (err)
return err;
/* initialise configfs subsystem */
err = cscfg_configfs_init(cscfg_mgr);
if (err)
goto exit_err;
/* preload built-in configurations */
err = cscfg_preload(THIS_MODULE);
if (err)
goto exit_err;
dev_info(cscfg_device(), "CoreSight Configuration manager initialised");
return 0;
exit_err:
cscfg_clear_device();
return err;
}
void cscfg_exit(void)
{
cscfg_clear_device();
}
| linux-master | drivers/hwtracing/coresight/coresight-syscfg.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020 Linaro Limited, All rights reserved.
* Author: Mike Leach <[email protected]>
*/
#include <linux/configfs.h>
#include "coresight-config.h"
#include "coresight-syscfg-configfs.h"
/* create a default ci_type. */
static inline struct config_item_type *cscfg_create_ci_type(void)
{
struct config_item_type *ci_type;
ci_type = devm_kzalloc(cscfg_device(), sizeof(*ci_type), GFP_KERNEL);
if (ci_type)
ci_type->ct_owner = THIS_MODULE;
return ci_type;
}
/* configurations sub-group */
/* attributes for the config view group */
static ssize_t cscfg_cfg_description_show(struct config_item *item, char *page)
{
struct cscfg_fs_config *fs_config = container_of(to_config_group(item),
struct cscfg_fs_config, group);
return scnprintf(page, PAGE_SIZE, "%s", fs_config->config_desc->description);
}
CONFIGFS_ATTR_RO(cscfg_cfg_, description);
static ssize_t cscfg_cfg_feature_refs_show(struct config_item *item, char *page)
{
struct cscfg_fs_config *fs_config = container_of(to_config_group(item),
struct cscfg_fs_config, group);
const struct cscfg_config_desc *config_desc = fs_config->config_desc;
ssize_t ch_used = 0;
int i;
for (i = 0; i < config_desc->nr_feat_refs; i++)
ch_used += scnprintf(page + ch_used, PAGE_SIZE - ch_used,
"%s\n", config_desc->feat_ref_names[i]);
return ch_used;
}
CONFIGFS_ATTR_RO(cscfg_cfg_, feature_refs);
/* list preset values in order of features and params */
static ssize_t cscfg_cfg_values_show(struct config_item *item, char *page)
{
const struct cscfg_feature_desc *feat_desc;
const struct cscfg_config_desc *config_desc;
struct cscfg_fs_preset *fs_preset;
int i, j, val_idx, preset_idx;
ssize_t used = 0;
fs_preset = container_of(to_config_group(item), struct cscfg_fs_preset, group);
config_desc = fs_preset->config_desc;
if (!config_desc->nr_presets)
return 0;
preset_idx = fs_preset->preset_num - 1;
/* start index on the correct array line */
val_idx = config_desc->nr_total_params * preset_idx;
/*
* A set of presets is the sum of all params in used features,
* in order of declaration of features and params in the features
*/
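/*
* For example (illustrative): a configuration referencing one feature
* with two parameters and preset_num == 2 starts reading at
* presets[2], i.e. values presets[2] and presets[3].
*/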
for (i = 0; i < config_desc->nr_feat_refs; i++) {
feat_desc = cscfg_get_named_feat_desc(config_desc->feat_ref_names[i]);
for (j = 0; j < feat_desc->nr_params; j++) {
used += scnprintf(page + used, PAGE_SIZE - used,
"%s.%s = 0x%llx ",
feat_desc->name,
feat_desc->params_desc[j].name,
config_desc->presets[val_idx++]);
}
}
used += scnprintf(page + used, PAGE_SIZE - used, "\n");
return used;
}
CONFIGFS_ATTR_RO(cscfg_cfg_, values);
static ssize_t cscfg_cfg_enable_show(struct config_item *item, char *page)
{
struct cscfg_fs_config *fs_config = container_of(to_config_group(item),
struct cscfg_fs_config, group);
return scnprintf(page, PAGE_SIZE, "%d\n", fs_config->active);
}
static ssize_t cscfg_cfg_enable_store(struct config_item *item,
const char *page, size_t count)
{
struct cscfg_fs_config *fs_config = container_of(to_config_group(item),
struct cscfg_fs_config, group);
int err;
bool val;
err = kstrtobool(page, &val);
if (!err)
err = cscfg_config_sysfs_activate(fs_config->config_desc, val);
if (!err) {
fs_config->active = val;
if (val)
cscfg_config_sysfs_set_preset(fs_config->preset);
}
return err ? err : count;
}
CONFIGFS_ATTR(cscfg_cfg_, enable);
static ssize_t cscfg_cfg_preset_show(struct config_item *item, char *page)
{
struct cscfg_fs_config *fs_config = container_of(to_config_group(item),
struct cscfg_fs_config, group);
return scnprintf(page, PAGE_SIZE, "%d\n", fs_config->preset);
}
static ssize_t cscfg_cfg_preset_store(struct config_item *item,
const char *page, size_t count)
{
struct cscfg_fs_config *fs_config = container_of(to_config_group(item),
struct cscfg_fs_config, group);
int preset, err;
err = kstrtoint(page, 0, &preset);
if (!err) {
/*
* presets start at 1, and go up to max (15),
* but the config may provide fewer.
*/
if ((preset < 1) || (preset > fs_config->config_desc->nr_presets))
err = -EINVAL;
}
if (!err) {
/* set new value */
fs_config->preset = preset;
/* set on system if active */
if (fs_config->active)
cscfg_config_sysfs_set_preset(fs_config->preset);
}
return err ? err : count;
}
CONFIGFS_ATTR(cscfg_cfg_, preset);
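/*
* Illustrative shell usage (configfs mount point and configuration
* name are assumptions):
*
*   echo 3 > /config/cs-syscfg/configurations/autofdo/preset
*   echo 1 > /config/cs-syscfg/configurations/autofdo/enable
*/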
static struct configfs_attribute *cscfg_config_view_attrs[] = {
&cscfg_cfg_attr_description,
&cscfg_cfg_attr_feature_refs,
&cscfg_cfg_attr_enable,
&cscfg_cfg_attr_preset,
NULL,
};
static struct config_item_type cscfg_config_view_type = {
.ct_owner = THIS_MODULE,
.ct_attrs = cscfg_config_view_attrs,
};
static struct configfs_attribute *cscfg_config_preset_attrs[] = {
&cscfg_cfg_attr_values,
NULL,
};
static struct config_item_type cscfg_config_preset_type = {
.ct_owner = THIS_MODULE,
.ct_attrs = cscfg_config_preset_attrs,
};
static int cscfg_add_preset_groups(struct cscfg_fs_config *cfg_view)
{
int preset_num;
struct cscfg_fs_preset *cfg_fs_preset;
struct cscfg_config_desc *config_desc = cfg_view->config_desc;
char name[CONFIGFS_ITEM_NAME_LEN];
if (!config_desc->nr_presets)
return 0;
for (preset_num = 1; preset_num <= config_desc->nr_presets; preset_num++) {
cfg_fs_preset = devm_kzalloc(cscfg_device(),
sizeof(struct cscfg_fs_preset), GFP_KERNEL);
if (!cfg_fs_preset)
return -ENOMEM;
snprintf(name, CONFIGFS_ITEM_NAME_LEN, "preset%d", preset_num);
cfg_fs_preset->preset_num = preset_num;
cfg_fs_preset->config_desc = cfg_view->config_desc;
config_group_init_type_name(&cfg_fs_preset->group, name,
&cscfg_config_preset_type);
configfs_add_default_group(&cfg_fs_preset->group, &cfg_view->group);
}
return 0;
}
static struct config_group *cscfg_create_config_group(struct cscfg_config_desc *config_desc)
{
struct cscfg_fs_config *cfg_view;
struct device *dev = cscfg_device();
int err;
if (!dev)
return ERR_PTR(-EINVAL);
cfg_view = devm_kzalloc(dev, sizeof(struct cscfg_fs_config), GFP_KERNEL);
if (!cfg_view)
return ERR_PTR(-ENOMEM);
cfg_view->config_desc = config_desc;
config_group_init_type_name(&cfg_view->group, config_desc->name, &cscfg_config_view_type);
/* add in a preset<n> dir for each preset */
err = cscfg_add_preset_groups(cfg_view);
if (err)
return ERR_PTR(err);
return &cfg_view->group;
}
/* attributes for features view */
static ssize_t cscfg_feat_description_show(struct config_item *item, char *page)
{
struct cscfg_fs_feature *fs_feat = container_of(to_config_group(item),
struct cscfg_fs_feature, group);
return scnprintf(page, PAGE_SIZE, "%s", fs_feat->feat_desc->description);
}
CONFIGFS_ATTR_RO(cscfg_feat_, description);
static ssize_t cscfg_feat_matches_show(struct config_item *item, char *page)
{
struct cscfg_fs_feature *fs_feat = container_of(to_config_group(item),
struct cscfg_fs_feature, group);
u32 match_flags = fs_feat->feat_desc->match_flags;
int used = 0;
if (match_flags & CS_CFG_MATCH_CLASS_SRC_ALL)
used = scnprintf(page, PAGE_SIZE, "SRC_ALL ");
if (match_flags & CS_CFG_MATCH_CLASS_SRC_ETM4)
used += scnprintf(page + used, PAGE_SIZE - used, "SRC_ETMV4 ");
used += scnprintf(page + used, PAGE_SIZE - used, "\n");
return used;
}
CONFIGFS_ATTR_RO(cscfg_feat_, matches);
static ssize_t cscfg_feat_nr_params_show(struct config_item *item, char *page)
{
struct cscfg_fs_feature *fs_feat = container_of(to_config_group(item),
struct cscfg_fs_feature, group);
return scnprintf(page, PAGE_SIZE, "%d\n", fs_feat->feat_desc->nr_params);
}
CONFIGFS_ATTR_RO(cscfg_feat_, nr_params);
/* base feature desc attrib structures */
static struct configfs_attribute *cscfg_feature_view_attrs[] = {
&cscfg_feat_attr_description,
&cscfg_feat_attr_matches,
&cscfg_feat_attr_nr_params,
NULL,
};
static struct config_item_type cscfg_feature_view_type = {
.ct_owner = THIS_MODULE,
.ct_attrs = cscfg_feature_view_attrs,
};
static ssize_t cscfg_param_value_show(struct config_item *item, char *page)
{
struct cscfg_fs_param *param_item = container_of(to_config_group(item),
struct cscfg_fs_param, group);
u64 value = param_item->feat_desc->params_desc[param_item->param_idx].value;
return scnprintf(page, PAGE_SIZE, "0x%llx\n", value);
}
static ssize_t cscfg_param_value_store(struct config_item *item,
const char *page, size_t size)
{
struct cscfg_fs_param *param_item = container_of(to_config_group(item),
struct cscfg_fs_param, group);
struct cscfg_feature_desc *feat_desc = param_item->feat_desc;
int param_idx = param_item->param_idx;
u64 value;
int err;
err = kstrtoull(page, 0, &value);
if (!err)
err = cscfg_update_feat_param_val(feat_desc, param_idx, value);
return err ? err : size;
}
CONFIGFS_ATTR(cscfg_param_, value);
static struct configfs_attribute *cscfg_param_view_attrs[] = {
&cscfg_param_attr_value,
NULL,
};
static struct config_item_type cscfg_param_view_type = {
.ct_owner = THIS_MODULE,
.ct_attrs = cscfg_param_view_attrs,
};
/*
* configfs provides far less functionality for adding attributes dynamically
* than sysfs, and the show and store functions pass the enclosing config_item,
* so the actual attribute cannot be determined. Therefore we add each item as
* a group directory, with a value attribute.
*/
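/*
* Illustrative resulting layout, assuming a hypothetical feature
* "my_feat" with a parameter "my_param" and the default configfs
* mount point:
*
*   /config/cs-syscfg/features/my_feat/params/my_param/value
*/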
static int cscfg_create_params_group_items(struct cscfg_feature_desc *feat_desc,
struct config_group *params_group)
{
struct device *dev = cscfg_device();
struct cscfg_fs_param *param_item;
int i;
/* parameter items - as groups with default_value attribute */
for (i = 0; i < feat_desc->nr_params; i++) {
param_item = devm_kzalloc(dev, sizeof(struct cscfg_fs_param), GFP_KERNEL);
if (!param_item)
return -ENOMEM;
param_item->feat_desc = feat_desc;
param_item->param_idx = i;
config_group_init_type_name(¶m_item->group,
feat_desc->params_desc[i].name,
&cscfg_param_view_type);
configfs_add_default_group(¶m_item->group, params_group);
}
return 0;
}
static struct config_group *cscfg_create_feature_group(struct cscfg_feature_desc *feat_desc)
{
struct cscfg_fs_feature *feat_view;
struct config_item_type *params_group_type;
struct config_group *params_group = NULL;
struct device *dev = cscfg_device();
int item_err;
if (!dev)
return ERR_PTR(-EINVAL);
feat_view = devm_kzalloc(dev, sizeof(struct cscfg_fs_feature), GFP_KERNEL);
if (!feat_view)
return ERR_PTR(-ENOMEM);
if (feat_desc->nr_params) {
params_group = devm_kzalloc(dev, sizeof(struct config_group), GFP_KERNEL);
if (!params_group)
return ERR_PTR(-ENOMEM);
params_group_type = cscfg_create_ci_type();
if (!params_group_type)
return ERR_PTR(-ENOMEM);
}
feat_view->feat_desc = feat_desc;
config_group_init_type_name(&feat_view->group,
feat_desc->name,
&cscfg_feature_view_type);
if (params_group) {
config_group_init_type_name(params_group, "params", params_group_type);
configfs_add_default_group(params_group, &feat_view->group);
item_err = cscfg_create_params_group_items(feat_desc, params_group);
if (item_err)
return ERR_PTR(item_err);
}
return &feat_view->group;
}
static struct config_item_type cscfg_configs_type = {
.ct_owner = THIS_MODULE,
};
static struct config_group cscfg_configs_grp = {
.cg_item = {
.ci_namebuf = "configurations",
.ci_type = &cscfg_configs_type,
},
};
/* add configuration to configurations group */
int cscfg_configfs_add_config(struct cscfg_config_desc *config_desc)
{
struct config_group *new_group;
int err;
new_group = cscfg_create_config_group(config_desc);
if (IS_ERR(new_group))
return PTR_ERR(new_group);
err = configfs_register_group(&cscfg_configs_grp, new_group);
if (!err)
config_desc->fs_group = new_group;
return err;
}
void cscfg_configfs_del_config(struct cscfg_config_desc *config_desc)
{
if (config_desc->fs_group) {
configfs_unregister_group(config_desc->fs_group);
config_desc->fs_group = NULL;
}
}
static struct config_item_type cscfg_features_type = {
.ct_owner = THIS_MODULE,
};
static struct config_group cscfg_features_grp = {
.cg_item = {
.ci_namebuf = "features",
.ci_type = &cscfg_features_type,
},
};
/* add feature to features group */
int cscfg_configfs_add_feature(struct cscfg_feature_desc *feat_desc)
{
struct config_group *new_group;
int err;
new_group = cscfg_create_feature_group(feat_desc);
if (IS_ERR(new_group))
return PTR_ERR(new_group);
err = configfs_register_group(&cscfg_features_grp, new_group);
if (!err)
feat_desc->fs_group = new_group;
return err;
}
void cscfg_configfs_del_feature(struct cscfg_feature_desc *feat_desc)
{
if (feat_desc->fs_group) {
configfs_unregister_group(feat_desc->fs_group);
feat_desc->fs_group = NULL;
}
}
int cscfg_configfs_init(struct cscfg_manager *cscfg_mgr)
{
struct configfs_subsystem *subsys;
struct config_item_type *ci_type;
if (!cscfg_mgr)
return -EINVAL;
ci_type = cscfg_create_ci_type();
if (!ci_type)
return -ENOMEM;
subsys = &cscfg_mgr->cfgfs_subsys;
config_item_set_name(&subsys->su_group.cg_item, CSCFG_FS_SUBSYS_NAME);
subsys->su_group.cg_item.ci_type = ci_type;
config_group_init(&subsys->su_group);
mutex_init(&subsys->su_mutex);
/* Add default groups to subsystem */
config_group_init(&cscfg_configs_grp);
configfs_add_default_group(&cscfg_configs_grp, &subsys->su_group);
config_group_init(&cscfg_features_grp);
configfs_add_default_group(&cscfg_features_grp, &subsys->su_group);
return configfs_register_subsystem(subsys);
}
void cscfg_configfs_release(struct cscfg_manager *cscfg_mgr)
{
configfs_unregister_subsystem(&cscfg_mgr->cfgfs_subsys);
}
| linux-master | drivers/hwtracing/coresight/coresight-syscfg-configfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/amba/bus.h>
#include <linux/bitmap.h>
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include "coresight-priv.h"
#include "coresight-tpdm.h"
DEFINE_CORESIGHT_DEVLIST(tpdm_devs, "tpdm");
static void tpdm_enable_dsb(struct tpdm_drvdata *drvdata)
{
u32 val;
/* Set the enable bit of DSB control register to 1 */
val = readl_relaxed(drvdata->base + TPDM_DSB_CR);
val |= TPDM_DSB_CR_ENA;
writel_relaxed(val, drvdata->base + TPDM_DSB_CR);
}
/* TPDM enable operations */
static void __tpdm_enable(struct tpdm_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
/* Check if the DSB dataset is present for this TPDM. */
if (drvdata->datasets & TPDM_PIDR0_DS_DSB)
tpdm_enable_dsb(drvdata);
CS_LOCK(drvdata->base);
}
static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
spin_lock(&drvdata->spinlock);
if (drvdata->enable) {
spin_unlock(&drvdata->spinlock);
return -EBUSY;
}
__tpdm_enable(drvdata);
drvdata->enable = true;
spin_unlock(&drvdata->spinlock);
dev_dbg(drvdata->dev, "TPDM tracing enabled\n");
return 0;
}
static void tpdm_disable_dsb(struct tpdm_drvdata *drvdata)
{
u32 val;
/* Set the enable bit of DSB control register to 0 */
val = readl_relaxed(drvdata->base + TPDM_DSB_CR);
val &= ~TPDM_DSB_CR_ENA;
writel_relaxed(val, drvdata->base + TPDM_DSB_CR);
}
/* TPDM disable operations */
static void __tpdm_disable(struct tpdm_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
/* Check if the DSB dataset is present for this TPDM. */
if (drvdata->datasets & TPDM_PIDR0_DS_DSB)
tpdm_disable_dsb(drvdata);
CS_LOCK(drvdata->base);
}
static void tpdm_disable(struct coresight_device *csdev,
struct perf_event *event)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
spin_lock(&drvdata->spinlock);
if (!drvdata->enable) {
spin_unlock(&drvdata->spinlock);
return;
}
__tpdm_disable(drvdata);
drvdata->enable = false;
spin_unlock(&drvdata->spinlock);
dev_dbg(drvdata->dev, "TPDM tracing disabled\n");
}
static const struct coresight_ops_source tpdm_source_ops = {
.enable = tpdm_enable,
.disable = tpdm_disable,
};
static const struct coresight_ops tpdm_cs_ops = {
.source_ops = &tpdm_source_ops,
};
static void tpdm_init_default_data(struct tpdm_drvdata *drvdata)
{
u32 pidr;
CS_UNLOCK(drvdata->base);
/* Get the datasets present on the TPDM. */
pidr = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR0);
drvdata->datasets |= pidr & GENMASK(TPDM_DATASETS - 1, 0);
CS_LOCK(drvdata->base);
}
/*
* value 1: 64 bits test data
* value 2: 32 bits test data
*/
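/*
* Illustrative usage from the shell (device name is an assumption):
*
*   echo 1 > /sys/bus/coresight/devices/tpdm0/integration_test
*/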
static ssize_t integration_test_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t size)
{
int i, ret = 0;
unsigned long val;
struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
if (val != 1 && val != 2)
return -EINVAL;
if (!drvdata->enable)
return -EINVAL;
if (val == 1)
val = ATBCNTRL_VAL_64;
else
val = ATBCNTRL_VAL_32;
CS_UNLOCK(drvdata->base);
writel_relaxed(0x1, drvdata->base + TPDM_ITCNTRL);
for (i = 0; i < INTEGRATION_TEST_CYCLE; i++)
writel_relaxed(val, drvdata->base + TPDM_ITATBCNTRL);
writel_relaxed(0, drvdata->base + TPDM_ITCNTRL);
CS_LOCK(drvdata->base);
return size;
}
static DEVICE_ATTR_WO(integration_test);
static struct attribute *tpdm_attrs[] = {
&dev_attr_integration_test.attr,
NULL,
};
static struct attribute_group tpdm_attr_grp = {
.attrs = tpdm_attrs,
};
static const struct attribute_group *tpdm_attr_grps[] = {
&tpdm_attr_grp,
NULL,
};
static int tpdm_probe(struct amba_device *adev, const struct amba_id *id)
{
void __iomem *base;
struct device *dev = &adev->dev;
struct coresight_platform_data *pdata;
struct tpdm_drvdata *drvdata;
struct coresight_desc desc = { 0 };
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
adev->dev.platform_data = pdata;
/* driver data */
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->dev = &adev->dev;
dev_set_drvdata(dev, drvdata);
base = devm_ioremap_resource(dev, &adev->res);
if (IS_ERR(base))
return PTR_ERR(base);
drvdata->base = base;
/* Set up coresight component description */
desc.name = coresight_alloc_device_name(&tpdm_devs, dev);
if (!desc.name)
return -ENOMEM;
desc.type = CORESIGHT_DEV_TYPE_SOURCE;
desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS;
desc.ops = &tpdm_cs_ops;
desc.pdata = adev->dev.platform_data;
desc.dev = &adev->dev;
desc.access = CSDEV_ACCESS_IOMEM(base);
desc.groups = tpdm_attr_grps;
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
spin_lock_init(&drvdata->spinlock);
tpdm_init_default_data(drvdata);
/* Decrease pm refcount when probe is done. */
pm_runtime_put(&adev->dev);
return 0;
}
static void tpdm_remove(struct amba_device *adev)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(&adev->dev);
coresight_unregister(drvdata->csdev);
}
/*
* Different TPDMs have different peripheral IDs, differing only in
* bits [7:0], so those bits are ignored when matching.
*/
static struct amba_id tpdm_ids[] = {
{
.id = 0x000f0e00,
.mask = 0x000fff00,
},
{ 0, 0},
};
static struct amba_driver tpdm_driver = {
.drv = {
.name = "coresight-tpdm",
.owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = tpdm_probe,
.id_table = tpdm_ids,
.remove = tpdm_remove,
};
module_amba_driver(tpdm_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Trace, Profiling & Diagnostic Monitor driver");
| linux-master | drivers/hwtracing/coresight/coresight-tpdm.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* Description: CoreSight Trace Memory Controller driver
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
DEFINE_CORESIGHT_DEVLIST(etb_devs, "tmc_etb");
DEFINE_CORESIGHT_DEVLIST(etf_devs, "tmc_etf");
DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");
int tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
{
struct coresight_device *csdev = drvdata->csdev;
struct csdev_access *csa = &csdev->access;
/* Ensure formatter, unformatter and hardware fifo are empty */
if (coresight_timeout(csa, TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
dev_err(&csdev->dev,
"timeout while waiting for TMC to be Ready\n");
return -EBUSY;
}
return 0;
}
void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
{
struct coresight_device *csdev = drvdata->csdev;
struct csdev_access *csa = &csdev->access;
u32 ffcr;
ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
ffcr |= TMC_FFCR_STOP_ON_FLUSH;
writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
/* Ensure flush completes */
if (coresight_timeout(csa, TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
dev_err(&csdev->dev,
"timeout while waiting for completion of Manual Flush\n");
}
tmc_wait_for_tmcready(drvdata);
}
void tmc_enable_hw(struct tmc_drvdata *drvdata)
{
writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
}
void tmc_disable_hw(struct tmc_drvdata *drvdata)
{
writel_relaxed(0x0, drvdata->base + TMC_CTL);
}
u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata)
{
u32 mask = 0;
/*
* When moving RRP or an offset address forward, the new values must
* be byte-address aligned to the width of the trace memory databus
* _and_ to a frame boundary (16 byte), whichever is the biggest. For
* example, for 32-bit, 64-bit and 128-bit wide trace memory, the four
* LSBs must be 0s. For 256-bit wide trace memory, the five LSBs must
* be 0s.
*/
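/*
* For example (illustrative): with a 64-bit wide memory interface the
* 16-byte frame boundary is the larger constraint, so the mask is
* GENMASK(31, 4) and the four LSBs must be zero.
*/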
switch (drvdata->memwidth) {
case TMC_MEM_INTF_WIDTH_32BITS:
case TMC_MEM_INTF_WIDTH_64BITS:
case TMC_MEM_INTF_WIDTH_128BITS:
mask = GENMASK(31, 4);
break;
case TMC_MEM_INTF_WIDTH_256BITS:
mask = GENMASK(31, 5);
break;
}
return mask;
}
static int tmc_read_prepare(struct tmc_drvdata *drvdata)
{
int ret = 0;
switch (drvdata->config_type) {
case TMC_CONFIG_TYPE_ETB:
case TMC_CONFIG_TYPE_ETF:
ret = tmc_read_prepare_etb(drvdata);
break;
case TMC_CONFIG_TYPE_ETR:
ret = tmc_read_prepare_etr(drvdata);
break;
default:
ret = -EINVAL;
}
if (!ret)
dev_dbg(&drvdata->csdev->dev, "TMC read start\n");
return ret;
}
static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
{
int ret = 0;
switch (drvdata->config_type) {
case TMC_CONFIG_TYPE_ETB:
case TMC_CONFIG_TYPE_ETF:
ret = tmc_read_unprepare_etb(drvdata);
break;
case TMC_CONFIG_TYPE_ETR:
ret = tmc_read_unprepare_etr(drvdata);
break;
default:
ret = -EINVAL;
}
if (!ret)
dev_dbg(&drvdata->csdev->dev, "TMC read end\n");
return ret;
}
static int tmc_open(struct inode *inode, struct file *file)
{
int ret;
struct tmc_drvdata *drvdata = container_of(file->private_data,
struct tmc_drvdata, miscdev);
ret = tmc_read_prepare(drvdata);
if (ret)
return ret;
nonseekable_open(inode, file);
dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
return 0;
}
static inline ssize_t tmc_get_sysfs_trace(struct tmc_drvdata *drvdata,
loff_t pos, size_t len, char **bufpp)
{
switch (drvdata->config_type) {
case TMC_CONFIG_TYPE_ETB:
case TMC_CONFIG_TYPE_ETF:
return tmc_etb_get_sysfs_trace(drvdata, pos, len, bufpp);
case TMC_CONFIG_TYPE_ETR:
return tmc_etr_get_sysfs_trace(drvdata, pos, len, bufpp);
}
return -EINVAL;
}
static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
loff_t *ppos)
{
char *bufp;
ssize_t actual;
struct tmc_drvdata *drvdata = container_of(file->private_data,
struct tmc_drvdata, miscdev);
actual = tmc_get_sysfs_trace(drvdata, *ppos, len, &bufp);
if (actual <= 0)
return 0;
if (copy_to_user(data, bufp, actual)) {
dev_dbg(&drvdata->csdev->dev,
"%s: copy_to_user failed\n", __func__);
return -EFAULT;
}
*ppos += actual;
dev_dbg(&drvdata->csdev->dev, "%zu bytes copied\n", actual);
return actual;
}
static int tmc_release(struct inode *inode, struct file *file)
{
int ret;
struct tmc_drvdata *drvdata = container_of(file->private_data,
struct tmc_drvdata, miscdev);
ret = tmc_read_unprepare(drvdata);
if (ret)
return ret;
dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
return 0;
}
static const struct file_operations tmc_fops = {
.owner = THIS_MODULE,
.open = tmc_open,
.read = tmc_read,
.release = tmc_release,
.llseek = no_llseek,
};
static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
{
enum tmc_mem_intf_width memwidth;
/*
* Excerpt from the TRM:
*
* DEVID::MEMWIDTH[10:8]
* 0x2 Memory interface databus is 32 bits wide.
* 0x3 Memory interface databus is 64 bits wide.
* 0x4 Memory interface databus is 128 bits wide.
* 0x5 Memory interface databus is 256 bits wide.
*/
switch (BMVAL(devid, 8, 10)) {
case 0x2:
memwidth = TMC_MEM_INTF_WIDTH_32BITS;
break;
case 0x3:
memwidth = TMC_MEM_INTF_WIDTH_64BITS;
break;
case 0x4:
memwidth = TMC_MEM_INTF_WIDTH_128BITS;
break;
case 0x5:
memwidth = TMC_MEM_INTF_WIDTH_256BITS;
break;
default:
memwidth = 0;
}
return memwidth;
}
static struct attribute *coresight_tmc_mgmt_attrs[] = {
coresight_simple_reg32(rsz, TMC_RSZ),
coresight_simple_reg32(sts, TMC_STS),
coresight_simple_reg64(rrp, TMC_RRP, TMC_RRPHI),
coresight_simple_reg64(rwp, TMC_RWP, TMC_RWPHI),
coresight_simple_reg32(trg, TMC_TRG),
coresight_simple_reg32(ctl, TMC_CTL),
coresight_simple_reg32(ffsr, TMC_FFSR),
coresight_simple_reg32(ffcr, TMC_FFCR),
coresight_simple_reg32(mode, TMC_MODE),
coresight_simple_reg32(pscr, TMC_PSCR),
coresight_simple_reg32(devid, CORESIGHT_DEVID),
coresight_simple_reg64(dba, TMC_DBALO, TMC_DBAHI),
coresight_simple_reg32(axictl, TMC_AXICTL),
coresight_simple_reg32(authstatus, TMC_AUTHSTATUS),
NULL,
};
static ssize_t trigger_cntr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->trigger_cntr;
return sprintf(buf, "%#lx\n", val);
}
static ssize_t trigger_cntr_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
drvdata->trigger_cntr = val;
return size;
}
static DEVICE_ATTR_RW(trigger_cntr);
static ssize_t buffer_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
return sprintf(buf, "%#x\n", drvdata->size);
}
static ssize_t buffer_size_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
/* Only permitted for TMC-ETRs */
if (drvdata->config_type != TMC_CONFIG_TYPE_ETR)
return -EPERM;
ret = kstrtoul(buf, 0, &val);
if (ret)
return ret;
/* The buffer size should be page aligned */
if (val & (PAGE_SIZE - 1))
return -EINVAL;
drvdata->size = val;
return size;
}
static DEVICE_ATTR_RW(buffer_size);
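/*
* Illustrative usage (ETR only; device name is an assumption):
*
*   echo 0x100000 > /sys/bus/coresight/devices/tmc_etr0/buffer_size
*/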
static struct attribute *coresight_tmc_attrs[] = {
&dev_attr_trigger_cntr.attr,
&dev_attr_buffer_size.attr,
NULL,
};
static const struct attribute_group coresight_tmc_group = {
.attrs = coresight_tmc_attrs,
};
static const struct attribute_group coresight_tmc_mgmt_group = {
.attrs = coresight_tmc_mgmt_attrs,
.name = "mgmt",
};
static const struct attribute_group *coresight_tmc_groups[] = {
&coresight_tmc_group,
&coresight_tmc_mgmt_group,
NULL,
};
static inline bool tmc_etr_can_use_sg(struct device *dev)
{
return fwnode_property_present(dev->fwnode, "arm,scatter-gather");
}
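/*
* Illustrative (assumed) device tree fragment advertising the
* scatter-gather capability on an ETR node:
*
*   etr@20070000 {
*           compatible = "arm,coresight-tmc", "arm,primecell";
*           reg = <0x20070000 0x1000>;
*           arm,scatter-gather;
*   };
*/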
static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
{
u32 auth = readl_relaxed(drvdata->base + TMC_AUTHSTATUS);
return (auth & TMC_AUTH_NSID_MASK) == 0x3;
}
/* Detect and initialise the capabilities of a TMC ETR */
static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
{
int rc;
u32 dma_mask = 0;
struct tmc_drvdata *drvdata = dev_get_drvdata(parent);
if (!tmc_etr_has_non_secure_access(drvdata))
return -EACCES;
/* Set the unadvertised capabilities */
tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);
if (!(devid & TMC_DEVID_NOSCAT) && tmc_etr_can_use_sg(parent))
tmc_etr_set_cap(drvdata, TMC_ETR_SG);
/* Check if the AXI address width is available */
if (devid & TMC_DEVID_AXIAW_VALID)
dma_mask = ((devid >> TMC_DEVID_AXIAW_SHIFT) &
TMC_DEVID_AXIAW_MASK);
/*
* Unless specified in the device configuration, ETR uses a 40-bit
* AXI master in place of the embedded SRAM of ETB/ETF.
*/
switch (dma_mask) {
case 32:
case 40:
case 44:
case 48:
case 52:
dev_info(parent, "Detected dma mask %dbits\n", dma_mask);
break;
default:
dma_mask = 40;
}
rc = dma_set_mask_and_coherent(parent, DMA_BIT_MASK(dma_mask));
if (rc)
dev_err(parent, "Failed to setup DMA mask: %d\n", rc);
return rc;
}
static u32 tmc_etr_get_default_buffer_size(struct device *dev)
{
u32 size;
if (fwnode_property_read_u32(dev->fwnode, "arm,buffer-size", &size))
size = SZ_1M;
return size;
}
static u32 tmc_etr_get_max_burst_size(struct device *dev)
{
u32 burst_size;
if (fwnode_property_read_u32(dev->fwnode, "arm,max-burst-size",
&burst_size))
return TMC_AXICTL_WR_BURST_16;
/* Only permissible values are 0 to 15 */
if (burst_size > 0xF)
burst_size = TMC_AXICTL_WR_BURST_16;
return burst_size;
}
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
u32 devid;
void __iomem *base;
struct device *dev = &adev->dev;
struct coresight_platform_data *pdata = NULL;
struct tmc_drvdata *drvdata;
struct resource *res = &adev->res;
struct coresight_desc desc = { 0 };
struct coresight_dev_list *dev_list = NULL;
ret = -ENOMEM;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
goto out;
dev_set_drvdata(dev, drvdata);
/* Validity for the resource is already checked by the AMBA core */
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto out;
}
drvdata->base = base;
desc.access = CSDEV_ACCESS_IOMEM(base);
spin_lock_init(&drvdata->spinlock);
devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
drvdata->config_type = BMVAL(devid, 6, 7);
drvdata->memwidth = tmc_get_memwidth(devid);
/* This device is not associated with a session */
drvdata->pid = -1;
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
drvdata->size = tmc_etr_get_default_buffer_size(dev);
drvdata->max_burst_size = tmc_etr_get_max_burst_size(dev);
} else {
drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
}
desc.dev = dev;
desc.groups = coresight_tmc_groups;
switch (drvdata->config_type) {
case TMC_CONFIG_TYPE_ETB:
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
desc.ops = &tmc_etb_cs_ops;
dev_list = &etb_devs;
break;
case TMC_CONFIG_TYPE_ETR:
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM;
desc.ops = &tmc_etr_cs_ops;
ret = tmc_etr_setup_caps(dev, devid,
coresight_get_uci_data(id));
if (ret)
goto out;
idr_init(&drvdata->idr);
mutex_init(&drvdata->idr_mutex);
dev_list = &etr_devs;
break;
case TMC_CONFIG_TYPE_ETF:
desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
desc.ops = &tmc_etf_cs_ops;
dev_list = &etf_devs;
break;
default:
pr_err("%s: Unsupported TMC config\n", desc.name);
ret = -EINVAL;
goto out;
}
desc.name = coresight_alloc_device_name(dev_list, dev);
if (!desc.name) {
ret = -ENOMEM;
goto out;
}
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata)) {
ret = PTR_ERR(pdata);
goto out;
}
adev->dev.platform_data = pdata;
desc.pdata = pdata;
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
goto out;
}
drvdata->miscdev.name = desc.name;
drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
drvdata->miscdev.fops = &tmc_fops;
ret = misc_register(&drvdata->miscdev);
if (ret)
coresight_unregister(drvdata->csdev);
else
pm_runtime_put(&adev->dev);
out:
return ret;
}
static void tmc_shutdown(struct amba_device *adev)
{
unsigned long flags;
struct tmc_drvdata *drvdata = amba_get_drvdata(adev);
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->mode == CS_MODE_DISABLED)
goto out;
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
tmc_etr_disable_hw(drvdata);
/*
* Unlike the remove callback (which is required to make coresight
* modular), we do not care about coresight unregister here, since
* the system is going down after this.
*/
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
}
static void tmc_remove(struct amba_device *adev)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(&adev->dev);
/*
* Since misc_open() holds a refcount on the f_ops, which is
* the tmc fops in this case, the device stays around until the
* last file handle to it is closed.
*/
misc_deregister(&drvdata->miscdev);
coresight_unregister(drvdata->csdev);
}
static const struct amba_id tmc_ids[] = {
CS_AMBA_ID(0x000bb961),
/* Coresight SoC 600 TMC-ETR/ETS */
CS_AMBA_ID_DATA(0x000bb9e8, (unsigned long)CORESIGHT_SOC_600_ETR_CAPS),
/* Coresight SoC 600 TMC-ETB */
CS_AMBA_ID(0x000bb9e9),
/* Coresight SoC 600 TMC-ETF */
CS_AMBA_ID(0x000bb9ea),
{ 0, 0},
};
MODULE_DEVICE_TABLE(amba, tmc_ids);
static struct amba_driver tmc_driver = {
.drv = {
.name = "coresight-tmc",
.owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = tmc_probe,
.shutdown = tmc_shutdown,
.remove = tmc_remove,
.id_table = tmc_ids,
};
module_amba_driver(tmc_driver);
MODULE_AUTHOR("Pratik Patel <[email protected]>");
MODULE_DESCRIPTION("Arm CoreSight Trace Memory Controller driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/hwtracing/coresight/coresight-tmc-core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright(C) 2020 Linaro Limited. All rights reserved.
* Author: Mike Leach <[email protected]>
*/
#include <linux/sysfs.h>
#include "coresight-config.h"
#include "coresight-priv.h"
/*
* This provides a set of generic functions that operate on configurations
* and features to manage the handling of parameters, the programming and
* saving of registers used by features on devices.
*/
/*
* Write the value held in the register structure into the driver internal memory
* location.
*/
static void cscfg_set_reg(struct cscfg_regval_csdev *reg_csdev)
{
u32 *p_val32 = (u32 *)reg_csdev->driver_regval;
u32 tmp32 = reg_csdev->reg_desc.val32;
if (reg_csdev->reg_desc.type & CS_CFG_REG_TYPE_VAL_64BIT) {
*((u64 *)reg_csdev->driver_regval) = reg_csdev->reg_desc.val64;
return;
}
if (reg_csdev->reg_desc.type & CS_CFG_REG_TYPE_VAL_MASK) {
tmp32 = *p_val32;
tmp32 &= ~reg_csdev->reg_desc.mask32;
tmp32 |= reg_csdev->reg_desc.val32 & reg_csdev->reg_desc.mask32;
}
*p_val32 = tmp32;
}
/*
* Read the driver value into the reg if this is marked as one we want to save.
*/
static void cscfg_save_reg(struct cscfg_regval_csdev *reg_csdev)
{
if (!(reg_csdev->reg_desc.type & CS_CFG_REG_TYPE_VAL_SAVE))
return;
if (reg_csdev->reg_desc.type & CS_CFG_REG_TYPE_VAL_64BIT)
reg_csdev->reg_desc.val64 = *(u64 *)(reg_csdev->driver_regval);
else
reg_csdev->reg_desc.val32 = *(u32 *)(reg_csdev->driver_regval);
}
/*
* Some register values are set from parameters. Initialise these registers
* from the current parameter values.
*/
static void cscfg_init_reg_param(struct cscfg_feature_csdev *feat_csdev,
struct cscfg_regval_desc *reg_desc,
struct cscfg_regval_csdev *reg_csdev)
{
struct cscfg_parameter_csdev *param_csdev;
/* for param, load routines have validated the index */
param_csdev = &feat_csdev->params_csdev[reg_desc->param_idx];
param_csdev->reg_csdev = reg_csdev;
param_csdev->val64 = reg_csdev->reg_desc.type & CS_CFG_REG_TYPE_VAL_64BIT;
if (param_csdev->val64)
reg_csdev->reg_desc.val64 = param_csdev->current_value;
else
reg_csdev->reg_desc.val32 = (u32)param_csdev->current_value;
}
/* set values into the driver locations referenced in cscfg_reg_csdev */
static int cscfg_set_on_enable(struct cscfg_feature_csdev *feat_csdev)
{
unsigned long flags;
int i;
spin_lock_irqsave(feat_csdev->drv_spinlock, flags);
for (i = 0; i < feat_csdev->nr_regs; i++)
cscfg_set_reg(&feat_csdev->regs_csdev[i]);
spin_unlock_irqrestore(feat_csdev->drv_spinlock, flags);
dev_dbg(&feat_csdev->csdev->dev, "Feature %s: %s",
feat_csdev->feat_desc->name, "set on enable");
return 0;
}
/* copy back values from the driver locations referenced in cscfg_reg_csdev */
static void cscfg_save_on_disable(struct cscfg_feature_csdev *feat_csdev)
{
unsigned long flags;
int i;
spin_lock_irqsave(feat_csdev->drv_spinlock, flags);
for (i = 0; i < feat_csdev->nr_regs; i++)
cscfg_save_reg(&feat_csdev->regs_csdev[i]);
spin_unlock_irqrestore(feat_csdev->drv_spinlock, flags);
dev_dbg(&feat_csdev->csdev->dev, "Feature %s: %s",
feat_csdev->feat_desc->name, "save on disable");
}
/* default reset - restore default values */
void cscfg_reset_feat(struct cscfg_feature_csdev *feat_csdev)
{
struct cscfg_regval_desc *reg_desc;
struct cscfg_regval_csdev *reg_csdev;
int i;
/*
* set the default values for all parameters and regs from the
* relevant static descriptors.
*/
for (i = 0; i < feat_csdev->nr_params; i++)
feat_csdev->params_csdev[i].current_value =
feat_csdev->feat_desc->params_desc[i].value;
for (i = 0; i < feat_csdev->nr_regs; i++) {
reg_desc = &feat_csdev->feat_desc->regs_desc[i];
reg_csdev = &feat_csdev->regs_csdev[i];
reg_csdev->reg_desc.type = reg_desc->type;
/* check if reg set from a parameter otherwise desc default */
if (reg_desc->type & CS_CFG_REG_TYPE_VAL_PARAM)
cscfg_init_reg_param(feat_csdev, reg_desc, reg_csdev);
else
/*
* for normal values the union between val64 & val32 + mask32
* allows us to init using the 64 bit value
*/
reg_csdev->reg_desc.val64 = reg_desc->val64;
}
}
/*
* For the selected preset, set each parameter's associated register to the
* corresponding value from that preset.
*/
static int cscfg_update_presets(struct cscfg_config_csdev *config_csdev, int preset)
{
int i, j, val_idx = 0, nr_cfg_params;
struct cscfg_parameter_csdev *param_csdev;
struct cscfg_feature_csdev *feat_csdev;
const struct cscfg_config_desc *config_desc = config_csdev->config_desc;
const char *name;
const u64 *preset_base;
u64 val;
/* preset in range 1 to nr_presets */
if (preset < 1 || preset > config_desc->nr_presets)
return -EINVAL;
/*
* Go through the array of features, assigning preset values to
* feature parameters in the order they appear.
* There should be precisely the same number of preset values as the
* sum of number of parameters over all the features - but we will
* ensure there is no overrun.
*/
nr_cfg_params = config_desc->nr_total_params;
preset_base = &config_desc->presets[(preset - 1) * nr_cfg_params];
for (i = 0; i < config_csdev->nr_feat; i++) {
feat_csdev = config_csdev->feats_csdev[i];
if (!feat_csdev->nr_params)
continue;
for (j = 0; j < feat_csdev->nr_params; j++) {
param_csdev = &feat_csdev->params_csdev[j];
name = feat_csdev->feat_desc->params_desc[j].name;
val = preset_base[val_idx++];
if (param_csdev->val64) {
dev_dbg(&config_csdev->csdev->dev,
"set param %s (%lld)", name, val);
param_csdev->reg_csdev->reg_desc.val64 = val;
} else {
param_csdev->reg_csdev->reg_desc.val32 = (u32)val;
dev_dbg(&config_csdev->csdev->dev,
"set param %s (%d)", name, (u32)val);
}
}
/* exit early if all params filled */
if (val_idx >= nr_cfg_params)
break;
}
return 0;
}
/*
* if we are not using a preset, then need to update the feature params
* with current values. This sets the register associated with the parameter
* with the current value of that parameter.
*/
static int cscfg_update_curr_params(struct cscfg_config_csdev *config_csdev)
{
int i, j;
struct cscfg_feature_csdev *feat_csdev;
struct cscfg_parameter_csdev *param_csdev;
const char *name;
u64 val;
for (i = 0; i < config_csdev->nr_feat; i++) {
feat_csdev = config_csdev->feats_csdev[i];
if (!feat_csdev->nr_params)
continue;
for (j = 0; j < feat_csdev->nr_params; j++) {
param_csdev = &feat_csdev->params_csdev[j];
name = feat_csdev->feat_desc->params_desc[j].name;
val = param_csdev->current_value;
if (param_csdev->val64) {
dev_dbg(&config_csdev->csdev->dev,
"set param %s (%lld)", name, val);
param_csdev->reg_csdev->reg_desc.val64 = val;
} else {
param_csdev->reg_csdev->reg_desc.val32 = (u32)val;
dev_dbg(&config_csdev->csdev->dev,
"set param %s (%d)", name, (u32)val);
}
}
}
return 0;
}
/*
* Configuration values will be programmed into the driver locations if enabling, or read
* from relevant locations on disable.
*/
static int cscfg_prog_config(struct cscfg_config_csdev *config_csdev, bool enable)
{
int i, err = 0;
struct cscfg_feature_csdev *feat_csdev;
struct coresight_device *csdev;
for (i = 0; i < config_csdev->nr_feat; i++) {
feat_csdev = config_csdev->feats_csdev[i];
csdev = feat_csdev->csdev;
dev_dbg(&csdev->dev, "cfg %s; %s feature:%s", config_csdev->config_desc->name,
enable ? "enable" : "disable", feat_csdev->feat_desc->name);
if (enable)
err = cscfg_set_on_enable(feat_csdev);
else
cscfg_save_on_disable(feat_csdev);
if (err)
break;
}
return err;
}
/*
* Enable configuration for the device. Will result in the internal driver data
* being updated ready for programming into the device.
*
* @config_csdev: config_csdev to set.
* @preset: preset values to use - 0 for default.
*/
int cscfg_csdev_enable_config(struct cscfg_config_csdev *config_csdev, int preset)
{
int err = 0;
if (preset)
err = cscfg_update_presets(config_csdev, preset);
else
err = cscfg_update_curr_params(config_csdev);
if (!err)
err = cscfg_prog_config(config_csdev, true);
return err;
}
void cscfg_csdev_disable_config(struct cscfg_config_csdev *config_csdev)
{
cscfg_prog_config(config_csdev, false);
}
| linux-master | drivers/hwtracing/coresight/coresight-config.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* Description: CoreSight System Trace Macrocell driver
*
* Initial implementation by Pratik Patel
* (C) 2014-2015 Pratik Patel <[email protected]>
*
* Serious refactoring, code cleanup and upgrading to the Coresight upstream
* framework by Mathieu Poirier
* (C) 2015-2016 Mathieu Poirier <[email protected]>
*
* Guaranteed timing and support for various packet types coming from the
* generic STM API by Chunyan Zhang
* (C) 2015-2016 Chunyan Zhang <[email protected]>
*/
#include <asm/local.h>
#include <linux/acpi.h>
#include <linux/amba/bus.h>
#include <linux/bitmap.h>
#include <linux/clk.h>
#include <linux/coresight.h>
#include <linux/coresight-stm.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/of_address.h>
#include <linux/perf_event.h>
#include <linux/pm_runtime.h>
#include <linux/stm.h>
#include "coresight-priv.h"
#include "coresight-trace-id.h"
#define STMDMASTARTR 0xc04
#define STMDMASTOPR 0xc08
#define STMDMASTATR 0xc0c
#define STMDMACTLR 0xc10
#define STMDMAIDR 0xcfc
#define STMHEER 0xd00
#define STMHETER 0xd20
#define STMHEBSR 0xd60
#define STMHEMCR 0xd64
#define STMHEMASTR 0xdf4
#define STMHEFEAT1R 0xdf8
#define STMHEIDR 0xdfc
#define STMSPER 0xe00
#define STMSPTER 0xe20
#define STMPRIVMASKR 0xe40
#define STMSPSCR 0xe60
#define STMSPMSCR 0xe64
#define STMSPOVERRIDER 0xe68
#define STMSPMOVERRIDER 0xe6c
#define STMSPTRIGCSR 0xe70
#define STMTCSR 0xe80
#define STMTSSTIMR 0xe84
#define STMTSFREQR 0xe8c
#define STMSYNCR 0xe90
#define STMAUXCR 0xe94
#define STMSPFEAT1R 0xea0
#define STMSPFEAT2R 0xea4
#define STMSPFEAT3R 0xea8
#define STMITTRIGGER 0xee8
#define STMITATBDATA0 0xeec
#define STMITATBCTR2 0xef0
#define STMITATBID 0xef4
#define STMITATBCTR0 0xef8
#define STM_32_CHANNEL 32
#define BYTES_PER_CHANNEL 256
#define STM_TRACE_BUF_SIZE 4096
#define STM_SW_MASTER_END 127
/* Register bit definition */
#define STMTCSR_BUSY_BIT 23
/* Reserve the first 10 channels for kernel usage */
#define STM_CHANNEL_OFFSET 0
enum stm_pkt_type {
STM_PKT_TYPE_DATA = 0x98,
STM_PKT_TYPE_FLAG = 0xE8,
STM_PKT_TYPE_TRIG = 0xF8,
};
#define stm_channel_addr(drvdata, ch) (drvdata->chs.base + \
(ch * BYTES_PER_CHANNEL))
#define stm_channel_off(type, opts) (type & ~opts)
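/*
* Example (illustrative): a timestamped data packet on channel 2 would
* be written to:
*   stm_channel_addr(drvdata, 2) +
*   stm_channel_off(STM_PKT_TYPE_DATA, STM_FLAG_TIMESTAMPED)
*/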
static int boot_nr_channel;
/*
* Not really modular but using module_param is the easiest way to
* remain consistent with existing use cases for now.
*/
module_param_named(
boot_nr_channel, boot_nr_channel, int, S_IRUGO
);
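/*
* Illustrative usage (module name is an assumption): pass
* coresight-stm.boot_nr_channel=64 on the kernel command line.
*/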
/*
* struct channel_space - central management entity for extended ports
* @base: memory mapped base address where channels start.
* @phys: physical base address of channel region.
* @guaranteed: bitmap tracking whether channel delivery is guaranteed.
*/
struct channel_space {
void __iomem *base;
phys_addr_t phys;
unsigned long *guaranteed;
};
DEFINE_CORESIGHT_DEVLIST(stm_devs, "stm");
/**
* struct stm_drvdata - specifics associated to an STM component
* @base: memory mapped base address for this component.
* @atclk: optional clock for the core parts of the STM.
* @csdev: component vitals needed by the framework.
* @spinlock: serializes access; only one user at a time.
* @chs: the channels associated to this STM.
* @stm: structure associated to the generic STM interface.
* @mode: this tracer's mode (enum cs_mode), i.e. sysFS, or disabled.
* @traceid: value of the current ID for this component.
* @write_bytes: maximum number of bytes this STM can write at a time.
* @stmsper: settings for register STMSPER.
* @stmspscr: settings for register STMSPSCR.
* @numsp: the total number of stimulus ports supported by this STM.
* @stmheer: settings for register STMHEER.
* @stmheter: settings for register STMHETER.
* @stmhebsr: settings for register STMHEBSR.
*/
struct stm_drvdata {
void __iomem *base;
struct clk *atclk;
struct coresight_device *csdev;
spinlock_t spinlock;
struct channel_space chs;
struct stm_data stm;
local_t mode;
u8 traceid;
u32 write_bytes;
u32 stmsper;
u32 stmspscr;
u32 numsp;
u32 stmheer;
u32 stmheter;
u32 stmhebsr;
};
static void stm_hwevent_enable_hw(struct stm_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
writel_relaxed(drvdata->stmhebsr, drvdata->base + STMHEBSR);
writel_relaxed(drvdata->stmheter, drvdata->base + STMHETER);
writel_relaxed(drvdata->stmheer, drvdata->base + STMHEER);
writel_relaxed(0x01 | /* Enable HW event tracing */
0x04, /* Error detection on event tracing */
drvdata->base + STMHEMCR);
CS_LOCK(drvdata->base);
}
static void stm_port_enable_hw(struct stm_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
/* ATB trigger enable on direct writes to TRIG locations */
writel_relaxed(0x10,
drvdata->base + STMSPTRIGCSR);
writel_relaxed(drvdata->stmspscr, drvdata->base + STMSPSCR);
writel_relaxed(drvdata->stmsper, drvdata->base + STMSPER);
CS_LOCK(drvdata->base);
}
static void stm_enable_hw(struct stm_drvdata *drvdata)
{
if (drvdata->stmheer)
stm_hwevent_enable_hw(drvdata);
stm_port_enable_hw(drvdata);
CS_UNLOCK(drvdata->base);
/* 4096 bytes between synchronisation packets */
writel_relaxed(0xFFF, drvdata->base + STMSYNCR);
writel_relaxed((drvdata->traceid << 16 | /* trace id */
0x02 | /* timestamp enable */
0x01), /* global STM enable */
drvdata->base + STMTCSR);
CS_LOCK(drvdata->base);
}
static int stm_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode)
{
u32 val;
struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
if (mode != CS_MODE_SYSFS)
return -EINVAL;
val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
/* Someone is already using the tracer */
if (val)
return -EBUSY;
pm_runtime_get_sync(csdev->dev.parent);
spin_lock(&drvdata->spinlock);
stm_enable_hw(drvdata);
spin_unlock(&drvdata->spinlock);
dev_dbg(&csdev->dev, "STM tracing enabled\n");
return 0;
}
static void stm_hwevent_disable_hw(struct stm_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
writel_relaxed(0x0, drvdata->base + STMHEMCR);
writel_relaxed(0x0, drvdata->base + STMHEER);
writel_relaxed(0x0, drvdata->base + STMHETER);
CS_LOCK(drvdata->base);
}
static void stm_port_disable_hw(struct stm_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
writel_relaxed(0x0, drvdata->base + STMSPER);
writel_relaxed(0x0, drvdata->base + STMSPTRIGCSR);
CS_LOCK(drvdata->base);
}
static void stm_disable_hw(struct stm_drvdata *drvdata)
{
u32 val;
CS_UNLOCK(drvdata->base);
val = readl_relaxed(drvdata->base + STMTCSR);
val &= ~0x1; /* clear global STM enable [0] */
writel_relaxed(val, drvdata->base + STMTCSR);
CS_LOCK(drvdata->base);
stm_port_disable_hw(drvdata);
if (drvdata->stmheer)
stm_hwevent_disable_hw(drvdata);
}
static void stm_disable(struct coresight_device *csdev,
struct perf_event *event)
{
struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct csdev_access *csa = &csdev->access;
/*
* For as long as the tracer isn't disabled another entity can't
* change its status. As such we can read the status here without
* fearing it will change under us.
*/
if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
spin_lock(&drvdata->spinlock);
stm_disable_hw(drvdata);
spin_unlock(&drvdata->spinlock);
/* Wait until the engine has completely stopped */
coresight_timeout(csa, STMTCSR, STMTCSR_BUSY_BIT, 0);
pm_runtime_put(csdev->dev.parent);
local_set(&drvdata->mode, CS_MODE_DISABLED);
dev_dbg(&csdev->dev, "STM tracing disabled\n");
}
}
static const struct coresight_ops_source stm_source_ops = {
.enable = stm_enable,
.disable = stm_disable,
};
static const struct coresight_ops stm_cs_ops = {
.source_ops = &stm_source_ops,
};
static inline bool stm_addr_unaligned(const void *addr, u8 write_bytes)
{
return ((unsigned long)addr & (write_bytes - 1));
}
static void stm_send(void __iomem *addr, const void *data,
u32 size, u8 write_bytes)
{
u8 payload[8];
if (stm_addr_unaligned(data, write_bytes)) {
memcpy(payload, data, size);
data = payload;
}
/* now we are 64bit/32bit aligned */
switch (size) {
#ifdef CONFIG_64BIT
case 8:
writeq_relaxed(*(u64 *)data, addr);
break;
#endif
case 4:
writel_relaxed(*(u32 *)data, addr);
break;
case 2:
writew_relaxed(*(u16 *)data, addr);
break;
case 1:
writeb_relaxed(*(u8 *)data, addr);
break;
default:
break;
}
}
static int stm_generic_link(struct stm_data *stm_data,
unsigned int master, unsigned int channel)
{
struct stm_drvdata *drvdata = container_of(stm_data,
struct stm_drvdata, stm);
if (!drvdata || !drvdata->csdev)
return -EINVAL;
return coresight_enable(drvdata->csdev);
}
static void stm_generic_unlink(struct stm_data *stm_data,
unsigned int master, unsigned int channel)
{
struct stm_drvdata *drvdata = container_of(stm_data,
struct stm_drvdata, stm);
if (!drvdata || !drvdata->csdev)
return;
coresight_disable(drvdata->csdev);
}
static phys_addr_t
stm_mmio_addr(struct stm_data *stm_data, unsigned int master,
unsigned int channel, unsigned int nr_chans)
{
struct stm_drvdata *drvdata = container_of(stm_data,
struct stm_drvdata, stm);
phys_addr_t addr;
addr = drvdata->chs.phys + channel * BYTES_PER_CHANNEL;
if (offset_in_page(addr) ||
offset_in_page(nr_chans * BYTES_PER_CHANNEL))
return 0;
return addr;
}
static long stm_generic_set_options(struct stm_data *stm_data,
unsigned int master,
unsigned int channel,
unsigned int nr_chans,
unsigned long options)
{
struct stm_drvdata *drvdata = container_of(stm_data,
struct stm_drvdata, stm);
if (!(drvdata && local_read(&drvdata->mode)))
return -EINVAL;
if (channel >= drvdata->numsp)
return -EINVAL;
switch (options) {
case STM_OPTION_GUARANTEED:
set_bit(channel, drvdata->chs.guaranteed);
break;
case STM_OPTION_INVARIANT:
clear_bit(channel, drvdata->chs.guaranteed);
break;
default:
return -EINVAL;
}
return 0;
}
static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
unsigned int master,
unsigned int channel,
unsigned int packet,
unsigned int flags,
unsigned int size,
const unsigned char *payload)
{
void __iomem *ch_addr;
struct stm_drvdata *drvdata = container_of(stm_data,
struct stm_drvdata, stm);
unsigned int stm_flags;
if (!(drvdata && local_read(&drvdata->mode)))
return -EACCES;
if (channel >= drvdata->numsp)
return -EINVAL;
ch_addr = stm_channel_addr(drvdata, channel);
stm_flags = (flags & STP_PACKET_TIMESTAMPED) ?
STM_FLAG_TIMESTAMPED : 0;
stm_flags |= test_bit(channel, drvdata->chs.guaranteed) ?
STM_FLAG_GUARANTEED : 0;
if (size > drvdata->write_bytes)
size = drvdata->write_bytes;
else
size = rounddown_pow_of_two(size);
switch (packet) {
case STP_PACKET_FLAG:
ch_addr += stm_channel_off(STM_PKT_TYPE_FLAG, stm_flags);
/*
* The generic STM core sets a size of '0' on flag packets.
* As such send a flag packet of size '1' and tell the
* core we did so.
*/
stm_send(ch_addr, payload, 1, drvdata->write_bytes);
size = 1;
break;
case STP_PACKET_DATA:
stm_flags |= (flags & STP_PACKET_MARKED) ? STM_FLAG_MARKED : 0;
ch_addr += stm_channel_off(STM_PKT_TYPE_DATA, stm_flags);
stm_send(ch_addr, payload, size,
drvdata->write_bytes);
break;
default:
return -ENOTSUPP;
}
return size;
}
static ssize_t hwevent_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->stmheer;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t hwevent_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
int ret = 0;
ret = kstrtoul(buf, 16, &val);
if (ret)
return -EINVAL;
drvdata->stmheer = val;
/* HW event enable and trigger go hand in hand */
drvdata->stmheter = val;
return size;
}
static DEVICE_ATTR_RW(hwevent_enable);
static ssize_t hwevent_select_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->stmhebsr;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t hwevent_select_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
int ret = 0;
ret = kstrtoul(buf, 16, &val);
if (ret)
return -EINVAL;
drvdata->stmhebsr = val;
return size;
}
static DEVICE_ATTR_RW(hwevent_select);
static ssize_t port_select_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (!local_read(&drvdata->mode)) {
val = drvdata->stmspscr;
} else {
spin_lock(&drvdata->spinlock);
val = readl_relaxed(drvdata->base + STMSPSCR);
spin_unlock(&drvdata->spinlock);
}
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t port_select_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val, stmsper;
int ret = 0;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
spin_lock(&drvdata->spinlock);
drvdata->stmspscr = val;
if (local_read(&drvdata->mode)) {
CS_UNLOCK(drvdata->base);
/* Process as per ARM's TRM recommendation */
stmsper = readl_relaxed(drvdata->base + STMSPER);
writel_relaxed(0x0, drvdata->base + STMSPER);
writel_relaxed(drvdata->stmspscr, drvdata->base + STMSPSCR);
writel_relaxed(stmsper, drvdata->base + STMSPER);
CS_LOCK(drvdata->base);
}
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(port_select);
static ssize_t port_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (!local_read(&drvdata->mode)) {
val = drvdata->stmsper;
} else {
spin_lock(&drvdata->spinlock);
val = readl_relaxed(drvdata->base + STMSPER);
spin_unlock(&drvdata->spinlock);
}
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t port_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
int ret = 0;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
spin_lock(&drvdata->spinlock);
drvdata->stmsper = val;
if (local_read(&drvdata->mode)) {
CS_UNLOCK(drvdata->base);
writel_relaxed(drvdata->stmsper, drvdata->base + STMSPER);
CS_LOCK(drvdata->base);
}
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(port_enable);
static ssize_t traceid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
val = drvdata->traceid;
return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(traceid);
static struct attribute *coresight_stm_attrs[] = {
&dev_attr_hwevent_enable.attr,
&dev_attr_hwevent_select.attr,
&dev_attr_port_enable.attr,
&dev_attr_port_select.attr,
&dev_attr_traceid.attr,
NULL,
};
static struct attribute *coresight_stm_mgmt_attrs[] = {
coresight_simple_reg32(tcsr, STMTCSR),
coresight_simple_reg32(tsfreqr, STMTSFREQR),
coresight_simple_reg32(syncr, STMSYNCR),
coresight_simple_reg32(sper, STMSPER),
coresight_simple_reg32(spter, STMSPTER),
coresight_simple_reg32(privmaskr, STMPRIVMASKR),
coresight_simple_reg32(spscr, STMSPSCR),
coresight_simple_reg32(spmscr, STMSPMSCR),
coresight_simple_reg32(spfeat1r, STMSPFEAT1R),
coresight_simple_reg32(spfeat2r, STMSPFEAT2R),
coresight_simple_reg32(spfeat3r, STMSPFEAT3R),
coresight_simple_reg32(devid, CORESIGHT_DEVID),
NULL,
};
static const struct attribute_group coresight_stm_group = {
.attrs = coresight_stm_attrs,
};
static const struct attribute_group coresight_stm_mgmt_group = {
.attrs = coresight_stm_mgmt_attrs,
.name = "mgmt",
};
static const struct attribute_group *coresight_stm_groups[] = {
&coresight_stm_group,
&coresight_stm_mgmt_group,
NULL,
};
#ifdef CONFIG_OF
static int of_stm_get_stimulus_area(struct device *dev, struct resource *res)
{
const char *name = NULL;
int index = 0, found = 0;
struct device_node *np = dev->of_node;
while (!of_property_read_string_index(np, "reg-names", index, &name)) {
if (strcmp("stm-stimulus-base", name)) {
index++;
continue;
}
/* We have a match and @index is where it's at */
found = 1;
break;
}
if (!found)
return -EINVAL;
return of_address_to_resource(np, index, res);
}
#else
static inline int of_stm_get_stimulus_area(struct device *dev,
struct resource *res)
{
return -ENOENT;
}
#endif
#ifdef CONFIG_ACPI
static int acpi_stm_get_stimulus_area(struct device *dev, struct resource *res)
{
int rc;
bool found_base = false;
struct resource_entry *rent;
LIST_HEAD(res_list);
struct acpi_device *adev = ACPI_COMPANION(dev);
rc = acpi_dev_get_resources(adev, &res_list, NULL, NULL);
if (rc < 0)
return rc;
/*
* The stimulus base for STM device must be listed as the second memory
* resource, followed by the programming base address as described in
* "Section 2.3 Resources" in ACPI for CoreSightTM 1.0 Platform Design
* document (DEN0067).
*/
rc = -ENOENT;
list_for_each_entry(rent, &res_list, node) {
if (resource_type(rent->res) != IORESOURCE_MEM)
continue;
if (found_base) {
*res = *rent->res;
rc = 0;
break;
}
found_base = true;
}
acpi_dev_free_resource_list(&res_list);
return rc;
}
#else
static inline int acpi_stm_get_stimulus_area(struct device *dev,
struct resource *res)
{
return -ENOENT;
}
#endif
static int stm_get_stimulus_area(struct device *dev, struct resource *res)
{
struct fwnode_handle *fwnode = dev_fwnode(dev);
if (is_of_node(fwnode))
return of_stm_get_stimulus_area(dev, res);
else if (is_acpi_node(fwnode))
return acpi_stm_get_stimulus_area(dev, res);
return -ENOENT;
}
static u32 stm_fundamental_data_size(struct stm_drvdata *drvdata)
{
u32 stmspfeat2r;
if (!IS_ENABLED(CONFIG_64BIT))
return 4;
stmspfeat2r = readl_relaxed(drvdata->base + STMSPFEAT2R);
/*
* bit[15:12] represents the fundamental data size
* 0 - 32-bit data
* 1 - 64-bit data
*/
return BMVAL(stmspfeat2r, 12, 15) ? 8 : 4;
}
static u32 stm_num_stimulus_port(struct stm_drvdata *drvdata)
{
u32 numsp;
numsp = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
/*
* NUMSP in STMDEVID is 17 bits wide and if equal to 0x0,
* 32 stimulus ports are supported.
*/
numsp &= 0x1ffff;
if (!numsp)
numsp = STM_32_CHANNEL;
return numsp;
}
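/*
 * Worked example (value illustrative): a STMDEVID read of 0x20020 yields
 * NUMSP = 0x20020 & 0x1ffff = 0x20, i.e. 32 stimulus ports, while a read
 * with NUMSP == 0 falls back to the STM_32_CHANNEL default of 32 ports.
 */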
static void stm_init_default_data(struct stm_drvdata *drvdata)
{
/* Don't use port selection */
drvdata->stmspscr = 0x0;
/*
* Enable all channels regardless of their number. When port
* selection isn't used (see above), STMSPER applies to all of
* the 32 channel groups available, hence setting all 32 bits to 1.
*/
drvdata->stmsper = ~0x0;
/* Set invariant transaction timing on all channels */
bitmap_clear(drvdata->chs.guaranteed, 0, drvdata->numsp);
}
static void stm_init_generic_data(struct stm_drvdata *drvdata,
const char *name)
{
drvdata->stm.name = name;
/*
* MasterIDs are assigned at the HW design phase. As such the core
* uses a single master for interaction with this device.
*/
drvdata->stm.sw_start = 1;
drvdata->stm.sw_end = 1;
drvdata->stm.hw_override = true;
drvdata->stm.sw_nchannels = drvdata->numsp;
drvdata->stm.sw_mmiosz = BYTES_PER_CHANNEL;
drvdata->stm.packet = stm_generic_packet;
drvdata->stm.mmio_addr = stm_mmio_addr;
drvdata->stm.link = stm_generic_link;
drvdata->stm.unlink = stm_generic_unlink;
drvdata->stm.set_options = stm_generic_set_options;
}
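/*
 * A sketch of how the stm_data above is used, assuming the usual generic
 * STM framework behaviour: once stm_register_device() succeeds in
 * stm_probe() below, the framework exposes the device to userspace
 * (typically as a character device such as /dev/stm0 - name assumed) and
 * to kernel stm_source clients; data written to a linked channel is
 * ultimately emitted through stm_generic_packet().
 */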
static int stm_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret, trace_id;
void __iomem *base;
struct device *dev = &adev->dev;
struct coresight_platform_data *pdata = NULL;
struct stm_drvdata *drvdata;
struct resource *res = &adev->res;
struct resource ch_res;
struct coresight_desc desc = { 0 };
desc.name = coresight_alloc_device_name(&stm_devs, dev);
if (!desc.name)
return -ENOMEM;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
if (ret)
return ret;
}
dev_set_drvdata(dev, drvdata);
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
drvdata->base = base;
desc.access = CSDEV_ACCESS_IOMEM(base);
ret = stm_get_stimulus_area(dev, &ch_res);
if (ret)
return ret;
drvdata->chs.phys = ch_res.start;
base = devm_ioremap_resource(dev, &ch_res);
if (IS_ERR(base))
return PTR_ERR(base);
drvdata->chs.base = base;
drvdata->write_bytes = stm_fundamental_data_size(drvdata);
if (boot_nr_channel)
drvdata->numsp = boot_nr_channel;
else
drvdata->numsp = stm_num_stimulus_port(drvdata);
drvdata->chs.guaranteed = devm_bitmap_zalloc(dev, drvdata->numsp,
GFP_KERNEL);
if (!drvdata->chs.guaranteed)
return -ENOMEM;
spin_lock_init(&drvdata->spinlock);
stm_init_default_data(drvdata);
stm_init_generic_data(drvdata, desc.name);
if (stm_register_device(dev, &drvdata->stm, THIS_MODULE)) {
dev_info(dev,
"%s : stm_register_device failed, probing deferred\n",
desc.name);
return -EPROBE_DEFER;
}
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata)) {
ret = PTR_ERR(pdata);
goto stm_unregister;
}
adev->dev.platform_data = pdata;
desc.type = CORESIGHT_DEV_TYPE_SOURCE;
desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE;
desc.ops = &stm_cs_ops;
desc.pdata = pdata;
desc.dev = dev;
desc.groups = coresight_stm_groups;
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
goto stm_unregister;
}
trace_id = coresight_trace_id_get_system_id();
if (trace_id < 0) {
ret = trace_id;
goto cs_unregister;
}
drvdata->traceid = (u8)trace_id;
pm_runtime_put(&adev->dev);
dev_info(&drvdata->csdev->dev, "%s initialized\n",
(char *)coresight_get_uci_data(id));
return 0;
cs_unregister:
coresight_unregister(drvdata->csdev);
stm_unregister:
stm_unregister_device(&drvdata->stm);
return ret;
}
static void stm_remove(struct amba_device *adev)
{
struct stm_drvdata *drvdata = dev_get_drvdata(&adev->dev);
coresight_trace_id_put_system_id(drvdata->traceid);
coresight_unregister(drvdata->csdev);
stm_unregister_device(&drvdata->stm);
}
#ifdef CONFIG_PM
static int stm_runtime_suspend(struct device *dev)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev);
if (drvdata && !IS_ERR(drvdata->atclk))
clk_disable_unprepare(drvdata->atclk);
return 0;
}
static int stm_runtime_resume(struct device *dev)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev);
if (drvdata && !IS_ERR(drvdata->atclk))
clk_prepare_enable(drvdata->atclk);
return 0;
}
#endif
static const struct dev_pm_ops stm_dev_pm_ops = {
SET_RUNTIME_PM_OPS(stm_runtime_suspend, stm_runtime_resume, NULL)
};
static const struct amba_id stm_ids[] = {
CS_AMBA_ID_DATA(0x000bb962, "STM32"),
CS_AMBA_ID_DATA(0x000bb963, "STM500"),
{ 0, 0},
};
MODULE_DEVICE_TABLE(amba, stm_ids);
static struct amba_driver stm_driver = {
.drv = {
.name = "coresight-stm",
.owner = THIS_MODULE,
.pm = &stm_dev_pm_ops,
.suppress_bind_attrs = true,
},
.probe = stm_probe,
.remove = stm_remove,
.id_table = stm_ids,
};
module_amba_driver(stm_driver);
MODULE_AUTHOR("Pratik Patel <[email protected]>");
MODULE_DESCRIPTION("Arm CoreSight System Trace Macrocell driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/hwtracing/coresight/coresight-stm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018 Linaro Limited, All rights reserved.
* Author: Mike Leach <[email protected]>
*/
#include <linux/amba/bus.h>
#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/coresight.h>
#include <linux/cpu_pm.h>
#include <linux/cpuhotplug.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/spinlock.h>
#include "coresight-priv.h"
#include "coresight-cti.h"
/*
* CTI devices can be associated with a PE, or be connected to CoreSight
* hardware. We have a list of all CTIs irrespective of whether they are
* CPU bound or not.
*
* We assume that the non-CPU CTIs are always powered as we do with sinks etc.
*
* We leave the client to figure out if all the CTIs are interconnected with
* the same CTM; in general this is the case but it does not always have to be.
*/
/* net of CTI devices connected via CTM */
static LIST_HEAD(ect_net);
/* protect the list */
static DEFINE_MUTEX(ect_mutex);
#define csdev_to_cti_drvdata(csdev) \
dev_get_drvdata(csdev->dev.parent)
/* power management handling */
static int nr_cti_cpu;
/* quick lookup list for CPU-bound CTIs during power handling */
static struct cti_drvdata *cti_cpu_drvdata[NR_CPUS];
/*
* CTI naming. CTI bound to cores will have the name cti_cpu<N> where
* N is the CPU ID. System CTIs will have the name cti_sys<I> where I
* is an index allocated by order of discovery.
*
* CTI device name list - for CTI not bound to cores.
*/
DEFINE_CORESIGHT_DEVLIST(cti_sys_devs, "cti_sys");
/* write set of regs to hardware - call with spinlock claimed */
void cti_write_all_hw_regs(struct cti_drvdata *drvdata)
{
struct cti_config *config = &drvdata->config;
int i;
CS_UNLOCK(drvdata->base);
/* disable CTI before writing registers */
writel_relaxed(0, drvdata->base + CTICONTROL);
/* write the CTI trigger registers */
for (i = 0; i < config->nr_trig_max; i++) {
writel_relaxed(config->ctiinen[i], drvdata->base + CTIINEN(i));
writel_relaxed(config->ctiouten[i],
drvdata->base + CTIOUTEN(i));
}
/* other regs */
writel_relaxed(config->ctigate, drvdata->base + CTIGATE);
writel_relaxed(config->asicctl, drvdata->base + ASICCTL);
writel_relaxed(config->ctiappset, drvdata->base + CTIAPPSET);
/* re-enable CTI */
writel_relaxed(1, drvdata->base + CTICONTROL);
CS_LOCK(drvdata->base);
}
/* write regs to hardware and enable */
static int cti_enable_hw(struct cti_drvdata *drvdata)
{
struct cti_config *config = &drvdata->config;
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&drvdata->spinlock, flags);
/* no need to do anything if enabled or unpowered */
if (config->hw_enabled || !config->hw_powered)
goto cti_state_unchanged;
/* claim the device */
rc = coresight_claim_device(drvdata->csdev);
if (rc)
goto cti_err_not_enabled;
cti_write_all_hw_regs(drvdata);
config->hw_enabled = true;
drvdata->config.enable_req_count++;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return rc;
cti_state_unchanged:
drvdata->config.enable_req_count++;
/* cannot enable due to error */
cti_err_not_enabled:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return rc;
}
/* re-enable CTI on CPU when using CPU hotplug */
static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
{
struct cti_config *config = &drvdata->config;
spin_lock(&drvdata->spinlock);
config->hw_powered = true;
/* no need to do anything if no enable request */
if (!drvdata->config.enable_req_count)
goto cti_hp_not_enabled;
/* try to claim the device */
if (coresight_claim_device(drvdata->csdev))
goto cti_hp_not_enabled;
cti_write_all_hw_regs(drvdata);
config->hw_enabled = true;
spin_unlock(&drvdata->spinlock);
return;
/* did not re-enable due to no claim / no request */
cti_hp_not_enabled:
spin_unlock(&drvdata->spinlock);
}
/* disable hardware */
static int cti_disable_hw(struct cti_drvdata *drvdata)
{
struct cti_config *config = &drvdata->config;
struct coresight_device *csdev = drvdata->csdev;
int ret = 0;
spin_lock(&drvdata->spinlock);
/* don't allow negative refcounts, return an error */
if (!drvdata->config.enable_req_count) {
ret = -EINVAL;
goto cti_not_disabled;
}
/* check refcount - disable on 0 */
if (--drvdata->config.enable_req_count > 0)
goto cti_not_disabled;
/* no need to do anything if disabled or cpu unpowered */
if (!config->hw_enabled || !config->hw_powered)
goto cti_not_disabled;
CS_UNLOCK(drvdata->base);
/* disable CTI */
writel_relaxed(0, drvdata->base + CTICONTROL);
config->hw_enabled = false;
coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
spin_unlock(&drvdata->spinlock);
return ret;
/* not disabled this call */
cti_not_disabled:
spin_unlock(&drvdata->spinlock);
return ret;
}
void cti_write_single_reg(struct cti_drvdata *drvdata, int offset, u32 value)
{
CS_UNLOCK(drvdata->base);
writel_relaxed(value, drvdata->base + offset);
CS_LOCK(drvdata->base);
}
void cti_write_intack(struct device *dev, u32 ackval)
{
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
spin_lock(&drvdata->spinlock);
/* write if enabled */
if (cti_active(config))
cti_write_single_reg(drvdata, CTIINTACK, ackval);
spin_unlock(&drvdata->spinlock);
}
/*
* Look at the HW DEVID register for some of the HW settings.
* DEVID[15:8] - max number of in / out triggers.
*/
#define CTI_DEVID_MAXTRIGS(devid_val) ((int) BMVAL(devid_val, 8, 15))
/* DEVID[19:16] - number of CTM channels */
#define CTI_DEVID_CTMCHANNELS(devid_val) ((int) BMVAL(devid_val, 16, 19))
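/*
 * Worked example (value illustrative): a DEVID read of 0x40800 gives
 * CTI_DEVID_MAXTRIGS -> (0x40800 >> 8) & 0xff = 8 triggers and
 * CTI_DEVID_CTMCHANNELS -> (0x40800 >> 16) & 0xf = 4 CTM channels,
 * i.e. the common 8 trigger / 4 channel CTI configuration.
 */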
static void cti_set_default_config(struct device *dev,
struct cti_drvdata *drvdata)
{
struct cti_config *config = &drvdata->config;
u32 devid;
devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
config->nr_trig_max = CTI_DEVID_MAXTRIGS(devid);
/*
* no current hardware should exceed this, but protect the driver
* in case of fault / out of spec hw
*/
if (config->nr_trig_max > CTIINOUTEN_MAX) {
dev_warn_once(dev,
"Limiting HW MaxTrig value(%d) to driver max(%d)\n",
config->nr_trig_max, CTIINOUTEN_MAX);
config->nr_trig_max = CTIINOUTEN_MAX;
}
config->nr_ctm_channels = CTI_DEVID_CTMCHANNELS(devid);
/* Most regs default to 0 as zalloc'ed, except... */
config->trig_filter_enable = true;
config->ctigate = GENMASK(config->nr_ctm_channels - 1, 0);
config->enable_req_count = 0;
}
/*
* Add a connection entry to the list of connections for this
* CTI device.
*/
int cti_add_connection_entry(struct device *dev, struct cti_drvdata *drvdata,
struct cti_trig_con *tc,
struct coresight_device *csdev,
const char *assoc_dev_name)
{
struct cti_device *cti_dev = &drvdata->ctidev;
tc->con_dev = csdev;
/*
* Prefer the actual associated CS device dev name to the supplied value -
* which is likely to be the node name or another connection name.
*/
if (csdev)
tc->con_dev_name = dev_name(&csdev->dev);
else if (assoc_dev_name != NULL) {
tc->con_dev_name = devm_kstrdup(dev,
assoc_dev_name, GFP_KERNEL);
if (!tc->con_dev_name)
return -ENOMEM;
}
list_add_tail(&tc->node, &cti_dev->trig_cons);
cti_dev->nr_trig_con++;
/* add connection usage bit info to overall info */
drvdata->config.trig_in_use |= tc->con_in->used_mask;
drvdata->config.trig_out_use |= tc->con_out->used_mask;
return 0;
}
/* create a trigger connection with appropriately sized signal groups */
struct cti_trig_con *cti_allocate_trig_con(struct device *dev, int in_sigs,
int out_sigs)
{
struct cti_trig_con *tc = NULL;
struct cti_trig_grp *in = NULL, *out = NULL;
tc = devm_kzalloc(dev, sizeof(struct cti_trig_con), GFP_KERNEL);
if (!tc)
return tc;
in = devm_kzalloc(dev,
offsetof(struct cti_trig_grp, sig_types[in_sigs]),
GFP_KERNEL);
if (!in)
return NULL;
out = devm_kzalloc(dev,
offsetof(struct cti_trig_grp, sig_types[out_sigs]),
GFP_KERNEL);
if (!out)
return NULL;
tc->con_in = in;
tc->con_out = out;
tc->con_in->nr_sigs = in_sigs;
tc->con_out->nr_sigs = out_sigs;
return tc;
}
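/*
 * Note the offsetof() based allocations above: struct cti_trig_grp ends
 * in a flexible sig_types[] array, so offsetof(struct cti_trig_grp,
 * sig_types[n]) sizes the group header plus exactly n signal type entries
 * in a single devm allocation.
 */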
/*
* Add a default connection if nothing else is specified.
* single connection based on max in/out info, no assoc device
*/
int cti_add_default_connection(struct device *dev, struct cti_drvdata *drvdata)
{
int ret = 0;
int n_trigs = drvdata->config.nr_trig_max;
u32 n_trig_mask = GENMASK(n_trigs - 1, 0);
struct cti_trig_con *tc = NULL;
/*
* Assume max trigs for in and out,
* all used, default sig types allocated
*/
tc = cti_allocate_trig_con(dev, n_trigs, n_trigs);
if (!tc)
return -ENOMEM;
tc->con_in->used_mask = n_trig_mask;
tc->con_out->used_mask = n_trig_mask;
ret = cti_add_connection_entry(dev, drvdata, tc, NULL, "default");
return ret;
}
/** cti channel api **/
/* attach/detach channel from trigger - write through if enabled. */
int cti_channel_trig_op(struct device *dev, enum cti_chan_op op,
enum cti_trig_dir direction, u32 channel_idx,
u32 trigger_idx)
{
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
u32 trig_bitmask;
u32 chan_bitmask;
u32 reg_value;
int reg_offset;
/* ensure indexes in range */
if ((channel_idx >= config->nr_ctm_channels) ||
(trigger_idx >= config->nr_trig_max))
return -EINVAL;
trig_bitmask = BIT(trigger_idx);
/* ensure registered triggers and not out filtered */
if (direction == CTI_TRIG_IN) {
if (!(trig_bitmask & config->trig_in_use))
return -EINVAL;
} else {
if (!(trig_bitmask & config->trig_out_use))
return -EINVAL;
if ((config->trig_filter_enable) &&
(config->trig_out_filter & trig_bitmask))
return -EINVAL;
}
/* update the local register values */
chan_bitmask = BIT(channel_idx);
reg_offset = (direction == CTI_TRIG_IN ? CTIINEN(trigger_idx) :
CTIOUTEN(trigger_idx));
spin_lock(&drvdata->spinlock);
/* read - modify write - the trigger / channel enable value */
reg_value = direction == CTI_TRIG_IN ? config->ctiinen[trigger_idx] :
config->ctiouten[trigger_idx];
if (op == CTI_CHAN_ATTACH)
reg_value |= chan_bitmask;
else
reg_value &= ~chan_bitmask;
/* write local copy */
if (direction == CTI_TRIG_IN)
config->ctiinen[trigger_idx] = reg_value;
else
config->ctiouten[trigger_idx] = reg_value;
/* write through if enabled */
if (cti_active(config))
cti_write_single_reg(drvdata, reg_offset, reg_value);
spin_unlock(&drvdata->spinlock);
return 0;
}
int cti_channel_gate_op(struct device *dev, enum cti_chan_gate_op op,
u32 channel_idx)
{
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
u32 chan_bitmask;
u32 reg_value;
int err = 0;
if (channel_idx >= config->nr_ctm_channels)
return -EINVAL;
chan_bitmask = BIT(channel_idx);
spin_lock(&drvdata->spinlock);
reg_value = config->ctigate;
switch (op) {
case CTI_GATE_CHAN_ENABLE:
reg_value |= chan_bitmask;
break;
case CTI_GATE_CHAN_DISABLE:
reg_value &= ~chan_bitmask;
break;
default:
err = -EINVAL;
break;
}
if (err == 0) {
config->ctigate = reg_value;
if (cti_active(config))
cti_write_single_reg(drvdata, CTIGATE, reg_value);
}
spin_unlock(&drvdata->spinlock);
return err;
}
int cti_channel_setop(struct device *dev, enum cti_chan_set_op op,
u32 channel_idx)
{
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
u32 chan_bitmask;
u32 reg_value;
u32 reg_offset;
int err = 0;
if (channel_idx >= config->nr_ctm_channels)
return -EINVAL;
chan_bitmask = BIT(channel_idx);
spin_lock(&drvdata->spinlock);
reg_value = config->ctiappset;
switch (op) {
case CTI_CHAN_SET:
config->ctiappset |= chan_bitmask;
reg_value = config->ctiappset;
reg_offset = CTIAPPSET;
break;
case CTI_CHAN_CLR:
config->ctiappset &= ~chan_bitmask;
reg_value = chan_bitmask;
reg_offset = CTIAPPCLEAR;
break;
case CTI_CHAN_PULSE:
config->ctiappset &= ~chan_bitmask;
reg_value = chan_bitmask;
reg_offset = CTIAPPPULSE;
break;
default:
err = -EINVAL;
break;
}
if ((err == 0) && cti_active(config))
cti_write_single_reg(drvdata, reg_offset, reg_value);
spin_unlock(&drvdata->spinlock);
return err;
}
static bool cti_add_sysfs_link(struct cti_drvdata *drvdata,
struct cti_trig_con *tc)
{
struct coresight_sysfs_link link_info;
int link_err = 0;
link_info.orig = drvdata->csdev;
link_info.orig_name = tc->con_dev_name;
link_info.target = tc->con_dev;
link_info.target_name = dev_name(&drvdata->csdev->dev);
link_err = coresight_add_sysfs_link(&link_info);
if (link_err)
dev_warn(&drvdata->csdev->dev,
"Failed to set CTI sysfs link %s<=>%s\n",
link_info.orig_name, link_info.target_name);
return !link_err;
}
static void cti_remove_sysfs_link(struct cti_drvdata *drvdata,
struct cti_trig_con *tc)
{
struct coresight_sysfs_link link_info;
link_info.orig = drvdata->csdev;
link_info.orig_name = tc->con_dev_name;
link_info.target = tc->con_dev;
link_info.target_name = dev_name(&drvdata->csdev->dev);
coresight_remove_sysfs_link(&link_info);
}
/*
* Look for a matching connection device name in the list of connections.
* If found then swap in the csdev name, set trig con association pointer
* and return found.
*/
static bool
cti_match_fixup_csdev(struct cti_device *ctidev, const char *node_name,
struct coresight_device *csdev)
{
struct cti_trig_con *tc;
struct cti_drvdata *drvdata = container_of(ctidev, struct cti_drvdata,
ctidev);
list_for_each_entry(tc, &ctidev->trig_cons, node) {
if (tc->con_dev_name) {
if (!strcmp(node_name, tc->con_dev_name)) {
/* match: so swap in csdev name & dev */
tc->con_dev_name = dev_name(&csdev->dev);
tc->con_dev = csdev;
/* try to set sysfs link */
if (cti_add_sysfs_link(drvdata, tc))
return true;
/* link failed - remove CTI reference */
tc->con_dev = NULL;
break;
}
}
}
return false;
}
/*
* Search the cti list to add an associated CTI into the supplied CS device
* This will set the association if CTI declared before the CS device.
* (called from coresight_register() without coresight_mutex locked).
*/
static void cti_add_assoc_to_csdev(struct coresight_device *csdev)
{
struct cti_drvdata *ect_item;
struct cti_device *ctidev;
const char *node_name = NULL;
/* protect the list */
mutex_lock(&ect_mutex);
/* exit if the current device is an ECT device. */
if ((csdev->type == CORESIGHT_DEV_TYPE_HELPER &&
csdev->subtype.helper_subtype ==
CORESIGHT_DEV_SUBTYPE_HELPER_ECT_CTI) ||
list_empty(&ect_net))
goto cti_add_done;
/* if we didn't find the csdev previously we used the fwnode name */
node_name = cti_plat_get_node_name(dev_fwnode(csdev->dev.parent));
if (!node_name)
goto cti_add_done;
/* for each CTI in list... */
list_for_each_entry(ect_item, &ect_net, node) {
ctidev = &ect_item->ctidev;
if (cti_match_fixup_csdev(ctidev, node_name, csdev)) {
/*
* if we found a matching csdev then update the ECT
* association pointer for the device with this CTI.
*/
coresight_add_helper(csdev, ect_item->csdev);
break;
}
}
cti_add_done:
mutex_unlock(&ect_mutex);
}
/*
* Removing the associated devices is easier.
*/
static void cti_remove_assoc_from_csdev(struct coresight_device *csdev)
{
struct cti_drvdata *ctidrv;
struct cti_trig_con *tc;
struct cti_device *ctidev;
union coresight_dev_subtype cti_subtype = {
.helper_subtype = CORESIGHT_DEV_SUBTYPE_HELPER_ECT_CTI
};
struct coresight_device *cti_csdev = coresight_find_output_type(
csdev->pdata, CORESIGHT_DEV_TYPE_HELPER, cti_subtype);
if (!cti_csdev)
return;
mutex_lock(&ect_mutex);
ctidrv = csdev_to_cti_drvdata(cti_csdev);
ctidev = &ctidrv->ctidev;
list_for_each_entry(tc, &ctidev->trig_cons, node) {
if (tc->con_dev == csdev) {
cti_remove_sysfs_link(ctidrv, tc);
tc->con_dev = NULL;
break;
}
}
mutex_unlock(&ect_mutex);
}
/*
* Operations to add and remove associated CTI.
* Register to coresight core driver as call back function.
*/
static struct cti_assoc_op cti_assoc_ops = {
.add = cti_add_assoc_to_csdev,
.remove = cti_remove_assoc_from_csdev
};
/*
* Update the cross references where the associated device was found
* while we were building the connection info. This will occur if the
* assoc device was registered before the CTI.
*/
static void cti_update_conn_xrefs(struct cti_drvdata *drvdata)
{
struct cti_trig_con *tc;
struct cti_device *ctidev = &drvdata->ctidev;
list_for_each_entry(tc, &ctidev->trig_cons, node) {
if (tc->con_dev) {
/* if we can set the sysfs link */
if (cti_add_sysfs_link(drvdata, tc))
/* set the CTI/csdev association */
coresight_add_helper(tc->con_dev,
drvdata->csdev);
else
/* otherwise remove reference from CTI */
tc->con_dev = NULL;
}
}
}
static void cti_remove_conn_xrefs(struct cti_drvdata *drvdata)
{
struct cti_trig_con *tc;
struct cti_device *ctidev = &drvdata->ctidev;
list_for_each_entry(tc, &ctidev->trig_cons, node) {
if (tc->con_dev) {
cti_remove_sysfs_link(drvdata, tc);
tc->con_dev = NULL;
}
}
}
/** cti PM callbacks **/
static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
void *v)
{
struct cti_drvdata *drvdata;
struct coresight_device *csdev;
unsigned int cpu = smp_processor_id();
int notify_res = NOTIFY_OK;
if (!cti_cpu_drvdata[cpu])
return NOTIFY_OK;
drvdata = cti_cpu_drvdata[cpu];
csdev = drvdata->csdev;
if (WARN_ON_ONCE(drvdata->ctidev.cpu != cpu))
return NOTIFY_BAD;
spin_lock(&drvdata->spinlock);
switch (cmd) {
case CPU_PM_ENTER:
/* CTI regs all static - we have a copy & nothing to save */
drvdata->config.hw_powered = false;
if (drvdata->config.hw_enabled)
coresight_disclaim_device(csdev);
break;
case CPU_PM_ENTER_FAILED:
drvdata->config.hw_powered = true;
if (drvdata->config.hw_enabled) {
if (coresight_claim_device(csdev))
drvdata->config.hw_enabled = false;
}
break;
case CPU_PM_EXIT:
/* write hardware registers to re-enable. */
drvdata->config.hw_powered = true;
drvdata->config.hw_enabled = false;
/* check enable reference count to enable HW */
if (drvdata->config.enable_req_count) {
/* check we can claim the device as we re-power */
if (coresight_claim_device(csdev))
goto cti_notify_exit;
drvdata->config.hw_enabled = true;
cti_write_all_hw_regs(drvdata);
}
break;
default:
notify_res = NOTIFY_DONE;
break;
}
cti_notify_exit:
spin_unlock(&drvdata->spinlock);
return notify_res;
}
static struct notifier_block cti_cpu_pm_nb = {
.notifier_call = cti_cpu_pm_notify,
};
/* CPU HP handlers */
static int cti_starting_cpu(unsigned int cpu)
{
struct cti_drvdata *drvdata = cti_cpu_drvdata[cpu];
if (!drvdata)
return 0;
cti_cpuhp_enable_hw(drvdata);
return 0;
}
static int cti_dying_cpu(unsigned int cpu)
{
struct cti_drvdata *drvdata = cti_cpu_drvdata[cpu];
if (!drvdata)
return 0;
spin_lock(&drvdata->spinlock);
drvdata->config.hw_powered = false;
if (drvdata->config.hw_enabled)
coresight_disclaim_device(drvdata->csdev);
spin_unlock(&drvdata->spinlock);
return 0;
}
static int cti_pm_setup(struct cti_drvdata *drvdata)
{
int ret;
if (drvdata->ctidev.cpu == -1)
return 0;
if (nr_cti_cpu)
goto done;
cpus_read_lock();
ret = cpuhp_setup_state_nocalls_cpuslocked(
CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
"arm/coresight_cti:starting",
cti_starting_cpu, cti_dying_cpu);
if (ret) {
cpus_read_unlock();
return ret;
}
ret = cpu_pm_register_notifier(&cti_cpu_pm_nb);
cpus_read_unlock();
if (ret) {
cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_CTI_STARTING);
return ret;
}
done:
nr_cti_cpu++;
cti_cpu_drvdata[drvdata->ctidev.cpu] = drvdata;
return 0;
}
/* release PM registrations */
static void cti_pm_release(struct cti_drvdata *drvdata)
{
if (drvdata->ctidev.cpu == -1)
return;
cti_cpu_drvdata[drvdata->ctidev.cpu] = NULL;
if (--nr_cti_cpu == 0) {
cpu_pm_unregister_notifier(&cti_cpu_pm_nb);
cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_CTI_STARTING);
}
}
/** cti ect operations **/
int cti_enable(struct coresight_device *csdev, enum cs_mode mode, void *data)
{
struct cti_drvdata *drvdata = csdev_to_cti_drvdata(csdev);
return cti_enable_hw(drvdata);
}
int cti_disable(struct coresight_device *csdev, void *data)
{
struct cti_drvdata *drvdata = csdev_to_cti_drvdata(csdev);
return cti_disable_hw(drvdata);
}
static const struct coresight_ops_helper cti_ops_ect = {
.enable = cti_enable,
.disable = cti_disable,
};
static const struct coresight_ops cti_ops = {
.helper_ops = &cti_ops_ect,
};
/*
* Free up CTI specific resources
* called by dev->release, need to call down to underlying csdev release.
*/
static void cti_device_release(struct device *dev)
{
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_drvdata *ect_item, *ect_tmp;
mutex_lock(&ect_mutex);
cti_pm_release(drvdata);
/* remove from the list */
list_for_each_entry_safe(ect_item, ect_tmp, &ect_net, node) {
if (ect_item == drvdata) {
list_del(&ect_item->node);
break;
}
}
mutex_unlock(&ect_mutex);
if (drvdata->csdev_release)
drvdata->csdev_release(dev);
}
static void cti_remove(struct amba_device *adev)
{
struct cti_drvdata *drvdata = dev_get_drvdata(&adev->dev);
mutex_lock(&ect_mutex);
cti_remove_conn_xrefs(drvdata);
mutex_unlock(&ect_mutex);
coresight_unregister(drvdata->csdev);
}
static int cti_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
void __iomem *base;
struct device *dev = &adev->dev;
struct cti_drvdata *drvdata = NULL;
struct coresight_desc cti_desc;
struct coresight_platform_data *pdata = NULL;
struct resource *res = &adev->res;
/* driver data */
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
/* Validity for the resource is already checked by the AMBA core */
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
drvdata->base = base;
cti_desc.access = CSDEV_ACCESS_IOMEM(base);
dev_set_drvdata(dev, drvdata);
/* default CTI device info */
drvdata->ctidev.cpu = -1;
drvdata->ctidev.nr_trig_con = 0;
drvdata->ctidev.ctm_id = 0;
INIT_LIST_HEAD(&drvdata->ctidev.trig_cons);
spin_lock_init(&drvdata->spinlock);
/* initialise CTI driver config values */
cti_set_default_config(dev, drvdata);
pdata = coresight_cti_get_platform_data(dev);
if (IS_ERR(pdata)) {
dev_err(dev, "coresight_cti_get_platform_data err\n");
return PTR_ERR(pdata);
}
/* default to powered - could change on PM notifications */
drvdata->config.hw_powered = true;
/* set up device name - will depend if cpu bound or otherwise */
if (drvdata->ctidev.cpu >= 0)
cti_desc.name = devm_kasprintf(dev, GFP_KERNEL, "cti_cpu%d",
drvdata->ctidev.cpu);
else
cti_desc.name = coresight_alloc_device_name(&cti_sys_devs, dev);
if (!cti_desc.name)
return -ENOMEM;
/* setup CPU power management handling for CPU bound CTI devices. */
ret = cti_pm_setup(drvdata);
if (ret)
return ret;
/* create dynamic attributes for connections */
ret = cti_create_cons_sysfs(dev, drvdata);
if (ret) {
dev_err(dev, "%s: create dynamic sysfs entries failed\n",
cti_desc.name);
goto pm_release;
}
/* set up coresight component description */
cti_desc.pdata = pdata;
cti_desc.type = CORESIGHT_DEV_TYPE_HELPER;
cti_desc.subtype.helper_subtype = CORESIGHT_DEV_SUBTYPE_HELPER_ECT_CTI;
cti_desc.ops = &cti_ops;
cti_desc.groups = drvdata->ctidev.con_groups;
cti_desc.dev = dev;
drvdata->csdev = coresight_register(&cti_desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
goto pm_release;
}
/* add to list of CTI devices */
mutex_lock(&ect_mutex);
list_add(&drvdata->node, &ect_net);
/* set any cross references */
cti_update_conn_xrefs(drvdata);
mutex_unlock(&ect_mutex);
/* set up release chain */
drvdata->csdev_release = drvdata->csdev->dev.release;
drvdata->csdev->dev.release = cti_device_release;
/* all done - dec pm refcount */
pm_runtime_put(&adev->dev);
dev_info(&drvdata->csdev->dev, "CTI initialized\n");
return 0;
pm_release:
cti_pm_release(drvdata);
return ret;
}
static struct amba_cs_uci_id uci_id_cti[] = {
{
/* CTI UCI data */
.devarch = 0x47701a14, /* CTI v2 */
.devarch_mask = 0xfff0ffff,
.devtype = 0x00000014, /* maj(0x4-debug) min(0x1-ECT) */
}
};
static const struct amba_id cti_ids[] = {
CS_AMBA_ID(0x000bb906), /* Coresight CTI (SoC 400), C-A72, C-A57 */
CS_AMBA_ID(0x000bb922), /* CTI - C-A8 */
CS_AMBA_ID(0x000bb9a8), /* CTI - C-A53 */
CS_AMBA_ID(0x000bb9aa), /* CTI - C-A73 */
CS_AMBA_UCI_ID(0x000bb9da, uci_id_cti), /* CTI - C-A35 */
CS_AMBA_UCI_ID(0x000bb9ed, uci_id_cti), /* Coresight CTI (SoC 600) */
{ 0, 0},
};
MODULE_DEVICE_TABLE(amba, cti_ids);
static struct amba_driver cti_driver = {
.drv = {
.name = "coresight-cti",
.owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = cti_probe,
.remove = cti_remove,
.id_table = cti_ids,
};
static int __init cti_init(void)
{
int ret;
ret = amba_driver_register(&cti_driver);
if (ret)
pr_info("Error registering cti driver\n");
coresight_set_cti_ops(&cti_assoc_ops);
return ret;
}
static void __exit cti_exit(void)
{
coresight_remove_cti_ops();
amba_driver_unregister(&cti_driver);
}
module_init(cti_init);
module_exit(cti_exit);
MODULE_AUTHOR("Mike Leach <[email protected]>");
MODULE_DESCRIPTION("Arm CoreSight CTI Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/hwtracing/coresight/coresight-cti-core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2019 Linaro Limited, All rights reserved.
* Author: Mike Leach <[email protected]>
*/
#include <linux/atomic.h>
#include <linux/coresight.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include "coresight-cti.h"
/*
* Declare the number of statically declared attribute groups.
* The value includes the groups plus the NULL terminator at the end of the table.
*/
#define CORESIGHT_CTI_STATIC_GROUPS_MAX 5
/*
* List of trigger signal type names. Match the constants declared in
* include/dt-bindings/arm/coresight-cti-dt.h
*/
static const char * const sig_type_names[] = {
"genio", /* GEN_IO */
"intreq", /* GEN_INTREQ */
"intack", /* GEN_INTACK */
"haltreq", /* GEN_HALTREQ */
"restartreq", /* GEN_RESTARTREQ */
"pe_edbgreq", /* PE_EDBGREQ */
"pe_dbgrestart",/* PE_DBGRESTART */
"pe_ctiirq", /* PE_CTIIRQ */
"pe_pmuirq", /* PE_PMUIRQ */
"pe_dbgtrigger",/* PE_DBGTRIGGER */
"etm_extout", /* ETM_EXTOUT */
"etm_extin", /* ETM_EXTIN */
"snk_full", /* SNK_FULL */
"snk_acqcomp", /* SNK_ACQCOMP */
"snk_flushcomp",/* SNK_FLUSHCOMP */
"snk_flushin", /* SNK_FLUSHIN */
"snk_trigin", /* SNK_TRIGIN */
"stm_asyncout", /* STM_ASYNCOUT */
"stm_tout_spte",/* STM_TOUT_SPTE */
"stm_tout_sw", /* STM_TOUT_SW */
"stm_tout_hete",/* STM_TOUT_HETE */
"stm_hwevent", /* STM_HWEVENT */
"ela_tstart", /* ELA_TSTART */
"ela_tstop", /* ELA_TSTOP */
"ela_dbgreq", /* ELA_DBGREQ */
};
/* Show function pointer used in the connections' dynamically declared attributes */
typedef ssize_t (*p_show_fn)(struct device *dev, struct device_attribute *attr,
char *buf);
/* Connection attribute types */
enum cti_conn_attr_type {
CTI_CON_ATTR_NAME,
CTI_CON_ATTR_TRIGIN_SIG,
CTI_CON_ATTR_TRIGOUT_SIG,
CTI_CON_ATTR_TRIGIN_TYPES,
CTI_CON_ATTR_TRIGOUT_TYPES,
CTI_CON_ATTR_MAX,
};
/* Names for the connection attributes */
static const char * const con_attr_names[CTI_CON_ATTR_MAX] = {
"name",
"in_signals",
"out_signals",
"in_types",
"out_types",
};
/* basic attributes */
static ssize_t enable_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int enable_req;
bool enabled, powered;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
spin_lock(&drvdata->spinlock);
enable_req = drvdata->config.enable_req_count;
powered = drvdata->config.hw_powered;
enabled = drvdata->config.hw_enabled;
spin_unlock(&drvdata->spinlock);
if (powered)
return sprintf(buf, "%d\n", enabled);
else
return sprintf(buf, "%d\n", !!enable_req);
}
static ssize_t enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret = 0;
unsigned long val;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
ret = kstrtoul(buf, 0, &val);
if (ret)
return ret;
if (val) {
ret = pm_runtime_resume_and_get(dev->parent);
if (ret)
return ret;
ret = cti_enable(drvdata->csdev, CS_MODE_SYSFS, NULL);
if (ret)
pm_runtime_put(dev->parent);
} else {
ret = cti_disable(drvdata->csdev, NULL);
if (!ret)
pm_runtime_put(dev->parent);
}
if (ret)
return ret;
return size;
}
static DEVICE_ATTR_RW(enable);
static ssize_t powered_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
bool powered;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
spin_lock(&drvdata->spinlock);
powered = drvdata->config.hw_powered;
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%d\n", powered);
}
static DEVICE_ATTR_RO(powered);
static ssize_t ctmid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
return sprintf(buf, "%d\n", drvdata->ctidev.ctm_id);
}
static DEVICE_ATTR_RO(ctmid);
static ssize_t nr_trigger_cons_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
return sprintf(buf, "%d\n", drvdata->ctidev.nr_trig_con);
}
static DEVICE_ATTR_RO(nr_trigger_cons);
/* attribute and group sysfs tables. */
static struct attribute *coresight_cti_attrs[] = {
&dev_attr_enable.attr,
&dev_attr_powered.attr,
&dev_attr_ctmid.attr,
&dev_attr_nr_trigger_cons.attr,
NULL,
};
/* register based attributes */
/* Read registers with power check only (no enable check). */
static ssize_t coresight_cti_reg_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cs_off_attribute *cti_attr = container_of(attr, struct cs_off_attribute, attr);
u32 val = 0;
pm_runtime_get_sync(dev->parent);
spin_lock(&drvdata->spinlock);
if (drvdata->config.hw_powered)
val = readl_relaxed(drvdata->base + cti_attr->off);
spin_unlock(&drvdata->spinlock);
pm_runtime_put_sync(dev->parent);
return sysfs_emit(buf, "0x%x\n", val);
}
/* Write registers with power check only (no enable check). */
static __maybe_unused ssize_t coresight_cti_reg_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cs_off_attribute *cti_attr = container_of(attr, struct cs_off_attribute, attr);
unsigned long val = 0;
if (kstrtoul(buf, 0, &val))
return -EINVAL;
pm_runtime_get_sync(dev->parent);
spin_lock(&drvdata->spinlock);
if (drvdata->config.hw_powered)
cti_write_single_reg(drvdata, cti_attr->off, val);
spin_unlock(&drvdata->spinlock);
pm_runtime_put_sync(dev->parent);
return size;
}
#define coresight_cti_reg(name, offset) \
(&((struct cs_off_attribute[]) { \
{ \
__ATTR(name, 0444, coresight_cti_reg_show, NULL), \
offset \
} \
})[0].attr.attr)
#define coresight_cti_reg_rw(name, offset) \
(&((struct cs_off_attribute[]) { \
{ \
__ATTR(name, 0644, coresight_cti_reg_show, \
coresight_cti_reg_store), \
offset \
} \
})[0].attr.attr)
#define coresight_cti_reg_wo(name, offset) \
(&((struct cs_off_attribute[]) { \
{ \
__ATTR(name, 0200, NULL, coresight_cti_reg_store), \
offset \
} \
})[0].attr.attr)
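/*
 * The macros above rely on a C99 compound literal: each entry is an
 * anonymous struct cs_off_attribute pairing the attribute with its
 * register offset. Conceptually, coresight_cti_reg(devid, CORESIGHT_DEVID)
 * evaluates to the address of the struct attribute embedded in
 * { __ATTR(devid, 0444, coresight_cti_reg_show, NULL), CORESIGHT_DEVID },
 * and the show/store handlers recover the offset via container_of().
 */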
/* coresight management registers */
static struct attribute *coresight_cti_mgmt_attrs[] = {
coresight_cti_reg(devaff0, CTIDEVAFF0),
coresight_cti_reg(devaff1, CTIDEVAFF1),
coresight_cti_reg(authstatus, CORESIGHT_AUTHSTATUS),
coresight_cti_reg(devarch, CORESIGHT_DEVARCH),
coresight_cti_reg(devid, CORESIGHT_DEVID),
coresight_cti_reg(devtype, CORESIGHT_DEVTYPE),
coresight_cti_reg(pidr0, CORESIGHT_PERIPHIDR0),
coresight_cti_reg(pidr1, CORESIGHT_PERIPHIDR1),
coresight_cti_reg(pidr2, CORESIGHT_PERIPHIDR2),
coresight_cti_reg(pidr3, CORESIGHT_PERIPHIDR3),
coresight_cti_reg(pidr4, CORESIGHT_PERIPHIDR4),
NULL,
};
/* CTI low level programming registers */
/*
* Show a simple 32 bit value if enabled and powered.
* If inaccessible and pcached_val is not NULL, then show the cached value.
*/
static ssize_t cti_reg32_show(struct device *dev, char *buf,
u32 *pcached_val, int reg_offset)
{
u32 val = 0;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
spin_lock(&drvdata->spinlock);
if ((reg_offset >= 0) && cti_active(config)) {
CS_UNLOCK(drvdata->base);
val = readl_relaxed(drvdata->base + reg_offset);
if (pcached_val)
*pcached_val = val;
CS_LOCK(drvdata->base);
} else if (pcached_val) {
val = *pcached_val;
}
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#x\n", val);
}
/*
* Store a simple 32 bit value.
* If pcached_val is not NULL, then copy the value there too;
* if reg_offset >= 0 then write through if enabled.
*/
static ssize_t cti_reg32_store(struct device *dev, const char *buf,
size_t size, u32 *pcached_val, int reg_offset)
{
unsigned long val;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
if (kstrtoul(buf, 0, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
/* local store */
if (pcached_val)
*pcached_val = (u32)val;
/* write through if offset and enabled */
if ((reg_offset >= 0) && cti_active(config))
cti_write_single_reg(drvdata, reg_offset, val);
spin_unlock(&drvdata->spinlock);
return size;
}
/* Standard macro for simple rw cti config registers */
#define cti_config_reg32_rw(name, cfgname, offset) \
static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
return cti_reg32_show(dev, buf, \
&drvdata->config.cfgname, offset); \
} \
\
static ssize_t name##_store(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t size) \
{ \
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
return cti_reg32_store(dev, buf, size, \
&drvdata->config.cfgname, offset); \
} \
static DEVICE_ATTR_RW(name)
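/*
 * For example, cti_config_reg32_rw(gate, ctigate, CTIGATE) further down
 * expands to gate_show()/gate_store() wrappers around cti_reg32_show()/
 * cti_reg32_store() with &drvdata->config.ctigate as the cached value and
 * CTIGATE as the write-through offset, plus a matching DEVICE_ATTR_RW(gate).
 */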
static ssize_t inout_sel_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
u32 val;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
val = (u32)drvdata->config.ctiinout_sel;
return sprintf(buf, "%d\n", val);
}
static ssize_t inout_sel_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned long val;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
if (kstrtoul(buf, 0, &val))
return -EINVAL;
if (val > (CTIINOUTEN_MAX - 1))
return -EINVAL;
spin_lock(&drvdata->spinlock);
drvdata->config.ctiinout_sel = val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(inout_sel);
static ssize_t inen_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
unsigned long val;
int index;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
spin_lock(&drvdata->spinlock);
index = drvdata->config.ctiinout_sel;
val = drvdata->config.ctiinen[index];
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#lx\n", val);
}
static ssize_t inen_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned long val;
int index;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
if (kstrtoul(buf, 0, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
index = config->ctiinout_sel;
config->ctiinen[index] = val;
/* write through if enabled */
if (cti_active(config))
cti_write_single_reg(drvdata, CTIINEN(index), val);
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(inen);
static ssize_t outen_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
unsigned long val;
int index;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
spin_lock(&drvdata->spinlock);
index = drvdata->config.ctiinout_sel;
val = drvdata->config.ctiouten[index];
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#lx\n", val);
}
static ssize_t outen_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned long val;
int index;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
if (kstrtoul(buf, 0, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
index = config->ctiinout_sel;
config->ctiouten[index] = val;
/* write through if enabled */
if (cti_active(config))
cti_write_single_reg(drvdata, CTIOUTEN(index), val);
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(outen);
static ssize_t intack_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned long val;
if (kstrtoul(buf, 0, &val))
return -EINVAL;
cti_write_intack(dev, val);
return size;
}
static DEVICE_ATTR_WO(intack);
cti_config_reg32_rw(gate, ctigate, CTIGATE);
cti_config_reg32_rw(asicctl, asicctl, ASICCTL);
cti_config_reg32_rw(appset, ctiappset, CTIAPPSET);
static ssize_t appclear_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned long val;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
if (kstrtoul(buf, 0, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
/* a 1'b1 in appclear clears down the same bit in appset */
config->ctiappset &= ~val;
/* write through if enabled */
if (cti_active(config))
cti_write_single_reg(drvdata, CTIAPPCLEAR, val);
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_WO(appclear);
static ssize_t apppulse_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned long val;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
if (kstrtoul(buf, 0, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
/* write through if enabled */
if (cti_active(config))
cti_write_single_reg(drvdata, CTIAPPPULSE, val);
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_WO(apppulse);
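/*
 * Illustrative sysfs sequence for the application trigger attributes
 * above (device and group names assumed):
 *
 * echo 0x3 > .../cti_sys0/regs/appset # set channels 0 and 1
 * echo 0x1 > .../cti_sys0/regs/appclear # clear channel 0 again
 * echo 0x4 > .../cti_sys0/regs/apppulse # one-shot pulse on channel 2
 *
 * Writes reach the hardware only while the CTI is enabled and powered
 * (cti_active()); appset and appclear also update the cached ctiappset
 * value so the state can be replayed when the CTI is re-enabled.
 */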
/*
* Define CONFIG_CORESIGHT_CTI_INTEGRATION_REGS to enable access to the
* integration control registers. Normally only used to investigate connection
* data.
*/
static struct attribute *coresight_cti_regs_attrs[] = {
&dev_attr_inout_sel.attr,
&dev_attr_inen.attr,
&dev_attr_outen.attr,
&dev_attr_gate.attr,
&dev_attr_asicctl.attr,
&dev_attr_intack.attr,
&dev_attr_appset.attr,
&dev_attr_appclear.attr,
&dev_attr_apppulse.attr,
coresight_cti_reg(triginstatus, CTITRIGINSTATUS),
coresight_cti_reg(trigoutstatus, CTITRIGOUTSTATUS),
coresight_cti_reg(chinstatus, CTICHINSTATUS),
coresight_cti_reg(choutstatus, CTICHOUTSTATUS),
#ifdef CONFIG_CORESIGHT_CTI_INTEGRATION_REGS
coresight_cti_reg_rw(itctrl, CORESIGHT_ITCTRL),
coresight_cti_reg(ittrigin, ITTRIGIN),
coresight_cti_reg(itchin, ITCHIN),
coresight_cti_reg_rw(ittrigout, ITTRIGOUT),
coresight_cti_reg_rw(itchout, ITCHOUT),
coresight_cti_reg(itchoutack, ITCHOUTACK),
coresight_cti_reg(ittrigoutack, ITTRIGOUTACK),
coresight_cti_reg_wo(ittriginack, ITTRIGINACK),
coresight_cti_reg_wo(itchinack, ITCHINACK),
#endif
NULL,
};
/* CTI channel x-trigger programming */
static int
cti_trig_op_parse(struct device *dev, enum cti_chan_op op,
enum cti_trig_dir dir, const char *buf, size_t size)
{
u32 chan_idx;
u32 trig_idx;
int items, err = -EINVAL;
/* extract chan idx and trigger idx */
items = sscanf(buf, "%d %d", &chan_idx, &trig_idx);
if (items == 2) {
err = cti_channel_trig_op(dev, op, dir, chan_idx, trig_idx);
if (!err)
err = size;
}
return err;
}
static ssize_t trigin_attach_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
return cti_trig_op_parse(dev, CTI_CHAN_ATTACH, CTI_TRIG_IN,
buf, size);
}
static DEVICE_ATTR_WO(trigin_attach);
static ssize_t trigin_detach_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
return cti_trig_op_parse(dev, CTI_CHAN_DETACH, CTI_TRIG_IN,
buf, size);
}
static DEVICE_ATTR_WO(trigin_detach);
static ssize_t trigout_attach_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
return cti_trig_op_parse(dev, CTI_CHAN_ATTACH, CTI_TRIG_OUT,
buf, size);
}
static DEVICE_ATTR_WO(trigout_attach);
static ssize_t trigout_detach_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
return cti_trig_op_parse(dev, CTI_CHAN_DETACH, CTI_TRIG_OUT,
buf, size);
}
static DEVICE_ATTR_WO(trigout_detach);
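/*
 * Illustrative usage of the attach/detach attributes above (device and
 * group names assumed); the store handlers parse "<channel> <trigger>"
 * pairs:
 *
 * echo 0 2 > .../cti_cpu0/channels/trigin_attach # trigger in 2 -> chan 0
 * echo 0 1 > .../cti_cpu0/channels/trigout_attach # chan 0 -> trigger out 1
 */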
static ssize_t chan_gate_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int err = 0, channel = 0;
if (kstrtoint(buf, 0, &channel))
return -EINVAL;
err = cti_channel_gate_op(dev, CTI_GATE_CHAN_ENABLE, channel);
return err ? err : size;
}
static ssize_t chan_gate_enable_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *cfg = &drvdata->config;
unsigned long ctigate_bitmask = cfg->ctigate;
int size = 0;
if (cfg->ctigate == 0)
size = sprintf(buf, "\n");
else
size = bitmap_print_to_pagebuf(true, buf, &ctigate_bitmask,
cfg->nr_ctm_channels);
return size;
}
static DEVICE_ATTR_RW(chan_gate_enable);
static ssize_t chan_gate_disable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int err = 0, channel = 0;
if (kstrtoint(buf, 0, &channel))
return -EINVAL;
err = cti_channel_gate_op(dev, CTI_GATE_CHAN_DISABLE, channel);
return err ? err : size;
}
static DEVICE_ATTR_WO(chan_gate_disable);
static int
chan_op_parse(struct device *dev, enum cti_chan_set_op op, const char *buf)
{
int err = 0, channel = 0;
if (kstrtoint(buf, 0, &channel))
return -EINVAL;
err = cti_channel_setop(dev, op, channel);
return err;
}
static ssize_t chan_set_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int err = chan_op_parse(dev, CTI_CHAN_SET, buf);
return err ? err : size;
}
static DEVICE_ATTR_WO(chan_set);
static ssize_t chan_clear_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int err = chan_op_parse(dev, CTI_CHAN_CLR, buf);
return err ? err : size;
}
static DEVICE_ATTR_WO(chan_clear);
static ssize_t chan_pulse_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int err = chan_op_parse(dev, CTI_CHAN_PULSE, buf);
return err ? err : size;
}
static DEVICE_ATTR_WO(chan_pulse);
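/*
 * Unlike appset/appclear/apppulse, the chan_set/chan_clear/chan_pulse
 * attributes above take a single channel number rather than a bitmask,
 * e.g. (path assumed):
 *
 * echo 2 > .../cti_sys0/channels/chan_pulse
 *
 * which resolves to cti_channel_setop(dev, CTI_CHAN_PULSE, 2) and a
 * write-through of BIT(2) to CTIAPPPULSE when the CTI is active.
 */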
static ssize_t trig_filter_enable_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
u32 val;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
spin_lock(&drvdata->spinlock);
val = drvdata->config.trig_filter_enable;
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%d\n", val);
}
static ssize_t trig_filter_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned long val;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
if (kstrtoul(buf, 0, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
drvdata->config.trig_filter_enable = !!val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(trig_filter_enable);
static ssize_t trigout_filtered_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *cfg = &drvdata->config;
int size = 0, nr_trig_max = cfg->nr_trig_max;
unsigned long mask = cfg->trig_out_filter;
if (mask)
size = bitmap_print_to_pagebuf(true, buf, &mask, nr_trig_max);
return size;
}
static DEVICE_ATTR_RO(trigout_filtered);
/* clear all xtrigger / channel programming */
static ssize_t chan_xtrigs_reset_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int i;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
spin_lock(&drvdata->spinlock);
/* clear the CTI trigger / channel programming registers */
for (i = 0; i < config->nr_trig_max; i++) {
config->ctiinen[i] = 0;
config->ctiouten[i] = 0;
}
/* clear the other regs */
config->ctigate = GENMASK(config->nr_ctm_channels - 1, 0);
config->asicctl = 0;
config->ctiappset = 0;
config->ctiinout_sel = 0;
config->xtrig_rchan_sel = 0;
/* if enabled then write through */
if (cti_active(config))
cti_write_all_hw_regs(drvdata);
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_WO(chan_xtrigs_reset);
/*
* Write to select a channel to view, read to display the
* cross triggers for the selected channel.
*/
static ssize_t chan_xtrigs_sel_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned long val;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
if (kstrtoul(buf, 0, &val))
return -EINVAL;
if (val > (drvdata->config.nr_ctm_channels - 1))
return -EINVAL;
spin_lock(&drvdata->spinlock);
drvdata->config.xtrig_rchan_sel = val;
spin_unlock(&drvdata->spinlock);
return size;
}
static ssize_t chan_xtrigs_sel_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
unsigned long val;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
spin_lock(&drvdata->spinlock);
val = drvdata->config.xtrig_rchan_sel;
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%ld\n", val);
}
static DEVICE_ATTR_RW(chan_xtrigs_sel);
static ssize_t chan_xtrigs_in_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *cfg = &drvdata->config;
int used = 0, reg_idx;
int nr_trig_max = drvdata->config.nr_trig_max;
u32 chan_mask = BIT(cfg->xtrig_rchan_sel);
for (reg_idx = 0; reg_idx < nr_trig_max; reg_idx++) {
if (chan_mask & cfg->ctiinen[reg_idx])
used += sprintf(buf + used, "%d ", reg_idx);
}
used += sprintf(buf + used, "\n");
return used;
}
static DEVICE_ATTR_RO(chan_xtrigs_in);
static ssize_t chan_xtrigs_out_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *cfg = &drvdata->config;
int used = 0, reg_idx;
int nr_trig_max = drvdata->config.nr_trig_max;
u32 chan_mask = BIT(cfg->xtrig_rchan_sel);
for (reg_idx = 0; reg_idx < nr_trig_max; reg_idx++) {
if (chan_mask & cfg->ctiouten[reg_idx])
used += sprintf(buf + used, "%d ", reg_idx);
}
used += sprintf(buf + used, "\n");
return used;
}
static DEVICE_ATTR_RO(chan_xtrigs_out);
static ssize_t print_chan_list(struct device *dev,
char *buf, bool inuse)
{
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
int size, i;
unsigned long inuse_bits = 0, chan_mask;
/* scan regs to get bitmap of channels in use. */
spin_lock(&drvdata->spinlock);
for (i = 0; i < config->nr_trig_max; i++) {
inuse_bits |= config->ctiinen[i];
inuse_bits |= config->ctiouten[i];
}
spin_unlock(&drvdata->spinlock);
/* invert bits if printing free channels */
if (!inuse)
inuse_bits = ~inuse_bits;
/* list of channels, or 'none' */
chan_mask = GENMASK(config->nr_ctm_channels - 1, 0);
if (inuse_bits & chan_mask)
size = bitmap_print_to_pagebuf(true, buf, &inuse_bits,
config->nr_ctm_channels);
else
size = sprintf(buf, "\n");
return size;
}
static ssize_t chan_inuse_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return print_chan_list(dev, buf, true);
}
static DEVICE_ATTR_RO(chan_inuse);
static ssize_t chan_free_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return print_chan_list(dev, buf, false);
}
static DEVICE_ATTR_RO(chan_free);
static struct attribute *coresight_cti_channel_attrs[] = {
&dev_attr_trigin_attach.attr,
&dev_attr_trigin_detach.attr,
&dev_attr_trigout_attach.attr,
&dev_attr_trigout_detach.attr,
&dev_attr_trig_filter_enable.attr,
&dev_attr_trigout_filtered.attr,
&dev_attr_chan_gate_enable.attr,
&dev_attr_chan_gate_disable.attr,
&dev_attr_chan_set.attr,
&dev_attr_chan_clear.attr,
&dev_attr_chan_pulse.attr,
&dev_attr_chan_inuse.attr,
&dev_attr_chan_free.attr,
&dev_attr_chan_xtrigs_sel.attr,
&dev_attr_chan_xtrigs_in.attr,
&dev_attr_chan_xtrigs_out.attr,
&dev_attr_chan_xtrigs_reset.attr,
NULL,
};
/* Create the connections trigger groups and attrs dynamically */
/*
* Each connection has a dynamic group triggers<N> holding the name and
* trigin/out signal and type attributes, and each device has a static
* nr_trigger_cons attribute giving the number of groups, e.g. in sysfs:
* /cti_<name>/triggers0
* /cti_<name>/triggers1
* /cti_<name>/nr_trigger_cons
* where nr_trigger_cons = 2
*/
static ssize_t con_name_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct dev_ext_attribute *ext_attr =
container_of(attr, struct dev_ext_attribute, attr);
struct cti_trig_con *con = (struct cti_trig_con *)ext_attr->var;
return sprintf(buf, "%s\n", con->con_dev_name);
}
static ssize_t trigin_sig_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct dev_ext_attribute *ext_attr =
container_of(attr, struct dev_ext_attribute, attr);
struct cti_trig_con *con = (struct cti_trig_con *)ext_attr->var;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *cfg = &drvdata->config;
unsigned long mask = con->con_in->used_mask;
return bitmap_print_to_pagebuf(true, buf, &mask, cfg->nr_trig_max);
}
static ssize_t trigout_sig_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct dev_ext_attribute *ext_attr =
container_of(attr, struct dev_ext_attribute, attr);
struct cti_trig_con *con = (struct cti_trig_con *)ext_attr->var;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *cfg = &drvdata->config;
unsigned long mask = con->con_out->used_mask;
return bitmap_print_to_pagebuf(true, buf, &mask, cfg->nr_trig_max);
}
/* convert a sig type id to a name */
static const char *
cti_sig_type_name(struct cti_trig_con *con, int used_count, bool in)
{
int idx = 0;
struct cti_trig_grp *grp = in ? con->con_in : con->con_out;
if (used_count < grp->nr_sigs)
idx = grp->sig_types[used_count];
return sig_type_names[idx];
}
static ssize_t trigin_type_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct dev_ext_attribute *ext_attr =
container_of(attr, struct dev_ext_attribute, attr);
struct cti_trig_con *con = (struct cti_trig_con *)ext_attr->var;
int sig_idx, used = 0;
const char *name;
for (sig_idx = 0; sig_idx < con->con_in->nr_sigs; sig_idx++) {
name = cti_sig_type_name(con, sig_idx, true);
used += sprintf(buf + used, "%s ", name);
}
used += sprintf(buf + used, "\n");
return used;
}
static ssize_t trigout_type_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct dev_ext_attribute *ext_attr =
container_of(attr, struct dev_ext_attribute, attr);
struct cti_trig_con *con = (struct cti_trig_con *)ext_attr->var;
int sig_idx, used = 0;
const char *name;
for (sig_idx = 0; sig_idx < con->con_out->nr_sigs; sig_idx++) {
name = cti_sig_type_name(con, sig_idx, false);
used += sprintf(buf + used, "%s ", name);
}
used += sprintf(buf + used, "\n");
return used;
}
/*
* Array of show function names declared above to allow selection
* for the connection attributes
*/
static p_show_fn show_fns[CTI_CON_ATTR_MAX] = {
con_name_show,
trigin_sig_show,
trigout_sig_show,
trigin_type_show,
trigout_type_show,
};
static int cti_create_con_sysfs_attr(struct device *dev,
				     struct cti_trig_con *con,
				     enum cti_conn_attr_type attr_type,
				     int attr_idx)
{
	struct dev_ext_attribute *eattr;
	char *name;
	eattr = devm_kzalloc(dev, sizeof(struct dev_ext_attribute),
			     GFP_KERNEL);
	if (!eattr)
		return -ENOMEM;
	name = devm_kstrdup(dev, con_attr_names[attr_type], GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	/* fill out the underlying attribute struct */
	eattr->attr.attr.name = name;
	eattr->attr.attr.mode = 0444;
	/* now the device_attribute struct */
	eattr->attr.show = show_fns[attr_type];
	eattr->var = con;
	con->con_attrs[attr_idx] = &eattr->attr.attr;
	/*
	 * Initialize the dynamically allocated attribute
	 * to avoid a LOCKDEP splat. See include/linux/sysfs.h
	 * for more details.
	 */
	sysfs_attr_init(con->con_attrs[attr_idx]);
	return 0;
}
static struct attribute_group *
cti_create_con_sysfs_group(struct device *dev, struct cti_device *ctidev,
int con_idx, struct cti_trig_con *tc)
{
struct attribute_group *group = NULL;
int grp_idx;
group = devm_kzalloc(dev, sizeof(struct attribute_group), GFP_KERNEL);
if (!group)
return NULL;
group->name = devm_kasprintf(dev, GFP_KERNEL, "triggers%d", con_idx);
if (!group->name)
return NULL;
grp_idx = con_idx + CORESIGHT_CTI_STATIC_GROUPS_MAX - 1;
ctidev->con_groups[grp_idx] = group;
tc->attr_group = group;
return group;
}
/* create a triggers connection group and the attributes for that group */
static int cti_create_con_attr_set(struct device *dev, int con_idx,
struct cti_device *ctidev,
struct cti_trig_con *tc)
{
struct attribute_group *attr_group = NULL;
int attr_idx = 0;
int err = -ENOMEM;
attr_group = cti_create_con_sysfs_group(dev, ctidev, con_idx, tc);
if (!attr_group)
return -ENOMEM;
/* allocate NULL terminated array of attributes */
tc->con_attrs = devm_kcalloc(dev, CTI_CON_ATTR_MAX + 1,
sizeof(struct attribute *), GFP_KERNEL);
if (!tc->con_attrs)
return -ENOMEM;
err = cti_create_con_sysfs_attr(dev, tc, CTI_CON_ATTR_NAME,
attr_idx++);
if (err)
return err;
if (tc->con_in->nr_sigs > 0) {
err = cti_create_con_sysfs_attr(dev, tc,
CTI_CON_ATTR_TRIGIN_SIG,
attr_idx++);
if (err)
return err;
err = cti_create_con_sysfs_attr(dev, tc,
CTI_CON_ATTR_TRIGIN_TYPES,
attr_idx++);
if (err)
return err;
}
if (tc->con_out->nr_sigs > 0) {
err = cti_create_con_sysfs_attr(dev, tc,
CTI_CON_ATTR_TRIGOUT_SIG,
attr_idx++);
if (err)
return err;
err = cti_create_con_sysfs_attr(dev, tc,
CTI_CON_ATTR_TRIGOUT_TYPES,
attr_idx++);
if (err)
return err;
}
attr_group->attrs = tc->con_attrs;
return 0;
}
/* create the array of group pointers for the CTI sysfs groups */
static int cti_create_cons_groups(struct device *dev, struct cti_device *ctidev)
{
int nr_groups;
/* nr groups = dynamic + static + NULL terminator */
nr_groups = ctidev->nr_trig_con + CORESIGHT_CTI_STATIC_GROUPS_MAX;
ctidev->con_groups = devm_kcalloc(dev, nr_groups,
sizeof(struct attribute_group *),
GFP_KERNEL);
if (!ctidev->con_groups)
return -ENOMEM;
return 0;
}
int cti_create_cons_sysfs(struct device *dev, struct cti_drvdata *drvdata)
{
struct cti_device *ctidev = &drvdata->ctidev;
int err, con_idx = 0, i;
struct cti_trig_con *tc;
err = cti_create_cons_groups(dev, ctidev);
if (err)
return err;
/* populate first locations with the static set of groups */
for (i = 0; i < (CORESIGHT_CTI_STATIC_GROUPS_MAX - 1); i++)
ctidev->con_groups[i] = coresight_cti_groups[i];
/* add dynamic set for each connection */
list_for_each_entry(tc, &ctidev->trig_cons, node) {
err = cti_create_con_attr_set(dev, con_idx++, ctidev, tc);
if (err)
break;
}
return err;
}
/* attribute and group sysfs tables. */
static const struct attribute_group coresight_cti_group = {
.attrs = coresight_cti_attrs,
};
static const struct attribute_group coresight_cti_mgmt_group = {
.attrs = coresight_cti_mgmt_attrs,
.name = "mgmt",
};
static const struct attribute_group coresight_cti_regs_group = {
.attrs = coresight_cti_regs_attrs,
.name = "regs",
};
static const struct attribute_group coresight_cti_channels_group = {
.attrs = coresight_cti_channel_attrs,
.name = "channels",
};
const struct attribute_group *
coresight_cti_groups[CORESIGHT_CTI_STATIC_GROUPS_MAX] = {
&coresight_cti_group,
&coresight_cti_mgmt_group,
&coresight_cti_regs_group,
&coresight_cti_channels_group,
NULL,
};
| linux-master | drivers/hwtracing/coresight/coresight-cti-sysfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* Description: CoreSight Funnel driver
*/
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include "coresight-priv.h"
#define FUNNEL_FUNCTL 0x000
#define FUNNEL_PRICTL 0x004
#define FUNNEL_HOLDTIME_MASK 0xf00
#define FUNNEL_HOLDTIME_SHFT 0x8
#define FUNNEL_HOLDTIME (0x7 << FUNNEL_HOLDTIME_SHFT)
#define FUNNEL_ENSx_MASK 0xff
DEFINE_CORESIGHT_DEVLIST(funnel_devs, "funnel");
/**
* struct funnel_drvdata - specifics associated to a funnel component
* @base: memory mapped base address for this component.
* @atclk: optional clock for the core parts of the funnel.
* @csdev: component vitals needed by the framework.
* @priority: port selection order.
* @spinlock: serialize enable/disable operations.
*/
struct funnel_drvdata {
void __iomem *base;
struct clk *atclk;
struct coresight_device *csdev;
unsigned long priority;
spinlock_t spinlock;
};
static int dynamic_funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
{
u32 functl;
int rc = 0;
struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL);
/* Claim the device only when we enable the first slave */
if (!(functl & FUNNEL_ENSx_MASK)) {
rc = coresight_claim_device_unlocked(csdev);
if (rc)
goto done;
}
functl &= ~FUNNEL_HOLDTIME_MASK;
functl |= FUNNEL_HOLDTIME;
functl |= (1 << port);
writel_relaxed(functl, drvdata->base + FUNNEL_FUNCTL);
writel_relaxed(drvdata->priority, drvdata->base + FUNNEL_PRICTL);
done:
CS_LOCK(drvdata->base);
return rc;
}
static int funnel_enable(struct coresight_device *csdev,
struct coresight_connection *in,
struct coresight_connection *out)
{
int rc = 0;
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
unsigned long flags;
bool first_enable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (atomic_read(&in->dest_refcnt) == 0) {
if (drvdata->base)
rc = dynamic_funnel_enable_hw(drvdata, in->dest_port);
if (!rc)
first_enable = true;
}
if (!rc)
atomic_inc(&in->dest_refcnt);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (first_enable)
dev_dbg(&csdev->dev, "FUNNEL inport %d enabled\n",
in->dest_port);
return rc;
}
static void dynamic_funnel_disable_hw(struct funnel_drvdata *drvdata,
int inport)
{
u32 functl;
struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL);
functl &= ~(1 << inport);
writel_relaxed(functl, drvdata->base + FUNNEL_FUNCTL);
/* Disclaim the device if none of the slaves are now active */
if (!(functl & FUNNEL_ENSx_MASK))
coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
}
static void funnel_disable(struct coresight_device *csdev,
struct coresight_connection *in,
struct coresight_connection *out)
{
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
unsigned long flags;
bool last_disable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (atomic_dec_return(&in->dest_refcnt) == 0) {
if (drvdata->base)
dynamic_funnel_disable_hw(drvdata, in->dest_port);
last_disable = true;
}
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (last_disable)
dev_dbg(&csdev->dev, "FUNNEL inport %d disabled\n",
in->dest_port);
}
static const struct coresight_ops_link funnel_link_ops = {
.enable = funnel_enable,
.disable = funnel_disable,
};
static const struct coresight_ops funnel_cs_ops = {
.link_ops = &funnel_link_ops,
};
static ssize_t priority_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->priority;
return sprintf(buf, "%#lx\n", val);
}
static ssize_t priority_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret;
unsigned long val;
struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent);
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
drvdata->priority = val;
return size;
}
static DEVICE_ATTR_RW(priority);
static u32 get_funnel_ctrl_hw(struct funnel_drvdata *drvdata)
{
u32 functl;
CS_UNLOCK(drvdata->base);
functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL);
CS_LOCK(drvdata->base);
return functl;
}
static ssize_t funnel_ctrl_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u32 val;
struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent);
pm_runtime_get_sync(dev->parent);
val = get_funnel_ctrl_hw(drvdata);
pm_runtime_put(dev->parent);
return sprintf(buf, "%#x\n", val);
}
static DEVICE_ATTR_RO(funnel_ctrl);
static struct attribute *coresight_funnel_attrs[] = {
&dev_attr_funnel_ctrl.attr,
&dev_attr_priority.attr,
NULL,
};
ATTRIBUTE_GROUPS(coresight_funnel);
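/*
 * Illustrative only: with the attributes above, a funnel's control
 * register and priority can be read and adjusted from user space
 * (device name and values are hypothetical):
 *
 *   $ cat /sys/bus/coresight/devices/funnel0/funnel_ctrl
 *   0x302
 *   $ echo 0xfac688 > /sys/bus/coresight/devices/funnel0/priority
 *
 * priority_store() parses the value as hex (kstrtoul, base 16); it is
 * written to FUNNEL_PRICTL on the next enable.
 */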
static int funnel_probe(struct device *dev, struct resource *res)
{
int ret;
void __iomem *base;
struct coresight_platform_data *pdata = NULL;
struct funnel_drvdata *drvdata;
struct coresight_desc desc = { 0 };
if (is_of_node(dev_fwnode(dev)) &&
of_device_is_compatible(dev->of_node, "arm,coresight-funnel"))
dev_warn_once(dev, "Uses OBSOLETE CoreSight funnel binding\n");
desc.name = coresight_alloc_device_name(&funnel_devs, dev);
if (!desc.name)
return -ENOMEM;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
if (ret)
return ret;
}
/*
* Map the device base for dynamic-funnel, which has been
* validated by AMBA core.
*/
if (res) {
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto out_disable_clk;
}
drvdata->base = base;
desc.groups = coresight_funnel_groups;
desc.access = CSDEV_ACCESS_IOMEM(base);
}
dev_set_drvdata(dev, drvdata);
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata)) {
ret = PTR_ERR(pdata);
goto out_disable_clk;
}
dev->platform_data = pdata;
spin_lock_init(&drvdata->spinlock);
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
desc.ops = &funnel_cs_ops;
desc.pdata = pdata;
desc.dev = dev;
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
goto out_disable_clk;
}
pm_runtime_put(dev);
ret = 0;
out_disable_clk:
if (ret && !IS_ERR_OR_NULL(drvdata->atclk))
clk_disable_unprepare(drvdata->atclk);
return ret;
}
static int funnel_remove(struct device *dev)
{
struct funnel_drvdata *drvdata = dev_get_drvdata(dev);
coresight_unregister(drvdata->csdev);
return 0;
}
#ifdef CONFIG_PM
static int funnel_runtime_suspend(struct device *dev)
{
struct funnel_drvdata *drvdata = dev_get_drvdata(dev);
if (drvdata && !IS_ERR(drvdata->atclk))
clk_disable_unprepare(drvdata->atclk);
return 0;
}
static int funnel_runtime_resume(struct device *dev)
{
struct funnel_drvdata *drvdata = dev_get_drvdata(dev);
if (drvdata && !IS_ERR(drvdata->atclk))
clk_prepare_enable(drvdata->atclk);
return 0;
}
#endif
static const struct dev_pm_ops funnel_dev_pm_ops = {
SET_RUNTIME_PM_OPS(funnel_runtime_suspend, funnel_runtime_resume, NULL)
};
static int static_funnel_probe(struct platform_device *pdev)
{
int ret;
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
	/* Static funnels do not have a programming base */
ret = funnel_probe(&pdev->dev, NULL);
if (ret) {
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
}
return ret;
}
static int static_funnel_remove(struct platform_device *pdev)
{
funnel_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
static const struct of_device_id static_funnel_match[] = {
{.compatible = "arm,coresight-static-funnel"},
{}
};
MODULE_DEVICE_TABLE(of, static_funnel_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id static_funnel_ids[] = {
{"ARMHC9FE", 0},
{},
};
MODULE_DEVICE_TABLE(acpi, static_funnel_ids);
#endif
static struct platform_driver static_funnel_driver = {
.probe = static_funnel_probe,
.remove = static_funnel_remove,
.driver = {
.name = "coresight-static-funnel",
/* THIS_MODULE is taken care of by platform_driver_register() */
.of_match_table = static_funnel_match,
.acpi_match_table = ACPI_PTR(static_funnel_ids),
.pm = &funnel_dev_pm_ops,
.suppress_bind_attrs = true,
},
};
static int dynamic_funnel_probe(struct amba_device *adev,
const struct amba_id *id)
{
return funnel_probe(&adev->dev, &adev->res);
}
static void dynamic_funnel_remove(struct amba_device *adev)
{
funnel_remove(&adev->dev);
}
static const struct amba_id dynamic_funnel_ids[] = {
{
.id = 0x000bb908,
.mask = 0x000fffff,
},
{
/* Coresight SoC-600 */
.id = 0x000bb9eb,
.mask = 0x000fffff,
},
{ 0, 0},
};
MODULE_DEVICE_TABLE(amba, dynamic_funnel_ids);
static struct amba_driver dynamic_funnel_driver = {
.drv = {
.name = "coresight-dynamic-funnel",
.owner = THIS_MODULE,
.pm = &funnel_dev_pm_ops,
.suppress_bind_attrs = true,
},
.probe = dynamic_funnel_probe,
.remove = dynamic_funnel_remove,
.id_table = dynamic_funnel_ids,
};
static int __init funnel_init(void)
{
int ret;
ret = platform_driver_register(&static_funnel_driver);
if (ret) {
pr_info("Error registering platform driver\n");
return ret;
}
ret = amba_driver_register(&dynamic_funnel_driver);
if (ret) {
pr_info("Error registering amba driver\n");
platform_driver_unregister(&static_funnel_driver);
}
return ret;
}
static void __exit funnel_exit(void)
{
platform_driver_unregister(&static_funnel_driver);
amba_driver_unregister(&dynamic_funnel_driver);
}
module_init(funnel_init);
module_exit(funnel_exit);
MODULE_AUTHOR("Pratik Patel <[email protected]>");
MODULE_AUTHOR("Mathieu Poirier <[email protected]>");
MODULE_DESCRIPTION("Arm CoreSight Funnel Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/hwtracing/coresight/coresight-funnel.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Simple kernel driver to link kernel Ftrace and an STM device
* Copyright (c) 2016, Linaro Ltd.
*
* STM Ftrace will be registered as a trace_export.
*/
#include <linux/module.h>
#include <linux/stm.h>
#include <linux/trace.h>
#define STM_FTRACE_NR_CHANNELS 1
#define STM_FTRACE_CHAN 0
static int stm_ftrace_link(struct stm_source_data *data);
static void stm_ftrace_unlink(struct stm_source_data *data);
static struct stm_ftrace {
struct stm_source_data data;
struct trace_export ftrace;
} stm_ftrace = {
.data = {
.name = "ftrace",
.nr_chans = STM_FTRACE_NR_CHANNELS,
.link = stm_ftrace_link,
.unlink = stm_ftrace_unlink,
},
};
/**
 * stm_ftrace_write() - write data to STM via 'stm_ftrace' source
 * @export: trace_export descriptor that this stm_source is hooked to
 * @buf: buffer containing the data packet
 * @len: length of the data packet
 */
static void notrace
stm_ftrace_write(struct trace_export *export, const void *buf, unsigned int len)
{
struct stm_ftrace *stm = container_of(export, struct stm_ftrace, ftrace);
/* This is called from trace system with preemption disabled */
unsigned int cpu = smp_processor_id();
stm_source_write(&stm->data, STM_FTRACE_CHAN + cpu, buf, len);
}
static int stm_ftrace_link(struct stm_source_data *data)
{
struct stm_ftrace *sf = container_of(data, struct stm_ftrace, data);
sf->ftrace.write = stm_ftrace_write;
sf->ftrace.flags = TRACE_EXPORT_FUNCTION | TRACE_EXPORT_EVENT
| TRACE_EXPORT_MARKER;
return register_ftrace_export(&sf->ftrace);
}
static void stm_ftrace_unlink(struct stm_source_data *data)
{
struct stm_ftrace *sf = container_of(data, struct stm_ftrace, data);
unregister_ftrace_export(&sf->ftrace);
}
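/*
 * Illustrative only: once an stm device is registered (device name below
 * is hypothetical), this source is linked and fed from the ftrace path:
 *
 *   $ echo dummy_stm.0 > /sys/class/stm_source/ftrace/stm_source_link
 *   $ echo function > /sys/kernel/tracing/current_tracer
 *
 * After linking, each traced event is duplicated into the STP stream on
 * the channel corresponding to the CPU that generated it.
 */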
static int __init stm_ftrace_init(void)
{
int ret;
stm_ftrace.data.nr_chans = roundup_pow_of_two(num_possible_cpus());
ret = stm_source_register_device(NULL, &stm_ftrace.data);
if (ret)
pr_err("Failed to register stm_source - ftrace.\n");
return ret;
}
static void __exit stm_ftrace_exit(void)
{
stm_source_unregister_device(&stm_ftrace.data);
}
module_init(stm_ftrace_init);
module_exit(stm_ftrace_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("stm_ftrace driver");
MODULE_AUTHOR("Chunyan Zhang <[email protected]>");
| linux-master | drivers/hwtracing/stm/ftrace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* System Trace Module (STM) infrastructure
* Copyright (c) 2014, Intel Corporation.
*
* STM class implements generic infrastructure for System Trace Module devices
* as defined in MIPI STPv2 specification.
*/
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/compat.h>
#include <linux/kdev_t.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/stm.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include "stm.h"
#include <uapi/linux/stm.h>
static unsigned int stm_core_up;
/*
* The SRCU here makes sure that STM device doesn't disappear from under a
* stm_source_write() caller, which may want to have as little overhead as
* possible.
*/
static struct srcu_struct stm_source_srcu;
static ssize_t masters_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct stm_device *stm = to_stm_device(dev);
int ret;
ret = sprintf(buf, "%u %u\n", stm->data->sw_start, stm->data->sw_end);
return ret;
}
static DEVICE_ATTR_RO(masters);
static ssize_t channels_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct stm_device *stm = to_stm_device(dev);
int ret;
ret = sprintf(buf, "%u\n", stm->data->sw_nchannels);
return ret;
}
static DEVICE_ATTR_RO(channels);
static ssize_t hw_override_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct stm_device *stm = to_stm_device(dev);
int ret;
ret = sprintf(buf, "%u\n", stm->data->hw_override);
return ret;
}
static DEVICE_ATTR_RO(hw_override);
static struct attribute *stm_attrs[] = {
&dev_attr_masters.attr,
&dev_attr_channels.attr,
&dev_attr_hw_override.attr,
NULL,
};
ATTRIBUTE_GROUPS(stm);
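/*
 * Illustrative only: these read-only attributes describe the device's
 * software-writable resources (device name and values are hypothetical):
 *
 *   $ cat /sys/class/stm/dummy_stm.0/masters
 *   16 127
 *   $ cat /sys/class/stm/dummy_stm.0/channels
 *   32
 *
 * i.e. masters 16..127, each with 32 channels, are available to
 * software writers.
 */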
static struct class stm_class = {
.name = "stm",
.dev_groups = stm_groups,
};
/**
* stm_find_device() - find stm device by name
* @buf: character buffer containing the name
*
* This is called when either policy gets assigned to an stm device or an
* stm_source device gets linked to an stm device.
*
* This grabs device's reference (get_device()) and module reference, both
* of which the calling path needs to make sure to drop with stm_put_device().
*
 * Return: stm device pointer or NULL if lookup failed.
*/
struct stm_device *stm_find_device(const char *buf)
{
struct stm_device *stm;
struct device *dev;
if (!stm_core_up)
return NULL;
dev = class_find_device_by_name(&stm_class, buf);
if (!dev)
return NULL;
stm = to_stm_device(dev);
if (!try_module_get(stm->owner)) {
/* matches class_find_device() above */
put_device(dev);
return NULL;
}
return stm;
}
/**
* stm_put_device() - drop references on the stm device
* @stm: stm device, previously acquired by stm_find_device()
*
* This drops the module reference and device reference taken by
* stm_find_device() or stm_char_open().
*/
void stm_put_device(struct stm_device *stm)
{
module_put(stm->owner);
put_device(&stm->dev);
}
/*
* Internally we only care about software-writable masters here, that is the
 * ones in the range [stm_data->sw_start..stm_data->sw_end], however we need
* original master numbers to be visible externally, since they are the ones
* that will appear in the STP stream. Thus, the internal bookkeeping uses
* $master - stm_data->sw_start to reference master descriptors and such.
*/
#define __stm_master(_s, _m) \
((_s)->masters[(_m) - (_s)->data->sw_start])
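/*
 * Worked example (hypothetical numbers): with sw_start == 16 and
 * sw_end == 127, master 16 is stored at masters[0] and master 127 at
 * masters[111]; stm_master() below rejects anything outside that range.
 */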
static inline struct stp_master *
stm_master(struct stm_device *stm, unsigned int idx)
{
if (idx < stm->data->sw_start || idx > stm->data->sw_end)
return NULL;
return __stm_master(stm, idx);
}
static int stp_master_alloc(struct stm_device *stm, unsigned int idx)
{
struct stp_master *master;
master = kzalloc(struct_size(master, chan_map,
BITS_TO_LONGS(stm->data->sw_nchannels)),
GFP_ATOMIC);
if (!master)
return -ENOMEM;
master->nr_free = stm->data->sw_nchannels;
__stm_master(stm, idx) = master;
return 0;
}
static void stp_master_free(struct stm_device *stm, unsigned int idx)
{
struct stp_master *master = stm_master(stm, idx);
if (!master)
return;
__stm_master(stm, idx) = NULL;
kfree(master);
}
static void stm_output_claim(struct stm_device *stm, struct stm_output *output)
{
struct stp_master *master = stm_master(stm, output->master);
lockdep_assert_held(&stm->mc_lock);
lockdep_assert_held(&output->lock);
if (WARN_ON_ONCE(master->nr_free < output->nr_chans))
return;
bitmap_allocate_region(&master->chan_map[0], output->channel,
ilog2(output->nr_chans));
master->nr_free -= output->nr_chans;
}
static void
stm_output_disclaim(struct stm_device *stm, struct stm_output *output)
{
struct stp_master *master = stm_master(stm, output->master);
lockdep_assert_held(&stm->mc_lock);
lockdep_assert_held(&output->lock);
bitmap_release_region(&master->chan_map[0], output->channel,
ilog2(output->nr_chans));
master->nr_free += output->nr_chans;
output->nr_chans = 0;
}
/*
* This is like bitmap_find_free_region(), except it can ignore @start bits
* at the beginning.
*/
static int find_free_channels(unsigned long *bitmap, unsigned int start,
unsigned int end, unsigned int width)
{
unsigned int pos;
int i;
for (pos = start; pos < end + 1; pos = ALIGN(pos, width)) {
pos = find_next_zero_bit(bitmap, end + 1, pos);
if (pos + width > end + 1)
break;
if (pos & (width - 1))
continue;
for (i = 1; i < width && !test_bit(pos + i, bitmap); i++)
;
if (i == width)
return pos;
/* step over [pos..pos+i) to continue search */
pos += i;
}
return -1;
}
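/*
 * Worked example (hypothetical bitmap): searching 0b00001101 for
 * width == 2 with start == 0, end == 7: bit 0 is busy, bit 1 is free
 * but misaligned for width 2, bits 2-3 are busy, so the first aligned
 * free region starts at bit 4 and 4 is returned.
 */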
static int
stm_find_master_chan(struct stm_device *stm, unsigned int width,
unsigned int *mstart, unsigned int mend,
unsigned int *cstart, unsigned int cend)
{
struct stp_master *master;
unsigned int midx;
int pos, err;
for (midx = *mstart; midx <= mend; midx++) {
if (!stm_master(stm, midx)) {
err = stp_master_alloc(stm, midx);
if (err)
return err;
}
master = stm_master(stm, midx);
if (!master->nr_free)
continue;
pos = find_free_channels(master->chan_map, *cstart, cend,
width);
if (pos < 0)
continue;
*mstart = midx;
*cstart = pos;
return 0;
}
return -ENOSPC;
}
static int stm_output_assign(struct stm_device *stm, unsigned int width,
struct stp_policy_node *policy_node,
struct stm_output *output)
{
unsigned int midx, cidx, mend, cend;
int ret = -EINVAL;
if (width > stm->data->sw_nchannels)
return -EINVAL;
/* We no longer accept policy_node==NULL here */
if (WARN_ON_ONCE(!policy_node))
return -EINVAL;
/*
* Also, the caller holds reference to policy_node, so it won't
* disappear on us.
*/
stp_policy_node_get_ranges(policy_node, &midx, &mend, &cidx, &cend);
spin_lock(&stm->mc_lock);
spin_lock(&output->lock);
/* output is already assigned -- shouldn't happen */
if (WARN_ON_ONCE(output->nr_chans))
goto unlock;
ret = stm_find_master_chan(stm, width, &midx, mend, &cidx, cend);
if (ret < 0)
goto unlock;
output->master = midx;
output->channel = cidx;
output->nr_chans = width;
if (stm->pdrv->output_open) {
void *priv = stp_policy_node_priv(policy_node);
if (WARN_ON_ONCE(!priv))
goto unlock;
/* configfs subsys mutex is held by the caller */
ret = stm->pdrv->output_open(priv, output);
if (ret)
goto unlock;
}
stm_output_claim(stm, output);
dev_dbg(&stm->dev, "assigned %u:%u (+%u)\n", midx, cidx, width);
ret = 0;
unlock:
if (ret)
output->nr_chans = 0;
spin_unlock(&output->lock);
spin_unlock(&stm->mc_lock);
return ret;
}
static void stm_output_free(struct stm_device *stm, struct stm_output *output)
{
spin_lock(&stm->mc_lock);
spin_lock(&output->lock);
if (output->nr_chans)
stm_output_disclaim(stm, output);
if (stm->pdrv && stm->pdrv->output_close)
stm->pdrv->output_close(output);
spin_unlock(&output->lock);
spin_unlock(&stm->mc_lock);
}
static void stm_output_init(struct stm_output *output)
{
spin_lock_init(&output->lock);
}
static int major_match(struct device *dev, const void *data)
{
unsigned int major = *(unsigned int *)data;
return MAJOR(dev->devt) == major;
}
/*
* Framing protocol management
* Modules can implement STM protocol drivers and (un-)register them
* with the STM class framework.
*/
static struct list_head stm_pdrv_head;
static struct mutex stm_pdrv_mutex;
struct stm_pdrv_entry {
struct list_head entry;
const struct stm_protocol_driver *pdrv;
const struct config_item_type *node_type;
};
static const struct stm_pdrv_entry *
__stm_lookup_protocol(const char *name)
{
struct stm_pdrv_entry *pe;
/*
* If no name is given (NULL or ""), fall back to "p_basic".
*/
if (!name || !*name)
name = "p_basic";
list_for_each_entry(pe, &stm_pdrv_head, entry) {
if (!strcmp(name, pe->pdrv->name))
return pe;
}
return NULL;
}
int stm_register_protocol(const struct stm_protocol_driver *pdrv)
{
struct stm_pdrv_entry *pe = NULL;
int ret = -ENOMEM;
mutex_lock(&stm_pdrv_mutex);
if (__stm_lookup_protocol(pdrv->name)) {
ret = -EEXIST;
goto unlock;
}
pe = kzalloc(sizeof(*pe), GFP_KERNEL);
if (!pe)
goto unlock;
if (pdrv->policy_attr) {
pe->node_type = get_policy_node_type(pdrv->policy_attr);
if (!pe->node_type)
goto unlock;
}
list_add_tail(&pe->entry, &stm_pdrv_head);
pe->pdrv = pdrv;
ret = 0;
unlock:
mutex_unlock(&stm_pdrv_mutex);
if (ret)
kfree(pe);
return ret;
}
EXPORT_SYMBOL_GPL(stm_register_protocol);
void stm_unregister_protocol(const struct stm_protocol_driver *pdrv)
{
struct stm_pdrv_entry *pe, *iter;
mutex_lock(&stm_pdrv_mutex);
list_for_each_entry_safe(pe, iter, &stm_pdrv_head, entry) {
if (pe->pdrv == pdrv) {
list_del(&pe->entry);
if (pe->node_type) {
kfree(pe->node_type->ct_attrs);
kfree(pe->node_type);
}
kfree(pe);
break;
}
}
mutex_unlock(&stm_pdrv_mutex);
}
EXPORT_SYMBOL_GPL(stm_unregister_protocol);
static bool stm_get_protocol(const struct stm_protocol_driver *pdrv)
{
return try_module_get(pdrv->owner);
}
void stm_put_protocol(const struct stm_protocol_driver *pdrv)
{
module_put(pdrv->owner);
}
int stm_lookup_protocol(const char *name,
const struct stm_protocol_driver **pdrv,
const struct config_item_type **node_type)
{
const struct stm_pdrv_entry *pe;
mutex_lock(&stm_pdrv_mutex);
pe = __stm_lookup_protocol(name);
if (pe && pe->pdrv && stm_get_protocol(pe->pdrv)) {
*pdrv = pe->pdrv;
*node_type = pe->node_type;
}
mutex_unlock(&stm_pdrv_mutex);
return pe ? 0 : -ENOENT;
}
static int stm_char_open(struct inode *inode, struct file *file)
{
struct stm_file *stmf;
struct device *dev;
unsigned int major = imajor(inode);
int err = -ENOMEM;
dev = class_find_device(&stm_class, NULL, &major, major_match);
if (!dev)
return -ENODEV;
stmf = kzalloc(sizeof(*stmf), GFP_KERNEL);
if (!stmf)
goto err_put_device;
err = -ENODEV;
stm_output_init(&stmf->output);
stmf->stm = to_stm_device(dev);
if (!try_module_get(stmf->stm->owner))
goto err_free;
file->private_data = stmf;
return nonseekable_open(inode, file);
err_free:
kfree(stmf);
err_put_device:
/* matches class_find_device() above */
put_device(dev);
return err;
}
static int stm_char_release(struct inode *inode, struct file *file)
{
struct stm_file *stmf = file->private_data;
struct stm_device *stm = stmf->stm;
if (stm->data->unlink)
stm->data->unlink(stm->data, stmf->output.master,
stmf->output.channel);
stm_output_free(stm, &stmf->output);
/*
* matches the stm_char_open()'s
* class_find_device() + try_module_get()
*/
stm_put_device(stm);
kfree(stmf);
return 0;
}
static int
stm_assign_first_policy(struct stm_device *stm, struct stm_output *output,
char **ids, unsigned int width)
{
struct stp_policy_node *pn;
int err, n;
/*
* On success, stp_policy_node_lookup() will return holding the
* configfs subsystem mutex, which is then released in
* stp_policy_node_put(). This allows the pdrv->output_open() in
* stm_output_assign() to serialize against the attribute accessors.
*/
for (n = 0, pn = NULL; ids[n] && !pn; n++)
pn = stp_policy_node_lookup(stm, ids[n]);
if (!pn)
return -EINVAL;
err = stm_output_assign(stm, width, pn, output);
stp_policy_node_put(pn);
return err;
}
/**
* stm_data_write() - send the given payload as data packets
* @data: stm driver's data
* @m: STP master
* @c: STP channel
* @ts_first: timestamp the first packet
* @buf: data payload buffer
* @count: data payload size
*/
ssize_t notrace stm_data_write(struct stm_data *data, unsigned int m,
unsigned int c, bool ts_first, const void *buf,
size_t count)
{
unsigned int flags = ts_first ? STP_PACKET_TIMESTAMPED : 0;
ssize_t sz;
size_t pos;
for (pos = 0, sz = 0; pos < count; pos += sz) {
sz = min_t(unsigned int, count - pos, 8);
sz = data->packet(data, m, c, STP_PACKET_DATA, flags, sz,
&((u8 *)buf)[pos]);
if (sz <= 0)
break;
if (ts_first) {
flags = 0;
ts_first = false;
}
}
return sz < 0 ? sz : pos;
}
EXPORT_SYMBOL_GPL(stm_data_write);
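/*
 * Minimal sketch of how a protocol driver might use stm_data_write()
 * from its ->write() callback (names are illustrative, error handling
 * elided):
 *
 *	static ssize_t my_pdrv_write(struct stm_data *data,
 *				     struct stm_output *output,
 *				     unsigned int chan, const char *buf,
 *				     size_t count)
 *	{
 *		return stm_data_write(data, output->master,
 *				      output->channel + chan,
 *				      true, buf, count);
 *	}
 */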
static ssize_t notrace
stm_write(struct stm_device *stm, struct stm_output *output,
	  unsigned int chan, const char *buf, size_t count)
{
	/* stm->pdrv is serialized against policy_mutex */
	if (!stm->pdrv)
		return -ENODEV;
	return stm->pdrv->write(stm->data, output, chan, buf, count);
}
static ssize_t stm_char_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct stm_file *stmf = file->private_data;
struct stm_device *stm = stmf->stm;
char *kbuf;
int err;
if (count + 1 > PAGE_SIZE)
count = PAGE_SIZE - 1;
/*
* If no m/c have been assigned to this writer up to this
* point, try to use the task name and "default" policy entries.
*/
if (!stmf->output.nr_chans) {
char comm[sizeof(current->comm)];
char *ids[] = { comm, "default", NULL };
get_task_comm(comm, current);
err = stm_assign_first_policy(stmf->stm, &stmf->output, ids, 1);
/*
* EBUSY means that somebody else just assigned this
* output, which is just fine for write()
*/
if (err)
return err;
}
kbuf = kmalloc(count + 1, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
err = copy_from_user(kbuf, buf, count);
if (err) {
kfree(kbuf);
return -EFAULT;
}
pm_runtime_get_sync(&stm->dev);
count = stm_write(stm, &stmf->output, 0, kbuf, count);
pm_runtime_mark_last_busy(&stm->dev);
pm_runtime_put_autosuspend(&stm->dev);
kfree(kbuf);
return count;
}
static void stm_mmap_open(struct vm_area_struct *vma)
{
struct stm_file *stmf = vma->vm_file->private_data;
struct stm_device *stm = stmf->stm;
pm_runtime_get(&stm->dev);
}
static void stm_mmap_close(struct vm_area_struct *vma)
{
struct stm_file *stmf = vma->vm_file->private_data;
struct stm_device *stm = stmf->stm;
pm_runtime_mark_last_busy(&stm->dev);
pm_runtime_put_autosuspend(&stm->dev);
}
static const struct vm_operations_struct stm_mmap_vmops = {
.open = stm_mmap_open,
.close = stm_mmap_close,
};
static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
{
struct stm_file *stmf = file->private_data;
struct stm_device *stm = stmf->stm;
unsigned long size, phys;
if (!stm->data->mmio_addr)
return -EOPNOTSUPP;
if (vma->vm_pgoff)
return -EINVAL;
size = vma->vm_end - vma->vm_start;
if (stmf->output.nr_chans * stm->data->sw_mmiosz != size)
return -EINVAL;
phys = stm->data->mmio_addr(stm->data, stmf->output.master,
stmf->output.channel,
stmf->output.nr_chans);
if (!phys)
return -EINVAL;
pm_runtime_get_sync(&stm->dev);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &stm_mmap_vmops;
vm_iomap_memory(vma, phys, size);
return 0;
}
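/*
 * Minimal user space sketch of the above (illustrative, no error
 * handling): once a policy has been assigned to the writer, the
 * channel MMIO window can be mapped and written to directly:
 *
 *	sz = width * sw_mmiosz;	// must match the assigned channels
 *	p = mmap(NULL, sz, PROT_WRITE, MAP_SHARED, fd, 0);
 *	*(u32 *)p = value;	// produces STP data packets
 */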
static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
{
struct stm_device *stm = stmf->stm;
struct stp_policy_id *id;
char *ids[] = { NULL, NULL };
int ret = -EINVAL, wlimit = 1;
u32 size;
if (stmf->output.nr_chans)
return -EBUSY;
if (copy_from_user(&size, arg, sizeof(size)))
return -EFAULT;
if (size < sizeof(*id) || size >= PATH_MAX + sizeof(*id))
return -EINVAL;
/*
* size + 1 to make sure the .id string at the bottom is terminated,
* which is also why memdup_user() is not useful here
*/
id = kzalloc(size + 1, GFP_KERNEL);
if (!id)
return -ENOMEM;
if (copy_from_user(id, arg, size)) {
ret = -EFAULT;
goto err_free;
}
if (id->__reserved_0 || id->__reserved_1)
goto err_free;
if (stm->data->sw_mmiosz)
wlimit = PAGE_SIZE / stm->data->sw_mmiosz;
if (id->width < 1 || id->width > wlimit)
goto err_free;
ids[0] = id->id;
ret = stm_assign_first_policy(stmf->stm, &stmf->output, ids,
id->width);
if (ret)
goto err_free;
if (stm->data->link)
ret = stm->data->link(stm->data, stmf->output.master,
stmf->output.channel);
if (ret)
stm_output_free(stmf->stm, &stmf->output);
err_free:
kfree(id);
return ret;
}
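/*
 * Minimal user space sketch of the above (illustrative, no error
 * handling): pick a policy node by name and let the stm core assign a
 * master/channel range of the requested width:
 *
 *	struct stp_policy_id *id;
 *	size_t sz = sizeof(*id) + strlen("my-app") + 1;
 *
 *	id = calloc(1, sz);
 *	id->size = sz;
 *	id->width = 1;
 *	strcpy(id->id, "my-app");
 *	ioctl(fd, STP_POLICY_ID_SET, id);
 *	// on success, id->master/id->channel hold the assignment
 */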
static int stm_char_policy_get_ioctl(struct stm_file *stmf, void __user *arg)
{
struct stp_policy_id id = {
.size = sizeof(id),
.master = stmf->output.master,
.channel = stmf->output.channel,
.width = stmf->output.nr_chans,
.__reserved_0 = 0,
.__reserved_1 = 0,
};
return copy_to_user(arg, &id, id.size) ? -EFAULT : 0;
}
static long
stm_char_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct stm_file *stmf = file->private_data;
struct stm_data *stm_data = stmf->stm->data;
int err = -ENOTTY;
u64 options;
switch (cmd) {
case STP_POLICY_ID_SET:
err = stm_char_policy_set_ioctl(stmf, (void __user *)arg);
if (err)
return err;
return stm_char_policy_get_ioctl(stmf, (void __user *)arg);
case STP_POLICY_ID_GET:
return stm_char_policy_get_ioctl(stmf, (void __user *)arg);
case STP_SET_OPTIONS:
if (copy_from_user(&options, (u64 __user *)arg, sizeof(u64)))
return -EFAULT;
if (stm_data->set_options)
err = stm_data->set_options(stm_data,
stmf->output.master,
stmf->output.channel,
stmf->output.nr_chans,
options);
break;
default:
break;
}
return err;
}
static const struct file_operations stm_fops = {
.open = stm_char_open,
.release = stm_char_release,
.write = stm_char_write,
.mmap = stm_char_mmap,
.unlocked_ioctl = stm_char_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.llseek = no_llseek,
};
static void stm_device_release(struct device *dev)
{
struct stm_device *stm = to_stm_device(dev);
vfree(stm);
}
int stm_register_device(struct device *parent, struct stm_data *stm_data,
struct module *owner)
{
struct stm_device *stm;
unsigned int nmasters;
int err = -ENOMEM;
if (!stm_core_up)
return -EPROBE_DEFER;
if (!stm_data->packet || !stm_data->sw_nchannels)
return -EINVAL;
nmasters = stm_data->sw_end - stm_data->sw_start + 1;
stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *));
if (!stm)
return -ENOMEM;
stm->major = register_chrdev(0, stm_data->name, &stm_fops);
if (stm->major < 0)
goto err_free;
device_initialize(&stm->dev);
stm->dev.devt = MKDEV(stm->major, 0);
stm->dev.class = &stm_class;
stm->dev.parent = parent;
stm->dev.release = stm_device_release;
mutex_init(&stm->link_mutex);
spin_lock_init(&stm->link_lock);
INIT_LIST_HEAD(&stm->link_list);
/* initialize the object before it is accessible via sysfs */
spin_lock_init(&stm->mc_lock);
mutex_init(&stm->policy_mutex);
stm->sw_nmasters = nmasters;
stm->owner = owner;
stm->data = stm_data;
stm_data->stm = stm;
err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name);
if (err)
goto err_device;
err = device_add(&stm->dev);
if (err)
goto err_device;
/*
* Use delayed autosuspend to avoid bouncing back and forth
* on recurring character device writes, with the initial
* delay time of 2 seconds.
*/
pm_runtime_no_callbacks(&stm->dev);
pm_runtime_use_autosuspend(&stm->dev);
pm_runtime_set_autosuspend_delay(&stm->dev, 2000);
pm_runtime_set_suspended(&stm->dev);
pm_runtime_enable(&stm->dev);
return 0;
err_device:
unregister_chrdev(stm->major, stm_data->name);
/* matches device_initialize() above */
put_device(&stm->dev);
err_free:
vfree(stm);
return err;
}
EXPORT_SYMBOL_GPL(stm_register_device);
static int __stm_source_link_drop(struct stm_source_device *src,
struct stm_device *stm);
void stm_unregister_device(struct stm_data *stm_data)
{
struct stm_device *stm = stm_data->stm;
struct stm_source_device *src, *iter;
int i, ret;
pm_runtime_dont_use_autosuspend(&stm->dev);
pm_runtime_disable(&stm->dev);
mutex_lock(&stm->link_mutex);
list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) {
ret = __stm_source_link_drop(src, stm);
		/*
		 * The src <-> stm link must not change under the same
		 * stm::link_mutex, so complain loudly if it has; in that
		 * case ret != 0 means this src is not connected to this
		 * stm, so it should otherwise be safe to proceed with
		 * the tear-down of stm.
		 */
WARN_ON_ONCE(ret);
}
mutex_unlock(&stm->link_mutex);
synchronize_srcu(&stm_source_srcu);
unregister_chrdev(stm->major, stm_data->name);
mutex_lock(&stm->policy_mutex);
if (stm->policy)
stp_policy_unbind(stm->policy);
mutex_unlock(&stm->policy_mutex);
for (i = stm->data->sw_start; i <= stm->data->sw_end; i++)
stp_master_free(stm, i);
device_unregister(&stm->dev);
stm_data->stm = NULL;
}
EXPORT_SYMBOL_GPL(stm_unregister_device);
/*
* stm::link_list access serialization uses a spinlock and a mutex; holding
* either of them guarantees that the list is stable; modification requires
* holding both of them.
*
* Lock ordering is as follows:
* stm::link_mutex
* stm::link_lock
* src::link_lock
*/
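/*
 * A minimal sketch of the resulting acquisition pattern, as used by
 * stm_source_link_add() and __stm_source_link_drop() below:
 *
 *	mutex_lock(&stm->link_mutex);
 *	spin_lock(&stm->link_lock);
 *	spin_lock(&src->link_lock);
 *	...modify src->link and stm->link_list...
 *	spin_unlock(&src->link_lock);
 *	spin_unlock(&stm->link_lock);
 *	mutex_unlock(&stm->link_mutex);
 */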
/**
* stm_source_link_add() - connect an stm_source device to an stm device
* @src: stm_source device
* @stm: stm device
*
* This function establishes a link from stm_source to an stm device so that
* the former can send out trace data to the latter.
*
* Return: 0 on success, -errno otherwise.
*/
static int stm_source_link_add(struct stm_source_device *src,
struct stm_device *stm)
{
char *ids[] = { NULL, "default", NULL };
int err = -ENOMEM;
mutex_lock(&stm->link_mutex);
spin_lock(&stm->link_lock);
spin_lock(&src->link_lock);
/* src->link is dereferenced under stm_source_srcu but not the list */
rcu_assign_pointer(src->link, stm);
list_add_tail(&src->link_entry, &stm->link_list);
spin_unlock(&src->link_lock);
spin_unlock(&stm->link_lock);
mutex_unlock(&stm->link_mutex);
ids[0] = kstrdup(src->data->name, GFP_KERNEL);
if (!ids[0])
goto fail_detach;
err = stm_assign_first_policy(stm, &src->output, ids,
src->data->nr_chans);
kfree(ids[0]);
if (err)
goto fail_detach;
/* this is to notify the STM device that a new link has been made */
if (stm->data->link)
err = stm->data->link(stm->data, src->output.master,
src->output.channel);
if (err)
goto fail_free_output;
/* this is to let the source carry out all necessary preparations */
if (src->data->link)
src->data->link(src->data);
return 0;
fail_free_output:
stm_output_free(stm, &src->output);
fail_detach:
mutex_lock(&stm->link_mutex);
spin_lock(&stm->link_lock);
spin_lock(&src->link_lock);
rcu_assign_pointer(src->link, NULL);
list_del_init(&src->link_entry);
spin_unlock(&src->link_lock);
spin_unlock(&stm->link_lock);
mutex_unlock(&stm->link_mutex);
return err;
}
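/*
 * Illustrative only: user space normally reaches this path through the
 * stm_source_link device attribute (device names are hypothetical):
 *
 *   $ echo dummy_stm.0 > /sys/class/stm_source/console/stm_source_link
 *
 * which ends up in stm_source_link_store() below.
 */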
/**
* __stm_source_link_drop() - detach stm_source from an stm device
* @src: stm_source device
* @stm: stm device
*
* If @stm is @src::link, disconnect them from one another and put the
* reference on the @stm device.
*
* Caller must hold stm::link_mutex.
*/
static int __stm_source_link_drop(struct stm_source_device *src,
struct stm_device *stm)
{
struct stm_device *link;
int ret = 0;
lockdep_assert_held(&stm->link_mutex);
/* for stm::link_list modification, we hold both mutex and spinlock */
spin_lock(&stm->link_lock);
spin_lock(&src->link_lock);
link = srcu_dereference_check(src->link, &stm_source_srcu, 1);
/*
* The linked device may have changed since we last looked, because
* we weren't holding the src::link_lock back then; if this is the
* case, tell the caller to retry.
*/
if (link != stm) {
ret = -EAGAIN;
goto unlock;
}
stm_output_free(link, &src->output);
list_del_init(&src->link_entry);
pm_runtime_mark_last_busy(&link->dev);
pm_runtime_put_autosuspend(&link->dev);
/* matches stm_find_device() from stm_source_link_store() */
stm_put_device(link);
rcu_assign_pointer(src->link, NULL);
unlock:
spin_unlock(&src->link_lock);
spin_unlock(&stm->link_lock);
/*
* Call the unlink callbacks for both source and stm, when we know
* that we have actually performed the unlinking.
*/
if (!ret) {
if (src->data->unlink)
src->data->unlink(src->data);
if (stm->data->unlink)
stm->data->unlink(stm->data, src->output.master,
src->output.channel);
}
return ret;
}
/**
* stm_source_link_drop() - detach stm_source from its stm device
* @src: stm_source device
*
 * Unlinking means disconnecting from the source's STM device; after this,
 * writes will fail until the source is linked to a new STM device.
*
* This will happen on "stm_source_link" sysfs attribute write to undo
* the existing link (if any), or on linked STM device's de-registration.
*/
static void stm_source_link_drop(struct stm_source_device *src)
{
struct stm_device *stm;
int idx, ret;
retry:
idx = srcu_read_lock(&stm_source_srcu);
/*
* The stm device will be valid for the duration of this
* read section, but the link may change before we grab
* the src::link_lock in __stm_source_link_drop().
*/
stm = srcu_dereference(src->link, &stm_source_srcu);
ret = 0;
if (stm) {
mutex_lock(&stm->link_mutex);
ret = __stm_source_link_drop(src, stm);
mutex_unlock(&stm->link_mutex);
}
srcu_read_unlock(&stm_source_srcu, idx);
/* if it did change, retry */
if (ret == -EAGAIN)
goto retry;
}
static ssize_t stm_source_link_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct stm_source_device *src = to_stm_source_device(dev);
struct stm_device *stm;
int idx, ret;
idx = srcu_read_lock(&stm_source_srcu);
stm = srcu_dereference(src->link, &stm_source_srcu);
ret = sprintf(buf, "%s\n",
stm ? dev_name(&stm->dev) : "<none>");
srcu_read_unlock(&stm_source_srcu, idx);
return ret;
}
static ssize_t stm_source_link_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct stm_source_device *src = to_stm_source_device(dev);
struct stm_device *link;
int err;
stm_source_link_drop(src);
link = stm_find_device(buf);
if (!link)
return -EINVAL;
pm_runtime_get(&link->dev);
err = stm_source_link_add(src, link);
if (err) {
pm_runtime_put_autosuspend(&link->dev);
/* matches the stm_find_device() above */
stm_put_device(link);
}
return err ? : count;
}
static DEVICE_ATTR_RW(stm_source_link);
static struct attribute *stm_source_attrs[] = {
&dev_attr_stm_source_link.attr,
NULL,
};
ATTRIBUTE_GROUPS(stm_source);
static struct class stm_source_class = {
.name = "stm_source",
.dev_groups = stm_source_groups,
};
static void stm_source_device_release(struct device *dev)
{
struct stm_source_device *src = to_stm_source_device(dev);
kfree(src);
}
/**
* stm_source_register_device() - register an stm_source device
* @parent: parent device
* @data: device description structure
*
* This will create a device of stm_source class that can write
* data to an stm device once linked.
*
* Return: 0 on success, -errno otherwise.
*/
int stm_source_register_device(struct device *parent,
struct stm_source_data *data)
{
struct stm_source_device *src;
int err;
if (!stm_core_up)
return -EPROBE_DEFER;
src = kzalloc(sizeof(*src), GFP_KERNEL);
if (!src)
return -ENOMEM;
device_initialize(&src->dev);
src->dev.class = &stm_source_class;
src->dev.parent = parent;
src->dev.release = stm_source_device_release;
err = kobject_set_name(&src->dev.kobj, "%s", data->name);
if (err)
goto err;
pm_runtime_no_callbacks(&src->dev);
pm_runtime_forbid(&src->dev);
err = device_add(&src->dev);
if (err)
goto err;
stm_output_init(&src->output);
spin_lock_init(&src->link_lock);
INIT_LIST_HEAD(&src->link_entry);
src->data = data;
data->src = src;
return 0;
err:
put_device(&src->dev);
return err;
}
EXPORT_SYMBOL_GPL(stm_source_register_device);
/**
* stm_source_unregister_device() - unregister an stm_source device
* @data: device description that was used to register the device
*
* This will remove a previously created stm_source device from the system.
*/
void stm_source_unregister_device(struct stm_source_data *data)
{
struct stm_source_device *src = data->src;
stm_source_link_drop(src);
device_unregister(&src->dev);
}
EXPORT_SYMBOL_GPL(stm_source_unregister_device);
int notrace stm_source_write(struct stm_source_data *data,
unsigned int chan,
const char *buf, size_t count)
{
struct stm_source_device *src = data->src;
struct stm_device *stm;
int idx;
if (!src->output.nr_chans)
return -ENODEV;
if (chan >= src->output.nr_chans)
return -EINVAL;
idx = srcu_read_lock(&stm_source_srcu);
stm = srcu_dereference(src->link, &stm_source_srcu);
if (stm)
count = stm_write(stm, &src->output, chan, buf, count);
else
count = -ENODEV;
srcu_read_unlock(&stm_source_srcu, idx);
return count;
}
EXPORT_SYMBOL_GPL(stm_source_write);
static int __init stm_core_init(void)
{
int err;
err = class_register(&stm_class);
if (err)
return err;
err = class_register(&stm_source_class);
if (err)
goto err_stm;
err = stp_configfs_init();
if (err)
goto err_src;
init_srcu_struct(&stm_source_srcu);
INIT_LIST_HEAD(&stm_pdrv_head);
mutex_init(&stm_pdrv_mutex);
	/*
	 * To avoid requiring existing users to explicitly load yet
	 * another module, request the default protocol driver here.
	 */
if (IS_ENABLED(CONFIG_STM_PROTO_BASIC))
(void)request_module_nowait("stm_p_basic");
stm_core_up++;
return 0;
err_src:
class_unregister(&stm_source_class);
err_stm:
class_unregister(&stm_class);
return err;
}
module_init(stm_core_init);
static void __exit stm_core_exit(void)
{
cleanup_srcu_struct(&stm_source_srcu);
class_unregister(&stm_source_class);
class_unregister(&stm_class);
stp_configfs_exit();
}
module_exit(stm_core_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("System Trace Module device class");
MODULE_AUTHOR("Alexander Shishkin <[email protected]>");
| linux-master | drivers/hwtracing/stm/core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Simple kernel console driver for STM devices
* Copyright (c) 2014, Intel Corporation.
*
* STM console will send kernel messages over STM devices to a trace host.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/stm.h>
static int stm_console_link(struct stm_source_data *data);
static void stm_console_unlink(struct stm_source_data *data);
static struct stm_console {
struct stm_source_data data;
struct console console;
} stm_console = {
.data = {
.name = "console",
.nr_chans = 1,
.link = stm_console_link,
.unlink = stm_console_unlink,
},
};
static void
stm_console_write(struct console *con, const char *buf, unsigned len)
{
struct stm_console *sc = container_of(con, struct stm_console, console);
stm_source_write(&sc->data, 0, buf, len);
}
static int stm_console_link(struct stm_source_data *data)
{
struct stm_console *sc = container_of(data, struct stm_console, data);
strcpy(sc->console.name, "stm_console");
sc->console.write = stm_console_write;
sc->console.flags = CON_ENABLED | CON_PRINTBUFFER;
register_console(&sc->console);
return 0;
}
static void stm_console_unlink(struct stm_source_data *data)
{
struct stm_console *sc = container_of(data, struct stm_console, data);
unregister_console(&sc->console);
}
static int stm_console_init(void)
{
return stm_source_register_device(NULL, &stm_console.data);
}
static void stm_console_exit(void)
{
stm_source_unregister_device(&stm_console.data);
}
module_init(stm_console_init);
module_exit(stm_console_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("stm_console driver");
MODULE_AUTHOR("Alexander Shishkin <[email protected]>");
| linux-master | drivers/hwtracing/stm/console.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MIPI SyS-T framing protocol for STM devices.
* Copyright (c) 2018, Intel Corporation.
*/
#include <linux/configfs.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/stm.h>
#include "stm.h"
enum sys_t_message_type {
MIPI_SYST_TYPE_BUILD = 0,
MIPI_SYST_TYPE_SHORT32,
MIPI_SYST_TYPE_STRING,
MIPI_SYST_TYPE_CATALOG,
MIPI_SYST_TYPE_RAW = 6,
MIPI_SYST_TYPE_SHORT64,
MIPI_SYST_TYPE_CLOCK,
};
enum sys_t_message_severity {
MIPI_SYST_SEVERITY_MAX = 0,
MIPI_SYST_SEVERITY_FATAL,
MIPI_SYST_SEVERITY_ERROR,
MIPI_SYST_SEVERITY_WARNING,
MIPI_SYST_SEVERITY_INFO,
MIPI_SYST_SEVERITY_USER1,
MIPI_SYST_SEVERITY_USER2,
MIPI_SYST_SEVERITY_DEBUG,
};
enum sys_t_message_build_subtype {
MIPI_SYST_BUILD_ID_COMPACT32 = 0,
MIPI_SYST_BUILD_ID_COMPACT64,
MIPI_SYST_BUILD_ID_LONG,
};
enum sys_t_message_clock_subtype {
MIPI_SYST_CLOCK_TRANSPORT_SYNC = 1,
};
enum sys_t_message_string_subtype {
MIPI_SYST_STRING_GENERIC = 1,
MIPI_SYST_STRING_FUNCTIONENTER,
MIPI_SYST_STRING_FUNCTIONEXIT,
MIPI_SYST_STRING_INVALIDPARAM = 5,
MIPI_SYST_STRING_ASSERT = 7,
MIPI_SYST_STRING_PRINTF_32 = 11,
MIPI_SYST_STRING_PRINTF_64 = 12,
};
#define MIPI_SYST_TYPE(t) ((u32)(MIPI_SYST_TYPE_ ## t))
#define MIPI_SYST_SEVERITY(s) ((u32)(MIPI_SYST_SEVERITY_ ## s) << 4)
#define MIPI_SYST_OPT_LOC BIT(8)
#define MIPI_SYST_OPT_LEN BIT(9)
#define MIPI_SYST_OPT_CHK BIT(10)
#define MIPI_SYST_OPT_TS BIT(11)
#define MIPI_SYST_UNIT(u) ((u32)(u) << 12)
#define MIPI_SYST_ORIGIN(o) ((u32)(o) << 16)
#define MIPI_SYST_OPT_GUID BIT(23)
#define MIPI_SYST_SUBTYPE(s) ((u32)(MIPI_SYST_ ## s) << 24)
#define MIPI_SYST_UNITLARGE(u)	(MIPI_SYST_UNIT((u) & 0xf) |	\
				 MIPI_SYST_ORIGIN((u) >> 4))
#define MIPI_SYST_TYPES(t, s) (MIPI_SYST_TYPE(t) | \
MIPI_SYST_SUBTYPE(t ## _ ## s))
#define DATA_HEADER (MIPI_SYST_TYPES(STRING, GENERIC) | \
MIPI_SYST_SEVERITY(INFO) | \
MIPI_SYST_OPT_GUID)
#define CLOCK_SYNC_HEADER (MIPI_SYST_TYPES(CLOCK, TRANSPORT_SYNC) | \
MIPI_SYST_SEVERITY(MAX))
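/*
 * Worked example: DATA_HEADER above expands to
 * MIPI_SYST_TYPE_STRING (0x2) | MIPI_SYST_STRING_GENERIC << 24
 * (0x01000000) | MIPI_SYST_SEVERITY_INFO << 4 (0x40) |
 * MIPI_SYST_OPT_GUID (0x00800000), i.e. 0x01800042.
 */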
struct sys_t_policy_node {
uuid_t uuid;
bool do_len;
unsigned long ts_interval;
unsigned long clocksync_interval;
};
struct sys_t_output {
struct sys_t_policy_node node;
unsigned long ts_jiffies;
unsigned long clocksync_jiffies;
};
static void sys_t_policy_node_init(void *priv)
{
struct sys_t_policy_node *pn = priv;
uuid_gen(&pn->uuid);
}
static int sys_t_output_open(void *priv, struct stm_output *output)
{
struct sys_t_policy_node *pn = priv;
struct sys_t_output *opriv;
opriv = kzalloc(sizeof(*opriv), GFP_ATOMIC);
if (!opriv)
return -ENOMEM;
memcpy(&opriv->node, pn, sizeof(opriv->node));
output->pdrv_private = opriv;
return 0;
}
static void sys_t_output_close(struct stm_output *output)
{
kfree(output->pdrv_private);
}
static ssize_t sys_t_policy_uuid_show(struct config_item *item,
char *page)
{
struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
return sprintf(page, "%pU\n", &pn->uuid);
}
static ssize_t
sys_t_policy_uuid_store(struct config_item *item, const char *page,
size_t count)
{
struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
int ret;
mutex_lock(mutexp);
ret = uuid_parse(page, &pn->uuid);
mutex_unlock(mutexp);
return ret < 0 ? ret : count;
}
CONFIGFS_ATTR(sys_t_policy_, uuid);
static ssize_t sys_t_policy_do_len_show(struct config_item *item,
char *page)
{
struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
return sprintf(page, "%d\n", pn->do_len);
}
static ssize_t
sys_t_policy_do_len_store(struct config_item *item, const char *page,
size_t count)
{
struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
int ret;
mutex_lock(mutexp);
ret = kstrtobool(page, &pn->do_len);
mutex_unlock(mutexp);
return ret ? ret : count;
}
CONFIGFS_ATTR(sys_t_policy_, do_len);
static ssize_t sys_t_policy_ts_interval_show(struct config_item *item,
char *page)
{
struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
return sprintf(page, "%u\n", jiffies_to_msecs(pn->ts_interval));
}
static ssize_t
sys_t_policy_ts_interval_store(struct config_item *item, const char *page,
size_t count)
{
struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
unsigned int ms;
int ret;
mutex_lock(mutexp);
ret = kstrtouint(page, 10, &ms);
mutex_unlock(mutexp);
if (!ret) {
pn->ts_interval = msecs_to_jiffies(ms);
return count;
}
return ret;
}
CONFIGFS_ATTR(sys_t_policy_, ts_interval);
static ssize_t sys_t_policy_clocksync_interval_show(struct config_item *item,
char *page)
{
struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
return sprintf(page, "%u\n", jiffies_to_msecs(pn->clocksync_interval));
}
static ssize_t
sys_t_policy_clocksync_interval_store(struct config_item *item,
const char *page, size_t count)
{
struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
unsigned int ms;
int ret;
mutex_lock(mutexp);
ret = kstrtouint(page, 10, &ms);
mutex_unlock(mutexp);
if (!ret) {
pn->clocksync_interval = msecs_to_jiffies(ms);
return count;
}
return ret;
}
CONFIGFS_ATTR(sys_t_policy_, clocksync_interval);
static struct configfs_attribute *sys_t_policy_attrs[] = {
&sys_t_policy_attr_uuid,
&sys_t_policy_attr_do_len,
&sys_t_policy_attr_ts_interval,
&sys_t_policy_attr_clocksync_interval,
NULL,
};
static inline bool sys_t_need_ts(struct sys_t_output *op)
{
if (op->node.ts_interval &&
time_after(jiffies, op->ts_jiffies + op->node.ts_interval)) {
op->ts_jiffies = jiffies;
return true;
}
return false;
}
static bool sys_t_need_clock_sync(struct sys_t_output *op)
{
if (op->node.clocksync_interval &&
time_after(jiffies,
op->clocksync_jiffies + op->node.clocksync_interval)) {
op->clocksync_jiffies = jiffies;
return true;
}
return false;
}
static ssize_t
sys_t_clock_sync(struct stm_data *data, unsigned int m, unsigned int c)
{
u32 header = CLOCK_SYNC_HEADER;
const unsigned char nil = 0;
u64 payload[2]; /* Clock value and frequency */
ssize_t sz;
sz = data->packet(data, m, c, STP_PACKET_DATA, STP_PACKET_TIMESTAMPED,
4, (u8 *)&header);
if (sz <= 0)
return sz;
payload[0] = ktime_get_real_ns();
payload[1] = NSEC_PER_SEC;
sz = stm_data_write(data, m, c, false, &payload, sizeof(payload));
if (sz <= 0)
return sz;
data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil);
return sizeof(header) + sizeof(payload);
}
static ssize_t sys_t_write(struct stm_data *data, struct stm_output *output,
unsigned int chan, const char *buf, size_t count)
{
struct sys_t_output *op = output->pdrv_private;
unsigned int c = output->channel + chan;
unsigned int m = output->master;
const unsigned char nil = 0;
u32 header = DATA_HEADER;
u8 uuid[UUID_SIZE];
ssize_t sz;
/* We require an existing policy node to proceed */
if (!op)
return -EINVAL;
if (sys_t_need_clock_sync(op)) {
sz = sys_t_clock_sync(data, m, c);
if (sz <= 0)
return sz;
}
if (op->node.do_len)
header |= MIPI_SYST_OPT_LEN;
if (sys_t_need_ts(op))
header |= MIPI_SYST_OPT_TS;
/*
* STP framing rules for SyS-T frames:
* * the first packet of the SyS-T frame is timestamped;
* * the last packet is a FLAG.
*/
/* Message layout: HEADER / GUID / [LENGTH /][TIMESTAMP /] DATA */
/* HEADER */
sz = data->packet(data, m, c, STP_PACKET_DATA, STP_PACKET_TIMESTAMPED,
4, (u8 *)&header);
if (sz <= 0)
return sz;
/* GUID */
export_uuid(uuid, &op->node.uuid);
sz = stm_data_write(data, m, c, false, uuid, sizeof(op->node.uuid));
if (sz <= 0)
return sz;
/* [LENGTH] */
if (op->node.do_len) {
u16 length = count;
sz = data->packet(data, m, c, STP_PACKET_DATA, 0, 2,
(u8 *)&length);
if (sz <= 0)
return sz;
}
/* [TIMESTAMP] */
if (header & MIPI_SYST_OPT_TS) {
u64 ts = ktime_get_real_ns();
sz = stm_data_write(data, m, c, false, &ts, sizeof(ts));
if (sz <= 0)
return sz;
}
/* DATA */
sz = stm_data_write(data, m, c, false, buf, count);
if (sz > 0)
data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil);
return sz;
}
static const struct stm_protocol_driver sys_t_pdrv = {
.owner = THIS_MODULE,
.name = "p_sys-t",
.priv_sz = sizeof(struct sys_t_policy_node),
.write = sys_t_write,
.policy_attr = sys_t_policy_attrs,
.policy_node_init = sys_t_policy_node_init,
.output_open = sys_t_output_open,
.output_close = sys_t_output_close,
};
static int sys_t_stm_init(void)
{
return stm_register_protocol(&sys_t_pdrv);
}
static void sys_t_stm_exit(void)
{
stm_unregister_protocol(&sys_t_pdrv);
}
module_init(sys_t_stm_init);
module_exit(sys_t_stm_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MIPI SyS-T STM framing protocol driver");
MODULE_AUTHOR("Alexander Shishkin <[email protected]>");
| linux-master | drivers/hwtracing/stm/p_sys-t.c |
// SPDX-License-Identifier: GPL-2.0
/*
* System Trace Module (STM) master/channel allocation policy management
* Copyright (c) 2014, Intel Corporation.
*
* A master/channel allocation policy allows mapping string identifiers to
* master and channel ranges, where allocation can be done.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/configfs.h>
#include <linux/slab.h>
#include <linux/stm.h>
#include "stm.h"
/*
* STP Master/Channel allocation policy configfs layout.
*/
struct stp_policy {
struct config_group group;
struct stm_device *stm;
};
struct stp_policy_node {
struct config_group group;
struct stp_policy *policy;
unsigned int first_master;
unsigned int last_master;
unsigned int first_channel;
unsigned int last_channel;
/* this is the one that's exposed to the attributes */
unsigned char priv[];
};
void *stp_policy_node_priv(struct stp_policy_node *pn)
{
if (!pn)
return NULL;
return pn->priv;
}
static struct configfs_subsystem stp_policy_subsys;
void stp_policy_node_get_ranges(struct stp_policy_node *policy_node,
unsigned int *mstart, unsigned int *mend,
unsigned int *cstart, unsigned int *cend)
{
*mstart = policy_node->first_master;
*mend = policy_node->last_master;
*cstart = policy_node->first_channel;
*cend = policy_node->last_channel;
}
static inline struct stp_policy *to_stp_policy(struct config_item *item)
{
return item ?
container_of(to_config_group(item), struct stp_policy, group) :
NULL;
}
static inline struct stp_policy_node *
to_stp_policy_node(struct config_item *item)
{
return item ?
container_of(to_config_group(item), struct stp_policy_node,
group) :
NULL;
}
void *to_pdrv_policy_node(struct config_item *item)
{
struct stp_policy_node *node = to_stp_policy_node(item);
return stp_policy_node_priv(node);
}
EXPORT_SYMBOL_GPL(to_pdrv_policy_node);
static ssize_t
stp_policy_node_masters_show(struct config_item *item, char *page)
{
struct stp_policy_node *policy_node = to_stp_policy_node(item);
ssize_t count;
count = sprintf(page, "%u %u\n", policy_node->first_master,
policy_node->last_master);
return count;
}
static ssize_t
stp_policy_node_masters_store(struct config_item *item, const char *page,
size_t count)
{
struct stp_policy_node *policy_node = to_stp_policy_node(item);
unsigned int first, last;
struct stm_device *stm;
char *p = (char *)page;
ssize_t ret = -ENODEV;
if (sscanf(p, "%u %u", &first, &last) != 2)
return -EINVAL;
mutex_lock(&stp_policy_subsys.su_mutex);
stm = policy_node->policy->stm;
if (!stm)
goto unlock;
/* must be within [sw_start..sw_end], which is an inclusive range */
if (first > last || first < stm->data->sw_start ||
last > stm->data->sw_end) {
ret = -ERANGE;
goto unlock;
}
ret = count;
policy_node->first_master = first;
policy_node->last_master = last;
unlock:
mutex_unlock(&stp_policy_subsys.su_mutex);
return ret;
}
static ssize_t
stp_policy_node_channels_show(struct config_item *item, char *page)
{
struct stp_policy_node *policy_node = to_stp_policy_node(item);
ssize_t count;
count = sprintf(page, "%u %u\n", policy_node->first_channel,
policy_node->last_channel);
return count;
}
static ssize_t
stp_policy_node_channels_store(struct config_item *item, const char *page,
size_t count)
{
struct stp_policy_node *policy_node = to_stp_policy_node(item);
unsigned int first, last;
struct stm_device *stm;
char *p = (char *)page;
ssize_t ret = -ENODEV;
if (sscanf(p, "%u %u", &first, &last) != 2)
return -EINVAL;
mutex_lock(&stp_policy_subsys.su_mutex);
stm = policy_node->policy->stm;
if (!stm)
goto unlock;
if (first > INT_MAX || last > INT_MAX || first > last ||
last >= stm->data->sw_nchannels) {
ret = -ERANGE;
goto unlock;
}
ret = count;
policy_node->first_channel = first;
policy_node->last_channel = last;
unlock:
mutex_unlock(&stp_policy_subsys.su_mutex);
return ret;
}
static void stp_policy_node_release(struct config_item *item)
{
struct stp_policy_node *node = to_stp_policy_node(item);
kfree(node);
}
static struct configfs_item_operations stp_policy_node_item_ops = {
.release = stp_policy_node_release,
};
CONFIGFS_ATTR(stp_policy_node_, masters);
CONFIGFS_ATTR(stp_policy_node_, channels);
static struct configfs_attribute *stp_policy_node_attrs[] = {
&stp_policy_node_attr_masters,
&stp_policy_node_attr_channels,
NULL,
};
static const struct config_item_type stp_policy_type;
static const struct config_item_type stp_policy_node_type;
const struct config_item_type *
get_policy_node_type(struct configfs_attribute **attrs)
{
struct config_item_type *type;
struct configfs_attribute **merged;
type = kmemdup(&stp_policy_node_type, sizeof(stp_policy_node_type),
GFP_KERNEL);
if (!type)
return NULL;
merged = memcat_p(stp_policy_node_attrs, attrs);
if (!merged) {
kfree(type);
return NULL;
}
type->ct_attrs = merged;
return type;
}
static struct config_group *
stp_policy_node_make(struct config_group *group, const char *name)
{
const struct config_item_type *type = &stp_policy_node_type;
struct stp_policy_node *policy_node, *parent_node;
const struct stm_protocol_driver *pdrv;
struct stp_policy *policy;
if (group->cg_item.ci_type == &stp_policy_type) {
policy = container_of(group, struct stp_policy, group);
} else {
parent_node = container_of(group, struct stp_policy_node,
group);
policy = parent_node->policy;
}
if (!policy->stm)
return ERR_PTR(-ENODEV);
pdrv = policy->stm->pdrv;
policy_node =
kzalloc(offsetof(struct stp_policy_node, priv[pdrv->priv_sz]),
GFP_KERNEL);
if (!policy_node)
return ERR_PTR(-ENOMEM);
if (pdrv->policy_node_init)
pdrv->policy_node_init((void *)policy_node->priv);
if (policy->stm->pdrv_node_type)
type = policy->stm->pdrv_node_type;
config_group_init_type_name(&policy_node->group, name, type);
policy_node->policy = policy;
/* default values for the attributes */
policy_node->first_master = policy->stm->data->sw_start;
policy_node->last_master = policy->stm->data->sw_end;
policy_node->first_channel = 0;
policy_node->last_channel = policy->stm->data->sw_nchannels - 1;
return &policy_node->group;
}
static void
stp_policy_node_drop(struct config_group *group, struct config_item *item)
{
config_item_put(item);
}
static struct configfs_group_operations stp_policy_node_group_ops = {
.make_group = stp_policy_node_make,
.drop_item = stp_policy_node_drop,
};
static const struct config_item_type stp_policy_node_type = {
.ct_item_ops = &stp_policy_node_item_ops,
.ct_group_ops = &stp_policy_node_group_ops,
.ct_attrs = stp_policy_node_attrs,
.ct_owner = THIS_MODULE,
};
/*
* Root group: policies.
*/
static ssize_t stp_policy_device_show(struct config_item *item,
char *page)
{
struct stp_policy *policy = to_stp_policy(item);
ssize_t count;
count = sprintf(page, "%s\n",
(policy && policy->stm) ?
policy->stm->data->name :
"<none>");
return count;
}
CONFIGFS_ATTR_RO(stp_policy_, device);
static ssize_t stp_policy_protocol_show(struct config_item *item,
char *page)
{
struct stp_policy *policy = to_stp_policy(item);
ssize_t count;
count = sprintf(page, "%s\n",
(policy && policy->stm) ?
policy->stm->pdrv->name :
"<none>");
return count;
}
CONFIGFS_ATTR_RO(stp_policy_, protocol);
static struct configfs_attribute *stp_policy_attrs[] = {
&stp_policy_attr_device,
&stp_policy_attr_protocol,
NULL,
};
void stp_policy_unbind(struct stp_policy *policy)
{
struct stm_device *stm = policy->stm;
/*
	 * stp_policy_release() will not get here if the policy is already
* unbound; other users should not either, as no link exists between
* this policy and anything else in that case
*/
if (WARN_ON_ONCE(!policy->stm))
return;
lockdep_assert_held(&stm->policy_mutex);
stm->policy = NULL;
policy->stm = NULL;
/*
* Drop the reference on the protocol driver and lose the link.
*/
stm_put_protocol(stm->pdrv);
stm->pdrv = NULL;
stm_put_device(stm);
}
static void stp_policy_release(struct config_item *item)
{
struct stp_policy *policy = to_stp_policy(item);
struct stm_device *stm = policy->stm;
/* a policy *can* be unbound and still exist in configfs tree */
if (!stm)
return;
mutex_lock(&stm->policy_mutex);
stp_policy_unbind(policy);
mutex_unlock(&stm->policy_mutex);
kfree(policy);
}
static struct configfs_item_operations stp_policy_item_ops = {
.release = stp_policy_release,
};
static struct configfs_group_operations stp_policy_group_ops = {
.make_group = stp_policy_node_make,
};
static const struct config_item_type stp_policy_type = {
.ct_item_ops = &stp_policy_item_ops,
.ct_group_ops = &stp_policy_group_ops,
.ct_attrs = stp_policy_attrs,
.ct_owner = THIS_MODULE,
};
static struct config_group *
stp_policy_make(struct config_group *group, const char *name)
{
const struct config_item_type *pdrv_node_type;
const struct stm_protocol_driver *pdrv;
char *devname, *proto, *p;
struct config_group *ret;
struct stm_device *stm;
int err;
devname = kasprintf(GFP_KERNEL, "%s", name);
if (!devname)
return ERR_PTR(-ENOMEM);
/*
	 * node must look like <device_name>.<policy_name>, where
	 * <device_name> is the name of an existing stm device and may
	 * contain dots, and <policy_name> is an arbitrary string that may
	 * not contain dots; alternatively, a protocol can be selected
	 * explicitly with <device_name>:<protocol_name>.<policy_name>
*/
p = strrchr(devname, '.');
if (!p) {
kfree(devname);
return ERR_PTR(-EINVAL);
}
*p = '\0';
/*
* look for ":<protocol_name>":
* + no protocol suffix: fall back to whatever is available;
* + unknown protocol: fail the whole thing
*/
proto = strrchr(devname, ':');
if (proto)
*proto++ = '\0';
stm = stm_find_device(devname);
if (!stm) {
kfree(devname);
return ERR_PTR(-ENODEV);
}
err = stm_lookup_protocol(proto, &pdrv, &pdrv_node_type);
kfree(devname);
if (err) {
stm_put_device(stm);
return ERR_PTR(-ENODEV);
}
mutex_lock(&stm->policy_mutex);
if (stm->policy) {
ret = ERR_PTR(-EBUSY);
goto unlock_policy;
}
stm->policy = kzalloc(sizeof(*stm->policy), GFP_KERNEL);
if (!stm->policy) {
ret = ERR_PTR(-ENOMEM);
goto unlock_policy;
}
config_group_init_type_name(&stm->policy->group, name,
&stp_policy_type);
stm->pdrv = pdrv;
stm->pdrv_node_type = pdrv_node_type;
stm->policy->stm = stm;
ret = &stm->policy->group;
unlock_policy:
mutex_unlock(&stm->policy_mutex);
if (IS_ERR(ret)) {
/*
* pdrv and stm->pdrv at this point can be quite different,
* and only one of them needs to be 'put'
*/
stm_put_protocol(pdrv);
stm_put_device(stm);
}
return ret;
}
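/*
 * Usage sketch (illustrative; device and policy names are assumed, not
 * taken from this file): creating a policy through configfs follows the
 * naming rules parsed above.
 *
 *	# bind policy "my-policy" to device "dummy_stm.0", default protocol
 *	mkdir /sys/kernel/config/stp-policy/dummy_stm.0.my-policy
 *
 *	# the same, but selecting the p_basic protocol explicitly
 *	mkdir /sys/kernel/config/stp-policy/dummy_stm.0:p_basic.my-policy
 */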
static struct configfs_group_operations stp_policy_root_group_ops = {
.make_group = stp_policy_make,
};
static const struct config_item_type stp_policy_root_type = {
.ct_group_ops = &stp_policy_root_group_ops,
.ct_owner = THIS_MODULE,
};
static struct configfs_subsystem stp_policy_subsys = {
.su_group = {
.cg_item = {
.ci_namebuf = "stp-policy",
.ci_type = &stp_policy_root_type,
},
},
};
/*
 * The caller is expected to lock the policy mutex from the outside
 * (see stp_policy_node_lookup())
*/
static struct stp_policy_node *
__stp_policy_node_lookup(struct stp_policy *policy, char *s)
{
struct stp_policy_node *policy_node, *ret = NULL;
struct list_head *head = &policy->group.cg_children;
struct config_item *item;
char *start, *end = s;
if (list_empty(head))
return NULL;
next:
for (;;) {
start = strsep(&end, "/");
if (!start)
break;
if (!*start)
continue;
list_for_each_entry(item, head, ci_entry) {
policy_node = to_stp_policy_node(item);
if (!strcmp(start,
policy_node->group.cg_item.ci_name)) {
ret = policy_node;
if (!end)
goto out;
head = &policy_node->group.cg_children;
goto next;
}
}
break;
}
out:
return ret;
}
struct stp_policy_node *
stp_policy_node_lookup(struct stm_device *stm, char *s)
{
struct stp_policy_node *policy_node = NULL;
mutex_lock(&stp_policy_subsys.su_mutex);
mutex_lock(&stm->policy_mutex);
if (stm->policy)
policy_node = __stp_policy_node_lookup(stm->policy, s);
mutex_unlock(&stm->policy_mutex);
if (policy_node)
config_item_get(&policy_node->group.cg_item);
else
mutex_unlock(&stp_policy_subsys.su_mutex);
return policy_node;
}
void stp_policy_node_put(struct stp_policy_node *policy_node)
{
lockdep_assert_held(&stp_policy_subsys.su_mutex);
mutex_unlock(&stp_policy_subsys.su_mutex);
config_item_put(&policy_node->group.cg_item);
}
int __init stp_configfs_init(void)
{
config_group_init(&stp_policy_subsys.su_group);
mutex_init(&stp_policy_subsys.su_mutex);
return configfs_register_subsystem(&stp_policy_subsys);
}
void __exit stp_configfs_exit(void)
{
configfs_unregister_subsystem(&stp_policy_subsys);
}
| linux-master | drivers/hwtracing/stm/policy.c |
// SPDX-License-Identifier: GPL-2.0
/*
* A dummy STM device for stm/stm_source class testing.
* Copyright (c) 2014, Intel Corporation.
*
 * The STM class implements generic infrastructure for System Trace Module
 * devices as defined in the MIPI STPv2 specification.
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stm.h>
#include <uapi/linux/stm.h>
static ssize_t notrace
dummy_stm_packet(struct stm_data *stm_data, unsigned int master,
unsigned int channel, unsigned int packet, unsigned int flags,
unsigned int size, const unsigned char *payload)
{
#ifdef DEBUG
u64 pl = 0;
if (payload)
pl = *(u64 *)payload;
if (size < 8)
pl &= (1ull << (size * 8)) - 1;
trace_printk("[%u:%u] [pkt: %x/%x] (%llx)\n", master, channel,
packet, size, pl);
#endif
return size;
}
#define DUMMY_STM_MAX 32
static struct stm_data dummy_stm[DUMMY_STM_MAX];
static int nr_dummies = 4;
module_param(nr_dummies, int, 0400);
static unsigned int fail_mode;
module_param(fail_mode, int, 0600);
static unsigned int master_min;
module_param(master_min, int, 0400);
static unsigned int master_max = STP_MASTER_MAX;
module_param(master_max, int, 0400);
static unsigned int nr_channels = STP_CHANNEL_MAX;
module_param(nr_channels, int, 0400);
static int dummy_stm_link(struct stm_data *data, unsigned int master,
unsigned int channel)
{
if (fail_mode && (channel & fail_mode))
return -EINVAL;
return 0;
}
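/*
 * Illustrative aside: fail_mode is a channel bit mask, so with e.g.
 * fail_mode=0x1 every odd-numbered channel fails to link, which lets
 * the stm/stm_source link error paths be exercised deterministically.
 */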
static int dummy_stm_init(void)
{
int i, ret = -ENOMEM;
if (nr_dummies < 0 || nr_dummies > DUMMY_STM_MAX)
return -EINVAL;
if (master_min > master_max ||
master_max > STP_MASTER_MAX ||
nr_channels > STP_CHANNEL_MAX)
return -EINVAL;
for (i = 0; i < nr_dummies; i++) {
dummy_stm[i].name = kasprintf(GFP_KERNEL, "dummy_stm.%d", i);
if (!dummy_stm[i].name)
goto fail_unregister;
dummy_stm[i].sw_start = master_min;
dummy_stm[i].sw_end = master_max;
dummy_stm[i].sw_nchannels = nr_channels;
dummy_stm[i].packet = dummy_stm_packet;
dummy_stm[i].link = dummy_stm_link;
ret = stm_register_device(NULL, &dummy_stm[i], THIS_MODULE);
if (ret)
goto fail_free;
}
return 0;
fail_unregister:
for (i--; i >= 0; i--) {
stm_unregister_device(&dummy_stm[i]);
fail_free:
kfree(dummy_stm[i].name);
}
return ret;
}
static void dummy_stm_exit(void)
{
int i;
for (i = 0; i < nr_dummies; i++) {
stm_unregister_device(&dummy_stm[i]);
kfree(dummy_stm[i].name);
}
}
module_init(dummy_stm_init);
module_exit(dummy_stm_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("dummy_stm device");
MODULE_AUTHOR("Alexander Shishkin <[email protected]>");
| linux-master | drivers/hwtracing/stm/dummy_stm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Basic framing protocol for STM devices.
* Copyright (c) 2018, Intel Corporation.
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/stm.h>
#include "stm.h"
static ssize_t basic_write(struct stm_data *data, struct stm_output *output,
unsigned int chan, const char *buf, size_t count)
{
unsigned int c = output->channel + chan;
unsigned int m = output->master;
const unsigned char nil = 0;
ssize_t sz;
sz = stm_data_write(data, m, c, true, buf, count);
if (sz > 0)
data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil);
return sz;
}
static const struct stm_protocol_driver basic_pdrv = {
.owner = THIS_MODULE,
.name = "p_basic",
.write = basic_write,
};
static int basic_stm_init(void)
{
return stm_register_protocol(&basic_pdrv);
}
static void basic_stm_exit(void)
{
stm_unregister_protocol(&basic_pdrv);
}
module_init(basic_stm_init);
module_exit(basic_stm_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Basic STM framing protocol driver");
MODULE_AUTHOR("Alexander Shishkin <[email protected]>");
| linux-master | drivers/hwtracing/stm/p_basic.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Simple heartbeat STM source driver
* Copyright (c) 2016, Intel Corporation.
*
 * The heartbeat STM source sends periodic messages over STM devices to a
 * trace host.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/stm.h>
#define STM_HEARTBEAT_MAX 32
static int nr_devs = 4;
static int interval_ms = 10;
module_param(nr_devs, int, 0400);
module_param(interval_ms, int, 0600);
static struct stm_heartbeat {
struct stm_source_data data;
struct hrtimer hrtimer;
unsigned int active;
} stm_heartbeat[STM_HEARTBEAT_MAX];
static const char str[] = "heartbeat stm source driver is here to serve you";
static enum hrtimer_restart stm_heartbeat_hrtimer_handler(struct hrtimer *hr)
{
struct stm_heartbeat *heartbeat = container_of(hr, struct stm_heartbeat,
hrtimer);
stm_source_write(&heartbeat->data, 0, str, sizeof str);
if (heartbeat->active)
hrtimer_forward_now(hr, ms_to_ktime(interval_ms));
return heartbeat->active ? HRTIMER_RESTART : HRTIMER_NORESTART;
}
static int stm_heartbeat_link(struct stm_source_data *data)
{
struct stm_heartbeat *heartbeat =
container_of(data, struct stm_heartbeat, data);
heartbeat->active = 1;
hrtimer_start(&heartbeat->hrtimer, ms_to_ktime(interval_ms),
HRTIMER_MODE_ABS);
return 0;
}
static void stm_heartbeat_unlink(struct stm_source_data *data)
{
struct stm_heartbeat *heartbeat =
container_of(data, struct stm_heartbeat, data);
heartbeat->active = 0;
hrtimer_cancel(&heartbeat->hrtimer);
}
static int stm_heartbeat_init(void)
{
int i, ret;
if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX)
return -EINVAL;
for (i = 0; i < nr_devs; i++) {
stm_heartbeat[i].data.name =
kasprintf(GFP_KERNEL, "heartbeat.%d", i);
if (!stm_heartbeat[i].data.name) {
ret = -ENOMEM;
goto fail_unregister;
}
stm_heartbeat[i].data.nr_chans = 1;
stm_heartbeat[i].data.link = stm_heartbeat_link;
stm_heartbeat[i].data.unlink = stm_heartbeat_unlink;
hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
stm_heartbeat[i].hrtimer.function =
stm_heartbeat_hrtimer_handler;
ret = stm_source_register_device(NULL, &stm_heartbeat[i].data);
if (ret)
goto fail_free;
}
return 0;
fail_unregister:
for (i--; i >= 0; i--) {
stm_source_unregister_device(&stm_heartbeat[i].data);
fail_free:
kfree(stm_heartbeat[i].data.name);
}
return ret;
}
static void stm_heartbeat_exit(void)
{
int i;
for (i = 0; i < nr_devs; i++) {
stm_source_unregister_device(&stm_heartbeat[i].data);
kfree(stm_heartbeat[i].data.name);
}
}
module_init(stm_heartbeat_init);
module_exit(stm_heartbeat_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("stm_heartbeat driver");
MODULE_AUTHOR("Alexander Shishkin <[email protected]>");
| linux-master | drivers/hwtracing/stm/heartbeat.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for HiSilicon PCIe tune and trace device
*
* Copyright (c) 2022 HiSilicon Technologies Co., Ltd.
* Author: Yicong Yang <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpuhotplug.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/vmalloc.h>
#include "hisi_ptt.h"
/* Dynamic CPU hotplug state used by PTT */
static enum cpuhp_state hisi_ptt_pmu_online;
static bool hisi_ptt_wait_tuning_finish(struct hisi_ptt *hisi_ptt)
{
u32 val;
return !readl_poll_timeout(hisi_ptt->iobase + HISI_PTT_TUNING_INT_STAT,
val, !(val & HISI_PTT_TUNING_INT_STAT_MASK),
HISI_PTT_WAIT_POLL_INTERVAL_US,
HISI_PTT_WAIT_TUNE_TIMEOUT_US);
}
static ssize_t hisi_ptt_tune_attr_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct hisi_ptt *hisi_ptt = to_hisi_ptt(dev_get_drvdata(dev));
struct dev_ext_attribute *ext_attr;
struct hisi_ptt_tune_desc *desc;
u32 reg;
u16 val;
ext_attr = container_of(attr, struct dev_ext_attribute, attr);
desc = ext_attr->var;
mutex_lock(&hisi_ptt->tune_lock);
reg = readl(hisi_ptt->iobase + HISI_PTT_TUNING_CTRL);
reg &= ~(HISI_PTT_TUNING_CTRL_CODE | HISI_PTT_TUNING_CTRL_SUB);
reg |= FIELD_PREP(HISI_PTT_TUNING_CTRL_CODE | HISI_PTT_TUNING_CTRL_SUB,
desc->event_code);
writel(reg, hisi_ptt->iobase + HISI_PTT_TUNING_CTRL);
	/* Write all 1s to indicate that this is a read operation */
writel(~0U, hisi_ptt->iobase + HISI_PTT_TUNING_DATA);
if (!hisi_ptt_wait_tuning_finish(hisi_ptt)) {
mutex_unlock(&hisi_ptt->tune_lock);
return -ETIMEDOUT;
}
reg = readl(hisi_ptt->iobase + HISI_PTT_TUNING_DATA);
reg &= HISI_PTT_TUNING_DATA_VAL_MASK;
val = FIELD_GET(HISI_PTT_TUNING_DATA_VAL_MASK, reg);
mutex_unlock(&hisi_ptt->tune_lock);
return sysfs_emit(buf, "%u\n", val);
}
static ssize_t hisi_ptt_tune_attr_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct hisi_ptt *hisi_ptt = to_hisi_ptt(dev_get_drvdata(dev));
struct dev_ext_attribute *ext_attr;
struct hisi_ptt_tune_desc *desc;
u32 reg;
u16 val;
ext_attr = container_of(attr, struct dev_ext_attribute, attr);
desc = ext_attr->var;
if (kstrtou16(buf, 10, &val))
return -EINVAL;
mutex_lock(&hisi_ptt->tune_lock);
reg = readl(hisi_ptt->iobase + HISI_PTT_TUNING_CTRL);
reg &= ~(HISI_PTT_TUNING_CTRL_CODE | HISI_PTT_TUNING_CTRL_SUB);
reg |= FIELD_PREP(HISI_PTT_TUNING_CTRL_CODE | HISI_PTT_TUNING_CTRL_SUB,
desc->event_code);
writel(reg, hisi_ptt->iobase + HISI_PTT_TUNING_CTRL);
writel(FIELD_PREP(HISI_PTT_TUNING_DATA_VAL_MASK, val),
hisi_ptt->iobase + HISI_PTT_TUNING_DATA);
if (!hisi_ptt_wait_tuning_finish(hisi_ptt)) {
mutex_unlock(&hisi_ptt->tune_lock);
return -ETIMEDOUT;
}
mutex_unlock(&hisi_ptt->tune_lock);
return count;
}
#define HISI_PTT_TUNE_ATTR(_name, _val, _show, _store) \
static struct hisi_ptt_tune_desc _name##_desc = { \
.name = #_name, \
.event_code = (_val), \
}; \
static struct dev_ext_attribute hisi_ptt_##_name##_attr = { \
.attr = __ATTR(_name, 0600, _show, _store), \
.var = &_name##_desc, \
}
#define HISI_PTT_TUNE_ATTR_COMMON(_name, _val) \
HISI_PTT_TUNE_ATTR(_name, _val, \
hisi_ptt_tune_attr_show, \
hisi_ptt_tune_attr_store)
/*
 * The value of a tuning event is composed of two parts: the main event code
 * in BIT[0,15] and the subevent code in BIT[16,23]. For example, qos_tx_cpl
 * is a subevent of 'Tx path QoS control', which tunes the weight of Tx
 * completion TLPs. See the hisi_ptt.rst documentation for more information.
*/
#define HISI_PTT_TUNE_QOS_TX_CPL (0x4 | (3 << 16))
#define HISI_PTT_TUNE_QOS_TX_NP (0x4 | (4 << 16))
#define HISI_PTT_TUNE_QOS_TX_P (0x4 | (5 << 16))
#define HISI_PTT_TUNE_RX_ALLOC_BUF_LEVEL (0x5 | (6 << 16))
#define HISI_PTT_TUNE_TX_ALLOC_BUF_LEVEL (0x5 | (7 << 16))
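/*
 * Illustrative helper (not used by the driver): decompose one of the
 * event codes above into its main event and subevent parts, following
 * the BIT[0,15] / BIT[16,23] split described in the comment.
 */
static inline void hisi_ptt_tune_event_decode_example(u32 event_code,
						      u16 *main_event,
						      u8 *sub_event)
{
	*main_event = FIELD_GET(GENMASK(15, 0), event_code);	/* e.g. 0x4 */
	*sub_event = FIELD_GET(GENMASK(23, 16), event_code);	/* e.g. 3 */
}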
HISI_PTT_TUNE_ATTR_COMMON(qos_tx_cpl, HISI_PTT_TUNE_QOS_TX_CPL);
HISI_PTT_TUNE_ATTR_COMMON(qos_tx_np, HISI_PTT_TUNE_QOS_TX_NP);
HISI_PTT_TUNE_ATTR_COMMON(qos_tx_p, HISI_PTT_TUNE_QOS_TX_P);
HISI_PTT_TUNE_ATTR_COMMON(rx_alloc_buf_level, HISI_PTT_TUNE_RX_ALLOC_BUF_LEVEL);
HISI_PTT_TUNE_ATTR_COMMON(tx_alloc_buf_level, HISI_PTT_TUNE_TX_ALLOC_BUF_LEVEL);
static struct attribute *hisi_ptt_tune_attrs[] = {
&hisi_ptt_qos_tx_cpl_attr.attr.attr,
&hisi_ptt_qos_tx_np_attr.attr.attr,
&hisi_ptt_qos_tx_p_attr.attr.attr,
&hisi_ptt_rx_alloc_buf_level_attr.attr.attr,
&hisi_ptt_tx_alloc_buf_level_attr.attr.attr,
NULL,
};
static struct attribute_group hisi_ptt_tune_group = {
.name = "tune",
.attrs = hisi_ptt_tune_attrs,
};
static u16 hisi_ptt_get_filter_val(u16 devid, bool is_port)
{
if (is_port)
return BIT(HISI_PCIE_CORE_PORT_ID(devid & 0xff));
return devid;
}
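/*
 * Illustrative aside: for a Root Port filter the returned value is a
 * one-hot bit mask derived from the port's position, so several Root
 * Ports OR together into hisi_ptt->port_mask; for a Requester filter
 * the BDF devid is used as-is. E.g. (values assumed) a Root Port whose
 * HISI_PCIE_CORE_PORT_ID() is 2 contributes BIT(2) == 0x4.
 */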
static bool hisi_ptt_wait_trace_hw_idle(struct hisi_ptt *hisi_ptt)
{
u32 val;
return !readl_poll_timeout_atomic(hisi_ptt->iobase + HISI_PTT_TRACE_STS,
val, val & HISI_PTT_TRACE_IDLE,
HISI_PTT_WAIT_POLL_INTERVAL_US,
HISI_PTT_WAIT_TRACE_TIMEOUT_US);
}
static void hisi_ptt_wait_dma_reset_done(struct hisi_ptt *hisi_ptt)
{
u32 val;
readl_poll_timeout_atomic(hisi_ptt->iobase + HISI_PTT_TRACE_WR_STS,
val, !val, HISI_PTT_RESET_POLL_INTERVAL_US,
HISI_PTT_RESET_TIMEOUT_US);
}
static void hisi_ptt_trace_end(struct hisi_ptt *hisi_ptt)
{
writel(0, hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
hisi_ptt->trace_ctrl.started = false;
}
static int hisi_ptt_trace_start(struct hisi_ptt *hisi_ptt)
{
struct hisi_ptt_trace_ctrl *ctrl = &hisi_ptt->trace_ctrl;
u32 val;
int i;
	/* Check that the device is idle before starting the trace */
if (!hisi_ptt_wait_trace_hw_idle(hisi_ptt)) {
pci_err(hisi_ptt->pdev, "Failed to start trace, the device is still busy\n");
return -EBUSY;
}
ctrl->started = true;
	/* Reset the DMA before starting the trace */
val = readl(hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
val |= HISI_PTT_TRACE_CTRL_RST;
writel(val, hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
hisi_ptt_wait_dma_reset_done(hisi_ptt);
val = readl(hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
val &= ~HISI_PTT_TRACE_CTRL_RST;
writel(val, hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
/* Reset the index of current buffer */
hisi_ptt->trace_ctrl.buf_index = 0;
/* Zero the trace buffers */
for (i = 0; i < HISI_PTT_TRACE_BUF_CNT; i++)
memset(ctrl->trace_buf[i].addr, 0, HISI_PTT_TRACE_BUF_SIZE);
/* Clear the interrupt status */
writel(HISI_PTT_TRACE_INT_STAT_MASK, hisi_ptt->iobase + HISI_PTT_TRACE_INT_STAT);
writel(0, hisi_ptt->iobase + HISI_PTT_TRACE_INT_MASK);
/* Set the trace control register */
val = FIELD_PREP(HISI_PTT_TRACE_CTRL_TYPE_SEL, ctrl->type);
val |= FIELD_PREP(HISI_PTT_TRACE_CTRL_RXTX_SEL, ctrl->direction);
val |= FIELD_PREP(HISI_PTT_TRACE_CTRL_DATA_FORMAT, ctrl->format);
val |= FIELD_PREP(HISI_PTT_TRACE_CTRL_TARGET_SEL, hisi_ptt->trace_ctrl.filter);
if (!hisi_ptt->trace_ctrl.is_port)
val |= HISI_PTT_TRACE_CTRL_FILTER_MODE;
/* Start the Trace */
val |= HISI_PTT_TRACE_CTRL_EN;
writel(val, hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
return 0;
}
static int hisi_ptt_update_aux(struct hisi_ptt *hisi_ptt, int index, bool stop)
{
struct hisi_ptt_trace_ctrl *ctrl = &hisi_ptt->trace_ctrl;
struct perf_output_handle *handle = &ctrl->handle;
struct perf_event *event = handle->event;
struct hisi_ptt_pmu_buf *buf;
size_t size;
void *addr;
buf = perf_get_aux(handle);
if (!buf || !handle->size)
return -EINVAL;
addr = ctrl->trace_buf[ctrl->buf_index].addr;
/*
	 * If we're going to stop, read the size of the already-traced data
	 * from HISI_PTT_TRACE_WR_STS. Otherwise we're coming from the
	 * interrupt, where the data size is always HISI_PTT_TRACE_BUF_SIZE.
*/
if (stop) {
u32 reg;
reg = readl(hisi_ptt->iobase + HISI_PTT_TRACE_WR_STS);
size = FIELD_GET(HISI_PTT_TRACE_WR_STS_WRITE, reg);
} else {
size = HISI_PTT_TRACE_BUF_SIZE;
}
memcpy(buf->base + buf->pos, addr, size);
buf->pos += size;
/*
	 * Just commit the traced data if we're going to stop. Otherwise, if
	 * the resident AUX buffer cannot hold the data of the next trace
	 * buffer, request a new one.
*/
if (stop) {
perf_aux_output_end(handle, buf->pos);
} else if (buf->length - buf->pos < HISI_PTT_TRACE_BUF_SIZE) {
perf_aux_output_end(handle, buf->pos);
buf = perf_aux_output_begin(handle, event);
if (!buf)
return -EINVAL;
buf->pos = handle->head % buf->length;
if (buf->length - buf->pos < HISI_PTT_TRACE_BUF_SIZE) {
perf_aux_output_end(handle, 0);
return -EINVAL;
}
}
return 0;
}
static irqreturn_t hisi_ptt_isr(int irq, void *context)
{
struct hisi_ptt *hisi_ptt = context;
u32 status, buf_idx;
status = readl(hisi_ptt->iobase + HISI_PTT_TRACE_INT_STAT);
if (!(status & HISI_PTT_TRACE_INT_STAT_MASK))
return IRQ_NONE;
buf_idx = ffs(status) - 1;
/* Clear the interrupt status of buffer @buf_idx */
writel(status, hisi_ptt->iobase + HISI_PTT_TRACE_INT_STAT);
/*
	 * Update the AUX buffer and cache the current buffer index, since
	 * we need it to save the data when the trace is ended outside of
	 * the interrupt handler. End the trace if the update fails.
*/
if (hisi_ptt_update_aux(hisi_ptt, buf_idx, false))
hisi_ptt_trace_end(hisi_ptt);
else
hisi_ptt->trace_ctrl.buf_index = (buf_idx + 1) % HISI_PTT_TRACE_BUF_CNT;
return IRQ_HANDLED;
}
static void hisi_ptt_irq_free_vectors(void *pdev)
{
pci_free_irq_vectors(pdev);
}
static int hisi_ptt_register_irq(struct hisi_ptt *hisi_ptt)
{
struct pci_dev *pdev = hisi_ptt->pdev;
int ret;
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
if (ret < 0) {
pci_err(pdev, "failed to allocate irq vector, ret = %d\n", ret);
return ret;
}
ret = devm_add_action_or_reset(&pdev->dev, hisi_ptt_irq_free_vectors, pdev);
if (ret < 0)
return ret;
hisi_ptt->trace_irq = pci_irq_vector(pdev, HISI_PTT_TRACE_DMA_IRQ);
ret = devm_request_threaded_irq(&pdev->dev, hisi_ptt->trace_irq,
NULL, hisi_ptt_isr, 0,
DRV_NAME, hisi_ptt);
if (ret) {
pci_err(pdev, "failed to request irq %d, ret = %d\n",
hisi_ptt->trace_irq, ret);
return ret;
}
return 0;
}
static void hisi_ptt_del_free_filter(struct hisi_ptt *hisi_ptt,
struct hisi_ptt_filter_desc *filter)
{
if (filter->is_port)
hisi_ptt->port_mask &= ~hisi_ptt_get_filter_val(filter->devid, true);
list_del(&filter->list);
kfree(filter->name);
kfree(filter);
}
static struct hisi_ptt_filter_desc *
hisi_ptt_alloc_add_filter(struct hisi_ptt *hisi_ptt, u16 devid, bool is_port)
{
struct hisi_ptt_filter_desc *filter;
u8 devfn = devid & 0xff;
char *filter_name;
filter_name = kasprintf(GFP_KERNEL, "%04x:%02x:%02x.%d", pci_domain_nr(hisi_ptt->pdev->bus),
PCI_BUS_NUM(devid), PCI_SLOT(devfn), PCI_FUNC(devfn));
if (!filter_name) {
pci_err(hisi_ptt->pdev, "failed to allocate name for filter %04x:%02x:%02x.%d\n",
pci_domain_nr(hisi_ptt->pdev->bus), PCI_BUS_NUM(devid),
PCI_SLOT(devfn), PCI_FUNC(devfn));
return NULL;
}
filter = kzalloc(sizeof(*filter), GFP_KERNEL);
if (!filter) {
pci_err(hisi_ptt->pdev, "failed to add filter for %s\n",
filter_name);
kfree(filter_name);
return NULL;
}
filter->name = filter_name;
filter->is_port = is_port;
filter->devid = devid;
if (filter->is_port) {
list_add_tail(&filter->list, &hisi_ptt->port_filters);
/* Update the available port mask */
hisi_ptt->port_mask |= hisi_ptt_get_filter_val(filter->devid, true);
} else {
list_add_tail(&filter->list, &hisi_ptt->req_filters);
}
return filter;
}
static ssize_t hisi_ptt_filter_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hisi_ptt_filter_desc *filter;
unsigned long filter_val;
filter = container_of(attr, struct hisi_ptt_filter_desc, attr);
filter_val = hisi_ptt_get_filter_val(filter->devid, filter->is_port) |
(filter->is_port ? HISI_PTT_PMU_FILTER_IS_PORT : 0);
return sysfs_emit(buf, "0x%05lx\n", filter_val);
}
static int hisi_ptt_create_rp_filter_attr(struct hisi_ptt *hisi_ptt,
struct hisi_ptt_filter_desc *filter)
{
struct kobject *kobj = &hisi_ptt->hisi_ptt_pmu.dev->kobj;
sysfs_attr_init(&filter->attr.attr);
filter->attr.attr.name = filter->name;
filter->attr.attr.mode = 0400; /* DEVICE_ATTR_ADMIN_RO */
filter->attr.show = hisi_ptt_filter_show;
return sysfs_add_file_to_group(kobj, &filter->attr.attr,
HISI_PTT_RP_FILTERS_GRP_NAME);
}
static void hisi_ptt_remove_rp_filter_attr(struct hisi_ptt *hisi_ptt,
struct hisi_ptt_filter_desc *filter)
{
struct kobject *kobj = &hisi_ptt->hisi_ptt_pmu.dev->kobj;
sysfs_remove_file_from_group(kobj, &filter->attr.attr,
HISI_PTT_RP_FILTERS_GRP_NAME);
}
static int hisi_ptt_create_req_filter_attr(struct hisi_ptt *hisi_ptt,
struct hisi_ptt_filter_desc *filter)
{
struct kobject *kobj = &hisi_ptt->hisi_ptt_pmu.dev->kobj;
sysfs_attr_init(&filter->attr.attr);
filter->attr.attr.name = filter->name;
filter->attr.attr.mode = 0400; /* DEVICE_ATTR_ADMIN_RO */
filter->attr.show = hisi_ptt_filter_show;
return sysfs_add_file_to_group(kobj, &filter->attr.attr,
HISI_PTT_REQ_FILTERS_GRP_NAME);
}
static void hisi_ptt_remove_req_filter_attr(struct hisi_ptt *hisi_ptt,
struct hisi_ptt_filter_desc *filter)
{
struct kobject *kobj = &hisi_ptt->hisi_ptt_pmu.dev->kobj;
sysfs_remove_file_from_group(kobj, &filter->attr.attr,
HISI_PTT_REQ_FILTERS_GRP_NAME);
}
static int hisi_ptt_create_filter_attr(struct hisi_ptt *hisi_ptt,
struct hisi_ptt_filter_desc *filter)
{
int ret;
if (filter->is_port)
ret = hisi_ptt_create_rp_filter_attr(hisi_ptt, filter);
else
ret = hisi_ptt_create_req_filter_attr(hisi_ptt, filter);
if (ret)
pci_err(hisi_ptt->pdev, "failed to create sysfs attribute for filter %s\n",
filter->name);
return ret;
}
static void hisi_ptt_remove_filter_attr(struct hisi_ptt *hisi_ptt,
struct hisi_ptt_filter_desc *filter)
{
if (filter->is_port)
hisi_ptt_remove_rp_filter_attr(hisi_ptt, filter);
else
hisi_ptt_remove_req_filter_attr(hisi_ptt, filter);
}
static void hisi_ptt_remove_all_filter_attributes(void *data)
{
struct hisi_ptt_filter_desc *filter;
struct hisi_ptt *hisi_ptt = data;
mutex_lock(&hisi_ptt->filter_lock);
list_for_each_entry(filter, &hisi_ptt->req_filters, list)
hisi_ptt_remove_filter_attr(hisi_ptt, filter);
list_for_each_entry(filter, &hisi_ptt->port_filters, list)
hisi_ptt_remove_filter_attr(hisi_ptt, filter);
hisi_ptt->sysfs_inited = false;
mutex_unlock(&hisi_ptt->filter_lock);
}
static int hisi_ptt_init_filter_attributes(struct hisi_ptt *hisi_ptt)
{
struct hisi_ptt_filter_desc *filter;
int ret;
mutex_lock(&hisi_ptt->filter_lock);
/*
	 * Register the reset callback first. On reset we traverse the filter
	 * list to remove the sysfs attributes, so the callback can be called
	 * safely even if the filter attribute creation below fails.
*/
ret = devm_add_action(&hisi_ptt->pdev->dev,
hisi_ptt_remove_all_filter_attributes,
hisi_ptt);
if (ret)
goto out;
list_for_each_entry(filter, &hisi_ptt->port_filters, list) {
ret = hisi_ptt_create_filter_attr(hisi_ptt, filter);
if (ret)
goto out;
}
list_for_each_entry(filter, &hisi_ptt->req_filters, list) {
ret = hisi_ptt_create_filter_attr(hisi_ptt, filter);
if (ret)
goto out;
}
hisi_ptt->sysfs_inited = true;
out:
mutex_unlock(&hisi_ptt->filter_lock);
return ret;
}
static void hisi_ptt_update_filters(struct work_struct *work)
{
struct delayed_work *delayed_work = to_delayed_work(work);
struct hisi_ptt_filter_update_info info;
struct hisi_ptt_filter_desc *filter;
struct hisi_ptt *hisi_ptt;
hisi_ptt = container_of(delayed_work, struct hisi_ptt, work);
if (!mutex_trylock(&hisi_ptt->filter_lock)) {
schedule_delayed_work(&hisi_ptt->work, HISI_PTT_WORK_DELAY_MS);
return;
}
while (kfifo_get(&hisi_ptt->filter_update_kfifo, &info)) {
if (info.is_add) {
/*
			 * Notify the user if we fail to add this filter; the
			 * others still work and remain available. See the
			 * comments in hisi_ptt_init_filters().
*/
filter = hisi_ptt_alloc_add_filter(hisi_ptt, info.devid, info.is_port);
if (!filter)
continue;
/*
			 * If the filters' sysfs entries haven't been
			 * initialized, we're still at the probe stage. Add
			 * the filter to the list; later,
			 * hisi_ptt_init_filter_attributes() will create
			 * sysfs attributes for all the filters.
*/
if (hisi_ptt->sysfs_inited &&
hisi_ptt_create_filter_attr(hisi_ptt, filter)) {
hisi_ptt_del_free_filter(hisi_ptt, filter);
continue;
}
} else {
struct hisi_ptt_filter_desc *tmp;
struct list_head *target_list;
target_list = info.is_port ? &hisi_ptt->port_filters :
&hisi_ptt->req_filters;
list_for_each_entry_safe(filter, tmp, target_list, list)
if (filter->devid == info.devid) {
if (hisi_ptt->sysfs_inited)
hisi_ptt_remove_filter_attr(hisi_ptt, filter);
hisi_ptt_del_free_filter(hisi_ptt, filter);
break;
}
}
}
mutex_unlock(&hisi_ptt->filter_lock);
}
/*
* A PCI bus notifier is used here for dynamically updating the filter
* list.
*/
static int hisi_ptt_notifier_call(struct notifier_block *nb, unsigned long action,
void *data)
{
struct hisi_ptt *hisi_ptt = container_of(nb, struct hisi_ptt, hisi_ptt_nb);
struct hisi_ptt_filter_update_info info;
struct pci_dev *pdev, *root_port;
struct device *dev = data;
u32 port_devid;
pdev = to_pci_dev(dev);
root_port = pcie_find_root_port(pdev);
if (!root_port)
return 0;
port_devid = pci_dev_id(root_port);
if (port_devid < hisi_ptt->lower_bdf ||
port_devid > hisi_ptt->upper_bdf)
return 0;
info.is_port = pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT;
info.devid = pci_dev_id(pdev);
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
info.is_add = true;
break;
case BUS_NOTIFY_DEL_DEVICE:
info.is_add = false;
break;
default:
return 0;
}
/*
	 * The FIFO size is 16, which is sufficient for almost all cases,
	 * since each PCIe core has at most 8 Root Ports (typically only
	 * 1-4 Root Ports). On failure, log the failed filter and let the
	 * user handle it.
*/
if (kfifo_in_spinlocked(&hisi_ptt->filter_update_kfifo, &info, 1,
&hisi_ptt->filter_update_lock))
schedule_delayed_work(&hisi_ptt->work, 0);
else
pci_warn(hisi_ptt->pdev,
"filter update fifo overflow for target %s\n",
pci_name(pdev));
return 0;
}
static int hisi_ptt_init_filters(struct pci_dev *pdev, void *data)
{
struct pci_dev *root_port = pcie_find_root_port(pdev);
struct hisi_ptt_filter_desc *filter;
struct hisi_ptt *hisi_ptt = data;
u32 port_devid;
if (!root_port)
return 0;
port_devid = pci_dev_id(root_port);
if (port_devid < hisi_ptt->lower_bdf ||
port_devid > hisi_ptt->upper_bdf)
return 0;
/*
	 * We won't fail the probe if filter allocation fails here. The
	 * filters will be partially initialized, and users can tell which
	 * filter failed from the log. Other functions of the PTT device
	 * remain available.
*/
filter = hisi_ptt_alloc_add_filter(hisi_ptt, pci_dev_id(pdev),
pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT);
if (!filter)
return -ENOMEM;
return 0;
}
static void hisi_ptt_release_filters(void *data)
{
struct hisi_ptt_filter_desc *filter, *tmp;
struct hisi_ptt *hisi_ptt = data;
list_for_each_entry_safe(filter, tmp, &hisi_ptt->req_filters, list)
hisi_ptt_del_free_filter(hisi_ptt, filter);
list_for_each_entry_safe(filter, tmp, &hisi_ptt->port_filters, list)
hisi_ptt_del_free_filter(hisi_ptt, filter);
}
static int hisi_ptt_config_trace_buf(struct hisi_ptt *hisi_ptt)
{
struct hisi_ptt_trace_ctrl *ctrl = &hisi_ptt->trace_ctrl;
struct device *dev = &hisi_ptt->pdev->dev;
int i;
ctrl->trace_buf = devm_kcalloc(dev, HISI_PTT_TRACE_BUF_CNT,
sizeof(*ctrl->trace_buf), GFP_KERNEL);
if (!ctrl->trace_buf)
return -ENOMEM;
for (i = 0; i < HISI_PTT_TRACE_BUF_CNT; ++i) {
ctrl->trace_buf[i].addr = dmam_alloc_coherent(dev, HISI_PTT_TRACE_BUF_SIZE,
&ctrl->trace_buf[i].dma,
GFP_KERNEL);
if (!ctrl->trace_buf[i].addr)
return -ENOMEM;
}
/* Configure the trace DMA buffer */
for (i = 0; i < HISI_PTT_TRACE_BUF_CNT; i++) {
writel(lower_32_bits(ctrl->trace_buf[i].dma),
hisi_ptt->iobase + HISI_PTT_TRACE_ADDR_BASE_LO_0 +
i * HISI_PTT_TRACE_ADDR_STRIDE);
writel(upper_32_bits(ctrl->trace_buf[i].dma),
hisi_ptt->iobase + HISI_PTT_TRACE_ADDR_BASE_HI_0 +
i * HISI_PTT_TRACE_ADDR_STRIDE);
}
writel(HISI_PTT_TRACE_BUF_SIZE, hisi_ptt->iobase + HISI_PTT_TRACE_ADDR_SIZE);
return 0;
}
static int hisi_ptt_init_ctrls(struct hisi_ptt *hisi_ptt)
{
struct pci_dev *pdev = hisi_ptt->pdev;
struct pci_bus *bus;
int ret;
u32 reg;
INIT_DELAYED_WORK(&hisi_ptt->work, hisi_ptt_update_filters);
INIT_KFIFO(hisi_ptt->filter_update_kfifo);
spin_lock_init(&hisi_ptt->filter_update_lock);
INIT_LIST_HEAD(&hisi_ptt->port_filters);
INIT_LIST_HEAD(&hisi_ptt->req_filters);
mutex_init(&hisi_ptt->filter_lock);
ret = hisi_ptt_config_trace_buf(hisi_ptt);
if (ret)
return ret;
/*
* The device range register provides the information about the root
* ports which the RCiEP can control and trace. The RCiEP and the root
	 * ports which it supports are on the same PCIe core, with the same
	 * domain number but possibly different bus numbers. The device range
	 * register tells us which root ports we can support: Bit[31:16]
	 * indicates the upper BDF number of the root ports, while Bit[15:0]
	 * indicates the lower.
*/
reg = readl(hisi_ptt->iobase + HISI_PTT_DEVICE_RANGE);
hisi_ptt->upper_bdf = FIELD_GET(HISI_PTT_DEVICE_RANGE_UPPER, reg);
hisi_ptt->lower_bdf = FIELD_GET(HISI_PTT_DEVICE_RANGE_LOWER, reg);
bus = pci_find_bus(pci_domain_nr(pdev->bus), PCI_BUS_NUM(hisi_ptt->upper_bdf));
if (bus)
pci_walk_bus(bus, hisi_ptt_init_filters, hisi_ptt);
ret = devm_add_action_or_reset(&pdev->dev, hisi_ptt_release_filters, hisi_ptt);
if (ret)
return ret;
hisi_ptt->trace_ctrl.on_cpu = -1;
return 0;
}
static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct hisi_ptt *hisi_ptt = to_hisi_ptt(dev_get_drvdata(dev));
const cpumask_t *cpumask = cpumask_of_node(dev_to_node(&hisi_ptt->pdev->dev));
return cpumap_print_to_pagebuf(true, buf, cpumask);
}
static DEVICE_ATTR_RO(cpumask);
static struct attribute *hisi_ptt_cpumask_attrs[] = {
&dev_attr_cpumask.attr,
NULL
};
static const struct attribute_group hisi_ptt_cpumask_attr_group = {
.attrs = hisi_ptt_cpumask_attrs,
};
/*
* Bit 19 indicates the filter type, 1 for Root Port filter and 0 for Requester
* filter. Bit[15:0] indicates the filter value, for Root Port filter it's
* a bit mask of desired ports and for Requester filter it's the Requester ID
* of the desired PCIe function. Bit[18:16] is reserved for extension.
*
* See hisi_ptt.rst documentation for detailed information.
*/
PMU_FORMAT_ATTR(filter, "config:0-19");
PMU_FORMAT_ATTR(direction, "config:20-23");
PMU_FORMAT_ATTR(type, "config:24-31");
PMU_FORMAT_ATTR(format, "config:32-35");
static struct attribute *hisi_ptt_pmu_format_attrs[] = {
&format_attr_filter.attr,
&format_attr_direction.attr,
&format_attr_type.attr,
&format_attr_format.attr,
NULL
};
static struct attribute_group hisi_ptt_pmu_format_group = {
.name = "format",
.attrs = hisi_ptt_pmu_format_attrs,
};
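/*
 * Illustrative helper (not used by the driver): compose a
 * perf_event_attr::config value matching the "config:N-M" format
 * strings above. The bit positions are written out with GENMASK_ULL()
 * here so the sketch stands alone; the driver itself decodes them with
 * the HISI_PTT_PMU_*_MASK definitions from hisi_ptt.h.
 */
static inline u64 hisi_ptt_example_config(bool is_port, u16 filter,
					  u8 direction, u8 type, u8 format)
{
	u64 config = 0;
	config |= FIELD_PREP(GENMASK_ULL(15, 0), filter);
	if (is_port)
		config |= BIT_ULL(19);	/* bit 19: Root Port filter flag */
	config |= FIELD_PREP(GENMASK_ULL(23, 20), direction);
	config |= FIELD_PREP(GENMASK_ULL(31, 24), type);
	config |= FIELD_PREP(GENMASK_ULL(35, 32), format);
	return config;
}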
static ssize_t hisi_ptt_filter_multiselect_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct dev_ext_attribute *ext_attr;
ext_attr = container_of(attr, struct dev_ext_attribute, attr);
return sysfs_emit(buf, "%s\n", (char *)ext_attr->var);
}
static struct dev_ext_attribute root_port_filters_multiselect = {
.attr = {
.attr = { .name = "multiselect", .mode = 0400 },
.show = hisi_ptt_filter_multiselect_show,
},
.var = "1",
};
static struct attribute *hisi_ptt_pmu_root_ports_attrs[] = {
&root_port_filters_multiselect.attr.attr,
NULL
};
static struct attribute_group hisi_ptt_pmu_root_ports_group = {
.name = HISI_PTT_RP_FILTERS_GRP_NAME,
.attrs = hisi_ptt_pmu_root_ports_attrs,
};
static struct dev_ext_attribute requester_filters_multiselect = {
.attr = {
.attr = { .name = "multiselect", .mode = 0400 },
.show = hisi_ptt_filter_multiselect_show,
},
.var = "0",
};
static struct attribute *hisi_ptt_pmu_requesters_attrs[] = {
&requester_filters_multiselect.attr.attr,
NULL
};
static struct attribute_group hisi_ptt_pmu_requesters_group = {
.name = HISI_PTT_REQ_FILTERS_GRP_NAME,
.attrs = hisi_ptt_pmu_requesters_attrs,
};
static const struct attribute_group *hisi_ptt_pmu_groups[] = {
&hisi_ptt_cpumask_attr_group,
&hisi_ptt_pmu_format_group,
&hisi_ptt_tune_group,
&hisi_ptt_pmu_root_ports_group,
&hisi_ptt_pmu_requesters_group,
NULL
};
static int hisi_ptt_trace_valid_direction(u32 val)
{
/*
* The direction values have different effects according to the data
* format (specified in the parentheses). TLP set A/B means different
	 * sets of TLP types. See the hisi_ptt.rst documentation for more details.
*/
static const u32 hisi_ptt_trace_available_direction[] = {
0, /* inbound(4DW) or reserved(8DW) */
1, /* outbound(4DW) */
2, /* {in, out}bound(4DW) or inbound(8DW), TLP set A */
3, /* {in, out}bound(4DW) or inbound(8DW), TLP set B */
};
int i;
for (i = 0; i < ARRAY_SIZE(hisi_ptt_trace_available_direction); i++) {
if (val == hisi_ptt_trace_available_direction[i])
return 0;
}
return -EINVAL;
}
static int hisi_ptt_trace_valid_type(u32 val)
{
/* Different types can be set simultaneously */
static const u32 hisi_ptt_trace_available_type[] = {
1, /* posted_request */
2, /* non-posted_request */
4, /* completion */
};
int i;
if (!val)
return -EINVAL;
/*
* Walk the available list and clear the valid bits of
	 * the config. If any bit remains set after the
	 * walk, the config is invalid.
*/
for (i = 0; i < ARRAY_SIZE(hisi_ptt_trace_available_type); i++)
val &= ~hisi_ptt_trace_available_type[i];
if (val)
return -EINVAL;
return 0;
}
static int hisi_ptt_trace_valid_format(u32 val)
{
	static const u32 hisi_ptt_trace_available_format[] = {
		0, /* 4DW */
		1, /* 8DW */
	};
	int i;
	for (i = 0; i < ARRAY_SIZE(hisi_ptt_trace_available_format); i++) {
		if (val == hisi_ptt_trace_available_format[i])
return 0;
}
return -EINVAL;
}
static int hisi_ptt_trace_valid_filter(struct hisi_ptt *hisi_ptt, u64 config)
{
unsigned long val, port_mask = hisi_ptt->port_mask;
struct hisi_ptt_filter_desc *filter;
int ret = 0;
hisi_ptt->trace_ctrl.is_port = FIELD_GET(HISI_PTT_PMU_FILTER_IS_PORT, config);
val = FIELD_GET(HISI_PTT_PMU_FILTER_VAL_MASK, config);
/*
	 * Port filters are defined as a bit mask. For port filters, check
	 * that the bits in @val are within the range of hisi_ptt->port_mask
	 * and that the mask is not empty; otherwise the user has specified
	 * some unsupported root ports.
*
* For Requester ID filters, walk the available filter list to see
* whether we have one matched.
*/
mutex_lock(&hisi_ptt->filter_lock);
if (!hisi_ptt->trace_ctrl.is_port) {
list_for_each_entry(filter, &hisi_ptt->req_filters, list) {
if (val == hisi_ptt_get_filter_val(filter->devid, filter->is_port))
goto out;
}
} else if (bitmap_subset(&val, &port_mask, BITS_PER_LONG)) {
goto out;
}
ret = -EINVAL;
out:
mutex_unlock(&hisi_ptt->filter_lock);
return ret;
}
static void hisi_ptt_pmu_init_configs(struct hisi_ptt *hisi_ptt, struct perf_event *event)
{
struct hisi_ptt_trace_ctrl *ctrl = &hisi_ptt->trace_ctrl;
u32 val;
val = FIELD_GET(HISI_PTT_PMU_FILTER_VAL_MASK, event->attr.config);
hisi_ptt->trace_ctrl.filter = val;
val = FIELD_GET(HISI_PTT_PMU_DIRECTION_MASK, event->attr.config);
ctrl->direction = val;
val = FIELD_GET(HISI_PTT_PMU_TYPE_MASK, event->attr.config);
ctrl->type = val;
val = FIELD_GET(HISI_PTT_PMU_FORMAT_MASK, event->attr.config);
ctrl->format = val;
}
static int hisi_ptt_pmu_event_init(struct perf_event *event)
{
struct hisi_ptt *hisi_ptt = to_hisi_ptt(event->pmu);
int ret;
u32 val;
if (event->cpu < 0) {
dev_dbg(event->pmu->dev, "Per-task mode not supported\n");
return -EOPNOTSUPP;
}
if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type)
return -ENOENT;
ret = hisi_ptt_trace_valid_filter(hisi_ptt, event->attr.config);
if (ret < 0)
return ret;
val = FIELD_GET(HISI_PTT_PMU_DIRECTION_MASK, event->attr.config);
ret = hisi_ptt_trace_valid_direction(val);
if (ret < 0)
return ret;
val = FIELD_GET(HISI_PTT_PMU_TYPE_MASK, event->attr.config);
ret = hisi_ptt_trace_valid_type(val);
if (ret < 0)
return ret;
val = FIELD_GET(HISI_PTT_PMU_FORMAT_MASK, event->attr.config);
return hisi_ptt_trace_valid_format(val);
}
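/*
 * Usage sketch (illustrative; the PMU instance name and filter value
 * are assumed): an event accepted by the checks above can be opened
 * from userspace with perf, e.g.
 *
 *	perf record -e hisi_ptt0_2/filter=0x80001,type=1,direction=0,format=0/ \
 *		-- sleep 5
 *
 * where filter=0x80001 sets bit 19 (Root Port filter) with port mask
 * 0x1, type=1 traces posted requests, and format=0 selects 4DW.
 */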
static void *hisi_ptt_pmu_setup_aux(struct perf_event *event, void **pages,
int nr_pages, bool overwrite)
{
struct hisi_ptt_pmu_buf *buf;
struct page **pagelist;
int i;
if (overwrite) {
dev_warn(event->pmu->dev, "Overwrite mode is not supported\n");
return NULL;
}
	/* If the AUX pages are smaller than the trace buffers, we cannot start the trace */
if (nr_pages < HISI_PTT_TRACE_TOTAL_BUF_SIZE / PAGE_SIZE)
return NULL;
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return NULL;
pagelist = kcalloc(nr_pages, sizeof(*pagelist), GFP_KERNEL);
if (!pagelist)
goto err;
for (i = 0; i < nr_pages; i++)
pagelist[i] = virt_to_page(pages[i]);
buf->base = vmap(pagelist, nr_pages, VM_MAP, PAGE_KERNEL);
if (!buf->base) {
kfree(pagelist);
goto err;
}
buf->nr_pages = nr_pages;
buf->length = nr_pages * PAGE_SIZE;
buf->pos = 0;
kfree(pagelist);
return buf;
err:
kfree(buf);
return NULL;
}
static void hisi_ptt_pmu_free_aux(void *aux)
{
struct hisi_ptt_pmu_buf *buf = aux;
vunmap(buf->base);
kfree(buf);
}
static void hisi_ptt_pmu_start(struct perf_event *event, int flags)
{
struct hisi_ptt *hisi_ptt = to_hisi_ptt(event->pmu);
struct perf_output_handle *handle = &hisi_ptt->trace_ctrl.handle;
struct hw_perf_event *hwc = &event->hw;
struct device *dev = event->pmu->dev;
struct hisi_ptt_pmu_buf *buf;
int cpu = event->cpu;
int ret;
hwc->state = 0;
	/* Serialize the perf processes if the user specified several CPUs */
spin_lock(&hisi_ptt->pmu_lock);
if (hisi_ptt->trace_ctrl.started) {
dev_dbg(dev, "trace has already started\n");
goto stop;
}
/*
	 * Handle the interrupt on the same CPU that starts the trace to avoid
	 * a context mismatch. Otherwise we'll trigger the WARN from the perf
	 * core in event_function_local(). If the CPU passed in is offline,
	 * we'll fail here; just log it, since there is nothing more we can do.
*/
ret = irq_set_affinity(hisi_ptt->trace_irq, cpumask_of(cpu));
if (ret)
dev_warn(dev, "failed to set the affinity of trace interrupt\n");
hisi_ptt->trace_ctrl.on_cpu = cpu;
buf = perf_aux_output_begin(handle, event);
if (!buf) {
dev_dbg(dev, "aux output begin failed\n");
goto stop;
}
buf->pos = handle->head % buf->length;
hisi_ptt_pmu_init_configs(hisi_ptt, event);
ret = hisi_ptt_trace_start(hisi_ptt);
if (ret) {
dev_dbg(dev, "trace start failed, ret = %d\n", ret);
perf_aux_output_end(handle, 0);
goto stop;
}
spin_unlock(&hisi_ptt->pmu_lock);
return;
stop:
event->hw.state |= PERF_HES_STOPPED;
spin_unlock(&hisi_ptt->pmu_lock);
}
static void hisi_ptt_pmu_stop(struct perf_event *event, int flags)
{
struct hisi_ptt *hisi_ptt = to_hisi_ptt(event->pmu);
struct hw_perf_event *hwc = &event->hw;
if (hwc->state & PERF_HES_STOPPED)
return;
spin_lock(&hisi_ptt->pmu_lock);
if (hisi_ptt->trace_ctrl.started) {
hisi_ptt_trace_end(hisi_ptt);
if (!hisi_ptt_wait_trace_hw_idle(hisi_ptt))
dev_warn(event->pmu->dev, "Device is still busy\n");
hisi_ptt_update_aux(hisi_ptt, hisi_ptt->trace_ctrl.buf_index, true);
}
spin_unlock(&hisi_ptt->pmu_lock);
hwc->state |= PERF_HES_STOPPED;
perf_event_update_userpage(event);
hwc->state |= PERF_HES_UPTODATE;
}
static int hisi_ptt_pmu_add(struct perf_event *event, int flags)
{
struct hisi_ptt *hisi_ptt = to_hisi_ptt(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int cpu = event->cpu;
	/* Only allow CPUs on the device's node to add the event */
if (!cpumask_test_cpu(cpu, cpumask_of_node(dev_to_node(&hisi_ptt->pdev->dev))))
return 0;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (flags & PERF_EF_START) {
hisi_ptt_pmu_start(event, PERF_EF_RELOAD);
if (hwc->state & PERF_HES_STOPPED)
return -EINVAL;
}
return 0;
}
static void hisi_ptt_pmu_del(struct perf_event *event, int flags)
{
hisi_ptt_pmu_stop(event, PERF_EF_UPDATE);
}
static void hisi_ptt_remove_cpuhp_instance(void *hotplug_node)
{
cpuhp_state_remove_instance_nocalls(hisi_ptt_pmu_online, hotplug_node);
}
static void hisi_ptt_unregister_pmu(void *pmu)
{
perf_pmu_unregister(pmu);
}
static int hisi_ptt_register_pmu(struct hisi_ptt *hisi_ptt)
{
u16 core_id, sicl_id;
char *pmu_name;
u32 reg;
int ret;
ret = cpuhp_state_add_instance_nocalls(hisi_ptt_pmu_online,
&hisi_ptt->hotplug_node);
if (ret)
return ret;
ret = devm_add_action_or_reset(&hisi_ptt->pdev->dev,
hisi_ptt_remove_cpuhp_instance,
&hisi_ptt->hotplug_node);
if (ret)
return ret;
mutex_init(&hisi_ptt->tune_lock);
spin_lock_init(&hisi_ptt->pmu_lock);
hisi_ptt->hisi_ptt_pmu = (struct pmu) {
.module = THIS_MODULE,
.capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_sw_context,
.attr_groups = hisi_ptt_pmu_groups,
.event_init = hisi_ptt_pmu_event_init,
.setup_aux = hisi_ptt_pmu_setup_aux,
.free_aux = hisi_ptt_pmu_free_aux,
.start = hisi_ptt_pmu_start,
.stop = hisi_ptt_pmu_stop,
.add = hisi_ptt_pmu_add,
.del = hisi_ptt_pmu_del,
};
reg = readl(hisi_ptt->iobase + HISI_PTT_LOCATION);
core_id = FIELD_GET(HISI_PTT_CORE_ID, reg);
sicl_id = FIELD_GET(HISI_PTT_SICL_ID, reg);
pmu_name = devm_kasprintf(&hisi_ptt->pdev->dev, GFP_KERNEL, "hisi_ptt%u_%u",
sicl_id, core_id);
if (!pmu_name)
return -ENOMEM;
ret = perf_pmu_register(&hisi_ptt->hisi_ptt_pmu, pmu_name, -1);
if (ret)
return ret;
return devm_add_action_or_reset(&hisi_ptt->pdev->dev,
hisi_ptt_unregister_pmu,
&hisi_ptt->hisi_ptt_pmu);
}
static void hisi_ptt_unregister_filter_update_notifier(void *data)
{
struct hisi_ptt *hisi_ptt = data;
bus_unregister_notifier(&pci_bus_type, &hisi_ptt->hisi_ptt_nb);
/* Cancel any work that has been queued */
cancel_delayed_work_sync(&hisi_ptt->work);
}
/* Register the bus notifier for dynamically updating the filter list */
static int hisi_ptt_register_filter_update_notifier(struct hisi_ptt *hisi_ptt)
{
int ret;
hisi_ptt->hisi_ptt_nb.notifier_call = hisi_ptt_notifier_call;
ret = bus_register_notifier(&pci_bus_type, &hisi_ptt->hisi_ptt_nb);
if (ret)
return ret;
return devm_add_action_or_reset(&hisi_ptt->pdev->dev,
hisi_ptt_unregister_filter_update_notifier,
hisi_ptt);
}
/*
 * PTT trace DMA can only use direct mappings due to a hardware
 * restriction. Check that there is either no IOMMU or that the IOMMU
 * domain policy is passthrough; otherwise the trace cannot work.
 *
 * The PTT device is supposed to sit behind an ARM SMMUv3, which
 * should pass the device through via a quirk.
*/
static int hisi_ptt_check_iommu_mapping(struct pci_dev *pdev)
{
struct iommu_domain *iommu_domain;
iommu_domain = iommu_get_domain_for_dev(&pdev->dev);
if (!iommu_domain || iommu_domain->type == IOMMU_DOMAIN_IDENTITY)
return 0;
return -EOPNOTSUPP;
}
static int hisi_ptt_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct hisi_ptt *hisi_ptt;
int ret;
ret = hisi_ptt_check_iommu_mapping(pdev);
if (ret) {
pci_err(pdev, "requires direct DMA mappings\n");
return ret;
}
hisi_ptt = devm_kzalloc(&pdev->dev, sizeof(*hisi_ptt), GFP_KERNEL);
if (!hisi_ptt)
return -ENOMEM;
hisi_ptt->pdev = pdev;
pci_set_drvdata(pdev, hisi_ptt);
ret = pcim_enable_device(pdev);
if (ret) {
pci_err(pdev, "failed to enable device, ret = %d\n", ret);
return ret;
}
ret = pcim_iomap_regions(pdev, BIT(2), DRV_NAME);
if (ret) {
pci_err(pdev, "failed to remap io memory, ret = %d\n", ret);
return ret;
}
hisi_ptt->iobase = pcim_iomap_table(pdev)[2];
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
pci_err(pdev, "failed to set 64 bit dma mask, ret = %d\n", ret);
return ret;
}
pci_set_master(pdev);
ret = hisi_ptt_register_irq(hisi_ptt);
if (ret)
return ret;
ret = hisi_ptt_init_ctrls(hisi_ptt);
if (ret) {
pci_err(pdev, "failed to init controls, ret = %d\n", ret);
return ret;
}
ret = hisi_ptt_register_filter_update_notifier(hisi_ptt);
if (ret)
pci_warn(pdev, "failed to register filter update notifier, ret = %d", ret);
ret = hisi_ptt_register_pmu(hisi_ptt);
if (ret) {
pci_err(pdev, "failed to register PMU device, ret = %d", ret);
return ret;
}
ret = hisi_ptt_init_filter_attributes(hisi_ptt);
if (ret) {
pci_err(pdev, "failed to init sysfs filter attributes, ret = %d", ret);
return ret;
}
return 0;
}
static const struct pci_device_id hisi_ptt_id_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa12e) },
{ }
};
MODULE_DEVICE_TABLE(pci, hisi_ptt_id_tbl);
static struct pci_driver hisi_ptt_driver = {
.name = DRV_NAME,
.id_table = hisi_ptt_id_tbl,
.probe = hisi_ptt_probe,
};
static int hisi_ptt_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
struct hisi_ptt *hisi_ptt;
struct device *dev;
int target, src;
hisi_ptt = hlist_entry_safe(node, struct hisi_ptt, hotplug_node);
src = hisi_ptt->trace_ctrl.on_cpu;
dev = hisi_ptt->hisi_ptt_pmu.dev;
if (!hisi_ptt->trace_ctrl.started || src != cpu)
return 0;
target = cpumask_any_but(cpumask_of_node(dev_to_node(&hisi_ptt->pdev->dev)), cpu);
if (target >= nr_cpu_ids) {
dev_err(dev, "no available cpu for perf context migration\n");
return 0;
}
perf_pmu_migrate_context(&hisi_ptt->hisi_ptt_pmu, src, target);
/*
	 * Also make sure the interrupt is bound to the migrated CPU. Warn
	 * the user on failure here.
*/
if (irq_set_affinity(hisi_ptt->trace_irq, cpumask_of(target)))
dev_warn(dev, "failed to set the affinity of trace interrupt\n");
hisi_ptt->trace_ctrl.on_cpu = target;
return 0;
}
static int __init hisi_ptt_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRV_NAME, NULL,
hisi_ptt_cpu_teardown);
if (ret < 0)
return ret;
hisi_ptt_pmu_online = ret;
ret = pci_register_driver(&hisi_ptt_driver);
if (ret)
cpuhp_remove_multi_state(hisi_ptt_pmu_online);
return ret;
}
module_init(hisi_ptt_init);
static void __exit hisi_ptt_exit(void)
{
pci_unregister_driver(&hisi_ptt_driver);
cpuhp_remove_multi_state(hisi_ptt_pmu_online);
}
module_exit(hisi_ptt_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yicong Yang <[email protected]>");
MODULE_DESCRIPTION("Driver for HiSilicon PCIe tune and trace device");
| linux-master | drivers/hwtracing/ptt/hisi_ptt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2007-2008 Pierre Ossman
*/
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include "core.h"
#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#define RESULT_OK 0
#define RESULT_FAIL 1
#define RESULT_UNSUP_HOST 2
#define RESULT_UNSUP_CARD 3
#define BUFFER_ORDER 2
#define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
#define TEST_ALIGN_END 8
/*
* Limit the test area size to the maximum MMC HC erase group size. Note that
* the maximum SD allocation unit size is just 4MiB.
*/
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
/**
* struct mmc_test_pages - pages allocated by 'alloc_pages()'.
* @page: first page in the allocation
* @order: order of the number of pages allocated
*/
struct mmc_test_pages {
struct page *page;
unsigned int order;
};
/**
* struct mmc_test_mem - allocated memory.
* @arr: array of allocations
* @cnt: number of allocations
*/
struct mmc_test_mem {
struct mmc_test_pages *arr;
unsigned int cnt;
};
/**
* struct mmc_test_area - information for performance tests.
* @max_sz: test area size (in bytes)
* @dev_addr: address on card at which to do performance tests
* @max_tfr: maximum transfer size allowed by driver (in bytes)
* @max_segs: maximum segments allowed by driver in scatterlist @sg
* @max_seg_sz: maximum segment size allowed by driver
* @blocks: number of (512 byte) blocks currently mapped by @sg
* @sg_len: length of currently mapped scatterlist @sg
* @mem: allocated memory
* @sg: scatterlist
* @sg_areq: scatterlist for non-blocking request
*/
struct mmc_test_area {
unsigned long max_sz;
unsigned int dev_addr;
unsigned int max_tfr;
unsigned int max_segs;
unsigned int max_seg_sz;
unsigned int blocks;
unsigned int sg_len;
struct mmc_test_mem *mem;
struct scatterlist *sg;
struct scatterlist *sg_areq;
};
/**
* struct mmc_test_transfer_result - transfer results for performance tests.
* @link: double-linked list
 * @count: number of groups of sectors to check
 * @sectors: number of sectors to check in one group
* @ts: time values of transfer
* @rate: calculated transfer rate
* @iops: I/O operations per second (times 100)
*/
struct mmc_test_transfer_result {
struct list_head link;
unsigned int count;
unsigned int sectors;
struct timespec64 ts;
unsigned int rate;
unsigned int iops;
};
/**
* struct mmc_test_general_result - results for tests.
* @link: double-linked list
* @card: card under test
* @testcase: number of test case
* @result: result of test run
 * @tr_lst: list of transfer measurements, if any, as mmc_test_transfer_result
*/
struct mmc_test_general_result {
struct list_head link;
struct mmc_card *card;
int testcase;
int result;
struct list_head tr_lst;
};
/**
* struct mmc_test_dbgfs_file - debugfs related file.
* @link: double-linked list
* @card: card under test
* @file: file created under debugfs
*/
struct mmc_test_dbgfs_file {
struct list_head link;
struct mmc_card *card;
struct dentry *file;
};
/**
* struct mmc_test_card - test information.
* @card: card under test
* @scratch: transfer buffer
* @buffer: transfer buffer
* @highmem: buffer for highmem tests
* @area: information for performance tests
* @gr: pointer to results of current testcase
*/
struct mmc_test_card {
struct mmc_card *card;
u8 scratch[BUFFER_SIZE];
u8 *buffer;
#ifdef CONFIG_HIGHMEM
struct page *highmem;
#endif
struct mmc_test_area area;
struct mmc_test_general_result *gr;
};
enum mmc_test_prep_media {
MMC_TEST_PREP_NONE = 0,
MMC_TEST_PREP_WRITE_FULL = 1 << 0,
MMC_TEST_PREP_ERASE = 1 << 1,
};
struct mmc_test_multiple_rw {
unsigned int *sg_len;
unsigned int *bs;
unsigned int len;
unsigned int size;
bool do_write;
bool do_nonblock_req;
enum mmc_test_prep_media prepare;
};
/*******************************************************************/
/* General helper functions */
/*******************************************************************/
/*
* Configure correct block size in card
*/
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
return mmc_set_blocklen(test->card, size);
}
static bool mmc_test_card_cmd23(struct mmc_card *card)
{
return mmc_card_mmc(card) ||
(mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
}
static void mmc_test_prepare_sbc(struct mmc_test_card *test,
struct mmc_request *mrq, unsigned int blocks)
{
struct mmc_card *card = test->card;
if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
!mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
(card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
mrq->sbc = NULL;
return;
}
mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
mrq->sbc->arg = blocks;
mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
}
/*
* Fill in the mmc_request structure given a set of transfer parameters.
*/
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
return;
if (blocks > 1) {
mrq->cmd->opcode = write ?
MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
} else {
mrq->cmd->opcode = write ?
MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
}
mrq->cmd->arg = dev_addr;
if (!mmc_card_blockaddr(test->card))
mrq->cmd->arg <<= 9;
mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
if (blocks == 1)
mrq->stop = NULL;
else {
mrq->stop->opcode = MMC_STOP_TRANSMISSION;
mrq->stop->arg = 0;
mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
}
mrq->data->blksz = blksz;
mrq->data->blocks = blocks;
mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
mrq->data->sg = sg;
mrq->data->sg_len = sg_len;
mmc_test_prepare_sbc(test, mrq, blocks);
mmc_set_data_timeout(mrq->data, test->card);
}
static int mmc_test_busy(struct mmc_command *cmd)
{
return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
}
/*
* Wait for the card to finish the busy state
*/
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
int ret, busy;
struct mmc_command cmd = {};
busy = 0;
do {
memset(&cmd, 0, sizeof(struct mmc_command));
cmd.opcode = MMC_SEND_STATUS;
cmd.arg = test->card->rca << 16;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
if (ret)
break;
if (!busy && mmc_test_busy(&cmd)) {
busy = 1;
if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
pr_info("%s: Warning: Host did not wait for busy state to end.\n",
mmc_hostname(test->card->host));
}
} while (mmc_test_busy(&cmd));
return ret;
}
/*
* Transfer a single sector of kernel addressable data
*/
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
u8 *buffer, unsigned addr, unsigned blksz, int write)
{
struct mmc_request mrq = {};
struct mmc_command cmd = {};
struct mmc_command stop = {};
struct mmc_data data = {};
struct scatterlist sg;
mrq.cmd = &cmd;
mrq.data = &data;
mrq.stop = &stop;
sg_init_one(&sg, buffer, blksz);
mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
mmc_wait_for_req(test->card->host, &mrq);
if (cmd.error)
return cmd.error;
if (data.error)
return data.error;
return mmc_test_wait_busy(test);
}
static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
if (!mem)
return;
while (mem->cnt--)
__free_pages(mem->arr[mem->cnt].page,
mem->arr[mem->cnt].order);
kfree(mem->arr);
kfree(mem);
}
/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz. If
 * memory is scarce, do not exceed 1/16th of the total lowmem pages. Also do
 * not exceed the maximum number of segments, and try not to make segments
 * much bigger than the maximum segment size.
*/
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
unsigned long max_sz,
unsigned int max_segs,
unsigned int max_seg_sz)
{
unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
unsigned long page_cnt = 0;
unsigned long limit = nr_free_buffer_pages() >> 4;
struct mmc_test_mem *mem;
if (max_page_cnt > limit)
max_page_cnt = limit;
if (min_page_cnt > max_page_cnt)
min_page_cnt = max_page_cnt;
if (max_seg_page_cnt > max_page_cnt)
max_seg_page_cnt = max_page_cnt;
if (max_segs > max_page_cnt)
max_segs = max_page_cnt;
mem = kzalloc(sizeof(*mem), GFP_KERNEL);
if (!mem)
return NULL;
mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL);
if (!mem->arr)
goto out_free;
while (max_page_cnt) {
struct page *page;
unsigned int order;
gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
__GFP_NORETRY;
order = get_order(max_seg_page_cnt << PAGE_SHIFT);
while (1) {
page = alloc_pages(flags, order);
if (page || !order)
break;
order -= 1;
}
if (!page) {
if (page_cnt < min_page_cnt)
goto out_free;
break;
}
mem->arr[mem->cnt].page = page;
mem->arr[mem->cnt].order = order;
mem->cnt += 1;
if (max_page_cnt <= (1UL << order))
break;
max_page_cnt -= 1UL << order;
page_cnt += 1UL << order;
if (mem->cnt >= max_segs) {
if (page_cnt < min_page_cnt)
goto out_free;
break;
}
}
return mem;
out_free:
mmc_test_free_mem(mem);
return NULL;
}
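/*
 * Worked example for the loop above (hypothetical numbers): max_sz = 1 MiB
 * and max_seg_sz = 64 KiB on a 4 KiB-page system give max_seg_page_cnt = 16,
 * so each pass first attempts an order-4 (64 KiB) allocation and steps the
 * order down one at a time if that fails.  Sixteen successful order-4
 * allocations cover the 256 requested pages; if segments or memory run out
 * before min_sz worth of pages has been gathered, everything is freed and
 * NULL is returned.
 */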
/*
* Map memory into a scatterlist. Optionally allow the same memory to be
* mapped more than once.
*/
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
struct scatterlist *sglist, int repeat,
unsigned int max_segs, unsigned int max_seg_sz,
unsigned int *sg_len, int min_sg_len)
{
struct scatterlist *sg = NULL;
unsigned int i;
unsigned long sz = size;
sg_init_table(sglist, max_segs);
if (min_sg_len > max_segs)
min_sg_len = max_segs;
*sg_len = 0;
do {
for (i = 0; i < mem->cnt; i++) {
unsigned long len = PAGE_SIZE << mem->arr[i].order;
if (min_sg_len && (size / min_sg_len < len))
len = ALIGN(size / min_sg_len, 512);
if (len > sz)
len = sz;
if (len > max_seg_sz)
len = max_seg_sz;
if (sg)
sg = sg_next(sg);
else
sg = sglist;
if (!sg)
return -EINVAL;
sg_set_page(sg, mem->arr[i].page, len, 0);
sz -= len;
*sg_len += 1;
if (!sz)
break;
}
} while (sz && repeat);
if (sz)
return -EINVAL;
if (sg)
sg_mark_end(sg);
return 0;
}
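/*
 * Example of the min_sg_len clamp above (hypothetical numbers): mapping
 * size = 64 KiB with min_sg_len = 8 caps each segment at
 * ALIGN(64 KiB / 8, 512) = 8 KiB, so the resulting scatterlist has at
 * least eight entries even when one large allocation could have covered
 * the whole transfer.
 */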
/*
* Map memory into a scatterlist so that no pages are contiguous. Allow the
* same memory to be mapped more than once.
*/
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
unsigned long sz,
struct scatterlist *sglist,
unsigned int max_segs,
unsigned int max_seg_sz,
unsigned int *sg_len)
{
struct scatterlist *sg = NULL;
unsigned int i = mem->cnt, cnt;
unsigned long len;
void *base, *addr, *last_addr = NULL;
sg_init_table(sglist, max_segs);
*sg_len = 0;
while (sz) {
base = page_address(mem->arr[--i].page);
cnt = 1 << mem->arr[i].order;
while (sz && cnt) {
addr = base + PAGE_SIZE * --cnt;
if (last_addr && last_addr + PAGE_SIZE == addr)
continue;
last_addr = addr;
len = PAGE_SIZE;
if (len > max_seg_sz)
len = max_seg_sz;
if (len > sz)
len = sz;
if (sg)
sg = sg_next(sg);
else
sg = sglist;
if (!sg)
return -EINVAL;
sg_set_page(sg, virt_to_page(addr), len, 0);
sz -= len;
*sg_len += 1;
}
if (i == 0)
i = mem->cnt;
}
if (sg)
sg_mark_end(sg);
return 0;
}
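/*
 * Two details above provide the "no pages are contiguous" guarantee:
 * pages are taken from the top of each allocation downwards, so
 * successive single-page entries within one allocation run backwards in
 * memory, and any page that would start exactly where the previous
 * entry ended (last_addr + PAGE_SIZE == addr) is skipped.  Either way,
 * no two consecutive scatterlist entries can be merged into one
 * contiguous region.
 */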
/*
* Calculate transfer rate in bytes per second.
*/
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts)
{
uint64_t ns;
ns = timespec64_to_ns(ts);
bytes *= 1000000000;
while (ns > UINT_MAX) {
bytes >>= 1;
ns >>= 1;
}
if (!ns)
return 0;
do_div(bytes, (uint32_t)ns);
return bytes;
}
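/*
 * Worked example for mmc_test_rate() (hypothetical numbers): 1 MiB in
 * 0.5 s gives bytes = 2^20 * 10^9 after the multiply and ns = 5 * 10^8.
 * ns already fits in 32 bits, so no halving is needed and
 * do_div(bytes, ns) yields 2097152, i.e. 2 MiB/s.  The halving loop only
 * matters for transfers longer than UINT_MAX nanoseconds (~4.29 s),
 * where numerator and denominator are scaled down together to keep the
 * 64-by-32-bit do_div() usable.
 */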
/*
* Save transfer results for future usage
*/
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
unsigned int count, unsigned int sectors, struct timespec64 ts,
unsigned int rate, unsigned int iops)
{
struct mmc_test_transfer_result *tr;
if (!test->gr)
return;
tr = kmalloc(sizeof(*tr), GFP_KERNEL);
if (!tr)
return;
tr->count = count;
tr->sectors = sectors;
tr->ts = ts;
tr->rate = rate;
tr->iops = iops;
list_add_tail(&tr->link, &test->gr->tr_lst);
}
/*
* Print the transfer rate.
*/
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
struct timespec64 *ts1, struct timespec64 *ts2)
{
unsigned int rate, iops, sectors = bytes >> 9;
struct timespec64 ts;
ts = timespec64_sub(*ts2, *ts1);
rate = mmc_test_rate(bytes, &ts);
iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u "
"seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
mmc_hostname(test->card->host), sectors, sectors >> 1,
(sectors & 1 ? ".5" : ""), (u64)ts.tv_sec,
(u32)ts.tv_nsec, rate / 1000, rate / 1024,
iops / 100, iops % 100);
mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}
/*
* Print the average transfer rate.
*/
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
unsigned int count, struct timespec64 *ts1,
struct timespec64 *ts2)
{
unsigned int rate, iops, sectors = bytes >> 9;
uint64_t tot = bytes * count;
struct timespec64 ts;
ts = timespec64_sub(*ts2, *ts1);
rate = mmc_test_rate(tot, &ts);
iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
"%llu.%09u seconds (%u kB/s, %u KiB/s, "
"%u.%02u IOPS, sg_len %d)\n",
mmc_hostname(test->card->host), count, sectors, count,
sectors >> 1, (sectors & 1 ? ".5" : ""),
(u64)ts.tv_sec, (u32)ts.tv_nsec,
rate / 1000, rate / 1024, iops / 100, iops % 100,
test->area.sg_len);
mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}
/*
* Return the card size in sectors.
*/
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
return card->ext_csd.sectors;
else
return card->csd.capacity << (card->csd.read_blkbits - 9);
}
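/*
 * Worked example for the CSD branch above (hypothetical numbers): with
 * csd.capacity = 0x100000 and read_blkbits = 10, the card reports
 * 0x100000 << (10 - 9) = 2097152 sectors of 512 bytes, i.e. 1 GiB.
 * Non-SD block-addressed (high-capacity) devices report the EXT_CSD
 * sector count directly instead.
 */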
/*******************************************************************/
/* Test preparation and cleanup */
/*******************************************************************/
/*
* Fill the first couple of sectors of the card with known data
* so that bad reads/writes can be detected
*/
static int __mmc_test_prepare(struct mmc_test_card *test, int write, int val)
{
int ret, i;
ret = mmc_test_set_blksize(test, 512);
if (ret)
return ret;
if (write)
memset(test->buffer, val, 512);
else {
for (i = 0; i < 512; i++)
test->buffer[i] = i;
}
for (i = 0; i < BUFFER_SIZE / 512; i++) {
ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
if (ret)
return ret;
}
return 0;
}
static int mmc_test_prepare_write(struct mmc_test_card *test)
{
return __mmc_test_prepare(test, 1, 0xDF);
}
static int mmc_test_prepare_read(struct mmc_test_card *test)
{
return __mmc_test_prepare(test, 0, 0);
}
static int mmc_test_cleanup(struct mmc_test_card *test)
{
return __mmc_test_prepare(test, 1, 0);
}
/*******************************************************************/
/* Test execution helpers */
/*******************************************************************/
/*
* Modifies the mmc_request to perform the "short transfer" tests
*/
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
struct mmc_request *mrq, int write)
{
if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
return;
if (mrq->data->blocks > 1) {
mrq->cmd->opcode = write ?
MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
mrq->stop = NULL;
} else {
mrq->cmd->opcode = MMC_SEND_STATUS;
mrq->cmd->arg = test->card->rca << 16;
}
}
/*
* Checks that a normal transfer didn't have any errors
*/
static int mmc_test_check_result(struct mmc_test_card *test,
struct mmc_request *mrq)
{
int ret;
if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
return -EINVAL;
ret = 0;
if (mrq->sbc && mrq->sbc->error)
ret = mrq->sbc->error;
if (!ret && mrq->cmd->error)
ret = mrq->cmd->error;
if (!ret && mrq->data->error)
ret = mrq->data->error;
if (!ret && mrq->stop && mrq->stop->error)
ret = mrq->stop->error;
if (!ret && mrq->data->bytes_xfered !=
mrq->data->blocks * mrq->data->blksz)
ret = RESULT_FAIL;
if (ret == -EINVAL)
ret = RESULT_UNSUP_HOST;
return ret;
}
/*
* Checks that a "short transfer" behaved as expected
*/
static int mmc_test_check_broken_result(struct mmc_test_card *test,
struct mmc_request *mrq)
{
int ret;
if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
return -EINVAL;
ret = 0;
if (!ret && mrq->cmd->error)
ret = mrq->cmd->error;
if (!ret && mrq->data->error == 0)
ret = RESULT_FAIL;
if (!ret && mrq->data->error != -ETIMEDOUT)
ret = mrq->data->error;
if (!ret && mrq->stop && mrq->stop->error)
ret = mrq->stop->error;
if (mrq->data->blocks > 1) {
if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
ret = RESULT_FAIL;
} else {
if (!ret && mrq->data->bytes_xfered > 0)
ret = RESULT_FAIL;
}
if (ret == -EINVAL)
ret = RESULT_UNSUP_HOST;
return ret;
}
struct mmc_test_req {
struct mmc_request mrq;
struct mmc_command sbc;
struct mmc_command cmd;
struct mmc_command stop;
struct mmc_command status;
struct mmc_data data;
};
/*
 * Helpers for testing non-blocking transfers with certain parameters
*/
static void mmc_test_req_reset(struct mmc_test_req *rq)
{
memset(rq, 0, sizeof(struct mmc_test_req));
rq->mrq.cmd = &rq->cmd;
rq->mrq.data = &rq->data;
rq->mrq.stop = &rq->stop;
}
static struct mmc_test_req *mmc_test_req_alloc(void)
{
struct mmc_test_req *rq = kmalloc(sizeof(*rq), GFP_KERNEL);
if (rq)
mmc_test_req_reset(rq);
return rq;
}
static void mmc_test_wait_done(struct mmc_request *mrq)
{
complete(&mrq->completion);
}
static int mmc_test_start_areq(struct mmc_test_card *test,
struct mmc_request *mrq,
struct mmc_request *prev_mrq)
{
struct mmc_host *host = test->card->host;
int err = 0;
if (mrq) {
init_completion(&mrq->completion);
mrq->done = mmc_test_wait_done;
mmc_pre_req(host, mrq);
}
if (prev_mrq) {
wait_for_completion(&prev_mrq->completion);
err = mmc_test_wait_busy(test);
if (!err)
err = mmc_test_check_result(test, prev_mrq);
}
if (!err && mrq) {
err = mmc_start_request(host, mrq);
if (err)
mmc_retune_release(host);
}
if (prev_mrq)
mmc_post_req(host, prev_mrq, 0);
if (err && mrq)
mmc_post_req(host, mrq, err);
return err;
}
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
unsigned int dev_addr, int write,
int count)
{
struct mmc_test_req *rq1, *rq2;
struct mmc_request *mrq, *prev_mrq;
int i;
int ret = RESULT_OK;
struct mmc_test_area *t = &test->area;
struct scatterlist *sg = t->sg;
struct scatterlist *sg_areq = t->sg_areq;
rq1 = mmc_test_req_alloc();
rq2 = mmc_test_req_alloc();
if (!rq1 || !rq2) {
ret = RESULT_FAIL;
goto err;
}
mrq = &rq1->mrq;
prev_mrq = NULL;
for (i = 0; i < count; i++) {
mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq));
mmc_test_prepare_mrq(test, mrq, sg, t->sg_len, dev_addr,
t->blocks, 512, write);
ret = mmc_test_start_areq(test, mrq, prev_mrq);
if (ret)
goto err;
if (!prev_mrq)
prev_mrq = &rq2->mrq;
swap(mrq, prev_mrq);
swap(sg, sg_areq);
dev_addr += t->blocks;
}
ret = mmc_test_start_areq(test, NULL, prev_mrq);
err:
kfree(rq1);
kfree(rq2);
return ret;
}
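/*
 * Shape of the two-request ping-pong above, shown for count = 3:
 *
 *	pre_req(rq1);              start(rq1);
 *	pre_req(rq2); wait(rq1);   start(rq2); post_req(rq1);
 *	pre_req(rq1); wait(rq2);   start(rq1); post_req(rq2);
 *	              wait(rq1);               post_req(rq1);
 *
 * Only the DMA preparation (mmc_pre_req) of the next request overlaps
 * the transfer in flight; the controller still executes one request at
 * a time.
 */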
/*
* Tests a basic transfer with certain parameters
*/
static int mmc_test_simple_transfer(struct mmc_test_card *test,
struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
unsigned blocks, unsigned blksz, int write)
{
struct mmc_request mrq = {};
struct mmc_command cmd = {};
struct mmc_command stop = {};
struct mmc_data data = {};
mrq.cmd = &cmd;
mrq.data = &data;
mrq.stop = &stop;
mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
blocks, blksz, write);
mmc_wait_for_req(test->card->host, &mrq);
mmc_test_wait_busy(test);
return mmc_test_check_result(test, &mrq);
}
/*
* Tests a transfer where the card will fail completely or partly
*/
static int mmc_test_broken_transfer(struct mmc_test_card *test,
unsigned blocks, unsigned blksz, int write)
{
struct mmc_request mrq = {};
struct mmc_command cmd = {};
struct mmc_command stop = {};
struct mmc_data data = {};
struct scatterlist sg;
mrq.cmd = &cmd;
mrq.data = &data;
mrq.stop = &stop;
sg_init_one(&sg, test->buffer, blocks * blksz);
mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
mmc_test_prepare_broken_mrq(test, &mrq, write);
mmc_wait_for_req(test->card->host, &mrq);
mmc_test_wait_busy(test);
return mmc_test_check_broken_result(test, &mrq);
}
/*
* Does a complete transfer test where data is also validated
*
* Note: mmc_test_prepare() must have been done before this call
*/
static int mmc_test_transfer(struct mmc_test_card *test,
struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
unsigned blocks, unsigned blksz, int write)
{
int ret, i;
if (write) {
for (i = 0; i < blocks * blksz; i++)
test->scratch[i] = i;
} else {
memset(test->scratch, 0, BUFFER_SIZE);
}
sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
ret = mmc_test_set_blksize(test, blksz);
if (ret)
return ret;
ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
blocks, blksz, write);
if (ret)
return ret;
if (write) {
int sectors;
ret = mmc_test_set_blksize(test, 512);
if (ret)
return ret;
sectors = (blocks * blksz + 511) / 512;
if ((sectors * 512) == (blocks * blksz))
sectors++;
if ((sectors * 512) > BUFFER_SIZE)
return -EINVAL;
memset(test->buffer, 0, sectors * 512);
for (i = 0; i < sectors; i++) {
ret = mmc_test_buffer_transfer(test,
test->buffer + i * 512,
dev_addr + i, 512, 0);
if (ret)
return ret;
}
for (i = 0; i < blocks * blksz; i++) {
if (test->buffer[i] != (u8)i)
return RESULT_FAIL;
}
for (; i < sectors * 512; i++) {
if (test->buffer[i] != 0xDF)
return RESULT_FAIL;
}
} else {
sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
for (i = 0; i < blocks * blksz; i++) {
if (test->scratch[i] != (u8)i)
return RESULT_FAIL;
}
}
return 0;
}
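/*
 * Worked example of the write verification above (hypothetical numbers):
 * for a "weird" 7-byte write, sectors = (7 + 511) / 512 = 1, so one full
 * sector is read back; bytes 0..6 must contain the 0..6 pattern and
 * bytes 7..511 must still hold the 0xDF filler laid down by
 * mmc_test_prepare_write().  For an aligned 512-byte write, sectors is
 * bumped to 2 so the filler check can also catch a write that overran
 * into the following sector.
 */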
/*******************************************************************/
/* Tests */
/*******************************************************************/
struct mmc_test_case {
const char *name;
int (*prepare)(struct mmc_test_card *);
int (*run)(struct mmc_test_card *);
int (*cleanup)(struct mmc_test_card *);
};
static int mmc_test_basic_write(struct mmc_test_card *test)
{
int ret;
struct scatterlist sg;
ret = mmc_test_set_blksize(test, 512);
if (ret)
return ret;
sg_init_one(&sg, test->buffer, 512);
return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
}
static int mmc_test_basic_read(struct mmc_test_card *test)
{
int ret;
struct scatterlist sg;
ret = mmc_test_set_blksize(test, 512);
if (ret)
return ret;
sg_init_one(&sg, test->buffer, 512);
return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
}
static int mmc_test_verify_write(struct mmc_test_card *test)
{
struct scatterlist sg;
sg_init_one(&sg, test->buffer, 512);
return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}
static int mmc_test_verify_read(struct mmc_test_card *test)
{
struct scatterlist sg;
sg_init_one(&sg, test->buffer, 512);
return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}
static int mmc_test_multi_write(struct mmc_test_card *test)
{
unsigned int size;
struct scatterlist sg;
if (test->card->host->max_blk_count == 1)
return RESULT_UNSUP_HOST;
size = PAGE_SIZE * 2;
size = min(size, test->card->host->max_req_size);
size = min(size, test->card->host->max_seg_size);
size = min(size, test->card->host->max_blk_count * 512);
if (size < 1024)
return RESULT_UNSUP_HOST;
sg_init_one(&sg, test->buffer, size);
return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}
static int mmc_test_multi_read(struct mmc_test_card *test)
{
unsigned int size;
struct scatterlist sg;
if (test->card->host->max_blk_count == 1)
return RESULT_UNSUP_HOST;
size = PAGE_SIZE * 2;
size = min(size, test->card->host->max_req_size);
size = min(size, test->card->host->max_seg_size);
size = min(size, test->card->host->max_blk_count * 512);
if (size < 1024)
return RESULT_UNSUP_HOST;
sg_init_one(&sg, test->buffer, size);
return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}
static int mmc_test_pow2_write(struct mmc_test_card *test)
{
int ret, i;
struct scatterlist sg;
if (!test->card->csd.write_partial)
return RESULT_UNSUP_CARD;
for (i = 1; i < 512; i <<= 1) {
sg_init_one(&sg, test->buffer, i);
ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
if (ret)
return ret;
}
return 0;
}
static int mmc_test_pow2_read(struct mmc_test_card *test)
{
int ret, i;
struct scatterlist sg;
if (!test->card->csd.read_partial)
return RESULT_UNSUP_CARD;
for (i = 1; i < 512; i <<= 1) {
sg_init_one(&sg, test->buffer, i);
ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
if (ret)
return ret;
}
return 0;
}
static int mmc_test_weird_write(struct mmc_test_card *test)
{
int ret, i;
struct scatterlist sg;
if (!test->card->csd.write_partial)
return RESULT_UNSUP_CARD;
for (i = 3; i < 512; i += 7) {
sg_init_one(&sg, test->buffer, i);
ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
if (ret)
return ret;
}
return 0;
}
static int mmc_test_weird_read(struct mmc_test_card *test)
{
int ret, i;
struct scatterlist sg;
if (!test->card->csd.read_partial)
return RESULT_UNSUP_CARD;
for (i = 3; i < 512; i += 7) {
sg_init_one(&sg, test->buffer, i);
ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
if (ret)
return ret;
}
return 0;
}
static int mmc_test_align_write(struct mmc_test_card *test)
{
int ret, i;
struct scatterlist sg;
for (i = 1; i < TEST_ALIGN_END; i++) {
sg_init_one(&sg, test->buffer + i, 512);
ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
if (ret)
return ret;
}
return 0;
}
static int mmc_test_align_read(struct mmc_test_card *test)
{
int ret, i;
struct scatterlist sg;
for (i = 1; i < TEST_ALIGN_END; i++) {
sg_init_one(&sg, test->buffer + i, 512);
ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
if (ret)
return ret;
}
return 0;
}
static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
int ret, i;
unsigned int size;
struct scatterlist sg;
if (test->card->host->max_blk_count == 1)
return RESULT_UNSUP_HOST;
size = PAGE_SIZE * 2;
size = min(size, test->card->host->max_req_size);
size = min(size, test->card->host->max_seg_size);
size = min(size, test->card->host->max_blk_count * 512);
if (size < 1024)
return RESULT_UNSUP_HOST;
for (i = 1; i < TEST_ALIGN_END; i++) {
sg_init_one(&sg, test->buffer + i, size);
ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
if (ret)
return ret;
}
return 0;
}
static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
int ret, i;
unsigned int size;
struct scatterlist sg;
if (test->card->host->max_blk_count == 1)
return RESULT_UNSUP_HOST;
size = PAGE_SIZE * 2;
size = min(size, test->card->host->max_req_size);
size = min(size, test->card->host->max_seg_size);
size = min(size, test->card->host->max_blk_count * 512);
if (size < 1024)
return RESULT_UNSUP_HOST;
for (i = 1; i < TEST_ALIGN_END; i++) {
sg_init_one(&sg, test->buffer + i, size);
ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
if (ret)
return ret;
}
return 0;
}
static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
int ret;
ret = mmc_test_set_blksize(test, 512);
if (ret)
return ret;
return mmc_test_broken_transfer(test, 1, 512, 1);
}
static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
int ret;
ret = mmc_test_set_blksize(test, 512);
if (ret)
return ret;
return mmc_test_broken_transfer(test, 1, 512, 0);
}
static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
int ret;
if (test->card->host->max_blk_count == 1)
return RESULT_UNSUP_HOST;
ret = mmc_test_set_blksize(test, 512);
if (ret)
return ret;
return mmc_test_broken_transfer(test, 2, 512, 1);
}
static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
int ret;
if (test->card->host->max_blk_count == 1)
return RESULT_UNSUP_HOST;
ret = mmc_test_set_blksize(test, 512);
if (ret)
return ret;
return mmc_test_broken_transfer(test, 2, 512, 0);
}
#ifdef CONFIG_HIGHMEM
static int mmc_test_write_high(struct mmc_test_card *test)
{
struct scatterlist sg;
sg_init_table(&sg, 1);
sg_set_page(&sg, test->highmem, 512, 0);
return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}
static int mmc_test_read_high(struct mmc_test_card *test)
{
struct scatterlist sg;
sg_init_table(&sg, 1);
sg_set_page(&sg, test->highmem, 512, 0);
return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}
static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
unsigned int size;
struct scatterlist sg;
if (test->card->host->max_blk_count == 1)
return RESULT_UNSUP_HOST;
size = PAGE_SIZE * 2;
size = min(size, test->card->host->max_req_size);
size = min(size, test->card->host->max_seg_size);
size = min(size, test->card->host->max_blk_count * 512);
if (size < 1024)
return RESULT_UNSUP_HOST;
sg_init_table(&sg, 1);
sg_set_page(&sg, test->highmem, size, 0);
return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}
static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
unsigned int size;
struct scatterlist sg;
if (test->card->host->max_blk_count == 1)
return RESULT_UNSUP_HOST;
size = PAGE_SIZE * 2;
size = min(size, test->card->host->max_req_size);
size = min(size, test->card->host->max_seg_size);
size = min(size, test->card->host->max_blk_count * 512);
if (size < 1024)
return RESULT_UNSUP_HOST;
sg_init_table(&sg, 1);
sg_set_page(&sg, test->highmem, size, 0);
return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}
#else
static int mmc_test_no_highmem(struct mmc_test_card *test)
{
pr_info("%s: Highmem not configured - test skipped\n",
mmc_hostname(test->card->host));
return 0;
}
#endif /* CONFIG_HIGHMEM */
/*
 * Map sz bytes so that they can be transferred.
*/
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
int max_scatter, int min_sg_len, bool nonblock)
{
struct mmc_test_area *t = &test->area;
int err;
unsigned int sg_len = 0;
t->blocks = sz >> 9;
if (max_scatter) {
err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
t->max_segs, t->max_seg_sz,
&t->sg_len);
} else {
err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
t->max_seg_sz, &t->sg_len, min_sg_len);
}
if (err || !nonblock)
goto err;
if (max_scatter) {
err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg_areq,
t->max_segs, t->max_seg_sz,
&sg_len);
} else {
err = mmc_test_map_sg(t->mem, sz, t->sg_areq, 1, t->max_segs,
t->max_seg_sz, &sg_len, min_sg_len);
}
if (!err && sg_len != t->sg_len)
err = -EINVAL;
err:
if (err)
pr_info("%s: Failed to map sg list\n",
mmc_hostname(test->card->host));
return err;
}
/*
* Transfer bytes mapped by mmc_test_area_map().
*/
static int mmc_test_area_transfer(struct mmc_test_card *test,
unsigned int dev_addr, int write)
{
struct mmc_test_area *t = &test->area;
return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
t->blocks, 512, write);
}
/*
* Map and transfer bytes for multiple transfers.
*/
static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
unsigned int dev_addr, int write,
int max_scatter, int timed, int count,
bool nonblock, int min_sg_len)
{
struct timespec64 ts1, ts2;
int ret = 0;
int i;
/*
* In the case of a maximally scattered transfer, the maximum transfer
* size is further limited by using PAGE_SIZE segments.
*/
if (max_scatter) {
struct mmc_test_area *t = &test->area;
unsigned long max_tfr;
if (t->max_seg_sz >= PAGE_SIZE)
max_tfr = t->max_segs * PAGE_SIZE;
else
max_tfr = t->max_segs * t->max_seg_sz;
if (sz > max_tfr)
sz = max_tfr;
}
ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len, nonblock);
if (ret)
return ret;
if (timed)
ktime_get_ts64(&ts1);
if (nonblock)
ret = mmc_test_nonblock_transfer(test, dev_addr, write, count);
else
for (i = 0; i < count && ret == 0; i++) {
ret = mmc_test_area_transfer(test, dev_addr, write);
dev_addr += sz >> 9;
}
if (ret)
return ret;
if (timed)
ktime_get_ts64(&ts2);
if (timed)
mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
return 0;
}
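/*
 * Example of the max_scatter clamp above (hypothetical numbers): with
 * max_segs = 128 and max_seg_sz = 64 KiB on 4 KiB pages, the maximally
 * scattered transfer is limited to 128 * 4 KiB = 512 KiB, because
 * mmc_test_map_sg_max_scatter() places at most one page per segment.
 */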
static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
unsigned int dev_addr, int write, int max_scatter,
int timed)
{
return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
timed, 1, false, 0);
}
/*
* Write the test area entirely.
*/
static int mmc_test_area_fill(struct mmc_test_card *test)
{
struct mmc_test_area *t = &test->area;
return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
}
/*
* Erase the test area entirely.
*/
static int mmc_test_area_erase(struct mmc_test_card *test)
{
struct mmc_test_area *t = &test->area;
if (!mmc_can_erase(test->card))
return 0;
return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
MMC_ERASE_ARG);
}
/*
* Cleanup struct mmc_test_area.
*/
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
struct mmc_test_area *t = &test->area;
kfree(t->sg);
kfree(t->sg_areq);
mmc_test_free_mem(t->mem);
return 0;
}
/*
* Initialize an area for testing large transfers. The test area is set to the
* middle of the card because cards may have different characteristics at the
* front (for FAT file system optimization). Optionally, the area is erased
* (if the card supports it) which may improve write performance. Optionally,
* the area is filled with data for subsequent read tests.
*/
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
struct mmc_test_area *t = &test->area;
unsigned long min_sz = 64 * 1024, sz;
int ret;
ret = mmc_test_set_blksize(test, 512);
if (ret)
return ret;
/* Make the test area size about 4MiB */
sz = (unsigned long)test->card->pref_erase << 9;
t->max_sz = sz;
while (t->max_sz < 4 * 1024 * 1024)
t->max_sz += sz;
while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
t->max_sz -= sz;
t->max_segs = test->card->host->max_segs;
t->max_seg_sz = test->card->host->max_seg_size;
t->max_seg_sz -= t->max_seg_sz % 512;
t->max_tfr = t->max_sz;
if (t->max_tfr >> 9 > test->card->host->max_blk_count)
t->max_tfr = test->card->host->max_blk_count << 9;
if (t->max_tfr > test->card->host->max_req_size)
t->max_tfr = test->card->host->max_req_size;
if (t->max_tfr / t->max_seg_sz > t->max_segs)
t->max_tfr = t->max_segs * t->max_seg_sz;
/*
* Try to allocate enough memory for a max. sized transfer. Less is OK
* because the same memory can be mapped into the scatterlist more than
* once. Also, take into account the limits imposed on scatterlist
* segments by the host driver.
*/
t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
t->max_seg_sz);
if (!t->mem)
return -ENOMEM;
t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL);
if (!t->sg) {
ret = -ENOMEM;
goto out_free;
}
t->sg_areq = kmalloc_array(t->max_segs, sizeof(*t->sg_areq),
GFP_KERNEL);
if (!t->sg_areq) {
ret = -ENOMEM;
goto out_free;
}
t->dev_addr = mmc_test_capacity(test->card) / 2;
t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
if (erase) {
ret = mmc_test_area_erase(test);
if (ret)
goto out_free;
}
if (fill) {
ret = mmc_test_area_fill(test);
if (ret)
goto out_free;
}
return 0;
out_free:
mmc_test_area_cleanup(test);
return ret;
}
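/*
 * Sizing example for mmc_test_area_init() (hypothetical numbers): with
 * pref_erase = 1024 sectors (512 KiB), sz = 512 KiB and max_sz grows in
 * 512 KiB steps to exactly 4 MiB; the area is then placed at half the
 * card capacity, rounded down to a multiple of max_sz in sectors.
 */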
/*
* Prepare for large transfers. Do not erase the test area.
*/
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
return mmc_test_area_init(test, 0, 0);
}
/*
* Prepare for large transfers. Do erase the test area.
*/
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
return mmc_test_area_init(test, 1, 0);
}
/*
* Prepare for large transfers. Erase and fill the test area.
*/
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
return mmc_test_area_init(test, 1, 1);
}
/*
* Test best-case performance. Best-case performance is expected from
* a single large transfer.
*
* An additional option (max_scatter) allows the measurement of the same
* transfer but with no contiguous pages in the scatter list. This tests
* the efficiency of DMA to handle scattered pages.
*/
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
int max_scatter)
{
struct mmc_test_area *t = &test->area;
return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
max_scatter, 1);
}
/*
* Best-case read performance.
*/
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
return mmc_test_best_performance(test, 0, 0);
}
/*
* Best-case write performance.
*/
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
return mmc_test_best_performance(test, 1, 0);
}
/*
* Best-case read performance into scattered pages.
*/
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
return mmc_test_best_performance(test, 0, 1);
}
/*
* Best-case write performance from scattered pages.
*/
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
return mmc_test_best_performance(test, 1, 1);
}
/*
* Single read performance by transfer size.
*/
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
struct mmc_test_area *t = &test->area;
unsigned long sz;
unsigned int dev_addr;
int ret;
for (sz = 512; sz < t->max_tfr; sz <<= 1) {
dev_addr = t->dev_addr + (sz >> 9);
ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
if (ret)
return ret;
}
sz = t->max_tfr;
dev_addr = t->dev_addr;
return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}
/*
* Single write performance by transfer size.
*/
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
struct mmc_test_area *t = &test->area;
unsigned long sz;
unsigned int dev_addr;
int ret;
ret = mmc_test_area_erase(test);
if (ret)
return ret;
for (sz = 512; sz < t->max_tfr; sz <<= 1) {
dev_addr = t->dev_addr + (sz >> 9);
ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
if (ret)
return ret;
}
ret = mmc_test_area_erase(test);
if (ret)
return ret;
sz = t->max_tfr;
dev_addr = t->dev_addr;
return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}
/*
* Single trim performance by transfer size.
*/
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
struct mmc_test_area *t = &test->area;
unsigned long sz;
unsigned int dev_addr;
struct timespec64 ts1, ts2;
int ret;
if (!mmc_can_trim(test->card))
return RESULT_UNSUP_CARD;
if (!mmc_can_erase(test->card))
return RESULT_UNSUP_HOST;
for (sz = 512; sz < t->max_sz; sz <<= 1) {
dev_addr = t->dev_addr + (sz >> 9);
ktime_get_ts64(&ts1);
ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
if (ret)
return ret;
ktime_get_ts64(&ts2);
mmc_test_print_rate(test, sz, &ts1, &ts2);
}
dev_addr = t->dev_addr;
ktime_get_ts64(&ts1);
ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
if (ret)
return ret;
ktime_get_ts64(&ts2);
mmc_test_print_rate(test, sz, &ts1, &ts2);
return 0;
}
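/*
 * Loop-bound example (hypothetical numbers): with t->max_sz = 4 MiB the
 * loop above times trims of 512 B, 1 KiB, ... up to 2 MiB, and the
 * final trim after the loop covers the full 4 MiB, since sz has been
 * doubled one last time on exit.
 */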
static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
struct mmc_test_area *t = &test->area;
unsigned int dev_addr, i, cnt;
struct timespec64 ts1, ts2;
int ret;
cnt = t->max_sz / sz;
dev_addr = t->dev_addr;
ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
if (ret)
return ret;
dev_addr += (sz >> 9);
}
ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
return 0;
}
/*
* Consecutive read performance by transfer size.
*/
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
struct mmc_test_area *t = &test->area;
unsigned long sz;
int ret;
for (sz = 512; sz < t->max_tfr; sz <<= 1) {
ret = mmc_test_seq_read_perf(test, sz);
if (ret)
return ret;
}
sz = t->max_tfr;
return mmc_test_seq_read_perf(test, sz);
}
static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
struct mmc_test_area *t = &test->area;
unsigned int dev_addr, i, cnt;
struct timespec64 ts1, ts2;
int ret;
ret = mmc_test_area_erase(test);
if (ret)
return ret;
cnt = t->max_sz / sz;
dev_addr = t->dev_addr;
ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
if (ret)
return ret;
dev_addr += (sz >> 9);
}
ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
return 0;
}
/*
* Consecutive write performance by transfer size.
*/
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
struct mmc_test_area *t = &test->area;
unsigned long sz;
int ret;
for (sz = 512; sz < t->max_tfr; sz <<= 1) {
ret = mmc_test_seq_write_perf(test, sz);
if (ret)
return ret;
}
sz = t->max_tfr;
return mmc_test_seq_write_perf(test, sz);
}
/*
* Consecutive trim performance by transfer size.
*/
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
struct mmc_test_area *t = &test->area;
unsigned long sz;
unsigned int dev_addr, i, cnt;
struct timespec64 ts1, ts2;
int ret;
if (!mmc_can_trim(test->card))
return RESULT_UNSUP_CARD;
if (!mmc_can_erase(test->card))
return RESULT_UNSUP_HOST;
for (sz = 512; sz <= t->max_sz; sz <<= 1) {
ret = mmc_test_area_erase(test);
if (ret)
return ret;
ret = mmc_test_area_fill(test);
if (ret)
return ret;
cnt = t->max_sz / sz;
dev_addr = t->dev_addr;
ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_erase(test->card, dev_addr, sz >> 9,
MMC_TRIM_ARG);
if (ret)
return ret;
dev_addr += (sz >> 9);
}
ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
}
return 0;
}
static unsigned int rnd_next = 1;
static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
uint64_t r;
rnd_next = rnd_next * 1103515245 + 12345;
r = (rnd_next >> 16) & 0x7fff;
return (r * rnd_cnt) >> 15;
}
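/*
 * This is the textbook ANSI C rand() LCG (multiplier 1103515245,
 * increment 12345), so the sequence is deterministic: the random write
 * test below relies on that by saving and restoring rnd_next to replay
 * exactly the same I/O pattern on its second run.  The top 15 bits of
 * the state give r in [0, 32767], and (r * rnd_cnt) >> 15 maps that to
 * [0, rnd_cnt).
 */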
static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
unsigned long sz)
{
unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
unsigned int ssz;
struct timespec64 ts1, ts2, ts;
int ret;
ssz = sz >> 9;
rnd_addr = mmc_test_capacity(test->card) / 4;
range1 = rnd_addr / test->card->pref_erase;
range2 = range1 / ssz;
ktime_get_ts64(&ts1);
for (cnt = 0; cnt < UINT_MAX; cnt++) {
ktime_get_ts64(&ts2);
ts = timespec64_sub(ts2, ts1);
if (ts.tv_sec >= 10)
break;
ea = mmc_test_rnd_num(range1);
if (ea == last_ea)
ea -= 1;
last_ea = ea;
dev_addr = rnd_addr + test->card->pref_erase * ea +
ssz * mmc_test_rnd_num(range2);
ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
if (ret)
return ret;
}
if (print)
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
return 0;
}
static int mmc_test_random_perf(struct mmc_test_card *test, int write)
{
struct mmc_test_area *t = &test->area;
unsigned int next;
unsigned long sz;
int ret;
for (sz = 512; sz < t->max_tfr; sz <<= 1) {
/*
* When writing, try to get more consistent results by running
* the test twice with exactly the same I/O but outputting the
* results only for the 2nd run.
*/
if (write) {
next = rnd_next;
ret = mmc_test_rnd_perf(test, write, 0, sz);
if (ret)
return ret;
rnd_next = next;
}
ret = mmc_test_rnd_perf(test, write, 1, sz);
if (ret)
return ret;
}
sz = t->max_tfr;
if (write) {
next = rnd_next;
ret = mmc_test_rnd_perf(test, write, 0, sz);
if (ret)
return ret;
rnd_next = next;
}
return mmc_test_rnd_perf(test, write, 1, sz);
}
/*
* Random read performance by transfer size.
*/
static int mmc_test_random_read_perf(struct mmc_test_card *test)
{
return mmc_test_random_perf(test, 0);
}
/*
* Random write performance by transfer size.
*/
static int mmc_test_random_write_perf(struct mmc_test_card *test)
{
return mmc_test_random_perf(test, 1);
}
static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
unsigned int tot_sz, int max_scatter)
{
struct mmc_test_area *t = &test->area;
unsigned int dev_addr, i, cnt, sz, ssz;
struct timespec64 ts1, ts2;
int ret;
sz = t->max_tfr;
/*
* In the case of a maximally scattered transfer, the maximum transfer
* size is further limited by using PAGE_SIZE segments.
*/
if (max_scatter) {
unsigned long max_tfr;
if (t->max_seg_sz >= PAGE_SIZE)
max_tfr = t->max_segs * PAGE_SIZE;
else
max_tfr = t->max_segs * t->max_seg_sz;
if (sz > max_tfr)
sz = max_tfr;
}
ssz = sz >> 9;
dev_addr = mmc_test_capacity(test->card) / 4;
if (tot_sz > dev_addr << 9)
tot_sz = dev_addr << 9;
cnt = tot_sz / sz;
	dev_addr &= 0xffff0000; /* Round to 32MiB boundary */
ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_test_area_io(test, sz, dev_addr, write,
max_scatter, 0);
if (ret)
return ret;
dev_addr += ssz;
}
ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
return 0;
}
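/*
 * Alignment example for the mask above (hypothetical numbers): dev_addr
 * counts 512-byte sectors, so clearing the low 16 bits aligns it to
 * 0x10000 sectors = 32 MiB.  A card of 2^22 sectors (2 GiB) starts the
 * run at capacity / 4 = 2^20 sectors = 512 MiB, which is already
 * aligned.
 */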
static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
{
int ret, i;
for (i = 0; i < 10; i++) {
ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
if (ret)
return ret;
}
for (i = 0; i < 5; i++) {
ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
if (ret)
return ret;
}
for (i = 0; i < 3; i++) {
ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
if (ret)
return ret;
}
return ret;
}
/*
* Large sequential read performance.
*/
static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
{
return mmc_test_large_seq_perf(test, 0);
}
/*
* Large sequential write performance.
*/
static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
{
return mmc_test_large_seq_perf(test, 1);
}
static int mmc_test_rw_multiple(struct mmc_test_card *test,
struct mmc_test_multiple_rw *tdata,
unsigned int reqsize, unsigned int size,
int min_sg_len)
{
unsigned int dev_addr;
struct mmc_test_area *t = &test->area;
int ret = 0;
/* Set up test area */
if (size > mmc_test_capacity(test->card) / 2 * 512)
size = mmc_test_capacity(test->card) / 2 * 512;
if (reqsize > t->max_tfr)
reqsize = t->max_tfr;
dev_addr = mmc_test_capacity(test->card) / 4;
	if (dev_addr & 0xffff0000)
		dev_addr &= 0xffff0000; /* Round to 32MiB boundary */
else
dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
if (!dev_addr)
goto err;
if (reqsize > size)
return 0;
/* prepare test area */
if (mmc_can_erase(test->card) &&
tdata->prepare & MMC_TEST_PREP_ERASE) {
ret = mmc_erase(test->card, dev_addr,
size / 512, test->card->erase_arg);
if (ret)
ret = mmc_erase(test->card, dev_addr,
size / 512, MMC_ERASE_ARG);
if (ret)
goto err;
}
/* Run test */
ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
tdata->do_write, 0, 1, size / reqsize,
tdata->do_nonblock_req, min_sg_len);
if (ret)
goto err;
return ret;
err:
pr_info("[%s] error\n", __func__);
return ret;
}
static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
struct mmc_test_multiple_rw *rw)
{
int ret = 0;
int i;
void *pre_req = test->card->host->ops->pre_req;
void *post_req = test->card->host->ops->post_req;
if (rw->do_nonblock_req &&
((!pre_req && post_req) || (pre_req && !post_req))) {
pr_info("error: only one of pre/post is defined\n");
return -EINVAL;
}
for (i = 0 ; i < rw->len && ret == 0; i++) {
ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
if (ret)
break;
}
return ret;
}
static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
struct mmc_test_multiple_rw *rw)
{
int ret = 0;
int i;
for (i = 0 ; i < rw->len && ret == 0; i++) {
ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size,
rw->sg_len[i]);
if (ret)
break;
}
return ret;
}
/*
* Multiple blocking write 4k to 4 MB chunks
*/
static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
{
unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
struct mmc_test_multiple_rw test_data = {
.bs = bs,
.size = TEST_AREA_MAX_SIZE,
.len = ARRAY_SIZE(bs),
.do_write = true,
.do_nonblock_req = false,
.prepare = MMC_TEST_PREP_ERASE,
};
return mmc_test_rw_multiple_size(test, &test_data);
}
/*
* Multiple non-blocking write 4k to 4 MB chunks
*/
static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
{
unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
struct mmc_test_multiple_rw test_data = {
.bs = bs,
.size = TEST_AREA_MAX_SIZE,
.len = ARRAY_SIZE(bs),
.do_write = true,
.do_nonblock_req = true,
.prepare = MMC_TEST_PREP_ERASE,
};
return mmc_test_rw_multiple_size(test, &test_data);
}
/*
* Multiple blocking read 4k to 4 MB chunks
*/
static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
{
unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
struct mmc_test_multiple_rw test_data = {
.bs = bs,
.size = TEST_AREA_MAX_SIZE,
.len = ARRAY_SIZE(bs),
.do_write = false,
.do_nonblock_req = false,
.prepare = MMC_TEST_PREP_NONE,
};
return mmc_test_rw_multiple_size(test, &test_data);
}
/*
* Multiple non-blocking read 4k to 4 MB chunks
*/
static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
{
unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
struct mmc_test_multiple_rw test_data = {
.bs = bs,
.size = TEST_AREA_MAX_SIZE,
.len = ARRAY_SIZE(bs),
.do_write = false,
.do_nonblock_req = true,
.prepare = MMC_TEST_PREP_NONE,
};
return mmc_test_rw_multiple_size(test, &test_data);
}
/*
* Multiple blocking write 1 to 512 sg elements
*/
static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
{
unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
1 << 7, 1 << 8, 1 << 9};
struct mmc_test_multiple_rw test_data = {
.sg_len = sg_len,
.size = TEST_AREA_MAX_SIZE,
.len = ARRAY_SIZE(sg_len),
.do_write = true,
.do_nonblock_req = false,
.prepare = MMC_TEST_PREP_ERASE,
};
return mmc_test_rw_multiple_sg_len(test, &test_data);
}
/*
* Multiple non-blocking write 1 to 512 sg elements
*/
static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
{
unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
1 << 7, 1 << 8, 1 << 9};
struct mmc_test_multiple_rw test_data = {
.sg_len = sg_len,
.size = TEST_AREA_MAX_SIZE,
.len = ARRAY_SIZE(sg_len),
.do_write = true,
.do_nonblock_req = true,
.prepare = MMC_TEST_PREP_ERASE,
};
return mmc_test_rw_multiple_sg_len(test, &test_data);
}
/*
* Multiple blocking read 1 to 512 sg elements
*/
static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
{
unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
1 << 7, 1 << 8, 1 << 9};
struct mmc_test_multiple_rw test_data = {
.sg_len = sg_len,
.size = TEST_AREA_MAX_SIZE,
.len = ARRAY_SIZE(sg_len),
.do_write = false,
.do_nonblock_req = false,
.prepare = MMC_TEST_PREP_NONE,
};
return mmc_test_rw_multiple_sg_len(test, &test_data);
}
/*
* Multiple non-blocking read 1 to 512 sg elements
*/
static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
{
unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
1 << 7, 1 << 8, 1 << 9};
struct mmc_test_multiple_rw test_data = {
.sg_len = sg_len,
.size = TEST_AREA_MAX_SIZE,
.len = ARRAY_SIZE(sg_len),
.do_write = false,
.do_nonblock_req = true,
.prepare = MMC_TEST_PREP_NONE,
};
return mmc_test_rw_multiple_sg_len(test, &test_data);
}
/*
* eMMC hardware reset.
*/
static int mmc_test_reset(struct mmc_test_card *test)
{
struct mmc_card *card = test->card;
int err;
err = mmc_hw_reset(card);
if (!err) {
/*
* Reset will re-enable the card's command queue, but tests
* expect it to be disabled.
*/
if (card->ext_csd.cmdq_en)
mmc_cmdq_disable(card);
return RESULT_OK;
} else if (err == -EOPNOTSUPP) {
return RESULT_UNSUP_HOST;
}
return RESULT_FAIL;
}
static int mmc_test_send_status(struct mmc_test_card *test,
struct mmc_command *cmd)
{
memset(cmd, 0, sizeof(*cmd));
cmd->opcode = MMC_SEND_STATUS;
if (!mmc_host_is_spi(test->card->host))
cmd->arg = test->card->rca << 16;
cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
return mmc_wait_for_cmd(test->card->host, cmd, 0);
}
static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
unsigned int dev_addr, int use_sbc,
int repeat_cmd, int write, int use_areq)
{
struct mmc_test_req *rq = mmc_test_req_alloc();
struct mmc_host *host = test->card->host;
struct mmc_test_area *t = &test->area;
struct mmc_request *mrq;
unsigned long timeout;
bool expired = false;
int ret = 0, cmd_ret;
u32 status = 0;
int count = 0;
if (!rq)
return -ENOMEM;
mrq = &rq->mrq;
if (use_sbc)
mrq->sbc = &rq->sbc;
mrq->cap_cmd_during_tfr = true;
mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
512, write);
if (use_sbc && t->blocks > 1 && !mrq->sbc) {
ret = mmc_host_cmd23(host) ?
RESULT_UNSUP_CARD :
RESULT_UNSUP_HOST;
goto out_free;
}
/* Start ongoing data request */
if (use_areq) {
ret = mmc_test_start_areq(test, mrq, NULL);
if (ret)
goto out_free;
} else {
mmc_wait_for_req(host, mrq);
}
timeout = jiffies + msecs_to_jiffies(3000);
do {
count += 1;
/* Send status command while data transfer in progress */
cmd_ret = mmc_test_send_status(test, &rq->status);
if (cmd_ret)
break;
status = rq->status.resp[0];
if (status & R1_ERROR) {
cmd_ret = -EIO;
break;
}
if (mmc_is_req_done(host, mrq))
break;
expired = time_after(jiffies, timeout);
if (expired) {
pr_info("%s: timeout waiting for Tran state status %#x\n",
mmc_hostname(host), status);
cmd_ret = -ETIMEDOUT;
break;
}
} while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);
/* Wait for data request to complete */
if (use_areq) {
ret = mmc_test_start_areq(test, NULL, mrq);
} else {
mmc_wait_for_req_done(test->card->host, mrq);
}
/*
 * For a cap_cmd_during_tfr request, the upper layer must send the stop
 * command itself if one is required.
*/
if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) {
if (ret)
mmc_wait_for_cmd(host, mrq->data->stop, 0);
else
ret = mmc_wait_for_cmd(host, mrq->data->stop, 0);
}
if (ret)
goto out_free;
if (cmd_ret) {
pr_info("%s: Send Status failed: status %#x, error %d\n",
mmc_hostname(test->card->host), status, cmd_ret);
}
ret = mmc_test_check_result(test, mrq);
if (ret)
goto out_free;
ret = mmc_test_wait_busy(test);
if (ret)
goto out_free;
if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
pr_info("%s: %d commands completed during transfer of %u blocks\n",
mmc_hostname(test->card->host), count, t->blocks);
if (cmd_ret)
ret = cmd_ret;
out_free:
kfree(rq);
return ret;
}
static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
unsigned long sz, int use_sbc, int write,
int use_areq)
{
struct mmc_test_area *t = &test->area;
int ret;
if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
return RESULT_UNSUP_HOST;
ret = mmc_test_area_map(test, sz, 0, 0, use_areq);
if (ret)
return ret;
ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
use_areq);
if (ret)
return ret;
return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
use_areq);
}
static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc,
int write, int use_areq)
{
struct mmc_test_area *t = &test->area;
unsigned long sz;
int ret;
for (sz = 512; sz <= t->max_tfr; sz += 512) {
ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write,
use_areq);
if (ret)
return ret;
}
return 0;
}
/*
* Commands during read - no Set Block Count (CMD23).
*/
static int mmc_test_cmds_during_read(struct mmc_test_card *test)
{
return mmc_test_cmds_during_tfr(test, 0, 0, 0);
}
/*
* Commands during write - no Set Block Count (CMD23).
*/
static int mmc_test_cmds_during_write(struct mmc_test_card *test)
{
return mmc_test_cmds_during_tfr(test, 0, 1, 0);
}
/*
* Commands during read - use Set Block Count (CMD23).
*/
static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
{
return mmc_test_cmds_during_tfr(test, 1, 0, 0);
}
/*
* Commands during write - use Set Block Count (CMD23).
*/
static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
{
return mmc_test_cmds_during_tfr(test, 1, 1, 0);
}
/*
* Commands during non-blocking read - use Set Block Count (CMD23).
*/
static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
{
return mmc_test_cmds_during_tfr(test, 1, 0, 1);
}
/*
* Commands during non-blocking write - use Set Block Count (CMD23).
*/
static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
{
return mmc_test_cmds_during_tfr(test, 1, 1, 1);
}
static const struct mmc_test_case mmc_test_cases[] = {
{
.name = "Basic write (no data verification)",
.run = mmc_test_basic_write,
},
{
.name = "Basic read (no data verification)",
.run = mmc_test_basic_read,
},
{
.name = "Basic write (with data verification)",
.prepare = mmc_test_prepare_write,
.run = mmc_test_verify_write,
.cleanup = mmc_test_cleanup,
},
{
.name = "Basic read (with data verification)",
.prepare = mmc_test_prepare_read,
.run = mmc_test_verify_read,
.cleanup = mmc_test_cleanup,
},
{
.name = "Multi-block write",
.prepare = mmc_test_prepare_write,
.run = mmc_test_multi_write,
.cleanup = mmc_test_cleanup,
},
{
.name = "Multi-block read",
.prepare = mmc_test_prepare_read,
.run = mmc_test_multi_read,
.cleanup = mmc_test_cleanup,
},
{
.name = "Power of two block writes",
.prepare = mmc_test_prepare_write,
.run = mmc_test_pow2_write,
.cleanup = mmc_test_cleanup,
},
{
.name = "Power of two block reads",
.prepare = mmc_test_prepare_read,
.run = mmc_test_pow2_read,
.cleanup = mmc_test_cleanup,
},
{
.name = "Weird sized block writes",
.prepare = mmc_test_prepare_write,
.run = mmc_test_weird_write,
.cleanup = mmc_test_cleanup,
},
{
.name = "Weird sized block reads",
.prepare = mmc_test_prepare_read,
.run = mmc_test_weird_read,
.cleanup = mmc_test_cleanup,
},
{
.name = "Badly aligned write",
.prepare = mmc_test_prepare_write,
.run = mmc_test_align_write,
.cleanup = mmc_test_cleanup,
},
{
.name = "Badly aligned read",
.prepare = mmc_test_prepare_read,
.run = mmc_test_align_read,
.cleanup = mmc_test_cleanup,
},
{
.name = "Badly aligned multi-block write",
.prepare = mmc_test_prepare_write,
.run = mmc_test_align_multi_write,
.cleanup = mmc_test_cleanup,
},
{
.name = "Badly aligned multi-block read",
.prepare = mmc_test_prepare_read,
.run = mmc_test_align_multi_read,
.cleanup = mmc_test_cleanup,
},
{
.name = "Proper xfer_size at write (start failure)",
.run = mmc_test_xfersize_write,
},
{
.name = "Proper xfer_size at read (start failure)",
.run = mmc_test_xfersize_read,
},
{
.name = "Proper xfer_size at write (midway failure)",
.run = mmc_test_multi_xfersize_write,
},
{
.name = "Proper xfer_size at read (midway failure)",
.run = mmc_test_multi_xfersize_read,
},
#ifdef CONFIG_HIGHMEM
{
.name = "Highmem write",
.prepare = mmc_test_prepare_write,
.run = mmc_test_write_high,
.cleanup = mmc_test_cleanup,
},
{
.name = "Highmem read",
.prepare = mmc_test_prepare_read,
.run = mmc_test_read_high,
.cleanup = mmc_test_cleanup,
},
{
.name = "Multi-block highmem write",
.prepare = mmc_test_prepare_write,
.run = mmc_test_multi_write_high,
.cleanup = mmc_test_cleanup,
},
{
.name = "Multi-block highmem read",
.prepare = mmc_test_prepare_read,
.run = mmc_test_multi_read_high,
.cleanup = mmc_test_cleanup,
},
#else
{
.name = "Highmem write",
.run = mmc_test_no_highmem,
},
{
.name = "Highmem read",
.run = mmc_test_no_highmem,
},
{
.name = "Multi-block highmem write",
.run = mmc_test_no_highmem,
},
{
.name = "Multi-block highmem read",
.run = mmc_test_no_highmem,
},
#endif /* CONFIG_HIGHMEM */
{
.name = "Best-case read performance",
.prepare = mmc_test_area_prepare_fill,
.run = mmc_test_best_read_performance,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Best-case write performance",
.prepare = mmc_test_area_prepare_erase,
.run = mmc_test_best_write_performance,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Best-case read performance into scattered pages",
.prepare = mmc_test_area_prepare_fill,
.run = mmc_test_best_read_perf_max_scatter,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Best-case write performance from scattered pages",
.prepare = mmc_test_area_prepare_erase,
.run = mmc_test_best_write_perf_max_scatter,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Single read performance by transfer size",
.prepare = mmc_test_area_prepare_fill,
.run = mmc_test_profile_read_perf,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Single write performance by transfer size",
.prepare = mmc_test_area_prepare,
.run = mmc_test_profile_write_perf,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Single trim performance by transfer size",
.prepare = mmc_test_area_prepare_fill,
.run = mmc_test_profile_trim_perf,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Consecutive read performance by transfer size",
.prepare = mmc_test_area_prepare_fill,
.run = mmc_test_profile_seq_read_perf,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Consecutive write performance by transfer size",
.prepare = mmc_test_area_prepare,
.run = mmc_test_profile_seq_write_perf,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Consecutive trim performance by transfer size",
.prepare = mmc_test_area_prepare,
.run = mmc_test_profile_seq_trim_perf,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Random read performance by transfer size",
.prepare = mmc_test_area_prepare,
.run = mmc_test_random_read_perf,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Random write performance by transfer size",
.prepare = mmc_test_area_prepare,
.run = mmc_test_random_write_perf,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Large sequential read into scattered pages",
.prepare = mmc_test_area_prepare,
.run = mmc_test_large_seq_read_perf,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Large sequential write from scattered pages",
.prepare = mmc_test_area_prepare,
.run = mmc_test_large_seq_write_perf,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Write performance with blocking req 4k to 4MB",
.prepare = mmc_test_area_prepare,
.run = mmc_test_profile_mult_write_blocking_perf,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Write performance with non-blocking req 4k to 4MB",
.prepare = mmc_test_area_prepare,
.run = mmc_test_profile_mult_write_nonblock_perf,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Read performance with blocking req 4k to 4MB",
.prepare = mmc_test_area_prepare,
.run = mmc_test_profile_mult_read_blocking_perf,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Read performance with non-blocking req 4k to 4MB",
.prepare = mmc_test_area_prepare,
.run = mmc_test_profile_mult_read_nonblock_perf,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Write performance blocking req 1 to 512 sg elems",
.prepare = mmc_test_area_prepare,
.run = mmc_test_profile_sglen_wr_blocking_perf,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Write performance non-blocking req 1 to 512 sg elems",
.prepare = mmc_test_area_prepare,
.run = mmc_test_profile_sglen_wr_nonblock_perf,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Read performance blocking req 1 to 512 sg elems",
.prepare = mmc_test_area_prepare,
.run = mmc_test_profile_sglen_r_blocking_perf,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Read performance non-blocking req 1 to 512 sg elems",
.prepare = mmc_test_area_prepare,
.run = mmc_test_profile_sglen_r_nonblock_perf,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Reset test",
.run = mmc_test_reset,
},
{
.name = "Commands during read - no Set Block Count (CMD23)",
.prepare = mmc_test_area_prepare,
.run = mmc_test_cmds_during_read,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Commands during write - no Set Block Count (CMD23)",
.prepare = mmc_test_area_prepare,
.run = mmc_test_cmds_during_write,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Commands during read - use Set Block Count (CMD23)",
.prepare = mmc_test_area_prepare,
.run = mmc_test_cmds_during_read_cmd23,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Commands during write - use Set Block Count (CMD23)",
.prepare = mmc_test_area_prepare,
.run = mmc_test_cmds_during_write_cmd23,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Commands during non-blocking read - use Set Block Count (CMD23)",
.prepare = mmc_test_area_prepare,
.run = mmc_test_cmds_during_read_cmd23_nonblock,
.cleanup = mmc_test_area_cleanup,
},
{
.name = "Commands during non-blocking write - use Set Block Count (CMD23)",
.prepare = mmc_test_area_prepare,
.run = mmc_test_cmds_during_write_cmd23_nonblock,
.cleanup = mmc_test_area_cleanup,
},
};
static DEFINE_MUTEX(mmc_test_lock);
static LIST_HEAD(mmc_test_result);
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
int i, ret;
pr_info("%s: Starting tests of card %s...\n",
mmc_hostname(test->card->host), mmc_card_id(test->card));
mmc_claim_host(test->card->host);
for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
struct mmc_test_general_result *gr;
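/* A testcase value of 0 means "run all tests". */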
if (testcase && ((i + 1) != testcase))
continue;
pr_info("%s: Test case %d. %s...\n",
mmc_hostname(test->card->host), i + 1,
mmc_test_cases[i].name);
if (mmc_test_cases[i].prepare) {
ret = mmc_test_cases[i].prepare(test);
if (ret) {
pr_info("%s: Result: Prepare stage failed! (%d)\n",
mmc_hostname(test->card->host),
ret);
continue;
}
}
gr = kzalloc(sizeof(*gr), GFP_KERNEL);
if (gr) {
INIT_LIST_HEAD(&gr->tr_lst);
/* Record the data we already know */
gr->card = test->card;
gr->testcase = i;
/* Append container to global one */
list_add_tail(&gr->link, &mmc_test_result);
/*
 * Save the pointer to the created container in our private
 * structure.
 */
test->gr = gr;
}
ret = mmc_test_cases[i].run(test);
switch (ret) {
case RESULT_OK:
pr_info("%s: Result: OK\n",
mmc_hostname(test->card->host));
break;
case RESULT_FAIL:
pr_info("%s: Result: FAILED\n",
mmc_hostname(test->card->host));
break;
case RESULT_UNSUP_HOST:
pr_info("%s: Result: UNSUPPORTED (by host)\n",
mmc_hostname(test->card->host));
break;
case RESULT_UNSUP_CARD:
pr_info("%s: Result: UNSUPPORTED (by card)\n",
mmc_hostname(test->card->host));
break;
default:
pr_info("%s: Result: ERROR (%d)\n",
mmc_hostname(test->card->host), ret);
}
/* Save the result */
if (gr)
gr->result = ret;
if (mmc_test_cases[i].cleanup) {
ret = mmc_test_cases[i].cleanup(test);
if (ret) {
pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
mmc_hostname(test->card->host),
ret);
}
}
}
mmc_release_host(test->card->host);
pr_info("%s: Tests completed.\n",
mmc_hostname(test->card->host));
}
static void mmc_test_free_result(struct mmc_card *card)
{
struct mmc_test_general_result *gr, *grs;
mutex_lock(&mmc_test_lock);
list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
struct mmc_test_transfer_result *tr, *trs;
if (card && gr->card != card)
continue;
list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
list_del(&tr->link);
kfree(tr);
}
list_del(&gr->link);
kfree(gr);
}
mutex_unlock(&mmc_test_lock);
}
static LIST_HEAD(mmc_test_file_test);
static int mtf_test_show(struct seq_file *sf, void *data)
{
struct mmc_card *card = sf->private;
struct mmc_test_general_result *gr;
mutex_lock(&mmc_test_lock);
list_for_each_entry(gr, &mmc_test_result, link) {
struct mmc_test_transfer_result *tr;
if (gr->card != card)
continue;
seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
list_for_each_entry(tr, &gr->tr_lst, link) {
seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n",
tr->count, tr->sectors,
(u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec,
tr->rate, tr->iops / 100, tr->iops % 100);
}
}
mutex_unlock(&mmc_test_lock);
return 0;
}
static int mtf_test_open(struct inode *inode, struct file *file)
{
return single_open(file, mtf_test_show, inode->i_private);
}
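/*
 * Writing a test number to the "test" debugfs file runs that test;
 * writing 0 runs all tests. Illustrative usage from userspace (the
 * exact debugfs path depends on the card):
 *
 *	cat /sys/kernel/debug/mmc0/mmc0:0001/testlist
 *	echo 1 > /sys/kernel/debug/mmc0/mmc0:0001/test
 */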
static ssize_t mtf_test_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
struct seq_file *sf = file->private_data;
struct mmc_card *card = sf->private;
struct mmc_test_card *test;
long testcase;
int ret;
ret = kstrtol_from_user(buf, count, 10, &testcase);
if (ret)
return ret;
test = kzalloc(sizeof(*test), GFP_KERNEL);
if (!test)
return -ENOMEM;
/*
 * Remove all results associated with the given card, so that only
 * data from the last run is kept.
 */
mmc_test_free_result(card);
test->card = card;
test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif
#ifdef CONFIG_HIGHMEM
if (test->buffer && test->highmem) {
#else
if (test->buffer) {
#endif
mutex_lock(&mmc_test_lock);
mmc_test_run(test, testcase);
mutex_unlock(&mmc_test_lock);
}
#ifdef CONFIG_HIGHMEM
__free_pages(test->highmem, BUFFER_ORDER);
#endif
kfree(test->buffer);
kfree(test);
return count;
}
static const struct file_operations mmc_test_fops_test = {
.open = mtf_test_open,
.read = seq_read,
.write = mtf_test_write,
.llseek = seq_lseek,
.release = single_release,
};
static int mtf_testlist_show(struct seq_file *sf, void *data)
{
int i;
mutex_lock(&mmc_test_lock);
seq_puts(sf, "0:\tRun all tests\n");
for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);
mutex_unlock(&mmc_test_lock);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(mtf_testlist);
static void mmc_test_free_dbgfs_file(struct mmc_card *card)
{
struct mmc_test_dbgfs_file *df, *dfs;
mutex_lock(&mmc_test_lock);
list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
if (card && df->card != card)
continue;
debugfs_remove(df->file);
list_del(&df->link);
kfree(df);
}
mutex_unlock(&mmc_test_lock);
}
static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
const char *name, umode_t mode, const struct file_operations *fops)
{
struct dentry *file = NULL;
struct mmc_test_dbgfs_file *df;
if (card->debugfs_root)
file = debugfs_create_file(name, mode, card->debugfs_root,
card, fops);
df = kmalloc(sizeof(*df), GFP_KERNEL);
if (!df) {
debugfs_remove(file);
return -ENOMEM;
}
df->card = card;
df->file = file;
list_add(&df->link, &mmc_test_file_test);
return 0;
}
static int mmc_test_register_dbgfs_file(struct mmc_card *card)
{
int ret;
mutex_lock(&mmc_test_lock);
ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
&mmc_test_fops_test);
if (ret)
goto err;
ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
&mtf_testlist_fops);
if (ret)
goto err;
err:
mutex_unlock(&mmc_test_lock);
return ret;
}
static int mmc_test_probe(struct mmc_card *card)
{
int ret;
if (!mmc_card_mmc(card) && !mmc_card_sd(card))
return -ENODEV;
ret = mmc_test_register_dbgfs_file(card);
if (ret)
return ret;
if (card->ext_csd.cmdq_en) {
mmc_claim_host(card->host);
ret = mmc_cmdq_disable(card);
mmc_release_host(card->host);
if (ret)
return ret;
}
dev_info(&card->dev, "Card claimed for testing.\n");
return 0;
}
static void mmc_test_remove(struct mmc_card *card)
{
if (card->reenable_cmdq) {
mmc_claim_host(card->host);
mmc_cmdq_enable(card);
mmc_release_host(card->host);
}
mmc_test_free_result(card);
mmc_test_free_dbgfs_file(card);
}
static struct mmc_driver mmc_driver = {
.drv = {
.name = "mmc_test",
},
.probe = mmc_test_probe,
.remove = mmc_test_remove,
};
static int __init mmc_test_init(void)
{
return mmc_register_driver(&mmc_driver);
}
static void __exit mmc_test_exit(void)
{
/* Clear stale data even if a card is still plugged in */
mmc_test_free_result(NULL);
mmc_test_free_dbgfs_file(NULL);
mmc_unregister_driver(&mmc_driver);
}
module_init(mmc_test_init);
module_exit(mmc_test_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");
| linux-master | drivers/mmc/core/mmc_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic GPIO card-detect helper
*
* Copyright (C) 2011, Guennadi Liakhovetski <[email protected]>
*/
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "slot-gpio.h"
struct mmc_gpio {
struct gpio_desc *ro_gpio;
struct gpio_desc *cd_gpio;
irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
char *ro_label;
char *cd_label;
u32 cd_debounce_delay_ms;
int cd_irq;
};
static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
{
/* Schedule a card detection after a debounce timeout */
struct mmc_host *host = dev_id;
struct mmc_gpio *ctx = host->slot.handler_priv;
host->trigger_card_event = true;
mmc_detect_change(host, msecs_to_jiffies(ctx->cd_debounce_delay_ms));
return IRQ_HANDLED;
}
int mmc_gpio_alloc(struct mmc_host *host)
{
const char *devname = dev_name(host->parent);
struct mmc_gpio *ctx;
ctx = devm_kzalloc(host->parent, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->cd_debounce_delay_ms = 200;
ctx->cd_label = devm_kasprintf(host->parent, GFP_KERNEL, "%s cd", devname);
if (!ctx->cd_label)
return -ENOMEM;
ctx->ro_label = devm_kasprintf(host->parent, GFP_KERNEL, "%s ro", devname);
if (!ctx->ro_label)
return -ENOMEM;
ctx->cd_irq = -EINVAL;
host->slot.handler_priv = ctx;
host->slot.cd_irq = -EINVAL;
return 0;
}
void mmc_gpio_set_cd_irq(struct mmc_host *host, int irq)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
if (!ctx || irq < 0)
return;
ctx->cd_irq = irq;
}
EXPORT_SYMBOL(mmc_gpio_set_cd_irq);
int mmc_gpio_get_ro(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
if (!ctx || !ctx->ro_gpio)
return -ENOSYS;
return gpiod_get_value_cansleep(ctx->ro_gpio);
}
EXPORT_SYMBOL(mmc_gpio_get_ro);
int mmc_gpio_get_cd(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
int cansleep;
if (!ctx || !ctx->cd_gpio)
return -ENOSYS;
cansleep = gpiod_cansleep(ctx->cd_gpio);
return cansleep ?
gpiod_get_value_cansleep(ctx->cd_gpio) :
gpiod_get_value(ctx->cd_gpio);
}
EXPORT_SYMBOL(mmc_gpio_get_cd);
void mmc_gpiod_request_cd_irq(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
int irq = -EINVAL;
int ret;
if (host->slot.cd_irq >= 0 || !ctx || !ctx->cd_gpio)
return;
/*
* Do not use IRQ if the platform prefers to poll, e.g., because that
* IRQ number is already used by another unit and cannot be shared.
*/
if (ctx->cd_irq >= 0)
irq = ctx->cd_irq;
else if (!(host->caps & MMC_CAP_NEEDS_POLL))
irq = gpiod_to_irq(ctx->cd_gpio);
if (irq >= 0) {
if (!ctx->cd_gpio_isr)
ctx->cd_gpio_isr = mmc_gpio_cd_irqt;
ret = devm_request_threaded_irq(host->parent, irq,
NULL, ctx->cd_gpio_isr,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
ctx->cd_label, host);
if (ret < 0)
irq = ret;
}
host->slot.cd_irq = irq;
if (irq < 0)
host->caps |= MMC_CAP_NEEDS_POLL;
}
EXPORT_SYMBOL(mmc_gpiod_request_cd_irq);
int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on)
{
int ret = 0;
if (!(host->caps & MMC_CAP_CD_WAKE) ||
host->slot.cd_irq < 0 ||
on == host->slot.cd_wake_enabled)
return 0;
if (on) {
ret = enable_irq_wake(host->slot.cd_irq);
host->slot.cd_wake_enabled = !ret;
} else {
disable_irq_wake(host->slot.cd_irq);
host->slot.cd_wake_enabled = false;
}
return ret;
}
EXPORT_SYMBOL(mmc_gpio_set_cd_wake);
/*
 * Register an alternate interrupt service routine for
 * the card-detect GPIO.
 */
void mmc_gpio_set_cd_isr(struct mmc_host *host,
irqreturn_t (*isr)(int irq, void *dev_id))
{
struct mmc_gpio *ctx = host->slot.handler_priv;
WARN_ON(ctx->cd_gpio_isr);
ctx->cd_gpio_isr = isr;
}
EXPORT_SYMBOL(mmc_gpio_set_cd_isr);
/**
* mmc_gpiod_request_cd - request a gpio descriptor for card-detection
* @host: mmc host
* @con_id: function within the GPIO consumer
* @idx: index of the GPIO to obtain in the consumer
* @override_active_level: ignore %GPIO_ACTIVE_LOW flag
* @debounce: debounce time in microseconds
*
* Note that this must be called prior to mmc_add_host()
* otherwise the caller must also call mmc_gpiod_request_cd_irq().
*
* Returns zero on success, else an error.
*/
int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
unsigned int debounce)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
struct gpio_desc *desc;
int ret;
desc = devm_gpiod_get_index(host->parent, con_id, idx, GPIOD_IN);
if (IS_ERR(desc))
return PTR_ERR(desc);
/* Update default label if no con_id provided */
if (!con_id)
gpiod_set_consumer_name(desc, ctx->cd_label);
if (debounce) {
ret = gpiod_set_debounce(desc, debounce);
if (ret < 0)
ctx->cd_debounce_delay_ms = debounce / 1000;
}
/* override forces default (active-low) polarity ... */
if (override_active_level && !gpiod_is_active_low(desc))
gpiod_toggle_active_low(desc);
/* ... or active-high */
if (host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
gpiod_toggle_active_low(desc);
ctx->cd_gpio = desc;
return 0;
}
EXPORT_SYMBOL(mmc_gpiod_request_cd);
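/*
 * Illustrative host-driver usage (names are examples, not taken from
 * this file): request the card-detect GPIO before mmc_add_host() so
 * that the CD interrupt is requested automatically.
 *
 *	ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
 *	if (ret)
 *		return ret;
 */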
bool mmc_can_gpio_cd(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
return ctx->cd_gpio ? true : false;
}
EXPORT_SYMBOL(mmc_can_gpio_cd);
/**
* mmc_gpiod_request_ro - request a gpio descriptor for write protection
* @host: mmc host
* @con_id: function within the GPIO consumer
* @idx: index of the GPIO to obtain in the consumer
* @debounce: debounce time in microseconds
*
* Returns zero on success, else an error.
*/
int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
unsigned int idx, unsigned int debounce)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
struct gpio_desc *desc;
int ret;
desc = devm_gpiod_get_index(host->parent, con_id, idx, GPIOD_IN);
if (IS_ERR(desc))
return PTR_ERR(desc);
/* Update default label if no con_id provided */
if (!con_id)
gpiod_set_consumer_name(desc, ctx->ro_label);
if (debounce) {
ret = gpiod_set_debounce(desc, debounce);
if (ret < 0)
return ret;
}
if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH)
gpiod_toggle_active_low(desc);
ctx->ro_gpio = desc;
return 0;
}
EXPORT_SYMBOL(mmc_gpiod_request_ro);
bool mmc_can_gpio_ro(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
return ctx->ro_gpio ? true : false;
}
EXPORT_SYMBOL(mmc_can_gpio_ro);
| linux-master | drivers/mmc/core/slot-gpio.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* linux/drivers/mmc/sdio_ops.c
*
* Copyright 2006-2007 Pierre Ossman
*/
#include <linux/scatterlist.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include "core.h"
#include "sdio_ops.h"
int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
struct mmc_command cmd = {};
int i, err = 0;
cmd.opcode = SD_IO_SEND_OP_COND;
cmd.arg = ocr;
cmd.flags = MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR;
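/* Poll for up to ~1 second (100 iterations x 10 ms) for the card to leave the busy state. */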
for (i = 100; i; i--) {
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
if (err)
break;
/* if we're just probing, do a single pass */
if (ocr == 0)
break;
/* otherwise wait until reset completes */
if (mmc_host_is_spi(host)) {
/*
 * Both R1_SPI_IDLE and MMC_CARD_BUSY indicate
 * an initialized card under SPI, but some cards
 * (Marvell's) only respond correctly when this
 * one is checked.
 */
if (cmd.resp[1] & MMC_CARD_BUSY)
break;
} else {
if (cmd.resp[0] & MMC_CARD_BUSY)
break;
}
err = -ETIMEDOUT;
mmc_delay(10);
}
if (rocr)
*rocr = cmd.resp[mmc_host_is_spi(host) ? 1 : 0];
return err;
}
static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,
unsigned addr, u8 in, u8 *out)
{
struct mmc_command cmd = {};
int err;
if (fn > 7)
return -EINVAL;
/* sanity check */
if (addr & ~0x1FFFF)
return -EINVAL;
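/*
 * CMD52 argument layout per the SDIO spec: bit 31 R/W flag, bits 30:28
 * function number, bit 27 RAW (read-after-write), bits 25:9 register
 * address, bits 7:0 write data.
 */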
cmd.opcode = SD_IO_RW_DIRECT;
cmd.arg = write ? 0x80000000 : 0x00000000;
cmd.arg |= fn << 28;
cmd.arg |= (write && out) ? 0x08000000 : 0x00000000;
cmd.arg |= addr << 9;
cmd.arg |= in;
cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
return err;
if (mmc_host_is_spi(host)) {
/* host driver already reported errors */
} else {
if (cmd.resp[0] & R5_ERROR)
return -EIO;
if (cmd.resp[0] & R5_FUNCTION_NUMBER)
return -EINVAL;
if (cmd.resp[0] & R5_OUT_OF_RANGE)
return -ERANGE;
}
if (out) {
if (mmc_host_is_spi(host))
*out = (cmd.resp[0] >> 8) & 0xFF;
else
*out = cmd.resp[0] & 0xFF;
}
return 0;
}
int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
unsigned addr, u8 in, u8 *out)
{
return mmc_io_rw_direct_host(card->host, write, fn, addr, in, out);
}
int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz)
{
struct mmc_request mrq = {};
struct mmc_command cmd = {};
struct mmc_data data = {};
struct scatterlist sg, *sg_ptr;
struct sg_table sgtable;
unsigned int nents, left_size, i;
unsigned int seg_size = card->host->max_seg_size;
int err;
WARN_ON(blksz == 0);
/* sanity check */
if (addr & ~0x1FFFF)
return -EINVAL;
mrq.cmd = &cmd;
mrq.data = &data;
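/*
 * CMD53 argument layout per the SDIO spec: bit 31 R/W flag, bits 30:28
 * function number, bit 27 block mode, bit 26 OP code (incrementing
 * address), bits 25:9 register address, bits 8:0 byte/block count.
 */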
cmd.opcode = SD_IO_RW_EXTENDED;
cmd.arg = write ? 0x80000000 : 0x00000000;
cmd.arg |= fn << 28;
cmd.arg |= incr_addr ? 0x04000000 : 0x00000000;
cmd.arg |= addr << 9;
if (blocks == 0)
cmd.arg |= (blksz == 512) ? 0 : blksz; /* byte mode */
else
cmd.arg |= 0x08000000 | blocks; /* block mode */
cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
data.blksz = blksz;
/* Code in host drivers/frameworks assumes that "blocks" is always >= 1 */
data.blocks = blocks ? blocks : 1;
data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
left_size = data.blksz * data.blocks;
nents = DIV_ROUND_UP(left_size, seg_size);
if (nents > 1) {
if (sg_alloc_table(&sgtable, nents, GFP_KERNEL))
return -ENOMEM;
data.sg = sgtable.sgl;
data.sg_len = nents;
for_each_sg(data.sg, sg_ptr, data.sg_len, i) {
sg_set_buf(sg_ptr, buf + i * seg_size,
min(seg_size, left_size));
left_size -= seg_size;
}
} else {
data.sg = &sg;
data.sg_len = 1;
sg_init_one(&sg, buf, left_size);
}
mmc_set_data_timeout(&data, card);
mmc_pre_req(card->host, &mrq);
mmc_wait_for_req(card->host, &mrq);
if (cmd.error)
err = cmd.error;
else if (data.error)
err = data.error;
else if (mmc_host_is_spi(card->host))
/* host driver already reported errors */
err = 0;
else if (cmd.resp[0] & R5_ERROR)
err = -EIO;
else if (cmd.resp[0] & R5_FUNCTION_NUMBER)
err = -EINVAL;
else if (cmd.resp[0] & R5_OUT_OF_RANGE)
err = -ERANGE;
else
err = 0;
mmc_post_req(card->host, &mrq, err);
if (nents > 1)
sg_free_table(&sgtable);
return err;
}
int sdio_reset(struct mmc_host *host)
{
int ret;
u8 abort;
/*
 * SDIO Simplified Specification V2.0, 4.4 Reset for SDIO: setting the
 * RES bit (bit 3) of the CCCR ABORT register resets the I/O portion
 * of the card.
 */
ret = mmc_io_rw_direct_host(host, 0, 0, SDIO_CCCR_ABORT, 0, &abort);
if (ret)
abort = 0x08;
else
abort |= 0x08;
return mmc_io_rw_direct_host(host, 1, 0, SDIO_CCCR_ABORT, abort, NULL);
}
| linux-master | drivers/mmc/core/sdio_ops.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015, Samsung Electronics Co., Ltd.
*
* Author: Marek Szyprowski <[email protected]>
*
* Simple eMMC hardware reset provider
*/
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/reboot.h>
#include <linux/mmc/host.h>
#include "pwrseq.h"
struct mmc_pwrseq_emmc {
struct mmc_pwrseq pwrseq;
struct notifier_block reset_nb;
struct gpio_desc *reset_gpio;
};
#define to_pwrseq_emmc(p) container_of(p, struct mmc_pwrseq_emmc, pwrseq)
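/*
 * Pulse the RST_n line as the eMMC specification requires: assert it
 * for at least 1 us (tRSTW), then wait about 200 us (tRSCA) before the
 * card is accessed again.
 */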
static void mmc_pwrseq_emmc_reset(struct mmc_host *host)
{
struct mmc_pwrseq_emmc *pwrseq = to_pwrseq_emmc(host->pwrseq);
gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
udelay(1);
gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
udelay(200);
}
static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
unsigned long mode, void *cmd)
{
struct mmc_pwrseq_emmc *pwrseq = container_of(this,
struct mmc_pwrseq_emmc, reset_nb);
gpiod_set_value(pwrseq->reset_gpio, 1);
udelay(1);
gpiod_set_value(pwrseq->reset_gpio, 0);
udelay(200);
return NOTIFY_DONE;
}
static const struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
.reset = mmc_pwrseq_emmc_reset,
};
static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
{
struct mmc_pwrseq_emmc *pwrseq;
struct device *dev = &pdev->dev;
pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
if (!pwrseq)
return -ENOMEM;
pwrseq->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(pwrseq->reset_gpio))
return PTR_ERR(pwrseq->reset_gpio);
if (!gpiod_cansleep(pwrseq->reset_gpio)) {
/*
 * Register a restart handler to ensure the eMMC is also reset
 * from emergency_reboot(). Priority 255 is the highest, so it is
 * executed before any other restart handler.
 */
pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
pwrseq->reset_nb.priority = 255;
register_restart_handler(&pwrseq->reset_nb);
} else {
dev_notice(dev, "EMMC reset pin tied to a sleepy GPIO driver; reset on emergency-reboot disabled\n");
}
pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
pwrseq->pwrseq.dev = dev;
pwrseq->pwrseq.owner = THIS_MODULE;
platform_set_drvdata(pdev, pwrseq);
return mmc_pwrseq_register(&pwrseq->pwrseq);
}
static void mmc_pwrseq_emmc_remove(struct platform_device *pdev)
{
struct mmc_pwrseq_emmc *pwrseq = platform_get_drvdata(pdev);
unregister_restart_handler(&pwrseq->reset_nb);
mmc_pwrseq_unregister(&pwrseq->pwrseq);
}
static const struct of_device_id mmc_pwrseq_emmc_of_match[] = {
{ .compatible = "mmc-pwrseq-emmc",},
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, mmc_pwrseq_emmc_of_match);
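/*
 * Illustrative device-tree usage (node name and GPIO specifier are
 * examples only):
 *
 *	emmc_pwrseq: pwrseq {
 *		compatible = "mmc-pwrseq-emmc";
 *		reset-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
 *	};
 */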
static struct platform_driver mmc_pwrseq_emmc_driver = {
.probe = mmc_pwrseq_emmc_probe,
.remove_new = mmc_pwrseq_emmc_remove,
.driver = {
.name = "pwrseq_emmc",
.of_match_table = mmc_pwrseq_emmc_of_match,
},
};
module_platform_driver(mmc_pwrseq_emmc_driver);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mmc/core/pwrseq_emmc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* linux/drivers/mmc/sdio.c
*
* Copyright 2006-2007 Pierre Ossman
*/
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include "core.h"
#include "card.h"
#include "host.h"
#include "bus.h"
#include "quirks.h"
#include "sd.h"
#include "sdio_bus.h"
#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"
#include "sdio_cis.h"
MMC_DEV_ATTR(vendor, "0x%04x\n", card->cis.vendor);
MMC_DEV_ATTR(device, "0x%04x\n", card->cis.device);
MMC_DEV_ATTR(revision, "%u.%u\n", card->major_rev, card->minor_rev);
MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
#define sdio_info_attr(num) \
static ssize_t info##num##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct mmc_card *card = mmc_dev_to_card(dev); \
\
if (num > card->num_info) \
return -ENODATA; \
if (!card->info[num - 1][0]) \
return 0; \
return sysfs_emit(buf, "%s\n", card->info[num - 1]); \
} \
static DEVICE_ATTR_RO(info##num)
sdio_info_attr(1);
sdio_info_attr(2);
sdio_info_attr(3);
sdio_info_attr(4);
static struct attribute *sdio_std_attrs[] = {
&dev_attr_vendor.attr,
&dev_attr_device.attr,
&dev_attr_revision.attr,
&dev_attr_info1.attr,
&dev_attr_info2.attr,
&dev_attr_info3.attr,
&dev_attr_info4.attr,
&dev_attr_ocr.attr,
&dev_attr_rca.attr,
NULL,
};
ATTRIBUTE_GROUPS(sdio_std);
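/*
 * These attributes are exposed in sysfs for each SDIO card, e.g. (path
 * is illustrative): /sys/bus/mmc/devices/mmc0:0001/vendor
 */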
static struct device_type sdio_type = {
.groups = sdio_std_groups,
};
static int sdio_read_fbr(struct sdio_func *func)
{
int ret;
unsigned char data;
if (mmc_card_nonstd_func_interface(func->card)) {
func->class = SDIO_CLASS_NONE;
return 0;
}
ret = mmc_io_rw_direct(func->card, 0, 0,
SDIO_FBR_BASE(func->num) + SDIO_FBR_STD_IF, 0, &data);
if (ret)
goto out;
data &= 0x0f;
if (data == 0x0f) {
ret = mmc_io_rw_direct(func->card, 0, 0,
SDIO_FBR_BASE(func->num) + SDIO_FBR_STD_IF_EXT, 0, &data);
if (ret)
goto out;
}
func->class = data;
out:
return ret;
}
static int sdio_init_func(struct mmc_card *card, unsigned int fn)
{
int ret;
struct sdio_func *func;
if (WARN_ON(fn > SDIO_MAX_FUNCS))
return -EINVAL;
func = sdio_alloc_func(card);
if (IS_ERR(func))
return PTR_ERR(func);
func->num = fn;
if (!(card->quirks & MMC_QUIRK_NONSTD_SDIO)) {
ret = sdio_read_fbr(func);
if (ret)
goto fail;
ret = sdio_read_func_cis(func);
if (ret)
goto fail;
} else {
func->vendor = func->card->cis.vendor;
func->device = func->card->cis.device;
func->max_blksize = func->card->cis.blksize;
}
card->sdio_func[fn - 1] = func;
return 0;
fail:
/*
* It is okay to remove the function here even though we hold
* the host lock as we haven't registered the device yet.
*/
sdio_remove_func(func);
return ret;
}
static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
{
int ret;
int cccr_vsn;
int uhs = ocr & R4_18V_PRESENT;
unsigned char data;
unsigned char speed;
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_CCCR, 0, &data);
if (ret)
goto out;
cccr_vsn = data & 0x0f;
if (cccr_vsn > SDIO_CCCR_REV_3_00) {
pr_err("%s: unrecognised CCCR structure version %d\n",
mmc_hostname(card->host), cccr_vsn);
return -EINVAL;
}
card->cccr.sdio_vsn = (data & 0xf0) >> 4;
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_CAPS, 0, &data);
if (ret)
goto out;
if (data & SDIO_CCCR_CAP_SMB)
card->cccr.multi_block = 1;
if (data & SDIO_CCCR_CAP_LSC)
card->cccr.low_speed = 1;
if (data & SDIO_CCCR_CAP_4BLS)
card->cccr.wide_bus = 1;
if (cccr_vsn >= SDIO_CCCR_REV_1_10) {
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_POWER, 0, &data);
if (ret)
goto out;
if (data & SDIO_POWER_SMPC)
card->cccr.high_power = 1;
}
if (cccr_vsn >= SDIO_CCCR_REV_1_20) {
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
if (ret)
goto out;
card->scr.sda_spec3 = 0;
card->sw_caps.sd3_bus_mode = 0;
card->sw_caps.sd3_drv_type = 0;
if (cccr_vsn >= SDIO_CCCR_REV_3_00 && uhs) {
card->scr.sda_spec3 = 1;
ret = mmc_io_rw_direct(card, 0, 0,
SDIO_CCCR_UHS, 0, &data);
if (ret)
goto out;
if (mmc_host_uhs(card->host)) {
if (data & SDIO_UHS_DDR50)
card->sw_caps.sd3_bus_mode
|= SD_MODE_UHS_DDR50 | SD_MODE_UHS_SDR50
| SD_MODE_UHS_SDR25 | SD_MODE_UHS_SDR12;
if (data & SDIO_UHS_SDR50)
card->sw_caps.sd3_bus_mode
|= SD_MODE_UHS_SDR50 | SD_MODE_UHS_SDR25
| SD_MODE_UHS_SDR12;
if (data & SDIO_UHS_SDR104)
card->sw_caps.sd3_bus_mode
|= SD_MODE_UHS_SDR104 | SD_MODE_UHS_SDR50
| SD_MODE_UHS_SDR25 | SD_MODE_UHS_SDR12;
}
ret = mmc_io_rw_direct(card, 0, 0,
SDIO_CCCR_DRIVE_STRENGTH, 0, &data);
if (ret)
goto out;
if (data & SDIO_DRIVE_SDTA)
card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_A;
if (data & SDIO_DRIVE_SDTC)
card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_C;
if (data & SDIO_DRIVE_SDTD)
card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_D;
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTERRUPT_EXT, 0, &data);
if (ret)
goto out;
if (data & SDIO_INTERRUPT_EXT_SAI) {
data |= SDIO_INTERRUPT_EXT_EAI;
ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_INTERRUPT_EXT,
data, NULL);
if (ret)
goto out;
card->cccr.enable_async_irq = 1;
}
}
/* If no UHS mode was set up, make sure we check for high speed */
if (!card->sw_caps.sd3_bus_mode) {
if (speed & SDIO_SPEED_SHS) {
card->cccr.high_speed = 1;
card->sw_caps.hs_max_dtr = 50000000;
} else {
card->cccr.high_speed = 0;
card->sw_caps.hs_max_dtr = 25000000;
}
}
}
out:
return ret;
}
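/*
 * Switch the card to 4-bit bus width if both the host and the card
 * support it. Returns 1 if the bus width was changed, 0 if it was left
 * unchanged, or a negative error code.
 */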
static int sdio_enable_wide(struct mmc_card *card)
{
int ret;
u8 ctrl;
if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
return 0;
if (card->cccr.low_speed && !card->cccr.wide_bus)
return 0;
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl);
if (ret)
return ret;
if ((ctrl & SDIO_BUS_WIDTH_MASK) == SDIO_BUS_WIDTH_RESERVED)
pr_warn("%s: SDIO_CCCR_IF is invalid: 0x%02x\n",
mmc_hostname(card->host), ctrl);
/* set as 4-bit bus width */
ctrl &= ~SDIO_BUS_WIDTH_MASK;
ctrl |= SDIO_BUS_WIDTH_4BIT;
ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
if (ret)
return ret;
return 1;
}
/*
 * If desired, disconnect the pull-up resistor on CD/DAT[3] (pin 1)
 * of the card. This may be required on certain setups of boards,
 * controllers and embedded SDIO devices that do not need the card's
 * pull-up. As a result, card detection is disabled and power is saved.
 */
static int sdio_disable_cd(struct mmc_card *card)
{
int ret;
u8 ctrl;
if (!mmc_card_disable_cd(card))
return 0;
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl);
if (ret)
return ret;
ctrl |= SDIO_BUS_CD_DISABLE;
return mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
}
/*
* Devices that remain active during a system suspend are
* put back into 1-bit mode.
*/
static int sdio_disable_wide(struct mmc_card *card)
{
int ret;
u8 ctrl;
if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
return 0;
if (card->cccr.low_speed && !card->cccr.wide_bus)
return 0;
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl);
if (ret)
return ret;
if (!(ctrl & SDIO_BUS_WIDTH_4BIT))
return 0;
ctrl &= ~SDIO_BUS_WIDTH_4BIT;
ctrl |= SDIO_BUS_ASYNC_INT;
ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
if (ret)
return ret;
mmc_set_bus_width(card->host, MMC_BUS_WIDTH_1);
return 0;
}
static int sdio_disable_4bit_bus(struct mmc_card *card)
{
int err;
if (mmc_card_sdio(card))
goto out;
if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
return 0;
if (!(card->scr.bus_widths & SD_SCR_BUS_WIDTH_4))
return 0;
err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_1);
if (err)
return err;
out:
return sdio_disable_wide(card);
}
static int sdio_enable_4bit_bus(struct mmc_card *card)
{
int err;
err = sdio_enable_wide(card);
if (err <= 0)
return err;
if (mmc_card_sdio(card))
goto out;
if (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4) {
err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
if (err) {
sdio_disable_wide(card);
return err;
}
}
out:
mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
return 0;
}
/*
* Test if the card supports high-speed mode and, if so, switch to it.
*/
static int mmc_sdio_switch_hs(struct mmc_card *card, int enable)
{
int ret;
u8 speed;
if (!(card->host->caps & MMC_CAP_SD_HIGHSPEED))
return 0;
if (!card->cccr.high_speed)
return 0;
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
if (ret)
return ret;
if (enable)
speed |= SDIO_SPEED_EHS;
else
speed &= ~SDIO_SPEED_EHS;
ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
if (ret)
return ret;
return 1;
}
/*
 * Enable the SDIO/combo card's high-speed mode. Returns 1 if high
 * speed was enabled, 0 if not supported, or a negative error code.
 */
static int sdio_enable_hs(struct mmc_card *card)
{
int ret;
ret = mmc_sdio_switch_hs(card, true);
if (ret <= 0 || mmc_card_sdio(card))
return ret;
ret = mmc_sd_switch_hs(card);
if (ret <= 0)
mmc_sdio_switch_hs(card, false);
return ret;
}
static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)
{
unsigned max_dtr;
if (mmc_card_hs(card)) {
/*
* The SDIO specification doesn't mention how
* the CIS transfer speed register relates to
* high-speed, but it seems that 50 MHz is
* mandatory.
*/
max_dtr = 50000000;
} else {
max_dtr = card->cis.max_dtr;
}
if (mmc_card_sd_combo(card))
max_dtr = min(max_dtr, mmc_sd_get_max_clock(card));
return max_dtr;
}
static unsigned char host_drive_to_sdio_drive(int host_strength)
{
switch (host_strength) {
case MMC_SET_DRIVER_TYPE_A:
return SDIO_DTSx_SET_TYPE_A;
case MMC_SET_DRIVER_TYPE_B:
return SDIO_DTSx_SET_TYPE_B;
case MMC_SET_DRIVER_TYPE_C:
return SDIO_DTSx_SET_TYPE_C;
case MMC_SET_DRIVER_TYPE_D:
return SDIO_DTSx_SET_TYPE_D;
default:
return SDIO_DTSx_SET_TYPE_B;
}
}
static void sdio_select_driver_type(struct mmc_card *card)
{
int card_drv_type, drive_strength, drv_type;
unsigned char card_strength;
int err;
card->drive_strength = 0;
card_drv_type = card->sw_caps.sd3_drv_type | SD_DRIVER_TYPE_B;
drive_strength = mmc_select_drive_strength(card,
card->sw_caps.uhs_max_dtr,
card_drv_type, &drv_type);
if (drive_strength) {
/* On error, just use the default drive strength (type B) */
err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_DRIVE_STRENGTH, 0,
&card_strength);
if (err)
return;
card_strength &= ~(SDIO_DRIVE_DTSx_MASK<<SDIO_DRIVE_DTSx_SHIFT);
card_strength |= host_drive_to_sdio_drive(drive_strength);
/* On error, fall back to the default drive strength (type B) */
err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_DRIVE_STRENGTH,
card_strength, NULL);
if (err)
return;
card->drive_strength = drive_strength;
}
if (drv_type)
mmc_set_driver_type(card->host, drv_type);
}
static int sdio_set_bus_speed_mode(struct mmc_card *card)
{
unsigned int bus_speed, timing;
int err;
unsigned char speed;
unsigned int max_rate;
/*
 * If the host doesn't support any of the UHS-I modes, fall back to
 * the default speed.
 */
if (!mmc_host_uhs(card->host))
return 0;
bus_speed = SDIO_SPEED_SDR12;
timing = MMC_TIMING_UHS_SDR12;
if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
bus_speed = SDIO_SPEED_SDR104;
timing = MMC_TIMING_UHS_SDR104;
card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
} else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
bus_speed = SDIO_SPEED_DDR50;
timing = MMC_TIMING_UHS_DDR50;
card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
SD_MODE_UHS_SDR50)) {
bus_speed = SDIO_SPEED_SDR50;
timing = MMC_TIMING_UHS_SDR50;
card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
bus_speed = SDIO_SPEED_SDR25;
timing = MMC_TIMING_UHS_SDR25;
card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
SD_MODE_UHS_SDR12)) {
bus_speed = SDIO_SPEED_SDR12;
timing = MMC_TIMING_UHS_SDR12;
card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
}
err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
if (err)
return err;
speed &= ~SDIO_SPEED_BSS_MASK;
speed |= bus_speed;
err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
if (err)
return err;
max_rate = min_not_zero(card->quirk_max_rate,
card->sw_caps.uhs_max_dtr);
mmc_set_timing(card->host, timing);
mmc_set_clock(card->host, max_rate);
return 0;
}
/*
* UHS-I specific initialization procedure
*/
static int mmc_sdio_init_uhs_card(struct mmc_card *card)
{
int err;
if (!card->scr.sda_spec3)
return 0;
/* Switch to wider bus */
err = sdio_enable_4bit_bus(card);
if (err)
goto out;
/* Set the driver strength for the card */
sdio_select_driver_type(card);
/* Set bus speed mode of the card */
err = sdio_set_bus_speed_mode(card);
if (err)
goto out;
/*
* SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and
* SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
*/
if (!mmc_host_is_spi(card->host) &&
((card->host->ios.timing == MMC_TIMING_UHS_SDR50) ||
(card->host->ios.timing == MMC_TIMING_UHS_SDR104)))
err = mmc_execute_tuning(card);
out:
return err;
}
static int mmc_sdio_pre_init(struct mmc_host *host, u32 ocr,
struct mmc_card *card)
{
if (card)
mmc_remove_card(card);
/*
* Reset the card by performing the same steps that are taken by
* mmc_rescan_try_freq() and mmc_attach_sdio() during a "normal" probe.
*
* sdio_reset() is technically not needed. Having just powered up the
* hardware, it should already be in reset state. However, some
* platforms (such as SD8686 on OLPC) do not instantly cut power,
* meaning that a reset is required when restoring power soon after
* powering off. It is harmless in other cases.
*
* The CMD5 reset (mmc_send_io_op_cond()), according to the SDIO spec,
* is not necessary for non-removable cards. However, it is required
* for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and
* harmless in other situations.
*/
sdio_reset(host);
mmc_go_idle(host);
mmc_send_if_cond(host, ocr);
return mmc_send_io_op_cond(host, 0, NULL);
}
/*
* Handle the detection and initialisation of a card.
*
* In the case of a resume, "oldcard" will contain the card
* we're trying to reinitialise.
*/
static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
struct mmc_card *oldcard)
{
struct mmc_card *card;
int err;
int retries = 10;
u32 rocr = 0;
u32 ocr_card = ocr;
WARN_ON(!host->claimed);
/* to query card if 1.8V signalling is supported */
if (mmc_host_uhs(host))
ocr |= R4_18V_PRESENT;
try_again:
if (!retries) {
pr_warn("%s: Skipping voltage switch\n", mmc_hostname(host));
ocr &= ~R4_18V_PRESENT;
}
/*
* Inform the card of the voltage
*/
err = mmc_send_io_op_cond(host, ocr, &rocr);
if (err)
return err;
/*
* For SPI, enable CRC as appropriate.
*/
if (mmc_host_is_spi(host)) {
err = mmc_spi_set_crc(host, use_spi_crc);
if (err)
return err;
}
/*
* Allocate card structure.
*/
card = mmc_alloc_card(host, &sdio_type);
if (IS_ERR(card))
return PTR_ERR(card);
if ((rocr & R4_MEMORY_PRESENT) &&
mmc_sd_get_cid(host, ocr & rocr, card->raw_cid, NULL) == 0) {
card->type = MMC_TYPE_SD_COMBO;
if (oldcard && (!mmc_card_sd_combo(oldcard) ||
memcmp(card->raw_cid, oldcard->raw_cid, sizeof(card->raw_cid)) != 0)) {
err = -ENOENT;
goto mismatch;
}
} else {
card->type = MMC_TYPE_SDIO;
if (oldcard && !mmc_card_sdio(oldcard)) {
err = -ENOENT;
goto mismatch;
}
}
/*
* Call the optional HC's init_card function to handle quirks.
*/
if (host->ops->init_card)
host->ops->init_card(host, card);
mmc_fixup_device(card, sdio_card_init_methods);
card->ocr = ocr_card;
/*
 * If the host and card support UHS-I mode, request that the card
 * switch to the 1.8V signaling level. Don't use 1.8V signalling if
 * UHS mode is not enabled, to maintain compatibility: some systems
 * that claim 1.8V signalling in fact do not support it. Per SDIO
 * spec v3, section 3.1.2, if the voltage is already 1.8V, the card
 * sets S18A to 0 in the R4 response, so the rocr & R4_18V_PRESENT
 * check fails even though we still need to try to initialize the
 * card as UHS. sdio_read_cccr() takes over that task and determines
 * which speed mode will work.
 */
if (rocr & ocr & R4_18V_PRESENT) {
err = mmc_set_uhs_voltage(host, ocr_card);
if (err == -EAGAIN) {
mmc_sdio_pre_init(host, ocr_card, card);
retries--;
goto try_again;
} else if (err) {
ocr &= ~R4_18V_PRESENT;
}
}
/*
* For native busses: set card RCA and quit open drain mode.
*/
if (!mmc_host_is_spi(host)) {
err = mmc_send_relative_addr(host, &card->rca);
if (err)
goto remove;
/*
* Update oldcard with the new RCA received from the SDIO
* device -- we're doing this so that it's updated in the
* "card" struct when oldcard overwrites that later.
*/
if (oldcard)
oldcard->rca = card->rca;
}
/*
 * Read the CSD before selecting the card.
 */
if (!oldcard && mmc_card_sd_combo(card)) {
err = mmc_sd_get_csd(card);
if (err)
goto remove;
mmc_decode_cid(card);
}
/*
* Select card, as all following commands rely on that.
*/
if (!mmc_host_is_spi(host)) {
err = mmc_select_card(card);
if (err)
goto remove;
}
if (card->quirks & MMC_QUIRK_NONSTD_SDIO) {
/*
 * This is a non-standard SDIO device, meaning it doesn't
 * have any CIA (Common I/O Area) registers present.
 * It is the host's responsibility to fill in the cccr and
 * cis structures in init_card().
 */
mmc_set_clock(host, card->cis.max_dtr);
if (card->cccr.high_speed)
mmc_set_timing(card->host, MMC_TIMING_SD_HS);
if (oldcard)
mmc_remove_card(card);
else
host->card = card;
return 0;
}
/*
 * Read the common registers. Note that this also validates
 * whether UHS mode can work.
 */
err = sdio_read_cccr(card, ocr);
if (err) {
mmc_sdio_pre_init(host, ocr_card, card);
if (ocr & R4_18V_PRESENT) {
/* Retry init sequence, but without R4_18V_PRESENT. */
retries = 0;
goto try_again;
}
return err;
}
/*
* Read the common CIS tuples.
*/
err = sdio_read_common_cis(card);
if (err)
goto remove;
if (oldcard) {
if (card->cis.vendor == oldcard->cis.vendor &&
card->cis.device == oldcard->cis.device) {
mmc_remove_card(card);
card = oldcard;
} else {
err = -ENOENT;
goto mismatch;
}
}
mmc_fixup_device(card, sdio_fixup_methods);
if (mmc_card_sd_combo(card)) {
err = mmc_sd_setup_card(host, card, oldcard != NULL);
/* handle as SDIO-only card if memory init failed */
if (err) {
mmc_go_idle(host);
if (mmc_host_is_spi(host))
/* should not fail, as it worked previously */
mmc_spi_set_crc(host, use_spi_crc);
card->type = MMC_TYPE_SDIO;
} else {
card->dev.type = &sd_type;
}
}
/*
* If needed, disconnect card detection pull-up resistor.
*/
err = sdio_disable_cd(card);
if (err)
goto remove;
/*
 * Initialization sequence for UHS-I cards: only if the card
 * supports 1.8V and UHS signaling.
 */
if ((ocr & R4_18V_PRESENT) && card->sw_caps.sd3_bus_mode) {
err = mmc_sdio_init_uhs_card(card);
if (err)
goto remove;
} else {
/*
* Switch to high-speed (if supported).
*/
err = sdio_enable_hs(card);
if (err > 0)
mmc_set_timing(card->host, MMC_TIMING_SD_HS);
else if (err)
goto remove;
/*
* Change to the card's maximum speed.
*/
mmc_set_clock(host, mmc_sdio_get_max_clock(card));
/*
* Switch to wider bus (if supported).
*/
err = sdio_enable_4bit_bus(card);
if (err)
goto remove;
}
if (host->caps2 & MMC_CAP2_AVOID_3_3V &&
host->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
pr_err("%s: Host failed to negotiate down from 3.3V\n",
mmc_hostname(host));
err = -EINVAL;
goto remove;
}
host->card = card;
return 0;
mismatch:
pr_debug("%s: Perhaps the card was replaced\n", mmc_hostname(host));
remove:
if (oldcard != card)
mmc_remove_card(card);
return err;
}
static int mmc_sdio_reinit_card(struct mmc_host *host)
{
int ret;
ret = mmc_sdio_pre_init(host, host->card->ocr, NULL);
if (ret)
return ret;
return mmc_sdio_init_card(host, host->card->ocr, host->card);
}
/*
* Host is being removed. Free up the current card.
*/
static void mmc_sdio_remove(struct mmc_host *host)
{
int i;
for (i = 0; i < host->card->sdio_funcs; i++) {
if (host->card->sdio_func[i]) {
sdio_remove_func(host->card->sdio_func[i]);
host->card->sdio_func[i] = NULL;
}
}
mmc_remove_card(host->card);
host->card = NULL;
}
/*
* Card detection - card is alive.
*/
static int mmc_sdio_alive(struct mmc_host *host)
{
return mmc_select_card(host->card);
}
/*
* Card detection callback from host.
*/
static void mmc_sdio_detect(struct mmc_host *host)
{
int err;
/* Make sure card is powered before detecting it */
if (host->caps & MMC_CAP_POWER_OFF_CARD) {
err = pm_runtime_resume_and_get(&host->card->dev);
if (err < 0)
goto out;
}
mmc_claim_host(host);
/*
* Just check if our card has been removed.
*/
err = _mmc_detect_card_removed(host);
mmc_release_host(host);
/*
* Tell PM core it's OK to power off the card now.
*
* The _sync variant is used in order to ensure that the card
* is left powered off in case an error occurred, and the card
* is going to be removed.
*
* Since there is no specific reason to believe a new user
* is about to show up at this point, the _sync variant is
* desirable anyway.
*/
if (host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_put_sync(&host->card->dev);
out:
if (err) {
mmc_sdio_remove(host);
mmc_claim_host(host);
mmc_detach_bus(host);
mmc_power_off(host);
mmc_release_host(host);
}
}
/*
* SDIO pre_suspend. We need to suspend all functions separately.
* Therefore all registered functions must have drivers with suspend
* and resume methods. Failing that we simply remove the whole card.
*/
static int mmc_sdio_pre_suspend(struct mmc_host *host)
{
int i;
for (i = 0; i < host->card->sdio_funcs; i++) {
struct sdio_func *func = host->card->sdio_func[i];
if (func && sdio_func_present(func) && func->dev.driver) {
const struct dev_pm_ops *pmops = func->dev.driver->pm;
if (!pmops || !pmops->suspend || !pmops->resume)
/* force removal of entire card in that case */
goto remove;
}
}
return 0;
remove:
if (!mmc_card_is_removable(host)) {
dev_warn(mmc_dev(host),
"missing suspend/resume ops for non-removable SDIO card\n");
/* Don't remove a non-removable card - we can't re-detect it. */
return 0;
}
/* Remove the SDIO card and let it be re-detected later on. */
mmc_sdio_remove(host);
mmc_claim_host(host);
mmc_detach_bus(host);
mmc_power_off(host);
mmc_release_host(host);
host->pm_flags = 0;
return 0;
}
/*
* SDIO suspend. Suspend all functions separately.
*/
static int mmc_sdio_suspend(struct mmc_host *host)
{
WARN_ON(host->sdio_irqs && !mmc_card_keep_power(host));
/* Prevent processing of SDIO IRQs in suspended state. */
mmc_card_set_suspended(host->card);
cancel_work_sync(&host->sdio_irq_work);
mmc_claim_host(host);
if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host))
sdio_disable_4bit_bus(host->card);
if (!mmc_card_keep_power(host)) {
mmc_power_off(host);
} else if (host->retune_period) {
mmc_retune_timer_stop(host);
mmc_retune_needed(host);
}
mmc_release_host(host);
return 0;
}
static int mmc_sdio_resume(struct mmc_host *host)
{
int err = 0;
/* Basic card reinitialization. */
mmc_claim_host(host);
/*
* Restore power and reinitialize the card when needed. Note that a
* removable card is checked from a detect work later on in the resume
* process.
*/
if (!mmc_card_keep_power(host)) {
mmc_power_up(host, host->card->ocr);
/*
 * Tell runtime PM core we just powered up the card,
 * since it still believes the card is powered off.
 * Note that currently runtime PM is only enabled
 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD.
 */
if (host->caps & MMC_CAP_POWER_OFF_CARD) {
pm_runtime_disable(&host->card->dev);
pm_runtime_set_active(&host->card->dev);
pm_runtime_enable(&host->card->dev);
}
err = mmc_sdio_reinit_card(host);
} else if (mmc_card_wake_sdio_irq(host)) {
/* We may have switched to 1-bit mode during suspend */
err = sdio_enable_4bit_bus(host->card);
}
if (err)
goto out;
/* Allow SDIO IRQs to be processed again. */
mmc_card_clr_suspended(host->card);
if (host->sdio_irqs) {
if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD))
wake_up_process(host->sdio_irq_thread);
else if (host->caps & MMC_CAP_SDIO_IRQ)
schedule_work(&host->sdio_irq_work);
}
out:
mmc_release_host(host);
host->pm_flags &= ~MMC_PM_KEEP_POWER;
return err;
}
static int mmc_sdio_runtime_suspend(struct mmc_host *host)
{
/* No references to the card, cut the power to it. */
mmc_claim_host(host);
mmc_power_off(host);
mmc_release_host(host);
return 0;
}
static int mmc_sdio_runtime_resume(struct mmc_host *host)
{
int ret;
/* Restore power and re-initialize. */
mmc_claim_host(host);
mmc_power_up(host, host->card->ocr);
ret = mmc_sdio_reinit_card(host);
mmc_release_host(host);
return ret;
}
/*
* SDIO HW reset
*
* Returns 0 if the HW reset was executed synchronously, returns 1 if the HW
* reset was asynchronously scheduled, else a negative error code.
*/
static int mmc_sdio_hw_reset(struct mmc_host *host)
{
struct mmc_card *card = host->card;
/*
 * In case the card is shared among multiple func drivers, reset the
 * card through a rescan work. In this way it will be removed and
 * re-detected, thus all func drivers become informed about it.
 */
if (atomic_read(&card->sdio_funcs_probed) > 1) {
if (mmc_card_removed(card))
return 1;
host->rescan_entered = 0;
mmc_card_set_removed(card);
_mmc_detect_change(host, 0, false);
return 1;
}
/*
 * Only a single func driver has been probed, so skip the heavy
 * hotplug dance above and execute the reset immediately.
 */
mmc_power_cycle(host, card->ocr);
return mmc_sdio_reinit_card(host);
}
static int mmc_sdio_sw_reset(struct mmc_host *host)
{
mmc_set_clock(host, host->f_init);
sdio_reset(host);
mmc_go_idle(host);
mmc_set_initial_state(host);
mmc_set_initial_signal_voltage(host);
return mmc_sdio_reinit_card(host);
}
static const struct mmc_bus_ops mmc_sdio_ops = {
.remove = mmc_sdio_remove,
.detect = mmc_sdio_detect,
.pre_suspend = mmc_sdio_pre_suspend,
.suspend = mmc_sdio_suspend,
.resume = mmc_sdio_resume,
.runtime_suspend = mmc_sdio_runtime_suspend,
.runtime_resume = mmc_sdio_runtime_resume,
.alive = mmc_sdio_alive,
.hw_reset = mmc_sdio_hw_reset,
.sw_reset = mmc_sdio_sw_reset,
};
/*
* Starting point for SDIO card init.
*/
int mmc_attach_sdio(struct mmc_host *host)
{
int err, i, funcs;
u32 ocr, rocr;
struct mmc_card *card;
WARN_ON(!host->claimed);
err = mmc_send_io_op_cond(host, 0, &ocr);
if (err)
return err;
mmc_attach_bus(host, &mmc_sdio_ops);
if (host->ocr_avail_sdio)
host->ocr_avail = host->ocr_avail_sdio;
rocr = mmc_select_voltage(host, ocr);
/*
* Can we support the voltage(s) of the card(s)?
*/
if (!rocr) {
err = -EINVAL;
goto err;
}
/*
* Detect and init the card.
*/
err = mmc_sdio_init_card(host, rocr, NULL);
if (err)
goto err;
card = host->card;
/*
* Enable runtime PM only if supported by host+card+board
*/
if (host->caps & MMC_CAP_POWER_OFF_CARD) {
/*
* Do not allow runtime suspend until after SDIO function
* devices are added.
*/
pm_runtime_get_noresume(&card->dev);
/*
* Let runtime PM core know our card is active
*/
err = pm_runtime_set_active(&card->dev);
if (err)
goto remove;
/*
* Enable runtime PM for this card
*/
pm_runtime_enable(&card->dev);
}
/*
 * The number of I/O functions on the card is encoded in bits
 * 30:28 of the R4 response (ocr).
 */
funcs = (ocr & 0x70000000) >> 28;
card->sdio_funcs = 0;
/*
* Initialize (but don't add) all present functions.
*/
for (i = 0; i < funcs; i++, card->sdio_funcs++) {
err = sdio_init_func(host->card, i + 1);
if (err)
goto remove;
/*
* Enable Runtime PM for this func (if supported)
*/
if (host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_enable(&card->sdio_func[i]->dev);
}
/*
* First add the card to the driver model...
*/
mmc_release_host(host);
err = mmc_add_card(host->card);
if (err)
goto remove_added;
/*
* ...then the SDIO functions.
*/
for (i = 0; i < funcs; i++) {
err = sdio_add_func(host->card->sdio_func[i]);
if (err)
goto remove_added;
}
if (host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_put(&card->dev);
mmc_claim_host(host);
return 0;
remove:
mmc_release_host(host);
remove_added:
/*
* The devices are being deleted so it is not necessary to disable
* runtime PM. Similarly we also don't pm_runtime_put() the SDIO card
* because it needs to be active to remove any function devices that
* were probed, and after that it gets deleted.
*/
mmc_sdio_remove(host);
mmc_claim_host(host);
err:
mmc_detach_bus(host);
pr_err("%s: error %d whilst initialising SDIO card\n",
mmc_hostname(host), err);
return err;
}
| linux-master | drivers/mmc/core/sdio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Block driver for media (i.e., flash cards)
*
* Copyright 2002 Hewlett-Packard Company
* Copyright 2005-2008 Pierre Ossman
*
* Use consistent with the GNU GPL is permitted,
* provided that this copyright notice is
* preserved in its entirety in all copies and derived works.
*
* HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
* AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
* FITNESS FOR ANY PARTICULAR PURPOSE.
*
* Many thanks to Alessandro Rubini and Jonathan Corbet!
*
* Author: Andrew Christian
* 28 May 2002
*/
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/kref.h>
#include <linux/blkdev.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <linux/debugfs.h>
#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/uaccess.h>
#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "crypto.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "quirks.h"
#include "sd_ops.h"
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
/*
* Set a 10 second timeout for polling write request busy state. Note, mmc core
* is setting a 3 second timeout for SD cards, and SDHCI has long had a 10
* second software timer to timeout the whole request, so 10 seconds should be
* ample.
*/
#define MMC_BLK_TIMEOUT_MS (10 * 1000)
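/* Extract the EXT_CSD byte index and value from a CMD6 (SWITCH) argument. */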
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
static DEFINE_MUTEX(block_mutex);
/*
 * The defaults come from config options but can be overridden by
 * module or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
/*
 * We've only got one major, so the number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device. It is also
 * limited by MAX_DEVICES below.
 */
static int max_devices;
#define MAX_DEVICES 256
static DEFINE_IDA(mmc_blk_ida);
static DEFINE_IDA(mmc_rpmb_ida);
struct mmc_blk_busy_data {
struct mmc_card *card;
u32 status;
};
/*
* There is one mmc_blk_data per slot.
*/
struct mmc_blk_data {
struct device *parent;
struct gendisk *disk;
struct mmc_queue queue;
struct list_head part;
struct list_head rpmbs;
unsigned int flags;
#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
struct kref kref;
unsigned int read_only;
unsigned int part_type;
unsigned int reset_done;
#define MMC_BLK_READ BIT(0)
#define MMC_BLK_WRITE BIT(1)
#define MMC_BLK_DISCARD BIT(2)
#define MMC_BLK_SECDISCARD BIT(3)
#define MMC_BLK_CQE_RECOVERY BIT(4)
#define MMC_BLK_TRIM BIT(5)
/*
* Only set in main mmc_blk_data associated
* with mmc_card with dev_set_drvdata, and keeps
* track of the current selected device partition.
*/
unsigned int part_curr;
#define MMC_BLK_PART_INVALID UINT_MAX /* Unknown partition active */
int area_type;
/* debugfs files (only in main mmc_blk_data) */
struct dentry *status_dentry;
struct dentry *ext_csd_dentry;
};
/* Device type for RPMB character devices */
static dev_t mmc_rpmb_devt;
/* Bus type for RPMB character devices */
static struct bus_type mmc_rpmb_bus_type = {
.name = "mmc_rpmb",
};
/**
* struct mmc_rpmb_data - special RPMB device type for these areas
* @dev: the device for the RPMB area
* @chrdev: character device for the RPMB area
* @id: unique device ID number
* @part_index: partition index (0 for the first partition)
* @md: parent MMC block device
* @node: list item, so we can put this device on a list
*/
struct mmc_rpmb_data {
struct device dev;
struct cdev chrdev;
int id;
unsigned int part_index;
struct mmc_blk_data *md;
struct list_head node;
};
static DEFINE_MUTEX(open_lock);
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");
static inline int mmc_blk_part_switch(struct mmc_card *card,
unsigned int part_type);
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
int recovery_mode,
struct mmc_queue *mq);
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
static int mmc_spi_err_check(struct mmc_card *card);
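/*
* Take a reference on the mmc_blk_data tied to this disk, unless it is
* already on its way to being freed (kref has dropped to zero).
*/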
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
struct mmc_blk_data *md;
mutex_lock(&open_lock);
md = disk->private_data;
if (md && !kref_get_unless_zero(&md->kref))
md = NULL;
mutex_unlock(&open_lock);
return md;
}
static inline int mmc_get_devidx(struct gendisk *disk)
{
int devidx = disk->first_minor / perdev_minors;
return devidx;
}
static void mmc_blk_kref_release(struct kref *ref)
{
struct mmc_blk_data *md = container_of(ref, struct mmc_blk_data, kref);
int devidx;
devidx = mmc_get_devidx(md->disk);
ida_simple_remove(&mmc_blk_ida, devidx);
mutex_lock(&open_lock);
md->disk->private_data = NULL;
mutex_unlock(&open_lock);
put_disk(md->disk);
kfree(md);
}
static void mmc_blk_put(struct mmc_blk_data *md)
{
kref_put(&md->kref, mmc_blk_kref_release);
}
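/*
* Report the boot area write-protect state: 0 = not locked, 1 = locked
* until the next power-on, 2 = permanently locked.
*/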
static ssize_t power_ro_lock_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
struct mmc_card *card = md->queue.card;
int locked = 0;
if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
locked = 2;
else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
locked = 1;
ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
mmc_blk_put(md);
return ret;
}
static ssize_t power_ro_lock_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int ret;
struct mmc_blk_data *md, *part_md;
struct mmc_queue *mq;
struct request *req;
unsigned long set;
if (kstrtoul(buf, 0, &set))
return -EINVAL;
if (set != 1)
return count;
md = mmc_blk_get(dev_to_disk(dev));
mq = &md->queue;
/* Dispatch locking to the block layer */
req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0);
if (IS_ERR(req)) {
count = PTR_ERR(req);
goto out_put;
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
req_to_mmc_queue_req(req)->drv_op_result = -EIO;
blk_execute_rq(req, false);
ret = req_to_mmc_queue_req(req)->drv_op_result;
blk_mq_free_request(req);
if (!ret) {
pr_info("%s: Locking boot partition ro until next power on\n",
md->disk->disk_name);
set_disk_ro(md->disk, 1);
list_for_each_entry(part_md, &md->part, part)
if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
set_disk_ro(part_md->disk, 1);
}
}
out_put:
mmc_blk_put(md);
return count;
}
static DEVICE_ATTR(ro_lock_until_next_power_on, 0,
power_ro_lock_show, power_ro_lock_store);
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int ret;
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
ret = snprintf(buf, PAGE_SIZE, "%d\n",
get_disk_ro(dev_to_disk(dev)) ^
md->read_only);
mmc_blk_put(md);
return ret;
}
static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
char *end;
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
unsigned long set = simple_strtoul(buf, &end, 0);
if (end == buf) {
ret = -EINVAL;
goto out;
}
set_disk_ro(dev_to_disk(dev), set || md->read_only);
ret = count;
out:
mmc_blk_put(md);
return ret;
}
static DEVICE_ATTR(force_ro, 0644, force_ro_show, force_ro_store);
static struct attribute *mmc_disk_attrs[] = {
&dev_attr_force_ro.attr,
&dev_attr_ro_lock_until_next_power_on.attr,
NULL,
};
static umode_t mmc_disk_attrs_is_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
umode_t mode = a->mode;
if (a == &dev_attr_ro_lock_until_next_power_on.attr &&
(md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
md->queue.card->ext_csd.boot_ro_lockable) {
mode = S_IRUGO;
if (!(md->queue.card->ext_csd.boot_ro_lock &
EXT_CSD_BOOT_WP_B_PWR_WP_DIS))
mode |= S_IWUSR;
}
mmc_blk_put(md);
return mode;
}
static const struct attribute_group mmc_disk_attr_group = {
.is_visible = mmc_disk_attrs_is_visible,
.attrs = mmc_disk_attrs,
};
static const struct attribute_group *mmc_disk_attr_groups[] = {
&mmc_disk_attr_group,
NULL,
};
static int mmc_blk_open(struct gendisk *disk, blk_mode_t mode)
{
struct mmc_blk_data *md = mmc_blk_get(disk);
int ret = -ENXIO;
mutex_lock(&block_mutex);
if (md) {
ret = 0;
if ((mode & BLK_OPEN_WRITE) && md->read_only) {
mmc_blk_put(md);
ret = -EROFS;
}
}
mutex_unlock(&block_mutex);
return ret;
}
static void mmc_blk_release(struct gendisk *disk)
{
struct mmc_blk_data *md = disk->private_data;
mutex_lock(&block_mutex);
mmc_blk_put(md);
mutex_unlock(&block_mutex);
}
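/* Fake a CHS geometry for legacy tools that still ask for one. */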
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
geo->heads = 4;
geo->sectors = 16;
return 0;
}
struct mmc_blk_ioc_data {
struct mmc_ioc_cmd ic;
unsigned char *buf;
u64 buf_bytes;
struct mmc_rpmb_data *rpmb;
};
static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
struct mmc_ioc_cmd __user *user)
{
struct mmc_blk_ioc_data *idata;
int err;
idata = kmalloc(sizeof(*idata), GFP_KERNEL);
if (!idata) {
err = -ENOMEM;
goto out;
}
if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
err = -EFAULT;
goto idata_err;
}
idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
err = -EOVERFLOW;
goto idata_err;
}
if (!idata->buf_bytes) {
idata->buf = NULL;
return idata;
}
idata->buf = memdup_user((void __user *)(unsigned long)
idata->ic.data_ptr, idata->buf_bytes);
if (IS_ERR(idata->buf)) {
err = PTR_ERR(idata->buf);
goto idata_err;
}
return idata;
idata_err:
kfree(idata);
out:
return ERR_PTR(err);
}
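/*
* Copy the command response, and for reads also the data buffer, back
* to the userspace mmc_ioc_cmd.
*/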
static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
struct mmc_blk_ioc_data *idata)
{
struct mmc_ioc_cmd *ic = &idata->ic;
if (copy_to_user(&(ic_ptr->response), ic->response,
sizeof(ic->response)))
return -EFAULT;
if (!idata->ic.write_flag) {
if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
idata->buf, idata->buf_bytes))
return -EFAULT;
}
return 0;
}
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_blk_ioc_data *idata)
{
struct mmc_command cmd = {}, sbc = {};
struct mmc_data data = {};
struct mmc_request mrq = {};
struct scatterlist sg;
bool r1b_resp, use_r1b_resp = false;
unsigned int busy_timeout_ms;
int err;
unsigned int target_part;
if (!card || !md || !idata)
return -EINVAL;
/*
* RPMB accesses come in from the character device, so we need to
* target them explicitly. Otherwise we just target the partition
* type of the block device the ioctl() was issued on.
*/
if (idata->rpmb) {
/* Support multiple RPMB partitions */
target_part = idata->rpmb->part_index;
target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB;
} else {
target_part = md->part_type;
}
cmd.opcode = idata->ic.opcode;
cmd.arg = idata->ic.arg;
cmd.flags = idata->ic.flags;
if (idata->buf_bytes) {
data.sg = &sg;
data.sg_len = 1;
data.blksz = idata->ic.blksz;
data.blocks = idata->ic.blocks;
sg_init_one(data.sg, idata->buf, idata->buf_bytes);
if (idata->ic.write_flag)
data.flags = MMC_DATA_WRITE;
else
data.flags = MMC_DATA_READ;
/* data.flags must already be set before doing this. */
mmc_set_data_timeout(&data, card);
/* Allow overriding the timeout_ns for empirical tuning. */
if (idata->ic.data_timeout_ns)
data.timeout_ns = idata->ic.data_timeout_ns;
mrq.data = &data;
}
mrq.cmd = &cmd;
err = mmc_blk_part_switch(card, target_part);
if (err)
return err;
if (idata->ic.is_acmd) {
err = mmc_app_cmd(card->host, card);
if (err)
return err;
}
if (idata->rpmb) {
sbc.opcode = MMC_SET_BLOCK_COUNT;
/*
* We don't do any block count validation because the max size
* may be increased by a future standard. We just copy the
* 'Reliable Write' bit here.
*/
sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
mrq.sbc = &sbc;
}
if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
(cmd.opcode == MMC_SWITCH))
return mmc_sanitize(card, idata->ic.cmd_timeout_ms);
/* If it's an R1B response we need some more preparations. */
busy_timeout_ms = idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS;
r1b_resp = (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B;
if (r1b_resp)
use_r1b_resp = mmc_prepare_busy_cmd(card->host, &cmd,
busy_timeout_ms);
mmc_wait_for_req(card->host, &mrq);
memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
if (cmd.error) {
dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
__func__, cmd.error);
return cmd.error;
}
if (data.error) {
dev_err(mmc_dev(card->host), "%s: data error %d\n",
__func__, data.error);
return data.error;
}
/*
* Make sure the cache of the PARTITION_CONFIG register and
* PARTITION_ACCESS bits is updated in case the ioctl ext_csd write
* changed it successfully.
*/
if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) &&
(cmd.opcode == MMC_SWITCH)) {
struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg);
/*
* Update cache so the next mmc_blk_part_switch call operates
* on up-to-date data.
*/
card->ext_csd.part_config = value;
main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
}
/*
* Make sure to update CACHE_CTRL in case it was changed. The cache
* will get turned back on if the card is re-initialized, e.g.
* suspend/resume or hw reset in recovery.
*/
if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) &&
(cmd.opcode == MMC_SWITCH)) {
u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1;
card->ext_csd.cache_ctrl = value;
}
/*
* According to the SD specs, some commands require a delay after
* issuing the command.
*/
if (idata->ic.postsleep_min_us)
usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
/* No need to poll when using HW busy detection. */
if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
return 0;
if (mmc_host_is_spi(card->host)) {
if (idata->ic.write_flag || r1b_resp || cmd.flags & MMC_RSP_SPI_BUSY)
return mmc_spi_err_check(card);
return err;
}
/* Ensure RPMB/R1B command has completed by polling with CMD13. */
if (idata->rpmb || r1b_resp)
err = mmc_poll_for_busy(card, busy_timeout_ms, false,
MMC_BUSY_IO);
return err;
}
static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
struct mmc_ioc_cmd __user *ic_ptr,
struct mmc_rpmb_data *rpmb)
{
struct mmc_blk_ioc_data *idata;
struct mmc_blk_ioc_data *idatas[1];
struct mmc_queue *mq;
struct mmc_card *card;
int err = 0, ioc_err = 0;
struct request *req;
idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
if (IS_ERR(idata))
return PTR_ERR(idata);
/* This will be NULL for non-RPMB ioctl()s */
idata->rpmb = rpmb;
card = md->queue.card;
if (IS_ERR(card)) {
err = PTR_ERR(card);
goto cmd_done;
}
/*
* Dispatch the ioctl() into the block request queue.
*/
mq = &md->queue;
req = blk_mq_alloc_request(mq->queue,
idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto cmd_done;
}
idatas[0] = idata;
req_to_mmc_queue_req(req)->drv_op =
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_result = -EIO;
req_to_mmc_queue_req(req)->drv_op_data = idatas;
req_to_mmc_queue_req(req)->ioc_count = 1;
blk_execute_rq(req, false);
ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
blk_mq_free_request(req);
cmd_done:
kfree(idata->buf);
kfree(idata);
return ioc_err ? ioc_err : err;
}
static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
struct mmc_ioc_multi_cmd __user *user,
struct mmc_rpmb_data *rpmb)
{
struct mmc_blk_ioc_data **idata = NULL;
struct mmc_ioc_cmd __user *cmds = user->cmds;
struct mmc_card *card;
struct mmc_queue *mq;
int err = 0, ioc_err = 0;
__u64 num_of_cmds;
unsigned int i, n;
struct request *req;
if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
sizeof(num_of_cmds)))
return -EFAULT;
if (!num_of_cmds)
return 0;
if (num_of_cmds > MMC_IOC_MAX_CMDS)
return -EINVAL;
n = num_of_cmds;
idata = kcalloc(n, sizeof(*idata), GFP_KERNEL);
if (!idata)
return -ENOMEM;
for (i = 0; i < n; i++) {
idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
if (IS_ERR(idata[i])) {
err = PTR_ERR(idata[i]);
n = i;
goto cmd_err;
}
/* This will be NULL for non-RPMB ioctl()s */
idata[i]->rpmb = rpmb;
}
card = md->queue.card;
if (IS_ERR(card)) {
err = PTR_ERR(card);
goto cmd_err;
}
/*
* Dispatch the ioctl()s into the block request queue.
*/
mq = &md->queue;
req = blk_mq_alloc_request(mq->queue,
idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto cmd_err;
}
req_to_mmc_queue_req(req)->drv_op =
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_result = -EIO;
req_to_mmc_queue_req(req)->drv_op_data = idata;
req_to_mmc_queue_req(req)->ioc_count = n;
blk_execute_rq(req, false);
ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
/* copy to user if data and response */
for (i = 0; i < n && !err; i++)
err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
blk_mq_free_request(req);
cmd_err:
for (i = 0; i < n; i++) {
kfree(idata[i]->buf);
kfree(idata[i]);
}
kfree(idata);
return ioc_err ? ioc_err : err;
}
static int mmc_blk_check_blkdev(struct block_device *bdev)
{
/*
* The caller must have CAP_SYS_RAWIO, and must be calling this on the
* whole block device, not on a partition. This prevents overspray
* between sibling partitions.
*/
if (!capable(CAP_SYS_RAWIO) || bdev_is_partition(bdev))
return -EPERM;
return 0;
}
static int mmc_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct mmc_blk_data *md;
int ret;
switch (cmd) {
case MMC_IOC_CMD:
ret = mmc_blk_check_blkdev(bdev);
if (ret)
return ret;
md = mmc_blk_get(bdev->bd_disk);
if (!md)
return -EINVAL;
ret = mmc_blk_ioctl_cmd(md,
(struct mmc_ioc_cmd __user *)arg,
NULL);
mmc_blk_put(md);
return ret;
case MMC_IOC_MULTI_CMD:
ret = mmc_blk_check_blkdev(bdev);
if (ret)
return ret;
md = mmc_blk_get(bdev->bd_disk);
if (!md)
return -EINVAL;
ret = mmc_blk_ioctl_multi_cmd(md,
(struct mmc_ioc_multi_cmd __user *)arg,
NULL);
mmc_blk_put(md);
return ret;
default:
return -EINVAL;
}
}
#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif
static int mmc_blk_alternative_gpt_sector(struct gendisk *disk,
sector_t *sector)
{
struct mmc_blk_data *md;
int ret;
md = mmc_blk_get(disk);
if (!md)
return -EINVAL;
if (md->queue.card)
ret = mmc_card_alternative_gpt_sector(md->queue.card, sector);
else
ret = -ENODEV;
mmc_blk_put(md);
return ret;
}
static const struct block_device_operations mmc_bdops = {
.open = mmc_blk_open,
.release = mmc_blk_release,
.getgeo = mmc_blk_getgeo,
.owner = THIS_MODULE,
.ioctl = mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mmc_blk_compat_ioctl,
#endif
.alternative_gpt_sector = mmc_blk_alternative_gpt_sector,
};
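/*
* RPMB access must not be interrupted by command queueing or re-tuning,
* so disable CMDQ and pause re-tuning before switching to the RPMB
* partition, and undo both after switching away again.
*/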
static int mmc_blk_part_switch_pre(struct mmc_card *card,
unsigned int part_type)
{
int ret = 0;
if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
if (card->ext_csd.cmdq_en) {
ret = mmc_cmdq_disable(card);
if (ret)
return ret;
}
mmc_retune_pause(card->host);
}
return ret;
}
static int mmc_blk_part_switch_post(struct mmc_card *card,
unsigned int part_type)
{
int ret = 0;
if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
mmc_retune_unpause(card->host);
if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
ret = mmc_cmdq_enable(card);
}
return ret;
}
static inline int mmc_blk_part_switch(struct mmc_card *card,
unsigned int part_type)
{
int ret = 0;
struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
if (main_md->part_curr == part_type)
return 0;
if (mmc_card_mmc(card)) {
u8 part_config = card->ext_csd.part_config;
ret = mmc_blk_part_switch_pre(card, part_type);
if (ret)
return ret;
part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
part_config |= part_type;
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_PART_CONFIG, part_config,
card->ext_csd.part_time);
if (ret) {
mmc_blk_part_switch_post(card, part_type);
return ret;
}
card->ext_csd.part_config = part_config;
ret = mmc_blk_part_switch_post(card, main_md->part_curr);
}
main_md->part_curr = part_type;
return ret;
}
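/*
* Ask an SD card how many blocks it actually wrote (ACMD22), used to
* recover an accurate bytes_xfered count after a failed write.
*/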
static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
{
int err;
u32 result;
__be32 *blocks;
struct mmc_request mrq = {};
struct mmc_command cmd = {};
struct mmc_data data = {};
struct scatterlist sg;
err = mmc_app_cmd(card->host, card);
if (err)
return err;
cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
cmd.arg = 0;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
data.blksz = 4;
data.blocks = 1;
data.flags = MMC_DATA_READ;
data.sg = &sg;
data.sg_len = 1;
mmc_set_data_timeout(&data, card);
mrq.cmd = &cmd;
mrq.data = &data;
blocks = kmalloc(4, GFP_KERNEL);
if (!blocks)
return -ENOMEM;
sg_init_one(&sg, blocks, 4);
mmc_wait_for_req(card->host, &mrq);
result = ntohl(*blocks);
kfree(blocks);
if (cmd.error || data.error)
return -EIO;
*written_blocks = result;
return 0;
}
static unsigned int mmc_blk_clock_khz(struct mmc_host *host)
{
if (host->actual_clock)
return host->actual_clock / 1000;
/* Clock may be subject to a divisor, fudge it by a factor of 2. */
if (host->ios.clock)
return host->ios.clock / 2000;
/* How can there be no clock? */
WARN_ON_ONCE(1);
return 100; /* 100 kHz is minimum possible value */
}
static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
struct mmc_data *data)
{
unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000);
unsigned int khz;
if (data->timeout_clks) {
khz = mmc_blk_clock_khz(host);
ms += DIV_ROUND_UP(data->timeout_clks, khz);
}
return ms;
}
/*
* Attempts to reset the card and get back to the requested partition.
* Therefore any error here must result in cancelling the block layer
* request; it must not be reattempted without going through the mmc_blk
* partition sanity checks.
*/
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
int type)
{
int err;
struct mmc_blk_data *main_md = dev_get_drvdata(&host->card->dev);
if (md->reset_done & type)
return -EEXIST;
md->reset_done |= type;
err = mmc_hw_reset(host->card);
/*
* A successful reset will leave the card in the main partition, but
* upon failure it might not be, so set it to MMC_BLK_PART_INVALID
* in that case.
*/
main_md->part_curr = err ? MMC_BLK_PART_INVALID : main_md->part_type;
if (err)
return err;
/* Ensure we switch back to the correct partition */
if (mmc_blk_part_switch(host->card, md->part_type))
/*
* We have failed to get back into the correct
* partition, so we need to abort the whole request.
*/
return -ENODEV;
return 0;
}
static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
md->reset_done &= ~type;
}
/*
* Non-block commands come back from the block layer after it has queued and
* processed them along with all other requests, and then they get issued in this
* function.
*/
static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
{
struct mmc_queue_req *mq_rq;
struct mmc_card *card = mq->card;
struct mmc_blk_data *md = mq->blkdata;
struct mmc_blk_ioc_data **idata;
bool rpmb_ioctl;
u8 **ext_csd;
u32 status;
int ret;
int i;
mq_rq = req_to_mmc_queue_req(req);
rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB);
switch (mq_rq->drv_op) {
case MMC_DRV_OP_IOCTL:
if (card->ext_csd.cmdq_en) {
ret = mmc_cmdq_disable(card);
if (ret)
break;
}
fallthrough;
case MMC_DRV_OP_IOCTL_RPMB:
idata = mq_rq->drv_op_data;
for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
if (ret)
break;
}
/* Always switch back to main area after RPMB access */
if (rpmb_ioctl)
mmc_blk_part_switch(card, 0);
else if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
mmc_cmdq_enable(card);
break;
case MMC_DRV_OP_BOOT_WP:
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
card->ext_csd.boot_ro_lock |
EXT_CSD_BOOT_WP_B_PWR_WP_EN,
card->ext_csd.part_time);
if (ret)
pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
md->disk->disk_name, ret);
else
card->ext_csd.boot_ro_lock |=
EXT_CSD_BOOT_WP_B_PWR_WP_EN;
break;
case MMC_DRV_OP_GET_CARD_STATUS:
ret = mmc_send_status(card, &status);
if (!ret)
ret = status;
break;
case MMC_DRV_OP_GET_EXT_CSD:
ext_csd = mq_rq->drv_op_data;
ret = mmc_get_ext_csd(card, ext_csd);
break;
default:
pr_err("%s: unknown driver specific operation\n",
md->disk->disk_name);
ret = -EINVAL;
break;
}
mq_rq->drv_op_result = ret;
blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
static void mmc_blk_issue_erase_rq(struct mmc_queue *mq, struct request *req,
int type, unsigned int erase_arg)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
unsigned int from, nr;
int err = 0;
blk_status_t status = BLK_STS_OK;
if (!mmc_can_erase(card)) {
status = BLK_STS_NOTSUPP;
goto fail;
}
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
do {
err = 0;
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
INAND_CMD38_ARG_EXT_CSD,
erase_arg == MMC_TRIM_ARG ?
INAND_CMD38_ARG_TRIM :
INAND_CMD38_ARG_ERASE,
card->ext_csd.generic_cmd6_time);
}
if (!err)
err = mmc_erase(card, from, nr, erase_arg);
} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
if (err)
status = BLK_STS_IOERR;
else
mmc_blk_reset_success(md, type);
fail:
blk_mq_end_request(req, status);
}
static void mmc_blk_issue_trim_rq(struct mmc_queue *mq, struct request *req)
{
mmc_blk_issue_erase_rq(mq, req, MMC_BLK_TRIM, MMC_TRIM_ARG);
}
static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
unsigned int arg = card->erase_arg;
if (mmc_card_broken_sd_discard(card))
arg = SD_ERASE_ARG;
mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, arg);
}
static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
struct request *req)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
unsigned int from, nr, arg;
int err = 0, type = MMC_BLK_SECDISCARD;
blk_status_t status = BLK_STS_OK;
if (!(mmc_can_secure_erase_trim(card))) {
status = BLK_STS_NOTSUPP;
goto out;
}
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
arg = MMC_SECURE_TRIM1_ARG;
else
arg = MMC_SECURE_ERASE_ARG;
retry:
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
INAND_CMD38_ARG_EXT_CSD,
arg == MMC_SECURE_TRIM1_ARG ?
INAND_CMD38_ARG_SECTRIM1 :
INAND_CMD38_ARG_SECERASE,
card->ext_csd.generic_cmd6_time);
if (err)
goto out_retry;
}
err = mmc_erase(card, from, nr, arg);
if (err == -EIO)
goto out_retry;
if (err) {
status = BLK_STS_IOERR;
goto out;
}
if (arg == MMC_SECURE_TRIM1_ARG) {
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
INAND_CMD38_ARG_EXT_CSD,
INAND_CMD38_ARG_SECTRIM2,
card->ext_csd.generic_cmd6_time);
if (err)
goto out_retry;
}
err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
if (err == -EIO)
goto out_retry;
if (err) {
status = BLK_STS_IOERR;
goto out;
}
}
out_retry:
if (err && !mmc_blk_reset(md, card->host, type))
goto retry;
if (!err)
mmc_blk_reset_success(md, type);
out:
blk_mq_end_request(req, status);
}
static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
int ret = 0;
ret = mmc_flush_cache(card->host);
blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
/*
* Reformat current write as a reliable write, supporting
* both legacy and the enhanced reliable write MMC cards.
* In each transfer we'll handle only as much as a single
* reliable write can handle, thus finish the request in
* partial completions.
*/
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
struct mmc_card *card,
struct request *req)
{
if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
/* Legacy mode imposes restrictions on transfers. */
if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
brq->data.blocks = 1;
if (brq->data.blocks > card->ext_csd.rel_sectors)
brq->data.blocks = card->ext_csd.rel_sectors;
else if (brq->data.blocks < card->ext_csd.rel_sectors)
brq->data.blocks = 1;
}
}
#define CMD_ERRORS_EXCL_OOR \
(R1_ADDRESS_ERROR | /* Misaligned address */ \
R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
R1_WP_VIOLATION | /* Tried to write to protected block */ \
R1_CARD_ECC_FAILED | /* Card ECC failed */ \
R1_CC_ERROR | /* Card controller error */ \
R1_ERROR) /* General/unknown error */
#define CMD_ERRORS \
(CMD_ERRORS_EXCL_OOR | \
R1_OUT_OF_RANGE) /* Command argument out of range */
static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
{
u32 val;
/*
* Per the SD specification (physical layer version 4.10)[1],
* section 4.3.3, it explicitly states that "When the last
* block of user area is read using CMD18, the host should
* ignore OUT_OF_RANGE error that may occur even the sequence
* is correct". And JESD84-B51 for eMMC has a similar
* statement in section 6.8.3.
* Multiple block read/write could be done by either the predefined
* method, namely CMD23, or open-ended mode. For open-ended mode,
* we should ignore the OUT_OF_RANGE error as it's normal behaviour.
* However the spec[1] doesn't tell us whether we should also
* ignore that for the predefined method. But per the spec[1], section
* 4.15 Set Block Count Command, it says "If illegal block count
* is set, out of range error will be indicated during read/write
* operation (For example, data transfer is stopped at user area
* boundary)." In other words, we can expect an out-of-range error
* in the response for the following CMD18/25. And if the argument of
* CMD23 + the argument of CMD18/25 exceeds the max number of blocks,
* we can also expect to get a -ETIMEDOUT or some other error from
* the host driver due to a missing data response (for write) or
* missing data (for read), as the card will stop the data transfer
* by itself per the spec. So we only need to check R1_OUT_OF_RANGE
* for open-ended mode.
*/
if (!brq->stop.error) {
bool oor_with_open_end;
/* If there is no error yet, check R1 response */
val = brq->stop.resp[0] & CMD_ERRORS;
oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc;
if (val && !oor_with_open_end)
brq->stop.error = -EIO;
}
}
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
int recovery_mode, bool *do_rel_wr_p,
bool *do_data_tag_p)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
struct mmc_blk_request *brq = &mqrq->brq;
struct request *req = mmc_queue_req_to_req(mqrq);
bool do_rel_wr, do_data_tag;
/*
* Reliable writes are used to implement Forced Unit Access and
* are supported only on MMCs.
*/
do_rel_wr = (req->cmd_flags & REQ_FUA) &&
rq_data_dir(req) == WRITE &&
(md->flags & MMC_BLK_REL_WR);
memset(brq, 0, sizeof(struct mmc_blk_request));
mmc_crypto_prepare_req(mqrq);
brq->mrq.data = &brq->data;
brq->mrq.tag = req->tag;
brq->stop.opcode = MMC_STOP_TRANSMISSION;
brq->stop.arg = 0;
if (rq_data_dir(req) == READ) {
brq->data.flags = MMC_DATA_READ;
brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
} else {
brq->data.flags = MMC_DATA_WRITE;
brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
}
brq->data.blksz = 512;
brq->data.blocks = blk_rq_sectors(req);
brq->data.blk_addr = blk_rq_pos(req);
/*
* The command queue supports 2 priorities: "high" (1) and "simple" (0).
* The eMMC will give "high" priority tasks priority over "simple"
* priority tasks. Here we always set "simple" priority by not setting
* MMC_DATA_PRIO.
*/
/*
* The block layer doesn't support all sector count
* restrictions, so we need to be prepared for requests that
* are too big.
*/
if (brq->data.blocks > card->host->max_blk_count)
brq->data.blocks = card->host->max_blk_count;
if (brq->data.blocks > 1) {
/*
* Some SD cards in SPI mode return a CRC error or even lock up
* completely when trying to read the last block using a
* multiblock read command.
*/
if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) &&
(blk_rq_pos(req) + blk_rq_sectors(req) ==
get_capacity(md->disk)))
brq->data.blocks--;
/*
* After a read error, we redo the request one (native) sector
* at a time in order to accurately determine which
* sectors can be read successfully.
*/
if (recovery_mode)
brq->data.blocks = queue_physical_block_size(mq->queue) >> 9;
/*
* Some controllers have HW issues while operating
* in multiple I/O mode
*/
if (card->host->ops->multi_io_quirk)
brq->data.blocks = card->host->ops->multi_io_quirk(card,
(rq_data_dir(req) == READ) ?
MMC_DATA_READ : MMC_DATA_WRITE,
brq->data.blocks);
}
if (do_rel_wr) {
mmc_apply_rel_rw(brq, card, req);
brq->data.flags |= MMC_DATA_REL_WR;
}
/*
* The data tag is used only when writing metadata, to speed
* up the write and any subsequent reads of that metadata.
*/
do_data_tag = card->ext_csd.data_tag_unit_size &&
(req->cmd_flags & REQ_META) &&
(rq_data_dir(req) == WRITE) &&
((brq->data.blocks * brq->data.blksz) >=
card->ext_csd.data_tag_unit_size);
if (do_data_tag)
brq->data.flags |= MMC_DATA_DAT_TAG;
mmc_set_data_timeout(&brq->data, card);
brq->data.sg = mqrq->sg;
brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
/*
* Adjust the sg list so it is the same size as the
* request.
*/
if (brq->data.blocks != blk_rq_sectors(req)) {
int i, data_size = brq->data.blocks << 9;
struct scatterlist *sg;
for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
data_size -= sg->length;
if (data_size <= 0) {
sg->length += data_size;
i++;
break;
}
}
brq->data.sg_len = i;
}
if (do_rel_wr_p)
*do_rel_wr_p = do_rel_wr;
if (do_data_tag_p)
*do_data_tag_p = do_data_tag;
}
#define MMC_CQE_RETRIES 2
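/*
* Complete a CQE request: requeue it on error (up to MMC_CQE_RETRIES),
* otherwise account the transferred bytes; then drop the in-flight
* count, release the card when idle, and kick the hardware queues.
*/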
static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_request *mrq = &mqrq->brq.mrq;
struct request_queue *q = req->q;
struct mmc_host *host = mq->card->host;
enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
unsigned long flags;
bool put_card;
int err;
mmc_cqe_post_req(host, mrq);
if (mrq->cmd && mrq->cmd->error)
err = mrq->cmd->error;
else if (mrq->data && mrq->data->error)
err = mrq->data->error;
else
err = 0;
if (err) {
if (mqrq->retries++ < MMC_CQE_RETRIES)
blk_mq_requeue_request(req, true);
else
blk_mq_end_request(req, BLK_STS_IOERR);
} else if (mrq->data) {
if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered))
blk_mq_requeue_request(req, true);
else
__blk_mq_end_request(req, BLK_STS_OK);
} else {
blk_mq_end_request(req, BLK_STS_OK);
}
spin_lock_irqsave(&mq->lock, flags);
mq->in_flight[issue_type] -= 1;
put_card = (mmc_tot_in_flight(mq) == 0);
mmc_cqe_check_busy(mq);
spin_unlock_irqrestore(&mq->lock, flags);
if (!mq->cqe_busy)
blk_mq_run_hw_queues(q, true);
if (put_card)
mmc_put_card(mq->card, &mq->ctx);
}
void mmc_blk_cqe_recovery(struct mmc_queue *mq)
{
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
int err;
pr_debug("%s: CQE recovery start\n", mmc_hostname(host));
err = mmc_cqe_recovery(host);
if (err)
mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
}
static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
{
struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
brq.mrq);
struct request *req = mmc_queue_req_to_req(mqrq);
struct request_queue *q = req->q;
struct mmc_queue *mq = q->queuedata;
/*
* Block layer timeouts race with completions which means the normal
* completion path cannot be used during recovery.
*/
if (mq->in_recovery)
mmc_blk_cqe_complete_rq(mq, req);
else if (likely(!blk_should_fake_timeout(req->q)))
blk_mq_complete_request(req);
}
static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
mrq->done = mmc_blk_cqe_req_done;
mrq->recovery_notifier = mmc_cqe_recovery_notifier;
return mmc_cqe_start_req(host, mrq);
}
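/* Prepare a bare, command-only (DCMD) mmc_request for the command queue. */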
static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq,
struct request *req)
{
struct mmc_blk_request *brq = &mqrq->brq;
memset(brq, 0, sizeof(*brq));
brq->mrq.cmd = &brq->cmd;
brq->mrq.tag = req->tag;
return &brq->mrq;
}
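/*
* Issue a cache flush as a CQE direct command: a CMD6 (SWITCH) that
* writes 1 to the EXT_CSD FLUSH_CACHE byte.
*/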
static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);
mrq->cmd->opcode = MMC_SWITCH;
mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
(EXT_CSD_FLUSH_CACHE << 16) |
(1 << 8) |
EXT_CSD_CMD_SET_NORMAL;
mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B;
return mmc_blk_cqe_start_req(mq->card->host, mrq);
}
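/* Issue a read/write request through the host software queue (HSQ). */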
static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_host *host = mq->card->host;
int err;
mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
mqrq->brq.mrq.done = mmc_blk_hsq_req_done;
mmc_pre_req(host, &mqrq->brq.mrq);
err = mmc_cqe_start_req(host, &mqrq->brq.mrq);
if (err)
mmc_post_req(host, &mqrq->brq.mrq, err);
return err;
}
static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_host *host = mq->card->host;
if (host->hsq_enabled)
return mmc_blk_hsq_issue_rw_rq(mq, req);
mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);
return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
}
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
int recovery_mode,
struct mmc_queue *mq)
{
u32 readcmd, writecmd;
struct mmc_blk_request *brq = &mqrq->brq;
struct request *req = mmc_queue_req_to_req(mqrq);
struct mmc_blk_data *md = mq->blkdata;
bool do_rel_wr, do_data_tag;
mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag);
brq->mrq.cmd = &brq->cmd;
brq->cmd.arg = blk_rq_pos(req);
if (!mmc_card_blockaddr(card))
brq->cmd.arg <<= 9;
brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
if (brq->data.blocks > 1 || do_rel_wr) {
/* SPI multiblock writes terminate using a special
* token, not a STOP_TRANSMISSION request.
*/
if (!mmc_host_is_spi(card->host) ||
rq_data_dir(req) == READ)
brq->mrq.stop = &brq->stop;
readcmd = MMC_READ_MULTIPLE_BLOCK;
writecmd = MMC_WRITE_MULTIPLE_BLOCK;
} else {
brq->mrq.stop = NULL;
readcmd = MMC_READ_SINGLE_BLOCK;
writecmd = MMC_WRITE_BLOCK;
}
brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd;
/*
* Pre-defined multi-block transfers are preferable to
* open-ended ones (and necessary for reliable writes).
* However, it is not sufficient to just send CMD23,
* and avoid the final CMD12, as on an error condition
* CMD12 (stop) needs to be sent anyway. This, coupled
* with Auto-CMD23 enhancements provided by some
* hosts, means that the complexity of dealing
* with this is best left to the host. If CMD23 is
* supported by card and host, we'll fill sbc in and let
* the host deal with handling it correctly. This means
* that for hosts that don't expose MMC_CAP_CMD23, no
* change of behavior will be observed.
*
* N.B.: Some MMC cards experience performance degradation.
* We'll avoid using CMD23-bounded multiblock writes for
* these, while retaining features like reliable writes.
*/
if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
(do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
do_data_tag)) {
brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
brq->sbc.arg = brq->data.blocks |
(do_rel_wr ? (1 << 31) : 0) |
(do_data_tag ? (1 << 29) : 0);
brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
brq->mrq.sbc = &brq->sbc;
}
}
#define MMC_MAX_RETRIES 5
#define MMC_DATA_RETRIES 2
#define MMC_NO_RETRIES (MMC_MAX_RETRIES + 1)
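/* Send CMD12 (STOP_TRANSMISSION) to take the card out of a data state. */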
static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout)
{
struct mmc_command cmd = {
.opcode = MMC_STOP_TRANSMISSION,
.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC,
/* Some hosts wait for busy anyway, so provide a busy timeout */
.busy_timeout = timeout,
};
return mmc_wait_for_cmd(card->host, &cmd, 5);
}
static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_blk_request *brq = &mqrq->brq;
unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data);
int err;
mmc_retune_hold_now(card->host);
mmc_blk_send_stop(card, timeout);
err = mmc_poll_for_busy(card, timeout, false, MMC_BUSY_IO);
mmc_retune_release(card->host);
return err;
}
#define MMC_READ_SINGLE_RETRIES 2
/* Single (native) sector read during recovery */
static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_request *mrq = &mqrq->brq.mrq;
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
blk_status_t error = BLK_STS_OK;
size_t bytes_per_read = queue_physical_block_size(mq->queue);
do {
u32 status;
int err;
int retries = 0;
while (retries++ <= MMC_READ_SINGLE_RETRIES) {
mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
mmc_wait_for_req(host, mrq);
err = mmc_send_status(card, &status);
if (err)
goto error_exit;
if (!mmc_host_is_spi(host) &&
!mmc_ready_for_data(status)) {
err = mmc_blk_fix_state(card, req);
if (err)
goto error_exit;
}
if (!mrq->cmd->error)
break;
}
if (mrq->cmd->error ||
mrq->data->error ||
(!mmc_host_is_spi(host) &&
(mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS)))
error = BLK_STS_IOERR;
else
error = BLK_STS_OK;
} while (blk_update_request(req, error, bytes_per_read));
return;
error_exit:
mrq->data->bytes_xfered = 0;
blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
/* Let it try the remaining request again */
if (mqrq->retries > MMC_MAX_RETRIES - 1)
mqrq->retries = MMC_MAX_RETRIES - 1;
}
static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq)
{
return !!brq->mrq.sbc;
}
static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq)
{
return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR;
}
/*
* Check for errors the host controller driver might not have seen such as
* response mode errors or invalid card state.
*/
static bool mmc_blk_status_error(struct request *req, u32 status)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_blk_request *brq = &mqrq->brq;
struct mmc_queue *mq = req->q->queuedata;
u32 stop_err_bits;
if (mmc_host_is_spi(mq->card->host))
return false;
stop_err_bits = mmc_blk_stop_err_bits(brq);
return brq->cmd.resp[0] & CMD_ERRORS ||
brq->stop.resp[0] & stop_err_bits ||
status & stop_err_bits ||
(rq_data_dir(req) == WRITE && !mmc_ready_for_data(status));
}
static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
{
return !brq->sbc.error && !brq->cmd.error &&
!(brq->cmd.resp[0] & CMD_ERRORS);
}
/*
* Requests are completed by mmc_blk_mq_complete_rq() which sets simple
* policy:
* 1. A request that has transferred at least some data is considered
* successful and will be requeued if there is remaining data to
* transfer.
* 2. Otherwise the number of retries is incremented and the request
* will be requeued if there are remaining retries.
* 3. Otherwise the request will be errored out.
* That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and
* mqrq->retries. So there are only 4 possible actions here:
* 1. do not accept the bytes_xfered value i.e. set it to zero
* 2. change mqrq->retries to determine the number of retries
* 3. try to reset the card
* 4. read one sector at a time
*/
static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
{
int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_blk_request *brq = &mqrq->brq;
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = mq->card;
u32 status;
u32 blocks;
int err;
/*
* Some errors the host driver might not have seen. Set the number of
* bytes transferred to zero in that case.
*/
err = __mmc_send_status(card, &status, 0);
if (err || mmc_blk_status_error(req, status))
brq->data.bytes_xfered = 0;
mmc_retune_release(card->host);
/*
* Try again to get the status. This also provides an opportunity for
* re-tuning.
*/
if (err)
err = __mmc_send_status(card, &status, 0);
/*
* Nothing more to do after the number of bytes transferred has been
* updated and there is no card.
*/
if (err && mmc_detect_card_removed(card->host))
return;
/* Try to get back to "tran" state */
if (!mmc_host_is_spi(mq->card->host) &&
(err || !mmc_ready_for_data(status)))
err = mmc_blk_fix_state(mq->card, req);
/*
* Special case for SD cards where the card might record the number of
* blocks written.
*/
if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) &&
rq_data_dir(req) == WRITE) {
if (mmc_sd_num_wr_blocks(card, &blocks))
brq->data.bytes_xfered = 0;
else
brq->data.bytes_xfered = blocks << 9;
}
/* Reset if the card is in a bad state */
if (!mmc_host_is_spi(mq->card->host) &&
err && mmc_blk_reset(md, card->host, type)) {
pr_err("%s: recovery failed!\n", req->q->disk->disk_name);
mqrq->retries = MMC_NO_RETRIES;
return;
}
/*
* If anything was done, just return and if there is anything remaining
* on the request it will get requeued.
*/
if (brq->data.bytes_xfered)
return;
/* Reset before last retry */
if (mqrq->retries + 1 == MMC_MAX_RETRIES &&
mmc_blk_reset(md, card->host, type))
return;
/* Command errors fail fast, so use all MMC_MAX_RETRIES */
if (brq->sbc.error || brq->cmd.error)
return;
/* Reduce the remaining retries for data errors */
if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) {
mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES;
return;
}
if (rq_data_dir(req) == READ && brq->data.blocks >
queue_physical_block_size(mq->queue) >> 9) {
/* Read one (native) sector at a time */
mmc_blk_read_single(mq, req);
return;
}
}
static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
{
mmc_blk_eval_resp_error(brq);
return brq->sbc.error || brq->cmd.error || brq->stop.error ||
brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
}
static int mmc_spi_err_check(struct mmc_card *card)
{
u32 status = 0;
int err;
/*
* SPI does not have a TRAN state we have to wait on; instead the
* card is ready again when it no longer holds the line LOW.
* We still have to ensure two things here before we know the write
* was successful:
* 1. The card has not disconnected during busy and we actually read our
* own pull-up, thinking it was still connected, so ensure it
* still responds.
* 2. Check for any error bits, in particular R1_SPI_IDLE to catch a
* just reconnected card after being disconnected during busy.
*/
err = __mmc_send_status(card, &status, 0);
if (err)
return err;
/* All R1 and R2 bits of SPI are errors in our case */
if (status)
return -EIO;
return 0;
}
static int mmc_blk_busy_cb(void *cb_data, bool *busy)
{
struct mmc_blk_busy_data *data = cb_data;
u32 status = 0;
int err;
err = mmc_send_status(data->card, &status);
if (err)
return err;
/* Accumulate response error bits. */
data->status |= status;
*busy = !mmc_ready_for_data(status);
return 0;
}
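/*
* After a write, poll CMD13 until the card leaves the programming state,
* accumulating any error bits seen along the way.
*/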
static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_blk_busy_data cb_data;
int err;
if (rq_data_dir(req) == READ)
return 0;
if (mmc_host_is_spi(card->host)) {
err = mmc_spi_err_check(card);
if (err)
mqrq->brq.data.bytes_xfered = 0;
return err;
}
cb_data.card = card;
cb_data.status = 0;
err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS,
&mmc_blk_busy_cb, &cb_data);
/*
* Do not assume data transferred correctly if there are any error bits
* set.
*/
if (cb_data.status & mmc_blk_stop_err_bits(&mqrq->brq)) {
mqrq->brq.data.bytes_xfered = 0;
err = err ? err : -EIO;
}
/* Copy the exception bit so it will be seen later on */
if (mmc_card_mmc(card) && cb_data.status & R1_EXCEPTION_EVENT)
mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT;
return err;
}
static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq,
struct request *req)
{
int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
mmc_blk_reset_success(mq->blkdata, type);
}
static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
unsigned int nr_bytes = mqrq->brq.data.bytes_xfered;
if (nr_bytes) {
if (blk_update_request(req, BLK_STS_OK, nr_bytes))
blk_mq_requeue_request(req, true);
else
__blk_mq_end_request(req, BLK_STS_OK);
} else if (!blk_rq_bytes(req)) {
__blk_mq_end_request(req, BLK_STS_IOERR);
} else if (mqrq->retries++ < MMC_MAX_RETRIES) {
blk_mq_requeue_request(req, true);
} else {
if (mmc_card_removed(mq->card))
req->rq_flags |= RQF_QUIET;
blk_mq_end_request(req, BLK_STS_IOERR);
}
}
static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq,
struct mmc_queue_req *mqrq)
{
return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) &&
(mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT ||
mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT);
}
static void mmc_blk_urgent_bkops(struct mmc_queue *mq,
struct mmc_queue_req *mqrq)
{
if (mmc_blk_urgent_bkops_needed(mq, mqrq))
mmc_run_bkops(mq->card);
}
static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
{
struct mmc_queue_req *mqrq =
container_of(mrq, struct mmc_queue_req, brq.mrq);
struct request *req = mmc_queue_req_to_req(mqrq);
struct request_queue *q = req->q;
struct mmc_queue *mq = q->queuedata;
struct mmc_host *host = mq->card->host;
unsigned long flags;
if (mmc_blk_rq_error(&mqrq->brq) ||
mmc_blk_urgent_bkops_needed(mq, mqrq)) {
spin_lock_irqsave(&mq->lock, flags);
mq->recovery_needed = true;
mq->recovery_req = req;
spin_unlock_irqrestore(&mq->lock, flags);
host->cqe_ops->cqe_recovery_start(host);
schedule_work(&mq->recovery_work);
return;
}
mmc_blk_rw_reset_success(mq, req);
/*
* Block layer timeouts race with completions which means the normal
* completion path cannot be used during recovery.
*/
if (mq->in_recovery)
mmc_blk_cqe_complete_rq(mq, req);
else if (likely(!blk_should_fake_timeout(req->q)))
blk_mq_complete_request(req);
}
void mmc_blk_mq_complete(struct request *req)
{
struct mmc_queue *mq = req->q->queuedata;
struct mmc_host *host = mq->card->host;
if (host->cqe_enabled)
mmc_blk_cqe_complete_rq(mq, req);
else if (likely(!blk_should_fake_timeout(req->q)))
mmc_blk_mq_complete_rq(mq, req);
}
static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_host *host = mq->card->host;
if (mmc_blk_rq_error(&mqrq->brq) ||
mmc_blk_card_busy(mq->card, req)) {
mmc_blk_mq_rw_recovery(mq, req);
} else {
mmc_blk_rw_reset_success(mq, req);
mmc_retune_release(host);
}
mmc_blk_urgent_bkops(mq, mqrq);
}
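/*
* Drop the in-flight count for this issue type and release the claim on
* the card once no requests remain in flight.
*/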
static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type)
{
unsigned long flags;
bool put_card;
spin_lock_irqsave(&mq->lock, flags);
mq->in_flight[issue_type] -= 1;
put_card = (mmc_tot_in_flight(mq) == 0);
spin_unlock_irqrestore(&mq->lock, flags);
if (put_card)
mmc_put_card(mq->card, &mq->ctx);
}
static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
bool can_sleep)
{
enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_request *mrq = &mqrq->brq.mrq;
struct mmc_host *host = mq->card->host;
mmc_post_req(host, mrq, 0);
/*
* Block layer timeouts race with completions which means the normal
* completion path cannot be used during recovery.
*/
if (mq->in_recovery) {
mmc_blk_mq_complete_rq(mq, req);
} else if (likely(!blk_should_fake_timeout(req->q))) {
if (can_sleep)
blk_mq_complete_request_direct(req, mmc_blk_mq_complete);
else
blk_mq_complete_request(req);
}
mmc_blk_mq_dec_in_flight(mq, issue_type);
}
void mmc_blk_mq_recovery(struct mmc_queue *mq)
{
struct request *req = mq->recovery_req;
struct mmc_host *host = mq->card->host;
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
mq->recovery_req = NULL;
mq->rw_wait = false;
if (mmc_blk_rq_error(&mqrq->brq)) {
mmc_retune_hold_now(host);
mmc_blk_mq_rw_recovery(mq, req);
}
mmc_blk_urgent_bkops(mq, mqrq);
mmc_blk_mq_post_req(mq, req, true);
}
static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
struct request **prev_req)
{
if (mmc_host_done_complete(mq->card->host))
return;
mutex_lock(&mq->complete_lock);
if (!mq->complete_req)
goto out_unlock;
mmc_blk_mq_poll_completion(mq, mq->complete_req);
if (prev_req)
*prev_req = mq->complete_req;
else
mmc_blk_mq_post_req(mq, mq->complete_req, true);
mq->complete_req = NULL;
out_unlock:
mutex_unlock(&mq->complete_lock);
}
void mmc_blk_mq_complete_work(struct work_struct *work)
{
struct mmc_queue *mq = container_of(work, struct mmc_queue,
complete_work);
mmc_blk_mq_complete_prev_req(mq, NULL);
}
static void mmc_blk_mq_req_done(struct mmc_request *mrq)
{
struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
brq.mrq);
struct request *req = mmc_queue_req_to_req(mqrq);
struct request_queue *q = req->q;
struct mmc_queue *mq = q->queuedata;
struct mmc_host *host = mq->card->host;
unsigned long flags;
if (!mmc_host_done_complete(host)) {
bool waiting;
/*
* We cannot complete the request in this context, so record
* that there is a request to complete, and that a following
* request does not need to wait (although it does need to
* complete complete_req first).
*/
spin_lock_irqsave(&mq->lock, flags);
mq->complete_req = req;
mq->rw_wait = false;
waiting = mq->waiting;
spin_unlock_irqrestore(&mq->lock, flags);
/*
* If 'waiting' then the waiting task will complete this
* request, otherwise queue a work to do it. Note that
* complete_work may still race with the dispatch of a following
* request.
*/
if (waiting)
wake_up(&mq->wait);
else
queue_work(mq->card->complete_wq, &mq->complete_work);
return;
}
/* Take the recovery path for errors or urgent background operations */
if (mmc_blk_rq_error(&mqrq->brq) ||
mmc_blk_urgent_bkops_needed(mq, mqrq)) {
spin_lock_irqsave(&mq->lock, flags);
mq->recovery_needed = true;
mq->recovery_req = req;
spin_unlock_irqrestore(&mq->lock, flags);
wake_up(&mq->wait);
schedule_work(&mq->recovery_work);
return;
}
mmc_blk_rw_reset_success(mq, req);
mq->rw_wait = false;
wake_up(&mq->wait);
/* context unknown */
mmc_blk_mq_post_req(mq, req, false);
}
static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
{
unsigned long flags;
bool done;
/*
* Wait while there is another request in progress, but not if recovery
* is needed. Also indicate whether there is a request waiting to start.
*/
spin_lock_irqsave(&mq->lock, flags);
if (mq->recovery_needed) {
*err = -EBUSY;
done = true;
} else {
done = !mq->rw_wait;
}
mq->waiting = !done;
spin_unlock_irqrestore(&mq->lock, flags);
return done;
}
static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req)
{
int err = 0;
wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err));
/* Always complete the previous request if there is one */
mmc_blk_mq_complete_prev_req(mq, prev_req);
return err;
}
static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_host *host = mq->card->host;
struct request *prev_req = NULL;
int err = 0;
mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
mqrq->brq.mrq.done = mmc_blk_mq_req_done;
mmc_pre_req(host, &mqrq->brq.mrq);
err = mmc_blk_rw_wait(mq, &prev_req);
if (err)
goto out_post_req;
mq->rw_wait = true;
err = mmc_start_request(host, &mqrq->brq.mrq);
if (prev_req)
mmc_blk_mq_post_req(mq, prev_req, true);
if (err)
mq->rw_wait = false;
/* Release re-tuning here where there is no synchronization required */
if (err || mmc_host_done_complete(host))
mmc_retune_release(host);
out_post_req:
if (err)
mmc_post_req(host, &mqrq->brq.mrq, err);
return err;
}
static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host)
{
if (host->cqe_enabled)
return host->cqe_ops->cqe_wait_for_idle(host);
return mmc_blk_rw_wait(mq, NULL);
}
enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
struct mmc_host *host = card->host;
int ret;
ret = mmc_blk_part_switch(card, md->part_type);
if (ret)
return MMC_REQ_FAILED_TO_START;
switch (mmc_issue_type(mq, req)) {
case MMC_ISSUE_SYNC:
ret = mmc_blk_wait_for_idle(mq, host);
if (ret)
return MMC_REQ_BUSY;
switch (req_op(req)) {
case REQ_OP_DRV_IN:
case REQ_OP_DRV_OUT:
mmc_blk_issue_drv_op(mq, req);
break;
case REQ_OP_DISCARD:
mmc_blk_issue_discard_rq(mq, req);
break;
case REQ_OP_SECURE_ERASE:
mmc_blk_issue_secdiscard_rq(mq, req);
break;
case REQ_OP_WRITE_ZEROES:
mmc_blk_issue_trim_rq(mq, req);
break;
case REQ_OP_FLUSH:
mmc_blk_issue_flush(mq, req);
break;
default:
WARN_ON_ONCE(1);
return MMC_REQ_FAILED_TO_START;
}
return MMC_REQ_FINISHED;
case MMC_ISSUE_DCMD:
case MMC_ISSUE_ASYNC:
switch (req_op(req)) {
case REQ_OP_FLUSH:
if (!mmc_cache_enabled(host)) {
blk_mq_end_request(req, BLK_STS_OK);
return MMC_REQ_FINISHED;
}
ret = mmc_blk_cqe_issue_flush(mq, req);
break;
case REQ_OP_READ:
case REQ_OP_WRITE:
if (host->cqe_enabled)
ret = mmc_blk_cqe_issue_rw_rq(mq, req);
else
ret = mmc_blk_mq_issue_rw_rq(mq, req);
break;
default:
WARN_ON_ONCE(1);
ret = -EINVAL;
}
if (!ret)
return MMC_REQ_STARTED;
return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START;
default:
WARN_ON_ONCE(1);
return MMC_REQ_FAILED_TO_START;
}
}
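/*
* A card is treated as read-only if its write-protect switch is set or
* if it does not support the block write command class.
*/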
static inline int mmc_blk_readonly(struct mmc_card *card)
{
return mmc_card_readonly(card) ||
!(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
struct device *parent,
sector_t size,
bool default_ro,
const char *subname,
int area_type,
unsigned int part_type)
{
struct mmc_blk_data *md;
int devidx, ret;
char cap_str[10];
bool cache_enabled = false;
bool fua_enabled = false;
devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
if (devidx < 0) {
/*
* We get -ENOSPC when there are no available devidx left. The
* reason may be that either userspace hasn't yet unmounted the
* partitions, which postpones mmc_blk_release() from being
* called, or the device has more partitions than we support.
*/
if (devidx == -ENOSPC)
dev_err(mmc_dev(card->host),
"no more device IDs available\n");
return ERR_PTR(devidx);
}
md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
if (!md) {
ret = -ENOMEM;
goto out;
}
md->area_type = area_type;
/*
* Set the read-only status based on the supported commands
* and the write protect switch.
*/
md->read_only = mmc_blk_readonly(card);
md->disk = mmc_init_queue(&md->queue, card);
if (IS_ERR(md->disk)) {
ret = PTR_ERR(md->disk);
goto err_kfree;
}
INIT_LIST_HEAD(&md->part);
INIT_LIST_HEAD(&md->rpmbs);
kref_init(&md->kref);
md->queue.blkdata = md;
md->part_type = part_type;
md->disk->major = MMC_BLOCK_MAJOR;
md->disk->minors = perdev_minors;
md->disk->first_minor = devidx * perdev_minors;
md->disk->fops = &mmc_bdops;
md->disk->private_data = md;
md->parent = parent;
set_disk_ro(md->disk, md->read_only || default_ro);
if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
md->disk->flags |= GENHD_FL_NO_PART;
/*
* As discussed on lkml, GENHD_FL_REMOVABLE should:
*
* - be set for removable media with permanent block devices
* - be unset for removable block devices with permanent media
*
* Since MMC block devices clearly fall under the second
* case, we do not set GENHD_FL_REMOVABLE. Userspace
* should use the block device creation/destruction hotplug
* messages to tell when the card is present.
*/
snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
"mmcblk%u%s", card->host->index, subname ? subname : "");
set_capacity(md->disk, size);
if (mmc_host_cmd23(card->host)) {
if ((mmc_card_mmc(card) &&
card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
(mmc_card_sd(card) &&
card->scr.cmds & SD_SCR_CMD23_SUPPORT))
md->flags |= MMC_BLK_CMD23;
}
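/*
 * Reliable writes require CMD23; when the card supports them,
 * advertise FUA and a write cache to the block layer.
 */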
if (md->flags & MMC_BLK_CMD23 &&
((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
card->ext_csd.rel_sectors)) {
md->flags |= MMC_BLK_REL_WR;
fua_enabled = true;
cache_enabled = true;
}
if (mmc_cache_enabled(card->host))
cache_enabled = true;
blk_queue_write_cache(md->queue.queue, cache_enabled, fua_enabled);
string_get_size((u64)size, 512, STRING_UNITS_2,
cap_str, sizeof(cap_str));
pr_info("%s: %s %s %s%s\n",
md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
cap_str, md->read_only ? " (ro)" : "");
/* used in ->open, must be set before add_disk: */
if (area_type == MMC_BLK_DATA_AREA_MAIN)
dev_set_drvdata(&card->dev, md);
ret = device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
if (ret)
goto err_put_disk;
return md;
err_put_disk:
put_disk(md->disk);
blk_mq_free_tag_set(&md->queue.tag_set);
err_kfree:
kfree(md);
out:
ida_simple_remove(&mmc_blk_ida, devidx);
return ERR_PTR(ret);
}
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
sector_t size;
if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
/*
* The EXT_CSD sector count is in units of 512-byte
* sectors.
*/
size = card->ext_csd.sectors;
} else {
/*
* The CSD capacity field is in units of read_blkbits.
* set_capacity takes units of 512 bytes.
*/
size = (typeof(sector_t))card->csd.capacity
<< (card->csd.read_blkbits - 9);
}
return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
MMC_BLK_DATA_AREA_MAIN, 0);
}
static int mmc_blk_alloc_part(struct mmc_card *card,
struct mmc_blk_data *md,
unsigned int part_type,
sector_t size,
bool default_ro,
const char *subname,
int area_type)
{
struct mmc_blk_data *part_md;
part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
subname, area_type, part_type);
if (IS_ERR(part_md))
return PTR_ERR(part_md);
list_add(&part_md->part, &md->part);
return 0;
}
/**
* mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev
* @filp: the character device file
* @cmd: the ioctl() command
* @arg: the argument from userspace
*
* This will essentially just redirect the ioctl()s coming in over to
* the main block device spawning the RPMB character device.
*/
static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct mmc_rpmb_data *rpmb = filp->private_data;
int ret;
switch (cmd) {
case MMC_IOC_CMD:
ret = mmc_blk_ioctl_cmd(rpmb->md,
(struct mmc_ioc_cmd __user *)arg,
rpmb);
break;
case MMC_IOC_MULTI_CMD:
ret = mmc_blk_ioctl_multi_cmd(rpmb->md,
(struct mmc_ioc_multi_cmd __user *)arg,
rpmb);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
#ifdef CONFIG_COMPAT
static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd,
unsigned long arg)
{
return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif
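/*
 * open/release keep both the RPMB device and the backing block data
 * alive: get_device()/mmc_blk_get() here are balanced by
 * mmc_blk_put()/put_device() in the release handler.
 */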
static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp)
{
struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
struct mmc_rpmb_data, chrdev);
get_device(&rpmb->dev);
filp->private_data = rpmb;
mmc_blk_get(rpmb->md->disk);
return nonseekable_open(inode, filp);
}
static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
{
struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
struct mmc_rpmb_data, chrdev);
mmc_blk_put(rpmb->md);
put_device(&rpmb->dev);
return 0;
}
static const struct file_operations mmc_rpmb_fileops = {
.release = mmc_rpmb_chrdev_release,
.open = mmc_rpmb_chrdev_open,
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = mmc_rpmb_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mmc_rpmb_ioctl_compat,
#endif
};
static void mmc_blk_rpmb_device_release(struct device *dev)
{
struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
ida_simple_remove(&mmc_rpmb_ida, rpmb->id);
kfree(rpmb);
}
static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
struct mmc_blk_data *md,
unsigned int part_index,
sector_t size,
const char *subname)
{
int devidx, ret;
char rpmb_name[DISK_NAME_LEN];
char cap_str[10];
struct mmc_rpmb_data *rpmb;
/* This creates the minor number for the RPMB char device */
devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL);
if (devidx < 0)
return devidx;
rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL);
if (!rpmb) {
ida_simple_remove(&mmc_rpmb_ida, devidx);
return -ENOMEM;
}
snprintf(rpmb_name, sizeof(rpmb_name),
"mmcblk%u%s", card->host->index, subname ? subname : "");
rpmb->id = devidx;
rpmb->part_index = part_index;
rpmb->dev.init_name = rpmb_name;
rpmb->dev.bus = &mmc_rpmb_bus_type;
rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id);
rpmb->dev.parent = &card->dev;
rpmb->dev.release = mmc_blk_rpmb_device_release;
device_initialize(&rpmb->dev);
dev_set_drvdata(&rpmb->dev, rpmb);
rpmb->md = md;
cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops);
rpmb->chrdev.owner = THIS_MODULE;
ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev);
if (ret) {
pr_err("%s: could not add character device\n", rpmb_name);
goto out_put_device;
}
list_add(&rpmb->node, &md->rpmbs);
string_get_size((u64)size, 512, STRING_UNITS_2,
cap_str, sizeof(cap_str));
pr_info("%s: %s %s %s, chardev (%d:%d)\n",
rpmb_name, mmc_card_id(card), mmc_card_name(card), cap_str,
MAJOR(mmc_rpmb_devt), rpmb->id);
return 0;
out_put_device:
put_device(&rpmb->dev);
return ret;
}
static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb)
{
cdev_device_del(&rpmb->chrdev, &rpmb->dev);
put_device(&rpmb->dev);
}
/*
 * MMC physical partitions consist of two boot partitions and
 * up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be
 * allocated to provide access to the partition.
 */
static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
int idx, ret;
if (!mmc_card_mmc(card))
return 0;
for (idx = 0; idx < card->nr_parts; idx++) {
if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) {
/*
* RPMB partitions do not provide block access; they
* are only accessed using ioctl()s. Thus create
* special RPMB block devices that do not have a
* backing block queue for them.
*/
ret = mmc_blk_alloc_rpmb_part(card, md,
card->part[idx].part_cfg,
card->part[idx].size >> 9,
card->part[idx].name);
if (ret)
return ret;
} else if (card->part[idx].size) {
ret = mmc_blk_alloc_part(card, md,
card->part[idx].part_cfg,
card->part[idx].size >> 9,
card->part[idx].force_ro,
card->part[idx].name,
card->part[idx].area_type);
if (ret)
return ret;
}
}
return 0;
}
static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
/*
* Flush remaining requests and free queues. It is freeing the queue
* that stops new requests from being accepted.
*/
del_gendisk(md->disk);
mmc_cleanup_queue(&md->queue);
mmc_blk_put(md);
}
static void mmc_blk_remove_parts(struct mmc_card *card,
struct mmc_blk_data *md)
{
struct list_head *pos, *q;
struct mmc_blk_data *part_md;
struct mmc_rpmb_data *rpmb;
/* Remove RPMB partitions */
list_for_each_safe(pos, q, &md->rpmbs) {
rpmb = list_entry(pos, struct mmc_rpmb_data, node);
list_del(pos);
mmc_blk_remove_rpmb_part(rpmb);
}
/* Remove block partitions */
list_for_each_safe(pos, q, &md->part) {
part_md = list_entry(pos, struct mmc_blk_data, part);
list_del(pos);
mmc_blk_remove_req(part_md);
}
}
#ifdef CONFIG_DEBUG_FS
static int mmc_dbg_card_status_get(void *data, u64 *val)
{
struct mmc_card *card = data;
struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
struct mmc_queue *mq = &md->queue;
struct request *req;
int ret;
/* Ask the block layer about the card status */
req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return PTR_ERR(req);
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
req_to_mmc_queue_req(req)->drv_op_result = -EIO;
blk_execute_rq(req, false);
ret = req_to_mmc_queue_req(req)->drv_op_result;
if (ret >= 0) {
*val = ret;
ret = 0;
}
blk_mq_free_request(req);
return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
NULL, "%08llx\n");
/* Two hex digits per byte for all 512 EXT_CSD bytes, plus one for the newline */
#define EXT_CSD_STR_LEN 1025
static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
{
struct mmc_card *card = inode->i_private;
struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
struct mmc_queue *mq = &md->queue;
struct request *req;
char *buf;
ssize_t n = 0;
u8 *ext_csd;
int err, i;
buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* Ask the block layer for the EXT CSD */
req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto out_free;
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
req_to_mmc_queue_req(req)->drv_op_result = -EIO;
req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
blk_execute_rq(req, false);
err = req_to_mmc_queue_req(req)->drv_op_result;
blk_mq_free_request(req);
if (err) {
pr_err("FAILED %d\n", err);
goto out_free;
}
for (i = 0; i < 512; i++)
n += sprintf(buf + n, "%02x", ext_csd[i]);
n += sprintf(buf + n, "\n");
if (n != EXT_CSD_STR_LEN) {
err = -EINVAL;
kfree(ext_csd);
goto out_free;
}
filp->private_data = buf;
kfree(ext_csd);
return 0;
out_free:
kfree(buf);
return err;
}
static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char *buf = filp->private_data;
return simple_read_from_buffer(ubuf, cnt, ppos,
buf, EXT_CSD_STR_LEN);
}
static int mmc_ext_csd_release(struct inode *inode, struct file *file)
{
kfree(file->private_data);
return 0;
}
static const struct file_operations mmc_dbg_ext_csd_fops = {
.open = mmc_ext_csd_open,
.read = mmc_ext_csd_read,
.release = mmc_ext_csd_release,
.llseek = default_llseek,
};
static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
{
struct dentry *root;
if (!card->debugfs_root)
return;
root = card->debugfs_root;
if (mmc_card_mmc(card) || mmc_card_sd(card)) {
md->status_dentry =
debugfs_create_file_unsafe("status", 0400, root,
card,
&mmc_dbg_card_status_fops);
}
if (mmc_card_mmc(card)) {
md->ext_csd_dentry =
debugfs_create_file("ext_csd", S_IRUSR, root, card,
&mmc_dbg_ext_csd_fops);
}
}
static void mmc_blk_remove_debugfs(struct mmc_card *card,
struct mmc_blk_data *md)
{
if (!card->debugfs_root)
return;
debugfs_remove(md->status_dentry);
md->status_dentry = NULL;
debugfs_remove(md->ext_csd_dentry);
md->ext_csd_dentry = NULL;
}
#else
static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
{
}
static void mmc_blk_remove_debugfs(struct mmc_card *card,
struct mmc_blk_data *md)
{
}
#endif /* CONFIG_DEBUG_FS */
static int mmc_blk_probe(struct mmc_card *card)
{
struct mmc_blk_data *md;
int ret = 0;
/*
* Check that the card supports the command class(es) we need.
*/
if (!(card->csd.cmdclass & CCC_BLOCK_READ))
return -ENODEV;
mmc_fixup_device(card, mmc_blk_fixups);
card->complete_wq = alloc_workqueue("mmc_complete",
WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
if (!card->complete_wq) {
pr_err("Failed to create mmc completion workqueue");
return -ENOMEM;
}
md = mmc_blk_alloc(card);
if (IS_ERR(md)) {
ret = PTR_ERR(md);
goto out_free;
}
ret = mmc_blk_alloc_parts(card, md);
if (ret)
goto out;
/* Add two debugfs entries */
mmc_blk_add_debugfs(card, md);
pm_runtime_set_autosuspend_delay(&card->dev, 3000);
pm_runtime_use_autosuspend(&card->dev);
/*
* Don't enable runtime PM for SD-combo cards here. Leave that
* decision to be taken during the SDIO init sequence instead.
*/
if (!mmc_card_sd_combo(card)) {
pm_runtime_set_active(&card->dev);
pm_runtime_enable(&card->dev);
}
return 0;
out:
mmc_blk_remove_parts(card, md);
mmc_blk_remove_req(md);
out_free:
destroy_workqueue(card->complete_wq);
return ret;
}
static void mmc_blk_remove(struct mmc_card *card)
{
struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
mmc_blk_remove_debugfs(card, md);
mmc_blk_remove_parts(card, md);
pm_runtime_get_sync(&card->dev);
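/* Switch back to the main partition before the card goes away. */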
if (md->part_curr != md->part_type) {
mmc_claim_host(card->host);
mmc_blk_part_switch(card, md->part_type);
mmc_release_host(card->host);
}
if (!mmc_card_sd_combo(card))
pm_runtime_disable(&card->dev);
pm_runtime_put_noidle(&card->dev);
mmc_blk_remove_req(md);
destroy_workqueue(card->complete_wq);
}
static int _mmc_blk_suspend(struct mmc_card *card)
{
struct mmc_blk_data *part_md;
struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
if (md) {
mmc_queue_suspend(&md->queue);
list_for_each_entry(part_md, &md->part, part) {
mmc_queue_suspend(&part_md->queue);
}
}
return 0;
}
static void mmc_blk_shutdown(struct mmc_card *card)
{
_mmc_blk_suspend(card);
}
#ifdef CONFIG_PM_SLEEP
static int mmc_blk_suspend(struct device *dev)
{
struct mmc_card *card = mmc_dev_to_card(dev);
return _mmc_blk_suspend(card);
}
static int mmc_blk_resume(struct device *dev)
{
struct mmc_blk_data *part_md;
struct mmc_blk_data *md = dev_get_drvdata(dev);
if (md) {
/*
* Resume involves the card going into idle state,
* so current partition is always the main one.
*/
md->part_curr = md->part_type;
mmc_queue_resume(&md->queue);
list_for_each_entry(part_md, &md->part, part) {
mmc_queue_resume(&part_md->queue);
}
}
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
static struct mmc_driver mmc_driver = {
.drv = {
.name = "mmcblk",
.pm = &mmc_blk_pm_ops,
},
.probe = mmc_blk_probe,
.remove = mmc_blk_remove,
.shutdown = mmc_blk_shutdown,
};
static int __init mmc_blk_init(void)
{
int res;
res = bus_register(&mmc_rpmb_bus_type);
if (res < 0) {
pr_err("mmcblk: could not register RPMB bus type\n");
return res;
}
res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb");
if (res < 0) {
pr_err("mmcblk: failed to allocate rpmb chrdev region\n");
goto out_bus_unreg;
}
if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
pr_info("mmcblk: using %d minors per device\n", perdev_minors);
max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
if (res)
goto out_chrdev_unreg;
res = mmc_register_driver(&mmc_driver);
if (res)
goto out_blkdev_unreg;
return 0;
out_blkdev_unreg:
unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
out_chrdev_unreg:
unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
out_bus_unreg:
bus_unregister(&mmc_rpmb_bus_type);
return res;
}
static void __exit mmc_blk_exit(void)
{
mmc_unregister_driver(&mmc_driver);
unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
bus_unregister(&mmc_rpmb_bus_type);
}
module_init(mmc_blk_init);
module_exit(mmc_blk_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
| linux-master | drivers/mmc/core/block.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/mmc/core/sd.c
*
* Copyright (C) 2003-2004 Russell King, All Rights Reserved.
* SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
* Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
*/
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_runtime.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/sysfs.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include "core.h"
#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "sd.h"
#include "sd_ops.h"
static const unsigned int tran_exp[] = {
10000, 100000, 1000000, 10000000,
0, 0, 0, 0
};
static const unsigned char tran_mant[] = {
0, 10, 12, 13, 15, 20, 25, 30,
35, 40, 45, 50, 55, 60, 70, 80,
};
static const unsigned int taac_exp[] = {
1, 10, 100, 1000, 10000, 100000, 1000000, 10000000,
};
static const unsigned int taac_mant[] = {
0, 10, 12, 13, 15, 20, 25, 30,
35, 40, 45, 50, 55, 60, 70, 80,
};
static const unsigned int sd_au_size[] = {
0, SZ_16K / 512, SZ_32K / 512, SZ_64K / 512,
SZ_128K / 512, SZ_256K / 512, SZ_512K / 512, SZ_1M / 512,
SZ_2M / 512, SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512,
};
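/*
 * Extract a bitfield from a 128-bit register response stored as four
 * u32 words, with resp[0] holding bits 127:96 down to resp[3] holding
 * bits 31:0. Fields that straddle a word boundary pull in the high
 * bits from the neighbouring word.
 */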
#define UNSTUFF_BITS(resp,start,size) \
({ \
const int __size = size; \
const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
const int __off = 3 - ((start) / 32); \
const int __shft = (start) & 31; \
u32 __res; \
\
__res = resp[__off] >> __shft; \
if (__size + __shft > 32) \
__res |= resp[__off-1] << ((32 - __shft) % 32); \
__res & __mask; \
})
#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 1000
#define SD_WRITE_EXTR_SINGLE_TIMEOUT_MS 1000
struct sd_busy_data {
struct mmc_card *card;
u8 *reg_buf;
};
/*
* Given the decoded CSD structure, decode the raw CID to our CID structure.
*/
void mmc_decode_cid(struct mmc_card *card)
{
u32 *resp = card->raw_cid;
/*
* Add the raw card ID (cid) data to the entropy pool. It doesn't
* matter that not all of it is unique, it's just bonus entropy.
*/
add_device_randomness(&card->raw_cid, sizeof(card->raw_cid));
/*
* SD doesn't currently have a version field so we will
* have to assume we can parse this.
*/
card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
card->cid.hwrev = UNSTUFF_BITS(resp, 60, 4);
card->cid.fwrev = UNSTUFF_BITS(resp, 56, 4);
card->cid.serial = UNSTUFF_BITS(resp, 24, 32);
card->cid.year = UNSTUFF_BITS(resp, 12, 8);
card->cid.month = UNSTUFF_BITS(resp, 8, 4);
card->cid.year += 2000; /* SD cards year offset */
}
/*
* Given a 128-bit response, decode to our card CSD structure.
*/
static int mmc_decode_csd(struct mmc_card *card)
{
struct mmc_csd *csd = &card->csd;
unsigned int e, m, csd_struct;
u32 *resp = card->raw_csd;
csd_struct = UNSTUFF_BITS(resp, 126, 2);
switch (csd_struct) {
case 0:
m = UNSTUFF_BITS(resp, 115, 4);
e = UNSTUFF_BITS(resp, 112, 3);
csd->taac_ns = (taac_exp[e] * taac_mant[m] + 9) / 10;
csd->taac_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
m = UNSTUFF_BITS(resp, 99, 4);
e = UNSTUFF_BITS(resp, 96, 3);
csd->max_dtr = tran_exp[e] * tran_mant[m];
csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
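/*
 * CSD 1.0: capacity = (C_SIZE + 1) << (C_SIZE_MULT + 2), in units
 * of read_blkbits-sized blocks.
 */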
e = UNSTUFF_BITS(resp, 47, 3);
m = UNSTUFF_BITS(resp, 62, 12);
csd->capacity = (1 + m) << (e + 2);
csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
if (UNSTUFF_BITS(resp, 46, 1)) {
csd->erase_size = 1;
} else if (csd->write_blkbits >= 9) {
csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1;
csd->erase_size <<= csd->write_blkbits - 9;
}
if (UNSTUFF_BITS(resp, 13, 1))
mmc_card_set_readonly(card);
break;
case 1:
/*
* This is a block-addressed SDHC or SDXC card. Most
* interesting fields are unused and have fixed
* values. To avoid getting tripped by buggy cards,
* we assume those fixed values ourselves.
*/
mmc_card_set_blockaddr(card);
csd->taac_ns = 0; /* Unused */
csd->taac_clks = 0; /* Unused */
m = UNSTUFF_BITS(resp, 99, 4);
e = UNSTUFF_BITS(resp, 96, 3);
csd->max_dtr = tran_exp[e] * tran_mant[m];
csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
csd->c_size = UNSTUFF_BITS(resp, 48, 22);
/* SDXC cards have a minimum C_SIZE of 0x00FFFF */
if (csd->c_size >= 0xFFFF)
mmc_card_set_ext_capacity(card);
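/*
 * CSD 2.0: capacity = (C_SIZE + 1) * 512 KiB, i.e. (C_SIZE + 1) << 10
 * in 512-byte blocks.
 */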
m = UNSTUFF_BITS(resp, 48, 22);
csd->capacity = (1 + m) << 10;
csd->read_blkbits = 9;
csd->read_partial = 0;
csd->write_misalign = 0;
csd->read_misalign = 0;
csd->r2w_factor = 4; /* Unused */
csd->write_blkbits = 9;
csd->write_partial = 0;
csd->erase_size = 1;
if (UNSTUFF_BITS(resp, 13, 1))
mmc_card_set_readonly(card);
break;
default:
pr_err("%s: unrecognised CSD structure version %d\n",
mmc_hostname(card->host), csd_struct);
return -EINVAL;
}
card->erase_size = csd->erase_size;
return 0;
}
/*
* Given a 64-bit response, decode to our card SCR structure.
*/
static int mmc_decode_scr(struct mmc_card *card)
{
struct sd_scr *scr = &card->scr;
unsigned int scr_struct;
u32 resp[4];
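/*
 * Place the 64-bit SCR in the low words so that UNSTUFF_BITS, which
 * expects a 128-bit layout, sees bits 63:0 at the right offsets.
 */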
resp[3] = card->raw_scr[1];
resp[2] = card->raw_scr[0];
scr_struct = UNSTUFF_BITS(resp, 60, 4);
if (scr_struct != 0) {
pr_err("%s: unrecognised SCR structure version %d\n",
mmc_hostname(card->host), scr_struct);
return -EINVAL;
}
scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4);
scr->bus_widths = UNSTUFF_BITS(resp, 48, 4);
if (scr->sda_vsn == SCR_SPEC_VER_2)
/* Check if Physical Layer Spec v3.0 is supported */
scr->sda_spec3 = UNSTUFF_BITS(resp, 47, 1);
if (scr->sda_spec3) {
scr->sda_spec4 = UNSTUFF_BITS(resp, 42, 1);
scr->sda_specx = UNSTUFF_BITS(resp, 38, 4);
}
if (UNSTUFF_BITS(resp, 55, 1))
card->erased_byte = 0xFF;
else
card->erased_byte = 0x0;
if (scr->sda_spec4)
scr->cmds = UNSTUFF_BITS(resp, 32, 4);
else if (scr->sda_spec3)
scr->cmds = UNSTUFF_BITS(resp, 32, 2);
/* SD Spec says: any SD Card shall set at least bits 0 and 2 */
if (!(scr->bus_widths & SD_SCR_BUS_WIDTH_1) ||
!(scr->bus_widths & SD_SCR_BUS_WIDTH_4)) {
pr_err("%s: invalid bus width\n", mmc_hostname(card->host));
return -EINVAL;
}
return 0;
}
/*
* Fetch and process SD Status register.
*/
static int mmc_read_ssr(struct mmc_card *card)
{
unsigned int au, es, et, eo;
__be32 *raw_ssr;
u32 resp[4] = {};
u8 discard_support;
int i;
if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
pr_warn("%s: card lacks mandatory SD Status function\n",
mmc_hostname(card->host));
return 0;
}
raw_ssr = kmalloc(sizeof(card->raw_ssr), GFP_KERNEL);
if (!raw_ssr)
return -ENOMEM;
if (mmc_app_sd_status(card, raw_ssr)) {
pr_warn("%s: problem reading SD Status register\n",
mmc_hostname(card->host));
kfree(raw_ssr);
return 0;
}
for (i = 0; i < 16; i++)
card->raw_ssr[i] = be32_to_cpu(raw_ssr[i]);
kfree(raw_ssr);
/*
* UNSTUFF_BITS only works with four u32s so we have to offset the
* bitfield positions accordingly.
*/
au = UNSTUFF_BITS(card->raw_ssr, 428 - 384, 4);
if (au) {
if (au <= 9 || card->scr.sda_spec3) {
card->ssr.au = sd_au_size[au];
es = UNSTUFF_BITS(card->raw_ssr, 408 - 384, 16);
et = UNSTUFF_BITS(card->raw_ssr, 402 - 384, 6);
if (es && et) {
eo = UNSTUFF_BITS(card->raw_ssr, 400 - 384, 2);
card->ssr.erase_timeout = (et * 1000) / es;
card->ssr.erase_offset = eo * 1000;
}
} else {
pr_warn("%s: SD Status: Invalid Allocation Unit size\n",
mmc_hostname(card->host));
}
}
/*
* Starting with SD 5.1, discard is supported if DISCARD_SUPPORT (bit 313) is set.
*/
resp[3] = card->raw_ssr[6];
discard_support = UNSTUFF_BITS(resp, 313 - 288, 1);
card->erase_arg = (card->scr.sda_specx && discard_support) ?
SD_DISCARD_ARG : SD_ERASE_ARG;
return 0;
}
/*
* Fetches and decodes switch information
*/
static int mmc_read_switch(struct mmc_card *card)
{
int err;
u8 *status;
if (card->scr.sda_vsn < SCR_SPEC_VER_1)
return 0;
if (!(card->csd.cmdclass & CCC_SWITCH)) {
pr_warn("%s: card lacks mandatory switch function, performance might suffer\n",
mmc_hostname(card->host));
return 0;
}
status = kmalloc(64, GFP_KERNEL);
if (!status)
return -ENOMEM;
/*
* Find out the card's support bits with a mode 0 operation.
* The argument does not matter, as the support bits do not
* change with the arguments.
*/
err = mmc_sd_switch(card, 0, 0, 0, status);
if (err) {
/*
* If the host or the card can't do the switch,
* fail more gracefully.
*/
if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
goto out;
pr_warn("%s: problem reading Bus Speed modes\n",
mmc_hostname(card->host));
err = 0;
goto out;
}
if (status[13] & SD_MODE_HIGH_SPEED)
card->sw_caps.hs_max_dtr = HIGH_SPEED_MAX_DTR;
if (card->scr.sda_spec3) {
card->sw_caps.sd3_bus_mode = status[13];
/* Driver Strengths supported by the card */
card->sw_caps.sd3_drv_type = status[9];
card->sw_caps.sd3_curr_limit = status[7] | status[6] << 8;
}
out:
kfree(status);
return err;
}
/*
* Test if the card supports high-speed mode and, if so, switch to it.
*/
int mmc_sd_switch_hs(struct mmc_card *card)
{
int err;
u8 *status;
if (card->scr.sda_vsn < SCR_SPEC_VER_1)
return 0;
if (!(card->csd.cmdclass & CCC_SWITCH))
return 0;
if (!(card->host->caps & MMC_CAP_SD_HIGHSPEED))
return 0;
if (card->sw_caps.hs_max_dtr == 0)
return 0;
status = kmalloc(64, GFP_KERNEL);
if (!status)
return -ENOMEM;
err = mmc_sd_switch(card, 1, 0, HIGH_SPEED_BUS_SPEED, status);
if (err)
goto out;
if ((status[16] & 0xF) != HIGH_SPEED_BUS_SPEED) {
pr_warn("%s: Problem switching card into high-speed mode!\n",
mmc_hostname(card->host));
err = 0;
} else {
err = 1;
}
out:
kfree(status);
return err;
}
static int sd_select_driver_type(struct mmc_card *card, u8 *status)
{
int card_drv_type, drive_strength, drv_type;
int err;
card->drive_strength = 0;
card_drv_type = card->sw_caps.sd3_drv_type | SD_DRIVER_TYPE_B;
drive_strength = mmc_select_drive_strength(card,
card->sw_caps.uhs_max_dtr,
card_drv_type, &drv_type);
if (drive_strength) {
err = mmc_sd_switch(card, 1, 2, drive_strength, status);
if (err)
return err;
if ((status[15] & 0xF) != drive_strength) {
pr_warn("%s: Problem setting drive strength!\n",
mmc_hostname(card->host));
return 0;
}
card->drive_strength = drive_strength;
}
if (drv_type)
mmc_set_driver_type(card->host, drv_type);
return 0;
}
static void sd_update_bus_speed_mode(struct mmc_card *card)
{
/*
* If the host doesn't support any of the UHS-I modes, fallback on
* default speed.
*/
if (!mmc_host_uhs(card->host)) {
card->sd_bus_speed = 0;
return;
}
if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
} else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
SD_MODE_UHS_SDR50)) {
card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
SD_MODE_UHS_SDR12)) {
card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
}
}
static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
{
int err;
unsigned int timing = 0;
switch (card->sd_bus_speed) {
case UHS_SDR104_BUS_SPEED:
timing = MMC_TIMING_UHS_SDR104;
card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
break;
case UHS_DDR50_BUS_SPEED:
timing = MMC_TIMING_UHS_DDR50;
card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
break;
case UHS_SDR50_BUS_SPEED:
timing = MMC_TIMING_UHS_SDR50;
card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
break;
case UHS_SDR25_BUS_SPEED:
timing = MMC_TIMING_UHS_SDR25;
card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
break;
case UHS_SDR12_BUS_SPEED:
timing = MMC_TIMING_UHS_SDR12;
card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
break;
default:
return 0;
}
err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status);
if (err)
return err;
if ((status[16] & 0xF) != card->sd_bus_speed)
pr_warn("%s: Problem setting bus speed mode!\n",
mmc_hostname(card->host));
else {
mmc_set_timing(card->host, timing);
mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
}
return 0;
}
/* Get host's max current setting at its current voltage */
static u32 sd_get_host_max_current(struct mmc_host *host)
{
u32 voltage, max_current;
voltage = 1 << host->ios.vdd;
switch (voltage) {
case MMC_VDD_165_195:
max_current = host->max_current_180;
break;
case MMC_VDD_29_30:
case MMC_VDD_30_31:
max_current = host->max_current_300;
break;
case MMC_VDD_32_33:
case MMC_VDD_33_34:
max_current = host->max_current_330;
break;
default:
max_current = 0;
}
return max_current;
}
static int sd_set_current_limit(struct mmc_card *card, u8 *status)
{
int current_limit = SD_SET_CURRENT_NO_CHANGE;
int err;
u32 max_current;
/*
* Current limit switch is only defined for SDR50, SDR104, and DDR50
* bus speed modes. For other bus speed modes, we do not change the
* current limit.
*/
if ((card->sd_bus_speed != UHS_SDR50_BUS_SPEED) &&
(card->sd_bus_speed != UHS_SDR104_BUS_SPEED) &&
(card->sd_bus_speed != UHS_DDR50_BUS_SPEED))
return 0;
/*
* Host has different current capabilities when operating at
* different voltages, so find out its max current first.
*/
max_current = sd_get_host_max_current(card->host);
/*
* We only check host's capability here, if we set a limit that is
* higher than the card's maximum current, the card will be using its
* maximum current, e.g. if the card's maximum current is 300ma, and
* when we set current limit to 200ma, the card will draw 200ma, and
* when we set current limit to 400/600/800ma, the card will draw its
* maximum 300ma from the host.
*
* The above is incorrect: if we try to set a current limit that is
* not supported by the card, the card can rightfully error out the
* attempt, and remain at the default current limit. This results
* in a 300mA card being limited to 200mA even though the host
* supports 800mA. Failures seen with SanDisk 8GB UHS cards with
* an iMX6 host. --rmk
*/
if (max_current >= 800 &&
card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_800)
current_limit = SD_SET_CURRENT_LIMIT_800;
else if (max_current >= 600 &&
card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_600)
current_limit = SD_SET_CURRENT_LIMIT_600;
else if (max_current >= 400 &&
card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_400)
current_limit = SD_SET_CURRENT_LIMIT_400;
else if (max_current >= 200 &&
card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_200)
current_limit = SD_SET_CURRENT_LIMIT_200;
if (current_limit != SD_SET_CURRENT_NO_CHANGE) {
err = mmc_sd_switch(card, 1, 3, current_limit, status);
if (err)
return err;
if (((status[15] >> 4) & 0x0F) != current_limit)
pr_warn("%s: Problem setting current limit!\n",
mmc_hostname(card->host));
}
return 0;
}
/*
* UHS-I specific initialization procedure
*/
static int mmc_sd_init_uhs_card(struct mmc_card *card)
{
int err;
u8 *status;
if (!(card->csd.cmdclass & CCC_SWITCH))
return 0;
status = kmalloc(64, GFP_KERNEL);
if (!status)
return -ENOMEM;
/* Set 4-bit bus width */
err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
if (err)
goto out;
mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
/*
* Select the bus speed mode depending on host
* and card capability.
*/
sd_update_bus_speed_mode(card);
/* Set the driver strength for the card */
err = sd_select_driver_type(card, status);
if (err)
goto out;
/* Set current limit for the card */
err = sd_set_current_limit(card, status);
if (err)
goto out;
/* Set bus speed mode of the card */
err = sd_set_bus_speed_mode(card, status);
if (err)
goto out;
/*
* SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and
* SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
*/
if (!mmc_host_is_spi(card->host) &&
(card->host->ios.timing == MMC_TIMING_UHS_SDR50 ||
card->host->ios.timing == MMC_TIMING_UHS_DDR50 ||
card->host->ios.timing == MMC_TIMING_UHS_SDR104)) {
err = mmc_execute_tuning(card);
/*
* As SD Specifications Part1 Physical Layer Specification
* Version 3.01 says, CMD19 tuning is available for unlocked
* cards in transfer state of 1.8V signaling mode. The small
* difference between v3.00 and 3.01 spec means that CMD19
* tuning is also available for DDR50 mode.
*/
if (err && card->host->ios.timing == MMC_TIMING_UHS_DDR50) {
pr_warn("%s: ddr50 tuning failed\n",
mmc_hostname(card->host));
err = 0;
}
}
out:
kfree(status);
return err;
}
MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
card->raw_cid[2], card->raw_cid[3]);
MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
card->raw_csd[2], card->raw_csd[3]);
MMC_DEV_ATTR(scr, "%08x%08x\n", card->raw_scr[0], card->raw_scr[1]);
MMC_DEV_ATTR(ssr,
"%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x\n",
card->raw_ssr[0], card->raw_ssr[1], card->raw_ssr[2],
card->raw_ssr[3], card->raw_ssr[4], card->raw_ssr[5],
card->raw_ssr[6], card->raw_ssr[7], card->raw_ssr[8],
card->raw_ssr[9], card->raw_ssr[10], card->raw_ssr[11],
card->raw_ssr[12], card->raw_ssr[13], card->raw_ssr[14],
card->raw_ssr[15]);
MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev);
MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
static ssize_t mmc_dsr_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct mmc_card *card = mmc_dev_to_card(dev);
struct mmc_host *host = card->host;
if (card->csd.dsr_imp && host->dsr_req)
return sysfs_emit(buf, "0x%x\n", host->dsr);
/* return default DSR value */
return sysfs_emit(buf, "0x%x\n", 0x404);
}
static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
MMC_DEV_ATTR(vendor, "0x%04x\n", card->cis.vendor);
MMC_DEV_ATTR(device, "0x%04x\n", card->cis.device);
MMC_DEV_ATTR(revision, "%u.%u\n", card->major_rev, card->minor_rev);
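/*
 * Generate the infoN sysfs attributes; each returns -ENODATA when the
 * card carries fewer CIS info strings than N.
 */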
#define sdio_info_attr(num) \
static ssize_t info##num##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct mmc_card *card = mmc_dev_to_card(dev); \
\
if (num > card->num_info) \
return -ENODATA; \
if (!card->info[num - 1][0]) \
return 0; \
return sysfs_emit(buf, "%s\n", card->info[num - 1]); \
} \
static DEVICE_ATTR_RO(info##num)
sdio_info_attr(1);
sdio_info_attr(2);
sdio_info_attr(3);
sdio_info_attr(4);
static struct attribute *sd_std_attrs[] = {
&dev_attr_vendor.attr,
&dev_attr_device.attr,
&dev_attr_revision.attr,
&dev_attr_info1.attr,
&dev_attr_info2.attr,
&dev_attr_info3.attr,
&dev_attr_info4.attr,
&dev_attr_cid.attr,
&dev_attr_csd.attr,
&dev_attr_scr.attr,
&dev_attr_ssr.attr,
&dev_attr_date.attr,
&dev_attr_erase_size.attr,
&dev_attr_preferred_erase_size.attr,
&dev_attr_fwrev.attr,
&dev_attr_hwrev.attr,
&dev_attr_manfid.attr,
&dev_attr_name.attr,
&dev_attr_oemid.attr,
&dev_attr_serial.attr,
&dev_attr_ocr.attr,
&dev_attr_rca.attr,
&dev_attr_dsr.attr,
NULL,
};
static umode_t sd_std_is_visible(struct kobject *kobj, struct attribute *attr,
int index)
{
struct device *dev = kobj_to_dev(kobj);
struct mmc_card *card = mmc_dev_to_card(dev);
/* CIS vendor and device ids, revision and info string are available only for Combo cards */
if ((attr == &dev_attr_vendor.attr ||
attr == &dev_attr_device.attr ||
attr == &dev_attr_revision.attr ||
attr == &dev_attr_info1.attr ||
attr == &dev_attr_info2.attr ||
attr == &dev_attr_info3.attr ||
attr == &dev_attr_info4.attr
) && !mmc_card_sd_combo(card))
return 0;
return attr->mode;
}
static const struct attribute_group sd_std_group = {
.attrs = sd_std_attrs,
.is_visible = sd_std_is_visible,
};
__ATTRIBUTE_GROUPS(sd_std);
struct device_type sd_type = {
.groups = sd_std_groups,
};
/*
* Fetch CID from card.
*/
int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
{
int err;
u32 max_current;
int retries = 10;
u32 pocr = ocr;
try_again:
if (!retries) {
ocr &= ~SD_OCR_S18R;
pr_warn("%s: Skipping voltage switch\n", mmc_hostname(host));
}
/*
* Since we're changing the OCR value, we seem to
* need to tell some cards to go back to the idle
* state. We wait 1ms to give cards time to
* respond.
*/
mmc_go_idle(host);
/*
* If SD_SEND_IF_COND indicates an SD 2.0-compliant
* card, set bit 30 of the OCR to indicate that we
* can handle block-addressed SDHC cards.
*/
err = mmc_send_if_cond(host, ocr);
if (!err)
ocr |= SD_OCR_CCS;
/*
* If the host supports one of UHS-I modes, request the card
* to switch to 1.8V signaling level. If the card has failed
* repeatedly to switch however, skip this.
*/
if (retries && mmc_host_uhs(host))
ocr |= SD_OCR_S18R;
/*
* If the host can supply more than 150mA at current voltage,
* XPC should be set to 1.
*/
max_current = sd_get_host_max_current(host);
if (max_current > 150)
ocr |= SD_OCR_XPC;
err = mmc_send_app_op_cond(host, ocr, rocr);
if (err)
return err;
/*
* In case the S18A bit is set in the response, let's start the signal
* voltage switch procedure. SPI mode doesn't support CMD11.
* Note that, according to the spec, the S18A bit is not valid unless
* the CCS bit is set as well. We deliberately deviate from the spec
* here, which allows UHS-I to be supported for SDSC cards.
*/
if (!mmc_host_is_spi(host) && (ocr & SD_OCR_S18R) &&
rocr && (*rocr & SD_ROCR_S18A)) {
err = mmc_set_uhs_voltage(host, pocr);
if (err == -EAGAIN) {
retries--;
goto try_again;
} else if (err) {
retries = 0;
goto try_again;
}
}
err = mmc_send_cid(host, cid);
return err;
}
int mmc_sd_get_csd(struct mmc_card *card)
{
int err;
/*
* Fetch CSD from card.
*/
err = mmc_send_csd(card, card->raw_csd);
if (err)
return err;
err = mmc_decode_csd(card);
if (err)
return err;
return 0;
}
static int mmc_sd_get_ro(struct mmc_host *host)
{
int ro;
/*
* Some systems don't feature a write-protect pin and don't need one.
* E.g. because they only have a micro-SD card slot. For those systems
* assume that the SD card is always read-write.
*/
if (host->caps2 & MMC_CAP2_NO_WRITE_PROTECT)
return 0;
if (!host->ops->get_ro)
return -1;
ro = host->ops->get_ro(host);
return ro;
}
int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
bool reinit)
{
int err;
if (!reinit) {
/*
* Fetch SCR from card.
*/
err = mmc_app_send_scr(card);
if (err)
return err;
err = mmc_decode_scr(card);
if (err)
return err;
/*
* Fetch and process SD Status register.
*/
err = mmc_read_ssr(card);
if (err)
return err;
/* Erase init depends on CSD and SSR */
mmc_init_erase(card);
}
/*
* Fetch switch information from card. Note, sd3_bus_mode can change if
* voltage switch outcome changes, so do this always.
*/
err = mmc_read_switch(card);
if (err)
return err;
/*
* For SPI, enable CRC as appropriate.
* This CRC enable is located AFTER the reading of the
* card registers because some SDHC cards are not able
* to provide valid CRCs for non-512-byte blocks.
*/
if (mmc_host_is_spi(host)) {
err = mmc_spi_set_crc(host, use_spi_crc);
if (err)
return err;
}
/*
* Check if read-only switch is active.
*/
if (!reinit) {
int ro = mmc_sd_get_ro(host);
if (ro < 0) {
pr_warn("%s: host does not support reading read-only switch, assuming write-enable\n",
mmc_hostname(host));
} else if (ro > 0) {
mmc_card_set_readonly(card);
}
}
return 0;
}
unsigned mmc_sd_get_max_clock(struct mmc_card *card)
{
unsigned max_dtr = (unsigned int)-1;
if (mmc_card_hs(card)) {
if (max_dtr > card->sw_caps.hs_max_dtr)
max_dtr = card->sw_caps.hs_max_dtr;
} else if (max_dtr > card->csd.max_dtr) {
max_dtr = card->csd.max_dtr;
}
return max_dtr;
}
static bool mmc_sd_card_using_v18(struct mmc_card *card)
{
/*
* According to the SD spec., the Bus Speed Mode (function group 1) bits
* 2 to 4 are zero if the card is initialized at 3.3V signal level. Thus
* they can be used to determine if the card has already switched to
* 1.8V signaling.
*/
return card->sw_caps.sd3_bus_mode &
(SD_MODE_UHS_SDR50 | SD_MODE_UHS_SDR104 | SD_MODE_UHS_DDR50);
}
static int sd_write_ext_reg(struct mmc_card *card, u8 fno, u8 page, u16 offset,
u8 reg_data)
{
struct mmc_host *host = card->host;
struct mmc_request mrq = {};
struct mmc_command cmd = {};
struct mmc_data data = {};
struct scatterlist sg;
u8 *reg_buf;
reg_buf = kzalloc(512, GFP_KERNEL);
if (!reg_buf)
return -ENOMEM;
mrq.cmd = &cmd;
mrq.data = &data;
/*
* Arguments of CMD49:
* [31:31] MIO (0 = memory).
* [30:27] FNO (function number).
* [26:26] MW - mask write mode (0 = disable).
* [25:18] page number.
* [17:9] offset address.
* [8:0] length (0 = 1 byte).
*/
cmd.arg = fno << 27 | page << 18 | offset << 9;
/* The first byte in the buffer is the data to be written. */
reg_buf[0] = reg_data;
data.flags = MMC_DATA_WRITE;
data.blksz = 512;
data.blocks = 1;
data.sg = &sg;
data.sg_len = 1;
sg_init_one(&sg, reg_buf, 512);
cmd.opcode = SD_WRITE_EXTR_SINGLE;
cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
mmc_set_data_timeout(&data, card);
mmc_wait_for_req(host, &mrq);
kfree(reg_buf);
/*
* Note that the SD card is allowed to signal busy on DAT0 for up
* to 1s after CMD49. We leave waiting for that to the caller.
*/
if (cmd.error)
return cmd.error;
if (data.error)
return data.error;
return 0;
}
static int sd_read_ext_reg(struct mmc_card *card, u8 fno, u8 page,
u16 offset, u16 len, u8 *reg_buf)
{
u32 cmd_args;
/*
* Command arguments of CMD48:
* [31:31] MIO (0 = memory).
* [30:27] FNO (function number).
* [26:26] reserved (0).
* [25:18] page number.
* [17:9] offset address.
* [8:0] length (0 = 1 byte, 1ff = 512 bytes).
*/
cmd_args = fno << 27 | page << 18 | offset << 9 | (len - 1);
return mmc_send_adtc_data(card, card->host, SD_READ_EXTR_SINGLE,
cmd_args, reg_buf, 512);
}
static int sd_parse_ext_reg_power(struct mmc_card *card, u8 fno, u8 page,
u16 offset)
{
int err;
u8 *reg_buf;
reg_buf = kzalloc(512, GFP_KERNEL);
if (!reg_buf)
return -ENOMEM;
/* Read the extension register for power management function. */
err = sd_read_ext_reg(card, fno, page, offset, 512, reg_buf);
if (err) {
pr_warn("%s: error %d reading PM func of ext reg\n",
mmc_hostname(card->host), err);
goto out;
}
/* PM revision consists of 4 bits. */
card->ext_power.rev = reg_buf[0] & 0xf;
/* Power Off Notification support at bit 4. */
if (reg_buf[1] & BIT(4))
card->ext_power.feature_support |= SD_EXT_POWER_OFF_NOTIFY;
/* Power Sustenance support at bit 5. */
if (reg_buf[1] & BIT(5))
card->ext_power.feature_support |= SD_EXT_POWER_SUSTENANCE;
/* Power Down Mode support at bit 6. */
if (reg_buf[1] & BIT(6))
card->ext_power.feature_support |= SD_EXT_POWER_DOWN_MODE;
card->ext_power.fno = fno;
card->ext_power.page = page;
card->ext_power.offset = offset;
out:
kfree(reg_buf);
return err;
}
static int sd_parse_ext_reg_perf(struct mmc_card *card, u8 fno, u8 page,
u16 offset)
{
int err;
u8 *reg_buf;
reg_buf = kzalloc(512, GFP_KERNEL);
if (!reg_buf)
return -ENOMEM;
err = sd_read_ext_reg(card, fno, page, offset, 512, reg_buf);
if (err) {
pr_warn("%s: error %d reading PERF func of ext reg\n",
mmc_hostname(card->host), err);
goto out;
}
/* PERF revision. */
card->ext_perf.rev = reg_buf[0];
/* FX_EVENT support at bit 0. */
if (reg_buf[1] & BIT(0))
card->ext_perf.feature_support |= SD_EXT_PERF_FX_EVENT;
/* Card initiated self-maintenance support at bit 0. */
if (reg_buf[2] & BIT(0))
card->ext_perf.feature_support |= SD_EXT_PERF_CARD_MAINT;
/* Host initiated self-maintenance support at bit 1. */
if (reg_buf[2] & BIT(1))
card->ext_perf.feature_support |= SD_EXT_PERF_HOST_MAINT;
/* Cache support at bit 0. */
if ((reg_buf[4] & BIT(0)) && !mmc_card_broken_sd_cache(card))
card->ext_perf.feature_support |= SD_EXT_PERF_CACHE;
/* Command queue support indicated via queue depth bits (0 to 4). */
if (reg_buf[6] & 0x1f)
card->ext_perf.feature_support |= SD_EXT_PERF_CMD_QUEUE;
card->ext_perf.fno = fno;
card->ext_perf.page = page;
card->ext_perf.offset = offset;
out:
kfree(reg_buf);
return err;
}
static int sd_parse_ext_reg(struct mmc_card *card, u8 *gen_info_buf,
u16 *next_ext_addr)
{
u8 num_regs, fno, page;
u16 sfc, offset, ext = *next_ext_addr;
u32 reg_addr;
/*
* Parse only one register set per extension, as that is sufficient to
* support the standard functions. This means another 48 bytes in the
* buffer must be available.
*/
if (ext + 48 > 512)
return -EFAULT;
/* Standard Function Code */
memcpy(&sfc, &gen_info_buf[ext], 2);
/* Address to the next extension. */
memcpy(next_ext_addr, &gen_info_buf[ext + 40], 2);
/* Number of registers for this extension. */
num_regs = gen_info_buf[ext + 42];
/* We support only one register per extension. */
if (num_regs != 1)
return 0;
/* Extension register address. */
memcpy(&reg_addr, &gen_info_buf[ext + 44], 4);
/* 9 bits (0 to 8) contains the offset address. */
offset = reg_addr & 0x1ff;
/* 8 bits (9 to 16) contains the page number. */
page = reg_addr >> 9 & 0xff;
/* 4 bits (18 to 21) contains the function number. */
fno = reg_addr >> 18 & 0xf;
/* Standard Function Code for power management. */
if (sfc == 0x1)
return sd_parse_ext_reg_power(card, fno, page, offset);
/* Standard Function Code for performance enhancement. */
if (sfc == 0x2)
return sd_parse_ext_reg_perf(card, fno, page, offset);
return 0;
}
static int sd_read_ext_regs(struct mmc_card *card)
{
int err, i;
u8 num_ext, *gen_info_buf;
u16 rev, len, next_ext_addr;
if (mmc_host_is_spi(card->host))
return 0;
if (!(card->scr.cmds & SD_SCR_CMD48_SUPPORT))
return 0;
gen_info_buf = kzalloc(512, GFP_KERNEL);
if (!gen_info_buf)
return -ENOMEM;
/*
* Read 512 bytes of general info, which is found at function number 0,
* at page 0 and with no offset.
*/
err = sd_read_ext_reg(card, 0, 0, 0, 512, gen_info_buf);
if (err) {
pr_err("%s: error %d reading general info of SD ext reg\n",
mmc_hostname(card->host), err);
goto out;
}
/* General info structure revision. */
memcpy(&rev, &gen_info_buf[0], 2);
/* Length of general info in bytes. */
memcpy(&len, &gen_info_buf[2], 2);
/* Number of extensions to be found. */
num_ext = gen_info_buf[4];
/*
* We only support revision 0 and limit it to 512 bytes for simplicity.
* No matter what, let's return zero to allow us to continue using the
* card, even if we can't support the features from the SD function
* extensions registers.
*/
if (rev != 0 || len > 512) {
pr_warn("%s: non-supported SD ext reg layout\n",
mmc_hostname(card->host));
goto out;
}
/*
* Parse the extension registers. The first extension should start
* immediately after the general info header (16 bytes).
*/
next_ext_addr = 16;
for (i = 0; i < num_ext; i++) {
err = sd_parse_ext_reg(card, gen_info_buf, &next_ext_addr);
if (err) {
pr_err("%s: error %d parsing SD ext reg\n",
mmc_hostname(card->host), err);
goto out;
}
}
out:
kfree(gen_info_buf);
return err;
}
static bool sd_cache_enabled(struct mmc_host *host)
{
return host->card->ext_perf.feature_enabled & SD_EXT_PERF_CACHE;
}
static int sd_flush_cache(struct mmc_host *host)
{
struct mmc_card *card = host->card;
u8 *reg_buf, fno, page;
u16 offset;
int err;
if (!sd_cache_enabled(host))
return 0;
reg_buf = kzalloc(512, GFP_KERNEL);
if (!reg_buf)
return -ENOMEM;
/*
* Set Flush Cache at bit 0 in the performance enhancement register at
* 261 bytes offset.
*/
fno = card->ext_perf.fno;
page = card->ext_perf.page;
offset = card->ext_perf.offset + 261;
err = sd_write_ext_reg(card, fno, page, offset, BIT(0));
if (err) {
pr_warn("%s: error %d writing Cache Flush bit\n",
mmc_hostname(host), err);
goto out;
}
err = mmc_poll_for_busy(card, SD_WRITE_EXTR_SINGLE_TIMEOUT_MS, false,
MMC_BUSY_EXTR_SINGLE);
if (err)
goto out;
/*
* Read the Flush Cache bit. The card shall reset it to confirm
* that it has completed flushing the cache.
*/
err = sd_read_ext_reg(card, fno, page, offset, 1, reg_buf);
if (err) {
pr_warn("%s: error %d reading Cache Flush bit\n",
mmc_hostname(host), err);
goto out;
}
if (reg_buf[0] & BIT(0))
err = -ETIMEDOUT;
out:
kfree(reg_buf);
return err;
}
static int sd_enable_cache(struct mmc_card *card)
{
u8 *reg_buf;
int err;
card->ext_perf.feature_enabled &= ~SD_EXT_PERF_CACHE;
reg_buf = kzalloc(512, GFP_KERNEL);
if (!reg_buf)
return -ENOMEM;
/*
* Set Cache Enable at bit 0 in the performance enhancement register at
* 260 bytes offset.
*/
err = sd_write_ext_reg(card, card->ext_perf.fno, card->ext_perf.page,
card->ext_perf.offset + 260, BIT(0));
if (err) {
pr_warn("%s: error %d writing Cache Enable bit\n",
mmc_hostname(card->host), err);
goto out;
}
err = mmc_poll_for_busy(card, SD_WRITE_EXTR_SINGLE_TIMEOUT_MS, false,
MMC_BUSY_EXTR_SINGLE);
if (!err)
card->ext_perf.feature_enabled |= SD_EXT_PERF_CACHE;
out:
kfree(reg_buf);
return err;
}
/*
* Handle the detection and initialisation of a card.
*
* In the case of a resume, "oldcard" will contain the card
* we're trying to reinitialise.
*/
static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
struct mmc_card *oldcard)
{
struct mmc_card *card;
int err;
u32 cid[4];
u32 rocr = 0;
bool v18_fixup_failed = false;
WARN_ON(!host->claimed);
retry:
err = mmc_sd_get_cid(host, ocr, cid, &rocr);
if (err)
return err;
if (oldcard) {
if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
pr_debug("%s: Perhaps the card was replaced\n",
mmc_hostname(host));
return -ENOENT;
}
card = oldcard;
} else {
/*
* Allocate card structure.
*/
card = mmc_alloc_card(host, &sd_type);
if (IS_ERR(card))
return PTR_ERR(card);
card->ocr = ocr;
card->type = MMC_TYPE_SD;
memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
}
/*
* Call the optional HC's init_card function to handle quirks.
*/
if (host->ops->init_card)
host->ops->init_card(host, card);
/*
* For native busses: get card RCA and quit open drain mode.
*/
if (!mmc_host_is_spi(host)) {
err = mmc_send_relative_addr(host, &card->rca);
if (err)
goto free_card;
}
if (!oldcard) {
err = mmc_sd_get_csd(card);
if (err)
goto free_card;
mmc_decode_cid(card);
}
/*
* handling only for cards supporting DSR and hosts requesting
* DSR configuration
*/
if (card->csd.dsr_imp && host->dsr_req)
mmc_set_dsr(host);
/*
* Select card, as all following commands rely on that.
*/
if (!mmc_host_is_spi(host)) {
err = mmc_select_card(card);
if (err)
goto free_card;
}
err = mmc_sd_setup_card(host, card, oldcard != NULL);
if (err)
goto free_card;
/*
* If the card has not been power cycled, it may still be using 1.8V
* signaling. Detect that situation and try to initialize a UHS-I (1.8V)
* transfer mode.
*/
if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) &&
mmc_sd_card_using_v18(card) &&
host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
if (mmc_host_set_uhs_voltage(host) ||
mmc_sd_init_uhs_card(card)) {
v18_fixup_failed = true;
mmc_power_cycle(host, ocr);
if (!oldcard)
mmc_remove_card(card);
goto retry;
}
goto cont;
}
/* Initialization sequence for UHS-I cards */
if (rocr & SD_ROCR_S18A && mmc_host_uhs(host)) {
err = mmc_sd_init_uhs_card(card);
if (err)
goto free_card;
} else {
/*
* Attempt to change to high-speed (if supported)
*/
err = mmc_sd_switch_hs(card);
if (err > 0)
mmc_set_timing(card->host, MMC_TIMING_SD_HS);
else if (err)
goto free_card;
/*
* Set bus speed.
*/
mmc_set_clock(host, mmc_sd_get_max_clock(card));
if (host->ios.timing == MMC_TIMING_SD_HS &&
host->ops->prepare_sd_hs_tuning) {
err = host->ops->prepare_sd_hs_tuning(host, card);
if (err)
goto free_card;
}
/*
* Switch to wider bus (if supported).
*/
if ((host->caps & MMC_CAP_4_BIT_DATA) &&
(card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
if (err)
goto free_card;
mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
}
if (host->ios.timing == MMC_TIMING_SD_HS &&
host->ops->execute_sd_hs_tuning) {
err = host->ops->execute_sd_hs_tuning(host, card);
if (err)
goto free_card;
}
}
cont:
if (!oldcard) {
/* Read/parse the extension registers. */
err = sd_read_ext_regs(card);
if (err)
goto free_card;
}
/* Enable internal SD cache if supported. */
if (card->ext_perf.feature_support & SD_EXT_PERF_CACHE) {
err = sd_enable_cache(card);
if (err)
goto free_card;
}
if (host->cqe_ops && !host->cqe_enabled) {
err = host->cqe_ops->cqe_enable(host, card);
if (!err) {
host->cqe_enabled = true;
host->hsq_enabled = true;
pr_info("%s: Host Software Queue enabled\n",
mmc_hostname(host));
}
}
if (host->caps2 & MMC_CAP2_AVOID_3_3V &&
host->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
pr_err("%s: Host failed to negotiate down from 3.3V\n",
mmc_hostname(host));
err = -EINVAL;
goto free_card;
}
host->card = card;
return 0;
free_card:
if (!oldcard)
mmc_remove_card(card);
return err;
}
/*
* Host is being removed. Free up the current card.
*/
static void mmc_sd_remove(struct mmc_host *host)
{
mmc_remove_card(host->card);
host->card = NULL;
}
/*
* Card detection - card is alive.
*/
static int mmc_sd_alive(struct mmc_host *host)
{
return mmc_send_status(host->card, NULL);
}
/*
* Card detection callback from host.
*/
static void mmc_sd_detect(struct mmc_host *host)
{
int err;
mmc_get_card(host->card, NULL);
/*
* Just check if our card has been removed.
*/
err = _mmc_detect_card_removed(host);
mmc_put_card(host->card, NULL);
if (err) {
mmc_sd_remove(host);
mmc_claim_host(host);
mmc_detach_bus(host);
mmc_power_off(host);
mmc_release_host(host);
}
}
static int sd_can_poweroff_notify(struct mmc_card *card)
{
return card->ext_power.feature_support & SD_EXT_POWER_OFF_NOTIFY;
}
static int sd_busy_poweroff_notify_cb(void *cb_data, bool *busy)
{
struct sd_busy_data *data = cb_data;
struct mmc_card *card = data->card;
int err;
/*
* Read the status register for the power management function. It's at
* one byte offset and is one byte long. The Power Off Notification
* Ready is bit 0.
*/
err = sd_read_ext_reg(card, card->ext_power.fno, card->ext_power.page,
card->ext_power.offset + 1, 1, data->reg_buf);
if (err) {
pr_warn("%s: error %d reading status reg of PM func\n",
mmc_hostname(card->host), err);
return err;
}
*busy = !(data->reg_buf[0] & BIT(0));
return 0;
}
static int sd_poweroff_notify(struct mmc_card *card)
{
struct sd_busy_data cb_data;
u8 *reg_buf;
int err;
reg_buf = kzalloc(512, GFP_KERNEL);
if (!reg_buf)
return -ENOMEM;
/*
* Set the Power Off Notification bit in the power management settings
* register at 2 bytes offset.
*/
err = sd_write_ext_reg(card, card->ext_power.fno, card->ext_power.page,
card->ext_power.offset + 2, BIT(0));
if (err) {
pr_warn("%s: error %d writing Power Off Notify bit\n",
mmc_hostname(card->host), err);
goto out;
}
/* Find out when the command is completed. */
err = mmc_poll_for_busy(card, SD_WRITE_EXTR_SINGLE_TIMEOUT_MS, false,
MMC_BUSY_EXTR_SINGLE);
if (err)
goto out;
cb_data.card = card;
cb_data.reg_buf = reg_buf;
err = __mmc_poll_for_busy(card->host, 0, SD_POWEROFF_NOTIFY_TIMEOUT_MS,
&sd_busy_poweroff_notify_cb, &cb_data);
out:
kfree(reg_buf);
return err;
}
static int _mmc_sd_suspend(struct mmc_host *host)
{
struct mmc_card *card = host->card;
int err = 0;
mmc_claim_host(host);
if (mmc_card_suspended(card))
goto out;
if (sd_can_poweroff_notify(card))
err = sd_poweroff_notify(card);
else if (!mmc_host_is_spi(host))
err = mmc_deselect_cards(host);
if (!err) {
mmc_power_off(host);
mmc_card_set_suspended(card);
}
out:
mmc_release_host(host);
return err;
}
/*
* Callback for suspend
*/
static int mmc_sd_suspend(struct mmc_host *host)
{
int err;
err = _mmc_sd_suspend(host);
if (!err) {
pm_runtime_disable(&host->card->dev);
pm_runtime_set_suspended(&host->card->dev);
}
return err;
}
/*
* This function tries to determine if the same card is still present
* and, if so, restore all state to it.
*/
static int _mmc_sd_resume(struct mmc_host *host)
{
int err = 0;
mmc_claim_host(host);
if (!mmc_card_suspended(host->card))
goto out;
mmc_power_up(host, host->card->ocr);
err = mmc_sd_init_card(host, host->card->ocr, host->card);
mmc_card_clr_suspended(host->card);
out:
mmc_release_host(host);
return err;
}
/*
* Callback for resume
*/
static int mmc_sd_resume(struct mmc_host *host)
{
pm_runtime_enable(&host->card->dev);
return 0;
}
/*
* Callback for runtime_suspend.
*/
static int mmc_sd_runtime_suspend(struct mmc_host *host)
{
int err;
if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
return 0;
err = _mmc_sd_suspend(host);
if (err)
pr_err("%s: error %d doing aggressive suspend\n",
mmc_hostname(host), err);
return err;
}
/*
* Callback for runtime_resume.
*/
static int mmc_sd_runtime_resume(struct mmc_host *host)
{
int err;
err = _mmc_sd_resume(host);
if (err && err != -ENOMEDIUM)
pr_err("%s: error %d doing runtime resume\n",
mmc_hostname(host), err);
return 0;
}
static int mmc_sd_hw_reset(struct mmc_host *host)
{
mmc_power_cycle(host, host->card->ocr);
return mmc_sd_init_card(host, host->card->ocr, host->card);
}
static const struct mmc_bus_ops mmc_sd_ops = {
.remove = mmc_sd_remove,
.detect = mmc_sd_detect,
.runtime_suspend = mmc_sd_runtime_suspend,
.runtime_resume = mmc_sd_runtime_resume,
.suspend = mmc_sd_suspend,
.resume = mmc_sd_resume,
.alive = mmc_sd_alive,
.shutdown = mmc_sd_suspend,
.hw_reset = mmc_sd_hw_reset,
.cache_enabled = sd_cache_enabled,
.flush_cache = sd_flush_cache,
};
/*
* Starting point for SD card init.
*/
int mmc_attach_sd(struct mmc_host *host)
{
int err;
u32 ocr, rocr;
WARN_ON(!host->claimed);
err = mmc_send_app_op_cond(host, 0, &ocr);
if (err)
return err;
mmc_attach_bus(host, &mmc_sd_ops);
if (host->ocr_avail_sd)
host->ocr_avail = host->ocr_avail_sd;
/*
* We need to get OCR a different way for SPI.
*/
if (mmc_host_is_spi(host)) {
mmc_go_idle(host);
err = mmc_spi_read_ocr(host, 0, &ocr);
if (err)
goto err;
}
/*
* Some SD cards claim an out-of-spec VDD voltage range. Let's
* treat these bits as invalid, in particular bit 7.
*/
ocr &= ~0x7FFF;
rocr = mmc_select_voltage(host, ocr);
/*
* Can we support the voltage(s) of the card(s)?
*/
if (!rocr) {
err = -EINVAL;
goto err;
}
/*
* Detect and init the card.
*/
err = mmc_sd_init_card(host, rocr, NULL);
if (err)
goto err;
mmc_release_host(host);
err = mmc_add_card(host->card);
if (err)
goto remove_card;
mmc_claim_host(host);
return 0;
remove_card:
mmc_remove_card(host->card);
host->card = NULL;
mmc_claim_host(host);
err:
mmc_detach_bus(host);
pr_err("%s: error %d whilst initialising SD card\n",
mmc_hostname(host), err);
return err;
}
| linux-master | drivers/mmc/core/sd.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* linux/drivers/mmc/core/sdio_io.c
*
* Copyright 2007-2008 Pierre Ossman
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include "sdio_ops.h"
#include "core.h"
#include "card.h"
#include "host.h"
/**
* sdio_claim_host - exclusively claim a bus for a certain SDIO function
* @func: SDIO function that will be accessed
*
* Claim a bus for a set of operations. The SDIO function given
* is used to figure out which bus is relevant.
*/
void sdio_claim_host(struct sdio_func *func)
{
if (WARN_ON(!func))
return;
mmc_claim_host(func->card->host);
}
EXPORT_SYMBOL_GPL(sdio_claim_host);
/**
* sdio_release_host - release a bus for a certain SDIO function
* @func: SDIO function that was accessed
*
* Release a bus, allowing others to claim the bus for their
* operations.
*/
void sdio_release_host(struct sdio_func *func)
{
if (WARN_ON(!func))
return;
mmc_release_host(func->card->host);
}
EXPORT_SYMBOL_GPL(sdio_release_host);
/**
 * sdio_enable_func - enables an SDIO function for usage
 * @func: SDIO function to enable
 *
 * Powers up and activates an SDIO function so that register
* access is possible.
*/
int sdio_enable_func(struct sdio_func *func)
{
int ret;
unsigned char reg;
unsigned long timeout;
if (!func)
return -EINVAL;
pr_debug("SDIO: Enabling device %s...\n", sdio_func_id(func));
	ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IOEx, 0, &reg);
if (ret)
goto err;
reg |= 1 << func->num;
ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IOEx, reg, NULL);
if (ret)
goto err;
timeout = jiffies + msecs_to_jiffies(func->enable_timeout);
while (1) {
		ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IORx, 0, &reg);
if (ret)
goto err;
if (reg & (1 << func->num))
break;
ret = -ETIME;
if (time_after(jiffies, timeout))
goto err;
}
pr_debug("SDIO: Enabled device %s\n", sdio_func_id(func));
return 0;
err:
pr_debug("SDIO: Failed to enable device %s\n", sdio_func_id(func));
return ret;
}
EXPORT_SYMBOL_GPL(sdio_enable_func);
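/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the usual probe-time pairing of host claiming and function enabling in
 * a hypothetical SDIO function driver.
 */
static int __maybe_unused example_probe(struct sdio_func *func,
					const struct sdio_device_id *id)
{
	int ret;

	sdio_claim_host(func);
	ret = sdio_enable_func(func);
	sdio_release_host(func);

	return ret;
}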
/**
 * sdio_disable_func - disable an SDIO function
 * @func: SDIO function to disable
 *
 * Powers down and deactivates an SDIO function. Register access
* to this function will fail until the function is reenabled.
*/
int sdio_disable_func(struct sdio_func *func)
{
int ret;
unsigned char reg;
if (!func)
return -EINVAL;
pr_debug("SDIO: Disabling device %s...\n", sdio_func_id(func));
	ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IOEx, 0, &reg);
if (ret)
goto err;
reg &= ~(1 << func->num);
ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IOEx, reg, NULL);
if (ret)
goto err;
pr_debug("SDIO: Disabled device %s\n", sdio_func_id(func));
return 0;
err:
pr_debug("SDIO: Failed to disable device %s\n", sdio_func_id(func));
return ret;
}
EXPORT_SYMBOL_GPL(sdio_disable_func);
/**
* sdio_set_block_size - set the block size of an SDIO function
* @func: SDIO function to change
* @blksz: new block size or 0 to use the default.
*
* The default block size is the largest supported by both the function
* and the host, with a maximum of 512 to ensure that arbitrarily sized
 * data transfers use the optimal (least) number of commands.
*
* A driver may call this to override the default block size set by the
* core. This can be used to set a block size greater than the maximum
 * reported by the card; it is the driver's responsibility to ensure
* it uses a value that the card supports.
*
* Returns 0 on success, -EINVAL if the host does not support the
* requested block size, or -EIO (etc.) if one of the resultant FBR block
* size register writes failed.
*
*/
int sdio_set_block_size(struct sdio_func *func, unsigned blksz)
{
int ret;
if (blksz > func->card->host->max_blk_size)
return -EINVAL;
if (blksz == 0) {
blksz = min(func->max_blksize, func->card->host->max_blk_size);
blksz = min(blksz, 512u);
}
ret = mmc_io_rw_direct(func->card, 1, 0,
SDIO_FBR_BASE(func->num) + SDIO_FBR_BLKSIZE,
blksz & 0xff, NULL);
if (ret)
return ret;
ret = mmc_io_rw_direct(func->card, 1, 0,
SDIO_FBR_BASE(func->num) + SDIO_FBR_BLKSIZE + 1,
(blksz >> 8) & 0xff, NULL);
if (ret)
return ret;
func->cur_blksize = blksz;
return 0;
}
EXPORT_SYMBOL_GPL(sdio_set_block_size);
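/*
 * Illustrative sketch (editor's addition): picking a block size at probe
 * time. Passing 0 requests the negotiated default described above; an
 * explicit value would be a driver-specific, card-validated choice.
 */
static int __maybe_unused example_setup_block_size(struct sdio_func *func)
{
	int ret;

	sdio_claim_host(func);
	ret = sdio_set_block_size(func, 0);
	sdio_release_host(func);

	return ret;
}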
/*
* Calculate the maximum byte mode transfer size
*/
static inline unsigned int sdio_max_byte_size(struct sdio_func *func)
{
unsigned mval = func->card->host->max_blk_size;
if (mmc_blksz_for_byte_mode(func->card))
mval = min(mval, func->cur_blksize);
else
mval = min(mval, func->max_blksize);
if (mmc_card_broken_byte_mode_512(func->card))
return min(mval, 511u);
return min(mval, 512u); /* maximum size for byte mode */
}
/*
 * This is legacy code, which needs to be reworked some day. Basically we need
 * to take the properties of the host into account, to enable the SDIO func
 * driver layer to allocate optimal buffers.
*/
static inline unsigned int _sdio_align_size(unsigned int sz)
{
/*
* FIXME: We don't have a system for the controller to tell
* the core about its problems yet, so for now we just 32-bit
* align the size.
*/
return ALIGN(sz, 4);
}
/**
* sdio_align_size - pads a transfer size to a more optimal value
* @func: SDIO function
* @sz: original transfer size
*
* Pads the original data size with a number of extra bytes in
* order to avoid controller bugs and/or performance hits
* (e.g. some controllers revert to PIO for certain sizes).
*
* If possible, it will also adjust the size so that it can be
* handled in just a single request.
*
* Returns the improved size, which might be unmodified.
*/
unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz)
{
unsigned int orig_sz;
unsigned int blk_sz, byte_sz;
unsigned chunk_sz;
orig_sz = sz;
/*
* Do a first check with the controller, in case it
* wants to increase the size up to a point where it
* might need more than one block.
*/
sz = _sdio_align_size(sz);
/*
* If we can still do this with just a byte transfer, then
* we're done.
*/
if (sz <= sdio_max_byte_size(func))
return sz;
if (func->card->cccr.multi_block) {
/*
* Check if the transfer is already block aligned
*/
if ((sz % func->cur_blksize) == 0)
return sz;
/*
* Realign it so that it can be done with one request,
* and recheck if the controller still likes it.
*/
blk_sz = ((sz + func->cur_blksize - 1) /
func->cur_blksize) * func->cur_blksize;
blk_sz = _sdio_align_size(blk_sz);
/*
* This value is only good if it is still just
* one request.
*/
if ((blk_sz % func->cur_blksize) == 0)
return blk_sz;
/*
* We failed to do one request, but at least try to
* pad the remainder properly.
*/
byte_sz = _sdio_align_size(sz % func->cur_blksize);
if (byte_sz <= sdio_max_byte_size(func)) {
blk_sz = sz / func->cur_blksize;
return blk_sz * func->cur_blksize + byte_sz;
}
} else {
/*
* We need multiple requests, so first check that the
		 * controller can handle the chunk size.
*/
chunk_sz = _sdio_align_size(sdio_max_byte_size(func));
if (chunk_sz == sdio_max_byte_size(func)) {
/*
* Fix up the size of the remainder (if any)
*/
byte_sz = orig_sz % chunk_sz;
if (byte_sz) {
byte_sz = _sdio_align_size(byte_sz);
}
return (orig_sz / chunk_sz) * chunk_sz + byte_sz;
}
}
/*
* The controller is simply incapable of transferring the size
* we want in decent manner, so just return the original size.
*/
return orig_sz;
}
EXPORT_SYMBOL_GPL(sdio_align_size);
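/*
 * Illustrative sketch (editor's addition): pairing sdio_align_size() with
 * a bulk read so the padded length lands on an optimal transfer size. The
 * 0x1000 base address is hypothetical; the caller is assumed to hold the
 * host and to supply a buffer large enough for the padded size.
 */
static int __maybe_unused example_bulk_read(struct sdio_func *func,
					    void *buf, unsigned int len)
{
	unsigned int xfer_len = sdio_align_size(func, len);

	return sdio_memcpy_fromio(func, buf, 0x1000, xfer_len);
}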
/* Split an arbitrarily sized data transfer into several
* IO_RW_EXTENDED commands. */
static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
unsigned addr, int incr_addr, u8 *buf, unsigned size)
{
unsigned remainder = size;
unsigned max_blocks;
int ret;
if (!func || (func->num > 7))
return -EINVAL;
/* Do the bulk of the transfer using block mode (if supported). */
if (func->card->cccr.multi_block && (size > sdio_max_byte_size(func))) {
/* Blocks per command is limited by host count, host transfer
* size and the maximum for IO_RW_EXTENDED of 511 blocks. */
max_blocks = min(func->card->host->max_blk_count, 511u);
while (remainder >= func->cur_blksize) {
unsigned blocks;
blocks = remainder / func->cur_blksize;
if (blocks > max_blocks)
blocks = max_blocks;
size = blocks * func->cur_blksize;
ret = mmc_io_rw_extended(func->card, write,
func->num, addr, incr_addr, buf,
blocks, func->cur_blksize);
if (ret)
return ret;
remainder -= size;
buf += size;
if (incr_addr)
addr += size;
}
}
/* Write the remainder using byte mode. */
while (remainder > 0) {
size = min(remainder, sdio_max_byte_size(func));
/* Indicate byte mode by setting "blocks" = 0 */
ret = mmc_io_rw_extended(func->card, write, func->num, addr,
incr_addr, buf, 0, size);
if (ret)
return ret;
remainder -= size;
buf += size;
if (incr_addr)
addr += size;
}
return 0;
}
/**
 * sdio_readb - read a single byte from an SDIO function
* @func: SDIO function to access
* @addr: address to read
* @err_ret: optional status value from transfer
*
* Reads a single byte from the address space of a given SDIO
* function. If there is a problem reading the address, 0xff
* is returned and @err_ret will contain the error code.
*/
u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret)
{
int ret;
u8 val;
if (!func) {
if (err_ret)
*err_ret = -EINVAL;
return 0xFF;
}
ret = mmc_io_rw_direct(func->card, 0, func->num, addr, 0, &val);
if (err_ret)
*err_ret = ret;
if (ret)
return 0xFF;
return val;
}
EXPORT_SYMBOL_GPL(sdio_readb);
/**
 * sdio_writeb - write a single byte to an SDIO function
* @func: SDIO function to access
* @b: byte to write
* @addr: address to write to
* @err_ret: optional status value from transfer
*
* Writes a single byte to the address space of a given SDIO
* function. @err_ret will contain the status of the actual
* transfer.
*/
void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret)
{
int ret;
if (!func) {
if (err_ret)
*err_ret = -EINVAL;
return;
}
ret = mmc_io_rw_direct(func->card, 1, func->num, addr, b, NULL);
if (err_ret)
*err_ret = ret;
}
EXPORT_SYMBOL_GPL(sdio_writeb);
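/*
 * Illustrative sketch (editor's addition): the common read-modify-write
 * idiom built on sdio_readb()/sdio_writeb(). The 0x14 control register and
 * its enable bit are hypothetical; the host stays claimed across both
 * accesses so they form one uninterrupted sequence on the bus.
 */
static int __maybe_unused example_set_enable_bit(struct sdio_func *func)
{
	int err;
	u8 val;

	sdio_claim_host(func);
	val = sdio_readb(func, 0x14, &err);
	if (!err)
		sdio_writeb(func, val | 0x01, 0x14, &err);
	sdio_release_host(func);

	return err;
}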
/**
* sdio_writeb_readb - write and read a byte from SDIO function
* @func: SDIO function to access
* @write_byte: byte to write
* @addr: address to write to
* @err_ret: optional status value from transfer
*
 * Performs a RAW (Read after Write) operation as defined by the SDIO spec -
 * a single byte is written to the address space of a given SDIO function
 * and the response is read back from the same address, both in a single
 * request.
* If there is a problem with the operation, 0xff is returned and
* @err_ret will contain the error code.
*/
u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte,
unsigned int addr, int *err_ret)
{
int ret;
u8 val;
ret = mmc_io_rw_direct(func->card, 1, func->num, addr,
write_byte, &val);
if (err_ret)
*err_ret = ret;
if (ret)
return 0xff;
return val;
}
EXPORT_SYMBOL_GPL(sdio_writeb_readb);
/**
 * sdio_memcpy_fromio - read a chunk of memory from an SDIO function
* @func: SDIO function to access
* @dst: buffer to store the data
* @addr: address to begin reading from
* @count: number of bytes to read
*
* Reads from the address space of a given SDIO function. Return
* value indicates if the transfer succeeded or not.
*/
int sdio_memcpy_fromio(struct sdio_func *func, void *dst,
unsigned int addr, int count)
{
return sdio_io_rw_ext_helper(func, 0, addr, 1, dst, count);
}
EXPORT_SYMBOL_GPL(sdio_memcpy_fromio);
/**
 * sdio_memcpy_toio - write a chunk of memory to an SDIO function
* @func: SDIO function to access
* @addr: address to start writing to
* @src: buffer that contains the data to write
* @count: number of bytes to write
*
* Writes to the address space of a given SDIO function. Return
* value indicates if the transfer succeeded or not.
*/
int sdio_memcpy_toio(struct sdio_func *func, unsigned int addr,
void *src, int count)
{
return sdio_io_rw_ext_helper(func, 1, addr, 1, src, count);
}
EXPORT_SYMBOL_GPL(sdio_memcpy_toio);
/**
 * sdio_readsb - read from a FIFO on an SDIO function
* @func: SDIO function to access
* @dst: buffer to store the data
* @addr: address of (single byte) FIFO
* @count: number of bytes to read
*
* Reads from the specified FIFO of a given SDIO function. Return
* value indicates if the transfer succeeded or not.
*/
int sdio_readsb(struct sdio_func *func, void *dst, unsigned int addr,
int count)
{
return sdio_io_rw_ext_helper(func, 0, addr, 0, dst, count);
}
EXPORT_SYMBOL_GPL(sdio_readsb);
/**
 * sdio_writesb - write to a FIFO of an SDIO function
* @func: SDIO function to access
* @addr: address of (single byte) FIFO
* @src: buffer that contains the data to write
* @count: number of bytes to write
*
* Writes to the specified FIFO of a given SDIO function. Return
* value indicates if the transfer succeeded or not.
*/
int sdio_writesb(struct sdio_func *func, unsigned int addr, void *src,
int count)
{
return sdio_io_rw_ext_helper(func, 1, addr, 0, src, count);
}
EXPORT_SYMBOL_GPL(sdio_writesb);
/**
 * sdio_readw - read a 16 bit integer from an SDIO function
* @func: SDIO function to access
* @addr: address to read
* @err_ret: optional status value from transfer
*
* Reads a 16 bit integer from the address space of a given SDIO
* function. If there is a problem reading the address, 0xffff
* is returned and @err_ret will contain the error code.
*/
u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret)
{
int ret;
ret = sdio_memcpy_fromio(func, func->tmpbuf, addr, 2);
if (err_ret)
*err_ret = ret;
if (ret)
return 0xFFFF;
return le16_to_cpup((__le16 *)func->tmpbuf);
}
EXPORT_SYMBOL_GPL(sdio_readw);
/**
 * sdio_writew - write a 16 bit integer to an SDIO function
* @func: SDIO function to access
* @b: integer to write
* @addr: address to write to
* @err_ret: optional status value from transfer
*
* Writes a 16 bit integer to the address space of a given SDIO
* function. @err_ret will contain the status of the actual
* transfer.
*/
void sdio_writew(struct sdio_func *func, u16 b, unsigned int addr, int *err_ret)
{
int ret;
*(__le16 *)func->tmpbuf = cpu_to_le16(b);
ret = sdio_memcpy_toio(func, addr, func->tmpbuf, 2);
if (err_ret)
*err_ret = ret;
}
EXPORT_SYMBOL_GPL(sdio_writew);
/**
 * sdio_readl - read a 32 bit integer from an SDIO function
* @func: SDIO function to access
* @addr: address to read
* @err_ret: optional status value from transfer
*
* Reads a 32 bit integer from the address space of a given SDIO
* function. If there is a problem reading the address,
* 0xffffffff is returned and @err_ret will contain the error
* code.
*/
u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret)
{
int ret;
ret = sdio_memcpy_fromio(func, func->tmpbuf, addr, 4);
if (err_ret)
*err_ret = ret;
if (ret)
return 0xFFFFFFFF;
return le32_to_cpup((__le32 *)func->tmpbuf);
}
EXPORT_SYMBOL_GPL(sdio_readl);
/**
 * sdio_writel - write a 32 bit integer to an SDIO function
* @func: SDIO function to access
* @b: integer to write
* @addr: address to write to
* @err_ret: optional status value from transfer
*
* Writes a 32 bit integer to the address space of a given SDIO
* function. @err_ret will contain the status of the actual
* transfer.
*/
void sdio_writel(struct sdio_func *func, u32 b, unsigned int addr, int *err_ret)
{
int ret;
*(__le32 *)func->tmpbuf = cpu_to_le32(b);
ret = sdio_memcpy_toio(func, addr, func->tmpbuf, 4);
if (err_ret)
*err_ret = ret;
}
EXPORT_SYMBOL_GPL(sdio_writel);
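/*
 * Illustrative sketch (editor's addition): 32-bit register access. The
 * word helpers above hide the little-endian wire format behind
 * func->tmpbuf, so callers work in native byte order. The 0x20 offset is
 * hypothetical.
 */
static u32 __maybe_unused example_read_chip_id(struct sdio_func *func,
					       int *err)
{
	u32 id;

	sdio_claim_host(func);
	id = sdio_readl(func, 0x20, err);
	sdio_release_host(func);

	return id;
}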
/**
* sdio_f0_readb - read a single byte from SDIO function 0
* @func: an SDIO function of the card
* @addr: address to read
* @err_ret: optional status value from transfer
*
* Reads a single byte from the address space of SDIO function 0.
* If there is a problem reading the address, 0xff is returned
* and @err_ret will contain the error code.
*/
unsigned char sdio_f0_readb(struct sdio_func *func, unsigned int addr,
int *err_ret)
{
int ret;
unsigned char val;
if (!func) {
if (err_ret)
*err_ret = -EINVAL;
return 0xFF;
}
ret = mmc_io_rw_direct(func->card, 0, 0, addr, 0, &val);
if (err_ret)
*err_ret = ret;
if (ret)
return 0xFF;
return val;
}
EXPORT_SYMBOL_GPL(sdio_f0_readb);
/**
* sdio_f0_writeb - write a single byte to SDIO function 0
* @func: an SDIO function of the card
* @b: byte to write
* @addr: address to write to
* @err_ret: optional status value from transfer
*
* Writes a single byte to the address space of SDIO function 0.
* @err_ret will contain the status of the actual transfer.
*
* Only writes to the vendor specific CCCR registers (0xF0 -
 * 0xFF) are permitted; @err_ret will be set to -EINVAL for
 * writes outside this range.
*/
void sdio_f0_writeb(struct sdio_func *func, unsigned char b, unsigned int addr,
int *err_ret)
{
int ret;
if (!func) {
if (err_ret)
*err_ret = -EINVAL;
return;
}
if ((addr < 0xF0 || addr > 0xFF) && (!mmc_card_lenient_fn0(func->card))) {
if (err_ret)
*err_ret = -EINVAL;
return;
}
ret = mmc_io_rw_direct(func->card, 1, 0, addr, b, NULL);
if (err_ret)
*err_ret = ret;
}
EXPORT_SYMBOL_GPL(sdio_f0_writeb);
/**
* sdio_get_host_pm_caps - get host power management capabilities
* @func: SDIO function attached to host
*
* Returns a capability bitmask corresponding to power management
* features supported by the host controller that the card function
* might rely upon during a system suspend. The host doesn't need
* to be claimed, nor the function active, for this information to be
* obtained.
*/
mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func)
{
if (!func)
return 0;
return func->card->host->pm_caps;
}
EXPORT_SYMBOL_GPL(sdio_get_host_pm_caps);
/**
* sdio_set_host_pm_flags - set wanted host power management capabilities
* @func: SDIO function attached to host
* @flags: Power Management flags to set
*
* Set a capability bitmask corresponding to wanted host controller
* power management features for the upcoming suspend state.
* This must be called, if needed, each time the suspend method of
* the function driver is called, and must contain only bits that
* were returned by sdio_get_host_pm_caps().
* The host doesn't need to be claimed, nor the function active,
* for this information to be set.
*/
int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags)
{
struct mmc_host *host;
if (!func)
return -EINVAL;
host = func->card->host;
if (flags & ~host->pm_caps)
return -EINVAL;
/* function suspend methods are serialized, hence no lock needed */
host->pm_flags |= flags;
return 0;
}
EXPORT_SYMBOL_GPL(sdio_set_host_pm_flags);
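/*
 * Illustrative sketch (editor's addition): a hypothetical function
 * driver's suspend callback requesting that the host keep the card
 * powered across suspend, following the contract documented above.
 */
static int __maybe_unused example_suspend(struct device *dev)
{
	struct sdio_func *func = dev_to_sdio_func(dev);

	if (!(sdio_get_host_pm_caps(func) & MMC_PM_KEEP_POWER))
		return -ENOSYS;

	return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
}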
/**
* sdio_retune_crc_disable - temporarily disable retuning on CRC errors
* @func: SDIO function attached to host
*
* If the SDIO card is known to be in a state where it might produce
* CRC errors on the bus in response to commands (like if we know it is
* transitioning between power states), an SDIO function driver can
* call this function to temporarily disable the SD/MMC core behavior of
* triggering an automatic retuning.
*
* This function should be called while the host is claimed and the host
* should remain claimed until sdio_retune_crc_enable() is called.
* Specifically, the expected sequence of calls is:
* - sdio_claim_host()
* - sdio_retune_crc_disable()
* - some number of calls like sdio_writeb() and sdio_readb()
* - sdio_retune_crc_enable()
* - sdio_release_host()
*/
void sdio_retune_crc_disable(struct sdio_func *func)
{
func->card->host->retune_crc_disable = true;
}
EXPORT_SYMBOL_GPL(sdio_retune_crc_disable);
/**
* sdio_retune_crc_enable - re-enable retuning on CRC errors
* @func: SDIO function attached to host
*
* This is the complement to sdio_retune_crc_disable().
*/
void sdio_retune_crc_enable(struct sdio_func *func)
{
func->card->host->retune_crc_disable = false;
}
EXPORT_SYMBOL_GPL(sdio_retune_crc_enable);
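/*
 * Illustrative sketch (editor's addition): the exact call sequence the
 * comment above prescribes, wrapped around a hypothetical register write
 * that the card might answer with CRC errors while changing power state.
 */
static void __maybe_unused example_quiet_crc_access(struct sdio_func *func)
{
	int err;

	sdio_claim_host(func);
	sdio_retune_crc_disable(func);
	sdio_writeb(func, 0x01, 0x18, &err);	/* 0x18 is hypothetical */
	sdio_retune_crc_enable(func);
	sdio_release_host(func);
}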
/**
* sdio_retune_hold_now - start deferring retuning requests till release
* @func: SDIO function attached to host
*
* This function can be called if it's currently a bad time to do
* a retune of the SDIO card. Retune requests made during this time
* will be held and we'll actually do the retune sometime after the
* release.
*
* This function could be useful if an SDIO card is in a power state
* where it can respond to a small subset of commands that doesn't
* include the retuning command. Care should be taken when using
* this function since (presumably) the retuning request we might be
* deferring was made for a good reason.
*
* This function should be called while the host is claimed.
*/
void sdio_retune_hold_now(struct sdio_func *func)
{
mmc_retune_hold_now(func->card->host);
}
EXPORT_SYMBOL_GPL(sdio_retune_hold_now);
/**
* sdio_retune_release - signal that it's OK to retune now
* @func: SDIO function attached to host
*
* This is the complement to sdio_retune_hold_now(). Calling this
* function won't make a retune happen right away but will allow
* them to be scheduled normally.
*
* This function should be called while the host is claimed.
*/
void sdio_retune_release(struct sdio_func *func)
{
mmc_retune_release(func->card->host);
}
EXPORT_SYMBOL_GPL(sdio_retune_release);
| linux-master | drivers/mmc/core/sdio_io.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Linaro Ltd
*
* Author: Ulf Hansson <[email protected]>
*
* MMC power sequence management
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/mmc/host.h>
#include "pwrseq.h"
static DEFINE_MUTEX(pwrseq_list_mutex);
static LIST_HEAD(pwrseq_list);
int mmc_pwrseq_alloc(struct mmc_host *host)
{
struct device_node *np;
struct mmc_pwrseq *p;
np = of_parse_phandle(host->parent->of_node, "mmc-pwrseq", 0);
if (!np)
return 0;
mutex_lock(&pwrseq_list_mutex);
list_for_each_entry(p, &pwrseq_list, pwrseq_node) {
if (device_match_of_node(p->dev, np)) {
if (!try_module_get(p->owner))
dev_err(host->parent,
"increasing module refcount failed\n");
else
host->pwrseq = p;
break;
}
}
of_node_put(np);
mutex_unlock(&pwrseq_list_mutex);
if (!host->pwrseq)
return -EPROBE_DEFER;
dev_info(host->parent, "allocated mmc-pwrseq\n");
return 0;
}
void mmc_pwrseq_pre_power_on(struct mmc_host *host)
{
struct mmc_pwrseq *pwrseq = host->pwrseq;
if (pwrseq && pwrseq->ops->pre_power_on)
pwrseq->ops->pre_power_on(host);
}
void mmc_pwrseq_post_power_on(struct mmc_host *host)
{
struct mmc_pwrseq *pwrseq = host->pwrseq;
if (pwrseq && pwrseq->ops->post_power_on)
pwrseq->ops->post_power_on(host);
}
void mmc_pwrseq_power_off(struct mmc_host *host)
{
struct mmc_pwrseq *pwrseq = host->pwrseq;
if (pwrseq && pwrseq->ops->power_off)
pwrseq->ops->power_off(host);
}
void mmc_pwrseq_reset(struct mmc_host *host)
{
struct mmc_pwrseq *pwrseq = host->pwrseq;
if (pwrseq && pwrseq->ops->reset)
pwrseq->ops->reset(host);
}
void mmc_pwrseq_free(struct mmc_host *host)
{
struct mmc_pwrseq *pwrseq = host->pwrseq;
if (pwrseq) {
module_put(pwrseq->owner);
host->pwrseq = NULL;
}
}
int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq)
{
if (!pwrseq || !pwrseq->ops || !pwrseq->dev)
return -EINVAL;
mutex_lock(&pwrseq_list_mutex);
list_add(&pwrseq->pwrseq_node, &pwrseq_list);
mutex_unlock(&pwrseq_list_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(mmc_pwrseq_register);
void mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq)
{
if (pwrseq) {
mutex_lock(&pwrseq_list_mutex);
list_del(&pwrseq->pwrseq_node);
mutex_unlock(&pwrseq_list_mutex);
}
}
EXPORT_SYMBOL_GPL(mmc_pwrseq_unregister);
| linux-master | drivers/mmc/core/pwrseq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Debugfs support for hosts and cards
*
* Copyright (C) 2008 Atmel Corporation
*/
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/fault-inject.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"
#ifdef CONFIG_FAIL_MMC_REQUEST
static DECLARE_FAULT_ATTR(fail_default_attr);
static char *fail_request;
module_param(fail_request, charp, 0);
MODULE_PARM_DESC(fail_request, "default fault injection attributes");
#endif /* CONFIG_FAIL_MMC_REQUEST */
/* The debugfs functions are optimized away when CONFIG_DEBUG_FS isn't set. */
static int mmc_ios_show(struct seq_file *s, void *data)
{
static const char *vdd_str[] = {
[8] = "2.0",
[9] = "2.1",
[10] = "2.2",
[11] = "2.3",
[12] = "2.4",
[13] = "2.5",
[14] = "2.6",
[15] = "2.7",
[16] = "2.8",
[17] = "2.9",
[18] = "3.0",
[19] = "3.1",
[20] = "3.2",
[21] = "3.3",
[22] = "3.4",
[23] = "3.5",
[24] = "3.6",
};
struct mmc_host *host = s->private;
struct mmc_ios *ios = &host->ios;
const char *str;
seq_printf(s, "clock:\t\t%u Hz\n", ios->clock);
if (host->actual_clock)
seq_printf(s, "actual clock:\t%u Hz\n", host->actual_clock);
seq_printf(s, "vdd:\t\t%u ", ios->vdd);
if ((1 << ios->vdd) & MMC_VDD_165_195)
seq_printf(s, "(1.65 - 1.95 V)\n");
else if (ios->vdd < (ARRAY_SIZE(vdd_str) - 1)
&& vdd_str[ios->vdd] && vdd_str[ios->vdd + 1])
seq_printf(s, "(%s ~ %s V)\n", vdd_str[ios->vdd],
vdd_str[ios->vdd + 1]);
else
seq_printf(s, "(invalid)\n");
switch (ios->bus_mode) {
case MMC_BUSMODE_OPENDRAIN:
str = "open drain";
break;
case MMC_BUSMODE_PUSHPULL:
str = "push-pull";
break;
default:
str = "invalid";
break;
}
seq_printf(s, "bus mode:\t%u (%s)\n", ios->bus_mode, str);
switch (ios->chip_select) {
case MMC_CS_DONTCARE:
str = "don't care";
break;
case MMC_CS_HIGH:
str = "active high";
break;
case MMC_CS_LOW:
str = "active low";
break;
default:
str = "invalid";
break;
}
seq_printf(s, "chip select:\t%u (%s)\n", ios->chip_select, str);
switch (ios->power_mode) {
case MMC_POWER_OFF:
str = "off";
break;
case MMC_POWER_UP:
str = "up";
break;
case MMC_POWER_ON:
str = "on";
break;
default:
str = "invalid";
break;
}
seq_printf(s, "power mode:\t%u (%s)\n", ios->power_mode, str);
seq_printf(s, "bus width:\t%u (%u bits)\n",
ios->bus_width, 1 << ios->bus_width);
switch (ios->timing) {
case MMC_TIMING_LEGACY:
str = "legacy";
break;
case MMC_TIMING_MMC_HS:
str = "mmc high-speed";
break;
case MMC_TIMING_SD_HS:
str = "sd high-speed";
break;
case MMC_TIMING_UHS_SDR12:
str = "sd uhs SDR12";
break;
case MMC_TIMING_UHS_SDR25:
str = "sd uhs SDR25";
break;
case MMC_TIMING_UHS_SDR50:
str = "sd uhs SDR50";
break;
case MMC_TIMING_UHS_SDR104:
str = "sd uhs SDR104";
break;
case MMC_TIMING_UHS_DDR50:
str = "sd uhs DDR50";
break;
case MMC_TIMING_MMC_DDR52:
str = "mmc DDR52";
break;
case MMC_TIMING_MMC_HS200:
str = "mmc HS200";
break;
case MMC_TIMING_MMC_HS400:
str = mmc_card_hs400es(host->card) ?
"mmc HS400 enhanced strobe" : "mmc HS400";
break;
default:
str = "invalid";
break;
}
seq_printf(s, "timing spec:\t%u (%s)\n", ios->timing, str);
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_330:
str = "3.30 V";
break;
case MMC_SIGNAL_VOLTAGE_180:
str = "1.80 V";
break;
case MMC_SIGNAL_VOLTAGE_120:
str = "1.20 V";
break;
default:
str = "invalid";
break;
}
seq_printf(s, "signal voltage:\t%u (%s)\n", ios->signal_voltage, str);
switch (ios->drv_type) {
case MMC_SET_DRIVER_TYPE_A:
str = "driver type A";
break;
case MMC_SET_DRIVER_TYPE_B:
str = "driver type B";
break;
case MMC_SET_DRIVER_TYPE_C:
str = "driver type C";
break;
case MMC_SET_DRIVER_TYPE_D:
str = "driver type D";
break;
default:
str = "invalid";
break;
}
seq_printf(s, "driver type:\t%u (%s)\n", ios->drv_type, str);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(mmc_ios);
static int mmc_clock_opt_get(void *data, u64 *val)
{
struct mmc_host *host = data;
*val = host->ios.clock;
return 0;
}
static int mmc_clock_opt_set(void *data, u64 val)
{
struct mmc_host *host = data;
	/* We need this check because the input value is u64 */
if (val != 0 && (val > host->f_max || val < host->f_min))
return -EINVAL;
mmc_claim_host(host);
mmc_set_clock(host, (unsigned int) val);
mmc_release_host(host);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(mmc_clock_fops, mmc_clock_opt_get, mmc_clock_opt_set,
"%llu\n");
static int mmc_err_state_get(void *data, u64 *val)
{
struct mmc_host *host = data;
int i;
if (!host)
return -EINVAL;
*val = 0;
for (i = 0; i < MMC_ERR_MAX; i++) {
if (host->err_stats[i]) {
*val = 1;
break;
}
}
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(mmc_err_state, mmc_err_state_get, NULL, "%llu\n");
static int mmc_err_stats_show(struct seq_file *file, void *data)
{
struct mmc_host *host = file->private;
const char *desc[MMC_ERR_MAX] = {
[MMC_ERR_CMD_TIMEOUT] = "Command Timeout Occurred",
[MMC_ERR_CMD_CRC] = "Command CRC Errors Occurred",
[MMC_ERR_DAT_TIMEOUT] = "Data Timeout Occurred",
[MMC_ERR_DAT_CRC] = "Data CRC Errors Occurred",
[MMC_ERR_AUTO_CMD] = "Auto-Cmd Error Occurred",
[MMC_ERR_ADMA] = "ADMA Error Occurred",
[MMC_ERR_TUNING] = "Tuning Error Occurred",
[MMC_ERR_CMDQ_RED] = "CMDQ RED Errors",
[MMC_ERR_CMDQ_GCE] = "CMDQ GCE Errors",
[MMC_ERR_CMDQ_ICCE] = "CMDQ ICCE Errors",
		[MMC_ERR_REQ_TIMEOUT] = "Request Timed Out",
		[MMC_ERR_CMDQ_REQ_TIMEOUT] = "CMDQ Request Timed Out",
		[MMC_ERR_ICE_CFG] = "ICE Config Errors",
		[MMC_ERR_CTRL_TIMEOUT] = "Controller Timed Out errors",
		[MMC_ERR_UNEXPECTED_IRQ] = "Unexpected IRQ errors",
};
int i;
for (i = 0; i < MMC_ERR_MAX; i++) {
if (desc[i])
seq_printf(file, "# %s:\t %d\n",
desc[i], host->err_stats[i]);
}
return 0;
}
static int mmc_err_stats_open(struct inode *inode, struct file *file)
{
return single_open(file, mmc_err_stats_show, inode->i_private);
}
static ssize_t mmc_err_stats_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct mmc_host *host = filp->f_mapping->host->i_private;
pr_debug("%s: Resetting MMC error statistics\n", __func__);
memset(host->err_stats, 0, sizeof(host->err_stats));
return cnt;
}
static const struct file_operations mmc_err_stats_fops = {
.open = mmc_err_stats_open,
.read = seq_read,
.write = mmc_err_stats_write,
.release = single_release,
};
void mmc_add_host_debugfs(struct mmc_host *host)
{
struct dentry *root;
root = debugfs_create_dir(mmc_hostname(host), NULL);
host->debugfs_root = root;
debugfs_create_file("ios", S_IRUSR, root, host, &mmc_ios_fops);
debugfs_create_x32("caps", S_IRUSR, root, &host->caps);
debugfs_create_x32("caps2", S_IRUSR, root, &host->caps2);
debugfs_create_file_unsafe("clock", S_IRUSR | S_IWUSR, root, host,
&mmc_clock_fops);
debugfs_create_file_unsafe("err_state", 0600, root, host,
&mmc_err_state);
debugfs_create_file("err_stats", 0600, root, host,
&mmc_err_stats_fops);
#ifdef CONFIG_FAIL_MMC_REQUEST
if (fail_request)
setup_fault_attr(&fail_default_attr, fail_request);
host->fail_mmc_request = fail_default_attr;
fault_create_debugfs_attr("fail_mmc_request", root,
&host->fail_mmc_request);
#endif
}
void mmc_remove_host_debugfs(struct mmc_host *host)
{
debugfs_remove_recursive(host->debugfs_root);
}
void mmc_add_card_debugfs(struct mmc_card *card)
{
struct mmc_host *host = card->host;
struct dentry *root;
if (!host->debugfs_root)
return;
root = debugfs_create_dir(mmc_card_id(card), host->debugfs_root);
card->debugfs_root = root;
debugfs_create_x32("state", S_IRUSR, root, &card->state);
}
void mmc_remove_card_debugfs(struct mmc_card *card)
{
debugfs_remove_recursive(card->debugfs_root);
card->debugfs_root = NULL;
}
| linux-master | drivers/mmc/core/debugfs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/core/mmc_ops.c
*
* Copyright 2006-2007 Pierre Ossman
*/
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"
#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */
#define MMC_SANITIZE_TIMEOUT_MS (240 * 1000) /* 240s */
#define MMC_OP_COND_PERIOD_US (4 * 1000) /* 4ms */
#define MMC_OP_COND_TIMEOUT_MS 1000 /* 1s */
static const u8 tuning_blk_pattern_4bit[] = {
0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
static const u8 tuning_blk_pattern_8bit[] = {
0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
struct mmc_busy_data {
struct mmc_card *card;
bool retry_crc_err;
enum mmc_busy_cmd busy_cmd;
};
struct mmc_op_cond_busy_data {
struct mmc_host *host;
u32 ocr;
struct mmc_command *cmd;
};
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
int err;
struct mmc_command cmd = {};
cmd.opcode = MMC_SEND_STATUS;
if (!mmc_host_is_spi(card->host))
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
err = mmc_wait_for_cmd(card->host, &cmd, retries);
if (err)
return err;
/* NOTE: callers are required to understand the difference
* between "native" and SPI format status words!
*/
if (status)
*status = cmd.resp[0];
return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);
int mmc_send_status(struct mmc_card *card, u32 *status)
{
return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
struct mmc_command cmd = {};
cmd.opcode = MMC_SELECT_CARD;
if (card) {
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
} else {
cmd.arg = 0;
cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
}
return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
int mmc_select_card(struct mmc_card *card)
{
return _mmc_select_card(card->host, card);
}
int mmc_deselect_cards(struct mmc_host *host)
{
return _mmc_select_card(host, NULL);
}
/*
* Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
* The presence of the DSR register can be determined from the CSD register,
* bit 76.
*/
int mmc_set_dsr(struct mmc_host *host)
{
struct mmc_command cmd = {};
cmd.opcode = MMC_SET_DSR;
cmd.arg = (host->dsr << 16) | 0xffff;
cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
int mmc_go_idle(struct mmc_host *host)
{
int err;
struct mmc_command cmd = {};
/*
* Non-SPI hosts need to prevent chipselect going active during
* GO_IDLE; that would put chips into SPI mode. Remind them of
* that in case of hardware that won't pull up DAT3/nCS otherwise.
*
* SPI hosts ignore ios.chip_select; it's managed according to
* rules that must accommodate non-MMC slaves which this layer
* won't even know about.
*/
if (!mmc_host_is_spi(host)) {
mmc_set_chip_select(host, MMC_CS_HIGH);
mmc_delay(1);
}
cmd.opcode = MMC_GO_IDLE_STATE;
cmd.arg = 0;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
err = mmc_wait_for_cmd(host, &cmd, 0);
mmc_delay(1);
if (!mmc_host_is_spi(host)) {
mmc_set_chip_select(host, MMC_CS_DONTCARE);
mmc_delay(1);
}
host->use_spi_crc = 0;
return err;
}
static int __mmc_send_op_cond_cb(void *cb_data, bool *busy)
{
struct mmc_op_cond_busy_data *data = cb_data;
struct mmc_host *host = data->host;
struct mmc_command *cmd = data->cmd;
u32 ocr = data->ocr;
int err = 0;
err = mmc_wait_for_cmd(host, cmd, 0);
if (err)
return err;
if (mmc_host_is_spi(host)) {
if (!(cmd->resp[0] & R1_SPI_IDLE)) {
*busy = false;
return 0;
}
} else {
if (cmd->resp[0] & MMC_CARD_BUSY) {
*busy = false;
return 0;
}
}
*busy = true;
/*
* According to eMMC specification v5.1 section 6.4.3, we
* should issue CMD1 repeatedly in the idle state until
* the eMMC is ready. Otherwise some eMMC devices seem to enter
* the inactive mode after mmc_init_card() issued CMD0 when
* the eMMC device is busy.
*/
if (!ocr && !mmc_host_is_spi(host))
cmd->arg = cmd->resp[0] | BIT(30);
return 0;
}
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
struct mmc_command cmd = {};
int err = 0;
struct mmc_op_cond_busy_data cb_data = {
.host = host,
.ocr = ocr,
.cmd = &cmd
};
cmd.opcode = MMC_SEND_OP_COND;
cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
err = __mmc_poll_for_busy(host, MMC_OP_COND_PERIOD_US,
MMC_OP_COND_TIMEOUT_MS,
&__mmc_send_op_cond_cb, &cb_data);
if (err)
return err;
if (rocr && !mmc_host_is_spi(host))
*rocr = cmd.resp[0];
return err;
}
int mmc_set_relative_addr(struct mmc_card *card)
{
struct mmc_command cmd = {};
cmd.opcode = MMC_SET_RELATIVE_ADDR;
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}
static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
int err;
struct mmc_command cmd = {};
cmd.opcode = opcode;
cmd.arg = arg;
cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
if (err)
return err;
memcpy(cxd, cmd.resp, sizeof(u32) * 4);
return 0;
}
/*
* NOTE: void *buf, caller for the buf is required to use DMA-capable
* buffer or on-stack buffer (with some overhead in callee).
*/
int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
u32 args, void *buf, unsigned len)
{
struct mmc_request mrq = {};
struct mmc_command cmd = {};
struct mmc_data data = {};
struct scatterlist sg;
mrq.cmd = &cmd;
mrq.data = &data;
cmd.opcode = opcode;
cmd.arg = args;
/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
* rely on callers to never use this with "native" calls for reading
* CSD or CID. Native versions of those commands use the R2 type,
* not R1 plus a data block.
*/
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
data.blksz = len;
data.blocks = 1;
data.flags = MMC_DATA_READ;
data.sg = &sg;
data.sg_len = 1;
sg_init_one(&sg, buf, len);
if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
/*
		 * The spec states that CSD and CID accesses have a timeout
* of 64 clock cycles.
*/
data.timeout_ns = 0;
data.timeout_clks = 64;
} else
mmc_set_data_timeout(&data, card);
mmc_wait_for_req(host, &mrq);
if (cmd.error)
return cmd.error;
if (data.error)
return data.error;
return 0;
}
static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
{
int ret, i;
__be32 *cxd_tmp;
cxd_tmp = kzalloc(16, GFP_KERNEL);
if (!cxd_tmp)
return -ENOMEM;
ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
if (ret)
goto err;
for (i = 0; i < 4; i++)
cxd[i] = be32_to_cpu(cxd_tmp[i]);
err:
kfree(cxd_tmp);
return ret;
}
int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
if (mmc_host_is_spi(card->host))
return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);
return mmc_send_cxd_native(card->host, card->rca << 16, csd,
MMC_SEND_CSD);
}
int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
if (mmc_host_is_spi(host))
return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);
return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
int err;
u8 *ext_csd;
if (!card || !new_ext_csd)
return -EINVAL;
if (!mmc_can_ext_csd(card))
return -EOPNOTSUPP;
/*
* As the ext_csd is so large and mostly unused, we don't store the
* raw block in mmc_card.
*/
ext_csd = kzalloc(512, GFP_KERNEL);
if (!ext_csd)
return -ENOMEM;
err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
512);
if (err)
kfree(ext_csd);
else
*new_ext_csd = ext_csd;
return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
struct mmc_command cmd = {};
int err;
cmd.opcode = MMC_SPI_READ_OCR;
cmd.arg = highcap ? (1 << 30) : 0;
cmd.flags = MMC_RSP_SPI_R3;
err = mmc_wait_for_cmd(host, &cmd, 0);
*ocrp = cmd.resp[1];
return err;
}
int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
struct mmc_command cmd = {};
int err;
cmd.opcode = MMC_SPI_CRC_ON_OFF;
cmd.flags = MMC_RSP_SPI_R1;
cmd.arg = use_crc;
err = mmc_wait_for_cmd(host, &cmd, 0);
if (!err)
host->use_spi_crc = use_crc;
return err;
}
static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
if (mmc_host_is_spi(host)) {
if (status & R1_SPI_ILLEGAL_COMMAND)
return -EBADMSG;
} else {
if (R1_STATUS(status))
pr_warn("%s: unexpected status %#x after switch\n",
mmc_hostname(host), status);
if (status & R1_SWITCH_ERROR)
return -EBADMSG;
}
return 0;
}
/* Caller must hold re-tuning */
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
u32 status;
int err;
err = mmc_send_status(card, &status);
if (!crc_err_fatal && err == -EILSEQ)
return 0;
if (err)
return err;
return mmc_switch_status_error(card->host, status);
}
static int mmc_busy_cb(void *cb_data, bool *busy)
{
struct mmc_busy_data *data = cb_data;
struct mmc_host *host = data->card->host;
u32 status = 0;
int err;
if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
*busy = host->ops->card_busy(host);
return 0;
}
err = mmc_send_status(data->card, &status);
if (data->retry_crc_err && err == -EILSEQ) {
*busy = true;
return 0;
}
if (err)
return err;
switch (data->busy_cmd) {
case MMC_BUSY_CMD6:
err = mmc_switch_status_error(host, status);
break;
case MMC_BUSY_ERASE:
err = R1_STATUS(status) ? -EIO : 0;
break;
case MMC_BUSY_HPI:
case MMC_BUSY_EXTR_SINGLE:
case MMC_BUSY_IO:
break;
default:
err = -EINVAL;
}
if (err)
return err;
*busy = !mmc_ready_for_data(status);
return 0;
}
int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
unsigned int timeout_ms,
int (*busy_cb)(void *cb_data, bool *busy),
void *cb_data)
{
int err;
unsigned long timeout;
unsigned int udelay = period_us ? period_us : 32, udelay_max = 32768;
bool expired = false;
bool busy = false;
timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
do {
/*
* Due to the possibility of being preempted while polling,
* check the expiration time first.
*/
expired = time_after(jiffies, timeout);
err = (*busy_cb)(cb_data, &busy);
if (err)
return err;
/* Timeout if the device still remains busy. */
if (expired && busy) {
pr_err("%s: Card stuck being busy! %s\n",
mmc_hostname(host), __func__);
return -ETIMEDOUT;
}
/* Throttle the polling rate to avoid hogging the CPU. */
if (busy) {
usleep_range(udelay, udelay * 2);
if (udelay < udelay_max)
udelay *= 2;
}
} while (busy);
return 0;
}
EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
{
struct mmc_host *host = card->host;
struct mmc_busy_data cb_data;
cb_data.card = card;
cb_data.retry_crc_err = retry_crc_err;
cb_data.busy_cmd = busy_cmd;
return __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_busy_cb, &cb_data);
}
EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
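/*
 * Illustrative sketch (editor's addition): a minimal custom busy callback
 * for __mmc_poll_for_busy(), mirroring the CMD13-based pattern of
 * mmc_busy_cb() above. A caller would invoke it as
 * __mmc_poll_for_busy(host, 0, timeout_ms, &example_busy_cb, &ctx);
 * the context struct is hypothetical.
 */
struct example_busy_ctx {
	struct mmc_card *card;
};

static int __maybe_unused example_busy_cb(void *cb_data, bool *busy)
{
	struct example_busy_ctx *ctx = cb_data;
	u32 status;
	int err;

	err = mmc_send_status(ctx->card, &status);
	if (err)
		return err;

	*busy = !mmc_ready_for_data(status);
	return 0;
}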
bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
unsigned int timeout_ms)
{
/*
* If the max_busy_timeout of the host is specified, make sure it's
* enough to fit the used timeout_ms. In case it's not, let's instruct
* the host to avoid HW busy detection, by converting to a R1 response
	 * instead of an R1B. Note, some hosts require R1B, which also means
	 * they are on their own when it comes to dealing with the busy timeout.
*/
if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
(timeout_ms > host->max_busy_timeout)) {
cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
return false;
}
cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
cmd->busy_timeout = timeout_ms;
return true;
}
EXPORT_SYMBOL_GPL(mmc_prepare_busy_cmd);
/**
* __mmc_switch - modify EXT_CSD register
* @card: the MMC card associated with the data transfer
* @set: cmd set values
* @index: EXT_CSD register index
* @value: value to program into EXT_CSD register
* @timeout_ms: timeout (ms) for operation performed by register write,
* timeout of zero implies maximum possible timeout
* @timing: new timing to change to
* @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry on CRC errors when polling with CMD13 for busy
* @retries: number of retries
*
* Modifies the EXT_CSD register for selected card.
*/
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms, unsigned char timing,
bool send_status, bool retry_crc_err, unsigned int retries)
{
struct mmc_host *host = card->host;
int err;
struct mmc_command cmd = {};
bool use_r1b_resp;
unsigned char old_timing = host->ios.timing;
mmc_retune_hold(host);
if (!timeout_ms) {
pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
mmc_hostname(host));
timeout_ms = card->ext_csd.generic_cmd6_time;
}
cmd.opcode = MMC_SWITCH;
cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
(index << 16) |
(value << 8) |
set;
use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);
err = mmc_wait_for_cmd(host, &cmd, retries);
if (err)
goto out;
	/* If SPI, or if HW busy detection was used above, we don't need to poll. */
if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
mmc_host_is_spi(host))
goto out_tim;
/*
* If the host doesn't support HW polling via the ->card_busy() ops and
* when it's not allowed to poll by using CMD13, then we need to rely on
* waiting the stated timeout to be sufficient.
*/
if (!send_status && !host->ops->card_busy) {
mmc_delay(timeout_ms);
goto out_tim;
}
/* Let's try to poll to find out when the command is completed. */
err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
if (err)
goto out;
out_tim:
/* Switch to new timing before check switch status. */
if (timing)
mmc_set_timing(host, timing);
if (send_status) {
err = mmc_switch_status(card, true);
if (err && timing)
mmc_set_timing(host, old_timing);
}
out:
mmc_retune_release(host);
return err;
}
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms)
{
return __mmc_switch(card, set, index, value, timeout_ms, 0,
true, false, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_switch);
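/*
 * Illustrative sketch (editor's addition): the typical one-byte EXT_CSD
 * update via mmc_switch(), here enabling the volatile cache. The index,
 * value and generic_cmd6_time timeout are the spec-defined ones; the core
 * performs an equivalent switch during card initialization.
 */
static int __maybe_unused example_enable_cache(struct mmc_card *card)
{
	return mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CACHE_CTRL,
			  1, card->ext_csd.generic_cmd6_time);
}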
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
struct mmc_request mrq = {};
struct mmc_command cmd = {};
struct mmc_data data = {};
struct scatterlist sg;
struct mmc_ios *ios = &host->ios;
const u8 *tuning_block_pattern;
int size, err = 0;
u8 *data_buf;
if (ios->bus_width == MMC_BUS_WIDTH_8) {
tuning_block_pattern = tuning_blk_pattern_8bit;
size = sizeof(tuning_blk_pattern_8bit);
} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
tuning_block_pattern = tuning_blk_pattern_4bit;
size = sizeof(tuning_blk_pattern_4bit);
} else
return -EINVAL;
data_buf = kzalloc(size, GFP_KERNEL);
if (!data_buf)
return -ENOMEM;
mrq.cmd = &cmd;
mrq.data = &data;
cmd.opcode = opcode;
cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
data.blksz = size;
data.blocks = 1;
data.flags = MMC_DATA_READ;
/*
	 * According to the tuning specs, the tuning process
	 * normally takes fewer than 40 executions of CMD19,
	 * and the timeout value should be shorter than 150 ms.
*/
data.timeout_ns = 150 * NSEC_PER_MSEC;
data.sg = &sg;
data.sg_len = 1;
sg_init_one(&sg, data_buf, size);
mmc_wait_for_req(host, &mrq);
if (cmd_error)
*cmd_error = cmd.error;
if (cmd.error) {
err = cmd.error;
goto out;
}
if (data.error) {
err = data.error;
goto out;
}
if (memcmp(data_buf, tuning_block_pattern, size))
err = -EIO;
out:
kfree(data_buf);
return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
{
struct mmc_command cmd = {};
/*
* eMMC specification specifies that CMD12 can be used to stop a tuning
* command, but SD specification does not, so do nothing unless it is
* eMMC.
*/
if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
return 0;
cmd.opcode = MMC_STOP_TRANSMISSION;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
/*
* For drivers that override R1 to R1b, set an arbitrary timeout based
* on the tuning timeout i.e. 150ms.
*/
cmd.busy_timeout = 150;
return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
u8 len)
{
struct mmc_request mrq = {};
struct mmc_command cmd = {};
struct mmc_data data = {};
struct scatterlist sg;
u8 *data_buf;
u8 *test_buf;
int i, err;
static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
/* dma onto stack is unsafe/nonportable, but callers to this
* routine normally provide temporary on-stack buffers ...
*/
data_buf = kmalloc(len, GFP_KERNEL);
if (!data_buf)
return -ENOMEM;
if (len == 8)
test_buf = testdata_8bit;
else if (len == 4)
test_buf = testdata_4bit;
else {
pr_err("%s: Invalid bus_width %d\n",
mmc_hostname(host), len);
kfree(data_buf);
return -EINVAL;
}
if (opcode == MMC_BUS_TEST_W)
memcpy(data_buf, test_buf, len);
mrq.cmd = &cmd;
mrq.data = &data;
cmd.opcode = opcode;
cmd.arg = 0;
/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
* rely on callers to never use this with "native" calls for reading
* CSD or CID. Native versions of those commands use the R2 type,
* not R1 plus a data block.
*/
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
data.blksz = len;
data.blocks = 1;
if (opcode == MMC_BUS_TEST_R)
data.flags = MMC_DATA_READ;
else
data.flags = MMC_DATA_WRITE;
data.sg = &sg;
data.sg_len = 1;
mmc_set_data_timeout(&data, card);
sg_init_one(&sg, data_buf, len);
mmc_wait_for_req(host, &mrq);
err = 0;
if (opcode == MMC_BUS_TEST_R) {
for (i = 0; i < len / 4; i++)
if ((test_buf[i] ^ data_buf[i]) != 0xff) {
err = -EIO;
break;
}
}
kfree(data_buf);
if (cmd.error)
return cmd.error;
if (data.error)
return data.error;
return err;
}
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
int width;
if (bus_width == MMC_BUS_WIDTH_8)
width = 8;
else if (bus_width == MMC_BUS_WIDTH_4)
width = 4;
else if (bus_width == MMC_BUS_WIDTH_1)
return 0; /* no need for test */
else
return -EINVAL;
/*
* Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
* is a problem. This improves chances that the test will work.
*/
mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
static int mmc_send_hpi_cmd(struct mmc_card *card)
{
unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
struct mmc_host *host = card->host;
bool use_r1b_resp = false;
struct mmc_command cmd = {};
int err;
cmd.opcode = card->ext_csd.hpi_cmd;
cmd.arg = card->rca << 16 | 1;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
if (cmd.opcode == MMC_STOP_TRANSMISSION)
use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
busy_timeout_ms);
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err) {
pr_warn("%s: HPI error %d. Command response %#x\n",
mmc_hostname(host), err, cmd.resp[0]);
return err;
}
/* No need to poll when using HW busy detection. */
if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
return 0;
/* Let's poll to find out when the HPI request completes. */
return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
}
/**
 * mmc_interrupt_hpi - Issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issues a High Priority Interrupt and checks the card status
 * until it is out of the prg state.
*/
static int mmc_interrupt_hpi(struct mmc_card *card)
{
int err;
u32 status;
if (!card->ext_csd.hpi_en) {
pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
return 1;
}
err = mmc_send_status(card, &status);
if (err) {
pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
goto out;
}
switch (R1_CURRENT_STATE(status)) {
case R1_STATE_IDLE:
case R1_STATE_READY:
case R1_STATE_STBY:
case R1_STATE_TRAN:
/*
* In idle and transfer states, HPI is not needed and the caller
* can issue the next intended command immediately
*/
goto out;
case R1_STATE_PRG:
break;
default:
/* In all other states, it's illegal to issue HPI */
pr_debug("%s: HPI cannot be sent. Card state=%d\n",
mmc_hostname(card->host), R1_CURRENT_STATE(status));
err = -EINVAL;
goto out;
}
err = mmc_send_hpi_cmd(card);
out:
return err;
}
int mmc_can_ext_csd(struct mmc_card *card)
{
return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}
static int mmc_read_bkops_status(struct mmc_card *card)
{
int err;
u8 *ext_csd;
err = mmc_get_ext_csd(card, &ext_csd);
if (err)
return err;
card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
kfree(ext_csd);
return 0;
}
/**
* mmc_run_bkops - Run BKOPS for supported cards
* @card: MMC card to run BKOPS for
*
* Run background operations synchronously for cards having manual BKOPS
* enabled and in case it reports urgent BKOPS level.
*/
void mmc_run_bkops(struct mmc_card *card)
{
int err;
if (!card->ext_csd.man_bkops_en)
return;
err = mmc_read_bkops_status(card);
if (err) {
pr_err("%s: Failed to read bkops status: %d\n",
mmc_hostname(card->host), err);
return;
}
if (!card->ext_csd.raw_bkops_status ||
card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
return;
mmc_retune_hold(card->host);
/*
* For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. Going forward, we may consider starting BKOPS for less
	 * urgent levels by using an asynchronous background task, when idle.
*/
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
/*
* If the BKOPS timed out, the card is probably still busy in the
* R1_STATE_PRG. Rather than continue to wait, let's try to abort
* it with a HPI command to get back into R1_STATE_TRAN.
*/
if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
else if (err)
pr_warn("%s: Error %d running bkops\n",
mmc_hostname(card->host), err);
mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
int err;
if (!card->ext_csd.cmdq_support)
return -EOPNOTSUPP;
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
val, card->ext_csd.generic_cmd6_time);
if (!err)
card->ext_csd.cmdq_en = enable;
return err;
}
int mmc_cmdq_enable(struct mmc_card *card)
{
return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
int mmc_cmdq_disable(struct mmc_card *card)
{
return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
{
struct mmc_host *host = card->host;
int err;
if (!mmc_can_sanitize(card)) {
pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
return -EOPNOTSUPP;
}
if (!timeout_ms)
timeout_ms = MMC_SANITIZE_TIMEOUT_MS;
pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));
mmc_retune_hold(host);
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
1, timeout_ms, 0, true, false, 0);
if (err)
pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);
/*
* If the sanitize operation timed out, the card is probably still busy
* in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
* it with a HPI command to get back into R1_STATE_TRAN.
*/
if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));
mmc_retune_release(host);
pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);
| linux-master | drivers/mmc/core/mmc_ops.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pwrseq_sd8787.c - power sequence support for Marvell SD8787 BT + Wifi chip
*
* Copyright (C) 2016 Matt Ranostay <[email protected]>
*
* Based on the original work pwrseq_simple.c
* Copyright (C) 2014 Linaro Ltd
* Author: Ulf Hansson <[email protected]>
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/mmc/host.h>
#include "pwrseq.h"
struct mmc_pwrseq_sd8787 {
struct mmc_pwrseq pwrseq;
struct gpio_desc *reset_gpio;
struct gpio_desc *pwrdn_gpio;
};
#define to_pwrseq_sd8787(p) container_of(p, struct mmc_pwrseq_sd8787, pwrseq)
static void mmc_pwrseq_sd8787_pre_power_on(struct mmc_host *host)
{
struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq);
gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
msleep(300);
gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1);
}
static void mmc_pwrseq_sd8787_power_off(struct mmc_host *host)
{
struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq);
gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 0);
gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
}
static void mmc_pwrseq_wilc1000_pre_power_on(struct mmc_host *host)
{
struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq);
/* The pwrdn_gpio is really CHIP_EN, reset_gpio is RESETN */
gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1);
msleep(5);
gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
}
static void mmc_pwrseq_wilc1000_power_off(struct mmc_host *host)
{
struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq);
gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 0);
}
static const struct mmc_pwrseq_ops mmc_pwrseq_sd8787_ops = {
.pre_power_on = mmc_pwrseq_sd8787_pre_power_on,
.power_off = mmc_pwrseq_sd8787_power_off,
};
static const struct mmc_pwrseq_ops mmc_pwrseq_wilc1000_ops = {
.pre_power_on = mmc_pwrseq_wilc1000_pre_power_on,
.power_off = mmc_pwrseq_wilc1000_power_off,
};
static const struct of_device_id mmc_pwrseq_sd8787_of_match[] = {
{ .compatible = "mmc-pwrseq-sd8787", .data = &mmc_pwrseq_sd8787_ops },
{ .compatible = "mmc-pwrseq-wilc1000", .data = &mmc_pwrseq_wilc1000_ops },
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, mmc_pwrseq_sd8787_of_match);
static int mmc_pwrseq_sd8787_probe(struct platform_device *pdev)
{
struct mmc_pwrseq_sd8787 *pwrseq;
struct device *dev = &pdev->dev;
const struct of_device_id *match;
pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
if (!pwrseq)
return -ENOMEM;
match = of_match_node(mmc_pwrseq_sd8787_of_match, pdev->dev.of_node);
pwrseq->pwrdn_gpio = devm_gpiod_get(dev, "powerdown", GPIOD_OUT_LOW);
if (IS_ERR(pwrseq->pwrdn_gpio))
return PTR_ERR(pwrseq->pwrdn_gpio);
pwrseq->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(pwrseq->reset_gpio))
return PTR_ERR(pwrseq->reset_gpio);
pwrseq->pwrseq.dev = dev;
pwrseq->pwrseq.ops = match->data;
pwrseq->pwrseq.owner = THIS_MODULE;
platform_set_drvdata(pdev, pwrseq);
return mmc_pwrseq_register(&pwrseq->pwrseq);
}
static void mmc_pwrseq_sd8787_remove(struct platform_device *pdev)
{
struct mmc_pwrseq_sd8787 *pwrseq = platform_get_drvdata(pdev);
mmc_pwrseq_unregister(&pwrseq->pwrseq);
}
static struct platform_driver mmc_pwrseq_sd8787_driver = {
.probe = mmc_pwrseq_sd8787_probe,
.remove_new = mmc_pwrseq_sd8787_remove,
.driver = {
.name = "pwrseq_sd8787",
.of_match_table = mmc_pwrseq_sd8787_of_match,
},
};
module_platform_driver(mmc_pwrseq_sd8787_driver);
MODULE_LICENSE("GPL v2");
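/*
 * Illustrative device tree usage (an assumption, not part of this file):
 * the power sequence is referenced from the host controller node via the
 * standard mmc-pwrseq property, e.g.:
 *
 *	wifi_pwrseq: wifi-pwrseq {
 *		compatible = "mmc-pwrseq-sd8787";
 *		powerdown-gpios = <&gpio 3 GPIO_ACTIVE_HIGH>;
 *		reset-gpios = <&gpio 4 GPIO_ACTIVE_HIGH>;
 *	};
 */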
| linux-master | drivers/mmc/core/pwrseq_sd8787.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* linux/drivers/mmc/core/sdio_irq.c
*
* Author: Nicolas Pitre
* Created: June 18, 2007
* Copyright: MontaVista Software Inc.
*
* Copyright 2008 Pierre Ossman
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include "sdio_ops.h"
#include "core.h"
#include "card.h"
static int sdio_get_pending_irqs(struct mmc_host *host, u8 *pending)
{
struct mmc_card *card = host->card;
int ret;
WARN_ON(!host->claimed);
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, pending);
if (ret) {
pr_debug("%s: error %d reading SDIO_CCCR_INTx\n",
mmc_card_id(card), ret);
return ret;
}
if (*pending && mmc_card_broken_irq_polling(card) &&
!(host->caps & MMC_CAP_SDIO_IRQ)) {
unsigned char dummy;
		/*
		 * A fake interrupt can be created when we poll the
		 * SDIO_CCCR_INTx register with a Marvell SD8797 card. A
		 * dummy CMD52 read to function 0 register 0xff avoids this.
		 */
mmc_io_rw_direct(card, 0, 0, 0xff, 0, &dummy);
}
return 0;
}
static int process_sdio_pending_irqs(struct mmc_host *host)
{
struct mmc_card *card = host->card;
int i, ret, count;
bool sdio_irq_pending = host->sdio_irq_pending;
unsigned char pending;
struct sdio_func *func;
/* Don't process SDIO IRQs if the card is suspended. */
if (mmc_card_suspended(card))
return 0;
/* Clear the flag to indicate that we have processed the IRQ. */
host->sdio_irq_pending = false;
/*
	 * Optimization: if only one function interrupt is registered and we
	 * know an IRQ was signaled, call its handler directly. Otherwise do
	 * the full probe.
*/
func = card->sdio_single_irq;
if (func && sdio_irq_pending) {
func->irq_handler(func);
return 1;
}
ret = sdio_get_pending_irqs(host, &pending);
if (ret)
return ret;
count = 0;
for (i = 1; i <= 7; i++) {
if (pending & (1 << i)) {
func = card->sdio_func[i - 1];
if (!func) {
pr_warn("%s: pending IRQ for non-existent function\n",
mmc_card_id(card));
ret = -EINVAL;
} else if (func->irq_handler) {
func->irq_handler(func);
count++;
} else {
pr_warn("%s: pending IRQ with no handler\n",
sdio_func_id(func));
ret = -EINVAL;
}
}
}
if (count)
return count;
return ret;
}
static void sdio_run_irqs(struct mmc_host *host)
{
mmc_claim_host(host);
if (host->sdio_irqs) {
process_sdio_pending_irqs(host);
if (!host->sdio_irq_pending)
host->ops->ack_sdio_irq(host);
}
mmc_release_host(host);
}
void sdio_irq_work(struct work_struct *work)
{
struct mmc_host *host =
container_of(work, struct mmc_host, sdio_irq_work);
sdio_run_irqs(host);
}
void sdio_signal_irq(struct mmc_host *host)
{
host->sdio_irq_pending = true;
schedule_work(&host->sdio_irq_work);
}
EXPORT_SYMBOL_GPL(sdio_signal_irq);
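/*
 * Illustrative sketch (an assumption, not part of this file, and assuming
 * <linux/interrupt.h>): a host driver using MMC_CAP2_SDIO_IRQ_NOTHREAD
 * forwards card interrupts from its own interrupt handler; sdio_irq_work()
 * then processes and acks them via ->ack_sdio_irq().
 */
static irqreturn_t my_host_irq_sketch(int irq, void *dev_id)
{
	struct mmc_host *mmc = dev_id;

	/* ... assume the hardware indicated a pending card interrupt ... */
	sdio_signal_irq(mmc);

	return IRQ_HANDLED;
}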
static int sdio_irq_thread(void *_host)
{
struct mmc_host *host = _host;
unsigned long period, idle_period;
int ret;
sched_set_fifo_low(current);
/*
	 * We want SDIO cards to work even on hosts that are not SDIO aware.
	 * One thing such a host cannot do is asynchronous notification of
	 * pending SDIO card interrupts, hence we poll for them in that case.
*/
idle_period = msecs_to_jiffies(10);
period = (host->caps & MMC_CAP_SDIO_IRQ) ?
MAX_SCHEDULE_TIMEOUT : idle_period;
pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n",
mmc_hostname(host), period);
do {
/*
		 * We claim the host here on the drivers' behalf for a couple of
* reasons:
*
* 1) it is already needed to retrieve the CCCR_INTx;
* 2) we want the driver(s) to clear the IRQ condition ASAP;
* 3) we need to control the abort condition locally.
*
* Just like traditional hard IRQ handlers, we expect SDIO
* IRQ handlers to be quick and to the point, so that the
* holding of the host lock does not cover too much work
* that doesn't require that lock to be held.
*/
ret = __mmc_claim_host(host, NULL,
&host->sdio_irq_thread_abort);
if (ret)
break;
ret = process_sdio_pending_irqs(host);
mmc_release_host(host);
/*
* Give other threads a chance to run in the presence of
* errors.
*/
if (ret < 0) {
set_current_state(TASK_INTERRUPTIBLE);
if (!kthread_should_stop())
schedule_timeout(HZ);
set_current_state(TASK_RUNNING);
}
/*
* Adaptive polling frequency based on the assumption
* that an interrupt will be closely followed by more.
* This has a substantial benefit for network devices.
*/
if (!(host->caps & MMC_CAP_SDIO_IRQ)) {
if (ret > 0)
period /= 2;
else {
period++;
if (period > idle_period)
period = idle_period;
}
}
set_current_state(TASK_INTERRUPTIBLE);
if (host->caps & MMC_CAP_SDIO_IRQ)
host->ops->enable_sdio_irq(host, 1);
if (!kthread_should_stop())
schedule_timeout(period);
set_current_state(TASK_RUNNING);
} while (!kthread_should_stop());
if (host->caps & MMC_CAP_SDIO_IRQ)
host->ops->enable_sdio_irq(host, 0);
pr_debug("%s: IRQ thread exiting with code %d\n",
mmc_hostname(host), ret);
return ret;
}
static int sdio_card_irq_get(struct mmc_card *card)
{
struct mmc_host *host = card->host;
WARN_ON(!host->claimed);
if (!host->sdio_irqs++) {
if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
atomic_set(&host->sdio_irq_thread_abort, 0);
host->sdio_irq_thread =
kthread_run(sdio_irq_thread, host,
"ksdioirqd/%s", mmc_hostname(host));
if (IS_ERR(host->sdio_irq_thread)) {
int err = PTR_ERR(host->sdio_irq_thread);
host->sdio_irqs--;
return err;
}
} else if (host->caps & MMC_CAP_SDIO_IRQ) {
host->ops->enable_sdio_irq(host, 1);
}
}
return 0;
}
static int sdio_card_irq_put(struct mmc_card *card)
{
struct mmc_host *host = card->host;
WARN_ON(!host->claimed);
if (host->sdio_irqs < 1)
return -EINVAL;
if (!--host->sdio_irqs) {
if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
atomic_set(&host->sdio_irq_thread_abort, 1);
kthread_stop(host->sdio_irq_thread);
} else if (host->caps & MMC_CAP_SDIO_IRQ) {
host->ops->enable_sdio_irq(host, 0);
}
}
return 0;
}
/* If there is only one IRQ handler registered, set sdio_single_irq */
static void sdio_single_irq_set(struct mmc_card *card)
{
struct sdio_func *func;
int i;
card->sdio_single_irq = NULL;
if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
card->host->sdio_irqs == 1) {
for (i = 0; i < card->sdio_funcs; i++) {
func = card->sdio_func[i];
if (func && func->irq_handler) {
card->sdio_single_irq = func;
break;
}
}
}
}
/**
 * sdio_claim_irq - claim the IRQ for an SDIO function
* @func: SDIO function
* @handler: IRQ handler callback
*
* Claim and activate the IRQ for the given SDIO function. The provided
* handler will be called when that IRQ is asserted. The host is always
* claimed already when the handler is called so the handler should not
* call sdio_claim_host() or sdio_release_host().
*/
int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
{
int ret;
unsigned char reg;
if (!func)
return -EINVAL;
pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func));
if (func->irq_handler) {
pr_debug("SDIO: IRQ for %s already in use.\n", sdio_func_id(func));
return -EBUSY;
}
	ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
if (ret)
return ret;
reg |= 1 << func->num;
reg |= 1; /* Master interrupt enable */
ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
if (ret)
return ret;
func->irq_handler = handler;
ret = sdio_card_irq_get(func->card);
if (ret)
func->irq_handler = NULL;
sdio_single_irq_set(func->card);
return ret;
}
EXPORT_SYMBOL_GPL(sdio_claim_irq);
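/*
 * Illustrative sketch (an assumption, not part of this file): an SDIO
 * function driver typically claims its IRQ from probe() while holding the
 * host, matching the WARN_ON(!host->claimed) checks above. my_irq_handler
 * and my_probe_sketch are hypothetical names.
 */
static void my_irq_handler(struct sdio_func *func)
{
	/* The host is already claimed here; just service the function. */
}

static int my_probe_sketch(struct sdio_func *func)
{
	int ret;

	sdio_claim_host(func);
	ret = sdio_claim_irq(func, my_irq_handler);
	sdio_release_host(func);

	return ret;
}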
/**
 * sdio_release_irq - release the IRQ for an SDIO function
* @func: SDIO function
*
* Disable and release the IRQ for the given SDIO function.
*/
int sdio_release_irq(struct sdio_func *func)
{
int ret;
unsigned char reg;
if (!func)
return -EINVAL;
pr_debug("SDIO: Disabling IRQ for %s...\n", sdio_func_id(func));
if (func->irq_handler) {
func->irq_handler = NULL;
sdio_card_irq_put(func->card);
sdio_single_irq_set(func->card);
}
	ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
if (ret)
return ret;
reg &= ~(1 << func->num);
/* Disable master interrupt with the last function interrupt */
if (!(reg & 0xFE))
reg = 0;
ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
if (ret)
return ret;
return 0;
}
EXPORT_SYMBOL_GPL(sdio_release_irq);
| linux-master | drivers/mmc/core/sdio_irq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Russell King, All Rights Reserved.
* Copyright 2006-2007 Pierre Ossman
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/backing-dev.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "crypto.h"
#include "host.h"
#define MMC_DMA_MAP_MERGE_SEGMENTS 512
static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
/* Allow only 1 DCMD at a time */
return mq->in_flight[MMC_ISSUE_DCMD];
}
void mmc_cqe_check_busy(struct mmc_queue *mq)
{
if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
}
static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
return host->caps2 & MMC_CAP2_CQE_DCMD;
}
static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
struct request *req)
{
switch (req_op(req)) {
case REQ_OP_DRV_IN:
case REQ_OP_DRV_OUT:
case REQ_OP_DISCARD:
case REQ_OP_SECURE_ERASE:
case REQ_OP_WRITE_ZEROES:
return MMC_ISSUE_SYNC;
case REQ_OP_FLUSH:
return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
default:
return MMC_ISSUE_ASYNC;
}
}
enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
struct mmc_host *host = mq->card->host;
if (host->cqe_enabled && !host->hsq_enabled)
return mmc_cqe_issue_type(host, req);
if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
return MMC_ISSUE_ASYNC;
return MMC_ISSUE_SYNC;
}
static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
if (!mq->recovery_needed) {
mq->recovery_needed = true;
schedule_work(&mq->recovery_work);
}
}
void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
brq.mrq);
struct request *req = mmc_queue_req_to_req(mqrq);
struct request_queue *q = req->q;
struct mmc_queue *mq = q->queuedata;
unsigned long flags;
spin_lock_irqsave(&mq->lock, flags);
__mmc_cqe_recovery_notifier(mq);
spin_unlock_irqrestore(&mq->lock, flags);
}
static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_request *mrq = &mqrq->brq.mrq;
struct mmc_queue *mq = req->q->queuedata;
struct mmc_host *host = mq->card->host;
enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
bool recovery_needed = false;
switch (issue_type) {
case MMC_ISSUE_ASYNC:
case MMC_ISSUE_DCMD:
if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
if (recovery_needed)
mmc_cqe_recovery_notifier(mrq);
return BLK_EH_RESET_TIMER;
}
/* The request has gone already */
return BLK_EH_DONE;
default:
/* Timeout is handled by mmc core */
return BLK_EH_RESET_TIMER;
}
}
static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req)
{
struct request_queue *q = req->q;
struct mmc_queue *mq = q->queuedata;
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
unsigned long flags;
bool ignore_tout;
spin_lock_irqsave(&mq->lock, flags);
ignore_tout = mq->recovery_needed || !host->cqe_enabled || host->hsq_enabled;
spin_unlock_irqrestore(&mq->lock, flags);
return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}
static void mmc_mq_recovery_handler(struct work_struct *work)
{
struct mmc_queue *mq = container_of(work, struct mmc_queue,
recovery_work);
struct request_queue *q = mq->queue;
struct mmc_host *host = mq->card->host;
mmc_get_card(mq->card, &mq->ctx);
mq->in_recovery = true;
if (host->cqe_enabled && !host->hsq_enabled)
mmc_blk_cqe_recovery(mq);
else
mmc_blk_mq_recovery(mq);
mq->in_recovery = false;
spin_lock_irq(&mq->lock);
mq->recovery_needed = false;
spin_unlock_irq(&mq->lock);
if (host->hsq_enabled)
host->cqe_ops->cqe_recovery_finish(host);
mmc_put_card(mq->card, &mq->ctx);
blk_mq_run_hw_queues(q, true);
}
static struct scatterlist *mmc_alloc_sg(unsigned short sg_len, gfp_t gfp)
{
struct scatterlist *sg;
sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
if (sg)
sg_init_table(sg, sg_len);
return sg;
}
static void mmc_queue_setup_discard(struct request_queue *q,
struct mmc_card *card)
{
unsigned max_discard;
max_discard = mmc_calc_max_discard(card);
if (!max_discard)
return;
blk_queue_max_discard_sectors(q, max_discard);
q->limits.discard_granularity = card->pref_erase << 9;
/* granularity must not be greater than max. discard */
if (card->pref_erase > max_discard)
q->limits.discard_granularity = SECTOR_SIZE;
if (mmc_can_secure_erase_trim(card))
blk_queue_max_secure_erase_sectors(q, max_discard);
if (mmc_can_trim(card) && card->erased_byte == 0)
blk_queue_max_write_zeroes_sectors(q, max_discard);
}
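/*
 * Worked example (illustrative): with pref_erase = 8192 sectors, the
 * discard granularity above becomes 8192 << 9 = 4 MiB; if pref_erase
 * exceeded max_discard it would be clamped back to SECTOR_SIZE instead.
 */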
static unsigned short mmc_get_max_segments(struct mmc_host *host)
{
return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
host->max_segs;
}
static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
unsigned int hctx_idx, unsigned int numa_node)
{
struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
struct mmc_queue *mq = set->driver_data;
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), GFP_KERNEL);
if (!mq_rq->sg)
return -ENOMEM;
return 0;
}
static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
unsigned int hctx_idx)
{
struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
kfree(mq_rq->sg);
mq_rq->sg = NULL;
}
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *req = bd->rq;
struct request_queue *q = req->q;
struct mmc_queue *mq = q->queuedata;
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
enum mmc_issue_type issue_type;
enum mmc_issued issued;
bool get_card, cqe_retune_ok;
blk_status_t ret;
if (mmc_card_removed(mq->card)) {
req->rq_flags |= RQF_QUIET;
return BLK_STS_IOERR;
}
issue_type = mmc_issue_type(mq, req);
spin_lock_irq(&mq->lock);
if (mq->recovery_needed || mq->busy) {
spin_unlock_irq(&mq->lock);
return BLK_STS_RESOURCE;
}
switch (issue_type) {
case MMC_ISSUE_DCMD:
if (mmc_cqe_dcmd_busy(mq)) {
mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
spin_unlock_irq(&mq->lock);
return BLK_STS_RESOURCE;
}
break;
case MMC_ISSUE_ASYNC:
/*
* For MMC host software queue, we only allow 2 requests in
* flight to avoid a long latency.
*/
if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
spin_unlock_irq(&mq->lock);
return BLK_STS_RESOURCE;
}
break;
default:
/*
* Timeouts are handled by mmc core, and we don't have a host
* API to abort requests, so we can't handle the timeout anyway.
* However, when the timeout happens, blk_mq_complete_request()
* no longer works (to stop the request disappearing under us).
* To avoid racing with that, set a large timeout.
*/
req->timeout = 600 * HZ;
break;
}
/* Parallel dispatch of requests is not supported at the moment */
mq->busy = true;
mq->in_flight[issue_type] += 1;
get_card = (mmc_tot_in_flight(mq) == 1);
cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
spin_unlock_irq(&mq->lock);
if (!(req->rq_flags & RQF_DONTPREP)) {
req_to_mmc_queue_req(req)->retries = 0;
req->rq_flags |= RQF_DONTPREP;
}
if (get_card)
mmc_get_card(card, &mq->ctx);
if (host->cqe_enabled) {
host->retune_now = host->need_retune && cqe_retune_ok &&
!host->hold_retune;
}
blk_mq_start_request(req);
issued = mmc_blk_mq_issue_rq(mq, req);
switch (issued) {
case MMC_REQ_BUSY:
ret = BLK_STS_RESOURCE;
break;
case MMC_REQ_FAILED_TO_START:
ret = BLK_STS_IOERR;
break;
default:
ret = BLK_STS_OK;
break;
}
if (issued != MMC_REQ_STARTED) {
bool put_card = false;
spin_lock_irq(&mq->lock);
mq->in_flight[issue_type] -= 1;
if (mmc_tot_in_flight(mq) == 0)
put_card = true;
mq->busy = false;
spin_unlock_irq(&mq->lock);
if (put_card)
mmc_put_card(card, &mq->ctx);
} else {
WRITE_ONCE(mq->busy, false);
}
return ret;
}
static const struct blk_mq_ops mmc_mq_ops = {
.queue_rq = mmc_mq_queue_rq,
.init_request = mmc_mq_init_request,
.exit_request = mmc_mq_exit_request,
.complete = mmc_blk_mq_complete,
.timeout = mmc_mq_timed_out,
};
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
struct mmc_host *host = card->host;
unsigned block_size = 512;
blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
if (mmc_can_erase(card))
mmc_queue_setup_discard(mq->queue, card);
if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
if (host->can_dma_map_merge)
WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
mmc_dev(host)),
"merging was advertised but not possible");
blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
block_size = card->ext_csd.data_sector_size;
WARN_ON(block_size != 512 && block_size != 4096);
}
blk_queue_logical_block_size(mq->queue, block_size);
/*
	 * When blk_queue_can_use_dma_map_merging() succeeds it calls
	 * blk_queue_virt_boundary(), so mmc must not also call
	 * blk_queue_max_segment_size().
*/
if (!host->can_dma_map_merge)
blk_queue_max_segment_size(mq->queue,
round_down(host->max_seg_size, block_size));
dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
mutex_init(&mq->complete_lock);
init_waitqueue_head(&mq->wait);
mmc_crypto_setup_queue(mq->queue, host);
}
static inline bool mmc_merge_capable(struct mmc_host *host)
{
return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
}
/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64
/**
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
* @card: mmc card to attach this queue
*
* Initialise a MMC card request queue.
*/
struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
struct mmc_host *host = card->host;
struct gendisk *disk;
int ret;
mq->card = card;
spin_lock_init(&mq->lock);
memset(&mq->tag_set, 0, sizeof(mq->tag_set));
mq->tag_set.ops = &mmc_mq_ops;
/*
* The queue depth for CQE must match the hardware because the request
* tag is used to index the hardware queue.
*/
if (host->cqe_enabled && !host->hsq_enabled)
mq->tag_set.queue_depth =
min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
else
mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
mq->tag_set.numa_node = NUMA_NO_NODE;
mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
mq->tag_set.nr_hw_queues = 1;
mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
mq->tag_set.driver_data = mq;
/*
* Since blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops,
	 * host->can_dma_map_merge must be set beforehand so that max_segs is
	 * obtained correctly from mmc_get_max_segments().
*/
if (mmc_merge_capable(host) &&
host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
dma_get_merge_boundary(mmc_dev(host)))
host->can_dma_map_merge = 1;
else
host->can_dma_map_merge = 0;
ret = blk_mq_alloc_tag_set(&mq->tag_set);
if (ret)
return ERR_PTR(ret);
disk = blk_mq_alloc_disk(&mq->tag_set, mq);
if (IS_ERR(disk)) {
blk_mq_free_tag_set(&mq->tag_set);
return disk;
}
mq->queue = disk->queue;
if (mmc_host_is_spi(host) && host->use_spi_crc)
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
blk_queue_rq_timeout(mq->queue, 60 * HZ);
mmc_setup_queue(mq, card);
return disk;
}
void mmc_queue_suspend(struct mmc_queue *mq)
{
blk_mq_quiesce_queue(mq->queue);
/*
* The host remains claimed while there are outstanding requests, so
* simply claiming and releasing here ensures there are none.
*/
mmc_claim_host(mq->card->host);
mmc_release_host(mq->card->host);
}
void mmc_queue_resume(struct mmc_queue *mq)
{
blk_mq_unquiesce_queue(mq->queue);
}
void mmc_cleanup_queue(struct mmc_queue *mq)
{
struct request_queue *q = mq->queue;
/*
* The legacy code handled the possibility of being suspended,
* so do that here too.
*/
if (blk_queue_quiesced(q))
blk_mq_unquiesce_queue(q);
/*
* If the recovery completes the last (and only remaining) request in
* the queue, and the card has been removed, we could end up here with
* the recovery not quite finished yet, so cancel it.
*/
cancel_work_sync(&mq->recovery_work);
blk_mq_free_tag_set(&mq->tag_set);
/*
* A request can be completed before the next request, potentially
* leaving a complete_work with nothing to do. Such a work item might
* still be queued at this point. Flush it.
*/
flush_work(&mq->complete_work);
mq->card = NULL;
}
/*
 * Prepare the sg list(s) to be handed off to the host driver
*/
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
struct request *req = mmc_queue_req_to_req(mqrq);
return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}
| linux-master | drivers/mmc/core/queue.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Linaro Ltd
*
* Author: Ulf Hansson <[email protected]>
*
* Simple MMC power sequence management
*/
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/property.h>
#include <linux/mmc/host.h>
#include "pwrseq.h"
struct mmc_pwrseq_simple {
struct mmc_pwrseq pwrseq;
bool clk_enabled;
u32 post_power_on_delay_ms;
u32 power_off_delay_us;
struct clk *ext_clk;
struct gpio_descs *reset_gpios;
};
#define to_pwrseq_simple(p) container_of(p, struct mmc_pwrseq_simple, pwrseq)
static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
int value)
{
struct gpio_descs *reset_gpios = pwrseq->reset_gpios;
if (!IS_ERR(reset_gpios)) {
unsigned long *values;
int nvalues = reset_gpios->ndescs;
values = bitmap_alloc(nvalues, GFP_KERNEL);
if (!values)
return;
if (value)
bitmap_fill(values, nvalues);
else
bitmap_zero(values, nvalues);
gpiod_set_array_value_cansleep(nvalues, reset_gpios->desc,
reset_gpios->info, values);
bitmap_free(values);
}
}
static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
{
struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
if (!IS_ERR(pwrseq->ext_clk) && !pwrseq->clk_enabled) {
clk_prepare_enable(pwrseq->ext_clk);
pwrseq->clk_enabled = true;
}
mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
}
static void mmc_pwrseq_simple_post_power_on(struct mmc_host *host)
{
struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
mmc_pwrseq_simple_set_gpios_value(pwrseq, 0);
if (pwrseq->post_power_on_delay_ms)
msleep(pwrseq->post_power_on_delay_ms);
}
static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
{
struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
if (pwrseq->power_off_delay_us)
usleep_range(pwrseq->power_off_delay_us,
2 * pwrseq->power_off_delay_us);
if (!IS_ERR(pwrseq->ext_clk) && pwrseq->clk_enabled) {
clk_disable_unprepare(pwrseq->ext_clk);
pwrseq->clk_enabled = false;
}
}
static const struct mmc_pwrseq_ops mmc_pwrseq_simple_ops = {
.pre_power_on = mmc_pwrseq_simple_pre_power_on,
.post_power_on = mmc_pwrseq_simple_post_power_on,
.power_off = mmc_pwrseq_simple_power_off,
};
static const struct of_device_id mmc_pwrseq_simple_of_match[] = {
{ .compatible = "mmc-pwrseq-simple",},
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, mmc_pwrseq_simple_of_match);
static int mmc_pwrseq_simple_probe(struct platform_device *pdev)
{
struct mmc_pwrseq_simple *pwrseq;
struct device *dev = &pdev->dev;
pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
if (!pwrseq)
return -ENOMEM;
pwrseq->ext_clk = devm_clk_get(dev, "ext_clock");
if (IS_ERR(pwrseq->ext_clk) && PTR_ERR(pwrseq->ext_clk) != -ENOENT)
return dev_err_probe(dev, PTR_ERR(pwrseq->ext_clk), "external clock not ready\n");
pwrseq->reset_gpios = devm_gpiod_get_array(dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(pwrseq->reset_gpios) &&
PTR_ERR(pwrseq->reset_gpios) != -ENOENT &&
PTR_ERR(pwrseq->reset_gpios) != -ENOSYS) {
return dev_err_probe(dev, PTR_ERR(pwrseq->reset_gpios), "reset GPIOs not ready\n");
}
device_property_read_u32(dev, "post-power-on-delay-ms",
&pwrseq->post_power_on_delay_ms);
device_property_read_u32(dev, "power-off-delay-us",
&pwrseq->power_off_delay_us);
pwrseq->pwrseq.dev = dev;
pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
pwrseq->pwrseq.owner = THIS_MODULE;
platform_set_drvdata(pdev, pwrseq);
return mmc_pwrseq_register(&pwrseq->pwrseq);
}
static void mmc_pwrseq_simple_remove(struct platform_device *pdev)
{
struct mmc_pwrseq_simple *pwrseq = platform_get_drvdata(pdev);
mmc_pwrseq_unregister(&pwrseq->pwrseq);
}
static struct platform_driver mmc_pwrseq_simple_driver = {
.probe = mmc_pwrseq_simple_probe,
.remove_new = mmc_pwrseq_simple_remove,
.driver = {
.name = "pwrseq_simple",
.of_match_table = mmc_pwrseq_simple_of_match,
},
};
module_platform_driver(mmc_pwrseq_simple_driver);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mmc/core/pwrseq_simple.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/core/sd_ops.c
*
* Copyright 2006-2007 Pierre Ossman
*/
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include "core.h"
#include "sd_ops.h"
#include "mmc_ops.h"
int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
{
int err;
struct mmc_command cmd = {};
if (WARN_ON(card && card->host != host))
return -EINVAL;
cmd.opcode = MMC_APP_CMD;
if (card) {
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
} else {
cmd.arg = 0;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_BCR;
}
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
return err;
	/* Check that the card supports application commands */
if (!mmc_host_is_spi(host) && !(cmd.resp[0] & R1_APP_CMD))
return -EOPNOTSUPP;
return 0;
}
EXPORT_SYMBOL_GPL(mmc_app_cmd);
static int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
struct mmc_command *cmd)
{
struct mmc_request mrq = {};
int i, err = -EIO;
/*
* We have to resend MMC_APP_CMD for each attempt so
* we cannot use the retries field in mmc_command.
*/
for (i = 0; i <= MMC_CMD_RETRIES; i++) {
err = mmc_app_cmd(host, card);
if (err) {
/* no point in retrying; no APP commands allowed */
if (mmc_host_is_spi(host)) {
if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
break;
}
continue;
}
memset(&mrq, 0, sizeof(struct mmc_request));
memset(cmd->resp, 0, sizeof(cmd->resp));
cmd->retries = 0;
mrq.cmd = cmd;
cmd->data = NULL;
mmc_wait_for_req(host, &mrq);
err = cmd->error;
if (!cmd->error)
break;
/* no point in retrying illegal APP commands */
if (mmc_host_is_spi(host)) {
if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
break;
}
}
return err;
}
int mmc_app_set_bus_width(struct mmc_card *card, int width)
{
struct mmc_command cmd = {};
cmd.opcode = SD_APP_SET_BUS_WIDTH;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
switch (width) {
case MMC_BUS_WIDTH_1:
cmd.arg = SD_BUS_WIDTH_1;
break;
case MMC_BUS_WIDTH_4:
cmd.arg = SD_BUS_WIDTH_4;
break;
default:
return -EINVAL;
}
return mmc_wait_for_app_cmd(card->host, card, &cmd);
}
int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
struct mmc_command cmd = {};
int i, err = 0;
cmd.opcode = SD_APP_OP_COND;
if (mmc_host_is_spi(host))
cmd.arg = ocr & (1 << 30); /* SPI only defines one bit */
else
cmd.arg = ocr;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
for (i = 100; i; i--) {
err = mmc_wait_for_app_cmd(host, NULL, &cmd);
if (err)
break;
/* if we're just probing, do a single pass */
if (ocr == 0)
break;
/* otherwise wait until reset completes */
if (mmc_host_is_spi(host)) {
if (!(cmd.resp[0] & R1_SPI_IDLE))
break;
} else {
if (cmd.resp[0] & MMC_CARD_BUSY)
break;
}
err = -ETIMEDOUT;
mmc_delay(10);
}
if (!i)
pr_err("%s: card never left busy state\n", mmc_hostname(host));
if (rocr && !mmc_host_is_spi(host))
*rocr = cmd.resp[0];
return err;
}
static int __mmc_send_if_cond(struct mmc_host *host, u32 ocr, u8 pcie_bits,
u32 *resp)
{
struct mmc_command cmd = {};
int err;
static const u8 test_pattern = 0xAA;
u8 result_pattern;
/*
* To support SD 2.0 cards, we must always invoke SD_SEND_IF_COND
* before SD_APP_OP_COND. This command will harmlessly fail for
* SD 1.0 cards.
*/
cmd.opcode = SD_SEND_IF_COND;
cmd.arg = ((ocr & 0xFF8000) != 0) << 8 | pcie_bits << 8 | test_pattern;
cmd.flags = MMC_RSP_SPI_R7 | MMC_RSP_R7 | MMC_CMD_BCR;
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
return err;
if (mmc_host_is_spi(host))
result_pattern = cmd.resp[1] & 0xFF;
else
result_pattern = cmd.resp[0] & 0xFF;
if (result_pattern != test_pattern)
return -EIO;
if (resp)
*resp = cmd.resp[0];
return 0;
}
int mmc_send_if_cond(struct mmc_host *host, u32 ocr)
{
return __mmc_send_if_cond(host, ocr, 0, NULL);
}
int mmc_send_if_cond_pcie(struct mmc_host *host, u32 ocr)
{
u32 resp = 0;
u8 pcie_bits = 0;
int ret;
if (host->caps2 & MMC_CAP2_SD_EXP) {
/* Probe card for SD express support via PCIe. */
pcie_bits = 0x10;
if (host->caps2 & MMC_CAP2_SD_EXP_1_2V)
/* Probe also for 1.2V support. */
pcie_bits = 0x30;
}
ret = __mmc_send_if_cond(host, ocr, pcie_bits, &resp);
if (ret)
return 0;
/* Continue with the SD express init, if the card supports it. */
resp &= 0x3000;
if (pcie_bits && resp) {
if (resp == 0x3000)
host->ios.timing = MMC_TIMING_SD_EXP_1_2V;
else
host->ios.timing = MMC_TIMING_SD_EXP;
/*
* According to the spec the clock shall also be gated, but
* let's leave this to the host driver for more flexibility.
*/
return host->ops->init_sd_express(host, &host->ios);
}
return 0;
}
int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
{
int err;
struct mmc_command cmd = {};
cmd.opcode = SD_SEND_RELATIVE_ADDR;
cmd.arg = 0;
cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR;
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
if (err)
return err;
*rca = cmd.resp[0] >> 16;
return 0;
}
int mmc_app_send_scr(struct mmc_card *card)
{
int err;
struct mmc_request mrq = {};
struct mmc_command cmd = {};
struct mmc_data data = {};
struct scatterlist sg;
__be32 *scr;
/* NOTE: caller guarantees scr is heap-allocated */
err = mmc_app_cmd(card->host, card);
if (err)
return err;
	/*
	 * DMA onto the stack is unsafe/nonportable, so allocate a temporary
	 * DMA-safe buffer rather than reading directly into card->raw_scr.
	 */
scr = kmalloc(sizeof(card->raw_scr), GFP_KERNEL);
if (!scr)
return -ENOMEM;
mrq.cmd = &cmd;
mrq.data = &data;
cmd.opcode = SD_APP_SEND_SCR;
cmd.arg = 0;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
data.blksz = 8;
data.blocks = 1;
data.flags = MMC_DATA_READ;
data.sg = &sg;
data.sg_len = 1;
sg_init_one(&sg, scr, 8);
mmc_set_data_timeout(&data, card);
mmc_wait_for_req(card->host, &mrq);
card->raw_scr[0] = be32_to_cpu(scr[0]);
card->raw_scr[1] = be32_to_cpu(scr[1]);
kfree(scr);
if (cmd.error)
return cmd.error;
if (data.error)
return data.error;
return 0;
}
int mmc_sd_switch(struct mmc_card *card, int mode, int group,
u8 value, u8 *resp)
{
u32 cmd_args;
/* NOTE: caller guarantees resp is heap-allocated */
mode = !!mode;
value &= 0xF;
cmd_args = mode << 31 | 0x00FFFFFF;
cmd_args &= ~(0xF << (group * 4));
cmd_args |= value << (group * 4);
return mmc_send_adtc_data(card, card->host, SD_SWITCH, cmd_args, resp,
64);
}
EXPORT_SYMBOL_GPL(mmc_sd_switch);
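/*
 * Worked example of the encoding above (illustrative): switching the
 * access mode group to high-speed uses mode = 1, group = 0, value = 1,
 * giving cmd_args = 0x80FFFFF1: bit 31 selects "set" mode, every other
 * group nibble stays 0xF ("no change"), and group 0's nibble requests
 * function 1. The 64-byte switch status is returned through @resp.
 */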
int mmc_app_sd_status(struct mmc_card *card, void *ssr)
{
int err;
struct mmc_request mrq = {};
struct mmc_command cmd = {};
struct mmc_data data = {};
struct scatterlist sg;
/* NOTE: caller guarantees ssr is heap-allocated */
err = mmc_app_cmd(card->host, card);
if (err)
return err;
mrq.cmd = &cmd;
mrq.data = &data;
cmd.opcode = SD_APP_SD_STATUS;
cmd.arg = 0;
cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_ADTC;
data.blksz = 64;
data.blocks = 1;
data.flags = MMC_DATA_READ;
data.sg = &sg;
data.sg_len = 1;
sg_init_one(&sg, ssr, 64);
mmc_set_data_timeout(&data, card);
mmc_wait_for_req(card->host, &mrq);
if (cmd.error)
return cmd.error;
if (data.error)
return data.error;
return 0;
}
| linux-master | drivers/mmc/core/sd_ops.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Helper functions for MMC regulators.
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/mmc/host.h>
#include "core.h"
#include "host.h"
#ifdef CONFIG_REGULATOR
/**
 * mmc_ocrbitnum_to_vdd - Convert an OCR bit number to its voltage
* @vdd_bit: OCR bit number
 * @min_uV: minimum voltage value (uV)
 * @max_uV: maximum voltage value (uV)
*
* This function returns the voltage range according to the provided OCR
 * bit number. If conversion is not possible a negative errno value is returned.
*/
static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
{
int tmp;
if (!vdd_bit)
return -EINVAL;
/*
* REVISIT mmc_vddrange_to_ocrmask() may have set some
* bits this regulator doesn't quite support ... don't
* be too picky, most cards and regulators are OK with
* a 0.1V range goof (it's a small error percentage).
*/
tmp = vdd_bit - ilog2(MMC_VDD_165_195);
if (tmp == 0) {
*min_uV = 1650 * 1000;
*max_uV = 1950 * 1000;
} else {
*min_uV = 1900 * 1000 + tmp * 100 * 1000;
*max_uV = *min_uV + 100 * 1000;
}
return 0;
}
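/*
 * Worked example (illustrative): MMC_VDD_165_195 is OCR bit 7, so tmp = 0
 * and the range is 1.65-1.95 V. For OCR bit 21 (MMC_VDD_33_34), tmp = 14,
 * giving min_uV = 1900000 + 14 * 100000 = 3300000 and max_uV = 3400000.
 */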
/**
* mmc_regulator_get_ocrmask - return mask of supported voltages
* @supply: regulator to use
*
* This returns either a negative errno, or a mask of voltages that
* can be provided to MMC/SD/SDIO devices using the specified voltage
* regulator. This would normally be called before registering the
* MMC host adapter.
*/
static int mmc_regulator_get_ocrmask(struct regulator *supply)
{
int result = 0;
int count;
int i;
int vdd_uV;
int vdd_mV;
count = regulator_count_voltages(supply);
if (count < 0)
return count;
for (i = 0; i < count; i++) {
vdd_uV = regulator_list_voltage(supply, i);
if (vdd_uV <= 0)
continue;
vdd_mV = vdd_uV / 1000;
result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
}
if (!result) {
vdd_uV = regulator_get_voltage(supply);
if (vdd_uV <= 0)
return vdd_uV;
vdd_mV = vdd_uV / 1000;
result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
}
return result;
}
/**
* mmc_regulator_set_ocr - set regulator to match host->ios voltage
* @mmc: the host to regulate
* @supply: regulator to use
* @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
*
* Returns zero on success, else negative errno.
*
* MMC host drivers may use this to enable or disable a regulator using
* a particular supply voltage. This would normally be called from the
* set_ios() method.
*/
int mmc_regulator_set_ocr(struct mmc_host *mmc,
struct regulator *supply,
unsigned short vdd_bit)
{
int result = 0;
int min_uV, max_uV;
if (IS_ERR(supply))
return 0;
if (vdd_bit) {
mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
result = regulator_set_voltage(supply, min_uV, max_uV);
if (result == 0 && !mmc->regulator_enabled) {
result = regulator_enable(supply);
if (!result)
mmc->regulator_enabled = true;
}
} else if (mmc->regulator_enabled) {
result = regulator_disable(supply);
if (result == 0)
mmc->regulator_enabled = false;
}
if (result)
dev_err(mmc_dev(mmc),
"could not set regulator OCR (%d)\n", result);
return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
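/*
 * Illustrative sketch (an assumption, not part of this file): a host
 * driver's set_ios() hook typically forwards the requested vdd bit to the
 * vmmc supply; my_set_ios_sketch is a hypothetical name.
 */
static void my_set_ios_sketch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
}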
static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
int min_uV, int target_uV,
int max_uV)
{
int current_uV;
/*
* Check if supported first to avoid errors since we may try several
* signal levels during power up and don't want to show errors.
*/
if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
return -EINVAL;
/*
* The voltage is already set, no need to switch.
* Return 1 to indicate that no switch happened.
*/
current_uV = regulator_get_voltage(regulator);
if (current_uV == target_uV)
return 1;
return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
max_uV);
}
/**
* mmc_regulator_set_vqmmc - Set VQMMC as per the ios
* @mmc: the host to regulate
* @ios: io bus settings
*
* For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
* That will match the behavior of old boards where VQMMC and VMMC were supplied
* by the same supply. The Bus Operating conditions for 3.3V signaling in the
* SD card spec also define VQMMC in terms of VMMC.
* If this is not possible we'll try the full 2.7-3.6V of the spec.
*
* For 1.2V and 1.8V signaling we'll try to get as close as possible to the
* requested voltage. This is definitely a good idea for UHS where there's a
* separate regulator on the card that's trying to make 1.8V and it's best if
* we match.
*
* This function is expected to be used by a controller's
* start_signal_voltage_switch() function.
*/
int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct device *dev = mmc_dev(mmc);
int ret, volt, min_uV, max_uV;
/* If no vqmmc supply then we can't change the voltage */
if (IS_ERR(mmc->supply.vqmmc))
return -EINVAL;
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_120:
return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1100000, 1200000, 1300000);
case MMC_SIGNAL_VOLTAGE_180:
return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1700000, 1800000, 1950000);
case MMC_SIGNAL_VOLTAGE_330:
ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
if (ret < 0)
return ret;
dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
__func__, volt, max_uV);
min_uV = max(volt - 300000, 2700000);
max_uV = min(max_uV + 200000, 3600000);
/*
* Due to a limitation in the current implementation of
		 * regulator_set_voltage_triplet(), which takes the lowest
* voltage possible if below the target, search for a suitable
* voltage in two steps and try to stay close to vmmc
* with a 0.3V tolerance at first.
*/
ret = mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
min_uV, volt, max_uV);
if (ret >= 0)
return ret;
return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
2700000, volt, 3600000);
default:
return -EINVAL;
}
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
#else
static inline int mmc_regulator_get_ocrmask(struct regulator *supply)
{
return 0;
}
#endif /* CONFIG_REGULATOR */
/**
* mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host
* @mmc: the host to regulate
*
* Returns 0 or errno. errno should be handled, it is either a critical error
* or -EPROBE_DEFER. 0 means no critical error but it does not mean all
* regulators have been found because they all are optional. If you require
* certain regulators, you need to check separately in your driver if they got
* populated after calling this function.
*/
int mmc_regulator_get_supply(struct mmc_host *mmc)
{
struct device *dev = mmc_dev(mmc);
int ret;
mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
if (IS_ERR(mmc->supply.vmmc)) {
if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_dbg(dev, "No vmmc regulator found\n");
} else {
ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
if (ret > 0)
mmc->ocr_avail = ret;
else
dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
}
if (IS_ERR(mmc->supply.vqmmc)) {
if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_dbg(dev, "No vqmmc regulator found\n");
}
return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
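/*
 * Illustrative sketch (an assumption, not part of this file): host drivers
 * typically call this from probe() and then check for the supplies they
 * actually require, since both regulators are optional.
 */
static int my_host_probe_sketch(struct mmc_host *mmc)
{
	int ret;

	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		return ret; /* critical error or -EPROBE_DEFER */

	/* This hypothetical host cannot work without a vmmc supply. */
	if (IS_ERR(mmc->supply.vmmc))
		return -ENODEV;

	return 0;
}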
/**
* mmc_regulator_enable_vqmmc - enable VQMMC regulator for a host
* @mmc: the host to regulate
*
* Returns 0 or errno. Enables the regulator for vqmmc.
* Keeps track of the enable status for ensuring that calls to
* regulator_enable/disable are balanced.
*/
int mmc_regulator_enable_vqmmc(struct mmc_host *mmc)
{
int ret = 0;
if (!IS_ERR(mmc->supply.vqmmc) && !mmc->vqmmc_enabled) {
ret = regulator_enable(mmc->supply.vqmmc);
if (ret < 0)
dev_err(mmc_dev(mmc), "enabling vqmmc regulator failed\n");
else
mmc->vqmmc_enabled = true;
}
return ret;
}
EXPORT_SYMBOL_GPL(mmc_regulator_enable_vqmmc);
/**
* mmc_regulator_disable_vqmmc - disable VQMMC regulator for a host
* @mmc: the host to regulate
*
* Returns 0 or errno. Disables the regulator for vqmmc.
* Keeps track of the enable status for ensuring that calls to
* regulator_enable/disable are balanced.
*/
void mmc_regulator_disable_vqmmc(struct mmc_host *mmc)
{
if (!IS_ERR(mmc->supply.vqmmc) && mmc->vqmmc_enabled) {
regulator_disable(mmc->supply.vqmmc);
mmc->vqmmc_enabled = false;
}
}
EXPORT_SYMBOL_GPL(mmc_regulator_disable_vqmmc);
| linux-master | drivers/mmc/core/regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/mmc/core/core.c
*
* Copyright (C) 2003-2004 Russell King, All Rights Reserved.
* SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
* Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
* MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>
#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>
#include "core.h"
#include "card.h"
#include "crypto.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"
#include "pwrseq.h"
#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"
/* The max erase timeout, used when host->max_busy_timeout isn't specified */
#define MMC_ERASE_TIMEOUT_MS (60 * 1000) /* 60 s */
#define SD_DISCARD_TIMEOUT_MS (250)
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
/*
* Enabling software CRCs on the data blocks can be a significant (30%)
* performance cost, and for other reasons may not always be desired.
* So we allow it to be disabled.
*/
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
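/*
 * Illustrative usage (an assumption, not from this file): set at load time
 * with "modprobe mmc_core use_spi_crc=0", or as "mmc_core.use_spi_crc=0"
 * on the kernel command line when the core is built in.
 */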
static int mmc_schedule_delayed_work(struct delayed_work *work,
unsigned long delay)
{
/*
	 * We use the system_freezable_wq for two reasons. First, it allows
	 * several work items (not the same work item) to be executed
	 * simultaneously. Second, the queue becomes frozen when userspace
	 * becomes frozen during system PM.
*/
return queue_delayed_work(system_freezable_wq, work, delay);
}
#ifdef CONFIG_FAIL_MMC_REQUEST
/*
* Internal function. Inject random data errors.
* If mmc_data is NULL no errors are injected.
*/
static void mmc_should_fail_request(struct mmc_host *host,
struct mmc_request *mrq)
{
struct mmc_command *cmd = mrq->cmd;
struct mmc_data *data = mrq->data;
static const int data_errors[] = {
-ETIMEDOUT,
-EILSEQ,
-EIO,
};
if (!data)
return;
if ((cmd && cmd->error) || data->error ||
!should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
return;
data->error = data_errors[get_random_u32_below(ARRAY_SIZE(data_errors))];
data->bytes_xfered = get_random_u32_below(data->bytes_xfered >> 9) << 9;
}
#else /* CONFIG_FAIL_MMC_REQUEST */
static inline void mmc_should_fail_request(struct mmc_host *host,
struct mmc_request *mrq)
{
}
#endif /* CONFIG_FAIL_MMC_REQUEST */
static inline void mmc_complete_cmd(struct mmc_request *mrq)
{
if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
complete_all(&mrq->cmd_completion);
}
void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
{
if (!mrq->cap_cmd_during_tfr)
return;
mmc_complete_cmd(mrq);
pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
mmc_hostname(host), mrq->cmd->opcode);
}
EXPORT_SYMBOL(mmc_command_done);
/**
* mmc_request_done - finish processing an MMC request
* @host: MMC host which completed request
 * @mrq: MMC request which completed
*
* MMC drivers should call this function when they have completed
* their processing of a request.
*/
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
struct mmc_command *cmd = mrq->cmd;
int err = cmd->error;
/* Flag re-tuning needed on CRC errors */
if (!mmc_op_tuning(cmd->opcode) &&
!host->retune_crc_disable &&
(err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
(mrq->data && mrq->data->error == -EILSEQ) ||
(mrq->stop && mrq->stop->error == -EILSEQ)))
mmc_retune_needed(host);
if (err && cmd->retries && mmc_host_is_spi(host)) {
if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
cmd->retries = 0;
}
if (host->ongoing_mrq == mrq)
host->ongoing_mrq = NULL;
mmc_complete_cmd(mrq);
trace_mmc_request_done(host, mrq);
/*
* We list various conditions for the command to be considered
* properly done:
*
* - There was no error, OK fine then
* - We are not doing some kind of retry
* - The card was removed (...so just complete everything no matter
* if there are errors or retries)
*/
if (!err || !cmd->retries || mmc_card_removed(host->card)) {
mmc_should_fail_request(host, mrq);
if (!host->ongoing_mrq)
led_trigger_event(host->led, LED_OFF);
if (mrq->sbc) {
pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
mmc_hostname(host), mrq->sbc->opcode,
mrq->sbc->error,
mrq->sbc->resp[0], mrq->sbc->resp[1],
mrq->sbc->resp[2], mrq->sbc->resp[3]);
}
pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
mmc_hostname(host), cmd->opcode, err,
cmd->resp[0], cmd->resp[1],
cmd->resp[2], cmd->resp[3]);
if (mrq->data) {
pr_debug("%s: %d bytes transferred: %d\n",
mmc_hostname(host),
mrq->data->bytes_xfered, mrq->data->error);
}
if (mrq->stop) {
pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
mmc_hostname(host), mrq->stop->opcode,
mrq->stop->error,
mrq->stop->resp[0], mrq->stop->resp[1],
mrq->stop->resp[2], mrq->stop->resp[3]);
}
}
/*
* Request starter must handle retries - see
* mmc_wait_for_req_done().
*/
if (mrq->done)
mrq->done(mrq);
}
EXPORT_SYMBOL(mmc_request_done);
static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
/* Assumes host controller has been runtime resumed by mmc_claim_host */
err = mmc_retune(host);
if (err) {
mrq->cmd->error = err;
mmc_request_done(host, mrq);
return;
}
/*
	 * For SDIO read/write commands we must wait for the card to stop
	 * signalling busy, otherwise some SDIO devices won't work properly.
	 * I/O abort, reset and bus suspend operations bypass this wait.
*/
if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) &&
host->ops->card_busy) {
		int tries = 500; /* Wait approx 500ms at maximum */
while (host->ops->card_busy(host) && --tries)
mmc_delay(1);
if (tries == 0) {
mrq->cmd->error = -EBUSY;
mmc_request_done(host, mrq);
return;
}
}
if (mrq->cap_cmd_during_tfr) {
host->ongoing_mrq = mrq;
/*
		 * The retry path could come through here without having waited
		 * on cmd_completion, so ensure it is reinitialised.
*/
reinit_completion(&mrq->cmd_completion);
}
trace_mmc_request_start(host, mrq);
if (host->cqe_on)
host->cqe_ops->cqe_off(host);
host->ops->request(host, mrq);
}
static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
bool cqe)
{
if (mrq->sbc) {
pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
mmc_hostname(host), mrq->sbc->opcode,
mrq->sbc->arg, mrq->sbc->flags);
}
if (mrq->cmd) {
pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
mmc_hostname(host), cqe ? "CQE direct " : "",
mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
} else if (cqe) {
pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
}
if (mrq->data) {
pr_debug("%s: blksz %d blocks %d flags %08x "
"tsac %d ms nsac %d\n",
mmc_hostname(host), mrq->data->blksz,
mrq->data->blocks, mrq->data->flags,
mrq->data->timeout_ns / 1000000,
mrq->data->timeout_clks);
}
if (mrq->stop) {
pr_debug("%s: CMD%u arg %08x flags %08x\n",
mmc_hostname(host), mrq->stop->opcode,
mrq->stop->arg, mrq->stop->flags);
}
}
static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
{
unsigned int i, sz = 0;
struct scatterlist *sg;
if (mrq->cmd) {
mrq->cmd->error = 0;
mrq->cmd->mrq = mrq;
mrq->cmd->data = mrq->data;
}
if (mrq->sbc) {
mrq->sbc->error = 0;
mrq->sbc->mrq = mrq;
}
if (mrq->data) {
if (mrq->data->blksz > host->max_blk_size ||
mrq->data->blocks > host->max_blk_count ||
mrq->data->blocks * mrq->data->blksz > host->max_req_size)
return -EINVAL;
for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
sz += sg->length;
if (sz != mrq->data->blocks * mrq->data->blksz)
return -EINVAL;
mrq->data->error = 0;
mrq->data->mrq = mrq;
if (mrq->stop) {
mrq->data->stop = mrq->stop;
mrq->stop->error = 0;
mrq->stop->mrq = mrq;
}
}
return 0;
}
int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
init_completion(&mrq->cmd_completion);
mmc_retune_hold(host);
if (mmc_card_removed(host->card))
return -ENOMEDIUM;
mmc_mrq_pr_debug(host, mrq, false);
WARN_ON(!host->claimed);
err = mmc_mrq_prep(host, mrq);
if (err)
return err;
led_trigger_event(host->led, LED_FULL);
__mmc_start_request(host, mrq);
return 0;
}
EXPORT_SYMBOL(mmc_start_request);
static void mmc_wait_done(struct mmc_request *mrq)
{
complete(&mrq->completion);
}
static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
{
struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);
/*
* If there is an ongoing transfer, wait for the command line to become
* available.
*/
if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
wait_for_completion(&ongoing_mrq->cmd_completion);
}
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
mmc_wait_ongoing_tfr_cmd(host);
init_completion(&mrq->completion);
mrq->done = mmc_wait_done;
err = mmc_start_request(host, mrq);
if (err) {
mrq->cmd->error = err;
mmc_complete_cmd(mrq);
complete(&mrq->completion);
}
return err;
}
void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
struct mmc_command *cmd;
while (1) {
wait_for_completion(&mrq->completion);
cmd = mrq->cmd;
if (!cmd->error || !cmd->retries ||
mmc_card_removed(host->card))
break;
mmc_retune_recheck(host);
pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
mmc_hostname(host), cmd->opcode, cmd->error);
cmd->retries--;
cmd->error = 0;
__mmc_start_request(host, mrq);
}
mmc_retune_release(host);
}
EXPORT_SYMBOL(mmc_wait_for_req_done);
/*
* mmc_cqe_start_req - Start a CQE request.
* @host: MMC host to start the request
* @mrq: request to start
*
 * Start the request, re-tuning first if needed and possible. Returns an error
* code if the request fails to start or -EBUSY if CQE is busy.
*/
int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
/*
* CQE cannot process re-tuning commands. Caller must hold retuning
* while CQE is in use. Re-tuning can happen here only when CQE has no
* active requests i.e. this is the first. Note, re-tuning will call
* ->cqe_off().
*/
err = mmc_retune(host);
if (err)
goto out_err;
mrq->host = host;
mmc_mrq_pr_debug(host, mrq, true);
err = mmc_mrq_prep(host, mrq);
if (err)
goto out_err;
err = host->cqe_ops->cqe_request(host, mrq);
if (err)
goto out_err;
trace_mmc_request_start(host, mrq);
return 0;
out_err:
if (mrq->cmd) {
pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
mmc_hostname(host), mrq->cmd->opcode, err);
} else {
pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
mmc_hostname(host), mrq->tag, err);
}
return err;
}
EXPORT_SYMBOL(mmc_cqe_start_req);
/**
* mmc_cqe_request_done - CQE has finished processing an MMC request
* @host: MMC host which completed request
* @mrq: MMC request which completed
*
* CQE drivers should call this function when they have completed
* their processing of a request.
*/
void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
mmc_should_fail_request(host, mrq);
/* Flag re-tuning needed on CRC errors */
if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
(mrq->data && mrq->data->error == -EILSEQ))
mmc_retune_needed(host);
trace_mmc_request_done(host, mrq);
if (mrq->cmd) {
pr_debug("%s: CQE req done (direct CMD%u): %d\n",
mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
} else {
pr_debug("%s: CQE transfer done tag %d\n",
mmc_hostname(host), mrq->tag);
}
if (mrq->data) {
pr_debug("%s: %d bytes transferred: %d\n",
mmc_hostname(host),
mrq->data->bytes_xfered, mrq->data->error);
}
mrq->done(mrq);
}
EXPORT_SYMBOL(mmc_cqe_request_done);
/**
* mmc_cqe_post_req - CQE post process of a completed MMC request
* @host: MMC host
* @mrq: MMC request to be processed
*/
void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
if (host->cqe_ops->cqe_post_req)
host->cqe_ops->cqe_post_req(host, mrq);
}
EXPORT_SYMBOL(mmc_cqe_post_req);
/* Arbitrary 1 second timeout */
#define MMC_CQE_RECOVERY_TIMEOUT 1000
/*
* mmc_cqe_recovery - Recover from CQE errors.
* @host: MMC host to recover
*
* Recovery consists of stopping CQE, stopping eMMC, discarding the queue
* in eMMC, and discarding the queue in CQE. CQE must call
* mmc_cqe_request_done() on all requests. An error is returned if the eMMC
* fails to discard its queue.
*/
int mmc_cqe_recovery(struct mmc_host *host)
{
struct mmc_command cmd;
int err;
mmc_retune_hold_now(host);
/*
* Recovery is expected seldom, if at all, but it reduces performance,
* so make sure it is not completely silent.
*/
pr_warn("%s: running CQE recovery\n", mmc_hostname(host));
host->cqe_ops->cqe_recovery_start(host);
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = MMC_STOP_TRANSMISSION;
cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
mmc_wait_for_cmd(host, &cmd, 0);
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = MMC_CMDQ_TASK_MGMT;
cmd.arg = 1; /* Discard entire queue */
cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
err = mmc_wait_for_cmd(host, &cmd, 0);
host->cqe_ops->cqe_recovery_finish(host);
mmc_retune_release(host);
return err;
}
EXPORT_SYMBOL(mmc_cqe_recovery);
/**
* mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
* @host: MMC host
* @mrq: MMC request
*
* mmc_is_req_done() is used with requests that have
* mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
* starting a request and before waiting for it to complete. That is,
* either in between calls to mmc_start_req(), or after mmc_wait_for_req()
* and before mmc_wait_for_req_done(). If it is called at other times the
* result is not meaningful.
*/
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
return completion_done(&mrq->completion);
}
EXPORT_SYMBOL(mmc_is_req_done);
/**
* mmc_wait_for_req - start a request and wait for completion
* @host: MMC host to start command
* @mrq: MMC request to start
*
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. In the case of 'cap_cmd_during_tfr'
 * requests, this returns while the data transfer is still ongoing;
 * the caller can then issue further commands that do not use the
 * data lines, and finally wait by calling mmc_wait_for_req_done().
 * Does not attempt to parse the response.
*/
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
__mmc_start_req(host, mrq);
if (!mrq->cap_cmd_during_tfr)
mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
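/*
 * Example (illustrative sketch; my_poll_status() is a hypothetical helper
 * that issues only non-data commands such as CMD13): the typical flow for
 * a 'cap_cmd_during_tfr' request. mmc_wait_for_req() returns once the
 * command completes while the data transfer is still in flight:
 *
 *	mrq->cap_cmd_during_tfr = true;
 *	mmc_wait_for_req(host, mrq);
 *	while (!mmc_is_req_done(host, mrq))
 *		my_poll_status(card);
 *	mmc_wait_for_req_done(host, mrq);
 */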
/**
* mmc_wait_for_cmd - start a command and wait for completion
* @host: MMC host to start command
* @cmd: MMC command to start
* @retries: maximum number of retries
*
* Start a new MMC command for a host, and wait for the command
* to complete. Return any error that occurred while the command
* was executing. Do not attempt to parse the response.
*/
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
struct mmc_request mrq = {};
WARN_ON(!host->claimed);
memset(cmd->resp, 0, sizeof(cmd->resp));
cmd->retries = retries;
mrq.cmd = cmd;
cmd->data = NULL;
mmc_wait_for_req(host, &mrq);
return cmd->error;
}
EXPORT_SYMBOL(mmc_wait_for_cmd);
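/*
 * Example (illustrative sketch, not part of this file; my_send_status() is
 * a made-up name): issuing a single CMD13 (SEND_STATUS) through
 * mmc_wait_for_cmd(). The caller must have claimed the host.
 *
 *	static int my_send_status(struct mmc_card *card, u32 *status)
 *	{
 *		struct mmc_command cmd = {};
 *		int err;
 *
 *		cmd.opcode = MMC_SEND_STATUS;
 *		cmd.arg = card->rca << 16;
 *		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *
 *		err = mmc_wait_for_cmd(card->host, &cmd, 3);
 *		if (err)
 *			return err;
 *
 *		*status = cmd.resp[0];
 *		return 0;
 *	}
 */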
/**
* mmc_set_data_timeout - set the timeout for a data command
* @data: data phase for command
* @card: the MMC card associated with the data transfer
*
* Computes the data timeout parameters according to the
* correct algorithm given the card type.
*/
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
unsigned int mult;
/*
* SDIO cards only define an upper 1 s limit on access.
*/
if (mmc_card_sdio(card)) {
data->timeout_ns = 1000000000;
data->timeout_clks = 0;
return;
}
/*
 * SD cards use a multiplier of 100 rather than 10.
*/
mult = mmc_card_sd(card) ? 100 : 10;
/*
* Scale up the multiplier (and therefore the timeout) by
* the r2w factor for writes.
*/
if (data->flags & MMC_DATA_WRITE)
mult <<= card->csd.r2w_factor;
data->timeout_ns = card->csd.taac_ns * mult;
data->timeout_clks = card->csd.taac_clks * mult;
/*
* SD cards also have an upper limit on the timeout.
*/
if (mmc_card_sd(card)) {
unsigned int timeout_us, limit_us;
timeout_us = data->timeout_ns / 1000;
if (card->host->ios.clock)
timeout_us += data->timeout_clks * 1000 /
(card->host->ios.clock / 1000);
if (data->flags & MMC_DATA_WRITE)
/*
 * The MMC spec says: "It is strongly recommended
 * for hosts to implement more than 500ms
 * timeout value even if the card indicates
 * the 250ms maximum busy length." Even the
 * previous value of 300ms is known to be
 * insufficient for some cards.
 */
limit_us = 3000000;
else
limit_us = 100000;
/*
* SDHC cards always use these fixed values.
*/
if (timeout_us > limit_us) {
data->timeout_ns = limit_us * 1000;
data->timeout_clks = 0;
}
/* assign limit value if invalid */
if (timeout_us == 0)
data->timeout_ns = limit_us * 1000;
}
/*
 * Some cards require a longer data read timeout than indicated in CSD.
 * Address this by setting the read timeout to a "reasonably high"
 * value. For the cards tested, 600ms has proven enough. This value can
 * be increased if other problematic cards require it.
*/
if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
data->timeout_ns = 600000000;
data->timeout_clks = 0;
}
/*
* Some cards need very high timeouts if driven in SPI mode.
* The worst observed timeout was 900ms after writing a
* continuous stream of data until the internal logic
* overflowed.
*/
if (mmc_host_is_spi(card->host)) {
if (data->flags & MMC_DATA_WRITE) {
if (data->timeout_ns < 1000000000)
data->timeout_ns = 1000000000; /* 1s */
} else {
if (data->timeout_ns < 100000000)
data->timeout_ns = 100000000; /* 100ms */
}
}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
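/*
 * Worked example (illustrative numbers, not from a real card): an SD card
 * with taac_ns = 1500000 (1.5 ms), taac_clks = 0 and r2w_factor = 2. For a
 * read, mult = 100, so timeout_ns = 150 ms; that exceeds the 100 ms read
 * limit and is clamped to 100 ms. For a write, mult = 100 << 2 = 400,
 * giving 600 ms, which is below the 3 s write limit and is kept as-is.
 */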
/*
* Allow claiming an already claimed host if the context is the same or there is
* no context but the task is the same.
*/
static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx,
struct task_struct *task)
{
return host->claimer == ctx ||
(!ctx && task && host->claimer->task == task);
}
static inline void mmc_ctx_set_claimer(struct mmc_host *host,
struct mmc_ctx *ctx,
struct task_struct *task)
{
if (!host->claimer) {
if (ctx)
host->claimer = ctx;
else
host->claimer = &host->default_ctx;
}
if (task)
host->claimer->task = task;
}
/**
* __mmc_claim_host - exclusively claim a host
* @host: mmc host to claim
* @ctx: context that claims the host or NULL in which case the default
* context will be used
* @abort: whether or not the operation should be aborted
*
 * Claim a host for a set of operations. If @abort is non-NULL and
 * dereferences to a non-zero value, then this will return prematurely
 * with that non-zero value without acquiring the lock. Returns zero
 * with the lock held otherwise.
*/
int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
atomic_t *abort)
{
struct task_struct *task = ctx ? NULL : current;
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
int stop;
bool pm = false;
might_sleep();
add_wait_queue(&host->wq, &wait);
spin_lock_irqsave(&host->lock, flags);
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
stop = abort ? atomic_read(abort) : 0;
if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task))
break;
spin_unlock_irqrestore(&host->lock, flags);
schedule();
spin_lock_irqsave(&host->lock, flags);
}
set_current_state(TASK_RUNNING);
if (!stop) {
host->claimed = 1;
mmc_ctx_set_claimer(host, ctx, task);
host->claim_cnt += 1;
if (host->claim_cnt == 1)
pm = true;
} else
wake_up(&host->wq);
spin_unlock_irqrestore(&host->lock, flags);
remove_wait_queue(&host->wq, &wait);
if (pm)
pm_runtime_get_sync(mmc_dev(host));
return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);
/**
* mmc_release_host - release a host
* @host: mmc host to release
*
 * Release an MMC host, allowing others to claim the host
 * for their operations.
*/
void mmc_release_host(struct mmc_host *host)
{
unsigned long flags;
WARN_ON(!host->claimed);
spin_lock_irqsave(&host->lock, flags);
if (--host->claim_cnt) {
/* Release for nested claim */
spin_unlock_irqrestore(&host->lock, flags);
} else {
host->claimed = 0;
host->claimer->task = NULL;
host->claimer = NULL;
spin_unlock_irqrestore(&host->lock, flags);
wake_up(&host->wq);
pm_runtime_mark_last_busy(mmc_dev(host));
if (host->caps & MMC_CAP_SYNC_RUNTIME_PM)
pm_runtime_put_sync_suspend(mmc_dev(host));
else
pm_runtime_put_autosuspend(mmc_dev(host));
}
}
EXPORT_SYMBOL(mmc_release_host);
/*
* This is a helper function, which fetches a runtime pm reference for the
* card device and also claims the host.
*/
void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
pm_runtime_get_sync(&card->dev);
__mmc_claim_host(card->host, ctx, NULL);
}
EXPORT_SYMBOL(mmc_get_card);
/*
* This is a helper function, which releases the host and drops the runtime
* pm reference for the card device.
*/
void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
struct mmc_host *host = card->host;
WARN_ON(ctx && host->claimer != ctx);
mmc_release_host(host);
pm_runtime_mark_last_busy(&card->dev);
pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);
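/*
 * Example (illustrative sketch; my_card_op() is a made-up name): the usual
 * bracketing for card drivers that need the card runtime-resumed and the
 * host claimed. mmc_set_blocklen() stands in for any claimed-host work.
 *
 *	static int my_card_op(struct mmc_card *card)
 *	{
 *		int err;
 *
 *		mmc_get_card(card, NULL);
 *		err = mmc_set_blocklen(card, 512);
 *		mmc_put_card(card, NULL);
 *		return err;
 *	}
 */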
/*
* Internal function that does the actual ios call to the host driver,
* optionally printing some debug output.
*/
static inline void mmc_set_ios(struct mmc_host *host)
{
struct mmc_ios *ios = &host->ios;
pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
"width %u timing %u\n",
mmc_hostname(host), ios->clock, ios->bus_mode,
ios->power_mode, ios->chip_select, ios->vdd,
1 << ios->bus_width, ios->timing);
host->ops->set_ios(host, ios);
}
/*
* Control chip select pin on a host.
*/
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
host->ios.chip_select = mode;
mmc_set_ios(host);
}
/*
* Sets the host clock to the highest possible frequency that
* is below "hz".
*/
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
WARN_ON(hz && hz < host->f_min);
if (hz > host->f_max)
hz = host->f_max;
host->ios.clock = hz;
mmc_set_ios(host);
}
int mmc_execute_tuning(struct mmc_card *card)
{
struct mmc_host *host = card->host;
u32 opcode;
int err;
if (!host->ops->execute_tuning)
return 0;
if (host->cqe_on)
host->cqe_ops->cqe_off(host);
if (mmc_card_mmc(card))
opcode = MMC_SEND_TUNING_BLOCK_HS200;
else
opcode = MMC_SEND_TUNING_BLOCK;
err = host->ops->execute_tuning(host, opcode);
if (!err) {
mmc_retune_clear(host);
mmc_retune_enable(host);
return 0;
}
/* Only print error when we don't check for card removal */
if (!host->detect_change) {
pr_err("%s: tuning execution failed: %d\n",
mmc_hostname(host), err);
mmc_debugfs_err_stats_inc(host, MMC_ERR_TUNING);
}
return err;
}
/*
* Change the bus mode (open drain/push-pull) of a host.
*/
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
host->ios.bus_mode = mode;
mmc_set_ios(host);
}
/*
* Change data bus width of a host.
*/
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
host->ios.bus_width = width;
mmc_set_ios(host);
}
/*
* Set initial state after a power cycle or a hw_reset.
*/
void mmc_set_initial_state(struct mmc_host *host)
{
if (host->cqe_on)
host->cqe_ops->cqe_off(host);
mmc_retune_disable(host);
if (mmc_host_is_spi(host))
host->ios.chip_select = MMC_CS_HIGH;
else
host->ios.chip_select = MMC_CS_DONTCARE;
host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
host->ios.bus_width = MMC_BUS_WIDTH_1;
host->ios.timing = MMC_TIMING_LEGACY;
host->ios.drv_type = 0;
host->ios.enhanced_strobe = false;
/*
* Make sure we are in non-enhanced strobe mode before we
* actually enable it in ext_csd.
*/
if ((host->caps2 & MMC_CAP2_HS400_ES) &&
host->ops->hs400_enhanced_strobe)
host->ops->hs400_enhanced_strobe(host, &host->ios);
mmc_set_ios(host);
mmc_crypto_set_initial_state(host);
}
/**
* mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
* @vdd: voltage (mV)
* @low_bits: prefer low bits in boundary cases
*
* This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible, a negative errno value is returned.
*
* Depending on the @low_bits flag the function prefers low or high OCR bits
* on boundary voltages. For example,
* with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
* with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
*
 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
*/
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
const int max_bit = ilog2(MMC_VDD_35_36);
int bit;
if (vdd < 1650 || vdd > 3600)
return -EINVAL;
if (vdd >= 1650 && vdd <= 1950)
return ilog2(MMC_VDD_165_195);
if (low_bits)
vdd -= 1;
/* Base 2000 mV, step 100 mV, bit numbering starts at 8. */
bit = (vdd - 2000) / 100 + 8;
if (bit > max_bit)
return max_bit;
return bit;
}
/**
* mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
* @vdd_min: minimum voltage value (mV)
* @vdd_max: maximum voltage value (mV)
*
* This function returns the OCR mask bits according to the provided @vdd_min
* and @vdd_max values. If conversion is not possible the function returns 0.
*
* Notes wrt boundary cases:
* This function sets the OCR bits for all boundary voltages, for example
* [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
* MMC_VDD_34_35 mask.
*/
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
u32 mask = 0;
if (vdd_max < vdd_min)
return 0;
/* Prefer high bits for the boundary vdd_max values. */
vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
if (vdd_max < 0)
return 0;
/* Prefer low bits for the boundary vdd_min values. */
vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
if (vdd_min < 0)
return 0;
/* Fill the mask, from max bit to min bit. */
while (vdd_max >= vdd_min)
mask |= 1 << vdd_max--;
return mask;
}
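/*
 * Worked example: mmc_vddrange_to_ocrmask(3300, 3400). The upper bound
 * prefers high bits, so 3400 mV maps to bit (3400 - 2000) / 100 + 8 = 22
 * (MMC_VDD_34_35). The lower bound prefers low bits, so 3299 mV (after the
 * -1 adjustment) maps to bit 20 (MMC_VDD_32_33). Filling bits 20..22 gives
 * MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35, matching the boundary-case
 * note above.
 */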
static int mmc_of_get_func_num(struct device_node *node)
{
u32 reg;
int ret;
ret = of_property_read_u32(node, "reg", ®);
if (ret < 0)
return ret;
return reg;
}
struct device_node *mmc_of_find_child_device(struct mmc_host *host,
unsigned func_num)
{
struct device_node *node;
if (!host->parent || !host->parent->of_node)
return NULL;
for_each_child_of_node(host->parent->of_node, node) {
if (mmc_of_get_func_num(node) == func_num)
return node;
}
return NULL;
}
/*
* Mask off any voltages we don't support and select
* the lowest voltage
*/
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
int bit;
/*
* Sanity check the voltages that the card claims to
* support.
*/
if (ocr & 0x7F) {
dev_warn(mmc_dev(host),
"card claims to support voltages below defined range\n");
ocr &= ~0x7F;
}
ocr &= host->ocr_avail;
if (!ocr) {
dev_warn(mmc_dev(host), "no support for card's volts\n");
return 0;
}
if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
bit = ffs(ocr) - 1;
ocr &= 3 << bit;
mmc_power_cycle(host, ocr);
} else {
bit = fls(ocr) - 1;
/*
* The bit variable represents the highest voltage bit set in
* the OCR register.
* To keep a range of 2 values (e.g. 3.2V/3.3V and 3.3V/3.4V),
 * we must shift the mask '3' by (bit - 1).
*/
ocr &= 3 << (bit - 1);
if (bit != host->ios.vdd)
dev_warn(mmc_dev(host), "exceeding card's volts\n");
}
return ocr;
}
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
int err = 0;
int old_signal_voltage = host->ios.signal_voltage;
host->ios.signal_voltage = signal_voltage;
if (host->ops->start_signal_voltage_switch)
err = host->ops->start_signal_voltage_switch(host, &host->ios);
if (err)
host->ios.signal_voltage = old_signal_voltage;
return err;
}
void mmc_set_initial_signal_voltage(struct mmc_host *host)
{
/* Try to set signal voltage to 3.3V but fall back to 1.8V or 1.2V */
if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
}
int mmc_host_set_uhs_voltage(struct mmc_host *host)
{
u32 clock;
/*
* During a signal voltage level switch, the clock must be gated
* for 5 ms according to the SD spec
*/
clock = host->ios.clock;
host->ios.clock = 0;
mmc_set_ios(host);
if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
return -EAGAIN;
/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
mmc_delay(10);
host->ios.clock = clock;
mmc_set_ios(host);
return 0;
}
int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
{
struct mmc_command cmd = {};
int err = 0;
/*
* If we cannot switch voltages, return failure so the caller
* can continue without UHS mode
*/
if (!host->ops->start_signal_voltage_switch)
return -EPERM;
if (!host->ops->card_busy)
pr_warn("%s: cannot verify signal voltage switch\n",
mmc_hostname(host));
cmd.opcode = SD_SWITCH_VOLTAGE;
cmd.arg = 0;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
goto power_cycle;
if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
return -EIO;
/*
* The card should drive cmd and dat[0:3] low immediately
* after the response of cmd11, but wait 1 ms to be sure
*/
mmc_delay(1);
if (host->ops->card_busy && !host->ops->card_busy(host)) {
err = -EAGAIN;
goto power_cycle;
}
if (mmc_host_set_uhs_voltage(host)) {
/*
* Voltages may not have been switched, but we've already
* sent CMD11, so a power cycle is required anyway
*/
err = -EAGAIN;
goto power_cycle;
}
/* Wait for at least 1 ms according to spec */
mmc_delay(1);
/*
* Failure to switch is indicated by the card holding
* dat[0:3] low
*/
if (host->ops->card_busy && host->ops->card_busy(host))
err = -EAGAIN;
power_cycle:
if (err) {
pr_debug("%s: Signal voltage switch failed, "
"power cycling card\n", mmc_hostname(host));
mmc_power_cycle(host, ocr);
}
return err;
}
/*
* Select timing parameters for host.
*/
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
host->ios.timing = timing;
mmc_set_ios(host);
}
/*
* Select appropriate driver type for host.
*/
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
host->ios.drv_type = drv_type;
mmc_set_ios(host);
}
int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
int card_drv_type, int *drv_type)
{
struct mmc_host *host = card->host;
int host_drv_type = SD_DRIVER_TYPE_B;
*drv_type = 0;
if (!host->ops->select_drive_strength)
return 0;
/* Use SD definition of driver strength for hosts */
if (host->caps & MMC_CAP_DRIVER_TYPE_A)
host_drv_type |= SD_DRIVER_TYPE_A;
if (host->caps & MMC_CAP_DRIVER_TYPE_C)
host_drv_type |= SD_DRIVER_TYPE_C;
if (host->caps & MMC_CAP_DRIVER_TYPE_D)
host_drv_type |= SD_DRIVER_TYPE_D;
/*
* The drive strength that the hardware can support
* depends on the board design. Pass the appropriate
* information and let the hardware specific code
* return what is possible given the options
*/
return host->ops->select_drive_strength(card, max_dtr,
host_drv_type,
card_drv_type,
drv_type);
}
/*
* Apply power to the MMC stack. This is a two-stage process.
* First, we enable power to the card without the clock running.
* We then wait a bit for the power to stabilise. Finally,
* enable the bus drivers and clock to the card.
*
 * We must _NOT_ enable the clock prior to power stabilising.
*
* If a host does all the power sequencing itself, ignore the
* initial MMC_POWER_UP stage.
*/
void mmc_power_up(struct mmc_host *host, u32 ocr)
{
if (host->ios.power_mode == MMC_POWER_ON)
return;
mmc_pwrseq_pre_power_on(host);
host->ios.vdd = fls(ocr) - 1;
host->ios.power_mode = MMC_POWER_UP;
/* Set initial state and call mmc_set_ios */
mmc_set_initial_state(host);
mmc_set_initial_signal_voltage(host);
/*
* This delay should be sufficient to allow the power supply
* to reach the minimum voltage.
*/
mmc_delay(host->ios.power_delay_ms);
mmc_pwrseq_post_power_on(host);
host->ios.clock = host->f_init;
host->ios.power_mode = MMC_POWER_ON;
mmc_set_ios(host);
/*
 * This delay must be at least 74 clock cycles, or 1 ms, or the
* time required to reach a stable voltage.
*/
mmc_delay(host->ios.power_delay_ms);
}
void mmc_power_off(struct mmc_host *host)
{
if (host->ios.power_mode == MMC_POWER_OFF)
return;
mmc_pwrseq_power_off(host);
host->ios.clock = 0;
host->ios.vdd = 0;
host->ios.power_mode = MMC_POWER_OFF;
/* Set initial state and call mmc_set_ios */
mmc_set_initial_state(host);
/*
* Some configurations, such as the 802.11 SDIO card in the OLPC
* XO-1.5, require a short delay after poweroff before the card
* can be successfully turned on again.
*/
mmc_delay(1);
}
void mmc_power_cycle(struct mmc_host *host, u32 ocr)
{
mmc_power_off(host);
/* Wait at least 1 ms according to SD spec */
mmc_delay(1);
mmc_power_up(host, ocr);
}
/*
* Assign a mmc bus handler to a host. Only one bus handler may control a
* host at any given time.
*/
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
host->bus_ops = ops;
}
/*
* Remove the current bus handler from a host.
*/
void mmc_detach_bus(struct mmc_host *host)
{
host->bus_ops = NULL;
}
void _mmc_detect_change(struct mmc_host *host, unsigned long delay, bool cd_irq)
{
/*
* Prevent system sleep for 5s to allow user space to consume the
* corresponding uevent. This is especially useful, when CD irq is used
* as a system wakeup, but doesn't hurt in other cases.
*/
if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL))
__pm_wakeup_event(host->ws, 5000);
host->detect_change = 1;
mmc_schedule_delayed_work(&host->detect, delay);
}
/**
 * mmc_detect_change - process change of state on an MMC socket
* @host: host which changed state.
* @delay: optional delay to wait before detection (jiffies)
*
* MMC drivers should call this when they detect a card has been
* inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted card.
*/
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
_mmc_detect_change(host, delay, true);
}
EXPORT_SYMBOL(mmc_detect_change);
void mmc_init_erase(struct mmc_card *card)
{
unsigned int sz;
if (is_power_of_2(card->erase_size))
card->erase_shift = ffs(card->erase_size) - 1;
else
card->erase_shift = 0;
/*
* It is possible to erase an arbitrarily large area of an SD or MMC
* card. That is not desirable because it can take a long time
 * (minutes), potentially delaying more important I/O, and also the
 * timeout calculations become increasingly and hugely over-estimated.
* Consequently, 'pref_erase' is defined as a guide to limit erases
* to that size and alignment.
*
* For SD cards that define Allocation Unit size, limit erases to one
* Allocation Unit at a time.
 * For MMC, have a stab at a good value; for modern cards it will
* end up being 4MiB. Note that if the value is too small, it can end
* up taking longer to erase. Also note, erase_size is already set to
* High Capacity Erase Size if available when this function is called.
*/
if (mmc_card_sd(card) && card->ssr.au) {
card->pref_erase = card->ssr.au;
card->erase_shift = ffs(card->ssr.au) - 1;
} else if (card->erase_size) {
sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
if (sz < 128)
card->pref_erase = 512 * 1024 / 512;
else if (sz < 512)
card->pref_erase = 1024 * 1024 / 512;
else if (sz < 1024)
card->pref_erase = 2 * 1024 * 1024 / 512;
else
card->pref_erase = 4 * 1024 * 1024 / 512;
if (card->pref_erase < card->erase_size)
card->pref_erase = card->erase_size;
else {
sz = card->pref_erase % card->erase_size;
if (sz)
card->pref_erase += card->erase_size - sz;
}
} else
card->pref_erase = 0;
}
static bool is_trim_arg(unsigned int arg)
{
return (arg & MMC_TRIM_OR_DISCARD_ARGS) && arg != MMC_DISCARD_ARG;
}
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
unsigned int arg, unsigned int qty)
{
unsigned int erase_timeout;
if (arg == MMC_DISCARD_ARG ||
(arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
erase_timeout = card->ext_csd.trim_timeout;
} else if (card->ext_csd.erase_group_def & 1) {
/* High Capacity Erase Group Size uses HC timeouts */
if (arg == MMC_TRIM_ARG)
erase_timeout = card->ext_csd.trim_timeout;
else
erase_timeout = card->ext_csd.hc_erase_timeout;
} else {
/* CSD Erase Group Size uses write timeout */
unsigned int mult = (10 << card->csd.r2w_factor);
unsigned int timeout_clks = card->csd.taac_clks * mult;
unsigned int timeout_us;
/* Avoid overflow: e.g. taac_ns=80000000 mult=1280 */
if (card->csd.taac_ns < 1000000)
timeout_us = (card->csd.taac_ns * mult) / 1000;
else
timeout_us = (card->csd.taac_ns / 1000) * mult;
/*
* ios.clock is only a target. The real clock rate might be
* less but not that much less, so fudge it by multiplying by 2.
*/
timeout_clks <<= 1;
timeout_us += (timeout_clks * 1000) /
(card->host->ios.clock / 1000);
erase_timeout = timeout_us / 1000;
/*
* Theoretically, the calculation could underflow so round up
* to 1ms in that case.
*/
if (!erase_timeout)
erase_timeout = 1;
}
/* Multiplier for secure operations */
if (arg & MMC_SECURE_ARGS) {
if (arg == MMC_SECURE_ERASE_ARG)
erase_timeout *= card->ext_csd.sec_erase_mult;
else
erase_timeout *= card->ext_csd.sec_trim_mult;
}
erase_timeout *= qty;
/*
* Ensure at least a 1 second timeout for SPI as per
* 'mmc_set_data_timeout()'
*/
if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
erase_timeout = 1000;
return erase_timeout;
}
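/*
 * Worked example for the legacy CSD branch above (illustrative numbers):
 * with taac_ns = 500000, taac_clks = 100, r2w_factor = 4 and a 52 MHz
 * clock, mult = 10 << 4 = 160 and timeout_us = 500000 * 160 / 1000 =
 * 80000. timeout_clks = 100 * 160 = 16000, doubled to 32000, which adds
 * 32000 * 1000 / 52000 ~= 615 us. That yields roughly 80 ms per erase
 * group, which is then scaled by qty and any secure multiplier.
 */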
static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
unsigned int arg,
unsigned int qty)
{
unsigned int erase_timeout;
/*
 * For DISCARD, none of the below calculation applies;
 * the busy timeout is 250 ms per discard command.
*/
if (arg == SD_DISCARD_ARG)
return SD_DISCARD_TIMEOUT_MS;
if (card->ssr.erase_timeout) {
/* Erase timeout specified in SD Status Register (SSR) */
erase_timeout = card->ssr.erase_timeout * qty +
card->ssr.erase_offset;
} else {
/*
* Erase timeout not specified in SD Status Register (SSR) so
* use 250ms per write block.
*/
erase_timeout = 250 * qty;
}
/* Must not be less than 1 second */
if (erase_timeout < 1000)
erase_timeout = 1000;
return erase_timeout;
}
static unsigned int mmc_erase_timeout(struct mmc_card *card,
unsigned int arg,
unsigned int qty)
{
if (mmc_card_sd(card))
return mmc_sd_erase_timeout(card, arg, qty);
else
return mmc_mmc_erase_timeout(card, arg, qty);
}
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
unsigned int to, unsigned int arg)
{
struct mmc_command cmd = {};
unsigned int qty = 0, busy_timeout = 0;
bool use_r1b_resp;
int err;
mmc_retune_hold(card->host);
/*
* qty is used to calculate the erase timeout which depends on how many
* erase groups (or allocation units in SD terminology) are affected.
* We count erasing part of an erase group as one erase group.
* For SD, the allocation units are always a power of 2. For MMC, the
 * erase group size is almost certainly also a power of 2, but the
 * JEDEC standard does not seem to insist on that, so we fall back to
* division in that case. SD may not specify an allocation unit size,
* in which case the timeout is based on the number of write blocks.
*
* Note that the timeout for secure trim 2 will only be correct if the
* number of erase groups specified is the same as the total of all
* preceding secure trim 1 commands. Since the power may have been
* lost since the secure trim 1 commands occurred, it is generally
* impossible to calculate the secure trim 2 timeout correctly.
*/
if (card->erase_shift)
qty += ((to >> card->erase_shift) -
(from >> card->erase_shift)) + 1;
else if (mmc_card_sd(card))
qty += to - from + 1;
else
qty += ((to / card->erase_size) -
(from / card->erase_size)) + 1;
if (!mmc_card_blockaddr(card)) {
from <<= 9;
to <<= 9;
}
if (mmc_card_sd(card))
cmd.opcode = SD_ERASE_WR_BLK_START;
else
cmd.opcode = MMC_ERASE_GROUP_START;
cmd.arg = from;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
pr_err("mmc_erase: group start error %d, "
"status %#x\n", err, cmd.resp[0]);
err = -EIO;
goto out;
}
memset(&cmd, 0, sizeof(struct mmc_command));
if (mmc_card_sd(card))
cmd.opcode = SD_ERASE_WR_BLK_END;
else
cmd.opcode = MMC_ERASE_GROUP_END;
cmd.arg = to;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
pr_err("mmc_erase: group end error %d, status %#x\n",
err, cmd.resp[0]);
err = -EIO;
goto out;
}
memset(&cmd, 0, sizeof(struct mmc_command));
cmd.opcode = MMC_ERASE;
cmd.arg = arg;
busy_timeout = mmc_erase_timeout(card, arg, qty);
use_r1b_resp = mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout);
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
pr_err("mmc_erase: erase error %d, status %#x\n",
err, cmd.resp[0]);
err = -EIO;
goto out;
}
if (mmc_host_is_spi(card->host))
goto out;
/*
 * When an R1B response and MMC_CAP_WAIT_WHILE_BUSY are used, the
 * polling shall be avoided.
 */
if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
goto out;
/* Let's poll to find out when the erase operation completes. */
err = mmc_poll_for_busy(card, busy_timeout, false, MMC_BUSY_ERASE);
out:
mmc_retune_release(card->host);
return err;
}
static unsigned int mmc_align_erase_size(struct mmc_card *card,
unsigned int *from,
unsigned int *to,
unsigned int nr)
{
unsigned int from_new = *from, nr_new = nr, rem;
/*
* When the 'card->erase_size' is power of 2, we can use round_up/down()
* to align the erase size efficiently.
*/
if (is_power_of_2(card->erase_size)) {
unsigned int temp = from_new;
from_new = round_up(temp, card->erase_size);
rem = from_new - temp;
if (nr_new > rem)
nr_new -= rem;
else
return 0;
nr_new = round_down(nr_new, card->erase_size);
} else {
rem = from_new % card->erase_size;
if (rem) {
rem = card->erase_size - rem;
from_new += rem;
if (nr_new > rem)
nr_new -= rem;
else
return 0;
}
rem = nr_new % card->erase_size;
if (rem)
nr_new -= rem;
}
if (nr_new == 0)
return 0;
*to = from_new + nr_new;
*from = from_new;
return nr_new;
}
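/*
 * Worked example (illustrative numbers): with card->erase_size = 1024,
 * *from = 1500 and nr = 5000, the power-of-2 path rounds *from up to 2048
 * (rem = 548), shrinks nr to 4452 and rounds it down to 4096. The result
 * is *from = 2048, *to = 6144 and a return value of 4096 sectors.
 */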
/**
* mmc_erase - erase sectors.
* @card: card to erase
* @from: first sector to erase
* @nr: number of sectors to erase
* @arg: erase command argument
*
* Caller must claim host before calling this function.
*/
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
unsigned int arg)
{
unsigned int rem, to = from + nr;
int err;
if (!(card->csd.cmdclass & CCC_ERASE))
return -EOPNOTSUPP;
if (!card->erase_size)
return -EOPNOTSUPP;
if (mmc_card_sd(card) && arg != SD_ERASE_ARG && arg != SD_DISCARD_ARG)
return -EOPNOTSUPP;
if (mmc_card_mmc(card) && (arg & MMC_SECURE_ARGS) &&
!(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
return -EOPNOTSUPP;
if (mmc_card_mmc(card) && is_trim_arg(arg) &&
!(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
return -EOPNOTSUPP;
if (arg == MMC_SECURE_ERASE_ARG) {
if (from % card->erase_size || nr % card->erase_size)
return -EINVAL;
}
if (arg == MMC_ERASE_ARG)
nr = mmc_align_erase_size(card, &from, &to, nr);
if (nr == 0)
return 0;
if (to <= from)
return -EINVAL;
/* 'from' and 'to' are inclusive */
to -= 1;
/*
 * Special case where only one erase-group fits in the timeout budget:
 * if the region crosses an erase-group boundary in this case, we would
 * be trimming more than one erase-group, which does not fit in the
 * timeout budget of the controller, so we need to split the operation
 * and call mmc_do_erase() twice if necessary. This special case is
 * identified by the card->eg_boundary flag.
*/
rem = card->erase_size - (from % card->erase_size);
if ((arg & MMC_TRIM_OR_DISCARD_ARGS) && card->eg_boundary && nr > rem) {
err = mmc_do_erase(card, from, from + rem - 1, arg);
from += rem;
if ((err) || (to <= from))
return err;
}
return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
int mmc_can_erase(struct mmc_card *card)
{
if (card->csd.cmdclass & CCC_ERASE && card->erase_size)
return 1;
return 0;
}
EXPORT_SYMBOL(mmc_can_erase);
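/*
 * Example (illustrative sketch; my_discard_range() is a made-up name):
 * a caller erasing a sector range, claiming the host around the call as
 * the mmc_erase() kernel-doc requires.
 *
 *	static int my_discard_range(struct mmc_card *card,
 *				    unsigned int from, unsigned int nr)
 *	{
 *		int err;
 *
 *		if (!mmc_can_erase(card))
 *			return -EOPNOTSUPP;
 *
 *		mmc_claim_host(card->host);
 *		err = mmc_erase(card, from, nr, MMC_ERASE_ARG);
 *		mmc_release_host(card->host);
 *		return err;
 *	}
 */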
int mmc_can_trim(struct mmc_card *card)
{
if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
(!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
return 1;
return 0;
}
EXPORT_SYMBOL(mmc_can_trim);
int mmc_can_discard(struct mmc_card *card)
{
/*
 * As there's no way to detect the discard support bit at v4.5,
 * use the s/w feature support field.
*/
if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
return 1;
return 0;
}
EXPORT_SYMBOL(mmc_can_discard);
int mmc_can_sanitize(struct mmc_card *card)
{
if (!mmc_can_trim(card) && !mmc_can_erase(card))
return 0;
if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
return 1;
return 0;
}
int mmc_can_secure_erase_trim(struct mmc_card *card)
{
if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
!(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
return 1;
return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);
int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
unsigned int nr)
{
if (!card->erase_size)
return 0;
if (from % card->erase_size || nr % card->erase_size)
return 0;
return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);
static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
unsigned int arg)
{
struct mmc_host *host = card->host;
unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
unsigned int last_timeout = 0;
unsigned int max_busy_timeout = host->max_busy_timeout ?
host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;
if (card->erase_shift) {
max_qty = UINT_MAX >> card->erase_shift;
min_qty = card->pref_erase >> card->erase_shift;
} else if (mmc_card_sd(card)) {
max_qty = UINT_MAX;
min_qty = card->pref_erase;
} else {
max_qty = UINT_MAX / card->erase_size;
min_qty = card->pref_erase / card->erase_size;
}
/*
 * We should not use 'host->max_busy_timeout' as the only limitation
 * when deciding the max discard sectors. We should find a balanced
 * value that improves the erase speed without producing an overly
 * long timeout.
 *
 * Here we treat 'card->pref_erase' as the minimum number of discard
 * sectors, no matter the size of 'host->max_busy_timeout', but if
 * 'host->max_busy_timeout' is large enough for more discard sectors,
 * we continue to increase the max discard sectors until we reach a
 * balanced value. In cases when 'host->max_busy_timeout' isn't
 * specified, use the default max erase timeout.
*/
do {
y = 0;
for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
timeout = mmc_erase_timeout(card, arg, qty + x);
if (qty + x > min_qty && timeout > max_busy_timeout)
break;
if (timeout < last_timeout)
break;
last_timeout = timeout;
y = x;
}
qty += y;
} while (y);
if (!qty)
return 0;
/*
* When specifying a sector range to trim, chances are we might cross
* an erase-group boundary even if the amount of sectors is less than
* one erase-group.
* If we can only fit one erase-group in the controller timeout budget,
* we have to care that erase-group boundaries are not crossed by a
* single trim operation. We flag that special case with "eg_boundary".
* In all other cases we can just decrement qty and pretend that we
* always touch (qty + 1) erase-groups as a simple optimization.
*/
if (qty == 1)
card->eg_boundary = 1;
else
qty--;
/* Convert qty to sectors */
if (card->erase_shift)
max_discard = qty << card->erase_shift;
else if (mmc_card_sd(card))
max_discard = qty + 1;
else
max_discard = qty * card->erase_size;
return max_discard;
}
unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
struct mmc_host *host = card->host;
unsigned int max_discard, max_trim;
/*
 * Without erase_group_def set, the MMC erase timeout depends on the
 * clock frequency, which can change. In that case, the best choice is
 * just the preferred erase size.
*/
if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
return card->pref_erase;
max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
if (mmc_can_trim(card)) {
max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
if (max_trim < max_discard || max_discard == 0)
max_discard = max_trim;
} else if (max_discard < card->erase_size) {
max_discard = 0;
}
pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
mmc_hostname(host), max_discard, host->max_busy_timeout ?
host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);
bool mmc_card_is_blockaddr(struct mmc_card *card)
{
return card ? mmc_card_blockaddr(card) : false;
}
EXPORT_SYMBOL(mmc_card_is_blockaddr);
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
struct mmc_command cmd = {};
if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
mmc_card_hs400(card) || mmc_card_hs400es(card))
return 0;
cmd.opcode = MMC_SET_BLOCKLEN;
cmd.arg = blocklen;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);
static void mmc_hw_reset_for_init(struct mmc_host *host)
{
mmc_pwrseq_reset(host);
if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->card_hw_reset)
return;
host->ops->card_hw_reset(host);
}
/**
* mmc_hw_reset - reset the card in hardware
* @card: card to be reset
*
* Hard reset the card. This function is only for upper layers, like the
* block layer or card drivers. You cannot use it in host drivers (struct
* mmc_card might be gone then).
*
* Return: 0 on success, -errno on failure
*/
int mmc_hw_reset(struct mmc_card *card)
{
struct mmc_host *host = card->host;
int ret;
ret = host->bus_ops->hw_reset(host);
if (ret < 0)
pr_warn("%s: tried to HW reset card, got error %d\n",
mmc_hostname(host), ret);
return ret;
}
EXPORT_SYMBOL(mmc_hw_reset);
int mmc_sw_reset(struct mmc_card *card)
{
struct mmc_host *host = card->host;
int ret;
if (!host->bus_ops->sw_reset)
return -EOPNOTSUPP;
ret = host->bus_ops->sw_reset(host);
if (ret)
pr_warn("%s: tried to SW reset card, got error %d\n",
mmc_hostname(host), ret);
return ret;
}
EXPORT_SYMBOL(mmc_sw_reset);
static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
host->f_init = freq;
pr_debug("%s: %s: trying to init card at %u Hz\n",
mmc_hostname(host), __func__, host->f_init);
mmc_power_up(host, host->ocr_avail);
/*
* Some eMMCs (with VCCQ always on) may not be reset after power up, so
* do a hardware reset if possible.
*/
mmc_hw_reset_for_init(host);
/*
 * sdio_reset sends CMD52 to reset the card. Since we do not know
 * if the card is being re-initialized, just send it. CMD52
 * should be ignored by SD/eMMC cards.
 * Skip it if we already know that we do not support SDIO commands.
*/
if (!(host->caps2 & MMC_CAP2_NO_SDIO))
sdio_reset(host);
mmc_go_idle(host);
if (!(host->caps2 & MMC_CAP2_NO_SD)) {
if (mmc_send_if_cond_pcie(host, host->ocr_avail))
goto out;
if (mmc_card_sd_express(host))
return 0;
}
/* Order's important: probe SDIO, then SD, then MMC */
if (!(host->caps2 & MMC_CAP2_NO_SDIO))
if (!mmc_attach_sdio(host))
return 0;
if (!(host->caps2 & MMC_CAP2_NO_SD))
if (!mmc_attach_sd(host))
return 0;
if (!(host->caps2 & MMC_CAP2_NO_MMC))
if (!mmc_attach_mmc(host))
return 0;
out:
mmc_power_off(host);
return -EIO;
}
int _mmc_detect_card_removed(struct mmc_host *host)
{
int ret;
if (!host->card || mmc_card_removed(host->card))
return 1;
ret = host->bus_ops->alive(host);
/*
* Card detect status and alive check may be out of sync if card is
* removed slowly, when card detect switch changes while card/slot
* pads are still contacted in hardware (refer to "SD Card Mechanical
 * Addendum, Appendix C: Card Detection Switch"). So reschedule the
 * detect work 200 ms later for this case.
*/
if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
mmc_detect_change(host, msecs_to_jiffies(200));
pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
}
if (ret) {
mmc_card_set_removed(host->card);
pr_debug("%s: card remove detected\n", mmc_hostname(host));
}
return ret;
}
int mmc_detect_card_removed(struct mmc_host *host)
{
struct mmc_card *card = host->card;
int ret;
WARN_ON(!host->claimed);
if (!card)
return 1;
if (!mmc_card_is_removable(host))
return 0;
ret = mmc_card_removed(card);
/*
* The card will be considered unchanged unless we have been asked to
 * detect a change or the host requires polling to provide card detection.
*/
if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
return ret;
host->detect_change = 0;
if (!ret) {
ret = _mmc_detect_card_removed(host);
if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
/*
 * Schedule the detect work as soon as possible to let a
 * rescan handle the card removal.
*/
cancel_delayed_work(&host->detect);
_mmc_detect_change(host, 0, false);
}
}
return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);
int mmc_card_alternative_gpt_sector(struct mmc_card *card, sector_t *gpt_sector)
{
unsigned int boot_sectors_num;
if ((!(card->host->caps2 & MMC_CAP2_ALT_GPT_TEGRA)))
return -EOPNOTSUPP;
/* filter out unrelated cards */
if (card->ext_csd.rev < 3 ||
!mmc_card_mmc(card) ||
!mmc_card_is_blockaddr(card) ||
mmc_card_is_removable(card->host))
return -ENOENT;
/*
* eMMC storage has two special boot partitions in addition to the
 * main one. NVIDIA's bootloader linearizes eMMC boot0->boot1->main
 * accesses; this means that the partition table addresses are shifted
* by the size of boot partitions. In accordance with the eMMC
* specification, the boot partition size is calculated as follows:
*
* boot partition size = 128K byte x BOOT_SIZE_MULT
*
 * Calculate the number of sectors occupied by both boot partitions.
*/
boot_sectors_num = card->ext_csd.raw_boot_mult * SZ_128K /
SZ_512 * MMC_NUM_BOOT_PARTITION;
/* Defined by NVIDIA and used by Android devices. */
*gpt_sector = card->ext_csd.sectors - boot_sectors_num - 1;
return 0;
}
EXPORT_SYMBOL(mmc_card_alternative_gpt_sector);
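/*
 * Worked example (illustrative): with ext_csd.raw_boot_mult = 16, each
 * boot partition is 16 x 128 KiB = 2 MiB, so boot_sectors_num =
 * 16 * SZ_128K / SZ_512 * 2 = 8192 sectors and the alternative GPT is
 * expected at card->ext_csd.sectors - 8193.
 */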
void mmc_rescan(struct work_struct *work)
{
struct mmc_host *host =
container_of(work, struct mmc_host, detect.work);
int i;
if (host->rescan_disable)
return;
/* If there is a non-removable card registered, only scan once */
if (!mmc_card_is_removable(host) && host->rescan_entered)
return;
host->rescan_entered = 1;
if (host->trigger_card_event && host->ops->card_event) {
mmc_claim_host(host);
host->ops->card_event(host);
mmc_release_host(host);
host->trigger_card_event = false;
}
/* Verify that a registered card is still functional, else remove it. */
if (host->bus_ops)
host->bus_ops->detect(host);
host->detect_change = 0;
/* if there still is a card present, stop here */
if (host->bus_ops != NULL)
goto out;
mmc_claim_host(host);
if (mmc_card_is_removable(host) && host->ops->get_cd &&
host->ops->get_cd(host) == 0) {
mmc_power_off(host);
mmc_release_host(host);
goto out;
}
/* If an SD express card is present, then leave it as is. */
if (mmc_card_sd_express(host)) {
mmc_release_host(host);
goto out;
}
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
unsigned int freq = freqs[i];
if (freq > host->f_max) {
if (i + 1 < ARRAY_SIZE(freqs))
continue;
freq = host->f_max;
}
if (!mmc_rescan_try_freq(host, max(freq, host->f_min)))
break;
if (freqs[i] <= host->f_min)
break;
}
/* A non-removable card should have been detected by now. */
if (!mmc_card_is_removable(host) && !host->bus_ops)
pr_info("%s: Failed to initialize a non-removable card\n",
mmc_hostname(host));
/*
* Ignore the command timeout errors observed during
 * the card init as those are expected.
*/
host->err_stats[MMC_ERR_CMD_TIMEOUT] = 0;
mmc_release_host(host);
out:
if (host->caps & MMC_CAP_NEEDS_POLL)
mmc_schedule_delayed_work(&host->detect, HZ);
}
void mmc_start_host(struct mmc_host *host)
{
host->f_init = max(min(freqs[0], host->f_max), host->f_min);
host->rescan_disable = 0;
if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
mmc_claim_host(host);
mmc_power_up(host, host->ocr_avail);
mmc_release_host(host);
}
mmc_gpiod_request_cd_irq(host);
_mmc_detect_change(host, 0, false);
}
void __mmc_stop_host(struct mmc_host *host)
{
if (host->slot.cd_irq >= 0) {
mmc_gpio_set_cd_wake(host, false);
disable_irq(host->slot.cd_irq);
}
host->rescan_disable = 1;
cancel_delayed_work_sync(&host->detect);
}
void mmc_stop_host(struct mmc_host *host)
{
__mmc_stop_host(host);
/* clear pm flags now and let card drivers set them as needed */
host->pm_flags = 0;
if (host->bus_ops) {
/* Calling bus_ops->remove() with a claimed host can deadlock */
host->bus_ops->remove(host);
mmc_claim_host(host);
mmc_detach_bus(host);
mmc_power_off(host);
mmc_release_host(host);
return;
}
mmc_claim_host(host);
mmc_power_off(host);
mmc_release_host(host);
}
static int __init mmc_init(void)
{
int ret;
ret = mmc_register_bus();
if (ret)
return ret;
ret = mmc_register_host_class();
if (ret)
goto unregister_bus;
ret = sdio_register_bus();
if (ret)
goto unregister_host_class;
return 0;
unregister_host_class:
mmc_unregister_host_class();
unregister_bus:
mmc_unregister_bus();
return ret;
}
static void __exit mmc_exit(void)
{
sdio_unregister_bus();
mmc_unregister_host_class();
mmc_unregister_bus();
}
subsys_initcall(mmc_init);
module_exit(mmc_exit);
MODULE_LICENSE("GPL");
| linux-master | drivers/mmc/core/core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/mmc/core/bus.c
*
* Copyright (C) 2003 Russell King, All Rights Reserved.
* Copyright (C) 2007 Pierre Ossman
*
* MMC card bus driver model
*/
#include <linux/export.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "core.h"
#include "card.h"
#include "host.h"
#include "sdio_cis.h"
#include "bus.h"
#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv)
static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mmc_card *card = mmc_dev_to_card(dev);
switch (card->type) {
case MMC_TYPE_MMC:
return sysfs_emit(buf, "MMC\n");
case MMC_TYPE_SD:
return sysfs_emit(buf, "SD\n");
case MMC_TYPE_SDIO:
return sysfs_emit(buf, "SDIO\n");
case MMC_TYPE_SD_COMBO:
return sysfs_emit(buf, "SDcombo\n");
default:
return -EFAULT;
}
}
static DEVICE_ATTR_RO(type);
static struct attribute *mmc_dev_attrs[] = {
&dev_attr_type.attr,
NULL,
};
ATTRIBUTE_GROUPS(mmc_dev);
static int
mmc_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct mmc_card *card = mmc_dev_to_card(dev);
const char *type;
unsigned int i;
int retval = 0;
switch (card->type) {
case MMC_TYPE_MMC:
type = "MMC";
break;
case MMC_TYPE_SD:
type = "SD";
break;
case MMC_TYPE_SDIO:
type = "SDIO";
break;
case MMC_TYPE_SD_COMBO:
type = "SDcombo";
break;
default:
type = NULL;
}
if (type) {
retval = add_uevent_var(env, "MMC_TYPE=%s", type);
if (retval)
return retval;
}
if (mmc_card_sdio(card) || mmc_card_sd_combo(card)) {
retval = add_uevent_var(env, "SDIO_ID=%04X:%04X",
card->cis.vendor, card->cis.device);
if (retval)
return retval;
retval = add_uevent_var(env, "SDIO_REVISION=%u.%u",
card->major_rev, card->minor_rev);
if (retval)
return retval;
for (i = 0; i < card->num_info; i++) {
retval = add_uevent_var(env, "SDIO_INFO%u=%s", i+1, card->info[i]);
if (retval)
return retval;
}
}
/*
 * SDIO (non-combo) cards are not handled by the mmc_block driver and
 * do not have an accessible CID register, which is used by the
 * mmc_card_name() function.
*/
if (mmc_card_sdio(card))
return 0;
retval = add_uevent_var(env, "MMC_NAME=%s", mmc_card_name(card));
if (retval)
return retval;
/*
 * Request the mmc_block device. Note that this is a direct request
 * for the module; it carries no information as to what is inserted.
*/
retval = add_uevent_var(env, "MODALIAS=mmc:block");
return retval;
}
static int mmc_bus_probe(struct device *dev)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
struct mmc_card *card = mmc_dev_to_card(dev);
return drv->probe(card);
}
static void mmc_bus_remove(struct device *dev)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
struct mmc_card *card = mmc_dev_to_card(dev);
drv->remove(card);
}
static void mmc_bus_shutdown(struct device *dev)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
struct mmc_card *card = mmc_dev_to_card(dev);
struct mmc_host *host = card->host;
int ret;
if (dev->driver && drv->shutdown)
drv->shutdown(card);
if (host->bus_ops->shutdown) {
ret = host->bus_ops->shutdown(host);
if (ret)
pr_warn("%s: error %d during shutdown\n",
mmc_hostname(host), ret);
}
}
#ifdef CONFIG_PM_SLEEP
static int mmc_bus_suspend(struct device *dev)
{
struct mmc_card *card = mmc_dev_to_card(dev);
struct mmc_host *host = card->host;
int ret;
ret = pm_generic_suspend(dev);
if (ret)
return ret;
ret = host->bus_ops->suspend(host);
if (ret)
pm_generic_resume(dev);
return ret;
}
static int mmc_bus_resume(struct device *dev)
{
struct mmc_card *card = mmc_dev_to_card(dev);
struct mmc_host *host = card->host;
int ret;
ret = host->bus_ops->resume(host);
if (ret)
pr_warn("%s: error %d during resume (card was removed?)\n",
mmc_hostname(host), ret);
ret = pm_generic_resume(dev);
return ret;
}
#endif
#ifdef CONFIG_PM
static int mmc_runtime_suspend(struct device *dev)
{
struct mmc_card *card = mmc_dev_to_card(dev);
struct mmc_host *host = card->host;
return host->bus_ops->runtime_suspend(host);
}
static int mmc_runtime_resume(struct device *dev)
{
struct mmc_card *card = mmc_dev_to_card(dev);
struct mmc_host *host = card->host;
return host->bus_ops->runtime_resume(host);
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops mmc_bus_pm_ops = {
SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_suspend, mmc_bus_resume)
};
static struct bus_type mmc_bus_type = {
.name = "mmc",
.dev_groups = mmc_dev_groups,
.uevent = mmc_bus_uevent,
.probe = mmc_bus_probe,
.remove = mmc_bus_remove,
.shutdown = mmc_bus_shutdown,
.pm = &mmc_bus_pm_ops,
};
int mmc_register_bus(void)
{
return bus_register(&mmc_bus_type);
}
void mmc_unregister_bus(void)
{
bus_unregister(&mmc_bus_type);
}
/**
* mmc_register_driver - register a media driver
* @drv: MMC media driver
*/
int mmc_register_driver(struct mmc_driver *drv)
{
drv->drv.bus = &mmc_bus_type;
return driver_register(&drv->drv);
}
EXPORT_SYMBOL(mmc_register_driver);
/**
* mmc_unregister_driver - unregister a media driver
* @drv: MMC media driver
*/
void mmc_unregister_driver(struct mmc_driver *drv)
{
drv->drv.bus = &mmc_bus_type;
driver_unregister(&drv->drv);
}
EXPORT_SYMBOL(mmc_unregister_driver);
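/*
 * Example (illustrative sketch; "my_mmc_drv", my_probe() and my_remove()
 * are made-up names): a minimal media driver registered on the MMC bus.
 *
 *	static struct mmc_driver my_mmc_drv = {
 *		.drv = {
 *			.name = "my_mmc_drv",
 *		},
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	module_driver(my_mmc_drv, mmc_register_driver, mmc_unregister_driver);
 */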
static void mmc_release_card(struct device *dev)
{
struct mmc_card *card = mmc_dev_to_card(dev);
sdio_free_common_cis(card);
kfree(card->info);
kfree(card);
}
/*
* Allocate and initialise a new MMC card structure.
*/
struct mmc_card *mmc_alloc_card(struct mmc_host *host, struct device_type *type)
{
struct mmc_card *card;
card = kzalloc(sizeof(struct mmc_card), GFP_KERNEL);
if (!card)
return ERR_PTR(-ENOMEM);
card->host = host;
device_initialize(&card->dev);
card->dev.parent = mmc_classdev(host);
card->dev.bus = &mmc_bus_type;
card->dev.release = mmc_release_card;
card->dev.type = type;
return card;
}
/*
* Register a new MMC card with the driver model.
*/
int mmc_add_card(struct mmc_card *card)
{
int ret;
const char *type;
const char *uhs_bus_speed_mode = "";
static const char *const uhs_speeds[] = {
[UHS_SDR12_BUS_SPEED] = "SDR12 ",
[UHS_SDR25_BUS_SPEED] = "SDR25 ",
[UHS_SDR50_BUS_SPEED] = "SDR50 ",
[UHS_SDR104_BUS_SPEED] = "SDR104 ",
[UHS_DDR50_BUS_SPEED] = "DDR50 ",
};
dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca);
dev_set_removable(&card->dev,
mmc_card_is_removable(card->host) ?
DEVICE_REMOVABLE : DEVICE_FIXED);
switch (card->type) {
case MMC_TYPE_MMC:
type = "MMC";
break;
case MMC_TYPE_SD:
type = "SD";
if (mmc_card_blockaddr(card)) {
if (mmc_card_ext_capacity(card))
type = "SDXC";
else
type = "SDHC";
}
break;
case MMC_TYPE_SDIO:
type = "SDIO";
break;
case MMC_TYPE_SD_COMBO:
type = "SD-combo";
if (mmc_card_blockaddr(card))
type = "SDHC-combo";
break;
default:
type = "?";
break;
}
if (mmc_card_uhs(card) &&
(card->sd_bus_speed < ARRAY_SIZE(uhs_speeds)))
uhs_bus_speed_mode = uhs_speeds[card->sd_bus_speed];
if (mmc_host_is_spi(card->host)) {
pr_info("%s: new %s%s%s card on SPI\n",
mmc_hostname(card->host),
mmc_card_hs(card) ? "high speed " : "",
mmc_card_ddr52(card) ? "DDR " : "",
type);
} else {
pr_info("%s: new %s%s%s%s%s%s card at address %04x\n",
mmc_hostname(card->host),
mmc_card_uhs(card) ? "ultra high speed " :
(mmc_card_hs(card) ? "high speed " : ""),
mmc_card_hs400(card) ? "HS400 " :
(mmc_card_hs200(card) ? "HS200 " : ""),
mmc_card_hs400es(card) ? "Enhanced strobe " : "",
mmc_card_ddr52(card) ? "DDR " : "",
uhs_bus_speed_mode, type, card->rca);
}
mmc_add_card_debugfs(card);
card->dev.of_node = mmc_of_find_child_device(card->host, 0);
device_enable_async_suspend(&card->dev);
ret = device_add(&card->dev);
if (ret)
return ret;
mmc_card_set_present(card);
return 0;
}
/*
 * Unregister an MMC card from the driver model, and
* (eventually) free it.
*/
void mmc_remove_card(struct mmc_card *card)
{
struct mmc_host *host = card->host;
mmc_remove_card_debugfs(card);
if (mmc_card_present(card)) {
if (mmc_host_is_spi(card->host)) {
pr_info("%s: SPI card removed\n",
mmc_hostname(card->host));
} else {
pr_info("%s: card %04x removed\n",
mmc_hostname(card->host), card->rca);
}
device_del(&card->dev);
of_node_put(card->dev.of_node);
}
if (host->cqe_enabled) {
host->cqe_ops->cqe_disable(host);
host->cqe_enabled = false;
}
put_device(&card->dev);
}
| linux-master | drivers/mmc/core/bus.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/mmc/core/host.c
*
* Copyright (C) 2003 Russell King, All Rights Reserved.
* Copyright (C) 2007-2008 Pierre Ossman
* Copyright (C) 2010 Linus Walleij
*
* MMC host class device management
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pagemap.h>
#include <linux/pm_wakeup.h>
#include <linux/export.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>
#include "core.h"
#include "crypto.h"
#include "host.h"
#include "slot-gpio.h"
#include "pwrseq.h"
#include "sdio_ops.h"
#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev)
static DEFINE_IDA(mmc_host_ida);
#ifdef CONFIG_PM_SLEEP
static int mmc_host_class_prepare(struct device *dev)
{
struct mmc_host *host = cls_dev_to_mmc_host(dev);
/*
* It's safe to access the bus_ops pointer, as both userspace and the
* workqueue for detecting cards are frozen at this point.
*/
if (!host->bus_ops)
return 0;
/* Validate conditions for system suspend. */
if (host->bus_ops->pre_suspend)
return host->bus_ops->pre_suspend(host);
return 0;
}
static void mmc_host_class_complete(struct device *dev)
{
struct mmc_host *host = cls_dev_to_mmc_host(dev);
_mmc_detect_change(host, 0, false);
}
static const struct dev_pm_ops mmc_host_class_dev_pm_ops = {
.prepare = mmc_host_class_prepare,
.complete = mmc_host_class_complete,
};
#define MMC_HOST_CLASS_DEV_PM_OPS (&mmc_host_class_dev_pm_ops)
#else
#define MMC_HOST_CLASS_DEV_PM_OPS NULL
#endif
static void mmc_host_classdev_release(struct device *dev)
{
struct mmc_host *host = cls_dev_to_mmc_host(dev);
wakeup_source_unregister(host->ws);
if (of_alias_get_id(host->parent->of_node, "mmc") < 0)
ida_simple_remove(&mmc_host_ida, host->index);
kfree(host);
}
static int mmc_host_classdev_shutdown(struct device *dev)
{
struct mmc_host *host = cls_dev_to_mmc_host(dev);
__mmc_stop_host(host);
return 0;
}
static struct class mmc_host_class = {
.name = "mmc_host",
.dev_release = mmc_host_classdev_release,
.shutdown_pre = mmc_host_classdev_shutdown,
.pm = MMC_HOST_CLASS_DEV_PM_OPS,
};
int mmc_register_host_class(void)
{
return class_register(&mmc_host_class);
}
void mmc_unregister_host_class(void)
{
class_unregister(&mmc_host_class);
}
/**
* mmc_retune_enable() - enter a transfer mode that requires retuning
* @host: host which should retune now
*/
void mmc_retune_enable(struct mmc_host *host)
{
host->can_retune = 1;
if (host->retune_period)
mod_timer(&host->retune_timer,
jiffies + host->retune_period * HZ);
}
/*
* Pause re-tuning for a small set of operations. The pause begins after the
* next command and after first doing re-tuning.
*/
void mmc_retune_pause(struct mmc_host *host)
{
if (!host->retune_paused) {
host->retune_paused = 1;
mmc_retune_needed(host);
mmc_retune_hold(host);
}
}
EXPORT_SYMBOL(mmc_retune_pause);
void mmc_retune_unpause(struct mmc_host *host)
{
if (host->retune_paused) {
host->retune_paused = 0;
mmc_retune_release(host);
}
}
EXPORT_SYMBOL(mmc_retune_unpause);
/**
* mmc_retune_disable() - exit a transfer mode that requires retuning
* @host: host which should not retune anymore
*
* It is not meant for temporarily preventing retuning!
*/
void mmc_retune_disable(struct mmc_host *host)
{
mmc_retune_unpause(host);
host->can_retune = 0;
del_timer_sync(&host->retune_timer);
mmc_retune_clear(host);
}
void mmc_retune_timer_stop(struct mmc_host *host)
{
del_timer_sync(&host->retune_timer);
}
EXPORT_SYMBOL(mmc_retune_timer_stop);
void mmc_retune_hold(struct mmc_host *host)
{
if (!host->hold_retune)
host->retune_now = 1;
host->hold_retune += 1;
}
void mmc_retune_release(struct mmc_host *host)
{
if (host->hold_retune)
host->hold_retune -= 1;
else
WARN_ON(1);
}
EXPORT_SYMBOL(mmc_retune_release);
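/*
 * Usage note (example, not a new mechanism): holds must be balanced with
 * releases around any command sequence that cannot tolerate a re-tune in
 * the middle, as mmc_do_erase() in core.c does:
 *
 *	mmc_retune_hold(host);
 *	... issue the commands that must not be interrupted ...
 *	mmc_retune_release(host);
 */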
int mmc_retune(struct mmc_host *host)
{
bool return_to_hs400 = false;
int err;
if (host->retune_now)
host->retune_now = 0;
else
return 0;
if (!host->need_retune || host->doing_retune || !host->card)
return 0;
host->need_retune = 0;
host->doing_retune = 1;
if (host->ios.timing == MMC_TIMING_MMC_HS400) {
err = mmc_hs400_to_hs200(host->card);
if (err)
goto out;
return_to_hs400 = true;
}
err = mmc_execute_tuning(host->card);
if (err)
goto out;
if (return_to_hs400)
err = mmc_hs200_to_hs400(host->card);
out:
host->doing_retune = 0;
return err;
}
static void mmc_retune_timer(struct timer_list *t)
{
struct mmc_host *host = from_timer(host, t, retune_timer);
mmc_retune_needed(host);
}
static void mmc_of_parse_timing_phase(struct device *dev, const char *prop,
struct mmc_clk_phase *phase)
{
int degrees[2] = {0};
int rc;
rc = device_property_read_u32_array(dev, prop, degrees, 2);
phase->valid = !rc;
if (phase->valid) {
phase->in_deg = degrees[0];
phase->out_deg = degrees[1];
}
}
void
mmc_of_parse_clk_phase(struct mmc_host *host, struct mmc_clk_phase_map *map)
{
struct device *dev = host->parent;
mmc_of_parse_timing_phase(dev, "clk-phase-legacy",
&map->phase[MMC_TIMING_LEGACY]);
mmc_of_parse_timing_phase(dev, "clk-phase-mmc-hs",
&map->phase[MMC_TIMING_MMC_HS]);
mmc_of_parse_timing_phase(dev, "clk-phase-sd-hs",
&map->phase[MMC_TIMING_SD_HS]);
mmc_of_parse_timing_phase(dev, "clk-phase-uhs-sdr12",
&map->phase[MMC_TIMING_UHS_SDR12]);
mmc_of_parse_timing_phase(dev, "clk-phase-uhs-sdr25",
&map->phase[MMC_TIMING_UHS_SDR25]);
mmc_of_parse_timing_phase(dev, "clk-phase-uhs-sdr50",
&map->phase[MMC_TIMING_UHS_SDR50]);
mmc_of_parse_timing_phase(dev, "clk-phase-uhs-sdr104",
&map->phase[MMC_TIMING_UHS_SDR104]);
mmc_of_parse_timing_phase(dev, "clk-phase-uhs-ddr50",
&map->phase[MMC_TIMING_UHS_DDR50]);
mmc_of_parse_timing_phase(dev, "clk-phase-mmc-ddr52",
&map->phase[MMC_TIMING_MMC_DDR52]);
mmc_of_parse_timing_phase(dev, "clk-phase-mmc-hs200",
&map->phase[MMC_TIMING_MMC_HS200]);
mmc_of_parse_timing_phase(dev, "clk-phase-mmc-hs400",
&map->phase[MMC_TIMING_MMC_HS400]);
}
EXPORT_SYMBOL(mmc_of_parse_clk_phase);
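/*
 * Devicetree sketch for the clk-phase-* properties parsed above: each
 * property carries two u32 cells, the input and output clock phase in
 * degrees for that timing mode. The node label and values below are
 * illustrative only.
 *
 *	&sdhc_1 {
 *		clk-phase-sd-hs = <0 90>;
 *		clk-phase-mmc-hs200 = <120 180>;
 *	};
 */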
/**
* mmc_of_parse() - parse host's device properties
* @host: host whose properties should be parsed.
*
* To keep the rest of the MMC subsystem unaware of whether DT has been
* used to instantiate and configure this host instance or not, we
* parse the properties and set respective generic mmc-host flags and
* parameters.
*/
int mmc_of_parse(struct mmc_host *host)
{
struct device *dev = host->parent;
u32 bus_width, drv_type, cd_debounce_delay_ms;
int ret;
if (!dev || !dev_fwnode(dev))
return 0;
/* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) {
dev_dbg(host->parent,
"\"bus-width\" property is missing, assuming 1 bit.\n");
bus_width = 1;
}
switch (bus_width) {
case 8:
host->caps |= MMC_CAP_8_BIT_DATA;
fallthrough; /* Hosts capable of 8-bit can also do 4 bits */
case 4:
host->caps |= MMC_CAP_4_BIT_DATA;
break;
case 1:
break;
default:
dev_err(host->parent,
"Invalid \"bus-width\" value %u!\n", bus_width);
return -EINVAL;
}
/* f_max is obtained from the optional "max-frequency" property */
device_property_read_u32(dev, "max-frequency", &host->f_max);
/*
* Configure CD and WP pins. They are both by default active low to
* match the SDHCI spec. If GPIOs are provided for CD and / or WP, the
* mmc-gpio helpers are used to attach, configure and use them. If
* polarity inversion is specified in DT, one of MMC_CAP2_CD_ACTIVE_HIGH
* and MMC_CAP2_RO_ACTIVE_HIGH capability-2 flags is set. If the
* "broken-cd" property is provided, the MMC_CAP_NEEDS_POLL capability
* is set. If the "non-removable" property is found, the
* MMC_CAP_NONREMOVABLE capability is set and no card-detection
* configuration is performed.
*/
/* Parse Card Detection */
if (device_property_read_bool(dev, "non-removable")) {
host->caps |= MMC_CAP_NONREMOVABLE;
} else {
if (device_property_read_bool(dev, "cd-inverted"))
host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
if (device_property_read_u32(dev, "cd-debounce-delay-ms",
&cd_debounce_delay_ms))
cd_debounce_delay_ms = 200;
if (device_property_read_bool(dev, "broken-cd"))
host->caps |= MMC_CAP_NEEDS_POLL;
ret = mmc_gpiod_request_cd(host, "cd", 0, false,
cd_debounce_delay_ms * 1000);
if (!ret)
dev_info(host->parent, "Got CD GPIO\n");
else if (ret != -ENOENT && ret != -ENOSYS)
return ret;
}
/* Parse Write Protection */
if (device_property_read_bool(dev, "wp-inverted"))
host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
ret = mmc_gpiod_request_ro(host, "wp", 0, 0);
if (!ret)
dev_info(host->parent, "Got WP GPIO\n");
else if (ret != -ENOENT && ret != -ENOSYS)
return ret;
if (device_property_read_bool(dev, "disable-wp"))
host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
if (device_property_read_bool(dev, "cap-sd-highspeed"))
host->caps |= MMC_CAP_SD_HIGHSPEED;
if (device_property_read_bool(dev, "cap-mmc-highspeed"))
host->caps |= MMC_CAP_MMC_HIGHSPEED;
if (device_property_read_bool(dev, "sd-uhs-sdr12"))
host->caps |= MMC_CAP_UHS_SDR12;
if (device_property_read_bool(dev, "sd-uhs-sdr25"))
host->caps |= MMC_CAP_UHS_SDR25;
if (device_property_read_bool(dev, "sd-uhs-sdr50"))
host->caps |= MMC_CAP_UHS_SDR50;
if (device_property_read_bool(dev, "sd-uhs-sdr104"))
host->caps |= MMC_CAP_UHS_SDR104;
if (device_property_read_bool(dev, "sd-uhs-ddr50"))
host->caps |= MMC_CAP_UHS_DDR50;
if (device_property_read_bool(dev, "cap-power-off-card"))
host->caps |= MMC_CAP_POWER_OFF_CARD;
if (device_property_read_bool(dev, "cap-mmc-hw-reset"))
host->caps |= MMC_CAP_HW_RESET;
if (device_property_read_bool(dev, "cap-sdio-irq"))
host->caps |= MMC_CAP_SDIO_IRQ;
if (device_property_read_bool(dev, "full-pwr-cycle"))
host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
if (device_property_read_bool(dev, "full-pwr-cycle-in-suspend"))
host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND;
if (device_property_read_bool(dev, "keep-power-in-suspend"))
host->pm_caps |= MMC_PM_KEEP_POWER;
if (device_property_read_bool(dev, "wakeup-source") ||
device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
if (device_property_read_bool(dev, "mmc-ddr-3_3v"))
host->caps |= MMC_CAP_3_3V_DDR;
if (device_property_read_bool(dev, "mmc-ddr-1_8v"))
host->caps |= MMC_CAP_1_8V_DDR;
if (device_property_read_bool(dev, "mmc-ddr-1_2v"))
host->caps |= MMC_CAP_1_2V_DDR;
if (device_property_read_bool(dev, "mmc-hs200-1_8v"))
host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
if (device_property_read_bool(dev, "mmc-hs200-1_2v"))
host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
if (device_property_read_bool(dev, "mmc-hs400-1_8v"))
host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
if (device_property_read_bool(dev, "mmc-hs400-1_2v"))
host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
if (device_property_read_bool(dev, "mmc-hs400-enhanced-strobe"))
host->caps2 |= MMC_CAP2_HS400_ES;
if (device_property_read_bool(dev, "no-sdio"))
host->caps2 |= MMC_CAP2_NO_SDIO;
if (device_property_read_bool(dev, "no-sd"))
host->caps2 |= MMC_CAP2_NO_SD;
if (device_property_read_bool(dev, "no-mmc"))
host->caps2 |= MMC_CAP2_NO_MMC;
if (device_property_read_bool(dev, "no-mmc-hs400"))
host->caps2 &= ~(MMC_CAP2_HS400_1_8V | MMC_CAP2_HS400_1_2V |
MMC_CAP2_HS400_ES);
/* Must be after "non-removable" check */
if (device_property_read_u32(dev, "fixed-emmc-driver-type", &drv_type) == 0) {
if (host->caps & MMC_CAP_NONREMOVABLE)
host->fixed_drv_type = drv_type;
else
dev_err(host->parent,
"can't use fixed driver type, media is removable\n");
}
host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
if (host->dsr_req && (host->dsr & ~0xffff)) {
dev_err(host->parent,
"device tree specified broken value for DSR: 0x%x, ignoring\n",
host->dsr);
host->dsr_req = 0;
}
device_property_read_u32(dev, "post-power-on-delay-ms",
&host->ios.power_delay_ms);
return mmc_pwrseq_alloc(host);
}
EXPORT_SYMBOL(mmc_of_parse);
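/*
 * Illustrative devicetree fragment exercising a few of the properties
 * mmc_of_parse() handles above; the node label, GPIO specifiers and values
 * are made up for the example.
 *
 *	&mmc0 {
 *		bus-width = <4>;
 *		max-frequency = <50000000>;
 *		cap-sd-highspeed;
 *		cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
 *		cd-debounce-delay-ms = <50>;
 *		wp-gpios = <&gpio2 8 GPIO_ACTIVE_HIGH>;
 *	};
 */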
/**
* mmc_of_parse_voltage - return mask of supported voltages
* @host: host whose properties should be parsed.
* @mask: mask of voltages available for MMC/SD/SDIO
*
* Parse the "voltage-ranges" property, returning zero if it is not
* found, negative errno if the voltage-range specification is invalid,
* or one if the voltage-range is specified and successfully parsed.
*/
int mmc_of_parse_voltage(struct mmc_host *host, u32 *mask)
{
const char *prop = "voltage-ranges";
struct device *dev = host->parent;
u32 *voltage_ranges;
int num_ranges, i;
int ret;
if (!device_property_present(dev, prop)) {
dev_dbg(dev, "%s unspecified\n", prop);
return 0;
}
ret = device_property_count_u32(dev, prop);
if (ret < 0)
return ret;
num_ranges = ret / 2;
if (!num_ranges) {
dev_err(dev, "%s empty\n", prop);
return -EINVAL;
}
voltage_ranges = kcalloc(2 * num_ranges, sizeof(*voltage_ranges), GFP_KERNEL);
if (!voltage_ranges)
return -ENOMEM;
ret = device_property_read_u32_array(dev, prop, voltage_ranges, 2 * num_ranges);
if (ret) {
kfree(voltage_ranges);
return ret;
}
for (i = 0; i < num_ranges; i++) {
const int j = i * 2;
u32 ocr_mask;
ocr_mask = mmc_vddrange_to_ocrmask(voltage_ranges[j + 0],
voltage_ranges[j + 1]);
if (!ocr_mask) {
dev_err(dev, "range #%d in %s is invalid\n", i, prop);
kfree(voltage_ranges);
return -EINVAL;
}
*mask |= ocr_mask;
}
kfree(voltage_ranges);
return 1;
}
EXPORT_SYMBOL(mmc_of_parse_voltage);
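/*
 * Illustrative "voltage-ranges" property: pairs of <min max> in millivolts,
 * each converted to an OCR mask via mmc_vddrange_to_ocrmask(). A board with
 * a 3.3V rail and a 1.8V rail might declare (values illustrative):
 *
 *	voltage-ranges = <3300 3300>, <1800 1800>;
 */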
/**
* mmc_first_nonreserved_index() - get the first index that is not reserved
*/
static int mmc_first_nonreserved_index(void)
{
int max;
max = of_alias_get_highest_id("mmc");
if (max < 0)
return 0;
return max + 1;
}
/**
* mmc_alloc_host - initialise the per-host structure.
 * @extra: size of the private data structure
* @dev: pointer to host device model structure
*
* Initialise the per-host structure.
*/
struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
int index;
struct mmc_host *host;
int alias_id, min_idx, max_idx;
host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
if (!host)
return NULL;
/* scanning will be enabled when we're ready */
host->rescan_disable = 1;
alias_id = of_alias_get_id(dev->of_node, "mmc");
if (alias_id >= 0) {
index = alias_id;
} else {
min_idx = mmc_first_nonreserved_index();
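		/* max_idx == 0 lets ida_simple_get() pick any id >= min_idx */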
max_idx = 0;
index = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL);
if (index < 0) {
kfree(host);
return NULL;
}
}
host->index = index;
dev_set_name(&host->class_dev, "mmc%d", host->index);
host->ws = wakeup_source_register(NULL, dev_name(&host->class_dev));
host->parent = dev;
host->class_dev.parent = dev;
host->class_dev.class = &mmc_host_class;
device_initialize(&host->class_dev);
device_enable_async_suspend(&host->class_dev);
if (mmc_gpio_alloc(host)) {
put_device(&host->class_dev);
return NULL;
}
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
INIT_WORK(&host->sdio_irq_work, sdio_irq_work);
timer_setup(&host->retune_timer, mmc_retune_timer, 0);
/*
* By default, hosts do not support SGIO or large requests.
* They have to set these according to their abilities.
*/
host->max_segs = 1;
host->max_seg_size = PAGE_SIZE;
host->max_req_size = PAGE_SIZE;
host->max_blk_size = 512;
host->max_blk_count = PAGE_SIZE / 512;
host->fixed_drv_type = -EINVAL;
host->ios.power_delay_ms = 10;
host->ios.power_mode = MMC_POWER_UNDEFINED;
return host;
}
EXPORT_SYMBOL(mmc_alloc_host);
static void devm_mmc_host_release(struct device *dev, void *res)
{
mmc_free_host(*(struct mmc_host **)res);
}
struct mmc_host *devm_mmc_alloc_host(struct device *dev, int extra)
{
struct mmc_host **dr, *host;
dr = devres_alloc(devm_mmc_host_release, sizeof(*dr), GFP_KERNEL);
if (!dr)
return NULL;
host = mmc_alloc_host(extra, dev);
if (!host) {
devres_free(dr);
return NULL;
}
*dr = host;
devres_add(dev, dr);
return host;
}
EXPORT_SYMBOL(devm_mmc_alloc_host);
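/*
 * A minimal host-driver probe sketch using the devres variant above.
 * struct foo_host, foo_ops and the platform-device wiring are hypothetical,
 * and error handling is trimmed to the essentials.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct mmc_host *mmc;
 *		int ret;
 *
 *		mmc = devm_mmc_alloc_host(&pdev->dev,
 *					  sizeof(struct foo_host));
 *		if (!mmc)
 *			return -ENOMEM;
 *
 *		ret = mmc_of_parse(mmc);   // caps from firmware properties
 *		if (ret)
 *			return ret;
 *
 *		mmc->ops = &foo_ops;	// hypothetical host operations
 *		platform_set_drvdata(pdev, mmc);
 *		return mmc_add_host(mmc);
 *	}
 */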
static int mmc_validate_host_caps(struct mmc_host *host)
{
struct device *dev = host->parent;
u32 caps = host->caps, caps2 = host->caps2;
if (caps & MMC_CAP_SDIO_IRQ && !host->ops->enable_sdio_irq) {
dev_warn(dev, "missing ->enable_sdio_irq() ops\n");
return -EINVAL;
}
if (caps2 & (MMC_CAP2_HS400_ES | MMC_CAP2_HS400) &&
!(caps & MMC_CAP_8_BIT_DATA) && !(caps2 & MMC_CAP2_NO_MMC)) {
dev_warn(dev, "drop HS400 support since no 8-bit bus\n");
host->caps2 = caps2 & ~MMC_CAP2_HS400_ES & ~MMC_CAP2_HS400;
}
return 0;
}
/**
* mmc_add_host - initialise host hardware
* @host: mmc host
*
* Register the host with the driver model. The host must be
* prepared to start servicing requests before this function
* completes.
*/
int mmc_add_host(struct mmc_host *host)
{
int err;
err = mmc_validate_host_caps(host);
if (err)
return err;
err = device_add(&host->class_dev);
if (err)
return err;
led_trigger_register_simple(dev_name(&host->class_dev), &host->led);
#ifdef CONFIG_DEBUG_FS
	mmc_add_host_debugfs(host);
#endif
mmc_start_host(host);
return 0;
}
EXPORT_SYMBOL(mmc_add_host);
/**
* mmc_remove_host - remove host hardware
* @host: mmc host
*
* Unregister and remove all cards associated with this host,
* and power down the MMC bus. No new requests will be issued
* after this function has returned.
*/
void mmc_remove_host(struct mmc_host *host)
{
mmc_stop_host(host);
#ifdef CONFIG_DEBUG_FS
	mmc_remove_host_debugfs(host);
#endif
device_del(&host->class_dev);
led_trigger_unregister_simple(host->led);
}
EXPORT_SYMBOL(mmc_remove_host);
/**
* mmc_free_host - free the host structure
* @host: mmc host
*
* Free the host once all references to it have been dropped.
*/
void mmc_free_host(struct mmc_host *host)
{
mmc_pwrseq_free(host);
put_device(&host->class_dev);
}
EXPORT_SYMBOL(mmc_free_host);
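/*
 * Teardown sketch matching the helpers above: a host driver's remove path
 * must call mmc_remove_host() before the final mmc_free_host() (the latter
 * happens automatically when devm_mmc_alloc_host() was used). foo_remove()
 * is hypothetical.
 *
 *	static void foo_remove(struct platform_device *pdev)
 *	{
 *		struct mmc_host *mmc = platform_get_drvdata(pdev);
 *
 *		mmc_remove_host(mmc);	// stop host, unregister class device
 *		mmc_free_host(mmc);	// final put; omit if devres-managed
 *	}
 */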
| linux-master | drivers/mmc/core/host.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SDIO UART/GPS driver
*
* Based on drivers/serial/8250.c and drivers/serial/serial_core.c
* by Russell King.
*
* Author: Nicolas Pitre
* Created: June 15, 2007
* Copyright: MontaVista Software, Inc.
*/
/*
* Note: Although this driver assumes a 16550A-like UART implementation,
* it is not possible to leverage the common 8250/16550 driver, nor the
 * core UART infrastructure, as they assume direct access to the hardware
* registers, often under a spinlock. This is not possible in the SDIO
* context as SDIO access functions must be able to sleep.
*
* Because we need to lock the SDIO host to ensure an exclusive access to
* the card, we simply rely on that lock to also prevent and serialize
* concurrent access to the same port.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/circ_buf.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#define UART_NR 8 /* Number of UARTs this driver can handle */
#define FIFO_SIZE PAGE_SIZE
#define WAKEUP_CHARS 256
struct uart_icount {
__u32 cts;
__u32 dsr;
__u32 rng;
__u32 dcd;
__u32 rx;
__u32 tx;
__u32 frame;
__u32 overrun;
__u32 parity;
__u32 brk;
};
struct sdio_uart_port {
struct tty_port port;
unsigned int index;
struct sdio_func *func;
struct mutex func_lock;
struct task_struct *in_sdio_uart_irq;
unsigned int regs_offset;
struct kfifo xmit_fifo;
spinlock_t write_lock;
struct uart_icount icount;
unsigned int uartclk;
unsigned int mctrl;
unsigned int rx_mctrl;
unsigned int read_status_mask;
unsigned int ignore_status_mask;
unsigned char x_char;
unsigned char ier;
unsigned char lcr;
};
static struct sdio_uart_port *sdio_uart_table[UART_NR];
static DEFINE_SPINLOCK(sdio_uart_table_lock);
static int sdio_uart_add_port(struct sdio_uart_port *port)
{
int index, ret = -EBUSY;
mutex_init(&port->func_lock);
spin_lock_init(&port->write_lock);
if (kfifo_alloc(&port->xmit_fifo, FIFO_SIZE, GFP_KERNEL))
return -ENOMEM;
spin_lock(&sdio_uart_table_lock);
for (index = 0; index < UART_NR; index++) {
if (!sdio_uart_table[index]) {
port->index = index;
sdio_uart_table[index] = port;
ret = 0;
break;
}
}
spin_unlock(&sdio_uart_table_lock);
return ret;
}
static struct sdio_uart_port *sdio_uart_port_get(unsigned index)
{
struct sdio_uart_port *port;
if (index >= UART_NR)
return NULL;
spin_lock(&sdio_uart_table_lock);
port = sdio_uart_table[index];
if (port)
tty_port_get(&port->port);
spin_unlock(&sdio_uart_table_lock);
return port;
}
static void sdio_uart_port_put(struct sdio_uart_port *port)
{
tty_port_put(&port->port);
}
static void sdio_uart_port_remove(struct sdio_uart_port *port)
{
struct sdio_func *func;
spin_lock(&sdio_uart_table_lock);
sdio_uart_table[port->index] = NULL;
spin_unlock(&sdio_uart_table_lock);
/*
	 * We're killing a port that is potentially still in use by
* the tty layer. Be careful to prevent any further access
* to the SDIO function and arrange for the tty layer to
* give up on that port ASAP.
* Beware: the lock ordering is critical.
*/
mutex_lock(&port->port.mutex);
mutex_lock(&port->func_lock);
func = port->func;
sdio_claim_host(func);
port->func = NULL;
mutex_unlock(&port->func_lock);
	/* tty_hangup() is async, so is this safe as-is? */
tty_port_tty_hangup(&port->port, false);
mutex_unlock(&port->port.mutex);
sdio_release_irq(func);
sdio_disable_func(func);
sdio_release_host(func);
sdio_uart_port_put(port);
}
static int sdio_uart_claim_func(struct sdio_uart_port *port)
{
mutex_lock(&port->func_lock);
if (unlikely(!port->func)) {
mutex_unlock(&port->func_lock);
return -ENODEV;
}
if (likely(port->in_sdio_uart_irq != current))
sdio_claim_host(port->func);
mutex_unlock(&port->func_lock);
return 0;
}
static inline void sdio_uart_release_func(struct sdio_uart_port *port)
{
if (likely(port->in_sdio_uart_irq != current))
sdio_release_host(port->func);
}
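/*
 * The claim/release pair above is the locking pattern every path in this
 * driver follows before touching card registers. A minimal sketch, where
 * do_register_access() stands in for the real work:
 *
 *	if (sdio_uart_claim_func(port) != 0)
 *		return;			// card is gone, bail out
 *	do_register_access(port);	// sdio_in()/sdio_out() safe here
 *	sdio_uart_release_func(port);
 */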
static inline unsigned int sdio_in(struct sdio_uart_port *port, int offset)
{
unsigned char c;
c = sdio_readb(port->func, port->regs_offset + offset, NULL);
return c;
}
static inline void sdio_out(struct sdio_uart_port *port, int offset, int value)
{
sdio_writeb(port->func, value, port->regs_offset + offset, NULL);
}
static unsigned int sdio_uart_get_mctrl(struct sdio_uart_port *port)
{
unsigned char status;
unsigned int ret;
	/*
	 * FIXME: What stops this from losing the delta bits and breaking
	 * sdio_uart_check_modem_status()?
	 */
status = sdio_in(port, UART_MSR);
ret = 0;
if (status & UART_MSR_DCD)
ret |= TIOCM_CAR;
if (status & UART_MSR_RI)
ret |= TIOCM_RNG;
if (status & UART_MSR_DSR)
ret |= TIOCM_DSR;
if (status & UART_MSR_CTS)
ret |= TIOCM_CTS;
return ret;
}
static void sdio_uart_write_mctrl(struct sdio_uart_port *port,
unsigned int mctrl)
{
unsigned char mcr = 0;
if (mctrl & TIOCM_RTS)
mcr |= UART_MCR_RTS;
if (mctrl & TIOCM_DTR)
mcr |= UART_MCR_DTR;
if (mctrl & TIOCM_OUT1)
mcr |= UART_MCR_OUT1;
if (mctrl & TIOCM_OUT2)
mcr |= UART_MCR_OUT2;
if (mctrl & TIOCM_LOOP)
mcr |= UART_MCR_LOOP;
sdio_out(port, UART_MCR, mcr);
}
static inline void sdio_uart_update_mctrl(struct sdio_uart_port *port,
unsigned int set, unsigned int clear)
{
unsigned int old;
old = port->mctrl;
port->mctrl = (old & ~clear) | set;
if (old != port->mctrl)
sdio_uart_write_mctrl(port, port->mctrl);
}
#define sdio_uart_set_mctrl(port, x) sdio_uart_update_mctrl(port, x, 0)
#define sdio_uart_clear_mctrl(port, x) sdio_uart_update_mctrl(port, 0, x)
static void sdio_uart_change_speed(struct sdio_uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
{
unsigned char cval, fcr = 0;
unsigned int baud, quot;
cval = UART_LCR_WLEN(tty_get_char_size(termios->c_cflag));
if (termios->c_cflag & CSTOPB)
cval |= UART_LCR_STOP;
if (termios->c_cflag & PARENB)
cval |= UART_LCR_PARITY;
if (!(termios->c_cflag & PARODD))
cval |= UART_LCR_EPAR;
for (;;) {
baud = tty_termios_baud_rate(termios);
if (baud == 0)
baud = 9600; /* Special case: B0 rate. */
if (baud <= port->uartclk)
break;
		/*
		 * The requested rate is faster than the UART clock can
		 * generate. Try again with the old baud rate if possible,
		 * otherwise default to 9600.
		 */
termios->c_cflag &= ~CBAUD;
if (old) {
termios->c_cflag |= old->c_cflag & CBAUD;
old = NULL;
} else
termios->c_cflag |= B9600;
}
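	/*
	 * (2 * uartclk + baud) / (2 * baud) rounds uartclk / baud to the
	 * nearest integer, i.e. DIV_ROUND_CLOSEST(port->uartclk, baud).
	 */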
quot = (2 * port->uartclk + baud) / (2 * baud);
if (baud < 2400)
fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1;
else
fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10;
port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
if (termios->c_iflag & INPCK)
port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
if (termios->c_iflag & (BRKINT | PARMRK))
port->read_status_mask |= UART_LSR_BI;
/*
* Characters to ignore
*/
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
if (termios->c_iflag & IGNBRK) {
port->ignore_status_mask |= UART_LSR_BI;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= UART_LSR_OE;
}
/*
* ignore all characters if CREAD is not set
*/
if ((termios->c_cflag & CREAD) == 0)
port->ignore_status_mask |= UART_LSR_DR;
/*
* CTS flow control flag and modem status interrupts
*/
port->ier &= ~UART_IER_MSI;
if ((termios->c_cflag & CRTSCTS) || !(termios->c_cflag & CLOCAL))
port->ier |= UART_IER_MSI;
port->lcr = cval;
sdio_out(port, UART_IER, port->ier);
sdio_out(port, UART_LCR, cval | UART_LCR_DLAB);
sdio_out(port, UART_DLL, quot & 0xff);
sdio_out(port, UART_DLM, quot >> 8);
sdio_out(port, UART_LCR, cval);
sdio_out(port, UART_FCR, fcr);
sdio_uart_write_mctrl(port, port->mctrl);
}
static void sdio_uart_start_tx(struct sdio_uart_port *port)
{
if (!(port->ier & UART_IER_THRI)) {
port->ier |= UART_IER_THRI;
sdio_out(port, UART_IER, port->ier);
}
}
static void sdio_uart_stop_tx(struct sdio_uart_port *port)
{
if (port->ier & UART_IER_THRI) {
port->ier &= ~UART_IER_THRI;
sdio_out(port, UART_IER, port->ier);
}
}
static void sdio_uart_stop_rx(struct sdio_uart_port *port)
{
port->ier &= ~UART_IER_RLSI;
port->read_status_mask &= ~UART_LSR_DR;
sdio_out(port, UART_IER, port->ier);
}
static void sdio_uart_receive_chars(struct sdio_uart_port *port,
unsigned int *status)
{
unsigned int ch, flag;
int max_count = 256;
do {
ch = sdio_in(port, UART_RX);
flag = TTY_NORMAL;
port->icount.rx++;
if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
UART_LSR_FE | UART_LSR_OE))) {
/*
* For statistics only
*/
if (*status & UART_LSR_BI) {
*status &= ~(UART_LSR_FE | UART_LSR_PE);
port->icount.brk++;
} else if (*status & UART_LSR_PE)
port->icount.parity++;
else if (*status & UART_LSR_FE)
port->icount.frame++;
if (*status & UART_LSR_OE)
port->icount.overrun++;
/*
* Mask off conditions which should be ignored.
*/
*status &= port->read_status_mask;
if (*status & UART_LSR_BI)
flag = TTY_BREAK;
else if (*status & UART_LSR_PE)
flag = TTY_PARITY;
else if (*status & UART_LSR_FE)
flag = TTY_FRAME;
}
if ((*status & port->ignore_status_mask & ~UART_LSR_OE) == 0)
tty_insert_flip_char(&port->port, ch, flag);
/*
* Overrun is special. Since it's reported immediately,
* it doesn't affect the current character.
*/
if (*status & ~port->ignore_status_mask & UART_LSR_OE)
tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
*status = sdio_in(port, UART_LSR);
} while ((*status & UART_LSR_DR) && (max_count-- > 0));
tty_flip_buffer_push(&port->port);
}
static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
{
struct kfifo *xmit = &port->xmit_fifo;
int count;
struct tty_struct *tty;
u8 iobuf[16];
int len;
if (port->x_char) {
sdio_out(port, UART_TX, port->x_char);
port->icount.tx++;
port->x_char = 0;
return;
}
tty = tty_port_tty_get(&port->port);
if (tty == NULL || !kfifo_len(xmit) ||
tty->flow.stopped || tty->hw_stopped) {
sdio_uart_stop_tx(port);
tty_kref_put(tty);
return;
}
len = kfifo_out_locked(xmit, iobuf, 16, &port->write_lock);
for (count = 0; count < len; count++) {
sdio_out(port, UART_TX, iobuf[count]);
port->icount.tx++;
}
len = kfifo_len(xmit);
if (len < WAKEUP_CHARS) {
tty_wakeup(tty);
if (len == 0)
sdio_uart_stop_tx(port);
}
tty_kref_put(tty);
}
static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
{
int status;
struct tty_struct *tty;
status = sdio_in(port, UART_MSR);
if ((status & UART_MSR_ANY_DELTA) == 0)
return;
if (status & UART_MSR_TERI)
port->icount.rng++;
if (status & UART_MSR_DDSR)
port->icount.dsr++;
if (status & UART_MSR_DDCD) {
port->icount.dcd++;
/* DCD raise - wake for open */
if (status & UART_MSR_DCD)
wake_up_interruptible(&port->port.open_wait);
else {
/* DCD drop - hang up if tty attached */
tty_port_tty_hangup(&port->port, false);
}
}
if (status & UART_MSR_DCTS) {
port->icount.cts++;
tty = tty_port_tty_get(&port->port);
if (tty && C_CRTSCTS(tty)) {
int cts = (status & UART_MSR_CTS);
if (tty->hw_stopped) {
if (cts) {
tty->hw_stopped = false;
sdio_uart_start_tx(port);
tty_wakeup(tty);
}
} else {
if (!cts) {
tty->hw_stopped = true;
sdio_uart_stop_tx(port);
}
}
}
tty_kref_put(tty);
}
}
/*
* This handles the interrupt from one port.
*/
static void sdio_uart_irq(struct sdio_func *func)
{
struct sdio_uart_port *port = sdio_get_drvdata(func);
unsigned int iir, lsr;
/*
* In a few places sdio_uart_irq() is called directly instead of
* waiting for the actual interrupt to be raised and the SDIO IRQ
* thread scheduled in order to reduce latency. However, some
* interaction with the tty core may end up calling us back
* (serial echo, flow control, etc.) through those same places
* causing undesirable effects. Let's stop the recursion here.
*/
if (unlikely(port->in_sdio_uart_irq == current))
return;
iir = sdio_in(port, UART_IIR);
if (iir & UART_IIR_NO_INT)
return;
port->in_sdio_uart_irq = current;
lsr = sdio_in(port, UART_LSR);
if (lsr & UART_LSR_DR)
sdio_uart_receive_chars(port, &lsr);
sdio_uart_check_modem_status(port);
if (lsr & UART_LSR_THRE)
sdio_uart_transmit_chars(port);
port->in_sdio_uart_irq = NULL;
}
static bool uart_carrier_raised(struct tty_port *tport)
{
struct sdio_uart_port *port =
container_of(tport, struct sdio_uart_port, port);
unsigned int ret = sdio_uart_claim_func(port);
if (ret) /* Missing hardware shouldn't block for carrier */
return 1;
ret = sdio_uart_get_mctrl(port);
sdio_uart_release_func(port);
return ret & TIOCM_CAR;
}
/**
* uart_dtr_rts - port helper to set uart signals
* @tport: tty port to be updated
* @active: set to turn on DTR/RTS
*
 * Called by the tty port helpers when the modem signals need to be
 * adjusted during open, close or hangup.
*/
static void uart_dtr_rts(struct tty_port *tport, bool active)
{
struct sdio_uart_port *port =
container_of(tport, struct sdio_uart_port, port);
int ret = sdio_uart_claim_func(port);
if (ret)
return;
if (!active)
sdio_uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
else
sdio_uart_set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
sdio_uart_release_func(port);
}
/**
* sdio_uart_activate - start up hardware
* @tport: tty port to activate
* @tty: tty bound to this port
*
* Activate a tty port. The port locking guarantees us this will be
* run exactly once per set of opens, and if successful will see the
* shutdown method run exactly once to match. Start up and shutdown are
* protected from each other by the internal locking and will not run
* at the same time even during a hangup event.
*
* If we successfully start up the port we take an extra kref as we
* will keep it around until shutdown when the kref is dropped.
*/
static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
{
struct sdio_uart_port *port =
container_of(tport, struct sdio_uart_port, port);
int ret;
/*
* Set the TTY IO error marker - we will only clear this
* once we have successfully opened the port.
*/
set_bit(TTY_IO_ERROR, &tty->flags);
kfifo_reset(&port->xmit_fifo);
ret = sdio_uart_claim_func(port);
if (ret)
return ret;
ret = sdio_enable_func(port->func);
if (ret)
goto err1;
ret = sdio_claim_irq(port->func, sdio_uart_irq);
if (ret)
goto err2;
/*
* Clear the FIFO buffers and disable them.
	 * (they will be re-enabled in sdio_uart_change_speed())
*/
sdio_out(port, UART_FCR, UART_FCR_ENABLE_FIFO);
sdio_out(port, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
sdio_out(port, UART_FCR, 0);
/*
* Clear the interrupt registers.
*/
(void) sdio_in(port, UART_LSR);
(void) sdio_in(port, UART_RX);
(void) sdio_in(port, UART_IIR);
(void) sdio_in(port, UART_MSR);
/*
* Now, initialize the UART
*/
sdio_out(port, UART_LCR, UART_LCR_WLEN8);
port->ier = UART_IER_RLSI|UART_IER_RDI|UART_IER_RTOIE|UART_IER_UUE;
port->mctrl = TIOCM_OUT2;
sdio_uart_change_speed(port, &tty->termios, NULL);
if (C_BAUD(tty))
sdio_uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR);
if (C_CRTSCTS(tty))
if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS))
tty->hw_stopped = true;
clear_bit(TTY_IO_ERROR, &tty->flags);
/* Kick the IRQ handler once while we're still holding the host lock */
sdio_uart_irq(port->func);
sdio_uart_release_func(port);
return 0;
err2:
sdio_disable_func(port->func);
err1:
sdio_uart_release_func(port);
return ret;
}
/**
* sdio_uart_shutdown - stop hardware
* @tport: tty port to shut down
*
* Deactivate a tty port. The port locking guarantees us this will be
* run only if a successful matching activate already ran. The two are
* protected from each other by the internal locking and will not run
* at the same time even during a hangup event.
*/
static void sdio_uart_shutdown(struct tty_port *tport)
{
struct sdio_uart_port *port =
container_of(tport, struct sdio_uart_port, port);
int ret;
ret = sdio_uart_claim_func(port);
if (ret)
return;
sdio_uart_stop_rx(port);
/* Disable interrupts from this port */
sdio_release_irq(port->func);
port->ier = 0;
sdio_out(port, UART_IER, 0);
sdio_uart_clear_mctrl(port, TIOCM_OUT2);
/* Disable break condition and FIFOs. */
port->lcr &= ~UART_LCR_SBC;
sdio_out(port, UART_LCR, port->lcr);
sdio_out(port, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR |
UART_FCR_CLEAR_XMIT);
sdio_out(port, UART_FCR, 0);
sdio_disable_func(port->func);
sdio_uart_release_func(port);
}
static void sdio_uart_port_destroy(struct tty_port *tport)
{
struct sdio_uart_port *port =
container_of(tport, struct sdio_uart_port, port);
kfifo_free(&port->xmit_fifo);
kfree(port);
}
/**
* sdio_uart_install - install method
* @driver: the driver in use (sdio_uart in our case)
* @tty: the tty being bound
*
* Look up and bind the tty and the driver together. Initialize
* any needed private data (in our case the termios)
*/
static int sdio_uart_install(struct tty_driver *driver, struct tty_struct *tty)
{
int idx = tty->index;
struct sdio_uart_port *port = sdio_uart_port_get(idx);
int ret = tty_standard_install(driver, tty);
if (ret == 0)
		/* This is the ref sdio_uart_port_get() provided */
tty->driver_data = port;
else
sdio_uart_port_put(port);
return ret;
}
/**
* sdio_uart_cleanup - called on the last tty kref drop
* @tty: the tty being destroyed
*
* Called asynchronously when the last reference to the tty is dropped.
 * We cannot drop the tty->driver_data port kref until this point.
*/
static void sdio_uart_cleanup(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
tty->driver_data = NULL; /* Bug trap */
sdio_uart_port_put(port);
}
/*
* Open/close/hangup is now entirely boilerplate
*/
static int sdio_uart_open(struct tty_struct *tty, struct file *filp)
{
struct sdio_uart_port *port = tty->driver_data;
return tty_port_open(&port->port, tty, filp);
}
static void sdio_uart_close(struct tty_struct *tty, struct file * filp)
{
struct sdio_uart_port *port = tty->driver_data;
tty_port_close(&port->port, tty, filp);
}
static void sdio_uart_hangup(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
tty_port_hangup(&port->port);
}
static ssize_t sdio_uart_write(struct tty_struct *tty, const u8 *buf,
size_t count)
{
struct sdio_uart_port *port = tty->driver_data;
int ret;
if (!port->func)
return -ENODEV;
ret = kfifo_in_locked(&port->xmit_fifo, buf, count, &port->write_lock);
if (!(port->ier & UART_IER_THRI)) {
int err = sdio_uart_claim_func(port);
if (!err) {
sdio_uart_start_tx(port);
sdio_uart_irq(port->func);
sdio_uart_release_func(port);
} else
ret = err;
}
return ret;
}
static unsigned int sdio_uart_write_room(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
return FIFO_SIZE - kfifo_len(&port->xmit_fifo);
}
static unsigned int sdio_uart_chars_in_buffer(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
return kfifo_len(&port->xmit_fifo);
}
static void sdio_uart_send_xchar(struct tty_struct *tty, char ch)
{
struct sdio_uart_port *port = tty->driver_data;
port->x_char = ch;
if (ch && !(port->ier & UART_IER_THRI)) {
if (sdio_uart_claim_func(port) != 0)
return;
sdio_uart_start_tx(port);
sdio_uart_irq(port->func);
sdio_uart_release_func(port);
}
}
static void sdio_uart_throttle(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
if (!I_IXOFF(tty) && !C_CRTSCTS(tty))
return;
if (sdio_uart_claim_func(port) != 0)
return;
if (I_IXOFF(tty)) {
port->x_char = STOP_CHAR(tty);
sdio_uart_start_tx(port);
}
if (C_CRTSCTS(tty))
sdio_uart_clear_mctrl(port, TIOCM_RTS);
sdio_uart_irq(port->func);
sdio_uart_release_func(port);
}
static void sdio_uart_unthrottle(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
if (!I_IXOFF(tty) && !C_CRTSCTS(tty))
return;
if (sdio_uart_claim_func(port) != 0)
return;
if (I_IXOFF(tty)) {
if (port->x_char) {
port->x_char = 0;
} else {
port->x_char = START_CHAR(tty);
sdio_uart_start_tx(port);
}
}
if (C_CRTSCTS(tty))
sdio_uart_set_mctrl(port, TIOCM_RTS);
sdio_uart_irq(port->func);
sdio_uart_release_func(port);
}
static void sdio_uart_set_termios(struct tty_struct *tty,
const struct ktermios *old_termios)
{
struct sdio_uart_port *port = tty->driver_data;
unsigned int cflag = tty->termios.c_cflag;
if (sdio_uart_claim_func(port) != 0)
return;
sdio_uart_change_speed(port, &tty->termios, old_termios);
/* Handle transition to B0 status */
if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD))
sdio_uart_clear_mctrl(port, TIOCM_RTS | TIOCM_DTR);
/* Handle transition away from B0 status */
if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
unsigned int mask = TIOCM_DTR;
if (!(cflag & CRTSCTS) || !tty_throttled(tty))
mask |= TIOCM_RTS;
sdio_uart_set_mctrl(port, mask);
}
/* Handle turning off CRTSCTS */
if ((old_termios->c_cflag & CRTSCTS) && !(cflag & CRTSCTS)) {
tty->hw_stopped = false;
sdio_uart_start_tx(port);
}
/* Handle turning on CRTSCTS */
if (!(old_termios->c_cflag & CRTSCTS) && (cflag & CRTSCTS)) {
if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS)) {
tty->hw_stopped = true;
sdio_uart_stop_tx(port);
}
}
sdio_uart_release_func(port);
}
static int sdio_uart_break_ctl(struct tty_struct *tty, int break_state)
{
struct sdio_uart_port *port = tty->driver_data;
int result;
result = sdio_uart_claim_func(port);
if (result != 0)
return result;
if (break_state == -1)
port->lcr |= UART_LCR_SBC;
else
port->lcr &= ~UART_LCR_SBC;
sdio_out(port, UART_LCR, port->lcr);
sdio_uart_release_func(port);
return 0;
}
static int sdio_uart_tiocmget(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
int result;
result = sdio_uart_claim_func(port);
if (!result) {
result = port->mctrl | sdio_uart_get_mctrl(port);
sdio_uart_release_func(port);
}
return result;
}
static int sdio_uart_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct sdio_uart_port *port = tty->driver_data;
int result;
result = sdio_uart_claim_func(port);
if (!result) {
sdio_uart_update_mctrl(port, set, clear);
sdio_uart_release_func(port);
}
return result;
}
static int sdio_uart_proc_show(struct seq_file *m, void *v)
{
int i;
seq_printf(m, "serinfo:1.0 driver%s%s revision:%s\n",
"", "", "");
for (i = 0; i < UART_NR; i++) {
struct sdio_uart_port *port = sdio_uart_port_get(i);
if (port) {
seq_printf(m, "%d: uart:SDIO", i);
if (capable(CAP_SYS_ADMIN)) {
seq_printf(m, " tx:%d rx:%d",
port->icount.tx, port->icount.rx);
if (port->icount.frame)
seq_printf(m, " fe:%d",
port->icount.frame);
if (port->icount.parity)
seq_printf(m, " pe:%d",
port->icount.parity);
if (port->icount.brk)
seq_printf(m, " brk:%d",
port->icount.brk);
if (port->icount.overrun)
seq_printf(m, " oe:%d",
port->icount.overrun);
if (port->icount.cts)
seq_printf(m, " cts:%d",
port->icount.cts);
if (port->icount.dsr)
seq_printf(m, " dsr:%d",
port->icount.dsr);
if (port->icount.rng)
seq_printf(m, " rng:%d",
port->icount.rng);
if (port->icount.dcd)
seq_printf(m, " dcd:%d",
port->icount.dcd);
}
sdio_uart_port_put(port);
seq_putc(m, '\n');
}
}
return 0;
}
static const struct tty_port_operations sdio_uart_port_ops = {
.dtr_rts = uart_dtr_rts,
.carrier_raised = uart_carrier_raised,
.shutdown = sdio_uart_shutdown,
.activate = sdio_uart_activate,
.destruct = sdio_uart_port_destroy,
};
static const struct tty_operations sdio_uart_ops = {
.open = sdio_uart_open,
.close = sdio_uart_close,
.write = sdio_uart_write,
.write_room = sdio_uart_write_room,
.chars_in_buffer = sdio_uart_chars_in_buffer,
.send_xchar = sdio_uart_send_xchar,
.throttle = sdio_uart_throttle,
.unthrottle = sdio_uart_unthrottle,
.set_termios = sdio_uart_set_termios,
.hangup = sdio_uart_hangup,
.break_ctl = sdio_uart_break_ctl,
.tiocmget = sdio_uart_tiocmget,
.tiocmset = sdio_uart_tiocmset,
.install = sdio_uart_install,
.cleanup = sdio_uart_cleanup,
.proc_show = sdio_uart_proc_show,
};
static struct tty_driver *sdio_uart_tty_driver;
static int sdio_uart_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
struct sdio_uart_port *port;
int ret;
port = kzalloc(sizeof(struct sdio_uart_port), GFP_KERNEL);
if (!port)
return -ENOMEM;
if (func->class == SDIO_CLASS_UART) {
pr_warn("%s: need info on UART class basic setup\n",
sdio_func_id(func));
kfree(port);
return -ENOSYS;
} else if (func->class == SDIO_CLASS_GPS) {
/*
* We need tuple 0x91. It contains SUBTPL_SIOREG
* and SUBTPL_RCVCAPS.
*/
struct sdio_func_tuple *tpl;
for (tpl = func->tuples; tpl; tpl = tpl->next) {
if (tpl->code != 0x91)
continue;
if (tpl->size < 10)
continue;
if (tpl->data[1] == 0) /* SUBTPL_SIOREG */
break;
}
if (!tpl) {
pr_warn("%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n",
sdio_func_id(func));
kfree(port);
return -EINVAL;
}
pr_debug("%s: Register ID = 0x%02x, Exp ID = 0x%02x\n",
sdio_func_id(func), tpl->data[2], tpl->data[3]);
port->regs_offset = (tpl->data[4] << 0) |
(tpl->data[5] << 8) |
(tpl->data[6] << 16);
pr_debug("%s: regs offset = 0x%x\n",
sdio_func_id(func), port->regs_offset);
port->uartclk = tpl->data[7] * 115200;
if (port->uartclk == 0)
port->uartclk = 115200;
pr_debug("%s: clk %d baudcode %u 4800-div %u\n",
sdio_func_id(func), port->uartclk,
tpl->data[7], tpl->data[8] | (tpl->data[9] << 8));
} else {
kfree(port);
return -EINVAL;
}
port->func = func;
sdio_set_drvdata(func, port);
tty_port_init(&port->port);
port->port.ops = &sdio_uart_port_ops;
ret = sdio_uart_add_port(port);
if (ret) {
kfree(port);
} else {
struct device *dev;
dev = tty_port_register_device(&port->port,
sdio_uart_tty_driver, port->index, &func->dev);
if (IS_ERR(dev)) {
sdio_uart_port_remove(port);
ret = PTR_ERR(dev);
}
}
return ret;
}
static void sdio_uart_remove(struct sdio_func *func)
{
struct sdio_uart_port *port = sdio_get_drvdata(func);
tty_unregister_device(sdio_uart_tty_driver, port->index);
sdio_uart_port_remove(port);
}
static const struct sdio_device_id sdio_uart_ids[] = {
{ SDIO_DEVICE_CLASS(SDIO_CLASS_UART) },
{ SDIO_DEVICE_CLASS(SDIO_CLASS_GPS) },
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, sdio_uart_ids);
static struct sdio_driver sdio_uart_driver = {
.probe = sdio_uart_probe,
.remove = sdio_uart_remove,
.name = "sdio_uart",
.id_table = sdio_uart_ids,
};
static int __init sdio_uart_init(void)
{
int ret;
struct tty_driver *tty_drv;
sdio_uart_tty_driver = tty_drv = tty_alloc_driver(UART_NR,
TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(tty_drv))
return PTR_ERR(tty_drv);
tty_drv->driver_name = "sdio_uart";
tty_drv->name = "ttySDIO";
tty_drv->major = 0; /* dynamically allocated */
tty_drv->minor_start = 0;
tty_drv->type = TTY_DRIVER_TYPE_SERIAL;
tty_drv->subtype = SERIAL_TYPE_NORMAL;
tty_drv->init_termios = tty_std_termios;
tty_drv->init_termios.c_cflag = B4800 | CS8 | CREAD | HUPCL | CLOCAL;
tty_drv->init_termios.c_ispeed = 4800;
tty_drv->init_termios.c_ospeed = 4800;
tty_set_operations(tty_drv, &sdio_uart_ops);
ret = tty_register_driver(tty_drv);
if (ret)
goto err1;
ret = sdio_register_driver(&sdio_uart_driver);
if (ret)
goto err2;
return 0;
err2:
tty_unregister_driver(tty_drv);
err1:
tty_driver_kref_put(tty_drv);
return ret;
}
static void __exit sdio_uart_exit(void)
{
sdio_unregister_driver(&sdio_uart_driver);
tty_unregister_driver(sdio_uart_tty_driver);
tty_driver_kref_put(sdio_uart_tty_driver);
}
module_init(sdio_uart_init);
module_exit(sdio_uart_exit);
MODULE_AUTHOR("Nicolas Pitre");
MODULE_LICENSE("GPL");
| linux-master | drivers/mmc/core/sdio_uart.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/mmc/core/mmc.c
*
* Copyright (C) 2003-2004 Russell King, All Rights Reserved.
* Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
* MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
*/
#include <linux/err.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_runtime.h>
#include <linux/random.h>
#include <linux/sysfs.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include "core.h"
#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "quirks.h"
#include "sd_ops.h"
#include "pwrseq.h"
#define DEFAULT_CMD6_TIMEOUT_MS 500
#define MIN_CACHE_EN_TIMEOUT_MS 1600
#define CACHE_FLUSH_TIMEOUT_MS 30000 /* 30s */
static const unsigned int tran_exp[] = {
10000, 100000, 1000000, 10000000,
0, 0, 0, 0
};
static const unsigned char tran_mant[] = {
0, 10, 12, 13, 15, 20, 25, 30,
35, 40, 45, 50, 55, 60, 70, 80,
};
static const unsigned int taac_exp[] = {
1, 10, 100, 1000, 10000, 100000, 1000000, 10000000,
};
static const unsigned int taac_mant[] = {
0, 10, 12, 13, 15, 20, 25, 30,
35, 40, 45, 50, 55, 60, 70, 80,
};
#define UNSTUFF_BITS(resp,start,size) \
({ \
const int __size = size; \
const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
const int __off = 3 - ((start) / 32); \
const int __shft = (start) & 31; \
u32 __res; \
\
__res = resp[__off] >> __shft; \
if (__size + __shft > 32) \
__res |= resp[__off-1] << ((32 - __shft) % 32); \
__res & __mask; \
})
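/*
 * Worked example: the 128-bit response is stored big-end-first, so resp[0]
 * holds bits 127:96 and resp[3] holds bits 31:0. UNSTUFF_BITS(resp, 120, 8)
 * therefore evaluates to (resp[0] >> 24) & 0xff (the v2+ manufacturer ID
 * below), while UNSTUFF_BITS(resp, 16, 32) straddles a word boundary and is
 * assembled from resp[3] >> 16 plus the low half of resp[2].
 */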
/*
* Given the decoded CSD structure, decode the raw CID to our CID structure.
*/
static int mmc_decode_cid(struct mmc_card *card)
{
u32 *resp = card->raw_cid;
/*
* Add the raw card ID (cid) data to the entropy pool. It doesn't
* matter that not all of it is unique, it's just bonus entropy.
*/
add_device_randomness(&card->raw_cid, sizeof(card->raw_cid));
/*
* The selection of the format here is based upon published
* specs from sandisk and from what people have reported.
*/
switch (card->csd.mmca_vsn) {
case 0: /* MMC v1.0 - v1.2 */
case 1: /* MMC v1.4 */
card->cid.manfid = UNSTUFF_BITS(resp, 104, 24);
card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8);
card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4);
card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4);
card->cid.serial = UNSTUFF_BITS(resp, 16, 24);
card->cid.month = UNSTUFF_BITS(resp, 12, 4);
card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
break;
case 2: /* MMC v2.0 - v2.2 */
case 3: /* MMC v3.1 - v3.3 */
case 4: /* MMC v4 */
card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
card->cid.prv = UNSTUFF_BITS(resp, 48, 8);
card->cid.serial = UNSTUFF_BITS(resp, 16, 32);
card->cid.month = UNSTUFF_BITS(resp, 12, 4);
card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
break;
default:
pr_err("%s: card has unknown MMCA version %d\n",
mmc_hostname(card->host), card->csd.mmca_vsn);
return -EINVAL;
}
return 0;
}
static void mmc_set_erase_size(struct mmc_card *card)
{
if (card->ext_csd.erase_group_def & 1)
card->erase_size = card->ext_csd.hc_erase_size;
else
card->erase_size = card->csd.erase_size;
mmc_init_erase(card);
}
/*
* Given a 128-bit response, decode to our card CSD structure.
*/
static int mmc_decode_csd(struct mmc_card *card)
{
struct mmc_csd *csd = &card->csd;
unsigned int e, m, a, b;
u32 *resp = card->raw_csd;
/*
* We only understand CSD structure v1.1 and v1.2.
* v1.2 has extra information in bits 15, 11 and 10.
* We also support eMMC v4.4 & v4.41.
*/
csd->structure = UNSTUFF_BITS(resp, 126, 2);
if (csd->structure == 0) {
pr_err("%s: unrecognised CSD structure version %d\n",
mmc_hostname(card->host), csd->structure);
return -EINVAL;
}
csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4);
m = UNSTUFF_BITS(resp, 115, 4);
e = UNSTUFF_BITS(resp, 112, 3);
csd->taac_ns = (taac_exp[e] * taac_mant[m] + 9) / 10;
csd->taac_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
m = UNSTUFF_BITS(resp, 99, 4);
e = UNSTUFF_BITS(resp, 96, 3);
csd->max_dtr = tran_exp[e] * tran_mant[m];
csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
e = UNSTUFF_BITS(resp, 47, 3);
m = UNSTUFF_BITS(resp, 62, 12);
csd->capacity = (1 + m) << (e + 2);
csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
if (csd->write_blkbits >= 9) {
a = UNSTUFF_BITS(resp, 42, 5);
b = UNSTUFF_BITS(resp, 37, 5);
csd->erase_size = (a + 1) * (b + 1);
csd->erase_size <<= csd->write_blkbits - 9;
}
return 0;
}
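/*
 * Worked example for the capacity math above: with the 12-bit C_SIZE field
 * saturated (m = 4095) and C_SIZE_MULT e = 7, capacity = 4096 << 9 =
 * 2097152 blocks. This saturated value is exactly the 4096 * 512 "magic"
 * size that mmc_read_ext_csd() below checks for when a possible
 * high-capacity card fails to provide an EXT_CSD.
 */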
static void mmc_select_card_type(struct mmc_card *card)
{
struct mmc_host *host = card->host;
u8 card_type = card->ext_csd.raw_card_type;
u32 caps = host->caps, caps2 = host->caps2;
unsigned int hs_max_dtr = 0, hs200_max_dtr = 0;
unsigned int avail_type = 0;
if (caps & MMC_CAP_MMC_HIGHSPEED &&
card_type & EXT_CSD_CARD_TYPE_HS_26) {
hs_max_dtr = MMC_HIGH_26_MAX_DTR;
avail_type |= EXT_CSD_CARD_TYPE_HS_26;
}
if (caps & MMC_CAP_MMC_HIGHSPEED &&
card_type & EXT_CSD_CARD_TYPE_HS_52) {
hs_max_dtr = MMC_HIGH_52_MAX_DTR;
avail_type |= EXT_CSD_CARD_TYPE_HS_52;
}
if (caps & (MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR) &&
card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) {
hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
}
if (caps & MMC_CAP_1_2V_DDR &&
card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V;
}
if (caps2 & MMC_CAP2_HS200_1_8V_SDR &&
card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) {
hs200_max_dtr = MMC_HS200_MAX_DTR;
avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
}
if (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) {
hs200_max_dtr = MMC_HS200_MAX_DTR;
avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V;
}
if (caps2 & MMC_CAP2_HS400_1_8V &&
card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) {
hs200_max_dtr = MMC_HS200_MAX_DTR;
avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V;
}
if (caps2 & MMC_CAP2_HS400_1_2V &&
card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) {
hs200_max_dtr = MMC_HS200_MAX_DTR;
avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;
}
if ((caps2 & MMC_CAP2_HS400_ES) &&
card->ext_csd.strobe_support &&
(avail_type & EXT_CSD_CARD_TYPE_HS400))
avail_type |= EXT_CSD_CARD_TYPE_HS400ES;
card->ext_csd.hs_max_dtr = hs_max_dtr;
card->ext_csd.hs200_max_dtr = hs200_max_dtr;
card->mmc_avail_type = avail_type;
}
static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
{
u8 hc_erase_grp_sz, hc_wp_grp_sz;
/*
* Disable these attributes by default
*/
card->ext_csd.enhanced_area_offset = -EINVAL;
card->ext_csd.enhanced_area_size = -EINVAL;
/*
* Enhanced area feature support -- check whether the eMMC
* card has the Enhanced area enabled. If so, export enhanced
* area offset and size to user by adding sysfs interface.
*/
if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
(ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
if (card->ext_csd.partition_setting_completed) {
hc_erase_grp_sz =
ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
hc_wp_grp_sz =
ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
/*
* calculate the enhanced data area offset, in bytes
*/
card->ext_csd.enhanced_area_offset =
(((unsigned long long)ext_csd[139]) << 24) +
(((unsigned long long)ext_csd[138]) << 16) +
(((unsigned long long)ext_csd[137]) << 8) +
(((unsigned long long)ext_csd[136]));
if (mmc_card_blockaddr(card))
card->ext_csd.enhanced_area_offset <<= 9;
/*
* calculate the enhanced data area size, in kilobytes
*/
card->ext_csd.enhanced_area_size =
(ext_csd[142] << 16) + (ext_csd[141] << 8) +
ext_csd[140];
card->ext_csd.enhanced_area_size *=
(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
card->ext_csd.enhanced_area_size <<= 9;
} else {
pr_warn("%s: defines enhanced area without partition setting complete\n",
mmc_hostname(card->host));
}
}
}
static void mmc_part_add(struct mmc_card *card, u64 size,
unsigned int part_cfg, char *name, int idx, bool ro,
int area_type)
{
card->part[card->nr_parts].size = size;
card->part[card->nr_parts].part_cfg = part_cfg;
sprintf(card->part[card->nr_parts].name, name, idx);
card->part[card->nr_parts].force_ro = ro;
card->part[card->nr_parts].area_type = area_type;
card->nr_parts++;
}
static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
{
int idx;
u8 hc_erase_grp_sz, hc_wp_grp_sz;
u64 part_size;
/*
* General purpose partition feature support --
* If ext_csd has the size of general purpose partitions,
* set size, part_cfg, partition name in mmc_part.
*/
if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
EXT_CSD_PART_SUPPORT_PART_EN) {
hc_erase_grp_sz =
ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
hc_wp_grp_sz =
ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
continue;
if (card->ext_csd.partition_setting_completed == 0) {
pr_warn("%s: has partition size defined without partition complete\n",
mmc_hostname(card->host));
break;
}
part_size =
(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
<< 16) +
(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
<< 8) +
ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
part_size *= (hc_erase_grp_sz * hc_wp_grp_sz);
mmc_part_add(card, part_size << 19,
EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
"gp%d", idx, false,
MMC_BLK_DATA_AREA_GP);
}
}
}
/* Minimum partition switch timeout in milliseconds */
#define MMC_MIN_PART_SWITCH_TIME 300
/*
* Decode extended CSD.
*/
static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
int err = 0, idx;
u64 part_size;
struct device_node *np;
bool broken_hpi = false;
/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
if (card->csd.structure == 3) {
if (card->ext_csd.raw_ext_csd_structure > 2) {
pr_err("%s: unrecognised EXT_CSD structure "
"version %d\n", mmc_hostname(card->host),
card->ext_csd.raw_ext_csd_structure);
err = -EINVAL;
goto out;
}
}
np = mmc_of_find_child_device(card->host, 0);
if (np && of_device_is_compatible(np, "mmc-card"))
broken_hpi = of_property_read_bool(np, "broken-hpi");
of_node_put(np);
/*
* The EXT_CSD format is meant to be forward compatible. As long
* as CSD_STRUCTURE does not change, all values for EXT_CSD_REV
* are authorized, see JEDEC JESD84-B50 section B.8.
*/
card->ext_csd.rev = ext_csd[EXT_CSD_REV];
/* fixup device after ext_csd revision field is updated */
mmc_fixup_device(card, mmc_ext_csd_fixups);
card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
if (card->ext_csd.rev >= 2) {
card->ext_csd.sectors =
ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
/* Cards with density > 2GiB are sector addressed */
if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
mmc_card_set_blockaddr(card);
}
card->ext_csd.strobe_support = ext_csd[EXT_CSD_STROBE_SUPPORT];
card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
mmc_select_card_type(card);
card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
card->ext_csd.raw_erase_timeout_mult =
ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
card->ext_csd.raw_hc_erase_grp_size =
ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
card->ext_csd.raw_boot_mult =
ext_csd[EXT_CSD_BOOT_MULT];
if (card->ext_csd.rev >= 3) {
u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
/* EXT_CSD value is in units of 10ms, but we store in ms */
card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
/* Sleep / awake timeout in 100ns units */
if (sa_shift > 0 && sa_shift <= 0x17)
card->ext_csd.sa_timeout =
1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
card->ext_csd.erase_group_def =
ext_csd[EXT_CSD_ERASE_GROUP_DEF];
card->ext_csd.hc_erase_timeout = 300 *
ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
card->ext_csd.hc_erase_size =
ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];
/*
* There are two boot regions of equal size, defined in
* multiples of 128K.
*/
if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
mmc_part_add(card, part_size,
EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
"boot%d", idx, true,
MMC_BLK_DATA_AREA_BOOT);
}
}
}
card->ext_csd.raw_hc_erase_gap_size =
ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
card->ext_csd.raw_sec_trim_mult =
ext_csd[EXT_CSD_SEC_TRIM_MULT];
card->ext_csd.raw_sec_erase_mult =
ext_csd[EXT_CSD_SEC_ERASE_MULT];
card->ext_csd.raw_sec_feature_support =
ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
card->ext_csd.raw_trim_mult =
ext_csd[EXT_CSD_TRIM_MULT];
card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
card->ext_csd.raw_driver_strength = ext_csd[EXT_CSD_DRIVER_STRENGTH];
if (card->ext_csd.rev >= 4) {
if (ext_csd[EXT_CSD_PARTITION_SETTING_COMPLETED] &
EXT_CSD_PART_SETTING_COMPLETED)
card->ext_csd.partition_setting_completed = 1;
else
card->ext_csd.partition_setting_completed = 0;
mmc_manage_enhanced_area(card, ext_csd);
mmc_manage_gp_partitions(card, ext_csd);
card->ext_csd.sec_trim_mult =
ext_csd[EXT_CSD_SEC_TRIM_MULT];
card->ext_csd.sec_erase_mult =
ext_csd[EXT_CSD_SEC_ERASE_MULT];
card->ext_csd.sec_feature_support =
ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
card->ext_csd.trim_timeout = 300 *
ext_csd[EXT_CSD_TRIM_MULT];
/*
* Note that the call to mmc_part_add above defaults to read
* only. If this default assumption is changed, the call must
* take into account the value of boot_locked below.
*/
card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
card->ext_csd.boot_ro_lockable = true;
/* Save power class values */
card->ext_csd.raw_pwr_cl_52_195 =
ext_csd[EXT_CSD_PWR_CL_52_195];
card->ext_csd.raw_pwr_cl_26_195 =
ext_csd[EXT_CSD_PWR_CL_26_195];
card->ext_csd.raw_pwr_cl_52_360 =
ext_csd[EXT_CSD_PWR_CL_52_360];
card->ext_csd.raw_pwr_cl_26_360 =
ext_csd[EXT_CSD_PWR_CL_26_360];
card->ext_csd.raw_pwr_cl_200_195 =
ext_csd[EXT_CSD_PWR_CL_200_195];
card->ext_csd.raw_pwr_cl_200_360 =
ext_csd[EXT_CSD_PWR_CL_200_360];
card->ext_csd.raw_pwr_cl_ddr_52_195 =
ext_csd[EXT_CSD_PWR_CL_DDR_52_195];
card->ext_csd.raw_pwr_cl_ddr_52_360 =
ext_csd[EXT_CSD_PWR_CL_DDR_52_360];
card->ext_csd.raw_pwr_cl_ddr_200_360 =
ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
}
if (card->ext_csd.rev >= 5) {
/* Adjust production date as per JEDEC JESD84-B451 */
if (card->cid.year < 2010)
card->cid.year += 16;
/* check whether the eMMC card supports BKOPS */
if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
card->ext_csd.bkops = 1;
card->ext_csd.man_bkops_en =
(ext_csd[EXT_CSD_BKOPS_EN] &
EXT_CSD_MANUAL_BKOPS_MASK);
card->ext_csd.raw_bkops_status =
ext_csd[EXT_CSD_BKOPS_STATUS];
if (card->ext_csd.man_bkops_en)
pr_debug("%s: MAN_BKOPS_EN bit is set\n",
mmc_hostname(card->host));
card->ext_csd.auto_bkops_en =
(ext_csd[EXT_CSD_BKOPS_EN] &
EXT_CSD_AUTO_BKOPS_MASK);
if (card->ext_csd.auto_bkops_en)
pr_debug("%s: AUTO_BKOPS_EN bit is set\n",
mmc_hostname(card->host));
}
/* check whether the eMMC card supports HPI */
if (!mmc_card_broken_hpi(card) &&
!broken_hpi && (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1)) {
card->ext_csd.hpi = 1;
if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
else
card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
/*
* Indicate the maximum timeout to close
* a command interrupted by HPI
*/
card->ext_csd.out_of_int_time =
ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
}
card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
/*
* RPMB regions are defined in multiples of 128K.
*/
card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
EXT_CSD_PART_CONFIG_ACC_RPMB,
"rpmb", 0, false,
MMC_BLK_DATA_AREA_RPMB);
}
}
card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
card->erased_byte = 0xFF;
else
card->erased_byte = 0x0;
/* eMMC v4.5 or later */
card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS;
if (card->ext_csd.rev >= 6) {
card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
card->ext_csd.generic_cmd6_time = 10 *
ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
card->ext_csd.power_off_longtime = 10 *
ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];
card->ext_csd.cache_size =
ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;
if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
card->ext_csd.data_sector_size = 4096;
else
card->ext_csd.data_sector_size = 512;
if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
(ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
card->ext_csd.data_tag_unit_size =
((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
(card->ext_csd.data_sector_size);
} else {
card->ext_csd.data_tag_unit_size = 0;
}
card->ext_csd.max_packed_writes =
ext_csd[EXT_CSD_MAX_PACKED_WRITES];
card->ext_csd.max_packed_reads =
ext_csd[EXT_CSD_MAX_PACKED_READS];
} else {
card->ext_csd.data_sector_size = 512;
}
/*
* GENERIC_CMD6_TIME is to be used "unless a specific timeout is defined
* when accessing a specific field", so use it here if there is no
* PARTITION_SWITCH_TIME.
*/
if (!card->ext_csd.part_time)
card->ext_csd.part_time = card->ext_csd.generic_cmd6_time;
	/* Some eMMCs set the value too low, so enforce a minimum */
if (card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
/* eMMC v5 or later */
if (card->ext_csd.rev >= 7) {
memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
MMC_FIRMWARE_LEN);
card->ext_csd.ffu_capable =
(ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
!(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
card->ext_csd.pre_eol_info = ext_csd[EXT_CSD_PRE_EOL_INFO];
card->ext_csd.device_life_time_est_typ_a =
ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A];
card->ext_csd.device_life_time_est_typ_b =
ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B];
}
/* eMMC v5.1 or later */
if (card->ext_csd.rev >= 8) {
card->ext_csd.cmdq_support = ext_csd[EXT_CSD_CMDQ_SUPPORT] &
EXT_CSD_CMDQ_SUPPORTED;
card->ext_csd.cmdq_depth = (ext_csd[EXT_CSD_CMDQ_DEPTH] &
EXT_CSD_CMDQ_DEPTH_MASK) + 1;
/* Exclude inefficiently small queue depths */
if (card->ext_csd.cmdq_depth <= 2) {
card->ext_csd.cmdq_support = false;
card->ext_csd.cmdq_depth = 0;
}
if (card->ext_csd.cmdq_support) {
pr_debug("%s: Command Queue supported depth %u\n",
mmc_hostname(card->host),
card->ext_csd.cmdq_depth);
}
card->ext_csd.enhanced_rpmb_supported =
(card->ext_csd.rel_param &
EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR);
}
out:
return err;
}
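/*
 * Fetch and decode the EXT_CSD register. Cards that predate EXT_CSD
 * (pre v4.0) are not an error; there is simply nothing to decode.
 */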
static int mmc_read_ext_csd(struct mmc_card *card)
{
u8 *ext_csd;
int err;
if (!mmc_can_ext_csd(card))
return 0;
err = mmc_get_ext_csd(card, &ext_csd);
if (err) {
		/*
		 * If the host or the card can't do the switch,
		 * fail more gracefully.
		 */
if ((err != -EINVAL)
&& (err != -ENOSYS)
&& (err != -EFAULT))
return err;
/*
* High capacity cards should have this "magic" size
* stored in their CSD.
*/
if (card->csd.capacity == (4096 * 512)) {
pr_err("%s: unable to read EXT_CSD on a possible high capacity card. Card will be ignored.\n",
mmc_hostname(card->host));
} else {
pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
mmc_hostname(card->host));
err = 0;
}
return err;
}
err = mmc_decode_ext_csd(card, ext_csd);
kfree(ext_csd);
return err;
}
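/*
 * Sanity-check a bus width switch by re-reading the EXT_CSD at the new
 * bus width and comparing its read-only fields against the values cached
 * from the earlier 1-bit mode read. Used when the host cannot run the
 * bus test command (no MMC_CAP_BUS_WIDTH_TEST).
 */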
static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
{
u8 *bw_ext_csd;
int err;
if (bus_width == MMC_BUS_WIDTH_1)
return 0;
err = mmc_get_ext_csd(card, &bw_ext_csd);
if (err)
return err;
	/* only compare read-only fields */
err = !((card->ext_csd.raw_partition_support ==
bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
(card->ext_csd.raw_erased_mem_count ==
bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
(card->ext_csd.rev ==
bw_ext_csd[EXT_CSD_REV]) &&
(card->ext_csd.raw_ext_csd_structure ==
bw_ext_csd[EXT_CSD_STRUCTURE]) &&
(card->ext_csd.raw_card_type ==
bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
(card->ext_csd.raw_s_a_timeout ==
bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
(card->ext_csd.raw_hc_erase_gap_size ==
bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
(card->ext_csd.raw_erase_timeout_mult ==
bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
(card->ext_csd.raw_hc_erase_grp_size ==
bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
(card->ext_csd.raw_sec_trim_mult ==
bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
(card->ext_csd.raw_sec_erase_mult ==
bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
(card->ext_csd.raw_sec_feature_support ==
bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
(card->ext_csd.raw_trim_mult ==
bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
(card->ext_csd.raw_sectors[0] ==
bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
(card->ext_csd.raw_sectors[1] ==
bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
(card->ext_csd.raw_sectors[2] ==
bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
(card->ext_csd.raw_sectors[3] ==
bw_ext_csd[EXT_CSD_SEC_CNT + 3]) &&
(card->ext_csd.raw_pwr_cl_52_195 ==
bw_ext_csd[EXT_CSD_PWR_CL_52_195]) &&
(card->ext_csd.raw_pwr_cl_26_195 ==
bw_ext_csd[EXT_CSD_PWR_CL_26_195]) &&
(card->ext_csd.raw_pwr_cl_52_360 ==
bw_ext_csd[EXT_CSD_PWR_CL_52_360]) &&
(card->ext_csd.raw_pwr_cl_26_360 ==
bw_ext_csd[EXT_CSD_PWR_CL_26_360]) &&
(card->ext_csd.raw_pwr_cl_200_195 ==
bw_ext_csd[EXT_CSD_PWR_CL_200_195]) &&
(card->ext_csd.raw_pwr_cl_200_360 ==
bw_ext_csd[EXT_CSD_PWR_CL_200_360]) &&
(card->ext_csd.raw_pwr_cl_ddr_52_195 ==
bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
(card->ext_csd.raw_pwr_cl_ddr_52_360 ==
bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) &&
(card->ext_csd.raw_pwr_cl_ddr_200_360 ==
bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360]));
if (err)
err = -EINVAL;
kfree(bw_ext_csd);
return err;
}
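/* sysfs attributes for MMC cards, exported via mmc_std_groups below */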
MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
card->raw_cid[2], card->raw_cid[3]);
MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
card->raw_csd[2], card->raw_csd[3]);
MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable);
MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info);
MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
card->ext_csd.device_life_time_est_typ_a,
card->ext_csd.device_life_time_est_typ_b);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
card->ext_csd.enhanced_area_offset);
MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
MMC_DEV_ATTR(enhanced_rpmb_supported, "%#x\n",
card->ext_csd.enhanced_rpmb_supported);
MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en);
static ssize_t mmc_fwrev_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct mmc_card *card = mmc_dev_to_card(dev);
if (card->ext_csd.rev < 7)
return sysfs_emit(buf, "0x%x\n", card->cid.fwrev);
else
return sysfs_emit(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
card->ext_csd.fwrev);
}
static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);
static ssize_t mmc_dsr_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct mmc_card *card = mmc_dev_to_card(dev);
struct mmc_host *host = card->host;
if (card->csd.dsr_imp && host->dsr_req)
return sysfs_emit(buf, "0x%x\n", host->dsr);
else
/* return default DSR value */
return sysfs_emit(buf, "0x%x\n", 0x404);
}
static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
static struct attribute *mmc_std_attrs[] = {
&dev_attr_cid.attr,
&dev_attr_csd.attr,
&dev_attr_date.attr,
&dev_attr_erase_size.attr,
&dev_attr_preferred_erase_size.attr,
&dev_attr_fwrev.attr,
&dev_attr_ffu_capable.attr,
&dev_attr_hwrev.attr,
&dev_attr_manfid.attr,
&dev_attr_name.attr,
&dev_attr_oemid.attr,
&dev_attr_prv.attr,
&dev_attr_rev.attr,
&dev_attr_pre_eol_info.attr,
&dev_attr_life_time.attr,
&dev_attr_serial.attr,
&dev_attr_enhanced_area_offset.attr,
&dev_attr_enhanced_area_size.attr,
&dev_attr_raw_rpmb_size_mult.attr,
&dev_attr_enhanced_rpmb_supported.attr,
&dev_attr_rel_sectors.attr,
&dev_attr_ocr.attr,
&dev_attr_rca.attr,
&dev_attr_dsr.attr,
&dev_attr_cmdq_en.attr,
NULL,
};
ATTRIBUTE_GROUPS(mmc_std);
static struct device_type mmc_type = {
.groups = mmc_std_groups,
};
/*
 * Select the power class for the current bus width.
 * If a power class is defined for the 4/8 bit bus in the
 * extended CSD register, select it by executing the
 * mmc_switch command.
 */
static int __mmc_select_powerclass(struct mmc_card *card,
unsigned int bus_width)
{
struct mmc_host *host = card->host;
struct mmc_ext_csd *ext_csd = &card->ext_csd;
unsigned int pwrclass_val = 0;
int err = 0;
switch (1 << host->ios.vdd) {
case MMC_VDD_165_195:
if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
pwrclass_val = ext_csd->raw_pwr_cl_26_195;
else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
ext_csd->raw_pwr_cl_52_195 :
ext_csd->raw_pwr_cl_ddr_52_195;
else if (host->ios.clock <= MMC_HS200_MAX_DTR)
pwrclass_val = ext_csd->raw_pwr_cl_200_195;
break;
case MMC_VDD_27_28:
case MMC_VDD_28_29:
case MMC_VDD_29_30:
case MMC_VDD_30_31:
case MMC_VDD_31_32:
case MMC_VDD_32_33:
case MMC_VDD_33_34:
case MMC_VDD_34_35:
case MMC_VDD_35_36:
if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
pwrclass_val = ext_csd->raw_pwr_cl_26_360;
else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
ext_csd->raw_pwr_cl_52_360 :
ext_csd->raw_pwr_cl_ddr_52_360;
else if (host->ios.clock <= MMC_HS200_MAX_DTR)
pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ?
ext_csd->raw_pwr_cl_ddr_200_360 :
ext_csd->raw_pwr_cl_200_360;
break;
default:
pr_warn("%s: Voltage range not supported for power class\n",
mmc_hostname(host));
return -EINVAL;
}
if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
EXT_CSD_PWR_CL_8BIT_SHIFT;
else
pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
EXT_CSD_PWR_CL_4BIT_SHIFT;
/* If the power class is different from the default value */
if (pwrclass_val > 0) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_POWER_CLASS,
pwrclass_val,
card->ext_csd.generic_cmd6_time);
}
return err;
}
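/*
 * Map the current host bus width (and DDR mode) to the matching EXT_CSD
 * bus width encoding and apply the corresponding power class.
 */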
static int mmc_select_powerclass(struct mmc_card *card)
{
struct mmc_host *host = card->host;
u32 bus_width, ext_csd_bits;
int err, ddr;
/* Power class selection is supported for versions >= 4.0 */
if (!mmc_can_ext_csd(card))
return 0;
bus_width = host->ios.bus_width;
/* Power class values are defined only for 4/8 bit bus */
if (bus_width == MMC_BUS_WIDTH_1)
return 0;
ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52;
if (ddr)
ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
else
ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
err = __mmc_select_powerclass(card, ext_csd_bits);
if (err)
pr_warn("%s: power class selection to bus width %d ddr %d failed\n",
mmc_hostname(host), 1 << bus_width, ddr);
return err;
}
/*
* Set the bus speed for the selected speed mode.
*/
static void mmc_set_bus_speed(struct mmc_card *card)
{
unsigned int max_dtr = (unsigned int)-1;
if ((mmc_card_hs200(card) || mmc_card_hs400(card)) &&
max_dtr > card->ext_csd.hs200_max_dtr)
max_dtr = card->ext_csd.hs200_max_dtr;
else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr)
max_dtr = card->ext_csd.hs_max_dtr;
else if (max_dtr > card->csd.max_dtr)
max_dtr = card->csd.max_dtr;
mmc_set_clock(card->host, max_dtr);
}
/*
 * Select the bus width among 4-bit and 8-bit (SDR).
 * If the bus width is changed successfully, return the selected width value.
 * Zero is returned instead of an error value if the wide width is not
 * supported.
 */
static int mmc_select_bus_width(struct mmc_card *card)
{
static unsigned ext_csd_bits[] = {
EXT_CSD_BUS_WIDTH_8,
EXT_CSD_BUS_WIDTH_4,
};
static unsigned bus_widths[] = {
MMC_BUS_WIDTH_8,
MMC_BUS_WIDTH_4,
};
struct mmc_host *host = card->host;
unsigned idx, bus_width = 0;
int err = 0;
if (!mmc_can_ext_csd(card) ||
!(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
return 0;
idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1;
	/*
	 * Unlike SD, MMC cards don't have a configuration register that
	 * reports the supported bus widths. So either run the bus test
	 * command to identify the supported bus width, or compare the
	 * EXT_CSD values at the current bus width against the EXT_CSD
	 * values read earlier in 1-bit mode.
	 */
for (; idx < ARRAY_SIZE(bus_widths); idx++) {
		/*
		 * If the host is capable of 8-bit transfers, switch the
		 * device to 8-bit transfer mode first. If the mmc switch
		 * command returns an error, fall back to 4-bit transfer
		 * mode. On success, set the corresponding bus width on
		 * the host.
		 */
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
ext_csd_bits[idx],
card->ext_csd.generic_cmd6_time);
if (err)
continue;
bus_width = bus_widths[idx];
mmc_set_bus_width(host, bus_width);
/*
* If controller can't handle bus width test,
* compare ext_csd previously read in 1 bit mode
* against ext_csd at new bus width
*/
if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
err = mmc_compare_ext_csds(card, bus_width);
else
err = mmc_bus_test(card, bus_width);
if (!err) {
err = bus_width;
break;
} else {
pr_warn("%s: switch to bus width %d failed\n",
mmc_hostname(host), 1 << bus_width);
}
}
return err;
}
/*
* Switch to the high-speed mode
*/
static int mmc_select_hs(struct mmc_card *card)
{
int err;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
card->ext_csd.generic_cmd6_time, MMC_TIMING_MMC_HS,
true, true, MMC_CMD_RETRIES);
if (err)
pr_warn("%s: switch to high-speed failed, err:%d\n",
mmc_hostname(card->host), err);
return err;
}
/*
* Activate wide bus and DDR if supported.
*/
static int mmc_select_hs_ddr(struct mmc_card *card)
{
struct mmc_host *host = card->host;
u32 bus_width, ext_csd_bits;
int err = 0;
if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
return 0;
bus_width = host->ios.bus_width;
if (bus_width == MMC_BUS_WIDTH_1)
return 0;
ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
ext_csd_bits,
card->ext_csd.generic_cmd6_time,
MMC_TIMING_MMC_DDR52,
true, true, MMC_CMD_RETRIES);
if (err) {
pr_err("%s: switch to bus width %d ddr failed\n",
mmc_hostname(host), 1 << bus_width);
return err;
}
/*
* eMMC cards can support 3.3V to 1.2V i/o (vccq)
* signaling.
*
* EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
*
* 1.8V vccq at 3.3V core voltage (vcc) is not required
* in the JEDEC spec for DDR.
*
	 * Even though an (e)MMC card can support 3.3V to 1.2V vccq, not
	 * every host controller can, e.g. some SDHCI controllers that
	 * connect to an eMMC device. Some of these host controllers still
	 * need to use 1.8V vccq to support DDR mode.
*
* So the sequence will be:
* if (host and device can both support 1.2v IO)
* use 1.2v IO;
* else if (host and device can both support 1.8v IO)
* use 1.8v IO;
* so if host and device can only support 3.3v IO, this is the
* last choice.
*
* WARNING: eMMC rules are NOT the same as SD DDR
*/
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
if (!err)
return 0;
}
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V &&
host->caps & MMC_CAP_1_8V_DDR)
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
	/* make sure vccq goes back to 3.3V if the voltage switch failed */
if (err)
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
return err;
}
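/*
 * Switch from HS200 to HS400. The switch has to go through high-speed
 * mode: drop to HS timing and frequency, move the bus to 8-bit DDR, then
 * raise the timing to HS400, verifying the card status at each step.
 */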
static int mmc_select_hs400(struct mmc_card *card)
{
struct mmc_host *host = card->host;
unsigned int max_dtr;
int err = 0;
u8 val;
/*
* HS400 mode requires 8-bit bus width
*/
if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
host->ios.bus_width == MMC_BUS_WIDTH_8))
return 0;
/* Switch card to HS mode */
val = EXT_CSD_TIMING_HS;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time, 0,
false, true, MMC_CMD_RETRIES);
if (err) {
pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
mmc_hostname(host), err);
return err;
}
/* Prepare host to downgrade to HS timing */
if (host->ops->hs400_downgrade)
host->ops->hs400_downgrade(host);
/* Set host controller to HS timing */
mmc_set_timing(host, MMC_TIMING_MMC_HS);
/* Reduce frequency to HS frequency */
max_dtr = card->ext_csd.hs_max_dtr;
mmc_set_clock(host, max_dtr);
err = mmc_switch_status(card, true);
if (err)
goto out_err;
if (host->ops->hs400_prepare_ddr)
host->ops->hs400_prepare_ddr(host);
/* Switch card to DDR */
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
EXT_CSD_DDR_BUS_WIDTH_8,
card->ext_csd.generic_cmd6_time);
if (err) {
pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
mmc_hostname(host), err);
return err;
}
/* Switch card to HS400 */
val = EXT_CSD_TIMING_HS400 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time, 0,
false, true, MMC_CMD_RETRIES);
if (err) {
pr_err("%s: switch to hs400 failed, err:%d\n",
mmc_hostname(host), err);
return err;
}
/* Set host controller to HS400 timing and frequency */
mmc_set_timing(host, MMC_TIMING_MMC_HS400);
mmc_set_bus_speed(card);
if (host->ops->execute_hs400_tuning) {
mmc_retune_disable(host);
err = host->ops->execute_hs400_tuning(host, card);
mmc_retune_enable(host);
if (err)
goto out_err;
}
if (host->ops->hs400_complete)
host->ops->hs400_complete(host);
err = mmc_switch_status(card, true);
if (err)
goto out_err;
return 0;
out_err:
pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
__func__, err);
return err;
}
int mmc_hs200_to_hs400(struct mmc_card *card)
{
return mmc_select_hs400(card);
}
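/*
 * Downgrade from HS400 back to HS200, stepping through HS DDR and HS on
 * the way (the reverse of the upgrade path), typically so that re-tuning
 * can be performed.
 */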
int mmc_hs400_to_hs200(struct mmc_card *card)
{
struct mmc_host *host = card->host;
unsigned int max_dtr;
int err;
u8 val;
/* Reduce frequency to HS */
max_dtr = card->ext_csd.hs_max_dtr;
mmc_set_clock(host, max_dtr);
/* Switch HS400 to HS DDR */
val = EXT_CSD_TIMING_HS;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
val, card->ext_csd.generic_cmd6_time, 0,
false, true, MMC_CMD_RETRIES);
if (err)
goto out_err;
if (host->ops->hs400_downgrade)
host->ops->hs400_downgrade(host);
mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
err = mmc_switch_status(card, true);
if (err)
goto out_err;
/* Switch HS DDR to HS */
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time,
0, false, true, MMC_CMD_RETRIES);
if (err)
goto out_err;
mmc_set_timing(host, MMC_TIMING_MMC_HS);
err = mmc_switch_status(card, true);
if (err)
goto out_err;
/* Switch HS to HS200 */
val = EXT_CSD_TIMING_HS200 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
val, card->ext_csd.generic_cmd6_time, 0,
false, true, MMC_CMD_RETRIES);
if (err)
goto out_err;
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
/*
* For HS200, CRC errors are not a reliable way to know the switch
* failed. If there really is a problem, we would expect tuning will
* fail and the result ends up the same.
*/
err = mmc_switch_status(card, false);
if (err)
goto out_err;
mmc_set_bus_speed(card);
/* Prepare tuning for HS400 mode. */
if (host->ops->prepare_hs400_tuning)
host->ops->prepare_hs400_tuning(host, &host->ios);
return 0;
out_err:
pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
__func__, err);
return err;
}
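/*
 * Pick the I/O drive strength: either the host's fixed_drv_type, if the
 * card supports it, or whatever mmc_select_drive_strength() negotiates.
 * Drive strength type 0 is always advertised as supported.
 */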
static void mmc_select_driver_type(struct mmc_card *card)
{
int card_drv_type, drive_strength, drv_type = 0;
int fixed_drv_type = card->host->fixed_drv_type;
card_drv_type = card->ext_csd.raw_driver_strength |
mmc_driver_type_mask(0);
if (fixed_drv_type >= 0)
drive_strength = card_drv_type & mmc_driver_type_mask(fixed_drv_type)
? fixed_drv_type : 0;
else
drive_strength = mmc_select_drive_strength(card,
card->ext_csd.hs200_max_dtr,
card_drv_type, &drv_type);
card->drive_strength = drive_strength;
if (drv_type)
mmc_set_driver_type(card->host, drv_type);
}
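/*
 * Switch to HS400 with enhanced strobe (HS400ES): select vccq, force an
 * 8-bit bus, switch to HS, then move the bus to DDR with the strobe bit
 * set before raising the timing to HS400. With the enhanced strobe
 * enabled, no HS200 tuning step is needed.
 */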
static int mmc_select_hs400es(struct mmc_card *card)
{
struct mmc_host *host = card->host;
int err = -EINVAL;
u8 val;
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
	/* If it fails, try again during the next card power cycle */
if (err)
goto out_err;
err = mmc_select_bus_width(card);
if (err != MMC_BUS_WIDTH_8) {
pr_err("%s: switch to 8bit bus width failed, err:%d\n",
mmc_hostname(host), err);
err = err < 0 ? err : -ENOTSUPP;
goto out_err;
}
/* Switch card to HS mode */
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
card->ext_csd.generic_cmd6_time, 0,
false, true, MMC_CMD_RETRIES);
if (err) {
pr_err("%s: switch to hs for hs400es failed, err:%d\n",
mmc_hostname(host), err);
goto out_err;
}
/*
* Bump to HS timing and frequency. Some cards don't handle
* SEND_STATUS reliably at the initial frequency.
*/
mmc_set_timing(host, MMC_TIMING_MMC_HS);
mmc_set_bus_speed(card);
err = mmc_switch_status(card, true);
if (err)
goto out_err;
/* Switch card to DDR with strobe bit */
val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
val,
card->ext_csd.generic_cmd6_time);
if (err) {
pr_err("%s: switch to bus width for hs400es failed, err:%d\n",
mmc_hostname(host), err);
goto out_err;
}
mmc_select_driver_type(card);
/* Switch card to HS400 */
val = EXT_CSD_TIMING_HS400 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time, 0,
false, true, MMC_CMD_RETRIES);
if (err) {
pr_err("%s: switch to hs400es failed, err:%d\n",
mmc_hostname(host), err);
goto out_err;
}
/* Set host controller to HS400 timing and frequency */
mmc_set_timing(host, MMC_TIMING_MMC_HS400);
	/* Have the controller enable the enhanced strobe function */
host->ios.enhanced_strobe = true;
if (host->ops->hs400_enhanced_strobe)
host->ops->hs400_enhanced_strobe(host, &host->ios);
err = mmc_switch_status(card, true);
if (err)
goto out_err;
return 0;
out_err:
pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
__func__, err);
return err;
}
/*
 * For devices supporting HS200 mode, the following sequence
 * should be done before executing the tuning process:
 * 1. set the desired bus width (4-bit or 8-bit; 1-bit is not supported)
 * 2. switch to HS200 mode
 * 3. set the clock to > 52MHz and <= 200MHz
 */
static int mmc_select_hs200(struct mmc_card *card)
{
struct mmc_host *host = card->host;
unsigned int old_timing, old_signal_voltage, old_clock;
int err = -EINVAL;
u8 val;
old_signal_voltage = host->ios.signal_voltage;
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
	/* If it fails, try again during the next card power cycle */
if (err)
return err;
mmc_select_driver_type(card);
	/*
	 * Set the bus width (4 or 8) according to the host's capabilities
	 * and switch to HS200 mode if the bus width is set successfully.
	 */
err = mmc_select_bus_width(card);
if (err > 0) {
val = EXT_CSD_TIMING_HS200 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time, 0,
false, true, MMC_CMD_RETRIES);
if (err)
goto err;
/*
* Bump to HS timing and frequency. Some cards don't handle
* SEND_STATUS reliably at the initial frequency.
* NB: We can't move to full (HS200) speeds until after we've
* successfully switched over.
*/
old_timing = host->ios.timing;
old_clock = host->ios.clock;
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
mmc_set_clock(card->host, card->ext_csd.hs_max_dtr);
/*
* For HS200, CRC errors are not a reliable way to know the
* switch failed. If there really is a problem, we would expect
* tuning will fail and the result ends up the same.
*/
err = mmc_switch_status(card, false);
/*
* mmc_select_timing() assumes timing has not changed if
* it is a switch error.
*/
if (err == -EBADMSG) {
mmc_set_clock(host, old_clock);
mmc_set_timing(host, old_timing);
}
}
err:
if (err) {
/* fall back to the old signal voltage, if fails report error */
if (mmc_set_signal_voltage(host, old_signal_voltage))
err = -EIO;
pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
__func__, err);
}
return err;
}
/*
* Activate High Speed, HS200 or HS400ES mode if supported.
*/
static int mmc_select_timing(struct mmc_card *card)
{
int err = 0;
if (!mmc_can_ext_csd(card))
goto bus_speed;
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES) {
err = mmc_select_hs400es(card);
goto out;
}
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200) {
err = mmc_select_hs200(card);
if (err == -EBADMSG)
card->mmc_avail_type &= ~EXT_CSD_CARD_TYPE_HS200;
else
goto out;
}
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
err = mmc_select_hs(card);
out:
if (err && err != -EBADMSG)
return err;
bus_speed:
	/*
	 * Set the bus speed to the selected bus timing.
	 * If no timing was selected, backwards-compatible mode is the
	 * default.
	 */
mmc_set_bus_speed(card);
return 0;
}
/*
* Execute tuning sequence to seek the proper bus operating
* conditions for HS200 and HS400, which sends CMD21 to the device.
*/
static int mmc_hs200_tuning(struct mmc_card *card)
{
struct mmc_host *host = card->host;
	/*
	 * Timing should be adjusted to the HS400 target
	 * operation frequency for the tuning process.
	 */
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
host->ios.bus_width == MMC_BUS_WIDTH_8)
if (host->ops->prepare_hs400_tuning)
host->ops->prepare_hs400_tuning(host, &host->ios);
return mmc_execute_tuning(card);
}
/*
* Handle the detection and initialisation of a card.
*
* In the case of a resume, "oldcard" will contain the card
* we're trying to reinitialise.
*/
static int mmc_init_card(struct mmc_host *host, u32 ocr,
struct mmc_card *oldcard)
{
struct mmc_card *card;
int err;
u32 cid[4];
u32 rocr;
WARN_ON(!host->claimed);
/* Set correct bus mode for MMC before attempting init */
if (!mmc_host_is_spi(host))
mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state. We wait 1ms to give cards time to
	 * respond.
	 * mmc_go_idle() is also needed for eMMCs that are asleep.
	 */
mmc_go_idle(host);
/* The extra bit indicates that we support high capacity */
err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
if (err)
goto err;
/*
* For SPI, enable CRC as appropriate.
*/
if (mmc_host_is_spi(host)) {
err = mmc_spi_set_crc(host, use_spi_crc);
if (err)
goto err;
}
/*
* Fetch CID from card.
*/
err = mmc_send_cid(host, cid);
if (err)
goto err;
if (oldcard) {
if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
pr_debug("%s: Perhaps the card was replaced\n",
mmc_hostname(host));
err = -ENOENT;
goto err;
}
card = oldcard;
} else {
/*
* Allocate card structure.
*/
card = mmc_alloc_card(host, &mmc_type);
if (IS_ERR(card)) {
err = PTR_ERR(card);
goto err;
}
card->ocr = ocr;
card->type = MMC_TYPE_MMC;
card->rca = 1;
memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
}
/*
* Call the optional HC's init_card function to handle quirks.
*/
if (host->ops->init_card)
host->ops->init_card(host, card);
/*
* For native busses: set card RCA and quit open drain mode.
*/
if (!mmc_host_is_spi(host)) {
err = mmc_set_relative_addr(card);
if (err)
goto free_card;
mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
}
if (!oldcard) {
/*
* Fetch CSD from card.
*/
err = mmc_send_csd(card, card->raw_csd);
if (err)
goto free_card;
err = mmc_decode_csd(card);
if (err)
goto free_card;
err = mmc_decode_cid(card);
if (err)
goto free_card;
}
	/*
	 * Handle DSR configuration only for cards supporting DSR and
	 * hosts requesting it.
	 */
if (card->csd.dsr_imp && host->dsr_req)
mmc_set_dsr(host);
/*
* Select card, as all following commands rely on that.
*/
if (!mmc_host_is_spi(host)) {
err = mmc_select_card(card);
if (err)
goto free_card;
}
if (!oldcard) {
/* Read extended CSD. */
err = mmc_read_ext_csd(card);
if (err)
goto free_card;
/*
* If doing byte addressing, check if required to do sector
* addressing. Handle the case of <2GB cards needing sector
* addressing. See section 8.1 JEDEC Standard JED84-A441;
* ocr register has bit 30 set for sector addressing.
*/
if (rocr & BIT(30))
mmc_card_set_blockaddr(card);
/* Erase size depends on CSD and Extended CSD */
mmc_set_erase_size(card);
}
/* Enable ERASE_GRP_DEF. This bit is lost after a reset or power off. */
if (card->ext_csd.rev >= 3) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_ERASE_GROUP_DEF, 1,
card->ext_csd.generic_cmd6_time);
if (err && err != -EBADMSG)
goto free_card;
if (err) {
			/*
			 * Just disable the enhanced area offset & size;
			 * enabling ERASE_GROUP_DEF will be retried at the
			 * next reinit.
			 */
card->ext_csd.enhanced_area_offset = -EINVAL;
card->ext_csd.enhanced_area_size = -EINVAL;
} else {
card->ext_csd.erase_group_def = 1;
			/*
			 * ERASE_GROUP_DEF was enabled successfully. This
			 * affects the erase size, so recompute it here.
			 */
mmc_set_erase_size(card);
}
}
/*
* Ensure eMMC user default partition is enabled
*/
if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
card->ext_csd.part_config,
card->ext_csd.part_time);
if (err && err != -EBADMSG)
goto free_card;
}
/*
* Enable power_off_notification byte in the ext_csd register
*/
if (card->ext_csd.rev >= 6) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_POWER_OFF_NOTIFICATION,
EXT_CSD_POWER_ON,
card->ext_csd.generic_cmd6_time);
if (err && err != -EBADMSG)
goto free_card;
/*
* The err can be -EBADMSG or 0,
* so check for success and update the flag
*/
if (!err)
card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
}
/* set erase_arg */
if (mmc_can_discard(card))
card->erase_arg = MMC_DISCARD_ARG;
else if (mmc_can_trim(card))
card->erase_arg = MMC_TRIM_ARG;
else
card->erase_arg = MMC_ERASE_ARG;
/*
* Select timing interface
*/
err = mmc_select_timing(card);
if (err)
goto free_card;
if (mmc_card_hs200(card)) {
host->doing_init_tune = 1;
err = mmc_hs200_tuning(card);
if (!err)
err = mmc_select_hs400(card);
host->doing_init_tune = 0;
if (err)
goto free_card;
} else if (!mmc_card_hs400es(card)) {
/* Select the desired bus width optionally */
err = mmc_select_bus_width(card);
if (err > 0 && mmc_card_hs(card)) {
err = mmc_select_hs_ddr(card);
if (err)
goto free_card;
}
}
/*
* Choose the power class with selected bus interface
*/
mmc_select_powerclass(card);
/*
* Enable HPI feature (if supported)
*/
if (card->ext_csd.hpi) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HPI_MGMT, 1,
card->ext_csd.generic_cmd6_time);
if (err && err != -EBADMSG)
goto free_card;
if (err) {
pr_warn("%s: Enabling HPI failed\n",
mmc_hostname(card->host));
card->ext_csd.hpi_en = 0;
} else {
card->ext_csd.hpi_en = 1;
}
}
	/*
	 * If the cache size is greater than 0, this indicates the existence
	 * of a cache and it can be turned on. Note that some eMMCs from
	 * Micron have been reported to need a ~800 ms timeout while enabling
	 * the cache after sudden power failure tests. Let's extend the
	 * timeout to a minimum of MIN_CACHE_EN_TIMEOUT_MS and do it for all
	 * cards.
	 */
if (card->ext_csd.cache_size > 0) {
unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_CACHE_CTRL, 1, timeout_ms);
if (err && err != -EBADMSG)
goto free_card;
		/*
		 * The cache was turned on successfully only if there was
		 * no error.
		 */
if (err) {
pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
mmc_hostname(card->host), err);
card->ext_csd.cache_ctrl = 0;
} else {
card->ext_csd.cache_ctrl = 1;
}
}
/*
* Enable Command Queue if supported. Note that Packed Commands cannot
* be used with Command Queue.
*/
card->ext_csd.cmdq_en = false;
if (card->ext_csd.cmdq_support && host->caps2 & MMC_CAP2_CQE) {
err = mmc_cmdq_enable(card);
if (err && err != -EBADMSG)
goto free_card;
if (err) {
pr_warn("%s: Enabling CMDQ failed\n",
mmc_hostname(card->host));
card->ext_csd.cmdq_support = false;
card->ext_csd.cmdq_depth = 0;
}
}
	/*
	 * In some cases (e.g. RPMB or mmc_test), the Command Queue must be
	 * disabled for a time, so a flag is needed to indicate when to
	 * re-enable the Command Queue.
	 */
card->reenable_cmdq = card->ext_csd.cmdq_en;
if (host->cqe_ops && !host->cqe_enabled) {
err = host->cqe_ops->cqe_enable(host, card);
if (!err) {
host->cqe_enabled = true;
if (card->ext_csd.cmdq_en) {
pr_info("%s: Command Queue Engine enabled\n",
mmc_hostname(host));
} else {
host->hsq_enabled = true;
pr_info("%s: Host Software Queue enabled\n",
mmc_hostname(host));
}
}
}
if (host->caps2 & MMC_CAP2_AVOID_3_3V &&
host->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
pr_err("%s: Host failed to negotiate down from 3.3V\n",
mmc_hostname(host));
err = -EINVAL;
goto free_card;
}
if (!oldcard)
host->card = card;
return 0;
free_card:
if (!oldcard)
mmc_remove_card(card);
err:
return err;
}
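/* Sleep (CMD5) is supported from EXT_CSD rev 3 (eMMC 4.3) onwards. */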
static int mmc_can_sleep(struct mmc_card *card)
{
return card->ext_csd.rev >= 3;
}
static int mmc_sleep_busy_cb(void *cb_data, bool *busy)
{
struct mmc_host *host = cb_data;
*busy = host->ops->card_busy(host);
return 0;
}
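/*
 * Put the card to sleep with CMD5 (SLEEP_AWAKE, bit 15 set). S_A_TIMEOUT
 * is held in units of 100ns, hence the division by 10000 to get
 * milliseconds. If the host neither waits while the card signals busy nor
 * supports HW busy polling, simply wait out the full sleep/awake timeout.
 */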
static int mmc_sleep(struct mmc_host *host)
{
struct mmc_command cmd = {};
struct mmc_card *card = host->card;
unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
bool use_r1b_resp;
int err;
/* Re-tuning can't be done once the card is deselected */
mmc_retune_hold(host);
err = mmc_deselect_cards(host);
if (err)
goto out_release;
cmd.opcode = MMC_SLEEP_AWAKE;
cmd.arg = card->rca << 16;
cmd.arg |= 1 << 15;
use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
goto out_release;
/*
* If the host does not wait while the card signals busy, then we can
* try to poll, but only if the host supports HW polling, as the
* SEND_STATUS cmd is not allowed. If we can't poll, then we simply need
* to wait the sleep/awake timeout.
*/
if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
goto out_release;
if (!host->ops->card_busy) {
mmc_delay(timeout_ms);
goto out_release;
}
err = __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_sleep_busy_cb, host);
out_release:
mmc_retune_release(host);
return err;
}
static int mmc_can_poweroff_notify(const struct mmc_card *card)
{
return card &&
mmc_card_mmc(card) &&
(card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
}
static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
{
unsigned int timeout = card->ext_csd.generic_cmd6_time;
int err;
/* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
if (notify_type == EXT_CSD_POWER_OFF_LONG)
timeout = card->ext_csd.power_off_longtime;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_POWER_OFF_NOTIFICATION,
notify_type, timeout, 0, false, false, MMC_CMD_RETRIES);
if (err)
pr_err("%s: Power Off Notification timed out, %u\n",
mmc_hostname(card->host), timeout);
/* Disable the power off notification after the switch operation. */
card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;
return err;
}
/*
* Host is being removed. Free up the current card.
*/
static void mmc_remove(struct mmc_host *host)
{
mmc_remove_card(host->card);
host->card = NULL;
}
/*
* Card detection - card is alive.
*/
static int mmc_alive(struct mmc_host *host)
{
return mmc_send_status(host->card, NULL);
}
/*
* Card detection callback from host.
*/
static void mmc_detect(struct mmc_host *host)
{
int err;
mmc_get_card(host->card, NULL);
/*
* Just check if our card has been removed.
*/
err = _mmc_detect_card_removed(host);
mmc_put_card(host->card, NULL);
if (err) {
mmc_remove(host);
mmc_claim_host(host);
mmc_detach_bus(host);
mmc_power_off(host);
mmc_release_host(host);
}
}
static bool _mmc_cache_enabled(struct mmc_host *host)
{
return host->card->ext_csd.cache_size > 0 &&
host->card->ext_csd.cache_ctrl & 1;
}
/*
* Flush the internal cache of the eMMC to non-volatile storage.
*/
static int _mmc_flush_cache(struct mmc_host *host)
{
int err = 0;
if (_mmc_cache_enabled(host)) {
err = mmc_switch(host->card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_FLUSH_CACHE, 1,
CACHE_FLUSH_TIMEOUT_MS);
if (err)
pr_err("%s: cache flush error %d\n",
mmc_hostname(host), err);
}
return err;
}
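/*
 * Common suspend path: flush the internal cache, then quiesce the card
 * via poweroff notification if usable, otherwise sleep, otherwise a plain
 * deselect, and finally power off on success.
 */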
static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
{
int err = 0;
unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
EXT_CSD_POWER_OFF_LONG;
mmc_claim_host(host);
if (mmc_card_suspended(host->card))
goto out;
err = _mmc_flush_cache(host);
if (err)
goto out;
if (mmc_can_poweroff_notify(host->card) &&
((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend ||
(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND)))
err = mmc_poweroff_notify(host->card, notify_type);
else if (mmc_can_sleep(host->card))
err = mmc_sleep(host);
else if (!mmc_host_is_spi(host))
err = mmc_deselect_cards(host);
if (!err) {
mmc_power_off(host);
mmc_card_set_suspended(host->card);
}
out:
mmc_release_host(host);
return err;
}
/*
* Suspend callback
*/
static int mmc_suspend(struct mmc_host *host)
{
int err;
err = _mmc_suspend(host, true);
if (!err) {
pm_runtime_disable(&host->card->dev);
pm_runtime_set_suspended(&host->card->dev);
}
return err;
}
/*
* This function tries to determine if the same card is still present
* and, if so, restore all state to it.
*/
static int _mmc_resume(struct mmc_host *host)
{
int err = 0;
mmc_claim_host(host);
if (!mmc_card_suspended(host->card))
goto out;
mmc_power_up(host, host->card->ocr);
err = mmc_init_card(host, host->card->ocr, host->card);
mmc_card_clr_suspended(host->card);
out:
mmc_release_host(host);
return err;
}
/*
* Shutdown callback
*/
static int mmc_shutdown(struct mmc_host *host)
{
int err = 0;
	/*
	 * In a specific case for poweroff notify, we need to resume the card
	 * before we can shut it down properly.
	 */
if (mmc_can_poweroff_notify(host->card) &&
!(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
err = _mmc_resume(host);
if (!err)
err = _mmc_suspend(host, false);
return err;
}
/*
* Callback for resume.
*/
static int mmc_resume(struct mmc_host *host)
{
pm_runtime_enable(&host->card->dev);
return 0;
}
/*
* Callback for runtime_suspend.
*/
static int mmc_runtime_suspend(struct mmc_host *host)
{
int err;
if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
return 0;
err = _mmc_suspend(host, true);
if (err)
pr_err("%s: error %d doing aggressive suspend\n",
mmc_hostname(host), err);
return err;
}
/*
* Callback for runtime_resume.
*/
static int mmc_runtime_resume(struct mmc_host *host)
{
int err;
err = _mmc_resume(host);
if (err && err != -ENOMEDIUM)
pr_err("%s: error %d doing runtime resume\n",
mmc_hostname(host), err);
return 0;
}
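/*
 * The RST_n hardware reset line may only be used once it has been enabled
 * in EXT_CSD; RST_N_FUNCTION is a one-time programmable field.
 */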
static int mmc_can_reset(struct mmc_card *card)
{
u8 rst_n_function;
rst_n_function = card->ext_csd.rst_n_function;
if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
return 0;
return 1;
}
static int _mmc_hw_reset(struct mmc_host *host)
{
struct mmc_card *card = host->card;
	/*
	 * In the case of recovery, we can't expect cache flushing to always
	 * work, but we give it a go and ignore any errors.
	 */
_mmc_flush_cache(host);
if ((host->caps & MMC_CAP_HW_RESET) && host->ops->card_hw_reset &&
mmc_can_reset(card)) {
		/* If the card accepts the RST_n signal, send it. */
mmc_set_clock(host, host->f_init);
host->ops->card_hw_reset(host);
/* Set initial state and call mmc_set_ios */
mmc_set_initial_state(host);
} else {
/* Do a brute force power cycle */
mmc_power_cycle(host, card->ocr);
mmc_pwrseq_reset(host);
}
return mmc_init_card(host, card->ocr, card);
}
static const struct mmc_bus_ops mmc_ops = {
.remove = mmc_remove,
.detect = mmc_detect,
.suspend = mmc_suspend,
.resume = mmc_resume,
.runtime_suspend = mmc_runtime_suspend,
.runtime_resume = mmc_runtime_resume,
.alive = mmc_alive,
.shutdown = mmc_shutdown,
.hw_reset = _mmc_hw_reset,
.cache_enabled = _mmc_cache_enabled,
.flush_cache = _mmc_flush_cache,
};
/*
* Starting point for MMC card init.
*/
int mmc_attach_mmc(struct mmc_host *host)
{
int err;
u32 ocr, rocr;
WARN_ON(!host->claimed);
/* Set correct bus mode for MMC before attempting attach */
if (!mmc_host_is_spi(host))
mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
err = mmc_send_op_cond(host, 0, &ocr);
if (err)
return err;
mmc_attach_bus(host, &mmc_ops);
if (host->ocr_avail_mmc)
host->ocr_avail = host->ocr_avail_mmc;
/*
* We need to get OCR a different way for SPI.
*/
if (mmc_host_is_spi(host)) {
err = mmc_spi_read_ocr(host, 1, &ocr);
if (err)
goto err;
}
rocr = mmc_select_voltage(host, ocr);
/*
* Can we support the voltage of the card?
*/
if (!rocr) {
err = -EINVAL;
goto err;
}
/*
* Detect and init the card.
*/
err = mmc_init_card(host, rocr, NULL);
if (err)
goto err;
mmc_release_host(host);
err = mmc_add_card(host->card);
if (err)
goto remove_card;
mmc_claim_host(host);
return 0;
remove_card:
mmc_remove_card(host->card);
mmc_claim_host(host);
host->card = NULL;
err:
mmc_detach_bus(host);
pr_err("%s: error %d whilst initialising MMC card\n",
mmc_hostname(host), err);
return err;
}
| linux-master | drivers/mmc/core/mmc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* linux/drivers/mmc/core/sdio_bus.c
*
* Copyright 2007 Pierre Ossman
*
* SDIO function driver model
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/acpi.h>
#include <linux/sysfs.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include <linux/of.h>
#include "core.h"
#include "card.h"
#include "sdio_cis.h"
#include "sdio_bus.h"
#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv)
/* show configuration fields */
#define sdio_config_attr(field, format_string, args...) \
static ssize_t \
field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct sdio_func *func; \
\
	func = dev_to_sdio_func(dev);					\
return sysfs_emit(buf, format_string, args); \
} \
static DEVICE_ATTR_RO(field)
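/*
 * For example, sdio_config_attr(class, ...) expands to a class_show()
 * accessor plus a read-only dev_attr_class, so each field below becomes a
 * read-only sysfs file on the function's device node.
 */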
sdio_config_attr(class, "0x%02x\n", func->class);
sdio_config_attr(vendor, "0x%04x\n", func->vendor);
sdio_config_attr(device, "0x%04x\n", func->device);
sdio_config_attr(revision, "%u.%u\n", func->major_rev, func->minor_rev);
sdio_config_attr(modalias, "sdio:c%02Xv%04Xd%04X\n", func->class, func->vendor, func->device);
#define sdio_info_attr(num) \
static ssize_t info##num##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct sdio_func *func = dev_to_sdio_func(dev); \
\
if (num > func->num_info) \
return -ENODATA; \
if (!func->info[num - 1][0]) \
return 0; \
return sysfs_emit(buf, "%s\n", func->info[num - 1]); \
} \
static DEVICE_ATTR_RO(info##num)
sdio_info_attr(1);
sdio_info_attr(2);
sdio_info_attr(3);
sdio_info_attr(4);
static struct attribute *sdio_dev_attrs[] = {
&dev_attr_class.attr,
&dev_attr_vendor.attr,
&dev_attr_device.attr,
&dev_attr_revision.attr,
&dev_attr_info1.attr,
&dev_attr_info2.attr,
&dev_attr_info3.attr,
&dev_attr_info4.attr,
&dev_attr_modalias.attr,
NULL,
};
ATTRIBUTE_GROUPS(sdio_dev);
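/*
 * Match a function against a single id table entry; SDIO_ANY_ID acts as a
 * wildcard for the class, vendor and device fields.
 */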
static const struct sdio_device_id *sdio_match_one(struct sdio_func *func,
const struct sdio_device_id *id)
{
if (id->class != (__u8)SDIO_ANY_ID && id->class != func->class)
return NULL;
if (id->vendor != (__u16)SDIO_ANY_ID && id->vendor != func->vendor)
return NULL;
if (id->device != (__u16)SDIO_ANY_ID && id->device != func->device)
return NULL;
return id;
}
static const struct sdio_device_id *sdio_match_device(struct sdio_func *func,
struct sdio_driver *sdrv)
{
const struct sdio_device_id *ids;
ids = sdrv->id_table;
if (ids) {
while (ids->class || ids->vendor || ids->device) {
if (sdio_match_one(func, ids))
return ids;
ids++;
}
}
return NULL;
}
static int sdio_bus_match(struct device *dev, struct device_driver *drv)
{
struct sdio_func *func = dev_to_sdio_func(dev);
struct sdio_driver *sdrv = to_sdio_driver(drv);
if (sdio_match_device(func, sdrv))
return 1;
return 0;
}
static int
sdio_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct sdio_func *func = dev_to_sdio_func(dev);
unsigned int i;
if (add_uevent_var(env,
"SDIO_CLASS=%02X", func->class))
return -ENOMEM;
if (add_uevent_var(env,
"SDIO_ID=%04X:%04X", func->vendor, func->device))
return -ENOMEM;
if (add_uevent_var(env,
"SDIO_REVISION=%u.%u", func->major_rev, func->minor_rev))
return -ENOMEM;
for (i = 0; i < func->num_info; i++) {
if (add_uevent_var(env, "SDIO_INFO%u=%s", i+1, func->info[i]))
return -ENOMEM;
}
if (add_uevent_var(env,
"MODALIAS=sdio:c%02Xv%04Xd%04X",
func->class, func->vendor, func->device))
return -ENOMEM;
return 0;
}
static int sdio_bus_probe(struct device *dev)
{
struct sdio_driver *drv = to_sdio_driver(dev->driver);
struct sdio_func *func = dev_to_sdio_func(dev);
const struct sdio_device_id *id;
int ret;
id = sdio_match_device(func, drv);
if (!id)
return -ENODEV;
ret = dev_pm_domain_attach(dev, false);
if (ret)
return ret;
atomic_inc(&func->card->sdio_funcs_probed);
	/*
	 * Unbound SDIO functions are always suspended.
	 * During probe, the function is set active and the usage count
	 * is incremented. If the driver supports runtime PM,
	 * it should call pm_runtime_put_noidle() in its probe routine and
	 * pm_runtime_get_noresume() in its remove routine.
	 */
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) {
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto disable_runtimepm;
}
/* Set the default block size so the driver is sure it's something
* sensible. */
sdio_claim_host(func);
if (mmc_card_removed(func->card))
ret = -ENOMEDIUM;
else
ret = sdio_set_block_size(func, 0);
sdio_release_host(func);
if (ret)
goto disable_runtimepm;
ret = drv->probe(func, id);
if (ret)
goto disable_runtimepm;
return 0;
disable_runtimepm:
atomic_dec(&func->card->sdio_funcs_probed);
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_put_noidle(dev);
dev_pm_domain_detach(dev, false);
return ret;
}
static void sdio_bus_remove(struct device *dev)
{
struct sdio_driver *drv = to_sdio_driver(dev->driver);
struct sdio_func *func = dev_to_sdio_func(dev);
/* Make sure card is powered before invoking ->remove() */
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_get_sync(dev);
drv->remove(func);
atomic_dec(&func->card->sdio_funcs_probed);
if (func->irq_handler) {
pr_warn("WARNING: driver %s did not remove its interrupt handler!\n",
drv->name);
sdio_claim_host(func);
sdio_release_irq(func);
sdio_release_host(func);
}
/* First, undo the increment made directly above */
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_put_noidle(dev);
/* Then undo the runtime PM settings in sdio_bus_probe() */
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_put_sync(dev);
dev_pm_domain_detach(dev, false);
}
static const struct dev_pm_ops sdio_bus_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume)
SET_RUNTIME_PM_OPS(
pm_generic_runtime_suspend,
pm_generic_runtime_resume,
NULL
)
};
static struct bus_type sdio_bus_type = {
.name = "sdio",
.dev_groups = sdio_dev_groups,
.match = sdio_bus_match,
.uevent = sdio_bus_uevent,
.probe = sdio_bus_probe,
.remove = sdio_bus_remove,
.pm = &sdio_bus_pm_ops,
};
int sdio_register_bus(void)
{
return bus_register(&sdio_bus_type);
}
void sdio_unregister_bus(void)
{
bus_unregister(&sdio_bus_type);
}
/**
* sdio_register_driver - register a function driver
* @drv: SDIO function driver
*/
int sdio_register_driver(struct sdio_driver *drv)
{
drv->drv.name = drv->name;
drv->drv.bus = &sdio_bus_type;
return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(sdio_register_driver);
/**
* sdio_unregister_driver - unregister a function driver
* @drv: SDIO function driver
*/
void sdio_unregister_driver(struct sdio_driver *drv)
{
drv->drv.bus = &sdio_bus_type;
driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(sdio_unregister_driver);
static void sdio_release_func(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
if (!(func->card->quirks & MMC_QUIRK_NONSTD_SDIO))
sdio_free_func_cis(func);
/*
* We have now removed the link to the tuples in the
* card structure, so remove the reference.
*/
put_device(&func->card->dev);
kfree(func->info);
kfree(func->tmpbuf);
kfree(func);
}
/*
* Allocate and initialise a new SDIO function structure.
*/
struct sdio_func *sdio_alloc_func(struct mmc_card *card)
{
struct sdio_func *func;
func = kzalloc(sizeof(struct sdio_func), GFP_KERNEL);
if (!func)
return ERR_PTR(-ENOMEM);
	/*
	 * Allocate the buffer separately to make sure it's properly aligned
	 * for DMA usage (incl. 64-bit DMA).
	 */
func->tmpbuf = kmalloc(4, GFP_KERNEL);
if (!func->tmpbuf) {
kfree(func);
return ERR_PTR(-ENOMEM);
}
func->card = card;
device_initialize(&func->dev);
	/*
	 * We may link to tuples in the card structure, so we need to make
	 * sure we hold a reference to it.
	 */
get_device(&func->card->dev);
func->dev.parent = &card->dev;
func->dev.bus = &sdio_bus_type;
func->dev.release = sdio_release_func;
return func;
}
#ifdef CONFIG_ACPI
static void sdio_acpi_set_handle(struct sdio_func *func)
{
struct mmc_host *host = func->card->host;
u64 addr = ((u64)host->slotno << 16) | func->num;
acpi_preset_companion(&func->dev, ACPI_COMPANION(host->parent), addr);
}
#else
static inline void sdio_acpi_set_handle(struct sdio_func *func) {}
#endif
static void sdio_set_of_node(struct sdio_func *func)
{
struct mmc_host *host = func->card->host;
func->dev.of_node = mmc_of_find_child_device(host, func->num);
}
/*
* Register a new SDIO function with the driver model.
*/
int sdio_add_func(struct sdio_func *func)
{
int ret;
dev_set_name(&func->dev, "%s:%d", mmc_card_id(func->card), func->num);
sdio_set_of_node(func);
sdio_acpi_set_handle(func);
device_enable_async_suspend(&func->dev);
ret = device_add(&func->dev);
if (ret == 0)
sdio_func_set_present(func);
return ret;
}
/*
 * Unregister an SDIO function with the driver model, and
 * (eventually) free it.
 * This function can be called through error paths where sdio_add_func() was
 * never executed (because a failure occurred at an earlier point).
 */
void sdio_remove_func(struct sdio_func *func)
{
if (sdio_func_present(func))
device_del(&func->dev);
of_node_put(func->dev.of_node);
put_device(&func->dev);
}
| linux-master | drivers/mmc/core/sdio_bus.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* linux/drivers/mmc/core/sdio_cis.c
*
* Author: Nicolas Pitre
* Created: June 11, 2007
* Copyright: MontaVista Software Inc.
*
* Copyright 2007 Pierre Ossman
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include "sdio_cis.h"
#include "sdio_ops.h"
#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */
static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
const unsigned char *buf, unsigned size)
{
u8 major_rev, minor_rev;
unsigned i, nr_strings;
char **buffer, *string;
if (size < 2)
return 0;
major_rev = buf[0];
minor_rev = buf[1];
	/*
	 * Find all null-terminated (including zero-length) strings in
	 * the TPLLV1_INFO field. Trailing garbage is ignored.
	 */
buf += 2;
size -= 2;
nr_strings = 0;
for (i = 0; i < size; i++) {
if (buf[i] == 0xff)
break;
if (buf[i] == 0)
nr_strings++;
}
if (nr_strings == 0)
return 0;
size = i;
buffer = kzalloc(sizeof(char*) * nr_strings + size, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
string = (char*)(buffer + nr_strings);
for (i = 0; i < nr_strings; i++) {
buffer[i] = string;
strcpy(string, buf);
string += strlen(string) + 1;
buf += strlen(buf) + 1;
}
if (func) {
func->major_rev = major_rev;
func->minor_rev = minor_rev;
func->num_info = nr_strings;
func->info = (const char**)buffer;
} else {
card->major_rev = major_rev;
card->minor_rev = minor_rev;
card->num_info = nr_strings;
card->info = (const char**)buffer;
}
return 0;
}
static int cistpl_manfid(struct mmc_card *card, struct sdio_func *func,
const unsigned char *buf, unsigned size)
{
unsigned int vendor, device;
/* TPLMID_MANF */
vendor = buf[0] | (buf[1] << 8);
/* TPLMID_CARD */
device = buf[2] | (buf[3] << 8);
if (func) {
func->vendor = vendor;
func->device = device;
} else {
card->cis.vendor = vendor;
card->cis.device = device;
}
return 0;
}
static const unsigned char speed_val[16] =
{ 0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80 };
static const unsigned int speed_unit[8] =
{ 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 };
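/*
 * TPLFE_MAX_TRAN_SPEED encodes the rate as a mantissa (bits 6:3, indexing
 * speed_val) times a rate unit (bits 2:0, indexing speed_unit); see the
 * decode in cistpl_funce_common() below.
 */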
typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *,
const unsigned char *, unsigned);
struct cis_tpl {
unsigned char code;
unsigned char min_size;
tpl_parse_t *parse;
};
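/*
 * Dispatch one tuple to its parser. Returns -ENOENT for an unknown tuple
 * code and -EILSEQ for a known tuple that is deliberately not parsed;
 * both cases are left for the function driver to consume.
 */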
static int cis_tpl_parse(struct mmc_card *card, struct sdio_func *func,
const char *tpl_descr,
const struct cis_tpl *tpl, int tpl_count,
unsigned char code,
const unsigned char *buf, unsigned size)
{
int i, ret;
/* look for a matching code in the table */
for (i = 0; i < tpl_count; i++, tpl++) {
if (tpl->code == code)
break;
}
if (i < tpl_count) {
if (size >= tpl->min_size) {
if (tpl->parse)
ret = tpl->parse(card, func, buf, size);
else
ret = -EILSEQ; /* known tuple, not parsed */
} else {
/* invalid tuple */
ret = -EINVAL;
}
if (ret && ret != -EILSEQ && ret != -ENOENT) {
pr_err("%s: bad %s tuple 0x%02x (%u bytes)\n",
mmc_hostname(card->host), tpl_descr, code, size);
}
} else {
/* unknown tuple */
ret = -ENOENT;
}
return ret;
}
static int cistpl_funce_common(struct mmc_card *card, struct sdio_func *func,
const unsigned char *buf, unsigned size)
{
/* Only valid for the common CIS (function 0) */
if (func)
return -EINVAL;
/* TPLFE_FN0_BLK_SIZE */
card->cis.blksize = buf[1] | (buf[2] << 8);
/* TPLFE_MAX_TRAN_SPEED */
card->cis.max_dtr = speed_val[(buf[3] >> 3) & 15] *
speed_unit[buf[3] & 7];
return 0;
}
static int cistpl_funce_func(struct mmc_card *card, struct sdio_func *func,
const unsigned char *buf, unsigned size)
{
unsigned vsn;
unsigned min_size;
/* Only valid for the individual function's CIS (1-7) */
if (!func)
return -EINVAL;
/*
* This tuple has a different length depending on the SDIO spec
* version.
*/
vsn = func->card->cccr.sdio_vsn;
min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42;
if (size == 28 && vsn == SDIO_SDIO_REV_1_10) {
pr_warn("%s: card has broken SDIO 1.1 CIS, forcing SDIO 1.0\n",
mmc_hostname(card->host));
vsn = SDIO_SDIO_REV_1_00;
} else if (size < min_size) {
return -EINVAL;
}
/* TPLFE_MAX_BLK_SIZE */
func->max_blksize = buf[12] | (buf[13] << 8);
/* TPLFE_ENABLE_TIMEOUT_VAL, present in ver 1.1 and above */
if (vsn > SDIO_SDIO_REV_1_00)
func->enable_timeout = (buf[28] | (buf[29] << 8)) * 10;
else
func->enable_timeout = jiffies_to_msecs(HZ);
return 0;
}
/*
 * Known TPLFE_TYPEs table for CISTPL_FUNCE tuples.
 *
 * Note that, unlike PCMCIA, CISTPL_FUNCE tuples are not parsed depending
 * on the TPLFID_FUNCTION value of the previous CISTPL_FUNCID, since on
 * SDIO TPLFID_FUNCTION is always hardcoded to 0x0C.
 */
static const struct cis_tpl cis_tpl_funce_list[] = {
{ 0x00, 4, cistpl_funce_common },
{ 0x01, 0, cistpl_funce_func },
{ 0x04, 1+1+6, /* CISTPL_FUNCE_LAN_NODE_ID */ },
};
static int cistpl_funce(struct mmc_card *card, struct sdio_func *func,
const unsigned char *buf, unsigned size)
{
if (size < 1)
return -EINVAL;
return cis_tpl_parse(card, func, "CISTPL_FUNCE",
cis_tpl_funce_list,
ARRAY_SIZE(cis_tpl_funce_list),
buf[0], buf, size);
}
/* Known TPL_CODEs table for CIS tuples */
static const struct cis_tpl cis_tpl_list[] = {
{ 0x15, 3, cistpl_vers_1 },
{ 0x20, 4, cistpl_manfid },
{ 0x21, 2, /* cistpl_funcid */ },
{ 0x22, 0, cistpl_funce },
{ 0x91, 2, /* cistpl_sdio_std */ },
};
static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
{
int ret;
struct sdio_func_tuple *this, **prev;
unsigned i, ptr = 0;
	/*
	 * Note that this works for the common CIS (function number 0) as
	 * well as a function's CIS, since SDIO_CCCR_CIS and SDIO_FBR_CIS
	 * have the same offset.
	 */
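	/* The CIS pointer is stored as a 24-bit little-endian value. */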
for (i = 0; i < 3; i++) {
unsigned char x, fn;
if (func)
fn = func->num;
else
fn = 0;
ret = mmc_io_rw_direct(card, 0, 0,
SDIO_FBR_BASE(fn) + SDIO_FBR_CIS + i, 0, &x);
if (ret)
return ret;
ptr |= x << (i * 8);
}
if (func)
prev = &func->tuples;
else
prev = &card->tuples;
if (*prev)
return -EINVAL;
do {
unsigned char tpl_code, tpl_link;
unsigned long timeout = jiffies +
msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS);
ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
if (ret)
break;
/* 0xff means we're done */
if (tpl_code == 0xff)
break;
/* null entries have no link field or data */
if (tpl_code == 0x00)
continue;
ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_link);
if (ret)
break;
/* a size of 0xff also means we're done */
if (tpl_link == 0xff)
break;
this = kmalloc(sizeof(*this) + tpl_link, GFP_KERNEL);
if (!this)
return -ENOMEM;
for (i = 0; i < tpl_link; i++) {
ret = mmc_io_rw_direct(card, 0, 0,
ptr + i, 0, &this->data[i]);
if (ret)
break;
}
if (ret) {
kfree(this);
break;
}
/* Try to parse the CIS tuple */
ret = cis_tpl_parse(card, func, "CIS",
cis_tpl_list, ARRAY_SIZE(cis_tpl_list),
tpl_code, this->data, tpl_link);
if (ret == -EILSEQ || ret == -ENOENT) {
/*
* The tuple is unknown or known but not parsed.
* Queue the tuple for the function driver.
*/
this->next = NULL;
this->code = tpl_code;
this->size = tpl_link;
*prev = this;
prev = &this->next;
if (ret == -ENOENT) {
if (time_after(jiffies, timeout))
break;
#define FMT(type) "%s: queuing " type " CIS tuple 0x%02x [%*ph] (%u bytes)\n"
/*
* Tuples in this range are reserved for
* vendors, so don't warn about them
*/
if (tpl_code >= 0x80 && tpl_code <= 0x8f)
pr_debug_ratelimited(FMT("vendor"),
mmc_hostname(card->host),
tpl_code, tpl_link, this->data,
tpl_link);
else
pr_warn_ratelimited(FMT("unknown"),
mmc_hostname(card->host),
tpl_code, tpl_link, this->data,
tpl_link);
}
/* keep on analyzing tuples */
ret = 0;
} else {
/*
* We don't need the tuple anymore if it was
* successfully parsed by the SDIO core or if it is
* not going to be queued for a driver.
*/
kfree(this);
}
ptr += tpl_link;
} while (!ret);
/*
* Link in all unknown tuples found in the common CIS so that
* drivers don't have to go digging in two places.
*/
if (func)
*prev = card->tuples;
return ret;
}
int sdio_read_common_cis(struct mmc_card *card)
{
return sdio_read_cis(card, NULL);
}
void sdio_free_common_cis(struct mmc_card *card)
{
struct sdio_func_tuple *tuple, *victim;
tuple = card->tuples;
while (tuple) {
victim = tuple;
tuple = tuple->next;
kfree(victim);
}
card->tuples = NULL;
}
int sdio_read_func_cis(struct sdio_func *func)
{
int ret;
ret = sdio_read_cis(func->card, func);
if (ret)
return ret;
/*
* Vendor/device id is optional for function CIS, so
* copy it from the card structure as needed.
*/
if (func->vendor == 0) {
func->vendor = func->card->cis.vendor;
func->device = func->card->cis.device;
}
return 0;
}
void sdio_free_func_cis(struct sdio_func *func)
{
struct sdio_func_tuple *tuple, *victim;
tuple = func->tuples;
while (tuple && tuple != func->card->tuples) {
victim = tuple;
tuple = tuple->next;
kfree(victim);
}
func->tuples = NULL;
}
| linux-master | drivers/mmc/core/sdio_cis.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* MMC crypto engine (inline encryption) support
*
* Copyright 2020 Google LLC
*/
#include <linux/blk-crypto.h>
#include <linux/mmc/host.h>
#include "core.h"
#include "crypto.h"
#include "queue.h"
void mmc_crypto_set_initial_state(struct mmc_host *host)
{
/* Reset might clear all keys, so reprogram all the keys. */
if (host->caps2 & MMC_CAP2_CRYPTO)
blk_crypto_reprogram_all_keys(&host->crypto_profile);
}
void mmc_crypto_setup_queue(struct request_queue *q, struct mmc_host *host)
{
if (host->caps2 & MMC_CAP2_CRYPTO)
blk_crypto_register(&host->crypto_profile, q);
}
EXPORT_SYMBOL_GPL(mmc_crypto_setup_queue);
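/*
 * Copy the inline-encryption context of the block layer request into
 * the mmc_request, along with the index of the keyslot the key was
 * programmed into, so the host driver can configure its crypto engine.
 */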
void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq)
{
struct request *req = mmc_queue_req_to_req(mqrq);
struct mmc_request *mrq = &mqrq->brq.mrq;
struct blk_crypto_keyslot *keyslot;
if (!req->crypt_ctx)
return;
mrq->crypto_ctx = req->crypt_ctx;
keyslot = req->crypt_keyslot;
if (keyslot)
mrq->crypto_key_slot = blk_crypto_keyslot_index(keyslot);
}
EXPORT_SYMBOL_GPL(mmc_crypto_prepare_req);
| linux-master | drivers/mmc/core/crypto.c |
// SPDX-License-Identifier: GPL-2.0
/*
* DMA support for Internal DMAC with SDHI SD/SDIO controller
*
* Copyright (C) 2016-19 Renesas Electronics Corporation
* Copyright (C) 2016-17 Horms Solutions, Simon Horman
* Copyright (C) 2018-19 Sang Engineering, Wolfram Sang
*/
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sys_soc.h>
#include "renesas_sdhi.h"
#include "tmio_mmc.h"
#define DM_CM_DTRAN_MODE 0x820
#define DM_CM_DTRAN_CTRL 0x828
#define DM_CM_RST 0x830
#define DM_CM_INFO1 0x840
#define DM_CM_INFO1_MASK 0x848
#define DM_CM_INFO2 0x850
#define DM_CM_INFO2_MASK 0x858
#define DM_DTRAN_ADDR 0x880
/* DM_CM_DTRAN_MODE */
#define DTRAN_MODE_CH_NUM_CH0 0 /* "downstream" = for write commands */
#define DTRAN_MODE_CH_NUM_CH1 BIT(16) /* "upstream" = for read commands */
#define DTRAN_MODE_BUS_WIDTH (BIT(5) | BIT(4))
#define DTRAN_MODE_ADDR_MODE BIT(0) /* 1 = Increment address, 0 = Fixed */
/* DM_CM_DTRAN_CTRL */
#define DTRAN_CTRL_DM_START BIT(0)
/* DM_CM_RST */
#define RST_DTRANRST1 BIT(9)
#define RST_DTRANRST0 BIT(8)
#define RST_RESERVED_BITS GENMASK_ULL(31, 0)
/* DM_CM_INFO1 and DM_CM_INFO1_MASK */
#define INFO1_MASK_CLEAR GENMASK_ULL(31, 0)
#define INFO1_DTRANEND1 BIT(20)
#define INFO1_DTRANEND1_OLD BIT(17)
#define INFO1_DTRANEND0 BIT(16)
/* DM_CM_INFO2 and DM_CM_INFO2_MASK */
#define INFO2_MASK_CLEAR GENMASK_ULL(31, 0)
#define INFO2_DTRANERR1 BIT(17)
#define INFO2_DTRANERR0 BIT(16)
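/*
 * DMA mapping state kept in data->host_cookie: COOKIE_PRE_MAPPED marks
 * buffers mapped ahead of time by .pre_req(), COOKIE_MAPPED those
 * mapped when the transfer starts, so each buffer is unmapped exactly
 * once in either path.
 */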
enum renesas_sdhi_dma_cookie {
COOKIE_UNMAPPED,
COOKIE_PRE_MAPPED,
COOKIE_MAPPED,
};
/*
* Specification of this driver:
* - host->chan_{rx,tx} are used as flags for enabling/disabling DMA
* - Since this SDHI DMAC register set is 64 bits wide but is accessed
*   32 bits at a time, we need a custom accessor.
*/
static unsigned long global_flags;
/*
* Workaround to avoid using the RX DMAC from multiple channels. On R-Car M3-W
* ES1.0, when multiple SDHI channels use the RX DMAC simultaneously, sometimes
* hundreds of data bytes are not stored into system memory even though the
* DMAC interrupt fired. So this driver uses only one RX DMAC channel.
*/
#define SDHI_INTERNAL_DMAC_RX_IN_USE 0
/* Definitions for sampling clocks */
static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
{
.clk_rate = 0,
.tap = 0x00000300,
.tap_hs400_4tap = 0x00000100,
},
};
static const struct renesas_sdhi_of_data of_data_rza2 = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
TMIO_MMC_HAVE_CBSY,
.tmio_ocr_mask = MMC_VDD_32_33,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY,
.bus_shift = 2,
.scc_offset = 0 - 0x1000,
.taps = rcar_gen3_scc_taps,
.taps_num = ARRAY_SIZE(rcar_gen3_scc_taps),
/* DMAC can handle 32bit blk count but only 1 segment */
.max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE,
.max_segs = 1,
};
static const struct renesas_sdhi_of_data of_data_rcar_gen3 = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY,
.capabilities2 = MMC_CAP2_NO_WRITE_PROTECT | MMC_CAP2_MERGE_CAPABLE,
.bus_shift = 2,
.scc_offset = 0x1000,
.taps = rcar_gen3_scc_taps,
.taps_num = ARRAY_SIZE(rcar_gen3_scc_taps),
/* DMAC can handle 32bit blk count but only 1 segment */
.max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE,
.max_segs = 1,
.sdhi_flags = SDHI_FLAG_NEED_CLKH_FALLBACK,
};
static const struct renesas_sdhi_of_data of_data_rcar_gen3_no_sdh_fallback = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY,
.capabilities2 = MMC_CAP2_NO_WRITE_PROTECT | MMC_CAP2_MERGE_CAPABLE,
.bus_shift = 2,
.scc_offset = 0x1000,
.taps = rcar_gen3_scc_taps,
.taps_num = ARRAY_SIZE(rcar_gen3_scc_taps),
/* DMAC can handle 32bit blk count but only 1 segment */
.max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE,
.max_segs = 1,
};
static const u8 r8a7796_es13_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
{ 3, 3, 3, 3, 3, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 15,
16, 16, 16, 16, 16, 16, 17, 18, 18, 19, 20, 21, 22, 23, 24, 25 },
{ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 8, 11,
12, 17, 18, 18, 18, 18, 18, 18, 18, 19, 20, 21, 22, 23, 25, 25 }
};
static const u8 r8a77965_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
{ 1, 2, 6, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 26, 27, 28, 29, 30, 31 },
{ 2, 3, 4, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17,
17, 17, 20, 21, 22, 23, 24, 25, 27, 28, 29, 30, 31, 31, 31, 31 }
};
static const u8 r8a77990_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 8, 9, 10,
11, 12, 13, 15, 16, 17, 17, 18, 18, 19, 20, 22, 24, 25, 26, 26 }
};
static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400 = {
.hs400_disabled = true,
.hs400_4taps = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400_one_rx = {
.hs400_disabled = true,
.hs400_4taps = true,
.dma_one_rx_only = true,
.old_info1_layout = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_4tap = {
.hs400_4taps = true,
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
.manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = {
.hs400_disabled = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_fixed_addr = {
.fixed_addr_mode = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps1357 = {
.hs400_bad_taps = BIT(1) | BIT(3) | BIT(5) | BIT(7),
.manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps2367 = {
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
.manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_r8a7796_es13 = {
.hs400_4taps = true,
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
.hs400_calib_table = r8a7796_es13_calib_table,
.manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_r8a77965 = {
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
.hs400_calib_table = r8a77965_calib_table,
.manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_r8a77990 = {
.hs400_calib_table = r8a77990_calib_table,
.manual_tap_correction = true,
};
static const struct renesas_sdhi_quirks sdhi_quirks_r9a09g011 = {
.fixed_addr_mode = true,
.hs400_disabled = true,
};
/*
* Note for r8a7796 / r8a774a1: we can't distinguish ES1.1 and 1.2 as of now.
* So, we want to treat them equally and only have a match for ES1.2 to
* enforce this in case there will ever be a way to distinguish ES1.2.
*/
static const struct soc_device_attribute sdhi_quirks_match[] = {
{ .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap },
{ .soc_id = "r8a7796", .revision = "ES1.0", .data = &sdhi_quirks_4tap_nohs400_one_rx },
{ .soc_id = "r8a7796", .revision = "ES1.[12]", .data = &sdhi_quirks_4tap_nohs400 },
{ .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 },
{ .soc_id = "r8a77980", .revision = "ES1.*", .data = &sdhi_quirks_nohs400 },
{ /* Sentinel. */ }
};
static const struct renesas_sdhi_of_data_with_quirks of_r8a7795_compatible = {
.of_data = &of_data_rcar_gen3,
.quirks = &sdhi_quirks_bad_taps2367,
};
static const struct renesas_sdhi_of_data_with_quirks of_r8a77961_compatible = {
.of_data = &of_data_rcar_gen3,
.quirks = &sdhi_quirks_bad_taps1357,
};
static const struct renesas_sdhi_of_data_with_quirks of_r8a77965_compatible = {
.of_data = &of_data_rcar_gen3,
.quirks = &sdhi_quirks_r8a77965,
};
static const struct renesas_sdhi_of_data_with_quirks of_r8a77970_compatible = {
.of_data = &of_data_rcar_gen3_no_sdh_fallback,
.quirks = &sdhi_quirks_nohs400,
};
static const struct renesas_sdhi_of_data_with_quirks of_r8a77990_compatible = {
.of_data = &of_data_rcar_gen3,
.quirks = &sdhi_quirks_r8a77990,
};
static const struct renesas_sdhi_of_data_with_quirks of_r9a09g011_compatible = {
.of_data = &of_data_rcar_gen3,
.quirks = &sdhi_quirks_r9a09g011,
};
static const struct renesas_sdhi_of_data_with_quirks of_rcar_gen3_compatible = {
.of_data = &of_data_rcar_gen3,
};
static const struct renesas_sdhi_of_data_with_quirks of_rcar_gen3_nohs400_compatible = {
.of_data = &of_data_rcar_gen3,
.quirks = &sdhi_quirks_nohs400,
};
static const struct renesas_sdhi_of_data_with_quirks of_rza2_compatible = {
.of_data = &of_data_rza2,
.quirks = &sdhi_quirks_fixed_addr,
};
static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r7s9210", .data = &of_rza2_compatible, },
{ .compatible = "renesas,sdhi-mmc-r8a77470", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,sdhi-r8a7795", .data = &of_r8a7795_compatible, },
{ .compatible = "renesas,sdhi-r8a77961", .data = &of_r8a77961_compatible, },
{ .compatible = "renesas,sdhi-r8a77965", .data = &of_r8a77965_compatible, },
{ .compatible = "renesas,sdhi-r8a77970", .data = &of_r8a77970_compatible, },
{ .compatible = "renesas,sdhi-r8a77990", .data = &of_r8a77990_compatible, },
{ .compatible = "renesas,sdhi-r8a77995", .data = &of_rcar_gen3_nohs400_compatible, },
{ .compatible = "renesas,sdhi-r9a09g011", .data = &of_r9a09g011_compatible, },
{ .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,rcar-gen4-sdhi", .data = &of_rcar_gen3_compatible, },
{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_internal_dmac_of_match);
static void
renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable)
{
struct renesas_sdhi *priv = host_to_priv(host);
u32 dma_irqs = INFO1_DTRANEND0 |
(sdhi_has_quirk(priv, old_info1_layout) ?
INFO1_DTRANEND1_OLD : INFO1_DTRANEND1);
if (!host->chan_tx || !host->chan_rx)
return;
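/* Bits set in INFO1_MASK disable the matching IRQs, so unmask only DTRANEND */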
writel(enable ? ~dma_irqs : INFO1_MASK_CLEAR, host->ctl + DM_CM_INFO1_MASK);
if (priv->dma_priv.enable)
priv->dma_priv.enable(host, enable);
}
static void
renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host)
{
u64 val = RST_DTRANRST1 | RST_DTRANRST0;
renesas_sdhi_internal_dmac_enable_dma(host, false);
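/* Pulse both DTRANRST bits low then high, keeping the reserved bits set */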
writel(RST_RESERVED_BITS & ~val, host->ctl + DM_CM_RST);
writel(RST_RESERVED_BITS | val, host->ctl + DM_CM_RST);
clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
renesas_sdhi_internal_dmac_enable_dma(host, true);
}
static bool renesas_sdhi_internal_dmac_dma_irq(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
struct renesas_sdhi_dma *dma_priv = &priv->dma_priv;
u32 dma_irqs = INFO1_DTRANEND0 |
(sdhi_has_quirk(priv, old_info1_layout) ?
INFO1_DTRANEND1_OLD : INFO1_DTRANEND1);
u32 status = readl(host->ctl + DM_CM_INFO1);
if (status & dma_irqs) {
writel(status ^ dma_irqs, host->ctl + DM_CM_INFO1);
set_bit(SDHI_DMA_END_FLAG_DMA, &dma_priv->end_flags);
if (test_bit(SDHI_DMA_END_FLAG_ACCESS, &dma_priv->end_flags))
tasklet_schedule(&dma_priv->dma_complete);
}
return status & dma_irqs;
}
static void
renesas_sdhi_internal_dmac_dataend_dma(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
struct renesas_sdhi_dma *dma_priv = &priv->dma_priv;
set_bit(SDHI_DMA_END_FLAG_ACCESS, &dma_priv->end_flags);
if (test_bit(SDHI_DMA_END_FLAG_DMA, &dma_priv->end_flags) ||
host->data->error)
tasklet_schedule(&dma_priv->dma_complete);
}
/*
* renesas_sdhi_internal_dmac_map() can be called with two different sg
* pointers in two mmc_data by .pre_req(), but the tmio host only has a
* single sg_ptr. So, renesas_sdhi_internal_dmac_{un}map() should use the
* sg pointer in an mmc_data instead of host->sg_ptr.
*/
static void
renesas_sdhi_internal_dmac_unmap(struct tmio_mmc_host *host,
struct mmc_data *data,
enum renesas_sdhi_dma_cookie cookie)
{
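/*
 * When called with COOKIE_UNMAPPED, unmap whatever is still mapped;
 * otherwise unmap only a buffer that was mapped with the given cookie.
 */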
bool unmap = cookie == COOKIE_UNMAPPED ? (data->host_cookie != cookie) :
(data->host_cookie == cookie);
if (unmap) {
dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
mmc_get_dma_dir(data));
data->host_cookie = COOKIE_UNMAPPED;
}
}
static bool
renesas_sdhi_internal_dmac_map(struct tmio_mmc_host *host,
struct mmc_data *data,
enum renesas_sdhi_dma_cookie cookie)
{
if (data->host_cookie == COOKIE_PRE_MAPPED)
return true;
if (!dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
mmc_get_dma_dir(data)))
return false;
data->host_cookie = cookie;
/* This DMAC needs buffers to be 128-byte aligned */
if (!IS_ALIGNED(sg_dma_address(data->sg), 128)) {
renesas_sdhi_internal_dmac_unmap(host, data, cookie);
return false;
}
return true;
}
static void
renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
struct mmc_data *data)
{
struct renesas_sdhi *priv = host_to_priv(host);
struct scatterlist *sg = host->sg_ptr;
u32 dtran_mode = DTRAN_MODE_BUS_WIDTH;
if (!sdhi_has_quirk(priv, fixed_addr_mode))
dtran_mode |= DTRAN_MODE_ADDR_MODE;
if (!renesas_sdhi_internal_dmac_map(host, data, COOKIE_MAPPED))
goto force_pio;
if (data->flags & MMC_DATA_READ) {
dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
if (sdhi_has_quirk(priv, dma_one_rx_only) &&
test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags))
goto force_pio_with_unmap;
} else {
dtran_mode |= DTRAN_MODE_CH_NUM_CH0;
}
priv->dma_priv.end_flags = 0;
renesas_sdhi_internal_dmac_enable_dma(host, true);
/* set dma parameters */
writel(dtran_mode, host->ctl + DM_CM_DTRAN_MODE);
writel(sg_dma_address(sg), host->ctl + DM_DTRAN_ADDR);
host->dma_on = true;
return;
force_pio_with_unmap:
renesas_sdhi_internal_dmac_unmap(host, data, COOKIE_UNMAPPED);
force_pio:
renesas_sdhi_internal_dmac_enable_dma(host, false);
}
static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg)
{
struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
struct renesas_sdhi *priv = host_to_priv(host);
tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
if (!host->cmd->error) {
/* start the DMAC */
writel(DTRAN_CTRL_DM_START, host->ctl + DM_CM_DTRAN_CTRL);
} else {
/* on CMD errors, simulate DMA end immediately */
set_bit(SDHI_DMA_END_FLAG_DMA, &priv->dma_priv.end_flags);
if (test_bit(SDHI_DMA_END_FLAG_ACCESS, &priv->dma_priv.end_flags))
tasklet_schedule(&priv->dma_priv.dma_complete);
}
}
static bool renesas_sdhi_internal_dmac_complete(struct tmio_mmc_host *host)
{
enum dma_data_direction dir;
if (!host->dma_on)
return false;
if (!host->data)
return false;
if (host->data->flags & MMC_DATA_READ)
dir = DMA_FROM_DEVICE;
else
dir = DMA_TO_DEVICE;
renesas_sdhi_internal_dmac_enable_dma(host, false);
renesas_sdhi_internal_dmac_unmap(host, host->data, COOKIE_MAPPED);
if (dir == DMA_FROM_DEVICE)
clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
host->dma_on = false;
return true;
}
static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
{
struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
spin_lock_irq(&host->lock);
if (!renesas_sdhi_internal_dmac_complete(host))
goto out;
tmio_mmc_do_data_irq(host);
out:
spin_unlock_irq(&host->lock);
}
static void renesas_sdhi_internal_dmac_end_dma(struct tmio_mmc_host *host)
{
if (host->data)
renesas_sdhi_internal_dmac_complete(host);
}
static void renesas_sdhi_internal_dmac_post_req(struct mmc_host *mmc,
struct mmc_request *mrq,
int err)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
if (!data)
return;
renesas_sdhi_internal_dmac_unmap(host, data, COOKIE_UNMAPPED);
}
static void renesas_sdhi_internal_dmac_pre_req(struct mmc_host *mmc,
struct mmc_request *mrq)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
if (!data)
return;
data->host_cookie = COOKIE_UNMAPPED;
renesas_sdhi_internal_dmac_map(host, data, COOKIE_PRE_MAPPED);
}
static void
renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata)
{
struct renesas_sdhi *priv = host_to_priv(host);
/* Disable DMAC interrupts initially */
writel(INFO1_MASK_CLEAR, host->ctl + DM_CM_INFO1_MASK);
writel(INFO2_MASK_CLEAR, host->ctl + DM_CM_INFO2_MASK);
writel(0, host->ctl + DM_CM_INFO1);
writel(0, host->ctl + DM_CM_INFO2);
/* Set each channel pointer to a dummy non-zero value so DMA counts as enabled */
host->chan_rx = host->chan_tx = (void *)0xdeadbeaf;
tasklet_init(&priv->dma_priv.dma_complete,
renesas_sdhi_internal_dmac_complete_tasklet_fn,
(unsigned long)host);
tasklet_init(&host->dma_issue,
renesas_sdhi_internal_dmac_issue_tasklet_fn,
(unsigned long)host);
/* Add pre_req and post_req */
host->ops.pre_req = renesas_sdhi_internal_dmac_pre_req;
host->ops.post_req = renesas_sdhi_internal_dmac_post_req;
}
static void
renesas_sdhi_internal_dmac_release_dma(struct tmio_mmc_host *host)
{
/* Clear each channel pointer so DMA counts as disabled */
host->chan_rx = host->chan_tx = NULL;
}
static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = {
.start = renesas_sdhi_internal_dmac_start_dma,
.enable = renesas_sdhi_internal_dmac_enable_dma,
.request = renesas_sdhi_internal_dmac_request_dma,
.release = renesas_sdhi_internal_dmac_release_dma,
.abort = renesas_sdhi_internal_dmac_abort_dma,
.dataend = renesas_sdhi_internal_dmac_dataend_dma,
.end = renesas_sdhi_internal_dmac_end_dma,
.dma_irq = renesas_sdhi_internal_dmac_dma_irq,
};
static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *attr;
const struct renesas_sdhi_of_data_with_quirks *of_data_quirks;
const struct renesas_sdhi_quirks *quirks;
struct device *dev = &pdev->dev;
of_data_quirks = of_device_get_match_data(&pdev->dev);
quirks = of_data_quirks->quirks;
attr = soc_device_match(sdhi_quirks_match);
if (attr)
quirks = attr->data;
/* value is max of SD_SECCNT. Confirmed by HW engineers */
dma_set_max_seg_size(dev, 0xffffffff);
return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops,
of_data_quirks->of_data, quirks);
}
static const struct dev_pm_ops renesas_sdhi_internal_dmac_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
tmio_mmc_host_runtime_resume,
NULL)
};
static struct platform_driver renesas_internal_dmac_sdhi_driver = {
.driver = {
.name = "renesas_sdhi_internal_dmac",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &renesas_sdhi_internal_dmac_dev_pm_ops,
.of_match_table = renesas_sdhi_internal_dmac_of_match,
},
.probe = renesas_sdhi_internal_dmac_probe,
.remove_new = renesas_sdhi_remove,
};
module_platform_driver(renesas_internal_dmac_sdhi_driver);
MODULE_DESCRIPTION("Renesas SDHI driver for internal DMAC");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mmc/host/renesas_sdhi_internal_dmac.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface
*
* Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
*
* Thanks to the following companies for their support:
*
* - JMicron (hardware and technical support)
*/
#include <linux/bitfield.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#ifdef CONFIG_X86
#include <asm/iosf_mbi.h>
#endif
#include "cqhci.h"
#include "sdhci.h"
#include "sdhci-cqhci.h"
#include "sdhci-pci.h"
static void sdhci_pci_hw_reset(struct sdhci_host *host);
#ifdef CONFIG_PM_SLEEP
static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
{
mmc_pm_flag_t pm_flags = 0;
bool cap_cd_wake = false;
int i;
for (i = 0; i < chip->num_slots; i++) {
struct sdhci_pci_slot *slot = chip->slots[i];
if (slot) {
pm_flags |= slot->host->mmc->pm_flags;
if (slot->host->mmc->caps & MMC_CAP_CD_WAKE)
cap_cd_wake = true;
}
}
if ((pm_flags & MMC_PM_KEEP_POWER) && (pm_flags & MMC_PM_WAKE_SDIO_IRQ))
return device_wakeup_enable(&chip->pdev->dev);
else if (!cap_cd_wake)
return device_wakeup_disable(&chip->pdev->dev);
return 0;
}
static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
{
int i, ret;
sdhci_pci_init_wakeup(chip);
for (i = 0; i < chip->num_slots; i++) {
struct sdhci_pci_slot *slot = chip->slots[i];
struct sdhci_host *host;
if (!slot)
continue;
host = slot->host;
if (chip->pm_retune && host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
ret = sdhci_suspend_host(host);
if (ret)
goto err_pci_suspend;
if (device_may_wakeup(&chip->pdev->dev))
mmc_gpio_set_cd_wake(host->mmc, true);
}
return 0;
err_pci_suspend:
while (--i >= 0)
sdhci_resume_host(chip->slots[i]->host);
return ret;
}
int sdhci_pci_resume_host(struct sdhci_pci_chip *chip)
{
struct sdhci_pci_slot *slot;
int i, ret;
for (i = 0; i < chip->num_slots; i++) {
slot = chip->slots[i];
if (!slot)
continue;
ret = sdhci_resume_host(slot->host);
if (ret)
return ret;
mmc_gpio_set_cd_wake(slot->host->mmc, false);
}
return 0;
}
static int sdhci_cqhci_suspend(struct sdhci_pci_chip *chip)
{
int ret;
ret = cqhci_suspend(chip->slots[0]->host->mmc);
if (ret)
return ret;
return sdhci_pci_suspend_host(chip);
}
static int sdhci_cqhci_resume(struct sdhci_pci_chip *chip)
{
int ret;
ret = sdhci_pci_resume_host(chip);
if (ret)
return ret;
return cqhci_resume(chip->slots[0]->host->mmc);
}
#endif
#ifdef CONFIG_PM
static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip)
{
struct sdhci_pci_slot *slot;
struct sdhci_host *host;
int i, ret;
for (i = 0; i < chip->num_slots; i++) {
slot = chip->slots[i];
if (!slot)
continue;
host = slot->host;
ret = sdhci_runtime_suspend_host(host);
if (ret)
goto err_pci_runtime_suspend;
if (chip->rpm_retune &&
host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
}
return 0;
err_pci_runtime_suspend:
while (--i >= 0)
sdhci_runtime_resume_host(chip->slots[i]->host, 0);
return ret;
}
static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
{
struct sdhci_pci_slot *slot;
int i, ret;
for (i = 0; i < chip->num_slots; i++) {
slot = chip->slots[i];
if (!slot)
continue;
ret = sdhci_runtime_resume_host(slot->host, 0);
if (ret)
return ret;
}
return 0;
}
static int sdhci_cqhci_runtime_suspend(struct sdhci_pci_chip *chip)
{
int ret;
ret = cqhci_suspend(chip->slots[0]->host->mmc);
if (ret)
return ret;
return sdhci_pci_runtime_suspend_host(chip);
}
static int sdhci_cqhci_runtime_resume(struct sdhci_pci_chip *chip)
{
int ret;
ret = sdhci_pci_runtime_resume_host(chip);
if (ret)
return ret;
return cqhci_resume(chip->slots[0]->host->mmc);
}
#endif
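/*
 * Interrupt dispatch when a CQE is present: sdhci_cqe_irq() claims the
 * interrupts belonging to the CQE, and anything it does not claim is
 * handed back to the sdhci core by returning intmask.
 */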
static u32 sdhci_cqhci_irq(struct sdhci_host *host, u32 intmask)
{
int cmd_error = 0;
int data_error = 0;
if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
return intmask;
cqhci_irq(host->mmc, intmask, cmd_error, data_error);
return 0;
}
static void sdhci_pci_dumpregs(struct mmc_host *mmc)
{
sdhci_dumpregs(mmc_priv(mmc));
}
/*****************************************************************************\
* *
* Hardware specific quirk handling *
* *
\*****************************************************************************/
static int ricoh_probe(struct sdhci_pci_chip *chip)
{
if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG ||
chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SONY)
chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET;
return 0;
}
static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot)
{
u32 caps =
FIELD_PREP(SDHCI_TIMEOUT_CLK_MASK, 0x21) |
FIELD_PREP(SDHCI_CLOCK_BASE_MASK, 0x21) |
SDHCI_TIMEOUT_CLK_UNIT |
SDHCI_CAN_VDD_330 |
SDHCI_CAN_DO_HISPD |
SDHCI_CAN_DO_SDMA;
u32 caps1 = 0;
__sdhci_read_caps(slot->host, NULL, &caps, &caps1);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int ricoh_mmc_resume(struct sdhci_pci_chip *chip)
{
/*
* Apply a delay to allow the controller to settle, otherwise it
* becomes confused if the card state changed during suspend.
*/
msleep(500);
return sdhci_pci_resume_host(chip);
}
#endif
static const struct sdhci_pci_fixes sdhci_ricoh = {
.probe = ricoh_probe,
.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
SDHCI_QUIRK_FORCE_DMA |
SDHCI_QUIRK_CLOCK_BEFORE_RESET,
};
static const struct sdhci_pci_fixes sdhci_ricoh_mmc = {
.probe_slot = ricoh_mmc_probe_slot,
#ifdef CONFIG_PM_SLEEP
.resume = ricoh_mmc_resume,
#endif
.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
SDHCI_QUIRK_CLOCK_BEFORE_RESET |
SDHCI_QUIRK_NO_CARD_NO_RESET,
};
static void ene_714_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sdhci_host *host = mmc_priv(mmc);
sdhci_set_ios(mmc, ios);
/*
* Some (ENE) controllers misbehave on some ios operations,
* signalling timeout and CRC errors even on CMD0. Resetting
* it on each ios seems to solve the problem.
*/
if (!(host->flags & SDHCI_DEVICE_DEAD))
sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
}
static int ene_714_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc_host_ops.set_ios = ene_714_set_ios;
return 0;
}
static const struct sdhci_pci_fixes sdhci_ene_712 = {
.quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_BROKEN_DMA,
};
static const struct sdhci_pci_fixes sdhci_ene_714 = {
.quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_BROKEN_DMA,
.probe_slot = ene_714_probe_slot,
};
static const struct sdhci_pci_fixes sdhci_cafe = {
.quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
SDHCI_QUIRK_NO_BUSY_IRQ |
SDHCI_QUIRK_BROKEN_CARD_DETECTION |
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
};
static const struct sdhci_pci_fixes sdhci_intel_qrk = {
.quirks = SDHCI_QUIRK_NO_HISPD_BIT,
};
static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
return 0;
}
/*
* ADMA operation is disabled for Moorestown platform due to
* hardware bugs.
*/
static int mrst_hc_probe(struct sdhci_pci_chip *chip)
{
/*
* the number of slots is fixed here for MRST, as SDIO3/5 are never used and
* have hardware bugs.
*/
chip->num_slots = 1;
return 0;
}
static int pch_hc_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
return 0;
}
static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
return 0;
}
static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
return 0;
}
static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
.quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
.probe_slot = mrst_hc_probe_slot,
};
static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
.quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
.probe = mrst_hc_probe,
};
static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.allow_runtime_pm = true,
.own_cd_for_runtime_pm = true,
};
static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
.allow_runtime_pm = true,
.probe_slot = mfd_sdio_probe_slot,
};
static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.allow_runtime_pm = true,
.probe_slot = mfd_emmc_probe_slot,
};
static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
.quirks = SDHCI_QUIRK_BROKEN_ADMA,
.probe_slot = pch_hc_probe_slot,
};
#ifdef CONFIG_X86
#define BYT_IOSF_SCCEP 0x63
#define BYT_IOSF_OCP_NETCTRL0 0x1078
#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
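/*
 * Clear the OCP timeout base field over IOSF-MBI on the Baytrail SDHCI
 * devices, which disables the fabric's OCP timeout for them.
 */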
static void byt_ocp_setting(struct pci_dev *pdev)
{
u32 val = 0;
if (pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC &&
pdev->device != PCI_DEVICE_ID_INTEL_BYT_SDIO &&
pdev->device != PCI_DEVICE_ID_INTEL_BYT_SD &&
pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC2)
return;
if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
&val)) {
dev_err(&pdev->dev, "%s read error\n", __func__);
return;
}
if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
return;
val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
val)) {
dev_err(&pdev->dev, "%s write error\n", __func__);
return;
}
dev_dbg(&pdev->dev, "%s completed\n", __func__);
}
#else
static inline void byt_ocp_setting(struct pci_dev *pdev)
{
}
#endif
enum {
INTEL_DSM_FNS = 0,
INTEL_DSM_V18_SWITCH = 3,
INTEL_DSM_V33_SWITCH = 4,
INTEL_DSM_DRV_STRENGTH = 9,
INTEL_DSM_D3_RETUNE = 10,
};
struct intel_host {
u32 dsm_fns;
int drv_strength;
bool d3_retune;
bool rpm_retune_ok;
bool needs_pwr_off;
u32 glk_rx_ctrl1;
u32 glk_tun_val;
u32 active_ltr;
u32 idle_ltr;
};
static const guid_t intel_dsm_guid =
GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);
static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
unsigned int fn, u32 *result)
{
union acpi_object *obj;
int err = 0;
size_t len;
obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
if (!obj)
return -EOPNOTSUPP;
if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
err = -EINVAL;
goto out;
}
len = min_t(size_t, obj->buffer.length, 4);
*result = 0;
memcpy(result, obj->buffer.pointer, len);
out:
ACPI_FREE(obj);
return err;
}
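/* Per the _DSM convention, function 0 returns a bitmask of the supported functions */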
static int intel_dsm(struct intel_host *intel_host, struct device *dev,
unsigned int fn, u32 *result)
{
if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
return -EOPNOTSUPP;
return __intel_dsm(intel_host, dev, fn, result);
}
static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
struct mmc_host *mmc)
{
int err;
u32 val;
intel_host->d3_retune = true;
err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
if (err) {
pr_debug("%s: DSM not supported, error %d\n",
mmc_hostname(mmc), err);
return;
}
pr_debug("%s: DSM function mask %#x\n",
mmc_hostname(mmc), intel_host->dsm_fns);
err = intel_dsm(intel_host, dev, INTEL_DSM_DRV_STRENGTH, &val);
intel_host->drv_strength = err ? 0 : val;
err = intel_dsm(intel_host, dev, INTEL_DSM_D3_RETUNE, &val);
intel_host->d3_retune = err ? true : !!val;
}
static void sdhci_pci_int_hw_reset(struct sdhci_host *host)
{
u8 reg;
reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
reg |= 0x10;
sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
/* For eMMC, minimum is 1us but give it 9us for good measure */
udelay(9);
reg &= ~0x10;
sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
/* For eMMC, minimum is 200us but give it 300us for good measure */
usleep_range(300, 1000);
}
static int intel_select_drive_strength(struct mmc_card *card,
unsigned int max_dtr, int host_drv,
int card_drv, int *drv_type)
{
struct sdhci_host *host = mmc_priv(card->host);
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct intel_host *intel_host = sdhci_pci_priv(slot);
if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv))
return 0;
return intel_host->drv_strength;
}
static int bxt_get_cd(struct mmc_host *mmc)
{
int gpio_cd = mmc_gpio_get_cd(mmc);
if (!gpio_cd)
return 0;
return sdhci_get_cd_nogpio(mmc);
}
static int mrfld_get_cd(struct mmc_host *mmc)
{
return sdhci_get_cd_nogpio(mmc);
}
#define SDHCI_INTEL_PWR_TIMEOUT_CNT 20
#define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100
static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
unsigned short vdd)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct intel_host *intel_host = sdhci_pci_priv(slot);
int cntr;
u8 reg;
/*
* Bus power may control card power, but a full reset still may not
* reset the power, whereas a direct write to SDHCI_POWER_CONTROL can.
* That might be needed to initialize correctly, if the card was left
* powered on previously.
*/
if (intel_host->needs_pwr_off) {
intel_host->needs_pwr_off = false;
if (mode != MMC_POWER_OFF) {
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
usleep_range(10000, 12500);
}
}
sdhci_set_power(host, mode, vdd);
if (mode == MMC_POWER_OFF)
return;
/*
* Bus power might not enable after D3 -> D0 transition due to the
* present state not yet having propagated. Retry for up to 2ms.
*/
for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) {
reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
if (reg & SDHCI_POWER_ON)
break;
udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY);
reg |= SDHCI_POWER_ON;
sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
}
}
static void sdhci_intel_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
/* Set UHS timing to SDR25 for High Speed mode */
if (timing == MMC_TIMING_MMC_HS || timing == MMC_TIMING_SD_HS)
timing = MMC_TIMING_UHS_SDR25;
sdhci_set_uhs_signaling(host, timing);
}
#define INTEL_HS400_ES_REG 0x78
#define INTEL_HS400_ES_BIT BIT(0)
static void intel_hs400_enhanced_strobe(struct mmc_host *mmc,
struct mmc_ios *ios)
{
struct sdhci_host *host = mmc_priv(mmc);
u32 val;
val = sdhci_readl(host, INTEL_HS400_ES_REG);
if (ios->enhanced_strobe)
val |= INTEL_HS400_ES_BIT;
else
val &= ~INTEL_HS400_ES_BIT;
sdhci_writel(host, val, INTEL_HS400_ES_REG);
}
static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
struct mmc_ios *ios)
{
struct device *dev = mmc_dev(mmc);
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct intel_host *intel_host = sdhci_pci_priv(slot);
unsigned int fn;
u32 result = 0;
int err;
err = sdhci_start_signal_voltage_switch(mmc, ios);
if (err)
return err;
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_330:
fn = INTEL_DSM_V33_SWITCH;
break;
case MMC_SIGNAL_VOLTAGE_180:
fn = INTEL_DSM_V18_SWITCH;
break;
default:
return 0;
}
err = intel_dsm(intel_host, dev, fn, &result);
pr_debug("%s: %s DSM fn %u error %d result %u\n",
mmc_hostname(mmc), __func__, fn, err, result);
return 0;
}
static const struct sdhci_ops sdhci_intel_byt_ops = {
.set_clock = sdhci_set_clock,
.set_power = sdhci_intel_set_power,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_intel_set_uhs_signaling,
.hw_reset = sdhci_pci_hw_reset,
};
static const struct sdhci_ops sdhci_intel_glk_ops = {
.set_clock = sdhci_set_clock,
.set_power = sdhci_intel_set_power,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_and_cqhci_reset,
.set_uhs_signaling = sdhci_intel_set_uhs_signaling,
.hw_reset = sdhci_pci_hw_reset,
.irq = sdhci_cqhci_irq,
};
static void byt_read_dsm(struct sdhci_pci_slot *slot)
{
struct intel_host *intel_host = sdhci_pci_priv(slot);
struct device *dev = &slot->chip->pdev->dev;
struct mmc_host *mmc = slot->host->mmc;
intel_dsm_init(intel_host, dev, mmc);
slot->chip->rpm_retune = intel_host->d3_retune;
}
static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
int err = sdhci_execute_tuning(mmc, opcode);
struct sdhci_host *host = mmc_priv(mmc);
if (err)
return err;
/*
* Tuning can leave the IP in an active state (Buffer Read Enable bit
* set) which prevents the entry to low power states (i.e. S0i3). Data
* reset will clear it.
*/
sdhci_reset(host, SDHCI_RESET_DATA);
return 0;
}
#define INTEL_ACTIVELTR 0x804
#define INTEL_IDLELTR 0x808
#define INTEL_LTR_REQ BIT(15)
#define INTEL_LTR_SCALE_MASK GENMASK(11, 10)
#define INTEL_LTR_SCALE_1US (2 << 10)
#define INTEL_LTR_SCALE_32US (3 << 10)
#define INTEL_LTR_VALUE_MASK GENMASK(9, 0)
static void intel_cache_ltr(struct sdhci_pci_slot *slot)
{
struct intel_host *intel_host = sdhci_pci_priv(slot);
struct sdhci_host *host = slot->host;
intel_host->active_ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
intel_host->idle_ltr = readl(host->ioaddr + INTEL_IDLELTR);
}
static void intel_ltr_set(struct device *dev, s32 val)
{
struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
struct sdhci_pci_slot *slot = chip->slots[0];
struct intel_host *intel_host = sdhci_pci_priv(slot);
struct sdhci_host *host = slot->host;
u32 ltr;
pm_runtime_get_sync(dev);
/*
* Program latency tolerance (LTR) according to what has been asked
* by the PM QoS layer, or disable it in case we were passed a
* negative value or PM_QOS_LATENCY_ANY.
*/
ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
if (val == PM_QOS_LATENCY_ANY || val < 0) {
ltr &= ~INTEL_LTR_REQ;
} else {
ltr |= INTEL_LTR_REQ;
ltr &= ~INTEL_LTR_SCALE_MASK;
ltr &= ~INTEL_LTR_VALUE_MASK;
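/* Values wider than 10 bits switch to the 32 us scale: 1 us units >> 5 */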
if (val > INTEL_LTR_VALUE_MASK) {
val >>= 5;
if (val > INTEL_LTR_VALUE_MASK)
val = INTEL_LTR_VALUE_MASK;
ltr |= INTEL_LTR_SCALE_32US | val;
} else {
ltr |= INTEL_LTR_SCALE_1US | val;
}
}
if (ltr == intel_host->active_ltr)
goto out;
writel(ltr, host->ioaddr + INTEL_ACTIVELTR);
writel(ltr, host->ioaddr + INTEL_IDLELTR);
/* Cache the values into lpss structure */
intel_cache_ltr(slot);
out:
pm_runtime_put_autosuspend(dev);
}
static bool intel_use_ltr(struct sdhci_pci_chip *chip)
{
switch (chip->pdev->device) {
case PCI_DEVICE_ID_INTEL_BYT_EMMC:
case PCI_DEVICE_ID_INTEL_BYT_EMMC2:
case PCI_DEVICE_ID_INTEL_BYT_SDIO:
case PCI_DEVICE_ID_INTEL_BYT_SD:
case PCI_DEVICE_ID_INTEL_BSW_EMMC:
case PCI_DEVICE_ID_INTEL_BSW_SDIO:
case PCI_DEVICE_ID_INTEL_BSW_SD:
return false;
default:
return true;
}
}
static void intel_ltr_expose(struct sdhci_pci_chip *chip)
{
struct device *dev = &chip->pdev->dev;
if (!intel_use_ltr(chip))
return;
dev->power.set_latency_tolerance = intel_ltr_set;
dev_pm_qos_expose_latency_tolerance(dev);
}
static void intel_ltr_hide(struct sdhci_pci_chip *chip)
{
struct device *dev = &chip->pdev->dev;
if (!intel_use_ltr(chip))
return;
dev_pm_qos_hide_latency_tolerance(dev);
dev->power.set_latency_tolerance = NULL;
}
static void byt_probe_slot(struct sdhci_pci_slot *slot)
{
struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
struct device *dev = &slot->chip->pdev->dev;
struct mmc_host *mmc = slot->host->mmc;
byt_read_dsm(slot);
byt_ocp_setting(slot->chip->pdev);
ops->execute_tuning = intel_execute_tuning;
ops->start_signal_voltage_switch = intel_start_signal_voltage_switch;
device_property_read_u32(dev, "max-frequency", &mmc->f_max);
if (!mmc->slotno) {
slot->chip->slots[mmc->slotno] = slot;
intel_ltr_expose(slot->chip);
}
}
static void byt_add_debugfs(struct sdhci_pci_slot *slot)
{
struct intel_host *intel_host = sdhci_pci_priv(slot);
struct mmc_host *mmc = slot->host->mmc;
struct dentry *dir = mmc->debugfs_root;
if (!intel_use_ltr(slot->chip))
return;
debugfs_create_x32("active_ltr", 0444, dir, &intel_host->active_ltr);
debugfs_create_x32("idle_ltr", 0444, dir, &intel_host->idle_ltr);
intel_cache_ltr(slot);
}
static int byt_add_host(struct sdhci_pci_slot *slot)
{
int ret = sdhci_add_host(slot->host);
if (!ret)
byt_add_debugfs(slot);
return ret;
}
static void byt_remove_slot(struct sdhci_pci_slot *slot, int dead)
{
struct mmc_host *mmc = slot->host->mmc;
if (!mmc->slotno)
intel_ltr_hide(slot->chip);
}
static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
byt_probe_slot(slot);
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
MMC_CAP_CMD_DURING_TFR |
MMC_CAP_WAIT_WHILE_BUSY;
slot->hw_reset = sdhci_pci_int_hw_reset;
if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC)
slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
slot->host->mmc_host_ops.select_drive_strength =
intel_select_drive_strength;
return 0;
}
static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
{
return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
(dmi_match(DMI_BIOS_VENDOR, "LENOVO") ||
dmi_match(DMI_SYS_VENDOR, "IRBIS"));
}
static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot)
{
return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_JSL_EMMC &&
dmi_match(DMI_BIOS_VENDOR, "ASUSTeK COMPUTER INC.");
}
static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
int ret = byt_emmc_probe_slot(slot);
if (!glk_broken_cqhci(slot))
slot->host->mmc->caps2 |= MMC_CAP2_CQE;
if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
if (!jsl_broken_hs400es(slot)) {
slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES;
slot->host->mmc_host_ops.hs400_enhanced_strobe =
intel_hs400_enhanced_strobe;
}
slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
}
return ret;
}
static const struct cqhci_host_ops glk_cqhci_ops = {
.enable = sdhci_cqe_enable,
.disable = sdhci_cqe_disable,
.dumpregs = sdhci_pci_dumpregs,
};
static int glk_emmc_add_host(struct sdhci_pci_slot *slot)
{
struct device *dev = &slot->chip->pdev->dev;
struct sdhci_host *host = slot->host;
struct cqhci_host *cq_host;
bool dma64;
int ret;
ret = sdhci_setup_host(host);
if (ret)
return ret;
cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL);
if (!cq_host) {
ret = -ENOMEM;
goto cleanup;
}
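/* On this controller the CQHCI register block sits 0x200 past the SDHCI registers */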
cq_host->mmio = host->ioaddr + 0x200;
cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
cq_host->ops = &glk_cqhci_ops;
dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
if (dma64)
cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
ret = cqhci_init(cq_host, host->mmc, dma64);
if (ret)
goto cleanup;
ret = __sdhci_add_host(host);
if (ret)
goto cleanup;
byt_add_debugfs(slot);
return 0;
cleanup:
sdhci_cleanup_host(host);
return ret;
}
#ifdef CONFIG_PM
#define GLK_RX_CTRL1 0x834
#define GLK_TUN_VAL 0x840
#define GLK_PATH_PLL GENMASK(13, 8)
#define GLK_DLY GENMASK(6, 0)
/* Workaround firmware failing to restore the tuning value */
static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp)
{
struct sdhci_pci_slot *slot = chip->slots[0];
struct intel_host *intel_host = sdhci_pci_priv(slot);
struct sdhci_host *host = slot->host;
u32 glk_rx_ctrl1;
u32 glk_tun_val;
u32 dly;
if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc))
return;
glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1);
glk_tun_val = sdhci_readl(host, GLK_TUN_VAL);
if (susp) {
intel_host->glk_rx_ctrl1 = glk_rx_ctrl1;
intel_host->glk_tun_val = glk_tun_val;
return;
}
if (!intel_host->glk_tun_val)
return;
if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) {
intel_host->rpm_retune_ok = true;
return;
}
dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) +
(intel_host->glk_tun_val << 1));
if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1))
return;
glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly;
sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1);
intel_host->rpm_retune_ok = true;
chip->rpm_retune = true;
mmc_retune_needed(host->mmc);
pr_info("%s: Requiring re-tune after rpm resume", mmc_hostname(host->mmc));
}
static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp)
{
if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
!chip->rpm_retune)
glk_rpm_retune_wa(chip, susp);
}
static int glk_runtime_suspend(struct sdhci_pci_chip *chip)
{
glk_rpm_retune_chk(chip, true);
return sdhci_cqhci_runtime_suspend(chip);
}
static int glk_runtime_resume(struct sdhci_pci_chip *chip)
{
glk_rpm_retune_chk(chip, false);
return sdhci_cqhci_runtime_resume(chip);
}
#endif
#ifdef CONFIG_ACPI
static int ni_set_max_freq(struct sdhci_pci_slot *slot)
{
acpi_status status;
unsigned long long max_freq;
status = acpi_evaluate_integer(ACPI_HANDLE(&slot->chip->pdev->dev),
"MXFQ", NULL, &max_freq);
if (ACPI_FAILURE(status)) {
dev_err(&slot->chip->pdev->dev,
"MXFQ not found in acpi table\n");
return -EINVAL;
}
slot->host->mmc->f_max = max_freq * 1000000;
return 0;
}
#else
static inline int ni_set_max_freq(struct sdhci_pci_slot *slot)
{
return 0;
}
#endif
static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
{
int err;
byt_probe_slot(slot);
err = ni_set_max_freq(slot);
if (err)
return err;
slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
MMC_CAP_WAIT_WHILE_BUSY;
return 0;
}
static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
{
byt_probe_slot(slot);
slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
MMC_CAP_WAIT_WHILE_BUSY;
return 0;
}
static void byt_needs_pwr_off(struct sdhci_pci_slot *slot)
{
struct intel_host *intel_host = sdhci_pci_priv(slot);
u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL);
intel_host->needs_pwr_off = reg & SDHCI_POWER_ON;
}
static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
{
byt_probe_slot(slot);
slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;
slot->cd_idx = 0;
slot->cd_override_level = true;
if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_SD)
slot->host->mmc_host_ops.get_cd = bxt_get_cd;
if (slot->chip->pdev->subsystem_vendor == PCI_VENDOR_ID_NI &&
slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3)
slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V;
byt_needs_pwr_off(slot);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int byt_resume(struct sdhci_pci_chip *chip)
{
byt_ocp_setting(chip->pdev);
return sdhci_pci_resume_host(chip);
}
#endif
#ifdef CONFIG_PM
static int byt_runtime_resume(struct sdhci_pci_chip *chip)
{
byt_ocp_setting(chip->pdev);
return sdhci_pci_runtime_resume_host(chip);
}
#endif
static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
#ifdef CONFIG_PM_SLEEP
.resume = byt_resume,
#endif
#ifdef CONFIG_PM
.runtime_resume = byt_runtime_resume,
#endif
.allow_runtime_pm = true,
.probe_slot = byt_emmc_probe_slot,
.add_host = byt_add_host,
.remove_slot = byt_remove_slot,
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
SDHCI_QUIRK_NO_LED,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
SDHCI_QUIRK2_STOP_WITH_TC,
.ops = &sdhci_intel_byt_ops,
.priv_size = sizeof(struct intel_host),
};
static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
.allow_runtime_pm = true,
.probe_slot = glk_emmc_probe_slot,
.add_host = glk_emmc_add_host,
.remove_slot = byt_remove_slot,
#ifdef CONFIG_PM_SLEEP
.suspend = sdhci_cqhci_suspend,
.resume = sdhci_cqhci_resume,
#endif
#ifdef CONFIG_PM
.runtime_suspend = glk_runtime_suspend,
.runtime_resume = glk_runtime_resume,
#endif
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
SDHCI_QUIRK_NO_LED,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
SDHCI_QUIRK2_STOP_WITH_TC,
.ops = &sdhci_intel_glk_ops,
.priv_size = sizeof(struct intel_host),
};
static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
#ifdef CONFIG_PM_SLEEP
.resume = byt_resume,
#endif
#ifdef CONFIG_PM
.runtime_resume = byt_runtime_resume,
#endif
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
SDHCI_QUIRK_NO_LED,
.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.allow_runtime_pm = true,
.probe_slot = ni_byt_sdio_probe_slot,
.add_host = byt_add_host,
.remove_slot = byt_remove_slot,
.ops = &sdhci_intel_byt_ops,
.priv_size = sizeof(struct intel_host),
};
static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
#ifdef CONFIG_PM_SLEEP
.resume = byt_resume,
#endif
#ifdef CONFIG_PM
.runtime_resume = byt_runtime_resume,
#endif
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
SDHCI_QUIRK_NO_LED,
.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.allow_runtime_pm = true,
.probe_slot = byt_sdio_probe_slot,
.add_host = byt_add_host,
.remove_slot = byt_remove_slot,
.ops = &sdhci_intel_byt_ops,
.priv_size = sizeof(struct intel_host),
};
static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
#ifdef CONFIG_PM_SLEEP
.resume = byt_resume,
#endif
#ifdef CONFIG_PM
.runtime_resume = byt_runtime_resume,
#endif
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
SDHCI_QUIRK_NO_LED,
.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_STOP_WITH_TC,
.allow_runtime_pm = true,
.own_cd_for_runtime_pm = true,
.probe_slot = byt_sd_probe_slot,
.add_host = byt_add_host,
.remove_slot = byt_remove_slot,
.ops = &sdhci_intel_byt_ops,
.priv_size = sizeof(struct intel_host),
};
/* Define Host controllers for Intel Merrifield platform */
#define INTEL_MRFLD_EMMC_0 0
#define INTEL_MRFLD_EMMC_1 1
#define INTEL_MRFLD_SD 2
#define INTEL_MRFLD_SDIO 3
#ifdef CONFIG_ACPI
static void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot)
{
struct acpi_device *device;
device = ACPI_COMPANION(&slot->chip->pdev->dev);
if (device)
acpi_device_fix_up_power_extended(device);
}
#else
static inline void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) {}
#endif
static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
{
unsigned int func = PCI_FUNC(slot->chip->pdev->devfn);
switch (func) {
case INTEL_MRFLD_EMMC_0:
case INTEL_MRFLD_EMMC_1:
slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
MMC_CAP_8_BIT_DATA |
MMC_CAP_1_8V_DDR;
break;
case INTEL_MRFLD_SD:
slot->cd_idx = 0;
slot->cd_override_level = true;
/*
* There are two PCB designs of SD card slot with the opposite
* card detection sense. Quirk this out by ignoring GPIO state
* completely in the custom ->get_cd() callback.
*/
slot->host->mmc_host_ops.get_cd = mrfld_get_cd;
slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
break;
case INTEL_MRFLD_SDIO:
/* Advertise 2.0v for compatibility with the SDIO card's OCR */
slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195;
slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
MMC_CAP_POWER_OFF_CARD;
break;
default:
return -ENODEV;
}
intel_mrfld_mmc_fix_up_power_slot(slot);
return 0;
}
static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.allow_runtime_pm = true,
.probe_slot = intel_mrfld_mmc_probe_slot,
};
static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
{
u8 scratch;
int ret;
ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
if (ret)
return ret;
/*
* Turn PMOS on [bit 0], set over current detection to 2.4 V
* [bit 1:2] and enable over current debouncing [bit 6].
*/
if (on)
scratch |= 0x47;
else
scratch &= ~0x47;
return pci_write_config_byte(chip->pdev, 0xAE, scratch);
}
static int jmicron_probe(struct sdhci_pci_chip *chip)
{
int ret;
u16 mmcdev = 0;
if (chip->pdev->revision == 0) {
chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
SDHCI_QUIRK_32BIT_DMA_SIZE |
SDHCI_QUIRK_32BIT_ADMA_SIZE |
SDHCI_QUIRK_RESET_AFTER_REQUEST |
SDHCI_QUIRK_BROKEN_SMALL_PIO;
}
/*
* JMicron chips can have two interfaces to the same hardware
* in order to work around limitations in Microsoft's driver.
* We need to make sure we only bind to one of them.
*
* This code assumes two things:
*
* 1. The PCI code adds subfunctions in order.
*
* 2. The MMC interface has a lower subfunction number
* than the SD interface.
*/
if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;
if (mmcdev) {
struct pci_dev *sd_dev;
sd_dev = NULL;
while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
mmcdev, sd_dev)) != NULL) {
if ((PCI_SLOT(chip->pdev->devfn) ==
PCI_SLOT(sd_dev->devfn)) &&
(chip->pdev->bus == sd_dev->bus))
break;
}
if (sd_dev) {
pci_dev_put(sd_dev);
dev_info(&chip->pdev->dev, "Refusing to bind to "
"secondary interface.\n");
return -ENODEV;
}
}
/*
* JMicron chips need a bit of a nudge to enable the power
* output pins.
*/
ret = jmicron_pmos(chip, 1);
if (ret) {
dev_err(&chip->pdev->dev, "Failure enabling card power\n");
return ret;
}
/* quirk for unstable RO detection on JM388 chips */
if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT;
return 0;
}
static void jmicron_enable_mmc(struct sdhci_host *host, int on)
{
u8 scratch;
scratch = readb(host->ioaddr + 0xC0);
if (on)
scratch |= 0x01;
else
scratch &= ~0x01;
writeb(scratch, host->ioaddr + 0xC0);
}
static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
{
if (slot->chip->pdev->revision == 0) {
u16 version;
version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION);
version = (version & SDHCI_VENDOR_VER_MASK) >>
SDHCI_VENDOR_VER_SHIFT;
/*
* Older versions of the chip have lots of nasty glitches
* in the ADMA engine. It's best just to avoid it
* completely.
*/
if (version < 0xAC)
slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
}
/* JM388 MMC doesn't support 1.8V while SD supports it */
if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
MMC_VDD_29_30 | MMC_VDD_30_31 |
MMC_VDD_165_195; /* allow 1.8V */
slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
}
/*
* The secondary interface requires a bit set to get the
* interrupts.
*/
if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
jmicron_enable_mmc(slot->host, 1);
slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
return 0;
}
static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
{
if (dead)
return;
if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
jmicron_enable_mmc(slot->host, 0);
}
#ifdef CONFIG_PM_SLEEP
static int jmicron_suspend(struct sdhci_pci_chip *chip)
{
int i, ret;
ret = sdhci_pci_suspend_host(chip);
if (ret)
return ret;
if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
for (i = 0; i < chip->num_slots; i++)
jmicron_enable_mmc(chip->slots[i]->host, 0);
}
return 0;
}
static int jmicron_resume(struct sdhci_pci_chip *chip)
{
int ret, i;
if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
for (i = 0; i < chip->num_slots; i++)
jmicron_enable_mmc(chip->slots[i]->host, 1);
}
ret = jmicron_pmos(chip, 1);
if (ret) {
dev_err(&chip->pdev->dev, "Failure enabling card power\n");
return ret;
}
return sdhci_pci_resume_host(chip);
}
#endif
static const struct sdhci_pci_fixes sdhci_jmicron = {
.probe = jmicron_probe,
.probe_slot = jmicron_probe_slot,
.remove_slot = jmicron_remove_slot,
#ifdef CONFIG_PM_SLEEP
.suspend = jmicron_suspend,
.resume = jmicron_resume,
#endif
};
/* SysKonnect CardBus2SDIO extra registers */
#define SYSKT_CTRL 0x200
#define SYSKT_RDFIFO_STAT 0x204
#define SYSKT_WRFIFO_STAT 0x208
#define SYSKT_POWER_DATA 0x20c
#define SYSKT_POWER_330 0xef
#define SYSKT_POWER_300 0xf8
#define SYSKT_POWER_184 0xcc
#define SYSKT_POWER_CMD 0x20d
#define SYSKT_POWER_START (1 << 7)
#define SYSKT_POWER_STATUS 0x20e
#define SYSKT_POWER_STATUS_OK (1 << 0)
#define SYSKT_BOARD_REV 0x210
#define SYSKT_CHIP_REV 0x211
#define SYSKT_CONF_DATA 0x212
#define SYSKT_CONF_DATA_1V8 (1 << 2)
#define SYSKT_CONF_DATA_2V5 (1 << 1)
#define SYSKT_CONF_DATA_3V3 (1 << 0)
static int syskt_probe(struct sdhci_pci_chip *chip)
{
if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
chip->pdev->class &= ~0x0000FF;
chip->pdev->class |= PCI_SDHCI_IFDMA;
}
return 0;
}
static int syskt_probe_slot(struct sdhci_pci_slot *slot)
{
int tm, ps;
u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV);
u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV);
dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, "
"board rev %d.%d, chip rev %d.%d\n",
board_rev >> 4, board_rev & 0xf,
chip_rev >> 4, chip_rev & 0xf);
if (chip_rev >= 0x20)
slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA;
writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA);
writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD);
udelay(50);
tm = 10; /* Wait max 1 ms */
do {
ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS);
if (ps & SYSKT_POWER_STATUS_OK)
break;
udelay(100);
} while (--tm);
if (!tm) {
dev_err(&slot->chip->pdev->dev,
"power regulator never stabilized");
writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD);
return -ENODEV;
}
return 0;
}
static const struct sdhci_pci_fixes sdhci_syskt = {
.quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER,
.probe = syskt_probe,
.probe_slot = syskt_probe_slot,
};
static int via_probe(struct sdhci_pci_chip *chip)
{
if (chip->pdev->revision == 0x10)
chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
return 0;
}
static const struct sdhci_pci_fixes sdhci_via = {
.probe = via_probe,
};
static int rtsx_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps2 |= MMC_CAP2_HS200;
return 0;
}
static const struct sdhci_pci_fixes sdhci_rtsx = {
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_BROKEN_64_BIT_DMA |
SDHCI_QUIRK2_BROKEN_DDR50,
.probe_slot = rtsx_probe_slot,
};
/* AMD chipset generation */
enum amd_chipset_gen {
AMD_CHIPSET_BEFORE_ML,
AMD_CHIPSET_CZ,
AMD_CHIPSET_NL,
AMD_CHIPSET_UNKNOWN,
};
/* AMD registers */
#define AMD_SD_AUTO_PATTERN 0xB8
#define AMD_MSLEEP_DURATION 4
#define AMD_SD_MISC_CONTROL 0xD0
#define AMD_MAX_TUNE_VALUE 0x0B
#define AMD_AUTO_TUNE_SEL 0x10800
#define AMD_FIFO_PTR 0x30
#define AMD_BIT_MASK 0x1F
static void amd_tuning_reset(struct sdhci_host *host)
{
unsigned int val;
val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
val |= SDHCI_CTRL_PRESET_VAL_ENABLE | SDHCI_CTRL_EXEC_TUNING;
sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
val &= ~SDHCI_CTRL_EXEC_TUNING;
sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
}
static void amd_config_tuning_phase(struct pci_dev *pdev, u8 phase)
{
unsigned int val;
pci_read_config_dword(pdev, AMD_SD_AUTO_PATTERN, &val);
val &= ~AMD_BIT_MASK;
val |= (AMD_AUTO_TUNE_SEL | (phase << 1));
pci_write_config_dword(pdev, AMD_SD_AUTO_PATTERN, val);
}
static void amd_enable_manual_tuning(struct pci_dev *pdev)
{
unsigned int val;
pci_read_config_dword(pdev, AMD_SD_MISC_CONTROL, &val);
val |= AMD_FIFO_PTR;
pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val);
}
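/*
 * Scan all 12 tuning phases and centre on the longest run of passing
 * phases. Worked example (illustrative numbers, not from real
 * hardware): if phases 3..7 pass, valid_win_max ends up 5 and
 * valid_win_end 7, so 7 - 5 / 2 selects phase 5, the middle of the
 * window (integer division binds tighter than the subtraction).
 */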
static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct pci_dev *pdev = slot->chip->pdev;
u8 valid_win = 0;
u8 valid_win_max = 0;
u8 valid_win_end = 0;
u8 ctrl, tune_around;
amd_tuning_reset(host);
for (tune_around = 0; tune_around < 12; tune_around++) {
amd_config_tuning_phase(pdev, tune_around);
if (mmc_send_tuning(host->mmc, opcode, NULL)) {
valid_win = 0;
msleep(AMD_MSLEEP_DURATION);
ctrl = SDHCI_RESET_CMD | SDHCI_RESET_DATA;
sdhci_writeb(host, ctrl, SDHCI_SOFTWARE_RESET);
} else if (++valid_win > valid_win_max) {
valid_win_max = valid_win;
valid_win_end = tune_around;
}
}
if (!valid_win_max) {
dev_err(&pdev->dev, "no tuning point found\n");
return -EIO;
}
amd_config_tuning_phase(pdev, valid_win_end - valid_win_max / 2);
amd_enable_manual_tuning(pdev);
host->mmc->retune_period = 0;
return 0;
}
static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
/* AMD requires custom HS200 tuning */
if (host->timing == MMC_TIMING_MMC_HS200)
return amd_execute_tuning_hs200(host, opcode);
/* Otherwise perform standard SDHCI tuning */
return sdhci_execute_tuning(mmc, opcode);
}
static int amd_probe_slot(struct sdhci_pci_slot *slot)
{
struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
ops->execute_tuning = amd_execute_tuning;
return 0;
}
static int amd_probe(struct sdhci_pci_chip *chip)
{
struct pci_dev *smbus_dev;
enum amd_chipset_gen gen;
smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
if (smbus_dev) {
gen = AMD_CHIPSET_BEFORE_ML;
} else {
smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
if (smbus_dev) {
if (smbus_dev->revision < 0x51)
gen = AMD_CHIPSET_CZ;
else
gen = AMD_CHIPSET_NL;
} else {
gen = AMD_CHIPSET_UNKNOWN;
}
}
pci_dev_put(smbus_dev);
if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ)
chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
return 0;
}
static u32 sdhci_read_present_state(struct sdhci_host *host)
{
return sdhci_readl(host, SDHCI_PRESENT_STATE);
}
static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct pci_dev *pdev = slot->chip->pdev;
u32 present_state;
/*
* SDHC 0x7906 requires a hard reset to clear all internal state.
* Otherwise it can get into a bad state where the DATA lines are always
* read as zeros.
*/
if (pdev->device == 0x7906 && (mask & SDHCI_RESET_ALL)) {
pci_clear_master(pdev);
pci_save_state(pdev);
pci_set_power_state(pdev, PCI_D3cold);
pr_debug("%s: power_state=%u\n", mmc_hostname(host->mmc),
pdev->current_state);
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
/*
* SDHCI_RESET_ALL says the card detect logic should not be
* reset, but since we need to reset the entire controller
* we should wait until the card detect logic has stabilized.
*
* This normally takes about 40ms.
*/
readx_poll_timeout(
sdhci_read_present_state,
host,
present_state,
present_state & SDHCI_CD_STABLE,
10000,
100000
);
}
sdhci_reset(host, mask);
}
static const struct sdhci_ops amd_sdhci_pci_ops = {
.set_clock = sdhci_set_clock,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
.reset = amd_sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
static const struct sdhci_pci_fixes sdhci_amd = {
.probe = amd_probe,
.ops = &amd_sdhci_pci_ops,
.probe_slot = amd_probe_slot,
};
static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(RICOH, R5C822, ricoh),
SDHCI_PCI_DEVICE(RICOH, R5C843, ricoh_mmc),
SDHCI_PCI_DEVICE(RICOH, R5CE822, ricoh_mmc),
SDHCI_PCI_DEVICE(RICOH, R5CE823, ricoh_mmc),
SDHCI_PCI_DEVICE(ENE, CB712_SD, ene_712),
SDHCI_PCI_DEVICE(ENE, CB712_SD_2, ene_712),
SDHCI_PCI_DEVICE(ENE, CB714_SD, ene_714),
SDHCI_PCI_DEVICE(ENE, CB714_SD_2, ene_714),
SDHCI_PCI_DEVICE(MARVELL, 88ALP01_SD, cafe),
SDHCI_PCI_DEVICE(JMICRON, JMB38X_SD, jmicron),
SDHCI_PCI_DEVICE(JMICRON, JMB38X_MMC, jmicron),
SDHCI_PCI_DEVICE(JMICRON, JMB388_SD, jmicron),
SDHCI_PCI_DEVICE(JMICRON, JMB388_ESD, jmicron),
SDHCI_PCI_DEVICE(SYSKONNECT, 8000, syskt),
SDHCI_PCI_DEVICE(VIA, 95D0, via),
SDHCI_PCI_DEVICE(REALTEK, 5250, rtsx),
SDHCI_PCI_DEVICE(INTEL, QRK_SD, intel_qrk),
SDHCI_PCI_DEVICE(INTEL, MRST_SD0, intel_mrst_hc0),
SDHCI_PCI_DEVICE(INTEL, MRST_SD1, intel_mrst_hc1_hc2),
SDHCI_PCI_DEVICE(INTEL, MRST_SD2, intel_mrst_hc1_hc2),
SDHCI_PCI_DEVICE(INTEL, MFD_SD, intel_mfd_sd),
SDHCI_PCI_DEVICE(INTEL, MFD_SDIO1, intel_mfd_sdio),
SDHCI_PCI_DEVICE(INTEL, MFD_SDIO2, intel_mfd_sdio),
SDHCI_PCI_DEVICE(INTEL, MFD_EMMC0, intel_mfd_emmc),
SDHCI_PCI_DEVICE(INTEL, MFD_EMMC1, intel_mfd_emmc),
SDHCI_PCI_DEVICE(INTEL, PCH_SDIO0, intel_pch_sdio),
SDHCI_PCI_DEVICE(INTEL, PCH_SDIO1, intel_pch_sdio),
SDHCI_PCI_DEVICE(INTEL, BYT_EMMC, intel_byt_emmc),
SDHCI_PCI_SUBDEVICE(INTEL, BYT_SDIO, NI, 7884, ni_byt_sdio),
SDHCI_PCI_DEVICE(INTEL, BYT_SDIO, intel_byt_sdio),
SDHCI_PCI_DEVICE(INTEL, BYT_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, BYT_EMMC2, intel_byt_emmc),
SDHCI_PCI_DEVICE(INTEL, BSW_EMMC, intel_byt_emmc),
SDHCI_PCI_DEVICE(INTEL, BSW_SDIO, intel_byt_sdio),
SDHCI_PCI_DEVICE(INTEL, BSW_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, CLV_SDIO0, intel_mfd_sd),
SDHCI_PCI_DEVICE(INTEL, CLV_SDIO1, intel_mfd_sdio),
SDHCI_PCI_DEVICE(INTEL, CLV_SDIO2, intel_mfd_sdio),
SDHCI_PCI_DEVICE(INTEL, CLV_EMMC0, intel_mfd_emmc),
SDHCI_PCI_DEVICE(INTEL, CLV_EMMC1, intel_mfd_emmc),
SDHCI_PCI_DEVICE(INTEL, MRFLD_MMC, intel_mrfld_mmc),
SDHCI_PCI_DEVICE(INTEL, SPT_EMMC, intel_byt_emmc),
SDHCI_PCI_DEVICE(INTEL, SPT_SDIO, intel_byt_sdio),
SDHCI_PCI_DEVICE(INTEL, SPT_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, DNV_EMMC, intel_byt_emmc),
SDHCI_PCI_DEVICE(INTEL, CDF_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, BXT_EMMC, intel_byt_emmc),
SDHCI_PCI_DEVICE(INTEL, BXT_SDIO, intel_byt_sdio),
SDHCI_PCI_DEVICE(INTEL, BXT_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, BXTM_EMMC, intel_byt_emmc),
SDHCI_PCI_DEVICE(INTEL, BXTM_SDIO, intel_byt_sdio),
SDHCI_PCI_DEVICE(INTEL, BXTM_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, APL_EMMC, intel_byt_emmc),
SDHCI_PCI_DEVICE(INTEL, APL_SDIO, intel_byt_sdio),
SDHCI_PCI_DEVICE(INTEL, APL_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, GLK_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, GLK_SDIO, intel_byt_sdio),
SDHCI_PCI_DEVICE(INTEL, GLK_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, CNP_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, CNP_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, CNPH_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, ICP_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, ICP_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, EHL_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, EHL_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, CMLH_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, JSL_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, LKF_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, LKF_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, ADL_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(O2, 8120, o2),
SDHCI_PCI_DEVICE(O2, 8220, o2),
SDHCI_PCI_DEVICE(O2, 8221, o2),
SDHCI_PCI_DEVICE(O2, 8320, o2),
SDHCI_PCI_DEVICE(O2, 8321, o2),
SDHCI_PCI_DEVICE(O2, FUJIN2, o2),
SDHCI_PCI_DEVICE(O2, SDS0, o2),
SDHCI_PCI_DEVICE(O2, SDS1, o2),
SDHCI_PCI_DEVICE(O2, SEABIRD0, o2),
SDHCI_PCI_DEVICE(O2, SEABIRD1, o2),
SDHCI_PCI_DEVICE(O2, GG8_9860, o2),
SDHCI_PCI_DEVICE(O2, GG8_9861, o2),
SDHCI_PCI_DEVICE(O2, GG8_9862, o2),
SDHCI_PCI_DEVICE(O2, GG8_9863, o2),
SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan),
SDHCI_PCI_DEVICE(SYNOPSYS, DWC_MSHC, snps),
SDHCI_PCI_DEVICE(GLI, 9750, gl9750),
SDHCI_PCI_DEVICE(GLI, 9755, gl9755),
SDHCI_PCI_DEVICE(GLI, 9763E, gl9763e),
SDHCI_PCI_DEVICE(GLI, 9767, gl9767),
SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
/* Generic SD host controller */
{PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(pci, pci_ids);
/*****************************************************************************\
* *
* SDHCI core callbacks *
* *
\*****************************************************************************/
int sdhci_pci_enable_dma(struct sdhci_host *host)
{
struct sdhci_pci_slot *slot;
struct pci_dev *pdev;
slot = sdhci_priv(host);
pdev = slot->chip->pdev;
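/*
 * pdev->class packs base class, subclass and programming interface
 * as 0xBBSSPP. Masking with 0xFFFF00 compares base class/subclass
 * against PCI_CLASS_SYSTEM_SDHCI; the low byte is the SDHCI
 * programming interface (0x00 PIO only, 0x01 DMA, 0x02 vendor).
 */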
if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
(host->flags & SDHCI_USE_SDMA)) {
dev_warn(&pdev->dev, "Will use DMA mode even though HW "
"doesn't fully claim to support it.\n");
}
pci_set_master(pdev);
return 0;
}
static void sdhci_pci_hw_reset(struct sdhci_host *host)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
if (slot->hw_reset)
slot->hw_reset(host);
}
static const struct sdhci_ops sdhci_pci_ops = {
.set_clock = sdhci_set_clock,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.hw_reset = sdhci_pci_hw_reset,
};
/*****************************************************************************\
* *
* Suspend/resume *
* *
\*****************************************************************************/
#ifdef CONFIG_PM_SLEEP
static int sdhci_pci_suspend(struct device *dev)
{
struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
if (!chip)
return 0;
if (chip->fixes && chip->fixes->suspend)
return chip->fixes->suspend(chip);
return sdhci_pci_suspend_host(chip);
}
static int sdhci_pci_resume(struct device *dev)
{
struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
if (!chip)
return 0;
if (chip->fixes && chip->fixes->resume)
return chip->fixes->resume(chip);
return sdhci_pci_resume_host(chip);
}
#endif
#ifdef CONFIG_PM
static int sdhci_pci_runtime_suspend(struct device *dev)
{
struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
if (!chip)
return 0;
if (chip->fixes && chip->fixes->runtime_suspend)
return chip->fixes->runtime_suspend(chip);
return sdhci_pci_runtime_suspend_host(chip);
}
static int sdhci_pci_runtime_resume(struct device *dev)
{
struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
if (!chip)
return 0;
if (chip->fixes && chip->fixes->runtime_resume)
return chip->fixes->runtime_resume(chip);
return sdhci_pci_runtime_resume_host(chip);
}
#endif
static const struct dev_pm_ops sdhci_pci_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(sdhci_pci_suspend, sdhci_pci_resume)
SET_RUNTIME_PM_OPS(sdhci_pci_runtime_suspend,
sdhci_pci_runtime_resume, NULL)
};
/*****************************************************************************\
* *
* Device probing/removal *
* *
\*****************************************************************************/
static struct sdhci_pci_slot *sdhci_pci_probe_slot(
struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar,
int slotno)
{
struct sdhci_pci_slot *slot;
struct sdhci_host *host;
int ret, bar = first_bar + slotno;
size_t priv_size = chip->fixes ? chip->fixes->priv_size : 0;
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
return ERR_PTR(-ENODEV);
}
if (pci_resource_len(pdev, bar) < 0x100) {
dev_err(&pdev->dev, "Invalid iomem size. You may "
"experience problems.\n");
}
if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n");
return ERR_PTR(-ENODEV);
}
if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
dev_err(&pdev->dev, "Unknown interface. Aborting.\n");
return ERR_PTR(-ENODEV);
}
host = sdhci_alloc_host(&pdev->dev, sizeof(*slot) + priv_size);
if (IS_ERR(host)) {
dev_err(&pdev->dev, "cannot allocate host\n");
return ERR_CAST(host);
}
slot = sdhci_priv(host);
slot->chip = chip;
slot->host = host;
slot->cd_idx = -1;
host->hw_name = "PCI";
host->ops = chip->fixes && chip->fixes->ops ?
chip->fixes->ops :
&sdhci_pci_ops;
host->quirks = chip->quirks;
host->quirks2 = chip->quirks2;
host->irq = pdev->irq;
ret = pcim_iomap_regions(pdev, BIT(bar), mmc_hostname(host->mmc));
if (ret) {
dev_err(&pdev->dev, "cannot request region\n");
goto cleanup;
}
host->ioaddr = pcim_iomap_table(pdev)[bar];
if (chip->fixes && chip->fixes->probe_slot) {
ret = chip->fixes->probe_slot(slot);
if (ret)
goto cleanup;
}
host->mmc->pm_caps = MMC_PM_KEEP_POWER;
host->mmc->slotno = slotno;
host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
if (device_can_wakeup(&pdev->dev))
host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
if (host->mmc->caps & MMC_CAP_CD_WAKE)
device_init_wakeup(&pdev->dev, true);
if (slot->cd_idx >= 0) {
ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
slot->cd_override_level, 0);
if (ret && ret != -EPROBE_DEFER)
ret = mmc_gpiod_request_cd(host->mmc, NULL,
slot->cd_idx,
slot->cd_override_level,
0);
if (ret == -EPROBE_DEFER)
goto remove;
if (ret) {
dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
slot->cd_idx = -1;
}
}
if (chip->fixes && chip->fixes->add_host)
ret = chip->fixes->add_host(slot);
else
ret = sdhci_add_host(host);
if (ret)
goto remove;
/*
* Check if the chip needs a separate GPIO for card detect to wake up
* from runtime suspend. If it is not there, don't allow runtime PM.
*/
if (chip->fixes && chip->fixes->own_cd_for_runtime_pm && slot->cd_idx < 0)
chip->allow_runtime_pm = false;
return slot;
remove:
if (chip->fixes && chip->fixes->remove_slot)
chip->fixes->remove_slot(slot, 0);
cleanup:
sdhci_free_host(host);
return ERR_PTR(ret);
}
static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
{
int dead;
u32 scratch;
dead = 0;
scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
if (scratch == (u32)-1)
dead = 1;
sdhci_remove_host(slot->host, dead);
if (slot->chip->fixes && slot->chip->fixes->remove_slot)
slot->chip->fixes->remove_slot(slot, dead);
sdhci_free_host(slot->host);
}
static void sdhci_pci_runtime_pm_allow(struct device *dev)
{
pm_suspend_ignore_children(dev, 1);
pm_runtime_set_autosuspend_delay(dev, 50);
pm_runtime_use_autosuspend(dev);
pm_runtime_allow(dev);
/* Stay active until mmc core scans for a card */
pm_runtime_put_noidle(dev);
}
static void sdhci_pci_runtime_pm_forbid(struct device *dev)
{
pm_runtime_forbid(dev);
pm_runtime_get_noresume(dev);
}
static int sdhci_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
u8 slots, first_bar;
int ret, i;
BUG_ON(pdev == NULL);
BUG_ON(ent == NULL);
dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
(int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
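/*
 * PCI_SLOT_INFO is the SDHCI Slot Information register in PCI config
 * space: bits [6:4] hold the number of slots minus one and bits [2:0]
 * the index of the first BAR, which is why both reads below decode
 * the same byte.
 */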
ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
if (ret)
return ret;
slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);
BUG_ON(slots > MAX_SLOTS);
ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
if (ret)
return ret;
first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
if (first_bar > 5) {
dev_err(&pdev->dev, "Invalid first BAR. Aborting.\n");
return -ENODEV;
}
ret = pcim_enable_device(pdev);
if (ret)
return ret;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
chip->pdev = pdev;
chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data;
if (chip->fixes) {
chip->quirks = chip->fixes->quirks;
chip->quirks2 = chip->fixes->quirks2;
chip->allow_runtime_pm = chip->fixes->allow_runtime_pm;
}
chip->num_slots = slots;
chip->pm_retune = true;
chip->rpm_retune = true;
pci_set_drvdata(pdev, chip);
if (chip->fixes && chip->fixes->probe) {
ret = chip->fixes->probe(chip);
if (ret)
return ret;
}
slots = chip->num_slots; /* Quirk may have changed this */
for (i = 0; i < slots; i++) {
slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i);
if (IS_ERR(slot)) {
for (i--; i >= 0; i--)
sdhci_pci_remove_slot(chip->slots[i]);
return PTR_ERR(slot);
}
chip->slots[i] = slot;
}
if (chip->allow_runtime_pm)
sdhci_pci_runtime_pm_allow(&pdev->dev);
return 0;
}
static void sdhci_pci_remove(struct pci_dev *pdev)
{
int i;
struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);
if (chip->allow_runtime_pm)
sdhci_pci_runtime_pm_forbid(&pdev->dev);
for (i = 0; i < chip->num_slots; i++)
sdhci_pci_remove_slot(chip->slots[i]);
}
static struct pci_driver sdhci_driver = {
.name = "sdhci-pci",
.id_table = pci_ids,
.probe = sdhci_pci_probe,
.remove = sdhci_pci_remove,
.driver = {
.pm = &sdhci_pci_pm_ops,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
module_pci_driver(sdhci_driver);
MODULE_AUTHOR("Pierre Ossman <[email protected]>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/mmc/host/sdhci-pci-core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/mmc/host/au1xmmc.c - AU1XX0 MMC driver
*
* Copyright (c) 2005, Advanced Micro Devices, Inc.
*
* Developed with help from the 2.4.30 MMC AU1XXX controller including
* the following copyright notices:
* Copyright (c) 2003-2004 Embedded Edge, LLC.
* Portions Copyright (C) 2002 Embedix, Inc
* Copyright 2002 Hewlett-Packard Company
* 2.6 version of this driver inspired by:
* (drivers/mmc/wbsd.c) Copyright (C) 2004-2005 Pierre Ossman,
* All Rights Reserved.
* (drivers/mmc/pxa.c) Copyright (C) 2003 Russell King,
* All Rights Reserved.
*
*/
/* Why don't we use the SD controllers' carddetect feature?
*
* From the AU1100 MMC application guide:
* If the Au1100-based design is intended to support both MultiMediaCards
* and 1- or 4-data bit SecureDigital cards, then the solution is to
* connect a weak (560KOhm) pull-up resistor to connector pin 1.
* In doing so, a MMC card never enters SPI-mode communications,
* but now the SecureDigital card-detect feature of CD/DAT3 is ineffective
* (the low to high transition will not occur).
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/leds.h>
#include <linux/mmc/host.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1100_mmc.h>
#define DRIVER_NAME "au1xxx-mmc"
/* Set this to enable special debugging macros */
/* #define DEBUG */
#ifdef DEBUG
#define DBG(fmt, idx, args...) \
pr_debug("au1xmmc(%d): DEBUG: " fmt, idx, ##args)
#else
#define DBG(fmt, idx, args...) do {} while (0)
#endif
/* Hardware definitions */
#define AU1XMMC_DESCRIPTOR_COUNT 1
/* max DMA seg size: 64KB on Au1100, 4MB on Au1200 */
#define AU1100_MMC_DESCRIPTOR_SIZE 0x0000ffff
#define AU1200_MMC_DESCRIPTOR_SIZE 0x003fffff
#define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \
MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \
MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36)
/* This gives us a hard value for the stop command that we can write directly
* to the command register.
*/
#define STOP_CMD \
(SD_CMD_RT_1B | SD_CMD_CT_7 | (0xC << SD_CMD_CI_SHIFT) | SD_CMD_GO)
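/* 0xC in the command index field is CMD12 (STOP_TRANSMISSION). */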
/* This is the set of interrupts that we configure by default. */
#define AU1XMMC_INTERRUPTS \
(SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_RAT | \
SD_CONFIG_CR | SD_CONFIG_I)
/* The poll event (looking for insert/remove events) runs twice a second. */
#define AU1XMMC_DETECT_TIMEOUT (HZ/2)
struct au1xmmc_host {
struct mmc_host *mmc;
struct mmc_request *mrq;
u32 flags;
void __iomem *iobase;
u32 clock;
u32 bus_width;
u32 power_mode;
int status;
struct {
int len;
int dir;
} dma;
struct {
int index;
int offset;
int len;
} pio;
u32 tx_chan;
u32 rx_chan;
int irq;
struct tasklet_struct finish_task;
struct tasklet_struct data_task;
struct au1xmmc_platform_data *platdata;
struct platform_device *pdev;
struct resource *ioarea;
struct clk *clk;
};
/* Status flags used by the host structure */
#define HOST_F_XMIT 0x0001
#define HOST_F_RECV 0x0002
#define HOST_F_DMA 0x0010
#define HOST_F_DBDMA 0x0020
#define HOST_F_ACTIVE 0x0100
#define HOST_F_STOP 0x1000
#define HOST_S_IDLE 0x0001
#define HOST_S_CMD 0x0002
#define HOST_S_DATA 0x0003
#define HOST_S_STOP 0x0004
/* Easy access macros */
#define HOST_STATUS(h) ((h)->iobase + SD_STATUS)
#define HOST_CONFIG(h) ((h)->iobase + SD_CONFIG)
#define HOST_ENABLE(h) ((h)->iobase + SD_ENABLE)
#define HOST_TXPORT(h) ((h)->iobase + SD_TXPORT)
#define HOST_RXPORT(h) ((h)->iobase + SD_RXPORT)
#define HOST_CMDARG(h) ((h)->iobase + SD_CMDARG)
#define HOST_BLKSIZE(h) ((h)->iobase + SD_BLKSIZE)
#define HOST_CMD(h) ((h)->iobase + SD_CMD)
#define HOST_CONFIG2(h) ((h)->iobase + SD_CONFIG2)
#define HOST_TIMEOUT(h) ((h)->iobase + SD_TIMEOUT)
#define HOST_DEBUG(h) ((h)->iobase + SD_DEBUG)
#define DMA_CHANNEL(h) \
(((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan)
static inline int has_dbdma(void)
{
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1200:
case ALCHEMY_CPU_AU1300:
return 1;
default:
return 0;
}
}
static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask)
{
u32 val = __raw_readl(HOST_CONFIG(host));
val |= mask;
__raw_writel(val, HOST_CONFIG(host));
wmb(); /* drain writebuffer */
}
static inline void FLUSH_FIFO(struct au1xmmc_host *host)
{
u32 val = __raw_readl(HOST_CONFIG2(host));
__raw_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host));
wmb(); /* drain writebuffer */
mdelay(1);
/* SEND_STOP will turn off clock control - this re-enables it */
val &= ~SD_CONFIG2_DF;
__raw_writel(val, HOST_CONFIG2(host));
wmb(); /* drain writebuffer */
}
static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask)
{
u32 val = __raw_readl(HOST_CONFIG(host));
val &= ~mask;
__raw_writel(val, HOST_CONFIG(host));
wmb(); /* drain writebuffer */
}
static inline void SEND_STOP(struct au1xmmc_host *host)
{
u32 config2;
WARN_ON(host->status != HOST_S_DATA);
host->status = HOST_S_STOP;
config2 = __raw_readl(HOST_CONFIG2(host));
__raw_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host));
wmb(); /* drain writebuffer */
/* Send the stop command */
__raw_writel(STOP_CMD, HOST_CMD(host));
wmb(); /* drain writebuffer */
}
static void au1xmmc_set_power(struct au1xmmc_host *host, int state)
{
if (host->platdata && host->platdata->set_power)
host->platdata->set_power(host->mmc, state);
}
static int au1xmmc_card_inserted(struct mmc_host *mmc)
{
struct au1xmmc_host *host = mmc_priv(mmc);
if (host->platdata && host->platdata->card_inserted)
return !!host->platdata->card_inserted(host->mmc);
return -ENOSYS;
}
static int au1xmmc_card_readonly(struct mmc_host *mmc)
{
struct au1xmmc_host *host = mmc_priv(mmc);
if (host->platdata && host->platdata->card_readonly)
return !!host->platdata->card_readonly(mmc);
return -ENOSYS;
}
static void au1xmmc_finish_request(struct au1xmmc_host *host)
{
struct mmc_request *mrq = host->mrq;
host->mrq = NULL;
host->flags &= HOST_F_ACTIVE | HOST_F_DMA;
host->dma.len = 0;
host->dma.dir = 0;
host->pio.index = 0;
host->pio.offset = 0;
host->pio.len = 0;
host->status = HOST_S_IDLE;
mmc_request_done(host->mmc, mrq);
}
static void au1xmmc_tasklet_finish(struct tasklet_struct *t)
{
struct au1xmmc_host *host = from_tasklet(host, t, finish_task);
au1xmmc_finish_request(host);
}
static int au1xmmc_send_command(struct au1xmmc_host *host,
struct mmc_command *cmd, struct mmc_data *data)
{
u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);
switch (mmc_resp_type(cmd)) {
case MMC_RSP_NONE:
break;
case MMC_RSP_R1:
mmccmd |= SD_CMD_RT_1;
break;
case MMC_RSP_R1B:
mmccmd |= SD_CMD_RT_1B;
break;
case MMC_RSP_R2:
mmccmd |= SD_CMD_RT_2;
break;
case MMC_RSP_R3:
mmccmd |= SD_CMD_RT_3;
break;
default:
pr_info("au1xmmc: unhandled response type %02x\n",
mmc_resp_type(cmd));
return -EINVAL;
}
if (data) {
if (data->flags & MMC_DATA_READ) {
if (data->blocks > 1)
mmccmd |= SD_CMD_CT_4;
else
mmccmd |= SD_CMD_CT_2;
} else if (data->flags & MMC_DATA_WRITE) {
if (data->blocks > 1)
mmccmd |= SD_CMD_CT_3;
else
mmccmd |= SD_CMD_CT_1;
}
}
__raw_writel(cmd->arg, HOST_CMDARG(host));
wmb(); /* drain writebuffer */
__raw_writel((mmccmd | SD_CMD_GO), HOST_CMD(host));
wmb(); /* drain writebuffer */
/* Wait for the command to go on the line */
while (__raw_readl(HOST_CMD(host)) & SD_CMD_GO)
/* nop */;
return 0;
}
static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
{
struct mmc_request *mrq = host->mrq;
struct mmc_data *data;
u32 crc;
WARN_ON((host->status != HOST_S_DATA) && (host->status != HOST_S_STOP));
if (host->mrq == NULL)
return;
data = mrq->cmd->data;
if (status == 0)
status = __raw_readl(HOST_STATUS(host));
/* The transaction is really over when the SD_STATUS_DB bit is clear */
while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB))
status = __raw_readl(HOST_STATUS(host));
data->error = 0;
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir);
/* Process any errors */
crc = (status & (SD_STATUS_WC | SD_STATUS_RC));
if (host->flags & HOST_F_XMIT)
crc |= ((status & 0x07) == 0x02) ? 0 : 1;
if (crc)
data->error = -EILSEQ;
/* Clear the CRC bits */
__raw_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host));
data->bytes_xfered = 0;
if (!data->error) {
if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {
u32 chan = DMA_CHANNEL(host);
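/*
 * As the cast below shows, the dbdma channel handle is really the
 * address of a chan_tab_t pointer, hence the double indirection to
 * reach the channel's live byte counter.
 */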
chan_tab_t *c = *((chan_tab_t **)chan);
au1x_dma_chan_t *cp = c->chan_ptr;
data->bytes_xfered = cp->ddma_bytecnt;
} else
data->bytes_xfered =
(data->blocks * data->blksz) - host->pio.len;
}
au1xmmc_finish_request(host);
}
static void au1xmmc_tasklet_data(struct tasklet_struct *t)
{
struct au1xmmc_host *host = from_tasklet(host, t, data_task);
u32 status = __raw_readl(HOST_STATUS(host));
au1xmmc_data_complete(host, status);
}
#define AU1XMMC_MAX_TRANSFER 8
static void au1xmmc_send_pio(struct au1xmmc_host *host)
{
struct mmc_data *data;
int sg_len, max, count;
unsigned char *sg_ptr, val;
u32 status;
struct scatterlist *sg;
data = host->mrq->data;
if (!(host->flags & HOST_F_XMIT))
return;
/* This is the pointer to the data buffer */
sg = &data->sg[host->pio.index];
sg_ptr = kmap_local_page(sg_page(sg)) + sg->offset + host->pio.offset;
/* This is the space left inside the buffer */
sg_len = data->sg[host->pio.index].length - host->pio.offset;
/* Check if we need less than the size of the sg_buffer */
max = (sg_len > host->pio.len) ? host->pio.len : sg_len;
if (max > AU1XMMC_MAX_TRANSFER)
max = AU1XMMC_MAX_TRANSFER;
for (count = 0; count < max; count++) {
status = __raw_readl(HOST_STATUS(host));
if (!(status & SD_STATUS_TH))
break;
val = sg_ptr[count];
__raw_writel((unsigned long)val, HOST_TXPORT(host));
wmb(); /* drain writebuffer */
}
kunmap_local(sg_ptr);
host->pio.len -= count;
host->pio.offset += count;
if (count == sg_len) {
host->pio.index++;
host->pio.offset = 0;
}
if (host->pio.len == 0) {
IRQ_OFF(host, SD_CONFIG_TH);
if (host->flags & HOST_F_STOP)
SEND_STOP(host);
tasklet_schedule(&host->data_task);
}
}
static void au1xmmc_receive_pio(struct au1xmmc_host *host)
{
struct mmc_data *data;
int max, count, sg_len = 0;
unsigned char *sg_ptr = NULL;
u32 status, val;
struct scatterlist *sg;
data = host->mrq->data;
if (!(host->flags & HOST_F_RECV))
return;
max = host->pio.len;
if (host->pio.index < host->dma.len) {
sg = &data->sg[host->pio.index];
sg_ptr = kmap_local_page(sg_page(sg)) + sg->offset + host->pio.offset;
/* This is the space left inside the buffer */
sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;
/* Check if we need less than the size of the sg_buffer */
if (sg_len < max)
max = sg_len;
}
if (max > AU1XMMC_MAX_TRANSFER)
max = AU1XMMC_MAX_TRANSFER;
for (count = 0; count < max; count++) {
status = __raw_readl(HOST_STATUS(host));
if (!(status & SD_STATUS_NE))
break;
if (status & SD_STATUS_RC) {
DBG("RX CRC Error [%d + %d].\n", host->pdev->id,
host->pio.len, count);
break;
}
if (status & SD_STATUS_RO) {
DBG("RX Overrun [%d + %d]\n", host->pdev->id,
host->pio.len, count);
break;
} else if (status & SD_STATUS_RU) {
DBG("RX Underrun [%d + %d]\n", host->pdev->id,
host->pio.len, count);
break;
}
val = __raw_readl(HOST_RXPORT(host));
if (sg_ptr)
sg_ptr[count] = (unsigned char)(val & 0xFF);
}
if (sg_ptr)
kunmap_local(sg_ptr);
host->pio.len -= count;
host->pio.offset += count;
if (sg_len && count == sg_len) {
host->pio.index++;
host->pio.offset = 0;
}
if (host->pio.len == 0) {
/* IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); */
IRQ_OFF(host, SD_CONFIG_NE);
if (host->flags & HOST_F_STOP)
SEND_STOP(host);
tasklet_schedule(&host->data_task);
}
}
/* This is called when a command has been completed - grab the response
* and check for errors. Then start the data transfer if it is indicated.
*/
static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
{
struct mmc_request *mrq = host->mrq;
struct mmc_command *cmd;
u32 r[4];
int i, trans;
if (!host->mrq)
return;
cmd = mrq->cmd;
cmd->error = 0;
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
r[0] = __raw_readl(host->iobase + SD_RESP3);
r[1] = __raw_readl(host->iobase + SD_RESP2);
r[2] = __raw_readl(host->iobase + SD_RESP1);
r[3] = __raw_readl(host->iobase + SD_RESP0);
/* The CRC is omitted from the response, so really
* we only got 120 bits, but the engine expects
* 128 bits, so we have to shift things up.
*/
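/* Concretely: resp[i] = r[i][23:0] << 8, with the top byte of
 * r[i + 1] filling the low byte for all but the last word.
 */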
for (i = 0; i < 4; i++) {
cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8;
if (i != 3)
cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24;
}
} else {
/* Technically, we should be getting all 48 bits of
* the response (SD_RESP1 + SD_RESP2), but because
* our response omits the CRC, our data ends up
* being shifted 8 bits to the right. In this case,
* that means that the OSR data starts at bit 31,
* so we can just read RESP0 and return that.
*/
cmd->resp[0] = __raw_readl(host->iobase + SD_RESP0);
}
}
/* Figure out errors */
if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC))
cmd->error = -EILSEQ;
trans = host->flags & (HOST_F_XMIT | HOST_F_RECV);
if (!trans || cmd->error) {
IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF);
tasklet_schedule(&host->finish_task);
return;
}
host->status = HOST_S_DATA;
if ((host->flags & (HOST_F_DMA | HOST_F_DBDMA))) {
u32 channel = DMA_CHANNEL(host);
/* Start the DBDMA as soon as the buffer gets something in it */
if (host->flags & HOST_F_RECV) {
u32 mask = SD_STATUS_DB | SD_STATUS_NE;
while ((status & mask) != mask)
status = __raw_readl(HOST_STATUS(host));
}
au1xxx_dbdma_start(channel);
}
}
static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate)
{
unsigned int pbus = clk_get_rate(host->clk);
unsigned int divisor = ((pbus / rate) / 2) - 1;
u32 config;
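/*
 * The resulting card clock is pbus / (2 * (divisor + 1)). Worked
 * example (assuming a 96 MHz peripheral clock): for a 24 MHz card
 * clock the divisor is ((96 / 24) / 2) - 1 = 1, giving 96 / 4 = 24.
 */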
config = __raw_readl(HOST_CONFIG(host));
config &= ~(SD_CONFIG_DIV);
config |= (divisor & SD_CONFIG_DIV) | SD_CONFIG_DE;
__raw_writel(config, HOST_CONFIG(host));
wmb(); /* drain writebuffer */
}
static int au1xmmc_prepare_data(struct au1xmmc_host *host,
struct mmc_data *data)
{
int datalen = data->blocks * data->blksz;
if (data->flags & MMC_DATA_READ)
host->flags |= HOST_F_RECV;
else
host->flags |= HOST_F_XMIT;
if (host->mrq->stop)
host->flags |= HOST_F_STOP;
host->dma.dir = DMA_BIDIRECTIONAL;
host->dma.len = dma_map_sg(mmc_dev(host->mmc), data->sg,
data->sg_len, host->dma.dir);
if (host->dma.len == 0)
return -ETIMEDOUT;
__raw_writel(data->blksz - 1, HOST_BLKSIZE(host));
if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {
int i;
u32 channel = DMA_CHANNEL(host);
au1xxx_dbdma_stop(channel);
for (i = 0; i < host->dma.len; i++) {
u32 ret = 0, flags = DDMA_FLAGS_NOIE;
struct scatterlist *sg = &data->sg[i];
int sg_len = sg->length;
int len = (datalen > sg_len) ? sg_len : datalen;
if (i == host->dma.len - 1)
flags = DDMA_FLAGS_IE;
if (host->flags & HOST_F_XMIT) {
ret = au1xxx_dbdma_put_source(channel,
sg_phys(sg), len, flags);
} else {
ret = au1xxx_dbdma_put_dest(channel,
sg_phys(sg), len, flags);
}
if (!ret)
goto dataerr;
datalen -= len;
}
} else {
host->pio.index = 0;
host->pio.offset = 0;
host->pio.len = datalen;
if (host->flags & HOST_F_XMIT)
IRQ_ON(host, SD_CONFIG_TH);
else
IRQ_ON(host, SD_CONFIG_NE);
/* IRQ_ON(host, SD_CONFIG_RA | SD_CONFIG_RF); */
}
return 0;
dataerr:
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
host->dma.dir);
return -ETIMEDOUT;
}
/* This actually starts a command or data transaction */
static void au1xmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct au1xmmc_host *host = mmc_priv(mmc);
int ret = 0;
WARN_ON(irqs_disabled());
WARN_ON(host->status != HOST_S_IDLE);
host->mrq = mrq;
host->status = HOST_S_CMD;
/* fail request immediately if no card is present */
if (0 == au1xmmc_card_inserted(mmc)) {
mrq->cmd->error = -ENOMEDIUM;
au1xmmc_finish_request(host);
return;
}
if (mrq->data) {
FLUSH_FIFO(host);
ret = au1xmmc_prepare_data(host, mrq->data);
}
if (!ret)
ret = au1xmmc_send_command(host, mrq->cmd, mrq->data);
if (ret) {
mrq->cmd->error = ret;
au1xmmc_finish_request(host);
}
}
static void au1xmmc_reset_controller(struct au1xmmc_host *host)
{
/* Apply the clock */
__raw_writel(SD_ENABLE_CE, HOST_ENABLE(host));
wmb(); /* drain writebuffer */
mdelay(1);
__raw_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host));
wmb(); /* drain writebuffer */
mdelay(5);
__raw_writel(~0, HOST_STATUS(host));
wmb(); /* drain writebuffer */
__raw_writel(0, HOST_BLKSIZE(host));
__raw_writel(0x001fffff, HOST_TIMEOUT(host));
wmb(); /* drain writebuffer */
__raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
wmb(); /* drain writebuffer */
__raw_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host));
wmb(); /* drain writebuffer */
mdelay(1);
__raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
wmb(); /* drain writebuffer */
/* Configure interrupts */
__raw_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host));
wmb(); /* drain writebuffer */
}
static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct au1xmmc_host *host = mmc_priv(mmc);
u32 config2;
if (ios->power_mode == MMC_POWER_OFF)
au1xmmc_set_power(host, 0);
else if (ios->power_mode == MMC_POWER_ON)
au1xmmc_set_power(host, 1);
if (ios->clock && ios->clock != host->clock) {
au1xmmc_set_clock(host, ios->clock);
host->clock = ios->clock;
}
config2 = __raw_readl(HOST_CONFIG2(host));
switch (ios->bus_width) {
case MMC_BUS_WIDTH_8:
config2 |= SD_CONFIG2_BB;
break;
case MMC_BUS_WIDTH_4:
config2 &= ~SD_CONFIG2_BB;
config2 |= SD_CONFIG2_WB;
break;
case MMC_BUS_WIDTH_1:
config2 &= ~(SD_CONFIG2_WB | SD_CONFIG2_BB);
break;
}
__raw_writel(config2, HOST_CONFIG2(host));
wmb(); /* drain writebuffer */
}
#define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT)
#define STATUS_DATA_IN (SD_STATUS_NE)
#define STATUS_DATA_OUT (SD_STATUS_TH)
static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
{
struct au1xmmc_host *host = dev_id;
u32 status;
status = __raw_readl(HOST_STATUS(host));
if (!(status & SD_STATUS_I))
return IRQ_NONE; /* not ours */
if (status & SD_STATUS_SI) /* SDIO */
mmc_signal_sdio_irq(host->mmc);
if (host->mrq && (status & STATUS_TIMEOUT)) {
if (status & SD_STATUS_RAT)
host->mrq->cmd->error = -ETIMEDOUT;
else if (status & SD_STATUS_DT)
host->mrq->data->error = -ETIMEDOUT;
/* In PIO mode, interrupts might still be enabled */
IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH);
/* IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); */
tasklet_schedule(&host->finish_task);
}
#if 0
else if (status & SD_STATUS_DD) {
/* Sometimes we get a DD before a NE in PIO mode */
if (!(host->flags & HOST_F_DMA) && (status & SD_STATUS_NE))
au1xmmc_receive_pio(host);
else {
au1xmmc_data_complete(host, status);
/* tasklet_schedule(&host->data_task); */
}
}
#endif
else if (status & SD_STATUS_CR) {
if (host->status == HOST_S_CMD)
au1xmmc_cmd_complete(host, status);
} else if (!(host->flags & HOST_F_DMA)) {
if ((host->flags & HOST_F_XMIT) && (status & STATUS_DATA_OUT))
au1xmmc_send_pio(host);
else if ((host->flags & HOST_F_RECV) && (status & STATUS_DATA_IN))
au1xmmc_receive_pio(host);
} else if (status & 0x203F3C70) {
DBG("Unhandled status %8.8x\n", host->pdev->id,
status);
}
__raw_writel(status, HOST_STATUS(host));
wmb(); /* drain writebuffer */
return IRQ_HANDLED;
}
/* 8bit memory DMA device */
static dbdev_tab_t au1xmmc_mem_dbdev = {
.dev_id = DSCR_CMD0_ALWAYS,
.dev_flags = DEV_FLAGS_ANYUSE,
.dev_tsize = 0,
.dev_devwidth = 8,
.dev_physaddr = 0x00000000,
.dev_intlevel = 0,
.dev_intpolarity = 0,
};
static int memid;
static void au1xmmc_dbdma_callback(int irq, void *dev_id)
{
struct au1xmmc_host *host = (struct au1xmmc_host *)dev_id;
/* Avoid spurious interrupts */
if (!host->mrq)
return;
if (host->flags & HOST_F_STOP)
SEND_STOP(host);
tasklet_schedule(&host->data_task);
}
static int au1xmmc_dbdma_init(struct au1xmmc_host *host)
{
struct resource *res;
int txid, rxid;
res = platform_get_resource(host->pdev, IORESOURCE_DMA, 0);
if (!res)
return -ENODEV;
txid = res->start;
res = platform_get_resource(host->pdev, IORESOURCE_DMA, 1);
if (!res)
return -ENODEV;
rxid = res->start;
if (!memid)
return -ENODEV;
host->tx_chan = au1xxx_dbdma_chan_alloc(memid, txid,
au1xmmc_dbdma_callback, (void *)host);
if (!host->tx_chan) {
dev_err(&host->pdev->dev, "cannot allocate TX DMA\n");
return -ENODEV;
}
host->rx_chan = au1xxx_dbdma_chan_alloc(rxid, memid,
au1xmmc_dbdma_callback, (void *)host);
if (!host->rx_chan) {
dev_err(&host->pdev->dev, "cannot allocate RX DMA\n");
au1xxx_dbdma_chan_free(host->tx_chan);
return -ENODEV;
}
au1xxx_dbdma_set_devwidth(host->tx_chan, 8);
au1xxx_dbdma_set_devwidth(host->rx_chan, 8);
au1xxx_dbdma_ring_alloc(host->tx_chan, AU1XMMC_DESCRIPTOR_COUNT);
au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT);
/* DBDMA is good to go */
host->flags |= HOST_F_DMA | HOST_F_DBDMA;
return 0;
}
static void au1xmmc_dbdma_shutdown(struct au1xmmc_host *host)
{
if (host->flags & HOST_F_DMA) {
host->flags &= ~HOST_F_DMA;
au1xxx_dbdma_chan_free(host->tx_chan);
au1xxx_dbdma_chan_free(host->rx_chan);
}
}
static void au1xmmc_enable_sdio_irq(struct mmc_host *mmc, int en)
{
struct au1xmmc_host *host = mmc_priv(mmc);
if (en)
IRQ_ON(host, SD_CONFIG_SI);
else
IRQ_OFF(host, SD_CONFIG_SI);
}
static const struct mmc_host_ops au1xmmc_ops = {
.request = au1xmmc_request,
.set_ios = au1xmmc_set_ios,
.get_ro = au1xmmc_card_readonly,
.get_cd = au1xmmc_card_inserted,
.enable_sdio_irq = au1xmmc_enable_sdio_irq,
};
static int au1xmmc_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct au1xmmc_host *host;
struct resource *r;
int ret, iflag;
mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev);
if (!mmc) {
dev_err(&pdev->dev, "no memory for mmc_host\n");
ret = -ENOMEM;
goto out0;
}
host = mmc_priv(mmc);
host->mmc = mmc;
host->platdata = pdev->dev.platform_data;
host->pdev = pdev;
ret = -ENODEV;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
dev_err(&pdev->dev, "no mmio defined\n");
goto out1;
}
host->ioarea = request_mem_region(r->start, resource_size(r),
pdev->name);
if (!host->ioarea) {
dev_err(&pdev->dev, "mmio already in use\n");
goto out1;
}
host->iobase = ioremap(r->start, 0x3c);
if (!host->iobase) {
dev_err(&pdev->dev, "cannot remap mmio\n");
goto out2;
}
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
ret = host->irq;
goto out3;
}
mmc->ops = &au1xmmc_ops;
mmc->f_min = 450000;
mmc->f_max = 24000000;
mmc->max_blk_size = 2048;
mmc->max_blk_count = 512;
mmc->ocr_avail = AU1XMMC_OCR;
mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT;
iflag = IRQF_SHARED; /* Au1100/Au1200: one int for both ctrls */
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1100:
mmc->max_seg_size = AU1100_MMC_DESCRIPTOR_SIZE;
break;
case ALCHEMY_CPU_AU1200:
mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE;
break;
case ALCHEMY_CPU_AU1300:
iflag = 0; /* nothing is shared */
mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE;
mmc->f_max = 52000000;
if (host->ioarea->start == AU1100_SD0_PHYS_ADDR)
mmc->caps |= MMC_CAP_8_BIT_DATA;
break;
}
ret = request_irq(host->irq, au1xmmc_irq, iflag, DRIVER_NAME, host);
if (ret) {
dev_err(&pdev->dev, "cannot grab IRQ\n");
goto out3;
}
host->clk = clk_get(&pdev->dev, ALCHEMY_PERIPH_CLK);
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "cannot find clock\n");
ret = PTR_ERR(host->clk);
goto out_irq;
}
ret = clk_prepare_enable(host->clk);
if (ret) {
dev_err(&pdev->dev, "cannot enable clock\n");
goto out_clk;
}
host->status = HOST_S_IDLE;
/* board-specific carddetect setup, if any */
if (host->platdata && host->platdata->cd_setup) {
ret = host->platdata->cd_setup(mmc, 1);
if (ret) {
dev_warn(&pdev->dev, "board CD setup failed\n");
mmc->caps |= MMC_CAP_NEEDS_POLL;
}
} else
mmc->caps |= MMC_CAP_NEEDS_POLL;
/* platform may not be able to use all advertised caps */
if (host->platdata)
mmc->caps &= ~(host->platdata->mask_host_caps);
tasklet_setup(&host->data_task, au1xmmc_tasklet_data);
tasklet_setup(&host->finish_task, au1xmmc_tasklet_finish);
if (has_dbdma()) {
ret = au1xmmc_dbdma_init(host);
if (ret)
pr_info(DRIVER_NAME ": DBDMA init failed; using PIO\n");
}
#ifdef CONFIG_LEDS_CLASS
if (host->platdata && host->platdata->led) {
struct led_classdev *led = host->platdata->led;
led->name = mmc_hostname(mmc);
led->brightness = LED_OFF;
led->default_trigger = mmc_hostname(mmc);
ret = led_classdev_register(mmc_dev(mmc), led);
if (ret)
goto out5;
}
#endif
au1xmmc_reset_controller(host);
ret = mmc_add_host(mmc);
if (ret) {
dev_err(&pdev->dev, "cannot add mmc host\n");
goto out6;
}
platform_set_drvdata(pdev, host);
pr_info(DRIVER_NAME ": MMC Controller %d set up at %p"
" (mode=%s)\n", pdev->id, host->iobase,
host->flags & HOST_F_DMA ? "dma" : "pio");
return 0; /* all ok */
out6:
#ifdef CONFIG_LEDS_CLASS
if (host->platdata && host->platdata->led)
led_classdev_unregister(host->platdata->led);
out5:
#endif
__raw_writel(0, HOST_ENABLE(host));
__raw_writel(0, HOST_CONFIG(host));
__raw_writel(0, HOST_CONFIG2(host));
wmb(); /* drain writebuffer */
if (host->flags & HOST_F_DBDMA)
au1xmmc_dbdma_shutdown(host);
tasklet_kill(&host->data_task);
tasklet_kill(&host->finish_task);
if (host->platdata && host->platdata->cd_setup &&
!(mmc->caps & MMC_CAP_NEEDS_POLL))
host->platdata->cd_setup(mmc, 0);
clk_disable_unprepare(host->clk);
out_clk:
clk_put(host->clk);
out_irq:
free_irq(host->irq, host);
out3:
iounmap((void *)host->iobase);
out2:
release_resource(host->ioarea);
kfree(host->ioarea);
out1:
mmc_free_host(mmc);
out0:
return ret;
}
static void au1xmmc_remove(struct platform_device *pdev)
{
struct au1xmmc_host *host = platform_get_drvdata(pdev);
if (host) {
mmc_remove_host(host->mmc);
#ifdef CONFIG_LEDS_CLASS
if (host->platdata && host->platdata->led)
led_classdev_unregister(host->platdata->led);
#endif
if (host->platdata && host->platdata->cd_setup &&
!(host->mmc->caps & MMC_CAP_NEEDS_POLL))
host->platdata->cd_setup(host->mmc, 0);
__raw_writel(0, HOST_ENABLE(host));
__raw_writel(0, HOST_CONFIG(host));
__raw_writel(0, HOST_CONFIG2(host));
wmb(); /* drain writebuffer */
tasklet_kill(&host->data_task);
tasklet_kill(&host->finish_task);
if (host->flags & HOST_F_DBDMA)
au1xmmc_dbdma_shutdown(host);
au1xmmc_set_power(host, 0);
clk_disable_unprepare(host->clk);
clk_put(host->clk);
free_irq(host->irq, host);
iounmap((void *)host->iobase);
release_resource(host->ioarea);
kfree(host->ioarea);
mmc_free_host(host->mmc);
}
}
#ifdef CONFIG_PM
static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
{
struct au1xmmc_host *host = platform_get_drvdata(pdev);
__raw_writel(0, HOST_CONFIG2(host));
__raw_writel(0, HOST_CONFIG(host));
__raw_writel(0xffffffff, HOST_STATUS(host));
__raw_writel(0, HOST_ENABLE(host));
wmb(); /* drain writebuffer */
return 0;
}
static int au1xmmc_resume(struct platform_device *pdev)
{
struct au1xmmc_host *host = platform_get_drvdata(pdev);
au1xmmc_reset_controller(host);
return 0;
}
#else
#define au1xmmc_suspend NULL
#define au1xmmc_resume NULL
#endif
static struct platform_driver au1xmmc_driver = {
.probe = au1xmmc_probe,
.remove_new = au1xmmc_remove,
.suspend = au1xmmc_suspend,
.resume = au1xmmc_resume,
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
static int __init au1xmmc_init(void)
{
if (has_dbdma()) {
/* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride
* of 8 bits. And since devices are shared, we need to create
* our own to avoid freaking out other devices.
*/
memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev);
if (!memid)
pr_err("au1xmmc: cannot add memory dbdma\n");
}
return platform_driver_register(&au1xmmc_driver);
}
static void __exit au1xmmc_exit(void)
{
if (has_dbdma() && memid)
au1xxx_ddma_del_device(memid);
platform_driver_unregister(&au1xmmc_driver);
}
module_init(au1xmmc_init);
module_exit(au1xmmc_exit);
MODULE_AUTHOR("Advanced Micro Devices, Inc");
MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:au1xxx-mmc");
| linux-master | drivers/mmc/host/au1xmmc.c |
/*
* drivers/mmc/host/omap_hsmmc.c
*
* Driver for OMAP2430/3430 MMC controller.
*
* Copyright (C) 2007 Texas Instruments.
*
* Authors:
* Syed Mohammed Khasim <[email protected]>
* Madhusudhan <[email protected]>
* Mohit Jalori <[email protected]>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/dmaengine.h>
#include <linux/seq_file.h>
#include <linux/sizes.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/timer.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
#include <linux/mmc/host.h>
#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/regulator/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/platform_data/hsmmc-omap.h>
/* OMAP HSMMC Host Controller Registers */
#define OMAP_HSMMC_SYSSTATUS 0x0014
#define OMAP_HSMMC_CON 0x002C
#define OMAP_HSMMC_SDMASA 0x0100
#define OMAP_HSMMC_BLK 0x0104
#define OMAP_HSMMC_ARG 0x0108
#define OMAP_HSMMC_CMD 0x010C
#define OMAP_HSMMC_RSP10 0x0110
#define OMAP_HSMMC_RSP32 0x0114
#define OMAP_HSMMC_RSP54 0x0118
#define OMAP_HSMMC_RSP76 0x011C
#define OMAP_HSMMC_DATA 0x0120
#define OMAP_HSMMC_PSTATE 0x0124
#define OMAP_HSMMC_HCTL 0x0128
#define OMAP_HSMMC_SYSCTL 0x012C
#define OMAP_HSMMC_STAT 0x0130
#define OMAP_HSMMC_IE 0x0134
#define OMAP_HSMMC_ISE 0x0138
#define OMAP_HSMMC_AC12 0x013C
#define OMAP_HSMMC_CAPA 0x0140
#define VS18 (1 << 26)
#define VS30 (1 << 25)
#define HSS (1 << 21)
#define SDVS18 (0x5 << 9)
#define SDVS30 (0x6 << 9)
#define SDVS33 (0x7 << 9)
#define SDVS_MASK 0x00000E00
#define SDVSCLR 0xFFFFF1FF
#define SDVSDET 0x00000400
#define AUTOIDLE 0x1
#define SDBP (1 << 8)
#define DTO 0xe
#define ICE 0x1
#define ICS 0x2
#define CEN (1 << 2)
#define CLKD_MAX 0x3FF /* max clock divisor: 1023 */
#define CLKD_MASK 0x0000FFC0
#define CLKD_SHIFT 6
#define DTO_MASK 0x000F0000
#define DTO_SHIFT 16
#define INIT_STREAM (1 << 1)
#define ACEN_ACMD23 (2 << 2)
#define DP_SELECT (1 << 21)
#define DDIR (1 << 4)
#define DMAE 0x1
#define MSBS (1 << 5)
#define BCE (1 << 1)
#define FOUR_BIT (1 << 1)
#define HSPE (1 << 2)
#define IWE (1 << 24)
#define DDR (1 << 19)
#define CLKEXTFREE (1 << 16)
#define CTPL (1 << 11)
#define DW8 (1 << 5)
#define OD 0x1
#define STAT_CLEAR 0xFFFFFFFF
#define INIT_STREAM_CMD 0x00000000
#define DUAL_VOLT_OCR_BIT 7
#define SRC (1 << 25)
#define SRD (1 << 26)
#define SOFTRESET (1 << 1)
/* PSTATE */
#define DLEV_DAT(x) (1 << (20 + (x)))
/* Interrupt masks for IE and ISE register */
#define CC_EN (1 << 0)
#define TC_EN (1 << 1)
#define BWR_EN (1 << 4)
#define BRR_EN (1 << 5)
#define CIRQ_EN (1 << 8)
#define ERR_EN (1 << 15)
#define CTO_EN (1 << 16)
#define CCRC_EN (1 << 17)
#define CEB_EN (1 << 18)
#define CIE_EN (1 << 19)
#define DTO_EN (1 << 20)
#define DCRC_EN (1 << 21)
#define DEB_EN (1 << 22)
#define ACE_EN (1 << 24)
#define CERR_EN (1 << 28)
#define BADA_EN (1 << 29)
#define INT_EN_MASK (BADA_EN | CERR_EN | ACE_EN | DEB_EN | DCRC_EN |\
DTO_EN | CIE_EN | CEB_EN | CCRC_EN | CTO_EN | \
BRR_EN | BWR_EN | TC_EN | CC_EN)
#define CNI (1 << 7)
#define ACIE (1 << 4)
#define ACEB (1 << 3)
#define ACCE (1 << 2)
#define ACTO (1 << 1)
#define ACNE (1 << 0)
#define MMC_AUTOSUSPEND_DELAY 100
#define MMC_TIMEOUT_MS 20 /* 20 mSec */
#define MMC_TIMEOUT_US 20000 /* 20000 micro Sec */
#define OMAP_MMC_MIN_CLOCK 400000
#define OMAP_MMC_MAX_CLOCK 52000000
#define DRIVER_NAME "omap_hsmmc"
/*
* One controller can have multiple slots, like on some omap boards using
* omap.c controller driver. Luckily this is not currently done on any known
* omap_hsmmc.c device.
*/
#define mmc_pdata(host) host->pdata
/*
* MMC Host controller read/write API's
*/
#define OMAP_HSMMC_READ(base, reg) \
__raw_readl((base) + OMAP_HSMMC_##reg)
#define OMAP_HSMMC_WRITE(base, reg, val) \
__raw_writel((val), (base) + OMAP_HSMMC_##reg)
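/*
 * Example: OMAP_HSMMC_WRITE(host->base, SYSCTL, val) pastes the
 * register name and expands to
 * __raw_writel(val, host->base + OMAP_HSMMC_SYSCTL).
 */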
struct omap_hsmmc_next {
unsigned int dma_len;
s32 cookie;
};
struct omap_hsmmc_host {
struct device *dev;
struct mmc_host *mmc;
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
struct clk *fclk;
struct clk *dbclk;
struct regulator *pbias;
bool pbias_enabled;
void __iomem *base;
bool vqmmc_enabled;
resource_size_t mapbase;
spinlock_t irq_lock; /* Prevent races with irq handler */
unsigned int dma_len;
unsigned int dma_sg_idx;
unsigned char bus_mode;
unsigned char power_mode;
int suspended;
u32 con;
u32 hctl;
u32 sysctl;
u32 capa;
int irq;
int wake_irq;
int use_dma, dma_ch;
struct dma_chan *tx_chan;
struct dma_chan *rx_chan;
int response_busy;
int context_loss;
int reqs_blocked;
int req_in_progress;
unsigned long clk_rate;
unsigned int flags;
#define AUTO_CMD23 (1 << 0) /* Auto CMD23 support */
#define HSMMC_SDIO_IRQ_ENABLED (1 << 1) /* SDIO irq enabled */
struct omap_hsmmc_next next_data;
struct omap_hsmmc_platform_data *pdata;
};
struct omap_mmc_of_data {
u32 reg_offset;
u8 controller_flags;
};
static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host);
static int omap_hsmmc_enable_supply(struct mmc_host *mmc)
{
int ret;
struct omap_hsmmc_host *host = mmc_priv(mmc);
struct mmc_ios *ios = &mmc->ios;
if (!IS_ERR(mmc->supply.vmmc)) {
ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
if (ret)
return ret;
}
/* Enable interface voltage rail, if needed */
if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
ret = regulator_enable(mmc->supply.vqmmc);
if (ret) {
dev_err(mmc_dev(mmc), "vmmc_aux reg enable failed\n");
goto err_vqmmc;
}
host->vqmmc_enabled = true;
}
return 0;
err_vqmmc:
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
return ret;
}
static int omap_hsmmc_disable_supply(struct mmc_host *mmc)
{
int ret;
int status;
struct omap_hsmmc_host *host = mmc_priv(mmc);
if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
ret = regulator_disable(mmc->supply.vqmmc);
if (ret) {
dev_err(mmc_dev(mmc), "vmmc_aux reg disable failed\n");
return ret;
}
host->vqmmc_enabled = false;
}
if (!IS_ERR(mmc->supply.vmmc)) {
ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
if (ret)
goto err_set_ocr;
}
return 0;
err_set_ocr:
if (!IS_ERR(mmc->supply.vqmmc)) {
status = regulator_enable(mmc->supply.vqmmc);
if (status)
dev_err(mmc_dev(mmc), "vmmc_aux re-enable failed\n");
}
return ret;
}
static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on)
{
int ret;
if (IS_ERR(host->pbias))
return 0;
if (power_on) {
if (!host->pbias_enabled) {
ret = regulator_enable(host->pbias);
if (ret) {
dev_err(host->dev, "pbias reg enable fail\n");
return ret;
}
host->pbias_enabled = true;
}
} else {
if (host->pbias_enabled) {
ret = regulator_disable(host->pbias);
if (ret) {
dev_err(host->dev, "pbias reg disable fail\n");
return ret;
}
host->pbias_enabled = false;
}
}
return 0;
}
static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on)
{
struct mmc_host *mmc = host->mmc;
int ret = 0;
/*
* If we don't see a Vcc regulator, assume it's a fixed
* voltage always-on regulator.
*/
if (IS_ERR(mmc->supply.vmmc))
return 0;
ret = omap_hsmmc_set_pbias(host, false);
if (ret)
return ret;
/*
* Assume Vcc regulator is used only to power the card ... OMAP
* VDDS is used to power the pins, optionally with a transceiver to
* support cards using voltages other than VDDS (1.8V nominal). When a
* transceiver is used, DAT3..7 are muxed as transceiver control pins.
*
* In some cases this regulator won't support enable/disable;
* e.g. it's a fixed rail for a WLAN chip.
*
* In other cases vcc_aux switches interface power. Example, for
* eMMC cards it represents VccQ. Sometimes transceivers or SDIO
* chips/cards need an interface voltage rail too.
*/
if (power_on) {
ret = omap_hsmmc_enable_supply(mmc);
if (ret)
return ret;
ret = omap_hsmmc_set_pbias(host, true);
if (ret)
goto err_set_voltage;
} else {
ret = omap_hsmmc_disable_supply(mmc);
if (ret)
return ret;
}
return 0;
err_set_voltage:
omap_hsmmc_disable_supply(mmc);
return ret;
}
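/*
 * Balance the usecount of a regulator left enabled by the bootloader:
 * regulator_enable() takes a reference before the regulator_disable()
 * that actually turns the supply off.
 */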
static int omap_hsmmc_disable_boot_regulator(struct regulator *reg)
{
int ret;
if (IS_ERR(reg))
return 0;
if (regulator_is_enabled(reg)) {
ret = regulator_enable(reg);
if (ret)
return ret;
ret = regulator_disable(reg);
if (ret)
return ret;
}
return 0;
}
static int omap_hsmmc_disable_boot_regulators(struct omap_hsmmc_host *host)
{
struct mmc_host *mmc = host->mmc;
int ret;
/*
* disable regulators enabled during boot and get the usecount
* right so that regulators can be enabled/disabled by checking
* the return value of regulator_is_enabled
*/
ret = omap_hsmmc_disable_boot_regulator(mmc->supply.vmmc);
if (ret) {
dev_err(host->dev, "fail to disable boot enabled vmmc reg\n");
return ret;
}
ret = omap_hsmmc_disable_boot_regulator(mmc->supply.vqmmc);
if (ret) {
dev_err(host->dev,
"fail to disable boot enabled vmmc_aux reg\n");
return ret;
}
ret = omap_hsmmc_disable_boot_regulator(host->pbias);
if (ret) {
dev_err(host->dev,
"failed to disable boot enabled pbias reg\n");
return ret;
}
return 0;
}
static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
{
int ret;
struct mmc_host *mmc = host->mmc;
ret = mmc_regulator_get_supply(mmc);
if (ret)
return ret;
/* Allow an aux regulator */
if (IS_ERR(mmc->supply.vqmmc)) {
mmc->supply.vqmmc = devm_regulator_get_optional(host->dev,
"vmmc_aux");
if (IS_ERR(mmc->supply.vqmmc)) {
ret = PTR_ERR(mmc->supply.vqmmc);
if ((ret != -ENODEV) && host->dev->of_node)
return ret;
dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n",
PTR_ERR(mmc->supply.vqmmc));
}
}
host->pbias = devm_regulator_get_optional(host->dev, "pbias");
if (IS_ERR(host->pbias)) {
ret = PTR_ERR(host->pbias);
if ((ret != -ENODEV) && host->dev->of_node) {
dev_err(host->dev,
"SD card detect fail? enable CONFIG_REGULATOR_PBIAS\n");
return ret;
}
dev_dbg(host->dev, "unable to get pbias regulator %ld\n",
PTR_ERR(host->pbias));
}
/* For eMMC do not power off when not in sleep state */
if (mmc_pdata(host)->no_regulator_off_init)
return 0;
ret = omap_hsmmc_disable_boot_regulators(host);
if (ret)
return ret;
return 0;
}
/*
* Start clock to the card
*/
static void omap_hsmmc_start_clock(struct omap_hsmmc_host *host)
{
OMAP_HSMMC_WRITE(host->base, SYSCTL,
OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
}
/*
* Stop clock to the card
*/
static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
{
OMAP_HSMMC_WRITE(host->base, SYSCTL,
OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);
if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0)
dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n");
}
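/*
 * Unmask the interrupts needed for the given command; buffer-ready
 * interrupts stay masked when DMA handles the data transfer.
 */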
static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host,
struct mmc_command *cmd)
{
u32 irq_mask = INT_EN_MASK;
unsigned long flags;
if (host->use_dma)
irq_mask &= ~(BRR_EN | BWR_EN);
/* Disable timeout for erases */
if (cmd->opcode == MMC_ERASE)
irq_mask &= ~DTO_EN;
spin_lock_irqsave(&host->irq_lock, flags);
OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
/* latch pending CIRQ, but don't signal MMC core */
if (host->flags & HSMMC_SDIO_IRQ_ENABLED)
irq_mask |= CIRQ_EN;
OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
spin_unlock_irqrestore(&host->irq_lock, flags);
}
static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
{
u32 irq_mask = 0;
unsigned long flags;
spin_lock_irqsave(&host->irq_lock, flags);
/* no transfer running but need to keep cirq if enabled */
if (host->flags & HSMMC_SDIO_IRQ_ENABLED)
irq_mask |= CIRQ_EN;
OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
spin_unlock_irqrestore(&host->irq_lock, flags);
}
/* Calculate divisor for the given clock frequency */
static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios)
{
u16 dsor = 0;
if (ios->clock) {
dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock);
if (dsor > CLKD_MAX)
dsor = CLKD_MAX;
}
return dsor;
}
static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
{
struct mmc_ios *ios = &host->mmc->ios;
unsigned long regval;
unsigned long timeout;
unsigned long clkdiv;
dev_vdbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock);
omap_hsmmc_stop_clock(host);
regval = OMAP_HSMMC_READ(host->base, SYSCTL);
regval = regval & ~(CLKD_MASK | DTO_MASK);
clkdiv = calc_divisor(host, ios);
regval = regval | (clkdiv << 6) | (DTO << 16);
OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
OMAP_HSMMC_WRITE(host->base, SYSCTL,
OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);
/* Wait till the ICS bit is set */
timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
&& time_before(jiffies, timeout))
cpu_relax();
/*
* Enable High-Speed Support
* Prerequisites:
* - Controller should support High-Speed-Enable Bit
* - Controller should not be using DDR Mode
* - Controller should advertise that it supports High Speed
* in capabilities register
* - MMC/SD clock coming out of controller > 25MHz
*/
if ((mmc_pdata(host)->features & HSMMC_HAS_HSPE_SUPPORT) &&
(ios->timing != MMC_TIMING_MMC_DDR52) &&
(ios->timing != MMC_TIMING_UHS_DDR50) &&
((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) {
regval = OMAP_HSMMC_READ(host->base, HCTL);
if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000)
regval |= HSPE;
else
regval &= ~HSPE;
OMAP_HSMMC_WRITE(host->base, HCTL, regval);
}
omap_hsmmc_start_clock(host);
}
static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
{
struct mmc_ios *ios = &host->mmc->ios;
u32 con;
con = OMAP_HSMMC_READ(host->base, CON);
if (ios->timing == MMC_TIMING_MMC_DDR52 ||
ios->timing == MMC_TIMING_UHS_DDR50)
con |= DDR; /* configure in DDR mode */
else
con &= ~DDR;
switch (ios->bus_width) {
case MMC_BUS_WIDTH_8:
OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
break;
case MMC_BUS_WIDTH_4:
OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
OMAP_HSMMC_WRITE(host->base, HCTL,
OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
break;
case MMC_BUS_WIDTH_1:
OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
OMAP_HSMMC_WRITE(host->base, HCTL,
OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
break;
}
}
static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host)
{
struct mmc_ios *ios = &host->mmc->ios;
u32 con;
con = OMAP_HSMMC_READ(host->base, CON);
if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
OMAP_HSMMC_WRITE(host->base, CON, con | OD);
else
OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
}
#ifdef CONFIG_PM
/*
* Restore the MMC host context, if it was lost as a result of a
* power state change.
*/
static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
{
struct mmc_ios *ios = &host->mmc->ios;
u32 hctl, capa;
unsigned long timeout;
if (host->con == OMAP_HSMMC_READ(host->base, CON) &&
host->hctl == OMAP_HSMMC_READ(host->base, HCTL) &&
host->sysctl == OMAP_HSMMC_READ(host->base, SYSCTL) &&
host->capa == OMAP_HSMMC_READ(host->base, CAPA))
return 0;
host->context_loss++;
if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
if (host->power_mode != MMC_POWER_OFF &&
(1 << ios->vdd) <= MMC_VDD_23_24)
hctl = SDVS18;
else
hctl = SDVS30;
capa = VS30 | VS18;
} else {
hctl = SDVS18;
capa = VS18;
}
if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
hctl |= IWE;
OMAP_HSMMC_WRITE(host->base, HCTL,
OMAP_HSMMC_READ(host->base, HCTL) | hctl);
OMAP_HSMMC_WRITE(host->base, CAPA,
OMAP_HSMMC_READ(host->base, CAPA) | capa);
OMAP_HSMMC_WRITE(host->base, HCTL,
OMAP_HSMMC_READ(host->base, HCTL) | SDBP);
timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
while ((OMAP_HSMMC_READ(host->base, HCTL) & SDBP) != SDBP
&& time_before(jiffies, timeout))
;
OMAP_HSMMC_WRITE(host->base, ISE, 0);
OMAP_HSMMC_WRITE(host->base, IE, 0);
OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
/* Do not initialize card-specific things if the power is off */
if (host->power_mode == MMC_POWER_OFF)
goto out;
omap_hsmmc_set_bus_width(host);
omap_hsmmc_set_clock(host);
omap_hsmmc_set_bus_mode(host);
out:
dev_dbg(mmc_dev(host->mmc), "context is restored: restore count %d\n",
host->context_loss);
return 0;
}
/*
* Save the MMC host context (store the number of power state changes so far).
*/
static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
host->con = OMAP_HSMMC_READ(host->base, CON);
host->hctl = OMAP_HSMMC_READ(host->base, HCTL);
host->sysctl = OMAP_HSMMC_READ(host->base, SYSCTL);
host->capa = OMAP_HSMMC_READ(host->base, CAPA);
}
#else
static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
}
#endif
/*
* Send init stream sequence to card
* before sending IDLE command
*/
static void send_init_stream(struct omap_hsmmc_host *host)
{
int reg = 0;
unsigned long timeout;
disable_irq(host->irq);
OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
OMAP_HSMMC_WRITE(host->base, CON,
OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);
timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
while ((reg != CC_EN) && time_before(jiffies, timeout))
reg = OMAP_HSMMC_READ(host->base, STAT) & CC_EN;
OMAP_HSMMC_WRITE(host->base, CON,
OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM);
OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
OMAP_HSMMC_READ(host->base, STAT);
enable_irq(host->irq);
}
static ssize_t
omap_hsmmc_show_slot_name(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
struct omap_hsmmc_host *host = mmc_priv(mmc);
return sprintf(buf, "%s\n", mmc_pdata(host)->name);
}
static DEVICE_ATTR(slot_name, S_IRUGO, omap_hsmmc_show_slot_name, NULL);
/*
* Configure the response type and send the cmd.
*/
static void
omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
struct mmc_data *data)
{
int cmdreg = 0, resptype = 0, cmdtype = 0;
dev_vdbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n",
mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
host->cmd = cmd;
omap_hsmmc_enable_irq(host, cmd);
host->response_busy = 0;
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136)
resptype = 1;
else if (cmd->flags & MMC_RSP_BUSY) {
resptype = 3;
host->response_busy = 1;
} else
resptype = 2;
}
/*
* Unlike the OMAP1 controller, the cmdtype does not seem to be based on
* ac, bc, adtc, bcr. Only commands ending an open-ended transfer need a
* value of 0x3; the rest use 0x0.
*/
if (cmd == host->mrq->stop)
cmdtype = 0x3;
cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);
if ((host->flags & AUTO_CMD23) && mmc_op_multi(cmd->opcode) &&
host->mrq->sbc) {
cmdreg |= ACEN_ACMD23;
OMAP_HSMMC_WRITE(host->base, SDMASA, host->mrq->sbc->arg);
}
if (data) {
cmdreg |= DP_SELECT | MSBS | BCE;
if (data->flags & MMC_DATA_READ)
cmdreg |= DDIR;
else
cmdreg &= ~(DDIR);
}
if (host->use_dma)
cmdreg |= DMAE;
host->req_in_progress = 1;
OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
}
static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
struct mmc_data *data)
{
return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
}
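/* Complete the request unless DMA for its data phase is still running */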
static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
{
int dma_ch;
unsigned long flags;
spin_lock_irqsave(&host->irq_lock, flags);
host->req_in_progress = 0;
dma_ch = host->dma_ch;
spin_unlock_irqrestore(&host->irq_lock, flags);
omap_hsmmc_disable_irq(host);
/* Do not complete the request if DMA is still in progress */
if (mrq->data && host->use_dma && dma_ch != -1)
return;
host->mrq = NULL;
mmc_request_done(host->mmc, mrq);
}
/*
* Notify the transfer complete to MMC core
*/
static void
omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
{
if (!data) {
struct mmc_request *mrq = host->mrq;
/* TC before CC from CMD6 - don't know why, but it happens */
if (host->cmd && host->cmd->opcode == 6 &&
host->response_busy) {
host->response_busy = 0;
return;
}
omap_hsmmc_request_done(host, mrq);
return;
}
host->data = NULL;
if (!data->error)
data->bytes_xfered += data->blocks * (data->blksz);
else
data->bytes_xfered = 0;
if (data->stop && (data->error || !host->mrq->sbc))
omap_hsmmc_start_command(host, data->stop, NULL);
else
omap_hsmmc_request_done(host, data->mrq);
}
/*
* Notify the core about command completion
*/
static void
omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
{
if (host->mrq->sbc && (host->cmd == host->mrq->sbc) &&
!host->mrq->sbc->error && !(host->flags & AUTO_CMD23)) {
host->cmd = NULL;
omap_hsmmc_start_dma_transfer(host);
omap_hsmmc_start_command(host, host->mrq->cmd,
host->mrq->data);
return;
}
host->cmd = NULL;
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
/* response type 2 */
cmd->resp[3] = OMAP_HSMMC_READ(host->base, RSP10);
cmd->resp[2] = OMAP_HSMMC_READ(host->base, RSP32);
cmd->resp[1] = OMAP_HSMMC_READ(host->base, RSP54);
cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP76);
} else {
/* response types 1, 1b, 3, 4, 5, 6 */
cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
}
}
if ((host->data == NULL && !host->response_busy) || cmd->error)
omap_hsmmc_request_done(host, host->mrq);
}
/*
* DMA clean up for command errors
*/
static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
{
int dma_ch;
unsigned long flags;
host->data->error = errno;
spin_lock_irqsave(&host->irq_lock, flags);
dma_ch = host->dma_ch;
host->dma_ch = -1;
spin_unlock_irqrestore(&host->irq_lock, flags);
if (host->use_dma && dma_ch != -1) {
struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);
dmaengine_terminate_all(chan);
dma_unmap_sg(chan->device->dev,
host->data->sg, host->data->sg_len,
mmc_get_dma_dir(host->data));
host->data->host_cookie = 0;
}
host->data = NULL;
}
/*
* Readable error output
*/
#ifdef CONFIG_MMC_DEBUG
static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status)
{
/* "---" means a reserved bit with no definition in the documentation */
static const char *omap_hsmmc_status_bits[] = {
"CC" , "TC" , "BGE", "---", "BWR" , "BRR" , "---" , "---" ,
"CIRQ", "OBI" , "---", "---", "---" , "---" , "---" , "ERRI",
"CTO" , "CCRC", "CEB", "CIE", "DTO" , "DCRC", "DEB" , "---" ,
"ACE" , "---" , "---", "---", "CERR", "BADA", "---" , "---"
};
char res[256];
char *buf = res;
int len, i;
len = sprintf(buf, "MMC IRQ 0x%x :", status);
buf += len;
for (i = 0; i < ARRAY_SIZE(omap_hsmmc_status_bits); i++)
if (status & (1 << i)) {
len = sprintf(buf, " %s", omap_hsmmc_status_bits[i]);
buf += len;
}
dev_vdbg(mmc_dev(host->mmc), "%s\n", res);
}
#else
static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host,
u32 status)
{
}
#endif /* CONFIG_MMC_DEBUG */
/*
* MMC controller internal state machine reset
*
* Used to reset the command or data internal state machine, using the
* SRC or SRD bit of the SYSCTL register respectively.
* Can be called from interrupt context.
*/
static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
unsigned long bit)
{
unsigned long i = 0;
unsigned long limit = MMC_TIMEOUT_US;
OMAP_HSMMC_WRITE(host->base, SYSCTL,
OMAP_HSMMC_READ(host->base, SYSCTL) | bit);
/*
* OMAP4 ES2 and greater has an updated reset logic.
* Monitor a 0->1 transition first
*/
if (mmc_pdata(host)->features & HSMMC_HAS_UPDATED_RESET) {
while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit))
&& (i++ < limit))
udelay(1);
}
i = 0;
while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) &&
(i++ < limit))
udelay(1);
if (OMAP_HSMMC_READ(host->base, SYSCTL) & bit)
dev_err(mmc_dev(host->mmc),
"Timeout waiting on controller reset in %s\n",
__func__);
}
static void hsmmc_command_incomplete(struct omap_hsmmc_host *host,
int err, int end_cmd)
{
if (end_cmd) {
omap_hsmmc_reset_controller_fsm(host, SRC);
if (host->cmd)
host->cmd->error = err;
}
if (host->data) {
omap_hsmmc_reset_controller_fsm(host, SRD);
omap_hsmmc_dma_cleanup(host, err);
} else if (host->mrq && host->mrq->cmd)
host->mrq->cmd->error = err;
}
static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
{
struct mmc_data *data;
int end_cmd = 0, end_trans = 0;
int error = 0;
data = host->data;
dev_vdbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
if (status & ERR_EN) {
omap_hsmmc_dbg_report_irq(host, status);
if (status & (CTO_EN | CCRC_EN | CEB_EN))
end_cmd = 1;
if (host->data || host->response_busy) {
end_trans = !end_cmd;
host->response_busy = 0;
}
if (status & (CTO_EN | DTO_EN))
hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN |
BADA_EN))
hsmmc_command_incomplete(host, -EILSEQ, end_cmd);
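/*
 * Auto CMD12/CMD23 error: if the auto command was actually
 * issued (ACNE clear), report the failure on the sbc.
 */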
if (status & ACE_EN) {
u32 ac12;
ac12 = OMAP_HSMMC_READ(host->base, AC12);
if (!(ac12 & ACNE) && host->mrq->sbc) {
end_cmd = 1;
if (ac12 & ACTO)
error = -ETIMEDOUT;
else if (ac12 & (ACCE | ACEB | ACIE))
error = -EILSEQ;
host->mrq->sbc->error = error;
hsmmc_command_incomplete(host, error, end_cmd);
}
dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
}
}
OMAP_HSMMC_WRITE(host->base, STAT, status);
if (end_cmd || ((status & CC_EN) && host->cmd))
omap_hsmmc_cmd_done(host, host->cmd);
if ((end_trans || (status & TC_EN)) && host->mrq)
omap_hsmmc_xfer_done(host, data);
}
/*
* MMC controller IRQ handler
*/
static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
{
struct omap_hsmmc_host *host = dev_id;
int status;
status = OMAP_HSMMC_READ(host->base, STAT);
while (status & (INT_EN_MASK | CIRQ_EN)) {
if (host->req_in_progress)
omap_hsmmc_do_irq(host, status);
if (status & CIRQ_EN)
mmc_signal_sdio_irq(host->mmc);
/* Flush posted write */
status = OMAP_HSMMC_READ(host->base, STAT);
}
return IRQ_HANDLED;
}
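/* Set the SD bus power bit and wait for the controller to latch it */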
static void set_sd_bus_power(struct omap_hsmmc_host *host)
{
unsigned long i;
OMAP_HSMMC_WRITE(host->base, HCTL,
OMAP_HSMMC_READ(host->base, HCTL) | SDBP);
for (i = 0; i < loops_per_jiffy; i++) {
if (OMAP_HSMMC_READ(host->base, HCTL) & SDBP)
break;
cpu_relax();
}
}
/*
* Switch MMC interface voltage ... only relevant for MMC1.
*
* MMC2 and MMC3 use fixed 1.8V levels, and maybe a transceiver.
* The MMC2 transceiver controls are used instead of DAT4..DAT7.
* Some chips, like eMMC ones, use internal transceivers.
*/
static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
{
u32 reg_val = 0;
int ret;
/* Disable the clocks */
clk_disable_unprepare(host->dbclk);
/* Turn the power off */
ret = omap_hsmmc_set_power(host, 0);
/* Turn the power ON with given VDD 1.8 or 3.0v */
if (!ret)
ret = omap_hsmmc_set_power(host, 1);
clk_prepare_enable(host->dbclk);
if (ret != 0)
goto err;
OMAP_HSMMC_WRITE(host->base, HCTL,
OMAP_HSMMC_READ(host->base, HCTL) & SDVSCLR);
reg_val = OMAP_HSMMC_READ(host->base, HCTL);
/*
* If a MMC dual voltage card is detected, the set_ios fn calls
* this fn with VDD bit set for 1.8V. Upon card removal from the
* slot, omap_hsmmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF.
*
* Cope with a bit of slop in the range ... per data sheets:
* - "1.8V" for vdds_mmc1/vdds_mmc1a can be up to 2.45V max,
* but recommended values are 1.71V to 1.89V
* - "3.0V" for vdds_mmc1/vdds_mmc1a can be up to 3.5V max,
* but recommended values are 2.7V to 3.3V
*
* Board setup code shouldn't permit anything very out-of-range.
* TWL4030-family VMMC1 and VSIM regulators are fine (avoiding the
* middle range) but VSIM can't power DAT4..DAT7 at more than 3V.
*/
if ((1 << vdd) <= MMC_VDD_23_24)
reg_val |= SDVS18;
else
reg_val |= SDVS30;
OMAP_HSMMC_WRITE(host->base, HCTL, reg_val);
set_sd_bus_power(host);
return 0;
err:
dev_err(mmc_dev(host->mmc), "Unable to switch operating voltage\n");
return ret;
}
static void omap_hsmmc_dma_callback(void *param)
{
struct omap_hsmmc_host *host = param;
struct dma_chan *chan;
struct mmc_data *data;
int req_in_progress;
spin_lock_irq(&host->irq_lock);
if (host->dma_ch < 0) {
spin_unlock_irq(&host->irq_lock);
return;
}
data = host->mrq->data;
chan = omap_hsmmc_get_dma_chan(host, data);
if (!data->host_cookie)
dma_unmap_sg(chan->device->dev,
data->sg, data->sg_len,
mmc_get_dma_dir(data));
req_in_progress = host->req_in_progress;
host->dma_ch = -1;
spin_unlock_irq(&host->irq_lock);
/* If DMA has finished after TC, complete the request */
if (!req_in_progress) {
struct mmc_request *mrq = host->mrq;
host->mrq = NULL;
mmc_request_done(host->mmc, mrq);
}
}
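/*
 * Map the scatterlist for DMA, or reuse the mapping done in ->pre_req()
 * when the cookie matches the prepared next_data.
 */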
static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
struct mmc_data *data,
struct omap_hsmmc_next *next,
struct dma_chan *chan)
{
int dma_len;
if (!next && data->host_cookie &&
data->host_cookie != host->next_data.cookie) {
dev_warn(host->dev, "[%s] invalid cookie: data->host_cookie %d"
" host->next_data.cookie %d\n",
__func__, data->host_cookie, host->next_data.cookie);
data->host_cookie = 0;
}
/* Check if next job is already prepared */
if (next || data->host_cookie != host->next_data.cookie) {
dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
mmc_get_dma_dir(data));
} else {
dma_len = host->next_data.dma_len;
host->next_data.dma_len = 0;
}
if (dma_len == 0)
return -EINVAL;
if (next) {
next->dma_len = dma_len;
data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
} else
host->dma_len = dma_len;
return 0;
}
/*
* Routine to configure and start DMA for the MMC card
*/
static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
struct mmc_request *req)
{
struct dma_async_tx_descriptor *tx;
int ret = 0, i;
struct mmc_data *data = req->data;
struct dma_chan *chan;
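/* both directions target the controller DATA register; only one is used per transfer */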
struct dma_slave_config cfg = {
.src_addr = host->mapbase + OMAP_HSMMC_DATA,
.dst_addr = host->mapbase + OMAP_HSMMC_DATA,
.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
.src_maxburst = data->blksz / 4,
.dst_maxburst = data->blksz / 4,
};
/* Sanity check: all the SG entries must be aligned by block size. */
for (i = 0; i < data->sg_len; i++) {
struct scatterlist *sgl;
sgl = data->sg + i;
if (sgl->length % data->blksz)
return -EINVAL;
}
if ((data->blksz % 4) != 0)
/* REVISIT: The MMC buffer increments only when the MSB is written.
 * Return an error for a blksz that is not a multiple of four.
 */
return -EINVAL;
BUG_ON(host->dma_ch != -1);
chan = omap_hsmmc_get_dma_chan(host, data);
ret = dmaengine_slave_config(chan, &cfg);
if (ret)
return ret;
ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan);
if (ret)
return ret;
tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx) {
dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
/* FIXME: cleanup */
return -1;
}
tx->callback = omap_hsmmc_dma_callback;
tx->callback_param = host;
/* Does not fail */
dmaengine_submit(tx);
host->dma_ch = 1;
return 0;
}
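/*
 * Program the data timeout counter. The controller times out after
 * 2^(DTO + 13) functional clock cycles, so convert the requested timeout
 * into clock cycles and pick the smallest DTO field whose window covers
 * it, clamped to the valid 0..14 range.
 */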
static void set_data_timeout(struct omap_hsmmc_host *host,
unsigned long long timeout_ns,
unsigned int timeout_clks)
{
unsigned long long timeout = timeout_ns;
unsigned int cycle_ns;
uint32_t reg, clkd, dto = 0;
reg = OMAP_HSMMC_READ(host->base, SYSCTL);
clkd = (reg & CLKD_MASK) >> CLKD_SHIFT;
if (clkd == 0)
clkd = 1;
cycle_ns = 1000000000 / (host->clk_rate / clkd);
do_div(timeout, cycle_ns);
timeout += timeout_clks;
if (timeout) {
while ((timeout & 0x80000000) == 0) {
dto += 1;
timeout <<= 1;
}
dto = 31 - dto;
timeout <<= 1;
if (timeout && dto)
dto += 1;
if (dto >= 13)
dto -= 13;
else
dto = 0;
if (dto > 14)
dto = 14;
}
reg &= ~DTO_MASK;
reg |= dto << DTO_SHIFT;
OMAP_HSMMC_WRITE(host->base, SYSCTL, reg);
}
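/* Program the block size/count and data timeout, then kick the DMA engine */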
static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host)
{
struct mmc_request *req = host->mrq;
struct dma_chan *chan;
if (!req->data)
return;
OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz)
| (req->data->blocks << 16));
set_data_timeout(host, req->data->timeout_ns,
req->data->timeout_clks);
chan = omap_hsmmc_get_dma_chan(host, req->data);
dma_async_issue_pending(chan);
}
/*
* Configure block length for MMC/SD cards and initiate the transfer.
*/
static int
omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
{
int ret;
unsigned long long timeout;
host->data = req->data;
if (req->data == NULL) {
OMAP_HSMMC_WRITE(host->base, BLK, 0);
if (req->cmd->flags & MMC_RSP_BUSY) {
timeout = req->cmd->busy_timeout * NSEC_PER_MSEC;
/*
* Set an arbitrary 100ms data timeout for commands with
* busy signal and no indication of busy_timeout.
*/
if (!timeout)
timeout = 100000000U;
set_data_timeout(host, timeout, 0);
}
return 0;
}
if (host->use_dma) {
ret = omap_hsmmc_setup_dma_transfer(host, req);
if (ret != 0) {
dev_err(mmc_dev(host->mmc), "MMC start dma failure\n");
return ret;
}
}
return 0;
}
static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
int err)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
if (host->use_dma && data->host_cookie) {
struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);
dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
mmc_get_dma_dir(data));
data->host_cookie = 0;
}
}
static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
if (mrq->data->host_cookie) {
mrq->data->host_cookie = 0;
return;
}
if (host->use_dma) {
struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);
if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
&host->next_data, c))
mrq->data->host_cookie = 0;
}
}
/*
* Request function for read/write operations
*/
static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
int err;
BUG_ON(host->req_in_progress);
BUG_ON(host->dma_ch != -1);
if (host->reqs_blocked)
host->reqs_blocked = 0;
WARN_ON(host->mrq != NULL);
host->mrq = req;
host->clk_rate = clk_get_rate(host->fclk);
err = omap_hsmmc_prepare_data(host, req);
if (err) {
req->cmd->error = err;
if (req->data)
req->data->error = err;
host->mrq = NULL;
mmc_request_done(mmc, req);
return;
}
if (req->sbc && !(host->flags & AUTO_CMD23)) {
omap_hsmmc_start_command(host, req->sbc, NULL);
return;
}
omap_hsmmc_start_dma_transfer(host);
omap_hsmmc_start_command(host, req->cmd, req->data);
}
/* Routine to configure clock values. Exposed API to core */
static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
int do_send_init_stream = 0;
if (ios->power_mode != host->power_mode) {
switch (ios->power_mode) {
case MMC_POWER_OFF:
omap_hsmmc_set_power(host, 0);
break;
case MMC_POWER_UP:
omap_hsmmc_set_power(host, 1);
break;
case MMC_POWER_ON:
do_send_init_stream = 1;
break;
}
host->power_mode = ios->power_mode;
}
/* FIXME: set registers based only on changes to ios */
omap_hsmmc_set_bus_width(host);
if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
/* Only MMC1 can interface at 3V without some flavor
* of external transceiver; but they all handle 1.8V.
*/
if ((OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET) &&
(ios->vdd == DUAL_VOLT_OCR_BIT)) {
/*
* The core's mmc_select_voltage() does not seem to set
* the power_mode to MMC_POWER_UP when it recalculates the
* voltage, so switch to the 1.8V vdd here.
*/
if (omap_hsmmc_switch_opcond(host, ios->vdd) != 0)
dev_dbg(mmc_dev(host->mmc),
"Switch operation failed\n");
}
}
omap_hsmmc_set_clock(host);
if (do_send_init_stream)
send_init_stream(host);
omap_hsmmc_set_bus_mode(host);
}
static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
u32 irq_mask, con;
unsigned long flags;
spin_lock_irqsave(&host->irq_lock, flags);
con = OMAP_HSMMC_READ(host->base, CON);
irq_mask = OMAP_HSMMC_READ(host->base, ISE);
if (enable) {
host->flags |= HSMMC_SDIO_IRQ_ENABLED;
irq_mask |= CIRQ_EN;
con |= CTPL | CLKEXTFREE;
} else {
host->flags &= ~HSMMC_SDIO_IRQ_ENABLED;
irq_mask &= ~CIRQ_EN;
con &= ~(CTPL | CLKEXTFREE);
}
OMAP_HSMMC_WRITE(host->base, CON, con);
OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
/*
* when enabling, piggy-back detection onto the current request;
* disabling always takes effect immediately
*/
if (!host->req_in_progress || !enable)
OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
/* flush posted write */
OMAP_HSMMC_READ(host->base, IE);
spin_unlock_irqrestore(&host->irq_lock, flags);
}
static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
{
int ret;
/*
* For omaps with a wake-up path, the wakeirq comes from pinctrl; for
* other omaps it comes from a GPIO (the DAT line remuxed to a gpio).
* The wakeirq is needed to detect an sdio irq while runtime suspended
* with the functional clock disabled.
*/
if (!host->dev->of_node || !host->wake_irq)
return -ENODEV;
ret = dev_pm_set_dedicated_wake_irq(host->dev, host->wake_irq);
if (ret) {
dev_err(mmc_dev(host->mmc), "Unable to request wake IRQ\n");
goto err;
}
/*
* Some omaps don't have wake-up path from deeper idle states
* and need to remux SDIO DAT1 to GPIO for wake-up from idle.
*/
if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) {
struct pinctrl *p = devm_pinctrl_get(host->dev);
if (IS_ERR(p)) {
ret = PTR_ERR(p);
goto err_free_irq;
}
if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) {
dev_info(host->dev, "missing idle pinctrl state\n");
devm_pinctrl_put(p);
ret = -EINVAL;
goto err_free_irq;
}
devm_pinctrl_put(p);
}
OMAP_HSMMC_WRITE(host->base, HCTL,
OMAP_HSMMC_READ(host->base, HCTL) | IWE);
return 0;
err_free_irq:
dev_pm_clear_wake_irq(host->dev);
err:
dev_warn(host->dev, "no SDIO IRQ support, falling back to polling\n");
host->wake_irq = 0;
return ret;
}
static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
{
u32 hctl, capa, value;
/* Only MMC1 supports 3.0V */
if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
hctl = SDVS30;
capa = VS30 | VS18;
} else {
hctl = SDVS18;
capa = VS18;
}
value = OMAP_HSMMC_READ(host->base, HCTL) & ~SDVS_MASK;
OMAP_HSMMC_WRITE(host->base, HCTL, value | hctl);
value = OMAP_HSMMC_READ(host->base, CAPA);
OMAP_HSMMC_WRITE(host->base, CAPA, value | capa);
/* Set SD bus power bit */
set_sd_bus_power(host);
}
static int omap_hsmmc_multi_io_quirk(struct mmc_card *card,
unsigned int direction, int blk_size)
{
/* This controller can't do multiblock reads due to hw bugs */
if (direction == MMC_DATA_READ)
return 1;
return blk_size;
}
static struct mmc_host_ops omap_hsmmc_ops = {
.post_req = omap_hsmmc_post_req,
.pre_req = omap_hsmmc_pre_req,
.request = omap_hsmmc_request,
.set_ios = omap_hsmmc_set_ios,
.get_cd = mmc_gpio_get_cd,
.get_ro = mmc_gpio_get_ro,
.enable_sdio_irq = omap_hsmmc_enable_sdio_irq,
};
#ifdef CONFIG_DEBUG_FS
static int mmc_regs_show(struct seq_file *s, void *data)
{
struct mmc_host *mmc = s->private;
struct omap_hsmmc_host *host = mmc_priv(mmc);
seq_printf(s, "mmc%d:\n", mmc->index);
seq_printf(s, "sdio irq mode\t%s\n",
(mmc->caps & MMC_CAP_SDIO_IRQ) ? "interrupt" : "polling");
if (mmc->caps & MMC_CAP_SDIO_IRQ) {
seq_printf(s, "sdio irq \t%s\n",
(host->flags & HSMMC_SDIO_IRQ_ENABLED) ? "enabled"
: "disabled");
}
seq_printf(s, "ctx_loss:\t%d\n", host->context_loss);
pm_runtime_get_sync(host->dev);
seq_puts(s, "\nregs:\n");
seq_printf(s, "CON:\t\t0x%08x\n",
OMAP_HSMMC_READ(host->base, CON));
seq_printf(s, "PSTATE:\t\t0x%08x\n",
OMAP_HSMMC_READ(host->base, PSTATE));
seq_printf(s, "HCTL:\t\t0x%08x\n",
OMAP_HSMMC_READ(host->base, HCTL));
seq_printf(s, "SYSCTL:\t\t0x%08x\n",
OMAP_HSMMC_READ(host->base, SYSCTL));
seq_printf(s, "IE:\t\t0x%08x\n",
OMAP_HSMMC_READ(host->base, IE));
seq_printf(s, "ISE:\t\t0x%08x\n",
OMAP_HSMMC_READ(host->base, ISE));
seq_printf(s, "CAPA:\t\t0x%08x\n",
OMAP_HSMMC_READ(host->base, CAPA));
pm_runtime_mark_last_busy(host->dev);
pm_runtime_put_autosuspend(host->dev);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(mmc_regs);
static void omap_hsmmc_debugfs(struct mmc_host *mmc)
{
if (mmc->debugfs_root)
debugfs_create_file("regs", S_IRUSR, mmc->debugfs_root,
mmc, &mmc_regs_fops);
}
#else
static void omap_hsmmc_debugfs(struct mmc_host *mmc)
{
}
#endif
#ifdef CONFIG_OF
static const struct omap_mmc_of_data omap3_pre_es3_mmc_of_data = {
/* See 35xx errata 2.1.1.128 in SPRZ278F */
.controller_flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ,
};
static const struct omap_mmc_of_data omap4_mmc_of_data = {
.reg_offset = 0x100,
};
static const struct omap_mmc_of_data am33xx_mmc_of_data = {
.reg_offset = 0x100,
.controller_flags = OMAP_HSMMC_SWAKEUP_MISSING,
};
static const struct of_device_id omap_mmc_of_match[] = {
{
.compatible = "ti,omap2-hsmmc",
},
{
.compatible = "ti,omap3-pre-es3-hsmmc",
.data = &omap3_pre_es3_mmc_of_data,
},
{
.compatible = "ti,omap3-hsmmc",
},
{
.compatible = "ti,omap4-hsmmc",
.data = &omap4_mmc_of_data,
},
{
.compatible = "ti,am33xx-hsmmc",
.data = &am33xx_mmc_of_data,
},
{},
};
MODULE_DEVICE_TABLE(of, omap_mmc_of_match);
static struct omap_hsmmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
{
struct omap_hsmmc_platform_data *pdata, *legacy;
struct device_node *np = dev->of_node;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM); /* out of memory */
legacy = dev_get_platdata(dev);
if (legacy && legacy->name)
pdata->name = legacy->name;
if (of_property_read_bool(np, "ti,dual-volt"))
pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT;
if (of_property_read_bool(np, "ti,non-removable")) {
pdata->nonremovable = true;
pdata->no_regulator_off_init = true;
}
if (of_property_read_bool(np, "ti,needs-special-reset"))
pdata->features |= HSMMC_HAS_UPDATED_RESET;
if (of_property_read_bool(np, "ti,needs-special-hs-handling"))
pdata->features |= HSMMC_HAS_HSPE_SUPPORT;
return pdata;
}
#else
static inline struct omap_hsmmc_platform_data
*of_get_hsmmc_pdata(struct device *dev)
{
return ERR_PTR(-EINVAL);
}
#endif
static int omap_hsmmc_probe(struct platform_device *pdev)
{
struct omap_hsmmc_platform_data *pdata = pdev->dev.platform_data;
struct mmc_host *mmc;
struct omap_hsmmc_host *host = NULL;
struct resource *res;
int ret, irq;
const struct of_device_id *match;
const struct omap_mmc_of_data *data;
void __iomem *base;
match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
if (match) {
pdata = of_get_hsmmc_pdata(&pdev->dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
if (match->data) {
data = match->data;
pdata->reg_offset = data->reg_offset;
pdata->controller_flags |= data->controller_flags;
}
}
if (pdata == NULL) {
dev_err(&pdev->dev, "Platform Data is missing\n");
return -ENXIO;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev);
if (!mmc) {
ret = -ENOMEM;
goto err;
}
ret = mmc_of_parse(mmc);
if (ret)
goto err1;
host = mmc_priv(mmc);
host->mmc = mmc;
host->pdata = pdata;
host->dev = &pdev->dev;
host->use_dma = 1;
host->dma_ch = -1;
host->irq = irq;
host->mapbase = res->start + pdata->reg_offset;
host->base = base + pdata->reg_offset;
host->power_mode = MMC_POWER_OFF;
host->next_data.cookie = 1;
host->pbias_enabled = false;
host->vqmmc_enabled = false;
platform_set_drvdata(pdev, host);
if (pdev->dev.of_node)
host->wake_irq = irq_of_parse_and_map(pdev->dev.of_node, 1);
mmc->ops = &omap_hsmmc_ops;
mmc->f_min = OMAP_MMC_MIN_CLOCK;
if (pdata->max_freq > 0)
mmc->f_max = pdata->max_freq;
else if (mmc->f_max == 0)
mmc->f_max = OMAP_MMC_MAX_CLOCK;
spin_lock_init(&host->irq_lock);
host->fclk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(host->fclk)) {
ret = PTR_ERR(host->fclk);
host->fclk = NULL;
goto err1;
}
if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
}
device_init_wakeup(&pdev->dev, true);
pm_runtime_enable(host->dev);
pm_runtime_get_sync(host->dev);
pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(host->dev);
omap_hsmmc_context_save(host);
host->dbclk = devm_clk_get(&pdev->dev, "mmchsdb_fck");
/*
* MMC can still work without debounce clock.
*/
if (IS_ERR(host->dbclk)) {
host->dbclk = NULL;
} else if (clk_prepare_enable(host->dbclk) != 0) {
dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
host->dbclk = NULL;
}
/* Set this to a value that allows allocating an entire descriptor
* list within a page (zero order allocation). */
mmc->max_segs = 64;
mmc->max_blk_size = 512; /* Block Length at max can be 1024 */
mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_CMD23;
mmc->caps |= mmc_pdata(host)->caps;
if (mmc->caps & MMC_CAP_8_BIT_DATA)
mmc->caps |= MMC_CAP_4_BIT_DATA;
if (mmc_pdata(host)->nonremovable)
mmc->caps |= MMC_CAP_NONREMOVABLE;
mmc->pm_caps |= mmc_pdata(host)->pm_caps;
omap_hsmmc_conf_bus_power(host);
host->rx_chan = dma_request_chan(&pdev->dev, "rx");
if (IS_ERR(host->rx_chan)) {
dev_err(mmc_dev(host->mmc), "RX DMA channel request failed\n");
ret = PTR_ERR(host->rx_chan);
goto err_irq;
}
host->tx_chan = dma_request_chan(&pdev->dev, "tx");
if (IS_ERR(host->tx_chan)) {
dev_err(mmc_dev(host->mmc), "TX DMA channel request failed\n");
ret = PTR_ERR(host->tx_chan);
goto err_irq;
}
/*
* Limit the maximum segment size to the lower of the request size
* and the DMA engine device segment size limits. In reality, with
* 32-bit transfers, the DMA engine can do longer segments than this
* but there is no way to represent that in the DMA model - if we
* increase this figure here, we get warnings from the DMA API debug.
*/
mmc->max_seg_size = min3(mmc->max_req_size,
dma_get_max_seg_size(host->rx_chan->device->dev),
dma_get_max_seg_size(host->tx_chan->device->dev));
/* Request IRQ for MMC operations */
ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
mmc_hostname(mmc), host);
if (ret) {
dev_err(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n");
goto err_irq;
}
ret = omap_hsmmc_reg_get(host);
if (ret)
goto err_irq;
if (!mmc->ocr_avail)
mmc->ocr_avail = mmc_pdata(host)->ocr_mask;
omap_hsmmc_disable_irq(host);
/*
* For now, only support SDIO interrupt if we have a separate
* wake-up interrupt configured from device tree. This is because
* the wake-up interrupt is needed for idle state and some
* platforms need special quirks. And we don't want to add new
* legacy mux platform init code callbacks any longer as we
* are moving to DT-based booting anyway.
*/
ret = omap_hsmmc_configure_wake_irq(host);
if (!ret)
mmc->caps |= MMC_CAP_SDIO_IRQ;
ret = mmc_add_host(mmc);
if (ret)
goto err_irq;
if (mmc_pdata(host)->name != NULL) {
ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
if (ret < 0)
goto err_slot_name;
}
omap_hsmmc_debugfs(mmc);
pm_runtime_mark_last_busy(host->dev);
pm_runtime_put_autosuspend(host->dev);
return 0;
err_slot_name:
mmc_remove_host(mmc);
err_irq:
device_init_wakeup(&pdev->dev, false);
if (!IS_ERR_OR_NULL(host->tx_chan))
dma_release_channel(host->tx_chan);
if (!IS_ERR_OR_NULL(host->rx_chan))
dma_release_channel(host->rx_chan);
pm_runtime_dont_use_autosuspend(host->dev);
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
clk_disable_unprepare(host->dbclk);
err1:
mmc_free_host(mmc);
err:
return ret;
}
static void omap_hsmmc_remove(struct platform_device *pdev)
{
struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
pm_runtime_get_sync(host->dev);
mmc_remove_host(host->mmc);
dma_release_channel(host->tx_chan);
dma_release_channel(host->rx_chan);
dev_pm_clear_wake_irq(host->dev);
pm_runtime_dont_use_autosuspend(host->dev);
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
device_init_wakeup(&pdev->dev, false);
clk_disable_unprepare(host->dbclk);
mmc_free_host(host->mmc);
}
#ifdef CONFIG_PM_SLEEP
static int omap_hsmmc_suspend(struct device *dev)
{
struct omap_hsmmc_host *host = dev_get_drvdata(dev);
if (!host)
return 0;
pm_runtime_get_sync(host->dev);
if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
OMAP_HSMMC_WRITE(host->base, ISE, 0);
OMAP_HSMMC_WRITE(host->base, IE, 0);
OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
OMAP_HSMMC_WRITE(host->base, HCTL,
OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
}
clk_disable_unprepare(host->dbclk);
pm_runtime_put_sync(host->dev);
return 0;
}
/* Routine to resume the MMC device */
static int omap_hsmmc_resume(struct device *dev)
{
struct omap_hsmmc_host *host = dev_get_drvdata(dev);
if (!host)
return 0;
pm_runtime_get_sync(host->dev);
clk_prepare_enable(host->dbclk);
if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
omap_hsmmc_conf_bus_power(host);
pm_runtime_mark_last_busy(host->dev);
pm_runtime_put_autosuspend(host->dev);
return 0;
}
#endif
#ifdef CONFIG_PM
static int omap_hsmmc_runtime_suspend(struct device *dev)
{
struct omap_hsmmc_host *host;
unsigned long flags;
int ret = 0;
host = dev_get_drvdata(dev);
omap_hsmmc_context_save(host);
dev_dbg(dev, "disabled\n");
spin_lock_irqsave(&host->irq_lock, flags);
if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
(host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
/* disable sdio irq handling to prevent race */
OMAP_HSMMC_WRITE(host->base, ISE, 0);
OMAP_HSMMC_WRITE(host->base, IE, 0);
if (!(OMAP_HSMMC_READ(host->base, PSTATE) & DLEV_DAT(1))) {
/*
* DAT1 line is low, so an sdio irq is pending. Possible race:
* the irq handler may already be running on another core, so
* abort the suspend.
*/
dev_dbg(dev, "pending sdio irq, abort suspend\n");
OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
pm_runtime_mark_last_busy(dev);
ret = -EBUSY;
goto abort;
}
pinctrl_pm_select_idle_state(dev);
} else {
pinctrl_pm_select_idle_state(dev);
}
abort:
spin_unlock_irqrestore(&host->irq_lock, flags);
return ret;
}
static int omap_hsmmc_runtime_resume(struct device *dev)
{
struct omap_hsmmc_host *host;
unsigned long flags;
host = dev_get_drvdata(dev);
omap_hsmmc_context_restore(host);
dev_dbg(dev, "enabled\n");
spin_lock_irqsave(&host->irq_lock, flags);
if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
(host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
pinctrl_select_default_state(host->dev);
/* irq lost, if pinmux incorrect */
OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
} else {
pinctrl_select_default_state(host->dev);
}
spin_unlock_irqrestore(&host->irq_lock, flags);
return 0;
}
#endif
static const struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(omap_hsmmc_suspend, omap_hsmmc_resume)
SET_RUNTIME_PM_OPS(omap_hsmmc_runtime_suspend, omap_hsmmc_runtime_resume, NULL)
};
static struct platform_driver omap_hsmmc_driver = {
.probe = omap_hsmmc_probe,
.remove_new = omap_hsmmc_remove,
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &omap_hsmmc_dev_pm_ops,
.of_match_table = of_match_ptr(omap_mmc_of_match),
},
};
module_platform_driver(omap_hsmmc_driver);
MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Texas Instruments Inc");
| linux-master | drivers/mmc/host/omap_hsmmc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas SDHI
*
* Copyright (C) 2015-19 Renesas Electronics Corporation
* Copyright (C) 2016-19 Sang Engineering, Wolfram Sang
* Copyright (C) 2016-17 Horms Solutions, Simon Horman
* Copyright (C) 2009 Magnus Damm
*
* Based on "Compaq ASIC3 support":
*
* Copyright 2001 Compaq Computer Corporation.
* Copyright 2004-2005 Phil Blundell
* Copyright 2007-2008 OpenedHand Ltd.
*
* Authors: Phil Blundell <[email protected]>,
* Samuel Ortiz <[email protected]>
*
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinctrl-state.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include "renesas_sdhi.h"
#include "tmio_mmc.h"
#define CTL_HOST_MODE 0xe4
#define HOST_MODE_GEN2_SDR50_WMODE BIT(0)
#define HOST_MODE_GEN2_SDR104_WMODE BIT(0)
#define HOST_MODE_GEN3_WMODE BIT(0)
#define HOST_MODE_GEN3_BUSWIDTH BIT(8)
#define HOST_MODE_GEN3_16BIT HOST_MODE_GEN3_WMODE
#define HOST_MODE_GEN3_32BIT (HOST_MODE_GEN3_WMODE | HOST_MODE_GEN3_BUSWIDTH)
#define HOST_MODE_GEN3_64BIT 0
#define SDHI_VER_GEN2_SDR50 0x490c
#define SDHI_VER_RZ_A1 0x820b
/* very old datasheets said 0x490c for SDR104, too. They are wrong! */
#define SDHI_VER_GEN2_SDR104 0xcb0d
#define SDHI_VER_GEN3_SD 0xcc10
#define SDHI_VER_GEN3_SDMMC 0xcd10
#define SDHI_GEN3_MMC0_ADDR 0xee140000
static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
{
u32 val;
/*
* see also
* renesas_sdhi_of_data :: dma_buswidth
*/
switch (sd_ctrl_read16(host, CTL_VERSION)) {
case SDHI_VER_GEN2_SDR50:
val = (width == 32) ? HOST_MODE_GEN2_SDR50_WMODE : 0;
break;
case SDHI_VER_GEN2_SDR104:
val = (width == 32) ? 0 : HOST_MODE_GEN2_SDR104_WMODE;
break;
case SDHI_VER_GEN3_SD:
case SDHI_VER_GEN3_SDMMC:
if (width == 64)
val = HOST_MODE_GEN3_64BIT;
else if (width == 32)
val = HOST_MODE_GEN3_32BIT;
else
val = HOST_MODE_GEN3_16BIT;
break;
default:
/* nothing to do */
return;
}
sd_ctrl_write16(host, CTL_HOST_MODE, val);
}
static int renesas_sdhi_clk_enable(struct tmio_mmc_host *host)
{
struct mmc_host *mmc = host->mmc;
struct renesas_sdhi *priv = host_to_priv(host);
int ret;
ret = clk_prepare_enable(priv->clk_cd);
if (ret < 0)
return ret;
/*
* The clock driver may not know what maximum frequency
* actually works, so it should be set with the max-frequency
* property which will already have been read to f_max. If it
* was missing, assume the current frequency is the maximum.
*/
if (!mmc->f_max)
mmc->f_max = clk_get_rate(priv->clk);
/*
* Minimum frequency is the minimum input clock frequency
* divided by our maximum divider.
*/
mmc->f_min = max(clk_round_rate(priv->clk, 1) / 512, 1L);
/* enable 16bit data access on SDBUF as default */
renesas_sdhi_sdbuf_width(host, 16);
return 0;
}
static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
unsigned int wanted_clock)
{
struct renesas_sdhi *priv = host_to_priv(host);
struct clk *ref_clk = priv->clk;
unsigned int freq, diff, best_freq = 0, diff_min = ~0;
unsigned int new_clock, clkh_shift = 0;
unsigned int new_upper_limit;
int i;
/*
* We simply return the current rate if a) we are not on a R-Car Gen2+
* SoC (may work for others, but untested) or b) if the SCC needs its
* clock during tuning, so we don't change the external clock setup.
*/
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2) || mmc_doing_tune(host->mmc))
return clk_get_rate(priv->clk);
if (priv->clkh) {
/* HS400 with 4TAP needs different clock settings */
bool use_4tap = sdhi_has_quirk(priv, hs400_4taps);
bool need_slow_clkh = host->mmc->ios.timing == MMC_TIMING_MMC_HS400;
clkh_shift = use_4tap && need_slow_clkh ? 1 : 2;
ref_clk = priv->clkh;
}
new_clock = wanted_clock << clkh_shift;
/*
* We want the bus clock to be as close as possible to, but no
* greater than, new_clock. As we can divide by 1 << i for
* any i in [0, 9] we want the input clock to be as close as
* possible, but no greater than, new_clock << i.
*
* Add an upper limit of 1/1024 rate higher to the clock rate to fix
* clk rate jumping to a lower rate due to rounding error (e.g. RZ/G2L has
* 3 clk sources 533.333333 MHz, 400 MHz and 266.666666 MHz. The request
* for 533.333333 MHz will select a slower 400 MHz due to rounding
* error (533333333 Hz / 4 * 4 = 533333332 Hz < 533333333 Hz)).
*/
for (i = min(9, ilog2(UINT_MAX / new_clock)); i >= 0; i--) {
freq = clk_round_rate(ref_clk, new_clock << i);
new_upper_limit = (new_clock << i) + ((new_clock << i) >> 10);
if (freq > new_upper_limit) {
/* Too fast; look for a slightly slower option */
freq = clk_round_rate(ref_clk, (new_clock << i) / 4 * 3);
if (freq > new_upper_limit)
continue;
}
diff = new_clock - (freq >> i);
if (diff <= diff_min) {
best_freq = freq;
diff_min = diff;
}
}
clk_set_rate(ref_clk, best_freq);
if (priv->clkh)
clk_set_rate(priv->clk, best_freq >> clkh_shift);
return clk_get_rate(priv->clk);
}
static void renesas_sdhi_set_clock(struct tmio_mmc_host *host,
unsigned int new_clock)
{
unsigned int clk_margin;
u32 clk = 0, clock;
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
if (new_clock == 0) {
host->mmc->actual_clock = 0;
goto out;
}
host->mmc->actual_clock = renesas_sdhi_clk_update(host, new_clock);
clock = host->mmc->actual_clock / 512;
/*
* Add a margin of 1/1024 rate higher to the clock rate in order
* to avoid clk variable setting a value of 0 due to the margin
* provided for actual_clock in renesas_sdhi_clk_update().
*/
clk_margin = new_clock >> 10;
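/*
 * Walk the power-of-two divider encoding: starting from /512 (0x80),
 * each right shift of clk halves the divider while 'clock' doubles.
 * Shifting past /2 sets bit 22, which selects the 1/1 clock below.
 */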
for (clk = 0x80000080; new_clock + clk_margin >= (clock << 1); clk >>= 1)
clock <<= 1;
/* the 1/1 clock ratio is optional */
if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && ((clk >> 22) & 0x1)) {
if (!(host->mmc->ios.timing == MMC_TIMING_MMC_HS400))
clk |= 0xff;
else
clk &= ~0xff;
}
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
usleep_range(10000, 11000);
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
out:
/* HW engineers overrode docs: no sleep needed on R-Car2+ */
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
usleep_range(10000, 11000);
}
static void renesas_sdhi_clk_disable(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
clk_disable_unprepare(priv->clk_cd);
}
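/* the card signals busy by pulling DAT0 low */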
static int renesas_sdhi_card_busy(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
return !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) &
TMIO_STAT_DAT0);
}
static int renesas_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
struct mmc_ios *ios)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct renesas_sdhi *priv = host_to_priv(host);
struct pinctrl_state *pin_state;
int ret;
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_330:
pin_state = priv->pins_default;
break;
case MMC_SIGNAL_VOLTAGE_180:
pin_state = priv->pins_uhs;
break;
default:
return -EINVAL;
}
/*
* If anything is missing, assume signal voltage is fixed at
* 3.3V and succeed/fail accordingly.
*/
if (IS_ERR(priv->pinctrl) || IS_ERR(pin_state))
return ios->signal_voltage ==
MMC_SIGNAL_VOLTAGE_330 ? 0 : -EINVAL;
ret = mmc_regulator_set_vqmmc(host->mmc, ios);
if (ret < 0)
return ret;
return pinctrl_select_state(priv->pinctrl, pin_state);
}
/* SCC registers */
#define SH_MOBILE_SDHI_SCC_DTCNTL 0x000
#define SH_MOBILE_SDHI_SCC_TAPSET 0x002
#define SH_MOBILE_SDHI_SCC_DT2FF 0x004
#define SH_MOBILE_SDHI_SCC_CKSEL 0x006
#define SH_MOBILE_SDHI_SCC_RVSCNTL 0x008
#define SH_MOBILE_SDHI_SCC_RVSREQ 0x00A
#define SH_MOBILE_SDHI_SCC_SMPCMP 0x00C
#define SH_MOBILE_SDHI_SCC_TMPPORT2 0x00E
#define SH_MOBILE_SDHI_SCC_TMPPORT3 0x014
#define SH_MOBILE_SDHI_SCC_TMPPORT4 0x016
#define SH_MOBILE_SDHI_SCC_TMPPORT5 0x018
#define SH_MOBILE_SDHI_SCC_TMPPORT6 0x01A
#define SH_MOBILE_SDHI_SCC_TMPPORT7 0x01C
#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN BIT(0)
#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT 16
#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_MASK 0xff
#define SH_MOBILE_SDHI_SCC_CKSEL_DTSEL BIT(0)
#define SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN BIT(0)
#define SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPDOWN BIT(0)
#define SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPUP BIT(1)
#define SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR BIT(2)
#define SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQDOWN BIT(8)
#define SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQUP BIT(24)
#define SH_MOBILE_SDHI_SCC_SMPCMP_CMD_ERR (BIT(8) | BIT(24))
#define SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL BIT(4)
#define SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN BIT(31)
/* Definitions for values the SH_MOBILE_SDHI_SCC_TMPPORT4 register */
#define SH_MOBILE_SDHI_SCC_TMPPORT4_DLL_ACC_START BIT(0)
/* Definitions for values the SH_MOBILE_SDHI_SCC_TMPPORT5 register */
#define SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_RW_SEL_R BIT(8)
#define SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_RW_SEL_W (0 << 8)
#define SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_ADR_MASK 0x3F
/* Definitions for values the SH_MOBILE_SDHI_SCC register */
#define SH_MOBILE_SDHI_SCC_TMPPORT_DISABLE_WP_CODE 0xa5000000
#define SH_MOBILE_SDHI_SCC_TMPPORT_CALIB_CODE_MASK 0x1f
#define SH_MOBILE_SDHI_SCC_TMPPORT_MANUAL_MODE BIT(7)
static inline u32 sd_scc_read32(struct tmio_mmc_host *host,
struct renesas_sdhi *priv, int addr)
{
return readl(priv->scc_ctl + (addr << host->bus_shift));
}
static inline void sd_scc_write32(struct tmio_mmc_host *host,
struct renesas_sdhi *priv,
int addr, u32 val)
{
writel(val, priv->scc_ctl + (addr << host->bus_shift));
}
static unsigned int renesas_sdhi_init_tuning(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv;
priv = host_to_priv(host);
/* Initialize SCC */
sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, 0x0);
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
/* set sampling clock selection range */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN |
0x8 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL,
SH_MOBILE_SDHI_SCC_CKSEL_DTSEL |
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL));
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, priv->scc_tappos);
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
/* Read TAPNUM */
return (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL) >>
SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT) &
SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_MASK;
}
static void renesas_sdhi_hs400_complete(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct renesas_sdhi *priv = host_to_priv(host);
u32 bad_taps = priv->quirks ? priv->quirks->hs400_bad_taps : 0;
bool use_4tap = sdhi_has_quirk(priv, hs400_4taps);
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
/* Set HS400 mode */
sd_ctrl_write16(host, CTL_SDIF_MODE, SDIF_MODE_HS400 |
sd_ctrl_read16(host, CTL_SDIF_MODE));
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF,
priv->scc_tappos_hs400);
if (sdhi_has_quirk(priv, manual_tap_correction))
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2,
(SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN |
SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) |
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2));
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN |
sd_scc_read32(host, priv,
SH_MOBILE_SDHI_SCC_DTCNTL));
/* Avoid bad TAP */
if (bad_taps & BIT(priv->tap_set)) {
u32 new_tap = (priv->tap_set + 1) % priv->tap_num;
if (bad_taps & BIT(new_tap))
new_tap = (priv->tap_set - 1) % priv->tap_num;
if (bad_taps & BIT(new_tap)) {
new_tap = priv->tap_set;
dev_dbg(&host->pdev->dev, "Can't handle three bad tap in a row\n");
}
priv->tap_set = new_tap;
}
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET,
priv->tap_set / (use_4tap ? 2 : 1));
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL,
SH_MOBILE_SDHI_SCC_CKSEL_DTSEL |
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL));
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
if (priv->adjust_hs400_calib_table)
priv->needs_adjust_hs400 = true;
}
static void renesas_sdhi_disable_scc(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct renesas_sdhi *priv = host_to_priv(host);
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_CKSEL,
~SH_MOBILE_SDHI_SCC_CKSEL_DTSEL &
sd_scc_read32(host, priv,
SH_MOBILE_SDHI_SCC_CKSEL));
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
~SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN &
sd_scc_read32(host, priv,
SH_MOBILE_SDHI_SCC_DTCNTL));
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
}
static u32 sd_scc_tmpport_read32(struct tmio_mmc_host *host,
struct renesas_sdhi *priv, u32 addr)
{
/* read mode */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT5,
SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_RW_SEL_R |
(SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_ADR_MASK & addr));
/* access start and stop */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT4,
SH_MOBILE_SDHI_SCC_TMPPORT4_DLL_ACC_START);
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT4, 0);
return sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT7);
}
static void sd_scc_tmpport_write32(struct tmio_mmc_host *host,
struct renesas_sdhi *priv, u32 addr, u32 val)
{
/* write mode */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT5,
SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_RW_SEL_W |
(SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_ADR_MASK & addr));
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT6, val);
/* access start and stop */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT4,
SH_MOBILE_SDHI_SCC_TMPPORT4_DLL_ACC_START);
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT4, 0);
}
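/*
 * Illustrative sketch, not part of the upstream driver: the TMPPORT
 * helpers above form an indirect register interface - TMPPORT5 selects
 * the internal DLL address and direction, TMPPORT4 strobes the access,
 * and TMPPORT6/7 carry the write/read data. A hypothetical
 * read-modify-write of an internal DLL register combines them:
 */
static void __maybe_unused example_tmpport_rmw(struct tmio_mmc_host *host,
struct renesas_sdhi *priv, u32 addr, u32 clear, u32 set)
{
u32 val = sd_scc_tmpport_read32(host, priv, addr);
val &= ~clear;
val |= set;
sd_scc_tmpport_write32(host, priv, addr, val);
}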
static void renesas_sdhi_adjust_hs400_mode_enable(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
u32 calib_code;
/* disable write protect */
sd_scc_tmpport_write32(host, priv, 0x00,
SH_MOBILE_SDHI_SCC_TMPPORT_DISABLE_WP_CODE);
/* read calibration code and adjust */
calib_code = sd_scc_tmpport_read32(host, priv, 0x26);
calib_code &= SH_MOBILE_SDHI_SCC_TMPPORT_CALIB_CODE_MASK;
sd_scc_tmpport_write32(host, priv, 0x22,
SH_MOBILE_SDHI_SCC_TMPPORT_MANUAL_MODE |
priv->adjust_hs400_calib_table[calib_code]);
/* set offset value to TMPPORT3, hardcoded to OFFSET0 (= 0x3) for now */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT3, 0x3);
/* adjustment done, clear flag */
priv->needs_adjust_hs400 = false;
}
static void renesas_sdhi_adjust_hs400_mode_disable(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
/* disable write protect */
sd_scc_tmpport_write32(host, priv, 0x00,
SH_MOBILE_SDHI_SCC_TMPPORT_DISABLE_WP_CODE);
/* disable manual calibration */
sd_scc_tmpport_write32(host, priv, 0x22, 0);
/* clear offset value of TMPPORT3 */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT3, 0);
}
static void renesas_sdhi_reset_hs400_mode(struct tmio_mmc_host *host,
struct renesas_sdhi *priv)
{
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
/* Reset HS400 mode */
sd_ctrl_write16(host, CTL_SDIF_MODE, ~SDIF_MODE_HS400 &
sd_ctrl_read16(host, CTL_SDIF_MODE));
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, priv->scc_tappos);
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2,
~(SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN |
SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) &
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2));
if (sdhi_has_quirk(priv, hs400_calib_table) || sdhi_has_quirk(priv, hs400_bad_taps))
renesas_sdhi_adjust_hs400_mode_disable(host);
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
}
static int renesas_sdhi_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
renesas_sdhi_reset_hs400_mode(host, host_to_priv(host));
return 0;
}
static void renesas_sdhi_scc_reset(struct tmio_mmc_host *host, struct renesas_sdhi *priv)
{
renesas_sdhi_disable_scc(host->mmc);
renesas_sdhi_reset_hs400_mode(host, priv);
priv->needs_adjust_hs400 = false;
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
}
/* only populated for TMIO_MMC_MIN_RCAR2 */
static void renesas_sdhi_reset(struct tmio_mmc_host *host, bool preserve)
{
struct renesas_sdhi *priv = host_to_priv(host);
int ret;
u16 val;
if (!preserve) {
if (priv->rstc) {
reset_control_reset(priv->rstc);
/* It is unknown why, but without polling the reset status, it will hang */
read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,
false, priv->rstc);
/* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */
sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
priv->needs_adjust_hs400 = false;
renesas_sdhi_set_clock(host, host->clk_cache);
} else if (priv->scc_ctl) {
renesas_sdhi_scc_reset(host, priv);
}
}
if (sd_ctrl_read16(host, CTL_VERSION) >= SDHI_VER_GEN3_SD) {
val = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT);
val |= CARD_OPT_EXTOP;
sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, val);
}
}
static unsigned int renesas_sdhi_gen3_get_cycles(struct tmio_mmc_host *host)
{
u16 num, val = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT);
num = (val & CARD_OPT_TOP_MASK) >> CARD_OPT_TOP_SHIFT;
return 1 << ((val & CARD_OPT_EXTOP ? 14 : 13) + num);
}
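/*
 * Illustrative sketch, not part of the upstream driver: the cycle count
 * above is a pure power of two - a 2^13 base, doubled to 2^14 by EXTOP
 * and scaled by 2^TOP. For example, EXTOP = 1 and TOP = 3 give
 * 1 << 17 = 131072 card clock cycles:
 */
static unsigned int __maybe_unused example_timeout_cycles(bool extop,
unsigned int top)
{
return 1U << ((extop ? 14 : 13) + top);
}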
#define SH_MOBILE_SDHI_MIN_TAP_ROW 3
static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
unsigned int tap_start = 0, tap_end = 0, tap_cnt = 0, rs, re, i;
unsigned int taps_size = priv->tap_num * 2, min_tap_row;
unsigned long *bitmap;
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
/*
* When tuning CMD19 is issued twice for each tap, merge the
* result requiring the tap to be good in both runs before
* considering it for tuning selection.
*/
for (i = 0; i < taps_size; i++) {
int offset = priv->tap_num * (i < priv->tap_num ? 1 : -1);
if (!test_bit(i, priv->taps))
clear_bit(i + offset, priv->taps);
if (!test_bit(i, priv->smpcmp))
clear_bit(i + offset, priv->smpcmp);
}
/*
 * If all TAPs are OK, the sampling clock position is selected by
 * identifying the change point of the data.
 */
if (bitmap_full(priv->taps, taps_size)) {
bitmap = priv->smpcmp;
min_tap_row = 1;
} else {
bitmap = priv->taps;
min_tap_row = SH_MOBILE_SDHI_MIN_TAP_ROW;
}
/*
* Find the longest consecutive run of successful probes. If that
* is at least SH_MOBILE_SDHI_MIN_TAP_ROW probes long then use the
* center index as the tap, otherwise bail out.
*/
for_each_set_bitrange(rs, re, bitmap, taps_size) {
if (re - rs > tap_cnt) {
tap_end = re;
tap_start = rs;
tap_cnt = tap_end - tap_start;
}
}
if (tap_cnt >= min_tap_row)
priv->tap_set = (tap_start + tap_end) / 2 % priv->tap_num;
else
return -EIO;
/* Set SCC */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, priv->tap_set);
/* Enable auto re-tuning */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN |
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
return 0;
}
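/*
 * Illustrative sketch, not part of the upstream driver: the merge loop
 * at the top of renesas_sdhi_select_tuning() keeps a tap only if both
 * CMD19 runs (bit i and its mirror at i +/- tap_num) succeeded. A
 * hypothetical equivalent iterating only the first half of the doubled
 * bitmap:
 */
static void __maybe_unused example_merge_runs(unsigned long *taps,
unsigned int tap_num)
{
unsigned int i;
for (i = 0; i < tap_num; i++) {
if (!test_bit(i, taps) || !test_bit(i + tap_num, taps)) {
clear_bit(i, taps);
clear_bit(i + tap_num, taps);
}
}
}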
static int renesas_sdhi_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct renesas_sdhi *priv = host_to_priv(host);
int i, ret;
priv->tap_num = renesas_sdhi_init_tuning(host);
if (!priv->tap_num)
return 0; /* Tuning is not supported */
if (priv->tap_num * 2 >= sizeof(priv->taps) * BITS_PER_BYTE) {
dev_err(&host->pdev->dev,
"Too many taps, please update 'taps' in tmio_mmc_host!\n");
return -EINVAL;
}
bitmap_zero(priv->taps, priv->tap_num * 2);
bitmap_zero(priv->smpcmp, priv->tap_num * 2);
/* Issue CMD19 twice for each tap */
for (i = 0; i < 2 * priv->tap_num; i++) {
int cmd_error = 0;
/* Set sampling clock position */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num);
if (mmc_send_tuning(mmc, opcode, &cmd_error) == 0)
set_bit(i, priv->taps);
if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_SMPCMP) == 0)
set_bit(i, priv->smpcmp);
if (cmd_error)
mmc_send_abort_tuning(mmc, opcode);
}
ret = renesas_sdhi_select_tuning(host);
if (ret < 0)
renesas_sdhi_scc_reset(host, priv);
return ret;
}
static bool renesas_sdhi_manual_correction(struct tmio_mmc_host *host, bool use_4tap)
{
struct renesas_sdhi *priv = host_to_priv(host);
unsigned int new_tap = priv->tap_set, error_tap = priv->tap_set;
u32 val;
val = sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ);
if (!val)
return false;
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
/* Change TAP position according to correction status */
if (sdhi_has_quirk(priv, manual_tap_correction) &&
host->mmc->ios.timing == MMC_TIMING_MMC_HS400) {
u32 bad_taps = priv->quirks ? priv->quirks->hs400_bad_taps : 0;
/*
* With HS400, the DAT signal is based on DS, not CLK.
* Therefore, use only CMD status.
*/
u32 smpcmp = sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_SMPCMP) &
SH_MOBILE_SDHI_SCC_SMPCMP_CMD_ERR;
if (!smpcmp) {
return false; /* no error in CMD signal */
} else if (smpcmp == SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQUP) {
new_tap++;
error_tap--;
} else if (smpcmp == SH_MOBILE_SDHI_SCC_SMPCMP_CMD_REQDOWN) {
new_tap--;
error_tap++;
} else {
return true; /* need retune */
}
/*
* When new_tap is a bad tap, we cannot change. Then, we compare
* with the HS200 tuning result. When smpcmp[error_tap] is OK,
* we can at least retune.
*/
if (bad_taps & BIT(new_tap % priv->tap_num))
return test_bit(error_tap % priv->tap_num, priv->smpcmp);
} else {
if (val & SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR)
return true; /* need retune */
else if (val & SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPUP)
new_tap++;
else if (val & SH_MOBILE_SDHI_SCC_RVSREQ_REQTAPDOWN)
new_tap--;
else
return false;
}
priv->tap_set = (new_tap % priv->tap_num);
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET,
priv->tap_set / (use_4tap ? 2 : 1));
return false;
}
static bool renesas_sdhi_auto_correction(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
/* Check SCC error */
if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ) &
SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR) {
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
return true;
}
return false;
}
static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host,
struct mmc_request *mrq)
{
struct renesas_sdhi *priv = host_to_priv(host);
bool use_4tap = sdhi_has_quirk(priv, hs400_4taps);
bool ret = false;
/*
* Skip checking SCC errors when running on 4 taps in HS400 mode as
* any retuning would still result in the same 4 taps being used.
*/
if (!(host->mmc->ios.timing == MMC_TIMING_UHS_SDR104) &&
!(host->mmc->ios.timing == MMC_TIMING_MMC_HS200) &&
!(host->mmc->ios.timing == MMC_TIMING_MMC_HS400 && !use_4tap))
return false;
if (mmc_doing_tune(host->mmc))
return false;
if (((mrq->cmd->error == -ETIMEDOUT) ||
(mrq->data && mrq->data->error == -ETIMEDOUT)) &&
((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
(host->ops.get_cd && host->ops.get_cd(host->mmc))))
ret |= true;
if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL) &
SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN)
ret |= renesas_sdhi_auto_correction(host);
else
ret |= renesas_sdhi_manual_correction(host, use_4tap);
return ret;
}
static int renesas_sdhi_wait_idle(struct tmio_mmc_host *host, u32 bit)
{
int timeout = 1000;
/* CBSY is set when busy, SCLKDIVEN is cleared when busy */
u32 wait_state = (bit == TMIO_STAT_CMD_BUSY ? TMIO_STAT_CMD_BUSY : 0);
while (--timeout && (sd_ctrl_read16_and_16_as_32(host, CTL_STATUS)
& bit) == wait_state)
udelay(1);
if (!timeout) {
dev_warn(&host->pdev->dev, "timeout waiting for SD bus idle\n");
return -EBUSY;
}
return 0;
}
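/*
 * Illustrative sketch, not part of the upstream driver: the open-coded
 * loop above could also be written with the generic polling helper from
 * <linux/iopoll.h>, assuming the same 1 us poll interval and 1 ms
 * budget. The atomic variant mirrors the original udelay() and stays
 * safe if the hook runs in atomic context:
 */
static int __maybe_unused example_wait_idle(struct tmio_mmc_host *host,
u32 bit, u32 wait_state)
{
u32 status;
return read_poll_timeout_atomic(sd_ctrl_read16_and_16_as_32, status,
(status & bit) != wait_state, 1, 1000, false, host, CTL_STATUS);
}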
static int renesas_sdhi_write16_hook(struct tmio_mmc_host *host, int addr)
{
u32 bit = TMIO_STAT_SCLKDIVEN;
switch (addr) {
case CTL_SD_CMD:
case CTL_STOP_INTERNAL_ACTION:
case CTL_XFER_BLK_COUNT:
case CTL_SD_XFER_LEN:
case CTL_SD_MEM_CARD_OPT:
case CTL_TRANSACTION_CTL:
case CTL_DMA_ENABLE:
case CTL_HOST_MODE:
if (host->pdata->flags & TMIO_MMC_HAVE_CBSY)
bit = TMIO_STAT_CMD_BUSY;
fallthrough;
case CTL_SD_CARD_CLK_CTL:
return renesas_sdhi_wait_idle(host, bit);
}
return 0;
}
static int renesas_sdhi_multi_io_quirk(struct mmc_card *card,
unsigned int direction, int blk_size)
{
/*
 * In Renesas controllers, when performing a multiple block read of
 * one or two blocks, the response value may not be read properly
 * depending on the timing with which the response register is read.
 * Use single block reads to work around this HW bug.
 */
if ((direction == MMC_DATA_READ) &&
blk_size == 2)
return 1;
return blk_size;
}
static void renesas_sdhi_fixup_request(struct tmio_mmc_host *host, struct mmc_request *mrq)
{
struct renesas_sdhi *priv = host_to_priv(host);
if (priv->needs_adjust_hs400 && mrq->cmd->opcode == MMC_SEND_STATUS)
renesas_sdhi_adjust_hs400_mode_enable(host);
}
static void renesas_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable)
{
/* Iff regs are 8 byte apart, sdbuf is 64 bit. Otherwise always 32. */
int width = (host->bus_shift == 2) ? 64 : 32;
sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? DMA_ENABLE_DMASDRW : 0);
renesas_sdhi_sdbuf_width(host, enable ? width : 16);
}
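/*
 * Illustrative sketch, not part of the upstream driver: bus_shift
 * encodes the register stride as a power of two on top of the original
 * 2-byte spacing, which is what the 64-bit sdbuf test above relies on:
 */
static unsigned int __maybe_unused example_reg_spacing(unsigned int bus_shift)
{
return 2U << bus_shift; /* 0 -> 2 bytes, 1 -> 4 bytes, 2 -> 8 bytes */
}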
int renesas_sdhi_probe(struct platform_device *pdev,
const struct tmio_mmc_dma_ops *dma_ops,
const struct renesas_sdhi_of_data *of_data,
const struct renesas_sdhi_quirks *quirks)
{
struct tmio_mmc_data *mmd = pdev->dev.platform_data;
struct tmio_mmc_data *mmc_data;
struct renesas_sdhi_dma *dma_priv;
struct tmio_mmc_host *host;
struct renesas_sdhi *priv;
int num_irqs, irq, ret, i;
struct resource *res;
u16 ver;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
priv = devm_kzalloc(&pdev->dev, sizeof(struct renesas_sdhi),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->quirks = quirks;
mmc_data = &priv->mmc_data;
dma_priv = &priv->dma_priv;
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk), "cannot get clock");
priv->clkh = devm_clk_get_optional(&pdev->dev, "clkh");
if (IS_ERR(priv->clkh))
return dev_err_probe(&pdev->dev, PTR_ERR(priv->clkh), "cannot get clkh");
/*
* Some controllers provide a 2nd clock just to run the internal card
* detection logic. Unfortunately, the existing driver architecture does
* not support a separation of clocks for runtime PM usage. When
* native hotplug is used, the tmio driver assumes that the core
* must continue to run for card detect to stay active, so we cannot
* disable it.
* Additionally, it is prohibited to supply a clock to the core but not
* to the card detect circuit. As a result, when separate clocks are
* provided, we must treat them as a single virtual clock.
*/
priv->clk_cd = devm_clk_get_optional(&pdev->dev, "cd");
if (IS_ERR(priv->clk_cd))
return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk_cd), "cannot get cd clock");
priv->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
if (IS_ERR(priv->rstc))
return PTR_ERR(priv->rstc);
priv->pinctrl = devm_pinctrl_get(&pdev->dev);
if (!IS_ERR(priv->pinctrl)) {
priv->pins_default = pinctrl_lookup_state(priv->pinctrl,
PINCTRL_STATE_DEFAULT);
priv->pins_uhs = pinctrl_lookup_state(priv->pinctrl,
"state_uhs");
}
host = tmio_mmc_host_alloc(pdev, mmc_data);
if (IS_ERR(host))
return PTR_ERR(host);
if (of_data) {
mmc_data->flags |= of_data->tmio_flags;
mmc_data->ocr_mask = of_data->tmio_ocr_mask;
mmc_data->capabilities |= of_data->capabilities;
mmc_data->capabilities2 |= of_data->capabilities2;
mmc_data->dma_rx_offset = of_data->dma_rx_offset;
mmc_data->max_blk_count = of_data->max_blk_count;
mmc_data->max_segs = of_data->max_segs;
dma_priv->dma_buswidth = of_data->dma_buswidth;
host->bus_shift = of_data->bus_shift;
/* Fallback for old DTs */
if (!priv->clkh && of_data->sdhi_flags & SDHI_FLAG_NEED_CLKH_FALLBACK)
priv->clkh = clk_get_parent(clk_get_parent(priv->clk));
}
host->write16_hook = renesas_sdhi_write16_hook;
host->clk_enable = renesas_sdhi_clk_enable;
host->clk_disable = renesas_sdhi_clk_disable;
host->set_clock = renesas_sdhi_set_clock;
host->multi_io_quirk = renesas_sdhi_multi_io_quirk;
host->dma_ops = dma_ops;
if (sdhi_has_quirk(priv, hs400_disabled))
host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
/* For some SoCs, we disable the internal WP. A GPIO may override this */
if (mmc_can_gpio_ro(host->mmc))
mmc_data->capabilities2 &= ~MMC_CAP2_NO_WRITE_PROTECT;
/* SDR speeds are only available on Gen2+ */
if (mmc_data->flags & TMIO_MMC_MIN_RCAR2) {
/* card_busy caused issues on r8a73a4 (pre-Gen2) CD-less SDHI */
host->ops.card_busy = renesas_sdhi_card_busy;
host->ops.start_signal_voltage_switch =
renesas_sdhi_start_signal_voltage_switch;
host->sdcard_irq_setbit_mask = TMIO_STAT_ALWAYS_SET_27;
host->sdcard_irq_mask_all = TMIO_MASK_ALL_RCAR2;
host->reset = renesas_sdhi_reset;
} else {
host->sdcard_irq_mask_all = TMIO_MASK_ALL;
}
/* Originally, registers were 16 bits apart; they can be 32 or 64 nowadays */
if (!host->bus_shift && resource_size(res) > 0x100) /* old way to determine the shift */
host->bus_shift = 1;
if (mmd)
*mmc_data = *mmd;
dma_priv->filter = shdma_chan_filter;
dma_priv->enable = renesas_sdhi_enable_dma;
mmc_data->capabilities |= MMC_CAP_MMC_HIGHSPEED;
/*
* All SDHI blocks support 2-byte and larger block sizes in 4-bit
* bus width mode.
*/
mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES;
/*
* All SDHI blocks support SDIO IRQ signalling.
*/
mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
/* All SDHI have CMD12 control bit */
mmc_data->flags |= TMIO_MMC_HAVE_CMD12_CTRL;
/* All SDHI have SDIO status bits which must be 1 */
mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS;
/* All SDHI support HW busy detection */
mmc_data->flags |= TMIO_MMC_USE_BUSY_TIMEOUT;
dev_pm_domain_start(&pdev->dev);
ret = renesas_sdhi_clk_enable(host);
if (ret)
goto efree;
ver = sd_ctrl_read16(host, CTL_VERSION);
/* GEN2_SDR104 is the first known SDHI to use a 32-bit block count */
if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX)
mmc_data->max_blk_count = U16_MAX;
/* One Gen2 SDHI incarnation does NOT have a CBSY bit */
if (ver == SDHI_VER_GEN2_SDR50)
mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY;
if (ver == SDHI_VER_GEN3_SDMMC && sdhi_has_quirk(priv, hs400_calib_table)) {
host->fixup_request = renesas_sdhi_fixup_request;
priv->adjust_hs400_calib_table = *(
res->start == SDHI_GEN3_MMC0_ADDR ?
quirks->hs400_calib_table :
quirks->hs400_calib_table + 1);
}
/* these have an EXTOP bit */
if (ver >= SDHI_VER_GEN3_SD)
host->get_timeout_cycles = renesas_sdhi_gen3_get_cycles;
/* Check for SCC so we can reset it if needed */
if (of_data && of_data->scc_offset && ver >= SDHI_VER_GEN2_SDR104)
priv->scc_ctl = host->ctl + of_data->scc_offset;
/* Enable tuning iff we have an SCC and a supported mode */
if (priv->scc_ctl && (host->mmc->caps & MMC_CAP_UHS_SDR104 ||
host->mmc->caps2 & MMC_CAP2_HSX00_1_8V)) {
const struct renesas_sdhi_scc *taps = of_data->taps;
bool use_4tap = sdhi_has_quirk(priv, hs400_4taps);
bool hit = false;
for (i = 0; i < of_data->taps_num; i++) {
if (taps[i].clk_rate == 0 ||
taps[i].clk_rate == host->mmc->f_max) {
priv->scc_tappos = taps->tap;
priv->scc_tappos_hs400 = use_4tap ?
taps->tap_hs400_4tap :
taps->tap;
hit = true;
break;
}
}
if (!hit)
dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n");
host->check_retune = renesas_sdhi_check_scc_error;
host->ops.execute_tuning = renesas_sdhi_execute_tuning;
host->ops.prepare_hs400_tuning = renesas_sdhi_prepare_hs400_tuning;
host->ops.hs400_downgrade = renesas_sdhi_disable_scc;
host->ops.hs400_complete = renesas_sdhi_hs400_complete;
}
sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all);
num_irqs = platform_irq_count(pdev);
if (num_irqs < 0) {
ret = num_irqs;
goto eirq;
}
/* There must be at least one IRQ source */
if (!num_irqs) {
ret = -ENXIO;
goto eirq;
}
for (i = 0; i < num_irqs; i++) {
irq = platform_get_irq(pdev, i);
if (irq < 0) {
ret = irq;
goto eirq;
}
ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
dev_name(&pdev->dev), host);
if (ret)
goto eirq;
}
ret = tmio_mmc_host_probe(host);
if (ret < 0)
goto edisclk;
dev_info(&pdev->dev, "%s base at %pa, max clock rate %u MHz\n",
mmc_hostname(host->mmc), &res->start, host->mmc->f_max / 1000000);
return ret;
eirq:
tmio_mmc_host_remove(host);
edisclk:
renesas_sdhi_clk_disable(host);
efree:
tmio_mmc_host_free(host);
return ret;
}
EXPORT_SYMBOL_GPL(renesas_sdhi_probe);
void renesas_sdhi_remove(struct platform_device *pdev)
{
struct tmio_mmc_host *host = platform_get_drvdata(pdev);
tmio_mmc_host_remove(host);
renesas_sdhi_clk_disable(host);
tmio_mmc_host_free(host);
}
EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mmc/host/renesas_sdhi_core.c |
/*
* drivers/mmc/host/sdhci-spear.c
*
* Support for SDHCI platform devices of the SPEAr SoC family
*
* Copyright (C) 2010 ST Microelectronics
* Viresh Kumar <[email protected]>
*
* Inspired by sdhci-pltfm.c
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include "sdhci.h"
struct spear_sdhci {
struct clk *clk;
};
/* sdhci ops */
static const struct sdhci_ops sdhci_pltfm_ops = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
static int sdhci_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct spear_sdhci *sdhci;
struct device *dev;
int ret;
dev = pdev->dev.parent ? pdev->dev.parent : &pdev->dev;
host = sdhci_alloc_host(dev, sizeof(*sdhci));
if (IS_ERR(host)) {
ret = PTR_ERR(host);
dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");
goto err;
}
host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
dev_dbg(&pdev->dev, "unable to map iomem: %d\n", ret);
goto err_host;
}
host->hw_name = "sdhci";
host->ops = &sdhci_pltfm_ops;
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
ret = host->irq;
goto err_host;
}
host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
sdhci = sdhci_priv(host);
/* clk enable */
sdhci->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(sdhci->clk)) {
ret = PTR_ERR(sdhci->clk);
dev_dbg(&pdev->dev, "Error getting clock\n");
goto err_host;
}
ret = clk_prepare_enable(sdhci->clk);
if (ret) {
dev_dbg(&pdev->dev, "Error enabling clock\n");
goto err_host;
}
ret = clk_set_rate(sdhci->clk, 50000000);
if (ret)
dev_dbg(&pdev->dev, "Error setting desired clk, clk=%lu\n",
clk_get_rate(sdhci->clk));
/*
* It is optional to use GPIOs for sdhci card detection. If we
* find a descriptor using slot GPIO, we use it.
*/
ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
goto disable_clk;
ret = sdhci_add_host(host);
if (ret)
goto disable_clk;
platform_set_drvdata(pdev, host);
return 0;
disable_clk:
clk_disable_unprepare(sdhci->clk);
err_host:
sdhci_free_host(host);
err:
dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);
return ret;
}
static void sdhci_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct spear_sdhci *sdhci = sdhci_priv(host);
int dead = 0;
u32 scratch;
scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
if (scratch == (u32)-1)
dead = 1;
sdhci_remove_host(host, dead);
clk_disable_unprepare(sdhci->clk);
sdhci_free_host(host);
}
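/*
 * Illustrative sketch, not part of the upstream driver: reading
 * all-ones from a memory-mapped register is the conventional
 * "controller is gone" test used in the remove path above; a
 * hypothetical generic helper would be:
 */
static bool __maybe_unused example_mmio_dead(void __iomem *reg)
{
/* a surprise-removed PCI device reads back as 0xffffffff */
return readl(reg) == ~0U;
}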
#ifdef CONFIG_PM_SLEEP
static int sdhci_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct spear_sdhci *sdhci = sdhci_priv(host);
int ret;
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
ret = sdhci_suspend_host(host);
if (!ret)
clk_disable(sdhci->clk);
return ret;
}
static int sdhci_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct spear_sdhci *sdhci = sdhci_priv(host);
int ret;
ret = clk_enable(sdhci->clk);
if (ret) {
dev_dbg(dev, "Resume: Error enabling clock\n");
return ret;
}
return sdhci_resume_host(host);
}
#endif
static SIMPLE_DEV_PM_OPS(sdhci_pm_ops, sdhci_suspend, sdhci_resume);
static const struct of_device_id sdhci_spear_id_table[] = {
{ .compatible = "st,spear300-sdhci" },
{}
};
MODULE_DEVICE_TABLE(of, sdhci_spear_id_table);
static struct platform_driver sdhci_driver = {
.driver = {
.name = "sdhci",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &sdhci_pm_ops,
.of_match_table = sdhci_spear_id_table,
},
.probe = sdhci_probe,
.remove_new = sdhci_remove,
};
module_platform_driver(sdhci_driver);
MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
MODULE_AUTHOR("Viresh Kumar <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mmc/host/sdhci-spear.c |
// SPDX-License-Identifier: GPL-2.0
/*
* DMA support using SYS DMAC with the SDHI SD/SDIO controller
*
* Copyright (C) 2016-19 Renesas Electronics Corporation
* Copyright (C) 2016-19 Sang Engineering, Wolfram Sang
* Copyright (C) 2017 Horms Solutions, Simon Horman
* Copyright (C) 2010-2011 Guennadi Liakhovetski
*/
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sys_soc.h>
#include "renesas_sdhi.h"
#include "tmio_mmc.h"
#define TMIO_MMC_MIN_DMA_LEN 8
static const struct renesas_sdhi_of_data of_default_cfg = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
};
static const struct renesas_sdhi_of_data of_rz_compatible = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT |
TMIO_MMC_HAVE_CBSY,
.tmio_ocr_mask = MMC_VDD_32_33,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
MMC_CAP_WAIT_WHILE_BUSY,
};
static const struct renesas_sdhi_of_data of_rcar_gen1_compatible = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
MMC_CAP_WAIT_WHILE_BUSY,
.capabilities2 = MMC_CAP2_NO_WRITE_PROTECT,
};
/* Definitions for sampling clocks */
static struct renesas_sdhi_scc rcar_gen2_scc_taps[] = {
{
.clk_rate = 156000000,
.tap = 0x00000703,
},
{
.clk_rate = 0,
.tap = 0x00000300,
},
};
static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY,
.capabilities2 = MMC_CAP2_NO_WRITE_PROTECT,
.dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES,
.dma_rx_offset = 0x2000,
.scc_offset = 0x0300,
.taps = rcar_gen2_scc_taps,
.taps_num = ARRAY_SIZE(rcar_gen2_scc_taps),
.max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE,
};
static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
{ .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
{ .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
{ .compatible = "renesas,sdhi-r7s72100", .data = &of_rz_compatible, },
{ .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, },
{ .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, },
{ .compatible = "renesas,sdhi-r8a7743", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7745", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,rcar-gen1-sdhi", .data = &of_rcar_gen1_compatible, },
{ .compatible = "renesas,rcar-gen2-sdhi", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-shmobile" },
{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match);
static void renesas_sdhi_sys_dmac_enable_dma(struct tmio_mmc_host *host,
bool enable)
{
struct renesas_sdhi *priv = host_to_priv(host);
if (!host->chan_tx || !host->chan_rx)
return;
if (priv->dma_priv.enable)
priv->dma_priv.enable(host, enable);
}
static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
{
renesas_sdhi_sys_dmac_enable_dma(host, false);
if (host->chan_rx)
dmaengine_terminate_sync(host->chan_rx);
if (host->chan_tx)
dmaengine_terminate_sync(host->chan_tx);
renesas_sdhi_sys_dmac_enable_dma(host, true);
}
static void renesas_sdhi_sys_dmac_dataend_dma(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
complete(&priv->dma_priv.dma_dataend);
}
static void renesas_sdhi_sys_dmac_dma_callback(void *arg)
{
struct tmio_mmc_host *host = arg;
struct renesas_sdhi *priv = host_to_priv(host);
spin_lock_irq(&host->lock);
if (!host->data)
goto out;
if (host->data->flags & MMC_DATA_READ)
dma_unmap_sg(host->chan_rx->device->dev,
host->sg_ptr, host->sg_len,
DMA_FROM_DEVICE);
else
dma_unmap_sg(host->chan_tx->device->dev,
host->sg_ptr, host->sg_len,
DMA_TO_DEVICE);
spin_unlock_irq(&host->lock);
wait_for_completion(&priv->dma_priv.dma_dataend);
spin_lock_irq(&host->lock);
tmio_mmc_do_data_irq(host);
out:
spin_unlock_irq(&host->lock);
}
static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_rx;
dma_cookie_t cookie;
int ret, i;
bool aligned = true, multiple = true;
unsigned int align = 1; /* 2-byte alignment */
for_each_sg(sg, sg_tmp, host->sg_len, i) {
if (sg_tmp->offset & align)
aligned = false;
if (sg_tmp->length & align) {
multiple = false;
break;
}
}
if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
(align & PAGE_MASK))) || !multiple) {
ret = -EINVAL;
goto pio;
}
if (sg->length < TMIO_MMC_MIN_DMA_LEN)
return;
/* A single sg element may be unaligned; use our bounce buffer in that case */
if (!aligned) {
sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
host->sg_ptr = &host->bounce_sg;
sg = host->sg_ptr;
}
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
if (ret > 0)
desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_DEV_TO_MEM,
DMA_CTRL_ACK);
if (desc) {
reinit_completion(&priv->dma_priv.dma_dataend);
desc->callback = renesas_sdhi_sys_dmac_dma_callback;
desc->callback_param = host;
cookie = dmaengine_submit(desc);
if (cookie < 0) {
desc = NULL;
ret = cookie;
}
host->dma_on = true;
}
pio:
if (!desc) {
/* DMA failed, fall back to PIO */
renesas_sdhi_sys_dmac_enable_dma(host, false);
if (ret >= 0)
ret = -EIO;
host->chan_rx = NULL;
dma_release_channel(chan);
/* Free the Tx channel too */
chan = host->chan_tx;
if (chan) {
host->chan_tx = NULL;
dma_release_channel(chan);
}
dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret);
}
}
static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_tx;
dma_cookie_t cookie;
int ret, i;
bool aligned = true, multiple = true;
unsigned int align = 1; /* 2-byte alignment */
for_each_sg(sg, sg_tmp, host->sg_len, i) {
if (sg_tmp->offset & align)
aligned = false;
if (sg_tmp->length & align) {
multiple = false;
break;
}
}
if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
(align & PAGE_MASK))) || !multiple) {
ret = -EINVAL;
goto pio;
}
if (sg->length < TMIO_MMC_MIN_DMA_LEN)
return;
/* A single sg element may be unaligned; use our bounce buffer in that case */
if (!aligned) {
void *sg_vaddr = kmap_local_page(sg_page(sg));
sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
memcpy(host->bounce_buf, sg_vaddr + sg->offset, host->bounce_sg.length);
kunmap_local(sg_vaddr);
host->sg_ptr = &host->bounce_sg;
sg = host->sg_ptr;
}
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
if (ret > 0)
desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_MEM_TO_DEV,
DMA_CTRL_ACK);
if (desc) {
reinit_completion(&priv->dma_priv.dma_dataend);
desc->callback = renesas_sdhi_sys_dmac_dma_callback;
desc->callback_param = host;
cookie = dmaengine_submit(desc);
if (cookie < 0) {
desc = NULL;
ret = cookie;
}
host->dma_on = true;
}
pio:
if (!desc) {
/* DMA failed, fall back to PIO */
renesas_sdhi_sys_dmac_enable_dma(host, false);
if (ret >= 0)
ret = -EIO;
host->chan_tx = NULL;
dma_release_channel(chan);
/* Free the Rx channel too */
chan = host->chan_rx;
if (chan) {
host->chan_rx = NULL;
dma_release_channel(chan);
}
dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret);
}
}
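/*
 * Illustrative sketch, not part of the upstream driver: the Rx and Tx
 * paths above track "aligned" and "multiple" separately because a
 * single unaligned element can still go through the bounce buffer.
 * Collapsed into one predicate, the 2-byte DMA requirement reads:
 */
static bool __maybe_unused example_sg_dma_ok(struct scatterlist *sgl,
unsigned int nents)
{
struct scatterlist *sg;
unsigned int i;
for_each_sg(sgl, sg, nents, i)
if ((sg->offset | sg->length) & 1)
return false;
return true;
}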
static void renesas_sdhi_sys_dmac_start_dma(struct tmio_mmc_host *host,
struct mmc_data *data)
{
if (data->flags & MMC_DATA_READ) {
if (host->chan_rx)
renesas_sdhi_sys_dmac_start_dma_rx(host);
} else {
if (host->chan_tx)
renesas_sdhi_sys_dmac_start_dma_tx(host);
}
}
static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv)
{
struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
struct dma_chan *chan = NULL;
spin_lock_irq(&host->lock);
if (host->data) {
if (host->data->flags & MMC_DATA_READ)
chan = host->chan_rx;
else
chan = host->chan_tx;
}
spin_unlock_irq(&host->lock);
tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
if (chan)
dma_async_issue_pending(chan);
}
static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata)
{
struct renesas_sdhi *priv = host_to_priv(host);
/* We can either use DMA for both Tx and Rx or not use it at all */
if (!host->pdev->dev.of_node &&
(!pdata->chan_priv_tx || !pdata->chan_priv_rx))
return;
if (!host->chan_tx && !host->chan_rx) {
struct resource *res = platform_get_resource(host->pdev,
IORESOURCE_MEM, 0);
struct dma_slave_config cfg = {};
dma_cap_mask_t mask;
int ret;
if (!res)
return;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
host->chan_tx = dma_request_slave_channel_compat(mask,
priv->dma_priv.filter, pdata->chan_priv_tx,
&host->pdev->dev, "tx");
dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
host->chan_tx);
if (!host->chan_tx)
return;
cfg.direction = DMA_MEM_TO_DEV;
cfg.dst_addr = res->start +
(CTL_SD_DATA_PORT << host->bus_shift);
cfg.dst_addr_width = priv->dma_priv.dma_buswidth;
if (!cfg.dst_addr_width)
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
cfg.src_addr = 0;
ret = dmaengine_slave_config(host->chan_tx, &cfg);
if (ret < 0)
goto ecfgtx;
host->chan_rx = dma_request_slave_channel_compat(mask,
priv->dma_priv.filter, pdata->chan_priv_rx,
&host->pdev->dev, "rx");
dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
host->chan_rx);
if (!host->chan_rx)
goto ereqrx;
cfg.direction = DMA_DEV_TO_MEM;
cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
cfg.src_addr_width = priv->dma_priv.dma_buswidth;
if (!cfg.src_addr_width)
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
cfg.dst_addr = 0;
ret = dmaengine_slave_config(host->chan_rx, &cfg);
if (ret < 0)
goto ecfgrx;
host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
if (!host->bounce_buf)
goto ebouncebuf;
init_completion(&priv->dma_priv.dma_dataend);
tasklet_init(&host->dma_issue,
renesas_sdhi_sys_dmac_issue_tasklet_fn,
(unsigned long)host);
}
renesas_sdhi_sys_dmac_enable_dma(host, true);
return;
ebouncebuf:
ecfgrx:
dma_release_channel(host->chan_rx);
host->chan_rx = NULL;
ereqrx:
ecfgtx:
dma_release_channel(host->chan_tx);
host->chan_tx = NULL;
}
static void renesas_sdhi_sys_dmac_release_dma(struct tmio_mmc_host *host)
{
if (host->chan_tx) {
struct dma_chan *chan = host->chan_tx;
host->chan_tx = NULL;
dma_release_channel(chan);
}
if (host->chan_rx) {
struct dma_chan *chan = host->chan_rx;
host->chan_rx = NULL;
dma_release_channel(chan);
}
if (host->bounce_buf) {
free_pages((unsigned long)host->bounce_buf, 0);
host->bounce_buf = NULL;
}
}
static const struct tmio_mmc_dma_ops renesas_sdhi_sys_dmac_dma_ops = {
.start = renesas_sdhi_sys_dmac_start_dma,
.enable = renesas_sdhi_sys_dmac_enable_dma,
.request = renesas_sdhi_sys_dmac_request_dma,
.release = renesas_sdhi_sys_dmac_release_dma,
.abort = renesas_sdhi_sys_dmac_abort_dma,
.dataend = renesas_sdhi_sys_dmac_dataend_dma,
};
static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
{
return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops,
of_device_get_match_data(&pdev->dev), NULL);
}
static const struct dev_pm_ops renesas_sdhi_sys_dmac_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
tmio_mmc_host_runtime_resume,
NULL)
};
static struct platform_driver renesas_sys_dmac_sdhi_driver = {
.driver = {
.name = "sh_mobile_sdhi",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &renesas_sdhi_sys_dmac_dev_pm_ops,
.of_match_table = renesas_sdhi_sys_dmac_of_match,
},
.probe = renesas_sdhi_sys_dmac_probe,
.remove_new = renesas_sdhi_remove,
};
module_platform_driver(renesas_sys_dmac_sdhi_driver);
MODULE_DESCRIPTION("Renesas SDHI driver");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sh_mobile_sdhi");
| linux-master | drivers/mmc/host/renesas_sdhi_sys_dmac.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 BayHub Technology Ltd.
*
* Authors: Peter Guo <[email protected]>
* Adam Lee <[email protected]>
* Ernest Zhang <[email protected]>
*/
#include <linux/pci.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/bitfield.h>
#include "sdhci.h"
#include "sdhci-pci.h"
/*
* O2Micro device registers
*/
#define O2_SD_PCIE_SWITCH 0x54
#define O2_SD_MISC_REG5 0x64
#define O2_SD_LD0_CTRL 0x68
#define O2_SD_DEV_CTRL 0x88
#define O2_SD_LOCK_WP 0xD3
#define O2_SD_TEST_REG 0xD4
#define O2_SD_FUNC_REG0 0xDC
#define O2_SD_MULTI_VCC3V 0xEE
#define O2_SD_CLKREQ 0xEC
#define O2_SD_CAPS 0xE0
#define O2_SD_ADMA1 0xE2
#define O2_SD_ADMA2 0xE7
#define O2_SD_MISC_CTRL2 0xF0
#define O2_SD_INF_MOD 0xF1
#define O2_SD_MISC_CTRL4 0xFC
#define O2_SD_MISC_CTRL 0x1C0
#define O2_SD_EXP_INT_REG 0x1E0
#define O2_SD_PWR_FORCE_L0 0x0002
#define O2_SD_TUNING_CTRL 0x300
#define O2_SD_PLL_SETTING 0x304
#define O2_SD_MISC_SETTING 0x308
#define O2_SD_CLK_SETTING 0x328
#define O2_SD_CAP_REG2 0x330
#define O2_SD_CAP_REG0 0x334
#define O2_SD_UHS1_CAP_SETTING 0x33C
#define O2_SD_DELAY_CTRL 0x350
#define O2_SD_OUTPUT_CLK_SOURCE_SWITCH 0x354
#define O2_SD_UHS2_L1_CTRL 0x35C
#define O2_SD_FUNC_REG3 0x3E0
#define O2_SD_FUNC_REG4 0x3E4
#define O2_SD_PARA_SET_REG1 0x444
#define O2_SD_VDDX_CTRL_REG 0x508
#define O2_SD_GPIO_CTRL_REG1 0x510
#define O2_SD_LED_ENABLE BIT(6)
#define O2_SD_FREG0_LEDOFF BIT(13)
#define O2_SD_SEL_DLL BIT(16)
#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22)
#define O2_SD_PHASE_MASK GENMASK(23, 20)
#define O2_SD_FIX_PHASE FIELD_PREP(O2_SD_PHASE_MASK, 0x9)
#define O2_SD_VENDOR_SETTING 0x110
#define O2_SD_VENDOR_SETTING2 0x1C8
#define O2_SD_HW_TUNING_DISABLE BIT(4)
#define O2_PLL_DLL_WDT_CONTROL1 0x1CC
#define O2_PLL_FORCE_ACTIVE BIT(18)
#define O2_PLL_LOCK_STATUS BIT(14)
#define O2_PLL_SOFT_RESET BIT(12)
#define O2_DLL_LOCK_STATUS BIT(11)
#define O2_SD_DETECT_SETTING 0x324
static const u32 dmdn_table[] = {0x2B1C0000,
0x2C1A0000, 0x371B0000, 0x35100000};
#define DMDN_SZ ARRAY_SIZE(dmdn_table)
struct o2_host {
u8 dll_adjust_count;
};
static void sdhci_o2_wait_card_detect_stable(struct sdhci_host *host)
{
ktime_t timeout;
u32 scratch32;
/* Wait max 50 ms */
timeout = ktime_add_ms(ktime_get(), 50);
while (1) {
bool timedout = ktime_after(ktime_get(), timeout);
scratch32 = sdhci_readl(host, SDHCI_PRESENT_STATE);
if ((scratch32 & SDHCI_CARD_PRESENT) >> SDHCI_CARD_PRES_SHIFT
== (scratch32 & SDHCI_CD_LVL) >> SDHCI_CD_LVL_SHIFT)
break;
if (timedout) {
pr_err("%s: Card Detect debounce never finished.\n",
mmc_hostname(host->mmc));
sdhci_dumpregs(host);
return;
}
udelay(10);
}
}
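/*
 * Illustrative sketch, not part of the upstream driver: the loop above
 * waits for the debounced card-present bit to agree with the raw CD pin
 * level, so the stability test reduces to a boolean comparison:
 */
static bool __maybe_unused example_cd_stable(u32 present_state)
{
bool debounced = present_state & SDHCI_CARD_PRESENT;
bool raw_pin = present_state & SDHCI_CD_LVL;
return debounced == raw_pin;
}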
static void sdhci_o2_enable_internal_clock(struct sdhci_host *host)
{
ktime_t timeout;
u16 scratch;
u32 scratch32;
/* PLL software reset */
scratch32 = sdhci_readl(host, O2_PLL_DLL_WDT_CONTROL1);
scratch32 |= O2_PLL_SOFT_RESET;
sdhci_writel(host, scratch32, O2_PLL_DLL_WDT_CONTROL1);
udelay(1);
scratch32 &= ~(O2_PLL_SOFT_RESET);
sdhci_writel(host, scratch32, O2_PLL_DLL_WDT_CONTROL1);
/* PLL force active */
scratch32 |= O2_PLL_FORCE_ACTIVE;
sdhci_writel(host, scratch32, O2_PLL_DLL_WDT_CONTROL1);
/* Wait max 20 ms */
timeout = ktime_add_ms(ktime_get(), 20);
while (1) {
bool timedout = ktime_after(ktime_get(), timeout);
scratch = sdhci_readw(host, O2_PLL_DLL_WDT_CONTROL1);
if (scratch & O2_PLL_LOCK_STATUS)
break;
if (timedout) {
pr_err("%s: Internal clock never stabilised.\n",
mmc_hostname(host->mmc));
sdhci_dumpregs(host);
goto out;
}
udelay(10);
}
/* Wait for card detect finish */
udelay(1);
sdhci_o2_wait_card_detect_stable(host);
out:
/* Cancel PLL force active */
scratch32 = sdhci_readl(host, O2_PLL_DLL_WDT_CONTROL1);
scratch32 &= ~O2_PLL_FORCE_ACTIVE;
sdhci_writel(host, scratch32, O2_PLL_DLL_WDT_CONTROL1);
}
static int sdhci_o2_get_cd(struct mmc_host *mmc)
{
struct sdhci_host *host = mmc_priv(mmc);
if (!(sdhci_readw(host, O2_PLL_DLL_WDT_CONTROL1) & O2_PLL_LOCK_STATUS))
sdhci_o2_enable_internal_clock(host);
else
sdhci_o2_wait_card_detect_stable(host);
return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
static void o2_pci_set_baseclk(struct sdhci_pci_chip *chip, u32 value)
{
u32 scratch_32;
pci_read_config_dword(chip->pdev,
O2_SD_PLL_SETTING, &scratch_32);
scratch_32 &= 0x0000FFFF;
scratch_32 |= value;
pci_write_config_dword(chip->pdev,
O2_SD_PLL_SETTING, scratch_32);
}
static u32 sdhci_o2_pll_dll_wdt_control(struct sdhci_host *host)
{
return sdhci_readl(host, O2_PLL_DLL_WDT_CONTROL1);
}
/*
 * This function detects the DLL lock status. Since the DLL lock status
 * bit toggles randomly at very short intervals, it needs to be polled
 * as fast as possible, so sleep_us is set to 1 microsecond.
 */
static int sdhci_o2_wait_dll_detect_lock(struct sdhci_host *host)
{
u32 scratch32 = 0;
return readx_poll_timeout(sdhci_o2_pll_dll_wdt_control, host,
scratch32, !(scratch32 & O2_DLL_LOCK_STATUS), 1, 1000000);
}
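/*
 * Illustrative sketch, not part of the upstream driver:
 * readx_poll_timeout() wants a single-argument accessor, which is why
 * the one-line wrapper above exists. The same pattern works for any
 * SDHCI register, e.g. a hypothetical wait for card insertion:
 */
static u32 __maybe_unused example_read_present_state(struct sdhci_host *host)
{
return sdhci_readl(host, SDHCI_PRESENT_STATE);
}
static int __maybe_unused example_wait_card_present(struct sdhci_host *host)
{
u32 state;
/* poll every 10 us, give up after 100 ms */
return readx_poll_timeout(example_read_present_state, host, state,
state & SDHCI_CARD_PRESENT, 10, 100000);
}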
static void sdhci_o2_set_tuning_mode(struct sdhci_host *host)
{
u16 reg;
/* enable hardware tuning */
reg = sdhci_readw(host, O2_SD_VENDOR_SETTING);
reg &= ~O2_SD_HW_TUNING_DISABLE;
sdhci_writew(host, reg, O2_SD_VENDOR_SETTING);
}
static void __sdhci_o2_execute_tuning(struct sdhci_host *host, u32 opcode)
{
int i;
sdhci_send_tuning(host, opcode);
for (i = 0; i < 150; i++) {
u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
if (ctrl & SDHCI_CTRL_TUNED_CLK) {
host->tuning_done = true;
return;
}
pr_warn("%s: HW tuning failed !\n",
mmc_hostname(host->mmc));
break;
}
mdelay(1);
}
pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
mmc_hostname(host->mmc));
sdhci_reset_tuning(host);
}
/*
 * This function is used to fix the O2 DLL shift issue.
 * It is not necessary to detect card presence before recovery:
 * first, it is used with the BHT eMMC card, which is embedded;
 * second, card presence is detected outside of the execute tuning
 * function before recovery is called.
 */
static int sdhci_o2_dll_recovery(struct sdhci_host *host)
{
int ret = 0;
u8 scratch_8 = 0;
u32 scratch_32 = 0;
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct sdhci_pci_chip *chip = slot->chip;
struct o2_host *o2_host = sdhci_pci_priv(slot);
/* UnLock WP */
pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch_8);
scratch_8 &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
while (o2_host->dll_adjust_count < DMDN_SZ && !ret) {
/* Disable clock */
sdhci_writeb(host, 0, SDHCI_CLOCK_CONTROL);
/* PLL software reset */
scratch_32 = sdhci_readl(host, O2_PLL_DLL_WDT_CONTROL1);
scratch_32 |= O2_PLL_SOFT_RESET;
sdhci_writel(host, scratch_32, O2_PLL_DLL_WDT_CONTROL1);
pci_read_config_dword(chip->pdev,
O2_SD_FUNC_REG4,
&scratch_32);
/* Enable Base Clk setting change */
scratch_32 |= O2_SD_FREG4_ENABLE_CLK_SET;
pci_write_config_dword(chip->pdev, O2_SD_FUNC_REG4, scratch_32);
o2_pci_set_baseclk(chip, dmdn_table[o2_host->dll_adjust_count]);
/* Enable internal clock */
scratch_8 = SDHCI_CLOCK_INT_EN;
sdhci_writeb(host, scratch_8, SDHCI_CLOCK_CONTROL);
if (sdhci_o2_get_cd(host->mmc)) {
/*
 * need to wait at least 5 ms for the DLL status to stabilize
 * after enabling the internal clock
 */
usleep_range(5000, 6000);
if (sdhci_o2_wait_dll_detect_lock(host)) {
scratch_8 |= SDHCI_CLOCK_CARD_EN;
sdhci_writeb(host, scratch_8,
SDHCI_CLOCK_CONTROL);
ret = 1;
} else {
pr_warn("%s: DLL unlocked when dll_adjust_count is %d.\n",
mmc_hostname(host->mmc),
o2_host->dll_adjust_count);
}
} else {
pr_err("%s: card present detect failed.\n",
mmc_hostname(host->mmc));
break;
}
o2_host->dll_adjust_count++;
}
if (!ret && o2_host->dll_adjust_count == DMDN_SZ)
pr_err("%s: DLL adjust over max times\n",
mmc_hostname(host->mmc));
/* Lock WP */
pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch_8);
scratch_8 |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
return ret;
}
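/*
 * Illustrative sketch, not part of the upstream driver: the unlock/lock
 * pairs that bracket most config-space writes in this file toggle bit 7
 * of O2_SD_LOCK_WP; hypothetical helpers factoring out the idiom:
 */
static void __maybe_unused example_o2_unlock_wp(struct sdhci_pci_chip *chip)
{
u8 val;
pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &val);
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, val & 0x7f);
}
static void __maybe_unused example_o2_lock_wp(struct sdhci_pci_chip *chip)
{
u8 val;
pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &val);
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, val | 0x80);
}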
static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct sdhci_pci_chip *chip = slot->chip;
int current_bus_width = 0;
u32 scratch32 = 0;
u16 scratch = 0;
u8 scratch_8 = 0;
u32 reg_val;
/*
 * This handler implements the hardware tuning that is specific to
 * this controller. Fall back to the standard method for other timings.
 */
if ((host->timing != MMC_TIMING_MMC_HS200) &&
(host->timing != MMC_TIMING_UHS_SDR104) &&
(host->timing != MMC_TIMING_UHS_SDR50))
return sdhci_execute_tuning(mmc, opcode);
if (WARN_ON(!mmc_op_tuning(opcode)))
return -EINVAL;
/* Force power mode enter L0 */
scratch = sdhci_readw(host, O2_SD_MISC_CTRL);
scratch |= O2_SD_PWR_FORCE_L0;
sdhci_writew(host, scratch, O2_SD_MISC_CTRL);
/* Update output phase */
switch (chip->pdev->device) {
case PCI_DEVICE_ID_O2_SDS0:
case PCI_DEVICE_ID_O2_SEABIRD0:
case PCI_DEVICE_ID_O2_SEABIRD1:
case PCI_DEVICE_ID_O2_SDS1:
case PCI_DEVICE_ID_O2_FUJIN2:
/* Stop clk */
reg_val = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
reg_val &= ~SDHCI_CLOCK_CARD_EN;
sdhci_writew(host, reg_val, SDHCI_CLOCK_CONTROL);
if (host->timing == MMC_TIMING_MMC_HS200 ||
host->timing == MMC_TIMING_UHS_SDR104) {
/* UnLock WP */
pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
scratch_8 &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
/* Set pcr 0x354[16] to choose dll clock, and set the default phase */
pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &reg_val);
reg_val &= ~(O2_SD_SEL_DLL | O2_SD_PHASE_MASK);
reg_val |= (O2_SD_SEL_DLL | O2_SD_FIX_PHASE);
pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, reg_val);
/* Lock WP */
pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
scratch_8 |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
}
/* Start clk */
reg_val = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
reg_val |= SDHCI_CLOCK_CARD_EN;
sdhci_writew(host, reg_val, SDHCI_CLOCK_CONTROL);
break;
default:
break;
}
/* wait for DLL lock, timeout value 5 ms */
if (readx_poll_timeout(sdhci_o2_pll_dll_wdt_control, host,
scratch32, (scratch32 & O2_DLL_LOCK_STATUS), 1, 5000))
pr_warn("%s: DLL can't lock in 5ms after force L0 during tuning.\n",
mmc_hostname(host->mmc));
/*
 * Determine whether the tuning was caused by a DLL shift.
 * If it was, sdhci_o2_dll_recovery() must be called.
 */
if (!sdhci_o2_wait_dll_detect_lock(host))
if (!sdhci_o2_dll_recovery(host)) {
pr_err("%s: o2 dll recovery failed\n",
mmc_hostname(host->mmc));
return -EINVAL;
}
/*
 * The O2 SDHCI host does not support 8-bit eMMC tuning
 */
if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
current_bus_width = mmc->ios.bus_width;
mmc->ios.bus_width = MMC_BUS_WIDTH_4;
sdhci_set_bus_width(host, MMC_BUS_WIDTH_4);
}
sdhci_o2_set_tuning_mode(host);
sdhci_start_tuning(host);
__sdhci_o2_execute_tuning(host, opcode);
sdhci_end_tuning(host);
if (current_bus_width == MMC_BUS_WIDTH_8) {
mmc->ios.bus_width = MMC_BUS_WIDTH_8;
sdhci_set_bus_width(host, current_bus_width);
}
/* Cancel force power mode enter L0 */
scratch = sdhci_readw(host, O2_SD_MISC_CTRL);
scratch &= ~(O2_SD_PWR_FORCE_L0);
sdhci_writew(host, scratch, O2_SD_MISC_CTRL);
sdhci_reset(host, SDHCI_RESET_CMD);
sdhci_reset(host, SDHCI_RESET_DATA);
host->flags &= ~SDHCI_HS400_TUNING;
return 0;
}
static void o2_pci_led_enable(struct sdhci_pci_chip *chip)
{
int ret;
u32 scratch_32;
/* Set led of SD host function enable */
ret = pci_read_config_dword(chip->pdev,
O2_SD_FUNC_REG0, &scratch_32);
if (ret)
return;
scratch_32 &= ~O2_SD_FREG0_LEDOFF;
pci_write_config_dword(chip->pdev,
O2_SD_FUNC_REG0, scratch_32);
ret = pci_read_config_dword(chip->pdev,
O2_SD_TEST_REG, &scratch_32);
if (ret)
return;
scratch_32 |= O2_SD_LED_ENABLE;
pci_write_config_dword(chip->pdev,
O2_SD_TEST_REG, scratch_32);
}
static void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
{
u32 scratch_32;
int ret;
/* Improve write performance for SD3.0 */
ret = pci_read_config_dword(chip->pdev, O2_SD_DEV_CTRL, &scratch_32);
if (ret)
return;
scratch_32 &= ~((1 << 12) | (1 << 13) | (1 << 14));
pci_write_config_dword(chip->pdev, O2_SD_DEV_CTRL, scratch_32);
/* Enable Link abnormal reset generating Reset */
ret = pci_read_config_dword(chip->pdev, O2_SD_MISC_REG5, &scratch_32);
if (ret)
return;
scratch_32 &= ~((1 << 19) | (1 << 11));
scratch_32 |= (1 << 10);
pci_write_config_dword(chip->pdev, O2_SD_MISC_REG5, scratch_32);
/* set card power over current protection */
ret = pci_read_config_dword(chip->pdev, O2_SD_TEST_REG, &scratch_32);
if (ret)
return;
scratch_32 |= (1 << 4);
pci_write_config_dword(chip->pdev, O2_SD_TEST_REG, scratch_32);
/* adjust the output delay for SD mode */
pci_write_config_dword(chip->pdev, O2_SD_DELAY_CTRL, 0x00002492);
/* Set the output voltage setting of Aux 1.2v LDO */
ret = pci_read_config_dword(chip->pdev, O2_SD_LD0_CTRL, &scratch_32);
if (ret)
return;
scratch_32 &= ~(3 << 12);
pci_write_config_dword(chip->pdev, O2_SD_LD0_CTRL, scratch_32);
/* Set Max power supply capability of SD host */
ret = pci_read_config_dword(chip->pdev, O2_SD_CAP_REG0, &scratch_32);
if (ret)
return;
scratch_32 &= ~(0x01FE);
scratch_32 |= 0x00CC;
pci_write_config_dword(chip->pdev, O2_SD_CAP_REG0, scratch_32);
/* Set DLL Tuning Window */
ret = pci_read_config_dword(chip->pdev,
O2_SD_TUNING_CTRL, &scratch_32);
if (ret)
return;
scratch_32 &= ~(0x000000FF);
scratch_32 |= 0x00000066;
pci_write_config_dword(chip->pdev, O2_SD_TUNING_CTRL, scratch_32);
/* Set UHS2 T_EIDLE */
ret = pci_read_config_dword(chip->pdev,
O2_SD_UHS2_L1_CTRL, &scratch_32);
if (ret)
return;
scratch_32 &= ~(0x000000FC);
scratch_32 |= 0x00000084;
pci_write_config_dword(chip->pdev, O2_SD_UHS2_L1_CTRL, scratch_32);
/* Set UHS2 Termination */
ret = pci_read_config_dword(chip->pdev, O2_SD_FUNC_REG3, &scratch_32);
if (ret)
return;
scratch_32 &= ~((1 << 21) | (1 << 30));
pci_write_config_dword(chip->pdev, O2_SD_FUNC_REG3, scratch_32);
/* Set L1 Entrance Timer */
ret = pci_read_config_dword(chip->pdev, O2_SD_CAPS, &scratch_32);
if (ret)
return;
scratch_32 &= ~(0xf0000000);
scratch_32 |= 0x30000000;
pci_write_config_dword(chip->pdev, O2_SD_CAPS, scratch_32);
ret = pci_read_config_dword(chip->pdev,
O2_SD_MISC_CTRL4, &scratch_32);
if (ret)
return;
scratch_32 &= ~(0x000f0000);
scratch_32 |= 0x00080000;
pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL4, scratch_32);
}
static void sdhci_pci_o2_enable_msi(struct sdhci_pci_chip *chip,
struct sdhci_host *host)
{
int ret;
ret = pci_find_capability(chip->pdev, PCI_CAP_ID_MSI);
if (!ret) {
pr_info("%s: unsupported MSI, use INTx irq\n",
mmc_hostname(host->mmc));
return;
}
ret = pci_alloc_irq_vectors(chip->pdev, 1, 1,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (ret < 0) {
pr_err("%s: enable PCI MSI failed, err=%d\n",
mmc_hostname(host->mmc), ret);
return;
}
host->irq = pci_irq_vector(chip->pdev, 0);
}
static void sdhci_o2_enable_clk(struct sdhci_host *host, u16 clk)
{
/* Enable internal clock */
clk |= SDHCI_CLOCK_INT_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
sdhci_o2_enable_internal_clock(host);
if (sdhci_o2_get_cd(host->mmc)) {
clk |= SDHCI_CLOCK_CARD_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
}
static void sdhci_pci_o2_set_clock(struct sdhci_host *host, unsigned int clock)
{
u16 clk;
u8 scratch;
u32 scratch_32;
u32 dmdn_208m, dmdn_200m;
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct sdhci_pci_chip *chip = slot->chip;
host->mmc->actual_clock = 0;
sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
if (clock == 0)
return;
/* UnLock WP */
pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
if (chip->pdev->device == PCI_DEVICE_ID_O2_GG8_9860 ||
chip->pdev->device == PCI_DEVICE_ID_O2_GG8_9861 ||
chip->pdev->device == PCI_DEVICE_ID_O2_GG8_9862 ||
chip->pdev->device == PCI_DEVICE_ID_O2_GG8_9863) {
dmdn_208m = 0x2c500000;
dmdn_200m = 0x25200000;
} else {
dmdn_208m = 0x2c280000;
dmdn_200m = 0x25100000;
}
if ((host->timing == MMC_TIMING_UHS_SDR104) && (clock == 200000000)) {
pci_read_config_dword(chip->pdev, O2_SD_PLL_SETTING, &scratch_32);
if ((scratch_32 & 0xFFFF0000) != dmdn_208m)
o2_pci_set_baseclk(chip, dmdn_208m);
} else {
pci_read_config_dword(chip->pdev, O2_SD_PLL_SETTING, &scratch_32);
if ((scratch_32 & 0xFFFF0000) != dmdn_200m)
o2_pci_set_baseclk(chip, dmdn_200m);
}
pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &scratch_32);
scratch_32 &= ~(O2_SD_SEL_DLL | O2_SD_PHASE_MASK);
pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, scratch_32);
/* Lock WP */
pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
sdhci_o2_enable_clk(host, clk);
}
static int sdhci_pci_o2_init_sd_express(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct sdhci_pci_chip *chip = slot->chip;
u8 scratch8;
u16 scratch16;
int ret;
/* Disable clock */
sdhci_writeb(host, 0, SDHCI_CLOCK_CONTROL);
/* Set VDD2 voltage */
scratch8 = sdhci_readb(host, SDHCI_POWER_CONTROL);
scratch8 &= 0x0F;
if (host->mmc->ios.timing == MMC_TIMING_SD_EXP_1_2V &&
host->mmc->caps2 & MMC_CAP2_SD_EXP_1_2V) {
scratch8 |= SDHCI_VDD2_POWER_ON | SDHCI_VDD2_POWER_120;
} else {
scratch8 |= SDHCI_VDD2_POWER_ON | SDHCI_VDD2_POWER_180;
}
sdhci_writeb(host, scratch8, SDHCI_POWER_CONTROL);
/* UnLock WP */
pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch8);
scratch8 &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch8);
/* Wait for express card clkreqn assert */
ret = read_poll_timeout(sdhci_readb, scratch8, !(scratch8 & BIT(0)),
1, 30000, false, host, O2_SD_EXP_INT_REG);
if (!ret) {
/* Switch to PCIe mode */
scratch16 = sdhci_readw(host, O2_SD_PCIE_SWITCH);
scratch16 |= BIT(8);
sdhci_writew(host, scratch16, O2_SD_PCIE_SWITCH);
} else {
/* Power off VDD2 voltage */
scratch8 = sdhci_readb(host, SDHCI_POWER_CONTROL);
scratch8 &= 0x0F;
sdhci_writeb(host, scratch8, SDHCI_POWER_CONTROL);
/* Keep mode as UHSI */
pci_read_config_word(chip->pdev, O2_SD_PARA_SET_REG1, &scratch16);
scratch16 &= ~BIT(11);
pci_write_config_word(chip->pdev, O2_SD_PARA_SET_REG1, scratch16);
host->mmc->ios.timing = MMC_TIMING_LEGACY;
pr_info("%s: Express card initialization failed, falling back to Legacy\n",
mmc_hostname(host->mmc));
}
/* Lock WP */
pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch8);
scratch8 |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch8);
return 0;
}
static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
{
struct sdhci_pci_chip *chip;
struct sdhci_host *host;
struct o2_host *o2_host = sdhci_pci_priv(slot);
u32 reg, caps;
int ret;
chip = slot->chip;
host = slot->host;
o2_host->dll_adjust_count = 0;
caps = sdhci_readl(host, SDHCI_CAPABILITIES);
/*
* mmc_select_bus_width() will test the bus to determine the actual bus
* width.
*/
if (caps & SDHCI_CAN_DO_8BIT)
host->mmc->caps |= MMC_CAP_8_BIT_DATA;
host->quirks2 |= SDHCI_QUIRK2_BROKEN_DDR50;
sdhci_pci_o2_enable_msi(chip, host);
host->mmc_host_ops.execute_tuning = sdhci_o2_execute_tuning;
switch (chip->pdev->device) {
case PCI_DEVICE_ID_O2_SDS0:
case PCI_DEVICE_ID_O2_SEABIRD0:
case PCI_DEVICE_ID_O2_SEABIRD1:
case PCI_DEVICE_ID_O2_SDS1:
case PCI_DEVICE_ID_O2_FUJIN2:
reg = sdhci_readl(host, O2_SD_VENDOR_SETTING);
if (reg & 0x1)
host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
if (chip->pdev->device == PCI_DEVICE_ID_O2_SEABIRD0) {
ret = pci_read_config_dword(chip->pdev,
O2_SD_MISC_SETTING, &reg);
if (ret)
return -EIO;
if (reg & (1 << 4)) {
pr_info("%s: emmc 1.8v flag is set, force 1.8v signaling voltage\n",
mmc_hostname(host->mmc));
host->flags &= ~SDHCI_SIGNALING_330;
host->flags |= SDHCI_SIGNALING_180;
host->mmc->caps2 |= MMC_CAP2_NO_SD;
host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
pci_write_config_dword(chip->pdev,
O2_SD_DETECT_SETTING, 3);
}
slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd;
}
if (chip->pdev->device == PCI_DEVICE_ID_O2_SEABIRD1) {
slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd;
host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
}
if (chip->pdev->device != PCI_DEVICE_ID_O2_FUJIN2)
break;
/* set dll watch dog timer */
reg = sdhci_readl(host, O2_SD_VENDOR_SETTING2);
reg |= (1 << 12);
sdhci_writel(host, reg, O2_SD_VENDOR_SETTING2);
break;
case PCI_DEVICE_ID_O2_GG8_9860:
case PCI_DEVICE_ID_O2_GG8_9861:
case PCI_DEVICE_ID_O2_GG8_9862:
case PCI_DEVICE_ID_O2_GG8_9863:
host->mmc->caps2 |= MMC_CAP2_NO_SDIO | MMC_CAP2_SD_EXP | MMC_CAP2_SD_EXP_1_2V;
host->mmc->caps |= MMC_CAP_HW_RESET;
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd;
host->mmc_host_ops.init_sd_express = sdhci_pci_o2_init_sd_express;
break;
default:
break;
}
return 0;
}
static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
{
int ret;
u8 scratch;
u16 scratch16;
u32 scratch_32;
switch (chip->pdev->device) {
case PCI_DEVICE_ID_O2_8220:
case PCI_DEVICE_ID_O2_8221:
case PCI_DEVICE_ID_O2_8320:
case PCI_DEVICE_ID_O2_8321:
/* This extra setup is required due to broken ADMA. */
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
return ret;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
/* Set Multi 3 to VCC3V# */
pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08);
/* Disable CLK_REQ# support after media DET */
ret = pci_read_config_byte(chip->pdev,
O2_SD_CLKREQ, &scratch);
if (ret)
return ret;
scratch |= 0x20;
pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
/*
* Choose capabilities, enable SDMA. We have to write 0x01
* to the capabilities register first to unlock it.
*/
ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
if (ret)
return ret;
scratch |= 0x01;
pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
/* Disable ADMA1/2 */
pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39);
pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08);
/* Disable the infinite transfer mode */
ret = pci_read_config_byte(chip->pdev,
O2_SD_INF_MOD, &scratch);
if (ret)
return ret;
scratch |= 0x08;
pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
/* Lock WP */
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
return ret;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
case PCI_DEVICE_ID_O2_SDS0:
case PCI_DEVICE_ID_O2_SDS1:
case PCI_DEVICE_ID_O2_FUJIN2:
/* Unlock WP */
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
return ret;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
/* Chips with device ID 8520 and sub ID 0x11 or 0x12 are supported */
if (chip->pdev->device == PCI_DEVICE_ID_O2_FUJIN2) {
ret = pci_read_config_dword(chip->pdev,
O2_SD_FUNC_REG0,
&scratch_32);
if (ret)
return ret;
scratch_32 = ((scratch_32 & 0xFF000000) >> 24);
/* Check whether the sub ID is 0x11 or 0x12 */
if ((scratch_32 == 0x11) || (scratch_32 == 0x12)) {
scratch_32 = 0x25100000;
o2_pci_set_baseclk(chip, scratch_32);
ret = pci_read_config_dword(chip->pdev,
O2_SD_FUNC_REG4,
&scratch_32);
if (ret)
return ret;
/* Enable Base Clk setting change */
scratch_32 |= O2_SD_FREG4_ENABLE_CLK_SET;
pci_write_config_dword(chip->pdev,
O2_SD_FUNC_REG4,
scratch_32);
/* Set Tuning Window to 4 */
pci_write_config_byte(chip->pdev,
O2_SD_TUNING_CTRL, 0x44);
break;
}
}
/* Enable 8520 led function */
o2_pci_led_enable(chip);
/* Set timeout CLK */
ret = pci_read_config_dword(chip->pdev,
O2_SD_CLK_SETTING, &scratch_32);
if (ret)
return ret;
scratch_32 &= ~(0xFF00);
scratch_32 |= 0x07E0C800;
pci_write_config_dword(chip->pdev,
O2_SD_CLK_SETTING, scratch_32);
ret = pci_read_config_dword(chip->pdev,
O2_SD_CLKREQ, &scratch_32);
if (ret)
return ret;
scratch_32 |= 0x3;
pci_write_config_dword(chip->pdev, O2_SD_CLKREQ, scratch_32);
ret = pci_read_config_dword(chip->pdev,
O2_SD_PLL_SETTING, &scratch_32);
if (ret)
return ret;
scratch_32 &= ~(0x1F3F070E);
scratch_32 |= 0x18270106;
pci_write_config_dword(chip->pdev,
O2_SD_PLL_SETTING, scratch_32);
/* Disable UHS1 function */
ret = pci_read_config_dword(chip->pdev,
O2_SD_CAP_REG2, &scratch_32);
if (ret)
return ret;
scratch_32 &= ~(0xE0);
pci_write_config_dword(chip->pdev,
O2_SD_CAP_REG2, scratch_32);
if (chip->pdev->device == PCI_DEVICE_ID_O2_FUJIN2)
sdhci_pci_o2_fujin2_pci_init(chip);
/* Lock WP */
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
return ret;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
case PCI_DEVICE_ID_O2_SEABIRD0:
case PCI_DEVICE_ID_O2_SEABIRD1:
/* Unlock WP */
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
return ret;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
ret = pci_read_config_dword(chip->pdev,
O2_SD_PLL_SETTING, &scratch_32);
if (ret)
return ret;
if ((scratch_32 & 0xff000000) == 0x01000000) {
scratch_32 &= 0x0000FFFF;
scratch_32 |= 0x1F340000;
pci_write_config_dword(chip->pdev,
O2_SD_PLL_SETTING, scratch_32);
} else {
scratch_32 &= 0x0000FFFF;
scratch_32 |= 0x25100000;
pci_write_config_dword(chip->pdev,
O2_SD_PLL_SETTING, scratch_32);
ret = pci_read_config_dword(chip->pdev,
O2_SD_FUNC_REG4,
&scratch_32);
if (ret)
return ret;
scratch_32 |= (1 << 22);
pci_write_config_dword(chip->pdev,
O2_SD_FUNC_REG4, scratch_32);
}
/* Set Tuning Window to 5 */
pci_write_config_byte(chip->pdev,
O2_SD_TUNING_CTRL, 0x55);
/* Adjust 1st and 2nd CD debounce time */
pci_read_config_dword(chip->pdev, O2_SD_MISC_CTRL2, &scratch_32);
scratch_32 &= 0xFFE7FFFF;
scratch_32 |= 0x00180000;
pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL2, scratch_32);
pci_write_config_dword(chip->pdev, O2_SD_DETECT_SETTING, 1);
/* Lock WP */
ret = pci_read_config_byte(chip->pdev,
O2_SD_LOCK_WP, &scratch);
if (ret)
return ret;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
case PCI_DEVICE_ID_O2_GG8_9860:
case PCI_DEVICE_ID_O2_GG8_9861:
case PCI_DEVICE_ID_O2_GG8_9862:
case PCI_DEVICE_ID_O2_GG8_9863:
/* Unlock WP */
ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
if (ret)
return ret;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
/* Select mode switch source as software control */
pci_read_config_word(chip->pdev, O2_SD_PARA_SET_REG1, &scratch16);
scratch16 &= 0xF8FF;
scratch16 |= BIT(9);
pci_write_config_word(chip->pdev, O2_SD_PARA_SET_REG1, scratch16);
/* set VDD1 supply source */
pci_read_config_word(chip->pdev, O2_SD_VDDX_CTRL_REG, &scratch16);
scratch16 &= 0xFFE3;
scratch16 |= BIT(3);
pci_write_config_word(chip->pdev, O2_SD_VDDX_CTRL_REG, scratch16);
/* Set host drive strength */
scratch16 = 0x0025;
pci_write_config_word(chip->pdev, O2_SD_PLL_SETTING, scratch16);
/* Set output delay */
pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &scratch_32);
scratch_32 &= 0xFF0FFF00;
scratch_32 |= 0x00B0003B;
pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, scratch_32);
/* Lock WP */
ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
if (ret)
return ret;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
}
return 0;
}
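/*
 * The unlock-WP / modify / lock-WP sequence above repeats for every chip
 * family. A minimal sketch of how it could be factored out (hypothetical
 * helper, not in this driver); @fn performs the vendor config writes while
 * bit 7 of O2_SD_LOCK_WP is cleared:
 */
#if 0
static int o2_with_wp_unlocked(struct sdhci_pci_chip *chip,
			       int (*fn)(struct sdhci_pci_chip *chip))
{
	u8 scratch;
	int ret;

	ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
	if (ret)
		return ret;

	/* Clear the lock bit, run the register updates, then relock */
	pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch & 0x7f);
	ret = fn(chip);
	pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch | 0x80);

	return ret;
}
#endif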
#ifdef CONFIG_PM_SLEEP
static int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip)
{
sdhci_pci_o2_probe(chip);
return sdhci_pci_resume_host(chip);
}
#endif
static const struct sdhci_ops sdhci_pci_o2_ops = {
.set_clock = sdhci_pci_o2_set_clock,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
const struct sdhci_pci_fixes sdhci_o2 = {
.probe = sdhci_pci_o2_probe,
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD,
.probe_slot = sdhci_pci_o2_probe_slot,
#ifdef CONFIG_PM_SLEEP
.resume = sdhci_pci_o2_resume,
#endif
.ops = &sdhci_pci_o2_ops,
.priv_size = sizeof(struct o2_host),
};
| linux-master | drivers/mmc/host/sdhci-pci-o2micro.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 Socionext Inc.
* Author: Masahiro Yamada <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include "sdhci-pltfm.h"
/* HRS - Host Register Set (specific to Cadence) */
#define SDHCI_CDNS_HRS04 0x10 /* PHY access port */
#define SDHCI_CDNS_HRS04_ACK BIT(26)
#define SDHCI_CDNS_HRS04_RD BIT(25)
#define SDHCI_CDNS_HRS04_WR BIT(24)
#define SDHCI_CDNS_HRS04_RDATA GENMASK(23, 16)
#define SDHCI_CDNS_HRS04_WDATA GENMASK(15, 8)
#define SDHCI_CDNS_HRS04_ADDR GENMASK(5, 0)
#define SDHCI_CDNS_HRS06 0x18 /* eMMC control */
#define SDHCI_CDNS_HRS06_TUNE_UP BIT(15)
#define SDHCI_CDNS_HRS06_TUNE GENMASK(13, 8)
#define SDHCI_CDNS_HRS06_MODE GENMASK(2, 0)
#define SDHCI_CDNS_HRS06_MODE_SD 0x0
#define SDHCI_CDNS_HRS06_MODE_MMC_SDR 0x2
#define SDHCI_CDNS_HRS06_MODE_MMC_DDR 0x3
#define SDHCI_CDNS_HRS06_MODE_MMC_HS200 0x4
#define SDHCI_CDNS_HRS06_MODE_MMC_HS400 0x5
#define SDHCI_CDNS_HRS06_MODE_MMC_HS400ES 0x6
/* SRS - Slot Register Set (SDHCI-compatible) */
#define SDHCI_CDNS_SRS_BASE 0x200
/* PHY */
#define SDHCI_CDNS_PHY_DLY_SD_HS 0x00
#define SDHCI_CDNS_PHY_DLY_SD_DEFAULT 0x01
#define SDHCI_CDNS_PHY_DLY_UHS_SDR12 0x02
#define SDHCI_CDNS_PHY_DLY_UHS_SDR25 0x03
#define SDHCI_CDNS_PHY_DLY_UHS_SDR50 0x04
#define SDHCI_CDNS_PHY_DLY_UHS_DDR50 0x05
#define SDHCI_CDNS_PHY_DLY_EMMC_LEGACY 0x06
#define SDHCI_CDNS_PHY_DLY_EMMC_SDR 0x07
#define SDHCI_CDNS_PHY_DLY_EMMC_DDR 0x08
#define SDHCI_CDNS_PHY_DLY_SDCLK 0x0b
#define SDHCI_CDNS_PHY_DLY_HSMMC 0x0c
#define SDHCI_CDNS_PHY_DLY_STROBE 0x0d
/*
* The tuned val register is 6 bits wide, but not the whole of the range is
* available. The range 0-42 seems to be available (then 43 wraps around to 0)
* but I am not quite sure if it is official. Use only 0 to 39 for safety.
*/
#define SDHCI_CDNS_MAX_TUNING_LOOP 40
struct sdhci_cdns_phy_param {
u8 addr;
u8 data;
};
struct sdhci_cdns_priv {
void __iomem *hrs_addr;
void __iomem *ctl_addr; /* write control */
spinlock_t wrlock; /* write lock */
bool enhanced_strobe;
void (*priv_writel)(struct sdhci_cdns_priv *priv, u32 val, void __iomem *reg);
struct reset_control *rst_hw;
unsigned int nr_phy_params;
struct sdhci_cdns_phy_param phy_params[];
};
struct sdhci_cdns_phy_cfg {
const char *property;
u8 addr;
};
struct sdhci_cdns_drv_data {
int (*init)(struct platform_device *pdev);
const struct sdhci_pltfm_data pltfm_data;
};
static const struct sdhci_cdns_phy_cfg sdhci_cdns_phy_cfgs[] = {
{ "cdns,phy-input-delay-sd-highspeed", SDHCI_CDNS_PHY_DLY_SD_HS, },
{ "cdns,phy-input-delay-legacy", SDHCI_CDNS_PHY_DLY_SD_DEFAULT, },
{ "cdns,phy-input-delay-sd-uhs-sdr12", SDHCI_CDNS_PHY_DLY_UHS_SDR12, },
{ "cdns,phy-input-delay-sd-uhs-sdr25", SDHCI_CDNS_PHY_DLY_UHS_SDR25, },
{ "cdns,phy-input-delay-sd-uhs-sdr50", SDHCI_CDNS_PHY_DLY_UHS_SDR50, },
{ "cdns,phy-input-delay-sd-uhs-ddr50", SDHCI_CDNS_PHY_DLY_UHS_DDR50, },
{ "cdns,phy-input-delay-mmc-highspeed", SDHCI_CDNS_PHY_DLY_EMMC_SDR, },
{ "cdns,phy-input-delay-mmc-ddr", SDHCI_CDNS_PHY_DLY_EMMC_DDR, },
{ "cdns,phy-dll-delay-sdclk", SDHCI_CDNS_PHY_DLY_SDCLK, },
{ "cdns,phy-dll-delay-sdclk-hsmmc", SDHCI_CDNS_PHY_DLY_HSMMC, },
{ "cdns,phy-dll-delay-strobe", SDHCI_CDNS_PHY_DLY_STROBE, },
};
static inline void cdns_writel(struct sdhci_cdns_priv *priv, u32 val,
void __iomem *reg)
{
writel(val, reg);
}
static int sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv,
u8 addr, u8 data)
{
void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS04;
u32 tmp;
int ret;
ret = readl_poll_timeout(reg, tmp, !(tmp & SDHCI_CDNS_HRS04_ACK),
0, 10);
if (ret)
return ret;
tmp = FIELD_PREP(SDHCI_CDNS_HRS04_WDATA, data) |
FIELD_PREP(SDHCI_CDNS_HRS04_ADDR, addr);
priv->priv_writel(priv, tmp, reg);
tmp |= SDHCI_CDNS_HRS04_WR;
priv->priv_writel(priv, tmp, reg);
ret = readl_poll_timeout(reg, tmp, tmp & SDHCI_CDNS_HRS04_ACK, 0, 10);
if (ret)
return ret;
tmp &= ~SDHCI_CDNS_HRS04_WR;
priv->priv_writel(priv, tmp, reg);
ret = readl_poll_timeout(reg, tmp, !(tmp & SDHCI_CDNS_HRS04_ACK),
0, 10);
return ret;
}
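/*
 * Usage sketch (illustrative only): programming a single PHY delay, e.g.
 * the SDCLK DLL delay, through the HRS04 access port:
 *
 *	ret = sdhci_cdns_write_phy_reg(priv, SDHCI_CDNS_PHY_DLY_SDCLK, 0x3);
 *
 * The value 0x3 is an arbitrary example; real values come from the
 * "cdns,phy-dll-delay-sdclk" DT property parsed below.
 */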
static unsigned int sdhci_cdns_phy_param_count(struct device_node *np)
{
unsigned int count = 0;
int i;
for (i = 0; i < ARRAY_SIZE(sdhci_cdns_phy_cfgs); i++)
if (of_property_read_bool(np, sdhci_cdns_phy_cfgs[i].property))
count++;
return count;
}
static void sdhci_cdns_phy_param_parse(struct device_node *np,
struct sdhci_cdns_priv *priv)
{
struct sdhci_cdns_phy_param *p = priv->phy_params;
u32 val;
int ret, i;
for (i = 0; i < ARRAY_SIZE(sdhci_cdns_phy_cfgs); i++) {
ret = of_property_read_u32(np, sdhci_cdns_phy_cfgs[i].property,
&val);
if (ret)
continue;
p->addr = sdhci_cdns_phy_cfgs[i].addr;
p->data = val;
p++;
}
}
static int sdhci_cdns_phy_init(struct sdhci_cdns_priv *priv)
{
int ret, i;
for (i = 0; i < priv->nr_phy_params; i++) {
ret = sdhci_cdns_write_phy_reg(priv, priv->phy_params[i].addr,
priv->phy_params[i].data);
if (ret)
return ret;
}
return 0;
}
static void *sdhci_cdns_priv(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
return sdhci_pltfm_priv(pltfm_host);
}
static unsigned int sdhci_cdns_get_timeout_clock(struct sdhci_host *host)
{
/*
* Cadence's spec says the Timeout Clock Frequency is the same as the
* Base Clock Frequency.
*/
return host->max_clk;
}
static void sdhci_cdns_set_emmc_mode(struct sdhci_cdns_priv *priv, u32 mode)
{
u32 tmp;
/* The speed mode for eMMC is selected by HRS06 register */
tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06);
tmp &= ~SDHCI_CDNS_HRS06_MODE;
tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_MODE, mode);
priv->priv_writel(priv, tmp, priv->hrs_addr + SDHCI_CDNS_HRS06);
}
static u32 sdhci_cdns_get_emmc_mode(struct sdhci_cdns_priv *priv)
{
u32 tmp;
tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06);
return FIELD_GET(SDHCI_CDNS_HRS06_MODE, tmp);
}
static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
{
struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS06;
u32 tmp;
int i, ret;
if (WARN_ON(!FIELD_FIT(SDHCI_CDNS_HRS06_TUNE, val)))
return -EINVAL;
tmp = readl(reg);
tmp &= ~SDHCI_CDNS_HRS06_TUNE;
tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_TUNE, val);
/*
* Workaround for IP errata:
* The IP6116 SD/eMMC PHY design has a timing issue on receive data
* path. Send tune request twice.
*/
for (i = 0; i < 2; i++) {
tmp |= SDHCI_CDNS_HRS06_TUNE_UP;
priv->priv_writel(priv, tmp, reg);
ret = readl_poll_timeout(reg, tmp,
!(tmp & SDHCI_CDNS_HRS06_TUNE_UP),
0, 1);
if (ret)
return ret;
}
return 0;
}
/*
* In SD mode, software must not use the hardware tuning and instead perform
* an almost identical procedure to eMMC.
*/
static int sdhci_cdns_execute_tuning(struct sdhci_host *host, u32 opcode)
{
int cur_streak = 0;
int max_streak = 0;
int end_of_streak = 0;
int i;
/*
* Do not execute tuning for UHS_SDR50 or UHS_DDR50.
* The delay is set by probe, based on the DT properties.
*/
if (host->timing != MMC_TIMING_MMC_HS200 &&
host->timing != MMC_TIMING_UHS_SDR104)
return 0;
for (i = 0; i < SDHCI_CDNS_MAX_TUNING_LOOP; i++) {
if (sdhci_cdns_set_tune_val(host, i) ||
mmc_send_tuning(host->mmc, opcode, NULL)) { /* bad */
cur_streak = 0;
} else { /* good */
cur_streak++;
if (cur_streak > max_streak) {
max_streak = cur_streak;
end_of_streak = i;
}
}
}
if (!max_streak) {
dev_err(mmc_dev(host->mmc), "no tuning point found\n");
return -EIO;
}
return sdhci_cdns_set_tune_val(host, end_of_streak - max_streak / 2);
}
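/*
 * Worked example (assumed numbers): if tune values 10..19 all pass and the
 * rest fail, then max_streak = 10 and end_of_streak = 19, so the final
 * call above selects 19 - 10 / 2 = 14, the middle of the passing window.
 */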
static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
u32 mode;
switch (timing) {
case MMC_TIMING_MMC_HS:
mode = SDHCI_CDNS_HRS06_MODE_MMC_SDR;
break;
case MMC_TIMING_MMC_DDR52:
mode = SDHCI_CDNS_HRS06_MODE_MMC_DDR;
break;
case MMC_TIMING_MMC_HS200:
mode = SDHCI_CDNS_HRS06_MODE_MMC_HS200;
break;
case MMC_TIMING_MMC_HS400:
if (priv->enhanced_strobe)
mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400ES;
else
mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400;
break;
default:
mode = SDHCI_CDNS_HRS06_MODE_SD;
break;
}
sdhci_cdns_set_emmc_mode(priv, mode);
/* For SD, fall back to the default handler */
if (mode == SDHCI_CDNS_HRS06_MODE_SD)
sdhci_set_uhs_signaling(host, timing);
}
/* Elba control register bits [6:3] are byte-lane enables */
#define ELBA_BYTE_ENABLE_MASK(x) ((x) << 3)
/*
* The Pensando Elba SoC explicitly controls byte-lane enabling on writes
* which includes writes to the HRS registers. The write lock (wrlock)
* is used to ensure byte-lane enable, using write control (ctl_addr),
* occurs before the data write.
*/
static void elba_priv_writel(struct sdhci_cdns_priv *priv, u32 val,
void __iomem *reg)
{
unsigned long flags;
spin_lock_irqsave(&priv->wrlock, flags);
writel(GENMASK(7, 3), priv->ctl_addr);
writel(val, reg);
spin_unlock_irqrestore(&priv->wrlock, flags);
}
static void elba_write_l(struct sdhci_host *host, u32 val, int reg)
{
elba_priv_writel(sdhci_cdns_priv(host), val, host->ioaddr + reg);
}
static void elba_write_w(struct sdhci_host *host, u16 val, int reg)
{
struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
u32 shift = reg & GENMASK(1, 0);
unsigned long flags;
u32 byte_enables;
byte_enables = GENMASK(1, 0) << shift;
spin_lock_irqsave(&priv->wrlock, flags);
writel(ELBA_BYTE_ENABLE_MASK(byte_enables), priv->ctl_addr);
writew(val, host->ioaddr + reg);
spin_unlock_irqrestore(&priv->wrlock, flags);
}
static void elba_write_b(struct sdhci_host *host, u8 val, int reg)
{
struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
u32 shift = reg & GENMASK(1, 0);
unsigned long flags;
u32 byte_enables;
byte_enables = BIT(0) << shift;
spin_lock_irqsave(&priv->wrlock, flags);
writel(ELBA_BYTE_ENABLE_MASK(byte_enables), priv->ctl_addr);
writeb(val, host->ioaddr + reg);
spin_unlock_irqrestore(&priv->wrlock, flags);
}
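/*
 * Worked example (assumed offset): a 16-bit write to register offset 0x06
 * gives shift = 0x06 & GENMASK(1, 0) = 2, so byte_enables = 0b1100, and
 * ELBA_BYTE_ENABLE_MASK() places that in control bits [6:3] as 0x60 before
 * the data write.
 */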
static const struct sdhci_ops sdhci_elba_ops = {
.write_l = elba_write_l,
.write_w = elba_write_w,
.write_b = elba_write_b,
.set_clock = sdhci_set_clock,
.get_timeout_clock = sdhci_cdns_get_timeout_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_cdns_set_uhs_signaling,
};
static int elba_drv_init(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
void __iomem *ioaddr;
host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_8_BIT_DATA;
spin_lock_init(&priv->wrlock);
/* Byte-lane control register */
ioaddr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ioaddr))
return PTR_ERR(ioaddr);
priv->ctl_addr = ioaddr;
priv->priv_writel = elba_priv_writel;
writel(ELBA_BYTE_ENABLE_MASK(0xf), priv->ctl_addr);
return 0;
}
static const struct sdhci_ops sdhci_cdns_ops = {
.set_clock = sdhci_set_clock,
.get_timeout_clock = sdhci_cdns_get_timeout_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.platform_execute_tuning = sdhci_cdns_execute_tuning,
.set_uhs_signaling = sdhci_cdns_set_uhs_signaling,
};
static const struct sdhci_cdns_drv_data sdhci_cdns_uniphier_drv_data = {
.pltfm_data = {
.ops = &sdhci_cdns_ops,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
},
};
static const struct sdhci_cdns_drv_data sdhci_elba_drv_data = {
.init = elba_drv_init,
.pltfm_data = {
.ops = &sdhci_elba_ops,
},
};
static const struct sdhci_cdns_drv_data sdhci_cdns_drv_data = {
.pltfm_data = {
.ops = &sdhci_cdns_ops,
},
};
static void sdhci_cdns_hs400_enhanced_strobe(struct mmc_host *mmc,
struct mmc_ios *ios)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
u32 mode;
priv->enhanced_strobe = ios->enhanced_strobe;
mode = sdhci_cdns_get_emmc_mode(priv);
if (mode == SDHCI_CDNS_HRS06_MODE_MMC_HS400 && ios->enhanced_strobe)
sdhci_cdns_set_emmc_mode(priv,
SDHCI_CDNS_HRS06_MODE_MMC_HS400ES);
if (mode == SDHCI_CDNS_HRS06_MODE_MMC_HS400ES && !ios->enhanced_strobe)
sdhci_cdns_set_emmc_mode(priv,
SDHCI_CDNS_HRS06_MODE_MMC_HS400);
}
static void sdhci_cdns_mmc_hw_reset(struct mmc_host *mmc)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
dev_dbg(mmc_dev(host->mmc), "emmc hardware reset\n");
reset_control_assert(priv->rst_hw);
/* For eMMC, minimum is 1us but give it 3us for good measure */
udelay(3);
reset_control_deassert(priv->rst_hw);
/* For eMMC, minimum is 200us but give it 300us for good measure */
usleep_range(300, 1000);
}
static int sdhci_cdns_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
const struct sdhci_cdns_drv_data *data;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_cdns_priv *priv;
struct clk *clk;
unsigned int nr_phy_params;
int ret;
struct device *dev = &pdev->dev;
static const u16 version = SDHCI_SPEC_400 << SDHCI_SPEC_VER_SHIFT;
clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
data = of_device_get_match_data(dev);
if (!data)
data = &sdhci_cdns_drv_data;
nr_phy_params = sdhci_cdns_phy_param_count(dev->of_node);
host = sdhci_pltfm_init(pdev, &data->pltfm_data,
struct_size(priv, phy_params, nr_phy_params));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
pltfm_host->clk = clk;
priv = sdhci_pltfm_priv(pltfm_host);
priv->nr_phy_params = nr_phy_params;
priv->hrs_addr = host->ioaddr;
priv->enhanced_strobe = false;
priv->priv_writel = cdns_writel;
host->ioaddr += SDHCI_CDNS_SRS_BASE;
host->mmc_host_ops.hs400_enhanced_strobe =
sdhci_cdns_hs400_enhanced_strobe;
if (data->init) {
ret = data->init(pdev);
if (ret)
goto free;
}
sdhci_enable_v4_mode(host);
__sdhci_read_caps(host, &version, NULL, NULL);
sdhci_get_of_property(pdev);
ret = mmc_of_parse(host->mmc);
if (ret)
goto free;
sdhci_cdns_phy_param_parse(dev->of_node, priv);
ret = sdhci_cdns_phy_init(priv);
if (ret)
goto free;
if (host->mmc->caps & MMC_CAP_HW_RESET) {
priv->rst_hw = devm_reset_control_get_optional_exclusive(dev, NULL);
if (IS_ERR(priv->rst_hw)) {
ret = dev_err_probe(mmc_dev(host->mmc), PTR_ERR(priv->rst_hw),
"reset controller error\n");
goto free;
}
if (priv->rst_hw)
host->mmc_host_ops.card_hw_reset = sdhci_cdns_mmc_hw_reset;
}
ret = sdhci_add_host(host);
if (ret)
goto free;
return 0;
free:
sdhci_pltfm_free(pdev);
return ret;
}
#ifdef CONFIG_PM_SLEEP
static int sdhci_cdns_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_cdns_priv *priv = sdhci_pltfm_priv(pltfm_host);
int ret;
ret = clk_prepare_enable(pltfm_host->clk);
if (ret)
return ret;
ret = sdhci_cdns_phy_init(priv);
if (ret)
goto disable_clk;
ret = sdhci_resume_host(host);
if (ret)
goto disable_clk;
return 0;
disable_clk:
clk_disable_unprepare(pltfm_host->clk);
return ret;
}
#endif
static const struct dev_pm_ops sdhci_cdns_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(sdhci_pltfm_suspend, sdhci_cdns_resume)
};
static const struct of_device_id sdhci_cdns_match[] = {
{
.compatible = "socionext,uniphier-sd4hc",
.data = &sdhci_cdns_uniphier_drv_data,
},
{
.compatible = "amd,pensando-elba-sd4hc",
.data = &sdhci_elba_drv_data,
},
{ .compatible = "cdns,sd4hc" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdhci_cdns_match);
static struct platform_driver sdhci_cdns_driver = {
.driver = {
.name = "sdhci-cdns",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &sdhci_cdns_pm_ops,
.of_match_table = sdhci_cdns_match,
},
.probe = sdhci_cdns_probe,
.remove_new = sdhci_pltfm_remove,
};
module_platform_driver(sdhci_cdns_driver);
MODULE_AUTHOR("Masahiro Yamada <[email protected]>");
MODULE_DESCRIPTION("Cadence SD/SDIO/eMMC Host Controller Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/mmc/host/sdhci-cadence.c |
// SPDX-License-Identifier: GPL-2.0
/*
*
* MMC software queue support based on command queue interfaces
*
* Copyright (C) 2019 Linaro, Inc.
* Author: Baolin Wang <[email protected]>
*/
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include "mmc_hsq.h"
static void mmc_hsq_retry_handler(struct work_struct *work)
{
struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
struct mmc_host *mmc = hsq->mmc;
mmc->ops->request(mmc, hsq->mrq);
}
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
struct mmc_host *mmc = hsq->mmc;
struct hsq_slot *slot;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&hsq->lock, flags);
/* Make sure we are not already running a request now */
if (hsq->mrq || hsq->recovery_halt) {
spin_unlock_irqrestore(&hsq->lock, flags);
return;
}
/* Make sure there are remaining requests that need to be pumped */
if (!hsq->qcnt || !hsq->enabled) {
spin_unlock_irqrestore(&hsq->lock, flags);
return;
}
slot = &hsq->slot[hsq->next_tag];
hsq->mrq = slot->mrq;
hsq->qcnt--;
spin_unlock_irqrestore(&hsq->lock, flags);
if (mmc->ops->request_atomic)
ret = mmc->ops->request_atomic(mmc, hsq->mrq);
else
mmc->ops->request(mmc, hsq->mrq);
/*
* If request_atomic() returns -EBUSY, the card may be busy right now,
* so switch to a non-atomic context and retry there, to avoid
* time-consuming operations in atomic context for this unusual case.
*
* Note: we just give a warning for other error cases, since the host
* driver will handle them.
*/
if (ret == -EBUSY)
schedule_work(&hsq->retry_work);
else
WARN_ON_ONCE(ret);
}
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
int tag;
/*
* If there are no remaining requests in the software queue, set an
* invalid tag.
*/
if (!remains) {
hsq->next_tag = HSQ_INVALID_TAG;
hsq->tail_tag = HSQ_INVALID_TAG;
return;
}
tag = hsq->tag_slot[hsq->next_tag];
hsq->tag_slot[hsq->next_tag] = HSQ_INVALID_TAG;
hsq->next_tag = tag;
}
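/*
 * tag_slot[] forms a singly linked list of pending tags: tag_slot[t] holds
 * the tag queued after t. Worked example (assumed order): queueing tags
 * 3, 1, 7 in mmc_hsq_request() gives next_tag = 3, tail_tag = 7,
 * tag_slot[3] = 1 and tag_slot[1] = 7; completing tag 3 then advances
 * next_tag to 1 here.
 */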
static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
unsigned long flags;
int remains;
spin_lock_irqsave(&hsq->lock, flags);
remains = hsq->qcnt;
hsq->mrq = NULL;
/* Update the next available tag to be queued. */
mmc_hsq_update_next_tag(hsq, remains);
if (hsq->waiting_for_idle && !remains) {
hsq->waiting_for_idle = false;
wake_up(&hsq->wait_queue);
}
/* Do not pump new request in recovery mode. */
if (hsq->recovery_halt) {
spin_unlock_irqrestore(&hsq->lock, flags);
return;
}
spin_unlock_irqrestore(&hsq->lock, flags);
/*
* Try to pump a new request to the host controller as fast as possible,
* after completing the previous request.
*/
if (remains > 0)
mmc_hsq_pump_requests(hsq);
}
/**
* mmc_hsq_finalize_request - finalize one request if the request is done
* @mmc: the host controller
* @mrq: the request need to be finalized
*
* Return true if we finalized the corresponding request in software queue,
* otherwise return false.
*/
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct mmc_hsq *hsq = mmc->cqe_private;
unsigned long flags;
spin_lock_irqsave(&hsq->lock, flags);
if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
spin_unlock_irqrestore(&hsq->lock, flags);
return false;
}
/*
* Clear the completed slot's request to make room for a new request.
*/
hsq->slot[hsq->next_tag].mrq = NULL;
spin_unlock_irqrestore(&hsq->lock, flags);
mmc_cqe_request_done(mmc, hsq->mrq);
mmc_hsq_post_request(hsq);
return true;
}
EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);
static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
struct mmc_hsq *hsq = mmc->cqe_private;
unsigned long flags;
spin_lock_irqsave(&hsq->lock, flags);
hsq->recovery_halt = true;
spin_unlock_irqrestore(&hsq->lock, flags);
}
static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
struct mmc_hsq *hsq = mmc->cqe_private;
int remains;
spin_lock_irq(&hsq->lock);
hsq->recovery_halt = false;
remains = hsq->qcnt;
spin_unlock_irq(&hsq->lock);
/*
* Try to pump a new request if there are requests pending in the
* software queue after finishing recovery.
*/
if (remains > 0)
mmc_hsq_pump_requests(hsq);
}
static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct mmc_hsq *hsq = mmc->cqe_private;
int tag = mrq->tag;
spin_lock_irq(&hsq->lock);
if (!hsq->enabled) {
spin_unlock_irq(&hsq->lock);
return -ESHUTDOWN;
}
/* Do not queue any new requests in recovery mode. */
if (hsq->recovery_halt) {
spin_unlock_irq(&hsq->lock);
return -EBUSY;
}
hsq->slot[tag].mrq = mrq;
/*
* If no next tag is available, set the current request's tag as the
* next tag.
*/
if (hsq->next_tag == HSQ_INVALID_TAG) {
hsq->next_tag = tag;
hsq->tail_tag = tag;
hsq->tag_slot[hsq->tail_tag] = HSQ_INVALID_TAG;
} else {
hsq->tag_slot[hsq->tail_tag] = tag;
hsq->tail_tag = tag;
}
hsq->qcnt++;
spin_unlock_irq(&hsq->lock);
mmc_hsq_pump_requests(hsq);
return 0;
}
static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
if (mmc->ops->post_req)
mmc->ops->post_req(mmc, mrq, 0);
}
static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
bool is_idle;
spin_lock_irq(&hsq->lock);
is_idle = (!hsq->mrq && !hsq->qcnt) ||
hsq->recovery_halt;
*ret = hsq->recovery_halt ? -EBUSY : 0;
hsq->waiting_for_idle = !is_idle;
spin_unlock_irq(&hsq->lock);
return is_idle;
}
static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
struct mmc_hsq *hsq = mmc->cqe_private;
int ret;
wait_event(hsq->wait_queue,
mmc_hsq_queue_is_idle(hsq, &ret));
return ret;
}
static void mmc_hsq_disable(struct mmc_host *mmc)
{
struct mmc_hsq *hsq = mmc->cqe_private;
u32 timeout = 500;
int ret;
spin_lock_irq(&hsq->lock);
if (!hsq->enabled) {
spin_unlock_irq(&hsq->lock);
return;
}
spin_unlock_irq(&hsq->lock);
ret = wait_event_timeout(hsq->wait_queue,
mmc_hsq_queue_is_idle(hsq, &ret),
msecs_to_jiffies(timeout));
if (ret == 0) {
pr_warn("could not stop mmc software queue\n");
return;
}
spin_lock_irq(&hsq->lock);
hsq->enabled = false;
spin_unlock_irq(&hsq->lock);
}
static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
struct mmc_hsq *hsq = mmc->cqe_private;
spin_lock_irq(&hsq->lock);
if (hsq->enabled) {
spin_unlock_irq(&hsq->lock);
return -EBUSY;
}
hsq->enabled = true;
spin_unlock_irq(&hsq->lock);
return 0;
}
static const struct mmc_cqe_ops mmc_hsq_ops = {
.cqe_enable = mmc_hsq_enable,
.cqe_disable = mmc_hsq_disable,
.cqe_request = mmc_hsq_request,
.cqe_post_req = mmc_hsq_post_req,
.cqe_wait_for_idle = mmc_hsq_wait_for_idle,
.cqe_recovery_start = mmc_hsq_recovery_start,
.cqe_recovery_finish = mmc_hsq_recovery_finish,
};
int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
int i;
hsq->num_slots = HSQ_NUM_SLOTS;
hsq->next_tag = HSQ_INVALID_TAG;
hsq->tail_tag = HSQ_INVALID_TAG;
hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
sizeof(struct hsq_slot), GFP_KERNEL);
if (!hsq->slot)
return -ENOMEM;
hsq->mmc = mmc;
hsq->mmc->cqe_private = hsq;
mmc->cqe_ops = &mmc_hsq_ops;
for (i = 0; i < HSQ_NUM_SLOTS; i++)
hsq->tag_slot[i] = HSQ_INVALID_TAG;
INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
spin_lock_init(&hsq->lock);
init_waitqueue_head(&hsq->wait_queue);
return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);
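/*
 * Typical wiring in a host driver's probe (sketch; my_host_setup_hsq is a
 * hypothetical name, see sdhci-sprd.c for a real user of this API):
 */
#if 0
static int my_host_setup_hsq(struct device *dev, struct mmc_host *mmc)
{
	struct mmc_hsq *hsq;
	int ret;

	hsq = devm_kzalloc(dev, sizeof(*hsq), GFP_KERNEL);
	if (!hsq)
		return -ENOMEM;

	ret = mmc_hsq_init(hsq, mmc);
	if (ret)
		return ret;

	/* Advertise the command-queue interface backed by the software queue */
	mmc->caps2 |= MMC_CAP2_CQE;

	return 0;
}
#endif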
void mmc_hsq_suspend(struct mmc_host *mmc)
{
mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);
int mmc_hsq_resume(struct mmc_host *mmc)
{
return mmc_hsq_enable(mmc, NULL);
}
EXPORT_SYMBOL_GPL(mmc_hsq_resume);
MODULE_DESCRIPTION("MMC Host Software Queue support");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mmc/host/mmc_hsq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* drivers/mmc/host/sdhci-of-sparx5.c
*
* MCHP Sparx5 SoC Secure Digital Host Controller Interface.
*
* Copyright (c) 2019 Microchip Inc.
*
* Author: Lars Povlsen <[email protected]>
*/
#include <linux/sizes.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include "sdhci-pltfm.h"
#define CPU_REGS_GENERAL_CTRL (0x22 * 4)
#define MSHC_DLY_CC_MASK GENMASK(16, 13)
#define MSHC_DLY_CC_SHIFT 13
#define MSHC_DLY_CC_MAX 15
#define CPU_REGS_PROC_CTRL (0x2C * 4)
#define ACP_CACHE_FORCE_ENA BIT(4)
#define ACP_AWCACHE BIT(3)
#define ACP_ARCACHE BIT(2)
#define ACP_CACHE_MASK (ACP_CACHE_FORCE_ENA|ACP_AWCACHE|ACP_ARCACHE)
#define MSHC2_VERSION 0x500 /* Off 0x140, reg 0x0 */
#define MSHC2_TYPE 0x504 /* Off 0x140, reg 0x1 */
#define MSHC2_EMMC_CTRL 0x52c /* Off 0x140, reg 0xB */
#define MSHC2_EMMC_CTRL_EMMC_RST_N BIT(2)
#define MSHC2_EMMC_CTRL_IS_EMMC BIT(0)
struct sdhci_sparx5_data {
struct sdhci_host *host;
struct regmap *cpu_ctrl;
int delay_clock;
};
#define BOUNDARY_OK(addr, len) \
((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1)))
/*
* If a DMA address spans a 128MB boundary, we split the DMA transfer into
* two so that each DMA transfer doesn't exceed the boundary.
*/
static void sdhci_sparx5_adma_write_desc(struct sdhci_host *host, void **desc,
dma_addr_t addr, int len,
unsigned int cmd)
{
int tmplen, offset;
if (likely(!len || BOUNDARY_OK(addr, len))) {
sdhci_adma_write_desc(host, desc, addr, len, cmd);
return;
}
pr_debug("%s: write_desc: splitting dma len %d, offset %pad\n",
mmc_hostname(host->mmc), len, &addr);
offset = addr & (SZ_128M - 1);
tmplen = SZ_128M - offset;
sdhci_adma_write_desc(host, desc, addr, tmplen, cmd);
addr += tmplen;
len -= tmplen;
sdhci_adma_write_desc(host, desc, addr, len, cmd);
}
static void sparx5_set_cacheable(struct sdhci_host *host, u32 value)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_sparx5_data *sdhci_sparx5 = sdhci_pltfm_priv(pltfm_host);
pr_debug("%s: Set Cacheable = 0x%x\n", mmc_hostname(host->mmc), value);
/* Update ACP caching attributes in HW */
regmap_update_bits(sdhci_sparx5->cpu_ctrl,
CPU_REGS_PROC_CTRL, ACP_CACHE_MASK, value);
}
static void sparx5_set_delay(struct sdhci_host *host, u8 value)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_sparx5_data *sdhci_sparx5 = sdhci_pltfm_priv(pltfm_host);
pr_debug("%s: Set DLY_CC = %u\n", mmc_hostname(host->mmc), value);
/* Update DLY_CC in HW */
regmap_update_bits(sdhci_sparx5->cpu_ctrl,
CPU_REGS_GENERAL_CTRL,
MSHC_DLY_CC_MASK,
(value << MSHC_DLY_CC_SHIFT));
}
static void sdhci_sparx5_set_emmc(struct sdhci_host *host)
{
if (!mmc_card_is_removable(host->mmc)) {
u8 value;
value = sdhci_readb(host, MSHC2_EMMC_CTRL);
if (!(value & MSHC2_EMMC_CTRL_IS_EMMC)) {
value |= MSHC2_EMMC_CTRL_IS_EMMC;
pr_debug("%s: Set EMMC_CTRL: 0x%08x\n",
mmc_hostname(host->mmc), value);
sdhci_writeb(host, value, MSHC2_EMMC_CTRL);
}
}
}
static void sdhci_sparx5_reset_emmc(struct sdhci_host *host)
{
u8 value;
pr_debug("%s: Toggle EMMC_CTRL.EMMC_RST_N\n", mmc_hostname(host->mmc));
value = sdhci_readb(host, MSHC2_EMMC_CTRL) &
~MSHC2_EMMC_CTRL_EMMC_RST_N;
sdhci_writeb(host, value, MSHC2_EMMC_CTRL);
/* For eMMC, minimum is 1us but give it 10us for good measure */
usleep_range(10, 20);
sdhci_writeb(host, value | MSHC2_EMMC_CTRL_EMMC_RST_N,
MSHC2_EMMC_CTRL);
/* For eMMC, minimum is 200us but give it 300us for good measure */
usleep_range(300, 400);
}
static void sdhci_sparx5_reset(struct sdhci_host *host, u8 mask)
{
pr_debug("%s: *** RESET: mask %d\n", mmc_hostname(host->mmc), mask);
sdhci_reset(host, mask);
/* Be sure CARD_IS_EMMC stays set */
sdhci_sparx5_set_emmc(host);
}
static const struct sdhci_ops sdhci_sparx5_ops = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.reset = sdhci_sparx5_reset,
.adma_write_desc = sdhci_sparx5_adma_write_desc,
};
static const struct sdhci_pltfm_data sdhci_sparx5_pdata = {
.quirks = 0,
.quirks2 = SDHCI_QUIRK2_HOST_NO_CMD23 | /* Controller issue */
SDHCI_QUIRK2_NO_1_8_V, /* No sdr104, ddr50, etc */
.ops = &sdhci_sparx5_ops,
};
static int sdhci_sparx5_probe(struct platform_device *pdev)
{
int ret;
const char *syscon = "microchip,sparx5-cpu-syscon";
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_sparx5_data *sdhci_sparx5;
struct device_node *np = pdev->dev.of_node;
u32 value;
u32 extra;
host = sdhci_pltfm_init(pdev, &sdhci_sparx5_pdata,
sizeof(*sdhci_sparx5));
if (IS_ERR(host))
return PTR_ERR(host);
/*
* Extra ADMA table entries for handling transfers that cross a 128M
* boundary.
*/
extra = DIV_ROUND_UP_ULL(dma_get_required_mask(&pdev->dev), SZ_128M);
if (extra > SDHCI_MAX_SEGS)
extra = SDHCI_MAX_SEGS;
host->adma_table_cnt += extra;
pltfm_host = sdhci_priv(host);
sdhci_sparx5 = sdhci_pltfm_priv(pltfm_host);
sdhci_sparx5->host = host;
pltfm_host->clk = devm_clk_get_enabled(&pdev->dev, "core");
if (IS_ERR(pltfm_host->clk)) {
ret = PTR_ERR(pltfm_host->clk);
dev_err(&pdev->dev, "failed to get and enable core clk: %d\n", ret);
goto free_pltfm;
}
if (!of_property_read_u32(np, "microchip,clock-delay", &value) &&
(value > 0 && value <= MSHC_DLY_CC_MAX))
sdhci_sparx5->delay_clock = value;
sdhci_get_of_property(pdev);
ret = mmc_of_parse(host->mmc);
if (ret)
goto free_pltfm;
sdhci_sparx5->cpu_ctrl = syscon_regmap_lookup_by_compatible(syscon);
if (IS_ERR(sdhci_sparx5->cpu_ctrl)) {
dev_err(&pdev->dev, "No CPU syscon regmap !\n");
ret = PTR_ERR(sdhci_sparx5->cpu_ctrl);
goto free_pltfm;
}
if (sdhci_sparx5->delay_clock >= 0)
sparx5_set_delay(host, sdhci_sparx5->delay_clock);
if (!mmc_card_is_removable(host->mmc)) {
/* Do a HW reset of eMMC card */
sdhci_sparx5_reset_emmc(host);
/* Update EMMC_CTRL */
sdhci_sparx5_set_emmc(host);
/* If eMMC, disable SD and SDIO */
host->mmc->caps2 |= (MMC_CAP2_NO_SDIO|MMC_CAP2_NO_SD);
}
ret = sdhci_add_host(host);
if (ret)
goto free_pltfm;
/* Set AXI bus master to use un-cached access (for DMA) */
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA) &&
IS_ENABLED(CONFIG_DMA_DECLARE_COHERENT))
sparx5_set_cacheable(host, ACP_CACHE_FORCE_ENA);
pr_debug("%s: SDHC version: 0x%08x\n",
mmc_hostname(host->mmc), sdhci_readl(host, MSHC2_VERSION));
pr_debug("%s: SDHC type: 0x%08x\n",
mmc_hostname(host->mmc), sdhci_readl(host, MSHC2_TYPE));
return ret;
free_pltfm:
sdhci_pltfm_free(pdev);
return ret;
}
static const struct of_device_id sdhci_sparx5_of_match[] = {
{ .compatible = "microchip,dw-sparx5-sdhci" },
{ }
};
MODULE_DEVICE_TABLE(of, sdhci_sparx5_of_match);
static struct platform_driver sdhci_sparx5_driver = {
.driver = {
.name = "sdhci-sparx5",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_sparx5_of_match,
.pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_sparx5_probe,
.remove_new = sdhci_pltfm_remove,
};
module_platform_driver(sdhci_sparx5_driver);
MODULE_DESCRIPTION("Sparx5 SDHCI OF driver");
MODULE_AUTHOR("Lars Povlsen <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mmc/host/sdhci-of-sparx5.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* WM8505/WM8650 SD/MMC Host Controller
*
* Copyright (C) 2010 Tony Prisk
* Copyright (C) 2008 WonderMedia Technologies, Inc.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <asm/byteorder.h>
#define DRIVER_NAME "wmt-sdhc"
/* MMC/SD controller registers */
#define SDMMC_CTLR 0x00
#define SDMMC_CMD 0x01
#define SDMMC_RSPTYPE 0x02
#define SDMMC_ARG 0x04
#define SDMMC_BUSMODE 0x08
#define SDMMC_BLKLEN 0x0C
#define SDMMC_BLKCNT 0x0E
#define SDMMC_RSP 0x10
#define SDMMC_CBCR 0x20
#define SDMMC_INTMASK0 0x24
#define SDMMC_INTMASK1 0x25
#define SDMMC_STS0 0x28
#define SDMMC_STS1 0x29
#define SDMMC_STS2 0x2A
#define SDMMC_STS3 0x2B
#define SDMMC_RSPTIMEOUT 0x2C
#define SDMMC_CLK 0x30 /* VT8500 only */
#define SDMMC_EXTCTRL 0x34
#define SDMMC_SBLKLEN 0x38
#define SDMMC_DMATIMEOUT 0x3C
/* SDMMC_CTLR bit fields */
#define CTLR_CMD_START 0x01
#define CTLR_CMD_WRITE 0x04
#define CTLR_FIFO_RESET 0x08
/* SDMMC_BUSMODE bit fields */
#define BM_SPI_MODE 0x01
#define BM_FOURBIT_MODE 0x02
#define BM_EIGHTBIT_MODE 0x04
#define BM_SD_OFF 0x10
#define BM_SPI_CS 0x20
#define BM_SD_POWER 0x40
#define BM_SOFT_RESET 0x80
/* SDMMC_BLKLEN bit fields */
#define BLKL_CRCERR_ABORT 0x0800
#define BLKL_CD_POL_HIGH 0x1000
#define BLKL_GPI_CD 0x2000
#define BLKL_DATA3_CD 0x4000
#define BLKL_INT_ENABLE 0x8000
/* SDMMC_INTMASK0 bit fields */
#define INT0_MBLK_TRAN_DONE_INT_EN 0x10
#define INT0_BLK_TRAN_DONE_INT_EN 0x20
#define INT0_CD_INT_EN 0x40
#define INT0_DI_INT_EN 0x80
/* SDMMC_INTMASK1 bit fields */
#define INT1_CMD_RES_TRAN_DONE_INT_EN 0x02
#define INT1_CMD_RES_TOUT_INT_EN 0x04
#define INT1_MBLK_AUTO_STOP_INT_EN 0x08
#define INT1_DATA_TOUT_INT_EN 0x10
#define INT1_RESCRC_ERR_INT_EN 0x20
#define INT1_RCRC_ERR_INT_EN 0x40
#define INT1_WCRC_ERR_INT_EN 0x80
/* SDMMC_STS0 bit fields */
#define STS0_WRITE_PROTECT 0x02
#define STS0_CD_DATA3 0x04
#define STS0_CD_GPI 0x08
#define STS0_MBLK_DONE 0x10
#define STS0_BLK_DONE 0x20
#define STS0_CARD_DETECT 0x40
#define STS0_DEVICE_INS 0x80
/* SDMMC_STS1 bit fields */
#define STS1_SDIO_INT 0x01
#define STS1_CMDRSP_DONE 0x02
#define STS1_RSP_TIMEOUT 0x04
#define STS1_AUTOSTOP_DONE 0x08
#define STS1_DATA_TIMEOUT 0x10
#define STS1_RSP_CRC_ERR 0x20
#define STS1_RCRC_ERR 0x40
#define STS1_WCRC_ERR 0x80
/* SDMMC_STS2 bit fields */
#define STS2_CMD_RES_BUSY 0x10
#define STS2_DATARSP_BUSY 0x20
#define STS2_DIS_FORCECLK 0x80
/* SDMMC_EXTCTRL bit fields */
#define EXT_EIGHTBIT 0x04
/* MMC/SD DMA Controller Registers */
#define SDDMA_GCR 0x100
#define SDDMA_IER 0x104
#define SDDMA_ISR 0x108
#define SDDMA_DESPR 0x10C
#define SDDMA_RBR 0x110
#define SDDMA_DAR 0x114
#define SDDMA_BAR 0x118
#define SDDMA_CPR 0x11C
#define SDDMA_CCR 0x120
/* SDDMA_GCR bit fields */
#define DMA_GCR_DMA_EN 0x00000001
#define DMA_GCR_SOFT_RESET 0x00000100
/* SDDMA_IER bit fields */
#define DMA_IER_INT_EN 0x00000001
/* SDDMA_ISR bit fields */
#define DMA_ISR_INT_STS 0x00000001
/* SDDMA_RBR bit fields */
#define DMA_RBR_FORMAT 0x40000000
#define DMA_RBR_END 0x80000000
/* SDDMA_CCR bit fields */
#define DMA_CCR_RUN 0x00000080
#define DMA_CCR_IF_TO_PERIPHERAL 0x00000000
#define DMA_CCR_PERIPHERAL_TO_IF 0x00400000
/* SDDMA_CCR event status */
#define DMA_CCR_EVT_NO_STATUS 0x00000000
#define DMA_CCR_EVT_UNDERRUN 0x00000001
#define DMA_CCR_EVT_OVERRUN 0x00000002
#define DMA_CCR_EVT_DESP_READ 0x00000003
#define DMA_CCR_EVT_DATA_RW 0x00000004
#define DMA_CCR_EVT_EARLY_END 0x00000005
#define DMA_CCR_EVT_SUCCESS 0x0000000F
#define PDMA_READ 0x00
#define PDMA_WRITE 0x01
#define WMT_SD_POWER_OFF 0
#define WMT_SD_POWER_ON 1
struct wmt_dma_descriptor {
u32 flags;
u32 data_buffer_addr;
u32 branch_addr;
u32 reserved1;
};
struct wmt_mci_caps {
unsigned int f_min;
unsigned int f_max;
u32 ocr_avail;
u32 caps;
u32 max_seg_size;
u32 max_segs;
u32 max_blk_size;
};
struct wmt_mci_priv {
struct mmc_host *mmc;
void __iomem *sdmmc_base;
int irq_regular;
int irq_dma;
void *dma_desc_buffer;
dma_addr_t dma_desc_device_addr;
struct completion cmdcomp;
struct completion datacomp;
struct completion *comp_cmd;
struct completion *comp_dma;
struct mmc_request *req;
struct mmc_command *cmd;
struct clk *clk_sdmmc;
struct device *dev;
u8 power_inverted;
u8 cd_inverted;
};
static void wmt_set_sd_power(struct wmt_mci_priv *priv, int enable)
{
u32 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
if (enable ^ priv->power_inverted)
reg_tmp &= ~BM_SD_OFF;
else
reg_tmp |= BM_SD_OFF;
writeb(reg_tmp, priv->sdmmc_base + SDMMC_BUSMODE);
}
static void wmt_mci_read_response(struct mmc_host *mmc)
{
struct wmt_mci_priv *priv;
int idx1, idx2;
u8 tmp_resp;
u32 response;
priv = mmc_priv(mmc);
for (idx1 = 0; idx1 < 4; idx1++) {
response = 0;
for (idx2 = 0; idx2 < 4; idx2++) {
if ((idx1 == 3) && (idx2 == 3))
tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP);
else
tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP +
(idx1*4) + idx2 + 1);
response |= (tmp_resp << (idx2 * 8));
}
priv->cmd->resp[idx1] = cpu_to_be32(response);
}
}
static void wmt_mci_start_command(struct wmt_mci_priv *priv)
{
u32 reg_tmp;
reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
writeb(reg_tmp | CTLR_CMD_START, priv->sdmmc_base + SDMMC_CTLR);
}
static int wmt_mci_send_command(struct mmc_host *mmc, u8 command, u8 cmdtype,
u32 arg, u8 rsptype)
{
struct wmt_mci_priv *priv;
u32 reg_tmp;
priv = mmc_priv(mmc);
/* write command, arg, resptype registers */
writeb(command, priv->sdmmc_base + SDMMC_CMD);
writel(arg, priv->sdmmc_base + SDMMC_ARG);
writeb(rsptype, priv->sdmmc_base + SDMMC_RSPTYPE);
/* reset response FIFO */
reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);
/* ensure clock enabled - VT3465 */
wmt_set_sd_power(priv, WMT_SD_POWER_ON);
/* clear status bits */
writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
writeb(0xFF, priv->sdmmc_base + SDMMC_STS2);
writeb(0xFF, priv->sdmmc_base + SDMMC_STS3);
/* set command type */
reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
writeb((reg_tmp & 0x0F) | (cmdtype << 4),
priv->sdmmc_base + SDMMC_CTLR);
return 0;
}
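/*
 * cmdtype encodes the transfer type in CTLR[7:4]; from the request path
 * below: 0 = no data, 1/3 = single/multi-block write, 2/4 = single/multi-
 * block read, 7 = stop transmission.
 */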
static void wmt_mci_disable_dma(struct wmt_mci_priv *priv)
{
writel(DMA_ISR_INT_STS, priv->sdmmc_base + SDDMA_ISR);
writel(0, priv->sdmmc_base + SDDMA_IER);
}
static void wmt_complete_data_request(struct wmt_mci_priv *priv)
{
struct mmc_request *req;
req = priv->req;
req->data->bytes_xfered = req->data->blksz * req->data->blocks;
/* unmap the DMA pages used for write data */
if (req->data->flags & MMC_DATA_WRITE)
dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
req->data->sg_len, DMA_TO_DEVICE);
else
dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
req->data->sg_len, DMA_FROM_DEVICE);
/* Check if the DMA ISR returned a data error */
if ((req->cmd->error) || (req->data->error))
mmc_request_done(priv->mmc, req);
else {
wmt_mci_read_response(priv->mmc);
if (!req->data->stop) {
/* single-block read/write requests end here */
mmc_request_done(priv->mmc, req);
} else {
/*
* we change the priv->cmd variable so the response is
* stored in the stop struct rather than the original
* calling command struct
*/
priv->comp_cmd = &priv->cmdcomp;
init_completion(priv->comp_cmd);
priv->cmd = req->data->stop;
wmt_mci_send_command(priv->mmc, req->data->stop->opcode,
7, req->data->stop->arg, 9);
wmt_mci_start_command(priv);
}
}
}
static irqreturn_t wmt_mci_dma_isr(int irq_num, void *data)
{
struct wmt_mci_priv *priv;
int status;
priv = (struct wmt_mci_priv *)data;
status = readl(priv->sdmmc_base + SDDMA_CCR) & 0x0F;
if (status != DMA_CCR_EVT_SUCCESS) {
dev_err(priv->dev, "DMA Error: Status = %d\n", status);
priv->req->data->error = -ETIMEDOUT;
complete(priv->comp_dma);
return IRQ_HANDLED;
}
priv->req->data->error = 0;
wmt_mci_disable_dma(priv);
complete(priv->comp_dma);
if (priv->comp_cmd) {
if (completion_done(priv->comp_cmd)) {
/*
* If the command (regular) interrupt has already
* completed, finish off the request; otherwise we wait
* for the command interrupt and finish from there.
*/
wmt_complete_data_request(priv);
}
}
return IRQ_HANDLED;
}
static irqreturn_t wmt_mci_regular_isr(int irq_num, void *data)
{
struct wmt_mci_priv *priv;
u32 status0;
u32 status1;
u32 status2;
u32 reg_tmp;
int cmd_done;
priv = (struct wmt_mci_priv *)data;
cmd_done = 0;
status0 = readb(priv->sdmmc_base + SDMMC_STS0);
status1 = readb(priv->sdmmc_base + SDMMC_STS1);
status2 = readb(priv->sdmmc_base + SDMMC_STS2);
/* Check for card insertion */
reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
if ((reg_tmp & INT0_DI_INT_EN) && (status0 & STS0_DEVICE_INS)) {
mmc_detect_change(priv->mmc, 0);
if (priv->cmd)
priv->cmd->error = -ETIMEDOUT;
if (priv->comp_cmd)
complete(priv->comp_cmd);
if (priv->comp_dma) {
wmt_mci_disable_dma(priv);
complete(priv->comp_dma);
}
writeb(STS0_DEVICE_INS, priv->sdmmc_base + SDMMC_STS0);
return IRQ_HANDLED;
}
if ((!priv->req->data) ||
((priv->req->data->stop) && (priv->cmd == priv->req->data->stop))) {
/* handle non-data & stop_transmission requests */
if (status1 & STS1_CMDRSP_DONE) {
priv->cmd->error = 0;
cmd_done = 1;
} else if ((status1 & STS1_RSP_TIMEOUT) ||
(status1 & STS1_DATA_TIMEOUT)) {
priv->cmd->error = -ETIMEDOUT;
cmd_done = 1;
}
if (cmd_done) {
priv->comp_cmd = NULL;
if (!priv->cmd->error)
wmt_mci_read_response(priv->mmc);
priv->cmd = NULL;
mmc_request_done(priv->mmc, priv->req);
}
} else {
/* handle data requests */
if (status1 & STS1_CMDRSP_DONE) {
if (priv->cmd)
priv->cmd->error = 0;
if (priv->comp_cmd)
complete(priv->comp_cmd);
}
if ((status1 & STS1_RSP_TIMEOUT) ||
(status1 & STS1_DATA_TIMEOUT)) {
if (priv->cmd)
priv->cmd->error = -ETIMEDOUT;
if (priv->comp_cmd)
complete(priv->comp_cmd);
if (priv->comp_dma) {
wmt_mci_disable_dma(priv);
complete(priv->comp_dma);
}
}
if (priv->comp_dma) {
/*
* If the dma interrupt has already completed, finish
* off the request; otherwise we wait for the DMA
* interrupt and finish from there.
*/
if (completion_done(priv->comp_dma))
wmt_complete_data_request(priv);
}
}
writeb(status0, priv->sdmmc_base + SDMMC_STS0);
writeb(status1, priv->sdmmc_base + SDMMC_STS1);
writeb(status2, priv->sdmmc_base + SDMMC_STS2);
return IRQ_HANDLED;
}
static void wmt_reset_hardware(struct mmc_host *mmc)
{
struct wmt_mci_priv *priv;
u32 reg_tmp;
priv = mmc_priv(mmc);
/* reset controller */
reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);
/* reset response FIFO */
reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);
/* enable GPI pin to detect card */
writew(BLKL_INT_ENABLE | BLKL_GPI_CD, priv->sdmmc_base + SDMMC_BLKLEN);
/* clear interrupt status */
writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
/* setup interrupts */
writeb(INT0_CD_INT_EN | INT0_DI_INT_EN, priv->sdmmc_base +
SDMMC_INTMASK0);
writeb(INT1_DATA_TOUT_INT_EN | INT1_CMD_RES_TRAN_DONE_INT_EN |
INT1_CMD_RES_TOUT_INT_EN, priv->sdmmc_base + SDMMC_INTMASK1);
/* set the DMA timeout */
writew(8191, priv->sdmmc_base + SDMMC_DMATIMEOUT);
/* auto clock freezing enable */
reg_tmp = readb(priv->sdmmc_base + SDMMC_STS2);
writeb(reg_tmp | STS2_DIS_FORCECLK, priv->sdmmc_base + SDMMC_STS2);
/* set a default clock speed of 400kHz */
clk_set_rate(priv->clk_sdmmc, 400000);
}
static int wmt_dma_init(struct mmc_host *mmc)
{
struct wmt_mci_priv *priv;
priv = mmc_priv(mmc);
writel(DMA_GCR_SOFT_RESET, priv->sdmmc_base + SDDMA_GCR);
writel(DMA_GCR_DMA_EN, priv->sdmmc_base + SDDMA_GCR);
if ((readl(priv->sdmmc_base + SDDMA_GCR) & DMA_GCR_DMA_EN) != 0)
return 0;
else
return 1;
}
static void wmt_dma_init_descriptor(struct wmt_dma_descriptor *desc,
u16 req_count, u32 buffer_addr, u32 branch_addr, int end)
{
desc->flags = DMA_RBR_FORMAT | req_count;
if (end)
	desc->flags |= DMA_RBR_END;
desc->data_buffer_addr = buffer_addr;
desc->branch_addr = branch_addr;
}
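/*
 * Layout sketch (illustrative): each 16-byte descriptor carries one block;
 * branch_addr points at the next descriptor in the coherent buffer, and the
 * last descriptor in the chain gets DMA_RBR_END set in wmt_mci_request()
 * below.
 */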
static void wmt_dma_config(struct mmc_host *mmc, u32 descaddr, u8 dir)
{
struct wmt_mci_priv *priv;
u32 reg_tmp;
priv = mmc_priv(mmc);
/* Enable DMA Interrupts */
writel(DMA_IER_INT_EN, priv->sdmmc_base + SDDMA_IER);
/* Write DMA Descriptor Pointer Register */
writel(descaddr, priv->sdmmc_base + SDDMA_DESPR);
writel(0x00, priv->sdmmc_base + SDDMA_CCR);
if (dir == PDMA_WRITE) {
reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
writel(reg_tmp & DMA_CCR_IF_TO_PERIPHERAL, priv->sdmmc_base +
SDDMA_CCR);
} else {
reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
writel(reg_tmp | DMA_CCR_PERIPHERAL_TO_IF, priv->sdmmc_base +
SDDMA_CCR);
}
}
static void wmt_dma_start(struct wmt_mci_priv *priv)
{
u32 reg_tmp;
reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
writel(reg_tmp | DMA_CCR_RUN, priv->sdmmc_base + SDDMA_CCR);
}
static void wmt_mci_request(struct mmc_host *mmc, struct mmc_request *req)
{
struct wmt_mci_priv *priv;
struct wmt_dma_descriptor *desc;
u8 command;
u8 cmdtype;
u32 arg;
u8 rsptype;
u32 reg_tmp;
struct scatterlist *sg;
int i;
int sg_cnt;
int offset;
u32 dma_address;
int desc_cnt;
priv = mmc_priv(mmc);
priv->req = req;
/*
* Use the cmd variable to pass a pointer to the resp[] structure.
* This is required on multi-block requests to pass the pointer to
* the stop command.
*/
priv->cmd = req->cmd;
command = req->cmd->opcode;
arg = req->cmd->arg;
rsptype = mmc_resp_type(req->cmd);
cmdtype = 0;
/* rsptype=7 only valid for SPI commands - should be =2 for SD */
if (rsptype == 7)
rsptype = 2;
/* rsptype=21 is R1B, convert for controller */
if (rsptype == 21)
rsptype = 9;
if (!req->data) {
wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);
wmt_mci_start_command(priv);
/* completion is now handled in the regular_isr() */
}
if (req->data) {
priv->comp_cmd = &priv->cmdcomp;
init_completion(priv->comp_cmd);
wmt_dma_init(mmc);
/* set controller data length */
reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
writew((reg_tmp & 0xF800) | (req->data->blksz - 1),
priv->sdmmc_base + SDMMC_BLKLEN);
/* set controller block count */
writew(req->data->blocks, priv->sdmmc_base + SDMMC_BLKCNT);
desc = (struct wmt_dma_descriptor *)priv->dma_desc_buffer;
if (req->data->flags & MMC_DATA_WRITE) {
sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
req->data->sg_len, DMA_TO_DEVICE);
cmdtype = 1;
if (req->data->blocks > 1)
cmdtype = 3;
} else {
sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
req->data->sg_len, DMA_FROM_DEVICE);
cmdtype = 2;
if (req->data->blocks > 1)
cmdtype = 4;
}
dma_address = priv->dma_desc_device_addr + 16;
desc_cnt = 0;
for_each_sg(req->data->sg, sg, sg_cnt, i) {
offset = 0;
while (offset < sg_dma_len(sg)) {
wmt_dma_init_descriptor(desc, req->data->blksz,
sg_dma_address(sg)+offset,
dma_address, 0);
desc++;
desc_cnt++;
offset += req->data->blksz;
dma_address += 16;
if (desc_cnt == req->data->blocks)
break;
}
}
desc--;
/* mark the final descriptor as the end of the chain */
desc->flags |= DMA_RBR_END;
if (req->data->flags & MMC_DATA_WRITE)
wmt_dma_config(mmc, priv->dma_desc_device_addr,
PDMA_WRITE);
else
wmt_dma_config(mmc, priv->dma_desc_device_addr,
PDMA_READ);
wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);
priv->comp_dma = &priv->datacomp;
init_completion(priv->comp_dma);
wmt_dma_start(priv);
wmt_mci_start_command(priv);
}
}
static void wmt_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct wmt_mci_priv *priv;
u32 busmode, extctrl;
priv = mmc_priv(mmc);
if (ios->power_mode == MMC_POWER_UP) {
wmt_reset_hardware(mmc);
wmt_set_sd_power(priv, WMT_SD_POWER_ON);
}
if (ios->power_mode == MMC_POWER_OFF)
wmt_set_sd_power(priv, WMT_SD_POWER_OFF);
if (ios->clock != 0)
clk_set_rate(priv->clk_sdmmc, ios->clock);
busmode = readb(priv->sdmmc_base + SDMMC_BUSMODE);
extctrl = readb(priv->sdmmc_base + SDMMC_EXTCTRL);
busmode &= ~(BM_EIGHTBIT_MODE | BM_FOURBIT_MODE);
extctrl &= ~EXT_EIGHTBIT;
switch (ios->bus_width) {
case MMC_BUS_WIDTH_8:
busmode |= BM_EIGHTBIT_MODE;
extctrl |= EXT_EIGHTBIT;
break;
case MMC_BUS_WIDTH_4:
busmode |= BM_FOURBIT_MODE;
break;
case MMC_BUS_WIDTH_1:
break;
}
writeb(busmode, priv->sdmmc_base + SDMMC_BUSMODE);
writeb(extctrl, priv->sdmmc_base + SDMMC_EXTCTRL);
}
static int wmt_mci_get_ro(struct mmc_host *mmc)
{
struct wmt_mci_priv *priv = mmc_priv(mmc);
return !(readb(priv->sdmmc_base + SDMMC_STS0) & STS0_WRITE_PROTECT);
}
static int wmt_mci_get_cd(struct mmc_host *mmc)
{
struct wmt_mci_priv *priv = mmc_priv(mmc);
u32 cd = (readb(priv->sdmmc_base + SDMMC_STS0) & STS0_CD_GPI) >> 3;
return !(cd ^ priv->cd_inverted);
}
static const struct mmc_host_ops wmt_mci_ops = {
.request = wmt_mci_request,
.set_ios = wmt_mci_set_ios,
.get_ro = wmt_mci_get_ro,
.get_cd = wmt_mci_get_cd,
};
/* Controller capabilities */
static struct wmt_mci_caps wm8505_caps = {
.f_min = 390425,
.f_max = 50000000,
.ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34,
.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED |
MMC_CAP_SD_HIGHSPEED,
.max_seg_size = 65024,
.max_segs = 128,
.max_blk_size = 2048,
};
static const struct of_device_id wmt_mci_dt_ids[] = {
{ .compatible = "wm,wm8505-sdhc", .data = &wm8505_caps },
{ /* Sentinel */ },
};
static int wmt_mci_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct wmt_mci_priv *priv;
struct device_node *np = pdev->dev.of_node;
const struct wmt_mci_caps *wmt_caps;
int ret;
int regular_irq, dma_irq;
wmt_caps = of_device_get_match_data(&pdev->dev);
if (!wmt_caps) {
dev_err(&pdev->dev, "Controller capabilities data missing\n");
return -EFAULT;
}
if (!np) {
dev_err(&pdev->dev, "Missing SDMMC description in devicetree\n");
return -EFAULT;
}
regular_irq = irq_of_parse_and_map(np, 0);
dma_irq = irq_of_parse_and_map(np, 1);
if (!regular_irq || !dma_irq) {
dev_err(&pdev->dev, "Getting IRQs failed!\n");
ret = -ENXIO;
goto fail1;
}
mmc = mmc_alloc_host(sizeof(struct wmt_mci_priv), &pdev->dev);
if (!mmc) {
dev_err(&pdev->dev, "Failed to allocate mmc_host\n");
ret = -ENOMEM;
goto fail1;
}
mmc->ops = &wmt_mci_ops;
mmc->f_min = wmt_caps->f_min;
mmc->f_max = wmt_caps->f_max;
mmc->ocr_avail = wmt_caps->ocr_avail;
mmc->caps = wmt_caps->caps;
mmc->max_seg_size = wmt_caps->max_seg_size;
mmc->max_segs = wmt_caps->max_segs;
mmc->max_blk_size = wmt_caps->max_blk_size;
mmc->max_req_size = (16*512*mmc->max_segs);
mmc->max_blk_count = mmc->max_req_size / 512;
priv = mmc_priv(mmc);
priv->mmc = mmc;
priv->dev = &pdev->dev;
priv->power_inverted = 0;
priv->cd_inverted = 0;
priv->power_inverted = of_property_read_bool(np, "sdon-inverted");
priv->cd_inverted = of_property_read_bool(np, "cd-inverted");
priv->sdmmc_base = of_iomap(np, 0);
if (!priv->sdmmc_base) {
dev_err(&pdev->dev, "Failed to map IO space\n");
ret = -ENOMEM;
goto fail2;
}
priv->irq_regular = regular_irq;
priv->irq_dma = dma_irq;
ret = request_irq(regular_irq, wmt_mci_regular_isr, 0, "sdmmc", priv);
if (ret) {
dev_err(&pdev->dev, "Register regular IRQ fail\n");
goto fail3;
}
ret = request_irq(dma_irq, wmt_mci_dma_isr, 0, "sdmmc", priv);
if (ret) {
		dev_err(&pdev->dev, "Failed to register DMA IRQ\n");
goto fail4;
}
/* alloc some DMA buffers for descriptors/transfers */
priv->dma_desc_buffer = dma_alloc_coherent(&pdev->dev,
mmc->max_blk_count * 16,
&priv->dma_desc_device_addr,
GFP_KERNEL);
if (!priv->dma_desc_buffer) {
		dev_err(&pdev->dev, "Failed to allocate DMA descriptor buffer\n");
ret = -EPERM;
goto fail5;
}
platform_set_drvdata(pdev, mmc);
priv->clk_sdmmc = of_clk_get(np, 0);
if (IS_ERR(priv->clk_sdmmc)) {
dev_err(&pdev->dev, "Error getting clock\n");
ret = PTR_ERR(priv->clk_sdmmc);
goto fail5_and_a_half;
}
ret = clk_prepare_enable(priv->clk_sdmmc);
if (ret)
goto fail6;
/* configure the controller to a known 'ready' state */
wmt_reset_hardware(mmc);
ret = mmc_add_host(mmc);
if (ret)
goto fail7;
dev_info(&pdev->dev, "WMT SDHC Controller initialized\n");
return 0;
fail7:
clk_disable_unprepare(priv->clk_sdmmc);
fail6:
clk_put(priv->clk_sdmmc);
fail5_and_a_half:
dma_free_coherent(&pdev->dev, mmc->max_blk_count * 16,
priv->dma_desc_buffer, priv->dma_desc_device_addr);
fail5:
free_irq(dma_irq, priv);
fail4:
free_irq(regular_irq, priv);
fail3:
iounmap(priv->sdmmc_base);
fail2:
mmc_free_host(mmc);
fail1:
return ret;
}
static void wmt_mci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct wmt_mci_priv *priv;
struct resource *res;
u32 reg_tmp;
mmc = platform_get_drvdata(pdev);
priv = mmc_priv(mmc);
/* reset SD controller */
reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
writel(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);
reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
writew(reg_tmp & ~(0xA000), priv->sdmmc_base + SDMMC_BLKLEN);
writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
/* release the dma buffers */
dma_free_coherent(&pdev->dev, priv->mmc->max_blk_count * 16,
priv->dma_desc_buffer, priv->dma_desc_device_addr);
mmc_remove_host(mmc);
free_irq(priv->irq_regular, priv);
free_irq(priv->irq_dma, priv);
iounmap(priv->sdmmc_base);
clk_disable_unprepare(priv->clk_sdmmc);
clk_put(priv->clk_sdmmc);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
mmc_free_host(mmc);
dev_info(&pdev->dev, "WMT MCI device removed\n");
}
#ifdef CONFIG_PM
static int wmt_mci_suspend(struct device *dev)
{
u32 reg_tmp;
struct mmc_host *mmc = dev_get_drvdata(dev);
struct wmt_mci_priv *priv;
if (!mmc)
return 0;
priv = mmc_priv(mmc);
reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
SDMMC_BUSMODE);
reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN);
writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
clk_disable(priv->clk_sdmmc);
return 0;
}
static int wmt_mci_resume(struct device *dev)
{
u32 reg_tmp;
struct mmc_host *mmc = dev_get_drvdata(dev);
struct wmt_mci_priv *priv;
if (mmc) {
priv = mmc_priv(mmc);
clk_enable(priv->clk_sdmmc);
reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
SDMMC_BUSMODE);
reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
writew(reg_tmp | (BLKL_GPI_CD | BLKL_INT_ENABLE),
priv->sdmmc_base + SDMMC_BLKLEN);
reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
writeb(reg_tmp | INT0_DI_INT_EN, priv->sdmmc_base +
SDMMC_INTMASK0);
}
return 0;
}
static const struct dev_pm_ops wmt_mci_pm = {
.suspend = wmt_mci_suspend,
.resume = wmt_mci_resume,
};
#define wmt_mci_pm_ops (&wmt_mci_pm)
#else /* !CONFIG_PM */
#define wmt_mci_pm_ops NULL
#endif
static struct platform_driver wmt_mci_driver = {
.probe = wmt_mci_probe,
.remove_new = wmt_mci_remove,
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = wmt_mci_pm_ops,
.of_match_table = wmt_mci_dt_ids,
},
};
module_platform_driver(wmt_mci_driver);
MODULE_DESCRIPTION("Wondermedia MMC/SD Driver");
MODULE_AUTHOR("Tony Prisk");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, wmt_mci_dt_ids);
| linux-master | drivers/mmc/host/wmt-sdmmc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/mmc/host/sdhci_f_sdh30.c
*
* Copyright (C) 2013 - 2015 Fujitsu Semiconductor, Ltd
* Vincent Yang <[email protected]>
* Copyright (C) 2015 Linaro Ltd Andy Green <[email protected]>
* Copyright (C) 2019 Socionext Inc.
*/
#include <linux/acpi.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/property.h>
#include <linux/clk.h>
#include <linux/reset.h>
#include "sdhci-pltfm.h"
#include "sdhci_f_sdh30.h"
struct f_sdhost_priv {
struct clk *clk_iface;
struct clk *clk;
struct reset_control *rst;
u32 vendor_hs200;
struct device *dev;
bool enable_cmd_dat_delay;
};
static void *sdhci_f_sdhost_priv(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
return sdhci_pltfm_priv(pltfm_host);
}
static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host)
{
struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
u32 ctrl = 0;
usleep_range(2500, 3000);
ctrl = sdhci_readl(host, F_SDH30_IO_CONTROL2);
ctrl |= F_SDH30_CRES_O_DN;
sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
ctrl |= F_SDH30_MSEL_O_1_8;
sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
ctrl &= ~F_SDH30_CRES_O_DN;
sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
usleep_range(2500, 3000);
if (priv->vendor_hs200) {
dev_info(priv->dev, "%s: setting hs200\n", __func__);
ctrl = sdhci_readl(host, F_SDH30_ESD_CONTROL);
ctrl |= priv->vendor_hs200;
sdhci_writel(host, ctrl, F_SDH30_ESD_CONTROL);
}
ctrl = sdhci_readl(host, F_SDH30_TUNING_SETTING);
ctrl |= F_SDH30_CMD_CHK_DIS;
sdhci_writel(host, ctrl, F_SDH30_TUNING_SETTING);
}
static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host)
{
return F_SDH30_MIN_CLOCK;
}
static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask)
{
struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
u32 ctl;
if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0)
sdhci_writew(host, 0xBC01, SDHCI_CLOCK_CONTROL);
sdhci_reset(host, mask);
if (priv->enable_cmd_dat_delay) {
ctl = sdhci_readl(host, F_SDH30_ESD_CONTROL);
ctl |= F_SDH30_CMD_DAT_DELAY;
sdhci_writel(host, ctl, F_SDH30_ESD_CONTROL);
}
if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) &&
!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
ctl = sdhci_readl(host, F_SDH30_TEST);
ctl |= F_SDH30_FORCE_CARD_INSERT;
sdhci_writel(host, ctl, F_SDH30_TEST);
}
}
static const struct sdhci_ops sdhci_f_sdh30_ops = {
.voltage_switch = sdhci_f_sdh30_soft_voltage_switch,
.get_min_clock = sdhci_f_sdh30_get_min_clock,
.reset = sdhci_f_sdh30_reset,
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
static const struct sdhci_pltfm_data sdhci_f_sdh30_pltfm_data = {
.ops = &sdhci_f_sdh30_ops,
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
| SDHCI_QUIRK_INVERTED_WRITE_PROTECT,
.quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE
| SDHCI_QUIRK2_TUNING_WORK_AROUND,
};
static int sdhci_f_sdh30_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct device *dev = &pdev->dev;
int ctrl = 0, ret = 0;
struct f_sdhost_priv *priv;
struct sdhci_pltfm_host *pltfm_host;
u32 reg = 0;
host = sdhci_pltfm_init(pdev, &sdhci_f_sdh30_pltfm_data,
sizeof(struct f_sdhost_priv));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
priv = sdhci_pltfm_priv(pltfm_host);
priv->dev = dev;
priv->enable_cmd_dat_delay = device_property_read_bool(dev,
"fujitsu,cmd-dat-delay-select");
ret = mmc_of_parse(host->mmc);
if (ret)
goto err;
if (dev_of_node(dev)) {
sdhci_get_of_property(pdev);
priv->clk_iface = devm_clk_get(&pdev->dev, "iface");
if (IS_ERR(priv->clk_iface)) {
ret = PTR_ERR(priv->clk_iface);
goto err;
}
ret = clk_prepare_enable(priv->clk_iface);
if (ret)
goto err;
priv->clk = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(priv->clk)) {
ret = PTR_ERR(priv->clk);
goto err_clk;
}
ret = clk_prepare_enable(priv->clk);
if (ret)
goto err_clk;
priv->rst = devm_reset_control_get_optional_shared(dev, NULL);
if (IS_ERR(priv->rst)) {
ret = PTR_ERR(priv->rst);
goto err_rst;
}
ret = reset_control_deassert(priv->rst);
if (ret)
goto err_rst;
}
/* init vendor specific regs */
ctrl = sdhci_readw(host, F_SDH30_AHB_CONFIG);
ctrl |= F_SDH30_SIN | F_SDH30_AHB_INCR_16 | F_SDH30_AHB_INCR_8 |
F_SDH30_AHB_INCR_4;
ctrl &= ~(F_SDH30_AHB_BIGED | F_SDH30_BUSLOCK_EN);
sdhci_writew(host, ctrl, F_SDH30_AHB_CONFIG);
reg = sdhci_readl(host, F_SDH30_ESD_CONTROL);
sdhci_writel(host, reg & ~F_SDH30_EMMC_RST, F_SDH30_ESD_CONTROL);
msleep(20);
sdhci_writel(host, reg | F_SDH30_EMMC_RST, F_SDH30_ESD_CONTROL);
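	/*
	 * The two writes above toggle F_SDH30_EMMC_RST: the bit is held
	 * clear for 20 ms and then set again, pulsing a reset to any
	 * attached eMMC device.
	 */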
reg = sdhci_readl(host, SDHCI_CAPABILITIES);
if (reg & SDHCI_CAN_DO_8BIT)
priv->vendor_hs200 = F_SDH30_EMMC_HS200;
if (!(reg & SDHCI_TIMEOUT_CLK_MASK))
host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
ret = sdhci_add_host(host);
if (ret)
goto err_add_host;
return 0;
err_add_host:
reset_control_assert(priv->rst);
err_rst:
clk_disable_unprepare(priv->clk);
err_clk:
clk_disable_unprepare(priv->clk_iface);
err:
sdhci_pltfm_free(pdev);
return ret;
}
static void sdhci_f_sdh30_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
struct clk *clk_iface = priv->clk_iface;
struct reset_control *rst = priv->rst;
struct clk *clk = priv->clk;
sdhci_pltfm_remove(pdev);
reset_control_assert(rst);
clk_disable_unprepare(clk);
clk_disable_unprepare(clk_iface);
}
#ifdef CONFIG_OF
static const struct of_device_id f_sdh30_dt_ids[] = {
{ .compatible = "fujitsu,mb86s70-sdhci-3.0" },
{ .compatible = "socionext,f-sdh30-e51-mmc" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, f_sdh30_dt_ids);
#endif
#ifdef CONFIG_ACPI
static const struct acpi_device_id f_sdh30_acpi_ids[] = {
{ "SCX0002" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(acpi, f_sdh30_acpi_ids);
#endif
static struct platform_driver sdhci_f_sdh30_driver = {
.driver = {
.name = "f_sdh30",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(f_sdh30_dt_ids),
.acpi_match_table = ACPI_PTR(f_sdh30_acpi_ids),
.pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_f_sdh30_probe,
.remove_new = sdhci_f_sdh30_remove,
};
module_platform_driver(sdhci_f_sdh30_driver);
MODULE_DESCRIPTION("F_SDH30 SD Card Controller driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("FUJITSU SEMICONDUCTOR LTD., Socionext Inc.");
MODULE_ALIAS("platform:f_sdh30");
| linux-master | drivers/mmc/host/sdhci_f_sdh30.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2010 Marvell International Ltd.
* Zhangfei Gao <[email protected]>
* Kevin Wang <[email protected]>
* Jun Nie <[email protected]>
* Qiming Wu <[email protected]>
* Philip Rakity <[email protected]>
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/platform_data/pxa_sdhci.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/mmc.h>
#include <linux/pinctrl/consumer.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
#define SD_FIFO_PARAM 0xe0
#define DIS_PAD_SD_CLK_GATE 0x0400 /* Turn on/off Dynamic SD Clock Gating */
#define CLK_GATE_ON 0x0200 /* Disable/enable Clock Gate */
#define CLK_GATE_CTL 0x0100 /* Clock Gate Control */
#define CLK_GATE_SETTING_BITS (DIS_PAD_SD_CLK_GATE | \
CLK_GATE_ON | CLK_GATE_CTL)
#define SD_CLOCK_BURST_SIZE_SETUP 0xe6
#define SDCLK_SEL_SHIFT 8
#define SDCLK_SEL_MASK 0x3
#define SDCLK_DELAY_SHIFT 10
#define SDCLK_DELAY_MASK 0x3c
#define SD_CE_ATA_2 0xea
#define MMC_CARD 0x1000
#define MMC_WIDTH 0x0100
struct sdhci_pxav2_host {
struct mmc_request *sdio_mrq;
struct pinctrl *pinctrl;
struct pinctrl_state *pins_default;
struct pinctrl_state *pins_cmd_gpio;
};
static void pxav2_reset(struct sdhci_host *host, u8 mask)
{
struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
sdhci_reset(host, mask);
if (mask == SDHCI_RESET_ALL) {
u16 tmp = 0;
/*
		 * Tune the timing of read data/command when CRC errors
		 * happen; this has no performance impact.
*/
if (pdata && pdata->clk_delay_sel == 1) {
tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
tmp &= ~(SDCLK_DELAY_MASK << SDCLK_DELAY_SHIFT);
tmp |= (pdata->clk_delay_cycles & SDCLK_DELAY_MASK)
<< SDCLK_DELAY_SHIFT;
tmp &= ~(SDCLK_SEL_MASK << SDCLK_SEL_SHIFT);
tmp |= (1 & SDCLK_SEL_MASK) << SDCLK_SEL_SHIFT;
writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
}
if (pdata && (pdata->flags & PXA_FLAG_ENABLE_CLOCK_GATING)) {
tmp = readw(host->ioaddr + SD_FIFO_PARAM);
tmp &= ~CLK_GATE_SETTING_BITS;
writew(tmp, host->ioaddr + SD_FIFO_PARAM);
} else {
tmp = readw(host->ioaddr + SD_FIFO_PARAM);
tmp &= ~CLK_GATE_SETTING_BITS;
tmp |= CLK_GATE_SETTING_BITS;
writew(tmp, host->ioaddr + SD_FIFO_PARAM);
}
}
}
static u16 pxav1_readw(struct sdhci_host *host, int reg)
{
/* Workaround for data abort exception on SDH2 and SDH4 on PXA168 */
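	/*
	 * SDHCI_HOST_VERSION sits at offset 0xFE; a 16-bit read at that
	 * offset trips the erratum, so read the aligned 32-bit word at
	 * 0xFC instead and return its upper half.
	 */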
if (reg == SDHCI_HOST_VERSION)
return readl(host->ioaddr + SDHCI_HOST_VERSION - 2) >> 16;
return readw(host->ioaddr + reg);
}
static u32 pxav1_irq(struct sdhci_host *host, u32 intmask)
{
struct sdhci_pxav2_host *pxav2_host = sdhci_pltfm_priv(sdhci_priv(host));
struct mmc_request *sdio_mrq;
if (pxav2_host->sdio_mrq && (intmask & SDHCI_INT_CMD_MASK)) {
/* The dummy CMD0 for the SDIO workaround just completed */
sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK, SDHCI_INT_STATUS);
intmask &= ~SDHCI_INT_CMD_MASK;
/* Restore MMC function to CMD pin */
if (pxav2_host->pinctrl && pxav2_host->pins_default)
pinctrl_select_state(pxav2_host->pinctrl, pxav2_host->pins_default);
sdio_mrq = pxav2_host->sdio_mrq;
pxav2_host->sdio_mrq = NULL;
mmc_request_done(host->mmc, sdio_mrq);
}
return intmask;
}
static void pxav1_request_done(struct sdhci_host *host, struct mmc_request *mrq)
{
u16 tmp;
struct sdhci_pxav2_host *pxav2_host;
/* If this is an SDIO command, perform errata workaround for silicon bug */
if (mrq->cmd && !mrq->cmd->error &&
(mrq->cmd->opcode == SD_IO_RW_DIRECT ||
mrq->cmd->opcode == SD_IO_RW_EXTENDED)) {
/* Reset data port */
tmp = readw(host->ioaddr + SDHCI_TIMEOUT_CONTROL);
tmp |= 0x400;
writew(tmp, host->ioaddr + SDHCI_TIMEOUT_CONTROL);
/* Clock is now stopped, so restart it by sending a dummy CMD0 */
pxav2_host = sdhci_pltfm_priv(sdhci_priv(host));
pxav2_host->sdio_mrq = mrq;
/* Set CMD as high output rather than MMC function while we do CMD0 */
if (pxav2_host->pinctrl && pxav2_host->pins_cmd_gpio)
pinctrl_select_state(pxav2_host->pinctrl, pxav2_host->pins_cmd_gpio);
sdhci_writel(host, 0, SDHCI_ARGUMENT);
sdhci_writew(host, 0, SDHCI_TRANSFER_MODE);
sdhci_writew(host, SDHCI_MAKE_CMD(MMC_GO_IDLE_STATE, SDHCI_CMD_RESP_NONE),
SDHCI_COMMAND);
/* Don't finish this request until the dummy CMD0 finishes */
return;
}
mmc_request_done(host->mmc, mrq);
}
static void pxav2_mmc_set_bus_width(struct sdhci_host *host, int width)
{
u8 ctrl;
u16 tmp;
ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
tmp = readw(host->ioaddr + SD_CE_ATA_2);
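	/*
	 * 8-bit bus mode is selected through the vendor SD_CE_ATA_2
	 * register (MMC_CARD | MMC_WIDTH) rather than the standard host
	 * control byte, which here only distinguishes 1- and 4-bit modes.
	 */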
if (width == MMC_BUS_WIDTH_8) {
ctrl &= ~SDHCI_CTRL_4BITBUS;
tmp |= MMC_CARD | MMC_WIDTH;
} else {
tmp &= ~(MMC_CARD | MMC_WIDTH);
if (width == MMC_BUS_WIDTH_4)
ctrl |= SDHCI_CTRL_4BITBUS;
else
ctrl &= ~SDHCI_CTRL_4BITBUS;
}
writew(tmp, host->ioaddr + SD_CE_ATA_2);
writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
}
struct sdhci_pxa_variant {
const struct sdhci_ops *ops;
unsigned int extra_quirks;
};
static const struct sdhci_ops pxav1_sdhci_ops = {
.read_w = pxav1_readw,
.set_clock = sdhci_set_clock,
.irq = pxav1_irq,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.set_bus_width = pxav2_mmc_set_bus_width,
.reset = pxav2_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.request_done = pxav1_request_done,
};
static const struct sdhci_pxa_variant __maybe_unused pxav1_variant = {
.ops = &pxav1_sdhci_ops,
.extra_quirks = SDHCI_QUIRK_NO_BUSY_IRQ | SDHCI_QUIRK_32BIT_DMA_SIZE,
};
static const struct sdhci_ops pxav2_sdhci_ops = {
.set_clock = sdhci_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.set_bus_width = pxav2_mmc_set_bus_width,
.reset = pxav2_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
static const struct sdhci_pxa_variant pxav2_variant = {
.ops = &pxav2_sdhci_ops,
};
#ifdef CONFIG_OF
static const struct of_device_id sdhci_pxav2_of_match[] = {
{ .compatible = "mrvl,pxav1-mmc", .data = &pxav1_variant, },
{ .compatible = "mrvl,pxav2-mmc", .data = &pxav2_variant, },
{},
};
MODULE_DEVICE_TABLE(of, sdhci_pxav2_of_match);
static struct sdhci_pxa_platdata *pxav2_get_mmc_pdata(struct device *dev)
{
struct sdhci_pxa_platdata *pdata;
struct device_node *np = dev->of_node;
u32 bus_width;
u32 clk_delay_cycles;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
if (of_property_read_bool(np, "non-removable"))
pdata->flags |= PXA_FLAG_CARD_PERMANENT;
of_property_read_u32(np, "bus-width", &bus_width);
if (bus_width == 8)
pdata->flags |= PXA_FLAG_SD_8_BIT_CAPABLE_SLOT;
of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles);
if (clk_delay_cycles > 0) {
pdata->clk_delay_sel = 1;
pdata->clk_delay_cycles = clk_delay_cycles;
}
return pdata;
}
#else
static inline struct sdhci_pxa_platdata *pxav2_get_mmc_pdata(struct device *dev)
{
return NULL;
}
#endif
static int sdhci_pxav2_probe(struct platform_device *pdev)
{
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
struct sdhci_pxav2_host *pxav2_host;
struct device *dev = &pdev->dev;
struct sdhci_host *host = NULL;
const struct sdhci_pxa_variant *variant;
int ret;
struct clk *clk, *clk_core;
host = sdhci_pltfm_init(pdev, NULL, sizeof(*pxav2_host));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
pxav2_host = sdhci_pltfm_priv(pltfm_host);
clk = devm_clk_get_optional_enabled(dev, "io");
if (!clk)
clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err_probe(dev, ret, "failed to get io clock\n");
goto free;
}
pltfm_host->clk = clk;
clk_core = devm_clk_get_optional_enabled(dev, "core");
if (IS_ERR(clk_core)) {
ret = PTR_ERR(clk_core);
dev_err_probe(dev, ret, "failed to enable core clock\n");
goto free;
}
host->quirks = SDHCI_QUIRK_BROKEN_ADMA
| SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
| SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
variant = of_device_get_match_data(dev);
if (variant)
pdata = pxav2_get_mmc_pdata(dev);
else
variant = &pxav2_variant;
if (pdata) {
if (pdata->flags & PXA_FLAG_CARD_PERMANENT) {
/* on-chip device */
host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
host->mmc->caps |= MMC_CAP_NONREMOVABLE;
}
/* If slot design supports 8 bit data, indicate this to MMC. */
if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
host->mmc->caps |= MMC_CAP_8_BIT_DATA;
if (pdata->quirks)
host->quirks |= pdata->quirks;
if (pdata->host_caps)
host->mmc->caps |= pdata->host_caps;
if (pdata->pm_caps)
host->mmc->pm_caps |= pdata->pm_caps;
}
host->quirks |= variant->extra_quirks;
host->ops = variant->ops;
/* Set up optional pinctrl for PXA168 SDIO IRQ fix */
pxav2_host->pinctrl = devm_pinctrl_get(dev);
if (!IS_ERR(pxav2_host->pinctrl)) {
pxav2_host->pins_cmd_gpio = pinctrl_lookup_state(pxav2_host->pinctrl,
"state_cmd_gpio");
if (IS_ERR(pxav2_host->pins_cmd_gpio))
pxav2_host->pins_cmd_gpio = NULL;
pxav2_host->pins_default = pinctrl_lookup_state(pxav2_host->pinctrl,
"default");
if (IS_ERR(pxav2_host->pins_default))
pxav2_host->pins_default = NULL;
} else {
pxav2_host->pinctrl = NULL;
}
ret = sdhci_add_host(host);
if (ret)
goto free;
return 0;
free:
sdhci_pltfm_free(pdev);
return ret;
}
static struct platform_driver sdhci_pxav2_driver = {
.driver = {
.name = "sdhci-pxav2",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(sdhci_pxav2_of_match),
.pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_pxav2_probe,
.remove_new = sdhci_pltfm_remove,
};
module_platform_driver(sdhci_pxav2_driver);
MODULE_DESCRIPTION("SDHCI driver for pxav2");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mmc/host/sdhci-pxav2.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Amlogic Meson SDHC clock controller
*
* Copyright (C) 2020 Martin Blumenstingl <[email protected]>
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include "meson-mx-sdhc.h"
struct meson_mx_sdhc_clkc {
struct clk_mux src_sel;
struct clk_divider div;
struct clk_gate mod_clk_en;
struct clk_gate tx_clk_en;
struct clk_gate rx_clk_en;
struct clk_gate sd_clk_en;
};
static const struct clk_parent_data meson_mx_sdhc_src_sel_parents[4] = {
{ .fw_name = "clkin0" },
{ .fw_name = "clkin1" },
{ .fw_name = "clkin2" },
{ .fw_name = "clkin3" },
};
static const struct clk_div_table meson_mx_sdhc_div_table[] = {
{ .div = 6, .val = 5, },
{ .div = 8, .val = 7, },
{ .div = 9, .val = 8, },
{ .div = 10, .val = 9, },
{ .div = 12, .val = 11, },
{ .div = 16, .val = 15, },
{ .div = 18, .val = 17, },
{ .div = 34, .val = 33, },
{ .div = 142, .val = 141, },
{ .div = 850, .val = 849, },
{ .div = 2126, .val = 2125, },
{ .div = 4096, .val = 4095, },
{ /* sentinel */ }
};
static int meson_mx_sdhc_clk_hw_register(struct device *dev,
const char *name_suffix,
const struct clk_parent_data *parents,
unsigned int num_parents,
const struct clk_ops *ops,
struct clk_hw *hw)
{
struct clk_init_data init = { };
char clk_name[32];
snprintf(clk_name, sizeof(clk_name), "%s#%s", dev_name(dev),
name_suffix);
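	/*
	 * Produces names such as "d0072000.mmc#div"; the device part is
	 * purely illustrative here and depends on the board's devicetree.
	 */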
init.name = clk_name;
init.ops = ops;
init.flags = CLK_SET_RATE_PARENT;
init.parent_data = parents;
init.num_parents = num_parents;
hw->init = &init;
return devm_clk_hw_register(dev, hw);
}
static int meson_mx_sdhc_gate_clk_hw_register(struct device *dev,
const char *name_suffix,
struct clk_hw *parent,
struct clk_hw *hw)
{
struct clk_parent_data parent_data = { .hw = parent };
return meson_mx_sdhc_clk_hw_register(dev, name_suffix, &parent_data, 1,
&clk_gate_ops, hw);
}
int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base,
struct clk_bulk_data *clk_bulk_data)
{
struct clk_parent_data div_parent = { };
struct meson_mx_sdhc_clkc *clkc_data;
int ret;
clkc_data = devm_kzalloc(dev, sizeof(*clkc_data), GFP_KERNEL);
if (!clkc_data)
return -ENOMEM;
clkc_data->src_sel.reg = base + MESON_SDHC_CLKC;
clkc_data->src_sel.mask = 0x3;
clkc_data->src_sel.shift = 16;
ret = meson_mx_sdhc_clk_hw_register(dev, "src_sel",
meson_mx_sdhc_src_sel_parents, 4,
&clk_mux_ops,
&clkc_data->src_sel.hw);
if (ret)
return ret;
clkc_data->div.reg = base + MESON_SDHC_CLKC;
clkc_data->div.shift = 0;
clkc_data->div.width = 12;
clkc_data->div.table = meson_mx_sdhc_div_table;
div_parent.hw = &clkc_data->src_sel.hw;
ret = meson_mx_sdhc_clk_hw_register(dev, "div", &div_parent, 1,
&clk_divider_ops,
&clkc_data->div.hw);
if (ret)
return ret;
clkc_data->mod_clk_en.reg = base + MESON_SDHC_CLKC;
clkc_data->mod_clk_en.bit_idx = 15;
ret = meson_mx_sdhc_gate_clk_hw_register(dev, "mod_clk_on",
&clkc_data->div.hw,
&clkc_data->mod_clk_en.hw);
if (ret)
return ret;
clkc_data->tx_clk_en.reg = base + MESON_SDHC_CLKC;
clkc_data->tx_clk_en.bit_idx = 14;
ret = meson_mx_sdhc_gate_clk_hw_register(dev, "tx_clk_on",
&clkc_data->div.hw,
&clkc_data->tx_clk_en.hw);
if (ret)
return ret;
clkc_data->rx_clk_en.reg = base + MESON_SDHC_CLKC;
clkc_data->rx_clk_en.bit_idx = 13;
ret = meson_mx_sdhc_gate_clk_hw_register(dev, "rx_clk_on",
&clkc_data->div.hw,
&clkc_data->rx_clk_en.hw);
if (ret)
return ret;
clkc_data->sd_clk_en.reg = base + MESON_SDHC_CLKC;
clkc_data->sd_clk_en.bit_idx = 12;
ret = meson_mx_sdhc_gate_clk_hw_register(dev, "sd_clk_on",
&clkc_data->div.hw,
&clkc_data->sd_clk_en.hw);
if (ret)
return ret;
/*
* TODO: Replace clk_hw.clk with devm_clk_hw_get_clk() once that is
* available.
*/
clk_bulk_data[0].clk = clkc_data->mod_clk_en.hw.clk;
clk_bulk_data[1].clk = clkc_data->sd_clk_en.hw.clk;
clk_bulk_data[2].clk = clkc_data->tx_clk_en.hw.clk;
clk_bulk_data[3].clk = clkc_data->rx_clk_en.hw.clk;
return 0;
}
| linux-master | drivers/mmc/host/meson-mx-sdhc-clkc.c |
// SPDX-License-Identifier: GPL-2.0-only
/* linux/drivers/mmc/host/sdhci-s3c.c
*
* Copyright 2008 Openmoko Inc.
* Copyright 2008 Simtec Electronics
* Ben Dooks <[email protected]>
* http://armlinux.simtec.co.uk/
*
* SDHCI (HSMMC) support for Samsung SoC
*/
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/platform_data/mmc-sdhci-s3c.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/mmc/host.h>
#include "sdhci.h"
#define MAX_BUS_CLK (4)
#define S3C_SDHCI_CONTROL2 (0x80)
#define S3C_SDHCI_CONTROL3 (0x84)
#define S3C64XX_SDHCI_CONTROL4 (0x8C)
#define S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR BIT(31)
#define S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK BIT(30)
#define S3C_SDHCI_CTRL2_CDINVRXD3 BIT(29)
#define S3C_SDHCI_CTRL2_SLCARDOUT BIT(28)
#define S3C_SDHCI_CTRL2_FLTCLKSEL_MASK (0xf << 24)
#define S3C_SDHCI_CTRL2_FLTCLKSEL_SHIFT (24)
#define S3C_SDHCI_CTRL2_FLTCLKSEL(_x) ((_x) << 24)
#define S3C_SDHCI_CTRL2_LVLDAT_MASK (0xff << 16)
#define S3C_SDHCI_CTRL2_LVLDAT_SHIFT (16)
#define S3C_SDHCI_CTRL2_LVLDAT(_x) ((_x) << 16)
#define S3C_SDHCI_CTRL2_ENFBCLKTX BIT(15)
#define S3C_SDHCI_CTRL2_ENFBCLKRX BIT(14)
#define S3C_SDHCI_CTRL2_SDCDSEL BIT(13)
#define S3C_SDHCI_CTRL2_SDSIGPC BIT(12)
#define S3C_SDHCI_CTRL2_ENBUSYCHKTXSTART BIT(11)
#define S3C_SDHCI_CTRL2_DFCNT_MASK (0x3 << 9)
#define S3C_SDHCI_CTRL2_DFCNT_SHIFT (9)
#define S3C_SDHCI_CTRL2_DFCNT_NONE (0x0 << 9)
#define S3C_SDHCI_CTRL2_DFCNT_4SDCLK (0x1 << 9)
#define S3C_SDHCI_CTRL2_DFCNT_16SDCLK (0x2 << 9)
#define S3C_SDHCI_CTRL2_DFCNT_64SDCLK (0x3 << 9)
#define S3C_SDHCI_CTRL2_ENCLKOUTHOLD BIT(8)
#define S3C_SDHCI_CTRL2_RWAITMODE BIT(7)
#define S3C_SDHCI_CTRL2_DISBUFRD BIT(6)
#define S3C_SDHCI_CTRL2_SELBASECLK_MASK (0x3 << 4)
#define S3C_SDHCI_CTRL2_SELBASECLK_SHIFT (4)
#define S3C_SDHCI_CTRL2_PWRSYNC BIT(3)
#define S3C_SDHCI_CTRL2_ENCLKOUTMSKCON BIT(1)
#define S3C_SDHCI_CTRL2_HWINITFIN BIT(0)
#define S3C_SDHCI_CTRL3_FCSEL3 BIT(31)
#define S3C_SDHCI_CTRL3_FCSEL2 BIT(23)
#define S3C_SDHCI_CTRL3_FCSEL1 BIT(15)
#define S3C_SDHCI_CTRL3_FCSEL0 BIT(7)
#define S3C_SDHCI_CTRL3_FIA3_MASK (0x7f << 24)
#define S3C_SDHCI_CTRL3_FIA3_SHIFT (24)
#define S3C_SDHCI_CTRL3_FIA3(_x) ((_x) << 24)
#define S3C_SDHCI_CTRL3_FIA2_MASK (0x7f << 16)
#define S3C_SDHCI_CTRL3_FIA2_SHIFT (16)
#define S3C_SDHCI_CTRL3_FIA2(_x) ((_x) << 16)
#define S3C_SDHCI_CTRL3_FIA1_MASK (0x7f << 8)
#define S3C_SDHCI_CTRL3_FIA1_SHIFT (8)
#define S3C_SDHCI_CTRL3_FIA1(_x) ((_x) << 8)
#define S3C_SDHCI_CTRL3_FIA0_MASK (0x7f << 0)
#define S3C_SDHCI_CTRL3_FIA0_SHIFT (0)
#define S3C_SDHCI_CTRL3_FIA0(_x) ((_x) << 0)
#define S3C64XX_SDHCI_CONTROL4_DRIVE_MASK (0x3 << 16)
#define S3C64XX_SDHCI_CONTROL4_DRIVE_SHIFT (16)
#define S3C64XX_SDHCI_CONTROL4_DRIVE_2mA (0x0 << 16)
#define S3C64XX_SDHCI_CONTROL4_DRIVE_4mA (0x1 << 16)
#define S3C64XX_SDHCI_CONTROL4_DRIVE_7mA (0x2 << 16)
#define S3C64XX_SDHCI_CONTROL4_DRIVE_9mA (0x3 << 16)
#define S3C64XX_SDHCI_CONTROL4_BUSY (1)
/**
* struct sdhci_s3c - S3C SDHCI instance
* @host: The SDHCI host created
 * @pdev: The platform device we were created from.
* @ioarea: The resource created when we claimed the IO area.
* @pdata: The platform data for this controller.
* @cur_clk: The index of the current bus clock.
* @ext_cd_irq: External card detect interrupt.
* @clk_io: The clock for the internal bus interface.
* @clk_rates: Clock frequencies.
* @clk_bus: The clocks that are available for the SD/MMC bus clock.
* @no_divider: No or non-standard internal clock divider.
*/
struct sdhci_s3c {
struct sdhci_host *host;
struct platform_device *pdev;
struct resource *ioarea;
struct s3c_sdhci_platdata *pdata;
int cur_clk;
int ext_cd_irq;
struct clk *clk_io;
struct clk *clk_bus[MAX_BUS_CLK];
unsigned long clk_rates[MAX_BUS_CLK];
bool no_divider;
};
/**
* struct sdhci_s3c_drv_data - S3C SDHCI platform specific driver data
* @sdhci_quirks: sdhci host specific quirks.
* @no_divider: no or non-standard internal clock divider.
*
 * Specifies platform-specific configuration of the sdhci controller.
 * Note: a separate structure is used for driver-specific platform data to
 * allow for future expansion.
*/
struct sdhci_s3c_drv_data {
unsigned int sdhci_quirks;
bool no_divider;
};
static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host)
{
return sdhci_priv(host);
}
/**
* sdhci_s3c_get_max_clk - callback to get maximum clock frequency.
* @host: The SDHCI host instance.
*
 * Callback to return the maximum clock rate achievable by the controller.
*/
static unsigned int sdhci_s3c_get_max_clk(struct sdhci_host *host)
{
struct sdhci_s3c *ourhost = to_s3c(host);
unsigned long rate, max = 0;
int src;
for (src = 0; src < MAX_BUS_CLK; src++) {
rate = ourhost->clk_rates[src];
if (rate > max)
max = rate;
}
return max;
}
/**
 * sdhci_s3c_consider_clock - consider one of the bus clocks for the current setting
* @ourhost: Our SDHCI instance.
* @src: The source clock index.
* @wanted: The clock frequency wanted.
*/
static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
unsigned int src,
unsigned int wanted)
{
unsigned long rate;
struct clk *clksrc = ourhost->clk_bus[src];
int shift;
if (IS_ERR(clksrc))
return UINT_MAX;
/*
	 * If the controller uses a non-standard clock divider, find the best
	 * clock rate possible with the selected clock source and skip the
	 * divider calculation.
*/
if (ourhost->no_divider) {
rate = clk_round_rate(clksrc, wanted);
return wanted - rate;
}
rate = ourhost->clk_rates[src];
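	/*
	 * The standard divider halves the rate per step (divide by 1..256).
	 * For example, with rate = 133 MHz and wanted = 25 MHz the loop
	 * stops at shift = 3: 133 MHz / 8 ~= 16.6 MHz, the closest rate
	 * not exceeding the request.
	 */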
for (shift = 0; shift <= 8; ++shift) {
if ((rate >> shift) <= wanted)
break;
}
if (shift > 8) {
dev_dbg(&ourhost->pdev->dev,
"clk %d: rate %ld, min rate %lu > wanted %u\n",
src, rate, rate / 256, wanted);
return UINT_MAX;
}
dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n",
src, rate, wanted, rate >> shift);
return wanted - (rate >> shift);
}
/**
* sdhci_s3c_set_clock - callback on clock change
* @host: The SDHCI host being changed
* @clock: The clock rate being requested.
*
* When the card's clock is going to be changed, look at the new frequency
* and find the best clock source to go with it.
*/
static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_s3c *ourhost = to_s3c(host);
unsigned int best = UINT_MAX;
unsigned int delta;
int best_src = 0;
int src;
u32 ctrl;
host->mmc->actual_clock = 0;
/* don't bother if the clock is going off. */
if (clock == 0) {
sdhci_set_clock(host, clock);
return;
}
for (src = 0; src < MAX_BUS_CLK; src++) {
delta = sdhci_s3c_consider_clock(ourhost, src, clock);
if (delta < best) {
best = delta;
best_src = src;
}
}
dev_dbg(&ourhost->pdev->dev,
"selected source %d, clock %d, delta %d\n",
best_src, clock, best);
/* select the new clock source */
if (ourhost->cur_clk != best_src) {
struct clk *clk = ourhost->clk_bus[best_src];
clk_prepare_enable(clk);
if (ourhost->cur_clk >= 0)
clk_disable_unprepare(
ourhost->clk_bus[ourhost->cur_clk]);
ourhost->cur_clk = best_src;
host->max_clk = ourhost->clk_rates[best_src];
}
/* turn clock off to card before changing clock source */
writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
ctrl &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
ctrl |= best_src << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2);
/* reprogram default hardware configuration */
writel(S3C64XX_SDHCI_CONTROL4_DRIVE_9mA,
host->ioaddr + S3C64XX_SDHCI_CONTROL4);
ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
ctrl |= (S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR |
S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK |
S3C_SDHCI_CTRL2_ENFBCLKRX |
S3C_SDHCI_CTRL2_DFCNT_NONE |
S3C_SDHCI_CTRL2_ENCLKOUTHOLD);
writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2);
/* reconfigure the controller for new clock rate */
ctrl = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0);
if (clock < 25 * 1000000)
ctrl |= (S3C_SDHCI_CTRL3_FCSEL3 | S3C_SDHCI_CTRL3_FCSEL2);
writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL3);
sdhci_set_clock(host, clock);
}
/**
* sdhci_s3c_get_min_clock - callback to get minimal supported clock value
* @host: The SDHCI host being queried
*
 * To initialize the mmc host properly, a minimal clock value is needed. For
 * high system bus clock rates the standard formula gives values outside the
 * allowed range. The clock can still be set to lower values if a clock
 * source other than the system bus is selected.
*/
static unsigned int sdhci_s3c_get_min_clock(struct sdhci_host *host)
{
struct sdhci_s3c *ourhost = to_s3c(host);
unsigned long rate, min = ULONG_MAX;
int src;
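	/*
	 * The standard divider is limited to 256, so e.g. a 133 MHz bus
	 * clock yields a minimum of roughly 520 kHz.
	 */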
for (src = 0; src < MAX_BUS_CLK; src++) {
rate = ourhost->clk_rates[src] / 256;
if (!rate)
continue;
if (rate < min)
min = rate;
}
return min;
}
/* sdhci_cmu_get_max_clock - callback to get maximum clock frequency. */
static unsigned int sdhci_cmu_get_max_clock(struct sdhci_host *host)
{
struct sdhci_s3c *ourhost = to_s3c(host);
unsigned long rate, max = 0;
int src;
for (src = 0; src < MAX_BUS_CLK; src++) {
struct clk *clk;
clk = ourhost->clk_bus[src];
if (IS_ERR(clk))
continue;
rate = clk_round_rate(clk, ULONG_MAX);
if (rate > max)
max = rate;
}
return max;
}
/* sdhci_cmu_get_min_clock - callback to get minimal supported clock value. */
static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host)
{
struct sdhci_s3c *ourhost = to_s3c(host);
unsigned long rate, min = ULONG_MAX;
int src;
for (src = 0; src < MAX_BUS_CLK; src++) {
struct clk *clk;
clk = ourhost->clk_bus[src];
if (IS_ERR(clk))
continue;
rate = clk_round_rate(clk, 0);
if (rate < min)
min = rate;
}
return min;
}
/* sdhci_cmu_set_clock - callback on clock change. */
static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_s3c *ourhost = to_s3c(host);
struct device *dev = &ourhost->pdev->dev;
unsigned long timeout;
u16 clk = 0;
int ret;
host->mmc->actual_clock = 0;
/* If the clock is going off, set to 0 at clock control register */
if (clock == 0) {
sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
return;
}
sdhci_s3c_set_clock(host, clock);
/* Reset SD Clock Enable */
clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
clk &= ~SDHCI_CLOCK_CARD_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
ret = clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
if (ret != 0) {
dev_err(dev, "%s: failed to set clock rate %uHz\n",
mmc_hostname(host->mmc), clock);
return;
}
clk = SDHCI_CLOCK_INT_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
/* Wait max 20 ms */
timeout = 20;
while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
& SDHCI_CLOCK_INT_STABLE)) {
if (timeout == 0) {
dev_err(dev, "%s: Internal clock never stabilised.\n",
mmc_hostname(host->mmc));
return;
}
timeout--;
mdelay(1);
}
clk |= SDHCI_CLOCK_CARD_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
static struct sdhci_ops sdhci_s3c_ops = {
.get_max_clock = sdhci_s3c_get_max_clk,
.set_clock = sdhci_s3c_set_clock,
.get_min_clock = sdhci_s3c_get_min_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
#ifdef CONFIG_OF
static int sdhci_s3c_parse_dt(struct device *dev,
struct sdhci_host *host, struct s3c_sdhci_platdata *pdata)
{
struct device_node *node = dev->of_node;
u32 max_width;
	/* if the bus-width property is not specified, assume a width of 1 */
if (of_property_read_u32(node, "bus-width", &max_width))
max_width = 1;
pdata->max_width = max_width;
/* get the card detection method */
if (of_property_read_bool(node, "broken-cd")) {
pdata->cd_type = S3C_SDHCI_CD_NONE;
return 0;
}
if (of_property_read_bool(node, "non-removable")) {
pdata->cd_type = S3C_SDHCI_CD_PERMANENT;
return 0;
}
if (of_get_named_gpio(node, "cd-gpios", 0))
return 0;
	/* assume an internal card detect line that will be configured by pinctrl */
pdata->cd_type = S3C_SDHCI_CD_INTERNAL;
return 0;
}
#else
static int sdhci_s3c_parse_dt(struct device *dev,
struct sdhci_host *host, struct s3c_sdhci_platdata *pdata)
{
return -EINVAL;
}
#endif
static inline const struct sdhci_s3c_drv_data *sdhci_s3c_get_driver_data(
struct platform_device *pdev)
{
#ifdef CONFIG_OF
if (pdev->dev.of_node)
return of_device_get_match_data(&pdev->dev);
#endif
return (const struct sdhci_s3c_drv_data *)
platform_get_device_id(pdev)->driver_data;
}
static int sdhci_s3c_probe(struct platform_device *pdev)
{
struct s3c_sdhci_platdata *pdata;
const struct sdhci_s3c_drv_data *drv_data;
struct device *dev = &pdev->dev;
struct sdhci_host *host;
struct sdhci_s3c *sc;
int ret, irq, ptr, clks;
if (!pdev->dev.platform_data && !pdev->dev.of_node) {
dev_err(dev, "no device data specified\n");
return -ENOENT;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c));
if (IS_ERR(host)) {
dev_err(dev, "sdhci_alloc_host() failed\n");
return PTR_ERR(host);
}
sc = sdhci_priv(host);
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
ret = -ENOMEM;
goto err_pdata_io_clk;
}
if (pdev->dev.of_node) {
ret = sdhci_s3c_parse_dt(&pdev->dev, host, pdata);
if (ret)
goto err_pdata_io_clk;
} else {
memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
}
drv_data = sdhci_s3c_get_driver_data(pdev);
sc->host = host;
sc->pdev = pdev;
sc->pdata = pdata;
sc->cur_clk = -1;
platform_set_drvdata(pdev, host);
sc->clk_io = devm_clk_get(dev, "hsmmc");
if (IS_ERR(sc->clk_io)) {
dev_err(dev, "failed to get io clock\n");
ret = PTR_ERR(sc->clk_io);
goto err_pdata_io_clk;
}
/* enable the local io clock and keep it running for the moment. */
clk_prepare_enable(sc->clk_io);
for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
char name[14];
snprintf(name, 14, "mmc_busclk.%d", ptr);
sc->clk_bus[ptr] = devm_clk_get(dev, name);
if (IS_ERR(sc->clk_bus[ptr]))
continue;
clks++;
sc->clk_rates[ptr] = clk_get_rate(sc->clk_bus[ptr]);
dev_info(dev, "clock source %d: %s (%ld Hz)\n",
ptr, name, sc->clk_rates[ptr]);
}
if (clks == 0) {
dev_err(dev, "failed to find any bus clocks\n");
ret = -ENOENT;
goto err_no_busclks;
}
host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
goto err_req_regs;
}
	/* Ensure at least the minimal set of CMD/CLK/detect GPIOs is configured */
if (pdata->cfg_gpio)
pdata->cfg_gpio(pdev, pdata->max_width);
host->hw_name = "samsung-hsmmc";
host->ops = &sdhci_s3c_ops;
host->quirks = 0;
host->quirks2 = 0;
host->irq = irq;
/* Setup quirks for the controller */
host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
if (drv_data) {
host->quirks |= drv_data->sdhci_quirks;
sc->no_divider = drv_data->no_divider;
}
#ifndef CONFIG_MMC_SDHCI_S3C_DMA
/* we currently see overruns on errors, so disable the SDMA
* support as well. */
host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
#endif /* CONFIG_MMC_SDHCI_S3C_DMA */
	/* It seems we do not get a DATA transfer complete on non-busy
* transfers, not sure if this is a problem with this specific
* SDHCI block, or a missing configuration that needs to be set. */
host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;
/* This host supports the Auto CMD12 */
host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
/* Samsung SoCs need BROKEN_ADMA_ZEROLEN_DESC */
host->quirks |= SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC;
if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
host->mmc->caps = MMC_CAP_NONREMOVABLE;
switch (pdata->max_width) {
case 8:
host->mmc->caps |= MMC_CAP_8_BIT_DATA;
fallthrough;
case 4:
host->mmc->caps |= MMC_CAP_4_BIT_DATA;
break;
}
if (pdata->pm_caps)
host->mmc->pm_caps |= pdata->pm_caps;
host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
SDHCI_QUIRK_32BIT_DMA_SIZE);
/* HSMMC on Samsung SoCs uses SDCLK as timeout clock */
host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
/*
* If controller does not have internal clock divider,
* we can use overriding functions instead of default.
*/
if (sc->no_divider) {
sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
}
	/* Apply any additional host capabilities the platform provides */
if (pdata->host_caps)
host->mmc->caps |= pdata->host_caps;
if (pdata->host_caps2)
host->mmc->caps2 |= pdata->host_caps2;
pm_runtime_enable(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_use_autosuspend(&pdev->dev);
pm_suspend_ignore_children(&pdev->dev, 1);
ret = mmc_of_parse(host->mmc);
if (ret)
goto err_req_regs;
ret = sdhci_add_host(host);
if (ret)
goto err_req_regs;
#ifdef CONFIG_PM
if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL)
clk_disable_unprepare(sc->clk_io);
#endif
return 0;
err_req_regs:
pm_runtime_disable(&pdev->dev);
err_no_busclks:
clk_disable_unprepare(sc->clk_io);
err_pdata_io_clk:
sdhci_free_host(host);
return ret;
}
static void sdhci_s3c_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_s3c *sc = sdhci_priv(host);
if (sc->ext_cd_irq)
free_irq(sc->ext_cd_irq, sc);
#ifdef CONFIG_PM
if (sc->pdata->cd_type != S3C_SDHCI_CD_INTERNAL)
clk_prepare_enable(sc->clk_io);
#endif
sdhci_remove_host(host, 1);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(sc->clk_io);
sdhci_free_host(host);
}
#ifdef CONFIG_PM_SLEEP
static int sdhci_s3c_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
return sdhci_suspend_host(host);
}
static int sdhci_s3c_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
return sdhci_resume_host(host);
}
#endif
#ifdef CONFIG_PM
static int sdhci_s3c_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_s3c *ourhost = to_s3c(host);
struct clk *busclk = ourhost->clk_io;
int ret;
ret = sdhci_runtime_suspend_host(host);
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
if (ourhost->cur_clk >= 0)
clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]);
clk_disable_unprepare(busclk);
return ret;
}
static int sdhci_s3c_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_s3c *ourhost = to_s3c(host);
struct clk *busclk = ourhost->clk_io;
int ret;
clk_prepare_enable(busclk);
if (ourhost->cur_clk >= 0)
clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]);
ret = sdhci_runtime_resume_host(host, 0);
return ret;
}
#endif
static const struct dev_pm_ops sdhci_s3c_pmops = {
SET_SYSTEM_SLEEP_PM_OPS(sdhci_s3c_suspend, sdhci_s3c_resume)
SET_RUNTIME_PM_OPS(sdhci_s3c_runtime_suspend, sdhci_s3c_runtime_resume,
NULL)
};
static const struct platform_device_id sdhci_s3c_driver_ids[] = {
{
.name = "s3c-sdhci",
.driver_data = (kernel_ulong_t)NULL,
},
{ }
};
MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids);
#ifdef CONFIG_OF
static const struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
.no_divider = true,
};
static const struct of_device_id sdhci_s3c_dt_match[] = {
{ .compatible = "samsung,s3c6410-sdhci", },
{ .compatible = "samsung,exynos4210-sdhci",
.data = &exynos4_sdhci_drv_data },
{},
};
MODULE_DEVICE_TABLE(of, sdhci_s3c_dt_match);
#endif
static struct platform_driver sdhci_s3c_driver = {
.probe = sdhci_s3c_probe,
.remove_new = sdhci_s3c_remove,
.id_table = sdhci_s3c_driver_ids,
.driver = {
.name = "s3c-sdhci",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(sdhci_s3c_dt_match),
.pm = &sdhci_s3c_pmops,
},
};
module_platform_driver(sdhci_s3c_driver);
MODULE_DESCRIPTION("Samsung SDHCI (HSMMC) glue");
MODULE_AUTHOR("Ben Dooks, <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mmc/host/sdhci-s3c.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Access SD/MMC cards through SPI master controllers
*
* (C) Copyright 2005, Intec Automation,
* Mike Lavender (mike@steroidmicros)
* (C) Copyright 2006-2007, David Brownell
* (C) Copyright 2007, Axis Communications,
* Hans-Peter Nilsson ([email protected])
* (C) Copyright 2007, ATRON electronic GmbH,
* Jan Nikitenko <[email protected]>
*/
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/dma-mapping.h>
#include <linux/crc7.h>
#include <linux/crc-itu-t.h>
#include <linux/scatterlist.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h> /* for R1_SPI_* bit values */
#include <linux/mmc/slot-gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/mmc_spi.h>
#include <asm/unaligned.h>
/* NOTES:
*
* - For now, we won't try to interoperate with a real mmc/sd/sdio
* controller, although some of them do have hardware support for
* SPI protocol. The main reason for such configs would be mmc-ish
* cards like DataFlash, which don't support that "native" protocol.
*
* We don't have a "DataFlash/MMC/SD/SDIO card slot" abstraction to
* switch between driver stacks, and in any case if "native" mode
* is available, it will be faster and hence preferable.
*
* - MMC depends on a different chipselect management policy than the
* SPI interface currently supports for shared bus segments: it needs
* to issue multiple spi_message requests with the chipselect active,
* using the results of one message to decide the next one to issue.
*
* Pending updates to the programming interface, this driver expects
* that it not share the bus with other drivers (precluding conflicts).
*
* - We tell the controller to keep the chipselect active from the
* beginning of an mmc_host_ops.request until the end. So beware
* of SPI controller drivers that mis-handle the cs_change flag!
*
* However, many cards seem OK with chipselect flapping up/down
* during that time ... at least on unshared bus segments.
*/
/*
* Local protocol constants, internal to data block protocols.
*/
/* Response tokens used to ack each block written: */
#define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f)
#define SPI_RESPONSE_ACCEPTED ((2 << 1)|1)
#define SPI_RESPONSE_CRC_ERR ((5 << 1)|1)
#define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1)
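/* Example: a raw data-response byte of 0xe5 masks to 0x05, which is
 * SPI_RESPONSE_ACCEPTED ((2 << 1) | 1).
 */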
/* Read and write blocks start with these tokens and end with crc;
* on error, read tokens act like a subset of R2_SPI_* values.
*/
#define SPI_TOKEN_SINGLE 0xfe /* single block r/w, multiblock read */
#define SPI_TOKEN_MULTI_WRITE 0xfc /* multiblock write */
#define SPI_TOKEN_STOP_TRAN 0xfd /* terminate multiblock write */
#define MMC_SPI_BLOCKSIZE 512
#define MMC_SPI_R1B_TIMEOUT_MS 3000
#define MMC_SPI_INIT_TIMEOUT_MS 3000
/* One of the critical speed parameters is the amount of data which may
* be transferred in one command. If this value is too low, the SD card
* controller has to do multiple partial block writes (argggh!). With
 * today's (2008) SD cards there is little speed gain if we transfer more
* than 64 KBytes at a time. So use this value until there is any indication
* that we should do more here.
*/
#define MMC_SPI_BLOCKSATONCE 128
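/* 128 blocks * 512 bytes = 64 KiB per transfer, matching the note above. */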
/****************************************************************************/
/*
* Local Data Structures
*/
/* "scratch" is per-{command,block} data exchanged with the card */
struct scratch {
u8 status[29];
u8 data_token;
__be16 crc_val;
};
struct mmc_spi_host {
struct mmc_host *mmc;
struct spi_device *spi;
unsigned char power_mode;
u16 powerup_msecs;
struct mmc_spi_platform_data *pdata;
/* for bulk data transfers */
struct spi_transfer token, t, crc, early_status;
struct spi_message m;
/* for status readback */
struct spi_transfer status;
struct spi_message readback;
/* underlying DMA-aware controller, or null */
struct device *dma_dev;
/* buffer used for commands and for message "overhead" */
struct scratch *data;
dma_addr_t data_dma;
/* Specs say to write ones most of the time, even when the card
* has no need to read its input data; and many cards won't care.
* This is our source of those ones.
*/
void *ones;
dma_addr_t ones_dma;
};
/****************************************************************************/
/*
* MMC-over-SPI protocol glue, used by the MMC stack interface
*/
static inline int mmc_cs_off(struct mmc_spi_host *host)
{
/* chipselect will always be inactive after setup() */
return spi_setup(host->spi);
}
static int
mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
{
int status;
if (len > sizeof(*host->data)) {
WARN_ON(1);
return -EIO;
}
host->status.len = len;
if (host->dma_dev)
dma_sync_single_for_device(host->dma_dev,
host->data_dma, sizeof(*host->data),
DMA_FROM_DEVICE);
status = spi_sync_locked(host->spi, &host->readback);
if (host->dma_dev)
dma_sync_single_for_cpu(host->dma_dev,
host->data_dma, sizeof(*host->data),
DMA_FROM_DEVICE);
return status;
}
static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
unsigned n, u8 byte)
{
u8 *cp = host->data->status;
unsigned long start = jiffies;
do {
int status;
unsigned i;
status = mmc_spi_readbytes(host, n);
if (status < 0)
return status;
for (i = 0; i < n; i++) {
if (cp[i] != byte)
return cp[i];
}
/* If we need long timeouts, we may release the CPU */
cond_resched();
} while (time_is_after_jiffies(start + timeout));
return -ETIMEDOUT;
}
static inline int
mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout)
{
return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0);
}
static int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout)
{
return mmc_spi_skip(host, timeout, 1, 0xff);
}
/*
* Note that for SPI, cmd->resp[0] is not the same data as "native" protocol
* hosts return! The low byte holds R1_SPI bits. The next byte may hold
* R2_SPI bits ... for SEND_STATUS, or after data read errors.
*
* cmd->resp[1] holds any four-byte response, for R3 (READ_OCR) and on
* newer cards R7 (IF_COND).
*/
static char *maptype(struct mmc_command *cmd)
{
switch (mmc_spi_resp_type(cmd)) {
case MMC_RSP_SPI_R1: return "R1";
case MMC_RSP_SPI_R1B: return "R1B";
case MMC_RSP_SPI_R2: return "R2/R5";
case MMC_RSP_SPI_R3: return "R3/R4/R7";
default: return "?";
}
}
/* return zero, else negative errno after setting cmd->error */
static int mmc_spi_response_get(struct mmc_spi_host *host,
struct mmc_command *cmd, int cs_on)
{
unsigned long timeout_ms;
u8 *cp = host->data->status;
u8 *end = cp + host->t.len;
int value = 0;
int bitshift;
u8 leftover = 0;
unsigned short rotator;
int i;
char tag[32];
snprintf(tag, sizeof(tag), " ... CMD%d response SPI_%s",
cmd->opcode, maptype(cmd));
/* Except for data block reads, the whole response will already
* be stored in the scratch buffer. It's somewhere after the
* command and the first byte we read after it. We ignore that
* first byte. After STOP_TRANSMISSION command it may include
* two data bits, but otherwise it's all ones.
*/
cp += 8;
while (cp < end && *cp == 0xff)
cp++;
/* Data block reads (R1 response types) may need more data... */
if (cp == end) {
cp = host->data->status;
end = cp+1;
/* Card sends N(CR) (== 1..8) bytes of all-ones then one
* status byte ... and we already scanned 2 bytes.
*
* REVISIT block read paths use nasty byte-at-a-time I/O
* so it can always DMA directly into the target buffer.
* It'd probably be better to memcpy() the first chunk and
* avoid extra i/o calls...
*
* Note we check for more than 8 bytes, because in practice,
* some SD cards are slow...
*/
for (i = 2; i < 16; i++) {
value = mmc_spi_readbytes(host, 1);
if (value < 0)
goto done;
if (*cp != 0xff)
goto checkstatus;
}
value = -ETIMEDOUT;
goto done;
}
checkstatus:
bitshift = 0;
if (*cp & 0x80) {
/* Houston, we have an ugly card with a bit-shifted response */
rotator = *cp++ << 8;
/* read the next byte */
if (cp == end) {
value = mmc_spi_readbytes(host, 1);
if (value < 0)
goto done;
cp = host->data->status;
end = cp+1;
}
rotator |= *cp++;
while (rotator & 0x8000) {
bitshift++;
rotator <<= 1;
}
cmd->resp[0] = rotator >> 8;
leftover = rotator;
} else {
cmd->resp[0] = *cp++;
}
cmd->error = 0;
/* Status byte: the entire seven-bit R1 response. */
if (cmd->resp[0] != 0) {
if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS)
& cmd->resp[0])
value = -EFAULT; /* Bad address */
else if (R1_SPI_ILLEGAL_COMMAND & cmd->resp[0])
value = -ENOSYS; /* Function not implemented */
else if (R1_SPI_COM_CRC & cmd->resp[0])
value = -EILSEQ; /* Illegal byte sequence */
else if ((R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET)
& cmd->resp[0])
value = -EIO; /* I/O error */
/* else R1_SPI_IDLE, "it's resetting" */
}
switch (mmc_spi_resp_type(cmd)) {
/* SPI R1B == R1 + busy; STOP_TRANSMISSION (for multiblock reads)
* and less-common stuff like various erase operations.
*/
case MMC_RSP_SPI_R1B:
/* maybe we read all the busy tokens already */
while (cp < end && *cp == 0)
cp++;
if (cp == end) {
timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
MMC_SPI_R1B_TIMEOUT_MS;
mmc_spi_wait_unbusy(host, msecs_to_jiffies(timeout_ms));
}
break;
/* SPI R2 == R1 + second status byte; SEND_STATUS
* SPI R5 == R1 + data byte; IO_RW_DIRECT
*/
case MMC_RSP_SPI_R2:
/* read the next byte */
if (cp == end) {
value = mmc_spi_readbytes(host, 1);
if (value < 0)
goto done;
cp = host->data->status;
end = cp+1;
}
if (bitshift) {
rotator = leftover << 8;
rotator |= *cp << bitshift;
cmd->resp[0] |= (rotator & 0xFF00);
} else {
cmd->resp[0] |= *cp << 8;
}
break;
/* SPI R3, R4, or R7 == R1 + 4 bytes */
case MMC_RSP_SPI_R3:
rotator = leftover << 8;
cmd->resp[1] = 0;
for (i = 0; i < 4; i++) {
cmd->resp[1] <<= 8;
/* read the next byte */
if (cp == end) {
value = mmc_spi_readbytes(host, 1);
if (value < 0)
goto done;
cp = host->data->status;
end = cp+1;
}
if (bitshift) {
rotator |= *cp++ << bitshift;
cmd->resp[1] |= (rotator >> 8);
rotator <<= 8;
} else {
cmd->resp[1] |= *cp++;
}
}
break;
/* SPI R1 == just one status byte */
case MMC_RSP_SPI_R1:
break;
default:
dev_dbg(&host->spi->dev, "bad response type %04x\n",
mmc_spi_resp_type(cmd));
if (value >= 0)
value = -EINVAL;
goto done;
}
if (value < 0)
dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n",
tag, cmd->resp[0], cmd->resp[1]);
/* disable chipselect on errors and some success cases */
if (value >= 0 && cs_on)
return value;
done:
if (value < 0)
cmd->error = value;
mmc_cs_off(host);
return value;
}
/* Issue command and read its response.
* Returns zero on success, negative for error.
*
* On error, caller must cope with mmc core retry mechanism. That
* means immediate low-level resubmit, which affects the bus lock...
*/
static int
mmc_spi_command_send(struct mmc_spi_host *host,
struct mmc_request *mrq,
struct mmc_command *cmd, int cs_on)
{
struct scratch *data = host->data;
u8 *cp = data->status;
int status;
struct spi_transfer *t;
/* We can handle most commands (except block reads) in one full
* duplex I/O operation before either starting the next transfer
* (data block or command) or else deselecting the card.
*
* First, write 7 bytes:
* - an all-ones byte to ensure the card is ready
* - opcode byte (plus start and transmission bits)
* - four bytes of big-endian argument
* - crc7 (plus end bit) ... always computed, it's cheap
*
* We init the whole buffer to all-ones, which is what we need
* to write while we're reading (later) response data.
*/
memset(cp, 0xff, sizeof(data->status));
cp[1] = 0x40 | cmd->opcode;
put_unaligned_be32(cmd->arg, cp + 2);
cp[6] = crc7_be(0, cp + 1, 5) | 0x01;
cp += 7;
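	/*
	 * For example, CMD0 (GO_IDLE_STATE, arg 0) goes out on the wire as
	 * 0xff 0x40 0x00 0x00 0x00 0x00 0x95, where 0x95 is the CRC7 of
	 * bytes 1..5 shifted left by one with the end bit set.
	 */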
/* Then, read up to 13 bytes (while writing all-ones):
* - N(CR) (== 1..8) bytes of all-ones
* - status byte (for all response types)
* - the rest of the response, either:
* + nothing, for R1 or R1B responses
* + second status byte, for R2 responses
* + four data bytes, for R3 and R7 responses
*
* Finally, read some more bytes ... in the nice cases we know in
* advance how many, and reading 1 more is always OK:
* - N(EC) (== 0..N) bytes of all-ones, before deselect/finish
* - N(RC) (== 1..N) bytes of all-ones, before next command
* - N(WR) (== 1..N) bytes of all-ones, before data write
*
* So in those cases one full duplex I/O of at most 21 bytes will
* handle the whole command, leaving the card ready to receive a
* data block or new command. We do that whenever we can, shaving
* CPU and IRQ costs (especially when using DMA or FIFOs).
*
* There are two other cases, where it's not generally practical
* to rely on a single I/O:
*
* - R1B responses need at least N(EC) bytes of all-zeroes.
*
* In this case we can *try* to fit it into one I/O, then
* maybe read more data later.
*
* - Data block reads are more troublesome, since a variable
* number of padding bytes precede the token and data.
* + N(CX) (== 0..8) bytes of all-ones, before CSD or CID
* + N(AC) (== 1..many) bytes of all-ones
*
* In this case we currently only have minimal speedups here:
* when N(CR) == 1 we can avoid I/O in response_get().
*/
if (cs_on && (mrq->data->flags & MMC_DATA_READ)) {
cp += 2; /* min(N(CR)) + status */
/* R1 */
} else {
cp += 10; /* max(N(CR)) + status + min(N(RC),N(WR)) */
if (cmd->flags & MMC_RSP_SPI_S2) /* R2/R5 */
cp++;
else if (cmd->flags & MMC_RSP_SPI_B4) /* R3/R4/R7 */
cp += 4;
else if (cmd->flags & MMC_RSP_BUSY) /* R1B */
cp = data->status + sizeof(data->status);
/* else: R1 (most commands) */
}
dev_dbg(&host->spi->dev, " CMD%d, resp %s\n",
cmd->opcode, maptype(cmd));
/* send command, leaving chipselect active */
spi_message_init(&host->m);
t = &host->t;
memset(t, 0, sizeof(*t));
t->tx_buf = t->rx_buf = data->status;
t->tx_dma = t->rx_dma = host->data_dma;
t->len = cp - data->status;
t->cs_change = 1;
spi_message_add_tail(t, &host->m);
if (host->dma_dev) {
host->m.is_dma_mapped = 1;
dma_sync_single_for_device(host->dma_dev,
host->data_dma, sizeof(*host->data),
DMA_BIDIRECTIONAL);
}
status = spi_sync_locked(host->spi, &host->m);
if (host->dma_dev)
dma_sync_single_for_cpu(host->dma_dev,
host->data_dma, sizeof(*host->data),
DMA_BIDIRECTIONAL);
if (status < 0) {
dev_dbg(&host->spi->dev, " ... write returned %d\n", status);
cmd->error = status;
return status;
}
/* after no-data commands and STOP_TRANSMISSION, chipselect off */
return mmc_spi_response_get(host, cmd, cs_on);
}
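/*
* An illustrative frame for the 7-byte write above (example only; the
* argument and CRC of course vary per command): CMD0 with a zero
* argument goes out as
*
*	0xff 0x40 0x00 0x00 0x00 0x00 0x95
*
* i.e. one all-ones "warm-up" byte, 0x40 | opcode, four big-endian
* argument bytes, then the crc7 with the end bit (0x01) set -- 0x95
* being the well-known CMD0 CRC byte.
*/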
/* Build data message with up to four separate transfers. For TX, we
* start by writing the data token. And in most cases, we finish with
* a status transfer.
*
* We always provide TX data for data and CRC. The MMC/SD protocol
* requires us to write ones, but Linux defaults to writing zeroes,
* so we explicitly initialize it to all ones on RX paths.
*
* We also handle DMA mapping, so the underlying SPI controller does
* not need to (re)do it for each message.
*/
static void
mmc_spi_setup_data_message(
struct mmc_spi_host *host,
bool multiple,
enum dma_data_direction direction)
{
struct spi_transfer *t;
struct scratch *scratch = host->data;
dma_addr_t dma = host->data_dma;
spi_message_init(&host->m);
if (dma)
host->m.is_dma_mapped = 1;
/* for reads, readblock() skips 0xff bytes before finding
* the token; for writes, this transfer issues that token.
*/
if (direction == DMA_TO_DEVICE) {
t = &host->token;
memset(t, 0, sizeof(*t));
t->len = 1;
if (multiple)
scratch->data_token = SPI_TOKEN_MULTI_WRITE;
else
scratch->data_token = SPI_TOKEN_SINGLE;
t->tx_buf = &scratch->data_token;
if (dma)
t->tx_dma = dma + offsetof(struct scratch, data_token);
spi_message_add_tail(t, &host->m);
}
/* Body of transfer is buffer, then CRC ...
* either TX-only, or RX with TX-ones.
*/
t = &host->t;
memset(t, 0, sizeof(*t));
t->tx_buf = host->ones;
t->tx_dma = host->ones_dma;
/* length and actual buffer info are written later */
spi_message_add_tail(t, &host->m);
t = &host->crc;
memset(t, 0, sizeof(*t));
t->len = 2;
if (direction == DMA_TO_DEVICE) {
/* the actual CRC may get written later */
t->tx_buf = &scratch->crc_val;
if (dma)
t->tx_dma = dma + offsetof(struct scratch, crc_val);
} else {
t->tx_buf = host->ones;
t->tx_dma = host->ones_dma;
t->rx_buf = &scratch->crc_val;
if (dma)
t->rx_dma = dma + offsetof(struct scratch, crc_val);
}
spi_message_add_tail(t, &host->m);
/*
* A single block read is followed by N(EC) [0+] all-ones bytes
* before deselect ... don't bother.
*
* Multiblock reads are followed by N(AC) [1+] all-ones bytes before
* the next block is read, or a STOP_TRANSMISSION is issued. We'll
* collect that single byte, so readblock() doesn't need to.
*
* For a write, the one-byte data response follows immediately, then
* come zero or more busy bytes, then N(WR) [1+] all-ones bytes.
* Then single block reads may deselect, and multiblock ones issue
* the next token (next data block, or STOP_TRAN). We can try to
* minimize I/O ops by using a single read to collect end-of-busy.
*/
if (multiple || direction == DMA_TO_DEVICE) {
t = &host->early_status;
memset(t, 0, sizeof(*t));
t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1;
t->tx_buf = host->ones;
t->tx_dma = host->ones_dma;
t->rx_buf = scratch->status;
if (dma)
t->rx_dma = dma + offsetof(struct scratch, status);
t->cs_change = 1;
spi_message_add_tail(t, &host->m);
}
}
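/*
* Resulting transfer lists, as a sketch (lengths in bytes; the data
* transfer's buffer and length are filled in later by the caller):
*
*	write:	[token 1] [data N] [crc 2] [early_status, full status buffer]
*	read:	[data N, tx all-ones] [crc 2] [early_status 1, multiblock only]
*
* Single block reads omit early_status entirely, since the N(EC) bytes
* before deselect can simply be ignored.
*/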
/*
* Write one block:
* - caller handled preceding N(WR) [1+] all-ones bytes
* - data block
* + token
* + data bytes
* + crc16
* - an all-ones byte ... card writes a data-response byte
* - followed by N(EC) [0+] all-ones bytes, card writes zero/'busy'
*
* Return negative errno, else success.
*/
static int
mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
unsigned long timeout)
{
struct spi_device *spi = host->spi;
int status, i;
struct scratch *scratch = host->data;
u32 pattern;
if (host->mmc->use_spi_crc)
scratch->crc_val = cpu_to_be16(crc_itu_t(0, t->tx_buf, t->len));
if (host->dma_dev)
dma_sync_single_for_device(host->dma_dev,
host->data_dma, sizeof(*scratch),
DMA_BIDIRECTIONAL);
status = spi_sync_locked(spi, &host->m);
if (status != 0) {
dev_dbg(&spi->dev, "write error (%d)\n", status);
return status;
}
if (host->dma_dev)
dma_sync_single_for_cpu(host->dma_dev,
host->data_dma, sizeof(*scratch),
DMA_BIDIRECTIONAL);
/*
* Get the transmission data-response reply. It must follow
* immediately after the data block we transferred. This reply
* doesn't necessarily tell whether the write operation succeeded;
* it just says if the transmission was ok and whether *earlier*
* writes succeeded; see the standard.
*
* In practice, there are cards (even modern SDHC ones) which are late
* in sending the response, and miss the time frame by a few bits,
* so we have to cope with this situation and check the response
* bit-by-bit. Arggh!!!
*/
pattern = get_unaligned_be32(scratch->status);
/* The first 3 bits of the pattern are undefined */
pattern |= 0xE0000000;
/* left-adjust to leading 0 bit */
while (pattern & 0x80000000)
pattern <<= 1;
/* right-adjust for pattern matching. Code is in bit 4..0 now. */
pattern >>= 27;
switch (pattern) {
case SPI_RESPONSE_ACCEPTED:
status = 0;
break;
case SPI_RESPONSE_CRC_ERR:
/* host shall then issue MMC_STOP_TRANSMISSION */
status = -EILSEQ;
break;
case SPI_RESPONSE_WRITE_ERR:
/* host shall then issue MMC_STOP_TRANSMISSION,
* and should MMC_SEND_STATUS to sort it out
*/
status = -EIO;
break;
default:
status = -EPROTO;
break;
}
if (status != 0) {
dev_dbg(&spi->dev, "write error %02x (%d)\n",
scratch->status[0], status);
return status;
}
t->tx_buf += t->len;
if (host->dma_dev)
t->tx_dma += t->len;
/* Return when not busy. If we didn't collect that status yet,
* we'll need some more I/O.
*/
for (i = 4; i < sizeof(scratch->status); i++) {
/* card is non-busy if the most recent bit is 1 */
if (scratch->status[i] & 0x01)
return 0;
}
return mmc_spi_wait_unbusy(host, timeout);
}
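/*
* A worked example of the pattern matching above (illustrative; exact
* byte values depend on how late the card answers): the data-response
* token has the form xxx0sss1. Forcing the three undefined MSBs high
* and shifting out all leading 1-bits leaves the token's fixed 0-bit on
* top, and ">>= 27" then parks the five bits 0sss1 in bits 4..0 -- so
* 0b00101 (sss == 010) matches SPI_RESPONSE_ACCEPTED regardless of the
* original bit offset.
*/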
/*
* Read one block:
* - skip leading all-ones bytes ... either
* + N(AC) [1..f(clock,CSD)] usually, else
* + N(CX) [0..8] when reading CSD or CID
* - data block
* + token ... if error token, no data or crc
* + data bytes
* + crc16
*
* After single block reads, we're done; N(EC) [0+] all-ones bytes follow
* before dropping chipselect.
*
* For multiblock reads, caller either reads the next block or issues a
* STOP_TRANSMISSION command.
*/
static int
mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
unsigned long timeout)
{
struct spi_device *spi = host->spi;
int status;
struct scratch *scratch = host->data;
unsigned int bitshift;
u8 leftover;
/* At least one SD card sends an all-zeroes byte when N(CX)
* applies, before the all-ones bytes ... just cope with that.
*/
status = mmc_spi_readbytes(host, 1);
if (status < 0)
return status;
status = scratch->status[0];
if (status == 0xff || status == 0)
status = mmc_spi_readtoken(host, timeout);
if (status < 0) {
dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status);
return status;
}
/* The token may be bit-shifted...
* the first 0-bit precedes the data stream.
*/
bitshift = 7;
while (status & 0x80) {
status <<= 1;
bitshift--;
}
leftover = status << 1;
if (host->dma_dev) {
dma_sync_single_for_device(host->dma_dev,
host->data_dma, sizeof(*scratch),
DMA_BIDIRECTIONAL);
dma_sync_single_for_device(host->dma_dev,
t->rx_dma, t->len,
DMA_FROM_DEVICE);
}
status = spi_sync_locked(spi, &host->m);
if (status < 0) {
dev_dbg(&spi->dev, "read error %d\n", status);
return status;
}
if (host->dma_dev) {
dma_sync_single_for_cpu(host->dma_dev,
host->data_dma, sizeof(*scratch),
DMA_BIDIRECTIONAL);
dma_sync_single_for_cpu(host->dma_dev,
t->rx_dma, t->len,
DMA_FROM_DEVICE);
}
if (bitshift) {
/* Walk through the data and the crc and do
* all the magic to get byte-aligned data.
*/
u8 *cp = t->rx_buf;
unsigned int len;
unsigned int bitright = 8 - bitshift;
u8 temp;
for (len = t->len; len; len--) {
temp = *cp;
*cp++ = leftover | (temp >> bitshift);
leftover = temp << bitright;
}
cp = (u8 *) &scratch->crc_val;
temp = *cp;
*cp++ = leftover | (temp >> bitshift);
leftover = temp << bitright;
temp = *cp;
*cp = leftover | (temp >> bitshift);
}
if (host->mmc->use_spi_crc) {
u16 crc = crc_itu_t(0, t->rx_buf, t->len);
be16_to_cpus(&scratch->crc_val);
if (scratch->crc_val != crc) {
dev_dbg(&spi->dev,
"read - crc error: crc_val=0x%04x, computed=0x%04x len=%d\n",
scratch->crc_val, crc, t->len);
return -EILSEQ;
}
}
t->rx_buf += t->len;
if (host->dma_dev)
t->rx_dma += t->len;
return 0;
}
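/*
* A worked example of the realignment above (made-up token byte, for
* illustration only): if the byte containing the token reads back as
* 0xe6 (0b11100110), three leading 1-bits are stripped, so bitshift ==
* 4 and leftover == 0x60 holds the first four data bits in its high
* nibble. Each subsequent raw byte b then yields the aligned byte
* leftover | (b >> 4), with leftover reloaded from b << 4.
*/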
/*
* An MMC/SD data stage includes one or more blocks, optional CRCs,
* and inline handshaking. That handshaking makes it unlike most
* other SPI protocol stacks.
*/
static void
mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
struct mmc_data *data, u32 blk_size)
{
struct spi_device *spi = host->spi;
struct device *dma_dev = host->dma_dev;
struct spi_transfer *t;
enum dma_data_direction direction = mmc_get_dma_dir(data);
struct scatterlist *sg;
unsigned n_sg;
bool multiple = (data->blocks > 1);
const char *write_or_read = (direction == DMA_TO_DEVICE) ? "write" : "read";
u32 clock_rate;
unsigned long timeout;
mmc_spi_setup_data_message(host, multiple, direction);
t = &host->t;
if (t->speed_hz)
clock_rate = t->speed_hz;
else
clock_rate = spi->max_speed_hz;
timeout = data->timeout_ns / 1000 +
data->timeout_clks * 1000000 / clock_rate;
timeout = usecs_to_jiffies((unsigned int)timeout) + 1;
/* Handle scatterlist segments one at a time, with sync for
* each 512-byte block
*/
for_each_sg(data->sg, sg, data->sg_len, n_sg) {
int status = 0;
dma_addr_t dma_addr = 0;
void *kmap_addr;
unsigned length = sg->length;
enum dma_data_direction dir = direction;
/* set up dma mapping for controller drivers that might
* use DMA ... though they may fall back to PIO
*/
if (dma_dev) {
/* never invalidate whole *shared* pages ... */
if ((sg->offset != 0 || length != PAGE_SIZE)
&& dir == DMA_FROM_DEVICE)
dir = DMA_BIDIRECTIONAL;
dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
PAGE_SIZE, dir);
if (dma_mapping_error(dma_dev, dma_addr)) {
data->error = -EFAULT;
break;
}
if (direction == DMA_TO_DEVICE)
t->tx_dma = dma_addr + sg->offset;
else
t->rx_dma = dma_addr + sg->offset;
}
/* allow pio too; we don't allow highmem */
kmap_addr = kmap(sg_page(sg));
if (direction == DMA_TO_DEVICE)
t->tx_buf = kmap_addr + sg->offset;
else
t->rx_buf = kmap_addr + sg->offset;
/* transfer each block, and update request status */
while (length) {
t->len = min(length, blk_size);
dev_dbg(&spi->dev, " %s block, %d bytes\n", write_or_read, t->len);
if (direction == DMA_TO_DEVICE)
status = mmc_spi_writeblock(host, t, timeout);
else
status = mmc_spi_readblock(host, t, timeout);
if (status < 0)
break;
data->bytes_xfered += t->len;
length -= t->len;
if (!multiple)
break;
}
/* discard mappings */
if (direction == DMA_FROM_DEVICE)
flush_dcache_page(sg_page(sg));
kunmap(sg_page(sg));
if (dma_dev)
dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);
if (status < 0) {
data->error = status;
dev_dbg(&spi->dev, "%s status %d\n", write_or_read, status);
break;
}
}
/* NOTE some docs describe an MMC-only SET_BLOCK_COUNT (CMD23) that
* can be issued before multiblock writes. Unlike its more widely
* documented analogue for SD cards (SET_WR_BLK_ERASE_COUNT, ACMD23),
* that can affect the STOP_TRAN logic. Complete (and current)
* MMC specs should sort that out before Linux starts using CMD23.
*/
if (direction == DMA_TO_DEVICE && multiple) {
struct scratch *scratch = host->data;
int tmp;
const unsigned statlen = sizeof(scratch->status);
dev_dbg(&spi->dev, " STOP_TRAN\n");
/* Tweak the per-block message we set up earlier by morphing
* it to hold single buffer with the token followed by some
* all-ones bytes ... skip N(BR) (0..1), scan the rest for
* "not busy any longer" status, and leave chip selected.
*/
INIT_LIST_HEAD(&host->m.transfers);
list_add(&host->early_status.transfer_list,
&host->m.transfers);
memset(scratch->status, 0xff, statlen);
scratch->status[0] = SPI_TOKEN_STOP_TRAN;
host->early_status.tx_buf = host->early_status.rx_buf;
host->early_status.tx_dma = host->early_status.rx_dma;
host->early_status.len = statlen;
if (host->dma_dev)
dma_sync_single_for_device(host->dma_dev,
host->data_dma, sizeof(*scratch),
DMA_BIDIRECTIONAL);
tmp = spi_sync_locked(spi, &host->m);
if (host->dma_dev)
dma_sync_single_for_cpu(host->dma_dev,
host->data_dma, sizeof(*scratch),
DMA_BIDIRECTIONAL);
if (tmp < 0) {
if (!data->error)
data->error = tmp;
return;
}
/* Ideally we collected "not busy" status with one I/O,
* avoiding wasteful byte-at-a-time scanning... but more
* I/O is often needed.
*/
for (tmp = 2; tmp < statlen; tmp++) {
if (scratch->status[tmp] != 0)
return;
}
tmp = mmc_spi_wait_unbusy(host, timeout);
if (tmp < 0 && !data->error)
data->error = tmp;
}
}
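/*
* Example of the timeout computation above (illustrative numbers): with
* timeout_ns == 100 ms, timeout_clks == 0 and a 25 MHz clock the sum is
* 100000 us, so the per-block timeout becomes usecs_to_jiffies(100000)
* + 1. The timeout_clks term only matters for cards that express their
* limit in clock cycles, e.g. 100000 clocks at 25 MHz adds 4000 us.
*/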
/****************************************************************************/
/*
* MMC driver implementation -- the interface to the MMC stack
*/
static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct mmc_spi_host *host = mmc_priv(mmc);
int status = -EINVAL;
int crc_retry = 5;
struct mmc_command stop;
#ifdef DEBUG
/* MMC core and layered drivers *MUST* issue SPI-aware commands */
{
struct mmc_command *cmd;
int invalid = 0;
cmd = mrq->cmd;
if (!mmc_spi_resp_type(cmd)) {
dev_dbg(&host->spi->dev, "bogus command\n");
cmd->error = -EINVAL;
invalid = 1;
}
cmd = mrq->stop;
if (cmd && !mmc_spi_resp_type(cmd)) {
dev_dbg(&host->spi->dev, "bogus STOP command\n");
cmd->error = -EINVAL;
invalid = 1;
}
if (invalid) {
dump_stack();
mmc_request_done(host->mmc, mrq);
return;
}
}
#endif
/* request exclusive bus access */
spi_bus_lock(host->spi->master);
crc_recover:
/* issue command; then optionally data and stop */
status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
if (status == 0 && mrq->data) {
mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz);
/*
* The SPI bus is not always reliable for large data transfers.
* If an occasional crc error is reported by the SD device with
* data read/write over SPI, it may be recovered by repeating
* the last SD command again. The retry count is set to 5 to
* ensure the driver passes stress tests.
*/
if (mrq->data->error == -EILSEQ && crc_retry) {
stop.opcode = MMC_STOP_TRANSMISSION;
stop.arg = 0;
stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
status = mmc_spi_command_send(host, mrq, &stop, 0);
crc_retry--;
mrq->data->error = 0;
goto crc_recover;
}
if (mrq->stop)
status = mmc_spi_command_send(host, mrq, mrq->stop, 0);
else
mmc_cs_off(host);
}
/* release the bus */
spi_bus_unlock(host->spi->master);
mmc_request_done(host->mmc, mrq);
}
/* See Section 6.4.1 in the SD "Simplified Physical Layer Specification 2.0"
*
* NOTE that here we can't know that the card has just been powered up;
* not all MMC/SD sockets support power switching.
*
* FIXME when the card is still in SPI mode, e.g. from a previous kernel,
* this doesn't seem to do the right thing at all...
*/
static void mmc_spi_initsequence(struct mmc_spi_host *host)
{
/* Try to be very sure any previous command has completed;
* wait till not-busy, skip debris from any old commands.
*/
mmc_spi_wait_unbusy(host, msecs_to_jiffies(MMC_SPI_INIT_TIMEOUT_MS));
mmc_spi_readbytes(host, 10);
/*
* Do a burst with chipselect active-high. We need to do this to
* meet the requirement of 74 clock cycles with both chipselect
* and CMD (MOSI) high before CMD0 ... after the card has been
* powered up to Vdd(min), and so is ready to take commands.
*
* Some cards are particularly needy of this (e.g. Viking "SD256")
* while most others don't seem to care.
*
* Note that this is one of the places MMC/SD plays games with the
* SPI protocol. Another is that when chipselect is released while
* the card returns BUSY status, the clock must issue several cycles
* with chipselect high before the card will stop driving its output.
*
* SPI_CS_HIGH means "asserted" here. In some cases like when using
* GPIOs for chip select, SPI_CS_HIGH is set but this will be logically
* inverted by gpiolib, so if we want to be certain we drive it high,
* we should toggle the default with an XOR as we do here.
*/
host->spi->mode ^= SPI_CS_HIGH;
if (spi_setup(host->spi) != 0) {
/* Just warn; most cards work without it. */
dev_warn(&host->spi->dev,
"can't change chip-select polarity\n");
host->spi->mode ^= SPI_CS_HIGH;
} else {
mmc_spi_readbytes(host, 18);
host->spi->mode ^= SPI_CS_HIGH;
if (spi_setup(host->spi) != 0) {
/* Wot, we can't get the same setup we had before? */
dev_err(&host->spi->dev,
"can't restore chip-select polarity\n");
}
}
}
static char *mmc_powerstring(u8 power_mode)
{
switch (power_mode) {
case MMC_POWER_OFF: return "off";
case MMC_POWER_UP: return "up";
case MMC_POWER_ON: return "on";
}
return "?";
}
static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct mmc_spi_host *host = mmc_priv(mmc);
if (host->power_mode != ios->power_mode) {
int canpower;
canpower = host->pdata && host->pdata->setpower;
dev_dbg(&host->spi->dev, "power %s (%d)%s\n",
mmc_powerstring(ios->power_mode),
ios->vdd,
canpower ? ", can switch" : "");
/* switch power on/off if possible, accounting for
* max 250msec powerup time if needed.
*/
if (canpower) {
switch (ios->power_mode) {
case MMC_POWER_OFF:
case MMC_POWER_UP:
host->pdata->setpower(&host->spi->dev,
ios->vdd);
if (ios->power_mode == MMC_POWER_UP)
msleep(host->powerup_msecs);
}
}
/* See 6.4.1 in the simplified SD card physical spec 2.0 */
if (ios->power_mode == MMC_POWER_ON)
mmc_spi_initsequence(host);
/* If powering down, ground all card inputs to avoid power
* delivery from data lines! On a shared SPI bus, this
* will probably be temporary; 6.4.2 of the simplified SD
* spec says this must last at least 1msec.
*
* - Clock low means CPOL 0, e.g. mode 0
* - MOSI low comes from writing zero
* - Chipselect is usually active low...
*/
if (canpower && ios->power_mode == MMC_POWER_OFF) {
int mres;
u8 nullbyte = 0;
host->spi->mode &= ~(SPI_CPOL|SPI_CPHA);
mres = spi_setup(host->spi);
if (mres < 0)
dev_dbg(&host->spi->dev,
"switch to SPI mode 0 failed\n");
if (spi_write(host->spi, &nullbyte, 1) < 0)
dev_dbg(&host->spi->dev,
"put spi signals to low failed\n");
/*
* Now clock should be low due to spi mode 0;
* MOSI should be low because of written 0x00;
* chipselect should be low (it is active low)
* power supply is off, so now MMC is off too!
*
* FIXME no, chipselect can be high since the
* device is inactive and SPI_CS_HIGH is clear...
*/
msleep(10);
if (mres == 0) {
host->spi->mode |= (SPI_CPOL|SPI_CPHA);
mres = spi_setup(host->spi);
if (mres < 0)
dev_dbg(&host->spi->dev,
"switch back to SPI mode 3 failed\n");
}
}
host->power_mode = ios->power_mode;
}
if (host->spi->max_speed_hz != ios->clock && ios->clock != 0) {
int status;
host->spi->max_speed_hz = ios->clock;
status = spi_setup(host->spi);
dev_dbg(&host->spi->dev, " clock to %d Hz, %d\n",
host->spi->max_speed_hz, status);
}
}
static const struct mmc_host_ops mmc_spi_ops = {
.request = mmc_spi_request,
.set_ios = mmc_spi_set_ios,
.get_ro = mmc_gpio_get_ro,
.get_cd = mmc_gpio_get_cd,
};
/****************************************************************************/
/*
* SPI driver implementation
*/
static irqreturn_t
mmc_spi_detect_irq(int irq, void *mmc)
{
struct mmc_spi_host *host = mmc_priv(mmc);
u16 delay_msec = max(host->pdata->detect_delay, (u16)100);
mmc_detect_change(mmc, msecs_to_jiffies(delay_msec));
return IRQ_HANDLED;
}
#ifdef CONFIG_HAS_DMA
static int mmc_spi_dma_alloc(struct mmc_spi_host *host)
{
struct spi_device *spi = host->spi;
struct device *dev;
if (!spi->master->dev.parent->dma_mask)
return 0;
dev = spi->master->dev.parent;
host->ones_dma = dma_map_single(dev, host->ones, MMC_SPI_BLOCKSIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, host->ones_dma))
return -ENOMEM;
host->data_dma = dma_map_single(dev, host->data, sizeof(*host->data),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, host->data_dma)) {
dma_unmap_single(dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
DMA_TO_DEVICE);
return -ENOMEM;
}
dma_sync_single_for_cpu(dev, host->data_dma, sizeof(*host->data),
DMA_BIDIRECTIONAL);
host->dma_dev = dev;
return 0;
}
static void mmc_spi_dma_free(struct mmc_spi_host *host)
{
if (!host->dma_dev)
return;
dma_unmap_single(host->dma_dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
DMA_TO_DEVICE);
dma_unmap_single(host->dma_dev, host->data_dma, sizeof(*host->data),
DMA_BIDIRECTIONAL);
}
#else
static inline int mmc_spi_dma_alloc(struct mmc_spi_host *host) { return 0; }
static inline void mmc_spi_dma_free(struct mmc_spi_host *host) {}
#endif
static int mmc_spi_probe(struct spi_device *spi)
{
void *ones;
struct mmc_host *mmc;
struct mmc_spi_host *host;
int status;
bool has_ro = false;
/* We rely on full duplex transfers, mostly to reduce
* per-transfer overheads (by making fewer transfers).
*/
if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
return -EINVAL;
/* MMC and SD specs only seem to care that sampling is on the
* rising edge ... meaning SPI modes 0 or 3. So either SPI mode
* should be legit. We'll use mode 0 since the steady state is 0,
* which is appropriate for hotplugging, unless the platform data
* specifies mode 3 (if the hardware is not compatible with mode 0).
*/
if (spi->mode != SPI_MODE_3)
spi->mode = SPI_MODE_0;
spi->bits_per_word = 8;
status = spi_setup(spi);
if (status < 0) {
dev_dbg(&spi->dev, "needs SPI mode %02x, %d KHz; %d\n",
spi->mode, spi->max_speed_hz / 1000,
status);
return status;
}
/* We need a supply of ones to transmit. This is the only time
* the CPU touches these, so cache coherency isn't a concern.
*
* NOTE if many systems use more than one MMC-over-SPI connector
* it'd save some memory to share this. That's evidently rare.
*/
status = -ENOMEM;
ones = kmalloc(MMC_SPI_BLOCKSIZE, GFP_KERNEL);
if (!ones)
goto nomem;
memset(ones, 0xff, MMC_SPI_BLOCKSIZE);
mmc = mmc_alloc_host(sizeof(*host), &spi->dev);
if (!mmc)
goto nomem;
mmc->ops = &mmc_spi_ops;
mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
mmc->max_segs = MMC_SPI_BLOCKSATONCE;
mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE;
mmc->max_blk_count = MMC_SPI_BLOCKSATONCE;
mmc->caps = MMC_CAP_SPI;
/* SPI doesn't need the lowspeed device identification thing for
* MMC or SD cards, since it never comes up in open drain mode.
* That's good; some SPI masters can't handle very low speeds!
*
* However, low speed SDIO cards need not handle over 400 kHz;
* that's the only reason not to use a few MHz for f_min (until
* the upper layer reads the target frequency from the CSD).
*/
mmc->f_min = 400000;
mmc->f_max = spi->max_speed_hz;
host = mmc_priv(mmc);
host->mmc = mmc;
host->spi = spi;
host->ones = ones;
dev_set_drvdata(&spi->dev, mmc);
/* Platform data is used to hook up things like card sensing
* and power switching gpios.
*/
host->pdata = mmc_spi_get_pdata(spi);
if (host->pdata)
mmc->ocr_avail = host->pdata->ocr_mask;
if (!mmc->ocr_avail) {
dev_warn(&spi->dev, "ASSUMING 3.2-3.4 V slot power\n");
mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
}
if (host->pdata && host->pdata->setpower) {
host->powerup_msecs = host->pdata->powerup_msecs;
if (!host->powerup_msecs || host->powerup_msecs > 250)
host->powerup_msecs = 250;
}
/* preallocate dma buffers */
host->data = kmalloc(sizeof(*host->data), GFP_KERNEL);
if (!host->data)
goto fail_nobuf1;
status = mmc_spi_dma_alloc(host);
if (status)
goto fail_dma;
/* setup message for status/busy readback */
spi_message_init(&host->readback);
host->readback.is_dma_mapped = (host->dma_dev != NULL);
spi_message_add_tail(&host->status, &host->readback);
host->status.tx_buf = host->ones;
host->status.tx_dma = host->ones_dma;
host->status.rx_buf = &host->data->status;
host->status.rx_dma = host->data_dma + offsetof(struct scratch, status);
host->status.cs_change = 1;
/* register card detect irq */
if (host->pdata && host->pdata->init) {
status = host->pdata->init(&spi->dev, mmc_spi_detect_irq, mmc);
if (status != 0)
goto fail_glue_init;
}
/* pass platform capabilities, if any */
if (host->pdata) {
mmc->caps |= host->pdata->caps;
mmc->caps2 |= host->pdata->caps2;
}
status = mmc_add_host(mmc);
if (status != 0)
goto fail_glue_init;
/*
* Index 0 is card detect
* Old boardfiles were specifying 1 ms as debounce
*/
status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000);
if (status == -EPROBE_DEFER)
goto fail_gpiod_request;
if (!status) {
/*
* The platform has a CD GPIO signal that may support
* interrupts, so let mmc_gpiod_request_cd_irq() decide
* if polling is needed or not.
*/
mmc->caps &= ~MMC_CAP_NEEDS_POLL;
mmc_gpiod_request_cd_irq(mmc);
}
mmc_detect_change(mmc, 0);
/* Index 1 is write protect/read only */
status = mmc_gpiod_request_ro(mmc, NULL, 1, 0);
if (status == -EPROBE_DEFER)
goto fail_gpiod_request;
if (!status)
has_ro = true;
dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
dev_name(&mmc->class_dev),
host->dma_dev ? "" : ", no DMA",
has_ro ? "" : ", no WP",
(host->pdata && host->pdata->setpower)
? "" : ", no poweroff",
(mmc->caps & MMC_CAP_NEEDS_POLL)
? ", cd polling" : "");
return 0;
fail_gpiod_request:
mmc_remove_host(mmc);
fail_glue_init:
mmc_spi_dma_free(host);
fail_dma:
kfree(host->data);
fail_nobuf1:
mmc_spi_put_pdata(spi);
mmc_free_host(mmc);
nomem:
kfree(ones);
return status;
}
static void mmc_spi_remove(struct spi_device *spi)
{
struct mmc_host *mmc = dev_get_drvdata(&spi->dev);
struct mmc_spi_host *host = mmc_priv(mmc);
/* prevent new mmc_detect_change() calls */
if (host->pdata && host->pdata->exit)
host->pdata->exit(&spi->dev, mmc);
mmc_remove_host(mmc);
mmc_spi_dma_free(host);
kfree(host->data);
kfree(host->ones);
spi->max_speed_hz = mmc->f_max;
mmc_spi_put_pdata(spi);
mmc_free_host(mmc);
}
static const struct spi_device_id mmc_spi_dev_ids[] = {
{ "mmc-spi-slot"},
{ },
};
MODULE_DEVICE_TABLE(spi, mmc_spi_dev_ids);
static const struct of_device_id mmc_spi_of_match_table[] = {
{ .compatible = "mmc-spi-slot", },
{},
};
MODULE_DEVICE_TABLE(of, mmc_spi_of_match_table);
static struct spi_driver mmc_spi_driver = {
.driver = {
.name = "mmc_spi",
.of_match_table = mmc_spi_of_match_table,
},
.id_table = mmc_spi_dev_ids,
.probe = mmc_spi_probe,
.remove = mmc_spi_remove,
};
module_spi_driver(mmc_spi_driver);
MODULE_AUTHOR("Mike Lavender, David Brownell, Hans-Peter Nilsson, Jan Nikitenko");
MODULE_DESCRIPTION("SPI SD/MMC host driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:mmc_spi");
| linux-master | drivers/mmc/host/mmc_spi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for Marvell Xenon SDHC as a platform device
*
* Copyright (C) 2016 Marvell, All Rights Reserved.
*
* Author: Hu Ziji <[email protected]>
* Date: 2016-8-24
*
* Inspired by Jisheng Zhang <[email protected]>
* Special thanks to Video BG4 project team.
*/
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include "sdhci-pltfm.h"
#include "sdhci-xenon.h"
static int xenon_enable_internal_clk(struct sdhci_host *host)
{
u32 reg;
ktime_t timeout;
reg = sdhci_readl(host, SDHCI_CLOCK_CONTROL);
reg |= SDHCI_CLOCK_INT_EN;
sdhci_writel(host, reg, SDHCI_CLOCK_CONTROL);
/* Wait max 20 ms */
timeout = ktime_add_ms(ktime_get(), 20);
while (1) {
bool timedout = ktime_after(ktime_get(), timeout);
reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
if (reg & SDHCI_CLOCK_INT_STABLE)
break;
if (timedout) {
dev_err(mmc_dev(host->mmc), "Internal clock never stabilised.\n");
return -ETIMEDOUT;
}
usleep_range(900, 1100);
}
return 0;
}
/* Set SDCLK-off-while-idle */
static void xenon_set_sdclk_off_idle(struct sdhci_host *host,
unsigned char sdhc_id, bool enable)
{
u32 reg;
u32 mask;
reg = sdhci_readl(host, XENON_SYS_OP_CTRL);
/* Get the bit shift basing on the SDHC index */
mask = (0x1 << (XENON_SDCLK_IDLEOFF_ENABLE_SHIFT + sdhc_id));
if (enable)
reg |= mask;
else
reg &= ~mask;
sdhci_writel(host, reg, XENON_SYS_OP_CTRL);
}
/* Enable/Disable the Auto Clock Gating function */
static void xenon_set_acg(struct sdhci_host *host, bool enable)
{
u32 reg;
reg = sdhci_readl(host, XENON_SYS_OP_CTRL);
if (enable)
reg &= ~XENON_AUTO_CLKGATE_DISABLE_MASK;
else
reg |= XENON_AUTO_CLKGATE_DISABLE_MASK;
sdhci_writel(host, reg, XENON_SYS_OP_CTRL);
}
/* Enable this SDHC */
static void xenon_enable_sdhc(struct sdhci_host *host,
unsigned char sdhc_id)
{
u32 reg;
reg = sdhci_readl(host, XENON_SYS_OP_CTRL);
reg |= (BIT(sdhc_id) << XENON_SLOT_ENABLE_SHIFT);
sdhci_writel(host, reg, XENON_SYS_OP_CTRL);
host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
/*
* Force to clear BUS_TEST to
* skip bus_test_pre and bus_test_post
*/
host->mmc->caps &= ~MMC_CAP_BUS_WIDTH_TEST;
}
/* Disable this SDHC */
static void xenon_disable_sdhc(struct sdhci_host *host,
unsigned char sdhc_id)
{
u32 reg;
reg = sdhci_readl(host, XENON_SYS_OP_CTRL);
reg &= ~(BIT(sdhc_id) << XENON_SLOT_ENABLE_SHIFT);
sdhci_writel(host, reg, XENON_SYS_OP_CTRL);
}
/* Enable Parallel Transfer Mode */
static void xenon_enable_sdhc_parallel_tran(struct sdhci_host *host,
unsigned char sdhc_id)
{
u32 reg;
reg = sdhci_readl(host, XENON_SYS_EXT_OP_CTRL);
reg |= BIT(sdhc_id);
sdhci_writel(host, reg, XENON_SYS_EXT_OP_CTRL);
}
/* Mask command conflict error */
static void xenon_mask_cmd_conflict_err(struct sdhci_host *host)
{
u32 reg;
reg = sdhci_readl(host, XENON_SYS_EXT_OP_CTRL);
reg |= XENON_MASK_CMD_CONFLICT_ERR;
sdhci_writel(host, reg, XENON_SYS_EXT_OP_CTRL);
}
static void xenon_retune_setup(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
u32 reg;
/* Disable the Re-Tuning Request functionality */
reg = sdhci_readl(host, XENON_SLOT_RETUNING_REQ_CTRL);
reg &= ~XENON_RETUNING_COMPATIBLE;
sdhci_writel(host, reg, XENON_SLOT_RETUNING_REQ_CTRL);
/* Disable the Re-tuning Interrupt */
reg = sdhci_readl(host, SDHCI_SIGNAL_ENABLE);
reg &= ~SDHCI_INT_RETUNE;
sdhci_writel(host, reg, SDHCI_SIGNAL_ENABLE);
reg = sdhci_readl(host, SDHCI_INT_ENABLE);
reg &= ~SDHCI_INT_RETUNE;
sdhci_writel(host, reg, SDHCI_INT_ENABLE);
/* Force to use Tuning Mode 1 */
host->tuning_mode = SDHCI_TUNING_MODE_1;
/* Set re-tuning period */
host->tuning_count = 1 << (priv->tuning_count - 1);
}
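/*
* host->tuning_count here is the re-tuning period in seconds, encoded
* in the usual SDHCI 2^(n-1) form. As an illustration, a DT
* "marvell,xenon-tun-count" of 9 yields 1 << 8 == 256 seconds between
* re-tuning cycles in Tuning Mode 1.
*/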
/*
* Operations inside struct sdhci_ops
*/
/* Recover the Register Setting cleared during SOFTWARE_RESET_ALL */
static void xenon_reset_exit(struct sdhci_host *host,
unsigned char sdhc_id, u8 mask)
{
/* Only SOFTWARE RESET ALL will clear the register setting */
if (!(mask & SDHCI_RESET_ALL))
return;
/* Disable tuning request and auto-retuning again */
xenon_retune_setup(host);
/*
* The ACG should be turned off at the early init time, in order
* to solve possible issues with 1.8V regulator stabilization.
* The feature is enabled in a later stage.
*/
xenon_set_acg(host, false);
xenon_set_sdclk_off_idle(host, sdhc_id, false);
xenon_mask_cmd_conflict_err(host);
}
static void xenon_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
sdhci_reset(host, mask);
xenon_reset_exit(host, priv->sdhc_id, mask);
}
/*
* Xenon defines different values for HS200 and HS400
* in Host_Control_2
*/
static void xenon_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
u16 ctrl_2;
ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
/* Select Bus Speed Mode for host */
ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
if (timing == MMC_TIMING_MMC_HS200)
ctrl_2 |= XENON_CTRL_HS200;
else if (timing == MMC_TIMING_UHS_SDR104)
ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
else if (timing == MMC_TIMING_UHS_SDR12)
ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
else if (timing == MMC_TIMING_UHS_SDR25)
ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
else if (timing == MMC_TIMING_UHS_SDR50)
ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
else if ((timing == MMC_TIMING_UHS_DDR50) ||
(timing == MMC_TIMING_MMC_DDR52))
ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
else if (timing == MMC_TIMING_MMC_HS400)
ctrl_2 |= XENON_CTRL_HS400;
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
static void xenon_set_power(struct sdhci_host *host, unsigned char mode,
unsigned short vdd)
{
struct mmc_host *mmc = host->mmc;
u8 pwr = host->pwr;
sdhci_set_power_noreg(host, mode, vdd);
if (host->pwr == pwr)
return;
if (host->pwr == 0)
vdd = 0;
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
}
static void xenon_voltage_switch(struct sdhci_host *host)
{
/* Wait for 5ms after set 1.8V signal enable bit */
usleep_range(5000, 5500);
}
static unsigned int xenon_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
if (pltfm_host->clk)
return sdhci_pltfm_clk_get_max_clock(host);
else
return pltfm_host->clock;
}
static const struct sdhci_ops sdhci_xenon_ops = {
.voltage_switch = xenon_voltage_switch,
.set_clock = sdhci_set_clock,
.set_power = xenon_set_power,
.set_bus_width = sdhci_set_bus_width,
.reset = xenon_reset,
.set_uhs_signaling = xenon_set_uhs_signaling,
.get_max_clock = xenon_get_max_clock,
};
static const struct sdhci_pltfm_data sdhci_xenon_pdata = {
.ops = &sdhci_xenon_ops,
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
};
/*
* Xenon Specific Operations in mmc_host_ops
*/
static void xenon_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
u32 reg;
/*
* HS400/HS200/eMMC HS doesn't have Preset Value register.
* However, sdhci_set_ios will read HS400/HS200 Preset register.
* Disable Preset Value register for HS400/HS200.
* eMMC HS with preset_enabled set will trigger a bug in
* get_preset_value().
*/
if ((ios->timing == MMC_TIMING_MMC_HS400) ||
(ios->timing == MMC_TIMING_MMC_HS200) ||
(ios->timing == MMC_TIMING_MMC_HS)) {
host->preset_enabled = false;
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
host->flags &= ~SDHCI_PV_ENABLED;
reg = sdhci_readw(host, SDHCI_HOST_CONTROL2);
reg &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
sdhci_writew(host, reg, SDHCI_HOST_CONTROL2);
} else {
host->quirks2 &= ~SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
}
sdhci_set_ios(mmc, ios);
xenon_phy_adj(host, ios);
if (host->clock > XENON_DEFAULT_SDCLK_FREQ)
xenon_set_sdclk_off_idle(host, priv->sdhc_id, true);
}
static int xenon_start_signal_voltage_switch(struct mmc_host *mmc,
struct mmc_ios *ios)
{
struct sdhci_host *host = mmc_priv(mmc);
/*
* Before SD/SDIO sets the signal voltage, the SD bus clock should be
* disabled. However, sdhci_set_clock will also disable the internal
* clock in mmc_set_signal_voltage().
* If the internal clock is disabled, the 3.3V/1.8V bit cannot be
* updated, so we manually enable the internal clock here.
*
* After the switch completes, it is unnecessary to disable the internal
* clock, since keeping it active obeys the SD spec.
*/
xenon_enable_internal_clk(host);
xenon_soc_pad_ctrl(host, ios->signal_voltage);
/*
* If Vqmmc is fixed on the platform, the vqmmc regulator should be
* unavailable, and the SDHCI_CTRL_VDD_180 bit might then not work.
* Skip the standard voltage switch to avoid any issue.
*/
if (PTR_ERR(mmc->supply.vqmmc) == -ENODEV)
return 0;
return sdhci_start_signal_voltage_switch(mmc, ios);
}
/*
* Update card type.
* priv->init_card_type will be used in PHY timing adjustment.
*/
static void xenon_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
/* Update card type */
priv->init_card_type = card->type;
}
static int xenon_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
if (host->timing == MMC_TIMING_UHS_DDR50 ||
host->timing == MMC_TIMING_MMC_DDR52)
return 0;
/*
* Currently force the Xenon driver back to supporting mode 1 only,
* even though Xenon might claim to support mode 2 or mode 3.
* It requires more time to test mode 2/mode 3 on more platforms.
*/
if (host->tuning_mode != SDHCI_TUNING_MODE_1)
xenon_retune_setup(host);
return sdhci_execute_tuning(mmc, opcode);
}
static void xenon_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
u32 reg;
u8 sdhc_id = priv->sdhc_id;
sdhci_enable_sdio_irq(mmc, enable);
if (enable) {
/*
* Set SDIO Card Inserted indication
* to enable detecting SDIO async irq.
*/
reg = sdhci_readl(host, XENON_SYS_CFG_INFO);
reg |= (1 << (sdhc_id + XENON_SLOT_TYPE_SDIO_SHIFT));
sdhci_writel(host, reg, XENON_SYS_CFG_INFO);
} else {
/* Clear SDIO Card Inserted indication */
reg = sdhci_readl(host, XENON_SYS_CFG_INFO);
reg &= ~(1 << (sdhc_id + XENON_SLOT_TYPE_SDIO_SHIFT));
sdhci_writel(host, reg, XENON_SYS_CFG_INFO);
}
}
static void xenon_replace_mmc_host_ops(struct sdhci_host *host)
{
host->mmc_host_ops.set_ios = xenon_set_ios;
host->mmc_host_ops.start_signal_voltage_switch =
xenon_start_signal_voltage_switch;
host->mmc_host_ops.init_card = xenon_init_card;
host->mmc_host_ops.execute_tuning = xenon_execute_tuning;
host->mmc_host_ops.enable_sdio_irq = xenon_enable_sdio_irq;
}
/*
* Parse Xenon specific DT properties:
* sdhc-id: the index of current SDHC.
* Refer to XENON_SYS_CFG_INFO register
* tun-count: the interval between re-tuning
*/
static int xenon_probe_params(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sdhci_host *host = platform_get_drvdata(pdev);
struct mmc_host *mmc = host->mmc;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
u32 sdhc_id, nr_sdhc;
u32 tuning_count;
/* Disable HS200 on Armada AP806 */
if (priv->hw_version == XENON_AP806)
host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
sdhc_id = 0x0;
if (!device_property_read_u32(dev, "marvell,xenon-sdhc-id", &sdhc_id)) {
nr_sdhc = sdhci_readl(host, XENON_SYS_CFG_INFO);
nr_sdhc &= XENON_NR_SUPPORTED_SLOT_MASK;
if (unlikely(sdhc_id > nr_sdhc)) {
dev_err(mmc_dev(mmc), "SDHC Index %d exceeds Number of SDHCs %d\n",
sdhc_id, nr_sdhc);
return -EINVAL;
}
}
priv->sdhc_id = sdhc_id;
tuning_count = XENON_DEF_TUNING_COUNT;
if (!device_property_read_u32(dev, "marvell,xenon-tun-count",
&tuning_count)) {
if (unlikely(tuning_count >= XENON_TMR_RETUN_NO_PRESENT)) {
dev_err(mmc_dev(mmc), "Wrong Re-tuning Count. Set default value %d\n",
XENON_DEF_TUNING_COUNT);
tuning_count = XENON_DEF_TUNING_COUNT;
}
}
priv->tuning_count = tuning_count;
return xenon_phy_parse_params(dev, host);
}
static int xenon_sdhc_prepare(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
u8 sdhc_id = priv->sdhc_id;
/* Enable SDHC */
xenon_enable_sdhc(host, sdhc_id);
/* Enable ACG */
xenon_set_acg(host, true);
/* Enable Parallel Transfer Mode */
xenon_enable_sdhc_parallel_tran(host, sdhc_id);
/* Disable SDCLK-Off-While-Idle before card init */
xenon_set_sdclk_off_idle(host, sdhc_id, false);
xenon_mask_cmd_conflict_err(host);
return 0;
}
static void xenon_sdhc_unprepare(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
u8 sdhc_id = priv->sdhc_id;
/* disable SDHC */
xenon_disable_sdhc(host, sdhc_id);
}
static int xenon_probe(struct platform_device *pdev)
{
struct sdhci_pltfm_host *pltfm_host;
struct device *dev = &pdev->dev;
struct sdhci_host *host;
struct xenon_priv *priv;
int err;
host = sdhci_pltfm_init(pdev, &sdhci_xenon_pdata,
sizeof(struct xenon_priv));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
priv = sdhci_pltfm_priv(pltfm_host);
priv->hw_version = (unsigned long)device_get_match_data(&pdev->dev);
/*
* Link Xenon specific mmc_host_ops function,
* to replace standard ones in sdhci_ops.
*/
xenon_replace_mmc_host_ops(host);
if (dev->of_node) {
pltfm_host->clk = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(pltfm_host->clk)) {
err = PTR_ERR(pltfm_host->clk);
dev_err(&pdev->dev, "Failed to setup input clk: %d\n", err);
goto free_pltfm;
}
err = clk_prepare_enable(pltfm_host->clk);
if (err)
goto free_pltfm;
priv->axi_clk = devm_clk_get(&pdev->dev, "axi");
if (IS_ERR(priv->axi_clk)) {
err = PTR_ERR(priv->axi_clk);
if (err == -EPROBE_DEFER)
goto err_clk;
} else {
err = clk_prepare_enable(priv->axi_clk);
if (err)
goto err_clk;
}
}
err = mmc_of_parse(host->mmc);
if (err)
goto err_clk_axi;
sdhci_get_property(pdev);
xenon_set_acg(host, false);
/* Xenon specific parameters parse */
err = xenon_probe_params(pdev);
if (err)
goto err_clk_axi;
err = xenon_sdhc_prepare(host);
if (err)
goto err_clk_axi;
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_suspend_ignore_children(&pdev->dev, 1);
err = sdhci_add_host(host);
if (err)
goto remove_sdhc;
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
remove_sdhc:
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
xenon_sdhc_unprepare(host);
err_clk_axi:
clk_disable_unprepare(priv->axi_clk);
err_clk:
clk_disable_unprepare(pltfm_host->clk);
free_pltfm:
sdhci_pltfm_free(pdev);
return err;
}
static void xenon_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
sdhci_remove_host(host, 0);
xenon_sdhc_unprepare(host);
clk_disable_unprepare(priv->axi_clk);
clk_disable_unprepare(pltfm_host->clk);
sdhci_pltfm_free(pdev);
}
#ifdef CONFIG_PM_SLEEP
static int xenon_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
int ret;
ret = pm_runtime_force_suspend(dev);
priv->restore_needed = true;
return ret;
}
#endif
#ifdef CONFIG_PM
static int xenon_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
int ret;
ret = sdhci_runtime_suspend_host(host);
if (ret)
return ret;
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
clk_disable_unprepare(pltfm_host->clk);
/*
* Need to update priv->clock here; otherwise, on runtime resume,
* the PHY won't be aware of the clock change and won't readjust
* itself, which will cause command errors.
*/
priv->clock = 0;
return 0;
}
static int xenon_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
int ret;
ret = clk_prepare_enable(pltfm_host->clk);
if (ret) {
dev_err(dev, "can't enable mainck\n");
return ret;
}
if (priv->restore_needed) {
ret = xenon_sdhc_prepare(host);
if (ret)
goto out;
priv->restore_needed = false;
}
ret = sdhci_runtime_resume_host(host, 0);
if (ret)
goto out;
return 0;
out:
clk_disable_unprepare(pltfm_host->clk);
return ret;
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops sdhci_xenon_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(xenon_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(xenon_runtime_suspend,
xenon_runtime_resume,
NULL)
};
static const struct of_device_id sdhci_xenon_dt_ids[] = {
{ .compatible = "marvell,armada-ap806-sdhci", .data = (void *)XENON_AP806},
{ .compatible = "marvell,armada-ap807-sdhci", .data = (void *)XENON_AP807},
{ .compatible = "marvell,armada-cp110-sdhci", .data = (void *)XENON_CP110},
{ .compatible = "marvell,armada-3700-sdhci", .data = (void *)XENON_A3700},
{}
};
MODULE_DEVICE_TABLE(of, sdhci_xenon_dt_ids);
#ifdef CONFIG_ACPI
static const struct acpi_device_id sdhci_xenon_acpi_ids[] = {
{ .id = "MRVL0002", XENON_AP806},
{ .id = "MRVL0003", XENON_AP807},
{ .id = "MRVL0004", XENON_CP110},
{}
};
MODULE_DEVICE_TABLE(acpi, sdhci_xenon_acpi_ids);
#endif
static struct platform_driver sdhci_xenon_driver = {
.driver = {
.name = "xenon-sdhci",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_xenon_dt_ids,
.acpi_match_table = ACPI_PTR(sdhci_xenon_acpi_ids),
.pm = &sdhci_xenon_dev_pm_ops,
},
.probe = xenon_probe,
.remove_new = xenon_remove,
};
module_platform_driver(sdhci_xenon_driver);
MODULE_DESCRIPTION("SDHCI platform driver for Marvell Xenon SDHC");
MODULE_AUTHOR("Hu Ziji <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mmc/host/sdhci-xenon.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Freescale eSDHC controller driver.
*
* Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
* Copyright (c) 2009 MontaVista Software, Inc.
* Copyright 2020 NXP
*
* Authors: Xiaobo Xie <[email protected]>
* Anton Vorontsov <[email protected]>
*/
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/sys_soc.h>
#include <linux/clk.h>
#include <linux/ktime.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
#define VENDOR_V_22 0x12
#define VENDOR_V_23 0x13
#define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1)
struct esdhc_clk_fixup {
const unsigned int sd_dflt_max_clk;
const unsigned int max_clk[MMC_TIMING_NUM];
};
static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
.sd_dflt_max_clk = 25000000,
.max_clk[MMC_TIMING_MMC_HS] = 46500000,
.max_clk[MMC_TIMING_SD_HS] = 46500000,
};
static const struct esdhc_clk_fixup ls1043a_esdhc_clk = {
.sd_dflt_max_clk = 25000000,
.max_clk[MMC_TIMING_UHS_SDR104] = 116700000,
.max_clk[MMC_TIMING_MMC_HS200] = 116700000,
};
static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
.sd_dflt_max_clk = 25000000,
.max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
.max_clk[MMC_TIMING_MMC_HS200] = 167000000,
};
static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
.sd_dflt_max_clk = 25000000,
.max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
.max_clk[MMC_TIMING_MMC_HS200] = 125000000,
};
static const struct esdhc_clk_fixup p1010_esdhc_clk = {
.sd_dflt_max_clk = 20000000,
.max_clk[MMC_TIMING_LEGACY] = 20000000,
.max_clk[MMC_TIMING_MMC_HS] = 42000000,
.max_clk[MMC_TIMING_SD_HS] = 40000000,
};
static const struct of_device_id sdhci_esdhc_of_match[] = {
{ .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk},
{ .compatible = "fsl,ls1043a-esdhc", .data = &ls1043a_esdhc_clk},
{ .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk},
{ .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk},
{ .compatible = "fsl,p1010-esdhc", .data = &p1010_esdhc_clk},
{ .compatible = "fsl,mpc8379-esdhc" },
{ .compatible = "fsl,mpc8536-esdhc" },
{ .compatible = "fsl,esdhc" },
{ }
};
MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
struct sdhci_esdhc {
u8 vendor_ver;
u8 spec_ver;
bool quirk_incorrect_hostver;
bool quirk_limited_clk_division;
bool quirk_unreliable_pulse_detection;
bool quirk_tuning_erratum_type1;
bool quirk_tuning_erratum_type2;
bool quirk_ignore_data_inhibit;
bool quirk_delay_before_data_reset;
bool quirk_trans_complete_erratum;
bool in_sw_tuning;
unsigned int peripheral_clock;
const struct esdhc_clk_fixup *clk_fixup;
u32 div_ratio;
};
/**
* esdhc_readl_fixup - Fixup the value read from incompatible eSDHC register
* to make it compatible with SD spec.
*
* @host: pointer to sdhci_host
* @spec_reg: SD spec register address
* @value: 32bit eSDHC register value on spec_reg address
*
* In the SD spec, there are 8/16/32/64-bit registers, while all eSDHC
* registers are 32 bits. There are differences in register size, register
* address, register function, bit position and function between eSDHC spec
* and SD spec.
*
* Return: the fixed-up register value
*/
static u32 esdhc_readl_fixup(struct sdhci_host *host,
int spec_reg, u32 value)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
u32 ret;
/*
* The bit of ADMA flag in eSDHC is not compatible with standard
* SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is
* supported by eSDHC.
* Also, for many FSL eSDHC controllers the reset value of the
* SDHCI_CAN_DO_ADMA1 field is 1, but some of them can't support ADMA;
* only those whose vendor version is greater than 2.2/0x12 support ADMA.
*/
if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
if (esdhc->vendor_ver > VENDOR_V_22) {
ret = value | SDHCI_CAN_DO_ADMA2;
return ret;
}
}
/*
* The DAT[3:0] line signal levels and the CMD line signal level are
* not compatible with standard SDHC register. The line signal levels
* DAT[7:0] are at bits 31:24 and the command line signal level is at
* bit 23. All other bits are the same as in the standard SDHC
* register.
*/
if (spec_reg == SDHCI_PRESENT_STATE) {
ret = value & 0x000fffff;
ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
ret |= (value << 1) & SDHCI_CMD_LVL;
/*
* Some controllers have an unreliable Data Line Active
* bit for commands with a busy signal. This affects the
* Command Inhibit (data) bit. Just ignore it, since the
* MMC core driver has already polled card status
* with CMD13 after any command with a busy signal.
*/
if (esdhc->quirk_ignore_data_inhibit)
ret &= ~SDHCI_DATA_INHIBIT;
return ret;
}
/*
* DTS properties of mmc host are used to enable each speed mode
* according to soc and board capability. So clean up
* SDR50/SDR104/DDR50 support bits here.
*/
if (spec_reg == SDHCI_CAPABILITIES_1) {
ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
SDHCI_SUPPORT_DDR50);
return ret;
}
ret = value;
return ret;
}
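/*
* In other words (example of the SDHCI_PRESENT_STATE remap above):
* eSDHC reports DAT[3:0] levels in bits 27:24 and the CMD level in bit
* 23, so "value >> 4" moves the data levels into the standard bits
* 23:20 (SDHCI_DATA_LVL_MASK) and "value << 1" moves the command level
* into the standard bit 24 (SDHCI_CMD_LVL).
*/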
static u16 esdhc_readw_fixup(struct sdhci_host *host,
int spec_reg, u32 value)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
u16 ret;
int shift = (spec_reg & 0x2) * 8;
if (spec_reg == SDHCI_TRANSFER_MODE)
return pltfm_host->xfer_mode_shadow;
if (spec_reg == SDHCI_HOST_VERSION)
ret = value & 0xffff;
else
ret = (value >> shift) & 0xffff;
/* Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect
* vendor version and spec version information.
*/
if ((spec_reg == SDHCI_HOST_VERSION) &&
(esdhc->quirk_incorrect_hostver))
ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
return ret;
}
static u8 esdhc_readb_fixup(struct sdhci_host *host,
int spec_reg, u32 value)
{
u8 ret;
u8 dma_bits;
int shift = (spec_reg & 0x3) * 8;
ret = (value >> shift) & 0xff;
/*
* "DMA select" locates at offset 0x28 in SD specification, but on
* P5020 or P3041, it locates at 0x29.
*/
if (spec_reg == SDHCI_HOST_CONTROL) {
/* DMA select is 22,23 bits in Protocol Control Register */
dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
/* fixup the result */
ret &= ~SDHCI_CTRL_DMA_MASK;
ret |= dma_bits;
}
return ret;
}
/**
* esdhc_writel_fixup - Fixup the SD spec register value so that it could be
* written into eSDHC register.
*
* @host: pointer to sdhci_host
* @spec_reg: SD spec register address
* @value: 8/16/32bit SD spec register value that would be written
* @old_value: 32bit eSDHC register value on spec_reg address
*
* In the SD spec, there are 8/16/32/64-bit registers, while all eSDHC
* registers are 32 bits. There are differences in register size, register
* address, register function, bit position and function between eSDHC spec
* and SD spec.
*
* Return: the fixed-up register value
*/
static u32 esdhc_writel_fixup(struct sdhci_host *host,
int spec_reg, u32 value, u32 old_value)
{
u32 ret;
/*
* Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
* when SYSCTL[RSTD] is set for some special operations.
* It has no impact on other operations.
*/
if (spec_reg == SDHCI_INT_ENABLE)
ret = value | SDHCI_INT_BLK_GAP;
else
ret = value;
return ret;
}
static u32 esdhc_writew_fixup(struct sdhci_host *host,
int spec_reg, u16 value, u32 old_value)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
int shift = (spec_reg & 0x2) * 8;
u32 ret;
switch (spec_reg) {
case SDHCI_TRANSFER_MODE:
/*
* Postpone this write, we must do it together with a
* command write that is down below. Return old value.
*/
pltfm_host->xfer_mode_shadow = value;
return old_value;
case SDHCI_COMMAND:
ret = (value << 16) | pltfm_host->xfer_mode_shadow;
return ret;
}
ret = old_value & (~(0xffff << shift));
ret |= (value << shift);
if (spec_reg == SDHCI_BLOCK_SIZE) {
/*
* The last two DMA bits are reserved, and the first one is used
* for a non-standard blksz of 4096 bytes that we don't support
* yet. So clear the DMA boundary bits.
*/
ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
}
return ret;
}
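/*
* The shift math above selects the 16-bit lane inside the containing
* 32-bit register. For example, SDHCI_BLOCK_COUNT (offset 0x06) gives
* shift == (0x06 & 0x2) * 8 == 16, so its value lands in the upper
* half-word; an offset with bit 1 clear lands in the lower one.
*/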
static u32 esdhc_writeb_fixup(struct sdhci_host *host,
int spec_reg, u8 value, u32 old_value)
{
u32 ret;
u32 dma_bits;
u8 tmp;
int shift = (spec_reg & 0x3) * 8;
/*
* eSDHC doesn't have a standard power control register, so we do
* nothing here to avoid incorrect operation.
*/
if (spec_reg == SDHCI_POWER_CONTROL)
return old_value;
/*
* "DMA select" location is offset 0x28 in SD specification, but on
* P5020 or P3041, it's located at 0x29.
*/
if (spec_reg == SDHCI_HOST_CONTROL) {
/*
* If host control register is not standard, exit
* this function
*/
if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
return old_value;
/* DMA select is 22,23 bits in Protocol Control Register */
dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
(old_value & SDHCI_CTRL_DMA_MASK);
ret = (ret & (~0xff)) | tmp;
/* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
ret &= ~ESDHC_HOST_CONTROL_RES;
return ret;
}
ret = (old_value & (~(0xff << shift))) | (value << shift);
return ret;
}
static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
{
u32 ret;
u32 value;
if (reg == SDHCI_CAPABILITIES_1)
value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1);
else
value = ioread32be(host->ioaddr + reg);
ret = esdhc_readl_fixup(host, reg, value);
return ret;
}
static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
{
u32 ret;
u32 value;
if (reg == SDHCI_CAPABILITIES_1)
value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1);
else
value = ioread32(host->ioaddr + reg);
ret = esdhc_readl_fixup(host, reg, value);
return ret;
}
static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
{
u16 ret;
u32 value;
int base = reg & ~0x3;
value = ioread32be(host->ioaddr + base);
ret = esdhc_readw_fixup(host, reg, value);
return ret;
}
static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
{
u16 ret;
u32 value;
int base = reg & ~0x3;
value = ioread32(host->ioaddr + base);
ret = esdhc_readw_fixup(host, reg, value);
return ret;
}
static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
{
u8 ret;
u32 value;
int base = reg & ~0x3;
value = ioread32be(host->ioaddr + base);
ret = esdhc_readb_fixup(host, reg, value);
return ret;
}
static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
{
u8 ret;
u32 value;
int base = reg & ~0x3;
value = ioread32(host->ioaddr + base);
ret = esdhc_readb_fixup(host, reg, value);
return ret;
}
static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
{
u32 value;
value = esdhc_writel_fixup(host, reg, val, 0);
iowrite32be(value, host->ioaddr + reg);
}
static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
{
u32 value;
value = esdhc_writel_fixup(host, reg, val, 0);
iowrite32(value, host->ioaddr + reg);
}
static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
int base = reg & ~0x3;
u32 value;
u32 ret;
value = ioread32be(host->ioaddr + base);
ret = esdhc_writew_fixup(host, reg, val, value);
if (reg != SDHCI_TRANSFER_MODE)
iowrite32be(ret, host->ioaddr + base);
/* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
* 1 us after ESDHC_EXTN is set.
*/
if (base == ESDHC_SYSTEM_CONTROL_2) {
if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
esdhc->in_sw_tuning) {
udelay(1);
ret |= ESDHC_SMPCLKSEL;
iowrite32be(ret, host->ioaddr + base);
}
}
}
static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
int base = reg & ~0x3;
u32 value;
u32 ret;
value = ioread32(host->ioaddr + base);
ret = esdhc_writew_fixup(host, reg, val, value);
if (reg != SDHCI_TRANSFER_MODE)
iowrite32(ret, host->ioaddr + base);
/* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
 * 1 us after ESDHC_EXTN is set.
 */
if (base == ESDHC_SYSTEM_CONTROL_2) {
if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
esdhc->in_sw_tuning) {
udelay(1);
ret |= ESDHC_SMPCLKSEL;
iowrite32(ret, host->ioaddr + base);
}
}
}
static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
{
int base = reg & ~0x3;
u32 value;
u32 ret;
value = ioread32be(host->ioaddr + base);
ret = esdhc_writeb_fixup(host, reg, val, value);
iowrite32be(ret, host->ioaddr + base);
}
static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
{
int base = reg & ~0x3;
u32 value;
u32 ret;
value = ioread32(host->ioaddr + base);
ret = esdhc_writeb_fixup(host, reg, val, value);
iowrite32(ret, host->ioaddr + base);
}
/*
 * For Abort or Suspend after Stop at Block Gap, ignore the ADMA
 * error (IRQSTAT[ADMAE]) if both Transfer Complete (IRQSTAT[TC])
 * and Block Gap Event (IRQSTAT[BGE]) are also set.
 * For Continue, apply a soft reset for data (SYSCTL[RSTD]) and
 * re-issue the entire read transaction from the beginning.
 */
static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
bool applicable;
dma_addr_t dmastart;
dma_addr_t dmanow;
applicable = (intmask & SDHCI_INT_DATA_END) &&
(intmask & SDHCI_INT_BLK_GAP) &&
(esdhc->vendor_ver == VENDOR_V_23);
if (!applicable)
return;
host->data->error = 0;
dmastart = sg_dma_address(host->data->sg);
dmanow = dmastart + host->data->bytes_xfered;
/*
 * Force the address up to the next SDHCI_DEFAULT_BOUNDARY_SIZE
 * (512 KiB) boundary and restart the transfer from there.
 */
dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
SDHCI_DEFAULT_BOUNDARY_SIZE;
host->data->bytes_xfered = dmanow - dmastart;
sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
}
static int esdhc_of_enable_dma(struct sdhci_host *host)
{
int ret;
u32 value;
struct device *dev = mmc_dev(host->mmc);
if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) {
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
if (ret)
return ret;
}
value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
if (of_dma_is_coherent(dev->of_node))
value |= ESDHC_DMA_SNOOP;
else
value &= ~ESDHC_DMA_SNOOP;
sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
return 0;
}
static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
if (esdhc->peripheral_clock)
return esdhc->peripheral_clock;
else
return pltfm_host->clock;
}
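/*
 * The minimum SD clock is the base clock behind the largest possible
 * divider: pre_div (up to 256) times div (up to 16).
 */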
static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
unsigned int clock;
if (esdhc->peripheral_clock)
clock = esdhc->peripheral_clock;
else
clock = pltfm_host->clock;
return clock / 256 / 16;
}
static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
ktime_t timeout;
u32 val, clk_en;
clk_en = ESDHC_CLOCK_SDCLKEN;
/*
 * The IPGEN/HCKEN/PEREN bits exist only on eSDHC whose vendor
 * version is 2.2 or lower.
 */
if (esdhc->vendor_ver <= VENDOR_V_22)
clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
ESDHC_CLOCK_PEREN);
val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
if (enable)
val |= clk_en;
else
val &= ~clk_en;
sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
/*
 * Wait up to 20 ms. If the vendor version is 2.2 or lower, do not
 * wait for the clock-stable bit, which does not exist there.
 */
timeout = ktime_add_ms(ktime_get(), 20);
while (esdhc->vendor_ver > VENDOR_V_22) {
bool timedout = ktime_after(ktime_get(), timeout);
if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
break;
if (timedout) {
pr_err("%s: Internal clock never stabilised.\n",
mmc_hostname(host->mmc));
break;
}
usleep_range(10, 20);
}
}
static void esdhc_flush_async_fifo(struct sdhci_host *host)
{
ktime_t timeout;
u32 val;
val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
val |= ESDHC_FLUSH_ASYNC_FIFO;
sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
/* Wait max 20 ms */
timeout = ktime_add_ms(ktime_get(), 20);
while (1) {
bool timedout = ktime_after(ktime_get(), timeout);
if (!(sdhci_readl(host, ESDHC_DMA_SYSCTL) &
ESDHC_FLUSH_ASYNC_FIFO))
break;
if (timedout) {
pr_err("%s: flushing asynchronous FIFO timeout.\n",
mmc_hostname(host->mmc));
break;
}
usleep_range(10, 20);
}
}
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
unsigned int pre_div = 1, div = 1;
unsigned int clock_fixup = 0;
ktime_t timeout;
u32 temp;
if (clock == 0) {
host->mmc->actual_clock = 0;
esdhc_clock_enable(host, false);
return;
}
/* Start pre_div at 2 for vendor version < 2.3. */
if (esdhc->vendor_ver < VENDOR_V_23)
pre_div = 2;
/* Fix clock value. */
if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk;
else if (esdhc->clk_fixup)
clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
if (clock_fixup == 0 || clock < clock_fixup)
clock_fixup = clock;
/* Calculate pre_div and div. */
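/*
 * Illustrative example (assumed figures): with a 400 MHz base clock
 * and a 50 MHz target, the first loop leaves pre_div unchanged since
 * base / pre_div / 16 is already at or below the target, and the
 * second loop raises div until base / (pre_div * div) <= 50 MHz,
 * giving div_ratio = pre_div * div = 8.
 */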
while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256)
pre_div *= 2;
while (host->max_clk / pre_div / div > clock_fixup && div < 16)
div++;
esdhc->div_ratio = pre_div * div;
/* Quirk: limit the clock division for the HS400 200 MHz clock. */
if (esdhc->quirk_limited_clk_division &&
clock == MMC_HS200_MAX_DTR &&
(host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
host->flags & SDHCI_HS400_TUNING)) {
if (esdhc->div_ratio <= 4) {
pre_div = 4;
div = 1;
} else if (esdhc->div_ratio <= 8) {
pre_div = 4;
div = 2;
} else if (esdhc->div_ratio <= 12) {
pre_div = 4;
div = 3;
} else {
pr_warn("%s: using unsupported clock division.\n",
mmc_hostname(host->mmc));
}
esdhc->div_ratio = pre_div * div;
}
host->mmc->actual_clock = host->max_clk / esdhc->div_ratio;
dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
clock, host->mmc->actual_clock);
/* Program the divisors into the register; it encodes pre_div
 * as its value / 2 and div as its value - 1.
 */
pre_div >>= 1;
div--;
esdhc_clock_enable(host, false);
temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
temp &= ~ESDHC_CLOCK_MASK;
temp |= ((div << ESDHC_DIVIDER_SHIFT) |
(pre_div << ESDHC_PREDIV_SHIFT));
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
/*
 * Wait up to 20 ms. If the vendor version is 2.2 or lower, do not
 * wait for the clock-stable bit, which does not exist there.
 */
timeout = ktime_add_ms(ktime_get(), 20);
while (esdhc->vendor_ver > VENDOR_V_22) {
bool timedout = ktime_after(ktime_get(), timeout);
if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
break;
if (timedout) {
pr_err("%s: Internal clock never stabilised.\n",
mmc_hostname(host->mmc));
break;
}
usleep_range(10, 20);
}
/* Additional setting for HS400. */
if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
clock == MMC_HS200_MAX_DTR) {
temp = sdhci_readl(host, ESDHC_TBCTL);
sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
temp = sdhci_readl(host, ESDHC_SDCLKCTL);
sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
esdhc_clock_enable(host, true);
temp = sdhci_readl(host, ESDHC_DLLCFG0);
temp |= ESDHC_DLL_ENABLE;
if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
temp |= ESDHC_DLL_FREQ_SEL;
sdhci_writel(host, temp, ESDHC_DLLCFG0);
temp |= ESDHC_DLL_RESET;
sdhci_writel(host, temp, ESDHC_DLLCFG0);
udelay(1);
temp &= ~ESDHC_DLL_RESET;
sdhci_writel(host, temp, ESDHC_DLLCFG0);
/* Wait max 20 ms */
if (read_poll_timeout(sdhci_readl, temp,
temp & ESDHC_DLL_STS_SLV_LOCK,
10, 20000, false,
host, ESDHC_DLLSTAT0))
pr_err("%s: timeout for delay chain lock.\n",
mmc_hostname(host->mmc));
temp = sdhci_readl(host, ESDHC_TBCTL);
sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);
esdhc_clock_enable(host, false);
esdhc_flush_async_fifo(host);
}
esdhc_clock_enable(host, true);
}
static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
{
u32 ctrl;
ctrl = sdhci_readl(host, ESDHC_PROCTL);
ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
switch (width) {
case MMC_BUS_WIDTH_8:
ctrl |= ESDHC_CTRL_8BITBUS;
break;
case MMC_BUS_WIDTH_4:
ctrl |= ESDHC_CTRL_4BITBUS;
break;
default:
break;
}
sdhci_writel(host, ctrl, ESDHC_PROCTL);
}
static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
u32 val, bus_width = 0;
/*
 * Quirk: delay to make sure all DMA transfers have finished
 * before a data reset.
 */
if (esdhc->quirk_delay_before_data_reset &&
(mask & SDHCI_RESET_DATA) &&
(host->flags & SDHCI_REQ_USE_DMA))
mdelay(5);
/*
 * For data resets on eSDHC whose vendor version is 2.2 or
 * lower, save the bus-width setting.
 */
if ((mask & SDHCI_RESET_DATA) &&
(esdhc->vendor_ver <= VENDOR_V_22)) {
val = sdhci_readl(host, ESDHC_PROCTL);
bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
}
sdhci_reset(host, mask);
/*
 * For data resets on eSDHC whose vendor version is 2.2 or lower,
 * restore the bus-width setting and the interrupt registers.
 */
if ((mask & SDHCI_RESET_DATA) &&
(esdhc->vendor_ver <= VENDOR_V_22)) {
val = sdhci_readl(host, ESDHC_PROCTL);
val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
val |= bus_width;
sdhci_writel(host, val, ESDHC_PROCTL);
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
/*
 * For full resets on eSDHC whose spec version is 3.0 or higher,
 * some bits have to be cleared manually.
 */
if ((mask & SDHCI_RESET_ALL) &&
(esdhc->spec_ver >= SDHCI_SPEC_300)) {
val = sdhci_readl(host, ESDHC_TBCTL);
val &= ~ESDHC_TB_EN;
sdhci_writel(host, val, ESDHC_TBCTL);
/*
 * Quirk: initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL]
 * to 0.
 */
if (esdhc->quirk_unreliable_pulse_detection) {
val = sdhci_readl(host, ESDHC_DLLCFG1);
val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
sdhci_writel(host, val, ESDHC_DLLCFG1);
}
}
}
/* The SCFG, Supplemental Configuration Unit, provides SoC-specific
 * configuration and status registers for the device. On some
 * platforms there is an SDHC IO VSEL control register in the SCFG,
 * used to support SDHC IO voltage switching.
 */
static const struct of_device_id scfg_device_ids[] = {
{ .compatible = "fsl,t1040-scfg", },
{ .compatible = "fsl,ls1012a-scfg", },
{ .compatible = "fsl,ls1046a-scfg", },
{}
};
/* SDHC IO VSEL control register definition */
#define SCFG_SDHCIOVSELCR 0x408
#define SDHCIOVSELCR_TGLEN 0x80000000
#define SDHCIOVSELCR_VSELVAL 0x60000000
#define SDHCIOVSELCR_SDHC_VS 0x00000001
static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
struct mmc_ios *ios)
{
struct sdhci_host *host = mmc_priv(mmc);
struct device_node *scfg_node;
void __iomem *scfg_base = NULL;
u32 sdhciovselcr;
u32 val;
/*
* Signal Voltage Switching is only applicable for Host Controllers
* v3.00 and above.
*/
if (host->version < SDHCI_SPEC_300)
return 0;
val = sdhci_readl(host, ESDHC_PROCTL);
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_330:
val &= ~ESDHC_VOLT_SEL;
sdhci_writel(host, val, ESDHC_PROCTL);
return 0;
case MMC_SIGNAL_VOLTAGE_180:
scfg_node = of_find_matching_node(NULL, scfg_device_ids);
if (scfg_node)
scfg_base = of_iomap(scfg_node, 0);
of_node_put(scfg_node);
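/*
 * 1.8 V switch sequence: request the new IO voltage through SCFG with
 * the toggle bit set, flip the eSDHC VOLT_SEL bit, give the rail 5 ms
 * to settle, then latch the SDHC voltage select in SCFG.
 */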
if (scfg_base) {
sdhciovselcr = SDHCIOVSELCR_TGLEN |
SDHCIOVSELCR_VSELVAL;
iowrite32be(sdhciovselcr,
scfg_base + SCFG_SDHCIOVSELCR);
val |= ESDHC_VOLT_SEL;
sdhci_writel(host, val, ESDHC_PROCTL);
mdelay(5);
sdhciovselcr = SDHCIOVSELCR_TGLEN |
SDHCIOVSELCR_SDHC_VS;
iowrite32be(sdhciovselcr,
scfg_base + SCFG_SDHCIOVSELCR);
iounmap(scfg_base);
} else {
val |= ESDHC_VOLT_SEL;
sdhci_writel(host, val, ESDHC_PROCTL);
}
return 0;
default:
return 0;
}
}
static struct soc_device_attribute soc_tuning_erratum_type1[] = {
{ .family = "QorIQ T1023", },
{ .family = "QorIQ T1040", },
{ .family = "QorIQ T2080", },
{ .family = "QorIQ LS1021A", },
{ /* sentinel */ }
};
static struct soc_device_attribute soc_tuning_erratum_type2[] = {
{ .family = "QorIQ LS1012A", },
{ .family = "QorIQ LS1043A", },
{ .family = "QorIQ LS1046A", },
{ .family = "QorIQ LS1080A", },
{ .family = "QorIQ LS2080A", },
{ .family = "QorIQ LA1575A", },
{ /* sentinel */ }
};
static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
{
u32 val;
esdhc_clock_enable(host, false);
esdhc_flush_async_fifo(host);
val = sdhci_readl(host, ESDHC_TBCTL);
if (enable)
val |= ESDHC_TB_EN;
else
val &= ~ESDHC_TB_EN;
sdhci_writel(host, val, ESDHC_TBCTL);
esdhc_clock_enable(host, true);
}
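/*
 * Sample the tuning window pointers: program TBCTL[11:8], read TBCTL
 * back and rewrite it, then read TBSTAT twice and extract the window
 * start (bits 15:8) and end (bits 7:0) pointers from the second read.
 */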
static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start,
u8 *window_end)
{
u32 val;
/* Write TBCTL[11:8]=4'h8 */
val = sdhci_readl(host, ESDHC_TBCTL);
val &= ~(0xf << 8);
val |= 8 << 8;
sdhci_writel(host, val, ESDHC_TBCTL);
mdelay(1);
/* Read TBCTL[31:0] register and rewrite again */
val = sdhci_readl(host, ESDHC_TBCTL);
sdhci_writel(host, val, ESDHC_TBCTL);
mdelay(1);
/* Read the TBSTAT[31:0] register twice */
val = sdhci_readl(host, ESDHC_TBSTAT);
val = sdhci_readl(host, ESDHC_TBSTAT);
*window_end = val & 0xff;
*window_start = (val >> 8) & 0xff;
}
static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
u8 *window_end)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
u8 start_ptr, end_ptr;
if (esdhc->quirk_tuning_erratum_type1) {
*window_start = 5 * esdhc->div_ratio;
*window_end = 3 * esdhc->div_ratio;
return;
}
esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr);
/* Reset data lines by setting ESDHCCTL[RSTD] */
sdhci_reset(host, SDHCI_RESET_DATA);
/* Write 32'hFFFF_FFFF to IRQSTAT register */
sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);
/* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2
* or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2,
* then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
* and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
*/
if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) {
*window_start = 8 * esdhc->div_ratio;
*window_end = 4 * esdhc->div_ratio;
} else {
*window_start = 5 * esdhc->div_ratio;
*window_end = 3 * esdhc->div_ratio;
}
}
static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
u8 window_start, u8 window_end)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
u32 val;
int ret;
/* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
ESDHC_WNDW_STRT_PTR_MASK;
val |= window_end & ESDHC_WNDW_END_PTR_MASK;
sdhci_writel(host, val, ESDHC_TBPTR);
/* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
val = sdhci_readl(host, ESDHC_TBCTL);
val &= ~ESDHC_TB_MODE_MASK;
val |= ESDHC_TB_MODE_SW;
sdhci_writel(host, val, ESDHC_TBCTL);
esdhc->in_sw_tuning = true;
ret = sdhci_execute_tuning(mmc, opcode);
esdhc->in_sw_tuning = false;
return ret;
}
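/*
 * Tuning entry point: cap the SD clock for tuning, run HW tuning
 * first, fall back to SW tuning (and finally to a reduced clock) when
 * the tuning errata fire, and apply the HS400 flow-control setting
 * once tuning succeeds in HS400 mode.
 */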
static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
u8 window_start, window_end;
int ret, retries = 1;
bool hs400_tuning;
unsigned int clk;
u32 val;
/* For tuning mode, the SD clock divisor value
 * must be larger than 3 according to the reference manual.
 */
clk = esdhc->peripheral_clock / 3;
if (host->clock > clk)
esdhc_of_set_clock(host, clk);
esdhc_tuning_block_enable(host, true);
/*
* The eSDHC controller takes the data timeout value into account
* during tuning. If the SD card is too slow sending the response, the
* timer will expire and a "Buffer Read Ready" interrupt without data
* is triggered. This leads to tuning errors.
*
* Just set the timeout to the maximum value because the core will
* already take care of it in sdhci_send_tuning().
*/
sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
hs400_tuning = host->flags & SDHCI_HS400_TUNING;
do {
if (esdhc->quirk_limited_clk_division &&
hs400_tuning)
esdhc_of_set_clock(host, host->clock);
/* Do HW tuning */
val = sdhci_readl(host, ESDHC_TBCTL);
val &= ~ESDHC_TB_MODE_MASK;
val |= ESDHC_TB_MODE_3;
sdhci_writel(host, val, ESDHC_TBCTL);
ret = sdhci_execute_tuning(mmc, opcode);
if (ret)
break;
/* On platforms affected by the type-2 tuning erratum,
 * tuning may report success even though the eSDHC has not
 * tuned properly, so the tuning window must be checked.
 */
if (esdhc->quirk_tuning_erratum_type2 &&
!host->tuning_err) {
esdhc_tuning_window_ptr(host, &window_start,
&window_end);
if (abs(window_start - window_end) >
(4 * esdhc->div_ratio + 2))
host->tuning_err = -EAGAIN;
}
/* If HW tuning fails and triggers the erratum,
 * try the workaround.
 */
ret = host->tuning_err;
if (ret == -EAGAIN &&
(esdhc->quirk_tuning_erratum_type1 ||
esdhc->quirk_tuning_erratum_type2)) {
/* Recover HS400 tuning flag */
if (hs400_tuning)
host->flags |= SDHCI_HS400_TUNING;
pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
mmc_hostname(mmc));
/* Do SW tuning */
esdhc_prepare_sw_tuning(host, &window_start,
&window_end);
ret = esdhc_execute_sw_tuning(mmc, opcode,
window_start,
window_end);
if (ret)
break;
/* Retry both HW/SW tuning with reduced clock. */
ret = host->tuning_err;
if (ret == -EAGAIN && retries) {
/* Recover HS400 tuning flag */
if (hs400_tuning)
host->flags |= SDHCI_HS400_TUNING;
clk = host->max_clk / (esdhc->div_ratio + 1);
esdhc_of_set_clock(host, clk);
pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
mmc_hostname(mmc));
} else {
break;
}
} else {
break;
}
} while (retries--);
if (ret) {
esdhc_tuning_block_enable(host, false);
} else if (hs400_tuning) {
val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
val |= ESDHC_FLW_CTL_BG;
sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
}
return ret;
}
static void esdhc_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
u32 val;
/*
 * There are register settings specific to HS400 mode. If the
 * controller is currently in HS400 mode, clear all of them to exit
 * HS400 before configuring any other speed mode.
 */
val = sdhci_readl(host, ESDHC_TBCTL);
if (val & ESDHC_HS400_MODE) {
val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
val &= ~ESDHC_FLW_CTL_BG;
sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
val = sdhci_readl(host, ESDHC_SDCLKCTL);
val &= ~ESDHC_CMD_CLK_CTL;
sdhci_writel(host, val, ESDHC_SDCLKCTL);
esdhc_clock_enable(host, false);
val = sdhci_readl(host, ESDHC_TBCTL);
val &= ~ESDHC_HS400_MODE;
sdhci_writel(host, val, ESDHC_TBCTL);
esdhc_clock_enable(host, true);
val = sdhci_readl(host, ESDHC_DLLCFG0);
val &= ~(ESDHC_DLL_ENABLE | ESDHC_DLL_FREQ_SEL);
sdhci_writel(host, val, ESDHC_DLLCFG0);
val = sdhci_readl(host, ESDHC_TBCTL);
val &= ~ESDHC_HS400_WNDW_ADJUST;
sdhci_writel(host, val, ESDHC_TBCTL);
esdhc_tuning_block_enable(host, false);
}
if (timing == MMC_TIMING_MMC_HS400)
esdhc_tuning_block_enable(host, true);
else
sdhci_set_uhs_signaling(host, timing);
}
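/*
 * IRQ fixup for the transfer-complete erratum: for a multiple-block
 * write with blocks still outstanding, a premature SDHCI_INT_DATA_END
 * is acknowledged here and masked out of the status handed to the
 * core.
 */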
static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
u32 command;
if (esdhc->quirk_trans_complete_erratum) {
command = SDHCI_GET_CMD(sdhci_readw(host,
SDHCI_COMMAND));
if (command == MMC_WRITE_MULTIPLE_BLOCK &&
sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
intmask & SDHCI_INT_DATA_END) {
intmask &= ~SDHCI_INT_DATA_END;
sdhci_writel(host, SDHCI_INT_DATA_END,
SDHCI_INT_STATUS);
}
}
return intmask;
}
#ifdef CONFIG_PM_SLEEP
static u32 esdhc_proctl;
static int esdhc_of_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
return sdhci_suspend_host(host);
}
static int esdhc_of_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
int ret = sdhci_resume_host(host);
if (ret == 0) {
/* Isn't this already done by sdhci_resume_host() ? --rmk */
esdhc_of_enable_dma(host);
sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
}
return ret;
}
#endif
static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
esdhc_of_suspend,
esdhc_of_resume);
static const struct sdhci_ops sdhci_esdhc_be_ops = {
.read_l = esdhc_be_readl,
.read_w = esdhc_be_readw,
.read_b = esdhc_be_readb,
.write_l = esdhc_be_writel,
.write_w = esdhc_be_writew,
.write_b = esdhc_be_writeb,
.set_clock = esdhc_of_set_clock,
.enable_dma = esdhc_of_enable_dma,
.get_max_clock = esdhc_of_get_max_clock,
.get_min_clock = esdhc_of_get_min_clock,
.adma_workaround = esdhc_of_adma_workaround,
.set_bus_width = esdhc_pltfm_set_bus_width,
.reset = esdhc_reset,
.set_uhs_signaling = esdhc_set_uhs_signaling,
.irq = esdhc_irq,
};
static const struct sdhci_ops sdhci_esdhc_le_ops = {
.read_l = esdhc_le_readl,
.read_w = esdhc_le_readw,
.read_b = esdhc_le_readb,
.write_l = esdhc_le_writel,
.write_w = esdhc_le_writew,
.write_b = esdhc_le_writeb,
.set_clock = esdhc_of_set_clock,
.enable_dma = esdhc_of_enable_dma,
.get_max_clock = esdhc_of_get_max_clock,
.get_min_clock = esdhc_of_get_min_clock,
.adma_workaround = esdhc_of_adma_workaround,
.set_bus_width = esdhc_pltfm_set_bus_width,
.reset = esdhc_reset,
.set_uhs_signaling = esdhc_set_uhs_signaling,
.irq = esdhc_irq,
};
static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
.quirks = ESDHC_DEFAULT_QUIRKS |
#ifdef CONFIG_PPC
SDHCI_QUIRK_BROKEN_CARD_DETECTION |
#endif
SDHCI_QUIRK_NO_CARD_NO_RESET |
SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.ops = &sdhci_esdhc_be_ops,
};
static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
.quirks = ESDHC_DEFAULT_QUIRKS |
SDHCI_QUIRK_NO_CARD_NO_RESET |
SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.ops = &sdhci_esdhc_le_ops,
};
static struct soc_device_attribute soc_incorrect_hostver[] = {
{ .family = "QorIQ T4240", .revision = "1.0", },
{ .family = "QorIQ T4240", .revision = "2.0", },
{ /* sentinel */ }
};
static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
{ .family = "QorIQ LX2160A", .revision = "1.0", },
{ .family = "QorIQ LX2160A", .revision = "2.0", },
{ .family = "QorIQ LS1028A", .revision = "1.0", },
{ /* sentinel */ }
};
static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
{ .family = "QorIQ LX2160A", .revision = "1.0", },
{ .family = "QorIQ LX2160A", .revision = "2.0", },
{ .family = "QorIQ LS1028A", .revision = "1.0", },
{ /* sentinel */ }
};
static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
{
const struct of_device_id *match;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_esdhc *esdhc;
struct device_node *np;
struct clk *clk;
u32 val;
u16 host_ver;
pltfm_host = sdhci_priv(host);
esdhc = sdhci_pltfm_priv(pltfm_host);
host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
SDHCI_VENDOR_VER_SHIFT;
esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
if (soc_device_match(soc_incorrect_hostver))
esdhc->quirk_incorrect_hostver = true;
else
esdhc->quirk_incorrect_hostver = false;
if (soc_device_match(soc_fixup_sdhc_clkdivs))
esdhc->quirk_limited_clk_division = true;
else
esdhc->quirk_limited_clk_division = false;
if (soc_device_match(soc_unreliable_pulse_detection))
esdhc->quirk_unreliable_pulse_detection = true;
else
esdhc->quirk_unreliable_pulse_detection = false;
match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
if (match)
esdhc->clk_fixup = match->data;
np = pdev->dev.of_node;
if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
esdhc->quirk_delay_before_data_reset = true;
esdhc->quirk_trans_complete_erratum = true;
}
clk = of_clk_get(np, 0);
if (!IS_ERR(clk)) {
/*
 * esdhc->peripheral_clock is assigned the eSDHC base clock when
 * the peripheral clock is used. On some platforms the value
 * returned by the common clk API is the peripheral clock, while
 * the eSDHC base clock is half the peripheral clock.
 */
if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
of_device_is_compatible(np, "fsl,ls1028a-esdhc") ||
of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
esdhc->peripheral_clock = clk_get_rate(clk) / 2;
else
esdhc->peripheral_clock = clk_get_rate(clk);
clk_put(clk);
}
esdhc_clock_enable(host, false);
val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
/*
 * This bit cannot be reset by SDHCI_RESET_ALL, so initialize it
 * to 1 or 0 once to override whatever value the bootloader may
 * have configured.
 */
if (esdhc->peripheral_clock)
val |= ESDHC_PERIPHERAL_CLK_SEL;
else
val &= ~ESDHC_PERIPHERAL_CLK_SEL;
sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
esdhc_clock_enable(host, true);
}
static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
{
esdhc_tuning_block_enable(mmc_priv(mmc), false);
return 0;
}
static int sdhci_esdhc_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct device_node *np, *tp;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_esdhc *esdhc;
int ret;
np = pdev->dev.of_node;
if (of_property_read_bool(np, "little-endian"))
host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
sizeof(struct sdhci_esdhc));
else
host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
sizeof(struct sdhci_esdhc));
if (IS_ERR(host))
return PTR_ERR(host);
host->mmc_host_ops.start_signal_voltage_switch =
esdhc_signal_voltage_switch;
host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
host->mmc_host_ops.hs400_prepare_ddr = esdhc_hs400_prepare_ddr;
host->tuning_delay = 1;
esdhc_init(pdev, host);
sdhci_get_of_property(pdev);
pltfm_host = sdhci_priv(host);
esdhc = sdhci_pltfm_priv(pltfm_host);
if (soc_device_match(soc_tuning_erratum_type1))
esdhc->quirk_tuning_erratum_type1 = true;
else
esdhc->quirk_tuning_erratum_type1 = false;
if (soc_device_match(soc_tuning_erratum_type2))
esdhc->quirk_tuning_erratum_type2 = true;
else
esdhc->quirk_tuning_erratum_type2 = false;
if (esdhc->vendor_ver == VENDOR_V_22)
host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
if (esdhc->vendor_ver > VENDOR_V_22)
host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
tp = of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc");
if (tp) {
of_node_put(tp);
host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
}
if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
of_device_is_compatible(np, "fsl,p5020-esdhc") ||
of_device_is_compatible(np, "fsl,p4080-esdhc") ||
of_device_is_compatible(np, "fsl,p1020-esdhc") ||
of_device_is_compatible(np, "fsl,t1040-esdhc"))
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
esdhc->quirk_ignore_data_inhibit = false;
if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
/*
* Freescale messed up with P2020 as it has a non-standard
* host control register
*/
host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
esdhc->quirk_ignore_data_inhibit = true;
}
/* call to generic mmc_of_parse to support additional capabilities */
ret = mmc_of_parse(host->mmc);
if (ret)
goto err;
mmc_of_parse_voltage(host->mmc, &host->ocr_mask);
ret = sdhci_add_host(host);
if (ret)
goto err;
return 0;
err:
sdhci_pltfm_free(pdev);
return ret;
}
static struct platform_driver sdhci_esdhc_driver = {
.driver = {
.name = "sdhci-esdhc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_esdhc_of_match,
.pm = &esdhc_of_dev_pm_ops,
},
.probe = sdhci_esdhc_probe,
.remove_new = sdhci_pltfm_remove,
};
module_platform_driver(sdhci_esdhc_driver);
MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
MODULE_AUTHOR("Xiaobo Xie <[email protected]>, "
"Anton Vorontsov <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mmc/host/sdhci-of-esdhc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver
*
* Copyright (C) 2006 Texas Instruments.
* Original author: Purushotam Kumar
* Copyright (C) 2009 David Brownell
*/
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/cpufreq.h>
#include <linux/mmc/host.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/interrupt.h>
#include <linux/platform_data/mmc-davinci.h>
/*
* Register Definitions
*/
#define DAVINCI_MMCCTL 0x00 /* Control Register */
#define DAVINCI_MMCCLK 0x04 /* Memory Clock Control Register */
#define DAVINCI_MMCST0 0x08 /* Status Register 0 */
#define DAVINCI_MMCST1 0x0C /* Status Register 1 */
#define DAVINCI_MMCIM 0x10 /* Interrupt Mask Register */
#define DAVINCI_MMCTOR 0x14 /* Response Time-Out Register */
#define DAVINCI_MMCTOD 0x18 /* Data Read Time-Out Register */
#define DAVINCI_MMCBLEN 0x1C /* Block Length Register */
#define DAVINCI_MMCNBLK 0x20 /* Number of Blocks Register */
#define DAVINCI_MMCNBLC 0x24 /* Number of Blocks Counter Register */
#define DAVINCI_MMCDRR 0x28 /* Data Receive Register */
#define DAVINCI_MMCDXR 0x2C /* Data Transmit Register */
#define DAVINCI_MMCCMD 0x30 /* Command Register */
#define DAVINCI_MMCARGHL 0x34 /* Argument Register */
#define DAVINCI_MMCRSP01 0x38 /* Response Register 0 and 1 */
#define DAVINCI_MMCRSP23 0x3C /* Response Register 2 and 3 */
#define DAVINCI_MMCRSP45 0x40 /* Response Register 4 and 5 */
#define DAVINCI_MMCRSP67 0x44 /* Response Register 6 and 7 */
#define DAVINCI_MMCDRSP 0x48 /* Data Response Register */
#define DAVINCI_MMCETOK 0x4C
#define DAVINCI_MMCCIDX 0x50 /* Command Index Register */
#define DAVINCI_MMCCKC 0x54
#define DAVINCI_MMCTORC 0x58
#define DAVINCI_MMCTODC 0x5C
#define DAVINCI_MMCBLNC 0x60
#define DAVINCI_SDIOCTL 0x64
#define DAVINCI_SDIOST0 0x68
#define DAVINCI_SDIOIEN 0x6C
#define DAVINCI_SDIOIST 0x70
#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */
/* DAVINCI_MMCCTL definitions */
#define MMCCTL_DATRST (1 << 0)
#define MMCCTL_CMDRST (1 << 1)
#define MMCCTL_WIDTH_8_BIT (1 << 8)
#define MMCCTL_WIDTH_4_BIT (1 << 2)
#define MMCCTL_DATEG_DISABLED (0 << 6)
#define MMCCTL_DATEG_RISING (1 << 6)
#define MMCCTL_DATEG_FALLING (2 << 6)
#define MMCCTL_DATEG_BOTH (3 << 6)
#define MMCCTL_PERMDR_LE (0 << 9)
#define MMCCTL_PERMDR_BE (1 << 9)
#define MMCCTL_PERMDX_LE (0 << 10)
#define MMCCTL_PERMDX_BE (1 << 10)
/* DAVINCI_MMCCLK definitions */
#define MMCCLK_CLKEN (1 << 8)
#define MMCCLK_CLKRT_MASK (0xFF << 0)
/* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */
#define MMCST0_DATDNE BIT(0) /* data done */
#define MMCST0_BSYDNE BIT(1) /* busy done */
#define MMCST0_RSPDNE BIT(2) /* command done */
#define MMCST0_TOUTRD BIT(3) /* data read timeout */
#define MMCST0_TOUTRS BIT(4) /* command response timeout */
#define MMCST0_CRCWR BIT(5) /* data write CRC error */
#define MMCST0_CRCRD BIT(6) /* data read CRC error */
#define MMCST0_CRCRS BIT(7) /* command response CRC error */
#define MMCST0_DXRDY BIT(9) /* data transmit ready (fifo empty) */
#define MMCST0_DRRDY BIT(10) /* data receive ready (data in fifo)*/
#define MMCST0_DATED BIT(11) /* DAT3 edge detect */
#define MMCST0_TRNDNE BIT(12) /* transfer done */
/* DAVINCI_MMCST1 definitions */
#define MMCST1_BUSY (1 << 0)
/* DAVINCI_MMCCMD definitions */
#define MMCCMD_CMD_MASK (0x3F << 0)
#define MMCCMD_PPLEN (1 << 7)
#define MMCCMD_BSYEXP (1 << 8)
#define MMCCMD_RSPFMT_MASK (3 << 9)
#define MMCCMD_RSPFMT_NONE (0 << 9)
#define MMCCMD_RSPFMT_R1456 (1 << 9)
#define MMCCMD_RSPFMT_R2 (2 << 9)
#define MMCCMD_RSPFMT_R3 (3 << 9)
#define MMCCMD_DTRW (1 << 11)
#define MMCCMD_STRMTP (1 << 12)
#define MMCCMD_WDATX (1 << 13)
#define MMCCMD_INITCK (1 << 14)
#define MMCCMD_DCLR (1 << 15)
#define MMCCMD_DMATRIG (1 << 16)
/* DAVINCI_MMCFIFOCTL definitions */
#define MMCFIFOCTL_FIFORST (1 << 0)
#define MMCFIFOCTL_FIFODIR_WR (1 << 1)
#define MMCFIFOCTL_FIFODIR_RD (0 << 1)
#define MMCFIFOCTL_FIFOLEV (1 << 2) /* 0 = 128 bits, 1 = 256 bits */
#define MMCFIFOCTL_ACCWD_4 (0 << 3) /* access width of 4 bytes */
#define MMCFIFOCTL_ACCWD_3 (1 << 3) /* access width of 3 bytes */
#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */
#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */
/* DAVINCI_SDIOST0 definitions */
#define SDIOST0_DAT1_HI BIT(0)
/* DAVINCI_SDIOIEN definitions */
#define SDIOIEN_IOINTEN BIT(0)
/* DAVINCI_SDIOIST definitions */
#define SDIOIST_IOINT BIT(0)
/* MMCSD Init clock in Hz in opendrain mode */
#define MMCSD_INIT_CLOCK 200000
/*
* One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
* and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
* for drivers with max_segs == 1, making the segments bigger (64KB)
* than the page or two that's otherwise typical. nr_sg (passed from
* platform data) == 16 gives at least the same throughput boost, using
* EDMA transfer linkage instead of spending CPU time copying pages.
*/
#define MAX_CCNT ((1 << 16) - 1)
#define MAX_NR_SG 16
static unsigned rw_threshold = 32;
module_param(rw_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(rw_threshold,
"Read/Write threshold. Default = 32");
static unsigned poll_threshold = 128;
module_param(poll_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(poll_threshold,
"Polling transaction size threshold. Default = 128");
static unsigned poll_loopcount = 32;
module_param(poll_loopcount, uint, S_IRUGO);
MODULE_PARM_DESC(poll_loopcount,
"Maximum polling loop count. Default = 32");
static unsigned use_dma = 1;
module_param(use_dma, uint, 0);
MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");
struct mmc_davinci_host {
struct mmc_command *cmd;
struct mmc_data *data;
struct mmc_host *mmc;
struct clk *clk;
unsigned int mmc_input_clk;
void __iomem *base;
struct resource *mem_res;
int mmc_irq, sdio_irq;
unsigned char bus_mode;
#define DAVINCI_MMC_DATADIR_NONE 0
#define DAVINCI_MMC_DATADIR_READ 1
#define DAVINCI_MMC_DATADIR_WRITE 2
unsigned char data_dir;
/* buffer is used during PIO of one scatterlist segment, and
* is updated along with buffer_bytes_left. bytes_left applies
* to all N blocks of the PIO transfer.
*/
u8 *buffer;
u32 buffer_bytes_left;
u32 bytes_left;
struct dma_chan *dma_tx;
struct dma_chan *dma_rx;
bool use_dma;
bool do_dma;
bool sdio_int;
bool active_request;
/* For PIO we walk scatterlists one segment at a time. */
unsigned int sg_len;
struct scatterlist *sg;
/* Version of the MMC/SD controller */
u8 version;
/* for ns in one cycle calculation */
unsigned ns_in_one_cycle;
/* Number of sg segments */
u8 nr_sg;
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
#endif
};
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id);
/* PIO only */
static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
{
host->buffer_bytes_left = sg_dma_len(host->sg);
host->buffer = sg_virt(host->sg);
if (host->buffer_bytes_left > host->bytes_left)
host->buffer_bytes_left = host->bytes_left;
}
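/*
 * PIO data movement between the current scatterlist buffer and the
 * controller FIFO: transfer up to n bytes, 32 bits at a time with a
 * byte-wide tail, advancing to the next sg segment when the current
 * one is exhausted.
 */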
static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
unsigned int n)
{
u8 *p;
unsigned int i;
if (host->buffer_bytes_left == 0) {
host->sg = sg_next(host->data->sg);
mmc_davinci_sg_to_buf(host);
}
p = host->buffer;
if (n > host->buffer_bytes_left)
n = host->buffer_bytes_left;
host->buffer_bytes_left -= n;
host->bytes_left -= n;
/* NOTE: we never transfer more than rw_threshold bytes
 * to/from the fifo here; there's no I/O overlap.
 * This also assumes the access width (i.e. ACCWD) is 4 bytes.
 */
if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
for (i = 0; i < (n >> 2); i++) {
writel(*((u32 *)p), host->base + DAVINCI_MMCDXR);
p = p + 4;
}
if (n & 3) {
iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3));
p = p + (n & 3);
}
} else {
for (i = 0; i < (n >> 2); i++) {
*((u32 *)p) = readl(host->base + DAVINCI_MMCDRR);
p = p + 4;
}
if (n & 3) {
ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3));
p = p + (n & 3);
}
}
host->buffer = p;
}
static void mmc_davinci_start_command(struct mmc_davinci_host *host,
struct mmc_command *cmd)
{
u32 cmd_reg = 0;
u32 im_val;
dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n",
cmd->opcode, cmd->arg,
({ char *s;
switch (mmc_resp_type(cmd)) {
case MMC_RSP_R1:
s = ", R1/R5/R6/R7 response";
break;
case MMC_RSP_R1B:
s = ", R1b response";
break;
case MMC_RSP_R2:
s = ", R2 response";
break;
case MMC_RSP_R3:
s = ", R3/R4 response";
break;
default:
s = ", (R? response)";
break;
} s; }));
host->cmd = cmd;
switch (mmc_resp_type(cmd)) {
case MMC_RSP_R1B:
/* There's some spec confusion about when R1B is
* allowed, but if the card doesn't issue a BUSY
* then it's harmless for us to allow it.
*/
cmd_reg |= MMCCMD_BSYEXP;
fallthrough;
case MMC_RSP_R1: /* 48 bits, CRC */
cmd_reg |= MMCCMD_RSPFMT_R1456;
break;
case MMC_RSP_R2: /* 136 bits, CRC */
cmd_reg |= MMCCMD_RSPFMT_R2;
break;
case MMC_RSP_R3: /* 48 bits, no CRC */
cmd_reg |= MMCCMD_RSPFMT_R3;
break;
default:
cmd_reg |= MMCCMD_RSPFMT_NONE;
dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n",
mmc_resp_type(cmd));
break;
}
/* Set command index */
cmd_reg |= cmd->opcode;
/* Enable EDMA transfer triggers */
if (host->do_dma)
cmd_reg |= MMCCMD_DMATRIG;
if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL &&
host->data_dir == DAVINCI_MMC_DATADIR_READ)
cmd_reg |= MMCCMD_DMATRIG;
/* Setting whether command involves data transfer or not */
if (cmd->data)
cmd_reg |= MMCCMD_WDATX;
/* Setting whether data read or write */
if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
cmd_reg |= MMCCMD_DTRW;
if (host->bus_mode == MMC_BUSMODE_PUSHPULL)
cmd_reg |= MMCCMD_PPLEN;
/* set Command timeout */
writel(0x1FFF, host->base + DAVINCI_MMCTOR);
/* Enable interrupt (calculate here, defer until FIFO is stuffed). */
im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS;
if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
im_val |= MMCST0_DATDNE | MMCST0_CRCWR;
if (!host->do_dma)
im_val |= MMCST0_DXRDY;
} else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) {
im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD;
if (!host->do_dma)
im_val |= MMCST0_DRRDY;
}
/*
 * Before non-DMA WRITE commands the controller needs priming:
 * the FIFO should be populated with 32 bytes, i.e. the FIFO size.
 */
*/
if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE))
davinci_fifo_data_trans(host, rw_threshold);
writel(cmd->arg, host->base + DAVINCI_MMCARGHL);
writel(cmd_reg, host->base + DAVINCI_MMCCMD);
host->active_request = true;
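/*
 * Small PIO transfers are polled for a bounded number of iterations
 * instead of waiting for interrupts, which keeps IRQ latency from
 * dominating short transactions; interrupts are enabled below only
 * if the request is still in flight afterwards.
 */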
if (!host->do_dma && host->bytes_left <= poll_threshold) {
u32 count = poll_loopcount;
while (host->active_request && count--) {
mmc_davinci_irq(0, host);
cpu_relax();
}
}
if (host->active_request)
writel(im_val, host->base + DAVINCI_MMCIM);
}
/*----------------------------------------------------------------------*/
/* DMA infrastructure */
static void davinci_abort_dma(struct mmc_davinci_host *host)
{
struct dma_chan *sync_dev;
if (host->data_dir == DAVINCI_MMC_DATADIR_READ)
sync_dev = host->dma_rx;
else
sync_dev = host->dma_tx;
dmaengine_terminate_all(sync_dev);
}
static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
struct mmc_data *data)
{
struct dma_chan *chan;
struct dma_async_tx_descriptor *desc;
int ret = 0;
if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
struct dma_slave_config dma_tx_conf = {
.direction = DMA_MEM_TO_DEV,
.dst_addr = host->mem_res->start + DAVINCI_MMCDXR,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
.dst_maxburst =
rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
};
chan = host->dma_tx;
dmaengine_slave_config(host->dma_tx, &dma_tx_conf);
desc = dmaengine_prep_slave_sg(host->dma_tx,
data->sg,
host->sg_len,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
dev_dbg(mmc_dev(host->mmc),
"failed to allocate DMA TX descriptor");
ret = -1;
goto out;
}
} else {
struct dma_slave_config dma_rx_conf = {
.direction = DMA_DEV_TO_MEM,
.src_addr = host->mem_res->start + DAVINCI_MMCDRR,
.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
.src_maxburst =
rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
};
chan = host->dma_rx;
dmaengine_slave_config(host->dma_rx, &dma_rx_conf);
desc = dmaengine_prep_slave_sg(host->dma_rx,
data->sg,
host->sg_len,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
dev_dbg(mmc_dev(host->mmc),
"failed to allocate DMA RX descriptor");
ret = -1;
goto out;
}
}
dmaengine_submit(desc);
dma_async_issue_pending(chan);
out:
return ret;
}
static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
struct mmc_data *data)
{
int i;
int mask = rw_threshold - 1;
int ret = 0;
host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
mmc_get_dma_dir(data));
/* no individual DMA segment should need a partial FIFO */
for (i = 0; i < host->sg_len; i++) {
if (sg_dma_len(data->sg + i) & mask) {
dma_unmap_sg(mmc_dev(host->mmc),
data->sg, data->sg_len,
mmc_get_dma_dir(data));
return -1;
}
}
host->do_dma = 1;
ret = mmc_davinci_send_dma_request(host, data);
return ret;
}
static void davinci_release_dma_channels(struct mmc_davinci_host *host)
{
if (!host->use_dma)
return;
dma_release_channel(host->dma_tx);
dma_release_channel(host->dma_rx);
}
static int davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
if (IS_ERR(host->dma_tx)) {
dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n");
return PTR_ERR(host->dma_tx);
}
host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
if (IS_ERR(host->dma_rx)) {
dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n");
dma_release_channel(host->dma_tx);
return PTR_ERR(host->dma_rx);
}
return 0;
}
/*----------------------------------------------------------------------*/
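/* Program block count/length, the data timeout and the FIFO direction
 * for the request, then use DMA when the transfer is a whole multiple
 * of rw_threshold bytes and fall back to PIO otherwise.
 */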
static void
mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
{
int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0;
int timeout;
struct mmc_data *data = req->data;
if (host->version == MMC_CTLR_VERSION_2)
fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0;
host->data = data;
if (data == NULL) {
host->data_dir = DAVINCI_MMC_DATADIR_NONE;
writel(0, host->base + DAVINCI_MMCBLEN);
writel(0, host->base + DAVINCI_MMCNBLK);
return;
}
dev_dbg(mmc_dev(host->mmc), "%s, %d blocks of %d bytes\n",
(data->flags & MMC_DATA_WRITE) ? "write" : "read",
data->blocks, data->blksz);
dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n",
data->timeout_clks, data->timeout_ns);
timeout = data->timeout_clks +
(data->timeout_ns / host->ns_in_one_cycle);
if (timeout > 0xffff)
timeout = 0xffff;
writel(timeout, host->base + DAVINCI_MMCTOD);
writel(data->blocks, host->base + DAVINCI_MMCNBLK);
writel(data->blksz, host->base + DAVINCI_MMCBLEN);
/* Configure the FIFO */
if (data->flags & MMC_DATA_WRITE) {
host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
host->base + DAVINCI_MMCFIFOCTL);
writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
host->base + DAVINCI_MMCFIFOCTL);
} else {
host->data_dir = DAVINCI_MMC_DATADIR_READ;
writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
host->base + DAVINCI_MMCFIFOCTL);
writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD,
host->base + DAVINCI_MMCFIFOCTL);
}
host->buffer = NULL;
host->bytes_left = data->blocks * data->blksz;
/* For now we try to use DMA whenever we won't need partial FIFO
* reads or writes, either for the whole transfer (as tested here)
* or for any individual scatterlist segment (tested when we call
* start_dma_transfer).
*
* While we *could* change that, unusual block sizes are rarely
* used. The occasional fallback to PIO shouldn't hurt.
*/
if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0
&& mmc_davinci_start_dma_transfer(host, data) == 0) {
/* zero this to ensure we take no PIO paths */
host->bytes_left = 0;
} else {
/* Revert to CPU Copy */
host->sg_len = data->sg_len;
host->sg = host->data->sg;
mmc_davinci_sg_to_buf(host);
}
}
static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req)
{
struct mmc_davinci_host *host = mmc_priv(mmc);
unsigned long timeout = jiffies + msecs_to_jiffies(900);
u32 mmcst1 = 0;
/* Card may still be sending BUSY after a previous operation,
* typically some kind of write. If so, we can't proceed yet.
*/
while (time_before(jiffies, timeout)) {
mmcst1 = readl(host->base + DAVINCI_MMCST1);
if (!(mmcst1 & MMCST1_BUSY))
break;
cpu_relax();
}
if (mmcst1 & MMCST1_BUSY) {
dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n");
req->cmd->error = -ETIMEDOUT;
mmc_request_done(mmc, req);
return;
}
host->do_dma = 0;
mmc_davinci_prepare_data(host, req);
mmc_davinci_start_command(host, req->cmd);
}
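/*
 * Compute the push-pull divider for the requested card clock; the card
 * clock is mmc_pclk / (2 * (divider + 1)). Illustrative example
 * (assumed figures): a 100 MHz input clock and a 25 MHz request give
 * divider 1. This also refreshes ns_in_one_cycle for timeout
 * conversion.
 */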
static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host,
unsigned int mmc_req_freq)
{
unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0;
mmc_pclk = host->mmc_input_clk;
if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq))
mmc_push_pull_divisor = ((unsigned int)mmc_pclk
/ (2 * mmc_req_freq)) - 1;
else
mmc_push_pull_divisor = 0;
mmc_freq = (unsigned int)mmc_pclk
/ (2 * (mmc_push_pull_divisor + 1));
if (mmc_freq > mmc_req_freq)
mmc_push_pull_divisor = mmc_push_pull_divisor + 1;
/* Convert ns to clock cycles */
if (mmc_req_freq <= 400000)
host->ns_in_one_cycle = (1000000) / (((mmc_pclk
/ (2 * (mmc_push_pull_divisor + 1)))/1000));
else
host->ns_in_one_cycle = (1000000) / (((mmc_pclk
/ (2 * (mmc_push_pull_divisor + 1)))/1000000));
return mmc_push_pull_divisor;
}
static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios)
{
unsigned int open_drain_freq = 0, mmc_pclk = 0;
unsigned int mmc_push_pull_freq = 0;
struct mmc_davinci_host *host = mmc_priv(mmc);
/* Cache the controller input clock; the divider math below needs it. */
mmc_pclk = host->mmc_input_clk;
if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
u32 temp;
/* Ignore the init clock value passed in, to fix
 * interoperability with different cards.
 */
open_drain_freq = ((unsigned int)mmc_pclk
/ (2 * MMCSD_INIT_CLOCK)) - 1;
if (open_drain_freq > 0xFF)
open_drain_freq = 0xFF;
temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
temp |= open_drain_freq;
writel(temp, host->base + DAVINCI_MMCCLK);
/* Convert ns to clock cycles */
host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000);
} else {
u32 temp;
mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock);
if (mmc_push_pull_freq > 0xFF)
mmc_push_pull_freq = 0xFF;
temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN;
writel(temp, host->base + DAVINCI_MMCCLK);
udelay(10);
temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
temp |= mmc_push_pull_freq;
writel(temp, host->base + DAVINCI_MMCCLK);
writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
udelay(10);
}
}
static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct mmc_davinci_host *host = mmc_priv(mmc);
struct platform_device *pdev = to_platform_device(mmc->parent);
struct davinci_mmc_config *config = pdev->dev.platform_data;
dev_dbg(mmc_dev(host->mmc),
"clock %dHz busmode %d powermode %d Vdd %04x\n",
ios->clock, ios->bus_mode, ios->power_mode,
ios->vdd);
switch (ios->power_mode) {
case MMC_POWER_OFF:
if (config && config->set_power)
config->set_power(pdev->id, false);
break;
case MMC_POWER_UP:
if (config && config->set_power)
config->set_power(pdev->id, true);
break;
}
switch (ios->bus_width) {
case MMC_BUS_WIDTH_8:
dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n");
writel((readl(host->base + DAVINCI_MMCCTL) &
~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT,
host->base + DAVINCI_MMCCTL);
break;
case MMC_BUS_WIDTH_4:
dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n");
if (host->version == MMC_CTLR_VERSION_2)
writel((readl(host->base + DAVINCI_MMCCTL) &
~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT,
host->base + DAVINCI_MMCCTL);
else
writel(readl(host->base + DAVINCI_MMCCTL) |
MMCCTL_WIDTH_4_BIT,
host->base + DAVINCI_MMCCTL);
break;
case MMC_BUS_WIDTH_1:
dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n");
if (host->version == MMC_CTLR_VERSION_2)
writel(readl(host->base + DAVINCI_MMCCTL) &
~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT),
host->base + DAVINCI_MMCCTL);
else
writel(readl(host->base + DAVINCI_MMCCTL) &
~MMCCTL_WIDTH_4_BIT,
host->base + DAVINCI_MMCCTL);
break;
}
calculate_clk_divider(mmc, ios);
host->bus_mode = ios->bus_mode;
if (ios->power_mode == MMC_POWER_UP) {
unsigned long timeout = jiffies + msecs_to_jiffies(50);
bool lose = true;
/* Send clock cycles, poll completion */
writel(0, host->base + DAVINCI_MMCARGHL);
writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD);
while (time_before(jiffies, timeout)) {
u32 tmp = readl(host->base + DAVINCI_MMCST0);
if (tmp & MMCST0_RSPDNE) {
lose = false;
break;
}
cpu_relax();
}
if (lose)
dev_warn(mmc_dev(host->mmc), "powerup timeout\n");
}
/* FIXME on power OFF, reset things ... */
}
static void
mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
{
host->data = NULL;
if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
/*
* SDIO Interrupt Detection work-around as suggested by
* Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata
* 2.1.6): Signal SDIO interrupt only if it is enabled by core
*/
if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) &
SDIOST0_DAT1_HI)) {
writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
mmc_signal_sdio_irq(host->mmc);
}
}
if (host->do_dma) {
davinci_abort_dma(host);
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
mmc_get_dma_dir(data));
host->do_dma = false;
}
host->data_dir = DAVINCI_MMC_DATADIR_NONE;
if (!data->stop || (host->cmd && host->cmd->error)) {
mmc_request_done(host->mmc, data->mrq);
writel(0, host->base + DAVINCI_MMCIM);
host->active_request = false;
} else
mmc_davinci_start_command(host, data->stop);
}
static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
struct mmc_command *cmd)
{
host->cmd = NULL;
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
/* response type 2 */
cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01);
cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23);
cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45);
cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
} else {
/* response types 1, 1b, 3, 4, 5, 6 */
cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
}
}
if (host->data == NULL || cmd->error) {
if (cmd->error == -ETIMEDOUT)
cmd->mrq->cmd->retries = 0;
mmc_request_done(host->mmc, cmd->mrq);
writel(0, host->base + DAVINCI_MMCIM);
host->active_request = false;
}
}
static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host,
int val)
{
u32 temp;
temp = readl(host->base + DAVINCI_MMCCTL);
if (val) /* reset */
temp |= MMCCTL_CMDRST | MMCCTL_DATRST;
else /* enable */
temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
writel(temp, host->base + DAVINCI_MMCCTL);
udelay(10);
}
static void
davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
{
mmc_davinci_reset_ctrl(host, 1);
mmc_davinci_reset_ctrl(host, 0);
}
static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
{
struct mmc_davinci_host *host = dev_id;
unsigned int status;
status = readl(host->base + DAVINCI_SDIOIST);
if (status & SDIOIST_IOINT) {
dev_dbg(mmc_dev(host->mmc),
"SDIO interrupt status %x\n", status);
writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
mmc_signal_sdio_irq(host->mmc);
}
return IRQ_HANDLED;
}
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
{
struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
unsigned int status, qstatus;
int end_command = 0;
int end_transfer = 0;
struct mmc_data *data = host->data;
if (host->cmd == NULL && host->data == NULL) {
status = readl(host->base + DAVINCI_MMCST0);
dev_dbg(mmc_dev(host->mmc),
"Spurious interrupt 0x%04x\n", status);
/* Disable the interrupt from mmcsd */
writel(0, host->base + DAVINCI_MMCIM);
return IRQ_NONE;
}
status = readl(host->base + DAVINCI_MMCST0);
qstatus = status;
/* Handle the FIFO first when using PIO for data.
 * bytes_left will decrease to zero as I/O progresses, and status will
 * read zero over the iterations because this controller's status
 * register (MMCST0) reports each event only once and is cleared by
 * reading. So this is not an unbounded loop, even in the non-DMA
 * case.
 */
if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) {
unsigned long im_val;
/*
* If interrupts fire during the following loop, they will be
* handled by the handler, but the PIC will still buffer these.
* As a result, the handler will be called again to serve these
* needlessly. In order to avoid these spurious interrupts,
* keep interrupts masked during the loop.
*/
im_val = readl(host->base + DAVINCI_MMCIM);
writel(0, host->base + DAVINCI_MMCIM);
do {
davinci_fifo_data_trans(host, rw_threshold);
status = readl(host->base + DAVINCI_MMCST0);
qstatus |= status;
} while (host->bytes_left &&
(status & (MMCST0_DXRDY | MMCST0_DRRDY)));
/*
* If an interrupt is pending, it is assumed it will fire when
* it is unmasked. This assumption is also taken when the MMCIM
* is first set. Otherwise, writing to MMCIM after reading the
* status is race-prone.
*/
writel(im_val, host->base + DAVINCI_MMCIM);
}
if (qstatus & MMCST0_DATDNE) {
/* All blocks sent/received, and CRC checks passed */
if (data != NULL) {
if ((host->do_dma == 0) && (host->bytes_left > 0)) {
/* if datasize < rw_threshold,
 * no RX interrupts are generated
 */
davinci_fifo_data_trans(host, host->bytes_left);
}
end_transfer = 1;
data->bytes_xfered = data->blocks * data->blksz;
} else {
dev_err(mmc_dev(host->mmc),
"DATDNE with no host->data\n");
}
}
if (qstatus & MMCST0_TOUTRD) {
/* Read data timeout */
data->error = -ETIMEDOUT;
end_transfer = 1;
dev_dbg(mmc_dev(host->mmc),
"read data timeout, status %x\n",
qstatus);
davinci_abort_data(host, data);
}
if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) {
/* Data CRC error */
data->error = -EILSEQ;
end_transfer = 1;
/* NOTE: this controller uses CRCWR to report both CRC
* errors and timeouts (on writes). MMCDRSP values are
* only weakly documented, but 0x9f was clearly a timeout
* case and the two three-bit patterns in various SD specs
* (101, 010) aren't part of it ...
*/
if (qstatus & MMCST0_CRCWR) {
u32 temp = readb(host->base + DAVINCI_MMCDRSP);
if (temp == 0x9f)
data->error = -ETIMEDOUT;
}
dev_dbg(mmc_dev(host->mmc), "data %s %s error\n",
(qstatus & MMCST0_CRCWR) ? "write" : "read",
(data->error == -ETIMEDOUT) ? "timeout" : "CRC");
davinci_abort_data(host, data);
}
if (qstatus & MMCST0_TOUTRS) {
/* Command timeout */
if (host->cmd) {
dev_dbg(mmc_dev(host->mmc),
"CMD%d timeout, status %x\n",
host->cmd->opcode, qstatus);
host->cmd->error = -ETIMEDOUT;
if (data) {
end_transfer = 1;
davinci_abort_data(host, data);
} else
end_command = 1;
}
}
if (qstatus & MMCST0_CRCRS) {
/* Command CRC error */
dev_dbg(mmc_dev(host->mmc), "Command CRC error\n");
if (host->cmd) {
host->cmd->error = -EILSEQ;
end_command = 1;
}
}
if (qstatus & MMCST0_RSPDNE) {
/* End of command phase */
end_command = host->cmd ? 1 : 0;
}
if (end_command)
mmc_davinci_cmd_done(host, host->cmd);
if (end_transfer)
mmc_davinci_xfer_done(host, data);
return IRQ_HANDLED;
}
static int mmc_davinci_get_cd(struct mmc_host *mmc)
{
struct platform_device *pdev = to_platform_device(mmc->parent);
struct davinci_mmc_config *config = pdev->dev.platform_data;
if (config && config->get_cd)
return config->get_cd(pdev->id);
return mmc_gpio_get_cd(mmc);
}
static int mmc_davinci_get_ro(struct mmc_host *mmc)
{
struct platform_device *pdev = to_platform_device(mmc->parent);
struct davinci_mmc_config *config = pdev->dev.platform_data;
if (config && config->get_ro)
return config->get_ro(pdev->id);
return mmc_gpio_get_ro(mmc);
}
static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct mmc_davinci_host *host = mmc_priv(mmc);
if (enable) {
if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
mmc_signal_sdio_irq(host->mmc);
} else {
host->sdio_int = true;
writel(readl(host->base + DAVINCI_SDIOIEN) |
SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
}
} else {
host->sdio_int = false;
writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
host->base + DAVINCI_SDIOIEN);
}
}
static const struct mmc_host_ops mmc_davinci_ops = {
.request = mmc_davinci_request,
.set_ios = mmc_davinci_set_ios,
.get_cd = mmc_davinci_get_cd,
.get_ro = mmc_davinci_get_ro,
.enable_sdio_irq = mmc_davinci_enable_sdio_irq,
};
/*----------------------------------------------------------------------*/
#ifdef CONFIG_CPU_FREQ
static int mmc_davinci_cpufreq_transition(struct notifier_block *nb,
unsigned long val, void *data)
{
struct mmc_davinci_host *host;
unsigned int mmc_pclk;
struct mmc_host *mmc;
unsigned long flags;
host = container_of(nb, struct mmc_davinci_host, freq_transition);
mmc = host->mmc;
mmc_pclk = clk_get_rate(host->clk);
if (val == CPUFREQ_POSTCHANGE) {
spin_lock_irqsave(&mmc->lock, flags);
host->mmc_input_clk = mmc_pclk;
calculate_clk_divider(mmc, &mmc->ios);
spin_unlock_irqrestore(&mmc->lock, flags);
}
return 0;
}
static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
{
host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition;
return cpufreq_register_notifier(&host->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
}
static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
cpufreq_unregister_notifier(&host->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
}
#else
static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
{
return 0;
}
static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
}
#endif
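/*
* Bring the controller to a known state: hold it in reset while the
* clock is re-enabled and program generous response (MMCTOR) and data
* read (MMCTOD) time-out counts before releasing the reset.
*/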
static void init_mmcsd_host(struct mmc_davinci_host *host)
{
mmc_davinci_reset_ctrl(host, 1);
writel(0, host->base + DAVINCI_MMCCLK);
writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
writel(0x1FFF, host->base + DAVINCI_MMCTOR);
writel(0xFFFF, host->base + DAVINCI_MMCTOD);
mmc_davinci_reset_ctrl(host, 0);
}
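/*
* Keep this table ordered by MMC_CTLR_VERSION_*: the OF match table
* below indexes into it with those constants.
*/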
static const struct platform_device_id davinci_mmc_devtype[] = {
{
.name = "dm6441-mmc",
.driver_data = MMC_CTLR_VERSION_1,
}, {
.name = "da830-mmc",
.driver_data = MMC_CTLR_VERSION_2,
},
{},
};
MODULE_DEVICE_TABLE(platform, davinci_mmc_devtype);
static const struct of_device_id davinci_mmc_dt_ids[] = {
{
.compatible = "ti,dm6441-mmc",
.data = &davinci_mmc_devtype[MMC_CTLR_VERSION_1],
},
{
.compatible = "ti,da830-mmc",
.data = &davinci_mmc_devtype[MMC_CTLR_VERSION_2],
},
{},
};
MODULE_DEVICE_TABLE(of, davinci_mmc_dt_ids);
static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
{
struct platform_device *pdev = to_platform_device(mmc->parent);
struct davinci_mmc_config *pdata = pdev->dev.platform_data;
struct mmc_davinci_host *host;
int ret;
if (!pdata)
return -EINVAL;
host = mmc_priv(mmc);
if (!host)
return -EINVAL;
if (pdata && pdata->nr_sg)
host->nr_sg = pdata->nr_sg - 1;
if (pdata && (pdata->wires == 4 || pdata->wires == 0))
mmc->caps |= MMC_CAP_4_BIT_DATA;
if (pdata && (pdata->wires == 8))
mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA);
mmc->f_min = 312500;
mmc->f_max = 25000000;
if (pdata && pdata->max_freq)
mmc->f_max = pdata->max_freq;
if (pdata && pdata->caps)
mmc->caps |= pdata->caps;
/* Register a CD GPIO; if there is none, enable polling */
ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
return ret;
else if (ret)
mmc->caps |= MMC_CAP_NEEDS_POLL;
ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
if (ret == -EPROBE_DEFER)
return ret;
return 0;
}
static int davinci_mmcsd_probe(struct platform_device *pdev)
{
struct mmc_davinci_host *host = NULL;
struct mmc_host *mmc = NULL;
struct resource *r, *mem = NULL;
int ret, irq;
size_t mem_size;
const struct platform_device_id *id_entry;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r)
return -ENODEV;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
mem_size = resource_size(r);
mem = devm_request_mem_region(&pdev->dev, r->start, mem_size,
pdev->name);
if (!mem)
return -EBUSY;
mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev);
if (!mmc)
return -ENOMEM;
host = mmc_priv(mmc);
host->mmc = mmc; /* Important: mmc_dev(host->mmc) is used before mmc_add_host() */
host->mem_res = mem;
host->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
if (!host->base) {
ret = -ENOMEM;
goto ioremap_fail;
}
host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
goto clk_get_fail;
}
ret = clk_prepare_enable(host->clk);
if (ret)
goto clk_prepare_enable_fail;
host->mmc_input_clk = clk_get_rate(host->clk);
pdev->id_entry = of_device_get_match_data(&pdev->dev);
if (pdev->id_entry) {
ret = mmc_of_parse(mmc);
if (ret) {
dev_err_probe(&pdev->dev, ret,
"could not parse of data\n");
goto parse_fail;
}
} else {
ret = mmc_davinci_parse_pdata(mmc);
if (ret) {
dev_err(&pdev->dev,
"could not parse platform data: %d\n", ret);
goto parse_fail;
}
}
if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
host->nr_sg = MAX_NR_SG;
init_mmcsd_host(host);
host->use_dma = use_dma;
host->mmc_irq = irq;
host->sdio_irq = platform_get_irq_optional(pdev, 1);
if (host->use_dma) {
ret = davinci_acquire_dma_channels(host);
if (ret == -EPROBE_DEFER)
goto dma_probe_defer;
else if (ret)
host->use_dma = 0;
}
mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
id_entry = platform_get_device_id(pdev);
if (id_entry)
host->version = id_entry->driver_data;
mmc->ops = &mmc_davinci_ops;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
/* With no iommu coalescing pages, each phys_seg is a hw_seg.
* Each hw_seg uses one EDMA parameter RAM slot, always one
* channel and then usually some linked slots.
*/
mmc->max_segs = MAX_NR_SG;
/* EDMA limit per hw segment (one or two MBytes) */
mmc->max_seg_size = MAX_CCNT * rw_threshold;
/* MMC/SD controller limits for multiblock requests */
mmc->max_blk_size = 4095; /* BLEN is 12 bits */
mmc->max_blk_count = 65535; /* NBLK is 16 bits */
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs);
dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size);
dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size);
dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size);
platform_set_drvdata(pdev, host);
ret = mmc_davinci_cpufreq_register(host);
if (ret) {
dev_err(&pdev->dev, "failed to register cpufreq\n");
goto cpu_freq_fail;
}
ret = mmc_add_host(mmc);
if (ret < 0)
goto mmc_add_host_fail;
ret = devm_request_irq(&pdev->dev, irq, mmc_davinci_irq, 0,
mmc_hostname(mmc), host);
if (ret)
goto request_irq_fail;
if (host->sdio_irq >= 0) {
ret = devm_request_irq(&pdev->dev, host->sdio_irq,
mmc_davinci_sdio_irq, 0,
mmc_hostname(mmc), host);
if (!ret)
mmc->caps |= MMC_CAP_SDIO_IRQ;
}
rename_region(mem, mmc_hostname(mmc));
dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
host->use_dma ? "DMA" : "PIO",
(mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);
return 0;
request_irq_fail:
mmc_remove_host(mmc);
mmc_add_host_fail:
mmc_davinci_cpufreq_deregister(host);
cpu_freq_fail:
davinci_release_dma_channels(host);
parse_fail:
dma_probe_defer:
clk_disable_unprepare(host->clk);
clk_prepare_enable_fail:
clk_get_fail:
ioremap_fail:
mmc_free_host(mmc);
return ret;
}
static void __exit davinci_mmcsd_remove(struct platform_device *pdev)
{
struct mmc_davinci_host *host = platform_get_drvdata(pdev);
mmc_remove_host(host->mmc);
mmc_davinci_cpufreq_deregister(host);
davinci_release_dma_channels(host);
clk_disable_unprepare(host->clk);
mmc_free_host(host->mmc);
}
#ifdef CONFIG_PM
static int davinci_mmcsd_suspend(struct device *dev)
{
struct mmc_davinci_host *host = dev_get_drvdata(dev);
writel(0, host->base + DAVINCI_MMCIM);
mmc_davinci_reset_ctrl(host, 1);
clk_disable(host->clk);
return 0;
}
static int davinci_mmcsd_resume(struct device *dev)
{
struct mmc_davinci_host *host = dev_get_drvdata(dev);
int ret;
ret = clk_enable(host->clk);
if (ret)
return ret;
mmc_davinci_reset_ctrl(host, 0);
return 0;
}
static const struct dev_pm_ops davinci_mmcsd_pm = {
.suspend = davinci_mmcsd_suspend,
.resume = davinci_mmcsd_resume,
};
#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
#else
#define davinci_mmcsd_pm_ops NULL
#endif
static struct platform_driver davinci_mmcsd_driver = {
.driver = {
.name = "davinci_mmc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = davinci_mmcsd_pm_ops,
.of_match_table = davinci_mmc_dt_ids,
},
.probe = davinci_mmcsd_probe,
.remove_new = __exit_p(davinci_mmcsd_remove),
.id_table = davinci_mmc_devtype,
};
module_platform_driver(davinci_mmcsd_driver);
MODULE_AUTHOR("Texas Instruments India");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
MODULE_ALIAS("platform:davinci_mmc");
| linux-master | drivers/mmc/host/davinci_mmc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* meson-mx-sdio.c - Meson6, Meson8 and Meson8b SDIO/MMC Host Controller
*
* Copyright (C) 2015 Endless Mobile, Inc.
* Author: Carlo Caione <[email protected]>
* Copyright (C) 2017 Martin Blumenstingl <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#define MESON_MX_SDIO_ARGU 0x00
#define MESON_MX_SDIO_SEND 0x04
#define MESON_MX_SDIO_SEND_COMMAND_INDEX_MASK GENMASK(7, 0)
#define MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK GENMASK(15, 8)
#define MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7 BIT(16)
#define MESON_MX_SDIO_SEND_RESP_HAS_DATA BIT(17)
#define MESON_MX_SDIO_SEND_RESP_CRC7_FROM_8 BIT(18)
#define MESON_MX_SDIO_SEND_CHECK_DAT0_BUSY BIT(19)
#define MESON_MX_SDIO_SEND_DATA BIT(20)
#define MESON_MX_SDIO_SEND_USE_INT_WINDOW BIT(21)
#define MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK GENMASK(31, 24)
#define MESON_MX_SDIO_CONF 0x08
#define MESON_MX_SDIO_CONF_CMD_CLK_DIV_SHIFT 0
#define MESON_MX_SDIO_CONF_CMD_CLK_DIV_WIDTH 10
#define MESON_MX_SDIO_CONF_CMD_DISABLE_CRC BIT(10)
#define MESON_MX_SDIO_CONF_CMD_OUT_AT_POSITIVE_EDGE BIT(11)
#define MESON_MX_SDIO_CONF_CMD_ARGUMENT_BITS_MASK GENMASK(17, 12)
#define MESON_MX_SDIO_CONF_RESP_LATCH_AT_NEGATIVE_EDGE BIT(18)
#define MESON_MX_SDIO_CONF_DATA_LATCH_AT_NEGATIVE_EDGE BIT(19)
#define MESON_MX_SDIO_CONF_BUS_WIDTH BIT(20)
#define MESON_MX_SDIO_CONF_M_ENDIAN_MASK GENMASK(22, 21)
#define MESON_MX_SDIO_CONF_WRITE_NWR_MASK GENMASK(28, 23)
#define MESON_MX_SDIO_CONF_WRITE_CRC_OK_STATUS_MASK GENMASK(31, 29)
#define MESON_MX_SDIO_IRQS 0x0c
#define MESON_MX_SDIO_IRQS_STATUS_STATE_MACHINE_MASK GENMASK(3, 0)
#define MESON_MX_SDIO_IRQS_CMD_BUSY BIT(4)
#define MESON_MX_SDIO_IRQS_RESP_CRC7_OK BIT(5)
#define MESON_MX_SDIO_IRQS_DATA_READ_CRC16_OK BIT(6)
#define MESON_MX_SDIO_IRQS_DATA_WRITE_CRC16_OK BIT(7)
#define MESON_MX_SDIO_IRQS_IF_INT BIT(8)
#define MESON_MX_SDIO_IRQS_CMD_INT BIT(9)
#define MESON_MX_SDIO_IRQS_STATUS_INFO_MASK GENMASK(15, 12)
#define MESON_MX_SDIO_IRQS_TIMING_OUT_INT BIT(16)
#define MESON_MX_SDIO_IRQS_AMRISC_TIMING_OUT_INT_EN BIT(17)
#define MESON_MX_SDIO_IRQS_ARC_TIMING_OUT_INT_EN BIT(18)
#define MESON_MX_SDIO_IRQS_TIMING_OUT_COUNT_MASK GENMASK(31, 19)
#define MESON_MX_SDIO_IRQC 0x10
#define MESON_MX_SDIO_IRQC_ARC_IF_INT_EN BIT(3)
#define MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN BIT(4)
#define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6)
#define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8)
#define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9)
#define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(13, 10)
#define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15)
#define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30)
#define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31)
#define MESON_MX_SDIO_MULT 0x14
#define MESON_MX_SDIO_MULT_PORT_SEL_MASK GENMASK(1, 0)
#define MESON_MX_SDIO_MULT_MEMORY_STICK_ENABLE BIT(2)
#define MESON_MX_SDIO_MULT_MEMORY_STICK_SCLK_ALWAYS BIT(3)
#define MESON_MX_SDIO_MULT_STREAM_ENABLE BIT(4)
#define MESON_MX_SDIO_MULT_STREAM_8BITS_MODE BIT(5)
#define MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX BIT(8)
#define MESON_MX_SDIO_MULT_DAT0_DAT1_SWAPPED BIT(10)
#define MESON_MX_SDIO_MULT_DAT1_DAT0_SWAPPED BIT(11)
#define MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK GENMASK(15, 12)
#define MESON_MX_SDIO_ADDR 0x18
#define MESON_MX_SDIO_EXT 0x1c
#define MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK GENMASK(29, 16)
#define MESON_MX_SDIO_BOUNCE_REQ_SIZE (128 * 1024)
#define MESON_MX_SDIO_RESPONSE_CRC16_BITS (16 - 1)
#define MESON_MX_SDIO_MAX_SLOTS 3
struct meson_mx_mmc_host {
struct device *controller_dev;
struct clk *parent_clk;
struct clk *core_clk;
struct clk_divider cfg_div;
struct clk *cfg_div_clk;
struct clk_fixed_factor fixed_factor;
struct clk *fixed_factor_clk;
void __iomem *base;
int irq;
spinlock_t irq_lock;
struct timer_list cmd_timeout;
unsigned int slot_id;
struct mmc_host *mmc;
struct mmc_request *mrq;
struct mmc_command *cmd;
int error;
};
static void meson_mx_mmc_mask_bits(struct mmc_host *mmc, char reg, u32 mask,
u32 val)
{
struct meson_mx_mmc_host *host = mmc_priv(mmc);
u32 regval;
regval = readl(host->base + reg);
regval &= ~mask;
regval |= (val & mask);
writel(regval, host->base + reg);
}
static void meson_mx_mmc_soft_reset(struct meson_mx_mmc_host *host)
{
writel(MESON_MX_SDIO_IRQC_SOFT_RESET, host->base + MESON_MX_SDIO_IRQC);
udelay(2);
}
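/*
* Command sequencing within one mmc_request: an optional CMD23 (sbc)
* goes out first; on success the actual data command follows; a
* multi-block transfer that has no sbc, or that hit an error, is
* finished with the stop command. A NULL return means the request is
* complete.
*/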
static struct mmc_command *meson_mx_mmc_get_next_cmd(struct mmc_command *cmd)
{
if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error)
return cmd->mrq->cmd;
else if (mmc_op_multi(cmd->opcode) &&
(!cmd->mrq->sbc || cmd->error || cmd->data->error))
return cmd->mrq->stop;
else
return NULL;
}
static void meson_mx_mmc_start_cmd(struct mmc_host *mmc,
struct mmc_command *cmd)
{
struct meson_mx_mmc_host *host = mmc_priv(mmc);
unsigned int pack_size;
unsigned long irqflags, timeout;
u32 mult, send = 0, ext = 0;
host->cmd = cmd;
if (cmd->busy_timeout)
timeout = msecs_to_jiffies(cmd->busy_timeout);
else
timeout = msecs_to_jiffies(1000);
switch (mmc_resp_type(cmd)) {
case MMC_RSP_R1:
case MMC_RSP_R1B:
case MMC_RSP_R3:
/* 7 (CMD) + 32 (response) + 7 (CRC) -1 */
send |= FIELD_PREP(MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK, 45);
break;
case MMC_RSP_R2:
/* 7 (CMD) + 120 (response) + 7 (CRC) -1 */
send |= FIELD_PREP(MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK, 133);
send |= MESON_MX_SDIO_SEND_RESP_CRC7_FROM_8;
break;
default:
break;
}
if (!(cmd->flags & MMC_RSP_CRC))
send |= MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7;
if (cmd->flags & MMC_RSP_BUSY)
send |= MESON_MX_SDIO_SEND_CHECK_DAT0_BUSY;
if (cmd->data) {
send |= FIELD_PREP(MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK,
(cmd->data->blocks - 1));
pack_size = cmd->data->blksz * BITS_PER_BYTE;
if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
pack_size += MESON_MX_SDIO_RESPONSE_CRC16_BITS * 4;
else
pack_size += MESON_MX_SDIO_RESPONSE_CRC16_BITS * 1;
ext |= FIELD_PREP(MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK,
pack_size);
if (cmd->data->flags & MMC_DATA_WRITE)
send |= MESON_MX_SDIO_SEND_DATA;
else
send |= MESON_MX_SDIO_SEND_RESP_HAS_DATA;
cmd->data->bytes_xfered = 0;
}
send |= FIELD_PREP(MESON_MX_SDIO_SEND_COMMAND_INDEX_MASK,
(0x40 | cmd->opcode));
spin_lock_irqsave(&host->irq_lock, irqflags);
mult = readl(host->base + MESON_MX_SDIO_MULT);
mult &= ~MESON_MX_SDIO_MULT_PORT_SEL_MASK;
mult |= FIELD_PREP(MESON_MX_SDIO_MULT_PORT_SEL_MASK, host->slot_id);
mult |= BIT(31);
writel(mult, host->base + MESON_MX_SDIO_MULT);
/* enable the CMD done interrupt */
meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_IRQC,
MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN,
MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN);
/* clear pending interrupts */
meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_IRQS,
MESON_MX_SDIO_IRQS_CMD_INT,
MESON_MX_SDIO_IRQS_CMD_INT);
writel(cmd->arg, host->base + MESON_MX_SDIO_ARGU);
writel(ext, host->base + MESON_MX_SDIO_EXT);
writel(send, host->base + MESON_MX_SDIO_SEND);
spin_unlock_irqrestore(&host->irq_lock, irqflags);
mod_timer(&host->cmd_timeout, jiffies + timeout);
}
static void meson_mx_mmc_request_done(struct meson_mx_mmc_host *host)
{
struct mmc_request *mrq;
mrq = host->mrq;
if (host->cmd->error)
meson_mx_mmc_soft_reset(host);
host->mrq = NULL;
host->cmd = NULL;
mmc_request_done(host->mmc, mrq);
}
static void meson_mx_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct meson_mx_mmc_host *host = mmc_priv(mmc);
unsigned short vdd = ios->vdd;
unsigned long clk_rate = ios->clock;
switch (ios->bus_width) {
case MMC_BUS_WIDTH_1:
meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_CONF,
MESON_MX_SDIO_CONF_BUS_WIDTH, 0);
break;
case MMC_BUS_WIDTH_4:
meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_CONF,
MESON_MX_SDIO_CONF_BUS_WIDTH,
MESON_MX_SDIO_CONF_BUS_WIDTH);
break;
case MMC_BUS_WIDTH_8:
default:
dev_err(mmc_dev(mmc), "unsupported bus width: %d\n",
ios->bus_width);
host->error = -EINVAL;
return;
}
host->error = clk_set_rate(host->cfg_div_clk, ios->clock);
if (host->error) {
dev_warn(mmc_dev(mmc),
"failed to set MMC clock to %lu: %d\n",
clk_rate, host->error);
return;
}
mmc->actual_clock = clk_get_rate(host->cfg_div_clk);
switch (ios->power_mode) {
case MMC_POWER_OFF:
vdd = 0;
fallthrough;
case MMC_POWER_UP:
if (!IS_ERR(mmc->supply.vmmc)) {
host->error = mmc_regulator_set_ocr(mmc,
mmc->supply.vmmc,
vdd);
if (host->error)
return;
}
break;
}
}
static int meson_mx_mmc_map_dma(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct mmc_data *data = mrq->data;
int dma_len;
struct scatterlist *sg;
if (!data)
return 0;
sg = data->sg;
if (sg->offset & 3 || sg->length & 3) {
dev_err(mmc_dev(mmc),
"unaligned scatterlist: offset %x length %d\n",
sg->offset, sg->length);
return -EINVAL;
}
dma_len = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
mmc_get_dma_dir(data));
if (dma_len <= 0) {
dev_err(mmc_dev(mmc), "dma_map_sg failed\n");
return -ENOMEM;
}
return 0;
}
static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct meson_mx_mmc_host *host = mmc_priv(mmc);
struct mmc_command *cmd = mrq->cmd;
if (!host->error)
host->error = meson_mx_mmc_map_dma(mmc, mrq);
if (host->error) {
cmd->error = host->error;
mmc_request_done(mmc, mrq);
return;
}
host->mrq = mrq;
if (mrq->data)
writel(sg_dma_address(mrq->data->sg),
host->base + MESON_MX_SDIO_ADDR);
if (mrq->sbc)
meson_mx_mmc_start_cmd(mmc, mrq->sbc);
else
meson_mx_mmc_start_cmd(mmc, mrq->cmd);
}
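/*
* Response words are read back one at a time through the ARGU
* register and stored in reverse, so resp[0] ends up holding the most
* significant word. The controller drops the CRC7 byte, so for a
* 136-bit (R2) response every word is shifted left by 8 and topped up
* with the high byte of the next one to realign the 120 response bits
* that the MMC core expects in cmd->resp[].
*/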
static void meson_mx_mmc_read_response(struct mmc_host *mmc,
struct mmc_command *cmd)
{
struct meson_mx_mmc_host *host = mmc_priv(mmc);
u32 mult;
int i, resp[4];
mult = readl(host->base + MESON_MX_SDIO_MULT);
mult |= MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX;
mult &= ~MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK;
mult |= FIELD_PREP(MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK, 0);
writel(mult, host->base + MESON_MX_SDIO_MULT);
if (cmd->flags & MMC_RSP_136) {
for (i = 0; i <= 3; i++)
resp[3 - i] = readl(host->base + MESON_MX_SDIO_ARGU);
cmd->resp[0] = (resp[0] << 8) | ((resp[1] >> 24) & 0xff);
cmd->resp[1] = (resp[1] << 8) | ((resp[2] >> 24) & 0xff);
cmd->resp[2] = (resp[2] << 8) | ((resp[3] >> 24) & 0xff);
cmd->resp[3] = (resp[3] << 8);
} else if (cmd->flags & MMC_RSP_PRESENT) {
cmd->resp[0] = readl(host->base + MESON_MX_SDIO_ARGU);
}
}
static irqreturn_t meson_mx_mmc_process_cmd_irq(struct meson_mx_mmc_host *host,
u32 irqs, u32 send)
{
struct mmc_command *cmd = host->cmd;
/*
* NOTE: even though it shouldn't happen, we sometimes get command
* interrupts twice (at least that is what it looks like). Ideally
* we would find out why this happens and warn here as soon as it
* occurs.
*/
if (!cmd)
return IRQ_HANDLED;
cmd->error = 0;
meson_mx_mmc_read_response(host->mmc, cmd);
if (cmd->data) {
if (!((irqs & MESON_MX_SDIO_IRQS_DATA_READ_CRC16_OK) ||
(irqs & MESON_MX_SDIO_IRQS_DATA_WRITE_CRC16_OK)))
cmd->error = -EILSEQ;
} else {
if (!((irqs & MESON_MX_SDIO_IRQS_RESP_CRC7_OK) ||
(send & MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7)))
cmd->error = -EILSEQ;
}
return IRQ_WAKE_THREAD;
}
static irqreturn_t meson_mx_mmc_irq(int irq, void *data)
{
struct meson_mx_mmc_host *host = (void *) data;
u32 irqs, send;
irqreturn_t ret;
spin_lock(&host->irq_lock);
irqs = readl(host->base + MESON_MX_SDIO_IRQS);
send = readl(host->base + MESON_MX_SDIO_SEND);
if (irqs & MESON_MX_SDIO_IRQS_CMD_INT)
ret = meson_mx_mmc_process_cmd_irq(host, irqs, send);
else
ret = IRQ_HANDLED;
/* finally ACK all pending interrupts */
writel(irqs, host->base + MESON_MX_SDIO_IRQS);
spin_unlock(&host->irq_lock);
return ret;
}
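/*
* Threaded bottom half of the command interrupt: runs with interrupts
* enabled so it may unmap the DMA scatterlist, then either starts the
* next command of the request or completes the request.
*/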
static irqreturn_t meson_mx_mmc_irq_thread(int irq, void *irq_data)
{
struct meson_mx_mmc_host *host = (void *) irq_data;
struct mmc_command *cmd = host->cmd, *next_cmd;
if (WARN_ON(!cmd))
return IRQ_HANDLED;
del_timer_sync(&host->cmd_timeout);
if (cmd->data) {
dma_unmap_sg(mmc_dev(host->mmc), cmd->data->sg,
cmd->data->sg_len,
mmc_get_dma_dir(cmd->data));
cmd->data->bytes_xfered = cmd->data->blksz * cmd->data->blocks;
}
next_cmd = meson_mx_mmc_get_next_cmd(cmd);
if (next_cmd)
meson_mx_mmc_start_cmd(host->mmc, next_cmd);
else
meson_mx_mmc_request_done(host);
return IRQ_HANDLED;
}
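/*
* Software time-out for a command whose interrupt never arrived. The
* CMD interrupt is disabled under irq_lock first; if host->cmd is
* NULL afterwards, the interrupt handler won the race and the
* time-out is simply ignored.
*/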
static void meson_mx_mmc_timeout(struct timer_list *t)
{
struct meson_mx_mmc_host *host = from_timer(host, t, cmd_timeout);
unsigned long irqflags;
u32 irqc;
spin_lock_irqsave(&host->irq_lock, irqflags);
/* disable the CMD interrupt */
irqc = readl(host->base + MESON_MX_SDIO_IRQC);
irqc &= ~MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN;
writel(irqc, host->base + MESON_MX_SDIO_IRQC);
spin_unlock_irqrestore(&host->irq_lock, irqflags);
/*
* skip the timeout handling if the interrupt handler already processed
* the command.
*/
if (!host->cmd)
return;
dev_dbg(mmc_dev(host->mmc),
"Timeout on CMD%u (IRQS = 0x%08x, ARGU = 0x%08x)\n",
host->cmd->opcode, readl(host->base + MESON_MX_SDIO_IRQS),
readl(host->base + MESON_MX_SDIO_ARGU));
host->cmd->error = -ETIMEDOUT;
meson_mx_mmc_request_done(host);
}
static struct mmc_host_ops meson_mx_mmc_ops = {
.request = meson_mx_mmc_request,
.set_ios = meson_mx_mmc_set_ios,
.get_cd = mmc_gpio_get_cd,
.get_ro = mmc_gpio_get_ro,
};
static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent)
{
struct device_node *slot_node;
struct platform_device *pdev;
/*
* TODO: the MMC core framework currently does not support
* controllers with multiple slots properly. So we only register
* the first slot for now
*/
slot_node = of_get_compatible_child(parent->of_node, "mmc-slot");
if (!slot_node) {
dev_warn(parent, "no 'mmc-slot' sub-node found\n");
return ERR_PTR(-ENOENT);
}
pdev = of_platform_device_create(slot_node, NULL, parent);
of_node_put(slot_node);
return pdev;
}
static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
{
struct mmc_host *mmc = host->mmc;
struct device *slot_dev = mmc_dev(mmc);
int ret;
if (of_property_read_u32(slot_dev->of_node, "reg", &host->slot_id)) {
dev_err(slot_dev, "missing 'reg' property\n");
return -EINVAL;
}
if (host->slot_id >= MESON_MX_SDIO_MAX_SLOTS) {
dev_err(slot_dev, "invalid 'reg' property value %d\n",
host->slot_id);
return -EINVAL;
}
/* Get regulators and the supported OCR mask */
ret = mmc_regulator_get_supply(mmc);
if (ret)
return ret;
mmc->max_req_size = MESON_MX_SDIO_BOUNCE_REQ_SIZE;
mmc->max_seg_size = mmc->max_req_size;
mmc->max_blk_count =
FIELD_GET(MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK,
0xffffffff);
mmc->max_blk_size = FIELD_GET(MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK,
0xffffffff);
mmc->max_blk_size -= (4 * MESON_MX_SDIO_RESPONSE_CRC16_BITS);
mmc->max_blk_size /= BITS_PER_BYTE;
/* Get the min and max supported clock rates */
mmc->f_min = clk_round_rate(host->cfg_div_clk, 1);
mmc->f_max = clk_round_rate(host->cfg_div_clk,
clk_get_rate(host->parent_clk));
mmc->caps |= MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY;
mmc->ops = &meson_mx_mmc_ops;
ret = mmc_of_parse(mmc);
if (ret)
return ret;
ret = mmc_add_host(mmc);
if (ret)
return ret;
return 0;
}
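/*
* Clock topology registered below: "clkin" feeds a fixed-factor
* divider (/2) which in turn feeds a clk_divider backed by the
* CMD_CLK_DIV field of the CONF register, so set_ios() only needs a
* clk_set_rate() on cfg_div_clk.
*/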
static int meson_mx_mmc_register_clks(struct meson_mx_mmc_host *host)
{
struct clk_init_data init;
const char *clk_div_parent, *clk_fixed_factor_parent;
clk_fixed_factor_parent = __clk_get_name(host->parent_clk);
init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
"%s#fixed_factor",
dev_name(host->controller_dev));
if (!init.name)
return -ENOMEM;
init.ops = &clk_fixed_factor_ops;
init.flags = 0;
init.parent_names = &clk_fixed_factor_parent;
init.num_parents = 1;
host->fixed_factor.div = 2;
host->fixed_factor.mult = 1;
host->fixed_factor.hw.init = &init;
host->fixed_factor_clk = devm_clk_register(host->controller_dev,
&host->fixed_factor.hw);
if (WARN_ON(IS_ERR(host->fixed_factor_clk)))
return PTR_ERR(host->fixed_factor_clk);
clk_div_parent = __clk_get_name(host->fixed_factor_clk);
init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
"%s#div", dev_name(host->controller_dev));
if (!init.name)
return -ENOMEM;
init.ops = &clk_divider_ops;
init.flags = CLK_SET_RATE_PARENT;
init.parent_names = &clk_div_parent;
init.num_parents = 1;
host->cfg_div.reg = host->base + MESON_MX_SDIO_CONF;
host->cfg_div.shift = MESON_MX_SDIO_CONF_CMD_CLK_DIV_SHIFT;
host->cfg_div.width = MESON_MX_SDIO_CONF_CMD_CLK_DIV_WIDTH;
host->cfg_div.hw.init = &init;
host->cfg_div.flags = CLK_DIVIDER_ALLOW_ZERO;
host->cfg_div_clk = devm_clk_register(host->controller_dev,
&host->cfg_div.hw);
if (WARN_ON(IS_ERR(host->cfg_div_clk)))
return PTR_ERR(host->cfg_div_clk);
return 0;
}
static int meson_mx_mmc_probe(struct platform_device *pdev)
{
struct platform_device *slot_pdev;
struct mmc_host *mmc;
struct meson_mx_mmc_host *host;
int ret, irq;
u32 conf;
slot_pdev = meson_mx_mmc_slot_pdev(&pdev->dev);
if (!slot_pdev)
return -ENODEV;
else if (IS_ERR(slot_pdev))
return PTR_ERR(slot_pdev);
mmc = mmc_alloc_host(sizeof(*host), &slot_pdev->dev);
if (!mmc) {
ret = -ENOMEM;
goto error_unregister_slot_pdev;
}
host = mmc_priv(mmc);
host->mmc = mmc;
host->controller_dev = &pdev->dev;
spin_lock_init(&host->irq_lock);
timer_setup(&host->cmd_timeout, meson_mx_mmc_timeout, 0);
platform_set_drvdata(pdev, host);
host->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
goto error_free_mmc;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto error_free_mmc;
}
ret = devm_request_threaded_irq(host->controller_dev, irq,
meson_mx_mmc_irq,
meson_mx_mmc_irq_thread, IRQF_ONESHOT,
NULL, host);
if (ret)
goto error_free_mmc;
host->core_clk = devm_clk_get(host->controller_dev, "core");
if (IS_ERR(host->core_clk)) {
ret = PTR_ERR(host->core_clk);
goto error_free_mmc;
}
host->parent_clk = devm_clk_get(host->controller_dev, "clkin");
if (IS_ERR(host->parent_clk)) {
ret = PTR_ERR(host->parent_clk);
goto error_free_mmc;
}
ret = meson_mx_mmc_register_clks(host);
if (ret)
goto error_free_mmc;
ret = clk_prepare_enable(host->core_clk);
if (ret) {
dev_err(host->controller_dev, "Failed to enable core clock\n");
goto error_free_mmc;
}
ret = clk_prepare_enable(host->cfg_div_clk);
if (ret) {
dev_err(host->controller_dev, "Failed to enable MMC clock\n");
goto error_disable_core_clk;
}
conf = 0;
conf |= FIELD_PREP(MESON_MX_SDIO_CONF_CMD_ARGUMENT_BITS_MASK, 39);
conf |= FIELD_PREP(MESON_MX_SDIO_CONF_M_ENDIAN_MASK, 0x3);
conf |= FIELD_PREP(MESON_MX_SDIO_CONF_WRITE_NWR_MASK, 0x2);
conf |= FIELD_PREP(MESON_MX_SDIO_CONF_WRITE_CRC_OK_STATUS_MASK, 0x2);
writel(conf, host->base + MESON_MX_SDIO_CONF);
meson_mx_mmc_soft_reset(host);
ret = meson_mx_mmc_add_host(host);
if (ret)
goto error_disable_clks;
return 0;
error_disable_clks:
clk_disable_unprepare(host->cfg_div_clk);
error_disable_core_clk:
clk_disable_unprepare(host->core_clk);
error_free_mmc:
mmc_free_host(mmc);
error_unregister_slot_pdev:
of_platform_device_destroy(&slot_pdev->dev, NULL);
return ret;
}
static void meson_mx_mmc_remove(struct platform_device *pdev)
{
struct meson_mx_mmc_host *host = platform_get_drvdata(pdev);
struct device *slot_dev = mmc_dev(host->mmc);
del_timer_sync(&host->cmd_timeout);
mmc_remove_host(host->mmc);
of_platform_device_destroy(slot_dev, NULL);
clk_disable_unprepare(host->cfg_div_clk);
clk_disable_unprepare(host->core_clk);
mmc_free_host(host->mmc);
}
static const struct of_device_id meson_mx_mmc_of_match[] = {
{ .compatible = "amlogic,meson8-sdio", },
{ .compatible = "amlogic,meson8b-sdio", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_mx_mmc_of_match);
static struct platform_driver meson_mx_mmc_driver = {
.probe = meson_mx_mmc_probe,
.remove_new = meson_mx_mmc_remove,
.driver = {
.name = "meson-mx-sdio",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(meson_mx_mmc_of_match),
},
};
module_platform_driver(meson_mx_mmc_driver);
MODULE_DESCRIPTION("Meson6, Meson8 and Meson8b SDIO/MMC Host Driver");
MODULE_AUTHOR("Carlo Caione <[email protected]>");
MODULE_AUTHOR("Martin Blumenstingl <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mmc/host/meson-mx-sdio.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SDHCI Controller driver for TI's OMAP SoCs
*
* Copyright (C) 2017 Texas Instruments
* Author: Kishon Vijay Abraham I <[email protected]>
*/
#include <linux/delay.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/regulator/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sys_soc.h>
#include <linux/thermal.h>
#include "sdhci-pltfm.h"
/*
* Note that the register offsets used here are from omap_regs
* base which is 0x100 for omap4 and later, and 0 for omap3 and
* earlier.
*/
#define SDHCI_OMAP_SYSCONFIG 0x10
#define SDHCI_OMAP_CON 0x2c
#define CON_DW8 BIT(5)
#define CON_DMA_MASTER BIT(20)
#define CON_DDR BIT(19)
#define CON_CLKEXTFREE BIT(16)
#define CON_PADEN BIT(15)
#define CON_CTPL BIT(11)
#define CON_INIT BIT(1)
#define CON_OD BIT(0)
#define SDHCI_OMAP_DLL 0x34
#define DLL_SWT BIT(20)
#define DLL_FORCE_SR_C_SHIFT 13
#define DLL_FORCE_SR_C_MASK (0x7f << DLL_FORCE_SR_C_SHIFT)
#define DLL_FORCE_VALUE BIT(12)
#define DLL_CALIB BIT(1)
#define SDHCI_OMAP_CMD 0x10c
#define SDHCI_OMAP_PSTATE 0x124
#define PSTATE_DLEV_DAT0 BIT(20)
#define PSTATE_DATI BIT(1)
#define SDHCI_OMAP_HCTL 0x128
#define HCTL_SDBP BIT(8)
#define HCTL_SDVS_SHIFT 9
#define HCTL_SDVS_MASK (0x7 << HCTL_SDVS_SHIFT)
#define HCTL_SDVS_33 (0x7 << HCTL_SDVS_SHIFT)
#define HCTL_SDVS_30 (0x6 << HCTL_SDVS_SHIFT)
#define HCTL_SDVS_18 (0x5 << HCTL_SDVS_SHIFT)
#define SDHCI_OMAP_SYSCTL 0x12c
#define SYSCTL_CEN BIT(2)
#define SYSCTL_CLKD_SHIFT 6
#define SYSCTL_CLKD_MASK 0x3ff
#define SDHCI_OMAP_STAT 0x130
#define SDHCI_OMAP_IE 0x134
#define INT_CC_EN BIT(0)
#define SDHCI_OMAP_ISE 0x138
#define SDHCI_OMAP_AC12 0x13c
#define AC12_V1V8_SIGEN BIT(19)
#define AC12_SCLK_SEL BIT(23)
#define SDHCI_OMAP_CAPA 0x140
#define CAPA_VS33 BIT(24)
#define CAPA_VS30 BIT(25)
#define CAPA_VS18 BIT(26)
#define SDHCI_OMAP_CAPA2 0x144
#define CAPA2_TSDR50 BIT(13)
#define SDHCI_OMAP_TIMEOUT 1 /* 1 msec */
#define SYSCTL_CLKD_MAX 0x3FF
#define IOV_1V8 1800000 /* 1.8 V in uV */
#define IOV_3V0 3000000 /* 3.0 V in uV */
#define IOV_3V3 3300000 /* 3.3 V in uV */
#define MAX_PHASE_DELAY 0x7C
/* sdhci-omap controller flags */
#define SDHCI_OMAP_REQUIRE_IODELAY BIT(0)
#define SDHCI_OMAP_SPECIAL_RESET BIT(1)
struct sdhci_omap_data {
int omap_offset; /* Offset for omap regs from base */
u32 offset; /* Offset for SDHCI regs from base */
u8 flags;
};
struct sdhci_omap_host {
char *version;
void __iomem *base;
struct device *dev;
struct regulator *pbias;
bool pbias_enabled;
struct sdhci_host *host;
u8 bus_mode;
u8 power_mode;
u8 timing;
u8 flags;
struct pinctrl *pinctrl;
struct pinctrl_state **pinctrl_state;
int wakeirq;
bool is_tuning;
/* Offset for omap specific registers from base */
int omap_offset;
/* Omap specific context save */
u32 con;
u32 hctl;
u32 sysctl;
u32 capa;
u32 ie;
u32 ise;
};
static void sdhci_omap_start_clock(struct sdhci_omap_host *omap_host);
static void sdhci_omap_stop_clock(struct sdhci_omap_host *omap_host);
static inline u32 sdhci_omap_readl(struct sdhci_omap_host *host,
unsigned int offset)
{
return readl(host->base + host->omap_offset + offset);
}
static inline void sdhci_omap_writel(struct sdhci_omap_host *host,
unsigned int offset, u32 data)
{
writel(data, host->base + host->omap_offset + offset);
}
static int sdhci_omap_set_pbias(struct sdhci_omap_host *omap_host,
bool power_on, unsigned int iov)
{
int ret;
struct device *dev = omap_host->dev;
if (IS_ERR(omap_host->pbias))
return 0;
if (power_on) {
ret = regulator_set_voltage(omap_host->pbias, iov, iov);
if (ret) {
dev_err(dev, "pbias set voltage failed\n");
return ret;
}
if (omap_host->pbias_enabled)
return 0;
ret = regulator_enable(omap_host->pbias);
if (ret) {
dev_err(dev, "pbias reg enable fail\n");
return ret;
}
omap_host->pbias_enabled = true;
} else {
if (!omap_host->pbias_enabled)
return 0;
ret = regulator_disable(omap_host->pbias);
if (ret) {
dev_err(dev, "pbias reg disable fail\n");
return ret;
}
omap_host->pbias_enabled = false;
}
return 0;
}
static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host,
unsigned int iov_pbias)
{
int ret;
struct sdhci_host *host = omap_host->host;
struct mmc_host *mmc = host->mmc;
ret = sdhci_omap_set_pbias(omap_host, false, 0);
if (ret)
return ret;
if (!IS_ERR(mmc->supply.vqmmc)) {
/* Pick the right voltage to allow 3.0V for 3.3V nominal PBIAS */
ret = mmc_regulator_set_vqmmc(mmc, &mmc->ios);
if (ret < 0) {
dev_err(mmc_dev(mmc), "vqmmc set voltage failed\n");
return ret;
}
}
ret = sdhci_omap_set_pbias(omap_host, true, iov_pbias);
if (ret)
return ret;
return 0;
}
static void sdhci_omap_conf_bus_power(struct sdhci_omap_host *omap_host,
unsigned char signal_voltage)
{
u32 reg, capa;
ktime_t timeout;
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL);
reg &= ~HCTL_SDVS_MASK;
switch (signal_voltage) {
case MMC_SIGNAL_VOLTAGE_330:
capa = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
if (capa & CAPA_VS33)
reg |= HCTL_SDVS_33;
else if (capa & CAPA_VS30)
reg |= HCTL_SDVS_30;
else
dev_warn(omap_host->dev, "misconfigured CAPA: %08x\n",
capa);
break;
case MMC_SIGNAL_VOLTAGE_180:
default:
reg |= HCTL_SDVS_18;
break;
}
sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg);
reg |= HCTL_SDBP;
sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg);
/* wait 1ms */
timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT);
while (1) {
bool timedout = ktime_after(ktime_get(), timeout);
if (sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL) & HCTL_SDBP)
break;
if (WARN_ON(timedout))
return;
usleep_range(5, 10);
}
}
static void sdhci_omap_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
u32 reg;
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
if (enable)
reg |= (CON_CTPL | CON_CLKEXTFREE);
else
reg &= ~(CON_CTPL | CON_CLKEXTFREE);
sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
sdhci_enable_sdio_irq(mmc, enable);
}
static inline void sdhci_omap_set_dll(struct sdhci_omap_host *omap_host,
int count)
{
int i;
u32 reg;
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_DLL);
reg |= DLL_FORCE_VALUE;
reg &= ~DLL_FORCE_SR_C_MASK;
reg |= (count << DLL_FORCE_SR_C_SHIFT);
sdhci_omap_writel(omap_host, SDHCI_OMAP_DLL, reg);
reg |= DLL_CALIB;
sdhci_omap_writel(omap_host, SDHCI_OMAP_DLL, reg);
for (i = 0; i < 1000; i++) {
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_DLL);
if (reg & DLL_CALIB)
break;
}
reg &= ~DLL_CALIB;
sdhci_omap_writel(omap_host, SDHCI_OMAP_DLL, reg);
}
static void sdhci_omap_disable_tuning(struct sdhci_omap_host *omap_host)
{
u32 reg;
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
reg &= ~AC12_SCLK_SEL;
sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_DLL);
reg &= ~(DLL_FORCE_VALUE | DLL_SWT);
sdhci_omap_writel(omap_host, SDHCI_OMAP_DLL, reg);
}
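/*
* DLL tuning: sweep the phase delay in steps of four up to
* MAX_PHASE_DELAY and track the longest passing window, tolerating
* isolated single-point failures. The final tap is placed inside that
* window at a ratio derived from the die temperature, and stage 2
* probes around it to move away from any nearby single-point failure.
*/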
static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
struct thermal_zone_device *thermal_dev;
struct device *dev = omap_host->dev;
struct mmc_ios *ios = &mmc->ios;
u32 start_window = 0, max_window = 0;
bool single_point_failure = false;
bool dcrc_was_enabled = false;
u8 cur_match, prev_match = 0;
u32 length = 0, max_len = 0;
u32 phase_delay = 0;
int temperature;
int ret = 0;
u32 reg;
int i;
/* Clock tuning is not needed for clocks up to 52 MHz */
if (ios->clock <= 52000000)
return 0;
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA2);
if (ios->timing == MMC_TIMING_UHS_SDR50 && !(reg & CAPA2_TSDR50))
return 0;
thermal_dev = thermal_zone_get_zone_by_name("cpu_thermal");
if (IS_ERR(thermal_dev)) {
dev_err(dev, "Unable to get thermal zone for tuning\n");
return PTR_ERR(thermal_dev);
}
ret = thermal_zone_get_temp(thermal_dev, &temperature);
if (ret)
return ret;
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_DLL);
reg |= DLL_SWT;
sdhci_omap_writel(omap_host, SDHCI_OMAP_DLL, reg);
/*
* OMAP5/DRA74X/DRA72x Errata i802:
* DCRC error interrupts (MMCHS_STAT[21] DCRC=0x1) can occur
* during the tuning procedure. So disable it during the
* tuning procedure.
*/
if (host->ier & SDHCI_INT_DATA_CRC) {
host->ier &= ~SDHCI_INT_DATA_CRC;
dcrc_was_enabled = true;
}
omap_host->is_tuning = true;
/*
* Stage 1: Search for a maximum pass window ignoring any
* single point failures. If the tuning value ends up
* near it, move away from it in stage 2 below
*/
while (phase_delay <= MAX_PHASE_DELAY) {
sdhci_omap_set_dll(omap_host, phase_delay);
cur_match = !mmc_send_tuning(mmc, opcode, NULL);
if (cur_match) {
if (prev_match) {
length++;
} else if (single_point_failure) {
/* ignore single point failure */
length++;
} else {
start_window = phase_delay;
length = 1;
}
} else {
single_point_failure = prev_match;
}
if (length > max_len) {
max_window = start_window;
max_len = length;
}
prev_match = cur_match;
phase_delay += 4;
}
if (!max_len) {
dev_err(dev, "Unable to find match\n");
ret = -EIO;
goto tuning_error;
}
/*
* Assign tuning value as a ratio of maximum pass window based
* on temperature
*/
if (temperature < -20000)
phase_delay = min(max_window + 4 * (max_len - 1) - 24,
max_window +
DIV_ROUND_UP(13 * max_len, 16) * 4);
else if (temperature < 20000)
phase_delay = max_window + DIV_ROUND_UP(9 * max_len, 16) * 4;
else if (temperature < 40000)
phase_delay = max_window + DIV_ROUND_UP(8 * max_len, 16) * 4;
else if (temperature < 70000)
phase_delay = max_window + DIV_ROUND_UP(7 * max_len, 16) * 4;
else if (temperature < 90000)
phase_delay = max_window + DIV_ROUND_UP(5 * max_len, 16) * 4;
else if (temperature < 120000)
phase_delay = max_window + DIV_ROUND_UP(4 * max_len, 16) * 4;
else
phase_delay = max_window + DIV_ROUND_UP(3 * max_len, 16) * 4;
/*
* Stage 2: Search for a single point failure near the chosen tuning
* value in two steps. First in the +3 to +10 range and then in the
* +2 to -10 range. If found, move away from it in the appropriate
* direction by the appropriate amount depending on the temperature.
*/
for (i = 3; i <= 10; i++) {
sdhci_omap_set_dll(omap_host, phase_delay + i);
if (mmc_send_tuning(mmc, opcode, NULL)) {
if (temperature < 10000)
phase_delay += i + 6;
else if (temperature < 20000)
phase_delay += i - 12;
else if (temperature < 70000)
phase_delay += i - 8;
else
phase_delay += i - 6;
goto single_failure_found;
}
}
for (i = 2; i >= -10; i--) {
sdhci_omap_set_dll(omap_host, phase_delay + i);
if (mmc_send_tuning(mmc, opcode, NULL)) {
if (temperature < 10000)
phase_delay += i + 12;
else if (temperature < 20000)
phase_delay += i + 8;
else if (temperature < 70000)
phase_delay += i + 8;
else if (temperature < 90000)
phase_delay += i + 10;
else
phase_delay += i + 12;
goto single_failure_found;
}
}
single_failure_found:
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
if (!(reg & AC12_SCLK_SEL)) {
ret = -EIO;
goto tuning_error;
}
sdhci_omap_set_dll(omap_host, phase_delay);
omap_host->is_tuning = false;
goto ret;
tuning_error:
omap_host->is_tuning = false;
dev_err(dev, "Tuning failed\n");
sdhci_omap_disable_tuning(omap_host);
ret:
sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
/* Reenable forbidden interrupt */
if (dcrc_was_enabled)
host->ier |= SDHCI_INT_DATA_CRC;
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
return ret;
}
static int sdhci_omap_card_busy(struct mmc_host *mmc)
{
u32 reg, ac12;
int ret = false;
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_omap_host *omap_host;
u32 ier = host->ier;
pltfm_host = sdhci_priv(host);
omap_host = sdhci_pltfm_priv(pltfm_host);
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
ac12 = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
reg &= ~CON_CLKEXTFREE;
if (ac12 & AC12_V1V8_SIGEN)
reg |= CON_CLKEXTFREE;
reg |= CON_PADEN;
sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
disable_irq(host->irq);
ier |= SDHCI_INT_CARD_INT;
sdhci_writel(host, ier, SDHCI_INT_ENABLE);
sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
/*
* Delay is required for PSTATE to correctly reflect
* DLEV/CLEV values after PADEN is set.
*/
usleep_range(50, 100);
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_PSTATE);
if ((reg & PSTATE_DATI) || !(reg & PSTATE_DLEV_DAT0))
ret = true;
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
reg &= ~(CON_CLKEXTFREE | CON_PADEN);
sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
enable_irq(host->irq);
return ret;
}
static int sdhci_omap_start_signal_voltage_switch(struct mmc_host *mmc,
struct mmc_ios *ios)
{
u32 reg;
int ret;
unsigned int iov;
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_omap_host *omap_host;
struct device *dev;
pltfm_host = sdhci_priv(host);
omap_host = sdhci_pltfm_priv(pltfm_host);
dev = omap_host->dev;
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
if (!(reg & (CAPA_VS30 | CAPA_VS33)))
return -EOPNOTSUPP;
if (reg & CAPA_VS30)
iov = IOV_3V0;
else
iov = IOV_3V3;
sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage);
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
reg &= ~AC12_V1V8_SIGEN;
sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
if (!(reg & CAPA_VS18))
return -EOPNOTSUPP;
iov = IOV_1V8;
sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage);
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
reg |= AC12_V1V8_SIGEN;
sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
} else {
return -EOPNOTSUPP;
}
ret = sdhci_omap_enable_iov(omap_host, iov);
if (ret) {
dev_err(dev, "failed to switch IO voltage to %dmV\n", iov);
return ret;
}
dev_dbg(dev, "IO voltage switched to %dmV\n", iov);
return 0;
}
static void sdhci_omap_set_timing(struct sdhci_omap_host *omap_host, u8 timing)
{
int ret;
struct pinctrl_state *pinctrl_state;
struct device *dev = omap_host->dev;
if (!(omap_host->flags & SDHCI_OMAP_REQUIRE_IODELAY))
return;
if (omap_host->timing == timing)
return;
sdhci_omap_stop_clock(omap_host);
pinctrl_state = omap_host->pinctrl_state[timing];
ret = pinctrl_select_state(omap_host->pinctrl, pinctrl_state);
if (ret) {
dev_err(dev, "failed to select pinctrl state\n");
return;
}
sdhci_omap_start_clock(omap_host);
omap_host->timing = timing;
}
static void sdhci_omap_set_power_mode(struct sdhci_omap_host *omap_host,
u8 power_mode)
{
/* drop stale tuning state across a power cycle */
if (omap_host->power_mode == MMC_POWER_OFF)
sdhci_omap_disable_tuning(omap_host);
omap_host->power_mode = power_mode;
}
static void sdhci_omap_set_bus_mode(struct sdhci_omap_host *omap_host,
unsigned int mode)
{
u32 reg;
if (omap_host->bus_mode == mode)
return;
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
if (mode == MMC_BUSMODE_OPENDRAIN)
reg |= CON_OD;
else
reg &= ~CON_OD;
sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
omap_host->bus_mode = mode;
}
static void sdhci_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_omap_host *omap_host;
pltfm_host = sdhci_priv(host);
omap_host = sdhci_pltfm_priv(pltfm_host);
sdhci_omap_set_bus_mode(omap_host, ios->bus_mode);
sdhci_omap_set_timing(omap_host, ios->timing);
sdhci_set_ios(mmc, ios);
sdhci_omap_set_power_mode(omap_host, ios->power_mode);
}
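/*
* The card clock is an integer division of the functional clock. As
* an illustration (example rates only): with a 192 MHz fck and a
* 25 MHz request, DIV_ROUND_UP() yields a divisor of 8, i.e. a 24 MHz
* card clock; the divisor is clamped to the 10-bit SYSCTL_CLKD_MAX.
*/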
static u16 sdhci_omap_calc_divisor(struct sdhci_pltfm_host *host,
unsigned int clock)
{
u16 dsor;
dsor = DIV_ROUND_UP(clk_get_rate(host->clk), clock);
if (dsor > SYSCTL_CLKD_MAX)
dsor = SYSCTL_CLKD_MAX;
return dsor;
}
static void sdhci_omap_start_clock(struct sdhci_omap_host *omap_host)
{
u32 reg;
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL);
reg |= SYSCTL_CEN;
sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, reg);
}
static void sdhci_omap_stop_clock(struct sdhci_omap_host *omap_host)
{
u32 reg;
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL);
reg &= ~SYSCTL_CEN;
sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, reg);
}
static void sdhci_omap_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
unsigned long clkdiv;
sdhci_omap_stop_clock(omap_host);
if (!clock)
return;
clkdiv = sdhci_omap_calc_divisor(pltfm_host, clock);
clkdiv = (clkdiv & SYSCTL_CLKD_MASK) << SYSCTL_CLKD_SHIFT;
sdhci_enable_clk(host, clkdiv);
sdhci_omap_start_clock(omap_host);
}
static void sdhci_omap_set_power(struct sdhci_host *host, unsigned char mode,
unsigned short vdd)
{
struct mmc_host *mmc = host->mmc;
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
}
/*
* MMCHS_HL_HWINFO has the MADMA_EN bit set if the controller instance
* is connected to L3 interconnect and is bus master capable. Note that
* the MMCHS_HL_HWINFO register is in the module registers before the
* omap registers and sdhci registers. The offset can vary for omap
* registers depending on the SoC. Do not use sdhci_omap_readl() here.
*/
static bool sdhci_omap_has_adma(struct sdhci_omap_host *omap_host, int offset)
{
/* MMCHS_HL_HWINFO register is only available on omap4 and later */
if (offset < 0x200)
return false;
return readl(omap_host->base + 4) & 1;
}
static int sdhci_omap_enable_dma(struct sdhci_host *host)
{
u32 reg;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
reg &= ~CON_DMA_MASTER;
/* Switch to DMA slave mode when using external DMA */
if (!host->use_external_dma)
reg |= CON_DMA_MASTER;
sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
return 0;
}
static unsigned int sdhci_omap_get_min_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
return clk_get_rate(pltfm_host->clk) / SYSCTL_CLKD_MAX;
}
static void sdhci_omap_set_bus_width(struct sdhci_host *host, int width)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
u32 reg;
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
if (width == MMC_BUS_WIDTH_8)
reg |= CON_DW8;
else
reg &= ~CON_DW8;
sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
sdhci_set_bus_width(host, width);
}
static void sdhci_omap_init_74_clocks(struct sdhci_host *host, u8 power_mode)
{
u32 reg;
ktime_t timeout;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
if (omap_host->power_mode == power_mode)
return;
if (power_mode != MMC_POWER_ON)
return;
disable_irq(host->irq);
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
reg |= CON_INIT;
sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
sdhci_omap_writel(omap_host, SDHCI_OMAP_CMD, 0x0);
/* wait 1ms */
timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT);
while (1) {
bool timedout = ktime_after(ktime_get(), timeout);
if (sdhci_omap_readl(omap_host, SDHCI_OMAP_STAT) & INT_CC_EN)
break;
if (WARN_ON(timedout))
return;
usleep_range(5, 10);
}
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
reg &= ~CON_INIT;
sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
sdhci_omap_writel(omap_host, SDHCI_OMAP_STAT, INT_CC_EN);
enable_irq(host->irq);
}
static void sdhci_omap_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
u32 reg;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
sdhci_omap_stop_clock(omap_host);
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
if (timing == MMC_TIMING_UHS_DDR50 || timing == MMC_TIMING_MMC_DDR52)
reg |= CON_DDR;
else
reg &= ~CON_DDR;
sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
sdhci_set_uhs_signaling(host, timing);
sdhci_omap_start_clock(omap_host);
}
#define MMC_TIMEOUT_US 20000 /* 20000 us, i.e. 20 ms */
static void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
unsigned long limit = MMC_TIMEOUT_US;
unsigned long i = 0;
u32 sysc;
/* Save target module sysconfig configured by SoC PM layer */
if (mask & SDHCI_RESET_ALL)
sysc = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCONFIG);
/* Don't reset data lines during tuning operation */
if (omap_host->is_tuning)
mask &= ~SDHCI_RESET_DATA;
if (omap_host->flags & SDHCI_OMAP_SPECIAL_RESET) {
sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
while ((!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) &&
(i++ < limit))
udelay(1);
i = 0;
while ((sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) &&
(i++ < limit))
udelay(1);
if (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)
dev_err(mmc_dev(host->mmc),
"Timeout waiting on controller reset in %s\n",
__func__);
goto restore_sysc;
}
sdhci_reset(host, mask);
restore_sysc:
if (mask & SDHCI_RESET_ALL)
sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCONFIG, sysc);
}
#define CMD_ERR_MASK (SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX |\
SDHCI_INT_TIMEOUT)
#define CMD_MASK (CMD_ERR_MASK | SDHCI_INT_RESPONSE)
static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
if (omap_host->is_tuning && host->cmd && !host->data_early &&
(intmask & CMD_ERR_MASK)) {
/*
* Since we are not resetting data lines during tuning
* operation, data error or data complete interrupts
* might still arrive. Mark this request as a failure
* but still wait for the data interrupt
*/
if (intmask & SDHCI_INT_TIMEOUT)
host->cmd->error = -ETIMEDOUT;
else
host->cmd->error = -EILSEQ;
host->cmd = NULL;
/*
* Sometimes command error interrupts and command complete
* interrupt will arrive together. Clear all command related
* interrupts here.
*/
sdhci_writel(host, intmask & CMD_MASK, SDHCI_INT_STATUS);
intmask &= ~CMD_MASK;
}
return intmask;
}
static void sdhci_omap_set_timeout(struct sdhci_host *host,
struct mmc_command *cmd)
{
if (cmd->opcode == MMC_ERASE)
sdhci_set_data_timeout_irq(host, false);
__sdhci_set_timeout(host, cmd);
}
static struct sdhci_ops sdhci_omap_ops = {
.set_clock = sdhci_omap_set_clock,
.set_power = sdhci_omap_set_power,
.enable_dma = sdhci_omap_enable_dma,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_min_clock = sdhci_omap_get_min_clock,
.set_bus_width = sdhci_omap_set_bus_width,
.platform_send_init_74_clocks = sdhci_omap_init_74_clocks,
.reset = sdhci_omap_reset,
.set_uhs_signaling = sdhci_omap_set_uhs_signaling,
.irq = sdhci_omap_irq,
.set_timeout = sdhci_omap_set_timeout,
};
static unsigned int sdhci_omap_regulator_get_caps(struct device *dev,
const char *name)
{
struct regulator *reg;
unsigned int caps = 0;
reg = regulator_get(dev, name);
if (IS_ERR(reg))
return ~0U;
if (regulator_is_supported_voltage(reg, 1700000, 1950000))
caps |= SDHCI_CAN_VDD_180;
if (regulator_is_supported_voltage(reg, 2700000, 3150000))
caps |= SDHCI_CAN_VDD_300;
if (regulator_is_supported_voltage(reg, 3150000, 3600000))
caps |= SDHCI_CAN_VDD_330;
regulator_put(reg);
return caps;
}
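/*
* The voltage capability bits on these SoCs may have been programmed
* by the boot loader, so rebuild them from the voltage ranges that
* the "pbias" and "vqmmc" regulators can actually supply.
*/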
static int sdhci_omap_set_capabilities(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
struct device *dev = omap_host->dev;
const u32 mask = SDHCI_CAN_VDD_180 | SDHCI_CAN_VDD_300 | SDHCI_CAN_VDD_330;
unsigned int pbias, vqmmc, caps = 0;
u32 reg;
pbias = sdhci_omap_regulator_get_caps(dev, "pbias");
vqmmc = sdhci_omap_regulator_get_caps(dev, "vqmmc");
caps = pbias & vqmmc;
if (pbias != ~0U && vqmmc == ~0U)
dev_warn(dev, "vqmmc regulator missing for pbias\n");
else if (caps == ~0U)
return 0;
/*
* Quirk handling to allow 3.0V vqmmc with a valid 3.3V PBIAS. This is
* needed for 3.0V ldo9_reg on omap5 at least.
*/
if (pbias != ~0U && (pbias & SDHCI_CAN_VDD_330) &&
(vqmmc & SDHCI_CAN_VDD_300))
caps |= SDHCI_CAN_VDD_330;
/* voltage capabilities might be set by boot loader, clear it */
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
reg &= ~(CAPA_VS18 | CAPA_VS30 | CAPA_VS33);
if (caps & SDHCI_CAN_VDD_180)
reg |= CAPA_VS18;
if (caps & SDHCI_CAN_VDD_300)
reg |= CAPA_VS30;
if (caps & SDHCI_CAN_VDD_330)
reg |= CAPA_VS33;
sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, reg);
host->caps &= ~mask;
host->caps |= caps;
return 0;
}
static const struct sdhci_pltfm_data sdhci_omap_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
SDHCI_QUIRK_NO_HISPD_BIT |
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
.quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN |
SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_RSP_136_HAS_CRC |
SDHCI_QUIRK2_DISABLE_HW_TIMEOUT,
.ops = &sdhci_omap_ops,
};
static const struct sdhci_omap_data omap2430_data = {
.omap_offset = 0,
.offset = 0x100,
};
static const struct sdhci_omap_data omap3_data = {
.omap_offset = 0,
.offset = 0x100,
};
static const struct sdhci_omap_data omap4_data = {
.omap_offset = 0x100,
.offset = 0x200,
.flags = SDHCI_OMAP_SPECIAL_RESET,
};
static const struct sdhci_omap_data omap5_data = {
.omap_offset = 0x100,
.offset = 0x200,
.flags = SDHCI_OMAP_SPECIAL_RESET,
};
static const struct sdhci_omap_data k2g_data = {
.omap_offset = 0x100,
.offset = 0x200,
};
static const struct sdhci_omap_data am335_data = {
.omap_offset = 0x100,
.offset = 0x200,
.flags = SDHCI_OMAP_SPECIAL_RESET,
};
static const struct sdhci_omap_data am437_data = {
.omap_offset = 0x100,
.offset = 0x200,
.flags = SDHCI_OMAP_SPECIAL_RESET,
};
static const struct sdhci_omap_data dra7_data = {
.omap_offset = 0x100,
.offset = 0x200,
.flags = SDHCI_OMAP_REQUIRE_IODELAY,
};
static const struct of_device_id omap_sdhci_match[] = {
{ .compatible = "ti,omap2430-sdhci", .data = &omap2430_data },
{ .compatible = "ti,omap3-sdhci", .data = &omap3_data },
{ .compatible = "ti,omap4-sdhci", .data = &omap4_data },
{ .compatible = "ti,omap5-sdhci", .data = &omap5_data },
{ .compatible = "ti,dra7-sdhci", .data = &dra7_data },
{ .compatible = "ti,k2g-sdhci", .data = &k2g_data },
{ .compatible = "ti,am335-sdhci", .data = &am335_data },
{ .compatible = "ti,am437-sdhci", .data = &am437_data },
{},
};
MODULE_DEVICE_TABLE(of, omap_sdhci_match);
static struct pinctrl_state
*sdhci_omap_iodelay_pinctrl_state(struct sdhci_omap_host *omap_host, char *mode,
u32 *caps, u32 capmask)
{
struct device *dev = omap_host->dev;
char *version = omap_host->version;
struct pinctrl_state *pinctrl_state = ERR_PTR(-ENODEV);
char str[20];
if (!(*caps & capmask))
goto ret;
if (version) {
snprintf(str, 20, "%s-%s", mode, version);
pinctrl_state = pinctrl_lookup_state(omap_host->pinctrl, str);
}
if (IS_ERR(pinctrl_state))
pinctrl_state = pinctrl_lookup_state(omap_host->pinctrl, mode);
if (IS_ERR(pinctrl_state)) {
dev_err(dev, "no pinctrl state for %s mode", mode);
*caps &= ~capmask;
}
ret:
return pinctrl_state;
}
static int sdhci_omap_config_iodelay_pinctrl_state(struct sdhci_omap_host
*omap_host)
{
struct device *dev = omap_host->dev;
struct sdhci_host *host = omap_host->host;
struct mmc_host *mmc = host->mmc;
u32 *caps = &mmc->caps;
u32 *caps2 = &mmc->caps2;
struct pinctrl_state *state;
struct pinctrl_state **pinctrl_state;
if (!(omap_host->flags & SDHCI_OMAP_REQUIRE_IODELAY))
return 0;
pinctrl_state = devm_kcalloc(dev,
MMC_TIMING_MMC_HS200 + 1,
sizeof(*pinctrl_state),
GFP_KERNEL);
if (!pinctrl_state)
return -ENOMEM;
omap_host->pinctrl = devm_pinctrl_get(omap_host->dev);
if (IS_ERR(omap_host->pinctrl)) {
dev_err(dev, "Cannot get pinctrl\n");
return PTR_ERR(omap_host->pinctrl);
}
state = pinctrl_lookup_state(omap_host->pinctrl, "default");
if (IS_ERR(state)) {
dev_err(dev, "no pinctrl state for default mode\n");
return PTR_ERR(state);
}
pinctrl_state[MMC_TIMING_LEGACY] = state;
state = sdhci_omap_iodelay_pinctrl_state(omap_host, "sdr104", caps,
MMC_CAP_UHS_SDR104);
if (!IS_ERR(state))
pinctrl_state[MMC_TIMING_UHS_SDR104] = state;
state = sdhci_omap_iodelay_pinctrl_state(omap_host, "ddr50", caps,
MMC_CAP_UHS_DDR50);
if (!IS_ERR(state))
pinctrl_state[MMC_TIMING_UHS_DDR50] = state;
state = sdhci_omap_iodelay_pinctrl_state(omap_host, "sdr50", caps,
MMC_CAP_UHS_SDR50);
if (!IS_ERR(state))
pinctrl_state[MMC_TIMING_UHS_SDR50] = state;
state = sdhci_omap_iodelay_pinctrl_state(omap_host, "sdr25", caps,
MMC_CAP_UHS_SDR25);
if (!IS_ERR(state))
pinctrl_state[MMC_TIMING_UHS_SDR25] = state;
state = sdhci_omap_iodelay_pinctrl_state(omap_host, "sdr12", caps,
MMC_CAP_UHS_SDR12);
if (!IS_ERR(state))
pinctrl_state[MMC_TIMING_UHS_SDR12] = state;
state = sdhci_omap_iodelay_pinctrl_state(omap_host, "ddr_1_8v", caps,
MMC_CAP_1_8V_DDR);
if (!IS_ERR(state)) {
pinctrl_state[MMC_TIMING_MMC_DDR52] = state;
} else {
state = sdhci_omap_iodelay_pinctrl_state(omap_host, "ddr_3_3v",
caps,
MMC_CAP_3_3V_DDR);
if (!IS_ERR(state))
pinctrl_state[MMC_TIMING_MMC_DDR52] = state;
}
state = sdhci_omap_iodelay_pinctrl_state(omap_host, "hs", caps,
MMC_CAP_SD_HIGHSPEED);
if (!IS_ERR(state))
pinctrl_state[MMC_TIMING_SD_HS] = state;
state = sdhci_omap_iodelay_pinctrl_state(omap_host, "hs", caps,
MMC_CAP_MMC_HIGHSPEED);
if (!IS_ERR(state))
pinctrl_state[MMC_TIMING_MMC_HS] = state;
state = sdhci_omap_iodelay_pinctrl_state(omap_host, "hs200_1_8v", caps2,
MMC_CAP2_HS200_1_8V_SDR);
if (!IS_ERR(state))
pinctrl_state[MMC_TIMING_MMC_HS200] = state;
omap_host->pinctrl_state = pinctrl_state;
return 0;
}
static const struct soc_device_attribute sdhci_omap_soc_devices[] = {
{
.machine = "DRA7[45]*",
.revision = "ES1.[01]",
},
{
/* sentinel */
}
};
static int sdhci_omap_probe(struct platform_device *pdev)
{
int ret;
u32 offset;
struct device *dev = &pdev->dev;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_omap_host *omap_host;
struct mmc_host *mmc;
const struct sdhci_omap_data *data;
const struct soc_device_attribute *soc;
struct resource *regs;
data = of_device_get_match_data(&pdev->dev);
if (!data) {
dev_err(dev, "no sdhci omap data\n");
return -EINVAL;
}
offset = data->offset;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs)
return -ENXIO;
host = sdhci_pltfm_init(pdev, &sdhci_omap_pdata,
sizeof(*omap_host));
if (IS_ERR(host)) {
dev_err(dev, "Failed sdhci_pltfm_init\n");
return PTR_ERR(host);
}
pltfm_host = sdhci_priv(host);
omap_host = sdhci_pltfm_priv(pltfm_host);
omap_host->host = host;
omap_host->base = host->ioaddr;
omap_host->dev = dev;
omap_host->power_mode = MMC_POWER_UNDEFINED;
omap_host->timing = MMC_TIMING_LEGACY;
omap_host->flags = data->flags;
omap_host->omap_offset = data->omap_offset;
omap_host->con = -EINVAL; /* Prevent invalid restore on first resume */
host->ioaddr += offset;
host->mapbase = regs->start + offset;
mmc = host->mmc;
sdhci_get_of_property(pdev);
ret = mmc_of_parse(mmc);
if (ret)
goto err_pltfm_free;
soc = soc_device_match(sdhci_omap_soc_devices);
if (soc) {
omap_host->version = "rev11";
if (!strcmp(dev_name(dev), "4809c000.mmc"))
mmc->f_max = 96000000;
if (!strcmp(dev_name(dev), "480b4000.mmc"))
mmc->f_max = 48000000;
if (!strcmp(dev_name(dev), "480ad000.mmc"))
mmc->f_max = 48000000;
}
if (!mmc_can_gpio_ro(mmc))
mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
pltfm_host->clk = devm_clk_get(dev, "fck");
if (IS_ERR(pltfm_host->clk)) {
ret = PTR_ERR(pltfm_host->clk);
goto err_pltfm_free;
}
ret = clk_set_rate(pltfm_host->clk, mmc->f_max);
if (ret) {
dev_err(dev, "failed to set clock to %d\n", mmc->f_max);
goto err_pltfm_free;
}
omap_host->pbias = devm_regulator_get_optional(dev, "pbias");
if (IS_ERR(omap_host->pbias)) {
ret = PTR_ERR(omap_host->pbias);
if (ret != -ENODEV)
goto err_pltfm_free;
dev_dbg(dev, "unable to get pbias regulator %d\n", ret);
}
omap_host->pbias_enabled = false;
/*
* omap_device_pm_domain has callbacks to enable the main
* functional clock, interface clock and also configure the
* SYSCONFIG register to clear any boot loader set voltage
* capabilities before calling sdhci_setup_host(). The
* callback will be invoked as part of pm_runtime_get_sync.
*/
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 50);
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret) {
dev_err(dev, "pm_runtime_get_sync failed\n");
goto err_rpm_disable;
}
ret = sdhci_omap_set_capabilities(host);
if (ret) {
dev_err(dev, "failed to set system capabilities\n");
goto err_rpm_put;
}
host->mmc_host_ops.start_signal_voltage_switch =
sdhci_omap_start_signal_voltage_switch;
host->mmc_host_ops.set_ios = sdhci_omap_set_ios;
host->mmc_host_ops.card_busy = sdhci_omap_card_busy;
host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning;
host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq;
/*
* Switch to external DMA only if there is the "dmas" property and
* ADMA is not available on the controller instance.
*/
if (device_property_present(dev, "dmas") &&
!sdhci_omap_has_adma(omap_host, offset))
sdhci_switch_external_dma(host, true);
if (device_property_read_bool(dev, "ti,non-removable")) {
dev_warn_once(dev, "using old ti,non-removable property\n");
mmc->caps |= MMC_CAP_NONREMOVABLE;
}
/* An R1B response is required to properly manage HW busy detection. */
mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
/* Allow card power off and runtime PM for eMMC/SD card devices */
mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_AGGRESSIVE_PM;
ret = sdhci_setup_host(host);
if (ret)
goto err_rpm_put;
ret = sdhci_omap_config_iodelay_pinctrl_state(omap_host);
if (ret)
goto err_cleanup_host;
ret = __sdhci_add_host(host);
if (ret)
goto err_cleanup_host;
/*
* SDIO devices can use the dat1 pin as a wake-up interrupt. Some
* devices like wl1xxx, use an out-of-band GPIO interrupt instead.
*/
omap_host->wakeirq = of_irq_get_byname(dev->of_node, "wakeup");
if (omap_host->wakeirq == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto err_cleanup_host;
}
if (omap_host->wakeirq > 0) {
device_init_wakeup(dev, true);
ret = dev_pm_set_dedicated_wake_irq(dev, omap_host->wakeirq);
if (ret) {
device_init_wakeup(dev, false);
goto err_cleanup_host;
}
host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
}
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
err_cleanup_host:
sdhci_cleanup_host(host);
err_rpm_put:
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
err_rpm_disable:
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
err_pltfm_free:
sdhci_pltfm_free(pdev);
return ret;
}
static void sdhci_omap_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sdhci_host *host = platform_get_drvdata(pdev);
pm_runtime_get_sync(dev);
sdhci_remove_host(host, true);
device_init_wakeup(dev, false);
dev_pm_clear_wake_irq(dev);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_put_sync(dev);
/* Ensure device gets disabled despite userspace sysfs config */
pm_runtime_force_suspend(dev);
sdhci_pltfm_free(pdev);
}
#ifdef CONFIG_PM
static void __maybe_unused sdhci_omap_context_save(struct sdhci_omap_host *omap_host)
{
omap_host->con = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
omap_host->hctl = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL);
omap_host->sysctl = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL);
omap_host->capa = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
omap_host->ie = sdhci_omap_readl(omap_host, SDHCI_OMAP_IE);
omap_host->ise = sdhci_omap_readl(omap_host, SDHCI_OMAP_ISE);
}
/* Order matters here: HCTL must be restored in two phases */
static void __maybe_unused sdhci_omap_context_restore(struct sdhci_omap_host *omap_host)
{
sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, omap_host->hctl);
sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, omap_host->capa);
sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, omap_host->hctl);
sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, omap_host->sysctl);
sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, omap_host->con);
sdhci_omap_writel(omap_host, SDHCI_OMAP_IE, omap_host->ie);
sdhci_omap_writel(omap_host, SDHCI_OMAP_ISE, omap_host->ise);
}
static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
if (omap_host->con != -EINVAL)
sdhci_runtime_suspend_host(host);
sdhci_omap_context_save(omap_host);
pinctrl_pm_select_idle_state(dev);
return 0;
}
static int __maybe_unused sdhci_omap_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
pinctrl_pm_select_default_state(dev);
if (omap_host->con != -EINVAL) {
sdhci_omap_context_restore(omap_host);
sdhci_runtime_resume_host(host, 0);
}
return 0;
}
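/*
* Note: probe initializes omap_host->con to -EINVAL, so the restore path
* above is skipped until sdhci_omap_context_save() has captured a valid
* context, preventing a bogus register restore on the first resume.
*/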
#endif
static const struct dev_pm_ops sdhci_omap_dev_pm_ops = {
SET_RUNTIME_PM_OPS(sdhci_omap_runtime_suspend,
sdhci_omap_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};
static struct platform_driver sdhci_omap_driver = {
.probe = sdhci_omap_probe,
.remove_new = sdhci_omap_remove,
.driver = {
.name = "sdhci-omap",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &sdhci_omap_dev_pm_ops,
.of_match_table = omap_sdhci_match,
},
};
module_platform_driver(sdhci_omap_driver);
MODULE_DESCRIPTION("SDHCI driver for OMAP SoCs");
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sdhci_omap");
| linux-master | drivers/mmc/host/sdhci-omap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018 HiSilicon Technologies Co., Ltd.
*/
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
#define ALL_INT_CLR 0x1ffff
struct hi3798cv200_priv {
struct clk *sample_clk;
struct clk *drive_clk;
};
static void dw_mci_hi3798cv200_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
struct hi3798cv200_priv *priv = host->priv;
u32 val;
val = mci_readl(host, UHS_REG);
if (ios->timing == MMC_TIMING_MMC_DDR52 ||
ios->timing == MMC_TIMING_UHS_DDR50)
val |= SDMMC_UHS_DDR;
else
val &= ~SDMMC_UHS_DDR;
mci_writel(host, UHS_REG, val);
val = mci_readl(host, ENABLE_SHIFT);
if (ios->timing == MMC_TIMING_MMC_DDR52)
val |= SDMMC_ENABLE_PHASE;
else
val &= ~SDMMC_ENABLE_PHASE;
mci_writel(host, ENABLE_SHIFT, val);
val = mci_readl(host, DDR_REG);
if (ios->timing == MMC_TIMING_MMC_HS400)
val |= SDMMC_DDR_HS400;
else
val &= ~SDMMC_DDR_HS400;
mci_writel(host, DDR_REG, val);
if (ios->timing == MMC_TIMING_MMC_HS ||
ios->timing == MMC_TIMING_LEGACY)
clk_set_phase(priv->drive_clk, 180);
else if (ios->timing == MMC_TIMING_MMC_HS200)
clk_set_phase(priv->drive_clk, 135);
}
static int dw_mci_hi3798cv200_execute_tuning(struct dw_mci_slot *slot,
u32 opcode)
{
static const int degrees[] = { 0, 45, 90, 135, 180, 225, 270, 315 };
struct dw_mci *host = slot->host;
struct hi3798cv200_priv *priv = host->priv;
int raise_point = -1, fall_point = -1;
int err, prev_err = -1;
int found = 0;
int i;
for (i = 0; i < ARRAY_SIZE(degrees); i++) {
clk_set_phase(priv->sample_clk, degrees[i]);
mci_writel(host, RINTSTS, ALL_INT_CLR);
err = mmc_send_tuning(slot->mmc, opcode, NULL);
if (!err)
found = 1;
if (i > 0) {
if (err && !prev_err)
fall_point = i - 1;
if (!err && prev_err)
raise_point = i;
}
if (raise_point != -1 && fall_point != -1)
goto tuning_out;
prev_err = err;
err = 0;
}
tuning_out:
if (found) {
if (raise_point == -1)
raise_point = 0;
if (fall_point == -1)
fall_point = ARRAY_SIZE(degrees) - 1;
if (fall_point < raise_point) {
if ((raise_point + fall_point) >
(ARRAY_SIZE(degrees) - 1))
i = fall_point / 2;
else
i = (raise_point + ARRAY_SIZE(degrees) - 1) / 2;
} else {
i = (raise_point + fall_point) / 2;
}
clk_set_phase(priv->sample_clk, degrees[i]);
dev_dbg(host->dev, "Tuning clk_sample[%d, %d], set[%d]\n",
raise_point, fall_point, degrees[i]);
} else {
dev_err(host->dev, "No valid clk_sample shift! use default\n");
err = -EINVAL;
}
mci_writel(host, RINTSTS, ALL_INT_CLR);
return err;
}
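/*
* Worked example (illustrative): with a pass/fail pattern of
*
*   index:  0 1 2 3 4 5 6 7
*   result: P P F F F F P P     (P = pass, F = fail)
*
* the loop records fall_point = 1 (last pass before a failure) and
* raise_point = 6 (first pass after a failure). fall_point < raise_point
* means the passing window wraps around the phase table; raise_point +
* fall_point = 7 is not greater than ARRAY_SIZE(degrees) - 1, so
* i = (6 + 7) / 2 = 6 and the 270 degree phase is selected, which lies
* inside the wrapped passing run {6, 7, 0, 1}.
*/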
static int dw_mci_hi3798cv200_init(struct dw_mci *host)
{
struct hi3798cv200_priv *priv;
int ret;
priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->sample_clk = devm_clk_get(host->dev, "ciu-sample");
if (IS_ERR(priv->sample_clk)) {
dev_err(host->dev, "failed to get ciu-sample clock\n");
return PTR_ERR(priv->sample_clk);
}
priv->drive_clk = devm_clk_get(host->dev, "ciu-drive");
if (IS_ERR(priv->drive_clk)) {
dev_err(host->dev, "failed to get ciu-drive clock\n");
return PTR_ERR(priv->drive_clk);
}
ret = clk_prepare_enable(priv->sample_clk);
if (ret) {
dev_err(host->dev, "failed to enable ciu-sample clock\n");
return ret;
}
ret = clk_prepare_enable(priv->drive_clk);
if (ret) {
dev_err(host->dev, "failed to enable ciu-drive clock\n");
goto disable_sample_clk;
}
host->priv = priv;
return 0;
disable_sample_clk:
clk_disable_unprepare(priv->sample_clk);
return ret;
}
static const struct dw_mci_drv_data hi3798cv200_data = {
.common_caps = MMC_CAP_CMD23,
.init = dw_mci_hi3798cv200_init,
.set_ios = dw_mci_hi3798cv200_set_ios,
.execute_tuning = dw_mci_hi3798cv200_execute_tuning,
};
static int dw_mci_hi3798cv200_probe(struct platform_device *pdev)
{
return dw_mci_pltfm_register(pdev, &hi3798cv200_data);
}
static void dw_mci_hi3798cv200_remove(struct platform_device *pdev)
{
struct dw_mci *host = platform_get_drvdata(pdev);
struct hi3798cv200_priv *priv = host->priv;
clk_disable_unprepare(priv->drive_clk);
clk_disable_unprepare(priv->sample_clk);
dw_mci_pltfm_remove(pdev);
}
static const struct of_device_id dw_mci_hi3798cv200_match[] = {
{ .compatible = "hisilicon,hi3798cv200-dw-mshc", },
{},
};
MODULE_DEVICE_TABLE(of, dw_mci_hi3798cv200_match);
static struct platform_driver dw_mci_hi3798cv200_driver = {
.probe = dw_mci_hi3798cv200_probe,
.remove_new = dw_mci_hi3798cv200_remove,
.driver = {
.name = "dwmmc_hi3798cv200",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = dw_mci_hi3798cv200_match,
},
};
module_platform_driver(dw_mci_hi3798cv200_driver);
MODULE_DESCRIPTION("HiSilicon Hi3798CV200 Specific DW-MSHC Driver Extension");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dwmmc_hi3798cv200");
| linux-master | drivers/mmc/host/dw_mmc-hi3798cv200.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* sdhci-dove.c Support for SDHCI on Marvell's Dove SoC
*
* Author: Saeed Bishara <[email protected]>
* Mike Rapoport <[email protected]>
* Based on sdhci-cns3xxx.c
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/of.h>
#include "sdhci-pltfm.h"
static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
{
u16 ret;
switch (reg) {
case SDHCI_HOST_VERSION:
case SDHCI_SLOT_INT_STATUS:
/* those registers don't exist */
return 0;
default:
ret = readw(host->ioaddr + reg);
}
return ret;
}
static u32 sdhci_dove_readl(struct sdhci_host *host, int reg)
{
u32 ret;
ret = readl(host->ioaddr + reg);
switch (reg) {
case SDHCI_CAPABILITIES:
/* Mask the support for 3.0V */
ret &= ~SDHCI_CAN_VDD_300;
break;
}
return ret;
}
static const struct sdhci_ops sdhci_dove_ops = {
.read_w = sdhci_dove_readw,
.read_l = sdhci_dove_readl,
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
static const struct sdhci_pltfm_data sdhci_dove_pdata = {
.ops = &sdhci_dove_ops,
.quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
SDHCI_QUIRK_NO_BUSY_IRQ |
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_FORCE_DMA |
SDHCI_QUIRK_NO_HISPD_BIT,
};
static int sdhci_dove_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
int ret;
host = sdhci_pltfm_init(pdev, &sdhci_dove_pdata, 0);
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
pltfm_host->clk = devm_clk_get_enabled(&pdev->dev, NULL);
ret = mmc_of_parse(host->mmc);
if (ret)
goto err_sdhci_add;
ret = sdhci_add_host(host);
if (ret)
goto err_sdhci_add;
return 0;
err_sdhci_add:
sdhci_pltfm_free(pdev);
return ret;
}
static const struct of_device_id sdhci_dove_of_match_table[] = {
{ .compatible = "marvell,dove-sdhci", },
{}
};
MODULE_DEVICE_TABLE(of, sdhci_dove_of_match_table);
static struct platform_driver sdhci_dove_driver = {
.driver = {
.name = "sdhci-dove",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &sdhci_pltfm_pmops,
.of_match_table = sdhci_dove_of_match_table,
},
.probe = sdhci_dove_probe,
.remove_new = sdhci_pltfm_remove,
};
module_platform_driver(sdhci_dove_driver);
MODULE_DESCRIPTION("SDHCI driver for Dove");
MODULE_AUTHOR("Saeed Bishara <[email protected]>, "
"Mike Rapoport <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mmc/host/sdhci-dove.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Exynos Specific Extensions for Synopsys DW Multimedia Card Interface driver
*
* Copyright (C) 2012, Samsung Electronics Co., Ltd.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
#include "dw_mmc-exynos.h"
/* Variations in Exynos specific dw-mshc controller */
enum dw_mci_exynos_type {
DW_MCI_TYPE_EXYNOS4210,
DW_MCI_TYPE_EXYNOS4412,
DW_MCI_TYPE_EXYNOS5250,
DW_MCI_TYPE_EXYNOS5420,
DW_MCI_TYPE_EXYNOS5420_SMU,
DW_MCI_TYPE_EXYNOS7,
DW_MCI_TYPE_EXYNOS7_SMU,
DW_MCI_TYPE_ARTPEC8,
};
/* Exynos implementation specific driver private data */
struct dw_mci_exynos_priv_data {
enum dw_mci_exynos_type ctrl_type;
u8 ciu_div;
u32 sdr_timing;
u32 ddr_timing;
u32 hs400_timing;
u32 tuned_sample;
u32 cur_speed;
u32 dqs_delay;
u32 saved_dqs_en;
u32 saved_strobe_ctrl;
};
static struct dw_mci_exynos_compatible {
char *compatible;
enum dw_mci_exynos_type ctrl_type;
} exynos_compat[] = {
{
.compatible = "samsung,exynos4210-dw-mshc",
.ctrl_type = DW_MCI_TYPE_EXYNOS4210,
}, {
.compatible = "samsung,exynos4412-dw-mshc",
.ctrl_type = DW_MCI_TYPE_EXYNOS4412,
}, {
.compatible = "samsung,exynos5250-dw-mshc",
.ctrl_type = DW_MCI_TYPE_EXYNOS5250,
}, {
.compatible = "samsung,exynos5420-dw-mshc",
.ctrl_type = DW_MCI_TYPE_EXYNOS5420,
}, {
.compatible = "samsung,exynos5420-dw-mshc-smu",
.ctrl_type = DW_MCI_TYPE_EXYNOS5420_SMU,
}, {
.compatible = "samsung,exynos7-dw-mshc",
.ctrl_type = DW_MCI_TYPE_EXYNOS7,
}, {
.compatible = "samsung,exynos7-dw-mshc-smu",
.ctrl_type = DW_MCI_TYPE_EXYNOS7_SMU,
}, {
.compatible = "axis,artpec8-dw-mshc",
.ctrl_type = DW_MCI_TYPE_ARTPEC8,
},
};
static inline u8 dw_mci_exynos_get_ciu_div(struct dw_mci *host)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412)
return EXYNOS4412_FIXED_CIU_CLK_DIV;
else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210)
return EXYNOS4210_FIXED_CIU_CLK_DIV;
else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL64)) + 1;
else
return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL)) + 1;
}
static void dw_mci_exynos_config_smu(struct dw_mci *host)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
/*
* If the Exynos variant provides the security management unit (SMU),
* configure it for non-encryption mode at this time.
*/
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) {
mci_writel(host, MPSBEGIN0, 0);
mci_writel(host, MPSEND0, SDMMC_ENDING_SEC_NR_MAX);
mci_writel(host, MPSCTRL0, SDMMC_MPSCTRL_SECURE_WRITE_BIT |
SDMMC_MPSCTRL_NON_SECURE_READ_BIT |
SDMMC_MPSCTRL_VALID |
SDMMC_MPSCTRL_NON_SECURE_WRITE_BIT);
}
}
static int dw_mci_exynos_priv_init(struct dw_mci *host)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
dw_mci_exynos_config_smu(host);
if (priv->ctrl_type >= DW_MCI_TYPE_EXYNOS5420) {
priv->saved_strobe_ctrl = mci_readl(host, HS400_DLINE_CTRL);
priv->saved_dqs_en = mci_readl(host, HS400_DQS_EN);
priv->saved_dqs_en |= AXI_NON_BLOCKING_WR;
mci_writel(host, HS400_DQS_EN, priv->saved_dqs_en);
if (!priv->dqs_delay)
priv->dqs_delay =
DQS_CTRL_GET_RD_DELAY(priv->saved_strobe_ctrl);
}
if (priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) {
/* Quirk needed for the ARTPEC-8 SoC */
host->quirks |= DW_MMC_QUIRK_EXTENDED_TMOUT;
}
host->bus_hz /= (priv->ciu_div + 1);
return 0;
}
static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
u32 clksel;
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
clksel = mci_readl(host, CLKSEL);
clksel = (clksel & ~SDMMC_CLKSEL_TIMING_MASK) | timing;
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
mci_writel(host, CLKSEL, clksel);
/*
* Exynos4412 and Exynos5250 extend the use of the CMD register with the
* use of bit 29 (which is reserved on standard MSHC controllers) for
* optionally bypassing the HOLD register for command and data. The
* HOLD register should be bypassed in case there is no phase shift
* applied on CMD/DATA that is sent to the card.
*/
if (!SDMMC_CLKSEL_GET_DRV_WD3(clksel) && host->slot)
set_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags);
}
#ifdef CONFIG_PM
static int dw_mci_exynos_runtime_resume(struct device *dev)
{
struct dw_mci *host = dev_get_drvdata(dev);
int ret;
ret = dw_mci_runtime_resume(dev);
if (ret)
return ret;
dw_mci_exynos_config_smu(host);
return ret;
}
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
/**
* dw_mci_exynos_suspend_noirq - Exynos-specific suspend code
* @dev: Device to suspend (this device)
*
* This ensures that the device will be in the runtime-active state in
* dw_mci_exynos_resume_noirq() after pm_runtime_force_resume() is called.
*/
static int dw_mci_exynos_suspend_noirq(struct device *dev)
{
pm_runtime_get_noresume(dev);
return pm_runtime_force_suspend(dev);
}
/**
* dw_mci_exynos_resume_noirq - Exynos-specific resume code
* @dev: Device to resume (this device)
*
* On exynos5420 there is a silicon errata that will sometimes leave the
* WAKEUP_INT bit in the CLKSEL register asserted. This bit is 1 to indicate
* that it fired and we can clear it by writing a 1 back. Clear it to prevent
* interrupts from going off constantly.
*
* We run this code on all exynos variants because it doesn't hurt.
*/
static int dw_mci_exynos_resume_noirq(struct device *dev)
{
struct dw_mci *host = dev_get_drvdata(dev);
struct dw_mci_exynos_priv_data *priv = host->priv;
u32 clksel;
int ret;
ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
clksel = mci_readl(host, CLKSEL);
if (clksel & SDMMC_CLKSEL_WAKEUP_INT) {
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
mci_writel(host, CLKSEL, clksel);
}
pm_runtime_put(dev);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
u32 dqs, strobe;
/*
* Configuring the HS400-related registers is not supported
* on these controllers.
*/
if ((priv->ctrl_type < DW_MCI_TYPE_EXYNOS5420) ||
(priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)) {
if (timing == MMC_TIMING_MMC_HS400)
dev_warn(host->dev,
"cannot configure HS400, unsupported chipset\n");
return;
}
dqs = priv->saved_dqs_en;
strobe = priv->saved_strobe_ctrl;
if (timing == MMC_TIMING_MMC_HS400) {
dqs |= DATA_STROBE_EN;
strobe = DQS_CTRL_RD_DELAY(strobe, priv->dqs_delay);
} else if (timing == MMC_TIMING_UHS_SDR104) {
dqs &= 0xffffff00;
} else {
dqs &= ~DATA_STROBE_EN;
}
mci_writel(host, HS400_DQS_EN, dqs);
mci_writel(host, HS400_DLINE_CTRL, strobe);
}
static void dw_mci_exynos_adjust_clock(struct dw_mci *host, unsigned int wanted)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
unsigned long actual;
u8 div;
int ret;
/*
* Don't care if wanted clock is zero or
* ciu clock is unavailable
*/
if (!wanted || IS_ERR(host->ciu_clk))
return;
/* Guaranteed minimum frequency for cclkin */
if (wanted < EXYNOS_CCLKIN_MIN)
wanted = EXYNOS_CCLKIN_MIN;
if (wanted == priv->cur_speed)
return;
div = dw_mci_exynos_get_ciu_div(host);
ret = clk_set_rate(host->ciu_clk, wanted * div);
if (ret)
dev_warn(host->dev,
"failed to set clk-rate %u error: %d\n",
wanted * div, ret);
actual = clk_get_rate(host->ciu_clk);
host->bus_hz = actual / div;
priv->cur_speed = wanted;
host->current_speed = 0;
}
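/*
* Worked example (illustrative values): a CLKSEL divider field of 3 makes
* dw_mci_exynos_get_ciu_div() return div = 4. A request for a 50 MHz card
* clock therefore asks the clock framework for 50 MHz * 4 = 200 MHz on
* ciu_clk, and bus_hz is recomputed from the rate actually granted (e.g.
* 200 MHz / 4 = 50 MHz). host->current_speed is reset to 0 so that the
* dw_mmc core reprograms its internal divider on the next clock update.
*/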
static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
unsigned int wanted = ios->clock;
u32 timing = ios->timing, clksel;
switch (timing) {
case MMC_TIMING_MMC_HS400:
/* Update tuned sample timing */
clksel = SDMMC_CLKSEL_UP_SAMPLE(
priv->hs400_timing, priv->tuned_sample);
wanted <<= 1;
break;
case MMC_TIMING_MMC_DDR52:
clksel = priv->ddr_timing;
/* Should be double rate for DDR mode */
if (ios->bus_width == MMC_BUS_WIDTH_8)
wanted <<= 1;
break;
case MMC_TIMING_UHS_SDR104:
case MMC_TIMING_UHS_SDR50:
clksel = (priv->sdr_timing & 0xfff8ffff) |
(priv->ciu_div << 16);
break;
case MMC_TIMING_UHS_DDR50:
clksel = (priv->ddr_timing & 0xfff8ffff) |
(priv->ciu_div << 16);
break;
default:
clksel = priv->sdr_timing;
}
/* Set clock timing for the requested speed mode */
dw_mci_exynos_set_clksel_timing(host, clksel);
/* Configure setting for HS400 */
dw_mci_exynos_config_hs400(host, timing);
/* Configure clock rate */
dw_mci_exynos_adjust_clock(host, wanted);
}
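/*
* Worked example (illustrative values): in 8-bit DDR52 mode at a requested
* 52 MHz card clock, data is transferred on both clock edges, so "wanted"
* is doubled to 104 MHz before dw_mci_exynos_adjust_clock() is called.
* HS400 doubles the requested rate unconditionally for the same reason.
*/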
static int dw_mci_exynos_parse_dt(struct dw_mci *host)
{
struct dw_mci_exynos_priv_data *priv;
struct device_node *np = host->dev->of_node;
u32 timing[2];
u32 div = 0;
int idx;
int ret;
priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) {
if (of_device_is_compatible(np, exynos_compat[idx].compatible))
priv->ctrl_type = exynos_compat[idx].ctrl_type;
}
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412)
priv->ciu_div = EXYNOS4412_FIXED_CIU_CLK_DIV - 1;
else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210)
priv->ciu_div = EXYNOS4210_FIXED_CIU_CLK_DIV - 1;
else {
of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div);
priv->ciu_div = div;
}
ret = of_property_read_u32_array(np,
"samsung,dw-mshc-sdr-timing", timing, 2);
if (ret)
return ret;
priv->sdr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div);
ret = of_property_read_u32_array(np,
"samsung,dw-mshc-ddr-timing", timing, 2);
if (ret)
return ret;
priv->ddr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div);
ret = of_property_read_u32_array(np,
"samsung,dw-mshc-hs400-timing", timing, 2);
if (!ret && of_property_read_u32(np,
"samsung,read-strobe-delay", &priv->dqs_delay))
dev_dbg(host->dev,
"read-strobe-delay is not found, assuming usage of default value\n");
priv->hs400_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1],
HS400_FIXED_CIU_CLK_DIV);
host->priv = priv;
return 0;
}
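/*
* Illustrative devicetree fragment (placeholder values, not board
* recommendations) showing the properties parsed above:
*
*   mmc@12200000 {
*           compatible = "samsung,exynos5250-dw-mshc";
*           samsung,dw-mshc-ciu-div = <3>;
*           samsung,dw-mshc-sdr-timing = <2 3>;
*           samsung,dw-mshc-ddr-timing = <1 2>;
*   };
*
* The two cells of each timing property are the CIU clock phase shifts for
* the transmit and receive paths, combined with the divider through
* SDMMC_CLKSEL_TIMING().
*/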
static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL64));
else
return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL));
}
static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample)
{
u32 clksel;
struct dw_mci_exynos_priv_data *priv = host->priv;
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
clksel = mci_readl(host, CLKSEL);
clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample);
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
mci_writel(host, CLKSEL, clksel);
}
static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
u32 clksel;
u8 sample;
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
clksel = mci_readl(host, CLKSEL);
sample = (clksel + 1) & 0x7;
clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample);
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
mci_writel(host, CLKSEL, clksel);
return sample;
}
static s8 dw_mci_exynos_get_best_clksmpl(u8 candidates)
{
const u8 iter = 8;
u8 __c;
s8 i, loc = -1;
for (i = 0; i < iter; i++) {
__c = ror8(candidates, i);
if ((__c & 0xc7) == 0xc7) {
loc = i;
goto out;
}
}
for (i = 0; i < iter; i++) {
__c = ror8(candidates, i);
if ((__c & 0x83) == 0x83) {
loc = i;
goto out;
}
}
/*
* If there is no candidate value, -EIO needs to be returned.
* If there are candidate values but the best clock sample value is not
* found, the first candidate clock sample value is used.
*/
for (i = 0; i < iter; i++) {
__c = ror8(candidates, i);
if ((__c & 0x1) == 0x1) {
loc = i;
goto out;
}
}
out:
return loc;
}
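/*
* Worked example (illustrative): after a rotation by i, the 0xc7 mask
* (binary 11000111) is fully set exactly when candidates has bits
* i-2 .. i+2 (mod 8) set, i.e. when sample i sits at the centre of a run
* of five passing phases. With candidates = 0x7c (bits 2..6 set),
* ror8(0x7c, 4) == 0xc7, so loc = 4 is returned. The 0x83 mask relaxes
* the requirement to a run of three, and the final 0x1 check accepts any
* single passing sample.
*/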
static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
{
struct dw_mci *host = slot->host;
struct dw_mci_exynos_priv_data *priv = host->priv;
struct mmc_host *mmc = slot->mmc;
u8 start_smpl, smpl, candidates = 0;
s8 found;
int ret = 0;
start_smpl = dw_mci_exynos_get_clksmpl(host);
do {
mci_writel(host, TMOUT, ~0);
smpl = dw_mci_exynos_move_next_clksmpl(host);
if (!mmc_send_tuning(mmc, opcode, NULL))
candidates |= (1 << smpl);
} while (start_smpl != smpl);
found = dw_mci_exynos_get_best_clksmpl(candidates);
if (found >= 0) {
dw_mci_exynos_set_clksmpl(host, found);
priv->tuned_sample = found;
} else {
ret = -EIO;
dev_warn(&mmc->class_dev,
"There is no candidates value about clksmpl!\n");
}
return ret;
}
static int dw_mci_exynos_prepare_hs400_tuning(struct dw_mci *host,
struct mmc_ios *ios)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
dw_mci_exynos_set_clksel_timing(host, priv->hs400_timing);
dw_mci_exynos_adjust_clock(host, (ios->clock) << 1);
return 0;
}
static void dw_mci_exynos_set_data_timeout(struct dw_mci *host,
unsigned int timeout_ns)
{
u32 clk_div, tmout;
u64 tmp;
unsigned int tmp2;
clk_div = (mci_readl(host, CLKDIV) & 0xFF) * 2;
if (clk_div == 0)
clk_div = 1;
tmp = DIV_ROUND_UP_ULL((u64)timeout_ns * host->bus_hz, NSEC_PER_SEC);
tmp = DIV_ROUND_UP_ULL(tmp, clk_div);
/* TMOUT[7:0] (RESPONSE_TIMEOUT) */
tmout = 0xFF; /* Set maximum */
/*
* Extended HW timer (max = 0x6FFFFF2):
* ((TMOUT[10:8] - 1) * 0xFFFFFF + TMOUT[31:11] * 8)
*/
if (!tmp || tmp > 0x6FFFFF2) {
tmout |= (0xFFFFFF << 8);
} else {
/* TMOUT[10:8] */
tmp2 = (((unsigned int)tmp / 0xFFFFFF) + 1) & 0x7;
tmout |= tmp2 << 8;
/* TMOUT[31:11] */
tmp = tmp - ((tmp2 - 1) * 0xFFFFFF);
tmout |= (tmp & 0xFFFFF8) << 8;
}
mci_writel(host, TMOUT, tmout);
dev_dbg(host->dev, "timeout_ns: %u => TMOUT[31:8]: %#08x",
timeout_ns, tmout >> 8);
}
static u32 dw_mci_exynos_get_drto_clks(struct dw_mci *host)
{
u32 drto_clks;
drto_clks = mci_readl(host, TMOUT) >> 8;
return (((drto_clks & 0x7) - 1) * 0xFFFFFF) + ((drto_clks & 0xFFFFF8));
}
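/*
* Worked example (illustrative): a data timeout of 100 bus clocks gives
* tmp2 = 100 / 0xFFFFFF + 1 = 1 in TMOUT[10:8], and the remainder 100 is
* rounded down to 96 (a multiple of 8) for the TMOUT[31:11] field.
* Decoding with dw_mci_exynos_get_drto_clks() yields
* (1 - 1) * 0xFFFFFF + 96 = 96 clocks: the encoding drops the low three
* bits, shortening the timeout by up to 7 clocks.
*/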
/* Common capabilities of Exynos4/Exynos5 SoC */
static unsigned long exynos_dwmmc_caps[4] = {
MMC_CAP_1_8V_DDR | MMC_CAP_8_BIT_DATA,
0,
0,
0,
};
static const struct dw_mci_drv_data exynos_drv_data = {
.caps = exynos_dwmmc_caps,
.num_caps = ARRAY_SIZE(exynos_dwmmc_caps),
.common_caps = MMC_CAP_CMD23,
.init = dw_mci_exynos_priv_init,
.set_ios = dw_mci_exynos_set_ios,
.parse_dt = dw_mci_exynos_parse_dt,
.execute_tuning = dw_mci_exynos_execute_tuning,
.prepare_hs400_tuning = dw_mci_exynos_prepare_hs400_tuning,
};
static const struct dw_mci_drv_data artpec_drv_data = {
.common_caps = MMC_CAP_CMD23,
.init = dw_mci_exynos_priv_init,
.set_ios = dw_mci_exynos_set_ios,
.parse_dt = dw_mci_exynos_parse_dt,
.execute_tuning = dw_mci_exynos_execute_tuning,
.set_data_timeout = dw_mci_exynos_set_data_timeout,
.get_drto_clks = dw_mci_exynos_get_drto_clks,
};
static const struct of_device_id dw_mci_exynos_match[] = {
{ .compatible = "samsung,exynos4412-dw-mshc",
.data = &exynos_drv_data, },
{ .compatible = "samsung,exynos5250-dw-mshc",
.data = &exynos_drv_data, },
{ .compatible = "samsung,exynos5420-dw-mshc",
.data = &exynos_drv_data, },
{ .compatible = "samsung,exynos5420-dw-mshc-smu",
.data = &exynos_drv_data, },
{ .compatible = "samsung,exynos7-dw-mshc",
.data = &exynos_drv_data, },
{ .compatible = "samsung,exynos7-dw-mshc-smu",
.data = &exynos_drv_data, },
{ .compatible = "axis,artpec8-dw-mshc",
.data = &artpec_drv_data, },
{},
};
MODULE_DEVICE_TABLE(of, dw_mci_exynos_match);
static int dw_mci_exynos_probe(struct platform_device *pdev)
{
const struct dw_mci_drv_data *drv_data;
const struct of_device_id *match;
int ret;
match = of_match_node(dw_mci_exynos_match, pdev->dev.of_node);
drv_data = match->data;
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = dw_mci_pltfm_register(pdev, drv_data);
if (ret) {
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
return ret;
}
return 0;
}
static void dw_mci_exynos_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
dw_mci_pltfm_remove(pdev);
}
static const struct dev_pm_ops dw_mci_exynos_pmops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dw_mci_exynos_suspend_noirq,
dw_mci_exynos_resume_noirq)
SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
dw_mci_exynos_runtime_resume,
NULL)
};
static struct platform_driver dw_mci_exynos_pltfm_driver = {
.probe = dw_mci_exynos_probe,
.remove_new = dw_mci_exynos_remove,
.driver = {
.name = "dwmmc_exynos",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = dw_mci_exynos_match,
.pm = &dw_mci_exynos_pmops,
},
};
module_platform_driver(dw_mci_exynos_pltfm_driver);
MODULE_DESCRIPTION("Samsung Specific DW-MSHC Driver Extension");
MODULE_AUTHOR("Thomas Abraham <[email protected]");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dwmmc_exynos");
| linux-master | drivers/mmc/host/dw_mmc-exynos.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Secure Digital Host Controller Interface ACPI driver.
*
* Copyright (c) 2012, Intel Corporation.
*/
#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/compiler.h>
#include <linux/stddef.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/acpi.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/mmc/host.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/slot-gpio.h>
#ifdef CONFIG_X86
#include <linux/platform_data/x86/soc.h>
#include <asm/iosf_mbi.h>
#endif
#include "sdhci.h"
enum {
SDHCI_ACPI_SD_CD = BIT(0),
SDHCI_ACPI_RUNTIME_PM = BIT(1),
SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL = BIT(2),
};
struct sdhci_acpi_chip {
const struct sdhci_ops *ops;
unsigned int quirks;
unsigned int quirks2;
unsigned long caps;
unsigned int caps2;
mmc_pm_flag_t pm_caps;
};
struct sdhci_acpi_slot {
const struct sdhci_acpi_chip *chip;
unsigned int quirks;
unsigned int quirks2;
unsigned long caps;
unsigned int caps2;
mmc_pm_flag_t pm_caps;
unsigned int flags;
size_t priv_size;
int (*probe_slot)(struct platform_device *, struct acpi_device *);
int (*remove_slot)(struct platform_device *);
int (*free_slot)(struct platform_device *pdev);
int (*setup_host)(struct platform_device *pdev);
};
struct sdhci_acpi_host {
struct sdhci_host *host;
const struct sdhci_acpi_slot *slot;
struct platform_device *pdev;
bool use_runtime_pm;
bool is_intel;
bool reset_signal_volt_on_suspend;
unsigned long private[] ____cacheline_aligned;
};
enum {
DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP = BIT(0),
DMI_QUIRK_SD_NO_WRITE_PROTECT = BIT(1),
};
static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c)
{
return (void *)c->private;
}
static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
{
return c->slot && (c->slot->flags & flag);
}
#define INTEL_DSM_HS_CAPS_SDR25 BIT(0)
#define INTEL_DSM_HS_CAPS_DDR50 BIT(1)
#define INTEL_DSM_HS_CAPS_SDR50 BIT(2)
#define INTEL_DSM_HS_CAPS_SDR104 BIT(3)
enum {
INTEL_DSM_FNS = 0,
INTEL_DSM_V18_SWITCH = 3,
INTEL_DSM_V33_SWITCH = 4,
INTEL_DSM_HS_CAPS = 8,
};
struct intel_host {
u32 dsm_fns;
u32 hs_caps;
};
static const guid_t intel_dsm_guid =
GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);
static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
unsigned int fn, u32 *result)
{
union acpi_object *obj;
int err = 0;
obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
if (!obj)
return -EOPNOTSUPP;
if (obj->type == ACPI_TYPE_INTEGER) {
*result = obj->integer.value;
} else if (obj->type == ACPI_TYPE_BUFFER && obj->buffer.length > 0) {
size_t len = min_t(size_t, obj->buffer.length, 4);
*result = 0;
memcpy(result, obj->buffer.pointer, len);
} else {
dev_err(dev, "%s DSM fn %u obj->type %d obj->buffer.length %d\n",
__func__, fn, obj->type, obj->buffer.length);
err = -EINVAL;
}
ACPI_FREE(obj);
return err;
}
static int intel_dsm(struct intel_host *intel_host, struct device *dev,
unsigned int fn, u32 *result)
{
if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
return -EOPNOTSUPP;
return __intel_dsm(intel_host, dev, fn, result);
}
static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
struct mmc_host *mmc)
{
int err;
intel_host->hs_caps = ~0;
err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
if (err) {
pr_debug("%s: DSM not supported, error %d\n",
mmc_hostname(mmc), err);
return;
}
pr_debug("%s: DSM function mask %#x\n",
mmc_hostname(mmc), intel_host->dsm_fns);
intel_dsm(intel_host, dev, INTEL_DSM_HS_CAPS, &intel_host->hs_caps);
}
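/*
* Note: DSM function 0 (INTEL_DSM_FNS) returns a bitmask of the functions
* the platform firmware implements, and intel_dsm() rejects any call whose
* bit is clear. hs_caps is pre-set to ~0 so that, when the DSM is absent,
* intel_setup_host() leaves every UHS speed mode enabled.
*/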
static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
struct mmc_ios *ios)
{
struct device *dev = mmc_dev(mmc);
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
struct intel_host *intel_host = sdhci_acpi_priv(c);
unsigned int fn;
u32 result = 0;
int err;
err = sdhci_start_signal_voltage_switch(mmc, ios);
if (err)
return err;
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_330:
fn = INTEL_DSM_V33_SWITCH;
break;
case MMC_SIGNAL_VOLTAGE_180:
fn = INTEL_DSM_V18_SWITCH;
break;
default:
return 0;
}
err = intel_dsm(intel_host, dev, fn, &result);
pr_debug("%s: %s DSM fn %u error %d result %u\n",
mmc_hostname(mmc), __func__, fn, err, result);
return 0;
}
static void sdhci_acpi_int_hw_reset(struct sdhci_host *host)
{
u8 reg;
reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
reg |= 0x10;
sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
/* For eMMC, minimum is 1us but give it 9us for good measure */
udelay(9);
reg &= ~0x10;
sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
/* For eMMC, minimum is 200us but give it 300us for good measure */
usleep_range(300, 1000);
}
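/*
* Note: the 0x10 bit in SDHCI_POWER_CONTROL is a vendor-specific control
* here; toggling it pulses the card's hardware reset line with the minimum
* assert and recovery times noted above, plus margin.
*/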
static const struct sdhci_ops sdhci_acpi_ops_dflt = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
static const struct sdhci_ops sdhci_acpi_ops_int = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.hw_reset = sdhci_acpi_int_hw_reset,
};
static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
.ops = &sdhci_acpi_ops_int,
};
#ifdef CONFIG_X86
#define BYT_IOSF_SCCEP 0x63
#define BYT_IOSF_OCP_NETCTRL0 0x1078
#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
static void sdhci_acpi_byt_setting(struct device *dev)
{
u32 val = 0;
if (!soc_intel_is_byt())
return;
if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
&val)) {
dev_err(dev, "%s read error\n", __func__);
return;
}
if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
return;
val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
val)) {
dev_err(dev, "%s write error\n", __func__);
return;
}
dev_dbg(dev, "%s completed\n", __func__);
}
static bool sdhci_acpi_byt_defer(struct device *dev)
{
if (!soc_intel_is_byt())
return false;
if (!iosf_mbi_available())
return true;
sdhci_acpi_byt_setting(dev);
return false;
}
#else
static inline void sdhci_acpi_byt_setting(struct device *dev)
{
}
static inline bool sdhci_acpi_byt_defer(struct device *dev)
{
return false;
}
#endif
static int bxt_get_cd(struct mmc_host *mmc)
{
int gpio_cd = mmc_gpio_get_cd(mmc);
if (!gpio_cd)
return 0;
return sdhci_get_cd_nogpio(mmc);
}
static int intel_probe_slot(struct platform_device *pdev, struct acpi_device *adev)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
struct intel_host *intel_host = sdhci_acpi_priv(c);
struct sdhci_host *host = c->host;
if (acpi_dev_hid_uid_match(adev, "80860F14", "1") &&
sdhci_readl(host, SDHCI_CAPABILITIES) == 0x446cc8b2 &&
sdhci_readl(host, SDHCI_CAPABILITIES_1) == 0x00000807)
host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
if (acpi_dev_hid_uid_match(adev, "80865ACA", NULL))
host->mmc_host_ops.get_cd = bxt_get_cd;
intel_dsm_init(intel_host, &pdev->dev, host->mmc);
host->mmc_host_ops.start_signal_voltage_switch =
intel_start_signal_voltage_switch;
c->is_intel = true;
return 0;
}
static int intel_setup_host(struct platform_device *pdev)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
struct intel_host *intel_host = sdhci_acpi_priv(c);
if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR25))
c->host->mmc->caps &= ~MMC_CAP_UHS_SDR25;
if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR50))
c->host->mmc->caps &= ~MMC_CAP_UHS_SDR50;
if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_DDR50))
c->host->mmc->caps &= ~MMC_CAP_UHS_DDR50;
if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR104))
c->host->mmc->caps &= ~MMC_CAP_UHS_SDR104;
return 0;
}
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
.chip = &sdhci_acpi_chip_int,
.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
MMC_CAP_CMD_DURING_TFR | MMC_CAP_WAIT_WHILE_BUSY,
.flags = SDHCI_ACPI_RUNTIME_PM,
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
SDHCI_QUIRK_NO_LED,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_STOP_WITH_TC |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
.probe_slot = intel_probe_slot,
.setup_host = intel_setup_host,
.priv_size = sizeof(struct intel_host),
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
SDHCI_QUIRK_NO_LED |
SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
.caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD |
MMC_CAP_WAIT_WHILE_BUSY,
.flags = SDHCI_ACPI_RUNTIME_PM,
.pm_caps = MMC_PM_KEEP_POWER,
.probe_slot = intel_probe_slot,
.setup_host = intel_setup_host,
.priv_size = sizeof(struct intel_host),
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
.flags = SDHCI_ACPI_SD_CD | SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL |
SDHCI_ACPI_RUNTIME_PM,
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
SDHCI_QUIRK_NO_LED,
.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
SDHCI_QUIRK2_STOP_WITH_TC,
.caps = MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM,
.probe_slot = intel_probe_slot,
.setup_host = intel_setup_host,
.priv_size = sizeof(struct intel_host),
};
#define VENDOR_SPECIFIC_PWRCTL_CLEAR_REG 0x1a8
#define VENDOR_SPECIFIC_PWRCTL_CTL_REG 0x1ac
static irqreturn_t sdhci_acpi_qcom_handler(int irq, void *ptr)
{
struct sdhci_host *host = ptr;
sdhci_writel(host, 0x3, VENDOR_SPECIFIC_PWRCTL_CLEAR_REG);
sdhci_writel(host, 0x1, VENDOR_SPECIFIC_PWRCTL_CTL_REG);
return IRQ_HANDLED;
}
static int qcom_probe_slot(struct platform_device *pdev, struct acpi_device *adev)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
struct sdhci_host *host = c->host;
int *irq = sdhci_acpi_priv(c);
*irq = -EINVAL;
if (!acpi_dev_hid_uid_match(adev, "QCOM8051", NULL))
return 0;
*irq = platform_get_irq(pdev, 1);
if (*irq < 0)
return 0;
return request_threaded_irq(*irq, NULL, sdhci_acpi_qcom_handler,
IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
"sdhci_qcom", host);
}
static int qcom_free_slot(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
struct sdhci_host *host = c->host;
struct acpi_device *adev;
int *irq = sdhci_acpi_priv(c);
adev = ACPI_COMPANION(dev);
if (!adev)
return -ENODEV;
if (!acpi_dev_hid_uid_match(adev, "QCOM8051", NULL))
return 0;
if (*irq < 0)
return 0;
free_irq(*irq, host);
return 0;
}
static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd_3v = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION,
.quirks2 = SDHCI_QUIRK2_NO_1_8_V,
.caps = MMC_CAP_NONREMOVABLE,
.priv_size = sizeof(int),
.probe_slot = qcom_probe_slot,
.free_slot = qcom_free_slot,
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION,
.caps = MMC_CAP_NONREMOVABLE,
};
struct amd_sdhci_host {
bool tuned_clock;
bool dll_enabled;
};
/* AMD sdhci reset dll register. */
#define SDHCI_AMD_RESET_DLL_REGISTER 0x908
static int amd_select_drive_strength(struct mmc_card *card,
unsigned int max_dtr, int host_drv,
int card_drv, int *host_driver_strength)
{
struct sdhci_host *host = mmc_priv(card->host);
u16 preset, preset_driver_strength;
/*
* This method is only called by mmc_select_hs200 so we only need to
* read from the HS200 (SDR104) preset register.
*
* Firmware that has "invalid/default" presets returns a driver strength
* of A. This matches the previously hard coded value.
*/
preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
preset_driver_strength = FIELD_GET(SDHCI_PRESET_DRV_MASK, preset);
/*
* We want the controller driver strength to match the card's driver
* strength so they have similar rise/fall times.
*
* The controller driver strength set by this method is sticky for all
* timings after this method is called. This unfortunately means that
* while HS400 tuning is in progress we end up with mismatched driver
* strengths between the controller and the card. HS400 tuning requires
* switching from HS400->DDR52->HS->HS200->HS400. So the driver mismatch
* happens while in DDR52 and HS modes. This has not been observed to
* cause problems. Enabling presets would fix this issue.
*/
*host_driver_strength = preset_driver_strength;
/*
* The resulting card driver strength is only set when switching the
* card's timing to HS200 or HS400. The card will use the default driver
* strength (B) for any other mode.
*/
return preset_driver_strength;
}
static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host, bool enable)
{
struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
/* AMD Platform requires dll setting */
sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER);
usleep_range(10, 20);
if (enable)
sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER);
amd_host->dll_enabled = enable;
}
/*
* The initialization sequence for HS400 is:
* HS->HS200->Perform Tuning->HS->HS400
*
* The re-tuning sequence is:
* HS400->DDR52->HS->HS200->Perform Tuning->HS->HS400
*
* The AMD eMMC Controller can only use the tuned clock while in HS200 and HS400
* mode. If we switch to a different mode, we need to disable the tuned clock.
* If we have previously performed tuning and switch back to HS200 or
* HS400, we can re-enable the tuned clock.
*
*/
static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
unsigned int old_timing = host->timing;
u16 val;
sdhci_set_ios(mmc, ios);
if (old_timing != host->timing && amd_host->tuned_clock) {
if (host->timing == MMC_TIMING_MMC_HS400 ||
host->timing == MMC_TIMING_MMC_HS200) {
val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
val |= SDHCI_CTRL_TUNED_CLK;
sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
} else {
val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
val &= ~SDHCI_CTRL_TUNED_CLK;
sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
}
/* DLL is only required for HS400 */
if (host->timing == MMC_TIMING_MMC_HS400 &&
!amd_host->dll_enabled)
sdhci_acpi_amd_hs400_dll(host, true);
}
}
static int amd_sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
int err;
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
amd_host->tuned_clock = false;
err = sdhci_execute_tuning(mmc, opcode);
if (!err && !host->tuning_err)
amd_host->tuned_clock = true;
return err;
}
static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
if (mask & SDHCI_RESET_ALL) {
amd_host->tuned_clock = false;
sdhci_acpi_amd_hs400_dll(host, false);
}
sdhci_reset(host, mask);
}
static const struct sdhci_ops sdhci_acpi_ops_amd = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = amd_sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
static const struct sdhci_acpi_chip sdhci_acpi_chip_amd = {
.ops = &sdhci_acpi_ops_amd,
};
static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
struct acpi_device *adev)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
struct sdhci_host *host = c->host;
sdhci_read_caps(host);
if (host->caps1 & SDHCI_SUPPORT_DDR50)
host->mmc->caps = MMC_CAP_1_8V_DDR;
if ((host->caps1 & SDHCI_SUPPORT_SDR104) &&
(host->mmc->caps & MMC_CAP_1_8V_DDR))
host->mmc->caps2 = MMC_CAP2_HS400_1_8V;
/*
* There are two types of presets out in the wild:
* 1) Default/broken presets.
* These presets have two sets of problems:
* a) The clock divisor for SDR12, SDR25, and SDR50 is too small.
* This results in clock frequencies that are 2x higher than
* acceptable, i.e., SDR12 = 25 MHz, SDR25 = 50 MHz, SDR50 =
* 100 MHz.
* b) The HS200 and HS400 driver strengths don't match.
* By default, the SDR104 preset register has a driver strength of
* A, but the (internal) HS400 preset register has a driver
* strength of B. As part of initializing HS400, HS200 tuning
* needs to be performed. Having different driver strengths
* between tuning and operation is wrong. It results in different
* rise/fall times that lead to incorrect sampling.
* 2) Firmware with properly initialized presets.
* These presets have proper clock divisors, i.e., SDR12 => 12 MHz,
* SDR25 => 25 MHz, SDR50 => 50 MHz. Additionally the HS200 and
* HS400 preset driver strengths match.
*
* Enabling presets for HS400 doesn't work for the following reasons:
* 1) sdhci_set_ios has a hard coded list of timings that are used
* to determine if presets should be enabled.
* 2) sdhci_get_preset_value is using a non-standard register to
* read out HS400 presets. The AMD controller doesn't support this
* non-standard register. In fact, it doesn't expose the HS400
* preset register anywhere in the SDHCI memory map. This results
* in reading a garbage value and using the wrong presets.
*
* Since HS400 and HS200 presets must be identical, we could
* instead use the SDR104 preset register.
*
* If the above issues are resolved we could remove this quirk for
* firmware that has valid presets (i.e., SDR12 <= 12 MHz).
*/
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
host->mmc_host_ops.select_drive_strength = amd_select_drive_strength;
host->mmc_host_ops.set_ios = amd_set_ios;
host->mmc_host_ops.execute_tuning = amd_sdhci_execute_tuning;
return 0;
}
static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
.chip = &sdhci_acpi_chip_amd,
.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
SDHCI_QUIRK_32BIT_DMA_SIZE |
SDHCI_QUIRK_32BIT_ADMA_SIZE,
.quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
.probe_slot = sdhci_acpi_emmc_amd_probe_slot,
.priv_size = sizeof(struct amd_sdhci_host),
};
struct sdhci_acpi_uid_slot {
const char *hid;
const char *uid;
const struct sdhci_acpi_slot *slot;
};
static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
{ "80865ACA", NULL, &sdhci_acpi_slot_int_sd },
{ "80865ACC", NULL, &sdhci_acpi_slot_int_emmc },
{ "80865AD0", NULL, &sdhci_acpi_slot_int_sdio },
{ "80860F14" , "1" , &sdhci_acpi_slot_int_emmc },
{ "80860F14" , "2" , &sdhci_acpi_slot_int_sdio },
{ "80860F14" , "3" , &sdhci_acpi_slot_int_sd },
{ "80860F16" , NULL, &sdhci_acpi_slot_int_sd },
{ "INT33BB" , "2" , &sdhci_acpi_slot_int_sdio },
{ "INT33BB" , "3" , &sdhci_acpi_slot_int_sd },
{ "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio },
{ "INT3436" , NULL, &sdhci_acpi_slot_int_sdio },
{ "INT344D" , NULL, &sdhci_acpi_slot_int_sdio },
{ "PNP0FFF" , "3" , &sdhci_acpi_slot_int_sd },
{ "PNP0D40" },
{ "QCOM8051", NULL, &sdhci_acpi_slot_qcom_sd_3v },
{ "QCOM8052", NULL, &sdhci_acpi_slot_qcom_sd },
{ "AMDI0040", NULL, &sdhci_acpi_slot_amd_emmc },
{ "AMDI0041", NULL, &sdhci_acpi_slot_amd_emmc },
{ },
};
static const struct acpi_device_id sdhci_acpi_ids[] = {
{ "80865ACA" },
{ "80865ACC" },
{ "80865AD0" },
{ "80860F14" },
{ "80860F16" },
{ "INT33BB" },
{ "INT33C6" },
{ "INT3436" },
{ "INT344D" },
{ "PNP0D40" },
{ "QCOM8051" },
{ "QCOM8052" },
{ "AMDI0040" },
{ "AMDI0041" },
{ },
};
MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
static const struct dmi_system_id sdhci_acpi_quirks[] = {
{
/*
* The Lenovo Miix 320-10ICR has a bug in the _PS0 method of
* the SHC1 ACPI device, this bug causes it to reprogram the
* wrong LDO (DLDO3) to 1.8V if 1.8V modes are used and the
* card is (runtime) suspended + resumed. DLDO3 is used for
* the LCD and setting it to 1.8V causes the LCD to go black.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
},
.driver_data = (void *)DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP,
},
{
/*
* The Acer Aspire Switch 10 (SW5-012) microSD slot always
* reports the card being write-protected even though microSD
* cards do not have a write-protect switch at all.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
},
.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
},
{
/*
* The Toshiba WT8-B's microSD slot always reports the card being
* write-protected.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "TOSHIBA ENCORE 2 WT8-B"),
},
.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
},
{} /* Terminating entry */
};
static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(struct acpi_device *adev)
{
const struct sdhci_acpi_uid_slot *u;
for (u = sdhci_acpi_uids; u->hid; u++) {
if (acpi_dev_hid_uid_match(adev, u->hid, u->uid))
return u->slot;
}
return NULL;
}
static int sdhci_acpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct sdhci_acpi_slot *slot;
const struct dmi_system_id *id;
struct acpi_device *device;
struct sdhci_acpi_host *c;
struct sdhci_host *host;
struct resource *iomem;
resource_size_t len;
size_t priv_size;
int quirks = 0;
int err;
device = ACPI_COMPANION(dev);
if (!device)
return -ENODEV;
id = dmi_first_match(sdhci_acpi_quirks);
if (id)
quirks = (long)id->driver_data;
slot = sdhci_acpi_get_slot(device);
/* Power on the SDHCI controller and its children */
acpi_device_fix_up_power_extended(device);
if (sdhci_acpi_byt_defer(dev))
return -EPROBE_DEFER;
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iomem)
return -ENOMEM;
len = resource_size(iomem);
if (len < 0x100)
dev_err(dev, "Invalid iomem size!\n");
if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev)))
return -ENOMEM;
priv_size = slot ? slot->priv_size : 0;
host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host) + priv_size);
if (IS_ERR(host))
return PTR_ERR(host);
c = sdhci_priv(host);
c->host = host;
c->slot = slot;
c->pdev = pdev;
c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM);
platform_set_drvdata(pdev, c);
host->hw_name = "ACPI";
host->ops = &sdhci_acpi_ops_dflt;
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
err = host->irq;
goto err_free;
}
host->ioaddr = devm_ioremap(dev, iomem->start,
resource_size(iomem));
if (host->ioaddr == NULL) {
err = -ENOMEM;
goto err_free;
}
if (c->slot) {
if (c->slot->probe_slot) {
err = c->slot->probe_slot(pdev, device);
if (err)
goto err_free;
}
if (c->slot->chip) {
host->ops = c->slot->chip->ops;
host->quirks |= c->slot->chip->quirks;
host->quirks2 |= c->slot->chip->quirks2;
host->mmc->caps |= c->slot->chip->caps;
host->mmc->caps2 |= c->slot->chip->caps2;
host->mmc->pm_caps |= c->slot->chip->pm_caps;
}
host->quirks |= c->slot->quirks;
host->quirks2 |= c->slot->quirks2;
host->mmc->caps |= c->slot->caps;
host->mmc->caps2 |= c->slot->caps2;
host->mmc->pm_caps |= c->slot->pm_caps;
}
host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);
err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0);
if (err) {
if (err == -EPROBE_DEFER)
goto err_free;
dev_warn(dev, "failed to setup card detect gpio\n");
c->use_runtime_pm = false;
}
if (quirks & DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP)
c->reset_signal_volt_on_suspend = true;
if (quirks & DMI_QUIRK_SD_NO_WRITE_PROTECT)
host->mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
}
err = sdhci_setup_host(host);
if (err)
goto err_free;
if (c->slot && c->slot->setup_host) {
err = c->slot->setup_host(pdev);
if (err)
goto err_cleanup;
}
err = __sdhci_add_host(host);
if (err)
goto err_cleanup;
if (c->use_runtime_pm) {
pm_runtime_set_active(dev);
pm_suspend_ignore_children(dev, 1);
pm_runtime_set_autosuspend_delay(dev, 50);
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
}
device_enable_async_suspend(dev);
return 0;
err_cleanup:
sdhci_cleanup_host(c->host);
err_free:
if (c->slot && c->slot->free_slot)
c->slot->free_slot(pdev);
sdhci_free_host(c->host);
return err;
}
static void sdhci_acpi_remove(struct platform_device *pdev)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
int dead;
if (c->use_runtime_pm) {
pm_runtime_get_sync(dev);
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
}
if (c->slot && c->slot->remove_slot)
c->slot->remove_slot(pdev);
dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0);
sdhci_remove_host(c->host, dead);
if (c->slot && c->slot->free_slot)
c->slot->free_slot(pdev);
sdhci_free_host(c->host);
}
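/*
 * On Intel hosts carrying DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP, the
 * vendor _DSM is used to switch signalling back to 3.3V before suspend,
 * presumably so that a UHS card left at 1.8V re-enumerates correctly
 * after resume.
 */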
static void __maybe_unused sdhci_acpi_reset_signal_voltage_if_needed(
struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
struct sdhci_host *host = c->host;
if (c->is_intel && c->reset_signal_volt_on_suspend &&
host->mmc->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_330) {
struct intel_host *intel_host = sdhci_acpi_priv(c);
unsigned int fn = INTEL_DSM_V33_SWITCH;
u32 result = 0;
intel_dsm(intel_host, dev, fn, &result);
}
}
#ifdef CONFIG_PM_SLEEP
static int sdhci_acpi_suspend(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
struct sdhci_host *host = c->host;
int ret;
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
ret = sdhci_suspend_host(host);
if (ret)
return ret;
sdhci_acpi_reset_signal_voltage_if_needed(dev);
return 0;
}
static int sdhci_acpi_resume(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
sdhci_acpi_byt_setting(&c->pdev->dev);
return sdhci_resume_host(c->host);
}
#endif
#ifdef CONFIG_PM
static int sdhci_acpi_runtime_suspend(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
struct sdhci_host *host = c->host;
int ret;
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
ret = sdhci_runtime_suspend_host(host);
if (ret)
return ret;
sdhci_acpi_reset_signal_voltage_if_needed(dev);
return 0;
}
static int sdhci_acpi_runtime_resume(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
sdhci_acpi_byt_setting(&c->pdev->dev);
return sdhci_runtime_resume_host(c->host, 0);
}
#endif
static const struct dev_pm_ops sdhci_acpi_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(sdhci_acpi_suspend, sdhci_acpi_resume)
SET_RUNTIME_PM_OPS(sdhci_acpi_runtime_suspend,
sdhci_acpi_runtime_resume, NULL)
};
static struct platform_driver sdhci_acpi_driver = {
.driver = {
.name = "sdhci-acpi",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.acpi_match_table = sdhci_acpi_ids,
.pm = &sdhci_acpi_pm_ops,
},
.probe = sdhci_acpi_probe,
.remove_new = sdhci_acpi_remove,
};
module_platform_driver(sdhci_acpi_driver);
MODULE_DESCRIPTION("Secure Digital Host Controller Interface ACPI driver");
MODULE_AUTHOR("Adrian Hunter");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mmc/host/sdhci-acpi.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019 Genesys Logic, Inc.
*
* Authors: Ben Chuang <[email protected]>
*
* Version: v0.9.0 (2019-08-08)
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/pci.h>
#include <linux/mmc/mmc.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/iopoll.h>
#include "sdhci.h"
#include "sdhci-cqhci.h"
#include "sdhci-pci.h"
#include "cqhci.h"
/* Genesys Logic extra registers */
#define SDHCI_GLI_9750_WT 0x800
#define SDHCI_GLI_9750_WT_EN BIT(0)
#define GLI_9750_WT_EN_ON 0x1
#define GLI_9750_WT_EN_OFF 0x0
#define SDHCI_GLI_9750_CFG2 0x848
#define SDHCI_GLI_9750_CFG2_L1DLY GENMASK(28, 24)
#define GLI_9750_CFG2_L1DLY_VALUE 0x1F
#define SDHCI_GLI_9750_DRIVING 0x860
#define SDHCI_GLI_9750_DRIVING_1 GENMASK(11, 0)
#define SDHCI_GLI_9750_DRIVING_2 GENMASK(27, 26)
#define GLI_9750_DRIVING_1_VALUE 0xFFF
#define GLI_9750_DRIVING_2_VALUE 0x3
#define SDHCI_GLI_9750_SEL_1 BIT(29)
#define SDHCI_GLI_9750_SEL_2 BIT(31)
#define SDHCI_GLI_9750_ALL_RST (BIT(24)|BIT(25)|BIT(28)|BIT(30))
#define SDHCI_GLI_9750_PLL 0x864
#define SDHCI_GLI_9750_PLL_LDIV GENMASK(9, 0)
#define SDHCI_GLI_9750_PLL_PDIV GENMASK(14, 12)
#define SDHCI_GLI_9750_PLL_DIR BIT(15)
#define SDHCI_GLI_9750_PLL_TX2_INV BIT(23)
#define SDHCI_GLI_9750_PLL_TX2_DLY GENMASK(22, 20)
#define GLI_9750_PLL_TX2_INV_VALUE 0x1
#define GLI_9750_PLL_TX2_DLY_VALUE 0x0
#define SDHCI_GLI_9750_PLLSSC_STEP GENMASK(28, 24)
#define SDHCI_GLI_9750_PLLSSC_EN BIT(31)
#define SDHCI_GLI_9750_PLLSSC 0x86C
#define SDHCI_GLI_9750_PLLSSC_PPM GENMASK(31, 16)
#define SDHCI_GLI_9750_SW_CTRL 0x874
#define SDHCI_GLI_9750_SW_CTRL_4 GENMASK(7, 6)
#define GLI_9750_SW_CTRL_4_VALUE 0x3
#define SDHCI_GLI_9750_MISC 0x878
#define SDHCI_GLI_9750_MISC_TX1_INV BIT(2)
#define SDHCI_GLI_9750_MISC_RX_INV BIT(3)
#define SDHCI_GLI_9750_MISC_TX1_DLY GENMASK(6, 4)
#define GLI_9750_MISC_TX1_INV_VALUE 0x0
#define GLI_9750_MISC_RX_INV_ON 0x1
#define GLI_9750_MISC_RX_INV_OFF 0x0
#define GLI_9750_MISC_RX_INV_VALUE GLI_9750_MISC_RX_INV_OFF
#define GLI_9750_MISC_TX1_DLY_VALUE 0x5
#define SDHCI_GLI_9750_MISC_SSC_OFF BIT(26)
#define SDHCI_GLI_9750_TUNING_CONTROL 0x540
#define SDHCI_GLI_9750_TUNING_CONTROL_EN BIT(4)
#define GLI_9750_TUNING_CONTROL_EN_ON 0x1
#define GLI_9750_TUNING_CONTROL_EN_OFF 0x0
#define SDHCI_GLI_9750_TUNING_CONTROL_GLITCH_1 BIT(16)
#define SDHCI_GLI_9750_TUNING_CONTROL_GLITCH_2 GENMASK(20, 19)
#define GLI_9750_TUNING_CONTROL_GLITCH_1_VALUE 0x1
#define GLI_9750_TUNING_CONTROL_GLITCH_2_VALUE 0x2
#define SDHCI_GLI_9750_TUNING_PARAMETERS 0x544
#define SDHCI_GLI_9750_TUNING_PARAMETERS_RX_DLY GENMASK(2, 0)
#define GLI_9750_TUNING_PARAMETERS_RX_DLY_VALUE 0x1
#define SDHCI_GLI_9763E_CTRL_HS400 0x7
#define SDHCI_GLI_9763E_HS400_ES_REG 0x52C
#define SDHCI_GLI_9763E_HS400_ES_BIT BIT(8)
#define PCIE_GLI_9763E_VHS 0x884
#define GLI_9763E_VHS_REV GENMASK(19, 16)
#define GLI_9763E_VHS_REV_R 0x0
#define GLI_9763E_VHS_REV_M 0x1
#define GLI_9763E_VHS_REV_W 0x2
#define PCIE_GLI_9763E_MB 0x888
#define GLI_9763E_MB_CMDQ_OFF BIT(19)
#define GLI_9763E_MB_ERP_ON BIT(7)
#define PCIE_GLI_9763E_SCR 0x8E0
#define GLI_9763E_SCR_AXI_REQ BIT(9)
#define PCIE_GLI_9763E_CFG 0x8A0
#define GLI_9763E_CFG_LPSN_DIS BIT(12)
#define PCIE_GLI_9763E_CFG2 0x8A4
#define GLI_9763E_CFG2_L1DLY GENMASK(28, 19)
#define GLI_9763E_CFG2_L1DLY_MID 0x54
#define PCIE_GLI_9763E_MMC_CTRL 0x960
#define GLI_9763E_HS400_SLOW BIT(3)
#define PCIE_GLI_9763E_CLKRXDLY 0x934
#define GLI_9763E_HS400_RXDLY GENMASK(31, 28)
#define GLI_9763E_HS400_RXDLY_5 0x5
#define SDHCI_GLI_9763E_CQE_BASE_ADDR 0x200
#define GLI_9763E_CQE_TRNS_MODE (SDHCI_TRNS_MULTI | \
SDHCI_TRNS_BLK_CNT_EN | \
SDHCI_TRNS_DMA)
#define PCI_GLI_9755_WT 0x800
#define PCI_GLI_9755_WT_EN BIT(0)
#define GLI_9755_WT_EN_ON 0x1
#define GLI_9755_WT_EN_OFF 0x0
#define PCI_GLI_9755_PECONF 0x44
#define PCI_GLI_9755_LFCLK GENMASK(14, 12)
#define PCI_GLI_9755_DMACLK BIT(29)
#define PCI_GLI_9755_INVERT_CD BIT(30)
#define PCI_GLI_9755_INVERT_WP BIT(31)
#define PCI_GLI_9755_CFG2 0x48
#define PCI_GLI_9755_CFG2_L1DLY GENMASK(28, 24)
#define GLI_9755_CFG2_L1DLY_VALUE 0x1F
#define PCI_GLI_9755_PLL 0x64
#define PCI_GLI_9755_PLL_LDIV GENMASK(9, 0)
#define PCI_GLI_9755_PLL_PDIV GENMASK(14, 12)
#define PCI_GLI_9755_PLL_DIR BIT(15)
#define PCI_GLI_9755_PLLSSC_STEP GENMASK(28, 24)
#define PCI_GLI_9755_PLLSSC_EN BIT(31)
#define PCI_GLI_9755_PLLSSC 0x68
#define PCI_GLI_9755_PLLSSC_PPM GENMASK(15, 0)
#define PCI_GLI_9755_SerDes 0x70
#define PCI_GLI_9755_SCP_DIS BIT(19)
#define PCI_GLI_9755_MISC 0x78
#define PCI_GLI_9755_MISC_SSC_OFF BIT(26)
#define PCI_GLI_9755_PM_CTRL 0xFC
#define PCI_GLI_9755_PM_STATE GENMASK(1, 0)
#define SDHCI_GLI_9767_GM_BURST_SIZE 0x510
#define SDHCI_GLI_9767_GM_BURST_SIZE_AXI_ALWAYS_SET BIT(8)
#define PCIE_GLI_9767_VHS 0x884
#define GLI_9767_VHS_REV GENMASK(19, 16)
#define GLI_9767_VHS_REV_R 0x0
#define GLI_9767_VHS_REV_M 0x1
#define GLI_9767_VHS_REV_W 0x2
#define PCIE_GLI_9767_COM_MAILBOX 0x888
#define PCIE_GLI_9767_COM_MAILBOX_SSC_EN BIT(1)
#define PCIE_GLI_9767_CFG 0x8A0
#define PCIE_GLI_9767_CFG_LOW_PWR_OFF BIT(12)
#define PCIE_GLI_9767_COMBO_MUX_CTL 0x8C8
#define PCIE_GLI_9767_COMBO_MUX_CTL_RST_EN BIT(6)
#define PCIE_GLI_9767_COMBO_MUX_CTL_WAIT_PERST_EN BIT(10)
#define PCIE_GLI_9767_PWR_MACRO_CTL 0x8D0
#define PCIE_GLI_9767_PWR_MACRO_CTL_LOW_VOLTAGE GENMASK(3, 0)
#define PCIE_GLI_9767_PWR_MACRO_CTL_LD0_LOW_OUTPUT_VOLTAGE GENMASK(15, 12)
#define PCIE_GLI_9767_PWR_MACRO_CTL_LD0_LOW_OUTPUT_VOLTAGE_VALUE 0x7
#define PCIE_GLI_9767_PWR_MACRO_CTL_RCLK_AMPLITUDE_CTL GENMASK(29, 28)
#define PCIE_GLI_9767_PWR_MACRO_CTL_RCLK_AMPLITUDE_CTL_VALUE 0x3
#define PCIE_GLI_9767_SCR 0x8E0
#define PCIE_GLI_9767_SCR_AUTO_AXI_W_BURST BIT(6)
#define PCIE_GLI_9767_SCR_AUTO_AXI_R_BURST BIT(7)
#define PCIE_GLI_9767_SCR_AXI_REQ BIT(9)
#define PCIE_GLI_9767_SCR_CARD_DET_PWR_SAVING_EN BIT(10)
#define PCIE_GLI_9767_SCR_SYSTEM_CLK_SELECT_MODE0 BIT(16)
#define PCIE_GLI_9767_SCR_SYSTEM_CLK_SELECT_MODE1 BIT(17)
#define PCIE_GLI_9767_SCR_CORE_PWR_D3_OFF BIT(21)
#define PCIE_GLI_9767_SCR_CFG_RST_DATA_LINK_DOWN BIT(30)
#define PCIE_GLI_9767_SDHC_CAP 0x91C
#define PCIE_GLI_9767_SDHC_CAP_SDEI_RESULT BIT(5)
#define PCIE_GLI_9767_SD_PLL_CTL 0x938
#define PCIE_GLI_9767_SD_PLL_CTL_PLL_LDIV GENMASK(9, 0)
#define PCIE_GLI_9767_SD_PLL_CTL_PLL_PDIV GENMASK(15, 12)
#define PCIE_GLI_9767_SD_PLL_CTL_PLL_DIR_EN BIT(16)
#define PCIE_GLI_9767_SD_PLL_CTL_SSC_EN BIT(19)
#define PCIE_GLI_9767_SD_PLL_CTL_SSC_STEP_SETTING GENMASK(28, 24)
#define PCIE_GLI_9767_SD_PLL_CTL2 0x93C
#define PCIE_GLI_9767_SD_PLL_CTL2_PLLSSC_PPM GENMASK(31, 16)
#define PCIE_GLI_9767_SD_EXPRESS_CTL 0x940
#define PCIE_GLI_9767_SD_EXPRESS_CTL_SDEI_EXE BIT(0)
#define PCIE_GLI_9767_SD_EXPRESS_CTL_SD_EXPRESS_MODE BIT(1)
#define PCIE_GLI_9767_SD_DATA_MULTI_CTL 0x944
#define PCIE_GLI_9767_SD_DATA_MULTI_CTL_DISCONNECT_TIME GENMASK(23, 16)
#define PCIE_GLI_9767_SD_DATA_MULTI_CTL_DISCONNECT_TIME_VALUE 0x64
#define PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2 0x950
#define PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2_SDEI_COMPLETE BIT(0)
#define PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_EN_REG2 0x954
#define PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_EN_REG2_SDEI_COMPLETE_STATUS_EN BIT(0)
#define PCIE_GLI_9767_NORMAL_ERR_INT_SIGNAL_EN_REG2 0x958
#define PCIE_GLI_9767_NORMAL_ERR_INT_SIGNAL_EN_REG2_SDEI_COMPLETE_SIGNAL_EN BIT(0)
#define GLI_MAX_TUNING_LOOP 40
/* Genesys Logic chipset */
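/*
 * Vendor-specific registers on the GL9750 appear to be gated by a
 * write-enable bit in SDHCI_GLI_9750_WT; the helpers below toggle it
 * around each update. As a worked example of the bitfield accessors
 * used throughout: with SDHCI_GLI_9750_WT_EN == BIT(0),
 * FIELD_GET(SDHCI_GLI_9750_WT_EN, 0x1) yields 1 and
 * FIELD_PREP(SDHCI_GLI_9750_WT_EN, 0x1) yields 0x1.
 */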
static inline void gl9750_wt_on(struct sdhci_host *host)
{
u32 wt_value;
u32 wt_enable;
wt_value = sdhci_readl(host, SDHCI_GLI_9750_WT);
wt_enable = FIELD_GET(SDHCI_GLI_9750_WT_EN, wt_value);
if (wt_enable == GLI_9750_WT_EN_ON)
return;
wt_value &= ~SDHCI_GLI_9750_WT_EN;
wt_value |= FIELD_PREP(SDHCI_GLI_9750_WT_EN, GLI_9750_WT_EN_ON);
sdhci_writel(host, wt_value, SDHCI_GLI_9750_WT);
}
static inline void gl9750_wt_off(struct sdhci_host *host)
{
u32 wt_value;
u32 wt_enable;
wt_value = sdhci_readl(host, SDHCI_GLI_9750_WT);
wt_enable = FIELD_GET(SDHCI_GLI_9750_WT_EN, wt_value);
if (wt_enable == GLI_9750_WT_EN_OFF)
return;
wt_value &= ~SDHCI_GLI_9750_WT_EN;
wt_value |= FIELD_PREP(SDHCI_GLI_9750_WT_EN, GLI_9750_WT_EN_OFF);
sdhci_writel(host, wt_value, SDHCI_GLI_9750_WT);
}
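/*
 * Program the GL9750 vendor registers used around tuning: pad driving
 * strength, TX2 inversion/delay in the PLL, RX glitch filtering and a
 * fixed RX delay. The tuning parameters are latched by pulsing the
 * tuning-control enable bit; the constants are vendor-provided magic
 * values.
 */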
static void gli_set_9750(struct sdhci_host *host)
{
u32 driving_value;
u32 pll_value;
u32 sw_ctrl_value;
u32 misc_value;
u32 parameter_value;
u32 control_value;
u16 ctrl2;
gl9750_wt_on(host);
driving_value = sdhci_readl(host, SDHCI_GLI_9750_DRIVING);
pll_value = sdhci_readl(host, SDHCI_GLI_9750_PLL);
sw_ctrl_value = sdhci_readl(host, SDHCI_GLI_9750_SW_CTRL);
misc_value = sdhci_readl(host, SDHCI_GLI_9750_MISC);
parameter_value = sdhci_readl(host, SDHCI_GLI_9750_TUNING_PARAMETERS);
control_value = sdhci_readl(host, SDHCI_GLI_9750_TUNING_CONTROL);
driving_value &= ~(SDHCI_GLI_9750_DRIVING_1);
driving_value &= ~(SDHCI_GLI_9750_DRIVING_2);
driving_value |= FIELD_PREP(SDHCI_GLI_9750_DRIVING_1,
GLI_9750_DRIVING_1_VALUE);
driving_value |= FIELD_PREP(SDHCI_GLI_9750_DRIVING_2,
GLI_9750_DRIVING_2_VALUE);
driving_value &= ~(SDHCI_GLI_9750_SEL_1|SDHCI_GLI_9750_SEL_2|SDHCI_GLI_9750_ALL_RST);
driving_value |= SDHCI_GLI_9750_SEL_2;
sdhci_writel(host, driving_value, SDHCI_GLI_9750_DRIVING);
sw_ctrl_value &= ~SDHCI_GLI_9750_SW_CTRL_4;
sw_ctrl_value |= FIELD_PREP(SDHCI_GLI_9750_SW_CTRL_4,
GLI_9750_SW_CTRL_4_VALUE);
sdhci_writel(host, sw_ctrl_value, SDHCI_GLI_9750_SW_CTRL);
/* reset the tuning flow after reinit and before starting tuning */
pll_value &= ~SDHCI_GLI_9750_PLL_TX2_INV;
pll_value &= ~SDHCI_GLI_9750_PLL_TX2_DLY;
pll_value |= FIELD_PREP(SDHCI_GLI_9750_PLL_TX2_INV,
GLI_9750_PLL_TX2_INV_VALUE);
pll_value |= FIELD_PREP(SDHCI_GLI_9750_PLL_TX2_DLY,
GLI_9750_PLL_TX2_DLY_VALUE);
misc_value &= ~SDHCI_GLI_9750_MISC_TX1_INV;
misc_value &= ~SDHCI_GLI_9750_MISC_RX_INV;
misc_value &= ~SDHCI_GLI_9750_MISC_TX1_DLY;
misc_value |= FIELD_PREP(SDHCI_GLI_9750_MISC_TX1_INV,
GLI_9750_MISC_TX1_INV_VALUE);
misc_value |= FIELD_PREP(SDHCI_GLI_9750_MISC_RX_INV,
GLI_9750_MISC_RX_INV_VALUE);
misc_value |= FIELD_PREP(SDHCI_GLI_9750_MISC_TX1_DLY,
GLI_9750_MISC_TX1_DLY_VALUE);
parameter_value &= ~SDHCI_GLI_9750_TUNING_PARAMETERS_RX_DLY;
parameter_value |= FIELD_PREP(SDHCI_GLI_9750_TUNING_PARAMETERS_RX_DLY,
GLI_9750_TUNING_PARAMETERS_RX_DLY_VALUE);
control_value &= ~SDHCI_GLI_9750_TUNING_CONTROL_GLITCH_1;
control_value &= ~SDHCI_GLI_9750_TUNING_CONTROL_GLITCH_2;
control_value |= FIELD_PREP(SDHCI_GLI_9750_TUNING_CONTROL_GLITCH_1,
GLI_9750_TUNING_CONTROL_GLITCH_1_VALUE);
control_value |= FIELD_PREP(SDHCI_GLI_9750_TUNING_CONTROL_GLITCH_2,
GLI_9750_TUNING_CONTROL_GLITCH_2_VALUE);
sdhci_writel(host, pll_value, SDHCI_GLI_9750_PLL);
sdhci_writel(host, misc_value, SDHCI_GLI_9750_MISC);
/* disable tuned clk */
ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
ctrl2 &= ~SDHCI_CTRL_TUNED_CLK;
sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
/* enable tuning parameters control */
control_value &= ~SDHCI_GLI_9750_TUNING_CONTROL_EN;
control_value |= FIELD_PREP(SDHCI_GLI_9750_TUNING_CONTROL_EN,
GLI_9750_TUNING_CONTROL_EN_ON);
sdhci_writel(host, control_value, SDHCI_GLI_9750_TUNING_CONTROL);
/* write tuning parameters */
sdhci_writel(host, parameter_value, SDHCI_GLI_9750_TUNING_PARAMETERS);
/* disable tuning parameters control */
control_value &= ~SDHCI_GLI_9750_TUNING_CONTROL_EN;
control_value |= FIELD_PREP(SDHCI_GLI_9750_TUNING_CONTROL_EN,
GLI_9750_TUNING_CONTROL_EN_OFF);
sdhci_writel(host, control_value, SDHCI_GLI_9750_TUNING_CONTROL);
/* clear tuned clk */
ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
ctrl2 &= ~SDHCI_CTRL_TUNED_CLK;
sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
gl9750_wt_off(host);
}
static void gli_set_9750_rx_inv(struct sdhci_host *host, bool b)
{
u32 misc_value;
gl9750_wt_on(host);
misc_value = sdhci_readl(host, SDHCI_GLI_9750_MISC);
misc_value &= ~SDHCI_GLI_9750_MISC_RX_INV;
if (b) {
misc_value |= FIELD_PREP(SDHCI_GLI_9750_MISC_RX_INV,
GLI_9750_MISC_RX_INV_ON);
} else {
misc_value |= FIELD_PREP(SDHCI_GLI_9750_MISC_RX_INV,
GLI_9750_MISC_RX_INV_OFF);
}
sdhci_writel(host, misc_value, SDHCI_GLI_9750_MISC);
gl9750_wt_off(host);
}
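/*
 * Tuning is attempted with both RX inversion settings; each pass issues
 * up to GLI_MAX_TUNING_LOOP tuning commands and succeeds as soon as the
 * controller reports a tuned clock.
 */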
static int __sdhci_execute_tuning_9750(struct sdhci_host *host, u32 opcode)
{
int i;
int rx_inv;
for (rx_inv = 0; rx_inv < 2; rx_inv++) {
gli_set_9750_rx_inv(host, !!rx_inv);
sdhci_start_tuning(host);
for (i = 0; i < GLI_MAX_TUNING_LOOP; i++) {
u16 ctrl;
sdhci_send_tuning(host, opcode);
if (!host->tuning_done) {
sdhci_abort_tuning(host, opcode);
break;
}
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
if (ctrl & SDHCI_CTRL_TUNED_CLK)
return 0; /* Success! */
break;
}
}
}
if (!host->tuning_done) {
pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
mmc_hostname(host->mmc));
return -ETIMEDOUT;
}
pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
mmc_hostname(host->mmc));
sdhci_reset_tuning(host);
return -EAGAIN;
}
static int gl9750_execute_tuning(struct sdhci_host *host, u32 opcode)
{
host->mmc->retune_period = 0;
if (host->tuning_mode == SDHCI_TUNING_MODE_1)
host->mmc->retune_period = host->tuning_count;
gli_set_9750(host);
host->tuning_err = __sdhci_execute_tuning_9750(host, opcode);
sdhci_end_tuning(host);
return 0;
}
static void gl9750_disable_ssc_pll(struct sdhci_host *host)
{
u32 pll;
gl9750_wt_on(host);
pll = sdhci_readl(host, SDHCI_GLI_9750_PLL);
pll &= ~(SDHCI_GLI_9750_PLL_DIR | SDHCI_GLI_9750_PLLSSC_EN);
sdhci_writel(host, pll, SDHCI_GLI_9750_PLL);
gl9750_wt_off(host);
}
static void gl9750_set_pll(struct sdhci_host *host, u8 dir, u16 ldiv, u8 pdiv)
{
u32 pll;
gl9750_wt_on(host);
pll = sdhci_readl(host, SDHCI_GLI_9750_PLL);
pll &= ~(SDHCI_GLI_9750_PLL_LDIV |
SDHCI_GLI_9750_PLL_PDIV |
SDHCI_GLI_9750_PLL_DIR);
pll |= FIELD_PREP(SDHCI_GLI_9750_PLL_LDIV, ldiv) |
FIELD_PREP(SDHCI_GLI_9750_PLL_PDIV, pdiv) |
FIELD_PREP(SDHCI_GLI_9750_PLL_DIR, dir);
sdhci_writel(host, pll, SDHCI_GLI_9750_PLL);
gl9750_wt_off(host);
/* wait for pll stable */
mdelay(1);
}
static bool gl9750_ssc_enable(struct sdhci_host *host)
{
u32 misc;
u8 off;
gl9750_wt_on(host);
misc = sdhci_readl(host, SDHCI_GLI_9750_MISC);
off = FIELD_GET(SDHCI_GLI_9750_MISC_SSC_OFF, misc);
gl9750_wt_off(host);
return !off;
}
static void gl9750_set_ssc(struct sdhci_host *host, u8 enable, u8 step, u16 ppm)
{
u32 pll;
u32 ssc;
gl9750_wt_on(host);
pll = sdhci_readl(host, SDHCI_GLI_9750_PLL);
ssc = sdhci_readl(host, SDHCI_GLI_9750_PLLSSC);
pll &= ~(SDHCI_GLI_9750_PLLSSC_STEP |
SDHCI_GLI_9750_PLLSSC_EN);
ssc &= ~SDHCI_GLI_9750_PLLSSC_PPM;
pll |= FIELD_PREP(SDHCI_GLI_9750_PLLSSC_STEP, step) |
FIELD_PREP(SDHCI_GLI_9750_PLLSSC_EN, enable);
ssc |= FIELD_PREP(SDHCI_GLI_9750_PLLSSC_PPM, ppm);
sdhci_writel(host, ssc, SDHCI_GLI_9750_PLLSSC);
sdhci_writel(host, pll, SDHCI_GLI_9750_PLL);
gl9750_wt_off(host);
}
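/*
 * The ldiv/pdiv/dir and SSC step/ppm constants in the helpers below are
 * opaque vendor-tuned values; only the target frequencies (205/100/50
 * MHz) are documented.
 */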
static void gl9750_set_ssc_pll_205mhz(struct sdhci_host *host)
{
bool enable = gl9750_ssc_enable(host);
/* set pll to 205MHz and ssc */
gl9750_set_ssc(host, enable, 0xF, 0x5A1D);
gl9750_set_pll(host, 0x1, 0x246, 0x0);
}
static void gl9750_set_ssc_pll_100mhz(struct sdhci_host *host)
{
bool enable = gl9750_ssc_enable(host);
/* set pll to 100MHz and ssc */
gl9750_set_ssc(host, enable, 0xE, 0x51EC);
gl9750_set_pll(host, 0x1, 0x244, 0x1);
}
static void gl9750_set_ssc_pll_50mhz(struct sdhci_host *host)
{
bool enable = gl9750_ssc_enable(host);
/* set pll to 50MHz and ssc */
gl9750_set_ssc(host, enable, 0xE, 0x51EC);
gl9750_set_pll(host, 0x1, 0x244, 0x3);
}
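/*
 * For SDR104 at a requested 200 MHz the vendor PLL is reprogrammed to
 * run the bus at 205 MHz (reported via actual_clock); other rates use
 * the standard divided clock.
 */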
static void sdhci_gl9750_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct mmc_ios *ios = &host->mmc->ios;
u16 clk;
host->mmc->actual_clock = 0;
gl9750_disable_ssc_pll(host);
sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
if (clock == 0)
return;
clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
if (clock == 200000000 && ios->timing == MMC_TIMING_UHS_SDR104) {
host->mmc->actual_clock = 205000000;
gl9750_set_ssc_pll_205mhz(host);
} else if (clock == 100000000) {
gl9750_set_ssc_pll_100mhz(host);
} else if (clock == 50000000) {
gl9750_set_ssc_pll_50mhz(host);
}
sdhci_enable_clk(host, clk);
}
static void gl9750_hw_setting(struct sdhci_host *host)
{
u32 value;
gl9750_wt_on(host);
value = sdhci_readl(host, SDHCI_GLI_9750_CFG2);
value &= ~SDHCI_GLI_9750_CFG2_L1DLY;
/* set ASPM L1 entry delay to 7.9us */
value |= FIELD_PREP(SDHCI_GLI_9750_CFG2_L1DLY,
GLI_9750_CFG2_L1DLY_VALUE);
sdhci_writel(host, value, SDHCI_GLI_9750_CFG2);
gl9750_wt_off(host);
}
static void gli_pcie_enable_msi(struct sdhci_pci_slot *slot)
{
int ret;
ret = pci_alloc_irq_vectors(slot->chip->pdev, 1, 1,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (ret < 0) {
pr_warn("%s: enable PCI MSI failed, error=%d\n",
mmc_hostname(slot->host->mmc), ret);
return;
}
slot->host->irq = pci_irq_vector(slot->chip->pdev, 0);
}
static inline void gl9755_wt_on(struct pci_dev *pdev)
{
u32 wt_value;
u32 wt_enable;
pci_read_config_dword(pdev, PCI_GLI_9755_WT, &wt_value);
wt_enable = FIELD_GET(PCI_GLI_9755_WT_EN, wt_value);
if (wt_enable == GLI_9755_WT_EN_ON)
return;
wt_value &= ~PCI_GLI_9755_WT_EN;
wt_value |= FIELD_PREP(PCI_GLI_9755_WT_EN, GLI_9755_WT_EN_ON);
pci_write_config_dword(pdev, PCI_GLI_9755_WT, wt_value);
}
static inline void gl9755_wt_off(struct pci_dev *pdev)
{
u32 wt_value;
u32 wt_enable;
pci_read_config_dword(pdev, PCI_GLI_9755_WT, &wt_value);
wt_enable = FIELD_GET(PCI_GLI_9755_WT_EN, wt_value);
if (wt_enable == GLI_9755_WT_EN_OFF)
return;
wt_value &= ~PCI_GLI_9755_WT_EN;
wt_value |= FIELD_PREP(PCI_GLI_9755_WT_EN, GLI_9755_WT_EN_OFF);
pci_write_config_dword(pdev, PCI_GLI_9755_WT, wt_value);
}
static void gl9755_disable_ssc_pll(struct pci_dev *pdev)
{
u32 pll;
gl9755_wt_on(pdev);
pci_read_config_dword(pdev, PCI_GLI_9755_PLL, &pll);
pll &= ~(PCI_GLI_9755_PLL_DIR | PCI_GLI_9755_PLLSSC_EN);
pci_write_config_dword(pdev, PCI_GLI_9755_PLL, pll);
gl9755_wt_off(pdev);
}
static void gl9755_set_pll(struct pci_dev *pdev, u8 dir, u16 ldiv, u8 pdiv)
{
u32 pll;
gl9755_wt_on(pdev);
pci_read_config_dword(pdev, PCI_GLI_9755_PLL, &pll);
pll &= ~(PCI_GLI_9755_PLL_LDIV |
PCI_GLI_9755_PLL_PDIV |
PCI_GLI_9755_PLL_DIR);
pll |= FIELD_PREP(PCI_GLI_9755_PLL_LDIV, ldiv) |
FIELD_PREP(PCI_GLI_9755_PLL_PDIV, pdiv) |
FIELD_PREP(PCI_GLI_9755_PLL_DIR, dir);
pci_write_config_dword(pdev, PCI_GLI_9755_PLL, pll);
gl9755_wt_off(pdev);
/* wait for pll stable */
mdelay(1);
}
static bool gl9755_ssc_enable(struct pci_dev *pdev)
{
u32 misc;
u8 off;
gl9755_wt_on(pdev);
pci_read_config_dword(pdev, PCI_GLI_9755_MISC, &misc);
off = FIELD_GET(PCI_GLI_9755_MISC_SSC_OFF, misc);
gl9755_wt_off(pdev);
return !off;
}
static void gl9755_set_ssc(struct pci_dev *pdev, u8 enable, u8 step, u16 ppm)
{
u32 pll;
u32 ssc;
gl9755_wt_on(pdev);
pci_read_config_dword(pdev, PCI_GLI_9755_PLL, &pll);
pci_read_config_dword(pdev, PCI_GLI_9755_PLLSSC, &ssc);
pll &= ~(PCI_GLI_9755_PLLSSC_STEP |
PCI_GLI_9755_PLLSSC_EN);
ssc &= ~PCI_GLI_9755_PLLSSC_PPM;
pll |= FIELD_PREP(PCI_GLI_9755_PLLSSC_STEP, step) |
FIELD_PREP(PCI_GLI_9755_PLLSSC_EN, enable);
ssc |= FIELD_PREP(PCI_GLI_9755_PLLSSC_PPM, ppm);
pci_write_config_dword(pdev, PCI_GLI_9755_PLLSSC, ssc);
pci_write_config_dword(pdev, PCI_GLI_9755_PLL, pll);
gl9755_wt_off(pdev);
}
static void gl9755_set_ssc_pll_205mhz(struct pci_dev *pdev)
{
bool enable = gl9755_ssc_enable(pdev);
/* set pll to 205MHz and ssc */
gl9755_set_ssc(pdev, enable, 0xF, 0x5A1D);
gl9755_set_pll(pdev, 0x1, 0x246, 0x0);
}
static void gl9755_set_ssc_pll_100mhz(struct pci_dev *pdev)
{
bool enable = gl9755_ssc_enable(pdev);
/* set pll to 100MHz and ssc */
gl9755_set_ssc(pdev, enable, 0xE, 0x51EC);
gl9755_set_pll(pdev, 0x1, 0x244, 0x1);
}
static void gl9755_set_ssc_pll_50mhz(struct pci_dev *pdev)
{
bool enable = gl9755_ssc_enable(pdev);
/* set pll to 50MHz and ssc */
gl9755_set_ssc(pdev, enable, 0xE, 0x51EC);
gl9755_set_pll(pdev, 0x1, 0x244, 0x3);
}
static void sdhci_gl9755_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct mmc_ios *ios = &host->mmc->ios;
struct pci_dev *pdev;
u16 clk;
pdev = slot->chip->pdev;
host->mmc->actual_clock = 0;
gl9755_disable_ssc_pll(pdev);
sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
if (clock == 0)
return;
clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
if (clock == 200000000 && ios->timing == MMC_TIMING_UHS_SDR104) {
host->mmc->actual_clock = 205000000;
gl9755_set_ssc_pll_205mhz(pdev);
} else if (clock == 100000000) {
gl9755_set_ssc_pll_100mhz(pdev);
} else if (clock == 50000000) {
gl9755_set_ssc_pll_50mhz(pdev);
}
sdhci_enable_clk(host, clk);
}
static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
{
struct pci_dev *pdev = slot->chip->pdev;
u32 value;
gl9755_wt_on(pdev);
pci_read_config_dword(pdev, PCI_GLI_9755_PECONF, &value);
/*
* Apple ARM64 platforms using these chips may have
* inverted CD/WP detection.
*/
if (of_property_read_bool(pdev->dev.of_node, "cd-inverted"))
value |= PCI_GLI_9755_INVERT_CD;
if (of_property_read_bool(pdev->dev.of_node, "wp-inverted"))
value |= PCI_GLI_9755_INVERT_WP;
value &= ~PCI_GLI_9755_LFCLK;
value &= ~PCI_GLI_9755_DMACLK;
pci_write_config_dword(pdev, PCI_GLI_9755_PECONF, value);
/* enable short circuit protection */
pci_read_config_dword(pdev, PCI_GLI_9755_SerDes, &value);
value &= ~PCI_GLI_9755_SCP_DIS;
pci_write_config_dword(pdev, PCI_GLI_9755_SerDes, value);
pci_read_config_dword(pdev, PCI_GLI_9755_CFG2, &value);
value &= ~PCI_GLI_9755_CFG2_L1DLY;
/* set ASPM L1 entry delay to 7.9us */
value |= FIELD_PREP(PCI_GLI_9755_CFG2_L1DLY,
GLI_9755_CFG2_L1DLY_VALUE);
pci_write_config_dword(pdev, PCI_GLI_9755_CFG2, value);
/* toggle PM state to allow GL9755 to enter ASPM L1.2 */
pci_read_config_dword(pdev, PCI_GLI_9755_PM_CTRL, &value);
value |= PCI_GLI_9755_PM_STATE;
pci_write_config_dword(pdev, PCI_GLI_9755_PM_CTRL, value);
value &= ~PCI_GLI_9755_PM_STATE;
pci_write_config_dword(pdev, PCI_GLI_9755_PM_CTRL, value);
gl9755_wt_off(pdev);
}
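/*
 * PCIE_GLI_9767_VHS apparently selects the access mode for the vendor
 * configuration space: gl9767_vhs_write() makes it writable and
 * gl9767_vhs_read() returns it to read-only, bracketing every update.
 */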
static inline void gl9767_vhs_read(struct pci_dev *pdev)
{
u32 vhs_enable;
u32 vhs_value;
pci_read_config_dword(pdev, PCIE_GLI_9767_VHS, &vhs_value);
vhs_enable = FIELD_GET(GLI_9767_VHS_REV, vhs_value);
if (vhs_enable == GLI_9767_VHS_REV_R)
return;
vhs_value &= ~GLI_9767_VHS_REV;
vhs_value |= FIELD_PREP(GLI_9767_VHS_REV, GLI_9767_VHS_REV_R);
pci_write_config_dword(pdev, PCIE_GLI_9767_VHS, vhs_value);
}
static inline void gl9767_vhs_write(struct pci_dev *pdev)
{
u32 vhs_enable;
u32 vhs_value;
pci_read_config_dword(pdev, PCIE_GLI_9767_VHS, &vhs_value);
vhs_enable = FIELD_GET(GLI_9767_VHS_REV, vhs_value);
if (vhs_enable == GLI_9767_VHS_REV_W)
return;
vhs_value &= ~GLI_9767_VHS_REV;
vhs_value |= FIELD_PREP(GLI_9767_VHS_REV, GLI_9767_VHS_REV_W);
pci_write_config_dword(pdev, PCIE_GLI_9767_VHS, vhs_value);
}
static bool gl9767_ssc_enable(struct pci_dev *pdev)
{
u32 value;
u8 enable;
gl9767_vhs_write(pdev);
pci_read_config_dword(pdev, PCIE_GLI_9767_COM_MAILBOX, &value);
enable = FIELD_GET(PCIE_GLI_9767_COM_MAILBOX_SSC_EN, value);
gl9767_vhs_read(pdev);
return enable;
}
static void gl9767_set_ssc(struct pci_dev *pdev, u8 enable, u8 step, u16 ppm)
{
u32 pll;
u32 ssc;
gl9767_vhs_write(pdev);
pci_read_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL, &pll);
pci_read_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL2, &ssc);
pll &= ~(PCIE_GLI_9767_SD_PLL_CTL_SSC_STEP_SETTING |
PCIE_GLI_9767_SD_PLL_CTL_SSC_EN);
ssc &= ~PCIE_GLI_9767_SD_PLL_CTL2_PLLSSC_PPM;
pll |= FIELD_PREP(PCIE_GLI_9767_SD_PLL_CTL_SSC_STEP_SETTING, step) |
FIELD_PREP(PCIE_GLI_9767_SD_PLL_CTL_SSC_EN, enable);
ssc |= FIELD_PREP(PCIE_GLI_9767_SD_PLL_CTL2_PLLSSC_PPM, ppm);
pci_write_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL2, ssc);
pci_write_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL, pll);
gl9767_vhs_read(pdev);
}
static void gl9767_set_pll(struct pci_dev *pdev, u8 dir, u16 ldiv, u8 pdiv)
{
u32 pll;
gl9767_vhs_write(pdev);
pci_read_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL, &pll);
pll &= ~(PCIE_GLI_9767_SD_PLL_CTL_PLL_LDIV |
PCIE_GLI_9767_SD_PLL_CTL_PLL_PDIV |
PCIE_GLI_9767_SD_PLL_CTL_PLL_DIR_EN);
pll |= FIELD_PREP(PCIE_GLI_9767_SD_PLL_CTL_PLL_LDIV, ldiv) |
FIELD_PREP(PCIE_GLI_9767_SD_PLL_CTL_PLL_PDIV, pdiv) |
FIELD_PREP(PCIE_GLI_9767_SD_PLL_CTL_PLL_DIR_EN, dir);
pci_write_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL, pll);
gl9767_vhs_read(pdev);
/* wait for pll stable */
usleep_range(1000, 1100);
}
static void gl9767_set_ssc_pll_205mhz(struct pci_dev *pdev)
{
bool enable = gl9767_ssc_enable(pdev);
/* set pll to 205MHz and ssc */
gl9767_set_ssc(pdev, enable, 0x1F, 0xF5C3);
gl9767_set_pll(pdev, 0x1, 0x246, 0x0);
}
static void gl9767_disable_ssc_pll(struct pci_dev *pdev)
{
u32 pll;
gl9767_vhs_write(pdev);
pci_read_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL, &pll);
pll &= ~(PCIE_GLI_9767_SD_PLL_CTL_PLL_DIR_EN | PCIE_GLI_9767_SD_PLL_CTL_SSC_EN);
pci_write_config_dword(pdev, PCIE_GLI_9767_SD_PLL_CTL, pll);
gl9767_vhs_read(pdev);
}
static void sdhci_gl9767_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct mmc_ios *ios = &host->mmc->ios;
struct pci_dev *pdev;
u32 value;
u16 clk;
pdev = slot->chip->pdev;
host->mmc->actual_clock = 0;
gl9767_vhs_write(pdev);
pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value);
value |= PCIE_GLI_9767_CFG_LOW_PWR_OFF;
pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value);
gl9767_disable_ssc_pll(pdev);
sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
if (clock == 0)
return;
clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
if (clock == 200000000 && ios->timing == MMC_TIMING_UHS_SDR104) {
host->mmc->actual_clock = 205000000;
gl9767_set_ssc_pll_205mhz(pdev);
}
sdhci_enable_clk(host, clk);
pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value);
value &= ~PCIE_GLI_9767_CFG_LOW_PWR_OFF;
pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value);
gl9767_vhs_read(pdev);
}
static void gli_set_9767(struct sdhci_host *host)
{
u32 value;
value = sdhci_readl(host, SDHCI_GLI_9767_GM_BURST_SIZE);
value &= ~SDHCI_GLI_9767_GM_BURST_SIZE_AXI_ALWAYS_SET;
sdhci_writel(host, value, SDHCI_GLI_9767_GM_BURST_SIZE);
}
static void gl9767_hw_setting(struct sdhci_pci_slot *slot)
{
struct pci_dev *pdev = slot->chip->pdev;
u32 value;
gl9767_vhs_write(pdev);
pci_read_config_dword(pdev, PCIE_GLI_9767_PWR_MACRO_CTL, &value);
value &= ~(PCIE_GLI_9767_PWR_MACRO_CTL_LOW_VOLTAGE |
PCIE_GLI_9767_PWR_MACRO_CTL_LD0_LOW_OUTPUT_VOLTAGE |
PCIE_GLI_9767_PWR_MACRO_CTL_RCLK_AMPLITUDE_CTL);
value |= PCIE_GLI_9767_PWR_MACRO_CTL_LOW_VOLTAGE |
FIELD_PREP(PCIE_GLI_9767_PWR_MACRO_CTL_LD0_LOW_OUTPUT_VOLTAGE,
PCIE_GLI_9767_PWR_MACRO_CTL_LD0_LOW_OUTPUT_VOLTAGE_VALUE) |
FIELD_PREP(PCIE_GLI_9767_PWR_MACRO_CTL_RCLK_AMPLITUDE_CTL,
PCIE_GLI_9767_PWR_MACRO_CTL_RCLK_AMPLITUDE_CTL_VALUE);
pci_write_config_dword(pdev, PCIE_GLI_9767_PWR_MACRO_CTL, value);
pci_read_config_dword(pdev, PCIE_GLI_9767_SCR, &value);
value &= ~(PCIE_GLI_9767_SCR_SYSTEM_CLK_SELECT_MODE0 |
PCIE_GLI_9767_SCR_SYSTEM_CLK_SELECT_MODE1 |
PCIE_GLI_9767_SCR_CFG_RST_DATA_LINK_DOWN);
value |= PCIE_GLI_9767_SCR_AUTO_AXI_W_BURST |
PCIE_GLI_9767_SCR_AUTO_AXI_R_BURST |
PCIE_GLI_9767_SCR_AXI_REQ |
PCIE_GLI_9767_SCR_CARD_DET_PWR_SAVING_EN |
PCIE_GLI_9767_SCR_CORE_PWR_D3_OFF;
pci_write_config_dword(pdev, PCIE_GLI_9767_SCR, value);
gl9767_vhs_read(pdev);
}
static void sdhci_gl9767_reset(struct sdhci_host *host, u8 mask)
{
sdhci_reset(host, mask);
gli_set_9767(host);
}
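/*
 * SD Express bring-up as implemented below: fall back to plain SD
 * timings for write-protected cards, program the combo mux and card
 * disconnect time, enable the SDEI-complete interrupt, gate the SD
 * clock, raise VDD2, kick SDEI_EXE and poll up to ~20 ms for
 * completion. SD Express mode is only switched on for a positive SDEI
 * result; otherwise clock and power are restored.
 */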
static int gl9767_init_sd_express(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct pci_dev *pdev;
u32 value;
int i;
pdev = slot->chip->pdev;
if (mmc->ops->get_ro(mmc)) {
mmc->ios.timing &= ~(MMC_TIMING_SD_EXP | MMC_TIMING_SD_EXP_1_2V);
return 0;
}
gl9767_vhs_write(pdev);
pci_read_config_dword(pdev, PCIE_GLI_9767_COMBO_MUX_CTL, &value);
value &= ~(PCIE_GLI_9767_COMBO_MUX_CTL_RST_EN | PCIE_GLI_9767_COMBO_MUX_CTL_WAIT_PERST_EN);
pci_write_config_dword(pdev, PCIE_GLI_9767_COMBO_MUX_CTL, value);
pci_read_config_dword(pdev, PCIE_GLI_9767_SD_DATA_MULTI_CTL, &value);
value &= ~PCIE_GLI_9767_SD_DATA_MULTI_CTL_DISCONNECT_TIME;
value |= FIELD_PREP(PCIE_GLI_9767_SD_DATA_MULTI_CTL_DISCONNECT_TIME,
PCIE_GLI_9767_SD_DATA_MULTI_CTL_DISCONNECT_TIME_VALUE);
pci_write_config_dword(pdev, PCIE_GLI_9767_SD_DATA_MULTI_CTL, value);
pci_read_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2, &value);
value |= PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2_SDEI_COMPLETE;
pci_write_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2, value);
pci_read_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_EN_REG2, &value);
value |= PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_EN_REG2_SDEI_COMPLETE_STATUS_EN;
pci_write_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_EN_REG2, value);
pci_read_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_SIGNAL_EN_REG2, &value);
value |= PCIE_GLI_9767_NORMAL_ERR_INT_SIGNAL_EN_REG2_SDEI_COMPLETE_SIGNAL_EN;
pci_write_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_SIGNAL_EN_REG2, value);
pci_read_config_dword(pdev, PCIE_GLI_9767_CFG, &value);
value |= PCIE_GLI_9767_CFG_LOW_PWR_OFF;
pci_write_config_dword(pdev, PCIE_GLI_9767_CFG, value);
value = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
value &= ~(SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_PLL_EN);
sdhci_writew(host, value, SDHCI_CLOCK_CONTROL);
value = sdhci_readb(host, SDHCI_POWER_CONTROL);
value |= (SDHCI_VDD2_POWER_180 | SDHCI_VDD2_POWER_ON);
sdhci_writeb(host, value, SDHCI_POWER_CONTROL);
pci_read_config_dword(pdev, PCIE_GLI_9767_SD_EXPRESS_CTL, &value);
value |= PCIE_GLI_9767_SD_EXPRESS_CTL_SDEI_EXE;
pci_write_config_dword(pdev, PCIE_GLI_9767_SD_EXPRESS_CTL, value);
for (i = 0; i < 2; i++) {
usleep_range(10000, 10100);
pci_read_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2, &value);
if (value & PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2_SDEI_COMPLETE) {
pci_write_config_dword(pdev, PCIE_GLI_9767_NORMAL_ERR_INT_STATUS_REG2,
value);
break;
}
}
pci_read_config_dword(pdev, PCIE_GLI_9767_SDHC_CAP, &value);
if (value & PCIE_GLI_9767_SDHC_CAP_SDEI_RESULT) {
pci_read_config_dword(pdev, PCIE_GLI_9767_SD_EXPRESS_CTL, &value);
value |= PCIE_GLI_9767_SD_EXPRESS_CTL_SD_EXPRESS_MODE;
pci_write_config_dword(pdev, PCIE_GLI_9767_SD_EXPRESS_CTL, value);
} else {
mmc->ios.timing &= ~(MMC_TIMING_SD_EXP | MMC_TIMING_SD_EXP_1_2V);
value = sdhci_readb(host, SDHCI_POWER_CONTROL);
value &= ~(SDHCI_VDD2_POWER_180 | SDHCI_VDD2_POWER_ON);
sdhci_writeb(host, value, SDHCI_POWER_CONTROL);
value = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
value |= (SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_PLL_EN);
sdhci_writew(host, value, SDHCI_CLOCK_CONTROL);
}
gl9767_vhs_read(pdev);
return 0;
}
static int gli_probe_slot_gl9750(struct sdhci_pci_slot *slot)
{
struct sdhci_host *host = slot->host;
gl9750_hw_setting(host);
gli_pcie_enable_msi(slot);
slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
sdhci_enable_v4_mode(host);
return 0;
}
static int gli_probe_slot_gl9755(struct sdhci_pci_slot *slot)
{
struct sdhci_host *host = slot->host;
gl9755_hw_setting(slot);
gli_pcie_enable_msi(slot);
slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
sdhci_enable_v4_mode(host);
return 0;
}
static int gli_probe_slot_gl9767(struct sdhci_pci_slot *slot)
{
struct sdhci_host *host = slot->host;
gli_set_9767(host);
gl9767_hw_setting(slot);
gli_pcie_enable_msi(slot);
slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
host->mmc->caps2 |= MMC_CAP2_SD_EXP;
host->mmc_host_ops.init_sd_express = gl9767_init_sd_express;
sdhci_enable_v4_mode(host);
return 0;
}
static void sdhci_gli_voltage_switch(struct sdhci_host *host)
{
/*
* According to Section 3.6.1 signal voltage switch procedure in
* SD Host Controller Simplified Spec. 4.20, steps 6~8 are as
* follows:
* (6) Set 1.8V Signal Enable in the Host Control 2 register.
* (7) Wait 5ms. 1.8V voltage regulator shall be stable within this
* period.
* (8) If 1.8V Signal Enable is cleared by Host Controller, go to
* step (12).
*
	 * Wait 5 ms after setting 1.8V Signal Enable in the Host Control 2
	 * register to ensure the bit is set by GL9750/GL9755.
*
* ...however, the controller in the NUC10i3FNK4 (a 9755) requires
* slightly longer than 5ms before the control register reports that
* 1.8V is ready, and far longer still before the card will actually
* work reliably.
*/
usleep_range(100000, 110000);
}
static void sdhci_gl9767_voltage_switch(struct sdhci_host *host)
{
/*
* According to Section 3.6.1 signal voltage switch procedure in
* SD Host Controller Simplified Spec. 4.20, steps 6~8 are as
* follows:
* (6) Set 1.8V Signal Enable in the Host Control 2 register.
* (7) Wait 5ms. 1.8V voltage regulator shall be stable within this
* period.
* (8) If 1.8V Signal Enable is cleared by Host Controller, go to
* step (12).
*
	 * Wait 5 ms after setting 1.8V Signal Enable in the Host Control 2
	 * register to ensure the bit is set by GL9767.
*/
usleep_range(5000, 5500);
}
static void sdhci_gl9750_reset(struct sdhci_host *host, u8 mask)
{
sdhci_reset(host, mask);
gli_set_9750(host);
}
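/*
 * The GL9750 seemingly reports zeroes in the 3.3V field of the maximum
 * current capabilities register; substitute 0xc8 (200 in the standard
 * 4 mA SDHCI encoding, i.e. 800 mA) so the core sees a usable limit.
 */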
static u32 sdhci_gl9750_readl(struct sdhci_host *host, int reg)
{
u32 value;
value = readl(host->ioaddr + reg);
if (unlikely(reg == SDHCI_MAX_CURRENT && !(value & 0xff)))
value |= 0xc8;
return value;
}
#ifdef CONFIG_PM_SLEEP
static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
{
struct sdhci_pci_slot *slot = chip->slots[0];
pci_free_irq_vectors(slot->chip->pdev);
gli_pcie_enable_msi(slot);
return sdhci_pci_resume_host(chip);
}
static int sdhci_cqhci_gli_resume(struct sdhci_pci_chip *chip)
{
struct sdhci_pci_slot *slot = chip->slots[0];
int ret;
ret = sdhci_pci_gli_resume(chip);
if (ret)
return ret;
return cqhci_resume(slot->host->mmc);
}
static int sdhci_cqhci_gli_suspend(struct sdhci_pci_chip *chip)
{
struct sdhci_pci_slot *slot = chip->slots[0];
int ret;
ret = cqhci_suspend(slot->host->mmc);
if (ret)
return ret;
return sdhci_suspend_host(slot->host);
}
#endif
static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
struct mmc_ios *ios)
{
struct sdhci_host *host = mmc_priv(mmc);
u32 val;
val = sdhci_readl(host, SDHCI_GLI_9763E_HS400_ES_REG);
if (ios->enhanced_strobe)
val |= SDHCI_GLI_9763E_HS400_ES_BIT;
else
val &= ~SDHCI_GLI_9763E_HS400_ES_BIT;
sdhci_writel(host, val, SDHCI_GLI_9763E_HS400_ES_REG);
}
static void sdhci_set_gl9763e_signaling(struct sdhci_host *host,
unsigned int timing)
{
u16 ctrl_2;
ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
if (timing == MMC_TIMING_MMC_HS200)
ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
else if (timing == MMC_TIMING_MMC_HS)
ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
else if (timing == MMC_TIMING_MMC_DDR52)
ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
else if (timing == MMC_TIMING_MMC_HS400)
ctrl_2 |= SDHCI_GLI_9763E_CTRL_HS400;
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
static void sdhci_gl9763e_dumpregs(struct mmc_host *mmc)
{
sdhci_dumpregs(mmc_priv(mmc));
}
static void sdhci_gl9763e_cqe_pre_enable(struct mmc_host *mmc)
{
struct cqhci_host *cq_host = mmc->cqe_private;
u32 value;
value = cqhci_readl(cq_host, CQHCI_CFG);
value |= CQHCI_ENABLE;
cqhci_writel(cq_host, value, CQHCI_CFG);
}
static void sdhci_gl9763e_cqe_enable(struct mmc_host *mmc)
{
struct sdhci_host *host = mmc_priv(mmc);
sdhci_writew(host, GLI_9763E_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
sdhci_cqe_enable(mmc);
}
static u32 sdhci_gl9763e_cqhci_irq(struct sdhci_host *host, u32 intmask)
{
int cmd_error = 0;
int data_error = 0;
if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
return intmask;
cqhci_irq(host->mmc, intmask, cmd_error, data_error);
return 0;
}
static void sdhci_gl9763e_cqe_post_disable(struct mmc_host *mmc)
{
struct sdhci_host *host = mmc_priv(mmc);
struct cqhci_host *cq_host = mmc->cqe_private;
u32 value;
value = cqhci_readl(cq_host, CQHCI_CFG);
value &= ~CQHCI_ENABLE;
cqhci_writel(cq_host, value, CQHCI_CFG);
sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
}
static const struct cqhci_host_ops sdhci_gl9763e_cqhci_ops = {
.enable = sdhci_gl9763e_cqe_enable,
.disable = sdhci_cqe_disable,
.dumpregs = sdhci_gl9763e_dumpregs,
.pre_enable = sdhci_gl9763e_cqe_pre_enable,
.post_disable = sdhci_gl9763e_cqe_post_disable,
};
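/*
 * ->add_host is overridden so the CQHCI engine can be wired in between
 * sdhci_setup_host() and __sdhci_add_host(); the CQE register block
 * sits at vendor offset SDHCI_GLI_9763E_CQE_BASE_ADDR.
 */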
static int gl9763e_add_host(struct sdhci_pci_slot *slot)
{
struct device *dev = &slot->chip->pdev->dev;
struct sdhci_host *host = slot->host;
struct cqhci_host *cq_host;
bool dma64;
int ret;
ret = sdhci_setup_host(host);
if (ret)
return ret;
cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL);
if (!cq_host) {
ret = -ENOMEM;
goto cleanup;
}
cq_host->mmio = host->ioaddr + SDHCI_GLI_9763E_CQE_BASE_ADDR;
cq_host->ops = &sdhci_gl9763e_cqhci_ops;
dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
if (dma64)
cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
ret = cqhci_init(cq_host, host->mmc, dma64);
if (ret)
goto cleanup;
ret = __sdhci_add_host(host);
if (ret)
goto cleanup;
return 0;
cleanup:
sdhci_cleanup_host(host);
return ret;
}
static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
{
struct pci_dev *pdev = slot->chip->pdev;
u32 value;
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
pci_read_config_dword(pdev, PCIE_GLI_9763E_SCR, &value);
value |= GLI_9763E_SCR_AXI_REQ;
pci_write_config_dword(pdev, PCIE_GLI_9763E_SCR, value);
pci_read_config_dword(pdev, PCIE_GLI_9763E_MMC_CTRL, &value);
value &= ~GLI_9763E_HS400_SLOW;
pci_write_config_dword(pdev, PCIE_GLI_9763E_MMC_CTRL, value);
pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG2, &value);
value &= ~GLI_9763E_CFG2_L1DLY;
/* set ASPM L1 entry delay to 21us */
value |= FIELD_PREP(GLI_9763E_CFG2_L1DLY, GLI_9763E_CFG2_L1DLY_MID);
pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG2, value);
pci_read_config_dword(pdev, PCIE_GLI_9763E_CLKRXDLY, &value);
value &= ~GLI_9763E_HS400_RXDLY;
value |= FIELD_PREP(GLI_9763E_HS400_RXDLY, GLI_9763E_HS400_RXDLY_5);
pci_write_config_dword(pdev, PCIE_GLI_9763E_CLKRXDLY, value);
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
}
#ifdef CONFIG_PM
static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot, bool enable)
{
struct pci_dev *pdev = slot->chip->pdev;
u32 value;
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
if (enable)
value &= ~GLI_9763E_CFG_LPSN_DIS;
else
value |= GLI_9763E_CFG_LPSN_DIS;
pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
}
static int gl9763e_runtime_suspend(struct sdhci_pci_chip *chip)
{
struct sdhci_pci_slot *slot = chip->slots[0];
struct sdhci_host *host = slot->host;
u16 clock;
/* Enable LPM negotiation to allow entering L1 state */
gl9763e_set_low_power_negotiation(slot, true);
clock = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
clock &= ~(SDHCI_CLOCK_PLL_EN | SDHCI_CLOCK_CARD_EN);
sdhci_writew(host, clock, SDHCI_CLOCK_CONTROL);
return 0;
}
static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip)
{
struct sdhci_pci_slot *slot = chip->slots[0];
struct sdhci_host *host = slot->host;
u16 clock;
if (host->mmc->ios.power_mode != MMC_POWER_ON)
return 0;
clock = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
clock |= SDHCI_CLOCK_PLL_EN;
clock &= ~SDHCI_CLOCK_INT_STABLE;
sdhci_writew(host, clock, SDHCI_CLOCK_CONTROL);
/* Wait max 150 ms */
if (read_poll_timeout(sdhci_readw, clock, (clock & SDHCI_CLOCK_INT_STABLE),
1000, 150000, false, host, SDHCI_CLOCK_CONTROL)) {
pr_err("%s: PLL clock never stabilised.\n",
mmc_hostname(host->mmc));
sdhci_dumpregs(host);
}
clock |= SDHCI_CLOCK_CARD_EN;
sdhci_writew(host, clock, SDHCI_CLOCK_CONTROL);
/* Disable LPM negotiation to avoid entering L1 state. */
gl9763e_set_low_power_negotiation(slot, false);
return 0;
}
#endif
static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot)
{
struct pci_dev *pdev = slot->chip->pdev;
struct sdhci_host *host = slot->host;
u32 value;
host->mmc->caps |= MMC_CAP_8_BIT_DATA |
MMC_CAP_1_8V_DDR |
MMC_CAP_NONREMOVABLE;
host->mmc->caps2 |= MMC_CAP2_HS200_1_8V_SDR |
MMC_CAP2_HS400_1_8V |
MMC_CAP2_HS400_ES |
MMC_CAP2_NO_SDIO |
MMC_CAP2_NO_SD;
pci_read_config_dword(pdev, PCIE_GLI_9763E_MB, &value);
if (!(value & GLI_9763E_MB_CMDQ_OFF))
if (value & GLI_9763E_MB_ERP_ON)
host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
gli_pcie_enable_msi(slot);
host->mmc_host_ops.hs400_enhanced_strobe =
gl9763e_hs400_enhanced_strobe;
gli_set_gl9763e(slot);
sdhci_enable_v4_mode(host);
return 0;
}
#define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
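/*
 * GL975x halfword/byte reads are done as aligned 32-bit reads with the
 * requested lane extracted in software, presumably because narrower
 * MMIO reads misbehave on these parts. REG_OFFSET_IN_BITS() turns the
 * byte offset within the dword into a shift, e.g. for reg == 0x0e:
 * (0x0e << 3) & 0x18 == 16, so bytes 2..3 of the dword at 0x0c are
 * shifted down by 16 bits.
 */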
static u16 sdhci_gli_readw(struct sdhci_host *host, int reg)
{
u32 val = readl(host->ioaddr + (reg & ~3));
u16 word;
word = (val >> REG_OFFSET_IN_BITS(reg)) & 0xffff;
return word;
}
static u8 sdhci_gli_readb(struct sdhci_host *host, int reg)
{
u32 val = readl(host->ioaddr + (reg & ~3));
u8 byte = (val >> REG_OFFSET_IN_BITS(reg)) & 0xff;
return byte;
}
static const struct sdhci_ops sdhci_gl9755_ops = {
.read_w = sdhci_gli_readw,
.read_b = sdhci_gli_readb,
.set_clock = sdhci_gl9755_set_clock,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.voltage_switch = sdhci_gli_voltage_switch,
};
const struct sdhci_pci_fixes sdhci_gl9755 = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_BROKEN_DDR50,
.probe_slot = gli_probe_slot_gl9755,
.ops = &sdhci_gl9755_ops,
#ifdef CONFIG_PM_SLEEP
.resume = sdhci_pci_gli_resume,
#endif
};
static const struct sdhci_ops sdhci_gl9750_ops = {
.read_w = sdhci_gli_readw,
.read_b = sdhci_gli_readb,
.read_l = sdhci_gl9750_readl,
.set_clock = sdhci_gl9750_set_clock,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_gl9750_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.voltage_switch = sdhci_gli_voltage_switch,
.platform_execute_tuning = gl9750_execute_tuning,
};
const struct sdhci_pci_fixes sdhci_gl9750 = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_BROKEN_DDR50,
.probe_slot = gli_probe_slot_gl9750,
.ops = &sdhci_gl9750_ops,
#ifdef CONFIG_PM_SLEEP
.resume = sdhci_pci_gli_resume,
#endif
};
static const struct sdhci_ops sdhci_gl9763e_ops = {
.set_clock = sdhci_set_clock,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_and_cqhci_reset,
.set_uhs_signaling = sdhci_set_gl9763e_signaling,
.voltage_switch = sdhci_gli_voltage_switch,
.irq = sdhci_gl9763e_cqhci_irq,
};
const struct sdhci_pci_fixes sdhci_gl9763e = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.probe_slot = gli_probe_slot_gl9763e,
.ops = &sdhci_gl9763e_ops,
#ifdef CONFIG_PM_SLEEP
.resume = sdhci_cqhci_gli_resume,
.suspend = sdhci_cqhci_gli_suspend,
#endif
#ifdef CONFIG_PM
.runtime_suspend = gl9763e_runtime_suspend,
.runtime_resume = gl9763e_runtime_resume,
.allow_runtime_pm = true,
#endif
.add_host = gl9763e_add_host,
};
static const struct sdhci_ops sdhci_gl9767_ops = {
.set_clock = sdhci_gl9767_set_clock,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_gl9767_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.voltage_switch = sdhci_gl9767_voltage_switch,
};
const struct sdhci_pci_fixes sdhci_gl9767 = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_BROKEN_DDR50,
.probe_slot = gli_probe_slot_gl9767,
.ops = &sdhci_gl9767_ops,
#ifdef CONFIG_PM_SLEEP
.resume = sdhci_pci_gli_resume,
#endif
};
| linux-master | drivers/mmc/host/sdhci-pci-gli.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (c) 2011, The Linux Foundation. All rights reserved.
*/
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/bitops.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include "mmci.h"
/* Registers */
#define DML_CONFIG 0x00
#define PRODUCER_CRCI_MSK GENMASK(1, 0)
#define PRODUCER_CRCI_DISABLE 0
#define PRODUCER_CRCI_X_SEL BIT(0)
#define PRODUCER_CRCI_Y_SEL BIT(1)
#define CONSUMER_CRCI_MSK GENMASK(3, 2)
#define CONSUMER_CRCI_DISABLE 0
#define CONSUMER_CRCI_X_SEL BIT(2)
#define CONSUMER_CRCI_Y_SEL BIT(3)
#define PRODUCER_TRANS_END_EN BIT(4)
#define BYPASS BIT(16)
#define DIRECT_MODE BIT(17)
#define INFINITE_CONS_TRANS BIT(18)
#define DML_SW_RESET 0x08
#define DML_PRODUCER_START 0x0c
#define DML_CONSUMER_START 0x10
#define DML_PRODUCER_PIPE_LOGICAL_SIZE 0x14
#define DML_CONSUMER_PIPE_LOGICAL_SIZE 0x18
#define DML_PIPE_ID 0x1c
#define PRODUCER_PIPE_ID_SHFT 0
#define PRODUCER_PIPE_ID_MSK GENMASK(4, 0)
#define CONSUMER_PIPE_ID_SHFT 16
#define CONSUMER_PIPE_ID_MSK GENMASK(20, 16)
#define DML_PRODUCER_BAM_BLOCK_SIZE 0x24
#define DML_PRODUCER_BAM_TRANS_SIZE 0x28
/* other definitions */
#define PRODUCER_PIPE_LOGICAL_SIZE 4096
#define CONSUMER_PIPE_LOGICAL_SIZE 4096
#define DML_OFFSET 0x800
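/*
 * Start a DML-assisted DMA transfer: reads configure the producer side
 * (CRCI-x, BAM block/transaction sizes, transaction-end interrupt)
 * while writes configure the consumer side, after which the matching
 * START register is pulsed.
 */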
static int qcom_dma_start(struct mmci_host *host, unsigned int *datactrl)
{
u32 config;
void __iomem *base = host->base + DML_OFFSET;
struct mmc_data *data = host->data;
int ret = mmci_dmae_start(host, datactrl);
if (ret)
return ret;
if (data->flags & MMC_DATA_READ) {
/* Read operation: configure DML for producer operation */
/* Set producer CRCI-x and disable consumer CRCI */
config = readl_relaxed(base + DML_CONFIG);
config = (config & ~PRODUCER_CRCI_MSK) | PRODUCER_CRCI_X_SEL;
config = (config & ~CONSUMER_CRCI_MSK) | CONSUMER_CRCI_DISABLE;
writel_relaxed(config, base + DML_CONFIG);
/* Set the Producer BAM block size */
writel_relaxed(data->blksz, base + DML_PRODUCER_BAM_BLOCK_SIZE);
/* Set Producer BAM Transaction size */
writel_relaxed(data->blocks * data->blksz,
base + DML_PRODUCER_BAM_TRANS_SIZE);
/* Set Producer Transaction End bit */
config = readl_relaxed(base + DML_CONFIG);
config |= PRODUCER_TRANS_END_EN;
writel_relaxed(config, base + DML_CONFIG);
/* Trigger producer */
writel_relaxed(1, base + DML_PRODUCER_START);
} else {
/* Write operation: configure DML for consumer operation */
		/* Set consumer CRCI-x and disable producer CRCI */
config = readl_relaxed(base + DML_CONFIG);
config = (config & ~CONSUMER_CRCI_MSK) | CONSUMER_CRCI_X_SEL;
config = (config & ~PRODUCER_CRCI_MSK) | PRODUCER_CRCI_DISABLE;
writel_relaxed(config, base + DML_CONFIG);
/* Clear Producer Transaction End bit */
config = readl_relaxed(base + DML_CONFIG);
config &= ~PRODUCER_TRANS_END_EN;
writel_relaxed(config, base + DML_CONFIG);
/* Trigger consumer */
writel_relaxed(1, base + DML_CONSUMER_START);
}
/* make sure the dml is configured before dma is triggered */
wmb();
return 0;
}
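/*
 * Extract the BAM pipe number for the named channel from the first cell
 * of the corresponding "dmas" specifier in the device tree.
 */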
static int of_get_dml_pipe_index(struct device_node *np, const char *name)
{
int index;
struct of_phandle_args dma_spec;
index = of_property_match_string(np, "dma-names", name);
if (index < 0)
return -ENODEV;
if (of_parse_phandle_with_args(np, "dmas", "#dma-cells", index,
&dma_spec))
return -ENODEV;
if (dma_spec.args_count)
return dma_spec.args[0];
return -ENODEV;
}
/* Initialize the dml hardware connected to SD Card controller */
static int qcom_dma_setup(struct mmci_host *host)
{
u32 config;
void __iomem *base;
int consumer_id, producer_id;
struct device_node *np = host->mmc->parent->of_node;
if (mmci_dmae_setup(host))
return -EINVAL;
consumer_id = of_get_dml_pipe_index(np, "tx");
producer_id = of_get_dml_pipe_index(np, "rx");
if (producer_id < 0 || consumer_id < 0) {
mmci_dmae_release(host);
return -EINVAL;
}
base = host->base + DML_OFFSET;
/* Reset the DML block */
writel_relaxed(1, base + DML_SW_RESET);
/* Disable the producer and consumer CRCI */
config = (PRODUCER_CRCI_DISABLE | CONSUMER_CRCI_DISABLE);
/*
	 * Disable bypass mode. Bypass mode is only used when data
	 * transfer happens in PIO mode and the BAM interface should
	 * not be connected to the SDCC-DML.
*/
config &= ~BYPASS;
/*
	 * Disable direct mode: we do not want the DML to master the
	 * AHB bus; the BAM connected to the DML should master it.
*/
config &= ~DIRECT_MODE;
/*
* Disable infinite mode transfer as we won't be doing any
* infinite size data transfers. All data transfer will be
* of finite data size.
*/
config &= ~INFINITE_CONS_TRANS;
writel_relaxed(config, base + DML_CONFIG);
/*
* Initialize the logical BAM pipe size for producer
* and consumer.
*/
writel_relaxed(PRODUCER_PIPE_LOGICAL_SIZE,
base + DML_PRODUCER_PIPE_LOGICAL_SIZE);
writel_relaxed(CONSUMER_PIPE_LOGICAL_SIZE,
base + DML_CONSUMER_PIPE_LOGICAL_SIZE);
/* Initialize Producer/consumer pipe id */
writel_relaxed(producer_id | (consumer_id << CONSUMER_PIPE_ID_SHFT),
base + DML_PIPE_ID);
/* Make sure dml initialization is finished */
mb();
return 0;
}
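/*
 * The Qcom variant apparently programs the block size directly into
 * DATACTRL bits [15:4] instead of the log2 encoding used by other MMCI
 * variants, hence the plain shift below.
 */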
static u32 qcom_get_dctrl_cfg(struct mmci_host *host)
{
return MCI_DPSM_ENABLE | (host->data->blksz << 4);
}
static struct mmci_host_ops qcom_variant_ops = {
.prep_data = mmci_dmae_prep_data,
.unprep_data = mmci_dmae_unprep_data,
.get_datactrl_cfg = qcom_get_dctrl_cfg,
.get_next_data = mmci_dmae_get_next_data,
.dma_setup = qcom_dma_setup,
.dma_release = mmci_dmae_release,
.dma_start = qcom_dma_start,
.dma_finalize = mmci_dmae_finalize,
.dma_error = mmci_dmae_error,
};
void qcom_variant_init(struct mmci_host *host)
{
host->ops = &qcom_variant_ops;
}
| linux-master | drivers/mmc/host/mmci_qcom_dml.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Atmel MultiMedia Card Interface driver
*
* Copyright (C) 2004-2008 Atmel Corporation
*/
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/irq.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio.h>
#include <linux/atmel_pdc.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/unaligned.h>
#define ATMCI_MAX_NR_SLOTS 2
/*
* Superset of MCI IP registers integrated in Atmel AT91 Processor
* Registers and bitfields marked with [2] are only available in MCI2
*/
/* MCI Register Definitions */
#define ATMCI_CR 0x0000 /* Control */
#define ATMCI_CR_MCIEN BIT(0) /* MCI Enable */
#define ATMCI_CR_MCIDIS BIT(1) /* MCI Disable */
#define ATMCI_CR_PWSEN BIT(2) /* Power Save Enable */
#define ATMCI_CR_PWSDIS BIT(3) /* Power Save Disable */
#define ATMCI_CR_SWRST BIT(7) /* Software Reset */
#define ATMCI_MR 0x0004 /* Mode */
#define ATMCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */
#define ATMCI_MR_PWSDIV(x) ((x) << 8) /* Power Saving Divider */
#define ATMCI_MR_RDPROOF BIT(11) /* Read Proof */
#define ATMCI_MR_WRPROOF BIT(12) /* Write Proof */
#define ATMCI_MR_PDCFBYTE BIT(13) /* Force Byte Transfer */
#define ATMCI_MR_PDCPADV BIT(14) /* Padding Value */
#define ATMCI_MR_PDCMODE BIT(15) /* PDC-oriented Mode */
#define ATMCI_MR_CLKODD(x) ((x) << 16) /* LSB of Clock Divider */
#define ATMCI_DTOR 0x0008 /* Data Timeout */
#define ATMCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */
#define ATMCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */
#define ATMCI_SDCR 0x000c /* SD Card / SDIO */
#define ATMCI_SDCSEL_SLOT_A (0 << 0) /* Select SD slot A */
#define ATMCI_SDCSEL_SLOT_B (1 << 0) /* Select SD slot B */
#define ATMCI_SDCSEL_MASK (3 << 0)
#define ATMCI_SDCBUS_1BIT (0 << 6) /* 1-bit data bus */
#define ATMCI_SDCBUS_4BIT (2 << 6) /* 4-bit data bus */
#define ATMCI_SDCBUS_8BIT (3 << 6) /* 8-bit data bus[2] */
#define ATMCI_SDCBUS_MASK (3 << 6)
#define ATMCI_ARGR 0x0010 /* Command Argument */
#define ATMCI_CMDR 0x0014 /* Command */
#define ATMCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */
#define ATMCI_CMDR_RSPTYP_NONE (0 << 6) /* No response */
#define ATMCI_CMDR_RSPTYP_48BIT (1 << 6) /* 48-bit response */
#define ATMCI_CMDR_RSPTYP_136BIT (2 << 6) /* 136-bit response */
#define ATMCI_CMDR_SPCMD_INIT (1 << 8) /* Initialization command */
#define ATMCI_CMDR_SPCMD_SYNC (2 << 8) /* Synchronized command */
#define ATMCI_CMDR_SPCMD_INT (4 << 8) /* Interrupt command */
#define ATMCI_CMDR_SPCMD_INTRESP (5 << 8) /* Interrupt response */
#define ATMCI_CMDR_OPDCMD (1 << 11) /* Open Drain */
#define ATMCI_CMDR_MAXLAT_5CYC (0 << 12) /* Max latency 5 cycles */
#define ATMCI_CMDR_MAXLAT_64CYC (1 << 12) /* Max latency 64 cycles */
#define ATMCI_CMDR_START_XFER (1 << 16) /* Start data transfer */
#define ATMCI_CMDR_STOP_XFER (2 << 16) /* Stop data transfer */
#define ATMCI_CMDR_TRDIR_WRITE (0 << 18) /* Write data */
#define ATMCI_CMDR_TRDIR_READ (1 << 18) /* Read data */
#define ATMCI_CMDR_BLOCK (0 << 19) /* Single-block transfer */
#define ATMCI_CMDR_MULTI_BLOCK (1 << 19) /* Multi-block transfer */
#define ATMCI_CMDR_STREAM (2 << 19) /* MMC Stream transfer */
#define ATMCI_CMDR_SDIO_BYTE (4 << 19) /* SDIO Byte transfer */
#define ATMCI_CMDR_SDIO_BLOCK (5 << 19) /* SDIO Block transfer */
#define ATMCI_CMDR_SDIO_SUSPEND (1 << 24) /* SDIO Suspend Command */
#define ATMCI_CMDR_SDIO_RESUME (2 << 24) /* SDIO Resume Command */
#define ATMCI_BLKR 0x0018 /* Block */
#define ATMCI_BCNT(x) ((x) << 0) /* Data Block Count */
#define ATMCI_BLKLEN(x) ((x) << 16) /* Data Block Length */
#define ATMCI_CSTOR 0x001c /* Completion Signal Timeout[2] */
#define ATMCI_CSTOCYC(x) ((x) << 0) /* CST cycles */
#define ATMCI_CSTOMUL(x) ((x) << 4) /* CST multiplier */
#define ATMCI_RSPR 0x0020 /* Response 0 */
#define ATMCI_RSPR1 0x0024 /* Response 1 */
#define ATMCI_RSPR2 0x0028 /* Response 2 */
#define ATMCI_RSPR3 0x002c /* Response 3 */
#define ATMCI_RDR 0x0030 /* Receive Data */
#define ATMCI_TDR 0x0034 /* Transmit Data */
#define ATMCI_SR 0x0040 /* Status */
#define ATMCI_IER 0x0044 /* Interrupt Enable */
#define ATMCI_IDR 0x0048 /* Interrupt Disable */
#define ATMCI_IMR 0x004c /* Interrupt Mask */
#define ATMCI_CMDRDY BIT(0) /* Command Ready */
#define ATMCI_RXRDY BIT(1) /* Receiver Ready */
#define ATMCI_TXRDY BIT(2) /* Transmitter Ready */
#define ATMCI_BLKE BIT(3) /* Data Block Ended */
#define ATMCI_DTIP BIT(4) /* Data Transfer In Progress */
#define ATMCI_NOTBUSY BIT(5) /* Data Not Busy */
#define ATMCI_ENDRX BIT(6) /* End of RX Buffer */
#define ATMCI_ENDTX BIT(7) /* End of TX Buffer */
#define ATMCI_SDIOIRQA BIT(8) /* SDIO IRQ in slot A */
#define ATMCI_SDIOIRQB BIT(9) /* SDIO IRQ in slot B */
#define ATMCI_SDIOWAIT BIT(12) /* SDIO Read Wait Operation Status */
#define ATMCI_CSRCV BIT(13) /* CE-ATA Completion Signal Received */
#define ATMCI_RXBUFF BIT(14) /* RX Buffer Full */
#define ATMCI_TXBUFE BIT(15) /* TX Buffer Empty */
#define ATMCI_RINDE BIT(16) /* Response Index Error */
#define ATMCI_RDIRE BIT(17) /* Response Direction Error */
#define ATMCI_RCRCE BIT(18) /* Response CRC Error */
#define ATMCI_RENDE BIT(19) /* Response End Bit Error */
#define ATMCI_RTOE BIT(20) /* Response Time-Out Error */
#define ATMCI_DCRCE BIT(21) /* Data CRC Error */
#define ATMCI_DTOE BIT(22) /* Data Time-Out Error */
#define ATMCI_CSTOE BIT(23) /* Completion Signal Time-out Error */
#define ATMCI_BLKOVRE BIT(24) /* DMA Block Overrun Error */
#define ATMCI_DMADONE BIT(25) /* DMA Transfer Done */
#define ATMCI_FIFOEMPTY BIT(26) /* FIFO Empty Flag */
#define ATMCI_XFRDONE BIT(27) /* Transfer Done Flag */
#define ATMCI_ACKRCV BIT(28) /* Boot Operation Acknowledge Received */
#define ATMCI_ACKRCVE BIT(29) /* Boot Operation Acknowledge Error */
#define ATMCI_OVRE BIT(30) /* RX Overrun Error */
#define ATMCI_UNRE BIT(31) /* TX Underrun Error */
#define ATMCI_DMA 0x0050 /* DMA Configuration[2] */
#define ATMCI_DMA_OFFSET(x) ((x) << 0) /* DMA Write Buffer Offset */
#define ATMCI_DMA_CHKSIZE(x) ((x) << 4) /* DMA Channel Read and Write Chunk Size */
#define ATMCI_DMAEN BIT(8) /* DMA Hardware Handshaking Enable */
#define ATMCI_CFG 0x0054 /* Configuration[2] */
#define ATMCI_CFG_FIFOMODE_1DATA BIT(0) /* MCI Internal FIFO control mode */
#define ATMCI_CFG_FERRCTRL_COR BIT(4) /* Flow Error flag reset control mode */
#define ATMCI_CFG_HSMODE BIT(8) /* High Speed Mode */
#define ATMCI_CFG_LSYNC BIT(12) /* Synchronize on the last block */
#define ATMCI_WPMR 0x00e4 /* Write Protection Mode[2] */
#define ATMCI_WP_EN BIT(0) /* WP Enable */
#define ATMCI_WP_KEY (0x4d4349 << 8) /* WP Key */
#define ATMCI_WPSR 0x00e8 /* Write Protection Status[2] */
#define ATMCI_GET_WP_VS(x) ((x) & 0x0f)
#define ATMCI_GET_WP_VSRC(x) (((x) >> 8) & 0xffff)
#define ATMCI_VERSION 0x00FC /* Version */
#define ATMCI_FIFO_APERTURE 0x0200 /* FIFO Aperture[2] */
/* This does not include the FIFO Aperture on MCI2 */
#define ATMCI_REGS_SIZE 0x100
/* Register access macros */
#define atmci_readl(port, reg) \
__raw_readl((port)->regs + (reg))
#define atmci_writel(port, reg, value) \
__raw_writel((value), (port)->regs + (reg))
#define ATMCI_CMD_TIMEOUT_MS 2000
#define AUTOSUSPEND_DELAY 50
#define ATMCI_DATA_ERROR_FLAGS (ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
#define ATMCI_DMA_THRESHOLD 16
enum {
EVENT_CMD_RDY = 0,
EVENT_XFER_COMPLETE,
EVENT_NOTBUSY,
EVENT_DATA_ERROR,
};
enum atmel_mci_state {
STATE_IDLE = 0,
STATE_SENDING_CMD,
STATE_DATA_XFER,
STATE_WAITING_NOTBUSY,
STATE_SENDING_STOP,
STATE_END_REQUEST,
};
enum atmci_xfer_dir {
XFER_RECEIVE = 0,
XFER_TRANSMIT,
};
enum atmci_pdc_buf {
PDC_FIRST_BUF = 0,
PDC_SECOND_BUF,
};
/**
* struct mci_slot_pdata - board-specific per-slot configuration
* @bus_width: Number of data lines wired up to the slot
* @detect_pin: GPIO pin wired to the card detect switch
* @wp_pin: GPIO pin wired to the write protect sensor
* @non_removable: The slot is not removable, so only detect the card once
*
* If a given slot is not present on the board, @bus_width should be
* set to 0. The other fields are ignored in this case.
*
* Any pins that aren't available should be left as NULL.
*
* Note that support for multiple slots is experimental -- some cards
* might get upset if we don't get the clock management exactly right.
* But in most cases, it should work just fine.
*/
struct mci_slot_pdata {
unsigned int bus_width;
struct gpio_desc *detect_pin;
struct gpio_desc *wp_pin;
bool non_removable;
};
/**
* struct mci_platform_data - board-specific MMC/SDcard configuration
* @dma_slave: DMA slave interface to use in data transfers.
* @slot: Per-slot configuration data.
*/
struct mci_platform_data {
void *dma_slave;
dma_filter_fn dma_filter;
struct mci_slot_pdata slot[ATMCI_MAX_NR_SLOTS];
};
struct atmel_mci_caps {
bool has_dma_conf_reg;
bool has_pdc;
bool has_cfg_reg;
bool has_cstor_reg;
bool has_highspeed;
bool has_rwproof;
bool has_odd_clk_div;
bool has_bad_data_ordering;
bool need_reset_after_xfer;
bool need_blksz_mul_4;
bool need_notbusy_for_read_ops;
};
struct atmel_mci_dma {
struct dma_chan *chan;
struct dma_async_tx_descriptor *data_desc;
};
/**
* struct atmel_mci - MMC controller state shared between all slots
* @lock: Spinlock protecting the queue and associated data.
* @regs: Pointer to MMIO registers.
* @sg: Scatterlist entry currently being processed by PIO or PDC code.
* @sg_len: Number of entries in the scatterlist
* @pio_offset: Offset into the current scatterlist entry.
* @buffer: Bounce buffer used if we don't have the r/w proof capability.
* We don't have the time to switch pdc buffers mid-transfer, so a
* single buffer is used for the full transaction.
* @buf_size: size of the buffer.
* @buf_phys_addr: buffer address needed for pdc.
* @cur_slot: The slot which is currently using the controller.
* @mrq: The request currently being processed on @cur_slot,
* or NULL if the controller is idle.
* @cmd: The command currently being sent to the card, or NULL.
* @data: The data currently being transferred, or NULL if no data
* transfer is in progress.
* @data_size: just data->blocks * data->blksz.
* @dma: DMA client state.
* @data_chan: DMA channel being used for the current data transfer.
* @dma_conf: Configuration for the DMA slave
* @cmd_status: Snapshot of SR taken upon completion of the current
* command. Only valid when EVENT_CMD_COMPLETE is pending.
* @data_status: Snapshot of SR taken upon completion of the current
* data transfer. Only valid when EVENT_DATA_COMPLETE or
* EVENT_DATA_ERROR is pending.
* @stop_cmdr: Value to be loaded into CMDR when the stop command is
* to be sent.
* @tasklet: Tasklet running the request state machine.
* @pending_events: Bitmask of events flagged by the interrupt handler
* to be processed by the tasklet.
* @completed_events: Bitmask of events which the state machine has
* processed.
* @state: Tasklet state.
* @queue: List of slots waiting for access to the controller.
* @need_clock_update: Update the clock rate before the next request.
* @need_reset: Reset controller before next request.
* @timer: Software timeout timer, compensating for the data timeout
* error flag which may never rise.
* @mode_reg: Value of the MR register.
* @cfg_reg: Value of the CFG register.
* @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
* rate and timeout calculations.
* @mapbase: Physical address of the MMIO registers.
* @mck: The peripheral bus clock hooked up to the MMC controller.
* @pdev: Platform device associated with the MMC controller.
* @slot: Slots sharing this MMC controller.
* @caps: MCI capabilities depending on MCI version.
* @prepare_data: function to setup MCI before data transfer which
* depends on MCI capabilities.
* @submit_data: function to start data transfer which depends on MCI
* capabilities.
* @stop_transfer: function to stop data transfer which depends on MCI
* capabilities.
*
* Locking
* =======
*
* @lock is a softirq-safe spinlock protecting @queue as well as
* @cur_slot, @mrq and @state. These must always be updated
* at the same time while holding @lock.
*
* @lock also protects mode_reg and need_clock_update since these are
* used to synchronize mode register updates with the queue
* processing.
*
* The @mrq field of struct atmel_mci_slot is also protected by @lock,
* and must always be written at the same time as the slot is added to
* @queue.
*
* @pending_events and @completed_events are accessed using atomic bit
* operations, so they don't need any locking.
*
* None of the fields touched by the interrupt handler need any
* locking. However, ordering is important: Before EVENT_DATA_ERROR or
* EVENT_DATA_COMPLETE is set in @pending_events, all data-related
* interrupts must be disabled and @data_status updated with a
* snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
* CMDRDY interrupt must be disabled and @cmd_status updated with a
* snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
* bytes_xfered field of @data must be written. This is ensured by
* using barriers.
*/
struct atmel_mci {
spinlock_t lock;
void __iomem *regs;
struct scatterlist *sg;
unsigned int sg_len;
unsigned int pio_offset;
unsigned int *buffer;
unsigned int buf_size;
dma_addr_t buf_phys_addr;
struct atmel_mci_slot *cur_slot;
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
unsigned int data_size;
struct atmel_mci_dma dma;
struct dma_chan *data_chan;
struct dma_slave_config dma_conf;
u32 cmd_status;
u32 data_status;
u32 stop_cmdr;
struct tasklet_struct tasklet;
unsigned long pending_events;
unsigned long completed_events;
enum atmel_mci_state state;
struct list_head queue;
bool need_clock_update;
bool need_reset;
struct timer_list timer;
u32 mode_reg;
u32 cfg_reg;
unsigned long bus_hz;
unsigned long mapbase;
struct clk *mck;
struct platform_device *pdev;
struct atmel_mci_slot *slot[ATMCI_MAX_NR_SLOTS];
struct atmel_mci_caps caps;
u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data);
void (*submit_data)(struct atmel_mci *host, struct mmc_data *data);
void (*stop_transfer)(struct atmel_mci *host);
};
/**
* struct atmel_mci_slot - MMC slot state
* @mmc: The mmc_host representing this slot.
* @host: The MMC controller this slot is using.
* @sdc_reg: Value of SDCR to be written before using this slot.
* @sdio_irq: SDIO irq mask for this slot.
* @mrq: mmc_request currently being processed or waiting to be
* processed, or NULL when the slot is idle.
* @queue_node: List node for placing this node in the @queue list of
* &struct atmel_mci.
* @clock: Clock rate configured by set_ios(). Protected by host->lock.
* @flags: Random state bits associated with the slot.
* @detect_pin: GPIO pin used for card detection, or NULL if not
* available.
* @wp_pin: GPIO pin used for card write protect sensing, or NULL
* if not available.
* @detect_timer: Timer used for debouncing @detect_pin interrupts.
*/
struct atmel_mci_slot {
struct mmc_host *mmc;
struct atmel_mci *host;
u32 sdc_reg;
u32 sdio_irq;
struct mmc_request *mrq;
struct list_head queue_node;
unsigned int clock;
unsigned long flags;
#define ATMCI_CARD_PRESENT 0
#define ATMCI_CARD_NEED_INIT 1
#define ATMCI_SHUTDOWN 2
struct gpio_desc *detect_pin;
struct gpio_desc *wp_pin;
struct timer_list detect_timer;
};
#define atmci_test_and_clear_pending(host, event) \
test_and_clear_bit(event, &host->pending_events)
#define atmci_set_completed(host, event) \
set_bit(event, &host->completed_events)
#define atmci_set_pending(host, event) \
set_bit(event, &host->pending_events)
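/*
* A minimal sketch of the publish/consume pattern used with these event
* bits (see the locking notes above struct atmel_mci). The interrupt
* handler records a status snapshot, issues a write barrier, then sets
* the pending bit, e.g.:
*
*	host->cmd_status = status;
*	smp_wmb();
*	atmci_set_pending(host, EVENT_CMD_RDY);
*
* and the tasklet consumes the bit before reading the snapshot:
*
*	if (atmci_test_and_clear_pending(host, EVENT_CMD_RDY))
*		... host->cmd_status is valid here ...
*/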
/*
* The debugfs stuff below is mostly optimized away when
* CONFIG_DEBUG_FS is not set.
*/
static int atmci_req_show(struct seq_file *s, void *v)
{
struct atmel_mci_slot *slot = s->private;
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_command *stop;
struct mmc_data *data;
/* Make sure we get a consistent snapshot */
spin_lock_bh(&slot->host->lock);
mrq = slot->mrq;
if (mrq) {
cmd = mrq->cmd;
data = mrq->data;
stop = mrq->stop;
if (cmd)
seq_printf(s,
"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
cmd->opcode, cmd->arg, cmd->flags,
cmd->resp[0], cmd->resp[1], cmd->resp[2],
cmd->resp[3], cmd->error);
if (data)
seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
data->bytes_xfered, data->blocks,
data->blksz, data->flags, data->error);
if (stop)
seq_printf(s,
"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
stop->opcode, stop->arg, stop->flags,
stop->resp[0], stop->resp[1], stop->resp[2],
stop->resp[3], stop->error);
}
spin_unlock_bh(&slot->host->lock);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(atmci_req);
static void atmci_show_status_reg(struct seq_file *s,
const char *regname, u32 value)
{
static const char *sr_bit[] = {
[0] = "CMDRDY",
[1] = "RXRDY",
[2] = "TXRDY",
[3] = "BLKE",
[4] = "DTIP",
[5] = "NOTBUSY",
[6] = "ENDRX",
[7] = "ENDTX",
[8] = "SDIOIRQA",
[9] = "SDIOIRQB",
[12] = "SDIOWAIT",
[14] = "RXBUFF",
[15] = "TXBUFE",
[16] = "RINDE",
[17] = "RDIRE",
[18] = "RCRCE",
[19] = "RENDE",
[20] = "RTOE",
[21] = "DCRCE",
[22] = "DTOE",
[23] = "CSTOE",
[24] = "BLKOVRE",
[25] = "DMADONE",
[26] = "FIFOEMPTY",
[27] = "XFRDONE",
[30] = "OVRE",
[31] = "UNRE",
};
unsigned int i;
seq_printf(s, "%s:\t0x%08x", regname, value);
for (i = 0; i < ARRAY_SIZE(sr_bit); i++) {
if (value & (1 << i)) {
if (sr_bit[i])
seq_printf(s, " %s", sr_bit[i]);
else
seq_puts(s, " UNKNOWN");
}
}
seq_putc(s, '\n');
}
static int atmci_regs_show(struct seq_file *s, void *v)
{
struct atmel_mci *host = s->private;
u32 *buf;
int ret = 0;
buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
pm_runtime_get_sync(&host->pdev->dev);
/*
* Grab a more or less consistent snapshot. Note that we're
* not disabling interrupts, so IMR and SR may not be
* consistent.
*/
spin_lock_bh(&host->lock);
memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
spin_unlock_bh(&host->lock);
pm_runtime_mark_last_busy(&host->pdev->dev);
pm_runtime_put_autosuspend(&host->pdev->dev);
seq_printf(s, "MR:\t0x%08x%s%s ",
buf[ATMCI_MR / 4],
buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "");
if (host->caps.has_odd_clk_div)
seq_printf(s, "{CLKDIV,CLKODD}=%u\n",
((buf[ATMCI_MR / 4] & 0xff) << 1)
| ((buf[ATMCI_MR / 4] >> 16) & 1));
else
seq_printf(s, "CLKDIV=%u\n",
(buf[ATMCI_MR / 4] & 0xff));
seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]);
seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]);
seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]);
seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
buf[ATMCI_BLKR / 4],
buf[ATMCI_BLKR / 4] & 0xffff,
(buf[ATMCI_BLKR / 4] >> 16) & 0xffff);
if (host->caps.has_cstor_reg)
seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]);
/* Don't read RSPR and RDR; it will consume the data there */
atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]);
atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]);
if (host->caps.has_dma_conf_reg) {
u32 val;
val = buf[ATMCI_DMA / 4];
seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
val, val & 3,
((val >> 4) & 3) ?
1 << (((val >> 4) & 3) + 1) : 1,
val & ATMCI_DMAEN ? " DMAEN" : "");
}
if (host->caps.has_cfg_reg) {
u32 val;
val = buf[ATMCI_CFG / 4];
seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
val,
val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
val & ATMCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
val & ATMCI_CFG_HSMODE ? " HSMODE" : "",
val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
}
kfree(buf);
return ret;
}
DEFINE_SHOW_ATTRIBUTE(atmci_regs);
static void atmci_init_debugfs(struct atmel_mci_slot *slot)
{
struct mmc_host *mmc = slot->mmc;
struct atmel_mci *host = slot->host;
struct dentry *root;
root = mmc->debugfs_root;
if (!root)
return;
debugfs_create_file("regs", S_IRUSR, root, host, &atmci_regs_fops);
debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
debugfs_create_u32("state", S_IRUSR, root, &host->state);
debugfs_create_xul("pending_events", S_IRUSR, root,
&host->pending_events);
debugfs_create_xul("completed_events", S_IRUSR, root,
&host->completed_events);
}
#if defined(CONFIG_OF)
static const struct of_device_id atmci_dt_ids[] = {
{ .compatible = "atmel,hsmci" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmci_dt_ids);
static struct mci_platform_data*
atmci_of_init(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *cnp;
struct mci_platform_data *pdata;
u32 slot_id;
int err;
if (!np) {
dev_err(&pdev->dev, "device node not found\n");
return ERR_PTR(-EINVAL);
}
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
for_each_child_of_node(np, cnp) {
if (of_property_read_u32(cnp, "reg", &slot_id)) {
dev_warn(&pdev->dev, "reg property is missing for %pOF\n",
cnp);
continue;
}
if (slot_id >= ATMCI_MAX_NR_SLOTS) {
dev_warn(&pdev->dev, "can't have more than %d slots\n",
ATMCI_MAX_NR_SLOTS);
of_node_put(cnp);
break;
}
if (of_property_read_u32(cnp, "bus-width",
&pdata->slot[slot_id].bus_width))
pdata->slot[slot_id].bus_width = 1;
pdata->slot[slot_id].detect_pin =
devm_fwnode_gpiod_get(&pdev->dev, of_fwnode_handle(cnp),
"cd", GPIOD_IN, "cd-gpios");
err = PTR_ERR_OR_ZERO(pdata->slot[slot_id].detect_pin);
if (err) {
if (err != -ENOENT)
return ERR_PTR(err);
pdata->slot[slot_id].detect_pin = NULL;
}
pdata->slot[slot_id].non_removable =
of_property_read_bool(cnp, "non-removable");
pdata->slot[slot_id].wp_pin =
devm_fwnode_gpiod_get(&pdev->dev, of_fwnode_handle(cnp),
"wp", GPIOD_IN, "wp-gpios");
err = PTR_ERR_OR_ZERO(pdata->slot[slot_id].wp_pin);
if (err) {
if (err != -ENOENT)
return ERR_PTR(err);
pdata->slot[slot_id].wp_pin = NULL;
}
}
return pdata;
}
#else /* CONFIG_OF */
static inline struct mci_platform_data*
atmci_of_init(struct platform_device *dev)
{
return ERR_PTR(-EINVAL);
}
#endif
static inline unsigned int atmci_get_version(struct atmel_mci *host)
{
return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
}
/*
* Fix sconfig's burst size according to the Atmel MCI. We need to convert
* them as: 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
* With version 0x600, we need to convert them as: 1 -> 0, 2 -> 1, 4 -> 2,
* 8 -> 3, 16 -> 4.
*
* This can be done by finding the most significant bit set: for example,
* maxburst = 8 gives fls(8) = 4, hence 4 - 2 = 2 before version 0x600 and
* 4 - 1 = 3 from version 0x600 onwards.
*/
static inline unsigned int atmci_convert_chksize(struct atmel_mci *host,
unsigned int maxburst)
{
unsigned int version = atmci_get_version(host);
unsigned int offset = 2;
if (version >= 0x600)
offset = 1;
if (maxburst > 1)
return fls(maxburst) - offset;
else
return 0;
}
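/*
* Software timeout handler, armed in atmci_send_command(). It fails the
* current command or data transfer with -ETIMEDOUT, requests a controller
* reset and kicks the tasklet to end the request.
*/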
static void atmci_timeout_timer(struct timer_list *t)
{
struct atmel_mci *host;
host = from_timer(host, t, timer);
dev_dbg(&host->pdev->dev, "software timeout\n");
if (host->mrq->cmd->data) {
host->mrq->cmd->data->error = -ETIMEDOUT;
host->data = NULL;
/*
* With some SDIO modules, the DMA transfer sometimes hangs. If
* stop_transfer() is not called then the DMA request is not
* removed, and following ones are queued and never processed.
*/
if (host->state == STATE_DATA_XFER)
host->stop_transfer(host);
} else {
host->mrq->cmd->error = -ETIMEDOUT;
host->cmd = NULL;
}
host->need_reset = 1;
host->state = STATE_END_REQUEST;
smp_wmb();
tasklet_schedule(&host->tasklet);
}
static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
unsigned int ns)
{
/*
* It is easier here to use us instead of ns for the timeout,
* as it prevents overflows during the calculation.
*/
unsigned int us = DIV_ROUND_UP(ns, 1000);
/* Maximum clock frequency is host->bus_hz/2 */
return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
}
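/*
* Worked example for the DTOR encoding below (illustrative numbers, not
* from a datasheet): a requested timeout of 1000 clocks gives dtomul = 2
* (shift 7) and dtocyc = 8, i.e. a programmed timeout of 8 << 7 = 1024
* cycles, the request rounded up to the next encodable value.
*/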
static void atmci_set_timeout(struct atmel_mci *host,
struct atmel_mci_slot *slot, struct mmc_data *data)
{
static const unsigned int dtomul_to_shift[] = {
0, 4, 7, 8, 10, 12, 16, 20
};
unsigned timeout;
unsigned dtocyc;
unsigned dtomul;
timeout = atmci_ns_to_clocks(host, data->timeout_ns)
+ data->timeout_clks;
for (dtomul = 0; dtomul < 8; dtomul++) {
unsigned shift = dtomul_to_shift[dtomul];
dtocyc = (timeout + (1 << shift) - 1) >> shift;
if (dtocyc < 15)
break;
}
if (dtomul >= 8) {
dtomul = 7;
dtocyc = 15;
}
dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
dtocyc << dtomul_to_shift[dtomul]);
atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc)));
}
/*
* Return mask with command flags to be enabled for this command.
*/
static u32 atmci_prepare_command(struct mmc_host *mmc,
struct mmc_command *cmd)
{
struct mmc_data *data;
u32 cmdr;
cmd->error = -EINPROGRESS;
cmdr = ATMCI_CMDR_CMDNB(cmd->opcode);
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136)
cmdr |= ATMCI_CMDR_RSPTYP_136BIT;
else
cmdr |= ATMCI_CMDR_RSPTYP_48BIT;
}
/*
* This should really be MAXLAT_5 for CMD2 and ACMD41, but
* it's too difficult to determine whether this is an ACMD or
* not. Better make it 64.
*/
cmdr |= ATMCI_CMDR_MAXLAT_64CYC;
if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
cmdr |= ATMCI_CMDR_OPDCMD;
data = cmd->data;
if (data) {
cmdr |= ATMCI_CMDR_START_XFER;
if (cmd->opcode == SD_IO_RW_EXTENDED) {
cmdr |= ATMCI_CMDR_SDIO_BLOCK;
} else {
if (data->blocks > 1)
cmdr |= ATMCI_CMDR_MULTI_BLOCK;
else
cmdr |= ATMCI_CMDR_BLOCK;
}
if (data->flags & MMC_DATA_READ)
cmdr |= ATMCI_CMDR_TRDIR_READ;
}
return cmdr;
}
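/*
* For instance, atmci_prepare_command() turns a single-block read (CMD17,
* R1 response) into cmdr = ATMCI_CMDR_CMDNB(17) | ATMCI_CMDR_RSPTYP_48BIT
* | ATMCI_CMDR_MAXLAT_64CYC | ATMCI_CMDR_START_XFER | ATMCI_CMDR_BLOCK
* | ATMCI_CMDR_TRDIR_READ.
*/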
static void atmci_send_command(struct atmel_mci *host,
struct mmc_command *cmd, u32 cmd_flags)
{
unsigned int timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
ATMCI_CMD_TIMEOUT_MS;
WARN_ON(host->cmd);
host->cmd = cmd;
dev_vdbg(&host->pdev->dev,
"start command: ARGR=0x%08x CMDR=0x%08x\n",
cmd->arg, cmd_flags);
atmci_writel(host, ATMCI_ARGR, cmd->arg);
atmci_writel(host, ATMCI_CMDR, cmd_flags);
mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout_ms));
}
static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
{
dev_dbg(&host->pdev->dev, "send stop command\n");
atmci_send_command(host, data->stop, host->stop_cmdr);
atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
}
/*
* Configure the given PDC buffer, taking care of alignment issues.
* Update host->data_size and host->sg.
*/
static void atmci_pdc_set_single_buf(struct atmel_mci *host,
enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
{
u32 pointer_reg, counter_reg;
unsigned int buf_size;
if (dir == XFER_RECEIVE) {
pointer_reg = ATMEL_PDC_RPR;
counter_reg = ATMEL_PDC_RCR;
} else {
pointer_reg = ATMEL_PDC_TPR;
counter_reg = ATMEL_PDC_TCR;
}
if (buf_nb == PDC_SECOND_BUF) {
pointer_reg += ATMEL_PDC_SCND_BUF_OFF;
counter_reg += ATMEL_PDC_SCND_BUF_OFF;
}
if (!host->caps.has_rwproof) {
buf_size = host->buf_size;
atmci_writel(host, pointer_reg, host->buf_phys_addr);
} else {
buf_size = sg_dma_len(host->sg);
atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
}
if (host->data_size <= buf_size) {
if (host->data_size & 0x3) {
/* If the size is not a multiple of 4, transfer bytes */
atmci_writel(host, counter_reg, host->data_size);
atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE);
} else {
/* Else transfer 32-bit words */
atmci_writel(host, counter_reg, host->data_size / 4);
}
host->data_size = 0;
} else {
/* We assume the size of a page is 32-bit aligned */
atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4);
host->data_size -= sg_dma_len(host->sg);
if (host->data_size)
host->sg = sg_next(host->sg);
}
}
/*
* Configure the PDC buffers according to the data size, i.e. configure
* one or two buffers. Don't use this function if you want to configure
* only the second buffer. In this case, use atmci_pdc_set_single_buf.
*/
static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
{
atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF);
if (host->data_size)
atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF);
}
/*
* Unmap sg lists, called when transfer is finished.
*/
static void atmci_pdc_cleanup(struct atmel_mci *host)
{
struct mmc_data *data = host->data;
if (data)
dma_unmap_sg(&host->pdev->dev,
data->sg, data->sg_len,
mmc_get_dma_dir(data));
}
/*
* Disable PDC transfers and update the pending flags with
* EVENT_XFER_COMPLETE after having received an ATMCI_TXBUFE or
* ATMCI_RXBUFF interrupt. The tasklet will then enable the ATMCI_NOTBUSY
* interrupt needed for both transfer directions.
*/
static void atmci_pdc_complete(struct atmel_mci *host)
{
int transfer_size = host->data->blocks * host->data->blksz;
int i;
atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
if ((!host->caps.has_rwproof)
&& (host->data->flags & MMC_DATA_READ)) {
if (host->caps.has_bad_data_ordering)
for (i = 0; i < transfer_size; i++)
host->buffer[i] = swab32(host->buffer[i]);
sg_copy_from_buffer(host->data->sg, host->data->sg_len,
host->buffer, transfer_size);
}
atmci_pdc_cleanup(host);
dev_dbg(&host->pdev->dev, "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
tasklet_schedule(&host->tasklet);
}
static void atmci_dma_cleanup(struct atmel_mci *host)
{
struct mmc_data *data = host->data;
if (data)
dma_unmap_sg(host->dma.chan->device->dev,
data->sg, data->sg_len,
mmc_get_dma_dir(data));
}
/*
* This function is called by the DMA driver from tasklet context.
*/
static void atmci_dma_complete(void *arg)
{
struct atmel_mci *host = arg;
struct mmc_data *data = host->data;
dev_vdbg(&host->pdev->dev, "DMA complete\n");
if (host->caps.has_dma_conf_reg)
/* Disable DMA hardware handshaking on MCI */
atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);
atmci_dma_cleanup(host);
/*
* If the card was removed, data will be NULL. No point trying
* to send the stop command or waiting for NBUSY in this case.
*/
if (data) {
dev_dbg(&host->pdev->dev,
"(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
tasklet_schedule(&host->tasklet);
/*
* Regardless of what the documentation says, we have
* to wait for NOTBUSY even after block read
* operations.
*
* When the DMA transfer is complete, the controller
* may still be reading the CRC from the card, i.e.
* the data transfer is still in progress and we
* haven't seen all the potential error bits yet.
*
* The interrupt handler will schedule a different
* tasklet to finish things up when the data transfer
* is completely done.
*
* We may not complete the mmc request here anyway
* because the mmc layer may call back and cause us to
* violate the "don't submit new operations from the
* completion callback" rule of the dma engine
* framework.
*/
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
}
/*
* Returns a mask of interrupt flags to be enabled after the whole
* request has been prepared.
*/
static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
{
u32 iflags;
data->error = -EINPROGRESS;
host->sg = data->sg;
host->sg_len = data->sg_len;
host->data = data;
host->data_chan = NULL;
iflags = ATMCI_DATA_ERROR_FLAGS;
/*
* Errata: MMC data write operation with less than 12
* bytes is impossible.
*
* Errata: MCI Transmit Data Register (TDR) FIFO
* corruption when length is not multiple of 4.
*/
if (data->blocks * data->blksz < 12
|| (data->blocks * data->blksz) & 3)
host->need_reset = true;
host->pio_offset = 0;
if (data->flags & MMC_DATA_READ)
iflags |= ATMCI_RXRDY;
else
iflags |= ATMCI_TXRDY;
return iflags;
}
/*
* Set the interrupt flags and load the block length into the MCI mode
* register, even though this value is also accessible in the MCI block
* register. This seems to be necessary for versions before the High Speed
* MCI. It also maps the scatterlist and configures the PDC registers.
*/
static u32
atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
{
u32 iflags, tmp;
int i;
data->error = -EINPROGRESS;
host->data = data;
host->sg = data->sg;
iflags = ATMCI_DATA_ERROR_FLAGS;
/* Enable pdc mode */
atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);
if (data->flags & MMC_DATA_READ)
iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
else
iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
/* Set BLKLEN */
tmp = atmci_readl(host, ATMCI_MR);
tmp &= 0x0000ffff;
tmp |= ATMCI_BLKLEN(data->blksz);
atmci_writel(host, ATMCI_MR, tmp);
/* Configure PDC */
host->data_size = data->blocks * data->blksz;
dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
mmc_get_dma_dir(data));
if ((!host->caps.has_rwproof)
&& (host->data->flags & MMC_DATA_WRITE)) {
sg_copy_to_buffer(host->data->sg, host->data->sg_len,
host->buffer, host->data_size);
if (host->caps.has_bad_data_ordering)
for (i = 0; i < host->data_size; i++)
host->buffer[i] = swab32(host->buffer[i]);
}
if (host->data_size)
atmci_pdc_set_both_buf(host, data->flags & MMC_DATA_READ ?
XFER_RECEIVE : XFER_TRANSMIT);
return iflags;
}
static u32
atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
{
struct dma_chan *chan;
struct dma_async_tx_descriptor *desc;
struct scatterlist *sg;
unsigned int i;
enum dma_transfer_direction slave_dirn;
unsigned int sglen;
u32 maxburst;
u32 iflags;
data->error = -EINPROGRESS;
WARN_ON(host->data);
host->sg = NULL;
host->data = data;
iflags = ATMCI_DATA_ERROR_FLAGS;
/*
* We don't do DMA on "complex" transfers, i.e. with
* non-word-aligned buffers or lengths. Also, we don't bother
* with all the DMA setup overhead for short transfers.
*/
if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
return atmci_prepare_data(host, data);
if (data->blksz & 3)
return atmci_prepare_data(host, data);
for_each_sg(data->sg, sg, data->sg_len, i) {
if (sg->offset & 3 || sg->length & 3)
return atmci_prepare_data(host, data);
}
/* If we don't have a channel, we can't do DMA */
if (!host->dma.chan)
return -ENODEV;
chan = host->dma.chan;
host->data_chan = chan;
if (data->flags & MMC_DATA_READ) {
host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
maxburst = atmci_convert_chksize(host,
host->dma_conf.src_maxburst);
} else {
host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
maxburst = atmci_convert_chksize(host,
host->dma_conf.dst_maxburst);
}
if (host->caps.has_dma_conf_reg)
atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) |
ATMCI_DMAEN);
sglen = dma_map_sg(chan->device->dev, data->sg,
data->sg_len, mmc_get_dma_dir(data));
dmaengine_slave_config(chan, &host->dma_conf);
desc = dmaengine_prep_slave_sg(chan,
data->sg, sglen, slave_dirn,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
goto unmap_exit;
host->dma.data_desc = desc;
desc->callback = atmci_dma_complete;
desc->callback_param = host;
return iflags;
unmap_exit:
dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
mmc_get_dma_dir(data));
return -ENOMEM;
}
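/*
* PIO case: there is nothing to start here, the data is pushed to (or
* pulled from) the card by the TXRDY/RXRDY interrupt handlers,
* atmci_write_data_pio() and atmci_read_data_pio().
*/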
static void
atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
{
}
/*
* Start PDC according to transfer direction.
*/
static void
atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data)
{
if (data->flags & MMC_DATA_READ)
atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
else
atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
}
static void
atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
{
struct dma_chan *chan = host->data_chan;
struct dma_async_tx_descriptor *desc = host->dma.data_desc;
if (chan) {
dmaengine_submit(desc);
dma_async_issue_pending(chan);
}
}
static void atmci_stop_transfer(struct atmel_mci *host)
{
dev_dbg(&host->pdev->dev,
"(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
/*
* Stop data transfer because error(s) occurred.
*/
static void atmci_stop_transfer_pdc(struct atmel_mci *host)
{
atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
}
static void atmci_stop_transfer_dma(struct atmel_mci *host)
{
struct dma_chan *chan = host->data_chan;
if (chan) {
dmaengine_terminate_all(chan);
atmci_dma_cleanup(host);
} else {
/* Data transfer was stopped by the interrupt handler */
dev_dbg(&host->pdev->dev,
"(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
}
/*
* Start a request: prepare data if needed, prepare the command and activate
* interrupts.
*/
static void atmci_start_request(struct atmel_mci *host,
struct atmel_mci_slot *slot)
{
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
u32 iflags;
u32 cmdflags;
mrq = slot->mrq;
host->cur_slot = slot;
host->mrq = mrq;
host->pending_events = 0;
host->completed_events = 0;
host->cmd_status = 0;
host->data_status = 0;
dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);
if (host->need_reset || host->caps.need_reset_after_xfer) {
iflags = atmci_readl(host, ATMCI_IMR);
iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
atmci_writel(host, ATMCI_MR, host->mode_reg);
if (host->caps.has_cfg_reg)
atmci_writel(host, ATMCI_CFG, host->cfg_reg);
atmci_writel(host, ATMCI_IER, iflags);
host->need_reset = false;
}
atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);
iflags = atmci_readl(host, ATMCI_IMR);
if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
iflags);
if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
/* Send init sequence (74 clock cycles) */
atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT);
while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY))
cpu_relax();
}
iflags = 0;
data = mrq->data;
if (data) {
atmci_set_timeout(host, slot, data);
/* Must set block count/size before sending command */
atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks)
| ATMCI_BLKLEN(data->blksz));
dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz));
iflags |= host->prepare_data(host, data);
}
iflags |= ATMCI_CMDRDY;
cmd = mrq->cmd;
cmdflags = atmci_prepare_command(slot->mmc, cmd);
/*
* DMA transfer should be started before sending the command to avoid
* unexpected errors especially for read operations in SDIO mode.
* Unfortunately, in PDC mode, command has to be sent before starting
* the transfer.
*/
if (host->submit_data != &atmci_submit_data_dma)
atmci_send_command(host, cmd, cmdflags);
if (data)
host->submit_data(host, data);
if (host->submit_data == &atmci_submit_data_dma)
atmci_send_command(host, cmd, cmdflags);
if (mrq->stop) {
host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
if (!(data->flags & MMC_DATA_WRITE))
host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
}
/*
* We could have enabled interrupts earlier, but I suspect
* that would open up a nice can of interesting race
* conditions (e.g. command and data complete, but stop not
* prepared yet.)
*/
atmci_writel(host, ATMCI_IER, iflags);
}
static void atmci_queue_request(struct atmel_mci *host,
struct atmel_mci_slot *slot, struct mmc_request *mrq)
{
dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
host->state);
spin_lock_bh(&host->lock);
slot->mrq = mrq;
if (host->state == STATE_IDLE) {
host->state = STATE_SENDING_CMD;
atmci_start_request(host, slot);
} else {
dev_dbg(&host->pdev->dev, "queue request\n");
list_add_tail(&slot->queue_node, &host->queue);
}
spin_unlock_bh(&host->lock);
}
static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct atmel_mci_slot *slot = mmc_priv(mmc);
struct atmel_mci *host = slot->host;
struct mmc_data *data;
WARN_ON(slot->mrq);
dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
/*
* We may "know" the card is gone even though there's still an
* electrical connection. If so, we really need to communicate
* this to the MMC core since there won't be any more
* interrupts as the card is completely removed. Otherwise,
* the MMC core might believe the card is still there even
* though the card was just removed very slowly.
*/
if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
mrq->cmd->error = -ENOMEDIUM;
mmc_request_done(mmc, mrq);
return;
}
/* We don't support multiple blocks of weird lengths. */
data = mrq->data;
if (data && data->blocks > 1 && data->blksz & 3) {
mrq->cmd->error = -EINVAL;
mmc_request_done(mmc, mrq);
return;
}
atmci_queue_request(host, slot, mrq);
}
static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct atmel_mci_slot *slot = mmc_priv(mmc);
struct atmel_mci *host = slot->host;
unsigned int i;
slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
switch (ios->bus_width) {
case MMC_BUS_WIDTH_1:
slot->sdc_reg |= ATMCI_SDCBUS_1BIT;
break;
case MMC_BUS_WIDTH_4:
slot->sdc_reg |= ATMCI_SDCBUS_4BIT;
break;
case MMC_BUS_WIDTH_8:
slot->sdc_reg |= ATMCI_SDCBUS_8BIT;
break;
}
if (ios->clock) {
unsigned int clock_min = ~0U;
int clkdiv;
spin_lock_bh(&host->lock);
if (!host->mode_reg) {
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
if (host->caps.has_cfg_reg)
atmci_writel(host, ATMCI_CFG, host->cfg_reg);
}
/*
* Use mirror of ios->clock to prevent race with mmc
* core ios update when finding the minimum.
*/
slot->clock = ios->clock;
for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
if (host->slot[i] && host->slot[i]->clock
&& host->slot[i]->clock < clock_min)
clock_min = host->slot[i]->clock;
}
/* Calculate clock divider */
if (host->caps.has_odd_clk_div) {
clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
if (clkdiv < 0) {
dev_warn(&mmc->class_dev,
"clock %u too fast; using %lu\n",
clock_min, host->bus_hz / 2);
clkdiv = 0;
} else if (clkdiv > 511) {
dev_warn(&mmc->class_dev,
"clock %u too slow; using %lu\n",
clock_min, host->bus_hz / (511 + 2));
clkdiv = 511;
}
host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1)
| ATMCI_MR_CLKODD(clkdiv & 1);
} else {
clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
if (clkdiv > 255) {
dev_warn(&mmc->class_dev,
"clock %u too slow; using %lu\n",
clock_min, host->bus_hz / (2 * 256));
clkdiv = 255;
}
host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
}
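/*
* Illustrative numbers: with bus_hz = 100 MHz and clock_min = 25 MHz,
* the odd divider gives clkdiv = 2, i.e. f = bus_hz / (clkdiv + 2)
* = 25 MHz, while the legacy divider gives clkdiv = 1, i.e.
* f = bus_hz / (2 * (clkdiv + 1)) = 25 MHz.
*/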
/*
* WRPROOF and RDPROOF prevent overruns/underruns by
* stopping the clock when the FIFO is full/empty.
* This state is not expected to last for long.
*/
if (host->caps.has_rwproof)
host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF);
if (host->caps.has_cfg_reg) {
/* setup High Speed mode according to the card capability */
if (ios->timing == MMC_TIMING_SD_HS)
host->cfg_reg |= ATMCI_CFG_HSMODE;
else
host->cfg_reg &= ~ATMCI_CFG_HSMODE;
}
if (list_empty(&host->queue)) {
atmci_writel(host, ATMCI_MR, host->mode_reg);
if (host->caps.has_cfg_reg)
atmci_writel(host, ATMCI_CFG, host->cfg_reg);
} else {
host->need_clock_update = true;
}
spin_unlock_bh(&host->lock);
} else {
bool any_slot_active = false;
spin_lock_bh(&host->lock);
slot->clock = 0;
for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
if (host->slot[i] && host->slot[i]->clock) {
any_slot_active = true;
break;
}
}
if (!any_slot_active) {
atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
if (host->mode_reg) {
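/* Read back to ensure the MCIDIS write has reached the controller */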
atmci_readl(host, ATMCI_MR);
}
host->mode_reg = 0;
}
spin_unlock_bh(&host->lock);
}
switch (ios->power_mode) {
case MMC_POWER_OFF:
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
break;
case MMC_POWER_UP:
set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
break;
default:
break;
}
}
static int atmci_get_ro(struct mmc_host *mmc)
{
int read_only = -ENOSYS;
struct atmel_mci_slot *slot = mmc_priv(mmc);
if (slot->wp_pin) {
read_only = gpiod_get_value(slot->wp_pin);
dev_dbg(&mmc->class_dev, "card is %s\n",
read_only ? "read-only" : "read-write");
}
return read_only;
}
static int atmci_get_cd(struct mmc_host *mmc)
{
int present = -ENOSYS;
struct atmel_mci_slot *slot = mmc_priv(mmc);
if (slot->detect_pin) {
present = gpiod_get_value_cansleep(slot->detect_pin);
dev_dbg(&mmc->class_dev, "card is %spresent\n",
present ? "" : "not ");
}
return present;
}
static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct atmel_mci_slot *slot = mmc_priv(mmc);
struct atmel_mci *host = slot->host;
if (enable)
atmci_writel(host, ATMCI_IER, slot->sdio_irq);
else
atmci_writel(host, ATMCI_IDR, slot->sdio_irq);
}
static const struct mmc_host_ops atmci_ops = {
.request = atmci_request,
.set_ios = atmci_set_ios,
.get_ro = atmci_get_ro,
.get_cd = atmci_get_cd,
.enable_sdio_irq = atmci_enable_sdio_irq,
};
/* Called with host->lock held */
static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
__releases(&host->lock)
__acquires(&host->lock)
{
struct atmel_mci_slot *slot = NULL;
struct mmc_host *prev_mmc = host->cur_slot->mmc;
WARN_ON(host->cmd || host->data);
del_timer(&host->timer);
/*
* Update the MMC clock rate if necessary. This may be
* necessary if set_ios() is called when a different slot is
* busy transferring data.
*/
if (host->need_clock_update) {
atmci_writel(host, ATMCI_MR, host->mode_reg);
if (host->caps.has_cfg_reg)
atmci_writel(host, ATMCI_CFG, host->cfg_reg);
}
host->cur_slot->mrq = NULL;
host->mrq = NULL;
if (!list_empty(&host->queue)) {
slot = list_entry(host->queue.next,
struct atmel_mci_slot, queue_node);
list_del(&slot->queue_node);
dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
mmc_hostname(slot->mmc));
host->state = STATE_SENDING_CMD;
atmci_start_request(host, slot);
} else {
dev_vdbg(&host->pdev->dev, "list empty\n");
host->state = STATE_IDLE;
}
spin_unlock(&host->lock);
mmc_request_done(prev_mmc, mrq);
spin_lock(&host->lock);
}
static void atmci_command_complete(struct atmel_mci *host,
struct mmc_command *cmd)
{
u32 status = host->cmd_status;
/* Read the response from the card (up to 16 bytes) */
cmd->resp[0] = atmci_readl(host, ATMCI_RSPR);
cmd->resp[1] = atmci_readl(host, ATMCI_RSPR);
cmd->resp[2] = atmci_readl(host, ATMCI_RSPR);
cmd->resp[3] = atmci_readl(host, ATMCI_RSPR);
if (status & ATMCI_RTOE)
cmd->error = -ETIMEDOUT;
else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE))
cmd->error = -EILSEQ;
else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
cmd->error = -EIO;
else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
if (host->caps.need_blksz_mul_4) {
cmd->error = -EINVAL;
host->need_reset = 1;
}
} else
cmd->error = 0;
}
static void atmci_detect_change(struct timer_list *t)
{
struct atmel_mci_slot *slot = from_timer(slot, t, detect_timer);
bool present;
bool present_old;
/*
* atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before
* freeing the interrupt. We must not re-enable the interrupt
* if it has been freed, and if we're shutting down, it
* doesn't really matter whether the card is present or not.
*/
smp_rmb();
if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
return;
enable_irq(gpiod_to_irq(slot->detect_pin));
present = gpiod_get_value_cansleep(slot->detect_pin);
present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
present, present_old);
if (present != present_old) {
struct atmel_mci *host = slot->host;
struct mmc_request *mrq;
dev_dbg(&slot->mmc->class_dev, "card %s\n",
present ? "inserted" : "removed");
spin_lock(&host->lock);
if (!present)
clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
else
set_bit(ATMCI_CARD_PRESENT, &slot->flags);
/* Clean up queue if present */
mrq = slot->mrq;
if (mrq) {
if (mrq == host->mrq) {
/*
* Reset controller to terminate any ongoing
* commands or data transfers.
*/
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
atmci_writel(host, ATMCI_MR, host->mode_reg);
if (host->caps.has_cfg_reg)
atmci_writel(host, ATMCI_CFG, host->cfg_reg);
host->data = NULL;
host->cmd = NULL;
switch (host->state) {
case STATE_IDLE:
break;
case STATE_SENDING_CMD:
mrq->cmd->error = -ENOMEDIUM;
if (mrq->data)
host->stop_transfer(host);
break;
case STATE_DATA_XFER:
mrq->data->error = -ENOMEDIUM;
host->stop_transfer(host);
break;
case STATE_WAITING_NOTBUSY:
mrq->data->error = -ENOMEDIUM;
break;
case STATE_SENDING_STOP:
mrq->stop->error = -ENOMEDIUM;
break;
case STATE_END_REQUEST:
break;
}
atmci_request_end(host, mrq);
} else {
list_del(&slot->queue_node);
mrq->cmd->error = -ENOMEDIUM;
if (mrq->data)
mrq->data->error = -ENOMEDIUM;
if (mrq->stop)
mrq->stop->error = -ENOMEDIUM;
spin_unlock(&host->lock);
mmc_request_done(slot->mmc, mrq);
spin_lock(&host->lock);
}
}
spin_unlock(&host->lock);
mmc_detect_change(slot->mmc, 0);
}
}
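/*
* Bottom half: runs the request state machine. It consumes the event
* bits set by the interrupt handler and walks the current request
* through SENDING_CMD, DATA_XFER, WAITING_NOTBUSY, SENDING_STOP and
* END_REQUEST, looping until the state stops changing.
*/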
static void atmci_tasklet_func(struct tasklet_struct *t)
{
struct atmel_mci *host = from_tasklet(host, t, tasklet);
struct mmc_request *mrq = host->mrq;
struct mmc_data *data = host->data;
enum atmel_mci_state state = host->state;
enum atmel_mci_state prev_state;
u32 status;
spin_lock(&host->lock);
state = host->state;
dev_vdbg(&host->pdev->dev,
"tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
state, host->pending_events, host->completed_events,
atmci_readl(host, ATMCI_IMR));
do {
prev_state = state;
dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
switch (state) {
case STATE_IDLE:
break;
case STATE_SENDING_CMD:
/*
* Command has been sent, we are waiting for command
* ready. Then we have three possible next states:
* END_REQUEST by default, WAITING_NOTBUSY if it's a
* command needing it or DATA_XFER if there is data.
*/
dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
if (!atmci_test_and_clear_pending(host,
EVENT_CMD_RDY))
break;
dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
host->cmd = NULL;
atmci_set_completed(host, EVENT_CMD_RDY);
atmci_command_complete(host, mrq->cmd);
if (mrq->data) {
dev_dbg(&host->pdev->dev,
"command with data transfer");
/*
* If there is a command error don't start
* data transfer.
*/
if (mrq->cmd->error) {
host->stop_transfer(host);
host->data = NULL;
atmci_writel(host, ATMCI_IDR,
ATMCI_TXRDY | ATMCI_RXRDY
| ATMCI_DATA_ERROR_FLAGS);
state = STATE_END_REQUEST;
} else
state = STATE_DATA_XFER;
} else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
dev_dbg(&host->pdev->dev,
"command response need waiting notbusy");
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
state = STATE_WAITING_NOTBUSY;
} else
state = STATE_END_REQUEST;
break;
case STATE_DATA_XFER:
if (atmci_test_and_clear_pending(host,
EVENT_DATA_ERROR)) {
dev_dbg(&host->pdev->dev, "set completed data error\n");
atmci_set_completed(host, EVENT_DATA_ERROR);
state = STATE_END_REQUEST;
break;
}
/*
* A data transfer is in progress. The event expected
* to move to the next state depends on the data transfer
* type (PDC or DMA). Once the transfer is done we can move
* to the next step, which is WAITING_NOTBUSY in the write
* case and directly SENDING_STOP in the read case.
*/
dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
if (!atmci_test_and_clear_pending(host,
EVENT_XFER_COMPLETE))
break;
dev_dbg(&host->pdev->dev,
"(%s) set completed xfer complete\n",
__func__);
atmci_set_completed(host, EVENT_XFER_COMPLETE);
if (host->caps.need_notbusy_for_read_ops ||
(host->data->flags & MMC_DATA_WRITE)) {
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
state = STATE_WAITING_NOTBUSY;
} else if (host->mrq->stop) {
atmci_send_stop_cmd(host, data);
state = STATE_SENDING_STOP;
} else {
host->data = NULL;
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
state = STATE_END_REQUEST;
}
break;
case STATE_WAITING_NOTBUSY:
/*
* We can be in this state for two reasons: a command that
* requires waiting for the not busy signal (stop command
* included) or a write operation. In the latter case, we
* need to send a stop command.
*/
dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
if (!atmci_test_and_clear_pending(host,
EVENT_NOTBUSY))
break;
dev_dbg(&host->pdev->dev, "set completed not busy\n");
atmci_set_completed(host, EVENT_NOTBUSY);
if (host->data) {
/*
* For some commands such as CMD53, even if
* there is data transfer, there is no stop
* command to send.
*/
if (host->mrq->stop) {
atmci_send_stop_cmd(host, data);
state = STATE_SENDING_STOP;
} else {
host->data = NULL;
data->bytes_xfered = data->blocks
* data->blksz;
data->error = 0;
state = STATE_END_REQUEST;
}
} else
state = STATE_END_REQUEST;
break;
case STATE_SENDING_STOP:
/*
* In this state, it is important to set host->data to
* NULL (which is tested in the waiting notbusy state)
* in order to go to the end request state instead of
* sending stop again.
*/
dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
if (!atmci_test_and_clear_pending(host,
EVENT_CMD_RDY))
break;
dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
host->cmd = NULL;
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
atmci_command_complete(host, mrq->stop);
if (mrq->stop->error) {
host->stop_transfer(host);
atmci_writel(host, ATMCI_IDR,
ATMCI_TXRDY | ATMCI_RXRDY
| ATMCI_DATA_ERROR_FLAGS);
state = STATE_END_REQUEST;
} else {
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
state = STATE_WAITING_NOTBUSY;
}
host->data = NULL;
break;
case STATE_END_REQUEST:
atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
| ATMCI_DATA_ERROR_FLAGS);
status = host->data_status;
if (unlikely(status)) {
host->stop_transfer(host);
host->data = NULL;
if (data) {
if (status & ATMCI_DTOE) {
data->error = -ETIMEDOUT;
} else if (status & ATMCI_DCRCE) {
data->error = -EILSEQ;
} else {
data->error = -EIO;
}
}
}
atmci_request_end(host, host->mrq);
goto unlock; /* atmci_request_end() sets host->state */
break;
}
} while (state != prev_state);
host->state = state;
unlock:
spin_unlock(&host->lock);
}
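/*
* PIO read path, called from the interrupt handler while RXRDY is set:
* drain the Receive Data Register into the scatterlist one 32-bit word
* at a time, handling entries whose length is not a multiple of 4.
*/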
static void atmci_read_data_pio(struct atmel_mci *host)
{
struct scatterlist *sg = host->sg;
unsigned int offset = host->pio_offset;
struct mmc_data *data = host->data;
u32 value;
u32 status;
unsigned int nbytes = 0;
do {
value = atmci_readl(host, ATMCI_RDR);
if (likely(offset + 4 <= sg->length)) {
sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset);
offset += 4;
nbytes += 4;
if (offset == sg->length) {
flush_dcache_page(sg_page(sg));
host->sg = sg = sg_next(sg);
host->sg_len--;
if (!sg || !host->sg_len)
goto done;
offset = 0;
}
} else {
unsigned int remaining = sg->length - offset;
sg_pcopy_from_buffer(sg, 1, &value, remaining, offset);
nbytes += remaining;
flush_dcache_page(sg_page(sg));
host->sg = sg = sg_next(sg);
host->sg_len--;
if (!sg || !host->sg_len)
goto done;
offset = 4 - remaining;
sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining,
offset, 0);
nbytes += offset;
}
status = atmci_readl(host, ATMCI_SR);
if (status & ATMCI_DATA_ERROR_FLAGS) {
atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY
| ATMCI_DATA_ERROR_FLAGS));
host->data_status = status;
data->bytes_xfered += nbytes;
return;
}
} while (status & ATMCI_RXRDY);
host->pio_offset = offset;
data->bytes_xfered += nbytes;
return;
done:
atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY);
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
data->bytes_xfered += nbytes;
smp_wmb();
atmci_set_pending(host, EVENT_XFER_COMPLETE);
}
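/*
* PIO write path, the TXRDY counterpart of atmci_read_data_pio(): feed
* the Transmit Data Register from the scatterlist one 32-bit word at a
* time.
*/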
static void atmci_write_data_pio(struct atmel_mci *host)
{
struct scatterlist *sg = host->sg;
unsigned int offset = host->pio_offset;
struct mmc_data *data = host->data;
u32 value;
u32 status;
unsigned int nbytes = 0;
do {
if (likely(offset + 4 <= sg->length)) {
sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset);
atmci_writel(host, ATMCI_TDR, value);
offset += 4;
nbytes += 4;
if (offset == sg->length) {
host->sg = sg = sg_next(sg);
host->sg_len--;
if (!sg || !host->sg_len)
goto done;
offset = 0;
}
} else {
unsigned int remaining = sg->length - offset;
value = 0;
sg_pcopy_to_buffer(sg, 1, &value, remaining, offset);
nbytes += remaining;
host->sg = sg = sg_next(sg);
host->sg_len--;
if (!sg || !host->sg_len) {
atmci_writel(host, ATMCI_TDR, value);
goto done;
}
offset = 4 - remaining;
sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining,
offset, 0);
atmci_writel(host, ATMCI_TDR, value);
nbytes += offset;
}
status = atmci_readl(host, ATMCI_SR);
if (status & ATMCI_DATA_ERROR_FLAGS) {
atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY
| ATMCI_DATA_ERROR_FLAGS));
host->data_status = status;
data->bytes_xfered += nbytes;
return;
}
} while (status & ATMCI_TXRDY);
host->pio_offset = offset;
data->bytes_xfered += nbytes;
return;
done:
atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY);
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
data->bytes_xfered += nbytes;
smp_wmb();
atmci_set_pending(host, EVENT_XFER_COMPLETE);
}
static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
{
int i;
for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
struct atmel_mci_slot *slot = host->slot[i];
if (slot && (status & slot->sdio_irq)) {
mmc_signal_sdio_irq(slot->mmc);
}
}
}
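/*
* Top half: snapshot SR, mask it with IMR, disable the sources being
* handled and defer the state machine work to the tasklet. The loop
* bounds the work done in one invocation to a few passes.
*/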
static irqreturn_t atmci_interrupt(int irq, void *dev_id)
{
struct atmel_mci *host = dev_id;
u32 status, mask, pending;
unsigned int pass_count = 0;
do {
status = atmci_readl(host, ATMCI_SR);
mask = atmci_readl(host, ATMCI_IMR);
pending = status & mask;
if (!pending)
break;
if (pending & ATMCI_DATA_ERROR_FLAGS) {
dev_dbg(&host->pdev->dev, "IRQ: data error\n");
atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
| ATMCI_RXRDY | ATMCI_TXRDY
| ATMCI_ENDRX | ATMCI_ENDTX
| ATMCI_RXBUFF | ATMCI_TXBUFE);
host->data_status = status;
dev_dbg(&host->pdev->dev, "set pending data error\n");
smp_wmb();
atmci_set_pending(host, EVENT_DATA_ERROR);
tasklet_schedule(&host->tasklet);
}
if (pending & ATMCI_TXBUFE) {
dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
/*
* We can receive this interrupt before having configured the
* second pdc buffer, so we need to reconfigure the first and
* second buffers again.
*/
if (host->data_size) {
atmci_pdc_set_both_buf(host, XFER_TRANSMIT);
atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE);
} else {
atmci_pdc_complete(host);
}
} else if (pending & ATMCI_ENDTX) {
dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
if (host->data_size) {
atmci_pdc_set_single_buf(host,
XFER_TRANSMIT, PDC_SECOND_BUF);
atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
}
}
if (pending & ATMCI_RXBUFF) {
dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
/*
* We can receive this interrupt before having configured the
* second pdc buffer, so we need to reconfigure the first and
* second buffers again.
*/
if (host->data_size) {
atmci_pdc_set_both_buf(host, XFER_RECEIVE);
atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF);
} else {
atmci_pdc_complete(host);
}
} else if (pending & ATMCI_ENDRX) {
dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
if (host->data_size) {
atmci_pdc_set_single_buf(host,
XFER_RECEIVE, PDC_SECOND_BUF);
atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
}
}
/*
* The first MCI IPs, mainly the ones having a PDC, have some
* issues with the notbusy signal: it is never raised after a
* data transmission if no stop command has been sent.
* The appropriate workaround is to use the BLKE signal.
*/
if (pending & ATMCI_BLKE) {
dev_dbg(&host->pdev->dev, "IRQ: blke\n");
atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
smp_wmb();
dev_dbg(&host->pdev->dev, "set pending notbusy\n");
atmci_set_pending(host, EVENT_NOTBUSY);
tasklet_schedule(&host->tasklet);
}
if (pending & ATMCI_NOTBUSY) {
dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
smp_wmb();
dev_dbg(&host->pdev->dev, "set pending notbusy\n");
atmci_set_pending(host, EVENT_NOTBUSY);
tasklet_schedule(&host->tasklet);
}
if (pending & ATMCI_RXRDY)
atmci_read_data_pio(host);
if (pending & ATMCI_TXRDY)
atmci_write_data_pio(host);
if (pending & ATMCI_CMDRDY) {
dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
host->cmd_status = status;
smp_wmb();
dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
atmci_set_pending(host, EVENT_CMD_RDY);
tasklet_schedule(&host->tasklet);
}
if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
atmci_sdio_interrupt(host, status);
} while (pass_count++ < 5);
return pass_count ? IRQ_HANDLED : IRQ_NONE;
}
static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
{
struct atmel_mci_slot *slot = dev_id;
/*
* Disable interrupts until the pin has stabilized and check
* the state then. Use mod_timer() since we may be in the
* middle of the timer routine when this interrupt triggers.
*/
disable_irq_nosync(irq);
mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));
return IRQ_HANDLED;
}
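/*
* Allocate and register the mmc_host for one slot: derive its
* capabilities from the controller version and board data, and install
* the debounced card-detect interrupt when a detect pin is available.
*/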
static int atmci_init_slot(struct atmel_mci *host,
struct mci_slot_pdata *slot_data, unsigned int id,
u32 sdc_reg, u32 sdio_irq)
{
struct mmc_host *mmc;
struct atmel_mci_slot *slot;
int ret;
mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
if (!mmc)
return -ENOMEM;
slot = mmc_priv(mmc);
slot->mmc = mmc;
slot->host = host;
slot->detect_pin = slot_data->detect_pin;
slot->wp_pin = slot_data->wp_pin;
slot->sdc_reg = sdc_reg;
slot->sdio_irq = sdio_irq;
dev_dbg(&mmc->class_dev,
"slot[%u]: bus_width=%u, detect_pin=%d, "
"detect_is_active_high=%s, wp_pin=%d\n",
id, slot_data->bus_width, desc_to_gpio(slot_data->detect_pin),
!gpiod_is_active_low(slot_data->detect_pin) ? "true" : "false",
desc_to_gpio(slot_data->wp_pin));
mmc->ops = &atmci_ops;
mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
mmc->f_max = host->bus_hz / 2;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
if (sdio_irq)
mmc->caps |= MMC_CAP_SDIO_IRQ;
if (host->caps.has_highspeed)
mmc->caps |= MMC_CAP_SD_HIGHSPEED;
	/*
	 * Without the read/write proof capability, it is strongly
	 * recommended to use only one data bit, to prevent FIFO underruns
	 * and overruns, which would corrupt the transferred data.
	 */
if ((slot_data->bus_width >= 4) && host->caps.has_rwproof) {
mmc->caps |= MMC_CAP_4_BIT_DATA;
if (slot_data->bus_width >= 8)
mmc->caps |= MMC_CAP_8_BIT_DATA;
}
if (atmci_get_version(host) < 0x200) {
mmc->max_segs = 256;
mmc->max_blk_size = 4095;
mmc->max_blk_count = 256;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
} else {
mmc->max_segs = 64;
mmc->max_req_size = 32768 * 512;
mmc->max_blk_size = 32768;
mmc->max_blk_count = 512;
}
/* Assume card is present initially */
set_bit(ATMCI_CARD_PRESENT, &slot->flags);
if (slot->detect_pin) {
if (!gpiod_get_value_cansleep(slot->detect_pin))
clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
} else {
dev_dbg(&mmc->class_dev, "no detect pin available\n");
}
if (!slot->detect_pin) {
if (slot_data->non_removable)
mmc->caps |= MMC_CAP_NONREMOVABLE;
else
mmc->caps |= MMC_CAP_NEEDS_POLL;
}
if (!slot->wp_pin)
dev_dbg(&mmc->class_dev, "no WP pin available\n");
host->slot[id] = slot;
mmc_regulator_get_supply(mmc);
ret = mmc_add_host(mmc);
if (ret) {
mmc_free_host(mmc);
return ret;
}
if (slot->detect_pin) {
timer_setup(&slot->detect_timer, atmci_detect_change, 0);
ret = request_irq(gpiod_to_irq(slot->detect_pin),
atmci_detect_interrupt,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"mmc-detect", slot);
if (ret) {
dev_dbg(&mmc->class_dev,
"could not request IRQ %d for detect pin\n",
gpiod_to_irq(slot->detect_pin));
slot->detect_pin = NULL;
}
}
atmci_init_debugfs(slot);
return 0;
}
static void atmci_cleanup_slot(struct atmel_mci_slot *slot,
unsigned int id)
{
/* Debugfs stuff is cleaned up by mmc core */
set_bit(ATMCI_SHUTDOWN, &slot->flags);
smp_wmb();
mmc_remove_host(slot->mmc);
if (slot->detect_pin) {
free_irq(gpiod_to_irq(slot->detect_pin), slot);
del_timer_sync(&slot->detect_timer);
}
slot->host->slot[id] = NULL;
mmc_free_host(slot->mmc);
}
static int atmci_configure_dma(struct atmel_mci *host)
{
host->dma.chan = dma_request_chan(&host->pdev->dev, "rxtx");
if (PTR_ERR(host->dma.chan) == -ENODEV) {
struct mci_platform_data *pdata = host->pdev->dev.platform_data;
dma_cap_mask_t mask;
if (!pdata || !pdata->dma_filter)
return -ENODEV;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
host->dma.chan = dma_request_channel(mask, pdata->dma_filter,
pdata->dma_slave);
if (!host->dma.chan)
host->dma.chan = ERR_PTR(-ENODEV);
}
if (IS_ERR(host->dma.chan))
return PTR_ERR(host->dma.chan);
dev_info(&host->pdev->dev, "using %s for DMA transfers\n",
dma_chan_name(host->dma.chan));
host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
host->dma_conf.src_maxburst = 1;
host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
host->dma_conf.dst_maxburst = 1;
host->dma_conf.device_fc = false;
return 0;
}
/*
 * The HSMCI (High Speed MCI) module is not fully compatible with the MCI
 * module: HSMCI adds DMA support and a new config register, but it no
 * longer supports the PDC.
 */
static void atmci_get_cap(struct atmel_mci *host)
{
unsigned int version;
version = atmci_get_version(host);
dev_info(&host->pdev->dev,
"version: 0x%x\n", version);
host->caps.has_dma_conf_reg = false;
host->caps.has_pdc = true;
host->caps.has_cfg_reg = false;
host->caps.has_cstor_reg = false;
host->caps.has_highspeed = false;
host->caps.has_rwproof = false;
host->caps.has_odd_clk_div = false;
host->caps.has_bad_data_ordering = true;
host->caps.need_reset_after_xfer = true;
host->caps.need_blksz_mul_4 = true;
host->caps.need_notbusy_for_read_ops = false;
/* keep only major version number */
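	/*
	 * Sketch of the fallthrough cascade below: a hypothetical version
	 * reading of 0x505 keeps 0x500, enabling the odd clock divider, then
	 * falls through the 0x400/0x300 block (DMA config register, no PDC,
	 * CFG/CSTOR registers, high speed), the 0x200 block (rwproof, no
	 * blksz-multiple-of-4 constraint, notbusy needed for reads) and the
	 * 0x100 block (sane data ordering, no reset needed after transfer).
	 */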
switch (version & 0xf00) {
case 0x600:
case 0x500:
host->caps.has_odd_clk_div = true;
fallthrough;
case 0x400:
case 0x300:
host->caps.has_dma_conf_reg = true;
host->caps.has_pdc = false;
host->caps.has_cfg_reg = true;
host->caps.has_cstor_reg = true;
host->caps.has_highspeed = true;
fallthrough;
case 0x200:
host->caps.has_rwproof = true;
host->caps.need_blksz_mul_4 = false;
host->caps.need_notbusy_for_read_ops = true;
fallthrough;
case 0x100:
host->caps.has_bad_data_ordering = false;
host->caps.need_reset_after_xfer = false;
fallthrough;
case 0x0:
break;
default:
host->caps.has_pdc = false;
dev_warn(&host->pdev->dev,
"Unmanaged mci version, set minimum capabilities\n");
break;
}
}
static int atmci_probe(struct platform_device *pdev)
{
struct mci_platform_data *pdata;
struct atmel_mci *host;
struct resource *regs;
unsigned int nr_slots;
int irq;
int ret, i;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs)
return -ENXIO;
pdata = pdev->dev.platform_data;
if (!pdata) {
pdata = atmci_of_init(pdev);
if (IS_ERR(pdata)) {
dev_err(&pdev->dev, "platform data not available\n");
return PTR_ERR(pdata);
}
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
host->pdev = pdev;
spin_lock_init(&host->lock);
INIT_LIST_HEAD(&host->queue);
host->mck = devm_clk_get(&pdev->dev, "mci_clk");
if (IS_ERR(host->mck))
return PTR_ERR(host->mck);
host->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
if (!host->regs)
return -ENOMEM;
ret = clk_prepare_enable(host->mck);
if (ret)
return ret;
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
host->bus_hz = clk_get_rate(host->mck);
host->mapbase = regs->start;
tasklet_setup(&host->tasklet, atmci_tasklet_func);
ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
if (ret) {
clk_disable_unprepare(host->mck);
return ret;
}
/* Get MCI capabilities and set operations according to it */
atmci_get_cap(host);
ret = atmci_configure_dma(host);
if (ret == -EPROBE_DEFER)
goto err_dma_probe_defer;
if (ret == 0) {
host->prepare_data = &atmci_prepare_data_dma;
host->submit_data = &atmci_submit_data_dma;
host->stop_transfer = &atmci_stop_transfer_dma;
} else if (host->caps.has_pdc) {
dev_info(&pdev->dev, "using PDC\n");
host->prepare_data = &atmci_prepare_data_pdc;
host->submit_data = &atmci_submit_data_pdc;
host->stop_transfer = &atmci_stop_transfer_pdc;
} else {
dev_info(&pdev->dev, "using PIO\n");
host->prepare_data = &atmci_prepare_data;
host->submit_data = &atmci_submit_data;
host->stop_transfer = &atmci_stop_transfer;
}
platform_set_drvdata(pdev, host);
timer_setup(&host->timer, atmci_timeout_timer, 0);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
/* We need at least one slot to succeed */
nr_slots = 0;
ret = -ENODEV;
if (pdata->slot[0].bus_width) {
ret = atmci_init_slot(host, &pdata->slot[0],
0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
if (!ret) {
nr_slots++;
host->buf_size = host->slot[0]->mmc->max_req_size;
}
}
if (pdata->slot[1].bus_width) {
ret = atmci_init_slot(host, &pdata->slot[1],
1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
if (!ret) {
nr_slots++;
if (host->slot[1]->mmc->max_req_size > host->buf_size)
host->buf_size =
host->slot[1]->mmc->max_req_size;
}
}
if (!nr_slots) {
dev_err(&pdev->dev, "init failed: no slot defined\n");
goto err_init_slot;
}
if (!host->caps.has_rwproof) {
host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
&host->buf_phys_addr,
GFP_KERNEL);
if (!host->buffer) {
ret = -ENOMEM;
dev_err(&pdev->dev, "buffer allocation failed\n");
goto err_dma_alloc;
}
}
dev_info(&pdev->dev,
"Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
host->mapbase, irq, nr_slots);
pm_runtime_mark_last_busy(&host->pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
err_dma_alloc:
for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
if (host->slot[i])
atmci_cleanup_slot(host->slot[i], i);
}
err_init_slot:
clk_disable_unprepare(host->mck);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
del_timer_sync(&host->timer);
if (!IS_ERR(host->dma.chan))
dma_release_channel(host->dma.chan);
err_dma_probe_defer:
free_irq(irq, host);
return ret;
}
static void atmci_remove(struct platform_device *pdev)
{
struct atmel_mci *host = platform_get_drvdata(pdev);
unsigned int i;
pm_runtime_get_sync(&pdev->dev);
if (host->buffer)
dma_free_coherent(&pdev->dev, host->buf_size,
host->buffer, host->buf_phys_addr);
for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
if (host->slot[i])
atmci_cleanup_slot(host->slot[i], i);
}
atmci_writel(host, ATMCI_IDR, ~0UL);
atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
atmci_readl(host, ATMCI_SR);
del_timer_sync(&host->timer);
if (!IS_ERR(host->dma.chan))
dma_release_channel(host->dma.chan);
free_irq(platform_get_irq(pdev, 0), host);
clk_disable_unprepare(host->mck);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
}
#ifdef CONFIG_PM
static int atmci_runtime_suspend(struct device *dev)
{
struct atmel_mci *host = dev_get_drvdata(dev);
clk_disable_unprepare(host->mck);
pinctrl_pm_select_sleep_state(dev);
return 0;
}
static int atmci_runtime_resume(struct device *dev)
{
struct atmel_mci *host = dev_get_drvdata(dev);
pinctrl_select_default_state(dev);
return clk_prepare_enable(host->mck);
}
#endif
static const struct dev_pm_ops atmci_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(atmci_runtime_suspend, atmci_runtime_resume, NULL)
};
static struct platform_driver atmci_driver = {
.probe = atmci_probe,
.remove_new = atmci_remove,
.driver = {
.name = "atmel_mci",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(atmci_dt_ids),
.pm = &atmci_dev_pm_ops,
},
};
module_platform_driver(atmci_driver);
MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mmc/host/atmel-mci.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* sdhci-pltfm.c Support for SDHCI platform devices
* Copyright (c) 2009 Intel Corporation
*
* Copyright (c) 2007, 2011 Freescale Semiconductor, Inc.
* Copyright (c) 2009 MontaVista Software, Inc.
*
* Authors: Xiaobo Xie <[email protected]>
* Anton Vorontsov <[email protected]>
*/
/* Supports:
* SDHCI platform devices
*
* Inspired by sdhci-pci.c, by Pierre Ossman
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/of.h>
#ifdef CONFIG_PPC
#include <asm/machdep.h>
#endif
#include "sdhci-pltfm.h"
unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
return clk_get_rate(pltfm_host->clk);
}
EXPORT_SYMBOL_GPL(sdhci_pltfm_clk_get_max_clock);
static const struct sdhci_ops sdhci_pltfm_ops = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
static bool sdhci_wp_inverted(struct device *dev)
{
if (device_property_present(dev, "sdhci,wp-inverted") ||
device_property_present(dev, "wp-inverted"))
return true;
/* Old device trees don't have the wp-inverted property. */
#ifdef CONFIG_PPC
return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds);
#else
return false;
#endif /* CONFIG_PPC */
}
static void sdhci_get_compatibility(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node;
if (!np)
return;
if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc"))
host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
if (of_device_is_compatible(np, "fsl,p2020-esdhc") ||
of_device_is_compatible(np, "fsl,p1010-esdhc") ||
of_device_is_compatible(np, "fsl,t4240-esdhc") ||
of_device_is_compatible(np, "fsl,mpc8536-esdhc"))
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
}
void sdhci_get_property(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
u32 bus_width;
if (device_property_present(dev, "sdhci,auto-cmd12"))
host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
if (device_property_present(dev, "sdhci,1-bit-only") ||
(device_property_read_u32(dev, "bus-width", &bus_width) == 0 &&
bus_width == 1))
host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
if (sdhci_wp_inverted(dev))
host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
if (device_property_present(dev, "broken-cd"))
host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
if (device_property_present(dev, "no-1-8-v"))
host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
sdhci_get_compatibility(pdev);
device_property_read_u32(dev, "clock-frequency", &pltfm_host->clock);
if (device_property_present(dev, "keep-power-in-suspend"))
host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
if (device_property_read_bool(dev, "wakeup-source") ||
device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
}
EXPORT_SYMBOL_GPL(sdhci_get_property);
struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
const struct sdhci_pltfm_data *pdata,
size_t priv_size)
{
struct sdhci_host *host;
void __iomem *ioaddr;
int irq, ret;
ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ioaddr)) {
ret = PTR_ERR(ioaddr);
goto err;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto err;
}
host = sdhci_alloc_host(&pdev->dev,
sizeof(struct sdhci_pltfm_host) + priv_size);
if (IS_ERR(host)) {
ret = PTR_ERR(host);
goto err;
}
host->ioaddr = ioaddr;
host->irq = irq;
host->hw_name = dev_name(&pdev->dev);
if (pdata && pdata->ops)
host->ops = pdata->ops;
else
host->ops = &sdhci_pltfm_ops;
if (pdata) {
host->quirks = pdata->quirks;
host->quirks2 = pdata->quirks2;
}
platform_set_drvdata(pdev, host);
return host;
err:
dev_err(&pdev->dev, "%s failed %d\n", __func__, ret);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(sdhci_pltfm_init);
void sdhci_pltfm_free(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
sdhci_free_host(host);
}
EXPORT_SYMBOL_GPL(sdhci_pltfm_free);
int sdhci_pltfm_init_and_add_host(struct platform_device *pdev,
const struct sdhci_pltfm_data *pdata,
size_t priv_size)
{
struct sdhci_host *host;
int ret = 0;
host = sdhci_pltfm_init(pdev, pdata, priv_size);
if (IS_ERR(host))
return PTR_ERR(host);
sdhci_get_property(pdev);
ret = sdhci_add_host(host);
if (ret)
sdhci_pltfm_free(pdev);
return ret;
}
EXPORT_SYMBOL_GPL(sdhci_pltfm_init_and_add_host);
void sdhci_pltfm_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
sdhci_remove_host(host, dead);
sdhci_pltfm_free(pdev);
}
EXPORT_SYMBOL_GPL(sdhci_pltfm_remove);
#ifdef CONFIG_PM_SLEEP
int sdhci_pltfm_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
int ret;
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
ret = sdhci_suspend_host(host);
if (ret)
return ret;
clk_disable_unprepare(pltfm_host->clk);
return 0;
}
EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend);
int sdhci_pltfm_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
int ret;
ret = clk_prepare_enable(pltfm_host->clk);
if (ret)
return ret;
ret = sdhci_resume_host(host);
if (ret)
clk_disable_unprepare(pltfm_host->clk);
return ret;
}
EXPORT_SYMBOL_GPL(sdhci_pltfm_resume);
#endif
const struct dev_pm_ops sdhci_pltfm_pmops = {
SET_SYSTEM_SLEEP_PM_OPS(sdhci_pltfm_suspend, sdhci_pltfm_resume)
};
EXPORT_SYMBOL_GPL(sdhci_pltfm_pmops);
static int __init sdhci_pltfm_drv_init(void)
{
pr_info("sdhci-pltfm: SDHCI platform and OF driver helper\n");
return 0;
}
module_init(sdhci_pltfm_drv_init);
static void __exit sdhci_pltfm_drv_exit(void)
{
}
module_exit(sdhci_pltfm_drv_exit);
MODULE_DESCRIPTION("SDHCI platform and OF driver helper");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mmc/host/sdhci-pltfm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (C) 2019 ASPEED Technology Inc. */
/* Copyright (C) 2019 IBM Corp. */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/math64.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include "sdhci-pltfm.h"
#define ASPEED_SDC_INFO 0x00
#define ASPEED_SDC_S1_MMC8 BIT(25)
#define ASPEED_SDC_S0_MMC8 BIT(24)
#define ASPEED_SDC_PHASE 0xf4
#define ASPEED_SDC_S1_PHASE_IN GENMASK(25, 21)
#define ASPEED_SDC_S0_PHASE_IN GENMASK(20, 16)
#define ASPEED_SDC_S1_PHASE_OUT GENMASK(15, 11)
#define ASPEED_SDC_S1_PHASE_IN_EN BIT(10)
#define ASPEED_SDC_S1_PHASE_OUT_EN GENMASK(9, 8)
#define ASPEED_SDC_S0_PHASE_OUT GENMASK(7, 3)
#define ASPEED_SDC_S0_PHASE_IN_EN BIT(2)
#define ASPEED_SDC_S0_PHASE_OUT_EN GENMASK(1, 0)
#define ASPEED_SDC_PHASE_MAX 31
/* SDIO{10,20} */
#define ASPEED_SDC_CAP1_1_8V (0 * 32 + 26)
/* SDIO{14,24} */
#define ASPEED_SDC_CAP2_SDR104 (1 * 32 + 1)
struct aspeed_sdc {
struct clk *clk;
struct resource *res;
spinlock_t lock;
void __iomem *regs;
};
struct aspeed_sdhci_tap_param {
bool valid;
#define ASPEED_SDHCI_TAP_PARAM_INVERT_CLK BIT(4)
u8 in;
u8 out;
};
struct aspeed_sdhci_tap_desc {
u32 tap_mask;
u32 enable_mask;
u8 enable_value;
};
struct aspeed_sdhci_phase_desc {
struct aspeed_sdhci_tap_desc in;
struct aspeed_sdhci_tap_desc out;
};
struct aspeed_sdhci_pdata {
unsigned int clk_div_start;
const struct aspeed_sdhci_phase_desc *phase_desc;
size_t nr_phase_descs;
};
struct aspeed_sdhci {
const struct aspeed_sdhci_pdata *pdata;
struct aspeed_sdc *parent;
u32 width_mask;
struct mmc_clk_phase_map phase_map;
const struct aspeed_sdhci_phase_desc *phase_desc;
};
/*
 * This function sets the mirror register that updates the
 * capabilities of the current slot.
*
* slot | capability | caps_reg | mirror_reg
* -----|-------------|----------|------------
* 0 | CAP1_1_8V | SDIO140 | SDIO10
* 0 | CAP2_SDR104 | SDIO144 | SDIO14
* 1 | CAP1_1_8V | SDIO240 | SDIO20
* 1 | CAP2_SDR104 | SDIO244 | SDIO24
*/
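/*
 * Worked example of the offsets above: for slot 0 and
 * ASPEED_SDC_CAP2_SDR104 (capability bit 1 * 32 + 1 = 33), cap_reg =
 * 33 / 32 = 1, so the value is read from SDHCI register 0x40 + 1 * 4 =
 * 0x44 (SDIO144) and mirrored at ((0 + 1) * 0x10) + 1 * 4 = 0x14 (SDIO14).
 */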
static void aspeed_sdc_set_slot_capability(struct sdhci_host *host, struct aspeed_sdc *sdc,
int capability, bool enable, u8 slot)
{
u32 mirror_reg_offset;
u32 cap_val;
u8 cap_reg;
if (slot > 1)
return;
cap_reg = capability / 32;
cap_val = sdhci_readl(host, 0x40 + (cap_reg * 4));
if (enable)
cap_val |= BIT(capability % 32);
else
cap_val &= ~BIT(capability % 32);
mirror_reg_offset = ((slot + 1) * 0x10) + (cap_reg * 4);
writel(cap_val, sdc->regs + mirror_reg_offset);
}
static void aspeed_sdc_configure_8bit_mode(struct aspeed_sdc *sdc,
struct aspeed_sdhci *sdhci,
bool bus8)
{
u32 info;
/* Set/clear 8 bit mode */
spin_lock(&sdc->lock);
info = readl(sdc->regs + ASPEED_SDC_INFO);
if (bus8)
info |= sdhci->width_mask;
else
info &= ~sdhci->width_mask;
writel(info, sdc->regs + ASPEED_SDC_INFO);
spin_unlock(&sdc->lock);
}
static u32
aspeed_sdc_set_phase_tap(const struct aspeed_sdhci_tap_desc *desc,
u8 tap, bool enable, u32 reg)
{
reg &= ~(desc->enable_mask | desc->tap_mask);
if (enable) {
reg |= tap << __ffs(desc->tap_mask);
reg |= desc->enable_value << __ffs(desc->enable_mask);
}
return reg;
}
static void
aspeed_sdc_set_phase_taps(struct aspeed_sdc *sdc,
const struct aspeed_sdhci_phase_desc *desc,
const struct aspeed_sdhci_tap_param *taps)
{
u32 reg;
spin_lock(&sdc->lock);
reg = readl(sdc->regs + ASPEED_SDC_PHASE);
reg = aspeed_sdc_set_phase_tap(&desc->in, taps->in, taps->valid, reg);
reg = aspeed_sdc_set_phase_tap(&desc->out, taps->out, taps->valid, reg);
writel(reg, sdc->regs + ASPEED_SDC_PHASE);
spin_unlock(&sdc->lock);
}
#define PICOSECONDS_PER_SECOND 1000000000000ULL
#define ASPEED_SDHCI_NR_TAPS 15
/* Measured value with *handwave* environmentals and static loading */
#define ASPEED_SDHCI_MAX_TAP_DELAY_PS 1253
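/*
 * Worked example of the tap arithmetic below (assuming a 200 MHz bus):
 * clk_period_ps = 5000 and prop_delay_ps = 1253 / 15 = 83 (integer math);
 * a 60 degree correction gives phase_period_ps = 60 * 5000 / 360 = 833,
 * hence tap 833 / 83 = 10. For 240 degrees the clock is inverted first
 * and the remaining 60 degrees yield the same tap with the invert bit set.
 */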
static int aspeed_sdhci_phase_to_tap(struct device *dev, unsigned long rate_hz,
int phase_deg)
{
u64 phase_period_ps;
u64 prop_delay_ps;
u64 clk_period_ps;
unsigned int tap;
u8 inverted;
phase_deg %= 360;
if (phase_deg >= 180) {
inverted = ASPEED_SDHCI_TAP_PARAM_INVERT_CLK;
phase_deg -= 180;
dev_dbg(dev,
"Inverting clock to reduce phase correction from %d to %d degrees\n",
phase_deg + 180, phase_deg);
} else {
inverted = 0;
}
prop_delay_ps = ASPEED_SDHCI_MAX_TAP_DELAY_PS / ASPEED_SDHCI_NR_TAPS;
clk_period_ps = div_u64(PICOSECONDS_PER_SECOND, (u64)rate_hz);
phase_period_ps = div_u64((u64)phase_deg * clk_period_ps, 360ULL);
tap = div_u64(phase_period_ps, prop_delay_ps);
if (tap > ASPEED_SDHCI_NR_TAPS) {
dev_dbg(dev,
"Requested out of range phase tap %d for %d degrees of phase compensation at %luHz, clamping to tap %d\n",
tap, phase_deg, rate_hz, ASPEED_SDHCI_NR_TAPS);
tap = ASPEED_SDHCI_NR_TAPS;
}
return inverted | tap;
}
static void
aspeed_sdhci_phases_to_taps(struct device *dev, unsigned long rate,
const struct mmc_clk_phase *phases,
struct aspeed_sdhci_tap_param *taps)
{
taps->valid = phases->valid;
if (!phases->valid)
return;
taps->in = aspeed_sdhci_phase_to_tap(dev, rate, phases->in_deg);
taps->out = aspeed_sdhci_phase_to_tap(dev, rate, phases->out_deg);
}
static void
aspeed_sdhci_configure_phase(struct sdhci_host *host, unsigned long rate)
{
struct aspeed_sdhci_tap_param _taps = {0}, *taps = &_taps;
struct mmc_clk_phase *params;
struct aspeed_sdhci *sdhci;
struct device *dev;
dev = mmc_dev(host->mmc);
sdhci = sdhci_pltfm_priv(sdhci_priv(host));
if (!sdhci->phase_desc)
return;
params = &sdhci->phase_map.phase[host->timing];
aspeed_sdhci_phases_to_taps(dev, rate, params, taps);
aspeed_sdc_set_phase_taps(sdhci->parent, sdhci->phase_desc, taps);
dev_dbg(dev,
"Using taps [%d, %d] for [%d, %d] degrees of phase correction at %luHz (%d)\n",
taps->in & ASPEED_SDHCI_NR_TAPS,
taps->out & ASPEED_SDHCI_NR_TAPS,
params->in_deg, params->out_deg, rate, host->timing);
}
static void aspeed_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host;
unsigned long parent, bus;
struct aspeed_sdhci *sdhci;
int div;
u16 clk;
pltfm_host = sdhci_priv(host);
sdhci = sdhci_pltfm_priv(pltfm_host);
parent = clk_get_rate(pltfm_host->clk);
sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
if (clock == 0)
return;
if (WARN_ON(clock > host->max_clk))
clock = host->max_clk;
/*
* Regarding the AST2600:
*
* If (EMMC12C[7:6], EMMC12C[15:8] == 0) then
* period of SDCLK = period of SDMCLK.
*
* If (EMMC12C[7:6], EMMC12C[15:8] != 0) then
* period of SDCLK = period of SDMCLK * 2 * (EMMC12C[7:6], EMMC[15:8])
*
	 * If you keep EMMC12C[7:6] = 0 and EMMC12C[15:8] one-hot
	 * (0x1/0x2/0x4/etc), the behaviour is compatible with the AST2400
	 * and AST2500.
*
* Keep the one-hot behaviour for backwards compatibility except for
* supporting the value 0 in (EMMC12C[7:6], EMMC12C[15:8]), and capture
* the 0-value capability in clk_div_start.
*/
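	/*
	 * Worked example (assuming a 200 MHz SDMCLK and a 25 MHz request on
	 * the AST2600, clk_div_start = 1): div walks 1, 2, 4, 8 until
	 * 200 MHz / 8 = 25 MHz <= the request; div >>= 1 then puts 4 in the
	 * SDHCI divider field, which by SDHCI convention divides by 2 * 4 = 8.
	 */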
for (div = sdhci->pdata->clk_div_start; div < 256; div *= 2) {
bus = parent / div;
if (bus <= clock)
break;
}
div >>= 1;
clk = div << SDHCI_DIVIDER_SHIFT;
aspeed_sdhci_configure_phase(host, bus);
sdhci_enable_clk(host, clk);
}
static unsigned int aspeed_sdhci_get_max_clock(struct sdhci_host *host)
{
if (host->mmc->f_max)
return host->mmc->f_max;
return sdhci_pltfm_clk_get_max_clock(host);
}
static void aspeed_sdhci_set_bus_width(struct sdhci_host *host, int width)
{
struct sdhci_pltfm_host *pltfm_priv;
struct aspeed_sdhci *aspeed_sdhci;
struct aspeed_sdc *aspeed_sdc;
u8 ctrl;
pltfm_priv = sdhci_priv(host);
aspeed_sdhci = sdhci_pltfm_priv(pltfm_priv);
aspeed_sdc = aspeed_sdhci->parent;
/* Set/clear 8-bit mode */
aspeed_sdc_configure_8bit_mode(aspeed_sdc, aspeed_sdhci,
width == MMC_BUS_WIDTH_8);
/* Set/clear 1 or 4 bit mode */
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
if (width == MMC_BUS_WIDTH_4)
ctrl |= SDHCI_CTRL_4BITBUS;
else
ctrl &= ~SDHCI_CTRL_4BITBUS;
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
static u32 aspeed_sdhci_readl(struct sdhci_host *host, int reg)
{
u32 val = readl(host->ioaddr + reg);
if (unlikely(reg == SDHCI_PRESENT_STATE) &&
(host->mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH))
val ^= SDHCI_CARD_PRESENT;
return val;
}
static const struct sdhci_ops aspeed_sdhci_ops = {
.read_l = aspeed_sdhci_readl,
.set_clock = aspeed_sdhci_set_clock,
.get_max_clock = aspeed_sdhci_get_max_clock,
.set_bus_width = aspeed_sdhci_set_bus_width,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
static const struct sdhci_pltfm_data aspeed_sdhci_pdata = {
.ops = &aspeed_sdhci_ops,
.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
};
static inline int aspeed_sdhci_calculate_slot(struct aspeed_sdhci *dev,
struct resource *res)
{
resource_size_t delta;
if (!res || resource_type(res) != IORESOURCE_MEM)
return -EINVAL;
if (res->start < dev->parent->res->start)
return -EINVAL;
delta = res->start - dev->parent->res->start;
if (delta & (0x100 - 1))
return -EINVAL;
return (delta / 0x100) - 1;
}
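/*
 * Worked example of the slot math above (addresses are illustrative):
 * with the controller's register window at 0x1e740000, an SDHCI at
 * 0x1e740100 gives delta 0x100 and slot 0, one at 0x1e740200 gives
 * slot 1; any delta that is not 0x100-aligned is rejected.
 */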
static int aspeed_sdhci_probe(struct platform_device *pdev)
{
const struct aspeed_sdhci_pdata *aspeed_pdata;
struct device_node *np = pdev->dev.of_node;
struct sdhci_pltfm_host *pltfm_host;
struct aspeed_sdhci *dev;
struct sdhci_host *host;
struct resource *res;
int slot;
int ret;
aspeed_pdata = of_device_get_match_data(&pdev->dev);
if (!aspeed_pdata) {
dev_err(&pdev->dev, "Missing platform configuration data\n");
return -EINVAL;
}
host = sdhci_pltfm_init(pdev, &aspeed_sdhci_pdata, sizeof(*dev));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
dev = sdhci_pltfm_priv(pltfm_host);
dev->pdata = aspeed_pdata;
dev->parent = dev_get_drvdata(pdev->dev.parent);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
slot = aspeed_sdhci_calculate_slot(dev, res);
if (slot < 0)
return slot;
else if (slot >= 2)
return -EINVAL;
if (slot < dev->pdata->nr_phase_descs) {
dev->phase_desc = &dev->pdata->phase_desc[slot];
} else {
dev_info(&pdev->dev,
"Phase control not supported for slot %d\n", slot);
dev->phase_desc = NULL;
}
dev->width_mask = !slot ? ASPEED_SDC_S0_MMC8 : ASPEED_SDC_S1_MMC8;
dev_info(&pdev->dev, "Configured for slot %d\n", slot);
sdhci_get_of_property(pdev);
if (of_property_read_bool(np, "mmc-hs200-1_8v") ||
of_property_read_bool(np, "sd-uhs-sdr104")) {
aspeed_sdc_set_slot_capability(host, dev->parent, ASPEED_SDC_CAP1_1_8V,
true, slot);
}
if (of_property_read_bool(np, "sd-uhs-sdr104")) {
aspeed_sdc_set_slot_capability(host, dev->parent, ASPEED_SDC_CAP2_SDR104,
true, slot);
}
pltfm_host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pltfm_host->clk))
return PTR_ERR(pltfm_host->clk);
ret = clk_prepare_enable(pltfm_host->clk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable SDIO clock\n");
goto err_pltfm_free;
}
ret = mmc_of_parse(host->mmc);
if (ret)
goto err_sdhci_add;
if (dev->phase_desc)
mmc_of_parse_clk_phase(host->mmc, &dev->phase_map);
ret = sdhci_add_host(host);
if (ret)
goto err_sdhci_add;
return 0;
err_sdhci_add:
clk_disable_unprepare(pltfm_host->clk);
err_pltfm_free:
sdhci_pltfm_free(pdev);
return ret;
}
static void aspeed_sdhci_remove(struct platform_device *pdev)
{
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
host = platform_get_drvdata(pdev);
pltfm_host = sdhci_priv(host);
sdhci_remove_host(host, 0);
clk_disable_unprepare(pltfm_host->clk);
sdhci_pltfm_free(pdev);
}
static const struct aspeed_sdhci_pdata ast2400_sdhci_pdata = {
.clk_div_start = 2,
};
static const struct aspeed_sdhci_phase_desc ast2600_sdhci_phase[] = {
/* SDHCI/Slot 0 */
[0] = {
.in = {
.tap_mask = ASPEED_SDC_S0_PHASE_IN,
.enable_mask = ASPEED_SDC_S0_PHASE_IN_EN,
.enable_value = 1,
},
.out = {
.tap_mask = ASPEED_SDC_S0_PHASE_OUT,
.enable_mask = ASPEED_SDC_S0_PHASE_OUT_EN,
.enable_value = 3,
},
},
/* SDHCI/Slot 1 */
[1] = {
.in = {
.tap_mask = ASPEED_SDC_S1_PHASE_IN,
.enable_mask = ASPEED_SDC_S1_PHASE_IN_EN,
.enable_value = 1,
},
.out = {
.tap_mask = ASPEED_SDC_S1_PHASE_OUT,
.enable_mask = ASPEED_SDC_S1_PHASE_OUT_EN,
.enable_value = 3,
},
},
};
static const struct aspeed_sdhci_pdata ast2600_sdhci_pdata = {
.clk_div_start = 1,
.phase_desc = ast2600_sdhci_phase,
.nr_phase_descs = ARRAY_SIZE(ast2600_sdhci_phase),
};
static const struct of_device_id aspeed_sdhci_of_match[] = {
{ .compatible = "aspeed,ast2400-sdhci", .data = &ast2400_sdhci_pdata, },
{ .compatible = "aspeed,ast2500-sdhci", .data = &ast2400_sdhci_pdata, },
{ .compatible = "aspeed,ast2600-sdhci", .data = &ast2600_sdhci_pdata, },
{ }
};
static struct platform_driver aspeed_sdhci_driver = {
.driver = {
.name = "sdhci-aspeed",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = aspeed_sdhci_of_match,
},
.probe = aspeed_sdhci_probe,
.remove_new = aspeed_sdhci_remove,
};
static int aspeed_sdc_probe(struct platform_device *pdev)
{
struct device_node *parent, *child;
struct aspeed_sdc *sdc;
int ret;
sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL);
if (!sdc)
return -ENOMEM;
spin_lock_init(&sdc->lock);
sdc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(sdc->clk))
return PTR_ERR(sdc->clk);
ret = clk_prepare_enable(sdc->clk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable SDCLK\n");
return ret;
}
sdc->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &sdc->res);
if (IS_ERR(sdc->regs)) {
ret = PTR_ERR(sdc->regs);
goto err_clk;
}
dev_set_drvdata(&pdev->dev, sdc);
parent = pdev->dev.of_node;
for_each_available_child_of_node(parent, child) {
struct platform_device *cpdev;
cpdev = of_platform_device_create(child, NULL, &pdev->dev);
if (!cpdev) {
of_node_put(child);
ret = -ENODEV;
goto err_clk;
}
}
return 0;
err_clk:
clk_disable_unprepare(sdc->clk);
return ret;
}
static void aspeed_sdc_remove(struct platform_device *pdev)
{
struct aspeed_sdc *sdc = dev_get_drvdata(&pdev->dev);
clk_disable_unprepare(sdc->clk);
}
static const struct of_device_id aspeed_sdc_of_match[] = {
{ .compatible = "aspeed,ast2400-sd-controller", },
{ .compatible = "aspeed,ast2500-sd-controller", },
{ .compatible = "aspeed,ast2600-sd-controller", },
{ }
};
MODULE_DEVICE_TABLE(of, aspeed_sdc_of_match);
static struct platform_driver aspeed_sdc_driver = {
.driver = {
.name = "sd-controller-aspeed",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &sdhci_pltfm_pmops,
.of_match_table = aspeed_sdc_of_match,
},
.probe = aspeed_sdc_probe,
.remove_new = aspeed_sdc_remove,
};
#if defined(CONFIG_MMC_SDHCI_OF_ASPEED_TEST)
#include "sdhci-of-aspeed-test.c"
#endif
static int __init aspeed_sdc_init(void)
{
int rc;
rc = platform_driver_register(&aspeed_sdhci_driver);
if (rc < 0)
return rc;
rc = platform_driver_register(&aspeed_sdc_driver);
if (rc < 0)
platform_driver_unregister(&aspeed_sdhci_driver);
return rc;
}
module_init(aspeed_sdc_init);
static void __exit aspeed_sdc_exit(void)
{
platform_driver_unregister(&aspeed_sdc_driver);
platform_driver_unregister(&aspeed_sdhci_driver);
}
module_exit(aspeed_sdc_exit);
MODULE_DESCRIPTION("Driver for the ASPEED SD/SDIO/SDHCI Controllers");
MODULE_AUTHOR("Ryan Chen <[email protected]>");
MODULE_AUTHOR("Andrew Jeffery <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/mmc/host/sdhci-of-aspeed.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* cb710/mmc.c
*
* Copyright by Michał Mirosław, 2008-2009
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "cb710-mmc.h"
#define CB710_MMC_REQ_TIMEOUT_MS 2000
static const u8 cb710_clock_divider_log2[8] = {
/* 1, 2, 4, 8, 16, 32, 128, 512 */
0, 1, 2, 3, 4, 5, 7, 9
};
#define CB710_MAX_DIVIDER_IDX \
(ARRAY_SIZE(cb710_clock_divider_log2) - 1)
static const u8 cb710_src_freq_mhz[16] = {
33, 10, 20, 25, 30, 35, 40, 45,
50, 55, 60, 65, 70, 75, 80, 85
};
static void cb710_mmc_select_clock_divider(struct mmc_host *mmc, int hz)
{
struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
struct pci_dev *pdev = cb710_slot_to_chip(slot)->pdev;
u32 src_freq_idx;
u32 divider_idx;
int src_hz;
/* on CB710 in HP nx9500:
* src_freq_idx == 0
* indexes 1-7 work as written in the table
* indexes 0,8-15 give no clock output
*/
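	/*
	 * Worked example (assuming src_freq_idx = 2, i.e. a 20 MHz source,
	 * and a 400 kHz request): the scan stops at divider_idx = 6, since
	 * 20 MHz >> 7 = 156250 Hz is the first divided clock not above the
	 * request; src_freq_idx != 0 then sets bit 3, so 0xE lands in bits
	 * 31:28 and the actual clock is 156.25 kHz.
	 */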
pci_read_config_dword(pdev, 0x48, &src_freq_idx);
src_freq_idx = (src_freq_idx >> 16) & 0xF;
src_hz = cb710_src_freq_mhz[src_freq_idx] * 1000000;
for (divider_idx = 0; divider_idx < CB710_MAX_DIVIDER_IDX; ++divider_idx) {
if (hz >= src_hz >> cb710_clock_divider_log2[divider_idx])
break;
}
if (src_freq_idx)
divider_idx |= 0x8;
else if (divider_idx == 0)
divider_idx = 1;
cb710_pci_update_config_reg(pdev, 0x40, ~0xF0000000, divider_idx << 28);
dev_dbg(cb710_slot_dev(slot),
"clock set to %d Hz, wanted %d Hz; src_freq_idx = %d, divider_idx = %d|%d\n",
src_hz >> cb710_clock_divider_log2[divider_idx & 7],
hz, src_freq_idx, divider_idx & 7, divider_idx & 8);
}
static void __cb710_mmc_enable_irq(struct cb710_slot *slot,
unsigned short enable, unsigned short mask)
{
/* clear global IE
* - it gets set later if any interrupt sources are enabled */
mask |= CB710_MMC_IE_IRQ_ENABLE;
	/* It looks like the interrupt is fired whenever
	 * WORD[0x0C] & WORD[0x10] != 0;
	 * -> bit 15 of port 0x0C seems to be the global interrupt enable
	 */
enable = (cb710_read_port_16(slot, CB710_MMC_IRQ_ENABLE_PORT)
& ~mask) | enable;
if (enable)
enable |= CB710_MMC_IE_IRQ_ENABLE;
cb710_write_port_16(slot, CB710_MMC_IRQ_ENABLE_PORT, enable);
}
static void cb710_mmc_enable_irq(struct cb710_slot *slot,
unsigned short enable, unsigned short mask)
{
struct cb710_mmc_reader *reader = mmc_priv(cb710_slot_to_mmc(slot));
unsigned long flags;
spin_lock_irqsave(&reader->irq_lock, flags);
/* this is the only thing irq_lock protects */
__cb710_mmc_enable_irq(slot, enable, mask);
spin_unlock_irqrestore(&reader->irq_lock, flags);
}
static void cb710_mmc_reset_events(struct cb710_slot *slot)
{
cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT, 0xFF);
cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT, 0xFF);
cb710_write_port_8(slot, CB710_MMC_STATUS2_PORT, 0xFF);
}
static void cb710_mmc_enable_4bit_data(struct cb710_slot *slot, int enable)
{
if (enable)
cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT,
CB710_MMC_C1_4BIT_DATA_BUS, 0);
else
cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT,
0, CB710_MMC_C1_4BIT_DATA_BUS);
}
static int cb710_check_event(struct cb710_slot *slot, u8 what)
{
u16 status;
status = cb710_read_port_16(slot, CB710_MMC_STATUS_PORT);
if (status & CB710_MMC_S0_FIFO_UNDERFLOW) {
/* it is just a guess, so log it */
dev_dbg(cb710_slot_dev(slot),
"CHECK : ignoring bit 6 in status %04X\n", status);
cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT,
CB710_MMC_S0_FIFO_UNDERFLOW);
status &= ~CB710_MMC_S0_FIFO_UNDERFLOW;
}
if (status & CB710_MMC_STATUS_ERROR_EVENTS) {
dev_dbg(cb710_slot_dev(slot),
"CHECK : returning EIO on status %04X\n", status);
cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT, status & 0xFF);
cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT,
CB710_MMC_S1_RESET);
return -EIO;
}
/* 'what' is a bit in MMC_STATUS1 */
if ((status >> 8) & what) {
cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT, what);
return 1;
}
return 0;
}
static int cb710_wait_for_event(struct cb710_slot *slot, u8 what)
{
int err = 0;
unsigned limit = 2000000; /* FIXME: real timeout */
#ifdef CONFIG_CB710_DEBUG
u32 e, x;
e = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
#endif
while (!(err = cb710_check_event(slot, what))) {
if (!--limit) {
cb710_dump_regs(cb710_slot_to_chip(slot),
CB710_DUMP_REGS_MMC);
err = -ETIMEDOUT;
break;
}
udelay(1);
}
#ifdef CONFIG_CB710_DEBUG
x = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
limit = 2000000 - limit;
if (limit > 100)
dev_dbg(cb710_slot_dev(slot),
"WAIT10: waited %d loops, what %d, entry val %08X, exit val %08X\n",
limit, what, e, x);
#endif
return err < 0 ? err : 0;
}
static int cb710_wait_while_busy(struct cb710_slot *slot, uint8_t mask)
{
unsigned limit = 500000; /* FIXME: real timeout */
int err = 0;
#ifdef CONFIG_CB710_DEBUG
u32 e, x;
e = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
#endif
while (cb710_read_port_8(slot, CB710_MMC_STATUS2_PORT) & mask) {
if (!--limit) {
cb710_dump_regs(cb710_slot_to_chip(slot),
CB710_DUMP_REGS_MMC);
err = -ETIMEDOUT;
break;
}
udelay(1);
}
#ifdef CONFIG_CB710_DEBUG
x = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
limit = 500000 - limit;
if (limit > 100)
dev_dbg(cb710_slot_dev(slot),
"WAIT12: waited %d loops, mask %02X, entry val %08X, exit val %08X\n",
limit, mask, e, x);
#endif
return err;
}
static void cb710_mmc_set_transfer_size(struct cb710_slot *slot,
size_t count, size_t blocksize)
{
cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
cb710_write_port_32(slot, CB710_MMC_TRANSFER_SIZE_PORT,
((count - 1) << 16)|(blocksize - 1));
dev_vdbg(cb710_slot_dev(slot), "set up for %zu block%s of %zu bytes\n",
count, count == 1 ? "" : "s", blocksize);
}
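/*
 * e.g. 8 blocks of 512 bytes are programmed as ((8 - 1) << 16) |
 * (512 - 1) = 0x000701ff in the transfer size register.
 */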
static void cb710_mmc_fifo_hack(struct cb710_slot *slot)
{
	/* without this, received data is prepended with 8 bytes of zeroes */
u32 r1, r2;
int ok = 0;
r1 = cb710_read_port_32(slot, CB710_MMC_DATA_PORT);
r2 = cb710_read_port_32(slot, CB710_MMC_DATA_PORT);
if (cb710_read_port_8(slot, CB710_MMC_STATUS0_PORT)
& CB710_MMC_S0_FIFO_UNDERFLOW) {
cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT,
CB710_MMC_S0_FIFO_UNDERFLOW);
ok = 1;
}
dev_dbg(cb710_slot_dev(slot),
"FIFO-read-hack: expected STATUS0 bit was %s\n",
ok ? "set." : "NOT SET!");
dev_dbg(cb710_slot_dev(slot),
"FIFO-read-hack: dwords ignored: %08X %08X - %s\n",
r1, r2, (r1|r2) ? "BAD (NOT ZERO)!" : "ok");
}
static int cb710_mmc_receive_pio(struct cb710_slot *slot,
struct sg_mapping_iter *miter, size_t dw_count)
{
if (!(cb710_read_port_8(slot, CB710_MMC_STATUS2_PORT) & CB710_MMC_S2_FIFO_READY)) {
int err = cb710_wait_for_event(slot,
CB710_MMC_S1_PIO_TRANSFER_DONE);
if (err)
return err;
}
cb710_sg_dwiter_write_from_io(miter,
slot->iobase + CB710_MMC_DATA_PORT, dw_count);
return 0;
}
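/*
 * The FIFO moves data in 16-byte chunks, so (reading the check below) a
 * transfer is supported when the block size is a multiple of 16 bytes,
 * with a single 8-byte block (e.g. an SD SCR read) as the only exception.
 */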
static bool cb710_is_transfer_size_supported(struct mmc_data *data)
{
return !(data->blksz & 15 && (data->blocks != 1 || data->blksz != 8));
}
static int cb710_mmc_receive(struct cb710_slot *slot, struct mmc_data *data)
{
struct sg_mapping_iter miter;
size_t len, blocks = data->blocks;
int err = 0;
	/* TODO: I don't know how/if the hardware handles non-16B-boundary
	 * blocks, except for the single 8B block case */
if (unlikely(data->blksz & 15 && (data->blocks != 1 || data->blksz != 8)))
return -EINVAL;
sg_miter_start(&miter, data->sg, data->sg_len, SG_MITER_TO_SG);
cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT,
15, CB710_MMC_C2_READ_PIO_SIZE_MASK);
cb710_mmc_fifo_hack(slot);
while (blocks-- > 0) {
len = data->blksz;
while (len >= 16) {
err = cb710_mmc_receive_pio(slot, &miter, 4);
if (err)
goto out;
len -= 16;
}
if (!len)
continue;
cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT,
len - 1, CB710_MMC_C2_READ_PIO_SIZE_MASK);
len = (len >= 8) ? 4 : 2;
err = cb710_mmc_receive_pio(slot, &miter, len);
if (err)
goto out;
}
out:
sg_miter_stop(&miter);
return err;
}
static int cb710_mmc_send(struct cb710_slot *slot, struct mmc_data *data)
{
struct sg_mapping_iter miter;
size_t len, blocks = data->blocks;
int err = 0;
/* TODO: I don't know how/if the hardware handles multiple
* non-16B-boundary blocks */
if (unlikely(data->blocks > 1 && data->blksz & 15))
return -EINVAL;
sg_miter_start(&miter, data->sg, data->sg_len, SG_MITER_FROM_SG);
cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT,
0, CB710_MMC_C2_READ_PIO_SIZE_MASK);
while (blocks-- > 0) {
len = (data->blksz + 15) >> 4;
do {
if (!(cb710_read_port_8(slot, CB710_MMC_STATUS2_PORT)
& CB710_MMC_S2_FIFO_EMPTY)) {
err = cb710_wait_for_event(slot,
CB710_MMC_S1_PIO_TRANSFER_DONE);
if (err)
goto out;
}
cb710_sg_dwiter_read_to_io(&miter,
slot->iobase + CB710_MMC_DATA_PORT, 4);
} while (--len);
}
out:
sg_miter_stop(&miter);
return err;
}
static u16 cb710_encode_cmd_flags(struct cb710_mmc_reader *reader,
struct mmc_command *cmd)
{
unsigned int flags = cmd->flags;
u16 cb_flags = 0;
	/* The Windows driver returned 0 for commands for which no response
	 * is expected. As it happens, only two such commands were used
	 * (MMC_GO_IDLE_STATE and MMC_GO_INACTIVE_STATE), so this might
	 * as well be a bug in that driver.
	 *
	 * The original driver set bit 14 for MMC/SD application
	 * commands. There's no difference 'on the wire', and it
	 * apparently works without it anyway.
*/
switch (flags & MMC_CMD_MASK) {
case MMC_CMD_AC: cb_flags = CB710_MMC_CMD_AC; break;
case MMC_CMD_ADTC: cb_flags = CB710_MMC_CMD_ADTC; break;
case MMC_CMD_BC: cb_flags = CB710_MMC_CMD_BC; break;
case MMC_CMD_BCR: cb_flags = CB710_MMC_CMD_BCR; break;
}
if (flags & MMC_RSP_BUSY)
cb_flags |= CB710_MMC_RSP_BUSY;
cb_flags |= cmd->opcode << CB710_MMC_CMD_CODE_SHIFT;
if (cmd->data && (cmd->data->flags & MMC_DATA_READ))
cb_flags |= CB710_MMC_DATA_READ;
if (flags & MMC_RSP_PRESENT) {
/* Windows driver set 01 at bits 4,3 except for
* MMC_SET_BLOCKLEN where it set 10. Maybe the
* hardware can do something special about this
* command? The original driver looks buggy/incomplete
* anyway so we ignore this for now.
*
* I assume that 00 here means no response is expected.
*/
cb_flags |= CB710_MMC_RSP_PRESENT;
if (flags & MMC_RSP_136)
cb_flags |= CB710_MMC_RSP_136;
if (!(flags & MMC_RSP_CRC))
cb_flags |= CB710_MMC_RSP_NO_CRC;
}
return cb_flags;
}
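/*
 * Encoding sketch for, e.g., READ_SINGLE_BLOCK (CMD17: ADTC, R1, data
 * read): CB710_MMC_CMD_ADTC | CB710_MMC_DATA_READ | CB710_MMC_RSP_PRESENT
 * | (17 << CB710_MMC_CMD_CODE_SHIFT); R1 carries a CRC, so
 * CB710_MMC_RSP_NO_CRC stays clear.
 */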
static void cb710_receive_response(struct cb710_slot *slot,
struct mmc_command *cmd)
{
unsigned rsp_opcode, wanted_opcode;
/* Looks like final byte with CRC is always stripped (same as SDHCI) */
if (cmd->flags & MMC_RSP_136) {
u32 resp[4];
resp[0] = cb710_read_port_32(slot, CB710_MMC_RESPONSE3_PORT);
resp[1] = cb710_read_port_32(slot, CB710_MMC_RESPONSE2_PORT);
resp[2] = cb710_read_port_32(slot, CB710_MMC_RESPONSE1_PORT);
resp[3] = cb710_read_port_32(slot, CB710_MMC_RESPONSE0_PORT);
rsp_opcode = resp[0] >> 24;
cmd->resp[0] = (resp[0] << 8)|(resp[1] >> 24);
cmd->resp[1] = (resp[1] << 8)|(resp[2] >> 24);
cmd->resp[2] = (resp[2] << 8)|(resp[3] >> 24);
cmd->resp[3] = (resp[3] << 8);
} else {
rsp_opcode = cb710_read_port_32(slot, CB710_MMC_RESPONSE1_PORT) & 0x3F;
cmd->resp[0] = cb710_read_port_32(slot, CB710_MMC_RESPONSE0_PORT);
}
wanted_opcode = (cmd->flags & MMC_RSP_OPCODE) ? cmd->opcode : 0x3F;
if (rsp_opcode != wanted_opcode)
cmd->error = -EILSEQ;
}
static int cb710_mmc_transfer_data(struct cb710_slot *slot,
struct mmc_data *data)
{
int error, to;
if (data->flags & MMC_DATA_READ)
error = cb710_mmc_receive(slot, data);
else
error = cb710_mmc_send(slot, data);
to = cb710_wait_for_event(slot, CB710_MMC_S1_DATA_TRANSFER_DONE);
if (!error)
error = to;
if (!error)
data->bytes_xfered = data->blksz * data->blocks;
return error;
}
static int cb710_mmc_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
struct cb710_mmc_reader *reader = mmc_priv(mmc);
struct mmc_data *data = cmd->data;
u16 cb_cmd = cb710_encode_cmd_flags(reader, cmd);
dev_dbg(cb710_slot_dev(slot), "cmd request: 0x%04X\n", cb_cmd);
if (data) {
if (!cb710_is_transfer_size_supported(data)) {
data->error = -EINVAL;
return -1;
}
cb710_mmc_set_transfer_size(slot, data->blocks, data->blksz);
}
cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20|CB710_MMC_S2_BUSY_10);
cb710_write_port_16(slot, CB710_MMC_CMD_TYPE_PORT, cb_cmd);
cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
cb710_write_port_32(slot, CB710_MMC_CMD_PARAM_PORT, cmd->arg);
cb710_mmc_reset_events(slot);
cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
cb710_modify_port_8(slot, CB710_MMC_CONFIG0_PORT, 0x01, 0);
cmd->error = cb710_wait_for_event(slot, CB710_MMC_S1_COMMAND_SENT);
if (cmd->error)
return -1;
if (cmd->flags & MMC_RSP_PRESENT) {
cb710_receive_response(slot, cmd);
if (cmd->error)
return -1;
}
if (data)
data->error = cb710_mmc_transfer_data(slot, data);
return 0;
}
static void cb710_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
struct cb710_mmc_reader *reader = mmc_priv(mmc);
WARN_ON(reader->mrq != NULL);
reader->mrq = mrq;
cb710_mmc_enable_irq(slot, CB710_MMC_IE_TEST_MASK, 0);
if (!cb710_mmc_command(mmc, mrq->cmd) && mrq->stop)
cb710_mmc_command(mmc, mrq->stop);
tasklet_schedule(&reader->finish_req_tasklet);
}
static int cb710_mmc_powerup(struct cb710_slot *slot)
{
#ifdef CONFIG_CB710_DEBUG
struct cb710_chip *chip = cb710_slot_to_chip(slot);
#endif
int err;
/* a lot of magic for now */
dev_dbg(cb710_slot_dev(slot), "bus powerup\n");
cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
if (unlikely(err))
return err;
cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0x80, 0);
cb710_modify_port_8(slot, CB710_MMC_CONFIG3_PORT, 0x80, 0);
cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
mdelay(1);
dev_dbg(cb710_slot_dev(slot), "after delay 1\n");
cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
if (unlikely(err))
return err;
cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0x09, 0);
cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
mdelay(1);
dev_dbg(cb710_slot_dev(slot), "after delay 2\n");
cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
if (unlikely(err))
return err;
cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0, 0x08);
cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
mdelay(2);
dev_dbg(cb710_slot_dev(slot), "after delay 3\n");
cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
cb710_modify_port_8(slot, CB710_MMC_CONFIG0_PORT, 0x06, 0);
cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0x70, 0);
cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT, 0x80, 0);
cb710_modify_port_8(slot, CB710_MMC_CONFIG3_PORT, 0x03, 0);
cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
if (unlikely(err))
return err;
	/* This port behaves weirdly: quick byte reads of 0x08,0x09 return
* 0xFF,0x00 after writing 0xFFFF to 0x08; it works correctly when
* read/written from userspace... What am I missing here?
* (it doesn't depend on write-to-read delay) */
cb710_write_port_16(slot, CB710_MMC_CONFIGB_PORT, 0xFFFF);
cb710_modify_port_8(slot, CB710_MMC_CONFIG0_PORT, 0x06, 0);
cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
dev_dbg(cb710_slot_dev(slot), "bus powerup finished\n");
return cb710_check_event(slot, 0);
}
static void cb710_mmc_powerdown(struct cb710_slot *slot)
{
cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0, 0x81);
cb710_modify_port_8(slot, CB710_MMC_CONFIG3_PORT, 0, 0x80);
}
static void cb710_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
struct cb710_mmc_reader *reader = mmc_priv(mmc);
int err;
cb710_mmc_select_clock_divider(mmc, ios->clock);
if (ios->power_mode != reader->last_power_mode) {
switch (ios->power_mode) {
case MMC_POWER_ON:
err = cb710_mmc_powerup(slot);
if (err) {
dev_warn(cb710_slot_dev(slot),
"powerup failed (%d)- retrying\n", err);
cb710_mmc_powerdown(slot);
udelay(1);
err = cb710_mmc_powerup(slot);
if (err)
dev_warn(cb710_slot_dev(slot),
"powerup retry failed (%d) - expect errors\n",
err);
}
reader->last_power_mode = MMC_POWER_ON;
break;
case MMC_POWER_OFF:
cb710_mmc_powerdown(slot);
reader->last_power_mode = MMC_POWER_OFF;
break;
case MMC_POWER_UP:
default:
/* ignore */
break;
}
}
cb710_mmc_enable_4bit_data(slot, ios->bus_width != MMC_BUS_WIDTH_1);
cb710_mmc_enable_irq(slot, CB710_MMC_IE_TEST_MASK, 0);
}
static int cb710_mmc_get_ro(struct mmc_host *mmc)
{
struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
return cb710_read_port_8(slot, CB710_MMC_STATUS3_PORT)
& CB710_MMC_S3_WRITE_PROTECTED;
}
static int cb710_mmc_get_cd(struct mmc_host *mmc)
{
struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
return cb710_read_port_8(slot, CB710_MMC_STATUS3_PORT)
& CB710_MMC_S3_CARD_DETECTED;
}
static int cb710_mmc_irq_handler(struct cb710_slot *slot)
{
struct mmc_host *mmc = cb710_slot_to_mmc(slot);
struct cb710_mmc_reader *reader = mmc_priv(mmc);
u32 status, config1, config2, irqen;
status = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
irqen = cb710_read_port_32(slot, CB710_MMC_IRQ_ENABLE_PORT);
config2 = cb710_read_port_32(slot, CB710_MMC_CONFIGB_PORT);
config1 = cb710_read_port_32(slot, CB710_MMC_CONFIG_PORT);
dev_dbg(cb710_slot_dev(slot), "interrupt; status: %08X, "
"ie: %08X, c2: %08X, c1: %08X\n",
status, irqen, config2, config1);
if (status & (CB710_MMC_S1_CARD_CHANGED << 8)) {
/* ack the event */
cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT,
CB710_MMC_S1_CARD_CHANGED);
if ((irqen & CB710_MMC_IE_CISTATUS_MASK)
== CB710_MMC_IE_CISTATUS_MASK)
mmc_detect_change(mmc, HZ/5);
} else {
dev_dbg(cb710_slot_dev(slot), "unknown interrupt (test)\n");
spin_lock(&reader->irq_lock);
__cb710_mmc_enable_irq(slot, 0, CB710_MMC_IE_TEST_MASK);
spin_unlock(&reader->irq_lock);
}
return 1;
}
static void cb710_mmc_finish_request_tasklet(struct tasklet_struct *t)
{
struct cb710_mmc_reader *reader = from_tasklet(reader, t,
finish_req_tasklet);
struct mmc_request *mrq = reader->mrq;
reader->mrq = NULL;
mmc_request_done(mmc_from_priv(reader), mrq);
}
static const struct mmc_host_ops cb710_mmc_host = {
.request = cb710_mmc_request,
.set_ios = cb710_mmc_set_ios,
.get_ro = cb710_mmc_get_ro,
.get_cd = cb710_mmc_get_cd,
};
#ifdef CONFIG_PM
static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state)
{
struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
cb710_mmc_enable_irq(slot, 0, ~0);
return 0;
}
static int cb710_mmc_resume(struct platform_device *pdev)
{
struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
cb710_mmc_enable_irq(slot, 0, ~0);
return 0;
}
#endif /* CONFIG_PM */
static int cb710_mmc_init(struct platform_device *pdev)
{
struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
struct cb710_chip *chip = cb710_slot_to_chip(slot);
struct mmc_host *mmc;
struct cb710_mmc_reader *reader;
int err;
u32 val;
mmc = mmc_alloc_host(sizeof(*reader), cb710_slot_dev(slot));
if (!mmc)
return -ENOMEM;
platform_set_drvdata(pdev, mmc);
/* harmless (maybe) magic */
pci_read_config_dword(chip->pdev, 0x48, &val);
val = cb710_src_freq_mhz[(val >> 16) & 0xF];
dev_dbg(cb710_slot_dev(slot), "source frequency: %dMHz\n", val);
val *= 1000000;
mmc->ops = &cb710_mmc_host;
mmc->f_max = val;
mmc->f_min = val >> cb710_clock_divider_log2[CB710_MAX_DIVIDER_IDX];
mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
mmc->caps = MMC_CAP_4_BIT_DATA;
/*
* In cb710_wait_for_event() we use a fixed timeout of ~2s, hence let's
* inform the core about it. A future improvement should instead make
* use of the cmd->busy_timeout.
*/
mmc->max_busy_timeout = CB710_MMC_REQ_TIMEOUT_MS;
reader = mmc_priv(mmc);
tasklet_setup(&reader->finish_req_tasklet,
cb710_mmc_finish_request_tasklet);
spin_lock_init(&reader->irq_lock);
cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
cb710_mmc_enable_irq(slot, 0, ~0);
cb710_set_irq_handler(slot, cb710_mmc_irq_handler);
err = mmc_add_host(mmc);
if (unlikely(err))
goto err_free_mmc;
dev_dbg(cb710_slot_dev(slot), "mmc_hostname is %s\n",
mmc_hostname(mmc));
cb710_mmc_enable_irq(slot, CB710_MMC_IE_CARD_INSERTION_STATUS, 0);
return 0;
err_free_mmc:
dev_dbg(cb710_slot_dev(slot), "mmc_add_host() failed: %d\n", err);
cb710_set_irq_handler(slot, NULL);
mmc_free_host(mmc);
return err;
}
static void cb710_mmc_exit(struct platform_device *pdev)
{
struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
struct mmc_host *mmc = cb710_slot_to_mmc(slot);
struct cb710_mmc_reader *reader = mmc_priv(mmc);
cb710_mmc_enable_irq(slot, 0, CB710_MMC_IE_CARD_INSERTION_STATUS);
mmc_remove_host(mmc);
/* IRQs should be disabled now, but let's stay on the safe side */
cb710_mmc_enable_irq(slot, 0, ~0);
cb710_set_irq_handler(slot, NULL);
/* clear config ports - just in case */
cb710_write_port_32(slot, CB710_MMC_CONFIG_PORT, 0);
cb710_write_port_16(slot, CB710_MMC_CONFIGB_PORT, 0);
tasklet_kill(&reader->finish_req_tasklet);
mmc_free_host(mmc);
}
static struct platform_driver cb710_mmc_driver = {
.driver.name = "cb710-mmc",
.probe = cb710_mmc_init,
.remove_new = cb710_mmc_exit,
#ifdef CONFIG_PM
.suspend = cb710_mmc_suspend,
.resume = cb710_mmc_resume,
#endif
};
module_platform_driver(cb710_mmc_driver);
MODULE_AUTHOR("Michał Mirosław <[email protected]>");
MODULE_DESCRIPTION("ENE CB710 memory card reader driver - MMC/SD part");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cb710-mmc");
| linux-master | drivers/mmc/host/cb710-mmc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* tifm_sd.c - TI FlashMedia driver
*
* Copyright (C) 2006 Alex Dubov <[email protected]>
*
* Special thanks to Brad Campbell for extensive testing of this driver.
*/
#include <linux/tifm.h>
#include <linux/mmc/host.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/module.h>
#include <asm/io.h>
#define DRIVER_NAME "tifm_sd"
#define DRIVER_VERSION "0.8"
static bool no_dma = false;
static bool fixed_timeout = false;
module_param(no_dma, bool, 0644);
module_param(fixed_timeout, bool, 0644);
/* Constants here are mostly from OMAP5912 datasheet */
#define TIFM_MMCSD_RESET 0x0002
#define TIFM_MMCSD_CLKMASK 0x03ff
#define TIFM_MMCSD_POWER 0x0800
#define TIFM_MMCSD_4BBUS 0x8000
#define TIFM_MMCSD_RXDE 0x8000 /* rx dma enable */
#define TIFM_MMCSD_TXDE 0x0080 /* tx dma enable */
#define TIFM_MMCSD_BUFINT 0x0c00 /* set bits: AE, AF */
#define TIFM_MMCSD_DPE 0x0020 /* data timeout counted in kilocycles */
#define TIFM_MMCSD_INAB 0x0080 /* abort / initialize command */
#define TIFM_MMCSD_READ 0x8000
#define TIFM_MMCSD_ERRMASK 0x01e0 /* set bits: CCRC, CTO, DCRC, DTO */
#define TIFM_MMCSD_EOC 0x0001 /* end of command phase */
#define TIFM_MMCSD_CD 0x0002 /* card detect */
#define TIFM_MMCSD_CB 0x0004 /* card enter busy state */
#define TIFM_MMCSD_BRS 0x0008 /* block received/sent */
#define TIFM_MMCSD_EOFB 0x0010 /* card exit busy state */
#define TIFM_MMCSD_DTO 0x0020 /* data time-out */
#define TIFM_MMCSD_DCRC 0x0040 /* data crc error */
#define TIFM_MMCSD_CTO 0x0080 /* command time-out */
#define TIFM_MMCSD_CCRC 0x0100 /* command crc error */
#define TIFM_MMCSD_AF 0x0400 /* fifo almost full */
#define TIFM_MMCSD_AE 0x0800 /* fifo almost empty */
#define TIFM_MMCSD_OCRB 0x1000 /* OCR busy */
#define TIFM_MMCSD_CIRQ 0x2000 /* card irq (cmd40/sdio) */
#define TIFM_MMCSD_CERR 0x4000 /* card status error */
#define TIFM_MMCSD_ODTO 0x0040 /* open drain / extended timeout */
#define TIFM_MMCSD_CARD_RO 0x0200 /* card is read-only */
#define TIFM_MMCSD_FIFO_SIZE 0x0020
#define TIFM_MMCSD_RSP_R0 0x0000
#define TIFM_MMCSD_RSP_R1 0x0100
#define TIFM_MMCSD_RSP_R2 0x0200
#define TIFM_MMCSD_RSP_R3 0x0300
#define TIFM_MMCSD_RSP_R4 0x0400
#define TIFM_MMCSD_RSP_R5 0x0500
#define TIFM_MMCSD_RSP_R6 0x0600
#define TIFM_MMCSD_RSP_BUSY 0x0800
#define TIFM_MMCSD_CMD_BC 0x0000
#define TIFM_MMCSD_CMD_BCR 0x1000
#define TIFM_MMCSD_CMD_AC 0x2000
#define TIFM_MMCSD_CMD_ADTC 0x3000
#define TIFM_MMCSD_MAX_BLOCK_SIZE 0x0800UL
#define TIFM_MMCSD_REQ_TIMEOUT_MS 1000
enum {
CMD_READY = 0x0001,
FIFO_READY = 0x0002,
BRS_READY = 0x0004,
SCMD_ACTIVE = 0x0008,
SCMD_READY = 0x0010,
CARD_BUSY = 0x0020,
DATA_CARRY = 0x0040
};
struct tifm_sd {
struct tifm_dev *dev;
unsigned short eject:1,
open_drain:1,
no_dma:1;
unsigned short cmd_flags;
unsigned int clk_freq;
unsigned int clk_div;
unsigned long timeout_jiffies;
struct tasklet_struct finish_tasklet;
struct timer_list timer;
struct mmc_request *req;
int sg_len;
int sg_pos;
unsigned int block_pos;
struct scatterlist bounce_buf;
unsigned char bounce_buf_data[TIFM_MMCSD_MAX_BLOCK_SIZE];
};
/* for some reason, host won't respond correctly to readw/writew */
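/* Each 32-bit FIFO access below carries two payload bytes, so an odd
 * byte count leaves one byte over; the helpers stash it in
 * bounce_buf_data[0] and mark it with DATA_CARRY for the next call.
 */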
static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg,
unsigned int off, unsigned int cnt)
{
struct tifm_dev *sock = host->dev;
unsigned char *buf;
unsigned int pos = 0, val;
buf = kmap_local_page(pg) + off;
if (host->cmd_flags & DATA_CARRY) {
buf[pos++] = host->bounce_buf_data[0];
host->cmd_flags &= ~DATA_CARRY;
}
while (pos < cnt) {
val = readl(sock->addr + SOCK_MMCSD_DATA);
buf[pos++] = val & 0xff;
if (pos == cnt) {
host->bounce_buf_data[0] = (val >> 8) & 0xff;
host->cmd_flags |= DATA_CARRY;
break;
}
buf[pos++] = (val >> 8) & 0xff;
}
kunmap_local(buf - off);
}
static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg,
unsigned int off, unsigned int cnt)
{
struct tifm_dev *sock = host->dev;
unsigned char *buf;
unsigned int pos = 0, val;
buf = kmap_local_page(pg) + off;
if (host->cmd_flags & DATA_CARRY) {
val = host->bounce_buf_data[0] | ((buf[pos++] << 8) & 0xff00);
writel(val, sock->addr + SOCK_MMCSD_DATA);
host->cmd_flags &= ~DATA_CARRY;
}
while (pos < cnt) {
val = buf[pos++];
if (pos == cnt) {
host->bounce_buf_data[0] = val & 0xff;
host->cmd_flags |= DATA_CARRY;
break;
}
val |= (buf[pos++] << 8) & 0xff00;
writel(val, sock->addr + SOCK_MMCSD_DATA);
}
kunmap_local(buf - off);
}
static void tifm_sd_transfer_data(struct tifm_sd *host)
{
struct mmc_data *r_data = host->req->cmd->data;
struct scatterlist *sg = r_data->sg;
unsigned int off, cnt, t_size = TIFM_MMCSD_FIFO_SIZE * 2;
unsigned int p_off, p_cnt;
struct page *pg;
if (host->sg_pos == host->sg_len)
return;
while (t_size) {
cnt = sg[host->sg_pos].length - host->block_pos;
if (!cnt) {
host->block_pos = 0;
host->sg_pos++;
if (host->sg_pos == host->sg_len) {
if ((r_data->flags & MMC_DATA_WRITE)
&& (host->cmd_flags & DATA_CARRY))
writel(host->bounce_buf_data[0],
host->dev->addr
+ SOCK_MMCSD_DATA);
return;
}
cnt = sg[host->sg_pos].length;
}
off = sg[host->sg_pos].offset + host->block_pos;
pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
p_off = offset_in_page(off);
p_cnt = PAGE_SIZE - p_off;
p_cnt = min(p_cnt, cnt);
p_cnt = min(p_cnt, t_size);
if (r_data->flags & MMC_DATA_READ)
tifm_sd_read_fifo(host, pg, p_off, p_cnt);
else if (r_data->flags & MMC_DATA_WRITE)
tifm_sd_write_fifo(host, pg, p_off, p_cnt);
t_size -= p_cnt;
host->block_pos += p_cnt;
}
}
static void tifm_sd_copy_page(struct page *dst, unsigned int dst_off,
struct page *src, unsigned int src_off,
unsigned int count)
{
unsigned char *src_buf = kmap_local_page(src) + src_off;
unsigned char *dst_buf = kmap_local_page(dst) + dst_off;
memcpy(dst_buf, src_buf, count);
kunmap_local(dst_buf - dst_off);
kunmap_local(src_buf - src_off);
}
static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data)
{
struct scatterlist *sg = r_data->sg;
unsigned int t_size = r_data->blksz;
unsigned int off, cnt;
unsigned int p_off, p_cnt;
struct page *pg;
dev_dbg(&host->dev->dev, "bouncing block\n");
while (t_size) {
cnt = sg[host->sg_pos].length - host->block_pos;
if (!cnt) {
host->block_pos = 0;
host->sg_pos++;
if (host->sg_pos == host->sg_len)
return;
cnt = sg[host->sg_pos].length;
}
off = sg[host->sg_pos].offset + host->block_pos;
pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
p_off = offset_in_page(off);
p_cnt = PAGE_SIZE - p_off;
p_cnt = min(p_cnt, cnt);
p_cnt = min(p_cnt, t_size);
if (r_data->flags & MMC_DATA_WRITE)
tifm_sd_copy_page(sg_page(&host->bounce_buf),
r_data->blksz - t_size,
pg, p_off, p_cnt);
else if (r_data->flags & MMC_DATA_READ)
tifm_sd_copy_page(pg, p_off, sg_page(&host->bounce_buf),
r_data->blksz - t_size, p_cnt);
t_size -= p_cnt;
host->block_pos += p_cnt;
}
}
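/*
 * Program the next DMA transfer: up to TIFM_DMA_TSIZE whole blocks
 * straight from the scatterlist, or a single block via the bounce
 * buffer when the remaining sg fragment is smaller than one block.
 * Returns 1 once the whole request has been consumed.
 */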
static int tifm_sd_set_dma_data(struct tifm_sd *host, struct mmc_data *r_data)
{
struct tifm_dev *sock = host->dev;
unsigned int t_size = TIFM_DMA_TSIZE * r_data->blksz;
unsigned int dma_len, dma_blk_cnt, dma_off;
struct scatterlist *sg = NULL;
if (host->sg_pos == host->sg_len)
return 1;
if (host->cmd_flags & DATA_CARRY) {
host->cmd_flags &= ~DATA_CARRY;
tifm_sd_bounce_block(host, r_data);
if (host->sg_pos == host->sg_len)
return 1;
}
dma_len = sg_dma_len(&r_data->sg[host->sg_pos]) - host->block_pos;
if (!dma_len) {
host->block_pos = 0;
host->sg_pos++;
if (host->sg_pos == host->sg_len)
return 1;
dma_len = sg_dma_len(&r_data->sg[host->sg_pos]);
}
if (dma_len < t_size) {
dma_blk_cnt = dma_len / r_data->blksz;
dma_off = host->block_pos;
host->block_pos += dma_blk_cnt * r_data->blksz;
} else {
dma_blk_cnt = TIFM_DMA_TSIZE;
dma_off = host->block_pos;
host->block_pos += t_size;
}
if (dma_blk_cnt)
sg = &r_data->sg[host->sg_pos];
else if (dma_len) {
if (r_data->flags & MMC_DATA_WRITE)
tifm_sd_bounce_block(host, r_data);
else
host->cmd_flags |= DATA_CARRY;
sg = &host->bounce_buf;
dma_off = 0;
dma_blk_cnt = 1;
} else
return 1;
dev_dbg(&sock->dev, "setting dma for %d blocks\n", dma_blk_cnt);
writel(sg_dma_address(sg) + dma_off, sock->addr + SOCK_DMA_ADDRESS);
if (r_data->flags & MMC_DATA_WRITE)
writel((dma_blk_cnt << 8) | TIFM_DMA_TX | TIFM_DMA_EN,
sock->addr + SOCK_DMA_CONTROL);
else
writel((dma_blk_cnt << 8) | TIFM_DMA_EN,
sock->addr + SOCK_DMA_CONTROL);
return 0;
}
static unsigned int tifm_sd_op_flags(struct mmc_command *cmd)
{
unsigned int rc = 0;
switch (mmc_resp_type(cmd)) {
case MMC_RSP_NONE:
rc |= TIFM_MMCSD_RSP_R0;
break;
case MMC_RSP_R1B:
rc |= TIFM_MMCSD_RSP_BUSY;
fallthrough;
case MMC_RSP_R1:
rc |= TIFM_MMCSD_RSP_R1;
break;
case MMC_RSP_R2:
rc |= TIFM_MMCSD_RSP_R2;
break;
case MMC_RSP_R3:
rc |= TIFM_MMCSD_RSP_R3;
break;
default:
BUG();
}
switch (mmc_cmd_type(cmd)) {
case MMC_CMD_BC:
rc |= TIFM_MMCSD_CMD_BC;
break;
case MMC_CMD_BCR:
rc |= TIFM_MMCSD_CMD_BCR;
break;
case MMC_CMD_AC:
rc |= TIFM_MMCSD_CMD_AC;
break;
case MMC_CMD_ADTC:
rc |= TIFM_MMCSD_CMD_ADTC;
break;
default:
BUG();
}
return rc;
}
static void tifm_sd_exec(struct tifm_sd *host, struct mmc_command *cmd)
{
struct tifm_dev *sock = host->dev;
unsigned int cmd_mask = tifm_sd_op_flags(cmd);
if (host->open_drain)
cmd_mask |= TIFM_MMCSD_ODTO;
if (cmd->data && (cmd->data->flags & MMC_DATA_READ))
cmd_mask |= TIFM_MMCSD_READ;
dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n",
cmd->opcode, cmd->arg, cmd_mask);
writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH);
writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW);
writel(cmd->opcode | cmd_mask, sock->addr + SOCK_MMCSD_COMMAND);
}
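/*
 * The 128-bit command response is exposed as eight 16-bit registers,
 * most significant half-word at the highest offset; reassemble them
 * into the four 32-bit words the MMC core expects.
 */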
static void tifm_sd_fetch_resp(struct mmc_command *cmd, struct tifm_dev *sock)
{
cmd->resp[0] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x1c) << 16)
| readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x18);
cmd->resp[1] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x14) << 16)
| readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x10);
cmd->resp[2] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x0c) << 16)
| readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x08);
cmd->resp[3] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x04) << 16)
| readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x00);
}
static void tifm_sd_check_status(struct tifm_sd *host)
{
struct tifm_dev *sock = host->dev;
struct mmc_command *cmd = host->req->cmd;
if (cmd->error)
goto finish_request;
if (!(host->cmd_flags & CMD_READY))
return;
if (cmd->data) {
if (cmd->data->error) {
if ((host->cmd_flags & SCMD_ACTIVE)
&& !(host->cmd_flags & SCMD_READY))
return;
goto finish_request;
}
if (!(host->cmd_flags & BRS_READY))
return;
if (!(host->no_dma || (host->cmd_flags & FIFO_READY)))
return;
if (cmd->data->flags & MMC_DATA_WRITE) {
if (host->req->stop) {
if (!(host->cmd_flags & SCMD_ACTIVE)) {
host->cmd_flags |= SCMD_ACTIVE;
writel(TIFM_MMCSD_EOFB
| readl(sock->addr
+ SOCK_MMCSD_INT_ENABLE),
sock->addr
+ SOCK_MMCSD_INT_ENABLE);
tifm_sd_exec(host, host->req->stop);
return;
} else {
if (!(host->cmd_flags & SCMD_READY)
|| (host->cmd_flags & CARD_BUSY))
return;
writel((~TIFM_MMCSD_EOFB)
& readl(sock->addr
+ SOCK_MMCSD_INT_ENABLE),
sock->addr
+ SOCK_MMCSD_INT_ENABLE);
}
} else {
if (host->cmd_flags & CARD_BUSY)
return;
writel((~TIFM_MMCSD_EOFB)
& readl(sock->addr
+ SOCK_MMCSD_INT_ENABLE),
sock->addr + SOCK_MMCSD_INT_ENABLE);
}
} else {
if (host->req->stop) {
if (!(host->cmd_flags & SCMD_ACTIVE)) {
host->cmd_flags |= SCMD_ACTIVE;
tifm_sd_exec(host, host->req->stop);
return;
} else {
if (!(host->cmd_flags & SCMD_READY))
return;
}
}
}
}
finish_request:
tasklet_schedule(&host->finish_tasklet);
}
/* Called from interrupt handler */
static void tifm_sd_data_event(struct tifm_dev *sock)
{
struct tifm_sd *host;
unsigned int fifo_status = 0;
struct mmc_data *r_data = NULL;
spin_lock(&sock->lock);
	host = mmc_priv((struct mmc_host *)tifm_get_drvdata(sock));
fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS);
dev_dbg(&sock->dev, "data event: fifo_status %x, flags %x\n",
fifo_status, host->cmd_flags);
if (host->req) {
r_data = host->req->cmd->data;
if (r_data && (fifo_status & TIFM_FIFO_READY)) {
if (tifm_sd_set_dma_data(host, r_data)) {
host->cmd_flags |= FIFO_READY;
tifm_sd_check_status(host);
}
}
}
writel(fifo_status, sock->addr + SOCK_DMA_FIFO_STATUS);
spin_unlock(&sock->lock);
}
/* Called from interrupt handler */
static void tifm_sd_card_event(struct tifm_dev *sock)
{
struct tifm_sd *host;
unsigned int host_status = 0;
int cmd_error = 0;
struct mmc_command *cmd = NULL;
spin_lock(&sock->lock);
	host = mmc_priv((struct mmc_host *)tifm_get_drvdata(sock));
host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
dev_dbg(&sock->dev, "host event: host_status %x, flags %x\n",
host_status, host->cmd_flags);
if (host->req) {
cmd = host->req->cmd;
if (host_status & TIFM_MMCSD_ERRMASK) {
writel(host_status & TIFM_MMCSD_ERRMASK,
sock->addr + SOCK_MMCSD_STATUS);
if (host_status & TIFM_MMCSD_CTO)
cmd_error = -ETIMEDOUT;
else if (host_status & TIFM_MMCSD_CCRC)
cmd_error = -EILSEQ;
if (cmd->data) {
if (host_status & TIFM_MMCSD_DTO)
cmd->data->error = -ETIMEDOUT;
else if (host_status & TIFM_MMCSD_DCRC)
cmd->data->error = -EILSEQ;
}
writel(TIFM_FIFO_INT_SETALL,
sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL);
if (host->req->stop) {
if (host->cmd_flags & SCMD_ACTIVE) {
host->req->stop->error = cmd_error;
host->cmd_flags |= SCMD_READY;
} else {
cmd->error = cmd_error;
host->cmd_flags |= SCMD_ACTIVE;
tifm_sd_exec(host, host->req->stop);
goto done;
}
} else
cmd->error = cmd_error;
} else {
if (host_status & (TIFM_MMCSD_EOC | TIFM_MMCSD_CERR)) {
if (!(host->cmd_flags & CMD_READY)) {
host->cmd_flags |= CMD_READY;
tifm_sd_fetch_resp(cmd, sock);
} else if (host->cmd_flags & SCMD_ACTIVE) {
host->cmd_flags |= SCMD_READY;
tifm_sd_fetch_resp(host->req->stop,
sock);
}
}
if (host_status & TIFM_MMCSD_BRS)
host->cmd_flags |= BRS_READY;
}
if (host->no_dma && cmd->data) {
if (host_status & TIFM_MMCSD_AE)
writel(host_status & TIFM_MMCSD_AE,
sock->addr + SOCK_MMCSD_STATUS);
if (host_status & (TIFM_MMCSD_AE | TIFM_MMCSD_AF
| TIFM_MMCSD_BRS)) {
tifm_sd_transfer_data(host);
host_status &= ~TIFM_MMCSD_AE;
}
}
if (host_status & TIFM_MMCSD_EOFB)
host->cmd_flags &= ~CARD_BUSY;
else if (host_status & TIFM_MMCSD_CB)
host->cmd_flags |= CARD_BUSY;
tifm_sd_check_status(host);
}
done:
writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
spin_unlock(&sock->lock);
}
static void tifm_sd_set_data_timeout(struct tifm_sd *host,
struct mmc_data *data)
{
struct tifm_dev *sock = host->dev;
unsigned int data_timeout = data->timeout_clks;
if (fixed_timeout)
return;
data_timeout += data->timeout_ns /
((1000000000UL / host->clk_freq) * host->clk_div);
if (data_timeout < 0xffff) {
writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
writel((~TIFM_MMCSD_DPE)
& readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
} else {
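		/*
		 * Timeouts that do not fit in 16 bits are scaled down by
		 * 1024; TIFM_MMCSD_DPE presumably enables the matching
		 * prescaler in hardware (an assumption from the register
		 * naming), and a value of 0 means unlimited.
		 */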
data_timeout = (data_timeout >> 10) + 1;
if (data_timeout > 0xffff)
data_timeout = 0; /* set to unlimited */
writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
writel(TIFM_MMCSD_DPE
| readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
}
}
static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct tifm_sd *host = mmc_priv(mmc);
struct tifm_dev *sock = host->dev;
unsigned long flags;
struct mmc_data *r_data = mrq->cmd->data;
spin_lock_irqsave(&sock->lock, flags);
if (host->eject) {
mrq->cmd->error = -ENOMEDIUM;
goto err_out;
}
if (host->req) {
pr_err("%s : unfinished request detected\n",
dev_name(&sock->dev));
mrq->cmd->error = -ETIMEDOUT;
goto err_out;
}
host->cmd_flags = 0;
host->block_pos = 0;
host->sg_pos = 0;
if (mrq->data && !is_power_of_2(mrq->data->blksz))
host->no_dma = 1;
else
host->no_dma = no_dma ? 1 : 0;
if (r_data) {
tifm_sd_set_data_timeout(host, r_data);
if ((r_data->flags & MMC_DATA_WRITE) && !mrq->stop)
writel(TIFM_MMCSD_EOFB
| readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
sock->addr + SOCK_MMCSD_INT_ENABLE);
if (host->no_dma) {
writel(TIFM_MMCSD_BUFINT
| readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
sock->addr + SOCK_MMCSD_INT_ENABLE);
writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8)
| (TIFM_MMCSD_FIFO_SIZE - 1),
sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
host->sg_len = r_data->sg_len;
} else {
sg_init_one(&host->bounce_buf, host->bounce_buf_data,
r_data->blksz);
			if (tifm_map_sg(sock, &host->bounce_buf, 1,
					r_data->flags & MMC_DATA_WRITE
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE) != 1) {
pr_err("%s : scatterlist map failed\n",
dev_name(&sock->dev));
mrq->cmd->error = -ENOMEM;
goto err_out;
}
host->sg_len = tifm_map_sg(sock, r_data->sg,
r_data->sg_len,
r_data->flags
& MMC_DATA_WRITE
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
if (host->sg_len < 1) {
pr_err("%s : scatterlist map failed\n",
dev_name(&sock->dev));
tifm_unmap_sg(sock, &host->bounce_buf, 1,
r_data->flags & MMC_DATA_WRITE
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
mrq->cmd->error = -ENOMEM;
goto err_out;
}
writel(TIFM_FIFO_INT_SETALL,
sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
writel(ilog2(r_data->blksz) - 2,
sock->addr + SOCK_FIFO_PAGE_SIZE);
writel(TIFM_FIFO_ENABLE,
sock->addr + SOCK_FIFO_CONTROL);
writel(TIFM_FIFO_INTMASK,
sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
if (r_data->flags & MMC_DATA_WRITE)
writel(TIFM_MMCSD_TXDE,
sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
else
writel(TIFM_MMCSD_RXDE,
sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
tifm_sd_set_dma_data(host, r_data);
}
writel(r_data->blocks - 1,
sock->addr + SOCK_MMCSD_NUM_BLOCKS);
writel(r_data->blksz - 1,
sock->addr + SOCK_MMCSD_BLOCK_LEN);
}
host->req = mrq;
mod_timer(&host->timer, jiffies + host->timeout_jiffies);
writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL),
sock->addr + SOCK_CONTROL);
tifm_sd_exec(host, mrq->cmd);
spin_unlock_irqrestore(&sock->lock, flags);
return;
err_out:
spin_unlock_irqrestore(&sock->lock, flags);
mmc_request_done(mmc, mrq);
}
static void tifm_sd_end_cmd(struct tasklet_struct *t)
{
struct tifm_sd *host = from_tasklet(host, t, finish_tasklet);
struct tifm_dev *sock = host->dev;
struct mmc_host *mmc = tifm_get_drvdata(sock);
struct mmc_request *mrq;
struct mmc_data *r_data = NULL;
unsigned long flags;
spin_lock_irqsave(&sock->lock, flags);
del_timer(&host->timer);
mrq = host->req;
host->req = NULL;
if (!mrq) {
pr_err(" %s : no request to complete?\n",
dev_name(&sock->dev));
spin_unlock_irqrestore(&sock->lock, flags);
return;
}
r_data = mrq->cmd->data;
if (r_data) {
if (host->no_dma) {
writel((~TIFM_MMCSD_BUFINT)
& readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
sock->addr + SOCK_MMCSD_INT_ENABLE);
} else {
tifm_unmap_sg(sock, &host->bounce_buf, 1,
(r_data->flags & MMC_DATA_WRITE)
? DMA_TO_DEVICE : DMA_FROM_DEVICE);
tifm_unmap_sg(sock, r_data->sg, r_data->sg_len,
(r_data->flags & MMC_DATA_WRITE)
? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}
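		/*
		 * The controller counts blocks and bytes down as they
		 * complete, so the amount transferred is reconstructed
		 * from what is left in NUM_BLOCKS and BLOCK_LEN.
		 */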
r_data->bytes_xfered = r_data->blocks
- readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1;
r_data->bytes_xfered *= r_data->blksz;
r_data->bytes_xfered += r_data->blksz
- readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1;
}
writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
sock->addr + SOCK_CONTROL);
spin_unlock_irqrestore(&sock->lock, flags);
mmc_request_done(mmc, mrq);
}
static void tifm_sd_abort(struct timer_list *t)
{
struct tifm_sd *host = from_timer(host, t, timer);
pr_err("%s : card failed to respond for a long period of time "
"(%x, %x)\n",
dev_name(&host->dev->dev), host->req->cmd->opcode, host->cmd_flags);
tifm_eject(host->dev);
}
static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct tifm_sd *host = mmc_priv(mmc);
struct tifm_dev *sock = host->dev;
unsigned int clk_div1, clk_div2;
unsigned long flags;
spin_lock_irqsave(&sock->lock, flags);
dev_dbg(&sock->dev, "ios: clock = %u, vdd = %x, bus_mode = %x, "
"chip_select = %x, power_mode = %x, bus_width = %x\n",
ios->clock, ios->vdd, ios->bus_mode, ios->chip_select,
ios->power_mode, ios->bus_width);
if (ios->bus_width == MMC_BUS_WIDTH_4) {
writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG),
sock->addr + SOCK_MMCSD_CONFIG);
} else {
writel((~TIFM_MMCSD_4BBUS)
& readl(sock->addr + SOCK_MMCSD_CONFIG),
sock->addr + SOCK_MMCSD_CONFIG);
}
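	/*
	 * Pick whichever base clock (20 or 24 MHz) divides down closest
	 * to, without exceeding, the requested rate. Worked example: for
	 * ios->clock = 400 kHz, clk_div1 = 50 and clk_div2 = 60 both hit
	 * exactly 400 kHz, and the 24 MHz branch is chosen.
	 */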
if (ios->clock) {
clk_div1 = 20000000 / ios->clock;
if (!clk_div1)
clk_div1 = 1;
clk_div2 = 24000000 / ios->clock;
if (!clk_div2)
clk_div2 = 1;
if ((20000000 / clk_div1) > ios->clock)
clk_div1++;
if ((24000000 / clk_div2) > ios->clock)
clk_div2++;
if ((20000000 / clk_div1) > (24000000 / clk_div2)) {
host->clk_freq = 20000000;
host->clk_div = clk_div1;
writel((~TIFM_CTRL_FAST_CLK)
& readl(sock->addr + SOCK_CONTROL),
sock->addr + SOCK_CONTROL);
} else {
host->clk_freq = 24000000;
host->clk_div = clk_div2;
writel(TIFM_CTRL_FAST_CLK
| readl(sock->addr + SOCK_CONTROL),
sock->addr + SOCK_CONTROL);
}
} else {
host->clk_div = 0;
}
host->clk_div &= TIFM_MMCSD_CLKMASK;
writel(host->clk_div
| ((~TIFM_MMCSD_CLKMASK)
& readl(sock->addr + SOCK_MMCSD_CONFIG)),
sock->addr + SOCK_MMCSD_CONFIG);
host->open_drain = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN);
	/* chip_select: maybe later */
	/* vdd: power is set before probe / after remove */
spin_unlock_irqrestore(&sock->lock, flags);
}
static int tifm_sd_ro(struct mmc_host *mmc)
{
int rc = 0;
struct tifm_sd *host = mmc_priv(mmc);
struct tifm_dev *sock = host->dev;
unsigned long flags;
spin_lock_irqsave(&sock->lock, flags);
if (TIFM_MMCSD_CARD_RO & readl(sock->addr + SOCK_PRESENT_STATE))
rc = 1;
spin_unlock_irqrestore(&sock->lock, flags);
return rc;
}
static const struct mmc_host_ops tifm_sd_ops = {
.request = tifm_sd_request,
.set_ios = tifm_sd_ios,
.get_ro = tifm_sd_ro
};
static int tifm_sd_initialize_host(struct tifm_sd *host)
{
int rc;
unsigned int host_status = 0;
struct tifm_dev *sock = host->dev;
writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
host->clk_div = 61;
host->clk_freq = 20000000;
writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL);
writel(host->clk_div | TIFM_MMCSD_POWER,
sock->addr + SOCK_MMCSD_CONFIG);
/* wait up to 0.51 sec for reset */
for (rc = 32; rc <= 256; rc <<= 1) {
if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) {
rc = 0;
break;
}
msleep(rc);
}
if (rc) {
pr_err("%s : controller failed to reset\n",
dev_name(&sock->dev));
return -ENODEV;
}
writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS);
writel(host->clk_div | TIFM_MMCSD_POWER,
sock->addr + SOCK_MMCSD_CONFIG);
writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
	/* command timeout fixed to 64 clocks for now */
writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO);
writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND);
for (rc = 16; rc <= 64; rc <<= 1) {
host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
if (!(host_status & TIFM_MMCSD_ERRMASK)
&& (host_status & TIFM_MMCSD_EOC)) {
rc = 0;
break;
}
msleep(rc);
}
if (rc) {
pr_err("%s : card not ready - probe failed on initialization\n",
dev_name(&sock->dev));
return -ENODEV;
}
writel(TIFM_MMCSD_CERR | TIFM_MMCSD_BRS | TIFM_MMCSD_EOC
| TIFM_MMCSD_ERRMASK,
sock->addr + SOCK_MMCSD_INT_ENABLE);
return 0;
}
static int tifm_sd_probe(struct tifm_dev *sock)
{
struct mmc_host *mmc;
struct tifm_sd *host;
int rc = -EIO;
if (!(TIFM_SOCK_STATE_OCCUPIED
& readl(sock->addr + SOCK_PRESENT_STATE))) {
pr_warn("%s : card gone, unexpectedly\n",
dev_name(&sock->dev));
return rc;
}
mmc = mmc_alloc_host(sizeof(struct tifm_sd), &sock->dev);
if (!mmc)
return -ENOMEM;
host = mmc_priv(mmc);
tifm_set_drvdata(sock, mmc);
host->dev = sock;
host->timeout_jiffies = msecs_to_jiffies(TIFM_MMCSD_REQ_TIMEOUT_MS);
/*
* We use a fixed request timeout of 1s, hence inform the core about it.
* A future improvement should instead respect the cmd->busy_timeout.
*/
mmc->max_busy_timeout = TIFM_MMCSD_REQ_TIMEOUT_MS;
tasklet_setup(&host->finish_tasklet, tifm_sd_end_cmd);
timer_setup(&host->timer, tifm_sd_abort, 0);
mmc->ops = &tifm_sd_ops;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
mmc->caps = MMC_CAP_4_BIT_DATA;
mmc->f_min = 20000000 / 60;
mmc->f_max = 24000000;
mmc->max_blk_count = 2048;
mmc->max_segs = mmc->max_blk_count;
mmc->max_blk_size = min(TIFM_MMCSD_MAX_BLOCK_SIZE, PAGE_SIZE);
mmc->max_seg_size = mmc->max_blk_count * mmc->max_blk_size;
mmc->max_req_size = mmc->max_seg_size;
sock->card_event = tifm_sd_card_event;
sock->data_event = tifm_sd_data_event;
rc = tifm_sd_initialize_host(host);
if (!rc)
rc = mmc_add_host(mmc);
if (!rc)
return 0;
mmc_free_host(mmc);
return rc;
}
static void tifm_sd_remove(struct tifm_dev *sock)
{
struct mmc_host *mmc = tifm_get_drvdata(sock);
struct tifm_sd *host = mmc_priv(mmc);
unsigned long flags;
spin_lock_irqsave(&sock->lock, flags);
host->eject = 1;
writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
spin_unlock_irqrestore(&sock->lock, flags);
tasklet_kill(&host->finish_tasklet);
spin_lock_irqsave(&sock->lock, flags);
if (host->req) {
writel(TIFM_FIFO_INT_SETALL,
sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
host->req->cmd->error = -ENOMEDIUM;
if (host->req->stop)
host->req->stop->error = -ENOMEDIUM;
tasklet_schedule(&host->finish_tasklet);
}
spin_unlock_irqrestore(&sock->lock, flags);
mmc_remove_host(mmc);
dev_dbg(&sock->dev, "after remove\n");
mmc_free_host(mmc);
}
#ifdef CONFIG_PM
static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
{
return 0;
}
static int tifm_sd_resume(struct tifm_dev *sock)
{
struct mmc_host *mmc = tifm_get_drvdata(sock);
struct tifm_sd *host = mmc_priv(mmc);
int rc;
rc = tifm_sd_initialize_host(host);
dev_dbg(&sock->dev, "resume initialize %d\n", rc);
if (rc)
host->eject = 1;
return rc;
}
#else
#define tifm_sd_suspend NULL
#define tifm_sd_resume NULL
#endif /* CONFIG_PM */
static struct tifm_device_id tifm_sd_id_tbl[] = {
{ TIFM_TYPE_SD }, { }
};
static struct tifm_driver tifm_sd_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE
},
.id_table = tifm_sd_id_tbl,
.probe = tifm_sd_probe,
.remove = tifm_sd_remove,
.suspend = tifm_sd_suspend,
.resume = tifm_sd_resume
};
static int __init tifm_sd_init(void)
{
return tifm_register_driver(&tifm_sd_driver);
}
static void __exit tifm_sd_exit(void)
{
tifm_unregister_driver(&tifm_sd_driver);
}
MODULE_AUTHOR("Alex Dubov");
MODULE_DESCRIPTION("TI FlashMedia SD driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(tifm, tifm_sd_id_tbl);
MODULE_VERSION(DRIVER_VERSION);
module_init(tifm_sd_init);
module_exit(tifm_sd_exit);
| linux-master | drivers/mmc/host/tifm_sd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Freescale eSDHC ColdFire family controller driver, platform bus.
*
* Copyright (c) 2020 Timesys Corporation
* Author: Angelo Dureghello <[email protected]>
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/platform_data/mmc-esdhc-mcf.h>
#include <linux/mmc/mmc.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
#define ESDHC_PROCTL_D3CD 0x08
#define ESDHC_SYS_CTRL_DTOCV_MASK 0x0f
#define ESDHC_DEFAULT_HOST_CONTROL 0x28
/*
* Freescale eSDHC has DMA ERR flag at bit 28, not as std spec says, bit 25.
*/
#define ESDHC_INT_VENDOR_SPEC_DMA_ERR BIT(28)
struct pltfm_mcf_data {
struct clk *clk_ipg;
struct clk *clk_ahb;
struct clk *clk_per;
int aside;
int current_bus_width;
};
static inline void esdhc_mcf_buffer_swap32(u32 *buf, int len)
{
int i;
u32 temp;
len = (len + 3) >> 2;
for (i = 0; i < len; i++) {
temp = swab32(*buf);
*buf++ = temp;
}
}
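/*
 * Read-modify-write a sub-word field of a register on this big-endian
 * bus: the low two address bits select the byte lane, e.g. reg 0x2a
 * yields base 0x28 and shift 16, placing an 8-bit value in bits 23:16.
 */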
static inline void esdhc_clrset_be(struct sdhci_host *host,
u32 mask, u32 val, int reg)
{
void __iomem *base = host->ioaddr + (reg & ~3);
u8 shift = (reg & 3) << 3;
mask <<= shift;
val <<= shift;
if (reg == SDHCI_HOST_CONTROL)
val |= ESDHC_PROCTL_D3CD;
writel((readl(base) & ~mask) | val, base);
}
/*
* Note: mcf is big-endian, single bytes need to be accessed at big endian
* offsets.
*/
static void esdhc_mcf_writeb_be(struct sdhci_host *host, u8 val, int reg)
{
void __iomem *base = host->ioaddr + (reg & ~3);
u8 shift = (reg & 3) << 3;
u32 mask = ~(0xff << shift);
if (reg == SDHCI_HOST_CONTROL) {
u32 host_ctrl = ESDHC_DEFAULT_HOST_CONTROL;
u8 dma_bits = (val & SDHCI_CTRL_DMA_MASK) >> 3;
u8 tmp = readb(host->ioaddr + SDHCI_HOST_CONTROL + 1);
tmp &= ~0x03;
tmp |= dma_bits;
		/*
		 * Recomposition needed: always restore endianness and
		 * keep D3CD and AI; only the bus width is being set.
		 */
host_ctrl |= val;
host_ctrl |= (dma_bits << 8);
writel(host_ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
return;
}
writel((readl(base) & mask) | (val << shift), base);
}
static void esdhc_mcf_writew_be(struct sdhci_host *host, u16 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_mcf_data *mcf_data = sdhci_pltfm_priv(pltfm_host);
void __iomem *base = host->ioaddr + (reg & ~3);
u8 shift = (reg & 3) << 3;
u32 mask = ~(0xffff << shift);
switch (reg) {
case SDHCI_TRANSFER_MODE:
mcf_data->aside = val;
return;
case SDHCI_COMMAND:
if (host->cmd->opcode == MMC_STOP_TRANSMISSION)
val |= SDHCI_CMD_ABORTCMD;
/*
* As for the fsl driver,
* we have to set the mode in a single write here.
*/
writel(val << 16 | mcf_data->aside,
host->ioaddr + SDHCI_TRANSFER_MODE);
return;
}
writel((readl(base) & mask) | (val << shift), base);
}
static void esdhc_mcf_writel_be(struct sdhci_host *host, u32 val, int reg)
{
writel(val, host->ioaddr + reg);
}
static u8 esdhc_mcf_readb_be(struct sdhci_host *host, int reg)
{
if (reg == SDHCI_HOST_CONTROL) {
u8 __iomem *base = host->ioaddr + (reg & ~3);
u16 val = readw(base + 2);
u8 dma_bits = (val >> 5) & SDHCI_CTRL_DMA_MASK;
u8 host_ctrl = val & 0xff;
host_ctrl &= ~SDHCI_CTRL_DMA_MASK;
host_ctrl |= dma_bits;
return host_ctrl;
}
return readb(host->ioaddr + (reg ^ 0x3));
}
static u16 esdhc_mcf_readw_be(struct sdhci_host *host, int reg)
{
	/*
	 * The SDHCI spec puts SDHCI_HOST_VERSION at offset 0xFE,
	 * which is wrong for us: ours is at 0xFC.
	 */
if (reg == SDHCI_HOST_VERSION)
reg -= 2;
return readw(host->ioaddr + (reg ^ 0x2));
}
static u32 esdhc_mcf_readl_be(struct sdhci_host *host, int reg)
{
u32 val;
val = readl(host->ioaddr + reg);
	/*
	 * RM (25.3.9): the SD pin clock must never exceed 25 MHz,
	 * so force legacy (non-high-speed) mode.
	 */
if (unlikely(reg == SDHCI_CAPABILITIES))
val &= ~SDHCI_CAN_DO_HISPD;
if (unlikely(reg == SDHCI_INT_STATUS)) {
if (val & ESDHC_INT_VENDOR_SPEC_DMA_ERR) {
val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR;
val |= SDHCI_INT_ADMA_ERROR;
}
}
return val;
}
static unsigned int esdhc_mcf_get_max_timeout_count(struct sdhci_host *host)
{
return 1 << 27;
}
static void esdhc_mcf_set_timeout(struct sdhci_host *host,
struct mmc_command *cmd)
{
/* Use maximum timeout counter */
esdhc_clrset_be(host, ESDHC_SYS_CTRL_DTOCV_MASK, 0xE,
SDHCI_TIMEOUT_CONTROL);
}
static void esdhc_mcf_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_mcf_data *mcf_data = sdhci_pltfm_priv(pltfm_host);
sdhci_reset(host, mask);
esdhc_clrset_be(host, ESDHC_CTRL_BUSWIDTH_MASK,
mcf_data->current_bus_width, SDHCI_HOST_CONTROL);
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static unsigned int esdhc_mcf_pltfm_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
return pltfm_host->clock;
}
static unsigned int esdhc_mcf_pltfm_get_min_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
return pltfm_host->clock / 256 / 16;
}
static void esdhc_mcf_pltfm_set_clock(struct sdhci_host *host,
unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
unsigned long *pll_dr = (unsigned long *)MCF_PLL_DR;
u32 fvco, fsys, fesdhc, temp;
const int sdclkfs[] = {2, 4, 8, 16, 32, 64, 128, 256};
int delta, old_delta = clock;
int i, q, ri, rq;
if (clock == 0) {
host->mmc->actual_clock = 0;
return;
}
	/*
	 * ColdFire eSDHC clocks:
	 *
	 *  pll -+-> / outdiv1 --> fsys
	 *       +-> / outdiv3 --> eSDHC clock ---> / SDCLKFS / DVS
	 *
	 * The mcf5441x datasheet says:
	 * (8.1.2)  eSDHC should be 40 MHz max
	 * (25.3.9) eSDHC input is, for example, 96 MHz ...
	 * (25.3.9) SD pin clock must never exceed 25 MHz
	 *
	 * fvco   = fsys * (outdiv1 + 1)
	 * fesdhc = fvco / (outdiv3 + 1)
	 */
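	/*
	 * Worked example (assuming a 96 MHz eSDHC input clock): for a
	 * 25 MHz request the search below settles on SDCLKFS = 2 and
	 * DVS = 2, i.e. 96 / 2 / 2 = 24 MHz, the closest attainable rate.
	 */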
temp = readl(pll_dr);
fsys = pltfm_host->clock;
fvco = fsys * ((temp & 0x1f) + 1);
fesdhc = fvco / (((temp >> 10) & 0x1f) + 1);
for (i = 0; i < 8; ++i) {
int result = fesdhc / sdclkfs[i];
for (q = 1; q < 17; ++q) {
int finale = result / q;
delta = abs(clock - finale);
if (delta < old_delta) {
old_delta = delta;
ri = i;
rq = q;
}
}
}
/*
* Apply divisors and re-enable all the clocks
*/
temp = ((sdclkfs[ri] >> 1) << 8) | ((rq - 1) << 4) |
(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN);
esdhc_clrset_be(host, 0x0000fff7, temp, SDHCI_CLOCK_CONTROL);
host->mmc->actual_clock = clock;
mdelay(1);
}
static void esdhc_mcf_pltfm_set_bus_width(struct sdhci_host *host, int width)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_mcf_data *mcf_data = sdhci_pltfm_priv(pltfm_host);
switch (width) {
case MMC_BUS_WIDTH_4:
mcf_data->current_bus_width = ESDHC_CTRL_4BITBUS;
break;
default:
mcf_data->current_bus_width = 0;
break;
}
esdhc_clrset_be(host, ESDHC_CTRL_BUSWIDTH_MASK,
mcf_data->current_bus_width, SDHCI_HOST_CONTROL);
}
static void esdhc_mcf_request_done(struct sdhci_host *host,
struct mmc_request *mrq)
{
struct scatterlist *sg;
u32 *buffer;
int i;
if (!mrq->data || !mrq->data->bytes_xfered)
goto exit_done;
if (mmc_get_dma_dir(mrq->data) != DMA_FROM_DEVICE)
goto exit_done;
/*
* On mcf5441x there is no hw sdma option/flag to select the dma
 * transfer endianness. A swap after the transfer is needed.
*/
for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i) {
buffer = (u32 *)sg_virt(sg);
esdhc_mcf_buffer_swap32(buffer, sg->length);
}
exit_done:
mmc_request_done(host->mmc, mrq);
}
static void esdhc_mcf_copy_to_bounce_buffer(struct sdhci_host *host,
struct mmc_data *data,
unsigned int length)
{
sg_copy_to_buffer(data->sg, data->sg_len,
host->bounce_buffer, length);
esdhc_mcf_buffer_swap32((u32 *)host->bounce_buffer,
data->blksz * data->blocks);
}
static struct sdhci_ops sdhci_esdhc_ops = {
.reset = esdhc_mcf_reset,
.set_clock = esdhc_mcf_pltfm_set_clock,
.get_max_clock = esdhc_mcf_pltfm_get_max_clock,
.get_min_clock = esdhc_mcf_pltfm_get_min_clock,
.set_bus_width = esdhc_mcf_pltfm_set_bus_width,
.get_max_timeout_count = esdhc_mcf_get_max_timeout_count,
.set_timeout = esdhc_mcf_set_timeout,
.write_b = esdhc_mcf_writeb_be,
.write_w = esdhc_mcf_writew_be,
.write_l = esdhc_mcf_writel_be,
.read_b = esdhc_mcf_readb_be,
.read_w = esdhc_mcf_readw_be,
.read_l = esdhc_mcf_readl_be,
.copy_to_bounce_buffer = esdhc_mcf_copy_to_bounce_buffer,
.request_done = esdhc_mcf_request_done,
};
static const struct sdhci_pltfm_data sdhci_esdhc_mcf_pdata = {
.ops = &sdhci_esdhc_ops,
.quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_FORCE_DMA,
	/*
	 * Mandatory quirk: the controller does not support CMD23.
	 * Without it, CMD23 would be used on > 8G cards and the
	 * driver would time out.
	 */
	.quirks2 = SDHCI_QUIRK2_HOST_NO_CMD23,
};
static int esdhc_mcf_plat_init(struct sdhci_host *host,
struct pltfm_mcf_data *mcf_data)
{
struct mcf_esdhc_platform_data *plat_data;
struct device *dev = mmc_dev(host->mmc);
if (!dev->platform_data) {
dev_err(dev, "no platform data!\n");
return -EINVAL;
}
plat_data = (struct mcf_esdhc_platform_data *)dev->platform_data;
/* Card_detect */
switch (plat_data->cd_type) {
default:
case ESDHC_CD_CONTROLLER:
		/* The controller provides a working card-detect signal */
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
break;
case ESDHC_CD_PERMANENT:
host->mmc->caps |= MMC_CAP_NONREMOVABLE;
break;
case ESDHC_CD_NONE:
break;
}
switch (plat_data->max_bus_width) {
case 4:
host->mmc->caps |= MMC_CAP_4_BIT_DATA;
break;
case 1:
default:
host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
break;
}
return 0;
}
static int sdhci_esdhc_mcf_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct pltfm_mcf_data *mcf_data;
int err;
host = sdhci_pltfm_init(pdev, &sdhci_esdhc_mcf_pdata,
sizeof(*mcf_data));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
mcf_data = sdhci_pltfm_priv(pltfm_host);
host->sdma_boundary = 0;
host->flags |= SDHCI_AUTO_CMD12;
mcf_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(mcf_data->clk_ipg)) {
err = PTR_ERR(mcf_data->clk_ipg);
goto err_exit;
}
mcf_data->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
if (IS_ERR(mcf_data->clk_ahb)) {
err = PTR_ERR(mcf_data->clk_ahb);
goto err_exit;
}
mcf_data->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(mcf_data->clk_per)) {
err = PTR_ERR(mcf_data->clk_per);
goto err_exit;
}
pltfm_host->clk = mcf_data->clk_per;
pltfm_host->clock = clk_get_rate(pltfm_host->clk);
err = clk_prepare_enable(mcf_data->clk_per);
if (err)
goto err_exit;
err = clk_prepare_enable(mcf_data->clk_ipg);
if (err)
goto unprep_per;
err = clk_prepare_enable(mcf_data->clk_ahb);
if (err)
goto unprep_ipg;
err = esdhc_mcf_plat_init(host, mcf_data);
if (err)
goto unprep_ahb;
err = sdhci_setup_host(host);
if (err)
goto unprep_ahb;
if (!host->bounce_buffer) {
dev_err(&pdev->dev, "bounce buffer not allocated");
err = -ENOMEM;
goto cleanup;
}
err = __sdhci_add_host(host);
if (err)
goto cleanup;
return 0;
cleanup:
sdhci_cleanup_host(host);
unprep_ahb:
clk_disable_unprepare(mcf_data->clk_ahb);
unprep_ipg:
clk_disable_unprepare(mcf_data->clk_ipg);
unprep_per:
clk_disable_unprepare(mcf_data->clk_per);
err_exit:
sdhci_pltfm_free(pdev);
return err;
}
static void sdhci_esdhc_mcf_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_mcf_data *mcf_data = sdhci_pltfm_priv(pltfm_host);
sdhci_remove_host(host, 0);
clk_disable_unprepare(mcf_data->clk_ipg);
clk_disable_unprepare(mcf_data->clk_ahb);
clk_disable_unprepare(mcf_data->clk_per);
sdhci_pltfm_free(pdev);
}
static struct platform_driver sdhci_esdhc_mcf_driver = {
.driver = {
.name = "sdhci-esdhc-mcf",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = sdhci_esdhc_mcf_probe,
.remove_new = sdhci_esdhc_mcf_remove,
};
module_platform_driver(sdhci_esdhc_mcf_driver);
MODULE_DESCRIPTION("SDHCI driver for Freescale ColdFire eSDHC");
MODULE_AUTHOR("Angelo Dureghello <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mmc/host/sdhci-esdhc-mcf.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Remote VUB300 SDIO/SDmem Host Controller Driver
*
* Copyright (C) 2010 Elan Digital Systems Limited
*
* based on USB Skeleton driver - 2.2
*
* Copyright (C) 2001-2004 Greg Kroah-Hartman ([email protected])
*
* VUB300: is a USB 2.0 client device with a single SDIO/SDmem/MMC slot
* Any SDIO/SDmem/MMC device plugged into the VUB300 will appear,
* by virtue of this driver, to have been plugged into a local
* SDIO host controller, similar to, say, a PCI Ricoh controller
* This is because this kernel device driver is both a USB 2.0
* client device driver AND an MMC host controller driver. Thus
* if there is an existing driver for the inserted SDIO/SDmem/MMC
* device then that driver will be used by the kernel to manage
* the device in exactly the same fashion as if it had been
* directly plugged into, say, a local pci bus Ricoh controller
*
* RANT: this driver was written using a display 128x48 - converting it
* to a line width of 80 makes it very difficult to support. In
* particular functions have been broken down into sub functions
* and the original meaningful names have been shortened into
* cryptic ones.
* The problem is that executing a fragment of code subject to
* two conditions means an indentation of 24, thus leaving only
* 56 characters for a C statement. And that is quite ridiculous!
*
* Data types: data passed to/from the VUB300 is fixed to a number of
* bits and driver data fields reflect that limit by using
* u8, u16, u32
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/mutex.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/workqueue.h>
#include <linux/ctype.h>
#include <linux/firmware.h>
#include <linux/scatterlist.h>
struct host_controller_info {
u8 info_size;
u16 firmware_version;
u8 number_of_ports;
} __packed;
#define FIRMWARE_BLOCK_BOUNDARY 1024
struct sd_command_header {
u8 header_size;
u8 header_type;
u8 port_number;
u8 command_type; /* Bit7 - Rd/Wr */
u8 command_index;
u8 transfer_size[4]; /* ReadSize + ReadSize */
u8 response_type;
u8 arguments[4];
u8 block_count[2];
u8 block_size[2];
u8 block_boundary[2];
u8 reserved[44]; /* to pad out to 64 bytes */
} __packed;
struct sd_irqpoll_header {
u8 header_size;
u8 header_type;
u8 port_number;
u8 command_type; /* Bit7 - Rd/Wr */
u8 padding[16]; /* don't ask why !! */
u8 poll_timeout_msb;
u8 poll_timeout_lsb;
u8 reserved[42]; /* to pad out to 64 bytes */
} __packed;
struct sd_common_header {
u8 header_size;
u8 header_type;
u8 port_number;
} __packed;
struct sd_response_header {
u8 header_size;
u8 header_type;
u8 port_number;
u8 command_type;
u8 command_index;
u8 command_response[];
} __packed;
struct sd_status_header {
u8 header_size;
u8 header_type;
u8 port_number;
u16 port_flags;
u32 sdio_clock;
u16 host_header_size;
u16 func_header_size;
u16 ctrl_header_size;
} __packed;
struct sd_error_header {
u8 header_size;
u8 header_type;
u8 port_number;
u8 error_code;
} __packed;
struct sd_interrupt_header {
u8 header_size;
u8 header_type;
u8 port_number;
} __packed;
struct offload_registers_access {
u8 command_byte[4];
u8 Respond_Byte[4];
} __packed;
#define INTERRUPT_REGISTER_ACCESSES 15
struct sd_offloaded_interrupt {
u8 header_size;
u8 header_type;
u8 port_number;
struct offload_registers_access reg[INTERRUPT_REGISTER_ACCESSES];
} __packed;
struct sd_register_header {
u8 header_size;
u8 header_type;
u8 port_number;
u8 command_type;
u8 command_index;
u8 command_response[6];
} __packed;
#define PIGGYBACK_REGISTER_ACCESSES 14
struct sd_offloaded_piggyback {
struct sd_register_header sdio;
struct offload_registers_access reg[PIGGYBACK_REGISTER_ACCESSES];
} __packed;
union sd_response {
struct sd_common_header common;
struct sd_status_header status;
struct sd_error_header error;
struct sd_interrupt_header interrupt;
struct sd_response_header response;
struct sd_offloaded_interrupt irq;
struct sd_offloaded_piggyback pig;
} __packed;
union sd_command {
struct sd_command_header head;
struct sd_irqpoll_header poll;
} __packed;
enum SD_RESPONSE_TYPE {
SDRT_UNSPECIFIED = 0,
SDRT_NONE,
SDRT_1,
SDRT_1B,
SDRT_2,
SDRT_3,
SDRT_4,
SDRT_5,
SDRT_5B,
SDRT_6,
SDRT_7,
};
#define RESPONSE_INTERRUPT 0x01
#define RESPONSE_ERROR 0x02
#define RESPONSE_STATUS 0x03
#define RESPONSE_IRQ_DISABLED 0x05
#define RESPONSE_IRQ_ENABLED 0x06
#define RESPONSE_PIGGYBACKED 0x07
#define RESPONSE_NO_INTERRUPT 0x08
#define RESPONSE_PIG_DISABLED 0x09
#define RESPONSE_PIG_ENABLED 0x0A
#define SD_ERROR_1BIT_TIMEOUT 0x01
#define SD_ERROR_4BIT_TIMEOUT 0x02
#define SD_ERROR_1BIT_CRC_WRONG 0x03
#define SD_ERROR_4BIT_CRC_WRONG 0x04
#define SD_ERROR_1BIT_CRC_ERROR 0x05
#define SD_ERROR_4BIT_CRC_ERROR 0x06
#define SD_ERROR_NO_CMD_ENDBIT 0x07
#define SD_ERROR_NO_1BIT_DATEND 0x08
#define SD_ERROR_NO_4BIT_DATEND 0x09
#define SD_ERROR_1BIT_UNEXPECTED_TIMEOUT 0x0A
#define SD_ERROR_4BIT_UNEXPECTED_TIMEOUT 0x0B
#define SD_ERROR_ILLEGAL_COMMAND 0x0C
#define SD_ERROR_NO_DEVICE 0x0D
#define SD_ERROR_TRANSFER_LENGTH 0x0E
#define SD_ERROR_1BIT_DATA_TIMEOUT 0x0F
#define SD_ERROR_4BIT_DATA_TIMEOUT 0x10
#define SD_ERROR_ILLEGAL_STATE 0x11
#define SD_ERROR_UNKNOWN_ERROR 0x12
#define SD_ERROR_RESERVED_ERROR 0x13
#define SD_ERROR_INVALID_FUNCTION 0x14
#define SD_ERROR_OUT_OF_RANGE 0x15
#define SD_ERROR_STAT_CMD 0x16
#define SD_ERROR_STAT_DATA 0x17
#define SD_ERROR_STAT_CMD_TIMEOUT 0x18
#define SD_ERROR_SDCRDY_STUCK 0x19
#define SD_ERROR_UNHANDLED 0x1A
#define SD_ERROR_OVERRUN 0x1B
#define SD_ERROR_PIO_TIMEOUT 0x1C
#define FUN(c) (0x000007 & (c->arg>>28))
#define REG(c) (0x01FFFF & (c->arg>>9))
static bool limit_speed_to_24_MHz;
module_param(limit_speed_to_24_MHz, bool, 0644);
MODULE_PARM_DESC(limit_speed_to_24_MHz, "Limit Max SDIO Clock Speed to 24 MHz");
static bool pad_input_to_usb_pkt;
module_param(pad_input_to_usb_pkt, bool, 0644);
MODULE_PARM_DESC(pad_input_to_usb_pkt,
"Pad USB data input transfers to whole USB Packet");
static bool disable_offload_processing;
module_param(disable_offload_processing, bool, 0644);
MODULE_PARM_DESC(disable_offload_processing, "Disable Offload Processing");
static bool force_1_bit_data_xfers;
module_param(force_1_bit_data_xfers, bool, 0644);
MODULE_PARM_DESC(force_1_bit_data_xfers,
"Force SDIO Data Transfers to 1-bit Mode");
static bool force_polling_for_irqs;
module_param(force_polling_for_irqs, bool, 0644);
MODULE_PARM_DESC(force_polling_for_irqs, "Force Polling for SDIO interrupts");
static int firmware_irqpoll_timeout = 1024;
module_param(firmware_irqpoll_timeout, int, 0644);
MODULE_PARM_DESC(firmware_irqpoll_timeout, "VUB300 firmware irqpoll timeout");
static int force_max_req_size = 128;
module_param(force_max_req_size, int, 0644);
MODULE_PARM_DESC(force_max_req_size, "set max request size in kBytes");
#ifdef SMSC_DEVELOPMENT_BOARD
static int firmware_rom_wait_states = 0x04;
#else
static int firmware_rom_wait_states = 0x1C;
#endif
module_param(firmware_rom_wait_states, int, 0644);
MODULE_PARM_DESC(firmware_rom_wait_states,
"ROM wait states byte=RRRIIEEE (Reserved Internal External)");
#define ELAN_VENDOR_ID 0x2201
#define VUB300_VENDOR_ID 0x0424
#define VUB300_PRODUCT_ID 0x012C
static const struct usb_device_id vub300_table[] = {
{USB_DEVICE(ELAN_VENDOR_ID, VUB300_PRODUCT_ID)},
{USB_DEVICE(VUB300_VENDOR_ID, VUB300_PRODUCT_ID)},
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, vub300_table);
static struct workqueue_struct *cmndworkqueue;
static struct workqueue_struct *pollworkqueue;
static struct workqueue_struct *deadworkqueue;
static inline int interface_to_InterfaceNumber(struct usb_interface *interface)
{
if (!interface)
return -1;
if (!interface->cur_altsetting)
return -1;
return interface->cur_altsetting->desc.bInterfaceNumber;
}
struct sdio_register {
unsigned func_num:3;
unsigned sdio_reg:17;
unsigned activate:1;
unsigned prepared:1;
unsigned regvalue:8;
unsigned response:8;
unsigned sparebit:26;
};
struct vub300_mmc_host {
struct usb_device *udev;
struct usb_interface *interface;
struct kref kref;
struct mutex cmd_mutex;
struct mutex irq_mutex;
char vub_name[3 + (9 * 8) + 4 + 1]; /* max of 7 sdio fn's */
u8 cmnd_out_ep; /* EndPoint for commands */
u8 cmnd_res_ep; /* EndPoint for responses */
u8 data_out_ep; /* EndPoint for out data */
u8 data_inp_ep; /* EndPoint for inp data */
bool card_powered;
bool card_present;
bool read_only;
bool large_usb_packets;
bool app_spec; /* ApplicationSpecific */
bool irq_enabled; /* by the MMC CORE */
bool irq_disabled; /* in the firmware */
unsigned bus_width:4;
u8 total_offload_count;
u8 dynamic_register_count;
u8 resp_len;
u32 datasize;
int errors;
int usb_transport_fail;
int usb_timed_out;
int irqs_queued;
struct sdio_register sdio_register[16];
struct offload_interrupt_function_register {
#define MAXREGBITS 4
#define MAXREGS (1<<MAXREGBITS)
#define MAXREGMASK (MAXREGS-1)
u8 offload_count;
u32 offload_point;
struct offload_registers_access reg[MAXREGS];
} fn[8];
u16 fbs[8]; /* Function Block Size */
struct mmc_command *cmd;
struct mmc_request *req;
struct mmc_data *data;
struct mmc_host *mmc;
struct urb *urb;
struct urb *command_out_urb;
struct urb *command_res_urb;
struct completion command_complete;
struct completion irqpoll_complete;
union sd_command cmnd;
union sd_response resp;
struct timer_list sg_transfer_timer;
struct usb_sg_request sg_request;
struct timer_list inactivity_timer;
struct work_struct deadwork;
struct work_struct cmndwork;
struct delayed_work pollwork;
struct host_controller_info hc_info;
struct sd_status_header system_port_status;
u8 padded_buffer[64];
};
#define kref_to_vub300_mmc_host(d) container_of(d, struct vub300_mmc_host, kref)
#define SET_TRANSFER_PSEUDOCODE 21
#define SET_INTERRUPT_PSEUDOCODE 20
#define SET_FAILURE_MODE 18
#define SET_ROM_WAIT_STATES 16
#define SET_IRQ_ENABLE 13
#define SET_CLOCK_SPEED 11
#define SET_FUNCTION_BLOCK_SIZE 9
#define SET_SD_DATA_MODE 6
#define SET_SD_POWER 4
#define ENTER_DFU_MODE 3
#define GET_HC_INF0 1
#define GET_SYSTEM_PORT_STATUS 0
static void vub300_delete(struct kref *kref)
{ /* kref callback - softirq */
struct vub300_mmc_host *vub300 = kref_to_vub300_mmc_host(kref);
struct mmc_host *mmc = vub300->mmc;
usb_free_urb(vub300->command_out_urb);
vub300->command_out_urb = NULL;
usb_free_urb(vub300->command_res_urb);
vub300->command_res_urb = NULL;
usb_put_dev(vub300->udev);
mmc_free_host(mmc);
/*
* and hence also frees vub300
* which is contained at the end of struct mmc
*/
}
static void vub300_queue_cmnd_work(struct vub300_mmc_host *vub300)
{
kref_get(&vub300->kref);
if (queue_work(cmndworkqueue, &vub300->cmndwork)) {
/*
* then the cmndworkqueue was not previously
		 * running and the above get ref is obviously
		 * required and will be put when the thread
* terminates by a specific call
*/
} else {
/*
* the cmndworkqueue was already running from
* a previous invocation and thus to keep the
* kref counts correct we must undo the get
*/
kref_put(&vub300->kref, vub300_delete);
}
}
static void vub300_queue_poll_work(struct vub300_mmc_host *vub300, int delay)
{
kref_get(&vub300->kref);
if (queue_delayed_work(pollworkqueue, &vub300->pollwork, delay)) {
/*
* then the pollworkqueue was not previously
		 * running and the above get ref is obviously
		 * required and will be put when the thread
* terminates by a specific call
*/
} else {
/*
* the pollworkqueue was already running from
* a previous invocation and thus to keep the
* kref counts correct we must undo the get
*/
kref_put(&vub300->kref, vub300_delete);
}
}
static void vub300_queue_dead_work(struct vub300_mmc_host *vub300)
{
kref_get(&vub300->kref);
if (queue_work(deadworkqueue, &vub300->deadwork)) {
/*
* then the deadworkqueue was not previously
		 * running and the above get ref is obviously
		 * required and will be put when the thread
* terminates by a specific call
*/
} else {
/*
* the deadworkqueue was already running from
* a previous invocation and thus to keep the
* kref counts correct we must undo the get
*/
kref_put(&vub300->kref, vub300_delete);
}
}
static void irqpoll_res_completed(struct urb *urb)
{ /* urb completion handler - hardirq */
struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
if (urb->status)
vub300->usb_transport_fail = urb->status;
complete(&vub300->irqpoll_complete);
}
static void irqpoll_out_completed(struct urb *urb)
{ /* urb completion handler - hardirq */
struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
if (urb->status) {
vub300->usb_transport_fail = urb->status;
complete(&vub300->irqpoll_complete);
return;
} else {
int ret;
unsigned int pipe =
usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep);
usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe,
&vub300->resp, sizeof(vub300->resp),
irqpoll_res_completed, vub300);
vub300->command_res_urb->actual_length = 0;
ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC);
if (ret) {
vub300->usb_transport_fail = ret;
complete(&vub300->irqpoll_complete);
}
return;
}
}
static void send_irqpoll(struct vub300_mmc_host *vub300)
{
/* cmd_mutex is held by vub300_pollwork_thread */
int retval;
int timeout = 0xFFFF & (0x0001FFFF - firmware_irqpoll_timeout);
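	/*
	 * With the default firmware_irqpoll_timeout of 1024 this works
	 * out to 0xFBFF; the complement-style encoding is presumably
	 * what the firmware's countdown expects. This is an inference
	 * from the arithmetic, not from documentation.
	 */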
vub300->cmnd.poll.header_size = 22;
vub300->cmnd.poll.header_type = 1;
vub300->cmnd.poll.port_number = 0;
vub300->cmnd.poll.command_type = 2;
vub300->cmnd.poll.poll_timeout_lsb = 0xFF & (unsigned)timeout;
vub300->cmnd.poll.poll_timeout_msb = 0xFF & (unsigned)(timeout >> 8);
usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev,
usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep)
, &vub300->cmnd, sizeof(vub300->cmnd)
, irqpoll_out_completed, vub300);
retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL);
if (0 > retval) {
vub300->usb_transport_fail = retval;
vub300_queue_poll_work(vub300, 1);
complete(&vub300->irqpoll_complete);
return;
} else {
return;
}
}
static void new_system_port_status(struct vub300_mmc_host *vub300)
{
int old_card_present = vub300->card_present;
int new_card_present =
(0x0001 & vub300->system_port_status.port_flags) ? 1 : 0;
vub300->read_only =
(0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
if (new_card_present && !old_card_present) {
dev_info(&vub300->udev->dev, "card just inserted\n");
vub300->card_present = 1;
vub300->bus_width = 0;
if (disable_offload_processing)
strncpy(vub300->vub_name, "EMPTY Processing Disabled",
sizeof(vub300->vub_name));
else
vub300->vub_name[0] = 0;
mmc_detect_change(vub300->mmc, 1);
} else if (!new_card_present && old_card_present) {
dev_info(&vub300->udev->dev, "card just ejected\n");
vub300->card_present = 0;
mmc_detect_change(vub300->mmc, 0);
} else {
/* no change */
}
}
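/*
 * The per-function offload store is a small ring buffer: offload_point
 * indexes the oldest entry, offload_count the fill level, and the
 * power-of-two MAXREGMASK wraps the index into reg[].
 */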
static void __add_offloaded_reg_to_fifo(struct vub300_mmc_host *vub300,
struct offload_registers_access
*register_access, u8 func)
{
u8 r = vub300->fn[func].offload_point + vub300->fn[func].offload_count;
memcpy(&vub300->fn[func].reg[MAXREGMASK & r], register_access,
sizeof(struct offload_registers_access));
vub300->fn[func].offload_count += 1;
vub300->total_offload_count += 1;
}
static void add_offloaded_reg(struct vub300_mmc_host *vub300,
struct offload_registers_access *register_access)
{
u32 Register = ((0x03 & register_access->command_byte[0]) << 15)
| ((0xFF & register_access->command_byte[1]) << 7)
| ((0xFE & register_access->command_byte[2]) >> 1);
u8 func = ((0x70 & register_access->command_byte[0]) >> 4);
u8 regs = vub300->dynamic_register_count;
u8 i = 0;
while (0 < regs-- && 1 == vub300->sdio_register[i].activate) {
if (vub300->sdio_register[i].func_num == func &&
vub300->sdio_register[i].sdio_reg == Register) {
if (vub300->sdio_register[i].prepared == 0)
vub300->sdio_register[i].prepared = 1;
vub300->sdio_register[i].response =
register_access->Respond_Byte[2];
vub300->sdio_register[i].regvalue =
register_access->Respond_Byte[3];
return;
} else {
i += 1;
continue;
}
}
__add_offloaded_reg_to_fifo(vub300, register_access, func);
}
static void check_vub300_port_status(struct vub300_mmc_host *vub300)
{
/*
* cmd_mutex is held by vub300_pollwork_thread,
* vub300_deadwork_thread or vub300_cmndwork_thread
*/
int retval;
retval =
usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
GET_SYSTEM_PORT_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, 0x0000, &vub300->system_port_status,
sizeof(vub300->system_port_status), 1000);
if (sizeof(vub300->system_port_status) == retval)
new_system_port_status(vub300);
}
static void __vub300_irqpoll_response(struct vub300_mmc_host *vub300)
{
/* cmd_mutex is held by vub300_pollwork_thread */
if (vub300->command_res_urb->actual_length == 0)
return;
switch (vub300->resp.common.header_type) {
case RESPONSE_INTERRUPT:
mutex_lock(&vub300->irq_mutex);
if (vub300->irq_enabled)
mmc_signal_sdio_irq(vub300->mmc);
else
vub300->irqs_queued += 1;
vub300->irq_disabled = 1;
mutex_unlock(&vub300->irq_mutex);
break;
case RESPONSE_ERROR:
if (vub300->resp.error.error_code == SD_ERROR_NO_DEVICE)
check_vub300_port_status(vub300);
break;
case RESPONSE_STATUS:
vub300->system_port_status = vub300->resp.status;
new_system_port_status(vub300);
if (!vub300->card_present)
vub300_queue_poll_work(vub300, HZ / 5);
break;
case RESPONSE_IRQ_DISABLED:
{
int offloaded_data_length = vub300->resp.common.header_size - 3;
int register_count = offloaded_data_length >> 3;
int ri = 0;
while (register_count--) {
add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]);
ri += 1;
}
mutex_lock(&vub300->irq_mutex);
if (vub300->irq_enabled)
mmc_signal_sdio_irq(vub300->mmc);
else
vub300->irqs_queued += 1;
vub300->irq_disabled = 1;
mutex_unlock(&vub300->irq_mutex);
break;
}
case RESPONSE_IRQ_ENABLED:
{
int offloaded_data_length = vub300->resp.common.header_size - 3;
int register_count = offloaded_data_length >> 3;
int ri = 0;
while (register_count--) {
add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]);
ri += 1;
}
mutex_lock(&vub300->irq_mutex);
if (vub300->irq_enabled)
mmc_signal_sdio_irq(vub300->mmc);
else
vub300->irqs_queued += 1;
vub300->irq_disabled = 0;
mutex_unlock(&vub300->irq_mutex);
break;
}
case RESPONSE_NO_INTERRUPT:
vub300_queue_poll_work(vub300, 1);
break;
default:
break;
}
}
static void __do_poll(struct vub300_mmc_host *vub300)
{
/* cmd_mutex is held by vub300_pollwork_thread */
unsigned long commretval;
mod_timer(&vub300->inactivity_timer, jiffies + HZ);
init_completion(&vub300->irqpoll_complete);
send_irqpoll(vub300);
commretval = wait_for_completion_timeout(&vub300->irqpoll_complete,
msecs_to_jiffies(500));
if (vub300->usb_transport_fail) {
/* no need to do anything */
} else if (commretval == 0) {
vub300->usb_timed_out = 1;
usb_kill_urb(vub300->command_out_urb);
usb_kill_urb(vub300->command_res_urb);
} else { /* commretval > 0 */
__vub300_irqpoll_response(vub300);
}
}
/* this thread runs only when the driver
* is trying to poll the device for an IRQ
*/
static void vub300_pollwork_thread(struct work_struct *work)
{ /* NOT irq */
struct vub300_mmc_host *vub300 = container_of(work,
struct vub300_mmc_host, pollwork.work);
if (!vub300->interface) {
kref_put(&vub300->kref, vub300_delete);
return;
}
mutex_lock(&vub300->cmd_mutex);
if (vub300->cmd) {
vub300_queue_poll_work(vub300, 1);
} else if (!vub300->card_present) {
/* no need to do anything */
} else { /* vub300->card_present */
mutex_lock(&vub300->irq_mutex);
if (!vub300->irq_enabled) {
mutex_unlock(&vub300->irq_mutex);
} else if (vub300->irqs_queued) {
vub300->irqs_queued -= 1;
mmc_signal_sdio_irq(vub300->mmc);
mod_timer(&vub300->inactivity_timer, jiffies + HZ);
mutex_unlock(&vub300->irq_mutex);
} else { /* NOT vub300->irqs_queued */
mutex_unlock(&vub300->irq_mutex);
__do_poll(vub300);
}
}
mutex_unlock(&vub300->cmd_mutex);
kref_put(&vub300->kref, vub300_delete);
}
static void vub300_deadwork_thread(struct work_struct *work)
{ /* NOT irq */
struct vub300_mmc_host *vub300 =
container_of(work, struct vub300_mmc_host, deadwork);
if (!vub300->interface) {
kref_put(&vub300->kref, vub300_delete);
return;
}
mutex_lock(&vub300->cmd_mutex);
if (vub300->cmd) {
/*
* a command got in as the inactivity
* timer expired - so we just let the
* processing of the command show if
* the device is dead
*/
} else if (vub300->card_present) {
check_vub300_port_status(vub300);
} else if (vub300->mmc && vub300->mmc->card) {
/*
* the MMC core must not have responded
* to the previous indication - lets
* hope that it eventually does so we
* will just ignore this for now
*/
} else {
check_vub300_port_status(vub300);
}
mod_timer(&vub300->inactivity_timer, jiffies + HZ);
mutex_unlock(&vub300->cmd_mutex);
kref_put(&vub300->kref, vub300_delete);
}
static void vub300_inactivity_timer_expired(struct timer_list *t)
{ /* softirq */
struct vub300_mmc_host *vub300 = from_timer(vub300, t,
inactivity_timer);
if (!vub300->interface) {
kref_put(&vub300->kref, vub300_delete);
} else if (vub300->cmd) {
mod_timer(&vub300->inactivity_timer, jiffies + HZ);
} else {
vub300_queue_dead_work(vub300);
mod_timer(&vub300->inactivity_timer, jiffies + HZ);
}
}
static int vub300_response_error(u8 error_code)
{
switch (error_code) {
case SD_ERROR_PIO_TIMEOUT:
case SD_ERROR_1BIT_TIMEOUT:
case SD_ERROR_4BIT_TIMEOUT:
return -ETIMEDOUT;
case SD_ERROR_STAT_DATA:
case SD_ERROR_OVERRUN:
case SD_ERROR_STAT_CMD:
case SD_ERROR_STAT_CMD_TIMEOUT:
case SD_ERROR_SDCRDY_STUCK:
case SD_ERROR_UNHANDLED:
case SD_ERROR_1BIT_CRC_WRONG:
case SD_ERROR_4BIT_CRC_WRONG:
case SD_ERROR_1BIT_CRC_ERROR:
case SD_ERROR_4BIT_CRC_ERROR:
case SD_ERROR_NO_CMD_ENDBIT:
case SD_ERROR_NO_1BIT_DATEND:
case SD_ERROR_NO_4BIT_DATEND:
case SD_ERROR_1BIT_DATA_TIMEOUT:
case SD_ERROR_4BIT_DATA_TIMEOUT:
case SD_ERROR_1BIT_UNEXPECTED_TIMEOUT:
case SD_ERROR_4BIT_UNEXPECTED_TIMEOUT:
return -EILSEQ;
case 33:
return -EILSEQ;
case SD_ERROR_ILLEGAL_COMMAND:
return -EINVAL;
case SD_ERROR_NO_DEVICE:
return -ENOMEDIUM;
default:
return -ENODEV;
}
}
static void command_res_completed(struct urb *urb)
{ /* urb completion handler - hardirq */
struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
if (urb->status) {
/* we have to let the initiator handle the error */
} else if (vub300->command_res_urb->actual_length == 0) {
/*
* we have seen this happen once or twice and
* we suspect a buggy USB host controller
*/
} else if (!vub300->data) {
/* this means that the command (typically CMD52) succeeded */
} else if (vub300->resp.common.header_type != 0x02) {
/*
* this is an error response from the VUB300 chip
* and we let the initiator handle it
*/
} else if (vub300->urb) {
vub300->cmd->error =
vub300_response_error(vub300->resp.error.error_code);
usb_unlink_urb(vub300->urb);
} else {
vub300->cmd->error =
vub300_response_error(vub300->resp.error.error_code);
usb_sg_cancel(&vub300->sg_request);
}
complete(&vub300->command_complete); /* got_response_in */
}
static void command_out_completed(struct urb *urb)
{ /* urb completion handler - hardirq */
struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
if (urb->status) {
complete(&vub300->command_complete);
} else {
int ret;
unsigned int pipe =
usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep);
usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe,
&vub300->resp, sizeof(vub300->resp),
command_res_completed, vub300);
vub300->command_res_urb->actual_length = 0;
ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC);
if (ret == 0) {
/*
* the urb completion handler will call
* our completion handler
*/
} else {
/*
* and thus we only call it directly
* when it will not be called
*/
complete(&vub300->command_complete);
}
}
}
/*
* watch CMD52 writes go by in order to track the current block size of
* each SDIO function (writes to the FBR block size registers 0xN10 and
* 0xN11) and the current bus width (writes to the CCCR bus interface
* control register); the STUFF bits of the command argument are masked
* out for the comparisons
*/
static void snoop_block_size_and_bus_width(struct vub300_mmc_host *vub300,
u32 cmd_arg)
{
if ((0xFBFFFE00 & cmd_arg) == 0x80022200)
vub300->fbs[1] = (cmd_arg << 8) | (0x00FF & vub300->fbs[1]);
else if ((0xFBFFFE00 & cmd_arg) == 0x80022000)
vub300->fbs[1] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[1]);
else if ((0xFBFFFE00 & cmd_arg) == 0x80042200)
vub300->fbs[2] = (cmd_arg << 8) | (0x00FF & vub300->fbs[2]);
else if ((0xFBFFFE00 & cmd_arg) == 0x80042000)
vub300->fbs[2] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[2]);
else if ((0xFBFFFE00 & cmd_arg) == 0x80062200)
vub300->fbs[3] = (cmd_arg << 8) | (0x00FF & vub300->fbs[3]);
else if ((0xFBFFFE00 & cmd_arg) == 0x80062000)
vub300->fbs[3] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[3]);
else if ((0xFBFFFE00 & cmd_arg) == 0x80082200)
vub300->fbs[4] = (cmd_arg << 8) | (0x00FF & vub300->fbs[4]);
else if ((0xFBFFFE00 & cmd_arg) == 0x80082000)
vub300->fbs[4] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[4]);
else if ((0xFBFFFE00 & cmd_arg) == 0x800A2200)
vub300->fbs[5] = (cmd_arg << 8) | (0x00FF & vub300->fbs[5]);
else if ((0xFBFFFE00 & cmd_arg) == 0x800A2000)
vub300->fbs[5] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[5]);
else if ((0xFBFFFE00 & cmd_arg) == 0x800C2200)
vub300->fbs[6] = (cmd_arg << 8) | (0x00FF & vub300->fbs[6]);
else if ((0xFBFFFE00 & cmd_arg) == 0x800C2000)
vub300->fbs[6] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[6]);
else if ((0xFBFFFE00 & cmd_arg) == 0x800E2200)
vub300->fbs[7] = (cmd_arg << 8) | (0x00FF & vub300->fbs[7]);
else if ((0xFBFFFE00 & cmd_arg) == 0x800E2000)
vub300->fbs[7] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[7]);
else if ((0xFBFFFE03 & cmd_arg) == 0x80000E00)
vub300->bus_width = 1;
else if ((0xFBFFFE03 & cmd_arg) == 0x80000E02)
vub300->bus_width = 4;
}
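/*
* build and submit the 64 byte command packet for the current request:
* the response type and expected response length are derived from the
* opcode (with a separate table for commands following CMD55), and the
* block count/size fields depend on whether this is CMD52, CMD53 or an
* ordinary data command
*/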
static void send_command(struct vub300_mmc_host *vub300)
{
/* cmd_mutex is held by vub300_cmndwork_thread */
struct mmc_command *cmd = vub300->cmd;
struct mmc_data *data = vub300->data;
int retval;
int i;
u8 response_type;
if (vub300->app_spec) {
switch (cmd->opcode) {
case 6:
response_type = SDRT_1;
vub300->resp_len = 6;
if (0x00000000 == (0x00000003 & cmd->arg))
vub300->bus_width = 1;
else if (0x00000002 == (0x00000003 & cmd->arg))
vub300->bus_width = 4;
else
dev_err(&vub300->udev->dev,
"unexpected ACMD6 bus_width=%d\n",
0x00000003 & cmd->arg);
break;
case 13:
response_type = SDRT_1;
vub300->resp_len = 6;
break;
case 22:
response_type = SDRT_1;
vub300->resp_len = 6;
break;
case 23:
response_type = SDRT_1;
vub300->resp_len = 6;
break;
case 41:
response_type = SDRT_3;
vub300->resp_len = 6;
break;
case 42:
response_type = SDRT_1;
vub300->resp_len = 6;
break;
case 51:
response_type = SDRT_1;
vub300->resp_len = 6;
break;
case 55:
response_type = SDRT_1;
vub300->resp_len = 6;
break;
default:
vub300->resp_len = 0;
cmd->error = -EINVAL;
complete(&vub300->command_complete);
return;
}
vub300->app_spec = 0;
} else {
switch (cmd->opcode) {
case 0:
response_type = SDRT_NONE;
vub300->resp_len = 0;
break;
case 1:
response_type = SDRT_3;
vub300->resp_len = 6;
break;
case 2:
response_type = SDRT_2;
vub300->resp_len = 17;
break;
case 3:
response_type = SDRT_6;
vub300->resp_len = 6;
break;
case 4:
response_type = SDRT_NONE;
vub300->resp_len = 0;
break;
case 5:
response_type = SDRT_4;
vub300->resp_len = 6;
break;
case 6:
response_type = SDRT_1;
vub300->resp_len = 6;
break;
case 7:
response_type = SDRT_1B;
vub300->resp_len = 6;
break;
case 8:
response_type = SDRT_7;
vub300->resp_len = 6;
break;
case 9:
response_type = SDRT_2;
vub300->resp_len = 17;
break;
case 10:
response_type = SDRT_2;
vub300->resp_len = 17;
break;
case 12:
response_type = SDRT_1B;
vub300->resp_len = 6;
break;
case 13:
response_type = SDRT_1;
vub300->resp_len = 6;
break;
case 15:
response_type = SDRT_NONE;
vub300->resp_len = 0;
break;
case 16:
for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++)
vub300->fbs[i] = 0xFFFF & cmd->arg;
response_type = SDRT_1;
vub300->resp_len = 6;
break;
case 17:
case 18:
case 24:
case 25:
case 27:
response_type = SDRT_1;
vub300->resp_len = 6;
break;
case 28:
case 29:
response_type = SDRT_1B;
vub300->resp_len = 6;
break;
case 30:
case 32:
case 33:
response_type = SDRT_1;
vub300->resp_len = 6;
break;
case 38:
response_type = SDRT_1B;
vub300->resp_len = 6;
break;
case 42:
response_type = SDRT_1;
vub300->resp_len = 6;
break;
case 52:
response_type = SDRT_5;
vub300->resp_len = 6;
snoop_block_size_and_bus_width(vub300, cmd->arg);
break;
case 53:
response_type = SDRT_5;
vub300->resp_len = 6;
break;
case 55:
response_type = SDRT_1;
vub300->resp_len = 6;
vub300->app_spec = 1;
break;
case 56:
response_type = SDRT_1;
vub300->resp_len = 6;
break;
default:
vub300->resp_len = 0;
cmd->error = -EINVAL;
complete(&vub300->command_complete);
return;
}
}
/*
* we cannot simply use "sizeof(struct sd_command_header)" here
* because the packet _must_ be padded out to 64 bytes
*/
vub300->cmnd.head.header_size = 20;
vub300->cmnd.head.header_type = 0x00;
vub300->cmnd.head.port_number = 0; /* "0" means port 1 */
vub300->cmnd.head.command_type = 0x00; /* standard read command */
vub300->cmnd.head.response_type = response_type;
vub300->cmnd.head.command_index = cmd->opcode;
vub300->cmnd.head.arguments[0] = cmd->arg >> 24;
vub300->cmnd.head.arguments[1] = cmd->arg >> 16;
vub300->cmnd.head.arguments[2] = cmd->arg >> 8;
vub300->cmnd.head.arguments[3] = cmd->arg >> 0;
if (cmd->opcode == 52) {
int fn = 0x7 & (cmd->arg >> 28);
vub300->cmnd.head.block_count[0] = 0;
vub300->cmnd.head.block_count[1] = 0;
vub300->cmnd.head.block_size[0] = (vub300->fbs[fn] >> 8) & 0xFF;
vub300->cmnd.head.block_size[1] = (vub300->fbs[fn] >> 0) & 0xFF;
vub300->cmnd.head.command_type = 0x00;
vub300->cmnd.head.transfer_size[0] = 0;
vub300->cmnd.head.transfer_size[1] = 0;
vub300->cmnd.head.transfer_size[2] = 0;
vub300->cmnd.head.transfer_size[3] = 0;
} else if (!data) {
vub300->cmnd.head.block_count[0] = 0;
vub300->cmnd.head.block_count[1] = 0;
vub300->cmnd.head.block_size[0] = (vub300->fbs[0] >> 8) & 0xFF;
vub300->cmnd.head.block_size[1] = (vub300->fbs[0] >> 0) & 0xFF;
vub300->cmnd.head.command_type = 0x00;
vub300->cmnd.head.transfer_size[0] = 0;
vub300->cmnd.head.transfer_size[1] = 0;
vub300->cmnd.head.transfer_size[2] = 0;
vub300->cmnd.head.transfer_size[3] = 0;
} else if (cmd->opcode == 53) {
int fn = 0x7 & (cmd->arg >> 28);
if (0x08 & vub300->cmnd.head.arguments[0]) { /* BLOCK MODE */
vub300->cmnd.head.block_count[0] =
(data->blocks >> 8) & 0xFF;
vub300->cmnd.head.block_count[1] =
(data->blocks >> 0) & 0xFF;
vub300->cmnd.head.block_size[0] =
(data->blksz >> 8) & 0xFF;
vub300->cmnd.head.block_size[1] =
(data->blksz >> 0) & 0xFF;
} else { /* BYTE MODE */
vub300->cmnd.head.block_count[0] = 0;
vub300->cmnd.head.block_count[1] = 0;
vub300->cmnd.head.block_size[0] =
(vub300->datasize >> 8) & 0xFF;
vub300->cmnd.head.block_size[1] =
(vub300->datasize >> 0) & 0xFF;
}
vub300->cmnd.head.command_type =
(MMC_DATA_READ & data->flags) ? 0x00 : 0x80;
vub300->cmnd.head.transfer_size[0] =
(vub300->datasize >> 24) & 0xFF;
vub300->cmnd.head.transfer_size[1] =
(vub300->datasize >> 16) & 0xFF;
vub300->cmnd.head.transfer_size[2] =
(vub300->datasize >> 8) & 0xFF;
vub300->cmnd.head.transfer_size[3] =
(vub300->datasize >> 0) & 0xFF;
if (vub300->datasize < vub300->fbs[fn]) {
vub300->cmnd.head.block_count[0] = 0;
vub300->cmnd.head.block_count[1] = 0;
}
} else {
vub300->cmnd.head.block_count[0] = (data->blocks >> 8) & 0xFF;
vub300->cmnd.head.block_count[1] = (data->blocks >> 0) & 0xFF;
vub300->cmnd.head.block_size[0] = (data->blksz >> 8) & 0xFF;
vub300->cmnd.head.block_size[1] = (data->blksz >> 0) & 0xFF;
vub300->cmnd.head.command_type =
(MMC_DATA_READ & data->flags) ? 0x00 : 0x80;
vub300->cmnd.head.transfer_size[0] =
(vub300->datasize >> 24) & 0xFF;
vub300->cmnd.head.transfer_size[1] =
(vub300->datasize >> 16) & 0xFF;
vub300->cmnd.head.transfer_size[2] =
(vub300->datasize >> 8) & 0xFF;
vub300->cmnd.head.transfer_size[3] =
(vub300->datasize >> 0) & 0xFF;
if (vub300->datasize < vub300->fbs[0]) {
vub300->cmnd.head.block_count[0] = 0;
vub300->cmnd.head.block_count[1] = 0;
}
}
if (vub300->cmnd.head.block_size[0] || vub300->cmnd.head.block_size[1]) {
u16 block_size = vub300->cmnd.head.block_size[1] |
(vub300->cmnd.head.block_size[0] << 8);
u16 block_boundary = FIRMWARE_BLOCK_BOUNDARY -
(FIRMWARE_BLOCK_BOUNDARY % block_size);
vub300->cmnd.head.block_boundary[0] =
(block_boundary >> 8) & 0xFF;
vub300->cmnd.head.block_boundary[1] =
(block_boundary >> 0) & 0xFF;
} else {
vub300->cmnd.head.block_boundary[0] = 0;
vub300->cmnd.head.block_boundary[1] = 0;
}
usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev,
usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep),
&vub300->cmnd, sizeof(vub300->cmnd),
command_out_completed, vub300);
retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL);
if (retval < 0) {
cmd->error = retval;
complete(&vub300->command_complete);
return;
} else {
return;
}
}
/*
* timer callback runs in atomic mode
* so it cannot call usb_kill_urb()
*/
static void vub300_sg_timed_out(struct timer_list *t)
{
struct vub300_mmc_host *vub300 = from_timer(vub300, t,
sg_transfer_timer);
vub300->usb_timed_out = 1;
usb_sg_cancel(&vub300->sg_request);
usb_unlink_urb(vub300->command_out_urb);
usb_unlink_urb(vub300->command_res_urb);
}
static u16 roundup_to_multiple_of_64(u16 number)
{
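/* e.g. 1 -> 64, 64 -> 64, 65 -> 128; anything above 0xFFC0 wraps to 0 */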
return 0xFFC0 & (0x3F + number);
}
/*
* this is a separate function to keep within the 80 column width restriction
*/
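/*
* layout of the offload pseudocode image parsed below:
*   a NUL terminated comment string
*   a 2 byte big-endian interrupt pseudocode length, then that many bytes
*   a 2 byte big-endian transfer pseudocode length, then that many bytes
*   a 1 byte dynamic register count, then 4 bytes per register
*     (a function number plus a 24-bit register address)
*/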
static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
const struct firmware *fw)
{
u8 register_count = 0;
u16 ts = 0;
u16 interrupt_size = 0;
const u8 *data = fw->data;
int size = fw->size;
u8 c;
dev_info(&vub300->udev->dev, "using %s for SDIO offload processing\n",
vub300->vub_name);
do {
c = *data++;
} while (size-- && c); /* skip comment */
dev_info(&vub300->udev->dev, "using offload firmware %s %s\n", fw->data,
vub300->vub_name);
if (size < 4) {
dev_err(&vub300->udev->dev,
"corrupt offload pseudocode in firmware %s\n",
vub300->vub_name);
strncpy(vub300->vub_name, "corrupt offload pseudocode",
sizeof(vub300->vub_name));
return;
}
interrupt_size += *data++;
size -= 1;
interrupt_size <<= 8;
interrupt_size += *data++;
size -= 1;
if (interrupt_size < size) {
u16 xfer_length = roundup_to_multiple_of_64(interrupt_size);
u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL);
if (xfer_buffer) {
int retval;
memcpy(xfer_buffer, data, interrupt_size);
memset(xfer_buffer + interrupt_size, 0,
xfer_length - interrupt_size);
size -= interrupt_size;
data += interrupt_size;
retval =
usb_control_msg(vub300->udev,
usb_sndctrlpipe(vub300->udev, 0),
SET_INTERRUPT_PSEUDOCODE,
USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0x0000, 0x0000,
xfer_buffer, xfer_length, 1000);
kfree(xfer_buffer);
if (retval < 0)
goto copy_error_message;
} else {
dev_err(&vub300->udev->dev,
"not enough memory for xfer buffer to send"
" INTERRUPT_PSEUDOCODE for %s %s\n", fw->data,
vub300->vub_name);
strncpy(vub300->vub_name,
"SDIO interrupt pseudocode download failed",
sizeof(vub300->vub_name));
return;
}
} else {
dev_err(&vub300->udev->dev,
"corrupt interrupt pseudocode in firmware %s %s\n",
fw->data, vub300->vub_name);
strncpy(vub300->vub_name, "corrupt interrupt pseudocode",
sizeof(vub300->vub_name));
return;
}
ts += *data++;
size -= 1;
ts <<= 8;
ts += *data++;
size -= 1;
if (ts < size) {
u16 xfer_length = roundup_to_multiple_of_64(ts);
u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL);
if (xfer_buffer) {
int retval;
memcpy(xfer_buffer, data, ts);
memset(xfer_buffer + ts, 0,
xfer_length - ts);
size -= ts;
data += ts;
retval =
usb_control_msg(vub300->udev,
usb_sndctrlpipe(vub300->udev, 0),
SET_TRANSFER_PSEUDOCODE,
USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0x0000, 0x0000,
xfer_buffer, xfer_length, 1000);
kfree(xfer_buffer);
if (retval < 0)
goto copy_error_message;
} else {
dev_err(&vub300->udev->dev,
"not enough memory for xfer buffer to send"
" TRANSFER_PSEUDOCODE for %s %s\n", fw->data,
vub300->vub_name);
strncpy(vub300->vub_name,
"SDIO transfer pseudocode download failed",
sizeof(vub300->vub_name));
return;
}
} else {
dev_err(&vub300->udev->dev,
"corrupt transfer pseudocode in firmware %s %s\n",
fw->data, vub300->vub_name);
strncpy(vub300->vub_name, "corrupt transfer pseudocode",
sizeof(vub300->vub_name));
return;
}
register_count += *data++;
size -= 1;
if (register_count * 4 == size) {
int I = vub300->dynamic_register_count = register_count;
int i = 0;
while (I--) {
/* despite its name, func_num accumulates the 24-bit register address */
unsigned int func_num = 0;
vub300->sdio_register[i].func_num = *data++;
size -= 1;
func_num += *data++;
size -= 1;
func_num <<= 8;
func_num += *data++;
size -= 1;
func_num <<= 8;
func_num += *data++;
size -= 1;
vub300->sdio_register[i].sdio_reg = func_num;
vub300->sdio_register[i].activate = 1;
vub300->sdio_register[i].prepared = 0;
i += 1;
}
dev_info(&vub300->udev->dev,
"initialized %d dynamic pseudocode registers\n",
vub300->dynamic_register_count);
return;
} else {
dev_err(&vub300->udev->dev,
"corrupt dynamic registers in firmware %s\n",
vub300->vub_name);
strncpy(vub300->vub_name, "corrupt dynamic registers",
sizeof(vub300->vub_name));
return;
}
copy_error_message:
strncpy(vub300->vub_name, "SDIO pseudocode download failed",
sizeof(vub300->vub_name));
}
/*
* if the binary containing the EMPTY PseudoCode cannot be found,
* vub300->vub_name is set anyway in order to prevent an automatic retry
*/
static void download_offload_pseudocode(struct vub300_mmc_host *vub300)
{
struct mmc_card *card = vub300->mmc->card;
int sdio_funcs = card->sdio_funcs;
const struct firmware *fw = NULL;
int l = snprintf(vub300->vub_name, sizeof(vub300->vub_name),
"vub_%04X%04X", card->cis.vendor, card->cis.device);
int n = 0;
int retval;
for (n = 0; n < sdio_funcs; n++) {
struct sdio_func *sf = card->sdio_func[n];
l += scnprintf(vub300->vub_name + l,
sizeof(vub300->vub_name) - l, "_%04X%04X",
sf->vendor, sf->device);
}
snprintf(vub300->vub_name + l, sizeof(vub300->vub_name) - l, ".bin");
dev_info(&vub300->udev->dev, "requesting offload firmware %s\n",
vub300->vub_name);
retval = request_firmware(&fw, vub300->vub_name, &card->dev);
if (retval < 0) {
strncpy(vub300->vub_name, "vub_default.bin",
sizeof(vub300->vub_name));
retval = request_firmware(&fw, vub300->vub_name, &card->dev);
if (retval < 0) {
strncpy(vub300->vub_name,
"no SDIO offload firmware found",
sizeof(vub300->vub_name));
} else {
__download_offload_pseudocode(vub300, fw);
release_firmware(fw);
}
} else {
__download_offload_pseudocode(vub300, fw);
release_firmware(fw);
}
}
static void vub300_usb_bulk_msg_completion(struct urb *urb)
{ /* urb completion handler - hardirq */
complete((struct completion *)urb->context);
}
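/*
* synchronous bulk transfer helper, similar to usb_bulk_msg() except
* that the urb is published in vub300->urb so that the response
* completion handler can unlink it if the chip reports an error while
* the transfer is still in flight
*/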
static int vub300_usb_bulk_msg(struct vub300_mmc_host *vub300,
unsigned int pipe, void *data, int len,
int *actual_length, int timeout_msecs)
{
/* cmd_mutex is held by vub300_cmndwork_thread */
struct usb_device *usb_dev = vub300->udev;
struct completion done;
int retval;
vub300->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!vub300->urb)
return -ENOMEM;
usb_fill_bulk_urb(vub300->urb, usb_dev, pipe, data, len,
vub300_usb_bulk_msg_completion, NULL);
init_completion(&done);
vub300->urb->context = &done;
vub300->urb->actual_length = 0;
retval = usb_submit_urb(vub300->urb, GFP_KERNEL);
if (unlikely(retval))
goto out;
if (!wait_for_completion_timeout
(&done, msecs_to_jiffies(timeout_msecs))) {
retval = -ETIMEDOUT;
usb_kill_urb(vub300->urb);
} else {
retval = vub300->urb->status;
}
out:
*actual_length = vub300->urb->actual_length;
usb_free_urb(vub300->urb);
vub300->urb = NULL;
return retval;
}
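/*
* read the data phase of the current command: if the transfer already
* ends on a USB packet boundary (or input padding is disabled) the
* scatter-gather list is handed to usb_sg_init() directly; otherwise
* the data is bounced through a kmalloc'ed buffer that is padded up to
* a whole 64 or 512 byte USB packet
*/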
static int __command_read_data(struct vub300_mmc_host *vub300,
struct mmc_command *cmd, struct mmc_data *data)
{
/* cmd_mutex is held by vub300_cmndwork_thread */
int linear_length = vub300->datasize;
int padded_length = vub300->large_usb_packets ?
((511 + linear_length) >> 9) << 9 :
((63 + linear_length) >> 6) << 6;
if ((padded_length == linear_length) || !pad_input_to_usb_pkt) {
int result;
unsigned pipe;
pipe = usb_rcvbulkpipe(vub300->udev, vub300->data_inp_ep);
result = usb_sg_init(&vub300->sg_request, vub300->udev,
pipe, 0, data->sg,
data->sg_len, 0, GFP_KERNEL);
if (result < 0) {
usb_unlink_urb(vub300->command_out_urb);
usb_unlink_urb(vub300->command_res_urb);
cmd->error = result;
data->bytes_xfered = 0;
return 0;
} else {
vub300->sg_transfer_timer.expires =
jiffies + msecs_to_jiffies(2000 +
(linear_length / 16384));
add_timer(&vub300->sg_transfer_timer);
usb_sg_wait(&vub300->sg_request);
del_timer(&vub300->sg_transfer_timer);
if (vub300->sg_request.status < 0) {
cmd->error = vub300->sg_request.status;
data->bytes_xfered = 0;
return 0;
} else {
data->bytes_xfered = vub300->datasize;
return linear_length;
}
}
} else {
u8 *buf = kmalloc(padded_length, GFP_KERNEL);
if (buf) {
int result;
unsigned pipe = usb_rcvbulkpipe(vub300->udev,
vub300->data_inp_ep);
int actual_length = 0;
result = vub300_usb_bulk_msg(vub300, pipe, buf,
padded_length, &actual_length,
2000 + (padded_length / 16384));
if (result < 0) {
cmd->error = result;
data->bytes_xfered = 0;
kfree(buf);
return 0;
} else if (actual_length < linear_length) {
cmd->error = -EREMOTEIO;
data->bytes_xfered = 0;
kfree(buf);
return 0;
} else {
sg_copy_from_buffer(data->sg, data->sg_len, buf,
linear_length);
kfree(buf);
data->bytes_xfered = vub300->datasize;
return linear_length;
}
} else {
cmd->error = -ENOMEM;
data->bytes_xfered = 0;
return 0;
}
}
}
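/*
* write the data phase of the current command, mirroring the read path:
* transfers shorter than 64 bytes go through the preallocated padded
* buffer, transfers that do not end on a USB packet boundary are bounced
* through a kmalloc'ed padded buffer, and everything else uses the
* scatter-gather list directly
*/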
static int __command_write_data(struct vub300_mmc_host *vub300,
struct mmc_command *cmd, struct mmc_data *data)
{
/* cmd_mutex is held by vub300_cmndwork_thread */
unsigned pipe = usb_sndbulkpipe(vub300->udev, vub300->data_out_ep);
int linear_length = vub300->datasize;
int modulo_64_length = linear_length & 0x003F;
int modulo_512_length = linear_length & 0x01FF;
if (linear_length < 64) {
int result;
int actual_length;
sg_copy_to_buffer(data->sg, data->sg_len,
vub300->padded_buffer,
sizeof(vub300->padded_buffer));
memset(vub300->padded_buffer + linear_length, 0,
sizeof(vub300->padded_buffer) - linear_length);
result = vub300_usb_bulk_msg(vub300, pipe, vub300->padded_buffer,
sizeof(vub300->padded_buffer),
&actual_length, 2000 +
(sizeof(vub300->padded_buffer) /
16384));
if (result < 0) {
cmd->error = result;
data->bytes_xfered = 0;
} else {
data->bytes_xfered = vub300->datasize;
}
} else if ((!vub300->large_usb_packets && (0 < modulo_64_length)) ||
(vub300->large_usb_packets && (64 > modulo_512_length))
) { /* don't you just love these workarounds */
int padded_length = ((63 + linear_length) >> 6) << 6;
u8 *buf = kmalloc(padded_length, GFP_KERNEL);
if (buf) {
int result;
int actual_length;
sg_copy_to_buffer(data->sg, data->sg_len, buf,
padded_length);
memset(buf + linear_length, 0,
padded_length - linear_length);
result =
vub300_usb_bulk_msg(vub300, pipe, buf,
padded_length, &actual_length,
2000 + padded_length / 16384);
kfree(buf);
if (result < 0) {
cmd->error = result;
data->bytes_xfered = 0;
} else {
data->bytes_xfered = vub300->datasize;
}
} else {
cmd->error = -ENOMEM;
data->bytes_xfered = 0;
}
} else { /* no data padding required */
int result;
result = usb_sg_init(&vub300->sg_request, vub300->udev,
pipe, 0, data->sg,
data->sg_len, 0, GFP_KERNEL);
if (result < 0) {
usb_unlink_urb(vub300->command_out_urb);
usb_unlink_urb(vub300->command_res_urb);
cmd->error = result;
data->bytes_xfered = 0;
} else {
vub300->sg_transfer_timer.expires =
jiffies + msecs_to_jiffies(2000 +
linear_length / 16384);
add_timer(&vub300->sg_transfer_timer);
usb_sg_wait(&vub300->sg_request);
if (cmd->error) {
data->bytes_xfered = 0;
} else {
del_timer(&vub300->sg_transfer_timer);
if (vub300->sg_request.status < 0) {
cmd->error = vub300->sg_request.status;
data->bytes_xfered = 0;
} else {
data->bytes_xfered = vub300->datasize;
}
}
}
}
return linear_length;
}
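/*
* wait for the response to the command just sent (allowing extra time
* proportional to the amount of data moved), then decode the header
* type: plain completion, error response, or one of the piggybacked
* register-offload variants
*/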
static void __vub300_command_response(struct vub300_mmc_host *vub300,
struct mmc_command *cmd,
struct mmc_data *data, int data_length)
{
/* cmd_mutex is held by vub300_cmndwork_thread */
long respretval;
int msec_timeout = 1000 + data_length / 4;
respretval =
wait_for_completion_timeout(&vub300->command_complete,
msecs_to_jiffies(msec_timeout));
if (respretval == 0) { /* TIMED OUT */
/* we don't know which of "out" and "res" if any failed */
int result;
vub300->usb_timed_out = 1;
usb_kill_urb(vub300->command_out_urb);
usb_kill_urb(vub300->command_res_urb);
cmd->error = -ETIMEDOUT;
result = usb_lock_device_for_reset(vub300->udev,
vub300->interface);
if (result == 0) {
result = usb_reset_device(vub300->udev);
usb_unlock_device(vub300->udev);
}
} else if (respretval < 0) {
/* we don't know which of "out" and "res" if any failed */
usb_kill_urb(vub300->command_out_urb);
usb_kill_urb(vub300->command_res_urb);
cmd->error = respretval;
} else if (cmd->error) {
/*
* the error occurred sending the command
* or receiving the response
*/
} else if (vub300->command_out_urb->status) {
vub300->usb_transport_fail = vub300->command_out_urb->status;
cmd->error = -EPROTO == vub300->command_out_urb->status ?
-ESHUTDOWN : vub300->command_out_urb->status;
} else if (vub300->command_res_urb->status) {
vub300->usb_transport_fail = vub300->command_res_urb->status;
cmd->error = -EPROTO == vub300->command_res_urb->status ?
-ESHUTDOWN : vub300->command_res_urb->status;
} else if (vub300->resp.common.header_type == 0x00) {
/*
* the command completed successfully
* and there was no piggybacked data
*/
} else if (vub300->resp.common.header_type == RESPONSE_ERROR) {
cmd->error =
vub300_response_error(vub300->resp.error.error_code);
if (vub300->data)
usb_sg_cancel(&vub300->sg_request);
} else if (vub300->resp.common.header_type == RESPONSE_PIGGYBACKED) {
int offloaded_data_length =
vub300->resp.common.header_size -
sizeof(struct sd_register_header);
int register_count = offloaded_data_length >> 3;
int ri = 0;
while (register_count--) {
add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
ri += 1;
}
vub300->resp.common.header_size =
sizeof(struct sd_register_header);
vub300->resp.common.header_type = 0x00;
cmd->error = 0;
} else if (vub300->resp.common.header_type == RESPONSE_PIG_DISABLED) {
int offloaded_data_length =
vub300->resp.common.header_size -
sizeof(struct sd_register_header);
int register_count = offloaded_data_length >> 3;
int ri = 0;
while (register_count--) {
add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
ri += 1;
}
mutex_lock(&vub300->irq_mutex);
if (vub300->irqs_queued) {
vub300->irqs_queued += 1;
} else if (vub300->irq_enabled) {
vub300->irqs_queued += 1;
vub300_queue_poll_work(vub300, 0);
} else {
vub300->irqs_queued += 1;
}
vub300->irq_disabled = 1;
mutex_unlock(&vub300->irq_mutex);
vub300->resp.common.header_size =
sizeof(struct sd_register_header);
vub300->resp.common.header_type = 0x00;
cmd->error = 0;
} else if (vub300->resp.common.header_type == RESPONSE_PIG_ENABLED) {
int offloaded_data_length =
vub300->resp.common.header_size -
sizeof(struct sd_register_header);
int register_count = offloaded_data_length >> 3;
int ri = 0;
while (register_count--) {
add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
ri += 1;
}
mutex_lock(&vub300->irq_mutex);
if (vub300->irqs_queued) {
vub300->irqs_queued += 1;
} else if (vub300->irq_enabled) {
vub300->irqs_queued += 1;
vub300_queue_poll_work(vub300, 0);
} else {
vub300->irqs_queued += 1;
}
vub300->irq_disabled = 0;
mutex_unlock(&vub300->irq_mutex);
vub300->resp.common.header_size =
sizeof(struct sd_register_header);
vub300->resp.common.header_type = 0x00;
cmd->error = 0;
} else {
cmd->error = -EINVAL;
}
}
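/*
* unpack the big-endian response bytes (which start at offset 1, after
* the response opcode byte) into the cpu-ordered 32 bit words of
* cmd->resp[]; only a 17 byte R2 response fills all four words
*/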
static void construct_request_response(struct vub300_mmc_host *vub300,
struct mmc_command *cmd)
{
int resp_len = vub300->resp_len;
int less_cmd = (17 == resp_len) ? resp_len : resp_len - 1;
int bytes = 3 & less_cmd;
int words = less_cmd >> 2;
u8 *r = vub300->resp.response.command_response;
if (!resp_len)
return;
if (bytes == 3) {
cmd->resp[words] = (r[1 + (words << 2)] << 24)
| (r[2 + (words << 2)] << 16)
| (r[3 + (words << 2)] << 8);
} else if (bytes == 2) {
cmd->resp[words] = (r[1 + (words << 2)] << 24)
| (r[2 + (words << 2)] << 16);
} else if (bytes == 1) {
cmd->resp[words] = (r[1 + (words << 2)] << 24);
}
while (words-- > 0) {
cmd->resp[words] = (r[1 + (words << 2)] << 24)
| (r[2 + (words << 2)] << 16)
| (r[3 + (words << 2)] << 8)
| (r[4 + (words << 2)] << 0);
}
if ((cmd->opcode == 53) && (0x000000FF & cmd->resp[0]))
cmd->resp[0] &= 0xFFFFFF00;
}
/* this thread runs only when there is an upper level command req outstanding */
static void vub300_cmndwork_thread(struct work_struct *work)
{
struct vub300_mmc_host *vub300 =
container_of(work, struct vub300_mmc_host, cmndwork);
if (!vub300->interface) {
kref_put(&vub300->kref, vub300_delete);
return;
} else {
struct mmc_request *req = vub300->req;
struct mmc_command *cmd = vub300->cmd;
struct mmc_data *data = vub300->data;
int data_length;
mutex_lock(&vub300->cmd_mutex);
init_completion(&vub300->command_complete);
if (likely(vub300->vub_name[0]) || !vub300->mmc->card) {
/*
* the name of the EMPTY PseudoCode firmware file
* is used as a flag to indicate that the file
* has already been downloaded to the VUB300 chip
*/
} else if (0 == vub300->mmc->card->sdio_funcs) {
strncpy(vub300->vub_name, "SD memory device",
sizeof(vub300->vub_name));
} else {
download_offload_pseudocode(vub300);
}
send_command(vub300);
if (!data)
data_length = 0;
else if (MMC_DATA_READ & data->flags)
data_length = __command_read_data(vub300, cmd, data);
else
data_length = __command_write_data(vub300, cmd, data);
__vub300_command_response(vub300, cmd, data, data_length);
vub300->req = NULL;
vub300->cmd = NULL;
vub300->data = NULL;
if (cmd->error) {
if (cmd->error == -ENOMEDIUM)
check_vub300_port_status(vub300);
mutex_unlock(&vub300->cmd_mutex);
mmc_request_done(vub300->mmc, req);
kref_put(&vub300->kref, vub300_delete);
return;
} else {
construct_request_response(vub300, cmd);
vub300->resp_len = 0;
mutex_unlock(&vub300->cmd_mutex);
kref_put(&vub300->kref, vub300_delete);
mmc_request_done(vub300->mmc, req);
return;
}
}
}
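/*
* scan the cyclic buffer of offloaded register accesses for the given
* function, looking for an entry whose four command bytes match the
* CMD52 argument; on a hit the offloaded response is consumed and
* copied into cmd->resp[]
*/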
static int examine_cyclic_buffer(struct vub300_mmc_host *vub300,
struct mmc_command *cmd, u8 Function)
{
/* cmd_mutex is held by vub300_mmc_request */
u8 cmd0 = 0xFF & (cmd->arg >> 24);
u8 cmd1 = 0xFF & (cmd->arg >> 16);
u8 cmd2 = 0xFF & (cmd->arg >> 8);
u8 cmd3 = 0xFF & (cmd->arg >> 0);
int first = MAXREGMASK & vub300->fn[Function].offload_point;
struct offload_registers_access *rf = &vub300->fn[Function].reg[first];
if (cmd0 == rf->command_byte[0] &&
cmd1 == rf->command_byte[1] &&
cmd2 == rf->command_byte[2] &&
cmd3 == rf->command_byte[3]) {
u8 checksum = 0x00;
cmd->resp[1] = checksum << 24;
cmd->resp[0] = (rf->Respond_Byte[0] << 24)
| (rf->Respond_Byte[1] << 16)
| (rf->Respond_Byte[2] << 8)
| (rf->Respond_Byte[3] << 0);
vub300->fn[Function].offload_point += 1;
vub300->fn[Function].offload_count -= 1;
vub300->total_offload_count -= 1;
return 1;
} else {
int delta = 1; /* because it does not match the first one */
u8 register_count = vub300->fn[Function].offload_count - 1;
u32 register_point = vub300->fn[Function].offload_point + 1;
while (0 < register_count) {
int point = MAXREGMASK & register_point;
struct offload_registers_access *r =
&vub300->fn[Function].reg[point];
if (cmd0 == r->command_byte[0] &&
cmd1 == r->command_byte[1] &&
cmd2 == r->command_byte[2] &&
cmd3 == r->command_byte[3]) {
u8 checksum = 0x00;
cmd->resp[1] = checksum << 24;
cmd->resp[0] = (r->Respond_Byte[0] << 24)
| (r->Respond_Byte[1] << 16)
| (r->Respond_Byte[2] << 8)
| (r->Respond_Byte[3] << 0);
vub300->fn[Function].offload_point += delta;
vub300->fn[Function].offload_count -= delta;
vub300->total_offload_count -= delta;
return 1;
} else {
register_point += 1;
register_count -= 1;
delta += 1;
continue;
}
}
return 0;
}
}
static int satisfy_request_from_offloaded_data(struct vub300_mmc_host *vub300,
struct mmc_command *cmd)
{
/* cmd_mutex is held by vub300_mmc_request */
u8 regs = vub300->dynamic_register_count;
u8 i = 0;
u8 func = FUN(cmd);
u32 reg = REG(cmd);
while (0 < regs--) {
if ((vub300->sdio_register[i].func_num == func) &&
(vub300->sdio_register[i].sdio_reg == reg)) {
if (!vub300->sdio_register[i].prepared) {
return 0;
} else if ((0x80000000 & cmd->arg) == 0x80000000) {
/*
* a write to a dynamic register
* nullifies our offloaded value
*/
vub300->sdio_register[i].prepared = 0;
return 0;
} else {
u8 checksum = 0x00;
u8 rsp0 = 0x00;
u8 rsp1 = 0x00;
u8 rsp2 = vub300->sdio_register[i].response;
u8 rsp3 = vub300->sdio_register[i].regvalue;
vub300->sdio_register[i].prepared = 0;
cmd->resp[1] = checksum << 24;
cmd->resp[0] = (rsp0 << 24)
| (rsp1 << 16)
| (rsp2 << 8)
| (rsp3 << 0);
return 1;
}
} else {
i += 1;
continue;
}
}
if (vub300->total_offload_count == 0)
return 0;
else if (vub300->fn[func].offload_count == 0)
return 0;
else
return examine_cyclic_buffer(vub300, cmd, func);
}
static void vub300_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
{ /* NOT irq */
struct mmc_command *cmd = req->cmd;
struct vub300_mmc_host *vub300 = mmc_priv(mmc);
if (!vub300->interface) {
cmd->error = -ESHUTDOWN;
mmc_request_done(mmc, req);
return;
} else {
struct mmc_data *data = req->data;
if (!vub300->card_powered) {
cmd->error = -ENOMEDIUM;
mmc_request_done(mmc, req);
return;
}
if (!vub300->card_present) {
cmd->error = -ENOMEDIUM;
mmc_request_done(mmc, req);
return;
}
if (vub300->usb_transport_fail) {
cmd->error = vub300->usb_transport_fail;
mmc_request_done(mmc, req);
return;
}
if (!vub300->interface) {
cmd->error = -ENODEV;
mmc_request_done(mmc, req);
return;
}
kref_get(&vub300->kref);
mutex_lock(&vub300->cmd_mutex);
mod_timer(&vub300->inactivity_timer, jiffies + HZ);
/*
* for performance we have to return immediately
* if the requested data has been offloaded
*/
if (cmd->opcode == 52 &&
satisfy_request_from_offloaded_data(vub300, cmd)) {
cmd->error = 0;
mutex_unlock(&vub300->cmd_mutex);
kref_put(&vub300->kref, vub300_delete);
mmc_request_done(mmc, req);
return;
} else {
vub300->cmd = cmd;
vub300->req = req;
vub300->data = data;
if (data)
vub300->datasize = data->blksz * data->blocks;
else
vub300->datasize = 0;
vub300_queue_cmnd_work(vub300);
mutex_unlock(&vub300->cmd_mutex);
kref_put(&vub300->kref, vub300_delete);
/*
* the kernel lock diagnostics complain
* if the cmd_mutex is "passed on"
* to the cmndwork thread,
* so we must release it now
* and re-acquire it in the cmndwork thread
*/
}
}
}
static void __set_clock_speed(struct vub300_mmc_host *vub300, u8 buf[8],
struct mmc_ios *ios)
{
int buf_array_size = 8; /* ARRAY_SIZE(buf) does not work: buf decays to a pointer here */
int retval;
u32 kHzClock;
if (ios->clock >= 48000000)
kHzClock = 48000;
else if (ios->clock >= 24000000)
kHzClock = 24000;
else if (ios->clock >= 20000000)
kHzClock = 20000;
else if (ios->clock >= 15000000)
kHzClock = 15000;
else if (ios->clock >= 200000)
kHzClock = 200;
else
kHzClock = 0;
{
int i;
u64 c = kHzClock;
/* marshal the kHz clock rate as a little-endian value
* for the SET_CLOCK_SPEED control request */
for (i = 0; i < buf_array_size; i++) {
buf[i] = c;
c >>= 8;
}
}
retval =
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_CLOCK_SPEED,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x00, 0x00, buf, buf_array_size, 1000);
if (retval != 8) {
dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED"
" %dkHz failed with retval=%d\n", kHzClock, retval);
} else {
dev_dbg(&vub300->udev->dev, "SET_CLOCK_SPEED"
" %dkHz\n", kHzClock);
}
}
static void vub300_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{ /* NOT irq */
struct vub300_mmc_host *vub300 = mmc_priv(mmc);
if (!vub300->interface)
return;
kref_get(&vub300->kref);
mutex_lock(&vub300->cmd_mutex);
if ((ios->power_mode == MMC_POWER_OFF) && vub300->card_powered) {
vub300->card_powered = 0;
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_SD_POWER,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, 0x0000, NULL, 0, 1000);
/* must wait for the VUB300 u-proc to boot up */
msleep(600);
} else if ((ios->power_mode == MMC_POWER_UP) && !vub300->card_powered) {
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_SD_POWER,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0001, 0x0000, NULL, 0, 1000);
msleep(600);
vub300->card_powered = 1;
} else if (ios->power_mode == MMC_POWER_ON) {
u8 *buf = kmalloc(8, GFP_KERNEL);
if (buf) {
__set_clock_speed(vub300, buf, ios);
kfree(buf);
}
} else {
/* this should mean no change of state */
}
mutex_unlock(&vub300->cmd_mutex);
kref_put(&vub300->kref, vub300_delete);
}
static int vub300_mmc_get_ro(struct mmc_host *mmc)
{
struct vub300_mmc_host *vub300 = mmc_priv(mmc);
return vub300->read_only;
}
static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
{ /* NOT irq */
struct vub300_mmc_host *vub300 = mmc_priv(mmc);
if (!vub300->interface)
return;
kref_get(&vub300->kref);
if (enable) {
set_current_state(TASK_RUNNING);
mutex_lock(&vub300->irq_mutex);
if (vub300->irqs_queued) {
vub300->irqs_queued -= 1;
mmc_signal_sdio_irq(vub300->mmc);
} else if (vub300->irq_disabled) {
vub300->irq_disabled = 0;
vub300->irq_enabled = 1;
vub300_queue_poll_work(vub300, 0);
} else if (vub300->irq_enabled) {
/* this should not happen, so we will just ignore it */
} else {
vub300->irq_enabled = 1;
vub300_queue_poll_work(vub300, 0);
}
mutex_unlock(&vub300->irq_mutex);
set_current_state(TASK_INTERRUPTIBLE);
} else {
vub300->irq_enabled = 0;
}
kref_put(&vub300->kref, vub300_delete);
}
static const struct mmc_host_ops vub300_mmc_ops = {
.request = vub300_mmc_request,
.set_ios = vub300_mmc_set_ios,
.get_ro = vub300_mmc_get_ro,
.enable_sdio_irq = vub300_enable_sdio_irq,
};
static int vub300_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{ /* NOT irq */
struct vub300_mmc_host *vub300;
struct usb_host_interface *iface_desc;
struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface));
int i;
int retval = -ENOMEM;
struct urb *command_out_urb;
struct urb *command_res_urb;
struct mmc_host *mmc;
char manufacturer[48];
char product[32];
char serial_number[32];
usb_string(udev, udev->descriptor.iManufacturer, manufacturer,
sizeof(manufacturer));
usb_string(udev, udev->descriptor.iProduct, product, sizeof(product));
usb_string(udev, udev->descriptor.iSerialNumber, serial_number,
sizeof(serial_number));
dev_info(&udev->dev, "probing VID:PID(%04X:%04X) %s %s %s\n",
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct),
manufacturer, product, serial_number);
command_out_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!command_out_urb) {
retval = -ENOMEM;
goto error0;
}
command_res_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!command_res_urb) {
retval = -ENOMEM;
goto error1;
}
/* this also allocates memory for our VUB300 mmc host device */
mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev);
if (!mmc) {
retval = -ENOMEM;
dev_err(&udev->dev, "not enough memory for the mmc_host\n");
goto error4;
}
/* MMC core transfer sizes tunable parameters */
mmc->caps = 0;
if (!force_1_bit_data_xfers)
mmc->caps |= MMC_CAP_4_BIT_DATA;
if (!force_polling_for_irqs)
mmc->caps |= MMC_CAP_SDIO_IRQ;
mmc->caps &= ~MMC_CAP_NEEDS_POLL;
/*
* MMC_CAP_NEEDS_POLL causes core.c:mmc_rescan() to poll
* for devices which results in spurious CMD7's being
* issued which stops some SDIO cards from working
*/
if (limit_speed_to_24_MHz) {
mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
mmc->caps |= MMC_CAP_SD_HIGHSPEED;
mmc->f_max = 24000000;
dev_info(&udev->dev, "limiting SDIO speed to 24_MHz\n");
} else {
mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
mmc->caps |= MMC_CAP_SD_HIGHSPEED;
mmc->f_max = 48000000;
}
mmc->f_min = 200000;
mmc->max_blk_count = 511;
mmc->max_blk_size = 512;
mmc->max_segs = 128;
if (force_max_req_size)
mmc->max_req_size = force_max_req_size * 1024;
else
mmc->max_req_size = 64 * 1024;
mmc->max_seg_size = mmc->max_req_size;
mmc->ocr_avail = 0;
mmc->ocr_avail |= MMC_VDD_165_195;
mmc->ocr_avail |= MMC_VDD_20_21;
mmc->ocr_avail |= MMC_VDD_21_22;
mmc->ocr_avail |= MMC_VDD_22_23;
mmc->ocr_avail |= MMC_VDD_23_24;
mmc->ocr_avail |= MMC_VDD_24_25;
mmc->ocr_avail |= MMC_VDD_25_26;
mmc->ocr_avail |= MMC_VDD_26_27;
mmc->ocr_avail |= MMC_VDD_27_28;
mmc->ocr_avail |= MMC_VDD_28_29;
mmc->ocr_avail |= MMC_VDD_29_30;
mmc->ocr_avail |= MMC_VDD_30_31;
mmc->ocr_avail |= MMC_VDD_31_32;
mmc->ocr_avail |= MMC_VDD_32_33;
mmc->ocr_avail |= MMC_VDD_33_34;
mmc->ocr_avail |= MMC_VDD_34_35;
mmc->ocr_avail |= MMC_VDD_35_36;
mmc->ops = &vub300_mmc_ops;
vub300 = mmc_priv(mmc);
vub300->mmc = mmc;
vub300->card_powered = 0;
vub300->bus_width = 0;
vub300->cmnd.head.block_size[0] = 0x00;
vub300->cmnd.head.block_size[1] = 0x00;
vub300->app_spec = 0;
mutex_init(&vub300->cmd_mutex);
mutex_init(&vub300->irq_mutex);
vub300->command_out_urb = command_out_urb;
vub300->command_res_urb = command_res_urb;
vub300->usb_timed_out = 0;
vub300->dynamic_register_count = 0;
for (i = 0; i < ARRAY_SIZE(vub300->fn); i++) {
vub300->fn[i].offload_point = 0;
vub300->fn[i].offload_count = 0;
}
vub300->total_offload_count = 0;
vub300->irq_enabled = 0;
vub300->irq_disabled = 0;
vub300->irqs_queued = 0;
for (i = 0; i < ARRAY_SIZE(vub300->sdio_register); i++)
vub300->sdio_register[i].activate = 0;
vub300->udev = udev;
vub300->interface = interface;
vub300->cmnd_res_ep = 0;
vub300->cmnd_out_ep = 0;
vub300->data_inp_ep = 0;
vub300->data_out_ep = 0;
for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++)
vub300->fbs[i] = 512;
/*
* set up the endpoint information
*
* use the first pair of bulk-in and bulk-out
* endpoints for Command/Response+Interrupt
*
* use the second pair of bulk-in and bulk-out
* endpoints for Data In/Out
*/
vub300->large_usb_packets = 0;
iface_desc = interface->cur_altsetting;
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
struct usb_endpoint_descriptor *endpoint =
&iface_desc->endpoint[i].desc;
dev_info(&vub300->udev->dev,
"vub300 testing %s EndPoint(%d) %02X\n",
usb_endpoint_is_bulk_in(endpoint) ? "BULK IN" :
usb_endpoint_is_bulk_out(endpoint) ? "BULK OUT" :
"UNKNOWN", i, endpoint->bEndpointAddress);
if (endpoint->wMaxPacketSize > 64)
vub300->large_usb_packets = 1;
if (usb_endpoint_is_bulk_in(endpoint)) {
if (!vub300->cmnd_res_ep) {
vub300->cmnd_res_ep =
endpoint->bEndpointAddress;
} else if (!vub300->data_inp_ep) {
vub300->data_inp_ep =
endpoint->bEndpointAddress;
} else {
dev_warn(&vub300->udev->dev,
"ignoring"
" unexpected bulk_in endpoint");
}
} else if (usb_endpoint_is_bulk_out(endpoint)) {
if (!vub300->cmnd_out_ep) {
vub300->cmnd_out_ep =
endpoint->bEndpointAddress;
} else if (!vub300->data_out_ep) {
vub300->data_out_ep =
endpoint->bEndpointAddress;
} else {
dev_warn(&vub300->udev->dev,
"ignoring"
" unexpected bulk_out endpoint");
}
} else {
dev_warn(&vub300->udev->dev,
"vub300 ignoring EndPoint(%d) %02X", i,
endpoint->bEndpointAddress);
}
}
if (vub300->cmnd_res_ep && vub300->cmnd_out_ep &&
vub300->data_inp_ep && vub300->data_out_ep) {
dev_info(&vub300->udev->dev,
"vub300 %s packets"
" using EndPoints %02X %02X %02X %02X\n",
vub300->large_usb_packets ? "LARGE" : "SMALL",
vub300->cmnd_out_ep, vub300->cmnd_res_ep,
vub300->data_out_ep, vub300->data_inp_ep);
/* we have the expected EndPoints */
} else {
dev_err(&vub300->udev->dev,
"Could not find two sets of bulk-in/out endpoint pairs\n");
retval = -EINVAL;
goto error5;
}
retval =
usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
GET_HC_INF0,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, 0x0000, &vub300->hc_info,
sizeof(vub300->hc_info), 1000);
if (retval < 0)
goto error5;
retval =
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_ROM_WAIT_STATES,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
firmware_rom_wait_states, 0x0000, NULL, 0, 1000);
if (retval < 0)
goto error5;
dev_info(&vub300->udev->dev,
"operating_mode = %s %s %d MHz %s %d byte USB packets\n",
(mmc->caps & MMC_CAP_SDIO_IRQ) ? "IRQs" : "POLL",
(mmc->caps & MMC_CAP_4_BIT_DATA) ? "4-bit" : "1-bit",
mmc->f_max / 1000000,
pad_input_to_usb_pkt ? "padding input data to" : "with",
vub300->large_usb_packets ? 512 : 64);
retval =
usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
GET_SYSTEM_PORT_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, 0x0000, &vub300->system_port_status,
sizeof(vub300->system_port_status), 1000);
if (retval < 0) {
goto error5;
} else if (sizeof(vub300->system_port_status) == retval) {
vub300->card_present =
(0x0001 & vub300->system_port_status.port_flags) ? 1 : 0;
vub300->read_only =
(0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
} else {
goto error5;
}
usb_set_intfdata(interface, vub300);
INIT_DELAYED_WORK(&vub300->pollwork, vub300_pollwork_thread);
INIT_WORK(&vub300->cmndwork, vub300_cmndwork_thread);
INIT_WORK(&vub300->deadwork, vub300_deadwork_thread);
kref_init(&vub300->kref);
timer_setup(&vub300->sg_transfer_timer, vub300_sg_timed_out, 0);
kref_get(&vub300->kref);
timer_setup(&vub300->inactivity_timer,
vub300_inactivity_timer_expired, 0);
vub300->inactivity_timer.expires = jiffies + HZ;
add_timer(&vub300->inactivity_timer);
if (vub300->card_present)
dev_info(&vub300->udev->dev,
"USB vub300 remote SDIO host controller[%d]"
" connected with SD/SDIO card inserted\n",
interface_to_InterfaceNumber(interface));
else
dev_info(&vub300->udev->dev,
"USB vub300 remote SDIO host controller[%d]"
" connected with no SD/SDIO card inserted\n",
interface_to_InterfaceNumber(interface));
retval = mmc_add_host(mmc);
if (retval)
goto error6;
return 0;
error6:
del_timer_sync(&vub300->inactivity_timer);
error5:
mmc_free_host(mmc);
/*
* and hence also frees vub300
* which lives in the private area of the mmc_host
*/
error4:
usb_free_urb(command_res_urb);
error1:
usb_free_urb(command_out_urb);
error0:
usb_put_dev(udev);
return retval;
}
static void vub300_disconnect(struct usb_interface *interface)
{ /* NOT irq */
struct vub300_mmc_host *vub300 = usb_get_intfdata(interface);
if (!vub300 || !vub300->mmc) {
return;
} else {
struct mmc_host *mmc = vub300->mmc;
int ifnum = interface_to_InterfaceNumber(interface);
usb_set_intfdata(interface, NULL);
/* prevent more I/O from starting */
vub300->interface = NULL;
kref_put(&vub300->kref, vub300_delete);
mmc_remove_host(mmc);
pr_info("USB vub300 remote SDIO host controller[%d]"
" now disconnected", ifnum);
return;
}
}
#ifdef CONFIG_PM
static int vub300_suspend(struct usb_interface *intf, pm_message_t message)
{
return 0;
}
static int vub300_resume(struct usb_interface *intf)
{
return 0;
}
#else
#define vub300_suspend NULL
#define vub300_resume NULL
#endif
static int vub300_pre_reset(struct usb_interface *intf)
{ /* NOT irq */
struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
mutex_lock(&vub300->cmd_mutex);
return 0;
}
static int vub300_post_reset(struct usb_interface *intf)
{ /* NOT irq */
struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
/* we are sure no URBs are active - no locking needed */
vub300->errors = -EPIPE;
mutex_unlock(&vub300->cmd_mutex);
return 0;
}
static struct usb_driver vub300_driver = {
.name = "vub300",
.probe = vub300_probe,
.disconnect = vub300_disconnect,
.suspend = vub300_suspend,
.resume = vub300_resume,
.pre_reset = vub300_pre_reset,
.post_reset = vub300_post_reset,
.id_table = vub300_table,
.supports_autosuspend = 1,
};
static int __init vub300_init(void)
{ /* NOT irq */
int result;
pr_info("VUB300 Driver rom wait states = %02X irqpoll timeout = %04X",
firmware_rom_wait_states, 0x0FFFF & firmware_irqpoll_timeout);
cmndworkqueue = create_singlethread_workqueue("kvub300c");
if (!cmndworkqueue) {
pr_err("not enough memory for the REQUEST workqueue");
result = -ENOMEM;
goto out1;
}
pollworkqueue = create_singlethread_workqueue("kvub300p");
if (!pollworkqueue) {
pr_err("not enough memory for the IRQPOLL workqueue");
result = -ENOMEM;
goto out2;
}
deadworkqueue = create_singlethread_workqueue("kvub300d");
if (!deadworkqueue) {
pr_err("not enough memory for the EXPIRED workqueue");
result = -ENOMEM;
goto out3;
}
result = usb_register(&vub300_driver);
if (result) {
pr_err("usb_register failed. Error number %d", result);
goto out4;
}
return 0;
out4:
destroy_workqueue(deadworkqueue);
out3:
destroy_workqueue(pollworkqueue);
out2:
destroy_workqueue(cmndworkqueue);
out1:
return result;
}
static void __exit vub300_exit(void)
{
usb_deregister(&vub300_driver);
flush_workqueue(cmndworkqueue);
flush_workqueue(pollworkqueue);
flush_workqueue(deadworkqueue);
destroy_workqueue(cmndworkqueue);
destroy_workqueue(pollworkqueue);
destroy_workqueue(deadworkqueue);
}
module_init(vub300_init);
module_exit(vub300_exit);
MODULE_AUTHOR("Tony Olech <[email protected]>");
MODULE_DESCRIPTION("VUB300 USB to SD/MMC/SDIO adapter driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/mmc/host/vub300.c |
/*
* Driver for MMC and SSD cards for Cavium ThunderX SOCs.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2016 Cavium Inc.
*/
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include "cavium.h"
static void thunder_mmc_acquire_bus(struct cvm_mmc_host *host)
{
down(&host->mmc_serializer);
}
static void thunder_mmc_release_bus(struct cvm_mmc_host *host)
{
up(&host->mmc_serializer);
}
static void thunder_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
{
writeq(val, host->base + MIO_EMM_INT(host));
writeq(val, host->base + MIO_EMM_INT_EN_SET(host));
}
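/*
* the eMMC block exposes up to nine MSI-X vectors; take however many
* the PCI core grants and attach the shared cvm_mmc interrupt handler
* to each of them
*/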
static int thunder_mmc_register_interrupts(struct cvm_mmc_host *host,
struct pci_dev *pdev)
{
int nvec, ret, i;
nvec = pci_alloc_irq_vectors(pdev, 1, 9, PCI_IRQ_MSIX);
if (nvec < 0)
return nvec;
/* register interrupts */
for (i = 0; i < nvec; i++) {
ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
cvm_mmc_interrupt,
0, cvm_mmc_irq_names[i], host);
if (ret)
return ret;
}
return 0;
}
static int thunder_mmc_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct device_node *node = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct device_node *child_node;
struct cvm_mmc_host *host;
int ret, i = 0;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
pci_set_drvdata(pdev, host);
ret = pcim_enable_device(pdev);
if (ret)
return ret;
ret = pci_request_regions(pdev, KBUILD_MODNAME);
if (ret)
return ret;
host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
if (!host->base) {
ret = -EINVAL;
goto error;
}
/* On ThunderX these are identical */
host->dma_base = host->base;
host->reg_off = 0x2000;
host->reg_off_dma = 0x160;
host->clk = devm_clk_get(dev, NULL);
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
goto error;
}
ret = clk_prepare_enable(host->clk);
if (ret)
goto error;
host->sys_freq = clk_get_rate(host->clk);
spin_lock_init(&host->irq_handler_lock);
sema_init(&host->mmc_serializer, 1);
host->dev = dev;
host->acquire_bus = thunder_mmc_acquire_bus;
host->release_bus = thunder_mmc_release_bus;
host->int_enable = thunder_mmc_int_enable;
host->use_sg = true;
host->big_dma_addr = true;
host->need_irq_handler_lock = true;
host->last_slot = -1;
ret = dma_set_mask(dev, DMA_BIT_MASK(48));
if (ret)
goto error;
/*
* Clear out any pending interrupts that may be left over from
* the bootloader. Writing 1 to the bits clears them.
*/
writeq(127, host->base + MIO_EMM_INT_EN(host));
writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host));
/* Clear DMA FIFO */
writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host));
ret = thunder_mmc_register_interrupts(host, pdev);
if (ret)
goto error;
for_each_child_of_node(node, child_node) {
/*
* mmc_of_parse and devm* require one device per slot.
* Create a dummy device per slot and set the node pointer to
* the slot. The easiest way to get this is using
* of_platform_device_create.
*/
if (of_device_is_compatible(child_node, "mmc-slot")) {
host->slot_pdev[i] = of_platform_device_create(child_node, NULL,
&pdev->dev);
if (!host->slot_pdev[i])
continue;
ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
if (ret) {
of_node_put(child_node);
goto error;
}
}
i++;
}
dev_info(dev, "probed\n");
return 0;
error:
for (i = 0; i < CAVIUM_MAX_MMC; i++) {
if (host->slot[i])
cvm_mmc_of_slot_remove(host->slot[i]);
if (host->slot_pdev[i]) {
get_device(&host->slot_pdev[i]->dev);
of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
put_device(&host->slot_pdev[i]->dev);
}
}
clk_disable_unprepare(host->clk);
pci_release_regions(pdev);
return ret;
}
static void thunder_mmc_remove(struct pci_dev *pdev)
{
struct cvm_mmc_host *host = pci_get_drvdata(pdev);
u64 dma_cfg;
int i;
for (i = 0; i < CAVIUM_MAX_MMC; i++)
if (host->slot[i])
cvm_mmc_of_slot_remove(host->slot[i]);
dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
clk_disable_unprepare(host->clk);
pci_release_regions(pdev);
}
static const struct pci_device_id thunder_mmc_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa010) },
{ 0, } /* end of table */
};
static struct pci_driver thunder_mmc_driver = {
.name = KBUILD_MODNAME,
.id_table = thunder_mmc_id_table,
.probe = thunder_mmc_probe,
.remove = thunder_mmc_remove,
};
module_pci_driver(thunder_mmc_driver);
MODULE_AUTHOR("Cavium Inc.");
MODULE_DESCRIPTION("Cavium ThunderX eMMC Driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, thunder_mmc_id_table);
| linux-master | drivers/mmc/host/cavium-thunderx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Atmel SDMMC controller driver.
*
* Copyright (C) 2015 Atmel,
* 2015 Ludovic Desroches <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include "sdhci-pltfm.h"
#define SDMMC_MC1R 0x204
#define SDMMC_MC1R_DDR BIT(3)
#define SDMMC_MC1R_FCD BIT(7)
#define SDMMC_CACR 0x230
#define SDMMC_CACR_CAPWREN BIT(0)
#define SDMMC_CACR_KEY (0x46 << 8)
#define SDMMC_CALCR 0x240
#define SDMMC_CALCR_EN BIT(0)
#define SDMMC_CALCR_ALWYSON BIT(4)
#define SDHCI_AT91_PRESET_COMMON_CONF 0x400 /* drv type B, programmable clock mode */
struct sdhci_at91_soc_data {
const struct sdhci_pltfm_data *pdata;
bool baseclk_is_generated_internally;
unsigned int divider_for_baseclk;
};
struct sdhci_at91_priv {
const struct sdhci_at91_soc_data *soc_data;
struct clk *hclock;
struct clk *gck;
struct clk *mainck;
bool restore_needed;
bool cal_always_on;
};
static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
{
u8 mc1r;
mc1r = readb(host->ioaddr + SDMMC_MC1R);
mc1r |= SDMMC_MC1R_FCD;
writeb(mc1r, host->ioaddr + SDMMC_MC1R);
}
static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
{
u16 clk;
host->mmc->actual_clock = 0;
/*
* There is no requirement to disable the internal clock before
* changing the SD clock configuration. Moreover, disabling the
* internal clock, changing the configuration and re-enabling the
* internal clock causes some bugs. It can prevent the internal
* clock stable flag from being set and cause an unexpected switch
* to the base clock when using presets.
*/
clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
clk &= SDHCI_CLOCK_INT_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
if (clock == 0)
return;
clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
clk |= SDHCI_CLOCK_INT_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
/* Wait max 20 ms */
if (read_poll_timeout(sdhci_readw, clk, (clk & SDHCI_CLOCK_INT_STABLE),
1000, 20000, false, host, SDHCI_CLOCK_CONTROL)) {
pr_err("%s: Internal clock never stabilised.\n",
mmc_hostname(host->mmc));
return;
}
clk |= SDHCI_CLOCK_CARD_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
u8 mc1r;
if (timing == MMC_TIMING_MMC_DDR52) {
mc1r = sdhci_readb(host, SDMMC_MC1R);
mc1r |= SDMMC_MC1R_DDR;
sdhci_writeb(host, mc1r, SDMMC_MC1R);
}
sdhci_set_uhs_signaling(host, timing);
}
static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
unsigned int tmp;
sdhci_reset(host, mask);
if ((host->mmc->caps & MMC_CAP_NONREMOVABLE)
|| mmc_gpio_get_cd(host->mmc) >= 0)
sdhci_at91_set_force_card_detect(host);
if (priv->cal_always_on && (mask & SDHCI_RESET_ALL)) {
u32 calcr = sdhci_readl(host, SDMMC_CALCR);
sdhci_writel(host, calcr | SDMMC_CALCR_ALWYSON | SDMMC_CALCR_EN,
SDMMC_CALCR);
if (read_poll_timeout(sdhci_readl, tmp, !(tmp & SDMMC_CALCR_EN),
10, 20000, false, host, SDMMC_CALCR))
dev_err(mmc_dev(host->mmc), "Failed to calibrate\n");
}
}
static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
.set_clock = sdhci_at91_set_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_at91_reset,
.set_uhs_signaling = sdhci_at91_set_uhs_signaling,
.set_power = sdhci_set_power_and_bus_voltage,
};
static const struct sdhci_pltfm_data sdhci_sama5d2_pdata = {
.ops = &sdhci_at91_sama5d2_ops,
};
static const struct sdhci_at91_soc_data soc_data_sama5d2 = {
.pdata = &sdhci_sama5d2_pdata,
.baseclk_is_generated_internally = false,
};
static const struct sdhci_at91_soc_data soc_data_sam9x60 = {
.pdata = &sdhci_sama5d2_pdata,
.baseclk_is_generated_internally = true,
.divider_for_baseclk = 2,
};
static const struct of_device_id sdhci_at91_dt_match[] = {
{ .compatible = "atmel,sama5d2-sdhci", .data = &soc_data_sama5d2 },
{ .compatible = "microchip,sam9x60-sdhci", .data = &soc_data_sam9x60 },
{}
};
MODULE_DEVICE_TABLE(of, sdhci_at91_dt_match);
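/*
* rewrite the capability registers (via the key-protected CACR) so that
* the base clock and clock multiplier fields reflect the real clock
* rates, then program the preset value registers from the gck rate
*/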
static int sdhci_at91_set_clks_presets(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
unsigned int caps0, caps1;
unsigned int clk_base, clk_mul;
unsigned int gck_rate, clk_base_rate;
unsigned int preset_div;
clk_prepare_enable(priv->hclock);
caps0 = readl(host->ioaddr + SDHCI_CAPABILITIES);
caps1 = readl(host->ioaddr + SDHCI_CAPABILITIES_1);
gck_rate = clk_get_rate(priv->gck);
if (priv->soc_data->baseclk_is_generated_internally)
clk_base_rate = gck_rate / priv->soc_data->divider_for_baseclk;
else
clk_base_rate = clk_get_rate(priv->mainck);
clk_base = clk_base_rate / 1000000;
clk_mul = gck_rate / clk_base_rate - 1;
caps0 &= ~SDHCI_CLOCK_V3_BASE_MASK;
caps0 |= FIELD_PREP(SDHCI_CLOCK_V3_BASE_MASK, clk_base);
caps1 &= ~SDHCI_CLOCK_MUL_MASK;
caps1 |= FIELD_PREP(SDHCI_CLOCK_MUL_MASK, clk_mul);
/* Set capabilities in r/w mode. */
writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, host->ioaddr + SDMMC_CACR);
writel(caps0, host->ioaddr + SDHCI_CAPABILITIES);
writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1);
/* Set capabilities in ro mode. */
writel(0, host->ioaddr + SDMMC_CACR);
dev_dbg(dev, "update clk mul to %u as gck rate is %u Hz and clk base is %u Hz\n",
clk_mul, gck_rate, clk_base_rate);
/*
* We have to set the preset values because they depend on the
* clk_mul value. Moreover, SDR104 is supported in a degraded mode
* since the maximum sd clock value is 120 MHz instead of 208 MHz.
* For that reason, we need to use presets to support SDR104.
*/
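/*
* e.g. with a 240 MHz gck: SDR12 preset_div = 240/24 - 1 = 9 and
* SDR104 preset_div = 240/120 - 1 = 1, i.e. a 120 MHz card clock
*/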
preset_div = DIV_ROUND_UP(gck_rate, 24000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_SDR12);
preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_SDR25);
preset_div = DIV_ROUND_UP(gck_rate, 100000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_SDR50);
preset_div = DIV_ROUND_UP(gck_rate, 120000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_SDR104);
preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1;
writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
host->ioaddr + SDHCI_PRESET_FOR_DDR50);
clk_prepare_enable(priv->mainck);
clk_prepare_enable(priv->gck);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int sdhci_at91_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
int ret;
ret = pm_runtime_force_suspend(dev);
priv->restore_needed = true;
return ret;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int sdhci_at91_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
int ret;
ret = sdhci_runtime_suspend_host(host);
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
clk_disable_unprepare(priv->gck);
clk_disable_unprepare(priv->hclock);
clk_disable_unprepare(priv->mainck);
return ret;
}
static int sdhci_at91_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
int ret;
if (priv->restore_needed) {
ret = sdhci_at91_set_clks_presets(dev);
if (ret)
return ret;
priv->restore_needed = false;
goto out;
}
ret = clk_prepare_enable(priv->mainck);
if (ret) {
dev_err(dev, "can't enable mainck\n");
return ret;
}
ret = clk_prepare_enable(priv->hclock);
if (ret) {
dev_err(dev, "can't enable hclock\n");
return ret;
}
ret = clk_prepare_enable(priv->gck);
if (ret) {
dev_err(dev, "can't enable gck\n");
return ret;
}
out:
return sdhci_runtime_resume_host(host, 0);
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops sdhci_at91_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(sdhci_at91_suspend, pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(sdhci_at91_runtime_suspend,
sdhci_at91_runtime_resume,
NULL)
};
static int sdhci_at91_probe(struct platform_device *pdev)
{
const struct sdhci_at91_soc_data *soc_data;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_at91_priv *priv;
int ret;
soc_data = of_device_get_match_data(&pdev->dev);
if (!soc_data)
return -EINVAL;
host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*priv));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
priv = sdhci_pltfm_priv(pltfm_host);
priv->soc_data = soc_data;
priv->mainck = devm_clk_get(&pdev->dev, "baseclk");
if (IS_ERR(priv->mainck)) {
if (soc_data->baseclk_is_generated_internally) {
priv->mainck = NULL;
} else {
dev_err(&pdev->dev, "failed to get baseclk\n");
ret = PTR_ERR(priv->mainck);
goto sdhci_pltfm_free;
}
}
priv->hclock = devm_clk_get(&pdev->dev, "hclock");
if (IS_ERR(priv->hclock)) {
dev_err(&pdev->dev, "failed to get hclock\n");
ret = PTR_ERR(priv->hclock);
goto sdhci_pltfm_free;
}
priv->gck = devm_clk_get(&pdev->dev, "multclk");
if (IS_ERR(priv->gck)) {
dev_err(&pdev->dev, "failed to get multclk\n");
ret = PTR_ERR(priv->gck);
goto sdhci_pltfm_free;
}
ret = sdhci_at91_set_clks_presets(&pdev->dev);
if (ret)
goto sdhci_pltfm_free;
priv->restore_needed = false;
/*
* If the SDCAL pin is wrongly connected, we must enable
* the analog calibration cell permanently.
*/
priv->cal_always_on =
device_property_read_bool(&pdev->dev,
"microchip,sdcal-inverted");
ret = mmc_of_parse(host->mmc);
if (ret)
goto clocks_disable_unprepare;
sdhci_get_of_property(pdev);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_use_autosuspend(&pdev->dev);
/* HS200 is broken at this moment */
host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
ret = sdhci_add_host(host);
if (ret)
goto pm_runtime_disable;
/*
* When calling sdhci_runtime_suspend_host(), the sdhci layer assumes
* that all the clocks of the controller are disabled. It means we
* can't get an irq from it while it is runtime suspended, so waking
* up on a card detect irq from the controller is not supported.
* If we want to use runtime PM and be able to wake up on card
* insertion, we have to use a GPIO for the card detection or use
* polling. Be aware that polling will resume/suspend the
* controller between each attempt.
* Disable SDHCI_QUIRK_BROKEN_CARD_DETECTION to be sure nobody tries
* to enable polling via device tree with the broken-cd property.
*/
if (mmc_card_is_removable(host->mmc) &&
mmc_gpio_get_cd(host->mmc) < 0) {
host->mmc->caps |= MMC_CAP_NEEDS_POLL;
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
}
/*
* If the device attached to the MMC bus is not removable, it is safer
* to set the Force Card Detect bit. People often don't connect the
* card detect signal and use this pin for another purpose. If the card
* detect pin is not muxed to the SDHCI controller, a default value is
* used. This value can differ from one SoC revision to another.
* Problems arise when this default value does not report a card as
* present. To avoid this case, if the device is non-removable then the
* card detection procedure using the SDMCC_CD signal is bypassed.
* This bit is reset when a software reset for all commands is performed,
* so we need to implement our own reset function to set this bit back.
*
* WA: SAMA5D2 doesn't drive CMD if using CD GPIO line.
*/
if ((host->mmc->caps & MMC_CAP_NONREMOVABLE)
|| mmc_gpio_get_cd(host->mmc) >= 0)
sdhci_at91_set_force_card_detect(host);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
pm_runtime_disable:
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
clocks_disable_unprepare:
clk_disable_unprepare(priv->gck);
clk_disable_unprepare(priv->mainck);
clk_disable_unprepare(priv->hclock);
sdhci_pltfm_free:
sdhci_pltfm_free(pdev);
return ret;
}
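/*
* A possible shape for the "own reset function" mentioned in the comment
* above: after a software reset for all commands clears the Force Card
* Detect bit, set it back. This is only an illustrative sketch; the
* example_ name is hypothetical and the real driver wires its reset hook
* up elsewhere.
*/
static inline void example_reset_restore_fcd(struct sdhci_host *host, u8 mask)
{
sdhci_reset(host, mask);
if (mask & SDHCI_RESET_ALL)
sdhci_at91_set_force_card_detect(host);
}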
static void sdhci_at91_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
struct clk *gck = priv->gck;
struct clk *hclock = priv->hclock;
struct clk *mainck = priv->mainck;
pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
sdhci_pltfm_remove(pdev);
clk_disable_unprepare(gck);
clk_disable_unprepare(hclock);
clk_disable_unprepare(mainck);
}
static struct platform_driver sdhci_at91_driver = {
.driver = {
.name = "sdhci-at91",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_at91_dt_match,
.pm = &sdhci_at91_dev_pm_ops,
},
.probe = sdhci_at91_probe,
.remove_new = sdhci_at91_remove,
};
module_platform_driver(sdhci_at91_driver);
MODULE_DESCRIPTION("SDHCI driver for at91");
MODULE_AUTHOR("Ludovic Desroches <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mmc/host/sdhci-of-at91.c |
/*
* MOXA ART MMC host driver.
*
* Copyright (C) 2014 Jonas Jensen
*
* Jonas Jensen <[email protected]>
*
* Based on code from
* Moxa Technologies Co., Ltd. <www.moxa.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sd.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/clk.h>
#include <linux/bitops.h>
#include <linux/of_dma.h>
#include <linux/spinlock.h>
#define REG_COMMAND 0
#define REG_ARGUMENT 4
#define REG_RESPONSE0 8
#define REG_RESPONSE1 12
#define REG_RESPONSE2 16
#define REG_RESPONSE3 20
#define REG_RESPONSE_COMMAND 24
#define REG_DATA_CONTROL 28
#define REG_DATA_TIMER 32
#define REG_DATA_LENGTH 36
#define REG_STATUS 40
#define REG_CLEAR 44
#define REG_INTERRUPT_MASK 48
#define REG_POWER_CONTROL 52
#define REG_CLOCK_CONTROL 56
#define REG_BUS_WIDTH 60
#define REG_DATA_WINDOW 64
#define REG_FEATURE 68
#define REG_REVISION 72
/* REG_COMMAND */
#define CMD_SDC_RESET BIT(10)
#define CMD_EN BIT(9)
#define CMD_APP_CMD BIT(8)
#define CMD_LONG_RSP BIT(7)
#define CMD_NEED_RSP BIT(6)
#define CMD_IDX_MASK 0x3f
/* REG_RESPONSE_COMMAND */
#define RSP_CMD_APP BIT(6)
#define RSP_CMD_IDX_MASK 0x3f
/* REG_DATA_CONTROL */
#define DCR_DATA_FIFO_RESET BIT(8)
#define DCR_DATA_THRES BIT(7)
#define DCR_DATA_EN BIT(6)
#define DCR_DMA_EN BIT(5)
#define DCR_DATA_WRITE BIT(4)
#define DCR_BLK_SIZE 0x0f
/* REG_DATA_LENGTH */
#define DATA_LEN_MASK 0xffffff
/* REG_STATUS */
#define WRITE_PROT BIT(12)
#define CARD_DETECT BIT(11)
/* Bits 1-10 below can be written to either the interrupt or clear register. */
#define CARD_CHANGE BIT(10)
#define FIFO_ORUN BIT(9)
#define FIFO_URUN BIT(8)
#define DATA_END BIT(7)
#define CMD_SENT BIT(6)
#define DATA_CRC_OK BIT(5)
#define RSP_CRC_OK BIT(4)
#define DATA_TIMEOUT BIT(3)
#define RSP_TIMEOUT BIT(2)
#define DATA_CRC_FAIL BIT(1)
#define RSP_CRC_FAIL BIT(0)
#define MASK_RSP (RSP_TIMEOUT | RSP_CRC_FAIL | \
RSP_CRC_OK | CARD_DETECT | CMD_SENT)
#define MASK_DATA (DATA_CRC_OK | DATA_END | \
DATA_CRC_FAIL | DATA_TIMEOUT)
#define MASK_INTR_PIO (FIFO_URUN | FIFO_ORUN | CARD_CHANGE)
/* REG_POWER_CONTROL */
#define SD_POWER_ON BIT(4)
#define SD_POWER_MASK 0x0f
/* REG_CLOCK_CONTROL */
#define CLK_HISPD BIT(9)
#define CLK_OFF BIT(8)
#define CLK_SD BIT(7)
#define CLK_DIV_MASK 0x7f
/* REG_BUS_WIDTH */
#define BUS_WIDTH_4_SUPPORT BIT(3)
#define BUS_WIDTH_4 BIT(2)
#define BUS_WIDTH_1 BIT(0)
#define MMC_VDD_360 23
#define MIN_POWER (MMC_VDD_360 - SD_POWER_MASK)
#define MAX_RETRIES 500000
struct moxart_host {
spinlock_t lock;
void __iomem *base;
phys_addr_t reg_phys;
struct dma_chan *dma_chan_tx;
struct dma_chan *dma_chan_rx;
struct dma_async_tx_descriptor *tx_desc;
struct mmc_host *mmc;
struct mmc_request *mrq;
struct scatterlist *cur_sg;
struct completion dma_complete;
struct completion pio_complete;
u32 num_sg;
u32 data_remain;
u32 data_len;
u32 fifo_width;
u32 timeout;
u32 rate;
long sysclk;
bool have_dma;
bool is_removed;
};
static inline void moxart_init_sg(struct moxart_host *host,
struct mmc_data *data)
{
host->cur_sg = data->sg;
host->num_sg = data->sg_len;
host->data_remain = host->cur_sg->length;
if (host->data_remain > host->data_len)
host->data_remain = host->data_len;
}
static inline int moxart_next_sg(struct moxart_host *host)
{
int remain;
struct mmc_data *data = host->mrq->cmd->data;
host->cur_sg++;
host->num_sg--;
if (host->num_sg > 0) {
host->data_remain = host->cur_sg->length;
remain = host->data_len - data->bytes_xfered;
if (remain > 0 && remain < host->data_remain)
host->data_remain = remain;
}
return host->num_sg;
}
static int moxart_wait_for_status(struct moxart_host *host,
u32 mask, u32 *status)
{
int ret = -ETIMEDOUT;
u32 i;
for (i = 0; i < MAX_RETRIES; i++) {
*status = readl(host->base + REG_STATUS);
if (!(*status & mask)) {
udelay(5);
continue;
}
writel(*status & mask, host->base + REG_CLEAR);
ret = 0;
break;
}
if (ret)
dev_err(mmc_dev(host->mmc), "timed out waiting for status\n");
return ret;
}
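/*
* The helper above is an instance of a common bounded-poll pattern: spin
* on a status register with a small delay until an expected bit appears
* or the retry budget runs out. A generic, simplified sketch (the
* example_ name is hypothetical; the real helper also acks the bits it
* saw by writing them to the clear register):
*/
static inline int example_poll_bit(void __iomem *reg, u32 mask,
unsigned int max_tries, unsigned int delay_us)
{
while (max_tries--) {
if (readl(reg) & mask)
return 0;
udelay(delay_us);
}
return -ETIMEDOUT;
}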
static void moxart_send_command(struct moxart_host *host,
struct mmc_command *cmd)
{
u32 status, cmdctrl;
writel(RSP_TIMEOUT | RSP_CRC_OK |
RSP_CRC_FAIL | CMD_SENT, host->base + REG_CLEAR);
writel(cmd->arg, host->base + REG_ARGUMENT);
cmdctrl = cmd->opcode & CMD_IDX_MASK;
if (cmdctrl == SD_APP_SET_BUS_WIDTH || cmdctrl == SD_APP_OP_COND ||
cmdctrl == SD_APP_SEND_SCR || cmdctrl == SD_APP_SD_STATUS ||
cmdctrl == SD_APP_SEND_NUM_WR_BLKS)
cmdctrl |= CMD_APP_CMD;
if (cmd->flags & MMC_RSP_PRESENT)
cmdctrl |= CMD_NEED_RSP;
if (cmd->flags & MMC_RSP_136)
cmdctrl |= CMD_LONG_RSP;
writel(cmdctrl | CMD_EN, host->base + REG_COMMAND);
if (moxart_wait_for_status(host, MASK_RSP, &status) == -ETIMEDOUT)
cmd->error = -ETIMEDOUT;
if (status & RSP_TIMEOUT) {
cmd->error = -ETIMEDOUT;
return;
}
if (status & RSP_CRC_FAIL) {
cmd->error = -EIO;
return;
}
if (status & RSP_CRC_OK) {
if (cmd->flags & MMC_RSP_136) {
cmd->resp[3] = readl(host->base + REG_RESPONSE0);
cmd->resp[2] = readl(host->base + REG_RESPONSE1);
cmd->resp[1] = readl(host->base + REG_RESPONSE2);
cmd->resp[0] = readl(host->base + REG_RESPONSE3);
} else {
cmd->resp[0] = readl(host->base + REG_RESPONSE0);
}
}
}
static void moxart_dma_complete(void *param)
{
struct moxart_host *host = param;
complete(&host->dma_complete);
}
static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
{
u32 len, dir_slave;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *dma_chan;
if (host->data_len == data->bytes_xfered)
return;
if (data->flags & MMC_DATA_WRITE) {
dma_chan = host->dma_chan_tx;
dir_slave = DMA_MEM_TO_DEV;
} else {
dma_chan = host->dma_chan_rx;
dir_slave = DMA_DEV_TO_MEM;
}
len = dma_map_sg(dma_chan->device->dev, data->sg,
data->sg_len, mmc_get_dma_dir(data));
if (len > 0) {
desc = dmaengine_prep_slave_sg(dma_chan, data->sg,
len, dir_slave,
DMA_PREP_INTERRUPT |
DMA_CTRL_ACK);
} else {
dev_err(mmc_dev(host->mmc), "dma_map_sg returned zero length\n");
}
if (desc) {
host->tx_desc = desc;
desc->callback = moxart_dma_complete;
desc->callback_param = host;
dmaengine_submit(desc);
dma_async_issue_pending(dma_chan);
}
data->bytes_xfered += host->data_remain;
wait_for_completion_interruptible_timeout(&host->dma_complete,
host->timeout);
dma_unmap_sg(dma_chan->device->dev,
data->sg, data->sg_len,
mmc_get_dma_dir(data));
}
static void moxart_transfer_pio(struct moxart_host *host)
{
struct mmc_data *data = host->mrq->cmd->data;
u32 *sgp, len = 0, remain, status;
if (host->data_len == data->bytes_xfered)
return;
sgp = sg_virt(host->cur_sg);
remain = host->data_remain;
if (data->flags & MMC_DATA_WRITE) {
while (remain > 0) {
if (moxart_wait_for_status(host, FIFO_URUN, &status)
== -ETIMEDOUT) {
data->error = -ETIMEDOUT;
complete(&host->pio_complete);
return;
}
for (len = 0; len < remain && len < host->fifo_width;) {
iowrite32(*sgp, host->base + REG_DATA_WINDOW);
sgp++;
len += 4;
}
remain -= len;
}
} else {
while (remain > 0) {
if (moxart_wait_for_status(host, FIFO_ORUN, &status)
== -ETIMEDOUT) {
data->error = -ETIMEDOUT;
complete(&host->pio_complete);
return;
}
for (len = 0; len < remain && len < host->fifo_width;) {
*sgp = ioread32(host->base + REG_DATA_WINDOW);
sgp++;
len += 4;
}
remain -= len;
}
}
data->bytes_xfered += host->data_remain - remain;
host->data_remain = remain;
if (host->data_len != data->bytes_xfered)
moxart_next_sg(host);
else
complete(&host->pio_complete);
}
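/*
* The PIO path above moves data through a fixed-width FIFO window one
* 32-bit word at a time. A stripped-down sketch of the write direction,
* assuming the length is a multiple of four bytes (the example_ name is
* hypothetical and FIFO flow control is omitted):
*/
static inline void example_fifo_write(void __iomem *fifo, const u32 *buf,
unsigned int bytes)
{
while (bytes >= 4) {
iowrite32(*buf++, fifo);
bytes -= 4;
}
}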
static void moxart_prepare_data(struct moxart_host *host)
{
struct mmc_data *data = host->mrq->cmd->data;
u32 datactrl;
int blksz_bits;
if (!data)
return;
host->data_len = data->blocks * data->blksz;
blksz_bits = ffs(data->blksz) - 1;
BUG_ON(1 << blksz_bits != data->blksz);
moxart_init_sg(host, data);
datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE);
if (data->flags & MMC_DATA_WRITE)
datactrl |= DCR_DATA_WRITE;
if ((host->data_len > host->fifo_width) && host->have_dma)
datactrl |= DCR_DMA_EN;
writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL);
writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR);
writel(host->rate, host->base + REG_DATA_TIMER);
writel(host->data_len, host->base + REG_DATA_LENGTH);
writel(datactrl, host->base + REG_DATA_CONTROL);
}
static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct moxart_host *host = mmc_priv(mmc);
unsigned long flags;
u32 status;
spin_lock_irqsave(&host->lock, flags);
init_completion(&host->dma_complete);
init_completion(&host->pio_complete);
host->mrq = mrq;
if (readl(host->base + REG_STATUS) & CARD_DETECT) {
mrq->cmd->error = -ETIMEDOUT;
goto request_done;
}
moxart_prepare_data(host);
moxart_send_command(host, host->mrq->cmd);
if (mrq->cmd->data) {
if ((host->data_len > host->fifo_width) && host->have_dma) {
writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
spin_unlock_irqrestore(&host->lock, flags);
moxart_transfer_dma(mrq->cmd->data, host);
spin_lock_irqsave(&host->lock, flags);
} else {
writel(MASK_INTR_PIO, host->base + REG_INTERRUPT_MASK);
spin_unlock_irqrestore(&host->lock, flags);
/* PIO transfers start from interrupt. */
wait_for_completion_interruptible_timeout(&host->pio_complete,
host->timeout);
spin_lock_irqsave(&host->lock, flags);
}
if (host->is_removed) {
dev_err(mmc_dev(host->mmc), "card removed\n");
mrq->cmd->error = -ETIMEDOUT;
goto request_done;
}
if (moxart_wait_for_status(host, MASK_DATA, &status)
== -ETIMEDOUT) {
mrq->cmd->data->error = -ETIMEDOUT;
goto request_done;
}
if (status & DATA_CRC_FAIL)
mrq->cmd->data->error = -ETIMEDOUT;
if (mrq->cmd->data->stop)
moxart_send_command(host, mrq->cmd->data->stop);
}
request_done:
spin_unlock_irqrestore(&host->lock, flags);
mmc_request_done(host->mmc, mrq);
}
static irqreturn_t moxart_irq(int irq, void *devid)
{
struct moxart_host *host = (struct moxart_host *)devid;
u32 status;
spin_lock(&host->lock);
status = readl(host->base + REG_STATUS);
if (status & CARD_CHANGE) {
host->is_removed = status & CARD_DETECT;
if (host->is_removed && host->have_dma) {
dmaengine_terminate_all(host->dma_chan_tx);
dmaengine_terminate_all(host->dma_chan_rx);
}
host->mrq = NULL;
writel(MASK_INTR_PIO, host->base + REG_CLEAR);
writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
mmc_detect_change(host->mmc, 0);
}
if (status & (FIFO_ORUN | FIFO_URUN) && host->mrq)
moxart_transfer_pio(host);
spin_unlock(&host->lock);
return IRQ_HANDLED;
}
static void moxart_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct moxart_host *host = mmc_priv(mmc);
unsigned long flags;
u8 power, div;
u32 ctrl;
spin_lock_irqsave(&host->lock, flags);
if (ios->clock) {
for (div = 0; div < CLK_DIV_MASK; ++div) {
if (ios->clock >= host->sysclk / (2 * (div + 1)))
break;
}
ctrl = CLK_SD | div;
host->rate = host->sysclk / (2 * (div + 1));
if (host->rate > host->sysclk)
ctrl |= CLK_HISPD;
writel(ctrl, host->base + REG_CLOCK_CONTROL);
}
if (ios->power_mode == MMC_POWER_OFF) {
writel(readl(host->base + REG_POWER_CONTROL) & ~SD_POWER_ON,
host->base + REG_POWER_CONTROL);
} else {
if (ios->vdd < MIN_POWER)
power = 0;
else
power = ios->vdd - MIN_POWER;
writel(SD_POWER_ON | (u32) power,
host->base + REG_POWER_CONTROL);
}
switch (ios->bus_width) {
case MMC_BUS_WIDTH_4:
writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH);
break;
default:
writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH);
break;
}
spin_unlock_irqrestore(&host->lock, flags);
}
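/*
* The divider scan in the set_ios handler above has a closed form: the
* smallest div with sysclk / (2 * (div + 1)) <= target is
* DIV_ROUND_UP(sysclk, 2 * target) - 1, clamped to the divider field.
* A hypothetical stand-alone version (the example_ name is not in the
* driver; target must be non-zero):
*/
static inline u8 example_moxart_clk_div(unsigned long sysclk,
unsigned int target)
{
unsigned long div = DIV_ROUND_UP(sysclk, 2UL * target) - 1;
return min_t(unsigned long, div, CLK_DIV_MASK);
}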
static int moxart_get_ro(struct mmc_host *mmc)
{
struct moxart_host *host = mmc_priv(mmc);
return !!(readl(host->base + REG_STATUS) & WRITE_PROT);
}
static const struct mmc_host_ops moxart_ops = {
.request = moxart_request,
.set_ios = moxart_set_ios,
.get_ro = moxart_get_ro,
};
static int moxart_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct resource res_mmc;
struct mmc_host *mmc;
struct moxart_host *host = NULL;
struct dma_slave_config cfg;
struct clk *clk;
void __iomem *reg_mmc;
int irq, ret;
u32 i;
mmc = mmc_alloc_host(sizeof(struct moxart_host), dev);
if (!mmc) {
dev_err(dev, "mmc_alloc_host failed\n");
ret = -ENOMEM;
goto out_mmc;
}
ret = of_address_to_resource(node, 0, &res_mmc);
if (ret) {
dev_err(dev, "of_address_to_resource failed\n");
goto out_mmc;
}
irq = irq_of_parse_and_map(node, 0);
if (irq <= 0) {
dev_err(dev, "irq_of_parse_and_map failed\n");
ret = -EINVAL;
goto out_mmc;
}
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
goto out_mmc;
}
reg_mmc = devm_ioremap_resource(dev, &res_mmc);
if (IS_ERR(reg_mmc)) {
ret = PTR_ERR(reg_mmc);
goto out_mmc;
}
ret = mmc_of_parse(mmc);
if (ret)
goto out_mmc;
host = mmc_priv(mmc);
host->mmc = mmc;
host->base = reg_mmc;
host->reg_phys = res_mmc.start;
host->timeout = msecs_to_jiffies(1000);
host->sysclk = clk_get_rate(clk);
host->fifo_width = readl(host->base + REG_FEATURE) << 2;
host->dma_chan_tx = dma_request_chan(dev, "tx");
host->dma_chan_rx = dma_request_chan(dev, "rx");
spin_lock_init(&host->lock);
mmc->ops = &moxart_ops;
mmc->f_max = DIV_ROUND_CLOSEST(host->sysclk, 2);
mmc->f_min = DIV_ROUND_CLOSEST(host->sysclk, CLK_DIV_MASK * 2);
mmc->ocr_avail = 0xffff00; /* Support 2.0v - 3.6v power. */
mmc->max_blk_size = 2048; /* Max. block length in REG_DATA_CONTROL */
mmc->max_req_size = DATA_LEN_MASK; /* bits 0-23 in REG_DATA_LENGTH */
mmc->max_blk_count = mmc->max_req_size / 512;
if (IS_ERR(host->dma_chan_tx) || IS_ERR(host->dma_chan_rx)) {
if (PTR_ERR(host->dma_chan_tx) == -EPROBE_DEFER ||
PTR_ERR(host->dma_chan_rx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto out;
}
if (!IS_ERR(host->dma_chan_tx)) {
dma_release_channel(host->dma_chan_tx);
host->dma_chan_tx = NULL;
}
if (!IS_ERR(host->dma_chan_rx)) {
dma_release_channel(host->dma_chan_rx);
host->dma_chan_rx = NULL;
}
dev_dbg(dev, "PIO mode transfer enabled\n");
host->have_dma = false;
mmc->max_seg_size = mmc->max_req_size;
} else {
dev_dbg(dev, "DMA channels found (%p,%p)\n",
host->dma_chan_tx, host->dma_chan_rx);
host->have_dma = true;
memset(&cfg, 0, sizeof(cfg));
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
cfg.direction = DMA_MEM_TO_DEV;
cfg.src_addr = 0;
cfg.dst_addr = host->reg_phys + REG_DATA_WINDOW;
dmaengine_slave_config(host->dma_chan_tx, &cfg);
cfg.direction = DMA_DEV_TO_MEM;
cfg.src_addr = host->reg_phys + REG_DATA_WINDOW;
cfg.dst_addr = 0;
dmaengine_slave_config(host->dma_chan_rx, &cfg);
mmc->max_seg_size = min3(mmc->max_req_size,
dma_get_max_seg_size(host->dma_chan_rx->device->dev),
dma_get_max_seg_size(host->dma_chan_tx->device->dev));
}
if (readl(host->base + REG_BUS_WIDTH) & BUS_WIDTH_4_SUPPORT)
mmc->caps |= MMC_CAP_4_BIT_DATA;
writel(0, host->base + REG_INTERRUPT_MASK);
writel(CMD_SDC_RESET, host->base + REG_COMMAND);
for (i = 0; i < MAX_RETRIES; i++) {
if (!(readl(host->base + REG_COMMAND) & CMD_SDC_RESET))
break;
udelay(5);
}
ret = devm_request_irq(dev, irq, moxart_irq, 0, "moxart-mmc", host);
if (ret)
goto out;
dev_set_drvdata(dev, mmc);
ret = mmc_add_host(mmc);
if (ret)
goto out;
dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width);
return 0;
out:
if (!IS_ERR_OR_NULL(host->dma_chan_tx))
dma_release_channel(host->dma_chan_tx);
if (!IS_ERR_OR_NULL(host->dma_chan_rx))
dma_release_channel(host->dma_chan_rx);
out_mmc:
if (mmc)
mmc_free_host(mmc);
return ret;
}
static void moxart_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = dev_get_drvdata(&pdev->dev);
struct moxart_host *host = mmc_priv(mmc);
if (!IS_ERR_OR_NULL(host->dma_chan_tx))
dma_release_channel(host->dma_chan_tx);
if (!IS_ERR_OR_NULL(host->dma_chan_rx))
dma_release_channel(host->dma_chan_rx);
mmc_remove_host(mmc);
writel(0, host->base + REG_INTERRUPT_MASK);
writel(0, host->base + REG_POWER_CONTROL);
writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
host->base + REG_CLOCK_CONTROL);
mmc_free_host(mmc);
}
static const struct of_device_id moxart_mmc_match[] = {
{ .compatible = "moxa,moxart-mmc" },
{ .compatible = "faraday,ftsdc010" },
{ }
};
MODULE_DEVICE_TABLE(of, moxart_mmc_match);
static struct platform_driver moxart_mmc_driver = {
.probe = moxart_probe,
.remove_new = moxart_remove,
.driver = {
.name = "mmc-moxart",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = moxart_mmc_match,
},
};
module_platform_driver(moxart_mmc_driver);
MODULE_ALIAS("platform:mmc-moxart");
MODULE_DESCRIPTION("MOXA ART MMC driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jonas Jensen <[email protected]>");
| linux-master | drivers/mmc/host/moxart-mmc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* StarFive Designware Mobile Storage Host Controller Driver
*
* Copyright (c) 2022 StarFive Technology Co., Ltd.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
#define ALL_INT_CLR 0x1ffff
#define MAX_DELAY_CHAIN 32
struct starfive_priv {
struct device *dev;
struct regmap *reg_syscon;
u32 syscon_offset;
u32 syscon_shift;
u32 syscon_mask;
};
static void dw_mci_starfive_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
int ret;
unsigned int clock;
if (ios->timing == MMC_TIMING_MMC_DDR52 || ios->timing == MMC_TIMING_UHS_DDR50) {
clock = (ios->clock > 50000000 && ios->clock <= 52000000) ? 100000000 : ios->clock;
ret = clk_set_rate(host->ciu_clk, clock);
if (ret)
dev_dbg(host->dev, "Use an external frequency divider %uHz\n", ios->clock);
host->bus_hz = clk_get_rate(host->ciu_clk);
} else {
dev_dbg(host->dev, "Using the internal divider\n");
}
}
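/*
* In DDR modes the card samples data on both clock edges, so the
* controller input clock runs at twice the bus clock; that is why a
* 50-52 MHz DDR bus rate maps to a 100 MHz ciu clock above. A
* hypothetical helper expressing that mapping (the example_ name is not
* part of the driver):
*/
static inline unsigned int example_ddr_ciu_rate(unsigned int bus_hz)
{
return (bus_hz > 50000000 && bus_hz <= 52000000) ? 100000000 : bus_hz;
}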
static int dw_mci_starfive_execute_tuning(struct dw_mci_slot *slot,
u32 opcode)
{
static const int grade = MAX_DELAY_CHAIN;
struct dw_mci *host = slot->host;
struct starfive_priv *priv = host->priv;
int rise_point = -1, fall_point = -1;
int err, prev_err = 0;
int i;
bool found = false;
u32 regval;
/*
* Use grade as the max delay chain length, and use rise_point and
* fall_point to find the best sampling point of the data input
* signals.
*/
for (i = 0; i < grade; i++) {
regval = i << priv->syscon_shift;
err = regmap_update_bits(priv->reg_syscon, priv->syscon_offset,
priv->syscon_mask, regval);
if (err)
return err;
mci_writel(host, RINTSTS, ALL_INT_CLR);
err = mmc_send_tuning(slot->mmc, opcode, NULL);
if (!err)
found = true;
if (i > 0) {
if (err && !prev_err)
fall_point = i - 1;
if (!err && prev_err)
rise_point = i;
}
if (rise_point != -1 && fall_point != -1)
goto tuning_out;
prev_err = err;
err = 0;
}
tuning_out:
if (found) {
if (rise_point == -1)
rise_point = 0;
if (fall_point == -1)
fall_point = grade - 1;
if (fall_point < rise_point) {
if ((rise_point + fall_point) >
(grade - 1))
i = fall_point / 2;
else
i = (rise_point + grade - 1) / 2;
} else {
i = (rise_point + fall_point) / 2;
}
regval = i << priv->syscon_shift;
err = regmap_update_bits(priv->reg_syscon, priv->syscon_offset,
priv->syscon_mask, regval);
if (err)
return err;
mci_writel(host, RINTSTS, ALL_INT_CLR);
dev_info(host->dev, "Found valid delay chain! use it [delay=%d]\n", i);
} else {
dev_err(host->dev, "No valid delay chain! use default\n");
err = -EINVAL;
}
mci_writel(host, RINTSTS, ALL_INT_CLR);
return err;
}
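/*
* The tuning loop above searches for the rising and falling edges of the
* window of delay-chain settings where tuning passes, then samples in the
* middle of it. A simplified sketch of the midpoint selection for a
* non-wrapping window (the example_ name is hypothetical; the real code
* also handles the wrapped case where fall_point < rise_point):
*/
static inline int example_tuning_midpoint(int rise_point, int fall_point,
int grade)
{
if (rise_point < 0)
rise_point = 0;
if (fall_point < 0)
fall_point = grade - 1;
return (rise_point + fall_point) / 2;
}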
static int dw_mci_starfive_parse_dt(struct dw_mci *host)
{
struct of_phandle_args args;
struct starfive_priv *priv;
int ret;
priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
ret = of_parse_phandle_with_fixed_args(host->dev->of_node,
"starfive,sysreg", 3, 0, &args);
if (ret) {
dev_err(host->dev, "Failed to parse starfive,sysreg\n");
return -EINVAL;
}
priv->reg_syscon = syscon_node_to_regmap(args.np);
of_node_put(args.np);
if (IS_ERR(priv->reg_syscon))
return PTR_ERR(priv->reg_syscon);
priv->syscon_offset = args.args[0];
priv->syscon_shift = args.args[1];
priv->syscon_mask = args.args[2];
host->priv = priv;
return 0;
}
static const struct dw_mci_drv_data starfive_data = {
.common_caps = MMC_CAP_CMD23,
.set_ios = dw_mci_starfive_set_ios,
.parse_dt = dw_mci_starfive_parse_dt,
.execute_tuning = dw_mci_starfive_execute_tuning,
};
static const struct of_device_id dw_mci_starfive_match[] = {
{ .compatible = "starfive,jh7110-mmc",
.data = &starfive_data },
{},
};
MODULE_DEVICE_TABLE(of, dw_mci_starfive_match);
static int dw_mci_starfive_probe(struct platform_device *pdev)
{
return dw_mci_pltfm_register(pdev, &starfive_data);
}
static struct platform_driver dw_mci_starfive_driver = {
.probe = dw_mci_starfive_probe,
.remove_new = dw_mci_pltfm_remove,
.driver = {
.name = "dwmmc_starfive",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = dw_mci_starfive_match,
},
};
module_platform_driver(dw_mci_starfive_driver);
MODULE_DESCRIPTION("StarFive JH7110 Specific DW-MSHC Driver Extension");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dwmmc_starfive");
| linux-master | drivers/mmc/host/dw_mmc-starfive.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Actions Semi Owl SoCs SD/MMC driver
*
* Copyright (c) 2014 Actions Semi Inc.
* Copyright (c) 2019 Manivannan Sadhasivam <[email protected]>
*
* TODO: SDIO support
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
/*
* SDC registers
*/
#define OWL_REG_SD_EN 0x0000
#define OWL_REG_SD_CTL 0x0004
#define OWL_REG_SD_STATE 0x0008
#define OWL_REG_SD_CMD 0x000c
#define OWL_REG_SD_ARG 0x0010
#define OWL_REG_SD_RSPBUF0 0x0014
#define OWL_REG_SD_RSPBUF1 0x0018
#define OWL_REG_SD_RSPBUF2 0x001c
#define OWL_REG_SD_RSPBUF3 0x0020
#define OWL_REG_SD_RSPBUF4 0x0024
#define OWL_REG_SD_DAT 0x0028
#define OWL_REG_SD_BLK_SIZE 0x002c
#define OWL_REG_SD_BLK_NUM 0x0030
#define OWL_REG_SD_BUF_SIZE 0x0034
/* SD_EN Bits */
#define OWL_SD_EN_RANE BIT(31)
#define OWL_SD_EN_RAN_SEED(x) (((x) & 0x3f) << 24)
#define OWL_SD_EN_S18EN BIT(12)
#define OWL_SD_EN_RESE BIT(10)
#define OWL_SD_EN_DAT1_S BIT(9)
#define OWL_SD_EN_CLK_S BIT(8)
#define OWL_SD_ENABLE BIT(7)
#define OWL_SD_EN_BSEL BIT(6)
#define OWL_SD_EN_SDIOEN BIT(3)
#define OWL_SD_EN_DDREN BIT(2)
#define OWL_SD_EN_DATAWID(x) (((x) & 0x3) << 0)
/* SD_CTL Bits */
#define OWL_SD_CTL_TOUTEN BIT(31)
#define OWL_SD_CTL_TOUTCNT(x) (((x) & 0x7f) << 24)
#define OWL_SD_CTL_DELAY_MSK GENMASK(23, 16)
#define OWL_SD_CTL_RDELAY(x) (((x) & 0xf) << 20)
#define OWL_SD_CTL_WDELAY(x) (((x) & 0xf) << 16)
#define OWL_SD_CTL_CMDLEN BIT(13)
#define OWL_SD_CTL_SCC BIT(12)
#define OWL_SD_CTL_TCN(x) (((x) & 0xf) << 8)
#define OWL_SD_CTL_TS BIT(7)
#define OWL_SD_CTL_LBE BIT(6)
#define OWL_SD_CTL_C7EN BIT(5)
#define OWL_SD_CTL_TM(x) (((x) & 0xf) << 0)
#define OWL_SD_DELAY_LOW_CLK 0x0f
#define OWL_SD_DELAY_MID_CLK 0x0a
#define OWL_SD_DELAY_HIGH_CLK 0x09
#define OWL_SD_RDELAY_DDR50 0x0a
#define OWL_SD_WDELAY_DDR50 0x08
/* SD_STATE Bits */
#define OWL_SD_STATE_DAT1BS BIT(18)
#define OWL_SD_STATE_SDIOB_P BIT(17)
#define OWL_SD_STATE_SDIOB_EN BIT(16)
#define OWL_SD_STATE_TOUTE BIT(15)
#define OWL_SD_STATE_BAEP BIT(14)
#define OWL_SD_STATE_MEMRDY BIT(12)
#define OWL_SD_STATE_CMDS BIT(11)
#define OWL_SD_STATE_DAT1AS BIT(10)
#define OWL_SD_STATE_SDIOA_P BIT(9)
#define OWL_SD_STATE_SDIOA_EN BIT(8)
#define OWL_SD_STATE_DAT0S BIT(7)
#define OWL_SD_STATE_TEIE BIT(6)
#define OWL_SD_STATE_TEI BIT(5)
#define OWL_SD_STATE_CLNR BIT(4)
#define OWL_SD_STATE_CLC BIT(3)
#define OWL_SD_STATE_WC16ER BIT(2)
#define OWL_SD_STATE_RC16ER BIT(1)
#define OWL_SD_STATE_CRC7ER BIT(0)
#define OWL_CMD_TIMEOUT_MS 30000
struct owl_mmc_host {
struct device *dev;
struct reset_control *reset;
void __iomem *base;
struct clk *clk;
struct completion sdc_complete;
spinlock_t lock;
int irq;
u32 clock;
bool ddr_50;
enum dma_data_direction dma_dir;
struct dma_chan *dma;
struct dma_async_tx_descriptor *desc;
struct dma_slave_config dma_cfg;
struct completion dma_complete;
struct mmc_host *mmc;
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
};
static void owl_mmc_update_reg(void __iomem *reg, unsigned int val, bool state)
{
unsigned int regval;
regval = readl(reg);
if (state)
regval |= val;
else
regval &= ~val;
writel(regval, reg);
}
static irqreturn_t owl_irq_handler(int irq, void *devid)
{
struct owl_mmc_host *owl_host = devid;
u32 state;
spin_lock(&owl_host->lock);
state = readl(owl_host->base + OWL_REG_SD_STATE);
if (state & OWL_SD_STATE_TEI) {
state = readl(owl_host->base + OWL_REG_SD_STATE);
state |= OWL_SD_STATE_TEI;
writel(state, owl_host->base + OWL_REG_SD_STATE);
complete(&owl_host->sdc_complete);
}
spin_unlock(&owl_host->lock);
return IRQ_HANDLED;
}
static void owl_mmc_finish_request(struct owl_mmc_host *owl_host)
{
struct mmc_request *mrq = owl_host->mrq;
struct mmc_data *data;
/* Should never be NULL; check before dereferencing */
WARN_ON(!mrq);
data = mrq->data;
owl_host->mrq = NULL;
if (data)
dma_unmap_sg(owl_host->dma->device->dev, data->sg, data->sg_len,
owl_host->dma_dir);
/* Finally finish request */
mmc_request_done(owl_host->mmc, mrq);
}
static void owl_mmc_send_cmd(struct owl_mmc_host *owl_host,
struct mmc_command *cmd,
struct mmc_data *data)
{
unsigned long timeout;
u32 mode, state, resp[2];
u32 cmd_rsp_mask = 0;
init_completion(&owl_host->sdc_complete);
switch (mmc_resp_type(cmd)) {
case MMC_RSP_NONE:
mode = OWL_SD_CTL_TM(0);
break;
case MMC_RSP_R1:
if (data) {
if (data->flags & MMC_DATA_READ)
mode = OWL_SD_CTL_TM(4);
else
mode = OWL_SD_CTL_TM(5);
} else {
mode = OWL_SD_CTL_TM(1);
}
cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
break;
case MMC_RSP_R1B:
mode = OWL_SD_CTL_TM(3);
cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
break;
case MMC_RSP_R2:
mode = OWL_SD_CTL_TM(2);
cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
break;
case MMC_RSP_R3:
mode = OWL_SD_CTL_TM(1);
cmd_rsp_mask = OWL_SD_STATE_CLNR;
break;
default:
dev_warn(owl_host->dev, "Unknown MMC command\n");
cmd->error = -EINVAL;
return;
}
/* Keep current WDELAY and RDELAY */
mode |= (readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16));
/* Start to send corresponding command type */
writel(cmd->arg, owl_host->base + OWL_REG_SD_ARG);
writel(cmd->opcode, owl_host->base + OWL_REG_SD_CMD);
/* Set LBE to send clk at the end of last read block */
if (data) {
mode |= (OWL_SD_CTL_TS | OWL_SD_CTL_LBE | 0x64000000);
} else {
mode &= ~(OWL_SD_CTL_TOUTEN | OWL_SD_CTL_LBE);
mode |= OWL_SD_CTL_TS;
}
owl_host->cmd = cmd;
/* Start transfer */
writel(mode, owl_host->base + OWL_REG_SD_CTL);
if (data)
return;
timeout = msecs_to_jiffies(cmd->busy_timeout ? cmd->busy_timeout :
OWL_CMD_TIMEOUT_MS);
if (!wait_for_completion_timeout(&owl_host->sdc_complete, timeout)) {
dev_err(owl_host->dev, "CMD interrupt timeout\n");
cmd->error = -ETIMEDOUT;
return;
}
state = readl(owl_host->base + OWL_REG_SD_STATE);
if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
if (cmd_rsp_mask & state) {
if (state & OWL_SD_STATE_CLNR) {
dev_err(owl_host->dev, "Error CMD_NO_RSP\n");
cmd->error = -EILSEQ;
return;
}
if (state & OWL_SD_STATE_CRC7ER) {
dev_err(owl_host->dev, "Error CMD_RSP_CRC\n");
cmd->error = -EILSEQ;
return;
}
}
if (mmc_resp_type(cmd) & MMC_RSP_136) {
cmd->resp[3] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
cmd->resp[2] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
cmd->resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF2);
cmd->resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF3);
} else {
resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
cmd->resp[0] = resp[1] << 24 | resp[0] >> 8;
cmd->resp[1] = resp[1] >> 8;
}
}
}
static void owl_mmc_dma_complete(void *param)
{
struct owl_mmc_host *owl_host = param;
struct mmc_data *data = owl_host->data;
if (data)
complete(&owl_host->dma_complete);
}
static int owl_mmc_prepare_data(struct owl_mmc_host *owl_host,
struct mmc_data *data)
{
u32 total;
owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN, OWL_SD_EN_BSEL,
true);
writel(data->blocks, owl_host->base + OWL_REG_SD_BLK_NUM);
writel(data->blksz, owl_host->base + OWL_REG_SD_BLK_SIZE);
total = data->blksz * data->blocks;
if (total < 512)
writel(total, owl_host->base + OWL_REG_SD_BUF_SIZE);
else
writel(512, owl_host->base + OWL_REG_SD_BUF_SIZE);
if (data->flags & MMC_DATA_WRITE) {
owl_host->dma_dir = DMA_TO_DEVICE;
owl_host->dma_cfg.direction = DMA_MEM_TO_DEV;
} else {
owl_host->dma_dir = DMA_FROM_DEVICE;
owl_host->dma_cfg.direction = DMA_DEV_TO_MEM;
}
dma_map_sg(owl_host->dma->device->dev, data->sg,
data->sg_len, owl_host->dma_dir);
dmaengine_slave_config(owl_host->dma, &owl_host->dma_cfg);
owl_host->desc = dmaengine_prep_slave_sg(owl_host->dma, data->sg,
data->sg_len,
owl_host->dma_cfg.direction,
DMA_PREP_INTERRUPT |
DMA_CTRL_ACK);
if (!owl_host->desc) {
dev_err(owl_host->dev, "Can't prepare slave sg\n");
return -EBUSY;
}
owl_host->data = data;
owl_host->desc->callback = owl_mmc_dma_complete;
owl_host->desc->callback_param = (void *)owl_host;
data->error = 0;
return 0;
}
static void owl_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct owl_mmc_host *owl_host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
int ret;
owl_host->mrq = mrq;
if (mrq->data) {
ret = owl_mmc_prepare_data(owl_host, data);
if (ret < 0) {
data->error = ret;
goto err_out;
}
init_completion(&owl_host->dma_complete);
dmaengine_submit(owl_host->desc);
dma_async_issue_pending(owl_host->dma);
}
owl_mmc_send_cmd(owl_host, mrq->cmd, data);
if (data) {
if (!wait_for_completion_timeout(&owl_host->sdc_complete,
10 * HZ)) {
dev_err(owl_host->dev, "CMD interrupt timeout\n");
mrq->cmd->error = -ETIMEDOUT;
dmaengine_terminate_all(owl_host->dma);
goto err_out;
}
if (!wait_for_completion_timeout(&owl_host->dma_complete,
5 * HZ)) {
dev_err(owl_host->dev, "DMA interrupt timeout\n");
mrq->cmd->error = -ETIMEDOUT;
dmaengine_terminate_all(owl_host->dma);
goto err_out;
}
if (data->stop)
owl_mmc_send_cmd(owl_host, data->stop, NULL);
data->bytes_xfered = data->blocks * data->blksz;
}
err_out:
owl_mmc_finish_request(owl_host);
}
static int owl_mmc_set_clk_rate(struct owl_mmc_host *owl_host,
unsigned int rate)
{
unsigned long clk_rate;
int ret;
u32 reg;
reg = readl(owl_host->base + OWL_REG_SD_CTL);
reg &= ~OWL_SD_CTL_DELAY_MSK;
/* Set RDELAY and WDELAY based on the clock */
if (rate <= 1000000) {
writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_LOW_CLK) |
OWL_SD_CTL_WDELAY(OWL_SD_DELAY_LOW_CLK),
owl_host->base + OWL_REG_SD_CTL);
} else if ((rate > 1000000) && (rate <= 26000000)) {
writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_MID_CLK) |
OWL_SD_CTL_WDELAY(OWL_SD_DELAY_MID_CLK),
owl_host->base + OWL_REG_SD_CTL);
} else if ((rate > 26000000) && (rate <= 52000000) && !owl_host->ddr_50) {
writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_HIGH_CLK) |
OWL_SD_CTL_WDELAY(OWL_SD_DELAY_HIGH_CLK),
owl_host->base + OWL_REG_SD_CTL);
/* DDR50 mode has special delay chain */
} else if ((rate > 26000000) && (rate <= 52000000) && owl_host->ddr_50) {
writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_RDELAY_DDR50) |
OWL_SD_CTL_WDELAY(OWL_SD_WDELAY_DDR50),
owl_host->base + OWL_REG_SD_CTL);
} else {
dev_err(owl_host->dev, "SD clock rate not supported\n");
return -EINVAL;
}
clk_rate = clk_round_rate(owl_host->clk, rate << 1);
ret = clk_set_rate(owl_host->clk, clk_rate);
return ret;
}
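/*
* The delay selection above is a piecewise mapping from clock rate to a
* fixed delay-chain value. The same read-delay mapping written as a
* hypothetical band lookup (the example_ name is not in the driver; the
* DDR50 write side uses OWL_SD_WDELAY_DDR50 instead):
*/
static inline u32 example_owl_rdelay_for_rate(unsigned int rate, bool ddr_50)
{
if (rate <= 1000000)
return OWL_SD_DELAY_LOW_CLK;
if (rate <= 26000000)
return OWL_SD_DELAY_MID_CLK;
return ddr_50 ? OWL_SD_RDELAY_DDR50 : OWL_SD_DELAY_HIGH_CLK;
}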
static void owl_mmc_set_clk(struct owl_mmc_host *owl_host, struct mmc_ios *ios)
{
if (!ios->clock)
return;
owl_host->clock = ios->clock;
owl_mmc_set_clk_rate(owl_host, ios->clock);
}
static void owl_mmc_set_bus_width(struct owl_mmc_host *owl_host,
struct mmc_ios *ios)
{
u32 reg;
reg = readl(owl_host->base + OWL_REG_SD_EN);
reg &= ~0x03;
switch (ios->bus_width) {
case MMC_BUS_WIDTH_1:
break;
case MMC_BUS_WIDTH_4:
reg |= OWL_SD_EN_DATAWID(1);
break;
case MMC_BUS_WIDTH_8:
reg |= OWL_SD_EN_DATAWID(2);
break;
}
writel(reg, owl_host->base + OWL_REG_SD_EN);
}
static void owl_mmc_ctr_reset(struct owl_mmc_host *owl_host)
{
reset_control_assert(owl_host->reset);
udelay(20);
reset_control_deassert(owl_host->reset);
}
static void owl_mmc_power_on(struct owl_mmc_host *owl_host)
{
u32 mode;
init_completion(&owl_host->sdc_complete);
/* Enable transfer end IRQ */
owl_mmc_update_reg(owl_host->base + OWL_REG_SD_STATE,
OWL_SD_STATE_TEIE, true);
/* Send init clk */
mode = (readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16));
mode |= OWL_SD_CTL_TS | OWL_SD_CTL_TCN(5) | OWL_SD_CTL_TM(8);
writel(mode, owl_host->base + OWL_REG_SD_CTL);
if (!wait_for_completion_timeout(&owl_host->sdc_complete, HZ)) {
dev_err(owl_host->dev, "CMD interrupt timeout\n");
return;
}
}
static void owl_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct owl_mmc_host *owl_host = mmc_priv(mmc);
switch (ios->power_mode) {
case MMC_POWER_UP:
dev_dbg(owl_host->dev, "Powering card up\n");
/* Reset the SDC controller to clear all previous states */
owl_mmc_ctr_reset(owl_host);
clk_prepare_enable(owl_host->clk);
writel(OWL_SD_ENABLE | OWL_SD_EN_RESE,
owl_host->base + OWL_REG_SD_EN);
break;
case MMC_POWER_ON:
dev_dbg(owl_host->dev, "Powering card on\n");
owl_mmc_power_on(owl_host);
break;
case MMC_POWER_OFF:
dev_dbg(owl_host->dev, "Powering card off\n");
clk_disable_unprepare(owl_host->clk);
return;
default:
dev_dbg(owl_host->dev, "Ignoring unknown card power state\n");
break;
}
if (ios->clock != owl_host->clock)
owl_mmc_set_clk(owl_host, ios);
owl_mmc_set_bus_width(owl_host, ios);
/* Enable DDR mode if requested */
if (ios->timing == MMC_TIMING_UHS_DDR50) {
owl_host->ddr_50 = true;
owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
OWL_SD_EN_DDREN, true);
} else {
owl_host->ddr_50 = false;
}
}
static int owl_mmc_start_signal_voltage_switch(struct mmc_host *mmc,
struct mmc_ios *ios)
{
struct owl_mmc_host *owl_host = mmc_priv(mmc);
/* It is enough to change the pad ctrl bit for voltage switch */
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_330:
owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
OWL_SD_EN_S18EN, false);
break;
case MMC_SIGNAL_VOLTAGE_180:
owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
OWL_SD_EN_S18EN, true);
break;
default:
return -ENOTSUPP;
}
return 0;
}
static const struct mmc_host_ops owl_mmc_ops = {
.request = owl_mmc_request,
.set_ios = owl_mmc_set_ios,
.get_ro = mmc_gpio_get_ro,
.get_cd = mmc_gpio_get_cd,
.start_signal_voltage_switch = owl_mmc_start_signal_voltage_switch,
};
static int owl_mmc_probe(struct platform_device *pdev)
{
struct owl_mmc_host *owl_host;
struct mmc_host *mmc;
struct resource *res;
int ret;
mmc = mmc_alloc_host(sizeof(struct owl_mmc_host), &pdev->dev);
if (!mmc) {
dev_err(&pdev->dev, "mmc alloc host failed\n");
return -ENOMEM;
}
platform_set_drvdata(pdev, mmc);
owl_host = mmc_priv(mmc);
owl_host->dev = &pdev->dev;
owl_host->mmc = mmc;
spin_lock_init(&owl_host->lock);
owl_host->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(owl_host->base)) {
ret = PTR_ERR(owl_host->base);
goto err_free_host;
}
owl_host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(owl_host->clk)) {
dev_err(&pdev->dev, "No clock defined\n");
ret = PTR_ERR(owl_host->clk);
goto err_free_host;
}
owl_host->reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(owl_host->reset)) {
dev_err(&pdev->dev, "Could not get reset control\n");
ret = PTR_ERR(owl_host->reset);
goto err_free_host;
}
mmc->ops = &owl_mmc_ops;
mmc->max_blk_count = 512;
mmc->max_blk_size = 512;
mmc->max_segs = 256;
mmc->max_seg_size = 262144;
mmc->max_req_size = 262144;
/* 100kHz ~ 52MHz */
mmc->f_min = 100000;
mmc->f_max = 52000000;
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
MMC_CAP_4_BIT_DATA;
mmc->caps2 = (MMC_CAP2_BOOTPART_NOACC | MMC_CAP2_NO_SDIO);
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 |
MMC_VDD_165_195;
ret = mmc_of_parse(mmc);
if (ret)
goto err_free_host;
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
owl_host->dma = dma_request_chan(&pdev->dev, "mmc");
if (IS_ERR(owl_host->dma)) {
dev_err(owl_host->dev, "Failed to get external DMA channel.\n");
ret = PTR_ERR(owl_host->dma);
goto err_free_host;
}
dev_info(&pdev->dev, "Using %s for DMA transfers\n",
dma_chan_name(owl_host->dma));
owl_host->dma_cfg.src_addr = res->start + OWL_REG_SD_DAT;
owl_host->dma_cfg.dst_addr = res->start + OWL_REG_SD_DAT;
owl_host->dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
owl_host->dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
owl_host->dma_cfg.device_fc = false;
owl_host->irq = platform_get_irq(pdev, 0);
if (owl_host->irq < 0) {
ret = owl_host->irq;
goto err_release_channel;
}
ret = devm_request_irq(&pdev->dev, owl_host->irq, owl_irq_handler,
0, dev_name(&pdev->dev), owl_host);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq %d\n",
owl_host->irq);
goto err_release_channel;
}
ret = mmc_add_host(mmc);
if (ret) {
dev_err(&pdev->dev, "Failed to add host\n");
goto err_release_channel;
}
dev_dbg(&pdev->dev, "Owl MMC Controller Initialized\n");
return 0;
err_release_channel:
dma_release_channel(owl_host->dma);
err_free_host:
mmc_free_host(mmc);
return ret;
}
static void owl_mmc_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct owl_mmc_host *owl_host = mmc_priv(mmc);
mmc_remove_host(mmc);
disable_irq(owl_host->irq);
dma_release_channel(owl_host->dma);
mmc_free_host(mmc);
}
static const struct of_device_id owl_mmc_of_match[] = {
{.compatible = "actions,owl-mmc",},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, owl_mmc_of_match);
static struct platform_driver owl_mmc_driver = {
.driver = {
.name = "owl_mmc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = owl_mmc_of_match,
},
.probe = owl_mmc_probe,
.remove_new = owl_mmc_remove,
};
module_platform_driver(owl_mmc_driver);
MODULE_DESCRIPTION("Actions Semi Owl SoCs SD/MMC Driver");
MODULE_AUTHOR("Actions Semi");
MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/mmc/host/owl-mmc.c |
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2014 Broadcom Corporation
/*
* iProc SDHCI platform driver
*/
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mmc/host.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include "sdhci-pltfm.h"
struct sdhci_iproc_data {
const struct sdhci_pltfm_data *pdata;
u32 caps;
u32 caps1;
u32 mmc_caps;
bool missing_caps;
};
struct sdhci_iproc_host {
const struct sdhci_iproc_data *data;
u32 shadow_cmd;
u32 shadow_blk;
bool is_cmd_shadowed;
bool is_blk_shadowed;
};
#define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg)
{
u32 val = readl(host->ioaddr + reg);
pr_debug("%s: readl [0x%02x] 0x%08x\n",
mmc_hostname(host->mmc), reg, val);
return val;
}
static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host);
u32 val;
u16 word;
if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) {
/* Get the saved transfer mode */
val = iproc_host->shadow_cmd;
} else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
iproc_host->is_blk_shadowed) {
/* Get the saved block info */
val = iproc_host->shadow_blk;
} else {
val = sdhci_iproc_readl(host, (reg & ~3));
}
word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
return word;
}
static u8 sdhci_iproc_readb(struct sdhci_host *host, int reg)
{
u32 val = sdhci_iproc_readl(host, (reg & ~3));
u8 byte = val >> REG_OFFSET_IN_BITS(reg) & 0xff;
return byte;
}
static inline void sdhci_iproc_writel(struct sdhci_host *host, u32 val, int reg)
{
pr_debug("%s: writel [0x%02x] 0x%08x\n",
mmc_hostname(host->mmc), reg, val);
writel(val, host->ioaddr + reg);
if (host->clock <= 400000) {
/* Round the four SD clock cycle delay up to whole microseconds */
if (host->clock)
udelay((4 * 1000000 + host->clock - 1) / host->clock);
else
udelay(10);
}
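/*
* The delay above rounds four SD clock periods up to whole microseconds:
* four cycles at f Hz last 4 * 1000000 / f us, and adding (f - 1) before
* the division implements the ceiling. A stand-alone sketch (the
* example_ name is hypothetical):
*/
static inline unsigned int example_four_clk_delay_us(unsigned int hz)
{
return hz ? (4 * 1000000 + hz - 1) / hz : 10;
}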
}
/*
* The Arasan has a bugette whereby it may lose the content of successive
* writes to the same register that are within two SD-card clock cycles of
* each other (a clock domain crossing problem). The data
* register does not have this problem, which is just as well - otherwise we'd
* have to nobble the DMA engine too.
*
* This wouldn't be a problem with the code except that we can only write the
* controller with 32-bit writes. So writing two different 16-bit registers
* back to back creates the problem.
*
* In reality, this only happens when SDHCI_BLOCK_SIZE and SDHCI_BLOCK_COUNT
* are written followed by SDHCI_TRANSFER_MODE and SDHCI_COMMAND.
* The BLOCK_SIZE and BLOCK_COUNT are meaningless until a command is issued,
* so the workaround can be further optimized. We can keep shadow values of
* BLOCK_SIZE, BLOCK_COUNT, and TRANSFER_MODE until a COMMAND is issued.
* Then, write the BLOCK_SIZE+BLOCK_COUNT in a single 32-bit write followed
* by the TRANSFER+COMMAND in another 32-bit write.
*/
static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host);
u32 word_shift = REG_OFFSET_IN_BITS(reg);
u32 mask = 0xffff << word_shift;
u32 oldval, newval;
if (reg == SDHCI_COMMAND) {
/* Write the block now as we are issuing a command */
if (iproc_host->is_blk_shadowed) {
sdhci_iproc_writel(host, iproc_host->shadow_blk,
SDHCI_BLOCK_SIZE);
iproc_host->is_blk_shadowed = false;
}
oldval = iproc_host->shadow_cmd;
iproc_host->is_cmd_shadowed = false;
} else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
iproc_host->is_blk_shadowed) {
/* Block size and count are stored in shadow reg */
oldval = iproc_host->shadow_blk;
} else {
/* Read reg, all other registers are not shadowed */
oldval = sdhci_iproc_readl(host, (reg & ~3));
}
newval = (oldval & ~mask) | (val << word_shift);
if (reg == SDHCI_TRANSFER_MODE) {
/* Save the transfer mode until the command is issued */
iproc_host->shadow_cmd = newval;
iproc_host->is_cmd_shadowed = true;
} else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
/* Save the block info until the command is issued */
iproc_host->shadow_blk = newval;
iproc_host->is_blk_shadowed = true;
} else {
/* Command or other regular 32-bit write */
sdhci_iproc_writel(host, newval, reg & ~3);
}
}
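/*
* The core of the workaround above is merging a 16-bit value into the
* correct half of a 32-bit word so the controller only ever sees full
* 32-bit bus writes, never back-to-back 16-bit accesses. A minimal
* sketch of the merge step (the example_ name is hypothetical):
*/
static inline u32 example_merge_u16(u32 oldval, u16 val, unsigned int shift)
{
return (oldval & ~(0xffffU << shift)) | ((u32)val << shift);
}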
static void sdhci_iproc_writeb(struct sdhci_host *host, u8 val, int reg)
{
u32 oldval = sdhci_iproc_readl(host, (reg & ~3));
u32 byte_shift = REG_OFFSET_IN_BITS(reg);
u32 mask = 0xff << byte_shift;
u32 newval = (oldval & ~mask) | (val << byte_shift);
sdhci_iproc_writel(host, newval, reg & ~3);
}
static unsigned int sdhci_iproc_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
if (pltfm_host->clk)
return sdhci_pltfm_clk_get_max_clock(host);
else
return pltfm_host->clock;
}
/*
* There is a known bug on BCM2711's SDHCI core integration where the
* controller will hang when the difference between the core clock and the bus
* clock is too great. Specifically this can be reproduced under the following
* conditions:
*
* - No SD card plugged in, polling thread is running, probing cards at
* 100 kHz.
* - BCM2711's core clock configured at 500MHz or more
*
* So we set 200kHz as the minimum clock frequency available for that SoC.
*/
static unsigned int sdhci_iproc_bcm2711_get_min_clock(struct sdhci_host *host)
{
return 200000;
}
static const struct sdhci_ops sdhci_iproc_ops = {
.set_clock = sdhci_set_clock,
.get_max_clock = sdhci_iproc_get_max_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
static const struct sdhci_ops sdhci_iproc_32only_ops = {
.read_l = sdhci_iproc_readl,
.read_w = sdhci_iproc_readw,
.read_b = sdhci_iproc_readb,
.write_l = sdhci_iproc_writel,
.write_w = sdhci_iproc_writew,
.write_b = sdhci_iproc_writeb,
.set_clock = sdhci_set_clock,
.get_max_clock = sdhci_iproc_get_max_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = {
.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_NO_HISPD_BIT,
.quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON,
.ops = &sdhci_iproc_32only_ops,
};
static const struct sdhci_iproc_data iproc_cygnus_data = {
.pdata = &sdhci_iproc_cygnus_pltfm_data,
.caps = ((0x1 << SDHCI_MAX_BLOCK_SHIFT)
& SDHCI_MAX_BLOCK_MASK) |
SDHCI_CAN_VDD_330 |
SDHCI_CAN_VDD_180 |
SDHCI_CAN_DO_SUSPEND |
SDHCI_CAN_DO_HISPD |
SDHCI_CAN_DO_ADMA2 |
SDHCI_CAN_DO_SDMA,
.caps1 = SDHCI_DRIVER_TYPE_C |
SDHCI_DRIVER_TYPE_D |
SDHCI_SUPPORT_DDR50,
.mmc_caps = MMC_CAP_1_8V_DDR,
};
static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 |
SDHCI_QUIRK_NO_HISPD_BIT,
.quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
.ops = &sdhci_iproc_ops,
};
static const struct sdhci_iproc_data iproc_data = {
.pdata = &sdhci_iproc_pltfm_data,
.caps = ((0x1 << SDHCI_MAX_BLOCK_SHIFT)
& SDHCI_MAX_BLOCK_MASK) |
SDHCI_CAN_VDD_330 |
SDHCI_CAN_VDD_180 |
SDHCI_CAN_DO_SUSPEND |
SDHCI_CAN_DO_HISPD |
SDHCI_CAN_DO_ADMA2 |
SDHCI_CAN_DO_SDMA,
.caps1 = SDHCI_DRIVER_TYPE_C |
SDHCI_DRIVER_TYPE_D |
SDHCI_SUPPORT_DDR50,
};
static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_NO_HISPD_BIT,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.ops = &sdhci_iproc_32only_ops,
};
static const struct sdhci_iproc_data bcm2835_data = {
.pdata = &sdhci_bcm2835_pltfm_data,
.caps = ((0x1 << SDHCI_MAX_BLOCK_SHIFT)
& SDHCI_MAX_BLOCK_MASK) |
SDHCI_CAN_VDD_330 |
SDHCI_CAN_DO_HISPD,
.caps1 = SDHCI_DRIVER_TYPE_A |
SDHCI_DRIVER_TYPE_C,
.mmc_caps = 0x00000000,
.missing_caps = true,
};
static const struct sdhci_ops sdhci_iproc_bcm2711_ops = {
.read_l = sdhci_iproc_readl,
.read_w = sdhci_iproc_readw,
.read_b = sdhci_iproc_readb,
.write_l = sdhci_iproc_writel,
.write_w = sdhci_iproc_writew,
.write_b = sdhci_iproc_writeb,
.set_clock = sdhci_set_clock,
.set_power = sdhci_set_power_and_bus_voltage,
.get_max_clock = sdhci_iproc_get_max_clock,
.get_min_clock = sdhci_iproc_bcm2711_get_min_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = {
.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.ops = &sdhci_iproc_bcm2711_ops,
};
static const struct sdhci_iproc_data bcm2711_data = {
.pdata = &sdhci_bcm2711_pltfm_data,
.mmc_caps = MMC_CAP_3_3V_DDR,
};
static const struct sdhci_pltfm_data sdhci_bcm7211a0_pltfm_data = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_BROKEN_DMA |
SDHCI_QUIRK_BROKEN_ADMA,
.ops = &sdhci_iproc_ops,
};
#define BCM7211A0_BASE_CLK_MHZ 100
static const struct sdhci_iproc_data bcm7211a0_data = {
.pdata = &sdhci_bcm7211a0_pltfm_data,
.caps = ((BCM7211A0_BASE_CLK_MHZ / 2) << SDHCI_TIMEOUT_CLK_SHIFT) |
(BCM7211A0_BASE_CLK_MHZ << SDHCI_CLOCK_BASE_SHIFT) |
((0x2 << SDHCI_MAX_BLOCK_SHIFT)
& SDHCI_MAX_BLOCK_MASK) |
SDHCI_CAN_VDD_330 |
SDHCI_CAN_VDD_180 |
SDHCI_CAN_DO_SUSPEND |
SDHCI_CAN_DO_HISPD,
.caps1 = SDHCI_DRIVER_TYPE_C |
SDHCI_DRIVER_TYPE_D,
.missing_caps = true,
};
static const struct of_device_id sdhci_iproc_of_match[] = {
{ .compatible = "brcm,bcm2835-sdhci", .data = &bcm2835_data },
{ .compatible = "brcm,bcm2711-emmc2", .data = &bcm2711_data },
{ .compatible = "brcm,sdhci-iproc-cygnus", .data = &iproc_cygnus_data},
{ .compatible = "brcm,sdhci-iproc", .data = &iproc_data },
{ .compatible = "brcm,bcm7211a0-sdhci", .data = &bcm7211a0_data },
{ }
};
MODULE_DEVICE_TABLE(of, sdhci_iproc_of_match);
#ifdef CONFIG_ACPI
/*
* This is a duplicate of bcm2835_(pltfm_)data without caps quirks
* which are provided by the ACPI table.
*/
static const struct sdhci_pltfm_data sdhci_bcm_arasan_data = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_NO_HISPD_BIT,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.ops = &sdhci_iproc_32only_ops,
};
static const struct sdhci_iproc_data bcm_arasan_data = {
.pdata = &sdhci_bcm_arasan_data,
};
static const struct acpi_device_id sdhci_iproc_acpi_ids[] = {
{ .id = "BRCM5871", .driver_data = (kernel_ulong_t)&iproc_cygnus_data },
{ .id = "BRCM5872", .driver_data = (kernel_ulong_t)&iproc_data },
{ .id = "BCM2847", .driver_data = (kernel_ulong_t)&bcm_arasan_data },
{ .id = "BRCME88C", .driver_data = (kernel_ulong_t)&bcm2711_data },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(acpi, sdhci_iproc_acpi_ids);
#endif
static int sdhci_iproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct sdhci_iproc_data *iproc_data = NULL;
struct sdhci_host *host;
struct sdhci_iproc_host *iproc_host;
struct sdhci_pltfm_host *pltfm_host;
int ret;
iproc_data = device_get_match_data(dev);
if (!iproc_data)
return -ENODEV;
host = sdhci_pltfm_init(pdev, iproc_data->pdata, sizeof(*iproc_host));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
iproc_host = sdhci_pltfm_priv(pltfm_host);
iproc_host->data = iproc_data;
ret = mmc_of_parse(host->mmc);
if (ret)
goto err;
sdhci_get_property(pdev);
host->mmc->caps |= iproc_host->data->mmc_caps;
if (dev->of_node) {
pltfm_host->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(pltfm_host->clk)) {
ret = PTR_ERR(pltfm_host->clk);
goto err;
}
}
if (iproc_host->data->missing_caps) {
__sdhci_read_caps(host, NULL,
&iproc_host->data->caps,
&iproc_host->data->caps1);
}
ret = sdhci_add_host(host);
if (ret)
goto err;
return 0;
err:
sdhci_pltfm_free(pdev);
return ret;
}
static void sdhci_iproc_shutdown(struct platform_device *pdev)
{
sdhci_pltfm_suspend(&pdev->dev);
}
static struct platform_driver sdhci_iproc_driver = {
.driver = {
.name = "sdhci-iproc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_iproc_of_match,
.acpi_match_table = ACPI_PTR(sdhci_iproc_acpi_ids),
.pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_iproc_probe,
.remove_new = sdhci_pltfm_remove,
.shutdown = sdhci_iproc_shutdown,
};
module_platform_driver(sdhci_iproc_driver);
MODULE_AUTHOR("Broadcom");
MODULE_DESCRIPTION("IPROC SDHCI driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mmc/host/sdhci-iproc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
*
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
* Copyright (C) 2010 ST-Ericsson SA
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>
#include <linux/gpio/consumer.h>
#include <linux/workqueue.h>
#include <asm/div64.h>
#include <asm/io.h>
#include "mmci.h"
#define DRIVER_NAME "mmci-pl18x"
static void mmci_variant_init(struct mmci_host *host);
static void ux500_variant_init(struct mmci_host *host);
static void ux500v2_variant_init(struct mmci_host *host);
static unsigned int fmax = 515633;
static struct variant_data variant_arm = {
.fifosize = 16 * 4,
.fifohalfsize = 8 * 4,
.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 16,
.datactrl_blocksz = 11,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
.reversed_irq_handling = true,
.mmcimask1 = true,
.irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_ROD,
.init = mmci_variant_init,
};
static struct variant_data variant_arm_extended_fifo = {
.fifosize = 128 * 4,
.fifohalfsize = 64 * 4,
.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 16,
.datactrl_blocksz = 11,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
.mmcimask1 = true,
.irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_ROD,
.init = mmci_variant_init,
};
static struct variant_data variant_arm_extended_fifo_hwfc = {
.fifosize = 128 * 4,
.fifohalfsize = 64 * 4,
.clkreg_enable = MCI_ARM_HWFCEN,
.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 16,
.datactrl_blocksz = 11,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
.mmcimask1 = true,
.irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_ROD,
.init = mmci_variant_init,
};
static struct variant_data variant_u300 = {
.fifosize = 16 * 4,
.fifohalfsize = 8 * 4,
.clkreg_enable = MCI_ST_U300_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 16,
.datactrl_blocksz = 11,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.pwrreg_powerup = MCI_PWR_ON,
.f_max = 100000000,
.signal_direction = true,
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
.mmcimask1 = true,
.irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_OD,
.init = mmci_variant_init,
};
static struct variant_data variant_nomadik = {
.fifosize = 16 * 4,
.fifohalfsize = 8 * 4,
.clkreg = MCI_CLK_ENABLE,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 24,
.datactrl_blocksz = 11,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
.pwrreg_powerup = MCI_PWR_ON,
.f_max = 100000000,
.signal_direction = true,
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
.mmcimask1 = true,
.irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_OD,
.init = mmci_variant_init,
};
static struct variant_data variant_ux500 = {
.fifosize = 30 * 4,
.fifohalfsize = 8 * 4,
.clkreg = MCI_CLK_ENABLE,
.clkreg_enable = MCI_ST_UX500_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 24,
.datactrl_blocksz = 11,
.datactrl_any_blocksz = true,
.dma_power_of_2 = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
.pwrreg_powerup = MCI_PWR_ON,
.f_max = 100000000,
.signal_direction = true,
.pwrreg_clkgate = true,
.busy_detect = true,
.busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE,
.busy_detect_flag = MCI_ST_CARDBUSY,
.busy_detect_mask = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
.mmcimask1 = true,
.irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_OD,
.init = ux500_variant_init,
};
static struct variant_data variant_ux500v2 = {
.fifosize = 30 * 4,
.fifohalfsize = 8 * 4,
.clkreg = MCI_CLK_ENABLE,
.clkreg_enable = MCI_ST_UX500_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE,
.datalength_bits = 24,
.datactrl_blocksz = 11,
.datactrl_any_blocksz = true,
.dma_power_of_2 = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
.pwrreg_powerup = MCI_PWR_ON,
.f_max = 100000000,
.signal_direction = true,
.pwrreg_clkgate = true,
.busy_detect = true,
.busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE,
.busy_detect_flag = MCI_ST_CARDBUSY,
.busy_detect_mask = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
.mmcimask1 = true,
.irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_OD,
.init = ux500v2_variant_init,
};
static struct variant_data variant_stm32 = {
.fifosize = 32 * 4,
.fifohalfsize = 8 * 4,
.clkreg = MCI_CLK_ENABLE,
.clkreg_enable = MCI_ST_UX500_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.irq_pio_mask = MCI_IRQ_PIO_MASK,
.datalength_bits = 24,
.datactrl_blocksz = 11,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
.pwrreg_powerup = MCI_PWR_ON,
.f_max = 48000000,
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
.init = mmci_variant_init,
};
static struct variant_data variant_stm32_sdmmc = {
.fifosize = 16 * 4,
.fifohalfsize = 8 * 4,
.f_max = 208000000,
.stm32_clkdiv = true,
.cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
.cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
.cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
.cmdreg_srsp = MCI_CPSM_STM32_SRSP,
.cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
.data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
.irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
.datactrl_first = true,
.datacnt_useless = true,
.datalength_bits = 25,
.datactrl_blocksz = 14,
.datactrl_any_blocksz = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.stm32_idmabsize_mask = GENMASK(12, 5),
.stm32_idmabsize_align = BIT(5),
.busy_timeout = true,
.busy_detect = true,
.busy_detect_flag = MCI_STM32_BUSYD0,
.busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
.init = sdmmc_variant_init,
};
static struct variant_data variant_stm32_sdmmcv2 = {
.fifosize = 16 * 4,
.fifohalfsize = 8 * 4,
.f_max = 267000000,
.stm32_clkdiv = true,
.cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
.cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
.cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
.cmdreg_srsp = MCI_CPSM_STM32_SRSP,
.cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
.data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
.irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
.datactrl_first = true,
.datacnt_useless = true,
.datalength_bits = 25,
.datactrl_blocksz = 14,
.datactrl_any_blocksz = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.stm32_idmabsize_mask = GENMASK(16, 5),
.stm32_idmabsize_align = BIT(5),
.dma_lli = true,
.busy_timeout = true,
.busy_detect = true,
.busy_detect_flag = MCI_STM32_BUSYD0,
.busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
.init = sdmmc_variant_init,
};
static struct variant_data variant_stm32_sdmmcv3 = {
.fifosize = 256 * 4,
.fifohalfsize = 128 * 4,
.f_max = 267000000,
.stm32_clkdiv = true,
.cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
.cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
.cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
.cmdreg_srsp = MCI_CPSM_STM32_SRSP,
.cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
.data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
.irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
.datactrl_first = true,
.datacnt_useless = true,
.datalength_bits = 25,
.datactrl_blocksz = 14,
.datactrl_any_blocksz = true,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.stm32_idmabsize_mask = GENMASK(16, 6),
.stm32_idmabsize_align = BIT(6),
.dma_lli = true,
.busy_timeout = true,
.busy_detect = true,
.busy_detect_flag = MCI_STM32_BUSYD0,
.busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
.init = sdmmc_variant_init,
};
static struct variant_data variant_qcom = {
.fifosize = 16 * 4,
.fifohalfsize = 8 * 4,
.clkreg = MCI_CLK_ENABLE,
.clkreg_enable = MCI_QCOM_CLK_FLOWENA |
MCI_QCOM_CLK_SELECT_IN_FBCLK,
.clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
.datactrl_mask_ddrmode = MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
.cmdreg_srsp = MCI_CPSM_RESPONSE,
.data_cmd_enable = MCI_CPSM_QCOM_DATCMD,
.datalength_bits = 24,
.datactrl_blocksz = 11,
.datactrl_any_blocksz = true,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 208000000,
.explicit_mclk_control = true,
.qcom_fifo = true,
.qcom_dml = true,
.mmcimask1 = true,
.irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_ROD,
.init = qcom_variant_init,
};
/* Busy detection for the ST Micro variant */
static int mmci_card_busy(struct mmc_host *mmc)
{
struct mmci_host *host = mmc_priv(mmc);
unsigned long flags;
int busy = 0;
spin_lock_irqsave(&host->lock, flags);
if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag)
busy = 1;
spin_unlock_irqrestore(&host->lock, flags);
return busy;
}
static void mmci_reg_delay(struct mmci_host *host)
{
/*
* According to the spec, at least three feedback clock cycles
* of max 52 MHz must pass between two writes to the MMCICLOCK reg.
* Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
* Worst delay time during card init is at 100 kHz => 30 us.
* Worst delay time when up and running is at 25 MHz => 120 ns.
*/
if (host->cclk < 25000000)
udelay(30);
else
ndelay(120);
}
/*
* This must be called with host->lock held
*/
void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
if (host->clk_reg != clk) {
host->clk_reg = clk;
writel(clk, host->base + MMCICLOCK);
}
}
/*
* This must be called with host->lock held
*/
void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
if (host->pwr_reg != pwr) {
host->pwr_reg = pwr;
writel(pwr, host->base + MMCIPOWER);
}
}
/*
* This must be called with host->lock held
*/
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
/* Keep busy mode in DPSM if enabled */
datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;
if (host->datactrl_reg != datactrl) {
host->datactrl_reg = datactrl;
writel(datactrl, host->base + MMCIDATACTRL);
}
}
/*
* This must be called with host->lock held
*/
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
struct variant_data *variant = host->variant;
u32 clk = variant->clkreg;
/* Make sure cclk reflects the current calculated clock */
host->cclk = 0;
if (desired) {
if (variant->explicit_mclk_control) {
host->cclk = host->mclk;
} else if (desired >= host->mclk) {
clk = MCI_CLK_BYPASS;
if (variant->st_clkdiv)
clk |= MCI_ST_UX500_NEG_EDGE;
host->cclk = host->mclk;
} else if (variant->st_clkdiv) {
/*
* DB8500 TRM says f = mclk / (clkdiv + 2)
* => clkdiv = (mclk / f) - 2
* Round the divider up so we don't exceed the max
* frequency
*/
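/*
 * Worked example (illustrative values): with mclk = 100 MHz and a
 * desired rate of 400 kHz, clkdiv = DIV_ROUND_UP(100000000, 400000)
 * - 2 = 248, giving cclk = 100000000 / (248 + 2) = 400 kHz.
 */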
clk = DIV_ROUND_UP(host->mclk, desired) - 2;
if (clk >= 256)
clk = 255;
host->cclk = host->mclk / (clk + 2);
} else {
/*
* PL180 TRM says f = mclk / (2 * (clkdiv + 1))
* => clkdiv = mclk / (2 * f) - 1
*/
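/*
 * Worked example (illustrative values): with mclk = 100 MHz and a
 * desired rate of 400 kHz, clkdiv = 100000000 / (2 * 400000) - 1 = 124,
 * giving cclk = 100000000 / (2 * (124 + 1)) = 400 kHz.
 */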
clk = host->mclk / (2 * desired) - 1;
if (clk >= 256)
clk = 255;
host->cclk = host->mclk / (2 * (clk + 1));
}
clk |= variant->clkreg_enable;
clk |= MCI_CLK_ENABLE;
/* This hasn't proven to be worthwhile */
/* clk |= MCI_CLK_PWRSAVE; */
}
/* Set actual clock for debug */
host->mmc->actual_clock = host->cclk;
if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
clk |= MCI_4BIT_BUS;
if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
clk |= variant->clkreg_8bit_bus_enable;
if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
clk |= variant->clkreg_neg_edge_enable;
mmci_write_clkreg(host, clk);
}
static void mmci_dma_release(struct mmci_host *host)
{
if (host->ops && host->ops->dma_release)
host->ops->dma_release(host);
host->use_dma = false;
}
static void mmci_dma_setup(struct mmci_host *host)
{
if (!host->ops || !host->ops->dma_setup)
return;
if (host->ops->dma_setup(host))
return;
/* initialize pre request cookie */
host->next_cookie = 1;
host->use_dma = true;
}
/*
* Validate mmc prerequisites
*/
static int mmci_validate_data(struct mmci_host *host,
struct mmc_data *data)
{
struct variant_data *variant = host->variant;
if (!data)
return 0;
if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) {
dev_err(mmc_dev(host->mmc),
"unsupported block size (%d bytes)\n", data->blksz);
return -EINVAL;
}
if (host->ops && host->ops->validate_data)
return host->ops->validate_data(host, data);
return 0;
}
static int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
{
int err;
if (!host->ops || !host->ops->prep_data)
return 0;
err = host->ops->prep_data(host, data, next);
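/*
 * A non-zero cookie marks data as prepared; 0 means "not prepared".
 * If the signed counter wraps to a negative value, restart it at 1.
 */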
if (next && !err)
data->host_cookie = ++host->next_cookie < 0 ?
1 : host->next_cookie;
return err;
}
static void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
int err)
{
if (host->ops && host->ops->unprep_data)
host->ops->unprep_data(host, data, err);
data->host_cookie = 0;
}
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);
if (host->ops && host->ops->get_next_data)
host->ops->get_next_data(host, data);
}
static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
{
struct mmc_data *data = host->data;
int ret;
if (!host->use_dma)
return -EINVAL;
ret = mmci_prep_data(host, data, false);
if (ret)
return ret;
if (!host->ops || !host->ops->dma_start)
return -EINVAL;
/* Okay, go for it. */
dev_vdbg(mmc_dev(host->mmc),
"Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
data->sg_len, data->blksz, data->blocks, data->flags);
ret = host->ops->dma_start(host, &datactrl);
if (ret)
return ret;
/* Trigger the DMA transfer */
mmci_write_datactrlreg(host, datactrl);
/*
* Let the MMCI say when the data is ended and it's time
* to fire the next DMA request. When that happens, MMCI will
* call mmci_data_end().
*/
writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
host->base + MMCIMASK0);
return 0;
}
static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
if (!host->use_dma)
return;
if (host->ops && host->ops->dma_finalize)
host->ops->dma_finalize(host, data);
}
static void mmci_dma_error(struct mmci_host *host)
{
if (!host->use_dma)
return;
if (host->ops && host->ops->dma_error)
host->ops->dma_error(host);
}
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
writel(0, host->base + MMCICOMMAND);
BUG_ON(host->data);
host->mrq = NULL;
host->cmd = NULL;
mmc_request_done(host->mmc, mrq);
}
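/*
 * With a single IRQ line (host->singleirq), the PIO interrupts normally
 * routed via MASK1 are folded into MASK0 instead; MASK1 is still written
 * on variants that actually have the register.
 */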
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
void __iomem *base = host->base;
struct variant_data *variant = host->variant;
if (host->singleirq) {
unsigned int mask0 = readl(base + MMCIMASK0);
mask0 &= ~variant->irq_pio_mask;
mask0 |= mask;
writel(mask0, base + MMCIMASK0);
}
if (variant->mmcimask1)
writel(mask, base + MMCIMASK1);
host->mask1_reg = mask;
}
static void mmci_stop_data(struct mmci_host *host)
{
mmci_write_datactrlreg(host, 0);
mmci_set_mask1(host, 0);
host->data = NULL;
}
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
unsigned int flags = SG_MITER_ATOMIC;
if (data->flags & MMC_DATA_READ)
flags |= SG_MITER_TO_SG;
else
flags |= SG_MITER_FROM_SG;
sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
static u32 mmci_get_dctrl_cfg(struct mmci_host *host)
{
return MCI_DPSM_ENABLE | mmci_dctrl_blksz(host);
}
static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
{
return MCI_DPSM_ENABLE | (host->data->blksz << 16);
}
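/*
 * Clear any pending busy-detect IRQ, mask further busy-detect IRQs and
 * mark the busy state machine as done.
 */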
static void ux500_busy_clear_mask_done(struct mmci_host *host)
{
void __iomem *base = host->base;
writel(host->variant->busy_detect_mask, base + MMCICLEAR);
writel(readl(base + MMCIMASK0) &
~host->variant->busy_detect_mask, base + MMCIMASK0);
host->busy_state = MMCI_BUSY_DONE;
host->busy_status = 0;
}
/*
* ux500_busy_complete() - this will wait until the busy status
* goes off, saving any status that occurs in the meantime into
* host->busy_status, until we know the card is not busy any more.
* The function returns true when busy detection has ended and we
* should continue processing the command.
*
* The Ux500 typically fires two IRQs over a busy cycle like this:
*
* DAT0 busy +-----------------+
* | |
* DAT0 not busy ----+ +--------
*
* ^ ^
* | |
* IRQ1 IRQ2
*/
static bool ux500_busy_complete(struct mmci_host *host, struct mmc_command *cmd,
u32 status, u32 err_msk)
{
void __iomem *base = host->base;
int retries = 10;
if (status & err_msk) {
/* Stop any ongoing busy detection if an error occurs */
ux500_busy_clear_mask_done(host);
goto out_ret_state;
}
/*
* The state transitions are encoded in a state machine crossing
* the edges in this switch statement.
*/
switch (host->busy_state) {
/*
* Before unmasking for the busy end IRQ, confirm that the
* command was sent successfully. To keep track of having a
* command in-progress, waiting for busy signaling to end,
* store the status in host->busy_status.
*
* Note that the card may need a couple of clock cycles before
* it starts signaling busy on DAT0, hence re-read the
* MMCISTATUS register here, to allow the busy bit to be set.
*/
case MMCI_BUSY_DONE:
/*
* Save the first status register read to be sure to catch
* all bits that may be lost while retrying. If the command
* is still busy this will result in assigning 0 to
* host->busy_status, which is what it should be in IDLE.
*/
host->busy_status = status & (MCI_CMDSENT | MCI_CMDRESPEND);
while (retries) {
status = readl(base + MMCISTATUS);
/* Keep accumulating status bits */
host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
if (status & host->variant->busy_detect_flag) {
writel(readl(base + MMCIMASK0) |
host->variant->busy_detect_mask,
base + MMCIMASK0);
host->busy_state = MMCI_BUSY_WAITING_FOR_START_IRQ;
schedule_delayed_work(&host->ux500_busy_timeout_work,
msecs_to_jiffies(cmd->busy_timeout));
goto out_ret_state;
}
retries--;
}
dev_dbg(mmc_dev(host->mmc),
"no busy signalling in time CMD%02x\n", cmd->opcode);
ux500_busy_clear_mask_done(host);
break;
/*
* If there is a command in-progress that has been successfully
* sent, then bail out if busy status is set and wait for the
* busy end IRQ.
*
* Note that the HW triggers an IRQ on both edges while
* monitoring DAT0 for busy completion, but there is only one
* status bit in MMCISTATUS for the busy state. Therefore
* both the start and the end interrupts need to be cleared,
* one after the other. So, clear the busy start IRQ here.
*/
case MMCI_BUSY_WAITING_FOR_START_IRQ:
if (status & host->variant->busy_detect_flag) {
host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
writel(host->variant->busy_detect_mask, base + MMCICLEAR);
host->busy_state = MMCI_BUSY_WAITING_FOR_END_IRQ;
} else {
dev_dbg(mmc_dev(host->mmc),
"lost busy status when waiting for busy start IRQ CMD%02x\n",
cmd->opcode);
cancel_delayed_work(&host->ux500_busy_timeout_work);
ux500_busy_clear_mask_done(host);
}
break;
case MMCI_BUSY_WAITING_FOR_END_IRQ:
if (!(status & host->variant->busy_detect_flag)) {
host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
writel(host->variant->busy_detect_mask, base + MMCICLEAR);
cancel_delayed_work(&host->ux500_busy_timeout_work);
ux500_busy_clear_mask_done(host);
} else {
dev_dbg(mmc_dev(host->mmc),
"busy status still asserted when handling busy end IRQ - will keep waiting CMD%02x\n",
cmd->opcode);
}
break;
default:
dev_dbg(mmc_dev(host->mmc), "fell through on state %d, CMD%02x\n",
host->busy_state, cmd->opcode);
break;
}
out_ret_state:
return (host->busy_state == MMCI_BUSY_DONE);
}
/*
* All the DMA operation mode stuff goes inside this ifdef.
* This assumes that you have a generic DMA device interface;
* no custom DMA interfaces are supported.
*/
#ifdef CONFIG_DMA_ENGINE
struct mmci_dmae_next {
struct dma_async_tx_descriptor *desc;
struct dma_chan *chan;
};
struct mmci_dmae_priv {
struct dma_chan *cur;
struct dma_chan *rx_channel;
struct dma_chan *tx_channel;
struct dma_async_tx_descriptor *desc_current;
struct mmci_dmae_next next_data;
};
int mmci_dmae_setup(struct mmci_host *host)
{
const char *rxname, *txname;
struct mmci_dmae_priv *dmae;
dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL);
if (!dmae)
return -ENOMEM;
host->dma_priv = dmae;
dmae->rx_channel = dma_request_chan(mmc_dev(host->mmc), "rx");
if (IS_ERR(dmae->rx_channel)) {
int ret = PTR_ERR(dmae->rx_channel);
dmae->rx_channel = NULL;
return ret;
}
dmae->tx_channel = dma_request_chan(mmc_dev(host->mmc), "tx");
if (IS_ERR(dmae->tx_channel)) {
if (PTR_ERR(dmae->tx_channel) == -EPROBE_DEFER)
dev_warn(mmc_dev(host->mmc),
"Deferred probe for TX channel ignored\n");
dmae->tx_channel = NULL;
}
/*
* If only an RX channel is specified, the driver will
* attempt to use it bidirectionally; however, if it
* is specified but cannot be located, DMA will be disabled.
*/
if (dmae->rx_channel && !dmae->tx_channel)
dmae->tx_channel = dmae->rx_channel;
if (dmae->rx_channel)
rxname = dma_chan_name(dmae->rx_channel);
else
rxname = "none";
if (dmae->tx_channel)
txname = dma_chan_name(dmae->tx_channel);
else
txname = "none";
dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
rxname, txname);
/*
* Limit the maximum segment size in any SG entry according to
* the parameters of the DMA engine device.
*/
if (dmae->tx_channel) {
struct device *dev = dmae->tx_channel->device->dev;
unsigned int max_seg_size = dma_get_max_seg_size(dev);
if (max_seg_size < host->mmc->max_seg_size)
host->mmc->max_seg_size = max_seg_size;
}
if (dmae->rx_channel) {
struct device *dev = dmae->rx_channel->device->dev;
unsigned int max_seg_size = dma_get_max_seg_size(dev);
if (max_seg_size < host->mmc->max_seg_size)
host->mmc->max_seg_size = max_seg_size;
}
if (!dmae->tx_channel || !dmae->rx_channel) {
mmci_dmae_release(host);
return -EINVAL;
}
return 0;
}
/*
* Release the DMA channels acquired in mmci_dmae_setup(). This is also
* used on the setup error path, so it tolerates missing channels.
*/
void mmci_dmae_release(struct mmci_host *host)
{
struct mmci_dmae_priv *dmae = host->dma_priv;
if (dmae->rx_channel)
dma_release_channel(dmae->rx_channel);
if (dmae->tx_channel)
dma_release_channel(dmae->tx_channel);
dmae->rx_channel = dmae->tx_channel = NULL;
}
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
struct mmci_dmae_priv *dmae = host->dma_priv;
struct dma_chan *chan;
if (data->flags & MMC_DATA_READ)
chan = dmae->rx_channel;
else
chan = dmae->tx_channel;
dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
mmc_get_dma_dir(data));
}
void mmci_dmae_error(struct mmci_host *host)
{
struct mmci_dmae_priv *dmae = host->dma_priv;
if (!dma_inprogress(host))
return;
dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
dmaengine_terminate_all(dmae->cur);
host->dma_in_progress = false;
dmae->cur = NULL;
dmae->desc_current = NULL;
host->data->host_cookie = 0;
mmci_dma_unmap(host, host->data);
}
void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data)
{
struct mmci_dmae_priv *dmae = host->dma_priv;
u32 status;
int i;
if (!dma_inprogress(host))
return;
/* Wait up to 1ms for the DMA to complete */
for (i = 0; ; i++) {
status = readl(host->base + MMCISTATUS);
if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
break;
udelay(10);
}
/*
* Check to see whether we still have some data left in the FIFO -
* this catches DMA controllers which are unable to monitor the
* DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
* contiguous buffers. On TX, we'll get a FIFO underrun error.
*/
if (status & MCI_RXDATAAVLBLMASK) {
mmci_dma_error(host);
if (!data->error)
data->error = -EIO;
} else if (!data->host_cookie) {
mmci_dma_unmap(host, data);
}
/*
* Use of DMA with scatter-gather is impossible on this setup.
* Give up on DMA and switch back to PIO mode.
*/
if (status & MCI_RXDATAAVLBLMASK) {
dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
mmci_dma_release(host);
}
host->dma_in_progress = false;
dmae->cur = NULL;
dmae->desc_current = NULL;
}
/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
struct dma_chan **dma_chan,
struct dma_async_tx_descriptor **dma_desc)
{
struct mmci_dmae_priv *dmae = host->dma_priv;
struct variant_data *variant = host->variant;
struct dma_slave_config conf = {
.src_addr = host->phybase + MMCIFIFO,
.dst_addr = host->phybase + MMCIFIFO,
.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
.device_fc = false,
};
struct dma_chan *chan;
struct dma_device *device;
struct dma_async_tx_descriptor *desc;
int nr_sg;
unsigned long flags = DMA_CTRL_ACK;
if (data->flags & MMC_DATA_READ) {
conf.direction = DMA_DEV_TO_MEM;
chan = dmae->rx_channel;
} else {
conf.direction = DMA_MEM_TO_DEV;
chan = dmae->tx_channel;
}
/* If there's no DMA channel, fall back to PIO */
if (!chan)
return -EINVAL;
/* If less than or equal to the fifo size, don't bother with DMA */
if (data->blksz * data->blocks <= variant->fifosize)
return -EINVAL;
/*
* This is necessary to get SDIO working on the Ux500. We do not yet
* know if this is a bug in:
* - The Ux500 DMA controller (DMA40)
* - The MMCI DMA interface on the Ux500
* Some power-of-two block sizes (such as 64 bytes) are sent regularly
* during SDIO traffic and those work fine, so for these we enable DMA
* transfers.
*/
if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz))
return -EINVAL;
device = chan->device;
nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
mmc_get_dma_dir(data));
if (nr_sg == 0)
return -EINVAL;
if (host->variant->qcom_dml)
flags |= DMA_PREP_INTERRUPT;
dmaengine_slave_config(chan, &conf);
desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
conf.direction, flags);
if (!desc)
goto unmap_exit;
*dma_chan = chan;
*dma_desc = desc;
return 0;
unmap_exit:
dma_unmap_sg(device->dev, data->sg, data->sg_len,
mmc_get_dma_dir(data));
return -ENOMEM;
}
int mmci_dmae_prep_data(struct mmci_host *host,
struct mmc_data *data,
bool next)
{
struct mmci_dmae_priv *dmae = host->dma_priv;
struct mmci_dmae_next *nd = &dmae->next_data;
if (!host->use_dma)
return -EINVAL;
if (next)
return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc);
/* Check if next job is already prepared. */
if (dmae->cur && dmae->desc_current)
return 0;
/* No job was prepared, so do it now. */
return _mmci_dmae_prep_data(host, data, &dmae->cur,
&dmae->desc_current);
}
int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
{
struct mmci_dmae_priv *dmae = host->dma_priv;
int ret;
host->dma_in_progress = true;
ret = dma_submit_error(dmaengine_submit(dmae->desc_current));
if (ret < 0) {
host->dma_in_progress = false;
return ret;
}
dma_async_issue_pending(dmae->cur);
*datactrl |= MCI_DPSM_DMAENABLE;
return 0;
}
void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
struct mmci_dmae_priv *dmae = host->dma_priv;
struct mmci_dmae_next *next = &dmae->next_data;
if (!host->use_dma)
return;
WARN_ON(!data->host_cookie && (next->desc || next->chan));
dmae->desc_current = next->desc;
dmae->cur = next->chan;
next->desc = NULL;
next->chan = NULL;
}
void mmci_dmae_unprep_data(struct mmci_host *host,
struct mmc_data *data, int err)
{
struct mmci_dmae_priv *dmae = host->dma_priv;
if (!host->use_dma)
return;
mmci_dma_unmap(host, data);
if (err) {
struct mmci_dmae_next *next = &dmae->next_data;
struct dma_chan *chan;
if (data->flags & MMC_DATA_READ)
chan = dmae->rx_channel;
else
chan = dmae->tx_channel;
dmaengine_terminate_all(chan);
if (dmae->desc_current == next->desc)
dmae->desc_current = NULL;
if (dmae->cur == next->chan) {
host->dma_in_progress = false;
dmae->cur = NULL;
}
next->desc = NULL;
next->chan = NULL;
}
}
static struct mmci_host_ops mmci_variant_ops = {
.prep_data = mmci_dmae_prep_data,
.unprep_data = mmci_dmae_unprep_data,
.get_datactrl_cfg = mmci_get_dctrl_cfg,
.get_next_data = mmci_dmae_get_next_data,
.dma_setup = mmci_dmae_setup,
.dma_release = mmci_dmae_release,
.dma_start = mmci_dmae_start,
.dma_finalize = mmci_dmae_finalize,
.dma_error = mmci_dmae_error,
};
#else
static struct mmci_host_ops mmci_variant_ops = {
.get_datactrl_cfg = mmci_get_dctrl_cfg,
};
#endif
static void mmci_variant_init(struct mmci_host *host)
{
host->ops = &mmci_variant_ops;
}
static void ux500_variant_init(struct mmci_host *host)
{
host->ops = &mmci_variant_ops;
host->ops->busy_complete = ux500_busy_complete;
}
static void ux500v2_variant_init(struct mmci_host *host)
{
host->ops = &mmci_variant_ops;
host->ops->busy_complete = ux500_busy_complete;
host->ops->get_datactrl_cfg = ux500v2_get_dctrl_cfg;
}
static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct mmci_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
if (!data)
return;
WARN_ON(data->host_cookie);
if (mmci_validate_data(host, data))
return;
mmci_prep_data(host, data, true);
}
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
int err)
{
struct mmci_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
if (!data || !data->host_cookie)
return;
mmci_unprep_data(host, data, err);
}
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
struct variant_data *variant = host->variant;
unsigned int datactrl, timeout, irqmask;
unsigned long long clks;
void __iomem *base;
dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
data->blksz, data->blocks, data->flags);
host->data = data;
host->size = data->blksz * data->blocks;
data->bytes_xfered = 0;
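/*
 * Convert the card's nanosecond timeout into cycles of the current
 * card clock and add the fixed clock-cycle part of the timeout.
 */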
clks = (unsigned long long)data->timeout_ns * host->cclk;
do_div(clks, NSEC_PER_SEC);
timeout = data->timeout_clks + (unsigned int)clks;
base = host->base;
writel(timeout, base + MMCIDATATIMER);
writel(host->size, base + MMCIDATALENGTH);
datactrl = host->ops->get_datactrl_cfg(host);
datactrl |= host->data->flags & MMC_DATA_READ ? MCI_DPSM_DIRECTION : 0;
if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
u32 clk;
datactrl |= variant->datactrl_mask_sdio;
/*
* The ST Micro variant for SDIO small write transfers
* needs to have clock H/W flow control disabled,
* otherwise the transfer will not start. The threshold
* depends on the rate of MCLK.
*/
if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
(host->size < 8 ||
(host->size <= 8 && host->mclk > 50000000)))
clk = host->clk_reg & ~variant->clkreg_enable;
else
clk = host->clk_reg | variant->clkreg_enable;
mmci_write_clkreg(host, clk);
}
if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
datactrl |= variant->datactrl_mask_ddrmode;
/*
* Attempt to use DMA operation mode, if this
* should fail, fall back to PIO mode
*/
if (!mmci_dma_start(host, datactrl))
return;
/* IRQ mode, map the SG list for CPU reading/writing */
mmci_init_sg(host, data);
if (data->flags & MMC_DATA_READ) {
irqmask = MCI_RXFIFOHALFFULLMASK;
/*
* If we have less than the fifo 'half-full' threshold to
* transfer, trigger a PIO interrupt as soon as any data
* is available.
*/
if (host->size < variant->fifohalfsize)
irqmask |= MCI_RXDATAAVLBLMASK;
} else {
/*
* We don't actually need to include "FIFO empty" here
* since it's implicit in "FIFO half empty".
*/
irqmask = MCI_TXFIFOHALFEMPTYMASK;
}
mmci_write_datactrlreg(host, datactrl);
writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
mmci_set_mask1(host, irqmask);
}
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
void __iomem *base = host->base;
bool busy_resp = cmd->flags & MMC_RSP_BUSY;
unsigned long long clks;
dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
cmd->opcode, cmd->arg, cmd->flags);
if (readl(base + MMCICOMMAND) & host->variant->cmdreg_cpsm_enable) {
writel(0, base + MMCICOMMAND);
mmci_reg_delay(host);
}
if (host->variant->cmdreg_stop &&
cmd->opcode == MMC_STOP_TRANSMISSION)
c |= host->variant->cmdreg_stop;
c |= cmd->opcode | host->variant->cmdreg_cpsm_enable;
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136)
c |= host->variant->cmdreg_lrsp_crc;
else if (cmd->flags & MMC_RSP_CRC)
c |= host->variant->cmdreg_srsp_crc;
else
c |= host->variant->cmdreg_srsp;
}
host->busy_status = 0;
host->busy_state = MMCI_BUSY_DONE;
/* Assign a default timeout if the core does not provide one */
if (busy_resp && !cmd->busy_timeout)
cmd->busy_timeout = 10 * MSEC_PER_SEC;
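/*
 * Program the HW busy timer: convert the busy timeout (capped at the
 * host's maximum) from ms into card-clock cycles.
 */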
if (busy_resp && host->variant->busy_timeout) {
if (cmd->busy_timeout > host->mmc->max_busy_timeout)
clks = (unsigned long long)host->mmc->max_busy_timeout * host->cclk;
else
clks = (unsigned long long)cmd->busy_timeout * host->cclk;
do_div(clks, MSEC_PER_SEC);
writel_relaxed(clks, host->base + MMCIDATATIMER);
}
if (host->ops->pre_sig_volt_switch && cmd->opcode == SD_SWITCH_VOLTAGE)
host->ops->pre_sig_volt_switch(host);
if (/*interrupt*/0)
c |= MCI_CPSM_INTERRUPT;
if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
c |= host->variant->data_cmd_enable;
host->cmd = cmd;
writel(cmd->arg, base + MMCIARGUMENT);
writel(c, base + MMCICOMMAND);
}
static void mmci_stop_command(struct mmci_host *host)
{
host->stop_abort.error = 0;
mmci_start_command(host, &host->stop_abort, 0);
}
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
unsigned int status)
{
unsigned int status_err;
/* Make sure we have data to handle */
if (!data)
return;
/* First check for errors */
status_err = status & (host->variant->start_err |
MCI_DATACRCFAIL | MCI_DATATIMEOUT |
MCI_TXUNDERRUN | MCI_RXOVERRUN);
if (status_err) {
u32 remain, success;
/* Terminate the DMA transfer */
mmci_dma_error(host);
/*
* Calculate how far we are into the transfer. Note that
* the data counter gives the number of bytes transferred
* on the MMC bus, not on the host side. On reads, this
* can be as much as a FIFO-worth of data ahead. This
* matters for FIFO overruns only.
*/
if (!host->variant->datacnt_useless) {
remain = readl(host->base + MMCIDATACNT);
success = data->blksz * data->blocks - remain;
} else {
success = 0;
}
dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
status_err, success);
if (status_err & MCI_DATACRCFAIL) {
/* Last block was not successful */
success -= 1;
data->error = -EILSEQ;
} else if (status_err & MCI_DATATIMEOUT) {
data->error = -ETIMEDOUT;
} else if (status_err & MCI_STARTBITERR) {
data->error = -ECOMM;
} else if (status_err & MCI_TXUNDERRUN) {
data->error = -EIO;
} else if (status_err & MCI_RXOVERRUN) {
if (success > host->variant->fifosize)
success -= host->variant->fifosize;
else
success = 0;
data->error = -EIO;
}
data->bytes_xfered = round_down(success, data->blksz);
}
if (status & MCI_DATABLOCKEND)
dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
if (status & MCI_DATAEND || data->error) {
mmci_dma_finalize(host, data);
mmci_stop_data(host);
if (!data->error)
/* The error clause is handled above, success! */
data->bytes_xfered = data->blksz * data->blocks;
if (!data->stop) {
if (host->variant->cmdreg_stop && data->error)
mmci_stop_command(host);
else
mmci_request_end(host, data->mrq);
} else if (host->mrq->sbc && !data->error) {
mmci_request_end(host, data->mrq);
} else {
mmci_start_command(host, data->stop, 0);
}
}
}
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
unsigned int status)
{
u32 err_msk = MCI_CMDCRCFAIL | MCI_CMDTIMEOUT;
void __iomem *base = host->base;
bool sbc, busy_resp;
if (!cmd)
return;
sbc = (cmd == host->mrq->sbc);
busy_resp = !!(cmd->flags & MMC_RSP_BUSY);
/*
* The status must contain at least one of these interrupts for the
* command to be considered worth handling. Note that we also tag on
* any latent IRQs postponed while waiting for busy status.
*/
if (host->variant->busy_timeout && busy_resp)
err_msk |= MCI_DATATIMEOUT;
if (!((status | host->busy_status) &
(err_msk | MCI_CMDSENT | MCI_CMDRESPEND)))
return;
/* Handle busy detection on DAT0 if the variant supports it. */
if (busy_resp && host->variant->busy_detect)
if (!host->ops->busy_complete(host, cmd, status, err_msk))
return;
host->cmd = NULL;
if (status & MCI_CMDTIMEOUT) {
cmd->error = -ETIMEDOUT;
} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
cmd->error = -EILSEQ;
} else if (host->variant->busy_timeout && busy_resp &&
status & MCI_DATATIMEOUT) {
cmd->error = -ETIMEDOUT;
/*
* This will wake up mmci_irq_thread() which will issue
* a hardware reset of the MMCI block.
*/
host->irq_action = IRQ_WAKE_THREAD;
} else {
cmd->resp[0] = readl(base + MMCIRESPONSE0);
cmd->resp[1] = readl(base + MMCIRESPONSE1);
cmd->resp[2] = readl(base + MMCIRESPONSE2);
cmd->resp[3] = readl(base + MMCIRESPONSE3);
}
if ((!sbc && !cmd->data) || cmd->error) {
if (host->data) {
/* Terminate the DMA transfer */
mmci_dma_error(host);
mmci_stop_data(host);
if (host->variant->cmdreg_stop && cmd->error) {
mmci_stop_command(host);
return;
}
}
if (host->irq_action != IRQ_WAKE_THREAD)
mmci_request_end(host, host->mrq);
} else if (sbc) {
mmci_start_command(host, host->mrq->cmd, 0);
} else if (!host->variant->datactrl_first &&
!(cmd->data->flags & MMC_DATA_READ)) {
mmci_start_data(host, cmd->data);
}
}
static char *ux500_state_str(struct mmci_host *host)
{
switch (host->busy_state) {
case MMCI_BUSY_WAITING_FOR_START_IRQ:
return "waiting for start IRQ";
case MMCI_BUSY_WAITING_FOR_END_IRQ:
return "waiting for end IRQ";
case MMCI_BUSY_DONE:
return "not waiting for IRQs";
default:
return "unknown";
}
}
/*
* This busy timeout worker is used to "kick" the command IRQ if a
* busy detect IRQ fails to appear in reasonable time. Only used on
* variants with busy detection IRQ delivery.
*/
static void ux500_busy_timeout_work(struct work_struct *work)
{
struct mmci_host *host = container_of(work, struct mmci_host,
ux500_busy_timeout_work.work);
unsigned long flags;
u32 status;
spin_lock_irqsave(&host->lock, flags);
if (host->cmd) {
/* If we are still busy, let's tag on a cmd-timeout error. */
status = readl(host->base + MMCISTATUS);
if (status & host->variant->busy_detect_flag) {
status |= MCI_CMDTIMEOUT;
dev_err(mmc_dev(host->mmc),
"timeout in state %s still busy with CMD%02x\n",
ux500_state_str(host), host->cmd->opcode);
} else {
dev_err(mmc_dev(host->mmc),
"timeout in state %s waiting for busy CMD%02x\n",
ux500_state_str(host), host->cmd->opcode);
}
mmci_cmd_irq(host, host->cmd, status);
}
spin_unlock_irqrestore(&host->lock, flags);
}
static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
{
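/*
 * MMCIFIFOCNT holds the number of words still to be transferred;
 * subtracting it (converted to bytes) from the remaining byte count
 * gives the amount of data currently available in the FIFO.
 */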
return remain - (readl(host->base + MMCIFIFOCNT) << 2);
}
static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r)
{
/*
* On Qualcomm SDCC4, only 8 words are used in each burst, so only
* 8 addresses from the FIFO range should be used.
*/
if (status & MCI_RXFIFOHALFFULL)
return host->variant->fifohalfsize;
else if (status & MCI_RXDATAAVLBL)
return 4;
return 0;
}
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
void __iomem *base = host->base;
char *ptr = buffer;
u32 status = readl(host->base + MMCISTATUS);
int host_remain = host->size;
do {
int count = host->get_rx_fifocnt(host, status, host_remain);
if (count > remain)
count = remain;
if (count <= 0)
break;
/*
* SDIO especially may want to send something that is
* not divisible by 4 (as opposed to card sectors
* etc). Therefore make sure to always read the last bytes
* while only doing full 32-bit reads towards the FIFO.
*/
if (unlikely(count & 0x3)) {
if (count < 4) {
unsigned char buf[4];
ioread32_rep(base + MMCIFIFO, buf, 1);
memcpy(ptr, buf, count);
} else {
ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
count &= ~0x3;
}
} else {
ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
}
ptr += count;
remain -= count;
host_remain -= count;
if (remain == 0)
break;
status = readl(base + MMCISTATUS);
} while (status & MCI_RXDATAAVLBL);
return ptr - buffer;
}
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
struct variant_data *variant = host->variant;
void __iomem *base = host->base;
char *ptr = buffer;
do {
unsigned int count, maxcnt;
maxcnt = status & MCI_TXFIFOEMPTY ?
variant->fifosize : variant->fifohalfsize;
count = min(remain, maxcnt);
/*
* SDIO especially may want to send something that is
* not divisible by 4 (as opposed to card sectors
* etc), and the FIFO only accepts full 32-bit writes.
* So compensate by adding +3 to the count: a single
* byte becomes one 32-bit write, 7 bytes become two
* 32-bit writes, etc.
*/
iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);
ptr += count;
remain -= count;
if (remain == 0)
break;
status = readl(base + MMCISTATUS);
} while (status & MCI_TXFIFOHALFEMPTY);
return ptr - buffer;
}
/*
* PIO data transfer IRQ handler.
*/
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
struct mmci_host *host = dev_id;
struct sg_mapping_iter *sg_miter = &host->sg_miter;
struct variant_data *variant = host->variant;
void __iomem *base = host->base;
u32 status;
status = readl(base + MMCISTATUS);
dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
do {
unsigned int remain, len;
char *buffer;
/*
* For write, we only need to test the half-empty flag
* here - if the FIFO is completely empty, then by
* definition it is more than half empty.
*
* For read, check for data available.
*/
if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
break;
if (!sg_miter_next(sg_miter))
break;
buffer = sg_miter->addr;
remain = sg_miter->length;
len = 0;
if (status & MCI_RXACTIVE)
len = mmci_pio_read(host, buffer, remain);
if (status & MCI_TXACTIVE)
len = mmci_pio_write(host, buffer, remain, status);
sg_miter->consumed = len;
host->size -= len;
remain -= len;
if (remain)
break;
status = readl(base + MMCISTATUS);
} while (1);
sg_miter_stop(sg_miter);
/*
* If we have less than the fifo 'half-full' threshold to transfer,
* trigger a PIO interrupt as soon as any data is available.
*/
if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
/*
* If we run out of data, disable the data IRQs; this
* prevents a race where the FIFO becomes empty before
* the chip itself has disabled the data path, and
* stops us racing with our data end IRQ.
*/
if (host->size == 0) {
mmci_set_mask1(host, 0);
writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
}
return IRQ_HANDLED;
}
/*
* Handle completion of command and data transfers.
*/
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
struct mmci_host *host = dev_id;
u32 status;
spin_lock(&host->lock);
host->irq_action = IRQ_HANDLED;
do {
status = readl(host->base + MMCISTATUS);
if (!status)
break;
if (host->singleirq) {
if (status & host->mask1_reg)
mmci_pio_irq(irq, dev_id);
status &= ~host->variant->irq_pio_mask;
}
/*
* Busy detection is managed by mmci_cmd_irq(), including
* clearing the corresponding IRQ.
*/
status &= readl(host->base + MMCIMASK0);
if (host->variant->busy_detect)
writel(status & ~host->variant->busy_detect_mask,
host->base + MMCICLEAR);
else
writel(status, host->base + MMCICLEAR);
dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
if (host->variant->reversed_irq_handling) {
mmci_data_irq(host, host->data, status);
mmci_cmd_irq(host, host->cmd, status);
} else {
mmci_cmd_irq(host, host->cmd, status);
mmci_data_irq(host, host->data, status);
}
/*
* Busy detection has been handled by mmci_cmd_irq() above.
* Clear the status bit to prevent polling in IRQ context.
*/
if (host->variant->busy_detect_flag)
status &= ~host->variant->busy_detect_flag;
} while (status);
spin_unlock(&host->lock);
return host->irq_action;
}
/*
* mmci_irq_thread() - A threaded IRQ handler that manages a reset of the HW.
*
* A reset is needed for some variants, where a datatimeout for a R1B request
* causes the DPSM to stay busy (non-functional).
*/
static irqreturn_t mmci_irq_thread(int irq, void *dev_id)
{
struct mmci_host *host = dev_id;
unsigned long flags;
if (host->rst) {
reset_control_assert(host->rst);
udelay(2);
reset_control_deassert(host->rst);
}
spin_lock_irqsave(&host->lock, flags);
writel(host->clk_reg, host->base + MMCICLOCK);
writel(host->pwr_reg, host->base + MMCIPOWER);
writel(MCI_IRQENABLE | host->variant->start_err,
host->base + MMCIMASK0);
host->irq_action = IRQ_HANDLED;
mmci_request_end(host, host->mrq);
spin_unlock_irqrestore(&host->lock, flags);
return host->irq_action;
}
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct mmci_host *host = mmc_priv(mmc);
unsigned long flags;
WARN_ON(host->mrq != NULL);
mrq->cmd->error = mmci_validate_data(host, mrq->data);
if (mrq->cmd->error) {
mmc_request_done(mmc, mrq);
return;
}
spin_lock_irqsave(&host->lock, flags);
host->mrq = mrq;
if (mrq->data)
mmci_get_next_data(host, mrq->data);
if (mrq->data &&
(host->variant->datactrl_first || mrq->data->flags & MMC_DATA_READ))
mmci_start_data(host, mrq->data);
if (mrq->sbc)
mmci_start_command(host, mrq->sbc, 0);
else
mmci_start_command(host, mrq->cmd, 0);
spin_unlock_irqrestore(&host->lock, flags);
}
static void mmci_set_max_busy_timeout(struct mmc_host *mmc)
{
struct mmci_host *host = mmc_priv(mmc);
u32 max_busy_timeout = 0;
if (!host->variant->busy_detect)
return;
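/*
 * The HW busy timer counts card-clock cycles, so the longest
 * expressible timeout in ms is U32_MAX divided by the clock rate in
 * kHz; e.g. at 100 MHz that is 4294967295 / 100000 ~= 42949 ms.
 */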
if (host->variant->busy_timeout && mmc->actual_clock)
max_busy_timeout = U32_MAX / DIV_ROUND_UP(mmc->actual_clock,
MSEC_PER_SEC);
mmc->max_busy_timeout = max_busy_timeout;
}
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct mmci_host *host = mmc_priv(mmc);
struct variant_data *variant = host->variant;
u32 pwr = 0;
unsigned long flags;
int ret;
switch (ios->power_mode) {
case MMC_POWER_OFF:
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
regulator_disable(mmc->supply.vqmmc);
host->vqmmc_enabled = false;
}
break;
case MMC_POWER_UP:
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
/*
* The ST Micro variant doesn't have the PL180's MCI_PWR_UP
* and instead uses MCI_PWR_ON, so apply whatever value is
* configured in the variant data.
*/
pwr |= variant->pwrreg_powerup;
break;
case MMC_POWER_ON:
if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
ret = regulator_enable(mmc->supply.vqmmc);
if (ret < 0)
dev_err(mmc_dev(mmc),
"failed to enable vqmmc regulator\n");
else
host->vqmmc_enabled = true;
}
pwr |= MCI_PWR_ON;
break;
}
if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
/*
* The ST Micro variant has some additional bits
* indicating signal direction for the signals in
* the SD/MMC bus and feedback-clock usage.
*/
pwr |= host->pwr_reg_add;
if (ios->bus_width == MMC_BUS_WIDTH_4)
pwr &= ~MCI_ST_DATA74DIREN;
else if (ios->bus_width == MMC_BUS_WIDTH_1)
pwr &= (~MCI_ST_DATA74DIREN &
~MCI_ST_DATA31DIREN &
~MCI_ST_DATA2DIREN);
}
if (variant->opendrain) {
if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
pwr |= variant->opendrain;
} else {
/*
* If the variant cannot configure the pads on its own, then we
* expect the pinctrl to be able to do that for us.
*/
if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
pinctrl_select_state(host->pinctrl, host->pins_opendrain);
else
pinctrl_select_default_state(mmc_dev(mmc));
}
/*
* If clock = 0 and the variant requires the MMCIPOWER to be used for
* gating the clock, the MCI_PWR_ON bit is cleared.
*/
if (!ios->clock && variant->pwrreg_clkgate)
pwr &= ~MCI_PWR_ON;
if (host->variant->explicit_mclk_control &&
ios->clock != host->clock_cache) {
ret = clk_set_rate(host->clk, ios->clock);
if (ret < 0)
dev_err(mmc_dev(host->mmc),
"Error setting clock rate (%d)\n", ret);
else
host->mclk = clk_get_rate(host->clk);
}
host->clock_cache = ios->clock;
spin_lock_irqsave(&host->lock, flags);
if (host->ops && host->ops->set_clkreg)
host->ops->set_clkreg(host, ios->clock);
else
mmci_set_clkreg(host, ios->clock);
mmci_set_max_busy_timeout(mmc);
if (host->ops && host->ops->set_pwrreg)
host->ops->set_pwrreg(host, pwr);
else
mmci_write_pwrreg(host, pwr);
mmci_reg_delay(host);
spin_unlock_irqrestore(&host->lock, flags);
}
static int mmci_get_cd(struct mmc_host *mmc)
{
struct mmci_host *host = mmc_priv(mmc);
struct mmci_platform_data *plat = host->plat;
unsigned int status = mmc_gpio_get_cd(mmc);
if (status == -ENOSYS) {
if (!plat->status)
return 1; /* Assume always present */
status = plat->status(mmc_dev(host->mmc));
}
return status;
}
static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct mmci_host *host = mmc_priv(mmc);
int ret;
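/*
 * Note: a non-zero result from mmc_regulator_set_vqmmc() is deliberately
 * dropped below; only the post-switch callback can fail this function.
 */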
ret = mmc_regulator_set_vqmmc(mmc, ios);
if (!ret && host->ops && host->ops->post_sig_volt_switch)
ret = host->ops->post_sig_volt_switch(host, ios);
else if (ret)
ret = 0;
if (ret < 0)
dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
return ret;
}
static struct mmc_host_ops mmci_ops = {
.request = mmci_request,
.pre_req = mmci_pre_request,
.post_req = mmci_post_request,
.set_ios = mmci_set_ios,
.get_ro = mmc_gpio_get_ro,
.get_cd = mmci_get_cd,
.start_signal_voltage_switch = mmci_sig_volt_switch,
};
static void mmci_probe_level_translator(struct mmc_host *mmc)
{
struct device *dev = mmc_dev(mmc);
struct mmci_host *host = mmc_priv(mmc);
struct gpio_desc *cmd_gpio;
struct gpio_desc *ck_gpio;
struct gpio_desc *ckin_gpio;
int clk_hi, clk_lo;
/*
* Assume the level translator is present if st,use-ckin is set.
* This is to cater for DTs which do not implement this test.
*/
host->clk_reg_add |= MCI_STM32_CLK_SELCKIN;
cmd_gpio = gpiod_get(dev, "st,cmd", GPIOD_OUT_HIGH);
if (IS_ERR(cmd_gpio))
goto exit_cmd;
ck_gpio = gpiod_get(dev, "st,ck", GPIOD_OUT_HIGH);
if (IS_ERR(ck_gpio))
goto exit_ck;
ckin_gpio = gpiod_get(dev, "st,ckin", GPIOD_IN);
if (IS_ERR(ckin_gpio))
goto exit_ckin;
/* All GPIOs are valid, test whether level translator works */
/* Sample CKIN */
clk_hi = !!gpiod_get_value(ckin_gpio);
/* Set CK low */
gpiod_set_value(ck_gpio, 0);
/* Sample CKIN */
clk_lo = !!gpiod_get_value(ckin_gpio);
/* Tristate all */
gpiod_direction_input(cmd_gpio);
gpiod_direction_input(ck_gpio);
/* Level translator is present if CK signal is propagated to CKIN */
if (!clk_hi || clk_lo) {
host->clk_reg_add &= ~MCI_STM32_CLK_SELCKIN;
dev_warn(dev,
"Level translator inoperable, CK signal not detected on CKIN, disabling.\n");
}
gpiod_put(ckin_gpio);
exit_ckin:
gpiod_put(ck_gpio);
exit_ck:
gpiod_put(cmd_gpio);
exit_cmd:
pinctrl_select_default_state(dev);
}
static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
{
struct mmci_host *host = mmc_priv(mmc);
int ret = mmc_of_parse(mmc);
if (ret)
return ret;
if (of_property_read_bool(np, "st,sig-dir-dat0"))
host->pwr_reg_add |= MCI_ST_DATA0DIREN;
if (of_property_read_bool(np, "st,sig-dir-dat2"))
host->pwr_reg_add |= MCI_ST_DATA2DIREN;
if (of_property_read_bool(np, "st,sig-dir-dat31"))
host->pwr_reg_add |= MCI_ST_DATA31DIREN;
if (of_property_read_bool(np, "st,sig-dir-dat74"))
host->pwr_reg_add |= MCI_ST_DATA74DIREN;
if (of_property_read_bool(np, "st,sig-dir-cmd"))
host->pwr_reg_add |= MCI_ST_CMDDIREN;
if (of_property_read_bool(np, "st,sig-pin-fbclk"))
host->pwr_reg_add |= MCI_ST_FBCLKEN;
if (of_property_read_bool(np, "st,sig-dir"))
host->pwr_reg_add |= MCI_STM32_DIRPOL;
if (of_property_read_bool(np, "st,neg-edge"))
host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE;
if (of_property_read_bool(np, "st,use-ckin"))
mmci_probe_level_translator(mmc);
if (of_property_read_bool(np, "mmc-cap-mmc-highspeed"))
mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
if (of_property_read_bool(np, "mmc-cap-sd-highspeed"))
mmc->caps |= MMC_CAP_SD_HIGHSPEED;
return 0;
}
static int mmci_probe(struct amba_device *dev,
const struct amba_id *id)
{
struct mmci_platform_data *plat = dev->dev.platform_data;
struct device_node *np = dev->dev.of_node;
struct variant_data *variant = id->data;
struct mmci_host *host;
struct mmc_host *mmc;
int ret;
/* Must have platform data or Device Tree. */
if (!plat && !np) {
dev_err(&dev->dev, "No plat data or DT found\n");
return -EINVAL;
}
if (!plat) {
plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
return -ENOMEM;
}
mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
if (!mmc)
return -ENOMEM;
host = mmc_priv(mmc);
host->mmc = mmc;
host->mmc_ops = &mmci_ops;
mmc->ops = &mmci_ops;
ret = mmci_of_parse(np, mmc);
if (ret)
goto host_free;
/*
* Some variants (STM32) don't have an opendrain bit; nevertheless,
* the pins can be configured accordingly using pinctrl.
*/
if (!variant->opendrain) {
host->pinctrl = devm_pinctrl_get(&dev->dev);
if (IS_ERR(host->pinctrl)) {
dev_err(&dev->dev, "failed to get pinctrl");
ret = PTR_ERR(host->pinctrl);
goto host_free;
}
host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
MMCI_PINCTRL_STATE_OPENDRAIN);
if (IS_ERR(host->pins_opendrain)) {
dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
ret = PTR_ERR(host->pins_opendrain);
goto host_free;
}
}
host->hw_designer = amba_manf(dev);
host->hw_revision = amba_rev(dev);
dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);
host->clk = devm_clk_get(&dev->dev, NULL);
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
goto host_free;
}
ret = clk_prepare_enable(host->clk);
if (ret)
goto host_free;
if (variant->qcom_fifo)
host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
else
host->get_rx_fifocnt = mmci_get_rx_fifocnt;
host->plat = plat;
host->variant = variant;
host->mclk = clk_get_rate(host->clk);
/*
* According to the spec, mclk is max 100 MHz,
* so we try to adjust the clock down to this
* (if possible).
*/
if (host->mclk > variant->f_max) {
ret = clk_set_rate(host->clk, variant->f_max);
if (ret < 0)
goto clk_disable;
host->mclk = clk_get_rate(host->clk);
dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
host->mclk);
}
host->phybase = dev->res.start;
host->base = devm_ioremap_resource(&dev->dev, &dev->res);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
goto clk_disable;
}
if (variant->init)
variant->init(host);
/*
* The ARM and ST versions of the block have slightly different
* clock divider equations, which means that the minimum divider
* differs too: the ST form f = mclk / (clkdiv + 2) with clkdiv <= 255
* gives a minimum of mclk / 257, while the ARM form
* f = mclk / (2 * (clkdiv + 1)) gives mclk / 512.
* On Qualcomm-like controllers, get the nearest minimum clock to 100 kHz.
*/
if (variant->st_clkdiv)
mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
else if (variant->stm32_clkdiv)
mmc->f_min = DIV_ROUND_UP(host->mclk, 2046);
else if (variant->explicit_mclk_control)
mmc->f_min = clk_round_rate(host->clk, 100000);
else
mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
/*
* If no maximum operating frequency is supplied, fall back to use
* the module parameter, which has a (low) default value in case it
* is not specified. Either value must not exceed the clock rate into
* the block, of course.
*/
if (mmc->f_max)
mmc->f_max = variant->explicit_mclk_control ?
min(variant->f_max, mmc->f_max) :
min(host->mclk, mmc->f_max);
else
mmc->f_max = variant->explicit_mclk_control ?
fmax : min(host->mclk, fmax);
dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
host->rst = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
if (IS_ERR(host->rst)) {
ret = PTR_ERR(host->rst);
goto clk_disable;
}
ret = reset_control_deassert(host->rst);
if (ret)
dev_err(mmc_dev(mmc), "failed to de-assert reset\n");
/* Get regulators and the supported OCR mask */
ret = mmc_regulator_get_supply(mmc);
if (ret)
goto clk_disable;
if (!mmc->ocr_avail)
mmc->ocr_avail = plat->ocr_mask;
else if (plat->ocr_mask)
dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
/* We support these capabilities. */
mmc->caps |= MMC_CAP_CMD23;
/*
* Enable busy detection.
*/
if (variant->busy_detect) {
mmci_ops.card_busy = mmci_card_busy;
/*
* Not all variants have a flag to enable busy detection
* in the DPSM, but if they do, set it here.
*/
if (variant->busy_dpsm_flag)
mmci_write_datactrlreg(host,
host->variant->busy_dpsm_flag);
mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
}
	/* Variants with a mandatory busy timeout in HW need R1B responses. */
if (variant->busy_timeout)
mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
/* Prepare a CMD12 - needed to clear the DPSM on some variants. */
host->stop_abort.opcode = MMC_STOP_TRANSMISSION;
host->stop_abort.arg = 0;
host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC;
/* We support these PM capabilities. */
mmc->pm_caps |= MMC_PM_KEEP_POWER;
	/* We can do scatter/gather I/O. */
mmc->max_segs = NR_SG;
/*
* Since only a certain number of bits are valid in the data length
* register, we must ensure that we don't exceed 2^num-1 bytes in a
* single request.
*/
mmc->max_req_size = (1 << variant->datalength_bits) - 1;
/*
* Set the maximum segment size. Since we aren't doing DMA
* (yet) we are only limited by the data length register.
*/
mmc->max_seg_size = mmc->max_req_size;
/*
* Block size can be up to 2048 bytes, but must be a power of two.
*/
mmc->max_blk_size = 1 << variant->datactrl_blocksz;
/*
* Limit the number of blocks transferred so that we don't overflow
* the maximum request size.
*/
mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz;
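	/* i.e. max_blk_count * max_blk_size <= max_req_size */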
spin_lock_init(&host->lock);
writel(0, host->base + MMCIMASK0);
if (variant->mmcimask1)
writel(0, host->base + MMCIMASK1);
writel(0xfff, host->base + MMCICLEAR);
	/*
	 * If not using DT, or if using a descriptor table alongside DT,
	 * look up the descriptors named "cd" and "wp" right here, and fail
	 * silently if they do not exist.
	 */
if (!np) {
ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
if (ret == -EPROBE_DEFER)
goto clk_disable;
ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
if (ret == -EPROBE_DEFER)
goto clk_disable;
}
ret = devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq,
mmci_irq_thread, IRQF_SHARED,
DRIVER_NAME " (cmd)", host);
if (ret)
goto clk_disable;
if (!dev->irq[1])
host->singleirq = true;
else {
ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
IRQF_SHARED, DRIVER_NAME " (pio)", host);
if (ret)
goto clk_disable;
}
if (host->variant->busy_detect)
INIT_DELAYED_WORK(&host->ux500_busy_timeout_work,
ux500_busy_timeout_work);
writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);
amba_set_drvdata(dev, mmc);
dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
amba_rev(dev), (unsigned long long)dev->res.start,
dev->irq[0], dev->irq[1]);
mmci_dma_setup(host);
pm_runtime_set_autosuspend_delay(&dev->dev, 50);
pm_runtime_use_autosuspend(&dev->dev);
ret = mmc_add_host(mmc);
if (ret)
goto clk_disable;
pm_runtime_put(&dev->dev);
return 0;
clk_disable:
clk_disable_unprepare(host->clk);
host_free:
mmc_free_host(mmc);
return ret;
}
static void mmci_remove(struct amba_device *dev)
{
struct mmc_host *mmc = amba_get_drvdata(dev);
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
struct variant_data *variant = host->variant;
/*
* Undo pm_runtime_put() in probe. We use the _sync
* version here so that we can access the primecell.
*/
pm_runtime_get_sync(&dev->dev);
mmc_remove_host(mmc);
writel(0, host->base + MMCIMASK0);
if (variant->mmcimask1)
writel(0, host->base + MMCIMASK1);
writel(0, host->base + MMCICOMMAND);
writel(0, host->base + MMCIDATACTRL);
mmci_dma_release(host);
clk_disable_unprepare(host->clk);
mmc_free_host(mmc);
}
}
#ifdef CONFIG_PM
static void mmci_save(struct mmci_host *host)
{
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
writel(0, host->base + MMCIMASK0);
if (host->variant->pwrreg_nopower) {
writel(0, host->base + MMCIDATACTRL);
writel(0, host->base + MMCIPOWER);
writel(0, host->base + MMCICLOCK);
}
mmci_reg_delay(host);
spin_unlock_irqrestore(&host->lock, flags);
}
static void mmci_restore(struct mmci_host *host)
{
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
if (host->variant->pwrreg_nopower) {
writel(host->clk_reg, host->base + MMCICLOCK);
writel(host->datactrl_reg, host->base + MMCIDATACTRL);
writel(host->pwr_reg, host->base + MMCIPOWER);
}
writel(MCI_IRQENABLE | host->variant->start_err,
host->base + MMCIMASK0);
mmci_reg_delay(host);
spin_unlock_irqrestore(&host->lock, flags);
}
static int mmci_runtime_suspend(struct device *dev)
{
struct amba_device *adev = to_amba_device(dev);
struct mmc_host *mmc = amba_get_drvdata(adev);
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
pinctrl_pm_select_sleep_state(dev);
mmci_save(host);
clk_disable_unprepare(host->clk);
}
return 0;
}
static int mmci_runtime_resume(struct device *dev)
{
struct amba_device *adev = to_amba_device(dev);
struct mmc_host *mmc = amba_get_drvdata(adev);
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
clk_prepare_enable(host->clk);
mmci_restore(host);
pinctrl_select_default_state(dev);
}
return 0;
}
#endif
static const struct dev_pm_ops mmci_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};
static const struct amba_id mmci_ids[] = {
{
.id = 0x00041180,
.mask = 0xff0fffff,
.data = &variant_arm,
},
{
.id = 0x01041180,
.mask = 0xff0fffff,
.data = &variant_arm_extended_fifo,
},
{
.id = 0x02041180,
.mask = 0xff0fffff,
.data = &variant_arm_extended_fifo_hwfc,
},
{
.id = 0x00041181,
.mask = 0x000fffff,
.data = &variant_arm,
},
/* ST Micro variants */
{
.id = 0x00180180,
.mask = 0x00ffffff,
.data = &variant_u300,
},
{
.id = 0x10180180,
.mask = 0xf0ffffff,
.data = &variant_nomadik,
},
{
.id = 0x00280180,
.mask = 0x00ffffff,
.data = &variant_nomadik,
},
{
.id = 0x00480180,
.mask = 0xf0ffffff,
.data = &variant_ux500,
},
{
.id = 0x10480180,
.mask = 0xf0ffffff,
.data = &variant_ux500v2,
},
{
.id = 0x00880180,
.mask = 0x00ffffff,
.data = &variant_stm32,
},
{
.id = 0x10153180,
.mask = 0xf0ffffff,
.data = &variant_stm32_sdmmc,
},
{
.id = 0x00253180,
.mask = 0xf0ffffff,
.data = &variant_stm32_sdmmcv2,
},
{
.id = 0x20253180,
.mask = 0xf0ffffff,
.data = &variant_stm32_sdmmcv2,
},
{
.id = 0x00353180,
.mask = 0xf0ffffff,
.data = &variant_stm32_sdmmcv3,
},
/* Qualcomm variants */
{
.id = 0x00051180,
.mask = 0x000fffff,
.data = &variant_qcom,
},
{ 0, 0 },
};
MODULE_DEVICE_TABLE(amba, mmci_ids);
static struct amba_driver mmci_driver = {
.drv = {
.name = DRIVER_NAME,
.pm = &mmci_dev_pm_ops,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = mmci_probe,
.remove = mmci_remove,
.id_table = mmci_ids,
};
module_amba_driver(mmci_driver);
module_param(fmax, uint, 0444);
MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/mmc/host/mmci.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sdricoh_cs.c - driver for Ricoh Secure Digital Card Readers that can be
 * found on some Ricoh RL5c476 II cardbus bridges
*
* Copyright (C) 2006 - 2008 Sascha Sommer <[email protected]>
*/
/*
#define DEBUG
#define VERBOSE_DEBUG
*/
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/iopoll.h>
#include <linux/scatterlist.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
#include <linux/io.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#define DRIVER_NAME "sdricoh_cs"
static unsigned int switchlocked;
/* i/o region */
#define SDRICOH_PCI_REGION 0
#define SDRICOH_PCI_REGION_SIZE 0x1000
/* registers */
#define R104_VERSION 0x104
#define R200_CMD 0x200
#define R204_CMD_ARG 0x204
#define R208_DATAIO 0x208
#define R20C_RESP 0x20c
#define R21C_STATUS 0x21c
#define R2E0_INIT 0x2e0
#define R2E4_STATUS_RESP 0x2e4
#define R2F0_RESET 0x2f0
#define R224_MODE 0x224
#define R226_BLOCKSIZE 0x226
#define R228_POWER 0x228
#define R230_DATA 0x230
/* flags for the R21C_STATUS register */
#define STATUS_CMD_FINISHED 0x00000001
#define STATUS_TRANSFER_FINISHED 0x00000004
#define STATUS_CARD_INSERTED 0x00000020
#define STATUS_CARD_LOCKED 0x00000080
#define STATUS_CMD_TIMEOUT 0x00400000
#define STATUS_READY_TO_READ 0x01000000
#define STATUS_READY_TO_WRITE 0x02000000
#define STATUS_BUSY 0x40000000
/* timeouts */
#define SDRICOH_CMD_TIMEOUT_US 1000000
#define SDRICOH_DATA_TIMEOUT_US 1000000
/* list of supported pcmcia devices */
static const struct pcmcia_device_id pcmcia_ids[] = {
/* vendor and device strings followed by their crc32 hashes */
PCMCIA_DEVICE_PROD_ID12("RICOH", "Bay1Controller", 0xd9f522ed,
0xc3901202),
PCMCIA_DEVICE_PROD_ID12("RICOH", "Bay Controller", 0xd9f522ed,
0xace80909),
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, pcmcia_ids);
/* mmc privdata */
struct sdricoh_host {
struct device *dev;
struct mmc_host *mmc; /* MMC structure */
unsigned char __iomem *iobase;
struct pci_dev *pci_dev;
int app_cmd;
};
/***************** register i/o helper functions *****************************/
static inline unsigned int sdricoh_readl(struct sdricoh_host *host,
unsigned int reg)
{
unsigned int value = readl(host->iobase + reg);
dev_vdbg(host->dev, "rl %x 0x%x\n", reg, value);
return value;
}
static inline void sdricoh_writel(struct sdricoh_host *host, unsigned int reg,
unsigned int value)
{
writel(value, host->iobase + reg);
dev_vdbg(host->dev, "wl %x 0x%x\n", reg, value);
}
static inline void sdricoh_writew(struct sdricoh_host *host, unsigned int reg,
unsigned short value)
{
writew(value, host->iobase + reg);
dev_vdbg(host->dev, "ww %x 0x%x\n", reg, value);
}
static inline unsigned int sdricoh_readb(struct sdricoh_host *host,
unsigned int reg)
{
unsigned int value = readb(host->iobase + reg);
dev_vdbg(host->dev, "rb %x 0x%x\n", reg, value);
return value;
}
static bool sdricoh_status_ok(struct sdricoh_host *host, unsigned int status,
unsigned int wanted)
{
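	/*
	 * Writing the status back to R2E4_STATUS_RESP appears to acknowledge
	 * it (same pattern as sdricoh_get_ro()).
	 */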
sdricoh_writel(host, R2E4_STATUS_RESP, status);
return status & wanted;
}
static int sdricoh_query_status(struct sdricoh_host *host, unsigned int wanted)
{
int ret;
unsigned int status = 0;
struct device *dev = host->dev;
ret = read_poll_timeout(sdricoh_readl, status,
sdricoh_status_ok(host, status, wanted),
32, SDRICOH_DATA_TIMEOUT_US, false,
host, R21C_STATUS);
if (ret) {
dev_err(dev, "query_status: timeout waiting for %x\n", wanted);
return -ETIMEDOUT;
}
/* do not do this check in the loop as some commands fail otherwise */
if (status & 0x7F0000) {
dev_err(dev, "waiting for status bit %x failed\n", wanted);
return -EINVAL;
}
return 0;
}
static int sdricoh_mmc_cmd(struct sdricoh_host *host, struct mmc_command *cmd)
{
unsigned int status, timeout_us;
int ret;
unsigned char opcode = cmd->opcode;
/* reset status reg? */
sdricoh_writel(host, R21C_STATUS, 0x18);
/* MMC_APP_CMDs need some special handling */
if (host->app_cmd) {
opcode |= 64;
host->app_cmd = 0;
} else if (opcode == MMC_APP_CMD)
host->app_cmd = 1;
/* fill parameters */
sdricoh_writel(host, R204_CMD_ARG, cmd->arg);
sdricoh_writel(host, R200_CMD, (0x10000 << 8) | opcode);
/* wait for command completion */
if (!opcode)
return 0;
timeout_us = cmd->busy_timeout ? cmd->busy_timeout * 1000 :
SDRICOH_CMD_TIMEOUT_US;
ret = read_poll_timeout(sdricoh_readl, status,
sdricoh_status_ok(host, status, STATUS_CMD_FINISHED),
32, timeout_us, false,
host, R21C_STATUS);
/*
* Don't check for timeout status in the loop, as it's not always reset
* correctly.
*/
if (ret || status & STATUS_CMD_TIMEOUT)
return -ETIMEDOUT;
return 0;
}
static int sdricoh_reset(struct sdricoh_host *host)
{
dev_dbg(host->dev, "reset\n");
sdricoh_writel(host, R2F0_RESET, 0x10001);
sdricoh_writel(host, R2E0_INIT, 0x10000);
if (sdricoh_readl(host, R2E0_INIT) != 0x10000)
return -EIO;
sdricoh_writel(host, R2E0_INIT, 0x10007);
sdricoh_writel(host, R224_MODE, 0x2000000);
sdricoh_writel(host, R228_POWER, 0xe0);
/* status register ? */
sdricoh_writel(host, R21C_STATUS, 0x18);
return 0;
}
static int sdricoh_blockio(struct sdricoh_host *host, int read,
u8 *buf, int len)
{
int size;
u32 data = 0;
/* wait until the data is available */
if (read) {
if (sdricoh_query_status(host, STATUS_READY_TO_READ))
return -ETIMEDOUT;
sdricoh_writel(host, R21C_STATUS, 0x18);
/* read data */
while (len) {
data = sdricoh_readl(host, R230_DATA);
size = min(len, 4);
len -= size;
while (size) {
*buf = data & 0xFF;
buf++;
data >>= 8;
size--;
}
}
} else {
if (sdricoh_query_status(host, STATUS_READY_TO_WRITE))
return -ETIMEDOUT;
sdricoh_writel(host, R21C_STATUS, 0x18);
/* write data */
while (len) {
size = min(len, 4);
len -= size;
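			/*
			 * Shift earlier bytes down and insert the new byte at
			 * the top; a full 4-byte group ends up little-endian
			 * in data.
			 */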
while (size) {
data >>= 8;
data |= (u32)*buf << 24;
buf++;
size--;
}
sdricoh_writel(host, R230_DATA, data);
}
}
return 0;
}
static void sdricoh_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct sdricoh_host *host = mmc_priv(mmc);
struct mmc_command *cmd = mrq->cmd;
struct mmc_data *data = cmd->data;
struct device *dev = host->dev;
int i;
dev_dbg(dev, "=============================\n");
dev_dbg(dev, "sdricoh_request opcode=%i\n", cmd->opcode);
sdricoh_writel(host, R21C_STATUS, 0x18);
/* read/write commands seem to require this */
if (data) {
sdricoh_writew(host, R226_BLOCKSIZE, data->blksz);
sdricoh_writel(host, R208_DATAIO, 0);
}
cmd->error = sdricoh_mmc_cmd(host, cmd);
/* read response buffer */
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
/* CRC is stripped so we need to do some shifting. */
for (i = 0; i < 4; i++) {
cmd->resp[i] =
sdricoh_readl(host,
R20C_RESP + (3 - i) * 4) << 8;
if (i != 3)
cmd->resp[i] |=
sdricoh_readb(host, R20C_RESP +
(3 - i) * 4 - 1);
}
} else
cmd->resp[0] = sdricoh_readl(host, R20C_RESP);
}
/* transfer data */
if (data && cmd->error == 0) {
dev_dbg(dev, "transfer: blksz %i blocks %i sg_len %i "
"sg length %i\n", data->blksz, data->blocks,
data->sg_len, data->sg->length);
/* enter data reading mode */
sdricoh_writel(host, R21C_STATUS, 0x837f031e);
for (i = 0; i < data->blocks; i++) {
size_t len = data->blksz;
u8 *buf;
struct page *page;
int result;
page = sg_page(data->sg);
buf = kmap(page) + data->sg->offset + (len * i);
result =
sdricoh_blockio(host,
data->flags & MMC_DATA_READ, buf, len);
kunmap(page);
flush_dcache_page(page);
if (result) {
dev_err(dev, "sdricoh_request: cmd %i "
"block transfer failed\n", cmd->opcode);
cmd->error = result;
break;
} else
data->bytes_xfered += len;
}
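		/*
		 * Writing 1 to R208_DATAIO presumably marks the end of the
		 * data transfer (it was cleared before the command).
		 */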
sdricoh_writel(host, R208_DATAIO, 1);
if (sdricoh_query_status(host, STATUS_TRANSFER_FINISHED)) {
dev_err(dev, "sdricoh_request: transfer end error\n");
cmd->error = -EINVAL;
}
}
/* FIXME check busy flag */
mmc_request_done(mmc, mrq);
dev_dbg(dev, "=============================\n");
}
static void sdricoh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sdricoh_host *host = mmc_priv(mmc);
dev_dbg(host->dev, "set_ios\n");
if (ios->power_mode == MMC_POWER_ON) {
sdricoh_writel(host, R228_POWER, 0xc0e0);
if (ios->bus_width == MMC_BUS_WIDTH_4) {
sdricoh_writel(host, R224_MODE, 0x2000300);
sdricoh_writel(host, R228_POWER, 0x40e0);
} else {
sdricoh_writel(host, R224_MODE, 0x2000340);
}
} else if (ios->power_mode == MMC_POWER_UP) {
sdricoh_writel(host, R224_MODE, 0x2000320);
sdricoh_writel(host, R228_POWER, 0xe0);
}
}
static int sdricoh_get_ro(struct mmc_host *mmc)
{
struct sdricoh_host *host = mmc_priv(mmc);
unsigned int status;
status = sdricoh_readl(host, R21C_STATUS);
sdricoh_writel(host, R2E4_STATUS_RESP, status);
/* some notebooks seem to have the locked flag switched */
if (switchlocked)
return !(status & STATUS_CARD_LOCKED);
return (status & STATUS_CARD_LOCKED);
}
static const struct mmc_host_ops sdricoh_ops = {
.request = sdricoh_request,
.set_ios = sdricoh_set_ios,
.get_ro = sdricoh_get_ro,
};
/* initialize the controller and register it with the mmc framework */
static int sdricoh_init_mmc(struct pci_dev *pci_dev,
struct pcmcia_device *pcmcia_dev)
{
int result;
void __iomem *iobase;
struct mmc_host *mmc;
struct sdricoh_host *host;
struct device *dev = &pcmcia_dev->dev;
/* map iomem */
if (pci_resource_len(pci_dev, SDRICOH_PCI_REGION) !=
SDRICOH_PCI_REGION_SIZE) {
dev_dbg(dev, "unexpected pci resource len\n");
return -ENODEV;
}
iobase =
pci_iomap(pci_dev, SDRICOH_PCI_REGION, SDRICOH_PCI_REGION_SIZE);
if (!iobase) {
dev_err(dev, "unable to map iobase\n");
return -ENODEV;
}
/* check version? */
if (readl(iobase + R104_VERSION) != 0x4000) {
dev_dbg(dev, "no supported mmc controller found\n");
result = -ENODEV;
goto unmap_io;
}
/* allocate privdata */
mmc = pcmcia_dev->priv =
mmc_alloc_host(sizeof(struct sdricoh_host), &pcmcia_dev->dev);
if (!mmc) {
dev_err(dev, "mmc_alloc_host failed\n");
result = -ENOMEM;
goto unmap_io;
}
host = mmc_priv(mmc);
host->iobase = iobase;
host->dev = dev;
host->pci_dev = pci_dev;
mmc->ops = &sdricoh_ops;
	/* FIXME: frequency and voltage handling is done by the controller */
mmc->f_min = 450000;
mmc->f_max = 24000000;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
mmc->caps |= MMC_CAP_4_BIT_DATA;
mmc->max_seg_size = 1024 * 512;
mmc->max_blk_size = 512;
/* reset the controller */
if (sdricoh_reset(host)) {
dev_dbg(dev, "could not reset\n");
result = -EIO;
goto free_host;
}
result = mmc_add_host(mmc);
if (!result) {
dev_dbg(dev, "mmc host registered\n");
return 0;
}
free_host:
mmc_free_host(mmc);
unmap_io:
pci_iounmap(pci_dev, iobase);
return result;
}
/* search for supported mmc controllers */
static int sdricoh_pcmcia_probe(struct pcmcia_device *pcmcia_dev)
{
struct pci_dev *pci_dev = NULL;
dev_info(&pcmcia_dev->dev, "Searching MMC controller for pcmcia device"
" %s %s ...\n", pcmcia_dev->prod_id[0], pcmcia_dev->prod_id[1]);
/* search pci cardbus bridge that contains the mmc controller */
/* the io region is already claimed by yenta_socket... */
while ((pci_dev =
pci_get_device(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476,
pci_dev))) {
/* try to init the device */
if (!sdricoh_init_mmc(pci_dev, pcmcia_dev)) {
dev_info(&pcmcia_dev->dev, "MMC controller found\n");
return 0;
}
}
dev_err(&pcmcia_dev->dev, "No MMC controller was found.\n");
return -ENODEV;
}
static void sdricoh_pcmcia_detach(struct pcmcia_device *link)
{
struct mmc_host *mmc = link->priv;
dev_dbg(&link->dev, "detach\n");
/* remove mmc host */
if (mmc) {
struct sdricoh_host *host = mmc_priv(mmc);
mmc_remove_host(mmc);
pci_iounmap(host->pci_dev, host->iobase);
pci_dev_put(host->pci_dev);
mmc_free_host(mmc);
}
pcmcia_disable_device(link);
}
#ifdef CONFIG_PM
static int sdricoh_pcmcia_suspend(struct pcmcia_device *link)
{
dev_dbg(&link->dev, "suspend\n");
return 0;
}
static int sdricoh_pcmcia_resume(struct pcmcia_device *link)
{
struct mmc_host *mmc = link->priv;
dev_dbg(&link->dev, "resume\n");
sdricoh_reset(mmc_priv(mmc));
return 0;
}
#else
#define sdricoh_pcmcia_suspend NULL
#define sdricoh_pcmcia_resume NULL
#endif
static struct pcmcia_driver sdricoh_driver = {
.name = DRIVER_NAME,
.probe = sdricoh_pcmcia_probe,
.remove = sdricoh_pcmcia_detach,
.id_table = pcmcia_ids,
.suspend = sdricoh_pcmcia_suspend,
.resume = sdricoh_pcmcia_resume,
};
module_pcmcia_driver(sdricoh_driver);
module_param(switchlocked, uint, 0444);
MODULE_AUTHOR("Sascha Sommer <[email protected]>");
MODULE_DESCRIPTION("Ricoh PCMCIA Secure Digital Interface driver");
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(switchlocked, "Switch the card's locked status. "
		"Use this when unlocked cards are shown read-only (default 0)");
| linux-master | drivers/mmc/host/sdricoh_cs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2015, 2022 MediaTek Inc.
* Author: Chaotian.Jing <[email protected]>
*/
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include "cqhci.h"
#define MAX_BD_NUM 1024
#define MSDC_NR_CLOCKS 3
/*--------------------------------------------------------------------------*/
/* Common Definition */
/*--------------------------------------------------------------------------*/
#define MSDC_BUS_1BITS 0x0
#define MSDC_BUS_4BITS 0x1
#define MSDC_BUS_8BITS 0x2
#define MSDC_BURST_64B 0x6
/*--------------------------------------------------------------------------*/
/* Register Offset */
/*--------------------------------------------------------------------------*/
#define MSDC_CFG 0x0
#define MSDC_IOCON 0x04
#define MSDC_PS 0x08
#define MSDC_INT 0x0c
#define MSDC_INTEN 0x10
#define MSDC_FIFOCS 0x14
#define SDC_CFG 0x30
#define SDC_CMD 0x34
#define SDC_ARG 0x38
#define SDC_STS 0x3c
#define SDC_RESP0 0x40
#define SDC_RESP1 0x44
#define SDC_RESP2 0x48
#define SDC_RESP3 0x4c
#define SDC_BLK_NUM 0x50
#define SDC_ADV_CFG0 0x64
#define EMMC_IOCON 0x7c
#define SDC_ACMD_RESP 0x80
#define DMA_SA_H4BIT 0x8c
#define MSDC_DMA_SA 0x90
#define MSDC_DMA_CTRL 0x98
#define MSDC_DMA_CFG 0x9c
#define MSDC_PATCH_BIT 0xb0
#define MSDC_PATCH_BIT1 0xb4
#define MSDC_PATCH_BIT2 0xb8
#define MSDC_PAD_TUNE 0xec
#define MSDC_PAD_TUNE0 0xf0
#define PAD_DS_TUNE 0x188
#define PAD_CMD_TUNE 0x18c
#define EMMC51_CFG0 0x204
#define EMMC50_CFG0 0x208
#define EMMC50_CFG1 0x20c
#define EMMC50_CFG3 0x220
#define SDC_FIFO_CFG 0x228
#define CQHCI_SETTING 0x7fc
/*--------------------------------------------------------------------------*/
/* Top Pad Register Offset */
/*--------------------------------------------------------------------------*/
#define EMMC_TOP_CONTROL 0x00
#define EMMC_TOP_CMD 0x04
#define EMMC50_PAD_DS_TUNE 0x0c
/*--------------------------------------------------------------------------*/
/* Register Mask */
/*--------------------------------------------------------------------------*/
/* MSDC_CFG mask */
#define MSDC_CFG_MODE BIT(0) /* RW */
#define MSDC_CFG_CKPDN BIT(1) /* RW */
#define MSDC_CFG_RST BIT(2) /* RW */
#define MSDC_CFG_PIO BIT(3) /* RW */
#define MSDC_CFG_CKDRVEN BIT(4) /* RW */
#define MSDC_CFG_BV18SDT BIT(5) /* RW */
#define MSDC_CFG_BV18PSS BIT(6) /* R */
#define MSDC_CFG_CKSTB BIT(7) /* R */
#define MSDC_CFG_CKDIV GENMASK(15, 8) /* RW */
#define MSDC_CFG_CKMOD GENMASK(17, 16) /* RW */
#define MSDC_CFG_HS400_CK_MODE BIT(18) /* RW */
#define MSDC_CFG_HS400_CK_MODE_EXTRA BIT(22) /* RW */
#define MSDC_CFG_CKDIV_EXTRA GENMASK(19, 8) /* RW */
#define MSDC_CFG_CKMOD_EXTRA GENMASK(21, 20) /* RW */
/* MSDC_IOCON mask */
#define MSDC_IOCON_SDR104CKS BIT(0) /* RW */
#define MSDC_IOCON_RSPL BIT(1) /* RW */
#define MSDC_IOCON_DSPL BIT(2) /* RW */
#define MSDC_IOCON_DDLSEL BIT(3) /* RW */
#define MSDC_IOCON_DDR50CKD BIT(4) /* RW */
#define MSDC_IOCON_DSPLSEL BIT(5) /* RW */
#define MSDC_IOCON_W_DSPL BIT(8) /* RW */
#define MSDC_IOCON_D0SPL BIT(16) /* RW */
#define MSDC_IOCON_D1SPL BIT(17) /* RW */
#define MSDC_IOCON_D2SPL BIT(18) /* RW */
#define MSDC_IOCON_D3SPL BIT(19) /* RW */
#define MSDC_IOCON_D4SPL BIT(20) /* RW */
#define MSDC_IOCON_D5SPL BIT(21) /* RW */
#define MSDC_IOCON_D6SPL BIT(22) /* RW */
#define MSDC_IOCON_D7SPL BIT(23) /* RW */
#define MSDC_IOCON_RISCSZ GENMASK(25, 24) /* RW */
/* MSDC_PS mask */
#define MSDC_PS_CDEN BIT(0) /* RW */
#define MSDC_PS_CDSTS BIT(1) /* R */
#define MSDC_PS_CDDEBOUNCE GENMASK(15, 12) /* RW */
#define MSDC_PS_DAT GENMASK(23, 16) /* R */
#define MSDC_PS_DATA1 BIT(17) /* R */
#define MSDC_PS_CMD BIT(24) /* R */
#define MSDC_PS_WP BIT(31) /* R */
/* MSDC_INT mask */
#define MSDC_INT_MMCIRQ BIT(0) /* W1C */
#define MSDC_INT_CDSC BIT(1) /* W1C */
#define MSDC_INT_ACMDRDY BIT(3) /* W1C */
#define MSDC_INT_ACMDTMO BIT(4) /* W1C */
#define MSDC_INT_ACMDCRCERR BIT(5) /* W1C */
#define MSDC_INT_DMAQ_EMPTY BIT(6) /* W1C */
#define MSDC_INT_SDIOIRQ BIT(7) /* W1C */
#define MSDC_INT_CMDRDY BIT(8) /* W1C */
#define MSDC_INT_CMDTMO BIT(9) /* W1C */
#define MSDC_INT_RSPCRCERR BIT(10) /* W1C */
#define MSDC_INT_CSTA BIT(11) /* R */
#define MSDC_INT_XFER_COMPL BIT(12) /* W1C */
#define MSDC_INT_DXFER_DONE BIT(13) /* W1C */
#define MSDC_INT_DATTMO BIT(14) /* W1C */
#define MSDC_INT_DATCRCERR BIT(15) /* W1C */
#define MSDC_INT_ACMD19_DONE BIT(16) /* W1C */
#define MSDC_INT_DMA_BDCSERR BIT(17) /* W1C */
#define MSDC_INT_DMA_GPDCSERR BIT(18) /* W1C */
#define MSDC_INT_DMA_PROTECT BIT(19) /* W1C */
#define MSDC_INT_CMDQ BIT(28) /* W1C */
/* MSDC_INTEN mask */
#define MSDC_INTEN_MMCIRQ BIT(0) /* RW */
#define MSDC_INTEN_CDSC BIT(1) /* RW */
#define MSDC_INTEN_ACMDRDY BIT(3) /* RW */
#define MSDC_INTEN_ACMDTMO BIT(4) /* RW */
#define MSDC_INTEN_ACMDCRCERR BIT(5) /* RW */
#define MSDC_INTEN_DMAQ_EMPTY BIT(6) /* RW */
#define MSDC_INTEN_SDIOIRQ BIT(7) /* RW */
#define MSDC_INTEN_CMDRDY BIT(8) /* RW */
#define MSDC_INTEN_CMDTMO BIT(9) /* RW */
#define MSDC_INTEN_RSPCRCERR BIT(10) /* RW */
#define MSDC_INTEN_CSTA BIT(11) /* RW */
#define MSDC_INTEN_XFER_COMPL BIT(12) /* RW */
#define MSDC_INTEN_DXFER_DONE BIT(13) /* RW */
#define MSDC_INTEN_DATTMO BIT(14) /* RW */
#define MSDC_INTEN_DATCRCERR BIT(15) /* RW */
#define MSDC_INTEN_ACMD19_DONE BIT(16) /* RW */
#define MSDC_INTEN_DMA_BDCSERR BIT(17) /* RW */
#define MSDC_INTEN_DMA_GPDCSERR BIT(18) /* RW */
#define MSDC_INTEN_DMA_PROTECT BIT(19) /* RW */
/* MSDC_FIFOCS mask */
#define MSDC_FIFOCS_RXCNT GENMASK(7, 0) /* R */
#define MSDC_FIFOCS_TXCNT GENMASK(23, 16) /* R */
#define MSDC_FIFOCS_CLR BIT(31) /* RW */
/* SDC_CFG mask */
#define SDC_CFG_SDIOINTWKUP BIT(0) /* RW */
#define SDC_CFG_INSWKUP BIT(1) /* RW */
#define SDC_CFG_WRDTOC GENMASK(14, 2) /* RW */
#define SDC_CFG_BUSWIDTH GENMASK(17, 16) /* RW */
#define SDC_CFG_SDIO BIT(19) /* RW */
#define SDC_CFG_SDIOIDE BIT(20) /* RW */
#define SDC_CFG_INTATGAP BIT(21) /* RW */
#define SDC_CFG_DTOC GENMASK(31, 24) /* RW */
/* SDC_STS mask */
#define SDC_STS_SDCBUSY BIT(0) /* RW */
#define SDC_STS_CMDBUSY BIT(1) /* RW */
#define SDC_STS_SWR_COMPL BIT(31) /* RW */
#define SDC_DAT1_IRQ_TRIGGER BIT(19) /* RW */
/* SDC_ADV_CFG0 mask */
#define SDC_RX_ENHANCE_EN BIT(20) /* RW */
/* DMA_SA_H4BIT mask */
#define DMA_ADDR_HIGH_4BIT GENMASK(3, 0) /* RW */
/* MSDC_DMA_CTRL mask */
#define MSDC_DMA_CTRL_START BIT(0) /* W */
#define MSDC_DMA_CTRL_STOP BIT(1) /* W */
#define MSDC_DMA_CTRL_RESUME BIT(2) /* W */
#define MSDC_DMA_CTRL_MODE BIT(8) /* RW */
#define MSDC_DMA_CTRL_LASTBUF BIT(10) /* RW */
#define MSDC_DMA_CTRL_BRUSTSZ GENMASK(14, 12) /* RW */
/* MSDC_DMA_CFG mask */
#define MSDC_DMA_CFG_STS BIT(0) /* R */
#define MSDC_DMA_CFG_DECSEN BIT(1) /* RW */
#define MSDC_DMA_CFG_AHBHPROT2 BIT(9) /* RW */
#define MSDC_DMA_CFG_ACTIVEEN BIT(13) /* RW */
#define MSDC_DMA_CFG_CS12B16B BIT(16) /* RW */
/* MSDC_PATCH_BIT mask */
#define MSDC_PATCH_BIT_ODDSUPP BIT(1) /* RW */
#define MSDC_INT_DAT_LATCH_CK_SEL GENMASK(9, 7)
#define MSDC_CKGEN_MSDC_DLY_SEL GENMASK(14, 10)
#define MSDC_PATCH_BIT_IODSSEL BIT(16) /* RW */
#define MSDC_PATCH_BIT_IOINTSEL BIT(17) /* RW */
#define MSDC_PATCH_BIT_BUSYDLY GENMASK(21, 18) /* RW */
#define MSDC_PATCH_BIT_WDOD GENMASK(25, 22) /* RW */
#define MSDC_PATCH_BIT_IDRTSEL BIT(26) /* RW */
#define MSDC_PATCH_BIT_CMDFSEL BIT(27) /* RW */
#define MSDC_PATCH_BIT_INTDLSEL BIT(28) /* RW */
#define MSDC_PATCH_BIT_SPCPUSH BIT(29) /* RW */
#define MSDC_PATCH_BIT_DECRCTMO BIT(30) /* RW */
#define MSDC_PATCH_BIT1_CMDTA GENMASK(5, 3) /* RW */
#define MSDC_PB1_BUSY_CHECK_SEL BIT(7) /* RW */
#define MSDC_PATCH_BIT1_STOP_DLY GENMASK(11, 8) /* RW */
#define MSDC_PATCH_BIT2_CFGRESP BIT(15) /* RW */
#define MSDC_PATCH_BIT2_CFGCRCSTS BIT(28) /* RW */
#define MSDC_PB2_SUPPORT_64G BIT(1) /* RW */
#define MSDC_PB2_RESPWAIT GENMASK(3, 2) /* RW */
#define MSDC_PB2_RESPSTSENSEL GENMASK(18, 16) /* RW */
#define MSDC_PB2_CRCSTSENSEL GENMASK(31, 29) /* RW */
#define MSDC_PAD_TUNE_DATWRDLY GENMASK(4, 0) /* RW */
#define MSDC_PAD_TUNE_DATRRDLY GENMASK(12, 8) /* RW */
#define MSDC_PAD_TUNE_CMDRDLY GENMASK(20, 16) /* RW */
#define MSDC_PAD_TUNE_CMDRRDLY GENMASK(26, 22) /* RW */
#define MSDC_PAD_TUNE_CLKTDLY GENMASK(31, 27) /* RW */
#define MSDC_PAD_TUNE_RXDLYSEL BIT(15) /* RW */
#define MSDC_PAD_TUNE_RD_SEL BIT(13) /* RW */
#define MSDC_PAD_TUNE_CMD_SEL BIT(21) /* RW */
#define PAD_DS_TUNE_DLY_SEL BIT(0) /* RW */
#define PAD_DS_TUNE_DLY1 GENMASK(6, 2) /* RW */
#define PAD_DS_TUNE_DLY2 GENMASK(11, 7) /* RW */
#define PAD_DS_TUNE_DLY3 GENMASK(16, 12) /* RW */
#define PAD_CMD_TUNE_RX_DLY3 GENMASK(5, 1) /* RW */
/* EMMC51_CFG0 mask */
#define CMDQ_RDAT_CNT GENMASK(21, 12) /* RW */
#define EMMC50_CFG_PADCMD_LATCHCK BIT(0) /* RW */
#define EMMC50_CFG_CRCSTS_EDGE BIT(3) /* RW */
#define EMMC50_CFG_CFCSTS_SEL BIT(4) /* RW */
#define EMMC50_CFG_CMD_RESP_SEL BIT(9) /* RW */
/* EMMC50_CFG1 mask */
#define EMMC50_CFG1_DS_CFG BIT(28) /* RW */
#define EMMC50_CFG3_OUTS_WR GENMASK(4, 0) /* RW */
#define SDC_FIFO_CFG_WRVALIDSEL BIT(24) /* RW */
#define SDC_FIFO_CFG_RDVALIDSEL BIT(25) /* RW */
/* CQHCI_SETTING */
#define CQHCI_RD_CMD_WND_SEL BIT(14) /* RW */
#define CQHCI_WR_CMD_WND_SEL BIT(15) /* RW */
/* EMMC_TOP_CONTROL mask */
#define PAD_RXDLY_SEL BIT(0) /* RW */
#define DELAY_EN BIT(1) /* RW */
#define PAD_DAT_RD_RXDLY2 GENMASK(6, 2) /* RW */
#define PAD_DAT_RD_RXDLY GENMASK(11, 7) /* RW */
#define PAD_DAT_RD_RXDLY2_SEL BIT(12) /* RW */
#define PAD_DAT_RD_RXDLY_SEL BIT(13) /* RW */
#define DATA_K_VALUE_SEL BIT(14) /* RW */
#define SDC_RX_ENH_EN BIT(15) /* RW */
/* EMMC_TOP_CMD mask */
#define PAD_CMD_RXDLY2 GENMASK(4, 0) /* RW */
#define PAD_CMD_RXDLY GENMASK(9, 5) /* RW */
#define PAD_CMD_RD_RXDLY2_SEL BIT(10) /* RW */
#define PAD_CMD_RD_RXDLY_SEL BIT(11) /* RW */
#define PAD_CMD_TX_DLY GENMASK(16, 12) /* RW */
/* EMMC50_PAD_DS_TUNE mask */
#define PAD_DS_DLY_SEL BIT(16) /* RW */
#define PAD_DS_DLY1 GENMASK(14, 10) /* RW */
#define PAD_DS_DLY3 GENMASK(4, 0) /* RW */
#define REQ_CMD_EIO BIT(0)
#define REQ_CMD_TMO BIT(1)
#define REQ_DAT_ERR BIT(2)
#define REQ_STOP_EIO BIT(3)
#define REQ_STOP_TMO BIT(4)
#define REQ_CMD_BUSY BIT(5)
#define MSDC_PREPARE_FLAG BIT(0)
#define MSDC_ASYNC_FLAG BIT(1)
#define MSDC_MMAP_FLAG BIT(2)
#define MTK_MMC_AUTOSUSPEND_DELAY 50
#define CMD_TIMEOUT (HZ/10 * 5) /* 100ms x5 */
#define DAT_TIMEOUT (HZ * 5) /* 1000ms x5 */
#define DEFAULT_DEBOUNCE (8) /* 8 cycles CD debounce */
#define PAD_DELAY_MAX 32 /* PAD delay cells */
/*--------------------------------------------------------------------------*/
/* Descriptor Structure */
/*--------------------------------------------------------------------------*/
struct mt_gpdma_desc {
u32 gpd_info;
#define GPDMA_DESC_HWO BIT(0)
#define GPDMA_DESC_BDP BIT(1)
#define GPDMA_DESC_CHECKSUM GENMASK(15, 8)
#define GPDMA_DESC_INT BIT(16)
#define GPDMA_DESC_NEXT_H4 GENMASK(27, 24)
#define GPDMA_DESC_PTR_H4 GENMASK(31, 28)
u32 next;
u32 ptr;
u32 gpd_data_len;
#define GPDMA_DESC_BUFLEN GENMASK(15, 0)
#define GPDMA_DESC_EXTLEN GENMASK(23, 16)
u32 arg;
u32 blknum;
u32 cmd;
};
struct mt_bdma_desc {
u32 bd_info;
#define BDMA_DESC_EOL BIT(0)
#define BDMA_DESC_CHECKSUM GENMASK(15, 8)
#define BDMA_DESC_BLKPAD BIT(17)
#define BDMA_DESC_DWPAD BIT(18)
#define BDMA_DESC_NEXT_H4 GENMASK(27, 24)
#define BDMA_DESC_PTR_H4 GENMASK(31, 28)
u32 next;
u32 ptr;
u32 bd_data_len;
#define BDMA_DESC_BUFLEN GENMASK(15, 0)
#define BDMA_DESC_BUFLEN_EXT GENMASK(23, 0)
};
struct msdc_dma {
struct scatterlist *sg; /* I/O scatter list */
struct mt_gpdma_desc *gpd; /* pointer to gpd array */
struct mt_bdma_desc *bd; /* pointer to bd array */
dma_addr_t gpd_addr; /* the physical address of gpd array */
dma_addr_t bd_addr; /* the physical address of bd array */
};
struct msdc_save_para {
u32 msdc_cfg;
u32 iocon;
u32 sdc_cfg;
u32 pad_tune;
u32 patch_bit0;
u32 patch_bit1;
u32 patch_bit2;
u32 pad_ds_tune;
u32 pad_cmd_tune;
u32 emmc50_cfg0;
u32 emmc50_cfg3;
u32 sdc_fifo_cfg;
u32 emmc_top_control;
u32 emmc_top_cmd;
u32 emmc50_pad_ds_tune;
};
struct mtk_mmc_compatible {
u8 clk_div_bits;
bool recheck_sdio_irq;
	bool hs400_tune; /* only used for MT8173 and MT6795 */
u32 pad_tune_reg;
bool async_fifo;
bool data_tune;
bool busy_check;
bool stop_clk_fix;
bool enhance_rx;
bool support_64g;
bool use_internal_cd;
};
struct msdc_tune_para {
u32 iocon;
u32 pad_tune;
u32 pad_cmd_tune;
u32 emmc_top_control;
u32 emmc_top_cmd;
};
struct msdc_delay_phase {
u8 maxlen;
u8 start;
u8 final_phase;
};
struct msdc_host {
struct device *dev;
const struct mtk_mmc_compatible *dev_comp;
int cmd_rsp;
spinlock_t lock;
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
int error;
void __iomem *base; /* host base address */
void __iomem *top_base; /* host top register base address */
struct msdc_dma dma; /* dma channel */
u64 dma_mask;
u32 timeout_ns; /* data timeout ns */
u32 timeout_clks; /* data timeout clks */
struct pinctrl *pinctrl;
struct pinctrl_state *pins_default;
struct pinctrl_state *pins_uhs;
struct pinctrl_state *pins_eint;
struct delayed_work req_timeout;
int irq; /* host interrupt */
int eint_irq; /* interrupt from sdio device for waking up system */
struct reset_control *reset;
struct clk *src_clk; /* msdc source clock */
struct clk *h_clk; /* msdc h_clk */
struct clk *bus_clk; /* bus clock which used to access register */
struct clk *src_clk_cg; /* msdc source clock control gate */
struct clk *sys_clk_cg; /* msdc subsys clock control gate */
struct clk *crypto_clk; /* msdc crypto clock control gate */
struct clk_bulk_data bulk_clks[MSDC_NR_CLOCKS];
u32 mclk; /* mmc subsystem clock frequency */
u32 src_clk_freq; /* source clock frequency */
unsigned char timing;
bool vqmmc_enabled;
u32 latch_ck;
u32 hs400_ds_delay;
u32 hs400_ds_dly3;
u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */
u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */
bool hs400_cmd_resp_sel_rising;
/* cmd response sample selection for HS400 */
bool hs400_mode; /* current eMMC will run at hs400 mode */
bool hs400_tuning; /* hs400 mode online tuning */
bool internal_cd; /* Use internal card-detect logic */
bool cqhci; /* support eMMC hw cmdq */
struct msdc_save_para save_para; /* used when gate HCLK */
struct msdc_tune_para def_tune_para; /* default tune setting */
struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */
struct cqhci_host *cq_host;
u32 cq_ssc1_time;
};
static const struct mtk_mmc_compatible mt2701_compat = {
.clk_div_bits = 12,
.recheck_sdio_irq = true,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
.data_tune = true,
.busy_check = false,
.stop_clk_fix = false,
.enhance_rx = false,
.support_64g = false,
};
static const struct mtk_mmc_compatible mt2712_compat = {
.clk_div_bits = 12,
.recheck_sdio_irq = false,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
.data_tune = true,
.busy_check = true,
.stop_clk_fix = true,
.enhance_rx = true,
.support_64g = true,
};
static const struct mtk_mmc_compatible mt6779_compat = {
.clk_div_bits = 12,
.recheck_sdio_irq = false,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
.data_tune = true,
.busy_check = true,
.stop_clk_fix = true,
.enhance_rx = true,
.support_64g = true,
};
static const struct mtk_mmc_compatible mt6795_compat = {
.clk_div_bits = 8,
.recheck_sdio_irq = false,
.hs400_tune = true,
.pad_tune_reg = MSDC_PAD_TUNE,
.async_fifo = false,
.data_tune = false,
.busy_check = false,
.stop_clk_fix = false,
.enhance_rx = false,
.support_64g = false,
};
static const struct mtk_mmc_compatible mt7620_compat = {
.clk_div_bits = 8,
.recheck_sdio_irq = true,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE,
.async_fifo = false,
.data_tune = false,
.busy_check = false,
.stop_clk_fix = false,
.enhance_rx = false,
.use_internal_cd = true,
};
static const struct mtk_mmc_compatible mt7622_compat = {
.clk_div_bits = 12,
.recheck_sdio_irq = true,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
.data_tune = true,
.busy_check = true,
.stop_clk_fix = true,
.enhance_rx = true,
.support_64g = false,
};
static const struct mtk_mmc_compatible mt7986_compat = {
.clk_div_bits = 12,
.recheck_sdio_irq = true,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
.data_tune = true,
.busy_check = true,
.stop_clk_fix = true,
.enhance_rx = true,
.support_64g = true,
};
static const struct mtk_mmc_compatible mt8135_compat = {
.clk_div_bits = 8,
.recheck_sdio_irq = true,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE,
.async_fifo = false,
.data_tune = false,
.busy_check = false,
.stop_clk_fix = false,
.enhance_rx = false,
.support_64g = false,
};
static const struct mtk_mmc_compatible mt8173_compat = {
.clk_div_bits = 8,
.recheck_sdio_irq = true,
.hs400_tune = true,
.pad_tune_reg = MSDC_PAD_TUNE,
.async_fifo = false,
.data_tune = false,
.busy_check = false,
.stop_clk_fix = false,
.enhance_rx = false,
.support_64g = false,
};
static const struct mtk_mmc_compatible mt8183_compat = {
.clk_div_bits = 12,
.recheck_sdio_irq = false,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
.data_tune = true,
.busy_check = true,
.stop_clk_fix = true,
.enhance_rx = true,
.support_64g = true,
};
static const struct mtk_mmc_compatible mt8516_compat = {
.clk_div_bits = 12,
.recheck_sdio_irq = true,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
.data_tune = true,
.busy_check = true,
.stop_clk_fix = true,
};
static const struct of_device_id msdc_of_ids[] = {
{ .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat},
{ .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat},
{ .compatible = "mediatek,mt6779-mmc", .data = &mt6779_compat},
{ .compatible = "mediatek,mt6795-mmc", .data = &mt6795_compat},
{ .compatible = "mediatek,mt7620-mmc", .data = &mt7620_compat},
{ .compatible = "mediatek,mt7622-mmc", .data = &mt7622_compat},
{ .compatible = "mediatek,mt7986-mmc", .data = &mt7986_compat},
{ .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
{ .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
{ .compatible = "mediatek,mt8183-mmc", .data = &mt8183_compat},
{ .compatible = "mediatek,mt8516-mmc", .data = &mt8516_compat},
{}
};
MODULE_DEVICE_TABLE(of, msdc_of_ids);
static void sdr_set_bits(void __iomem *reg, u32 bs)
{
u32 val = readl(reg);
val |= bs;
writel(val, reg);
}
static void sdr_clr_bits(void __iomem *reg, u32 bs)
{
u32 val = readl(reg);
val &= ~bs;
writel(val, reg);
}
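/*
 * @field is a contiguous bitmask; ffs(field) - 1 gives the shift of its
 * least significant bit, so the helpers below insert or extract @val at
 * the right position within the register.
 */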
static void sdr_set_field(void __iomem *reg, u32 field, u32 val)
{
unsigned int tv = readl(reg);
tv &= ~field;
tv |= ((val) << (ffs((unsigned int)field) - 1));
writel(tv, reg);
}
static void sdr_get_field(void __iomem *reg, u32 field, u32 *val)
{
unsigned int tv = readl(reg);
*val = ((tv & field) >> (ffs((unsigned int)field) - 1));
}
static void msdc_reset_hw(struct msdc_host *host)
{
u32 val;
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST);
readl_poll_timeout(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0);
sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR);
readl_poll_timeout(host->base + MSDC_FIFOCS, val,
!(val & MSDC_FIFOCS_CLR), 0, 0);
val = readl(host->base + MSDC_INT);
writel(val, host->base + MSDC_INT);
}
static void msdc_cmd_next(struct msdc_host *host,
struct mmc_request *mrq, struct mmc_command *cmd);
static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb);
static const u32 cmd_ints_mask = MSDC_INTEN_CMDRDY | MSDC_INTEN_RSPCRCERR |
MSDC_INTEN_CMDTMO | MSDC_INTEN_ACMDRDY |
MSDC_INTEN_ACMDCRCERR | MSDC_INTEN_ACMDTMO;
static const u32 data_ints_mask = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO |
MSDC_INTEN_DATCRCERR | MSDC_INTEN_DMA_BDCSERR |
MSDC_INTEN_DMA_GPDCSERR | MSDC_INTEN_DMA_PROTECT;
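/*
 * Descriptor checksum: 0xff minus the 8-bit sum of the descriptor bytes.
 * The DMA engine verifies it when MSDC_DMA_CFG_DECSEN (presumably
 * "descriptor checksum enable") is set in msdc_dma_setup().
 */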
static u8 msdc_dma_calcs(u8 *buf, u32 len)
{
u32 i, sum = 0;
for (i = 0; i < len; i++)
sum += buf[i];
return 0xff - (u8) sum;
}
static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
struct mmc_data *data)
{
unsigned int j, dma_len;
dma_addr_t dma_address;
u32 dma_ctrl;
struct scatterlist *sg;
struct mt_gpdma_desc *gpd;
struct mt_bdma_desc *bd;
sg = data->sg;
gpd = dma->gpd;
bd = dma->bd;
/* modify gpd */
gpd->gpd_info |= GPDMA_DESC_HWO;
gpd->gpd_info |= GPDMA_DESC_BDP;
	/* the checksum field must be cleared first, as these bits enter the calculation */
gpd->gpd_info &= ~GPDMA_DESC_CHECKSUM;
gpd->gpd_info |= msdc_dma_calcs((u8 *) gpd, 16) << 8;
/* modify bd */
for_each_sg(data->sg, sg, data->sg_count, j) {
dma_address = sg_dma_address(sg);
dma_len = sg_dma_len(sg);
/* init bd */
bd[j].bd_info &= ~BDMA_DESC_BLKPAD;
bd[j].bd_info &= ~BDMA_DESC_DWPAD;
bd[j].ptr = lower_32_bits(dma_address);
if (host->dev_comp->support_64g) {
bd[j].bd_info &= ~BDMA_DESC_PTR_H4;
bd[j].bd_info |= (upper_32_bits(dma_address) & 0xf)
<< 28;
}
if (host->dev_comp->support_64g) {
bd[j].bd_data_len &= ~BDMA_DESC_BUFLEN_EXT;
bd[j].bd_data_len |= (dma_len & BDMA_DESC_BUFLEN_EXT);
} else {
bd[j].bd_data_len &= ~BDMA_DESC_BUFLEN;
bd[j].bd_data_len |= (dma_len & BDMA_DESC_BUFLEN);
}
if (j == data->sg_count - 1) /* the last bd */
bd[j].bd_info |= BDMA_DESC_EOL;
else
bd[j].bd_info &= ~BDMA_DESC_EOL;
		/* the checksum field must be cleared first */
bd[j].bd_info &= ~BDMA_DESC_CHECKSUM;
bd[j].bd_info |= msdc_dma_calcs((u8 *)(&bd[j]), 16) << 8;
}
sdr_set_field(host->base + MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, 1);
dma_ctrl = readl_relaxed(host->base + MSDC_DMA_CTRL);
dma_ctrl &= ~(MSDC_DMA_CTRL_BRUSTSZ | MSDC_DMA_CTRL_MODE);
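	/*
	 * MSDC_BURST_64B lands in the BRUSTSZ field (bits 14:12); BIT(8) is
	 * MSDC_DMA_CTRL_MODE, presumably selecting descriptor DMA mode.
	 */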
dma_ctrl |= (MSDC_BURST_64B << 12 | BIT(8));
writel_relaxed(dma_ctrl, host->base + MSDC_DMA_CTRL);
if (host->dev_comp->support_64g)
sdr_set_field(host->base + DMA_SA_H4BIT, DMA_ADDR_HIGH_4BIT,
upper_32_bits(dma->gpd_addr) & 0xf);
writel(lower_32_bits(dma->gpd_addr), host->base + MSDC_DMA_SA);
}
static void msdc_prepare_data(struct msdc_host *host, struct mmc_data *data)
{
if (!(data->host_cookie & MSDC_PREPARE_FLAG)) {
data->host_cookie |= MSDC_PREPARE_FLAG;
data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len,
mmc_get_dma_dir(data));
}
}
static void msdc_unprepare_data(struct msdc_host *host, struct mmc_data *data)
{
if (data->host_cookie & MSDC_ASYNC_FLAG)
return;
if (data->host_cookie & MSDC_PREPARE_FLAG) {
dma_unmap_sg(host->dev, data->sg, data->sg_len,
mmc_get_dma_dir(data));
data->host_cookie &= ~MSDC_PREPARE_FLAG;
}
}
static u64 msdc_timeout_cal(struct msdc_host *host, u64 ns, u64 clks)
{
struct mmc_host *mmc = mmc_from_priv(host);
u64 timeout, clk_ns;
u32 mode = 0;
if (mmc->actual_clock == 0) {
timeout = 0;
} else {
clk_ns = 1000000000ULL;
do_div(clk_ns, mmc->actual_clock);
timeout = ns + clk_ns - 1;
do_div(timeout, clk_ns);
timeout += clks;
/* in 1048576 sclk cycle unit */
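		/*
		 * e.g. a 300 ms timeout at a 200 MHz sclk is ~60M cycles,
		 * so DIV_ROUND_UP(60000000, 1048576) = 58 units.
		 */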
timeout = DIV_ROUND_UP(timeout, BIT(20));
if (host->dev_comp->clk_div_bits == 8)
sdr_get_field(host->base + MSDC_CFG,
MSDC_CFG_CKMOD, &mode);
else
sdr_get_field(host->base + MSDC_CFG,
MSDC_CFG_CKMOD_EXTRA, &mode);
		/* DDR mode doubles the clock cycles for the data timeout */
timeout = mode >= 2 ? timeout * 2 : timeout;
timeout = timeout > 1 ? timeout - 1 : 0;
}
return timeout;
}
/* clock control primitives */
static void msdc_set_timeout(struct msdc_host *host, u64 ns, u64 clks)
{
u64 timeout;
host->timeout_ns = ns;
host->timeout_clks = clks;
timeout = msdc_timeout_cal(host, ns, clks);
sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC,
(u32)(timeout > 255 ? 255 : timeout));
}
static void msdc_set_busy_timeout(struct msdc_host *host, u64 ns, u64 clks)
{
u64 timeout;
timeout = msdc_timeout_cal(host, ns, clks);
sdr_set_field(host->base + SDC_CFG, SDC_CFG_WRDTOC,
(u32)(timeout > 8191 ? 8191 : timeout));
}
static void msdc_gate_clock(struct msdc_host *host)
{
clk_bulk_disable_unprepare(MSDC_NR_CLOCKS, host->bulk_clks);
clk_disable_unprepare(host->crypto_clk);
clk_disable_unprepare(host->src_clk_cg);
clk_disable_unprepare(host->src_clk);
clk_disable_unprepare(host->bus_clk);
clk_disable_unprepare(host->h_clk);
}
static int msdc_ungate_clock(struct msdc_host *host)
{
u32 val;
int ret;
clk_prepare_enable(host->h_clk);
clk_prepare_enable(host->bus_clk);
clk_prepare_enable(host->src_clk);
clk_prepare_enable(host->src_clk_cg);
clk_prepare_enable(host->crypto_clk);
ret = clk_bulk_prepare_enable(MSDC_NR_CLOCKS, host->bulk_clks);
if (ret) {
dev_err(host->dev, "Cannot enable pclk/axi/ahb clock gates\n");
return ret;
}
return readl_poll_timeout(host->base + MSDC_CFG, val,
(val & MSDC_CFG_CKSTB), 1, 20000);
}
static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
{
struct mmc_host *mmc = mmc_from_priv(host);
u32 mode;
u32 flags;
u32 div;
u32 sclk;
u32 tune_reg = host->dev_comp->pad_tune_reg;
u32 val;
if (!hz) {
dev_dbg(host->dev, "set mclk to 0\n");
host->mclk = 0;
mmc->actual_clock = 0;
sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
return;
}
flags = readl(host->base + MSDC_INTEN);
sdr_clr_bits(host->base + MSDC_INTEN, flags);
if (host->dev_comp->clk_div_bits == 8)
sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
else
sdr_clr_bits(host->base + MSDC_CFG,
MSDC_CFG_HS400_CK_MODE_EXTRA);
if (timing == MMC_TIMING_UHS_DDR50 ||
timing == MMC_TIMING_MMC_DDR52 ||
timing == MMC_TIMING_MMC_HS400) {
if (timing == MMC_TIMING_MMC_HS400)
mode = 0x3;
else
mode = 0x2; /* ddr mode and use divisor */
if (hz >= (host->src_clk_freq >> 2)) {
div = 0; /* mean div = 1/4 */
sclk = host->src_clk_freq >> 2; /* sclk = clk / 4 */
} else {
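			/* i.e. div = DIV_ROUND_UP(src_clk_freq, 4 * hz) */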
div = (host->src_clk_freq + ((hz << 2) - 1)) / (hz << 2);
sclk = (host->src_clk_freq >> 2) / div;
div = (div >> 1);
}
if (timing == MMC_TIMING_MMC_HS400 &&
hz >= (host->src_clk_freq >> 1)) {
if (host->dev_comp->clk_div_bits == 8)
sdr_set_bits(host->base + MSDC_CFG,
MSDC_CFG_HS400_CK_MODE);
else
sdr_set_bits(host->base + MSDC_CFG,
MSDC_CFG_HS400_CK_MODE_EXTRA);
sclk = host->src_clk_freq >> 1;
			div = 0; /* div is ignored when bit 18 is set */
}
} else if (hz >= host->src_clk_freq) {
mode = 0x1; /* no divisor */
div = 0;
sclk = host->src_clk_freq;
} else {
mode = 0x0; /* use divisor */
if (hz >= (host->src_clk_freq >> 1)) {
div = 0; /* mean div = 1/2 */
sclk = host->src_clk_freq >> 1; /* sclk = clk / 2 */
} else {
div = (host->src_clk_freq + ((hz << 2) - 1)) / (hz << 2);
sclk = (host->src_clk_freq >> 2) / div;
}
}
sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
clk_disable_unprepare(host->src_clk_cg);
if (host->dev_comp->clk_div_bits == 8)
sdr_set_field(host->base + MSDC_CFG,
MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
(mode << 8) | div);
else
sdr_set_field(host->base + MSDC_CFG,
MSDC_CFG_CKMOD_EXTRA | MSDC_CFG_CKDIV_EXTRA,
(mode << 12) | div);
clk_prepare_enable(host->src_clk_cg);
readl_poll_timeout(host->base + MSDC_CFG, val, (val & MSDC_CFG_CKSTB), 0, 0);
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
mmc->actual_clock = sclk;
host->mclk = hz;
host->timing = timing;
	/* needed because the clock has changed */
msdc_set_timeout(host, host->timeout_ns, host->timeout_clks);
sdr_set_bits(host->base + MSDC_INTEN, flags);
	/*
	 * mmc_select_hs400() will drop to 50MHz and High speed mode,
	 * so the tune result of hs200/200MHz is not suitable for 50MHz.
	 */
if (mmc->actual_clock <= 52000000) {
writel(host->def_tune_para.iocon, host->base + MSDC_IOCON);
if (host->top_base) {
writel(host->def_tune_para.emmc_top_control,
host->top_base + EMMC_TOP_CONTROL);
writel(host->def_tune_para.emmc_top_cmd,
host->top_base + EMMC_TOP_CMD);
} else {
writel(host->def_tune_para.pad_tune,
host->base + tune_reg);
}
} else {
writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON);
writel(host->saved_tune_para.pad_cmd_tune,
host->base + PAD_CMD_TUNE);
if (host->top_base) {
writel(host->saved_tune_para.emmc_top_control,
host->top_base + EMMC_TOP_CONTROL);
writel(host->saved_tune_para.emmc_top_cmd,
host->top_base + EMMC_TOP_CMD);
} else {
writel(host->saved_tune_para.pad_tune,
host->base + tune_reg);
}
}
if (timing == MMC_TIMING_MMC_HS400 &&
host->dev_comp->hs400_tune)
sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY,
host->hs400_cmd_int_delay);
dev_dbg(host->dev, "sclk: %d, timing: %d\n", mmc->actual_clock,
timing);
}
static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
struct mmc_command *cmd)
{
u32 resp;
switch (mmc_resp_type(cmd)) {
/* Actually, R1, R5, R6, R7 are the same */
case MMC_RSP_R1:
resp = 0x1;
break;
case MMC_RSP_R1B:
resp = 0x7;
break;
case MMC_RSP_R2:
resp = 0x2;
break;
case MMC_RSP_R3:
resp = 0x3;
break;
case MMC_RSP_NONE:
default:
resp = 0x0;
break;
}
return resp;
}
static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
struct mmc_request *mrq, struct mmc_command *cmd)
{
struct mmc_host *mmc = mmc_from_priv(host);
/* rawcmd :
* vol_swt << 30 | auto_cmd << 28 | blklen << 16 | go_irq << 15 |
* stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode
*/
u32 opcode = cmd->opcode;
u32 resp = msdc_cmd_find_resp(host, cmd);
u32 rawcmd = (opcode & 0x3f) | ((resp & 0x7) << 7);
host->cmd_rsp = resp;
if ((opcode == SD_IO_RW_DIRECT && cmd->flags == (unsigned int) -1) ||
opcode == MMC_STOP_TRANSMISSION)
rawcmd |= BIT(14);
else if (opcode == SD_SWITCH_VOLTAGE)
rawcmd |= BIT(30);
else if (opcode == SD_APP_SEND_SCR ||
opcode == SD_APP_SEND_NUM_WR_BLKS ||
(opcode == SD_SWITCH && mmc_cmd_type(cmd) == MMC_CMD_ADTC) ||
(opcode == SD_APP_SD_STATUS && mmc_cmd_type(cmd) == MMC_CMD_ADTC) ||
(opcode == MMC_SEND_EXT_CSD && mmc_cmd_type(cmd) == MMC_CMD_ADTC))
rawcmd |= BIT(11);
if (cmd->data) {
struct mmc_data *data = cmd->data;
if (mmc_op_multi(opcode)) {
if (mmc_card_mmc(mmc->card) && mrq->sbc &&
!(mrq->sbc->arg & 0xFFFF0000))
rawcmd |= BIT(29); /* AutoCMD23 */
}
rawcmd |= ((data->blksz & 0xFFF) << 16);
if (data->flags & MMC_DATA_WRITE)
rawcmd |= BIT(13);
if (data->blocks > 1)
rawcmd |= BIT(12);
else
rawcmd |= BIT(11);
/* Always use dma mode */
sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_PIO);
if (host->timeout_ns != data->timeout_ns ||
host->timeout_clks != data->timeout_clks)
msdc_set_timeout(host, data->timeout_ns,
data->timeout_clks);
writel(data->blocks, host->base + SDC_BLK_NUM);
}
return rawcmd;
}
static void msdc_start_data(struct msdc_host *host, struct mmc_command *cmd,
struct mmc_data *data)
{
bool read;
WARN_ON(host->data);
host->data = data;
read = data->flags & MMC_DATA_READ;
mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
msdc_dma_setup(host, &host->dma, data);
sdr_set_bits(host->base + MSDC_INTEN, data_ints_mask);
sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1);
dev_dbg(host->dev, "DMA start\n");
dev_dbg(host->dev, "%s: cmd=%d DMA data: %d blocks; read=%d\n",
__func__, cmd->opcode, data->blocks, read);
}
static int msdc_auto_cmd_done(struct msdc_host *host, int events,
struct mmc_command *cmd)
{
u32 *rsp = cmd->resp;
rsp[0] = readl(host->base + SDC_ACMD_RESP);
if (events & MSDC_INT_ACMDRDY) {
cmd->error = 0;
} else {
msdc_reset_hw(host);
if (events & MSDC_INT_ACMDCRCERR) {
cmd->error = -EILSEQ;
host->error |= REQ_STOP_EIO;
} else if (events & MSDC_INT_ACMDTMO) {
cmd->error = -ETIMEDOUT;
host->error |= REQ_STOP_TMO;
}
dev_err(host->dev,
"%s: AUTO_CMD%d arg=%08X; rsp %08X; cmd_error=%d\n",
__func__, cmd->opcode, cmd->arg, rsp[0], cmd->error);
}
return cmd->error;
}
/*
 * msdc_recheck_sdio_irq - recheck whether the SDIO irq is lost
 *
 * The host controller may lose an interrupt in some special cases.
 * Add an SDIO irq recheck mechanism to make sure all interrupts
 * can be processed immediately.
 */
static void msdc_recheck_sdio_irq(struct msdc_host *host)
{
struct mmc_host *mmc = mmc_from_priv(host);
u32 reg_int, reg_inten, reg_ps;
if (mmc->caps & MMC_CAP_SDIO_IRQ) {
reg_inten = readl(host->base + MSDC_INTEN);
if (reg_inten & MSDC_INTEN_SDIOIRQ) {
reg_int = readl(host->base + MSDC_INT);
reg_ps = readl(host->base + MSDC_PS);
if (!(reg_int & MSDC_INT_SDIOIRQ ||
reg_ps & MSDC_PS_DATA1)) {
__msdc_enable_sdio_irq(host, 0);
sdio_signal_irq(mmc);
}
}
}
}
static void msdc_track_cmd_data(struct msdc_host *host, struct mmc_command *cmd)
{
if (host->error)
dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n",
__func__, cmd->opcode, cmd->arg, host->error);
}
static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
{
unsigned long flags;
	/*
	 * No need to check the return value of cancel_delayed_work(), as
	 * only ONE path will get here!
	 */
cancel_delayed_work(&host->req_timeout);
spin_lock_irqsave(&host->lock, flags);
host->mrq = NULL;
spin_unlock_irqrestore(&host->lock, flags);
msdc_track_cmd_data(host, mrq->cmd);
if (mrq->data)
msdc_unprepare_data(host, mrq->data);
if (host->error)
msdc_reset_hw(host);
mmc_request_done(mmc_from_priv(host), mrq);
if (host->dev_comp->recheck_sdio_irq)
msdc_recheck_sdio_irq(host);
}
/* returns true if command is fully handled; returns false otherwise */
static bool msdc_cmd_done(struct msdc_host *host, int events,
struct mmc_request *mrq, struct mmc_command *cmd)
{
bool done = false;
bool sbc_error;
unsigned long flags;
u32 *rsp;
if (mrq->sbc && cmd == mrq->cmd &&
(events & (MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR
| MSDC_INT_ACMDTMO)))
msdc_auto_cmd_done(host, events, mrq->sbc);
sbc_error = mrq->sbc && mrq->sbc->error;
if (!sbc_error && !(events & (MSDC_INT_CMDRDY
| MSDC_INT_RSPCRCERR
| MSDC_INT_CMDTMO)))
return done;
spin_lock_irqsave(&host->lock, flags);
done = !host->cmd;
host->cmd = NULL;
spin_unlock_irqrestore(&host->lock, flags);
if (done)
return true;
rsp = cmd->resp;
sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask);
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
rsp[0] = readl(host->base + SDC_RESP3);
rsp[1] = readl(host->base + SDC_RESP2);
rsp[2] = readl(host->base + SDC_RESP1);
rsp[3] = readl(host->base + SDC_RESP0);
} else {
rsp[0] = readl(host->base + SDC_RESP0);
}
}
if (!sbc_error && !(events & MSDC_INT_CMDRDY)) {
if (events & MSDC_INT_CMDTMO ||
(!mmc_op_tuning(cmd->opcode) && !host->hs400_tuning))
/*
* should not clear fifo/interrupt as the tune data
* may have already come when cmd19/cmd21 gets response
* CRC error.
*/
msdc_reset_hw(host);
if (events & MSDC_INT_RSPCRCERR) {
cmd->error = -EILSEQ;
host->error |= REQ_CMD_EIO;
} else if (events & MSDC_INT_CMDTMO) {
cmd->error = -ETIMEDOUT;
host->error |= REQ_CMD_TMO;
}
}
if (cmd->error)
dev_dbg(host->dev,
"%s: cmd=%d arg=%08X; rsp %08X; cmd_error=%d\n",
__func__, cmd->opcode, cmd->arg, rsp[0],
cmd->error);
msdc_cmd_next(host, mrq, cmd);
return true;
}
/*
 * It is the core layer's responsibility to ensure that the card status
 * is correct before issuing a request, but the host design recommends
 * performing the checks below anyway.
 */
static inline bool msdc_cmd_is_ready(struct msdc_host *host,
struct mmc_request *mrq, struct mmc_command *cmd)
{
u32 val;
int ret;
/* The max busy time we can endure is 20ms */
ret = readl_poll_timeout_atomic(host->base + SDC_STS, val,
!(val & SDC_STS_CMDBUSY), 1, 20000);
if (ret) {
dev_err(host->dev, "CMD bus busy detected\n");
host->error |= REQ_CMD_BUSY;
msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd);
return false;
}
if (mmc_resp_type(cmd) == MMC_RSP_R1B || cmd->data) {
/* R1B or with data, should check SDCBUSY */
ret = readl_poll_timeout_atomic(host->base + SDC_STS, val,
!(val & SDC_STS_SDCBUSY), 1, 20000);
if (ret) {
dev_err(host->dev, "Controller busy detected\n");
host->error |= REQ_CMD_BUSY;
msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd);
return false;
}
}
return true;
}
static void msdc_start_command(struct msdc_host *host,
struct mmc_request *mrq, struct mmc_command *cmd)
{
u32 rawcmd;
unsigned long flags;
WARN_ON(host->cmd);
host->cmd = cmd;
mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
if (!msdc_cmd_is_ready(host, mrq, cmd))
return;
if ((readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_TXCNT) >> 16 ||
readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_RXCNT) {
dev_err(host->dev, "TX/RX FIFO non-empty before start of IO. Reset\n");
msdc_reset_hw(host);
}
cmd->error = 0;
rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd);
spin_lock_irqsave(&host->lock, flags);
sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask);
spin_unlock_irqrestore(&host->lock, flags);
writel(cmd->arg, host->base + SDC_ARG);
writel(rawcmd, host->base + SDC_CMD);
}
static void msdc_cmd_next(struct msdc_host *host,
struct mmc_request *mrq, struct mmc_command *cmd)
{
if ((cmd->error &&
!(cmd->error == -EILSEQ &&
(mmc_op_tuning(cmd->opcode) || host->hs400_tuning))) ||
(mrq->sbc && mrq->sbc->error))
msdc_request_done(host, mrq);
else if (cmd == mrq->sbc)
msdc_start_command(host, mrq, mrq->cmd);
else if (!cmd->data)
msdc_request_done(host, mrq);
else
msdc_start_data(host, cmd, cmd->data);
}
static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct msdc_host *host = mmc_priv(mmc);
host->error = 0;
WARN_ON(host->mrq);
host->mrq = mrq;
if (mrq->data)
msdc_prepare_data(host, mrq->data);
	/*
	 * If SBC is required, we have a HW option and a SW option.
	 * If the HW option is enabled and the SBC does not have "special"
	 * flags, use the HW option; otherwise use the SW option.
	 */
if (mrq->sbc && (!mmc_card_mmc(mmc->card) ||
(mrq->sbc->arg & 0xFFFF0000)))
msdc_start_command(host, mrq, mrq->sbc);
else
msdc_start_command(host, mrq, mrq->cmd);
}
static void msdc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct msdc_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
if (!data)
return;
msdc_prepare_data(host, data);
data->host_cookie |= MSDC_ASYNC_FLAG;
}
static void msdc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
int err)
{
struct msdc_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
if (!data)
return;
if (data->host_cookie) {
data->host_cookie &= ~MSDC_ASYNC_FLAG;
msdc_unprepare_data(host, data);
}
}
static void msdc_data_xfer_next(struct msdc_host *host, struct mmc_request *mrq)
{
if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && !mrq->stop->error &&
!mrq->sbc)
msdc_start_command(host, mrq, mrq->stop);
else
msdc_request_done(host, mrq);
}
static void msdc_data_xfer_done(struct msdc_host *host, u32 events,
struct mmc_request *mrq, struct mmc_data *data)
{
struct mmc_command *stop;
unsigned long flags;
bool done;
unsigned int check_data = events &
(MSDC_INT_XFER_COMPL | MSDC_INT_DATCRCERR | MSDC_INT_DATTMO
| MSDC_INT_DMA_BDCSERR | MSDC_INT_DMA_GPDCSERR
| MSDC_INT_DMA_PROTECT);
u32 val;
int ret;
spin_lock_irqsave(&host->lock, flags);
done = !host->data;
if (check_data)
host->data = NULL;
spin_unlock_irqrestore(&host->lock, flags);
if (done)
return;
stop = data->stop;
if (check_data || (stop && stop->error)) {
dev_dbg(host->dev, "DMA status: 0x%8X\n",
readl(host->base + MSDC_DMA_CFG));
sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP,
1);
ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CTRL, val,
!(val & MSDC_DMA_CTRL_STOP), 1, 20000);
if (ret)
dev_dbg(host->dev, "DMA stop timed out\n");
ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CFG, val,
!(val & MSDC_DMA_CFG_STS), 1, 20000);
if (ret)
dev_dbg(host->dev, "DMA inactive timed out\n");
sdr_clr_bits(host->base + MSDC_INTEN, data_ints_mask);
dev_dbg(host->dev, "DMA stop\n");
if ((events & MSDC_INT_XFER_COMPL) && (!stop || !stop->error)) {
data->bytes_xfered = data->blocks * data->blksz;
} else {
dev_dbg(host->dev, "interrupt events: %x\n", events);
msdc_reset_hw(host);
host->error |= REQ_DAT_ERR;
data->bytes_xfered = 0;
if (events & MSDC_INT_DATTMO)
data->error = -ETIMEDOUT;
else if (events & MSDC_INT_DATCRCERR)
data->error = -EILSEQ;
dev_dbg(host->dev, "%s: cmd=%d; blocks=%d",
__func__, mrq->cmd->opcode, data->blocks);
dev_dbg(host->dev, "data_error=%d xfer_size=%d\n",
(int)data->error, data->bytes_xfered);
}
msdc_data_xfer_next(host, mrq);
}
}
static void msdc_set_buswidth(struct msdc_host *host, u32 width)
{
u32 val = readl(host->base + SDC_CFG);
val &= ~SDC_CFG_BUSWIDTH;
switch (width) {
default:
case MMC_BUS_WIDTH_1:
val |= (MSDC_BUS_1BITS << 16);
break;
case MMC_BUS_WIDTH_4:
val |= (MSDC_BUS_4BITS << 16);
break;
case MMC_BUS_WIDTH_8:
val |= (MSDC_BUS_8BITS << 16);
break;
}
writel(val, host->base + SDC_CFG);
dev_dbg(host->dev, "Bus Width = %d", width);
}
static int msdc_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct msdc_host *host = mmc_priv(mmc);
int ret;
if (!IS_ERR(mmc->supply.vqmmc)) {
if (ios->signal_voltage != MMC_SIGNAL_VOLTAGE_330 &&
ios->signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
dev_err(host->dev, "Unsupported signal voltage!\n");
return -EINVAL;
}
ret = mmc_regulator_set_vqmmc(mmc, ios);
if (ret < 0) {
dev_dbg(host->dev, "Regulator set error %d (%d)\n",
ret, ios->signal_voltage);
return ret;
}
/* Apply different pinctrl settings for different signal voltage */
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
pinctrl_select_state(host->pinctrl, host->pins_uhs);
else
pinctrl_select_state(host->pinctrl, host->pins_default);
}
return 0;
}
static int msdc_card_busy(struct mmc_host *mmc)
{
struct msdc_host *host = mmc_priv(mmc);
u32 status = readl(host->base + MSDC_PS);
/* only check if data0 is low */
return !(status & BIT(16));
}
static void msdc_request_timeout(struct work_struct *work)
{
struct msdc_host *host = container_of(work, struct msdc_host,
req_timeout.work);
/* simulate HW timeout status */
dev_err(host->dev, "%s: aborting cmd/data/mrq\n", __func__);
if (host->mrq) {
dev_err(host->dev, "%s: aborting mrq=%p cmd=%d\n", __func__,
host->mrq, host->mrq->cmd->opcode);
if (host->cmd) {
dev_err(host->dev, "%s: aborting cmd=%d\n",
__func__, host->cmd->opcode);
msdc_cmd_done(host, MSDC_INT_CMDTMO, host->mrq,
host->cmd);
} else if (host->data) {
dev_err(host->dev, "%s: abort data: cmd%d; %d blocks\n",
__func__, host->mrq->cmd->opcode,
host->data->blocks);
msdc_data_xfer_done(host, MSDC_INT_DATTMO, host->mrq,
host->data);
}
}
}
static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb)
{
if (enb) {
sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
if (host->dev_comp->recheck_sdio_irq)
msdc_recheck_sdio_irq(host);
} else {
sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
}
}
static void msdc_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
struct msdc_host *host = mmc_priv(mmc);
unsigned long flags;
int ret;
spin_lock_irqsave(&host->lock, flags);
__msdc_enable_sdio_irq(host, enb);
spin_unlock_irqrestore(&host->lock, flags);
if (mmc_card_enable_async_irq(mmc->card) && host->pins_eint) {
if (enb) {
			/*
			 * In dev_pm_set_dedicated_wake_irq_reverse(), the eint pin will be
			 * switched to GPIO mode, so it must be restored to SDIO DAT1 mode
			 * afterwards. Since the current pin state is pins_uhs, switch to
			 * pins_eint first so that the later pinctrl select takes effect.
			 */
pinctrl_select_state(host->pinctrl, host->pins_eint);
ret = dev_pm_set_dedicated_wake_irq_reverse(host->dev, host->eint_irq);
if (ret) {
dev_err(host->dev, "Failed to register SDIO wakeup irq!\n");
host->pins_eint = NULL;
pm_runtime_get_noresume(host->dev);
} else {
dev_dbg(host->dev, "SDIO eint irq: %d!\n", host->eint_irq);
}
pinctrl_select_state(host->pinctrl, host->pins_uhs);
} else {
dev_pm_clear_wake_irq(host->dev);
}
} else {
if (enb) {
/* Ensure host->pins_eint is NULL */
host->pins_eint = NULL;
pm_runtime_get_noresume(host->dev);
} else {
pm_runtime_put_noidle(host->dev);
}
}
}
static irqreturn_t msdc_cmdq_irq(struct msdc_host *host, u32 intsts)
{
struct mmc_host *mmc = mmc_from_priv(host);
int cmd_err = 0, dat_err = 0;
if (intsts & MSDC_INT_RSPCRCERR) {
cmd_err = -EILSEQ;
dev_err(host->dev, "%s: CMD CRC ERR", __func__);
} else if (intsts & MSDC_INT_CMDTMO) {
cmd_err = -ETIMEDOUT;
dev_err(host->dev, "%s: CMD TIMEOUT ERR", __func__);
}
if (intsts & MSDC_INT_DATCRCERR) {
dat_err = -EILSEQ;
dev_err(host->dev, "%s: DATA CRC ERR", __func__);
} else if (intsts & MSDC_INT_DATTMO) {
dat_err = -ETIMEDOUT;
dev_err(host->dev, "%s: DATA TIMEOUT ERR", __func__);
}
if (cmd_err || dat_err) {
		dev_err(host->dev, "cmd_err = %d, dat_err = %d, intsts = 0x%x",
cmd_err, dat_err, intsts);
}
return cqhci_irq(mmc, 0, cmd_err, dat_err);
}
static irqreturn_t msdc_irq(int irq, void *dev_id)
{
struct msdc_host *host = (struct msdc_host *) dev_id;
struct mmc_host *mmc = mmc_from_priv(host);
while (true) {
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
u32 events, event_mask;
spin_lock(&host->lock);
events = readl(host->base + MSDC_INT);
event_mask = readl(host->base + MSDC_INTEN);
if ((events & event_mask) & MSDC_INT_SDIOIRQ)
__msdc_enable_sdio_irq(host, 0);
/* clear interrupts */
writel(events & event_mask, host->base + MSDC_INT);
mrq = host->mrq;
cmd = host->cmd;
data = host->data;
spin_unlock(&host->lock);
if ((events & event_mask) & MSDC_INT_SDIOIRQ)
sdio_signal_irq(mmc);
if ((events & event_mask) & MSDC_INT_CDSC) {
if (host->internal_cd)
mmc_detect_change(mmc, msecs_to_jiffies(20));
events &= ~MSDC_INT_CDSC;
}
if (!(events & (event_mask & ~MSDC_INT_SDIOIRQ)))
break;
if ((mmc->caps2 & MMC_CAP2_CQE) &&
(events & MSDC_INT_CMDQ)) {
msdc_cmdq_irq(host, events);
/* clear interrupts */
writel(events, host->base + MSDC_INT);
return IRQ_HANDLED;
}
if (!mrq) {
dev_err(host->dev,
"%s: MRQ=NULL; events=%08X; event_mask=%08X\n",
__func__, events, event_mask);
WARN_ON(1);
break;
}
dev_dbg(host->dev, "%s: events=%08X\n", __func__, events);
if (cmd)
msdc_cmd_done(host, events, mrq, cmd);
else if (data)
msdc_data_xfer_done(host, events, mrq, data);
}
return IRQ_HANDLED;
}
static void msdc_init_hw(struct msdc_host *host)
{
u32 val;
u32 tune_reg = host->dev_comp->pad_tune_reg;
struct mmc_host *mmc = mmc_from_priv(host);
if (host->reset) {
reset_control_assert(host->reset);
usleep_range(10, 50);
reset_control_deassert(host->reset);
}
/* Configure to MMC/SD mode, clock free running */
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN);
/* Reset */
msdc_reset_hw(host);
/* Disable and clear all interrupts */
writel(0, host->base + MSDC_INTEN);
val = readl(host->base + MSDC_INT);
writel(val, host->base + MSDC_INT);
/* Configure card detection */
if (host->internal_cd) {
sdr_set_field(host->base + MSDC_PS, MSDC_PS_CDDEBOUNCE,
DEFAULT_DEBOUNCE);
sdr_set_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
sdr_set_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
} else {
sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
}
if (host->top_base) {
writel(0, host->top_base + EMMC_TOP_CONTROL);
writel(0, host->top_base + EMMC_TOP_CMD);
} else {
writel(0, host->base + tune_reg);
}
writel(0, host->base + MSDC_IOCON);
sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0);
writel(0x403c0046, host->base + MSDC_PATCH_BIT);
sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1);
writel(0xffff4089, host->base + MSDC_PATCH_BIT1);
sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL);
if (host->dev_comp->stop_clk_fix) {
sdr_set_field(host->base + MSDC_PATCH_BIT1,
MSDC_PATCH_BIT1_STOP_DLY, 3);
sdr_clr_bits(host->base + SDC_FIFO_CFG,
SDC_FIFO_CFG_WRVALIDSEL);
sdr_clr_bits(host->base + SDC_FIFO_CFG,
SDC_FIFO_CFG_RDVALIDSEL);
}
if (host->dev_comp->busy_check)
sdr_clr_bits(host->base + MSDC_PATCH_BIT1, BIT(7));
if (host->dev_comp->async_fifo) {
sdr_set_field(host->base + MSDC_PATCH_BIT2,
MSDC_PB2_RESPWAIT, 3);
if (host->dev_comp->enhance_rx) {
if (host->top_base)
sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
SDC_RX_ENH_EN);
else
sdr_set_bits(host->base + SDC_ADV_CFG0,
SDC_RX_ENHANCE_EN);
} else {
sdr_set_field(host->base + MSDC_PATCH_BIT2,
MSDC_PB2_RESPSTSENSEL, 2);
sdr_set_field(host->base + MSDC_PATCH_BIT2,
MSDC_PB2_CRCSTSENSEL, 2);
}
		/* With the async FIFO in use, there is no need to tune the internal delay */
sdr_clr_bits(host->base + MSDC_PATCH_BIT2,
MSDC_PATCH_BIT2_CFGRESP);
sdr_set_bits(host->base + MSDC_PATCH_BIT2,
MSDC_PATCH_BIT2_CFGCRCSTS);
}
if (host->dev_comp->support_64g)
sdr_set_bits(host->base + MSDC_PATCH_BIT2,
MSDC_PB2_SUPPORT_64G);
if (host->dev_comp->data_tune) {
if (host->top_base) {
sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
PAD_DAT_RD_RXDLY_SEL);
sdr_clr_bits(host->top_base + EMMC_TOP_CONTROL,
DATA_K_VALUE_SEL);
sdr_set_bits(host->top_base + EMMC_TOP_CMD,
PAD_CMD_RD_RXDLY_SEL);
} else {
sdr_set_bits(host->base + tune_reg,
MSDC_PAD_TUNE_RD_SEL |
MSDC_PAD_TUNE_CMD_SEL);
}
} else {
/* choose clock tune */
if (host->top_base)
sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
PAD_RXDLY_SEL);
else
sdr_set_bits(host->base + tune_reg,
MSDC_PAD_TUNE_RXDLYSEL);
}
if (mmc->caps2 & MMC_CAP2_NO_SDIO) {
sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
sdr_clr_bits(host->base + SDC_ADV_CFG0, SDC_DAT1_IRQ_TRIGGER);
} else {
/* Configure to enable SDIO mode, otherwise SDIO CMD5 fails */
sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
		/* Configure the SDIO device detect interrupt function */
sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
sdr_set_bits(host->base + SDC_ADV_CFG0, SDC_DAT1_IRQ_TRIGGER);
}
/* Configure to default data timeout */
sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);
host->def_tune_para.iocon = readl(host->base + MSDC_IOCON);
host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
if (host->top_base) {
host->def_tune_para.emmc_top_control =
readl(host->top_base + EMMC_TOP_CONTROL);
host->def_tune_para.emmc_top_cmd =
readl(host->top_base + EMMC_TOP_CMD);
host->saved_tune_para.emmc_top_control =
readl(host->top_base + EMMC_TOP_CONTROL);
host->saved_tune_para.emmc_top_cmd =
readl(host->top_base + EMMC_TOP_CMD);
} else {
host->def_tune_para.pad_tune = readl(host->base + tune_reg);
host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
}
dev_dbg(host->dev, "init hardware done!");
}
static void msdc_deinit_hw(struct msdc_host *host)
{
u32 val;
if (host->internal_cd) {
		/* Disable card-detect */
sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
}
/* Disable and clear all interrupts */
writel(0, host->base + MSDC_INTEN);
val = readl(host->base + MSDC_INT);
writel(val, host->base + MSDC_INT);
}
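/*
 * Note (added for clarity): MSDC_INT is write-1-to-clear, so reading the
 * pending bits and writing the same value back, as done here and in
 * msdc_init_hw(), acknowledges every currently pending interrupt.
 */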
/* init gpd and bd list in msdc_drv_probe */
static void msdc_init_gpd_bd(struct msdc_host *host, struct msdc_dma *dma)
{
struct mt_gpdma_desc *gpd = dma->gpd;
struct mt_bdma_desc *bd = dma->bd;
dma_addr_t dma_addr;
int i;
memset(gpd, 0, sizeof(struct mt_gpdma_desc) * 2);
dma_addr = dma->gpd_addr + sizeof(struct mt_gpdma_desc);
gpd->gpd_info = GPDMA_DESC_BDP; /* hwo, cs, bd pointer */
	/* gpd->next must be set for descriptor DMA;
	 * that is why two gpd structures are allocated.
	 */
gpd->next = lower_32_bits(dma_addr);
if (host->dev_comp->support_64g)
gpd->gpd_info |= (upper_32_bits(dma_addr) & 0xf) << 24;
dma_addr = dma->bd_addr;
gpd->ptr = lower_32_bits(dma->bd_addr); /* physical address */
if (host->dev_comp->support_64g)
gpd->gpd_info |= (upper_32_bits(dma_addr) & 0xf) << 28;
memset(bd, 0, sizeof(struct mt_bdma_desc) * MAX_BD_NUM);
for (i = 0; i < (MAX_BD_NUM - 1); i++) {
dma_addr = dma->bd_addr + sizeof(*bd) * (i + 1);
bd[i].next = lower_32_bits(dma_addr);
if (host->dev_comp->support_64g)
bd[i].bd_info |= (upper_32_bits(dma_addr) & 0xf) << 24;
}
}
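/*
 * Resulting descriptor topology (illustrative):
 *
 *	gpd[0] --next--> gpd[1] (unused terminator)
 *	  |
 *	  +--ptr--> bd[0] --next--> bd[1] --next--> ... --> bd[MAX_BD_NUM - 1]
 *
 * On support_64g parts the upper four physical-address bits travel in the
 * gpd_info/bd_info bit-fields written above.
 */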
static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct msdc_host *host = mmc_priv(mmc);
int ret;
msdc_set_buswidth(host, ios->bus_width);
/* Suspend/Resume will do power off/on */
switch (ios->power_mode) {
case MMC_POWER_UP:
if (!IS_ERR(mmc->supply.vmmc)) {
msdc_init_hw(host);
ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
ios->vdd);
if (ret) {
dev_err(host->dev, "Failed to set vmmc power!\n");
return;
}
}
break;
case MMC_POWER_ON:
if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
ret = regulator_enable(mmc->supply.vqmmc);
if (ret)
dev_err(host->dev, "Failed to set vqmmc power!\n");
else
host->vqmmc_enabled = true;
}
break;
case MMC_POWER_OFF:
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
regulator_disable(mmc->supply.vqmmc);
host->vqmmc_enabled = false;
}
break;
default:
break;
}
if (host->mclk != ios->clock || host->timing != ios->timing)
msdc_set_mclk(host, ios->timing, ios->clock);
}
static u32 test_delay_bit(u32 delay, u32 bit)
{
bit %= PAD_DELAY_MAX;
return delay & BIT(bit);
}
static int get_delay_len(u32 delay, u32 start_bit)
{
int i;
for (i = 0; i < (PAD_DELAY_MAX - start_bit); i++) {
if (test_delay_bit(delay, start_bit + i) == 0)
return i;
}
return PAD_DELAY_MAX - start_bit;
}
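/*
 * Worked example (assuming PAD_DELAY_MAX == 32): for the delay map 0x38
 * (bits 3..5 set), get_delay_len(delay, 3) tests bits 3, 4 and 5 (all
 * set), stops at bit 6 (clear) and returns a run length of 3.
 */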
static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay)
{
int start = 0, len = 0;
int start_final = 0, len_final = 0;
u8 final_phase = 0xff;
struct msdc_delay_phase delay_phase = { 0, };
if (delay == 0) {
dev_err(host->dev, "phase error: [map:%x]\n", delay);
delay_phase.final_phase = final_phase;
return delay_phase;
}
while (start < PAD_DELAY_MAX) {
len = get_delay_len(delay, start);
if (len_final < len) {
start_final = start;
len_final = len;
}
start += len ? len : 1;
if (len >= 12 && start_final < 4)
break;
}
	/* The rule is to pick the smallest delay cell */
if (start_final == 0)
final_phase = (start_final + len_final / 3) % PAD_DELAY_MAX;
else
final_phase = (start_final + len_final / 2) % PAD_DELAY_MAX;
dev_dbg(host->dev, "phase: [map:%x] [maxlen:%d] [final:%d]\n",
delay, len_final, final_phase);
delay_phase.maxlen = len_final;
delay_phase.start = start_final;
delay_phase.final_phase = final_phase;
return delay_phase;
}
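/*
 * Worked example (assuming PAD_DELAY_MAX == 32): for delay == 0x3ff0 the
 * longest run of set bits starts at 4 with length 10, so start_final != 0
 * and final_phase = (4 + 10 / 2) % 32 = 9, the middle of the window. A
 * window starting at 0 is instead biased towards its beginning (len / 3),
 * following the "smallest delay cell" rule above.
 */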
static inline void msdc_set_cmd_delay(struct msdc_host *host, u32 value)
{
u32 tune_reg = host->dev_comp->pad_tune_reg;
if (host->top_base)
sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY,
value);
else
sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
value);
}
static inline void msdc_set_data_delay(struct msdc_host *host, u32 value)
{
u32 tune_reg = host->dev_comp->pad_tune_reg;
if (host->top_base)
sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
PAD_DAT_RD_RXDLY, value);
else
sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY,
value);
}
static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
u32 rise_delay = 0, fall_delay = 0;
struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
struct msdc_delay_phase internal_delay_phase;
u8 final_delay, final_maxlen;
u32 internal_delay = 0;
u32 tune_reg = host->dev_comp->pad_tune_reg;
int cmd_err;
int i, j;
if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
mmc->ios.timing == MMC_TIMING_UHS_SDR104)
sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY,
host->hs200_cmd_int_delay);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
for (i = 0 ; i < PAD_DELAY_MAX; i++) {
msdc_set_cmd_delay(host, i);
/*
* Using the same parameters, it may sometimes pass the test,
* but sometimes it may fail. To make sure the parameters are
* more stable, we test each set of parameters 3 times.
*/
for (j = 0; j < 3; j++) {
mmc_send_tuning(mmc, opcode, &cmd_err);
if (!cmd_err) {
rise_delay |= BIT(i);
} else {
rise_delay &= ~BIT(i);
break;
}
}
}
final_rise_delay = get_best_delay(host, rise_delay);
/* if rising edge has enough margin, then do not scan falling edge */
if (final_rise_delay.maxlen >= 12 ||
(final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
goto skip_fall;
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
for (i = 0; i < PAD_DELAY_MAX; i++) {
msdc_set_cmd_delay(host, i);
/*
* Using the same parameters, it may sometimes pass the test,
* but sometimes it may fail. To make sure the parameters are
* more stable, we test each set of parameters 3 times.
*/
for (j = 0; j < 3; j++) {
mmc_send_tuning(mmc, opcode, &cmd_err);
if (!cmd_err) {
fall_delay |= BIT(i);
} else {
fall_delay &= ~BIT(i);
break;
}
}
}
final_fall_delay = get_best_delay(host, fall_delay);
skip_fall:
final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
if (final_fall_delay.maxlen >= 12 && final_fall_delay.start < 4)
final_maxlen = final_fall_delay.maxlen;
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
final_delay = final_rise_delay.final_phase;
} else {
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
final_delay = final_fall_delay.final_phase;
}
msdc_set_cmd_delay(host, final_delay);
if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay)
goto skip_internal;
for (i = 0; i < PAD_DELAY_MAX; i++) {
sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY, i);
mmc_send_tuning(mmc, opcode, &cmd_err);
if (!cmd_err)
internal_delay |= BIT(i);
}
dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay);
internal_delay_phase = get_best_delay(host, internal_delay);
sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY,
internal_delay_phase.final_phase);
skip_internal:
dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay);
return final_delay == 0xff ? -EIO : 0;
}
static int hs400_tune_response(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
u32 cmd_delay = 0;
struct msdc_delay_phase final_cmd_delay = { 0,};
u8 final_delay;
int cmd_err;
int i, j;
/* select EMMC50 PAD CMD tune */
sdr_set_bits(host->base + PAD_CMD_TUNE, BIT(0));
sdr_set_field(host->base + MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMDTA, 2);
if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
mmc->ios.timing == MMC_TIMING_UHS_SDR104)
sdr_set_field(host->base + MSDC_PAD_TUNE,
MSDC_PAD_TUNE_CMDRRDLY,
host->hs200_cmd_int_delay);
if (host->hs400_cmd_resp_sel_rising)
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
else
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
for (i = 0 ; i < PAD_DELAY_MAX; i++) {
sdr_set_field(host->base + PAD_CMD_TUNE,
PAD_CMD_TUNE_RX_DLY3, i);
/*
* Using the same parameters, it may sometimes pass the test,
* but sometimes it may fail. To make sure the parameters are
* more stable, we test each set of parameters 3 times.
*/
for (j = 0; j < 3; j++) {
mmc_send_tuning(mmc, opcode, &cmd_err);
if (!cmd_err) {
cmd_delay |= BIT(i);
} else {
cmd_delay &= ~BIT(i);
break;
}
}
}
final_cmd_delay = get_best_delay(host, cmd_delay);
sdr_set_field(host->base + PAD_CMD_TUNE, PAD_CMD_TUNE_RX_DLY3,
final_cmd_delay.final_phase);
final_delay = final_cmd_delay.final_phase;
dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay);
return final_delay == 0xff ? -EIO : 0;
}
static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
u32 rise_delay = 0, fall_delay = 0;
struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
u8 final_delay, final_maxlen;
int i, ret;
sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL,
host->latch_ck);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
for (i = 0 ; i < PAD_DELAY_MAX; i++) {
msdc_set_data_delay(host, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
rise_delay |= BIT(i);
}
final_rise_delay = get_best_delay(host, rise_delay);
/* if rising edge has enough margin, then do not scan falling edge */
if (final_rise_delay.maxlen >= 12 ||
(final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
goto skip_fall;
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
for (i = 0; i < PAD_DELAY_MAX; i++) {
msdc_set_data_delay(host, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
fall_delay |= BIT(i);
}
final_fall_delay = get_best_delay(host, fall_delay);
skip_fall:
final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
final_delay = final_rise_delay.final_phase;
} else {
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
final_delay = final_fall_delay.final_phase;
}
msdc_set_data_delay(host, final_delay);
dev_dbg(host->dev, "Final data pad delay: %x\n", final_delay);
return final_delay == 0xff ? -EIO : 0;
}
/*
* MSDC IP which supports data tune + async fifo can do CMD/DAT tune
* together, which can save the tuning time.
*/
static int msdc_tune_together(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
u32 rise_delay = 0, fall_delay = 0;
struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
u8 final_delay, final_maxlen;
int i, ret;
sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL,
host->latch_ck);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
sdr_clr_bits(host->base + MSDC_IOCON,
MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
for (i = 0 ; i < PAD_DELAY_MAX; i++) {
msdc_set_cmd_delay(host, i);
msdc_set_data_delay(host, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
rise_delay |= BIT(i);
}
final_rise_delay = get_best_delay(host, rise_delay);
/* if rising edge has enough margin, then do not scan falling edge */
if (final_rise_delay.maxlen >= 12 ||
(final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
goto skip_fall;
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
sdr_set_bits(host->base + MSDC_IOCON,
MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
for (i = 0; i < PAD_DELAY_MAX; i++) {
msdc_set_cmd_delay(host, i);
msdc_set_data_delay(host, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
fall_delay |= BIT(i);
}
final_fall_delay = get_best_delay(host, fall_delay);
skip_fall:
final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
sdr_clr_bits(host->base + MSDC_IOCON,
MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
final_delay = final_rise_delay.final_phase;
} else {
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
sdr_set_bits(host->base + MSDC_IOCON,
MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
final_delay = final_fall_delay.final_phase;
}
msdc_set_cmd_delay(host, final_delay);
msdc_set_data_delay(host, final_delay);
dev_dbg(host->dev, "Final pad delay: %x\n", final_delay);
return final_delay == 0xff ? -EIO : 0;
}
static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
int ret;
u32 tune_reg = host->dev_comp->pad_tune_reg;
if (host->dev_comp->data_tune && host->dev_comp->async_fifo) {
ret = msdc_tune_together(mmc, opcode);
if (host->hs400_mode) {
sdr_clr_bits(host->base + MSDC_IOCON,
MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
msdc_set_data_delay(host, 0);
}
goto tune_done;
}
if (host->hs400_mode &&
host->dev_comp->hs400_tune)
ret = hs400_tune_response(mmc, opcode);
else
ret = msdc_tune_response(mmc, opcode);
if (ret == -EIO) {
dev_err(host->dev, "Tune response fail!\n");
return ret;
}
if (host->hs400_mode == false) {
ret = msdc_tune_data(mmc, opcode);
if (ret == -EIO)
dev_err(host->dev, "Tune data fail!\n");
}
tune_done:
host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
host->saved_tune_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
if (host->top_base) {
host->saved_tune_para.emmc_top_control = readl(host->top_base +
EMMC_TOP_CONTROL);
host->saved_tune_para.emmc_top_cmd = readl(host->top_base +
EMMC_TOP_CMD);
}
return ret;
}
static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct msdc_host *host = mmc_priv(mmc);
host->hs400_mode = true;
if (host->top_base)
writel(host->hs400_ds_delay,
host->top_base + EMMC50_PAD_DS_TUNE);
else
writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
	/* HS400 mode requires this to be set to 0 */
sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS);
/* to improve read performance, set outstanding to 2 */
sdr_set_field(host->base + EMMC50_CFG3, EMMC50_CFG3_OUTS_WR, 2);
return 0;
}
static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card)
{
struct msdc_host *host = mmc_priv(mmc);
struct msdc_delay_phase dly1_delay;
u32 val, result_dly1 = 0;
u8 *ext_csd;
int i, ret;
if (host->top_base) {
sdr_set_bits(host->top_base + EMMC50_PAD_DS_TUNE,
PAD_DS_DLY_SEL);
if (host->hs400_ds_dly3)
sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
PAD_DS_DLY3, host->hs400_ds_dly3);
} else {
sdr_set_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY_SEL);
if (host->hs400_ds_dly3)
sdr_set_field(host->base + PAD_DS_TUNE,
PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
}
host->hs400_tuning = true;
for (i = 0; i < PAD_DELAY_MAX; i++) {
if (host->top_base)
sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
PAD_DS_DLY1, i);
else
sdr_set_field(host->base + PAD_DS_TUNE,
PAD_DS_TUNE_DLY1, i);
ret = mmc_get_ext_csd(card, &ext_csd);
if (!ret) {
result_dly1 |= BIT(i);
kfree(ext_csd);
}
}
host->hs400_tuning = false;
dly1_delay = get_best_delay(host, result_dly1);
if (dly1_delay.maxlen == 0) {
dev_err(host->dev, "Failed to get DLY1 delay!\n");
goto fail;
}
if (host->top_base)
sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
PAD_DS_DLY1, dly1_delay.final_phase);
else
sdr_set_field(host->base + PAD_DS_TUNE,
PAD_DS_TUNE_DLY1, dly1_delay.final_phase);
if (host->top_base)
val = readl(host->top_base + EMMC50_PAD_DS_TUNE);
else
val = readl(host->base + PAD_DS_TUNE);
dev_info(host->dev, "Final PAD_DS_TUNE: 0x%x\n", val);
return 0;
fail:
	dev_err(host->dev, "Failed to tune DS pin delay!\n");
return -EIO;
}
static void msdc_hw_reset(struct mmc_host *mmc)
{
struct msdc_host *host = mmc_priv(mmc);
sdr_set_bits(host->base + EMMC_IOCON, 1);
udelay(10); /* 10us is enough */
sdr_clr_bits(host->base + EMMC_IOCON, 1);
}
static void msdc_ack_sdio_irq(struct mmc_host *mmc)
{
unsigned long flags;
struct msdc_host *host = mmc_priv(mmc);
spin_lock_irqsave(&host->lock, flags);
__msdc_enable_sdio_irq(host, 1);
spin_unlock_irqrestore(&host->lock, flags);
}
static int msdc_get_cd(struct mmc_host *mmc)
{
struct msdc_host *host = mmc_priv(mmc);
int val;
if (mmc->caps & MMC_CAP_NONREMOVABLE)
return 1;
if (!host->internal_cd)
return mmc_gpio_get_cd(mmc);
val = readl(host->base + MSDC_PS) & MSDC_PS_CDSTS;
if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
return !!val;
else
return !val;
}
static void msdc_hs400_enhanced_strobe(struct mmc_host *mmc,
struct mmc_ios *ios)
{
struct msdc_host *host = mmc_priv(mmc);
if (ios->enhanced_strobe) {
msdc_prepare_hs400_tuning(mmc, ios);
sdr_set_field(host->base + EMMC50_CFG0, EMMC50_CFG_PADCMD_LATCHCK, 1);
sdr_set_field(host->base + EMMC50_CFG0, EMMC50_CFG_CMD_RESP_SEL, 1);
sdr_set_field(host->base + EMMC50_CFG1, EMMC50_CFG1_DS_CFG, 1);
sdr_clr_bits(host->base + CQHCI_SETTING, CQHCI_RD_CMD_WND_SEL);
sdr_clr_bits(host->base + CQHCI_SETTING, CQHCI_WR_CMD_WND_SEL);
sdr_clr_bits(host->base + EMMC51_CFG0, CMDQ_RDAT_CNT);
} else {
sdr_set_field(host->base + EMMC50_CFG0, EMMC50_CFG_PADCMD_LATCHCK, 0);
sdr_set_field(host->base + EMMC50_CFG0, EMMC50_CFG_CMD_RESP_SEL, 0);
sdr_set_field(host->base + EMMC50_CFG1, EMMC50_CFG1_DS_CFG, 0);
sdr_set_bits(host->base + CQHCI_SETTING, CQHCI_RD_CMD_WND_SEL);
sdr_set_bits(host->base + CQHCI_SETTING, CQHCI_WR_CMD_WND_SEL);
sdr_set_field(host->base + EMMC51_CFG0, CMDQ_RDAT_CNT, 0xb4);
}
}
static void msdc_cqe_cit_cal(struct msdc_host *host, u64 timer_ns)
{
struct mmc_host *mmc = mmc_from_priv(host);
struct cqhci_host *cq_host = mmc->cqe_private;
u8 itcfmul;
u64 hclk_freq, value;
/*
* On MediaTek SoCs the MSDC controller's CQE uses msdc_hclk as ITCFVAL
* so we multiply/divide the HCLK frequency by ITCFMUL to calculate the
* Send Status Command Idle Timer (CIT) value.
*/
hclk_freq = (u64)clk_get_rate(host->h_clk);
itcfmul = CQHCI_ITCFMUL(cqhci_readl(cq_host, CQHCI_CAP));
switch (itcfmul) {
case 0x0:
do_div(hclk_freq, 1000);
break;
case 0x1:
do_div(hclk_freq, 100);
break;
case 0x2:
do_div(hclk_freq, 10);
break;
case 0x3:
break;
case 0x4:
hclk_freq = hclk_freq * 10;
break;
default:
host->cq_ssc1_time = 0x40;
return;
}
value = hclk_freq * timer_ns;
do_div(value, 1000000000);
host->cq_ssc1_time = value;
}
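/*
 * Worked example (hypothetical clock rate): with hclk at 200 MHz and
 * ITCFMUL == 0x2, hclk_freq becomes 20,000,000 after the divide; for
 * timer_ns == 2350 this gives value = 20,000,000 * 2350 / 10^9 = 47
 * (0x2f), which msdc_cqe_enable() then programs into CQHCI_SSC1.
 */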
static void msdc_cqe_enable(struct mmc_host *mmc)
{
struct msdc_host *host = mmc_priv(mmc);
struct cqhci_host *cq_host = mmc->cqe_private;
/* enable cmdq irq */
writel(MSDC_INT_CMDQ, host->base + MSDC_INTEN);
/* enable busy check */
sdr_set_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
/* default write data / busy timeout 20s */
msdc_set_busy_timeout(host, 20 * 1000000000ULL, 0);
/* default read data timeout 1s */
msdc_set_timeout(host, 1000000000ULL, 0);
/* Set the send status command idle timer */
cqhci_writel(cq_host, host->cq_ssc1_time, CQHCI_SSC1);
}
static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
{
struct msdc_host *host = mmc_priv(mmc);
unsigned int val = 0;
/* disable cmdq irq */
sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INT_CMDQ);
/* disable busy check */
sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
val = readl(host->base + MSDC_INT);
writel(val, host->base + MSDC_INT);
if (recovery) {
sdr_set_field(host->base + MSDC_DMA_CTRL,
MSDC_DMA_CTRL_STOP, 1);
if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CTRL, val,
!(val & MSDC_DMA_CTRL_STOP), 1, 3000)))
return;
if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CFG, val,
!(val & MSDC_DMA_CFG_STS), 1, 3000)))
return;
msdc_reset_hw(host);
}
}
static void msdc_cqe_pre_enable(struct mmc_host *mmc)
{
struct cqhci_host *cq_host = mmc->cqe_private;
u32 reg;
reg = cqhci_readl(cq_host, CQHCI_CFG);
reg |= CQHCI_ENABLE;
cqhci_writel(cq_host, reg, CQHCI_CFG);
}
static void msdc_cqe_post_disable(struct mmc_host *mmc)
{
struct cqhci_host *cq_host = mmc->cqe_private;
u32 reg;
reg = cqhci_readl(cq_host, CQHCI_CFG);
reg &= ~CQHCI_ENABLE;
cqhci_writel(cq_host, reg, CQHCI_CFG);
}
static const struct mmc_host_ops mt_msdc_ops = {
.post_req = msdc_post_req,
.pre_req = msdc_pre_req,
.request = msdc_ops_request,
.set_ios = msdc_ops_set_ios,
.get_ro = mmc_gpio_get_ro,
.get_cd = msdc_get_cd,
.hs400_enhanced_strobe = msdc_hs400_enhanced_strobe,
.enable_sdio_irq = msdc_enable_sdio_irq,
.ack_sdio_irq = msdc_ack_sdio_irq,
.start_signal_voltage_switch = msdc_ops_switch_volt,
.card_busy = msdc_card_busy,
.execute_tuning = msdc_execute_tuning,
.prepare_hs400_tuning = msdc_prepare_hs400_tuning,
.execute_hs400_tuning = msdc_execute_hs400_tuning,
.card_hw_reset = msdc_hw_reset,
};
static const struct cqhci_host_ops msdc_cmdq_ops = {
.enable = msdc_cqe_enable,
.disable = msdc_cqe_disable,
.pre_enable = msdc_cqe_pre_enable,
.post_disable = msdc_cqe_post_disable,
};
static void msdc_of_property_parse(struct platform_device *pdev,
struct msdc_host *host)
{
of_property_read_u32(pdev->dev.of_node, "mediatek,latch-ck",
&host->latch_ck);
of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay",
&host->hs400_ds_delay);
of_property_read_u32(pdev->dev.of_node, "mediatek,hs400-ds-dly3",
&host->hs400_ds_dly3);
of_property_read_u32(pdev->dev.of_node, "mediatek,hs200-cmd-int-delay",
&host->hs200_cmd_int_delay);
of_property_read_u32(pdev->dev.of_node, "mediatek,hs400-cmd-int-delay",
&host->hs400_cmd_int_delay);
if (of_property_read_bool(pdev->dev.of_node,
"mediatek,hs400-cmd-resp-sel-rising"))
host->hs400_cmd_resp_sel_rising = true;
else
host->hs400_cmd_resp_sel_rising = false;
if (of_property_read_bool(pdev->dev.of_node,
"supports-cqe"))
host->cqhci = true;
else
host->cqhci = false;
}
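/*
 * Illustrative device-tree fragment (hypothetical values) exercising the
 * properties parsed above:
 *
 *	mmc0: mmc@11230000 {
 *		mediatek,latch-ck = <0>;
 *		hs400-ds-delay = <0x14015>;
 *		mediatek,hs200-cmd-int-delay = <26>;
 *		mediatek,hs400-cmd-resp-sel-rising;
 *		supports-cqe;
 *	};
 */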
static int msdc_of_clock_parse(struct platform_device *pdev,
struct msdc_host *host)
{
int ret;
host->src_clk = devm_clk_get(&pdev->dev, "source");
if (IS_ERR(host->src_clk))
return PTR_ERR(host->src_clk);
host->h_clk = devm_clk_get(&pdev->dev, "hclk");
if (IS_ERR(host->h_clk))
return PTR_ERR(host->h_clk);
host->bus_clk = devm_clk_get_optional(&pdev->dev, "bus_clk");
if (IS_ERR(host->bus_clk))
host->bus_clk = NULL;
	/* The source clock control gate is an optional clock */
host->src_clk_cg = devm_clk_get_optional(&pdev->dev, "source_cg");
if (IS_ERR(host->src_clk_cg))
return PTR_ERR(host->src_clk_cg);
	/*
	 * Fallback for legacy device-trees: src_clk and HCLK use the same
	 * bit to control gating, but they are parented to different muxes.
	 * Hence, when only the source must be gated (required during a clk
	 * mode switch to avoid hw hangs), its parent is gated instead
	 * (specified as a separate clock only on new DTs).
	 */
if (!host->src_clk_cg) {
host->src_clk_cg = clk_get_parent(host->src_clk);
if (IS_ERR(host->src_clk_cg))
return PTR_ERR(host->src_clk_cg);
}
	/* If present, this clock gate is always kept enabled */
host->sys_clk_cg = devm_clk_get_optional_enabled(&pdev->dev, "sys_cg");
if (IS_ERR(host->sys_clk_cg))
host->sys_clk_cg = NULL;
host->bulk_clks[0].id = "pclk_cg";
host->bulk_clks[1].id = "axi_cg";
host->bulk_clks[2].id = "ahb_cg";
ret = devm_clk_bulk_get_optional(&pdev->dev, MSDC_NR_CLOCKS,
host->bulk_clks);
if (ret) {
dev_err(&pdev->dev, "Cannot get pclk/axi/ahb clock gates\n");
return ret;
}
return 0;
}
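/*
 * Clock inputs consumed above, for reference: "source" and "hclk" are
 * mandatory; "bus_clk", "source_cg" and "sys_cg" are optional; and
 * "pclk_cg"/"axi_cg"/"ahb_cg" form an optional bulk set of gates.
 */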
static int msdc_drv_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct msdc_host *host;
struct resource *res;
int ret;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "No DT found\n");
return -EINVAL;
}
/* Allocate MMC host for this device */
mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev);
if (!mmc)
return -ENOMEM;
host = mmc_priv(mmc);
ret = mmc_of_parse(mmc);
if (ret)
goto host_free;
host->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
goto host_free;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
host->top_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(host->top_base))
host->top_base = NULL;
}
ret = mmc_regulator_get_supply(mmc);
if (ret)
goto host_free;
ret = msdc_of_clock_parse(pdev, host);
if (ret)
goto host_free;
host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev,
"hrst");
if (IS_ERR(host->reset)) {
ret = PTR_ERR(host->reset);
goto host_free;
}
/* only eMMC has crypto property */
if (!(mmc->caps2 & MMC_CAP2_NO_MMC)) {
host->crypto_clk = devm_clk_get_optional(&pdev->dev, "crypto");
if (IS_ERR(host->crypto_clk))
host->crypto_clk = NULL;
else
mmc->caps2 |= MMC_CAP2_CRYPTO;
}
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
ret = host->irq;
goto host_free;
}
host->pinctrl = devm_pinctrl_get(&pdev->dev);
if (IS_ERR(host->pinctrl)) {
ret = PTR_ERR(host->pinctrl);
dev_err(&pdev->dev, "Cannot find pinctrl!\n");
goto host_free;
}
host->pins_default = pinctrl_lookup_state(host->pinctrl, "default");
if (IS_ERR(host->pins_default)) {
ret = PTR_ERR(host->pins_default);
dev_err(&pdev->dev, "Cannot find pinctrl default!\n");
goto host_free;
}
host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
if (IS_ERR(host->pins_uhs)) {
ret = PTR_ERR(host->pins_uhs);
dev_err(&pdev->dev, "Cannot find pinctrl uhs!\n");
goto host_free;
}
	/* Support for SDIO eint irq? */
if ((mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ) && (mmc->pm_caps & MMC_PM_KEEP_POWER)) {
host->eint_irq = platform_get_irq_byname_optional(pdev, "sdio_wakeup");
if (host->eint_irq > 0) {
host->pins_eint = pinctrl_lookup_state(host->pinctrl, "state_eint");
if (IS_ERR(host->pins_eint)) {
dev_err(&pdev->dev, "Cannot find pinctrl eint!\n");
host->pins_eint = NULL;
} else {
device_init_wakeup(&pdev->dev, true);
}
}
}
msdc_of_property_parse(pdev, host);
host->dev = &pdev->dev;
host->dev_comp = of_device_get_match_data(&pdev->dev);
host->src_clk_freq = clk_get_rate(host->src_clk);
/* Set host parameters to mmc */
mmc->ops = &mt_msdc_ops;
if (host->dev_comp->clk_div_bits == 8)
mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
else
mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 4095);
if (!(mmc->caps & MMC_CAP_NONREMOVABLE) &&
!mmc_can_gpio_cd(mmc) &&
host->dev_comp->use_internal_cd) {
		/*
		 * The card is removable but no CD GPIO is declared, so
		 * use the internal card-detect functionality.
		 */
host->internal_cd = true;
}
if (mmc->caps & MMC_CAP_SDIO_IRQ)
mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
mmc->caps |= MMC_CAP_CMD23;
if (host->cqhci)
mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
/* MMC core transfer sizes tunable parameters */
mmc->max_segs = MAX_BD_NUM;
if (host->dev_comp->support_64g)
mmc->max_seg_size = BDMA_DESC_BUFLEN_EXT;
else
mmc->max_seg_size = BDMA_DESC_BUFLEN;
mmc->max_blk_size = 2048;
mmc->max_req_size = 512 * 1024;
mmc->max_blk_count = mmc->max_req_size / 512;
if (host->dev_comp->support_64g)
host->dma_mask = DMA_BIT_MASK(36);
else
host->dma_mask = DMA_BIT_MASK(32);
mmc_dev(mmc)->dma_mask = &host->dma_mask;
host->timeout_clks = 3 * 1048576;
host->dma.gpd = dma_alloc_coherent(&pdev->dev,
2 * sizeof(struct mt_gpdma_desc),
&host->dma.gpd_addr, GFP_KERNEL);
host->dma.bd = dma_alloc_coherent(&pdev->dev,
MAX_BD_NUM * sizeof(struct mt_bdma_desc),
&host->dma.bd_addr, GFP_KERNEL);
if (!host->dma.gpd || !host->dma.bd) {
ret = -ENOMEM;
goto release_mem;
}
msdc_init_gpd_bd(host, &host->dma);
INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout);
spin_lock_init(&host->lock);
platform_set_drvdata(pdev, mmc);
ret = msdc_ungate_clock(host);
if (ret) {
dev_err(&pdev->dev, "Cannot ungate clocks!\n");
goto release_mem;
}
msdc_init_hw(host);
if (mmc->caps2 & MMC_CAP2_CQE) {
host->cq_host = devm_kzalloc(mmc->parent,
sizeof(*host->cq_host),
GFP_KERNEL);
if (!host->cq_host) {
ret = -ENOMEM;
goto host_free;
}
host->cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
host->cq_host->mmio = host->base + 0x800;
host->cq_host->ops = &msdc_cmdq_ops;
ret = cqhci_init(host->cq_host, mmc, true);
if (ret)
goto host_free;
mmc->max_segs = 128;
		/* cqhci has a 16-bit length field; a size of 0 means 65536,
		 * so there is no need to subtract 1 here.
		 */
mmc->max_seg_size = 64 * 1024;
		/* Reduce CIT to 0x40, which corresponds to 2.35us */
msdc_cqe_cit_cal(host, 2350);
}
ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq,
IRQF_TRIGGER_NONE, pdev->name, host);
if (ret)
goto release;
pm_runtime_set_active(host->dev);
pm_runtime_set_autosuspend_delay(host->dev, MTK_MMC_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(host->dev);
pm_runtime_enable(host->dev);
ret = mmc_add_host(mmc);
if (ret)
goto end;
return 0;
end:
pm_runtime_disable(host->dev);
release:
platform_set_drvdata(pdev, NULL);
msdc_deinit_hw(host);
msdc_gate_clock(host);
release_mem:
if (host->dma.gpd)
dma_free_coherent(&pdev->dev,
2 * sizeof(struct mt_gpdma_desc),
host->dma.gpd, host->dma.gpd_addr);
if (host->dma.bd)
dma_free_coherent(&pdev->dev,
MAX_BD_NUM * sizeof(struct mt_bdma_desc),
host->dma.bd, host->dma.bd_addr);
host_free:
mmc_free_host(mmc);
return ret;
}
static void msdc_drv_remove(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct msdc_host *host;
mmc = platform_get_drvdata(pdev);
host = mmc_priv(mmc);
pm_runtime_get_sync(host->dev);
platform_set_drvdata(pdev, NULL);
mmc_remove_host(mmc);
msdc_deinit_hw(host);
msdc_gate_clock(host);
pm_runtime_disable(host->dev);
pm_runtime_put_noidle(host->dev);
dma_free_coherent(&pdev->dev,
2 * sizeof(struct mt_gpdma_desc),
host->dma.gpd, host->dma.gpd_addr);
dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct mt_bdma_desc),
host->dma.bd, host->dma.bd_addr);
mmc_free_host(mmc);
}
static void msdc_save_reg(struct msdc_host *host)
{
u32 tune_reg = host->dev_comp->pad_tune_reg;
host->save_para.msdc_cfg = readl(host->base + MSDC_CFG);
host->save_para.iocon = readl(host->base + MSDC_IOCON);
host->save_para.sdc_cfg = readl(host->base + SDC_CFG);
host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT);
host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1);
host->save_para.patch_bit2 = readl(host->base + MSDC_PATCH_BIT2);
host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE);
host->save_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0);
host->save_para.emmc50_cfg3 = readl(host->base + EMMC50_CFG3);
host->save_para.sdc_fifo_cfg = readl(host->base + SDC_FIFO_CFG);
if (host->top_base) {
host->save_para.emmc_top_control =
readl(host->top_base + EMMC_TOP_CONTROL);
host->save_para.emmc_top_cmd =
readl(host->top_base + EMMC_TOP_CMD);
host->save_para.emmc50_pad_ds_tune =
readl(host->top_base + EMMC50_PAD_DS_TUNE);
} else {
host->save_para.pad_tune = readl(host->base + tune_reg);
}
}
static void msdc_restore_reg(struct msdc_host *host)
{
struct mmc_host *mmc = mmc_from_priv(host);
u32 tune_reg = host->dev_comp->pad_tune_reg;
writel(host->save_para.msdc_cfg, host->base + MSDC_CFG);
writel(host->save_para.iocon, host->base + MSDC_IOCON);
writel(host->save_para.sdc_cfg, host->base + SDC_CFG);
writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT);
writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1);
writel(host->save_para.patch_bit2, host->base + MSDC_PATCH_BIT2);
writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE);
writel(host->save_para.pad_cmd_tune, host->base + PAD_CMD_TUNE);
writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0);
writel(host->save_para.emmc50_cfg3, host->base + EMMC50_CFG3);
writel(host->save_para.sdc_fifo_cfg, host->base + SDC_FIFO_CFG);
if (host->top_base) {
writel(host->save_para.emmc_top_control,
host->top_base + EMMC_TOP_CONTROL);
writel(host->save_para.emmc_top_cmd,
host->top_base + EMMC_TOP_CMD);
writel(host->save_para.emmc50_pad_ds_tune,
host->top_base + EMMC50_PAD_DS_TUNE);
} else {
writel(host->save_para.pad_tune, host->base + tune_reg);
}
if (sdio_irq_claimed(mmc))
__msdc_enable_sdio_irq(host, 1);
}
static int __maybe_unused msdc_runtime_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct msdc_host *host = mmc_priv(mmc);
msdc_save_reg(host);
if (sdio_irq_claimed(mmc)) {
if (host->pins_eint) {
disable_irq(host->irq);
pinctrl_select_state(host->pinctrl, host->pins_eint);
}
__msdc_enable_sdio_irq(host, 0);
}
msdc_gate_clock(host);
return 0;
}
static int __maybe_unused msdc_runtime_resume(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct msdc_host *host = mmc_priv(mmc);
int ret;
ret = msdc_ungate_clock(host);
if (ret)
return ret;
msdc_restore_reg(host);
if (sdio_irq_claimed(mmc) && host->pins_eint) {
pinctrl_select_state(host->pinctrl, host->pins_uhs);
enable_irq(host->irq);
}
return 0;
}
static int __maybe_unused msdc_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct msdc_host *host = mmc_priv(mmc);
int ret;
u32 val;
if (mmc->caps2 & MMC_CAP2_CQE) {
ret = cqhci_suspend(mmc);
if (ret)
return ret;
val = readl(host->base + MSDC_INT);
writel(val, host->base + MSDC_INT);
}
	/*
	 * Bump the runtime PM usage counter, otherwise dev->power.needs_force_resume
	 * will not be set to 1 and pm_runtime_force_resume() will return early.
	 */
if (sdio_irq_claimed(mmc) && host->pins_eint)
pm_runtime_get_noresume(dev);
return pm_runtime_force_suspend(dev);
}
static int __maybe_unused msdc_resume(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct msdc_host *host = mmc_priv(mmc);
if (sdio_irq_claimed(mmc) && host->pins_eint)
pm_runtime_put_noidle(dev);
return pm_runtime_force_resume(dev);
}
static const struct dev_pm_ops msdc_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(msdc_suspend, msdc_resume)
SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL)
};
static struct platform_driver mt_msdc_driver = {
.probe = msdc_drv_probe,
.remove_new = msdc_drv_remove,
.driver = {
.name = "mtk-msdc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = msdc_of_ids,
.pm = &msdc_dev_pm_ops,
},
};
module_platform_driver(mt_msdc_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek SD/MMC Card Driver");
| linux-master | drivers/mmc/host/mtk-sd.c |
/*
* Driver for MMC and SSD cards for Cavium OCTEON SOCs.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2012-2017 Cavium Inc.
*/
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <asm/octeon/octeon.h>
#include "cavium.h"
#define CVMX_MIO_BOOT_CTL CVMX_ADD_IO_SEG(0x00011800000000D0ull)
/*
* The l2c* functions below are used for the EMMC-17978 workaround.
*
* Due to a bug in the design of the MMC bus hardware, the 2nd to last
* cache block of a DMA read must be locked into the L2 Cache.
* Otherwise, data corruption may occur.
*/
static inline void *phys_to_ptr(u64 address)
{
return (void *)(address | (1ull << 63)); /* XKPHYS */
}
/*
* Lock a single line into L2. The line is zeroed before locking
* to make sure no dram accesses are made.
*/
static void l2c_lock_line(u64 addr)
{
char *addr_ptr = phys_to_ptr(addr);
asm volatile (
"cache 31, %[line]" /* Unlock the line */
::[line] "m" (*addr_ptr));
}
/* Unlock a single line in the L2 cache. */
static void l2c_unlock_line(u64 addr)
{
char *addr_ptr = phys_to_ptr(addr);
asm volatile (
"cache 23, %[line]" /* Unlock the line */
::[line] "m" (*addr_ptr));
}
/* Locks a memory region in the L2 cache. */
static void l2c_lock_mem_region(u64 start, u64 len)
{
u64 end;
/* Round start/end to cache line boundaries */
end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE);
start = ALIGN(start, CVMX_CACHE_LINE_SIZE);
while (start <= end) {
l2c_lock_line(start);
start += CVMX_CACHE_LINE_SIZE;
}
asm volatile("sync");
}
/* Unlock a memory region in the L2 cache. */
static void l2c_unlock_mem_region(u64 start, u64 len)
{
u64 end;
/* Round start/end to cache line boundaries */
end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE);
start = ALIGN(start, CVMX_CACHE_LINE_SIZE);
while (start <= end) {
l2c_unlock_line(start);
start += CVMX_CACHE_LINE_SIZE;
}
}
static void octeon_mmc_acquire_bus(struct cvm_mmc_host *host)
{
if (!host->has_ciu3) {
down(&octeon_bootbus_sem);
/* For CN70XX, switch the MMC controller onto the bus. */
if (OCTEON_IS_MODEL(OCTEON_CN70XX))
writeq(0, (void __iomem *)CVMX_MIO_BOOT_CTL);
} else {
down(&host->mmc_serializer);
}
}
static void octeon_mmc_release_bus(struct cvm_mmc_host *host)
{
if (!host->has_ciu3)
up(&octeon_bootbus_sem);
else
up(&host->mmc_serializer);
}
static void octeon_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
{
writeq(val, host->base + MIO_EMM_INT(host));
if (!host->has_ciu3)
writeq(val, host->base + MIO_EMM_INT_EN(host));
}
static void octeon_mmc_set_shared_power(struct cvm_mmc_host *host, int dir)
{
if (dir == 0)
if (!atomic_dec_return(&host->shared_power_users))
gpiod_set_value_cansleep(host->global_pwr_gpiod, 0);
if (dir == 1)
if (atomic_inc_return(&host->shared_power_users) == 1)
gpiod_set_value_cansleep(host->global_pwr_gpiod, 1);
}
static void octeon_mmc_dmar_fixup(struct cvm_mmc_host *host,
struct mmc_command *cmd,
struct mmc_data *data,
u64 addr)
{
if (cmd->opcode != MMC_WRITE_MULTIPLE_BLOCK)
return;
if (data->blksz * data->blocks <= 1024)
return;
host->n_minus_one = addr + (data->blksz * data->blocks) - 1024;
l2c_lock_mem_region(host->n_minus_one, 512);
}
static void octeon_mmc_dmar_fixup_done(struct cvm_mmc_host *host)
{
if (!host->n_minus_one)
return;
l2c_unlock_mem_region(host->n_minus_one, 512);
host->n_minus_one = 0;
}
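/*
 * Worked example of the EMMC-17978 fixup (hypothetical numbers): a
 * multi-block write of 8 x 512-byte blocks starting at addr spans 4096
 * bytes, so n_minus_one = addr + 4096 - 1024 = addr + 3072, and the
 * locked 512-byte region covers bytes 3072..3583 of the transfer, i.e.
 * its second-to-last block.
 */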
static int octeon_mmc_probe(struct platform_device *pdev)
{
struct device_node *cn, *node = pdev->dev.of_node;
struct cvm_mmc_host *host;
void __iomem *base;
int mmc_irq[9];
int i, ret = 0;
u64 val;
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
spin_lock_init(&host->irq_handler_lock);
sema_init(&host->mmc_serializer, 1);
host->dev = &pdev->dev;
host->acquire_bus = octeon_mmc_acquire_bus;
host->release_bus = octeon_mmc_release_bus;
host->int_enable = octeon_mmc_int_enable;
host->set_shared_power = octeon_mmc_set_shared_power;
if (OCTEON_IS_MODEL(OCTEON_CN6XXX) ||
OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
host->dmar_fixup = octeon_mmc_dmar_fixup;
host->dmar_fixup_done = octeon_mmc_dmar_fixup_done;
}
host->sys_freq = octeon_get_io_clock_rate();
if (of_device_is_compatible(node, "cavium,octeon-7890-mmc")) {
host->big_dma_addr = true;
host->need_irq_handler_lock = true;
host->has_ciu3 = true;
host->use_sg = true;
/*
* First seven are the EMM_INT bits 0..6, then two for
* the EMM_DMA_INT bits
*/
for (i = 0; i < 9; i++) {
mmc_irq[i] = platform_get_irq(pdev, i);
if (mmc_irq[i] < 0)
return mmc_irq[i];
/* work around legacy u-boot device trees */
irq_set_irq_type(mmc_irq[i], IRQ_TYPE_EDGE_RISING);
}
} else {
host->big_dma_addr = false;
host->need_irq_handler_lock = false;
host->has_ciu3 = false;
		/* The first IRQ is for EMM, the second for DMA */
for (i = 0; i < 2; i++) {
mmc_irq[i] = platform_get_irq(pdev, i);
if (mmc_irq[i] < 0)
return mmc_irq[i];
}
}
host->last_slot = -1;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
host->base = base;
host->reg_off = 0;
base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base))
return PTR_ERR(base);
host->dma_base = base;
/*
	 * To keep the register addresses shared we intentionally use
	 * a negative offset here; the first register used on Octeon
	 * therefore starts at 0x20 (MIO_EMM_DMA_CFG).
*/
host->reg_off_dma = -0x20;
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (ret)
return ret;
/*
* Clear out any pending interrupts that may be left over from
	 * the bootloader.
*/
val = readq(host->base + MIO_EMM_INT(host));
writeq(val, host->base + MIO_EMM_INT(host));
if (host->has_ciu3) {
/* Only CMD_DONE, DMA_DONE, CMD_ERR, DMA_ERR */
for (i = 1; i <= 4; i++) {
ret = devm_request_irq(&pdev->dev, mmc_irq[i],
cvm_mmc_interrupt,
0, cvm_mmc_irq_names[i], host);
if (ret < 0) {
dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
mmc_irq[i]);
return ret;
}
}
} else {
ret = devm_request_irq(&pdev->dev, mmc_irq[0],
cvm_mmc_interrupt, 0, KBUILD_MODNAME,
host);
if (ret < 0) {
dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
mmc_irq[0]);
return ret;
}
}
host->global_pwr_gpiod = devm_gpiod_get_optional(&pdev->dev,
"power",
GPIOD_OUT_HIGH);
if (IS_ERR(host->global_pwr_gpiod)) {
dev_err(&pdev->dev, "Invalid power GPIO\n");
return PTR_ERR(host->global_pwr_gpiod);
}
platform_set_drvdata(pdev, host);
i = 0;
for_each_child_of_node(node, cn) {
host->slot_pdev[i] =
of_platform_device_create(cn, NULL, &pdev->dev);
if (!host->slot_pdev[i]) {
i++;
continue;
}
ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
if (ret) {
dev_err(&pdev->dev, "Error populating slots\n");
octeon_mmc_set_shared_power(host, 0);
of_node_put(cn);
goto error;
}
i++;
}
return 0;
error:
for (i = 0; i < CAVIUM_MAX_MMC; i++) {
if (host->slot[i])
cvm_mmc_of_slot_remove(host->slot[i]);
if (host->slot_pdev[i])
of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
}
return ret;
}
static void octeon_mmc_remove(struct platform_device *pdev)
{
struct cvm_mmc_host *host = platform_get_drvdata(pdev);
u64 dma_cfg;
int i;
for (i = 0; i < CAVIUM_MAX_MMC; i++)
if (host->slot[i])
cvm_mmc_of_slot_remove(host->slot[i]);
dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
octeon_mmc_set_shared_power(host, 0);
}
static const struct of_device_id octeon_mmc_match[] = {
{
.compatible = "cavium,octeon-6130-mmc",
},
{
.compatible = "cavium,octeon-7890-mmc",
},
{},
};
MODULE_DEVICE_TABLE(of, octeon_mmc_match);
static struct platform_driver octeon_mmc_driver = {
.probe = octeon_mmc_probe,
.remove_new = octeon_mmc_remove,
.driver = {
.name = KBUILD_MODNAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = octeon_mmc_match,
},
};
module_platform_driver(octeon_mmc_driver);
MODULE_AUTHOR("Cavium Inc. <[email protected]>");
MODULE_DESCRIPTION("Low-level driver for Cavium OCTEON MMC/SSD card");
MODULE_LICENSE("GPL");
| linux-master | drivers/mmc/host/cavium-octeon.c |